diff --git a/.gitignore b/.gitignore index 5e1669b687d12ede6f94a5033432c1dcc799c299..985d6989e02f7583ec1b72f32bde79147d615d2d 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ +*.DS_Store .vscode/ /doc/fluid/menu.zh.json /doc/fluid/menu.en.json diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e06e02f312e8c594a28249e7e9d32eb5a60bf7e9 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,42 @@ +- repo: https://github.com/pre-commit/mirrors-yapf.git + sha: v0.16.0 + hooks: + - id: yapf + files: \.py$ +- repo: https://github.com/pre-commit/pre-commit-hooks + sha: a11d9314b22d8f8c7556443875b731ef05965464 + hooks: + - id: check-merge-conflict + - id: check-symlinks + - id: detect-private-key + files: (?!.*paddle)^.*$ + - id: end-of-file-fixer + files: \.md$ + - id: trailing-whitespace + files: \.md$ +- repo: https://github.com/Lucas-C/pre-commit-hooks + sha: v1.0.1 + hooks: + - id: forbid-crlf + files: \.md$ + - id: remove-crlf + files: \.md$ + - id: forbid-tabs + files: \.md$ + - id: remove-tabs + files: \.md$ +- repo: https://github.com/reyoung/pre-commit-hooks-jinja-compile.git + sha: 4a369cc72a4a2b8d3813ab8cc17abb5f5b21ef6c + hooks: + - id: convert-jinja2-into-html + # The argument means repleace filename from pattern `.*/([^/]*)\.tmpl` to `\1` + args: ['--filename_pattern=.*/([^/]*)\.tmpl', '--filename_repl=\1'] +- repo: local + hooks: + - id: convert-markdown-into-html + name: convert-markdown-into-html + description: Convert README.md into index.html and README.cn.md into index.cn.html + entry: python .pre-commit-hooks/convert_markdown_into_html.py + language: system + files: .+README(\.cn)?\.md$ + diff --git a/.pre-commit-hooks/convert_markdown_into_html.py b/.pre-commit-hooks/convert_markdown_into_html.py new file mode 100644 index 0000000000000000000000000000000000000000..4f1db82b3ff085cc2e027bfc024d1bb8d87d6be8 --- /dev/null +++ b/.pre-commit-hooks/convert_markdown_into_html.py @@ -0,0 +1,95 @@ +import argparse +import re +import sys + +HEAD = """ + + + + + + + + + + + + + + + + +
+
+ + + + + + + +""" + + +def convert_markdown_into_html(argv=None): + parser = argparse.ArgumentParser() + parser.add_argument('filenames', nargs='*', help='Filenames to fix') + args = parser.parse_args(argv) + + retv = 0 + + for filename in args.filenames: + with open( + re.sub(r"README", "index", re.sub(r"\.md$", ".html", + filename)), "w") as output: + output.write(HEAD) + with open(filename) as input: + for line in input: + output.write(line) + output.write(TAIL) + + return retv + + +if __name__ == '__main__': + sys.exit(convert_markdown_into_html()) diff --git a/.pre-commit-hooks/convert_markdown_into_ipynb.sh b/.pre-commit-hooks/convert_markdown_into_ipynb.sh new file mode 100644 index 0000000000000000000000000000000000000000..dbcb1046d82010f776792287b45abdebf5b097ee --- /dev/null +++ b/.pre-commit-hooks/convert_markdown_into_ipynb.sh @@ -0,0 +1,9 @@ +#!/bin/sh +for file in $@ ; do + markdown-to-ipynb < $file > ${file%.*}".ipynb" + if [ $? -ne 0 ]; then + echo >&2 "markdown-to-ipynb $file error" + exit 1 + fi +done + diff --git a/ci_scripts/api_white_list.txt b/ci_scripts/api_white_list.txt new file mode 100644 index 0000000000000000000000000000000000000000..a50852af163ac286d3e8cba504078e746783fd79 --- /dev/null +++ b/ci_scripts/api_white_list.txt @@ -0,0 +1,10 @@ +paddle/fluid/DistributeTranspiler_cn.rst +paddle/fluid/DistributeTranspilerConfig_cn.rst +paddle/fluid/transpiler/HashName_cn.rst +paddle/fluid/memory_optimize_cn.rst +paddle/fluid/release_memory_cn.rst +paddle/optimizer/Dpsgd_cn.rst +paddle/reader/ComposeNotAligned_cn.rst +paddle/fluid/layers/scatter_cn.rst +paddle/tensor/manipulation/scatter_cn.rst +paddle/distributed/fleet/Fleet_cn.rst diff --git a/ci_scripts/check_api_cn.sh b/ci_scripts/check_api_cn.sh new file mode 100644 index 0000000000000000000000000000000000000000..c2cd521f9376d5e2bc0a255b6a8d47bf183a5ad6 --- /dev/null +++ b/ci_scripts/check_api_cn.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +git_files=`git diff --numstat upstream/$BRANCH | awk '{print $NF}'` + +for file in `echo $git_files`;do + grep "code-block" ../$file + if [ $? -eq 0 ] ;then + echo $file | grep "doc/paddle/api/paddle/.*_cn.rst" + if [ $? -eq 0 ];then + api_file=`echo $file | sed 's#doc/paddle/api/##g'` + grep -w "${api_file}" ${DIR_PATH}/api_white_list.txt + if [ $? -ne 0 ];then + python chinese_samplecode_processor.py ../$file + if [ $? -ne 0 ];then + echo "chinese sample code failed" + exit 5 + fi + fi + fi + fi +done + diff --git a/ci_scripts/check_code.sh b/ci_scripts/check_code.sh new file mode 100644 index 0000000000000000000000000000000000000000..3841b912fbdce3fe4343056ce69e46aaece9c5e2 --- /dev/null +++ b/ci_scripts/check_code.sh @@ -0,0 +1,60 @@ +#!/usr/bin/env bash + +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +#================================================= +# Utils +#================================================= + +set -ex + +if [ -z ${BRANCH} ]; then + BRANCH="develop" +fi + +BENCHMARK_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}")/.." && pwd )" +echo ${BENCHMARK_ROOT} + +function prepare_env(){ + # Install tensorflow and other packages + pip install pre-commit==1.21 pylint==1.9.5 pytest==4.6.9 +} + +function abort(){ + echo "Your change doesn't follow benchmark's code style." 1>&2 + echo "Please use pre-commit to check what is wrong." 1>&2 + exit 1 +} + + +function check_style(){ + trap 'abort' 0 + pre-commit install + commit_files=on + for file_name in `git diff --numstat upstream/$BRANCH| awk '{print $NF}'`;do + if ! pre-commit run --files ../$file_name ; then + git diff + commit_files=off + fi + done + if [ $commit_files == 'off' ];then + echo "code format error" + exit 1 + fi + trap 0 +} + +prepare_env +check_style diff --git a/ci_scripts/check_pr_approval.py b/ci_scripts/check_pr_approval.py new file mode 100644 index 0000000000000000000000000000000000000000..937b0be7562fab93157c16b942631f0a580dfc68 --- /dev/null +++ b/ci_scripts/check_pr_approval.py @@ -0,0 +1,49 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function +import sys +import json + + +def check_approval(count, required_reviewers): + json_buff = "" + for line in sys.stdin: + json_buff = "".join([json_buff, line]) + json_resp = json.loads(json_buff) + approves = 0 + approved_user_ids = [] + for review in json_resp: + if review["state"] == "APPROVED": + approves += 1 + approved_user_ids.append(review["user"]["id"]) + + # convert to int + required_reviewers_int = set() + for rr in required_reviewers: + required_reviewers_int.add(int(rr)) + + if len(set(approved_user_ids) & required_reviewers_int) >= count: + print("TRUE") + else: + print("FALSE") + + +if __name__ == "__main__": + if len(sys.argv) > 1 and sys.argv[1].isdigit(): + check_approval(int(sys.argv[1]), sys.argv[2:]) + else: + print( + "Usage: python check_pr_approval.py [count] [required reviewer id] ..." 
+ ) diff --git a/ci_scripts/checkapproval.sh b/ci_scripts/checkapproval.sh new file mode 100644 index 0000000000000000000000000000000000000000..9bd4b99296598c78df81ec65d533fe8cab4ecdc4 --- /dev/null +++ b/ci_scripts/checkapproval.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +API_FILES=("doc/paddle/api/paddle") + +for API_FILE in ${API_FILES[*]}; do + API_CHANGE=`git diff --name-only upstream/$BRANCH | grep "${API_FILE}"` + if [ "${API_CHANGE}" ];then + approval_line=`curl -H "Authorization: token ${GITHUB_API_TOKEN}" https://api.github.com/repos/PaddlePaddle/FluidDoc/pulls/${GIT_PR_ID}/reviews?per_page=10000` + if [ "${API_FILE}" == "doc/paddle/api/paddle" ];then + APPROVALS=`echo ${approval_line} | python ./check_pr_approval.py 1 2870059 27208573 29231 28379894 23093488 11935832` + fi + fi + if [ "${APPROVALS}" == "FALSE" ]; then + if [ "${API_FILE}" == "doc/paddle/api/paddle" ];then + echo "You must have one TPM (saxon-zh or swtkiwi or jzhang533 or Heeenrrry or dingjiaweiww or TCChenlong) approval for the api change! ${API_FILE} for the management reason of API interface and API document." + fi + exit 1 + fi +done + diff --git a/ci_scripts/chinese_samplecode_processor.py b/ci_scripts/chinese_samplecode_processor.py new file mode 100644 index 0000000000000000000000000000000000000000..1c40523fc1f2c8fbb02985f67c895b5f8b7c6506 --- /dev/null +++ b/ci_scripts/chinese_samplecode_processor.py @@ -0,0 +1,234 @@ +import math +import os +import pickle +import shutil +import subprocess +import multiprocessing +import sys + + +def remove_desc_code(srcls, filename): + if filename == 'fluid_cn/one_hot_cn.rst': + srcls.pop(13) + srcls.pop(28) + srcls.pop(44) + if filename == 'layers_cn/one_hot_cn.rst': + srcls.pop(15) + srcls.pop(30) + srcls.pop(46) + if filename == 'profiler_cn/profiler_cn.rst': + srcls.pop(41) + if filename == 'layers_cn/natural_exp_decay_cn.rst': + srcls.pop(13) + if filename == 'layers_cn/transpose_cn.rst': + srcls.pop(20) + if filename == 'layers_cn/array_length_cn.rst': + srcls.pop(36) + if filename == 'layers_cn/inverse_time_decay_cn.rst': + srcls.pop(13) + if filename == 'layers_cn/stack_cn.rst': + srcls.pop(12) + srcls.pop(33) + if filename == 'layers_cn/sums_cn.rst': + srcls.pop(11) + if filename == 'layers_cn/sum_cn.rst': + for i in range(len(srcls) - 1, 61, -1): + srcls.pop(i) + if filename == 'layers_cn/softmax_cn.rst': + srcls.pop(30) + srcls.pop(57) + if filename == 'layers_cn/array_write_cn.rst': + srcls.pop(37) + if filename == 'layers_cn/lod_append_cn.rst': + srcls.pop(11) + if filename == 'layers_cn/reorder_lod_tensor_by_rank_cn.rst': + srcls.pop(25) + if filename == 'layers_cn/round_cn.rst': + srcls.pop(10) + if filename == 'layers_cn/squeeze_cn.rst': + srcls.pop(11) + srcls.pop(19) + srcls.pop(27) + if filename == 'layers_cn/unsqueeze_cn.rst': + srcls.pop(11) + if filename == 'layers_cn/array_read_cn.rst': + srcls.pop(51) + if filename == 'layers_cn/scatter_cn.rst': + srcls.pop(9) + if filename == 'layers_cn/topk_cn.rst': + srcls.pop(11) + if filename == 'optimizer_cn/ModelAverage_cn.rst': + srcls.pop(15) + return srcls + + +def check_indent(code_line): + indent = "" + for c in code_line: + if c == '\t': + indent += ' ' + elif c == ' ': + indent += ' ' + if c != ' ' and c != '\t': + break + return indent + + +def find_all(src_str, substr): + indices = [] + get_one = src_str.find(substr) + while get_one != -1: + indices.append(get_one) + get_one = src_str.find(substr, get_one + 1) + return indices + + +def extract_sample_code(srcfile, status_all): + filename = srcfile.name + 
srcc = srcfile.read() + srcfile.seek(0, 0) + srcls = srcfile.readlines() + srcls = remove_desc_code( + srcls, filename) # remove description info for samplecode + status = [] + sample_code_begins = find_all(srcc, " code-block:: python") + if len(sample_code_begins) == 0: + status.append(-1) + + else: + for i in range(0, len(srcls)): + if srcls[i].find(".. code-block:: python") != -1: + content = "" + start = i + + blank_line = 1 + while srcls[start + blank_line].strip() == '': + blank_line += 1 + + startindent = "" + # remove indent error + if srcls[start + blank_line].find("from") != -1: + startindent += srcls[start + blank_line][:srcls[ + start + blank_line].find("from")] + elif srcls[start + blank_line].find("import") != -1: + startindent += srcls[start + blank_line][:srcls[ + start + blank_line].find("import")] + else: + startindent += check_indent(srcls[start + blank_line]) + content += srcls[start + blank_line][len(startindent):] + for j in range(start + blank_line + 1, len(srcls)): + # planish a blank line + if not srcls[j].startswith(startindent) and srcls[ + j] != '\n': + break + if srcls[j].find(" code-block:: python") != -1: + break + content += srcls[j].replace(startindent, "", 1) + status.append(run_sample_code(content, filename)) + + status_all[filename] = status + return status_all + + +def run_sample_code(content, filename): + # three status ,-1:no sample code; 1: running error; 0:normal + fname = filename.split("/")[-1].replace("_cn", "").replace(".rst", + "") + ".py" + tempf = open("temp/" + fname, 'w') + content = "# -*- coding: utf-8 -*-\n" + content + tempf.write(content) + tempf.close() + cmd = ["python", "temp/" + fname] + + subprc = subprocess.Popen( + cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + _, error = subprc.communicate() + err = "".join(error.decode(encoding='utf-8')) + + if subprc.returncode != 0: + print("\nSample code error found in ", filename, ":\n") + print(err) + status = 1 + else: + status = 0 + os.remove("temp/" + fname) + return status + + +def test(file): + temp = [] + src = open(file, 'r') + status_all = {} + extract_sample_code(src, status_all) + temp.append(status_all) + src.close() + return temp + + +if os.path.isdir("temp"): + shutil.rmtree("temp") +if os.path.isdir("infer_model"): + shutil.rmtree("infer_model") +if os.path.isdir("image"): + shutil.rmtree("image") +if os.path.isdir("my_paddle_model"): + shutil.rmtree("my_paddle_model") +if os.path.isdir("my_paddle_vars"): + shutil.rmtree("my_paddle_vars") + +if not os.path.isdir("temp"): + os.mkdir("temp") + +output = [] + +if len(sys.argv) < 2: + print("Error: inadequate number of arguments") + print("Please one file") + sys.exit(1) +else: + if not os.path.exists(sys.argv[1]): + print("File not found") + sys.exit(1) + res = test(sys.argv[1]) + output.append(res) + +status_groups = {-1: [], 0: [], 1: []} +# polishes show format +ci_pass = True +for one_file in output: + for dicts in one_file: + for key in dicts: + status = dicts[key] + for ele in status: + if ele != 0: + ci_pass = False + break + if len(status) == 1: + status_groups[status[0]].append(key) + else: + for u in range(0, len(status)): + status_groups[status[u]].append(key + '_' + str(u + 1)) + +error_api = status_groups[-1] + status_groups[1] +total_error_number = len(error_api) + +print("****************************************************") +print("----------------End of the Check--------------------") +print("****************************************************") +if total_error_number > 0: + print("Error sample code 
number is:{}".format(total_error_number)) + type_one_number = len(status_groups[-1]) + type_two_number = len(status_groups[1]) + if type_one_number > 0: + print("Error type one sample number is:{}".format(type_one_number)) + print("Error raised from type one:no sample code.", + str(status_groups[-1])) + if type_two_number > 0: + print("Error type two sample number is:{}".format(type_two_number)) + print("Error raised from type two:running error sample code.", + str(status_groups[1])) +if not ci_pass: + print("Mistakes found in sample codes.") + exit(1) +else: + print("Sample code check is successful!") diff --git a/ci_scripts/ci_start.sh b/ci_scripts/ci_start.sh new file mode 100644 index 0000000000000000000000000000000000000000..d1d2773fd3b4ea354af08a9bd02a2416734c397a --- /dev/null +++ b/ci_scripts/ci_start.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +export DIR_PATH=${PWD} + +/bin/bash ${DIR_PATH}/check_code.sh +if [ $? -ne 0 ];then + echo "code format error" + exit 1 +fi + +/bin/bash -x ${DIR_PATH}/check_api_cn.sh +if [ $? -ne 0 ];then + exit 1 +fi + +/bin/bash ${DIR_PATH}/checkapproval.sh \ No newline at end of file diff --git a/doc/fluid/advanced_guide/addon_development/contribute_code/local_dev_guide.md b/doc/fluid/advanced_guide/addon_development/contribute_code/local_dev_guide.md index 93f4d5cb950a1559928964ffe3de245600e43a6a..d9c1f4f5bd641fe1ca037ee499997cdedbcd408a 100644 --- a/doc/fluid/advanced_guide/addon_development/contribute_code/local_dev_guide.md +++ b/doc/fluid/advanced_guide/addon_development/contribute_code/local_dev_guide.md @@ -9,7 +9,24 @@ - 通过所有单元测试。 - 请遵守[提交代码的一些约定](#提交代码的一些约定)。 -以下教程将指导您提交代码。 + +## 使用官方开发镜像(推荐) + +``` +# 第一次启动(CPU开发) +docker run -it --cpu-shares=20000 --name=username --net=host --privileged --rm -v $(pwd):/Paddle hub.baidubce.com/paddlepaddle/paddle:latest-dev /bin/bash +# 第一次启动(GPU开发) +nvidia-docker run -it --cpu-shares=20000 --name=username --net=host --privileged --rm -v $(pwd):/Paddle hub.baidubce.com/paddlepaddle/paddle:latest-dev /bin/bash +# 后面几次启动 +docker exec -it username bash +``` + +不同开发者启动docker的命令不一样,以上只是推荐命令。如果使用自己习惯的命令,一定要加参数--privileged(GPU的CUPTI库调用需要) + +**推荐使用官方开发镜像 hub.baidubce.com/paddlepaddle/paddle:latest-dev 提交代码。** + +**以下教程将指导您提交代码。** + ## [Fork](https://help.github.com/articles/fork-a-repo/) 跳转到[PaddlePaddle](https://github.com/PaddlePaddle/Paddle) GitHub首页,然后单击 `Fork` 按钮,生成自己目录下的仓库,比如 。 @@ -42,7 +59,7 @@ Paddle 目前使用[Git流分支模型](http://nvie.com/posts/a-successful-git-b Paddle 开发人员使用 [pre-commit](http://pre-commit.com/) 工具来管理 Git 预提交钩子。 它可以帮助我们格式化源代码(C++,Python),在提交(commit)前自动检查一些基本事宜(如每个文件只有一个 EOL,Git 中不要添加大文件等)。 -`pre-commit`测试是 Travis-CI 中单元测试的一部分,不满足钩子的 PR 不能被提交到 Paddle,首先安装并在当前目录运行它: +`pre-commit`测试是 CI 中单元测试的一部分,不满足钩子的 PR 不能被提交到 Paddle,首先安装并在当前目录运行它: ```bash ➜ pip install pre-commit @@ -51,7 +68,7 @@ Paddle 开发人员使用 [pre-commit](http://pre-commit.com/) 工具来管理 G Paddle 使用 `clang-format` 来调整 C/C++ 源代码格式,请确保 `clang-format` 版本在 3.8 以上。 -注:通过`pip install pre-commit`和`conda install -c conda-forge pre-commit`安装的`yapf`稍有不同的,Paddle 开发人员使用的是`pip install pre-commit`。 +注:通过`pip install pre-commit`和`conda install -c conda-forge pre-commit`安装的`yapf`稍有不同的,Paddle 开发人员使用的是`pip install pre-commit`,使用Paddle docker镜像会自带`pre-commit`不需要单独安装。 ## 开始开发 @@ -66,19 +83,53 @@ Changes not staged for commit: (use "git add ..." to update what will be committed) (use "git checkout -- ..." to discard changes in working directory) - modified: README.md + modified: README.md Untracked files: (use "git add ..." 
to include in what will be committed) - test + test no changes added to commit (use "git add" and/or "git commit -a") ``` -## 编译和单元测试 +## 编译 + +创建并进入/Paddle/build路径下: + + mkdir -p /Paddle/build && cd /Paddle/build + +执行cmake: + + + * 对于需要编译**CPU版本PaddlePaddle**的用户: + + For Python2: cmake .. -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release + For Python3: cmake .. -DPY_VERSION=3.5 -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release + + * 对于需要编译**GPU版本PaddlePaddle**的用户: + + For Python2: cmake .. -DWITH_GPU=ON -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release + For Python3: cmake .. -DPY_VERSION=3.5 -DWITH_GPU=ON -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release + +执行编译: + + make -j$(nproc) + + 如:make -j16,使用16核编译 + +安装编译好的whl包:首先进入/Paddle/build/python/dist目录下找到生成的.whl包后,然后当前机器或目标机器安装编译好的.whl包: + + For Python2: pip install -U(whl包的名字) + For Python3: pip3.5 install -U(whl包的名字) 关于编译 PaddlePaddle 的源码,请参见[从源码编译](../../../install/compile/fromsource.html) 选择对应的操作系统。 + +## 单元测试 + + 单测运行(重复运行多次,避免随机失败)如重复运行100次的命令如下: + ctest --repeat-until-fail 100 -R test_xx + 关于单元测试,可参考[Op单元测试](../new_op/new_op.html#id7) 的运行方法。 ## 提交(commit) @@ -92,7 +143,7 @@ On branch test Untracked files: (use "git add ..." to include in what will be committed) - test + test nothing added to commit but untracked files present (use "git add" to track) ➜ git add test @@ -115,15 +166,6 @@ clang-formater.......................................(no files to check)Skipped create mode 100644 233 ``` - 需要注意的是:您需要在commit中添加说明(commit message)以触发CI单测,写法如下: - -```bash -# 触发develop分支的CI单测 -➜ git commit -m "test=develop" - -# 触发release/1.1分支的CI单侧 -➜ git commit -m "test=release/1.1" -``` ## 保持本地仓库最新 @@ -135,8 +177,8 @@ clang-formater.......................................(no files to check)Skipped ➜ git remote origin ➜ git remote -v -origin https://github.com/USERNAME/Paddle (fetch) -origin https://github.com/USERNAME/Paddle (push) +origin https://github.com/USERNAME/Paddle (fetch) +origin https://github.com/USERNAME/Paddle (push) ``` 这里 origin 是我们 clone 的远程仓库的名字,也就是自己用户名下的 Paddle,接下来我们创建一个原始 Paddle 仓库的远程主机,命名为 upstream。 diff --git a/doc/fluid/advanced_guide/addon_development/contribute_code/local_dev_guide_en.md b/doc/fluid/advanced_guide/addon_development/contribute_code/local_dev_guide_en.md index 48a65cb6573fef3aeb6a5cbc7d9b88cb52a7b5bf..3158b23326094b7a2da4f1f87445d6518ea5f57a 100644 --- a/doc/fluid/advanced_guide/addon_development/contribute_code/local_dev_guide_en.md +++ b/doc/fluid/advanced_guide/addon_development/contribute_code/local_dev_guide_en.md @@ -9,7 +9,22 @@ You will learn how to develop programs in local environment under the guidelines - Pass through all unit tests. - Please follow [regulations of submitting codes](#regulations of submitting codes). -The following guidiance tells you how to submit code. +## Use official development images(recommended) + +``` +# First start(CPU development) +docker run -it --cpu-shares=20000 --name=username --net=host --privileged --rm -v $(pwd):/Paddle hub.baidubce.com/paddlepaddle/paddle:latest-dev /bin/bash +# First start(GPU development) +nvidia-docker run -it --cpu-shares=20000 --name=username --net=host --privileged --rm -v $(pwd):/Paddle hub.baidubce.com/paddlepaddle/paddle:latest-dev /bin/bash +# Next start +docker exec -it username bash +``` +Different developers have different commands to start docker. The above are only recommended commands. 
If you use the command you are used to, you must add the parameter --privileged (needed by the GPU CUPTI library call) + +**It is recommended to use the official development mirror hub.baidubce.com/paddlepaddle/paddle:latest-dev to submit the code.** + +**The following guidiance tells you how to submit code.** + ## [Fork](https://help.github.com/articles/fork-a-repo/) Transfer to the home page of Github [PaddlePaddle](https://github.com/PaddlePaddle/Paddle) ,and then click button `Fork` to generate the git under your own file directory,such as 。 @@ -44,7 +59,7 @@ It is worth noting that before the checkout, you need to keep the current branch Paddle developers use the [pre-commit](http://pre-commit.com/) tool to manage Git pre-commit hooks. It helps us format the source code (C++, Python) and automatically check some basic things before committing (such as having only one EOL per file, not adding large files in Git, etc.). -The `pre-commit` test is part of the unit test in Travis-CI. A PR that does not satisfy the hook cannot be submitted to Paddle. Install `pre-commit` first and then run it in current directory: +The `pre-commit` test is part of the unit test in CI. A PR that does not satisfy the hook cannot be submitted to Paddle. Install `pre-commit` first and then run it in current directory: ```bash @@ -54,7 +69,7 @@ The `pre-commit` test is part of the unit test in Travis-CI. A PR that does not Paddle modify the format of C/C++ source code with `clang-format` .Make sure the version of `clang-format` is above 3.8. -Note:There are differences between the installation of `yapf` with `pip install pre-commit` and that with `conda install -c conda-forge pre-commit` . Paddle developers use `pip install pre-commit` 。 +Note:There are differences between the installation of `yapf` with `pip install pre-commit` and that with `conda install -c conda-forge pre-commit` . Paddle developers use `pip install pre-commit`, Using Paddle docker image will `pre-commit`without separate installation . ## Start development @@ -76,7 +91,45 @@ Untracked files: no changes added to commit (use "git add" and/or "git commit -a") ``` -## Build and test +## Build + +Create and enter the /Paddle/build path + + mkdir -p /Paddle/build && cd /Paddle/build + +Execute cmake: + + + * For users who need to compile the **CPU version PaddlePaddle**: + + For Python2: cmake .. -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release + For Python3: cmake .. -DPY_VERSION=3.5 -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release + + + * For users who need to compile the **GPU version PaddlePaddle**: + + For Python2: cmake .. -DWITH_GPU=ON -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release + For Python3: cmake .. -DPY_VERSION=3.5 -DWITH_GPU=ON -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release + + +Execute compilation: + + make -j$(nproc) + + Such as: make -j16, using 16 core compilation + +After compiling successfully, go to the `/paddle/build/python/dist` directory and find the generated `.whl` package.Install the compiled .whl package on the current machine or target machine: + + For Python2: pip install -U(whl package name) + For Python3: pip3.5 install -U(whl package name) + +Please refer to [Compile From Source Code](../../../install/compile/fromsource_en.html) about more information of building PaddlePaddle source codes. 
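After installing the wheel, it helps to confirm that the interpreter picks up the build you just made and that it matches the configuration you compiled (CPU or GPU). The snippet below is a minimal sketch, assuming the `fluid.install_check` utility shipped with recent Fluid releases:

```python
# Quick sanity check of the freshly installed wheel (minimal sketch;
# fluid.install_check is assumed to be available, as in recent Fluid releases).
import paddle
import paddle.fluid as fluid

print(paddle.__version__)                  # version string of the wheel you built
print(fluid.core.is_compiled_with_cuda())  # True for a -DWITH_GPU=ON build

# Runs a tiny fc network end to end and reports whether the install is usable.
fluid.install_check.run_check()
```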
+ +## Test + + Run Test (Run 100 times) + ctest --repeat-until-fail 100 -R test_xx + Please refer to [Compile From Source Code](../../../install/compile/fromsource_en.html) about more information of building PaddlePaddle source codes. Please refer to [Op Unit Tests](../new_op/new_op_en.html#unit-tests) about more information of running unit tests. @@ -113,14 +166,6 @@ clang-formater.......................................(no files to check)Skipped create mode 100644 233 ``` - Attention needs to be paid:you need to add commit message to trigger CI test.The command is as follows: - -```bash -# Touch CI single test of develop branch -➜ git commit -m "test=develop" -# Touch CI single test of release/1.1 branch -➜ git commit -m "test=release/1.1" -``` ## Keep the latest local repository diff --git a/doc/fluid/advanced_guide/addon_development/contribute_code/submit_pr_guide.md b/doc/fluid/advanced_guide/addon_development/contribute_code/submit_pr_guide.md index a43d38ee5470494757f59b94e9248010f1c7c775..06cb8c07caa63adbe3198d69c3727b2a0ba2ba11 100644 --- a/doc/fluid/advanced_guide/addon_development/contribute_code/submit_pr_guide.md +++ b/doc/fluid/advanced_guide/addon_development/contribute_code/submit_pr_guide.md @@ -26,7 +26,7 @@
- +
diff --git a/doc/fluid/advanced_guide/addon_development/contribute_code/submit_pr_guide_en.md b/doc/fluid/advanced_guide/addon_development/contribute_code/submit_pr_guide_en.md index d71d92e3632d323e95fa45342c822183ab844e45..ee28c9f91eb62e710f58342044277a45898ee7eb 100644 --- a/doc/fluid/advanced_guide/addon_development/contribute_code/submit_pr_guide_en.md +++ b/doc/fluid/advanced_guide/addon_development/contribute_code/submit_pr_guide_en.md @@ -26,7 +26,7 @@ For the first time to submit Pull Request,you need to sign CLA(Contributor Licen
- +
diff --git a/doc/fluid/advanced_guide/addon_development/design_idea/fluid_design_idea.md b/doc/fluid/advanced_guide/addon_development/design_idea/fluid_design_idea.md index f56a35f1684504ec7b370342a0dc1ee8613061e3..0a321990f6aff2633c91de366c315d875d9425e5 100644 --- a/doc/fluid/advanced_guide/addon_development/design_idea/fluid_design_idea.md +++ b/doc/fluid/advanced_guide/addon_development/design_idea/fluid_design_idea.md @@ -56,12 +56,20 @@ blocks中包含: block的概念与通用程序一致,例如在下列这段C++代码中包含三个block: ``` cpp -int main(){ //block 0 - int i = 0; - if (i<10){ //block 1 - for (int j=0;j<10;j++){ //block 2 - } +#include + +int main() { + int x = 5; // block 0 + int y = 4; // block 0 + int out; // block 0 + + if (x < y) { // block 0 + out = 1; // block 1 + } else { + out = 0; // block 2 } + + std::cout << out << std::endl; return 0; } ``` @@ -69,27 +77,20 @@ int main(){ //block 0 类似的,在下列 Paddle 的 Program 包含3段block: ```python -import paddle.fluid as fluid # block 0 - -limit = fluid.layers.fill_constant_batch_size_like( - input=label, dtype='int64', shape=[1], value=5.0) -cond = fluid.layers.less_than(x=label, y=limit) - -ie = fluid.layers.IfElse(cond) -with ie.true_block(): # block 1 - true_image = ie.input(image) - hidden = fluid.layers.fc(input=true_image, size=100, act='tanh') - prob = fluid.layers.fc(input=hidden, size=10, act='softmax') - ie.output(prob) - -with ie.false_block(): # block 2 - false_image = ie.input(image) - hidden = fluid.layers.fc( - input=false_image, size=200, act='tanh') - prob = fluid.layers.fc(input=hidden, size=10, act='softmax') - ie.output(prob) - -prob = ie() +import paddle.fluid as fluid + +x = fluid.data(name='x', shape=[1], dtype='int64') # block 0 +y = fluid.data(name='y', shape=[1], dtype='int64') # block 0 + +def true_block(): + return fluid.layers.fill_constant(dtype='int64', value=1, shape=[1]) # block 1 + +def false_block(): + return fluid.layers.fill_constant(dtype='int64', value=0, shape=[1]) # block 2 + +condition = fluid.layers.less_than(x, y) # block 0 + +out = fluid.layers.cond(condition, true_block, false_block) # block 0 ``` ### BlockDesc and ProgramDesc diff --git a/doc/fluid/advanced_guide/addon_development/design_idea/fluid_design_idea_en.md b/doc/fluid/advanced_guide/addon_development/design_idea/fluid_design_idea_en.md index 9cb2821a4964ca752d3be03631909a6dd6d9431a..1830096f1e53ab544348cbedb107962307773564 100644 --- a/doc/fluid/advanced_guide/addon_development/design_idea/fluid_design_idea_en.md +++ b/doc/fluid/advanced_guide/addon_development/design_idea/fluid_design_idea_en.md @@ -59,40 +59,41 @@ The blocks contain: The concept of block is the same with that in generic programs. 
For example, there are three blocks in the following C++ code: ``` cpp -int main(){ //block 0 - int i = 0; - if (i<10){ //block 1 - for (int j=0;j<10;j++){ //block 2 - } - } - return 0; +#include + +int main() { + int x = 5; // block 0 + int y = 4; // block 0 + int out; // block 0 + + if (x < y) { // block 0 + out = 1; // block 1 + } else { + out = 0; // block 2 + } + + std::cout << out << std::endl; + return 0; } ``` Similarly, the following Program contains 3 blocks: ```python -import paddle.fluid as fluid # block 0 - -limit = fluid.layers.fill_constant_batch_size_like( - Input=label, dtype='int64', shape=[1], value=5.0) -cond = fluid.layers.less_than(x=label, y=limit) - -ie = fluid.layers.IfElse(cond) -with ie.true_block(): # block 1 - true_image = ie.input(image) - hidden = fluid.layers.fc(input=true_image, size=100, act='tanh') - prob = fluid.layers.fc(input=hidden, size=10, act='softmax') - ie.output(prob) - -with ie.false_block(): # block 2 - false_image = ie.input(image) - hidden = fluid.layers.fc( - input=false_image, size=200, act='tanh') - prob = fluid.layers.fc(input=hidden, size=10, act='softmax') - ie.output(prob) - -prob = ie() +import paddle.fluid as fluid + +x = fluid.data(name='x', shape=[1], dtype='int64') # block 0 +y = fluid.data(name='y', shape=[1], dtype='int64') # block 0 + +def true_block(): + return fluid.layers.fill_constant(dtype='int64', value=1, shape=[1]) # block 1 + +def false_block(): + return fluid.layers.fill_constant(dtype='int64', value=0, shape=[1]) # block 2 + +condition = fluid.layers.less_than(x, y) # block 0 + +out = fluid.layers.cond(condition, true_block, false_block) # block 0 ``` ### BlockDesc and ProgramDesc @@ -229,8 +230,8 @@ import numpy train_data=numpy.array([[1.0],[2.0],[3.0],[4.0]]).astype('float32') y_true = numpy.array([[2.0],[4.0],[6.0],[8.0]]).astype('float32') # Define the network -x = fluid.layers.data(name="x",shape=[1],dtype='float32') -y = fluid.layers.data(name="y",shape=[1],dtype='float32') +x = fluid.data(name="x",shape=[None, 1],dtype='float32') +y = fluid.data(name="y",shape=[None, 1],dtype='float32') y_predict = fluid.layers.fc(input=x,size=1,act=None) #definition loss function cost = fluid.layers.square_error_cost(input=y_predict,label=y) @@ -299,7 +300,7 @@ As you can see from the output, the entire definition process is transformed int BlockDesc contains defined vars and a series of ops. Take input x as an example. In python code, x is 1D data of data type "float 32": ```python -x = fluid.layers.data(name="x",shape=[1],dtype='float32') +x = fluid.data(name="x",shape=[None, 1],dtype='float32') ``` In BlockDesc, the variable x is described as: ``` @@ -348,7 +349,7 @@ Since there are multiple columns of incoming and outgoing data, fluid defines tr ```python # Start training outs = exe.run( - feed={'x':train_data,'y':y_true}, + feed={'x':train_data,'y':y_true}, fetch_list=[y_predict.name,avg_cost.name]) ``` The above code defines that train_data is to be passed into the x variable, y_true is to be passed into the y variable, and output the predicted value of y and the last round value of cost. 
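To see the `ProgramDesc`/`BlockDesc` structure described above for a concrete network, the `Program` object can be inspected directly from Python. The following is a minimal sketch using the Fluid 1.x API shown in this document; it builds the same `cond` example and prints its blocks and operators:

```python
import paddle.fluid as fluid

x = fluid.data(name='x', shape=[1], dtype='int64')
y = fluid.data(name='y', shape=[1], dtype='int64')
condition = fluid.layers.less_than(x, y)
out = fluid.layers.cond(
    condition,
    lambda: fluid.layers.fill_constant(dtype='int64', value=1, shape=[1]),
    lambda: fluid.layers.fill_constant(dtype='int64', value=0, shape=[1]))

prog = fluid.default_main_program()
print(prog.num_blocks)  # 3: block 0 plus one block per branch
for i in range(prog.num_blocks):
    # each BlockDesc holds its own vars and ops
    print(i, [op.type for op in prog.block(i).ops])

# Full textual form of the ProgramDesc (vars, ops and their attributes)
print(prog)
```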
diff --git a/doc/fluid/advanced_guide/addon_development/new_op/custom_op.md b/doc/fluid/advanced_guide/addon_development/new_op/custom_op.md index f83220be29aab54716bb71cd144d3359a7e9d9ff..c8bf3b461d5e3be845c174c4e2daeecdd1bc326a 100644 --- a/doc/fluid/advanced_guide/addon_development/new_op/custom_op.md +++ b/doc/fluid/advanced_guide/addon_development/new_op/custom_op.md @@ -77,14 +77,12 @@ class Relu2GradMaker : public framework::SingleGradOpMaker { public: using framework::SingleGradOpMaker::SingleGradOpMaker; - std::unique_ptr Apply() const override { - auto* op = new T(); + void Apply(GradOpPtr op) const override { op->SetType("relu2_grad"); op->SetInput("Y", this->Output("Y")); op->SetInput(framework::GradVarName("Y"), this->OutputGrad("Y")); op->SetAttrMap(this->Attrs()); op->SetOutput(framework::GradVarName("X"), this->InputGrad("X")); - return std::unique_ptr(op); } }; @@ -142,7 +140,7 @@ REGISTER_OP_CPU_KERNEL(relu2_grad, -ReLU OP的GPU实现, ``relu_op.cc`` 文件: +ReLU OP的GPU实现, ``relu_op.cu`` 文件: ``` // relu_op.cu @@ -272,8 +270,8 @@ g++ relu_op.cc relu_op.cu.o -o relu2_op.so -shared -fPIC -std=c++11 -O3 -DPADDLE 注意点: -1. NVCC编译GPU OP的cu文件时,需要加 `-DPADDLE_WITH_CUDA -DEIGEN_USE_GPU -DPADDLE_USE_DSO` 。 -2. 如果安装的PaddlePaddle不包含MKLDNN,则需要去掉编译选项`-DPADDLE_WITH_MKLDNN`。默认的安装包已包含MKLDNN。 +1. 通过NVCC编译CUDA源文件时,需要加编译选项 `-DPADDLE_WITH_CUDA -DEIGEN_USE_GPU -DPADDLE_USE_DSO`,在框架源码中会使用这些宏定义进行条件编译。用户自定义的C++ OP实现编译时,选项的开启状态需要和核心框架编译行为一致。如`EIGEN_USE_GPU`是使用Eigen数学库的GPU实现时需要增加的编译选项。 +2. 如果飞桨安装包中不包含MKLDNN库,则需要去掉编译选项`-DPADDLE_WITH_MKLDNN`。核心框架源码中(比如tensor.h)有使用此宏定义进行条件编译,该选项是否打开同样需要和核心框架编译行为保持一致。默认的飞桨安装包中含有MKLDNN库。 3. 可多个OP编译到同一个动态库中。 4. 通过pip方式安装的PaddlePaddle由GCC 4.8编译得到,由于GCC 4.8和GCC 5以上**C++11 ABI不兼容**,您编写的自定义OP,需要通过GCC 4.8编译。若是GCC 5及以上的环境上使用自定义OP,推荐使用[Docker安装PaddlePaddle](https://www.paddlepaddle.org.cn/install/doc/docker),使得编Paddle和编译自定义OP的GCC版本相同。 @@ -333,6 +331,11 @@ np.allclose(out, np.maximum(x,0.)) ## FAQ -1. Q:如果出现类似错误: cannot open shared object file: No such file or directory. +1. Q: 如果出现类似错误: `relu2_op.so: cannot open shared object file: No such file or directory` 以及 `libpaddle_framework.so: cannot open shared object file: No such file or directory`。 - A: 需要设置动态库的路径到环境变量LD_LIBRARY_PATH中。 + A: 需要将`relu2_op.so`所在路径以及`libpaddle_framework.so`路径(即`paddle.sysconfig.get_lib()`得到路径)设置到环境变量LD_LIBRARY_PATH中: + + ``` + # 假如relu2_op.so路径是:`paddle/test`,对于Linux环境设置: + export LD_LIBRARY_PATH=paddle/test:$( python -c 'import paddle; print(paddle.sysconfig.get_lib())'):$LD_LIBRARY_PATH + ``` diff --git a/doc/fluid/advanced_guide/addon_development/new_op/new_op.md b/doc/fluid/advanced_guide/addon_development/new_op/new_op.md index cf1a72ddfa9c6980827b14691383254c3bfa8e85..7b22163538445e9929ff3e2684c0efd41a536ec3 100644 --- a/doc/fluid/advanced_guide/addon_development/new_op/new_op.md +++ b/doc/fluid/advanced_guide/addon_development/new_op/new_op.md @@ -61,6 +61,9 @@ class MulOpMaker : public framework::OpProtoAndCheckerMaker { AddInput("X", "(Tensor), The first input tensor of mul op."); AddInput("Y", "(Tensor), The second input tensor of mul op."); AddOutput("Out", "(Tensor), The output tensor of mul op."); + AddAttr("use_mkldnn", + "(bool, default false) Only used in mkldnn kernel") + .SetDefault(false); AddAttr( "x_num_col_dims", R"DOC((int, default 1), The mul_op can take tensors with more than two @@ -91,18 +94,34 @@ class MulOpMaker : public framework::OpProtoAndCheckerMaker { )DOC") .SetDefault(1) .EqualGreaterThan(1); + AddAttr( + "scale_x", + "scale_x to be used for int8 mul input data x. 
scale_x has the" + "same purpose as scale_in in OPs that support quantization." + "Only to be used with MKL-DNN INT8") + .SetDefault(1.0f); + AddAttr>( + "scale_y", + "scale_y to be used for int8 mul input data y. scale_y has the" + "same purpose as scale_weights in OPs that support quantization." + "Only to be used with MKL-DNN INT8") + .SetDefault({1.0f}); + AddAttr("scale_out", + "scale_out to be used for int8 output data." + "Only used with MKL-DNN INT8") + .SetDefault(1.0f); + AddAttr( + "force_fp32_output", + "(bool, default false) Force quantize kernel output FP32, only " + "used in quantized MKL-DNN.") + .SetDefault(false); AddComment(R"DOC( Mul Operator. - This operator is used to perform matrix multiplication for input $X$ and $Y$. - The equation is: - $$Out = X * Y$$ - Both the input $X$ and $Y$ can carry the LoD (Level of Details) information, or not. But the output only shares the LoD information with input $X$. - )DOC"); } }; @@ -112,34 +131,34 @@ or not. But the output only shares the LoD information with input $X$. 开发者通过覆盖`framework::OpProtoAndCheckerMaker`中的`Make`函数来定义Op所对应的Proto,通过`AddInput`添加输入参数,通过`AddOutput`添加输出参数,通过`AddAttr`添加属性参数,通过`AddComment`添加Op的注释。这些函数会将对应内容添加到`OpProto`中。 -上面的代码在`MulOp`中添加两个输入`X`和`Y`,添加了一个输出`Out`,并解释了各自含义,命名请遵守[命名规范](https://github.com/PaddlePaddle/FluidDoc/blob/release/1.2/doc/fluid/dev/name_convention.md)。 +上面的代码在`MulOp`中添加两个输入`X`和`Y`,添加了一个输出`Out`,以及`use_mkldnn`等属性,并解释了各自含义,命名请遵守[命名规范](https://github.com/PaddlePaddle/FluidDoc/blob/release/1.2/doc/fluid/dev/name_convention.md)。 -### 定义GradProtoMaker类 -通常情况下,每个Op的会有一个对应的`GradProtoMaker`,为方便代码编写,fluid提供了默认的`GradProtoMaker`,即:`DefaultGradProtoMaker`。`DefaultGradProtoMaker`会使用前向Op的全部输入(`Input`)输出(`Output`)以及输出变量所对应的梯度(`Output@Grad`)作为反向Op的输入,将前向Op的输入变量所对应的的梯度(`Input@Grad`)作为输出。 +### 定义GradOpMaker类 +通常情况下,大部分Op只有一个对应的反向Op,每个Op的会有一个对应的`GradOpMaker`。为方便代码编写,fluid为只有提供了一个模板类[`SingleGradOpMaker`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/framework/grad_op_desc_maker.h#L188)。`MulOp`的`GradOpMaker`需要继承这个模板类,并在`Apply()`方法中设置反向Op的输入、输出和属性。此外,fluid还提供了一个默认的`GradOpMaker`, +[`DefaultGradOpMaker`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/framework/grad_op_desc_maker.h#L227),该模板类会使用前向Op的全部输入(`Input`)输出(`Output`)以及输出变量所对应的梯度(`Output@Grad`)作为反向Op的输入,将前向Op的输入变量所对应的的梯度(`Input@Grad`)作为输出。 **注意:** 不要将反向Op不会用到的变量放到反向Op的输入列表中,这样会导致这些不会被反向Op用到的变量的空间不能够及时回收,进而有可能导致用到该Op的模型可以设置的batch_size较低。 -比如`relu`操作的前向操作为:`out.device(d) = x.cwiseMax(static_cast(0));`反向操作为:`dx.device(d) = dout * (out > static_cast(0)).template cast();`。显然,反向操作中只是用到了`out`、`dout`、`dx`,没有用到`x`。 +比如`relu`操作的前向操作为:`out.device(d) = x.cwiseMax(static_cast(0));`反向操作为:`dx.device(d) = dout * (out > static_cast(0)).template cast();`。显然,反向操作中只是用到了`out`、`dout`、`dx`,没有用到`x`。因此,通常不建议使用默认的`DefaultGradOpMaker`。 -下面示例定义了`MulOp`的GradProtoMaker。 +下面示例定义了`MulOp`的`GradOpMaker`。 ```cpp -class MulOpGradMaker : public framework::SingleGradOpDescMaker { +template +class MulOpGradMaker : public framework::SingleGradOpMaker { public: - using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + using framework::SingleGradOpMaker::SingleGradOpMaker; protected: - std::unique_ptr Apply() const override { - std::unique_ptr retv(new framework::OpDesc()); + void Apply(GradOpPtr retv) const override { retv->SetType("mul_grad"); - retv->SetInput("X", Input("X")); - retv->SetInput("Y", Input("Y")); - retv->SetInput(framework::GradVarName("Out"), OutputGrad("Out")); - retv->SetOutput(framework::GradVarName("X"), InputGrad("X")); - 
retv->SetOutput(framework::GradVarName("Y"), InputGrad("Y")); - retv->SetAttrMap(Attrs()); - return retv; + retv->SetInput("X", this->Input("X")); + retv->SetInput("Y", this->Input("Y")); + retv->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out")); + retv->SetOutput(framework::GradVarName("X"), this->InputGrad("X")); + retv->SetOutput(framework::GradVarName("Y"), this->InputGrad("Y")); + retv->SetAttrMap(this->Attrs()); } }; ``` @@ -148,7 +167,8 @@ class MulOpGradMaker : public framework::SingleGradOpDescMaker { - 有些Op的前向逻辑和反向逻辑是一样的,比如[`ScaleOp`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/scale_op.cc).这种情况下,前向Op和反向Op的Kernel可以为同一个。 - 有些前向Op所对应的反向Op可能有多个,比如[`SumOp`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/sum_op.cc),这种情况下,`GradMaker`需要继承`framework::GradOpDescMakerBase`。 -- 有些Op的反向对应另一个Op的前向,比如[`SplitOp`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/split_op.h),这种情况下,[`SplitGradMaker`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/split_op.h#L52)中定义的`SplitOp`反向Op的Type就是`concat`, +- 有些Op的反向对应另一个Op的前向,比如[`SplitOp`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/split_op.h),这种情况下,[`SplitGradMaker`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/split_op.h#L157)中定义的`SplitOp`反向Op的Type就是`concat`, +- 为高效地同时支持命令式编程模式(动态图)和声明式编程模式(静态图),`SingleGradOpMaker`是一个模板类,在注册Operator时需要同时注册`MulOpGradMaker`(声明式编程模式使用)和`MulOpGradMaker`(命令式编程模式使用)。 ### 定义Operator类 @@ -159,12 +179,16 @@ class MulOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of MulOp should not be null."); - PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) of MulOp should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("Out"), - "Output(Out) of MulOp should not be null."); + PADDLE_ENFORCE_EQ( + ctx->HasInput("X"), true, + platform::errors::NotFound("Input(X) of MulOp should not be null.")); + PADDLE_ENFORCE_EQ( + ctx->HasInput("Y"), true, + platform::errors::NotFound("Input(Y) of MulOp should not be null.")); + PADDLE_ENFORCE_EQ( + ctx->HasOutput("Out"), true, + platform::errors::NotFound("Output(Out) of MulOp should not be null.")); auto x_dims = ctx->GetInputDim("X"); auto y_dims = ctx->GetInputDim("Y"); @@ -176,23 +200,42 @@ class MulOp : public framework::OperatorWithKernel { << " x_num_col_dims=" << x_num_col_dims << " y_num_col_dims=" << y_num_col_dims; + PADDLE_ENFORCE_NE(framework::product(y_dims), 0, + platform::errors::PreconditionNotMet( + "The Input variable Y(%s) has not " + "been initialized. You may need to confirm " + "if you put exe.run(startup_program) " + "after optimizer.minimize function.", + ctx->Inputs("Y").front())); PADDLE_ENFORCE_GT( x_dims.size(), x_num_col_dims, - "The input tensor X's rank of MulOp should be larger than " - "x_num_col_dims."); + platform::errors::InvalidArgument( + "The input tensor X's dimensions of MulOp " + "should be larger than x_num_col_dims. 
But received X's " + "dimensions = %d, X's shape = [%s], x_num_col_dims = %d.", + x_dims.size(), x_dims, x_num_col_dims)); PADDLE_ENFORCE_GT( y_dims.size(), y_num_col_dims, - "The input tensor Y's rank of MulOp should be larger than " - "y_num_col_dims: %ld vs %ld", - y_dims.size(), y_num_col_dims); + platform::errors::InvalidArgument( + "The input tensor Y's dimensions of MulOp " + "should be larger than y_num_col_dims. But received Y's " + "dimensions = %d, Y's shape = [%s], y_num_col_dims = %d.", + y_dims.size(), y_dims, y_num_col_dims)); auto x_mat_dims = framework::flatten_to_2d(x_dims, x_num_col_dims); auto y_mat_dims = framework::flatten_to_2d(y_dims, y_num_col_dims); - PADDLE_ENFORCE_EQ(x_mat_dims[1], y_mat_dims[0], - "First matrix's width must be equal with second matrix's " - "height. %s, %s", - x_mat_dims[1], y_mat_dims[0]); + PADDLE_ENFORCE_EQ( + x_mat_dims[1], y_mat_dims[0], + platform::errors::InvalidArgument( + "After flatten the input tensor X and Y to 2-D dimensions " + "matrix X1 and Y1, the matrix X1's width must be equal with matrix " + "Y1's height. But received X's shape = [%s], X1's shape = [%s], " + "X1's " + "width = %s; Y's shape = [%s], Y1's shape = [%s], Y1's height = " + "%s.", + x_dims, x_mat_dims, x_mat_dims[1], y_dims, y_mat_dims, + y_mat_dims[0])); std::vector output_dims; output_dims.reserve( static_cast(x_num_col_dims + y_dims.size() - y_num_col_dims)); @@ -208,10 +251,34 @@ class MulOp : public framework::OperatorWithKernel { ctx->SetOutputDim("Out", framework::make_ddim(output_dims)); ctx->ShareLoD("X", /*->*/ "Out"); } + + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const { + framework::LibraryType library = framework::LibraryType::kPlain; + framework::DataLayout layout = framework::DataLayout::kAnyLayout; + int customized_type_value = + framework::OpKernelType::kDefaultCustomizedTypeValue; + auto input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X"); +#ifdef PADDLE_WITH_MKLDNN + if (library == framework::LibraryType::kPlain && + platform::CanMKLDNNBeUsed(ctx)) { + library = framework::LibraryType::kMKLDNN; + layout = framework::DataLayout::kMKLDNN; + + if (input_data_type == framework::DataTypeTrait::DataType() || + input_data_type == framework::DataTypeTrait::DataType()) { + customized_type_value = kMULMKLDNNINT8; + } + } +#endif + + return framework::OpKernelType(input_data_type, ctx.GetPlace(), layout, + library, customized_type_value); + } }; ``` -[`MulOp`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/mul_op.cc#L22)继承自`OperatorWithKernel`。`public`成员: +[`MulOp`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/mul_op.cc#L30)继承自`OperatorWithKernel`。`public`成员: ```cpp using framework::OperatorWithKernel::OperatorWithKernel; @@ -226,15 +293,17 @@ MulOp(const std::string &type, const framework::VariableNameMap &inputs, : OperatorWithKernel(type, inputs, outputs, attrs) {} ``` -还需要重写`InferShape`接口。`InferShape`为const函数,不能修改Op的成员变量,参数为`framework::InferShapeContext* ctx`,通过该参数可获取到输入输出以及属性。它的功能是: +此外,Operator类通常需要重写`InferShape`接口,并在有必要时重写`GetExpectedKernelType`接口。`InferShape`为const函数,不能修改Op的成员变量,参数为`framework::InferShapeContext* ctx`,通过该参数可获取到输入输出以及属性。它的功能是: - 做检查, 尽早报错:检查输入数据维度、类型等是否合法。 - 设置输出Tensor的形状以及LoD信息。 +`GetExpectedKernelType`接口OperatorWithKernel类中用于获取指定设备(例如CPU,GPU)上指定数据类型(例如double,float)的OpKernel的方法。该方法的重写可见请参考[写C++ OP相关注意事项](op_notes.html#getexpectedkerneltype)。 + 通常`OpProtoMaker`和`Op`类的定义写在`.cc`文件中,和下面将要介绍的注册函数一起放在`.cc`中 ### 
InferShape区分 compile time 和 run time -在我们的静态图网络中,`InferShape`操作在[编译时(compile time)和运行时(run time)](https://github.com/PaddlePaddle/FluidDoc/blob/release/1.2/doc/fluid/getstarted/Developer's_Guide_to_Paddle_Fluid.md#%E8%AE%A9%E6%88%91%E4%BB%AC%E5%9C%A8fluid%E7%A8%8B%E5%BA%8F%E5%AE%9E%E4%BE%8B%E4%B8%AD%E5%8C%BA%E5%88%86%E7%BC%96%E8%AF%91%E6%97%B6%E5%92%8C%E8%BF%90%E8%A1%8C%E6%97%B6)都会被调用,在compile time时,由于真实的维度未知,框架内部用-1来表示,在run time时,用实际的维度表示,因此维度的值在compile time和 run time时可能不一致,如果存在维度的判断和运算操作,InferShape就需要区分compile time 和 run time。 +在我们的声明式编程模式网络中,`InferShape`操作在[编译时(compile time)和运行时(run time)](https://github.com/PaddlePaddle/FluidDoc/blob/release/1.2/doc/fluid/getstarted/Developer's_Guide_to_Paddle_Fluid.md#%E8%AE%A9%E6%88%91%E4%BB%AC%E5%9C%A8fluid%E7%A8%8B%E5%BA%8F%E5%AE%9E%E4%BE%8B%E4%B8%AD%E5%8C%BA%E5%88%86%E7%BC%96%E8%AF%91%E6%97%B6%E5%92%8C%E8%BF%90%E8%A1%8C%E6%97%B6)都会被调用,在compile time时,由于真实的维度未知,框架内部用-1来表示,在run time时,用实际的维度表示,因此维度的值在compile time和 run time时可能不一致,如果存在维度的判断和运算操作,InferShape就需要区分compile time 和 run time。 以下两种情况需要区分compile time和 run time。 @@ -286,7 +355,7 @@ y_dim[i] = x_dim[i] + z_dim[i] - 运算: -1和其他数做任何运算都要等于-1 **参考代码** -1. 判断的实现方法可以参考cross_entropy_op.cc,cross_entropy_op 要求X和labels的两个输入,除了最后一维以外,其他的维度完全一致 +1. 判断的实现方法可以参考[cross_entropy_op](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/cross_entropy_op.cc#L39),cross_entropy_op 要求X和labels的两个输入,除了最后一维以外,其他的维度完全一致 ```cpp bool contain_unknown_dim = framework::contain_unknown_dim(x_dims) || @@ -300,31 +369,35 @@ y_dim[i] = x_dim[i] + z_dim[i] } ``` -2. 运算的实现可以参考concat_op.cc,concat在InferShape判断时,除了进行concat轴之外,其他的维度完全一致;在生成output的维度时,把concat轴的维度求和,其他的维度和输入保持一致。 +2. 运算的实现可以参考[concat_op](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/concat_op.cc#L59),concat在InferShape判断时,调用`ComputeAndCheckShape`,除了进行concat轴之外,其他的维度完全一致;在生成output的维度时,把concat轴的维度求和,其他的维度和输入保持一致。 ```cpp - auto out_dims = ins[0]; + const size_t n = inputs_dims.size(); + auto out_dims = inputs_dims[0]; size_t in_zero_dims_size = out_dims.size(); for (size_t i = 1; i < n; i++) { for (size_t j = 0; j < in_zero_dims_size; j++) { if (j == axis) { - if (ctx->IsRuntime()) { - out_dims[axis] += ins[i][j]; + if (is_runtime) { + out_dims[axis] += inputs_dims[i][j]; } else { - if (ins[i][j] == -1) { + if (inputs_dims[i][j] == -1) { out_dims[axis] = -1; } else { - out_dims[axis] += ins[i][j]; + out_dims[axis] += inputs_dims[i][j]; } } } else { bool check_shape = - ctx->IsRuntime() || (out_dims[j] > 0 && ins[i][j] > 0); + is_runtime || (out_dims[j] > 0 && inputs_dims[i][j] > 0); if (check_shape) { // check all shape in run time - PADDLE_ENFORCE_EQ(out_dims[j], ins[i][j], - "Input tensors should have the same " - "elements except the specify axis."); + PADDLE_ENFORCE_EQ( + inputs_dims[0][j], inputs_dims[i][j], + "ShapeError: Dimension %d in inputs' shapes must be equal. 
" + "But recevied input[0]'s shape = " + "[%s], input[%d]'s shape = [%s].", + j, inputs_dims[0], i, inputs_dims[i]); } } } @@ -332,7 +405,6 @@ y_dim[i] = x_dim[i] + z_dim[i] ``` - ### 定义OpKernel类 `MulKernel`继承自`framework::OpKernel`,带有下面两个模板参数: @@ -405,9 +477,12 @@ class MulKernel : public framework::OpKernel { ```cpp namespace ops = paddle::operators; - REGISTER_OPERATOR(mul, ops::MulOp, ops::MulOpMaker, - ops::MulOpGradMaker) - REGISTER_OPERATOR(mul_grad, ops::MulGradOp) + REGISTER_OPERATOR(mul, ops::MulOp, ops::MulOpMaker, ops::MulOpInferVarType, + ops::MulOpGradMaker, + ops::MulOpGradMaker); + + REGISTER_OPERATOR(mul_grad, ops::MulGradOp); + REGISTER_OP_CPU_KERNEL(mul, ops::MulKernel, ops::MulKernel); @@ -416,11 +491,7 @@ class MulKernel : public framework::OpKernel { ops::MulGradKernel); ``` - 在上面的代码中: - - - `REGISTER_OPERATOR` : 注册`ops::MulOp`类,类型名为`mul`,该类的`ProtoMaker`为`ops::MulOpMaker`,注册`ops::MulOpGrad`,类型名为`mul_grad`。 - - - `REGISTER_OP_CPU_KERNEL` :注册`ops::MulKernel`类,并特化模板参数为`paddle::platform::CPUPlace`和`float`类型,同理,注册`ops::MulGradKernel`类。 + 在上面的代码中,使用`REGISTER_OPERATOR`注册了`ops::MulOp`类,类型名为`mul`,该类的`ProtoMaker`为`ops::MulOpMaker`,其`GradOpMaker`分别是`ops::MulOpGradMaker`(声明式编程模式使用)和`ops::MulOpGradMaker`(命令式编程模式使用),并使用`REGISTER_OPERATOR`注册`ops::MulGradOp`,类型名为`mul_grad`。然后,使用`REGISTER_OP_CPU_KERNEL`注册了`ops::MulKernel`类,并特化模板参数为设备为`paddle::platform::CPUPlace`、数据类型为`float`类型和`double`类型;同理,注册`ops::MulGradKernel`类。 - 在 `.cu`文件中注册CUDA Kernel。 @@ -442,27 +513,8 @@ class MulKernel : public framework::OpKernel { **注意:** -在运行Op时,框架系统会根据输入数据所在的设备、输入数据的类型等信息自动的选择合适的OpKernel,比如输入的数据是在GPU上,并且为`float`类型,框架系统会选择由`REGISTER_OP_CUDA_KERNEL`注册的`ops::MulKernel`。如果用户希望指定运行时可被调用的OpKernel,用户需要覆盖`framework::OperatorWithKernel`中的`GetExpectedKernelType`函数,比如`ConvOp`会根据属性`use_cudnn`为`false`还是为`true`决定是否调用cudnn库中提供的conv操作。 +在运行Op时,框架系统会根据输入数据所在的设备、输入数据的类型等信息自动的选择合适的OpKernel,比如输入的数据是在GPU上,并且为`float`类型,框架系统会选择由`REGISTER_OP_CUDA_KERNEL`注册的`ops::MulKernel`。如果用户希望指定运行时可被调用的OpKernel,用户需要覆盖`framework::OperatorWithKernel`中的`GetExpectedKernelType`函数,比如`MulOp`会根据属性`use_mkldnn`为`false`还是为`true`决定是否调用mkldnn库来完成计算。 -``` -framework::OpKernelType ConvOp::GetExpectedKernelType( - const framework::ExecutionContext& ctx) const { - int customized_type_value = - framework::OpKernelType::kDefaultCustomizedTypeValue; - framework::LibraryType library{framework::LibraryType::kPlain}; - auto input_data_type = ctx.Input("Input")->type(); - std::string data_format = ctx.Attr("data_format"); - framework::DataLayout layout = framework::StringToDataLayout(data_format); -#ifdef PADDLE_WITH_CUDA - if (ctx.Attr("use_cudnn")) { - library = framework::LibraryType::kCUDNN; - } -#endif - auto type = framework::OpKernelType(input_data_type, ctx.GetPlace(), layout, - library, customized_type_value); - return type; -} -``` ### 编译 diff --git a/doc/fluid/advanced_guide/addon_development/new_op/op_notes.md b/doc/fluid/advanced_guide/addon_development/new_op/op_notes.md index 69a42f0096a936b9ff718f8b73dbb5c18f0cd9dd..ddae81c39873944f11f3ee227ebd51e785943df6 100644 --- a/doc/fluid/advanced_guide/addon_development/new_op/op_notes.md +++ b/doc/fluid/advanced_guide/addon_development/new_op/op_notes.md @@ -157,13 +157,31 @@ ShareDataWith的功能是使两个Tensor共享底层buffer,在调用这个操 目前稀疏梯度在做更新的时候会先对梯度做merge,即对相同参数的梯度做累加,然后做参数以及附加参数(如velocity)的更新。 ### 8.显存优化 + +#### 8.1 为可原位计算的Op注册Inplace 
+有些Op的计算逻辑中,输出可以复用输入的显存空间,也可称为原位计算。例如[`reshape_op`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/reshape_op.cc)中,输出`Out`可以复用输入`X`的显存空间,因为该Op的计算逻辑不会改变`X`的实际数据,只是修改它的shape,输出和输入复用同一块显存空间不影响结果。对于这类OP,可以注册`Inlace`,从而让框架在运行时自动地进行显存优化。 + +fluid提供了`DECLARE_INPLACE_OP_INFERER`宏用于注册`Inplace`,该宏第一个参数是一个类名,如`ReshapeOpInplaceInToOut`;第二个参数是一对复用的输入输出,以`{"X", "Out"}`的形式给出。在`REGISTER_OPERATOR`时, +可以将类名传传入,从而为该Op注册`Inplace`。 + +``` +DECLARE_INPLACE_OP_INFERER(ReshapeOpInplaceInToOut, {"X", "Out"}); + +REGISTER_OPERATOR( + reshape, ops::ReshapeOp, ops::ReshapeOpMaker, + paddle::framework::DefaultGradOpMaker, + paddle::framework::DefaultGradOpMaker, + ops::ReshapeOpInplaceInToOut); +``` + +#### 8.2 减少OP中的无关变量 通常反向Op会依赖于前向Op的某些输入(Input)、输出(Output),以供反向Op计算使用。但有些情况下,反向Op不需要前向Op的所有输入和输出;有些情况下,反向Op只需要前向Op的部分输入和输出;有些情况下,反向Op只需要使用前向Op中输入和输出变量的Shape和LoD信息。若Op开发者在注册反向Op时,将不必要的前向Op输入和输出作为反向Op的输入,会导致这部分显存无法被框架现有的显存优化策略优化,从而导致模型显存占用过高。 所以在写注册反向Op时需要注意以下几点: -- Fluid提供的`DefaultGradOpDescMaker`,默认会将前向op的所有输入(`Input`)、输出(`Output`)以及输出变量所对应的梯度(`Output@Grad`)作为反向Op的输入,将前向Op输入所对应的梯度(`Input@Grad`)作为反向Op的输出。所以在使用`DefaultGradOpDescMaker`时需要考虑是否有些变量在计算中不被用到。 -- 如果`DefaultGradOpDescMaker`不能够满足需求,需要用户自己手动构建`GradOpDescMaker`,具体实现请参考[相关文档](new_op.html#permalink-4--gradprotomaker-); -- 如果有些反向Op需要依赖前向Op的输入或输出变量的的Shape或LoD,但不依赖于变量中Tensor的Buffer,且不能根据其他变量推断出该Shape和LoD,需要对该变量(以下称该变量为`X`)在反向Op中进行注册`NoNeedBufferVarsInference`。**一旦注册了`NoNeedBufferVarsIference`,反向op中就不能读写该变量对应的Tensor中的buffer,只能调用Tensor的dims()和lod()方法,同时,反向Op中的`GetExpectedKernelType()`必须要重写,并且`GetExpectedKernelType()`中不能访问`X`变量中Tensor的type()方法**。比如在`SliceOpGrad`中只会用到`Input`中变量的Shape信息,所以需要为对`Input`在`SliceOpGrad`上进行注册: +- Fluid提供的`DefaultGradOpMaker`,默认会将前向op的所有输入(`Input`)、输出(`Output`)以及输出变量所对应的梯度(`Output@Grad`)作为反向Op的输入,将前向Op输入所对应的梯度(`Input@Grad`)作为反向Op的输出。所以在使用`DefaultGradOpMaker`时需要考虑是否有些变量在计算中不被用到。 +- 如果`DefaultGradOpMaker`不能够满足需求,需要用户自己手动构建`GradOpMaker`,具体实现请参考[相关文档](new_op.html#gradopmaker); +- 如果有些反向Op需要依赖前向Op的输入或输出变量的的Shape或LoD,但不依赖于变量中Tensor的Buffer,且不能根据其他变量推断出该Shape和LoD,则可以通过`DECLARE_NO_NEED_BUFFER_VARS_INFERER`接口对该变量(以下称该变量为`X`)在反向Op中进行注册`NoNeedBufferVars`。**一旦注册了`NoNeedBufferVars`,反向op中就不能读写该变量对应的Tensor中的buffer,只能调用Tensor的dims()和lod()方法,同时,反向Op中的`GetExpectedKernelType()`必须要重写,并且`GetExpectedKernelType()`中不能访问`X`变量中Tensor的type()方法**。比如在`SliceOpGrad`中只会用到`Input`中变量的Shape信息,所以需要为对`Input`在`SliceOpGrad`上进行注册: ``` namespace paddle { namespace operators { @@ -185,30 +203,44 @@ class SliceOpGrad : public framework::OperatorWithKernel { }; -class SliceOpGradMaker : public framework::SingleGradOpDescMaker { +template +class SliceOpGradMaker : public framework::SingleGradOpMaker { public: - using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + using framework::SingleGradOpMaker::SingleGradOpMaker; protected: - std::unique_ptr Apply() const override { - auto* bind = new framework::OpDesc(); - bind->SetInput("Input", Input("Input")); - bind->SetInput(framework::GradVarName("Out"), OutputGrad("Out")); - bind->SetOutput(framework::GradVarName("Input"), InputGrad("Input")); - bind->SetAttrMap(Attrs()); + void Apply(GradOpPtr bind) const override { + bind->SetInput("Input", this->Input("Input")); + if (this->HasInput("StartsTensor")) { + bind->SetInput("StartsTensor", this->Input("StartsTensor")); + } + if (this->HasInput("EndsTensor")) { + bind->SetInput("EndsTensor", this->Input("EndsTensor")); + } + if (this->HasInput("StartsTensorList")) { + bind->SetInput("StartsTensorList", this->Input("StartsTensorList")); + } + if 
(this->HasInput("EndsTensorList")) { + bind->SetInput("EndsTensorList", this->Input("EndsTensorList")); + } + bind->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out")); + bind->SetOutput(framework::GradVarName("Input"), this->InputGrad("Input")); + bind->SetAttrMap(this->Attrs()); bind->SetType("slice_grad"); - return std::unique_ptr(bind); } }; -DECLARE_NO_NEED_BUFFER_VARS_INFERENCE(SliceOpGradNoNeedBufferVarsInference, - "Input"); +DECLARE_NO_NEED_BUFFER_VARS_INFERER(SliceOpGradNoNeedBufferVarsInference, + "Input"); } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OPERATOR(slice, ops::SliceOp, ops::SliceOpMaker, - ops::SliceOpGradMaker); + ops::SliceOpGradMaker, + ops::SliceOpGradMaker); REGISTER_OPERATOR(slice_grad, ops::SliceOpGrad, + ops::SliceDoubleOpGradMaker, + ops::SliceDoubleOpGradMaker, ops::SliceOpGradNoNeedBufferVarsInference); ``` diff --git a/doc/fluid/advanced_guide/data_preparing/feeding_data.rst b/doc/fluid/advanced_guide/data_preparing/feeding_data.rst index 6fcf529f7174af29623b567bec2c248da174db01..c0749a418e1e380e748d0c1e4cfc0993a0b73d04 100644 --- a/doc/fluid/advanced_guide/data_preparing/feeding_data.rst +++ b/doc/fluid/advanced_guide/data_preparing/feeding_data.rst @@ -4,7 +4,7 @@ 同步数据读取 ############## -PaddlePaddle Fluid支持使用 :code:`fluid.layers.data()` 配置数据层; +PaddlePaddle Fluid支持使用 :code:`fluid.data()` 配置数据层; 再使用 Numpy Array 或者直接使用Python创建C++的 :code:`fluid.LoDTensor` , 通过 :code:`Executor.run(feed=...)` 传给 :code:`fluid.Executor` 或 :code:`fluid.ParallelExecutor` 。 @@ -12,29 +12,25 @@ PaddlePaddle Fluid支持使用 :code:`fluid.layers.data()` 配置数据层; 数据层配置 ########## -通过 :code:`fluid.layers.data()` 可以配置神经网络中需要的数据层。具体方法为: +通过 :code:`fluid.data()` 可以配置神经网络中需要的数据层。具体方法为: .. code-block:: python import paddle.fluid as fluid - image = fluid.layers.data(name="image", shape=[3, 224, 224]) - label = fluid.layers.data(name="label", shape=[1], dtype="int64") + image = fluid.data(name="image", shape=[None, 3, 224, 224]) + label = fluid.data(name="label", shape=[None, 1], dtype="int64") # use image/label as layer input prediction = fluid.layers.fc(input=image, size=1000, act="softmax") loss = fluid.layers.cross_entropy(input=prediction, label=label) ... -上段代码中,:code:`image` 和 :code:`label` 是通过 :code:`fluid.layers.data` -创建的两个输入数据层。其中 :code:`image` 是 :code:`[3, 224, 224]` 维度的浮点数据; -:code:`label` 是 :code:`[1]` 维度的整数数据。这里需要注意的是: +上段代码中,:code:`image` 和 :code:`label` 是通过 :code:`fluid.data` +创建的两个输入数据层。其中 :code:`image` 是 :code:`[None, 3, 224, 224]` 维度的浮点数据; +:code:`label` 是 :code:`[None, 1]` 维度的整数数据。这里需要注意的是: -1. Fluid中默认使用 :code:`-1` 表示 batch size 维度,默认情况下会在 :code:`shape` - 的第一个维度添加 :code:`-1` 。 所以 上段代码中, 我们可以接受将一个 - :code:`[32, 3, 224, 224]` 的numpy array传给 :code:`image` 。 如果想自定义batch size - 维度的位置的话,请设置 :code:`fluid.layers.data(append_batch_size=False)` 。 - 请参考进阶使用中的 :ref:`user_guide_customize_batch_size_rank` 。 +1. Executor在执行的时候,会检查定义的数据层数据和feed的数据的 :code:`shape` 和 :code:`dtype` 是否一致,如果不一致,程序会报错退出。对于一些任务,在不同的轮数,数据的某些维度会变化,可以将维度的值设置为None,例如第0维会变化,可以将 :code:`shape` 设置为 :code:`[None, 3, 224, 224]` 。 2. Fluid中用来做类别标签的数据类型是 :code:`int64`,并且标签从0开始。可用数据类型请参考 :ref:`user_guide_paddle_support_data_types`。 @@ -69,17 +65,17 @@ PaddlePaddle Fluid支持使用 :code:`fluid.layers.data()` 配置数据层; 序列数据是PaddlePaddle Fluid支持的特殊数据类型,可以使用 :code:`LoDTensor` 作为 输入数据类型。它需要用户: 1. 
传入一个mini-batch需要被训练的所有数据; 2.每个序列的长度信息。 -用户可以使用 :code:`fluid.create_lod_tensor` 来创建 :code:`LoDTensor`。 +用户可以使用 :code:`fluid.create_lod_tensor` 来创建 :code:`LoDTensor` 。 -传入序列信息的时候,需要设置序列嵌套深度,:code:`lod_level`。 -例如训练数据是词汇组成的句子,:code:`lod_level=1`;训练数据是 词汇先组成了句子, -句子再组成了段落,那么 :code:`lod_level=2`。 +传入序列信息的时候,需要设置序列嵌套深度,:code:`lod_level` 。 +例如训练数据是词汇组成的句子,:code:`lod_level=1` ;训练数据是 词汇先组成了句子, +句子再组成了段落,那么 :code:`lod_level=2` 。 例如: .. code-block:: python - sentence = fluid.layers.data(name="sentence", dtype="int64", shape=[1], lod_level=1) + sentence = fluid.data(name="sentence", dtype="int64", shape=[None, 1], lod_level=1) ... @@ -91,8 +87,8 @@ PaddlePaddle Fluid支持使用 :code:`fluid.layers.data()` 配置数据层; ) }) -训练数据 :code:`sentence` 包含三个样本,他们的长度分别是 :code:`4, 1, 2`。 -他们分别是 :code:`data[0:4]`, :code:`data[4:5]` 和 :code:`data[5:7]`。 +训练数据 :code:`sentence` 包含三个样本,他们的长度分别是 :code:`4, 1, 2` 。 +他们分别是 :code:`data[0:4]`, :code:`data[4:5]` 和 :code:`data[5:7]` 。 如何分别设置ParallelExecutor中每个设备的训练数据 ------------------------------------------------ @@ -123,36 +119,6 @@ PaddlePaddle Fluid支持使用 :code:`fluid.layers.data()` 配置数据层; 上述代码中,GPU0会训练 32 个样本,而 GPU1训练 16 个样本。 -.. _user_guide_customize_batch_size_rank: - -自定义BatchSize维度 -------------------- - -PaddlePaddle Fluid默认batch size是数据的第一维度,以 :code:`-1` 表示。但是在高级 -使用中,batch_size 可以固定,也可以是其他维度或者多个维度来表示。这都需要设置 -:code:`fluid.layers.data(append_batch_size=False)` 来完成。 - -1. 固定batch size维度 - - .. code-block:: python - - image = fluid.layers.data(name="image", shape=[32, 784], append_batch_size=False) - - 这里,:code:`image` 永远是一个 :code:`[32, 784]` 大小的矩阵。 - -2. 使用其他维度表示batch size - - .. code-block:: python - - sentence = fluid.layers.data(name="sentence", - shape=[80, -1, 1], - append_batch_size=False, - dtype="int64") - - 这里 :code:`sentence` 的中间维度是batch size。这种数据排布会用在定长的循环神经 - 网络中。 - - .. _user_guide_paddle_support_data_types: Fluid目前支持的数据类型 diff --git a/doc/fluid/advanced_guide/data_preparing/feeding_data_en.rst b/doc/fluid/advanced_guide/data_preparing/feeding_data_en.rst index 09367520e77af892fd2ac4e8ce2d23541d15bcf3..9afedfa1082232d4a972343a9dbf8df88af8ee8e 100644 --- a/doc/fluid/advanced_guide/data_preparing/feeding_data_en.rst +++ b/doc/fluid/advanced_guide/data_preparing/feeding_data_en.rst @@ -4,7 +4,7 @@ Take Numpy Array as Training Data ################################# -PaddlePaddle Fluid supports configuring data layer with :code:`fluid.layers.data()` . +PaddlePaddle Fluid supports configuring data layer with :code:`fluid.data()` . Then you can use Numpy Array or directly use Python to create C++ :code:`fluid.LoDTensor` , and then feed it to :code:`fluid.Executor` or :code:`fluid.ParallelExecutor` through :code:`Executor.run(feed=...)` . @@ -12,23 +12,23 @@ through :code:`Executor.run(feed=...)` . Configure Data Layer ############################ -With :code:`fluid.layers.data()` , you can configure data layer in neural network. Details are as follows: +With :code:`fluid.data()` , you can configure data layer in neural network. Details are as follows: .. code-block:: python import paddle.fluid as fluid - image = fluid.layers.data(name="image", shape=[3, 224, 224]) - label = fluid.layers.data(name="label", shape=[1], dtype="int64") + image = fluid.data(name="image", shape=[None, 3, 224, 224]) + label = fluid.data(name="label", shape=[None, 1], dtype="int64") # use image/label as layer input prediction = fluid.layers.fc(input=image, size=1000, act="softmax") loss = fluid.layers.cross_entropy(input=prediction, label=label) ... 
-In the code above, :code:`image` and :code:`label` are two input data layers created by :code:`fluid.layers.data` . :code:`image` is float data of shape :code:`[3, 224, 224]` ; :code:`label` is the int data of shape :code:`[1]` . Note that: +In the code above, :code:`image` and :code:`label` are two input data layers created by :code:`fluid.data` . :code:`image` is float data of shape :code:`[None, 3, 224, 224]` ; :code:`label` is the int data of shape :code:`[None, 1]` . Note that: -1. :code:`-1` is represented for the dimension of batch size by default in Fluid. And :code:`-1` is added to the first dimension of :code:`shape` by default. Therefore in the code above, it would be alright to transfer numpy array of :code:`[32, 3, 224, 224]` to :code:`image` . If you want to customize the position of the batch size dimension, please set :code:`fluid.layers.data(append_batch_size=False)` .Please refer to the tutorial in the advanced user guide: :ref:`user_guide_customize_batch_size_rank_en` . +1. When the program is executing, executor will check whether the :code:`shape` and :code:`dtype` defined and feeded are consistent. If they are not consistent, the program will exit with an error. In some tasks, the dimension will change in different training steps. For this case, the value of the dimension can be set to None. For example, the :code:`shape` can be set to :code:`[None, 3, 224, 224]` when the 0th dimension will change. 2. Data type of category labels in Fluid is :code:`int64` and the label starts from 0. About the supported data types,please refer to :ref:`user_guide_paddle_support_data_types_en` . @@ -76,7 +76,7 @@ For example: .. code-block:: python - sentence = fluid.layers.data(name="sentence", dtype="int64", shape=[1], lod_level=1) + sentence = fluid.data(name="sentence", dtype="int64", shape=[None, 1], lod_level=1) ... @@ -122,32 +122,6 @@ For example: In the code above, GPU0 will train 32 samples and GPU1 will train 16 samples. -.. _user_guide_customize_batch_size_rank_en: - -Customize the BatchSize dimension ------------------------------------- - -Batch size is the first dimension of data by default in PaddlePaddle Fluid, indicated by :code:`-1` .But in advanced usage, batch_size could be fixed or respresented by other dimension or multiple dimensions, which could be implemented by setting :code:`fluid.layers.data(append_batch_size=False)` . - -1. fixed BatchSize dimension - - .. code-block:: python - - image = fluid.layers.data(name="image", shape=[32, 784], append_batch_size=False) - - Here :code:`image` is always a matrix with size of :code:`[32, 784]` . - -2. batch size expressed by other dimension - - .. code-block:: python - - sentence = fluid.layers.data(name="sentence", - shape=[80, -1, 1], - append_batch_size=False, - dtype="int64") - - Here the middle dimension of :code:`sentence` is batch size. This type of data layout is applied in fixed-length recurrent neural networks. - .. 
_user_guide_paddle_support_data_types_en: Data types supported by Fluid diff --git a/doc/fluid/advanced_guide/data_preparing/reader.md b/doc/fluid/advanced_guide/data_preparing/reader.md index bfba87966ea57c4c0d5a166077185b55727b024d..8647dd45bef5be20b41ded78e70e850bc98c2c7d 100644 --- a/doc/fluid/advanced_guide/data_preparing/reader.md +++ b/doc/fluid/advanced_guide/data_preparing/reader.md @@ -193,14 +193,3 @@ def image_reader_creator(image_path, label_path, n): reader = image_reader_creator("/path/to/image_file", "/path/to/label_file", 1024) paddle.train(paddle.batch(reader, 128), {"image":0, "label":1}, ...) ``` - -### How is `paddle.train` implemented - -An example implementation of paddle.train is: - -```python -def train(batch_reader, mapping, batch_size, total_pass): - for pass_idx in range(total_pass): - for mini_batch in batch_reader(): # this loop will never end in online learning. - do_forward_backward(mini_batch, mapping) -``` diff --git a/doc/fluid/advanced_guide/distributed_training/cluster_quick_start.rst b/doc/fluid/advanced_guide/distributed_training/cluster_quick_start.rst index 5509d34403aedd8fd92dd3978fed10b723073d0a..1988aee0ae578f584b723bdf38010945b264320d 100644 --- a/doc/fluid/advanced_guide/distributed_training/cluster_quick_start.rst +++ b/doc/fluid/advanced_guide/distributed_training/cluster_quick_start.rst @@ -14,7 +14,7 @@ * - [x] 成功安装Paddle Fluid,如果尚未安装,请参考 `快速开始 `_ + [x] 成功安装Paddle Fluid,如果尚未安装,请参考 `快速开始 `_ * [x] 学会最基本的单机训练方法,请参考 `单机训练 `_ 中描述的单卡训练,进行学习 @@ -113,7 +113,7 @@ main_function(args.is_local) -* 说明:示例中使用的IO方法是dataset,想了解具体的文档和用法请参考 `Dataset API `_ 。示例中使用的 ``train_from_dataset`` 接口,想了解具体的文档和使用方法请参考 `Executor API `_ 。示例中的 ``from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet`` 表示引入参数服务器架构进行分布式训练,如果想更进一步了解Fleet API的更多选项和示例,请参考 `Fleet API `_ +* 说明:示例中使用的IO方法是dataset,想了解具体的文档和用法请参考 `Dataset API `_ 。示例中使用的 ``train_from_dataset`` 接口,想了解具体的文档和使用方法请参考 `Executor API `_ 。示例中的 ``from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet`` 表示引入参数服务器架构进行分布式训练,如果想更进一步了解Fleet API的更多选项和示例,请参考 `Fleet API `_ 单机训练启动命令 diff --git a/doc/fluid/advanced_guide/distributed_training/cluster_quick_start_en.rst b/doc/fluid/advanced_guide/distributed_training/cluster_quick_start_en.rst index ad9868e38bf4c3f4b953749db45064436c972661..ff8ea39c02200f8397c9d3bd9454fd6d01214f51 100644 --- a/doc/fluid/advanced_guide/distributed_training/cluster_quick_start_en.rst +++ b/doc/fluid/advanced_guide/distributed_training/cluster_quick_start_en.rst @@ -1,193 +1,159 @@ -.. _cluster_quick_start_en: +Quick start for distributed training +==================================== -Quick Start with Distributed Training -========================== +Distributed training with Fleet API +----------------------------------- -Preparation --------------------- -In this article, we'll show you how to quickly start a PaddlePaddle distributed training task in a cluster. Before you start, do some preparatory work as follows: - -1. Prepare a connected training cluster. Here we use 4 training nodes with format ``*.paddlepaddle.com`` to represent the host name of the node. You can modify it according to the actual situation. - -2. Make sure you have read :ref:`install_steps` before you start and can run PaddlePaddle on all nodes of the cluster. +Since Paddle Fluid `Release +1.5.1 `__, +it is officially recommended to use the Fleet API for distributed +training. For the introduction of the Fleet API, please refer to `Fleet +Design Doc `__. 
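At a glance, the Fleet workflow used throughout this tutorial is: build a network and its loss, initialize a role maker, wrap the optimizer with ``fleet.distributed_optimizer``, then branch on whether the current process is a parameter server or a worker. The snippet below is only a minimal sketch of that flow; the tiny ``fc`` network and the variable names in it are illustrative placeholders, and the complete click-through rate example later in this section shows the real data pipeline and training loop.

.. code:: python

    import paddle.fluid as fluid
    import paddle.fluid.incubate.fleet.base.role_maker as role_maker
    from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet
    from paddle.fluid.transpiler.distribute_transpiler import DistributeTranspilerConfig

    # A toy network, used only so that there is a loss to optimize.
    x = fluid.data(name="x", shape=[None, 32], dtype="float32")
    y = fluid.data(name="y", shape=[None, 1], dtype="int64")
    prediction = fluid.layers.fc(input=x, size=2, act="softmax")
    loss = fluid.layers.mean(fluid.layers.cross_entropy(input=prediction, label=y))

    role = role_maker.PaddleCloudRoleMaker()   # reads the cluster info set by the launcher
    fleet.init(role)

    strategy = DistributeTranspilerConfig()
    strategy.sync_mode = False                 # asynchronous parameter server training

    optimizer = fleet.distributed_optimizer(
        fluid.optimizer.SGD(learning_rate=1e-4), strategy)
    optimizer.minimize(loss)

    if fleet.is_server():                      # parameter server process
        fleet.init_server()
        fleet.run_server()
    elif fleet.is_worker():                    # trainer process
        fleet.init_worker()
        exe = fluid.Executor(fluid.CPUPlace())
        exe.run(fluid.default_startup_program())
        # ... feed data and run the training loop here ...

Launched with the ``launch_ps`` module shown at the end of this section, the same script runs as either a server or a worker depending on the role assigned by the launcher.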
-Example code -------------- - -Let's use a very simple linear regression model as an example to explain how to start a distributed training task with 2 pserver server nodes and 2 trainer nodes. You can save this code as ``dist_train.py`` . +Preparation +~~~~~~~~~~~ + +- [x] Install Paddle Fluid. If not already installed, please refer to + `Beginner’s + Guide `__. +- [x] Master the most basic single node training method. Please refer + to the single card training described in `Single-node + training `__. + +Click-through rate prediction +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Here, we will use a simple example, click-through rate prediction task, +to illustrate how to configure Fleet API for distributed training, and +gives an example by using a single node environment to simulate the +distributed environment. The source code of the example comes from `CTR +with +Fleet `__. + +In order to facilitate learning, the example given here is a mixed code +of single node and multi node. You can start single node or multi node +tasks through different startup commands. For the part of obtaining data +and the logic of data preprocessing, please refer to the source code and +description of `CTR with +Fleet `__. .. code:: python - + from __future__ import print_function + from args import parse_args import os - import paddle import paddle.fluid as fluid - - # train reader - BATCH_SIZE = 20 - EPOCH_NUM = 30 - BATCH_SIZE = 8 - - train_reader = paddle.batch( - paddle.reader.shuffle( - paddle.dataset.uci_housing.train(), buf_size=500), - batch_size=BATCH_SIZE) - - def train(): - y = fluid.layers.data(name='y', shape=[1], dtype='float32') - x = fluid.layers.data(name='x', shape=[13], dtype='float32') - y_predict = fluid.layers.fc(input=x, size=1, act=None) - - loss = fluid.layers.square_error_cost(input=y_predict, label=y) - avg_loss = fluid.layers.mean(loss) - opt = fluid.optimizer.SGD(learning_rate=0.001) - opt.minimize(avg_loss) - - place = fluid.CPUPlace() - feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) - exe = fluid.Executor(place) - - # fetch distributed training environment setting - training_role = os.getenv("PADDLE_TRAINING_ROLE", None) - port = os.getenv("PADDLE_PSERVER_PORT", "6174") - pserver_ips = os.getenv("PADDLE_PSERVER_IPS", "") - trainer_id = int(os.getenv("PADDLE_TRAINER_ID", "0")) - eplist = [] - for ip in pserver_ips.split(","): - eplist.append(':'.join([ip, port])) - pserver_endpoints = ",".join(eplist) - trainers = int(os.getenv("PADDLE_TRAINERS")) - current_endpoint = os.getenv("PADDLE_CURRENT_IP", "") + ":" + port - - t = fluid.DistributeTranspiler() - t.transpile( - trainer_id = trainer_id, - pservers = pserver_endpoints, - trainers = trainers) - - if training_role == "PSERVER": - pserver_prog = t.get_pserver_program(current_endpoint) - startup_prog = t.get_startup_program(current_endpoint, pserver_prog) - exe.run(startup_prog) - exe.run(pserver_prog) - elif training_role == "TRAINER": - trainer_prog = t.get_trainer_program() + import sys + from network_conf import ctr_dnn_model_dataset + import paddle.fluid.incubate.fleet.base.role_maker as role_maker + + from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet + from paddle.fluid.transpiler.distribute_transpiler import DistributeTranspilerConfig + + dense_feature_dim = 13 + sparse_feature_dim = 10000001 + batch_size = 100 + thread_num = 10 + embedding_size = 10 + args = parse_args() + + def main_function(is_local): + # common code for local training and distributed training + dense_input = 
fluid.layers.data( + name="dense_input", shape=[dense_feature_dim], dtype='float32') + + sparse_input_ids = [ + fluid.layers.data(name="C" + str(i), shape=[1], lod_level=1, + dtype="int64") for i in range(1, 27)] + + label = fluid.layers.data(name="label", shape=[1], dtype="int64") + dataset = fluid.DatasetFactory().create_dataset() + dataset.set_use_var([dense_input] + sparse_input_ids + [label]) + pipe_command = "python criteo_reader.py %d" % sparse_feature_dim + dataset.set_pipe_command(pipe_command) + dataset.set_batch_size(batch_size) + dataset.set_thread(thread_num) + + whole_filelist = ["raw_data/part-%d" % x + for x in range(len(os.listdir("raw_data")))] + + dataset.set_filelist(whole_filelist) + loss, auc_var, batch_auc_var = ctr_dnn_model_dataset( + dense_input, sparse_input_ids, label, embedding_size, + sparse_feature_dim) + + exe = fluid.Executor(fluid.CPUPlace()) + def train_loop(epoch=20): + for i in range(epoch): + exe.train_from_dataset(program=fluid.default_main_program(), + dataset=dataset, + fetch_list=[auc_var], + fetch_info=["auc"], + debug=False) + # local training + def local_train(): + optimizer = fluid.optimizer.SGD(learning_rate=1e-4) + optimizer.minimize(loss) exe.run(fluid.default_startup_program()) - - for epoch in range(EPOCH_NUM): - for batch_id, batch_data in enumerate(train_reader()): - avg_loss_value, = exe.run(trainer_prog, - feed=feeder.feed(batch_data), - fetch_list=[avg_loss]) - if (batch_id + 1) % 10 == 0: - print("Epoch: {0}, Batch: {1}, loss: {2}".format( - epoch, batch_id, avg_loss_value[0])) - # destory the resource of current trainer node in pserver server node - exe.close() + train_loop() + + # distributed training + def dist_train(): + role = role_maker.PaddleCloudRoleMaker() + fleet.init(role) + strategy = DistributeTranspilerConfig() + strategy.sync_mode = False + optimizer = fluid.optimizer.SGD(learning_rate=1e-4) + optimizer = fleet.distributed_optimizer(optimizer, strategy) + optimizer.minimize(loss) + + if fleet.is_server(): + fleet.init_server() + fleet.run_server() + elif fleet.is_worker(): + fleet.init_worker() + exe.run(fluid.default_startup_program()) + train_loop() + if is_local: + local_train() else: - raise AssertionError("PADDLE_TRAINING_ROLE should be one of [TRAINER, PSERVER]") - - train() - - -Environment Variables ------------------------------------- - -When starting a distributed training task, different environment variables are used to represent different node roles, details as follows: - -.. list-table:: - :header-rows: 1 - - * - Environment Variable - - Data Type - - Example - - Description - * - :code:`PADDLE_TRAINING_ROLE` - - str - - :code:`PSERVER,TRANERR` - - role of current training node - * - :code:`PADDLE_PSERVER_IPS` - - str - - :code:`ps0.paddlepaddle.com, ps1.paddlepaddle.com` - - The IP addresses or hostnames of all pserver nodes in the distributed training task, separated by "," - * - :code:`PADDLE_PSERVER_PORT` - - int - - 6174 - - port that the pserver process listens to - * - :code:`PADDLE_TRAINERS` - - int - - 2 - - Number of trainer nodes in a distributed training task - * - :code:`PADDLE_CURRENT_IP` - - str - - :code:`ps0.paddlepaddle.com` - - IP address or hostname of the current pserver node - * - :code:`PADDLE_TRAINER_ID` - - str - - 0 - - ID of the current trainer node (unique), in the range of [0, PADDLE_TRAINERS) - -**Note:** Environment variables are just a way to get runtime information. In practical tasks, you can use command line parameters to obtain runtime information. 
- -API related to Distributed Training ---------------------------------- - -DistributeTranspiler -~~~~~~~~~~~~~~~~~~~~~~ - -The machines in distributed training tasks based on the pserver-trainer architecture are divided into two roles: Parameter Server (pserver) and trainer. In Fluid, users only need to configure the network configuration required for single node training. The ``DistributeTranspiler`` module automatically modifies the single-node network settings into settings on which pserver and trainer needs to run based on the role of current training node: + dist_train() -.. code:: python + if __name__ == '__main__': + main_function(args.is_local) - t = fluid.DistributeTranspiler() - t.transpile( - trainer_id = trainer_id, - pservers = pserver_endpoints, - trainers = trainers) - if PADDLE_TRAINING_ROLE == "TRAINER": - # fetch the trainer program and execute it - trainer_prog = t.get_trainer_program() - ... +- Note: The IO method used in this example is dataset, please refer to + `Dataset + API `__ + for specific documents and usage. For the ``train_from_dataset`` + interface, please refer to `Executor + API `__. + ``from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet`` + in this example means to introduce parameter server architecture for + distributed training, which you can refer to `Fleet + API `__ + for getting more about the options and examples of Fleet API. - elif PADDLE_TRAINER_ROLE == "PSERVER": - # fetch the pserver program and execute it - pserver_prog = t.get_pserver_program(current_endpoint) - ... +Start command of single node training +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +.. code:: bash -Exe.close() -~~~~~~~~~~~~~~ + python train.py --is_local 1 +Start command of single machine simulation distributed training +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The status information of all trainer nodes is saved in the pserver node. When trainer finishes training, ``exe.close()`` should be called to notify all PServer nodes to release the resources of the current Trainer nodes: +Here we use launch\_ps, a built-in launcher of paddle, which users can +specify the number of workers and servers to start the parameter server +tasks. -.. code:: python +.. code:: bash + + python -m paddle.distributed.launch_ps --worker_num 2 --server_num 2 train.py - exe = fluid.Executor(fluid.CPUPlace()) - # training process ... - exe.close() # notify PServer to destory the resource - -Note: every trainer needs to call exe.close() when the trainer finishes. - -Start a Distributed Training Task ----------------------------------- - -.. 
list-table:: - :header-rows: 1 - - - * - Start Node - - Start Command - - Description - * - ps0.paddlepaddle.com - - :code:`PADDLE_TRAINING_ROLE=PSERVER PADDLE_CURRENT_IP=ps0.paddlepaddle.com PADDLE_PSERVER_IPS=ps0.paddlepaddle.com, ps1.paddlepaddle.com PADDLE_TRAINERS=2 PADDLE_PSERVER_PORT=6174 python fluid_dist.py` - - Start pserver node - * - ps1.paddlepaddle.com - - :code:`PADDLE_TRAINING_ROLE=PSERVER PADDLE_CURRENT_IP=ps1.paddlepaddle.com PADDLE_PSERVER_IPS=ps0.paddlepaddle.com, ps1.paddlepaddle.com PADDLE_TRAINERS=2 PADDLE_PSERVER_PORT=6174 python fluid_dist.py` - - Start pserver node - * - trainer0.paddlepaddle.com - - :code:`PADDLE_TRAINING_ROLE=TRAINER PADDLE_PSERVER_IPS=ps0.paddlepaddle.com, ps1.paddlepaddle.com PADDLE_TRAINERS=2 PADDLE_TRAINER_ID=0 PADDLE_PSERVER_PORT=6174 python fluid_dist.py` - - Start the number 0 Trainer Node - * - trainer1.paddlepaddle.com - - :code:`PADDLE_TRAINING_ROLE=TRAINER PADDLE_PSERVER_IPS=ps0.paddlepaddle.com, ps1.paddlepaddle.com PADDLE_TRAINERS=2 PADDLE_TRAINER_ID=1 PADDLE_PSERVER_PORT=6174 python fluid_dist.py` - - Start the number 1 trainer node +The task running log can be viewed in the logs directory of the working +directory. When you can use a single machine to simulate distributed +training, you can perform true multi node distributed training. We +recommend that users refer directly to +`百度云运行分布式任务的示例 `__. diff --git a/doc/fluid/advanced_guide/distributed_training/fleet_api_howto_cn.rst b/doc/fluid/advanced_guide/distributed_training/fleet_api_howto_cn.rst index 15d5508a914563dc20e1a33cac352dee7352213f..21f3ea861452ee7bf1dc12fdd65bd8fdcbc8ea6c 100644 --- a/doc/fluid/advanced_guide/distributed_training/fleet_api_howto_cn.rst +++ b/doc/fluid/advanced_guide/distributed_training/fleet_api_howto_cn.rst @@ -51,8 +51,8 @@ API最常见的两种使用场景,用一个模型做示例,目的是让用 from nets import mlp from utils import gen_data - input_x = fluid.layers.data(name="x", shape=[32], dtype='float32') - input_y = fluid.layers.data(name="y", shape=[1], dtype='int64') + input_x = fluid.data(name="x", shape=[None, 32], dtype='float32') + input_y = fluid.data(name="y", shape=[None, 1], dtype='int64') cost = mlp(input_x, input_y) optimizer = fluid.optimizer.SGD(learning_rate=0.01) @@ -79,8 +79,8 @@ API最常见的两种使用场景,用一个模型做示例,目的是让用 from paddle.fluid.incubate.fleet.base import role_maker from utils import gen_data - input_x = fluid.layers.data(name="x", shape=[32], dtype='float32') - input_y = fluid.layers.data(name="y", shape=[1], dtype='int64') + input_x = fluid.data(name="x", shape=[None, 32], dtype='float32') + input_y = fluid.data(name="y", shape=[None, 1], dtype='int64') cost = mlp(input_x, input_y) optimizer = fluid.optimizer.SGD(learning_rate=0.01) @@ -119,8 +119,8 @@ API最常见的两种使用场景,用一个模型做示例,目的是让用 from paddle.fluid.incubate.fleet.base import role_maker from utils import gen_data - input_x = fluid.layers.data(name="x", shape=[32], dtype='float32') - input_y = fluid.layers.data(name="y", shape=[1], dtype='int64') + input_x = fluid.data(name="x", shape=[None, 32], dtype='float32') + input_y = fluid.data(name="y", shape=[None, 1], dtype='int64') cost = mlp(input_x, input_y) optimizer = fluid.optimizer.SGD(learning_rate=0.01) diff --git a/doc/fluid/advanced_guide/distributed_training/index_cn.rst b/doc/fluid/advanced_guide/distributed_training/index_cn.rst index 79daa2718e7d2a4a9205cebd34a654bda56b6264..1d2ad003bfd174ebd5c903e68acf3b3001fe3dfb 100644 --- a/doc/fluid/advanced_guide/distributed_training/index_cn.rst +++ b/doc/fluid/advanced_guide/distributed_training/index_cn.rst @@ -6,5 +6,4 
@@ :maxdepth: 1 cluster_quick_start.rst - cluster_howto.rst fleet_api_howto_cn.rst diff --git a/doc/fluid/advanced_guide/dygraph_to_static/debugging_cn.md b/doc/fluid/advanced_guide/dygraph_to_static/debugging_cn.md new file mode 100644 index 0000000000000000000000000000000000000000..40bc3e002eadc30f03383cf4d262931179001322 --- /dev/null +++ b/doc/fluid/advanced_guide/dygraph_to_static/debugging_cn.md @@ -0,0 +1,204 @@ +# 调试方法 + +本节内容将介绍动态图转静态图(下文简称:动转静)推荐的几种调试方法。 + +> **注解:** +> +> 请确保转换前的动态图代码能够成功运行,建议使用 [paddle.jit.ProgramTranslator().enable(False)](../../api_cn/dygraph_cn/ProgramTranslator_cn.html#enable)关闭动转静功能,直接运行动态图,如下: + +```python +import paddle +import numpy as np +paddle.disable_static() +# 关闭动转静动能 +paddle.jit.ProgramTranslator().enable(False) + +@paddle.jit.to_static +def func(x): + x = paddle.to_tensor(x) + if x > 3: + x = x - 1 + return x + +func(np.ones([3, 2])) +``` + +## 断点调试 +使用动转静功能时,您可以使用断点调试代码。 +例如,在代码中,调用 `pdb.set_trace()`: +```Python +import pdb + +@paddle.jit.to_static +def func(x): + x = paddle.to_tensor(x) + pdb.set_trace() + if x > 3: + x = x - 1 + return x +``` +执行以下代码,将会在转化后的静态图代码中使用调试器: +```Python +func(np.ones([3, 2])) +``` + +运行结果: +```bash +> /tmp/tmpR809hf.py(6)func() +-> def true_fn_0(x): +(Pdb) n +> /tmp/tmpR809hf.py(6)func() +-> def false_fn_0(x): +... +``` + +如果您想在原始的动态图代码中使用调试器,请先调用 [`paddle.jit.ProgramTranslator().enable(False)`](../../api_cn/dygraph_cn/ProgramTranslator_cn.html#enable),如下: +```python +paddle.jit.ProgramTranslator().enable(False) +func(np.ones([3, 2])) +``` +运行结果: +```bash +> (10)func() +-> if x > 3: +... + +``` + +## 打印转换后的代码 +您可以打印转换后的静态图代码,有2种方法: + +1. 使用被装饰后的函数的 `code` 属性 + 如下代码中,装饰器 `paddle.jit.to_static` 会将函数 `func` 转化为一个类对象 `StaticLayer`,可以使用 StaticLayer 的 `code` 属性来获得转化后的代码。 + ```Python + @paddle.jit.to_static + def func(x): + x = paddle.to_tensor(x) + if x > 3: + x = x - 1 + return x + + print(func.code) + ``` + 运行结果: + + ```bash + + def func(x): + x = fluid.layers.assign(x) + + def true_fn_0(x): + x = x - 1 + return x + + def false_fn_0(x): + return x + x = fluid.dygraph.dygraph_to_static.convert_operators.convert_ifelse(x > + 3, true_fn_0, false_fn_0, (x,), (x,), (x,)) + return x + ``` + +2. 
使用 `set_code_level(level)` 或环境变量 `TRANSLATOR_CODE_LEVEL=level` + + 通过调用 `set_code_level` 或设置环境变量 `TRANSLATOR_CODE_LEVEL`,可以在日志中查看转换后的代码: + + ```python + @paddle.jit.to_static + def func(x): + x = paddle.to_tensor(x) + if x > 3: + x = x - 1 + return x + + paddle.jit.set_code_level() # 也可设置 os.environ["TRANSLATOR_CODE_LEVEL"] = '100',效果相同 + func(np.ones([1])) + ``` + 运行结果: + + ```bash + 2020-XX-XX 00:00:00,980-INFO: After the level 100 ast transformer: 'All Transformers', the transformed code: + def func(x): + x = fluid.layers.assign(x) + + def true_fn_0(x): + x = x - 1 + return x + + def false_fn_0(x): + return x + x = fluid.dygraph.dygraph_to_static.convert_operators.convert_ifelse(x > + 3, true_fn_0, false_fn_0, (x,), (x,), (x,)) + return x + ``` + `set_code_level` 函数可以设置查看不同的AST Transformer转化后的代码,详情请见 [set_code_level](../../../paddle/api/paddle/fluid/dygraph/jit/set_code_level_cn.html)。 + +## 使用 `print` +`print` 函数可以用来查看变量,该函数在动转静中会被转化。当仅打印 Paddle Tensor 时,实际运行时会被转换为 Paddle 算子 [Print](../../api_cn/layers_cn/Print_cn.html),否则仍然运行 `print`。 +```python +@paddle.jit.to_static +def func(x): + x = paddle.to_tensor(x) + + # 打印x,x是Paddle Tensor,实际运行时会运行Paddle Print(x) + print(x) + + # 打印注释,非Paddle Tensor,实际运行时仍运行print + print("Here call print function.") + + if len(x) > 3: + x = x - 1 + else: + x = paddle.ones(shape=[1]) + return x + +func(np.ones([1])) +``` + +运行结果: +```bash +Variable: assign_0.tmp_0 + - lod: {} + - place: CPUPlace + - shape: [1] + - layout: NCHW + - dtype: double + - data: [1] +Here call print function. +``` + +## 日志打印 +ProgramTranslator在日志中记录了额外的调试信息,以帮助您了解动转静过程中函数是否被成功转换。 +您可以调用 [`paddle.jit.set_verbosity(level)`]((../../../paddle/api/paddle/fluid/dygraph/jit/set_verbosity_cn.html)) 或设置环境变量 `TRANSLATOR_VERBOSITY=level` 来设置日志详细等级,并查看不同等级的日志信息。目前,`level` 可以取值0-3: +- 0: 无日志 +- 1: 包括了动转静转化流程的信息,如转换前的源码、转换的可调用对象 +- 2: 包括以上信息,还包括更详细函数转化日志 +- 3: 包括以上信息,以及更详细的动转静日志 + +> **注意:** +> +> 日志中包括了源代码等信息,请在共享日志前确保它不包含敏感信息。 + +可以在代码运行前调用 `paddle.jit.set_verbosity` 控制日志详细程度: +```python +paddle.jit.set_verbosity(3) +``` +或者设置环境变量 `TRANSLATOR_VERBOSITY`: +```python +import os +os.environ["TRANSLATOR_VERBOSITY"] = '3' +``` + +运行结果: +```bash +2020-XX-XX 00:00:00,123-Level 1: Source code: +@paddle.jit.to_static +def func(x): + x = paddle.to_tensor(x) + if len(x) > 3: + x = x - 1 + else: + x = paddle.ones(shape=[1]) + return x + +2020-XX-XX 00:00:00,152-Level 1: Convert callable object: convert . +``` diff --git a/doc/fluid/advanced_guide/dygraph_to_static/debugging_en.md b/doc/fluid/advanced_guide/dygraph_to_static/debugging_en.md new file mode 100644 index 0000000000000000000000000000000000000000..2ca87c976b0a9fc30dc588d1b4a8f814463f42de --- /dev/null +++ b/doc/fluid/advanced_guide/dygraph_to_static/debugging_en.md @@ -0,0 +1,202 @@ +# Debugging Methods + +This section will introduce several debugging methods recommended by Dynamic Graph to Static Graph (hereafter called Dynamic-to-Staic). + +> **NOTE:** +> +> Please ensure that the dynamic graph code before transformation can run successfully. 
It is recommended to call [paddle.jit.ProgramTranslator().enable(False)](../../api/dygraph/ProgramTranslator_en.html#enable) to disable Dynamic-to-Static, and run dynamic graph code as follows: + + +```python +import paddle +import numpy as np +paddle.disable_static() + +# Disable Dynamic-to-Static +paddle.jit.ProgramTranslator().enable(False) + +@paddle.jit.to_static +def func(x): + x = paddle.to_tensor(x) + if x > 3: + x = x - 1 + return x + +func(np.ones([3, 2])) +``` + +## Breakpoint Debugging +When using Dynamic-to-Static, you can use breakpoints to debug. + +For example, call `pdb.set_trace()` in your code: +```Python +import pdb + +@paddle.jit.to_static +def func(x): + x = paddle.to_tensor(x) + pdb.set_trace() + if x > 3: + x = x - 1 + return x +``` +Executing the following code will land the debugger in the transformed static graph code: +```Python +func(np.ones([3, 2])) +``` + +```bash +> /tmp/tmpR809hf.py(6)func() +-> def true_fn_0(x): +(Pdb) n +> /tmp/tmpR809hf.py(6)func() +-> def false_fn_0(x): +... +``` + +Calling [`paddle.jit.ProgramTranslator().enable(False)`](../../api/dygraph/ProgramTranslator_en.html#enable) before executing the code will land the debugger in the original dynamic graph code: +```python +paddle.jit.ProgramTranslator().enable(False) +func(np.ones([3, 2])) +``` + +```bash +> (10)func() +-> if x > 3: +... + +``` + +## Print Transformed Code + +There are two ways to print the transformed static graph code: + +1. Use the attribute `code` of the decorated function + + In the following code, the decorator `paddle.jit.to_static` transforms `func` into a class object `StaticLayer`. You can use the `code` attribute of `StaticLayer` to get the transformed code. + ```Python + @paddle.jit.to_static + def func(x): + x = paddle.to_tensor(x) + if x > 3: + x = x - 1 + return x + + print(func.code) + ``` + ```bash + + def func(x): + x = fluid.layers.assign(x) + + def true_fn_0(x): + x = x - 1 + return x + + def false_fn_0(x): + return x + x = fluid.dygraph.dygraph_to_static.convert_operators.convert_ifelse(x > + 3, true_fn_0, false_fn_0, (x,), (x,), (x,)) + return x + ``` +2. Call `set_code_level(level)` or set environment variable `TRANSLATOR_CODE_LEVEL=level` + + You can view the transformed code in the log by calling `set_code_level` or set environment variable `TRANSLATOR_CODE_LEVEL`. + + ```python + @paddle.jit.to_static + def func(x): + x = paddle.to_tensor(x) + if x > 3: + x = x - 1 + return x + + paddle.jit.set_code_level() # the same effect to set os.environ["TRANSLATOR_CODE_LEVEL"] = '100' + func(np.ones([1])) + ``` + + ```bash + 2020-XX-XX 00:00:00,980-INFO: After the level 100 ast transformer: 'All Transformers', the transformed code: + def func(x): + x = fluid.layers.assign(x) + + def true_fn_0(x): + x = x - 1 + return x + + def false_fn_0(x): + return x + x = fluid.dygraph.dygraph_to_static.convert_operators.convert_ifelse(x > + 3, true_fn_0, false_fn_0, (x,), (x,), (x,)) + return x + ``` + `set_code_level` can set different levels to view the code transformed by different ast transformers. For details, please refer to [set_code_level](../../../paddle/api/paddle/fluid/dygraph/jit/set_code_level_en.html)。 + +## `print` +You can call `print` to view variables. `print` will be transformed when using Dynamic-to-Static. When only Paddle Tensor is printed, `print` will be transformed and call Paddle operator [Print](../../api/layers/Print.html) in runtime. Otherwise, call python `print`. 
+ +```python +@paddle.jit.to_static +def func(x): + x = paddle.to_tensor(x) + # x is a Paddle Tensor, so it will run Paddle Print(x) actually. + print(x) + + # The string is not a Paddle Tensor, so it will run print as-is. + print("Here call print function.") + + if len(x) > 3: + x = x - 1 + else: + x = paddle.ones(shape=[1]) + return x + +func(np.ones([1])) +``` + +```bash +Variable: assign_0.tmp_0 + - lod: {} + - place: CPUPlace + - shape: [1] + - layout: NCHW + - dtype: double + - data: [1] +Here call print function. +``` + +## Log Printing +ProgramTranslator can log additional debugging information to help you know whether the function was successfully transformed or not. + +You can call [`paddle.jit.set_verbosity(level)`](../../../paddle/api/paddle/fluid/dygraph/jit/set_verbosity_en.html) or set environment variable `TRANSLATOR_VERBOSITY=level` to enable logging and view logs of different levels. The argument `level` varies from 0 to 3: +- 0: no logging +- 1: includes the information in Dynamic-to-Static tranformation process, such as the source code not transformed, the callable object to transform and so on +- 2: includes above and more detailed function transformation logs +- 3: includes above and extremely verbose logging + +> **WARNING:** +> +> The logs includes information such as source code. Please make sure logs don't contain any sensitive information before sharing them. + +You can call `paddle.jit.set_verbosity` to control the verbosity level of logs: +```python +paddle.jit.set_verbosity(3) +``` +or use the environment variable `TRANSLATOR_VERBOSITY`: +```python +import os +os.environ["TRANSLATOR_VERBOSITY"] = '3' +``` + +```bash +2020-XX-XX 00:00:00,123-Level 1: Source code: +@paddle.jit.to_static +def func(x): + x = paddle.to_tensor(x) + if len(x) > 3: + x = x - 1 + else: + x = paddle.ones(shape=[1]) + return x + +2020-XX-XX 00:00:00,152-Level 1: Convert callable object: convert . diff --git a/doc/fluid/advanced_guide/dygraph_to_static/error_handling_cn.md b/doc/fluid/advanced_guide/dygraph_to_static/error_handling_cn.md new file mode 100644 index 0000000000000000000000000000000000000000..bb92cb80aa7a2485e2203177be2b3a4813602d91 --- /dev/null +++ b/doc/fluid/advanced_guide/dygraph_to_static/error_handling_cn.md @@ -0,0 +1,160 @@ +# 报错信息处理 + +本节内容将介绍使用动态图转静态图(下文简称:动转静)功能发生异常时,[ProgramTranslator](./program_translator_cn.html)对报错信息做的处理,以帮助您更好地理解动转静报错信息。使用动转静功能运行动态图代码时,内部可以分为2个步骤:动态图代码转换成静态图代码,运行静态图代码。接下来将分别介绍这2个步骤中的异常报错情况。 + +## 动转静过程中的异常 +在动态图代码转换成静态图代码的过程中,如果ProgramTranslator无法转换一个函数时,将会显示警告信息,并尝试直接运行该函数。 +如下代码中,函数 `inner_func` 在调用前被转换成静态图代码,当 `x = inner_func(data)` 调用该函数时,不能重复转换,会给出警告信息: + +```python +import paddle +import numpy as np + +paddle.disable_static() + +@paddle.jit.to_static +def func(): + def inner_func(x): + x_tensor = paddle.to_tensor(x) + return x_tensor + data = np.ones([3]).astype("int32") + x = inner_func(data) + return x +func() +``` + +ProgramTranslator打印的警告信息如下: + +```bash +WARNING: doesn't have to be transformed to static function because it has been transformed before, it will be run as-is. 
+``` + +## 运行转换后的代码报错 + +如果在动转静后的静态图代码中发生异常,ProgramTranslator 会捕获该异常,增强异常报错信息,将静态图代码报错行映射到转换前的动态图代码,并重新抛出该异常。 +重新抛出的异常具有以下特点: + +- 隐藏了部分对用户无用的动转静过程调用栈; +- 转换前的代码会给出提示:"In User Code:"; +- 报错信息中包含了转换前的原始动态图代码; + +例如,运行以下代码,在静态图构建时,即编译期会抛出异常: + +```python +import paddle +import numpy as np + +paddle.disable_static() + +@paddle.jit.to_static +def func(x): + x = paddle.to_tensor(x) + x = paddle.reshape(x, shape=[-1, -1]) + return x + +func(np.ones([3, 2])) +``` + +运行结果: +```bash +Traceback (most recent call last): + in () + func(np.ones([3, 2])) + File "paddle/fluid/dygraph/dygraph_to_static/program_translator.py", line 332, in __call__ + raise new_exception +AssertionError: In user code: + + File "", line 7, in func + x = fluid.layers.reshape(x, shape=[-1, -1]) + File "paddle/fluid/layers/nn.py", line 6193, in reshape + attrs["shape"] = get_attr_shape(shape) + File "paddle/fluid/layers/nn.py", line 6169, in get_attr_shape + "be -1. But received shape[%d] is also -1." % dim_idx) + AssertionError: Only one dimension value of 'shape' in reshape can be -1. But received shape[1] is also -1. +``` + +上述报错信息可以分为3点: + +1. 报错栈中,涉及代码转换过程的信息栈默认会被隐藏,不进行展示,以减少干扰信息。 + +2. ProgramTranslator处理后的报错信息中,会包含提示"In user code:",表示之后的报错栈中,包含动转静前的动态图代码,即用户写的代码: + ```bash + AssertionError: In user code: + + File "", line 7, in func + x = fluid.layers.reshape(x, shape=[-1, -1]) + File "paddle/fluid/layers/nn.py", line 6193, in reshape + attrs["shape"] = get_attr_shape(shape) + File "paddle/fluid/layers/nn.py", line 6169, in get_attr_shape + "be -1. But received shape[%d] is also -1." % dim_idx) + ``` + 其中,`File "", line 7, in func` 是转换前的代码位置信息,`x = fluid.layers.reshape(x, shape=[-1, -1])` 是转换前的代码。 + +3. 新的异常中,包含原始报错中的的报错信息,如下: + ```bash + AssertionError: Only one dimension value of 'shape' in reshape can be -1. But received shape[1] is also -1. 
+ ``` + +运行以下代码,在静态图运行时,即运行期会抛出异常: + +```Python +@paddle.jit.to_static +def func(x): + x = paddle.to_tensor(x) + two = paddle.fill_constant(shape=[1], value=2, dtype="int32") + x = paddle.reshape(x, shape=[1, two]) + return x + +func(np.ones([3]).astype("int32")) +``` + +运行结果: + +```bash +Traceback (most recent call last): + File "", line 10, in () + func(np.ones([3]).astype("int32")) + File "paddle/fluid/dygraph/dygraph_to_static/program_translator.py", line 332, in __call__ + raise new_exception + +EnforceNotMet: In user code: + + File "", line 7, in func + x = paddle.reshape(x, shape=[1, two]) + File "paddle/tensor/manipulation.py", line 1347, in reshape + return paddle.fluid.layers.reshape(x=x, shape=shape, name=name) + File "paddle/fluid/layers/nn.py", line 6209, in reshape + "XShape": x_shape}) + File "paddle/fluid/layer_helper.py", line 43, in append_op + return self.main_program.current_block().append_op(*args, **kwargs) + File "paddle/fluid/framework.py", line 2880, in append_op + attrs=kwargs.get("attrs", None)) + File "paddle/fluid/framework.py", line 1977, in __init__ + for frame in traceback.extract_stack(): + +-------------------------------------- +C++ Traceback (most recent call last): +-------------------------------------- +0 paddle::imperative::Tracer::TraceOp(std::string const&, paddle::imperative::NameVarBaseMap const&, paddle::imperative::NameVarBaseMap const&, paddle::framework::AttributeMap, paddle::platform::Place const&, bool) +1 paddle::imperative::OpBase::Run(paddle::framework::OperatorBase const&, paddle::imperative::NameVarBaseMap const&, paddle::imperative::NameVarBaseMap const&, paddle::framework::AttributeMap const&, paddle::platform::Place const&) +2 paddle::imperative::PreparedOp::Run(paddle::imperative::NameVarBaseMap const&, paddle::imperative::NameVarBaseMap const&, paddle::framework::AttributeMap const&) +3 std::_Function_handler >::operator()(char const*, char const*, int) const::{lambda(paddle::framework::ExecutionContext const&)#1}>::_M_invoke(std::_Any_data const&, paddle::framework::ExecutionContext const&) +4 paddle::operators::RunProgramOpKernel::Compute(paddle::framework::ExecutionContext const&) const +5 paddle::framework::Executor::RunPartialPreparedContext(paddle::framework::ExecutorPrepareContext*, paddle::framework::Scope*, long, long, bool, bool, bool) +6 paddle::framework::OperatorBase::Run(paddle::framework::Scope const&, paddle::platform::Place const&) +7 paddle::framework::OperatorWithKernel::RunImpl(paddle::framework::Scope const&, paddle::platform::Place const&) const +8 paddle::framework::OperatorWithKernel::RunImpl(paddle::framework::Scope const&, paddle::platform::Place const&, paddle::framework::RuntimeContext*) const +9 paddle::operators::ReshapeKernel::operator()(paddle::framework::ExecutionContext const&) const +10 paddle::operators::ReshapeOp::ValidateShape(std::vector >, paddle::framework::DDim const&) +11 paddle::platform::EnforceNotMet::EnforceNotMet(std::string const&, char const*, int) +12 paddle::platform::GetCurrentTraceBackString() + +---------------------- +Error Message Summary: +---------------------- +InvalidArgumentError: The 'shape' in ReshapeOp is invalid. The input tensor X'size must be equal to the capacity of 'shape'. But received X's shape = [3], X's size = 3, 'shape' is [1, 2], the capacity of 'shape' is 2. + [Hint: Expected capacity == in_size, but received capacity:2 != in_size:3.] 
(at /paddle/paddle/fluid/operators/reshape_op.cc:206) + [operator < reshape2 > error] [operator < run_program > error] +``` + +上述异常中,除了隐藏部分报错栈、报错定位到转换前的动态图代码外,报错信息中包含了C++报错栈 `C++ Traceback` 和 `Error Message Summary`,这是 Paddle 的 C++ 端异常信息,经处理后在 Python 的异常信息中显示。 diff --git a/doc/fluid/advanced_guide/dygraph_to_static/error_handling_en.md b/doc/fluid/advanced_guide/dygraph_to_static/error_handling_en.md new file mode 100644 index 0000000000000000000000000000000000000000..22c9eb8b37a93c68c0e20a83844be14a560bd6a7 --- /dev/null +++ b/doc/fluid/advanced_guide/dygraph_to_static/error_handling_en.md @@ -0,0 +1,160 @@ +# Error Handling + +This section will introduce the error information when an exception occurs, so as to help you better understand the Dynamic-to-Static error information. +When running the transformed static graph code, the internal procedure can be divided into two steps: the dynamic graph code is transformed into the static graph code, and the static graph code is run. We will introduce the error reporting in these two steps. + +## Exceptions in Dynamic-to-Static Transformation + +If ProgramTranslator cannot transform a function, it will display a warning message and try to run the function as-is. + +In the following code, the function `inner_func` is transformed before calling. When calling `inner_func` in `x = inner_func(data)`, it is not allowed to transform repeatedly, and a warning message will be given: + +```python +import paddle +import numpy as np + +paddle.disable_static() + +@paddle.jit.to_static +def func(): + def inner_func(x): + x_tensor = paddle.to_tensor(x) + return x_tensor + data = np.ones([3]).astype("int32") + x = inner_func(data) + return x +func() +``` + +The warning message is as follows: +```bash +WARNING: doesn't have to be transformed to static function because it has been transformed before, it will be run as-is. +``` +## Exceptions in Running Transformed Code + +When an exception occurs in the transformed code by ProgramTranslator, the exception is caught and the error message is augmented. It maps the error line of the static graph code to the un-transformed dynamic graph code, and then re-raises the exception. + +Among the features of the re-raised exception: + +- Some useless call stacks of Dynamic-to-Static are hidden; +- A prompt will be given before the un-transformed code: "In User Code:"; +- The error message includes references to the original dynamic graph code before transformation; + +For example, if executing the following code, an exception is raised when the static graph is built, that is, at compile time: + +```python +import paddle +import numpy as np + +paddle.disable_static() + +@paddle.jit.to_static +def func(x): + x = paddle.to_tensor(x) + x = paddle.reshape(x, shape=[-1, -1]) + return x + +func(np.ones([3, 2])) +``` + +```bash +Traceback (most recent call last): + in () + func(np.ones([3, 2])) + File "paddle/fluid/dygraph/dygraph_to_static/program_translator.py", line 332, in __call__ + raise new_exception +AssertionError: In user code: + + File "", line 7, in func + x = fluid.layers.reshape(x, shape=[-1, -1]) + File "paddle/fluid/layers/nn.py", line 6193, in reshape + attrs["shape"] = get_attr_shape(shape) + File "paddle/fluid/layers/nn.py", line 6169, in get_attr_shape + "be -1. But received shape[%d] is also -1." % dim_idx) + AssertionError: Only one dimension value of 'shape' in reshape can be -1. But received shape[1] is also -1. +``` + +The above error information can be divided into three points: + +1. 
In the error stack, the call stacks related to the code transformation process are hidden by default and not displayed, so as to avoid confusion. + +2. In the error message processed by ProgramTranslator, a prompt "In user code:" will be included, which means that the following error stacks contains the original dynamic graph code, that is, the code written by the user: + + ```bash + AssertionError: In user code: + + File "", line 7, in func + x = fluid.layers.reshape(x, shape=[-1, -1]) + File "paddle/fluid/layers/nn.py", line 6193, in reshape + attrs["shape"] = get_attr_shape(shape) + File "paddle/fluid/layers/nn.py", line 6169, in get_attr_shape + "be -1. But received shape[%d] is also -1." % dim_idx) + ``` + `File "", line 7, in func` is the location information of un-transformed code, `x = fluid.layers.reshape(x, shape=[-1, -1])` is the un-transformed code. + +3. The new exception contains the message that the exception originally reported, as follows: + ```bash + AssertionError: Only one dimension value of 'shape' in reshape can be -1. But received shape[1] is also -1. + ``` + +If execute the following code, an exception is raised when the static graph is executed at runtime: + +```Python +@paddle.jit.to_static +def func(x): + x = paddle.to_tensor(x) + two = paddle.fill_constant(shape=[1], value=2, dtype="int32") + x = paddle.reshape(x, shape=[1, two]) + return x + +func(np.ones([3]).astype("int32")) +``` + +```bash +Traceback (most recent call last): + File "", line 10, in () + func(np.ones([3]).astype("int32")) + File "paddle/fluid/dygraph/dygraph_to_static/program_translator.py", line 332, in __call__ + raise new_exception + +EnforceNotMet: In user code: + + File "", line 7, in func + x = paddle.reshape(x, shape=[1, two]) + File "paddle/tensor/manipulation.py", line 1347, in reshape + return paddle.fluid.layers.reshape(x=x, shape=shape, name=name) + File "paddle/fluid/layers/nn.py", line 6209, in reshape + "XShape": x_shape}) + File "paddle/fluid/layer_helper.py", line 43, in append_op + return self.main_program.current_block().append_op(*args, **kwargs) + File "paddle/fluid/framework.py", line 2880, in append_op + attrs=kwargs.get("attrs", None)) + File "paddle/fluid/framework.py", line 1977, in __init__ + for frame in traceback.extract_stack(): + +-------------------------------------- +C++ Traceback (most recent call last): +-------------------------------------- +0 paddle::imperative::Tracer::TraceOp(std::string const&, paddle::imperative::NameVarBaseMap const&, paddle::imperative::NameVarBaseMap const&, paddle::framework::AttributeMap, paddle::platform::Place const&, bool) +1 paddle::imperative::OpBase::Run(paddle::framework::OperatorBase const&, paddle::imperative::NameVarBaseMap const&, paddle::imperative::NameVarBaseMap const&, paddle::framework::AttributeMap const&, paddle::platform::Place const&) +2 paddle::imperative::PreparedOp::Run(paddle::imperative::NameVarBaseMap const&, paddle::imperative::NameVarBaseMap const&, paddle::framework::AttributeMap const&) +3 std::_Function_handler >::operator()(char const*, char const*, int) const::{lambda(paddle::framework::ExecutionContext const&)#1}>::_M_invoke(std::_Any_data const&, paddle::framework::ExecutionContext const&) +4 paddle::operators::RunProgramOpKernel::Compute(paddle::framework::ExecutionContext const&) const +5 paddle::framework::Executor::RunPartialPreparedContext(paddle::framework::ExecutorPrepareContext*, paddle::framework::Scope*, long, long, bool, bool, bool) +6 
paddle::framework::OperatorBase::Run(paddle::framework::Scope const&, paddle::platform::Place const&) +7 paddle::framework::OperatorWithKernel::RunImpl(paddle::framework::Scope const&, paddle::platform::Place const&) const +8 paddle::framework::OperatorWithKernel::RunImpl(paddle::framework::Scope const&, paddle::platform::Place const&, paddle::framework::RuntimeContext*) const +9 paddle::operators::ReshapeKernel::operator()(paddle::framework::ExecutionContext const&) const +10 paddle::operators::ReshapeOp::ValidateShape(std::vector >, paddle::framework::DDim const&) +11 paddle::platform::EnforceNotMet::EnforceNotMet(std::string const&, char const*, int) +12 paddle::platform::GetCurrentTraceBackString() + +---------------------- +Error Message Summary: +---------------------- +InvalidArgumentError: The 'shape' in ReshapeOp is invalid. The input tensor X'size must be equal to the capacity of 'shape'. But received X's shape = [3], X's size = 3, 'shape' is [1, 2], the capacity of 'shape' is 2. + [Hint: Expected capacity == in_size, but received capacity:2 != in_size:3.] (at /paddle/paddle/fluid/operators/reshape_op.cc:206) + [operator < reshape2 > error] [operator < run_program > error] +``` + +In the above exception, in addition to hiding part of the error stack and locating the error to the un-transformed dynamic graph code, the error information includes the c++ error stack `C++ Traceback` and `Error Message Summary`, which are the exception from C++ and are displayed in Python exception after processing. diff --git a/doc/fluid/advanced_guide/dygraph_to_static/grammar_list_cn.rst b/doc/fluid/advanced_guide/dygraph_to_static/grammar_list_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e1a0867bb021b560a5380dfb997b318e19ab0e8f --- /dev/null +++ b/doc/fluid/advanced_guide/dygraph_to_static/grammar_list_cn.rst @@ -0,0 +1,122 @@ +ProgramTranslator支持的语法 +========================== + +ProgramTranslator本质是把Python运行语法转写为PaddlePaddle静态图代码,但是Python语法的表达能力和PaddlePaddle静态图表达能力存在不同,这使得一些代码无法被转换。 + +本章节我们将详细讲述在动转静过程中支持转化哪些语法,不支持哪些语法,并且讲述如何改写代码能够解决语法不支持的场景。 + +动转静支持的语法分为以下几个大类: + +控制流相关关键词 +------------------ + +控制流指if-elif-else,while等能够控制程序语句执行顺序的关键字。PaddlePaddle静态图通过cond,while_loop API来实现条件判断和循环,如果动态图Python控制流的判断条件或循环条件依赖 PaddlePaddle Tensor,动转静后会被转化为等价的PaddlePaddle控制流接口,否则仍然使用Python控制流逻辑运行。在动转静过程中这些关键字的转化情况为: + +1. if-elif-else 条件 + +当 ``if <条件>`` 中的条件是Tensor时,ProgramTranslator会把该if-elif-else语句转化为等价的cond API语句。否则会按普通Python if-elif-else的逻辑运行。需注意cond支持的Tensor只能是numel为1的bool Tensor,所以请使用这种Tensor进行条件判断,其他Tensor会报错。 + +2. while 循环 + +当while循环中的条件是Tensor时,ProgramTranslator会把该while语句转化为等价的while_loop API语句,否则会按普通Python while运行。需注意while循环条件中的Tensor只能是numel为1的bool Tensor,所以请使用这种Tensor进行条件判断,其他Tensor会报错。 + + +3. for 循环 + +3.1 ``for _ in range(__)`` 循环 + +ProgramTranslator先将其转化为等价的Python while循环,然后按while循环的逻辑进行动静转换。 + +3.2 ``for _ in x`` 循环 + +当x是Python容器或迭代器,则会用普通Python逻辑运行。当x是Tensor时,会转化为循环中每次对应拿出x[0], x[1], ... 。 + +3.3 ``for idx, val in enumerate(x)`` 循环 + +当x是Python容器或迭代器,则会用普通Python逻辑运行。当x是Tensor时,idx会转化为依次0,1,...的1-D Tensor。val会转化为循环中每次对应拿出x[0], x[1], ... 。 + +4. break,continue + +ProgramTranslator 可以支持在循环中添加break,continue语句,其底层实现原理是对于要break,continue的部分在相应时候使用cond在一定条件下跳过执行。 + +5. return + +ProgramTranslator 支持在循环,条件判断中return结果而不需要一定在函数末尾return。也能够支持return不同长度tuple和不同类型的Tensor。其底层实现原理是对return后的部分相应使用cond在一定条件下跳过执行。 + + +一些需要转化的运算类型 +------------------------ + +1. +,-,*,/,**, >, <, >= , <=, == 等Python内置运算 + +由于静态图有重载这些基本运算符,所以这些被ProgramTranslator转化后都适用相应重载的运算符,动转静支持此类运算。 + +2. 
and,or,not 逻辑运算 + +Python内置and,or,not逻辑运算关键词,ProgramTranslator在语句的运算时会判断逻辑运算关键词运行的对象是否是Tensor,如果都是Tensor,我们将其转化为静态图对应的逻辑运算接口并运行。 + +3. 类型转化 + +动态图中可以直接用Python的类型转化语法来转化Tensor类型。例如x是Tensor时,float(x)可以将x的类型转化为float。ProgramTranslator在运行时判断x是否是Tensor,如果是,则在动转静时使用静态图cast接口转化相应的Tensor类型。 + +Python 函数相关 +--------------------- + +1. print + +如果x是Tensor,在动态图模式中print(x)可以打印x的值。在动转静过程中我们把此转化为静态图的Print接口实现,使得在静态图中也能打印。如果print的参数不是Tensor,那么我们没有把相应print语句进行转写。 + +2. len + +如果x是Tensor,在动态图模式中len(x)可以获得x第0维度的长度。在动转静中我们把此转化为静态图shape接口,并返回shape的第0维。另外如果x是个TensorArray,那么len(x)将会使用静态图接口control_flow.array_length返回TensorArray的长度。对于其他情况,动转静时会按照普通Python len函数运行。 + +3. lambda 表达式 + +动转静允许写带有Python lambda表达式的语句,并且我们会适当改写使得返回对应结果。 + +4. 函数内再调用函数 + +对于函数内调用其他函数的情况,ProgramTranslator也会对内部的函数递归地进行动转静,这样做的好处是可以在最外层函数只需加一次装饰器即可,而不需要每个函数都加装饰器。但需要注意,动转静还不支持函数递归调用自己,详细原因请查看下文动转静无法正确运行的情况。 + +报错异常相关 +-------------- + +1. assert + +如果x是Tensor,在动态图中可以通过assert x来强制x为True或者非0值,在动转静中我们把此转化为静态图Assert接口支持此功能。 + + +Python基本容器 +--------------- + +1. list:对于一个list如果里面元素都是Tensor,那么动转静会转化其为TensorArray,静态图TensorArray可以支持append,pop,修改操作。因此ProgramTranslator在元素皆为Tensor的list中支持上面三种操作。换言之,其他list操作,比如sort无法支持。对于list中并非所有元素是Tensor的情况,ProgramTranslator会将其作为普通Python list运行。 + +2. dict:ProgramTranslator会将相应的dict中的Tensor添加进静态图Program,因此使用dict是动转静支持的语法。 + +动转静无法正确运行的情况 +-------------------------- + +1. Reshape后的变量调用其shape作为PaddlePaddle API参数。 + +具体表现比如 ``x = reshape(x, shape=shape_tensor)`` ,再使用 ``x.shape[0]`` 的值进行其他操作。这种情况会由于动态图和静态图的本质不同而使得动态图能够运行,但静态图运行失败。其原因是动态图情况下,API是直接返回运行结果,因此 ``x.shape`` 在经过reshape运算后是确定的。但是在转化为静态图后,因为静态图API只是组网,``shape_tensor`` 的值在组网时是不知道的,所以 ``reshape`` 接口组网完,静态图并不知道 ``x.shape`` 的值。PaddlePaddle静态图用-1表示未知的shape值,此时 ``x`` 的shape每个维度会被设为-1,而不是期望的值。 + +遇到这类情况我们建议尽量固定shape值,减少reshape操作。 + +2. 多重list嵌套读写Tensor + +具体表现如 ``l = [[tensor1, tensor2], [tensor3, tensor4]]`` ,因为现在动转静将元素全是Tensor的list转化为TensorArray,而PaddlePaddle的TensorArray还不支持多维数组,因此这种情况下,动转静无法正确运行。 + +遇到这类情况我们建议尽量用一维list,或者自己使用PaddlePaddle的create_array,array_read,array_write接口编写为TensorArray。 + +3. Tensor值在被装饰函数中转成numpy array进行运算 + +具体表现为在被装饰函数中没有返回Tensor时就使用 ``numpy.array(tensor)`` 将Tensor转化为numpy array并使用numpy接口进行运算。这种情况在动态图下因为Tensor有值是可以正常运行的,但是在静态图时由于Tensor只是组网变量,在没有运行时没有数值,因此无法进行numpy运算。 + +遇到这种情况我们建议在动转静的函数中尽量使用PaddlePaddle接口替代numpy接口进行运算。 + +4. 一个函数递归调用自己 + +ProgramTranslator还无法支持一个函数递归调用自己,原因是递归常常会用 ``if-else`` 构造停止递归的条件。然而这样的停止条件在静态图下只是一个 ``cond`` 组网,组网并不能在编译阶段决定自己组多少次,会导致函数运行时一直组网递归直至栈溢出,因此ProgramTranslator还无法支持一个函数递归调用自己。 + +遇到这种情况我们建议将代码改为非递归写法。 + diff --git a/doc/fluid/advanced_guide/dygraph_to_static/grammar_list_en.rst b/doc/fluid/advanced_guide/dygraph_to_static/grammar_list_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..0c88a997165f06df3e51e01145ddfec9558aedc8 --- /dev/null +++ b/doc/fluid/advanced_guide/dygraph_to_static/grammar_list_en.rst @@ -0,0 +1,124 @@ +Supported Grammars +================== + +The key part of ProgramTranslator is transforming Python grammar into PaddlePaddle static graph code, but there exists difference between Python and PaddlePaddle static graph which causes some limitation of the code transformation. + +In this section we will talk about the supported grammars and unsupported grammars, also give some suggestions when the grammar is unsupported. + +There are several kinds of supported grammars: + +Control flow keywords +--------------------- + +Control flow means those keywords that controls the execution order of program statements, for example ``if-elif-else, while`` . 
Conditional operation and loop were implemented as ``cond, while_loop`` APIs in PaddlePaddle static graph. If the condition of a Python dygraph control flow depends on PaddlePaddle Tensor, the ProgramTranslator will convert the control flow into equivalent PaddlePaddle control flow APIs, else it will still be executed as Python control flow. The transformations of those control flow keywords are listed below: + +1. ``if-elif-else`` statements + +If the condition of ``if `` is Tensor, ProgramTranslator will turn this ``if-elif-else`` statement to equivalent PaddlePaddle static graph ``cond`` statements, otherwise the ``if-elif-else`` statement is executed as normal Python conditional statement. Note that ``cond`` API only accepts input conditional Tensor with numel equals to 1, so please use this kind of Tensor to write dygraph conditional statement, other Tensors will cause error. + +2. ``while`` loop + +If the condition of ``while`` is Tensor, ProgramTranslator will turn this ``while`` statement to equivalent PaddlePaddle static graph ``while_loop`` statements, otherwise the ``while`` statement is executed as normal Python ``while`` loop statement. Note that ``while_loop`` API only accepts input conditional Tensor with numel equals to 1, so please use this kind of Tensor to write dygraph loop condition statement, other Tensors will cause error. + +3. ``for`` loop + +3.1 ``for _ in range(__)`` loop + +Firstly, ProgramTranslator will transform it into equivalent Python while loop, then convert dygraph to static graph by same logic of ``while`` loop. + +3.2 ``for _ in x`` loop + +If ``x`` is a Python container, iterator, or generator, it will be executed as original Python statement. Otherwise ``x`` is a Tensor, ProgramTranslator will transform the loop into PaddlePaddle static graph loop and fetches ``x[0], x[1], ...`` as loop iteration variable in each loop iteration. + +3.3 ``for idx, val in enumerate(x)`` loop + +If ``x`` is a Python container, iterator, or generator, it will be executed as original Python statement. Otherwise ``x`` is a Tensor, Program +Translator will transform the loop into PaddlePaddle static graph loop. The ``idx`` will be transformed to 1-D tensor with value ``0, 1, ...`` and the ``val`` will be transformed to ``x[0], x[1], ...`` in each loop iteration. + +4. ``break, continue`` + +ProgramTranslator supports ``break, continue`` statements in loop. ProgramTranslator will add some PaddlePaddle static graph ``cond`` statements to skip execution of corresponding part when ``break, continue`` condition is meet. + +5. ``return`` + +ProgramTranslator supports ``return`` in a conditonal block or loop body, not necessary to be at the end of a function. It also supports returning tuple with various length of Tensors with different dtype. The implementation is adding some PaddlePaddle static graph ``cond`` statement to skipparts of code when ``return`` is triggered. + + +Some Python basic operators +--------------------------- + +1. ``+, -, *, /, **, >, <, >= , <=, ==`` etc. + +Because PaddlePaddle static graph overrides those Python basic arithmetic operators and comparison operators, ProgramTranslator can support those operators. + +2. 
+Some Python basic operators +--------------------------- + +1. ``+, -, *, /, **, >, <, >=, <=, ==`` etc. + +Because PaddlePaddle static graph overrides those Python basic arithmetic operators and comparison operators, ProgramTranslator can support those operators. + +2. ``and, or, not`` logical operators + +Python has ``and, or, not`` keywords as basic logical operators. ProgramTranslator will check whether the operands of the logical operators are Tensors; if they are Tensors, ProgramTranslator replaces the ``and, or, not`` statements with the corresponding PaddlePaddle static graph logical operators and runs them. + +3. Type casting + +In dygraph mode, users can use Python type casting grammar. For instance, if ``x`` is a Tensor, ``float(x)`` casts the data type of ``x`` to float. ProgramTranslator will check whether ``x`` is a Tensor at run time; if it is, the casting statement will be modified to the PaddlePaddle static graph ``cast`` API so that its dtype can be changed in the dygraph to static transformation. + +Python functions +------------------------------ + +1. ``print`` + +In dygraph mode, ``print(x)`` will print the Tensor value if ``x`` is a Tensor. ProgramTranslator converts the built-in ``print`` to the PaddlePaddle static graph ``Print`` API during the dygraph to static graph transformation if the arguments are Tensors, otherwise ProgramTranslator won't convert the ``print``. + +2. ``len`` + +If ``x`` is a Tensor, ``len(x)`` can get the length at the 0-dimension of ``x`` in dygraph mode. ProgramTranslator turns it into the PaddlePaddle static graph ``shape`` API and returns the 0-dimension of the ``shape``; else, if ``x`` is a TensorArray, then ``len(x)`` will be transformed to the static graph API ``control_flow.array_length`` to return the length of the TensorArray. In other cases, the ``len`` function will be executed as the Python built-in ``len``. + +3. lambda expression + +ProgramTranslator supports Python lambda expressions and it modifies the code to return the expected result. + + +4. Calling function + +If the transformed function calls another function, ProgramTranslator also transforms the called function. The benefit is that users can add one decorator at the outermost function to do the transformation, with no need to add the decorator for each function. Note that ProgramTranslator doesn't support +a function calling itself recursively; the details are in the unsupported grammars section below. + + +Errors and Exceptions +--------------------- + +1. ``assert`` + +If ``x`` is a Tensor, an ``assert x`` statement can assert ``x`` to be ``True`` or a non-zero value in dygraph mode. ProgramTranslator converts the statement into the PaddlePaddle static graph ``Assert`` API to support this grammar. + + +Python containers +----------------- + +1. ``list``: if all elements in a list are Tensors, then ProgramTranslator converts it to TensorArray. PaddlePaddle static graph TensorArray supports append, pop, and modify; other list operations such as sort cannot be supported. When not all elements in a list are Tensors, ProgramTranslator will treat it as a normal Python list. + +2. ``dict``: ProgramTranslator will add the Tensors in a dict into the PaddlePaddle static graph ``Program``, so ``dict`` is supported by ProgramTranslator. + +Unsupported grammars +-------------------- + +1. Use the shape of the output tensor of ``reshape`` + +For example, ``x = reshape(x, shape=shape_tensor)`` , then use ``x.shape[0]`` to do other operations. Due to the difference between dygraph and static graph, it is okay in dygraph but it will fail in static graph. The reason is that APIs return computation results in dygraph mode, so ``x.shape`` has a deterministic value after calling ``reshape`` . However, static graph doesn't have the value of ``shape_tensor`` during network building, so PaddlePaddle doesn't know the value of ``x.shape`` after calling ``reshape``. PaddlePaddle static graph will set -1 to represent an unknown shape value for each dimension of ``x.shape`` in this case, not the expected value. + +We suggest setting fixed shape values as much as possible and reducing reshape operations (see the sketch at the end of this section). + +2. List of list of Tensor + +For example: ``l = [[tensor1, tensor2], [tensor3, tensor4]]``. Because ProgramTranslator transforms a list whose elements are all Tensors into a PaddlePaddle static graph TensorArray, and TensorArray doesn't support multiple dimensions, ProgramTranslator cannot run this case. + +We suggest using a 1-D list in most cases, or using the PaddlePaddle APIs ``create_array, array_read, array_write`` to control the TensorArray. + +3. Convert Tensor to numpy array and do operations + +For example, the user doesn't return a Tensor in the decorated function but calls ``numpy.array(tensor)`` to convert the Tensor to a numpy array and then uses numpy APIs to compute on it. In dygraph mode, it is okay because the Tensor has a value, but a Tensor is a variable for building the network in static graph mode; it doesn't contain a value outside static graph run time, so we cannot do numpy calculations on it. + +We suggest using PaddlePaddle APIs to replace numpy APIs in this case (see the sketch at the end of this section). + +4. A function calls itself recursively + +ProgramTranslator doesn't support a function calling itself recursively. The reason is that a recursive function usually uses ``if-else`` for a condition to stop the recursion, and the stop condition will be transformed to a ``cond`` in static graph mode. Since ``cond`` just builds the network, it cannot determine how many times to recursively build the network during the network building stage, so the function would recursively call itself and build the network until stack overflow. Due to the above reason, ProgramTranslator cannot support a function calling itself recursively now. + +We suggest writing a non-recursive function in this case.
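The first and third cases above are the easiest to hit by accident. The following sketch only marks where they go wrong; the function is hypothetical and shows the pattern to avoid rather than any recommended API usage:

.. code-block:: python

    import paddle

    @paddle.jit.to_static
    def pitfall_fn(x, shape_tensor):
        x = paddle.reshape(x, shape=shape_tensor)
        # Case 1: after transformation the value of ``shape_tensor`` is not
        # known while the network is being built, so ``x.shape[0]`` is -1
        # here rather than the runtime length.
        first_dim = x.shape[0]
        # Case 3: converting to a numpy array needs a concrete value, which
        # a static graph variable does not have at network-building time, so
        # the line below works in dygraph but fails after transformation.
        # y = x.numpy()
        return x

Using ``paddle.shape(x)[0]`` (a Tensor computed at run time) inside the network, and PaddlePaddle APIs instead of numpy APIs, is the usual way to avoid both problems.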
diff --git a/doc/fluid/advanced_guide/dygraph_to_static/index_cn.rst b/doc/fluid/advanced_guide/dygraph_to_static/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8dbc8d7bcf15df9b3ae3c571c76df82db223b436 --- /dev/null +++ b/doc/fluid/advanced_guide/dygraph_to_static/index_cn.rst @@ -0,0 +1,20 @@ +############### +动态图转静态图 +############### + +- `动态图转静态图 `_ :介绍了动态图转静态图的基本使用方法和架构原理 + +- `支持语法列表 `_ :介绍了动态图转静态图支持的语法以及罗列不支持的语法写法 + +- `报错信息处理 `_ :介绍了动态图转静态图的报错信息处理方法 + +- `调试方法 `_ :介绍了动态图转静态图支持的调试方法 + + +.. toctree:: + :hidden: + + grammar_list_cn.rst + program_translator_cn.rst + error_handling_cn.md + debugging_cn.md diff --git a/doc/fluid/advanced_guide/dygraph_to_static/index_en.rst b/doc/fluid/advanced_guide/dygraph_to_static/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..6ff26175e9483c0b834bac65308af8eedec71794 --- /dev/null +++ b/doc/fluid/advanced_guide/dygraph_to_static/index_en.rst @@ -0,0 +1,20 @@ +####################### +Dygraph to Static Graph +####################### + +- `Dygraph to Static Graph `_ :Introduces the basic usage for transforming dygraph code into static code and the architecture of ProgramTranslator. + +- `Supported Grammars `_ :Introduces the grammars supported by ProgramTranslator and lists the unsupported grammars. + +- `Error Handling `_ :Introduces the error handling of ProgramTranslator. + +- `Debugging Methods `_ :Introduces the debugging methods available when using ProgramTranslator. + +.. 
toctree:: + :hidden: + + grammar_list_en.rst + program_translator_en.rst + error_handling_en.md + debugging_en.md + diff --git a/doc/fluid/advanced_guide/dygraph_to_static/program_translator_cn.rst b/doc/fluid/advanced_guide/dygraph_to_static/program_translator_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..03852fc58a6bbe9c515de9138c40d38e837d25a8 --- /dev/null +++ b/doc/fluid/advanced_guide/dygraph_to_static/program_translator_cn.rst @@ -0,0 +1,182 @@ +动态图转静态图 +================ + +动态图有诸多优点,包括易用的接口,python风格的编程体验,友好的debug交互机制等。在动态图模式下,代码是按照我们编写的顺序依次执行。这种机制更符合Python程序员的习惯,可以很方便地将大脑中的想法快速地转化为实际代码,也更容易调试。但在性能方面,Python执行开销较大,与C++有一定差距。因此在工业界的许多部署场景中(如大型推荐系统、移动端)都倾向于直接使用C++来提速。 + +相比动态图,静态图在部署方面更具有性能的优势。静态图程序在编译执行时,先搭建模型的神经网络结构,然后再对神经网络执行计算操作。预先搭建好的神经网络可以脱离Python依赖,在C++端被重新解析执行,而且拥有整体网络结构也能进行一些网络结构的优化。 + +动态图代码更易编写和debug,但在部署性能上,静态图更具优势。因此我们新增了动态图转静态图的功能,支持用户依然使用动态图编写组网代码。PaddlePaddle会对用户代码进行分析,自动转换为静态图网络结构,兼顾了动态图易用性和静态图部署性能两方面优势。 + +基本使用方法 +-------------- + +PaddlePaddle提供了两种动态图转静态图的方式,基于动态图trace的TracedLayer与基于源代码级别转换的ProgramTranslator。 + +1. 基于trace的TracedLayer: + +trace是指在模型运行时记录下其运行过哪些算子。TracedLayer就是基于这种技术,在一次执行动态图的过程中,记录所有运行的算子,并构建和保存静态图模型。一个使用例子如下: + +我们先定义一个简单的Fully Connected网络: + +.. code-block:: python + + import numpy as np + import paddle + + class SimpleFcLayer(paddle.nn.Layer): + def __init__(self, feature_size, batch_size, fc_size): + super(SimpleFCLayer, self).__init__() + self._linear = paddle.nn.Linear(feature_size, fc_size) + self._offset = paddle.to_tensor( + np.random.random((batch_size, fc_size)).astype('float32')) + + def forward(self, x): + fc = self._linear(x) + return fc + self._offset + + +接下来是TracedLayer如何存储模型: + +.. code-block:: python + import paddle + from paddle.jit import TracedLayer + + paddle.disable_static() + + fc_layer = SimpleFcLayer(3, 4, 2) + in_np = np.random.random([3, 4]).astype('float32') + # 将numpy的ndarray类型的数据转换为Tensor类型 + input_var = paddle.to_tensor(in_np) + # 通过 TracerLayer.trace 接口将命令式模型转换为声明式模型 + out_dygraph, static_layer = TracedLayer.trace(fc_layer, inputs=[input_var]) + save_dirname = './saved_infer_model' + # 将转换后的模型保存 + static_layer.save_inference_model(save_dirname, feed=[0], fetch=[0]) + + +载入的模型可以使用静态图方式运行 + +.. code-block:: python + + place = paddle.CPUPlace() + exe = paddle.Executor(place) + program, feed_vars, fetch_vars = paddle.io.load_inference_model(save_dirname, exe) + fetch, = exe.run(program, feed={feed_vars[0]: in_np}, fetch_list=fetch_vars) + + +但是也正如我们阐述的原理,trace只是记录了一次执行涉及算子,若在用户的模型代码中,包含了依赖数据条件(包括输入的值或者shape)的控制流分支,即根据数据条件触发运行不同的算子,则TracedLayer无法正常工作。比如下面 + + +.. code-block:: python + + import paddle + + def func(input_var) + # if判断与输入input_var的shape有关 + if input_var.shape[0] > 1: + return paddle.cast(input_var, "float64") + else: + return paddle.cast(input_var, "int64") + + paddle.disable_static() + in_np = np.array([-2]).astype('int') + input_var = paddle.to_tensor(in_np) + out = func(input_var) + + +上例如果在使用TracedLayer.trace(func, inputs=[input_var]),由于trace只能记录if-else其中跑的一次算子,模型就无法按用户想要的根据input_var的形状进行if-else控制流保存。类似的控制流还有while/for循环的情况 + +2. 基于源代码转写的ProgramTranslator + +对于依赖数据的控制流,我们使用基于源代码转写的ProgramTranslator来进行动态图转静态图。其基本原理是通过分析Python代码来将动态图代码转写为静态图代码,并在底层自动帮用户使用执行器运行。其基本使用方法十分简便,只需要在要转化的函数(该函数也可以是用户自定义动态图Layer的forward函数)前添加一个装饰器 ``@paddle.jit.to_static`` ,上面的例子转化如下,并且可以依旧使用该函数运行得到结果: + +.. 
code-block:: python + + import paddle + + @paddle.jit.to_static + def func(input_var) + # if判断与输入input_var的shape有关 + if input_var.shape[0] > 1: + out = paddle.cast(input_var, "float64") + else: + out = paddle.cast(input_var, "int64") + + paddle.disable_static() + in_np = np.array([-2]).astype('int') + input_var = paddle.to_tensor(in_np) + func(input_var) + + +若要存储转化后的静态图模型,可以调用 ``paddle.jit.save`` ,我们再以SimpleFcLayer为例,需要在SimpleFcLayer的forward函数添加装饰器: + +.. code-block:: python + + import numpy as np + import paddle + + class SimpleFcLayer(paddle.nn.Layer): + def __init__(self, feature_size, batch_size, fc_size): + super(SimpleFCLayer, self).__init__() + self._linear = paddle.nn.Linear(feature_size, fc_size) + self._offset = paddle.to_tensor( + np.random.random((batch_size, fc_size)).astype('float32')) + + @paddle.jit.to_static + def forward(self, x): + fc = self._linear(x) + return fc + self._offset + + +存储该模型可以使用paddle.jit.save接口: + +.. code-block:: python + + import paddle + + paddle.disable_static() + + fc_layer = SimpleFcLayer(3, 4, 2) + in_np = np.random.random([3, 4]).astype('float32') + input_var = paddle.to_tensor(in_np) + out = fc_layer(input_var) + + paddle.jit.save(fc_layer, "./fc_layer_dy2stat", input_spec=[input_var]) + +内部架构原理 +-------------- + +TracedLayer的原理就是trace,相对简单,因此我们在这里不展开描述。本节将主要阐述ProgramTranslator基于源代码将动态图代码转化为静态图代码。 + + +转化过程发生在用户开始调用被装饰的函数,转换过程在装饰器中实现。我们将内部涉及的过程分为以下几步: + +1. 函数与缓存 + +动态图转静态图的主体是函数(Function)。对于函数内包含的PaddlePaddle接口,如果是仅计算相关算子代码语句,那么因为PaddlePaddle动态图和静态图接口一致,我们不需要额外转换这些代码为静态图代码。但是对于动态图,此类代码接口是直接运行计算和返回结果,而对于静态图此类代码接口其实是组网。那么如果被转化的函数被调用多次,动态图转静态图后会多次组网添加对应算子,这显然会导致问题。为了解决这个问题以及为了加速动转静转化过程,我们维护了被装饰器装饰的函数(Function)与其输入形状(shape),数据类型(dtype)映射到被转化后组网的Program的缓存(Cache)。当要被转化的函数命中缓存,我们直接用对应存储的Program运行静态图得到结果,否则我们才进行语句转化,并且转化成功后的Program存储进缓存。 + +2. 动态图源码转AST(抽象语法树) + +动态图转静态图的最核心部分类似一个编译器,解析动态图代码语句为AST,再对应AST进行改写,最后反转回成静态图代码。从函数转化为代码字符串可以使用Python的inspect.getsource。从字符串Python提供了自带的 `ast `_ 库来解析字符串为AST,但是由于Python2,Python3的语法略有不同,为了避免我们需要额外处理这些Python2,Python3的不同情况,我们使用了统一Python2,Python3的开源AST处理 `gast库 `_ 。这些接口使得函数转化为AST没有本质上的困难。 + +3. AST改写和静态图源码转换 + +这部分为动转静最核心的部分,我们对支持的各种语法进行ast转写。其中最重要的Python控制流,if-else,while,for循环被分别分析转化为PaddlePaddle静态图接口cond,while_loop等接口实现。我们对想转化的每一种主要语法创建一个Transformer(这里的Transformer是Python ast转写的概念,而不是自然语言处理NLP领域的Transformer),每个Transformer扫一遍AST并进行对应的改写。最后被转化完成的AST我们使用gast提供的接口转回成源码。 + +4. 静态图源码作为动态图一部分运行的技术 + +为了动静转化更加易用和被转化的代码能在动态图中复用,我们在拥有源码后运行生成Program,并将这个Program作为一个大op,包装成动态图的一个op,这样既能把用户的代码转为静态图提速或者保存部署,另一方面如果用户想在Python层使用生成的静态图代码作为动态图的一部分继续训练或者别的动态图运算也是可以直接使用。 + +5. 易用性与Debug功能在动转静过程的实现 + +正如AST转写类似编译器,而一般编译器都会提供debug断点,报错,输出一些中间代码等功能。我们在进行动转静时,万一用户的动态图代码出错,或者用户想断点调试,或者用户想看看被转化后的静态图代码是否符合其预期,我们也希望能够像编译器一样提供这些易用性功能,使得动转静兼顾性能和部署同时还具有易用性。我们这里将列出这些功能的实现方式 + +A. 报错对应到动态图代码行。由于被转化后的静态图代码和原动态图代码不同,Python运行出错时会报静态图的错误,因此我们在每一次AST转写时添加AST节点对应的原动态图代码行等信息,在Python报错栈中将静态图的报错转化成对应的动态图源码报错 + +B. 设置断点功能。我们保留了被转化后代码的中的pdb.set_trace(), 用户可以使用这种方式进行断点调试 + +C. 查看最后转化的静态图代码。我们输出为一个StaticLayer class,这个StaticLayer可以直接被调用,但是也存储转化后的代码,可以调用StaticLayer.code来获得转化后的代码。 + +D. 
输出中间转化状态代码,甚至不同语法Transformer转化的代码,比如经过for循环转化后代码是什么样的。我们开放接口设定了log level来让用户可以打印中间状态转化的代码。 + + diff --git a/doc/fluid/advanced_guide/dygraph_to_static/program_translator_en.rst b/doc/fluid/advanced_guide/dygraph_to_static/program_translator_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..573ddbb79893acb9386c87bec373035a439037b4 --- /dev/null +++ b/doc/fluid/advanced_guide/dygraph_to_static/program_translator_en.rst @@ -0,0 +1,178 @@ +Dygraph to Static Graph +======================= + +The imperative-style coding of PaddlePaddle takes advantage of flexibility, Pythonic coding, and easy-to-debug interface. In dygraph mode, code immediately executes kernels and gets numerical results, which allows users to enjoy traditional Pythonic code order. Therefore it is efficient to transform idea into real code and simple to debug. However, Python code is usually slower than C++ thus lots of industrial systems (such as large recommend system, mobile devices) prefer to deploy with C++ implementation. + +Static graph is better at speed and portability. Static graph builds the network structure during compiling time and then does computation. The built network intermediate representation can be executed in C++ and gets rids of Python dependency. + +While dygraph has usability and debug benefits and static graph yields performance and deployment advantage, we adds functionality to convert dygraph to static graph. Users use imperative mode to write dygraph code and PaddlePaddle will analyze the Python syntax and turn it into network structure of static graph mode. Our approach retains both the usability of dygraph and portability of static graph. + +Basic Usage +-------------- + +PaddlePaddle has two ways to transform dygraph to static graph. TracedLayer extracts computation graph through tracing and ProgramTranslator gets computation graph through source code transformation. + + +1. TracedLayer: + +Tracing means recording the operators when running a model. TracedLayer is based on this technique. It runs dygraph program once and records all operators, then constructs static graph model and saves it. Now take a glance at an usage example: + +Define a simple fully connected network: + +.. code-block:: python + + import numpy as np + import paddle + + class SimpleFcLayer(paddle.nn.Layer): + def __init__(self, feature_size, batch_size, fc_size): + super(SimpleFCLayer, self).__init__() + self._linear = paddle.nn.Linear(feature_size, fc_size) + self._offset = paddle.to_tensor( + np.random.random((batch_size, fc_size)).astype('float32')) + + def forward(self, x): + fc = self._linear(x) + return fc + self._offset + +Save model by TracedLayer: + +.. code-block:: python + + import paddle + from paddle.jit import TracedLayer + + paddle.disable_static() + + fc_layer = SimpleFcLayer(3, 4, 2) + in_np = np.random.random([3, 4]).astype('float32') + # Turn numpy ndarray into Tensor + input_var = paddle.to_tensor(in_np) + # Transforming imperative mode into declarative mode by TracerLayer.trace + out_dygraph, static_layer = TracedLayer.trace(fc_layer, inputs=[input_var]) + save_dirname = './saved_infer_model' + # Save the transformed model + static_layer.save_inference_model(save_dirname, feed=[0], fetch=[0]) + +Load model and run it in static graph mode: + +.. 
code-block:: python + + place = paddle.CPUPlace() + exe = paddle.Executor(place) + program, feed_vars, fetch_vars = paddle.io.load_inference_model(save_dirname, exe) + fetch, = exe.run(program, feed={feed_vars[0]: in_np}, fetch_list=fetch_vars) + +However, as tracing only records operators once, if user's code contains Tensor-dependent (including Tensor value or Tensor shape) control flow, that is the Tensor can cause different operators being executed, then TracedLayer cannot handle this case. For instance: + +.. code-block:: python + + import paddle + + def func(input_var) + # if condition depends on the shape of input_var + if input_var.shape[0] > 1: + return paddle.cast(input_var, "float64") + else: + return paddle.cast(input_var, "int64") + + paddle.disable_static() + in_np = np.array([-2]).astype('int') + input_var = paddle.to_tensor(in_np) + out = func(input_var) + +If we apply TracedLayer.trace(func, inputs=[input_var]) on above example, tracing can take record of operators in only one branch of if-else, then the model can not be saved as what user orignally means. The similar situations applies to while/for loop. + +2. ProgramTranslator + +For the Tensor-dependent control flow, we use source-code-translate based ProgramTranslator to convert dygraph into static graph. The basic idea is analyzing Python source code and turning into static graph code, then run the static graph code using Executor. The basic usage of ProgramTranslator is simple, put a decorator ``@paddle.jit.to_static`` before the definition of the function to transform (the function can also be a method of a class, e.g., the ``forward`` function of user-defined imperative Layer). Above Tensor-dependent example can be transformed correctly by ProgramTranslator as below: + +.. code-block:: python + + import paddle + + @paddle.jit.to_static + def func(input_var) + # if condition depends on the shape of input_var + if input_var.shape[0] > 1: + out = paddle.cast(input_var, "float64") + else: + out = paddle.cast(input_var, "int64") + + paddle.disable_static() + in_np = np.array([-2]).astype('int') + input_var = paddle.to_tensor(in_np) + func(input_var) + +To save the transformed model, we can call ``paddle.jit.save`` . Let's take ``SimpleFcLayer`` as an example again, we put decorator at the ``forward`` method of ``SimpleFcLayer`` : + +.. code-block:: python + + import numpy as np + import paddle + + class SimpleFcLayer(paddle.nn.Layer): + def __init__(self, feature_size, batch_size, fc_size): + super(SimpleFCLayer, self).__init__() + self._linear = paddle.nn.Linear(feature_size, fc_size) + self._offset = paddle.to_tensor( + np.random.random((batch_size, fc_size)).astype('float32')) + + @paddle.jit.to_static + def forward(self, x): + fc = self._linear(x) + return fc + self._offset + + +Calling ``paddle.jit.save`` to save above model: + +.. code-block:: python + + import paddle + + paddle.disable_static() + + fc_layer = SimpleFcLayer(3, 4, 2) + in_np = np.random.random([3, 4]).astype('float32') + input_var = paddle.to_tensor(in_np) + out = fc_layer(input_var) + + paddle.jit.save(fc_layer, "./fc_layer_dy2stat") + + +Architecture +-------------- + +The basic idea of TracedLayer is tracing, it is relatively simple so we won't expend here. This section will talk about the source code transformation of ProgramTranslator. + +The transformation is implemented in the decorator so transformation happens when user calls the decorated function, the procedure includes these steps: + +1. Function and cache. 
+ +The entity for transforming dygraph to static graph is the decorated function. For the PaddlePaddle APIs in the function, since they are the same code under dygraph mode and static graph mode, we don't have to transform that code. However, those APIs run computation in dygraph mode while they build the network in static graph mode, so if the transformed function is called multiple times, those APIs will build the network multiple times in static graph, which can cause problems. To solve this and to speed up the transformation, we maintain a cache that maps from the function, input shapes, and input data types to the Program built by the transformed function. If the function hits the cache, we run the stored Program in static graph mode to get the result, otherwise we do the code transformation on the function and store the transformed Program into the cache. + +2. From dygraph source code to AST (Abstract Syntax Tree) + +The core of transforming dygraph to static graph is similar to a compiler: we parse the dygraph code into an AST, change the AST, then turn it back into static graph code. We use Python ``inspect.getsource`` to get the source code string of the function. Python provides the ``ast`` library to parse string code into an AST, but Python 2 and Python 3 have slight grammar differences. To avoid handling the different grammars ourselves, we use an open source AST library `gast `_ that provides a compatible AST among various Python versions. There is no essential difficulty in turning a function into an AST with these libraries; a standard-library-only sketch of this step is given at the end of this section. + +3. Transform AST and turn it to static graph code + +This is the key part of ProgramTranslator: we modify the AST for the supported grammars. The important Python control flows, such as ``if-elif-else, while, for`` loops, are converted to PaddlePaddle static graph APIs ``cond, while_loop`` and so on. We created a Transformer (an AST-to-AST Transformer in Python, not the Transformer in Natural Language Processing) to transform each grammar. Every Transformer scans the AST and modifies it. Lastly, we turn the AST back into a source code string with the ``gast`` library. + +4. Running static graph code as part of dygraph + +In order to increase usability and re-use the transformed static graph code in dygraph, we wrap the generated Program as a dygraph op; the op can run the forward and backward computation of the transformed Program. Then we can not only speed up dygraph code or save it for deployment, but also enable users to run part of their dygraph code in static graph mode so that they can continue training or other dygraph computation in their dygraph code. + +5. Error handling and Debug + +A compiler usually supports debug functionality such as breakpoints, throwing exceptions, and printing intermediate code. ProgramTranslator is similar to a compiler: users may want to set breakpoints for debugging, or see whether the transformed static graph code is as expected. So we also implemented this error handling and debug functionality. Here we list those functions and their implementation. + +A. Report errors/exceptions on the dygraph code line. Because the transformed static graph code is different from the original dygraph code, when Python executes the static graph code, the exceptions will be reported at the static graph code. To locate the corresponding dygraph code, we attach information such as line numbers to AST nodes when we transform the AST, then we can re-write the static graph exception into the corresponding dygraph code exception. + +B. We support ``pdb.set_trace()`` when running ProgramTranslator, so users can add this line to set breakpoints. + +C. Check the transformed static graph code. Our transformed output is a Python class named ``StaticLayer``; this class can be called, and it also stores the transformed code string. Users can call ``StaticLayer.code`` to get the converted code. + +D. Print intermediate transformed code, such as what the code looks like after transforming a ``for`` loop. We provide APIs to set the log level to let users check the intermediate code. + +
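As a rough, standard-library-only illustration of step 2 above (and of the AST traversal hook that step 3 builds on), consider the sketch below. ProgramTranslator itself works with ``gast`` on top of this; the toy function and visitor here are made up for the example:

.. code-block:: python

    import ast
    import inspect
    import textwrap


    def simple_net(x):
        if x > 0:
            y = x * 2
        else:
            y = -x
        return y


    # Step 2 in miniature: function object -> source string -> AST.
    source = textwrap.dedent(inspect.getsource(simple_net))
    tree = ast.parse(source)


    # Step 3 walks and rewrites such a tree; ast.NodeVisitor and
    # ast.NodeTransformer are the standard hooks for that kind of traversal.
    class IfCounter(ast.NodeVisitor):
        def __init__(self):
            self.count = 0

        def visit_If(self, node):
            self.count += 1
            self.generic_visit(node)


    counter = IfCounter()
    counter.visit(tree)
    print("if statements found:", counter.count)  # 1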
diff --git a/doc/fluid/advanced_guide/evaluation_debugging/debug/visualdl.md b/doc/fluid/advanced_guide/evaluation_debugging/debug/visualdl.md index dc197730da5ba212066fc231885ce57081d0447e..df0149674045239242735bad0c778d74f9d17811 100644 --- a/doc/fluid/advanced_guide/evaluation_debugging/debug/visualdl.md +++ b/doc/fluid/advanced_guide/evaluation_debugging/debug/visualdl.md @@ -1,248 +1,300 @@ # VisualDL 工具简介 + +

- +

-## 介绍 -VisualDL是一个面向深度学习任务设计的可视化工具,包含了scalar、参数分布、模型结构、图像可视化等功能,项目正处于高速迭代中,新的组件会不断加入。 -目前大多数DNN平台均使用Python作为配置语言,VisualDL原生支持python的使用, -通过在模型的Python配置中添加几行,便可以为训练过程提供丰富的可视化支持。 -除了Python SDK之外,VisualDL底层采用C++编写,其暴露的C++ SDK也可以集成到其他平台中, -实现原生的性能和定制效果。 +VisualDL是飞桨可视化分析工具,以丰富的图表呈现训练参数变化趋势、模型结构、数据样本、直方图、PR曲线及高维数据分布。可帮助用户更清晰直观地理解深度学习模型训练过程及模型结构,进而实现高效的模型优化。 -## 组件 -VisualDL 目前支持以下组件: +具体功能使用方式请参见**VisualDL使用指南**。项目正处于高速迭代中,敬请期待新组件的加入。 -- scalar -- histogram -- image -- audio -- graph -- high dimensional +VisualDL支持浏览器种类:Chrome(81和83)、Safari 13、FireFox(77和78)、Edge(Chromium版)。 -### Scalar -可以用于展示训练测试的误差趋势 +VisualDL原生支持python的使用, 通过在模型的Python配置中添加几行代码,便可为训练过程提供丰富的可视化支持。 -

- -

-### Histogram -用于可视化任何tensor中元素分布的变化趋势 +## 目录 -

- -

+* [核心亮点](#核心亮点) +* [安装方式](#安装方式) +* [使用方式](#使用方式) +* [可视化功能概览](#可视化功能概览) +* [开源贡献](#开源贡献) +* [更多细节](#更多细节) +* [技术交流](#技术交流) -### Image -可以用于可视化任何tensor,或模型生成的图片 -

- -

-### Audio -可用于播放输入或生成的音频样本 +## 核心亮点 -### Graph +### 简单易用 -VisualDL的graph支持paddle program的展示,同时兼容 ONNX(Open Neural Network Exchange)[https://github.com/onnx/onnx],通过与 python SDK的结合,VisualDL可以兼容包括 PaddlePaddle, pytorch, mxnet在内的大部分主流DNN平台。 +API设计简洁易懂,使用简单。模型结构一键实现可视化。 -

- -

+### 功能丰富 -要进行paddle模型的展示,需要进行以下两步操作: +功能覆盖标量、数据样本、图结构、直方图、PR曲线及数据降维可视化。 -1. 在paddle代码中,调用`fluid.io.save_inference_model()`接口保存模型 -2. 在命令行界面,使用`visualdl --model_pb [paddle_model_dir]` 加载paddle模型 +### 高兼容性 +全面支持Paddle、ONNX、Caffe等市面主流模型结构可视化,广泛支持各类用户进行可视化分析。 -### High Dimensional -用高维度数据映射在2D/3D来可视化嵌入 +### 全面支持 + +与飞桨服务平台及工具组件全面打通,为您在飞桨生态系统中提供最佳使用体验。 -

- -

-## 快速尝试 -请使用下面的命令,来快速测试 VisualDL。 +## 安装方式 + +### 使用pip安装 + +```shell +pip install --upgrade --pre visualdl ``` -# 安装,建議是在虚拟环境或anaconda下。 -pip install --upgrade visualdl -# 运行一个例子,vdl_create_scratch_log 将创建测试日志 -vdl_create_scratch_log -visualdl --logdir=scratch_log --port=8080 +### 使用代码安装 -# 访问 http://127.0.0.1:8080 ``` +git clone https://github.com/PaddlePaddle/VisualDL.git +cd VisualDL -如果出现`TypeError: __init__() got an unexpected keyword argument 'file'`, 是因为protobuf不是3.5以上,运行`pip install --upgrade protobuf`就能解决。 +python setup.py bdist_wheel +pip install --upgrade dist/visualdl-*.whl +``` -如果以上步骤还有出现其他问题,很可能是因为python或pip不同版本或不同位置所致,以下安装方法能解决。 +需要注意,官方自2020年1月1日起不再维护Python2,为了保障代码可用性,VisualDL现仅支持Python3 -## 使用 virtualenv 安装 +## 使用方式 -[Virtualenv](https://virtualenv.pypa.io/en/stable/) 能创建独立Python环境,也能确保Python和pip的相对位置正确。 +VisualDL将训练过程中的数据、参数等信息储存至日志文件中后,启动面板即可查看可视化结果。 -在macOS上,安装pip和virtualenv如下: -``` -sudo easy_install pip -pip install --upgrade virtualenv -``` +### 1. 记录日志 -在Linux上,安装pip和virtualenv如下: -``` -sudo apt-get install python3-pip python3-dev python-virtualenv +VisualDL的后端提供了Python SDK,可通过LogWriter定制一个日志记录器,接口如下: + +```python +class LogWriter(logdir=None, + comment='', + max_queue=10, + flush_secs=120, + filename_suffix='', + write_to_disk=True, + **kwargs) ``` -然后创建一个虚拟环境: +#### 接口参数 + +| 参数 | 格式 | 含义 | +| --------------- | ------- | ------------------------------------------------------------ | +| logdir | string | 日志文件所在的路径,VisualDL将在此路径下建立日志文件并进行记录,如果不填则默认为`runs/${CURRENT_TIME}` | +| comment | string | 为日志文件夹名添加后缀,如果制定了logdir则此项无效 | +| max_queue | int | 日志记录消息队列的最大容量,达到此容量则立即写入到日志文件 | +| flush_secs | int | 日志记录消息队列的最大缓存时间,达到此时间则立即写入到日志文件 | +| filename_suffix | string | 为默认的日志文件名添加后缀 | +| write_to_disk | boolean | 是否写入到磁盘 | + +#### 示例 + +设置日志文件并记录标量数据: + +```python +from visualdl import LogWriter + +# 在`./log/scalar_test/train`路径下建立日志文件 +with LogWriter(logdir="./log/scalar_test/train") as writer: + # 使用scalar组件记录一个标量数据 + writer.add_scalar(tag="acc", step=1, value=0.5678) + writer.add_scalar(tag="acc", step=2, value=0.6878) + writer.add_scalar(tag="acc", step=3, value=0.9878) ``` -virtualenv ~/vdl # for Python2.7 -virtualenv -p python3 ~/vdl for Python 3.x + +### 2. 
启动面板 + +在上述示例中,日志已记录三组标量数据,现可启动VisualDL面板查看日志的可视化结果,共有两种启动方式: + +#### 在命令行启动 + +使用命令行启动VisualDL面板,命令格式如下: + +```python +visualdl --logdir --host --port --cache-timeout --language --public-path --api-only ``` -```~/vdl``` 是你的Virtualenv目录, 你也可以选择任一目录。 +参数详情: + +| 参数 | 意义 | +| --------------- | ------------------------------------------------------------ | +| --logdir | 设定日志所在目录,可以指定多个目录,VisualDL将遍历并且迭代寻找指定目录的子目录,将所有实验结果进行可视化 | +| --model | 设定模型文件路径(非文件夹路径),VisualDL将在此路径指定的模型文件进行可视化,目前可支持PaddlePaddle、ONNX、Keras、Core ML、Caffe等多种模型结构,详情可查看[graph支持模型种类]([https://github.com/PaddlePaddle/VisualDL/blob/develop/docs/components/README.md#Graph--%E7%BD%91%E7%BB%9C%E7%BB%93%E6%9E%84%E7%BB%84%E4%BB%B6](https://github.com/PaddlePaddle/VisualDL/blob/develop/docs/components/README.md#Graph--网络结构组件)) | +| --host | 设定IP,默认为`127.0.0.1` | +| --port | 设定端口,默认为`8040` | +| --cache-timeout | 后端缓存时间,在缓存时间内前端多次请求同一url,返回的数据从缓存中获取,默认为20秒 | +| --language | VisualDL面板语言,可指定为'EN'或'ZH',默认为浏览器使用语言 | +| --public-path | VisualDL面板URL路径,默认是'/app',即访问地址为'http://<host>:<port>/app' | +| --api-only | 是否只提供API,如果设置此参数,则VisualDL不提供页面展示,只提供API服务,此时API地址为'http://<host>:<port>/<public_path>/api';若没有设置public_path参数,则默认为'http://<host>:<port>/api' | + +针对上一步生成的日志,启动命令为: -激活虚拟环境如下: ``` -source ~/vdl/bin/activate +visualdl --logdir ./log ``` -现在再安装 VisualDL 和运行范例: +#### 在Python脚本中启动 +支持在Python脚本中启动VisualDL面板,接口如下: + +```python +visualdl.server.app.run(logdir, + host="127.0.0.1", + port=8080, + cache_timeout=20, + language=None, + public_path=None, + api_only=False, + open_browser=False) ``` -pip install --upgrade visualdl -# 运行一个例子,vdl_create_scratch_log 将创建测试日志 -vdl_create_scratch_log -visualdl --logdir=scratch_log --port=8080 +请注意:除`logdir`外,其他参数均为不定参数,传递时请指明参数名。 + +接口参数具体如下: + +| 参数 | 格式 | 含义 | +| ------------- | ------------------------------------------------ | ------------------------------------------------------------ | +| logdir | string或list[string_1, string_2, ... , string_n] | 日志文件所在的路径,VisualDL将在此路径下递归搜索日志文件并进行可视化,可指定单个或多个路径 | +| model | string | 模型文件路径(非文件夹路径),VisualDL将在此路径指定的模型文件进行可视化 | +| host | string | 指定启动服务的ip,默认为`127.0.0.1` | +| port | int | 启动服务端口,默认为`8040` | +| cache_timeout | int | 后端缓存时间,在缓存时间内前端多次请求同一url,返回的数据从缓存中获取,默认为20秒 | +| language | string | VisualDL面板语言,可指定为'en'或'zh',默认为浏览器使用语言 | +| public_path | string | VisualDL面板URL路径,默认是'/app',即访问地址为'http://:/app' | +| api_only | boolean | 是否只提供API,如果设置此参数,则VisualDL不提供页面展示,只提供API服务,此时API地址为'http://://api';若没有设置public_path参数,则默认为http://:/api' | +| open_browser | boolean | 是否打开浏览器,设置为True则在启动后自动打开浏览器并访问VisualDL面板,若设置api_only,则忽略此参数 | + +针对上一步生成的日志,我们的启动脚本为: -# 访问 http://127.0.0.1:8080 +```python +from visualdl.server import app + +app.run(logdir="./log") ``` -如果在虚拟环境下仍然遇到安装问题,请尝试以下方法。 +在使用任意一种方式启动VisualDL面板后,打开浏览器访问VisualDL面板,即可查看日志的可视化结果,如图: +

+ +

-## 使用 Anaconda 安装 -Anaconda是一个用于科学计算的Python发行版,提供了包管理与环境管理的功能,可以很方便地解决多版本python并存、切换以及各种第三方包安装问题。 -请根据[Anaconda下载网站](https://www.anaconda.com/download) 的指示去下载和安装Anaconda. -下载Python 3.6版本的command-Line installer. +## 可视化功能概览 -创建conda环境名字为```vdl```或任何名字: -``` -conda create -n vdl pip python=2.7 # or python=3.3, etc. -``` +### Scalar -激活conda环境如下: -``` -source activate vdl -``` +以图表形式实时展示训练过程参数,如loss、accuracy。让用户通过观察单组或多组训练参数变化,了解训练过程,加速模型调优。具有两大特点: -现在再安装 VisualDL 和运行范例: +#### 动态展示 -``` -pip install --upgrade visualdl +在启动VisualDL后,LogReader将不断增量的读取日志中数据并供前端调用展示,因此能够在训练中同步观测指标变化,如下图: -# 运行一个例子,vdl_create_scratch_log 将创建测试日志 -vdl_create_scratch_log -visualdl --logdir=scratch_log --port=8080 +

+ +

-# 访问 http://127.0.0.1:8080 -``` -如果仍然遇到安装问题,请尝试以下用源代码安装方法。 -### 使用代码安装 -``` -#建議是在虚拟环境或anaconda下。 -git clone https://github.com/PaddlePaddle/VisualDL.git -cd VisualDL +#### 多实验对比 -python setup.py bdist_wheel -pip install --upgrade dist/visualdl-*.whl -``` +只需在启动VisualDL时将每个实验日志所在路径同时传入即可,每个实验中相同tag的指标将绘制在一张图中同步呈现,如下图: -如果打包和安装遇到其他问题,不安装只想运行Visual DL可以看[这里](https://github.com/PaddlePaddle/VisualDL/blob/develop/docs/develop/how_to_dev_frontend_cn.md) +

+ +

-## SDK -VisualDL 同时提供了python SDK 和 C++ SDK 来实现不同方式的使用。 -### Python SDK -VisualDL 现在支持 Python 2和 Python 3。 +### Image -以最简单的Scalar组件为例,尝试创建一个scalar组件并插入多个时间步的数据: +实时展示训练过程中的图像数据,用于观察不同训练阶段的图像变化,进而深入了解训练过程及效果。 -```python -import random -from visualdl import LogWriter +

+ +

-logdir = "./tmp" -logger = LogWriter(logdir, sync_cycle=10000) -# mark the components with 'train' label. -with logger.mode("train"): - # create a scalar component called 'scalars/scalar0' - scalar0 = logger.scalar("scalars/scalar0") -# add some records during DL model running. -for step in range(100): - scalar0.add_record(step, random.random()) -``` +### Audio -### C++ SDK -上面 Python SDK 中代码完全一致的C++ SDK用法如下 -```c++ -#include -#include -#include "visualdl/sdk.h" +实时查看训练过程中的音频数据,监控语音识别与合成等任务的训练过程。 -namespace vs = visualdl; -namespace cp = visualdl::components; +

+ +

-int main() { - const std::string dir = "./tmp"; - vs::LogWriter logger(dir, 10000); - logger.SetMode("train"); - auto tablet = logger.AddTablet("scalars/scalar0"); - cp::Scalar scalar0(tablet); +### Graph - for (int step = 0; step < 1000; step++) { - float v = (float)std::rand() / RAND_MAX; - scalar0.AddRecord(step, v); - } +一键可视化模型的网络结构。可查看模型属性、节点信息、节点输入输出等,并支持节点搜索,辅助用户快速分析模型结构与了解数据流向。 - return 0; -} -``` -## 启动Board -当训练过程中已经产生了日志数据,就可以启动board进行实时预览可视化信息 +

+ +

-``` -visualdl --logdir -``` -board 还支持一下参数来实现远程的访问: -- `--host` 设定IP -- `--port` 设定端口 -- `-m / --model_pb` 指定 ONNX 格式的模型文件 +### Histogram + +以直方图形式展示Tensor(weight、bias、gradient等)数据在训练过程中的变化趋势。深入了解模型各层效果,帮助开发者精准调整模型结构。 + +- Offset模式 + +

+ +

-### 贡献 -VisualDL 是由 [PaddlePaddle](http://www.paddlepaddle.org/) 和 -[ECharts](http://echarts.baidu.com/) 合作推出的开源项目。我们欢迎所有人使用,提意见以及贡献代码。 + +- Overlay模式 + +

+ +

+ + + +### PR Curve + +精度-召回率曲线,帮助开发者权衡模型精度和召回率之间的平衡,设定最佳阈值。 + +

+ +

+ + +### High Dimensional + +将高维数据进行降维展示,目前支持T-SNE、PCA两种降维方式,用于深入分析高维数据间的关系,方便用户根据数据特征进行算法优化。 + +

+ +

+ +## 开源贡献 + +VisualDL 是由 [PaddlePaddle](https://www.paddlepaddle.org/) 和 [ECharts](https://echarts.apache.org/) 合作推出的开源项目。 +Graph 相关功能由 [Netron](https://github.com/lutzroeder/netron) 提供技术支持。 +欢迎所有人使用,提意见以及贡献代码。 + ## 更多细节 -想了解更多关于VisualDL的使用介绍,请查看[文档](https://github.com/PaddlePaddle/VisualDL/tree/develop/demo) +想了解更多关于VisualDL可视化功能的使用详情介绍,请查看**VisualDL使用指南**。 + +## 技术交流 + +欢迎您加入VisualDL官方QQ群:1045783368 与飞桨团队以及其他用户共同针对VisualDL进行讨论与交流。 diff --git a/doc/fluid/advanced_guide/evaluation_debugging/debug/visualdl_usage.md b/doc/fluid/advanced_guide/evaluation_debugging/debug/visualdl_usage.md index dc1778d82d3c4780630525ca33518f891a2f0b2f..e6a6445e3d4a89501f236bba6cf5623304ab3024 100644 --- a/doc/fluid/advanced_guide/evaluation_debugging/debug/visualdl_usage.md +++ b/doc/fluid/advanced_guide/evaluation_debugging/debug/visualdl_usage.md @@ -1,622 +1,774 @@ # VisualDL 使用指南 -## 概述 +### 概述 + VisualDL 是一个面向深度学习任务设计的可视化工具。VisualDL 利用了丰富的图表来展示数据,用户可以更直观、清晰地查看数据的特征与变化趋势,有助于分析数据、及时发现错误,进而改进神经网络模型的设计。 -目前,VisualDL 支持 scalar, histogram, image, text, audio, high dimensional, graph 这七个组件: +目前,VisualDL 支持 scalar, image, audio, graph, histogram, pr curve, high dimensional 七个组件,项目正处于高速迭代中,敬请期待新组件的加入。 -|组件名称|展示图表|作用| -|:----:|:---:|:---| -|scalar|折线图|动态展示损失函数值、准确率等标量数据| -|histogram|直方图|动态展示参数矩阵的数值分布与变化趋势,便于查看权重矩阵、偏置项、梯度等参数的变化| -|image|图片|显示图片,可显示输入图片和处理后的结果,便于查看中间过程的变化| -|text|文本|展示文本,有助于 NLP 等领域的用户进行数据分析和结果判断| -|audio|音频|可直接播放音频,也支持下载,有助于语音识别等领域的用户进行数据分析和结果判断| -|high dimensional|坐标|将高维数据映射到 2D/3D 空间来可视化嵌入,便于观察不同数据的相关性| -|graph|有向图|展示神经网络的模型结构| +| 组件名称 | 展示图表 | 作用 | +| :-------------------------------------------------: | :--------: | :----------------------------------------------------------- | +| [ Scalar](#Scalar--标量组件) | 折线图 | 动态展示损失函数值、准确率等标量数据 | +| [Image](#Image--图片可视化组件) | 图片可视化 | 显示图片,可显示输入图片和处理后的结果,便于查看中间过程的变化 | +| [Audio](#Audio--音频播放组件) | 音频播放 | 播放训练过程中的音频数据,监控语音识别与合成等任务的训练过程 | +| [Graph](#Graph--网络结构组件) | 网络结构 | 展示网络结构、节点属性及数据流向,辅助学习、优化网络结构 | +| [Histogram](#Histogram--直方图组件) | 直方图 | 展示训练过程中权重、梯度等张量的分布 | +| [PR Curve](#PR-Curve--PR曲线组件) | 折线图 | 权衡精度与召回率之间的平衡关系,便于选择最佳阈值 | +| [High Dimensional](#High-Dimensional--数据降维组件) | 数据降维 | 将高维数据映射到 2D/3D 空间来可视化嵌入,便于观察不同数据的相关性 | -## 动态添加数据组件 +## Scalar -- 折线图组件 -要想使用 VisualDL 的 scalar, histogram, image, text, audio, high dimensional 这六个组件来添加数据,都必须先初始化记录器 `LogWriter`,以设置数据在本地磁盘的保存路径以及同步周期。此后各个组件的输入数据会先保存到本地磁盘,进而才能加载到前端网页中展示。 +### 介绍 -### LogWriter -- 记录器 +Scalar 组件的输入数据类型为标量,该组件的作用是将训练参数以折线图形式呈现。将损失函数值、准确率等标量数据作为参数传入 scalar 组件,即可画出折线图,便于观察变化趋势。 -LogWriter 是一个数据记录器,在数据记录过程中,LogWriter 会周期性地将数据写入指定路径。 +### 记录接口 -LogWriter 的定义为: +Scalar 组件的记录接口如下: ```python -class LogWriter(dir, sync_cycle) +add_scalar(tag, value, step, walltime=None) ``` -> :param dir : 指定日志文件的保存路径。 -> :param sync_cycle : 同步周期。经过 sync_cycle 次添加数据的操作,就执行一次将数据从内存写入磁盘的操作。 -> :return: 函数返回一个 LogWriter 对象。 - -例1 创建一个 LogWriter 对象 - -```python -# 创建一个 LogWriter 对象 log_writer -log_writer = LogWriter("./log", sync_cycle=10) -``` +接口参数说明如下: -LogWriter类的成员函数包括: +| 参数 | 格式 | 含义 | +| -------- | ------ | ------------------------------------------- | +| tag | string | 记录指标的标志,如`train/loss`,不能含有`%` | +| value | float | 要记录的数据值 | +| step | int | 记录的步数 | +| walltime | int | 记录数据的时间戳,默认为当前时间戳 | -* `mode()`; -* `scalar()`, `histogram()`, `image()`, `text()`, `audio()`, `embedding()`; +### Demo -成员函数 `mode()` 用于指定模式。模式的名称是自定义的,比如训练`train`,验证`validation`,测试`test`,第一层卷积`conv_layer1`。 有着相同模式名称的组件作为一个整体,用户可在前端网页中的 `Runs` 按钮中选择显示哪个模式的数据(默认是显示全部模式)。 +- 基础使用 -成员函数 `scalar()`, `histogram()`, `image()`, `text()`, `audio()`, 
`embedding()` 用于创建组件。 - -例2 LogWriter 创建组件 +下面展示了使用 Scalar 组件记录数据的示例,代码文件请见[Scalar组件](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/components/scalar_test.py) ```python -# 设定模式为 train,创建一个 scalar 组件 -with log_writer.mode("train") as logger: - train_scalar = logger.scalar("acc") -# 设定模式为test,创建一个 image 组件 -with log_writer.mode("test") as shower: - test_image = shower.image("conv_image", 10, 1) +from visualdl import LogWriter + +if __name__ == '__main__': + value = [i/1000.0 for i in range(1000)] + # 初始化一个记录器 + with LogWriter(logdir="./log/scalar_test/train") as writer: + for step in range(1000): + # 向记录器添加一个tag为`acc`的数据 + writer.add_scalar(tag="acc", step=step, value=value[step]) + # 向记录器添加一个tag为`loss`的数据 + writer.add_scalar(tag="loss", step=step, value=1/(value[step] + 1)) ``` -### scalar -- 折线图组件 +运行上述程序后,在命令行执行 -scalar 组件的输入数据类型为标量,该组件的作用是画折线图。将损失函数值、准确率等标量数据作为参数传入 scalar 组件,即可画出折线图,便于观察变化趋势。 +```shell +visualdl --logdir ./log --port 8080 +``` -想通过 scalar 组件画折线图,只需先设定 LogWriter 对象的成员函数 `scalar()`,即可使用 `add_record()` 函数添加数据。这两个函数的具体用法如下: +接着在浏览器打开`http://127.0.0.1:8080`,即可查看以下折线图。 -* LogWriter 对象的成员函数 `scalar()`: +

+ +

-```python -def scalar(tag, type) -``` -> :param tag : 标签,tag 相同的折线在同一子框,否则不同,tag 的名称中不能有 % 这个字符。 -> :param type : 数据类型,可选“float”, "double", "int",默认值为 "float"。 -> :return: 函数返回一个 ScalarWriter 对象。 -* scalar 组件的成员函数 `add_record()`: +- 多组实验对比 -```python -def add_record(step, value) -``` +下面展示了使用Scalar组件实现多组实验对比 -> :param step : 步进数,标记这是第几个添加的数据。 -> :param value : 输入数据。 +多组实验对比的实现分为两步: -例3 scalar 组件示例程序 [Github](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/component/scalar-demo.py) +1. 创建子日志文件储存每组实验的参数数据 +2. 将数据写入scalar组件时,**使用相同的tag**,即可实现对比**不同实验**的**同一类型参数** ```python -# coding=utf-8 from visualdl import LogWriter -# 创建 LogWriter 对象 -log_writer = LogWriter("./log", sync_cycle=20) +if __name__ == '__main__': + value = [i/1000.0 for i in range(1000)] + # 步骤一:创建父文件夹:log与子文件夹:scalar_test + with LogWriter(logdir="./log/scalar_test") as writer: + for step in range(1000): + # 步骤二:向记录器添加一个tag为`train/acc`的数据 + writer.add_scalar(tag="train/acc", step=step, value=value[step]) + # 步骤二:向记录器添加一个tag为`train/loss`的数据 + writer.add_scalar(tag="train/loss", step=step, value=1/(value[step] + 1)) + # 步骤一:创建第二个子文件夹scalar_test2 + value = [i/500.0 for i in range(1000)] + with LogWriter(logdir="./log/scalar_test2") as writer: + for step in range(1000): + # 步骤二:在同样名为`train/acc`下添加scalar_test2的accuracy的数据 + writer.add_scalar(tag="train/acc", step=step, value=value[step]) + # 步骤二:在同样名为`train/loss`下添加scalar_test2的loss的数据 + writer.add_scalar(tag="train/loss", step=step, value=1/(value[step] + 1)) +``` + +运行上述程序后,在命令行执行 -# 创建 scalar 组件,模式为 train -with log_writer.mode("train") as logger: - train_acc = logger.scalar("acc") - train_loss = logger.scalar("loss") +```shell +visualdl --logdir ./log --port 8080 +``` -# 创建 scalar 组件,模式设为 test, tag 设为 acc -with log_writer.mode("test") as logger: - test_acc = logger.scalar("acc") +接着在浏览器打开`http://127.0.0.1:8080`,即可查看以下折线图,对比「scalar_test」和「scalar_test2」的Accuracy和Loss。 -value = [i/1000.0 for i in range(1000)] -for step in range(1000): - # 向名称为 acc 的图中添加模式为train的数据 - train_acc.add_record(step, value[step]) +

+ +

- # 向名称为 loss 的图中添加模式为train的数据 - train_loss.add_record(step, 1 / (value[step] + 1)) - # 向名称为 acc 的图中添加模式为test的数据 - test_acc.add_record(step, 1 - value[step]) -``` +*多组实验对比的应用案例可参考AI Studio项目:[VisualDL 2.0--眼疾识别训练可视化](https://aistudio.baidu.com/aistudio/projectdetail/502834) -运行上述程序后,在命令行中执行 -```shell -visualdl --logdir ./log --host 0.0.0.0 --port 8080 -``` +### 功能操作说明 -接着在浏览器打开 [http://0.0.0.0:8080](http://0.0.0.0:8080),即可查看以下折线图。 +* 支持数据卡片「最大化」、「还原」、「坐标系转化」(y轴对数坐标)、「下载」折线图

-
-图1. scalar 组件展示折线图
+

-VisualDL 页面的右边侧栏有各个组件的调节选项,以 scalar 组件为例: -* Smoothing : 用于调节曲线的平滑度。 -* X-axis : 折线图的横坐标参数,可选 `Step`, `Relative`, `Wall Time`,分别表示横轴设为步进数、相对值、数据采集的时间。 -* Tooltip sorting : 标签排序方法,可选 `default`, `descending`, `ascending`, `nearest`,分别表示默认排序、按名称降序、按名称升序、按最新更新时间排序。 -VisualDL 页面的右边侧栏的最下方还有一个 `RUNNING` 按钮,此时前端定期从后端同步数据,刷新页面。点击可切换为红色的 `STOPPED`,暂停前端的数据更新。 -### histogram -- 直方图组件 -histogram 组件的作用是以直方图的形式显示输入数据的分布。在训练过程中,把一些参数(例如权重矩阵 w,偏置项 b,梯度)传给 histogram 组件,就可以查看参数分布在训练过程中的变化趋势。 +* 数据点Hover展示详细信息 -想通过 histogram 组件画参数直方图,只需先设定 LogWriter 对象的成员函数 `histogram()`,即可使用 `add_record()` 函数添加数据。这两个函数的具体用法如下: +

+ +

-* LogWriter 对象的成员函数 `histogram()`: -```python -def histogram(tag, num_buckets, type) -``` -> :param tag : 标签,结合 LogWriter 指定的模式,决定输入参数显示的子框。 -> :param num_buckets : 直方图的柱子数量。 -> :param type : 数据类型,可选“float”, "double", "int",默认值为 "float"。 -> :return: 函数返回一个 HistogramWriter 对象。 -* histogram 组件的成员函数 `add_record()`: -```python -def add_record(step, data) -``` +* 可搜索卡片标签,展示目标图像 -> :param step : 步进数,标记这是第几组添加的数据。 -> :param data : 输入参数, 数据类型为 list[]。 +

+ +

-例4 histogram 组件示例程序 [Github](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/component/histogram-demo.py) -```python -# coding=utf-8 -import numpy as np -from visualdl import LogWriter -# 创建 LogWriter 对象 -log_writer = LogWriter('./log', sync_cycle=10) -# 创建 histogram 组件,模式为train -with log_writer.mode("train") as logger: - param1_histogram = logger.histogram("param1", num_buckets=100) -# 设定步数为 1 - 100 -for step in range(1, 101): - # 添加的数据为随机分布,所在区间值变小 - interval_start = 1 + 2 * step/100.0 - interval_end = 6 - 2 * step/100.0 - data = np.random.uniform(interval_start, interval_end, size=(10000)) +* 可搜索打点数据标签,展示特定数据 + +

+ +

- # 使用 add_record() 函数添加数据 - param1_histogram.add_record(step, data) -``` -运行上述程序后,在命令行中执行 -```shell -visualdl --logdir ./log --host 0.0.0.0 --port 8080 -``` -接着在浏览器打开[http://0.0.0.0:8080](http://0.0.0.0:8080),即可查看 histogram 组件的直方图。其中横坐标为参数的数值,曲线上的值为相应参数的个数。右边纵轴的值为 Step,不同 Step 的数据组用不同颜色加以区分。 +* X轴有三种衡量尺度 + +1. Step:迭代次数 +2. Walltime:训练绝对时间 +3. Relative:训练时长

-
-图2. histogram 组件展示直方图
+

-### image -- 图片可视化组件 -image 组件用于显示图片。在程序运行过程中,将图片数据传入 image 组件,就可在 VisualDL 的前端网页看到相应图片。 -使用 image 组件添加数据,需要先设定 LogWriter 对象的成员函数 `image()`,即可结合 `start_sampling()`, `is_sample_taken()`, `set_sample()` 和 `finish_sample()` 这四个 image 组件的成员函数来完成。这几个函数的定义及用法如下: +* 可调整曲线平滑度,以便更好的展现参数整体的变化趋势 -* LogWriter 对象的成员函数 `image()`: +

+ +

-```python -def image(tag, num_samples, step_cycle) -``` -> :param tag : 标签,结合 set_sample() 的参数 index,决定图片显示的子框。 -> :param num_samples : 设置单个 step 的采样数,页面上的图片数目也等于 num_samples。 -> :param step_cycle : 将 step_cycle 个 step 的数据存储到日志中,默认值为 1。 -> :return: 函数返回一个 ImageWriter 对象。 -* 开始新的采样周期 - 开辟一块内存空间,用于存放采样的数据: -```python -def start_sampling() -``` +## Image -- 图片可视化组件 -* 判断该图片是否应被采样,当返回值为 `-1`,表示不用采样,否则,应被采样: +### 介绍 -```python -def is_sample_taken() -``` +Image 组件用于显示图片数据随训练的变化。在模型训练过程中,将图片数据传入 Image 组件,就可在 VisualDL 的前端网页查看相应图片。 -* 使用函数 `set_sample()` 添加图片数据: +### 记录接口 + +Image 组件的记录接口如下: ```python -def set_sample(index, image_shape, image_data) +add_image(tag, img, step, walltime=None) ``` -> :param index : 索引号,与 tag 组合使用,决定图片显示的子框。 -> :param image_shape : 图片的形状,[weight, height, 通道数(RGB 为 3,灰度图为 1)]。 -> :param image_data : 图片的数据格式为矩阵,通常为 numpy.ndarray,经 flatten() 后变为行向量。 +接口参数说明如下: -* 结束当前的采样周期,将已采样的数据存到磁盘,并释放这一块内存空间: +| 参数 | 格式 | 含义 | +| -------- | ------------- | ------------------------------------------- | +| tag | string | 记录指标的标志,如`train/loss`,不能含有`%` | +| img | numpy.ndarray | 以ndarray格式表示的图片 | +| step | int | 记录的步数 | +| walltime | int | 记录数据的时间戳,默认为当前时间戳 | -```python -def finish_sample() -``` +### Demo + +下面展示了使用 Image 组件记录数据的示例,代码文件请见[Image组件](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/components/image_test.py) -例5 image 组件示例程序 [Github](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/component/image-demo.py) ```python -# coding=utf-8 import numpy as np -from visualdl import LogWriter from PIL import Image +from visualdl import LogWriter def random_crop(img): - ''' - 此函数用于获取图片数据 img 的 100*100 的随机分块 - ''' + """获取图片的随机 100x100 分片 + """ img = Image.open(img) w, h = img.size random_w = np.random.randint(0, w - 100) random_h = np.random.randint(0, h - 100) - return img.crop((random_w, random_h, random_w + 100, random_h + 100)) - - -# 创建 LogWriter 对象 -log_writer = LogWriter("./log", sync_cycle=10) - -# 创建 image 组件,模式为train, 采样数设为 ns -ns = 2 -with log_writer.mode("train") as logger: - input_image = logger.image(tag="test", num_samples=ns) - -# 一般要设置一个变量 sample_num,用于记录当前已采样了几个 image 数据 -sample_num = 0 - -for step in range(6): - # 设置start_sampling() 的条件,满足条件时,开始采样 - if sample_num == 0: - input_image.start_sampling() - - # 获取idx - idx = input_image.is_sample_taken() - # 如果 idx != -1,采样,否则跳过 - if idx != -1: - # 获取图片数据 - image_path = "test.jpg" - image_data = np.array(random_crop(image_path)) - # 使用 set_sample() 函数添加数据 - # flatten() 用于把 ndarray 由矩阵变为行向量 - input_image.set_sample(idx, image_data.shape, image_data.flatten()) - sample_num += 1 - - # 如果完成了当前轮的采样,则调用finish_sample() - if sample_num % ns == 0: - input_image.finish_sampling() - sample_num = 0 + r = img.crop((random_w, random_h, random_w + 100, random_h + 100)) + return np.asarray(r) + + +if __name__ == '__main__': + # 初始化一个记录器 + with LogWriter(logdir="./log/image_test/train") as writer: + for step in range(6): + # 添加一个图片数据 + writer.add_image(tag="eye", + img=random_crop("../../docs/images/eye.jpg"), + step=step) ``` -运行上述程序后,在命令行中执行 +运行上述程序后,在命令行执行 + ```shell -visualdl --logdir ./log --host 0.0.0.0 --port 8080 +visualdl --logdir ./log --port 8080 ``` -接着在浏览器打开 [http://0.0.0.0:8080](http://0.0.0.0:8080),点击页面最上方的`SAMPLES`选项,即可查看 image 组件的展示图片。每一张子图都有一条浅绿色的横轴,拖动即可展示不同 step 的图片。 +在浏览器输入`http://127.0.0.1:8080`,即可查看图片数据。

-
-图3. image 组件展示图片
+

-### text -- 文本组件 -text 组件用于显示文本,在程序运行过程中,将文本数据传入 text 组件,即可在 VisualDL 的前端网页中查看。 - -想要通过 text 组件添加数据,只需先设定 LogWriter 对象的成员函数 `text()`,即可使用 `add_record()` 函数来完成。这两个函数的具体用法如下: -* LogWriter 对象的成员函数 `text()`: -```python -def text(tag) -``` -> :param tag : 标签,结合 LogWriter 设定的模式,决定文本显示的子框。 -> :return: 函数返回一个 TextWriter 对象。 - -* text 组件的成员函数 `add_record()`: - -```python -def add_record(step, str) -``` - -> :param step : 步进数,标记这是第几组添加的数据。 -> :param str : 输入文本,数据类型为 string。 - -例6 text 组件示例程序 [Github](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/component/text-demo.py) - -```python -# coding=utf-8 -from visualdl import LogWriter +### 功能操作说明 -# 创建 LogWriter 对象 -log_writter = LogWriter("./log", sync_cycle=10) +可搜索图片标签显示对应图片数据 -# 创建 text 组件,模式为 train, 标签为 test -with log_writter.mode("train") as logger: - vdl_text_comp = logger.text(tag="test") +

+ +

-# 使用 add_record() 函数添加数据 -for i in range(1, 6): - vdl_text_comp.add_record(i, "这是第 %d 个 step 的数据。" % i) - vdl_text_comp.add_record(i, "This is data %d ." % i) -``` -运行上述程序后,在命令行中执行 -```shell -visualdl --logdir ./log --host 0.0.0.0 --port 8080 -``` -接着在浏览器打开 [http://0.0.0.0:8080](http://0.0.0.0:8080),点击页面最上方的 `SAMPLES` 选项,即可查看 text 组件的展示文本。每一张小框都有一条浅绿色的横轴,拖动即可显示不同 step 的文本。 +支持滑动Step/迭代次数查看不同迭代次数下的图片数据

-
-图4. text 组件展示文本
+

-### audio -- 音频播放组件 - audio 为音频播放组件,在程序运行过程中,将音频数据传入 audio 组件,就可以在 VisualDL 的前端网页中直接播放或下载。 -使用 audio 组件添加数据,需要先设定 LogWriter 对象的成员函数 `audio()`,即可结合 `start_sampling()`, `is_sample_taken()`, `set_sample()` 和 `finish_sample()` 这四个 audio 组件的成员函数来完成。这几个函数的定义和用法如下: -* LogWriter 对象的成员函数 `audio()`: -```python -def audio(tag, num_samples, step_cycle) -``` +## Audio--音频播放组件 -> :param tag : 标签,结合 set_sample() 的参数 index,决定音频播放的子框。 -> :param num_samples : 设置单个 step 的采样数,页面上的音频数目也等于 num_samples。 -> :param step_cycle : 将 step_cycle 个 step 的数据存储到日志中,默认值为 1。 -> :return: 函数返回一个 AudioWriter 对象。 +### 介绍 -* 开始新的采样周期 - 开辟一块内存空间,用于存放采样的数据: +Audio组件实时查看训练过程中的音频数据,监控语音识别与合成等任务的训练过程。 -```python -def start_sampling() -``` +### 记录接口 -* 判断该音频是否应被采样,当返回值为 `-1`,表示不用采样,否则,应被采样: +Audio 组件的记录接口如下: ```python -def is_sample_taken() +add_audio(tag, audio_array, step, sample_rate) ``` -* 使用函数 `set_sample()` 添加音频数据: +接口参数说明如下: -```python -def set_sample(index, audio_params, audio_data) -``` +| 参数 | 格式 | 含义 | +| ----------- | ------------- | ------------------------------------------ | +| tag | string | 记录指标的标志,如`audio_tag`,不能含有`%` | +| audio_arry | numpy.ndarray | 以ndarray格式表示的音频 | +| step | int | 记录的步数 | +| sample_rate | int | 采样率,**注意正确填写对应音频的原采样率** | -> :param index : 索引号,结合 tag,决定音频播放的子框。 -> :param audio_params : 音频的参数 [sample rate, sample width, channel],其中 sample rate 为采样率, sample width 为每一帧采样的字节数, channel 为通道数(单声道设为1,双声道设为2,四声道设为4,以此类推)。 -> :param audio_data :音频数据,音频数据的格式一般为 numpy.ndarray,经 flatten() 后变为行向量。 +### Demo -* 结束当前的采样周期,将已采样的数据存到磁盘,并释放这一块内存空间: +下面展示了使用 Audio 组件记录数据的示例,代码文件请见[Audio组件](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/components/audio_test.py) ```python -def finish_sample() -``` - -例7 audio 组件示例程序 [Github](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/component/audio-demo.py) -```python -# coding=utf-8 +from visualdl import LogWriter import numpy as np import wave -from visualdl import LogWriter def read_audio_data(audio_path): """ - 读取音频数据 + Get audio data. 
""" CHUNK = 4096 f = wave.open(audio_path, "rb") wavdata = [] chunk = f.readframes(CHUNK) - while chunk: - data = np.fromstring(chunk, dtype='uint8') + data = np.frombuffer(chunk, dtype='uint8') wavdata.extend(data) chunk = f.readframes(CHUNK) - # 8k sample rate, 16bit frame, 1 channel shape = [8000, 2, 1] - return shape, wavdata -# 创建一个 LogWriter 对象 -log_writter = LogWriter("./log", sync_cycle=10) - -# 创建 audio 组件,模式为 train -ns = 2 -with log_writter.mode("train") as logger: - input_audio = logger.audio(tag="test", num_samples=ns) - -# 一般要设定一个变量 audio_sample_num,用来记录当前已采样了几段 audio 数据 -audio_sample_num = 0 - -for step in range(9): - # 设置 start_sampling() 的条件,满足条件时,开始采样 - if audio_sample_num == 0: - input_audio.start_sampling() - - # 获取 idx - idx = input_audio.is_sample_taken() - # 如果 idx != -1,采样,否则跳过 - if idx != -1: - # 读取数据,音频文件的格式可以为 .wav .mp3 等 - audio_path = "test.wav" - audio_shape, audio_data = read_audio_data(audio_path) - # 使用 set_sample()函数添加数据 - input_audio.set_sample(idx, audio_shape, audio_data) - audio_sample_num += 1 - - # 如果完成了当前轮的采样,则调用 finish_sample() - if audio_sample_num % ns ==0: - input_audio.finish_sampling() - audio_sample_num = 0 +if __name__ == '__main__': + with LogWriter(logdir="./log") as writer: + audio_shape, audio_data = read_audio_data("./testing.wav") + audio_data = np.array(audio_data) + writer.add_audio(tag="audio_tag", + audio_array=audio_data, + step=0, + sample_rate=8000) ``` -运行上述程序后,在命令行中执行 +运行上述程序后,在命令行执行 ```shell -visualdl --logdir ./log --host 0.0.0.0 --port 8080 +visualdl --logdir ./log --port 8080 ``` -接着在浏览器打开[http://0.0.0.0:8080](http://0.0.0.0:8080),点击页面最上方的 `SAMPLES` 选项,即有音频的小框,可以播放和下载。每一张小框中都有一条浅绿色的横轴,拖动即可选择不同 step 的音频段。 +在浏览器输入`http://127.0.0.1:8080`,即可查看音频数据。 + +

+ +

+ + + +### 功能操作说明 + +- 可搜索音频标签显示对应音频数据 + +

+ +

+ + + +- 支持滑动Step/迭代次数试听不同迭代次数下的音频数据 + +

+ +

+ + + +- 支持播放/暂停音频数据 + +

+ +

+ + + +- 支持音量调节

-
-图5. audio 组件播放音频
+

-### high dimensional -- 数据降维组件 -high dimensional 组件的作用就是将数据映射到 2D/3D 空间来做可视化嵌入,这有利于了解不同数据的相关性。high dimensional 组件支持以下两种降维算法: -* PCA : Principle Component Analysis 主成分分析 -* [t-SNE](https://lvdmaaten.github.io/tsne/) : t-distributed stochastic neighbor embedding t-分布式随机领域嵌入 +- 支持音频下载 + +

+ +

+ + + + +## Graph--网络结构组件 + +### 介绍 + +Graph组件一键可视化模型的网络结构。用于查看模型属性、节点信息、节点输入输出等,并进行节点搜索,协助开发者们快速分析模型结构与了解数据流向。 + +### Demo + +共有两种启动方式: + +- 前端模型文件拖拽上传: + + - 如只需使用Graph组件,则无需添加任何参数,在命令行执行`visualdl`后即可启动面板进行上传。 + - 如果同时需使用其他功能,在命令行指定日志文件路径(以`./log`为例)即可启动面板进行上传: + + ```shell + visualdl --logdir ./log --port 8080 + ``` + +

+ +

+ + + +- 后端启动Graph: + + - 在命令行加入参数`--model`并指定**模型文件**路径(非文件夹路径),即可启动并查看网络结构可视化: + + ```shell + visualdl --model ./log/model --port 8080 + ``` + +

+ +

-想使用 high dimensional 组件,只需先设定 LogWriter 对象的成员函数 `embedding()`,即可使用 `add_embeddings_with_word_dict()` 函数添加数据。这两个函数的定义及用法如下: -* LogWriter 对象的成员函数 `embedding()` 不需输入参数,函数返回一个 embeddingWriter 对象: + +### 功能操作说明 + +- 一键上传模型 + - 支持模型格式:PaddlePaddle、ONNX、Keras、Core ML、Caffe、Caffe2、Darknet、MXNet、ncnn、TensorFlow Lite + - 实验性支持模型格式:TorchScript、PyTorch、Torch、 ArmNN、BigDL、Chainer、CNTK、Deeplearning4j、MediaPipe、ML.NET、MNN、OpenVINO、Scikit-learn、Tengine、TensorFlow.js、TensorFlow + +

+ +

+ + + +- 支持上下左右任意拖拽模型、放大和缩小模型 + +

+ +

+ + + +- 搜索定位到对应节点 + +

+ +

+ + + +- 点击查看模型属性 + +

+ +

+ + + +

+ +

+ + + +- 支持选择模型展示的信息 + +

+ +

+ + + +- 支持以PNG、SVG格式导出模型结构图 + +

+ +

+ + + +- 点击节点即可展示对应属性信息 + +

+ +

+ + + +- 支持一键更换模型 + +

+ +

+ + + +## Histogram--直方图组件 + +### 介绍 + +Histogram组件以直方图形式展示Tensor(weight、bias、gradient等)数据在训练过程中的变化趋势。深入了解模型各层效果,帮助开发者精准调整模型结构。 + +### 记录接口 + +Histogram 组件的记录接口如下: ```python -def embedding() +add_histogram(tag, values, step, walltime=None, buckets=10) ``` -* high dimensional 的成员函数 `add_embeddings_with_word_dict()`: +接口参数说明如下: + +| 参数 | 格式 | 含义 | +| -------- | --------------------- | ------------------------------------------- | +| tag | string | 记录指标的标志,如`train/loss`,不能含有`%` | +| values | numpy.ndarray or list | 以ndarray或list格式表示的数据 | +| step | int | 记录的步数 | +| walltime | int | 记录数据的时间戳,默认为当前时间戳 | +| buckets | int | 生成直方图的分段数,默认为10 | + +### Demo + +下面展示了使用 Histogram组件记录数据的示例,代码文件请见[Histogram组件](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/components/histogram_test.py) ```python -def add_embeddings_with_word_dict(data, Dict) +from visualdl import LogWriter +import numpy as np + + +if __name__ == '__main__': + values = np.arange(0, 1000) + with LogWriter(logdir="./log/histogram_test/train") as writer: + for index in range(1, 101): + interval_start = 1 + 2 * index / 100.0 + interval_end = 6 - 2 * index / 100.0 + data = np.random.uniform(interval_start, interval_end, size=(10000)) + writer.add_histogram(tag='default tag', + values=data, + step=index, + buckets=10) +``` + +运行上述程序后,在命令行执行 + +```shell +visualdl --logdir ./log --port 8080 ``` -> :param data : 输入数据,数据类型为 List[List(float)]。 -> :param Dict : 字典, 数据类型为 Dict[str, int]。 +在浏览器输入`http://127.0.0.1:8080`,即可查看训练参数直方图。 -例8 high dimensional 组件示例程序 [Github](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/component/embedding-demo.py) +### 功能操作说明 + +- 支持数据卡片「最大化」、直方图「下载」 + +

+ +

+ +- 可选择Offset或Overlay模式 + +

+ +

+ + + - Offset模式 + +

+ +

+ + + + - Overlay模式 + +

+ +

+ + +- 数据点Hover展示参数值、训练步数、频次 + + - 在第240次训练步数时,权重为-0.0031,且出现的频次是2734次 + +

+ +

+ +- 可搜索卡片标签,展示目标直方图 + +

+ +

+ +- 可搜索打点数据标签,展示特定数据流 + +

+ +

+ +## PR Curve--PR曲线组件 + +### 介绍 + +PR Curve以折线图形式呈现精度与召回率的权衡分析,清晰直观了解模型训练效果,便于分析模型是否达到理想标准。 + +### 记录接口 + +PR Curve组件的记录接口如下: + +```python +add_pr_curve(tag, labels, predictions, step=None, num_thresholds=10) +``` + +接口参数说明如下: + +| 参数 | 格式 | 含义 | +| -------------- | --------------------- | ------------------------------------------- | +| tag | string | 记录指标的标志,如`train/loss`,不能含有`%` | +| labels | numpy.ndarray or list | 以ndarray或list格式表示的实际类别 | +| predictions | numpy.ndarray or list | 以ndarray或list格式表示的预测类别 | +| step | int | 记录的步数 | +| num_thresholds | int | 阈值设置的个数,默认为10,最大值为127 | + +### Demo + +下面展示了使用 PR Curve 组件记录数据的示例,代码文件请见[PR Curve组件](#https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/components/pr_curve_test.py) ```python -# coding=utf-8 -import numpy as np from visualdl import LogWriter +import numpy as np -# 创建一个 LogWriter 对象 -log_writer = LogWriter("./log", sync_cycle=10) - -# 创建一个 high dimensional 组件,模式设为 train -with log_writer.mode("train") as logger: - train_embedding = logger.embedding() - -# 第一个参数为数据,数据类型为 List[List(float)] -hot_vectors = np.random.uniform(1, 2, size=(10, 3)) -# 第二个参数为字典,数据类型为 Dict[str, int] -# 其中第一个分量为坐标点的名称, 第二个分量为该坐标对应原数据的第几行分量 -word_dict = { - "label_1": 5, - "label_2": 4, - "label_3": 3, - "label_4": 2, - "label_5": 1,} - -# 使用 add_embeddings_with_word_dict(data, Dict) -train_embedding.add_embeddings_with_word_dict(hot_vectors, word_dict) +with LogWriter("./log/pr_curve_test/train") as writer: + for step in range(3): + labels = np.random.randint(2, size=100) + predictions = np.random.rand(100) + writer.add_pr_curve(tag='pr_curve', + labels=labels, + predictions=predictions, + step=step, + num_thresholds=5) ``` -运行上述程序后,在命令行中执行 +运行上述程序后,在命令行执行 ```shell -visualdl --logdir ./log --host 0.0.0.0 --port 8080 +visualdl --logdir ./log --port 8080 ``` -接着在浏览器打开[http://0.0.0.0:8080](http://0.0.0.0:8080),点击页面最上方的 `HIGHDIMENSIONAL` 选项,即可查看数据映射后的相对位置。 +接着在浏览器打开`http://127.0.0.1:8080`,即可查看PR Curve

-
-图6. high dimensional 组件展示平面坐标
+

-

-
-图7. high dimensional 组件展示直角坐标
-

-## graph -- 神经网络可视化组件 -graph 组件用于神经网络模型结构的可视化,该组件可以展示 Paddle 格式和 [ONNX](https://onnx.ai) 格式保存的模型。graph 组件可帮助用户理解神经网络的模型结构,也有助于排查神经网络的配置错误。 +### 功能操作说明 -与其他需要记录数据的组件不同,使用 graph 组件的唯一要素就是指定模型文件的存放位置,即在 `visualdl` 命令中增加选项 `--model_pb` 来指定模型文件的存放路径,则可在前端看到相应效果。 +- 支持数据卡片「最大化」,「还原」、「下载」PR曲线 -例9 graph 组件示例程序(下面示例展示了如何用 Paddle 保存一个 Lenet-5 模型)[Github](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/component/graph-demo.py) +

+ +

+ +- 数据点Hover展示详细信息:阈值对应的TP、TN、FP、FN + +

+ +

+ +- 可搜索卡片标签,展示目标图表 + +

+ +

+ +- 可搜索打点数据标签,展示特定数据 + +

+ +

+ + +- 支持查看不同训练步数下的PR曲线 + +

+ +

+ +- X轴-时间显示类型有三种衡量尺度 + + - Step:迭代次数 + - Walltime:训练绝对时间 + - Relative:训练时长 + +

+ +
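补充示例:在多分类场景下,可以按one-vs-rest的方式为每个类别分别记录一条PR曲线,每个类别使用独立的tag,便于结合上述的卡片搜索与训练步数选择功能进行对比。下面是一个示意写法(类别数与随机数据均为假设,实际使用时请替换为模型输出的预测概率):

```python
from visualdl import LogWriter
import numpy as np


if __name__ == '__main__':
    num_classes = 3
    with LogWriter(logdir="./log/pr_curve_test/train") as writer:
        for step in range(3):
            for class_id in range(num_classes):
                # 用随机数模拟该类别one-vs-rest的标签与预测概率,实际使用时替换为模型输出
                labels = np.random.randint(2, size=100)
                predictions = np.random.rand(100)
                writer.add_pr_curve(tag='pr_curve/class_{}'.format(class_id),
                                    labels=labels,
                                    predictions=predictions,
                                    step=step,
                                    num_thresholds=10)
```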

+ +## High Dimensional -- 数据降维组件 + +### 介绍 + +High Dimensional 组件将高维数据进行降维展示,用于深入分析高维数据间的关系。目前支持以下两种降维算法: + + - PCA : Principle Component Analysis 主成分分析 + - t-SNE : t-distributed stochastic neighbor embedding t-分布式随机领域嵌入 + +### 记录接口 + +High Dimensional 组件的记录接口如下: ```python -# coding=utf-8 -import paddle.fluid as fluid - - -def lenet_5(img): - ''' - 定义神经网络结构 - ''' - conv1 = fluid.nets.simple_img_conv_pool( - input=img, - filter_size=5, - num_filters=20, - pool_size=2, - pool_stride=2, - act="relu") - - conv1_bn = fluid.layers.batch_norm(input=conv1) - - conv2 = fluid.nets.simple_img_conv_pool( - input=conv1_bn, - filter_size=5, - num_filters=50, - pool_size=2, - pool_stride=2, - act="relu") - - predition = fluid.layers.fc(input=conv2, size=10, act="softmax") - return predition - - -# 变量赋值 -image = fluid.layers.data(name="img", shape=[1, 28, 28], dtype="float32") -predition = lenet_5(image) - -place = fluid.CPUPlace() -exe = fluid.Executor(place=place) -exe.run(fluid.default_startup_program()) - -# 使用函数 save_inference_model() 保存 paddle 模型 -fluid.io.save_inference_model( - "./paddle_lenet_5_model", - feeded_var_names=[image.name], - target_vars=[predition], - executor=exe) +add_embeddings(tag, labels, hot_vectors, walltime=None) ``` -运行上述程序后,在命令行中执行 +接口参数说明如下: + +| 参数 | 格式 | 含义 | +| ----------- | ------------------- | ---------------------------------------------------- | +| tag | string | 记录指标的标志,如`default`,不能含有`%` | +| labels | numpy.array 或 list | 一维数组表示的标签,每个元素是一个string类型的字符串 | +| hot_vectors | numpy.array or list | 与labels一一对应,每个元素可以看作是某个标签的特征 | +| walltime | int | 记录数据的时间戳,默认为当前时间戳 | + +### Demo + +下面展示了使用 High Dimensional 组件记录数据的示例,代码文件请见[High Dimensional组件](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/components/high_dimensional_test.py) + +```python +from visualdl import LogWriter + + +if __name__ == '__main__': + hot_vectors = [ + [1.3561076367500755, 1.3116267195134017, 1.6785401875616097], + [1.1039614644440658, 1.8891609992484688, 1.32030488587171], + [1.9924524852447711, 1.9358920727142739, 1.2124401279391606], + [1.4129542689796446, 1.7372166387197474, 1.7317806077076527], + [1.3913371800587777, 1.4684674577930312, 1.5214136352476377]] + + labels = ["label_1", "label_2", "label_3", "label_4", "label_5"] + # 初始化一个记录器 + with LogWriter(logdir="./log/high_dimensional_test/train") as writer: + # 将一组labels和对应的hot_vectors传入记录器进行记录 + writer.add_embeddings(tag='default', + labels=labels, + hot_vectors=hot_vectors) +``` + +运行上述程序后,在命令行执行 ```shell -visualdl --logdir ./log --host 0.0.0.0 --port 8080 --model_pb paddle_lenet_5_model +visualdl --logdir ./log --port 8080 ``` -接着在浏览器打开[http://0.0.0.0:8080](http://0.0.0.0:8080),点击页面最上方的`GRAPHS`选项,即可查看 Lenet-5 的模型结构。 +接着在浏览器打开`http://127.0.0.1:8080`,即可查看降维后的可视化数据。
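补充说明:`hot_vectors` 中的每一行与 `labels` 中的一个标签一一对应,向量维度不限(上例为3维)。实际场景中记录的通常是模型产出的特征,例如词向量或图像特征。下面用numpy随机数模拟一组16维向量进行记录(标签与数据均为假设,仅作示意):

```python
from visualdl import LogWriter
import numpy as np


if __name__ == '__main__':
    # 假设的标签列表,实际使用时替换为真实的词表或样本名称
    labels = ["apple", "banana", "car", "train", "dog", "cat"]
    # 用随机数模拟16维特征向量,实际使用时替换为模型产出的embedding
    fake_vectors = np.random.uniform(-1, 1, size=(len(labels), 16))

    with LogWriter(logdir="./log/high_dimensional_test/train") as writer:
        writer.add_embeddings(tag='word_embedding',
                              labels=labels,
                              hot_vectors=fake_vectors.tolist())
```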

-
-图8. graph 组件展示 Lenet-5 的模型结构
+

+ + + + + +# diff --git a/doc/fluid/advanced_guide/evaluation_debugging/evaluation/metrics.rst b/doc/fluid/advanced_guide/evaluation_debugging/evaluation/metrics.rst index 9995734289f4a65c59d72efe21b5e3dcc496820d..0f46dced718ddf34cdab23eae029254f904ae3e7 100644 --- a/doc/fluid/advanced_guide/evaluation_debugging/evaluation/metrics.rst +++ b/doc/fluid/advanced_guide/evaluation_debugging/evaluation/metrics.rst @@ -17,6 +17,7 @@ paddle.fluid.metrics模块提供了一系列常用的模型评价指标; 用户 不同类型的任务,会选用不同的评价指标。 回归问题通常会用RMSE(均方根误差)、MAE(平均绝对误差)、R-Square(R平方)等 + AUC(Area Under Cure)指标则常被用在分类任务(classification)上 目标检测任务(Object Detection)则经常会用到mAP(Mean Average Precision) diff --git a/doc/fluid/advanced_guide/flags/flags_cn.rst b/doc/fluid/advanced_guide/flags/flags_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5d0d414725666c1d90f5d58c26dc4536f08f439f --- /dev/null +++ b/doc/fluid/advanced_guide/flags/flags_cn.rst @@ -0,0 +1,28 @@ + +环境变量FLAGS +================== + +调用说明 +---------- + +PaddlePaddle中的环境变量FLAGS支持两种设置方式。 + +- 通过export来设置环境变量,如 :code:`export FLAGS_eager_delete_tensor_gb = 1.0` 。 + +- 通过API::code:`get_flag` 和 :code:`set_flags` 来打印和设置环境变量FLAGS。API使用详情请参考 :ref:`cn_api_fluid_get_flags` 与 :ref:`cn_api_fluid_set_flags` 。 + + +环境变量FLAGS功能分类 +---------------------- + +.. toctree:: + :maxdepth: 1 + + cudnn_cn.rst + data_cn.rst + debug_cn.rst + device_cn.rst + distributed_cn.rst + executor_cn.rst + memory_cn.rst + others_cn.rst diff --git a/doc/fluid/advanced_guide/flags/flags_en.rst b/doc/fluid/advanced_guide/flags/flags_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..b24c551c78d7bc74a76901c717b792f78b4237e3 --- /dev/null +++ b/doc/fluid/advanced_guide/flags/flags_en.rst @@ -0,0 +1,74 @@ +================== +FLAGS +================== + +Usage +------ +These FLAGS in PaddlePaddle can be set in two ways. + +- Set the FLAGS through export. For example: :code:`export FLAGS_eager_delete_tensor_gb = 1.0` . + +- Through :code:`get_flags` and :code:`set_flags` to print and set the environment variables. For more information of using these API, please refer to :ref:`api_fluid_get_flags` and :ref:`api_fluid_get_flags` . + + +FLAGS Quick Search +------------------ + +.. 
toctree:: + :maxdepth: 1 + + + cudnn_en.rst + data_en.rst + debug_en.rst + device_en.rst + distributed_en.rst + executor_en.rst + memory_en.rst + others_en.rst + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/doc/fluid/advanced_guide/flags/memory_cn.rst b/doc/fluid/advanced_guide/flags/memory_cn.rst index cbafa94a0e5b28570cbb16a92f17a947bd3458fd..94676721c2d0baca9a2d744e7dbc7064c7eed279 100644 --- a/doc/fluid/advanced_guide/flags/memory_cn.rst +++ b/doc/fluid/advanced_guide/flags/memory_cn.rst @@ -11,13 +11,14 @@ FLAGS_allocator_strategy 取值范围 --------------- -String型,['naive_best_fit', 'auto_growth']中的一个。缺省值为'naive_best_fit'。 +String型,['naive_best_fit', 'auto_growth']中的一个。缺省值如果编译Paddle CMake时使用-DON_INFER=ON为'naive_best_fit'。 +其他默认情况为'auto_growth'。PaddlePaddle pip安装包的默认策略也是'auto_growth' 示例 -------- -FLAGS_allocator_strategy=naive_best_fit - 使用预分配best fit分配器。 +FLAGS_allocator_strategy=naive_best_fit - 使用预分配best fit分配器,PaddlePaddle会先占用大多比例的可用内存/显存,在Paddle具体数据使用时分配,这种方式预占空间较大,但内存/显存碎片较少(比如能够支持模型的最大batch size会变大)。 -FLAGS_allocator_strategy=auto_growth - 使用auto growth分配器。 +FLAGS_allocator_strategy=auto_growth - 使用auto growth分配器。PaddlePaddle会随着真实数据需要再占用内存/显存,但内存/显存可能会产生碎片(比如能够支持模型的最大batch size会变小)。 FLAGS_eager_delete_scope diff --git a/doc/fluid/advanced_guide/flags/memory_en.rst b/doc/fluid/advanced_guide/flags/memory_en.rst index 8702a4082006ab05b0a983f3b117fba7617b558f..0e630e7d93d51e668397b9c88fbfd75ad45f9395 100644 --- a/doc/fluid/advanced_guide/flags/memory_en.rst +++ b/doc/fluid/advanced_guide/flags/memory_en.rst @@ -11,13 +11,13 @@ Use to choose allocator strategy of PaddlePaddle. Values accepted --------------- -String, enum in ['naive_best_fit', 'auto_growth']. The default value is 'naive_best_fit'. +String, enum in ['naive_best_fit', 'auto_growth']. The default value will be 'naive_best_fit' if users compile PaddlePaddle with -DON_INFER=ON CMake flag, otherwise is 'auto_growth'. The default PaddlePaddle pip package uses 'auto_growth'. Example -------- -FLAGS_allocator_strategy=naive_best_fit would use the pre-allocated best fit allocator. +FLAGS_allocator_strategy=naive_best_fit would use the pre-allocated best fit allocator. 'naive_best_fit' strategy would occupy almost all GPU memory by default but leads to less memory fragmentation (i.e., maximum batch size of models may be larger). -FLAGS_allocator_strategy=auto_growth would use the auto growth allocator. +FLAGS_allocator_strategy=auto_growth would use the auto growth allocator. 'auto_growth' strategy would allocate GPU memory on demand but may lead to more memory fragmentation (i.e., maximum batch size of models may be smaller). diff --git a/doc/fluid/advanced_guide/flags_cn.rst b/doc/fluid/advanced_guide/flags_cn.rst deleted file mode 100644 index 46abff4b275abbc723cd49db49a9d277bd804f80..0000000000000000000000000000000000000000 --- a/doc/fluid/advanced_guide/flags_cn.rst +++ /dev/null @@ -1,17 +0,0 @@ - -环境变量FLAGS -================== - - -.. toctree:: - :maxdepth: 1 - - - flags/cudnn_cn.rst - flags/data_cn.rst - flags/debug_cn.rst - flags/device_cn.rst - flags/distributed_cn.rst - flags/executor_cn.rst - flags/memory_cn.rst - flags/others_cn.rst diff --git a/doc/fluid/advanced_guide/flags_en.rst b/doc/fluid/advanced_guide/flags_en.rst deleted file mode 100644 index 9c8c3d621ebca52a15a8b61f53c2d090a124f875..0000000000000000000000000000000000000000 --- a/doc/fluid/advanced_guide/flags_en.rst +++ /dev/null @@ -1,63 +0,0 @@ -================== -FLAGS -================== - - -.. 
toctree:: - :maxdepth: 1 - - - flags/cudnn_en.rst - flags/data_en.rst - flags/debug_en.rst - flags/device_en.rst - flags/distributed_en.rst - flags/executor_en.rst - flags/memory_en.rst - flags/others_en.rst - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/doc/fluid/advanced_guide/index_cn.rst b/doc/fluid/advanced_guide/index_cn.rst index d0364810b95b467a0c333790e8412971dfe32105..4fa3243cea9d68126fb59fe769c8d3a0bdc2f008 100644 --- a/doc/fluid/advanced_guide/index_cn.rst +++ b/doc/fluid/advanced_guide/index_cn.rst @@ -2,31 +2,14 @@ 进阶指南 ######## -如果您已比较熟练使用飞桨来完成常规任务,期望了解更多飞桨在工业部署方面的能力,或者尝试自己的二次开发,请阅读: +如果您已经学会使用飞桨来完成常规任务,期望了解更多飞桨在工业部署方面的能力,请阅读: - - `数据准备 <../advanced_guide/data_preparing/index_cn.html>`_:介绍高效的同步异步数据读取方法 - - - `分布式训练 <../advanced_guide/distributed_training/index_cn.html>`_ :介绍如何使用分布式训练 - `预测与部署 <../advanced_guide/inference_deployment/index_cn.html>`_ :介绍如何应用训练好的模型进行预测 - - `性能调优 <../advanced_guide/performance_improving/index_cn.html>`_ :介绍飞桨使用过程中的调优方法 - - - `模型评估/调试 <../advanced_guide/evaluation_debugging/index_cn.html>`_ :介绍模型评估与调试的典型方法 - - - `二次开发 <../advanced_guide/addon_development/index_cn.html>`_ :介绍如何新增Operator和如何向飞桨开源社区贡献代码 - - - `环境变量FLAGS <../advanced_guide/flags/index_cn.html>`_ - - .. toctree:: :hidden: - data_preparing/index_cn.rst - distributed_training/index_cn.rst + dygraph_to_static/index_cn.rst inference_deployment/index_cn.rst - performance_improving/index_cn.rst - evaluation_debugging/index_cn.rst - addon_development/index_cn.rst - flags_cn.rst - + flags/flags_cn.rst diff --git a/doc/fluid/advanced_guide/index_en.rst b/doc/fluid/advanced_guide/index_en.rst index a3201508de5e4218e78c81245708f9d7fd21b6a8..f65d0ce22e3520de008a6f0706c8fb4b4483cfee 100644 --- a/doc/fluid/advanced_guide/index_en.rst +++ b/doc/fluid/advanced_guide/index_en.rst @@ -8,30 +8,14 @@ Advanced User Guides So far you have already been familiar with PaddlePaddle. And the next expectation, read more on: - - `Prepare Data `_:How to prepare the data efficiently. - - - `Distributed Training `_ :How to apply the distributed training in your projects. - `Deploy Inference Model `_ :How to deploy the trained network to perform practical inference - - `Practice Improving `_ :How to do profiling for Fluid programs - - - `Model Evaluation and Debugging `_ :How to evaluate your program. - - - `Addon Development `_ :How to contribute codes and documentation to our communities - - - `FLAGS `_ - .. toctree:: :hidden: - data_preparing/index_en.rst - distributed_training/index_en.rst + dygraph_to_static/index_en.rst inference_deployment/index_en.rst - performance_improving/index_en.rst - evaluation_debugging/index_en.rst - addon_development/index_en.rst - flags_en.rst - + flags/flags_en.rst diff --git a/doc/fluid/advanced_guide/inference_deployment/inference/build_and_install_lib_cn.rst b/doc/fluid/advanced_guide/inference_deployment/inference/build_and_install_lib_cn.rst index 06630753d5c27eeff84806be902b586b1c563368..788341863e1fe669ab10bc634d948fa7c6ef481c 100644 --- a/doc/fluid/advanced_guide/inference_deployment/inference/build_and_install_lib_cn.rst +++ b/doc/fluid/advanced_guide/inference_deployment/inference/build_and_install_lib_cn.rst @@ -1,23 +1,22 @@ .. _install_or_build_cpp_inference_lib: -安装与编译C++预测库 +安装与编译 Linux 预测库 =========================== 直接下载安装 ------------- .. 
csv-table:: - :header: "版本说明", "预测库(1.7.0版本)", "预测库(develop版本)" + :header: "版本说明", "预测库(1.8.4版本)", "预测库(develop版本)" :widths: 3, 2, 2 - "ubuntu14.04_cpu_avx_mkl", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_" - "ubuntu14.04_cpu_avx_openblas", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_" - "ubuntu14.04_cpu_noavx_openblas", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_" - "ubuntu14.04_cuda9.0_cudnn7_avx_mkl", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_" - "ubuntu14.04_cuda10.0_cudnn7_avx_mkl", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_" - "ubuntu14.04_cuda9.0_cudnn7_avx_mkl_trt5", "`fluid_inference.tgz `_", - "ubuntu14.04_cuda10.0_cudnn7_avx_mkl_trt5", "`fluid_inference.tgz `_", - "nv-jetson-cuda10-cudnn7.5-trt5", "`fluid_inference.tar.gz `_", + "ubuntu14.04_cpu_avx_mkl", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_" + "ubuntu14.04_cpu_avx_openblas", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_" + "ubuntu14.04_cpu_noavx_openblas", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_" + "ubuntu14.04_cuda9.0_cudnn7_avx_mkl", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_" + "ubuntu14.04_cuda10.0_cudnn7_avx_mkl", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_" + "ubuntu14.04_cuda10.1_cudnn7.6_avx_mkl_trt6", "`fluid_inference.tgz `_", + "nv-jetson-cuda10-cudnn7.5-trt5", "`fluid_inference.tar.gz `_", 从源码编译 @@ -40,23 +39,28 @@ WITH_NV_JETSON OFF 在NV Jetson硬件上编译时需 建议按照推荐值设置,以避免链接不必要的库。其它可选编译选项按需进行设定。 -首先从github拉取最新代码并安装nccl +首先从github拉取最新代码 .. code-block:: bash - git clone https://github.com/paddlepaddle/paddle + git clone https://github.com/paddlepaddle/Paddle + cd Paddle # 建议使用git checkout切换到Paddle稳定的版本,如: - git checkout v1.6.2 + git checkout v1.8.4 + +**note**: 如果您是多卡机器,建议安装NCCL;如果您是单卡机器则可以在编译时显示指定WITH_NCCL=OFF来跳过这一步。注意如果WITH_NCCL=ON,且没有安装NCCL,则编译会报错。 + +.. code-block:: bash git clone https://github.com/NVIDIA/nccl.git + cd nccl make -j4 make install -**note**: 单卡机器上不会用到nccl但仍存在依赖, 后续会考虑将此依赖去除。 **Server端预测库源码编译** -下面的代码片段配制编译选项并进行编译(需要将PADDLE_ROOT替换为PaddlePaddle预测库的安装路径): +下面的代码片段配制编译选项并进行编译(需要将PADDLE_ROOT替换为PaddlePaddle预测库的安装路径,WITH_NCCL根据实际情况进行修改): .. code-block:: bash @@ -70,6 +74,7 @@ WITH_NV_JETSON OFF 在NV Jetson硬件上编译时需 -DWITH_MKL=OFF \ -DWITH_GPU=OFF \ -DON_INFER=ON \ + -DWITH_NCCL=OFF \ .. make make inference_lib_dist @@ -118,7 +123,7 @@ NVIDIA Jetson是NVIDIA推出的嵌入式AI平台,Paddle Inference支持在 NVI make inference_lib_dist -j4 3. 样例测试 - 请参照官网样例:https://www.paddlepaddle.org.cn/documentation/docs/zh/advanced_usage/deploy/inference/paddle_tensorrt_infer.html#id2 + 请参照官网样例:https://www.paddlepaddle.org.cn/documentation/docs/zh/advanced_guide/performance_improving/inference_improving/paddle_tensorrt_infer.html#id2 **FAQ** @@ -165,28 +170,21 @@ NVIDIA Jetson是NVIDIA推出的嵌入式AI平台,Paddle Inference支持在 NVI │   ├── libpaddle_fluid.a │   └── libpaddle_fluid.so ├── third_party - │   ├── boost - │   │   └── boost - │   ├── eigen3 - │   │   ├── Eigen - │   │   └── unsupported │   └── install │   ├── gflags │   ├── glog │   ├── mkldnn │   ├── mklml - │   ├── protobuf - │   ├── xxhash - │   └── zlib + │   └── protobuf └── version.txt version.txt 中记录了该预测库的版本信息,包括Git Commit ID、使用OpenBlas或MKL数学库、CUDA/CUDNN版本号,如: .. 
code-block:: text - GIT COMMIT ID: cc9028b90ef50a825a722c55e5fda4b7cd26b0d6 + GIT COMMIT ID: 0231f58e592ad9f673ac1832d8c495c8ed65d24f WITH_MKL: ON WITH_MKLDNN: ON WITH_GPU: ON - CUDA version: 8.0 + CUDA version: 10.1 CUDNN version: v7 diff --git a/doc/fluid/advanced_guide/inference_deployment/inference/build_and_install_lib_en.rst b/doc/fluid/advanced_guide/inference_deployment/inference/build_and_install_lib_en.rst index 43422f0820a37149b7b9072250283984f2463503..9ed8bc9c8da226bb20dd987fc64f7070a5ba89b7 100644 --- a/doc/fluid/advanced_guide/inference_deployment/inference/build_and_install_lib_en.rst +++ b/doc/fluid/advanced_guide/inference_deployment/inference/build_and_install_lib_en.rst @@ -1,23 +1,22 @@ .. _install_or_build_cpp_inference_lib_en: -Install and Compile C++ Inference Library +Install and Compile C++ Inference Library on Linux ============================================= Direct Download and Installation --------------------------------- .. csv-table:: c++ inference library list - :header: "version description", "inference library(1.7.0 version)", "inference library(develop version)" + :header: "version description", "inference library(1.8.4 version)", "inference library(develop version)" :widths: 3, 2, 2 - "ubuntu14.04_cpu_avx_mkl", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_" - "ubuntu14.04_cpu_avx_openblas", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_" - "ubuntu14.04_cpu_noavx_openblas", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_" - "ubuntu14.04_cuda9.0_cudnn7_avx_mkl", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_" - "ubuntu14.04_cuda10.0_cudnn7_avx_mkl", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_" - "ubuntu14.04_cuda9.0_cudnn7_avx_mkl_trt5", "`fluid_inference.tgz `_", - "ubuntu14.04_cuda10.0_cudnn7_avx_mkl_trt5", "`fluid_inference.tgz `_", - "nv-jetson-cuda10-cudnn7.5-trt5", "`fluid_inference.tar.gz `_", + "ubuntu14.04_cpu_avx_mkl", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_" + "ubuntu14.04_cpu_avx_openblas", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_" + "ubuntu14.04_cpu_noavx_openblas", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_" + "ubuntu14.04_cuda9.0_cudnn7_avx_mkl", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_" + "ubuntu14.04_cuda10.0_cudnn7_avx_mkl", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_" + "ubuntu14.04_cuda10.1_cudnn7.6_avx_mkl_trt6", "`fluid_inference.tgz `_", + "nv-jetson-cuda10-cudnn7.5-trt5", "`fluid_inference.tar.gz `_", Build from Source Code ----------------------- @@ -41,23 +40,29 @@ WITH_NV_JETSON OFF build inference libs on NV Jetson It is recommended to configure options according to the recommended values to avoid linking unnecessary libraries. Other options can be set if it is necessary. -Firstly we pull the latest code from github and install nccl. +Firstly we pull the latest code from github. .. code-block:: bash - git clone https://github.com/paddlepaddle/paddle - # Use git checkout to switch to stable versions such as v1.6.2 - git checkout v1.6.2 + git clone https://github.com/paddlepaddle/Paddle + cd Paddle + # Use git checkout to switch to stable versions such as v1.8.4 + git checkout v1.8.4 + + +**note**: If your environment is a multi-card machine, it is recommended to install nccl; otherwise, you can skip this step by specifying WITH_NCCL = OFF during compilation. Note that if WITH_NCCL = ON, and NCCL is not installed, the compiler will report an error. + +.. 
code-block:: bash git clone https://github.com/NVIDIA/nccl.git + cd nccl make -j4 make install -**note**: nccl is not used but still needed in building. This dependence will be removed later. **build inference libs on server** -Following codes set the configurations and execute building(PADDLE_ROOT should be set to the actual installing path of inference libs). +Following codes set the configurations and execute building(PADDLE_ROOT should be set to the actual installing path of inference libs, WITH_NCCL should be modified according to the actual environment.). .. code-block:: bash @@ -72,6 +77,7 @@ Following codes set the configurations and execute building(PADDLE_ROOT should b -DWITH_MKL=OFF \ -DWITH_GPU=OFF \ -DON_INFER=ON \ + -DWITH_NCCL=OFF \ .. make make inference_lib_dist @@ -121,7 +127,7 @@ NVIDIA Jetson is an AI computing platform in embedded systems introduced by NVID make inference_lib_dist -j4 3. Test with samples - Please refer to samples on https://www.paddlepaddle.org.cn/documentation/docs/zh/advanced_usage/deploy/inference/paddle_tensorrt_infer.html#id2 + Please refer to samples on https://www.paddlepaddle.org.cn/documentation/docs/zh/advanced_guide/performance_improving/inference_improving/paddle_tensorrt_infer.html#id2 **FAQ** diff --git a/doc/fluid/advanced_guide/inference_deployment/inference/c_infer_cn.md b/doc/fluid/advanced_guide/inference_deployment/inference/c_infer_cn.md index 95c338feb4674c8eb2e6a1d0d5ea883d523b43bf..6f031621fc06570581de1c69ff3a19e015389162 100644 --- a/doc/fluid/advanced_guide/inference_deployment/inference/c_infer_cn.md +++ b/doc/fluid/advanced_guide/inference_deployment/inference/c_infer_cn.md @@ -27,7 +27,7 @@ Fluid提供了高度优化的[C++预测库](./native_infer.html),为了方便 * `void PD_DisableGpu(PD_AnalysisConfig* config)`: 禁用GPU。 * `int PD_GpuDeviceId(const PD_AnalysisConfig* config)`: 返回使用的GPU设备的ID。 * `void PD_SwitchIrOptim(PD_AnalysisConfig* config, bool x)`: 设置预测是否开启IR优化。 -* `void PD_EnableTensorRtEngine(PD_AnalysisConfig* config, int workspace_size, int max_batch_size, int min_subgraph_size, Precision precision, bool use_static, bool use_calib_mode)`: 开启TensorRT。关于参数的解释,详见``使用Paddle-TensorRT库预测``。 +* `void PD_EnableTensorRtEngine(PD_AnalysisConfig* config, int workspace_size, int max_batch_size, int min_subgraph_size, Precision precision, bool use_static, bool use_calib_mode)`: 开启TensorRT。关于参数的解释,详见[使用Paddle-TensorRT库预测](../../performance_improving/inference_improving/paddle_tensorrt_infer.html)。 * `void PD_EnableMKLDNN(PD_AnalysisConfig* config)`: 开启MKLDNN。 #### 代码示例 diff --git a/doc/fluid/advanced_guide/inference_deployment/inference/windows_cpp_inference.md b/doc/fluid/advanced_guide/inference_deployment/inference/windows_cpp_inference.md index b8b051a1311938ee53f58b86c1d794f9d225e86d..8d86bca09b809bde9f779151656b824fbd7efff3 100644 --- a/doc/fluid/advanced_guide/inference_deployment/inference/windows_cpp_inference.md +++ b/doc/fluid/advanced_guide/inference_deployment/inference/windows_cpp_inference.md @@ -1,18 +1,17 @@ -安装与编译Windows预测库 +安装与编译 Windows 预测库 =========================== 下载安装包与对应的测试环境 ------------- -| 版本说明 | 预测库(1.7.0版本) | 编译器 | 构建工具 | cuDNN | CUDA | +| 版本说明 | 预测库(1.8.4版本) | 编译器 | 构建工具 | cuDNN | CUDA | |:---------|:-------------------|:-------------------|:----------------|:--------|:-------| -| cpu_avx_mkl | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.7.0/win-infer/mkl/cpu/fluid_inference_install_dir.zip) | MSVC 2015 update 3| CMake v3.16.0 | -| cpu_avx_openblas | 
[fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.7.0/win-infer/open/cpu/fluid_inference_install_dir.zip) | MSVC 2015 update 3| CMake v3.16.0 | -| cuda9.0_cudnn7_avx_mkl | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.7.0/win-infer/mkl/post97/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 | 7.4.1 | 9.0 | -| cuda10.0_cudnn7_avx_mkl | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.7.0/win-infer/mkl/post107/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 | 7.5.0 | 10.0 | -| cuda9.0_cudnn7_avx_openblas | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.7.0/win-infer/open/post97/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 | 7.4.1 | 9.0 | -| cuda10.0_cudnn7_avx_openblas | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.7.0/win-infer/open/post97/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 | 7.5.0 | 10.0 | +| cpu_avx_mkl | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.8.4/win-infer/mkl/cpu/fluid_inference_install_dir.zip) | MSVC 2015 update 3| CMake v3.16.0 | +| cpu_avx_openblas | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.8.4/win-infer/open/cpu/fluid_inference_install_dir.zip) | MSVC 2015 update 3| CMake v3.16.0 | +| cuda9.0_cudnn7_avx_mkl | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.8.4/win-infer/mkl/post97/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 | 7.3.1 | 9.0 | +| cuda9.0_cudnn7_avx_openblas | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.8.4/win-infer/open/post97/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 | 7.3.1 | 9.0 | +| cuda10.0_cudnn7_avx_mkl | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.8.4/win-infer/mkl/post107/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 | 7.4.1 | 10.0 | ### 硬件环境 diff --git a/doc/fluid/advanced_guide/inference_deployment/inference/windows_cpp_inference_en.md b/doc/fluid/advanced_guide/inference_deployment/inference/windows_cpp_inference_en.md index 480a4563b8413d219cc7028800f1625744b0837e..fc85af2ef49a32f935cb25cc7504f4e933e3a320 100644 --- a/doc/fluid/advanced_guide/inference_deployment/inference/windows_cpp_inference_en.md +++ b/doc/fluid/advanced_guide/inference_deployment/inference/windows_cpp_inference_en.md @@ -5,14 +5,13 @@ Install and Compile C++ Inference Library on Windows Direct Download and Install ------------- -| Version | Inference Libraries(v1.7.0) | Compiler | Build tools | cuDNN | CUDA | +| Version | Inference Libraries(v1.8.4) | Compiler | Build tools | cuDNN | CUDA | |:---------|:-------------------|:-------------------|:----------------|:--------|:-------| -| cpu_avx_mkl | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.7.0/win-infer/mkl/cpu/fluid_inference_install_dir.zip) | MSVC 2015 update 3| CMake v3.16.0 | -| cpu_avx_openblas | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.7.0/win-infer/open/cpu/fluid_inference_install_dir.zip) | MSVC 2015 update 3| CMake v3.16.0 | -| cuda9.0_cudnn7_avx_mkl | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.7.0/win-infer/mkl/post97/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 | 7.4.1 | 9.0 | -| cuda10.0_cudnn7_avx_mkl | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.7.0/win-infer/mkl/post107/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 | 7.5.0 | 10.0 | -| cuda9.0_cudnn7_avx_openblas | 
[fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.7.0/win-infer/open/post97/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 | 7.4.1 | 9.0 | -| cuda10.0_cudnn7_avx_openblas | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.7.0/win-infer/open/post97/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 | 7.5.0 | 10.0 | +| cpu_avx_mkl | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.8.4/win-infer/mkl/cpu/fluid_inference_install_dir.zip) | MSVC 2015 update 3| CMake v3.16.0 | +| cpu_avx_openblas | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.8.4/win-infer/open/cpu/fluid_inference_install_dir.zip) | MSVC 2015 update 3| CMake v3.16.0 | +| cuda9.0_cudnn7_avx_mkl | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.8.4/win-infer/mkl/post97/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 | 7.3.1 | 9.0 | +| cuda9.0_cudnn7_avx_openblas | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.8.4/win-infer/open/post97/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 | 7.3.1 | 9.0 | +| cuda10.0_cudnn7_avx_mkl | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.8.4/win-infer/mkl/post107/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 | 7.4.1 | 10.0 | ### Hardware Environment diff --git a/doc/fluid/advanced_guide/inference_deployment/mobile/mobile_index.md b/doc/fluid/advanced_guide/inference_deployment/mobile/mobile_index.md index 66c7d3222803abbdc0723b94085f85b6daa9cb5a..e977a8d7d4e95d50a0ecad3235bb89a8fc9ef7b5 100644 --- a/doc/fluid/advanced_guide/inference_deployment/mobile/mobile_index.md +++ b/doc/fluid/advanced_guide/inference_deployment/mobile/mobile_index.md @@ -1,8 +1,8 @@ # Paddle-Lite -Paddle Lite为Paddle-Mobile的升级版,定位支持包括手机移动端在内更多场景的轻量化高效预测,支持更广泛的硬件和平台,是一个高性能、轻量级的深度学习预测引擎。在保持和PaddlePaddle无缝对接外,也兼容支持其他训练框架产出的模型。 +Paddle-Lite为Paddle-Mobile的升级版,定位支持包括手机移动端在内更多场景的轻量化高效预测,支持更广泛的硬件和平台,是一个高性能、轻量级的深度学习预测引擎。在保持和PaddlePaddle无缝对接外,也兼容支持其他训练框架产出的模型。 -完整使用文档位于 [PaddleLite 文档](https://paddlepaddle.github.io/Paddle-Lite/) 。 +完整使用文档位于 [Paddle-Lite 文档](https://paddle-lite.readthedocs.io/zh/latest/) 。 ## 特性 @@ -13,39 +13,39 @@ Paddle Lite为Paddle-Mobile的升级版,定位支持包括手机移动端在 ### 高性能 极致的 ARM CPU 性能优化,针对不同微架构特点实现kernel的定制,最大发挥计算性能,在主流模型上展现出领先的速度优势。 -支持INT8量化计算,结合 [PaddleSlim 模型压缩工具](https://github.com/PaddlePaddle/models/tree/v1.5/PaddleSlim) 中 INT8量化训练功能,可以提供高精度高性能的预测能力。 +支持量化模型,结合[PaddleSlim 模型压缩工具](https://github.com/PaddlePaddle/models/tree/v1.5/PaddleSlim) 中量化功能,可以提供高精度高性能的预测能力。 在Huawei NPU, FPGA上也具有有很好的性能表现。 -最新 Benchmark 位于 [benchmark](https://paddlepaddle.github.io/Paddle-Lite/develop/benchmark/)。 +最新性能数据位于 [Benchmark 文档](https://paddle-lite.readthedocs.io/zh/latest/benchmark/benchmark.html)。 ### 通用性 -硬件方面,Paddle Lite 的架构设计为多硬件兼容支持做了良好设计。除了支持ARM CPU、Mali GPU、Adreno GPU,还特别支持了华为 NPU,以及 FPGA 等边缘设备广泛使用的硬件。即将支持支持包括寒武纪、比特大陆等AI芯片,未来会增加对更多硬件的支持。 +硬件方面,Paddle-Lite 的架构设计为多硬件兼容支持做了良好设计。除了支持ARM CPU、Mali GPU、Adreno GPU,还特别支持了华为 NPU,以及 FPGA 等边缘设备广泛使用的硬件。即将支持支持包括寒武纪、比特大陆等AI芯片,未来会增加对更多硬件的支持。 -模型支持方面,Paddle Lite和PaddlePaddle训练框架的Op对齐,提供更广泛的模型支持能力。目前已严格验证18个模型85个OP的精度和性能,对视觉类模型做到了较为充分的支持,覆盖分类、检测和定位,包含了特色的OCR模型的支持。未来会持续增加更多模型的支持验证。 +模型支持方面,Paddle-Lite和PaddlePaddle训练框架的Op对齐,提供更广泛的模型支持能力。目前已严格验证18个模型85个OP的精度和性能,对视觉类模型做到了较为充分的支持,覆盖分类、检测和定位,包含了特色的OCR模型的支持。未来会持续增加更多模型的支持验证。 -框架兼容方面:除了PaddlePaddle外,对其他训练框架也提供兼容支持。当前,支持Caffe 和 TensorFlow 训练出来的模型,通过X2Paddle (https://github.com/PaddlePaddle/X2Paddle) 转换工具实现。接下来将会对ONNX等格式模型提供兼容支持。 
+框架兼容方面:除了PaddlePaddle外,对其他训练框架也提供兼容支持。当前,支持Caffe 和 TensorFlow 训练出来的模型,通过[X2Paddle] (https://github.com/PaddlePaddle/X2Paddle) 转换工具实现。接下来将会对ONNX等格式模型提供兼容支持。 ## 架构 -PaddleLite 的架构设计着重考虑了对多硬件和平台的支持,并且强化了多个硬件在一个模型中混合执行的能力,多个层面的性能优化处理,以及对端侧应用的轻量化设计。 +Paddle-Lite 的架构设计着重考虑了对多硬件和平台的支持,并且强化了多个硬件在一个模型中混合执行的能力,多个层面的性能优化处理,以及对端侧应用的轻量化设计。 ![](https://github.com/Superjomn/_tmp_images/raw/master/images/paddle-lite-architecture.png) 其中,Analysis Phase 包括了 MIR(Machine IR) 相关模块,能够对原有的模型的计算图针对具体的硬件列表进行算子融合、计算裁剪 在内的多种优化。Execution Phase 只涉及到Kernel 的执行,且可以单独部署,以支持极致的轻量级部署。 -## Paddle-Mobile升级为Paddle Lite的说明 +## Paddle-Mobile升级为Paddle-Lite的说明 原Paddle-Mobile作为一个致力于嵌入式平台的PaddlePaddle预测引擎,已支持多种硬件平台,包括ARM CPU、 Mali GPU、Adreno GPU,以及支持苹果设备的GPU Metal实现、ZU5、ZU9等FPGA开发板、树莓派等arm-linux开发板。在百度内已经过广泛业务场景应用验证。对应设计文档可参考: [mobile/README](https://github.com/PaddlePaddle/Paddle-Lite/blob/develop/mobile/README.md) -Paddle-Mobile 整体升级重构并更名为Paddle Lite后,原paddle-mobile 的底层能力大部分已集成到[新架构 ](https://github.com/PaddlePaddle/Paddle-Lite/tree/develop/lite)下。作为过渡,暂时保留原Paddle-mobile代码。 主体代码位于 `mobile/` 目录中,后续一段时间会继续维护,并完成全部迁移。新功能会统一到[新架构 ](https://github.com/PaddlePaddle/Paddle-Lite/tree/develop/lite)下开发。 +Paddle-Mobile 整体升级重构并更名为Paddle-Lite后,原paddle-mobile 的底层能力大部分已集成到[新架构 ](https://github.com/PaddlePaddle/Paddle-Lite/tree/develop/lite)下。作为过渡,暂时保留原Paddle-mobile代码。 主体代码位于 `mobile/` 目录中,后续一段时间会继续维护,并完成全部迁移。新功能会统一到[新架构 ](https://github.com/PaddlePaddle/Paddle-Lite/tree/develop/lite)下开发。 metal, web的模块相对独立,会继续在 `./metal` 和 `./web` 目录下开发和维护。对苹果设备的GPU Metal实现的需求及web前端预测需求,可以直接进入这两个目录。 ## 致谢 -Paddle Lite 借鉴了以下开源项目: +Paddle-Lite 借鉴了以下开源项目: - [ARM compute library](https://github.com/ARM-software/ComputeLibrary) -- [Anakin](https://github.com/PaddlePaddle/Anakin) ,Anakin对应底层的一些优化实现已被集成到Paddle Lite。Anakin作为PaddlePaddle组织下的一个高性能预测项目,极具前瞻性,对Paddle Lite有重要贡献。Anakin已和本项目实现整合。之后,Anakin不再升级。 +- [Anakin](https://github.com/PaddlePaddle/Anakin) ,Anakin对应底层的一些优化实现已被集成到Paddle-Lite。Anakin作为PaddlePaddle组织下的一个高性能预测项目,极具前瞻性,对Paddle-Lite有重要贡献。Anakin已和本项目实现整合。之后,Anakin不再升级。 ## 交流与反馈 * 欢迎您通过Github Issues来提交问题、报告与建议 diff --git a/doc/fluid/advanced_guide/performance_improving/amp/amp.md b/doc/fluid/advanced_guide/performance_improving/amp/amp.md new file mode 100644 index 0000000000000000000000000000000000000000..3a41a447f78cf3bc119abb7754292edbbc23050a --- /dev/null +++ b/doc/fluid/advanced_guide/performance_improving/amp/amp.md @@ -0,0 +1,171 @@ +# 混合精度训练最佳实践 + +Automatic Mixed Precision (AMP) 是一种自动混合使用半精度(FP16)和单精度(FP32)来加速模型训练的技术。AMP技术可方便用户快速将使用 FP32 训练的模型修改为使用混合精度训练,并通过黑白名单和动态`loss scaling`来保证训练时的数值稳定性进而避免梯度Infinite或者NaN(Not a Number)。借力于新一代NVIDIA GPU中Tensor Cores的计算性能,PaddlePaddle AMP技术在ResNet50、Transformer等模型上训练速度相对于FP32训练加速比可达1.5~2.9。 + +### 半精度浮点类型FP16 + +如图 1 所示,半精度(Float Precision16,FP16)是一种相对较新的浮点类型,在计算机中使用2字节(16位)存储。在IEEE 754-2008标准中,它亦被称作binary16。与计算中常用的单精度(FP32)和双精度(FP64)类型相比,FP16更适于在精度要求不高的场景中使用。 + +
+ missing +
图 1. 半精度和单精度数据示意图
+
+ +### 英伟达GPU的FP16算力 + +在使用相同的超参数下,混合精度训练使用半精度浮点(FP16)和单精度(FP32)浮点即可达到与使用纯单精度训练相同的准确率,并可加速模型的训练速度。这主要得益于英伟达推出的Volta及Turing架构GPU在使用FP16计算时具有如下特点: + +* FP16可降低一半的内存带宽和存储需求,这使得在相同的硬件条件下研究人员可使用更大更复杂的模型以及更大的batch size大小。 +* FP16可以充分利用英伟达Volta及Turing架构GPU提供的Tensor Cores技术。在相同的GPU硬件上,Tensor Cores的FP16计算吞吐量是FP32的8倍。 + +### PaddlePaddle AMP功能——牛刀小试 + +如前文所述,使用FP16数据类型可能会造成计算精度上的损失,但对深度学习领域而言,并不是所有计算都要求很高的精度,一些局部的精度损失对最终训练效果影响很微弱,却能使吞吐和训练速度带来大幅提升。因此,混合精度计算的需求应运而生。具体而言,训练过程中将一些对精度损失不敏感且能利用Tensor Cores进行加速的运算使用半精度处理,而对精度损失敏感部分依然保持FP32计算精度,用以最大限度提升访存和计算效率。 + +为了避免对每个具体模型人工地去设计和尝试精度混合的方法,PaddlePaadle框架提供自动混合精度训练(AMP)功能,解放"炼丹师"的双手。在PaddlePaddle中使用AMP训练是一件十分容易的事情,用户只需要增加一行代码即可将原有的FP32训练转变为AMP训练。下面以`MNIST`为例介绍PaddlePaddle AMP功能的使用示例。 + +**MNIST网络定义** + +```python +import paddle.fluid as fluid + +def MNIST(data, class_dim): + conv1 = fluid.layers.conv2d(data, 16, 5, 1, act=None, data_format='NHWC') + bn1 = fluid.layers.batch_norm(conv1, act='relu', data_layout='NHWC') + pool1 = fluid.layers.pool2d(bn1, 2, 'max', 2, data_format='NHWC') + conv2 = fluid.layers.conv2d(pool1, 64, 5, 1, act=None, data_format='NHWC') + bn2 = fluid.layers.batch_norm(conv2, act='relu', data_layout='NHWC') + pool2 = fluid.layers.pool2d(bn2, 2, 'max', 2, data_format='NHWC') + fc1 = fluid.layers.fc(pool2, size=64, act='relu') + fc2 = fluid.layers.fc(fc1, size=class_dim, act='softmax') + return fc2 +``` + +针对CV(Computer Vision)类模型组网,为获得更高的训练性能需要注意如下三点: + +* `conv2d`、`batch_norm`以及`pool2d`等需要将数据布局设置为`NHWC`,这样有助于使用TensorCore技术加速计算过程1。 +* Tensor Cores要求在使用FP16加速卷积运算时conv2d的输入/输出通道数为8的倍数2,因此设计网络时推荐将conv2d层的输入/输出通道数设置为8的倍数。 +* Tensor Cores要求在使用FP16加速矩阵乘运算时矩阵行数和列数均为8的倍数3,因此设计网络时推荐将fc层的size参数设置为8的倍数。 + + +**FP32 训练** + +为了训练 MNIST 网络,还需要定义损失函数来更新权重参数,此处使用的优化器是SGDOptimizer。为了简化说明,这里省略了迭代训练的相关代码,仅体现损失函数及优化器定义相关的内容。 + +```python +import paddle +import numpy as np + +data = fluid.layers.data( + name='image', shape=[None, 28, 28, 1], dtype='float32') +label = fluid.layers.data(name='label', shape=[None, 1], dtype='int64') + +out = MNIST(data, class_dim=10) +loss = fluid.layers.cross_entropy(input=out, label=label) +avg_loss = fluid.layers.mean(loss) + +sgd = fluid.optimizer.SGDOptimizer(learning_rate=1e-3) +sgd.minimize(avg_loss) +``` + +**AMP训练** + +与FP32训练相比,用户仅需使用PaddlePaddle提供的`fluid.contrib.mixed_precision.decorate` 函数将原来的优化器SGDOptimizer进行封装,然后使用封装后的优化器(mp_sgd)更新参数梯度即可完成向AMP训练的转换,代码如下所示: + +```python +sgd = SGDOptimizer(learning_rate=1e-3) +# 此处只需要使用fluid.contrib.mixed_precision.decorate将sgd封装成AMP训练所需的 +# 优化器mp_sgd,并使用mp_sgd.minimize(avg_loss)代替原来的sgd.minimize(avg_loss)语句即可。 +mp_sgd = fluid.contrib.mixed_precision.decorator.decorate(sgd) +mp_sgd.minimize(avg_loss) +``` + +运行上述混合精度训练python脚本时为得到更好的执行性能可配置如下环境参数,并保证cudnn版本在7.4.1及以上。 + +```shell +export FLAGS_conv_workspace_size_limit=1024 # MB,根据所使用的GPU显存容量及模型特点设置数值,值越大越有可能选择到更快的卷积算法 +export FLAGS_cudnn_exhaustive_search=1 # 使用穷举搜索方法来选择快速卷积算法 +export FLAGS_cudnn_batchnorm_spatial_persistent=1 # 用于触发batch_norm和relu的融合 +``` + +上述即为最简单的PaddlePaddle AMP功能使用方法。ResNet50模型的AMP训练示例可[点击此处](https://github.com/PaddlePaddle/models/blob/develop/PaddleCV/image_classification/README.md#%E6%B7%B7%E5%90%88%E7%B2%BE%E5%BA%A6%E8%AE%AD%E7%BB%83)查看,其他模型使用PaddlePaddle AMP的方法也与此类似。若AMP训练过程中出现连续的loss nan等不收敛现象,可尝试使用[check nan inf工具](https://www.paddlepaddle.org.cn/documentation/docs/zh/advanced_guide/flags/check_nan_inf_cn.html#span-id-speed-span)进行调试。 + + +### PaddlePaddle AMP功能——进阶使用 + +上一小节所述均为默认AMP训练行为,用户当然也可以改变一些默认的参数设置来满足特定的模型训练场景需求。接下来的章节将介绍PaddlePaddle AMP功能使用中用户可配置的参数行为,即进阶使用技巧。 + +#### 自定义黑白名单 + +PaddlePaddle 
AMP功能实现中根据FP16数据类型计算稳定性和加速效果在框架内部定义了算子(Op)的黑白名单。具体来说,将对FP16计算友好且能利用Tensor Cores的Op归类于白名单,将使用FP16计算会导致数值不稳定的Op归类于黑名单,将对FP16计算没有多少影响的Op归类于灰名单。然而,框架开发人员不可能考虑到所有的网络模型情况,尤其是那些特殊场景中使用到的模型。用户可以在使用`fluid.contrib.mixed_precision.decorate` 函数时通过指定自定义的黑白名单列表来改变默认的FP16计算行为。 + +```python +sgd = SGDOptimizer(learning_rate=1e-3) +# list1是白名单op列表,list2是黑名单op列表,list3是黑名单var_name列表(凡是以这些黑名单var_name为输入或输出的op均会被视为黑名单op) +amp_list = AutoMixedPrecisionLists(custom_white_list=list1, custom_black_list=list2, custom_black_varnames=list3) +mp_sgd = fluid.contrib.mixed_precision.decorator.decorate(sgd, amp_list) +mp_sgd.minimize(avg_loss) +``` + +#### 自动loss scaling + +为了避免梯度Infinite或者NAN,PaddlePaddle AMP功能支持根据训练过程中梯度的数值自动调整loss scale值。用户在使用`fluid.contrib.mixed_precision.decorate` 函数时也可以改变与loss scaling相关的参数设置,示例如下: + +```python +sgd = SGDOptimizer(learning_rate=1e-3) +mp_sgd = fluid.contrib.mixed_precision.decorator.decorate(sgd, + amp_lists=None, + init_loss_scaling=2**8, + incr_every_n_steps=500, + decr_every_n_nan_or_inf=4, + incr_ratio=2.0, + decr_ratio=0.5, + use_dynamic_loss_scaling=True) +mp_sgd.minimize(avg_loss) +``` + +`init_loss_scaling `、`incr_every_n_steps` 以及`decr_every_n_nan_or_inf`等参数控制着自动loss scaling的行为。它们仅当 `use_dynamic_loss_scaling`设置为True时有效。下面详述这些参数的意义: + +* init_loss_scaling(float):初始loss scaling值。 +* incr_every_n_steps(int):每经过incr_every_n_steps个连续的正常梯度值才会增大loss scaling值。 +* decr_every_n_nan_or_inf(int):每经过decr_every_n_nan_or_inf个连续的无效梯度值(nan或者inf)才会减小loss scaling值。 +* incr_ratio(float):每次增大loss scaling值的扩增倍数,其为大于1的浮点数。 +* decr_ratio(float):每次减小loss scaling值的比例系数,其为小于1的浮点数。 + +### 多卡GPU训练的优化 + +PaddlePaddle AMP功能对多卡GPU训练进行了深度优化。如图 2 所示,优化之前的参数梯度更新特点:梯度计算时虽然使用的是FP16数据类型,但是不同GPU卡之间的梯度传输数据类型仍为FP32。 + +
+ missing +
图 2. 不同GPU卡之间传输梯度使用FP32数据类型(优化前)
+
+ +为了降低GPU多卡之间的梯度传输带宽,我们将梯度传输提前至`Cast`操作之前,而每个GPU卡在得到对应的FP16梯度后再执行`Cast`操作将其转变为FP32类型,具体操作详见图2。这一优化在训练大模型时对减少带宽占用尤其有效,如多卡训练BERT-Large模型。 + +
+ missing +
图 3. 不同GPU卡之间传输梯度使用FP16数据类型(优化后)
+
+ +### 训练性能对比(AMP VS FP32) + +PaddlePaddle AMP技术在ResNet50、Transformer等模型上训练速度相对于FP32训练上均有可观的加速比,下面是ResNet50和ERNIE Large模型的AMP训练相对于FP32训练的加速效果。 + + + + + + + +
图 4. Paddle AMP训练加速效果(横坐标为卡数,如8*8代表8机8卡)
missing missing
+ +从图4所示的图表可以看出,ResNet50的AMP训练相对与FP32训练加速比可达$2.8 \times$以上,而ERNIE Large的AMP训练相对与FP32训练加速比亦可达 $1.7 \times -- 2.1 \times$ 。 + +### 参考文献 + +*

Mixed Precision Training

+*

使用自动混合精度加速 PaddlePaddle 训练

+*

Tensor Layouts In Memory: NCHW vs NHWC

+*

Channels In And Out Requirements

+*

Matrix-Matrix Multiplication Requirements

diff --git a/doc/fluid/advanced_guide/performance_improving/analysis_tools/host_memory_profiling_cn.md b/doc/fluid/advanced_guide/performance_improving/analysis_tools/host_memory_profiling_cn.md deleted file mode 100644 index 0e7196c5d22cefa041dcf3661221e4b24328ef56..0000000000000000000000000000000000000000 --- a/doc/fluid/advanced_guide/performance_improving/analysis_tools/host_memory_profiling_cn.md +++ /dev/null @@ -1,88 +0,0 @@ -# 堆内存分析和优化 - -计算机程序都可能有内存泄漏的风险。**内存泄漏**一般是由于程序在堆(heap)上分配了内存而没有释放,随着程序的运行占用的内存越来越大,一方面会影响程序的稳定性,可能让运行速度越来越慢,或者造成oom,甚至会影响运行程序的机器的稳定性,造成宕机。 - - -目前有很多内存泄漏分析工具,比较经典的有[valgrind](http://valgrind.org/docs/manual/quick-start.html#quick-start.intro), [gperftools](https://gperftools.github.io/gperftools/)。 - -因为Fluid是用Python驱动C++ core来运行,valgrind直接分析非常困难,需要自己编译debug版本的、带valgrind支持的专用Python版本,而且输出的信息中大部分是Python自己的符号和调用信息,分析起来很困难,另外使用valgrind会让程序运行速度变得非常慢,所以不建议使用。 - -本教程主要介绍[gperftools](https://gperftools.github.io/gperftools/)的使用。 - -gperftool主要支持以下四个功能: - -- thread-caching malloc -- heap-checking using tcmalloc -- heap-profiling using tcmalloc -- CPU profiler - -Paddle也提供了基于gperftool的[CPU性能分析教程](./cpu_profiling_cn.html)。 - -对于堆内存的分析,主要用到thread-caching malloc和heap-profiling using tcmalloc。 - -## 环境 - -本教程基于paddle提供的Docker开发环境paddlepaddle/paddle:latest-dev,基于Ubuntu 16.04.4 LTS环境。 - -## 使用流程 - -- 安装google-perftools - -``` -apt-get install libunwind-dev -apt-get install google-perftools -``` - -- 安装pprof - -``` -go get -u github.com/google/pprof -``` - -- 设置运行环境 - -``` -export PPROF_PATH=/root/gopath/bin/pprof -export PPROF_BINARY_PATH=/root/gopath/bin/pprof -export LD_PRELOAD=/usr/lib/libtcmalloc.so.4 -``` - -- 使用heap profile来运行python程序。本质上是周期性的对堆的分配情况做一次快照。 - -``` -# HEAPPROFILE 设置生成的堆分析文件的目录和文件前缀 -# HEAP_PROFILE_ALLOCATION_INTERVAL 设置每分配多少存储dump一次dump,默认1GB -env HEAPPROFILE="./perf_log/test.log" HEAP_PROFILE_ALLOCATION_INTERVAL=209715200 python trainer.py -``` - -随着程序的运行,会在perf_log这个文件夹下生成很多文件,如下: - -``` --rw-r--r-- 1 root root 1.0M Jun 1 15:00 test.log.0001.heap --rw-r--r-- 1 root root 1.0M Jun 1 15:00 test.log.0002.heap --rw-r--r-- 1 root root 1.0M Jun 1 15:00 test.log.0003.heap --rw-r--r-- 1 root root 1.0M Jun 1 15:00 test.log.0004.heap --rw-r--r-- 1 root root 1.0M Jun 1 15:00 test.log.0005.heap --rw-r--r-- 1 root root 1.0M Jun 1 15:00 test.log.0006.heap -``` - -- 使用pprof对heap文件进行分析。分析有两种模式: - - 完整模式。会对当前heap做一个分析,显示目前分配内存一些调用路径。 - - ``` - pprof --pdf python test.log.0012.heap - ``` - 上述命令会生成一个profile00x.pdf的文件,可以直接打开,例如:[memory_cpu_allocator](https://github.com/jacquesqiao/Paddle/blob/bd2ea0e1f84bb6522a66d44a072598153634cade/doc/fluid/howto/optimization/memory_cpu_allocator.pdf)。从下图可以看出,在CPU版本fluid的运行过程中,分配存储最多的模块式CPUAllocator. 
而别的模块相对而言分配内存较少,所以被忽略了,这对于分配内存泄漏是很不方便的,因为泄漏是一个缓慢的过程,在这种图中是无法看到的。 - - ![result](https://user-images.githubusercontent.com/3048612/40964027-a54033e4-68dc-11e8-836a-144910c4bb8c.png) - - - Diff模式。可以对两个时刻的heap做diff,把一些内存分配没有发生变化的模块去掉,而把增量部分显示出来。 - ``` - pprof --pdf --base test.log.0010.heap python test.log.1045.heap - ``` - 生成的结果为:[`memory_leak_protobuf`](https://github.com/jacquesqiao/Paddle/blob/bd2ea0e1f84bb6522a66d44a072598153634cade/doc/fluid/howto/optimization/memory_leak_protobuf.pdf) - - 从图中可以看出:ProgramDesc这个结构,在两个版本之间增长了200MB+,所以这里有很大的内存泄漏的可能性,最终结果也确实证明是这里造成了泄漏。 - - ![result](https://user-images.githubusercontent.com/3048612/40964057-b434d5e4-68dc-11e8-894b-8ab62bcf26c2.png) - ![result](https://user-images.githubusercontent.com/3048612/40964063-b7dbee44-68dc-11e8-9719-da279f86477f.png) diff --git a/doc/fluid/advanced_guide/performance_improving/analysis_tools/index_cn.rst b/doc/fluid/advanced_guide/performance_improving/analysis_tools/index_cn.rst index 78d5992651dd24db0b1b13115b477fa750712f79..3bb5ba2c568fc5e6c78485c3cc60a66e3a2841bf 100644 --- a/doc/fluid/advanced_guide/performance_improving/analysis_tools/index_cn.rst +++ b/doc/fluid/advanced_guide/performance_improving/analysis_tools/index_cn.rst @@ -1,3 +1,5 @@ +.. _api_guide_analysis_tools: + ############### 性能优化分析及工具 ############### diff --git a/doc/fluid/advanced_guide/performance_improving/analysis_tools/timeline_cn.md b/doc/fluid/advanced_guide/performance_improving/analysis_tools/timeline_cn.md index 1d4fcee690376125579aa5eacd4f7987f6671192..e40afcf3f4cc311747de9be5cbe9eacc2ca44175 100644 --- a/doc/fluid/advanced_guide/performance_improving/analysis_tools/timeline_cn.md +++ b/doc/fluid/advanced_guide/performance_improving/analysis_tools/timeline_cn.md @@ -52,7 +52,6 @@ python Paddle/tools/timeline.py --profile_path=/tmp/profile --timeline_path=time 1. 打开chrome浏览器,访问,用`load`按钮来加载生成的`timeline`文件。 - ![chrome tracing](../tracing.jpeg) 1. 结果如下图所示,可以放大来查看timeline的细节信息。 diff --git a/doc/fluid/advanced_guide/performance_improving/analysis_tools/timeline_en.md b/doc/fluid/advanced_guide/performance_improving/analysis_tools/timeline_en.md index 936695b2b82f0eb461f7cd415482ca81ca882e23..fb51802a168452a0649ebbcd0a6f4d37c07ea823 100644 --- a/doc/fluid/advanced_guide/performance_improving/analysis_tools/timeline_en.md +++ b/doc/fluid/advanced_guide/performance_improving/analysis_tools/timeline_en.md @@ -52,7 +52,6 @@ python Paddle/tools/timeline.py --profile_path=/tmp/profile --timeline_path=time 3. Open chrome and visit , use `load` button to load the generated `timeline` file. 
- ![chrome tracing](./tracing.jpeg) diff --git a/doc/fluid/advanced_guide/performance_improving/device_switching/device_switching.md b/doc/fluid/advanced_guide/performance_improving/device_switching/device_switching.md new file mode 100644 index 0000000000000000000000000000000000000000..c20f1abf113a51632d20eb1c2340b85cd3d67aa3 --- /dev/null +++ b/doc/fluid/advanced_guide/performance_improving/device_switching/device_switching.md @@ -0,0 +1,199 @@ +# 运行时设备切换 + +Paddle提供了[fluid.CUDAPlace](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/fluid_cn/CUDAPlace_cn.html)以及[fluid.CPUPlace](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/fluid_cn/CPUPlace_cn.html)用于指定运行时的设备。这两个接口用于指定全局的设备,从1.8版本开始,Paddle提供了[device_guard](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api_cn/fluid_cn/device_guard_cn.html)接口,用于指定部分OP的运行设备,此教程会介绍device_guard的使用场景,以及如何使用该接口对模型进行优化。 + +如果使用了`fluid.CUDAPlace`设置了全局的执行设备,框架将尽可能地将OP设置在GPU上执行,因此有可能会遇到显存不够的情况。`device_guard`可以用于设置OP的执行设备,如果将部分层设置在CPU上运行,就能够充分利用CPU大内存的优势,避免显存超出。 + +有时尽管指定了全局的执行设备为GPU,但框架在自动分配OP执行设备时,可能会将部分OP设置在CPU上执行。另外,个别OP会将输出存储在CPU上。在以上的场景中,常常会发生不同设备间的数据传输,可能会影响模型的性能。使用`device_guard`可以避免模型运行中不必要的数据传输。在下面的内容中,将会详细介绍如何通过[profile](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api_cn/profiler_cn.html)工具分析数据传输开销,以及如何使用`device_guard`避免不必要的数据传输,从而提升模型性能。 + +## 如何避免显存超出 + +下面示例代码中的`embedding`层,其参数`size`包含两个元素,第一个元素为`vocab_size` (词表大小), 第二个为`emb_size`(`embedding`层维度)。实际场景中,词表可能会非常大。示例代码中,词表大小被设置为10000000。如果在GPU模式下运行,该层创建的权重矩阵的大小为(10000000, 150),仅这一层就需要5.59G的显存,如果词表大小继续增加,极有可能会导致显存超出。 + +```python +import paddle.fluid as fluid + +data = fluid.layers.fill_constant(shape=[1], value=128, dtype='int64') +label = fluid.layers.fill_constant(shape=[1, 150], value=0.5, dtype='float32') +emb = fluid.embedding(input=data, size=(10000000, 150), dtype='float32') +out = fluid.layers.l2_normalize(x=emb, axis=-1) + +cost = fluid.layers.square_error_cost(input=out, label=label) +avg_cost = fluid.layers.mean(cost) +sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) +sgd_optimizer.minimize(avg_cost) + +place = fluid.CUDAPlace(0) +exe = fluid.Executor(place) +exe.run(fluid.default_startup_program()) +result = exe.run(fluid.default_main_program(), fetch_list=[avg_cost]) +``` + +`embedding`是根据`input`中的`id`信息从`embedding`矩阵中查询对应`embedding`信息,在CPU上进行计算,其速度也是可接受的。因此,可以参考如下代码,使用`device_guard`将`embedding`层设置在CPU上,以利用CPU内存资源。那么,除了`embedding`层,其他各层都会在GPU上运行。 + +```python +import paddle.fluid as fluid + +data = fluid.layers.fill_constant(shape=[1], value=128, dtype='int64') +label = fluid.layers.fill_constant(shape=[1, 150], value=0.5, dtype='float32') +with fluid.device_guard("cpu"): + emb = fluid.embedding(input=data, size=(10000000, 150), dtype='float32') +out = fluid.layers.l2_normalize(x=emb, axis=-1) + +cost = fluid.layers.square_error_cost(input=out, label=label) +avg_cost = fluid.layers.mean(cost) +sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) +sgd_optimizer.minimize(avg_cost) + +place = fluid.CUDAPlace(0) +exe = fluid.Executor(place) +exe.run(fluid.default_startup_program()) +result = exe.run(fluid.default_main_program(), fetch_list=[avg_cost]) +``` + +在显存足够的情况下,可不必进行这样的设置。 + +## 如何减少数据传输 +### 使用profile工具确认是否发生了数据传输 +首先对模型的性能数据进行分析,找到发生数据传输的原因。如下列代码所示,可以利用[profile](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/profiler_cn.html)工具进行分析。 + +```python +import paddle.fluid as fluid +import paddle.fluid.compiler as compiler +import paddle.fluid.profiler as profiler + +data1 = 
fluid.layers.fill_constant(shape=[1, 3, 8, 8], value=0.5, dtype='float32') +data2 = fluid.layers.fill_constant(shape=[1, 3, 5, 5], value=0.5, dtype='float32') +shape = fluid.layers.shape(data2) +shape = fluid.layers.slice(shape, axes=[0], starts=[0], ends=[4]) +out = fluid.layers.crop_tensor(data1, shape=shape) +place = fluid.CUDAPlace(0) +exe = fluid.Executor(place) +exe.run(fluid.default_startup_program()) +compiled_prog = compiler.CompiledProgram(fluid.default_main_program()) +with profiler.profiler('All', 'total') as prof: + for i in range(10): + result = exe.run(program=compiled_prog, fetch_list=[out]) +``` + +在程序运行结束后,将会自动地打印出profile report。在下面的profile report中,可以看到 `GpuMemCpy Summary`中给出了2项数据传输的调用耗时。在OP执行过程中,如果输入Tensor所在的设备与OP执行的设备不同,就会发生`GpuMemcpySync`,通常我们可以直接优化的就是这一项。进一步分析,可以看到`slice`和`crop_tensor`执行中都发生了`GpuMemcpySync`。尽管我们在程序中设置了GPU模式运行,但是框架中有些OP,例如shape,会将输出结果放在CPU上。 + +```text +-------------------------> Profiling Report <------------------------- + +Note! This Report merge all thread info into one. +Place: All +Time unit: ms +Sorted by total time in descending order in the same thread + +Total time: 26.6328 + Computation time Total: 13.3133 Ratio: 49.9884% + Framework overhead Total: 13.3195 Ratio: 50.0116% + +------------------------- GpuMemCpy Summary ------------------------- + +GpuMemcpy Calls: 30 Total: 1.47508 Ratio: 5.5386% + GpuMemcpyAsync Calls: 10 Total: 0.443514 Ratio: 1.66529% + GpuMemcpySync Calls: 20 Total: 1.03157 Ratio: 3.87331% + +------------------------- Event Summary ------------------------- + +Event Calls Total CPU Time (Ratio) GPU Time (Ratio) Min. Max. Ave. Ratio. +FastThreadedSSAGraphExecutorPrepare 10 9.16493 9.152509 (0.998645) 0.012417 (0.001355) 0.025192 8.85968 0.916493 0.344122 +shape 10 8.33057 8.330568 (1.000000) 0.000000 (0.000000) 0.030711 7.99849 0.833057 0.312793 +fill_constant 20 4.06097 4.024522 (0.991025) 0.036449 (0.008975) 0.075087 0.888959 0.203049 0.15248 +slice 10 1.78033 1.750439 (0.983212) 0.029888 (0.016788) 0.148503 0.290851 0.178033 0.0668471 + GpuMemcpySync:CPU->GPU 10 0.45524 0.446312 (0.980388) 0.008928 (0.019612) 0.039089 0.060694 0.045524 0.0170932 +crop_tensor 10 1.67658 1.620542 (0.966578) 0.056034 (0.033422) 0.143906 0.258776 0.167658 0.0629515 + GpuMemcpySync:GPU->CPU 10 0.57633 0.552906 (0.959357) 0.023424 (0.040643) 0.050657 0.076322 0.057633 0.0216398 +Fetch 10 0.919361 0.895201 (0.973721) 0.024160 (0.026279) 0.082935 0.138122 0.0919361 0.0345199 + GpuMemcpyAsync:GPU->CPU 10 0.443514 0.419354 (0.945526) 0.024160 (0.054474) 0.040639 0.059673 0.0443514 0.0166529 +ScopeBufferedMonitor::post_local_exec_scopes_process 10 0.341999 0.341999 (1.000000) 0.000000 (0.000000) 0.028436 0.057134 0.0341999 0.0128413 +eager_deletion 30 0.287236 0.287236 (1.000000) 0.000000 (0.000000) 0.005452 0.022696 0.00957453 0.010785 +ScopeBufferedMonitor::pre_local_exec_scopes_process 10 0.047864 0.047864 (1.000000) 0.000000 (0.000000) 0.003668 0.011592 0.0047864 0.00179718 +InitLocalVars 1 0.022981 0.022981 (1.000000) 0.000000 (0.000000) 0.022981 0.022981 0.022981 0.000862883 +``` +### 通过log查看发生数据传输的具体位置 + +以上的示例程序比较简单,我们只用看profile report就能知道具体是哪些算子发生了数据传输。但是当模型比较复杂时,可能需要去查看更加详细的调试信息,可以打印出运行时的log去确定发生数据传输的具体位置。依然以上述程序为例,执行`GLOG_vmodule=operator=3 python test_case.py`,会得到如下log信息,会发现发生了2次数据传输: + +- `shape`输出的结果在CPU上,在`slice`运行时,`shape`的输出被拷贝到GPU上 +- `slice`执行完的结果在GPU上,当`crop_tensor`执行时,它会被拷贝到CPU上。 + +```text +I0406 14:56:23.286592 17516 operator.cc:180] CUDAPlace(0) Op(shape), inputs:{Input[fill_constant_1.tmp_0:float[1, 3, 5, 5]({})]}, 
outputs:{Out[shape_0.tmp_0:int[4]({})]}. +I0406 14:56:23.286628 17516 eager_deletion_op_handle.cc:107] Erase variable fill_constant_1.tmp_0 on CUDAPlace(0) +I0406 14:56:23.286725 17516 operator.cc:1210] Transform Variable shape_0.tmp_0 from data_type[int]:data_layout[NCHW]:place[CPUPlace]:library_type[PLAIN] to data_type[int]:data_layout[ANY_LAYOUT]:place[CUDAPlace(0)]:library_type[PLAIN] +I0406 14:56:23.286763 17516 scope.cc:169] Create variable shape_0.tmp_0 +I0406 14:56:23.286784 17516 data_device_transform.cc:21] DeviceTransform in, src_place CPUPlace dst_place: CUDAPlace(0) +I0406 14:56:23.286867 17516 tensor_util.cu:129] TensorCopySync 4 from CPUPlace to CUDAPlace(0) +I0406 14:56:23.287099 17516 operator.cc:180] CUDAPlace(0) Op(slice), inputs:{EndsTensor[], EndsTensorList[], Input[shape_0.tmp_0:int[4]({})], StartsTensor[], StartsTensorList[]}, outputs:{Out[slice_0.tmp_0:int[4]({})]}. +I0406 14:56:23.287140 17516 eager_deletion_op_handle.cc:107] Erase variable shape_0.tmp_0 on CUDAPlace(0) +I0406 14:56:23.287220 17516 tensor_util.cu:129] TensorCopySync 4 from CUDAPlace(0) to CPUPlace +I0406 14:56:23.287473 17516 operator.cc:180] CUDAPlace(0) Op(crop_tensor), inputs:{Offsets[], OffsetsTensor[], Shape[slice_0.tmp_0:int[4]({})], ShapeTensor[], X[fill_constant_0.tmp_0:float[1, 3, 8, 8]({})]}, outputs:{Out[crop_tensor_0.tmp_0:float[1, 3, 5, 5]({})]}. +``` + +### 使用device_guard避免不必要的数据传输 + +在上面的例子中,`shape`输出的是一个1-D的Tensor,因此对于`slice`而言计算量很小。这种情况下如果将`slice`设置在CPU上运行,就可以避免2次数据传输。修改后的程序如下: + +```python +import paddle.fluid as fluid +import paddle.fluid.compiler as compiler +import paddle.fluid.profiler as profiler + +data1 = fluid.layers.fill_constant(shape=[1, 3, 8, 8], value=0.5, dtype='float32') +data2 = fluid.layers.fill_constant(shape=[1, 3, 5, 5], value=0.5, dtype='float32') +shape = fluid.layers.shape(data2) +with fluid.device_guard("cpu"): + shape = fluid.layers.slice(shape, axes=[0], starts=[0], ends=[4]) +out = fluid.layers.crop_tensor(data1, shape=shape) +place = fluid.CUDAPlace(0) +exe = fluid.Executor(place) +exe.run(fluid.default_startup_program()) +compiled_prog = compiler.CompiledProgram(fluid.default_main_program()) +with profiler.profiler('All', 'total') as prof: + for i in range(10): + result = exe.run(program=compiled_prog, fetch_list=[out]) +``` +再次观察profile report中`GpuMemCpy Summary`的内容,可以看到`GpuMemCpySync`已经被消除。在实际的模型中,若`GpuMemCpySync` 调用耗时占比较大,并且可以通过设置`device_guard`避免,那么就能够带来一定的性能提升。 + +```text +-------------------------> Profiling Report <------------------------- + +Note! This Report merge all thread info into one. +Place: All +Time unit: ms +Sorted by total time in descending order in the same thread + +Total time: 14.5345 + Computation time Total: 4.47587 Ratio: 30.7948% + Framework overhead Total: 10.0586 Ratio: 69.2052% + +------------------------- GpuMemCpy Summary ------------------------- + +GpuMemcpy Calls: 10 Total: 0.457033 Ratio: 3.14447% + GpuMemcpyAsync Calls: 10 Total: 0.457033 Ratio: 3.14447% + +------------------------- Event Summary ------------------------- + +Event Calls Total CPU Time (Ratio) GPU Time (Ratio) Min. Max. Ave. Ratio. 
+FastThreadedSSAGraphExecutorPrepare 10 7.70113 7.689066 (0.998433) 0.012064 (0.001567) 0.032657 7.39363 0.770113 0.529852 +fill_constant 20 2.62299 2.587022 (0.986287) 0.035968 (0.013713) 0.071097 0.342082 0.13115 0.180466 +shape 10 1.93504 1.935040 (1.000000) 0.000000 (0.000000) 0.026774 1.6016 0.193504 0.133134 +Fetch 10 0.880496 0.858512 (0.975032) 0.021984 (0.024968) 0.07392 0.140896 0.0880496 0.0605797 + GpuMemcpyAsync:GPU->CPU 10 0.457033 0.435049 (0.951898) 0.021984 (0.048102) 0.037836 0.071424 0.0457033 0.0314447 +crop_tensor 10 0.705426 0.671506 (0.951916) 0.033920 (0.048084) 0.05841 0.123901 0.0705426 0.0485346 +slice 10 0.324241 0.324241 (1.000000) 0.000000 (0.000000) 0.024299 0.07213 0.0324241 0.0223084 +eager_deletion 30 0.250524 0.250524 (1.000000) 0.000000 (0.000000) 0.004171 0.016235 0.0083508 0.0172365 +ScopeBufferedMonitor::post_local_exec_scopes_process 10 0.047794 0.047794 (1.000000) 0.000000 (0.000000) 0.003344 0.014131 0.0047794 0.00328831 +InitLocalVars 1 0.034629 0.034629 (1.000000) 0.000000 (0.000000) 0.034629 0.034629 0.034629 0.00238254 +ScopeBufferedMonitor::pre_local_exec_scopes_process 10 0.032231 0.032231 (1.000000) 0.000000 (0.000000) 0.002952 0.004076 0.0032231 0.00221755 +``` + +### 总结 + +- 使用profile工具对模型进行分析,看是否存在GpuMemcpySync的调用耗时。若存在,则进一步分析发生数据传输的原因。 +- 可以通过profile report找到发生GpuMemcpySync的OP。如果需要,可以通过打印log,找到GpuMemcpySync发生的具体位置。 +- 尝试使用`device_guard`设置部分OP的运行设备,来减少GpuMemcpySync的调用。 +- 最后可以通过比较修改前后模型的profile report,或者其他用来衡量性能的指标,确认修改后是否带来了性能提升。 diff --git a/doc/fluid/advanced_guide/performance_improving/index_cn.rst b/doc/fluid/advanced_guide/performance_improving/index_cn.rst index 9103496255d6637161065237ac53a856f033a835..b50f091f8c70328d37c7cf3dc92a5b0f14a08f33 100644 --- a/doc/fluid/advanced_guide/performance_improving/index_cn.rst +++ b/doc/fluid/advanced_guide/performance_improving/index_cn.rst @@ -7,6 +7,8 @@ singlenode_training_improving/training_best_practice.rst singlenode_training_improving/memory_optimize.rst + device_switching/device_switching.md + amp/amp.md multinode_training_improving/cpu_train_best_practice.rst multinode_training_improving/dist_training_gpu.rst multinode_training_improving/gpu_training_with_recompute.rst diff --git a/doc/fluid/advanced_guide/performance_improving/index_en.rst b/doc/fluid/advanced_guide/performance_improving/index_en.rst index 30d74b04013e56cb84f790c4ba265bad2f4a3d17..f57e2a3d060daabf6733c969a9e85de69bc5ae24 100644 --- a/doc/fluid/advanced_guide/performance_improving/index_en.rst +++ b/doc/fluid/advanced_guide/performance_improving/index_en.rst @@ -5,7 +5,7 @@ Practice Improving .. 
toctree:: :maxdepth: 1 - + singlenode_training_improving/memory_optimize_en.rst multinode_training_improving/cpu_train_best_practice_en.rst multinode_training_improving/gpu_training_with_recompute_en.rst inference_improving/paddle_tensorrt_infer_en.md diff --git a/doc/fluid/advanced_guide/performance_improving/multinode_training_improving/dist_training_gpu.rst b/doc/fluid/advanced_guide/performance_improving/multinode_training_improving/dist_training_gpu.rst index 7f9ed763122d872fb9fa0ddc46cd6492f4b0b31a..ebe02dc6f577f47f323da7d0a967d952f0e5124e 100644 --- a/doc/fluid/advanced_guide/performance_improving/multinode_training_improving/dist_training_gpu.rst +++ b/doc/fluid/advanced_guide/performance_improving/multinode_training_improving/dist_training_gpu.rst @@ -7,34 +7,39 @@ 开始优化您的GPU分布式训练任务 --------------------------- -PaddlePaddle Fluid可以支持在现代GPU [#]_ 服务器集群上完成高性能分布式训练。 -通常可以通过以下方法优化在多机多卡环境训练性能,建议在进行性能优化时, -检查每项优化点并验证对应提升,从而提升最终的性能。 +PaddlePaddle Fluid支持在现代GPU [#]_ 服务器集群上完成高性能分布式训练。通常可以通过以下方法优化在多机多卡环境训练性能,建议在进行性能优化时,检查每项优化点并验证对应提升,从而提升最终的性能。 -一个简单的验证当前的训练程序是否需要进一步优化性能的方法, -是查看GPU的计算利用率 [#]_ ,通常用 :code:`nvidia-smi` 命令查看。 -如果GPU利用率较低,则可能存在较大的优化空间。 -下面主要从环境变量设置、训练策略设置、数据准备和训练方式四个方向介绍GPU分布式训练中常用的方法。 +一个简单的验证当前的训练程序是否需要进一步优化性能的方法,是查看GPU的计算利用率 [#]_ ,通常用 :code:`nvidia-smi` 命令查看。如果GPU利用率较低,则可能存在较大的优化空间。下面主要从数据准备、训练策略设置和训练方式三个方面介绍GPU分布式训练中常用的优化方法。 -1、环境变量设置 -============= +1、数据准备 +=========== -环境变量设置表 +数据读取的优化在GPU训练中至关重要,尤其在不断增加batch_size提升吞吐时,计算对reader性能会有更高对要求,优化reader性能需要考虑的点包括: -.. csv-table:: - :header: "调节项", "可选值", "说明" - :widths: 3, 3, 5 + - 使用 :code:`DataLoader` 。参考 `这里 `_ 使用DataLoader,并建议开启 :code:`use_double_buffer` 。 + - reader返回uint8类型数据。图片在解码后一般会以uint8类型存储,如果在reader中转换成float类型数据,会将数据体积扩大4倍。直接返回uint8数据,然后在GPU上转化成float类型进行训练可以提升数据读取效率。 + - 减少reader初始化时间 (infinite read)。在训练任务开始执行第一轮训练时,reader开始不断异步地从磁盘或其他存储中读取数据并执行预处理,然后将处理好的数据填充到队列中供计算使用。从0开始填充这个队列直到数据可以源源不断供给计算,需要一定时间的预热。所以,如果每轮训练都重新填充队列,会产生一些时间的开销。所以,在使用DataLoader时,可以让reader函数不断地产生数据,直到训练循环结束: - ":code:`FLAGS_sync_nccl_allreduce`", "0,1", "是否同步AllReduce操作。1表示开启,每次调用等待AllReduce同步" - ":code:`FLAGS_fraction_of_gpu_memory_to_use`", "0~1之间的float值", "预先分配显存的占比" - ":code:`NCCL_IB_DISABLE` ", "0,1", "是否启用RDMA多机通信。如果机器硬件支持,可以设置1,开启RDMA支持" + .. code-block:: python + :linenos: -说明: + def infinite_reader(file_path): + while True: + with open(file_path) as fn: + for line in fn: + yield process(line) + + def train(): + ... 
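+          # 注:以下训练循环仅为示意,data_loader、exe、NUM_PASSES、iters_per_pass 均假定已在别处定义;
+          # xrange 为 Python 2 写法,Python 3 中请使用 range,且循环处应写作 range(iters_per_pass)。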
+ for pass_id in xrange(NUM_PASSES): + if pass_id == 0: + data_loader.start() + for batch_id in (iters_per_pass): + exe.run() + data_loader.reset() -- 关于 :code:`FLAGS_sync_nccl_allreduce` ,配置 :code:`FLAGS_sync_nccl_allreduce=1` 让每次allreduce操作都等待完成,可以提升性能,详细原因和分析可以参考:https://github.com/PaddlePaddle/Paddle/issues/15049。 -- 关于 :code:`FLAGS_fraction_of_gpu_memory_to_use` ,配置 :code:`FLAGS_fraction_of_gpu_memory_to_use=0.95` ,0.95是指95%的显存会预先分配。设置的范围是0.0~1.0。注意,设置成0.0会让每次显存分配都调用 :code:`cudaMalloc` 这样会极大的降低训练性能。 -- 关于 :code:`NCCL_IB_DISABLE` ,在使用NCCL2模式训练时,其会默认尝试开启RDMA通信,如果系统不支持,则会自动降级为使用TCP通信。可以通过打开环境变量 :code:`NCCL_DEBUG=INFO` 查看NCCL是否选择了开启RDMA通信。如果需要强制使用TCP方式通信,可以设置 :code:`NCCL_IB_DISABLE=1` 。 +另外,可以使用DALI库提升数据处理性能。DALI是NVIDIA开发的数据加载库,更多内容请参考 `官网文档 `_ 。飞桨中如何结合使用DALI库请参考 `使用示例 `_ 。 2、训练策略设置 =========== @@ -48,9 +53,11 @@ PaddlePaddle Fluid可以支持在现代GPU [#]_ 服务器集群上完成高性 ":code:`num_threads`", "int", "1", "CPU线程数" ":code:`nccl_comm_num`", "int", "1", "nccl通信器数量" ":code:`fuse_all_reduce_ops`", "bool", "False", "多卡训练时,将AllReduce操纵进行融合" - ":code:`use_hierarchical_allreduce` ", "bool", "False","分级式reduce" + ":code:`use_hierarchical_allreduce` ", "bool", "False", "分级式reduce" ":code:`num_iteration_per_drop_scope`", "int", "1", "scope drop频率,设置每隔几个batch的迭代之后执行一次清理scope" ":code:`fetch_frequency`", "int", "1", "fetch的刷新频率" + ":code:`fuse_bn_act_ops`", "bool", "False", "是否开启batch normalization和激活函数的融合" + ":code:`fuse_elewise_add_act_ops`", "bool", "False", "是否开启elementwise add函数和激活函数的融合" 说明: @@ -58,7 +65,7 @@ PaddlePaddle Fluid可以支持在现代GPU [#]_ 服务器集群上完成高性 - 关于AllReduce融合 :code:`fuse_all_reduce_ops` ,默认情况下会将同一layer中参数的梯度的AllReduce操作合并成一个,比如对于 :code:`fluid.layers.fc` 中有Weight和Bias两个参数,打开该选项之后,原本需要两次AllReduce操作,现在只用一次AllReduce 操作。此外,为支持更大粒度的参数梯度融合,Paddle提供了 :code:`FLAGS_fuse_parameter_memory_size` 和 :code:`FLAGS_fuse_parameter_groups_size` 两个环境变量选项。用户可以指定融合AllReduce操作之后,每个AllReduce操作的梯度字节数,比如希望每次AllReduce调用传输16MB的梯度,:code:`export FLAGS_fuse_parameter_memory_size=16` ,经验值为总通信量的十分之一。可以指定每次AllReduce操作的最大层数,即到达该层数就进行AllReduce,如指定50层 :code:`export FLAGS_fuse_parameter_groups_size=50` 。注意:目前不支持sparse参数梯度。 - 关于使用分级式reduce :code:`use_hierarchical_allreduce` 。对于多机模式,针对小数据量的通信,Ring AllReduce通信效率低,采用Hierarchical AllReduce可以解决该问题。 - 关于降低scope drop频率 :code:`num_iteration_per_drop_scope` 和fetch频率 :code:`fetch_frequency` 。减少scope drop和fetch频率,可以减少频繁的变量内存申请、释放和拷贝,从而提升性能。 -- 其他训练策略的参数可以参考 `这里 <../best_practice/training_best_practice.html>`_ 。 +- 关于操作融合:通过参数融合可以提升训练性能。 设置这些参数可以参考: @@ -88,67 +95,12 @@ PaddlePaddle Fluid可以支持在现代GPU [#]_ 服务器集群上完成高性 exe.run([]) -3、数据准备 -=========== - -1、使用GPU完成部分图片预处理 - -如果可能,使用GPU完成部分数据预处理,比如图片Tensor的归一化: - -.. 
code-block:: python - :linenos: - - image = fluid.layers.data() - img_mean = fluid.layers.create_global_var([3, 1, 1], 0.0, "float32", name="img_mean", persistable=True) - img_std = fluid.layers.create_global_var([3, 1, 1], 0.0, "float32", name="img_std", persistable=True) - t1 = fluid.layers.elementwise_sub(image / 255.0, img_mean, axis=1) - image = fluid.layers.elementwise_div(t1, img_std, axis=1) - -对输入的图片Tensor,使用 :code:`fluid.layers` 完成图片数据归一化预处理, -这样可以减轻CPU预处理数据的负担,提升总体训练速度。 - -2、优化reader性能 - -数据读取的优化在GPU训练中至关重要,尤其在不断增加batch_size提升吞吐时,计算对reader性能会有更高对要求, -优化reader性能需要考虑的点包括: - - - 使用 :code:`pyreader` 。参考 `这里 <../../user_guides/howto/prepare_data/use_py_reader.html>`_ 使用pyreader,并开启 :code:`use_double_buffer` 。 - - reader返回uint8类型数据。图片在解码后一般会以uint8类型存储,如果在reader中转换成float类型数据,会将数据体积扩大4倍。直接返回uint8数据,然后在GPU上转化成float类型进行训练 - - 减少reader初始化时间 (infinite read) - 在训练任务开始执行第一轮训练时,reader开始异步的,不断的从磁盘或其他存储中读取数据并执行预处理,然后将处理好的数据 - 填充到队列中供计算使用。从0开始填充这个队列直到数据可以源源不断供给计算,需要一定时间的预热。所以,如果每轮训练 - 都重新填充队列,会产生一些时间的开销。所以,在使用pyreader时,可以让reader函数不断的产生数据,直到训练循环手动break: - - .. code-block:: python - :linenos: - - def infinite_reader(file_path): - while True: - with open(file_path) as fn: - for line in fn: - yield process(line) - - def train(): - ... - for pass_id in xrange(NUM_PASSES): - if pass_id == 0: - pyreader.start() - for batch_id in (iters_per_pass): - exe.run() - pyreader.reset() - -4、训练方式 +3、训练方式 =========== 1、Local SGD -GPU多机多卡同步训练过程中存在慢trainer现象, -即每步中训练快的trainer的同步通信需要等待训练慢的trainer。 -由于每步中慢trainer的rank具有随机性, -因此我们使用局部异步训练的方式——LocalSGD, -通过多步异步训练(无通信阻塞)实现慢trainer时间均摊, -从而提升同步训练性能。 -Local SGD训练方式主要有三个参数,分别是: +GPU多机多卡同步训练过程中存在慢trainer现象,即每步中训练快的trainer的同步通信需要等待训练慢的trainer。由于每步中慢trainer的rank具有随机性,因此我们使用局部异步训练的方式——LocalSGD,通过多步异步训练(无通信阻塞)实现慢trainer时间均摊,从而提升同步训练性能。Local SGD训练方式主要有三个参数,分别是: .. csv-table:: :header: "选项", "类型", "可选值", "说明" @@ -163,18 +115,14 @@ Local SGD训练方式主要有三个参数,分别是: - Local SGD的warmup步长 :code:`local_sgd_is_warm_steps` 影响最终模型的泛化能力,一般需要等到模型参数稳定之后在进行Local SGD训练,经验值可以将学习率第一次下降时的epoch作为warmup步长,之后再进行Local SGD训练。 - Local SGD步长 :code:`local_sgd_steps` ,一般该值越大,通信次数越少,训练速度越快,但随之而来的时模型精度下降。经验值设置为2或者4。 -具体的Local SGD的训练代码可以参考: -https://github.com/PaddlePaddle/Fleet/tree/develop/examples/local_sgd/resnet +具体的Local SGD的训练代码可以参考:https://github.com/PaddlePaddle/Fleet/tree/develop/examples/local_sgd/resnet 2、使用混合精度训练 -V100 GPU提供了 `Tensor Core `_ 可以在混合精度计算 -场景极大的提升性能。使用混合精度计算的例子可以参考: -https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification#using-mixed-precision-training +V100 GPU提供了 `Tensor Core `_ 可以在混合精度计算场景极大的提升性能。使用混合精度计算的例子可以参考:https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification#using-mixed-precision-training -目前Paddle只提供在两个模型(ResNet, BERT)的混合精度计算实现并支持static loss scaling,其他模型使用混合精度也 -可以参考以上的实现完成验证。 +目前Paddle只提供在两个模型(ResNet, BERT)的混合精度计算实现并支持static loss scaling,其他模型使用混合精度也可以参考以上的实现完成验证。 附录 ---- diff --git a/doc/fluid/advanced_guide/performance_improving/singlenode_training_improving/memory_optimize.rst b/doc/fluid/advanced_guide/performance_improving/singlenode_training_improving/memory_optimize.rst index 5875f94b2d436aec41b531004799b70bf017f463..ae9238813a614c7e3c022e06c59995e21f589c10 100644 --- a/doc/fluid/advanced_guide/performance_improving/singlenode_training_improving/memory_optimize.rst +++ b/doc/fluid/advanced_guide/performance_improving/singlenode_training_improving/memory_optimize.rst @@ -7,20 +7,26 @@ 1. PaddlePaddle的显存分配策略 =========================== -1.1. 显存预分配策略 ----------------- +1.1. 
显存自增长AutoGrowth策略 +-------------------------- +自1.6+的版本起,PaddlePaddle支持显存自增长AutoGrowth策略,按需分配显存,且已于1.7+版本中默认开启,方便用户在同一张GPU卡上同时运行多个任务。 -由于原生的CUDA系统调用 :code:`cudaMalloc` 和 :code:`cudaFree` 均是同步操作,非常耗时。因此PaddlePaddle采用了显存预分配的策略加速显存分配。具体方式为: +由于原生的CUDA系统调用 :code:`cudaMalloc` 和 :code:`cudaFree` 均是同步操作,非常耗时。 +因此显存自增长AutoGrowth策略会缓存已分配到的显存,供后续分配使用,具体方式为: -- 在分配requested_size大小的显存时, - - 若requested_size <= chunk_size,则框架会预先分配chunk_size大小的显存池chunk,并从chunk中分出requested_size大小的块返回。之后每次申请显存都会从chunk中分配。 - - 若requested_size > chunk_size,则框架会直接调用 :code:`cudaMalloc` 分配requested_size大小的显存返回。 +- 在前几次显存分配时,框架会调用 :code:`cudaMalloc` 按需分配,但释放时不会调用 :code:`cudaFree` 返回给GPU,而是在框架内部缓存起来。 -- 在释放free_size大小的显存时, - - 若free_size <= chunk_size,则框架会将该显存放回预分配的chunk中,而不是直接返回给CUDA。 - - 若free_size > chunk_size,则框架会直接调用 :code:`cudaFree` 将显存返回给CUDA。 +- 在随后的显存分配时,框架会首先检查缓存的显存中是否有合适的块,若有则从中分割出所需的显存空间返回,否则才调用 :code:`cudaMalloc` 直接从GPU中分配。随后的显存释放亦会缓存起来供后续分配使用。 + +因此,显存自增长AutoGrowth策略会在前几个batch训练时分配较慢(因为频繁调用 :code:`cudaMalloc` ),在随后训练过程中基本不会影响模型训练速度。 -上述的chunk_size由环境变量 :code:`FLAGS_fraction_of_gpu_memory_to_use` 确定,chunk_size的计算公式为: +1.2. 显存预分配策略 +---------------- + +除了显存自增长AutoGrowth策略以外,PaddlePaddle还提供了显存预分配策略。显存预分配策略是PaddlePaddle 1.7版本前的默认显存分配策略。 + +显存预分配策略会在第一次分配时分配很大chunk_size的显存块,随后的显存分配大多从预分配的显存块中切分获得。 +其中,chunk_size由环境变量 :code:`FLAGS_fraction_of_gpu_memory_to_use` 确定,chunk_size的计算公式为: .. code-block:: python @@ -28,7 +34,17 @@ :code:`FLAGS_fraction_of_gpu_memory_to_use` 的默认值为0.92,即框架预先分配显卡92%的当前可用显存值。 -若你的GPU卡上有其他任务占用显存,你可以适当将 :code:`FLAGS_fraction_of_gpu_memory_to_use` 减少,保证框架能预分配到合适的chunk,例如: +显存预分配策略分配显存的具体方式为: + +- 在分配requested_size大小的显存时, + - 若requested_size <= chunk_size,则框架会预先分配chunk_size大小的显存池chunk,并从chunk中分出requested_size大小的块返回。之后每次申请显存都会从chunk中分配。 + - 若requested_size > chunk_size,则框架会直接调用 :code:`cudaMalloc` 分配requested_size大小的显存返回。 + +- 在释放free_size大小的显存时, + - 若free_size <= chunk_size,则框架会将该显存放回预分配的chunk中,而不是直接返回给CUDA。 + - 若free_size > chunk_size,则框架会直接调用 :code:`cudaFree` 将显存返回给CUDA。 + +若你的GPU卡上有其他任务占用显存,你可以适当将 :code:`FLAGS_fraction_of_gpu_memory_to_use` 减少,保证框架能预分配到合适的显存块,例如: .. code-block:: shell @@ -37,30 +53,23 @@ 若 :code:`FLAGS_fraction_of_gpu_memory_to_use` 设为0,则每次显存分配和释放均会调用 :code:`cudaMalloc` 和 :code:`cudaFree` ,会严重影响性能,不建议你使用。 只有当你想测量网络的实际显存占用量时,你可以设置 :code:`FLAGS_fraction_of_gpu_memory_to_use` 为0,观察nvidia-smi显示的显存占用情况。 -1.2. 显存自增长AutoGrowth策略 --------------------------- -在1.6+的版本中,PaddlePaddle支持显存自增长AutoGrowth策略,按需分配显存。若您希望按需分配显存,您可选择使用显存自增长AutoGrowth策略。 - -在前几次显存分配时,会调用 :code:`cudaMalloc` 按需分配,但释放时不会调用 :code:`cudaFree` 返回给GPU,而是在框架内部缓存起来。 +1.3. 显存分配策略的选择方式 +----------------------- +自1.6+版本起,PaddlePaddle同时支持显存自增长AutoGrowth策略和显存预分配策略,并通过环境变量 :code:`FLAGS_allocator_strategy` 控制。 -在随后的显存分配时,会首先检查缓存的显存中是否有合适的块,若有则从中分割出所需的显存空间返回,否则才调用 :code:`cudaMalloc` 直接从GPU中分配。随后的显存释放亦会缓存起来供后续分配使用。 - -因此,显存自增长AutoGrowth策略会在前几个batch训练时分配较慢(因为频繁调用 :code:`cudaMalloc` ),在随后训练过程中基本不会影响模型训练速度。 - -显存自增长AutoGrowth策略通过设置环境变量 :code:`FLAGS_allocator_strategy` 开启,设置方式为: +选择显存自增长AutoGrowth的方式为: .. code-block:: shell - export FLAGS_allocator_strategy=auto_growth + export FLAGS_allocator_strategy=auto_growth # 选择显存自增长AutoGrowth策略 -对应地,显存预分配策略通过以下方法开启: +选择显存预分配策略的方式为: .. code-block:: shell - export FLAGS_allocator_strategy=naive_best_fit - -环境变量 :code:`FLAGS_allocator_strategy` 的默认值为naive_best_fit,表示默认使用显存预分配策略。 + export FLAGS_allocator_strategy=naive_best_fit # 选择显存预分配策略 +此外,自1.7.2+版本起,PaddlePaddle提供了环境变量 :code:`FLAGS_gpu_memory_limit_mb` ,用于控制单个任务进程可分配的最大显存,单位是MB。默认值是0,表示没有限制,可分配全部显存。如果设置为大于0的值,则会在分配的显存超过限制时报错,即使此时系统还存在空闲的显存空间。 2. 
PaddlePaddle的存储优化策略 =========================== diff --git a/doc/fluid/advanced_guide/performance_improving/singlenode_training_improving/memory_optimize_en.rst b/doc/fluid/advanced_guide/performance_improving/singlenode_training_improving/memory_optimize_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..2a1e3ecb0c52cc198c8e6c6e54f8a175222434a6 --- /dev/null +++ b/doc/fluid/advanced_guide/performance_improving/singlenode_training_improving/memory_optimize_en.rst @@ -0,0 +1,178 @@ +.. _api_guide_memory_optimize_en: + +################################## +Memory Allocation and Optimization +################################## + +1. Memory Allocation Strategy +============================= + +1.1. AutoGrowth Strategy +------------------------ + +Since version 1.6+, PaddlePaddle supports the AutoGrowth strategy, which allocates memory on demand. +The AutoGrowth strategy has been enabled by default since version 1.7+, making it convenient for users to +run multiple tasks on the same GPU card at the same time. + +Because the native CUDA system calls :code:`cudaMalloc` and :code:`cudaFree` are synchronous operations, +which are very time-consuming, the AutoGrowth strategy caches the allocated memory for subsequent allocations. +The specific behavior is as follows: + +- In the first few memory allocations, the PaddlePaddle framework will call :code:`cudaMalloc` and allocate memory on demand. When releasing the allocated memory, it will not call :code:`cudaFree` to return the memory to the GPU, but will cache the memory inside the framework. + +- In the subsequent allocations, the PaddlePaddle framework will first check whether there is a fitting block (a cached block larger than the required memory size) in the cached memory. If there is, it will split the required memory from the fitting block and return it. Otherwise, it will call :code:`cudaMalloc` to allocate memory from the GPU. The allocated memory is also cached when it is released, for subsequent allocations. + +Therefore, the AutoGrowth strategy may be slow in the first few batches of model training (because of the frequent calls to :code:`cudaMalloc`), +but will not affect the speed in the subsequent training process. + +1.2. Pre-Allocation Strategy +---------------------------- + +In addition to the AutoGrowth strategy, PaddlePaddle also provides a Pre-Allocation strategy, +which was the default memory allocation strategy before PaddlePaddle 1.7. + +The Pre-Allocation strategy allocates a large chunk of size chunk_size at the first allocation, and most of the subsequent memory allocations are obtained from the pre-allocated memory chunk. +The chunk_size is determined by the environment variable :code:`FLAGS_fraction_of_gpu_memory_to_use`, and the calculation formula of chunk_size is: + +.. code-block:: python + + chunk_size = FLAGS_fraction_of_gpu_memory_to_use * number of current available memory of a single GPU card + +The default value of :code:`FLAGS_fraction_of_gpu_memory_to_use` is 0.92, that is, the framework will pre-allocate +92% of the currently available memory of the GPU card. + +The specific way the Pre-Allocation strategy allocates GPU memory is: + +- When allocating memory of requested_size, + - If requested_size <= chunk_size, the framework will first allocate a memory chunk of chunk_size, then split a block of requested_size and return the block. Every subsequent memory allocation will be performed on the chunk. + - If requested_size > chunk_size, the framework will call :code:`cudaMalloc` to allocate a memory block of requested_size and return it. 
+ +- When freeing memory of free_size, + - If free_size <= chunk_size, the framework will put the memory block back into the pre-allocated chunk, instead of returning it to the GPU. + - If free_size > chunk_size, the framework will call :code:`cudaFree` and return the memory to the GPU. + +If there are other tasks on your GPU card that occupy the memory, you can appropriately decrease :code:`FLAGS_fraction_of_gpu_memory_to_use` +to ensure that the framework can pre-allocate a memory chunk of appropriate size, for example: + +.. code-block:: shell + + export FLAGS_fraction_of_gpu_memory_to_use=0.4 # Pre-allocate 40% memory of a single GPU card + +If :code:`FLAGS_fraction_of_gpu_memory_to_use` is set to 0, the framework will call :code:`cudaMalloc` and :code:`cudaFree` every time memory is allocated and released, which will seriously affect performance and is not recommended. Only when you want to measure the actual memory usage of the network should you set :code:`FLAGS_fraction_of_gpu_memory_to_use` to 0, and then observe the memory usage shown by the nvidia-smi command. + +1.3. Configuration of the memory allocation strategy +---------------------------------------------------- +Since version 1.6+, PaddlePaddle supports both the AutoGrowth strategy and the Pre-Allocation strategy, and the strategy used in the framework is controlled by +the environment variable :code:`FLAGS_allocator_strategy`. + +Use the AutoGrowth strategy: + +.. code-block:: shell + + export FLAGS_allocator_strategy=auto_growth # Use AutoGrowth strategy + +Use the Pre-Allocation strategy: + +.. code-block:: shell + + export FLAGS_allocator_strategy=naive_best_fit # Use Pre-Allocation strategy + +In addition, since version 1.7.2+, PaddlePaddle provides an environment variable :code:`FLAGS_gpu_memory_limit_mb`, which controls the maximum GPU memory that the process can allocate. +If it is equal to 0, there is no limit and all GPU memory is available to the process. If it is larger than 0, the process will raise an out-of-memory error if the allocated +memory exceeds the limit, even though there is still available memory on the GPU card. The unit is MB and the default value is 0. + +2. Memory Optimization Strategy +=============================== + +PaddlePaddle provides several general memory optimization methods to optimize the memory usage of your network (including both host memory and GPU memory). + +2.1. GC Strategy: eager collection of memory garbage +----------------------------------------------------- + +The principle of GC (Garbage Collection) is to release the memory space of useless variables eagerly while the network is running, +in order to save memory space. GC is suitable for training and inference using Executor or ParallelExecutor, but it is not suitable for the C++ inference library. + +**Since version 1.6+, GC Strategy is enabled by default.** + +GC Strategy is controlled by 3 environment variables: + + +- :code:`FLAGS_eager_delete_tensor_gb` + +The variable that enables GC, its data type is double. The default value is -1 in PaddlePaddle with version < 1.6, +and is 0 in PaddlePaddle with version >= 1.6. GC Strategy will cache a certain amount of memory garbage and release it uniformly. +:code:`FLAGS_eager_delete_tensor_gb` is the threshold of cached memory garbage, the unit of which is GB. **It is recommended to set** :code:`FLAGS_eager_delete_tensor_gb=0`. + +If :code:`FLAGS_eager_delete_tensor_gb=0`, memory garbage is collected immediately once it appears, to save memory. 
+ +If :code:`FLAGS_eager_delete_tensor_gb=1`, the memory garbage is collected when the cached amount of garbage reaches 1GB. + +If :code:`FLAGS_eager_delete_tensor_gb<0`, GC Strategy is disabled. + + +- :code:`FLAGS_memory_fraction_of_eager_deletion` + +The variable that controls GC Strategy, its data type is double. The default value is 1, and its range is [0,1]. It is only suitable for ParallelExecutor or CompiledProgram+with_data_parallel. +GC will sort the variables in descending order according to the memory space they occupy, +and only collect the memory space of the top :code:`FLAGS_memory_fraction_of_eager_deletion` fraction of variables. +**It is recommended to keep the default value**, that is :code:`FLAGS_memory_fraction_of_eager_deletion=1`. + +If :code:`FLAGS_memory_fraction_of_eager_deletion=0.6`, the top 60% of variables will be collected. + +If :code:`FLAGS_memory_fraction_of_eager_deletion=0`, no variable will be collected and GC Strategy is disabled. + +If :code:`FLAGS_memory_fraction_of_eager_deletion=1`, all variables will be collected. + + +- :code:`FLAGS_fast_eager_deletion_mode` + +The variable that enables the fast GC Strategy, its type is bool. The default value is True, which means the fast GC Strategy is used. +The fast GC Strategy collects memory garbage immediately instead of waiting for the CUDA kernel to finish. **It is recommended to keep the default value**, that is :code:`FLAGS_fast_eager_deletion_mode=True`. + + +2.2. Inplace Strategy: output reuses input inside operator +----------------------------------------------------------- + +The principle of the Inplace strategy is that the output of some operators can reuse the memory space of their input. +For example, the output and input of the operator :code:`reshape` can reuse the same memory space. + +The Inplace Strategy is suitable for ParallelExecutor or CompiledProgram+with_data_parallel, and can be set through :code:`BuildStrategy`. +The strategy is not suitable for Executor+Program or the C++ inference library. + +**Since version 1.6+, Inplace Strategy is enabled by default.** + +The Inplace strategy is enabled as follows: + +.. code-block:: python + + build_strategy = fluid.BuildStrategy() + build_strategy.enable_inplace = True # Enable Inplace Strategy + + compiled_program = fluid.CompiledProgram(train_program).with_data_parallel( + loss_name=loss.name, build_strategy=build_strategy) + + +In PaddlePaddle with version < 1.6, due to some design problems, when the Inplace Strategy is enabled, +the variables in fetch_list in the subsequent :code:`exe.run` must be persistable. +That is, if the variables you want to fetch are loss and acc, you must set: + +.. code-block:: python + + loss.persistable = True + acc.persistable = True + + +**Since version 1.6+, setting variables in fetch_list to persistable is not needed.** + + +3. Memory Optimization Best Practice +==================================== + +We recommend the following memory optimization settings: + +- Enable the GC strategy: set :code:`FLAGS_eager_delete_tensor_gb=0`. + +- Enable the Inplace strategy: set :code:`build_strategy.enable_inplace = True`; when the PaddlePaddle version is < 1.6, also set the variables in fetch_list to persistable using :code:`var.persistable = True`. A combined sketch is given below. 
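+
+Putting the two recommendations together, a minimal sketch looks like the following. It is only an illustration: it assumes a built :code:`train_program` and a :code:`loss` variable already exist, and that the GC flag is exported in the shell (e.g. :code:`export FLAGS_eager_delete_tensor_gb=0`) before the script is launched.
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+
+    # Inplace strategy: allow operator outputs to reuse input memory
+    build_strategy = fluid.BuildStrategy()
+    build_strategy.enable_inplace = True
+
+    # Compile the program in data-parallel mode with the build strategy;
+    # `train_program` and `loss` are assumed to be defined elsewhere.
+    compiled_program = fluid.CompiledProgram(train_program).with_data_parallel(
+        loss_name=loss.name, build_strategy=build_strategy)
+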
+ +**Since version 1.6+, the above optimal strategy have been enabled by default and setting variables in fetch_list to persistable is not needed.** + diff --git a/doc/fluid/advanced_guide/performance_improving/singlenode_training_improving/training_best_practice.rst b/doc/fluid/advanced_guide/performance_improving/singlenode_training_improving/training_best_practice.rst index 7a8e2573ec7b77e0335b24d042a11edfbd2098ed..95e71abd70e3605f94f3faa0aa1367db499b022b 100644 --- a/doc/fluid/advanced_guide/performance_improving/singlenode_training_improving/training_best_practice.rst +++ b/doc/fluid/advanced_guide/performance_improving/singlenode_training_improving/training_best_practice.rst @@ -1,4 +1,5 @@ -.. training_best_practice: +.. _api_guide_singlenode_training_best_practice: + ##################### 单机训练优秀实践 @@ -13,7 +14,7 @@ PaddlePaddle Fluid可以支持在现代CPU、GPU平台上进行训练。如果 1. 网络构建过程中的配置优化 -============= +================== 这部分优化与具体的模型有关,在这里,我们列举出一些优化过程中遇到过的一些示例。 @@ -35,93 +36,145 @@ cuDNN是NVIDIA提供的深度神经网络计算库,其中包含了很多神经 bias_attr=None, use_cudnn=True, act=None, - name=None) + name=None, + data_format="NCHW") 在 :code:`use_cudnn=True` 时,框架底层调用的是cuDNN中的卷积操作。 通常cuDNN库提供的操作具有很好的性能表现,其性能明显优于Paddle原生的CUDA实现,比如 :code:`conv2d` 。但是cuDNN中有些操作的性能较差,比如: :code:`conv2d_transpose` 在 :code:`batch_size=1` 时、:code:`pool2d` 在 :code:`global_pooling=True` 时等,这些情况下,cuDNN实现的性能差于Paddle的CUDA实现,建议手动设置 :code:`use_cudnn=False` 。 -1.2 使用融合功能的API -^^^^^^^^^^^^^^^^ +1.2 减少模型中Layer的个数 +^^^^^^^^^^^^^^^^^^ + +为方便用户使用,飞桨提供一些不同粒度的Layer,其中有些Layer的组合可以通过单个Layer完成。比如: -Paddle提供一些粗粒度的API,这些API融合了多个细粒度API的计算,比如: +(1) :code:`fluid.layers.softmax_with_cross_entropy` ,该操作其实是 :code:`fluid.layers.softmax` 和 :code:`fluid.layers.cross_entropy` 的组合,因此如果模型中有出现 .. code-block:: python logits = fluid.layers.softmax(logits) loss = fluid.layers.cross_entropy(logits, label, ignore_index=255) -和 +可以直接替换成 .. code-block:: python loss = fluid.layers.softmax_with_cross_entropy(logits, label, ignore_index=255, numeric_stable_mode=True) -用户网络配置中使用融合功能的API,通常能取得更好的计算性能。 + +(2) 如果模型中需要对数据进行标准化,可以直接使用 :code:`fluid.layers.data_norm` ,而不用通过一系列layer组合出数据的标准化操作。 + +因此,建议在构建模型时优先使用飞桨提供的单个Layer完成所需操作,这样减少模型中Layer的个数,并因此加速模型训练。 + 2. 数据准备优化 ============= -2.1 分析数据准备部分的耗时 -^^^^^^^^^^^^^^^^ +数据准备通常分为两部分:第一部分是数据加载,即程序从磁盘中加载训练/预测数据;第二部分是数据预处理,程序对加载的数据进行预处理,比如图像任务通常需要进行数据增强、Shuffle等。 +这两部分需要用户根据自己的模型需要进行设置,只需要最后得到Data Reader接口即可。Data Reader返回iterable对象,可以每次返回一条样本或者一组样本。代码示例如下: -数据准备部分通常分为两个部分:数据读取部分和预处理部分。 +.. code-block:: python + + def data_reader(width, height): + def reader(): + while True: + yield np.random.uniform(-1, 1,size=width*height), np.random.randint(0,10) + return reader + train_data_reader = data_reader(32, 32) -- 数据读取部分:用户需要在Python端从磁盘中加载数据,然后将数据feed到Fluid的执行器中。 -- 数据预处理部分:用户需要在Python端进行数据预处理,比如图像任务通常需要进行数据增强、裁剪等。 -Fluid提供了两种数据读取方式:**同步数据读取** 和 **异步数据读取**,详情请参考文档 `如何准备数据 `_ 。 +Paddle提供了两种方式从Data Reader中读取数据: :ref:`user_guide_use_numpy_array_as_train_data` 和 :ref:`user_guides_use_py_reader` ,详情请参考文档 :ref:`user_guide_prepare_data` 。 -2.1.1 同步数据读取 ->>>>>>>>>>>>>>> +2.1 同步数据读取 +^^^^^^^^^^^^^^^^ 同步数据读取是一种简单并且直观的数据准备方式,代码示例如下: .. 
code-block:: python + image = fluid.data(name="image", shape=[None, 1, 28, 28], dtype="float32") + label = fluid.data(name="label", shape=[None, 1], dtype="int64") + # 模型定义 + # …… + prediction = fluid.layers.fc(input=image, size=10) + loss = fluid.layers.cross_entropy(input=prediction, label=label) + avg_loss = fluid.layers.mean(loss) + # …… + # 读取数据 + # paddle.dataset.mnist.train()返回数据读取的Reader,每次可以从Reader中读取一条样本,batch_size为128 + train_reader = paddle.batch(paddle.dataset.mnist.train(), 128) + # 读取数据 end = time.time() for batch_id, batch in enumerate(train_reader): data_time = time.time() - end # 训练网络 - executor.run(feed=[...], fetch_list=[...]) + executor.run(feed={...}, fetch_list=[...]) batch_time = time.time() - end end = time.time() -用户通过调用自己编写的reader函数,reader每次输出一个batch的数据,并将数据传递给执行器。因此数据准备和执行是顺序进行的,用户可通过加入Python计时函数 time.time() 来统计数据准备部分和执行部分所占用的时间。 -2.1.2 异步数据读取 ->>>>>>>>>>>>>>> +用户首先需要通过 :code:`fluid.data` 定义模型的输入,然后根据输入构建模型,最后从事先自定义的Reader函数中获取一个batch的数据,并将数据传递给执行器。 + +采用同步数据读取方式时,用户可通过加入Python计时函数 :code:`time.time()` 来统计数据准备部分和执行部分所占用的时间。 +由于数据准备和执行是顺序进行的,所以程序的执行速度可能较慢。如果用户想进行模型调试的话,同步数据读取是一个不错的选择。 -Paddle里面使用py_reader接口来实现异步数据读取,代码示例如下: + +2.2 异步数据读取 +^^^^^^^^^^^^^^^^ + +Paddle里面使用 paddle.fluid.io. :ref:`cn_api_fluid_io_DataLoader` 接口来实现异步数据读取,代码示例如下: .. code-block:: python - # 启动py_reader - train_py_reader.start() + image = fluid.data(name="image", shape=[None, 1, 28, 28], dtype="float32") + label = fluid.data(name="label", shape=[None, 1], dtype="int64") + dataloader = fluid.io.DataLoader.from_generator( + feed_list=[image, label], + capacity=64, + iterable=False, + use_double_buffer=True) + # 模型定义 + # …… + prediction = fluid.layers.fc(input=image, size=10) + loss = fluid.layers.cross_entropy(input=prediction, label=label) + avg_loss = fluid.layers.mean(loss) + # …… + # 读取数据 + train_reader = paddle.batch(paddle.dataset.mnist.train(), 128) + data_loader.set_batch_generator(train_reader, places=places) + + # 启动data_loader + data_loader.start() batch_id = 0 try: end = time.time() while True: - print("queue size: ", train_py_reader.queue.size()) + print("queue size: ", data_loader.queue.size()) loss, = executor.run(fetch_list=[...]) # ... 
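+            # 注:异步读取时无需在 run 中传入 feed,数据由 DataLoader 的后台线程填充;
+            # 示例中的 dataloader 与 data_loader 应为同一对象(变量名需统一),
+            # places 需事先定义,例如 places = fluid.cuda_places()。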
batch_time = time.time() - end end = time.time() batch_id += 1 except fluid.core.EOFException: - train_py_reader.reset() + data_loader.reset() -使用异步数据读取时,Paddle的C++端会维护一个数据队列,Python端通过单独的线程向C++端的数据队列传入数据。用户可以在训练过程中输出数据队列中数据的个数,如果queue size始终不为空,表明Python端数据准备的速度比模型执行的速度快,这种情况下Python端的数据读取可能不是瓶颈。 +用户首先需要通过 :code:`fluid.io.DataLoader.from_generator` 定义DataLoader对象,并使用 :code:`set_batch_generator` 方法将自定义的Reader与DataLoader绑定。 +若DataLoader被定义成不可迭代的( :code:`iterable=False` ),在训练开始之前,通过调用 :code:`start()` 方法来启动数据读取。 +在数据读取结束之后, :code:`executor.run` 会抛出 :code:`fluid.core.EOFException` ,表示训练已经遍历完Reader中的所有数据。 -此外,Paddle提供的一些FLAGS也能很好的帮助分析性能,比如通过设置 :code:`export FLAGS_reader_queue_speed_test_mode=True` ,数据队列中的训练数据在被读取之后,不会从数据队列中弹出,这样能够保证数据队列始终不为空,这样就能够很好的评估出数据读取所占的开销。**注意,FLAGS_reader_queue_speed_test_mode只能在分析的时候打开,正常训练模型时需要关闭**。 +采用异步数据读取时,Python端和C++端共同维护一个数据队列,Python端启动一个线程,负责向队列中插入数据,C++端在训练/预测过程中,从数据队列中获取数据,并将该数据从对队列中移除。 +用户可以在程序运行过程中,监测数据队列是否为空,如果队列始终不为空,表明数据准备的速度比模型执行的速度快,这种情况下数据读取可能不是瓶颈。 -2.2 优化数据准备速度的方法 -^^^^^^^^^^^^^^^^ +另外,Paddle提供的一些FLAGS也能很好的帮助分析性能。如果用户希望评估一下在完全没有数据读取开销情况下模型的性能,可以设置一下环境变量::code:`FLAGS_reader_queue_speed_test_mode` ,在该变量为True情况下,C++端从数据队列中获取数据之后,不会从数据队列中移除,这样能够保证数据队列始终不为空,从而避免了C++端读取数据时的等待开销。 -- 为降低训练的整体时间,建议用户使用异步数据读取的方式,并开启 :code:`use_double_buffer` 。此外,用户可根据模型的实际情况设置数据队列的大小。 -- 如果数据准备的时间大于模型执行的时间,或者出现了数据队列为空的情况,这时候需要考虑对Python的用户reader进行加速。常用的方法为:**使用Python多进程准备数据**。一个简单的使用多进程准备数据的示例,请参考 `YOLOv3 `_ 。 -- Python端的数据预处理,都是使用CPU完成。如果Paddle提供了相应功能的API,可将这部分预处理功能写到模型配置中,如此Paddle就可以使用GPU来完成该预处理功能,这样也可以减轻CPU预处理数据的负担,提升总体训练速度。 +**需要特别注意的是,** :code:`FLAGS_reader_queue_speed_test_mode` **只能在性能分析的时候打开,正常训练模型时需要关闭。** + +为降低训练的整体时间,建议用户使用异步数据读取的方式,并开启 :code:`use_double_buffer=True` 。用户可根据模型的实际情况设置数据队列的大小。 +如果数据准备的时间大于模型执行的时间,或者出现了数据队列为空的情况,就需要考虑对数据读取Reader进行加速。 +常用的方法是 **使用Python多进程准备数据** ,一个简单的使用多进程准备数据的示例,可以参考 `YOLOv3 `_ 。 + +Python端的数据预处理,都是使用CPU完成。如果Paddle提供了相应功能的API,可将这部分预处理功能写到模型配置中,如此Paddle就可以使用GPU来完成该预处理功能,这样也可以减轻CPU预处理数据的负担,提升总体训练速度。 3. 模型训练相关优化 ============= @@ -129,85 +182,152 @@ Paddle里面使用py_reader接口来实现异步数据读取,代码示例如 3.1 执行器介绍 ^^^^^^^^^^^^^^^^ -目前Paddle中有两个执行器, :code:`Executor` 和 :code:`ParallelExecutor` ,这两个执行器的区别: +目前Paddle的Python API中提供了 :code:`fluid.compiler.CompiledProgram` 的概念,用户可以通过 :code:`CompiledProgram` 将传入的program进行编译。 +如果希望采用数据并行模式训练,只需要将 :code:`CompiledProgram` 返回的对象调用一下 :code:`with_data_parallel` 即可,最后统一通过 :code:`executor.run(…)` 执行compiled_program。 -执行调度器 ->>>>>>>>>>>>>>> +虽然统一通过 :code:`executor.run(…)` 接口来执行,实际底层的执行策略有两种,对应C++部分的两个执行器,即 :code:`Executor` 和 :code:`ParallelExecutor` ,如果用户采用数据并行模式,C++部分使用的是 :code:`ParallelExecutor` ,除此之外都是使用 :code:`Executor` 。 +这两个执行器的差别: -.. csv-table:: +.. 
csv-table:: :header: "执行器 ", "执行对象", "执行策略" :widths: 3, 3, 5 ":code:`Executor`", ":code:`Program`", "根据 :code:`Program` 中Operator定义的先后顺序依次运行。" ":code:`ParallelExecutor`", "SSA Graph", "根据Graph中各个节点之间的依赖关系,通过多线程运行。" -为了更好的分析模型, :code:`ParallelExecutor` 内部首先会将输入的 :code:`Program` 转为SSA Graph,然后根据 :code:`build_strategy` 中的配置,通过一系列的Pass对Graph进行优化,比如:memory optimize,operator fuse等优化。最后根据 :code:`execution_strategy` 中的配置执行训练任务。 -此外, :code:`ParallelExecutor` 支持数据并行,即单进程多卡和多进程多卡,关于 :code:`ParallelExecutor` 的具体介绍请参考 `文档 `_ 。 +可以看出, :code:`Executor` 的内部逻辑非常简单,但性能可能会弱一些,因为 :code:`Executor` 对于program中的操作是串行执行的。 +而 :code:`ParallelExecutor` 首先会将program转变为计算图,并分析计算图中节点间的连接关系,对图中没有相互依赖的节点(OP),通过多线程并行执行。 -为了统一 :code:`ParallelExecutor` 接口和 :code:`Executor` 接口,Paddle提供了 :code:`fluid.compiler.CompiledProgram` 接口,在数据并行模式下,该接口底层调用的是 :code:`ParallelExecutor` 。 +因此, :code:`Executor` 是一个轻量级的执行器,目前主要用于参数初始化、模型保存、模型加载。 +:code:`ParallelExecutor` 是 :code:`Executor` 的升级版本,目前 :code:`ParallelExecutor` 主要用于模型训练,包括单机单卡、单机多卡以及多机多卡训练。 -3.2 BuildStrategy中参数配置说明 -^^^^^^^^^^^^^^^^ -BuildStrategy配置选项 ->>>>>>>>>>>>>>> +:code:`ParallelExecutor` 执行计算图之前,可以对计算图进行一些优化,比如使计算图中的一些操作是In-place的、将计算图中的参数更新操作进行融合等。 +用户还可以调整 :code:`ParallelExecutor` 执行过程中的一些配置,比如执行计算图的线程数等。这些配置分别是构建策略(BuildStrategy)和执行策略(ExecutionStrategy)参数来设置的。 + + +一个简单的使用示例如下: + +.. code-block:: python + + build_strategy = fluid.BuildStrategy() + build_strategy.enable_inplace = True + build_strategy.fuse_all_optimizer_ops=True + + exec_strategy = fluid.ExecutionStrategy() + exec_strategy.num_threads = 4 + + train_program = fluid.compiler.CompiledProgram(main_program).with_data_parallel( + loss_name=loss.name, + build_strategy=build_strategy, + exec_strategy=exec_strategy) + + place = fluid.CUDAPlace(0) + exe = Executor(place) + # 使用DataLoader读取数据,因此执行时不需要设置feed + fetch_outs = exe.run(train_program, fetch_list=[loss.name]) -.. csv-table:: + + +3.2 构建策略(BuildStrategy)配置参数介绍 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +BuildStrategy中提供了一些关于计算图优化的策略,这些策略可以在不同程度上提升模型的训练速度,但是其中一些策略与模型的结构有关,比如 :code:`fuse_all_optimizer_ops` 不支持sparse梯度,我们正在积极的完善这些策略,并在下一个版本将这些策略默认打开。 + +构建策略的详细介绍如下: + +.. csv-table:: :header: "选项", "类型", "默认值", "说明" :widths: 3, 3, 3, 5 - ":code:`reduce_strategy`", ":code:`fluid.BuildStrategy.ReduceStrategy`", ":code:`fluid.BuildStrategy.ReduceStrategy.AllReduce`", "使用数据并行训练模型时选用 :code:`AllReduce` 模式训练还是 :code:`Reduce` 模式训练." - ":code:`enable_backward_optimizer_op_deps`", "bool", "FALSE", "在反向操作和参数更新操作之间添加依赖,保证在所有的反向操作都运行结束之后才开始运行参数更新操作." - ":code:`fuse_all_optimizer_ops`", "bool", "FALSE", "对模型中的参数更新算法进行融合." - ":code:`fuse_all_reduce_ops`", "bool", "FALSE", "多卡训练时,将all_reduce Op进行融合." - ":code:`fuse_relu_depthwise_conv`", "bool", "FALSE", "如果模型中存在relu和depthwise_conv,并且是连接的,即relu->depthwise_conv,该选项可以将这两个操作合并为一个." - ":code:`fuse_broadcast_ops`", "bool", "FALSE", "在 :code:`Reduce` 模式下,对最后的多个Broadcast操作融合为一个." - ":code:`mkldnn_enabled_op_types`", "list", "{}", "如果是CPU训练,可以用 :code:`mkldnn_enabled_op_types` 指明模型中的那些操作可以使用MKLDNN库,如果不进行设置,模型可以使用MKLDNN库的所有操作都会使用MKLDNN库." 
+ ":code:`reduce_strategy`", ":code:`fluid.BuildStrategy.ReduceStrategy`", ":code:`fluid.BuildStrategy.ReduceStrategy.AllReduce`", "使用数据并行训练模型时选用 :code:`AllReduce` 模式训练还是 :code:`Reduce` 模式训练。" + ":code:`enable_backward_optimizer_op_deps`", "bool", "True", "在反向操作和参数更新操作之间添加依赖,保证在所有的反向操作都运行结束之后才开始运行参数更新操作。" + ":code:`fuse_all_optimizer_ops`", "bool", "False", "对模型中的参数更新算法进行融合。" + ":code:`fuse_all_reduce_ops`", "bool", "False", "多卡训练时,将all_reduce操作进行融合。" + ":code:`fuse_relu_depthwise_conv`", "bool", "False", "如果模型中存在relu和depthwise_conv,并且是连接的,即relu->depthwise_conv,该选项可以将这两个操作合并为一个。" + ":code:`fuse_broadcast_ops`", "bool", "False", "在 :code:`Reduce` 模式下,将最后的多个Broadcast操作融合为一个。" + ":code:`mkldnn_enabled_op_types`", "list", "{}", "如果是CPU训练,可以用 :code:`mkldnn_enabled_op_types` 指明模型中的那些操作可以使用MKLDNN库。默认情况下,模型中用到的操作如果在Paddle目前支持的可以使用mkldnn库计算的列表中,这些操作都会调用mkldnn库的接口进行计算。" + ":code:`debug_graphviz_path`", "str", "{}", "将Graph以graphviz格式输出到debug_graphviz_path所指定的文件中。" -说明: - - 关于 :code:`reduce_strategy` ,在 :code:`ParallelExecutor` 对于数据并行支持两种参数更新模式: :code:`AllReduce` 和 :code:`Reduce` 。在 :code:`AllReduce` 模式下,各个节点上计算得到梯度之后,调用 :code:`AllReduce` 操作,梯度在各个节点上聚合,然后各个节点分别进行参数更新。在 :code:`Reduce` 模式下,参数的更新操作被均匀的分配到各个节点上,即各个节点计算得到梯度之后,将梯度在指定的节点上进行 :code:`Reduce` ,然后在该节点上,最后将更新之后的参数Broadcast到其他节点。即:如果模型中有100个参数需要更新,训练时使用的是4个节点,在 :code:`AllReduce` 模式下,各个节点需要分别对这100个参数进行更新;在 :code:`Reduce` 模式下,各个节点需要分别对这25个参数进行更新,最后将更新的参数Broadcast到其他节点上. - - 关于 :code:`enable_backward_optimizer_op_deps` ,在多卡训练时,打开该选项可能会提升训练速度。 - - 关于 :code:`fuse_all_optimizer_ops` ,目前只支持SGD、Adam和Momentum算法。**注意:目前不支持sparse参数梯度** 。 - - 关于 :code:`fuse_all_reduce_ops` ,多GPU训练时,可以对 :code:`AllReduce` 操作进行融合,以减少 :code:`AllReduce` 的调用次数。默认情况下会将同一layer中参数的梯度的 :code:`AllReduce` 操作合并成一个,比如对于 :code:`fluid.layers.fc` 中有Weight和Bias两个参数,打开该选项之后,原本需要两次 :code:`AllReduce` 操作,现在只用一次 :code:`AllReduce` 操作。此外,为支持更大粒度的参数梯度融合,Paddle提供了 :code:`FLAGS_fuse_parameter_memory_size` 选项,用户可以指定融合AllReduce操作之后,每个 :code:`AllReduce` 操作的梯度字节数,比如希望每次 :code:`AllReduce` 调用传输64MB的梯度,:code:`export FLAGS_fuse_parameter_memory_size=64` 。**注意:目前不支持sparse参数梯度**。 - - 关于 :code:`mkldnn_enabled_op_types` ,支持mkldnn库的Op有:transpose, sum, softmax, requantize, quantize, pool2d, lrn, gaussian_random, fc, dequantize, conv2d_transpose, conv2d, conv3d, concat, batch_norm, relu, tanh, sqrt, abs. 
+参数说明: -3.3 ExecutionStrategy中的配置参数 -^^^^^^^^^^^^^^^^ -ExecutionStrategy配置选项 ->>>>>>>>>>>>>>> +(1) 关于 :code:`reduce_strategy` ,在 :code:`ParallelExecutor` 对于数据并行支持两种参数更新模式: :code:`AllReduce` 和 :code:`Reduce` 。在 :code:`AllReduce` 模式下,各个节点上计算得到梯度之后,调用 :code:`AllReduce` 操作,梯度在各个节点上聚合,然后各个节点分别进行参数更新。在 :code:`Reduce` 模式下,参数的更新操作被均匀的分配到各个节点上,即各个节点计算得到梯度之后,将梯度在指定的节点上进行 :code:`Reduce` ,然后在该节点上,最后将更新之后的参数Broadcast到其他节点。即:如果模型中有100个参数需要更新,训练时使用的是4个节点,在 :code:`AllReduce` 模式下,各个节点需要分别对这100个参数进行更新;在 :code:`Reduce` 模式下,各个节点需要分别对这25个参数进行更新,最后将更新的参数Broadcast到其他节点上。注意:如果是使用CPU进行数据并行训练,在Reduce模式下,不同CPUPlace上的参数是共享的,所以在各个CPUPlace上完成参数更新之后不用将更新后的参数Broadcast到其他CPUPlace。 + +(2) 关于 :code:`enable_backward_optimizer_op_deps` ,在多卡训练时,打开该选项可能会提升训练速度。 + +(3) 关于 :code:`fuse_all_optimizer_ops` ,目前只支持SGD、Adam和Momentum算法。 **注意:目前不支持sparse参数梯度** 。 + +(4) 关于 :code:`fuse_all_reduce_ops` ,多GPU训练时,可以对 :code:`AllReduce` 操作进行融合,以减少 :code:`AllReduce` 的调用次数。默认情况下会将同一layer中参数的梯度的 :code:`AllReduce` 操作合并成一个,比如对于 :code:`fluid.layers.fc` 中有Weight和Bias两个参数,打开该选项之后,原本需要两次 :code:`AllReduce` 操作,现在只用一次 :code:`AllReduce` 操作。此外,为支持更大粒度的参数梯度融合,Paddle提供了 :code:`FLAGS_fuse_parameter_memory_size` 选项,用户可以指定融合AllReduce操作之后,每个 :code:`AllReduce` 操作的梯度字节数,比如希望每次 :code:`AllReduce` 调用传输64MB的梯度,:code:`export FLAGS_fuse_parameter_memory_size=64` 。 **注意:目前不支持sparse参数梯度** 。 + +(5) 关于 :code:`mkldnn_enabled_op_types` ,目前Paddle的Op中可以使用mkldnn库计算的操作包括:transpose、sum、softmax、requantize、quantize、pool2d、lrn、gaussian_random、fc、dequantize、conv2d_transpose、conv2d、conv3d、concat、batch_norm、relu、tanh、sqrt、abs。 + + +3.3 执行策略(ExecutionStrategy)配置参数介绍 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +ExecutionStrategy中提供了关于计算图执行时的一些配置,这些配置可能会影响模型的训练速度。同时,这些配置与模型的结构有关,如果用户希望模型训练速度更快,可以调整一下这些配置。在后续的优化中,我们会对这部分进行优化,根据输入模型结构动态调整这些设置。 + +ExecutionStrategy配置选项说明: -.. csv-table:: +.. csv-table:: :header: "选项", "类型", "默认值", "说明" :widths: 3, 3, 5, 5 - ":code:`num_iteration_per_drop_scope`", "INT", "1", "经过多少次迭代之后清理一次local execution scope" + ":code:`num_iteration_per_drop_scope`", "INT", "100", "经过多少次迭代之后清理一次local execution scope" ":code:`num_threads`", "INT", "对于CPU:2*dev_count;对于GPU:4*dev_count. 
(这是一个经验值)", ":code:`ParallelExecutor` 中执行所有Op使用的线程池大小" 说明: - - 关于 :code:`num_iteration_per_drop_scope` ,框架在运行过程中会产生一些临时变量,这些变量被放在local execution scope中。通常每经过一个batch就要清理一下local execution scope中的变量,但是由于GPU是异步设备,在清理local execution scope之前需要对所有的GPU调用一次同步操作,因此耗费的时间较长。为此我们在 :code:`execution_strategy` 中添加了 :code:`num_iteration_per_drop_scope` 选项。用户可以指定经过多少次迭代之后清理一次local execution scope。 - - 关于 :code:`num_threads` ,:code:`ParallelExecutor` 根据Op之间的依赖关系确定Op的执行顺序,即:当Op的输入都已经变为ready状态之后,该Op会被放到一个队列中,等待被执行。 :code:`ParallelExecutor` 内部有一个任务调度线程和一个线程池,任务调度线程从队列中取出所有Ready的Op,并将其放到线程队列中。 :code:`num_threads` 表示线程池的大小。根据以往的经验,对于CPU任务,:code:`num_threads=2*dev_count` 时性能较好,对于GPU任务,:code:`num_threads=4*dev_count` 时性能较好。**注意:线程池不是越大越好**。 -执行策略配置推荐 ->>>>>>>>>>>>>>> +(1) 关于 :code:`num_iteration_per_drop_scope` ,框架在运行过程中会产生一些临时变量,默认每经过一个batch就要清理一下临时变量。由于GPU是异步设备,在清理之前需要对所有的GPU调用一次同步操作,因此耗费的时间较长。为此我们在execution_strategy中添加了 :code:`num_iteration_per_drop_scope` 选项。用户可以指定经过多少次迭代之后清理一次。 -- 在显存足够的前提下,建议将 :code:`exec_strategy.num_iteration_per_drop_scope` 设置成一个较大的值,比如设置 :code:`exec_strategy.num_iteration_per_drop_scope=100` ,这样可以避免反复地申请和释放内存。该配置对于一些模型的优化效果较为明显。 -- 对于一些较小的模型,比如mnist、language_model等,多个线程乱序调度op的开销大于其收益,因此推荐设置 :code:`exec_strategy.num_threads=1` 。 +(2) 关于 :code:`num_threads` ,:code:`ParallelExecutor` 根据Op之间的依赖关系确定Op的执行顺序,即:当Op的输入都已经变为ready状态之后,该Op会被放到一个队列中,等待被执行。 :code:`ParallelExecutor` 内部有一个任务调度线程和一个线程池,任务调度线程从队列中取出所有Ready的Op,并将其放到线程队列中。 :code:`num_threads` 表示线程池的大小。根据以往的经验,对于CPU任务,:code:`num_threads=2*dev_count` 时性能较好,对于GPU任务,:code:`num_threads=4*dev_count` 时性能较好。 **注意:线程池不是越大越好** 。 -CPU训练设置 ->>>>>>>>>>>>>>> -- 如果使用CPU做数据并行训练,需要指定环境变量CPU_NUM,这个环境变量指定程序运行过程中使用的 :code:`CPUPlace` 的个数。 -- 如果使用CPU进行数据并行训练,并且 :code:`build_strategy.reduce_strategy` = :code:`fluid.BuildStrategy.ReduceStrategy.Reduce` ,所有 :code:`CPUPlace` 上的参数是共享的,因此对于一些使用CPU进行数据并行训练的模型,选用 :code:`Reduce` 模式可能会更快一些。 +4. 运行时FLAGS设置优化 +================= -4. 运行时FLAGS设置 -============= -Fluid中有一些FLAGS可以有助于性能优化: +Paddle中有一些FLAGS可以有助于性能优化: -- FLAGS_fraction_of_gpu_memory_to_use表示每次分配GPU显存的最小单位,取值范围为[0, 1)。由于CUDA原生的显存分配cuMalloc和释放cuFree操作均是同步操作,非常耗时,因此将FLAGS_fraction_of_gpu_memory_to_use设置成一个较大的值,比如0.92(默认值),可以显著地加速训练的速度。 -- FLAGS_cudnn_exhaustive_search表示cuDNN在选取conv实现算法时采取穷举搜索策略,因此往往能选取到一个更快的conv实现算法,这对于CNN网络通常都是有加速的。但穷举搜索往往也会增加cuDNN的显存需求,因此用户可根据模型的实际情况选择是否设置该变量。 -- FLAGS_enable_cublas_tensor_op_math表示是否使用TensorCore加速计算cuBLAS。这个环境变量只在Tesla V100以及更新的GPU上适用,且可能会带来一定的精度损失。 +(1) :code:`FLAGS_cudnn_exhaustive_search` 表示在调用cuDNN中的卷积操作时,根据输入数据的shape等信息,采取穷举搜索的策略从算法库中选取到更快的卷积算法,进而实现对模型中卷积操作的加速。需要注意的是: + - 在搜索算法过程中需要使用较多的显存,如果用户的模型中卷积操作较多,或者GPU卡显存较小,可能会出现显存不足问题。 + - 通过穷举搜索选择好算法之后,该算法会进入Cache,以便下次运行时,如果输入数据的shape等信息不变,直接使用Cache中算法。 -5. 使用Profile工具进行性能分析 -============= +(2) :code:`FLAGS_enable_cublas_tensor_op_math` 表示是否使用TensorCore加速cuBLAS等NV提供的库中的操作。需要注意的是,这个环境变量只在Tesla V100以及更新的GPU上适用,且可能会带来一定的精度损失,通常该损失不会影响模型的收敛性。 + + +5. 优秀实践 +================= + +(1) 尽可能的使用飞桨提供的单个layer实现所需操作。 +(2) 采用异步数据读取。 +(3) 模型训练相关优化: + + - 使用ParallelExecutor作为底层执行器。单卡训练,也可以调用with_data_parallel方法。代码示例: + + .. 
code-block:: python + + compiled_prog = compiler.CompiledProgram( + fluid.default_main_program()).with_data_parallel( + loss_name=loss.name) + + - 如果模型中参数的梯度都是非sparse的,可以打开fuse_all_optimizer_ops选项,将多个参数更新操作融合为一个。 + - 如果是多卡训练,可以打开enable_backward_optimizer_op_deps、fuse_all_reduce_ops选项。如果想指定每次每次AllReduce操作的数据大小,可以设置 :code:`FLAGS_fuse_parameter_memory_size`,比如 :code:`export FLAGS_fuse_parameter_memory_size=1` ,表示每次AllReduce调用传输1MB的梯度。 + - 使用CPU做数据并行训练时,推荐使用Reduce模型,因为在使用CPU进行数据并行训练时,在Reduce模式下,不同CPUPlace 上的参数是共享的,所以在各个CPUPlace 上完成参数更新之后不用将更新后的参数Broadcast到其他CPUPlace上,这对提升速度也有很大帮助。 + - 如果是Reduce模式,可打开fuse_broadcast_ops选项。 + - 如果用户的模型较小,比如mnist、language_model等,可以将num_threads设为1。 + - 在显存足够的前提下,建议将 :code:`exec_strategy.num_iteration_per_drop_scope` 设置成一个较大的值,比如设置为100,这样可以避免反复地申请和释放内存。 + +目前我们正在推进这些配置自动化的工作:即根据输入的模型结构自动配置这些选项,争取在下一个版本中实现,敬请期待。 + +(4) FLAGS设置 + +.. code-block:: bash + + FLAGS_cudnn_exhaustive_search = True + FLAGS_enable_cublas_tensor_op_math = True + + +6. 使用Profile工具进行性能分析 +====================== -为方便用户更好的发现程序中的性能瓶颈,Paddle提供了多种Profile工具,这些工具的详细介绍和使用说明请参考 `性能调优 `_ 。 +为方便用户更好的发现程序中的性能瓶颈,Paddle提供了多种Profile工具,这些工具的详细介绍和使用说明请参考 :ref:`api_guide_analysis_tools` 。 diff --git a/doc/fluid/api/dataset.rst b/doc/fluid/api/dataset.rst index a8b6bcab25b1a77b30678852c9d0d35153943b40..f86bb206329ab3599e780586a9a4d7cec7a93fa8 100644 --- a/doc/fluid/api/dataset.rst +++ b/doc/fluid/api/dataset.rst @@ -1,5 +1,5 @@ ============= -fluid.dataset +paddle.dataset ============= .. toctree:: diff --git a/doc/fluid/api/declarative.rst b/doc/fluid/api/declarative.rst new file mode 100644 index 0000000000000000000000000000000000000000..93046be14be068b36748c3ad7d611ac5b443df4c --- /dev/null +++ b/doc/fluid/api/declarative.rst @@ -0,0 +1,28 @@ +======================= +paddle.declarative +======================= + +.. toctree:: + :maxdepth: 1 + + declarative/batch_norm.rst + declarative/bilinear_tensor_product.rst + declarative/conv2d.rst + declarative/conv2d_transpose.rst + declarative/conv3d.rst + declarative/conv3d_transpose.rst + declarative/create_parameter.rst + declarative/crf_decoding.rst + declarative/data_norm.rst + declarative/deformable_conv.rst + declarative/embedding.rst + declarative/fc.rst + declarative/group_norm.rst + declarative/hsigmoid.rst + declarative/instance_norm.rst + declarative/layer_norm.rst + declarative/multi_box_head.rst + declarative/nce.rst + declarative/prelu.rst + declarative/row_conv.rst + declarative/spectral_norm.rst diff --git a/doc/fluid/api/declarative/batch_norm.rst b/doc/fluid/api/declarative/batch_norm.rst new file mode 100644 index 0000000000000000000000000000000000000000..23971945e133fd96ada68d9d197bd71fe947c2fd --- /dev/null +++ b/doc/fluid/api/declarative/batch_norm.rst @@ -0,0 +1,7 @@ +.. _api_declarative_batch_norm: + +batch_norm +------------------------------- +:doc_source: paddle.fluid.layers.batch_norm + + diff --git a/doc/fluid/api/declarative/bilinear_tensor_product.rst b/doc/fluid/api/declarative/bilinear_tensor_product.rst new file mode 100644 index 0000000000000000000000000000000000000000..122717ae08cfda5647a4f53a7b2a33bd0b715aea --- /dev/null +++ b/doc/fluid/api/declarative/bilinear_tensor_product.rst @@ -0,0 +1,7 @@ +.. 
_api_declarative_bilinear_tensor_product: + +bilinear_tensor_product +------------------------------- +:doc_source: paddle.fluid.layers.bilinear_tensor_product + + diff --git a/doc/fluid/api/declarative/conv2d.rst b/doc/fluid/api/declarative/conv2d.rst new file mode 100644 index 0000000000000000000000000000000000000000..ba8cff6e43af600c77880540ae401c0184651c21 --- /dev/null +++ b/doc/fluid/api/declarative/conv2d.rst @@ -0,0 +1,7 @@ +.. _api_declarative_conv2d: + +conv2d +------------------------------- +:doc_source: paddle.fluid.layers.conv2d + + diff --git a/doc/fluid/api/declarative/conv2d_transpose.rst b/doc/fluid/api/declarative/conv2d_transpose.rst new file mode 100644 index 0000000000000000000000000000000000000000..1a24283c8b91efb43100bc954960c6d15e68c7f5 --- /dev/null +++ b/doc/fluid/api/declarative/conv2d_transpose.rst @@ -0,0 +1,7 @@ +.. _api_declarative_conv2d_transpose: + +conv2d_transpose +------------------------------- +:doc_source: paddle.fluid.layers.conv2d_transpose + + diff --git a/doc/fluid/api/declarative/conv3d.rst b/doc/fluid/api/declarative/conv3d.rst new file mode 100644 index 0000000000000000000000000000000000000000..3e2bbaacc284f46e6f09b2512d2c863291c0d7c8 --- /dev/null +++ b/doc/fluid/api/declarative/conv3d.rst @@ -0,0 +1,7 @@ +.. _api_declarative_conv3d: + +conv3d +------------------------------- +:doc_source: paddle.fluid.layers.conv3d + + diff --git a/doc/fluid/api/declarative/conv3d_transpose.rst b/doc/fluid/api/declarative/conv3d_transpose.rst new file mode 100644 index 0000000000000000000000000000000000000000..08a5b8104ab70f856ec67a71dbbe9475413a6663 --- /dev/null +++ b/doc/fluid/api/declarative/conv3d_transpose.rst @@ -0,0 +1,7 @@ +.. _api_declarative_conv3d_transpose: + +conv3d_transpose +------------------------------- +:doc_source: paddle.fluid.layers.conv3d_transpose + + diff --git a/doc/fluid/api/declarative/create_parameter.rst b/doc/fluid/api/declarative/create_parameter.rst new file mode 100644 index 0000000000000000000000000000000000000000..25d15ecbc4fcced99430ce5a5887c1ff369b2f19 --- /dev/null +++ b/doc/fluid/api/declarative/create_parameter.rst @@ -0,0 +1,7 @@ +.. _api_declarative_create_parameter: + +create_parameter +------------------------------- +:doc_source: paddle.fluid.layers.create_parameter + + diff --git a/doc/fluid/api/declarative/crf_decoding.rst b/doc/fluid/api/declarative/crf_decoding.rst new file mode 100644 index 0000000000000000000000000000000000000000..fc7fb3d705d302ef74974cbb6daa9198de5b9b63 --- /dev/null +++ b/doc/fluid/api/declarative/crf_decoding.rst @@ -0,0 +1,7 @@ +.. _api_declarative_crf_decoding: + +crf_decoding +------------------------------- +:doc_source: paddle.fluid.layers.crf_decoding + + diff --git a/doc/fluid/api/declarative/data_norm.rst b/doc/fluid/api/declarative/data_norm.rst new file mode 100644 index 0000000000000000000000000000000000000000..87d7aa57d5f144261349dc946ac37cace909cd89 --- /dev/null +++ b/doc/fluid/api/declarative/data_norm.rst @@ -0,0 +1,7 @@ +.. _api_declarative_data_norm: + +data_norm +------------------------------- +:doc_source: paddle.fluid.layers.data_norm + + diff --git a/doc/fluid/api/declarative/deformable_conv.rst b/doc/fluid/api/declarative/deformable_conv.rst new file mode 100644 index 0000000000000000000000000000000000000000..a0d39dc4cad5fe0e103d3e3a43a81be97fc36f3b --- /dev/null +++ b/doc/fluid/api/declarative/deformable_conv.rst @@ -0,0 +1,7 @@ +.. 
_api_declarative_deformable_conv: + +deformable_conv +------------------------------- +:doc_source: paddle.fluid.layers.deformable_conv + + diff --git a/doc/fluid/api/declarative/embedding.rst b/doc/fluid/api/declarative/embedding.rst new file mode 100644 index 0000000000000000000000000000000000000000..d37d2c77ab287184ba7f0a1bfa2370234b0813aa --- /dev/null +++ b/doc/fluid/api/declarative/embedding.rst @@ -0,0 +1,7 @@ +.. _api_declarative_embedding: + +embedding +------------------------------- +:doc_source: paddle.fluid.input.embedding + + diff --git a/doc/fluid/api/declarative/fc.rst b/doc/fluid/api/declarative/fc.rst new file mode 100644 index 0000000000000000000000000000000000000000..97ed17e4d4c291c9d770a205e5749d72f4ca0a57 --- /dev/null +++ b/doc/fluid/api/declarative/fc.rst @@ -0,0 +1,7 @@ +.. _api_declarative_fc: + +fc +------------------------------- +:doc_source: paddle.fluid.layers.fc + + diff --git a/doc/fluid/api/declarative/group_norm.rst b/doc/fluid/api/declarative/group_norm.rst new file mode 100644 index 0000000000000000000000000000000000000000..bbc40ce66cfc4f632452d038f10492fb59ec3d3a --- /dev/null +++ b/doc/fluid/api/declarative/group_norm.rst @@ -0,0 +1,7 @@ +.. _api_declarative_group_norm: + +group_norm +------------------------------- +:doc_source: paddle.fluid.layers.group_norm + + diff --git a/doc/fluid/api/declarative/hsigmoid.rst b/doc/fluid/api/declarative/hsigmoid.rst new file mode 100644 index 0000000000000000000000000000000000000000..c7d5264f242b40cdaaa629eb4d1b082ec1fe6840 --- /dev/null +++ b/doc/fluid/api/declarative/hsigmoid.rst @@ -0,0 +1,7 @@ +.. _api_declarative_hsigmoid: + +hsigmoid +------------------------------- +:doc_source: paddle.fluid.layers.hsigmoid + + diff --git a/doc/fluid/api/declarative/instance_norm.rst b/doc/fluid/api/declarative/instance_norm.rst new file mode 100644 index 0000000000000000000000000000000000000000..25adf44297f53110c62515f20bec49a60ec17654 --- /dev/null +++ b/doc/fluid/api/declarative/instance_norm.rst @@ -0,0 +1,7 @@ +.. _api_declarative_instance_norm: + +instance_norm +------------------------------- +:doc_source: paddle.fluid.layers.instance_norm + + diff --git a/doc/fluid/api/declarative/layer_norm.rst b/doc/fluid/api/declarative/layer_norm.rst new file mode 100644 index 0000000000000000000000000000000000000000..d9826c7804691db7b884504294d65febc89ed765 --- /dev/null +++ b/doc/fluid/api/declarative/layer_norm.rst @@ -0,0 +1,7 @@ +.. _api_declarative_layer_norm: + +layer_norm +------------------------------- +:doc_source: paddle.fluid.layers.layer_norm + + diff --git a/doc/fluid/api/declarative/multi_box_head.rst b/doc/fluid/api/declarative/multi_box_head.rst new file mode 100644 index 0000000000000000000000000000000000000000..a80a8187a26e5923cde16b6e12c13f66c6744655 --- /dev/null +++ b/doc/fluid/api/declarative/multi_box_head.rst @@ -0,0 +1,7 @@ +.. _api_declarative_multi_box_head: + +multi_box_head +------------------------------- +:doc_source: paddle.fluid.layers.multi_box_head + + diff --git a/doc/fluid/api/declarative/nce.rst b/doc/fluid/api/declarative/nce.rst new file mode 100644 index 0000000000000000000000000000000000000000..9fe774511e603ba46f7a409fe750f7cdc7e4b4a5 --- /dev/null +++ b/doc/fluid/api/declarative/nce.rst @@ -0,0 +1,7 @@ +.. 
_api_declarative_nce: + +nce +------------------------------- +:doc_source: paddle.fluid.layers.nce + + diff --git a/doc/fluid/api/declarative/prelu.rst b/doc/fluid/api/declarative/prelu.rst new file mode 100644 index 0000000000000000000000000000000000000000..bbfc1bc138610fd5a15b0ca177c713a26c8ac449 --- /dev/null +++ b/doc/fluid/api/declarative/prelu.rst @@ -0,0 +1,7 @@ +.. _api_declarative_prelu: + +prelu +------------------------------- +:doc_source: paddle.fluid.layers.prelu + + diff --git a/doc/fluid/api/declarative/row_conv.rst b/doc/fluid/api/declarative/row_conv.rst new file mode 100644 index 0000000000000000000000000000000000000000..69942f0e0c3246904f546ae8fd649dab84dcaeb4 --- /dev/null +++ b/doc/fluid/api/declarative/row_conv.rst @@ -0,0 +1,7 @@ +.. _api_declarative_row_conv: + +row_conv +------------------------------- +:doc_source: paddle.fluid.layers.row_conv + + diff --git a/doc/fluid/api/declarative/spectral_norm.rst b/doc/fluid/api/declarative/spectral_norm.rst new file mode 100644 index 0000000000000000000000000000000000000000..1f797b8503a4f1f0ac3a2c569ecb0d8426638d66 --- /dev/null +++ b/doc/fluid/api/declarative/spectral_norm.rst @@ -0,0 +1,7 @@ +.. _api_declarative_spectral_norm: + +spectral_norm +------------------------------- +:doc_source: paddle.fluid.layers.spectral_norm + + diff --git a/doc/fluid/api/distributed.rst b/doc/fluid/api/distributed.rst new file mode 100644 index 0000000000000000000000000000000000000000..fddddbb11adca1374a6b496eb26e4d8468a3d13c --- /dev/null +++ b/doc/fluid/api/distributed.rst @@ -0,0 +1,13 @@ +================== +paddle.distributed +================== + +.. toctree:: + :maxdepth: 1 + + distributed/get_rank.rst + distributed/get_world_size.rst + distributed/init_parallel_env.rst + distributed/ParallelEnv.rst + distributed/prepare_context.rst + distributed/spawn.rst diff --git a/doc/fluid/api/distributed/ParallelEnv.rst b/doc/fluid/api/distributed/ParallelEnv.rst new file mode 100644 index 0000000000000000000000000000000000000000..46b07c64f0358c4bf322e1325e2c6cb31fd2ea33 --- /dev/null +++ b/doc/fluid/api/distributed/ParallelEnv.rst @@ -0,0 +1,5 @@ +.. _api_distributed_ParallelEnv: + +ParallelEnv +------------------------------- +:doc_source: paddle.fluid.dygraph.parallel.ParallelEnv \ No newline at end of file diff --git a/doc/fluid/api/distributed/get_rank.rst b/doc/fluid/api/distributed/get_rank.rst new file mode 100644 index 0000000000000000000000000000000000000000..98a64831423e486877b47db8cbda7a54d1849f20 --- /dev/null +++ b/doc/fluid/api/distributed/get_rank.rst @@ -0,0 +1,10 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_distributed_get_rank: + +get_rank +-------- + +.. autofunction:: paddle.distributed.get_rank + :noindex: \ No newline at end of file diff --git a/doc/fluid/api/distributed/get_world_size.rst b/doc/fluid/api/distributed/get_world_size.rst new file mode 100644 index 0000000000000000000000000000000000000000..2de447e1bc276586e2220b043e9fffe48d21f6db --- /dev/null +++ b/doc/fluid/api/distributed/get_world_size.rst @@ -0,0 +1,10 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_distributed_get_world_size: + +get_world_size +-------------- + +.. 
autofunction:: paddle.distributed.get_world_size + :noindex: \ No newline at end of file diff --git a/doc/fluid/api/distributed/init_parallel_env.rst b/doc/fluid/api/distributed/init_parallel_env.rst new file mode 100644 index 0000000000000000000000000000000000000000..99473dd347676e0f88cce8454ccccf60c5e1da17 --- /dev/null +++ b/doc/fluid/api/distributed/init_parallel_env.rst @@ -0,0 +1,10 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_distributed_init_parallel_env: + +init_parallel_env +----------------- + +.. autofunction:: paddle.distributed.init_parallel_env + :noindex: \ No newline at end of file diff --git a/doc/fluid/api/distributed/prepare_context.rst b/doc/fluid/api/distributed/prepare_context.rst new file mode 100644 index 0000000000000000000000000000000000000000..abe6865f52ff82032a7e9873f492e5b531407ebf --- /dev/null +++ b/doc/fluid/api/distributed/prepare_context.rst @@ -0,0 +1,5 @@ +.. _api_distributed_prepare_context: + +prepare_context +------------------------------- +:doc_source: paddle.fluid.dygraph.parallel.prepare_context diff --git a/doc/fluid/api/distributed/spawn.rst b/doc/fluid/api/distributed/spawn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9c7895932fce5a5c7e55850eb37caabe680ef724 --- /dev/null +++ b/doc/fluid/api/distributed/spawn.rst @@ -0,0 +1,10 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_distributed_spawn: + +spawn +----- + +.. autofunction:: paddle.distributed.spawn + :noindex: \ No newline at end of file diff --git a/doc/fluid/api/dygraph.rst b/doc/fluid/api/dygraph.rst index 8f78d0e9e4334b53e39e52a308af8bb8d1892ddb..1df3cded70c9a2ca385a4a6138f3b340da2739aa 100644 --- a/doc/fluid/api/dygraph.rst +++ b/doc/fluid/api/dygraph.rst @@ -5,7 +5,6 @@ fluid.dygraph .. toctree:: :maxdepth: 1 - dygraph/BackwardStrategy.rst dygraph/BatchNorm.rst dygraph/BilinearTensorProduct.rst dygraph/Conv2D.rst @@ -13,13 +12,25 @@ fluid.dygraph dygraph/Conv3D.rst dygraph/Conv3DTranspose.rst dygraph/CosineDecay.rst + dygraph/DataParallel.rst + dygraph/declarative.rst + dygraph/disable_dygraph.rst + dygraph/Dropout.rst + dygraph/dygraph_to_static_code.rst + dygraph/dygraph_to_static_func.rst dygraph/dygraph_to_static_output.rst + dygraph/dygraph_to_static_program.rst dygraph/Embedding.rst + dygraph/enable_dygraph.rst + dygraph/enabled.rst dygraph/ExponentialDecay.rst + dygraph/grad.rst dygraph/GroupNorm.rst dygraph/GRUUnit.rst dygraph/guard.rst + dygraph/InstanceNorm.rst dygraph/InverseTimeDecay.rst + dygraph/jit.rst dygraph/Layer.rst dygraph/LayerList.rst dygraph/LayerNorm.rst @@ -29,16 +40,20 @@ fluid.dygraph dygraph/NCE.rst dygraph/no_grad.rst dygraph/NoamDecay.rst + dygraph/ParallelEnv.rst dygraph/ParameterList.rst dygraph/PiecewiseDecay.rst dygraph/PolynomialDecay.rst dygraph/Pool2D.rst dygraph/PRelu.rst dygraph/prepare_context.rst + dygraph/ProgramTranslator.rst + dygraph/ReduceLROnPlateau.rst dygraph/save_dygraph.rst dygraph/Sequential.rst dygraph/SpectralNorm.rst dygraph/to_variable.rst dygraph/TracedLayer.rst dygraph/Tracer.rst + dygraph/TranslatedLayer.rst dygraph/TreeConv.rst diff --git a/doc/fluid/api/dygraph/BackwardStrategy.rst b/doc/fluid/api/dygraph/BackwardStrategy.rst deleted file mode 100644 index c818ed2051d52cbe55d63c0456614220e9401058..0000000000000000000000000000000000000000 --- a/doc/fluid/api/dygraph/BackwardStrategy.rst +++ /dev/null @@ -1,12 +0,0 @@ -.. 
THIS FILE IS GENERATED BY `gen_doc.{py|sh}` - !DO NOT EDIT THIS FILE MANUALLY! - -.. _api_fluid_dygraph_BackwardStrategy: - -BackwardStrategy ----------------- - -.. autoclass:: paddle.fluid.dygraph.BackwardStrategy - :members: - :noindex: - diff --git a/doc/fluid/api/dygraph/DataParallel.rst b/doc/fluid/api/dygraph/DataParallel.rst new file mode 100644 index 0000000000000000000000000000000000000000..33c8a3fd80181e529c1c6728f867cc952b818eb4 --- /dev/null +++ b/doc/fluid/api/dygraph/DataParallel.rst @@ -0,0 +1,12 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_fluid_dygraph_DataParallel: + +DataParallel +------------ + +.. autoclass:: paddle.fluid.dygraph.DataParallel + :members: + :noindex: + diff --git a/doc/fluid/api/dygraph/Dropout.rst b/doc/fluid/api/dygraph/Dropout.rst new file mode 100644 index 0000000000000000000000000000000000000000..697101e001c6a9b0b03ffe430aa80435fee5529d --- /dev/null +++ b/doc/fluid/api/dygraph/Dropout.rst @@ -0,0 +1,12 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_fluid_dygraph_Dropout: + +Dropout +------- + +.. autoclass:: paddle.fluid.dygraph.Dropout + :members: + :noindex: + diff --git a/doc/fluid/api/dygraph/GRUCell.rst b/doc/fluid/api/dygraph/GRUCell.rst new file mode 100644 index 0000000000000000000000000000000000000000..30d104af9d4084233fb2a12df2ad2042b00f8e89 --- /dev/null +++ b/doc/fluid/api/dygraph/GRUCell.rst @@ -0,0 +1,12 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_fluid_dygraph_GRUCell: + +GRUCell +------- + +.. autoclass:: paddle.fluid.dygraph.GRUCell + :members: + :noindex: + diff --git a/doc/fluid/api/dygraph/InstanceNorm.rst b/doc/fluid/api/dygraph/InstanceNorm.rst new file mode 100644 index 0000000000000000000000000000000000000000..2e09ea9754bbceb1b1af9111dcfcfb404e0c70f4 --- /dev/null +++ b/doc/fluid/api/dygraph/InstanceNorm.rst @@ -0,0 +1,12 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_fluid_dygraph_InstanceNorm: + +InstanceNorm +--------- + +.. autoclass:: paddle.fluid.dygraph.InstanceNorm + :members: + :noindex: + diff --git a/doc/fluid/api/dygraph/LSTMCell.rst b/doc/fluid/api/dygraph/LSTMCell.rst new file mode 100644 index 0000000000000000000000000000000000000000..243ab36f9ec85701aa501ab25a9ee4cdc5e3ec5d --- /dev/null +++ b/doc/fluid/api/dygraph/LSTMCell.rst @@ -0,0 +1,12 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_fluid_dygraph_LSTMCell: + +LSTMCell +-------- + +.. autoclass:: paddle.fluid.dygraph.LSTMCell + :members: + :noindex: + diff --git a/doc/fluid/api/dygraph/ParallelEnv.rst b/doc/fluid/api/dygraph/ParallelEnv.rst new file mode 100644 index 0000000000000000000000000000000000000000..25514b2bf3de97aec0b4e1bca109e5f52534dfe3 --- /dev/null +++ b/doc/fluid/api/dygraph/ParallelEnv.rst @@ -0,0 +1,12 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_fluid_dygraph_ParallelEnv: + +ParallelEnv +----------- + +.. autoclass:: paddle.fluid.dygraph.ParallelEnv + :members: + :noindex: + diff --git a/doc/fluid/api/dygraph/ReduceLROnPlateau.rst b/doc/fluid/api/dygraph/ReduceLROnPlateau.rst new file mode 100644 index 0000000000000000000000000000000000000000..d03ce41e1d45d51c2fe611c3f5607a399ca6cf3b --- /dev/null +++ b/doc/fluid/api/dygraph/ReduceLROnPlateau.rst @@ -0,0 +1,12 @@ +.. 
THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_fluid_dygraph_ReduceLROnPlateau: + +ReduceLROnPlateau +----------------- + +.. autoclass:: paddle.fluid.dygraph.ReduceLROnPlateau + :members: + :noindex: + diff --git a/doc/fluid/api/dygraph/TranslatedLayer.rst b/doc/fluid/api/dygraph/TranslatedLayer.rst new file mode 100644 index 0000000000000000000000000000000000000000..a6f7fd9411e5179999a8bda3f1ae197092343a7a --- /dev/null +++ b/doc/fluid/api/dygraph/TranslatedLayer.rst @@ -0,0 +1,8 @@ +.. _api_fluid_dygraph_TranslatedLayer: + +TranslatedLayer +----------------------- + +.. autoclass:: paddle.fluid.dygraph.TranslatedLayer + :members: + :noindex: diff --git a/doc/fluid/api/dygraph/disable_dygraph.rst b/doc/fluid/api/dygraph/disable_dygraph.rst new file mode 100644 index 0000000000000000000000000000000000000000..17adf7a7559fe31260a4d37b618bafa5e0575b57 --- /dev/null +++ b/doc/fluid/api/dygraph/disable_dygraph.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_fluid_dygraph_disable_dygraph: + +disable_dygraph +--------------- + +.. autofunction:: paddle.fluid.dygraph.disable_dygraph + :noindex: + diff --git a/doc/fluid/api/dygraph/dygraph_to_static_code.rst b/doc/fluid/api/dygraph/dygraph_to_static_code.rst new file mode 100644 index 0000000000000000000000000000000000000000..bd6af528d903316df1c1c03f63392ace8af7c55b --- /dev/null +++ b/doc/fluid/api/dygraph/dygraph_to_static_code.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_fluid_dygraph_dygraph_to_static_code: + +dygraph_to_static_code +---------------------- + +.. autofunction:: paddle.fluid.dygraph.dygraph_to_static_code + :noindex: + diff --git a/doc/fluid/api/dygraph/dygraph_to_static_func.rst b/doc/fluid/api/dygraph/dygraph_to_static_func.rst new file mode 100644 index 0000000000000000000000000000000000000000..d73ac96d88263e5c759b260ddd7dd45f57b9fe71 --- /dev/null +++ b/doc/fluid/api/dygraph/dygraph_to_static_func.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_fluid_dygraph_dygraph_to_static_func: + +dygraph_to_static_func +---------------------- + +.. autofunction:: paddle.fluid.dygraph.dygraph_to_static_func + :noindex: + diff --git a/doc/fluid/api/dygraph/dygraph_to_static_program.rst b/doc/fluid/api/dygraph/dygraph_to_static_program.rst new file mode 100644 index 0000000000000000000000000000000000000000..1f481533654b6db79b70a3a9b2235ec8a9696ec5 --- /dev/null +++ b/doc/fluid/api/dygraph/dygraph_to_static_program.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_fluid_dygraph_dygraph_to_static_program: + +dygraph_to_static_program +------------------------- + +.. autofunction:: paddle.fluid.dygraph.dygraph_to_static_program + :noindex: + diff --git a/doc/fluid/api/dygraph/enable_dygraph.rst b/doc/fluid/api/dygraph/enable_dygraph.rst new file mode 100644 index 0000000000000000000000000000000000000000..02dfdcd457761c8533118bcd7b505f427ab5d849 --- /dev/null +++ b/doc/fluid/api/dygraph/enable_dygraph.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_fluid_dygraph_enable_dygraph: + +enable_dygraph +-------------- + +.. 
autofunction:: paddle.fluid.dygraph.enable_dygraph + :noindex: + diff --git a/doc/fluid/api/dygraph/enabled.rst b/doc/fluid/api/dygraph/enabled.rst new file mode 100644 index 0000000000000000000000000000000000000000..dc2bfa7649ef185be254a6a71161b613726e7449 --- /dev/null +++ b/doc/fluid/api/dygraph/enabled.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_fluid_dygraph_enabled: + +enabled +------- + +.. autofunction:: paddle.fluid.dygraph.enabled + :noindex: + diff --git a/doc/fluid/api/dygraph/grad.rst b/doc/fluid/api/dygraph/grad.rst new file mode 100644 index 0000000000000000000000000000000000000000..01973660288d4148827cd8a7cc19584950b1ed9a --- /dev/null +++ b/doc/fluid/api/dygraph/grad.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_paddle_grad: + +grad +---- + +.. autofunction:: paddle.grad + :noindex: + diff --git a/doc/fluid/api/dygraph/jit.rst b/doc/fluid/api/dygraph/jit.rst new file mode 100644 index 0000000000000000000000000000000000000000..7853a048535c045bae18f71c8b4d7f1e44cc65eb --- /dev/null +++ b/doc/fluid/api/dygraph/jit.rst @@ -0,0 +1,10 @@ +=== +jit +=== + +.. toctree:: + :maxdepth: 1 + + jit/save.rst + jit/load.rst + jit/SaveLoadConfig.rst diff --git a/doc/fluid/api/dygraph/jit/SaveLoadConfig.rst b/doc/fluid/api/dygraph/jit/SaveLoadConfig.rst new file mode 100644 index 0000000000000000000000000000000000000000..e8d1d3bfbc35eca0c05594b540a0cd15c19cebe1 --- /dev/null +++ b/doc/fluid/api/dygraph/jit/SaveLoadConfig.rst @@ -0,0 +1,8 @@ +.. _api_fluid_dygraph_jit_SaveLoadConfig: + +SaveLoadConfig +------------------------------- + +.. autoclass:: paddle.fluid.dygraph.jit.SaveLoadConfig + :members: + :noindex: \ No newline at end of file diff --git a/doc/fluid/api/dygraph/jit/load.rst b/doc/fluid/api/dygraph/jit/load.rst new file mode 100644 index 0000000000000000000000000000000000000000..51f59909873dd46bb43e42bdc2258a990580c24c --- /dev/null +++ b/doc/fluid/api/dygraph/jit/load.rst @@ -0,0 +1,7 @@ +.. _api_fluid_dygraph_jit_load: + +load +------------ + +.. autofunction:: paddle.fluid.dygraph.jit.load + :noindex: diff --git a/doc/fluid/api/dygraph/jit/save.rst b/doc/fluid/api/dygraph/jit/save.rst new file mode 100644 index 0000000000000000000000000000000000000000..fb55029c2870b8c56edd93c4907ae0894036eabe --- /dev/null +++ b/doc/fluid/api/dygraph/jit/save.rst @@ -0,0 +1,7 @@ +.. _api_fluid_dygraph_jit_save: + +save +------------ + +.. 
autofunction:: paddle.fluid.dygraph.jit.save + :noindex: diff --git a/doc/fluid/api/fluid.rst b/doc/fluid/api/fluid.rst index 1c07ca0a011d02488f833d6474e19985172508c8..1440d541e1d7abe78aa11609c33ceda0ce8a4f93 100644 --- a/doc/fluid/api/fluid.rst +++ b/doc/fluid/api/fluid.rst @@ -7,6 +7,7 @@ fluid fluid/BuildStrategy.rst fluid/CompiledProgram.rst + fluid/ComplexVariable.rst fluid/cpu_places.rst fluid/CPUPlace.rst fluid/create_lod_tensor.rst @@ -20,11 +21,15 @@ fluid fluid/DataFeeder.rst fluid/default_main_program.rst fluid/default_startup_program.rst + fluid/device_guard.rst + fluid/disable_dygraph.rst fluid/DistributeTranspiler.rst fluid/DistributeTranspilerConfig.rst fluid/embedding.rst + fluid/enable_dygraph.rst fluid/ExecutionStrategy.rst fluid/Executor.rst + fluid/get_flags.rst fluid/global_scope.rst fluid/gradients.rst fluid/in_dygraph_mode.rst @@ -44,6 +49,7 @@ fluid fluid/require_version.rst fluid/save.rst fluid/scope_guard.rst + fluid/set_flags.rst fluid/Tensor.rst fluid/Variable.rst fluid/WeightNormParamAttr.rst diff --git a/doc/fluid/api/fluid/device_guard.rst b/doc/fluid/api/fluid/device_guard.rst new file mode 100644 index 0000000000000000000000000000000000000000..d8d611168644c45322972669bdd2806f393bcf43 --- /dev/null +++ b/doc/fluid/api/fluid/device_guard.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_fluid_device_guard: + +device_guard +------------ + +.. autofunction:: paddle.fluid.device_guard + :noindex: + diff --git a/doc/fluid/api/fluid/disable_dygraph.rst b/doc/fluid/api/fluid/disable_dygraph.rst new file mode 100644 index 0000000000000000000000000000000000000000..481ab4a5a8de51006f976b012660eca123b2a39b --- /dev/null +++ b/doc/fluid/api/fluid/disable_dygraph.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_fluid_disable_dygraph: + +disable_dygraph +--------------- + +.. autofunction:: paddle.fluid.disable_dygraph + :noindex: + diff --git a/doc/fluid/api/fluid/enable_dygraph.rst b/doc/fluid/api/fluid/enable_dygraph.rst new file mode 100644 index 0000000000000000000000000000000000000000..389919cfc96cbaaf2ac2dbb83267f3af93a19e87 --- /dev/null +++ b/doc/fluid/api/fluid/enable_dygraph.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_fluid_enable_dygraph: + +enable_dygraph +-------------- + +.. autofunction:: paddle.fluid.enable_dygraph + :noindex: + diff --git a/doc/fluid/api/fluid/get_flags.rst b/doc/fluid/api/fluid/get_flags.rst new file mode 100644 index 0000000000000000000000000000000000000000..2432965408118fe7c58d2898c2871391a32750a2 --- /dev/null +++ b/doc/fluid/api/fluid/get_flags.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_fluid_get_flags: + +get_flags +--------- + +.. autofunction:: paddle.fluid.get_flags + :noindex: + diff --git a/doc/fluid/api/fluid/set_flags.rst b/doc/fluid/api/fluid/set_flags.rst new file mode 100644 index 0000000000000000000000000000000000000000..730438b200ee575912c940d616f0dbffdcf73d41 --- /dev/null +++ b/doc/fluid/api/fluid/set_flags.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_fluid_set_flags: + +set_flags +--------- + +.. 
autofunction:: paddle.fluid.set_flags + :noindex: + diff --git a/doc/fluid/api/framework.rst b/doc/fluid/api/framework.rst new file mode 100644 index 0000000000000000000000000000000000000000..f95b77c5cb7b1907aaaa58b9e620e9447c9b66f5 --- /dev/null +++ b/doc/fluid/api/framework.rst @@ -0,0 +1,31 @@ +======================= +paddle.framework +======================= + +.. toctree:: + :maxdepth: 1 + + framework/append_backward.rst + framework/BuildStrategy.rst + framework/CompiledProgram.rst + framework/CPUPlace.rst + framework/create_global_var.rst + framework/create_parameter.rst + framework/CUDAPinnedPlace.rst + framework/CUDAPlace.rst + framework/default_main_program.rst + framework/default_startup_program.rst + framework/ExecutionStrateg y.rst + framework/Executor.rst + framework/global_scope.rst + framework/gradients.rst + framework/name_scope.rst + framework/ParallelExecutor.rst + framework/ParamAttr.rst + framework/Print.rst + framework/Program.rst + framework/program_guard.rst + framework/py_func.rst + framework/scope_guard.rst + framework/Variable.rst + framework/WeightNormParamAttr.rst diff --git a/doc/fluid/api/framework/BuildStrategy.rst b/doc/fluid/api/framework/BuildStrategy.rst new file mode 100644 index 0000000000000000000000000000000000000000..94384f0b54e63183e9214419141e87112b0db443 --- /dev/null +++ b/doc/fluid/api/framework/BuildStrategy.rst @@ -0,0 +1,7 @@ +.. _api_framework_BuildStrategy: + +BuildStrategy +------------------------------- +:doc_source: paddle.fluid.compiler.BuildStrategy + + diff --git a/doc/fluid/api/framework/CPUPlace.rst b/doc/fluid/api/framework/CPUPlace.rst new file mode 100644 index 0000000000000000000000000000000000000000..322c101361512ef6ced7a2e1430cc9570ecdfd73 --- /dev/null +++ b/doc/fluid/api/framework/CPUPlace.rst @@ -0,0 +1,7 @@ +.. _api_framework_CPUPlace: + +CPUPlace +------------------------------- +:doc_source: paddle.fluid.core.CPUPlace + + diff --git a/doc/fluid/api/framework/CUDAPinnedPlace.rst b/doc/fluid/api/framework/CUDAPinnedPlace.rst new file mode 100644 index 0000000000000000000000000000000000000000..912861885f41674cda40dd9c3d8438e10141181b --- /dev/null +++ b/doc/fluid/api/framework/CUDAPinnedPlace.rst @@ -0,0 +1,7 @@ +.. _api_framework_CUDAPinnedPlace: + +CUDAPinnedPlace +------------------------------- +:doc_source: paddle.fluid.core.CUDAPinnedPlace + + diff --git a/doc/fluid/api/framework/CUDAPlace.rst b/doc/fluid/api/framework/CUDAPlace.rst new file mode 100644 index 0000000000000000000000000000000000000000..ea2b3bd80337b431632ddad875b703a81ac96461 --- /dev/null +++ b/doc/fluid/api/framework/CUDAPlace.rst @@ -0,0 +1,7 @@ +.. _api_framework_CUDAPlace: + +CUDAPlace +------------------------------- +:doc_source: paddle.fluid.core.CUDAPlace + + diff --git a/doc/fluid/api/framework/CompiledProgram.rst b/doc/fluid/api/framework/CompiledProgram.rst new file mode 100644 index 0000000000000000000000000000000000000000..62240afc324523a7d1e106d90e22a2ea68d6bee7 --- /dev/null +++ b/doc/fluid/api/framework/CompiledProgram.rst @@ -0,0 +1,7 @@ +.. _api_framework_CompiledProgram: + +CompiledProgram +------------------------------- +:doc_source: paddle.fluid.compiler.CompiledProgram + + diff --git a/doc/fluid/api/framework/ExecutionStrateg y.rst b/doc/fluid/api/framework/ExecutionStrateg y.rst new file mode 100644 index 0000000000000000000000000000000000000000..bfc3a5c88c4d89b8b2c75bc9708796b7953becef --- /dev/null +++ b/doc/fluid/api/framework/ExecutionStrateg y.rst @@ -0,0 +1,7 @@ +.. 
_api_framework_ExecutionStrateg y: + +ExecutionStrateg y +------------------------------- +:doc_source: paddle.fluid.compiler.ExecutionStrateg y + + diff --git a/doc/fluid/api/framework/Executor.rst b/doc/fluid/api/framework/Executor.rst new file mode 100644 index 0000000000000000000000000000000000000000..9ca83758b42f769291671e2acc46f4db815f5bd1 --- /dev/null +++ b/doc/fluid/api/framework/Executor.rst @@ -0,0 +1,7 @@ +.. _api_framework_Executor: + +Executor +------------------------------- +:doc_source: paddle.fluid.executor.Executor + + diff --git a/doc/fluid/api/framework/ParallelExecutor.rst b/doc/fluid/api/framework/ParallelExecutor.rst new file mode 100644 index 0000000000000000000000000000000000000000..f08e60ec512077ba378cb3193344ee5d0ed8a3a0 --- /dev/null +++ b/doc/fluid/api/framework/ParallelExecutor.rst @@ -0,0 +1,7 @@ +.. _api_framework_ParallelExecutor: + +ParallelExecutor +------------------------------- +:doc_source: paddle.fluid.parallel_executor.ParallelExecutor + + diff --git a/doc/fluid/api/framework/ParamAttr.rst b/doc/fluid/api/framework/ParamAttr.rst new file mode 100644 index 0000000000000000000000000000000000000000..505d47e2c53d861d08056401e88c9caf06a33996 --- /dev/null +++ b/doc/fluid/api/framework/ParamAttr.rst @@ -0,0 +1,7 @@ +.. _api_framework_ParamAttr: + +ParamAttr +------------------------------- +:doc_source: paddle.fluid.param_attr.ParamAttr + + diff --git a/doc/fluid/api/framework/Print.rst b/doc/fluid/api/framework/Print.rst new file mode 100644 index 0000000000000000000000000000000000000000..159499711821704c12f638f1b59efe0b3e5c92f1 --- /dev/null +++ b/doc/fluid/api/framework/Print.rst @@ -0,0 +1,7 @@ +.. _api_framework_Print: + +Print +------------------------------- +:doc_source: paddle.fluid.layers.control_flow.Print + + diff --git a/doc/fluid/api/framework/Program.rst b/doc/fluid/api/framework/Program.rst new file mode 100644 index 0000000000000000000000000000000000000000..f118dc60164aee8ccc56e0b9aa0954883c39a203 --- /dev/null +++ b/doc/fluid/api/framework/Program.rst @@ -0,0 +1,7 @@ +.. _api_framework_Program: + +Program +------------------------------- +:doc_source: paddle.fluid.framework.Program + + diff --git a/doc/fluid/api/framework/Variable.rst b/doc/fluid/api/framework/Variable.rst new file mode 100644 index 0000000000000000000000000000000000000000..f25d40720945653915f12a67f81d918696e178c1 --- /dev/null +++ b/doc/fluid/api/framework/Variable.rst @@ -0,0 +1,7 @@ +.. _api_framework_Variable: + +Variable +------------------------------- +:doc_source: paddle.fluid.framework.Variable + + diff --git a/doc/fluid/api/framework/WeightNormParamAttr.rst b/doc/fluid/api/framework/WeightNormParamAttr.rst new file mode 100644 index 0000000000000000000000000000000000000000..dc669a67885fe742876c8e4a00e1fd6849e97ec9 --- /dev/null +++ b/doc/fluid/api/framework/WeightNormParamAttr.rst @@ -0,0 +1,7 @@ +.. _api_framework_WeightNormParamAttr: + +WeightNormParamAttr +------------------------------- +:doc_source: paddle.fluid.param_attr.WeightNormParamAttr + + diff --git a/doc/fluid/api/framework/append_backward.rst b/doc/fluid/api/framework/append_backward.rst new file mode 100644 index 0000000000000000000000000000000000000000..a8b85f6364c10bccaccc4b5f3a0ee255bb279849 --- /dev/null +++ b/doc/fluid/api/framework/append_backward.rst @@ -0,0 +1,7 @@ +.. 
_api_framework_append_backward: + +append_backward +------------------------------- +:doc_source: paddle.fluid.backward.append_backward + + diff --git a/doc/fluid/api/framework/create_global_var.rst b/doc/fluid/api/framework/create_global_var.rst new file mode 100644 index 0000000000000000000000000000000000000000..7606f1657574568651a2d02d4b82740da35e8155 --- /dev/null +++ b/doc/fluid/api/framework/create_global_var.rst @@ -0,0 +1,7 @@ +.. _api_framework_create_global_var: + +create_global_var +------------------------------- +:doc_source: paddle.fluid.layers.tensor.create_global_var + + diff --git a/doc/fluid/api/framework/create_parameter.rst b/doc/fluid/api/framework/create_parameter.rst new file mode 100644 index 0000000000000000000000000000000000000000..97c4c1e58d0d926617e3a39fd743ffc6530f4066 --- /dev/null +++ b/doc/fluid/api/framework/create_parameter.rst @@ -0,0 +1,7 @@ +.. _api_framework_create_parameter: + +create_parameter +------------------------------- +:doc_source: paddle.fluid.layers.create_parameter + + diff --git a/doc/fluid/api/framework/default_main_program.rst b/doc/fluid/api/framework/default_main_program.rst new file mode 100644 index 0000000000000000000000000000000000000000..774845e799986d60abda4d3aa7a84346b586a9c1 --- /dev/null +++ b/doc/fluid/api/framework/default_main_program.rst @@ -0,0 +1,7 @@ +.. _api_framework_default_main_program: + +default_main_program +------------------------------- +:doc_source: paddle.fluid.framework.default_main_program + + diff --git a/doc/fluid/api/framework/default_startup_program.rst b/doc/fluid/api/framework/default_startup_program.rst new file mode 100644 index 0000000000000000000000000000000000000000..72872cc3ac2a325f8c459a8a8c9d8f681e7aaff7 --- /dev/null +++ b/doc/fluid/api/framework/default_startup_program.rst @@ -0,0 +1,7 @@ +.. _api_framework_default_startup_program: + +default_startup_program +------------------------------- +:doc_source: paddle.fluid.framework.default_startup_program + + diff --git a/doc/fluid/api/framework/global_scope.rst b/doc/fluid/api/framework/global_scope.rst new file mode 100644 index 0000000000000000000000000000000000000000..452256cf58458495e204e55cb585858eb85f741d --- /dev/null +++ b/doc/fluid/api/framework/global_scope.rst @@ -0,0 +1,7 @@ +.. _api_framework_global_scope: + +global_scope +------------------------------- +:doc_source: paddle.fluid.executor.global_scope + + diff --git a/doc/fluid/api/framework/gradients.rst b/doc/fluid/api/framework/gradients.rst new file mode 100644 index 0000000000000000000000000000000000000000..2c082237025e5597e910e5786d1b06ac88cfcfeb --- /dev/null +++ b/doc/fluid/api/framework/gradients.rst @@ -0,0 +1,7 @@ +.. _api_framework_gradients: + +gradients +------------------------------- +:doc_source: paddle.fluid.backward.gradients + + diff --git a/doc/fluid/api/framework/name_scope.rst b/doc/fluid/api/framework/name_scope.rst new file mode 100644 index 0000000000000000000000000000000000000000..616c4cd677bca531fefa3c4719839cc52341a608 --- /dev/null +++ b/doc/fluid/api/framework/name_scope.rst @@ -0,0 +1,7 @@ +.. _api_framework_name_scope: + +name_scope +------------------------------- +:doc_source: paddle.fluid.framework.name_scope + + diff --git a/doc/fluid/api/framework/program_guard.rst b/doc/fluid/api/framework/program_guard.rst new file mode 100644 index 0000000000000000000000000000000000000000..d68a50af22b0225a3c3059f02c9c9e3c8a2b57b9 --- /dev/null +++ b/doc/fluid/api/framework/program_guard.rst @@ -0,0 +1,7 @@ +.. 
_api_framework_program_guard: + +program_guard +------------------------------- +:doc_source: paddle.fluid.framework.program_guard + + diff --git a/doc/fluid/api/framework/py_func.rst b/doc/fluid/api/framework/py_func.rst new file mode 100644 index 0000000000000000000000000000000000000000..57a4c4194a4523fecdfc0c7d0f4e00880e300bb3 --- /dev/null +++ b/doc/fluid/api/framework/py_func.rst @@ -0,0 +1,7 @@ +.. _api_framework_py_func: + +py_func +------------------------------- +:doc_source: paddle.fluid.layers.nn.py_func + + diff --git a/doc/fluid/api/framework/scope_guard.rst b/doc/fluid/api/framework/scope_guard.rst new file mode 100644 index 0000000000000000000000000000000000000000..661564de6eef232a19842a903c5ca76ba1abceea --- /dev/null +++ b/doc/fluid/api/framework/scope_guard.rst @@ -0,0 +1,7 @@ +.. _api_framework_scope_guard: + +scope_guard +------------------------------- +:doc_source: paddle.fluid.executor.scope_guard + + diff --git a/doc/fluid/api/gen_doc.py b/doc/fluid/api/gen_doc.py index a45f854ef38715f66d019a58edb4e17dcb1a74c5..1f4f3ad49332e585cc99c23228928dd59138b50a 100644 --- a/doc/fluid/api/gen_doc.py +++ b/doc/fluid/api/gen_doc.py @@ -19,18 +19,41 @@ import types import os import contextlib import paddle.fluid as fluid +import paddle.tensor as tensor +import paddle.nn as nn +import paddle.optimizer as optimizer + +#import paddle.complex as complex +#import paddle.framework as framework + def parse_arg(): parser = argparse.ArgumentParser() parser.add_argument('--submodules', nargs="*") parser.add_argument( - '--module_name', type=str, help='Generate the documentation of which module') + '--module_name', + type=str, + help='Generate the documentation of which module') parser.add_argument( '--module_prefix', type=str, help='Generate the prefix of module') parser.add_argument( - '--output', type=str, help='Output file or output directory for output rst') + '--output', + type=str, + help='Output file or output directory for output rst') + parser.add_argument( + '--output_name', + type=str, + help='Output file or output directory for output rst') parser.add_argument( - '--to_multiple_files', type=bool, default=False, help='Whether to separate to multiple files') + '--output_dir', + type=str, + help='Output file or output directory for output rst') + parser.add_argument( + '--to_multiple_files', + type=bool, + default=False, + help='Whether to separate to multiple files') + return parser.parse_args() def print_item(self, name): @@ -44,8 +67,9 @@ def parse_arg(): else: pass + class DocGenerator(object): - def __init__(self, module_name=None, module_prefix=None): + def __init__(self, module_name=None, module_prefix=None): self.module_name = module_name self.module_prefix = module_prefix self.stream = None @@ -53,7 +77,7 @@ class DocGenerator(object): @contextlib.contextmanager def guard(self, filename): assert self.stream is None, "stream must be None" - self.stream = open(filename, 'w') + self.stream = open(filename, 'w') yield self.stream.close() self.stream = None @@ -61,20 +85,21 @@ class DocGenerator(object): def print_submodule(self, submodule_name): submodule = getattr(self.module, submodule_name) if submodule is None: - raise ValueError("Cannot find submodule {0}".format(submodule_name)) + raise ValueError( + "Cannot find submodule {0}".format(submodule_name)) self.print_section(submodule_name) - for item in sorted(submodule.__all__,key=str.lower): + for item in sorted(submodule.__all__, key=str.lower): self.print_item(item) def print_current_module(self): - for item in 
sorted(self.module.__all__,key=str.lower): + for item in sorted(self.module.__all__, key=str.lower): self.print_item(item) def print_section(self, name): self._print_header_(name, dot='=', is_title=False) - def print_item(self, name): + def print_item(self, name, output_name): item = getattr(self.module, name, None) if isinstance(item, types.TypeType): self.print_class(name) @@ -82,7 +107,7 @@ class DocGenerator(object): self.print_method(name) else: self.stream.close() - path = os.getcwd()+"/fluid/"+name+".rst" + path = os.getcwd() + "/" + output_name + "/" + name + ".rst" if name != "PipeReader": os.remove(path) @@ -140,7 +165,9 @@ class DocGenerator(object): self.stream.write(".. _api_{0}_{1}:\n\n".format("_".join( self.module_prefix.split(".")), name)) -def generate_doc(module_name, module_prefix, output, to_multiple_files): + +def generate_doc(module_name, module_prefix, output, output_name, + to_multiple_files, output_dir): if module_name == "": module_name = None @@ -150,25 +177,31 @@ def generate_doc(module_name, module_prefix, output, to_multiple_files): gen = DocGenerator() if module_name is None: - gen.module = fluid - gen.module_name = 'fluid' + gen.module = eval(output_name) + gen.module_name = str(output_name) else: - gen.module = fluid + gen.module = eval(output_name) for each_module_name in module_name.split('.'): if not hasattr(gen.module, each_module_name): raise ValueError("Cannot find fluid.{0}".format(module_name)) else: gen.module = getattr(gen.module, each_module_name) - gen.module_name = "fluid." + module_name + gen.module_name = output_name + "." + module_name if module_prefix is None: gen.module_prefix = gen.module_name else: - gen.module_prefix = "fluid." + module_prefix + gen.module_prefix = output_name + "." + module_prefix + + dirname = output if to_multiple_files else os.path.dirname(output) + + if output_dir != None: + dirname = output_dir + "/" + dirname + output = output_dir + "/" + output - dirname = output if to_multiple_files else os.path.dirname(output) - if len(dirname) > 0 and (not os.path.exists(dirname) or not os.path.isdir(dirname)): + if len(dirname) > 0 and (not os.path.exists(dirname) or + not os.path.isdir(dirname)): os.makedirs(dirname) if not to_multiple_files: @@ -177,7 +210,7 @@ def generate_doc(module_name, module_prefix, output, to_multiple_files): prefix_len = len(gen.module_prefix) assert gen.module_prefix == gen.module_name[0:prefix_len], \ "module_prefix must be prefix of module_name" - diff_name = gen.module_name[prefix_len+1:] + diff_name = gen.module_name[prefix_len + 1:] if diff_name != "": header_name = diff_name else: @@ -189,17 +222,18 @@ def generate_doc(module_name, module_prefix, output, to_multiple_files): gen._print_header_(header_name, dot='=', is_title=True) gen.print_current_module() else: - apis = sorted(gen.module.__all__,key=str.lower) + apis = sorted(gen.module.__all__, key=str.lower) for api in apis: header_name = api with gen.guard(os.path.join(output, api + '.rst')): gen.print_header_reminder() - gen.print_item(api) + gen.print_item(api, output_name) def main(): args = parse_arg() - generate_doc(args.module_name, args.module_prefix, args.output, args.to_multiple_files) + generate_doc(args.module_name, args.module_prefix, args.output, + args.output_name, args.to_multiple_files, args.output_dir) if __name__ == '__main__': diff --git a/doc/fluid/api/gen_doc.sh b/doc/fluid/api/gen_doc.sh index 1e833161ef0e225e4725136ad3d466d945b69bec..5284b277e24cf9ea8eeaf79c0aeb86c8fe5f6904 100644 --- a/doc/fluid/api/gen_doc.sh +++ 
b/doc/fluid/api/gen_doc.sh @@ -1,23 +1,44 @@ #!/bin/bash -#for module in nn -#do -# python gen_doc.py --module_name layers.${module} --module_prefix layers --output layers/${module} --to_multiple_files True -#done - -#for module in control_flow nn io ops tensor learning_rate_scheduler detection metric_op -#do -# python gen_doc.py --module_name layers.${module} --module_prefix layers --output layers/${module}.rst -#done - -for module in layers dataset clip metrics executor initializer io nets optimizer profiler regularizer transpiler backward profiler unique_name dygraph +for module in layers dataset clip metrics executor initializer io nets optimizer profiler regularizer transpiler backward profiler unique_name dygraph framework do - python gen_doc.py --module_name ${module} --module_prefix ${module} --output ${module} --to_multiple_files True + python gen_doc.py --module_name ${module} --module_prefix ${module} --output ${module} --output_name fluid --to_multiple_files True python gen_module_index.py ${module} fluid.${module} done -python gen_doc.py --module_name "" --module_prefix "" --output fluid --to_multiple_files True +python gen_doc.py --module_name "" --module_prefix "" --output fluid --output_name fluid --to_multiple_files True python gen_module_index.py fluid fluid +# tensor +for module in math random stat linalg search +do + python gen_doc.py --module_name ${module} --module_prefix ${module} --output ${module} --output_name tensor --to_multiple_files True --output_dir tensor + python gen_module_index.py tensor.${module} ${module} +done + +python gen_module_index.py tensor paddle.tensor + +for module in math manipulation linalg +do + python gen_doc.py --module_name tensor.${module} --module_prefix tensor.${module} --output tensor/${module} --output_name complex --to_multiple_files True --output_dir complex + python gen_module_index.py complex.tensor.${module} ${module} +done + +python gen_module_index.py complex.tensor tensor +python gen_module_index.py complex paddle.complex +python gen_module_index.py framework paddle.framework + + +# nn +for module in loss activation +do + python gen_doc.py --module_name ${module} --module_prefix ${module} --output ${module} --output_name nn --to_multiple_files True --output_dir nn + python gen_module_index.py nn.${module} ${module} +done + +python gen_doc.py --module_name "" --module_prefix "" --output nn --output_name nn --to_multiple_files True +python gen_module_index.py nn paddle.nn + +# index.rst python gen_index.py diff --git a/doc/fluid/api/gen_index.py b/doc/fluid/api/gen_index.py index d34142b395bb49a35cac273afbe60a571fcdf3c2..4cc7272b03aa0fec3eefe543d7ff7ad791d6e1fd 100644 --- a/doc/fluid/api/gen_index.py +++ b/doc/fluid/api/gen_index.py @@ -4,7 +4,7 @@ import glob import os if __name__ == '__main__': - with open('index_en.rst', 'w') as file_object: + with open('index_en.rst', 'w') as file_object: file_object = open('index_en.rst', 'w') file_object.write('''============= API Reference @@ -14,11 +14,10 @@ API Reference :maxdepth: 1 ../api_guides/index_en.rst - fluid.rst ''') target_dirs = ['.', 'data'] - + file_names = [] for target_dir in target_dirs: if target_dir == '.': @@ -28,5 +27,14 @@ API Reference file_names.extend(glob.glob(pattern)) for file_name in sorted(file_names): - if file_name not in ['index_en.rst', 'fluid.rst']: - file_object.write(' '+file_name + "\n") + with open(file_name, 'r') as f: + for i in range(2): + line = f.readline().strip() + if line.find('paddle.') != -1: + file_object.write(' ' + file_name + "\n") 
+ file_names.remove(file_name) + + file_object.write(' ' + 'fluid.rst' + "\n") + for file_name in sorted(file_names): + if file_name not in ['index_en.rst']: + file_object.write(' ' + file_name + "\n") diff --git a/doc/fluid/api/imperative.rst b/doc/fluid/api/imperative.rst new file mode 100644 index 0000000000000000000000000000000000000000..f138e06701b138dc109dab2e3b1c17832658d390 --- /dev/null +++ b/doc/fluid/api/imperative.rst @@ -0,0 +1,29 @@ +======================= +paddle.imperative +======================= + +.. toctree:: + :maxdepth: 1 + + imperative/CosineDecay.rst + imperative/DataParallel.rst + imperative/declarative.rst + imperative/enabled.rst + imperative/ExponentialDecay.rst + imperative/grad.rst + imperative/guard.rst + imperative/InverseTimeDecay.rst + imperative/jit.rst + imperative/load.rst + imperative/NaturalExpDecay.rst + imperative/no_grad.rst + imperative/NoamDecay.rst + imperative/ParallelEnv.rst + imperative/PiecewiseDecay.rst + imperative/PolynomialDecay.rst + imperative/prepare_context.rst + imperative/ProgramTranslator.rst + imperative/save.rst + imperative/to_variable.rst + imperative/TracedLayer.rst + imperative/TranslatedLayer.rst diff --git a/doc/fluid/api/imperative/CosineDecay.rst b/doc/fluid/api/imperative/CosineDecay.rst new file mode 100644 index 0000000000000000000000000000000000000000..5082f7224c9e7ef13e8e52f0f58a4689fd0e878e --- /dev/null +++ b/doc/fluid/api/imperative/CosineDecay.rst @@ -0,0 +1,7 @@ +.. _api_imperative_CosineDecay: + +CosineDecay +------------------------------- +:doc_source: paddle.fluid.dygraph.learning_rate_scheduler.CosineDecay + + diff --git a/doc/fluid/api/imperative/DataParallel.rst b/doc/fluid/api/imperative/DataParallel.rst new file mode 100644 index 0000000000000000000000000000000000000000..9ee8b0b73be302f847e1a8fef795334e3c063911 --- /dev/null +++ b/doc/fluid/api/imperative/DataParallel.rst @@ -0,0 +1,7 @@ +.. _api_imperative_DataParallel: + +DataParallel +------------------------------- +:doc_source: paddle.fluid.dygraph.parallel.DataParallel + + diff --git a/doc/fluid/api/imperative/ExponentialDecay.rst b/doc/fluid/api/imperative/ExponentialDecay.rst new file mode 100644 index 0000000000000000000000000000000000000000..758a162e99ec1d29bd82dd064159e8c3df48558b --- /dev/null +++ b/doc/fluid/api/imperative/ExponentialDecay.rst @@ -0,0 +1,7 @@ +.. _api_imperative_ExponentialDecay: + +ExponentialDecay +------------------------------- +:doc_source: paddle.fluid.dygraph.learning_rate_scheduler.ExponentialDecay + + diff --git a/doc/fluid/api/imperative/InverseTimeDecay.rst b/doc/fluid/api/imperative/InverseTimeDecay.rst new file mode 100644 index 0000000000000000000000000000000000000000..0e760981dc587181d6882f2cd53b662dc123b44c --- /dev/null +++ b/doc/fluid/api/imperative/InverseTimeDecay.rst @@ -0,0 +1,7 @@ +.. _api_imperative_InverseTimeDecay: + +InverseTimeDecay +------------------------------- +:doc_source: paddle.fluid.dygraph.learning_rate_scheduler.InverseTimeDecay + + diff --git a/doc/fluid/api/imperative/NaturalExpDecay.rst b/doc/fluid/api/imperative/NaturalExpDecay.rst new file mode 100644 index 0000000000000000000000000000000000000000..50ed2fd4a0a6dc0d0b58ff3867ccfcdb718039da --- /dev/null +++ b/doc/fluid/api/imperative/NaturalExpDecay.rst @@ -0,0 +1,7 @@ +.. 
_api_imperative_NaturalExpDecay: + +NaturalExpDecay +------------------------------- +:doc_source: paddle.fluid.dygraph.learning_rate_scheduler.NaturalExpDecay + + diff --git a/doc/fluid/api/imperative/NoamDecay.rst b/doc/fluid/api/imperative/NoamDecay.rst new file mode 100644 index 0000000000000000000000000000000000000000..b126e65f1d9dc151d21269119c0e272a2ba7d9bb --- /dev/null +++ b/doc/fluid/api/imperative/NoamDecay.rst @@ -0,0 +1,7 @@ +.. _api_imperative_NoamDecay: + +NoamDecay +------------------------------- +:doc_source: paddle.fluid.dygraph.learning_rate_scheduler.NoamDecay + + diff --git a/doc/fluid/api/imperative/ParallelEnv.rst b/doc/fluid/api/imperative/ParallelEnv.rst new file mode 100644 index 0000000000000000000000000000000000000000..edfe9fdb960ccb2c9f9487297869e820d900db9d --- /dev/null +++ b/doc/fluid/api/imperative/ParallelEnv.rst @@ -0,0 +1,7 @@ +.. _api_imperative_ParallelEnv: + +ParallelEnv +------------------------------- +:doc_source: paddle.fluid.dygraph.parallel.ParallelEnv + + diff --git a/doc/fluid/api/imperative/PiecewiseDecay.rst b/doc/fluid/api/imperative/PiecewiseDecay.rst new file mode 100644 index 0000000000000000000000000000000000000000..438c6c012c547366cae0475b7925da9d46b32970 --- /dev/null +++ b/doc/fluid/api/imperative/PiecewiseDecay.rst @@ -0,0 +1,7 @@ +.. _api_imperative_PiecewiseDecay: + +PiecewiseDecay +------------------------------- +:doc_source: paddle.fluid.dygraph.learning_rate_scheduler.PiecewiseDecay + + diff --git a/doc/fluid/api/imperative/PolynomialDecay.rst b/doc/fluid/api/imperative/PolynomialDecay.rst new file mode 100644 index 0000000000000000000000000000000000000000..c4ba271cc5f33b70a945ca15ff7d9847a945ccd1 --- /dev/null +++ b/doc/fluid/api/imperative/PolynomialDecay.rst @@ -0,0 +1,7 @@ +.. _api_imperative_PolynomialDecay: + +PolynomialDecay +------------------------------- +:doc_source: paddle.fluid.dygraph.learning_rate_scheduler.PolynomialDecay + + diff --git a/doc/fluid/api/imperative/ProgramTranslator.rst b/doc/fluid/api/imperative/ProgramTranslator.rst new file mode 100644 index 0000000000000000000000000000000000000000..3b1117d15aafd012bdf7a886ec3d43c3881cf9a0 --- /dev/null +++ b/doc/fluid/api/imperative/ProgramTranslator.rst @@ -0,0 +1,7 @@ +.. _api_imperative_ProgramTranslator: + +ProgramTranslator +------------------------------- +:doc_source: paddle.fluid.dygraph.ProgramTranslator + + diff --git a/doc/fluid/api/imperative/TracedLayer.rst b/doc/fluid/api/imperative/TracedLayer.rst new file mode 100644 index 0000000000000000000000000000000000000000..0d623135fe8f46b7f2bfe48f3b4f9c56c19ab9da --- /dev/null +++ b/doc/fluid/api/imperative/TracedLayer.rst @@ -0,0 +1,7 @@ +.. _api_imperative_TracedLayer: + +TracedLayer +------------------------------- +:doc_source: paddle.fluid.dygraph.jit.TracedLayer + + diff --git a/doc/fluid/api/imperative/TranslatedLayer.rst b/doc/fluid/api/imperative/TranslatedLayer.rst new file mode 100644 index 0000000000000000000000000000000000000000..0299a9f57392e267ae015947345249784fd929f5 --- /dev/null +++ b/doc/fluid/api/imperative/TranslatedLayer.rst @@ -0,0 +1,5 @@ +.. _api_imperative_TranslatedLayer: + +TranslatedLayer +------------------------------- +:doc_source: paddle.fluid.dygraph.io.TranslatedLayer diff --git a/doc/fluid/api/imperative/declarative.rst b/doc/fluid/api/imperative/declarative.rst new file mode 100644 index 0000000000000000000000000000000000000000..1d79e5161282a7344568d9223bb5f5a99fefd44f --- /dev/null +++ b/doc/fluid/api/imperative/declarative.rst @@ -0,0 +1,7 @@ +.. 
_api_imperative_declarative: + +declarative +------------------------------- +:doc_source: paddle.fluid.dygraph.jit.declarative + + diff --git a/doc/fluid/api/imperative/enabled.rst b/doc/fluid/api/imperative/enabled.rst new file mode 100644 index 0000000000000000000000000000000000000000..7279ce01e99e43e7e4a58f8484fbae582ebf62b0 --- /dev/null +++ b/doc/fluid/api/imperative/enabled.rst @@ -0,0 +1,7 @@ +.. _api_imperative_enabled: + +enabled +------------------------------- +:doc_source: paddle.fluid.dygraph.base.enabled + + diff --git a/doc/fluid/api/imperative/grad.rst b/doc/fluid/api/imperative/grad.rst new file mode 100644 index 0000000000000000000000000000000000000000..e147b53f0304af215ebd98b9b3b225f698a6fa31 --- /dev/null +++ b/doc/fluid/api/imperative/grad.rst @@ -0,0 +1,7 @@ +.. _api_imperative_grad: + +grad +------------------------------- +:doc_source: paddle.fluid.dygraph.base.grad + + diff --git a/doc/fluid/api/imperative/guard.rst b/doc/fluid/api/imperative/guard.rst new file mode 100644 index 0000000000000000000000000000000000000000..776b12e7ad95b98784b27e2d31252e01d9e0a557 --- /dev/null +++ b/doc/fluid/api/imperative/guard.rst @@ -0,0 +1,7 @@ +.. _api_imperative_guard: + +guard +------------------------------- +:doc_source: paddle.fluid.dygraph.base.guard + + diff --git a/doc/fluid/api/imperative/jit.rst b/doc/fluid/api/imperative/jit.rst new file mode 100644 index 0000000000000000000000000000000000000000..7853a048535c045bae18f71c8b4d7f1e44cc65eb --- /dev/null +++ b/doc/fluid/api/imperative/jit.rst @@ -0,0 +1,10 @@ +=== +jit +=== + +.. toctree:: + :maxdepth: 1 + + jit/save.rst + jit/load.rst + jit/SaveLoadConfig.rst diff --git a/doc/fluid/api/imperative/jit/SaveLoadConfig.rst b/doc/fluid/api/imperative/jit/SaveLoadConfig.rst new file mode 100644 index 0000000000000000000000000000000000000000..cab85776ec33f9cab2dc788ebbb3081fca1d4035 --- /dev/null +++ b/doc/fluid/api/imperative/jit/SaveLoadConfig.rst @@ -0,0 +1,5 @@ +.. _api_imperative_jit_SaveLoadConfig: + +SaveLoadConfig +------------------------------- +:doc_source: paddle.fluid.dygraph.jit.SaveLoadConfig diff --git a/doc/fluid/api/imperative/jit/load.rst b/doc/fluid/api/imperative/jit/load.rst new file mode 100644 index 0000000000000000000000000000000000000000..723a87936a8f26653eb2b34f361aa35a4b3fd74f --- /dev/null +++ b/doc/fluid/api/imperative/jit/load.rst @@ -0,0 +1,5 @@ +.. _api_imperative_jit_load: + +load +------------------------------- +:doc_source: paddle.fluid.dygraph.jit.load diff --git a/doc/fluid/api/imperative/jit/save.rst b/doc/fluid/api/imperative/jit/save.rst new file mode 100644 index 0000000000000000000000000000000000000000..b809a99166e35edd65af253dffe40053776a68dc --- /dev/null +++ b/doc/fluid/api/imperative/jit/save.rst @@ -0,0 +1,5 @@ +.. _api_imperative_jit_save: + +save +------------------------------- +:doc_source: paddle.fluid.dygraph.jit.save diff --git a/doc/fluid/api/imperative/load.rst b/doc/fluid/api/imperative/load.rst new file mode 100644 index 0000000000000000000000000000000000000000..819117f0388e32e1893d3827b70ce88780340644 --- /dev/null +++ b/doc/fluid/api/imperative/load.rst @@ -0,0 +1,7 @@ +.. 
_api_imperative_load: + +load +------------------------------- +:doc_source: paddle.fluid.dygraph.checkpoint.load_dygraph + + diff --git a/doc/fluid/api/imperative/no_grad.rst b/doc/fluid/api/imperative/no_grad.rst new file mode 100644 index 0000000000000000000000000000000000000000..0a737ec558cf6d96d6d6204046164871f0138f9d --- /dev/null +++ b/doc/fluid/api/imperative/no_grad.rst @@ -0,0 +1,7 @@ +.. _api_imperative_no_grad: + +no_grad +------------------------------- +:doc_source: paddle.fluid.dygraph.base.no_grad + + diff --git a/doc/fluid/api/imperative/prepare_context.rst b/doc/fluid/api/imperative/prepare_context.rst new file mode 100644 index 0000000000000000000000000000000000000000..33c89846e1725cb70cd170f08223318e583dc531 --- /dev/null +++ b/doc/fluid/api/imperative/prepare_context.rst @@ -0,0 +1,7 @@ +.. _api_imperative_prepare_context: + +prepare_context +------------------------------- +:doc_source: paddle.fluid.dygraph.parallel.prepare_context + + diff --git a/doc/fluid/api/imperative/save.rst b/doc/fluid/api/imperative/save.rst new file mode 100644 index 0000000000000000000000000000000000000000..90353ff25557fafe4c50959289867df3dd39b54c --- /dev/null +++ b/doc/fluid/api/imperative/save.rst @@ -0,0 +1,7 @@ +.. _api_imperative_save: + +save +------------------------------- +:doc_source: paddle.fluid.dygraph.checkpoint.save_dygraph + + diff --git a/doc/fluid/api/imperative/to_variable.rst b/doc/fluid/api/imperative/to_variable.rst new file mode 100644 index 0000000000000000000000000000000000000000..21bb639afc7e6f13facece0cb38680ae3735fedb --- /dev/null +++ b/doc/fluid/api/imperative/to_variable.rst @@ -0,0 +1,7 @@ +.. _api_imperative_to_variable: + +to_variable +------------------------------- +:doc_source: paddle.fluid.dygraph.base.to_variable + + diff --git a/doc/fluid/api/index_en.rst b/doc/fluid/api/index_en.rst index dce0d7cf9679ca2b47fd2a2713c31d5538b6a61c..e3ac0dc69c3487f301cb42c6053cf2f4bd82ccdc 100644 --- a/doc/fluid/api/index_en.rst +++ b/doc/fluid/api/index_en.rst @@ -6,21 +6,32 @@ API Reference :maxdepth: 1 ../api_guides/index_en.rst + dataset.rst + declarative.rst + distributed.rst + framework.rst + imperative.rst + io.rst + metric.rst + nn.rst + optimizer.rst + static.rst + tensor.rst fluid.rst backward.rst clip.rst data/data_reader.rst data/dataset.rst - dataset.rst dygraph.rst executor.rst + fluid.rst initializer.rst - io.rst layers.rst metrics.rst nets.rst - optimizer.rst + paddle.rst profiler.rst regularizer.rst transpiler.rst unique_name.rst + review_tmp.rst diff --git a/doc/fluid/api/io.rst b/doc/fluid/api/io.rst index db67b4f7c9e7d51326ea92f4086739f0b2e0bdb2..666d584ca533027ddeb3f1a16575365804260b81 100644 --- a/doc/fluid/api/io.rst +++ b/doc/fluid/api/io.rst @@ -1,17 +1,19 @@ ======== -fluid.io +paddle.io ======== .. 
toctree:: :maxdepth: 1 io/batch.rst + io/BatchSampler.rst io/buffered.rst io/cache.rst io/chain.rst io/compose.rst io/ComposeNotAligned.rst io/DataLoader.rst + io/Dataset.rst io/firstn.rst io/get_program_parameter.rst io/get_program_persistable_vars.rst @@ -26,9 +28,12 @@ fluid.io io/PyReader.rst io/save.rst io/save_inference_model.rst + io/save_inference_model.rst io/save_params.rst io/save_persistables.rst io/save_vars.rst io/set_program_state.rst + io/set_program_state.rst + io/shuffle.rst io/shuffle.rst io/xmap_readers.rst diff --git a/doc/fluid/api/io/BatchSampler.rst b/doc/fluid/api/io/BatchSampler.rst new file mode 100644 index 0000000000000000000000000000000000000000..91b872bec319a6c9e4c2f7031448b73afbab5a9c --- /dev/null +++ b/doc/fluid/api/io/BatchSampler.rst @@ -0,0 +1,7 @@ +.. _api_io_BatchSampler: + +BatchSampler +------------------------------- +:doc_source: paddle.fluid.dataloader.BatchSampler + + diff --git a/doc/fluid/api/io/ComposeNotAligned.rst b/doc/fluid/api/io/ComposeNotAligned.rst index c2f3f465b7dd287e22c5bdcf2401c40d5494d4f1..3968d80ce3cc5c174763c7b6161c80e3c3840042 100644 --- a/doc/fluid/api/io/ComposeNotAligned.rst +++ b/doc/fluid/api/io/ComposeNotAligned.rst @@ -11,4 +11,3 @@ ComposeNotAligned :inherited-members: :noindex: -This indicates an error state of compose API, which will raise when outputs of readers are not aligned. diff --git a/doc/fluid/api/io/Dataset.rst b/doc/fluid/api/io/Dataset.rst new file mode 100644 index 0000000000000000000000000000000000000000..b86e0a377ff44d7c8c231a1f5af317cf904d3a3e --- /dev/null +++ b/doc/fluid/api/io/Dataset.rst @@ -0,0 +1,7 @@ +.. _api_io_Dataset: + +Dataset +------------------------------- +:doc_source: paddle.fluid.dataloader.Dataset + + diff --git a/doc/fluid/api/layers.rst b/doc/fluid/api/layers.rst index 5c45ac4bdcbf740089a69c4d112fcba63bf10425..0f1fe3c222c5266deacca8603ff10ce9fed33429 100644 --- a/doc/fluid/api/layers.rst +++ b/doc/fluid/api/layers.rst @@ -25,6 +25,7 @@ fluid.layers layers/atan.rst layers/auc.rst layers/autoincreased_step_counter.rst + layers/BasicDecoder.rst layers/batch_norm.rst layers/beam_search.rst layers/beam_search_decode.rst @@ -68,6 +69,7 @@ fluid.layers layers/cumsum.rst layers/data.rst layers/data_norm.rst + layers/DecodeHelper.rst layers/Decoder.rst layers/deformable_conv.rst layers/deformable_roi_pooling.rst @@ -121,6 +123,7 @@ fluid.layers layers/get_tensor_from_selected_rows.rst layers/greater_equal.rst layers/greater_than.rst + layers/GreedyEmbeddingHelper.rst layers/grid_sampler.rst layers/group_norm.rst layers/gru_unit.rst @@ -138,6 +141,7 @@ fluid.layers layers/image_resize.rst layers/image_resize_short.rst layers/increment.rst + layers/inplace_abn.rst layers/instance_norm.rst layers/inverse_time_decay.rst layers/iou_similarity.rst @@ -178,6 +182,7 @@ fluid.layers layers/mul.rst layers/multi_box_head.rst layers/multiclass_nms.rst + layers/matrix_nms.rst layers/multiplex.rst layers/MultivariateNormalDiag.rst layers/natural_exp_decay.rst @@ -239,6 +244,7 @@ fluid.layers layers/rpn_target_assign.rst layers/rsqrt.rst layers/sampled_softmax_with_cross_entropy.rst + layers/SampleEmbeddingHelper.rst layers/sampling_id.rst layers/scale.rst layers/scatter.rst @@ -304,6 +310,7 @@ fluid.layers layers/tensor_array_to_tensor.rst layers/thresholded_relu.rst layers/topk.rst + layers/TrainingHelper.rst layers/transpose.rst layers/unfold.rst layers/Uniform.rst diff --git a/doc/fluid/api/layers/BasicDecoder.rst b/doc/fluid/api/layers/BasicDecoder.rst new file mode 100644 index 
0000000000000000000000000000000000000000..8eb0f78dc8621d42061dfe944106c64007a0d0c1 --- /dev/null +++ b/doc/fluid/api/layers/BasicDecoder.rst @@ -0,0 +1,13 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_fluid_layers_BasicDecoder: + +BasicDecoder +------------ + +.. autoclass:: paddle.fluid.layers.BasicDecoder + :members: + :inherited-members: + :noindex: + diff --git a/doc/fluid/api/layers/DecodeHelper.rst b/doc/fluid/api/layers/DecodeHelper.rst new file mode 100644 index 0000000000000000000000000000000000000000..ba475f2a1daea0e842d07d0372de2a828de2931a --- /dev/null +++ b/doc/fluid/api/layers/DecodeHelper.rst @@ -0,0 +1,13 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_fluid_layers_DecodeHelper: + +DecodeHelper +------------ + +.. autoclass:: paddle.fluid.layers.DecodeHelper + :members: + :inherited-members: + :noindex: + diff --git a/doc/fluid/api/layers/GreedyEmbeddingHelper.rst b/doc/fluid/api/layers/GreedyEmbeddingHelper.rst new file mode 100644 index 0000000000000000000000000000000000000000..fb7741ec499fcff9404d384b760c03613c63a327 --- /dev/null +++ b/doc/fluid/api/layers/GreedyEmbeddingHelper.rst @@ -0,0 +1,13 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_fluid_layers_GreedyEmbeddingHelper: + +GreedyEmbeddingHelper +--------------------- + +.. autoclass:: paddle.fluid.layers.GreedyEmbeddingHelper + :members: + :inherited-members: + :noindex: + diff --git a/doc/fluid/api/layers/SampleEmbeddingHelper.rst b/doc/fluid/api/layers/SampleEmbeddingHelper.rst new file mode 100644 index 0000000000000000000000000000000000000000..99b9ca39900643e5c5c5806106c66a151fdf9a27 --- /dev/null +++ b/doc/fluid/api/layers/SampleEmbeddingHelper.rst @@ -0,0 +1,13 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_fluid_layers_SampleEmbeddingHelper: + +SampleEmbeddingHelper +--------------------- + +.. autoclass:: paddle.fluid.layers.SampleEmbeddingHelper + :members: + :inherited-members: + :noindex: + diff --git a/doc/fluid/api/layers/TrainingHelper.rst b/doc/fluid/api/layers/TrainingHelper.rst new file mode 100644 index 0000000000000000000000000000000000000000..247ac73d1f15cc9413ec60e0dd4d7d9047e308de --- /dev/null +++ b/doc/fluid/api/layers/TrainingHelper.rst @@ -0,0 +1,13 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_fluid_layers_TrainingHelper: + +TrainingHelper +-------------- + +.. autoclass:: paddle.fluid.layers.TrainingHelper + :members: + :inherited-members: + :noindex: + diff --git a/doc/fluid/api/layers/inplace_abn.rst b/doc/fluid/api/layers/inplace_abn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b3b31942f37c7cb43a5d95f1d6965acaf08efdca --- /dev/null +++ b/doc/fluid/api/layers/inplace_abn.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_fluid_layers_inplace_abn: + +inplace_abn +----------- + +.. autofunction:: paddle.fluid.layers.inplace_abn + :noindex: + diff --git a/doc/fluid/api/layers/matrix_nms.rst b/doc/fluid/api/layers/matrix_nms.rst new file mode 100644 index 0000000000000000000000000000000000000000..60bbbeb151bdd87861c37b625139988ce7db9467 --- /dev/null +++ b/doc/fluid/api/layers/matrix_nms.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. 
_api_fluid_layers_matrix_nms: + +matrix_nms +-------------- + +.. autofunction:: paddle.fluid.layers.matrix_nms + :noindex: + diff --git a/doc/fluid/api/metric.rst b/doc/fluid/api/metric.rst new file mode 100644 index 0000000000000000000000000000000000000000..80dc846d48298d9c735dd242e706b69b3a0b4a44 --- /dev/null +++ b/doc/fluid/api/metric.rst @@ -0,0 +1,20 @@ +======================= +paddle.metric +======================= + +.. toctree:: + :maxdepth: 1 + + metric/Accuracy.rst + metric/accuracy.rst + metric/Auc.rst + metric/auc.rst + metric/chunk_eval.rst + metric/ChunkEvaluator.rst + metric/CompositeMetric.rst + metric/cos_sim.rst + metric/DetectionMAP.rst + metric/EditDistance.rst + metric/mean_iou.rst + metric/Precision.rst + metric/Recall.rst diff --git a/doc/fluid/api/metric/ChunkEvaluator.rst b/doc/fluid/api/metric/ChunkEvaluator.rst new file mode 100644 index 0000000000000000000000000000000000000000..0df999058475a86bf32673b784185bb678063e71 --- /dev/null +++ b/doc/fluid/api/metric/ChunkEvaluator.rst @@ -0,0 +1,7 @@ +.. _api_metric_ChunkEvaluator: + +ChunkEvaluator +------------------------------- +:doc_source: paddle.fluid.metrics.ChunkEvaluator + + diff --git a/doc/fluid/api/metric/CompositeMetric.rst b/doc/fluid/api/metric/CompositeMetric.rst new file mode 100644 index 0000000000000000000000000000000000000000..1a752c376c00d17b526145196ba1ca63f7cd65ad --- /dev/null +++ b/doc/fluid/api/metric/CompositeMetric.rst @@ -0,0 +1,7 @@ +.. _api_metric_CompositeMetric: + +CompositeMetric +------------------------------- +:doc_source: paddle.fluid.metrics.CompositeMetric + + diff --git a/doc/fluid/api/metric/DetectionMAP.rst b/doc/fluid/api/metric/DetectionMAP.rst new file mode 100644 index 0000000000000000000000000000000000000000..a1175150c8785242ad997c4794711fbc945e89a3 --- /dev/null +++ b/doc/fluid/api/metric/DetectionMAP.rst @@ -0,0 +1,7 @@ +.. _api_metric_DetectionMAP: + +DetectionMAP +------------------------------- +:doc_source: paddle.fluid.metrics.DetectionMAP + + diff --git a/doc/fluid/api/metric/EditDistance.rst b/doc/fluid/api/metric/EditDistance.rst new file mode 100644 index 0000000000000000000000000000000000000000..b8902e4e8ebfb51385ae518c5e119aaf07f0afab --- /dev/null +++ b/doc/fluid/api/metric/EditDistance.rst @@ -0,0 +1,7 @@ +.. _api_metric_EditDistance: + +EditDistance +------------------------------- +:doc_source: paddle.fluid.metrics.EditDistance + + diff --git a/doc/fluid/api/metric/Precision.rst b/doc/fluid/api/metric/Precision.rst new file mode 100644 index 0000000000000000000000000000000000000000..fcbb3ccda0f4ecfb0ae068bed8e004b094768435 --- /dev/null +++ b/doc/fluid/api/metric/Precision.rst @@ -0,0 +1,7 @@ +.. _api_metric_Precision: + +Precision +------------------------------- +:doc_source: paddle.fluid.metrics.Precision + + diff --git a/doc/fluid/api/metric/Recall.rst b/doc/fluid/api/metric/Recall.rst new file mode 100644 index 0000000000000000000000000000000000000000..7f28b47824166c3137d035526c7ae3c2d08beaaf --- /dev/null +++ b/doc/fluid/api/metric/Recall.rst @@ -0,0 +1,7 @@ +.. _api_metric_Recall: + +Recall +------------------------------- +:doc_source: paddle.fluid.metrics.Recall + + diff --git a/doc/fluid/api/metric/accuracy.rst b/doc/fluid/api/metric/accuracy.rst new file mode 100644 index 0000000000000000000000000000000000000000..6d2395456b67127d758792c2c9a685c30d369117 --- /dev/null +++ b/doc/fluid/api/metric/accuracy.rst @@ -0,0 +1,7 @@ +.. 
_api_metric_accuracy: + +accuracy +------------------------------- +:doc_source: paddle.fluid.layers.metric_op.accuracy + + diff --git a/doc/fluid/api/metric/auc.rst b/doc/fluid/api/metric/auc.rst new file mode 100644 index 0000000000000000000000000000000000000000..475751ed33ca4ffe4f908fbde115b7b5444d4990 --- /dev/null +++ b/doc/fluid/api/metric/auc.rst @@ -0,0 +1,7 @@ +.. _api_metric_auc: + +auc +------------------------------- +:doc_source: paddle.fluid.layers.metric_op.auc + + diff --git a/doc/fluid/api/metric/chunk_eval.rst b/doc/fluid/api/metric/chunk_eval.rst new file mode 100644 index 0000000000000000000000000000000000000000..3a8a19c9ac350820e58813e284aee34d9788c385 --- /dev/null +++ b/doc/fluid/api/metric/chunk_eval.rst @@ -0,0 +1,7 @@ +.. _api_metric_chunk_eval: + +chunk_eval +------------------------------- +:doc_source: paddle.fluid.layers.nn.chunk_eval + + diff --git a/doc/fluid/api/metric/cos_sim.rst b/doc/fluid/api/metric/cos_sim.rst new file mode 100644 index 0000000000000000000000000000000000000000..44f9e1b3561f33ce3a0276fe4751602438e36233 --- /dev/null +++ b/doc/fluid/api/metric/cos_sim.rst @@ -0,0 +1,7 @@ +.. _api_metric_cos_sim: + +cos_sim +------------------------------- +:doc_source: paddle.fluid.layers.nn.cos_sim + + diff --git a/doc/fluid/api/metric/mean_iou.rst b/doc/fluid/api/metric/mean_iou.rst new file mode 100644 index 0000000000000000000000000000000000000000..cff10d56ee46bb209f1288b589c4f448de2f4557 --- /dev/null +++ b/doc/fluid/api/metric/mean_iou.rst @@ -0,0 +1,7 @@ +.. _api_metric_mean_iou: + +mean_iou +------------------------------- +:doc_source: paddle.fluid.layers.nn.mean_iou + + diff --git a/doc/fluid/api/nn.rst b/doc/fluid/api/nn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ef50a1e01e4a900edcc8a1eb073144477b08d584 --- /dev/null +++ b/doc/fluid/api/nn.rst @@ -0,0 +1,175 @@ +========= +paddle.nn +========= + +.. 
toctree:: + :maxdepth: 1 + + nn/activation.rst + nn/adaptive_pool2d.rst + nn/adaptive_pool3d.rst + nn/add_position_encoding.rst + nn/affine_channel.rst + nn/affine_grid.rst + nn/anchor_generator.rst + nn/assign.rst + nn/BatchNorm.rst + nn/beam_search.rst + nn/beam_search_decode.rst + nn/BilinearTensorProduct.rst + nn/bipartite_match.rst + nn/box_clip.rst + nn/box_coder.rst + nn/box_decoder_and_assign.rst + nn/bpr_loss.rst + nn/brelu.rst + nn/case.rst + nn/center_loss.rst + nn/clip.rst + nn/clip_by_norm.rst + nn/collect_fpn_proposals.rst + nn/cond.rst + nn/ConstantPad1d.rst + nn/ConstantPad2d.rst + nn/ConstantPad3d.rst + nn/continuous_value_model.rst + nn/Conv2d.rst + nn/Conv3d.rst + nn/ConvTranspose2d.rst + nn/ConvTranspose3d.rst + nn/cosine_decay.rst + nn/cosine_similarity.rst + nn/CosineSimilarity.rst + nn/cross_entropy.rst + nn/data.rst + nn/deformable_roi_pooling.rst + nn/density_prior_box.rst + nn/detection_output.rst + nn/dice_loss.rst + nn/distribute_fpn_proposals.rst + nn/dropout.rst + nn/edit_distance.rst + nn/elu.rst + nn/Embedding.rst + nn/erf.rst + nn/exponential_decay.rst + nn/filter_by_instag.rst + nn/fsp_matrix.rst + nn/functional.rst + nn/gather_tree.rst + nn/gelu.rst + nn/generate_mask_labels.rst + nn/generate_proposal_labels.rst + nn/generate_proposals.rst + nn/GradientClipByGlobalNorm.rst + nn/GradientClipByNorm.rst + nn/GradientClipByValue.rst + nn/grid_sampler.rst + nn/GroupNorm.rst + nn/hardshrink.rst + nn/hardtanh.rst + nn/hard_sigmoid.rst + nn/hard_swish.rst + nn/hash.rst + nn/hsigmoid.rst + nn/huber_loss.rst + nn/image_resize.rst + nn/image_resize_short.rst + nn/initializer.rst + nn/inverse_time_decay.rst + nn/iou_similarity.rst + nn/kldiv_loss.rst + nn/l2_normalize.rst + nn/label_smooth.rst + nn/Layer.rst + nn/LayerList.rst + nn/LayerNorm.rst + nn/leaky_relu.rst + nn/Linear.rst + nn/linear_lr_warmup.rst + nn/log_loss.rst + nn/log_sigmoid.rst + nn/log_softmax.rst + nn/loss.rst + nn/lrn.rst + nn/margin_rank_loss.rst + nn/matrix_nms.rst + nn/maxout.rst + nn/mse_loss.rst + nn/multiclass_nms.rst + nn/natural_exp_decay.rst + nn/noam_decay.rst + nn/npair_loss.rst + nn/one_hot.rst + nn/pad.rst + nn/pad2d.rst + nn/pad_constant_like.rst + nn/ParameterList.rst + nn/piecewise_decay.rst + nn/pixel_shuffle.rst + nn/polygon_box_transform.rst + nn/polynomial_decay.rst + nn/Pool2D.rst + nn/pool3d.rst + nn/prelu.rst + nn/prior_box.rst + nn/prroi_pool.rst + nn/psroi_pool.rst + nn/random_crop.rst + nn/rank_loss.rst + nn/ReflectionPad1d.rst + nn/ReflectionPad2d.rst + nn/ReLU.rst + nn/relu.rst + nn/relu6.rst + nn/ReplicationPad1d.rst + nn/ReplicationPad2d.rst + nn/ReplicationPad3d.rst + nn/resize_bilinear.rst + nn/resize_nearest.rst + nn/resize_trilinear.rst + nn/retinanet_detection_output.rst + nn/retinanet_target_assign.rst + nn/roi_align.rst + nn/roi_perspective_transform.rst + nn/roi_pool.rst + nn/row_conv.rst + nn/rpn_target_assign.rst + nn/sampled_softmax_with_cross_entropy.rst + nn/selu.rst + nn/Sequential.rst + nn/shuffle_channel.rst + nn/sigmoid_cross_entropy_with_logits.rst + nn/sigmoid_focal_loss.rst + nn/similarity_focus.rst + nn/smooth_l1.rst + nn/soft_relu.rst + nn/softmax.rst + nn/softmax_with_cross_entropy.rst + nn/softplus.rst + nn/softshrink.rst + nn/softsign.rst + nn/space_to_depth.rst + nn/SpectralNorm.rst + nn/square_error_cost.rst + nn/ssd_loss.rst + nn/swish.rst + nn/switch_case.rst + nn/tanhshrink.rst + nn/target_assign.rst + nn/teacher_student_sigmoid_loss.rst + nn/temporal_shift.rst + nn/thresholded_relu.rst + nn/unfold.rst + nn/warpctc.rst + nn/while_loop.rst 
+ nn/yolo_box.rst + nn/yolov3_loss.rst + nn/functional/loss/margin_ranking_loss.rst + nn/functional/activation/sigmoid.rst + nn/layer/loss/MarginRankingLoss.rst + nn/ZeroPad2d.rst + nn/AdaptiveAvgPool2d.rst + nn/AdaptiveAvgPool3d.rst + nn/layer/activation/Sigmoid.rst + nn/Bilinear.rst \ No newline at end of file diff --git a/doc/fluid/api/nn/AdaptiveAvgPool2d.rst b/doc/fluid/api/nn/AdaptiveAvgPool2d.rst new file mode 100644 index 0000000000000000000000000000000000000000..26518d01d0e432d262e4434ced097fbf12e4a117 --- /dev/null +++ b/doc/fluid/api/nn/AdaptiveAvgPool2d.rst @@ -0,0 +1,10 @@ +.. _api_nn_pooling_AdaptiveAvgPool2d: + +AdaptiveAvgPool2d +----------------- + +.. autoclass:: paddle.nn.AdaptiveAvgPool2d + :members: + :inherited-members: + :noindex: + diff --git a/doc/fluid/api/nn/AdaptiveAvgPool3d.rst b/doc/fluid/api/nn/AdaptiveAvgPool3d.rst new file mode 100644 index 0000000000000000000000000000000000000000..70f9e87f2b59bfbf7f754e4a615c49fa85fb6b4f --- /dev/null +++ b/doc/fluid/api/nn/AdaptiveAvgPool3d.rst @@ -0,0 +1,10 @@ +.. _api_nn_pooling_AdaptiveAvgPool3d: + +AdaptiveAvgPool3d +----------------- + +.. autoclass:: paddle.nn.AdaptiveAvgPool3d + :members: + :inherited-members: + :noindex: + diff --git a/doc/fluid/api/nn/BatchNorm.rst b/doc/fluid/api/nn/BatchNorm.rst new file mode 100644 index 0000000000000000000000000000000000000000..b88ccdfa124f2bb0fab620878c61eb62ba22cbe0 --- /dev/null +++ b/doc/fluid/api/nn/BatchNorm.rst @@ -0,0 +1,7 @@ +.. _api_nn_BatchNorm: + +BatchNorm +------------------------------- +:doc_source: paddle.fluid.dygraph.BatchNorm + + diff --git a/doc/fluid/api/nn/Bilinear.rst b/doc/fluid/api/nn/Bilinear.rst new file mode 100644 index 0000000000000000000000000000000000000000..cbd2847b06f01a779e930c042b614219714b89a7 --- /dev/null +++ b/doc/fluid/api/nn/Bilinear.rst @@ -0,0 +1,8 @@ +.. _api_nn_Bilinear: + +Bilinear +------------------------------- + +.. autofunction:: paddle.nn.Bilinear + :noindex: + diff --git a/doc/fluid/api/nn/BilinearTensorProduct.rst b/doc/fluid/api/nn/BilinearTensorProduct.rst new file mode 100644 index 0000000000000000000000000000000000000000..ca0a1245228168f0f036ab0b7f0aff7819928870 --- /dev/null +++ b/doc/fluid/api/nn/BilinearTensorProduct.rst @@ -0,0 +1,7 @@ +.. _api_nn_BilinearTensorProduct: + +BilinearTensorProduct +------------------------------- +:doc_source: paddle.fluid.dygraph.BilinearTensorProduct + + diff --git a/doc/fluid/api/nn/ConstantPad1d.rst b/doc/fluid/api/nn/ConstantPad1d.rst new file mode 100644 index 0000000000000000000000000000000000000000..c50dceea85368677f9b7655f1e086266afc87746 --- /dev/null +++ b/doc/fluid/api/nn/ConstantPad1d.rst @@ -0,0 +1,7 @@ +.. _api_nn_ConstantPad1d: + +ConstantPad1d +------------------------------- +:doc_source: paddle.nn.ConstantPad1d + + diff --git a/doc/fluid/api/nn/ConstantPad2d.rst b/doc/fluid/api/nn/ConstantPad2d.rst new file mode 100644 index 0000000000000000000000000000000000000000..a1b614c7e5e9f2521cf74f5a9e5641632f9f1e86 --- /dev/null +++ b/doc/fluid/api/nn/ConstantPad2d.rst @@ -0,0 +1,7 @@ +.. _api_nn_ConstantPad2d: + +ConstantPad2d +------------------------------- +:doc_source: paddle.nn.ConstantPad2d + + diff --git a/doc/fluid/api/nn/ConstantPad3d.rst b/doc/fluid/api/nn/ConstantPad3d.rst new file mode 100644 index 0000000000000000000000000000000000000000..65bf1aad222361d749893a73882dde6005803290 --- /dev/null +++ b/doc/fluid/api/nn/ConstantPad3d.rst @@ -0,0 +1,7 @@ +.. 
_api_nn_ConstantPad3d: + +ConstantPad3d +------------------------------- +:doc_source: paddle.nn.ConstantPad3d + + diff --git a/doc/fluid/api/nn/Conv2d.rst b/doc/fluid/api/nn/Conv2d.rst new file mode 100644 index 0000000000000000000000000000000000000000..239d4440f1463028830fe7678a04fe909d4392ce --- /dev/null +++ b/doc/fluid/api/nn/Conv2d.rst @@ -0,0 +1,13 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_nn_layer_conv_Conv2d: + +Conv2d +------ + +.. autoclass:: paddle.nn.layer.conv.Conv2d + :members: + :inherited-members: + :noindex: + diff --git a/doc/fluid/api/nn/Conv3d.rst b/doc/fluid/api/nn/Conv3d.rst new file mode 100644 index 0000000000000000000000000000000000000000..23a878b43d5ea9fbc0d8ca1e0f9a6cad806782db --- /dev/null +++ b/doc/fluid/api/nn/Conv3d.rst @@ -0,0 +1,13 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_nn_layer_conv_Conv3d: + +Conv3d +------ + +.. autoclass:: paddle.nn.layer.conv.Conv3d + :members: + :inherited-members: + :noindex: + diff --git a/doc/fluid/api/nn/ConvTranspose2d.rst b/doc/fluid/api/nn/ConvTranspose2d.rst new file mode 100644 index 0000000000000000000000000000000000000000..0c1198338dea65925b1097cde244e9b22b3d618c --- /dev/null +++ b/doc/fluid/api/nn/ConvTranspose2d.rst @@ -0,0 +1,13 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_nn_layer_conv_ConvTranspose2d: + +ConvTranspose2d +--------------- + +.. autoclass:: paddle.nn.layer.conv.ConvTranspose2d + :members: + :inherited-members: + :noindex: + diff --git a/doc/fluid/api/nn/ConvTranspose3d.rst b/doc/fluid/api/nn/ConvTranspose3d.rst new file mode 100644 index 0000000000000000000000000000000000000000..5d0345a4f3498ced130156c0380393df6c3fb85d --- /dev/null +++ b/doc/fluid/api/nn/ConvTranspose3d.rst @@ -0,0 +1,13 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_nn_layer_conv_ConvTranspose3d: + +ConvTranspose3d +--------------- + +.. autoclass:: paddle.nn.layer.conv.ConvTranspose3d + :members: + :inherited-members: + :noindex: + diff --git a/doc/fluid/api/nn/CosineSimilarity.rst b/doc/fluid/api/nn/CosineSimilarity.rst new file mode 100644 index 0000000000000000000000000000000000000000..36e235f0752ed84dce258dae4e3248e1173cad8a --- /dev/null +++ b/doc/fluid/api/nn/CosineSimilarity.rst @@ -0,0 +1,7 @@ +.. _api_nn_CosineSimilarity: + +CosineSimilarity +------------------------------- +:doc_source: paddle.nn.CosineSimilarity + + diff --git a/doc/fluid/api/nn/Embedding.rst b/doc/fluid/api/nn/Embedding.rst new file mode 100644 index 0000000000000000000000000000000000000000..b9ec790b5deb9232eac98d57896b7f730ab297da --- /dev/null +++ b/doc/fluid/api/nn/Embedding.rst @@ -0,0 +1,7 @@ +.. _api_nn_Embedding: + +Embedding +------------------------------- +:doc_source: paddle.fluid.dygraph.Embedding + + diff --git a/doc/fluid/api/nn/GradientClipByGlobalNorm.rst b/doc/fluid/api/nn/GradientClipByGlobalNorm.rst new file mode 100644 index 0000000000000000000000000000000000000000..2c414d9e4135f8e1f26e886935acea806eb43e3a --- /dev/null +++ b/doc/fluid/api/nn/GradientClipByGlobalNorm.rst @@ -0,0 +1,7 @@ +.. 
_api_nn_GradientClipByGlobalNorm: + +GradientClipByGlobalNorm +------------------------------- +:doc_source: paddle.fluid.clip.GradientClipByGlobalNorm + + diff --git a/doc/fluid/api/nn/GradientClipByNorm.rst b/doc/fluid/api/nn/GradientClipByNorm.rst new file mode 100644 index 0000000000000000000000000000000000000000..cf514e3e3c651815b3709ce937d66d77cef730f0 --- /dev/null +++ b/doc/fluid/api/nn/GradientClipByNorm.rst @@ -0,0 +1,7 @@ +.. _api_nn_GradientClipByNorm: + +GradientClipByNorm +------------------------------- +:doc_source: paddle.fluid.clip.GradientClipByNorm + + diff --git a/doc/fluid/api/nn/GradientClipByValue.rst b/doc/fluid/api/nn/GradientClipByValue.rst new file mode 100644 index 0000000000000000000000000000000000000000..52d86f6d05072ef49e0e682fee32c0193fdaada7 --- /dev/null +++ b/doc/fluid/api/nn/GradientClipByValue.rst @@ -0,0 +1,7 @@ +.. _api_nn_GradientClipByValue: + +GradientClipByValue +------------------------------- +:doc_source: paddle.fluid.clip.GradientClipByValue + + diff --git a/doc/fluid/api/nn/GroupNorm.rst b/doc/fluid/api/nn/GroupNorm.rst new file mode 100644 index 0000000000000000000000000000000000000000..0c87d6b16b9d0b02fe0833198599fa6024f8b0a9 --- /dev/null +++ b/doc/fluid/api/nn/GroupNorm.rst @@ -0,0 +1,7 @@ +.. _api_nn_GroupNorm: + +GroupNorm +------------------------------- +:doc_source: paddle.fluid.dygraph.GroupNorm + + diff --git a/doc/fluid/api/nn/Layer.rst b/doc/fluid/api/nn/Layer.rst new file mode 100644 index 0000000000000000000000000000000000000000..326cbc4098aaed53991e5cb5851090d5d9e5c041 --- /dev/null +++ b/doc/fluid/api/nn/Layer.rst @@ -0,0 +1,7 @@ +.. _api_nn_Layer: + +Layer +------------------------------- +:doc_source: paddle.fluid.dygraph.layers.Layer + + diff --git a/doc/fluid/api/nn/LayerList.rst b/doc/fluid/api/nn/LayerList.rst new file mode 100644 index 0000000000000000000000000000000000000000..1589900fd7c9c0cf7e2dedf622be3d3eac7722b1 --- /dev/null +++ b/doc/fluid/api/nn/LayerList.rst @@ -0,0 +1,7 @@ +.. _api_nn_LayerList: + +LayerList +------------------------------- +:doc_source: paddle.fluid.dygraph.container.LayerList + + diff --git a/doc/fluid/api/nn/LayerNorm.rst b/doc/fluid/api/nn/LayerNorm.rst new file mode 100644 index 0000000000000000000000000000000000000000..162d53ad754cbaaa4c96430309fc30f1c6820fdc --- /dev/null +++ b/doc/fluid/api/nn/LayerNorm.rst @@ -0,0 +1,7 @@ +.. _api_nn_LayerNorm: + +LayerNorm +------------------------------- +:doc_source: paddle.fluid.dygraph.LayerNorm + + diff --git a/doc/fluid/api/nn/Linear.rst b/doc/fluid/api/nn/Linear.rst new file mode 100644 index 0000000000000000000000000000000000000000..239d4d2d22d4558aef5cb1fa7fb993e156871596 --- /dev/null +++ b/doc/fluid/api/nn/Linear.rst @@ -0,0 +1,7 @@ +.. _api_nn_Linear: + +Linear +------------------------------- +:doc_source: paddle.fluid.dygraph.Linear + + diff --git a/doc/fluid/api/nn/ParameterList.rst b/doc/fluid/api/nn/ParameterList.rst new file mode 100644 index 0000000000000000000000000000000000000000..b278270dc5b4bb8fcf7248a3c58e5210ac26e219 --- /dev/null +++ b/doc/fluid/api/nn/ParameterList.rst @@ -0,0 +1,7 @@ +.. _api_nn_ParameterList: + +ParameterList +------------------------------- +:doc_source: paddle.fluid.dygraph.container.ParameterList + + diff --git a/doc/fluid/api/nn/Pool2D.rst b/doc/fluid/api/nn/Pool2D.rst new file mode 100644 index 0000000000000000000000000000000000000000..e34535daab0f95181dea2841a9ffb7c5732b7a50 --- /dev/null +++ b/doc/fluid/api/nn/Pool2D.rst @@ -0,0 +1,7 @@ +.. 
_api_nn_Pool2D: + +Pool2D +------------------------------- +:doc_source: paddle.fluid.dygraph.Pool2D + + diff --git a/doc/fluid/api/nn/ReLU.rst b/doc/fluid/api/nn/ReLU.rst new file mode 100644 index 0000000000000000000000000000000000000000..b186a2b174d77ad0fc635d169fa065b659358c99 --- /dev/null +++ b/doc/fluid/api/nn/ReLU.rst @@ -0,0 +1,7 @@ +.. _api_nn_ReLU: + +ReLU +------------------------------- +:doc_source: paddle.fluid.layers.relu + + diff --git a/doc/fluid/api/nn/ReflectionPad1d.rst b/doc/fluid/api/nn/ReflectionPad1d.rst new file mode 100644 index 0000000000000000000000000000000000000000..48998894854e59228aa26de7ec1bdbb8c3e98ec4 --- /dev/null +++ b/doc/fluid/api/nn/ReflectionPad1d.rst @@ -0,0 +1,7 @@ +.. _api_nn_ReflectionPad1d: + +ReflectionPad1d +------------------------------- +:doc_source: paddle.nn.ReflectionPad1d + + diff --git a/doc/fluid/api/nn/ReflectionPad2d.rst b/doc/fluid/api/nn/ReflectionPad2d.rst new file mode 100644 index 0000000000000000000000000000000000000000..50ade0a1acd71415d99839addbf85f271a4dc854 --- /dev/null +++ b/doc/fluid/api/nn/ReflectionPad2d.rst @@ -0,0 +1,7 @@ +.. _api_nn_ReflectionPad2d: + +ReflectionPad2d +------------------------------- +:doc_source: paddle.nn.ReflectionPad2d + + diff --git a/doc/fluid/api/nn/ReplicationPad1d.rst b/doc/fluid/api/nn/ReplicationPad1d.rst new file mode 100644 index 0000000000000000000000000000000000000000..da4c6217ac8a94d30b1e677732d27847bacd86cd --- /dev/null +++ b/doc/fluid/api/nn/ReplicationPad1d.rst @@ -0,0 +1,7 @@ +.. _api_nn_ReplicationPad1d: + +ReplicationPad1d +------------------------------- +:doc_source: paddle.nn.ReplicationPad1d + + diff --git a/doc/fluid/api/nn/ReplicationPad2d.rst b/doc/fluid/api/nn/ReplicationPad2d.rst new file mode 100644 index 0000000000000000000000000000000000000000..b388ba3b97290b8bccdbcf49621ae16170909694 --- /dev/null +++ b/doc/fluid/api/nn/ReplicationPad2d.rst @@ -0,0 +1,7 @@ +.. _api_nn_ReplicationPad2d: + +ReplicationPad2d +------------------------------- +:doc_source: paddle.nn.ReplicationPad2d + + diff --git a/doc/fluid/api/nn/ReplicationPad3d.rst b/doc/fluid/api/nn/ReplicationPad3d.rst new file mode 100644 index 0000000000000000000000000000000000000000..9faba1ccda736bb2b8593e5a84593b71a88f365b --- /dev/null +++ b/doc/fluid/api/nn/ReplicationPad3d.rst @@ -0,0 +1,7 @@ +.. _api_nn_ReplicationPad3d: + +ReplicationPad3d +------------------------------- +:doc_source: paddle.nn.ReplicationPad3d + + diff --git a/doc/fluid/api/nn/Sequential.rst b/doc/fluid/api/nn/Sequential.rst new file mode 100644 index 0000000000000000000000000000000000000000..7f12fe92b7898310608fa2b3443d975eb59fd001 --- /dev/null +++ b/doc/fluid/api/nn/Sequential.rst @@ -0,0 +1,7 @@ +.. _api_nn_Sequential: + +Sequential +------------------------------- +:doc_source: paddle.fluid.dygraph.container.Sequential + + diff --git a/doc/fluid/api/nn/SpectralNorm.rst b/doc/fluid/api/nn/SpectralNorm.rst new file mode 100644 index 0000000000000000000000000000000000000000..aff8682c35ae6b0f57bc7dff861bb21f7881fd64 --- /dev/null +++ b/doc/fluid/api/nn/SpectralNorm.rst @@ -0,0 +1,7 @@ +.. _api_nn_SpectralNorm: + +SpectralNorm +------------------------------- +:doc_source: paddle.fluid.dygraph.SpectralNorm + + diff --git a/doc/fluid/api/nn/ZeroPad2d.rst b/doc/fluid/api/nn/ZeroPad2d.rst new file mode 100644 index 0000000000000000000000000000000000000000..1b71c19f98021f7cdefa4ff44425434bbbd55252 --- /dev/null +++ b/doc/fluid/api/nn/ZeroPad2d.rst @@ -0,0 +1,7 @@ +.. 
_api_nn_ZeroPad2d: + +ZeroPad2d +------------------------------- +:doc_source: paddle.nn.ZeroPad2d + + diff --git a/doc/fluid/api/nn/activation.rst b/doc/fluid/api/nn/activation.rst new file mode 100644 index 0000000000000000000000000000000000000000..5e1d1be87c05e0c533311573cec81ca980f88b92 --- /dev/null +++ b/doc/fluid/api/nn/activation.rst @@ -0,0 +1,22 @@ +========== +activation +========== + +.. toctree:: + :maxdepth: 1 + + activation/ELU.rst + activation/GELU.rst + activation/Hardshrink.rst + activation/Tanh.rst + activation/Hardtanh.rst + activation/LogSigmoid.rst + activation/PReLU.rst + activation/ReLU.rst + activation/ReLU6.rst + activation/SELU.rst + activation/Softmax.rst + activation/Softplus.rst + activation/Softshrink.rst + activation/Softsign.rst + activation/Tanhshrink.rst diff --git a/doc/fluid/api/nn/activation/ELU.rst b/doc/fluid/api/nn/activation/ELU.rst new file mode 100644 index 0000000000000000000000000000000000000000..d98897b3706e5dc2796989c68c4b86adcdf5fa31 --- /dev/null +++ b/doc/fluid/api/nn/activation/ELU.rst @@ -0,0 +1,7 @@ +.. _api_nn_activation_ELU: + +ELU +------------------------------- + +.. autoclass:: paddle.nn.ELU + :noindex: \ No newline at end of file diff --git a/doc/fluid/api/nn/activation/GELU.rst b/doc/fluid/api/nn/activation/GELU.rst new file mode 100644 index 0000000000000000000000000000000000000000..3aa80fa5b5b45af194ec0c71e4a537a4e9cb0bec --- /dev/null +++ b/doc/fluid/api/nn/activation/GELU.rst @@ -0,0 +1,7 @@ +.. _api_nn_activation_GELU: + +GELU +------------------------------- + +.. autoclass:: paddle.nn.GELU + :noindex: \ No newline at end of file diff --git a/doc/fluid/api/nn/activation/Hardshrink.rst b/doc/fluid/api/nn/activation/Hardshrink.rst new file mode 100644 index 0000000000000000000000000000000000000000..552e6a2a9883ed37f55544ed0f148920bd08f46a --- /dev/null +++ b/doc/fluid/api/nn/activation/Hardshrink.rst @@ -0,0 +1,13 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_nn_activation_Hardshrink: + +Hardshrink +--------- + +.. autoclass:: paddle.nn.activation.Hardshrink + :members: + :inherited-members: + :noindex: + diff --git a/doc/fluid/api/nn/activation/Hardtanh.rst b/doc/fluid/api/nn/activation/Hardtanh.rst new file mode 100644 index 0000000000000000000000000000000000000000..5509d334ae5a21f5af8bbd7f53c0bce46453a120 --- /dev/null +++ b/doc/fluid/api/nn/activation/Hardtanh.rst @@ -0,0 +1,7 @@ +.. _api_nn_activation_Hardtanh: + +Hardtanh +------------------------------- + +.. autoclass:: paddle.nn.Hardtanh + :noindex: \ No newline at end of file diff --git a/doc/fluid/api/nn/activation/LogSigmoid.rst b/doc/fluid/api/nn/activation/LogSigmoid.rst new file mode 100644 index 0000000000000000000000000000000000000000..0407712d267bb29f215714ca77782ae6dce1eed9 --- /dev/null +++ b/doc/fluid/api/nn/activation/LogSigmoid.rst @@ -0,0 +1,7 @@ +.. _api_nn_activation_LogSigmoid: + +LogSigmoid +------------------------------- + +.. autoclass:: paddle.nn.LogSigmoid + :noindex: \ No newline at end of file diff --git a/doc/fluid/api/nn/activation/PReLU.rst b/doc/fluid/api/nn/activation/PReLU.rst new file mode 100644 index 0000000000000000000000000000000000000000..bb4c2c3f0055c805e516816036de1571989c5d2c --- /dev/null +++ b/doc/fluid/api/nn/activation/PReLU.rst @@ -0,0 +1,7 @@ +.. _api_nn_activation_PReLU: + +PReLU +------------------------------- + +.. 
autoclass:: paddle.nn.PReLU + :noindex: \ No newline at end of file diff --git a/doc/fluid/api/nn/activation/ReLU.rst b/doc/fluid/api/nn/activation/ReLU.rst new file mode 100644 index 0000000000000000000000000000000000000000..32a742dac0a9c1cda06128c2d5985448d0c7ab47 --- /dev/null +++ b/doc/fluid/api/nn/activation/ReLU.rst @@ -0,0 +1,7 @@ +.. _api_nn_activation_ReLU: + +ReLU +------------------------------- + +.. autoclass:: paddle.nn.ReLU + :noindex: \ No newline at end of file diff --git a/doc/fluid/api/nn/activation/ReLU6.rst b/doc/fluid/api/nn/activation/ReLU6.rst new file mode 100644 index 0000000000000000000000000000000000000000..8c0f9dfdd6dcc42eb5a0e4a39d1ad7e3026444b0 --- /dev/null +++ b/doc/fluid/api/nn/activation/ReLU6.rst @@ -0,0 +1,8 @@ +.. _api_nn_activation_ReLU6: + +ReLU6 +--------- + +.. autoclass:: paddle.nn.ReLU6 + :noindex: + diff --git a/doc/fluid/api/nn/activation/SELU.rst b/doc/fluid/api/nn/activation/SELU.rst new file mode 100644 index 0000000000000000000000000000000000000000..1817d2f7f01233390eca8bf91315ff09e068e6bd --- /dev/null +++ b/doc/fluid/api/nn/activation/SELU.rst @@ -0,0 +1,8 @@ +.. _api_nn_activation_SELU: + +SELU +--------- + +.. autoclass:: paddle.nn.SELU + :noindex: + diff --git a/doc/fluid/api/nn/activation/Softmax.rst b/doc/fluid/api/nn/activation/Softmax.rst new file mode 100644 index 0000000000000000000000000000000000000000..a39a3161092cba5a774a5bcb9817e42049d5e039 --- /dev/null +++ b/doc/fluid/api/nn/activation/Softmax.rst @@ -0,0 +1,7 @@ +.. _api_nn_activation_Softmax: + +Softmax +------------------------------- + +.. autoclass:: paddle.nn.Softmax + :noindex: \ No newline at end of file diff --git a/doc/fluid/api/nn/activation/Softplus.rst b/doc/fluid/api/nn/activation/Softplus.rst new file mode 100644 index 0000000000000000000000000000000000000000..3d914222a4b6ac4996d867ac1bfa06cf6fdd4d06 --- /dev/null +++ b/doc/fluid/api/nn/activation/Softplus.rst @@ -0,0 +1,8 @@ +.. _api_nn_activation_Softplus: + +Softplus +--------- + +.. autoclass:: paddle.nn.Softplus + :noindex: + diff --git a/doc/fluid/api/nn/activation/Softshrink.rst b/doc/fluid/api/nn/activation/Softshrink.rst new file mode 100644 index 0000000000000000000000000000000000000000..0c332c8fe505563bda387aec3020691e5fe8525a --- /dev/null +++ b/doc/fluid/api/nn/activation/Softshrink.rst @@ -0,0 +1,8 @@ +.. _api_nn_activation_Softshrink: + +Softshrink +---------- + +.. autoclass:: paddle.nn.Softshrink + :noindex: + diff --git a/doc/fluid/api/nn/activation/Softsign.rst b/doc/fluid/api/nn/activation/Softsign.rst new file mode 100644 index 0000000000000000000000000000000000000000..9bedacb898e320ebd8986004330fceda2af55479 --- /dev/null +++ b/doc/fluid/api/nn/activation/Softsign.rst @@ -0,0 +1,8 @@ +.. _api_nn_activation_Softsign: + +Softsign +--------- + +.. autoclass:: paddle.nn.Softsign + :noindex: + diff --git a/doc/fluid/api/nn/activation/Tanh.rst b/doc/fluid/api/nn/activation/Tanh.rst new file mode 100644 index 0000000000000000000000000000000000000000..80cb491331f1f6bbe8afec91c444709c196ce131 --- /dev/null +++ b/doc/fluid/api/nn/activation/Tanh.rst @@ -0,0 +1,13 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_nn_activation_Tanh: + +Tanh +--------- + +.. 
autoclass:: paddle.nn.layer.activation.Tanh + :members: + :inherited-members: + :noindex: + diff --git a/doc/fluid/api/nn/activation/Tanhshrink.rst b/doc/fluid/api/nn/activation/Tanhshrink.rst new file mode 100644 index 0000000000000000000000000000000000000000..8715fa2726342bcb988a2a442240b7a2673abbc3 --- /dev/null +++ b/doc/fluid/api/nn/activation/Tanhshrink.rst @@ -0,0 +1,8 @@ +.. _api_nn_activation_Tanhshrink: + +Tanhshrink +---------- + +.. autoclass:: paddle.nn.Tanhshrink + :noindex: + diff --git a/doc/fluid/api/nn/adaptive_pool2d.rst b/doc/fluid/api/nn/adaptive_pool2d.rst new file mode 100644 index 0000000000000000000000000000000000000000..dae2332c76d95c9788208dfbc6ea5ee6ae14ad43 --- /dev/null +++ b/doc/fluid/api/nn/adaptive_pool2d.rst @@ -0,0 +1,7 @@ +.. _api_nn_adaptive_pool2d: + +adaptive_pool2d +------------------------------- +:doc_source: paddle.fluid.layers.adaptive_pool2d + + diff --git a/doc/fluid/api/nn/adaptive_pool3d.rst b/doc/fluid/api/nn/adaptive_pool3d.rst new file mode 100644 index 0000000000000000000000000000000000000000..8b19cca895edd712090eeb53d220c328bd6545bd --- /dev/null +++ b/doc/fluid/api/nn/adaptive_pool3d.rst @@ -0,0 +1,7 @@ +.. _api_nn_adaptive_pool3d: + +adaptive_pool3d +------------------------------- +:doc_source: paddle.fluid.layers.adaptive_pool3d + + diff --git a/doc/fluid/api/nn/add_position_encoding.rst b/doc/fluid/api/nn/add_position_encoding.rst new file mode 100644 index 0000000000000000000000000000000000000000..7f07f2ba62e729502edd54f526d516564259bdc6 --- /dev/null +++ b/doc/fluid/api/nn/add_position_encoding.rst @@ -0,0 +1,7 @@ +.. _api_nn_add_position_encoding: + +add_position_encoding +------------------------------- +:doc_source: paddle.fluid.layers.add_position_encoding + + diff --git a/doc/fluid/api/nn/affine_channel.rst b/doc/fluid/api/nn/affine_channel.rst new file mode 100644 index 0000000000000000000000000000000000000000..0a54a9e083330daf815f3592889b492e568da34f --- /dev/null +++ b/doc/fluid/api/nn/affine_channel.rst @@ -0,0 +1,7 @@ +.. _api_nn_affine_channel: + +affine_channel +------------------------------- +:doc_source: paddle.fluid.layers.affine_channel + + diff --git a/doc/fluid/api/nn/affine_grid.rst b/doc/fluid/api/nn/affine_grid.rst new file mode 100644 index 0000000000000000000000000000000000000000..cc7cea269e63d05dd24cd3089c473b580211b904 --- /dev/null +++ b/doc/fluid/api/nn/affine_grid.rst @@ -0,0 +1,7 @@ +.. _api_nn_affine_grid: + +affine_grid +------------------------------- +:doc_source: paddle.fluid.layers.affine_grid + + diff --git a/doc/fluid/api/nn/anchor_generator.rst b/doc/fluid/api/nn/anchor_generator.rst new file mode 100644 index 0000000000000000000000000000000000000000..7bec2a4776765c888dd8ecdaab221c61308fd894 --- /dev/null +++ b/doc/fluid/api/nn/anchor_generator.rst @@ -0,0 +1,7 @@ +.. _api_nn_anchor_generator: + +anchor_generator +------------------------------- +:doc_source: paddle.fluid.layers.anchor_generator + + diff --git a/doc/fluid/api/nn/assign.rst b/doc/fluid/api/nn/assign.rst new file mode 100644 index 0000000000000000000000000000000000000000..946b6b807def0e8b49f7d903c52be0badba09235 --- /dev/null +++ b/doc/fluid/api/nn/assign.rst @@ -0,0 +1,7 @@ +.. 
_api_nn_assign: + +assign +------------------------------- +:doc_source: paddle.fluid.layers.assign + + diff --git a/doc/fluid/api/nn/beam_search.rst b/doc/fluid/api/nn/beam_search.rst new file mode 100644 index 0000000000000000000000000000000000000000..67db0c4dba18512e4b315de41637185f55210240 --- /dev/null +++ b/doc/fluid/api/nn/beam_search.rst @@ -0,0 +1,7 @@ +.. _api_nn_beam_search: + +beam_search +------------------------------- +:doc_source: paddle.fluid.layers.beam_search + + diff --git a/doc/fluid/api/nn/beam_search_decode.rst b/doc/fluid/api/nn/beam_search_decode.rst new file mode 100644 index 0000000000000000000000000000000000000000..dc1cb52203bc8c7f57127f3e12bbdb9d481810fa --- /dev/null +++ b/doc/fluid/api/nn/beam_search_decode.rst @@ -0,0 +1,7 @@ +.. _api_nn_beam_search_decode: + +beam_search_decode +------------------------------- +:doc_source: paddle.fluid.layers.beam_search_decode + + diff --git a/doc/fluid/api/nn/bipartite_match.rst b/doc/fluid/api/nn/bipartite_match.rst new file mode 100644 index 0000000000000000000000000000000000000000..cf48e0589418f80c0ea25cbd68808464697d8572 --- /dev/null +++ b/doc/fluid/api/nn/bipartite_match.rst @@ -0,0 +1,7 @@ +.. _api_nn_bipartite_match: + +bipartite_match +------------------------------- +:doc_source: paddle.fluid.layers.bipartite_match + + diff --git a/doc/fluid/api/nn/box_clip.rst b/doc/fluid/api/nn/box_clip.rst new file mode 100644 index 0000000000000000000000000000000000000000..23a86b82bfb03fba37f29f16daf96a118b780a79 --- /dev/null +++ b/doc/fluid/api/nn/box_clip.rst @@ -0,0 +1,7 @@ +.. _api_nn_box_clip: + +box_clip +------------------------------- +:doc_source: paddle.fluid.layers.box_clip + + diff --git a/doc/fluid/api/nn/box_coder.rst b/doc/fluid/api/nn/box_coder.rst new file mode 100644 index 0000000000000000000000000000000000000000..8500acde7cbb63947cfad7d8ef01c3c6a75852b7 --- /dev/null +++ b/doc/fluid/api/nn/box_coder.rst @@ -0,0 +1,7 @@ +.. _api_nn_box_coder: + +box_coder +------------------------------- +:doc_source: paddle.fluid.layers.box_coder + + diff --git a/doc/fluid/api/nn/box_decoder_and_assign.rst b/doc/fluid/api/nn/box_decoder_and_assign.rst new file mode 100644 index 0000000000000000000000000000000000000000..66dcc3223c03f3b7bc858e4228935e703fa2caeb --- /dev/null +++ b/doc/fluid/api/nn/box_decoder_and_assign.rst @@ -0,0 +1,7 @@ +.. _api_nn_box_decoder_and_assign: + +box_decoder_and_assign +------------------------------- +:doc_source: paddle.fluid.layers.box_decoder_and_assign + + diff --git a/doc/fluid/api/nn/bpr_loss.rst b/doc/fluid/api/nn/bpr_loss.rst new file mode 100644 index 0000000000000000000000000000000000000000..5d47ce597af9e8be264ebdc8dfb099df45df54f5 --- /dev/null +++ b/doc/fluid/api/nn/bpr_loss.rst @@ -0,0 +1,7 @@ +.. _api_nn_bpr_loss: + +bpr_loss +------------------------------- +:doc_source: paddle.fluid.layers.bpr_loss + + diff --git a/doc/fluid/api/nn/brelu.rst b/doc/fluid/api/nn/brelu.rst new file mode 100644 index 0000000000000000000000000000000000000000..093d2cb408cfebffb13f3f2f1efb3a589e7e23ba --- /dev/null +++ b/doc/fluid/api/nn/brelu.rst @@ -0,0 +1,7 @@ +.. _api_nn_brelu: + +brelu +------------------------------- +:doc_source: paddle.fluid.layers.brelu + + diff --git a/doc/fluid/api/nn/case.rst b/doc/fluid/api/nn/case.rst new file mode 100644 index 0000000000000000000000000000000000000000..51e8a02b68bba61f7e79f655f75e478fbcaff792 --- /dev/null +++ b/doc/fluid/api/nn/case.rst @@ -0,0 +1,7 @@ +.. 
_api_nn_case: + +case +------------------------------- +:doc_source: paddle.fluid.layers.case + + diff --git a/doc/fluid/api/nn/center_loss.rst b/doc/fluid/api/nn/center_loss.rst new file mode 100644 index 0000000000000000000000000000000000000000..315cadcfa825ee984321f394725e05c515b7a5f9 --- /dev/null +++ b/doc/fluid/api/nn/center_loss.rst @@ -0,0 +1,7 @@ +.. _api_nn_center_loss: + +center_loss +------------------------------- +:doc_source: paddle.fluid.layers.center_loss + + diff --git a/doc/fluid/api/nn/clip.rst b/doc/fluid/api/nn/clip.rst new file mode 100644 index 0000000000000000000000000000000000000000..c4f53ffd8ef9f7a72277004f485a036f20ee9893 --- /dev/null +++ b/doc/fluid/api/nn/clip.rst @@ -0,0 +1,7 @@ +.. _api_nn_clip: + +clip +------------------------------- +:doc_source: paddle.fluid.layers.clip + + diff --git a/doc/fluid/api/nn/clip_by_norm.rst b/doc/fluid/api/nn/clip_by_norm.rst new file mode 100644 index 0000000000000000000000000000000000000000..b6ea08ae79320cc30b05c96df0beb7a089fd487d --- /dev/null +++ b/doc/fluid/api/nn/clip_by_norm.rst @@ -0,0 +1,7 @@ +.. _api_nn_clip_by_norm: + +clip_by_norm +------------------------------- +:doc_source: paddle.fluid.layers.clip_by_norm + + diff --git a/doc/fluid/api/nn/collect_fpn_proposals.rst b/doc/fluid/api/nn/collect_fpn_proposals.rst new file mode 100644 index 0000000000000000000000000000000000000000..ed338114695b413c346a613d8da75e6f126a0ed4 --- /dev/null +++ b/doc/fluid/api/nn/collect_fpn_proposals.rst @@ -0,0 +1,7 @@ +.. _api_nn_collect_fpn_proposals: + +collect_fpn_proposals +------------------------------- +:doc_source: paddle.fluid.layers.collect_fpn_proposals + + diff --git a/doc/fluid/api/nn/cond.rst b/doc/fluid/api/nn/cond.rst new file mode 100644 index 0000000000000000000000000000000000000000..3abd98e61cee38074152cedf6b14ae4ab6462943 --- /dev/null +++ b/doc/fluid/api/nn/cond.rst @@ -0,0 +1,7 @@ +.. _api_nn_cond: + +cond +------------------------------- +:doc_source: paddle.fluid.layers.cond + + diff --git a/doc/fluid/api/nn/continuous_value_model.rst b/doc/fluid/api/nn/continuous_value_model.rst new file mode 100644 index 0000000000000000000000000000000000000000..95e3694575659092ae0864b42ca21e19b39c15b3 --- /dev/null +++ b/doc/fluid/api/nn/continuous_value_model.rst @@ -0,0 +1,7 @@ +.. _api_nn_continuous_value_model: + +continuous_value_model +------------------------------- +:doc_source: paddle.fluid.layers.continuous_value_model + + diff --git a/doc/fluid/api/nn/cosine_decay.rst b/doc/fluid/api/nn/cosine_decay.rst new file mode 100644 index 0000000000000000000000000000000000000000..0592799d43c6d883085c0fa7c67ffe29446e4253 --- /dev/null +++ b/doc/fluid/api/nn/cosine_decay.rst @@ -0,0 +1,7 @@ +.. _api_nn_cosine_decay: + +cosine_decay +------------------------------- +:doc_source: paddle.fluid.layers.cosine_decay + + diff --git a/doc/fluid/api/nn/cosine_similarity.rst b/doc/fluid/api/nn/cosine_similarity.rst new file mode 100644 index 0000000000000000000000000000000000000000..d568494ff0536262c65a4ef3f8e80aad168d1819 --- /dev/null +++ b/doc/fluid/api/nn/cosine_similarity.rst @@ -0,0 +1,7 @@ +.. _api_nn_cosine_similarity: + +cosine_similarity +------------------------------- +:doc_source: paddle.nn.functional.cosine_similarity + + diff --git a/doc/fluid/api/nn/cross_entropy.rst b/doc/fluid/api/nn/cross_entropy.rst new file mode 100644 index 0000000000000000000000000000000000000000..68aeb05528fa22bd986b586a77a3d11aed324eac --- /dev/null +++ b/doc/fluid/api/nn/cross_entropy.rst @@ -0,0 +1,7 @@ +.. 
_api_nn_cross_entropy: + +cross_entropy +------------------------------- +:doc_source: paddle.fluid.layers.cross_entropy + + diff --git a/doc/fluid/api/nn/data.rst b/doc/fluid/api/nn/data.rst new file mode 100644 index 0000000000000000000000000000000000000000..b4cb9b1d0037da6e6cf65b00995f01ab11e29306 --- /dev/null +++ b/doc/fluid/api/nn/data.rst @@ -0,0 +1,7 @@ +.. _api_nn_data: + +data +------------------------------- +:doc_source: paddle.fluid.data + + diff --git a/doc/fluid/api/nn/deformable_roi_pooling.rst b/doc/fluid/api/nn/deformable_roi_pooling.rst new file mode 100644 index 0000000000000000000000000000000000000000..eb675c94570eb5bb0ad5c58e63c4769e68c6bdcc --- /dev/null +++ b/doc/fluid/api/nn/deformable_roi_pooling.rst @@ -0,0 +1,7 @@ +.. _api_nn_deformable_roi_pooling: + +deformable_roi_pooling +------------------------------- +:doc_source: paddle.fluid.layers.deformable_roi_pooling + + diff --git a/doc/fluid/api/nn/density_prior_box.rst b/doc/fluid/api/nn/density_prior_box.rst new file mode 100644 index 0000000000000000000000000000000000000000..008db64af8dd4f0a56c34a7c0ba8ccd08d15e546 --- /dev/null +++ b/doc/fluid/api/nn/density_prior_box.rst @@ -0,0 +1,7 @@ +.. _api_nn_density_prior_box: + +density_prior_box +------------------------------- +:doc_source: paddle.fluid.layers.density_prior_box + + diff --git a/doc/fluid/api/nn/detection_output.rst b/doc/fluid/api/nn/detection_output.rst new file mode 100644 index 0000000000000000000000000000000000000000..478a451d6b70668dcc03ebaea8ed86150d0a95ad --- /dev/null +++ b/doc/fluid/api/nn/detection_output.rst @@ -0,0 +1,7 @@ +.. _api_nn_detection_output: + +detection_output +------------------------------- +:doc_source: paddle.fluid.layers.detection_output + + diff --git a/doc/fluid/api/nn/dice_loss.rst b/doc/fluid/api/nn/dice_loss.rst new file mode 100644 index 0000000000000000000000000000000000000000..f7ac57ebea346f99abfd659b4028aba1d53db2a6 --- /dev/null +++ b/doc/fluid/api/nn/dice_loss.rst @@ -0,0 +1,7 @@ +.. _api_nn_dice_loss: + +dice_loss +------------------------------- +:doc_source: paddle.fluid.layers.dice_loss + + diff --git a/doc/fluid/api/nn/distribute_fpn_proposals.rst b/doc/fluid/api/nn/distribute_fpn_proposals.rst new file mode 100644 index 0000000000000000000000000000000000000000..1a40b2e71db3acccb899731a30fd8cbd32c71332 --- /dev/null +++ b/doc/fluid/api/nn/distribute_fpn_proposals.rst @@ -0,0 +1,7 @@ +.. _api_nn_distribute_fpn_proposals: + +distribute_fpn_proposals +------------------------------- +:doc_source: paddle.fluid.layers.distribute_fpn_proposals + + diff --git a/doc/fluid/api/nn/dropout.rst b/doc/fluid/api/nn/dropout.rst new file mode 100644 index 0000000000000000000000000000000000000000..34277a1696816f65838ef17c576b864d74a43429 --- /dev/null +++ b/doc/fluid/api/nn/dropout.rst @@ -0,0 +1,7 @@ +.. _api_nn_dropout: + +dropout +------------------------------- +:doc_source: paddle.fluid.layers.dropout + + diff --git a/doc/fluid/api/nn/edit_distance.rst b/doc/fluid/api/nn/edit_distance.rst new file mode 100644 index 0000000000000000000000000000000000000000..8673f388a89c501b1df15e17c4bfb0767d884d93 --- /dev/null +++ b/doc/fluid/api/nn/edit_distance.rst @@ -0,0 +1,7 @@ +.. 
_api_nn_edit_distance: + +edit_distance +------------------------------- +:doc_source: paddle.fluid.layers.edit_distance + + diff --git a/doc/fluid/api/nn/elu.rst b/doc/fluid/api/nn/elu.rst new file mode 100644 index 0000000000000000000000000000000000000000..cb526089915c31f2761ba0be40087af8233c3632 --- /dev/null +++ b/doc/fluid/api/nn/elu.rst @@ -0,0 +1,9 @@ +.. _api_nn_elu: + +elu +------------------------------- + +.. autofunction:: paddle.nn.functional.elu + :noindex: + + diff --git a/doc/fluid/api/nn/erf.rst b/doc/fluid/api/nn/erf.rst new file mode 100644 index 0000000000000000000000000000000000000000..ed8205a211d0ff276ef1145b9985049b90e452ab --- /dev/null +++ b/doc/fluid/api/nn/erf.rst @@ -0,0 +1,7 @@ +.. _api_nn_erf: + +erf +------------------------------- +:doc_source: paddle.fluid.layers.erf + + diff --git a/doc/fluid/api/nn/exponential_decay.rst b/doc/fluid/api/nn/exponential_decay.rst new file mode 100644 index 0000000000000000000000000000000000000000..c27917b73e7be52a1bd1edec900343774333a216 --- /dev/null +++ b/doc/fluid/api/nn/exponential_decay.rst @@ -0,0 +1,7 @@ +.. _api_nn_exponential_decay: + +exponential_decay +------------------------------- +:doc_source: paddle.fluid.layers.exponential_decay + + diff --git a/doc/fluid/api/nn/filter_by_instag.rst b/doc/fluid/api/nn/filter_by_instag.rst new file mode 100644 index 0000000000000000000000000000000000000000..3587ac608f0d321d83dca11de89966f628c9eed0 --- /dev/null +++ b/doc/fluid/api/nn/filter_by_instag.rst @@ -0,0 +1,7 @@ +.. _api_nn_filter_by_instag: + +filter_by_instag +------------------------------- +:doc_source: paddle.fluid.layers.filter_by_instag + + diff --git a/doc/fluid/api/nn/fsp_matrix.rst b/doc/fluid/api/nn/fsp_matrix.rst new file mode 100644 index 0000000000000000000000000000000000000000..e44a51d85c2697948ac64a744c31d99dbf913e4a --- /dev/null +++ b/doc/fluid/api/nn/fsp_matrix.rst @@ -0,0 +1,7 @@ +.. _api_nn_fsp_matrix: + +fsp_matrix +------------------------------- +:doc_source: paddle.fluid.layers.fsp_matrix + + diff --git a/doc/fluid/api/nn/functional.rst b/doc/fluid/api/nn/functional.rst new file mode 100644 index 0000000000000000000000000000000000000000..1c8c2d3fcf0c3f4a76a82b50a7157ad66966be81 --- /dev/null +++ b/doc/fluid/api/nn/functional.rst @@ -0,0 +1,21 @@ +========== +functional +========== + +.. toctree:: + :maxdepth: 1 + + functional/binary_cross_entropy_with_logits.rst + functional/l1_loss.rst + functional/nll_loss.rst + functional/mse_loss.rst + functional/kl_div.rst + functional/one_hot.rst + functional/ctc_loss.rst + functional/adaptive_avg_pool2d.rst + functional/adaptive_avg_pool3d.rst + functional/conv2d.rst + functional/conv3d.rst + functional/conv_transpose2d.rst + functional/conv_transpose3d.rst + functional/bilinear.rst diff --git a/doc/fluid/api/nn/functional/activation/sigmoid.rst b/doc/fluid/api/nn/functional/activation/sigmoid.rst new file mode 100644 index 0000000000000000000000000000000000000000..3ed969b9d6ff4102aeaea815ed5746a11d76a57d --- /dev/null +++ b/doc/fluid/api/nn/functional/activation/sigmoid.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_nn_functional_activation_sigmoid: + +sigmoid +------- + +.. 
autofunction:: paddle.nn.functional.activation.sigmoid + :noindex: + diff --git a/doc/fluid/api/nn/functional/adaptive_avg_pool2d.rst b/doc/fluid/api/nn/functional/adaptive_avg_pool2d.rst new file mode 100644 index 0000000000000000000000000000000000000000..d0eff20c18b8ea6d58cfe0405c16aee6f721d30f --- /dev/null +++ b/doc/fluid/api/nn/functional/adaptive_avg_pool2d.rst @@ -0,0 +1,8 @@ +.. _api_nn_functional_adaptive_avg_pool2d: + +adaptive_avg_pool2d +-------------------- + +.. autofunction:: paddle.nn.functional.adaptive_avg_pool2d + :noindex: + diff --git a/doc/fluid/api/nn/functional/adaptive_avg_pool3d.rst b/doc/fluid/api/nn/functional/adaptive_avg_pool3d.rst new file mode 100644 index 0000000000000000000000000000000000000000..4765c3e11179f685033023fa8e7cd50846367d55 --- /dev/null +++ b/doc/fluid/api/nn/functional/adaptive_avg_pool3d.rst @@ -0,0 +1,8 @@ +.. _api_nn_functional_adaptive_avg_pool3d: + +adaptive_avg_pool3d +-------------------- + +.. autofunction:: paddle.nn.functional.adaptive_avg_pool3d + :noindex: + diff --git a/doc/fluid/api/nn/functional/bilinear.rst b/doc/fluid/api/nn/functional/bilinear.rst new file mode 100644 index 0000000000000000000000000000000000000000..0b15e7c385a851236db50ef84e62bec3bbb59312 --- /dev/null +++ b/doc/fluid/api/nn/functional/bilinear.rst @@ -0,0 +1,8 @@ +.. _api_nn_functional_bilinear: + +bilinear +-------------------- + +.. autofunction:: paddle.nn.functional.bilinear + :noindex: + diff --git a/doc/fluid/api/nn/functional/binary_cross_entropy.rst b/doc/fluid/api/nn/functional/binary_cross_entropy.rst new file mode 100644 index 0000000000000000000000000000000000000000..adafad14aac0b6523c147a0b6803b208004a4fdc --- /dev/null +++ b/doc/fluid/api/nn/functional/binary_cross_entropy.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_nn_functional_binary_cross_entropy: + +binary_cross_entropy +-------------------- + +.. autofunction:: paddle.nn.functional.binary_cross_entropy + :noindex: + diff --git a/doc/fluid/api/nn/functional/binary_cross_entropy_with_logits.rst b/doc/fluid/api/nn/functional/binary_cross_entropy_with_logits.rst new file mode 100644 index 0000000000000000000000000000000000000000..4ee93c942d3a6c7bb123de1e05e96d3a486c157c --- /dev/null +++ b/doc/fluid/api/nn/functional/binary_cross_entropy_with_logits.rst @@ -0,0 +1,10 @@ +.. _api_nn_functional_binary_cross_entropy_with_logits: + +binary_cross_entropy_with_logits +-------------------------------- + +.. autoclass:: paddle.nn.functional.binary_cross_entropy_with_logits + :members: + :inherited-members: + :noindex: + diff --git a/doc/fluid/api/nn/functional/conv2d.rst b/doc/fluid/api/nn/functional/conv2d.rst new file mode 100644 index 0000000000000000000000000000000000000000..bdf3753b941f5a6678575786eaeed8f8e4238d39 --- /dev/null +++ b/doc/fluid/api/nn/functional/conv2d.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_nn_functional_conv_conv2d: + +conv2d +------ + +.. autofunction:: paddle.nn.functional.conv.conv2d + :noindex: + diff --git a/doc/fluid/api/nn/functional/conv3d.rst b/doc/fluid/api/nn/functional/conv3d.rst new file mode 100644 index 0000000000000000000000000000000000000000..11588b19ac8b64b90b10a50faa61aefe74e6203c --- /dev/null +++ b/doc/fluid/api/nn/functional/conv3d.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_nn_functional_conv_conv3d: + +conv3d +------ + +.. 
autofunction:: paddle.nn.functional.conv.conv3d + :noindex: + diff --git a/doc/fluid/api/nn/functional/conv_transpose2d.rst b/doc/fluid/api/nn/functional/conv_transpose2d.rst new file mode 100644 index 0000000000000000000000000000000000000000..da09490ed2f02740c80f64387f4f18dab4916825 --- /dev/null +++ b/doc/fluid/api/nn/functional/conv_transpose2d.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_nn_functional_conv_conv_transpose2d: + +conv_transpose2d +---------------- + +.. autofunction:: paddle.nn.functional.conv.conv_transpose2d + :noindex: + diff --git a/doc/fluid/api/nn/functional/conv_transpose3d.rst b/doc/fluid/api/nn/functional/conv_transpose3d.rst new file mode 100644 index 0000000000000000000000000000000000000000..722a2d8b8ee481e43ab16bb1e69d46534c49f7d4 --- /dev/null +++ b/doc/fluid/api/nn/functional/conv_transpose3d.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_nn_functional_conv_conv_transpose3d: + +conv_transpose3d +---------------- + +.. autofunction:: paddle.nn.functional.conv.conv_transpose3d + :noindex: + diff --git a/doc/fluid/api/nn/functional/ctc_loss.rst b/doc/fluid/api/nn/functional/ctc_loss.rst new file mode 100644 index 0000000000000000000000000000000000000000..c77513c956eb592fc3b1335f08d75b37b0080bff --- /dev/null +++ b/doc/fluid/api/nn/functional/ctc_loss.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_nn_functional_ctc_loss: + +ctc_loss +-------- + +.. autofunction:: paddle.nn.functional.loss.ctc_loss + :noindex: + diff --git a/doc/fluid/api/nn/functional/kl_div.rst b/doc/fluid/api/nn/functional/kl_div.rst new file mode 100644 index 0000000000000000000000000000000000000000..cbd2e10aa2381a61bb03f7b94d99a86ac53fcd23 --- /dev/null +++ b/doc/fluid/api/nn/functional/kl_div.rst @@ -0,0 +1,10 @@ +.. _api_nn_functional_kl_div: + +kl_div +------------------------------- + +.. autoclass:: paddle.nn.functional.kl_div + :members: + :inherited-members: + :noindex: + diff --git a/doc/fluid/api/nn/functional/l1_loss.rst b/doc/fluid/api/nn/functional/l1_loss.rst new file mode 100644 index 0000000000000000000000000000000000000000..01a3ea06e7d034eb70744146816e6d0a166b749d --- /dev/null +++ b/doc/fluid/api/nn/functional/l1_loss.rst @@ -0,0 +1,10 @@ +.. _api_nn_functional_l1_loss: + +l1_loss +------ + +.. autoclass:: paddle.nn.functional.l1_loss + :members: + :inherited-members: + :noindex: + diff --git a/doc/fluid/api/nn/functional/loss/margin_ranking_loss.rst b/doc/fluid/api/nn/functional/loss/margin_ranking_loss.rst new file mode 100644 index 0000000000000000000000000000000000000000..e92eadc126a49d8a46bcfc06960eb39dcdc35fec --- /dev/null +++ b/doc/fluid/api/nn/functional/loss/margin_ranking_loss.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_nn_functional_loss_margin_ranking_loss: + +margin_ranking_loss +------------------- + +.. autofunction:: paddle.nn.functional.loss.margin_ranking_loss + :noindex: + diff --git a/doc/fluid/api/nn/functional/mse_loss.rst b/doc/fluid/api/nn/functional/mse_loss.rst new file mode 100644 index 0000000000000000000000000000000000000000..b5ec8c58b5a10c206e85184f36e414396fc7d9b9 --- /dev/null +++ b/doc/fluid/api/nn/functional/mse_loss.rst @@ -0,0 +1,10 @@ +.. _api_nn_functional_mse_loss: + +mse_loss +------ + +.. 
autoclass:: paddle.nn.functional.mse_loss + :members: + :inherited-members: + :noindex: + diff --git a/doc/fluid/api/nn/functional/nll_loss.rst b/doc/fluid/api/nn/functional/nll_loss.rst new file mode 100644 index 0000000000000000000000000000000000000000..6f0ce4093ac8a9cefc202e4346457edd4b2c6ae1 --- /dev/null +++ b/doc/fluid/api/nn/functional/nll_loss.rst @@ -0,0 +1,10 @@ +.. _api_nn_functional_nll_loss: + +nll_loss +------------------------------- + +.. autoclass:: paddle.nn.functional.nll_loss + :members: + :inherited-members: + :noindex: + diff --git a/doc/fluid/api/nn/functional/one_hot.rst b/doc/fluid/api/nn/functional/one_hot.rst new file mode 100644 index 0000000000000000000000000000000000000000..a8e5272e9a38a3c61a7803ca04dc90803ff147df --- /dev/null +++ b/doc/fluid/api/nn/functional/one_hot.rst @@ -0,0 +1,7 @@ +.. _api_nn_functional_one_hot: + +one_hot +--------- + +.. autofunction:: paddle.nn.functional.one_hot + :noindex: diff --git a/doc/fluid/api/nn/gather_tree.rst b/doc/fluid/api/nn/gather_tree.rst new file mode 100644 index 0000000000000000000000000000000000000000..7901d38d5ae27a8ec0c76a415646d34a3b844105 --- /dev/null +++ b/doc/fluid/api/nn/gather_tree.rst @@ -0,0 +1,7 @@ +.. _api_nn_gather_tree: + +gather_tree +------------------------------- +:doc_source: paddle.fluid.layers.gather_tree + + diff --git a/doc/fluid/api/nn/gelu.rst b/doc/fluid/api/nn/gelu.rst new file mode 100644 index 0000000000000000000000000000000000000000..f4b5d57f8a45154f80cd3ba8754b3b676c9ddb73 --- /dev/null +++ b/doc/fluid/api/nn/gelu.rst @@ -0,0 +1,9 @@ +.. _api_nn_gelu: + +gelu +------------------------------- + +.. autofunction:: paddle.nn.functional.gelu + :noindex: + + diff --git a/doc/fluid/api/nn/generate_mask_labels.rst b/doc/fluid/api/nn/generate_mask_labels.rst new file mode 100644 index 0000000000000000000000000000000000000000..01b99e4bddf75ab7a9b8beb32284e6b9f1a16d48 --- /dev/null +++ b/doc/fluid/api/nn/generate_mask_labels.rst @@ -0,0 +1,7 @@ +.. _api_nn_generate_mask_labels: + +generate_mask_labels +------------------------------- +:doc_source: paddle.fluid.layers.generate_mask_labels + + diff --git a/doc/fluid/api/nn/generate_proposal_labels.rst b/doc/fluid/api/nn/generate_proposal_labels.rst new file mode 100644 index 0000000000000000000000000000000000000000..75b0d264dd57cce5be3d6b2d72e571aefa21d8bd --- /dev/null +++ b/doc/fluid/api/nn/generate_proposal_labels.rst @@ -0,0 +1,7 @@ +.. _api_nn_generate_proposal_labels: + +generate_proposal_labels +------------------------------- +:doc_source: paddle.fluid.layers.generate_proposal_labels + + diff --git a/doc/fluid/api/nn/generate_proposals.rst b/doc/fluid/api/nn/generate_proposals.rst new file mode 100644 index 0000000000000000000000000000000000000000..47057daa5385689a2344132a8931886c5c80f0a8 --- /dev/null +++ b/doc/fluid/api/nn/generate_proposals.rst @@ -0,0 +1,7 @@ +.. _api_nn_generate_proposals: + +generate_proposals +------------------------------- +:doc_source: paddle.fluid.layers.generate_proposals + + diff --git a/doc/fluid/api/nn/grid_sampler.rst b/doc/fluid/api/nn/grid_sampler.rst new file mode 100644 index 0000000000000000000000000000000000000000..a16575cc78a5cd88abfe2ea0b700d9a4c63f42a4 --- /dev/null +++ b/doc/fluid/api/nn/grid_sampler.rst @@ -0,0 +1,7 @@ +.. 
_api_nn_grid_sampler: + +grid_sampler +------------------------------- +:doc_source: paddle.fluid.layers.grid_sampler + + diff --git a/doc/fluid/api/nn/hard_sigmoid.rst b/doc/fluid/api/nn/hard_sigmoid.rst new file mode 100644 index 0000000000000000000000000000000000000000..2570f0da1aab233cf7e80def8ebdcacc0e6a6e4e --- /dev/null +++ b/doc/fluid/api/nn/hard_sigmoid.rst @@ -0,0 +1,7 @@ +.. _api_nn_hard_sigmoid: + +hard_sigmoid +------------------------------- +:doc_source: paddle.fluid.layers.hard_sigmoid + + diff --git a/doc/fluid/api/nn/hard_swish.rst b/doc/fluid/api/nn/hard_swish.rst new file mode 100644 index 0000000000000000000000000000000000000000..8a44bfb974ea366d8541117f838ddc9dbf76a514 --- /dev/null +++ b/doc/fluid/api/nn/hard_swish.rst @@ -0,0 +1,7 @@ +.. _api_nn_hard_swish: + +hard_swish +------------------------------- +:doc_source: paddle.fluid.layers.hard_swish + + diff --git a/doc/fluid/api/nn/hardshrink.rst b/doc/fluid/api/nn/hardshrink.rst new file mode 100644 index 0000000000000000000000000000000000000000..48b98f2a5366941aa80c5dcd6b64b5a089378860 --- /dev/null +++ b/doc/fluid/api/nn/hardshrink.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_nn_hardshrink: + +hardshrink +---------- + +.. autofunction:: paddle.nn.functional.hardshrink + :noindex: + diff --git a/doc/fluid/api/nn/hardtanh.rst b/doc/fluid/api/nn/hardtanh.rst new file mode 100644 index 0000000000000000000000000000000000000000..d5c796a6638119b9cf0aaf0271b785cb8a54e27d --- /dev/null +++ b/doc/fluid/api/nn/hardtanh.rst @@ -0,0 +1,7 @@ +.. _api_nn_hardtanh: + +hardtanh +------------------------------- + +.. autofunction:: paddle.nn.functional.hardtanh + :noindex: diff --git a/doc/fluid/api/nn/hash.rst b/doc/fluid/api/nn/hash.rst new file mode 100644 index 0000000000000000000000000000000000000000..94c594c22db97978534bb5285ad6ce122384dcf7 --- /dev/null +++ b/doc/fluid/api/nn/hash.rst @@ -0,0 +1,7 @@ +.. _api_nn_hash: + +hash +------------------------------- +:doc_source: paddle.fluid.layers.hash + + diff --git a/doc/fluid/api/nn/hsigmoid.rst b/doc/fluid/api/nn/hsigmoid.rst new file mode 100644 index 0000000000000000000000000000000000000000..8e6e391581f3755b7b319dae486f05d173483bf4 --- /dev/null +++ b/doc/fluid/api/nn/hsigmoid.rst @@ -0,0 +1,7 @@ +.. _api_nn_hsigmoid: + +hsigmoid +------------------------------- +:doc_source: paddle.fluid.layers.hsigmoid + + diff --git a/doc/fluid/api/nn/huber_loss.rst b/doc/fluid/api/nn/huber_loss.rst new file mode 100644 index 0000000000000000000000000000000000000000..5403abfbaedaff8c032b5e0734fe071368006f95 --- /dev/null +++ b/doc/fluid/api/nn/huber_loss.rst @@ -0,0 +1,7 @@ +.. _api_nn_huber_loss: + +huber_loss +------------------------------- +:doc_source: paddle.fluid.layers.huber_loss + + diff --git a/doc/fluid/api/nn/image_resize.rst b/doc/fluid/api/nn/image_resize.rst new file mode 100644 index 0000000000000000000000000000000000000000..724df43ce156a5632c584e8103aed56b1d53937d --- /dev/null +++ b/doc/fluid/api/nn/image_resize.rst @@ -0,0 +1,7 @@ +.. _api_nn_image_resize: + +image_resize +------------------------------- +:doc_source: paddle.fluid.layers.image_resize + + diff --git a/doc/fluid/api/nn/image_resize_short.rst b/doc/fluid/api/nn/image_resize_short.rst new file mode 100644 index 0000000000000000000000000000000000000000..13b242973c90acac6db414d60a16e3fe74ec479a --- /dev/null +++ b/doc/fluid/api/nn/image_resize_short.rst @@ -0,0 +1,7 @@ +.. 
_api_nn_image_resize_short: + +image_resize_short +------------------------------- +:doc_source: paddle.fluid.layers.image_resize_short + + diff --git a/doc/fluid/api/nn/initializer.rst b/doc/fluid/api/nn/initializer.rst new file mode 100644 index 0000000000000000000000000000000000000000..c373707888da5732c1eff5786f2a27347f248b4a --- /dev/null +++ b/doc/fluid/api/nn/initializer.rst @@ -0,0 +1,15 @@ +======================= +paddle.nn.initializer +======================= + +.. toctree:: + :maxdepth: 1 + +initializer/Bilinear.rst + +initializer/Constant.rst +initializer/MSRA.rst +initializer/Normal.rst +initializer/TruncatedNormal.rst +initializer/Uniform.rst +initializer/Xavier.rst diff --git a/doc/fluid/api/nn/initializer/Bilinear.rst b/doc/fluid/api/nn/initializer/Bilinear.rst new file mode 100644 index 0000000000000000000000000000000000000000..dcbcde63d32526d19ad7924825d804b75d0559a6 --- /dev/null +++ b/doc/fluid/api/nn/initializer/Bilinear.rst @@ -0,0 +1,7 @@ +.. _api_nn_initializer_Bilinear: + +Bilinear +------------------------------- +:doc_source: paddle.fluid.initializer.Bilinear + + diff --git a/doc/fluid/api/nn/initializer/Constant.rst b/doc/fluid/api/nn/initializer/Constant.rst new file mode 100644 index 0000000000000000000000000000000000000000..9a1515be6ebd1b0f7dbbd997aa8f1fd42e556238 --- /dev/null +++ b/doc/fluid/api/nn/initializer/Constant.rst @@ -0,0 +1,7 @@ +.. _api_nn_initializer_Constant: + +Constant +------------------------------- +:doc_source: paddle.fluid.initializer.Constant + + diff --git a/doc/fluid/api/nn/initializer/MSRA.rst b/doc/fluid/api/nn/initializer/MSRA.rst new file mode 100644 index 0000000000000000000000000000000000000000..6fd0ba7e775afa83d8ee90408c3b35760e74dcfc --- /dev/null +++ b/doc/fluid/api/nn/initializer/MSRA.rst @@ -0,0 +1,7 @@ +.. _api_nn_initializer_MSRA: + +MSRA +------------------------------- +:doc_source: paddle.fluid.initializer.MSRA + + diff --git a/doc/fluid/api/nn/initializer/Normal.rst b/doc/fluid/api/nn/initializer/Normal.rst new file mode 100644 index 0000000000000000000000000000000000000000..da03e41daeb80ef44a36f4e78898d4f6b2896fe1 --- /dev/null +++ b/doc/fluid/api/nn/initializer/Normal.rst @@ -0,0 +1,7 @@ +.. _api_nn_initializer_Normal: + +Normal +------------------------------- +:doc_source: paddle.fluid.initializer.Normal + + diff --git a/doc/fluid/api/nn/initializer/TruncatedNormal.rst b/doc/fluid/api/nn/initializer/TruncatedNormal.rst new file mode 100644 index 0000000000000000000000000000000000000000..d1b8de9e61d9deb283d9e5c17512fbb4ae9667c9 --- /dev/null +++ b/doc/fluid/api/nn/initializer/TruncatedNormal.rst @@ -0,0 +1,7 @@ +.. _api_nn_initializer_TruncatedNormal: + +TruncatedNormal +------------------------------- +:doc_source: paddle.fluid.initializer.TruncatedNormal + + diff --git a/doc/fluid/api/nn/initializer/Uniform.rst b/doc/fluid/api/nn/initializer/Uniform.rst new file mode 100644 index 0000000000000000000000000000000000000000..fc7ceb0add6adb2b702f71382ba1aa97b9f16257 --- /dev/null +++ b/doc/fluid/api/nn/initializer/Uniform.rst @@ -0,0 +1,7 @@ +.. _api_nn_initializer_Uniform: + +Uniform +------------------------------- +:doc_source: paddle.fluid.initializer.Uniform + + diff --git a/doc/fluid/api/nn/initializer/Xavier.rst b/doc/fluid/api/nn/initializer/Xavier.rst new file mode 100644 index 0000000000000000000000000000000000000000..03d722686e8dd611531083dd2c3d1d9da6b6d4f1 --- /dev/null +++ b/doc/fluid/api/nn/initializer/Xavier.rst @@ -0,0 +1,7 @@ +.. 
_api_nn_initializer_Xavier: + +Xavier +------------------------------- +:doc_source: paddle.fluid.initializer.Xavier + + diff --git a/doc/fluid/api/nn/inverse_time_decay.rst b/doc/fluid/api/nn/inverse_time_decay.rst new file mode 100644 index 0000000000000000000000000000000000000000..0f238f53c7595ede6897587a0cf0d077ea3d98d2 --- /dev/null +++ b/doc/fluid/api/nn/inverse_time_decay.rst @@ -0,0 +1,7 @@ +.. _api_nn_inverse_time_decay: + +inverse_time_decay +------------------------------- +:doc_source: paddle.fluid.layers.inverse_time_decay + + diff --git a/doc/fluid/api/nn/iou_similarity.rst b/doc/fluid/api/nn/iou_similarity.rst new file mode 100644 index 0000000000000000000000000000000000000000..e81a38b061a461e73c3966b3118d6507802b37d2 --- /dev/null +++ b/doc/fluid/api/nn/iou_similarity.rst @@ -0,0 +1,7 @@ +.. _api_nn_iou_similarity: + +iou_similarity +------------------------------- +:doc_source: paddle.fluid.layers.iou_similarity + + diff --git a/doc/fluid/api/nn/kldiv_loss.rst b/doc/fluid/api/nn/kldiv_loss.rst new file mode 100644 index 0000000000000000000000000000000000000000..185dbbd71a4d37a00c040cdbc75d758941607cf5 --- /dev/null +++ b/doc/fluid/api/nn/kldiv_loss.rst @@ -0,0 +1,7 @@ +.. _api_nn_kldiv_loss: + +kldiv_loss +------------------------------- +:doc_source: paddle.fluid.layers.kldiv_loss + + diff --git a/doc/fluid/api/nn/l2_normalize.rst b/doc/fluid/api/nn/l2_normalize.rst new file mode 100644 index 0000000000000000000000000000000000000000..41c75aa8ca1545956b3e95322b66ad00cc06a914 --- /dev/null +++ b/doc/fluid/api/nn/l2_normalize.rst @@ -0,0 +1,7 @@ +.. _api_nn_l2_normalize: + +l2_normalize +------------------------------- +:doc_source: paddle.fluid.layers.l2_normalize + + diff --git a/doc/fluid/api/nn/label_smooth.rst b/doc/fluid/api/nn/label_smooth.rst new file mode 100644 index 0000000000000000000000000000000000000000..119775ec66dd09666490c2d3d45ee12bfec569dc --- /dev/null +++ b/doc/fluid/api/nn/label_smooth.rst @@ -0,0 +1,7 @@ +.. _api_nn_label_smooth: + +label_smooth +------------------------------- +:doc_source: paddle.fluid.layers.label_smooth + + diff --git a/doc/fluid/api/nn/layer/activation/Sigmoid.rst b/doc/fluid/api/nn/layer/activation/Sigmoid.rst new file mode 100644 index 0000000000000000000000000000000000000000..46a0c2d263eaf10cbc81a342135ace1b58b2e2f4 --- /dev/null +++ b/doc/fluid/api/nn/layer/activation/Sigmoid.rst @@ -0,0 +1,13 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_nn_layer_activation_Sigmoid: + +Sigmoid +------- + +.. autoclass:: paddle.nn.layer.activation.Sigmoid + :members: + :inherited-members: + :noindex: + diff --git a/doc/fluid/api/nn/layer/loss/MarginRankingLoss.rst b/doc/fluid/api/nn/layer/loss/MarginRankingLoss.rst new file mode 100644 index 0000000000000000000000000000000000000000..d69d1deff5defab24b2f12ea877c3a208a801478 --- /dev/null +++ b/doc/fluid/api/nn/layer/loss/MarginRankingLoss.rst @@ -0,0 +1,13 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_nn_layer_loss_MarginRankingLoss: + +MarginRankingLoss +----------------- + +.. autoclass:: paddle.nn.layer.loss.MarginRankingLoss + :members: + :inherited-members: + :noindex: + diff --git a/doc/fluid/api/nn/leaky_relu.rst b/doc/fluid/api/nn/leaky_relu.rst new file mode 100644 index 0000000000000000000000000000000000000000..cf38cdb92c3d432c66f95aaed46449c52677287f --- /dev/null +++ b/doc/fluid/api/nn/leaky_relu.rst @@ -0,0 +1,11 @@ +.. 
THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_nn_leaky_relu: + +leaky_relu +---------- + +.. autofunction:: paddle.nn.functional.leaky_relu + :noindex: + diff --git a/doc/fluid/api/nn/linear_lr_warmup.rst b/doc/fluid/api/nn/linear_lr_warmup.rst new file mode 100644 index 0000000000000000000000000000000000000000..e778f3061778ac4ae3d4fd10510a9ad0142294f7 --- /dev/null +++ b/doc/fluid/api/nn/linear_lr_warmup.rst @@ -0,0 +1,7 @@ +.. _api_nn_linear_lr_warmup: + +linear_lr_warmup +------------------------------- +:doc_source: paddle.fluid.layers.linear_lr_warmup + + diff --git a/doc/fluid/api/nn/log_loss.rst b/doc/fluid/api/nn/log_loss.rst new file mode 100644 index 0000000000000000000000000000000000000000..f57ea2cfa0cb472ed3a19eb377f1d8d5c0a1c752 --- /dev/null +++ b/doc/fluid/api/nn/log_loss.rst @@ -0,0 +1,7 @@ +.. _api_nn_log_loss: + +log_loss +------------------------------- +:doc_source: paddle.fluid.layers.log_loss + + diff --git a/doc/fluid/api/nn/log_sigmoid.rst b/doc/fluid/api/nn/log_sigmoid.rst new file mode 100644 index 0000000000000000000000000000000000000000..c8df00e2f286bf8bf4864b8f708e8d1fe5453034 --- /dev/null +++ b/doc/fluid/api/nn/log_sigmoid.rst @@ -0,0 +1,9 @@ +.. _api_nn_log_sigmoid: + +log_sigmoid +------------------------------- + +.. autofunction:: paddle.nn.functional.log_sigmoid + :noindex: + + diff --git a/doc/fluid/api/nn/log_softmax.rst b/doc/fluid/api/nn/log_softmax.rst new file mode 100644 index 0000000000000000000000000000000000000000..88e8b52219798fb016f567414ac88157e4e107b6 --- /dev/null +++ b/doc/fluid/api/nn/log_softmax.rst @@ -0,0 +1,10 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_nn_log_softmax: + +log_softmax +----------- + +.. autofunction:: paddle.nn.functional.log_softmax + :noindex: \ No newline at end of file diff --git a/doc/fluid/api/nn/loss.rst b/doc/fluid/api/nn/loss.rst new file mode 100644 index 0000000000000000000000000000000000000000..c9c351b78183ad0fea22a5df9505176a81199b37 --- /dev/null +++ b/doc/fluid/api/nn/loss.rst @@ -0,0 +1,16 @@ +==== +loss +==== + +.. toctree:: + :maxdepth: 1 + + loss/BCELoss.rst + loss/BCEWithLogitsLoss.rst + loss/CrossEntropyLoss.rst + loss/L1Loss.rst + loss/MSELoss.rst + loss/NLLLoss.rst + loss/KLDivLoss.rst + loss/SmoothL1Loss.rst + loss/CTCLoss.rst diff --git a/doc/fluid/api/nn/loss/BCELoss.rst b/doc/fluid/api/nn/loss/BCELoss.rst new file mode 100644 index 0000000000000000000000000000000000000000..2dd78661c46835024ad4826c6108a3115e329e45 --- /dev/null +++ b/doc/fluid/api/nn/loss/BCELoss.rst @@ -0,0 +1,13 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_nn_loss_BCELoss: + +BCELoss +------------------------------- + +.. autoclass:: paddle.nn.loss.BCELoss + :members: + :inherited-members: + :noindex: + diff --git a/doc/fluid/api/nn/loss/BCEWithLogitsLoss.rst b/doc/fluid/api/nn/loss/BCEWithLogitsLoss.rst new file mode 100644 index 0000000000000000000000000000000000000000..03bc6f4e4309d49152cec2b264a9ec9ebad5b373 --- /dev/null +++ b/doc/fluid/api/nn/loss/BCEWithLogitsLoss.rst @@ -0,0 +1,13 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_nn_loss_BCEWithLogitsLoss: + +BCEWithLogitsLoss +------------------------------- + +.. 
autoclass:: paddle.nn.loss.BCEWithLogitsLoss + :members: + :inherited-members: + :noindex: + diff --git a/doc/fluid/api/nn/loss/CTCLoss.rst b/doc/fluid/api/nn/loss/CTCLoss.rst new file mode 100644 index 0000000000000000000000000000000000000000..13ce8ac53cac0808338ca95ff152c491b79706a2 --- /dev/null +++ b/doc/fluid/api/nn/loss/CTCLoss.rst @@ -0,0 +1,13 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_nn_loss_CTCLoss: + +CTCLoss +------- + +.. autoclass:: paddle.nn.loss.CTCLoss + :members: + :inherited-members: + :noindex: + diff --git a/doc/fluid/api/nn/loss/KLDivLoss.rst b/doc/fluid/api/nn/loss/KLDivLoss.rst new file mode 100644 index 0000000000000000000000000000000000000000..797e815e29bfa1e42acb446461a8df499c961469 --- /dev/null +++ b/doc/fluid/api/nn/loss/KLDivLoss.rst @@ -0,0 +1,13 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_nn_loss_KLDivLoss: + +KLDivLoss +------------------------------- + +.. autoclass:: paddle.nn.loss.KLDivLoss + :members: + :inherited-members: + :noindex: + diff --git a/doc/fluid/api/nn/loss/L1Loss.rst b/doc/fluid/api/nn/loss/L1Loss.rst new file mode 100644 index 0000000000000000000000000000000000000000..161cb38c80a87c4ba38ed685133a705811ec9103 --- /dev/null +++ b/doc/fluid/api/nn/loss/L1Loss.rst @@ -0,0 +1,13 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_nn_loss_L1Loss: + +L1Loss +------ + +.. autoclass:: paddle.nn.loss.L1Loss + :members: + :inherited-members: + :noindex: + diff --git a/doc/fluid/api/nn/loss/NLLLoss.rst b/doc/fluid/api/nn/loss/NLLLoss.rst new file mode 100644 index 0000000000000000000000000000000000000000..c1a0c26de51b8869a8eccb2150c8e5635159f1de --- /dev/null +++ b/doc/fluid/api/nn/loss/NLLLoss.rst @@ -0,0 +1,13 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_nn_loss_NLLLoss: + +NLLLoss +------------------------------- + +.. autoclass:: paddle.nn.loss.NLLLoss + :members: + :inherited-members: + :noindex: + diff --git a/doc/fluid/api/nn/loss/SmoothL1Loss.rst b/doc/fluid/api/nn/loss/SmoothL1Loss.rst new file mode 100644 index 0000000000000000000000000000000000000000..7f5654a5587bde56359b2c59fd5c6a4df5a08208 --- /dev/null +++ b/doc/fluid/api/nn/loss/SmoothL1Loss.rst @@ -0,0 +1,13 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_nn_loss_SmoothL1Loss: + +SmoothL1Loss +------------------------------- + +.. autoclass:: paddle.nn.loss.SmoothL1Loss + :members: + :inherited-members: + :noindex: + diff --git a/doc/fluid/api/nn/lrn.rst b/doc/fluid/api/nn/lrn.rst new file mode 100644 index 0000000000000000000000000000000000000000..40bb92de7fac547a62a9136c453121bb05dc609e --- /dev/null +++ b/doc/fluid/api/nn/lrn.rst @@ -0,0 +1,7 @@ +.. _api_nn_lrn: + +lrn +------------------------------- +:doc_source: paddle.fluid.layers.lrn + + diff --git a/doc/fluid/api/nn/matrix_nms.rst b/doc/fluid/api/nn/matrix_nms.rst new file mode 100644 index 0000000000000000000000000000000000000000..49529d0faf1118dc3c61018d1be232b5d7ff5b63 --- /dev/null +++ b/doc/fluid/api/nn/matrix_nms.rst @@ -0,0 +1,5 @@ +.. 
_api_nn_matrix_nms: + +matrix_nms +------------------------------- +:doc_source: paddle.fluid.layers.matrix_nms diff --git a/doc/fluid/api/nn/maxout.rst b/doc/fluid/api/nn/maxout.rst new file mode 100644 index 0000000000000000000000000000000000000000..dab8bb5de3e7f5346e0ebb3118f34fa63152c28a --- /dev/null +++ b/doc/fluid/api/nn/maxout.rst @@ -0,0 +1,7 @@ +.. _api_nn_maxout: + +maxout +------------------------------- +:doc_source: paddle.fluid.layers.maxout + + diff --git a/doc/fluid/api/nn/mse_loss.rst b/doc/fluid/api/nn/mse_loss.rst new file mode 100644 index 0000000000000000000000000000000000000000..30ed19b977a9cd960c70b1a53dcc52aa28bfe16f --- /dev/null +++ b/doc/fluid/api/nn/mse_loss.rst @@ -0,0 +1,7 @@ +.. _api_nn_mse_loss: + +mse_loss +------------------------------- +:doc_source: paddle.fluid.layers.mse_loss + + diff --git a/doc/fluid/api/nn/multiclass_nms.rst b/doc/fluid/api/nn/multiclass_nms.rst new file mode 100644 index 0000000000000000000000000000000000000000..b81b4a72a50185e4b815d9b252d5747ed6890ef4 --- /dev/null +++ b/doc/fluid/api/nn/multiclass_nms.rst @@ -0,0 +1,7 @@ +.. _api_nn_multiclass_nms: + +multiclass_nms +------------------------------- +:doc_source: paddle.fluid.layers.multiclass_nms + + diff --git a/doc/fluid/api/nn/natural_exp_decay.rst b/doc/fluid/api/nn/natural_exp_decay.rst new file mode 100644 index 0000000000000000000000000000000000000000..9ea89ca41ccca7e712da71ef89cf6851e17548a6 --- /dev/null +++ b/doc/fluid/api/nn/natural_exp_decay.rst @@ -0,0 +1,7 @@ +.. _api_nn_natural_exp_decay: + +natural_exp_decay +------------------------------- +:doc_source: paddle.fluid.layers.natural_exp_decay + + diff --git a/doc/fluid/api/nn/noam_decay.rst b/doc/fluid/api/nn/noam_decay.rst new file mode 100644 index 0000000000000000000000000000000000000000..dc1b2e6086e96c9aeba4c2bd0d14de5ebdf9b360 --- /dev/null +++ b/doc/fluid/api/nn/noam_decay.rst @@ -0,0 +1,7 @@ +.. _api_nn_noam_decay: + +noam_decay +------------------------------- +:doc_source: paddle.fluid.layers.noam_decay + + diff --git a/doc/fluid/api/nn/npair_loss.rst b/doc/fluid/api/nn/npair_loss.rst new file mode 100644 index 0000000000000000000000000000000000000000..029cb1d9bc127d2dbfb1d954300192e3636130bb --- /dev/null +++ b/doc/fluid/api/nn/npair_loss.rst @@ -0,0 +1,7 @@ +.. _api_nn_npair_loss: + +npair_loss +------------------------------- +:doc_source: paddle.fluid.layers.npair_loss + + diff --git a/doc/fluid/api/nn/one_hot.rst b/doc/fluid/api/nn/one_hot.rst new file mode 100644 index 0000000000000000000000000000000000000000..f017ed733490ba1226ab7e807fee14f85bc0308b --- /dev/null +++ b/doc/fluid/api/nn/one_hot.rst @@ -0,0 +1,7 @@ +.. _api_nn_one_hot: + +one_hot +------------------------------- +:doc_source: paddle.fluid.one_hot + + diff --git a/doc/fluid/api/nn/pad.rst b/doc/fluid/api/nn/pad.rst new file mode 100644 index 0000000000000000000000000000000000000000..4459de858d4d80544d92acff9ca3e62c8446e890 --- /dev/null +++ b/doc/fluid/api/nn/pad.rst @@ -0,0 +1,7 @@ +.. _api_nn_pad: + +pad +------------------------------- +:doc_source: paddle.nn.functional.pad + + diff --git a/doc/fluid/api/nn/pad2d.rst b/doc/fluid/api/nn/pad2d.rst new file mode 100644 index 0000000000000000000000000000000000000000..46004778395c7d080549eaa190d3539a78734a15 --- /dev/null +++ b/doc/fluid/api/nn/pad2d.rst @@ -0,0 +1,7 @@ +.. 
_api_nn_pad2d: + +pad2d +------------------------------- +:doc_source: paddle.fluid.layers.pad2d + + diff --git a/doc/fluid/api/nn/pad_constant_like.rst b/doc/fluid/api/nn/pad_constant_like.rst new file mode 100644 index 0000000000000000000000000000000000000000..cb37618896562f128fd0e0a46a7f3422b6c13891 --- /dev/null +++ b/doc/fluid/api/nn/pad_constant_like.rst @@ -0,0 +1,7 @@ +.. _api_nn_pad_constant_like: + +pad_constant_like +------------------------------- +:doc_source: paddle.fluid.layers.pad_constant_like + + diff --git a/doc/fluid/api/nn/piecewise_decay.rst b/doc/fluid/api/nn/piecewise_decay.rst new file mode 100644 index 0000000000000000000000000000000000000000..3fedfda0477f2e46eca6a0ab22c92a1f54a21039 --- /dev/null +++ b/doc/fluid/api/nn/piecewise_decay.rst @@ -0,0 +1,7 @@ +.. _api_nn_piecewise_decay: + +piecewise_decay +------------------------------- +:doc_source: paddle.fluid.layers.piecewise_decay + + diff --git a/doc/fluid/api/nn/pixel_shuffle.rst b/doc/fluid/api/nn/pixel_shuffle.rst new file mode 100644 index 0000000000000000000000000000000000000000..d10cd106f3d1fb5ae71fd4ef700531e6ee1f253f --- /dev/null +++ b/doc/fluid/api/nn/pixel_shuffle.rst @@ -0,0 +1,7 @@ +.. _api_nn_pixel_shuffle: + +pixel_shuffle +------------------------------- +:doc_source: paddle.fluid.layers.pixel_shuffle + + diff --git a/doc/fluid/api/nn/polygon_box_transform.rst b/doc/fluid/api/nn/polygon_box_transform.rst new file mode 100644 index 0000000000000000000000000000000000000000..a993817b38a16227a04d1553f39881ea38ca8ef1 --- /dev/null +++ b/doc/fluid/api/nn/polygon_box_transform.rst @@ -0,0 +1,7 @@ +.. _api_nn_polygon_box_transform: + +polygon_box_transform +------------------------------- +:doc_source: paddle.fluid.layers.polygon_box_transform + + diff --git a/doc/fluid/api/nn/polynomial_decay.rst b/doc/fluid/api/nn/polynomial_decay.rst new file mode 100644 index 0000000000000000000000000000000000000000..5f810058e42e80c9f6aab7d2b72ab3b615e16135 --- /dev/null +++ b/doc/fluid/api/nn/polynomial_decay.rst @@ -0,0 +1,7 @@ +.. _api_nn_polynomial_decay: + +polynomial_decay +------------------------------- +:doc_source: paddle.fluid.layers.polynomial_decay + + diff --git a/doc/fluid/api/nn/pool3d.rst b/doc/fluid/api/nn/pool3d.rst new file mode 100644 index 0000000000000000000000000000000000000000..efc4abfb951ac7d79550d86adec725bb888aceaa --- /dev/null +++ b/doc/fluid/api/nn/pool3d.rst @@ -0,0 +1,7 @@ +.. _api_nn_pool3d: + +pool3d +------------------------------- +:doc_source: paddle.fluid.layers.pool3d + + diff --git a/doc/fluid/api/nn/prelu.rst b/doc/fluid/api/nn/prelu.rst new file mode 100644 index 0000000000000000000000000000000000000000..9de04524eca18ea1b1ad3d71676bae0a5c64a273 --- /dev/null +++ b/doc/fluid/api/nn/prelu.rst @@ -0,0 +1,7 @@ +.. _api_nn_prelu: + +prelu +------------------------------- + +.. autofunction:: paddle.nn.functional.prelu + :noindex: \ No newline at end of file diff --git a/doc/fluid/api/nn/prior_box.rst b/doc/fluid/api/nn/prior_box.rst new file mode 100644 index 0000000000000000000000000000000000000000..774f0d16865392229eb2f3cb71dee466ddf6109f --- /dev/null +++ b/doc/fluid/api/nn/prior_box.rst @@ -0,0 +1,7 @@ +.. _api_nn_prior_box: + +prior_box +------------------------------- +:doc_source: paddle.fluid.layers.prior_box + + diff --git a/doc/fluid/api/nn/prroi_pool.rst b/doc/fluid/api/nn/prroi_pool.rst new file mode 100644 index 0000000000000000000000000000000000000000..b65c861e3fc21ea59995f4c7e30c820aa44f23ae --- /dev/null +++ b/doc/fluid/api/nn/prroi_pool.rst @@ -0,0 +1,7 @@ +.. 
_api_nn_prroi_pool: + +prroi_pool +------------------------------- +:doc_source: paddle.fluid.layers.prroi_pool + + diff --git a/doc/fluid/api/nn/psroi_pool.rst b/doc/fluid/api/nn/psroi_pool.rst new file mode 100644 index 0000000000000000000000000000000000000000..57dbd9aff8936a83f4cce749b8819492c2acdc0c --- /dev/null +++ b/doc/fluid/api/nn/psroi_pool.rst @@ -0,0 +1,7 @@ +.. _api_nn_psroi_pool: + +psroi_pool +------------------------------- +:doc_source: paddle.fluid.layers.psroi_pool + + diff --git a/doc/fluid/api/nn/random_crop.rst b/doc/fluid/api/nn/random_crop.rst new file mode 100644 index 0000000000000000000000000000000000000000..6a20767bd9dc3d7be494f890e4109be41e7d7f8f --- /dev/null +++ b/doc/fluid/api/nn/random_crop.rst @@ -0,0 +1,7 @@ +.. _api_nn_random_crop: + +random_crop +------------------------------- +:doc_source: paddle.fluid.layers.random_crop + + diff --git a/doc/fluid/api/nn/rank_loss.rst b/doc/fluid/api/nn/rank_loss.rst new file mode 100644 index 0000000000000000000000000000000000000000..67bbd6cf8732fee639da0e582ca57a3ed8050616 --- /dev/null +++ b/doc/fluid/api/nn/rank_loss.rst @@ -0,0 +1,7 @@ +.. _api_nn_rank_loss: + +rank_loss +------------------------------- +:doc_source: paddle.fluid.layers.rank_loss + + diff --git a/doc/fluid/api/nn/relu.rst b/doc/fluid/api/nn/relu.rst new file mode 100644 index 0000000000000000000000000000000000000000..b186a2b174d77ad0fc635d169fa065b659358c99 --- /dev/null +++ b/doc/fluid/api/nn/relu.rst @@ -0,0 +1,7 @@ +.. _api_nn_ReLU: + +ReLU +------------------------------- +:doc_source: paddle.fluid.layers.relu + + diff --git a/doc/fluid/api/nn/relu6.rst b/doc/fluid/api/nn/relu6.rst new file mode 100644 index 0000000000000000000000000000000000000000..1346795b4166c57470ea65a96e36e08e9e5b879d --- /dev/null +++ b/doc/fluid/api/nn/relu6.rst @@ -0,0 +1,9 @@ +.. _api_nn_relu6: + +relu6 +---------- + +.. autofunction:: paddle.nn.functional.relu6 + :noindex: + + diff --git a/doc/fluid/api/nn/resize_bilinear.rst b/doc/fluid/api/nn/resize_bilinear.rst new file mode 100644 index 0000000000000000000000000000000000000000..1d07a29455d0d5480eaddf3a2e2bcfe7eb6897ad --- /dev/null +++ b/doc/fluid/api/nn/resize_bilinear.rst @@ -0,0 +1,7 @@ +.. _api_nn_resize_bilinear: + +resize_bilinear +------------------------------- +:doc_source: paddle.fluid.layers.resize_bilinear + + diff --git a/doc/fluid/api/nn/resize_nearest.rst b/doc/fluid/api/nn/resize_nearest.rst new file mode 100644 index 0000000000000000000000000000000000000000..ce415988779dac88cf8179da59913a0fc49a99fb --- /dev/null +++ b/doc/fluid/api/nn/resize_nearest.rst @@ -0,0 +1,7 @@ +.. _api_nn_resize_nearest: + +resize_nearest +------------------------------- +:doc_source: paddle.fluid.layers.resize_nearest + + diff --git a/doc/fluid/api/nn/resize_trilinear.rst b/doc/fluid/api/nn/resize_trilinear.rst new file mode 100644 index 0000000000000000000000000000000000000000..f138279c3a0ecb287e27fdb495786d8fe625c6a4 --- /dev/null +++ b/doc/fluid/api/nn/resize_trilinear.rst @@ -0,0 +1,7 @@ +.. _api_nn_resize_trilinear: + +resize_trilinear +------------------------------- +:doc_source: paddle.fluid.layers.resize_trilinear + + diff --git a/doc/fluid/api/nn/retinanet_detection_output.rst b/doc/fluid/api/nn/retinanet_detection_output.rst new file mode 100644 index 0000000000000000000000000000000000000000..7e87d6d6e2508ed207478d604358ad98e9a325d5 --- /dev/null +++ b/doc/fluid/api/nn/retinanet_detection_output.rst @@ -0,0 +1,7 @@ +.. 
_api_nn_retinanet_detection_output: + +retinanet_detection_output +------------------------------- +:doc_source: paddle.fluid.layers.retinanet_detection_output + + diff --git a/doc/fluid/api/nn/retinanet_target_assign.rst b/doc/fluid/api/nn/retinanet_target_assign.rst new file mode 100644 index 0000000000000000000000000000000000000000..35ddef7d38f4e59c7764004c0ae729865ee7cc8b --- /dev/null +++ b/doc/fluid/api/nn/retinanet_target_assign.rst @@ -0,0 +1,7 @@ +.. _api_nn_retinanet_target_assign: + +retinanet_target_assign +------------------------------- +:doc_source: paddle.fluid.layers.retinanet_target_assign + + diff --git a/doc/fluid/api/nn/roi_align.rst b/doc/fluid/api/nn/roi_align.rst new file mode 100644 index 0000000000000000000000000000000000000000..d8de66d0ca330b5c00502d6b31d685d769f42616 --- /dev/null +++ b/doc/fluid/api/nn/roi_align.rst @@ -0,0 +1,7 @@ +.. _api_nn_roi_align: + +roi_align +------------------------------- +:doc_source: paddle.fluid.layers.roi_align + + diff --git a/doc/fluid/api/nn/roi_perspective_transform.rst b/doc/fluid/api/nn/roi_perspective_transform.rst new file mode 100644 index 0000000000000000000000000000000000000000..a8a1f3b6456f806822f818dbb6e8dfa890edd17b --- /dev/null +++ b/doc/fluid/api/nn/roi_perspective_transform.rst @@ -0,0 +1,7 @@ +.. _api_nn_roi_perspective_transform: + +roi_perspective_transform +------------------------------- +:doc_source: paddle.fluid.layers.roi_perspective_transform + + diff --git a/doc/fluid/api/nn/roi_pool.rst b/doc/fluid/api/nn/roi_pool.rst new file mode 100644 index 0000000000000000000000000000000000000000..c5df77edae18d5abfae0683e1706b6f86a12b18e --- /dev/null +++ b/doc/fluid/api/nn/roi_pool.rst @@ -0,0 +1,7 @@ +.. _api_nn_roi_pool: + +roi_pool +------------------------------- +:doc_source: paddle.fluid.layers.roi_pool + + diff --git a/doc/fluid/api/nn/row_conv.rst b/doc/fluid/api/nn/row_conv.rst new file mode 100644 index 0000000000000000000000000000000000000000..93eb2dc4dd89a038653e9cba728cff085e713e70 --- /dev/null +++ b/doc/fluid/api/nn/row_conv.rst @@ -0,0 +1,7 @@ +.. _api_nn_row_conv: + +row_conv +------------------------------- +:doc_source: paddle.fluid.layers.row_conv + + diff --git a/doc/fluid/api/nn/rpn_target_assign.rst b/doc/fluid/api/nn/rpn_target_assign.rst new file mode 100644 index 0000000000000000000000000000000000000000..91c2213e7bed2b094f64fce29d51720b97304e19 --- /dev/null +++ b/doc/fluid/api/nn/rpn_target_assign.rst @@ -0,0 +1,7 @@ +.. _api_nn_rpn_target_assign: + +rpn_target_assign +------------------------------- +:doc_source: paddle.fluid.layers.rpn_target_assign + + diff --git a/doc/fluid/api/nn/sampled_softmax_with_cross_entropy.rst b/doc/fluid/api/nn/sampled_softmax_with_cross_entropy.rst new file mode 100644 index 0000000000000000000000000000000000000000..a9619939163ac5bc1ed64e24d197975bc81ce28f --- /dev/null +++ b/doc/fluid/api/nn/sampled_softmax_with_cross_entropy.rst @@ -0,0 +1,7 @@ +.. _api_nn_sampled_softmax_with_cross_entropy: + +sampled_softmax_with_cross_entropy +------------------------------- +:doc_source: paddle.fluid.layers.sampled_softmax_with_cross_entropy + + diff --git a/doc/fluid/api/nn/selu.rst b/doc/fluid/api/nn/selu.rst new file mode 100644 index 0000000000000000000000000000000000000000..cce236e7c93fec18b2eba99ae55a107e4a556a39 --- /dev/null +++ b/doc/fluid/api/nn/selu.rst @@ -0,0 +1,9 @@ +.. _api_nn_selu: + +selu +---------- + +.. 
autofunction:: paddle.nn.functional.selu + :noindex: + + diff --git a/doc/fluid/api/nn/shuffle_channel.rst b/doc/fluid/api/nn/shuffle_channel.rst new file mode 100644 index 0000000000000000000000000000000000000000..746eadbd98b7974655993b4c871f1834e6b6a386 --- /dev/null +++ b/doc/fluid/api/nn/shuffle_channel.rst @@ -0,0 +1,7 @@ +.. _api_nn_shuffle_channel: + +shuffle_channel +------------------------------- +:doc_source: paddle.fluid.layers.shuffle_channel + + diff --git a/doc/fluid/api/nn/sigmoid_cross_entropy_with_logits.rst b/doc/fluid/api/nn/sigmoid_cross_entropy_with_logits.rst new file mode 100644 index 0000000000000000000000000000000000000000..a6b2fef68f11a716eef991c239d9c3eeb8701b1e --- /dev/null +++ b/doc/fluid/api/nn/sigmoid_cross_entropy_with_logits.rst @@ -0,0 +1,7 @@ +.. _api_nn_sigmoid_cross_entropy_with_logits: + +sigmoid_cross_entropy_with_logits +------------------------------- +:doc_source: paddle.fluid.layers.sigmoid_cross_entropy_with_logits + + diff --git a/doc/fluid/api/nn/sigmoid_focal_loss.rst b/doc/fluid/api/nn/sigmoid_focal_loss.rst new file mode 100644 index 0000000000000000000000000000000000000000..9ea53101ffe367a8233922049413ad78b8690ebc --- /dev/null +++ b/doc/fluid/api/nn/sigmoid_focal_loss.rst @@ -0,0 +1,7 @@ +.. _api_nn_sigmoid_focal_loss: + +sigmoid_focal_loss +------------------------------- +:doc_source: paddle.fluid.layers.sigmoid_focal_loss + + diff --git a/doc/fluid/api/nn/similarity_focus.rst b/doc/fluid/api/nn/similarity_focus.rst new file mode 100644 index 0000000000000000000000000000000000000000..7f3bacb72cfcb2ec7a33b5d38f8df8748212f62d --- /dev/null +++ b/doc/fluid/api/nn/similarity_focus.rst @@ -0,0 +1,7 @@ +.. _api_nn_similarity_focus: + +similarity_focus +------------------------------- +:doc_source: paddle.fluid.layers.similarity_focus + + diff --git a/doc/fluid/api/nn/smooth_l1.rst b/doc/fluid/api/nn/smooth_l1.rst new file mode 100644 index 0000000000000000000000000000000000000000..6329397aed97a52ae3b2adeb0b0f5456c2f925d5 --- /dev/null +++ b/doc/fluid/api/nn/smooth_l1.rst @@ -0,0 +1,7 @@ +.. _api_nn_smooth_l1: + +smooth_l1 +------------------------------- +:doc_source: paddle.fluid.layers.smooth_l1 + + diff --git a/doc/fluid/api/nn/soft_relu.rst b/doc/fluid/api/nn/soft_relu.rst new file mode 100644 index 0000000000000000000000000000000000000000..bcf0ad835e3054a58bc833f73f671a58be48e638 --- /dev/null +++ b/doc/fluid/api/nn/soft_relu.rst @@ -0,0 +1,7 @@ +.. _api_nn_soft_relu: + +soft_relu +------------------------------- +:doc_source: paddle.fluid.layers.soft_relu + + diff --git a/doc/fluid/api/nn/softmax.rst b/doc/fluid/api/nn/softmax.rst new file mode 100644 index 0000000000000000000000000000000000000000..f97889cdd33799cec1af362a0a22c47de6ba0feb --- /dev/null +++ b/doc/fluid/api/nn/softmax.rst @@ -0,0 +1,8 @@ +.. _api_nn_softmax: + +softmax +------------------------------- + +.. autofunction:: paddle.nn.functional.softmax + :noindex: + diff --git a/doc/fluid/api/nn/softmax_with_cross_entropy.rst b/doc/fluid/api/nn/softmax_with_cross_entropy.rst new file mode 100644 index 0000000000000000000000000000000000000000..fe70c8fd2ab487e9342336740b267b1b51edd729 --- /dev/null +++ b/doc/fluid/api/nn/softmax_with_cross_entropy.rst @@ -0,0 +1,7 @@ +.. 
_api_nn_softmax_with_cross_entropy: + +softmax_with_cross_entropy +------------------------------- +:doc_source: paddle.fluid.layers.softmax_with_cross_entropy + + diff --git a/doc/fluid/api/nn/softplus.rst b/doc/fluid/api/nn/softplus.rst new file mode 100644 index 0000000000000000000000000000000000000000..2c059b78c40d9ef5c7e7ce3fead8abda28e26d5f --- /dev/null +++ b/doc/fluid/api/nn/softplus.rst @@ -0,0 +1,9 @@ +.. _api_nn_softplus: + +softplus +---------- + +.. autofunction:: paddle.nn.functional.softplus + :noindex: + + diff --git a/doc/fluid/api/nn/softshrink.rst b/doc/fluid/api/nn/softshrink.rst new file mode 100644 index 0000000000000000000000000000000000000000..dce2b14edd22aa12df3eb6dc348a9d031d9de79d --- /dev/null +++ b/doc/fluid/api/nn/softshrink.rst @@ -0,0 +1,9 @@ +.. _api_nn_softshrink: + +softshrink +---------- + +.. autofunction:: paddle.nn.functional.softshrink + :noindex: + + diff --git a/doc/fluid/api/nn/softsign.rst b/doc/fluid/api/nn/softsign.rst new file mode 100644 index 0000000000000000000000000000000000000000..f2b85114794a14b3df7bab955bef7d7dd1bb06b3 --- /dev/null +++ b/doc/fluid/api/nn/softsign.rst @@ -0,0 +1,8 @@ +.. _api_nn_softsign: + +softsign +---------- + +.. autofunction:: paddle.nn.functional.softsign + :noindex: + diff --git a/doc/fluid/api/nn/space_to_depth.rst b/doc/fluid/api/nn/space_to_depth.rst new file mode 100644 index 0000000000000000000000000000000000000000..a68d52a828b6492b522381ce4d6490d2da93e18a --- /dev/null +++ b/doc/fluid/api/nn/space_to_depth.rst @@ -0,0 +1,7 @@ +.. _api_nn_space_to_depth: + +space_to_depth +------------------------------- +:doc_source: paddle.fluid.layers.space_to_depth + + diff --git a/doc/fluid/api/nn/square_error_cost.rst b/doc/fluid/api/nn/square_error_cost.rst new file mode 100644 index 0000000000000000000000000000000000000000..efaba14bd90383bf17ffca190d5f49cb4ac44e7f --- /dev/null +++ b/doc/fluid/api/nn/square_error_cost.rst @@ -0,0 +1,7 @@ +.. _api_nn_square_error_cost: + +square_error_cost +------------------------------- +:doc_source: paddle.fluid.layers.square_error_cost + + diff --git a/doc/fluid/api/nn/ssd_loss.rst b/doc/fluid/api/nn/ssd_loss.rst new file mode 100644 index 0000000000000000000000000000000000000000..c2eaab6fabf69930b9a761a059412e59ce135f6c --- /dev/null +++ b/doc/fluid/api/nn/ssd_loss.rst @@ -0,0 +1,7 @@ +.. _api_nn_ssd_loss: + +ssd_loss +------------------------------- +:doc_source: paddle.fluid.layers.ssd_loss + + diff --git a/doc/fluid/api/nn/swish.rst b/doc/fluid/api/nn/swish.rst new file mode 100644 index 0000000000000000000000000000000000000000..d5c06008d5085e24b99cdbeb4ffb32b3daf23169 --- /dev/null +++ b/doc/fluid/api/nn/swish.rst @@ -0,0 +1,7 @@ +.. _api_nn_swish: + +swish +------------------------------- +:doc_source: paddle.fluid.layers.swish + + diff --git a/doc/fluid/api/nn/switch_case.rst b/doc/fluid/api/nn/switch_case.rst new file mode 100644 index 0000000000000000000000000000000000000000..a5e260a6a26b7b78b0c4b1058ebb7dd8e8221650 --- /dev/null +++ b/doc/fluid/api/nn/switch_case.rst @@ -0,0 +1,7 @@ +.. _api_nn_switch_case: + +switch_case +------------------------------- +:doc_source: paddle.fluid.layers.switch_case + + diff --git a/doc/fluid/api/nn/tanhshrink.rst b/doc/fluid/api/nn/tanhshrink.rst new file mode 100644 index 0000000000000000000000000000000000000000..88160bcc6098f5566f86a4128a4f8a09e79384cc --- /dev/null +++ b/doc/fluid/api/nn/tanhshrink.rst @@ -0,0 +1,9 @@ +.. _api_nn_tanhshrink: + +tanhshrink +----------- + +.. 
autofunction:: paddle.nn.functional.tanhshrink + :noindex: + + diff --git a/doc/fluid/api/nn/target_assign.rst b/doc/fluid/api/nn/target_assign.rst new file mode 100644 index 0000000000000000000000000000000000000000..0a1337fafa87f830fa70458cafdad5fea62274df --- /dev/null +++ b/doc/fluid/api/nn/target_assign.rst @@ -0,0 +1,7 @@ +.. _api_nn_target_assign: + +target_assign +------------------------------- +:doc_source: paddle.fluid.layers.target_assign + + diff --git a/doc/fluid/api/nn/teacher_student_sigmoid_loss.rst b/doc/fluid/api/nn/teacher_student_sigmoid_loss.rst new file mode 100644 index 0000000000000000000000000000000000000000..d31dbd96f7cb4abb70d94e9dde8927c6e7cea3d3 --- /dev/null +++ b/doc/fluid/api/nn/teacher_student_sigmoid_loss.rst @@ -0,0 +1,7 @@ +.. _api_nn_teacher_student_sigmoid_loss: + +teacher_student_sigmoid_loss +------------------------------- +:doc_source: paddle.fluid.layers.teacher_student_sigmoid_loss + + diff --git a/doc/fluid/api/nn/temporal_shift.rst b/doc/fluid/api/nn/temporal_shift.rst new file mode 100644 index 0000000000000000000000000000000000000000..85eb76bad6ad68e0267f9a77b8d893040133ee6b --- /dev/null +++ b/doc/fluid/api/nn/temporal_shift.rst @@ -0,0 +1,7 @@ +.. _api_nn_temporal_shift: + +temporal_shift +------------------------------- +:doc_source: paddle.fluid.layers.temporal_shift + + diff --git a/doc/fluid/api/nn/thresholded_relu.rst b/doc/fluid/api/nn/thresholded_relu.rst new file mode 100644 index 0000000000000000000000000000000000000000..b35c9d260c25f4b47d487e0d0b6eec1e43540937 --- /dev/null +++ b/doc/fluid/api/nn/thresholded_relu.rst @@ -0,0 +1,7 @@ +.. _api_nn_thresholded_relu: + +thresholded_relu +------------------------------- +:doc_source: paddle.fluid.layers.thresholded_relu + + diff --git a/doc/fluid/api/nn/unfold.rst b/doc/fluid/api/nn/unfold.rst new file mode 100644 index 0000000000000000000000000000000000000000..9427a16eeda20d5c68542257f8ca08d6c8ac2f3a --- /dev/null +++ b/doc/fluid/api/nn/unfold.rst @@ -0,0 +1,7 @@ +.. _api_nn_unfold: + +unfold +------------------------------- +:doc_source: paddle.fluid.layers.unfold + + diff --git a/doc/fluid/api/nn/warpctc.rst b/doc/fluid/api/nn/warpctc.rst new file mode 100644 index 0000000000000000000000000000000000000000..a7d791d58a4e985f620be9b43d2134422be9f8c8 --- /dev/null +++ b/doc/fluid/api/nn/warpctc.rst @@ -0,0 +1,7 @@ +.. _api_nn_warpctc: + +warpctc +------------------------------- +:doc_source: paddle.fluid.layers.warpctc + + diff --git a/doc/fluid/api/nn/while_loop.rst b/doc/fluid/api/nn/while_loop.rst new file mode 100644 index 0000000000000000000000000000000000000000..9083e5c9184fb2b73462ccccac0c3ec74408ca1e --- /dev/null +++ b/doc/fluid/api/nn/while_loop.rst @@ -0,0 +1,7 @@ +.. _api_nn_while_loop: + +while_loop +------------------------------- +:doc_source: paddle.fluid.layers.while_loop + + diff --git a/doc/fluid/api/nn/yolo_box.rst b/doc/fluid/api/nn/yolo_box.rst new file mode 100644 index 0000000000000000000000000000000000000000..dc6394dc027fea0a9b66172a221f921b2727c7a2 --- /dev/null +++ b/doc/fluid/api/nn/yolo_box.rst @@ -0,0 +1,7 @@ +.. _api_nn_yolo_box: + +yolo_box +------------------------------- +:doc_source: paddle.fluid.layers.yolo_box + + diff --git a/doc/fluid/api/nn/yolov3_loss.rst b/doc/fluid/api/nn/yolov3_loss.rst new file mode 100644 index 0000000000000000000000000000000000000000..6b47f3c3b640725c35fcd34b6333782fdfb8a0a0 --- /dev/null +++ b/doc/fluid/api/nn/yolov3_loss.rst @@ -0,0 +1,7 @@ +.. 
_api_nn_yolov3_loss: + +yolov3_loss +------------------------------- +:doc_source: paddle.fluid.layers.yolov3_loss + + diff --git a/doc/fluid/api/optimizer.rst b/doc/fluid/api/optimizer.rst index 6f0377a58632782c3f12ccfbad658224286af3b5..b71f55f55e9791c4d0b5790449965aa6f4d4098e 100644 --- a/doc/fluid/api/optimizer.rst +++ b/doc/fluid/api/optimizer.rst @@ -1,6 +1,6 @@ -=============== -fluid.optimizer -=============== +====================== +paddle.optimizer +====================== .. toctree:: :maxdepth: 1 @@ -11,8 +11,7 @@ fluid.optimizer optimizer/AdagradOptimizer.rst optimizer/Adam.rst optimizer/Adamax.rst - optimizer/AdamaxOptimizer.rst - optimizer/AdamOptimizer.rst + optimizer/AdamW.rst optimizer/DecayedAdagrad.rst optimizer/DecayedAdagradOptimizer.rst optimizer/DGCMomentumOptimizer.rst @@ -28,8 +27,20 @@ fluid.optimizer optimizer/ModelAverage.rst optimizer/Momentum.rst optimizer/MomentumOptimizer.rst - optimizer/PipelineOptimizer.rst optimizer/RecomputeOptimizer.rst - optimizer/RMSPropOptimizer.rst + optimizer/RMSProp.rst optimizer/SGD.rst optimizer/SGDOptimizer.rst + optimizer/Optimizer.rst + optimizer/NoamLR.rst + optimizer/PiecewiseLR.rst + optimizer/NaturalExpLR.rst + optimizer/InverseTimeLR.rst + optimizer/PolynomialLR.rst + optimizer/LinearLrWarmup.rst + optimizer/ExponentialLR.rst + optimizer/MultiStepLR.rst + optimizer/StepLR.rst + optimizer/LambdaLR.rst + optimizer/ReduceLROnPlateau.rst + optimizer/CosineAnnealingLR.rst diff --git a/doc/fluid/api/optimizer/Adadelta.rst b/doc/fluid/api/optimizer/Adadelta.rst index cba6c6fc6f6f5743c883b9be706e7ded8355531a..3d4d505c5a37304dfb9a1827a07900f392b06eb3 100644 --- a/doc/fluid/api/optimizer/Adadelta.rst +++ b/doc/fluid/api/optimizer/Adadelta.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_Adadelta: +.. _api_optimizer_Adadelta: Adadelta -------- -.. autoclass:: paddle.fluid.optimizer.Adadelta +.. autoclass:: paddle.optimizer.Adadelta :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/AdadeltaOptimizer.rst b/doc/fluid/api/optimizer/AdadeltaOptimizer.rst index 14692902f21bafcac34ca0460de71576f47cfb5b..160a64b4d2d9bc0496c857e42c96295650ebef89 100644 --- a/doc/fluid/api/optimizer/AdadeltaOptimizer.rst +++ b/doc/fluid/api/optimizer/AdadeltaOptimizer.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_AdadeltaOptimizer: +.. _api_optimizer_AdadeltaOptimizer: AdadeltaOptimizer ----------------- -.. autoclass:: paddle.fluid.optimizer.AdadeltaOptimizer +.. autoclass:: paddle.optimizer.AdadeltaOptimizer :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/Adagrad.rst b/doc/fluid/api/optimizer/Adagrad.rst index b955fa9f7df88b14eb749695f9af6e4c5e704f19..deef2879fbe595a7b8f81980e729c35f9d5d4209 100644 --- a/doc/fluid/api/optimizer/Adagrad.rst +++ b/doc/fluid/api/optimizer/Adagrad.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_Adagrad: +.. _api_optimizer_Adagrad: Adagrad ------- -.. autoclass:: paddle.fluid.optimizer.Adagrad +.. 
autoclass:: paddle.optimizer.Adagrad :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/AdagradOptimizer.rst b/doc/fluid/api/optimizer/AdagradOptimizer.rst index e52a9de102f86f386ceb38c65325e7ac53c34982..e125000739edf62aedfca8f40e3782730f1ca0c9 100644 --- a/doc/fluid/api/optimizer/AdagradOptimizer.rst +++ b/doc/fluid/api/optimizer/AdagradOptimizer.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_AdagradOptimizer: +.. _api_optimizer_AdagradOptimizer: AdagradOptimizer ---------------- -.. autoclass:: paddle.fluid.optimizer.AdagradOptimizer +.. autoclass:: paddle.optimizer.AdagradOptimizer :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/Adam.rst b/doc/fluid/api/optimizer/Adam.rst index 4c7f6fd062f5b25d7202554eab1dc01546cebc14..f9bbe07dc855d9610e8c85892bd188b91cbd6aa4 100644 --- a/doc/fluid/api/optimizer/Adam.rst +++ b/doc/fluid/api/optimizer/Adam.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_Adam: +.. _api_optimizer_Adam: Adam ---- -.. autoclass:: paddle.fluid.optimizer.Adam +.. autoclass:: paddle.optimizer.Adam :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/AdamOptimizer.rst b/doc/fluid/api/optimizer/AdamOptimizer.rst deleted file mode 100644 index 9a966f54c29bbef6153c099e373797deb3ae8995..0000000000000000000000000000000000000000 --- a/doc/fluid/api/optimizer/AdamOptimizer.rst +++ /dev/null @@ -1,14 +0,0 @@ -.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` - !DO NOT EDIT THIS FILE MANUALLY! - -.. _api_fluid_optimizer_AdamOptimizer: - -AdamOptimizer -------------- - -.. autoclass:: paddle.fluid.optimizer.AdamOptimizer - :members: - :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load - :noindex: - diff --git a/doc/fluid/api/optimizer/AdamW.rst b/doc/fluid/api/optimizer/AdamW.rst new file mode 100644 index 0000000000000000000000000000000000000000..c76eb48cdf1bcd7f4167d02cead45a0b7434ddae --- /dev/null +++ b/doc/fluid/api/optimizer/AdamW.rst @@ -0,0 +1,13 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_optimizer_AdamW: + +AdamW +----- + +.. autoclass:: paddle.optimizer.AdamW + :members: + :inherited-members: + :noindex: + diff --git a/doc/fluid/api/optimizer/Adamax.rst b/doc/fluid/api/optimizer/Adamax.rst index 7f0ed4935516d67a9944ad633219ccc1fdcd4753..36fb8509f0b596dca21a1c2fcc6b12c1e3b77fe0 100644 --- a/doc/fluid/api/optimizer/Adamax.rst +++ b/doc/fluid/api/optimizer/Adamax.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_Adamax: +.. _api_optimizer_Adamax: Adamax ------ -.. autoclass:: paddle.fluid.optimizer.Adamax +.. autoclass:: paddle.optimizer.Adamax :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/AdamaxOptimizer.rst b/doc/fluid/api/optimizer/AdamaxOptimizer.rst deleted file mode 100644 index b27b7aab44cb76f611ffa2e5512dd5e98597a602..0000000000000000000000000000000000000000 --- a/doc/fluid/api/optimizer/AdamaxOptimizer.rst +++ /dev/null @@ -1,14 +0,0 @@ -.. 
THIS FILE IS GENERATED BY `gen_doc.{py|sh}` - !DO NOT EDIT THIS FILE MANUALLY! - -.. _api_fluid_optimizer_AdamaxOptimizer: - -AdamaxOptimizer ---------------- - -.. autoclass:: paddle.fluid.optimizer.AdamaxOptimizer - :members: - :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load - :noindex: - diff --git a/doc/fluid/api/optimizer/CosineAnnealingLR.rst b/doc/fluid/api/optimizer/CosineAnnealingLR.rst new file mode 100644 index 0000000000000000000000000000000000000000..508ed824d4c8519e1096dc4a4442378b2f4b784f --- /dev/null +++ b/doc/fluid/api/optimizer/CosineAnnealingLR.rst @@ -0,0 +1,14 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_optimizer_CosineAnnealingLR: + +CosineAnnealingLR +------------------- + +.. autoclass:: paddle.optimizer.CosineAnnealingLR + :members: + :inherited-members: + :exclude-members: set_dict, set_state_dict, state_dict + :noindex: + diff --git a/doc/fluid/api/optimizer/DGCMomentumOptimizer.rst b/doc/fluid/api/optimizer/DGCMomentumOptimizer.rst index 2305e30ef77ec39ab0341d8d940fdcc447d0f4f6..aa7a3517c38066965084b2bc990621b7f494a008 100644 --- a/doc/fluid/api/optimizer/DGCMomentumOptimizer.rst +++ b/doc/fluid/api/optimizer/DGCMomentumOptimizer.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_DGCMomentumOptimizer: +.. _api_optimizer_DGCMomentumOptimizer: DGCMomentumOptimizer -------------------- -.. autoclass:: paddle.fluid.optimizer.DGCMomentumOptimizer +.. autoclass:: paddle.optimizer.DGCMomentumOptimizer :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/DecayedAdagrad.rst b/doc/fluid/api/optimizer/DecayedAdagrad.rst index f2b37dda5cf323328d34005e49b36ccfa2436051..e3f1c574d8b236ed6e5883030b2793f6056ce996 100644 --- a/doc/fluid/api/optimizer/DecayedAdagrad.rst +++ b/doc/fluid/api/optimizer/DecayedAdagrad.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_DecayedAdagrad: +.. _api_optimizer_DecayedAdagrad: DecayedAdagrad -------------- -.. autoclass:: paddle.fluid.optimizer.DecayedAdagrad +.. autoclass:: paddle.optimizer.DecayedAdagrad :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/DecayedAdagradOptimizer.rst b/doc/fluid/api/optimizer/DecayedAdagradOptimizer.rst index f0aa277010fcbbc11642be3f6f50f97e1c5a7197..cf0d4452bec9dfa944cd1e8cf365ae899e10cf1c 100644 --- a/doc/fluid/api/optimizer/DecayedAdagradOptimizer.rst +++ b/doc/fluid/api/optimizer/DecayedAdagradOptimizer.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_DecayedAdagradOptimizer: +.. _api_optimizer_DecayedAdagradOptimizer: DecayedAdagradOptimizer ----------------------- -.. autoclass:: paddle.fluid.optimizer.DecayedAdagradOptimizer +.. autoclass:: paddle.optimizer.DecayedAdagradOptimizer :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/Dpsgd.rst b/doc/fluid/api/optimizer/Dpsgd.rst index 161606af2c53bb8766141f0a703f2af312ccd55b..f8fbfbf653ab191facfd0b9ca032bc334daa84b6 100644 --- a/doc/fluid/api/optimizer/Dpsgd.rst +++ b/doc/fluid/api/optimizer/Dpsgd.rst @@ -1,14 +1,13 @@ .. 
THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_Dpsgd: +.. _api_optimizer_Dpsgd: Dpsgd ----- -.. autoclass:: paddle.fluid.optimizer.Dpsgd +.. autoclass:: paddle.optimizer.Dpsgd :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/DpsgdOptimizer.rst b/doc/fluid/api/optimizer/DpsgdOptimizer.rst index d2462515a648507e296f7dd31da44c7aaeb3633f..cbb1b8a92780f48665247e8627429914b7f92740 100644 --- a/doc/fluid/api/optimizer/DpsgdOptimizer.rst +++ b/doc/fluid/api/optimizer/DpsgdOptimizer.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_DpsgdOptimizer: +.. _api_optimizer_DpsgdOptimizer: DpsgdOptimizer -------------- -.. autoclass:: paddle.fluid.optimizer.DpsgdOptimizer +.. autoclass:: paddle.optimizer.DpsgdOptimizer :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/ExponentialLR.rst b/doc/fluid/api/optimizer/ExponentialLR.rst new file mode 100644 index 0000000000000000000000000000000000000000..5a22fb820347b0bf30b2eb25ab9cace7230e60f1 --- /dev/null +++ b/doc/fluid/api/optimizer/ExponentialLR.rst @@ -0,0 +1,14 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_optimizer_ExponentialLR: + +ExponentialLR +------------------- + +.. autoclass:: paddle.optimizer.ExponentialLR + :members: + :inherited-members: + :exclude-members: set_dict, set_state_dict, state_dict + :noindex: + diff --git a/doc/fluid/api/optimizer/ExponentialMovingAverage.rst b/doc/fluid/api/optimizer/ExponentialMovingAverage.rst index 41f2b39ae95e9cd4a2598863e13ad455d7bb81dc..173608910d27c20f582bfaae90316214d994867a 100644 --- a/doc/fluid/api/optimizer/ExponentialMovingAverage.rst +++ b/doc/fluid/api/optimizer/ExponentialMovingAverage.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_ExponentialMovingAverage: +.. _api_optimizer_ExponentialMovingAverage: ExponentialMovingAverage ------------------------ -.. autoclass:: paddle.fluid.optimizer.ExponentialMovingAverage +.. autoclass:: paddle.optimizer.ExponentialMovingAverage :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/Ftrl.rst b/doc/fluid/api/optimizer/Ftrl.rst index f8bcb617dbf81a51ac2c35b7f7bdd2f50b90c39e..85a5ab6eee34296f7546073cbadb9ac5ea6044eb 100644 --- a/doc/fluid/api/optimizer/Ftrl.rst +++ b/doc/fluid/api/optimizer/Ftrl.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_Ftrl: +.. _api_optimizer_Ftrl: Ftrl ---- -.. autoclass:: paddle.fluid.optimizer.Ftrl +.. autoclass:: paddle.optimizer.Ftrl :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/FtrlOptimizer.rst b/doc/fluid/api/optimizer/FtrlOptimizer.rst index 3875801869f6e2dd70b54a222f047fe4339fb27f..fcbbcc52eafc471cb3604b9183e71d34210c7667 100644 --- a/doc/fluid/api/optimizer/FtrlOptimizer.rst +++ b/doc/fluid/api/optimizer/FtrlOptimizer.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_FtrlOptimizer: +.. 
_api_optimizer_FtrlOptimizer: FtrlOptimizer ------------- -.. autoclass:: paddle.fluid.optimizer.FtrlOptimizer +.. autoclass:: paddle.optimizer.FtrlOptimizer :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/InverseTimeLR.rst b/doc/fluid/api/optimizer/InverseTimeLR.rst new file mode 100644 index 0000000000000000000000000000000000000000..34fa8859a463d05ccf15161ae974c4cd598df4d2 --- /dev/null +++ b/doc/fluid/api/optimizer/InverseTimeLR.rst @@ -0,0 +1,14 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_optimizer_InverseTimeLR: + +InverseTimeLR +------------------- + +.. autoclass:: paddle.optimizer.InverseTimeLR + :members: + :inherited-members: + :exclude-members: set_dict, set_state_dict, state_dict + :noindex: + diff --git a/doc/fluid/api/optimizer/LambOptimizer.rst b/doc/fluid/api/optimizer/LambOptimizer.rst index db8fc7153c7bc766f03faa7d758ed080e2ea2ca8..f661af2276be2f2b406847fba6cc6043bbd5f0d6 100644 --- a/doc/fluid/api/optimizer/LambOptimizer.rst +++ b/doc/fluid/api/optimizer/LambOptimizer.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_LambOptimizer: +.. _api_optimizer_LambOptimizer: LambOptimizer ------------- -.. autoclass:: paddle.fluid.optimizer.LambOptimizer +.. autoclass:: paddle.optimizer.LambOptimizer :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/LambdaLR.rst b/doc/fluid/api/optimizer/LambdaLR.rst new file mode 100644 index 0000000000000000000000000000000000000000..d37bd5019e211d47c1a686d9e2af49c7f1ad684e --- /dev/null +++ b/doc/fluid/api/optimizer/LambdaLR.rst @@ -0,0 +1,14 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_optimizer_LambdaLR: + +LambdaLR +------------------- + +.. autoclass:: paddle.optimizer.LambdaLR + :members: + :inherited-members: + :exclude-members: set_dict, set_state_dict, state_dict + :noindex: + diff --git a/doc/fluid/api/optimizer/LarsMomentum.rst b/doc/fluid/api/optimizer/LarsMomentum.rst index 396f93416f830b286eb6c0f93f799523d651b2dd..199afcd78c62b1987ed06eaab71eae3542fac303 100644 --- a/doc/fluid/api/optimizer/LarsMomentum.rst +++ b/doc/fluid/api/optimizer/LarsMomentum.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_LarsMomentum: +.. _api_optimizer_LarsMomentum: LarsMomentum ------------ -.. autoclass:: paddle.fluid.optimizer.LarsMomentum +.. autoclass:: paddle.optimizer.LarsMomentum :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/LarsMomentumOptimizer.rst b/doc/fluid/api/optimizer/LarsMomentumOptimizer.rst index daf1631c128d85fe9d2807694261ea740e900f8d..a19d0025d49100b912165b840361b093baad61d9 100644 --- a/doc/fluid/api/optimizer/LarsMomentumOptimizer.rst +++ b/doc/fluid/api/optimizer/LarsMomentumOptimizer.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_LarsMomentumOptimizer: +.. _api_optimizer_LarsMomentumOptimizer: LarsMomentumOptimizer --------------------- -.. autoclass:: paddle.fluid.optimizer.LarsMomentumOptimizer +.. 
autoclass:: paddle.optimizer.LarsMomentumOptimizer :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/LinearLrWarmup.rst b/doc/fluid/api/optimizer/LinearLrWarmup.rst new file mode 100644 index 0000000000000000000000000000000000000000..0fa5ebf876247210f543b22e9813f9d6c3118c5f --- /dev/null +++ b/doc/fluid/api/optimizer/LinearLrWarmup.rst @@ -0,0 +1,14 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_optimizer_LinearLrWarmup: + +LinearLrWarmup +------------------- + +.. autoclass:: paddle.optimizer.LinearLrWarmup + :members: + :inherited-members: + :exclude-members: set_dict, set_state_dict, state_dict + :noindex: + diff --git a/doc/fluid/api/optimizer/LookaheadOptimizer.rst b/doc/fluid/api/optimizer/LookaheadOptimizer.rst index e87be3eefdf307189f380e13ba548855db88df7d..663b5662ceea49052e602dfac441717543960a53 100644 --- a/doc/fluid/api/optimizer/LookaheadOptimizer.rst +++ b/doc/fluid/api/optimizer/LookaheadOptimizer.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_LookaheadOptimizer: +.. _api_optimizer_LookaheadOptimizer: LookaheadOptimizer ------------------ -.. autoclass:: paddle.fluid.optimizer.LookaheadOptimizer +.. autoclass:: paddle.optimizer.LookaheadOptimizer :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/ModelAverage.rst b/doc/fluid/api/optimizer/ModelAverage.rst index 86e458ef704c18173b5586b5042768a037ba32d9..5a67fc1b195c15f9dabe32253b06f9c60017e96e 100644 --- a/doc/fluid/api/optimizer/ModelAverage.rst +++ b/doc/fluid/api/optimizer/ModelAverage.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_ModelAverage: +.. _api_optimizer_ModelAverage: ModelAverage ------------ -.. autoclass:: paddle.fluid.optimizer.ModelAverage +.. autoclass:: paddle.optimizer.ModelAverage :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/Momentum.rst b/doc/fluid/api/optimizer/Momentum.rst index 7b54d7cc1fcbb434c929cf27a86f577372cd1d91..f0ef70be9ee60de6bdc4de18d95594db18ad1eea 100644 --- a/doc/fluid/api/optimizer/Momentum.rst +++ b/doc/fluid/api/optimizer/Momentum.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_Momentum: +.. _api_optimizer_Momentum: Momentum -------- -.. autoclass:: paddle.fluid.optimizer.Momentum +.. autoclass:: paddle.optimizer.Momentum :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/MomentumOptimizer.rst b/doc/fluid/api/optimizer/MomentumOptimizer.rst index 1f0c6c632918b8ed7a32c8c16a1dad642ed2e64f..495c5fe91a06890424db93158407ddf28f5854d7 100644 --- a/doc/fluid/api/optimizer/MomentumOptimizer.rst +++ b/doc/fluid/api/optimizer/MomentumOptimizer.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_MomentumOptimizer: +.. _api_optimizer_MomentumOptimizer: MomentumOptimizer ----------------- -.. autoclass:: paddle.fluid.optimizer.MomentumOptimizer +.. 
autoclass:: paddle.optimizer.MomentumOptimizer :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/MultiStepLR.rst b/doc/fluid/api/optimizer/MultiStepLR.rst new file mode 100644 index 0000000000000000000000000000000000000000..b5ee3da3f3ec8b6a4af5308bba42f63afa2c6d75 --- /dev/null +++ b/doc/fluid/api/optimizer/MultiStepLR.rst @@ -0,0 +1,14 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_optimizer_MultiStepLR: + +MultiStepLR +------------------- + +.. autoclass:: paddle.optimizer.MultiStepLR + :members: + :inherited-members: + :exclude-members: set_dict, set_state_dict, state_dict + :noindex: + diff --git a/doc/fluid/api/optimizer/NaturalExpLR.rst b/doc/fluid/api/optimizer/NaturalExpLR.rst new file mode 100644 index 0000000000000000000000000000000000000000..2bb653c7d7e0364c1d77ddfa8460c29a5b7937b8 --- /dev/null +++ b/doc/fluid/api/optimizer/NaturalExpLR.rst @@ -0,0 +1,14 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_optimizer_NaturalExpLR: + +NaturalExpLR +------------------- + +.. autoclass:: paddle.optimizer.NaturalExpLR + :members: + :inherited-members: + :exclude-members: set_dict, set_state_dict, state_dict + :noindex: + diff --git a/doc/fluid/api/optimizer/NoamLR.rst b/doc/fluid/api/optimizer/NoamLR.rst new file mode 100644 index 0000000000000000000000000000000000000000..9abbe5997cd7970e280749e94afabc6aa99891d9 --- /dev/null +++ b/doc/fluid/api/optimizer/NoamLR.rst @@ -0,0 +1,14 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_optimizer_NoamLR: + +NoamLR +------------------- + +.. autoclass:: paddle.optimizer.NoamLR + :members: + :inherited-members: + :exclude-members: set_dict, set_state_dict, state_dict + :noindex: + diff --git a/doc/fluid/api/optimizer/Optimizer.rst b/doc/fluid/api/optimizer/Optimizer.rst new file mode 100644 index 0000000000000000000000000000000000000000..1ef98eab55c524d18c27a19f951c8d081c26894a --- /dev/null +++ b/doc/fluid/api/optimizer/Optimizer.rst @@ -0,0 +1,13 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_optimizer_Optimizer: + +Optimizer +--------- + +.. autoclass:: paddle.optimizer.Optimizer + :members: + :inherited-members: + :noindex: + diff --git a/doc/fluid/api/optimizer/PiecewiseLR.rst b/doc/fluid/api/optimizer/PiecewiseLR.rst new file mode 100644 index 0000000000000000000000000000000000000000..e9dea7704ca393f76ceb61cd2c0359f1505a09ee --- /dev/null +++ b/doc/fluid/api/optimizer/PiecewiseLR.rst @@ -0,0 +1,14 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_optimizer_PiecewiseLR: + +PiecewiseLR +------------------- + +.. autoclass:: paddle.optimizer.PiecewiseLR + :members: + :inherited-members: + :exclude-members: set_dict, set_state_dict, state_dict + :noindex: + diff --git a/doc/fluid/api/optimizer/PipelineOptimizer.rst b/doc/fluid/api/optimizer/PipelineOptimizer.rst deleted file mode 100644 index 87e6f4026d49f4db11dec390faf325082bb1fdbe..0000000000000000000000000000000000000000 --- a/doc/fluid/api/optimizer/PipelineOptimizer.rst +++ /dev/null @@ -1,14 +0,0 @@ -.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` - !DO NOT EDIT THIS FILE MANUALLY! - -.. _api_fluid_optimizer_PipelineOptimizer: - -PipelineOptimizer ------------------ - -.. 
autoclass:: paddle.fluid.optimizer.PipelineOptimizer - :members: - :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load - :noindex: - diff --git a/doc/fluid/api/optimizer/PolynomialLR.rst b/doc/fluid/api/optimizer/PolynomialLR.rst new file mode 100644 index 0000000000000000000000000000000000000000..5ce54b1517fb3407a6f4715d66afd2520809f3d8 --- /dev/null +++ b/doc/fluid/api/optimizer/PolynomialLR.rst @@ -0,0 +1,14 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_optimizer_PolynomialLR: + +PolynomialLR +------------------- + +.. autoclass:: paddle.optimizer.PolynomialLR + :members: + :inherited-members: + :exclude-members: set_dict, set_state_dict, state_dict + :noindex: + diff --git a/doc/fluid/api/optimizer/RMSProp.rst b/doc/fluid/api/optimizer/RMSProp.rst new file mode 100644 index 0000000000000000000000000000000000000000..903acc26a3316e312ea1145819aec347e57e7109 --- /dev/null +++ b/doc/fluid/api/optimizer/RMSProp.rst @@ -0,0 +1,13 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_optimizer_RMSProp: + +RMSProp +------- + +.. autoclass:: paddle.optimizer.RMSProp + :members: + :inherited-members: + :noindex: + diff --git a/doc/fluid/api/optimizer/RMSPropOptimizer.rst b/doc/fluid/api/optimizer/RMSPropOptimizer.rst deleted file mode 100644 index 237c4ea71e45039063acce502b9c0bd9800e9ffd..0000000000000000000000000000000000000000 --- a/doc/fluid/api/optimizer/RMSPropOptimizer.rst +++ /dev/null @@ -1,14 +0,0 @@ -.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` - !DO NOT EDIT THIS FILE MANUALLY! - -.. _api_fluid_optimizer_RMSPropOptimizer: - -RMSPropOptimizer ----------------- - -.. autoclass:: paddle.fluid.optimizer.RMSPropOptimizer - :members: - :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load - :noindex: - diff --git a/doc/fluid/api/optimizer/RecomputeOptimizer.rst b/doc/fluid/api/optimizer/RecomputeOptimizer.rst index 479037eebbb22fa8ea02951ed6e380d5753fb171..4891f58c146ef39aaec42adcd6667bba76052f8a 100644 --- a/doc/fluid/api/optimizer/RecomputeOptimizer.rst +++ b/doc/fluid/api/optimizer/RecomputeOptimizer.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_RecomputeOptimizer: +.. _api_optimizer_RecomputeOptimizer: RecomputeOptimizer ------------------ -.. autoclass:: paddle.fluid.optimizer.RecomputeOptimizer +.. autoclass:: paddle.optimizer.RecomputeOptimizer :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/ReduceLROnPlateau.rst b/doc/fluid/api/optimizer/ReduceLROnPlateau.rst new file mode 100644 index 0000000000000000000000000000000000000000..f31bfefddf714db6a7fdcacfb208014fbe791e1f --- /dev/null +++ b/doc/fluid/api/optimizer/ReduceLROnPlateau.rst @@ -0,0 +1,14 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_optimizer_ReduceLROnPlateau: + +ReduceLROnPlateau +------------------- + +.. autoclass:: paddle.optimizer.ReduceLROnPlateau + :members: + :inherited-members: + :exclude-members: set_dict, set_state_dict, state_dict + :noindex: + diff --git a/doc/fluid/api/optimizer/SGD.rst b/doc/fluid/api/optimizer/SGD.rst index fa18269ad7536cc5aeaed1dd8f80775b3cf87775..9af1bc8d60cd30df57fcbd1b8b1ac8a00073c6df 100644 --- a/doc/fluid/api/optimizer/SGD.rst +++ b/doc/fluid/api/optimizer/SGD.rst @@ -1,14 +1,13 @@ .. 
THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_SGD: +.. _api_optimizer_SGD: SGD --- -.. autoclass:: paddle.fluid.optimizer.SGD +.. autoclass:: paddle.optimizer.SGD :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/SGDOptimizer.rst b/doc/fluid/api/optimizer/SGDOptimizer.rst index c6ec7ea6fd0d64b50676584c3b60733b274986fe..e36d63d41aa762daaf0dca943fb4de91de7bfc75 100644 --- a/doc/fluid/api/optimizer/SGDOptimizer.rst +++ b/doc/fluid/api/optimizer/SGDOptimizer.rst @@ -1,14 +1,13 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -.. _api_fluid_optimizer_SGDOptimizer: +.. _api_optimizer_SGDOptimizer: SGDOptimizer ------------ -.. autoclass:: paddle.fluid.optimizer.SGDOptimizer +.. autoclass:: paddle.optimizer.SGDOptimizer :members: :inherited-members: - :exclude-members: apply_gradients, apply_optimize, backward, load :noindex: diff --git a/doc/fluid/api/optimizer/StepLR.rst b/doc/fluid/api/optimizer/StepLR.rst new file mode 100644 index 0000000000000000000000000000000000000000..0524a33239072b4a6afee9803882e675879adb93 --- /dev/null +++ b/doc/fluid/api/optimizer/StepLR.rst @@ -0,0 +1,14 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_optimizer_StepLR: + +StepLR +------------------- + +.. autoclass:: paddle.optimizer.StepLR + :members: + :inherited-members: + :exclude-members: set_dict, set_state_dict, state_dict + :noindex: + diff --git a/doc/fluid/api/paddle.rst b/doc/fluid/api/paddle.rst new file mode 100644 index 0000000000000000000000000000000000000000..45d26734e0d95205876b1b586d1c24becbf522ce --- /dev/null +++ b/doc/fluid/api/paddle.rst @@ -0,0 +1,184 @@ +======================= +paddle +======================= + +.. 
toctree:: + :maxdepth: 1 + + paddle/abs.rst + paddle/acos.rst + paddle/add.rst + paddle/addcmul.rst + paddle/addmm.rst + paddle/allclose.rst + paddle/append_backward.rst + paddle/arange.rst + paddle/argmax.rst + paddle/argmin.rst + paddle/argsort.rst + paddle/asin.rst + paddle/atan.rst + paddle/bmm.rst + paddle/BuildStrategy.rst + paddle/cast.rst + paddle/ceil.rst + paddle/cholesky.rst + paddle/chunk.rst + paddle/clamp.rst + paddle/CompiledProgram.rst + paddle/concat.rst + paddle/cos.rst + paddle/CPUPlace.rst + paddle/create_global_var.rst + paddle/create_parameter.rst + paddle/create_tensor.rst + paddle/crop_tensor.rst + paddle/cross.rst + paddle/CUDAPinnedPlace.rst + paddle/CUDAPlace.rst + paddle/cumsum.rst + paddle/DataParallel.rst + paddle/default_main_program.rst + paddle/default_startup_program.rst + paddle/diag.rst + paddle/disable_imperative.rst + paddle/dist.rst + paddle/distribution.rst + paddle/div.rst + paddle/dot.rst + paddle/elementwise_add.rst + paddle/elementwise_div.rst + paddle/elementwise_floordiv.rst + paddle/elementwise_mod.rst + paddle/elementwise_mul.rst + paddle/elementwise_pow.rst + paddle/elementwise_sub.rst + paddle/elementwise_sum.rst + paddle/enable_imperative.rst + paddle/equal.rst + paddle/equal_all.rst + paddle/erf.rst + paddle/ExecutionStrategy.rst + paddle/Executor.rst + paddle/exp.rst + paddle/expand.rst + paddle/expand_as.rst + paddle/eye.rst + paddle/fill_constant.rst + paddle/flatten.rst + paddle/flip.rst + paddle/floor.rst + paddle/full.rst + paddle/full_like.rst + paddle/gather.rst + paddle/gather_nd.rst + paddle/global_scope.rst + paddle/gradients.rst + paddle/greater_equal.rst + paddle/greater_than.rst + paddle/has_inf.rst + paddle/has_nan.rst + paddle/in_imperative_mode.rst + paddle/increment.rst + paddle/index_sample.rst + paddle/index_select.rst + paddle/inverse.rst + paddle/is_empty.rst + paddle/isfinite.rst + paddle/kron.rst + paddle/less_equal.rst + paddle/less_than.rst + paddle/linspace.rst + paddle/load.rst + paddle/log.rst + paddle/log1p.rst + paddle/logical_and.rst + paddle/logical_not.rst + paddle/logical_or.rst + paddle/logical_xor.rst + paddle/logsumexp.rst + paddle/manual_seed.rst + paddle/masked_select.rst + paddle/matmul.rst + paddle/max.rst + paddle/maximum.rst + paddle/mean.rst + paddle/meshgrid.rst + paddle/min.rst + paddle/minimum.rst + paddle/mm.rst + paddle/mul.rst + paddle/multiplex.rst + paddle/name_scope.rst + paddle/nonzero.rst + paddle/norm.rst + paddle/not_equal.rst + paddle/ones.rst + paddle/ones_like.rst + paddle/numel.rst + paddle/ParallelExecutor.rst + paddle/ParamAttr.rst + paddle/pow.rst + paddle/Print.rst + paddle/Program.rst + paddle/program_guard.rst + paddle/py_func.rst + paddle/rand.rst + paddle/randint.rst + paddle/randn.rst + paddle/randperm.rst + paddle/rank.rst + paddle/reciprocal.rst + paddle/reduce_all.rst + paddle/reduce_any.rst + paddle/reduce_max.rst + paddle/reduce_mean.rst + paddle/reduce_min.rst + paddle/reduce_prod.rst + paddle/reduce_sum.rst + paddle/reshape.rst + paddle/reverse.rst + paddle/roll.rst + paddle/round.rst + paddle/rsqrt.rst + paddle/save.rst + paddle/scale.rst + paddle/scatter.rst + paddle/scatter_nd.rst + paddle/scatter_nd_add.rst + paddle/scope_guard.rst + paddle/shape.rst + paddle/shard_index.rst + paddle/shuffle.rst + paddle/sign.rst + paddle/sin.rst + paddle/slice.rst + paddle/sort.rst + paddle/split.rst + paddle/sqrt.rst + paddle/square.rst + paddle/squeeze.rst + paddle/stack.rst + paddle/stanh.rst + paddle/std.rst + paddle/strided_slice.rst + paddle/sum.rst + paddle/sums.rst 
+ paddle/t.rst + paddle/tanh.rst + paddle/topk.rst + paddle/trace.rst + paddle/transpose.rst + paddle/tril.rst + paddle/triu.rst + paddle/unbind.rst + paddle/unique.rst + paddle/unique_with_counts.rst + paddle/unsqueeze.rst + paddle/unstack.rst + paddle/var.rst + paddle/Variable.rst + paddle/WeightNormParamAttr.rst + paddle/where.rst + paddle/zeros.rst + paddle/zeros_like.rst diff --git a/doc/fluid/api/paddle/BuildStrategy.rst b/doc/fluid/api/paddle/BuildStrategy.rst new file mode 100644 index 0000000000000000000000000000000000000000..44536026e28abfae9b44cbee7e8b3d534eef468e --- /dev/null +++ b/doc/fluid/api/paddle/BuildStrategy.rst @@ -0,0 +1,7 @@ +.. _api_paddle_BuildStrategy: + +BuildStrategy +------------------------------- +:doc_source: paddle.fluid.compiler.BuildStrategy + + diff --git a/doc/fluid/api/paddle/CPUPlace.rst b/doc/fluid/api/paddle/CPUPlace.rst new file mode 100644 index 0000000000000000000000000000000000000000..3b586e8f448782de776fca5a2501feb40b2f1748 --- /dev/null +++ b/doc/fluid/api/paddle/CPUPlace.rst @@ -0,0 +1,7 @@ +.. _api_paddle_CPUPlace: + +CPUPlace +------------------------------- +:doc_source: paddle.fluid.core.CPUPlace + + diff --git a/doc/fluid/api/paddle/CUDAPinnedPlace.rst b/doc/fluid/api/paddle/CUDAPinnedPlace.rst new file mode 100644 index 0000000000000000000000000000000000000000..2034660f2e7dd32a57319a3048446a63ab8fbd09 --- /dev/null +++ b/doc/fluid/api/paddle/CUDAPinnedPlace.rst @@ -0,0 +1,7 @@ +.. _api_paddle_CUDAPinnedPlace: + +CUDAPinnedPlace +------------------------------- +:doc_source: paddle.fluid.core.CUDAPinnedPlace + + diff --git a/doc/fluid/api/paddle/CUDAPlace.rst b/doc/fluid/api/paddle/CUDAPlace.rst new file mode 100644 index 0000000000000000000000000000000000000000..c0563bbe76c969f51379d25140a2a17bb7a4da08 --- /dev/null +++ b/doc/fluid/api/paddle/CUDAPlace.rst @@ -0,0 +1,7 @@ +.. _api_paddle_CUDAPlace: + +CUDAPlace +------------------------------- +:doc_source: paddle.fluid.core.CUDAPlace + + diff --git a/doc/fluid/api/paddle/CompiledProgram.rst b/doc/fluid/api/paddle/CompiledProgram.rst new file mode 100644 index 0000000000000000000000000000000000000000..f317bf12c45b29bc28025220a41e27b6383de854 --- /dev/null +++ b/doc/fluid/api/paddle/CompiledProgram.rst @@ -0,0 +1,7 @@ +.. _api_paddle_CompiledProgram: + +CompiledProgram +------------------------------- +:doc_source: paddle.fluid.compiler.CompiledProgram + + diff --git a/doc/fluid/api/paddle/DataParallel.rst b/doc/fluid/api/paddle/DataParallel.rst new file mode 100644 index 0000000000000000000000000000000000000000..0c79fbe9b16051b0201bb575a84f2bdb397ba1b1 --- /dev/null +++ b/doc/fluid/api/paddle/DataParallel.rst @@ -0,0 +1,7 @@ +.. _api_paddle_DataParallel: + +DataParallel +------------------------------- +:doc_source: paddle.fluid.dygraph.parallel.DataParallel + + diff --git a/doc/fluid/api/paddle/ExecutionStrategy.rst b/doc/fluid/api/paddle/ExecutionStrategy.rst new file mode 100644 index 0000000000000000000000000000000000000000..6df5ca375f2e26b5bd9d4fe999461c41be9ad315 --- /dev/null +++ b/doc/fluid/api/paddle/ExecutionStrategy.rst @@ -0,0 +1,7 @@ +.. _api_paddle_ExecutionStrategy: + +ExecutionStrategy +------------------------------- +:doc_source: paddle.fluid.ExecutionStrategy + + diff --git a/doc/fluid/api/paddle/Executor.rst b/doc/fluid/api/paddle/Executor.rst new file mode 100644 index 0000000000000000000000000000000000000000..9d47503fea22b5225c0deed4d51f1ca20bbd7f27 --- /dev/null +++ b/doc/fluid/api/paddle/Executor.rst @@ -0,0 +1,7 @@ +.. 
_api_paddle_Executor: + +Executor +------------------------------- +:doc_source: paddle.fluid.executor.Executor + + diff --git a/doc/fluid/api/paddle/ParallelExecutor.rst b/doc/fluid/api/paddle/ParallelExecutor.rst new file mode 100644 index 0000000000000000000000000000000000000000..de5797d80ffefb50230c88d816e8fc1d8a188a7d --- /dev/null +++ b/doc/fluid/api/paddle/ParallelExecutor.rst @@ -0,0 +1,7 @@ +.. _api_paddle_ParallelExecutor: + +ParallelExecutor +------------------------------- +:doc_source: paddle.fluid.parallel_executor.ParallelExecutor + + diff --git a/doc/fluid/api/paddle/ParamAttr.rst b/doc/fluid/api/paddle/ParamAttr.rst new file mode 100644 index 0000000000000000000000000000000000000000..ba9df2c1229b9e15c265343e02e17aaaf14efe2c --- /dev/null +++ b/doc/fluid/api/paddle/ParamAttr.rst @@ -0,0 +1,7 @@ +.. _api_paddle_ParamAttr: + +ParamAttr +------------------------------- +:doc_source: paddle.fluid.param_attr.ParamAttr + + diff --git a/doc/fluid/api/paddle/Print.rst b/doc/fluid/api/paddle/Print.rst new file mode 100644 index 0000000000000000000000000000000000000000..5c78554268bf667f5bccb044218b59e1594da2c7 --- /dev/null +++ b/doc/fluid/api/paddle/Print.rst @@ -0,0 +1,7 @@ +.. _api_paddle_Print: + +Print +------------------------------- +:doc_source: paddle.fluid.layers.control_flow.Print + + diff --git a/doc/fluid/api/paddle/Program.rst b/doc/fluid/api/paddle/Program.rst new file mode 100644 index 0000000000000000000000000000000000000000..2d96075437cfbe6c1702d13c84c71aac99800ab8 --- /dev/null +++ b/doc/fluid/api/paddle/Program.rst @@ -0,0 +1,7 @@ +.. _api_paddle_Program: + +Program +------------------------------- +:doc_source: paddle.fluid.framework.Program + + diff --git a/doc/fluid/api/paddle/Variable.rst b/doc/fluid/api/paddle/Variable.rst new file mode 100644 index 0000000000000000000000000000000000000000..05275dce7eebd7637284769478121e50cc43dcde --- /dev/null +++ b/doc/fluid/api/paddle/Variable.rst @@ -0,0 +1,7 @@ +.. _api_paddle_Variable: + +Variable +------------------------------- +:doc_source: paddle.fluid.framework.Variable + + diff --git a/doc/fluid/api/paddle/WeightNormParamAttr.rst b/doc/fluid/api/paddle/WeightNormParamAttr.rst new file mode 100644 index 0000000000000000000000000000000000000000..d8e844883d6f6231d36c6c74f45ecf4df7b9a1fb --- /dev/null +++ b/doc/fluid/api/paddle/WeightNormParamAttr.rst @@ -0,0 +1,7 @@ +.. _api_paddle_WeightNormParamAttr: + +WeightNormParamAttr +------------------------------- +:doc_source: paddle.fluid.param_attr.WeightNormParamAttr + + diff --git a/doc/fluid/api/paddle/abs.rst b/doc/fluid/api/paddle/abs.rst new file mode 100644 index 0000000000000000000000000000000000000000..01c1ea5aa6ccd3838b4892cba7097cdb3788c190 --- /dev/null +++ b/doc/fluid/api/paddle/abs.rst @@ -0,0 +1,5 @@ +.. _api_paddle_abs: + +abs +------------------------------- +:doc_source: paddle.fluid.layers.abs diff --git a/doc/fluid/api/paddle/acos.rst b/doc/fluid/api/paddle/acos.rst new file mode 100644 index 0000000000000000000000000000000000000000..3abda379fa7d81b7e93f68e12626fb31a26b5d75 --- /dev/null +++ b/doc/fluid/api/paddle/acos.rst @@ -0,0 +1,7 @@ +.. _api_paddle_acos: + +acos +------------------------------- +:doc_source: paddle.fluid.layers.acos + + diff --git a/doc/fluid/api/paddle/add.rst b/doc/fluid/api/paddle/add.rst new file mode 100644 index 0000000000000000000000000000000000000000..60c1446a007b13d9b94d0a5ea35db78289ac4497 --- /dev/null +++ b/doc/fluid/api/paddle/add.rst @@ -0,0 +1,7 @@ +.. 
_api_paddle_add: + +add +------------------------------- +:doc_source: paddle.fluid.layers.elementwise_add + + diff --git a/doc/fluid/api/paddle/addcmul.rst b/doc/fluid/api/paddle/addcmul.rst new file mode 100644 index 0000000000000000000000000000000000000000..f6b547d99755172de97d4dfbde9c587bd7ad57b8 --- /dev/null +++ b/doc/fluid/api/paddle/addcmul.rst @@ -0,0 +1,7 @@ +.. _api_paddle_addcmul: + +addcmul +------------------------------- +:doc_source: paddle.tensor.addcmul + + diff --git a/doc/fluid/api/paddle/addmm.rst b/doc/fluid/api/paddle/addmm.rst new file mode 100644 index 0000000000000000000000000000000000000000..786c1b1f1134e8e38cb31d42884cc24b200c124a --- /dev/null +++ b/doc/fluid/api/paddle/addmm.rst @@ -0,0 +1,7 @@ +.. _api_paddle_addmm: + +addmm +------------------------------- +:doc_source: paddle.tensor.addmm + + diff --git a/doc/fluid/api/paddle/allclose.rst b/doc/fluid/api/paddle/allclose.rst new file mode 100644 index 0000000000000000000000000000000000000000..96041b974bd345d8838496f32c120ddf8346a3da --- /dev/null +++ b/doc/fluid/api/paddle/allclose.rst @@ -0,0 +1,7 @@ +.. _api_paddle_allclose: + +allclose +------------------------------- +:doc_source: paddle.tensor.allclose + + diff --git a/doc/fluid/api/paddle/append_backward.rst b/doc/fluid/api/paddle/append_backward.rst new file mode 100644 index 0000000000000000000000000000000000000000..608533d3c2b8efc7f71f7f1fe59be75f375100f3 --- /dev/null +++ b/doc/fluid/api/paddle/append_backward.rst @@ -0,0 +1,7 @@ +.. _api_paddle_append_backward: + +append_backward +------------------------------- +:doc_source: paddle.fluid.backward.append_backward + + diff --git a/doc/fluid/api/paddle/arange.rst b/doc/fluid/api/paddle/arange.rst new file mode 100644 index 0000000000000000000000000000000000000000..6c5d29a89c506bb01b8e4ff6b88899f7938b02f5 --- /dev/null +++ b/doc/fluid/api/paddle/arange.rst @@ -0,0 +1,7 @@ +.. _api_paddle_arange: + +arange +------------------------------- +:doc_source: paddle.fluid.layers.range + + diff --git a/doc/fluid/api/paddle/argmax.rst b/doc/fluid/api/paddle/argmax.rst new file mode 100644 index 0000000000000000000000000000000000000000..dc1665f9f199d4b288686710fa60d25eadd075c3 --- /dev/null +++ b/doc/fluid/api/paddle/argmax.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_paddle_argmax: + +argmax +------ + +.. autofunction:: paddle.tensor.search.argmax + :noindex: + diff --git a/doc/fluid/api/paddle/argmin.rst b/doc/fluid/api/paddle/argmin.rst new file mode 100644 index 0000000000000000000000000000000000000000..18c2334659a92457e4be4f739d94a5f8c633442e --- /dev/null +++ b/doc/fluid/api/paddle/argmin.rst @@ -0,0 +1,12 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_paddle_argmin: + +argmin +------ + +.. autofunction:: paddle.tensor.search.argmin + :noindex: + + diff --git a/doc/fluid/api/paddle/argsort.rst b/doc/fluid/api/paddle/argsort.rst new file mode 100644 index 0000000000000000000000000000000000000000..716f7e79312bcc0f83abff33bf3684b6a6b68500 --- /dev/null +++ b/doc/fluid/api/paddle/argsort.rst @@ -0,0 +1,7 @@ +.. _api_paddle_argsort: + +argsort +------------------------------- +:doc_source: paddle.tensor.argsort + + diff --git a/doc/fluid/api/paddle/asin.rst b/doc/fluid/api/paddle/asin.rst new file mode 100644 index 0000000000000000000000000000000000000000..4ceb1076217e6f2b09e7103f61414be334dcc12b --- /dev/null +++ b/doc/fluid/api/paddle/asin.rst @@ -0,0 +1,7 @@ +.. 
_api_paddle_asin: + +asin +------------------------------- +:doc_source: paddle.fluid.layers.asin + + diff --git a/doc/fluid/api/paddle/atan.rst b/doc/fluid/api/paddle/atan.rst new file mode 100644 index 0000000000000000000000000000000000000000..1a71ae54d75bf5393bb2565267678c47da90f7a2 --- /dev/null +++ b/doc/fluid/api/paddle/atan.rst @@ -0,0 +1,7 @@ +.. _api_paddle_atan: + +atan +------------------------------- +:doc_source: paddle.fluid.layers.atan + + diff --git a/doc/fluid/api/paddle/bmm.rst b/doc/fluid/api/paddle/bmm.rst new file mode 100644 index 0000000000000000000000000000000000000000..1298fe18888d97ab9c32bad2a17fdef86ba6c5b6 --- /dev/null +++ b/doc/fluid/api/paddle/bmm.rst @@ -0,0 +1,7 @@ +.. _api_paddle_bmm: + +bmm +------------------------------- +:doc_source: paddle.tensor.bmm + + diff --git a/doc/fluid/api/paddle/cast.rst b/doc/fluid/api/paddle/cast.rst new file mode 100644 index 0000000000000000000000000000000000000000..cb19a606c852b52651b97d2c45e8c948532012ba --- /dev/null +++ b/doc/fluid/api/paddle/cast.rst @@ -0,0 +1,7 @@ +.. _api_paddle_cast: + +cast +------------------------------- +:doc_source: paddle.fluid.layers.cast + + diff --git a/doc/fluid/api/paddle/ceil.rst b/doc/fluid/api/paddle/ceil.rst new file mode 100644 index 0000000000000000000000000000000000000000..97e25c0a6952a54b2b81c3f64eb9be405c98903f --- /dev/null +++ b/doc/fluid/api/paddle/ceil.rst @@ -0,0 +1,7 @@ +.. _api_paddle_ceil: + +ceil +------------------------------- +:doc_source: paddle.fluid.layers.ceil + + diff --git a/doc/fluid/api/paddle/cholesky.rst b/doc/fluid/api/paddle/cholesky.rst new file mode 100644 index 0000000000000000000000000000000000000000..23447546614321c65256fc09bff6e2b00603ed24 --- /dev/null +++ b/doc/fluid/api/paddle/cholesky.rst @@ -0,0 +1,7 @@ +.. _api_paddle_cholesky: + +cholesky +------------------------------- +:doc_source: paddle.tensor.cholesky + + diff --git a/doc/fluid/api/paddle/clamp.rst b/doc/fluid/api/paddle/clamp.rst new file mode 100644 index 0000000000000000000000000000000000000000..e56b4d1017c4c5a67dafb39f1e3dac2ad03d5c46 --- /dev/null +++ b/doc/fluid/api/paddle/clamp.rst @@ -0,0 +1,7 @@ +.. _api_paddle_clamp: + +clamp +------------------------------- +:doc_source: paddle.tensor.clamp + + diff --git a/doc/fluid/api/paddle/concat.rst b/doc/fluid/api/paddle/concat.rst new file mode 100644 index 0000000000000000000000000000000000000000..4d26f1b3b9486c23e9e8fa94d85044e9f388da46 --- /dev/null +++ b/doc/fluid/api/paddle/concat.rst @@ -0,0 +1,7 @@ +.. _api_paddle_concat: + +concat +------------------------------- +:doc_source: paddle.fluid.layers.concat + + diff --git a/doc/fluid/api/paddle/cos.rst b/doc/fluid/api/paddle/cos.rst new file mode 100644 index 0000000000000000000000000000000000000000..180c5cb0088c8fa407181f2c7b71535832191980 --- /dev/null +++ b/doc/fluid/api/paddle/cos.rst @@ -0,0 +1,7 @@ +.. _api_paddle_cos: + +cos +------------------------------- +:doc_source: paddle.fluid.layers.cos + + diff --git a/doc/fluid/api/paddle/create_global_var.rst b/doc/fluid/api/paddle/create_global_var.rst new file mode 100644 index 0000000000000000000000000000000000000000..05ae31708a02d7f3d8d0b98afb4f96c20aabc48d --- /dev/null +++ b/doc/fluid/api/paddle/create_global_var.rst @@ -0,0 +1,7 @@ +.. 
_api_paddle_create_global_var: + +create_global_var +------------------------------- +:doc_source: paddle.fluid.layers.tensor.create_global_var + + diff --git a/doc/fluid/api/paddle/create_parameter.rst b/doc/fluid/api/paddle/create_parameter.rst new file mode 100644 index 0000000000000000000000000000000000000000..daa33b854a6bcde40a7791ea1c8953ad2c9f2f32 --- /dev/null +++ b/doc/fluid/api/paddle/create_parameter.rst @@ -0,0 +1,7 @@ +.. _api_paddle_create_parameter: + +create_parameter +------------------------------- +:doc_source: paddle.fluid.layers.create_parameter + + diff --git a/doc/fluid/api/paddle/create_tensor.rst b/doc/fluid/api/paddle/create_tensor.rst new file mode 100644 index 0000000000000000000000000000000000000000..3ad8cd6aa5fc3af1118e7ec10eba7d5e466fbc25 --- /dev/null +++ b/doc/fluid/api/paddle/create_tensor.rst @@ -0,0 +1,7 @@ +.. _api_paddle_create_tensor: + +create_tensor +------------------------------- +:doc_source: paddle.fluid.layers.create_tensor + + diff --git a/doc/fluid/api/paddle/crop_tensor.rst b/doc/fluid/api/paddle/crop_tensor.rst new file mode 100644 index 0000000000000000000000000000000000000000..c36fb311aa295b2e8e71f250ec14daadd00e9d7a --- /dev/null +++ b/doc/fluid/api/paddle/crop_tensor.rst @@ -0,0 +1,7 @@ +.. _api_paddle_crop_tensor: + +crop_tensor +------------------------------- +:doc_source: paddle.fluid.layers.crop_tensor + + diff --git a/doc/fluid/api/paddle/cross.rst b/doc/fluid/api/paddle/cross.rst new file mode 100644 index 0000000000000000000000000000000000000000..50c15588babf215c9e0204f571cbda73d28aed03 --- /dev/null +++ b/doc/fluid/api/paddle/cross.rst @@ -0,0 +1,7 @@ +.. _api_paddle_cross: + +cross +------------------------------- +:doc_source: paddle.tensor.cross + + diff --git a/doc/fluid/api/paddle/cumsum.rst b/doc/fluid/api/paddle/cumsum.rst new file mode 100644 index 0000000000000000000000000000000000000000..673296e8836d1116f16d65b73a4f781241538dd4 --- /dev/null +++ b/doc/fluid/api/paddle/cumsum.rst @@ -0,0 +1,7 @@ +.. _api_paddle_cumsum: + +cumsum +------------------------------- +:doc_source: paddle.tensor.cumsum + + diff --git a/doc/fluid/api/paddle/default_main_program.rst b/doc/fluid/api/paddle/default_main_program.rst new file mode 100644 index 0000000000000000000000000000000000000000..8f92ac2e19b9920a4c8642ba5e945be0f9f86235 --- /dev/null +++ b/doc/fluid/api/paddle/default_main_program.rst @@ -0,0 +1,7 @@ +.. _api_paddle_default_main_program: + +default_main_program +------------------------------- +:doc_source: paddle.fluid.framework.default_main_program + + diff --git a/doc/fluid/api/paddle/default_startup_program.rst b/doc/fluid/api/paddle/default_startup_program.rst new file mode 100644 index 0000000000000000000000000000000000000000..78b95f4408d62552f111711cf187df890c182cda --- /dev/null +++ b/doc/fluid/api/paddle/default_startup_program.rst @@ -0,0 +1,7 @@ +.. _api_paddle_default_startup_program: + +default_startup_program +------------------------------- +:doc_source: paddle.fluid.framework.default_startup_program + + diff --git a/doc/fluid/api/paddle/diag.rst b/doc/fluid/api/paddle/diag.rst new file mode 100644 index 0000000000000000000000000000000000000000..1b1552f5c21e3eed6c6862088f0b61a3ab06586e --- /dev/null +++ b/doc/fluid/api/paddle/diag.rst @@ -0,0 +1,7 @@ +.. 
_api_paddle_diag: + +diag +------------------------------- +:doc_source: paddle.fluid.layers.diag + + diff --git a/doc/fluid/api/paddle/disable_imperative.rst b/doc/fluid/api/paddle/disable_imperative.rst new file mode 100644 index 0000000000000000000000000000000000000000..22ec4f3ad822bdea9c4869985ac1dcd47a9b6fc4 --- /dev/null +++ b/doc/fluid/api/paddle/disable_imperative.rst @@ -0,0 +1,7 @@ +.. _api_paddle_disable_imperative: + +disable_imperative +------------------------------- +:doc_source: paddle.fluid.dygraph.base.disable_dygraph + + diff --git a/doc/fluid/api/paddle/dist.rst b/doc/fluid/api/paddle/dist.rst new file mode 100644 index 0000000000000000000000000000000000000000..ca02ef267569a2888c2848446299517f8584fdef --- /dev/null +++ b/doc/fluid/api/paddle/dist.rst @@ -0,0 +1,7 @@ +.. _api_paddle_dist: + +dist +------------------------------- +:doc_source: paddle.tensor.dist + + diff --git a/doc/fluid/api/paddle/distribution.rst b/doc/fluid/api/paddle/distribution.rst new file mode 100644 index 0000000000000000000000000000000000000000..76ee9f2eeb20955bd7ed7824c75d1d28a1856272 --- /dev/null +++ b/doc/fluid/api/paddle/distribution.rst @@ -0,0 +1,10 @@ +============ +distribution +============ + +.. toctree:: + :maxdepth: 1 + + distribution/Distribution.rst + distribution/Normal.rst + distribution/Uniform.rst diff --git a/doc/fluid/api/paddle/distribution/Distribution.rst b/doc/fluid/api/paddle/distribution/Distribution.rst new file mode 100644 index 0000000000000000000000000000000000000000..dbfe5082f27b75fd88efb77c2f89d4991b30c723 --- /dev/null +++ b/doc/fluid/api/paddle/distribution/Distribution.rst @@ -0,0 +1,13 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_distribution_Distribution: + +Distribution +------------ + +.. autoclass:: paddle.distribution.Distribution + :members: + :inherited-members: + :noindex: + diff --git a/doc/fluid/api/paddle/distribution/Normal.rst b/doc/fluid/api/paddle/distribution/Normal.rst new file mode 100644 index 0000000000000000000000000000000000000000..f0f972fd7e561d9200fb566bab4f2019100efcbd --- /dev/null +++ b/doc/fluid/api/paddle/distribution/Normal.rst @@ -0,0 +1,13 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_distribution_Normal: + +Normal +------ + +.. autoclass:: paddle.distribution.Normal + :members: + :inherited-members: + :noindex: + diff --git a/doc/fluid/api/paddle/distribution/Uniform.rst b/doc/fluid/api/paddle/distribution/Uniform.rst new file mode 100644 index 0000000000000000000000000000000000000000..81b5d8dcdfcee360b680958a4c5005dfcca773ac --- /dev/null +++ b/doc/fluid/api/paddle/distribution/Uniform.rst @@ -0,0 +1,13 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_distribution_Uniform: + +Uniform +------- + +.. autoclass:: paddle.distribution.Uniform + :members: + :inherited-members: + :noindex: + diff --git a/doc/fluid/api/paddle/div.rst b/doc/fluid/api/paddle/div.rst new file mode 100644 index 0000000000000000000000000000000000000000..afb945f5c0a37f62568c788f76ec699b5befeed1 --- /dev/null +++ b/doc/fluid/api/paddle/div.rst @@ -0,0 +1,7 @@ +.. 
_api_paddle_div: + +div +------------------------------- +:doc_source: paddle.fluid.layers.elementwise_div + + diff --git a/doc/fluid/api/paddle/dot.rst b/doc/fluid/api/paddle/dot.rst new file mode 100644 index 0000000000000000000000000000000000000000..b912365663b14779389246749586e64a98bc5575 --- /dev/null +++ b/doc/fluid/api/paddle/dot.rst @@ -0,0 +1,7 @@ +.. _api_paddle_dot: + +dot +------------------------------- +:doc_source: paddle.tensor.dot + + diff --git a/doc/fluid/api/paddle/elementwise_add.rst b/doc/fluid/api/paddle/elementwise_add.rst new file mode 100644 index 0000000000000000000000000000000000000000..196d8bade7c6b719ee54b4983a911af2c2c3733f --- /dev/null +++ b/doc/fluid/api/paddle/elementwise_add.rst @@ -0,0 +1,7 @@ +.. _api_paddle_elementwise_add: + +elementwise_add +------------------------------- +:doc_source: paddle.fluid.layers.elementwise_add + + diff --git a/doc/fluid/api/paddle/elementwise_div.rst b/doc/fluid/api/paddle/elementwise_div.rst new file mode 100644 index 0000000000000000000000000000000000000000..c2f87134582aea7aee656b3fe416da281dc08d6c --- /dev/null +++ b/doc/fluid/api/paddle/elementwise_div.rst @@ -0,0 +1,7 @@ +.. _api_paddle_elementwise_div: + +elementwise_div +------------------------------- +:doc_source: paddle.fluid.layers.elementwise_div + + diff --git a/doc/fluid/api/paddle/elementwise_floordiv.rst b/doc/fluid/api/paddle/elementwise_floordiv.rst new file mode 100644 index 0000000000000000000000000000000000000000..695ac3f22e5f4abcfad7c7822950e98048cbda2b --- /dev/null +++ b/doc/fluid/api/paddle/elementwise_floordiv.rst @@ -0,0 +1,7 @@ +.. _api_paddle_elementwise_floordiv: + +elementwise_floordiv +------------------------------- +:doc_source: paddle.fluid.layers.elementwise_floordiv + + diff --git a/doc/fluid/api/paddle/elementwise_mod.rst b/doc/fluid/api/paddle/elementwise_mod.rst new file mode 100644 index 0000000000000000000000000000000000000000..5f0416079d9a4aa298ef82051b080c0921b25dae --- /dev/null +++ b/doc/fluid/api/paddle/elementwise_mod.rst @@ -0,0 +1,7 @@ +.. _api_paddle_elementwise_mod: + +elementwise_mod +------------------------------- +:doc_source: paddle.fluid.layers.elementwise_mod + + diff --git a/doc/fluid/api/paddle/elementwise_mul.rst b/doc/fluid/api/paddle/elementwise_mul.rst new file mode 100644 index 0000000000000000000000000000000000000000..1d049d9d5067321d9ea0c0a07e5871cf66dd5167 --- /dev/null +++ b/doc/fluid/api/paddle/elementwise_mul.rst @@ -0,0 +1,7 @@ +.. _api_paddle_elementwise_mul: + +elementwise_mul +------------------------------- +:doc_source: paddle.fluid.layers.elementwise_mul + + diff --git a/doc/fluid/api/paddle/elementwise_pow.rst b/doc/fluid/api/paddle/elementwise_pow.rst new file mode 100644 index 0000000000000000000000000000000000000000..742a3a4fbcb64ca09e69c41953b87093bcd07cb5 --- /dev/null +++ b/doc/fluid/api/paddle/elementwise_pow.rst @@ -0,0 +1,7 @@ +.. _api_paddle_elementwise_pow: + +elementwise_pow +------------------------------- +:doc_source: paddle.fluid.layers.elementwise_pow + + diff --git a/doc/fluid/api/paddle/elementwise_sub.rst b/doc/fluid/api/paddle/elementwise_sub.rst new file mode 100644 index 0000000000000000000000000000000000000000..3aca499fbdf0cfa5956f7f6174aa96b24eb400ea --- /dev/null +++ b/doc/fluid/api/paddle/elementwise_sub.rst @@ -0,0 +1,7 @@ +.. 
_api_paddle_elementwise_sub: + +elementwise_sub +------------------------------- +:doc_source: paddle.fluid.layers.elementwise_sub + + diff --git a/doc/fluid/api/paddle/elementwise_sum.rst b/doc/fluid/api/paddle/elementwise_sum.rst new file mode 100644 index 0000000000000000000000000000000000000000..6a135aca000efb8fd333d879487b19319ed555db --- /dev/null +++ b/doc/fluid/api/paddle/elementwise_sum.rst @@ -0,0 +1,7 @@ +.. _api_paddle_elementwise_sum: + +elementwise_sum +------------------------------- +:doc_source: paddle.tensor.elementwise_sum + + diff --git a/doc/fluid/api/paddle/enable_imperative.rst b/doc/fluid/api/paddle/enable_imperative.rst new file mode 100644 index 0000000000000000000000000000000000000000..81b39bb938baeed31df3cca6a656e8f34be5137c --- /dev/null +++ b/doc/fluid/api/paddle/enable_imperative.rst @@ -0,0 +1,7 @@ +.. _api_paddle_enable_imperative: + +enable_imperative +------------------------------- +:doc_source: paddle.fluid.dygraph.base.enable_dygraph + + diff --git a/doc/fluid/api/paddle/equal.rst b/doc/fluid/api/paddle/equal.rst new file mode 100644 index 0000000000000000000000000000000000000000..5fa7398800f6c9ecd5692f2e0f1ef55e829016e4 --- /dev/null +++ b/doc/fluid/api/paddle/equal.rst @@ -0,0 +1,7 @@ +.. _api_paddle_equal: + +equal +------------------------------- +:doc_source: paddle.tensor.equal + + diff --git a/doc/fluid/api/paddle/equal_all.rst b/doc/fluid/api/paddle/equal_all.rst new file mode 100644 index 0000000000000000000000000000000000000000..58fc331acc2b3f564dc73bb8c039c17b9b4720f2 --- /dev/null +++ b/doc/fluid/api/paddle/equal_all.rst @@ -0,0 +1,7 @@ +.. _api_paddle_equal_all: + +equal_all +------------------------------- +:doc_source: paddle.tensor.equal_all + + diff --git a/doc/fluid/api/paddle/erf.rst b/doc/fluid/api/paddle/erf.rst new file mode 100644 index 0000000000000000000000000000000000000000..9f8736d4dbf93e7010dd4bd8fb8f08ee072f0d6b --- /dev/null +++ b/doc/fluid/api/paddle/erf.rst @@ -0,0 +1,7 @@ +.. _api_paddle_erf: + +erf +------------------------------- +:doc_source: paddle.fluid.layers.erf + + diff --git a/doc/fluid/api/paddle/exp.rst b/doc/fluid/api/paddle/exp.rst new file mode 100644 index 0000000000000000000000000000000000000000..468fafefc0edfb861ca224dbae6ecfec3e14f902 --- /dev/null +++ b/doc/fluid/api/paddle/exp.rst @@ -0,0 +1,7 @@ +.. _api_paddle_exp: + +exp +------------------------------- +:doc_source: paddle.fluid.layers.exp + + diff --git a/doc/fluid/api/paddle/expand.rst b/doc/fluid/api/paddle/expand.rst new file mode 100644 index 0000000000000000000000000000000000000000..4dae2306b0140dbeaad89a4e7dc3cdae91a6b5b0 --- /dev/null +++ b/doc/fluid/api/paddle/expand.rst @@ -0,0 +1,7 @@ +.. _api_paddle_expand: + +expand +------------------------------- +:doc_source: paddle.fluid.layers.expand + + diff --git a/doc/fluid/api/paddle/expand_as.rst b/doc/fluid/api/paddle/expand_as.rst new file mode 100644 index 0000000000000000000000000000000000000000..02c7a2debbd939182be8bc5083c3a14573231623 --- /dev/null +++ b/doc/fluid/api/paddle/expand_as.rst @@ -0,0 +1,7 @@ +.. _api_paddle_expand_as: + +expand_as +------------------------------- +:doc_source: paddle.fluid.layers.expand_as + + diff --git a/doc/fluid/api/paddle/eye.rst b/doc/fluid/api/paddle/eye.rst new file mode 100644 index 0000000000000000000000000000000000000000..66a8522df17f7a4d79a04e37361afe05828fef77 --- /dev/null +++ b/doc/fluid/api/paddle/eye.rst @@ -0,0 +1,7 @@ +.. 
_api_paddle_eye: + +eye +------------------------------- +:doc_source: paddle.fluid.layers.eye + + diff --git a/doc/fluid/api/paddle/fill_constant.rst b/doc/fluid/api/paddle/fill_constant.rst new file mode 100644 index 0000000000000000000000000000000000000000..0ba3d4267f972bad2d65d8cba001a0c74a5559ef --- /dev/null +++ b/doc/fluid/api/paddle/fill_constant.rst @@ -0,0 +1,7 @@ +.. _api_paddle_fill_constant: + +fill_constant +------------------------------- +:doc_source: paddle.fluid.layers.fill_constant + + diff --git a/doc/fluid/api/paddle/flatten.rst b/doc/fluid/api/paddle/flatten.rst new file mode 100644 index 0000000000000000000000000000000000000000..91d560427c2dbda9823980ce7ae9e8fa5b13eb5a --- /dev/null +++ b/doc/fluid/api/paddle/flatten.rst @@ -0,0 +1,7 @@ +.. _api_paddle_flatten: + +flatten +------------------------------- +:doc_source: paddle.fluid.layers.flatten + + diff --git a/doc/fluid/api/paddle/flip.rst b/doc/fluid/api/paddle/flip.rst new file mode 100644 index 0000000000000000000000000000000000000000..e251b5b75d2ddad2897243a165c4dbeab76916d3 --- /dev/null +++ b/doc/fluid/api/paddle/flip.rst @@ -0,0 +1,7 @@ +.. _api_paddle_flip: + +flip +------------------------------- +:doc_source: paddle.tensor.flip + + diff --git a/doc/fluid/api/paddle/floor.rst b/doc/fluid/api/paddle/floor.rst new file mode 100644 index 0000000000000000000000000000000000000000..dc9940a76718dcff3b1ea520c150aa6a4c80481c --- /dev/null +++ b/doc/fluid/api/paddle/floor.rst @@ -0,0 +1,7 @@ +.. _api_paddle_floor: + +floor +------------------------------- +:doc_source: paddle.fluid.layers.floor + + diff --git a/doc/fluid/api/paddle/full.rst b/doc/fluid/api/paddle/full.rst new file mode 100644 index 0000000000000000000000000000000000000000..5a6869012d23b48100b1f9e4bc11c515bd8dad66 --- /dev/null +++ b/doc/fluid/api/paddle/full.rst @@ -0,0 +1,7 @@ +.. _api_paddle_full: + +full +------------------------------- +:doc_source: paddle.fluid.layers.fill_constant + + diff --git a/doc/fluid/api/paddle/full_like.rst b/doc/fluid/api/paddle/full_like.rst new file mode 100644 index 0000000000000000000000000000000000000000..7262c2c892c854937ea4ac8b79e2269765ceb580 --- /dev/null +++ b/doc/fluid/api/paddle/full_like.rst @@ -0,0 +1,7 @@ +.. _api_paddle_full_like: + +full_like +------------------------------- +:doc_source: paddle.tensor.full_like + + diff --git a/doc/fluid/api/paddle/gather.rst b/doc/fluid/api/paddle/gather.rst new file mode 100644 index 0000000000000000000000000000000000000000..954bde40d2fa4303a7febca28e91d001e312b57f --- /dev/null +++ b/doc/fluid/api/paddle/gather.rst @@ -0,0 +1,7 @@ +.. _api_paddle_gather: + +gather +------------------------------- +:doc_source: paddle.fluid.layers.gather + + diff --git a/doc/fluid/api/paddle/gather_nd.rst b/doc/fluid/api/paddle/gather_nd.rst new file mode 100644 index 0000000000000000000000000000000000000000..d00261c2cc7068f818665cab53edc54f18404cf0 --- /dev/null +++ b/doc/fluid/api/paddle/gather_nd.rst @@ -0,0 +1,7 @@ +.. _api_paddle_gather_nd: + +gather_nd +------------------------------- +:doc_source: paddle.fluid.layers.gather_nd + + diff --git a/doc/fluid/api/paddle/global_scope.rst b/doc/fluid/api/paddle/global_scope.rst new file mode 100644 index 0000000000000000000000000000000000000000..cb2babc1c492f7cfcd89e409e5d76e0d709e4ec3 --- /dev/null +++ b/doc/fluid/api/paddle/global_scope.rst @@ -0,0 +1,7 @@ +.. 
_api_paddle_global_scope: + +global_scope +------------------------------- +:doc_source: paddle.fluid.executor.global_scope + + diff --git a/doc/fluid/api/paddle/gradients.rst b/doc/fluid/api/paddle/gradients.rst new file mode 100644 index 0000000000000000000000000000000000000000..9a65a55b6bb3d9da580696c3be355a6fba29db25 --- /dev/null +++ b/doc/fluid/api/paddle/gradients.rst @@ -0,0 +1,7 @@ +.. _api_paddle_gradients: + +gradients +------------------------------- +:doc_source: paddle.fluid.backward.gradients + + diff --git a/doc/fluid/api/paddle/greater_equal.rst b/doc/fluid/api/paddle/greater_equal.rst new file mode 100644 index 0000000000000000000000000000000000000000..54afe57ffab5185fc2c3fb92a671e0b726108ab3 --- /dev/null +++ b/doc/fluid/api/paddle/greater_equal.rst @@ -0,0 +1,7 @@ +.. _api_paddle_greater_equal: + +greater_equal +------------------------------- +:doc_source: paddle.tensor.greater_equal + + diff --git a/doc/fluid/api/paddle/greater_than.rst b/doc/fluid/api/paddle/greater_than.rst new file mode 100644 index 0000000000000000000000000000000000000000..04a874dd929d7dae274898c87029059b1b1d6261 --- /dev/null +++ b/doc/fluid/api/paddle/greater_than.rst @@ -0,0 +1,7 @@ +.. _api_paddle_greater_than: + +greater_than +------------------------------- +:doc_source: paddle.tensor.greater_than + + diff --git a/doc/fluid/api/paddle/has_inf.rst b/doc/fluid/api/paddle/has_inf.rst new file mode 100644 index 0000000000000000000000000000000000000000..1efcab91e60a1fd229a066dba95d21f81e513c94 --- /dev/null +++ b/doc/fluid/api/paddle/has_inf.rst @@ -0,0 +1,7 @@ +.. _api_paddle_has_inf: + +has_inf +------------------------------- +:doc_source: paddle.fluid.layers.has_inf + + diff --git a/doc/fluid/api/paddle/has_nan.rst b/doc/fluid/api/paddle/has_nan.rst new file mode 100644 index 0000000000000000000000000000000000000000..59710b0c09a0dce28b1dfaba576f47886f706ee0 --- /dev/null +++ b/doc/fluid/api/paddle/has_nan.rst @@ -0,0 +1,7 @@ +.. _api_paddle_has_nan: + +has_nan +------------------------------- +:doc_source: paddle.fluid.layers.has_nan + + diff --git a/doc/fluid/api/paddle/in_imperative_mode.rst b/doc/fluid/api/paddle/in_imperative_mode.rst new file mode 100644 index 0000000000000000000000000000000000000000..29a1e5ca5ac7d5e7de5475f90f96772968bca40d --- /dev/null +++ b/doc/fluid/api/paddle/in_imperative_mode.rst @@ -0,0 +1,7 @@ +.. _api_paddle_in_imperative_mode: + +in_imperative_mode +------------------------------- +:doc_source: paddle.fluid.framework.in_dygraph_mode + + diff --git a/doc/fluid/api/paddle/increment.rst b/doc/fluid/api/paddle/increment.rst new file mode 100644 index 0000000000000000000000000000000000000000..c63a451ddacd5a51fb9699445bd1bb0071ab331c --- /dev/null +++ b/doc/fluid/api/paddle/increment.rst @@ -0,0 +1,7 @@ +.. _api_paddle_increment: + +increment +------------------------------- +:doc_source: paddle.fluid.layers.increment + + diff --git a/doc/fluid/api/paddle/index_sample.rst b/doc/fluid/api/paddle/index_sample.rst new file mode 100644 index 0000000000000000000000000000000000000000..3a1f07978d8e112be98aa6aabb0140ab4f8d0208 --- /dev/null +++ b/doc/fluid/api/paddle/index_sample.rst @@ -0,0 +1,7 @@ +.. 
_api_paddle_index_sample: + +index_sample +------------------------------- +:doc_source: paddle.tensor.index_sample + + diff --git a/doc/fluid/api/paddle/index_select.rst b/doc/fluid/api/paddle/index_select.rst new file mode 100644 index 0000000000000000000000000000000000000000..c5486cd2375bd0d7f0baba01c1829c10f66ab45c --- /dev/null +++ b/doc/fluid/api/paddle/index_select.rst @@ -0,0 +1,7 @@ +.. _api_paddle_index_select: + +index_select +------------------------------- +:doc_source: paddle.tensor.index_select + + diff --git a/doc/fluid/api/paddle/inverse.rst b/doc/fluid/api/paddle/inverse.rst new file mode 100644 index 0000000000000000000000000000000000000000..8661db22117b638624e16d6a31bcf4eef321a6b4 --- /dev/null +++ b/doc/fluid/api/paddle/inverse.rst @@ -0,0 +1,7 @@ +.. _api_paddle_inverse: + +inverse +------------------------------- +:doc_source: paddle.tensor.inverse + + diff --git a/doc/fluid/api/paddle/is_empty.rst b/doc/fluid/api/paddle/is_empty.rst new file mode 100644 index 0000000000000000000000000000000000000000..51948a3d0d84391e8d63e176ddaabba026551f98 --- /dev/null +++ b/doc/fluid/api/paddle/is_empty.rst @@ -0,0 +1,7 @@ +.. _api_paddle_is_empty: + +is_empty +------------------------------- +:doc_source: paddle.fluid.layers.is_empty + + diff --git a/doc/fluid/api/paddle/isfinite.rst b/doc/fluid/api/paddle/isfinite.rst new file mode 100644 index 0000000000000000000000000000000000000000..8e4aa8db89b64be36f6e75ec20363fae90a21dbe --- /dev/null +++ b/doc/fluid/api/paddle/isfinite.rst @@ -0,0 +1,7 @@ +.. _api_paddle_isfinite: + +isfinite +------------------------------- +:doc_source: paddle.fluid.layers.isfinite + + diff --git a/doc/fluid/api/paddle/kron.rst b/doc/fluid/api/paddle/kron.rst new file mode 100644 index 0000000000000000000000000000000000000000..8768fd230672dbbecb06ce955ac57d3f1cb37f3f --- /dev/null +++ b/doc/fluid/api/paddle/kron.rst @@ -0,0 +1,7 @@ +.. _api_paddle_kron: + +kron +------------------------------- +:doc_source: paddle.tensor.kron + + diff --git a/doc/fluid/api/paddle/less_equal.rst b/doc/fluid/api/paddle/less_equal.rst new file mode 100644 index 0000000000000000000000000000000000000000..3fc5e2ce2b819dfed7ca8b64841836229c86d3e4 --- /dev/null +++ b/doc/fluid/api/paddle/less_equal.rst @@ -0,0 +1,7 @@ +.. _api_paddle_less_equal: + +less_equal +------------------------------- +:doc_source: paddle.tensor.less_equal + + diff --git a/doc/fluid/api/paddle/less_than.rst b/doc/fluid/api/paddle/less_than.rst new file mode 100644 index 0000000000000000000000000000000000000000..7df6eb441d37a2fe8bf95e43a48df8471115ad2c --- /dev/null +++ b/doc/fluid/api/paddle/less_than.rst @@ -0,0 +1,7 @@ +.. _api_paddle_less_than: + +less_than +------------------------------- +:doc_source: paddle.tensor.less_than + + diff --git a/doc/fluid/api/paddle/linspace.rst b/doc/fluid/api/paddle/linspace.rst new file mode 100644 index 0000000000000000000000000000000000000000..268cdf16f5018d8bc4ba840306f710d6f9d6aedb --- /dev/null +++ b/doc/fluid/api/paddle/linspace.rst @@ -0,0 +1,7 @@ +.. _api_paddle_linspace: + +linspace +------------------------------- +:doc_source: paddle.fluid.layers.linspace + + diff --git a/doc/fluid/api/paddle/load.rst b/doc/fluid/api/paddle/load.rst new file mode 100644 index 0000000000000000000000000000000000000000..953efbe850dd244fe021e6444cb0f6ef77bd8184 --- /dev/null +++ b/doc/fluid/api/paddle/load.rst @@ -0,0 +1,7 @@ +.. 
_api_paddle_load: + +load +------------------------------- +:doc_source: paddle.fluid.io.load + + diff --git a/doc/fluid/api/paddle/log.rst b/doc/fluid/api/paddle/log.rst new file mode 100644 index 0000000000000000000000000000000000000000..66856a3b9f8cb83e0cf38e0f55bb83377b0f7412 --- /dev/null +++ b/doc/fluid/api/paddle/log.rst @@ -0,0 +1,7 @@ +.. _api_paddle_log: + +log +------------------------------- +:doc_source: paddle.fluid.layers.log + + diff --git a/doc/fluid/api/paddle/log1p.rst b/doc/fluid/api/paddle/log1p.rst new file mode 100644 index 0000000000000000000000000000000000000000..543798763a5cb43bdf28cc01810c502cf5d936b4 --- /dev/null +++ b/doc/fluid/api/paddle/log1p.rst @@ -0,0 +1,7 @@ +.. _api_paddle_log1p: + +log1p +------------------------------- +:doc_source: paddle.tensor.log1p + + diff --git a/doc/fluid/api/paddle/logical_and.rst b/doc/fluid/api/paddle/logical_and.rst new file mode 100644 index 0000000000000000000000000000000000000000..887ea10780f817060fd3b0ffc5d605aef2196408 --- /dev/null +++ b/doc/fluid/api/paddle/logical_and.rst @@ -0,0 +1,7 @@ +.. _api_paddle_logical_and: + +logical_and +------------------------------- +:doc_source: paddle.fluid.layers.logical_and + + diff --git a/doc/fluid/api/paddle/logical_not.rst b/doc/fluid/api/paddle/logical_not.rst new file mode 100644 index 0000000000000000000000000000000000000000..1b6d74c4b31401ac234b70611eca4f552a6d1404 --- /dev/null +++ b/doc/fluid/api/paddle/logical_not.rst @@ -0,0 +1,7 @@ +.. _api_paddle_logical_not: + +logical_not +------------------------------- +:doc_source: paddle.fluid.layers.logical_not + + diff --git a/doc/fluid/api/paddle/logical_or.rst b/doc/fluid/api/paddle/logical_or.rst new file mode 100644 index 0000000000000000000000000000000000000000..01cb12f8f7216abd09ab1c2d40ae759a86992dd1 --- /dev/null +++ b/doc/fluid/api/paddle/logical_or.rst @@ -0,0 +1,7 @@ +.. _api_paddle_logical_or: + +logical_or +------------------------------- +:doc_source: paddle.fluid.layers.logical_or + + diff --git a/doc/fluid/api/paddle/logical_xor.rst b/doc/fluid/api/paddle/logical_xor.rst new file mode 100644 index 0000000000000000000000000000000000000000..cde495c6661418167f4ffa5e0afa32dcd8558619 --- /dev/null +++ b/doc/fluid/api/paddle/logical_xor.rst @@ -0,0 +1,7 @@ +.. _api_paddle_logical_xor: + +logical_xor +------------------------------- +:doc_source: paddle.fluid.layers.logical_xor + + diff --git a/doc/fluid/api/paddle/logsumexp.rst b/doc/fluid/api/paddle/logsumexp.rst new file mode 100644 index 0000000000000000000000000000000000000000..129cb91268ddcacc6c51e69bdfe7e15bebd4554c --- /dev/null +++ b/doc/fluid/api/paddle/logsumexp.rst @@ -0,0 +1,7 @@ +.. _api_paddle_logsumexp: + +logsumexp +------------------------------- +:doc_source: paddle.tensor.logsumexp + + diff --git a/doc/fluid/api/paddle/manual_seed.rst b/doc/fluid/api/paddle/manual_seed.rst new file mode 100644 index 0000000000000000000000000000000000000000..7be7452dabf1c72502e76d38e1cdb047beebe2e8 --- /dev/null +++ b/doc/fluid/api/paddle/manual_seed.rst @@ -0,0 +1,7 @@ +.. _api_paddle_manual_seed: + +manual_seed +------------------------------- +:doc_source: paddle.framework.manual_seed + + diff --git a/doc/fluid/api/paddle/matmul.rst b/doc/fluid/api/paddle/matmul.rst new file mode 100644 index 0000000000000000000000000000000000000000..27235e5599a23802e61014e3096a1fcb733cd474 --- /dev/null +++ b/doc/fluid/api/paddle/matmul.rst @@ -0,0 +1,7 @@ +.. 
_api_paddle_matmul: + +matmul +------------------------------- +:doc_source: paddle.tensor.matmul + + diff --git a/doc/fluid/api/paddle/max.rst b/doc/fluid/api/paddle/max.rst new file mode 100644 index 0000000000000000000000000000000000000000..0d28148a8dcc0ac31744450c954e9a125e475add --- /dev/null +++ b/doc/fluid/api/paddle/max.rst @@ -0,0 +1,7 @@ +.. _api_paddle_max: + +max +------------------------------- +:doc_source: paddle.tensor.max + + diff --git a/doc/fluid/api/paddle/maximum.rst b/doc/fluid/api/paddle/maximum.rst new file mode 100644 index 0000000000000000000000000000000000000000..c85f8a97710efb559e5f73c586eb45798224e8db --- /dev/null +++ b/doc/fluid/api/paddle/maximum.rst @@ -0,0 +1,7 @@ +.. _api_paddle_maximum: + +maximum +------------------------------- +:doc_source: paddle.tensor.maximum + + diff --git a/doc/fluid/api/paddle/mean.rst b/doc/fluid/api/paddle/mean.rst new file mode 100644 index 0000000000000000000000000000000000000000..0af4250a201baa96f7fa8b117790aa4efbcaf121 --- /dev/null +++ b/doc/fluid/api/paddle/mean.rst @@ -0,0 +1,7 @@ +.. _api_paddle_mean: + +mean +------------------------------- +:doc_source: paddle.fluid.layers.mean + + diff --git a/doc/fluid/api/paddle/meshgrid.rst b/doc/fluid/api/paddle/meshgrid.rst new file mode 100644 index 0000000000000000000000000000000000000000..08bbe433dea3b987748a1aa0a349b7a97a026bf0 --- /dev/null +++ b/doc/fluid/api/paddle/meshgrid.rst @@ -0,0 +1,7 @@ +.. _api_paddle_meshgrid: + +meshgrid +------------------------------- +:doc_source: paddle.tensor.meshgrid + + diff --git a/doc/fluid/api/paddle/min.rst b/doc/fluid/api/paddle/min.rst new file mode 100644 index 0000000000000000000000000000000000000000..bb99109471c0ab684fdd7646fc446abe8aafe6cb --- /dev/null +++ b/doc/fluid/api/paddle/min.rst @@ -0,0 +1,7 @@ +.. _api_paddle_min: + +min +------------------------------- +:doc_source: paddle.tensor.min + + diff --git a/doc/fluid/api/paddle/minimum.rst b/doc/fluid/api/paddle/minimum.rst new file mode 100644 index 0000000000000000000000000000000000000000..41391741da78620231f5fe1a9c5ee3ea73ce70be --- /dev/null +++ b/doc/fluid/api/paddle/minimum.rst @@ -0,0 +1,7 @@ +.. _api_paddle_minimum: + +minimum +------------------------------- +:doc_source: paddle.tensor.minimum + + diff --git a/doc/fluid/api/paddle/mm.rst b/doc/fluid/api/paddle/mm.rst new file mode 100644 index 0000000000000000000000000000000000000000..50650110b6a61ea0eac2575460121e260ce3d85b --- /dev/null +++ b/doc/fluid/api/paddle/mm.rst @@ -0,0 +1,7 @@ +.. _api_paddle_mm: + +mm +------------------------------- +:doc_source: paddle.fluid.layers.matmul + + diff --git a/doc/fluid/api/paddle/mul.rst b/doc/fluid/api/paddle/mul.rst new file mode 100644 index 0000000000000000000000000000000000000000..7cf24d0dc276736e652afc643237aff41bd3d119 --- /dev/null +++ b/doc/fluid/api/paddle/mul.rst @@ -0,0 +1,7 @@ +.. _api_paddle_mul: + +mul +------------------------------- +:doc_source: paddle.fluid.layers.mul + + diff --git a/doc/fluid/api/paddle/multiplex.rst b/doc/fluid/api/paddle/multiplex.rst new file mode 100644 index 0000000000000000000000000000000000000000..de115db709418f9fda0d586495f2ebc5ed7de537 --- /dev/null +++ b/doc/fluid/api/paddle/multiplex.rst @@ -0,0 +1,7 @@ +.. 
_api_paddle_multiplex: + +multiplex +------------------------------- +:doc_source: paddle.fluid.layers.multiplex + + diff --git a/doc/fluid/api/paddle/name_scope.rst b/doc/fluid/api/paddle/name_scope.rst new file mode 100644 index 0000000000000000000000000000000000000000..7eb875efc0bac9bac54cf63594031c2168843eab --- /dev/null +++ b/doc/fluid/api/paddle/name_scope.rst @@ -0,0 +1,7 @@ +.. _api_paddle_name_scope: + +name_scope +------------------------------- +:doc_source: paddle.fluid.framework.name_scope + + diff --git a/doc/fluid/api/paddle/nonzero.rst b/doc/fluid/api/paddle/nonzero.rst new file mode 100644 index 0000000000000000000000000000000000000000..be55e77dece3e727509e1598c0ae384c1f26d4a4 --- /dev/null +++ b/doc/fluid/api/paddle/nonzero.rst @@ -0,0 +1,7 @@ +.. _api_paddle_nonzero: + +nonzero +------------------------------- +:doc_source: paddle.tensor.nonzero + + diff --git a/doc/fluid/api/paddle/norm.rst b/doc/fluid/api/paddle/norm.rst new file mode 100644 index 0000000000000000000000000000000000000000..7bff0a32baff3499cb8eff85d30282d750b9b25b --- /dev/null +++ b/doc/fluid/api/paddle/norm.rst @@ -0,0 +1,7 @@ +.. _api_paddle_norm: + +norm +------------------------------- +:doc_source: paddle.tensor.norm + + diff --git a/doc/fluid/api/paddle/not_equal.rst b/doc/fluid/api/paddle/not_equal.rst new file mode 100644 index 0000000000000000000000000000000000000000..4fd1cbe809d9dded938f2014124ee9b738b1d9cd --- /dev/null +++ b/doc/fluid/api/paddle/not_equal.rst @@ -0,0 +1,7 @@ +.. _api_paddle_not_equal: + +not_equal +------------------------------- +:doc_source: paddle.tensor.not_equal + + diff --git a/doc/fluid/api/paddle/ones.rst b/doc/fluid/api/paddle/ones.rst new file mode 100644 index 0000000000000000000000000000000000000000..557fa15d0958b68b14bca8899e8ad91d473c1dc3 --- /dev/null +++ b/doc/fluid/api/paddle/ones.rst @@ -0,0 +1,7 @@ +.. _api_paddle_ones: + +ones +------------------------------- +:doc_source: paddle.fluid.layers.ones + + diff --git a/doc/fluid/api/paddle/ones_like.rst b/doc/fluid/api/paddle/ones_like.rst new file mode 100644 index 0000000000000000000000000000000000000000..2365b0dfdddfedcd5292e08588e31184fa52342e --- /dev/null +++ b/doc/fluid/api/paddle/ones_like.rst @@ -0,0 +1,7 @@ +.. _api_paddle_ones_like: + +ones_like +------------------------------- +:doc_source: paddle.fluid.layers.ones_like + + diff --git a/doc/fluid/api/paddle/pow.rst b/doc/fluid/api/paddle/pow.rst new file mode 100644 index 0000000000000000000000000000000000000000..f253c66b7b19e046c4215352607d8fa9d111f49a --- /dev/null +++ b/doc/fluid/api/paddle/pow.rst @@ -0,0 +1,7 @@ +.. _api_paddle_pow: + +pow +------------------------------- +:doc_source: paddle.fluid.layers.pow + + diff --git a/doc/fluid/api/paddle/program_guard.rst b/doc/fluid/api/paddle/program_guard.rst new file mode 100644 index 0000000000000000000000000000000000000000..3b599489cf7446329405d922182ddd44aa04ba8f --- /dev/null +++ b/doc/fluid/api/paddle/program_guard.rst @@ -0,0 +1,7 @@ +.. _api_paddle_program_guard: + +program_guard +------------------------------- +:doc_source: paddle.fluid.framework.program_guard + + diff --git a/doc/fluid/api/paddle/py_func.rst b/doc/fluid/api/paddle/py_func.rst new file mode 100644 index 0000000000000000000000000000000000000000..f87420f5f0470bb90ff8a79a6316fad3d8af0426 --- /dev/null +++ b/doc/fluid/api/paddle/py_func.rst @@ -0,0 +1,7 @@ +.. 
_api_paddle_py_func: + +py_func +------------------------------- +:doc_source: paddle.fluid.layers.nn.py_func + + diff --git a/doc/fluid/api/paddle/rand.rst b/doc/fluid/api/paddle/rand.rst new file mode 100644 index 0000000000000000000000000000000000000000..03bb3be2d1733c6cee2b7d528c61d9f7471ac6a4 --- /dev/null +++ b/doc/fluid/api/paddle/rand.rst @@ -0,0 +1,7 @@ +.. _api_paddle_rand: + +rand +------------------------------- +:doc_source: paddle.tensor.rand + + diff --git a/doc/fluid/api/paddle/randint.rst b/doc/fluid/api/paddle/randint.rst new file mode 100644 index 0000000000000000000000000000000000000000..d94900efab5998d5a4d533c219af5166d770d261 --- /dev/null +++ b/doc/fluid/api/paddle/randint.rst @@ -0,0 +1,7 @@ +.. _api_paddle_randint: + +randint +------------------------------- +:doc_source: paddle.tensor.randint + + diff --git a/doc/fluid/api/paddle/randn.rst b/doc/fluid/api/paddle/randn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1709c9a40209b4e9286712de08d0b9dce33cf4f4 --- /dev/null +++ b/doc/fluid/api/paddle/randn.rst @@ -0,0 +1,7 @@ +.. _api_paddle_randn: + +randn +------------------------------- +:doc_source: paddle.tensor.randn + + diff --git a/doc/fluid/api/paddle/randperm.rst b/doc/fluid/api/paddle/randperm.rst new file mode 100644 index 0000000000000000000000000000000000000000..c47a802ea2c8866fa77908c73071ae11b3676225 --- /dev/null +++ b/doc/fluid/api/paddle/randperm.rst @@ -0,0 +1,7 @@ +.. _api_paddle_randperm: + +randperm +------------------------------- +:doc_source: paddle.tensor.randperm + + diff --git a/doc/fluid/api/paddle/rank.rst b/doc/fluid/api/paddle/rank.rst new file mode 100644 index 0000000000000000000000000000000000000000..572e95addf457116681fd3527b17dbcb17acc780 --- /dev/null +++ b/doc/fluid/api/paddle/rank.rst @@ -0,0 +1,7 @@ +.. _api_paddle_rank: + +rank +------------------------------- +:doc_source: paddle.fluid.layers.rank + + diff --git a/doc/fluid/api/paddle/reciprocal.rst b/doc/fluid/api/paddle/reciprocal.rst new file mode 100644 index 0000000000000000000000000000000000000000..0617981662b5f4e62e5075119a08890c64cf9d0b --- /dev/null +++ b/doc/fluid/api/paddle/reciprocal.rst @@ -0,0 +1,7 @@ +.. _api_paddle_reciprocal: + +reciprocal +------------------------------- +:doc_source: paddle.fluid.layers.reciprocal + + diff --git a/doc/fluid/api/paddle/reduce_all.rst b/doc/fluid/api/paddle/reduce_all.rst new file mode 100644 index 0000000000000000000000000000000000000000..aab5eabfe1a4aa7d5a53fc8db53c82dded5b6ffe --- /dev/null +++ b/doc/fluid/api/paddle/reduce_all.rst @@ -0,0 +1,7 @@ +.. _api_paddle_reduce_all: + +reduce_all +------------------------------- +:doc_source: paddle.fluid.layers.reduce_all + + diff --git a/doc/fluid/api/paddle/reduce_any.rst b/doc/fluid/api/paddle/reduce_any.rst new file mode 100644 index 0000000000000000000000000000000000000000..daed4faaa6c08cf15ff1982431d5e38969d79d31 --- /dev/null +++ b/doc/fluid/api/paddle/reduce_any.rst @@ -0,0 +1,7 @@ +.. _api_paddle_reduce_any: + +reduce_any +------------------------------- +:doc_source: paddle.fluid.layers.reduce_any + + diff --git a/doc/fluid/api/paddle/reduce_max.rst b/doc/fluid/api/paddle/reduce_max.rst new file mode 100644 index 0000000000000000000000000000000000000000..d3a2691f6257ec331b1b2b38de13e87b775874b0 --- /dev/null +++ b/doc/fluid/api/paddle/reduce_max.rst @@ -0,0 +1,7 @@ +.. 
_api_paddle_reduce_max: + +reduce_max +------------------------------- +:doc_source: paddle.fluid.layers.reduce_max + + diff --git a/doc/fluid/api/paddle/reduce_mean.rst b/doc/fluid/api/paddle/reduce_mean.rst new file mode 100644 index 0000000000000000000000000000000000000000..b532f8a5c70b28ca08aa0285d0d658b6b7569ee8 --- /dev/null +++ b/doc/fluid/api/paddle/reduce_mean.rst @@ -0,0 +1,7 @@ +.. _api_paddle_reduce_mean: + +reduce_mean +------------------------------- +:doc_source: paddle.fluid.layers.reduce_mean + + diff --git a/doc/fluid/api/paddle/reduce_min.rst b/doc/fluid/api/paddle/reduce_min.rst new file mode 100644 index 0000000000000000000000000000000000000000..4c329ec02fb39caaf046c25c12c5d73557663265 --- /dev/null +++ b/doc/fluid/api/paddle/reduce_min.rst @@ -0,0 +1,7 @@ +.. _api_paddle_reduce_min: + +reduce_min +------------------------------- +:doc_source: paddle.fluid.layers.reduce_min + + diff --git a/doc/fluid/api/paddle/reduce_prod.rst b/doc/fluid/api/paddle/reduce_prod.rst new file mode 100644 index 0000000000000000000000000000000000000000..b1bc9ccdb098d2e3396ca9ccd0db55de60fe53bc --- /dev/null +++ b/doc/fluid/api/paddle/reduce_prod.rst @@ -0,0 +1,7 @@ +.. _api_paddle_reduce_prod: + +reduce_prod +------------------------------- +:doc_source: paddle.fluid.layers.reduce_prod + + diff --git a/doc/fluid/api/paddle/reduce_sum.rst b/doc/fluid/api/paddle/reduce_sum.rst new file mode 100644 index 0000000000000000000000000000000000000000..08237db0e0277f0dea6b218540f06c0e0cc92a27 --- /dev/null +++ b/doc/fluid/api/paddle/reduce_sum.rst @@ -0,0 +1,7 @@ +.. _api_paddle_reduce_sum: + +reduce_sum +------------------------------- +:doc_source: paddle.fluid.layers.reduce_sum + + diff --git a/doc/fluid/api/paddle/reshape.rst b/doc/fluid/api/paddle/reshape.rst new file mode 100644 index 0000000000000000000000000000000000000000..2f388c010fad9f5c4fe59931cc2306fdb608f710 --- /dev/null +++ b/doc/fluid/api/paddle/reshape.rst @@ -0,0 +1,7 @@ +.. _api_paddle_reshape: + +reshape +------------------------------- +:doc_source: paddle.fluid.layers.reshape + + diff --git a/doc/fluid/api/paddle/reverse.rst b/doc/fluid/api/paddle/reverse.rst new file mode 100644 index 0000000000000000000000000000000000000000..db756f4e14c1acbf1a6ae9d79f22653c7ebba42e --- /dev/null +++ b/doc/fluid/api/paddle/reverse.rst @@ -0,0 +1,7 @@ +.. _api_paddle_reverse: + +reverse +------------------------------- +:doc_source: paddle.fluid.layers.reverse + + diff --git a/doc/fluid/api/paddle/roll.rst b/doc/fluid/api/paddle/roll.rst new file mode 100644 index 0000000000000000000000000000000000000000..9efeee975c1f92291ca85d71b18474636da0389b --- /dev/null +++ b/doc/fluid/api/paddle/roll.rst @@ -0,0 +1,7 @@ +.. _api_paddle_roll: + +roll +------------------------------- +:doc_source: paddle.tensor.roll + + diff --git a/doc/fluid/api/paddle/round.rst b/doc/fluid/api/paddle/round.rst new file mode 100644 index 0000000000000000000000000000000000000000..1ad15979079f35885e880524731724eaab4fd553 --- /dev/null +++ b/doc/fluid/api/paddle/round.rst @@ -0,0 +1,7 @@ +.. _api_paddle_round: + +round +------------------------------- +:doc_source: paddle.fluid.layers.round + + diff --git a/doc/fluid/api/paddle/rsqrt.rst b/doc/fluid/api/paddle/rsqrt.rst new file mode 100644 index 0000000000000000000000000000000000000000..ae0ac79b879069991464b8e8d61a5f9d5e530c2b --- /dev/null +++ b/doc/fluid/api/paddle/rsqrt.rst @@ -0,0 +1,7 @@ +.. 
_api_paddle_rsqrt: + +rsqrt +------------------------------- +:doc_source: paddle.fluid.layers.rsqrt + + diff --git a/doc/fluid/api/paddle/save.rst b/doc/fluid/api/paddle/save.rst new file mode 100644 index 0000000000000000000000000000000000000000..66bade1e4515407883c381136628da5d43382670 --- /dev/null +++ b/doc/fluid/api/paddle/save.rst @@ -0,0 +1,7 @@ +.. _api_paddle_save: + +save +------------------------------- +:doc_source: paddle.fluid.save + + diff --git a/doc/fluid/api/paddle/scale.rst b/doc/fluid/api/paddle/scale.rst new file mode 100644 index 0000000000000000000000000000000000000000..98c49a0d821c8cb5a6671b17857c198d6c6c0ac5 --- /dev/null +++ b/doc/fluid/api/paddle/scale.rst @@ -0,0 +1,7 @@ +.. _api_paddle_scale: + +scale +------------------------------- +:doc_source: paddle.fluid.layers.scale + + diff --git a/doc/fluid/api/paddle/scatter.rst b/doc/fluid/api/paddle/scatter.rst new file mode 100644 index 0000000000000000000000000000000000000000..d43359b2471fb64c16c926aaf3964f84c2147fa0 --- /dev/null +++ b/doc/fluid/api/paddle/scatter.rst @@ -0,0 +1,7 @@ +.. _api_paddle_scatter: + +scatter +------------------------------- +:doc_source: paddle.fluid.layers.scatter + + diff --git a/doc/fluid/api/paddle/scatter_nd.rst b/doc/fluid/api/paddle/scatter_nd.rst new file mode 100644 index 0000000000000000000000000000000000000000..f54a82400eeee0b30c0c081deb29e041e3798524 --- /dev/null +++ b/doc/fluid/api/paddle/scatter_nd.rst @@ -0,0 +1,7 @@ +.. _api_paddle_scatter_nd: + +scatter_nd +------------------------------- +:doc_source: paddle.fluid.layers.scatter_nd + + diff --git a/doc/fluid/api/paddle/scatter_nd_add.rst b/doc/fluid/api/paddle/scatter_nd_add.rst new file mode 100644 index 0000000000000000000000000000000000000000..33ef5a3df9346f77d4f588c8c9b4a4e64be3660e --- /dev/null +++ b/doc/fluid/api/paddle/scatter_nd_add.rst @@ -0,0 +1,7 @@ +.. _api_paddle_scatter_nd_add: + +scatter_nd_add +------------------------------- +:doc_source: paddle.fluid.layers.scatter_nd_add + + diff --git a/doc/fluid/api/paddle/scope_guard.rst b/doc/fluid/api/paddle/scope_guard.rst new file mode 100644 index 0000000000000000000000000000000000000000..e832b0198d066dfbaefb1c9f741658fdbdfcb59a --- /dev/null +++ b/doc/fluid/api/paddle/scope_guard.rst @@ -0,0 +1,7 @@ +.. _api_paddle_scope_guard: + +scope_guard +------------------------------- +:doc_source: paddle.fluid.executor.scope_guard + + diff --git a/doc/fluid/api/paddle/shape.rst b/doc/fluid/api/paddle/shape.rst new file mode 100644 index 0000000000000000000000000000000000000000..e42db48db15c3274e6be9c837432e70e981ebd87 --- /dev/null +++ b/doc/fluid/api/paddle/shape.rst @@ -0,0 +1,7 @@ +.. _api_paddle_shape: + +shape +------------------------------- +:doc_source: paddle.fluid.layers.shape + + diff --git a/doc/fluid/api/paddle/shard_index.rst b/doc/fluid/api/paddle/shard_index.rst new file mode 100644 index 0000000000000000000000000000000000000000..1bb05638b0879ba57b3bb634bf4f6cd210c15825 --- /dev/null +++ b/doc/fluid/api/paddle/shard_index.rst @@ -0,0 +1,7 @@ +.. _api_paddle_shard_index: + +shard_index +------------------------------- +:doc_source: paddle.fluid.layers.shard_index + + diff --git a/doc/fluid/api/paddle/shuffle.rst b/doc/fluid/api/paddle/shuffle.rst new file mode 100644 index 0000000000000000000000000000000000000000..f08502a88f7bc8fdd59aa6921ffcdca71dd1079d --- /dev/null +++ b/doc/fluid/api/paddle/shuffle.rst @@ -0,0 +1,7 @@ +.. 
_api_paddle_shuffle: + +shuffle +------------------------------- +:doc_source: paddle.fluid.io.shuffle + + diff --git a/doc/fluid/api/paddle/sign.rst b/doc/fluid/api/paddle/sign.rst new file mode 100644 index 0000000000000000000000000000000000000000..e59f0d96ce524c36f3ed969912b4f6694edf5f99 --- /dev/null +++ b/doc/fluid/api/paddle/sign.rst @@ -0,0 +1,7 @@ +.. _api_paddle_sign: + +sign +------------------------------- +:doc_source: paddle.fluid.layers.sign + + diff --git a/doc/fluid/api/paddle/sin.rst b/doc/fluid/api/paddle/sin.rst new file mode 100644 index 0000000000000000000000000000000000000000..2ec381ac7004a83c3ebb2bfc972c09c031facaa5 --- /dev/null +++ b/doc/fluid/api/paddle/sin.rst @@ -0,0 +1,7 @@ +.. _api_paddle_sin: + +sin +------------------------------- +:doc_source: paddle.fluid.layers.sin + + diff --git a/doc/fluid/api/paddle/slice.rst b/doc/fluid/api/paddle/slice.rst new file mode 100644 index 0000000000000000000000000000000000000000..2e61c444471670a7800259820c719841511ccbce --- /dev/null +++ b/doc/fluid/api/paddle/slice.rst @@ -0,0 +1,7 @@ +.. _api_paddle_slice: + +slice +------------------------------- +:doc_source: paddle.fluid.layers.slice + + diff --git a/doc/fluid/api/paddle/sort.rst b/doc/fluid/api/paddle/sort.rst new file mode 100644 index 0000000000000000000000000000000000000000..5f87357ccb39b52e975ef73c33b557f220c292a2 --- /dev/null +++ b/doc/fluid/api/paddle/sort.rst @@ -0,0 +1,7 @@ +.. _api_paddle_sort: + +sort +------------------------------- +:doc_source: paddle.tensor.sort + + diff --git a/doc/fluid/api/paddle/split.rst b/doc/fluid/api/paddle/split.rst new file mode 100644 index 0000000000000000000000000000000000000000..2655cd709bd157a1d6f56d9414ed31baef7cb489 --- /dev/null +++ b/doc/fluid/api/paddle/split.rst @@ -0,0 +1,7 @@ +.. _api_paddle_split: + +split +------------------------------- +:doc_source: paddle.fluid.layers.split + + diff --git a/doc/fluid/api/paddle/sqrt.rst b/doc/fluid/api/paddle/sqrt.rst new file mode 100644 index 0000000000000000000000000000000000000000..040365fc1e1c8079daf1b0b2783fe7cca21c70be --- /dev/null +++ b/doc/fluid/api/paddle/sqrt.rst @@ -0,0 +1,7 @@ +.. _api_paddle_sqrt: + +sqrt +------------------------------- +:doc_source: paddle.fluid.layers.sqrt + + diff --git a/doc/fluid/api/paddle/square.rst b/doc/fluid/api/paddle/square.rst new file mode 100644 index 0000000000000000000000000000000000000000..1be0cdd5f263974ad7cb677d1a906866f3103df9 --- /dev/null +++ b/doc/fluid/api/paddle/square.rst @@ -0,0 +1,7 @@ +.. _api_paddle_square: + +square +------------------------------- +:doc_source: paddle.fluid.layers.square + + diff --git a/doc/fluid/api/paddle/squeeze.rst b/doc/fluid/api/paddle/squeeze.rst new file mode 100644 index 0000000000000000000000000000000000000000..9ab4a189f16fea1ab483b161ded453a4894a7bff --- /dev/null +++ b/doc/fluid/api/paddle/squeeze.rst @@ -0,0 +1,7 @@ +.. _api_paddle_squeeze: + +squeeze +------------------------------- +:doc_source: paddle.fluid.layers.squeeze + + diff --git a/doc/fluid/api/paddle/stack.rst b/doc/fluid/api/paddle/stack.rst new file mode 100644 index 0000000000000000000000000000000000000000..2bee2c49f255c3b34debfc55d4f22904c654db9b --- /dev/null +++ b/doc/fluid/api/paddle/stack.rst @@ -0,0 +1,7 @@ +.. 
_api_paddle_stack: + +stack +------------------------------- +:doc_source: paddle.fluid.layers.stack + + diff --git a/doc/fluid/api/paddle/stanh.rst b/doc/fluid/api/paddle/stanh.rst new file mode 100644 index 0000000000000000000000000000000000000000..2244280fb1c611c44ad3dd94adfef492c587ace2 --- /dev/null +++ b/doc/fluid/api/paddle/stanh.rst @@ -0,0 +1,7 @@ +.. _api_paddle_stanh: + +stanh +------------------------------- +:doc_source: paddle.fluid.layers.stanh + + diff --git a/doc/fluid/api/paddle/std.rst b/doc/fluid/api/paddle/std.rst new file mode 100644 index 0000000000000000000000000000000000000000..68766f7557678952879dd66aea5f85d5fdd4a170 --- /dev/null +++ b/doc/fluid/api/paddle/std.rst @@ -0,0 +1,7 @@ +.. _api_paddle_std: + +std +------------------------------- +:doc_source: paddle.tensor.std + + diff --git a/doc/fluid/api/paddle/strided_slice.rst b/doc/fluid/api/paddle/strided_slice.rst new file mode 100644 index 0000000000000000000000000000000000000000..1e445a5d4107fccbc01d064199eb688353dfad32 --- /dev/null +++ b/doc/fluid/api/paddle/strided_slice.rst @@ -0,0 +1,7 @@ +.. _api_paddle_strided_slice: + +strided_slice +------------------------------- +:doc_source: paddle.fluid.layers.strided_slice + + diff --git a/doc/fluid/api/paddle/sum.rst b/doc/fluid/api/paddle/sum.rst new file mode 100644 index 0000000000000000000000000000000000000000..461dccf9197b86a3e576a9e37d05902bfd4c4fa1 --- /dev/null +++ b/doc/fluid/api/paddle/sum.rst @@ -0,0 +1,7 @@ +.. _api_paddle_sum: + +sum +------------------------------- +:doc_source: paddle.fluid.layers.reduce_sum + + diff --git a/doc/fluid/api/paddle/sums.rst b/doc/fluid/api/paddle/sums.rst new file mode 100644 index 0000000000000000000000000000000000000000..6c4c33f6efa59c0bb4e9725cfd5e0f74bbd37040 --- /dev/null +++ b/doc/fluid/api/paddle/sums.rst @@ -0,0 +1,7 @@ +.. _api_paddle_sums: + +sums +------------------------------- +:doc_source: paddle.fluid.layers.sums + + diff --git a/doc/fluid/api/paddle/t.rst b/doc/fluid/api/paddle/t.rst new file mode 100644 index 0000000000000000000000000000000000000000..4958e6c74f8db3d78289639a562fe6831ca262a2 --- /dev/null +++ b/doc/fluid/api/paddle/t.rst @@ -0,0 +1,7 @@ +.. _api_paddle_t: + +t +------------------------------- +:doc_source: paddle.tensor.t + + diff --git a/doc/fluid/api/paddle/tanh.rst b/doc/fluid/api/paddle/tanh.rst new file mode 100644 index 0000000000000000000000000000000000000000..03184f5ef8809599f0f5a379a061a257febdfb0c --- /dev/null +++ b/doc/fluid/api/paddle/tanh.rst @@ -0,0 +1,7 @@ +.. _api_paddle_tanh: + +tanh +------------------------------- +:doc_source: paddle.fluid.layers.tanh + + diff --git a/doc/fluid/api/paddle/topk.rst b/doc/fluid/api/paddle/topk.rst new file mode 100644 index 0000000000000000000000000000000000000000..4da4e8dc490d5028de2bd97cf8b88c9641d867df --- /dev/null +++ b/doc/fluid/api/paddle/topk.rst @@ -0,0 +1,7 @@ +.. _api_paddle_topk: + +topk +------------------------------- +:doc_source: paddle.fluid.layers.topk + + diff --git a/doc/fluid/api/paddle/trace.rst b/doc/fluid/api/paddle/trace.rst new file mode 100644 index 0000000000000000000000000000000000000000..dff070964b091ec9790409a5600f9047e084cda3 --- /dev/null +++ b/doc/fluid/api/paddle/trace.rst @@ -0,0 +1,7 @@ +.. 
_api_paddle_trace: + +trace +------------------------------- +:doc_source: paddle.tensor.trace + + diff --git a/doc/fluid/api/paddle/transpose.rst b/doc/fluid/api/paddle/transpose.rst new file mode 100644 index 0000000000000000000000000000000000000000..1735e2fd9ba2b66ebf1e36569f3a8b5f12d77160 --- /dev/null +++ b/doc/fluid/api/paddle/transpose.rst @@ -0,0 +1,7 @@ +.. _api_paddle_transpose: + +transpose +------------------------------- +:doc_source: paddle.fluid.layers.transpose + + diff --git a/doc/fluid/api/paddle/tril.rst b/doc/fluid/api/paddle/tril.rst new file mode 100644 index 0000000000000000000000000000000000000000..cc966a8c2a0810385e4d4d6c542a0e98bd9e0333 --- /dev/null +++ b/doc/fluid/api/paddle/tril.rst @@ -0,0 +1,7 @@ +.. _api_paddle_tril: + +tril +------------------------------- +:doc_source: paddle.tensor.tril + + diff --git a/doc/fluid/api/paddle/triu.rst b/doc/fluid/api/paddle/triu.rst new file mode 100644 index 0000000000000000000000000000000000000000..4f3d9cfc38e223a75a004cc8ae1c99dc62550362 --- /dev/null +++ b/doc/fluid/api/paddle/triu.rst @@ -0,0 +1,7 @@ +.. _api_paddle_triu: + +triu +------------------------------- +:doc_source: paddle.tensor.triu + + diff --git a/doc/fluid/api/paddle/unbind.rst b/doc/fluid/api/paddle/unbind.rst new file mode 100644 index 0000000000000000000000000000000000000000..d394d3140260eb3ea58631fb90ad7849cce872db --- /dev/null +++ b/doc/fluid/api/paddle/unbind.rst @@ -0,0 +1,7 @@ +.. _api_paddle_unbind: + +unbind +------------------------------- +:doc_source: paddle.tensor.unbind + + diff --git a/doc/fluid/api/paddle/unique.rst b/doc/fluid/api/paddle/unique.rst new file mode 100644 index 0000000000000000000000000000000000000000..2b0659ce29eeb86bd35597f64c7d69cd2f2172ed --- /dev/null +++ b/doc/fluid/api/paddle/unique.rst @@ -0,0 +1,7 @@ +.. _api_paddle_unique: + +unique +------------------------------- +:doc_source: paddle.fluid.layers.unique + + diff --git a/doc/fluid/api/paddle/unique_with_counts.rst b/doc/fluid/api/paddle/unique_with_counts.rst new file mode 100644 index 0000000000000000000000000000000000000000..3157d27177969afdc5ddf75f4290c75de12a87fd --- /dev/null +++ b/doc/fluid/api/paddle/unique_with_counts.rst @@ -0,0 +1,7 @@ +.. _api_paddle_unique_with_counts: + +unique_with_counts +------------------------------- +:doc_source: paddle.fluid.layers.unique_with_counts + + diff --git a/doc/fluid/api/paddle/unsqueeze.rst b/doc/fluid/api/paddle/unsqueeze.rst new file mode 100644 index 0000000000000000000000000000000000000000..caccc1d7f13865c6aa9003f17e483029d975a4eb --- /dev/null +++ b/doc/fluid/api/paddle/unsqueeze.rst @@ -0,0 +1,7 @@ +.. _api_paddle_unsqueeze: + +unsqueeze +------------------------------- +:doc_source: paddle.fluid.layers.unsqueeze + + diff --git a/doc/fluid/api/paddle/unstack.rst b/doc/fluid/api/paddle/unstack.rst new file mode 100644 index 0000000000000000000000000000000000000000..c21ac4dff3d80327d36bfe97755dfe7850386404 --- /dev/null +++ b/doc/fluid/api/paddle/unstack.rst @@ -0,0 +1,7 @@ +.. _api_paddle_unstack: + +unstack +------------------------------- +:doc_source: paddle.fluid.layers.unstack + + diff --git a/doc/fluid/api/paddle/var.rst b/doc/fluid/api/paddle/var.rst new file mode 100644 index 0000000000000000000000000000000000000000..3ec924c0e4e077b53842b9ca41a550af7c030666 --- /dev/null +++ b/doc/fluid/api/paddle/var.rst @@ -0,0 +1,7 @@ +.. 
_api_paddle_var: + +var +------------------------------- +:doc_source: paddle.tensor.var + + diff --git a/doc/fluid/api/paddle/where.rst b/doc/fluid/api/paddle/where.rst new file mode 100644 index 0000000000000000000000000000000000000000..f7080671dc15152fc37fc4b207c3184f89bd25fe --- /dev/null +++ b/doc/fluid/api/paddle/where.rst @@ -0,0 +1,7 @@ +.. _api_paddle_where: + +where +------------------------------- +:doc_source: paddle.fluid.layers.cond + + diff --git a/doc/fluid/api/paddle/zeros.rst b/doc/fluid/api/paddle/zeros.rst new file mode 100644 index 0000000000000000000000000000000000000000..9319d948b7a20b235ba54e0ff7c8203db65c9fcb --- /dev/null +++ b/doc/fluid/api/paddle/zeros.rst @@ -0,0 +1,7 @@ +.. _api_paddle_zeros: + +zeros +------------------------------- +:doc_source: paddle.fluid.layers.zeros + + diff --git a/doc/fluid/api/paddle/zeros_like.rst b/doc/fluid/api/paddle/zeros_like.rst new file mode 100644 index 0000000000000000000000000000000000000000..75f97e82ffde14d73e5f2fa40a624930b34d4a57 --- /dev/null +++ b/doc/fluid/api/paddle/zeros_like.rst @@ -0,0 +1,7 @@ +.. _api_paddle_zeros_like: + +zeros_like +------------------------------- +:doc_source: paddle.fluid.layers.zeros_like + + diff --git a/doc/fluid/api/recordio_writer/convert_reader_to_recordio_file.rst b/doc/fluid/api/recordio_writer/convert_reader_to_recordio_file.rst deleted file mode 100644 index af467c260db3ef0942076b9e001e806d9adc9cbb..0000000000000000000000000000000000000000 --- a/doc/fluid/api/recordio_writer/convert_reader_to_recordio_file.rst +++ /dev/null @@ -1,11 +0,0 @@ -.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` - !DO NOT EDIT THIS FILE MANUALLY! - -.. _api_fluid_recordio_writer_convert_reader_to_recordio_file: - -convert_reader_to_recordio_file -------------------------------- - -.. autofunction:: paddle.fluid.recordio_writer.convert_reader_to_recordio_file - :noindex: - diff --git a/doc/fluid/api/recordio_writer/convert_reader_to_recordio_files.rst b/doc/fluid/api/recordio_writer/convert_reader_to_recordio_files.rst deleted file mode 100644 index a3a58d24fa7e329fa69581a979701f836ada65eb..0000000000000000000000000000000000000000 --- a/doc/fluid/api/recordio_writer/convert_reader_to_recordio_files.rst +++ /dev/null @@ -1,11 +0,0 @@ -.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` - !DO NOT EDIT THIS FILE MANUALLY! - -.. _api_fluid_recordio_writer_convert_reader_to_recordio_files: - -convert_reader_to_recordio_files --------------------------------- - -.. autofunction:: paddle.fluid.recordio_writer.convert_reader_to_recordio_files - :noindex: - diff --git a/doc/fluid/api/review_tmp.rst b/doc/fluid/api/review_tmp.rst new file mode 100644 index 0000000000000000000000000000000000000000..e39366bcef08a15baa15c3cfbb318022a2dc47b2 --- /dev/null +++ b/doc/fluid/api/review_tmp.rst @@ -0,0 +1,9 @@ +================= +paddle.review_tmp +================= + +.. toctree:: + :maxdepth: 1 + + review_tmp/MarginRankingLoss.rst + review_tmp/margin_ranking_loss.rst diff --git a/doc/fluid/api/review_tmp/MarginRankingLoss.rst b/doc/fluid/api/review_tmp/MarginRankingLoss.rst new file mode 100644 index 0000000000000000000000000000000000000000..edc5d1cc57c85be5eb37312c6dc9b8b204b4d9b1 --- /dev/null +++ b/doc/fluid/api/review_tmp/MarginRankingLoss.rst @@ -0,0 +1,9 @@ +.. _api_nn_loss_MarginRankingLoss_tmp: + +MarginRankingLoss +----------------- + +.. 
autoclass:: paddle.nn.loss.MarginRankingLoss + :members: + :inherited-members: + :noindex: diff --git a/doc/fluid/api/review_tmp/margin_ranking_loss.rst b/doc/fluid/api/review_tmp/margin_ranking_loss.rst new file mode 100644 index 0000000000000000000000000000000000000000..289d1928bf05925dc81238c7ff0dad2623a4d3fc --- /dev/null +++ b/doc/fluid/api/review_tmp/margin_ranking_loss.rst @@ -0,0 +1,7 @@ +.. _api_nn_functional_margin_ranking_loss_tmp: + +margin_ranking_loss +------------------- + +.. autofunction:: paddle.nn.functional.margin_ranking_loss + :noindex: diff --git a/doc/fluid/api/static.rst b/doc/fluid/api/static.rst new file mode 100644 index 0000000000000000000000000000000000000000..fe60795ac04c3a01002be6da5c4d1a94d0fcbf9d --- /dev/null +++ b/doc/fluid/api/static.rst @@ -0,0 +1,12 @@ +======================= +paddle.static +======================= + + + + +.. toctree:: + :maxdepth: 1 + + static/data.rst + static/InputSpec.rst \ No newline at end of file diff --git a/doc/fluid/api/static/InputSpec.rst b/doc/fluid/api/static/InputSpec.rst new file mode 100644 index 0000000000000000000000000000000000000000..89da356c9dd82fdf7e32a0f25e1ccc6601138c80 --- /dev/null +++ b/doc/fluid/api/static/InputSpec.rst @@ -0,0 +1,8 @@ +.. _api_static_InputSpec: + +InputSpec +------------ + +.. autoclass:: paddle.static.InputSpec + :members: + :noindex: \ No newline at end of file diff --git a/doc/fluid/api/static/data.rst b/doc/fluid/api/static/data.rst new file mode 100644 index 0000000000000000000000000000000000000000..5555d9314555acfb69fadcc6ee5edff47fa2dea8 --- /dev/null +++ b/doc/fluid/api/static/data.rst @@ -0,0 +1,7 @@ +.. _api_static_data: + +data +------------ + +.. autofunction:: paddle.static.data + :noindex: \ No newline at end of file diff --git a/doc/fluid/api/tensor.rst b/doc/fluid/api/tensor.rst new file mode 100644 index 0000000000000000000000000000000000000000..b79132329944bd42e8573bf65b2fbc58d2a155f9 --- /dev/null +++ b/doc/fluid/api/tensor.rst @@ -0,0 +1,133 @@ +============= +paddle.tensor +============= + +.. 
toctree:: + :maxdepth: 1 + + tensor/abs.rst + tensor/acos.rst + tensor/add.rst + tensor/arange.rst + tensor/argmax.rst + tensor/argmin.rst + tensor/argsort.rst + tensor/asin.rst + tensor/atan.rst + tensor/cast.rst + tensor/ceil.rst + tensor/chunk.rst + tensor/concat.rst + tensor/cos.rst + tensor/create_tensor.rst + tensor/crop_tensor.rst + tensor/cross.rst + tensor/cumsum.rst + tensor/diag.rst + tensor/div.rst + tensor/elementwise_add.rst + tensor/elementwise_div.rst + tensor/elementwise_floordiv.rst + tensor/elementwise_mod.rst + tensor/elementwise_mul.rst + tensor/elementwise_pow.rst + tensor/elementwise_sub.rst + tensor/equal_all.rst + tensor/erf.rst + tensor/exp.rst + tensor/expand.rst + tensor/expand_as.rst + tensor/eye.rst + tensor/fill_constant.rst + tensor/flatten.rst + tensor/floor.rst + tensor/full.rst + tensor/full_like.rst + tensor/gather.rst + tensor/gather_nd.rst + tensor/greater_equal.rst + tensor/greater_than.rst + tensor/has_inf.rst + tensor/has_nan.rst + tensor/increment.rst + tensor/is_empty.rst + tensor/index_select.rst + tensor/isfinite.rst + tensor/isinf.rst + tensor/isnan.rst + tensor/less_equal.rst + tensor/less_than.rst + tensor/logic.rst + tensor/linalg.rst + tensor/linspace.rst + tensor/load.rst + tensor/log.rst + tensor/logical_and.rst + tensor/logical_not.rst + tensor/logical_or.rst + tensor/logical_xor.rst + tensor/math.rst + tensor/masked_select.rst + tensor/max.rst + tensor/maximum.rst + tensor/mean.rst + tensor/min.rst + tensor/minimum.rst + tensor/mm.rst + tensor/mul.rst + tensor/multiplex.rst + tensor/norm.rst + tensor/not_equal.rst + tensor/ones.rst + tensor/ones_like.rst + tensor/numel.rst + tensor/pow.rst + tensor/random.rst + tensor/rank.rst + tensor/reciprocal.rst + tensor/reduce_all.rst + tensor/reduce_any.rst + tensor/reduce_max.rst + tensor/reduce_mean.rst + tensor/reduce_min.rst + tensor/reduce_prod.rst + tensor/reduce_sum.rst + tensor/reshape.rst + tensor/reverse.rst + tensor/round.rst + tensor/rsqrt.rst + tensor/save.rst + tensor/scale.rst + tensor/scatter.rst + tensor/scatter_nd.rst + tensor/scatter_nd_add.rst + tensor/search.rst + tensor/shape.rst + tensor/shard_index.rst + tensor/shuffle.rst + tensor/sign.rst + tensor/sin.rst + tensor/slice.rst + tensor/sort.rst + tensor/split.rst + tensor/sqrt.rst + tensor/square.rst + tensor/squeeze.rst + tensor/stack.rst + tensor/stanh.rst + tensor/std.rst + tensor/stat.rst + tensor/strided_slice.rst + tensor/sum.rst + tensor/sums.rst + tensor/tanh.rst + tensor/topk.rst + tensor/transpose.rst + tensor/unique.rst + tensor/unique_with_counts.rst + tensor/unsqueeze.rst + tensor/unstack.rst + tensor/var.rst + tensor/where.rst + tensor/zeros.rst + tensor/zeros_like.rst diff --git a/doc/fluid/api/tensor/abs.rst b/doc/fluid/api/tensor/abs.rst new file mode 100644 index 0000000000000000000000000000000000000000..61b357dc809c72ccf710f7b3e467ce0b6a1b49df --- /dev/null +++ b/doc/fluid/api/tensor/abs.rst @@ -0,0 +1,7 @@ +.. _api_tensor_abs: + +abs +------------------------------- +:doc_source: paddle.fluid.layers.abs + + diff --git a/doc/fluid/api/tensor/acos.rst b/doc/fluid/api/tensor/acos.rst new file mode 100644 index 0000000000000000000000000000000000000000..58ba3bbfb91ef4d02d87a7d7898e85b855818441 --- /dev/null +++ b/doc/fluid/api/tensor/acos.rst @@ -0,0 +1,7 @@ +.. 
_api_tensor_cn_acos: + +acos +------------------------------- +:doc_source: paddle.fluid.layers.acos + + diff --git a/doc/fluid/api/tensor/add.rst b/doc/fluid/api/tensor/add.rst new file mode 100644 index 0000000000000000000000000000000000000000..223268b1ef43675ee87e8ba6bf5b75b46e5008aa --- /dev/null +++ b/doc/fluid/api/tensor/add.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_add: + +add +------------------------------- +:doc_source: paddle.fluid.layers.elementwise_add + + diff --git a/doc/fluid/api/tensor/arange.rst b/doc/fluid/api/tensor/arange.rst new file mode 100644 index 0000000000000000000000000000000000000000..6d15d63965f41a9de6debf789b4792a13634e4c2 --- /dev/null +++ b/doc/fluid/api/tensor/arange.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_arange: + +arange +------------------------------- +:doc_source: paddle.fluid.layers.range + + diff --git a/doc/fluid/api/tensor/argmax.rst b/doc/fluid/api/tensor/argmax.rst new file mode 100644 index 0000000000000000000000000000000000000000..8c4e15720bdeb2540f4d01026a5d68985cd32f05 --- /dev/null +++ b/doc/fluid/api/tensor/argmax.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_argmax: + +argmax +------ + +.. autofunction:: paddle.tensor.search.argmax + :noindex: + diff --git a/doc/fluid/api/tensor/argmin.rst b/doc/fluid/api/tensor/argmin.rst new file mode 100644 index 0000000000000000000000000000000000000000..c23f6127472a2a33acddfb35b7f8a21be9353f42 --- /dev/null +++ b/doc/fluid/api/tensor/argmin.rst @@ -0,0 +1,12 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_argmin: + +argmin +------ + +.. autofunction:: paddle.tensor.search.argmin + :noindex: + + diff --git a/doc/fluid/api/tensor/argsort.rst b/doc/fluid/api/tensor/argsort.rst new file mode 100644 index 0000000000000000000000000000000000000000..2168777783e8ff4a2ba5e217ce3f9982f4f97d8f --- /dev/null +++ b/doc/fluid/api/tensor/argsort.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_argsort: + +argsort +------------------------------- +:doc_source: paddle.tensor.argsort + + diff --git a/doc/fluid/api/tensor/asin.rst b/doc/fluid/api/tensor/asin.rst new file mode 100644 index 0000000000000000000000000000000000000000..8fca23d5ab12687ad8f2a5339d46c258aaab6ee6 --- /dev/null +++ b/doc/fluid/api/tensor/asin.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_asin: + +asin +------------------------------- +:doc_source: paddle.fluid.layers.asin + + diff --git a/doc/fluid/api/tensor/atan.rst b/doc/fluid/api/tensor/atan.rst new file mode 100644 index 0000000000000000000000000000000000000000..d26a40d1244ef1dec0e271148727c729d9d9f6cd --- /dev/null +++ b/doc/fluid/api/tensor/atan.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_atan: + +atan +------------------------------- +:doc_source: paddle.fluid.layers.atan + + diff --git a/doc/fluid/api/tensor/cast.rst b/doc/fluid/api/tensor/cast.rst new file mode 100644 index 0000000000000000000000000000000000000000..15182703078a8fa28736f6d6e0636775dee09e78 --- /dev/null +++ b/doc/fluid/api/tensor/cast.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_cast: + +cast +------------------------------- +:doc_source: paddle.fluid.layers.cast + + diff --git a/doc/fluid/api/tensor/ceil.rst b/doc/fluid/api/tensor/ceil.rst new file mode 100644 index 0000000000000000000000000000000000000000..f570078751ca28f9d07b046261f4e010342b4e66 --- /dev/null +++ b/doc/fluid/api/tensor/ceil.rst @@ -0,0 +1,7 @@ +.. 
_api_tensor_cn_ceil: + +ceil +------------------------------- +:doc_source: paddle.fluid.layers.ceil + + diff --git a/doc/fluid/api/tensor/chunk.rst b/doc/fluid/api/tensor/chunk.rst new file mode 100644 index 0000000000000000000000000000000000000000..5638cd48cf98b2e2433ab9e500f7f5dc3aa8e255 --- /dev/null +++ b/doc/fluid/api/tensor/chunk.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_manipulation_chunk: + +chunk +------- + +.. autofunction:: paddle.tensor.manipulation.chunk + :noindex: + diff --git a/doc/fluid/api/tensor/concat.rst b/doc/fluid/api/tensor/concat.rst new file mode 100644 index 0000000000000000000000000000000000000000..88bc288edaa3b74aab58780dcda9f942ff840c7b --- /dev/null +++ b/doc/fluid/api/tensor/concat.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_manipulation_concat: + +concat +-------- + +.. autofunction:: paddle.tensor.manipulation.concat + :noindex: + diff --git a/doc/fluid/api/tensor/cos.rst b/doc/fluid/api/tensor/cos.rst new file mode 100644 index 0000000000000000000000000000000000000000..8eb9afad82ed6a492c5032ada95dccb19ec71c69 --- /dev/null +++ b/doc/fluid/api/tensor/cos.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_cos: + +cos +------------------------------- +:doc_source: paddle.fluid.layers.cos + + diff --git a/doc/fluid/api/tensor/create_tensor.rst b/doc/fluid/api/tensor/create_tensor.rst new file mode 100644 index 0000000000000000000000000000000000000000..344f6fefcad4639b21e6bbab03201eb057ce37bb --- /dev/null +++ b/doc/fluid/api/tensor/create_tensor.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_create_tensor: + +create_tensor +------------------------------- +:doc_source: paddle.fluid.layers.create_tensor + + diff --git a/doc/fluid/api/tensor/crop_tensor.rst b/doc/fluid/api/tensor/crop_tensor.rst new file mode 100644 index 0000000000000000000000000000000000000000..2c6b72a4cb69c9bc34b30c53f87562c2e941f8e5 --- /dev/null +++ b/doc/fluid/api/tensor/crop_tensor.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_crop_tensor: + +crop_tensor +------------------------------- +:doc_source: paddle.fluid.layers.crop_tensor + + diff --git a/doc/fluid/api/tensor/cross.rst b/doc/fluid/api/tensor/cross.rst new file mode 100644 index 0000000000000000000000000000000000000000..3bb049f74d7232bd42020fee1b702c313395ba85 --- /dev/null +++ b/doc/fluid/api/tensor/cross.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_cross: + +cross +------------------------------- +:doc_source: paddle.tensor.cross + + diff --git a/doc/fluid/api/tensor/cumsum.rst b/doc/fluid/api/tensor/cumsum.rst new file mode 100644 index 0000000000000000000000000000000000000000..96c1bf0abf8c06621b93624941025e4929652add --- /dev/null +++ b/doc/fluid/api/tensor/cumsum.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_cumsum: + +cumsum +------------------------------- +:doc_source: paddle.tensor.cumsum + + diff --git a/doc/fluid/api/tensor/diag.rst b/doc/fluid/api/tensor/diag.rst new file mode 100644 index 0000000000000000000000000000000000000000..36b4b3b01443b33a26d363da39b9d781b37ea1fb --- /dev/null +++ b/doc/fluid/api/tensor/diag.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_diag: + +diag +------------------------------- +:doc_source: paddle.fluid.layers.diag + + diff --git a/doc/fluid/api/tensor/div.rst b/doc/fluid/api/tensor/div.rst new file mode 100644 index 0000000000000000000000000000000000000000..f3c0e2a1d13e4a5ff29bd9df6b086cee3dd2b511 --- /dev/null +++ b/doc/fluid/api/tensor/div.rst @@ -0,0 +1,7 @@ +.. 
_api_tensor_cn_div: + +div +------------------------------- +:doc_source: paddle.fluid.layers.elementwise_div + + diff --git a/doc/fluid/api/tensor/elementwise_add.rst b/doc/fluid/api/tensor/elementwise_add.rst new file mode 100644 index 0000000000000000000000000000000000000000..3c517357f9e8eb560f4750163ee4d6dd278e095f --- /dev/null +++ b/doc/fluid/api/tensor/elementwise_add.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_elementwise_add: + +elementwise_add +------------------------------- +:doc_source: paddle.fluid.layers.elementwise_add + + diff --git a/doc/fluid/api/tensor/elementwise_div.rst b/doc/fluid/api/tensor/elementwise_div.rst new file mode 100644 index 0000000000000000000000000000000000000000..9095a5380b1f9be1bb9ab6372fecc6ee27084b41 --- /dev/null +++ b/doc/fluid/api/tensor/elementwise_div.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_elementwise_div: + +elementwise_div +------------------------------- +:doc_source: paddle.fluid.layers.elementwise_div + + diff --git a/doc/fluid/api/tensor/elementwise_floordiv.rst b/doc/fluid/api/tensor/elementwise_floordiv.rst new file mode 100644 index 0000000000000000000000000000000000000000..9a0b8b433dcd8c43d8d439a969195d62f5f73158 --- /dev/null +++ b/doc/fluid/api/tensor/elementwise_floordiv.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_elementwise_floordiv: + +elementwise_floordiv +------------------------------- +:doc_source: paddle.fluid.layers.elementwise_floordiv + + diff --git a/doc/fluid/api/tensor/elementwise_mod.rst b/doc/fluid/api/tensor/elementwise_mod.rst new file mode 100644 index 0000000000000000000000000000000000000000..05866ce901ae0c9493732f5fb7813b090534c841 --- /dev/null +++ b/doc/fluid/api/tensor/elementwise_mod.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_elementwise_mod: + +elementwise_mod +------------------------------- +:doc_source: paddle.fluid.layers.elementwise_mod + + diff --git a/doc/fluid/api/tensor/elementwise_mul.rst b/doc/fluid/api/tensor/elementwise_mul.rst new file mode 100644 index 0000000000000000000000000000000000000000..bb5c52e3b212241b2e5bb84f487dcc6bd66a28d6 --- /dev/null +++ b/doc/fluid/api/tensor/elementwise_mul.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_elementwise_mul: + +elementwise_mul +------------------------------- +:doc_source: paddle.fluid.layers.elementwise_mul + + diff --git a/doc/fluid/api/tensor/elementwise_pow.rst b/doc/fluid/api/tensor/elementwise_pow.rst new file mode 100644 index 0000000000000000000000000000000000000000..b8c22385f1d6ae69a8d4c984aec79f3023e9b46f --- /dev/null +++ b/doc/fluid/api/tensor/elementwise_pow.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_elementwise_pow: + +elementwise_pow +------------------------------- +:doc_source: paddle.fluid.layers.elementwise_pow + + diff --git a/doc/fluid/api/tensor/elementwise_sub.rst b/doc/fluid/api/tensor/elementwise_sub.rst new file mode 100644 index 0000000000000000000000000000000000000000..4ff7a2e9e347f96bb7008517adb55e5c20cd9cbd --- /dev/null +++ b/doc/fluid/api/tensor/elementwise_sub.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_elementwise_sub: + +elementwise_sub +------------------------------- +:doc_source: paddle.fluid.layers.elementwise_sub + + diff --git a/doc/fluid/api/tensor/equal_all.rst b/doc/fluid/api/tensor/equal_all.rst new file mode 100644 index 0000000000000000000000000000000000000000..5149e6101d64b1e2c8626a1d35693fd503b2d230 --- /dev/null +++ b/doc/fluid/api/tensor/equal_all.rst @@ -0,0 +1,7 @@ +.. 
_api_tensor_cn_equal_all: + +equal_all +------------------------------- +:doc_source: paddle.tensor.equal_all + + diff --git a/doc/fluid/api/tensor/erf.rst b/doc/fluid/api/tensor/erf.rst new file mode 100644 index 0000000000000000000000000000000000000000..af13a5d7b6fa9628be88c0f8c3e812d32a94b1bd --- /dev/null +++ b/doc/fluid/api/tensor/erf.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_erf: + +erf +------------------------------- +:doc_source: paddle.fluid.layers.erf + + diff --git a/doc/fluid/api/tensor/exp.rst b/doc/fluid/api/tensor/exp.rst new file mode 100644 index 0000000000000000000000000000000000000000..1f3b948c1a4270b720e4275972cd10041aac8b76 --- /dev/null +++ b/doc/fluid/api/tensor/exp.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_exp: + +exp +------------------------------- +:doc_source: paddle.fluid.layers.exp + + diff --git a/doc/fluid/api/tensor/expand.rst b/doc/fluid/api/tensor/expand.rst new file mode 100644 index 0000000000000000000000000000000000000000..67dbb96e6ef71b9523fe7392abbe4a984aede6d7 --- /dev/null +++ b/doc/fluid/api/tensor/expand.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_expand: + +expand +------------------------------- +:doc_source: paddle.fluid.layers.expand + + diff --git a/doc/fluid/api/tensor/expand_as.rst b/doc/fluid/api/tensor/expand_as.rst new file mode 100644 index 0000000000000000000000000000000000000000..97e2e74e9599a5091a4cbc54e1a27ae95ff4d855 --- /dev/null +++ b/doc/fluid/api/tensor/expand_as.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_expand_as: + +expand_as +------------------------------- +:doc_source: paddle.fluid.layers.expand_as + + diff --git a/doc/fluid/api/tensor/eye.rst b/doc/fluid/api/tensor/eye.rst new file mode 100644 index 0000000000000000000000000000000000000000..be5125af675a52d6a6525e33e40d8f58bd965759 --- /dev/null +++ b/doc/fluid/api/tensor/eye.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_creation_eye: + +eye +-------- + +.. autofunction:: paddle.tensor.creation.eye + :noindex: + diff --git a/doc/fluid/api/tensor/fill_constant.rst b/doc/fluid/api/tensor/fill_constant.rst new file mode 100644 index 0000000000000000000000000000000000000000..db2f80ec780942b244549a673511a053c4a7f6c9 --- /dev/null +++ b/doc/fluid/api/tensor/fill_constant.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_fill_constant: + +fill_constant +------------------------------- +:doc_source: paddle.fluid.layers.fill_constant + + diff --git a/doc/fluid/api/tensor/flatten.rst b/doc/fluid/api/tensor/flatten.rst new file mode 100644 index 0000000000000000000000000000000000000000..c239c754791688d24d25ffdaf24aa50bc1ff617b --- /dev/null +++ b/doc/fluid/api/tensor/flatten.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_flatten: + +flatten +------------------------------- +:doc_source: paddle.fluid.layers.flatten + + diff --git a/doc/fluid/api/tensor/floor.rst b/doc/fluid/api/tensor/floor.rst new file mode 100644 index 0000000000000000000000000000000000000000..869fe176116f3552481b43e0a742240c099f1f21 --- /dev/null +++ b/doc/fluid/api/tensor/floor.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_floor: + +floor +------------------------------- +:doc_source: paddle.fluid.layers.floor + + diff --git a/doc/fluid/api/tensor/full.rst b/doc/fluid/api/tensor/full.rst new file mode 100644 index 0000000000000000000000000000000000000000..842cb4b074c8a1f3dffc674ed9fe86359d1f85e0 --- /dev/null +++ b/doc/fluid/api/tensor/full.rst @@ -0,0 +1,10 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. 
_api_tensor_creation_full: + +full +-------- + +.. autofunction:: paddle.tensor.creation.full + :noindex: diff --git a/doc/fluid/api/tensor/full_like.rst b/doc/fluid/api/tensor/full_like.rst new file mode 100644 index 0000000000000000000000000000000000000000..f5f876d570aab434c874c35663ee1f5ddb2b4b7e --- /dev/null +++ b/doc/fluid/api/tensor/full_like.rst @@ -0,0 +1,10 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_creation_full_like: + +full_like +------------ + +.. autofunction:: paddle.tensor.creation.full_like + :noindex: diff --git a/doc/fluid/api/tensor/gather.rst b/doc/fluid/api/tensor/gather.rst new file mode 100644 index 0000000000000000000000000000000000000000..68137b46c479455e0d8e19cc9411add758e558ec --- /dev/null +++ b/doc/fluid/api/tensor/gather.rst @@ -0,0 +1,12 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_manipulation_gather: + +gather +-------- + +.. autofunction:: paddle.tensor.manipulation.gather + :noindex: + + diff --git a/doc/fluid/api/tensor/gather_nd.rst b/doc/fluid/api/tensor/gather_nd.rst new file mode 100644 index 0000000000000000000000000000000000000000..93c95cb491e4a55c3d2d31c90036266ef980e434 --- /dev/null +++ b/doc/fluid/api/tensor/gather_nd.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_manipulation_gather_nd: + +gather_nd +---------- + +.. autofunction:: paddle.tensor.manipulation.gather_nd + :noindex: + diff --git a/doc/fluid/api/tensor/greater_equal.rst b/doc/fluid/api/tensor/greater_equal.rst new file mode 100644 index 0000000000000000000000000000000000000000..1a1394de05e7b4bf7b4cbfb463e3c9e79206d9cc --- /dev/null +++ b/doc/fluid/api/tensor/greater_equal.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_greater_equal: + +greater_equal +------------------------------- +:doc_source: paddle.tensor.greater_equal + + diff --git a/doc/fluid/api/tensor/greater_than.rst b/doc/fluid/api/tensor/greater_than.rst new file mode 100644 index 0000000000000000000000000000000000000000..b0ff74910eb094120568dc4f3c7f792e221c91b7 --- /dev/null +++ b/doc/fluid/api/tensor/greater_than.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_greater_than: + +greater_than +------------------------------- +:doc_source: paddle.tensor.greater_than + + diff --git a/doc/fluid/api/tensor/has_inf.rst b/doc/fluid/api/tensor/has_inf.rst new file mode 100644 index 0000000000000000000000000000000000000000..0a289aeaabe48bbc4ec4ae15334c36a5df65a758 --- /dev/null +++ b/doc/fluid/api/tensor/has_inf.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_has_inf: + +has_inf +------------------------------- +:doc_source: paddle.fluid.layers.has_inf + + diff --git a/doc/fluid/api/tensor/has_nan.rst b/doc/fluid/api/tensor/has_nan.rst new file mode 100644 index 0000000000000000000000000000000000000000..411f1170345749ee2ca59b45d8a0a2f2d3ccb6c5 --- /dev/null +++ b/doc/fluid/api/tensor/has_nan.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_has_nan: + +has_nan +------------------------------- +:doc_source: paddle.fluid.layers.has_nan + + diff --git a/doc/fluid/api/tensor/increment.rst b/doc/fluid/api/tensor/increment.rst new file mode 100644 index 0000000000000000000000000000000000000000..044ed8ace8be80a869b4cf3f1ff403c2eb419bd5 --- /dev/null +++ b/doc/fluid/api/tensor/increment.rst @@ -0,0 +1,7 @@ +.. 
_api_tensor_cn_increment: + +increment +------------------------------- +:doc_source: paddle.fluid.layers.increment + + diff --git a/doc/fluid/api/tensor/index_select.rst b/doc/fluid/api/tensor/index_select.rst new file mode 100644 index 0000000000000000000000000000000000000000..bb97773baefe511c47602988622baf96d6ae110b --- /dev/null +++ b/doc/fluid/api/tensor/index_select.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_search_index_select: + +index_select +------------- + +.. autofunction:: paddle.tensor.search.index_select + :noindex: + diff --git a/doc/fluid/api/tensor/is_empty.rst b/doc/fluid/api/tensor/is_empty.rst new file mode 100644 index 0000000000000000000000000000000000000000..822c45ef1afd5f4b40b24ce2286bc972253a1452 --- /dev/null +++ b/doc/fluid/api/tensor/is_empty.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_is_empty: + +is_empty +------------------------------- +:doc_source: paddle.fluid.layers.is_empty + + diff --git a/doc/fluid/api/tensor/isfinite.rst b/doc/fluid/api/tensor/isfinite.rst new file mode 100644 index 0000000000000000000000000000000000000000..8a599a8ab6eb2217070a5bc5eccea4f378b1ee04 --- /dev/null +++ b/doc/fluid/api/tensor/isfinite.rst @@ -0,0 +1,8 @@ +.. _api_tensor_isfinite: + +isfinite +------------------------------- + +.. autofunction:: paddle.tensor.math.isfinite + :noindex: + diff --git a/doc/fluid/api/tensor/isinf.rst b/doc/fluid/api/tensor/isinf.rst new file mode 100644 index 0000000000000000000000000000000000000000..df86445ecfb15c4fda06f442b7b4c4f7cd0d5c0f --- /dev/null +++ b/doc/fluid/api/tensor/isinf.rst @@ -0,0 +1,8 @@ +.. _api_tensor_isinf: + +isinf +------------------------------- + +.. autofunction:: paddle.tensor.math.isinf + :noindex: + diff --git a/doc/fluid/api/tensor/isnan.rst b/doc/fluid/api/tensor/isnan.rst new file mode 100644 index 0000000000000000000000000000000000000000..0fa742c2d2c26d6040a5f303e925c06114d8b93a --- /dev/null +++ b/doc/fluid/api/tensor/isnan.rst @@ -0,0 +1,8 @@ +.. _api_tensor_isnan: + +isnan +------------------------------- + +.. autofunction:: paddle.tensor.math.isnan + :noindex: + diff --git a/doc/fluid/api/tensor/less_equal.rst b/doc/fluid/api/tensor/less_equal.rst new file mode 100644 index 0000000000000000000000000000000000000000..4adbeb1ccf2972ccb30cb1fb762dbea7a74114a4 --- /dev/null +++ b/doc/fluid/api/tensor/less_equal.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_less_equal: + +less_equal +------------------------------- +:doc_source: paddle.tensor.less_equal + + diff --git a/doc/fluid/api/tensor/less_than.rst b/doc/fluid/api/tensor/less_than.rst new file mode 100644 index 0000000000000000000000000000000000000000..592dc48d66bbdd4c6506e118c98b654bd55e93fe --- /dev/null +++ b/doc/fluid/api/tensor/less_than.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_less_than: + +less_than +------------------------------- +:doc_source: paddle.tensor.less_than + + diff --git a/doc/fluid/api/tensor/linalg.rst b/doc/fluid/api/tensor/linalg.rst new file mode 100644 index 0000000000000000000000000000000000000000..08cabf1c4bfb9ee5be5b8d843ce0936707c8a50c --- /dev/null +++ b/doc/fluid/api/tensor/linalg.rst @@ -0,0 +1,8 @@ +====== +linalg +====== + +.. toctree:: + :maxdepth: 1 + + linalg/dist.rst diff --git a/doc/fluid/api/tensor/linalg/dist.rst b/doc/fluid/api/tensor/linalg/dist.rst new file mode 100644 index 0000000000000000000000000000000000000000..6ca92b5366fcfc3cae450ab8790b0773d276a57c --- /dev/null +++ b/doc/fluid/api/tensor/linalg/dist.rst @@ -0,0 +1,11 @@ +.. 
THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_linalg_dist: + +dist +-------- + +.. autofunction:: paddle.tensor.linalg.dist + :noindex: + diff --git a/doc/fluid/api/tensor/linspace.rst b/doc/fluid/api/tensor/linspace.rst new file mode 100644 index 0000000000000000000000000000000000000000..b8fecff2b9be66aa8bfbb6eb4ebed5f3bf564f8f --- /dev/null +++ b/doc/fluid/api/tensor/linspace.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_creation_linspace: + +linspace +-------- + +.. autofunction:: paddle.tensor.creation.linspace + :noindex: + diff --git a/doc/fluid/api/tensor/load.rst b/doc/fluid/api/tensor/load.rst new file mode 100644 index 0000000000000000000000000000000000000000..eba05933ca3bd83e6dd97d2625b74090b19d513c --- /dev/null +++ b/doc/fluid/api/tensor/load.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_load: + +load +------------------------------- +:doc_source: paddle.fluid.io.load + + diff --git a/doc/fluid/api/tensor/log.rst b/doc/fluid/api/tensor/log.rst new file mode 100644 index 0000000000000000000000000000000000000000..2e748ae1f1aa7979a6b027bf1aa0396171ba6183 --- /dev/null +++ b/doc/fluid/api/tensor/log.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_log: + +log +------------------------------- +:doc_source: paddle.fluid.layers.log + + diff --git a/doc/fluid/api/tensor/logic.rst b/doc/fluid/api/tensor/logic.rst new file mode 100644 index 0000000000000000000000000000000000000000..389c83b100894432c202533508bd2fa173c53246 --- /dev/null +++ b/doc/fluid/api/tensor/logic.rst @@ -0,0 +1,8 @@ +====== +logic +====== + +.. toctree:: + :maxdepth: 1 + + logic/allclose.rst \ No newline at end of file diff --git a/doc/fluid/api/tensor/logic/allclose.rst b/doc/fluid/api/tensor/logic/allclose.rst new file mode 100644 index 0000000000000000000000000000000000000000..72a8c73d61df39271a187aa9fa3e56eb90006844 --- /dev/null +++ b/doc/fluid/api/tensor/logic/allclose.rst @@ -0,0 +1,10 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_logic_allclose: + +allclose +-------- + +.. autofunction:: paddle.tensor.logic.allclose + :noindex: \ No newline at end of file diff --git a/doc/fluid/api/tensor/logical_and.rst b/doc/fluid/api/tensor/logical_and.rst new file mode 100644 index 0000000000000000000000000000000000000000..e9f4b93cfa9175ab2fc4fffb5b1336ca13db7319 --- /dev/null +++ b/doc/fluid/api/tensor/logical_and.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_logical_and: + +logical_and +------------------------------- +:doc_source: paddle.fluid.layers.logical_and + + diff --git a/doc/fluid/api/tensor/logical_not.rst b/doc/fluid/api/tensor/logical_not.rst new file mode 100644 index 0000000000000000000000000000000000000000..db8e64183c86bc959b5c44e21c17f573e124147b --- /dev/null +++ b/doc/fluid/api/tensor/logical_not.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_logical_not: + +logical_not +------------------------------- +:doc_source: paddle.fluid.layers.logical_not + + diff --git a/doc/fluid/api/tensor/logical_or.rst b/doc/fluid/api/tensor/logical_or.rst new file mode 100644 index 0000000000000000000000000000000000000000..ab0747e60aac6295255225dbeed16e08a8acc835 --- /dev/null +++ b/doc/fluid/api/tensor/logical_or.rst @@ -0,0 +1,7 @@ +.. 
_api_tensor_cn_logical_or: + +logical_or +------------------------------- +:doc_source: paddle.fluid.layers.logical_or + + diff --git a/doc/fluid/api/tensor/logical_xor.rst b/doc/fluid/api/tensor/logical_xor.rst new file mode 100644 index 0000000000000000000000000000000000000000..660c5cff8281c84e79410a6e058f68bc2003a463 --- /dev/null +++ b/doc/fluid/api/tensor/logical_xor.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_logical_xor: + +logical_xor +------------------------------- +:doc_source: paddle.fluid.layers.logical_xor + + diff --git a/doc/fluid/api/tensor/masked_select.rst b/doc/fluid/api/tensor/masked_select.rst new file mode 100644 index 0000000000000000000000000000000000000000..b3e0d11be9c38ba8e7459903e0198a8d1e39676d --- /dev/null +++ b/doc/fluid/api/tensor/masked_select.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_search_masked_select: + +masked_select +--------------- + +.. autofunction:: paddle.tensor.search.masked_select + :noindex: + diff --git a/doc/fluid/api/tensor/math.rst b/doc/fluid/api/tensor/math.rst new file mode 100644 index 0000000000000000000000000000000000000000..8afa8020941359f9391e81b4ab043a5467ffc516 --- /dev/null +++ b/doc/fluid/api/tensor/math.rst @@ -0,0 +1,32 @@ +==== +math +==== + +.. toctree:: + :maxdepth: 1 + + math/add.rst + math/addcmul.rst + math/addmm.rst + math/atan.rst + math/clamp.rst + math/divide.rst + math/floor_divide.rst + math/remainder.rst + math/floor_mod.rst + math/mod.rst + math/elementwise_sum.rst + math/log1p.rst + math/logsumexp.rst + math/max.rst + math/min.rst + math/mm.rst + math/mul.rst + math/multiply.rst + math/pow.rst + math/prod.rst + math/sign.rst + math/sin.rst + math/sqrt.rst + math/sum.rst + math/tanh.rst diff --git a/doc/fluid/api/tensor/math/add.rst b/doc/fluid/api/tensor/math/add.rst new file mode 100644 index 0000000000000000000000000000000000000000..0b604ac2d1805066bfa24f8a12c8fbed36b14d0d --- /dev/null +++ b/doc/fluid/api/tensor/math/add.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_math_add: + +add +--- + +.. autofunction:: paddle.tensor.math.add + :noindex: + diff --git a/doc/fluid/api/tensor/math/atan.rst b/doc/fluid/api/tensor/math/atan.rst new file mode 100644 index 0000000000000000000000000000000000000000..31b11dbbe4fbc39d7f5c472478cea1544edfefe7 --- /dev/null +++ b/doc/fluid/api/tensor/math/atan.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_math_atan: + +atan +---- + +.. autofunction:: paddle.tensor.math.atan + :noindex: + diff --git a/doc/fluid/api/tensor/math/div.rst b/doc/fluid/api/tensor/math/div.rst new file mode 100644 index 0000000000000000000000000000000000000000..cf8397dbffc36f895319dced427487e7f3851d40 --- /dev/null +++ b/doc/fluid/api/tensor/math/div.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_math_div: + +div +--- + +.. autofunction:: paddle.tensor.math.div + :noindex: + diff --git a/doc/fluid/api/tensor/math/divide.rst b/doc/fluid/api/tensor/math/divide.rst new file mode 100644 index 0000000000000000000000000000000000000000..db7c8aa3cd242d58f34e19845a5102b5a90c6b7e --- /dev/null +++ b/doc/fluid/api/tensor/math/divide.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_math_divide: + +divide +------ + +.. 
autofunction:: paddle.tensor.math.divide + :noindex: + diff --git a/doc/fluid/api/tensor/math/elementwise_sum.rst b/doc/fluid/api/tensor/math/elementwise_sum.rst new file mode 100644 index 0000000000000000000000000000000000000000..05acb3f78f66192b5eea938cdd528b56da247a22 --- /dev/null +++ b/doc/fluid/api/tensor/math/elementwise_sum.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_math_elementwise_sum: + +elementwise_sum +--------------- + +.. autofunction:: paddle.tensor.math.elementwise_sum + :noindex: + diff --git a/doc/fluid/api/tensor/math/floor_divide.rst b/doc/fluid/api/tensor/math/floor_divide.rst new file mode 100644 index 0000000000000000000000000000000000000000..aed75d9790babc2d0e7007f6df313f1987241e66 --- /dev/null +++ b/doc/fluid/api/tensor/math/floor_divide.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_math_floor_divide: + +floor_divide +------------ + +.. autofunction:: paddle.tensor.math.floor_divide + :noindex: + diff --git a/doc/fluid/api/tensor/math/floor_mod.rst b/doc/fluid/api/tensor/math/floor_mod.rst new file mode 100644 index 0000000000000000000000000000000000000000..655e419119b70e73cc8d68af42692923ae1b6e9c --- /dev/null +++ b/doc/fluid/api/tensor/math/floor_mod.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_math_floor_mod: + +floor_mod +--------- + +.. autofunction:: paddle.tensor.math.floor_mod + :noindex: + diff --git a/doc/fluid/api/tensor/math/logsumexp.rst b/doc/fluid/api/tensor/math/logsumexp.rst new file mode 100644 index 0000000000000000000000000000000000000000..63a25475394863db763250e653ec1348d18e0726 --- /dev/null +++ b/doc/fluid/api/tensor/math/logsumexp.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_math_logsumexp: + +logsumexp +--------- + +.. autofunction:: paddle.tensor.math.logsumexp + :noindex: + diff --git a/doc/fluid/api/tensor/math/mm.rst b/doc/fluid/api/tensor/math/mm.rst new file mode 100644 index 0000000000000000000000000000000000000000..8668c44055f25025ae080b3a08cce39919cd888b --- /dev/null +++ b/doc/fluid/api/tensor/math/mm.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_math_mm: + +mm +-- + +.. autofunction:: paddle.tensor.math.mm + :noindex: + diff --git a/doc/fluid/api/tensor/math/mod.rst b/doc/fluid/api/tensor/math/mod.rst new file mode 100644 index 0000000000000000000000000000000000000000..a5e207a6c43331c150050bc292c32de2db5f4243 --- /dev/null +++ b/doc/fluid/api/tensor/math/mod.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_math_mod: + +mod +--- + +.. autofunction:: paddle.tensor.math.mod + :noindex: + diff --git a/doc/fluid/api/tensor/math/mul.rst b/doc/fluid/api/tensor/math/mul.rst new file mode 100644 index 0000000000000000000000000000000000000000..9b14559a4a35c701b8169cf425c1e8787db3561c --- /dev/null +++ b/doc/fluid/api/tensor/math/mul.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_math_mul: + +mul +--- + +.. 
autofunction:: paddle.tensor.math.mul + :noindex: + diff --git a/doc/fluid/api/tensor/math/multiply.rst b/doc/fluid/api/tensor/math/multiply.rst new file mode 100644 index 0000000000000000000000000000000000000000..e483e0214e2caac2d38d489d9071be005f4cdb19 --- /dev/null +++ b/doc/fluid/api/tensor/math/multiply.rst @@ -0,0 +1,9 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! +.. _api_tensor_math_multiply: + +multiply +-------- + + .. autofunction:: paddle.tensor.math.multiply + :noindex: \ No newline at end of file diff --git a/doc/fluid/api/tensor/math/pow.rst b/doc/fluid/api/tensor/math/pow.rst new file mode 100644 index 0000000000000000000000000000000000000000..5d0da558dd738aee845ef01c14d62ba4f023e921 --- /dev/null +++ b/doc/fluid/api/tensor/math/pow.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_math_pow: + +pow +--- + +.. autofunction:: paddle.tensor.math.pow + :noindex: + diff --git a/doc/fluid/api/tensor/math/prod.rst b/doc/fluid/api/tensor/math/prod.rst new file mode 100644 index 0000000000000000000000000000000000000000..b5ced4643775d9001501342ef852dfd38108d49d --- /dev/null +++ b/doc/fluid/api/tensor/math/prod.rst @@ -0,0 +1,9 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! +.. _api_tensor_math_prod: + +prod +---- + +.. autofunction:: paddle.tensor.math.prod + :noindex: diff --git a/doc/fluid/api/tensor/math/remainder.rst b/doc/fluid/api/tensor/math/remainder.rst new file mode 100644 index 0000000000000000000000000000000000000000..6d753fd1310ef994c83a46e06cfa8c4befd4ed88 --- /dev/null +++ b/doc/fluid/api/tensor/math/remainder.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_math_remainder: + +remainder +--------- + +.. autofunction:: paddle.tensor.math.remainder + :noindex: + diff --git a/doc/fluid/api/tensor/math/sign.rst b/doc/fluid/api/tensor/math/sign.rst new file mode 100644 index 0000000000000000000000000000000000000000..5ee442aec6366911066c206e5e33c9a6f5e5e743 --- /dev/null +++ b/doc/fluid/api/tensor/math/sign.rst @@ -0,0 +1,10 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! +.. _api_tensor_math_sign: + +sign +------ + +.. autofunction:: paddle.tensor.math.sign + :noindex: + diff --git a/doc/fluid/api/tensor/math/sin.rst b/doc/fluid/api/tensor/math/sin.rst new file mode 100644 index 0000000000000000000000000000000000000000..862334131da6f38a853fcff1ed5860db625561ae --- /dev/null +++ b/doc/fluid/api/tensor/math/sin.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_math_sin: + +sin +--- + +.. autofunction:: paddle.tensor.math.sin + :noindex: + diff --git a/doc/fluid/api/tensor/math/sqrt.rst b/doc/fluid/api/tensor/math/sqrt.rst new file mode 100644 index 0000000000000000000000000000000000000000..c0ad257993458844e18e514731f988b289a8e73f --- /dev/null +++ b/doc/fluid/api/tensor/math/sqrt.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_math_sqrt: + +sqrt +---- + +.. autofunction:: paddle.tensor.math.sqrt + :noindex: + diff --git a/doc/fluid/api/tensor/math/sum.rst b/doc/fluid/api/tensor/math/sum.rst new file mode 100644 index 0000000000000000000000000000000000000000..8946b3aa5056818046793ac856962c6ee4e0d175 --- /dev/null +++ b/doc/fluid/api/tensor/math/sum.rst @@ -0,0 +1,11 @@ +.. 
THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_math_sum: + +sum +--- + +.. autofunction:: paddle.tensor.math.sum + :noindex: + diff --git a/doc/fluid/api/tensor/math/tanh.rst b/doc/fluid/api/tensor/math/tanh.rst new file mode 100644 index 0000000000000000000000000000000000000000..ceb5971e0c3e51565f9fedec4df881eab5f86396 --- /dev/null +++ b/doc/fluid/api/tensor/math/tanh.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_math_tanh: + +tanh +---- + +.. autofunction:: paddle.tensor.math.tanh + :noindex: + diff --git a/doc/fluid/api/tensor/max.rst b/doc/fluid/api/tensor/max.rst new file mode 100644 index 0000000000000000000000000000000000000000..61a8667f8cab06a8433d9ab9e143390d3c1ccbc8 --- /dev/null +++ b/doc/fluid/api/tensor/max.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_max: + +max +------------------------------- +:doc_source: paddle.tensor.max + + diff --git a/doc/fluid/api/tensor/maximum.rst b/doc/fluid/api/tensor/maximum.rst new file mode 100644 index 0000000000000000000000000000000000000000..7c91c5f2bd465a17ceae3a2f602addbd115ed273 --- /dev/null +++ b/doc/fluid/api/tensor/maximum.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_maximum: + +maximum +------------------------------- +:doc_source: paddle.tensor.maximum + + diff --git a/doc/fluid/api/tensor/mean.rst b/doc/fluid/api/tensor/mean.rst new file mode 100644 index 0000000000000000000000000000000000000000..d226a37107af8e67ef4d8ea0bf9a17e536fede36 --- /dev/null +++ b/doc/fluid/api/tensor/mean.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_mean: + +mean +--------- + +.. autofunction:: paddle.tensor.mean + :noindex: + diff --git a/doc/fluid/api/tensor/min.rst b/doc/fluid/api/tensor/min.rst new file mode 100644 index 0000000000000000000000000000000000000000..cdb8df5c370ce66a5e8e39555699e09730bdcf23 --- /dev/null +++ b/doc/fluid/api/tensor/min.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_min: + +min +------------------------------- +:doc_source: paddle.tensor.min + + diff --git a/doc/fluid/api/tensor/minimum.rst b/doc/fluid/api/tensor/minimum.rst new file mode 100644 index 0000000000000000000000000000000000000000..725aaeb8a7f2fa0cf7b1a7fa1d8611a4c4967ac7 --- /dev/null +++ b/doc/fluid/api/tensor/minimum.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_minimum: + +minimum +------------------------------- +:doc_source: paddle.tensor.minimum + + diff --git a/doc/fluid/api/tensor/mm.rst b/doc/fluid/api/tensor/mm.rst new file mode 100644 index 0000000000000000000000000000000000000000..bb11742e525dc371f542f7de2cdea442571891e6 --- /dev/null +++ b/doc/fluid/api/tensor/mm.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_mm: + +mm +------------------------------- +:doc_source: paddle.fluid.layers.matmul + + diff --git a/doc/fluid/api/tensor/mul.rst b/doc/fluid/api/tensor/mul.rst new file mode 100644 index 0000000000000000000000000000000000000000..40af2fcfa1946fcd9040b11f22419960bc892736 --- /dev/null +++ b/doc/fluid/api/tensor/mul.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_mul: + +mul +------------------------------- +:doc_source: paddle.fluid.layers.mul + + diff --git a/doc/fluid/api/tensor/multiplex.rst b/doc/fluid/api/tensor/multiplex.rst new file mode 100644 index 0000000000000000000000000000000000000000..b66aa5287e3bde38007505e5395ff8e931c34c86 --- /dev/null +++ b/doc/fluid/api/tensor/multiplex.rst @@ -0,0 +1,7 @@ +.. 
_api_tensor_cn_multiplex: + +multiplex +------------------------------- +:doc_source: paddle.fluid.layers.multiplex + + diff --git a/doc/fluid/api/tensor/norm.rst b/doc/fluid/api/tensor/norm.rst new file mode 100644 index 0000000000000000000000000000000000000000..453ea0b63674dd746c5903e11f495d64b20f89f9 --- /dev/null +++ b/doc/fluid/api/tensor/norm.rst @@ -0,0 +1,10 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_linalg_norm: + +norm +------------ + +.. autofunction:: paddle.tensor.linalg.norm + :noindex: diff --git a/doc/fluid/api/tensor/not_equal.rst b/doc/fluid/api/tensor/not_equal.rst new file mode 100644 index 0000000000000000000000000000000000000000..8aeac42d73c7683ba037bef31a6b68c2acf01064 --- /dev/null +++ b/doc/fluid/api/tensor/not_equal.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_not_equal: + +not_equal +------------------------------- +:doc_source: paddle.tensor.not_equal + + diff --git a/doc/fluid/api/tensor/numel.rst b/doc/fluid/api/tensor/numel.rst new file mode 100644 index 0000000000000000000000000000000000000000..ea48ba7de7a96c302351636d0cd99e5a184c3a5e --- /dev/null +++ b/doc/fluid/api/tensor/numel.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_stat_numel: + +numel +------- + +.. autofunction:: paddle.tensor.stat.numel + :noindex: + diff --git a/doc/fluid/api/tensor/ones.rst b/doc/fluid/api/tensor/ones.rst new file mode 100644 index 0000000000000000000000000000000000000000..150ab46d8795a56d60b014831c2f437562c3bc84 --- /dev/null +++ b/doc/fluid/api/tensor/ones.rst @@ -0,0 +1,12 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_creation_ones: + +ones +-------- + +.. autofunction:: paddle.tensor.creation_ones + :noindex: + + diff --git a/doc/fluid/api/tensor/ones_like.rst b/doc/fluid/api/tensor/ones_like.rst new file mode 100644 index 0000000000000000000000000000000000000000..47ecd764f36b11d425863b0a09a111040aed31d2 --- /dev/null +++ b/doc/fluid/api/tensor/ones_like.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_ones_like: + +ones_like +--------- + +.. autofunction:: paddle.tensor.ones_like + :noindex: + diff --git a/doc/fluid/api/tensor/pow.rst b/doc/fluid/api/tensor/pow.rst new file mode 100644 index 0000000000000000000000000000000000000000..875b26cda0f71e8bbfeda60fe31bb0673d282c4b --- /dev/null +++ b/doc/fluid/api/tensor/pow.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_pow: + +pow +------------------------------- +:doc_source: paddle.fluid.layers.pow + + diff --git a/doc/fluid/api/tensor/random.rst b/doc/fluid/api/tensor/random.rst new file mode 100644 index 0000000000000000000000000000000000000000..b119e70bb3226a9bf24f5d831da1a6b685abbca5 --- /dev/null +++ b/doc/fluid/api/tensor/random.rst @@ -0,0 +1,14 @@ +====== +random +====== + +.. toctree:: + :maxdepth: 1 + + random/normal.rst + random/rand.rst + random/randint.rst + random/randn.rst + random/randperm.rst + random/standard_normal.rst + random/uniform.rst diff --git a/doc/fluid/api/tensor/random/normal.rst b/doc/fluid/api/tensor/random/normal.rst new file mode 100644 index 0000000000000000000000000000000000000000..1617ef9ba4b53424a37ce7972952a27c0b686740 --- /dev/null +++ b/doc/fluid/api/tensor/random/normal.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_random_normal: + +normal +------ + +.. 
autofunction:: paddle.tensor.random.normal + :noindex: + diff --git a/doc/fluid/api/tensor/random/rand.rst b/doc/fluid/api/tensor/random/rand.rst new file mode 100644 index 0000000000000000000000000000000000000000..7ad959e99d516a080f7e65f66eb385f6e4fe495b --- /dev/null +++ b/doc/fluid/api/tensor/random/rand.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_random_rand: + +rand +---- + +.. autofunction:: paddle.tensor.random.rand + :noindex: + diff --git a/doc/fluid/api/tensor/random/randint.rst b/doc/fluid/api/tensor/random/randint.rst new file mode 100644 index 0000000000000000000000000000000000000000..e5a9d6139f425536ef05ab9ef28f3fba625ce7ad --- /dev/null +++ b/doc/fluid/api/tensor/random/randint.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_random_randint: + +randint +------- + +.. autofunction:: paddle.tensor.random.randint + :noindex: + diff --git a/doc/fluid/api/tensor/random/randn.rst b/doc/fluid/api/tensor/random/randn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6d332aaa1441ff4634a5674bd4765ac0534ab39b --- /dev/null +++ b/doc/fluid/api/tensor/random/randn.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_random_randn: + +randn +----- + +.. autofunction:: paddle.tensor.random.randn + :noindex: + diff --git a/doc/fluid/api/tensor/random/randperm.rst b/doc/fluid/api/tensor/random/randperm.rst new file mode 100644 index 0000000000000000000000000000000000000000..0aa4cc612db88a78abc8b3bb91fc347bed5b6f5a --- /dev/null +++ b/doc/fluid/api/tensor/random/randperm.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_random_randperm: + +randperm +-------- + +.. autofunction:: paddle.tensor.random.randperm + :noindex: + diff --git a/doc/fluid/api/tensor/random/standard_normal.rst b/doc/fluid/api/tensor/random/standard_normal.rst new file mode 100644 index 0000000000000000000000000000000000000000..a279a26c077ea05a62bacd3f709624ea5fc7438d --- /dev/null +++ b/doc/fluid/api/tensor/random/standard_normal.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_random_standard_normal: + +standard_normal +--------------- + +.. autofunction:: paddle.tensor.random.standard_normal + :noindex: + diff --git a/doc/fluid/api/tensor/random/uniform.rst b/doc/fluid/api/tensor/random/uniform.rst new file mode 100644 index 0000000000000000000000000000000000000000..b323d371212dbbbe760267d4ec4551f339b69f52 --- /dev/null +++ b/doc/fluid/api/tensor/random/uniform.rst @@ -0,0 +1,10 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_random_uniform: + +uniform +------- + +.. autofunction:: paddle.tensor.random.uniform + :noindex: diff --git a/doc/fluid/api/tensor/rank.rst b/doc/fluid/api/tensor/rank.rst new file mode 100644 index 0000000000000000000000000000000000000000..716f919d98e9b493e238f265ae1f256b3496dd48 --- /dev/null +++ b/doc/fluid/api/tensor/rank.rst @@ -0,0 +1,7 @@ +.. 
_api_tensor_cn_rank: + +rank +------------------------------- +:doc_source: paddle.fluid.layers.rank + + diff --git a/doc/fluid/api/tensor/reciprocal.rst b/doc/fluid/api/tensor/reciprocal.rst new file mode 100644 index 0000000000000000000000000000000000000000..42709596bf9fe2d9c2b10af99cf32653cf897d4f --- /dev/null +++ b/doc/fluid/api/tensor/reciprocal.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_reciprocal: + +reciprocal +------------------------------- +:doc_source: paddle.fluid.layers.reciprocal + + diff --git a/doc/fluid/api/tensor/reduce_all.rst b/doc/fluid/api/tensor/reduce_all.rst new file mode 100644 index 0000000000000000000000000000000000000000..cac7df54d7ec563433de7b7c8528e3513921e940 --- /dev/null +++ b/doc/fluid/api/tensor/reduce_all.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_reduce_all: + +reduce_all +------------------------------- +:doc_source: paddle.fluid.layers.reduce_all + + diff --git a/doc/fluid/api/tensor/reduce_any.rst b/doc/fluid/api/tensor/reduce_any.rst new file mode 100644 index 0000000000000000000000000000000000000000..e3aa0a7210f854332d166c8817913eccb376fcdc --- /dev/null +++ b/doc/fluid/api/tensor/reduce_any.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_reduce_any: + +reduce_any +------------------------------- +:doc_source: paddle.fluid.layers.reduce_any + + diff --git a/doc/fluid/api/tensor/reduce_max.rst b/doc/fluid/api/tensor/reduce_max.rst new file mode 100644 index 0000000000000000000000000000000000000000..7c502ba298f14f72dace417485139146031ab5d4 --- /dev/null +++ b/doc/fluid/api/tensor/reduce_max.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_reduce_max: + +reduce_max +------------------------------- +:doc_source: paddle.fluid.layers.reduce_max + + diff --git a/doc/fluid/api/tensor/reduce_mean.rst b/doc/fluid/api/tensor/reduce_mean.rst new file mode 100644 index 0000000000000000000000000000000000000000..475b86ff0ab1daeee7b0167631fd74b5421cb336 --- /dev/null +++ b/doc/fluid/api/tensor/reduce_mean.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_reduce_mean: + +reduce_mean +------------------------------- +:doc_source: paddle.fluid.layers.reduce_mean + + diff --git a/doc/fluid/api/tensor/reduce_min.rst b/doc/fluid/api/tensor/reduce_min.rst new file mode 100644 index 0000000000000000000000000000000000000000..9fdf51abf5312c097cce4b7363c7233bffb0257e --- /dev/null +++ b/doc/fluid/api/tensor/reduce_min.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_reduce_min: + +reduce_min +------------------------------- +:doc_source: paddle.fluid.layers.reduce_min + + diff --git a/doc/fluid/api/tensor/reduce_prod.rst b/doc/fluid/api/tensor/reduce_prod.rst new file mode 100644 index 0000000000000000000000000000000000000000..63ef33caecf23316de66fe9a2a44a6a7dec3b519 --- /dev/null +++ b/doc/fluid/api/tensor/reduce_prod.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_reduce_prod: + +reduce_prod +------------------------------- +:doc_source: paddle.fluid.layers.reduce_prod + + diff --git a/doc/fluid/api/tensor/reduce_sum.rst b/doc/fluid/api/tensor/reduce_sum.rst new file mode 100644 index 0000000000000000000000000000000000000000..ea9c036b9647aade517d8093cea5beeeac1b8d1a --- /dev/null +++ b/doc/fluid/api/tensor/reduce_sum.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_reduce_sum: + +reduce_sum +------------------------------- +:doc_source: paddle.fluid.layers.reduce_sum + + diff --git a/doc/fluid/api/tensor/reshape.rst b/doc/fluid/api/tensor/reshape.rst new file mode 100644 index 0000000000000000000000000000000000000000..c95f9bcc20d05863e8d4933e9f5f904838d63cc1 --- /dev/null +++ b/doc/fluid/api/tensor/reshape.rst @@ -0,0 +1,7 @@ +.. 
_api_tensor_cn_reshape: + +reshape +------------------------------- +:doc_source: paddle.fluid.layers.reshape + + diff --git a/doc/fluid/api/tensor/reverse.rst b/doc/fluid/api/tensor/reverse.rst new file mode 100644 index 0000000000000000000000000000000000000000..0a81ad4c30fadd77b7b68fa86dcad5ac443eb75a --- /dev/null +++ b/doc/fluid/api/tensor/reverse.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_reverse: + +reverse +------------------------------- +:doc_source: paddle.fluid.layers.reverse + + diff --git a/doc/fluid/api/tensor/round.rst b/doc/fluid/api/tensor/round.rst new file mode 100644 index 0000000000000000000000000000000000000000..d333f6c39b1e8c740e3b018aa7e6e6c4d1a27519 --- /dev/null +++ b/doc/fluid/api/tensor/round.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_round: + +round +------------------------------- +:doc_source: paddle.fluid.layers.round + + diff --git a/doc/fluid/api/tensor/rsqrt.rst b/doc/fluid/api/tensor/rsqrt.rst new file mode 100644 index 0000000000000000000000000000000000000000..9e31b2fa26fb986708f82a4b88230abb97a675af --- /dev/null +++ b/doc/fluid/api/tensor/rsqrt.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_rsqrt: + +rsqrt +------------------------------- +:doc_source: paddle.fluid.layers.rsqrt + + diff --git a/doc/fluid/api/tensor/save.rst b/doc/fluid/api/tensor/save.rst new file mode 100644 index 0000000000000000000000000000000000000000..5082778ea025339ef8da7c49a377b8b442359748 --- /dev/null +++ b/doc/fluid/api/tensor/save.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_save: + +save +------------------------------- +:doc_source: paddle.fluid.save + + diff --git a/doc/fluid/api/tensor/scale.rst b/doc/fluid/api/tensor/scale.rst new file mode 100644 index 0000000000000000000000000000000000000000..8d30627b0d464f92e82c29ff5ccc3f3025c962d6 --- /dev/null +++ b/doc/fluid/api/tensor/scale.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_scale: + +scale +------------------------------- +:doc_source: paddle.fluid.layers.scale + + diff --git a/doc/fluid/api/tensor/scatter.rst b/doc/fluid/api/tensor/scatter.rst new file mode 100644 index 0000000000000000000000000000000000000000..4083f11eedcce87ecfcee63632a72b6587eab088 --- /dev/null +++ b/doc/fluid/api/tensor/scatter.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_scatter: + +scatter +------------------------------- +:doc_source: paddle.fluid.layers.scatter + + diff --git a/doc/fluid/api/tensor/scatter_nd.rst b/doc/fluid/api/tensor/scatter_nd.rst new file mode 100644 index 0000000000000000000000000000000000000000..101d51f148a7e86a9f33f49067def59f75c539fa --- /dev/null +++ b/doc/fluid/api/tensor/scatter_nd.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_scatter_nd: + +scatter_nd +------------------------------- +:doc_source: paddle.fluid.layers.scatter_nd + + diff --git a/doc/fluid/api/tensor/scatter_nd_add.rst b/doc/fluid/api/tensor/scatter_nd_add.rst new file mode 100644 index 0000000000000000000000000000000000000000..cc8ebaa65fab98c8a145b67c1d142fa1e0a5146c --- /dev/null +++ b/doc/fluid/api/tensor/scatter_nd_add.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_scatter_nd_add: + +scatter_nd_add +------------------------------- +:doc_source: paddle.fluid.layers.scatter_nd_add + + diff --git a/doc/fluid/api/tensor/shape.rst b/doc/fluid/api/tensor/shape.rst new file mode 100644 index 0000000000000000000000000000000000000000..c141936284983737db47ba463a8fdee4476f0d06 --- /dev/null +++ b/doc/fluid/api/tensor/shape.rst @@ -0,0 +1,7 @@ +.. 
_api_tensor_cn_shape: + +shape +------------------------------- +:doc_source: paddle.fluid.layers.shape + + diff --git a/doc/fluid/api/tensor/shard_index.rst b/doc/fluid/api/tensor/shard_index.rst new file mode 100644 index 0000000000000000000000000000000000000000..afee6c6c375ca5ee74857f96a6eead14ac65a979 --- /dev/null +++ b/doc/fluid/api/tensor/shard_index.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_shard_index: + +shard_index +------------------------------- +:doc_source: paddle.fluid.layers.shard_index + + diff --git a/doc/fluid/api/tensor/shuffle.rst b/doc/fluid/api/tensor/shuffle.rst new file mode 100644 index 0000000000000000000000000000000000000000..aaa48eb7df14d29fc50c126879c19bd94e9a5009 --- /dev/null +++ b/doc/fluid/api/tensor/shuffle.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_shuffle: + +shuffle +------------------------------- +:doc_source: paddle.fluid.io.shuffle + + diff --git a/doc/fluid/api/tensor/sign.rst b/doc/fluid/api/tensor/sign.rst new file mode 100644 index 0000000000000000000000000000000000000000..683c09a70d229b44af43399e8648ba7454c101e2 --- /dev/null +++ b/doc/fluid/api/tensor/sign.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_sign: + +sign +------------------------------- +:doc_source: paddle.fluid.layers.sign + + diff --git a/doc/fluid/api/tensor/sin.rst b/doc/fluid/api/tensor/sin.rst new file mode 100644 index 0000000000000000000000000000000000000000..f0821d84f5c9b1695d68c45b78de99579e1808be --- /dev/null +++ b/doc/fluid/api/tensor/sin.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_sin: + +sin +------------------------------- +:doc_source: paddle.fluid.layers.sin + + diff --git a/doc/fluid/api/tensor/slice.rst b/doc/fluid/api/tensor/slice.rst new file mode 100644 index 0000000000000000000000000000000000000000..8b75aa4014df0f3bbc734b1d2b4c129f725bd517 --- /dev/null +++ b/doc/fluid/api/tensor/slice.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_slice: + +slice +------------------------------- +:doc_source: paddle.fluid.layers.slice + + diff --git a/doc/fluid/api/tensor/sort.rst b/doc/fluid/api/tensor/sort.rst new file mode 100644 index 0000000000000000000000000000000000000000..21da4ab432d026f281b69183d95134f1fbadd553 --- /dev/null +++ b/doc/fluid/api/tensor/sort.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_sort: + +sort +------------------------------- +:doc_source: paddle.tensor.sort + + diff --git a/doc/fluid/api/tensor/split.rst b/doc/fluid/api/tensor/split.rst new file mode 100644 index 0000000000000000000000000000000000000000..104169ea5936841521df3f5f63d7342a2258ee95 --- /dev/null +++ b/doc/fluid/api/tensor/split.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_manipulation_split: + +split +-------- + +.. autofunction:: paddle.tensor.manipulation.split + :noindex: + diff --git a/doc/fluid/api/tensor/sqrt.rst b/doc/fluid/api/tensor/sqrt.rst new file mode 100644 index 0000000000000000000000000000000000000000..aef16db387a68c43b4091f47e4b0d006311a6f06 --- /dev/null +++ b/doc/fluid/api/tensor/sqrt.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_sqrt: + +sqrt +------------------------------- +:doc_source: paddle.fluid.layers.sqrt + + diff --git a/doc/fluid/api/tensor/square.rst b/doc/fluid/api/tensor/square.rst new file mode 100644 index 0000000000000000000000000000000000000000..0eb62226e460002fba6a586613bb0bdb50e12763 --- /dev/null +++ b/doc/fluid/api/tensor/square.rst @@ -0,0 +1,7 @@ +.. 
_api_tensor_cn_square: + +square +------------------------------- +:doc_source: paddle.fluid.layers.square + + diff --git a/doc/fluid/api/tensor/squeeze.rst b/doc/fluid/api/tensor/squeeze.rst new file mode 100644 index 0000000000000000000000000000000000000000..ef11141080dfa633de50f7c9cc1d7307521fe34b --- /dev/null +++ b/doc/fluid/api/tensor/squeeze.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_squeeze: + +squeeze +------------------------------- +:doc_source: paddle.fluid.layers.squeeze + + diff --git a/doc/fluid/api/tensor/stack.rst b/doc/fluid/api/tensor/stack.rst new file mode 100644 index 0000000000000000000000000000000000000000..02e9d20f0ac6d85efd63348bf0eb6cb8f8ed74e0 --- /dev/null +++ b/doc/fluid/api/tensor/stack.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_stack: + +stack +------------------------------- +:doc_source: paddle.fluid.layers.stack + + diff --git a/doc/fluid/api/tensor/stanh.rst b/doc/fluid/api/tensor/stanh.rst new file mode 100644 index 0000000000000000000000000000000000000000..9450cf6f55fabaf8f0786472d6b3edcd591ff868 --- /dev/null +++ b/doc/fluid/api/tensor/stanh.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_stanh: + +stanh +------------------------------- +:doc_source: paddle.fluid.layers.stanh + + diff --git a/doc/fluid/api/tensor/std.rst b/doc/fluid/api/tensor/std.rst new file mode 100644 index 0000000000000000000000000000000000000000..3d8db35c14dd6ad64f0dd23bbcc42b3e3101481d --- /dev/null +++ b/doc/fluid/api/tensor/std.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_std: + +std +--------- + +.. autofunction:: paddle.tensor.std + :noindex: + diff --git a/doc/fluid/api/tensor/strided_slice.rst b/doc/fluid/api/tensor/strided_slice.rst new file mode 100644 index 0000000000000000000000000000000000000000..249834f67728fc704919ed99f68d0847994efe07 --- /dev/null +++ b/doc/fluid/api/tensor/strided_slice.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_strided_slice: + +strided_slice +------------------------------- +:doc_source: paddle.fluid.layers.strided_slice + + diff --git a/doc/fluid/api/tensor/sum.rst b/doc/fluid/api/tensor/sum.rst new file mode 100644 index 0000000000000000000000000000000000000000..234e723acab4ba4b86fdbf278280113a16340f28 --- /dev/null +++ b/doc/fluid/api/tensor/sum.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_sum: + +sum +------------------------------- +:doc_source: paddle.fluid.layers.reduce_sum + + diff --git a/doc/fluid/api/tensor/sums.rst b/doc/fluid/api/tensor/sums.rst new file mode 100644 index 0000000000000000000000000000000000000000..356c8c673a8a01523dad147eb19f6566e01da999 --- /dev/null +++ b/doc/fluid/api/tensor/sums.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_sums: + +sums +------------------------------- +:doc_source: paddle.fluid.layers.sums + + diff --git a/doc/fluid/api/tensor/tanh.rst b/doc/fluid/api/tensor/tanh.rst new file mode 100644 index 0000000000000000000000000000000000000000..3523aaf1577e80cf64d6d0dfc8b251a01de0875f --- /dev/null +++ b/doc/fluid/api/tensor/tanh.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_tanh: + +tanh +------------------------------- +:doc_source: paddle.fluid.layers.tanh + + diff --git a/doc/fluid/api/tensor/topk.rst b/doc/fluid/api/tensor/topk.rst new file mode 100644 index 0000000000000000000000000000000000000000..33a36eb2426a6d0186cfa067c71c293e4296674c --- /dev/null +++ b/doc/fluid/api/tensor/topk.rst @@ -0,0 +1,7 @@ +.. 
_api_tensor_cn_topk: + +topk +------------------------------- +:doc_source: paddle.tensor.topk + + diff --git a/doc/fluid/api/tensor/transpose.rst b/doc/fluid/api/tensor/transpose.rst new file mode 100644 index 0000000000000000000000000000000000000000..5c4ea641482241c3dcfe411ac1996bb40ce20faf --- /dev/null +++ b/doc/fluid/api/tensor/transpose.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_transpose: + +transpose +------------------------------- +:doc_source: paddle.fluid.layers.transpose + + diff --git a/doc/fluid/api/tensor/unique.rst b/doc/fluid/api/tensor/unique.rst new file mode 100644 index 0000000000000000000000000000000000000000..fe8be883a3f7537cb01815ec978455f5908ea355 --- /dev/null +++ b/doc/fluid/api/tensor/unique.rst @@ -0,0 +1,10 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_manipulation_unique: + +unique +-------- + +.. autofunction:: paddle.tensor.manipulation.unique + diff --git a/doc/fluid/api/tensor/unique_with_counts.rst b/doc/fluid/api/tensor/unique_with_counts.rst new file mode 100644 index 0000000000000000000000000000000000000000..3416604445f0a6125eb980e7a1a26501566fd3f3 --- /dev/null +++ b/doc/fluid/api/tensor/unique_with_counts.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_unique_with_counts: + +unique_with_counts +------------------------------- +:doc_source: paddle.fluid.layers.unique_with_counts + + diff --git a/doc/fluid/api/tensor/unsqueeze.rst b/doc/fluid/api/tensor/unsqueeze.rst new file mode 100644 index 0000000000000000000000000000000000000000..229e923a596e7b5e6b2a103708877483f02c87f9 --- /dev/null +++ b/doc/fluid/api/tensor/unsqueeze.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_unsqueeze: + +unsqueeze +------------------------------- +:doc_source: paddle.fluid.layers.unsqueeze + + diff --git a/doc/fluid/api/tensor/unstack.rst b/doc/fluid/api/tensor/unstack.rst new file mode 100644 index 0000000000000000000000000000000000000000..e909f6e14b1a24098871e7a07f772badf9787bea --- /dev/null +++ b/doc/fluid/api/tensor/unstack.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_unstack: + +unstack +------------------------------- +:doc_source: paddle.fluid.layers.unstack + + diff --git a/doc/fluid/api/tensor/var.rst b/doc/fluid/api/tensor/var.rst new file mode 100644 index 0000000000000000000000000000000000000000..4a617dec382b6e4e1d1926987500fec8f3e1c7cd --- /dev/null +++ b/doc/fluid/api/tensor/var.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_var: + +var +--------- + +.. autofunction:: paddle.tensor.var + :noindex: + diff --git a/doc/fluid/api/tensor/where.rst b/doc/fluid/api/tensor/where.rst new file mode 100644 index 0000000000000000000000000000000000000000..d5f306dffd90746d631062d5804fa8f79635e287 --- /dev/null +++ b/doc/fluid/api/tensor/where.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_where: + +where +------------------------------- +:doc_source: paddle.fluid.layers.cond + + diff --git a/doc/fluid/api/tensor/zeros.rst b/doc/fluid/api/tensor/zeros.rst new file mode 100644 index 0000000000000000000000000000000000000000..19d4fb7a2f8dee04db30a920fc6f2b4153eae321 --- /dev/null +++ b/doc/fluid/api/tensor/zeros.rst @@ -0,0 +1,12 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_tensor_creation_zeros: + +zeros +-------- + +.. 
autofunction:: paddle.tensor.creation.zeros + :noindex: + + diff --git a/doc/fluid/api/tensor/zeros_like.rst b/doc/fluid/api/tensor/zeros_like.rst new file mode 100644 index 0000000000000000000000000000000000000000..150f4ad84abd85db34457d76264a2daa9e863272 --- /dev/null +++ b/doc/fluid/api/tensor/zeros_like.rst @@ -0,0 +1,7 @@ +.. _api_tensor_cn_zeros_like: + +zeros_like +------------------------------- +:doc_source: paddle.fluid.layers.zeros_like + + diff --git a/doc/fluid/api/transpiler.rst b/doc/fluid/api/transpiler.rst index 2492b98136f85ccb49922c08b096be4f7eb96d7d..28905bd06b502b30df36f03d6aea8c1295eef02f 100644 --- a/doc/fluid/api/transpiler.rst +++ b/doc/fluid/api/transpiler.rst @@ -10,4 +10,3 @@ fluid.transpiler transpiler/HashName.rst transpiler/memory_optimize.rst transpiler/release_memory.rst - transpiler/RoundRobin.rst diff --git a/doc/fluid/api/transpiler/RoundRobin.rst b/doc/fluid/api/transpiler/RoundRobin.rst deleted file mode 100644 index 547757d20e8388b3ea51b52a0b4c9e23116f0645..0000000000000000000000000000000000000000 --- a/doc/fluid/api/transpiler/RoundRobin.rst +++ /dev/null @@ -1,13 +0,0 @@ -.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` - !DO NOT EDIT THIS FILE MANUALLY! - -.. _api_fluid_transpiler_RoundRobin: - -RoundRobin ----------- - -.. autoclass:: paddle.fluid.transpiler.RoundRobin - :members: - :inherited-members: - :noindex: - diff --git a/doc/fluid/api_cn/api_tree_cn.rst b/doc/fluid/api_cn/api_tree_cn.rst index 3abcd3a1865e465afa80bf7fb3ab0807d3e1d94d..4625348ff97fd825bd85dd4bf9eb82649e637028 100644 --- a/doc/fluid/api_cn/api_tree_cn.rst +++ b/doc/fluid/api_cn/api_tree_cn.rst @@ -13,7 +13,9 @@ API接口 data/dataset_cn.rst data_feeder_cn.rst dataset_cn.rst + distributed_cn.rst dygraph_cn.rst + static_cn.rst executor_cn.rst initializer_cn.rst io_cn.rst @@ -25,5 +27,6 @@ API接口 regularizer_cn.rst transpiler_cn.rst unique_name_cn.rst + static_cn.rst diff --git a/doc/fluid/api_cn/backward_cn/append_backward_cn.rst b/doc/fluid/api_cn/backward_cn/append_backward_cn.rst index 1019bab98b6e6a667c97f5f6bf926b5026023fac..b3aa98f93e501d66687689c47c611677b4d7673e 100644 --- a/doc/fluid/api_cn/backward_cn/append_backward_cn.rst +++ b/doc/fluid/api_cn/backward_cn/append_backward_cn.rst @@ -3,10 +3,13 @@ append_backward ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.backward.append_backward(loss, parameter_list=None, no_grad_set=None, callbacks=None) +:api_attr: 声明式编程模式(静态图) + + + 该接口将向主程序(``main_program``)追加反向部分 。 完整的神经网络训练由前向和反向传播组成。但是当我们配置网络时,我们只需要指定其前向部分。 diff --git a/doc/fluid/api_cn/backward_cn/gradients_cn.rst b/doc/fluid/api_cn/backward_cn/gradients_cn.rst index 5bb45f2074c9202798033d9813cdcaccdc6f2244..0165f7f2f3d0bc61bde4b08c3b79ddf1fee4e4ba 100644 --- a/doc/fluid/api_cn/backward_cn/gradients_cn.rst +++ b/doc/fluid/api_cn/backward_cn/gradients_cn.rst @@ -3,10 +3,13 @@ gradients ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.backward.gradients(targets, inputs, target_gradients=None, no_grad_set=None) +:api_attr: 声明式编程模式(静态图) + + + 将目标梯度反向传播到输入。 参数: diff --git a/doc/fluid/api_cn/clip_cn/ErrorClipByValue_cn.rst b/doc/fluid/api_cn/clip_cn/ErrorClipByValue_cn.rst index cce7037769c731b028d4645a5fe514fa576b0311..c62a90742856f5bd867e7548d432d818105a488e 100644 --- a/doc/fluid/api_cn/clip_cn/ErrorClipByValue_cn.rst +++ b/doc/fluid/api_cn/clip_cn/ErrorClipByValue_cn.rst @@ -1,47 +1,50 @@ -.. _cn_api_fluid_clip_ErrorClipByValue: - -ErrorClipByValue -------------------------------- - -.. 
py:class:: paddle.fluid.clip.ErrorClipByValue(max, min=None) - -给定一个 Tensor ``t`` (该 Tensor 传入方式见代码示例),对 Tensor 中的元素超出给定最大 ``max`` 和最小界 ``min`` 内区间范围 [min, max] 的元素,重设为所超出界的界值。 - - -- 任何小于min(最小值)的值都被设置为 ``min`` - -- 任何大于max(最大值)的值都被设置为 ``max`` - - -参数: - - **max** (foat) - 要修剪的最大值。 - - **min** (float) - 要修剪的最小值。如果用户没有设置,将被框架默认设置为 ``-max`` 。 - - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - - BATCH_SIZE = 128 - CLIP_MAX = 2e-6 - CLIP_MIN = -1e-6 - prog = fluid.framework.Program() - - with fluid.program_guard(main_program=prog): - image = fluid.layers.data(name='x', shape=[784], dtype='float32') - hidden1 = fluid.layers.fc(input=image, size=128, act='relu') - hidden2 = fluid.layers.fc(input=hidden1, size=64, act='relu') - predict = fluid.layers.fc(input=hidden2, size=10, act='softmax') - label = fluid.layers.data(name='y', shape=[1], dtype='int64') - cost = fluid.layers.cross_entropy(input=predict, label=label) - avg_cost = fluid.layers.mean(cost) - prog_clip = prog.clone() - prog_clip.block(0).var(hidden1.name)._set_error_clip( - fluid.clip.ErrorClipByValue(max=CLIP_MAX, min=CLIP_MIN)) - - - - - +.. _cn_api_fluid_clip_ErrorClipByValue: + +ErrorClipByValue +------------------------------- + +.. py:class:: paddle.fluid.clip.ErrorClipByValue(max, min=None) + + + + +给定一个 Tensor ``t`` (该 Tensor 传入方式见代码示例),对 Tensor 中的元素超出给定最大 ``max`` 和最小界 ``min`` 内区间范围 [min, max] 的元素,重设为所超出界的界值。 + + +- 任何小于min(最小值)的值都被设置为 ``min`` + +- 任何大于max(最大值)的值都被设置为 ``max`` + + +参数: + - **max** (foat) - 要修剪的最大值。 + - **min** (float) - 要修剪的最小值。如果用户没有设置,将被框架默认设置为 ``-max`` 。 + + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + BATCH_SIZE = 128 + CLIP_MAX = 2e-6 + CLIP_MIN = -1e-6 + prog = fluid.framework.Program() + + with fluid.program_guard(main_program=prog): + image = fluid.layers.data(name='x', shape=[784], dtype='float32') + hidden1 = fluid.layers.fc(input=image, size=128, act='relu') + hidden2 = fluid.layers.fc(input=hidden1, size=64, act='relu') + predict = fluid.layers.fc(input=hidden2, size=10, act='softmax') + label = fluid.layers.data(name='y', shape=[1], dtype='int64') + cost = fluid.layers.cross_entropy(input=predict, label=label) + avg_cost = fluid.layers.mean(cost) + prog_clip = prog.clone() + prog_clip.block(0).var(hidden1.name)._set_error_clip( + fluid.clip.ErrorClipByValue(max=CLIP_MAX, min=CLIP_MIN)) + + + + + diff --git a/doc/fluid/api_cn/clip_cn/GradientClipByGlobalNorm_cn.rst b/doc/fluid/api_cn/clip_cn/GradientClipByGlobalNorm_cn.rst index 959c75d56bd36cbead37c58fc0a7cf4f344ac9e7..4b714c9c42f52a525902c836f166d54b78e3f318 100644 --- a/doc/fluid/api_cn/clip_cn/GradientClipByGlobalNorm_cn.rst +++ b/doc/fluid/api_cn/clip_cn/GradientClipByGlobalNorm_cn.rst @@ -1,87 +1,107 @@ -.. _cn_api_fluid_clip_GradientClipByGlobalNorm: - -GradientClipByGlobalNorm -------------------------------- - -.. py:class:: paddle.fluid.clip.GradientClipByGlobalNorm(clip_norm, group_name='default_group') - -通过多个 Tensor 的范数之和的比率,来剪切(clip)多个 Tensor ( Tensor 不是从该类传入, 通过 ``fluid.program_guard`` 的 ``main_program`` 参数传入,即公式中的 :math:`t\_list` 见代码实例)。 - -给定一个 Tensor 列表 :math:`t\_list` 和一个剪切比率 ``clip_norm`` ,返回该类的实例作为 ``set_gradient_clip`` 方法的第一个参数, ``set_gradient_clip`` 第二个参数是用来计算被剪切的 Tensor 列表(该值默认为 ``None`` 会基于所有 Tensor 列表来计算全局范数 ``global_norm`` 。 - -剪切过程如下: - -.. math:: - \\t\_list[i]=t\_list[i]∗\frac{clip\_norm}{max(global\_norm,clip\_norm)}\\ - -其中: - -.. 
math:: - \\global\_norm=\sqrt{\sum_{i=0}^{n-1}(l2norm(t\_list[i]))^2}\\ - - -参数: - - **clip_norm** (float) - 范数最大值 - - **group_name** (str, optional) - 剪切的组名 - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - import paddle.fluid.core as core - import paddle - - place = core.CPUPlace() - prog = fluid.framework.Program() - startup_program = fluid.framework.Program() - with fluid.program_guard( - main_program=prog, startup_program=startup_program): - image = fluid.layers.data(name='x', shape=[784], dtype='float32') - label = fluid.layers.data(name='y', shape=[1], dtype='int64') - hidden1 = fluid.layers.fc(input=image, size=128, act='relu') - hidden2 = fluid.layers.fc(input=hidden1, size=64, act='relu') - predict = fluid.layers.fc(input=hidden2, size=10, act='softmax') - cost = fluid.layers.cross_entropy(input=predict, label=label) - avg_cost = fluid.layers.mean(cost) - - prog_clip = prog.clone() - avg_cost_clip = prog_clip.block(0).var(avg_cost.name) - - p_g = fluid.backward.append_backward(loss=avg_cost) - p_g_clip = fluid.backward.append_backward(loss=avg_cost_clip) - - with fluid.program_guard(main_program=prog_clip, startup_program=startup_program): - fluid.clip.set_gradient_clip( - fluid.clip.GradientClipByGlobalNorm(clip_norm=2.0)) - p_g_clip = fluid.clip.append_gradient_clip_ops(p_g_clip) - - grad_list = [elem[1] for elem in p_g] - grad_clip_list = [elem[1] for elem in p_g_clip] - - train_reader = paddle.batch( - paddle.reader.shuffle( - paddle.dataset.mnist.train(), buf_size=8192), - batch_size=128) - - exe = fluid.Executor(place) - feeder = fluid.DataFeeder(feed_list=[image, label], place=place) - exe.run(startup_program) - - count = 0 - for data in train_reader(): - count += 1 - print("count:%s" % count) - if count > 5: - break - out = exe.run(prog, feed=feeder.feed(data), fetch_list=grad_list) - out_clip = exe.run(prog_clip, - feed=feeder.feed(data), - fetch_list=grad_clip_list) - - - - - - - +.. _cn_api_fluid_clip_GradientClipByGlobalNorm: + +GradientClipByGlobalNorm +------------------------------- + +.. py:class:: paddle.fluid.clip.GradientClipByGlobalNorm(clip_norm, group_name='default_group', need_clip=None) + +:alias_main: paddle.nn.GradientClipByGlobalNorm +:alias: paddle.nn.GradientClipByGlobalNorm,paddle.nn.clip.GradientClipByGlobalNorm +:old_api: paddle.fluid.clip.GradientClipByGlobalNorm + + + +将一个 Tensor列表 :math:`t\_list` 中所有Tensor的L2范数之和,限定在 ``clip_norm`` 范围内。 + +- 如果范数之和大于 ``clip_norm`` ,则所有 Tensor 会乘以一个系数进行压缩 + +- 如果范数之和小于或等于 ``clip_norm`` ,则不会进行任何操作。 + +输入的 Tensor列表 不是从该类里传入, 而是默认会选择 ``Program`` 中全部的梯度,如果 ``need_clip`` 不为None,则可以只选择部分参数进行梯度裁剪。 + +该类需要在初始化 ``optimizer`` 时进行设置后才能生效,可参看 ``optimizer`` 文档(例如: :ref:`cn_api_fluid_optimizer_SGDOptimizer` )。 + +裁剪公式如下: + +.. math:: + \\t\_list[i]=t\_list[i]∗\frac{clip\_norm}{max(global\_norm,clip\_norm)}\\ + +其中: + +.. math:: + \\global\_norm=\sqrt{\sum_{i=0}^{n-1}(l2norm(t\_list[i]))^2}\\ + + +参数: + - **clip_norm** (float) - 所允许的范数最大值 + - **group_name** (str, optional) - 剪切的组名 + - **need_clip** (function, optional) - 类型: 函数。用于指定需要梯度裁剪的参数,该函数接收一个 ``Parameter`` ,返回一个 ``bool`` (True表示需要裁剪,False不需要裁剪)。默认为None,此时会裁剪网络中全部参数。 + +**代码示例1:静态图** + +.. 
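A quick numeric check of the global-norm formula above can make the behaviour easier to review. The sketch below is plain NumPy, not the ``fluid.clip.GradientClipByGlobalNorm`` implementation, and the helper name ``clip_by_global_norm`` is invented purely for illustration; the framework examples follow.

.. code-block:: python

    # Illustrative sketch of the global-norm clipping formula above (not the Paddle API).
    import numpy as np

    def clip_by_global_norm(t_list, clip_norm):
        # global_norm = sqrt(sum of squared L2 norms over every tensor in t_list)
        global_norm = np.sqrt(sum((t ** 2).sum() for t in t_list))
        # all tensors share one scale factor, so their relative directions are preserved
        scale = clip_norm / max(global_norm, clip_norm)
        return [t * scale for t in t_list]

    grads = [np.array([3.0, 4.0]), np.array([0.0, 12.0])]   # L2 norms 5 and 12
    print(clip_by_global_norm(grads, clip_norm=1.0))
    # global_norm = sqrt(5**2 + 12**2) = 13, so every element is multiplied by 1/13

..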
code-block:: python + + import paddle + import paddle.fluid as fluid + import numpy as np + + main_prog = fluid.Program() + startup_prog = fluid.Program() + with fluid.program_guard( + main_program=main_prog, startup_program=startup_prog): + image = fluid.data( + name='x', shape=[-1, 2], dtype='float32') + predict = fluid.layers.fc(input=image, size=3, act='relu') #Trainable parameters: fc_0.w.0, fc_0.b.0 + loss = fluid.layers.mean(predict) + + # 裁剪网络中全部参数: + clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0) + + # 仅裁剪参数fc_0.w_0时: + # 为need_clip参数传入一个函数fileter_func,fileter_func接收参数的类型为Parameter,返回类型为bool + # def fileter_func(Parameter): + # # 可以较为方便的通过Parameter.name判断(name可以在fluid.ParamAttr中设置,默认为fc_0.w_0、fc_0.b_0) + # return Parameter.name=="fc_0.w_0" + # clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0, need_clip=fileter_func) + + sgd_optimizer = fluid.optimizer.SGDOptimizer(learning_rate=0.1, grad_clip=clip) + sgd_optimizer.minimize(loss) + + place = fluid.CPUPlace() + exe = fluid.Executor(place) + x = np.random.uniform(-100, 100, (10, 2)).astype('float32') + exe.run(startup_prog) + out = exe.run(main_prog, feed={'x': x}, fetch_list=loss) + + +**代码示例2:动态图** + +.. code-block:: python + + import paddle + import paddle.fluid as fluid + + with fluid.dygraph.guard(): + linear = fluid.dygraph.Linear(10, 10) #可训练参数: linear_0.w.0, linear_0.b.0 + inputs = fluid.layers.uniform_random([32, 10]).astype('float32') + out = linear(fluid.dygraph.to_variable(inputs)) + loss = fluid.layers.reduce_mean(out) + loss.backward() + + # 裁剪网络中全部参数: + clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0) + + # 仅裁剪参数linear_0.w_0时: + # 为need_clip参数传入一个函数fileter_func,fileter_func接收参数的类型为ParamBase,返回类型为bool + # def fileter_func(ParamBase): + # # 可以通过ParamBase.name判断(name可以在fluid.ParamAttr中设置,默认为linear_0.w_0、linear_0.b_0) + # return ParamBase.name == "linear_0.w_0" + # # 注:linear.weight、linear.bias能分别返回dygraph.Linear层的权重与偏差,也可以此来判断 + # return ParamBase.name == linear.weight.name + # clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0, need_clip=fileter_func) + + sgd_optimizer = fluid.optimizer.SGD( + learning_rate=0.1, + parameter_list=linear.parameters(), + grad_clip=clip) + sgd_optimizer.minimize(loss) diff --git a/doc/fluid/api_cn/clip_cn/GradientClipByNorm_cn.rst b/doc/fluid/api_cn/clip_cn/GradientClipByNorm_cn.rst index d3eeb280eb89bdc30dcb681bee30316c9c6975a0..07199a3a60f6683fc06d4b7e4253f2ac58d84bdd 100644 --- a/doc/fluid/api_cn/clip_cn/GradientClipByNorm_cn.rst +++ b/doc/fluid/api_cn/clip_cn/GradientClipByNorm_cn.rst @@ -1,79 +1,111 @@ -.. _cn_api_fluid_clip_GradientClipByNorm: - -GradientClipByNorm -------------------------------- - -.. py:class:: paddle.fluid.clip.GradientClipByNorm(clip_norm) - -将输入多维Tensor :math:`X` 转换为L2范数不超过给定的二范数最大值( ``clip_norm`` )的多维Tensor。(多维Tensor不是从该类传入, 而是通过 ``fluid.program_guard`` 的 ``main_program`` 参数传入)。 - -该类限制了输入多维Tensor :math:`X` 的L2范数不会超过 ``clip_norm`` 。 - -.. math:: - - Out= - \left\{ - \begin{aligned} - & X & & if (norm(X) \leq clip\_norm)\\ - & \frac{clip\_norm∗X}{norm(X)} & & if (norm(X) > clip\_norm) \\ - \end{aligned} - \right. - - -其中 :math:`norm(X)` 代表 :math:`X` 的L2范数 - -.. math:: - \\norm(X) = (\sum_{i=1}^{n}|x_i|^2)^{\frac{1}{2}}\\ - -参数: - - **clip_norm** (float) - 二范数最大值 - - -**代码示例** - -.. 
code-block:: python - - import paddle.fluid as fluid - import paddle.fluid.core as core - import paddle - place = core.CPUPlace() - prog = fluid.framework.Program() - startup_program = fluid.framework.Program() - with fluid.program_guard( - main_program=prog, startup_program=startup_program): - image = fluid.layers.data(name='x', shape=[784], dtype='float32') - label = fluid.layers.data(name='y', shape=[1], dtype='int64') - hidden1 = fluid.layers.fc(input=image, size=128, act='relu') - hidden2 = fluid.layers.fc(input=hidden1, size=64, act='relu') - predict = fluid.layers.fc(input=hidden2, size=10, act='softmax') - cost = fluid.layers.cross_entropy(input=predict, label=label) - avg_cost = fluid.layers.mean(cost) - prog_clip = prog.clone() - avg_cost_clip = prog_clip.block(0).var(avg_cost.name) - p_g = fluid.backward.append_backward(loss=avg_cost) - p_g_clip = fluid.backward.append_backward(loss=avg_cost_clip) - with fluid.program_guard(main_program=prog_clip, startup_program=startup_program): - fluid.clip.set_gradient_clip( - fluid.clip.GradientClipByNorm(clip_norm=2.0)) - p_g_clip = fluid.clip.append_gradient_clip_ops(p_g_clip) - grad_list = [elem[1] for elem in p_g] - grad_clip_list = [elem[1] for elem in p_g_clip] - train_reader = paddle.batch( - paddle.reader.shuffle( - paddle.dataset.mnist.train(), buf_size=8192), - batch_size=128) - - exe = fluid.Executor(place) - feeder = fluid.DataFeeder(feed_list=[image, label], place=place) - exe.run(startup_program) - - count = 0 - for data in train_reader(): - count += 1 - print("count:%s" % count) - if count > 5: - break - out = exe.run(prog, feed=feeder.feed(data), fetch_list=grad_list) - out_clip = exe.run(prog_clip, - feed=feeder.feed(data), - fetch_list=grad_clip_list) +.. _cn_api_fluid_clip_GradientClipByNorm: + +GradientClipByNorm +------------------------------- + +.. py:class:: paddle.fluid.clip.GradientClipByNorm(clip_norm, need_clip=None) + +:alias_main: paddle.nn.GradientClipByNorm +:alias: paddle.nn.GradientClipByNorm,paddle.nn.clip.GradientClipByNorm +:old_api: paddle.fluid.clip.GradientClipByNorm + + + +将输入的多维Tensor :math:`X` 的L2范数限制在 ``clip_norm`` 范围之内。 + +- 如果L2范数大于 ``clip_norm`` ,则该 Tensor 会乘以一个系数进行压缩 + +- 如果L2范数小于或等于 ``clip_norm`` ,则不会进行任何操作。 + +输入的 Tensor 不是从该类里传入, 而是默认会选择 ``Program`` 中全部的梯度,如果 ``need_clip`` 不为None,则可以只选择部分参数进行梯度裁剪。 + +该类需要在初始化 ``optimizer`` 时进行设置后才能生效,可参看 ``optimizer`` 文档(例如: :ref:`cn_api_fluid_optimizer_SGDOptimizer` )。 + +裁剪公式如下: + +.. math:: + + Out= + \left\{ + \begin{aligned} + & X & & if (norm(X) \leq clip\_norm)\\ + & \frac{clip\_norm∗X}{norm(X)} & & if (norm(X) > clip\_norm) \\ + \end{aligned} + \right. + + +其中 :math:`norm(X)` 代表 :math:`X` 的L2范数 + +.. math:: + \\norm(X) = (\sum_{i=1}^{n}|x_i|^2)^{\frac{1}{2}}\\ + +参数: + - **clip_norm** (float) - 所允许的二范数最大值。 + - **need_clip** (function, optional) - 类型: 函数。用于指定需要梯度裁剪的参数,该函数接收一个 ``Parameter`` ,返回一个 ``bool`` (True表示需要裁剪,False不需要裁剪)。默认为None,此时会裁剪网络中全部参数。 + +**代码示例1:静态图** + +.. 
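The per-tensor rule above differs from the global-norm variant in that each gradient is judged against ``clip_norm`` on its own. A minimal NumPy sketch of just that rule (again not the Paddle implementation; the helper name is invented) before the framework examples:

.. code-block:: python

    # Illustrative sketch of the GradientClipByNorm formula (not the Paddle API).
    import numpy as np

    def clip_by_norm(x, clip_norm):
        norm = np.sqrt((x ** 2).sum())        # L2 norm of this tensor alone
        if norm <= clip_norm:
            return x                          # small gradients pass through unchanged
        return x * (clip_norm / norm)         # direction kept, norm rescaled to clip_norm

    g = np.array([3.0, 4.0])                  # L2 norm = 5
    print(clip_by_norm(g, clip_norm=1.0))     # -> [0.6, 0.8]

..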
code-block:: python + + import paddle + import paddle.fluid as fluid + import numpy as np + + main_prog = fluid.Program() + startup_prog = fluid.Program() + with fluid.program_guard( + main_program=main_prog, startup_program=startup_prog): + image = fluid.data( + name='x', shape=[-1, 2], dtype='float32') + predict = fluid.layers.fc(input=image, size=3, act='relu') #可训练参数: fc_0.w.0, fc_0.b.0 + loss = fluid.layers.mean(predict) + + # 裁剪网络中全部参数: + clip = fluid.clip.GradientClipByNorm(clip_norm=1.0) + + # 仅裁剪参数fc_0.w_0时: + # 为need_clip参数传入一个函数fileter_func,fileter_func接收参数的类型为Parameter,返回类型为bool + # def fileter_func(Parameter): + # # 可以较为方便的通过Parameter.name判断(name可以在fluid.ParamAttr中设置,默认为fc_0.w_0、fc_0.b_0) + # return Parameter.name=="fc_0.w_0" + # clip = fluid.clip.GradientClipByNorm(clip_norm=1.0, need_clip=fileter_func) + + sgd_optimizer = fluid.optimizer.SGDOptimizer(learning_rate=0.1, grad_clip=clip) + sgd_optimizer.minimize(loss) + + place = fluid.CPUPlace() + exe = fluid.Executor(place) + x = np.random.uniform(-100, 100, (10, 2)).astype('float32') + exe.run(startup_prog) + out = exe.run(main_prog, feed={'x': x}, fetch_list=loss) + + +**代码示例2:动态图** + +.. code-block:: python + + import paddle + import paddle.fluid as fluid + + with fluid.dygraph.guard(): + linear = fluid.dygraph.Linear(10, 10) #可训练参数: linear_0.w.0, linear_0.b.0 + inputs = fluid.layers.uniform_random([32, 10]).astype('float32') + out = linear(fluid.dygraph.to_variable(inputs)) + loss = fluid.layers.reduce_mean(out) + loss.backward() + + # 裁剪网络中全部参数: + clip = fluid.clip.GradientClipByNorm(clip_norm=1.0) + + # 仅裁剪参数linear_0.w_0时: + # 为need_clip参数传入一个函数fileter_func,fileter_func接收参数的类型为ParamBase,返回类型为bool + # def fileter_func(ParamBase): + # # 可以通过ParamBase.name判断(name可以在fluid.ParamAttr中设置,默认为linear_0.w_0、linear_0.b_0) + # return ParamBase.name == "linear_0.w_0" + # # 注:linear.weight、linear.bias能分别返回dygraph.Linear层的权重与偏差,也可以此来判断 + # return ParamBase.name == linear.weight.name + # clip = fluid.clip.GradientClipByNorm(clip_norm=1.0, need_clip=fileter_func) + + sgd_optimizer = fluid.optimizer.SGD( + learning_rate=0.1, parameter_list=linear.parameters(), grad_clip=clip) + sgd_optimizer.minimize(loss) \ No newline at end of file diff --git a/doc/fluid/api_cn/clip_cn/GradientClipByValue_cn.rst b/doc/fluid/api_cn/clip_cn/GradientClipByValue_cn.rst index 236459f71277f9ea4fe80ba2a4bd9e3e98a33e0a..58e218cb9888a49d06bbfebdddce3a1506f5fc76 100644 --- a/doc/fluid/api_cn/clip_cn/GradientClipByValue_cn.rst +++ b/doc/fluid/api_cn/clip_cn/GradientClipByValue_cn.rst @@ -1,40 +1,100 @@ -.. _cn_api_fluid_clip_GradientClipByValue: - -GradientClipByValue -------------------------------- - -.. py:class:: paddle.fluid.clip.GradientClipByValue(max, min=None) - -将梯度值(gradient values)的范围压缩到 [min, max]。 - - -给定一个 Tensor ``t`` ,该操作将它的值压缩到 ``min`` 和 ``max`` 之间 - -- 任何小于 ``min`` 的值都被设置为 ``min`` - -- 任何大于 ``max`` 的值都被设置为 ``max`` - -参数: - - **max** (foat) - 要修剪的最大值。 - - **min** (float,optional) - 要修剪的最小值。如果用户没有设置,将被 ``framework`` 设置为 ``-max`` 。 - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - w_param_attrs = fluid.ParamAttr(name=None, - initializer=fluid.initializer.UniformInitializer(low=-1.0, high=1.0, seed=0), - learning_rate=1.0, - regularizer=fluid.regularizer.L1Decay(1.0), - trainable=True, - gradient_clip=fluid.clip.GradientClipByValue(-1.0, 1.0)) - x = fluid.layers.data(name='x', shape=[10], dtype='float32') - y_predict = fluid.layers.fc(input=x, size=1, param_attr=w_param_attrs) - - - - - - - +.. 
_cn_api_fluid_clip_GradientClipByValue: + +GradientClipByValue +------------------------------- + +.. py:class:: paddle.fluid.clip.GradientClipByValue(max, min=None, need_clip=None) + +:alias_main: paddle.nn.GradientClipByValue +:alias: paddle.nn.GradientClipByValue,paddle.nn.clip.GradientClipByValue +:old_api: paddle.fluid.clip.GradientClipByValue + + + + +将输入的多维Tensor :math:`X` 的值限制在 [min, max] 范围。 + +输入的 Tensor 不是从该类里传入, 而是默认会选择 ``Program`` 中全部的梯度,如果 ``need_clip`` 不为None,则可以只选择部分参数进行梯度裁剪。 + +该类需要在初始化 ``optimizer`` 时进行设置后才能生效,可参看 ``optimizer`` 文档(例如: :ref:`cn_api_fluid_optimizer_SGDOptimizer` )。 + +给定一个 Tensor ``t`` ,该操作将它的值压缩到 ``min`` 和 ``max`` 之间 + +- 任何小于 ``min`` 的值都被设置为 ``min`` + +- 任何大于 ``max`` 的值都被设置为 ``max`` + +参数: + - **max** (foat) - 要修剪的最大值。 + - **min** (float,optional) - 要修剪的最小值。如果用户没有设置,将被自动设置为 ``-max`` (此时 ``max`` 必须大于0)。 + - **need_clip** (function, optional) - 类型: 函数。用于指定需要梯度裁剪的参数,该函数接收一个 ``Parameter`` ,返回一个 ``bool`` (True表示需要裁剪,False不需要裁剪)。默认为None,此时会裁剪网络中全部参数。 + +**代码示例1:静态图** + +.. code-block:: python + + import paddle + import paddle.fluid as fluid + import numpy as np + + main_prog = fluid.Program() + startup_prog = fluid.Program() + with fluid.program_guard( + main_program=main_prog, startup_program=startup_prog): + image = fluid.data( + name='x', shape=[-1, 2], dtype='float32') + predict = fluid.layers.fc(input=image, size=3, act='relu') #可训练参数: fc_0.w.0, fc_0.b.0 + loss = fluid.layers.mean(predict) + + # 裁剪网络中全部参数: + clip = fluid.clip.GradientClipByValue(min=-1, max=1) + + # 仅裁剪参数fc_0.w_0时: + # 为need_clip参数传入一个函数fileter_func,fileter_func接收参数的类型为Parameter,返回类型为bool + # def fileter_func(Parameter): + # # 可以较为方便的通过Parameter.name判断(name可以在fluid.ParamAttr中设置,默认为fc_0.w_0、fc_0.b_0) + # return Parameter.name=="fc_0.w_0" + # clip = fluid.clip.GradientClipByValue(min=-1, max=1, need_clip=fileter_func) + + sgd_optimizer = fluid.optimizer.SGDOptimizer(learning_rate=0.1, grad_clip=clip) + sgd_optimizer.minimize(loss) + + place = fluid.CPUPlace() + exe = fluid.Executor(place) + x = np.random.uniform(-100, 100, (10, 2)).astype('float32') + exe.run(startup_prog) + out = exe.run(main_prog, feed={'x': x}, fetch_list=loss) + + +**代码示例2:动态图** + +.. 
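Since the rule here is a plain element-wise clamp, a one-line NumPy check (not the Paddle implementation) shows what happens; with ``max=1.0`` this also mirrors the default ``min = -max`` behaviour described above:

.. code-block:: python

    # Element-wise clamp into [min, max]; values outside the range are set to the boundary.
    import numpy as np

    g = np.array([-3.0, -0.5, 0.2, 7.0])
    print(np.clip(g, -1.0, 1.0))   # -> [-1.  -0.5  0.2  1. ]

..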
code-block:: python + + import paddle + import paddle.fluid as fluid + + with fluid.dygraph.guard(): + linear = fluid.dygraph.Linear(10, 10) #可训练参数: linear_0.w.0, linear_0.b.0 + inputs = fluid.layers.uniform_random([32, 10]).astype('float32') + out = linear(fluid.dygraph.to_variable(inputs)) + loss = fluid.layers.reduce_mean(out) + loss.backward() + + # 裁剪网络中全部参数: + clip = fluid.clip.GradientClipByValue(min=-1, max=1) + + # 仅裁剪参数linear_0.w_0时: + # 为need_clip参数传入一个函数fileter_func,fileter_func接收参数的类型为ParamBase,返回类型为bool + # def fileter_func(ParamBase): + # # 可以通过ParamBase.name判断(name可以在fluid.ParamAttr中设置,默认为linear_0.w_0、linear_0.b_0) + # return ParamBase.name == "linear_0.w_0" + # # 注:linear.weight、linear.bias能分别返回dygraph.Linear层的权重与偏差,可以此来判断 + # return ParamBase.name == linear.weight.name + # clip = fluid.clip.GradientClipByValue(min=-1, max=1, need_clip=fileter_func) + + sgd_optimizer = fluid.optimizer.SGD( + learning_rate=0.1, parameter_list=linear.parameters(), grad_clip=clip) + sgd_optimizer.minimize(loss) + + + diff --git a/doc/fluid/api_cn/clip_cn/set_gradient_clip_cn.rst b/doc/fluid/api_cn/clip_cn/set_gradient_clip_cn.rst index f27853f781f873d188be77ac57e90a9f6352e297..eae01c0c3f58774f874caf19785444c7dae3df64 100644 --- a/doc/fluid/api_cn/clip_cn/set_gradient_clip_cn.rst +++ b/doc/fluid/api_cn/clip_cn/set_gradient_clip_cn.rst @@ -3,16 +3,24 @@ set_gradient_clip ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.clip.set_gradient_clip(clip, param_list=None, program=None) +:api_attr: 声明式编程模式(静态图) + + + +.. warning:: + 此API对位置使用的要求较高,其必须位于组建网络之后, ``minimize`` 之前,因此在未来版本中可能被删除,故不推荐使用。推荐在 ``optimizer`` 初始化时设置梯度裁剪。 + 有三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。 + 如果在 ``optimizer`` 中设置过梯度裁剪,又使用了 ``set_gradient_clip`` ,``set_gradient_clip`` 将不会生效。 + 给指定参数做梯度裁剪。 参数: - - **clip** (BaseGradientClipAttr) - BaseGradientClipAttr子类的实例,如 :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 等,用于描述具体的裁剪方法和属性。 + - **clip** (GradientClipBase) - 梯度裁剪的策略,如 :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 等,用于描述具体的裁剪方法和属性。 - **param_list** (list(Variable),可选) - 需要裁剪的参数列表,可以是参数或参数名称列表。默认值为None,表示裁剪 ``program`` 中的所有参数。 - - **program** (Program,可选) - 参数所在的Program。默认值为None,表示使用 :ref:`cn_api_fluid_default_main_program`。 + - **program** (Program,可选) - 参数所在的Program。默认值为None,表示使用 :ref:`cn_api_fluid_default_main_program` 。 返回: 无。 @@ -59,3 +67,18 @@ set_gradient_clip param_list=[param_var1, param_var2]) sgd = fluid.optimizer.SGD(learning_rate=1e-3) sgd.minimize(loss) + + # network 4: use set_gradient_clip and minimize(grad_clip=clip) together + with fluid.program_guard(fluid.Program(), fluid.Program()): + loss = network() + param_var1 = fluid.default_main_program().global_block().var("fc1_param") + param_var2 = fluid.default_main_program().global_block().var("fc2_param") + clip1 = fluid.clip.GradientClipByValue(min=-1.0, max=1.0) + clip2 = fluid.clip.GradientClipByNorm(clip_norm=1.0) + # 设置梯度裁剪策略:clip1 + fluid.clip.set_gradient_clip(clip1) + + # 设置梯度裁剪策略:clip2 + sgd = fluid.optimizer.SGD(learning_rate=1e-3, grad_clip=clip2) + sgd.minimize(loss) + # 有设置冲突时,set_gradient_clip将不会生效,将以clip2的策略进行梯度裁剪 diff --git a/doc/fluid/api_cn/dataset_cn.rst b/doc/fluid/api_cn/dataset_cn.rst index 076a7ea5e1fcba84eeb71cdeadfa0c3fc3092adf..6ad084eaa9764cd4b5b0c03abaa6e6c011688e89 100644 --- a/doc/fluid/api_cn/dataset_cn.rst +++ b/doc/fluid/api_cn/dataset_cn.rst @@ -1,5 +1,5 @@ 
======================= -fluid.dataset +paddle.dataset ======================= diff --git a/doc/fluid/api_cn/dataset_cn/DatasetFactory_cn.rst b/doc/fluid/api_cn/dataset_cn/DatasetFactory_cn.rst index 9641efa1cdb61d4b6eec7a986f8ebaa31ee553ec..901d32c2069c8905031d8f3d9b6abdc89730876a 100644 --- a/doc/fluid/api_cn/dataset_cn/DatasetFactory_cn.rst +++ b/doc/fluid/api_cn/dataset_cn/DatasetFactory_cn.rst @@ -1,34 +1,37 @@ -.. _cn_api_fluid_dataset_DatasetFactory: - -DatasetFactory -------------------------------- - -.. py:class:: paddle.fluid.dataset.DatasetFactory - -DatasetFactory是一个按数据集名称创建数据集的 "工厂",可以创建“QueueDataset”,“InMemoryDataset”或“FileInstantDataset”,默认为“QueueDataset”。 - - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") - -.. py:method:: create_dataset(datafeed_class='QueueDataset') - -创建“QueueDataset”,“InMemoryDataset” 或 “FileInstantDataset”,默认为“QueueDataset”。 - - -参数: - - **datafeed_class** (str) – datafeed类名,为QueueDataset或InMemoryDataset。默认为QueueDataset。 - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset() - - - +.. _cn_api_fluid_dataset_DatasetFactory: + +DatasetFactory +------------------------------- + +.. py:class:: paddle.fluid.dataset.DatasetFactory + + + + +DatasetFactory是一个按数据集名称创建数据集的 "工厂",可以创建“QueueDataset”,“InMemoryDataset”或“FileInstantDataset”,默认为“QueueDataset”。 + + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + +.. py:method:: create_dataset(datafeed_class='QueueDataset') + +创建“QueueDataset”,“InMemoryDataset” 或 “FileInstantDataset”,默认为“QueueDataset”。 + + +参数: + - **datafeed_class** (str) – datafeed类名,为QueueDataset或InMemoryDataset。默认为QueueDataset。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset() + + + diff --git a/doc/fluid/api_cn/dataset_cn/InMemoryDataset_cn.rst b/doc/fluid/api_cn/dataset_cn/InMemoryDataset_cn.rst index fc2878f1fc81b907125f236cf92d905190fa194f..7699284c681fd87008d3b8ad41db89a93fd8d788 100644 --- a/doc/fluid/api_cn/dataset_cn/InMemoryDataset_cn.rst +++ b/doc/fluid/api_cn/dataset_cn/InMemoryDataset_cn.rst @@ -1,354 +1,357 @@ -.. _cn_api_fluid_dataset_InMemoryDataset: - -InMemoryDataset -------------------------------- - -.. py:class:: paddle.fluid.dataset.InMemoryDataset - -InMemoryDataset会向内存中加载数据并在训练前缓冲数据。此类由DatasetFactory创建。 - -**代码示例**: - -.. code-block:: python - - dataset = paddle.fluid.DatasetFactory().create_dataset(“InMemoryDataset”) - -.. py:method:: set_queue_num(queue_num) - -设置 ``Dataset`` 输出队列数量,训练进程会从队列中获取数据。 - -参数: - - **queue_num** (int) - dataset输出队列数量 - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") - dataset.set_queue_num(12) - -.. py:method:: set_fleet_send_batch_size(fleet_send_batch_size) - -设置发送batch的大小 - -参数: - - **fleet_send_batch_size** (int) - 设置发送batch的大小。 - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") - dataset.set_fleet_send_batch_size(800) - -.. 
py:method:: set_merge_by_lineid(var_list, erase_duplicate_feas=True, min_merge_size=2, keep_unmerged-ins=True) - -通过样本id来设置合并,一些线id的实例将会在shuffle之后进行合并,你应该在一个data生成器里面解析样本id。 - -参数: - - **var_list** (list) - 可以被合并的特征列表,其中的每一个元素都是一个 ``Variable`` 。一些类特征我们通常不把它们合并为同样的样本id,所以用户应当指定哪个类特征可以被合并。 - - **erase_duplicate_feas** (bool) - 合并的时候是否删除重复的特征值。默认为True。 - - **min_merge_size** (int) - 合并的最小数量。默认为2。 - - **keep_unmerged_ins** (bool) - 是否保留没有合并的样本,比如有着独特id的样本,或者重复id的数量小于 ``min_merge_size`` 的样本。 - -.. code-block:: python - - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") - dataset.set_merge_by_lineid() - -.. py:method:: load_into_memory() - -向内存中加载数据。 - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") - filelist = ["a.txt", "b.txt"] - dataset.set_filelist(filelist) - dataset.load_into_memory() - -.. py:method:: preload_into_memory() - -向内存中以异步模式加载数据。 - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") - filelist = ["a.txt", "b.txt"] - dataset.set_filelist(filelist) - dataset.preload_into_memory() - dataset.wait_preload_done() - -.. py:method:: wait_preload_done() - -等待 ``preload_into_memory`` 完成。 - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") - filelist = ["a.txt", "b.txt"] - dataset.set_filelist(filelist) - dataset.preload_into_memory() - dataset.wait_preload_done() - -.. py:method:: local_shuffle() - -局域shuffle。 - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") - filelist = ["a.txt", "b.txt"] - dataset.set_filelist(filelist) - dataset.load_into_memory() - dataset.local_shuffle() - - -.. py:method:: global_shuffle(fleet=None) - -全局shuffle。 - -只能用在分布式模式(单机多进程或多机多进程)中。您如果在分布式模式中运行,应当传递fleet而非None。 - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet - dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") - filelist = ["a.txt", "b.txt"] - dataset.set_filelist(filelist) - dataset.load_into_memory() - dataset.global_shuffle(fleet) - -参数: - - **fleet** (Fleet) – fleet单例。默认为None。 - - -.. py:method:: release_memory() - -当数据不再使用时,释放InMemoryDataset内存数据。 - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet - dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") - filelist = ["a.txt", "b.txt"] - dataset.set_filelist(filelist) - dataset.load_into_memory() - dataset.global_shuffle(fleet) - exe = fluid.Executor(fluid.CPUPlace()) - exe.run(fluid.default_startup_program()) - exe.train_from_dataset(fluid.default_main_program(), dataset) - dataset.release_memory() - -.. py:method:: get_memory_data_size(fleet=None) - -用户可以调用此函数以了解加载进内存后所有workers中的样本数量。 - -.. note:: - 该函数可能会导致性能不佳,因为它具有barrier。 - -参数: - - **fleet** (Fleet) – fleet对象。 - -返回:内存数据的大小。 - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet - dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") - filelist = ["a.txt", "b.txt"] - dataset.set_filelist(filelist) - dataset.load_into_memory() - print dataset.get_memory_data_size(fleet) - - -.. 
py:method:: get_shuffle_data_size(fleet=None) - -获取shuffle数据大小,用户可以调用此函数以了解局域/全局shuffle后所有workers中的样本数量。 - -.. note:: - 该函数可能会导致局域shuffle性能不佳,因为它具有barrier。但其不影响局域shuffle。 - -参数: - - **fleet** (Fleet) – fleet对象。 - -返回:shuffle数据的大小。 - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet - dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") - filelist = ["a.txt", "b.txt"] - dataset.set_filelist(filelist) - dataset.load_into_memory() - dataset.global_shuffle(fleet) - print dataset.get_shuffle_data_size(fleet) - - -.. py:method:: set_batch_size(batch_size) - -设置batch size。在训练期间生效。 - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset() - dataset.set_batch_size(128) - -参数: - - **batch_size** (int) - batch size - -.. py:method:: set_fea_eval(record_candidate_size, fea_eval=True) - -设置特征打乱特征验证模式,来修正特征level的重要性, 特征打乱需要 ``fea_eval`` 被设置为True。 - -参数: - - **record_candidate_size** (int) - 打乱一个特征的候选实例大小 - - **fea_eval** (bool) - 是否设置特征验证模式来打乱特征,默认为True。 - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset(“InMemoryDataset”) - dataset.set_fea_eval(1000000, True) - -.. py:method:: desc() - -为 ``DataFeedDesc`` 返回一个缓存信息。 - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset() - print(dataset.desc()) - -返回:一个字符串信息 - -.. py:method:: set_filelist(filelist) - -在当前的worker中设置文件列表。 - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset() - dataset.set_filelist(["a.txt", "b.txt"]) - -参数: - - **filelist** (list) - 文件列表 - -.. py:method:: set_hdfs_config(fs_name, fs_ugi) - -设置hdfs配置:fs名称与ugi。 - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset() - dataset.set_hdfs_config("my_fs_name", "my_fs_ugi") - -参数: - - **fs_name** (str) - fs名称 - - **fs_ugi** (str) - fs ugi - -.. py:method:: set_pipe_command(pipe_coommand) - -在当前的 ``dataset`` 中设置pipe命令。pipe命令只能使用UNIX的pipe命令 - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset() - dataset.set_pipe_command("python my_script.py") - -参数: - - **pipe_command** (str) - pipe命令 - -.. py:method:: set_thread(thread_num) - -设置进程数量,等于readers的数量。 - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset() - dataset.set_thread(12) - -参数: - - **thread_num** (int) - 进程数量 - -.. py:method:: set_use_var(var_list) - -设置将要使用的 ``Variable`` 。 - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset() - dataset.set_use_var([data, label]) - -参数: - - **var_list** (list) - variable 列表 - -.. py:method:: slots_shuffle(slots) - -该方法是在特征层次上的一个打乱方法,经常被用在有着较大缩放率实例的稀疏矩阵上,为了比较metric,比如auc,在一个或者多个有着baseline的特征上做特征打乱来验证特征level的重要性。 - -参数: - - **slots** (list[string]) - 要打乱特征的集合 - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset(“InMemoryDataset”) - dataset.set_merge_by_lineid() - #支持slot 0 - dataset.slots_shuffle([‘0’]) - - - +.. _cn_api_fluid_dataset_InMemoryDataset: + +InMemoryDataset +------------------------------- + +.. 
py:class:: paddle.fluid.dataset.InMemoryDataset + + + + +InMemoryDataset会向内存中加载数据并在训练前缓冲数据。此类由DatasetFactory创建。 + +**代码示例**: + +.. code-block:: python + + dataset = paddle.fluid.DatasetFactory().create_dataset(“InMemoryDataset”) + +.. py:method:: set_queue_num(queue_num) + +设置 ``Dataset`` 输出队列数量,训练进程会从队列中获取数据。 + +参数: + - **queue_num** (int) - dataset输出队列数量 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + dataset.set_queue_num(12) + +.. py:method:: set_fleet_send_batch_size(fleet_send_batch_size) + +设置发送batch的大小 + +参数: + - **fleet_send_batch_size** (int) - 设置发送batch的大小。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + dataset.set_fleet_send_batch_size(800) + +.. py:method:: set_merge_by_lineid(var_list, erase_duplicate_feas=True, min_merge_size=2, keep_unmerged-ins=True) + +通过样本id来设置合并,一些线id的实例将会在shuffle之后进行合并,你应该在一个data生成器里面解析样本id。 + +参数: + - **var_list** (list) - 可以被合并的特征列表,其中的每一个元素都是一个 ``Variable`` 。一些类特征我们通常不把它们合并为同样的样本id,所以用户应当指定哪个类特征可以被合并。 + - **erase_duplicate_feas** (bool) - 合并的时候是否删除重复的特征值。默认为True。 + - **min_merge_size** (int) - 合并的最小数量。默认为2。 + - **keep_unmerged_ins** (bool) - 是否保留没有合并的样本,比如有着独特id的样本,或者重复id的数量小于 ``min_merge_size`` 的样本。 + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + dataset.set_merge_by_lineid() + +.. py:method:: load_into_memory() + +向内存中加载数据。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + filelist = ["a.txt", "b.txt"] + dataset.set_filelist(filelist) + dataset.load_into_memory() + +.. py:method:: preload_into_memory() + +向内存中以异步模式加载数据。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + filelist = ["a.txt", "b.txt"] + dataset.set_filelist(filelist) + dataset.preload_into_memory() + dataset.wait_preload_done() + +.. py:method:: wait_preload_done() + +等待 ``preload_into_memory`` 完成。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + filelist = ["a.txt", "b.txt"] + dataset.set_filelist(filelist) + dataset.preload_into_memory() + dataset.wait_preload_done() + +.. py:method:: local_shuffle() + +局域shuffle。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + filelist = ["a.txt", "b.txt"] + dataset.set_filelist(filelist) + dataset.load_into_memory() + dataset.local_shuffle() + + +.. py:method:: global_shuffle(fleet=None) + +全局shuffle。 + +只能用在分布式模式(单机多进程或多机多进程)中。您如果在分布式模式中运行,应当传递fleet而非None。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet + dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + filelist = ["a.txt", "b.txt"] + dataset.set_filelist(filelist) + dataset.load_into_memory() + dataset.global_shuffle(fleet) + +参数: + - **fleet** (Fleet) – fleet单例。默认为None。 + + +.. py:method:: release_memory() + +当数据不再使用时,释放InMemoryDataset内存数据。 + +**代码示例**: + +.. 
code-block:: python + + import paddle.fluid as fluid + from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet + dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + filelist = ["a.txt", "b.txt"] + dataset.set_filelist(filelist) + dataset.load_into_memory() + dataset.global_shuffle(fleet) + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(fluid.default_startup_program()) + exe.train_from_dataset(fluid.default_main_program(), dataset) + dataset.release_memory() + +.. py:method:: get_memory_data_size(fleet=None) + +用户可以调用此函数以了解加载进内存后所有workers中的样本数量。 + +.. note:: + 该函数可能会导致性能不佳,因为它具有barrier。 + +参数: + - **fleet** (Fleet) – fleet对象。 + +返回:内存数据的大小。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet + dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + filelist = ["a.txt", "b.txt"] + dataset.set_filelist(filelist) + dataset.load_into_memory() + print dataset.get_memory_data_size(fleet) + + +.. py:method:: get_shuffle_data_size(fleet=None) + +获取shuffle数据大小,用户可以调用此函数以了解局域/全局shuffle后所有workers中的样本数量。 + +.. note:: + 该函数可能会导致局域shuffle性能不佳,因为它具有barrier。但其不影响局域shuffle。 + +参数: + - **fleet** (Fleet) – fleet对象。 + +返回:shuffle数据的大小。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet + dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + filelist = ["a.txt", "b.txt"] + dataset.set_filelist(filelist) + dataset.load_into_memory() + dataset.global_shuffle(fleet) + print dataset.get_shuffle_data_size(fleet) + + +.. py:method:: set_batch_size(batch_size) + +设置batch size。在训练期间生效。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset() + dataset.set_batch_size(128) + +参数: + - **batch_size** (int) - batch size + +.. py:method:: set_fea_eval(record_candidate_size, fea_eval=True) + +设置特征打乱特征验证模式,来修正特征level的重要性, 特征打乱需要 ``fea_eval`` 被设置为True。 + +参数: + - **record_candidate_size** (int) - 打乱一个特征的候选实例大小 + - **fea_eval** (bool) - 是否设置特征验证模式来打乱特征,默认为True。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset(“InMemoryDataset”) + dataset.set_fea_eval(1000000, True) + +.. py:method:: desc() + +为 ``DataFeedDesc`` 返回一个缓存信息。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset() + print(dataset.desc()) + +返回:一个字符串信息 + +.. py:method:: set_filelist(filelist) + +在当前的worker中设置文件列表。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset() + dataset.set_filelist(["a.txt", "b.txt"]) + +参数: + - **filelist** (list) - 文件列表 + +.. py:method:: set_hdfs_config(fs_name, fs_ugi) + +设置hdfs配置:fs名称与ugi。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset() + dataset.set_hdfs_config("my_fs_name", "my_fs_ugi") + +参数: + - **fs_name** (str) - fs名称 + - **fs_ugi** (str) - fs ugi + +.. py:method:: set_pipe_command(pipe_coommand) + +在当前的 ``dataset`` 中设置pipe命令。pipe命令只能使用UNIX的pipe命令 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset() + dataset.set_pipe_command("python my_script.py") + +参数: + - **pipe_command** (str) - pipe命令 + +.. py:method:: set_thread(thread_num) + +设置进程数量,等于readers的数量。 + +**代码示例**: + +.. 
code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset() + dataset.set_thread(12) + +参数: + - **thread_num** (int) - 进程数量 + +.. py:method:: set_use_var(var_list) + +设置将要使用的 ``Variable`` 。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset() + dataset.set_use_var([data, label]) + +参数: + - **var_list** (list) - variable 列表 + +.. py:method:: slots_shuffle(slots) + +该方法是在特征层次上的一个打乱方法,经常被用在有着较大缩放率实例的稀疏矩阵上,为了比较metric,比如auc,在一个或者多个有着baseline的特征上做特征打乱来验证特征level的重要性。 + +参数: + - **slots** (list[string]) - 要打乱特征的集合 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset(“InMemoryDataset”) + dataset.set_merge_by_lineid() + #支持slot 0 + dataset.slots_shuffle([‘0’]) + + + diff --git a/doc/fluid/api_cn/dataset_cn/QueueDataset_cn.rst b/doc/fluid/api_cn/dataset_cn/QueueDataset_cn.rst index 2046dcb2b9d47fc46629874949035b5e4fceb487..298bf2bb4ecc8b356327af82570e7bb07e2e2907 100644 --- a/doc/fluid/api_cn/dataset_cn/QueueDataset_cn.rst +++ b/doc/fluid/api_cn/dataset_cn/QueueDataset_cn.rst @@ -1,185 +1,188 @@ -.. _cn_api_fluid_dataset_QueueDataset: - -QueueDataset -------------------------------- - -.. py:class:: paddle.fluid.dataset.QueueDataset - -流式处理数据。 - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset("QueueDataset") - - - -.. py:method:: local_shuffle() - -局域shuffle数据 - -QueueDataset中不支持局域shuffle,可能抛出NotImplementedError - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset("QueueDataset") - dataset.local_shuffle() - - - -.. py:method:: global_shuffle(fleet=None) - -全局shuffle数据 - -QueueDataset中不支持全局shuffle,可能抛出NotImplementedError - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet - dataset = fluid.DatasetFactory().create_dataset("QueueDataset") - dataset.global_shuffle(fleet) - -.. py:method:: desc() - -为 ``DataFeedDesc`` 返回一个缓存信息。 - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset() - print(dataset.desc()) - -返回:一个字符串信息 - -.. py:method:: set_batch_size(batch_size) - -设置batch size。在训练期间生效。 - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset() - dataset.set_batch_size(128) - -参数: - - **batch_size** (int) - batch size - -.. py:method:: set_fea_eval(record_candidate_size,fea_eval) - -参数: - - **record_candidate_size** (int) - 打乱一个特征的候选实例大小 - - **fea_eval** (bool) - 是否设置特征验证模式来打乱特征,默认为True。 - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset(“InMemoryDataset”) - dataset.set_fea_eval(1000000, True) - -.. py:method:: set_filelist(filelist) - -在当前的worker中设置文件列表。 - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset() - dataset.set_filelist(["a.txt", "b.txt"]) - -参数: - - **filelist** (list) - 文件列表 - -.. py:method:: set_hdfs_config(fs_name, fs_ugi) - -设置hdfs配置:fs名称与ugi。 - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset() - dataset.set_hdfs_config("my_fs_name", "my_fs_ugi") - -参数: - - **fs_name** (str) - fs名称 - - **fs_ugi** (str) - fs ugi - -.. 
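(Editor's note, not part of the original page.) The InMemoryDataset methods documented above are normally combined into one training pipeline. Below is a minimal, hedged sketch of how they fit together; the variable names, shapes, the ``cat`` pipe command and the files ``a.txt``/``b.txt`` are illustrative assumptions only, and the model definition is omitted.

.. code-block:: python

    import paddle.fluid as fluid

    # declare the variables the dataset will feed (names/shapes are assumptions)
    data = fluid.data(name='words', shape=[None, 1], dtype='int64', lod_level=1)
    label = fluid.data(name='label', shape=[None, 1], dtype='int64')
    # a real program would define a network over `data`/`label` here

    dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
    dataset.set_use_var([data, label])          # variables consumed by the network
    dataset.set_batch_size(32)
    dataset.set_thread(4)
    dataset.set_pipe_command("cat")             # any UNIX pipe command that emits samples
    dataset.set_filelist(["a.txt", "b.txt"])    # illustrative file list

    dataset.load_into_memory()                  # read all samples into memory
    dataset.local_shuffle()                     # shuffle inside the current node

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())
    exe.train_from_dataset(fluid.default_main_program(), dataset)
    dataset.release_memory()                    # free the buffered samples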
py:method:: set_pipe_command(pipe_coommand) - -在当前的 ``dataset`` 中设置pipe命令。pipe命令只能使用UNIX的pipe命令 - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset() - dataset.set_pipe_command("python my_script.py") - -参数: - - **pipe_command** (str) - pipe命令 - -.. py:method:: set_thread(thread_num) - -设置进程数量,等于readers的数量。 - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset() - dataset.set_thread(12) - -参数: - - **thread_num** (int) - 进程数量 - -.. py:method:: set_use_var(var_list) - -设置将要使用的 ``Variable`` 。 - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset() - dataset.set_use_var([data, label]) - -参数: - - **var_list** (list) - variable 列表 - -.. py:method:: slots_shuffle(slots) - -该方法是在特征层次上的一个打乱方法,经常被用在有着较大缩放率实例的稀疏矩阵上,为了比较metric,比如auc,在一个或者多个有着baseline的特征上做特征打乱来验证特征level的重要性。 - -参数: - - **slots** (list[string]) - 要打乱特征的集合 - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset(“InMemoryDataset”) - dataset.set_merge_by_lineid() - #支持slot 0 - dataset.slots_shuffle([‘0’]) - +.. _cn_api_fluid_dataset_QueueDataset: + +QueueDataset +------------------------------- + +.. py:class:: paddle.fluid.dataset.QueueDataset + + + + +流式处理数据。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset("QueueDataset") + + + +.. py:method:: local_shuffle() + +局域shuffle数据 + +QueueDataset中不支持局域shuffle,可能抛出NotImplementedError + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset("QueueDataset") + dataset.local_shuffle() + + + +.. py:method:: global_shuffle(fleet=None) + +全局shuffle数据 + +QueueDataset中不支持全局shuffle,可能抛出NotImplementedError + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet + dataset = fluid.DatasetFactory().create_dataset("QueueDataset") + dataset.global_shuffle(fleet) + +.. py:method:: desc() + +为 ``DataFeedDesc`` 返回一个缓存信息。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset() + print(dataset.desc()) + +返回:一个字符串信息 + +.. py:method:: set_batch_size(batch_size) + +设置batch size。在训练期间生效。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset() + dataset.set_batch_size(128) + +参数: + - **batch_size** (int) - batch size + +.. py:method:: set_fea_eval(record_candidate_size,fea_eval) + +参数: + - **record_candidate_size** (int) - 打乱一个特征的候选实例大小 + - **fea_eval** (bool) - 是否设置特征验证模式来打乱特征,默认为True。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset(“InMemoryDataset”) + dataset.set_fea_eval(1000000, True) + +.. py:method:: set_filelist(filelist) + +在当前的worker中设置文件列表。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset() + dataset.set_filelist(["a.txt", "b.txt"]) + +参数: + - **filelist** (list) - 文件列表 + +.. py:method:: set_hdfs_config(fs_name, fs_ugi) + +设置hdfs配置:fs名称与ugi。 + +**代码示例**: + +.. 
code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset() + dataset.set_hdfs_config("my_fs_name", "my_fs_ugi") + +参数: + - **fs_name** (str) - fs名称 + - **fs_ugi** (str) - fs ugi + +.. py:method:: set_pipe_command(pipe_coommand) + +在当前的 ``dataset`` 中设置pipe命令。pipe命令只能使用UNIX的pipe命令 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset() + dataset.set_pipe_command("python my_script.py") + +参数: + - **pipe_command** (str) - pipe命令 + +.. py:method:: set_thread(thread_num) + +设置进程数量,等于readers的数量。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset() + dataset.set_thread(12) + +参数: + - **thread_num** (int) - 进程数量 + +.. py:method:: set_use_var(var_list) + +设置将要使用的 ``Variable`` 。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset() + dataset.set_use_var([data, label]) + +参数: + - **var_list** (list) - variable 列表 + +.. py:method:: slots_shuffle(slots) + +该方法是在特征层次上的一个打乱方法,经常被用在有着较大缩放率实例的稀疏矩阵上,为了比较metric,比如auc,在一个或者多个有着baseline的特征上做特征打乱来验证特征level的重要性。 + +参数: + - **slots** (list[string]) - 要打乱特征的集合 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset(“InMemoryDataset”) + dataset.set_merge_by_lineid() + #支持slot 0 + dataset.slots_shuffle([‘0’]) + diff --git a/doc/fluid/api_cn/declarative_cn.rst b/doc/fluid/api_cn/declarative_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..91a1c40516b91d21fa735f5b18b17fafd5d5dba6 --- /dev/null +++ b/doc/fluid/api_cn/declarative_cn.rst @@ -0,0 +1,28 @@ +======================= +paddle.declarative +======================= + +.. toctree:: + :maxdepth: 1 + + declarative_cn/batch_norm_cn.rst + declarative_cn/bilinear_tensor_product_cn.rst + declarative_cn/conv2d_cn.rst + declarative_cn/conv2d_transpose_cn.rst + declarative_cn/conv3d_cn.rst + declarative_cn/conv3d_transpose_cn.rst + declarative_cn/create_parameter_cn.rst + declarative_cn/crf_decoding_cn.rst + declarative_cn/data_norm_cn.rst + declarative_cn/deformable_conv_cn.rst + declarative_cn/embedding_cn.rst + declarative_cn/fc_cn.rst + declarative_cn/group_norm_cn.rst + declarative_cn/hsigmoid_cn.rst + declarative_cn/instance_norm_cn.rst + declarative_cn/layer_norm_cn.rst + declarative_cn/multi_box_head_cn.rst + declarative_cn/nce_cn.rst + declarative_cn/prelu_cn.rst + declarative_cn/row_conv_cn.rst + declarative_cn/spectral_norm_cn.rst diff --git a/doc/fluid/api_cn/declarative_cn/batch_norm_cn.rst b/doc/fluid/api_cn/declarative_cn/batch_norm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..aa4fde30267813a7f4a750ca0b8ba77cbc319e03 --- /dev/null +++ b/doc/fluid/api_cn/declarative_cn/batch_norm_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_declarative_cn_batch_norm: + +batch_norm +------------------------------- +:doc_source: paddle.fluid.layers.batch_norm + + diff --git a/doc/fluid/api_cn/declarative_cn/bilinear_tensor_product_cn.rst b/doc/fluid/api_cn/declarative_cn/bilinear_tensor_product_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..18c59e8a49bbaab3d7a2878b51e0eee067c06348 --- /dev/null +++ b/doc/fluid/api_cn/declarative_cn/bilinear_tensor_product_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_declarative_cn_bilinear_tensor_product: + +bilinear_tensor_product +------------------------------- +:doc_source: paddle.fluid.layers.bilinear_tensor_product + + diff --git a/doc/fluid/api_cn/declarative_cn/conv2d_cn.rst b/doc/fluid/api_cn/declarative_cn/conv2d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b64c16df0ff49223b470a1ebb5819f8590ba9f79 --- /dev/null +++ b/doc/fluid/api_cn/declarative_cn/conv2d_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_declarative_cn_conv2d: + +conv2d +------------------------------- +:doc_source: paddle.fluid.layers.conv2d + + diff --git a/doc/fluid/api_cn/declarative_cn/conv2d_transpose_cn.rst b/doc/fluid/api_cn/declarative_cn/conv2d_transpose_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1f02331663cc0662ba2db3419752111a0de8dc07 --- /dev/null +++ b/doc/fluid/api_cn/declarative_cn/conv2d_transpose_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_declarative_cn_conv2d_transpose: + +conv2d_transpose +------------------------------- +:doc_source: paddle.fluid.layers.conv2d_transpose + + diff --git a/doc/fluid/api_cn/declarative_cn/conv3d_cn.rst b/doc/fluid/api_cn/declarative_cn/conv3d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..271dd69c77d37598e0828b0c32f231dee4e6568e --- /dev/null +++ b/doc/fluid/api_cn/declarative_cn/conv3d_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_declarative_cn_conv3d: + +conv3d +------------------------------- +:doc_source: paddle.fluid.layers.conv3d + + diff --git a/doc/fluid/api_cn/declarative_cn/conv3d_transpose_cn.rst b/doc/fluid/api_cn/declarative_cn/conv3d_transpose_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..bcde71e60369226cd1ca8a4f230e11034c16a18f --- /dev/null +++ b/doc/fluid/api_cn/declarative_cn/conv3d_transpose_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_declarative_cn_conv3d_transpose: + +conv3d_transpose +------------------------------- +:doc_source: paddle.fluid.layers.conv3d_transpose + + diff --git a/doc/fluid/api_cn/declarative_cn/create_parameter_cn.rst b/doc/fluid/api_cn/declarative_cn/create_parameter_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..489beae017352305b3a20ec705e9310e8ae15250 --- /dev/null +++ b/doc/fluid/api_cn/declarative_cn/create_parameter_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_declarative_cn_create_parameter: + +create_parameter +------------------------------- +:doc_source: paddle.fluid.layers.create_parameter + + diff --git a/doc/fluid/api_cn/declarative_cn/crf_decoding_cn.rst b/doc/fluid/api_cn/declarative_cn/crf_decoding_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e8194b5b0bd48d4fb52edda70023ce70cce8096c --- /dev/null +++ b/doc/fluid/api_cn/declarative_cn/crf_decoding_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_declarative_cn_crf_decoding: + +crf_decoding +------------------------------- +:doc_source: paddle.fluid.layers.crf_decoding + + diff --git a/doc/fluid/api_cn/declarative_cn/data_norm_cn.rst b/doc/fluid/api_cn/declarative_cn/data_norm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a52b0ab1d612c86a06972b9a40787c54469d8829 --- /dev/null +++ b/doc/fluid/api_cn/declarative_cn/data_norm_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_declarative_cn_data_norm: + +data_norm +------------------------------- +:doc_source: paddle.fluid.layers.data_norm + + diff --git a/doc/fluid/api_cn/declarative_cn/deformable_conv_cn.rst b/doc/fluid/api_cn/declarative_cn/deformable_conv_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4008e90948f7b34aa86b26d79d2fe54ed885d929 --- /dev/null +++ b/doc/fluid/api_cn/declarative_cn/deformable_conv_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_declarative_cn_deformable_conv: + +deformable_conv +------------------------------- +:doc_source: paddle.fluid.layers.deformable_conv + + diff --git a/doc/fluid/api_cn/declarative_cn/embedding_cn.rst b/doc/fluid/api_cn/declarative_cn/embedding_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..296102ffed1a3c21afe617836cf38171c4dc9cc7 --- /dev/null +++ b/doc/fluid/api_cn/declarative_cn/embedding_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_declarative_cn_embedding: + +embedding +------------------------------- +:doc_source: paddle.fluid.input.embedding + + diff --git a/doc/fluid/api_cn/declarative_cn/fc_cn.rst b/doc/fluid/api_cn/declarative_cn/fc_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a0aa6224d58fb79ace46565d6c3f1ccc9e1f7fdc --- /dev/null +++ b/doc/fluid/api_cn/declarative_cn/fc_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_declarative_cn_fc: + +fc +------------------------------- +:doc_source: paddle.fluid.layers.fc + + diff --git a/doc/fluid/api_cn/declarative_cn/group_norm_cn.rst b/doc/fluid/api_cn/declarative_cn/group_norm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7e49fc62ce63bdd4fb738f26299a6fe09d87d798 --- /dev/null +++ b/doc/fluid/api_cn/declarative_cn/group_norm_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_declarative_cn_group_norm: + +group_norm +------------------------------- +:doc_source: paddle.fluid.layers.group_norm + + diff --git a/doc/fluid/api_cn/declarative_cn/hsigmoid_cn.rst b/doc/fluid/api_cn/declarative_cn/hsigmoid_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9fe20b0cdc08acacedec20998ad6640957539200 --- /dev/null +++ b/doc/fluid/api_cn/declarative_cn/hsigmoid_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_declarative_cn_hsigmoid: + +hsigmoid +------------------------------- +:doc_source: paddle.fluid.layers.hsigmoid + + diff --git a/doc/fluid/api_cn/declarative_cn/instance_norm_cn.rst b/doc/fluid/api_cn/declarative_cn/instance_norm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f21555863014924fabf53a1bcb2d8211140edd1f --- /dev/null +++ b/doc/fluid/api_cn/declarative_cn/instance_norm_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_declarative_cn_instance_norm: + +instance_norm +------------------------------- +:doc_source: paddle.fluid.layers.instance_norm + + diff --git a/doc/fluid/api_cn/declarative_cn/layer_norm_cn.rst b/doc/fluid/api_cn/declarative_cn/layer_norm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..71f6ec2cc327ff06a1d5dcd44db2068531c0f1b2 --- /dev/null +++ b/doc/fluid/api_cn/declarative_cn/layer_norm_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_declarative_cn_layer_norm: + +layer_norm +------------------------------- +:doc_source: paddle.fluid.layers.layer_norm + + diff --git a/doc/fluid/api_cn/declarative_cn/multi_box_head_cn.rst b/doc/fluid/api_cn/declarative_cn/multi_box_head_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5d942c82de8b45b511fdc00869d60c4c886b0a36 --- /dev/null +++ b/doc/fluid/api_cn/declarative_cn/multi_box_head_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_declarative_cn_multi_box_head: + +multi_box_head +------------------------------- +:doc_source: paddle.fluid.layers.multi_box_head + + diff --git a/doc/fluid/api_cn/declarative_cn/nce_cn.rst b/doc/fluid/api_cn/declarative_cn/nce_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b6d4bbcbe292ef88334cad9094d767067265eccd --- /dev/null +++ b/doc/fluid/api_cn/declarative_cn/nce_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_declarative_cn_nce: + +nce +------------------------------- +:doc_source: paddle.fluid.layers.nce + + diff --git a/doc/fluid/api_cn/declarative_cn/prelu_cn.rst b/doc/fluid/api_cn/declarative_cn/prelu_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..82fef8c6e06b84e06665e247cf88025178857da7 --- /dev/null +++ b/doc/fluid/api_cn/declarative_cn/prelu_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_declarative_cn_prelu: + +prelu +------------------------------- +:doc_source: paddle.fluid.layers.prelu + + diff --git a/doc/fluid/api_cn/declarative_cn/row_conv_cn.rst b/doc/fluid/api_cn/declarative_cn/row_conv_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..fd07da80492eb4dab754a2689af24badccbd0725 --- /dev/null +++ b/doc/fluid/api_cn/declarative_cn/row_conv_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_declarative_cn_row_conv: + +row_conv +------------------------------- +:doc_source: paddle.fluid.layers.row_conv + + diff --git a/doc/fluid/api_cn/declarative_cn/spectral_norm_cn.rst b/doc/fluid/api_cn/declarative_cn/spectral_norm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..17ffd21a9b95131283ffb99a50a86073474d4798 --- /dev/null +++ b/doc/fluid/api_cn/declarative_cn/spectral_norm_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_declarative_cn_spectral_norm: + +spectral_norm +------------------------------- +:doc_source: paddle.fluid.layers.spectral_norm + + diff --git a/doc/fluid/api_cn/distributed_cn.rst b/doc/fluid/api_cn/distributed_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ff75e9ce3331c65f8313ac160d8750e313527231 --- /dev/null +++ b/doc/fluid/api_cn/distributed_cn.rst @@ -0,0 +1,22 @@ +======================= +paddle.distributed +======================= + + + + +.. toctree:: + :maxdepth: 1 + + distributed_cn/all_gather_cn.rst + distributed_cn/all_reduce_cn.rst + distributed_cn/barrier_cn.rst + distributed_cn/broadcast_cn.rst + distributed_cn/get_rank_cn.rst + distributed_cn/get_world_size_cn.rst + distributed_cn/init_parallel_env_cn.rst + distributed_cn/ParallelEnv_cn.rst + distributed_cn/prepare_context_cn.rst + distributed_cn/reduce_cn.rst + distributed_cn/scatter_cn.rst + distributed_cn/spawn_cn.rst diff --git a/doc/fluid/api_cn/distributed_cn/ParallelEnv_cn.rst b/doc/fluid/api_cn/distributed_cn/ParallelEnv_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c56a483466aa81edc4910df07c8078bdccd095e1 --- /dev/null +++ b/doc/fluid/api_cn/distributed_cn/ParallelEnv_cn.rst @@ -0,0 +1,5 @@ +.. 
_cn_api_distributed_ParallelEnv: + +ParallelEnv +------------------------------- +:doc_source: paddle.fluid.dygraph.parallel.ParallelEnv \ No newline at end of file diff --git a/doc/fluid/api_cn/distributed_cn/all_gather_cn.rst b/doc/fluid/api_cn/distributed_cn/all_gather_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6cbe19db63cad604c75657c9da83b18951475fdc --- /dev/null +++ b/doc/fluid/api_cn/distributed_cn/all_gather_cn.rst @@ -0,0 +1,44 @@ +.. _cn_api_distributed_all_gather: + +all_gather +------------------------------- + + +.. py:function:: paddle.distributed.all_gather(tensor_list, tensor, group=0) + +进程组内所有进程的指定tensor进行聚合操作,并返回给所有进程聚合的结果。 + +参数 +::::::::: + - tensor_list (list) - 操作的输出Tensor列表。列表中的每个元素均为Tensor,每个Tensor的数据类型为:float16、float32、float64、int32、int64。 + - tensor (Tensor) - 操作的输入Tensor。Tensor的数据类型为:float16、float32、float64、int32、int64。 + - group (int,可选) - 工作的进程组编号,默认为0。 + +返回 +::::::::: +无 + +代码示例 +::::::::: +.. code-block:: python + + import numpy as np + import paddle + from paddle.distributed import init_parallel_env + + paddle.disable_static() + paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id) + init_parallel_env() + tensor_list = [] + if paddle.distributed.ParallelEnv().local_rank == 0: + np_data1 = np.array([[4, 5, 6], [4, 5, 6]]) + np_data2 = np.array([[4, 5, 6], [4, 5, 6]]) + data1 = paddle.to_tensor(np_data1) + data2 = paddle.to_tensor(np_data2) + paddle.distributed.all_gather(tensor_list, data1) + else: + np_data1 = np.array([[1, 2, 3], [1, 2, 3]]) + np_data2 = np.array([[1, 2, 3], [1, 2, 3]]) + data1 = paddle.to_tensor(np_data1) + data2 = paddle.to_tensor(np_data2) + paddle.distributed.all_gather(tensor_list, data2) diff --git a/doc/fluid/api_cn/distributed_cn/all_reduce_cn.rst b/doc/fluid/api_cn/distributed_cn/all_reduce_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..fc183c32190520eafa94e715dfc1a4822f80e54a --- /dev/null +++ b/doc/fluid/api_cn/distributed_cn/all_reduce_cn.rst @@ -0,0 +1,40 @@ +.. _cn_api_distributed_all_reduce: + +all_reduce +------------------------------- + + +.. py:function:: paddle.distributed.all_reduce(tensor, op=ReduceOp.SUM, group=0) + +进程组内所有进程的指定tensor进行归约操作,并返回给所有进程归约的结果。 + +参数 +::::::::: + - tensor (Tensor) - 操作的输入Tensor,同时也会将归约结果返回至此Tensor中。Tensor的数据类型为:float16、float32、float64、int32、int64。 + - op (ReduceOp.SUM|ReduceOp.MAX|ReduceOp.Min|ReduceOp.PROD,可选) - 归约的具体操作,比如求和,取最大值,取最小值和求乘积,默认为求和归约。 + - group (int,可选) - 工作的进程组编号,默认为0。 + +返回 +::::::::: +无 + +代码示例 +::::::::: +.. code-block:: python + + import numpy as np + import paddle + from paddle.distributed import ReduceOp + from paddle.distributed import init_parallel_env + + paddle.disable_static() + paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id) + init_parallel_env() + if paddle.distributed.ParallelEnv().local_rank == 0: + np_data = np.array([[4, 5, 6], [4, 5, 6]]) + else: + np_data = np.array([[1, 2, 3], [1, 2, 3]]) + data = paddle.to_tensor(np_data) + paddle.distributed.all_reduce(data) + out = data.numpy() + # [[5, 7, 9], [5, 7, 9]] diff --git a/doc/fluid/api_cn/distributed_cn/barrier_cn.rst b/doc/fluid/api_cn/distributed_cn/barrier_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..fb63526cfd0163cec91396144b2e0c3c0f9beace --- /dev/null +++ b/doc/fluid/api_cn/distributed_cn/barrier_cn.rst @@ -0,0 +1,29 @@ +.. _cn_api_distributed_barrier: + +barrier +------------------------------- + + +.. 
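(Editor's note.) As a small addition to the ``all_gather`` documentation above: the gathered tensors are placed into ``tensor_list`` on every rank, ordered by rank id. The sketch below is an editor-added illustration that assumes two trainers started with ``paddle.distributed.launch`` or ``spawn``; the tensor contents are arbitrary.

.. code-block:: python

    import numpy as np
    import paddle
    from paddle.distributed import init_parallel_env

    paddle.disable_static()
    paddle.set_device('gpu:%d' % paddle.distributed.ParallelEnv().dev_id)
    init_parallel_env()

    rank = paddle.distributed.get_rank()
    data = paddle.to_tensor(np.array([[rank, rank, rank]]))

    tensor_list = []
    paddle.distributed.all_gather(tensor_list, data)
    # On every rank (assuming 2 trainers), tensor_list is ordered by rank id:
    # tensor_list[0].numpy() -> [[0, 0, 0]]  (from rank 0)
    # tensor_list[1].numpy() -> [[1, 1, 1]]  (from rank 1)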
py:function:: paddle.distributed.barrier(group=0) + +同步进程组内的所有进程。 + +参数 +::::::::: + - group (int,可选) - 工作的进程组编号,默认为0。 + +返回 +::::::::: +无 + +代码示例 +::::::::: +.. code-block:: python + + import paddle + from paddle.distributed import init_parallel_env + + paddle.disable_static() + paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id) + init_parallel_env() + paddle.distributed.barrier() diff --git a/doc/fluid/api_cn/distributed_cn/broadcast_cn.rst b/doc/fluid/api_cn/distributed_cn/broadcast_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..33e653b56648ff6f97b64b19fffcf64bca4a989c --- /dev/null +++ b/doc/fluid/api_cn/distributed_cn/broadcast_cn.rst @@ -0,0 +1,39 @@ +.. _cn_api_distributed_broadcast: + +broadcast +------------------------------- + + +.. py:function:: paddle.distributed.broadcast(tensor, src, group=0) + +广播一个Tensor给其他所有进程 + +参数 +::::::::: + - tensor (Tensor) - 如果当前进程编号是源,那么这个Tensor变量将被发送给其他进程,否则这个Tensor将接收源发送过来的数据。Tensor的数据类型为:float16、float32、float64、int32、int64。 + - src (int) - 发送源的进程编号。 + - group (int,可选) - 工作的进程组编号,默认为0。 + +返回 +::::::::: +无 + +代码示例 +::::::::: +.. code-block:: python + + import numpy as np + import paddle + from paddle.distributed import init_parallel_env + + paddle.disable_static() + paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id) + init_parallel_env() + if paddle.distributed.ParallelEnv().local_rank == 0: + np_data = np.array([[4, 5, 6], [4, 5, 6]]) + else: + np_data = np.array([[1, 2, 3], [1, 2, 3]]) + data = paddle.to_tensor(np_data) + paddle.distributed.broadcast(data, 1) + out = data.numpy() + # [[1, 2, 3], [1, 2, 3]] diff --git a/doc/fluid/api_cn/distributed_cn/get_rank_cn.rst b/doc/fluid/api_cn/distributed_cn/get_rank_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..75ad8fc79baa6a560fe956799c1a00bc9d67376d --- /dev/null +++ b/doc/fluid/api_cn/distributed_cn/get_rank_cn.rst @@ -0,0 +1,25 @@ +.. _cn_api_distributed_get_rank: + +get_rank +---------- + +.. py:function:: paddle.distributed.get_rank() + +返回当前进程的rank。 + +当前进程rank的值等于环境变量 ``PADDLE_TRAINER_ID`` 的值,默认值为0。 + +返回 +::::::::: +(int) 当前进程的rank。 + +代码示例 +::::::::: +.. code-block:: python + + import paddle + import paddle.distributed as dist + + # execute this command in terminal: export PADDLE_TRAINER_ID=0 + print("The rank is %d" % dist.get_rank()) + # The rank is 0 diff --git a/doc/fluid/api_cn/distributed_cn/get_world_size_cn.rst b/doc/fluid/api_cn/distributed_cn/get_world_size_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..08342de3c1f44b96762eefb1d9ae96918112d9dd --- /dev/null +++ b/doc/fluid/api_cn/distributed_cn/get_world_size_cn.rst @@ -0,0 +1,25 @@ +.. _cn_api_distributed_get_world_size: + +get_world_size +---------------- + +.. py:function:: paddle.distributed.get_world_size() + +返回参与当前任务的进程数。 + +当前进程数等于环境变量 ``PADDLE_TRAINERS_NUM`` 的值,默认值为1。 + +返回 +::::::::: +(int) 参与任务的进程数。 + +代码示例 +::::::::: +.. code-block:: python + + import paddle + import paddle.distributed as dist + + # execute this command in terminal: export PADDLE_TRAINERS_NUM=4 + print("The world_size is %d" % dist.get_world_size()) + # The world_size is 4 diff --git a/doc/fluid/api_cn/distributed_cn/init_parallel_env_cn.rst b/doc/fluid/api_cn/distributed_cn/init_parallel_env_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..eafe9f10f0548931828798b28896ef59f211eefe --- /dev/null +++ b/doc/fluid/api_cn/distributed_cn/init_parallel_env_cn.rst @@ -0,0 +1,64 @@ +.. 
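(Editor's note.) To make the purpose of ``barrier`` above more concrete, here is a hedged sketch of a common pattern: rank 0 writes a file that the other ranks need, and ``barrier`` guarantees the file exists before any rank reads it. The path is hypothetical, and the pattern assumes all ranks share a filesystem (e.g. single-machine multi-GPU).

.. code-block:: python

    import os
    import paddle
    import paddle.distributed as dist
    from paddle.distributed import init_parallel_env

    paddle.disable_static()
    paddle.set_device('gpu:%d' % dist.ParallelEnv().dev_id)
    init_parallel_env()

    shared_file = "/tmp/prepared_by_rank0.txt"   # hypothetical path, illustration only
    if dist.get_rank() == 0:
        with open(shared_file, "w") as f:        # only rank 0 produces the file
            f.write("ready")
    dist.barrier()                               # all ranks wait until rank 0 is done
    assert os.path.exists(shared_file)           # now safe to read on every rank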
_cn_api_distributed_init_parallel_env: + +init_parallel_env +----------------- + +.. py:function:: paddle.distributed.init_parallel_env() + +初始化动态图模式下的并行训练环境。 + +.. note:: + 目前仅支持初始化GPU训练环境,使用NCCL进行通信。 + +返回 +::::::::: +无 + +代码示例 +::::::::: +.. code-block:: python + + import paddle + import paddle.nn as nn + import paddle.optimizer as opt + import paddle.distributed as dist + + class LinearNet(nn.Layer): + def __init__(self): + super(LinearNet, self).__init__() + self._linear1 = nn.Linear(10, 10) + self._linear2 = nn.Linear(10, 1) + + def forward(self, x): + return self._linear2(self._linear1(x)) + + def train(): + # 1. enable dynamic mode + paddle.disable_static() + + # 2. initialize parallel environment + dist.init_parallel_env() + + # 3. create data parallel layer & optimizer + layer = LinearNet() + dp_layer = paddle.DataParallel(layer) + + loss_fn = nn.MSELoss() + adam = opt.Adam( + learning_rate=0.001, parameters=dp_layer.parameters()) + + # 4. run layer + inputs = paddle.randn([10, 10], 'float32') + outputs = dp_layer(inputs) + labels = paddle.randn([10, 1], 'float32') + loss = loss_fn(outputs, labels) + + loss = dp_layer.scale_loss(loss) + loss.backward() + dp_layer.apply_collective_grads() + + adam.step() + adam.clear_grad() + + if __name__ == '__main__': + dist.spawn(train) diff --git a/doc/fluid/api_cn/distributed_cn/prepare_context_cn.rst b/doc/fluid/api_cn/distributed_cn/prepare_context_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..656ff3c3498c9adcb080dcd45da46967e7117e01 --- /dev/null +++ b/doc/fluid/api_cn/distributed_cn/prepare_context_cn.rst @@ -0,0 +1,5 @@ +.. _cn_api_distributed_prepare_context: + +prepare_context +------------------------------- +:doc_source: paddle.fluid.dygraph.parallel.prepare_context diff --git a/doc/fluid/api_cn/distributed_cn/reduce_cn.rst b/doc/fluid/api_cn/distributed_cn/reduce_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6f471a67a8a0a17617f18208ca9b596ce8182f5e --- /dev/null +++ b/doc/fluid/api_cn/distributed_cn/reduce_cn.rst @@ -0,0 +1,40 @@ +.. _cn_api_distributed_reduce: + +reduce +------------------------------- + + +.. py:function:: paddle.distributed.reduce(tensor, dst, op=ReduceOp.SUM, group=0) + +进程组内所有进程的指定tensor进行归约操作,并返回给所有进程归约的结果。 + +参数 +::::::::: + - tensor (Tensor) - 操作的输入Tensor,结果返回至目标进程号的Tensor中。Tensor的数据类型为:float16、float32、float64、int32、int64。 + - dst (int) - 返回操作结果的目标进程编号。 + - op (ReduceOp.SUM|ReduceOp.MAX|ReduceOp.Min|ReduceOp.PROD,可选) - 归约的具体操作,比如求和,取最大值,取最小值和求乘积,默认为求和归约。 + - group (int,可选) - 工作的进程组编号,默认为0。 + +返回 +::::::::: +无 + +代码示例 +::::::::: +.. code-block:: python + + import numpy as np + import paddle + from paddle.distributed import init_parallel_env + + paddle.disable_static() + paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id) + init_parallel_env() + if paddle.distributed.ParallelEnv().local_rank == 0: + np_data = np.array([[4, 5, 6], [4, 5, 6]]) + else: + np_data = np.array([[1, 2, 3], [1, 2, 3]]) + data = paddle.to_tensor(np_data) + paddle.distributed.reduce(data, 0) + out = data.numpy() + # [[5, 7, 9], [5, 7, 9]] diff --git a/doc/fluid/api_cn/distributed_cn/scatter_cn.rst b/doc/fluid/api_cn/distributed_cn/scatter_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a238202658419ab62594f3817e2923468e56f4fb --- /dev/null +++ b/doc/fluid/api_cn/distributed_cn/scatter_cn.rst @@ -0,0 +1,45 @@ +.. _cn_api_distributed_scatter: + +scatter +------------------------------- + + +.. 
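(Editor's note.) One point worth spelling out for the ``reduce`` documentation above: the reduced result is what ends up in ``tensor`` on the destination rank ``dst``. The sketch below is an editor-added illustration (assuming two trainers) that therefore only inspects the result on rank 0; the input values are arbitrary.

.. code-block:: python

    import numpy as np
    import paddle
    from paddle.distributed import init_parallel_env

    paddle.disable_static()
    paddle.set_device('gpu:%d' % paddle.distributed.ParallelEnv().dev_id)
    init_parallel_env()

    rank = paddle.distributed.get_rank()
    # rank 0 holds [[1, 2, 3]], rank 1 holds [[2, 4, 6]]
    data = paddle.to_tensor(np.array([[1, 2, 3]], dtype='int64') * (rank + 1))

    paddle.distributed.reduce(data, dst=0)       # ReduceOp.SUM by default
    if rank == 0:
        print(data.numpy())                      # [[3, 6, 9]] with two trainers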
py:function:: paddle.distributed.scatter(tensor, tensor_list=None, src=0, group=0) + +进程组内指定进程源的tensor列表分发到其他所有进程中。 + +参数 +::::::::: + - tensor (Tensor) - 操作的输出Tensor。Tensor的数据类型为:float16、float32、float64、int32、int64。 + - tensor_list (list,可选) - 操作的输入Tensor列表,默认为None。列表中的每个元素均为Tensor,每个Tensor的数据类型为:float16、float32、float64、int32、int64。 + - src (int,可选) - 操作的源进程号,该进程号的Tensor列表将分发到其他进程中。默认为0。 + - group (int,可选) - 工作的进程组编号,默认为0。 + +返回 +::::::::: +无 + +代码示例 +::::::::: +.. code-block:: python + + import numpy as np + import paddle + from paddle.distributed import init_parallel_env + + paddle.disable_static() + paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id) + init_parallel_env() + if paddle.distributed.ParallelEnv().local_rank == 0: + np_data1 = np.array([7, 8, 9]) + np_data2 = np.array([10, 11, 12]) + else: + np_data1 = np.array([1, 2, 3]) + np_data2 = np.array([4, 5, 6]) + data1 = paddle.to_tensor(np_data1) + data2 = paddle.to_tensor(np_data2) + if paddle.distributed.ParallelEnv().local_rank == 0: + paddle.distributed.scatter(data1, src=1) + else: + paddle.distributed.scatter(data1, tensor_list=[data1, data2], src=1) + out = data1.numpy() diff --git a/doc/fluid/api_cn/distributed_cn/spawn_cn.rst b/doc/fluid/api_cn/distributed_cn/spawn_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..21f8f762f5052474aac91c31ada8f76b664594b2 --- /dev/null +++ b/doc/fluid/api_cn/distributed_cn/spawn_cn.rst @@ -0,0 +1,105 @@ +.. _cn_api_distributed_spawn: + +spawn +----- + +.. py:function:: paddle.distributed.spawn(func, args=(), nprocs=-1, join=True, daemon=False, **options) + +使用 ``spawn`` 方法启动多进程任务。 + +参数 +::::::::: + - func (function) - 由 ``spawn`` 方法启动的进程所调用的目标函数。该目标函数需要能够被 ``pickled`` (序列化),所以目标函数必须定义为模块的一级函数,不能是内部子函数或者类方法。 + - args (tuple, 可选) - 传入目标函数 ``func`` 的参数。 + - nprocs (int, 可选) - 启动进程的数目。默认值为-1。当 ``nproc`` 为-1时,模型执行时将会从环境变量中获取当前可用的所有设备进行使用:如果使用GPU执行任务,将会从环境变量 ``CUDA_VISIBLE_DEVICES`` 中获取当前所有可用的设备ID;如果使用CPU执行任务,将会从环境变量 ``CPU_NUM`` 中获取当前可用的CPU设备数,例如,可以通过指令 ``export CPU_NUM=4`` 配置默认可用CPU设备数,如果此环境变量没有设置,将会默认设置该环境变量的值为1。 + - join (bool, 可选) - 对所有启动的进程执行阻塞的 ``join`` ,等待进程执行结束。默认为True。 + - daemon (bool, 可选) - 配置启动进程的 ``daemon`` 属性。默认为False。 + - **options (dict, 可选) - 其他初始化并行执行环境的配置选项。目前支持以下选项: (1) start_method (string) - 启动子进程的方法。进程的启动方法可以是 ``spawn`` , ``fork`` , ``forkserver`` 。 因为CUDA运行时环境不支持 ``fork`` 方法,当在子进程中使用CUDA时,需要使用 ``spawn`` 或者 ``forkserver`` 方法启动进程。默认方法为 ``spawn`` ; (2) cluster_node_ips (string) - 运行集群的节点(机器)IP,例如 "192.168.0.16,192.168.0.17" ,默认值为 "127.0.0.1" ; (3) node_ip (string) - 当前节点(机器)的IP。例如 "192.168.0.16" , 默认值为 "127.0.0.1" ; (4) started_port (int) - 一个训练节点(机器)上各训练进程的起始端口。例如 6170. 默认值为None ; (5) selected_gpus (string) - 指定训练使用的GPU ID, 例如 "0,1,2,3" , 默认值为None ; (6) print_config (bool) - 打印当前并行训练的配置, 默认值为False ; (7) use_paddlecloud (bool) - 配置是否使用PaddleCloud启动多进程任务,默认值为False。 + +返回 +::::::::: + ``MultiprocessContext`` 对象,持有创建的多个进程。 + +代码示例 +::::::::: +.. code-block:: python + + from __future__ import print_function + + import paddle + import paddle.nn as nn + import paddle.optimizer as opt + import paddle.distributed as dist + + class LinearNet(nn.Layer): + def __init__(self): + super(LinearNet, self).__init__() + self._linear1 = nn.Linear(10, 10) + self._linear2 = nn.Linear(10, 1) + + def forward(self, x): + return self._linear2(self._linear1(x)) + + def train(print_result=False): + # 1. enable dynamic mode + paddle.disable_static() + + # 2. initialize parallel environment + dist.init_parallel_env() + + # 3. 
create data parallel layer & optimizer + layer = LinearNet() + dp_layer = paddle.DataParallel(layer) + + loss_fn = nn.MSELoss() + adam = opt.Adam( + learning_rate=0.001, parameters=dp_layer.parameters()) + + # 4. run layer + inputs = paddle.randn([10, 10], 'float32') + outputs = dp_layer(inputs) + labels = paddle.randn([10, 1], 'float32') + loss = loss_fn(outputs, labels) + + if print_result is True: + print("loss:", loss.numpy()) + + loss = dp_layer.scale_loss(loss) + loss.backward() + dp_layer.apply_collective_grads() + + adam.step() + adam.clear_grad() + + # Usage 1: only pass function. + # If your training method no need any argument, and + # use all visible devices for parallel training. + if __name__ == '__main__': + dist.spawn(train) + + # Usage 2: pass function and arguments. + # If your training method need some arguments, and + # use all visible devices for parallel training. + if __name__ == '__main__': + dist.spawn(train, args=(True,)) + + # Usage 3: pass function, arguments and nprocs. + # If your training method need some arguments, and + # only use part of visible devices for parallel training. + # If your machine hold 8 cards {0,1,2,3,4,5,6,7}, + # this case will use cards {0,1}; If you set + # CUDA_VISIBLE_DEVICES=4,5,6,7, this case will use + # cards {4,5} + if __name__ == '__main__': + dist.spawn(train, args=(True,), nprocs=2) + + # Usage 4: pass function, arguments, nprocs and selected_gpus. + # If your training method need some arguments, and + # only use part of visible devices for parallel training, + # but you can't set your machine's environment varibale + # CUDA_VISIBLE_DEVICES, such as it is None or all cards + # {0,1,2,3,4,5,6,7}, you can pass `selelcted_gpus` to + # select the GPU cards you want to use. For example, + # this case will use cards {4,5} if your machine hold 8 cards. + if __name__ == '__main__': + dist.spawn(train, args=(True,), nprocs=2, selelcted_gpus='4,5') \ No newline at end of file diff --git a/doc/fluid/api_cn/dygraph_cn.rst b/doc/fluid/api_cn/dygraph_cn.rst index 23b6e135713c3bbdad323a9cc1d8113d27653377..889d6c89603ccec58c067833703a70f22354f06e 100644 --- a/doc/fluid/api_cn/dygraph_cn.rst +++ b/doc/fluid/api_cn/dygraph_cn.rst @@ -8,40 +8,54 @@ fluid.dygraph .. 
toctree:: :maxdepth: 1 - dygraph_cn/BackwardStrategy_cn.rst dygraph_cn/BatchNorm_cn.rst dygraph_cn/BilinearTensorProduct_cn.rst dygraph_cn/Conv2D_cn.rst dygraph_cn/Conv2DTranspose_cn.rst dygraph_cn/Conv3D_cn.rst dygraph_cn/Conv3DTranspose_cn.rst + dygraph_cn/CosineAnnealingDecay_cn.rst dygraph_cn/CosineDecay_cn.rst + dygraph_cn/DataParallel_cn.rst + dygraph_cn/declarative_cn.rst + dygraph_cn/Dropout_cn.rst dygraph_cn/Embedding_cn.rst dygraph_cn/ExponentialDecay_cn.rst dygraph_cn/FC_cn.rst + dygraph_cn/grad_cn.rst dygraph_cn/GroupNorm_cn.rst dygraph_cn/GRUUnit_cn.rst dygraph_cn/guard_cn.rst + dygraph_cn/InstanceNorm_cn.rst dygraph_cn/InverseTimeDecay_cn.rst + dygraph_cn/jit_cn.rst + dygraph_cn/LambdaDecay_cn.rst dygraph_cn/Layer_cn.rst dygraph_cn/LayerList_cn.rst dygraph_cn/LayerNorm_cn.rst dygraph_cn/Linear_cn.rst dygraph_cn/load_dygraph_cn.rst + dygraph_cn/MultiStepDecay_cn.rst dygraph_cn/NaturalExpDecay_cn.rst dygraph_cn/NCE_cn.rst dygraph_cn/NoamDecay_cn.rst + dygraph_cn/ParallelEnv_cn.rst dygraph_cn/ParameterList_cn.rst - dygraph_cn/no_grad_cn.rst + dygraph_cn/no_grad_cn.rst dygraph_cn/PiecewiseDecay_cn.rst dygraph_cn/PolynomialDecay_cn.rst dygraph_cn/Pool2D_cn.rst dygraph_cn/PRelu_cn.rst dygraph_cn/prepare_context_cn.rst + dygraph_cn/ProgramTranslator_cn.rst + dygraph_cn/ReduceLROnPlateau_cn.rst dygraph_cn/save_dygraph_cn.rst dygraph_cn/Sequential_cn.rst dygraph_cn/SpectralNorm_cn.rst + dygraph_cn/StepDecay_cn.rst dygraph_cn/to_variable_cn.rst dygraph_cn/TracedLayer_cn.rst dygraph_cn/Tracer_cn.rst + dygraph_cn/TranslatedLayer_cn.rst dygraph_cn/TreeConv_cn.rst + dygraph_cn/enabled_cn.rst diff --git a/doc/fluid/api_cn/dygraph_cn/BackwardStrategy_cn.rst b/doc/fluid/api_cn/dygraph_cn/BackwardStrategy_cn.rst deleted file mode 100644 index 5e8fc9945d206ac7936a48c7a8ad53517f2889b2..0000000000000000000000000000000000000000 --- a/doc/fluid/api_cn/dygraph_cn/BackwardStrategy_cn.rst +++ /dev/null @@ -1,46 +0,0 @@ -.. _cn_api_fluid_dygraph_BackwardStrategy: - -BackwardStrategy -------------------------------- - -**注意:该API仅支持【动态图】模式** - -.. py:class:: paddle.fluid.dygraph.BackwardStrategy - -**注意:该API只在动态图下生效** - -BackwardStrategy是描述动态图反向执行的策略,主要功能是定义动态图反向执行时的不同策略 - -**属性:** - -.. py:attribute:: sort_sum_gradient - -是否按照前向执行的逆序加和多个梯度,例如当 x_var( :ref:`api_guide_Variable` )作为多个OP(这里以 :ref:`cn_api_fluid_layers_scale` 为例)的输入时,其产生的梯度是否按照前向书写时的 -逆序加和,默认为False - - -**代码示例** - -.. code-block:: python - - import numpy as np - import paddle.fluid as fluid - - x = np.ones([2, 2], np.float32) - with fluid.dygraph.guard(): - x_var = fluid.dygraph.to_variable(x) - sums_inputs = [] - # 这里x_var将作为多个输入scale的输入 - for _ in range(10): - sums_inputs.append(fluid.layers.scale(x_var)) - ret2 = fluid.layers.sums(sums_inputs) - loss2 = fluid.layers.reduce_sum(ret2) - backward_strategy = fluid.dygraph.BackwardStrategy() - backward_strategy.sort_sum_gradient = True - loss2.backward(backward_strategy) - - - - - - diff --git a/doc/fluid/api_cn/dygraph_cn/BatchNorm_cn.rst b/doc/fluid/api_cn/dygraph_cn/BatchNorm_cn.rst index 7b2030eca47a2852c4d381e1d32542aef8aab3dc..bede52decd8cf46131fd2cdf6b0b91673fd34781 100644 --- a/doc/fluid/api_cn/dygraph_cn/BatchNorm_cn.rst +++ b/doc/fluid/api_cn/dygraph_cn/BatchNorm_cn.rst @@ -5,6 +5,12 @@ BatchNorm .. 
py:class:: paddle.fluid.dygraph.BatchNorm(num_channels, act=None, is_test=False, momentum=0.9, epsilon=1e-05, param_attr=None, bias_attr=None, dtype='float32', data_layout='NCHW', in_place=False, moving_mean_name=None, moving_variance_name=None, do_model_average_for_mean_and_var=False, use_global_stats=False, trainable_statistics=False) +:alias_main: paddle.nn.BatchNorm +:alias: paddle.nn.BatchNorm,paddle.nn.layer.BatchNorm,paddle.nn.layer.norm.BatchNorm +:old_api: paddle.fluid.dygraph.BatchNorm + + + 该接口用于构建 ``BatchNorm`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。其中实现了批归一化层(Batch Normalization Layer)的功能,可用作卷积和全连接操作的批归一化函数,根据当前批次数据按通道计算的均值和方差进行归一化。更多详情请参考 : `Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift `_ 当use_global_stats = False时,:math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 是minibatch的统计数据。计算公式如下: diff --git a/doc/fluid/api_cn/dygraph_cn/BilinearTensorProduct_cn.rst b/doc/fluid/api_cn/dygraph_cn/BilinearTensorProduct_cn.rst index 095f9ad0d318733fc638126a2340f59b9c9c550f..59b18c17a32951135e431ef036a6d32771651399 100644 --- a/doc/fluid/api_cn/dygraph_cn/BilinearTensorProduct_cn.rst +++ b/doc/fluid/api_cn/dygraph_cn/BilinearTensorProduct_cn.rst @@ -5,6 +5,12 @@ BilinearTensorProduct .. py:class:: paddle.fluid.dygraph.BilinearTensorProduct(input1_dim, input2_dim, output_dim, name=None, act=None, param_attr=None, bias_attr=None, dtype="float32") +:alias_main: paddle.nn.BilinearTensorProduct +:alias: paddle.nn.BilinearTensorProduct,paddle.nn.layer.BilinearTensorProduct,paddle.nn.layer.common.BilinearTensorProduct +:old_api: paddle.fluid.dygraph.BilinearTensorProduct + + + 该接口用于构建 ``BilinearTensorProduct`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。双线性乘积计算式子如下。 .. math:: diff --git a/doc/fluid/api_cn/dygraph_cn/Conv2DTranspose_cn.rst b/doc/fluid/api_cn/dygraph_cn/Conv2DTranspose_cn.rst index 60e0f101f01d150cc9696a4bdbd07246f643b785..b500bdbf6c7884f1bc263532eb0f3db379790fa8 100644 --- a/doc/fluid/api_cn/dygraph_cn/Conv2DTranspose_cn.rst +++ b/doc/fluid/api_cn/dygraph_cn/Conv2DTranspose_cn.rst @@ -5,6 +5,9 @@ Conv2DTranspose .. py:class:: paddle.fluid.dygraph.Conv2DTranspose(num_channels, num_filters, filter_size, output_size=None, padding=0, stride=1, dilation=1, groups=None, param_attr=None, bias_attr=None, use_cudnn=True, act=None, dtype="float32") + + + 该接口用于构建 ``Conv2DTranspose`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。其将在神经网络中构建一个二维卷积转置层(Convlution2D Transpose Layer),其根据输入(input)、滤波器参数(num_filters、filter_size)、步长(stride)、填充(padding)、膨胀系数(dilation)、组数(groups)来计算得到输出特征图。输入和输出是 ``NCHW`` 格式,N是批数据大小,C是特征图个数,H是特征图高度,W是特征图宽度。滤波器的维度是 [M, C, H, W] ,M是输入特征图个数,C是输出特征图个数,H是滤波器高度,W是滤波器宽度。如果组数大于1,C等于输入特征图个数除以组数的结果。如果提供了偏移属性和激活函数类型,卷积的结果会和偏移相加,激活函数会作用在最终结果上。转置卷积的计算过程相当于卷积的反向计算,转置卷积又被称为反卷积(但其实并不是真正的反卷积)。详情请参考: `Conv2DTranspose `_ 。 输入 ``X`` 和输出 ``Out`` 的函数关系如下: @@ -71,7 +74,7 @@ Conv2DTranspose with fluid.dygraph.guard(): data = np.random.random((3, 32, 32, 5)).astype('float32') conv2DTranspose = fluid.dygraph.nn.Conv2DTranspose( - 'Conv2DTranspose', num_filters=2, filter_size=3) + num_channels=32, num_filters=2, filter_size=3) ret = conv2DTranspose(fluid.dygraph.base.to_variable(data)) 属性 diff --git a/doc/fluid/api_cn/dygraph_cn/Conv2D_cn.rst b/doc/fluid/api_cn/dygraph_cn/Conv2D_cn.rst index 0dd3f2de92119fb6ccfb5d482ab7c6b43469e3bc..3e81c4a31738d78234527178a1408c7cc03519ef 100644 --- a/doc/fluid/api_cn/dygraph_cn/Conv2D_cn.rst +++ b/doc/fluid/api_cn/dygraph_cn/Conv2D_cn.rst @@ -5,6 +5,9 @@ Conv2D .. 
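(Editor's note.) Since the hunk above only adds alias metadata for ``BatchNorm``, a short usage sketch may help; it is an editor-added illustration, not part of the original page, and the channel count 10 and input shape are arbitrary assumptions.

.. code-block:: python

    import numpy as np
    import paddle.fluid as fluid

    x = np.random.random((3, 10, 3, 7)).astype('float32')   # NCHW layout, 10 channels
    with fluid.dygraph.guard():
        batch_norm = fluid.dygraph.BatchNorm(10)             # num_channels must match C
        out = batch_norm(fluid.dygraph.to_variable(x))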
py:class:: paddle.fluid.dygraph.Conv2D(num_channels, num_filters, filter_size, stride=1, padding=0, dilation=1, groups=None, param_attr=None, bias_attr=None, use_cudnn=True, act=None, dtype='float32') + + + 该接口用于构建 ``Conv2D`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。其将在神经网络中构建一个二维卷积层(Convolution2D Layer),其根据输入、滤波器参数(num_filters、filter_size)、步长(stride)、填充(padding)、膨胀系数(dilation)、组数(groups)参数来计算得到输出特征图。输入和输出是 ``NCHW`` 格式,N是批数据大小,C是特征图个数,H是特征图高度,W是特征图宽度。滤波器的维度是 [M, C, H, W] ,M是输出特征图个数,C是输入特征图个数,H是滤波器高度,W是滤波器宽度。如果组数大于1,C等于输入特征图个数除以组数的结果。如果提供了偏移属性和激活函数类型,卷积的结果会和偏移相加,激活函数会作用在最终结果上。详情请参考: `卷积 `_ 。 对每个输入 ``X`` ,有等式: @@ -43,7 +46,7 @@ Conv2D 参数: - **num_channels** (int) - 输入图像的通道数。 - - **num_fliters** (int) - 滤波器的个数,和输出特征图个数相同。 + - **num_filters** (int) - 滤波器的个数,和输出特征图个数相同。 - **filter_size** (int|tuple) - 滤波器大小。如果 ``filter_size`` 是一个元组,则必须包含两个整型数,分别表示滤波器高度和宽度。否则,表示滤波器高度和宽度均为 ``filter_size`` 。 - **stride** (int|tuple, 可选) - 步长大小。如果 ``stride`` 为元组,则必须包含两个整型数,分别表示垂直和水平滑动步长。否则,表示垂直和水平滑动步长均为 ``stride`` 。默认值:1。 - **padding** (int|tuple, 可选) - 填充大小。如果 ``padding`` 为元组,则必须包含两个整型数,分别表示竖直和水平边界填充大小。否则,表示竖直和水平边界填充大小均为 ``padding`` 。默认值:0。 diff --git a/doc/fluid/api_cn/dygraph_cn/Conv3DTranspose_cn.rst b/doc/fluid/api_cn/dygraph_cn/Conv3DTranspose_cn.rst index cda322bc004a5d0b595a534d8d83fca7e3639f4f..a3e4134d1e6a31dd8e206bcf5546d511a79e9d7a 100644 --- a/doc/fluid/api_cn/dygraph_cn/Conv3DTranspose_cn.rst +++ b/doc/fluid/api_cn/dygraph_cn/Conv3DTranspose_cn.rst @@ -6,6 +6,9 @@ Conv3DTranspose .. py:class:: paddle.fluid.dygraph.Conv3DTranspose(num_channels, num_filters, filter_size, output_size=None, padding=0, stride=1, dilation=1, groups=None, param_attr=None, bias_attr=None, use_cudnn=True, act=None, name=None, dtype="float32") + + + 该接口用于构建 ``Conv3DTranspose`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。3D卷积转置层(Convlution3D transpose layer)根据输入(input)、滤波器(filter)和卷积核膨胀(dilations)、步长(stride)、填充来计算输出特征层大小或者通过output_size指定输出特征层大小。输入(Input)和输出(Output)为NCDHW格式。其中 ``N`` 为batch大小, ``C`` 为通道数(channel), ``D`` 为特征深度, ``H`` 为特征高度, ``W`` 为特征宽度。转置卷积的计算过程相当于卷积的反向计算。转置卷积又被称为反卷积(但其实并不是真正的反卷积)。欲了解卷积转置层细节,请参考下面的说明和 参考文献_ 。如果参数bias_attr不为False, 转置卷积计算会添加偏置项。如果act不为None,则转置卷积计算之后添加相应的激活函数。 diff --git a/doc/fluid/api_cn/dygraph_cn/Conv3D_cn.rst b/doc/fluid/api_cn/dygraph_cn/Conv3D_cn.rst index 7199bf75b2513a4a3e1c3bac96f6c68ab79f35b0..9e0291edba17ab22bc5288e21b4e16ee5a0305f0 100644 --- a/doc/fluid/api_cn/dygraph_cn/Conv3D_cn.rst +++ b/doc/fluid/api_cn/dygraph_cn/Conv3D_cn.rst @@ -6,6 +6,9 @@ Conv3D .. py:class:: paddle.fluid.dygraph.Conv3D(num_channels, num_filters, filter_size, stride=1, padding=0, dilation=1, groups=None, param_attr=None, bias_attr=None, use_cudnn=True, act=None, dtype="float32") + + + 该接口用于构建 ``Conv3D`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。3D卷积层(convolution3D layer)根据输入、滤波器(filter)、步长(stride)、填充(padding)、膨胀(dilations)、组数参数计算得到输出。输入和输出是[N, C, D, H, W]的多维tensor,其中N是批尺寸,C是通道数,D是特征深度,H是特征高度,W是特征宽度。卷积三维(Convlution3D)和卷积二维(Convlution2D)相似,但多了一维深度(depth)。如果提供了bias属性和激活函数类型,bias会添加到卷积(convolution)的结果中相应的激活函数会作用在最终结果上。 对每个输入X,有等式: diff --git a/doc/fluid/api_cn/dygraph_cn/CosineDecay_cn.rst b/doc/fluid/api_cn/dygraph_cn/CosineDecay_cn.rst index 321f7d607e73bc9a65b506471d72dd0bf261f6f6..92ffd6338eddd0034b8d2cb3f06ddb7d9c537018 100644 --- a/doc/fluid/api_cn/dygraph_cn/CosineDecay_cn.rst +++ b/doc/fluid/api_cn/dygraph_cn/CosineDecay_cn.rst @@ -3,10 +3,13 @@ CosineDecay ------------------------------- -**注意:该API仅支持【动态图】模式** .. 
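(Editor's note.) For the ``CosineDecay`` schedule documented in this hunk, a brief editor-added usage sketch shows how the schedule is passed to a dygraph optimizer; the embedding size, base learning rate, steps per epoch and epoch count are arbitrary assumptions.

.. code-block:: python

    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        emb = fluid.dygraph.Embedding(size=[10, 10])
        sgd = fluid.optimizer.SGDOptimizer(
            learning_rate=fluid.dygraph.CosineDecay(
                0.0125, step_each_epoch=10000, epochs=120),
            parameter_list=emb.parameters())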
py:class:: paddle.fluid.dygraph.CosineDecay(learning_rate, step_each_epoch, epochs, begin=0, step=1, dtype='float32') +:api_attr: 命令式编程模式(动态图) + + + 该接口提供按余弦函数衰减学习率的功能。 余弦衰减的计算方式如下。 diff --git a/doc/fluid/api_cn/dygraph_cn/DataParallel_cn.rst b/doc/fluid/api_cn/dygraph_cn/DataParallel_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0258a6136d02cbcc5f822fbc7f5d066136cf2d27 --- /dev/null +++ b/doc/fluid/api_cn/dygraph_cn/DataParallel_cn.rst @@ -0,0 +1,208 @@ +.. _cn_api_fluid_dygraph_DataParallel: + +DataParallel +------------ + +.. py:class:: paddle.fluid.dygraph.DataParallel(layers, strategy) + +:api_attr: 命令式编程模式(动态图) + +通过数据并行模式执行动态图模型。 + +目前,``DataParallel`` 仅支持以多进程的方式执行动态图模型。 + +支持两种使用方式: + +1. 使用 ``paddle.distributed.spawn`` 方法启动,例如: + + ``python demo.py`` (spawn need to be called in ``__main__`` method) + +2. 使用 ``paddle.distributed.launch`` 方法启动,例如: + +``python -m paddle.distributed.launch –selected_gpus=0,1 demo.py`` + +其中 ``demo.py`` 脚本的代码可以是下面的示例代码。 + +参数: + - **Layer** (Layer) - 需要通过数据并行方式执行的模型。 + - **strategy** (ParallelStrategy,可选) - (deprecated) 数据并行的策略,包括并行执行的环境配置。默认为None。 + +返回:支持数据并行的 ``Layer`` + +返回类型:Layer实例 + +**代码示例**: + +.. code-block:: python + + import paddle + import paddle.nn as nn + import paddle.optimizer as opt + import paddle.distributed as dist + + class LinearNet(nn.Layer): + def __init__(self): + super(LinearNet, self).__init__() + self._linear1 = nn.Linear(10, 10) + self._linear2 = nn.Linear(10, 1) + + def forward(self, x): + return self._linear2(self._linear1(x)) + + def train(): + # 1. enable dynamic mode + paddle.disable_static() + + # 2. initialize parallel environment + dist.init_parallel_env() + + # 3. create data parallel layer & optimizer + layer = LinearNet() + dp_layer = paddle.DataParallel(layer) + + loss_fn = nn.MSELoss() + adam = opt.Adam( + learning_rate=0.001, parameters=dp_layer.parameters()) + + # 4. run layer + inputs = paddle.randn([10, 10], 'float32') + outputs = dp_layer(inputs) + labels = paddle.randn([10, 1], 'float32') + loss = loss_fn(outputs, labels) + + loss = dp_layer.scale_loss(loss) + loss.backward() + dp_layer.apply_collective_grads() + + adam.step() + adam.clear_grad() + + if __name__ == '__main__': + # 1. start by ``paddle.distributed.spawn`` (default) + dist.spawn(train, nprocs=2) + # 2. start by ``paddle.distributed.launch`` + # train() + +.. py:method:: scale_loss(loss) + +缩放模型损失值 ``loss`` 。在数据并行模式中,损失值 ``loss`` 需要根据并行训练进程的数目进行缩放。 + +如果不在数据并行模式下,会直接返回原 ``loss`` 。 + +参数: + - **loss** (Variable) - 当前模型的损失值。 + +返回:缩放后的损失值 ``loss`` + +返回类型:Variable + +**代码示例** + +.. code-block:: python + + import paddle + import paddle.nn as nn + import paddle.optimizer as opt + import paddle.distributed as dist + + class LinearNet(nn.Layer): + def __init__(self): + super(LinearNet, self).__init__() + self._linear1 = nn.Linear(10, 10) + self._linear2 = nn.Linear(10, 1) + + def forward(self, x): + return self._linear2(self._linear1(x)) + + def train(): + # 1. enable dynamic mode + paddle.disable_static() + + # 2. initialize parallel environment + dist.init_parallel_env() + + # 3. create data parallel layer & optimizer + layer = LinearNet() + dp_layer = paddle.DataParallel(layer) + + loss_fn = nn.MSELoss() + adam = opt.Adam( + learning_rate=0.001, parameters=dp_layer.parameters()) + + # 4. 
run layer + inputs = paddle.randn([10, 10], 'float32') + outputs = dp_layer(inputs) + labels = paddle.randn([10, 1], 'float32') + loss = loss_fn(outputs, labels) + + loss = dp_layer.scale_loss(loss) + loss.backward() + dp_layer.apply_collective_grads() + + adam.step() + adam.clear_grad() + + if __name__ == '__main__': + # 1. start by ``paddle.distributed.spawn`` (default) + dist.spawn(train, nprocs=2) + # 2. start by ``paddle.distributed.launch`` + # train() + + +.. py:method:: apply_collective_grads() + +AllReduce(规约)参数的梯度值。 + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle + import paddle.nn as nn + import paddle.optimizer as opt + import paddle.distributed as dist + + class LinearNet(nn.Layer): + def __init__(self): + super(LinearNet, self).__init__() + self._linear1 = nn.Linear(10, 10) + self._linear2 = nn.Linear(10, 1) + + def forward(self, x): + return self._linear2(self._linear1(x)) + + def train(): + # 1. enable dynamic mode + paddle.disable_static() + + # 2. initialize parallel environment + dist.init_parallel_env() + + # 3. create data parallel layer & optimizer + layer = LinearNet() + dp_layer = paddle.DataParallel(layer) + + loss_fn = nn.MSELoss() + adam = opt.Adam( + learning_rate=0.001, parameters=dp_layer.parameters()) + + # 4. run layer + inputs = paddle.randn([10, 10], 'float32') + outputs = dp_layer(inputs) + labels = paddle.randn([10, 1], 'float32') + loss = loss_fn(outputs, labels) + + loss = dp_layer.scale_loss(loss) + loss.backward() + dp_layer.apply_collective_grads() + + adam.step() + adam.clear_grad() + + if __name__ == '__main__': + # 1. start by ``paddle.distributed.spawn`` (default) + dist.spawn(train, nprocs=2) + # 2. start by ``paddle.distributed.launch`` + # train() diff --git a/doc/fluid/api_cn/dygraph_cn/Dropout_cn.rst b/doc/fluid/api_cn/dygraph_cn/Dropout_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ce72d582a0abaad2c4db1f2634049388203f8b51 --- /dev/null +++ b/doc/fluid/api_cn/dygraph_cn/Dropout_cn.rst @@ -0,0 +1,53 @@ +.. _cn_api_fluid_dygraph_Dropout: + +Dropout +------------------------------- + +.. py:class:: paddle.fluid.dygraph.Dropout(p=0.5, seed=None, dropout_implementation='downgrade_in_infer', is_test=False) + +丢弃或者保持输入的每个元素独立。Dropout是一种正则化手段,通过在训练过程中阻止神经元节点间的相关性来减少过拟合。根据给定的丢弃概率,dropout操作符按丢弃概率随机将一些神经元输出设置为0,其他的仍保持不变。 + +Dropout层可以删除,提高执行效率。 + +参数: + - **p** (float32,可选) - 输入单元的丢弃概率,即输入单元设置为0的概率。默认值:0.5 + - **seed** (int,可选) - 整型数据,用于创建随机种子。如果该参数设为None,则使用随机种子。注:如果给定一个整型种子,始终丢弃相同的输出单元。训练过程中勿用固定不变的种子。默认值:None。 + - **dropout_implementation** (str,可选) - 丢弃单元的方式,有两种'downgrade_in_infer'和'upscale_in_train'两种选择,默认:'downgrade_in_infer'。具体作用可以参考一下描述。 + + 1. downgrade_in_infer(default), 在预测时减小输出结果 + + - train: out = input * mask + + - inference: out = input * (1.0 - p) + + (mask是一个张量,维度和输入维度相同,值为0或1,值为0的比例即为 ``p`` ) + + 2. upscale_in_train, 增加训练时的结果 + + - train: out = input * mask / ( 1.0 - p ) + + - inference: out = input + + (mask是一个张量,维度和输入维度相同,值为0或1,值为0的比例即为 ``p`` ) + + - **is_test** (bool,可选) - 标记是否是测试阶段。此标志仅对静态图模式有效。对于动态图模式,请使用 ``eval()`` 接口。默认:False。 + +返回:无 + +**代码示例**: + +.. 
code-block:: python + + import paddle.fluid as fluid + from paddle.fluid.dygraph.base import to_variable + import numpy as np + + x = np.random.random(size=(3, 10, 3, 7)).astype('float32') + with fluid.dygraph.guard(): + x = to_variable(x) + m = fluid.dygraph.Dropout(p=0.5) + droped_train = m(x) + # 切换到 eval 模式 + m.eval() + droped_eval = m(x) + diff --git a/doc/fluid/api_cn/dygraph_cn/Embedding_cn.rst b/doc/fluid/api_cn/dygraph_cn/Embedding_cn.rst index d285ace3f0e1a2871e61313714d81f49d95243cb..985bf1c9eb834aa024ebab792a00b151328ef77a 100644 --- a/doc/fluid/api_cn/dygraph_cn/Embedding_cn.rst +++ b/doc/fluid/api_cn/dygraph_cn/Embedding_cn.rst @@ -5,6 +5,12 @@ Embedding .. py:class:: paddle.fluid.dygraph.Embedding(size, is_sparse=False, is_distributed=False, padding_idx=None, param_attr=None, dtype='float32') +:alias_main: paddle.nn.Embedding +:alias: paddle.nn.Embedding,paddle.nn.layer.Embedding,paddle.nn.layer.common.Embedding +:old_api: paddle.fluid.dygraph.Embedding + + + 嵌入层(Embedding Layer) 该接口用于构建 ``Embedding`` 的一个可调用对象,具体用法参照 ``代码示例`` 。其根据input中的id信息从embedding矩阵中查询对应embedding信息,并会根据输入的size (vocab_size, emb_size)和dtype自动构造一个二维embedding矩阵。 diff --git a/doc/fluid/api_cn/dygraph_cn/ExponentialDecay_cn.rst b/doc/fluid/api_cn/dygraph_cn/ExponentialDecay_cn.rst index 0bb670041b2595a4b3fa74565990e66fd696bff4..8a110ba82f47d912428e2e8b6bec45d30e62a739 100644 --- a/doc/fluid/api_cn/dygraph_cn/ExponentialDecay_cn.rst +++ b/doc/fluid/api_cn/dygraph_cn/ExponentialDecay_cn.rst @@ -3,10 +3,13 @@ ExponentialDecay ------------------------------- -**注意:该API仅支持【动态图】模式** .. py:class:: paddle.fluid.dygraph.ExponentialDecay(learning_rate, decay_steps, decay_rate, staircase=False, begin=0, step=1, dtype=’float32‘) +:api_attr: 命令式编程模式(动态图) + + + 该接口提供一种学习率按指数函数衰减的功能。 指数衰减的计算方式如下。 diff --git a/doc/fluid/api_cn/dygraph_cn/GRUUnit_cn.rst b/doc/fluid/api_cn/dygraph_cn/GRUUnit_cn.rst index e0b44fa34f890a6cf159841c9f5e586849c12a19..c2f986da1df15d700808d8f57596f15e0c7c6c6c 100644 --- a/doc/fluid/api_cn/dygraph_cn/GRUUnit_cn.rst +++ b/doc/fluid/api_cn/dygraph_cn/GRUUnit_cn.rst @@ -5,6 +5,9 @@ GRUUnit .. py:class:: paddle.fluid.dygraph.GRUUnit(name_scope, size, param_attr=None, bias_attr=None, activation='tanh', gate_activation='sigmoid', origin_mode=False, dtype='float32') + + + 该接口用于构建 ``GRU(Gated Recurrent Unit)`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。其用于完成单个时间步内GRU的计算,支持以下两种计算方式: 如果origin_mode为True,则使用的运算公式来自论文 diff --git a/doc/fluid/api_cn/dygraph_cn/GroupNorm_cn.rst b/doc/fluid/api_cn/dygraph_cn/GroupNorm_cn.rst index e1f3aaa149c685a948839b813602deb9b6c1b671..5a619d25a616a3cc8922373c335fad404623ba6d 100644 --- a/doc/fluid/api_cn/dygraph_cn/GroupNorm_cn.rst +++ b/doc/fluid/api_cn/dygraph_cn/GroupNorm_cn.rst @@ -5,6 +5,12 @@ GroupNorm .. py:class:: paddle.fluid.dygraph.GroupNorm(channels, groups, epsilon=1e-05, param_attr=None, bias_attr=None, act=None, data_layout='NCHW', dtype="float32") +:alias_main: paddle.nn.GroupNorm +:alias: paddle.nn.GroupNorm,paddle.nn.layer.GroupNorm,paddle.nn.layer.norm.GroupNorm +:old_api: paddle.fluid.dygraph.GroupNorm + + + **Group Normalization层** 该接口用于构建 ``GroupNorm`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。其中实现了组归一化层的功能。更多详情请参考: `Group Normalization `_ 。 diff --git a/doc/fluid/api_cn/dygraph_cn/InstanceNorm_cn.rst b/doc/fluid/api_cn/dygraph_cn/InstanceNorm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..334fa5109f3ba0f52b1039357e08032671cd1849 --- /dev/null +++ b/doc/fluid/api_cn/dygraph_cn/InstanceNorm_cn.rst @@ -0,0 +1,51 @@ +.. 
_cn_api_fluid_dygraph_InstanceNorm: + +InstanceNorm +------------------------------- + +.. py:class:: paddle.fluid.dygraph.InstanceNorm(num_channels, epsilon=1e-05, param_attr=None, bias_attr=None, dtype='float32') + +该接口用于构建 ``InstanceNorm`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。 + +可用作卷积和全连接操作的实例正则化函数,根据每个样本的每个通道的均值和方差信息进行正则化。该层需要的数据格式如下: + +NCHW[batch,in_channels,in_height,in_width] + +更多详情请参考 : `Instance Normalization: The Missing Ingredient for Fast Stylization `_ + +``input`` 是mini-batch的输入。 + +.. math:: + \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \quad &// mean of each channel in each sample in a batch \\ + \sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i - \mu_{\beta})^2 \quad &// variance of each channel in each sample a batch \\ + \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\sigma_{\beta}^{2} + \epsilon}} \quad &// normalize \\ + y_i &\gets \gamma \hat{x_i} + \beta \quad &// scale-and-shift + + +参数: + - **num_channels** (int)- 指明输入 ``Tensor`` 的通道数量。 + - **epsilon** (float,默认1e-05)- 为了当前输入做标准化时得到稳定的结果而加在的分母上的扰动值。默认值为1e-5。 + - **param_attr** (ParamAttr|None) - instance_norm 权重参数的属性,可以设置为None或者一个ParamAttr的类(ParamAttr中可以指定参数的各种属性)。 如果设为None,则默认的参数初始化为1.0。如果在ParamAttr指定了属性时, instance_norm创建相应属性的param_attr(权重)参数。默认:None。 + - **bias_attr** (ParamAttr|None) - instance_norm 偏置参数的属性,可以设置为None或者一个ParamAttr的类(ParamAttr中可以指定参数的各种属性)。如果设为None,默认的参数初始化为0.0。如果在ParamAttr指定了参数的属性时, instance_norm创建相应属性的bias_attr(偏置)参数。默认:None。 + - **dtype** (string,默认float32)- 指明输入 ``Tensor`` 的数据类型,可以为float32或float64。默认:float32。 + +返回:无 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + from paddle.fluid.dygraph.base import to_variable + import numpy as np + import paddle + + # x's shape is [1, 3, 1, 2] + x = np.array([[[[1.0, 8.0]], [[10.0, 5.0]], [[4.0, 6.0]]]]).astype('float32') + with fluid.dygraph.guard(): + x = to_variable(x) + instanceNorm = paddle.nn.InstanceNorm(3) + ret = instanceNorm(x) + # ret's shape is [1, 3, 1, 2]; value is [-1 1 0.999999 -0.999999 -0.999995 0.999995] + print(ret) + diff --git a/doc/fluid/api_cn/dygraph_cn/InverseTimeDecay_cn.rst b/doc/fluid/api_cn/dygraph_cn/InverseTimeDecay_cn.rst index 70bf0b9c37ce9fcb7f20ea448c20fb03f1a2dbe8..c693cc97791290950e90b6d5126edccb7d8ceed4 100644 --- a/doc/fluid/api_cn/dygraph_cn/InverseTimeDecay_cn.rst +++ b/doc/fluid/api_cn/dygraph_cn/InverseTimeDecay_cn.rst @@ -3,10 +3,13 @@ InverseTimeDecay ------------------------------- -**注意:该API仅支持【动态图】模式** .. py:class:: paddle.fluid.dygraph.InverseTimeDecay(learning_rate, decay_steps, decay_rate, staircase=False, begin=0, step=1, dtype='float32') +:api_attr: 命令式编程模式(动态图) + + + 该接口提供反时限学习率衰减的功能。 反时限学习率衰减计算方式如下。 diff --git a/doc/fluid/api_cn/dygraph_cn/LambdaDecay_cn.rst b/doc/fluid/api_cn/dygraph_cn/LambdaDecay_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1b59cfbaf888fe971e6d39a72579848108d57094 --- /dev/null +++ b/doc/fluid/api_cn/dygraph_cn/LambdaDecay_cn.rst @@ -0,0 +1,65 @@ +.. _cn_api_fluid_dygraph_LambdaDecay: + +LambdaDecay +------------------------------- + + +.. py:class:: paddle.fluid.dygraph.LambdaDecay(learning_rate, lr_lambda) + +:api_attr: 命令式编程模式(动态图) + + +该API提供 lambda函数 设置学习率的功能。 ``lr_lambda`` 为一个lambda函数,其通过 ``epoch`` 计算出一个因子,该因子会乘以初始学习率。 + +算法可以描述为: + +.. 
code-block:: text + + learning_rate = 0.5 # init learning_rate + lr_lambda = lambda epoch: 0.95 ** epoch + + learning_rate = 0.5 # epoch 0 + learning_rate = 0.475 # epoch 1 + learning_rate = 0.45125 # epoch 2 + +参数: + - **learning_rate** (float|int) - 初始化的学习率。可以是Python的float或int。 + - **lr_lambda** (function) - ``lr_lambda`` 为一个lambda函数,其通过 ``epoch`` 计算出一个因子,该因子会乘以初始学习率。 + +返回: 无 + +**代码示例**: + + .. code-block:: python + + import paddle.fluid as fluid + import numpy as np + with fluid.dygraph.guard(): + x = np.random.uniform(-1, 1, [10, 10]).astype("float32") + linear = fluid.dygraph.Linear(10, 10) + input = fluid.dygraph.to_variable(x) + scheduler = fluid.dygraph.LambdaDecay(0.5, lr_lambda=lambda x: 0.95**x) + adam = fluid.optimizer.Adam(learning_rate = scheduler, parameter_list = linear.parameters()) + for epoch in range(6): + for batch_id in range(5): + out = linear(input) + loss = fluid.layers.reduce_mean(out) + adam.minimize(loss) + scheduler.epoch() + print("epoch:%d, current lr is %f" .format(epoch, adam.current_step_lr())) + # epoch:0, current lr is 0.5 + # epoch:1, current lr is 0.475 + # epoch:2, current lr is 0.45125 + +.. py:method:: epoch(epoch=None) +通过当前的 epoch 调整学习率,调整后的学习率将会在下一次调用 ``optimizer.minimize`` 时生效。 + +参数: + - **epoch** (int|float,可选) - 类型:int或float。指定当前的epoch数。默认:无,此时将会自动累计epoch数。 + +返回: + 无 + +**代码示例**: + + 参照上述示例代码。 diff --git a/doc/fluid/api_cn/dygraph_cn/LayerList_cn.rst b/doc/fluid/api_cn/dygraph_cn/LayerList_cn.rst index 7186e7df7ccd072d56f960d5ac20c1f8f3dce6c9..0dc2468dff0ca366300ee50558cab2c56df68aad 100644 --- a/doc/fluid/api_cn/dygraph_cn/LayerList_cn.rst +++ b/doc/fluid/api_cn/dygraph_cn/LayerList_cn.rst @@ -5,6 +5,9 @@ LayerList .. py:class:: paddle.fluid.dygraph.LayerList(sublayers=None) + + + LayerList用于保存子层列表,它包含的子层将被正确地注册和添加。列表中的子层可以像常规python列表一样被索引。 参数: diff --git a/doc/fluid/api_cn/dygraph_cn/LayerNorm_cn.rst b/doc/fluid/api_cn/dygraph_cn/LayerNorm_cn.rst index 96ef964915564c591d08dc217a7fd588f883d4a3..2fd08fe7ab4ddf0fb1055a49bb96ac70bcb087d5 100644 --- a/doc/fluid/api_cn/dygraph_cn/LayerNorm_cn.rst +++ b/doc/fluid/api_cn/dygraph_cn/LayerNorm_cn.rst @@ -5,6 +5,12 @@ LayerNorm .. py:class:: paddle.fluid.dygraph.LayerNorm(normalized_shape, scale=True, shift=True, begin_norm_axis=1, epsilon=1e-05, param_attr=None, bias_attr=None, act=None, dtype="float32") +:alias_main: paddle.nn.LayerNorm +:alias: paddle.nn.LayerNorm,paddle.nn.layer.LayerNorm,paddle.nn.layer.norm.LayerNorm +:old_api: paddle.fluid.dygraph.LayerNorm + + + 该接口用于构建 ``LayerNorm`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。其中实现了层归一化层(Layer Normalization Layer)的功能,其可以应用于小批量输入数据。更多详情请参考:`Layer Normalization `_ 计算公式如下 @@ -47,7 +53,7 @@ LayerNorm x = numpy.random.random((3, 32, 32)).astype('float32') with fluid.dygraph.guard(): x = to_variable(x) - layernorm = fluid.LayerNorm('LayerNorm', begin_norm_axis=1) - ret = layernorm(x) + layerNorm = fluid.LayerNorm([32, 32]) + ret = layerNorm(x) diff --git a/doc/fluid/api_cn/dygraph_cn/Layer_cn.rst b/doc/fluid/api_cn/dygraph_cn/Layer_cn.rst index fcc1bf6fc03b41cb7bbd1095fb754d9b4c115944..ffce7959f30a98d46783db09b3c4f8b0a657777b 100644 --- a/doc/fluid/api_cn/dygraph_cn/Layer_cn.rst +++ b/doc/fluid/api_cn/dygraph_cn/Layer_cn.rst @@ -5,6 +5,9 @@ Layer .. py:class:: paddle.fluid.dygraph.Layer(name_scope=None, dtype=core.VarDesc.VarType.FP32) + + + 基于OOD实现的动态图Layer,包含该Layer的参数、前序运行的结构等信息。 参数: @@ -13,6 +16,18 @@ Layer 返回:无 +.. py:method:: train() + +将此层及其所有子层设置为训练模式。这只会影响某些模块,如Dropout和BatchNorm。 + +返回:无 + +.. 
py:method:: eval() + +将此层及其所有子层设置为预测模式。这只会影响某些模块,如Dropout和BatchNorm。 + +返回:无 + .. py:method:: full_name() Layer的全名。组成方式为: ``name_scope`` + “/” + MyLayer.__class__.__name__ 。 @@ -21,6 +36,100 @@ Layer的全名。组成方式为: ``name_scope`` + “/” + MyLayer.__class__ 返回类型:str +.. py:method:: register_forward_pre_hook(hook) + +为Layer注册一个 ``forward pre-hook`` 函数,该 ``hook`` 函数将会在 ``forward`` 函数调用之前被调用。 + +``hook`` 函数具有以下形式:它的 ``input`` 是 ``Layer`` 的 ``input`` ,并且可以返回一个元组或者单个修改值;如果返回单个修改值,则将值包装到一个元组中。用户可以使用该函数来查看或修改 ``Layer`` ``forward`` 函数的输入。 + +hook(Layer, input) -> None or modified input + +参数: + - **hook** (function) - 被注册为 ``forward pre-hook`` 的函数 + +返回:一个 ``HookRemoveHelper`` 类对象,可通过调用 ``hook_remove_helper.remove()`` 来删除注册的hook函数。 + +返回类型: ``HookRemoveHelper`` 类对象 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + # forward_pre_hook函数修改了layer的输入:input = input * 2 + def forward_pre_hook(layer, input): + # 改变输入值 + input_return = (input[0] * 2) + return input_return + + with fluid.dygraph.guard(): + linear = fluid.Linear(13, 5, dtype="float32") + + # 注册hook + forward_pre_hook_handle = linear.register_forward_pre_hook(forward_pre_hook) + + value0 = np.arange(26).reshape(2, 13).astype("float32") + in0 = fluid.dygraph.to_variable(value0) + out0 = linear(in0) + + # 移除hook + forward_pre_hook_handle.remove() + + value1 = value0 * 2 + in1 = fluid.dygraph.to_variable(value1) + out1 = linear(in1) + + # hook改变了layer的输入(input = input * 2),所以out0等于out1 + assert (out0.numpy() == out1.numpy()).any() + +.. py:method:: register_forward_post_hook(hook) + +为Layer注册一个 ``forward post-hook`` 函数,该 ``hook`` 函数将会在 ``forward`` 函数调用之后被调用。 + +``hook`` 函数具有以下形式,它的 ``input`` 和 ``output`` 是 ``Layer`` 的 ``input`` 和 ``output`` 。用户可以用该函数来查看和修改 ``Layer`` ``forward`` 函数的输出。 + +hook(Layer, input, output) -> None or modified output + +参数: + - **hook** (function) - 被注册为 ``forward post-hook`` 的函数 + +返回:一个 ``HookRemoveHelper`` 类对象,可通过调用 ``hook_remove_helper.remove()`` 来删除注册的hook函数。 + +返回类型: ``HookRemoveHelper`` 类对象 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + # forward_post_hook函数改变了layer的输出:output = output * 2 + def forward_post_hook(layer, input, output): + # 改变输出值 + return output * 2 + + with fluid.dygraph.guard(): + linear = fluid.Linear(13, 5, dtype="float32") + + # 注册hook + forward_post_hook_handle = linear.register_forward_post_hook(forward_post_hook) + + value1 = np.arange(26).reshape(2, 13).astype("float32") + in1 = fluid.dygraph.to_variable(value1) + + out0 = linear(in1) + + # remove the hook + forward_post_hook_handle.remove() + + out1 = linear(in1) + + # hook改变了layer的输出(output = output * 2),所以out0等于out1 * 2 + assert (out0.numpy() == (out1.numpy()) * 2).any() + .. py:method:: create_parameter(shape, attr=None, dtype="float32", is_bias=False, default_initializer=None) 为Layer创建参数。 @@ -147,6 +256,87 @@ Layer的全名。组成方式为: ``name_scope`` + “/” + MyLayer.__class__ for prefix, layer in model.named_sublayers(): print(prefix, layer) +.. py:method:: register_buffer(name, variable, persistable=True) + +将一个Variable注册为buffer。 + +buffer是一个非参数类型的变量,不会被优化器更新,但在评估或预测阶段可能是必要的状态变量。比如 ``BatchNorm`` 中的均值和方差。 + +注册的buffer默认是可持久性的,会被保存到 ``state_dict`` 中。如果指定 ``persistable`` 参数为False,则会注册一个非持久性的buffer,即不会同步和保存到 ``state_dict`` 中。 + +参数: + - **name** (str) - 注册buffer的名字。可以通过此名字来访问已注册的buffer。 + - **variable** (Variable) - 将被注册为buffer的变量。 + - **persistable** (bool, 可选) - 注册的buffer是否需要可持久性地保存到 ``state_dict`` 中。 + +返回:None + +返回类型:None + +**代码示例** + +.. 
code-block:: python + + import numpy as np + import paddle.fluid as fluid + + with fluid.dygraph.guard(): + linear = fluid.Linear(10, 3) + value = np.array([0]).astype("float32") + buffer = fluid.dygraph.to_variable(value) + linear.register_buffer("buf_name", buffer, persistable=True) + + # get the buffer by attribute. + print(linear.buf_name) + +.. py:method:: buffers(include_sublayers=True) + +返回一个由当前层及其子层的所有buffers组成的列表。 + +参数: + - **include_sublayers** (bool, 可选) - 是否返回子层的buffers。如果为True,返回的列表中包含子层的buffers。默认值:True。 + +返回:一个由当前层及其子层的所有buffers组成的列表,列表中的元素类型为Variable。 + +返回类型:list + +.. py:method:: named_buffers(prefix='', include_sublayers=True) + +返回层中所有buffers的迭代器,生成名称和buffer的元组。 + +参数: + - **prefix** (str, 可选) - 在所有buffer名称前加的前缀。默认值:''。 + - **include_sublayers** (bool, 可选) - 是否返回子层的buffers。如果为True,返回的列表中包含子层的buffers。默认值:True。 + +返回:产出名称和buffer的元组的迭代器。 + +返回类型:iterator + +**代码示例** + +.. code-block:: python + + import numpy as np + import paddle.fluid as fluid + + with fluid.dygraph.guard(): + fc1 = fluid.Linear(10, 3) + buffer1 = fluid.dygraph.to_variable(np.array([0]).astype("float32")) + # register a variable as buffer by specific `persistable` + fc1.register_buffer("buf_name_1", buffer1, persistable=True) + + fc2 = fluid.Linear(3, 10) + buffer2 = fluid.dygraph.to_variable(np.array([1]).astype("float32")) + # register a buffer by assigning an attribute with Variable. + # The `persistable` can only be False by this way. + fc2.buf_name_2 = buffer2 + + model = fluid.dygraph.Sequential(fc1, fc2) + + # get all named buffers + for name, buffer in model.named_buffers(): + print(name, buffer) + .. py:method:: forward(*inputs, **kwargs) 定义每次调用时执行的计算。应该被所有子类覆盖。 @@ -181,13 +371,13 @@ Layer的全名。组成方式为: ``name_scope`` + “/” + MyLayer.__class__ .. py:method:: state_dict(destination=None, include_sublayers=True) -获取当前层及其子层的所有参数。并将所有参数存放在dict结构中。 +获取当前层及其子层的所有参数和可持久性buffers。并将所有参数和buffers存放在dict结构中。 参数: - - **destination** (dict, 可选) - 如果提供 ``destination`` ,则所有参数都将存放在 ``destination`` 中。 默认值:None。 - - **include_sublayers** (bool, 可选) - 如果设置为True,则包括子层的参数。默认值:True。 + - **destination** (dict, 可选) - 如果提供 ``destination`` ,则所有参数和可持久性buffers都将存放在 ``destination`` 中。 默认值:None。 + - **include_sublayers** (bool, 可选) - 如果设置为True,则包括子层的参数和buffers。默认值:True。 -返回:包含所有参数的dict +返回:包含所有参数和可持久行buffers的dict 返回类型:dict @@ -203,11 +393,11 @@ Layer的全名。组成方式为: ``name_scope`` + “/” + MyLayer.__class__ .. py:method:: set_dict(stat_dict, include_sublayers=True) -根据传入的 ``stat_dict`` 设置参数。 所有参数将由 ``stat_dict`` 中的 ``Tensor`` 设置。 +根据传入的 ``stat_dict`` 设置参数和可持久性buffers。 所有参数和buffers将由 ``stat_dict`` 中的 ``Tensor`` 设置。 参数: - - **state_dict** (dict) - 包含所有参数的dict。 - - **include_sublayers** (bool, 可选) - 如果设置为True,则还包括子层的参数。 默认值:True。 + - **state_dict** (dict) - 包含所有参数和可持久性buffers的dict。 + - **include_sublayers** (bool, 可选) - 如果设置为True,则还包括子层的参数和buffers。 默认值:True。 返回:None @@ -228,11 +418,11 @@ Layer的全名。组成方式为: ``name_scope`` + “/” + MyLayer.__class__ .. 
warning:: 该函数将被弃用。请使用set_dict函数。 -根据传入的 ``stat_dict`` 设置参数。 所有参数将由 ``stat_dict`` 中的 ``Tensor`` 设置。 +根据传入的 ``stat_dict`` 设置参数和可持久性buffers。 所有参数和buffers将由 ``stat_dict`` 中的 ``Tensor`` 设置。 参数: - - **state_dict** (dict) - 包含所有参数的dict。 - - **include_sublayers** (bool, 可选) - 如果设置为True,则还包括子层的参数。 默认值:True。 + - **state_dict** (dict) - 包含所有参数和可持久性buffers的dict。 + - **include_sublayers** (bool, 可选) - 如果设置为True,则还包括子层的参数和buffers。 默认值:True。 返回:None diff --git a/doc/fluid/api_cn/dygraph_cn/Linear_cn.rst b/doc/fluid/api_cn/dygraph_cn/Linear_cn.rst index 848484d4f77adedf5d72c3e047fc38bf6b9c64df..c741deb393c1eda09d4c8daa1521139c564e8ce9 100644 --- a/doc/fluid/api_cn/dygraph_cn/Linear_cn.rst +++ b/doc/fluid/api_cn/dygraph_cn/Linear_cn.rst @@ -5,6 +5,12 @@ Linear .. py:class:: paddle.fluid.dygraph.Linear(input_dim, output_dim, param_attr=None, bias_attr=None, act=None, dtype='float32') +:alias_main: paddle.nn.Linear +:alias: paddle.nn.Linear,paddle.nn.layer.Linear,paddle.nn.layer.common.Linear +:old_api: paddle.fluid.dygraph.Linear + + + **线性变换层:** diff --git a/doc/fluid/api_cn/dygraph_cn/MultiStepDecay_cn.rst b/doc/fluid/api_cn/dygraph_cn/MultiStepDecay_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..896f90066ca8463f34bee8005b0f46bd4fd68a25 --- /dev/null +++ b/doc/fluid/api_cn/dygraph_cn/MultiStepDecay_cn.rst @@ -0,0 +1,72 @@ +.. _cn_api_fluid_dygraph_MultiStepDecay: + +MultiStepDecay +------------------------------- + + +.. py:class:: paddle.fluid.dygraph.MultiStepDecay(learning_rate, milestones, decay_rate=0.1) + +:api_attr: 命令式编程模式(动态图) + + +该接口提供 ``MultiStep`` 衰减学习率的功能。 + +算法可以描述为: + +.. code-block:: text + + learning_rate = 0.5 + milestones = [30, 50] + decay_rate = 0.1 + if epoch < 30: + learning_rate = 0.5 + elif epoch < 50: + learning_rate = 0.05 + else: + learning_rate = 0.005 + +参数: + - **learning_rate** (float|int) - 初始化的学习率。可以是Python的float或int。 + - **milestones** (tuple|list) - 列表或元组。必须是递增的。 + - **decay_rate** (float, optional) - 学习率的衰减率。 ``new_lr = origin_lr * decay_rate`` 。其值应该小于1.0。默认:0.1。 + +返回: 无 + +**代码示例**: + + .. code-block:: python + + import paddle.fluid as fluid + import numpy as np + with fluid.dygraph.guard(): + x = np.random.uniform(-1, 1, [10, 10]).astype("float32") + linear = fluid.dygraph.Linear(10, 10) + input = fluid.dygraph.to_variable(x) + scheduler = fluid.dygraph.MultiStepDecay(0.5, milestones=[3, 5]) + adam = fluid.optimizer.Adam(learning_rate = scheduler, parameter_list = linear.parameters()) + for epoch in range(6): + for batch_id in range(5): + out = linear(input) + loss = fluid.layers.reduce_mean(out) + adam.minimize(loss) + scheduler.epoch() + print("epoch:{}, current lr is {}" .format(epoch, adam.current_step_lr())) + # epoch:0, current lr is 0.5 + # epoch:1, current lr is 0.5 + # epoch:2, current lr is 0.5 + # epoch:3, current lr is 0.05 + # epoch:4, current lr is 0.05 + # epoch:5, current lr is 0.005 + +.. py:method:: epoch(epoch=None) +通过当前的 epoch 调整学习率,调整后的学习率将会在下一次调用 ``optimizer.minimize`` 时生效。 + +参数: + - **epoch** (int|float,可选) - 类型:int或float。指定当前的epoch数。默认:无,此时将会自动累计epoch数。 + +返回: + 无 + +**代码示例**: + + 参照上述示例代码。 diff --git a/doc/fluid/api_cn/dygraph_cn/NCE_cn.rst b/doc/fluid/api_cn/dygraph_cn/NCE_cn.rst index 63bb80d14525c460efcfd91c52a59e687bf722b5..45302572ed5bfb377763073775d19cbed4310079 100644 --- a/doc/fluid/api_cn/dygraph_cn/NCE_cn.rst +++ b/doc/fluid/api_cn/dygraph_cn/NCE_cn.rst @@ -5,6 +5,9 @@ NCE .. 
py:class:: paddle.fluid.dygraph.NCE(num_total_classes, dim, param_attr=None, bias_attr=None, num_neg_samples=None, sampler='uniform', custom_dist=None, seed=0, is_sparse=False, dtype="float32") + + + 该接口用于构建 ``NCE`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。其中实现了 ``NCE`` 损失函数的功能,其默认使用均匀分布进行抽样,计算并返回噪音对比估计( noise-contrastive estimation training loss)。更多详情请参考:`Noise-contrastive estimation: A new estimation principle for unnormalized statistical models `_ 参数: @@ -42,7 +45,6 @@ NCE words.append(fluid.dygraph.base.to_variable(inp_word[i])) emb = fluid.Embedding( - 'embedding', size=[dict_size, 32], param_attr='emb.w', is_sparse=False) @@ -57,17 +59,17 @@ NCE embs3 = fluid.layers.concat(input=embs3, axis=1) nce = fluid.NCE( - num_total_classes=dict_size, - dim=embs3.shape[1], - num_neg_samples=2, - sampler="custom_dist", - custom_dist=nid_freq_arr.tolist(), - seed=1, - param_attr='nce.w', - bias_attr='nce.b') + num_total_classes=dict_size, + dim=embs3.shape[1], + num_neg_samples=2, + sampler="custom_dist", + custom_dist=nid_freq_arr.tolist(), + seed=1, + param_attr='nce.w', + bias_attr='nce.b') wl = fluid.layers.unsqueeze(words[label_word], axes=[0]) - nce_loss3 = nce(embs3, words[label_word]) + nce_loss3 = nce(embs3, wl) 属性 :::::::::::: diff --git a/doc/fluid/api_cn/dygraph_cn/NaturalExpDecay_cn.rst b/doc/fluid/api_cn/dygraph_cn/NaturalExpDecay_cn.rst index b2ad39a8a3f566becda4e8ddf6db206ed89a4da0..03a7e10a1cc4b66180ff118d8408ce21d9f5a30a 100644 --- a/doc/fluid/api_cn/dygraph_cn/NaturalExpDecay_cn.rst +++ b/doc/fluid/api_cn/dygraph_cn/NaturalExpDecay_cn.rst @@ -3,10 +3,13 @@ NaturalExpDecay ------------------------------- -**注意:该API仅支持【动态图】模式** .. py:class:: paddle.fluid.dygraph.NaturalExpDecay(learning_rate, decay_steps, decay_rate, staircase=False, begin=0, step=1, dtype='float32') +:api_attr: 命令式编程模式(动态图) + + + 该接口提供按自然指数衰减学习率的功能。 自然指数衰减的计算方式如下。 @@ -39,7 +42,7 @@ NaturalExpDecay - **staircase** (bool,可选) - 若为True, 学习率变化曲线呈阶梯状,若为False,学习率变化值曲线为平滑的曲线。默认值为False。 - **begin** (int,可选) – 起始步,即以上运算式子中global_step的初始化值。默认值为0。 - **step** (int,可选) – 步大小,即以上运算式子中global_step的每次的增量值。默认值为1。 - - **dtype** – (str,可选) 初始化学习率变量的数据类型,可以为"float32", "float64"。默认值为"float32"。 + - **dtype** (str,可选) – 学习率值的数据类型,可以为"float32", "float64"。默认值为"float32"。 返回: 无 @@ -50,12 +53,14 @@ NaturalExpDecay import paddle.fluid as fluid base_lr = 0.1 with fluid.dygraph.guard(): + emb = fluid.dygraph.Embedding([10, 10]) sgd_optimizer = fluid.optimizer.SGD( learning_rate=fluid.dygraph.NaturalExpDecay( - learning_rate=base_lr, - decay_steps=10000, - decay_rate=0.5, - staircase=True)) + learning_rate=base_lr, + decay_steps=10000, + decay_rate=0.5, + staircase=True), + parameter_list=emb.parameters()) diff --git a/doc/fluid/api_cn/dygraph_cn/NoamDecay_cn.rst b/doc/fluid/api_cn/dygraph_cn/NoamDecay_cn.rst index 1b58aefb9ce6788bdeca195cc3479647a56da846..390e8b3e3f5a7ea1dac71fbd75468d26b042b4c6 100644 --- a/doc/fluid/api_cn/dygraph_cn/NoamDecay_cn.rst +++ b/doc/fluid/api_cn/dygraph_cn/NoamDecay_cn.rst @@ -3,9 +3,12 @@ NoamDecay ------------------------------- -**注意:该API仅支持【动态图】模式** -.. py:class:: paddle.fluid.dygraph.NoamDecay(d_model, warmup_steps, begin=1, step=1, dtype='float32') +.. py:class:: paddle.fluid.dygraph.NoamDecay(d_model, warmup_steps, begin=1, step=1, dtype='float32', learning_rate=1.0) + +:api_attr: 命令式编程模式(动态图) + + 该接口提供Noam衰减学习率的功能。 @@ -13,7 +16,7 @@ Noam衰减的计算方式如下。 .. 
math:: - decayed\_learning\_rate = d_{model}^{-0.5} * min(global\_steps^{-0.5}, global\_steps * warmup\_steps^{-1.5}) + decayed\_learning\_rate = learning\_rate * d_{model}^{-0.5} * min(global\_steps^{-0.5}, global\_steps * warmup\_steps^{-1.5}) 关于Noam衰减的更多细节请参考 `attention is all you need `_ @@ -28,6 +31,7 @@ Noam衰减的计算方式如下。 - **begin** (int,可选) – 起始步。即以上运算式子中global_steps的初始值。默认值为0。 - **step** (int,可选) – 步大小。即以上运算式子中global_steps的递增值。默认值为1。 - **dtype** (str,可选) – 学习率值的数据类型,可以为"float32", "float64"。默认值为"float32"。 + - **learning_rate** (Variable|float|int,可选) - 初始学习率。如果类型为Variable,则为shape为[1]的Tensor,数据类型为float32或float64;也可以是python的int类型。默认值为1.0。 返回: 无 @@ -39,7 +43,9 @@ Noam衰减的计算方式如下。 warmup_steps = 100 learning_rate = 0.01 with fluid.dygraph.guard(): + emb = fluid.dygraph.Embedding([10, 10]) optimizer = fluid.optimizer.SGD( learning_rate = fluid.dygraph.NoamDecay( 1/(warmup_steps *(learning_rate ** 2)), - warmup_steps) ) + warmup_steps), + parameter_list = emb.parameters()) diff --git a/doc/fluid/api_cn/dygraph_cn/PRelu_cn.rst b/doc/fluid/api_cn/dygraph_cn/PRelu_cn.rst index 57ac3449cd5fd4bde7b1f4638759d6c93c3557d9..8252bee477151d00696254cbe2ae4ea8dda01261 100644 --- a/doc/fluid/api_cn/dygraph_cn/PRelu_cn.rst +++ b/doc/fluid/api_cn/dygraph_cn/PRelu_cn.rst @@ -5,6 +5,9 @@ PRelu .. py:class:: paddle.fluid.dygraph.PRelu(mode, input_shape=None, param_attr=None, dtype="float32") + + + 该接口用于构建 ``PRelu`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。其中实现了 ``PRelu`` 激活函数的三种激活方式。 计算公式如下: diff --git a/doc/fluid/api_cn/dygraph_cn/ParallelEnv_cn.rst b/doc/fluid/api_cn/dygraph_cn/ParallelEnv_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..846001c6e98239282f6a971d82a174d3e32068c5 --- /dev/null +++ b/doc/fluid/api_cn/dygraph_cn/ParallelEnv_cn.rst @@ -0,0 +1,145 @@ +.. _cn_api_fluid_dygraph_ParallelEnv: + +ParallelEnv +------------------------------- + +.. py:class:: paddle.fluid.dygraph.ParallelEnv() + +**注意:** + **这个类的曾用名为 Env, 这个旧的名字会被废弃,请使用新的类名 ParallelEnv。** + +这个类用于获取动态图模型并行执行所需的环境变量值。 + +动态图并行模式现在需要使用 `paddle.distributed.launch` 模块启动,所需的环境变量默认由 `paddle.distributed.launch` 模块自动配置。 + +ParallelEnv通常需要和 `fluid.dygraph.DataParallel` 一起使用,用于配置动态图并行执行。 + +**示例代码:** + .. code-block:: python + + # 这个示例需要由paddle.distributed.launch启动, 用法为: + # python -m paddle.distributed.launch --selected_gpus=0,1 example.py + # 脚本example.py中的代码是下面这个示例. + + import numpy as np + import paddle.fluid as fluid + import paddle.fluid.dygraph as dygraph + from paddle.fluid.optimizer import AdamOptimizer + from paddle.fluid.dygraph.nn import Linear + from paddle.fluid.dygraph.base import to_variable + + place = fluid.CUDAPlace(fluid.dygraph.ParallelEnv().dev_id) + with fluid.dygraph.guard(place=place): + + # 准备数据并行的环境 + strategy=dygraph.prepare_context() + + linear = Linear(1, 10, act="softmax") + adam = fluid.optimizer.AdamOptimizer() + + # 配置模型为并行模型 + linear = dygraph.DataParallel(linear, strategy) + + x_data = np.random.random(size=[10, 1]).astype(np.float32) + data = to_variable(x_data) + + hidden = linear(data) + avg_loss = fluid.layers.mean(hidden) + + # 根据参与训练GPU卡的数量对loss值进行缩放 + avg_loss = linear.scale_loss(avg_loss) + + avg_loss.backward() + + # 收集各个GPU卡上的梯度值 + linear.apply_collective_grads() + + adam.minimize(avg_loss) + linear.clear_gradients() + +属性 +:::::::::::: + +.. py:attribute:: nranks + +参与训练进程的数量,一般也是训练所使用GPU卡的数量。 + +此属性的值等于环境变量 `PADDLE_TRAINERS_NUM` 的值。默认值为1。 + +**示例代码** + .. 
code-block:: python + + # 在Linux环境,提前执行此命令: export PADDLE_TRAINERS_NUM=4 + import paddle.fluid as fluid + + env = fluid.dygraph.ParallelEnv() + print("The nranks is %d" % env.nranks) + # The nranks is 4 + + +.. py:attribute:: local_rank + +当前训练进程的编号。 + +此属性的值等于环境变量 `PADDLE_TRAINER_ID` 的值。默认值是0。 + +**示例代码** + .. code-block:: python + + # 在Linux环境,提前执行此命令: export PADDLE_TRAINER_ID=0 + import paddle.fluid as fluid + + env = fluid.dygraph.ParallelEnv() + print("The local rank is %d" % env.local_rank) + # The local rank is 0 + + +.. py:attribute:: dev_id + +当前用于并行训练的GPU的编号。 + +此属性的值等于环境变量 `FLAGS_selected_gpus` 的值。默认值是0。 + +**示例代码** + .. code-block:: python + + # 在Linux环境,提前执行此命令: export FLAGS_selected_gpus=1 + import paddle.fluid as fluid + + env = fluid.dygraph.ParallelEnv() + print("The device id are %d" % env.dev_id) + # The device id are 1 + + +.. py:attribute:: current_endpoint + +当前训练进程的终端节点IP与相应端口,形式为(机器节点IP:端口号)。例如:127.0.0.1:6170。 + +此属性的值等于环境变量 `PADDLE_CURRENT_ENDPOINT` 的值。默认值为空字符串""。 + +**示例代码** + .. code-block:: python + + # 在Linux环境,提前执行此命令: export PADDLE_CURRENT_ENDPOINT=127.0.0.1:6170 + import paddle.fluid as fluid + + env = fluid.dygraph.ParallelEnv() + print("The current endpoint are %s" % env.current_endpoint) + # The current endpoint are 127.0.0.1:6170 + + +.. py:attribute:: trainer_endpoints + +当前任务所有参与训练进程的终端节点IP与相应端口,用于在NCCL2初始化的时候建立通信,广播NCCL ID。 + +此属性的值等于环境变量 `PADDLE_TRAINER_ENDPOINTS` 的值。默认值为空字符串""。 + +**示例代码** + .. code-block:: python + + # 在Linux环境,提前执行此命令: export PADDLE_TRAINER_ENDPOINTS=127.0.0.1:6170,127.0.0.1:6171 + import paddle.fluid as fluid + + env = fluid.dygraph.ParallelEnv() + print("The trainer endpoints are %s" % env.trainer_endpoints) + # The trainer endpoints are ['127.0.0.1:6170', '127.0.0.1:6171'] \ No newline at end of file diff --git a/doc/fluid/api_cn/dygraph_cn/ParameterList_cn.rst b/doc/fluid/api_cn/dygraph_cn/ParameterList_cn.rst index 6d3c29ebf836a900e14f2c5625e7ad2263d52252..82ca04fef6bde4a149153642c29b0e449931da9e 100644 --- a/doc/fluid/api_cn/dygraph_cn/ParameterList_cn.rst +++ b/doc/fluid/api_cn/dygraph_cn/ParameterList_cn.rst @@ -5,6 +5,9 @@ ParameterList .. py:class:: paddle.fluid.dygraph.ParameterList(parameters=None) + + + 参数列表容器。此容器的行为类似于Python列表,但它包含的参数将被正确地注册和添加。 参数: diff --git a/doc/fluid/api_cn/dygraph_cn/PiecewiseDecay_cn.rst b/doc/fluid/api_cn/dygraph_cn/PiecewiseDecay_cn.rst index 74478aabded2640d36e461cfe7075bc4435dc137..d483d5d34ed9a271f1b61f1ed97b8c09ab2626f2 100644 --- a/doc/fluid/api_cn/dygraph_cn/PiecewiseDecay_cn.rst +++ b/doc/fluid/api_cn/dygraph_cn/PiecewiseDecay_cn.rst @@ -3,10 +3,13 @@ PiecewiseDecay ------------------------------- -**注意:该API仅支持【动态图】模式** .. 
py:class:: paddle.fluid.dygraph.PiecewiseDecay(boundaries, values, begin, step=1, dtype='float32') +:api_attr: 命令式编程模式(动态图) + + + 该接口提供对初始学习率进行分段(piecewise)常数衰减的功能。 分段常数衰减的过程举例描述如下。 @@ -35,8 +38,10 @@ PiecewiseDecay boundaries = [10000, 20000] values = [1.0, 0.5, 0.1] with fluid.dygraph.guard(): + emb = fluid.dygraph.Embedding( [10, 10] ) optimizer = fluid.optimizer.SGD( - learning_rate=fluid.dygraph.PiecewiseDecay(boundaries, values, 0) ) + learning_rate=fluid.dygraph.PiecewiseDecay(boundaries, values, 0), + parameter_list = emb.parameters() ) diff --git a/doc/fluid/api_cn/dygraph_cn/PolynomialDecay_cn.rst b/doc/fluid/api_cn/dygraph_cn/PolynomialDecay_cn.rst index 0a2a989688719208d6d783918fb8e441448185b8..df03f3f10ef90733cf7fc2027efc2028018da1b0 100644 --- a/doc/fluid/api_cn/dygraph_cn/PolynomialDecay_cn.rst +++ b/doc/fluid/api_cn/dygraph_cn/PolynomialDecay_cn.rst @@ -3,10 +3,13 @@ PolynomialDecay ------------------------------- -**注意:该API仅支持【动态图】模式** .. py:class:: paddle.fluid.dygraph.PolynomialDecay(learning_rate, decay_steps, end_learning_rate=0.0001, power=1.0, cycle=False, begin=0, step=1, dtype='float32') +:api_attr: 命令式编程模式(动态图) + + + 该接口提供学习率按多项式衰减的功能。通过多项式衰减函数,使得学习率值逐步从初始的 ``learning_rate``,衰减到 ``end_learning_rate`` 。 计算方式如下。 @@ -52,10 +55,8 @@ PolynomialDecay total_step = 5000 end_lr = 0 with fluid.dygraph.guard(): + emb = fluid.dygraph.Embedding( [10, 10]) optimizer = fluid.optimizer.SGD( learning_rate = fluid.dygraph.PolynomialDecay( - start_lr, total_step, end_lr, power=1.0) ) - - - - + start_lr, total_step, end_lr, power=1.0), + parameter_list = emb.parameters()) diff --git a/doc/fluid/api_cn/dygraph_cn/Pool2D_cn.rst b/doc/fluid/api_cn/dygraph_cn/Pool2D_cn.rst index 0869d810c1906d479dc43bd8231e810bc6a810fc..e66ec6b3237edbe73446be147aef39efe3cb66a8 100644 --- a/doc/fluid/api_cn/dygraph_cn/Pool2D_cn.rst +++ b/doc/fluid/api_cn/dygraph_cn/Pool2D_cn.rst @@ -3,11 +3,17 @@ Pool2D ------------------------------- -.. py:class:: paddle.fluid.dygraph.Pool2D(pool_size=-1, pool_type='max', pool_stride=1, pool_padding=0, global_pooling=False, use_cudnn=True, ceil_mode=False, exclusive=True) +.. 
py:class:: paddle.fluid.dygraph.Pool2D(pool_size=-1, pool_type='max', pool_stride=1, pool_padding=0, global_pooling=False, use_cudnn=True, ceil_mode=False, exclusive=True, data_format="NCHW") + +:alias_main: paddle.nn.Pool2D +:alias: paddle.nn.Pool2D,paddle.nn.layer.Pool2D,paddle.nn.layer.common.Pool2D +:old_api: paddle.fluid.dygraph.Pool2D + + 该接口用于构建 ``Pool2D`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。其将在神经网络中构建一个二维池化层,并使用上述输入参数的池化配置,为二维空间池化操作,根据 ``input`` , 池化类型 ``pool_type`` , 池化核大小 ``pool_size`` , 步长 ``pool_stride`` ,填充 ``pool_padding`` 这些参数得到输出。 -输入X和输出Out是NCHW格式,N为批大小,C是通道数,H是特征高度,W是特征宽度。参数( ``ksize``, ``strides``, ``paddings`` )含有两个整型元素。分别表示高度和宽度上的参数。输入X的大小和输出Out的大小可能不一致。 +输入X和输出Out默认是NCHW格式,N为批大小,C是通道数,H是特征高度,W是特征宽度。参数( ``ksize``, ``strides``, ``paddings`` )含有两个整型元素。分别表示高度和宽度上的参数。输入X的大小和输出Out的大小可能不一致。 例如: @@ -60,13 +66,15 @@ Pool2D - **use_cudnn** (bool, 可选)- 是否用cudnn核,只有已安装cudnn库时才有效。默认True。 - **ceil_mode** (bool, 可选)- 是否用ceil函数计算输出高度和宽度。如果设为False,则使用floor函数。默认为False。 - **exclusive** (bool, 可选) - 是否在平均池化模式忽略填充值。默认为True。 + - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 返回:无 抛出异常: - - ``ValueError`` - 如果 ``pool_type`` 既不是“max”也不是“avg” - - ``ValueError`` - 如果 ``global_pooling`` 为False并且‘pool_size’为-1 - - ``ValueError`` - 如果 ``use_cudnn`` 不是bool值 + - ``ValueError`` - 如果 ``pool_type`` 既不是“max”也不是“avg”。 + - ``ValueError`` - 如果 ``global_pooling`` 为False并且 ``pool_size`` 为-1。 + - ``ValueError`` - 如果 ``use_cudnn`` 不是bool值。 + - ``ValueError`` - 如果 ``data_format`` 既不是"NCHW"也不是"NHWC"。 **代码示例** @@ -77,11 +85,11 @@ Pool2D import numpy as np with fluid.dygraph.guard(): - data = np.random.random((3, 32, 32, 5)).astype('float32') - pool2d = fluid.dygraph.Pool2D(pool_size=2, + data = np.random.random((3, 32, 32, 5)).astype('float32') + pool2d = fluid.dygraph.Pool2D(pool_size=2, pool_type='max', pool_stride=1, global_pooling=False) - pool2d_res = pool2d(to_variable(data)) + pool2d_res = pool2d(to_variable(data)) diff --git a/doc/fluid/api_cn/dygraph_cn/ProgramTranslator_cn.rst b/doc/fluid/api_cn/dygraph_cn/ProgramTranslator_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..75cd816fbdeb794fbfb8efd8ff471c16d6812875 --- /dev/null +++ b/doc/fluid/api_cn/dygraph_cn/ProgramTranslator_cn.rst @@ -0,0 +1,263 @@ +.. _cn_api_fluid_dygraph_ProgramTranslator + +ProgramTranslator +------------------------------- + +.. py:class:: paddle.fluid.dygraph.dygraph_to_static.ProgramTranslator() + +将动态图函数转为静态图函数的类。该类是个单例(singleton)。 + +参数: + 无。 + +返回:ProgramTranslator 单例对象。 + +返回类型:ProgramTranslator。 + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + + # 以下两种调用方法得到同一个对象,因为ProgramTranslator是个单例 + fluid.dygraph.ProgramTranslator() + fluid.dygraph.ProgramTranslator.get_instance() + +.. py:method:: enable(enable_declarative) + +全局开启或关闭动态图转化为静态图。 + +参数: + - **enable_declarative** (bool) - 设置True或者False来打开或关闭declarative 。 + +返回:None。 + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + @fluid.dygraph.jit.declarative + def func(x): + x = fluid.dygraph.to_variable(x) + if fluid.layers.mean(x) > 0: + x_v = x - 1 + else: + x_v = x + 1 + return x_v + + prog_trans = fluid.dygraph.ProgramTranslator() + prog_trans.enable(False) + + x = np.ones([1, 2]) + # The declarative is disabled so the func is run in dygraph + with fluid.dygraph.guard(): + print(func(x).numpy()) # [[2. 2.]] + +.. 
py:method:: get_output(dygraph_func, *args, **kwargs) + +返回动态图函数输出的VarBase,但是该动态图函数的数值计算过程会被转化为静态图模式运行。 + +参数: + - **dygraph_func** (callable) - 动态图函数。 + - **args, kwargs** - 动态图函数的输入。 + +返回:包含数值结果的VarBase或者VarBase的元组,是输入动态图函数的返回值。 + +返回类型:VarBase或者VarBase的元组。 + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + def func(x): + x = fluid.dygraph.to_variable(x) + if fluid.layers.mean(x) > 0: + x_v = x - 1 + else: + x_v = x + 1 + return x_v + + prog_trans = fluid.dygraph.ProgramTranslator() + + with fluid.dygraph.guard(): + x = np.ones([1, 2]) + x_v = prog_trans.get_output(func, x) + print(x_v.numpy()) # [[0. 0.]] + +.. py:method:: get_func(dygraph_func) + +返回一个可调用函数,该函数将输入动态图函数接口转化为静态图组网接口。组网接口不像动态图接口,其并不直接返回数据结果。用户需要自行处理对应的Program和Eexecutor。 + +参数: + - **dygraph_func** (callable) - 动态图函数。 + +返回:将动态图接口转为静态图组网接口的可调用函数。 + +返回类型:可调用函数。 + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + def func(x): + x = fluid.dygraph.to_variable(x) + if fluid.layers.mean(x) > 0: + x_v = x - 1 + else: + x_v = x + 1 + return x_v + + prog_trans = fluid.dygraph.ProgramTranslator() + + static_func = prog_trans.get_func(func) + print(callable(static_func)) # True + +.. py:method:: get_program(dygraph_func, *args, **kwargs) + +返回动态图函数转化后的静态图Program和输入输出Varaible。用户可以使用Executor来执行该Program。 + +参数: + - **dygraph_func** (callable) - 动态图函数。 + - **args, kwargs** - 动态图函数的输入。 + +返回:元组(main_program, startup_program, inputs, outputs) + main_program: 转化后的main program。 + startup_program: 转化后的startup program。 + inputs: 输入Variable的列表,这些Variable可以在执行去feed。 + outputs: 输出Variable的列表,这些Variable可以在运行时被fetch。 + +返回类型:类型为(Program, Program, list(Variable), list(Variable)) 的元组。 + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + def func(x): + x = fluid.dygraph.to_variable(x) + if fluid.layers.mean(x) > 0: + x_v = x - 1 + else: + x_v = x + 1 + return x_v + + prog_trans = fluid.dygraph.ProgramTranslator() + + x = np.ones([1, 2]) + main_prog, start_prog, inputs, outputs = prog_trans.get_program(func, x) + print([i.name for i in inputs]) + # ['feed_0'] 需要被feed的输入Variable名字,对应x + print([o.name for o in outputs]) + # ['_generated_var_4'] 需要被fetch的输出Variable名字,对应x_v + +.. py:method:: get_code(dygraph_func) + +返回动态图函数转化后的静态图代码字符串。 + +参数: + - **dygraph_func** (callable) - 动态图函数。 + +返回:转化后的静态图代码字符串。 + +返回类型:str。 + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + def func(x): + x = fluid.dygraph.to_variable(x) + if fluid.layers.mean(x) > 0: + x_v = x - 1 + else: + x_v = x + 1 + return x_v + + prog_trans = fluid.dygraph.ProgramTranslator() + + code = prog_trans.get_code(func) + print(type(code)) # + +.. py:method:: save_inference_model(dirname, feed=None, fetch=None) + +将现有模型保存为预测模型。保存过程会裁剪main program,只保存和预测输入输出有关的部分,构建成新的Program,并将此Program和相关参数保存到指定dirname路径下,被保存的模型可以被 :ref:`cn_api_fluid_io_load_inference_model` 或者C++预测接口使用。 + +参数: + - **dirname** (str) - 存储预测模型的目录。 + - **feed (list[int], 可选)** - 预测模型要保存的输入Variable的序号。如果为None,则动态图函数的所有输入变量将被保存。默认值为None。 + - **fetch (list[int], 可选)** - 预测模型要保存的输出Variable的序号。如果为None,则动态图函数的所有输出变量将被保存。默认值为None。 + +返回:None。 + +**示例代码** + +.. 
code-block:: python + + import numpy as np + import paddle.fluid as fluid + from paddle.fluid.dygraph import Linear + from paddle.fluid.dygraph import declarative + from paddle.fluid.dygraph import ProgramTranslator + + class SimpleNet(fluid.dygraph.Layer): + def __init__(self, in_size, out_size): + super(SimpleNet, self).__init__() + self._linear = Linear(in_size, out_size) + + @declarative + def forward(self, x): + y = self._linear(x) + z = self._linear(y) + loss = fluid.layers.mean(z) + return z, loss + + with fluid.dygraph.guard(fluid.CPUPlace()): + net = SimpleNet(8, 8) + adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters()) + x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32')) + for i in range(10): + loss, out = net(x) + loss.backward() + adam.minimize(loss) + net.clear_gradients() + # 保存模型 + # 注意fetch=[0]意味着我们将序号为0的动态图return输出'z'作为预测输出 + prog_trans = ProgramTranslator() + prog_trans.save_inference_model("./dy2stat_infer_model", fetch=[0]) + + # 在这个例子中,预测模型会根据输出'z'进行裁剪。被裁剪后的Program 会被保 + # 存在"./dy2stat_infer_model" 文件夹,并且参数也会保存为同一个文件夹下 + # 不同文件。 + +.. py:method:: get_program_cache() + +返回ProgramCache单例。这个方法是PaddlePaddle开发者用来管理ProgramTranslator中的Program缓存,普通用户不需要使用这个方法。 + +返回:ProgramTranslator中的ProgramCache。 + +返回类型:ProgramCache。 + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + + prog_trans = fluid.dygraph.ProgramTranslator() + prog_cache = prog_trans.get_program_cache() + diff --git a/doc/fluid/api_cn/dygraph_cn/ReduceLROnPlateau_cn.rst b/doc/fluid/api_cn/dygraph_cn/ReduceLROnPlateau_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b0a0b75f7b31244421f02cab719a342461a9f7c1 --- /dev/null +++ b/doc/fluid/api_cn/dygraph_cn/ReduceLROnPlateau_cn.rst @@ -0,0 +1,89 @@ +.. _cn_api_fluid_dygraph_ReduceLROnPlateau: + +ReduceLROnPlateau +------------------------------- + +**注意:该API仅支持【动态图】模式** + +.. 
py:class:: paddle.fluid.dygraph.ReduceLROnPlateau(learning_rate, mode='min', decay_rate=0.1, patience=10, verbose=False, threshold=1e-4, threshold_mode='rel', cooldown=0, min_lr=0, eps=1e-8, dtype='float32') + +该API为 ``loss`` 自适应的学习率衰减策略。默认情况下,当 ``loss`` 停止下降时,降低学习率(如果将 ``mode`` 设置为 `'max'` ,此时判断逻辑相反, ``loss`` 停止上升时降低学习率)。其思想是:一旦模型表现不再提升,将学习率降低2-10倍对模型的训练往往有益。 + +``loss`` 是传入到该类方法 ``step`` 中的参数,其必须是shape为[1]的1-D Tensor。 如果 ``loss`` 停止下降(``mode`` 为 `min` 时)超过 ``patience`` 个epoch,学习率将会减小为 +`learning_rate * decay_rate` 。 + +此外,每降低一次学习率后,将会进入一个时长为 ``cooldown`` 个epoch的冷静期,在冷静期内,将不会监控 ``loss`` 的变化情况,也不会衰减。 +在冷静期之后,会继续监控 ``loss`` 的上升或下降。 + +参数: + - **learning_rate** (Variable|float|int) - 初始学习率。其类型可以是Python的float类型,如果输入int类型则会被转为float类型。其也可以是shape为[1]的 + 1-D Tensor,且相应数据类型必须为 "float32" 或 "float64" 。 + - **mode** (str,可选) - `'min'` 和 `'max'` 之一。通常情况下,为 `'min'` ,此时当 ``loss`` 停止下降时学习率将减小。默认:`'min'` 。 + (注意:仅在特殊用法时,可以将其设置为 `'max'` ,此时判断逻辑相反, ``loss`` 停止上升学习率才减小) + - **decay_rate** (float,可选) - 学习率衰减的比例。`new_lr = origin_lr * decay_rate` ,它是值小于1.0的float型数字,默认: 0.1。 + - **patience** (int,可选) - 当 ``loss`` 连续 ``patience`` 个epoch没有下降(mode: 'min')或上升(mode: 'max')时,学习率才会减小。默认:10。 + - **verbose** (bool,可选) - 如果为 ``True`` , 会在每次更新optimizer中的learning_rate时,打印信息。默认:``False`` 。 + - **threshold** (float,可选) - ``threshold`` 和 ``threshold_mode`` 两个参数将会决定 ``loss`` 最小变化的阈值。小于该阈值的变化 + 将会被忽视。默认:1e-4。 + - **threshold_mode** (str,可选) - `'rel'` 和 `'abs'` 之一。在 `'rel'` 模式下, ``loss`` 最小变化的阈值是 `last_loss * threshold` , + 其中 ``last_loss`` 是 ``loss`` 在上个epoch的值。在 `'abs'` 模式下,``loss`` 最小变化的阈值是 `threshold` 。 默认:`'rel'`。 + - **cooldown** (int,可选) - 在学习速率每次减小之后,会进入时长为 ``cooldown`` 个epoch的冷静期。默认:0。 + - **min_lr** (float,可选) - 最小的学习率。减小后的学习率最低下界限。默认:0。 + - **eps** (float,可选) - 如果新旧学习率间的差异小于 ``eps`` ,则不会更新。默认值:1e-8。 + - **dtype** (str,可选) – 学习率值的数据类型,可以为"float32", "float64"。默认:"float32"。 + +返回: ``loss`` 自适应的学习率 + +返回类型:Variable + +**代码示例**: + + .. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + with fluid.dygraph.guard(): + x = np.random.uniform(-1, 1, [10, 10]).astype("float32") + linear = fluid.dygraph.Linear(10, 10) + input = fluid.dygraph.to_variable(x) + + adam = fluid.optimizer.Adam( + learning_rate = fluid.dygraph.ReduceLROnPlateau( + learning_rate = 1.0, + decay_rate = 0.5, + patience = 5, + verbose = True, + cooldown = 3), + parameter_list = linear.parameters()) + + for epoch in range(10): + total_loss = 0 + for bath_id in range(5): + out = linear(input) + loss = fluid.layers.reduce_mean(out) + total_loss += loss + adam.minimize(loss) + + avg_loss = total_loss/5 + + # 根据传入total_loss,调整学习率 + reduce_lr.step(avg_loss) + lr = adam.current_step_lr() + print("current avg_loss is %s, current lr is %s" % (avg_loss.numpy()[0], lr)) + + + +.. py:method:: step(loss) +需要在每个epoch调用该方法,其根据传入的 ``loss`` 调整optimizer中的学习率,调整后的学习率将会在下一次调用 ``optimizer.minimize`` 时生效。 + +参数: + - **loss** (Variable) - 类型:Variable,shape为[1]的1-D Tensor。将被用来判断是否需要降低学习率。如果 ``loss`` 连续 ``patience`` 个epochs没有下降, + 将会降低学习率。 + +返回: + 无 + +**代码示例**: + + 参照其类中的说明。 diff --git a/doc/fluid/api_cn/dygraph_cn/Sequential_cn.rst b/doc/fluid/api_cn/dygraph_cn/Sequential_cn.rst index ed66dcfb5bc6a3957291f63cab26161a85161471..b39b4a556aae0612cff2ea4d33a7fbc0af10107a 100644 --- a/doc/fluid/api_cn/dygraph_cn/Sequential_cn.rst +++ b/doc/fluid/api_cn/dygraph_cn/Sequential_cn.rst @@ -5,6 +5,9 @@ Sequential .. 
py:class:: paddle.fluid.dygraph.Sequential(*layers) + + + 顺序容器。子Layer将按构造函数参数的顺序添加到此容器中。传递给构造函数的参数可以Layers或可迭代的name Layer元组。 参数: diff --git a/doc/fluid/api_cn/dygraph_cn/SpectralNorm_cn.rst b/doc/fluid/api_cn/dygraph_cn/SpectralNorm_cn.rst index 471ccd5536184d3b94757a9a03b9c1576572ed1c..d1677c60a870a6214860b5bb49418b35805d5bd9 100644 --- a/doc/fluid/api_cn/dygraph_cn/SpectralNorm_cn.rst +++ b/doc/fluid/api_cn/dygraph_cn/SpectralNorm_cn.rst @@ -5,6 +5,12 @@ SpectralNorm .. py:class:: paddle.fluid.dygraph.SpectralNorm(weight_shape, dim=0, power_iters=1, eps=1e-12, name=None, dtype="float32") +:alias_main: paddle.nn.SpectralNorm +:alias: paddle.nn.SpectralNorm,paddle.nn.layer.SpectralNorm,paddle.nn.layer.norm.SpectralNorm +:old_api: paddle.fluid.dygraph.SpectralNorm + + + 该接口用于构建 ``SpectralNorm`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。其中实现了谱归一化层的功能,用于计算fc、conv1d、conv2d、conv3d层的权重参数的谱正则值,输入权重参数应分别为2-D, 3-D, 4-D, 5-D张量,输出张量与输入张量维度相同。谱特征值计算方式如下: 步骤1:生成形状为[H]的向量U,以及形状为[W]的向量V,其中H是输入权重张量的第 ``dim`` 个维度,W是剩余维度的乘积。 diff --git a/doc/fluid/api_cn/dygraph_cn/StepDecay_cn.rst b/doc/fluid/api_cn/dygraph_cn/StepDecay_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0016cf85752bff268a481a389f37f69e964414b6 --- /dev/null +++ b/doc/fluid/api_cn/dygraph_cn/StepDecay_cn.rst @@ -0,0 +1,73 @@ +.. _cn_api_fluid_dygraph_StepDecay: + +StepDecay +------------------------------- + + +.. py:class:: paddle.fluid.dygraph.StepDecay(learning_rate, step_size, decay_rate=0.1) + +:api_attr: 命令式编程模式(动态图) + + +该接口提供 ``step_size`` 衰减学习率的功能,每经过 ``step_size`` 个 ``epoch`` 时会通过 ``decay_rate`` 衰减一次学习率。 + +算法可以描述为: + +.. code-block:: text + + learning_rate = 0.5 + step_size = 30 + decay_rate = 0.1 + learning_rate = 0.5 if epoch < 30 + learning_rate = 0.05 if 30 <= epoch < 60 + learning_rate = 0.005 if 60 <= epoch < 90 + ... + +参数: + - **learning_rate** (float|int) - 初始化的学习率。可以是Python的float或int。 + - **step_size** (int) - 学习率每衰减一次的间隔。 + - **decay_rate** (float, optional) - 学习率的衰减率。 ``new_lr = origin_lr * decay_rate`` 。其值应该小于1.0。默认:0.1。 + +返回: 无 + +**代码示例**: + + .. code-block:: python + + import paddle.fluid as fluid + import numpy as np + with fluid.dygraph.guard(): + x = np.random.uniform(-1, 1, [10, 10]).astype("float32") + linear = fluid.dygraph.Linear(10, 10) + input = fluid.dygraph.to_variable(x) + scheduler = fluid.dygraph.StepDecay(0.5, step_size=3) + adam = fluid.optimizer.Adam(learning_rate = scheduler, parameter_list = linear.parameters()) + for epoch in range(9): + for batch_id in range(5): + out = linear(input) + loss = fluid.layers.reduce_mean(out) + adam.minimize(loss) + scheduler.epoch() + print("epoch:{}, current lr is {}" .format(epoch, adam.current_step_lr())) + # epoch:0, current lr is 0.5 + # epoch:1, current lr is 0.5 + # epoch:2, current lr is 0.5 + # epoch:3, current lr is 0.05 + # epoch:4, current lr is 0.05 + # epoch:5, current lr is 0.05 + # epoch:6, current lr is 0.005 + # epoch:7, current lr is 0.005 + # epoch:8, current lr is 0.005 + +.. 
py:method:: epoch(epoch=None) +通过当前的 epoch 调整学习率,调整后的学习率将会在下一次调用 ``optimizer.minimize`` 时生效。 + +参数: + - **epoch** (int|float,可选) - 类型:int或float。指定当前的epoch数。默认:无,此时将会自动累计epoch数。 + +返回: + 无 + +**代码示例**: + + 参照上述示例代码。 diff --git a/doc/fluid/api_cn/dygraph_cn/TracedLayer_cn.rst b/doc/fluid/api_cn/dygraph_cn/TracedLayer_cn.rst index cb014477f5bbc3fac89e774a391214d45ae3434a..342cc84f4c1a0992e5b1628956c9a4534e368770 100644 --- a/doc/fluid/api_cn/dygraph_cn/TracedLayer_cn.rst +++ b/doc/fluid/api_cn/dygraph_cn/TracedLayer_cn.rst @@ -3,10 +3,13 @@ TracedLayer ------------------------------- -**注意:该API仅支持【动态图】模式** .. py:class:: paddle.fluid.dygraph.TracedLayer(program, parameters, feed_names, fetch_names) +:api_attr: 命令式编程模式(动态图) + + + TracedLayer用于将前向动态图模型转换为静态图模型,主要用于将动态图保存后做在线C++预测。除此以外,用户也可使用转换后的静态图模型在Python端做预测,通常比原先的动态图性能更好。 TracedLayer使用 ``Executor`` 和 ``CompiledProgram`` 运行静态图模型。转换后的静态图模型与原动态图模型共享参数。 diff --git a/doc/fluid/api_cn/dygraph_cn/TranslatedLayer_cn.rst b/doc/fluid/api_cn/dygraph_cn/TranslatedLayer_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0924a155a0ffb8861dd0f233cf0b0eff3dd8b169 --- /dev/null +++ b/doc/fluid/api_cn/dygraph_cn/TranslatedLayer_cn.rst @@ -0,0 +1,84 @@ +.. _cn_api_fluid_dygraph_TranslatedLayer: + +TranslatedLayer +------------------------------- + +.. py:class:: paddle.fluid.dygraph.TranslatedLayer(programs, persistable_vars) + +``TranslatedLayer`` 是一个命令式编程模式 :ref:`cn_api_fluid_dygraph_Layer` 的继承类, +通过 :ref:`cn_api_fluid_dygraph_jit_load` 载入构建。能够像一般 ``Layer`` 一样在train或者eval模式下使用。 + +.. note:: + ``TranslatedLayer`` 对象不能够通过构造函数创建,仅能够通过 :ref:`cn_api_fluid_dygraph_jit_load` 接口载入构建。 + +**示例代码:** + .. code-block:: python + + import numpy as np + import paddle.fluid as fluid + from paddle.fluid.dygraph import Linear + from paddle.fluid.dygraph import declarative + BATCH_SIZE = 32 + BATCH_NUM = 20 + def random_batch_reader(): + def _get_random_images_and_labels(image_shape, label_shape): + image = np.random.random(size=image_shape).astype('float32') + label = np.random.random(size=label_shape).astype('int64') + return image, label + def __reader__(): + for _ in range(BATCH_NUM): + batch_image, batch_label = _get_random_images_and_labels( + [BATCH_SIZE, 784], [BATCH_SIZE, 1]) + yield batch_image, batch_label + return __reader__ + class LinearNet(fluid.dygraph.Layer): + def __init__(self, in_size, out_size): + super(LinearNet, self).__init__() + self._linear = Linear(in_size, out_size) + @declarative + def forward(self, x): + return self._linear(x) + # 开启命令式编程模式 + fluid.enable_dygraph() + # 1. 训练存储模型. + # 创建网络 + net = LinearNet(784, 1) + adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters()) + # 创建DataLoader + train_loader = fluid.io.DataLoader.from_generator(capacity=5) + train_loader.set_batch_generator(random_batch_reader()) + # 训练 + for data in train_loader(): + img, label = data + label.stop_gradient = True + cost = net(img) + loss = fluid.layers.cross_entropy(cost, label) + avg_loss = fluid.layers.mean(loss) + avg_loss.backward() + adam.minimize(avg_loss) + net.clear_gradients() + model_path = "linear.example.model" + fluid.dygraph.jit.save( + layer=net, + model_path=model_path, + input_spec=[img]) + # 2. 
载入模型构建TranslatedLayer + translated_layer = fluid.dygraph.jit.load(model_path) + # 预测 + translated_layer.eval() + x = fluid.dygraph.to_variable(np.random.random((1, 784)).astype('float32')) + pred = translated_layer(x) + # fine-tune训练 + translated_layer.train() + adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=translated_layer.parameters()) + train_loader = fluid.io.DataLoader.from_generator(capacity=5) + train_loader.set_batch_generator(random_batch_reader()) + for data in train_loader(): + img, label = data + label.stop_gradient = True + cost = translated_layer(img) + loss = fluid.layers.cross_entropy(cost, label) + avg_loss = fluid.layers.mean(loss) + avg_loss.backward() + adam.minimize(avg_loss) + translated_layer.clear_gradients() diff --git a/doc/fluid/api_cn/dygraph_cn/TreeConv_cn.rst b/doc/fluid/api_cn/dygraph_cn/TreeConv_cn.rst index 23033ddb57b926eac22edc8d135181fdea9222f5..699a3f71ec5feb18f3da4d86f0c8df7566cb5c82 100644 --- a/doc/fluid/api_cn/dygraph_cn/TreeConv_cn.rst +++ b/doc/fluid/api_cn/dygraph_cn/TreeConv_cn.rst @@ -5,6 +5,9 @@ TreeConv .. py:class:: paddle.fluid.dygraph.TreeConv(feature_size, output_size, num_filters=1, max_depth=2, act='tanh', param_attr=None, bias_attr=None, name=None, dtype="float32") + + + 该接口用于构建 ``TreeConv`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。其将在神经网络中构建一个基于树结构的卷积(Tree-Based Convolution)运算。基于树的卷积是基于树的卷积神经网络(TBCNN,Tree-Based Convolution Neural Network)的一部分,它用于对树结构进行分类,例如抽象语法树。 Tree-Based Convolution提出了一种称为连续二叉树的数据结构,它将多路(multiway)树视为二叉树。详情请参考: `基于树的卷积论文 `_ 。 diff --git a/doc/fluid/api_cn/dygraph_cn/declarative_cn.rst b/doc/fluid/api_cn/dygraph_cn/declarative_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9920b5b7af2d6913189ac6d0255cea41995e524d --- /dev/null +++ b/doc/fluid/api_cn/dygraph_cn/declarative_cn.rst @@ -0,0 +1,32 @@ +.. _cn_api_fluid_dygraph_declarative: + +declarative +------------------------------- + +.. py:decorator:: paddle.fluid.dygraph.jit.declarative + +本装饰器将函数内的动态图API转化为静态图API。此装饰器自动处理静态图模式下的Program和Executor,并将结果作为动态图Tensor返回。输出的动态图Tensor可以继续进行动态图训练、预测或其他运算。如果被装饰的函数里面调用其他动态图函数,被调用的函数也会被转化为静态图函数。 + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + from paddle.fluid.dygraph.jit import declarative + + fluid.enable_dygraph() + + @declarative + def func(x): + x = fluid.dygraph.to_variable(x) + if fluid.layers.mean(x) < 0: + x_v = x - 1 + else: + x_v = x + 1 + return x_v + + x = np.ones([1, 2]) + x_v = func(x) + print(x_v.numpy()) # [[2. 2.]] + diff --git a/doc/fluid/api_cn/dygraph_cn/enabled_cn.rst b/doc/fluid/api_cn/dygraph_cn/enabled_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e5716e76456a99ba5724369d4c2aaba7bfa129f8 --- /dev/null +++ b/doc/fluid/api_cn/dygraph_cn/enabled_cn.rst @@ -0,0 +1,25 @@ +.. _cn_api_fluid_dygraph_enabled: + +enabled +------------------------------- + +.. py:method:: paddle.fluid.dygraph.enabled() + +这个函数用于检查程序是否运行在动态图模式。你可以使用 :ref:`cn_api_fluid_dygraph_guard` api进入动态图模式。或者使用 :ref:`cn_api_fluid_enable_dygraph` 和 :ref:`cn_api_fluid_disable_dygraph` api打开、关闭动态图模式。 + +注意: `fluid.dygraph.enabled` 实际上调用了 :ref:`cn_api_fluid_in_dygraph_mode` api,所以推荐使用 :ref:`cn_api_fluid_in_dygraph_mode` api。 + +返回: 程序是否运行在动态图模式。 + +返回类型: bool + +**示例代码** + +.. 
code-block:: python + + import paddle.fluid as fluid + + fluid.enable_dygraph() # Now we are in dygragh mode + print(fluid.dygraph.enabled()) # True + fluid.disable_dygraph() + print(fluid.dygraph.enabled()) # False diff --git a/doc/fluid/api_cn/dygraph_cn/grad_cn.rst b/doc/fluid/api_cn/dygraph_cn/grad_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1b164d104b6a6432b11b641431165186700b9381 --- /dev/null +++ b/doc/fluid/api_cn/dygraph_cn/grad_cn.rst @@ -0,0 +1,102 @@ +.. _cn_api_paddle_grad: + +grad +------------------------------- + +**注意:该API仅支持【动态图】模式** + +.. py:method:: paddle.grad(outputs, inputs, grad_outputs=None, retain_graph=None, create_graph=False, only_inputs=True, allow_unused=False, no_grad_vars=None) + +对于每个 `inputs` ,计算所有 `outputs` 相对于其的梯度和。 + +参数: + - **outputs** (Tensor|list(Tensor)|tuple(Tensor)) – 用于计算梯度的图的输出变量,或多个输出变量构成的list/tuple。 + - **inputs** (Tensor|list(Tensor)|tuple(Tensor)) - 用于计算梯度的图的输入变量,或多个输入变量构成的list/tuple。该API的每个返回值对应每个 `inputs` 的梯度。 + - **grad_outputs** (Tensor|list(Tensor|None)|tuple(Tensor|None), 可选) - `outputs` 变量梯度的初始值。若 `grad_outputs` 为None,则 `outputs` 梯度的初始值均为全1的Tensor。若 `grad_outputs` 不为None,它必须与 `outputs` 的长度相等,此时,若 `grad_outputs` 的第i个元素为None,则第i个 `outputs` 的梯度初始值为全1的Tensor;若 `grad_outputs` 的第i个元素为Tensor,则第i个 `outputs` 的梯度初始值为 `grad_outputs` 的第i个元素。默认值为None。 + - **retain_graph** (bool, 可选) - 是否保留计算梯度的前向图。若值为True,则前向图会保留,用户可对同一张图求两次反向。若值为False,则前向图会释放。默认值为None,表示值与 `create_graph` 相等。 + - **create_graph** (bool, 可选) - 是否创建计算过程中的反向图。若值为True,则可支持计算高阶导数。若值为False,则计算过程中的反向图会释放。默认值为False。 + - **only_inputs** (bool, 可选) - 是否只计算 `inputs` 的梯度。若值为False,则图中所有叶节点变量的梯度均会计算,并进行累加。若值为True,则只会计算 `inputs` 的梯度。默认值为True。only_inputs=False功能正在开发中,目前尚不支持。 + - **allow_unused** (bool, 可选) - 决定当某些 `inputs` 变量不在计算图中时抛出错误还是返回None。若某些 `inputs` 变量不在计算图中(即它们的梯度为None),则当allowed_unused=False时会抛出错误,当allow_unused=True时会返回None作为这些变量的梯度。默认值为False。 + - **no_grad_vars** (Tensor|list(Tensor)|tuple(Tensor)|set(Tensor), 可选) - 指明不需要计算梯度的变量。默认值为None。 + +返回: tuple(Tensor),其长度等于 `inputs` 中的变量个数,且第i个返回的变量是所有 `outputs` 相对于第i个 `inputs` 的梯度之和。 + +**示例代码 1** + .. code-block:: python + + import paddle + paddle.disable_static() + + def test_dygraph_grad(create_graph): + x = paddle.ones(shape=[1], dtype='float32') + x.stop_gradient = False + y = x * x + + # Since y = x * x, dx = 2 * x + dx = paddle.grad( + outputs=[y], + inputs=[x], + create_graph=create_graph, + retain_graph=True)[0] + + z = y + dx + + # If create_graph = False, the gradient of dx + # would not be backpropagated. Therefore, + # z = x * x + dx, and x.gradient() = 2 * x = 2.0 + + # If create_graph = True, the gradient of dx + # would be backpropagated. Therefore, + # z = x * x + dx = x * x + 2 * x, and + # x.gradient() = 2 * x + 2 = 4.0 + + z.backward() + return x.gradient() + + print(test_dygraph_grad(create_graph=False)) # [2.] + print(test_dygraph_grad(create_graph=True)) # [4.] + +**示例代码 2** + .. code-block:: python + + import paddle + paddle.disable_static() + + def test_dygraph_grad(grad_outputs=None): + x = paddle.fill_constant(shape=[1], value=2.0, dtype='float32') + x.stop_gradient = False + + y1 = x * x + y2 = x * 3 + + # If grad_outputs=None, dy1 = [1], dy2 = [1]. + # If grad_outputs=[g1, g2], then: + # - dy1 = [1] if g1 is None else g1 + # - dy2 = [1] if g2 is None else g2 + + # Since y1 = x * x, dx = 2 * x * dy1. + # Since y2 = x * 3, dx = 3 * dy2. + # Therefore, the final result would be: + # dx = 2 * x * dy1 + 3 * dy2 = 4 * dy1 + 3 * dy2. 
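+ # (Added note: x is created above with fill_constant(value=2.0), so 2 * x = 4; that is
+ # why dx = 4 * dy1 + 3 * dy2, matching the printed results below, e.g. dy1=dy2=[1] gives [7.])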
+ + dx = paddle.grad( + outputs=[y1, y2], + inputs=[x], + grad_outputs=grad_outputs)[0] + + return dx.numpy() + + grad_value = paddle.fill_constant(shape=[1], value=4.0, dtype='float32') + + # dy1 = [1], dy2 = [1] + print(test_dygraph_grad(None)) # [7.] + + # dy1 = [1], dy2 = [4] + print(test_dygraph_grad([None, grad_value])) # [16.] + + # dy1 = [4], dy2 = [1] + print(test_dygraph_grad([grad_value, None])) # [19.] + + # dy1 = [3], dy2 = [4] + grad_y1 = paddle.fill_constant(shape=[1], value=3.0, dtype='float32') + print(test_dygraph_grad([grad_y1, grad_value])) # [24.] \ No newline at end of file diff --git a/doc/fluid/api_cn/dygraph_cn/guard_cn.rst b/doc/fluid/api_cn/dygraph_cn/guard_cn.rst index b8dd4156daa5806b5be2678f884b16f3c599dd8f..651e8b6b5998545b5b8cf6553bc39c5b59495c25 100644 --- a/doc/fluid/api_cn/dygraph_cn/guard_cn.rst +++ b/doc/fluid/api_cn/dygraph_cn/guard_cn.rst @@ -3,10 +3,13 @@ guard ------------------------------- -**注意:该API仅支持【动态图】模式** .. py:function:: paddle.fluid.dygraph.guard(place=None) +:api_attr: 命令式编程模式(动态图) + + + 通过with语句创建一个dygraph运行的context,执行context代码。 参数: diff --git a/doc/fluid/api_cn/dygraph_cn/jit_cn.rst b/doc/fluid/api_cn/dygraph_cn/jit_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6899679ce577b0ae25304c638fa9a618b8e060cd --- /dev/null +++ b/doc/fluid/api_cn/dygraph_cn/jit_cn.rst @@ -0,0 +1,12 @@ +=== +jit +=== + +.. toctree:: + :maxdepth: 1 + + jit_cn/save_cn.rst + jit_cn/set_code_level_cn.rst + jit_cn/set_verbosity_cn.rst + jit_cn/load_cn.rst + jit_cn/SaveLoadConfig_cn.rst diff --git a/doc/fluid/api_cn/dygraph_cn/jit_cn/SaveLoadConfig_cn.rst b/doc/fluid/api_cn/dygraph_cn/jit_cn/SaveLoadConfig_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..cbee1bab234be6f53f83061c52139093513d321b --- /dev/null +++ b/doc/fluid/api_cn/dygraph_cn/jit_cn/SaveLoadConfig_cn.rst @@ -0,0 +1,273 @@ +.. _cn_api_fluid_dygraph_jit_SaveLoadConfig: + +SaveLoadConfig +------------------------------- + +.. py:class:: paddle.fluid.dygraph.jit.SaveLoadConfig() + +用于配置接口 :ref:`cn_api_fluid_dygraph_jit_save` 和 :ref:`cn_api_fluid_dygraph_jit_load` 存储载入 :ref:`cn_api_fluid_dygraph_TranslatedLayer` 时的附加选项。 + +**示例代码:** + + 1. 在存储模型时使用 ``SaveLoadConfig`` + + .. code-block:: python + + import numpy as np + import paddle.fluid as fluid + from paddle.fluid.dygraph import Linear + from paddle.fluid.dygraph import declarative + class SimpleNet(fluid.dygraph.Layer): + def __init__(self, in_size, out_size): + super(SimpleNet, self).__init__() + self._linear = Linear(in_size, out_size) + @declarative + def forward(self, x): + y = self._linear(x) + z = self._linear(y) + return z + # 开启命令式编程模式 + fluid.enable_dygraph() + # 训练模型 + net = SimpleNet(8, 8) + adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters()) + x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32')) + for i in range(10): + out = net(x) + loss = fluid.layers.mean(out) + loss.backward() + adam.minimize(loss) + net.clear_gradients() + # 在存储模型时使用SaveLoadConfig + model_path = "simplenet.example.model" + configs = fluid.dygraph.jit.SaveLoadConfig() + configs.model_filename = "__simplenet__" + fluid.dygraph.jit.save( + layer=net, + model_path=model_path, + input_spec=[x], + configs=configs) + + 2. 在载入模型时使用 ``SaveLoadConfig`` + + .. 
code-block:: python + + import numpy as np + import paddle.fluid as fluid + # 开启命令式编程模式 + fluid.enable_dygraph() + # 在载入模型时使用SaveLoadconfig + model_path = "simplenet.example.model" + configs = fluid.dygraph.jit.SaveLoadConfig() + configs.model_filename = "__simplenet__" + infer_net = fluid.dygraph.jit.load(model_path, configs=configs) + # 预测 + x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32')) + pred = infer_net(x) + +属性 +:::::::::::: + +.. py:attribute:: output_spec + +选择保存模型( :ref:`cn_api_fluid_dygraph_TranslatedLayer` )的输出变量,通过指定的这些变量能够使模型仅计算特定的结果。 +默认情况下,原始 :ref:`cn_api_fluid_dygraph_Layer` 的forward方法的所有返回变量都将配置为存储后模型 :ref:`cn_api_fluid_dygraph_TranslatedLayer` 的输出变量。 + +``output_spec`` 属性类型需要是 ``list[Variable]``。如果输入的 ``output_spec`` 列表不是原始 :ref:`cn_api_fluid_dygraph_Layer` 的forward方法的所有返回变量, +将会依据输入的 ``output_spec`` 列表对存储的模型进行裁剪。 + +.. note:: + ``output_spec`` 属性仅在存储模型时使用。 + +**示例代码:** + .. code-block:: python + + import numpy as np + import paddle.fluid as fluid + from paddle.fluid.dygraph import Linear + from paddle.fluid.dygraph import declarative + class SimpleNet(fluid.dygraph.Layer): + def __init__(self, in_size, out_size): + super(SimpleNet, self).__init__() + self._linear = Linear(in_size, out_size) + @declarative + def forward(self, x): + y = self._linear(x) + z = self._linear(y) + loss = fluid.layers.mean(z) + return z, loss + # 开启命令式编程模式 + fluid.enable_dygraph() + # 训练模型 + net = SimpleNet(8, 8) + adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters()) + x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32')) + for i in range(10): + out, loss = net(x) + loss.backward() + adam.minimize(loss) + net.clear_gradients() + # 使用SaveLoadconfig.output_spec + model_path = "simplenet.example.model.output_spec" + configs = fluid.dygraph.jit.SaveLoadConfig() + # 仅在存储模型中保留预测结果,丢弃loss + configs.output_spec = [out] + fluid.dygraph.jit.save( + layer=net, + model_path=model_path, + input_spec=[x], + configs=configs) + infer_net = fluid.dygraph.jit.load(model_path, configs=configs) + x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32')) + # 仅有预测结果输出 + pred = infer_net(x) + + +.. py:attribute:: model_filename + +存储转写 :ref:`cn_api_fluid_dygraph_Layer` 模型结构 ``Program`` 的文件名称。默认文件名为 ``__model__``。 + +**示例代码** + .. 
code-block:: python
+
+        import numpy as np
+        import paddle.fluid as fluid
+        from paddle.fluid.dygraph import Linear
+        from paddle.fluid.dygraph import declarative
+        class SimpleNet(fluid.dygraph.Layer):
+            def __init__(self, in_size, out_size):
+                super(SimpleNet, self).__init__()
+                self._linear = Linear(in_size, out_size)
+            @declarative
+            def forward(self, x):
+                y = self._linear(x)
+                z = self._linear(y)
+                return z
+        # 开启命令式编程模式
+        fluid.enable_dygraph()
+        # 训练模型
+        net = SimpleNet(8, 8)
+        adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters())
+        x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32'))
+        for i in range(10):
+            out = net(x)
+            loss = fluid.layers.mean(out)
+            loss.backward()
+            adam.minimize(loss)
+            net.clear_gradients()
+        model_path = "simplenet.example.model.model_filename"
+        configs = fluid.dygraph.jit.SaveLoadConfig()
+        configs.model_filename = "__simplenet__"
+        # 配置configs.model_filename存储模型
+        fluid.dygraph.jit.save(
+            layer=net,
+            model_path=model_path,
+            input_spec=[x],
+            configs=configs)
+        # [结果] 存储模型目录文件包括:
+        # __simplenet__  __variables__  __variables.info__
+        # 配置configs.model_filename载入模型
+        infer_net = fluid.dygraph.jit.load(model_path, configs=configs)
+        x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32'))
+        pred = infer_net(x)
+
+
+.. py:attribute:: params_filename
+
+存储转写 :ref:`cn_api_fluid_dygraph_Layer` 所有持久参数(包括 ``Parameters`` 和持久的 ``Buffers``)的文件名称。默认文件名称为 ``__variables__``。
+
+**示例代码**
+    .. code-block:: python
+
+        import numpy as np
+        import paddle.fluid as fluid
+        from paddle.fluid.dygraph import Linear
+        from paddle.fluid.dygraph import declarative
+        class SimpleNet(fluid.dygraph.Layer):
+            def __init__(self, in_size, out_size):
+                super(SimpleNet, self).__init__()
+                self._linear = Linear(in_size, out_size)
+            @declarative
+            def forward(self, x):
+                y = self._linear(x)
+                z = self._linear(y)
+                return z
+        # 开启命令式编程模式
+        fluid.enable_dygraph()
+        # 训练模型
+        net = SimpleNet(8, 8)
+        adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters())
+        x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32'))
+        for i in range(10):
+            out = net(x)
+            loss = fluid.layers.mean(out)
+            loss.backward()
+            adam.minimize(loss)
+            net.clear_gradients()
+        model_path = "simplenet.example.model.params_filename"
+        configs = fluid.dygraph.jit.SaveLoadConfig()
+        configs.params_filename = "__params__"
+        # 配置configs.params_filename存储模型
+        fluid.dygraph.jit.save(
+            layer=net,
+            model_path=model_path,
+            input_spec=[x],
+            configs=configs)
+        # [结果] 存储模型目录文件包括:
+        # __model__  __params__  __variables.info__
+        # 配置configs.params_filename载入模型
+        infer_net = fluid.dygraph.jit.load(model_path, configs=configs)
+        x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32'))
+        pred = infer_net(x)
+
+
+.. py:attribute:: separate_params
+
+配置是否将 :ref:`cn_api_fluid_dygraph_Layer` 的参数存储为分散的文件。
+(这是为了兼容接口 :ref:`cn_api_fluid_io_save_inference_model` 的行为)
+
+如果设置为 ``True`` ,每个参数将会被存储为一个文件,文件名为参数名,同时 ``SaveLoadConfig.params_filename`` 指定的文件名将不会生效。默认为 ``False``。
+
+**示例代码**
+    .. 
code-block:: python + + import numpy as np + import paddle.fluid as fluid + from paddle.fluid.dygraph import Linear + from paddle.fluid.dygraph import declarative + class SimpleNet(fluid.dygraph.Layer): + def __init__(self, in_size, out_size): + super(SimpleNet, self).__init__() + self._linear = Linear(in_size, out_size) + @declarative + def forward(self, x): + y = self._linear(x) + z = self._linear(y) + return z + # 开启命令式编程模式 + fluid.enable_dygraph() + # 训练模型 + net = SimpleNet(8, 8) + adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters()) + x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32')) + for i in range(10): + out = net(x) + loss = fluid.layers.mean(out) + loss.backward() + adam.minimize(loss) + net.clear_gradients() + model_path = "simplenet.example.model.separate_params" + configs = fluid.dygraph.jit.SaveLoadConfig() + configs.separate_params = True + # 配置configs.separate_params存储模型 + fluid.dygraph.jit.save( + layer=net, + model_path=model_path, + input_spec=[x], + configs=configs) + # [结果] 存储模型目录文件包括: + # linear_0.b_0 linear_0.w_0 __model__ __variables.info__ + # 配置configs.params_filename载入模型 + infer_net = fluid.dygraph.jit.load(model_path, configs=configs) + x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32')) + pred = infer_net(x) diff --git a/doc/fluid/api_cn/dygraph_cn/jit_cn/load_cn.rst b/doc/fluid/api_cn/dygraph_cn/jit_cn/load_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f001976971c5e84eb93c62fa5a5d77c8f2a9a335 --- /dev/null +++ b/doc/fluid/api_cn/dygraph_cn/jit_cn/load_cn.rst @@ -0,0 +1,168 @@ +.. _cn_api_fluid_dygraph_jit_load: + +load +----------------- + +.. py:function:: paddle.fluid.dygraph.jit.load(model_path, configs=None) + +:api_attr: 命令式编程模式(动态图) + +将接口 :ref:`cn_api_fluid_dygraph_jit_save` 或者 :ref:`cn_api_fluid_io_save_inference_model` 存储的模型载入为 :ref:`cn_api_fluid_dygraph_TranslatedLayer` ,用于预测推理或者fine-tune训练。 + +.. note:: + 由于一些历史原因,如果载入的模型是通过 :ref:`cn_api_fluid_io_save_inference_model` 存储的, + 在使用它进行fine-tune训练时会存在一些局限: + 1. 命令式编程模式不支持 ``LoDTensor`` ,所有原先输入变量或者参数依赖于LoD信息的模型暂时无法使用; + 2. 所有存储模型的feed变量都需要被传入 ``Translatedlayer`` 的forward方法; + 3. 原模型变量的 ``stop_gradient`` 信息已丢失且无法准确恢复; + 4. 原模型参数的 ``trainable`` 信息已丢失且无法准确恢复。 + +参数: + - **model_path** (str) - 存储模型的目录。 + - **configs** (SaveLoadConfig, 可选) - 用于指定额外配置选项的 :ref:`cn_api_fluid_dygraph_jit_SaveLoadConfig` 对象。默认为 ``None``。 + +返回:TranslatedLayer - 一个能够执行存储模型的 ``Layer`` 对象。 + +**示例代码** + +1. 载入由接口 :ref:`cn_api_fluid_dygraph_jit_save` 存储的模型进行预测推理及fine-tune训练。 + + .. code-block:: python + + import numpy as np + import paddle.fluid as fluid + from paddle.fluid.dygraph import Linear + from paddle.fluid.dygraph import declarative + BATCH_SIZE = 32 + BATCH_NUM = 20 + def random_batch_reader(): + def _get_random_images_and_labels(image_shape, label_shape): + image = np.random.random(size=image_shape).astype('float32') + label = np.random.random(size=label_shape).astype('int64') + return image, label + def __reader__(): + for _ in range(BATCH_NUM): + batch_image, batch_label = _get_random_images_and_labels( + [BATCH_SIZE, 784], [BATCH_SIZE, 1]) + yield batch_image, batch_label + return __reader__ + class LinearNet(fluid.dygraph.Layer): + def __init__(self, in_size, out_size): + super(LinearNet, self).__init__() + self._linear = Linear(in_size, out_size) + @declarative + def forward(self, x): + return self._linear(x) + # 开启命令式编程模式 + fluid.enable_dygraph() + # 1. 训练存储模型. 
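+        # 说明:下面先构建网络并创建 DataLoader 进行训练,再调用 fluid.dygraph.jit.save 存储模型;
+        # 之后第 2 步演示用 jit.load 载入模型进行预测,第 3 步演示载入后进行 fine-tune 训练。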
+ # 创建网络 + net = LinearNet(784, 1) + adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters()) + # 创建DataLoader + train_loader = fluid.io.DataLoader.from_generator(capacity=5) + train_loader.set_batch_generator(random_batch_reader()) + # 训练 + for data in train_loader(): + img, label = data + label.stop_gradient = True + cost = net(img) + loss = fluid.layers.cross_entropy(cost, label) + avg_loss = fluid.layers.mean(loss) + avg_loss.backward() + adam.minimize(avg_loss) + net.clear_gradients() + model_path = "linear.example.model" + fluid.dygraph.jit.save( + layer=net, + model_path=model_path, + input_spec=[img]) + # 2. 载入模型 & 预测 + # 载入模型 + infer_net = fluid.dygraph.jit.load(model_path) + # 预测 + x = fluid.dygraph.to_variable(np.random.random((1, 784)).astype('float32')) + pred = infer_net(x) + # 3. 载入模型 & fine-tune训练 + # 载入模型 + train_net = fluid.dygraph.jit.load(model_path) + train_net.train() + adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=train_net.parameters()) + # 创建DataLoader + train_loader = fluid.io.DataLoader.from_generator(capacity=5) + train_loader.set_batch_generator(random_batch_reader()) + # fine-tune训练 + for data in train_loader(): + img, label = data + label.stop_gradient = True + cost = train_net(img) + loss = fluid.layers.cross_entropy(cost, label) + avg_loss = fluid.layers.mean(loss) + avg_loss.backward() + adam.minimize(avg_loss) + train_net.clear_gradients() + + +2. 载入由接口 :ref:`cn_api_fluid_io_save_inference_model` 存储的模型进行预测推理及fine-tune训练。 + + .. code-block:: python + + import numpy as np + import paddle.fluid as fluid + BATCH_SIZE = 32 + BATCH_NUM = 20 + def random_batch_reader(): + def _get_random_images_and_labels(image_shape, label_shape): + image = np.random.random(size=image_shape).astype('float32') + label = np.random.random(size=label_shape).astype('int64') + return image, label + def __reader__(): + for _ in range(BATCH_NUM): + batch_image, batch_label = _get_random_images_and_labels( + [BATCH_SIZE, 784], [BATCH_SIZE, 1]) + yield batch_image, batch_label + return __reader__ + img = fluid.data(name='img', shape=[None, 784], dtype='float32') + label = fluid.data(name='label', shape=[None, 1], dtype='int64') + pred = fluid.layers.fc(input=img, size=10, act='softmax') + loss = fluid.layers.cross_entropy(input=pred, label=label) + avg_loss = fluid.layers.mean(loss) + optimizer = fluid.optimizer.SGD(learning_rate=0.001) + optimizer.minimize(avg_loss) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + loader = fluid.io.DataLoader.from_generator( + feed_list=[img, label], capacity=5, iterable=True) + loader.set_batch_generator(random_batch_reader(), places=place) + # 1. 训练 & 存储预测模型 + for data in loader(): + exe.run( + fluid.default_main_program(), + feed=data, + fetch_list=[avg_loss]) + model_path = "fc.example.model" + fluid.io.save_inference_model( + model_path, ["img"], [pred], exe) + # 开启命令式编程模式 + fluid.enable_dygraph() + # 2. 载入模型 & 预测 + fc = fluid.dygraph.jit.load(model_path) + x = fluid.dygraph.to_variable(np.random.random((1, 784)).astype('float32')) + pred = fc(x) + # 3. 
载入模型 & fine-tune训练 + fc = fluid.dygraph.jit.load(model_path) + fc.train() + sgd = fluid.optimizer.SGD(learning_rate=0.001, + parameter_list=fc.parameters()) + train_loader = fluid.io.DataLoader.from_generator(capacity=5) + train_loader.set_batch_generator( + random_batch_reader(), places=place) + for data in train_loader(): + img, label = data + label.stop_gradient = True + cost = fc(img) + loss = fluid.layers.cross_entropy(cost, label) + avg_loss = fluid.layers.mean(loss) + avg_loss.backward() + sgd.minimize(avg_loss) diff --git a/doc/fluid/api_cn/dygraph_cn/jit_cn/save_cn.rst b/doc/fluid/api_cn/dygraph_cn/jit_cn/save_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f0276316bacd0d7b7cb7ef6df12b1f9ac08b759f --- /dev/null +++ b/doc/fluid/api_cn/dygraph_cn/jit_cn/save_cn.rst @@ -0,0 +1,80 @@ +.. _cn_api_fluid_dygraph_jit_save: + +save +----------------- + +.. py:function:: paddle.fluid.dygraph.jit.save(layer, model_path, input_spec=None, configs=None) + +将输入的经过 ``@declarative`` 装饰的 :ref:`cn_api_fluid_dygraph_Layer` 存储为 :ref:`cn_api_fluid_dygraph_TranslatedLayer` 格式的模型, +载入后可用于预测推理或者fine-tune训练。 + +该接口将会将输入 :ref:`cn_api_fluid_dygraph_Layer` 转写后的模型结构 ``Program`` 和所有必要的持久参数变量存储至输入路径 ``model_path`` 中。 + +默认存储的 ``Program`` 文件名为 ``__model__``, 默认存储持久参数变量的文件名为 ``__variables__``, +同时会将变量的一些描述信息存储至文件 ``__variables.info__``,这些额外的信息将在fine-tune训练中使用。 + +存储的模型能够被以下API载入使用: + - :ref:`cn_api_fluid_dygraph_jit_load` + - :ref:`cn_api_fluid_io_load_inference_model` (需要配置参数 ``params_filename='__variables__'`` ) + - 其他预测库API + +参数: + - **layer** (Layer) - 需要存储的 :ref:`cn_api_fluid_dygraph_Layer` 对象。输入的 ``Layer`` 需要经过 ``@declarative`` 装饰。 + - **model_path** (str) - 存储模型的目录。 + - **input_spec** (list[Variable], 可选) - 描述存储模型的输入。此参数是传入当前存储的 ``TranslatedLayer`` forward方法的一个示例输入。如果为 ``None`` ,所有原 ``Layer`` forward方法的输入变量将都会被配置为存储模型的输入变量。默认为 ``None``。 + - **configs** (SaveLoadConfig, 可选) - 用于指定额外配置选项的 :ref:`cn_api_fluid_dygraph_jit_SaveLoadConfig` 对象。默认为 ``None``。 + +返回:无 + +**示例代码** + +.. 
code-block:: python + + import numpy as np + import paddle.fluid as fluid + from paddle.fluid.dygraph import Linear + from paddle.fluid.dygraph import declarative + BATCH_SIZE = 32 + BATCH_NUM = 20 + def random_batch_reader(): + def _get_random_images_and_labels(image_shape, label_shape): + image = np.random.random(size=image_shape).astype('float32') + label = np.random.random(size=label_shape).astype('int64') + return image, label + def __reader__(): + for _ in range(BATCH_NUM): + batch_image, batch_label = _get_random_images_and_labels( + [BATCH_SIZE, 784], [BATCH_SIZE, 1]) + yield batch_image, batch_label + return __reader__ + class LinearNet(fluid.dygraph.Layer): + def __init__(self, in_size, out_size): + super(LinearNet, self).__init__() + self._linear = Linear(in_size, out_size) + @declarative + def forward(self, x): + return self._linear(x) + # 开启命令式编程模式 + fluid.enable_dygraph() + # 创建网络 + net = LinearNet(784, 1) + adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters()) + # 创建DataLoader + train_loader = fluid.io.DataLoader.from_generator(capacity=5) + train_loader.set_batch_generator(random_batch_reader()) + # 训练 + for data in train_loader(): + img, label = data + label.stop_gradient = True + cost = net(img) + loss = fluid.layers.cross_entropy(cost, label) + avg_loss = fluid.layers.mean(loss) + avg_loss.backward() + adam.minimize(avg_loss) + net.clear_gradients() + # 存储模型 + model_path = "linear.example.model" + fluid.dygraph.jit.save( + layer=net, + model_path=model_path, + input_spec=[img]) diff --git a/doc/fluid/api_cn/dygraph_cn/jit_cn/set_code_level_cn.rst b/doc/fluid/api_cn/dygraph_cn/jit_cn/set_code_level_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..73b0830cdb7655a5b5246cdd95831264ef77d847 --- /dev/null +++ b/doc/fluid/api_cn/dygraph_cn/jit_cn/set_code_level_cn.rst @@ -0,0 +1,34 @@ +.. _cn_api_fluid_dygraph_jit_set_code_level: + +set_code_level +----------------- + +.. py:function:: paddle.fluid.dygraph.jit.set_code_level(level=100) + +设置代码级别,打印该级别 AST Transformer 转化后的代码。 + +有两种方法设置代码级别: + +1. 调用函数 ``set_code_level`` +2. 设置环境变量 ``TRANSLATOR_CODE_LEVEL`` + +.. note:: + 函数 ``set_code_level`` 的优先级高于环境变量 ``TRANSLATOR_CODE_LEVEL``。 + + +参数: + - **level** (int) - 打印的代码级别。默认值为100,这意味着打印的是所有 AST Transformer 转化后的代码。 + +**示例代码** + +.. code-block:: python + + import os + import paddle + + paddle.jit.set_code_level(2) + # It will print the transformed code at level 2, which means to print the code after second transformer, + # as the date of August 28, 2020, it is CastTransformer. + + os.environ['TRANSLATOR_CODE_LEVEL'] = '3' + # The code level is now 3, but it has no effect because it has a lower priority than `set_code_level` diff --git a/doc/fluid/api_cn/dygraph_cn/jit_cn/set_verbosity_cn.rst b/doc/fluid/api_cn/dygraph_cn/jit_cn/set_verbosity_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c185e94e5d28dc8ada4c7a8af854c83bbcc557bf --- /dev/null +++ b/doc/fluid/api_cn/dygraph_cn/jit_cn/set_verbosity_cn.rst @@ -0,0 +1,35 @@ +.. _cn_api_fluid_dygraph_jit_set_verbosity: + +set_verbosity +----------------- + +.. py:function:: paddle.fluid.dygraph.jit.set_verbosity(level=0) + +设置动态图转静态图的日志详细级别。 + +有两种方法设置日志详细级别: + +1. 调用函数 ``set_verbosity`` +2. 设置环境变量 ``TRANSLATOR_VERBOSITY`` + +.. note:: + 函数 ``set_verbosity`` 的优先级高于环境变量 ``TRANSLATOR_VERBOSITY``。 + + +参数: + - **level** (int) - 日志详细级别。值越大,表示越详细。默认值为0,表示不显示日志。 + +**示例代码** + +.. 
code-block:: python
+
+    import os
+    import paddle
+
+    paddle.jit.set_verbosity(1)
+    # The verbosity level is now 1
+
+    os.environ['TRANSLATOR_VERBOSITY'] = '3'
+    # The verbosity level is now 3, but it has no effect because it has a lower priority than `set_verbosity`
+
+
diff --git a/doc/fluid/api_cn/dygraph_cn/load_dygraph_cn.rst b/doc/fluid/api_cn/dygraph_cn/load_dygraph_cn.rst
index 3219e2d0490d0f7714d6fb7d2cda1103aa0f6b9c..39b18d7830eaafa66c6a99a770cf8e85b8fc32b5 100644
--- a/doc/fluid/api_cn/dygraph_cn/load_dygraph_cn.rst
+++ b/doc/fluid/api_cn/dygraph_cn/load_dygraph_cn.rst
@@ -3,10 +3,13 @@ load_dygraph
 -------------------------------
 
-**注意:该API仅支持【动态图】模式**
 
 .. py:function:: paddle.fluid.dygraph.load_dygraph(model_path)
 
+:api_attr: 命令式编程模式(动态图)
+
+
+
 该接口尝试从磁盘中加载参数或优化器的 ``dict`` 。
 
 该接口会同时加载 ``model_path + ".pdparams"`` 和 ``model_path + ".pdopt"`` 中的内容。
@@ -32,7 +35,8 @@ load_dygraph
         emb = fluid.dygraph.Embedding([10, 10])
         state_dict = emb.state_dict()
         fluid.save_dygraph( state_dict, "paddle_dy")
-        adam = fluid.optimizer.Adam( learning_rate = fluid.layers.noam_decay( 100, 10000) )
+        adam = fluid.optimizer.Adam( learning_rate = fluid.layers.noam_decay( 100, 10000) ,
+                                     parameter_list = emb.parameters() )
         state_dict = adam.state_dict()
         fluid.save_dygraph( state_dict, "paddle_dy")
diff --git a/doc/fluid/api_cn/dygraph_cn/no_grad_cn.rst b/doc/fluid/api_cn/dygraph_cn/no_grad_cn.rst
index 32083c7dc686308db11ff68a9417d05880b99461..9c84e82da4a895d898ea34154d71a19190f744eb 100644
--- a/doc/fluid/api_cn/dygraph_cn/no_grad_cn.rst
+++ b/doc/fluid/api_cn/dygraph_cn/no_grad_cn.rst
@@ -3,31 +3,50 @@ no_grad
 -------------------------------
 
-**注意:该API仅支持【动态图】模式**
 
-.. py:method:: paddle.fluid.dygraph.no_grad(func)
+.. py:class:: paddle.fluid.dygraph.no_grad
 
-在动态图模式中,此装饰器将会避免 ``func`` 被装饰时创建反向传播网络。
+:api_attr: 命令式编程模式(动态图)
+:old_api: paddle.fluid.dygraph.no_grad
 
-参数:
-    - **func** (str) – 不需要梯度的函数。
+
+创建一个上下文来禁用动态图梯度计算。在此模式下,每次计算的结果都将具有stop_gradient=True。
+
+也可以用作一个装饰器(需要创建实例对象作为装饰器)。
 
 **代码示例**
 
 .. code-block:: python
 
     import numpy as np
+    import paddle
     import paddle.fluid as fluid
 
-    @fluid.dygraph.no_grad
+    paddle.enable_imperative()
+
+    # 用作上下文管理器
+
+    data = np.array([[2, 3], [4, 5]]).astype('float32')
+    l0 = fluid.Linear(2, 2)  # l0.weight.gradient() is None
+    l1 = fluid.Linear(2, 2)
+    with fluid.no_grad():
+        # l1.weight.stop_gradient is False
+        tmp = l1.weight * 2  # tmp.stop_gradient is True
+    x = fluid.dygraph.to_variable(data)
+    y = l0(x) + tmp
+    o = l1(y)
+    o.backward()
+    print(tmp.gradient() is None)  # True
+    print(l0.weight.gradient() is None)  # False
+
+    # 用作装饰器
+
+    @fluid.no_grad()
     def test_layer():
-        with fluid.dygraph.guard():
-            inp = np.ones([3, 1024], dtype='float32')
-            t = fluid.dygraph.base.to_variable(inp)
-            linear1 = fluid.Linear(1024, 4, bias_attr=False)
-            linear2 = fluid.Linear(4, 4)
-            ret = linear1(t)
-            dy_ret = linear2(ret)
+        inp = np.ones([3, 1024], dtype='float32')
+        t = fluid.dygraph.base.to_variable(inp)
+        linear1 = fluid.Linear(1024, 4, bias_attr=False)
+        linear2 = fluid.Linear(4, 4)
+        ret = linear1(t)
+        dy_ret = linear2(ret)
 
     test_layer()
diff --git a/doc/fluid/api_cn/dygraph_cn/prepare_context_cn.rst b/doc/fluid/api_cn/dygraph_cn/prepare_context_cn.rst
index 295cc0bb89fcb932ebb571411f41719c4adce91d..cd18f8d75081a1fbb868e6b92dd890465bbccb29 100644
--- a/doc/fluid/api_cn/dygraph_cn/prepare_context_cn.rst
+++ b/doc/fluid/api_cn/dygraph_cn/prepare_context_cn.rst
@@ -5,6 +5,10 @@ prepare_context
 
 .. 
py:class:: paddle.fluid.dygraph.prepare_context(strategy=None) +:api_attr: 命令式编程模式(动态图) + + + 该API是进行多进程多卡训练的环境配置接口,接受一个ParallelStrategy结构体变量作为输入。当strategy属性中的nums_trainer小于2时,API会直接返回,当nums_trainer大于1且为CUDAPlace时,由于目前动态图模式仅支持GPU多卡训练,仅能配置NCCL多卡训练的环境,所以此时会对NCCL环境进行配置,具体内容包括:生成NCCL ID,并广播至参与训练的各进程,用于支持的处理器同步操作,创建并配置NCCL通信器等。 参数: diff --git a/doc/fluid/api_cn/dygraph_cn/save_dygraph_cn.rst b/doc/fluid/api_cn/dygraph_cn/save_dygraph_cn.rst index cc28502a29d542127f03d23be3ee2cf8ad9fd6e0..e830d9c8d071908716d1439efbed07cca13a456e 100644 --- a/doc/fluid/api_cn/dygraph_cn/save_dygraph_cn.rst +++ b/doc/fluid/api_cn/dygraph_cn/save_dygraph_cn.rst @@ -3,10 +3,13 @@ save_dygraph ------------------------------- -**注意:该API仅支持【动态图】模式** .. py:function:: paddle.fluid.dygraph.save_dygraph(state_dict, model_path) +:api_attr: 命令式编程模式(动态图) + + + 该接口将传入的参数或优化器的 ``dict`` 保存到磁盘上。 ``state_dict`` 是通过 :ref:`cn_api_fluid_dygraph_Layer` 的 ``state_dict()`` 方法得到的。 @@ -29,15 +32,13 @@ save_dygraph import paddle.fluid as fluid with fluid.dygraph.guard(): - emb = fluid.dygraph.Embedding( - size=[10, 32], - param_attr='emb.w', - is_sparse=False) + emb = fluid.dygraph.Embedding([10, 10]) + state_dict = emb.state_dict() - fluid.save_dygraph(state_dict, "paddle_dy") # 会保存为 paddle_dy.pdparams + fluid.save_dygraph( state_dict, "paddle_dy") # 会保存为 paddle_dy.pdparams + + adam = fluid.optimizer.Adam( learning_rate = fluid.layers.noam_decay( 100, 10000), + parameter_list = emb.parameters() ) - adam = fluid.optimizer.Adam( - learning_rate=fluid.layers.noam_decay(100, 10000), - parameter_list = emb.parameters()) state_dict = adam.state_dict() - fluid.save_dygraph(state_dict, "paddle_dy") # 会保存为 paddle_dy.pdopt + fluid.save_dygraph( state_dict, "paddle_dy") # 会保存为 paddle_dy.pdopt \ No newline at end of file diff --git a/doc/fluid/api_cn/dygraph_cn/to_variable_cn.rst b/doc/fluid/api_cn/dygraph_cn/to_variable_cn.rst index 60bb44397abd23986fdaa191316c5e2857847ac4..b562d2cafb0b5f90458ed194677ddee783118e1b 100644 --- a/doc/fluid/api_cn/dygraph_cn/to_variable_cn.rst +++ b/doc/fluid/api_cn/dygraph_cn/to_variable_cn.rst @@ -3,19 +3,26 @@ to_variable ------------------------------- -**注意:该API仅支持【动态图】模式** .. 
py:function:: paddle.fluid.dygraph.to_variable(value, name=None, zero_copy=None) -该函数实现从numpy\.ndarray对象或者Variable对象创建一个 ``Variable`` 类型的对象。 + +:api_attr: 命令式编程模式(动态图) + + + +该函数实现从tuple、list、numpy\.ndarray、Variable、ComplexVariable 对象创建一个 ``Variable`` 类型的对象。 + 参数: - - **value** (ndarray|Variable) – 需要转换的numpy\.ndarray或Variable对象,维度可以为多维,数据类型为numpy\.{float16, float32, float64, int16, int32, int64, uint8, uint16}中的一种。 + - **value** (tuple|list|ndarray|Variable|Tensor|ComplexVariable) – 初始化的数据。可以是tuple、list、numpy\.ndarray、Variable、ComplexVariable。 + 维度可以为多维,数据类型为numpy\.{float16, float32, float64, int16, int32, int64, uint8, uint16}中的一种。 - **name** (str, 可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 - **zero_copy** (bool, 可选) – 是否与输入的numpy数组共享内存。此参数仅适用于CPUPlace,当它为None时将设置为True。默认值为None。 + - **dtype** (str, 可选) - 返回的 ``Variable`` 所需的数据类型。可以是 'bool','float16','float32','float64','int8','int16','int32','int64','uint8'。默认值: None。 -返回:如果 ``value`` 是numpy\.ndarray对象,返回由numpy\.ndarray对象创建的 ``Tensor`` ,其数据类型和维度与 ``value`` 一致;如果 ``value`` 是Variable对象,返回 ``value`` 。 +返回:如果 ``value`` 是tuple/list/numpy\.ndarray对象,返回对应numpy\.ndarray对象创建的 ``Tensor`` ;如果 ``value`` 是Variable对象,直接返回 ``value`` 。 返回类型:Variable @@ -25,13 +32,25 @@ to_variable import numpy as np import paddle.fluid as fluid - with fluid.dygraph.guard(fluid.CPUPlace()): + x = np.ones([2, 2], np.float32) y = fluid.dygraph.to_variable(x, zero_copy=False) x[0][0] = -1 y[0][0].numpy() # array([1.], dtype=float32) + y = fluid.dygraph.to_variable(x) x[0][0] = 0 y[0][0].numpy() # array([0.], dtype=float32) + c = np.array([2+1j, 2]) + z = fluid.dygraph.to_variable(c) + z.numpy() # array([2.+1.j, 2.+0.j]) + z.dtype # 'complex128' + + y = fluid.dygraph.to_variable([[0.1, 1.2], [2.2, 3.1], [4.9, 5.2]]) + y.shape # [3L, 2L] + y = fluid.dygraph.to_variable(((0.1, 1.2), (2.2, 3.1), (4.9, 5.2)), dtype='int32') + y.shape # [3L, 2L] + y.dtype # core.VarDesc.VarType.INT32 + diff --git a/doc/fluid/api_cn/executor_cn/Executor_cn.rst b/doc/fluid/api_cn/executor_cn/Executor_cn.rst index f879d28d0aac0a6a14c3cda9494771d15198b5b1..273b6bc79031e78ee56f65b4f7dbf575748d6f6b 100644 --- a/doc/fluid/api_cn/executor_cn/Executor_cn.rst +++ b/doc/fluid/api_cn/executor_cn/Executor_cn.rst @@ -3,14 +3,17 @@ Executor ------------------------------- -**注意:该API仅支持【静态图】模式** -.. py:class:: paddle.fluid.executor.Executor (place) +.. 
py:class:: paddle.fluid.executor.Executor (place=None) -Executor支持单GPU、多GPU以及CPU运行。在Executor构造时,需要传入设备。 +:api_attr: 声明式编程模式(静态图) + + + +Executor支持单GPU、多GPU以及CPU运行。 参数: - - **place** (fluid.CPUPlace()|fluid.CUDAPlace(N)) – 该参数表示Executor执行所在的设备,这里的N为GPU对应的ID。 + - **place** (fluid.CPUPlace()|fluid.CUDAPlace(N)|None) – 该参数表示Executor执行所在的设备,这里的N为GPU对应的ID。当该参数为 `None` 时,PaddlePaddle会根据其安装版本来设置默认设备。当PaddlePaddle是CPU版时,默认运行设备将会设置为 `fluid.CPUPlace()` ;当PaddlePaddle是GPU版本时,默认执行设备将会设置为 `fluid.CUDAPlace(0)` 。默认值为None。 返回:初始化后的 ``Executor`` 对象 @@ -25,14 +28,18 @@ Executor支持单GPU、多GPU以及CPU运行。在Executor构造时,需要传 import numpy import os - use_cuda = True - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - exe = fluid.Executor(place) + # 显式设置运行设备 + # use_cuda = True + # place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + # exe = fluid.Executor(place) + + # 如果不显示设置运行设备,PaddlePaddle会设置默认运行设备 + exe = fluid.Executor() train_program = fluid.Program() startup_program = fluid.Program() with fluid.program_guard(train_program, startup_program): - data = fluid.layers.data(name='X', shape=[1], dtype='float32') + data = fluid.data(name='X', shape=[None, 1], dtype='float32') hidden = fluid.layers.fc(input=data, size=10) loss = fluid.layers.mean(hidden) fluid.optimizer.SGD(learning_rate=0.01).minimize(loss) @@ -54,8 +61,13 @@ Executor支持单GPU、多GPU以及CPU运行。在Executor构造时,需要传 # 否则fluid会把逻辑核的所有数目设为CPU_NUM, # 在这种情况下,输入的batch size应大于CPU_NUM, # 否则程序会异常中断。 - if not use_cuda: - os.environ['CPU_NUM'] = str(2) + + # 显式设置运行设备 + # if not use_cuda: + # os.environ['CPU_NUM'] = str(2) + + # 未显示设置运行设备且安装的Paddle为CPU版本 + os.environ['CPU_NUM'] = str(2) compiled_prog = compiler.CompiledProgram( train_program).with_data_parallel( @@ -83,7 +95,7 @@ Executor支持单GPU、多GPU以及CPU运行。在Executor构造时,需要传 exe.close() -.. py:method:: run(program=None, feed=None, fetch_list=None, feed_var_name='feed', fetch_var_name='fetch', scope=None, return_numpy=True,use_program_cache=False) +.. 
py:method:: run(program=None, feed=None, fetch_list=None, feed_var_name='feed', fetch_var_name='fetch', scope=None, return_numpy=True, use_program_cache=False, use_prune=False) 执行指定的Program或者CompiledProgram。需要注意的是,执行器会执行Program或CompiledProgram中的所有算子,而不会根据fetch_list对Program或CompiledProgram中的算子进行裁剪。同时,需要传入运行该模型用到的scope,如果没有指定scope,执行器将使用全局scope,即fluid.global_scope()。 @@ -96,6 +108,7 @@ Executor支持单GPU、多GPU以及CPU运行。在Executor构造时,需要传 - **scope** (Scope) – 该参数表示执行当前program所使用的作用域,用户可以为不同的program指定不同的作用域。默认值:fluid.global_scope()。 - **return_numpy** (bool) – 该参数表示是否将返回返回的计算结果(fetch list中指定的变量)转化为numpy;如果为False,则每个变量返回的类型为LoDTensor,否则返回变量的类型为numpy.ndarray。默认为:True。 - **use_program_cache** (bool) – 该参数表示是否对输入的Program进行缓存。如果该参数为True,在以下情况时,模型运行速度可能会更快:输入的program为 ``fluid.Program`` ,并且模型运行过程中,调用该接口的参数(program、 feed变量名和fetch_list变量)名始终不变。默认为:False。 + - **use_prune** (bool) – 该参数表示是否对输入的Program进行剪枝。如果该参数为True,输入的Program会在run之前根据 ``feed`` 和 ``fetch_list`` 进行剪枝,剪枝的逻辑是将产生 ``feed`` 的 ``Variable`` 和 ``Operator`` 以及不产生 ``fetch_list`` 的 ``Variable`` 和 ``Operator`` 进行裁剪。默认为:False,表示不进行剪枝。请注意,如果将 ``Optimizer.minimize()`` 方法返回的 ``tuple`` 传入 ``fetch_list`` 中,则 ``use_prune`` 会被重写为True,并且会开启剪枝。 返回:返回fetch_list中指定的变量值 @@ -117,7 +130,7 @@ Executor支持单GPU、多GPU以及CPU运行。在Executor构造时,需要传 place = fluid.CPUPlace() # fluid.CUDAPlace(0) exe = fluid.Executor(place) - data = fluid.layers.data(name='X', shape=[1], dtype='float32') + data = fluid.data(name='X', shape=[None, 1], dtype='float32') hidden = fluid.layers.fc(input=data, size=10) loss = fluid.layers.mean(hidden) adam = fluid.optimizer.Adam() @@ -162,8 +175,8 @@ train_from_dataset可以非常容易扩展到大规模分布式在线和离线 place = fluid.CPUPlace() # 通过设置place = fluid.CUDAPlace(0)使用GPU exe = fluid.Executor(place) - x = fluid.layers.data(name="x", shape=[10, 10], dtype="int64") - y = fluid.layers.data(name="y", shape=[1], dtype="int64", lod_level=1) + x = fluid.data(name="x", shape=[None, 10, 10], dtype="int64") + y = fluid.data(name="y", shape=[None, 1], dtype="int64", lod_level=1) dataset = fluid.DatasetFactory().create_dataset() dataset.set_use_var([x, y]) dataset.set_thread(1) @@ -197,12 +210,13 @@ train_from_dataset可以非常容易扩展到大规模分布式在线和离线 import paddle.fluid as fluid place = fluid.CPUPlace() # 使用GPU时可设置place = fluid.CUDAPlace(0) exe = fluid.Executor(place) - x = fluid.layers.data(name="x", shape=[10, 10], dtype="int64") - y = fluid.layers.data(name="y", shape=[1], dtype="int64", lod_level=1) + x = fluid.data(name="x", shape=[None, 10, 10], dtype="int64") + y = fluid.data(name="y", shape=[None, 1], dtype="int64", lod_level=1) dataset = fluid.DatasetFactory().create_dataset() dataset.set_use_var([x, y]) dataset.set_thread(1) filelist = [] # 您可以设置您自己的filelist,如filelist = ["dataA.txt"] dataset.set_filelist(filelist) exe.run(fluid.default_startup_program()) - exe.infer_from_dataset(program=fluid.default_main_program(),dataset=dataset) + exe.infer_from_dataset(program=fluid.default_main_program(), + dataset=dataset) diff --git a/doc/fluid/api_cn/executor_cn/global_scope_cn.rst b/doc/fluid/api_cn/executor_cn/global_scope_cn.rst index 1c7aec0a19a37676e77aefebd0781f2649ddf5e6..277bb23db092c0b44704ac5905a968f364d3bc7b 100644 --- a/doc/fluid/api_cn/executor_cn/global_scope_cn.rst +++ b/doc/fluid/api_cn/executor_cn/global_scope_cn.rst @@ -3,10 +3,13 @@ global_scope ------------------------------- -**注意:该API仅支持【静态图】模式** .. 
py:function:: paddle.fluid.global_scope() +:api_attr: 声明式编程模式(静态图) + + + 获取全局/默认作用域实例。很多API使用默认 ``global_scope`` ,例如 ``Executor.run`` 等。 返回:全局/默认作用域实例 diff --git a/doc/fluid/api_cn/executor_cn/scope_guard_cn.rst b/doc/fluid/api_cn/executor_cn/scope_guard_cn.rst index ced17a622b58e1179653400e8e690e9f3ffb26d7..e220cd8d451708031d2526534a686a852ba67807 100644 --- a/doc/fluid/api_cn/executor_cn/scope_guard_cn.rst +++ b/doc/fluid/api_cn/executor_cn/scope_guard_cn.rst @@ -3,10 +3,13 @@ scope_guard ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.executor.scope_guard (scope) +:api_attr: 声明式编程模式(静态图) + + + 该接口通过 python 的 ``with`` 语句切换作用域(scope)。 作用域记录了变量名和变量 ( :ref:`api_guide_Variable` ) 之间的映射关系,类似于编程语言中的大括号。 diff --git a/doc/fluid/api_cn/fluid_cn.rst b/doc/fluid/api_cn/fluid_cn.rst old mode 100644 new mode 100755 index 054f39e772568dee67df2cb37acc0e81efc1857a..ae356abbb90744b68bbd3661c13bcc8a606351c6 --- a/doc/fluid/api_cn/fluid_cn.rst +++ b/doc/fluid/api_cn/fluid_cn.rst @@ -23,11 +23,15 @@ fluid fluid_cn/DataFeeder_cn.rst fluid_cn/default_main_program_cn.rst fluid_cn/default_startup_program_cn.rst + fluid_cn/disable_dygraph_cn.rst + fluid_cn/device_guard_cn.rst fluid_cn/DistributeTranspiler_cn.rst fluid_cn/DistributeTranspilerConfig_cn.rst fluid_cn/embedding_cn.rst + fluid_cn/enable_dygraph_cn.rst fluid_cn/ExecutionStrategy_cn.rst fluid_cn/Executor_cn.rst + fluid_cn/get_flags_cn.rst fluid_cn/global_scope_cn.rst fluid_cn/gradients_cn.rst fluid_cn/in_dygraph_mode_cn.rst @@ -47,6 +51,8 @@ fluid fluid_cn/require_version_cn.rst fluid_cn/save_cn.rst fluid_cn/scope_guard_cn.rst + fluid_cn/set_flags_cn.rst + fluid_cn/set_global_initializer_cn.rst fluid_cn/Tensor_cn.rst fluid_cn/Variable_cn.rst fluid_cn/WeightNormParamAttr_cn.rst diff --git a/doc/fluid/api_cn/fluid_cn/BuildStrategy_cn.rst b/doc/fluid/api_cn/fluid_cn/BuildStrategy_cn.rst index 98cb0e800f79a1c25f3d92248d3a26de2191de8d..2d6f2fa0586898b53490af9f7a5bbb0a481a3aaa 100644 --- a/doc/fluid/api_cn/fluid_cn/BuildStrategy_cn.rst +++ b/doc/fluid/api_cn/fluid_cn/BuildStrategy_cn.rst @@ -3,11 +3,14 @@ BuildStrategy ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:class:: paddle.fluid.BuildStrategy -``BuildStrategy`` 使用户更方便地控制[ ``ParallelExecutor`` ](../fluid_cn.html\#parallelexecutor)中计算图的建造方法,可通过设置 ``ParallelExecutor`` 中的 ``BuildStrategy`` 成员来实现此功能。 +:api_attr: 声明式编程模式(静态图) + + + +``BuildStrategy`` 使用户更方便地控制 :ref:`cn_api_fluid_ParallelExecutor` 中计算图的建造方法,可通过设置 ``ParallelExecutor`` 中的 ``BuildStrategy`` 成员来实现此功能。 **代码示例** @@ -68,6 +71,7 @@ bool类型。表明是否融合(fuse) broadcast ops。该选项指在Reduce模 **代码示例** .. code-block:: python + import paddle.fluid as fluid build_strategy = fluid.BuildStrategy() build_strategy.fuse_broadcast_ops = True @@ -108,6 +112,7 @@ bool类型。表明是否融合(fuse) relu和depthwise_conv2d,节省GPU内存 import os import numpy as np + import paddle.fluid as fluid import paddle.fluid.compiler as compiler use_cuda = True diff --git a/doc/fluid/api_cn/fluid_cn/CPUPlace_cn.rst b/doc/fluid/api_cn/fluid_cn/CPUPlace_cn.rst index e07af202a05fd5bce8716cc61351954a39af005d..e091352c9018b355e234f8407625199d51c48555 100644 --- a/doc/fluid/api_cn/fluid_cn/CPUPlace_cn.rst +++ b/doc/fluid/api_cn/fluid_cn/CPUPlace_cn.rst @@ -5,6 +5,9 @@ CPUPlace .. 
py:class:: paddle.fluid.CPUPlace + + + ``CPUPlace`` 是一个设备描述符,表示一个分配或将要分配 ``Tensor`` 或 ``LoDTensor`` 的 ``CPU`` 设备。 **代码示例** diff --git a/doc/fluid/api_cn/fluid_cn/CUDAPinnedPlace_cn.rst b/doc/fluid/api_cn/fluid_cn/CUDAPinnedPlace_cn.rst index 59a8f8e74c13916017e5fcda1c804f8a27928f27..a3e669344b2bac46b8cb57d24bbc633bb3549be3 100644 --- a/doc/fluid/api_cn/fluid_cn/CUDAPinnedPlace_cn.rst +++ b/doc/fluid/api_cn/fluid_cn/CUDAPinnedPlace_cn.rst @@ -5,6 +5,9 @@ CUDAPinnedPlace .. py:class:: paddle.fluid.CUDAPinnedPlace + + + ``CUDAPinnedPlace`` 是一个设备描述符,它所指代的页锁定内存由 CUDA 函数 ``cudaHostAlloc()`` 在主机内存上分配,主机的操作系统将不会对这块内存进行分页和交换操作,可以通过直接内存访问技术访问,加速主机和 GPU 之间的数据拷贝。 有关 CUDA 的数据转移和 ``pinned memory``,参见 `官方文档 `_ 。 diff --git a/doc/fluid/api_cn/fluid_cn/CUDAPlace_cn.rst b/doc/fluid/api_cn/fluid_cn/CUDAPlace_cn.rst index 0ebbf7f6fe993b38bfe502c1aaef40e496464380..ba7cf62280b52b17dc310c8d9c1a5a4ca2cc6feb 100644 --- a/doc/fluid/api_cn/fluid_cn/CUDAPlace_cn.rst +++ b/doc/fluid/api_cn/fluid_cn/CUDAPlace_cn.rst @@ -5,6 +5,9 @@ CUDAPlace .. py:class:: paddle.fluid.CUDAPlace + + + .. note:: 多卡任务请先使用 FLAGS_selected_gpus 环境变量设置可见的GPU设备,下个版本将会修正 CUDA_VISIBLE_DEVICES 环境变量无效的问题。 diff --git a/doc/fluid/api_cn/fluid_cn/CompiledProgram_cn.rst b/doc/fluid/api_cn/fluid_cn/CompiledProgram_cn.rst index f9ec0995503393891741794489233c88df3f4d24..c6576c63468cac361d12294eb06eb8063d33814f 100644 --- a/doc/fluid/api_cn/fluid_cn/CompiledProgram_cn.rst +++ b/doc/fluid/api_cn/fluid_cn/CompiledProgram_cn.rst @@ -3,10 +3,13 @@ CompiledProgram ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:class:: paddle.fluid.CompiledProgram(program_or_graph, build_strategy=None) +:api_attr: 声明式编程模式(静态图) + + + CompiledProgram根据 `build_strategy` 的配置将输入的Program或Graph进行转换和优化,例如:计算图中算子融合、计算图执行过程中开启内存/显存优化等,关于build_strategy更多信息。请参阅 ``fluid.BuildStrategy`` 。 参数: @@ -22,34 +25,29 @@ CompiledProgram根据 `build_strategy` 的配置将输入的Program或Graph进 .. code-block:: python import paddle.fluid as fluid - import paddle.fluid.compiler as compiler import numpy - import os - + place = fluid.CUDAPlace(0) # fluid.CPUPlace() exe = fluid.Executor(place) - - data = fluid.layers.data(name='X', shape=[1], dtype='float32') + + data = fluid.data(name='X', shape=[None, 1], dtype='float32') hidden = fluid.layers.fc(input=data, size=10) loss = fluid.layers.mean(hidden) fluid.optimizer.SGD(learning_rate=0.01).minimize(loss) exe.run(fluid.default_startup_program()) - build_strategy = fluid.BuildStrategy() - build_strategy.fuse_all_optimizer_ops = True - compiled_prog = compiler.CompiledProgram( - fluid.default_main_program(), - build_strategy=build_strategy) - + compiled_prog = fluid.CompiledProgram( + fluid.default_main_program()) + x = numpy.random.random(size=(10, 1)).astype('float32') loss_data, = exe.run(compiled_prog, - feed={"X": x}, - fetch_list=[loss.name]) + feed={"X": x}, + fetch_list=[loss.name]) .. 
py:method:: with_data_parallel(loss_name=None, build_strategy=None, exec_strategy=None, share_vars_from=None, places=None) -该接口用于将输入的Program或Graph进行转换,以便通过数据并行模式运行该模型。用户可以通过 `build_strategy` 和 `exec_strategy` 设置计算图构建和计算图执行过程中可以进行的一些优化,例如:将梯度聚合的AllReduce操作进行融合、指定计算图运行过程中使用的线程池大小等。**注意:如果在构建CompiledProgram和调用with_data_parallel时都指定了build_strategy,在CompiledProgram中的build_strategy会被复写,因此,如果是数据并行训练,建议在调用with_data_parallel接口是设置build_strategy**。 +该接口用于将输入的Program或Graph进行转换,以便通过数据并行模式运行该模型。用户可以通过 `build_strategy` 和 `exec_strategy` 设置计算图构建和计算图执行过程中可以进行的一些优化,例如:将梯度聚合的AllReduce操作进行融合、指定计算图运行过程中使用的线程池大小等。**注意:如果在构建CompiledProgram和调用with_data_parallel时都指定了build_strategy,在CompiledProgram中的build_strategy会被复写,因此,如果是数据并行训练,建议在调用with_data_parallel接口时设置build_strategy**。 参数: - **loss_name** (str) - 该参数为模型最后得到的损失变量的名字,**注意:如果是模型训练,必须设置loss_name,否则计算结果可能会有问题。** 默认为:None。 @@ -70,45 +68,47 @@ CompiledProgram根据 `build_strategy` 的配置将输入的Program或Graph进 **代码示例** .. code-block:: python - + import paddle.fluid as fluid - import paddle.fluid.compiler as compiler import numpy import os - + use_cuda = True place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + parallel_places = [fluid.CUDAPlace(0), fluid.CUDAPlace(1)] if use_cuda else [fluid.CPUPlace()] * 2 + # 注意:如果你使用CPU运行程序,需要具体设置CPU_NUM, # 否则fluid会把逻辑核的所有数目设为CPU_NUM, # 在这种情况下,输入的batch size应大于CPU_NUM, # 否则程序会异常中断。 if not use_cuda: os.environ['CPU_NUM'] = str(2) - + exe = fluid.Executor(place) - - data = fluid.layers.data(name='X', shape=[1], dtype='float32') + + data = fluid.data(name='X', shape=[None, 1], dtype='float32') hidden = fluid.layers.fc(input=data, size=10) loss = fluid.layers.mean(hidden) + test_program = fluid.default_main_program().clone(for_test=True) fluid.optimizer.SGD(learning_rate=0.01).minimize(loss) - + exe.run(fluid.default_startup_program()) - build_strategy = fluid.BuildStrategy() - build_strategy.fuse_all_reduce_ops = True - compiled_train_prog = compiler.CompiledProgram( - fluid.default_main_program()).with_data_parallel( - loss_name=loss.name, build_strategy=build_strategy) - # 注意:如果此处不设置share_vars_from=compiled_train_prog,测试过程中用的参数与训练使用的参数是不一致 - compiled_test_prog = compiler.CompiledProgram( - test_program).with_data_parallel( - share_vars_from=compiled_train_prog) + compiled_train_prog = fluid.CompiledProgram( + fluid.default_main_program()).with_data_parallel( + loss_name=loss.name, places=parallel_places) + # 注意:如果此处不设置share_vars_from=compiled_train_prog, + # 测试过程中用的参数与训练使用的参数是不一致 + compiled_test_prog = fluid.CompiledProgram( + test_program).with_data_parallel( + share_vars_from=compiled_train_prog, + places=parallel_places) train_data = numpy.random.random(size=(10, 1)).astype('float32') loss_data, = exe.run(compiled_train_prog, - feed={"X": train_data}, - fetch_list=[loss.name]) + feed={"X": train_data}, + fetch_list=[loss.name]) test_data = numpy.random.random(size=(10, 1)).astype('float32') loss_data, = exe.run(compiled_test_prog, - feed={"X": test_data}, - fetch_list=[loss.name]) \ No newline at end of file + feed={"X": test_data}, + fetch_list=[loss.name]) \ No newline at end of file diff --git a/doc/fluid/api_cn/fluid_cn/DataFeedDesc_cn.rst b/doc/fluid/api_cn/fluid_cn/DataFeedDesc_cn.rst index 18f2ef957e98cfb9270509eadd9bc2b47c823a5f..29cf9b2a43c30fe7c6dd47af702eef29f9a863ec 100644 --- a/doc/fluid/api_cn/fluid_cn/DataFeedDesc_cn.rst +++ b/doc/fluid/api_cn/fluid_cn/DataFeedDesc_cn.rst @@ -3,10 +3,13 @@ DataFeedDesc ------------------------------- -**注意:该API仅支持【静态图】模式** .. 
py:class:: paddle.fluid.DataFeedDesc(proto_file) +:api_attr: 声明式编程模式(静态图) + + + 描述训练数据的格式。输入是一个文件路径名,其内容是protobuf message。 可以参考 :code:`paddle/fluid/framework/data_feed.proto` 查看我们如何定义message diff --git a/doc/fluid/api_cn/fluid_cn/DataFeeder_cn.rst b/doc/fluid/api_cn/fluid_cn/DataFeeder_cn.rst index 944974858402570fef9d3c4a6e795fac1ea3ab8f..1151d922e5db990139ac616e2aca2d617c1931f5 100644 --- a/doc/fluid/api_cn/fluid_cn/DataFeeder_cn.rst +++ b/doc/fluid/api_cn/fluid_cn/DataFeeder_cn.rst @@ -3,10 +3,13 @@ DataFeeder ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:class:: paddle.fluid.DataFeeder(feed_list, place, program=None) +:api_attr: 声明式编程模式(静态图) + + + ``DataFeeder`` 负责将reader(读取器)返回的数据转成一种特殊的数据结构,使它们可以输入到 ``Executor`` 和 ``ParallelExecutor`` 中。 diff --git a/doc/fluid/api_cn/fluid_cn/DistributeTranspilerConfig_cn.rst b/doc/fluid/api_cn/fluid_cn/DistributeTranspilerConfig_cn.rst index 7f4e94850a0016bd7bc6dbe72bf554a79000bcc9..398aceb246145d47be589ac166159b4259bedc87 100644 --- a/doc/fluid/api_cn/fluid_cn/DistributeTranspilerConfig_cn.rst +++ b/doc/fluid/api_cn/fluid_cn/DistributeTranspilerConfig_cn.rst @@ -6,6 +6,9 @@ DistributeTranspilerConfig .. py:class:: paddle.fluid.DistributeTranspilerConfig + + + .. py:attribute:: slice_var_up (bool) 为多个Pserver(parameter server)将tensor切片, 默认为True。 diff --git a/doc/fluid/api_cn/fluid_cn/DistributeTranspiler_cn.rst b/doc/fluid/api_cn/fluid_cn/DistributeTranspiler_cn.rst index 8876419cd8271936283b7a45cd2aa2858941bdb6..ab4fb17c706485902823d4bdc04c7e6c30498944 100644 --- a/doc/fluid/api_cn/fluid_cn/DistributeTranspiler_cn.rst +++ b/doc/fluid/api_cn/fluid_cn/DistributeTranspiler_cn.rst @@ -6,6 +6,9 @@ DistributeTranspiler .. py:class:: paddle.fluid.DistributeTranspiler (config=None) + + + 该类可以把fluid program转变为分布式数据并行计算的program, 有PServer和NCCL2两种模式。 在Pserver(全称:parameter server)模式下, 通过 ``transpile`` 将用于单机训练的 ``program`` 转译为可用于parameter server的分布式架构(即PServer,参数服务器)来进行训练的program。 在NCCL2模式下, 通过 ``transpile`` 将用于单机训练的 ``program`` 转译为可用于NCCL2的分布式架构来进行训练的program。在NCCL2模式下,transpiler会在 ``startup_program`` 中附加一个 ``NCCL_ID`` 广播 diff --git a/doc/fluid/api_cn/fluid_cn/ExecutionStrategy_cn.rst b/doc/fluid/api_cn/fluid_cn/ExecutionStrategy_cn.rst index 4d6cc28fa05d6a14f74650d6078a98ba06fb9d5c..25b623fbffda98bd12aea2579d882f5e33d97a43 100644 --- a/doc/fluid/api_cn/fluid_cn/ExecutionStrategy_cn.rst +++ b/doc/fluid/api_cn/fluid_cn/ExecutionStrategy_cn.rst @@ -3,10 +3,13 @@ ExecutionStrategy ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:class:: paddle.fluid.ExecutionStrategy +:api_attr: 声明式编程模式(静态图) + + + 通过设置 ``ExecutionStrategy`` 中的选项,用户可以对执行器的执行配置进行调整,比如设置执行器中线程池的大小等。 返回:初始化后的ExecutionStrategy的实例 @@ -33,7 +36,7 @@ ExecutionStrategy train_exe = fluid.ParallelExecutor(use_cuda=False, loss_name=avg_loss.name, - exec_strategy=exec_strategy) + exec_strategy=exec_strategy) .. py:attribute:: num_iteration_per_drop_scope diff --git a/doc/fluid/api_cn/fluid_cn/Executor_cn.rst b/doc/fluid/api_cn/fluid_cn/Executor_cn.rst index d668825c5b716f46affec5db405301379a90cf5e..7a2053a891c88a99d7336528f33b7ef87bb25f11 100644 --- a/doc/fluid/api_cn/fluid_cn/Executor_cn.rst +++ b/doc/fluid/api_cn/fluid_cn/Executor_cn.rst @@ -4,14 +4,17 @@ Executor ------------------------------- -**注意:该API仅支持【静态图】模式** -.. py:class:: paddle.fluid.Executor (place) +.. 
py:class:: paddle.fluid.Executor (place=None) -Executor支持单GPU、多GPU以及CPU运行。在Executor构造时,需要传入设备。 +:api_attr: 声明式编程模式(静态图) + + + +Executor支持单GPU、多GPU以及CPU运行。 参数: - - **place** (fluid.CPUPlace()|fluid.CUDAPlace(N)) – 该参数表示Executor执行所在的设备,这里的N为GPU对应的ID。 + - **place** (fluid.CPUPlace()|fluid.CUDAPlace(N)|None) – 该参数表示Executor执行所在的设备,这里的N为GPU对应的ID。当该参数为 `None` 时,PaddlePaddle会根据其安装版本设置默认的运行设备。当安装的Paddle为CPU版时,默认运行设置会设置成 `CPUPlace()` ,而当Paddle为GPU版时,默认运行设备会设置成 `CUDAPlace(0)` 。默认值为None。 返回:初始化后的 ``Executor`` 对象 @@ -26,9 +29,13 @@ Executor支持单GPU、多GPU以及CPU运行。在Executor构造时,需要传 import numpy import os - use_cuda = True - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - exe = fluid.Executor(place) + # 显式设置运行设备 + # use_cuda = True + # place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + # exe = fluid.Executor(place) + + # 如果不显示设置运行设备,PaddlePaddle会设置默认运行设备 + exe = fluid.Executor() train_program = fluid.Program() startup_program = fluid.Program() @@ -55,8 +62,13 @@ Executor支持单GPU、多GPU以及CPU运行。在Executor构造时,需要传 # 否则fluid会把逻辑核的所有数目设为CPU_NUM, # 在这种情况下,输入的batch size应大于CPU_NUM, # 否则程序会异常中断。 - if not use_cuda: - os.environ['CPU_NUM'] = str(2) + + # 显式设置运行设备 + # if not use_cuda: + # os.environ['CPU_NUM'] = str(2) + + # 未显示设置运行设备且安装的Paddle为CPU版本 + os.environ['CPU_NUM'] = str(2) compiled_prog = compiler.CompiledProgram( train_program).with_data_parallel( @@ -84,7 +96,7 @@ Executor支持单GPU、多GPU以及CPU运行。在Executor构造时,需要传 exe.close() -.. py:method:: run(program=None, feed=None, fetch_list=None, feed_var_name='feed', fetch_var_name='fetch', scope=None, return_numpy=True,use_program_cache=False) +.. py:method:: run(program=None, feed=None, fetch_list=None, feed_var_name='feed', fetch_var_name='fetch', scope=None, return_numpy=True, use_program_cache=False, return_merged=True) 执行指定的Program或者CompiledProgram。需要注意的是,执行器会执行Program或CompiledProgram中的所有算子,而不会根据fetch_list对Program或CompiledProgram中的算子进行裁剪。同时,需要传入运行该模型用到的scope,如果没有指定scope,执行器将使用全局scope,即fluid.global_scope()。 @@ -95,9 +107,10 @@ Executor支持单GPU、多GPU以及CPU运行。在Executor构造时,需要传 - **feed_var_name** (str) – 该参数表示数据输入算子(feed operator)的输入变量名称。默认为:"feed"。 - **fetch_var_name** (str) – 该参数表示结果获取算子(fetch operator)的输出变量名称。默认为:"fetch"。 - **scope** (Scope) – 该参数表示执行当前program所使用的作用域,用户可以为不同的program指定不同的作用域。默认值:fluid.global_scope()。 - - **return_numpy** (bool) – 该参数表示是否将返回返回的计算结果(fetch list中指定的变量)转化为numpy;如果为False,则每个变量返回的类型为LoDTensor,否则返回变量的类型为numpy.ndarray。默认为:True。 + - **return_numpy** (bool) – 该参数表示是否将返回的计算结果(fetch list中指定的变量)转化为numpy;如果为False,则每个变量返回的类型为LoDTensor,否则返回变量的类型为numpy.ndarray。默认为:True。 - **use_program_cache** (bool) – 该参数表示是否对输入的Program进行缓存。如果该参数为True,在以下情况时,模型运行速度可能会更快:输入的program为 ``fluid.Program`` ,并且模型运行过程中,调用该接口的参数(program、 feed变量名和fetch_list变量)名始终不变。默认为:False。 - + - **return_merged** (bool) – 该参数表示是否按照执行设备维度将返回的计算结果(fetch list中指定的变量)进行合并。如果 ``return_merged`` 设为False,返回值类型是一个Tensor的二维列表( ``return_numpy`` 设为Fasle时)或者一个numpy.ndarray的二维列表( ``return_numpy`` 设为True时)。如果 ``return_merged`` 设为True,返回值类型是一个Tensor的一维列表( ``return_numpy`` 设为Fasle时)或者一个numpy.ndarray的一维列表( ``return_numpy`` 设为True时)。更多细节请参考示例代码2。如果返回的计算结果是变长的,请设置 ``return_merged`` 为False,即不按照执行设备维度合并返回的计算结果。该参数的默认值为True,但这仅是为了兼容性考虑,在未来的版本中默认值可能会更改为False。 + 返回:返回fetch_list中指定的变量值 返回类型:List @@ -107,7 +120,7 @@ Executor支持单GPU、多GPU以及CPU运行。在Executor构造时,需要传 2. 如果可用的CPU核数或GPU卡数大于1,则fetch出来的结果为不同设备上的相同变量值(fetch_list中的变量)在第0维拼接在一起。 -**示例代码** +**示例代码1** .. 
code-block:: python @@ -131,6 +144,62 @@ Executor支持单GPU、多GPU以及CPU运行。在Executor构造时,需要传 outs = exe.run(feed={'X': x}, fetch_list=[loss.name]) + +**示例代码2** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + # 创建Executor对象 + place = fluid.CUDAPlace(0) + exe = fluid.Executor(place) + data = fluid.data(name='X', shape=[None, 1], dtype='float32') + class_dim = 2 + prediction = fluid.layers.fc(input=data, size=class_dim) + loss = fluid.layers.mean(prediction) + adam = fluid.optimizer.Adam() + adam.minimize(loss) + # 运行且仅运行一次startup program + exe.run(fluid.default_startup_program()) + build_strategy = fluid.BuildStrategy() + binary = fluid.CompiledProgram(fluid.default_main_program()).with_data_parallel( + loss_name=loss.name, build_strategy=build_strategy) + batch_size = 6 + x = np.random.random(size=(batch_size, 1)).astype('float32') + # 1) 设置 return_merged 参数为False以获取不合并的计算结果: + unmerged_prediction, = exe.run(binary, feed={'X': x}, + fetch_list=[prediction.name], + return_merged=False) + # 如果用户使用两个GPU卡来运行此python代码示例,输出结果将为(2, 3, class_dim)。 + # 输出结果中第一个维度值代表所使用的GPU卡数,而第二个维度值代表batch_size和所使用 + # 的GPU卡数之商。 + print("The unmerged prediction shape: {}".format(np.array(unmerged_prediction).shape)) + print(unmerged_prediction) + # 2) 设置 return_merged 参数为True以获取合并的计算结果: + merged_prediction, = exe.run(binary, feed={'X': x}, + fetch_list=[prediction.name], + return_merged=True) + # 如果用户使用两个GPU卡来运行此python代码示例,输出结果将为(6, class_dim)。输出结果 + # 中第一个维度值代表batch_size值。 + print("The merged prediction shape: {}".format(np.array(merged_prediction).shape)) + print(merged_prediction) + # 输出: + # The unmerged prediction shape: (2, 3, 2) + # [array([[-0.37620035, -0.19752218], + # [-0.3561043 , -0.18697084], + # [-0.24129935, -0.12669306]], dtype=float32), array([[-0.24489994, -0.12858354], + # [-0.49041364, -0.25748932], + # [-0.44331917, -0.23276259]], dtype=float32)] + # The merged prediction shape: (6, 2) + # [[-0.37789783 -0.19921964] + # [-0.3577645 -0.18863106] + # [-0.24274671 -0.12814042] + # [-0.24635398 -0.13003758] + # [-0.49232286 -0.25939852] + # [-0.44514108 -0.2345845 ]] + + .. py:method:: infer_from_dataset(program=None, dataset=None, scope=None, thread=0, debug=False, fetch_list=None, fetch_info=None, print_period=100) infer_from_dataset的文档与train_from_dataset几乎完全相同,只是在分布式训练中,推进梯度将在infer_from_dataset中禁用。 infer_from_dataset()可以非常容易地用于多线程中的评估。 diff --git a/doc/fluid/api_cn/fluid_cn/LoDTensorArray_cn.rst b/doc/fluid/api_cn/fluid_cn/LoDTensorArray_cn.rst index 6127ccf69939d1a3adb9d2a56538bc950af4bddf..4fa9be57d50dbec6f1137f7cf04fdc83f349af79 100644 --- a/doc/fluid/api_cn/fluid_cn/LoDTensorArray_cn.rst +++ b/doc/fluid/api_cn/fluid_cn/LoDTensorArray_cn.rst @@ -5,6 +5,9 @@ LoDTensorArray .. py:class:: paddle.fluid.LoDTensorArray + + + LoDTensorArray是由LoDTensor组成的数组,支持"[]"运算符、len()函数和for迭代等。 **示例代码** diff --git a/doc/fluid/api_cn/fluid_cn/LoDTensor_cn.rst b/doc/fluid/api_cn/fluid_cn/LoDTensor_cn.rst index 3897fafac9e68c23a0d758e990ea8591804eebb9..2eaf09eecf00e779296f282482324e4b2a8385cc 100644 --- a/doc/fluid/api_cn/fluid_cn/LoDTensor_cn.rst +++ b/doc/fluid/api_cn/fluid_cn/LoDTensor_cn.rst @@ -6,6 +6,9 @@ LoDTensor .. 
py:class:: paddle.fluid.LoDTensor + + + LoDTensor是一个具有LoD(Level of Details)信息的张量(Tensor),可用于表示变长序列,详见 :ref:`cn_user_guide_lod_tensor` 。 LoDTensor可以通过 ``np.array(lod_tensor)`` 方法转换为numpy.ndarray。 diff --git a/doc/fluid/api_cn/fluid_cn/ParallelExecutor_cn.rst b/doc/fluid/api_cn/fluid_cn/ParallelExecutor_cn.rst index 1d2f405a8ebf818fc68879efffa4fc44a177c076..8e391956ed24e66a6fb736dc4165b14a7e734cad 100644 --- a/doc/fluid/api_cn/fluid_cn/ParallelExecutor_cn.rst +++ b/doc/fluid/api_cn/fluid_cn/ParallelExecutor_cn.rst @@ -3,10 +3,13 @@ ParallelExecutor ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:class:: paddle.fluid.ParallelExecutor(use_cuda, loss_name=None, main_program=None, share_vars_from=None, exec_strategy=None, build_strategy=None, num_trainers=1, trainer_id=0, scope=None) +:api_attr: 声明式编程模式(静态图) + + + ``ParallelExecutor`` 是 ``Executor`` 的一个升级版本,可以支持基于数据并行的多节点模型训练和测试。如果采用数据并行模式, ``ParallelExecutor`` 在构造时会将参数分发到不同的节点上,并将输入的 ``Program`` 拷贝到不同的节点,在执行过程中,各个节点独立运行模型,将模型反向计算得到的参数梯度在多个节点之间进行聚合,之后各个节点独立的进行参数的更新。如果使用GPU运行模型,即 ``use_cuda=True`` ,节点指代GPU, ``ParallelExecutor`` 将自动获取在当前机器上可用的GPU资源,用户也可以通过在环境变量设置可用的GPU资源,例如:希望使用GPU0、GPU1计算,export CUDA_VISIBLEDEVICES=0,1;如果在CPU上进行操作,即 ``use_cuda=False`` ,节点指代CPU,**注意:此时需要用户在环境变量中手动添加 CPU_NUM ,并将该值设置为CPU设备的个数,例如:export CPU_NUM=4,如果没有设置该环境变量,执行器会在环境变量中添加该变量,并将其值设为1**。 参数: diff --git a/doc/fluid/api_cn/fluid_cn/ParamAttr_cn.rst b/doc/fluid/api_cn/fluid_cn/ParamAttr_cn.rst index 43975efd6a3b0723ad7ce51d3399b3de99d8dc5e..641ed94ab2d0b52e9ef2de4cdd783f61e5df672f 100644 --- a/doc/fluid/api_cn/fluid_cn/ParamAttr_cn.rst +++ b/doc/fluid/api_cn/fluid_cn/ParamAttr_cn.rst @@ -5,7 +5,14 @@ ParamAttr ------------------------------- -.. py:class:: paddle.fluid.ParamAttr(name=None, initializer=None, learning_rate=1.0, regularizer=None, trainable=True, gradient_clip=None, do_model_average=False) +.. py:class:: paddle.fluid.ParamAttr(name=None, initializer=None, learning_rate=1.0, regularizer=None, trainable=True, do_model_average=False) + + + + +.. note:: + 该类中的 ``gradient_clip`` 属性在2.0版本会废弃,推荐在初始化 ``optimizer`` 时设置梯度裁剪。共有三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 + :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。 创建一个参数属性对象,用户可设置参数的名称、初始化方式、学习率、正则化规则、是否需要训练、梯度裁剪方式、是否做模型平均等属性。 @@ -13,9 +20,10 @@ ParamAttr - **name** (str,可选) - 参数的名称。默认值为None,表示框架自动创建参数的名称。 - **initializer** (Initializer,可选) - 参数的初始化方式。默认值为None,表示权重参数采用Xavier初始化方式,偏置参数采用全0初始化方式。 - **learning_rate** (float) - 参数的学习率。实际参数的学习率等于全局学习率乘以参数的学习率,再乘以learning rate schedule的系数。 - - **regularizer** (WeightDecayRegularizer,可选) - 正则化因子。默认值为None,表示没有正则化因子。 + - **regularizer** (WeightDecayRegularizer,可选) - 正则化方法。支持两种正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 + :ref:`cn_api_fluid_regularizer_L2Decay` ,如果在 ``optimizer`` (例如 :ref:`cn_api_fluid_optimizer_SGDOptimizer` ) 中也 + 设置了正则化,``optimizer`` 中的正则化将被忽略。默认值为None,表示没有正则化。 - **trainable** (bool) - 参数是否需要训练。默认值为True,表示需要训练。 - - **gradient_clip** (BaseGradientClipAttr,可选) - 梯度裁剪方式。默认值为None,表示不需要梯度裁剪。 - **do_model_average** (bool) - 是否做模型平均。默认值为False,表示不做模型平均。 返回: 表示参数属性的对象。 diff --git a/doc/fluid/api_cn/fluid_cn/Program_cn.rst b/doc/fluid/api_cn/fluid_cn/Program_cn.rst index 3fc5c40939c615a2b15db8144ed28ff6c05e0e52..2a611c7b0e913f8ed6ceb13ad88566c101dc8ef6 100644 --- a/doc/fluid/api_cn/fluid_cn/Program_cn.rst +++ b/doc/fluid/api_cn/fluid_cn/Program_cn.rst @@ -5,6 +5,9 @@ Program .. 
py:class:: paddle.fluid.Program + + + **注意:默认情况下,Paddle Fluid内部默认含有** :ref:`cn_api_fluid_default_startup_program` **和** :ref:`cn_api_fluid_default_main_program` **,它们共享参数。** :ref:`cn_api_fluid_default_startup_program` **只运行一次来初始化参数,** :ref:`cn_api_fluid_default_main_program` **在每个mini batch中运行并更新权重。** Program是Paddle Fluid对于计算图的一种静态描述,使用Program的构造函数可以创建一个Program。Program中包括至少一个 :ref:`api_guide_Block` ,当 :ref:`api_guide_Block` 中存在条件选择的控制流OP(例如 :ref:`cn_api_fluid_layers_While` 等)时,该Program将会含有嵌套着的 :ref:`api_guide_Block` 即控制流外部的 :ref:`api_guide_Block` 将包含着控制流内部的 :ref:`api_guide_Block` ,而嵌套的 :ref:`api_guide_Block` 的元素访问控制将由具体的控制流OP来决定。关于Program具体的结构和包含的类型请参阅 `framework.proto `_ @@ -57,13 +60,12 @@ Program是Paddle Fluid对于计算图的一种静态描述,使用Program的构 import paddle.fluid as fluid prog = fluid.default_main_program() - a = fluid.layers.data(name="X", shape=[2,3], dtype="float32", append_batch_size=False) - c = fluid.layers.fc(a, size=3) + x = fluid.layers.data(name="X", shape=[2,3], dtype="float32", append_batch_size=False) + pred = fluid.layers.fc(x, size=3) prog_string = prog.to_string(throw_on_error=True, with_details=False) prog_string_with_details = prog.to_string(throw_on_error=False, with_details=True) - print(prog_string) - print("\n =============== with_details =============== \n") - print(prog_string_with_details) + print("program string without detail: {}".format(prog_string)) + print("program string with detail: {}".format(prog_string_with_details)) .. py:method:: clone(for_test=False) @@ -82,16 +84,19 @@ Program是Paddle Fluid对于计算图的一种静态描述,使用Program的构 **代码示例** - .. code-block:: python + :: - import paddle.fluid as fluid - ## 我们推荐在使用 Optimizer前使用clone()接口 - test_program = fluid.default_main_program().clone(for_test=True) - optimizer = fluid.optimizer.Momentum(learning_rate=0.01, momentum=0.9) - optimizer.minimize() + import paddle.fluid as fluid + img = fluid.layers.data(name='image', shape=[784]) + pred = fluid.layers.fc(input=img, size=10, act='relu') + loss = fluid.layers.mean(pred) + ## 我们推荐在使用 Optimizer前使用clone()接口 + test_program = fluid.default_main_program().clone(for_test=True) + optimizer = fluid.optimizer.Momentum(learning_rate=0.01, momentum=0.9) + optimizer.minimize(loss) 参数: - - **for_test** (bool) – 取值为True时,clone方法内部会把operator的属性 ``is_test`` 设置为 True, 并裁剪反向OP和参数优化OP + - **for_test** (bool) – 取值为True时,clone方法内部会把operator的属性 ``is_test`` 设置为 True, 并裁剪反向OP和参数优化OP,默认值为False 返回:当 ``for_test=True`` 时返回一个新的、仅包含当前Program前向内容的Program。否则返回一个新的,和当前Program完全相同的Program @@ -150,7 +155,7 @@ Program是Paddle Fluid对于计算图的一种静态描述,使用Program的构 input=fluid.layers.fc(hidden, size=10, act='softmax'), label=fluid.layers.data(name='label', shape=[1], dtype='int64')) avg_loss = fluid.layers.mean(loss) - test_program = train_program.clone(for_test=False) + test_program = train_program.clone(for_test=True) print_prog(test_program) # 由于需要使训练和测试参数共享,我们需要使用训练的 ``startup_program`` @@ -182,7 +187,8 @@ Program是Paddle Fluid对于计算图的一种静态描述,使用Program的构 for key, value in sorted(six.iteritems(op.all_attrs())): if key not in ['op_callstack', 'op_role_var']: print(" [ attrs: {}: {} ]".format(key, value)) - def network(is_test): + + def network(): img = fluid.layers.data(name='image', shape=[784]) hidden = fluid.layers.fc(input=img, size=200, act='relu') hidden = fluid.layers.dropout(hidden, dropout_prob=0.5) @@ -192,19 +198,19 @@ Program是Paddle Fluid对于计算图的一种静态描述,使用Program的构 avg_loss = fluid.layers.mean(loss) return avg_loss - train_program_2 = fluid.Program() startup_program_2 = fluid.Program() test_program_2 = fluid.Program() with 
fluid.program_guard(train_program_2, startup_program_2): with fluid.unique_name.guard(): - sgd = fluid.optimizer.SGD(learning_rate=1e-3) - sgd.minimize(avg_loss) + avg_loss = network() + sgd = fluid.optimizer.SGD(learning_rate=1e-3) + sgd.minimize(avg_loss) # 不使用测试阶段的启动程序 - with fluid.program_guard(test_program_2, fluid.Program()): + with fluid.program_guard(test_program_2, startup_program_2): with fluid.unique_name.guard(): - loss = network(is_test=True) - print(test_program_2) + avg_loss = network() + print_prog(test_program_2) 上边两个代码片段生成和打印的Program是一样的。 @@ -268,24 +274,7 @@ Program是Paddle Fluid对于计算图的一种静态描述,使用Program的构 .. py:attribute:: random_seed -**注意:必须在相关OP被添加之前设置。例如** - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - - prog = fluid.default_main_program() - random_seed = prog.random_seed - x_var = fluid.layers.data(name="X", shape=[3,3], dtype="float32", append_batch_size=False) - - # 这里我们必须要在fluid.layers.dropout之前设置random_seed - print(random_seed) - prog.random_seed = 1 - z_var = fluid.layers.dropout(x_var, 0.7) - - print(prog.random_seed) +**注意:必须在相关OP被添加之前设置。** 程序中随机运算符的默认随机种子。0意味着随机生成随机种子。 @@ -301,12 +290,16 @@ Program是Paddle Fluid对于计算图的一种静态描述,使用Program的构 prog = fluid.default_main_program() random_seed = prog.random_seed + x_var = fluid.layers.data(name="X", shape=[3,3], dtype="float32", append_batch_size=False) print(random_seed) - prog.random_seed = 1 - print(prog.random_seed) - ## 0 ## 默认的random seed是 0 + + # 这里我们必须要在fluid.layers.dropout之前设置random_seed + prog.random_seed = 1 + z_var = fluid.layers.dropout(x_var, 0.7) + + print(prog.random_seed) ## 1 ## 修改后random seed变成了 1 diff --git a/doc/fluid/api_cn/fluid_cn/Tensor_cn.rst b/doc/fluid/api_cn/fluid_cn/Tensor_cn.rst index da5698d18fff154660774cefabf645d4f7ff141d..0cae5aac94cd27f7e3432a19b295f2b03ef606b8 100644 --- a/doc/fluid/api_cn/fluid_cn/Tensor_cn.rst +++ b/doc/fluid/api_cn/fluid_cn/Tensor_cn.rst @@ -5,6 +5,9 @@ Tensor .. py:function:: paddle.fluid.Tensor + + + Tensor用于表示多维张量,可以通过 ``np.array(tensor)`` 方法转换为numpy.ndarray。 **示例代码** diff --git a/doc/fluid/api_cn/fluid_cn/Variable_cn.rst b/doc/fluid/api_cn/fluid_cn/Variable_cn.rst index 77e282afbccd5fa701892ae5c0dc286a1465d68f..83af840cce8702bc2af188a1cab30f3561049d98 100644 --- a/doc/fluid/api_cn/fluid_cn/Variable_cn.rst +++ b/doc/fluid/api_cn/fluid_cn/Variable_cn.rst @@ -5,6 +5,9 @@ Variable .. py:class:: paddle.fluid.Variable + + + **注意:** **1. 请不要直接调用** `Variable` **的构造函数,因为这会造成严重的错误发生!** @@ -142,7 +145,7 @@ Variable **参数:** - - **backward_strategy**: ( :ref:`cn_api_fluid_dygraph_BackwardStrategy` ) 使用何种 :ref:`cn_api_fluid_dygraph_BackwardStrategy` 聚合反向的梯度 + - **retain_graph** (bool,可选) – 该参数用于确定反向梯度更新完成后反向梯度计算图是否需要保留(retain_graph为True则保留反向梯度计算图)。若用户打算在执行完该方法( :code:`backward` )后,继续向之前已构建的计算图中添加更多的Op,则需要设置 :code:`retain_graph` 值为True(这样才会保留之前计算得到的梯度)。可以看出,将 :code:`retain_graph` 设置为False可降低内存的占用。默认值为False。 返回:无 @@ -150,23 +153,20 @@ Variable **示例代码** .. 
code-block:: python - import paddle.fluid as fluid import numpy as np - + import paddle + paddle.disable_static() x = np.ones([2, 2], np.float32) - with fluid.dygraph.guard(): - inputs2 = [] - for _ in range(10): - tmp = fluid.dygraph.base.to_variable(x) - # 如果这里我们不为输入tmp设置stop_gradient=False,那么后面loss2也将因为这个链路都不需要梯度 - # 而不产生梯度 - tmp.stop_gradient=False - inputs2.append(tmp) - ret2 = fluid.layers.sums(inputs2) - loss2 = fluid.layers.reduce_sum(ret2) - backward_strategy = fluid.dygraph.BackwardStrategy() - backward_strategy.sort_sum_gradient = True - loss2.backward(backward_strategy) + inputs = [] + for _ in range(10): + tmp = paddle.to_tensor(x) + # 如果这里我们不为输入tmp设置stop_gradient=False,那么后面loss也将因为这个链路都不需要梯度 + # 而不产生梯度 + tmp.stop_gradient=False + inputs.append(tmp) + ret = paddle.sums(inputs) + loss = paddle.reduce_sum(ret) + loss.backward() .. py:method:: gradient() @@ -199,9 +199,7 @@ Variable inputs2.append(tmp) ret2 = fluid.layers.sums(inputs2) loss2 = fluid.layers.reduce_sum(ret2) - backward_strategy = fluid.dygraph.BackwardStrategy() - backward_strategy.sort_sum_gradient = True - loss2.backward(backward_strategy) + loss2.backward() print(loss2.gradient()) # example2: 返回tuple of ndarray @@ -245,9 +243,7 @@ Variable inputs2.append(tmp) ret2 = fluid.layers.sums(inputs2) loss2 = fluid.layers.reduce_sum(ret2) - backward_strategy = fluid.dygraph.BackwardStrategy() - backward_strategy.sort_sum_gradient = True - loss2.backward(backward_strategy) + loss2.backward() print(loss2.gradient()) loss2.clear_gradient() print("After clear {}".format(loss2.gradient())) @@ -348,6 +344,7 @@ Variable .. code-block:: python import paddle.fluid as fluid + import numpy as np with fluid.dygraph.guard(): value0 = np.arange(26).reshape(2, 13).astype("float32") @@ -363,9 +360,9 @@ Variable out1.stop_gradient = True out = fluid.layers.concat(input=[out1, out2, c], axis=1) out.backward() - # 可以发现这里linear的参数变成了 - assert (linear.weight.gradient() == 0).all() - assert (out1.gradient() == 0).all() + # 可以发现这里linear的参数梯度变成了None + assert linear.weight.gradient() is None + assert out1.gradient() is None .. py:attribute:: persistable diff --git a/doc/fluid/api_cn/fluid_cn/WeightNormParamAttr_cn.rst b/doc/fluid/api_cn/fluid_cn/WeightNormParamAttr_cn.rst index 88946bb7ae93ddbd01da63fc16769f53d16a8023..d17b0380c5f8c5903b75926f3ec326d4d3726320 100644 --- a/doc/fluid/api_cn/fluid_cn/WeightNormParamAttr_cn.rst +++ b/doc/fluid/api_cn/fluid_cn/WeightNormParamAttr_cn.rst @@ -3,10 +3,16 @@ WeightNormParamAttr ------------------------------- -**注意:该API仅支持【静态图】模式** -.. py:class:: paddle.fluid.WeightNormParamAttr(dim=None, name=None, initializer=None, learning_rate=1.0, regularizer=None, trainable=True, gradient_clip=None, do_model_average=False) +.. py:class:: paddle.fluid.WeightNormParamAttr(dim=None, name=None, initializer=None, learning_rate=1.0, regularizer=None, trainable=True, do_model_average=False) +:api_attr: 声明式编程模式(静态图) + + + +.. 
note:: + 该类中的 ``gradient_clip`` 属性在2.0版本会废弃,推荐在初始化 ``optimizer`` 时设置梯度裁剪。共有三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 + :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。 该类定义了权重归一化(Weight Normalization)的参数。权重归一化可以将神经网络中权重向量的长度与其方向解耦,详细的定义与实现可以参考论文:`Weight Normalization: A Simple Reparameterization to Accelerate Training of Deep Neural Networks `_ @@ -15,9 +21,10 @@ WeightNormParamAttr - **name** (None|str) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认为None。 - **initializer** (Initializer) - 初始化参数方法,例如 ``initializer = fluid.initializer.ConstantInitializer(1.0)`` 。默认为None,如果为None则使用默认初始化函数 `Xavier()` 。 - **learning_rate** (float32) - 学习率,优化过程 :math:`global\_lr∗parameter\_lr∗scheduler\_factor` 的学习速率,默认为1.0。 - - **regularizer** (WeightDecayRegularizer) - 正则化方法,例如 ``regularizer = fluid.regularizer.L2DecayRegularizer(regularization_coeff=0.1)`` 。默认为None,如果为None则对权重不做正则化。 + - **regularizer** (WeightDecayRegularizer,可选) - 正则化方法。支持两种正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 + :ref:`cn_api_fluid_regularizer_L2Decay` ,如果在 ``optimizer`` (例如 :ref:`cn_api_fluid_optimizer_SGDOptimizer` ) 中也 + 设置了正则化,``optimizer`` 中的正则化将被忽略。默认值为None,表示没有正则化。 - **trainable** (bool) - 可选,指明参数是否可训练,默认为True。 - - **gradient_clip** - 梯度裁剪(Gradient Clipping)的方法,例如 ``gradient_clip = fluid.clip.GradientClipByNorm(clip_norm=2.0))`` 。默认为None,如果为None则对权重不做裁剪。 - **do_model_average** (bool) - 可选,指明参数是否需要模型平均化操作(Model Average),默认为False。 @@ -36,7 +43,6 @@ WeightNormParamAttr learning_rate=1.0, regularizer=fluid.regularizer.L2DecayRegularizer(regularization_coeff=0.1), trainable=True, - gradient_clip=fluid.clip.GradientClipByNorm(clip_norm=2.0), do_model_average=False)) diff --git a/doc/fluid/api_cn/fluid_cn/cpu_places_cn.rst b/doc/fluid/api_cn/fluid_cn/cpu_places_cn.rst index 973547e0adc5f082dbb1c3edf29681f7dc15e2fe..124973fc786ad84108f2478809a735e6ce45a081 100644 --- a/doc/fluid/api_cn/fluid_cn/cpu_places_cn.rst +++ b/doc/fluid/api_cn/fluid_cn/cpu_places_cn.rst @@ -5,6 +5,9 @@ cpu_places .. py:function:: paddle.fluid.cpu_places(device_count=None) + + + 该接口创建 ``device_count`` 个 ``fluid.CPUPlace`` 对象,并返回所创建的对象列表。 如果 ``device_count`` 为 ``None``,则设备数目将由环境变量 ``CPU_NUM`` 确定。如果未设置 ``CPU_NUM`` 环境变量,则设备数目会默认设为1,也就是说, ``CPU_NUM=1``。 diff --git a/doc/fluid/api_cn/fluid_cn/create_lod_tensor_cn.rst b/doc/fluid/api_cn/fluid_cn/create_lod_tensor_cn.rst index 1a6884d1f042237ddab5b22ac193965a3634e7c6..386b0632f1a0256e8cad62c2143a05c7684ded1d 100644 --- a/doc/fluid/api_cn/fluid_cn/create_lod_tensor_cn.rst +++ b/doc/fluid/api_cn/fluid_cn/create_lod_tensor_cn.rst @@ -6,6 +6,9 @@ create_lod_tensor .. py:function:: paddle.fluid.create_lod_tensor(data, recursive_seq_lens, place) + + + 从一个numpy数组、list或LoDTensor创建一个新的LoDTensor。 具体实现方法如下: diff --git a/doc/fluid/api_cn/fluid_cn/create_random_int_lodtensor_cn.rst b/doc/fluid/api_cn/fluid_cn/create_random_int_lodtensor_cn.rst index ee3999183571ef62e41aaa97f0734434e22628f1..afe15fe1d4c92d7422d497cecc29ed10c2f4b14f 100644 --- a/doc/fluid/api_cn/fluid_cn/create_random_int_lodtensor_cn.rst +++ b/doc/fluid/api_cn/fluid_cn/create_random_int_lodtensor_cn.rst @@ -4,10 +4,13 @@ create_random_int_lodtensor ------------------------------- -**注意:该API仅支持【静态图】模式** .. 
py:function:: paddle.fluid.create_random_int_lodtensor(recursive_seq_lens, base_shape, place, low, high) +:api_attr: 声明式编程模式(静态图) + + + 创建一个包含随机整数的LoDTensor。 具体实现方法如下: diff --git a/doc/fluid/api_cn/fluid_cn/cuda_pinned_places_cn.rst b/doc/fluid/api_cn/fluid_cn/cuda_pinned_places_cn.rst index d4cf156130daffd4b9fc5281a760b3b25d6b7ede..9c3955b528ce692e7c0d1ba3f6da0431080a7272 100644 --- a/doc/fluid/api_cn/fluid_cn/cuda_pinned_places_cn.rst +++ b/doc/fluid/api_cn/fluid_cn/cuda_pinned_places_cn.rst @@ -8,6 +8,9 @@ cuda_pinned_places + + + 该接口创建 ``device_count`` 个 ``fluid.CUDAPinnedPlace`` ( fluid. :ref:`cn_api_fluid_CUDAPinnedPlace` ) 对象,并返回所创建的对象列表。 如果 ``device_count`` 为 ``None``,实际设备数目将由当前任务中使用的GPU设备数决定。用户可通过以下2种方式设置任务可用的GPU设备: diff --git a/doc/fluid/api_cn/fluid_cn/cuda_places_cn.rst b/doc/fluid/api_cn/fluid_cn/cuda_places_cn.rst index babb94512a7649449dee100a65705badf7379d62..b0294e9cb5b2df17d525a053c68005d0355bbe2e 100644 --- a/doc/fluid/api_cn/fluid_cn/cuda_places_cn.rst +++ b/doc/fluid/api_cn/fluid_cn/cuda_places_cn.rst @@ -5,6 +5,9 @@ cuda_places .. py:function:: paddle.fluid.cuda_places(device_ids=None) + + + .. note:: 多卡任务请先使用 FLAGS_selected_gpus 环境变量设置可见的GPU设备,下个版本将会修正 CUDA_VISIBLE_DEVICES 环境变量无效的问题。 diff --git a/doc/fluid/api_cn/fluid_cn/data_cn.rst b/doc/fluid/api_cn/fluid_cn/data_cn.rst index 67f99be46b3645dccff53a734b8d506c7092d989..14a6ab6ea1d94dcdc3586417ef9c85db98783c74 100644 --- a/doc/fluid/api_cn/fluid_cn/data_cn.rst +++ b/doc/fluid/api_cn/fluid_cn/data_cn.rst @@ -3,10 +3,12 @@ data ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.data(name, shape, dtype='float32', lod_level=0) + + + 该OP会在全局block中创建变量(Variable),该全局变量可被计算图中的算子(operator)访问。该变量可作为占位符用于数据输入。例如用执行器(Executor)feed数据进该变量 注意: diff --git a/doc/fluid/api_cn/fluid_cn/default_main_program_cn.rst b/doc/fluid/api_cn/fluid_cn/default_main_program_cn.rst index 6a2eeb42130baa1b71cc95e6faf1d50720693410..4759fafea72a09002fc9e497baeb99983b2c6218 100644 --- a/doc/fluid/api_cn/fluid_cn/default_main_program_cn.rst +++ b/doc/fluid/api_cn/fluid_cn/default_main_program_cn.rst @@ -6,6 +6,9 @@ default_main_program .. 
py:function:: paddle.fluid.default_main_program() + + + 此接口可以获取当前用于存储op和variable描述信息的 ``default main program`` ``fluid.layers`` 接口中添加的op和variable会存储在 ``default main program`` 中 @@ -28,8 +31,8 @@ default_main_program import paddle.fluid as fluid #示例网络: - data = fluid.layers.data(name='image', shape=[3, 224, 224], dtype='float32') - label = fluid.layers.data(name='label', shape=[1], dtype='int64') + data = fluid.data(name='image', shape=[None, 3, 224, 224], dtype='float32') + label = fluid.data(name='label', shape=[None, 1], dtype='int64') conv1 = fluid.layers.conv2d(data, 4, 5, 1, act=None) bn1 = fluid.layers.batch_norm(conv1, act='relu') diff --git a/doc/fluid/api_cn/fluid_cn/default_startup_program_cn.rst b/doc/fluid/api_cn/fluid_cn/default_startup_program_cn.rst index 2c25eb00f74484682af5495e2cc386a1d67690bf..bfc247c29b6a952d59e8ac524a558f32843cb536 100644 --- a/doc/fluid/api_cn/fluid_cn/default_startup_program_cn.rst +++ b/doc/fluid/api_cn/fluid_cn/default_startup_program_cn.rst @@ -10,6 +10,9 @@ default_startup_program + + + 该函数可以获取默认/全局 startup :ref:`cn_api_fluid_Program` (初始化启动程序)。 :ref:`_cn_api_fluid_layers` 中的函数会新建参数或 :ref:`cn_api_paddle_data_reader_reader` (读取器) 或 `NCCL `_ 句柄作为全局变量。 diff --git a/doc/fluid/api_cn/fluid_cn/device_guard_cn.rst b/doc/fluid/api_cn/fluid_cn/device_guard_cn.rst new file mode 100755 index 0000000000000000000000000000000000000000..7d369cc9740652a4d6d4c5a23ff723fdfa0dbdc4 --- /dev/null +++ b/doc/fluid/api_cn/fluid_cn/device_guard_cn.rst @@ -0,0 +1,36 @@ +.. _cn_api_fluid_device_guard: + +device_guard +------------------------------- + +**注意:该API仅支持【静态图】模式** + +.. py:function:: paddle.fluid.device_guard(device=None) + +一个用于指定OP运行设备的上下文管理器。 + +参数: + - **device** (str|None) – 指定上下文中使用的设备。它可以是'cpu'或者'gpu‘,当它被设置为'cpu'或者'gpu'时,创建在该上下文中的OP将被运行在CPUPlace或者CUDAPlace上。若设置为'gpu',同时程序运行在单卡模式下,设备的索引将与执行器的设备索引保持一致。默认值:None,在该上下文中的OP将被自动地分配设备。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + support_gpu = fluid.is_compiled_with_cuda() + place = fluid.CPUPlace() + if support_gpu: + place = fluid.CUDAPlace(0) + # if GPU is supported, the three OPs below will be automatically assigned to CUDAPlace(0) + data1 = fluid.layers.fill_constant(shape=[1, 3, 8, 8], value=0.5, dtype='float32') + data2 = fluid.layers.fill_constant(shape=[1, 3, 5, 5], value=0.5, dtype='float32') + shape = fluid.layers.shape(data2) + with fluid.device_guard("cpu"): + # Ops created here will be placed on CPUPlace + shape = fluid.layers.slice(shape, axes=[0], starts=[0], ends=[4]) + with fluid.device_guard('gpu'): + # if GPU is supported, OPs created here will be placed on CUDAPlace(0), otherwise on CPUPlace + out = fluid.layers.crop_tensor(data1, shape=shape) + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + result = exe.run(fetch_list=[out]) diff --git a/doc/fluid/api_cn/fluid_cn/disable_dygraph_cn.rst b/doc/fluid/api_cn/fluid_cn/disable_dygraph_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..59dc22b7b491cf59f13fe9586f6d98bbaa86f00b --- /dev/null +++ b/doc/fluid/api_cn/fluid_cn/disable_dygraph_cn.rst @@ -0,0 +1,22 @@ +.. _cn_api_fluid_disable_dygraph: + +disable_dygraph +------------------------------- + +.. py:function:: paddle.fluid.disable_dygraph() + +该接口关闭动态图模式。 + +返回:无 + +**示例代码** + +.. 
code-block:: python + + import paddle.fluid as fluid + import numpy as np + fluid.enable_dygraph() # Now we are in dygraph mode + print(fluid.in_dygraph_mode()) # True + fluid.disable_dygraph() + print(fluid.in_dygraph_mode()) # False + diff --git a/doc/fluid/api_cn/fluid_cn/embedding_cn.rst b/doc/fluid/api_cn/fluid_cn/embedding_cn.rst index d8d5f0c356bc087f1bea6d64e8515ebe985adc51..ee70bead5caa9abd9db685f1709cae196ced9028 100644 --- a/doc/fluid/api_cn/fluid_cn/embedding_cn.rst +++ b/doc/fluid/api_cn/fluid_cn/embedding_cn.rst @@ -3,10 +3,13 @@ embedding ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.embedding(input, size, is_sparse=False, is_distributed=False, padding_idx=None, param_attr=None, dtype='float32') +:api_attr: 声明式编程模式(静态图) + + + 该OP根据input中的id信息从embedding矩阵中查询对应embedding信息,函数会根据输入的size (vocab_size, emb_size)和dtype自动构造一个二维embedding矩阵。 输出的Tensor的shape是在输入Tensor shape的最后一维后面添加了emb_size的维度。 diff --git a/doc/fluid/api_cn/fluid_cn/enable_dygraph_cn.rst b/doc/fluid/api_cn/fluid_cn/enable_dygraph_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0df485bd856a4c70f3638db7c2f6b7470c143fe1 --- /dev/null +++ b/doc/fluid/api_cn/fluid_cn/enable_dygraph_cn.rst @@ -0,0 +1,25 @@ +.. _cn_api_fluid_enable_dygraph: + +enable_dygraph +------------------------------- + +.. py:function:: paddle.fluid.enable_dygraph(place=None) + +该接口打开动态图模式。 + +参数: + - **place** (fluid.CPUPlace 或 fluid.CUDAPlace,可选) - 执行动态图的设备。若为None,则设备根据paddle的编译方式决定。默认值为 ``None``。 + +返回:无 + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + fluid.enable_dygraph() # Now we are in dygraph mode + print(fluid.in_dygraph_mode()) # True + fluid.disable_dygraph() + print(fluid.in_dygraph_mode()) # False + diff --git a/doc/fluid/api_cn/fluid_cn/get_flags_cn.rst b/doc/fluid/api_cn/fluid_cn/get_flags_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e0323cf1f3e52c6c927ad7944a7e26a0a051442f --- /dev/null +++ b/doc/fluid/api_cn/fluid_cn/get_flags_cn.rst @@ -0,0 +1,21 @@ +.. _cn_api_fluid_get_flags: + +get_flags +------------------------------- + +.. py:function:: paddle.fluid.get_flags(flags) +用于获取Paddle框架中环境变量FLAGS的当前值。 + +参数: + - **flags** (list|tuple|str) - 需要获取的环境变量FLAGS的名称。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + flags = ['FLAGS_eager_delete_tensor_gb', 'FLAGS_check_nan_inf'] + res = fluid.get_flags(flags) + print(res) + # {'FLAGS_eager_delete_tensor_gb': 0.0, 'FLAGS_check_nan_inf': False} diff --git a/doc/fluid/api_cn/fluid_cn/global_scope_cn.rst b/doc/fluid/api_cn/fluid_cn/global_scope_cn.rst index 7f649449c9eb386df7929b72f118b032f23fe6e0..86031eedc8c1c8f8fc9083054b189194dba6a009 100644 --- a/doc/fluid/api_cn/fluid_cn/global_scope_cn.rst +++ b/doc/fluid/api_cn/fluid_cn/global_scope_cn.rst @@ -3,10 +3,13 @@ global_scope ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.global_scope() +:api_attr: 声明式编程模式(静态图) + + + 获取全局/默认作用域实例。很多API使用默认 ``global_scope`` ,例如 ``Executor.run`` 等。 返回:全局/默认作用域实例 diff --git a/doc/fluid/api_cn/fluid_cn/gradients_cn.rst b/doc/fluid/api_cn/fluid_cn/gradients_cn.rst index 7e2e7d4fd635de3332aea4f293951567dd66c79c..b5813a0a1c6b73aaecff4bb3939fbc3ef8d1b594 100644 --- a/doc/fluid/api_cn/fluid_cn/gradients_cn.rst +++ b/doc/fluid/api_cn/fluid_cn/gradients_cn.rst @@ -3,10 +3,13 @@ gradients ------------------------------- -**注意:该API仅支持【静态图】模式** .. 
py:function:: paddle.fluid.gradients(targets, inputs, target_gradients=None, no_grad_set=None) +:api_attr: 声明式编程模式(静态图) + + + 将目标梯度反向传播到输入。 参数: @@ -26,7 +29,7 @@ gradients import paddle.fluid as fluid - x = fluid.layers.data(name='x', shape=[2,8,8], dtype='float32') + x = fluid.data(name='x', shape=[None,2,8,8], dtype='float32') x.stop_gradient=False y = fluid.layers.conv2d(x, 4, 1, bias_attr=False) y = fluid.layers.relu(y) diff --git a/doc/fluid/api_cn/fluid_cn/in_dygraph_mode_cn.rst b/doc/fluid/api_cn/fluid_cn/in_dygraph_mode_cn.rst index 528dac5988992ad5e5d0e32bf28e0c49af4fa134..06c960ce5c3debdc422d8098744b5e7ecaa73bb5 100644 --- a/doc/fluid/api_cn/fluid_cn/in_dygraph_mode_cn.rst +++ b/doc/fluid/api_cn/fluid_cn/in_dygraph_mode_cn.rst @@ -5,6 +5,9 @@ in_dygraph_mode .. py:function:: paddle.fluid.in_dygraph_mode() + + + 该接口检查程序是否在动态图模式中运行。 可以通过 ``fluid.dygraph.guard`` 接口开启动态图模式。 @@ -16,11 +19,11 @@ in_dygraph_mode .. code-block:: python - from __future__ import print_function import paddle.fluid as fluid - if fluid.in_dygraph_mode(): - print('running in dygraph mode') - else: - print('not running in dygraph mode') + + fluid.enable_dygraph() # 现在进入 dygraph 模式 + print(fluid.in_dygraph_mode()) # True + fluid.disable_dygraph() + print(fluid.in_dygraph_mode()) # False diff --git a/doc/fluid/api_cn/fluid_cn/is_compiled_with_cuda_cn.rst b/doc/fluid/api_cn/fluid_cn/is_compiled_with_cuda_cn.rst index 58112a408b1d57275e69a37ca48ba1bf7e55db6f..5f2741e78783c432eb22fd82509e2f3ebf7c808e 100644 --- a/doc/fluid/api_cn/fluid_cn/is_compiled_with_cuda_cn.rst +++ b/doc/fluid/api_cn/fluid_cn/is_compiled_with_cuda_cn.rst @@ -5,6 +5,9 @@ is_compiled_with_cuda .. py:function:: paddle.fluid.is_compiled_with_cuda() + + + 检查 ``whl`` 包是否可以被用来在GPU上运行模型 返回:支持gpu则为True,否则为False。 diff --git a/doc/fluid/api_cn/fluid_cn/load_cn.rst b/doc/fluid/api_cn/fluid_cn/load_cn.rst index f353a2457bce5f46c08ce647c6c4836df090330c..a12a65fbed8eacfcf1f17246fc3ee3001dd81c70 100644 --- a/doc/fluid/api_cn/fluid_cn/load_cn.rst +++ b/doc/fluid/api_cn/fluid_cn/load_cn.rst @@ -5,6 +5,10 @@ load .. py:function:: paddle.fluid.load(program, model_path, executor=None, var_list=None) +:api_attr: 声明式编程模式(静态图) + + + 该接口从Program中过滤出参数和优化器信息,然后从文件中获取相应的值。 如果Program和加载的文件之间参数的维度或数据类型不匹配,将引发异常。 diff --git a/doc/fluid/api_cn/fluid_cn/load_op_library_cn.rst b/doc/fluid/api_cn/fluid_cn/load_op_library_cn.rst index e5bb133e10cf5bbf59d609f661f0c38b134fe44b..944b78dc4e9cc84a2cabc3951d3091969f3dc763 100644 --- a/doc/fluid/api_cn/fluid_cn/load_op_library_cn.rst +++ b/doc/fluid/api_cn/fluid_cn/load_op_library_cn.rst @@ -5,6 +5,10 @@ load_op_library .. py:class:: paddle.fluid.load_op_library +:api_attr: 声明式编程模式(静态图) + + + ``load_op_library`` 用于自定义C++算子中,用来加载算子动态共享库。加载库后,注册好的算子及其Kernel实现将在PaddlePaddle主进程中可以被调用。 请注意,自定义算子的类型不能与框架中的现有算子类型相同。 参数: diff --git a/doc/fluid/api_cn/fluid_cn/memory_optimize_cn.rst b/doc/fluid/api_cn/fluid_cn/memory_optimize_cn.rst index 4b987db2c809c97859bdcc2c10cc5784489552e6..bc95cc3995e565d50cc846d613d449bb89d6e936 100644 --- a/doc/fluid/api_cn/fluid_cn/memory_optimize_cn.rst +++ b/doc/fluid/api_cn/fluid_cn/memory_optimize_cn.rst @@ -3,9 +3,12 @@ memory_optimize ------------------------------- -**注意:该API仅支持【静态图】模式** .. 
py:function:: paddle.fluid.memory_optimize(input_program, skip_opt_set=None, print_log=False, level=0, skip_grads=True) +:api_attr: 声明式编程模式(静态图) + + + **从1.6版本开始此接口不再推荐使用,请不要在新写的代码中使用它,1.6+版本已默认开启更优的存储优化策略** diff --git a/doc/fluid/api_cn/fluid_cn/name_scope_cn.rst b/doc/fluid/api_cn/fluid_cn/name_scope_cn.rst index 7f7be417930571c326a6be93b8a051caa3266cef..bf17054ba22ba285d436886c5e7491c8447c0132 100644 --- a/doc/fluid/api_cn/fluid_cn/name_scope_cn.rst +++ b/doc/fluid/api_cn/fluid_cn/name_scope_cn.rst @@ -3,10 +3,13 @@ name_scope ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.name_scope(prefix=None) +:api_attr: 声明式编程模式(静态图) + + + 该函数为operators生成不同的命名空间。该函数只用于调试和可视化,不建议用在其它方面。 diff --git a/doc/fluid/api_cn/fluid_cn/one_hot_cn.rst b/doc/fluid/api_cn/fluid_cn/one_hot_cn.rst index 0d36a980ebe7aa4940b98d58b81ff91d997e0594..fbf08df2176015fbe1a50ffef5da07e2a958089c 100644 --- a/doc/fluid/api_cn/fluid_cn/one_hot_cn.rst +++ b/doc/fluid/api_cn/fluid_cn/one_hot_cn.rst @@ -5,6 +5,12 @@ one_hot .. py:function:: paddle.fluid.one_hot(input, depth, allow_out_of_range=False) +:alias_main: paddle.nn.functional.one_hot +:alias: paddle.nn.functional.one_hot,paddle.nn.functional.common.one_hot +:old_api: paddle.fluid.one_hot + + + 该OP将输入(input)中的每个id转换为一个one-hot向量,其长度为 ``depth`` ,该id对应的向量维度上的值为1,其余维度的值为0。 输出的Tensor(或LoDTensor)的shape是在输入shape的最后一维后面添加了depth的维度。 diff --git a/doc/fluid/api_cn/fluid_cn/program_guard_cn.rst b/doc/fluid/api_cn/fluid_cn/program_guard_cn.rst index 83c53aeab3e1791ca5f99fa1db623a24e465acdd..d1b9e68b08e74b52c029aa848ab5bd382cc36e9c 100644 --- a/doc/fluid/api_cn/fluid_cn/program_guard_cn.rst +++ b/doc/fluid/api_cn/fluid_cn/program_guard_cn.rst @@ -3,10 +3,13 @@ program_guard ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.program_guard(main_program, startup_program=None) +:api_attr: 声明式编程模式(静态图) + + + 该接口应配合使用python的 ``with`` 语句来将 ``with`` block 里的算子和变量添加进指定的全局主程序(main program)和启动程序(startup program)。 ``with`` 语句块中的fluid.layers下各接口将在新的main program(主程序)中添加operators(算子)和variables(变量)。 @@ -23,7 +26,7 @@ program_guard main_program = fluid.Program() startup_program = fluid.Program() with fluid.program_guard(main_program, startup_program): - data = fluid.layers.data(name='image', shape=[784, 784], dtype='float32') + data = fluid.data(name='image', shape=[None, 784, 784], dtype='float32') hidden = fluid.layers.fc(input=data, size=10, act='relu') 例如,当组的网不需要startup_program初始化各变量时,可以传入一个临时的program。 @@ -36,5 +39,5 @@ program_guard main_program = fluid.Program() # 如果您不需要关心startup program,传入一个临时值即可 with fluid.program_guard(main_program, fluid.Program()): - data = fluid.layers.data(name='image', shape=[784, 784], dtype='float32') + data = fluid.data(name='image', shape=[None, 784, 784], dtype='float32') diff --git a/doc/fluid/api_cn/fluid_cn/release_memory_cn.rst b/doc/fluid/api_cn/fluid_cn/release_memory_cn.rst index 253401270e141585cb874aa5d02a2470c530ec3c..ef68a966501630d8a29aadee6622f2e16bfca447 100644 --- a/doc/fluid/api_cn/fluid_cn/release_memory_cn.rst +++ b/doc/fluid/api_cn/fluid_cn/release_memory_cn.rst @@ -3,8 +3,11 @@ release_memory ------------------------------- -**注意:该API仅支持【静态图】模式** .. 
py:function:: paddle.fluid.release_memory(input_program, skip_opt_set=None) +:api_attr: 声明式编程模式(静态图) + + + **从1.6版本开始此接口不再推荐使用,请不要在新写的代码中使用它,1.6+版本已默认开启更优的存储优化策略** diff --git a/doc/fluid/api_cn/fluid_cn/require_version_cn.rst b/doc/fluid/api_cn/fluid_cn/require_version_cn.rst index bcaa95f2baa853b55fbec35863c06018f1e22b08..19f14cb37fe9adc927141d647e61ddfa843bc3d2 100644 --- a/doc/fluid/api_cn/fluid_cn/require_version_cn.rst +++ b/doc/fluid/api_cn/fluid_cn/require_version_cn.rst @@ -4,6 +4,9 @@ require_version ------------------------------- .. py:function:: paddle.fluid.require_version(min_version, max_version=None) + + + 该接口用于检查已安装的飞桨版本是否介于[``min_version``, ``max_version``]之间(包含 ``min_version`` 和 ``max_version`` ),如果已安装的版本低于 ``min_version`` 或者高于 ``max_version`` ,将会抛出异常。该接口无返回值。 参数: diff --git a/doc/fluid/api_cn/fluid_cn/save_cn.rst b/doc/fluid/api_cn/fluid_cn/save_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a15b23f2b5b1b798943dcb6d7ef49ce8fac229f1 --- /dev/null +++ b/doc/fluid/api_cn/fluid_cn/save_cn.rst @@ -0,0 +1,51 @@ +.. _cn_api_fluid_save: + +save +------------------------------- + + +.. py:function:: paddle.fluid.save(program, model_path) + +:api_attr: 声明式编程模式(静态图) +:alias_main: paddle.save +:alias: paddle.save,paddle.tensor.save,paddle.tensor.io.save +:old_api: paddle.fluid.save + + + +该接口将传入的参数、优化器信息和网络描述保存到 ``model_path`` 。 + +参数包含所有的可训练 :ref:`cn_api_fluid_Variable` ,将保存到后缀为 ``.pdparams`` 的文件中。 + +优化器信息包含优化器使用的所有变量。对于Adam优化器,包含beta1、beta2、momentum等。 +所有信息将保存到后缀为 ``.pdopt`` 的文件中。(如果优化器没有需要保存的变量(如sgd),则不会生成)。 + +网络描述是程序的描述。它只用于部署。描述将保存到后缀为 ``.pdmodel`` 的文件中。 + +参数: + - **program** ( :ref:`cn_api_fluid_Program` ) – 要保存的Program。 + - **model_path** (str) – 保存program的文件前缀。格式为 ``目录名称/文件前缀``。如果文件前缀为空字符串,会引发异常。 + +返回: 无 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + x = fluid.data(name="x", shape=[10, 10], dtype='float32') + y = fluid.layers.fc(x, 10) + z = fluid.layers.fc(y, 10) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + + fluid.save(fluid.default_main_program(), "./test_path") + + + + + + + diff --git a/doc/fluid/api_cn/fluid_cn/scope_guard_cn.rst b/doc/fluid/api_cn/fluid_cn/scope_guard_cn.rst index 2bbbe08642c4303c4399db5f1c34aab9904e3280..df0566e1b7c3e4931bf69e3734773e4a333f4d57 100644 --- a/doc/fluid/api_cn/fluid_cn/scope_guard_cn.rst +++ b/doc/fluid/api_cn/fluid_cn/scope_guard_cn.rst @@ -3,10 +3,13 @@ scope_guard ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.scope_guard(scope) +:api_attr: 声明式编程模式(静态图) + + + 该接口通过 python 的 ``with`` 语句切换作用域(scope)。 作用域记录了变量名和变量 ( :ref:`api_guide_Variable` ) 之间的映射关系,类似于编程语言中的大括号。 diff --git a/doc/fluid/api_cn/fluid_cn/set_flags_cn.rst b/doc/fluid/api_cn/fluid_cn/set_flags_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a4f6fe1cd02bd2a691059b6732a04e757ab0304e --- /dev/null +++ b/doc/fluid/api_cn/fluid_cn/set_flags_cn.rst @@ -0,0 +1,18 @@ +.. _cn_api_fluid_set_flags: + +set_flags +------------------------------- + +.. py:function:: paddle.fluid.set_flags(flags) +用于设置Paddle框架中环境变量FLAGS的值。 + +参数: + - **flags** (dict) - 包含想要设置的环境变量FLAGS的名称和值的字典。 + +**示例代码** + +.. 
code-block:: python + + import paddle.fluid as fluid + + fluid.set_flags({'FLAGS_eager_delete_tensor_gb': 1.0}) diff --git a/doc/fluid/api_cn/fluid_cn/set_global_initializer_cn.rst b/doc/fluid/api_cn/fluid_cn/set_global_initializer_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..023629cf753daf5ecfb29a1b1984fbd184604bc4 --- /dev/null +++ b/doc/fluid/api_cn/fluid_cn/set_global_initializer_cn.rst @@ -0,0 +1,42 @@ +.. _cn_api_fluid_set_global_initializer: + +set_global_initializer +------------------------------- + +.. py:function:: paddle.fluid.set_global_initializer(weight_init, bias_init=None) + +该API用于设置Paddle框架中全局的参数初始化方法。该API只对位于其后的代码生效。 + +模型参数为模型中的weight和bias统称,在fluid中对应fluid.Parameter类,继承自fluid.Variable,是一种可持久化的variable。 +该API的设置仅对模型参数生效,对通过 :ref:`cn_api_fluid_layers_create_global_var` 、 :ref:`cn_api_fluid_layers_create_tensor` 等API创建的变量不会生效。 + +如果创建网络层时还通过 ``param_attr`` 、 ``bias_attr`` 设置了初始化方式,这里的全局设置将不会生效,因为其优先级更低。 + +参数: + - **weight_init** (Initializer) - 设置框架的全局的weight参数初始化方法。 + - **bias_init** (Initializer,可选) - 设置框架的全局的bias参数初始化方法。默认:None。 + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + fluid.set_global_initializer(fluid.initializer.Uniform(), fluid.initializer.Constant()) + x = fluid.data(name="x", shape=[1, 3, 32, 32]) + + # conv1的weight参数是通过Uniform来初始化 + # conv1的bias参数是通过Constant来初始化 + conv1 = fluid.layers.conv2d(x, 5, 3) + + # 如果同时设置了param_attr/bias_attr, 全局初始化将不会生效 + # conv2的weight参数是通过Xavier来初始化 + # conv2的bias参数是通过Normal来初始化 + conv2 = fluid.layers.conv2d(conv1, 5, 3, + param_attr=fluid.initializer.Xavier(), + bias_attr=fluid.initializer.Normal()) + + # 取消全局参数初始化的设置 + fluid.set_global_initializer(None) \ No newline at end of file diff --git a/doc/fluid/api_cn/framework_cn.rst b/doc/fluid/api_cn/framework_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..54cf4aa250eb480bcce81f7cb0a93a8b49df264e --- /dev/null +++ b/doc/fluid/api_cn/framework_cn.rst @@ -0,0 +1,39 @@ +======================= +paddle.framework +======================= + + + + +.. toctree:: + :maxdepth: 1 + + framework_cn/append_backward_cn.rst + framework_cn/BuildStrategy_cn.rst + framework_cn/CompiledProgram_cn.rst + framework_cn/CPUPlace_cn.rst + framework_cn/create_global_var_cn.rst + framework_cn/create_parameter_cn.rst + framework_cn/CUDAPinnedPlace_cn.rst + framework_cn/CUDAPlace_cn.rst + framework_cn/default_main_program_cn.rst + framework_cn/default_startup_program_cn.rst + framework_cn/ExecutionStrateg y_cn.rst + framework_cn/Executor_cn.rst + framework_cn/get_default_dtype_cn.rst + framework_cn/global_scope_cn.rst + framework_cn/gradients_cn.rst + framework_cn/manual_seed_cn.rst + framework_cn/get_cuda_rng_state_cn.rst + framework_cn/set_cuda_rng_state_cn.rst + framework_cn/name_scope_cn.rst + framework_cn/ParallelExecutor_cn.rst + framework_cn/ParamAttr_cn.rst + framework_cn/Print_cn.rst + framework_cn/Program_cn.rst + framework_cn/program_guard_cn.rst + framework_cn/py_func_cn.rst + framework_cn/scope_guard_cn.rst + framework_cn/set_default_dtype_cn.rst + framework_cn/Variable_cn.rst + framework_cn/WeightNormParamAttr_cn.rst diff --git a/doc/fluid/api_cn/framework_cn/BuildStrategy_cn.rst b/doc/fluid/api_cn/framework_cn/BuildStrategy_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..55d6084d53c71728e7dac4c1c56e913c828b2b5d --- /dev/null +++ b/doc/fluid/api_cn/framework_cn/BuildStrategy_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_framework_cn_BuildStrategy: + +BuildStrategy +------------------------------- +:doc_source: paddle.fluid.compiler.BuildStrategy + + diff --git a/doc/fluid/api_cn/framework_cn/CPUPlace_cn.rst b/doc/fluid/api_cn/framework_cn/CPUPlace_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..fbbe61cb7bee6054bc4416bbfca77b1ea9bde803 --- /dev/null +++ b/doc/fluid/api_cn/framework_cn/CPUPlace_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_framework_cn_CPUPlace: + +CPUPlace +------------------------------- +:doc_source: paddle.fluid.core.CPUPlace + + diff --git a/doc/fluid/api_cn/framework_cn/CUDAPinnedPlace_cn.rst b/doc/fluid/api_cn/framework_cn/CUDAPinnedPlace_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..49e697a6d9a940ffb9c21695fc43143bf4fe531d --- /dev/null +++ b/doc/fluid/api_cn/framework_cn/CUDAPinnedPlace_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_framework_cn_CUDAPinnedPlace: + +CUDAPinnedPlace +------------------------------- +:doc_source: paddle.fluid.core.CUDAPinnedPlace + + diff --git a/doc/fluid/api_cn/framework_cn/CUDAPlace_cn.rst b/doc/fluid/api_cn/framework_cn/CUDAPlace_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d04858456cbc3e47d492ff1ef6126f1425ce3076 --- /dev/null +++ b/doc/fluid/api_cn/framework_cn/CUDAPlace_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_framework_cn_CUDAPlace: + +CUDAPlace +------------------------------- +:doc_source: paddle.fluid.core.CUDAPlace + + diff --git a/doc/fluid/api_cn/framework_cn/CompiledProgram_cn.rst b/doc/fluid/api_cn/framework_cn/CompiledProgram_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4332e00c7dad9a217401e22a1fa5cfb42817fe08 --- /dev/null +++ b/doc/fluid/api_cn/framework_cn/CompiledProgram_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_framework_cn_CompiledProgram: + +CompiledProgram +------------------------------- +:doc_source: paddle.fluid.compiler.CompiledProgram + + diff --git a/doc/fluid/api_cn/framework_cn/ExecutionStrateg y_cn.rst b/doc/fluid/api_cn/framework_cn/ExecutionStrateg y_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..889c1c1ecd3f03b9a5ce00715e87c76eb4ed191f --- /dev/null +++ b/doc/fluid/api_cn/framework_cn/ExecutionStrateg y_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_framework_cn_ExecutionStrateg y: + +ExecutionStrateg y +------------------------------- +:doc_source: paddle.fluid.compiler.ExecutionStrateg y + + diff --git a/doc/fluid/api_cn/framework_cn/Executor_cn.rst b/doc/fluid/api_cn/framework_cn/Executor_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9597fcd5d704565e1caa32588bab0c3a52ca719f --- /dev/null +++ b/doc/fluid/api_cn/framework_cn/Executor_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_framework_cn_Executor: + +Executor +------------------------------- +:doc_source: paddle.fluid.executor.Executor + + diff --git a/doc/fluid/api_cn/framework_cn/ParallelExecutor_cn.rst b/doc/fluid/api_cn/framework_cn/ParallelExecutor_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1fe55329fbfd3e67a47071271494600e79bc7f5c --- /dev/null +++ b/doc/fluid/api_cn/framework_cn/ParallelExecutor_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_framework_cn_ParallelExecutor: + +ParallelExecutor +------------------------------- +:doc_source: paddle.fluid.parallel_executor.ParallelExecutor + + diff --git a/doc/fluid/api_cn/framework_cn/ParamAttr_cn.rst b/doc/fluid/api_cn/framework_cn/ParamAttr_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..fdafdf1514c76de3a5c2946cdaeb1ae608cd97e5 --- /dev/null +++ b/doc/fluid/api_cn/framework_cn/ParamAttr_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_framework_cn_ParamAttr: + +ParamAttr +------------------------------- +:doc_source: paddle.fluid.param_attr.ParamAttr + + diff --git a/doc/fluid/api_cn/framework_cn/Print_cn.rst b/doc/fluid/api_cn/framework_cn/Print_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a2be923d06a0d52d151f194ce7b07305e88bb3f2 --- /dev/null +++ b/doc/fluid/api_cn/framework_cn/Print_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_framework_cn_Print: + +Print +------------------------------- +:doc_source: paddle.fluid.layers.control_flow.Print + + diff --git a/doc/fluid/api_cn/framework_cn/Program_cn.rst b/doc/fluid/api_cn/framework_cn/Program_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..74c50c89d8291c1da510d747f8c150bc96e309db --- /dev/null +++ b/doc/fluid/api_cn/framework_cn/Program_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_framework_cn_Program: + +Program +------------------------------- +:doc_source: paddle.fluid.framework.Program + + diff --git a/doc/fluid/api_cn/framework_cn/Variable_cn.rst b/doc/fluid/api_cn/framework_cn/Variable_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2f9e6e06072bc65c320a4d2beb5cb4f1c46bc6d3 --- /dev/null +++ b/doc/fluid/api_cn/framework_cn/Variable_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_framework_cn_Variable: + +Variable +------------------------------- +:doc_source: paddle.fluid.framework.Variable + + diff --git a/doc/fluid/api_cn/framework_cn/WeightNormParamAttr_cn.rst b/doc/fluid/api_cn/framework_cn/WeightNormParamAttr_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a1d4d7fe0c01b806f1fc059c5f6db2113b7d1391 --- /dev/null +++ b/doc/fluid/api_cn/framework_cn/WeightNormParamAttr_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_framework_cn_WeightNormParamAttr: + +WeightNormParamAttr +------------------------------- +:doc_source: paddle.fluid.param_attr.WeightNormParamAttr + + diff --git a/doc/fluid/api_cn/framework_cn/append_backward_cn.rst b/doc/fluid/api_cn/framework_cn/append_backward_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..cd799f1d60b8d86a05fc5ab3cf1a32c824b11485 --- /dev/null +++ b/doc/fluid/api_cn/framework_cn/append_backward_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_framework_cn_append_backward: + +append_backward +------------------------------- +:doc_source: paddle.fluid.backward.append_backward + + diff --git a/doc/fluid/api_cn/framework_cn/create_global_var_cn.rst b/doc/fluid/api_cn/framework_cn/create_global_var_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2524a5f56870cd1d398e363f3210f4eab9628a8b --- /dev/null +++ b/doc/fluid/api_cn/framework_cn/create_global_var_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_framework_cn_create_global_var: + +create_global_var +------------------------------- +:doc_source: paddle.fluid.layers.tensor.create_global_var + + diff --git a/doc/fluid/api_cn/framework_cn/create_parameter_cn.rst b/doc/fluid/api_cn/framework_cn/create_parameter_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5bf4989e63ec1474c4e411415b0d8e1834740301 --- /dev/null +++ b/doc/fluid/api_cn/framework_cn/create_parameter_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_framework_cn_create_parameter: + +create_parameter +------------------------------- +:doc_source: paddle.fluid.layers.create_parameter + + diff --git a/doc/fluid/api_cn/framework_cn/default_main_program_cn.rst b/doc/fluid/api_cn/framework_cn/default_main_program_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..823b70c49cd2d91a3ce0e190293ec3d067635eb1 --- /dev/null +++ b/doc/fluid/api_cn/framework_cn/default_main_program_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_framework_cn_default_main_program: + +default_main_program +------------------------------- +:doc_source: paddle.fluid.framework.default_main_program + + diff --git a/doc/fluid/api_cn/framework_cn/default_startup_program_cn.rst b/doc/fluid/api_cn/framework_cn/default_startup_program_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..50dfc93c9e5b6ae2c33596567bed24991337a7ff --- /dev/null +++ b/doc/fluid/api_cn/framework_cn/default_startup_program_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_framework_cn_default_startup_program: + +default_startup_program +------------------------------- +:doc_source: paddle.fluid.framework.default_startup_program + + diff --git a/doc/fluid/api_cn/framework_cn/get_cuda_rng_state_cn.rst b/doc/fluid/api_cn/framework_cn/get_cuda_rng_state_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..35d76080ff458cfaed76a32fc594c2dedd8a033d --- /dev/null +++ b/doc/fluid/api_cn/framework_cn/get_cuda_rng_state_cn.rst @@ -0,0 +1,24 @@ +.. _cn_api_paddle_framework_get_cuda_rng_state: + +get_cuda_rng_state +------------------------------- + +.. py:function:: paddle.framework.get_cuda_rng_state() + + +获取cuda随机数生成器的状态信息 + + +参数: + + 无 + +返回: + GeneratorState:对象 + +**代码示例**: + +.. code-block:: python + + import paddle + sts = paddle.get_cuda_rng_state() diff --git a/doc/fluid/api_cn/framework_cn/get_default_dtype_cn.rst b/doc/fluid/api_cn/framework_cn/get_default_dtype_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..cedfe95fb87a30c2038e33484298affc4439c047 --- /dev/null +++ b/doc/fluid/api_cn/framework_cn/get_default_dtype_cn.rst @@ -0,0 +1,23 @@ +.. _cn_api_paddle_framework_get_default_dtype: + +get_default_dtype +------------------------------- + +.. py:function:: paddle.get_default_dtype() + + +得到当前全局的dtype。 该值初始是float32。 + + +参数: + + 无 + +返回: string,这个全局dtype仅支持float16、float32、float64 + +**代码示例**: + +.. code-block:: python + + import paddle + paddle.get_default_dtype() diff --git a/doc/fluid/api_cn/framework_cn/global_scope_cn.rst b/doc/fluid/api_cn/framework_cn/global_scope_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..fa8317ccd449bfcacbfc399ad2b6139607ff3ae9 --- /dev/null +++ b/doc/fluid/api_cn/framework_cn/global_scope_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_framework_cn_global_scope: + +global_scope +------------------------------- +:doc_source: paddle.fluid.executor.global_scope + + diff --git a/doc/fluid/api_cn/framework_cn/gradients_cn.rst b/doc/fluid/api_cn/framework_cn/gradients_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4303d5753a28bac6c62136cfd29d1d5a0decffc5 --- /dev/null +++ b/doc/fluid/api_cn/framework_cn/gradients_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_framework_cn_gradients: + +gradients +------------------------------- +:doc_source: paddle.fluid.backward.gradients + + diff --git a/doc/fluid/api_cn/framework_cn/manual_seed_cn.rst b/doc/fluid/api_cn/framework_cn/manual_seed_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7ddf88f632df1a1c4fd6aea961b9cf30d75c682c --- /dev/null +++ b/doc/fluid/api_cn/framework_cn/manual_seed_cn.rst @@ -0,0 +1,24 @@ +.. _cn_api_paddle_framework_manual_seed: + +manual_seed +------------------------------- + +.. py:function:: paddle.framework.manual_seed(seed) + + +设置全局默认generator的随机种子。 + + +参数: + + - **seed** (int) - 要设置的的随机种子,推荐使用较大的整数。 + +返回: + Generator:全局默认generator对象。 + +**代码示例**: + +.. code-block:: python + + import paddle + paddle.manual_seed(102) diff --git a/doc/fluid/api_cn/framework_cn/name_scope_cn.rst b/doc/fluid/api_cn/framework_cn/name_scope_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..fa537fa5f86529e46de1a9d8c6aa318fe4a0acc0 --- /dev/null +++ b/doc/fluid/api_cn/framework_cn/name_scope_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_framework_cn_name_scope: + +name_scope +------------------------------- +:doc_source: paddle.fluid.framework.name_scope + + diff --git a/doc/fluid/api_cn/framework_cn/program_guard_cn.rst b/doc/fluid/api_cn/framework_cn/program_guard_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a472d59f9d11e9ae00171c7cbe631d56e4ffdcca --- /dev/null +++ b/doc/fluid/api_cn/framework_cn/program_guard_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_framework_cn_program_guard: + +program_guard +------------------------------- +:doc_source: paddle.fluid.framework.program_guard + + diff --git a/doc/fluid/api_cn/framework_cn/py_func_cn.rst b/doc/fluid/api_cn/framework_cn/py_func_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..369601410f3f6bbba7948aca2c65dc17f2a27e34 --- /dev/null +++ b/doc/fluid/api_cn/framework_cn/py_func_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_framework_cn_py_func: + +py_func +------------------------------- +:doc_source: paddle.fluid.layers.nn.py_func + + diff --git a/doc/fluid/api_cn/framework_cn/scope_guard_cn.rst b/doc/fluid/api_cn/framework_cn/scope_guard_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..570e2aef328233a268931a141c04c0d540924e0f --- /dev/null +++ b/doc/fluid/api_cn/framework_cn/scope_guard_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_framework_cn_scope_guard: + +scope_guard +------------------------------- +:doc_source: paddle.fluid.executor.scope_guard + + diff --git a/doc/fluid/api_cn/framework_cn/set_cuda_rng_state_cn.rst b/doc/fluid/api_cn/framework_cn/set_cuda_rng_state_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e63225b81eba139c52a9fdb2fecc54ab6ede2e93 --- /dev/null +++ b/doc/fluid/api_cn/framework_cn/set_cuda_rng_state_cn.rst @@ -0,0 +1,25 @@ +.. _cn_api_paddle_framework_set_cuda_rng_state: + +set_cuda_rng_state +------------------------------- + +.. 
py:function:: paddle.framework.set_cuda_rng_state(state_list) + + +设置cuda随机数生成器的状态信息 + + +参数: + + - **state_list** (list [GeneratorState]) - 需要设置的随机数生成器状态信息列表,通过get_cuda_rng_state()获取。 + +返回: + 无 + +**代码示例**: + +.. code-block:: python + + import paddle + sts = paddle.get_cuda_rng_state() + paddle.set_cuda_rng_state(sts) diff --git a/doc/fluid/api_cn/framework_cn/set_default_dtype_cn.rst b/doc/fluid/api_cn/framework_cn/set_default_dtype_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9c6c6fe948acb39b526261c9ef92b8eaf4b59858 --- /dev/null +++ b/doc/fluid/api_cn/framework_cn/set_default_dtype_cn.rst @@ -0,0 +1,23 @@ +.. _cn_api_paddle_framework_set_default_dtype: + +set_default_dtype +------------------------------- + +.. py:function:: paddle.set_default_dtype(d) + + +设置默认的全局dtype。 默认的全局dtype最初是float32。 + + +参数: + + - **d** (string|np.dtype) - 设为默认值的dtype。 它仅支持float16,float32和float64。 + +返回: 无 + +**代码示例**: + +.. code-block:: python + + import paddle + paddle.set_default_dtype("float32") diff --git a/doc/fluid/api_cn/gen_index.py b/doc/fluid/api_cn/gen_index.py index 35c33d0bfb128ac92ec0c77f93cc4433facb1cb2..4c97004d53137e4c5faa6dd58d5777e9e5106d64 100644 --- a/doc/fluid/api_cn/gen_index.py +++ b/doc/fluid/api_cn/gen_index.py @@ -14,8 +14,20 @@ API Reference ''') file_object.write(' ../api_guides/index_cn.rst'+'\n') + + file_names = [] + file_names = glob.glob("*.rst") + + for file_name in sorted(file_names): + with open(file_name, 'r')as f: + for i in range(2): + line = f.readline().strip() + if line.find('paddle.') != -1: + file_object.write(' '+file_name + "\n") + file_names.remove(file_name) + file_object.write(' fluid_cn.rst'+'\n') - for file_name in sorted(glob.glob("*.rst")): + for file_name in sorted(file_names): if file_name != 'index.rst' and file_name != 'index_cn.rst' and file_name != 'fluid_cn.rst': file_object.write(' '+file_name + "\n") file_object.close( ) diff --git a/doc/fluid/api_cn/imperative_cn.rst b/doc/fluid/api_cn/imperative_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0f99f1c8f7decf504eb675c70b8a81c2715cf6db --- /dev/null +++ b/doc/fluid/api_cn/imperative_cn.rst @@ -0,0 +1,31 @@ +======================= +paddle.imperative +======================= + +.. toctree:: + :maxdepth: 1 + + imperative_cn/CosineDecay_cn.rst + imperative_cn/DataParallel_cn.rst + imperative_cn/declarative_cn.rst + imperative_cn/enabled_cn.rst + imperative_cn/ExponentialDecay_cn.rst + imperative_cn/grad_cn.rst + imperative_cn/guard_cn.rst + imperative_cn/InverseTimeDecay_cn.rst + imperative_cn/jit_cn.rst + imperative_cn/load_cn.rst + imperative_cn/load_dygraph_cn.rst + imperative_cn/NaturalExpDecay_cn.rst + imperative_cn/no_grad_cn.rst + imperative_cn/NoamDecay_cn.rst + imperative_cn/ParallelEnv_cn.rst + imperative_cn/PiecewiseDecay_cn.rst + imperative_cn/PolynomialDecay_cn.rst + imperative_cn/prepare_context_cn.rst + imperative_cn/ProgramTranslator_cn.rst + imperative_cn/save_cn.rst + imperative_cn/save_dygraph_cn.rst + imperative_cn/to_variable_cn.rst + imperative_cn/TracedLayer_cn.rst + imperative_cn/TranslatedLayer_cn.rst diff --git a/doc/fluid/api_cn/imperative_cn/CosineDecay_cn.rst b/doc/fluid/api_cn/imperative_cn/CosineDecay_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..bbfb9d267d5a95ecba6fa1448539e5147e151a50 --- /dev/null +++ b/doc/fluid/api_cn/imperative_cn/CosineDecay_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_imperative_cn_CosineDecay: + +CosineDecay +------------------------------- +:doc_source: paddle.fluid.dygraph.learning_rate_scheduler.CosineDecay + + diff --git a/doc/fluid/api_cn/imperative_cn/DataParallel_cn.rst b/doc/fluid/api_cn/imperative_cn/DataParallel_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6fc3900aa741404a9efc384ea511ebd60db81576 --- /dev/null +++ b/doc/fluid/api_cn/imperative_cn/DataParallel_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_imperative_cn_DataParallel: + +DataParallel +------------------------------- +:doc_source: paddle.fluid.dygraph.parallel.DataParallel + + diff --git a/doc/fluid/api_cn/imperative_cn/ExponentialDecay_cn.rst b/doc/fluid/api_cn/imperative_cn/ExponentialDecay_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6caba0a407430f2696ce40c267de8b5c3cf314d8 --- /dev/null +++ b/doc/fluid/api_cn/imperative_cn/ExponentialDecay_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_imperative_cn_ExponentialDecay: + +ExponentialDecay +------------------------------- +:doc_source: paddle.fluid.dygraph.learning_rate_scheduler.ExponentialDecay + + diff --git a/doc/fluid/api_cn/imperative_cn/InverseTimeDecay_cn.rst b/doc/fluid/api_cn/imperative_cn/InverseTimeDecay_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c08fb8be19228648219de9d346acc3b817febff8 --- /dev/null +++ b/doc/fluid/api_cn/imperative_cn/InverseTimeDecay_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_imperative_cn_InverseTimeDecay: + +InverseTimeDecay +------------------------------- +:doc_source: paddle.fluid.dygraph.learning_rate_scheduler.InverseTimeDecay + + diff --git a/doc/fluid/api_cn/imperative_cn/NaturalExpDecay_cn.rst b/doc/fluid/api_cn/imperative_cn/NaturalExpDecay_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7c1cae72f52f35651cf38b6bcf7ff67daf85f696 --- /dev/null +++ b/doc/fluid/api_cn/imperative_cn/NaturalExpDecay_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_imperative_cn_NaturalExpDecay: + +NaturalExpDecay +------------------------------- +:doc_source: paddle.fluid.dygraph.learning_rate_scheduler.NaturalExpDecay + + diff --git a/doc/fluid/api_cn/imperative_cn/NoamDecay_cn.rst b/doc/fluid/api_cn/imperative_cn/NoamDecay_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..12a4c8a26d6dbd86447b83b110e83644670d37f7 --- /dev/null +++ b/doc/fluid/api_cn/imperative_cn/NoamDecay_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_imperative_cn_NoamDecay: + +NoamDecay +------------------------------- +:doc_source: paddle.fluid.dygraph.learning_rate_scheduler.NoamDecay + + diff --git a/doc/fluid/api_cn/imperative_cn/ParallelEnv_cn.rst b/doc/fluid/api_cn/imperative_cn/ParallelEnv_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a6e6717d78b350fd81e8b1defa50f48c7ae8c05e --- /dev/null +++ b/doc/fluid/api_cn/imperative_cn/ParallelEnv_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_imperative_cn_ParallelEnv: + +ParallelEnv +------------------------------- +:doc_source: paddle.fluid.dygraph.parallel.ParallelEnv + + diff --git a/doc/fluid/api_cn/imperative_cn/PiecewiseDecay_cn.rst b/doc/fluid/api_cn/imperative_cn/PiecewiseDecay_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..fb3ace6068630a572bc711c9331007b762ccc503 --- /dev/null +++ b/doc/fluid/api_cn/imperative_cn/PiecewiseDecay_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_imperative_cn_PiecewiseDecay: + +PiecewiseDecay +------------------------------- +:doc_source: paddle.fluid.dygraph.learning_rate_scheduler.PiecewiseDecay + + diff --git a/doc/fluid/api_cn/imperative_cn/PolynomialDecay_cn.rst b/doc/fluid/api_cn/imperative_cn/PolynomialDecay_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9d2ab31fcff95c385f5b1556006c657f7704ed51 --- /dev/null +++ b/doc/fluid/api_cn/imperative_cn/PolynomialDecay_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_imperative_cn_PolynomialDecay: + +PolynomialDecay +------------------------------- +:doc_source: paddle.fluid.dygraph.learning_rate_scheduler.PolynomialDecay + + diff --git a/doc/fluid/api_cn/imperative_cn/ProgramTranslator_cn.rst b/doc/fluid/api_cn/imperative_cn/ProgramTranslator_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3ff078a775b7a42c47078e5d7bc7c1b41fcb1a02 --- /dev/null +++ b/doc/fluid/api_cn/imperative_cn/ProgramTranslator_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_imperative_cn_ProgramTranslator: + +ProgramTranslator +------------------------------- +:doc_source: paddle.fluid.dygraph.ProgramTranslator + + diff --git a/doc/fluid/api_cn/imperative_cn/TracedLayer_cn.rst b/doc/fluid/api_cn/imperative_cn/TracedLayer_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..de2697d6d9f068cb1870de9f23f4d8b502e2568d --- /dev/null +++ b/doc/fluid/api_cn/imperative_cn/TracedLayer_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_imperative_cn_TracedLayer: + +TracedLayer +------------------------------- +:doc_source: paddle.fluid.dygraph.jit.TracedLayer + + diff --git a/doc/fluid/api_cn/imperative_cn/TranslatedLayer_cn.rst b/doc/fluid/api_cn/imperative_cn/TranslatedLayer_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..546094acf65c4fb30341d60ea157576601ae8766 --- /dev/null +++ b/doc/fluid/api_cn/imperative_cn/TranslatedLayer_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_imperative_TranslatedLayer: + +TranslatedLayer +------------------------------- +:doc_source: paddle.fluid.dygraph.io.TranslatedLayer + + diff --git a/doc/fluid/api_cn/imperative_cn/declarative_cn.rst b/doc/fluid/api_cn/imperative_cn/declarative_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a72beb42f9f20921e44f27fba6e799a8091575fe --- /dev/null +++ b/doc/fluid/api_cn/imperative_cn/declarative_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_imperative_cn_declarative: + +declarative +------------------------------- +:doc_source: paddle.fluid.dygraph.jit.declarative + + diff --git a/doc/fluid/api_cn/imperative_cn/enabled_cn.rst b/doc/fluid/api_cn/imperative_cn/enabled_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a127f528edd20149f51f9b0ba6954aa0a2bfe661 --- /dev/null +++ b/doc/fluid/api_cn/imperative_cn/enabled_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_imperative_cn_enabled: + +enabled +------------------------------- +:doc_source: paddle.fluid.dygraph.base.enabled + + diff --git a/doc/fluid/api_cn/imperative_cn/grad_cn.rst b/doc/fluid/api_cn/imperative_cn/grad_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e3d75a0ee5c372a1d9d483eab34bae932b333d48 --- /dev/null +++ b/doc/fluid/api_cn/imperative_cn/grad_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_imperative_cn_grad: + +grad +------------------------------- +:doc_source: paddle.fluid.dygraph.base.grad + + diff --git a/doc/fluid/api_cn/imperative_cn/guard_cn.rst b/doc/fluid/api_cn/imperative_cn/guard_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..93db3250064b355d2f9f712202b11ffc6bffe518 --- /dev/null +++ b/doc/fluid/api_cn/imperative_cn/guard_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_imperative_cn_guard: + +guard +------------------------------- +:doc_source: paddle.fluid.dygraph.base.guard + + diff --git a/doc/fluid/api_cn/imperative_cn/jit_cn.rst b/doc/fluid/api_cn/imperative_cn/jit_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..baa8019a5285b3dd667fec3921097c54e37b45ee --- /dev/null +++ b/doc/fluid/api_cn/imperative_cn/jit_cn.rst @@ -0,0 +1,12 @@ +=== +jit +=== + +.. toctree:: + :maxdepth: 1 + + jit_cn/save_cn.rst + jit_cn/set_code_level_cn.rst + jit_cn/set_verbosity.rst + jit_cn/load_cn.rst + jit_cn/SaveLoadConfig_cn.rst diff --git a/doc/fluid/api_cn/imperative_cn/jit_cn/SaveLoadConfig_cn.rst b/doc/fluid/api_cn/imperative_cn/jit_cn/SaveLoadConfig_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..154e987bdb3ed8d3d86858b13b797b897b8eed62 --- /dev/null +++ b/doc/fluid/api_cn/imperative_cn/jit_cn/SaveLoadConfig_cn.rst @@ -0,0 +1,5 @@ +.. _cn_api_imperative_jit_SaveLoadConfig: + +SaveLoadConfig +------------------------------- +:doc_source: paddle.fluid.dygraph.jit.SaveLoadConfig \ No newline at end of file diff --git a/doc/fluid/api_cn/imperative_cn/jit_cn/load_cn.rst b/doc/fluid/api_cn/imperative_cn/jit_cn/load_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a326fa58f1bf3634a498113bccac46627df0d8e1 --- /dev/null +++ b/doc/fluid/api_cn/imperative_cn/jit_cn/load_cn.rst @@ -0,0 +1,5 @@ +.. _cn_api_imperative_jit_load: + +load +------------------------------- +:doc_source: paddle.fluid.dygraph.jit.load \ No newline at end of file diff --git a/doc/fluid/api_cn/imperative_cn/jit_cn/save_cn.rst b/doc/fluid/api_cn/imperative_cn/jit_cn/save_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0c36588fa37794a81db4c534e2f54ad8aaddd66f --- /dev/null +++ b/doc/fluid/api_cn/imperative_cn/jit_cn/save_cn.rst @@ -0,0 +1,5 @@ +.. _cn_api_imperative_jit_save: + +save +------------------------------- +:doc_source: paddle.fluid.dygraph.jit.save \ No newline at end of file diff --git a/doc/fluid/api_cn/imperative_cn/jit_cn/set_code_level_cn.rst b/doc/fluid/api_cn/imperative_cn/jit_cn/set_code_level_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e57014212735b9c3e7610ad9221e96439960f90b --- /dev/null +++ b/doc/fluid/api_cn/imperative_cn/jit_cn/set_code_level_cn.rst @@ -0,0 +1,5 @@ +.. _cn_api_imperative_jit_set_code_level: + +set_code_level +------------------------------- +:doc_source: paddle.fluid.dygraph.jit.set_code_level \ No newline at end of file diff --git a/doc/fluid/api_cn/imperative_cn/jit_cn/set_verbosity_cn.rst b/doc/fluid/api_cn/imperative_cn/jit_cn/set_verbosity_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a20b6c67dd4472da83ecabcbaad519cabce0f3c9 --- /dev/null +++ b/doc/fluid/api_cn/imperative_cn/jit_cn/set_verbosity_cn.rst @@ -0,0 +1,5 @@ +.. 
_cn_api_imperative_jit_set_verbosity: + +set_verbosity +------------------------------- +:doc_source: paddle.fluid.dygraph.jit.set_verbosity \ No newline at end of file diff --git a/doc/fluid/api_cn/imperative_cn/load_cn.rst b/doc/fluid/api_cn/imperative_cn/load_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3c078604d2585703458260c82ee8dd9a2f2f5a65 --- /dev/null +++ b/doc/fluid/api_cn/imperative_cn/load_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_imperative_cn_load: + +load +------------------------------- +:doc_source: paddle.fluid.dygraph.checkpoint.load_dygraph + + diff --git a/doc/fluid/api_cn/imperative_cn/no_grad_cn.rst b/doc/fluid/api_cn/imperative_cn/no_grad_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..989ffa9f24f2ddac08a2365b71fd9929e11d2728 --- /dev/null +++ b/doc/fluid/api_cn/imperative_cn/no_grad_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_imperative_cn_no_grad: + +no_grad +------------------------------- +:doc_source: paddle.fluid.dygraph.base.no_grad + + diff --git a/doc/fluid/api_cn/imperative_cn/prepare_context_cn.rst b/doc/fluid/api_cn/imperative_cn/prepare_context_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..27a531db0286e3803478ecc734a0b565261882d4 --- /dev/null +++ b/doc/fluid/api_cn/imperative_cn/prepare_context_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_imperative_cn_prepare_context: + +prepare_context +------------------------------- +:doc_source: paddle.fluid.dygraph.parallel.prepare_context + + diff --git a/doc/fluid/api_cn/imperative_cn/save_cn.rst b/doc/fluid/api_cn/imperative_cn/save_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d515e9c076e610ff296d4e84469f37e1f738d83e --- /dev/null +++ b/doc/fluid/api_cn/imperative_cn/save_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_imperative_cn_save: + +save +------------------------------- +:doc_source: paddle.fluid.dygraph.checkpoint.save_dygraph + + diff --git a/doc/fluid/api_cn/imperative_cn/to_variable_cn.rst b/doc/fluid/api_cn/imperative_cn/to_variable_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d2557bc0b630ce4e03d2d802d041313db3068365 --- /dev/null +++ b/doc/fluid/api_cn/imperative_cn/to_variable_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_imperative_cn_to_variable: + +to_variable +------------------------------- +:doc_source: paddle.fluid.dygraph.base.to_variable + + diff --git a/doc/fluid/api_cn/incubate_cn.rst b/doc/fluid/api_cn/incubate_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3174d2d82b96b5396e67feabf572ac595fbbaee6 --- /dev/null +++ b/doc/fluid/api_cn/incubate_cn.rst @@ -0,0 +1,11 @@ +======================= +paddle.incubate +======================= + + + + +.. toctree:: + :maxdepth: 1 + + incubate_cn/hapi_cn.rst diff --git a/doc/fluid/api_cn/incubate_cn/hapi_cn.rst b/doc/fluid/api_cn/incubate_cn/hapi_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..983c10047db01ce2ad6accab9c6320b4ebdbb4fa --- /dev/null +++ b/doc/fluid/api_cn/incubate_cn/hapi_cn.rst @@ -0,0 +1,12 @@ +======================= +hapi +======================= + + + + +.. toctree:: + :maxdepth: 1 + + hapi_cn/Model_cn.rst + hapi_cn/set_device_cn.rst diff --git a/doc/fluid/api_cn/incubate_cn/hapi_cn/Model_cn.rst b/doc/fluid/api_cn/incubate_cn/hapi_cn/Model_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4e147a58a436901c93e9363cf48e62ecc434a4b7 --- /dev/null +++ b/doc/fluid/api_cn/incubate_cn/hapi_cn/Model_cn.rst @@ -0,0 +1,530 @@ +.. 
_cn_api_paddle_incubate_hapi_model_Model: + +Model +------------------------------- + +.. py:class:: paddle.incubate.hapi.model.Model() + + ``Model`` 对象是一个具备训练、测试、推理的神经网络。该对象同时支持静态图和动态图模式,通过 ``fluid.enable_dygraph()`` 来切换。需要注意的是,该开关需要在实例化 ``Model`` 对象之前使用。 在静态图模式下,输入需要使用 ``hapi.Input`` 来定义。 + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle + import paddle.fluid as fluid + + from paddle.incubate.hapi.model import Model, Input, set_device + from paddle.incubate.hapi.loss import CrossEntropy + from paddle.incubate.hapi.datasets import MNIST + from paddle.incubate.hapi.metrics import Accuracy + + class MyModel(Model): + def __init__(self): + super(MyModel, self).__init__() + self._fc = fluid.dygraph.Linear(784, 10, act='softmax') + def forward(self, x): + y = self._fc(x) + return y + device = set_device('cpu') + + # 切换成动态图模式,默认使用静态图模式 + fluid.enable_dygraph(device) + + model = MyModel() + optim = fluid.optimizer.SGD(learning_rate=1e-3, + parameter_list=model.parameters()) + + inputs = [Input([None, 784], 'float32', name='x')] + labels = [Input([None, 1], 'int64', name='label')] + + mnist_data = MNIST(mode='train', chw_format=False) + model.prepare(optim, + CrossEntropy(average=True), + Accuracy(), + inputs, + labels, + device=device) + model.fit(mnist_data, epochs=2, batch_size=32, verbose=1) + + +.. py:function:: train_batch(inputs, labels=None) + +在一个批次的数据上进行训练。 + +参数: + - **inputs** (list) - 1维列表,每个元素都是一批次的输入数据,数据类型为 ``numpy.ndarray``。 + - **labels** (list) - 1维列表,每个元素都是一批次的输入标签,数据类型为 ``numpy.ndarray`` 。默认值:None。 + +返回:一个列表,包含了训练损失函数的值,如果定义了评估函数,还会包含评估函数得到的指标。 + +返回类型:list + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle.fluid as fluid + + from paddle.fluid.dygraph import Linear + from paddle.incubate.hapi.loss import CrossEntropy + from paddle.incubate.hapi.model import Model, Input, set_device + + class MyModel(Model): + def __init__(self): + super(MyModel, self).__init__() + self._fc = Linear(784, 10, act='softmax') + def forward(self, x): + y = self._fc(x) + return y + + device = set_device('cpu') + fluid.enable_dygraph(device) + + model = MyModel() + optim = fluid.optimizer.SGD(learning_rate=1e-3, + parameter_list=model.parameters()) + + inputs = [Input([None, 784], 'float32', name='x')] + labels = [Input([None, 1], 'int64', name='label')] + model.prepare(optim, + CrossEntropy(average=True), + inputs=inputs, + labels=labels, + device=device) + data = np.random.random(size=(4,784)).astype(np.float32) + label = np.random.randint(0, 10, size=(4, 1)).astype(np.int64) + loss = model.train_batch([data], [label]) + print(loss) + +.. py:function:: eval_batch(inputs, labels=None) + +在一个批次的数据上进行评估。 + +参数: + - **inputs** (list) - 1维列表,每个元素都是一批次的输入数据,数据类型为 ``numpy.ndarray`` 。 + - **labels** (list) - 1维列表,每个元素都是一批次的输入标签,数据类型为 ``numpy.ndarray`` 。默认值:None。 + +返回:一个列表,包含了评估损失函数的值,如果定义了评估函数,还会包含评估函数得到的指标。 + +返回类型:list + +**代码示例**: + +.. 
code-block:: python + + import numpy as np + import paddle.fluid as fluid + + from paddle.incubate.hapi.loss import CrossEntropy + from paddle.incubate.hapi.model import Model, Input, set_device + + class MyModel(Model): + def __init__(self): + super(MyModel, self).__init__() + self._fc = fluid.dygraph.Linear(784, 10, act='softmax') + def forward(self, x): + y = self._fc(x) + return y + + device = set_device('cpu') + fluid.enable_dygraph(device) + + model = MyModel() + optim = fluid.optimizer.SGD(learning_rate=1e-3, + parameter_list=model.parameters()) + + inputs = [Input([None, 784], 'float32', name='x')] + labels = [Input([None, 1], 'int64', name='label')] + model.prepare(optim, + CrossEntropy(average=True), + inputs=inputs, + labels=labels, + device=device) + data = np.random.random(size=(4,784)).astype(np.float32) + label = np.random.randint(0, 10, size=(4, 1)).astype(np.int64) + loss = model.eval_batch([data], [label]) + print(loss) + +.. py:function:: test_batch(inputs) + +在一个批次的数据上进行测试。 + +参数: + - **inputs** (list) - 1维列表,每个元素都是一批次的输入数据,数据类型为 ``numpy.ndarray`` 。 + +返回:一个列表,包含了模型的输出。 + +返回类型:list + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle.fluid as fluid + from paddle.incubate.hapi.model import Model, Input, set_device + + class MyModel(Model): + def __init__(self): + super(MyModel, self).__init__() + self._fc = fluid.dygraph.Linear(784, 1, act='softmax') + def forward(self, x): + y = self._fc(x) + return y + + device = set_device('cpu') + fluid.enable_dygraph(device) + + model = MyModel() + inputs = [Input([None, 784], 'float32', name='x')] + model.prepare(inputs=inputs, + device=device) + data = np.random.random(size=(4,784)).astype(np.float32) + out = model.test_batch([data]) + print(out) + +.. py:function:: save(path): + +将模型的参数和训练过程中优化器的信息保存到指定的路径。所有的模型参数都会保存到一个后缀为 ``.pdparams`` 的文件中。 +所有的优化器信息和相关参数,比如 ``Adam`` 优化器中的 ``beta1`` , ``beta2`` ,``momentum`` 等,都会被保存到后缀为 ``.pdopt`` +的文件中。 + +参数: + - **path** (str) - 保存的文件名前缀。格式如 ``dirname/file_prefix`` 或者 ``file_prefix`` 。 + +返回:None + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + from paddle.incubate.hapi.model import Model, set_device + + class MyModel(Model): + def __init__(self): + super(MyModel, self).__init__() + self._fc = fluid.dygraph.Linear(784, 1, act='softmax') + def forward(self, x): + y = self._fc(x) + return y + + device = set_device('cpu') + fluid.enable_dygraph(device) + model = MyModel() + model.save('checkpoint/test') + +.. py:function:: load(path, skip_mismatch=False, reset_optimizer=False): + +从指定的文件中载入模型参数和优化器参数,如果不想恢复优化器参数信息,优化器信息文件可以不存在。 + +参数: + - **path** (str) - 保存参数或优化器信息的文件前缀。格式如 ``path.pdparams`` 或者 ``path.pdopt`` ;如果不想恢复优化器信息,后者可以不提供。 + - **skip_mismatch** (bool) - 是否需要跳过保存的模型文件中形状或名称不匹配的参数,设置为 ``False`` 时,当遇到不匹配的参数会抛出一个错误。默认值:False。 + - **reset_optimizer** (bool) - 设置为 ``True`` 时,会忽略提供的优化器信息文件。否则会载入提供的优化器信息。默认值:False。 + +返回:None + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + from paddle.incubate.hapi.model import Model, set_device + + class MyModel(Model): + def __init__(self): + super(MyModel, self).__init__() + self._fc = fluid.dygraph.Linear(784, 1, act='softmax') + def forward(self, x): + y = self._fc(x) + return y + + device = set_device('cpu') + fluid.enable_dygraph(device) + model = MyModel() + model.load('checkpoint/test') + +.. py:function:: parameters(*args, **kwargs): + +返回一个包含模型所有参数的列表。 + +返回:在静态图中返回一个包含 ``Parameter`` 的列表,在动态图中返回一个包含 ``ParamBase`` 的列表。 + +**代码示例**: + +.. 
code-block:: python + + import paddle.fluid as fluid + + from paddle.incubate.hapi.model import Model, Input, set_device + + class MyModel(Model): + def __init__(self): + super(MyModel, self).__init__() + self._fc = fluid.dygraph.Linear(20, 10, act='softmax') + def forward(self, x): + y = self._fc(x) + return y + + fluid.enable_dygraph() + model = MyModel() + params = model.parameters() + + +.. py:function:: prepare(optimizer=None, loss_function=None, metrics=None, inputs=None, labels=None, device=None): + +配置模型,指定后续训练、评估和预测所使用的优化器、损失函数、评估指标、输入、标签和设备。 + +参数: + - **optimizer** (Optimizer) - 当训练模型时,该参数必须被设定。当评估或测试的时候,该参数可以不设定。默认值:None。 + - **loss_function** (Loss) - 当训练模型时,该参数必须被设定。默认值:None。 + - **metrics** (Metric|list[Metric]) - 当该参数被设定时,所有给定的评估方法会在训练和测试时被运行,并返回对应的指标。默认值:None。 + - **inputs** (Input|list[Input]|dict) - 网络的输入,对于静态图,该参数必须给定。默认值:None。 + - **labels** (Input|list[Input]|dict) - 标签,网络的输入。对于静态图,在训练和评估时该参数必须给定。默认值:None。 + - **device** (str|fluid.CUDAPlace|fluid.CPUPlace|None) - 网络运行的设备,当不指定时,会根据环境和安装的 ``paddle`` 自动选择。默认值:None。 + +返回:None + +.. py:function:: fit(train_data=None, eval_data=None, batch_size=1, epochs=1, eval_freq=1, log_freq=10, save_dir=None, save_freq=1, verbose=2, drop_last=False, shuffle=True, num_workers=0, callbacks=None): + +训练模型。当 ``eval_data`` 给定时,会在 ``eval_freq`` 个 ``epoch`` 后进行一次评估。 + +参数: + - **train_data** (Dataset|DataLoader) - 一个可迭代的数据源,推荐给定一个 ``paddle.io.Dataset`` 或 ``paddle.io.DataLoader`` 的实例。默认值:None。 + - **eval_data** (Dataset|DataLoader) - 一个可迭代的数据源,推荐给定一个 ``paddle.io.Dataset`` 或 ``paddle.io.DataLoader`` 的实例。当给定时,会在每个 ``epoch`` 后进行评估。默认值:None。 + - **batch_size** (int) - 训练数据或评估数据的批大小,当 ``train_data`` 或 ``eval_data`` 为 ``DataLoader`` 的实例时,该参数会被忽略。默认值:1。 + - **epochs** (int) - 训练的轮数。默认值:1。 + - **eval_freq** (int) - 评估的频率,多少个 ``epoch`` 评估一次。默认值:1。 + - **log_freq** (int) - 日志打印的频率,多少个 ``step`` 打印一次日志。默认值:10。 + - **save_dir** (str|None) - 保存模型的文件夹,如果不设定,将不保存模型。默认值:None。 + - **save_freq** (int) - 保存模型的频率,多少个 ``epoch`` 保存一次模型。默认值:1。 + - **verbose** (int) - 日志显示模式,必须为0,1,2。当设定为0时,不打印日志,设定为1时,使用进度条的方式打印日志,设定为2时,一行一行地打印日志。默认值:2。 + - **drop_last** (bool) - 是否丢弃训练数据中最后几个不足设定的批次大小的数据。默认值:False。 + - **shuffle** (bool) - 是否对训练数据进行洗牌。当 ``train_data`` 为 ``DataLoader`` 的实例时,该参数会被忽略。默认值:True。 + - **num_workers** (int) - 启动子进程用于读取数据的数量。当 ``train_data`` 和 ``eval_data`` 都为 ``DataLoader`` 的实例时,该参数会被忽略。默认值:0。 + - **callbacks** (Callback|list[Callback]|None) - ``Callback`` 的一个实例或实例列表。该参数不给定时,默认会插入 ``ProgBarLogger`` 和 ``ModelCheckpoint`` 这两个实例。默认值:None。 + +返回:None + +**代码示例**: + +.. code-block:: python + + # 1. 使用Dataset训练,并设置batch_size的例子。 + import paddle.fluid as fluid + + from paddle.incubate.hapi.model import Model, Input, set_device + from paddle.incubate.hapi.loss import CrossEntropy + from paddle.incubate.hapi.metrics import Accuracy + from paddle.incubate.hapi.datasets import MNIST + from paddle.incubate.hapi.vision.models import LeNet + + dynamic = True + device = set_device('cpu') + fluid.enable_dygraph(device) if dynamic else None + + train_dataset = MNIST(mode='train') + val_dataset = MNIST(mode='test') + + inputs = [Input([None, 1, 28, 28], 'float32', name='image')] + labels = [Input([None, 1], 'int64', name='label')] + + model = LeNet() + optim = fluid.optimizer.Adam( + learning_rate=0.001, parameter_list=model.parameters()) + model.prepare( + optim, + CrossEntropy(), + Accuracy(topk=(1, 2)), + inputs=inputs, + labels=labels, + device=device) + model.fit(train_dataset, + val_dataset, + epochs=2, + batch_size=64, + save_dir='mnist_checkpoint') + + # 2. 使用Dataloader训练的例子。
+ + from paddle.incubate.hapi.model import Model, Input, set_device + from paddle.incubate.hapi.loss import CrossEntropy + from paddle.incubate.hapi.metrics import Accuracy + from paddle.incubate.hapi.datasets import MNIST + from paddle.incubate.hapi.vision.models import LeNet + + dynamic = True + device = set_device('cpu') + fluid.enable_dygraph(device) if dynamic else None + + train_dataset = MNIST(mode='train') + train_loader = fluid.io.DataLoader(train_dataset, + places=device, batch_size=64) + val_dataset = MNIST(mode='test') + val_loader = fluid.io.DataLoader(val_dataset, + places=device, batch_size=64) + + inputs = [Input([None, 1, 28, 28], 'float32', name='image')] + labels = [Input([None, 1], 'int64', name='label')] + + model = LeNet() + optim = fluid.optimizer.Adam( + learning_rate=0.001, parameter_list=model.parameters()) + model.prepare( + optim, + CrossEntropy(), + Accuracy(topk=(1, 2)), + inputs=inputs, + labels=labels, + device=device) + model.fit(train_loader, + val_loader, + epochs=2, + save_dir='mnist_checkpoint') + + +.. py:function:: evaluate(eval_data, batch_size=1, log_freq=10, verbose=2, num_workers=0, callbacks=None): + +评估模型。 + +参数: + - **eval_data** (Dataset|DataLoader) - 一个可迭代的数据源,推荐给定一个 ``paddle.io.Dataset`` 或 ``paddle.io.DataLoader`` 的实例。 + - **batch_size** (int) - 评估数据的批大小,当 ``eval_data`` 为 ``DataLoader`` 的实例时,该参数会被忽略。默认值:1。 + - **log_freq** (int) - 日志打印的频率,多少个 ``step`` 打印一次日志。默认值:10。 + - **verbose** (int) - 日志显示模式,必须为0,1,2。当设定为0时,不打印日志,设定为1时,使用进度条的方式打印日志,设定为2时,一行一行地打印日志。默认值:2。 + - **num_workers** (int) - 启动子进程用于读取数据的数量。当 ``eval_data`` 为 ``DataLoader`` 的实例时,该参数会被忽略。默认值:0。 + - **callbacks** (Callback|list[Callback]|None) - ``Callback`` 的一个实例或实例列表。该参数不给定时,默认会插入 ``ProgBarLogger`` 和 ``ModelCheckpoint`` 这两个实例。默认值:None。 + +返回:一个字典,包含了评估过程中的损失值和评估指标。 + +**代码示例**: + +.. code-block:: python + + # declarative mode + import numpy as np + from paddle.incubate.hapi.metrics import Accuracy + from paddle.incubate.hapi.datasets import MNIST + from paddle.incubate.hapi.vision.transforms import Compose,Resize + from paddle.incubate.hapi.vision.models import LeNet + from paddle.incubate.hapi.model import Input, set_device + + + inputs = [Input([-1, 1, 28, 28], 'float32', name='image')] + labels = [Input([None, 1], 'int64', name='label')] + + val_dataset = MNIST(mode='test') + + model = LeNet() + model.prepare(metrics=Accuracy(), inputs=inputs, labels=labels) + + result = model.evaluate(val_dataset, batch_size=64) + print(result) + + # imperative mode + import paddle.fluid.dygraph as dg + place = set_device('cpu') + with dg.guard(place) as g: + model = LeNet() + model.prepare(metrics=Accuracy(), inputs=inputs, labels=labels) + + result = model.evaluate(val_dataset, batch_size=64) + print(result) + + +.. py:function:: predict(test_data, batch_size=1, num_workers=0, stack_outputs=False, callbacks=None): + +模型预测。 + +参数: + - **test_data** (Dataset|DataLoader) - 一个可迭代的数据源,推荐给定一个 ``paddle.io.Dataset`` 或 ``paddle.io.DataLoader`` 的实例。 + - **batch_size** (int) - 预测数据的批大小,当 ``test_data`` 为 ``DataLoader`` 的实例时,该参数会被忽略。默认值:1。 + - **num_workers** (int) - 启动子进程用于读取数据的数量。当 ``test_data`` 为 ``DataLoader`` 的实例时,该参数会被忽略。默认值:0。 + - **stack_outputs** (bool) - 是否将输出进行堆叠。默认值:False。 + - **callbacks** (Callback|list[Callback]|None) - ``Callback`` 的一个实例或实例列表。默认值:None。 + +返回:一个列表,包含了模型的输出。 + +**代码示例**: + +.. 
code-block:: python + + # declarative mode + import numpy as np + from paddle.incubate.hapi.metrics import Accuracy + from paddle.incubate.hapi.datasets import MNIST + from paddle.incubate.hapi.vision.transforms import Compose,Resize + from paddle.incubate.hapi.vision.models import LeNet + from paddle.incubate.hapi.model import Input, set_device + + class MnistDataset(MNIST): + def __init__(self, mode, return_label=True): + super(MnistDataset, self).__init__(mode=mode) + self.return_label = return_label + + def __getitem__(self, idx): + img = np.reshape(self.images[idx], [1, 28, 28]) + if self.return_label: + return img, np.array(self.labels[idx]).astype('int64') + return img, + + def __len__(self): + return len(self.images) + + inputs = [Input([-1, 1, 28, 28], 'float32', name='image')] + + test_dataset = MnistDataset(mode='test', return_label=False) + + model = LeNet() + model.prepare(inputs=inputs) + + result = model.predict(test_dataset, batch_size=64) + print(result) + + # imperative mode + import paddle.fluid.dygraph as dg + place = set_device('cpu') + with dg.guard(place) as g: + model = LeNet() + model.prepare(inputs=inputs) + + result = model.predict(test_dataset, batch_size=64) + print(result) + + +.. py:function:: save_inference_model(save_dir, model_filename=None, params_filename=None, model_only=False): + +模型预测。 + +参数: + - **save_dir** (str) - 保存推理模型的路径。 + - **model_filename** (str,可选) - 保存预测模型结构 ``Inference Program`` 的文件名称。若设置为None,则使用 ``__model__`` 作为默认的文件名。默认值:None。 + - **params_filename** (str,可选) - 保存预测模型所有相关参数的文件名称。若设置为None,则模型参数被保存在单独的文件中。 + - **model_only** (bool,可选) - 若为True,则只保存预测模型的网络结构,而不保存预测模型的网络参数。默认值:False。 + +返回:None + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + + from paddle.incubate.hapi.model import Model, Input + + class MyModel(Model): + def __init__(self): + super(MyModel, self).__init__() + self._fc = fluid.dygraph.Linear(784, 1, act='softmax') + def forward(self, x): + y = self._fc(x) + return y + + model = MyModel() + inputs = [Input([-1, 1, 784], 'float32', name='input')] + model.prepare(inputs=inputs) + + model.save_inference_model('checkpoint/test') \ No newline at end of file diff --git a/doc/fluid/api_cn/index_cn.rst b/doc/fluid/api_cn/index_cn.rst index 8599fb58c60845168918446808735b2cec68951d..f8e5363f412582fa92ab61661e3d992050d71961 100644 --- a/doc/fluid/api_cn/index_cn.rst +++ b/doc/fluid/api_cn/index_cn.rst @@ -2,15 +2,119 @@ API Reference ============= + + +基础API +------- + +飞桨2.0提供了新的API,可以同时支持声明式和命令式两种开发模式,比如paddle.nn.Linear,避免在两种模式下使用不同的API造成困惑。原飞桨1.x的API位于paddle.fluid目录下,其中部分组网类的API,只能用于声明式开发,比如:fluid.layers.fc,无法用于命令式开发。 + +飞桨2.0对API的目录结构进行了调整,从原来的paddle.fluid目录调整到paddle目录下,使得开发接口更加清晰,调整后的目录结构如下: + ++---------------------+-----------------------------------------------------------------------------------------------------------+ +| 目录 | 功能和包含API | ++=====================+===========================================================================================================+ +| paddle.\* | paddle根目录下保留了常用API的别名,当前包括:paddle.tensor, paddle.framework目录下的所有API | ++---------------------+-----------------------------------------------------------------------------------------------------------+ +| paddle.tensor | 跟tensor操作相关的API,比如:创建zeros, 矩阵运算matmul, 变换concat, 计算elementwise\_add, 查找argmax等 | ++---------------------+-----------------------------------------------------------------------------------------------------------+ +| paddle.nn | 跟组网相关的API,比如:输入占位符data/Input,控制流while\_loop/cond,损失函数,卷积,LSTM等,激活函数等 | 
++---------------------+-----------------------------------------------------------------------------------------------------------+ +| paddle.framework | 基础框架相关的API,比如:Variable, Program, Executor等 | ++---------------------+-----------------------------------------------------------------------------------------------------------+ +| paddle.imperative | imprerative模式专用的API,比如:to\_variable, prepare\_context等 | ++---------------------+-----------------------------------------------------------------------------------------------------------+ +| paddle.optimizer | 优化算法相关API,比如:SGD,Adagrad, Adam等 | ++---------------------+-----------------------------------------------------------------------------------------------------------+ +| paddle.metric | 评估指标计算相关的API,比如:accuracy, cos\_sim等 | ++---------------------+-----------------------------------------------------------------------------------------------------------+ +| paddle.io | 数据输入输出相关API,比如:save, load, Dataset, DataLoader等 | ++---------------------+-----------------------------------------------------------------------------------------------------------+ +| paddle.device | 设备管理相关API,比如:CPUPlace, CUDAPlace等 | ++---------------------+-----------------------------------------------------------------------------------------------------------+ +| paddle.fleet | 分布式相关API | ++---------------------+-----------------------------------------------------------------------------------------------------------+ + +同时飞桨2.0对部分Paddle +1.x版本的API进行了清理,删除了部分不再推荐使用的API,具体信息请参考Release +Note。 + + +高层API +------- + +使用飞桨进行深度学习任务的开发,整体过程包括数据处理、组网、训练、评估、模型导出、预测部署这些基本的操作。这些基本操作在不同的任务中会反复出现,使用基础API进行开发时,需要开发者重复地写这些基础操作的代码,增加了模型开发的工作量。高层API针对这些基础操作进行了封装,提供更高层的开发接口,开发者只需要关心数据处理和自定义组网,其他工作可以通过调用高层API来完成。在MNIST手写数字识别任务中,对比动态图基础API的实现方式,通过使用高层API可以减少80%的非组网类代码。 + +使用高层API的另外一个好处是,可以通过一行代码\ ``paddle.enable_imperative``\ ,切换命令式编程模式和声明式编程模式。在开发阶段,可以使用的命令式编程模式,方便调试;开发完成后,可以切换到声明式编程模式,加速训练和方便部署。兼具了命令式编程实时执行,容易调试的优点,以及声明式编程全局优化和容易部署的优点。 + +以下为高层API的一个基础示例 + +.. code:: python + + import numpy as np + import paddle + import paddle.nn.functional as F + from paddle.incubate.hapi.model import Model, Input, Loss + from paddle.incubate.hapi.loss import CrossEntropy + + #高层API的组网方式需要继承Model,Model类实现了模型执行所需的逻辑 + class SimpleNet(Model): + def __init__(self, in_size, out_size): + super(SimpleNet, self).__init__() + self._linear = paddle.nn.Linear(in_size, out_size) + def forward(self, x): + y = self._linear(x) + z = self._linear(y) + pred = F.softmax(z) + return pred + + #兼容声明式开发模式,定义数据形状类型,如果不使用声明式编程模式,可以不定义数据占位符 + inputs = [Input([None, 8], 'float32', name='image')] + labels = [Input([None, 1], 'int64', name='labels')] + + #定义模型网络结构,包括指定损失函数和优化算法 + model = SimpleNet(8, 8) + optimizer = paddle.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=model.parameters()) + model.prepare(optimizer, CrossEntropy(), None, inputs, labels, device='cpu') + + #切换执行模式 + paddle.enable_imperative(paddle.CPUPlace()) + + #基于batch的训练 + batch_num = 10 + x = np.random.random((4, 8)).astype('float32') + y = np.random.randint(0, 8, (4, 1)).astype('int64') + for i in range(batch_num): + model.train_batch(inputs=x, labels=y) + +更多高层API开发的模型和示例请参考github Repo: +`hapi `__ + + .. 
toctree:: :maxdepth: 1 ../api_guides/index_cn.rst + paddle_cn.rst + dataset_cn.rst + tensor_cn.rst + nn_cn.rst + imperative_cn.rst + declarative_cn.rst + optimizer_cn.rst + static_cn.rst + metric_cn.rst + framework_cn.rst + io_cn.rst + utils_cn.rst + incubate_cn.rst fluid_cn.rst - api_tree_cn.rst backward_cn.rst clip_cn.rst + data_cn/data_reader_cn.rst + data_cn/dataset_cn.rst dataset_cn.rst + distributed_cn.rst dygraph_cn.rst executor_cn.rst initializer_cn.rst @@ -18,8 +122,8 @@ API Reference layers_cn.rst metrics_cn.rst nets_cn.rst - optimizer_cn.rst profiler_cn.rst regularizer_cn.rst transpiler_cn.rst unique_name_cn.rst + static_cn.rst diff --git a/doc/fluid/api_cn/initializer_cn/BilinearInitializer_cn.rst b/doc/fluid/api_cn/initializer_cn/BilinearInitializer_cn.rst index 4e3031e4f0a8d4d4ac5879f450289fa3d600168d..58f1dc08bb7a89a7fc4f43342adef144b50e0ba9 100644 --- a/doc/fluid/api_cn/initializer_cn/BilinearInitializer_cn.rst +++ b/doc/fluid/api_cn/initializer_cn/BilinearInitializer_cn.rst @@ -1,44 +1,47 @@ -.. _cn_api_fluid_initializer_BilinearInitializer: - -BilinearInitializer -------------------------------- - -.. py:class:: paddle.fluid.initializer.BilinearInitializer()) - +.. _cn_api_fluid_initializer_BilinearInitializer: + +BilinearInitializer +------------------------------- + +.. py:class:: paddle.fluid.initializer.BilinearInitializer()) + + + + 该接口为参数初始化函数,用于转置卷积函数中,对输入进行上采样。用户通过任意整型因子放大shape为(B,C,H,W)的特征图。 - + 返回:对象 用法如下: - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid import math - factor = 2 - C = 2 - H = W = 32 - w_attr = fluid.ParamAttr( - learning_rate=0., - regularizer=fluid.regularizer.L2Decay(0.), - initializer=fluid.initializer.BilinearInitializer()) - x = fluid.layers.data(name="data", shape=[4, H, W], - dtype="float32") - conv_up = fluid.layers.conv2d_transpose( - input=x, - num_filters=C, - output_size=None, - filter_size=2 * factor - factor % 2, - padding=int(math.ceil((factor - 1) / 2.)), - stride=factor, - groups=C, - param_attr=w_attr, - bias_attr=False) - -上述代码实现的是将输入x(shape=[-1, 4, H, W])经过转置卷积得到shape=[-1, C, H*factor, W*factor]的输出,num_filters = C和groups = C 表示这是按通道转置的卷积函数,输出通道为C,转置卷积的groups为C。滤波器shape为(C,1,K,K),K为filter_size。该初始化函数为滤波器的每个通道设置(K,K)插值核。输出特征图的最终输出shape为(B,C,factor*H,factor*W)。注意学习率和权重衰减设为0,以便在训练过程中双线性插值的系数值保持不变 - - - - + factor = 2 + C = 2 + H = W = 32 + w_attr = fluid.ParamAttr( + learning_rate=0., + regularizer=fluid.regularizer.L2Decay(0.), + initializer=fluid.initializer.BilinearInitializer()) + x = fluid.layers.data(name="data", shape=[4, H, W], + dtype="float32") + conv_up = fluid.layers.conv2d_transpose( + input=x, + num_filters=C, + output_size=None, + filter_size=2 * factor - factor % 2, + padding=int(math.ceil((factor - 1) / 2.)), + stride=factor, + groups=C, + param_attr=w_attr, + bias_attr=False) + +上述代码实现的是将输入x(shape=[-1, 4, H, W])经过转置卷积得到shape=[-1, C, H*factor, W*factor]的输出,num_filters = C和groups = C 表示这是按通道转置的卷积函数,输出通道为C,转置卷积的groups为C。滤波器shape为(C,1,K,K),K为filter_size。该初始化函数为滤波器的每个通道设置(K,K)插值核。输出特征图的最终输出shape为(B,C,factor*H,factor*W)。注意学习率和权重衰减设为0,以便在训练过程中双线性插值的系数值保持不变 + + + + diff --git a/doc/fluid/api_cn/initializer_cn/Bilinear_cn.rst b/doc/fluid/api_cn/initializer_cn/Bilinear_cn.rst index da9b802f33a88ce642633663c7a0c93abc9a4e7a..78664ec87a1a3daa53bc50f94bd0ab832b8b466e 100644 --- a/doc/fluid/api_cn/initializer_cn/Bilinear_cn.rst +++ b/doc/fluid/api_cn/initializer_cn/Bilinear_cn.rst @@ -1,10 +1,16 @@ -.. 
_cn_api_fluid_initializer_Bilinear: - -Bilinear -------------------------------- - -.. py:attribute:: paddle.fluid.initializer.Bilinear - -``BilinearInitializer`` 的别名 - - +.. _cn_api_fluid_initializer_Bilinear: + +Bilinear +------------------------------- + +.. py:attribute:: paddle.fluid.initializer.Bilinear + +:alias_main: paddle.nn.initializer.Bilinear +:alias: paddle.nn.initializer.Bilinear +:old_api: paddle.fluid.initializer.Bilinear + + + +``BilinearInitializer`` 的别名 + + diff --git a/doc/fluid/api_cn/initializer_cn/ConstantInitializer_cn.rst b/doc/fluid/api_cn/initializer_cn/ConstantInitializer_cn.rst index a73fab89ddd7f82ce1d203fdcf5fdbf01b0784ac..f5acebd8919da242f5eac12e8349a7d1c606edfa 100644 --- a/doc/fluid/api_cn/initializer_cn/ConstantInitializer_cn.rst +++ b/doc/fluid/api_cn/initializer_cn/ConstantInitializer_cn.rst @@ -5,6 +5,9 @@ ConstantInitializer .. py:class:: paddle.fluid.initializer.ConstantInitializer(value=0.0, force_cpu=False) + + + 该接口为常量初始化函数,用于权重初始化,通过输入的value值初始化输入变量; 参数: diff --git a/doc/fluid/api_cn/initializer_cn/Constant_cn.rst b/doc/fluid/api_cn/initializer_cn/Constant_cn.rst index 5ffd7cc8858f399e14c7a68d06e9d9204c6b66e8..fdc567bd1b7ee4c9ddae0e13c018b54e0f2fe7e1 100644 --- a/doc/fluid/api_cn/initializer_cn/Constant_cn.rst +++ b/doc/fluid/api_cn/initializer_cn/Constant_cn.rst @@ -1,10 +1,16 @@ -.. _cn_api_fluid_initializer_Constant: - -Constant -------------------------------- - -.. py:attribute:: paddle.fluid.initializer.Constant - -``ConstantInitializer`` 的别名 - - +.. _cn_api_fluid_initializer_Constant: + +Constant +------------------------------- + +.. py:attribute:: paddle.fluid.initializer.Constant + +:alias_main: paddle.nn.initializer.Constant +:alias: paddle.nn.initializer.Constant +:old_api: paddle.fluid.initializer.Constant + + + +``ConstantInitializer`` 的别名 + + diff --git a/doc/fluid/api_cn/initializer_cn/MSRAInitializer_cn.rst b/doc/fluid/api_cn/initializer_cn/MSRAInitializer_cn.rst index d87e53642fc0ac22ab7141f20793cd31a40b1363..ac42f93ff6bf15bb95d7a1d6db68ba96705d8eca 100644 --- a/doc/fluid/api_cn/initializer_cn/MSRAInitializer_cn.rst +++ b/doc/fluid/api_cn/initializer_cn/MSRAInitializer_cn.rst @@ -1,46 +1,49 @@ -.. _cn_api_fluid_initializer_MSRAInitializer: - -MSRAInitializer -------------------------------- - -.. py:class:: paddle.fluid.initializer.MSRAInitializer(uniform=True, fan_in=None, seed=0) - -该接口实现MSRA方式的权重初始化(a.k.a. Kaiming初始化) - -该接口为权重初始化函数,方法来自Kaiming He,Xiangyu Zhang,Shaoqing Ren 和 Jian Sun所写的论文: `Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification `_ 。这是一个鲁棒性特别强的初始化方法,并且适应了非线性激活函数(rectifier nonlinearities)。 -可以选择使用均匀分布或者正太分布初始化权重; -在均匀分布中,范围为[-x,x],其中: - -.. math:: - - x = \sqrt{\frac{6.0}{fan\_in}} - -在正态分布中,均值为0,标准差为: - -.. math:: - - \sqrt{\frac{2.0}{fan\_in}} - -参数: - - **uniform** (bool) - 为True表示使用均匀分布,为False表示使用正态分布 - - **fan_in** (float16|float32) - MSRAInitializer的fan_in。如果为None,fan_in沿伸自变量,多设置为None - - **seed** (int32) - 随机种子 +.. _cn_api_fluid_initializer_MSRAInitializer: + +MSRAInitializer +------------------------------- + +.. py:class:: paddle.fluid.initializer.MSRAInitializer(uniform=True, fan_in=None, seed=0) + + + + +该接口实现MSRA方式的权重初始化(a.k.a. Kaiming初始化) + +该接口为权重初始化函数,方法来自Kaiming He,Xiangyu Zhang,Shaoqing Ren 和 Jian Sun所写的论文: `Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification `_ 。这是一个鲁棒性特别强的初始化方法,并且适应了非线性激活函数(rectifier nonlinearities)。 +可以选择使用均匀分布或者正态分布初始化权重; +在均匀分布中,范围为[-x,x],其中: + +.. 
math:: + + x = \sqrt{\frac{6.0}{fan\_in}} + +在正态分布中,均值为0,标准差为: + +.. math:: + + \sqrt{\frac{2.0}{fan\_in}} + +参数: + - **uniform** (bool) - 为True表示使用均匀分布,为False表示使用正态分布 + - **fan_in** (float16|float32) - MSRAInitializer的fan_in。如果为None,fan_in沿伸自变量,多设置为None + - **seed** (int32) - 随机种子 返回:对象 - -.. note:: - - 在大多数情况下推荐设置fan_in为None - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - x = fluid.layers.data(name="data", shape=[32, 32], dtype="float32") - fc = fluid.layers.fc(input=x, size=10, param_attr=fluid.initializer.MSRAInitializer(uniform=False)) - - - - - + +.. note:: + + 在大多数情况下推荐设置fan_in为None + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + x = fluid.layers.data(name="data", shape=[32, 32], dtype="float32") + fc = fluid.layers.fc(input=x, size=10, param_attr=fluid.initializer.MSRAInitializer(uniform=False)) + + + + + diff --git a/doc/fluid/api_cn/initializer_cn/MSRA_cn.rst b/doc/fluid/api_cn/initializer_cn/MSRA_cn.rst index b795ebc826c9a98799a7c699f5e630ffbbded7da..00ff2f29bc574fe3aa24d401fb05783eb23b01c1 100644 --- a/doc/fluid/api_cn/initializer_cn/MSRA_cn.rst +++ b/doc/fluid/api_cn/initializer_cn/MSRA_cn.rst @@ -1,9 +1,15 @@ -.. _cn_api_fluid_initializer_MSRA: - -MSRA -------------------------------- - -.. py:attribute:: paddle.fluid.initializer.MSRA - -``MSRAInitializer`` 的别名 - +.. _cn_api_fluid_initializer_MSRA: + +MSRA +------------------------------- + +.. py:attribute:: paddle.fluid.initializer.MSRA + +:alias_main: paddle.nn.initializer.MSRA +:alias: paddle.nn.initializer.MSRA +:old_api: paddle.fluid.initializer.MSRA + + + +``MSRAInitializer`` 的别名 + diff --git a/doc/fluid/api_cn/initializer_cn/NormalInitializer_cn.rst b/doc/fluid/api_cn/initializer_cn/NormalInitializer_cn.rst index 3a8972cc8c89cb171f71bb617bc72feff91617ad..3574b444b4cbe3684c6f9210892655a1b18b8c3c 100644 --- a/doc/fluid/api_cn/initializer_cn/NormalInitializer_cn.rst +++ b/doc/fluid/api_cn/initializer_cn/NormalInitializer_cn.rst @@ -1,25 +1,28 @@ -.. _cn_api_fluid_initializer_NormalInitializer: - -NormalInitializer -------------------------------- - -.. py:class:: paddle.fluid.initializer.NormalInitializer(loc=0.0, scale=1.0, seed=0) - -随机正态(高斯)分布初始化函数 - -参数: - - **loc** (float16|float32) - 正态分布的平均值 - - **scale** (float16|float32) - 正态分布的标准差 - - **seed** (int32) - 随机种子 - -返回:对象 - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - x = fluid.layers.data(name="data", shape=[32, 32], dtype="float32") - fc = fluid.layers.fc(input=x, size=10, - param_attr=fluid.initializer.Normal(loc=0.0, scale=2.0)) - +.. _cn_api_fluid_initializer_NormalInitializer: + +NormalInitializer +------------------------------- + +.. py:class:: paddle.fluid.initializer.NormalInitializer(loc=0.0, scale=1.0, seed=0) + + + + +随机正态(高斯)分布初始化函数 + +参数: + - **loc** (float16|float32) - 正态分布的平均值 + - **scale** (float16|float32) - 正态分布的标准差 + - **seed** (int32) - 随机种子 + +返回:对象 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + x = fluid.layers.data(name="data", shape=[32, 32], dtype="float32") + fc = fluid.layers.fc(input=x, size=10, + param_attr=fluid.initializer.Normal(loc=0.0, scale=2.0)) + diff --git a/doc/fluid/api_cn/initializer_cn/Normal_cn.rst b/doc/fluid/api_cn/initializer_cn/Normal_cn.rst index db6f424a5c94987760923d25de1730303a71f86f..a7ae0fe063be50f8af93a4197f68c09157a17733 100644 --- a/doc/fluid/api_cn/initializer_cn/Normal_cn.rst +++ b/doc/fluid/api_cn/initializer_cn/Normal_cn.rst @@ -1,10 +1,16 @@ -.. 
_cn_api_fluid_initializer_Normal: - -Normal -------------------------------- - -.. py:attribute:: paddle.fluid.initializer.Normal - -``NormalInitializer`` 的别名 - - +.. _cn_api_fluid_initializer_Normal: + +Normal +------------------------------- + +.. py:attribute:: paddle.fluid.initializer.Normal + +:alias_main: paddle.nn.initializer.Normal +:alias: paddle.nn.initializer.Normal +:old_api: paddle.fluid.initializer.Normal + + + +``NormalInitializer`` 的别名 + + diff --git a/doc/fluid/api_cn/initializer_cn/NumpyArrayInitializer_cn.rst b/doc/fluid/api_cn/initializer_cn/NumpyArrayInitializer_cn.rst index 1e495ad97ca5cf87d8bb126f4665231aad85f921..7c0365ccba17c21f28907047bb902e25fb0df43b 100644 --- a/doc/fluid/api_cn/initializer_cn/NumpyArrayInitializer_cn.rst +++ b/doc/fluid/api_cn/initializer_cn/NumpyArrayInitializer_cn.rst @@ -1,26 +1,30 @@ -.. _cn_api_fluid_initializer_NumpyArrayInitializer: - -NumpyArrayInitializer -------------------------------- - -.. py:class:: paddle.fluid.initializer.NumpyArrayInitializer(value) - -该OP使用Numpy型数组来初始化参数变量。 - -参数: - - **value** (numpy) - 用于初始化变量的一个Numpy型数组。 - -返回:张量(Tensor) - -返回类型:变量(Variable) - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - x = fluid.layers.data(name="x", shape=[5], dtype='float32') - fc = fluid.layers.fc(input=x, size=10, - param_attr=fluid.initializer.NumpyArrayInitializer(numpy.array([1,2]))) - - +.. _cn_api_fluid_initializer_NumpyArrayInitializer: + +NumpyArrayInitializer +------------------------------- + +.. py:class:: paddle.fluid.initializer.NumpyArrayInitializer(value) + + + + +该OP使用Numpy型数组来初始化参数变量。 + +参数: + - **value** (numpy) - 用于初始化变量的一个Numpy型数组。 + +返回:张量(Tensor) + +返回类型:变量(Variable) + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy + x1 = fluid.data(name="x1", shape=[2, 1], dtype='float32') + fc = fluid.layers.fc(input=x1, size=10, + param_attr=fluid.initializer.NumpyArrayInitializer(numpy.array([1,2]))) + + diff --git a/doc/fluid/api_cn/initializer_cn/TruncatedNormalInitializer_cn.rst b/doc/fluid/api_cn/initializer_cn/TruncatedNormalInitializer_cn.rst index 8bf4f08af663e4142e8ad8b41f259563f3409cc6..4378efd743426e1ea3453c2ad43d986aca2f50f1 100644 --- a/doc/fluid/api_cn/initializer_cn/TruncatedNormalInitializer_cn.rst +++ b/doc/fluid/api_cn/initializer_cn/TruncatedNormalInitializer_cn.rst @@ -1,32 +1,35 @@ -.. _cn_api_fluid_initializer_TruncatedNormalInitializer: - -TruncatedNormalInitializer -------------------------------- - -.. py:class:: paddle.fluid.initializer.TruncatedNormalInitializer(loc=0.0, scale=1.0, seed=0) - -Random Truncated Normal(高斯)分布初始化函数 - -参数: - - **loc** (float16|float32) - 正态分布的平均值 - - **scale** (float16|float32) - 正态分布的标准差 - - **seed** (int32) - 随机种子 - -返回:对象 - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - x = fluid.layers.data(name='x', shape=[1], dtype='float32') - fc = fluid.layers.fc(input=x, size=10, - param_attr=fluid.initializer.TruncatedNormal(loc=0.0, scale=2.0)) - - - - - - - - +.. _cn_api_fluid_initializer_TruncatedNormalInitializer: + +TruncatedNormalInitializer +------------------------------- + +.. py:class:: paddle.fluid.initializer.TruncatedNormalInitializer(loc=0.0, scale=1.0, seed=0) + + + + +Random Truncated Normal(高斯)分布初始化函数 + +参数: + - **loc** (float16|float32) - 正态分布的平均值 + - **scale** (float16|float32) - 正态分布的标准差 + - **seed** (int32) - 随机种子 + +返回:对象 + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + x = fluid.layers.data(name='x', shape=[1], dtype='float32') + fc = fluid.layers.fc(input=x, size=10, + param_attr=fluid.initializer.TruncatedNormal(loc=0.0, scale=2.0)) + + + + + + + + diff --git a/doc/fluid/api_cn/initializer_cn/TruncatedNormal_cn.rst b/doc/fluid/api_cn/initializer_cn/TruncatedNormal_cn.rst index 4cec1290655b963b764e88f19034da0f657815d6..d36b1c53f17417cc2f06b6641a16ed4de7a0b6f7 100644 --- a/doc/fluid/api_cn/initializer_cn/TruncatedNormal_cn.rst +++ b/doc/fluid/api_cn/initializer_cn/TruncatedNormal_cn.rst @@ -1,10 +1,16 @@ -.. _cn_api_fluid_initializer_TruncatedNormal: - -TruncatedNormal -------------------------------- - -.. py:attribute:: paddle.fluid.initializer.TruncatedNormal - -``TruncatedNormalInitializer`` 的别名 - - +.. _cn_api_fluid_initializer_TruncatedNormal: + +TruncatedNormal +------------------------------- + +.. py:attribute:: paddle.fluid.initializer.TruncatedNormal + +:alias_main: paddle.nn.initializer.TruncatedNormal +:alias: paddle.nn.initializer.TruncatedNormal +:old_api: paddle.fluid.initializer.TruncatedNormal + + + +``TruncatedNormalInitializer`` 的别名 + + diff --git a/doc/fluid/api_cn/initializer_cn/UniformInitializer_cn.rst b/doc/fluid/api_cn/initializer_cn/UniformInitializer_cn.rst index ec6e624973328ec1a0d5bbf09ba47e25e034e338..873451e6710b00a7cc7394634f4f7842bbca5ab6 100644 --- a/doc/fluid/api_cn/initializer_cn/UniformInitializer_cn.rst +++ b/doc/fluid/api_cn/initializer_cn/UniformInitializer_cn.rst @@ -1,32 +1,35 @@ -.. _cn_api_fluid_initializer_UniformInitializer: - -UniformInitializer -------------------------------- - -.. py:class:: paddle.fluid.initializer.UniformInitializer(low=-1.0, high=1.0, seed=0) - -随机均匀分布初始化器 - -参数: - - **low** (float16|float32) - 下界 - - **high** (float16|float32) - 上界 - - **seed** (int32) - 随机种子 - -返回:对象 - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - x = fluid.layers.data(name='x', shape=[1], dtype='float32') - fc = fluid.layers.fc(input=x, size=10, - param_attr=fluid.initializer.Uniform(low=-0.5, high=0.5)) - - - - - - - - +.. _cn_api_fluid_initializer_UniformInitializer: + +UniformInitializer +------------------------------- + +.. py:class:: paddle.fluid.initializer.UniformInitializer(low=-1.0, high=1.0, seed=0) + + + + +随机均匀分布初始化器 + +参数: + - **low** (float16|float32) - 下界 + - **high** (float16|float32) - 上界 + - **seed** (int32) - 随机种子 + +返回:对象 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + x = fluid.layers.data(name='x', shape=[1], dtype='float32') + fc = fluid.layers.fc(input=x, size=10, + param_attr=fluid.initializer.Uniform(low=-0.5, high=0.5)) + + + + + + + + diff --git a/doc/fluid/api_cn/initializer_cn/Uniform_cn.rst b/doc/fluid/api_cn/initializer_cn/Uniform_cn.rst index 81f0b1647741b268aae93b6d3b287eeabacde4ce..48a7efeeeddd34a99effca7c37e1c8dea99bf761 100644 --- a/doc/fluid/api_cn/initializer_cn/Uniform_cn.rst +++ b/doc/fluid/api_cn/initializer_cn/Uniform_cn.rst @@ -1,11 +1,17 @@ -.. _cn_api_fluid_initializer_Uniform: - -Uniform -------------------------------- - -.. py:attribute:: paddle.fluid.initializer.Uniform - -``UniformInitializer`` 的别名 - - - +.. _cn_api_fluid_initializer_Uniform: + +Uniform +------------------------------- + +.. 
py:attribute:: paddle.fluid.initializer.Uniform + +:alias_main: paddle.nn.initializer.Uniform +:alias: paddle.nn.initializer.Uniform +:old_api: paddle.fluid.initializer.Uniform + + + +``UniformInitializer`` 的别名 + + + diff --git a/doc/fluid/api_cn/initializer_cn/XavierInitializer_cn.rst b/doc/fluid/api_cn/initializer_cn/XavierInitializer_cn.rst index 4669437654a11f8a6c6b4824ed5c69dbd7757a41..b26098bf14107371c748445ad9493ba0f7331ffb 100644 --- a/doc/fluid/api_cn/initializer_cn/XavierInitializer_cn.rst +++ b/doc/fluid/api_cn/initializer_cn/XavierInitializer_cn.rst @@ -1,48 +1,51 @@ -.. _cn_api_fluid_initializer_XavierInitializer: - -XavierInitializer -------------------------------- - -.. py:class:: paddle.fluid.initializer.XavierInitializer(uniform=True, fan_in=None, fan_out=None, seed=0) - -该类实现Xavier权重初始化方法( Xavier weight initializer),Xavier权重初始化方法出自Xavier Glorot和Yoshua Bengio的论文 `Understanding the difficulty of training deep feedforward neural networks `_ - -该初始化函数用于保持所有层的梯度尺度几乎一致。 - -在均匀分布的情况下,取值范围为[-x,x],其中: - -.. math:: - - x = \sqrt{\frac{6.0}{fan\_in+fan\_out}} - -正态分布的情况下,均值为0,标准差为: - -.. math:: - - x = \sqrt{\frac{2.0}{fan\_in+fan\_out}} - -参数: - - **uniform** (bool) - 是否用均匀分布,默认为True。如果为False,则使用正态分布。 - - **fan_in** (float) - 当前网络层的输入神经元个数。如果为None,则从变量中推断,默认为None。 - - **fan_out** (float) - 当前网络层的输出神经元个数。如果为None,则从变量中推断,默认为None。 - - **seed** (int) - 随机种子 - -.. note:: - - 在大多数情况下推荐将fan_in和fan_out设置为None - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - queries = fluid.layers.data(name='x', shape=[1], dtype='float32') - fc = fluid.layers.fc( - input=queries, size=10, - param_attr=fluid.initializer.Xavier(uniform=False)) - - - - - - +.. _cn_api_fluid_initializer_XavierInitializer: + +XavierInitializer +------------------------------- + +.. py:class:: paddle.fluid.initializer.XavierInitializer(uniform=True, fan_in=None, fan_out=None, seed=0) + + + + +该类实现Xavier权重初始化方法( Xavier weight initializer),Xavier权重初始化方法出自Xavier Glorot和Yoshua Bengio的论文 `Understanding the difficulty of training deep feedforward neural networks `_ + +该初始化函数用于保持所有层的梯度尺度几乎一致。 + +在均匀分布的情况下,取值范围为[-x,x],其中: + +.. math:: + + x = \sqrt{\frac{6.0}{fan\_in+fan\_out}} + +正态分布的情况下,均值为0,标准差为: + +.. math:: + + x = \sqrt{\frac{2.0}{fan\_in+fan\_out}} + +参数: + - **uniform** (bool) - 是否用均匀分布,默认为True。如果为False,则使用正态分布。 + - **fan_in** (float) - 当前网络层的输入神经元个数。如果为None,则从变量中推断,默认为None。 + - **fan_out** (float) - 当前网络层的输出神经元个数。如果为None,则从变量中推断,默认为None。 + - **seed** (int) - 随机种子 + +.. note:: + + 在大多数情况下推荐将fan_in和fan_out设置为None + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + queries = fluid.layers.data(name='x', shape=[1], dtype='float32') + fc = fluid.layers.fc( + input=queries, size=10, + param_attr=fluid.initializer.Xavier(uniform=False)) + + + + + + diff --git a/doc/fluid/api_cn/initializer_cn/Xavier_cn.rst b/doc/fluid/api_cn/initializer_cn/Xavier_cn.rst index 69e632a7177a60d736c01a532e36e15ba5c55620..e19b2c7db8dcb80cadf0e7da5f3d10e39ba301d5 100644 --- a/doc/fluid/api_cn/initializer_cn/Xavier_cn.rst +++ b/doc/fluid/api_cn/initializer_cn/Xavier_cn.rst @@ -1,14 +1,20 @@ -.. _cn_api_fluid_initializer_Xavier: - -Xavier -------------------------------- - -.. py:attribute:: paddle.fluid.initializer.Xavier - -``XavierInitializer`` 的别名 - - - - - - +.. _cn_api_fluid_initializer_Xavier: + +Xavier +------------------------------- + +.. 
py:attribute:: paddle.fluid.initializer.Xavier + +:alias_main: paddle.nn.initializer.Xavier +:alias: paddle.nn.initializer.Xavier +:old_api: paddle.fluid.initializer.Xavier + + + +``XavierInitializer`` 的别名 + + + + + + diff --git a/doc/fluid/api_cn/initializer_cn/force_init_on_cpu_cn.rst b/doc/fluid/api_cn/initializer_cn/force_init_on_cpu_cn.rst index 7957a26a5a9de6b1e466a68a22f4a0c0db7696b5..8b3cf60ae1fe5aeb91431ad49fb71801f47b1c57 100644 --- a/doc/fluid/api_cn/initializer_cn/force_init_on_cpu_cn.rst +++ b/doc/fluid/api_cn/initializer_cn/force_init_on_cpu_cn.rst @@ -1,31 +1,34 @@ -.. _cn_api_fluid_initializer_force_init_on_cpu: - -force_init_on_cpu -------------------------------- - -.. py:function:: paddle.fluid.initializer.force_init_on_cpu() - -该接口获得一个是否强制在CPU上初始化变量的布尔型标志位。 - -返回:状态,是否应强制在CPU上强制进行变量初始化 - -返回类型:bool - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - if fluid.initializer.force_init_on_cpu(): - step = fluid.layers.create_global_var(shape=[2,3], value=1.0, dtype='float32') - - - - - - - - - - - +.. _cn_api_fluid_initializer_force_init_on_cpu: + +force_init_on_cpu +------------------------------- + +.. py:function:: paddle.fluid.initializer.force_init_on_cpu() + + + + +该接口获得一个是否强制在CPU上初始化变量的布尔型标志位。 + +返回:状态,是否应强制在CPU上强制进行变量初始化 + +返回类型:bool + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + if fluid.initializer.force_init_on_cpu(): + step = fluid.layers.create_global_var(shape=[2,3], value=1.0, dtype='float32') + + + + + + + + + + + diff --git a/doc/fluid/api_cn/initializer_cn/init_on_cpu_cn.rst b/doc/fluid/api_cn/initializer_cn/init_on_cpu_cn.rst index 00e7b1d86e8d30ea4418d41f993a5002e886ad6a..db51f83b5dc08536236586f85b251bb92878df5c 100644 --- a/doc/fluid/api_cn/initializer_cn/init_on_cpu_cn.rst +++ b/doc/fluid/api_cn/initializer_cn/init_on_cpu_cn.rst @@ -1,21 +1,24 @@ -.. _cn_api_fluid_initializer_init_on_cpu: - -init_on_cpu -------------------------------- - -.. py:function:: paddle.fluid.initializer.init_on_cpu() - -该接口设置强制变量在 cpu 上初始化。 - -返回:无 - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - with fluid.initializer.init_on_cpu(): - step = fluid.layers.create_global_var(shape=[2,3], value=1.0, dtype='float32') - - - +.. _cn_api_fluid_initializer_init_on_cpu: + +init_on_cpu +------------------------------- + +.. py:function:: paddle.fluid.initializer.init_on_cpu() + + + + +该接口设置强制变量在 cpu 上初始化。 + +返回:无 + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + with fluid.initializer.init_on_cpu(): + step = fluid.layers.create_global_var(shape=[2,3], value=1.0, dtype='float32') + + + diff --git a/doc/fluid/api_cn/io_cn.rst b/doc/fluid/api_cn/io_cn.rst index 5115abb0da44ee9289895ece7f20854f70ebb0a1..1bee21a710d0366a1f7db206c18e377fc64bfe26 100644 --- a/doc/fluid/api_cn/io_cn.rst +++ b/doc/fluid/api_cn/io_cn.rst @@ -1,6 +1,6 @@ ======================= -fluid.io +paddle.io ======================= @@ -10,13 +10,18 @@ fluid.io :maxdepth: 1 io_cn/batch_cn.rst + io_cn/BatchSampler_cn.rst io_cn/buffered_cn.rst io_cn/cache_cn.rst io_cn/chain_cn.rst io_cn/compose_cn.rst io_cn/ComposeNotAligned_cn.rst - io_cn/DataLoader_cn.rst + io_cn/DataLoader_cn.rst + io_cn/DataLoader_cn.rst + io_cn/Dataset_cn.rst io_cn/firstn_cn.rst + io_cn/get_program_parameter_cn.rst + io_cn/get_program_persistable_vars_cn.rst io_cn/load_cn.rst io_cn/load_inference_model_cn.rst io_cn/load_params_cn.rst @@ -34,4 +39,3 @@ fluid.io io_cn/set_program_state_cn.rst io_cn/shuffle_cn.rst io_cn/xmap_readers_cn.rst - diff --git a/doc/fluid/api_cn/io_cn/BatchSampler_cn.rst b/doc/fluid/api_cn/io_cn/BatchSampler_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d97e1af250b27c1b08959581b1a3981979a38d32 --- /dev/null +++ b/doc/fluid/api_cn/io_cn/BatchSampler_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_io_cn_BatchSampler: + +BatchSampler +------------------------------- +:doc_source: paddle.fluid.dataloader.BatchSampler + + diff --git a/doc/fluid/api_cn/io_cn/DataLoader_cn.rst b/doc/fluid/api_cn/io_cn/DataLoader_cn.rst index aa01b837c7811089aab976819c6b2be178bf0d94..e8e1b68d3aadb0c9f5e3f958f4b096313e60c6e5 100644 --- a/doc/fluid/api_cn/io_cn/DataLoader_cn.rst +++ b/doc/fluid/api_cn/io_cn/DataLoader_cn.rst @@ -6,7 +6,13 @@ DataLoader .. py:class:: paddle.fluid.io.DataLoader -.. py:method:: from_generator(feed_list=None, capacity=None, use_double_buffer=True, iterable=True, return_list=False, use_multiprocess=False) + + + +.. py:method:: from_generator(feed_list=None, capacity=None, use_double_buffer=True, iterable=True, return_list=False, use_multiprocess=False, drop_last=True) + +.. note:: + 框架保证DataLoader的数据加载顺序与用户提供的数据源读取顺序一致。 创建一个DataLoader对象用于加载Python生成器产生的数据。数据会由Python线程预先读取,并异步送入一个队列中。 @@ -26,12 +32,13 @@ DataLoader - **iterable** (bool) - 所创建的DataLoader对象是否可迭代。 - **return_list** (bool) - 每个设备上的数据是否以list形式返回。仅在iterable = True模式下有效。若return_list = False,每个设备上的返回数据均是str -> LoDTensor的映射表,其中映射表的key是每个输入变量的名称。若return_list = True,则每个设备上的返回数据均是list(LoDTensor)。推荐在静态图模式下使用return_list = False,在动态图模式下使用return_list = True。 - **use_multiprocess** (bool) - 设置是否是用多进程加速动态图的数据载入过程。注意:该参数的设置仅在动态图模式下有效, 在静态图模式下,该参数设置与否均无任何影响。默认值为False。 + - **drop_last** (bool): 是否丢弃最后的不足CPU/GPU设备数的批次。默认值为True。在网络训练时,用户不能设置drop_last=False,此时所有CPU/GPU设备均应从DataLoader中读取到数据。在网络预测时,用户可以设置drop_last=False,此时最后不足CPU/GPU设备数的批次可以进行预测。 返回: 被创建的DataLoader对象 返回类型: loader (DataLoader) -**代码示例** +**代码示例 1** .. code-block:: python @@ -165,6 +172,50 @@ DataLoader assert relu.shape == [BATCH_SIZE, 784] +**代码示例 2** + +.. 
code-block:: python + + import paddle.fluid as fluid + import numpy as np + import os + + # We use 2 CPU cores to run inference network + os.environ['CPU_NUM'] = '2' + + # The data source has only 3 batches, which can not be + # divided evenly to each CPU core + def batch_generator(): + for i in range(3): + yield np.array([i+1]).astype('float32'), + + x = fluid.data(name='x', shape=[None], dtype='float32') + y = x * x + + def run_inference(drop_last): + loader = fluid.io.DataLoader.from_generator(feed_list=[x], + capacity=8, drop_last=drop_last) + loader.set_batch_generator(batch_generator, fluid.cpu_places()) + + exe = fluid.Executor(fluid.CPUPlace()) + prog = fluid.CompiledProgram(fluid.default_main_program()) + prog = prog.with_data_parallel() + + result = [] + for data in loader(): + each_ret, = exe.run(prog, feed=data, fetch_list=[y]) + result.extend(each_ret) + return result + + # Set drop_last to True, so that the last batch whose + # number is less than CPU core number would be discarded. + print(run_inference(drop_last=True)) # [1.0, 4.0] + + # Set drop_last to False, so that the last batch whose + # number is less than CPU core number can be tested. + print(run_inference(drop_last=False)) # [1.0, 4.0, 9.0] + + .. py:method:: from_dataset(dataset, places, drop_last=True) 创建一个DataLoader对象用于加载Dataset产生的数据。目前,Dataset仅支持Linux系统下使用。 diff --git a/doc/fluid/api_cn/io_cn/Dataset_cn.rst b/doc/fluid/api_cn/io_cn/Dataset_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9cc4bf215405232188e577ad074d784ef2c06424 --- /dev/null +++ b/doc/fluid/api_cn/io_cn/Dataset_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_io_cn_Dataset: + +Dataset +------------------------------- +:doc_source: paddle.fluid.dataloader.Dataset + + diff --git a/doc/fluid/api_cn/io_cn/PyReader_cn.rst b/doc/fluid/api_cn/io_cn/PyReader_cn.rst index 9f9fb25f74fd6a6b7e792ec7ef12903dee00e80d..10920cb264f87170a5394d4a4be70a74596c6c02 100644 --- a/doc/fluid/api_cn/io_cn/PyReader_cn.rst +++ b/doc/fluid/api_cn/io_cn/PyReader_cn.rst @@ -1,386 +1,389 @@ -.. _cn_api_fluid_io_PyReader: - -PyReader -------------------------------- - -.. py:class:: paddle.fluid.io.PyReader(feed_list=None, capacity=None, use_double_buffer=True, iterable=True, return_list=False) - - -在python中为数据输入创建一个reader对象。将使用python线程预取数据,并将其异步插入队列。当调用Executor.run(…)时,将自动提取队列中的数据。 - -参数: - - **feed_list** (list(Variable)|tuple(Variable)) - feed变量列表,由 ``fluid.layers.data()`` 创建。 - - **capacity** (int) - PyReader对象内部维护队列的容量大小。单位是batch数量。若reader读取速度较快,建议设置较大的capacity值。 - - **use_double_buffer** (bool) - 是否使用 ``double_buffer_reader`` 。若use_double_buffer=True,PyReader会异步地预读取下一个batch的数据,可加速数据读取过程,但同时会占用少量的CPU/GPU存储,即一个batch输入数据的存储空间。 - - **iterable** (bool) - 所创建的DataLoader对象是否可迭代。 - - **return_list** (bool) - 每个设备上的数据是否以list形式返回。仅在iterable = True模式下有效。若return_list = False,每个设备上的返回数据均是str -> LoDTensor的映射表,其中映射表的key是每个输入变量的名称。若return_list = True,则每个设备上的返回数据均是list(LoDTensor)。推荐在静态图模式下使用return_list = False,在动态图模式下使用return_list = True。 - - -返回: 被创建的reader对象 - -返回类型: reader (Reader) - - -**代码示例** - -1.如果iterable=False,则创建的PyReader对象几乎与 ``fluid.layers.py_reader()`` 相同。算子将被插入program中。用户应该在每个epoch之前调用 ``start()`` ,并在epoch结束时捕获 ``Executor.run()`` 抛出的 ``fluid.core.EOFException`` 。一旦捕获到异常,用户应该调用 ``reset()`` 手动重置reader。 - -.. 
code-block:: python - - import paddle - import paddle.fluid as fluid - import numpy as np - - EPOCH_NUM = 3 - ITER_NUM = 5 - BATCH_SIZE = 3 - - def network(image, label): - # 用户定义网络,此处以softmax回归为例 - predict = fluid.layers.fc(input=image, size=10, act='softmax') - return fluid.layers.cross_entropy(input=predict, label=label) - - def reader_creator_random_image_and_label(height, width): - def reader(): - for i in range(ITER_NUM): - fake_image = np.random.uniform(low=0, - high=255, - size=[height, width]) - fake_label = np.ones([1]) - yield fake_image, fake_label - return reader - - image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32') - label = fluid.layers.data(name='label', shape=[1], dtype='int64') - - reader = fluid.io.PyReader(feed_list=[image, label], - capacity=4, - iterable=False) - - user_defined_reader = reader_creator_random_image_and_label(784, 784) - reader.decorate_sample_list_generator( - paddle.batch(user_defined_reader, batch_size=BATCH_SIZE)) - - loss = network(image, label) - executor = fluid.Executor(fluid.CPUPlace()) - executor.run(fluid.default_startup_program()) - for i in range(EPOCH_NUM): - reader.start() - while True: - try: - executor.run(feed=None) - except fluid.core.EOFException: - reader.reset() - break - - -2.如果iterable=True,则创建的PyReader对象与程序分离。程序中不会插入任何算子。在本例中,创建的reader是一个python生成器,它是可迭代的。用户应将从PyReader对象生成的数据输入 ``Executor.run(feed=...)`` 。 - -.. code-block:: python - - import paddle - import paddle.fluid as fluid - import numpy as np - - EPOCH_NUM = 3 - ITER_NUM = 5 - BATCH_SIZE = 10 - - def network(image, label): - # 用户定义网络,此处以softmax回归为例 - predict = fluid.layers.fc(input=image, size=10, act='softmax') - return fluid.layers.cross_entropy(input=predict, label=label) - - def reader_creator_random_image(height, width): - def reader(): - for i in range(ITER_NUM): - fake_image = np.random.uniform(low=0, high=255, size=[height, width]), - fake_label = np.ones([1]) - yield fake_image, fake_label - return reader - - image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32') - label = fluid.layers.data(name='label', shape=[1], dtype='int64') - reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True, return_list=False) - - user_defined_reader = reader_creator_random_image(784, 784) - reader.decorate_sample_list_generator( - paddle.batch(user_defined_reader, batch_size=BATCH_SIZE), - fluid.core.CPUPlace()) - loss = network(image, label) - executor = fluid.Executor(fluid.CPUPlace()) - executor.run(fluid.default_startup_program()) - - for _ in range(EPOCH_NUM): - for data in reader(): - executor.run(feed=data, fetch_list=[loss]) - -3. return_list=True,返回值将用list表示而非dict,通常用于动态图模式中。 - -.. code-block:: python - - import paddle - import paddle.fluid as fluid - import numpy as np - - EPOCH_NUM = 3 - ITER_NUM = 5 - BATCH_SIZE = 10 - - def reader_creator_random_image(height, width): - def reader(): - for i in range(ITER_NUM): - yield np.random.uniform(low=0, high=255, size=[height, width]), \ - np.random.random_integers(low=0, high=9, size=[1]) - return reader - - place = fluid.CPUPlace() - with fluid.dygraph.guard(place): - py_reader = fluid.io.PyReader(capacity=2, return_list=True) - user_defined_reader = reader_creator_random_image(784, 784) - py_reader.decorate_sample_list_generator( - paddle.batch(user_defined_reader, batch_size=BATCH_SIZE), - place) - for image, label in py_reader(): - relu = fluid.layers.relu(image) - -.. py:method:: start() - -启动数据输入线程。只能在reader对象不可迭代时调用。 - -**代码示例** - -.. 
code-block:: python - - import paddle - import paddle.fluid as fluid - import numpy as np - - BATCH_SIZE = 10 - - def generator(): - for i in range(5): - yield np.random.uniform(low=0, high=255, size=[784, 784]), - - image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32') - reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False) - reader.decorate_sample_list_generator( - paddle.batch(generator, batch_size=BATCH_SIZE)) - - executor = fluid.Executor(fluid.CPUPlace()) - executor.run(fluid.default_startup_program()) - for i in range(3): - reader.start() - while True: - try: - executor.run(feed=None) - except fluid.core.EOFException: - reader.reset() - break - -.. py:method:: reset() - -当 ``fluid.core.EOFException`` 抛出时重置reader对象。只能在reader对象不可迭代时调用。 - -**代码示例** - -.. code-block:: python - - import paddle - import paddle.fluid as fluid - import numpy as np - - BATCH_SIZE = 10 - - def generator(): - for i in range(5): - yield np.random.uniform(low=0, high=255, size=[784, 784]), - - image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32') - reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False) - reader.decorate_sample_list_generator( - paddle.batch(generator, batch_size=BATCH_SIZE)) - - executor = fluid.Executor(fluid.CPUPlace()) - executor.run(fluid.default_startup_program()) - for i in range(3): - reader.start() - while True: - try: - executor.run(feed=None) - except fluid.core.EOFException: - reader.reset() - break - -.. py:method:: decorate_sample_generator(sample_generator, batch_size, drop_last=True, places=None) - -设置PyReader对象的数据源。 - -提供的 ``sample_generator`` 应该是一个python生成器,它生成的数据类型应为list(numpy.ndarray)。 - -当PyReader对象可迭代时,必须设置 ``places`` 。 - -如果所有的输入都没有LOD,这个方法比 ``decorate_sample_list_generator(paddle.batch(sample_generator, ...))`` 更快。 - -参数: - - **sample_generator** (generator) – Python生成器,yield 类型为list(numpy.ndarray) - - **batch_size** (int) – batch size,必须大于0 - - **drop_last** (bool) – 当样本数小于batch数量时,是否删除最后一个batch - - **places** (None|list(CUDAPlace)|list(CPUPlace)) – 位置列表。当PyReader可迭代时必须被提供 - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - import numpy as np - - EPOCH_NUM = 3 - ITER_NUM = 15 - BATCH_SIZE = 3 - - def network(image, label): - # 用户定义网络,此处以softmax回归为例 - predict = fluid.layers.fc(input=image, size=10, act='softmax') - return fluid.layers.cross_entropy(input=predict, label=label) - - def random_image_and_label_generator(height, width): - def generator(): - for i in range(ITER_NUM): - fake_image = np.random.uniform(low=0, - high=255, - size=[height, width]) - fake_label = np.array([1]) - yield fake_image, fake_label - return generator - - image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32') - label = fluid.layers.data(name='label', shape=[1], dtype='int64') - reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True) - - user_defined_generator = random_image_and_label_generator(784, 784) - reader.decorate_sample_generator(user_defined_generator, - batch_size=BATCH_SIZE, - places=[fluid.CPUPlace()]) - loss = network(image, label) - executor = fluid.Executor(fluid.CPUPlace()) - executor.run(fluid.default_startup_program()) - - for _ in range(EPOCH_NUM): - for data in reader(): - executor.run(feed=data, fetch_list=[loss]) - -.. 
py:method:: decorate_sample_list_generator(reader, places=None) - -设置PyReader对象的数据源。 - -提供的 ``reader`` 应该是一个python生成器,它生成列表(numpy.ndarray)类型的批处理数据。 - -当PyReader对象不可迭代时,必须设置 ``places`` 。 - -参数: - - **reader** (generator) – 返回列表(numpy.ndarray)类型的批处理数据的Python生成器 - - **places** (None|list(CUDAPlace)|list(CPUPlace)) – 位置列表。当PyReader可迭代时必须被提供 - -**代码示例** - -.. code-block:: python - - import paddle - import paddle.fluid as fluid - import numpy as np - - EPOCH_NUM = 3 - ITER_NUM = 15 - BATCH_SIZE = 3 - - def network(image, label): - # 用户定义网络,此处以softmax回归为例 - predict = fluid.layers.fc(input=image, size=10, act='softmax') - return fluid.layers.cross_entropy(input=predict, label=label) - - def random_image_and_label_generator(height, width): - def generator(): - for i in range(ITER_NUM): - fake_image = np.random.uniform(low=0, - high=255, - size=[height, width]) - fake_label = np.ones([1]) - yield fake_image, fake_label - return generator - - image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32') - label = fluid.layers.data(name='label', shape=[1], dtype='int64') - reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True) - - user_defined_generator = random_image_and_label_generator(784, 784) - reader.decorate_sample_list_generator( - paddle.batch(user_defined_generator, batch_size=BATCH_SIZE), - fluid.core.CPUPlace()) - loss = network(image, label) - executor = fluid.Executor(fluid.core.CPUPlace()) - executor.run(fluid.default_startup_program()) - - for _ in range(EPOCH_NUM): - for data in reader(): - executor.run(feed=data, fetch_list=[loss]) - -.. py:method:: decorate_batch_generator(reader, places=None) - -设置PyReader对象的数据源。 - -提供的 ``reader`` 应该是一个python生成器,它生成列表(numpy.ndarray)类型或LoDTensor类型的批处理数据。 - -当PyReader对象不可迭代时,必须设置 ``places`` 。 - -参数: - - **reader** (generator) – 返回LoDTensor类型的批处理数据的Python生成器 - - **places** (None|list(CUDAPlace)|list(CPUPlace)) – 位置列表。当PyReader可迭代时必须被提供 - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - import numpy as np - - EPOCH_NUM = 3 - ITER_NUM = 15 - BATCH_SIZE = 3 - - def network(image, label): - # 用户定义网络,此处以softmax回归为例 - predict = fluid.layers.fc(input=image, size=10, act='softmax') - return fluid.layers.cross_entropy(input=predict, label=label) - - def random_image_and_label_generator(height, width): - def generator(): - for i in range(ITER_NUM): - batch_image = np.random.uniform(low=0, - high=255, - size=[BATCH_SIZE, height, width]) - batch_label = np.ones([BATCH_SIZE, 1]) - batch_image = batch_image.astype('float32') - batch_label = batch_label.astype('int64') - yield batch_image, batch_label - return generator - - image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32') - label = fluid.layers.data(name='label', shape=[1], dtype='int64') - reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True) - - user_defined_generator = random_image_and_label_generator(784, 784) - reader.decorate_batch_generator(user_defined_generator, fluid.CPUPlace()) - - loss = network(image, label) - executor = fluid.Executor(fluid.CPUPlace()) - executor.run(fluid.default_startup_program()) - - for _ in range(EPOCH_NUM): - for data in reader(): - executor.run(feed=data, fetch_list=[loss]) - - -.. py:method:: next() - -获取下一个数据。用户不应直接调用此方法。此方法用于PaddlePaddle框架内部实现Python 2.x的迭代器协议。 +.. _cn_api_fluid_io_PyReader: + +PyReader +------------------------------- + +.. 
py:class:: paddle.fluid.io.PyReader(feed_list=None, capacity=None, use_double_buffer=True, iterable=True, return_list=False) + + + + + +在python中为数据输入创建一个reader对象。将使用python线程预取数据,并将其异步插入队列。当调用Executor.run(…)时,将自动提取队列中的数据。 + +参数: + - **feed_list** (list(Variable)|tuple(Variable)) - feed变量列表,由 ``fluid.layers.data()`` 创建。 + - **capacity** (int) - PyReader对象内部维护队列的容量大小。单位是batch数量。若reader读取速度较快,建议设置较大的capacity值。 + - **use_double_buffer** (bool) - 是否使用 ``double_buffer_reader`` 。若use_double_buffer=True,PyReader会异步地预读取下一个batch的数据,可加速数据读取过程,但同时会占用少量的CPU/GPU存储,即一个batch输入数据的存储空间。 + - **iterable** (bool) - 所创建的DataLoader对象是否可迭代。 + - **return_list** (bool) - 每个设备上的数据是否以list形式返回。仅在iterable = True模式下有效。若return_list = False,每个设备上的返回数据均是str -> LoDTensor的映射表,其中映射表的key是每个输入变量的名称。若return_list = True,则每个设备上的返回数据均是list(LoDTensor)。推荐在静态图模式下使用return_list = False,在动态图模式下使用return_list = True。 + + +返回: 被创建的reader对象 + +返回类型: reader (Reader) + + +**代码示例** + +1.如果iterable=False,则创建的PyReader对象几乎与 ``fluid.layers.py_reader()`` 相同。算子将被插入program中。用户应该在每个epoch之前调用 ``start()`` ,并在epoch结束时捕获 ``Executor.run()`` 抛出的 ``fluid.core.EOFException`` 。一旦捕获到异常,用户应该调用 ``reset()`` 手动重置reader。 + +.. code-block:: python + + import paddle + import paddle.fluid as fluid + import numpy as np + + EPOCH_NUM = 3 + ITER_NUM = 5 + BATCH_SIZE = 3 + + def network(image, label): + # 用户定义网络,此处以softmax回归为例 + predict = fluid.layers.fc(input=image, size=10, act='softmax') + return fluid.layers.cross_entropy(input=predict, label=label) + + def reader_creator_random_image_and_label(height, width): + def reader(): + for i in range(ITER_NUM): + fake_image = np.random.uniform(low=0, + high=255, + size=[height, width]) + fake_label = np.ones([1]) + yield fake_image, fake_label + return reader + + image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + + reader = fluid.io.PyReader(feed_list=[image, label], + capacity=4, + iterable=False) + + user_defined_reader = reader_creator_random_image_and_label(784, 784) + reader.decorate_sample_list_generator( + paddle.batch(user_defined_reader, batch_size=BATCH_SIZE)) + + loss = network(image, label) + executor = fluid.Executor(fluid.CPUPlace()) + executor.run(fluid.default_startup_program()) + for i in range(EPOCH_NUM): + reader.start() + while True: + try: + executor.run(feed=None) + except fluid.core.EOFException: + reader.reset() + break + + +2.如果iterable=True,则创建的PyReader对象与程序分离。程序中不会插入任何算子。在本例中,创建的reader是一个python生成器,它是可迭代的。用户应将从PyReader对象生成的数据输入 ``Executor.run(feed=...)`` 。 + +.. 
code-block:: python + + import paddle + import paddle.fluid as fluid + import numpy as np + + EPOCH_NUM = 3 + ITER_NUM = 5 + BATCH_SIZE = 10 + + def network(image, label): + # 用户定义网络,此处以softmax回归为例 + predict = fluid.layers.fc(input=image, size=10, act='softmax') + return fluid.layers.cross_entropy(input=predict, label=label) + + def reader_creator_random_image(height, width): + def reader(): + for i in range(ITER_NUM): + fake_image = np.random.uniform(low=0, high=255, size=[height, width]), + fake_label = np.ones([1]) + yield fake_image, fake_label + return reader + + image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True, return_list=False) + + user_defined_reader = reader_creator_random_image(784, 784) + reader.decorate_sample_list_generator( + paddle.batch(user_defined_reader, batch_size=BATCH_SIZE), + fluid.core.CPUPlace()) + loss = network(image, label) + executor = fluid.Executor(fluid.CPUPlace()) + executor.run(fluid.default_startup_program()) + + for _ in range(EPOCH_NUM): + for data in reader(): + executor.run(feed=data, fetch_list=[loss]) + +3. return_list=True,返回值将用list表示而非dict,通常用于动态图模式中。 + +.. code-block:: python + + import paddle + import paddle.fluid as fluid + import numpy as np + + EPOCH_NUM = 3 + ITER_NUM = 5 + BATCH_SIZE = 10 + + def reader_creator_random_image(height, width): + def reader(): + for i in range(ITER_NUM): + yield np.random.uniform(low=0, high=255, size=[height, width]), \ + np.random.random_integers(low=0, high=9, size=[1]) + return reader + + place = fluid.CPUPlace() + with fluid.dygraph.guard(place): + py_reader = fluid.io.PyReader(capacity=2, return_list=True) + user_defined_reader = reader_creator_random_image(784, 784) + py_reader.decorate_sample_list_generator( + paddle.batch(user_defined_reader, batch_size=BATCH_SIZE), + place) + for image, label in py_reader(): + relu = fluid.layers.relu(image) + +.. py:method:: start() + +启动数据输入线程。只能在reader对象不可迭代时调用。 + +**代码示例** + +.. code-block:: python + + import paddle + import paddle.fluid as fluid + import numpy as np + + BATCH_SIZE = 10 + + def generator(): + for i in range(5): + yield np.random.uniform(low=0, high=255, size=[784, 784]), + + image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32') + reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False) + reader.decorate_sample_list_generator( + paddle.batch(generator, batch_size=BATCH_SIZE)) + + executor = fluid.Executor(fluid.CPUPlace()) + executor.run(fluid.default_startup_program()) + for i in range(3): + reader.start() + while True: + try: + executor.run(feed=None) + except fluid.core.EOFException: + reader.reset() + break + +.. py:method:: reset() + +当 ``fluid.core.EOFException`` 抛出时重置reader对象。只能在reader对象不可迭代时调用。 + +**代码示例** + +.. 
code-block:: python + + import paddle + import paddle.fluid as fluid + import numpy as np + + BATCH_SIZE = 10 + + def generator(): + for i in range(5): + yield np.random.uniform(low=0, high=255, size=[784, 784]), + + image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32') + reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False) + reader.decorate_sample_list_generator( + paddle.batch(generator, batch_size=BATCH_SIZE)) + + executor = fluid.Executor(fluid.CPUPlace()) + executor.run(fluid.default_startup_program()) + for i in range(3): + reader.start() + while True: + try: + executor.run(feed=None) + except fluid.core.EOFException: + reader.reset() + break + +.. py:method:: decorate_sample_generator(sample_generator, batch_size, drop_last=True, places=None) + +设置PyReader对象的数据源。 + +提供的 ``sample_generator`` 应该是一个python生成器,它生成的数据类型应为list(numpy.ndarray)。 + +当PyReader对象可迭代时,必须设置 ``places`` 。 + +如果所有的输入都没有LOD,这个方法比 ``decorate_sample_list_generator(paddle.batch(sample_generator, ...))`` 更快。 + +参数: + - **sample_generator** (generator) – Python生成器,yield 类型为list(numpy.ndarray) + - **batch_size** (int) – batch size,必须大于0 + - **drop_last** (bool) – 当样本数小于batch数量时,是否删除最后一个batch + - **places** (None|list(CUDAPlace)|list(CPUPlace)) – 位置列表。当PyReader可迭代时必须被提供 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + EPOCH_NUM = 3 + ITER_NUM = 15 + BATCH_SIZE = 3 + + def network(image, label): + # 用户定义网络,此处以softmax回归为例 + predict = fluid.layers.fc(input=image, size=10, act='softmax') + return fluid.layers.cross_entropy(input=predict, label=label) + + def random_image_and_label_generator(height, width): + def generator(): + for i in range(ITER_NUM): + fake_image = np.random.uniform(low=0, + high=255, + size=[height, width]) + fake_label = np.array([1]) + yield fake_image, fake_label + return generator + + image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True) + + user_defined_generator = random_image_and_label_generator(784, 784) + reader.decorate_sample_generator(user_defined_generator, + batch_size=BATCH_SIZE, + places=[fluid.CPUPlace()]) + loss = network(image, label) + executor = fluid.Executor(fluid.CPUPlace()) + executor.run(fluid.default_startup_program()) + + for _ in range(EPOCH_NUM): + for data in reader(): + executor.run(feed=data, fetch_list=[loss]) + +.. py:method:: decorate_sample_list_generator(reader, places=None) + +设置PyReader对象的数据源。 + +提供的 ``reader`` 应该是一个python生成器,它生成列表(numpy.ndarray)类型的批处理数据。 + +当PyReader对象不可迭代时,必须设置 ``places`` 。 + +参数: + - **reader** (generator) – 返回列表(numpy.ndarray)类型的批处理数据的Python生成器 + - **places** (None|list(CUDAPlace)|list(CPUPlace)) – 位置列表。当PyReader可迭代时必须被提供 + +**代码示例** + +.. 
code-block:: python + + import paddle + import paddle.fluid as fluid + import numpy as np + + EPOCH_NUM = 3 + ITER_NUM = 15 + BATCH_SIZE = 3 + + def network(image, label): + # 用户定义网络,此处以softmax回归为例 + predict = fluid.layers.fc(input=image, size=10, act='softmax') + return fluid.layers.cross_entropy(input=predict, label=label) + + def random_image_and_label_generator(height, width): + def generator(): + for i in range(ITER_NUM): + fake_image = np.random.uniform(low=0, + high=255, + size=[height, width]) + fake_label = np.ones([1]) + yield fake_image, fake_label + return generator + + image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True) + + user_defined_generator = random_image_and_label_generator(784, 784) + reader.decorate_sample_list_generator( + paddle.batch(user_defined_generator, batch_size=BATCH_SIZE), + fluid.core.CPUPlace()) + loss = network(image, label) + executor = fluid.Executor(fluid.core.CPUPlace()) + executor.run(fluid.default_startup_program()) + + for _ in range(EPOCH_NUM): + for data in reader(): + executor.run(feed=data, fetch_list=[loss]) + +.. py:method:: decorate_batch_generator(reader, places=None) + +设置PyReader对象的数据源。 + +提供的 ``reader`` 应该是一个python生成器,它生成列表(numpy.ndarray)类型或LoDTensor类型的批处理数据。 + +当PyReader对象不可迭代时,必须设置 ``places`` 。 + +参数: + - **reader** (generator) – 返回LoDTensor类型的批处理数据的Python生成器 + - **places** (None|list(CUDAPlace)|list(CPUPlace)) – 位置列表。当PyReader可迭代时必须被提供 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + EPOCH_NUM = 3 + ITER_NUM = 15 + BATCH_SIZE = 3 + + def network(image, label): + # 用户定义网络,此处以softmax回归为例 + predict = fluid.layers.fc(input=image, size=10, act='softmax') + return fluid.layers.cross_entropy(input=predict, label=label) + + def random_image_and_label_generator(height, width): + def generator(): + for i in range(ITER_NUM): + batch_image = np.random.uniform(low=0, + high=255, + size=[BATCH_SIZE, height, width]) + batch_label = np.ones([BATCH_SIZE, 1]) + batch_image = batch_image.astype('float32') + batch_label = batch_label.astype('int64') + yield batch_image, batch_label + return generator + + image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True) + + user_defined_generator = random_image_and_label_generator(784, 784) + reader.decorate_batch_generator(user_defined_generator, fluid.CPUPlace()) + + loss = network(image, label) + executor = fluid.Executor(fluid.CPUPlace()) + executor.run(fluid.default_startup_program()) + + for _ in range(EPOCH_NUM): + for data in reader(): + executor.run(feed=data, fetch_list=[loss]) + + +.. py:method:: next() + +获取下一个数据。用户不应直接调用此方法。此方法用于PaddlePaddle框架内部实现Python 2.x的迭代器协议。 diff --git a/doc/fluid/api_cn/io_cn/batch_cn.rst b/doc/fluid/api_cn/io_cn/batch_cn.rst index d93e4b1d5166a7bb1f73f31f8c9a82c36ae68df1..3e3804abfb573be5a561fa7fb48603a15bf5f87c 100644 --- a/doc/fluid/api_cn/io_cn/batch_cn.rst +++ b/doc/fluid/api_cn/io_cn/batch_cn.rst @@ -5,6 +5,9 @@ batch .. 
py:function:: paddle.fluid.io.batch(reader, batch_size, drop_last=False) + + + 该接口是一个reader的装饰器。返回的reader将输入reader的数据打包成指定的batch_size大小的批处理数据(batched data)。 参数: diff --git a/doc/fluid/api_cn/io_cn/buffered_cn.rst b/doc/fluid/api_cn/io_cn/buffered_cn.rst index 26f7a2c243134217209cd1518a7b5c45e970a506..027c6346f735874d8c9accc38024c09b5940c429 100644 --- a/doc/fluid/api_cn/io_cn/buffered_cn.rst +++ b/doc/fluid/api_cn/io_cn/buffered_cn.rst @@ -5,6 +5,9 @@ buffered .. py:function:: paddle.fluid.io.buffered(reader, size) + + + 创建一个缓存数据读取器,它读取数据并且存储进缓存区,从缓存区读取数据将会加速,只要缓存不是空的。 参数: diff --git a/doc/fluid/api_cn/io_cn/cache_cn.rst b/doc/fluid/api_cn/io_cn/cache_cn.rst index aa37f22044aef34a35359d36b357e538a078ab70..e93e4c85d134c0feb2ff813a6127c89816baed76 100644 --- a/doc/fluid/api_cn/io_cn/cache_cn.rst +++ b/doc/fluid/api_cn/io_cn/cache_cn.rst @@ -5,6 +5,9 @@ cache .. py:function:: paddle.fluid.io.cache(reader) + + + 缓存reader数据到内存中,小心此方法可能会花长时间来处理数据,并且会占用大量内存。 ``reader()`` 只能被调用一次。 参数: diff --git a/doc/fluid/api_cn/io_cn/chain_cn.rst b/doc/fluid/api_cn/io_cn/chain_cn.rst index 9b90d3d1c4b27b23982eadfcf8694392961e8252..4a4872d268cc2dfbddad0a6d4720be54e5eb41c9 100644 --- a/doc/fluid/api_cn/io_cn/chain_cn.rst +++ b/doc/fluid/api_cn/io_cn/chain_cn.rst @@ -5,6 +5,9 @@ chain .. py:function:: paddle.fluid.io.chain(*readers) + + + 该接口将多个数据读取器组成一个数据读取器,它依次返回多个数据读取器的输出数据,同时不改变输出数据原先的格式。 举例来说,如果有3个输入读取器且输出分别为[0,0,0]、[10,10,10]和[20,20,20],那么调用该接口产生的新数据读取器的输出为[0,0,0], [10,10,10], [20,20,20]。 diff --git a/doc/fluid/api_cn/io_cn/compose_cn.rst b/doc/fluid/api_cn/io_cn/compose_cn.rst index 0153ceadc2fc6f158539ab9e5069dd4079d2a118..b4393bab5db375413712c27b60fb0699e4d85370 100644 --- a/doc/fluid/api_cn/io_cn/compose_cn.rst +++ b/doc/fluid/api_cn/io_cn/compose_cn.rst @@ -5,6 +5,9 @@ compose .. py:function:: paddle.fluid.io.compose(*readers, **kwargs) + + + 该接口将多个数据读取器组合为一个数据读取器,返回读取器的输出包含所有输入读取器的输出。 例如:如果输入为三个reader,三个reader的输出分别为:(1,2)、3、(4,5),则组合reader的输出为:(1,2,3,4,5)。 diff --git a/doc/fluid/api_cn/io_cn/firstn_cn.rst b/doc/fluid/api_cn/io_cn/firstn_cn.rst index 0123e25a19af6acfb729fb1f7eab565cd76d8b64..ba9c1d427ab3ec1946dab0e78f1a2021a712fe94 100644 --- a/doc/fluid/api_cn/io_cn/firstn_cn.rst +++ b/doc/fluid/api_cn/io_cn/firstn_cn.rst @@ -5,6 +5,9 @@ firstn .. py:function:: paddle.fluid.io.firstn(reader, n) + + + 该接口创建一个数据读取器,它可以返回的最大样本数为n。 参数: diff --git a/doc/fluid/api_cn/io_cn/get_program_parameter_cn.rst b/doc/fluid/api_cn/io_cn/get_program_parameter_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..43490ac9dff81daad4195593af4a2b4c5e443004 --- /dev/null +++ b/doc/fluid/api_cn/io_cn/get_program_parameter_cn.rst @@ -0,0 +1,30 @@ +.. _cn_api_fluid_io_get_program_parameter: + +get_program_parameter +------------------------------- + +.. py:function:: paddle.fluid.io.get_program_parameter(program) + +:api_attr: 声明式编程模式(静态图) + + + +该接口从Program中获取所有参数。 + +参数: + - **program** ( :ref:`cn_api_fluid_Program` ) – 从该Program中获取参数。 + +返回: 包含此Program中所有参数的list + +返回类型: list + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + data = fluid.data(name="img", shape=[64, 784]) + w = fluid.layers.create_parameter(shape=[784, 200], dtype='float32', name='fc_w') + b = fluid.layers.create_parameter(shape=[200], dtype='float32', name='fc_b') + list_para = fluid.io.get_program_parameter( fluid.default_main_program() ) + diff --git a/doc/fluid/api_cn/io_cn/get_program_persistable_vars_cn.rst b/doc/fluid/api_cn/io_cn/get_program_persistable_vars_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b7eb886d371c97f09fa8b4e25ba731313f3f9108 --- /dev/null +++ b/doc/fluid/api_cn/io_cn/get_program_persistable_vars_cn.rst @@ -0,0 +1,30 @@ +.. _cn_api_fluid_io_get_program_persistable_vars: + +get_program_persistable_vars +------------------------------- + +.. py:function:: paddle.fluid.io.get_program_persistable_vars(program) + +:api_attr: 声明式编程模式(静态图) + + + +该接口从Program中获取所有persistable的变量。 + +参数: + - **program** ( :ref:`cn_api_fluid_Program` ) – 从该Program中获取persistable的变量。 + +返回: 包含此Program中所有persistable的变量 + +返回类型: list + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + data = fluid.data(name="img", shape=[64, 784]) + w = fluid.layers.create_parameter(shape=[784, 200], dtype='float32', name='fc_w') + b = fluid.layers.create_parameter(shape=[200], dtype='float32', name='fc_b') + list_para = fluid.io.get_program_persistable_vars( fluid.default_main_program() ) + diff --git a/doc/fluid/api_cn/io_cn/load_cn.rst b/doc/fluid/api_cn/io_cn/load_cn.rst index d702dff7812ecf5a80de86eedb6c6269f3cf83bf..863dd4ea083bd3d663e4e3a4b06eaed787086b2d 100644 --- a/doc/fluid/api_cn/io_cn/load_cn.rst +++ b/doc/fluid/api_cn/io_cn/load_cn.rst @@ -5,4 +5,11 @@ load .. py:function:: paddle.fluid.io.load(program, model_path, executor=None, var_list=None) -``fluid.io.load`` 是 ``fluid.load`` 的别名 +:api_attr: 声明式编程模式(静态图) +:alias_main: paddle.load +:alias: paddle.load,paddle.tensor.load,paddle.tensor.io.load +:old_api: paddle.fluid.io.load + + + +``fluid.io.load`` 是 :ref:`cn_api_fluid_load` 的别名 diff --git a/doc/fluid/api_cn/io_cn/load_inference_model_cn.rst b/doc/fluid/api_cn/io_cn/load_inference_model_cn.rst index 6e053d3b80d9b12ab8e42e6df81f851fff326a5d..e53d9661a7bac90805a6e880a2c3046b68caa503 100644 --- a/doc/fluid/api_cn/io_cn/load_inference_model_cn.rst +++ b/doc/fluid/api_cn/io_cn/load_inference_model_cn.rst @@ -3,10 +3,13 @@ load_inference_model ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.io.load_inference_model(dirname, executor, model_filename=None, params_filename=None, pserver_endpoints=None) +:api_attr: 声明式编程模式(静态图) + + + 从指定文件路径中加载预测模型(Inference Model),即调用该接口可获得模型结构(Inference Program)和模型参数。若只想加载预训练后的模型参数,请使用 :ref:`cn_api_fluid_io_load_params` 接口。更多细节请参考 :ref:`api_guide_model_save_reader` 。 参数: diff --git a/doc/fluid/api_cn/io_cn/load_params_cn.rst b/doc/fluid/api_cn/io_cn/load_params_cn.rst index 4c178a482a72b8e6b9907fc8fb87a4ca5a977672..53a5bccc6f2024abf67a06320249a6652266077d 100644 --- a/doc/fluid/api_cn/io_cn/load_params_cn.rst +++ b/doc/fluid/api_cn/io_cn/load_params_cn.rst @@ -3,10 +3,13 @@ load_params ------------------------------- -**注意:该API仅支持【静态图】模式** .. 
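+
+承接上文 ``load_inference_model`` 的说明,这里补充一个“先保存预测模型、再加载并执行预测”的最小示意。示意中的网络结构、保存路径 ``./infer_model`` 等均为举例假设,并非该接口要求的固定写法:
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+    import numpy as np
+
+    main_prog = fluid.Program()
+    startup_prog = fluid.Program()
+    with fluid.program_guard(main_prog, startup_prog):
+        image = fluid.data(name='img', shape=[None, 784], dtype='float32')
+        hidden = fluid.layers.fc(input=image, size=200, act='relu')
+        prediction = fluid.layers.fc(input=hidden, size=10, act='softmax')
+
+    place = fluid.CPUPlace()
+    exe = fluid.Executor(place)
+    exe.run(startup_prog)
+
+    # 将预测所需的模型结构和参数保存到假设的路径 ./infer_model
+    path = "./infer_model"
+    fluid.io.save_inference_model(dirname=path,
+                                  feeded_var_names=['img'],
+                                  target_vars=[prediction],
+                                  executor=exe,
+                                  main_program=main_prog)
+
+    # 加载预测模型,得到预测用的Program、输入变量名列表和输出变量列表
+    [inference_program, feed_target_names, fetch_targets] = (
+        fluid.io.load_inference_model(dirname=path, executor=exe))
+
+    # 用随机数据执行一次预测
+    tensor_img = np.random.rand(1, 784).astype('float32')
+    results = exe.run(inference_program,
+                      feed={feed_target_names[0]: tensor_img},
+                      fetch_list=fetch_targets)
+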
py:function:: paddle.fluid.io.load_params(executor, dirname, main_program=None, filename=None) +:api_attr: 声明式编程模式(静态图) + + + 该接口从指定的 ``main_program`` 中筛选出所有模型参数变量,并根据目录 ``dirname`` 或 ``filename`` 提供的参数文件对这些模型参数进行赋值。 使用 ``dirname`` 指定模型参数的存储路径。若模型参数变量以分离文件的形式存储在 ``dirname`` 指定的目录下,则设置 ``filename`` 值为None;若所有模型参数存储在一个单独的二进制文件中,则使用 ``filename`` 来指明这个二进制文件。 diff --git a/doc/fluid/api_cn/io_cn/load_persistables_cn.rst b/doc/fluid/api_cn/io_cn/load_persistables_cn.rst index 2d9def84ed16d6ce3940dab624cfb59450224f5e..24d3eac7270cee0b3b1e61be1c21ac671099e1b3 100644 --- a/doc/fluid/api_cn/io_cn/load_persistables_cn.rst +++ b/doc/fluid/api_cn/io_cn/load_persistables_cn.rst @@ -3,10 +3,13 @@ load_persistables ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.io.load_persistables(executor, dirname, main_program=None, filename=None) +:api_attr: 声明式编程模式(静态图) + + + 该接口从给定的 ``main_program`` 中取出所有 ``persistable==True`` 的变量(即持久性变量,详见 :ref:`api_guide_model_save_reader` ),并根据目录 ``dirname`` 或 ``filename`` 提供的参数文件对这些持久性变量进行赋值。 使用 ``dirname`` 指定持久性变量的存储路径。若持久性变量以分离文件的形式保存在 ``dirname`` 指定的目录下,则设置 ``filename`` 值为None;若所有持久性变量保存在一个单独的二进制文件中,则使用 ``filename`` 来指明这个二进制文件。 diff --git a/doc/fluid/api_cn/io_cn/load_program_state_cn.rst b/doc/fluid/api_cn/io_cn/load_program_state_cn.rst index bee50c0770b91590161de01b255d18a0ea3915c0..60b457b35043c25545522a808817f5f72899aa5b 100644 --- a/doc/fluid/api_cn/io_cn/load_program_state_cn.rst +++ b/doc/fluid/api_cn/io_cn/load_program_state_cn.rst @@ -5,6 +5,10 @@ load_program_state .. py:function:: paddle.fluid.io.load_program_state(model_path, var_list=None) +:api_attr: 声明式编程模式(静态图) + + + 该接口从本地加载 ``Program`` 的参数和优化器的变量信息到内存中。 参数: diff --git a/doc/fluid/api_cn/io_cn/load_vars_cn.rst b/doc/fluid/api_cn/io_cn/load_vars_cn.rst index 99b24eb1dc2f7635a174dd31734ebcb70d9171be..4126ae5eccefb3c713d20607031dd1ab2642f6fa 100644 --- a/doc/fluid/api_cn/io_cn/load_vars_cn.rst +++ b/doc/fluid/api_cn/io_cn/load_vars_cn.rst @@ -1,65 +1,69 @@ -.. _cn_api_fluid_io_load_vars: - -load_vars -------------------------------- - -.. py:function:: paddle.fluid.io.load_vars(executor, dirname, main_program=None, vars=None, predicate=None, filename=None) - -该接口从文件中加载 ``Program`` 的变量。 - -通过 ``vars`` 指定需要加载的变量,或者通过 ``predicate`` 筛选需要加载的变量, ``vars`` 和 ``predicate`` 不能同时为None。 - -参数: - - **executor** (Executor) – 运行的执行器,执行器的介绍请参考 :ref:`api_guide_model_save_reader` 。 - - **dirname** (str) – 加载变量所在的目录路径。 - - **main_program** (Program,可选) – 需要加载变量的 ``Program`` , ``Program`` 的介绍请参考 :ref:`api_guide_Program` 。如果 ``main_program`` 为None,则使用默认的主程序。默认值为None。 - - **vars** (list[Variable],可选) – 通过该列表指定需要加载的变量。默认值为None。 - - **predicate** (function,可选) – 通过该函数筛选 :math:`predicate(variable)== True` 的变量进行加载。如果通过 ``vars`` 指定了需要加载的变量,则该参数无效。默认值为None。 - - **filename** (str,可选) – 加载所有变量的文件。如果所有待加载变量是保存在一个文件中,则设置 ``filename`` 为该文件名;如果所有待加载变量是按照变量名称单独保存成文件,则设置 ``filename`` 为None。默认值为None。 - -返回: 无 - -抛出异常: - - ``TypeError`` - 如果main_program不是Program的实例,也不是None。 - - -**代码示例** - -.. 
code-block:: python - - import paddle.fluid as fluid - main_prog = fluid.Program() - startup_prog = fluid.Program() - with fluid.program_guard(main_prog, startup_prog): - data = fluid.layers.data(name="img", shape=[64, 784], append_batch_size=False) - w = fluid.layers.create_parameter(shape=[784, 200], dtype='float32', name='fc_w') - b = fluid.layers.create_parameter(shape=[200], dtype='float32', name='fc_b') - hidden_w = fluid.layers.matmul(x=data, y=w) - hidden_b = fluid.layers.elementwise_add(hidden_w, b) - place = fluid.CPUPlace() - exe = fluid.Executor(place) - exe.run(startup_prog) - - # 示例一:用vars来指定加载变量。 - path = "./my_paddle_vars" - var_list = [w, b] - fluid.io.save_vars(executor=exe, dirname=path, vars=var_list, - filename="vars_file") - fluid.io.load_vars(executor=exe, dirname=path, vars=var_list, - filename="vars_file") - # 加载w和b。它们被保存在'var_file'的文件中,所在路径为 "./my_paddle_model" 。 - - # 示例二:通过predicate来筛选加载变量。 - def name_has_fc(var): - res = "fc" in var.name - return res - - param_path = "./my_paddle_model" - fluid.io.save_vars(executor=exe, dirname=param_path, main_program=main_prog, vars=None, predicate=name_has_fc) - fluid.io.load_vars(executor=exe, dirname=param_path, main_program=main_prog, vars=None, predicate=name_has_fc) - #加载 `main_program` 中变量名包含 ‘fc’ 的所有变量 - #此前所有变量应该保存在不同文件中 - - - - +.. _cn_api_fluid_io_load_vars: + +load_vars +------------------------------- + +.. py:function:: paddle.fluid.io.load_vars(executor, dirname, main_program=None, vars=None, predicate=None, filename=None) + +:api_attr: 声明式编程模式(静态图) + + + +该接口从文件中加载 ``Program`` 的变量。 + +通过 ``vars`` 指定需要加载的变量,或者通过 ``predicate`` 筛选需要加载的变量, ``vars`` 和 ``predicate`` 不能同时为None。 + +参数: + - **executor** (Executor) – 运行的执行器,执行器的介绍请参考 :ref:`api_guide_model_save_reader` 。 + - **dirname** (str) – 加载变量所在的目录路径。 + - **main_program** (Program,可选) – 需要加载变量的 ``Program`` , ``Program`` 的介绍请参考 :ref:`api_guide_Program` 。如果 ``main_program`` 为None,则使用默认的主程序。默认值为None。 + - **vars** (list[Variable],可选) – 通过该列表指定需要加载的变量。默认值为None。 + - **predicate** (function,可选) – 通过该函数筛选 :math:`predicate(variable)== True` 的变量进行加载。如果通过 ``vars`` 指定了需要加载的变量,则该参数无效。默认值为None。 + - **filename** (str,可选) – 加载所有变量的文件。如果所有待加载变量是保存在一个文件中,则设置 ``filename`` 为该文件名;如果所有待加载变量是按照变量名称单独保存成文件,则设置 ``filename`` 为None。默认值为None。 + +返回: 无 + +抛出异常: + - ``TypeError`` - 如果main_program不是Program的实例,也不是None。 + + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + main_prog = fluid.Program() + startup_prog = fluid.Program() + with fluid.program_guard(main_prog, startup_prog): + data = fluid.layers.data(name="img", shape=[64, 784], append_batch_size=False) + w = fluid.layers.create_parameter(shape=[784, 200], dtype='float32', name='fc_w') + b = fluid.layers.create_parameter(shape=[200], dtype='float32', name='fc_b') + hidden_w = fluid.layers.matmul(x=data, y=w) + hidden_b = fluid.layers.elementwise_add(hidden_w, b) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(startup_prog) + + # 示例一:用vars来指定加载变量。 + path = "./my_paddle_vars" + var_list = [w, b] + fluid.io.save_vars(executor=exe, dirname=path, vars=var_list, + filename="vars_file") + fluid.io.load_vars(executor=exe, dirname=path, vars=var_list, + filename="vars_file") + # 加载w和b。它们被保存在'var_file'的文件中,所在路径为 "./my_paddle_model" 。 + + # 示例二:通过predicate来筛选加载变量。 + def name_has_fc(var): + res = "fc" in var.name + return res + + param_path = "./my_paddle_model" + fluid.io.save_vars(executor=exe, dirname=param_path, main_program=main_prog, vars=None, predicate=name_has_fc) + fluid.io.load_vars(executor=exe, dirname=param_path, main_program=main_prog, vars=None, predicate=name_has_fc) + #加载 `main_program` 中变量名包含 ‘fc’ 的所有变量 + #此前所有变量应该保存在不同文件中 + + + + diff --git a/doc/fluid/api_cn/io_cn/map_readers_cn.rst b/doc/fluid/api_cn/io_cn/map_readers_cn.rst index 55e9b7bae25ef1e2ac72a29da77a62b31a4daca3..cb50e62839cd523184fef37d4ff9dc625c52fc6b 100644 --- a/doc/fluid/api_cn/io_cn/map_readers_cn.rst +++ b/doc/fluid/api_cn/io_cn/map_readers_cn.rst @@ -5,6 +5,9 @@ map_readers .. py:function:: paddle.fluid.io.map_readers(func, *readers) + + + 该接口将创建一个数据读取器(Reader),其中 `func` 函数的输出将直接作为新数据读取器的输出, `readers` 的输出将作为函数 `func` 的输入参数。 例如:如果输入的 `readers` 为两个输出分别为:2、3 的 `reader` ,输入的 `func` 为乘法函数 `mul(x, y)` ,则得到的新建 `reader` 的输出为:6。 diff --git a/doc/fluid/api_cn/io_cn/multiprocess_reader_cn.rst b/doc/fluid/api_cn/io_cn/multiprocess_reader_cn.rst index 4c4b697c55a0f6710ac56ddf56b022f3633f8cc8..ab600239eb702ffa2c503f88d5bd9d6ef6bda443 100644 --- a/doc/fluid/api_cn/io_cn/multiprocess_reader_cn.rst +++ b/doc/fluid/api_cn/io_cn/multiprocess_reader_cn.rst @@ -5,6 +5,9 @@ multiprocess_reader .. py:function:: paddle.fluid.io.multiprocess_reader(readers, use_pipe=True, queue_size=1000) + + + 使用python多进程从 ``readers`` 中读取数据,然后使用 ``multiprocessing.Pipe`` 或 ``multiprocessing.Queue`` 合并所有数据。 ``readers`` 列表中的每个reader会被创建一个独立的进程来调用,reader之间应该相互独立,互不影响,避免出现多进程读取的冲突问题. multiprocess.queue需要/dev/shm的rw访问权限,某些平台不支持。 diff --git a/doc/fluid/api_cn/io_cn/save_cn.rst b/doc/fluid/api_cn/io_cn/save_cn.rst index 638c911d9c74a4395b21446ae3dcc282f254857a..f5095aecb12322cda0284c9337bcf27c30dd0194 100644 --- a/doc/fluid/api_cn/io_cn/save_cn.rst +++ b/doc/fluid/api_cn/io_cn/save_cn.rst @@ -3,43 +3,11 @@ save ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.io.save(program, model_path) -该接口将传入的参数、优化器信息和网络描述保存到 ``model_path`` 。 - -参数包含所有的可训练 :ref:`cn_api_fluid_Variable` ,将保存到后缀为 ``.pdparams`` 的文件中。 - -优化器信息包含优化器使用的所有变量。对于Adam优化器,包含beta1、beta2、momentum等。 -所有信息将保存到后缀为 ``.pdopt`` 的文件中。(如果优化器没有需要保存的变量(如sgd),则不会生成)。 - -网络描述是程序的描述。它只用于部署。描述将保存到后缀为 ``.pdmodel`` 的文件中。 - -参数: - - **program** ( :ref:`cn_api_fluid_Program` ) – 要保存的Program。 - - **model_path** (str) – 保存program的文件前缀。格式为 ``目录名称/文件前缀``。如果文件前缀为空字符串,会引发异常。 - -返回: 无 - -**代码示例** - -.. 
code-block:: python - - import paddle.fluid as fluid - - x = fluid.data(name="x", shape=[10, 10], dtype='float32') - y = fluid.layers.fc(x, 10) - z = fluid.layers.fc(y, 10) - place = fluid.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) - - fluid.save(fluid.default_main_program(), "./test_path") - - - - +:api_attr: 声明式编程模式(静态图) +``fluid.io.save`` 是 :ref:`cn_api_fluid_save` 的别名 diff --git a/doc/fluid/api_cn/io_cn/save_inference_model_cn.rst b/doc/fluid/api_cn/io_cn/save_inference_model_cn.rst index 966a05a79c93660f0999dcc5cda2b53569b3c512..e085e41395819349bbd3b4e87aed7f351a009cfe 100644 --- a/doc/fluid/api_cn/io_cn/save_inference_model_cn.rst +++ b/doc/fluid/api_cn/io_cn/save_inference_model_cn.rst @@ -3,10 +3,13 @@ save_inference_model ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.io.save_inference_model(dirname, feeded_var_names, target_vars, executor, main_program=None, model_filename=None, params_filename=None, export_for_deployment=True, program_only=False) +:api_attr: 声明式编程模式(静态图) + + + 修剪指定的 ``main_program`` 以构建一个专门用于预测的 ``Inference Program`` ( ``Program`` 含义详见 :ref:`api_guide_Program` )。 所得到的 ``Inference Program`` 及其对应的所有相关参数均被保存到 ``dirname`` 指定的目录中。若只想保存训练后的模型参数,请使用 :ref:`cn_api_fluid_io_save_params` 接口。更多细节请参考 :ref:`api_guide_model_save_reader` 。 **注意:dirname用于指定保存预测模型结构和参数的目录。若需要将模型参数保存在指定目录的若干文件中,请设置params_filename的值为None; 若需要将所有模型参数保存在一个单独的二进制文件中,请使用params_filename来指定该二进制文件的名称。** diff --git a/doc/fluid/api_cn/io_cn/save_params_cn.rst b/doc/fluid/api_cn/io_cn/save_params_cn.rst index 8353acdda6ba9c1cb6c67c5ff0479e93519b40bd..879c16ed0d192fbeaa9e803f07b245bd6d5ec076 100644 --- a/doc/fluid/api_cn/io_cn/save_params_cn.rst +++ b/doc/fluid/api_cn/io_cn/save_params_cn.rst @@ -3,10 +3,13 @@ save_params ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.io.save_params(executor, dirname, main_program=None, filename=None) +:api_attr: 声明式编程模式(静态图) + + + 该OP从 ``main_program`` 中取出所有参数,然后将它们保存到 ``dirname`` 目录下或名为 ``filename`` 的文件中。 ``dirname`` 用于指定保存参数的目标路径。若想将参数保存到多个独立文件中,设置 ``filename=None`` ; 若想将所有参数保存在单个文件中,请设置 ``filename`` 来指定该文件的名称。 diff --git a/doc/fluid/api_cn/io_cn/save_persistables_cn.rst b/doc/fluid/api_cn/io_cn/save_persistables_cn.rst index b832914d9a8914988c7f60c4839de65fc9ab9f84..9de51518e0caabcaf2a42a258116039753e5bac2 100644 --- a/doc/fluid/api_cn/io_cn/save_persistables_cn.rst +++ b/doc/fluid/api_cn/io_cn/save_persistables_cn.rst @@ -3,10 +3,13 @@ save_persistables ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.io.save_persistables(executor, dirname, main_program=None, filename=None) +:api_attr: 声明式编程模式(静态图) + + + 该OP从给定 ``main_program`` 中取出所有持久性变量(详见 :ref:`api_guide_model_save_reader` ),然后将它们保存到目录 ``dirname`` 中或 ``filename`` 指定的文件中。 ``dirname`` 用于指定保存持久性变量的目录。如果想将持久性变量保存到指定目录的若干文件中,请设置 ``filename=None`` ; 若想将所有持久性变量保存在同一个文件中,请设置 ``filename`` 来指定文件的名称。 diff --git a/doc/fluid/api_cn/io_cn/save_vars_cn.rst b/doc/fluid/api_cn/io_cn/save_vars_cn.rst index 325c21628a89df83ef222a34afcf65f75736b815..7530540754accd0f4658ba885721fab22493d738 100644 --- a/doc/fluid/api_cn/io_cn/save_vars_cn.rst +++ b/doc/fluid/api_cn/io_cn/save_vars_cn.rst @@ -3,10 +3,13 @@ save_vars ------------------------------- -**注意:该API仅支持【静态图】模式** .. 
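+
+承接上文 ``save_params`` 与 ``save_persistables`` 的说明,这里补充一个“保存参数后再恢复”的最小示意。其中的网络结构与保存目录 ``./my_params`` 均为举例假设:
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+
+    main_prog = fluid.Program()
+    startup_prog = fluid.Program()
+    with fluid.program_guard(main_prog, startup_prog):
+        image = fluid.data(name='img', shape=[None, 784], dtype='float32')
+        feature = fluid.layers.fc(input=image, size=10, act='relu')
+
+    place = fluid.CPUPlace()
+    exe = fluid.Executor(place)
+    exe.run(startup_prog)
+
+    # filename=None 时,main_prog 中的各个参数被分别保存到 ./my_params 目录下的独立文件中
+    fluid.io.save_params(executor=exe, dirname="./my_params", main_program=main_prog)
+
+    # 之后可以用 load_params 从同一目录把这些参数恢复到该 Program 中
+    fluid.io.load_params(executor=exe, dirname="./my_params", main_program=main_prog)
+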
py:function:: paddle.fluid.io.save_vars(executor, dirname, main_program=None, vars=None, predicate=None, filename=None) +:api_attr: 声明式编程模式(静态图) + + + 该接口将 ``Program`` 的变量保存到文件中。 通过 ``vars`` 指定需要保存的变量,或者通过 ``predicate`` 筛选需要保存的变量, ``vars`` 和 ``predicate`` 不能同时为None。 diff --git a/doc/fluid/api_cn/io_cn/set_program_state_cn.rst b/doc/fluid/api_cn/io_cn/set_program_state_cn.rst index 668c7fb5f743340861e1c7c8888548a003b46140..9af0478f0c88d1bc469c094725b13a33a7f28763 100644 --- a/doc/fluid/api_cn/io_cn/set_program_state_cn.rst +++ b/doc/fluid/api_cn/io_cn/set_program_state_cn.rst @@ -5,6 +5,10 @@ set_program_state .. py:function:: paddle.fluid.io.set_program_state(program, state_dict) +:api_attr: 声明式编程模式(静态图) + + + 利用 ``state_dict`` 设置 ``Program`` 的参数和优化器信息。 如果参数的 shape 或 dtype 不匹配,则会引发异常。 diff --git a/doc/fluid/api_cn/io_cn/shuffle_cn.rst b/doc/fluid/api_cn/io_cn/shuffle_cn.rst index a986fc7b3d218b39c0062cd3390c269adae292a1..7b64c23b7e5706e23a9051f60b3a55bea7ea6573 100644 --- a/doc/fluid/api_cn/io_cn/shuffle_cn.rst +++ b/doc/fluid/api_cn/io_cn/shuffle_cn.rst @@ -5,6 +5,12 @@ shuffle .. py:function:: paddle.fluid.io.shuffle(reader, buffer_size) +:alias_main: paddle.shuffle +:alias: paddle.shuffle,paddle.tensor.shuffle,paddle.tensor.random.shuffle +:old_api: paddle.fluid.io.shuffle + + + 该接口创建一个数据读取器,其功能是将原始数据读取器的数据打乱,然后返回无序的数据。 从原始数据读取器取出buf_size个数据到缓冲区,将缓冲区数据打乱,然后将无序的数据依次返回。当缓冲区数据全部输出后,再次执行上述步骤。 diff --git a/doc/fluid/api_cn/io_cn/xmap_readers_cn.rst b/doc/fluid/api_cn/io_cn/xmap_readers_cn.rst index 7177d665242ce8e5a5137d83c60ec773eb4e9f05..5f434ecb9525c4f8dd746319d2b4baace88c89ae 100644 --- a/doc/fluid/api_cn/io_cn/xmap_readers_cn.rst +++ b/doc/fluid/api_cn/io_cn/xmap_readers_cn.rst @@ -5,6 +5,9 @@ xmap_readers .. py:function:: paddle.fluid.io.xmap_readers(mapper, reader, process_num, buffer_size, order=False) + + + 多线程下,使用自定义映射器 reader 返回样本到输出队列。 参数: diff --git a/doc/fluid/api_cn/layers_cn.rst b/doc/fluid/api_cn/layers_cn.rst index e4b4a374f57f8dc05fd2781c74d21399612859f0..8990362cf904ee1e252ed0a04a0dfaedd5707350 100644 --- a/doc/fluid/api_cn/layers_cn.rst +++ b/doc/fluid/api_cn/layers_cn.rst @@ -31,6 +31,7 @@ fluid.layers layers_cn/auc_cn.rst layers_cn/autoincreased_step_counter_cn.rst layers_cn/batch_norm_cn.rst + layers_cn/BasicDecoder_cn.rst layers_cn/beam_search_cn.rst layers_cn/beam_search_decode_cn.rst layers_cn/bilinear_tensor_product_cn.rst @@ -87,6 +88,7 @@ fluid.layers layers_cn/dynamic_lstmp_cn.rst layers_cn/dynamic_decode_cn.rst layers_cn/Decoder_cn.rst + layers_cn/DecodeHelper_cn.rst layers_cn/DynamicRNN_cn.rst layers_cn/edit_distance_cn.rst layers_cn/elementwise_add_cn.rst @@ -108,7 +110,6 @@ fluid.layers layers_cn/exponential_decay_cn.rst layers_cn/eye_cn.rst layers_cn/fc_cn.rst - layers_cn/fill_constant_batch_size_like_cn.rst layers_cn/fill_constant_cn.rst layers_cn/filter_by_instag_cn.rst layers_cn/flatten_cn.rst @@ -117,7 +118,6 @@ fluid.layers layers_cn/gather_cn.rst layers_cn/gather_nd_cn.rst layers_cn/gather_tree_cn.rst - layers_cn/gaussian_random_batch_size_like_cn.rst layers_cn/gaussian_random_cn.rst layers_cn/gelu_cn.rst layers_cn/generate_mask_labels_cn.rst @@ -126,6 +126,7 @@ fluid.layers layers_cn/get_tensor_from_selected_rows_cn.rst layers_cn/greater_equal_cn.rst layers_cn/greater_than_cn.rst + layers_cn/GreedyEmbeddingHelper_cn.rst layers_cn/grid_sampler_cn.rst layers_cn/group_norm_cn.rst layers_cn/gru_unit_cn.rst @@ -244,6 +245,7 @@ fluid.layers layers_cn/rsqrt_cn.rst layers_cn/RNNCell_cn.rst layers_cn/sampled_softmax_with_cross_entropy_cn.rst + 
layers_cn/SampleEmbeddingHelper_cn.rst layers_cn/sampling_id_cn.rst layers_cn/scale_cn.rst layers_cn/scatter_cn.rst @@ -310,10 +312,10 @@ fluid.layers layers_cn/thresholded_relu_cn.rst layers_cn/topk_cn.rst layers_cn/transpose_cn.rst + layers_cn/TrainingHelper_cn.rst layers_cn/unfold_cn.rst layers_cn/Uniform_cn.rst layers_cn/uniform_random_cn.rst - layers_cn/uniform_random_batch_size_like_cn.rst layers_cn/unique_cn.rst layers_cn/unique_with_counts_cn.rst layers_cn/unsqueeze_cn.rst diff --git a/doc/fluid/api_cn/layers_cn/BasicDecoder_cn.rst b/doc/fluid/api_cn/layers_cn/BasicDecoder_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..dd3820d852961be4903b9e7b6f0f10ca1eac35b8 --- /dev/null +++ b/doc/fluid/api_cn/layers_cn/BasicDecoder_cn.rst @@ -0,0 +1,80 @@ +.. _cn_api_fluid_layers_BasicDecoder: + +BasicDecoder +------------------------------- + + +.. py:class:: paddle.fluid.layers.BasicDecoder(cell, helper, output_fn=None) + +BasicDecoder是 :ref:`cn_api_fluid_layers_Decoder` 的子类,它组装了 :ref:`cn_api_fluid_layers_RNNCell` 和 :ref:`cn_api_fluid_layers_DecodeHelper` 的实例作为成员,其中DecodeHelper用来实现不同的解码策略。它依次执行以下步骤来完成单步解码: + +1. 执行 :code:`cell_outputs, cell_states = cell.call(inputs, states)` 以获取输出和新的状态。 + +2. 执行 :code:`sample_ids = helper.sample(time, cell_outputs, cell_states)` 以采样id并将其作为当前步的解码结果。 + +3. 执行 :code:`finished, next_inputs, next_states = helper.next_inputs(time, cell_outputs, cell_states, sample_ids)` 以产生下一解码步的结束标识、输入和状态。 + +参数: + - **cell** (RNNCell) - RNNCell的实例或者具有相同接口定义的对象。 + - **helper** (DecodeHelper) - DecodeHelper的实例。 + - **output_fn** (可选) - 处理cell输出的接口,在采样之前使用。默认值None。 + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.layers as layers + + start_tokens = fluid.data(name="start_tokens", + shape=[None], + dtype="int64") + + trg_embeder = lambda x: fluid.embedding( + x, size=[10000, 128], param_attr=fluid.ParamAttr(name="trg_embedding")) + output_layer = lambda x: layers.fc(x, + size=10000, + num_flatten_dims=len(x.shape) - 1, + param_attr=fluid.ParamAttr(name= + "output_w"), + bias_attr=False) + helper = layers.SampleEmbeddingHelper(trg_embeder, start_tokens=start_tokens, end_token=1) + decoder_cell = layers.GRUCell(hidden_size=128) + decoder = layers.BasicDecoder(decoder_cell, helper, output_fn=output_layer) + outputs = layers.dynamic_decode( + decoder=decoder, inits=decoder_cell.get_initial_states(start_tokens)) + +.. py:method:: initialize(initial_cell_states) + +初始化,包括helper的初始化和cell的初始化,cell初始化直接使用 :code:`initial_cell_states` 作为结果。 + +参数: + - **initial_cell_states** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构。这是由调用者 :ref:`cn_api_fluid_layers_dynamic_decode` 提供的参数。 + +返回::code:`(initial_inputs, initial_states, finished)` 的三元组。 :code:`initial_inputs, initial_states` 均是单个tensor变量或tensor变量组成的嵌套结构, :code:`finished` 是bool类型的tensor。 :code:`initial_inputs, finished` 与 :code:`helper.initialize()` 返回的内容相同; :code:`initial_states` 与输入参数中的 :code:`initial_cell_states` 的相同。 + +返回类型:tuple + +.. py:class:: OutputWrapper(cell_outputs, sample_ids) + + :code:`step()` 的返回值中 :code:`outputs` 使用的数据结构,是一个由 :code:`cell_outputs` 和 :code:`sample_ids` 这两个字段构成的命名元组。 + +.. py:method:: step(time, inputs, states, **kwargs) + +按照以下步骤执行单步解码: + +1. 执行 :code:`cell_outputs, cell_states = cell.call(inputs, states)` 以获取输出和新的状态。 + +2. 执行 :code:`sample_ids = helper.sample(time, cell_outputs, cell_states)` 以采样id并将其作为当前步的解码结果。 + +3. 
执行 :code:`finished, next_inputs, next_states = helper.next_inputs(time, cell_outputs, cell_states, sample_ids)` 以产生下一解码步的结束标识、输入和状态。 + +参数: + - **time** (Variable) - 调用者提供的形状为[1]的tensor,表示当前解码的时间步长。其数据类型为int64。 + - **inputs** (Variable) - tensor变量。在第一个解码时间步时与由 :code:`initialize()` 返回的 :code:`initial_inputs` 相同,其他时间步与由 :code:`step()` 返回的 :code:`next_inputs` 相同。 + - **states** (Variable) - tensor变量的结构。在第一个解码时间步时与 :code:`initialize()` 返回的 :code:`initial_states` 相同,其他时间步与由 :code:`step()` 返回的 :code:`next_states` 相同。 + - **kwargs** - 附加的关键字参数,由调用者 :ref:`cn_api_fluid_layers_dynamic_decode` 提供。 + +返回: :code:`(outputs, next_states, next_inputs, finished)` 的四元组。 :code:`outputs` 是包含 :code:`cell_outputs` 和 :code:`sample_ids` 两个字段的命名元组,其中 :code:`cell_outputs` 是 :code:`cell.call()` 的结果, :code:`sample_ids` 是 :code:`helper.sample()` 的结果; :code:`next_states, next_inputs` 分别和输入参数中的 :code:`states, inputs` 有相同的的结构、形状和数据类型; :code:`finished` 是一个bool类型的tensor,形状是 :math:`[batch\_size]` 。 + +返回类型:tuple diff --git a/doc/fluid/api_cn/layers_cn/BeamSearchDecoder_cn.rst b/doc/fluid/api_cn/layers_cn/BeamSearchDecoder_cn.rst index 45c372bc2d13ad6a17ffde72d07d7bfc00087e34..d62d05ae86bda97df4fe06e328653df5251db4cd 100644 --- a/doc/fluid/api_cn/layers_cn/BeamSearchDecoder_cn.rst +++ b/doc/fluid/api_cn/layers_cn/BeamSearchDecoder_cn.rst @@ -4,9 +4,12 @@ BeamSearchDecoder ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:class:: paddle.fluid.layers.BeamSearchDecoder(cell, start_token, end_token, beam_size, embedding_fn=None, output_fn=None) + +:api_attr: 声明式编程模式(静态图) + + 带beam search解码策略的解码器。该接口包装一个cell来计算概率,然后执行一个beam search步骤计算得分,并为每个解码步骤选择候选输出。更多详细信息请参阅 `Beam search `_ @@ -17,7 +20,7 @@ BeamSearchDecoder - **start_token** (int) - 起始标记id。 - **end_token** (int) - 结束标记id。 - **beam_size** (int) - 在beam search中使用的beam宽度。 - - **embedding_fn** (可选) - 处理选中的候选id的接口。通常,它是一个将词id转换为词嵌入的嵌入层,函数的返回值作为 :code:`cell.call` 接口的 :code:`input` 参数。如果 :code:`embedding_fn` 未提供,则必须在 :code:`cell.call` 中实现词嵌入转换。默认值None。 + - **embedding_fn** (可选) - 处理选中的候选id的接口。它通常是一个将词id转换为词嵌入的嵌入层,其返回值将作为 :code:`cell.call` 接口的 :code:`input` 参数。**注意** ,这里要使用 :ref:`cn_api_fluid_embedding` 而非 :ref:`cn_api_fluid_layers_embedding`,因为选中的id的形状是 :math:`[batch\_size, beam\_size]` ,如果使用后者则还需要在这里提供unsqueeze。如果 :code:`embedding_fn` 未提供,则必须在 :code:`cell.call` 中实现词嵌入转换。默认值None。 - **output_fn** (可选) - 处理cell输出的接口,在计算得分和选择候选标记id之前使用。默认值None。 **示例代码** @@ -82,8 +85,7 @@ BeamSearchDecoder 此函数输入形状为 :math:`[batch\_size,s_0,s_1,...]` 的tensor t,由minibatch中的样本 :math:`t[0],...,t[batch\_size-1]` 组成。将其扩展为形状 :math:`[ batch\_size,beam\_size,s_0,s_1,...]` 的tensor,由 :math:`t[0],t[0],...,t[1],t[1],...` 组成,其中每个minibatch中的样本重复 :math:`beam\_size` 次。 参数: - - **probs** (Variable) - 形状为 :math:`[batch\_size,beam\_size,vocab\_size]` 的tensor,表示对数概率。其数据类型应为float32。 - - **finish** (Variable) - 形状为 :math:`[batch\_size,beam\_size]` 的tensor,表示所有beam的完成状态。其数据类型应为bool。 + - **x** (Variable) - 形状为 :math:`[batch\_size, ...]` 的tenosr。数据类型应为float32,float64,int32,int64或bool。 返回:具有与 :code:`x` 相同的形状和数据类型的tensor,其中未完成的beam保持不变,而已完成的beam被替换成特殊的tensor(tensor中所有概率质量被分配给EOS标记)。 @@ -121,7 +123,7 @@ BeamSearchDecoder 参数: - **initial_cell_states** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构。调用者提供的参数。 -返回:一个元组 :code:`(initial_inputs, initial_states, finished)`。:code:`initial_inputs` 是一个tensor,当 :code:`embedding_fn` 为None时,由 :code:`start_token` 填充,形状为 :math:`[batch\_size,beam\_size,1]` ;否则使用 :code:`embedding_fn(t)` 返回的值。:code:`initial_states` 是tensor变量的嵌套结构(命名元组,字段包括 :code:`cell_states,log_probs,finished,lengths`),其中 
:code:`log_probs,finished,lengths` 都含有一个tensor,形状为 :math:`[batch\_size, beam\_size]`,数据类型为float32,bool,int64。:code:`cell_states` 具有与输入参数 :code:`initial_cell_states` 相同结构的值,但形状扩展为 :math:`[batch\_size,beam\_size,...]`。 :code:`finished` 是一个布尔型tensor,由False填充,形状为 :math:`[batch\_size,beam\_size]`。 +返回:一个元组 :code:`(initial_inputs, initial_states, finished)`。:code:`initial_inputs` 是一个tensor,当 :code:`embedding_fn` 为None时,该tensor t的形状为 :math:`[batch\_size,beam\_size]` ,值为 :code:`start_token` ;否则使用 :code:`embedding_fn(t)` 返回的值。:code:`initial_states` 是tensor变量的嵌套结构(命名元组,字段包括 :code:`cell_states,log_probs,finished,lengths`),其中 :code:`log_probs,finished,lengths` 都含有一个tensor,形状为 :math:`[batch\_size, beam\_size]`,数据类型为float32,bool,int64。:code:`cell_states` 具有与输入参数 :code:`initial_cell_states` 相同结构的值,但形状扩展为 :math:`[batch\_size,beam\_size,...]`。 :code:`finished` 是一个布尔型tensor,由False填充,形状为 :math:`[batch\_size,beam\_size]`。 返回类型:tuple @@ -133,7 +135,7 @@ BeamSearchDecoder - **time** (Variable) - 调用者提供的形状为[1]的tensor,表示当前解码的时间步长。其数据类型为int64。 - **logits** (Variable) - 形状为 :math:`[batch\_size,beam\_size,vocab\_size]` 的tensor,表示当前时间步的logits。其数据类型为float32。 - **next_cell_states** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构。它的结构,形状和数据类型与 :code:`initialize()` 的返回值 :code:`initial_states` 中的 :code:`cell_states` 相同。它代表该cell的下一个状态。 - - **beam_state** (Variable) - tensor变量的结构。在第一个解码步骤与 :code:`initialize()` 返回的 :code:`initial_states` 同,其他步骤与 :code:`initialize()` 返回的 :code:`beam_search_state` 相同。 + - **beam_state** (Variable) - tensor变量的结构。在第一个解码步骤与 :code:`initialize()` 返回的 :code:`initial_states` 同,其他步骤与 :code:`step()` 返回的 :code:`beam_search_state` 相同。 返回:一个元组 :code:`(beam_search_output, beam_search_state)`。:code:`beam_search_output` 是tensor变量的命名元组,字段为 :code:`scores,predicted_ids parent_ids`。其中 :code:`scores,predicted_ids,parent_ids` 都含有一个tensor,形状为 :math:`[batch\_size,beam\_size]`,数据类型为float32 ,int64,int64。:code:`beam_search_state` 具有与输入参数 :code:`beam_state` 相同的结构,形状和数据类型。 @@ -144,9 +146,9 @@ BeamSearchDecoder 执行beam search解码步骤,该步骤使用 :code:`cell` 来计算概率,然后执行beam search步骤以计算得分并选择候选标记ID。 参数: - - **time** (Variable) - 调用者提供的形状为[1]的int64tensor,表示当前解码的时间步长。 + - **time** (Variable) - 调用者提供的形状为[1]的tensor,表示当前解码的时间步长。其数据类型为int64。。 - **inputs** (Variable) - tensor变量。在第一个解码时间步时与由 :code:`initialize()` 返回的 :code:`initial_inputs` 相同,其他时间步与由 :code:`step()` 返回的 :code:`next_inputs` 相同。 - - **States** (Variable) - tensor变量的结构。在第一个解码时间步时与 :code:`initialize()` 返回的 :code:`initial_states` 相同,其他时间步与由 :code:`step()` 返回的 :code:`beam_search_state` 相同。 + - **states** (Variable) - tensor变量的结构。在第一个解码时间步时与 :code:`initialize()` 返回的 :code:`initial_states` 相同,其他时间步与由 :code:`step()` 返回的 :code:`beam_search_state` 相同。 - **kwargs** - 附加的关键字参数,由调用者提供。 返回:一个元组 :code:`(beam_search_output,beam_search_state,next_inputs,finish)` 。:code:`beam_search_state` 和参数 :code:`states` 具有相同的结构,形状和数据类型。 :code:`next_inputs` 与输入参数 :code:`inputs` 具有相同的结构,形状和数据类型。 :code:`beam_search_output` 是tensor变量的命名元组(字段包括 :code:`scores,predicted_ids,parent_ids` ),其中 :code:`scores,predicted_ids,parent_ids` 都含有一个tensor,形状为 :math:`[batch\_size,beam\_size]`,数据类型为float32 ,int64,int64。:code:`finished` 是一个bool类型的tensor,形状为 :math:`[batch\_size,beam\_size]`。 @@ -165,12 +167,3 @@ BeamSearchDecoder 返回:一个元组 :code:`(predicted_ids, final_states)`。:code:`predicted_ids` 是一个tensor,形状为 :math:`[time\_step,batch\_size,beam\_size]`,数据类型为int64。:code:`final_states` 与输入参数 :code:`final_states` 相同。 返回类型:tuple - -.. 
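+
+需要补充说明的是,上述 :code:`initialize()` 、 :code:`step()` 与 :code:`finalize()` 通常由 :ref:`cn_api_fluid_layers_dynamic_decode` 在解码循环中自动调用,用户一般只需构造好decoder并使用其返回结果。下面给出一个最小的调用示意,其中词表大小、隐层大小、起止标记等数值均为举例假设:
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+    import paddle.fluid.layers as layers
+
+    # 编码器输出,形状 [batch_size, 序列长度, 隐层大小] 仅作为示例假设
+    encoder_output = fluid.data(name="encoder_output",
+                                shape=[None, 32, 128],
+                                dtype="float32")
+    trg_embeder = lambda x: fluid.embedding(
+        x, size=[10000, 128], param_attr=fluid.ParamAttr(name="trg_embedding"))
+    output_layer = lambda x: layers.fc(x,
+                                       size=10000,
+                                       num_flatten_dims=len(x.shape) - 1,
+                                       param_attr=fluid.ParamAttr(name="output_w"),
+                                       bias_attr=False)
+    decoder_cell = layers.GRUCell(hidden_size=128)
+    decoder = layers.BeamSearchDecoder(decoder_cell,
+                                       start_token=0,
+                                       end_token=1,
+                                       beam_size=4,
+                                       embedding_fn=trg_embeder,
+                                       output_fn=output_layer)
+    # dynamic_decode 内部依次调用 initialize/step,并在解码结束后调用 finalize
+    outputs = layers.dynamic_decode(
+        decoder=decoder,
+        inits=decoder_cell.get_initial_states(encoder_output),
+        max_step_num=10)
+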
py:method:: output_dtype() - -用于beam search输出的数据类型的嵌套结构。它是一个命名元组,字段包括 :code:`scores, predicted_ids, parent_ids`。 - -参数:无。 - -返回:用于beam search输出的数据类型的命名元组。 - diff --git a/doc/fluid/api_cn/layers_cn/Categorical_cn.rst b/doc/fluid/api_cn/layers_cn/Categorical_cn.rst index 7cf79dbbf9e9d19ffc4997e0cbadf52207bfe156..9265a666d8460b9b37679ae73f1e5a653e3576f3 100644 --- a/doc/fluid/api_cn/layers_cn/Categorical_cn.rst +++ b/doc/fluid/api_cn/layers_cn/Categorical_cn.rst @@ -5,6 +5,9 @@ Categorical .. py:class:: paddle.fluid.layers.Categorical(logits) + + + 类别分布是一种离散概率分布,其随机变量可以取K个相互独立类别的其中一个。 概率质量函数(pmf)为: diff --git a/doc/fluid/api_cn/layers_cn/DecodeHelper_cn.rst b/doc/fluid/api_cn/layers_cn/DecodeHelper_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..14ad49c37982245c138bb04b7377d9b40edc6fa1 --- /dev/null +++ b/doc/fluid/api_cn/layers_cn/DecodeHelper_cn.rst @@ -0,0 +1,44 @@ +.. _cn_api_fluid_layers_DecodeHelper: + +DecodeHelper +------------------------------- + + +.. py:class:: paddle.fluid.layers.DecodeHelper() + +DecodeHelper是一个基类,其子类的实例将在 :ref:`cn_api_fluid_layers_BasicDecoder` 中使用。它提供了在动态解码时采样和产生下一解码步的输入的接口。 + +.. py:method:: initialize() + +初始化以产生第一个解码步的输入和每个序列是否结束的初始标识。这是 :ref:`cn_api_fluid_layers_BasicDecoder` 初始化的一部分。 + +返回::code:`(initial_inputs, initial_finished)` 的二元组, :code:`initial_inputs` 是单个tensor变量或tensor变量组成的嵌套结构,tensor的形状是 :math:`[batch\_size, ...]` 。 :code:`initial_finished` 是一个bool类型且形状为 :math:`[batch\_size]` 的tensor。 + +返回类型:tuple + +.. py:method:: sample(time, outputs, states) + +根据 :code:`outputs` 以特定的方式进行采样,该方法是 :code:`BasicDecoder.step` 中的一部分。 + +参数: + - **time** (Variable) - 调用者提供的形状为[1]的tensor,表示当前解码的时间步长。其数据类型为int64。 + - **outputs** (Variable) - tensor变量,通常其数据类型为float32或float64,形状为 :math:`[batch\_size, vocabulary\_size]` ,表示当前解码步预测产生的logit(未归一化的概率),和由 :code:`BasicDecoder.output_fn(BasicDecoder.cell.call())` 返回的 :code:`outputs` 是同一内容。 + - **states** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构,和由 :code:`BasicDecoder.cell.call()` 返回的 :code:`new_states` 是同一内容。 + +返回:数据类型为int64形状为 :math:`[batch\_size]` 的tensor,表示采样得到的id。 + +返回类型:Variable + +.. py:method:: next_inputs(time, outputs, states, sample_ids) + +产生下一解码步的输入、状态,以及每个序列是否结束的标识。该方法是 :code:`BasicDecoder.step` 中的一部分。 + +参数: + - **time** (Variable) - 调用者提供的形状为[1]的tensor,表示当前解码的时间步长。其数据类型为int64。 + - **outputs** (Variable) - tensor变量,通常其数据类型为float32或float64,形状为 :math:`[batch\_size, vocabulary\_size]` ,表示当前解码步预测产生的logit(未归一化的概率),和由 :code:`BasicDecoder.output_fn(BasicDecoder.cell.call())` 返回的 :code:`outputs` 是同一内容。 + - **states** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构,和由 :code:`BasicDecoder.cell.call()` 返回的 :code:`new_states` 是同一内容。 + - **sample_ids** (Variable) - 数据类型为int64形状为 :math:`[batch\_size]` 的tensor,和由 :code:`sample()` 返回的 :code:`sample_ids` 是同一内容。 + +返回: :code:`(finished, next_inputs, next_states)` 的三元组。 :code:`next_inputs, next_states` 均是单个tensor变量或tensor变量组成的嵌套结构, :code:`next_states` 和输入参数中的 :code:`states` 具有相同的结构、形状和数据类型; :code:`finished` 是一个bool类型且形状为 :math:`[batch\_size]` 的tensor。 + +返回类型:tuple diff --git a/doc/fluid/api_cn/layers_cn/Decoder_cn.rst b/doc/fluid/api_cn/layers_cn/Decoder_cn.rst index 893119253f16ceb445cdbcac726a054a8e5e8ad3..ffe67dc97342f0ef561d0350c38806ed8bd15ce5 100644 --- a/doc/fluid/api_cn/layers_cn/Decoder_cn.rst +++ b/doc/fluid/api_cn/layers_cn/Decoder_cn.rst @@ -4,10 +4,13 @@ Decoder ------------------------------- -**注意:该API仅支持【静态图】模式** .. 
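+
+另外,针对前文 :ref:`cn_api_fluid_layers_DecodeHelper` 基类的接口约定( :code:`initialize` 、 :code:`sample` 、 :code:`next_inputs` ),这里补充一个自定义helper的最小示意,便于理解各接口的输入输出。类名、采样方式及其中的变量取值均为举例假设,并非框架自带的实现:
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+    import paddle.fluid.layers as layers
+
+    class MyArgmaxHelper(layers.DecodeHelper):
+        # 一个假设的自定义helper:对logits取argmax作为采样结果,
+        # 并将采样id经embedding后作为下一解码步的输入
+        def __init__(self, embedding_fn, start_tokens, end_token):
+            self.embedding_fn = embedding_fn
+            self.start_tokens = start_tokens
+            # 将 end_token 转为形状[1]的tensor,便于与采样结果逐元素比较
+            self.end_token = layers.fill_constant(
+                shape=[1], dtype="int64", value=end_token)
+
+        def initialize(self):
+            initial_inputs = self.embedding_fn(self.start_tokens)
+            # 初始时所有序列均未结束
+            initial_finished = layers.cast(
+                layers.zeros_like(self.start_tokens), "bool")
+            return initial_inputs, initial_finished
+
+        def sample(self, time, outputs, states):
+            # outputs 形状为 [batch_size, vocabulary_size],取每行最大值的下标
+            return layers.argmax(outputs, axis=1)
+
+        def next_inputs(self, time, outputs, states, sample_ids):
+            # 采样到 end_token 的序列视为结束;状态原样传递给下一步
+            finished = layers.equal(sample_ids, self.end_token)
+            next_inputs = self.embedding_fn(sample_ids)
+            return finished, next_inputs, states
+
+    # 构造方式与内置helper类似,可传给 BasicDecoder 使用
+    start_tokens = fluid.data(name="start_tokens", shape=[None], dtype="int64")
+    trg_embeder = lambda x: fluid.embedding(x, size=[10000, 128])
+    helper = MyArgmaxHelper(trg_embeder, start_tokens, end_token=1)
+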
py:class:: paddle.fluid.layers.Decoder() +:api_attr: 声明式编程模式(静态图) + + + Decoder是dynamic_decode中使用的任何decoder实例的基类。它提供了为每一个时间步生成输出的接口,可用于生成序列。 @@ -36,13 +39,28 @@ Decoder提供的主要抽象为: 返回类型:tuple -.. py:method:: step(time, inputs, states) +.. py:method:: step(time, inputs, states, **kwargs) 在解码的每个时间步中被调用的接口 参数: - - **outputs** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构。 结构和数据类型与 :code:`output_dtype` 相同。 tensor堆叠所有时间步长的输出从而具有shape :math:`[time\_step,batch\_size,...]` ,由调用者完成。 - - **final_states** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构。 它是 :code:`decoder.step` 在最后一个解码步返回的 :code:`next_states`, 因此具有与任何时间步长的状态相同的结构,形状和数据类型。 + - **time** (Variable) - 调用者提供的形状为[1]的tensor,表示当前解码的时间步长。其数据类型为int64。。 + - **inputs** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构。在第一个解码时间步时与由 :code:`initialize()` 返回的 :code:`initial_inputs` 相同,其他时间步与由 :code:`step()` 返回的 :code:`next_inputs` 相同。 + - **states** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构。在第一个解码时间步时与 :code:`initialize()` 返回的 :code:`initial_states` 相同,其他时间步与由 :code:`step()` 返回的 :code:`beam_search_state` 相同。 + - **kwargs** - 附加的关键字参数,由调用者提供。 + +返回:一个元组 :code:`(outputs, next_states, next_inputs, finished)` 。:code:`next_states` 和 :code:`next_inputs` 都是单个tensor变量或tensor变量组成的嵌套结构,且结构、形状和数据类型均分别与输入参数中的 :code:`states` 和 :code:`inputs` 相同。 :code:`outputs` 是单个tensor变量或tensor变量组成的嵌套结构。 :code:`finished` 是一个bool类型的tensor变量。 + +返回类型:tuple + +.. py:method:: finalize(self, outputs, final_states, sequence_lengths) + +如果提供了实现,将在整个解码迭代结束后被执行一次。 + +参数: + - **outputs** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构。 其中每个tensor的形状均为 :math:`[time\_step,batch\_size,...]` ,是将所有解码步中与其对应的的输出进行堆叠的结果,这个过程由其调用者完成。 + - **final_states** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构。 它是 :code:`decoder.step` 在最后一个解码步返回的 :code:`next_states`, 因此具有与任何时间步的状态相同的结构,形状和数据类型。 + - **kwargs** - 命名关键字参数,由提供调用者。 返回:一个元组 :code:`(final_outputs, final_states)` 。:code:`final_outputs` 和 :code:`final_states` 都是单个tensor变量或tensor变量组成的嵌套结构。 diff --git a/doc/fluid/api_cn/layers_cn/DynamicRNN_cn.rst b/doc/fluid/api_cn/layers_cn/DynamicRNN_cn.rst index 50f6f41880b7473dad630765958ecb0f368a7127..e082d6bf6e58467803050e426a8fa1cc1fcc9193 100644 --- a/doc/fluid/api_cn/layers_cn/DynamicRNN_cn.rst +++ b/doc/fluid/api_cn/layers_cn/DynamicRNN_cn.rst @@ -3,10 +3,13 @@ DynamicRNN =================== -**注意:该API仅支持【静态图】模式** .. py:class:: paddle.fluid.layers.DynamicRNN(name=None) +:api_attr: 声明式编程模式(静态图) + + + **注意:该类型的输入仅支持LoDTensor,如果您需要处理的输入数据是Tensor类型, 请使用StaticRNN( fluid.layers.** :ref:`cn_api_fluid_layers_StaticRNN` **)。** diff --git a/doc/fluid/api_cn/layers_cn/GRUCell_cn.rst b/doc/fluid/api_cn/layers_cn/GRUCell_cn.rst index 3acb79d737531d6f2648081c25336d636e119611..a714757be122b2c7dd2cd936f809725991807996 100644 --- a/doc/fluid/api_cn/layers_cn/GRUCell_cn.rst +++ b/doc/fluid/api_cn/layers_cn/GRUCell_cn.rst @@ -3,9 +3,12 @@ GRUCell ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:class:: paddle.fluid.layers.GRUCell(hidden_size, param_attr=None, bias_attr=None, gate_activation=None, activation=None, dtype="float32", name="GRUCell") + +:api_attr: 声明式编程模式(静态图) + + 门控循环单元(Gated Recurrent Unit)。通过对 :code:`fluid.contrib.layers.rnn_impl.BasicGRUUnit` 包装,来让它可以应用于RNNCell。 @@ -35,7 +38,7 @@ GRUCell .. code-block:: python import paddle.fluid.layers as layers - cell = layers.rnn.GRUCell(hidden_size=256) + cell = layers.GRUCell(hidden_size=256) .. 
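+
+上面的示例只构造了cell本身。作为补充,下面给出一个把该cell作用于整条输入序列的最小示意(输入形状等数值均为举例假设),其中cell的 :code:`call()` 会在每个时间步被 ``fluid.layers.rnn`` 自动调用:
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+    import paddle.fluid.layers as layers
+
+    # 形状为 [batch_size, 序列长度, 输入大小] 的输入序列,数值仅为示例假设
+    inputs = fluid.data(name="inputs", shape=[None, 32, 128], dtype="float32")
+    cell = layers.GRUCell(hidden_size=256)
+    # rnn 沿时间维逐步调用 cell.call(),返回所有时间步的输出和最终状态
+    outputs, final_states = layers.rnn(cell=cell, inputs=inputs)
+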
py:method:: call(inputs, states) diff --git a/doc/fluid/api_cn/layers_cn/GreedyEmbeddingHelper_cn.rst b/doc/fluid/api_cn/layers_cn/GreedyEmbeddingHelper_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a01e1ab3e575b54855d774e29057ffc2b7d04a8f --- /dev/null +++ b/doc/fluid/api_cn/layers_cn/GreedyEmbeddingHelper_cn.rst @@ -0,0 +1,74 @@ +.. _cn_api_fluid_layers_GreedyEmbeddingHelper: + +GreedyEmbeddingHelper +------------------------------- + + +.. py:class:: paddle.fluid.layers.GreedyEmbeddingHelper(embedding_fn, start_tokens, end_token) + +GreedyEmbeddingHelper是 :ref:`cn_api_fluid_layers_DecodeHelper` 的子类。作为解码helper,它使用 :code:`argmax` 进行采样,并将采样结果送入embedding层,以此作为下一解码步的输入。 + +参数: + - **embedding_fn** (callable) - 作用于 :code:`argmax` 结果的函数,通常是一个将词id转换为词嵌入的embedding层,**注意** ,这里要使用 :ref:`cn_api_fluid_embedding` 而非 :ref:`cn_api_fluid_layers_embedding`,因为选中的id的形状是 :math:`[batch\_size]` ,如果使用后者则还需要在这里提供unsqueeze。 + - **start_tokens** (Variable) - 形状为 :math:`[batch\_size]` 、数据类型为int64、 值为起始标记id的tensor。 + - **end_token** (int) - 结束标记id。 + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.layers as layers + + start_tokens = fluid.data(name="start_tokens", + shape=[None], + dtype="int64") + + trg_embeder = lambda x: fluid.embedding( + x, size=[10000, 128], param_attr=fluid.ParamAttr(name="trg_embedding")) + output_layer = lambda x: layers.fc(x, + size=10000, + num_flatten_dims=len(x.shape) - 1, + param_attr=fluid.ParamAttr(name= + "output_w"), + bias_attr=False) + helper = layers.GreedyEmbeddingHelper(trg_embeder, start_tokens=start_tokens, end_token=1) + decoder_cell = layers.GRUCell(hidden_size=128) + decoder = layers.BasicDecoder(decoder_cell, helper, output_fn=output_layer) + outputs = layers.dynamic_decode( + decoder=decoder, inits=decoder_cell.get_initial_states(start_tokens)) + +.. py:method:: initialize() + +GreedyEmbeddingHelper初始化,其使用构造函数中的 :code:`start_tokens` 作为第一个解码步的输入,并给出每个序列是否结束的初始标识。这是 :ref:`cn_api_fluid_layers_BasicDecoder` 初始化的一部分。 + +返回::code:`(initial_inputs, initial_finished)` 的二元组, :code:`initial_inputs` 同构造函数中的 :code:`start_tokens` ; :code:`initial_finished` 是一个bool类型、值为False的tensor,其形状和 :code:`start_tokens` 相同。 + +返回类型:tuple + +.. py:method:: sample(time, outputs, states) + +使用 :code:`argmax` 根据 `outputs` 进行采样。 + +参数: + - **time** (Variable) - 调用者提供的形状为[1]的tensor,表示当前解码的时间步长。其数据类型为int64。 + - **outputs** (Variable) - tensor变量,通常其数据类型为float32或float64,形状为 :math:`[batch\_size, vocabulary\_size]` ,表示当前解码步预测产生的logit(未归一化的概率),和由 :code:`BasicDecoder.output_fn(BasicDecoder.cell.call())` 返回的 :code:`outputs` 是同一内容。 + - **states** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构,和由 :code:`BasicDecoder.cell.call()` 返回的 :code:`new_states` 是同一内容。 + +返回:数据类型为int64形状为 :math:`[batch\_size]` 的tensor,表示采样得到的id。 + +返回类型:Variable + +.. 
py:method:: next_inputs(time, outputs, states, sample_ids) + +对 :code:`sample_ids` 使用 :code:`embedding_fn` ,以此作为下一解码步的输入;同时直接使用输入参数中的 :code:`states` 作为下一解码步的状态;并通过判别 :code:`sample_ids` 是否得到 :code:`end_token`,依此产生每个序列是否结束的标识。 + +参数: + - **time** (Variable) - 调用者提供的形状为[1]的tensor,表示当前解码的时间步长。其数据类型为int64。 + - **outputs** (Variable) - tensor变量,通常其数据类型为float32或float64,形状为 :math:`[batch\_size, vocabulary\_size]` ,表示当前解码步预测产生的logit(未归一化的概率),和由 :code:`BasicDecoder.output_fn(BasicDecoder.cell.call())` 返回的 :code:`outputs` 是同一内容。 + - **states** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构,和由 :code:`BasicDecoder.cell.call()` 返回的 :code:`new_states` 是同一内容。 + - **sample_ids** (Variable) - 数据类型为int64形状为 :math:`[batch\_size]` 的tensor,和由 :code:`sample()` 返回的 :code:`sample_ids` 是同一内容。 + +返回: :code:`(finished, next_inputs, next_states)` 的三元组。 :code:`next_inputs, next_states` 均是单个tensor变量或tensor变量组成的嵌套结构,tensor的形状是 :math:`[batch\_size, ...]` , :code:`next_states` 和输入参数中的 :code:`states` 相同; :code:`finished` 是一个bool类型且形状为 :math:`[batch\_size]` 的tensor。 + +返回类型:tuple diff --git a/doc/fluid/api_cn/layers_cn/IfElse_cn.rst b/doc/fluid/api_cn/layers_cn/IfElse_cn.rst index 295fd454fbcdde779b1f9c98ce69902a7857b724..2ba7ff3b9eef50d97ba80f5e323df16ce7d6e815 100644 --- a/doc/fluid/api_cn/layers_cn/IfElse_cn.rst +++ b/doc/fluid/api_cn/layers_cn/IfElse_cn.rst @@ -3,10 +3,13 @@ IfElse ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:class:: paddle.fluid.layers.IfElse(cond, name=None) +:api_attr: 声明式编程模式(静态图) + + + 该类用于实现IfElse分支控制功能, IfElse包含两个Block,true_block,false_block,IfElse会将满足True或False条件的数据分别放入不同的block运行。 cond是一个shape为[N, 1]、数据类型为bool的2-D tensor,表示输入数据对应部分的执行条件。 diff --git a/doc/fluid/api_cn/layers_cn/LSTMCell_cn.rst b/doc/fluid/api_cn/layers_cn/LSTMCell_cn.rst index 09d21492757c836fe1e4799e586e6b6d7001a204..183dd4ff210000f8c9138d18d402d431d245dd86 100644 --- a/doc/fluid/api_cn/layers_cn/LSTMCell_cn.rst +++ b/doc/fluid/api_cn/layers_cn/LSTMCell_cn.rst @@ -4,9 +4,12 @@ LSTMCell ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:class:: paddle.fluid.layers.LSTMCell(hidden_size, param_attr=None, bias_attr=None, gate_activation=None, activation=None, forget_bias=1.0, dtype="float32", name="LSTMCell") + +:api_attr: 声明式编程模式(静态图) + + 长短期记忆单元(Long-Short Term Memory)。通过对 :code:`fluid.contrib.layers.rnn_impl.BasicLSTMUnit` 包装,来让它可以应用于RNNCell。 @@ -38,7 +41,7 @@ LSTMCell .. code-block:: python import paddle.fluid.layers as layers - cell = layers.rnn.LSTMCell(hidden_size=256) + cell = layers.LSTMCell(hidden_size=256) .. py:method:: call(inputs, states) @@ -61,4 +64,4 @@ LSTMCell的 :code:`state_shape` 是一个具有两个形状的列表::math:`[[ 返回:LSTMCell的 :code:`state_shape` -返回类型:list \ No newline at end of file +返回类型:list diff --git a/doc/fluid/api_cn/layers_cn/MultivariateNormalDiag_cn.rst b/doc/fluid/api_cn/layers_cn/MultivariateNormalDiag_cn.rst index 9af102144b2ee832e04a282560034b8b487450c5..63e1d1bb2492d6f56c6ccbd7d7ada4505087ec25 100644 --- a/doc/fluid/api_cn/layers_cn/MultivariateNormalDiag_cn.rst +++ b/doc/fluid/api_cn/layers_cn/MultivariateNormalDiag_cn.rst @@ -5,6 +5,9 @@ MultivariateNormalDiag .. py:class:: paddle.fluid.layers.MultivariateNormalDiag(loc, scale) + + + 多元高斯分布 概率密度函数(pdf)为: diff --git a/doc/fluid/api_cn/layers_cn/Normal_cn.rst b/doc/fluid/api_cn/layers_cn/Normal_cn.rst index d33e568d52bd2b6d2d827cf8bf24216a86474270..ce50e67bd12563ee7c24b6ab4141acf0ccf0c303 100644 --- a/doc/fluid/api_cn/layers_cn/Normal_cn.rst +++ b/doc/fluid/api_cn/layers_cn/Normal_cn.rst @@ -5,6 +5,9 @@ Normal .. 
py:class:: paddle.fluid.layers.Normal(loc, scale) + + + 正态分布 数学公式: @@ -31,7 +34,7 @@ Normal import numpy as np from paddle.fluid import layers - from paddle.fluid.layers import Normal + from paddle.fluid.layers import Normal # 定义参数为float的正态分布。 dist = Normal(loc=0., scale=3.) diff --git a/doc/fluid/api_cn/layers_cn/Print_cn.rst b/doc/fluid/api_cn/layers_cn/Print_cn.rst index 1851dd7145a80cb06d4d37f5098d891e5fe39718..422bab77d425f81f2097ef9e297a7cf50335b1d6 100644 --- a/doc/fluid/api_cn/layers_cn/Print_cn.rst +++ b/doc/fluid/api_cn/layers_cn/Print_cn.rst @@ -3,10 +3,13 @@ Print ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.layers.Print(input, first_n=-1, message=None, summarize=20, print_tensor_name=True, print_tensor_type=True, print_tensor_shape=True, print_tensor_lod=True, print_phase='both') +:api_attr: 声明式编程模式(静态图) + + + **Print操作命令** 该OP创建一个打印操作,打印正在访问的Tensor内容。 diff --git a/doc/fluid/api_cn/layers_cn/RNNCell_cn.rst b/doc/fluid/api_cn/layers_cn/RNNCell_cn.rst index d00a275f949673afa1392a1d5631fdef45451be6..1368e2ac33f57a483ced44c49ccf65aa83671f7a 100644 --- a/doc/fluid/api_cn/layers_cn/RNNCell_cn.rst +++ b/doc/fluid/api_cn/layers_cn/RNNCell_cn.rst @@ -4,9 +4,12 @@ RNNCell ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:class:: paddle.fluid.layers.RNNCell(name=None) + +:api_attr: 声明式编程模式(静态图) + + RNNCell是抽象的基类,代表将输入和状态映射到输出和新状态的计算,主要用于RNN。 .. py:method:: call(inputs, states, **kwargs) @@ -18,11 +21,11 @@ RNNCell是抽象的基类,代表将输入和状态映射到输出和新状态 - **states** - 状态,单个tensor变量或tensor变量组成的嵌套结构。 - **kwargs** - 附加的关键字参数,由调用者提供。          -返回:输出和新状态。输出和新状态都可以是嵌套的tensor变量。新状态必须具有与状态相同的结构。 +返回:包含输出和新状态的二元组 :code:`(outputs,new_states)` 。输出和新状态都可以是嵌套的tensor变量。新状态必须具有与状态相同的结构。 返回类型:tuple -.. py:method:: get_initial_states(batch_ref, shape=None, dtype=None, init_value=0) +.. py:method:: get_initial_states(batch_ref, shape=None, dtype=None, init_value=0, batch_dim_idx=0) 该接口根据提供的形状,数据类型和初始值来初始化状态。 @@ -31,6 +34,7 @@ RNNCell是抽象的基类,代表将输入和状态映射到输出和新状态 - **shape** - 单个形状或形状组成的嵌套结构,单个形状是整数的列表或元组。 如果形状的第一维不是batch大小,则自动插入-1作为batch大小。 如果该项为None,将使用属性 :code:`state_shape`。默认值为None。 - **dtype** - 单个数据类型或由数据类型组成的嵌套结构。该结构必须与shape的结构相同,例外是当状态中的所有tensor都具有相同的数据类型,这时可以使用单个数据类型。 如果是None并且属性 :code:`cell.state_shape` 不可用,则float32将用作数据类型。 默认值为None。 - **init_value** - 用于初始化状态的浮点值。 + - **batch_dim_idx** - 用于指示 :code:`batch_ref` 中batch所在维度的int值,默认值为0。 返回:和shape具有相同结构的tensor变量,代表初始状态。 @@ -38,9 +42,9 @@ RNNCell是抽象的基类,代表将输入和状态映射到输出和新状态 .. py:method:: state_shape() -该接口用于初始化cell的状态。 单个形状或由形状组成的嵌套结构,单个形状可以是整数的列表或元组(如果形状的第一维不是batch大小,则自动插入-1作为batch大小)。 当没有使用 :code:`get_initial_states` 初始化状态或 :code:`get_initial_states` 没有提供 :code:`shape` 参数的时候,不用实现该方法。 +抽象方法(属性),该接口用于初始化cell的状态。 单个形状或由形状组成的嵌套结构,单个形状可以是整数的列表或元组(如果形状的第一维不是batch大小,则自动插入-1作为batch大小)。 当没有使用 :code:`get_initial_states` 初始化状态或 :code:`get_initial_states` 没有提供 :code:`shape` 参数的时候,不用实现该方法。 .. 
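结合上述 :code:`call` 、 :code:`get_initial_states` 与 :code:`state_shape` 接口,下面给出一个自定义cell的简要示意(编者补充的草图,并非官方实现;类名 :code:`SimpleRNNCell` 与其中的参数名均为假设的示例,并假设 :code:`layers.create_parameter` 、 :code:`layers.rnn` 的用法与本文档其他部分一致):

.. code-block:: python

    import paddle.fluid as fluid
    import paddle.fluid.layers as layers

    class SimpleRNNCell(layers.RNNCell):
        """最简单的单状态cell:new_h = tanh(x * W_x + h * W_h),仅作接口示意"""

        def __init__(self, input_size, hidden_size):
            self.hidden_size = hidden_size
            # 参数在构造时创建一次,保证各时间步共享同一组权重
            self.w_x = layers.create_parameter(
                [input_size, hidden_size], dtype="float32", name="simple_cell_w_x")
            self.w_h = layers.create_parameter(
                [hidden_size, hidden_size], dtype="float32", name="simple_cell_w_h")

        def call(self, inputs, states):
            # 输入与前一状态分别做线性变换后相加,经 tanh 得到新状态
            new_hidden = layers.tanh(
                layers.matmul(inputs, self.w_x) + layers.matmul(states, self.w_h))
            # 返回 (outputs, new_states),这里输出与新状态相同
            return new_hidden, new_hidden

        @property
        def state_shape(self):
            # 不含batch维,get_initial_states 会自动插入 -1 作为batch大小
            return [self.hidden_size]

    x = fluid.data(name="x", shape=[None, 10, 32], dtype="float32")
    cell = SimpleRNNCell(input_size=32, hidden_size=64)
    # 依据 batch_ref 的batch维与 state_shape 生成全零初始状态
    init_states = cell.get_initial_states(batch_ref=x, batch_dim_idx=0)
    outputs, final_states = layers.rnn(cell=cell, inputs=x, initial_states=init_states)

这里把参数放在构造函数中创建,是为了保证各时间步复用同一组权重;实际使用中通常直接采用 GRUCell、LSTMCell 等内置cell。

.. 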
py:method:: state_dtype() -该接口用于初始化cell的状态。 单个数据类型或由数据类型组成的嵌套结构,该结构必须与 :code:`shape` 的结构相同,例外是当状态中的所有tensor都具有相同的数据类型,这时可以使用单个数据类型。 当没有使用 :code:`get_initial_states` 初始化状态或 :code:`get_initial_states` 没有提供 :code:`dtype` 参数的时候,不用实现该方法。 +抽象方法(属性),该接口用于初始化cell的状态。 单个数据类型或由数据类型组成的嵌套结构,该结构必须与 :code:`shape` 的结构相同,例外是当状态中的所有tensor都具有相同的数据类型,这时可以使用单个数据类型。 当没有使用 :code:`get_initial_states` 初始化状态或 :code:`get_initial_states` 没有提供 :code:`dtype` 参数的时候,不用实现该方法。 diff --git a/doc/fluid/api_cn/layers_cn/SampleEmbeddingHelper_cn.rst b/doc/fluid/api_cn/layers_cn/SampleEmbeddingHelper_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c38b80052fe9040d84d3ed3ba353e6e02cfe5a9c --- /dev/null +++ b/doc/fluid/api_cn/layers_cn/SampleEmbeddingHelper_cn.rst @@ -0,0 +1,54 @@ +.. _cn_api_fluid_layers_SampleEmbeddingHelper: + +SampleEmbeddingHelper +------------------------------- + + +.. py:class:: paddle.fluid.layers.SampleEmbeddingHelper(embedding_fn, start_tokens, end_token, softmax_temperature=None, seed=None) + +SampleEmbeddingHelper是 :ref:`cn_api_fluid_layers_GreedyEmbeddingHelper` 的子类。作为解码helper,它通过采样而非使用 :code:`argmax` 并将采样结果送入embedding层,以此作为下一解码步的输入。 + +参数: + - **embedding_fn** (callable) - 作用于 :code:`argmax` 结果的函数,通常是一个将词id转换为词嵌入的embedding层,**注意** ,这里要使用 :ref:`cn_api_fluid_embedding` 而非 :ref:`cn_api_fluid_layers_embedding`,因为选中的id的形状是 :math:`[batch\_size]` ,如果使用后者则还需要在这里提供unsqueeze。 + - **start_tokens** (Variable) - 形状为 :math:`[batch\_size]` 、数据类型为int64、 值为起始标记id的tensor。 + - **end_token** (int) - 结束标记id。 + - **softmax_temperature** (float,可选) - 该值用于在softmax计算前除以logits。温度越高(大于1.0)随机性越大,温度越低则越趋向于argmax。该值必须大于0,默认值None等同于1.0。 + - **seed** (int,可选) - 采样使用的随机种子。默认为None,表示不使用固定的随机种子。 + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.layers as layers + + start_tokens = fluid.data(name="start_tokens", + shape=[None], + dtype="int64") + + trg_embeder = lambda x: fluid.embedding( + x, size=[10000, 128], param_attr=fluid.ParamAttr(name="trg_embedding")) + output_layer = lambda x: layers.fc(x, + size=10000, + num_flatten_dims=len(x.shape) - 1, + param_attr=fluid.ParamAttr(name= + "output_w"), + bias_attr=False) + helper = layers.SampleEmbeddingHelper(trg_embeder, start_tokens=start_tokens, end_token=1) + decoder_cell = layers.GRUCell(hidden_size=128) + decoder = layers.BasicDecoder(decoder_cell, helper, output_fn=output_layer) + outputs = layers.dynamic_decode( + decoder=decoder, inits=decoder_cell.get_initial_states(start_tokens)) + +.. py:method:: sample(time, outputs, states) + +根据一个多项分布进行采样,此分布由 :code:`softmax(outputs/softmax_temperature)` 计算得到。 + +参数: + - **time** (Variable) - 调用者提供的形状为[1]的tensor,表示当前解码的时间步长。其数据类型为int64。 + - **outputs** (Variable) - tensor变量,通常其数据类型为float32或float64,形状为 :math:`[batch\_size, vocabulary\_size]` ,表示当前解码步预测产生的logit(未归一化的概率),和由 :code:`BasicDecoder.output_fn(BasicDecoder.cell.call())` 返回的 :code:`outputs` 是同一内容。 + - **states** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构,和由 :code:`BasicDecoder.cell.call()` 返回的 :code:`new_states` 是同一内容。 + +返回:数据类型为int64形状为 :math:`[batch\_size]` 的tensor,表示采样得到的id。 + +返回类型:Variable diff --git a/doc/fluid/api_cn/layers_cn/StaticRNN_cn.rst b/doc/fluid/api_cn/layers_cn/StaticRNN_cn.rst index 013ee3b9962a556acf5682d54ab6541dde961c41..ee524611b8f70b678c80d24a89728b9af20da90f 100644 --- a/doc/fluid/api_cn/layers_cn/StaticRNN_cn.rst +++ b/doc/fluid/api_cn/layers_cn/StaticRNN_cn.rst @@ -3,10 +3,13 @@ StaticRNN ------------------------------- -**注意:该API仅支持【静态图】模式** .. 
py:class:: paddle.fluid.layers.StaticRNN(name=None) +:api_attr: 声明式编程模式(静态图) + + + 该OP用来处理一批序列数据,其中每个样本序列的长度必须相等。StaticRNN将序列按照时间步长展开,用户需要定义每个时间步中的处理逻辑。 参数: diff --git a/doc/fluid/api_cn/layers_cn/Switch_cn.rst b/doc/fluid/api_cn/layers_cn/Switch_cn.rst index 67e2ea544fefe2b814e91628b226f45779c6a4c0..e4d132861ae84f5d6ba6befc42a7b6811aed4d40 100644 --- a/doc/fluid/api_cn/layers_cn/Switch_cn.rst +++ b/doc/fluid/api_cn/layers_cn/Switch_cn.rst @@ -3,10 +3,13 @@ Switch ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:class:: paddle.fluid.layers.Switch (name=None) +:api_attr: 声明式编程模式(静态图) + + + 该类用于实现Switch分支控制功能。Switch分支包含多个case分支和一个default分支,Switch控制流会依次检查各case分支条件是否满足,并仅执行第一个满足条件的case分支后面的语句。若不存在满足条件的case分支,则仅执行default分支后面的语句。 .. note:: diff --git a/doc/fluid/api_cn/layers_cn/TrainingHelper_cn.rst b/doc/fluid/api_cn/layers_cn/TrainingHelper_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5d140dbf8ac61370b3a0c7a33a50f56c378e4929 --- /dev/null +++ b/doc/fluid/api_cn/layers_cn/TrainingHelper_cn.rst @@ -0,0 +1,70 @@ +.. _cn_api_fluid_layers_TrainingHelper: + +TrainingHelper +------------------------------- + + +.. py:class:: paddle.fluid.layers.TrainingHelper(inputs, sequence_length, time_major=False) + +TrainingHelper是 :ref:`cn_api_fluid_layers_DecodeHelper` 的子类。作为解码helper,它在每个解码时间步通过在完整序列输入 :code:`inputs` 的相应位置切片作为各步的输入,并且使用 :code:`argmax` 根据 :code:`cell.call()` 的输出进行采样。 +由于要求有完整的序列输入 :code:`inputs` ,TrainingHelper主要用于以teacher-forcing的方式进行最大似然训练,采样得到的内容通常不会使用。 + +参数: + - **inputs** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构。当 :code:`time_major == False` 时,tensor的形状应为 :math:`[batch\_size, sequence\_length, ...]`;当 :code:`time_major == True` 时,tensor的形状应为 :math:`[sequence\_length, batch\_size, ...]`。在解码的每一步都要从中切片取出相应的数据。 + - **sequence_length** (Variable) - 形状为 :math:`[batch\_size]` 的tensor。它存储了 :code:`inputs` 中每个样本的实际长度,可以据此来标识每个解码步中每个样本是否结束。 + - **time_major** (bool,可选) - 指示输入tensor和输出tensor中包含的tensor的数据组织。如果为False,则数据组织为batch为主,形状为 :math:`[batch\_size,sequence\_length,...]`。如果为True,则数据组织为time为主,形状为 :math:`[sequence\_length,batch\_size,...]`。默认值:False。 + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.layers as layers + trg_emb = fluid.data(name="trg_emb", + shape=[None, None, 128], + dtype="float32") + trg_seq_length = fluid.data(name="trg_seq_length", + shape=[None], + dtype="int64") + helper = layers.TrainingHelper(trg_emb, trg_seq_length) + decoder_cell = layers.GRUCell(hidden_size=128) + decoder = layers.BasicDecoder(decoder_cell, helper) + outputs = layers.dynamic_decode( + decoder, + inits=decoder_cell.get_initial_states(trg_emb), + is_test=False) + +.. py:method:: initialize() + +TrainingHelper初始化,其通过在完整序列输入 :code:`inputs` 中首个时间步的位置上切片,以此作为第一个解码步的输入,并给出每个序列是否结束的初始标识。这是 :ref:`cn_api_fluid_layers_BasicDecoder` 初始化的一部分。 + +返回::code:`(initial_inputs, initial_finished)` 的二元组, :code:`initial_inputs` 是单个tensor变量或tensor变量组成的嵌套结构,tensor的形状是 :math:`[batch\_size, ...]` 。 :code:`initial_finished` 是一个bool类型且形状为 :math:`[batch\_size]` 的tensor。 + +返回类型:tuple + +.. 
py:method:: sample(time, outputs, states) + +使用 :code:`argmax` 根据 `outputs` 进行采样。由于使用完整序列中的切片作为下一解码步的输入,采样得到的内容通常不会使用。 + +参数: + - **time** (Variable) - 调用者提供的形状为[1]的tensor,表示当前解码的时间步长。其数据类型为int64。 + - **outputs** (Variable) - tensor变量,通常其数据类型为float32或float64,形状为 :math:`[batch\_size, vocabulary\_size]` ,表示当前解码步预测产生的logit(未归一化的概率),和由 :code:`BasicDecoder.output_fn(BasicDecoder.cell.call())` 返回的 :code:`outputs` 是同一内容。 + - **states** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构,和由 :code:`BasicDecoder.cell.call()` 返回的 :code:`new_states` 是同一内容。 + +返回:数据类型为int64形状为 :math:`[batch\_size]` 的tensor,表示采样得到的id。 + +返回类型:Variable + +.. py:method:: next_inputs(time, outputs, states, sample_ids) + +从完整序列输入中当前时间步的位置上切片,以此作为产生下一解码步的输入;同时直接使用输入参数中的 :code:`states` 作为下一解码步的状态;并比较当前时间与每个序列的大小,依此产生每个序列是否结束的标识。 + +参数: + - **time** (Variable) - 调用者提供的形状为[1]的tensor,表示当前解码的时间步长。其数据类型为int64。 + - **outputs** (Variable) - tensor变量,通常其数据类型为float32或float64,形状为 :math:`[batch\_size, vocabulary\_size]` ,表示当前解码步预测产生的logit(未归一化的概率),和由 :code:`BasicDecoder.output_fn(BasicDecoder.cell.call())` 返回的 :code:`outputs` 是同一内容。 + - **states** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构,和由 :code:`BasicDecoder.cell.call()` 返回的 :code:`new_states` 是同一内容。 + - **sample_ids** (Variable) - 数据类型为int64形状为 :math:`[batch\_size]` 的tensor,和由 :code:`sample()` 返回的 :code:`sample_ids` 是同一内容。 + +返回: :code:`(finished, next_inputs, next_states)` 的三元组。 :code:`next_inputs, next_states` 均是单个tensor变量或tensor变量组成的嵌套结构,tensor的形状是 :math:`[batch\_size, ...]` , :code:`next_states` 和输入参数中的 :code:`states` 相同; :code:`finished` 是一个bool类型且形状为 :math:`[batch\_size]` 的tensor。 + +返回类型:tuple diff --git a/doc/fluid/api_cn/layers_cn/Uniform_cn.rst b/doc/fluid/api_cn/layers_cn/Uniform_cn.rst index 601b1a7b1933492b22059cf96007930dad098933..59e1544b3751afacf4002cfa859a6827df1de187 100644 --- a/doc/fluid/api_cn/layers_cn/Uniform_cn.rst +++ b/doc/fluid/api_cn/layers_cn/Uniform_cn.rst @@ -5,6 +5,9 @@ Uniform .. py:class:: paddle.fluid.layers.Uniform(low, high) + + + 均匀分布 概率密度函数(pdf)为: diff --git a/doc/fluid/api_cn/layers_cn/While_cn.rst b/doc/fluid/api_cn/layers_cn/While_cn.rst index 74d4da040a40c77ce90d519ca2157c31f23de7b9..0e851830bd2dfeca29306e43cae88acb9aa2b798 100644 --- a/doc/fluid/api_cn/layers_cn/While_cn.rst +++ b/doc/fluid/api_cn/layers_cn/While_cn.rst @@ -3,10 +3,13 @@ While ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:class:: paddle.fluid.layers.While (cond, is_test=False, name=None) +:api_attr: 声明式编程模式(静态图) + + + 该类用于实现while循环控制功能,只要循环条件cond为True,就循环执行while循环体中的语句,直到cond为False为止。 @@ -14,14 +17,18 @@ While 如果参数 ``cond`` 的形状为[1],强烈建议您使用新的OP :ref:`cn_api_fluid_layers_while_loop` 而不是 ``While``。 OP :ref:`cn_api_fluid_layers_while_loop` 的使用方式更简单,并且调用该OP所用的代码更少且功能与 ``While`` 一样。 +**注意:** + 在 ``While`` 中创建的局部变量类似于C++中的while,无法被外部引用,因此无法通过 ``Executor`` 中的 ``fetch_list`` 来获取。 + 若想实现该功能,PaddlePaddle提供了 ``assign`` 接口将局部变量赋值到外部,请参考示例代码2 或参考 `issue#22724 `_ 。 + 参数: - **cond** (Variable) – 用于判断循环继续进行的条件,为数据类型bool型的Tensor,其shape必须为[1]。 - **is_test** (bool,可选) – 用于表明是否在测试阶段执行,默认值为False。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 -**代码示例** +**代码示例 1** -.. code-block:: python +.. code-block:: python # 该示例代码展示整数循环+1,循环10次,输出计数结果 import paddle.fluid as fluid @@ -44,7 +51,33 @@ While print(res) # [array([10])] +**代码示例 2** + +.. 
code-block:: python + + import paddle.fluid as fluid + import numpy as np + + i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0) + loop_len = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10) + one = fluid.layers.fill_constant(shape=[1], dtype='float32', value=1) + data = fluid.data(name='data', shape=[1], dtype='float32') + sums = fluid.layers.fill_constant(shape=[1], dtype='float32', value=0) # 在 While 外先定义要获取的变量,需和要获取的 While 内部的变量名称不同 + cond = fluid.layers.less_than(x=i, y=loop_len) + while_op = fluid.layers.While(cond=cond) + with while_op.block(): + sums_tensor = fluid.layers.elementwise_add(x=data, y=data) + fluid.layers.assign(input=sums_tensor, output=sums) # 将 While 内定义的变量 sums_tensor 通过 layers.assign 更新至 While 外的变量 sums 中 + i = fluid.layers.increment(x=i, value=1, in_place=True) + data = fluid.layers.elementwise_add(x=data, y=one) + fluid.layers.less_than(x=i, y=loop_len, cond=cond) + + feed_data = np.ones([1]).astype('float32') + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(fluid.default_startup_program()) + res = exe.run(fluid.default_main_program(), feed={'data': feed_data}, fetch_list=sums) + print(res[0]) # [2.] # 因 While 内的 data 没有将值更新到 While 外,故循环过后此处 sums 的值为 [2.] diff --git a/doc/fluid/api_cn/layers_cn/abs_cn.rst b/doc/fluid/api_cn/layers_cn/abs_cn.rst index 754f65f3870fa11cfbc242339afa5d21eb0b843c..755477bb7a34a13226d90a7a2c421af3eb792bcf 100644 --- a/doc/fluid/api_cn/layers_cn/abs_cn.rst +++ b/doc/fluid/api_cn/layers_cn/abs_cn.rst @@ -5,23 +5,33 @@ abs .. py:function:: paddle.fluid.layers.abs(x, name=None) -绝对值激活函数。 +:alias_main: paddle.abs +:alias: paddle.abs,paddle.tensor.abs,paddle.tensor.math.abs +:old_api: paddle.fluid.layers.abs + + + +绝对值函数。 .. math:: out = |x| 参数: - - **x** (Variable)- 多维Tensor,数据类型为float32或float64。 - - **name** (str) – 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None。 + - x (Tensor) - 输入的Tensor,数据类型为:float32、float64。 + - name (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 -返回:表示绝对值结果的Tensor,数据类型与x相同。 +返回:输出Tensor,与 ``x`` 维度相同、数据类型相同。 -返回类型:Variable +返回类型:Tensor **代码示例**: .. code-block:: python - import paddle.fluid as fluid - data = fluid.layers.data(name="input", shape=[32, 784]) - result = fluid.layers.abs(data) + import paddle + paddle.disable_static() + + x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) + out = paddle.abs(x) + print(out.numpy()) + # [0.4 0.2 0.1 0.3] diff --git a/doc/fluid/api_cn/layers_cn/accuracy_cn.rst b/doc/fluid/api_cn/layers_cn/accuracy_cn.rst index b83239c01adf66fe6694121bda5a947540de1a86..15c3d9efcb115065c8b41034ecc0d4703ffb1730 100755 --- a/doc/fluid/api_cn/layers_cn/accuracy_cn.rst +++ b/doc/fluid/api_cn/layers_cn/accuracy_cn.rst @@ -5,6 +5,9 @@ accuracy .. py:function:: paddle.fluid.layers.accuracy(input, label, k=1, correct=None, total=None) + + + accuracy layer。 参考 https://en.wikipedia.org/wiki/Precision_and_recall 使用输入和标签计算准确率。 如果正确的标签在topk个预测值里,则计算结果加1。注意:输出正确率的类型由input类型决定,input和label的类型可以不一样。 diff --git a/doc/fluid/api_cn/layers_cn/acos_cn.rst b/doc/fluid/api_cn/layers_cn/acos_cn.rst index 408b2ed70566f02ed4edcdc486b1a01671407154..288c3121081ac22005f6bd0926cd6ed42d4675a0 100644 --- a/doc/fluid/api_cn/layers_cn/acos_cn.rst +++ b/doc/fluid/api_cn/layers_cn/acos_cn.rst @@ -5,29 +5,34 @@ acos .. py:function:: paddle.fluid.layers.acos(x, name=None) -arccosine激活函数。 +:alias_main: paddle.acos +:alias: paddle.acos,paddle.tensor.acos,paddle.tensor.math.acos +:old_api: paddle.fluid.layers.acos + + + +arccosine函数。 .. 
math:: out = cos^{-1}(x) 参数: - - **x(Variable)** - acos的输入Tensor,数据类型为 float32 或 float64 - - **name** (str|None) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值为None。 -返回: `acos` 的输出Tensor,数据类型与 `x` 相同。 + - x (Tensor) - 输入的Tensor,数据类型为:float32、float64。 + - name (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 -返回类型: Variable +返回:输出Tensor,与 ``x`` 维度相同、数据类型相同。 +返回类型: Tensor **代码示例**: .. code-block:: python - import paddle.fluid as fluid - data = fluid.layers.data(name="input", shape=[4]) - # if data is [-0.8183, 0.4912, -0.6444, 0.0371] - result = fluid.layers.acos(data) - # result is [2.5293, 1.0573, 2.2711, 1.5336] - - + import paddle + paddle.disable_static() + x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) + out = paddle.acos(x) + print(out.numpy()) + # [1.98231317 1.77215425 1.47062891 1.26610367] diff --git a/doc/fluid/api_cn/layers_cn/adaptive_pool2d_cn.rst b/doc/fluid/api_cn/layers_cn/adaptive_pool2d_cn.rst index 8f4c38c6954fe90d614c1b72a3f04fef61a51f93..c2a5026955e2e2c1e5484ec1799ced2851921a88 100644 --- a/doc/fluid/api_cn/layers_cn/adaptive_pool2d_cn.rst +++ b/doc/fluid/api_cn/layers_cn/adaptive_pool2d_cn.rst @@ -5,6 +5,12 @@ adaptive_pool2d .. py:function:: paddle.fluid.layers.adaptive_pool2d(input, pool_size, pool_type='max', require_index=False, name=None) +:alias_main: paddle.nn.functional.adaptive_pool2d +:alias: paddle.nn.functional.adaptive_pool2d,paddle.nn.functional.pooling.adaptive_pool2d +:old_api: paddle.fluid.layers.adaptive_pool2d + + + 该OP使用上述输入参数的池化配置,为二维空间自适应池化操作,根据 ``input`` , 池化类型 ``pool_type`` , 池化核大小 ``pool_size`` 这些参数得到输出。 输入X和输出Out是NCHW格式,N为批大小,C是通道数,H是特征高度,W是特征宽度。参数 ``pool_size`` 含有两个整型元素, 分别代表高度和宽度上的参数。输出Out的H和W维由 ``pool_size`` 决定,即输出shape为 :math:`\left ( N,C,pool_size[0],pool_size[1] \right )` diff --git a/doc/fluid/api_cn/layers_cn/adaptive_pool3d_cn.rst b/doc/fluid/api_cn/layers_cn/adaptive_pool3d_cn.rst index 2b166e0449022c815454d28f4395733090dd9d0e..ec7c4d13210b745f6dba3bd307ab1587558c2535 100644 --- a/doc/fluid/api_cn/layers_cn/adaptive_pool3d_cn.rst +++ b/doc/fluid/api_cn/layers_cn/adaptive_pool3d_cn.rst @@ -5,6 +5,12 @@ adaptive_pool3d .. py:function:: paddle.fluid.layers.adaptive_pool3d(input, pool_size, pool_type='max', require_index=False, name=None) +:alias_main: paddle.nn.functional.adaptive_pool3d +:alias: paddle.nn.functional.adaptive_pool3d,paddle.nn.functional.pooling.adaptive_pool3d +:old_api: paddle.fluid.layers.adaptive_pool3d + + + 该OP使用上述输入参数的池化配置,为二维空间自适应池化操作,根据 ``input`` , 池化类型 ``pool_type`` , 池化核大小 ``pool_size`` 这些参数得到输出。 输入X和输出Out是NCDHW格式,N为批大小,D是特征深度,C是通道数,H是特征高度,W是特征宽度。参数 ``pool_size`` 含有两个整型元素, 分别代表深度,高度和宽度上的参数。输出Out的D, H和W维由 ``pool_size`` 决定,即输出shape为 :math:`\left ( N,C,pool_size[0],pool_size[1],pool_size[2] \right )` diff --git a/doc/fluid/api_cn/layers_cn/add_position_encoding_cn.rst b/doc/fluid/api_cn/layers_cn/add_position_encoding_cn.rst index 855acbfb1695606c7cd75156d9806316b69e918f..abba3377ba359ac496848262f1fe29705ea504b0 100644 --- a/doc/fluid/api_cn/layers_cn/add_position_encoding_cn.rst +++ b/doc/fluid/api_cn/layers_cn/add_position_encoding_cn.rst @@ -5,6 +5,12 @@ add_position_encoding .. 
py:function:: paddle.fluid.layers.add_position_encoding(input, alpha, beta, name=None) +:alias_main: paddle.nn.functional.add_position_encoding +:alias: paddle.nn.functional.add_position_encoding,paddle.nn.functional.extension.add_position_encoding +:old_api: paddle.fluid.layers.add_position_encoding + + + 该OP将输入inpu中每个位置(序列中的位置)的特征与对应的位置编码加权求和,位置编码可参考论文: `Attention Is All You Need `_ 输出的计算公式如下: @@ -34,14 +40,13 @@ add_position_encoding .. code-block:: python - import paddle.fluid as fluid - - tensor = fluid.layers.data( + import paddle.fluid as fluid + + tensor = fluid.data( name='tensor', - shape=[32, 64, 512], - dtype='float32', - append_batch_size=False) - position_tensor = fluid.layers.add_position_encoding( + shape=[None, 64, 512], + dtype='float32') + position_tensor = fluid.layers.add_position_encoding( input=tensor, alpha=1.0, beta=1.0) @@ -53,4 +58,3 @@ add_position_encoding - diff --git a/doc/fluid/api_cn/layers_cn/affine_channel_cn.rst b/doc/fluid/api_cn/layers_cn/affine_channel_cn.rst index 1d2fafaf639c834cede7b3136111dd50c9420d0e..c810d489a98304681ab230606d87b13bfed49ca4 100644 --- a/doc/fluid/api_cn/layers_cn/affine_channel_cn.rst +++ b/doc/fluid/api_cn/layers_cn/affine_channel_cn.rst @@ -5,6 +5,12 @@ affine_channel .. py:function:: paddle.fluid.layers.affine_channel(x, scale=None, bias=None, data_layout='NCHW', name=None,act=None) +:alias_main: paddle.nn.functional.affine_channel +:alias: paddle.nn.functional.affine_channel,paddle.nn.functional.vision.affine_channel +:old_api: paddle.fluid.layers.affine_channel + + + 对输入的每个 channel 应用单独的仿射变换。用于将空间批量归一化替换为其等价的固定变换。 输入也可以是二维张量,并在第二维应用仿射变换。 diff --git a/doc/fluid/api_cn/layers_cn/affine_grid_cn.rst b/doc/fluid/api_cn/layers_cn/affine_grid_cn.rst index 3f143f3942e3fb2f85142467479747a93ee3a13d..287116d9f5f6845eb0f874a78c329ca49676b14d 100644 --- a/doc/fluid/api_cn/layers_cn/affine_grid_cn.rst +++ b/doc/fluid/api_cn/layers_cn/affine_grid_cn.rst @@ -5,6 +5,12 @@ affine_grid .. py:function:: paddle.fluid.layers.affine_grid(theta, out_shape, name=None) +:alias_main: paddle.nn.functional.affine_grid +:alias: paddle.nn.functional.affine_grid,paddle.nn.functional.vision.affine_grid +:old_api: paddle.fluid.layers.affine_grid + + + 该OP用于生成仿射变换前后的feature maps的坐标映射关系。在视觉应用中,根据该OP得到的映射关系,将输入feature map的像素点变换到对应的坐标,就得到了经过仿射变换的feature map。 参数: diff --git a/doc/fluid/api_cn/layers_cn/anchor_generator_cn.rst b/doc/fluid/api_cn/layers_cn/anchor_generator_cn.rst index 0b7c5269aabe417880b14857002bc81e97086784..ada0130cce84b1f94d57f60859b04f3b83d1d6ca 100644 --- a/doc/fluid/api_cn/layers_cn/anchor_generator_cn.rst +++ b/doc/fluid/api_cn/layers_cn/anchor_generator_cn.rst @@ -5,6 +5,12 @@ anchor_generator .. py:function:: paddle.fluid.layers.anchor_generator(input, anchor_sizes=None, aspect_ratios=None, variance=[0.1, 0.1, 0.2, 0.2], stride=None, offset=0.5, name=None) +:alias_main: paddle.nn.functional.anchor_generator +:alias: paddle.nn.functional.anchor_generator,paddle.nn.functional.vision.anchor_generator +:old_api: paddle.fluid.layers.anchor_generator + + + **Anchor generator operator** 为RCNN算法生成anchor,输入的每一位产生N个anchor,N=size(anchor_sizes)*size(aspect_ratios)。生成anchor的顺序首先是aspect_ratios循环,然后是anchor_sizes循环。 diff --git a/doc/fluid/api_cn/layers_cn/argmax_cn.rst b/doc/fluid/api_cn/layers_cn/argmax_cn.rst index 47b6a49dec02eed0685b48064ce97c32af994cae..d165ce8d6997f060e53c2754239be83b6298ef2c 100644 --- a/doc/fluid/api_cn/layers_cn/argmax_cn.rst +++ b/doc/fluid/api_cn/layers_cn/argmax_cn.rst @@ -5,6 +5,9 @@ argmax .. 
py:function:: paddle.fluid.layers.argmax(x, axis=0) + + + **argmax** 该OP沿 ``axis`` 计算输入 ``x`` 的最大元素的索引。 diff --git a/doc/fluid/api_cn/layers_cn/argmin_cn.rst b/doc/fluid/api_cn/layers_cn/argmin_cn.rst index 885515bd30dc3c364718fd6b2ed1ab1cc6abf261..74ba5fbc52a2cc285cd9d1a370246e028ec0b14c 100644 --- a/doc/fluid/api_cn/layers_cn/argmin_cn.rst +++ b/doc/fluid/api_cn/layers_cn/argmin_cn.rst @@ -5,6 +5,12 @@ argmin .. py:function:: paddle.fluid.layers.argmin(x, axis=0) +:alias_main: paddle.argmin +:alias: paddle.argmin,paddle.tensor.argmin,paddle.tensor.search.argmin +:old_api: paddle.fluid.layers.argmin + + + **argmin** 该OP沿 ``axis`` 计算输入 ``x`` 的最小元素的索引。 diff --git a/doc/fluid/api_cn/layers_cn/argsort_cn.rst b/doc/fluid/api_cn/layers_cn/argsort_cn.rst index cc35109750567a42d5069f3d6d120ce8410b3066..a681b3beefadc7c601a42a23a958792b5bac9939 100644 --- a/doc/fluid/api_cn/layers_cn/argsort_cn.rst +++ b/doc/fluid/api_cn/layers_cn/argsort_cn.rst @@ -5,11 +5,17 @@ argsort .. py:function:: paddle.fluid.layers.argsort(input,axis=-1,descending=False,name=None) +:alias_main: paddle.argsort +:alias: paddle.argsort,paddle.tensor.argsort,paddle.tensor.search.argsort +:old_api: paddle.fluid.layers.argsort + + + 对输入变量沿给定轴进行排序,输出排序好的数据和相应的索引,其维度和输入相同。**默认升序排列,如果需要降序排列设置** ``descending=True`` 。 参数: - - **input** (Variable) - 输入的多维 ``Tensor`` ,支持的数据类型:float32、float64。 + - **input** (Variable) - 输入的多维 ``Tensor`` ,支持的数据类型:float32、float64、int16、int32、int64、uint8。 - **axis** (int,可选) - 指定对输入Tensor进行运算的轴, ``axis`` 的有效范围是[-R, R),R是输入 ``x`` 的Rank, ``axis`` 为负时与 ``axis`` +R 等价。默认值为0。 - **descending** (bool,可选) - 指定算法排序的方向。如果设置为True,算法按照降序排序。如果设置为False或者不设置,按照升序排序。默认值为False。 - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 diff --git a/doc/fluid/api_cn/layers_cn/array_length_cn.rst b/doc/fluid/api_cn/layers_cn/array_length_cn.rst index b3e94b1b000a6fa541fd7a8b0c901d2d351b8e1c..27fcaf003b8dab728cded431ae24e525d9e4cb47 100644 --- a/doc/fluid/api_cn/layers_cn/array_length_cn.rst +++ b/doc/fluid/api_cn/layers_cn/array_length_cn.rst @@ -5,6 +5,9 @@ array_length .. py:function:: paddle.fluid.layers.array_length(array) + + + 该OP用于获取输入数组 :ref:`cn_api_fluid_LoDTensorArray` 的长度。可以与 :ref:`cn_api_fluid_layers_array_read` 、 :ref:`cn_api_fluid_layers_array_write` 、 :ref:`cn_api_fluid_layers_While` OP结合使用,实现LoDTensorArray的遍历与读写。 参数: diff --git a/doc/fluid/api_cn/layers_cn/array_read_cn.rst b/doc/fluid/api_cn/layers_cn/array_read_cn.rst index 1f5d5432a6b9584e189dade7d5bef758f19fd6d5..6e8cd4fb70ec2f31bbb93822883c0cfc491cb691 100644 --- a/doc/fluid/api_cn/layers_cn/array_read_cn.rst +++ b/doc/fluid/api_cn/layers_cn/array_read_cn.rst @@ -5,6 +5,9 @@ array_read .. py:function:: paddle.fluid.layers.array_read(array,i) + + + 该OP用于读取输入数组 :ref:`cn_api_fluid_LoDTensorArray` 中指定位置的数据, ``array`` 为输入的数组, ``i`` 为指定的读取位置。常与 :ref:`cn_api_fluid_layers_array_write` OP配合使用进行LoDTensorArray的读写。 例1: diff --git a/doc/fluid/api_cn/layers_cn/array_write_cn.rst b/doc/fluid/api_cn/layers_cn/array_write_cn.rst index 532c950938e3d4fb17bf2bf20cfe7f0551a697ac..4f59d605093261366a4fea7d2e48dd9b4011158c 100644 --- a/doc/fluid/api_cn/layers_cn/array_write_cn.rst +++ b/doc/fluid/api_cn/layers_cn/array_write_cn.rst @@ -5,6 +5,9 @@ array_write .. 
py:function:: paddle.fluid.layers.array_write(x, i, array=None) + + + 该OP将输入的变量 ``x`` 写入到数组 :ref:`cn_api_fluid_LoDTensorArray` ``array`` 的第i个位置,并返回修改后的LoDTensorArray,如果 ``array`` 为None,则创建一个新的LoDTensorArray。常与 :ref:`cn_api_fluid_layers_array_read` OP联合使用对LoDTensorArray进行读写。 参数: diff --git a/doc/fluid/api_cn/layers_cn/asin_cn.rst b/doc/fluid/api_cn/layers_cn/asin_cn.rst index a7fff929d09a7dd5ed1d81615320248e2e620a90..7960b807a60d25f2f4bfb7e3b46695f99e706eac 100644 --- a/doc/fluid/api_cn/layers_cn/asin_cn.rst +++ b/doc/fluid/api_cn/layers_cn/asin_cn.rst @@ -5,29 +5,33 @@ asin .. py:function:: paddle.fluid.layers.asin(x, name=None) -arcsine激活函数。 +:alias_main: paddle.asin +:alias: paddle.asin,paddle.tensor.asin,paddle.tensor.math.asin +:old_api: paddle.fluid.layers.asin + + + +arcsine函数。 .. math:: out = sin^{-1}(x) - 参数: - - **x(Variable)** - asin的输入Tensor,数据类型为 float32 或 float64 - - **name** (str|None) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值为None。 + - x (Tensor) - 输入的Tensor,数据类型为:float32、float64、float16。 + - name (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 -返回: `asin` 的输出Tensor,数据类型与 `x` 相同。 +返回:输出Tensor,与 ``x`` 维度相同、数据类型相同。 -返回类型: Variable +返回类型: Tensor **代码示例**: .. code-block:: python - import paddle.fluid as fluid - data = fluid.layers.data(name="input", shape=[4]) - # if data is [-0.8183, 0.4912, -0.6444, 0.0371] - result = fluid.layers.asin(data) - # result is [-0.9585, 0.5135, -0.7003, 0.0372] - - + import paddle + paddle.disable_static() + x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) + out = paddle.asin(x) + print(out.numpy()) + # [-0.41151685 -0.20135792 0.10016742 0.30469265] diff --git a/doc/fluid/api_cn/layers_cn/assign_cn.rst b/doc/fluid/api_cn/layers_cn/assign_cn.rst index d01f0a2aaa328cabd84346e828a77cabe54f3a81..da7b3c8f146aa744735c464902f0d633364e8288 100644 --- a/doc/fluid/api_cn/layers_cn/assign_cn.rst +++ b/doc/fluid/api_cn/layers_cn/assign_cn.rst @@ -5,6 +5,12 @@ assign .. py:function:: paddle.fluid.layers.assign(input,output=None) +:alias_main: paddle.nn.functional.assign +:alias: paddle.nn.functional.assign,paddle.nn.functional.common.assign +:old_api: paddle.fluid.layers.assign + + + 该OP将输入Tensor或numpy数组拷贝至输出Tensor。 参数: diff --git a/doc/fluid/api_cn/layers_cn/atan_cn.rst b/doc/fluid/api_cn/layers_cn/atan_cn.rst index 618151ec5f61f556c41f19417de2206d33b223bb..2b5b11b6f9ffa00fc6bb09520713b22439fea4cf 100644 --- a/doc/fluid/api_cn/layers_cn/atan_cn.rst +++ b/doc/fluid/api_cn/layers_cn/atan_cn.rst @@ -5,30 +5,33 @@ atan .. py:function:: paddle.fluid.layers.atan(x, name=None) -arctanh激活函数。 +:alias_main: paddle.atan +:alias: paddle.atan,paddle.tensor.atan,paddle.tensor.math.atan +:update_api: paddle.fluid.layers.atan + + + +arctangent函数。 .. math:: - out = tanh^{-1}(x) + out = tan^{-1}(x) 参数: - - **x(Variable)** - atan的输入Tensor,数据类型为 float32 或 float64 - - **name** (str|None) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值为None。 + - x (Tensor) - 输入的Tensor,数据类型为:float32、float64、float16。 + - name (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 -返回: `atan` 的输出Tensor,数据类型与 `x` 相同。 +返回:输出Tensor,与 ``x`` 维度相同、数据类型相同。 -返回类型: Variable +返回类型: Tensor **代码示例**: .. 
code-block:: python - import paddle.fluid as fluid - data = fluid.layers.data(name="input", shape=[4]) - # if data is [-0.8183, 0.4912, -0.6444, 0.0371] - result = fluid.layers.atan(data) - # result is [-0.6858, 0.4566, -0.5724, 0.0371] - - - - + import paddle + paddle.disable_static() + x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) + out = paddle.atan(x) + print(out.numpy()) + # [-0.38050638 -0.19739556 0.09966865 0.29145679] diff --git a/doc/fluid/api_cn/layers_cn/auc_cn.rst b/doc/fluid/api_cn/layers_cn/auc_cn.rst index f276b1290771a21be33a7ec1f22eb2ec521b4e4a..e915875f5d306abdf7ebf51cc65a2cbdf66ca5de 100755 --- a/doc/fluid/api_cn/layers_cn/auc_cn.rst +++ b/doc/fluid/api_cn/layers_cn/auc_cn.rst @@ -5,6 +5,9 @@ auc .. py:function:: paddle.fluid.layers.auc(input, label, curve='ROC', num_thresholds=200, topk=1, slide_steps=1) + + + **Area Under the Curve(AUC) Layer** 该层根据前向输出和标签计算AUC,在二分类(binary classification)估计中广泛使用。 diff --git a/doc/fluid/api_cn/layers_cn/autoincreased_step_counter_cn.rst b/doc/fluid/api_cn/layers_cn/autoincreased_step_counter_cn.rst index e3e4768a01166a45f58d41eb6d53e1a3ad689e34..821b793f511e943ec53e253c1df108ac60286fea 100644 --- a/doc/fluid/api_cn/layers_cn/autoincreased_step_counter_cn.rst +++ b/doc/fluid/api_cn/layers_cn/autoincreased_step_counter_cn.rst @@ -3,10 +3,13 @@ autoincreased_step_counter ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.layers.autoincreased_step_counter(counter_name=None, begin=1, step=1) +:api_attr: 声明式编程模式(静态图) + + + 创建一个自增变量,每个迭代累加一次,默认首次返回值为 1,默认累加步长为 1。 参数: diff --git a/doc/fluid/api_cn/layers_cn/batch_norm_cn.rst b/doc/fluid/api_cn/layers_cn/batch_norm_cn.rst index 3d4699eb690296220eba86d07d5f5c0ca46e8087..95fa58257d7180f8a16176aa7b754f0b20124507 100644 --- a/doc/fluid/api_cn/layers_cn/batch_norm_cn.rst +++ b/doc/fluid/api_cn/layers_cn/batch_norm_cn.rst @@ -3,10 +3,13 @@ batch_norm ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.layers.batch_norm(input, act=None, is_test=False, momentum=0.9, epsilon=1e-05, param_attr=None, bias_attr=None, data_layout='NCHW', in_place=False, name=None, moving_mean_name=None, moving_variance_name=None, do_model_average_for_mean_and_var=False, use_global_stats=False) +:api_attr: 声明式编程模式(静态图) + + + 批正则化层(Batch Normalization Layer) 可用作卷积和全连接操作的批正则化函数,根据当前批次数据按通道计算的均值和方差进行正则化。该层需要的数据格式如下: diff --git a/doc/fluid/api_cn/layers_cn/beam_search_cn.rst b/doc/fluid/api_cn/layers_cn/beam_search_cn.rst index c30d45de27424ae751056cc2169b25280ad7e41f..36174f2a63b1dba8a02b7f34af9d2034e3363d6f 100644 --- a/doc/fluid/api_cn/layers_cn/beam_search_cn.rst +++ b/doc/fluid/api_cn/layers_cn/beam_search_cn.rst @@ -5,6 +5,12 @@ beam_search .. py:function:: paddle.fluid.layers.beam_search(pre_ids, pre_scores, ids, scores, beam_size, end_id, level=0, is_accumulated=True, name=None, return_parent_idx=False) +:alias_main: paddle.nn.beam_search +:alias: paddle.nn.beam_search,paddle.nn.decode.beam_search +:old_api: paddle.fluid.layers.beam_search + + + 束搜索(Beam search)是在机器翻译等生成任务中选择候选词的一种经典算法 更多细节参考 `Beam Search `_ diff --git a/doc/fluid/api_cn/layers_cn/beam_search_decode_cn.rst b/doc/fluid/api_cn/layers_cn/beam_search_decode_cn.rst index 5f5f03be1f1a2792195e421d1cf3cf198e529399..a6b46142e1804180cae16c0ab99625fdafa3299c 100644 --- a/doc/fluid/api_cn/layers_cn/beam_search_decode_cn.rst +++ b/doc/fluid/api_cn/layers_cn/beam_search_decode_cn.rst @@ -5,6 +5,12 @@ beam_search_decode .. 
py:function:: paddle.fluid.layers.beam_search_decode(ids, scores, beam_size, end_id, name=None) +:alias_main: paddle.nn.beam_search_decode +:alias: paddle.nn.beam_search_decode,paddle.nn.decode.beam_search_decode +:old_api: paddle.fluid.layers.beam_search_decode + + + 该OP用在整个束搜索(Beam search)结束后,通过沿 ``ids`` 中保存的搜索路径回溯,为每个源句(样本)构造完整的beam search结果序列并保存在LoDTensor中。LoDTensor的格式和解析方式如下: :: diff --git a/doc/fluid/api_cn/layers_cn/bilinear_tensor_product_cn.rst b/doc/fluid/api_cn/layers_cn/bilinear_tensor_product_cn.rst index 2bf7ce633952f7647e6b8960d5f24e2c44a30e41..d65e2abdc70a6a109b69ae474ed64f49167ff37e 100644 --- a/doc/fluid/api_cn/layers_cn/bilinear_tensor_product_cn.rst +++ b/doc/fluid/api_cn/layers_cn/bilinear_tensor_product_cn.rst @@ -3,10 +3,13 @@ bilinear_tensor_product ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.layers.bilinear_tensor_product(x, y, size, act=None, name=None, param_attr=None, bias_attr=None) +:api_attr: 声明式编程模式(静态图) + + + 该层对两个输入执行双线性张量积。 例如: diff --git a/doc/fluid/api_cn/layers_cn/bipartite_match_cn.rst b/doc/fluid/api_cn/layers_cn/bipartite_match_cn.rst index a0a68904a79ee252d3db2d00a1a190fb9c7f52c7..23f2832d97d605b6a80af69ec8ceafbdc3ad9d40 100644 --- a/doc/fluid/api_cn/layers_cn/bipartite_match_cn.rst +++ b/doc/fluid/api_cn/layers_cn/bipartite_match_cn.rst @@ -5,6 +5,12 @@ bipartite_match .. py:function:: paddle.fluid.layers.bipartite_match(dist_matrix, match_type=None, dist_threshold=None, name=None) +:alias_main: paddle.nn.functional.bipartite_match +:alias: paddle.nn.functional.bipartite_match,paddle.nn.functional.vision.bipartite_match +:old_api: paddle.fluid.layers.bipartite_match + + + 该OP实现了贪心二分匹配算法,该算法用于根据输入距离矩阵获得与最大距离的匹配。对于输入二维矩阵,二分匹配算法可以找到每一行的匹配列(匹配意味着最大距离),也可以找到每列的匹配行。此算子仅计算列到行的匹配索引。对于每个实例,匹配索引的数量是 输入距离矩阵的列号。**该OP仅支持CPU** diff --git a/doc/fluid/api_cn/layers_cn/box_clip_cn.rst b/doc/fluid/api_cn/layers_cn/box_clip_cn.rst index 37d86c883774fe439202036c01ee682a4bb774b7..2b5b4ad767b52a49333a9aada0cfce187e71aa1a 100644 --- a/doc/fluid/api_cn/layers_cn/box_clip_cn.rst +++ b/doc/fluid/api_cn/layers_cn/box_clip_cn.rst @@ -5,6 +5,12 @@ box_clip .. py:function:: paddle.fluid.layers.box_clip(input, im_info, name=None) +:alias_main: paddle.nn.functional.box_clip +:alias: paddle.nn.functional.box_clip,paddle.nn.functional.vision.box_clip +:old_api: paddle.fluid.layers.box_clip + + + 将检测框框剪切为 ``im_info`` 给出的大小。对于每个输入框,公式如下: :: diff --git a/doc/fluid/api_cn/layers_cn/box_coder_cn.rst b/doc/fluid/api_cn/layers_cn/box_coder_cn.rst index a2e1dc4ddc2f9e1f0e36f890622cf3ba46f53d8c..eb991c1e53e93aa4bfa4b0aa512cf5d78465ee84 100644 --- a/doc/fluid/api_cn/layers_cn/box_coder_cn.rst +++ b/doc/fluid/api_cn/layers_cn/box_coder_cn.rst @@ -5,6 +5,12 @@ box_coder .. py:function:: paddle.fluid.layers.box_coder(prior_box, prior_box_var, target_box, code_type='encode_center_size', box_normalized=True, name=None, axis=0) +:alias_main: paddle.nn.functional.box_coder +:alias: paddle.nn.functional.box_coder,paddle.nn.functional.vision.box_coder +:old_api: paddle.fluid.layers.box_coder + + + Bounding Box Coder 编码/解码带有先验框信息的目标边界框 diff --git a/doc/fluid/api_cn/layers_cn/box_decoder_and_assign_cn.rst b/doc/fluid/api_cn/layers_cn/box_decoder_and_assign_cn.rst index 252a8c5bfb7d8092dbc9a864cbc07bca5436f73b..df65ef34c8563521040a6851646e4b23192f7424 100644 --- a/doc/fluid/api_cn/layers_cn/box_decoder_and_assign_cn.rst +++ b/doc/fluid/api_cn/layers_cn/box_decoder_and_assign_cn.rst @@ -5,6 +5,12 @@ box_decoder_and_assign .. 
py:function:: paddle.fluid.layers.box_decoder_and_assign(prior_box, prior_box_var, target_box, box_score, box_clip, name=None) +:alias_main: paddle.nn.functional.box_decoder_and_assign +:alias: paddle.nn.functional.box_decoder_and_assign,paddle.nn.functional.vision.box_decoder_and_assign +:old_api: paddle.fluid.layers.box_decoder_and_assign + + + 边界框编码器。 根据先验框来解码目标边界框。 diff --git a/doc/fluid/api_cn/layers_cn/bpr_loss_cn.rst b/doc/fluid/api_cn/layers_cn/bpr_loss_cn.rst index 44c38b30634802296af9f29eb45758858790bf62..06195e5198988de8eb5cbdaf2460ea20bc3482bb 100644 --- a/doc/fluid/api_cn/layers_cn/bpr_loss_cn.rst +++ b/doc/fluid/api_cn/layers_cn/bpr_loss_cn.rst @@ -5,6 +5,12 @@ bpr_loss .. py:function:: paddle.fluid.layers.bpr_loss(input, label, name=None) +:alias_main: paddle.nn.functional.bpr_loss +:alias: paddle.nn.functional.bpr_loss,paddle.nn.functional.loss.bpr_loss +:old_api: paddle.fluid.layers.bpr_loss + + + 贝叶斯个性化排序损失函数(Bayesian Personalized Ranking Loss Operator ) diff --git a/doc/fluid/api_cn/layers_cn/brelu_cn.rst b/doc/fluid/api_cn/layers_cn/brelu_cn.rst index 7f65f3e6474a0e7138ec4e5e7afac28d38633db0..0931ad4cea3cfd17ae8476e450762ed7b359c2c9 100644 --- a/doc/fluid/api_cn/layers_cn/brelu_cn.rst +++ b/doc/fluid/api_cn/layers_cn/brelu_cn.rst @@ -5,6 +5,12 @@ brelu .. py:function:: paddle.fluid.layers.brelu(x, t_min=0.0, t_max=24.0, name=None) +:alias_main: paddle.nn.functional.brelu +:alias: paddle.nn.functional.brelu,paddle.nn.functional.activation.brelu +:old_api: paddle.fluid.layers.brelu + + + BReLU 激活函数 diff --git a/doc/fluid/api_cn/layers_cn/case_cn.rst b/doc/fluid/api_cn/layers_cn/case_cn.rst index cc9892af93819912eccac2cffd70574967d1ab7e..f1f76eae157728e7866eed1ef08e43c41dbe9a1f 100644 --- a/doc/fluid/api_cn/layers_cn/case_cn.rst +++ b/doc/fluid/api_cn/layers_cn/case_cn.rst @@ -3,10 +3,16 @@ case ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.layers.case(pred_fn_pairs, default=None, name=None) +:api_attr: 声明式编程模式(静态图) +:alias_main: paddle.nn.case +:alias: paddle.nn.case,paddle.nn.control_flow.case +:old_api: paddle.fluid.layers.case + + + 该OP的运行方式类似于python的if-elif-elif-else。 参数: diff --git a/doc/fluid/api_cn/layers_cn/cast_cn.rst b/doc/fluid/api_cn/layers_cn/cast_cn.rst index 20f847adddfdec1f8ea6a9cf65f4e180d0451c70..8ce45cef9fe74a6016bf1d4f264696cde94d9e71 100644 --- a/doc/fluid/api_cn/layers_cn/cast_cn.rst +++ b/doc/fluid/api_cn/layers_cn/cast_cn.rst @@ -5,6 +5,12 @@ cast .. py:function:: paddle.fluid.layers.cast(x,dtype) +:alias_main: paddle.cast +:alias: paddle.cast,paddle.tensor.cast,paddle.tensor.manipulation.cast +:old_api: paddle.fluid.layers.cast + + + 该OP将 ``x`` 的数据类型转换为 ``dtype`` 并输出。支持输出和输入的数据类型相同。 参数: diff --git a/doc/fluid/api_cn/layers_cn/ceil_cn.rst b/doc/fluid/api_cn/layers_cn/ceil_cn.rst index 76a6e67e66860caa506570ab7b375c7ab23bd7f1..2ee8e634f28e5b154ba365b5f91a519a0b8758f1 100644 --- a/doc/fluid/api_cn/layers_cn/ceil_cn.rst +++ b/doc/fluid/api_cn/layers_cn/ceil_cn.rst @@ -5,6 +5,12 @@ ceil .. py:function:: paddle.fluid.layers.ceil(x, name=None) +:alias_main: paddle.ceil +:alias: paddle.ceil,paddle.tensor.ceil,paddle.tensor.math.ceil +:old_api: paddle.fluid.layers.ceil + + + 向上取整运算函数。 .. 
math:: @@ -13,24 +19,21 @@ ceil 参数: - - **x** (Variable) - 该OP的输入为多维Tensor。数据类型为float32或float64。 - - **name** (str, 可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为None。 + - x (Tensor) - 输入的Tensor,数据类型为:float32、float64 、float16。 + - name (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 -返回: 输出为Tensor,与 ``x`` 维度相同、数据类型相同。 +返回:输出Tensor,与 ``x`` 维度相同、数据类型相同。 -返回类型: Variable +返回类型: Tensor **代码示例**: .. code-block:: python - import paddle.fluid as fluid - import numpy as np + import paddle + paddle.disable_static() - input_ceil = np.array([[-1.5,6],[1,15.6]]) - with fluid.dygraph.guard(): - x = fluid.dygraph.to_variable(input_ceil) - y = fluid.layers.ceil(x) - print(y.numpy()) - # [[-1. 6.] - # [ 1. 16.]] + x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) + out = paddle.ceil(x) + print(out.numpy()) + # [-0. -0. 1. 1.] diff --git a/doc/fluid/api_cn/layers_cn/center_loss_cn.rst b/doc/fluid/api_cn/layers_cn/center_loss_cn.rst index 3b4a349fe7436048bf5065338bfc37c003fae19e..82be3da0d52e96c7f9a3d97f1943691513ab4450 100644 --- a/doc/fluid/api_cn/layers_cn/center_loss_cn.rst +++ b/doc/fluid/api_cn/layers_cn/center_loss_cn.rst @@ -3,10 +3,16 @@ center_loss ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.layers.center_loss(input, label, num_classes, alpha, param_attr, update_center=True) +:api_attr: 声明式编程模式(静态图) +:alias_main: paddle.nn.functional.center_loss +:alias: paddle.nn.functional.center_loss,paddle.nn.functional.loss.center_loss +:old_api: paddle.fluid.layers.center_loss + + + 该OP接收一个来自于最后一个隐藏层的输出和目标标签作为输入,返回损失值。为每一个类别提供一个类别中心,计算mini-batch中每个样本与对应类别中心的距离的平均值作为center loss。 对于输入,\(X\)和标签\(Y\),计算公式为: diff --git a/doc/fluid/api_cn/layers_cn/chunk_eval_cn.rst b/doc/fluid/api_cn/layers_cn/chunk_eval_cn.rst index 7542ccc1fa553819deb542d387090d58d57118a9..c264a071ab03ca4a53279da3906db671e499b1d2 100644 --- a/doc/fluid/api_cn/layers_cn/chunk_eval_cn.rst +++ b/doc/fluid/api_cn/layers_cn/chunk_eval_cn.rst @@ -5,6 +5,9 @@ chunk_eval .. py:function:: paddle.fluid.layers.chunk_eval(input, label, chunk_scheme, num_chunk_types, excluded_chunk_types=None, sqe_length=None) + + + 该OP计算语块识别(chunk detection)的准确率、召回率和F1值,常用于命名实体识别(NER,语块识别的一种)等序列标注任务中。 语块识别的基础请参考 `Chunking with Support Vector Machines `_ @@ -79,13 +82,13 @@ chunk_eval dict_size = 10000 label_dict_len = 7 - sequence = fluid.layers.data( - name='id', shape=[1], lod_level=1, dtype='int64') - embedding = fluid.layers.embedding( + sequence = fluid.data( + name='id', shape=[None, 1], lod_level=1, dtype='int64') + embedding = fluid.embedding( input=sequence, size=[dict_size, 512]) hidden = fluid.layers.fc(input=embedding, size=512) - label = fluid.layers.data( - name='label', shape=[1], lod_level=1, dtype='int32') + label = fluid.data( + name='label', shape=[None, 1], lod_level=1, dtype='int64') crf = fluid.layers.linear_chain_crf( input=hidden, label=label, param_attr=fluid.ParamAttr(name="crfw")) crf_decode = fluid.layers.crf_decoding( @@ -94,7 +97,7 @@ chunk_eval input=crf_decode, label=label, chunk_scheme="IOB", - num_chunk_types=(label_dict_len - 1) / 2) + num_chunk_types=int((label_dict_len - 1) / 2)) diff --git a/doc/fluid/api_cn/layers_cn/clip_by_norm_cn.rst b/doc/fluid/api_cn/layers_cn/clip_by_norm_cn.rst index bec5d23dfa4393dc6029cd77203f0fdefdda771d..af3467b58d02d54d02dcac5a792c03f3a5db8405 100644 --- a/doc/fluid/api_cn/layers_cn/clip_by_norm_cn.rst +++ b/doc/fluid/api_cn/layers_cn/clip_by_norm_cn.rst @@ -5,6 +5,12 @@ clip_by_norm .. 
py:function:: paddle.fluid.layers.clip_by_norm(x, max_norm, name=None) +:alias_main: paddle.nn.clip_by_norm +:alias: paddle.nn.clip_by_norm,paddle.nn.clip.clip_by_norm +:old_api: paddle.fluid.layers.clip_by_norm + + + ClipByNorm算子 此算子将输入 ``X`` 的L2范数限制在 ``max_norm`` 内。如果 ``X`` 的L2范数小于或等于 ``max_norm`` ,则输出(Out)将与 ``X`` 相同。如果X的L2范数大于 ``max_norm`` ,则 ``X`` 将被线性缩放,使得输出(Out)的L2范数等于 ``max_norm`` ,如下面的公式所示: diff --git a/doc/fluid/api_cn/layers_cn/clip_cn.rst b/doc/fluid/api_cn/layers_cn/clip_cn.rst index b100e6f1f7d2fce8cdec508ffc31087bd65c7425..071fe323ffdf8de41a454669e3b23e2f7f976a4b 100644 --- a/doc/fluid/api_cn/layers_cn/clip_cn.rst +++ b/doc/fluid/api_cn/layers_cn/clip_cn.rst @@ -5,6 +5,12 @@ clip .. py:function:: paddle.fluid.layers.clip(x, min, max, name=None) +:alias_main: paddle.nn.clip +:alias: paddle.nn.clip,paddle.nn.clip.clip +:old_api: paddle.fluid.layers.clip + + + 该OP对输入Tensor每个元素的数值进行裁剪,使得输出Tensor元素的数值被限制在区间[min, max]内。具体的计算公式为如下。 .. math:: diff --git a/doc/fluid/api_cn/layers_cn/collect_fpn_proposals_cn.rst b/doc/fluid/api_cn/layers_cn/collect_fpn_proposals_cn.rst index c895d2ddaea97525818071f9920ba2e87daad709..9c00888befa5a82ec0296f9f51496bea1697f273 100644 --- a/doc/fluid/api_cn/layers_cn/collect_fpn_proposals_cn.rst +++ b/doc/fluid/api_cn/layers_cn/collect_fpn_proposals_cn.rst @@ -5,6 +5,12 @@ collect_fpn_proposals .. py:function:: paddle.fluid.layers.collect_fpn_proposals(multi_rois, multi_scores, min_level, max_level, post_nms_top_n, name=None) +:alias_main: paddle.nn.functional.collect_fpn_proposals +:alias: paddle.nn.functional.collect_fpn_proposals,paddle.nn.functional.vision.collect_fpn_proposals +:old_api: paddle.fluid.layers.collect_fpn_proposals + + + **该op仅支持LoDTensor输入**。连接多级RoIs(感兴趣区域)并依据multi_scores选择N个RoIs。此操作执行以下步骤: 1、选择num_level个RoIs和scores作为输入:num_level = max_level - min_level 2、连接num_level个RoIs和scores。 diff --git a/doc/fluid/api_cn/layers_cn/concat_cn.rst b/doc/fluid/api_cn/layers_cn/concat_cn.rst index b35d3f125496b432cf1d7c485d61a3116a32a1a5..a0c2ade9178f1842e355cfd3bdb0e667db38cd2d 100644 --- a/doc/fluid/api_cn/layers_cn/concat_cn.rst +++ b/doc/fluid/api_cn/layers_cn/concat_cn.rst @@ -3,18 +3,17 @@ concat ------------------------------- -.. py:function:: paddle.fluid.layers.concat(input,axis=0,name=None) +.. 
py:function:: paddle.fluid.layers.concat(input, axis=0, name=None) -该OP对输入沿 ``axis`` 轴进行联结。 + +该OP对输入沿 ``axis`` 轴进行联结,返回一个新的Tensor。 参数: - - **input** (list) - 输入是待联结的多维 ``Tensor`` 组成的 ``list`` ,支持的数据类型为:float32、float64、int32、int64。 - - **axis** (int|Variable,可选) - 整数或者形状为[1]的 ``Tensor``,数据类型为 ``int32``。指定对输入Tensor进行运算的轴, ``axis`` 的有效范围是[-R, R),R是输入 ``input`` 中 ``Tensor`` 的维度, ``axis`` 为负值时与 :math:`axis + R` 等价。默认值为0。 + - **input** (list|tuple|Tensor) - 待联结的Tensor list,Tensor tuple或者Tensor,支持的数据类型为:bool、float16、 float32、float64、int32、int64。 ``input`` 中所有Tensor的数据类型必须一致。 + - **axis** (int|Tensor,可选) - 指定对输入Tensor进行运算的轴,可以是整数或者形状为[1]的Tensor,数据类型为int32或者int64。 ``axis`` 的有效范围是[-R, R),R是输入 ``input`` 中Tensor 的维度, ``axis`` 为负值时与 :math:`axis + R` 等价。默认值为0。 - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 -返回:联结后的 ``Tensor`` ,数据类型和 ``input`` 相同。 - -返回类型:Variable +返回:联结后的 ``Tensor`` ,数据类型和 ``input`` 中的Tensor相同。 **代码示例**: @@ -23,18 +22,18 @@ concat import paddle.fluid as fluid import numpy as np - in1 = np.array([[1,2,3], - [4,5,6]]) - in2 = np.array([[11,12,13], - [14,15,16]]) - in3 = np.array([[21,22], - [23,24]]) + in1 = np.array([[1, 2, 3], + [4, 5, 6]]) + in2 = np.array([[11, 12, 13], + [14, 15, 16]]) + in3 = np.array([[21, 22], + [23, 24]]) with fluid.dygraph.guard(): x1 = fluid.dygraph.to_variable(in1) x2 = fluid.dygraph.to_variable(in2) x3 = fluid.dygraph.to_variable(in3) - out1 = fluid.layers.concat(input=[x1,x2,x3], axis=-1) - out2 = fluid.layers.concat(input=[x1,x2], axis=0) + out1 = fluid.layers.concat(input=[x1, x2, x3], axis=-1) + out2 = fluid.layers.concat(input=[x1, x2], axis=0) print(out1.numpy()) # [[ 1 2 3 11 12 13 21 22] # [ 4 5 6 14 15 16 23 24]] diff --git a/doc/fluid/api_cn/layers_cn/cond_cn.rst b/doc/fluid/api_cn/layers_cn/cond_cn.rst index bdca5f2025f9da79678bdf8586d8f59b4a3e59b0..a72495a7392e8a17695a1eee96e286a7bcfba3dc 100644 --- a/doc/fluid/api_cn/layers_cn/cond_cn.rst +++ b/doc/fluid/api_cn/layers_cn/cond_cn.rst @@ -3,10 +3,16 @@ cond ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.layers.cond(pred, true_fn=None, false_fn=None, name=None) +:api_attr: 声明式编程模式(静态图) +:alias_main: paddle.nn.cond +:alias: paddle.nn.cond,paddle.nn.control_flow.cond +:old_api: paddle.fluid.layers.cond + + + 如果 ``pred`` 是 ``True`` ,该API返回 ``true_fn()`` ,否则返回 ``false_fn()`` 。 用户如果不想在 ``callable`` 中做任何事,可以把 ``true_fn`` 或 ``false_fn`` 设为 ``None`` ,此时本API会把该 ``callable`` 视为简单返回 ``None`` 。 diff --git a/doc/fluid/api_cn/layers_cn/continuous_value_model_cn.rst b/doc/fluid/api_cn/layers_cn/continuous_value_model_cn.rst index ab90721deaba47438ae18ad2e0598df54cf150be..bdc610b1739df00f4d04f96ee501cfff7e107a94 100644 --- a/doc/fluid/api_cn/layers_cn/continuous_value_model_cn.rst +++ b/doc/fluid/api_cn/layers_cn/continuous_value_model_cn.rst @@ -5,6 +5,12 @@ continuous_value_model .. 
py:function:: paddle.fluid.layers.continuous_value_model(input, cvm, use_cvm=True) +:alias_main: paddle.nn.functional.continuous_value_model +:alias: paddle.nn.functional.continuous_value_model,paddle.nn.functional.extension.continuous_value_model +:old_api: paddle.fluid.layers.continuous_value_model + + + **注意:该OP仅支持在CPU运行。** 该OP在CTR项目中,用于去除或处理 ``input`` 中的展示和点击值。 @@ -31,7 +37,8 @@ continuous_value_model input=input, size=[100, 11], dtype='float32') - ones = fluid.layers.fill_constant_batch_size_like(input=label, shape=[-1, 1], dtype="int64", value=1) + label_shape = fluid.layers.shape(label) + ones = fluid.layers.fill_constant(shape=[label_shape[0], 1], dtype="int64", value=1) show_clk = fluid.layers.cast(fluid.layers.concat([ones, label], axis=1), dtype='float32') show_clk.stop_gradient = True input_with_cvm = fluid.layers.continuous_value_model(embed, show_clk, True) diff --git a/doc/fluid/api_cn/layers_cn/conv2d_cn.rst b/doc/fluid/api_cn/layers_cn/conv2d_cn.rst index fb5bcd097e69e16ea166cc7c131641835473c09f..4ae868dfea111869eaa664cc4ca7a763c86651c3 100644 --- a/doc/fluid/api_cn/layers_cn/conv2d_cn.rst +++ b/doc/fluid/api_cn/layers_cn/conv2d_cn.rst @@ -3,10 +3,13 @@ conv2d ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.layers.conv2d(input, num_filters, filter_size, stride=1, padding=0, dilation=1, groups=None, param_attr=None, bias_attr=None, use_cudnn=True, act=None, name=None, data_format="NCHW") +:api_attr: 声明式编程模式(静态图) + + + 该OP是二维卷积层(convolution2D layer),根据输入、滤波器、步长(stride)、填充(padding)、膨胀比例(dilations)一组参数计算输出特征层大小。输入和输出是NCHW或NHWC格式,其中N是批尺寸,C是通道数,H是特征高度,W是特征宽度。滤波器是MCHW格式,M是输出图像通道数,C是输入图像通道数,H是滤波器高度,W是滤波器宽度。如果组数(groups)大于1,C等于输入图像通道数除以组数的结果。详情请参考UFLDL's : `卷积 `_ 。如果bias_attr不为False,卷积计算会添加偏置项。如果指定了激活函数类型,相应的激活函数会作用在最终结果上。 对每个输入X,有等式: diff --git a/doc/fluid/api_cn/layers_cn/conv2d_transpose_cn.rst b/doc/fluid/api_cn/layers_cn/conv2d_transpose_cn.rst index cefc7b688eb7943c127b87f0ccc5efd0c07f9b40..3223f07188db98723157b6d8a53336dd9291653a 100644 --- a/doc/fluid/api_cn/layers_cn/conv2d_transpose_cn.rst +++ b/doc/fluid/api_cn/layers_cn/conv2d_transpose_cn.rst @@ -3,10 +3,13 @@ conv2d_transpose ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.layers.conv2d_transpose(input, num_filters, output_size=None, filter_size=None, padding=0, stride=1, dilation=1, groups=None, param_attr=None, bias_attr=None, use_cudnn=True, act=None, name=None, data_format='NCHW') +:api_attr: 声明式编程模式(静态图) + + + 二维转置卷积层(Convlution2D transpose layer) 该层根据输入(input)、滤波器(filter)和卷积核膨胀比例(dilations)、步长(stride)、填充(padding)来计算输出特征层大小或者通过output_size指定输出特征层大小。输入(Input)和输出(Output)为NCHW或NHWC格式,其中N为批尺寸,C为通道数(channel),H为特征层高度,W为特征层宽度。滤波器是MCHW格式,M是输出图像通道数,C是输入图像通道数,H是滤波器高度,W是滤波器宽度。如果组数大于1,C等于输入图像通道数除以组数的结果。转置卷积的计算过程相当于卷积的反向计算。转置卷积又被称为反卷积(但其实并不是真正的反卷积)。欲了解转置卷积层细节,请参考下面的说明和 参考文献_ 。如果参数bias_attr不为False, 转置卷积计算会添加偏置项。如果act不为None,则转置卷积计算之后添加相应的激活函数。 diff --git a/doc/fluid/api_cn/layers_cn/conv3d_cn.rst b/doc/fluid/api_cn/layers_cn/conv3d_cn.rst index 0462892a9c7f3e025f2a18aa1895074faa7bacfa..7468274e7a27a5ce3984d4fb1e0d13daa2237b05 100644 --- a/doc/fluid/api_cn/layers_cn/conv3d_cn.rst +++ b/doc/fluid/api_cn/layers_cn/conv3d_cn.rst @@ -3,10 +3,13 @@ conv3d ------------------------------- -**注意:该API仅支持【静态图】模式** .. 
py:function:: paddle.fluid.layers.conv3d(input, num_filters, filter_size, stride=1, padding=0, dilation=1, groups=None, param_attr=None, bias_attr=None, use_cudnn=True, act=None, name=None, data_format="NCDHW") +:api_attr: 声明式编程模式(静态图) + + + 该OP是三维卷积层(convolution3D layer),根据输入、滤波器、步长(stride)、填充(padding)、膨胀比例(dilations)一组参数计算得到输出特征层大小。输入和输出是NCDHW或NDWHC格式,其中N是批尺寸,C是通道数,D是特征层深度,H是特征层高度,W是特征层宽度。三维卷积(Convlution3D)和二维卷积(Convlution2D)相似,但多了一维深度信息(depth)。如果bias_attr不为False,卷积计算会添加偏置项。如果指定了激活函数类型,相应的激活函数会作用在最终结果上。 对每个输入X,有等式: diff --git a/doc/fluid/api_cn/layers_cn/conv3d_transpose_cn.rst b/doc/fluid/api_cn/layers_cn/conv3d_transpose_cn.rst index 9832db8a8618dba95d2e90a1616b7b8e9b2e820b..0331df5d6a4fab39a78c68190b0b1066ec5681a9 100644 --- a/doc/fluid/api_cn/layers_cn/conv3d_transpose_cn.rst +++ b/doc/fluid/api_cn/layers_cn/conv3d_transpose_cn.rst @@ -3,10 +3,13 @@ conv3d_transpose ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.layers.conv3d_transpose(input, num_filters, output_size=None, filter_size=None, padding=0, stride=1, dilation=1, groups=None, param_attr=None, bias_attr=None, use_cudnn=True, act=None, name=None, data_format='NCDHW') +:api_attr: 声明式编程模式(静态图) + + + 三维转置卷积层(Convlution3D transpose layer) 该层根据输入(input)、滤波器(filter)和卷积核膨胀比例(dilations)、步长(stride)、填充(padding)来计算输出特征层大小或者通过output_size指定输出特征层大小。输入(Input)和输出(Output)为NCDHW或者NDHWC格式。其中N为批尺寸,C为通道数(channel),D为特征深度,H为特征层高度,W为特征层宽度。转置卷积的计算过程相当于卷积的反向计算。转置卷积又被称为反卷积(但其实并不是真正的反卷积)。欲了解卷积转置层细节,请参考下面的说明和 参考文献_ 。如果参数bias_attr不为False, 转置卷积计算会添加偏置项。如果act不为None,则转置卷积计算之后添加相应的激活函数。 diff --git a/doc/fluid/api_cn/layers_cn/cos_cn.rst b/doc/fluid/api_cn/layers_cn/cos_cn.rst index 9d0f89e5fff1242363ce8d11f9762638e32070dd..7d0727576e0351584d8d2bd5fc7e0fa2f9f32546 100644 --- a/doc/fluid/api_cn/layers_cn/cos_cn.rst +++ b/doc/fluid/api_cn/layers_cn/cos_cn.rst @@ -5,34 +5,36 @@ cos .. py:function:: paddle.fluid.layers.cos(x, name=None) +:alias_main: paddle.cos +:alias: paddle.cos,paddle.tensor.cos,paddle.tensor.math.cos +:old_api: paddle.fluid.layers.cos + + + 余弦函数。 +输入范围是 `(-inf, inf)` , 输出范围是 `[-1,1]`。 + .. math:: out = cos(x) - - 参数: - - **x** (Variable) - 该OP的输入为多维Tensor,数据类型为float32,float64。 - - **name** (str, 可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为None。 - + - x (Tensor) - 输入的Tensor,数据类型为:float32、float64 、float16。 + - name (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 -返回:输出为Tensor,与 ``x`` 维度相同、数据类型相同。 +返回:输出Tensor,与 ``x`` 维度相同、数据类型相同。 -返回类型:Variable +返回类型:Tensor **代码示例**: .. code-block:: python - import paddle.fluid as fluid - import numpy as np + import paddle + paddle.disable_static() - input_cos = np.array([[-1,np.pi],[1,15.6]]) - with fluid.dygraph.guard(): - x = fluid.dygraph.to_variable(input_cos) - y = fluid.layers.cos(x) - print(y.numpy()) - # [[ 0.54030231 -1. ] - # [ 0.54030231 -0.99417763]] + x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) + out = paddle.cos(x) + print(out.numpy()) + # [0.92106099 0.98006658 0.99500417 0.95533649] diff --git a/doc/fluid/api_cn/layers_cn/cos_sim_cn.rst b/doc/fluid/api_cn/layers_cn/cos_sim_cn.rst index b6fb77dfc50d1ddcfe8c0251f55a8f0fdb26c9b5..bee83df05f929eb2808de57958d66198e8531d05 100644 --- a/doc/fluid/api_cn/layers_cn/cos_sim_cn.rst +++ b/doc/fluid/api_cn/layers_cn/cos_sim_cn.rst @@ -5,6 +5,9 @@ cos_sim .. py:function:: paddle.fluid.layers.cos_sim(X, Y) + + + 余弦相似度算子(Cosine Similarity Operator) .. 
math:: diff --git a/doc/fluid/api_cn/layers_cn/cosine_decay_cn.rst b/doc/fluid/api_cn/layers_cn/cosine_decay_cn.rst index b45682cdec8fd7774254b0e36892d321fa8fd94e..9117c06dd61c6bef978cde326fd64075fdeb7657 100644 --- a/doc/fluid/api_cn/layers_cn/cosine_decay_cn.rst +++ b/doc/fluid/api_cn/layers_cn/cosine_decay_cn.rst @@ -5,6 +5,12 @@ cosine_decay .. py:function:: paddle.fluid.layers.cosine_decay(learning_rate, step_each_epoch, epochs) +:alias_main: paddle.nn.functional.cosine_decay +:alias: paddle.nn.functional.cosine_decay,paddle.nn.functional.learning_rate.cosine_decay +:old_api: paddle.fluid.layers.cosine_decay + + + 使用 cosine decay 的衰减方式进行学习率调整。 在训练模型时,建议一边进行训练一边降低学习率。 通过使用此方法,学习速率将通过如下cosine衰减策略进行衰减: diff --git a/doc/fluid/api_cn/layers_cn/create_array_cn.rst b/doc/fluid/api_cn/layers_cn/create_array_cn.rst index 805910b24d947e58e6fa292871bc6018c6e8fc21..1426688b11bafd00a8a115aaf3f3eefcd907979b 100644 --- a/doc/fluid/api_cn/layers_cn/create_array_cn.rst +++ b/doc/fluid/api_cn/layers_cn/create_array_cn.rst @@ -6,6 +6,9 @@ create_array .. py:function:: paddle.fluid.layers.create_array(dtype) + + + 此OP创建一个LoDTensorArray,它可以用作 :ref:`cn_api_fluid_layers_array\_write` , :ref:`cn_api_fluid_layers_array\_read` OP的输入,以及和 :ref:`cn_api_fluid_layers_While` OP 一起创建RNN网络。 diff --git a/doc/fluid/api_cn/layers_cn/create_global_var_cn.rst b/doc/fluid/api_cn/layers_cn/create_global_var_cn.rst index 89d53c61fd91cc22e63cf5267fcb088bafd57414..97034b6dc1c6bc60e8be576cb2332deb1f1d77f7 100644 --- a/doc/fluid/api_cn/layers_cn/create_global_var_cn.rst +++ b/doc/fluid/api_cn/layers_cn/create_global_var_cn.rst @@ -5,6 +5,9 @@ create_global_var .. py:function:: paddle.fluid.layers.create_global_var(shape,value,dtype,persistable=False,force_cpu=False,name=None) + + + 该OP在全局块中创建一个新的Tensor,Tensor的值为 ``value`` 。 参数: @@ -26,7 +29,7 @@ create_global_var import paddle.fluid as fluid import paddle.fluid.layers as layers var = layers.create_global_var(shape=[2,3], value=1.0, dtype='float32', - persistable=True, force_cpu=True, name='new_var') + persistable=True, force_cpu=True, name='new_var') diff --git a/doc/fluid/api_cn/layers_cn/create_parameter_cn.rst b/doc/fluid/api_cn/layers_cn/create_parameter_cn.rst index 224d2bb577b685294f64a2b800a9da5298ac38c4..4176d7507f80e9767b32755f1169fbcbd502bb6a 100644 --- a/doc/fluid/api_cn/layers_cn/create_parameter_cn.rst +++ b/doc/fluid/api_cn/layers_cn/create_parameter_cn.rst @@ -3,10 +3,13 @@ create_parameter ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.layers.create_parameter(shape,dtype,name=None,attr=None,is_bias=False,default_initializer=None) +:api_attr: 声明式编程模式(静态图) + + + 该OP创建一个参数。该参数是一个可学习的变量, 拥有梯度并且可优化。 **注意:这是一个低级别的API。如果您希望自己创建新的op,这个API将非常有用,无需使用layers。** diff --git a/doc/fluid/api_cn/layers_cn/create_py_reader_by_data_cn.rst b/doc/fluid/api_cn/layers_cn/create_py_reader_by_data_cn.rst index 99eecff605fce6644dbbe5d03d2b69ac8696c2cc..ed42dceaa52ef3be13ffc4d2d50d7458fb317584 100644 --- a/doc/fluid/api_cn/layers_cn/create_py_reader_by_data_cn.rst +++ b/doc/fluid/api_cn/layers_cn/create_py_reader_by_data_cn.rst @@ -3,10 +3,13 @@ create_py_reader_by_data ------------------------------- -**注意:该API仅支持【静态图】模式** .. 
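A hedged sketch of how ``cosine_decay`` is typically plugged into an optimizer (the step and epoch counts are illustrative assumptions):

.. code-block:: python

    import paddle.fluid as fluid

    base_lr = 0.1
    lr = fluid.layers.cosine_decay(learning_rate=base_lr,
                                   step_each_epoch=10000,
                                   epochs=120)
    # the decayed learning-rate variable is passed straight to the optimizer
    optimizer = fluid.optimizer.SGD(learning_rate=lr)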
py:function:: paddle.fluid.layers.create_py_reader_by_data(capacity,feed_list,name=None,use_double_buffer=True) +:api_attr: 声明式编程模式(静态图) + + + 创建一个Python端提供数据的reader。该OP与 :ref:`cn_api_fluid_layers_py_reader` 类似,不同点在于它能够从feed变量列表读取数据。 参数: diff --git a/doc/fluid/api_cn/layers_cn/create_tensor_cn.rst b/doc/fluid/api_cn/layers_cn/create_tensor_cn.rst index 651a5c4901cd8664150a94cc326c7ca64ed804d4..d00d67ebbe70f10d06e8d4bd1d96cfcb6ff7145a 100644 --- a/doc/fluid/api_cn/layers_cn/create_tensor_cn.rst +++ b/doc/fluid/api_cn/layers_cn/create_tensor_cn.rst @@ -5,6 +5,12 @@ create_tensor .. py:function:: paddle.fluid.layers.create_tensor(dtype,name=None,persistable=False) +:alias_main: paddle.create_tensor +:alias: paddle.create_tensor,paddle.tensor.create_tensor,paddle.tensor.creation.create_tensor +:old_api: paddle.fluid.layers.create_tensor + + + 创建数据类型为dtype的Tensor。 参数: diff --git a/doc/fluid/api_cn/layers_cn/crf_decoding_cn.rst b/doc/fluid/api_cn/layers_cn/crf_decoding_cn.rst index 8e9eb802f91d110096e819383e46d39b25e690c7..ea5fd7eb1e56351474a5a72181b8e58e1929811d 100644 --- a/doc/fluid/api_cn/layers_cn/crf_decoding_cn.rst +++ b/doc/fluid/api_cn/layers_cn/crf_decoding_cn.rst @@ -3,10 +3,13 @@ crf_decoding ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.layers.crf_decoding(input, param_attr, label=None, length=None) +:api_attr: 声明式编程模式(静态图) + + + 该层读取由 :ref:`cn_api_fluid_layers_linear_chain_crf` 学习的 emission feature weights(发射状态特征的权重)和 transition feature weights (转移特征的权重) 进行解码。 diff --git a/doc/fluid/api_cn/layers_cn/crop_cn.rst b/doc/fluid/api_cn/layers_cn/crop_cn.rst index d4ccdfaf8827cff8542ac4d28590d099a1617869..18d375f06d5e4277e93995db4de71a3006cbf0fb 100644 --- a/doc/fluid/api_cn/layers_cn/crop_cn.rst +++ b/doc/fluid/api_cn/layers_cn/crop_cn.rst @@ -5,6 +5,9 @@ crop .. py:function:: paddle.fluid.layers.crop(x, shape=None, offsets=None, name=None) + + + 该OP根据偏移量(offsets)和形状(shape),裁剪输入张量。 **注意:** 此OP已被弃用,它将在以后的版本中被删除,请使用 :ref:`cn_api_fluid_layers_crop_tensor` 替代 diff --git a/doc/fluid/api_cn/layers_cn/crop_tensor_cn.rst b/doc/fluid/api_cn/layers_cn/crop_tensor_cn.rst index dcaa6758f9b43bdab2b2aca503306deb8ae0bfb9..79b2de8fcc0259615cb1f5eadc6ee2da4b71359d 100644 --- a/doc/fluid/api_cn/layers_cn/crop_tensor_cn.rst +++ b/doc/fluid/api_cn/layers_cn/crop_tensor_cn.rst @@ -5,6 +5,12 @@ crop_tensor .. py:function:: paddle.fluid.layers.crop_tensor(x, shape=None, offsets=None, name=None) +:alias_main: paddle.crop_tensor +:alias: paddle.crop_tensor,paddle.tensor.crop_tensor,paddle.tensor.creation.crop_tensor +:old_api: paddle.fluid.layers.crop_tensor + + + 根据偏移量(offsets)和形状(shape),裁剪输入(x)Tensor。 **示例**: @@ -98,7 +104,7 @@ crop_tensor # crop3.shape = [-1, 2, 3] # offsets is a list in which each element is a constant or Tensor - offsets_var = fluid.data(name="dim1", shape=[1], dtype="int32") + offsets_var = fluid.data(name="offset", shape=[1], dtype="int32") crop4 = fluid.layers.crop_tensor(x, shape=[-1, 2, 3], offsets=[0, 1, offsets_var]) # crop4.shape = [-1, 2, 3] diff --git a/doc/fluid/api_cn/layers_cn/cross_entropy_cn.rst b/doc/fluid/api_cn/layers_cn/cross_entropy_cn.rst index 5babd50838f17d9e99ab649e9dcebade6f17c132..be571e656a7ee8cdfbd68af0038fa31a0a7fafc5 100644 --- a/doc/fluid/api_cn/layers_cn/cross_entropy_cn.rst +++ b/doc/fluid/api_cn/layers_cn/cross_entropy_cn.rst @@ -5,6 +5,12 @@ cross_entropy .. 
py:function:: paddle.fluid.layers.cross_entropy(input, label, soft_label=False, ignore_index=-100) +:alias_main: paddle.nn.functional.cross_entropy +:alias: paddle.nn.functional.cross_entropy,paddle.nn.functional.loss.cross_entropy +:old_api: paddle.fluid.layers.cross_entropy + + + 该OP计算输入input和标签label间的交叉熵,可用于计算硬标签或软标签的交叉熵。 1. 硬标签交叉熵算法:若soft_label = False, :math:`label[i_1, i_2, ..., i_k]` 表示每个样本的硬标签值: diff --git a/doc/fluid/api_cn/layers_cn/ctc_greedy_decoder_cn.rst b/doc/fluid/api_cn/layers_cn/ctc_greedy_decoder_cn.rst index 7df10ca633746d75d105b2c9692b083d200018be..c2d8f67ca85f49c121c2a6530b4ad7bcb1632328 100644 --- a/doc/fluid/api_cn/layers_cn/ctc_greedy_decoder_cn.rst +++ b/doc/fluid/api_cn/layers_cn/ctc_greedy_decoder_cn.rst @@ -5,17 +5,21 @@ ctc_greedy_decoder .. py:function:: paddle.fluid.layers.ctc_greedy_decoder(input, blank, name=None) -**注意:该OP的输入input必须是2维LoDTensor, lod_level为1** + + + 该OP用于贪婪策略解码序列,步骤如下: 1. 获取输入中的每一行的最大值索引,也就是numpy.argmax(input, axis=0)。 2. 对于step1结果中的每个序列,合并两个空格之间的重复部分并删除所有空格。 +该API支持两种输入,LoDTensor和Tensor输入,不同输入的代码样例如下: **样例**: :: + # for lod tensor input 已知: input.data = [[0.6, 0.1, 0.3, 0.1], @@ -45,13 +49,38 @@ ctc_greedy_decoder output.lod = [[2, 1]] + # for tensor input + input.data = [[[0.6, 0.1, 0.3, 0.1], + [0.3, 0.2, 0.4, 0.1], + [0.1, 0.5, 0.1, 0.3], + [0.5, 0.1, 0.3, 0.1]], + + [[0.5, 0.1, 0.3, 0.1], + [0.2, 0.2, 0.2, 0.4], + [0.2, 0.2, 0.1, 0.5], + [0.5, 0.1, 0.3, 0.1]]] + + input_length.data = [[4], [4]] + input.shape = [2, 4, 4] + + step1: Apply argmax to first input sequence which is input.data[0:4]. Then we get: + [[0], [2], [1], [0]], for input.data[4:8] is [[0], [3], [3], [0]], shape is [2,4,1] + step2: Change the argmax result to use padding mode, then argmax result is + [[0, 2, 1, 0], [0, 3, 3, 0]], shape is [2, 4], lod is [], input_length is [[4], [4]] + step3: Apply ctc_align to padding argmax result, padding_value is 0 + + Finally: + output.data = [[2, 1, 0, 0], + [3, 0, 0, 0]] + output_length.data = [[2], [1]] + 参数: - - **input** (Variable) — 变长序列的概率,2维LoDTensor, lod_level为1。它的形状是[Lp, num_classes + 1],其中Lp是所有输入序列长度的和,num_classes是类别数目(不包括空白标签)。数据类型是float32或者float64 + - **input** (Variable) — 变长序列的概率, 在输入为LoDTensor情况下,它是具有LoD信息的二维LoDTensor。 形状为[Lp,num_classes +1],其中Lp是所有输入序列的长度之和,num_classes是真实的类数。 在输入为Tensor情况下,它是带有填充的3-D张量,其形状为[batch_size,N,num_classes +1]。 (不包括空白标签)。 数据类型可以是float32或float64。 - **blank** (int) — Connectionist Temporal Classification (CTC) loss空白标签索引, 其数值属于半开区间[0,num_classes + 1) - **name** (str) — (str|None,可选) – 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None -返回: CTC贪婪解码结果是一个形为(Lp,1)的2维LoDTensor,lod_level为1,其中Lp是所有输出序列的长度之和。如果结果中的所有序列都为空,则输出LoDTensor为[-1],其lod信息为空。 +返回:对于输入为LoDTensor的情况,返回CTC贪婪解码器的结果,即2-D LoDTensor,形状为[Lp,1],数据类型为int64。 “ Lp”是所有输出序列长度的总和。 如果结果中的所有序列均为空,则结果LoDTensor将为[-1],其中LoD为[[]]。对于输入为Tensor的情况,返回一个元组,(output, output_length), 其中,output是一个形状为 [batch_size, N],类型为int64的Tensor。output_length是一个形状为[batch_size, 1],类型为int64的Tensor,表示Tensor输入下,每个输出序列的长度。 返回类型: Variable @@ -60,9 +89,15 @@ ctc_greedy_decoder .. 
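For the hard-label case of ``cross_entropy`` described above, a minimal sketch (layer sizes and variable names are illustrative assumptions) could be:

.. code-block:: python

    import paddle.fluid as fluid

    feat = fluid.data(name='feat', shape=[None, 784], dtype='float32')
    label = fluid.data(name='label', shape=[None, 1], dtype='int64')   # class ids
    prob = fluid.layers.fc(input=feat, size=10, act='softmax')         # normalized probabilities
    loss = fluid.layers.cross_entropy(input=prob, label=label)
    avg_loss = fluid.layers.mean(loss)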
code-block:: python + # for lod mode import paddle.fluid as fluid - x = fluid.layers.data(name='x', shape=[8], dtype='float32') + x = fluid.data(name='x', shape=[None, 8], dtype='float32', lod_level=1) cost = fluid.layers.ctc_greedy_decoder(input=x, blank=0) + # for padding mode + x_pad = fluid.data(name='x_pad', shape=[10, 4, 8], dtype='float32') + x_pad_len = fluid.data(name='x_pad_len', shape=[10, 1], dtype='int64') + out, out_len = fluid.layers.ctc_greedy_decoder(input=x_pad, blank=0, + input_length=x_pad_len) diff --git a/doc/fluid/api_cn/layers_cn/cumsum_cn.rst b/doc/fluid/api_cn/layers_cn/cumsum_cn.rst index 623cf6bc359f1b7ba1127f157c5d1e66627b0b3f..8e6f238b87381651e08b0a0dac4fa441b7605683 100644 --- a/doc/fluid/api_cn/layers_cn/cumsum_cn.rst +++ b/doc/fluid/api_cn/layers_cn/cumsum_cn.rst @@ -5,6 +5,7 @@ cumsum .. py:function:: paddle.fluid.layers.cumsum(x,axis=None,exclusive=None,reverse=None) + 沿给定轴(axis)的元素的累加和。默认结果的第一个元素和输入的第一个元素一致。如果exlusive为True,结果的第一个元素则为0。 参数: diff --git a/doc/fluid/api_cn/layers_cn/data_cn.rst b/doc/fluid/api_cn/layers_cn/data_cn.rst index 298dcf6a85cbec50936ae3c6962157dc3b66e87e..d73c7d55181d91c4115fca7ec8c8f8edcb7a35a5 100644 --- a/doc/fluid/api_cn/layers_cn/data_cn.rst +++ b/doc/fluid/api_cn/layers_cn/data_cn.rst @@ -3,10 +3,12 @@ data ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.layers.data(name, shape, append_batch_size=True, dtype='float32', lod_level=0, type=VarType.LOD_TENSOR, stop_gradient=True) + + + 该OP会在全局block中创建变量(Variable),该全局变量可被计算图中的算子(operator)访问。 注意: diff --git a/doc/fluid/api_cn/layers_cn/data_norm_cn.rst b/doc/fluid/api_cn/layers_cn/data_norm_cn.rst index c98363e67f1e58ed7b232ad44a13ee1e12ad1533..4954dfa22a7c67baa441c7a209ab7df87d8cc4d8 100644 --- a/doc/fluid/api_cn/layers_cn/data_norm_cn.rst +++ b/doc/fluid/api_cn/layers_cn/data_norm_cn.rst @@ -3,10 +3,13 @@ data_norm ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.layers.data_norm(input, act=None, epsilon=1e-05, param_attr=None, data_layout='NCHW', in_place=False, name=None, moving_mean_name=None, moving_variance_name=None, do_model_average_for_mean_and_var=False) +:api_attr: 声明式编程模式(静态图) + + + **数据正则化层** 可用作conv2d和fully_connected操作的正则化函数。 此层所需的数据格式为以下之一: @@ -39,6 +42,7 @@ data_norm - **slot_dim** (int, 默认值为-1) - 一个slot的embedding维度,slot用来表征一类特征的集合,在pslib模式下,通常我们通过slot区分特征id,并从参数服务器(pslib)中提取它们的embedding。embedding的第一维是历史上这个embedding展示的次数。如果本op的输入是由这样的embedding连接而来,那么当这个特征id是新的或空的,则正则化结果可能不实际。为了避免这种情况,我们添加了slot_dim来定位并判断这一维是否为零。如果是的话,我们选择跳过正则化。 - **summary_decay_rate** (float, 默认值为0.9999999) - 更新summary信息时的衰减率。 - **sync_stats** (bool, 默认值False) - 在多GPU卡的场景下可以使用,用来同步多卡间的summary信息。 + - **enable_scale_and_shift** (bool, 默认值False) - 在分布式全局正则化后是否做像batchnorm一样做scale&shift的操作。 返回: 张量变量,是对输入数据进行正则化后的结果。 diff --git a/doc/fluid/api_cn/layers_cn/deformable_conv_cn.rst b/doc/fluid/api_cn/layers_cn/deformable_conv_cn.rst index 55bcb172e759147c4bddbd6b78885652bb538a1a..a74315d5ad7fbcc54bef8869c7ebd11433450b2d 100644 --- a/doc/fluid/api_cn/layers_cn/deformable_conv_cn.rst +++ b/doc/fluid/api_cn/layers_cn/deformable_conv_cn.rst @@ -3,10 +3,13 @@ deformable_conv ------------------------------- -**注意:该API仅支持【静态图】模式** .. 
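A small dygraph sketch of ``cumsum`` along a chosen axis (the input values are illustrative):

.. code-block:: python

    import numpy as np
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        x = fluid.dygraph.to_variable(np.array([[1, 2, 3], [4, 5, 6]]).astype('float32'))
        out = fluid.layers.cumsum(x, axis=1)   # running sum along the last axis
        print(out.numpy())
        # [[ 1.  3.  6.]
        #  [ 4.  9. 15.]]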
py:function:: paddle.fluid.layers.deformable_conv(input, offset, mask, num_filters, filter_size, stride=1, padding=0, dilation=1, groups=None, deformable_groups=None, im2col_step=None, param_attr=None, bias_attr=None, modulated=True, name=None) +:api_attr: 声明式编程模式(静态图) + + + **可变形卷积算子** deformable_conv op对输入4-D Tensor计算2-D可变形卷积。给定输入Tensor x,输出Tensor y,可变形卷积运算如下所示: diff --git a/doc/fluid/api_cn/layers_cn/deformable_roi_pooling_cn.rst b/doc/fluid/api_cn/layers_cn/deformable_roi_pooling_cn.rst index bf9d96c1545afb29e0caa9ab6ad5151560693a82..bcccb58ca3fd10fd79903184f59362b936e63804 100644 --- a/doc/fluid/api_cn/layers_cn/deformable_roi_pooling_cn.rst +++ b/doc/fluid/api_cn/layers_cn/deformable_roi_pooling_cn.rst @@ -5,6 +5,12 @@ deformable_roi_pooling .. py:function:: paddle.fluid.layers.deformable_roi_pooling(input, rois, trans, no_trans=False, spatial_scale=1.0, group_size=[1, 1], pooled_height=1, pooled_width=1, part_size=None, sample_per_part=1, trans_std=0.1, position_sensitive=False, name=None) +:alias_main: paddle.nn.functional.deformable_roi_pooling +:alias: paddle.nn.functional.deformable_roi_pooling,paddle.nn.functional.vision.deformable_roi_pooling +:old_api: paddle.fluid.layers.deformable_roi_pooling + + + 可变形感兴趣区域(ROI)池化层 该OP对输入进行了可形变的感兴趣区域(ROI)池化操作。如同 `可形变卷积网络 `_ 描述的一样,它将为每个bin中的像素获取一个偏移量,以便于在合适的位置进行池化。在完成可变形感兴趣区域(ROI)池化操作之后,批量数将变为候选框的数量。 @@ -42,7 +48,7 @@ deformable_roi_pooling .. code-block:: python - #position_sensitive为False + #position_sensitive=False import paddle.fluid as fluid input = fluid.data(name="input", @@ -68,7 +74,7 @@ deformable_roi_pooling trans_std=0.1, position_sensitive=False) - #position_sensitive为True + #position_sensitive=True import paddle.fluid as fluid input = fluid.data(name="input", diff --git a/doc/fluid/api_cn/layers_cn/density_prior_box_cn.rst b/doc/fluid/api_cn/layers_cn/density_prior_box_cn.rst index 0b122b32ea4db9298f6d74218ff1091cd3b8ced4..c88bff497978ffa81b31b332df065ff2e265c77a 100644 --- a/doc/fluid/api_cn/layers_cn/density_prior_box_cn.rst +++ b/doc/fluid/api_cn/layers_cn/density_prior_box_cn.rst @@ -5,6 +5,12 @@ density_prior_box .. py:function:: paddle.fluid.layers.density_prior_box(input, image, densities=None, fixed_sizes=None, fixed_ratios=None, variance=[0.1, 0.1, 0.2, 0.2], clip=False, steps=[0.0, 0.0], offset=0.5, flatten_to_2d=False, name=None) +:alias_main: paddle.nn.functional.density_prior_box +:alias: paddle.nn.functional.density_prior_box,paddle.nn.functional.vision.density_prior_box +:old_api: paddle.fluid.layers.density_prior_box + + + 该OP为SSD算法(Single Shot MultiBox Detector)生成density prior box,在每个 ``input`` 的位置产生N个候选框,其中,N由 ``densities`` , ``fixed_sizes`` 和 ``fixed_ratios`` 来计算。生成的每个输入位置附近的候选框中心(网格点)由 ``densities`` 和 ``density prior box`` 的数量计算,其中 ``density prior box`` 的数量由 ``fixed_sizes`` 和 ``fixed_ratios`` 决定。``fixed_sizes`` 和 ``densities`` 的大小一致。 diff --git a/doc/fluid/api_cn/layers_cn/detection_output_cn.rst b/doc/fluid/api_cn/layers_cn/detection_output_cn.rst index b2c639da9c2c03d1113504430a6d8f41a45122cd..9d39e7881dc9b191dcec464b3a190404aef933d1 100644 --- a/doc/fluid/api_cn/layers_cn/detection_output_cn.rst +++ b/doc/fluid/api_cn/layers_cn/detection_output_cn.rst @@ -5,6 +5,12 @@ detection_output .. 
py:function:: paddle.fluid.layers.detection_output(loc, scores, prior_box, prior_box_var, background_label=0, nms_threshold=0.3, nms_top_k=400, keep_top_k=200, score_threshold=0.01, nms_eta=1.0) +:alias_main: paddle.nn.functional.detection_output +:alias: paddle.nn.functional.detection_output,paddle.nn.functional.vision.detection_output +:old_api: paddle.fluid.layers.detection_output + + + 给定回归位置偏移、置信度以及先验框信息计算检测的输出,执行步骤如下: 1.根据先验框(``prior_box``)信息和回归位置偏移解码出预测框坐标。 diff --git a/doc/fluid/api_cn/layers_cn/diag_cn.rst b/doc/fluid/api_cn/layers_cn/diag_cn.rst index 692b64dc5d30e4ecfaf43e8120ccb1972f2971f4..f08ae61e0e44c620ee20cadc979c91450aec9010 100644 --- a/doc/fluid/api_cn/layers_cn/diag_cn.rst +++ b/doc/fluid/api_cn/layers_cn/diag_cn.rst @@ -5,6 +5,12 @@ diag .. py:function:: paddle.fluid.layers.diag(diagonal) +:alias_main: paddle.diag +:alias: paddle.diag,paddle.tensor.diag,paddle.tensor.creation.diag +:old_api: paddle.fluid.layers.diag + + + 该OP创建一个方阵,使用输入diagonal来指定方阵的对角线元素的值。 参数: diff --git a/doc/fluid/api_cn/layers_cn/dice_loss_cn.rst b/doc/fluid/api_cn/layers_cn/dice_loss_cn.rst index 52e0ce76b376bc7c215415aec549bc2a8d8c19ad..af63877e359bf830d35985556e6e94e215adba75 100644 --- a/doc/fluid/api_cn/layers_cn/dice_loss_cn.rst +++ b/doc/fluid/api_cn/layers_cn/dice_loss_cn.rst @@ -5,6 +5,12 @@ dice_loss .. py:function:: paddle.fluid.layers.dice_loss(input, label, epsilon=1e-05) +:alias_main: paddle.nn.functional.dice_loss +:alias: paddle.nn.functional.dice_loss,paddle.nn.functional.loss.dice_loss +:old_api: paddle.fluid.layers.dice_loss + + + 该OP用来比较预测结果跟标签之间的相似度,通常用于二值图像分割,即标签为二值,也可以做多标签的分割。 dice_loss定义为: diff --git a/doc/fluid/api_cn/layers_cn/distribute_fpn_proposals_cn.rst b/doc/fluid/api_cn/layers_cn/distribute_fpn_proposals_cn.rst index b12dc2cfba777b7552ac6c83cac8fcb103263783..33cbb83a59486ec75eef9962b0af3035f61e3415 100644 --- a/doc/fluid/api_cn/layers_cn/distribute_fpn_proposals_cn.rst +++ b/doc/fluid/api_cn/layers_cn/distribute_fpn_proposals_cn.rst @@ -5,6 +5,12 @@ distribute_fpn_proposals .. py:function:: paddle.fluid.layers.distribute_fpn_proposals(fpn_rois, min_level, max_level, refer_level, refer_scale, name=None) +:alias_main: paddle.nn.functional.distribute_fpn_proposals +:alias: paddle.nn.functional.distribute_fpn_proposals,paddle.nn.functional.vision.distribute_fpn_proposals +:old_api: paddle.fluid.layers.distribute_fpn_proposals + + + **该op仅支持LoDTensor输入**。在 Feature Pyramid Networks(FPN)模型中,需要依据proposal的尺度和参考尺度与级别将所有proposal分配到不同的FPN级别中。 此外,为了恢复proposals的顺序,我们返回一个数组,该数组表示当前proposals中的原始RoIs索引。 要计算每个RoI的FPN级别,公式如下: .. math:: diff --git a/doc/fluid/api_cn/layers_cn/double_buffer_cn.rst b/doc/fluid/api_cn/layers_cn/double_buffer_cn.rst index f5a6ed9f70ee895f42259d1204089a4fa4c9446a..f9fe3b110ab2db024599bf4be1687a15f4c1006c 100644 --- a/doc/fluid/api_cn/layers_cn/double_buffer_cn.rst +++ b/doc/fluid/api_cn/layers_cn/double_buffer_cn.rst @@ -6,6 +6,9 @@ double_buffer .. py:function:: paddle.fluid.layers.double_buffer(reader, place=None, name=None) + + + 生成一个双缓冲队列Reader。Reader类有DecoratedReader和FileReader,其中DecoratedReader又可以细分成CustomReader和BufferedReader。这里是基于BufferedReader,数据将复制到具有双缓冲队列的位置(由place指定),如果 ``place=None`` 则将使用executor执行的位置。 参数: @@ -24,13 +27,13 @@ double_buffer .. 
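As a sketch of the ``dice_loss`` wiring for binary segmentation (shapes are illustrative assumptions; the last input dimension holds the class probabilities):

.. code-block:: python

    import paddle.fluid as fluid

    x = fluid.data(name='data', shape=[None, 224, 224, 2], dtype='float32')
    label = fluid.data(name='label', shape=[None, 224, 224, 1], dtype='int32')
    predictions = fluid.layers.softmax(x)   # probabilities over the last (class) axis
    loss = fluid.layers.dice_loss(input=predictions, label=label)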
code-block:: python - import paddle.fluid as fluid - reader = fluid.layers.open_files(filenames=['mnist.recordio'], - shapes=[[-1, 784], [-1, 1]], - lod_levels=[0, 0], - dtypes=['float32', 'int64']) - reader = fluid.layers.double_buffer(reader) - img, label = fluid.layers.read_file(reader) + import paddle.fluid as fluid + reader = fluid.layers.py_reader(capacity=64, + shapes=[(-1, 1, 28, 28), (-1, 1)], + dtypes=['float32', 'int64'], + use_double_buffer=False) + reader = fluid.layers.double_buffer(reader) + image, label = fluid.layers.read_file(reader) diff --git a/doc/fluid/api_cn/layers_cn/dropout_cn.rst b/doc/fluid/api_cn/layers_cn/dropout_cn.rst index ec3149aa993374469a5fc597ab3eef962fd3e349..8c748ec91af6395405bb46a43b3e94b59ebfa153 100644 --- a/doc/fluid/api_cn/layers_cn/dropout_cn.rst +++ b/doc/fluid/api_cn/layers_cn/dropout_cn.rst @@ -5,6 +5,12 @@ dropout .. py:function:: paddle.fluid.layers.dropout(x,dropout_prob,is_test=False,seed=None,name=None,dropout_implementation='downgrade_in_infer') +:alias_main: paddle.nn.functional.dropout +:alias: paddle.nn.functional.dropout,paddle.nn.functional.common.dropout +:old_api: paddle.fluid.layers.dropout + + + dropout操作 丢弃或者保持x的每个元素独立。Dropout是一种正则化手段,通过在训练过程中阻止神经元节点间的相关性来减少过拟合。根据给定的丢弃概率,dropout操作符按丢弃概率随机将一些神经元输出设置为0,其他的仍保持不变。 diff --git a/doc/fluid/api_cn/layers_cn/dynamic_decode_cn.rst b/doc/fluid/api_cn/layers_cn/dynamic_decode_cn.rst index d21497a511bd9b74edac6be9b06ca51b00841642..ea289057a13d6d8d572c15350a6d87d3d072f03d 100644 --- a/doc/fluid/api_cn/layers_cn/dynamic_decode_cn.rst +++ b/doc/fluid/api_cn/layers_cn/dynamic_decode_cn.rst @@ -4,10 +4,13 @@ dynamic_decode ------------------------------- -**注意:该API仅支持【静态图】模式** -.. py:method:: dynamic_decode(decoder, inits=None, max_step_num=None, output_time_major=False, **kwargs): - +.. 
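A minimal sketch contrasting the two ``dropout_implementation`` modes documented for ``dropout`` (shapes are illustrative):

.. code-block:: python

    import paddle.fluid as fluid

    x = fluid.data(name='x', shape=[None, 32], dtype='float32')
    # default 'downgrade_in_infer': outputs are scaled by (1 - dropout_prob) at inference
    dropped = fluid.layers.dropout(x, dropout_prob=0.5)
    # 'upscale_in_train': kept units are scaled up during training, inference is a no-op
    kept = fluid.layers.dropout(x, dropout_prob=0.5,
                                dropout_implementation='upscale_in_train')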
py:method:: dynamic_decode(decoder, inits=None, max_step_num=None, output_time_major=False, impute_finished=False, is_test=False, return_length=False, **kwargs): + +:api_attr: 声明式编程模式(静态图) + + + 该接口重复执行 :code:`decoder.step()` 直到 其返回的表示完成状态的Tensor中的值全部为True或解码步骤达到 :code:`max_step_num`。 :code:`decode.initialize()` 会在解码循环之前被调用一次。如果 :code:`decoder` 实现了 :code:`finalize` 方法,则 :code:`decoder.finalize()` 在解码循环后将被调用一次。 @@ -17,9 +20,12 @@ dynamic_decode - **inits** (object,可选) - 传递给 :code:`decoder.initialize` 的参数。默认为None。 - **max_step_num** (int,可选) - 最大步数。如果未提供,解码直到解码过程完成( :code:`decode.step()` 返回的表示完成状态的Tensor中的值全部为True)。默认为None。 - **output_time_major** (bool,可选) - 指明最终输出(此方法的第一个返回值)中包含的Tensor的数据布局。如果为False,其将使用batch优先的数据布局, 此时的形状为 :math:`[batch\_size,seq\_len,...]`。如果为True,其将使用time优先的数据布局,此时的形状为 :math:`[seq\_len,batch\_size,...]`。默认值为False。 + - **impute_finished** (bool,可选) - 若为True,对于当前批次中完成状态为结束的样本,将会拷贝其上一步的状态,而非像未结束的实例那样使用 :code:`decode.step()` 返回的 :code:`next_states` 作为新的状态,这保证了返回的最终状态 :code:`final_states` 是正确的;否则,不会区分是否结束,也没有这个拷贝操作。若 :code:`final_states` 会被使用,则这里应该设置为True,这会一定程度上影响速度。默认为False。 + - **is_test** (bool,可选) - 标识是否是预测模式,预测模式下内存占用会更少。默认为False。 + - **return_length** (bool,可选) - 标识是否在返回的元组中额外包含一个存放了所有解码序列实际长度的Tensor。默认为False。 - **kwargs** - 其他命名关键字参数。这些参数将传递给 :code:`decoder.step`。 -返回:一个二元组 :code:`(final_outputs,final_states)`, 其包含了最终的输出和状态,这两者都是Tensor或Tensor的嵌套结构。:code:`final_outputs` 具有与 :code:`decoder.output_dtype` 相同的结构和数据类型, 其中的每个tensor都是对所有解码时间步对应输出的堆叠。 这些tensor也可能会通过 :code:`decoder.finalize` 进行修改。:code:`final_states` 是最后时间步的状态,和 :code:`decoder.initialize` 返回的初始状态具有相同的结构,其中的tensor也具有相同的形状 和数据类型。 +返回:若 :code:`return_length` 为True,则返回三元组 :code:`(final_outputs, final_states, sequence_lengths)` ,否则返回二元组 :code:`(final_outputs, final_states)` 。 :code:`final_outputs, final_states` 包含了最终的输出和状态,这两者都是Tensor或Tensor的嵌套结构。:code:`final_outputs` 具有与 :code:`decoder.step()` 返回的 :code:`outputs` 相同的结构和数据类型, 且其中的每个tensor都是将所有解码步中与其对应的的输出进行堆叠的结果;如果 :code:`decoder` 实现了 :code:`finalize` 方法,这些tensor也可能会通过 :code:`decoder.finalize()` 进行修改。:code:`final_states` 是最后时间步的状态,和 :code:`decoder.initialize()` 返回的初始状态具有相同的结构,形状和数据类型。:code:`sequence_lengths` 是int64类型的tensor,和 :code:`decoder.initialize()` 返回的 :code:`finished` 具有相同的形状,其保存了所有解码序列实际长度。 返回类型:tuple diff --git a/doc/fluid/api_cn/layers_cn/dynamic_gru_cn.rst b/doc/fluid/api_cn/layers_cn/dynamic_gru_cn.rst index 5ef7a469dcd7ca8c881f06e45b2f36c021e736b8..995ac50067ac3dfbf2b0cbbcaaf8ba0a417f8c12 100644 --- a/doc/fluid/api_cn/layers_cn/dynamic_gru_cn.rst +++ b/doc/fluid/api_cn/layers_cn/dynamic_gru_cn.rst @@ -3,10 +3,13 @@ dynamic_gru ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.layers.dynamic_gru(input, size, param_attr=None, bias_attr=None, is_reverse=False, gate_activation='sigmoid', candidate_activation='tanh', h_0=None, origin_mode=False) +:api_attr: 声明式编程模式(静态图) + + + **注意:该OP的输入只能是LoDTensor,如果您需要处理的输入是Tensor类型,请使用StaticRNN(fluid.layers.** :ref:`cn_api_fluid_layers_StaticRNN` **)。** diff --git a/doc/fluid/api_cn/layers_cn/dynamic_lstm_cn.rst b/doc/fluid/api_cn/layers_cn/dynamic_lstm_cn.rst index d5635229f2bbdc02c2831f0b4b7a6aed0142dc68..5c6bc406ba23f5d80dcf6586a9b32ead36019b72 100644 --- a/doc/fluid/api_cn/layers_cn/dynamic_lstm_cn.rst +++ b/doc/fluid/api_cn/layers_cn/dynamic_lstm_cn.rst @@ -3,10 +3,13 @@ dynamic_lstm ------------------------------- -**注意:该API仅支持【静态图】模式** .. 
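Since ``dynamic_gru`` consumes a LoDTensor that has already been projected to ``3 * hidden`` units, a typical static-graph sketch (dictionary and hidden sizes are illustrative assumptions) is:

.. code-block:: python

    import paddle.fluid as fluid

    dict_dim, emb_dim, hidden_dim = 128, 64, 512
    data = fluid.layers.data(name='sequence', shape=[1], dtype='int64', lod_level=1)
    emb = fluid.layers.embedding(input=data, size=[dict_dim, emb_dim])
    x = fluid.layers.fc(input=emb, size=hidden_dim * 3)   # gate + candidate projection
    hidden = fluid.layers.dynamic_gru(input=x, size=hidden_dim)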
py:function:: paddle.fluid.layers.dynamic_lstm(input, size, h_0=None, c_0=None, param_attr=None, bias_attr=None, use_peepholes=True, is_reverse=False, gate_activation='sigmoid', cell_activation='tanh', candidate_activation='tanh', dtype='float32', name=None) +:api_attr: 声明式编程模式(静态图) + + + 该OP实现了 LSTM,即 Long-Short Term Memory(长短期记忆)运算 - `Hochreiter, S., & Schmidhuber, J. (1997) `_。 .. note:: diff --git a/doc/fluid/api_cn/layers_cn/dynamic_lstmp_cn.rst b/doc/fluid/api_cn/layers_cn/dynamic_lstmp_cn.rst index ce3a45f9fcb681b00a52af17ee2f84ca5b01918e..2306948c00f814e4cee1aa39e819646ab204a91e 100644 --- a/doc/fluid/api_cn/layers_cn/dynamic_lstmp_cn.rst +++ b/doc/fluid/api_cn/layers_cn/dynamic_lstmp_cn.rst @@ -2,10 +2,13 @@ dynamic_lstmp ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.layers.dynamic_lstmp(input, size, proj_size, param_attr=None, bias_attr=None, use_peepholes=True, is_reverse=False, gate_activation='sigmoid', cell_activation='tanh', candidate_activation='tanh', proj_activation='tanh', dtype='float32', name=None, h_0=None, c_0=None, cell_clip=None, proj_clip=None) +:api_attr: 声明式编程模式(静态图) + + + .. note:: 在实现的时候为了提升效率,用户必须将输入先进行线性映射,将维度为 [T, hidden_size] 的输入映射为 [T, 4×hidden_size] 输入,然后再传给该OP。 diff --git a/doc/fluid/api_cn/layers_cn/edit_distance_cn.rst b/doc/fluid/api_cn/layers_cn/edit_distance_cn.rst index 0cf28491a2fa0b0f3bfc27e3d95cc80217c6e5de..5d35868e1336614c624f5565429537bb6698e146 100644 --- a/doc/fluid/api_cn/layers_cn/edit_distance_cn.rst +++ b/doc/fluid/api_cn/layers_cn/edit_distance_cn.rst @@ -6,6 +6,12 @@ edit_distance .. py:function:: paddle.fluid.layers.edit_distance(input,label,normalized=True,ignored_tokens=None, input_length=None, label_length=None) +:alias_main: paddle.nn.functional.edit_distance +:alias: paddle.nn.functional.edit_distance,paddle.nn.functional.loss.edit_distance +:old_api: paddle.fluid.layers.edit_distance + + + 该OP计算一批给定字符串及其参照字符串间的编辑距离。编辑距离也称Levenshtein距离,通过计算从一个字符串变成另一个字符串所需的最少操作步骤来衡量两个字符串的相异度。这里的操作包括插入、删除和替换。 比如给定假设字符串A=“kitten”和参照字符串B=“sitting”,从A变换成B编辑距离为3,至少需要两次替换和一次插入: diff --git a/doc/fluid/api_cn/layers_cn/elementwise_add_cn.rst b/doc/fluid/api_cn/layers_cn/elementwise_add_cn.rst index d1312347fc76bfb8054e7a477a69f6e891b9f563..7414eae16ca524e5388de7a09c2e104aa0174570 100644 --- a/doc/fluid/api_cn/layers_cn/elementwise_add_cn.rst +++ b/doc/fluid/api_cn/layers_cn/elementwise_add_cn.rst @@ -5,6 +5,12 @@ elementwise_add .. py:function:: paddle.fluid.layers.elementwise_add(x, y, axis=-1, act=None, name=None) +:alias_main: paddle.elementwise_add +:alias: paddle.elementwise_add,paddle.tensor.elementwise_add,paddle.tensor.math.elementwise_add +:old_api: paddle.fluid.layers.elementwise_add + + + 该OP是逐元素相加算子,输入 ``x`` 与输入 ``y`` 逐元素相加,并将各个位置的输出元素保存到返回结果中。 等式为: @@ -101,7 +107,7 @@ elementwise_add "y": np.random.randint(1, 5, size=[5]).astype('float32') } x = fluid.layers.data(name="x", shape=[2,3,4,5], dtype='float32') - y = fluid.layers.data(name="y", shape=[3,4], dtype='float32') + y = fluid.layers.data(name="y", shape=[5], dtype='float32') # z = x + y z = fluid.layers.elementwise_add(x, y, axis=3) place = fluid.CPUPlace() diff --git a/doc/fluid/api_cn/layers_cn/elementwise_div_cn.rst b/doc/fluid/api_cn/layers_cn/elementwise_div_cn.rst index 562fe7d021eb37a199bc897a3bfc74c2dc560930..d4d12f36b20d39247345933090f2c8b2215b14df 100644 --- a/doc/fluid/api_cn/layers_cn/elementwise_div_cn.rst +++ b/doc/fluid/api_cn/layers_cn/elementwise_div_cn.rst @@ -5,6 +5,12 @@ elementwise_div .. 
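Analogously, ``dynamic_lstm`` expects its input to be pre-projected to ``4 * hidden`` units; a hedged sketch (vocabulary and layer sizes are illustrative assumptions):

.. code-block:: python

    import paddle.fluid as fluid

    vocab_size, emb_dim, hidden_dim = 10000, 256, 512
    data = fluid.layers.data(name='x', shape=[1], dtype='int64', lod_level=1)
    emb = fluid.layers.embedding(input=data, size=[vocab_size, emb_dim], is_sparse=True)
    # linear projection to 4 * hidden_dim, as required by the OP
    forward_proj = fluid.layers.fc(input=emb, size=hidden_dim * 4, bias_attr=False)
    forward, cell = fluid.layers.dynamic_lstm(input=forward_proj,
                                              size=hidden_dim * 4,
                                              use_peepholes=False)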
py:function:: paddle.fluid.layers.elementwise_div(x, y, axis=-1, act=None, name=None) +:alias_main: paddle.elementwise_div +:alias: paddle.elementwise_div,paddle.tensor.elementwise_div,paddle.tensor.math.elementwise_div +:old_api: paddle.fluid.layers.elementwise_div + + + 该OP是逐元素相除算子,输入 ``x`` 与输入 ``y`` 逐元素相除,并将各个位置的输出元素保存到返回结果中。 等式是: @@ -101,7 +107,7 @@ elementwise_div "y": np.random.randint(1, 5, size=[5]).astype('float32') } x = fluid.layers.data(name="x", shape=[2,3,4,5], dtype='float32') - y = fluid.layers.data(name="y", shape=[3,4], dtype='float32') + y = fluid.layers.data(name="y", shape=[5], dtype='float32') z = fluid.layers.elementwise_div(x, y, axis=3) # z = x / y place = fluid.CPUPlace() diff --git a/doc/fluid/api_cn/layers_cn/elementwise_floordiv_cn.rst b/doc/fluid/api_cn/layers_cn/elementwise_floordiv_cn.rst index 0b6b3b127e633fc2c8d74db6afa0e0e7467c0aec..bd19626b6dc05d5e7f59571960eb2366ce4dd389 100644 --- a/doc/fluid/api_cn/layers_cn/elementwise_floordiv_cn.rst +++ b/doc/fluid/api_cn/layers_cn/elementwise_floordiv_cn.rst @@ -5,6 +5,12 @@ elementwise_floordiv .. py:function:: paddle.fluid.layers.elementwise_floordiv(x, y, axis=-1, act=None, name=None) +:alias_main: paddle.elementwise_floordiv +:alias: paddle.elementwise_floordiv,paddle.tensor.elementwise_floordiv,paddle.tensor.math.elementwise_floordiv +:old_api: paddle.fluid.layers.elementwise_floordiv + + + 该OP是逐元素整除算子,输入 ``x`` 与输入 ``y`` 逐元素整除,并将各个位置的输出元素保存到返回结果中。 等式为: diff --git a/doc/fluid/api_cn/layers_cn/elementwise_max_cn.rst b/doc/fluid/api_cn/layers_cn/elementwise_max_cn.rst index b29f6552b739c9246681f3372e697cd6c4c86c76..b36097fbc71d17711142628ac985e35bdd415f00 100644 --- a/doc/fluid/api_cn/layers_cn/elementwise_max_cn.rst +++ b/doc/fluid/api_cn/layers_cn/elementwise_max_cn.rst @@ -4,6 +4,12 @@ elementwise_max ------------------------------- .. py:function:: paddle.fluid.layers.elementwise_max(x, y, axis=-1, act=None, name=None) + +:alias_main: paddle.elementwise_max +:alias: paddle.elementwise_max,paddle.tensor.elementwise_max,paddle.tensor.math.elementwise_max +:old_api: paddle.fluid.layers.elementwise_max + + 该OP逐元素对比输入的两个多维Tensor,并且把各个位置更大的元素保存到返回结果中。 等式是: diff --git a/doc/fluid/api_cn/layers_cn/elementwise_min_cn.rst b/doc/fluid/api_cn/layers_cn/elementwise_min_cn.rst index 646dcdc1b83ac74b0d1799bd26edfc392a3b3192..22669884e0343785de9263c9c5769ee24fce4bdf 100644 --- a/doc/fluid/api_cn/layers_cn/elementwise_min_cn.rst +++ b/doc/fluid/api_cn/layers_cn/elementwise_min_cn.rst @@ -4,6 +4,12 @@ elementwise_min ------------------------------- .. 
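A small dygraph sketch of ``elementwise_floordiv`` (the input values are illustrative):

.. code-block:: python

    import numpy as np
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        x = fluid.dygraph.to_variable(np.array([7, 8, 9]).astype('int64'))
        y = fluid.dygraph.to_variable(np.array([2, 3, 4]).astype('int64'))
        out = fluid.layers.elementwise_floordiv(x, y)   # element-wise integer division
        print(out.numpy())   # [3 2 2]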
py:function:: paddle.fluid.layers.elementwise_min(x, y, axis=-1, act=None, name=None) + +:alias_main: paddle.elementwise_min +:alias: paddle.elementwise_min,paddle.tensor.elementwise_min,paddle.tensor.math.elementwise_min +:old_api: paddle.fluid.layers.elementwise_min + + 该OP逐元素对比输入的两个多维Tensor,并且把各个位置更小的元素保存到返回结果中。 等式是: @@ -60,7 +66,7 @@ elementwise_min x = fluid.layers.data(name="x", shape=[3], dtype='float32') y = fluid.layers.data(name="y", shape=[3], dtype='float32') - z = fluid.layers.elementwise_max(x, y) + z = fluid.layers.elementwise_min(x, y) place = fluid.CPUPlace() exe = fluid.Executor(place) @@ -84,7 +90,7 @@ elementwise_min x = fluid.layers.data(name="x", shape=[2,3,4,5], dtype='float32') y = fluid.layers.data(name="y", shape=[3,4], dtype='float32') - z = fluid.layers.elementwise_max(x, y, axis=1) + z = fluid.layers.elementwise_min(x, y, axis=1) place = fluid.CPUPlace() exe = fluid.Executor(place) diff --git a/doc/fluid/api_cn/layers_cn/elementwise_mod_cn.rst b/doc/fluid/api_cn/layers_cn/elementwise_mod_cn.rst index c83975a358711cd851c0c4035dc72f2f3b4bd963..5e53e8379be388a068554986709826b6db7b0cc1 100644 --- a/doc/fluid/api_cn/layers_cn/elementwise_mod_cn.rst +++ b/doc/fluid/api_cn/layers_cn/elementwise_mod_cn.rst @@ -5,6 +5,12 @@ elementwise_mod .. py:function:: paddle.fluid.layers.elementwise_mod(x, y, axis=-1, act=None, name=None) +:alias_main: paddle.elementwise_mod +:alias: paddle.elementwise_mod,paddle.tensor.elementwise_mod,paddle.tensor.math.elementwise_mod +:old_api: paddle.fluid.layers.elementwise_mod + + + 该OP是逐元素取模算子,输入 ``x`` 与输入 ``y`` 逐元素取模,并将各个位置的输出元素保存到返回结果中。 等式为: diff --git a/doc/fluid/api_cn/layers_cn/elementwise_mul_cn.rst b/doc/fluid/api_cn/layers_cn/elementwise_mul_cn.rst deleted file mode 100644 index 4a0b56f9ef88c577947a5751f06e52cb43300bfb..0000000000000000000000000000000000000000 --- a/doc/fluid/api_cn/layers_cn/elementwise_mul_cn.rst +++ /dev/null @@ -1,117 +0,0 @@ -.. _cn_api_fluid_layers_elementwise_mul: - -elementwise_mul -------------------------------- - -.. py:function:: paddle.fluid.layers.elementwise_mul(x, y, axis=-1, act=None, name=None) - -该OP是逐元素相乘算子,输入 ``x`` 与输入 ``y`` 逐元素相乘,并将各个位置的输出元素保存到返回结果中。 - -等式是: - -.. math:: - Out = X \odot Y - -- :math:`X` :多维Tensor。 -- :math:`Y` :维度必须小于等于X维度的Tensor。 - -对于这个运算算子有2种情况: - 1. :math:`Y` 的 ``shape`` 与 :math:`X` 相同。 - 2. :math:`Y` 的 ``shape`` 是 :math:`X` 的连续子序列。 - -对于情况2: - 1. 用 :math:`Y` 匹配 :math:`X` 的形状(shape),其中 ``axis`` 是 :math:`Y` 在 :math:`X` 上的起始维度的位置。 - 2. 如果 ``axis`` 为-1(默认值),则 :math:`axis= rank(X)-rank(Y)` 。 - 3. 考虑到子序列, :math:`Y` 的大小为1的尾部维度将被忽略,例如shape(Y)=(2,1)=>(2)。 - -例如: - -.. 
code-block:: text - - shape(X) = (2, 3, 4, 5), shape(Y) = (,) - shape(X) = (2, 3, 4, 5), shape(Y) = (5,) - shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5), with axis=-1(default) or axis=2 - shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1 - shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0 - shape(X) = (2, 3, 4, 5), shape(Y) = (2, 1), with axis=0 - -参数: - - **x** (Variable)- 多维 ``Tensor`` 或 ``LoDTensor`` 。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64``。 - - **y** (Variable)- 多维 ``Tensor`` 或 ``LoDTensor`` 。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64``。 - - **axis** (int32,可选)- ``y`` 的维度对应到 ``x`` 维度上时的索引。默认值为 -1。 - - **act** (str,可选)- 激活函数名称,作用于输出上。默认值为None。详细请参考 :ref:`api_guide_activations` , 常见的激活函数有: ``relu`` ``tanh`` ``sigmoid`` 等。 - - **name** (str,可选)- 输出的名字。默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 - - -返回: 维度与 ``x`` 相同的 ``Tensor`` 或 ``LoDTensor`` ,数据类型与 ``x`` 相同。 - -返回类型: Variable。 - -**代码示例 1** - -.. code-block:: python - - import paddle.fluid as fluid - import numpy as np - def gen_data(): - return { - "x": np.array([2, 3, 4]), - "y": np.array([1, 5, 2]) - } - x = fluid.layers.data(name="x", shape=[3], dtype='float32') - y = fluid.layers.data(name="y", shape=[3], dtype='float32') - z = fluid.layers.elementwise_mul(x, y) - # z = x * y - place = fluid.CPUPlace() - exe = fluid.Executor(place) - z_value = exe.run(feed=gen_data(), - fetch_list=[z.name]) - print(z_value) # [2., 15., 8.] - -**代码示例 2** - -.. code-block:: python - - import paddle.fluid as fluid - import numpy as np - def gen_data(): - return { - "x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'), - "y": np.random.randint(1, 5, size=[3, 4]).astype('float32') - } - x = fluid.layers.data(name="x", shape=[2,3,4,5], dtype='float32') - y = fluid.layers.data(name="y", shape=[3,4], dtype='float32') - z = fluid.layers.elementwise_mul(x, y, axis=1) - # z = x * y - place = fluid.CPUPlace() - exe = fluid.Executor(place) - z_value = exe.run(feed=gen_data(), - fetch_list=[z.name]) - print(z_value) # z.shape=[2,3,4,5] - -**代码示例 3** - -.. code-block:: python - - import paddle.fluid as fluid - import numpy as np - def gen_data(): - return { - "x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'), - "y": np.random.randint(1, 5, size=[5]).astype('float32') - } - x = fluid.layers.data(name="x", shape=[2,3,4,5], dtype='float32') - y = fluid.layers.data(name="y", shape=[3,4], dtype='float32') - z = fluid.layers.elementwise_mul(x, y, axis=3) - # z = x * y - place = fluid.CPUPlace() - exe = fluid.Executor(place) - z_value = exe.run(feed=gen_data(), - fetch_list=[z.name]) - print(z_value) # z.shape=[2,3,4,5] - - - - - - diff --git a/doc/fluid/api_cn/layers_cn/elementwise_pow_cn.rst b/doc/fluid/api_cn/layers_cn/elementwise_pow_cn.rst index cff5ed77cd0b17764e2a19ec629c31027c4c25c4..6f08b313a4a95d7ba76aef597fc07a43fcbbe884 100644 --- a/doc/fluid/api_cn/layers_cn/elementwise_pow_cn.rst +++ b/doc/fluid/api_cn/layers_cn/elementwise_pow_cn.rst @@ -4,6 +4,12 @@ elementwise_pow ------------------------------- .. 
py:function:: paddle.fluid.layers.elementwise_pow(x, y, axis=-1, act=None, name=None) + +:alias_main: paddle.elementwise_pow +:alias: paddle.elementwise_pow,paddle.tensor.elementwise_pow,paddle.tensor.math.elementwise_pow +:old_api: paddle.fluid.layers.elementwise_pow + + 该OP逐元素对输入Tensor进行幂操作。 等式是: diff --git a/doc/fluid/api_cn/layers_cn/elementwise_sub_cn.rst b/doc/fluid/api_cn/layers_cn/elementwise_sub_cn.rst index 97c546b05ff5343a8d45554a9bfadd05a99cad80..c5886ad2e0fa696aad8ae192ec8a0925aa6f1e6b 100644 --- a/doc/fluid/api_cn/layers_cn/elementwise_sub_cn.rst +++ b/doc/fluid/api_cn/layers_cn/elementwise_sub_cn.rst @@ -5,6 +5,12 @@ elementwise_sub .. py:function:: paddle.fluid.layers.elementwise_sub(x, y, axis=-1, act=None, name=None) +:alias_main: paddle.elementwise_sub +:alias: paddle.elementwise_sub,paddle.tensor.elementwise_sub,paddle.tensor.math.elementwise_sub +:old_api: paddle.fluid.layers.elementwise_sub + + + 该OP是逐元素相减算子,输入 ``x`` 与输入 ``y`` 逐元素相减,并将各个位置的输出元素保存到返回结果中。 等式是: @@ -101,7 +107,7 @@ elementwise_sub "y": np.random.randint(1, 5, size=[5]).astype('float32') } x = fluid.layers.data(name="x", shape=[2,3,4,5], dtype='float32') - y = fluid.layers.data(name="y", shape=[3,4], dtype='float32') + y = fluid.layers.data(name="y", shape=[5], dtype='float32') z = fluid.layers.elementwise_sub(x, y, axis=3) # z = x - y place = fluid.CPUPlace() diff --git a/doc/fluid/api_cn/layers_cn/elu_cn.rst b/doc/fluid/api_cn/layers_cn/elu_cn.rst index 3211148097ec928a3291c550f032bca18031eaf5..6d527ce9359d4b2561e7018b72e79839d158d8c2 100644 --- a/doc/fluid/api_cn/layers_cn/elu_cn.rst +++ b/doc/fluid/api_cn/layers_cn/elu_cn.rst @@ -5,6 +5,12 @@ elu .. py:function:: paddle.fluid.layers.elu(x, alpha=1.0, name=None) +:alias_main: paddle.nn.functional.elu +:alias: paddle.nn.functional.elu,paddle.nn.functional.activation.elu +:old_api: paddle.fluid.layers.elu + + + ELU激活层(ELU Activation Operator) 根据 https://arxiv.org/abs/1511.07289 对输入Tensor中每个元素应用以下计算。 diff --git a/doc/fluid/api_cn/layers_cn/embedding_cn.rst b/doc/fluid/api_cn/layers_cn/embedding_cn.rst index d494c7f657a6fc9ddb66f324753cb5eb4c7db529..b5ad3607114832a799d763d7d68b05a60d65a55d 100644 --- a/doc/fluid/api_cn/layers_cn/embedding_cn.rst +++ b/doc/fluid/api_cn/layers_cn/embedding_cn.rst @@ -3,10 +3,13 @@ embedding ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.layers.embedding(input, size, is_sparse=False, is_distributed=False, padding_idx=None, param_attr=None, dtype='float32') +:api_attr: 声明式编程模式(静态图) + + + 嵌入层(Embedding Layer) **注意:此OP将在未来的版本中被移除!该OP要求输入Tensor shape的最后一维必须为1。推荐使用fluid.** :ref:`cn_api_fluid_embedding` 。 @@ -74,6 +77,8 @@ embedding .. code-block:: python import paddle.fluid as fluid + import numpy as np + data = fluid.layers.data(name='sequence', shape=[1], dtype='int64', lod_level=1) # 示例 1 diff --git a/doc/fluid/api_cn/layers_cn/equal_cn.rst b/doc/fluid/api_cn/layers_cn/equal_cn.rst index 68643936258fc67173fb2a55111d128af965104f..9a66e76cedc7d3997fe8e6cbfefca91232f5734b 100644 --- a/doc/fluid/api_cn/layers_cn/equal_cn.rst +++ b/doc/fluid/api_cn/layers_cn/equal_cn.rst @@ -3,15 +3,16 @@ equal ------------------------------- -.. py:function:: paddle.fluid.layers.equal(x,y,cond=None) +.. 
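A dygraph sketch of ``elu`` with a non-default ``alpha`` (the input values are illustrative):

.. code-block:: python

    import numpy as np
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        x = fluid.dygraph.to_variable(np.array([[-1., 6.], [1., 15.6]]).astype('float32'))
        y = fluid.layers.elu(x, alpha=0.2)   # alpha * (exp(x) - 1) for negative inputs
        print(y.numpy())
        # [[-0.12642412  6.        ]
        #  [ 1.         15.6       ]]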
py:function:: paddle.fluid.layers.equal(x, y, cond=None, name=None) + 该OP返回 :math:`x==y` 逐元素比较x和y是否相等,x和y的维度应该相同。 参数: - **x** (Variable) - 输入Tensor,支持的数据类型包括 float32, float64,int32, int64。 - **y** (Variable) - 输入Tensor,支持的数据类型包括 float32, float64, int32, int64。 - - **cond** (Variable,可选) - 逐元素比较的结果Tensor,可以是程序中已经创建的任何Variable。默认值为None,此时将创建新的Variable来保存输出结果。 - - **force_cpu** (bool,可选) – 是否强制将输出Tensor存储在CPU。默认值为None,表示将输出Tensor存储在CPU内存上;如果为False,则将输出Tensor存储在运行设备内存上。 + - **cond** (Variable,可选) – 如果为None,则创建一个Tensor来作为进行比较的输出结果,该Tensor的shape和数据类型和输入x一致;如果不为None,则将Tensor作为该OP的输出,数据类型和数据shape需要和输入x一致。默认值为None。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 返回:输出结果的Tensor,输出Tensor的shape和输入一致,Tensor数据类型为bool。 @@ -23,12 +24,16 @@ equal import paddle.fluid as fluid import numpy as np + out_cond =fluid.data(name="input1", shape=[2], dtype='bool') label = fluid.layers.assign(np.array([3, 3], dtype="int32")) limit = fluid.layers.assign(np.array([3, 2], dtype="int32")) - out0 = fluid.layers.equal(x=label,y=limit) #out1=[True, False] - out1 = fluid.layers.equal(x=label,y=limit, cond=out_cond) #out2=[True, False] out_cond=[True, False] - out2 = fluid.layers.equal(x=label,y=limit,force_cpu=False) #out3=[True, False] - out3 = label == limit # out3=[True, False] + label_cond = fluid.layers.assign(np.array([1, 2], dtype="int32")) + + out1 = fluid.layers.equal(x=label,y=limit) #out1=[True, False] + out2 = fluid.layers.equal(x=label_cond,y=limit, cond=out_cond) #out2=[False, True] out_cond=[False, True] + + + diff --git a/doc/fluid/api_cn/layers_cn/erf_cn.rst b/doc/fluid/api_cn/layers_cn/erf_cn.rst index 0c80d4fd54998abd2614a7d946b197c5dcef0e95..c30dc7175303dc3890049ad49cf5ee39505ece2c 100644 --- a/doc/fluid/api_cn/layers_cn/erf_cn.rst +++ b/doc/fluid/api_cn/layers_cn/erf_cn.rst @@ -5,6 +5,12 @@ erf .. py:function:: paddle.fluid.layers.erf(x) +:alias_main: paddle.erf +:alias: paddle.erf,paddle.tensor.erf,paddle.tensor.math.erf,paddle.nn.functional.erf,paddle.nn.functional.activation.erf +:old_api: paddle.fluid.layers.erf + + + 逐元素计算 Erf 激活函数。更多细节请参考 `Error function `_ 。 diff --git a/doc/fluid/api_cn/layers_cn/exp_cn.rst b/doc/fluid/api_cn/layers_cn/exp_cn.rst index 4959d916b95bbe336708baad69a1e8f6298d4ea9..33f053e947410c7be2111cfdb826fdf21295059a 100644 --- a/doc/fluid/api_cn/layers_cn/exp_cn.rst +++ b/doc/fluid/api_cn/layers_cn/exp_cn.rst @@ -5,6 +5,12 @@ exp .. py:function:: paddle.fluid.layers.exp(x, name=None) +:alias_main: paddle.exp +:alias: paddle.exp,paddle.tensor.exp,paddle.tensor.math.exp +:old_api: paddle.fluid.layers.exp + + + 对输入,逐元素进行以自然数e为底指数运算。 .. math:: diff --git a/doc/fluid/api_cn/layers_cn/expand_as_cn.rst b/doc/fluid/api_cn/layers_cn/expand_as_cn.rst index d207d284274053f454b05fa162cdda9d31bf4f99..3781c7c9343c5dc73c05f778814b9ca8b4b4bb50 100644 --- a/doc/fluid/api_cn/layers_cn/expand_as_cn.rst +++ b/doc/fluid/api_cn/layers_cn/expand_as_cn.rst @@ -5,6 +5,12 @@ expand_as .. 
py:function:: paddle.fluid.layers.expand_as(x, target_tensor, name=None) +:alias_main: paddle.expand_as +:alias: paddle.expand_as,paddle.tensor.expand_as,paddle.tensor.manipulation.expand_as +:old_api: paddle.fluid.layers.expand_as + + + 该OP会根据输入的variable ``target_tensor`` 对输入 ``x`` 的各维度进行广播。通过 ``target_tensor``的维度来为 ``x`` 的每个维度设置广播的次数,使得x 的维度与target_tensor的维度相同。 ``x`` 的秩应小于等于6。注意, ``target_tensor`` 的秩必须与 ``x`` 的秩相同。 注意:``target_tensor`` 对应的每一维必须能整除输入x中对应的维度,否则会报错。比如,target_tensor的维度为[2,6,2],x为[2,3,1],则整除后为[1,2,2],x广播后维度为[2,6,2]。如果target_tensor的维度为[2,5,2],第二维5不能整除x的第二维3,则会报错。 diff --git a/doc/fluid/api_cn/layers_cn/expand_cn.rst b/doc/fluid/api_cn/layers_cn/expand_cn.rst index 22ef8d21ff7b4b2692d6792ff081b895c96783ba..6bd61b1587e60420df20942a8da8c6382a1eef59 100644 --- a/doc/fluid/api_cn/layers_cn/expand_cn.rst +++ b/doc/fluid/api_cn/layers_cn/expand_cn.rst @@ -5,6 +5,12 @@ expand .. py:function:: paddle.fluid.layers.expand(x, expand_times, name=None) +:alias_main: paddle.expand +:alias: paddle.expand,paddle.tensor.expand,paddle.tensor.manipulation.expand +:old_api: paddle.fluid.layers.expand + + + 该OP会根据参数 ``expand_times`` 对输入 ``x`` 的各维度进行复制。通过参数 ``expand_times`` 来为 ``x`` 的每个维度设置复制次数。 ``x`` 的秩应小于等于6。注意, ``expand_times`` 的大小必须与 ``x`` 的秩相同。以下是一个用例: :: diff --git a/doc/fluid/api_cn/layers_cn/exponential_decay_cn.rst b/doc/fluid/api_cn/layers_cn/exponential_decay_cn.rst index 9753731322800bbe72211fb25152cf5167cc7478..edda7f819c0ba67b216aa4c4426e3ebfa0df6ee5 100644 --- a/doc/fluid/api_cn/layers_cn/exponential_decay_cn.rst +++ b/doc/fluid/api_cn/layers_cn/exponential_decay_cn.rst @@ -5,6 +5,12 @@ exponential_decay .. py:function:: paddle.fluid.layers.exponential_decay(learning_rate,decay_steps,decay_rate,staircase=False) +:alias_main: paddle.nn.functional.exponential_decay +:alias: paddle.nn.functional.exponential_decay,paddle.nn.functional.learning_rate.exponential_decay +:old_api: paddle.fluid.layers.exponential_decay + + + 在学习率上运用指数衰减。 训练模型时,在训练过程中降低学习率。每 ``decay_steps`` 步骤中以 ``decay_rate`` 衰减学习率。 diff --git a/doc/fluid/api_cn/layers_cn/eye_cn.rst b/doc/fluid/api_cn/layers_cn/eye_cn.rst index 3ed5702c446a02f192f5239abf91f99943498106..6044ab6e322580b82e1aa1e830b3c23554506c98 100644 --- a/doc/fluid/api_cn/layers_cn/eye_cn.rst +++ b/doc/fluid/api_cn/layers_cn/eye_cn.rst @@ -3,19 +3,20 @@ eye ------------------------------- -.. py:function:: paddle.fluid.layers.eye(num_rows, num_columns=None, batch_shape=None, dtype='float32') +.. 
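A dygraph sketch of ``expand``, tiling the first dimension twice (values are illustrative):

.. code-block:: python

    import numpy as np
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        x = fluid.dygraph.to_variable(np.array([[1, 2], [3, 4]]).astype('int32'))
        out = fluid.layers.expand(x, expand_times=[2, 1])   # copy dim 0 twice, dim 1 once
        print(out.numpy())
        # [[1 2]
        #  [3 4]
        #  [1 2]
        #  [3 4]]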
py:function:: paddle.fluid.layers.eye(num_rows, num_columns=None, batch_shape=None, dtype='float32', name=None) -该OP用来构建单位矩阵,或一个批次的单位矩阵。 + +该OP用来构建二维Tensor,或一个批次的二维Tensor。 参数: - - **num_rows** (int) - 每一个批矩阵的行数,数据类型为非负int32。 - - **num_columns** (int) - 每一个批矩阵的列数,数据类型为非负int32。若为None,则默认等于num_rows。 - - **batch_shape** (list(int)) - 如若提供,则返回向量的主批次维度将为batch_shape。 - - **dtype** (string) - 返回张量的数据类型,可为int32,int64,float16,float32,float64。 + - **num_rows** (int) - 该批次二维Tensor的行数,数据类型为非负int32。 + - **num_columns** (int, 可选) - 该批次二维Tensor的列数,数据类型为非负int32。若为None,则默认等于num_rows。 + - **batch_shape** (list(int), 可选) - 如若提供,则返回Tensor的主批次维度将为batch_shape。 + - **dtype** (np.dtype|core.VarDesc.VarType|str,可选) - 返回Tensor的数据类型,可为int32,int64,float16,float32,float64,默认数据类型为float32。 + - **name** (str) – 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None。 -返回:shape为batch_shape + [num_rows, num_columns]的张量。 +返回: ``shape`` 为batch_shape + [num_rows, num_columns]的Tensor。 -返回类型:Variable(Tensor|LoDTensor)数据类型为int32,int64,float16,float32,float64的Tensor或者LoDTensor。 **代码示例**: diff --git a/doc/fluid/api_cn/layers_cn/fc_cn.rst b/doc/fluid/api_cn/layers_cn/fc_cn.rst index 47836281b609abd836280d6336fada0f51ec43bc..6613b2d8879ab55272add049cb8461999f52866c 100644 --- a/doc/fluid/api_cn/layers_cn/fc_cn.rst +++ b/doc/fluid/api_cn/layers_cn/fc_cn.rst @@ -3,10 +3,13 @@ fc ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.layers.fc(input, size, num_flatten_dims=1, param_attr=None, bias_attr=None, act=None, name=None) +:api_attr: 声明式编程模式(静态图) + + + **全连接层** diff --git a/doc/fluid/api_cn/layers_cn/fill_constant_batch_size_like_cn.rst b/doc/fluid/api_cn/layers_cn/fill_constant_batch_size_like_cn.rst deleted file mode 100644 index f465c471411d30bbdd216dd38a4ca1ecf3b4b0c9..0000000000000000000000000000000000000000 --- a/doc/fluid/api_cn/layers_cn/fill_constant_batch_size_like_cn.rst +++ /dev/null @@ -1,31 +0,0 @@ -.. _cn_api_fluid_layers_fill_constant_batch_size_like: - -fill_constant_batch_size_like -------------------------------- - -.. py:function:: paddle.fluid.layers.fill_constant_batch_size_like(input,shape,dtype,value,input_dim_idx=0,output_dim_idx=0,force_cpu=False) - -该OP创建一个形状为shape并且数据类型为dtype的Tensor,同时用 ``value`` 中提供的常量初始化该Tensor。在输入为LoDTensor并且input_dim_idx为0的 -时候将输出output_dim_idx维度的大小设置为input输入的batch_size的值,创建的Tensor的stop_gradient属性默认为False。 - -参数: - - **input** (Variable)- 输入的Tensor或者LoDTensor,支持数据类型为 float32, float64, int32, int64,bool。 - - **shape** (list)- 创建Tensor的shape,最后创建的LoDTensor的shape可能会依据input发生变动。 - - **dtype** (np.dtype|core.VarDesc.VarType|str)- 创建Tensor的数据类型,支持数据类型为 float32, float64, int32, int64,bool。 - - **value** (float|int)- 用于初始化输出Tensor的常量数据的值。 - - **input_dim_idx** (int)- 当值为0并且输入为LoDTensor的时候,创建Tensor的output_dim_idx维度会设置为input的batch_size值,默认值为0。 - - **output_dim_idx** (int) -用于指定创建的Tensor哪个维度设置为输入batch_size的值,默认值为0。 - - **force_cpu** (bool)- 用于返回的Tensor是否创建在CPU上,默认值为False,若设为true,则数据在CPU上。 - -返回:创建的Tensor, 数据类型为dtype。 - -返回类型:(Variable) - -**代码示例**: - -.. 
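A short sketch of ``eye``, with and without ``batch_shape`` (the sizes are illustrative):

.. code-block:: python

    import paddle.fluid as fluid

    data = fluid.layers.eye(3, dtype='int32')
    # [[1, 0, 0],
    #  [0, 1, 0],
    #  [0, 0, 1]]

    batched = fluid.layers.eye(2, 3, batch_shape=[4], dtype='float32')
    # shape: [4, 2, 3] -- four stacked 2x3 identity-like matrices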
code-block:: python - - import paddle.fluid as fluid - like = fluid.layers.fill_constant(shape=[1,2], value=10, dtype='int64') #like=[[10, 10]] - data = fluid.layers.fill_constant_batch_size_like( - input=like, shape=[1], value=0, dtype='int64') #like=[[10, 10]] data=[0] \ No newline at end of file diff --git a/doc/fluid/api_cn/layers_cn/fill_constant_cn.rst b/doc/fluid/api_cn/layers_cn/fill_constant_cn.rst index 867c6420855e6915c35aba0eeda346c998fc8e5a..f7af206495c0640ef87b2806666fdf919015463e 100644 --- a/doc/fluid/api_cn/layers_cn/fill_constant_cn.rst +++ b/doc/fluid/api_cn/layers_cn/fill_constant_cn.rst @@ -5,12 +5,18 @@ fill_constant .. py:function:: paddle.fluid.layers.fill_constant(shape,dtype,value,force_cpu=False,out=None) +:alias_main: paddle.fill_constant +:alias: paddle.fill_constant,paddle.tensor.fill_constant,paddle.tensor.creation.fill_constant +:old_api: paddle.fluid.layers.fill_constant + + + 该OP创建一个形状为shape并且数据类型为dtype的Tensor,同时用 ``value`` 中提供的常量初始化该Tensor。 创建的Tensor的stop_gradient属性默认为True。 参数: - - **shape** (tuple|list)- 创建Tensor的形状。 + - **shape** (tuple|list|Variable)- 要创建的LoDTensor或者SelectedRows的形状。 数据类型为int32或int64。 如果shape是一个列表或元组,则其元素应该是形状为[1]的整数或Tensor。 如果shape是Variable,则它应该是一维Tensor。 - **dtype** (np.dtype|core.VarDesc.VarType|str)- 创建LoDTensor或者SelectedRows的数据类型,支持数据类型为float16, float32, float64, int32, int64。 - **value** (float|int)- 用于初始化输出LoDTensor或者SelectedRows的常量数据的值。 - **force_cpu** (bool)- 用于标志LoDTensor或者SelectedRows是否创建在CPU上,默认值为False,若设为true,则数据必须在CPU上。 @@ -21,6 +27,10 @@ fill_constant 返回类型:变量(Variable) +抛出异常: + - :code:`TypeError`: dtype必须是bool,float16,float32,float64,int32和int64之一,输出Tensor的数据类型必须与dtype相同。 + - :code:`TypeError`: 当 `shape` 的数据类型不是list、tuple、Variable。 + **代码示例**: .. code-block:: python @@ -29,3 +39,11 @@ fill_constant data1 = fluid.layers.fill_constant(shape=[2,1], value=0, dtype='int64') #data1=[[0],[0]] data2 = fluid.layers.fill_constant(shape=[2,1], value=5, dtype='int64', out=data1) #data1=[[5],[5]] data2=[[5],[5]] + + # attr shape is a list which contains Variable Tensor. + positive_2 = fluid.layers.fill_constant([1], "int32", 2) + data3 = fluid.layers.fill_constant(shape=[1, positive_2], dtype='float32', value=1.5) # data3=[1.5, 1.5] + + # attr shape is a Variable Tensor. + shape = fluid.layers.fill_constant([1,2], "int32", 2) # shape=[2,2] + data4 = fluid.layers.fill_constant(shape=shape, dtype='bool', value=True) # data4=[[True,True],[True,True]] diff --git a/doc/fluid/api_cn/layers_cn/filter_by_instag_cn.rst b/doc/fluid/api_cn/layers_cn/filter_by_instag_cn.rst index de172115d9cf33913a8c99296021a59c1be93ec1..212eef72dc7a6b3dc1506270feabc78fcf29c2f1 100644 --- a/doc/fluid/api_cn/layers_cn/filter_by_instag_cn.rst +++ b/doc/fluid/api_cn/layers_cn/filter_by_instag_cn.rst @@ -5,6 +5,12 @@ filter_by_instag .. 
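With ``fill_constant_batch_size_like`` removed, a hedged sketch of the replacement pattern used elsewhere in this patch (``shape`` plus ``fill_constant`` with a Tensor in the shape list; the variable names here are illustrative):

.. code-block:: python

    import paddle.fluid as fluid

    like = fluid.data(name='like', shape=[None, 11], dtype='float32')
    like_shape = fluid.layers.shape(like)
    # the leading entry of `shape` is a Tensor, so the constant follows the batch size
    ones = fluid.layers.fill_constant(shape=[like_shape[0], 1], dtype='int64', value=1)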
py:function:: paddle.fluid.layers.filter_by_instag(ins, ins_tag, filter_tag, is_lod) +:alias_main: paddle.nn.functional.filter_by_instag +:alias: paddle.nn.functional.filter_by_instag,paddle.nn.functional.extension.filter_by_instag +:old_api: paddle.fluid.layers.filter_by_instag + + + 此函数通过instag来过滤ins batch,大量属于同样的tags的样本,我们可以指定我们想要的一些tags,属于这些tags的样本将会被保留在输出中,其余的将会移除。比如,一个batch有4个样本,每个样本都有自己的tag表。 Ins | Ins_Tag | @@ -26,6 +32,7 @@ Lod为[1,1,1,1],filter tags为[1],从上面的定义中,带有标签 - **ins_tag** (Variable) - 输入变量(LoDTensor),通常为1维列表,通过lod info来分割。 - **filter_tag** (Variable) - 输入变量(1D Tensor/List),通常为持有tags的列表。 - **is_lod** (Bool) – 指定样本是否为lod tensor的布尔值。 + - **out_val_if_empty** (Int64) - 如果batch内样本被全部过滤,输出会被指定成这个值。 返回:过滤之后的样本(LoDTensor)和 损失权重(Tensor)。 diff --git a/doc/fluid/api_cn/layers_cn/flatten_cn.rst b/doc/fluid/api_cn/layers_cn/flatten_cn.rst index 2f6e36f79b4311a6b846f9d459761949fda7c296..3e314f655a8cf5b597daeac8507732a9c571d130 100644 --- a/doc/fluid/api_cn/layers_cn/flatten_cn.rst +++ b/doc/fluid/api_cn/layers_cn/flatten_cn.rst @@ -5,6 +5,12 @@ flatten .. py:function:: paddle.fluid.layers.flatten(x, axis=1, name=None) +:alias_main: paddle.flatten +:alias: paddle.flatten,paddle.tensor.flatten,paddle.tensor.manipulation.flatten +:old_api: paddle.fluid.layers.flatten + + + flatten op将输入的多维Tensor展平成2-D Tensor矩阵 例如: diff --git a/doc/fluid/api_cn/layers_cn/floor_cn.rst b/doc/fluid/api_cn/layers_cn/floor_cn.rst index de905921a47b49a5cdd0d374b5303d55b392f34e..e1dc4edb72600bf63fd26555942c75d69678f7f4 100644 --- a/doc/fluid/api_cn/layers_cn/floor_cn.rst +++ b/doc/fluid/api_cn/layers_cn/floor_cn.rst @@ -5,6 +5,12 @@ floor .. py:function:: paddle.fluid.layers.floor(x, name=None) +:alias_main: paddle.floor +:alias: paddle.floor,paddle.tensor.floor,paddle.tensor.math.floor +:old_api: paddle.fluid.layers.floor + + + 向下取整函数。 .. math:: diff --git a/doc/fluid/api_cn/layers_cn/fsp_matrix_cn.rst b/doc/fluid/api_cn/layers_cn/fsp_matrix_cn.rst index 64b31e83040f91baa74797ab07457f2c205bee16..97937d7c8225b6be736eb2616a7a220ab35f4b7a 100644 --- a/doc/fluid/api_cn/layers_cn/fsp_matrix_cn.rst +++ b/doc/fluid/api_cn/layers_cn/fsp_matrix_cn.rst @@ -5,6 +5,12 @@ fsp_matrix .. py:function:: paddle.fluid.layers.fsp_matrix(x, y) +:alias_main: paddle.nn.functional.fsp_matrix +:alias: paddle.nn.functional.fsp_matrix,paddle.nn.functional.vision.fsp_matrix +:old_api: paddle.fluid.layers.fsp_matrix + + + **FSP matrix op** fsp_matrix op用于计算两个4-D Tensor特征图的求解过程(FSP)矩阵。假设特征图x的形状为 :math:`[x\_channel,h,w]` ,特征图y的形状为 :math:`[y\_channel,h,w]` ,fsp_matrix op分两步得到x和y的fsp矩阵: diff --git a/doc/fluid/api_cn/layers_cn/gather_cn.rst b/doc/fluid/api_cn/layers_cn/gather_cn.rst index 5418839fc02bf8f624e1550c56cbc2f11c58608e..9d0caee7b263b87387fcffe9fc24861b8e3950f3 100644 --- a/doc/fluid/api_cn/layers_cn/gather_cn.rst +++ b/doc/fluid/api_cn/layers_cn/gather_cn.rst @@ -5,7 +5,10 @@ gather .. py:function:: paddle.fluid.layers.gather(input, index, overwrite=True) -根据索引 ``index`` 获取输入(input)的最外层维度的条目,并将它们拼接在一起。 + + + +根据索引 ``index`` 获取输入 ``input`` 的最外层维度的条目,并将它们拼接在一起。 .. 
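A small sketch of ``flatten`` and the resulting 2-D shape (the input shape is illustrative):

.. code-block:: python

    import paddle.fluid as fluid

    x = fluid.data(name='x', shape=[3, 100, 100, 4], dtype='float32')
    out = fluid.layers.flatten(x, axis=2)
    # out shape: [300, 400] -- dims before `axis` merge into the first output
    # dimension, the remaining dims into the second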
math:: @@ -26,13 +29,12 @@ gather 参数: - - **input** (Variable) - 输入, 秩 ``rank >= 1`` , 支持的数据类型包括 int32、int64、float32、float64 和 uint8 (CPU)、float16(GPU) 。 - - **index** (Variable) - 索引,秩 ``rank = 1``, 数据类型为 int32 或 int64。 + - **input** (Tensor) - 输入, 秩 ``rank >= 1`` , 支持的数据类型包括 int32、int64、float32、float64 和 uint8 (CPU)、float16(GPU) 。 + - **index** (Tensor) - 索引,秩 ``rank = 1``, 数据类型为 int32 或 int64。 - **overwrite** (bool) - 具有相同索引时在反向更新梯度的模式。如果为 ``True`` ,则使用覆盖模式更新相同索引的梯度;如果为 ``False`` ,则使用累积模式更新相同索引的梯度。默认值为 ``True`` 。 返回:和输入的秩相同的输出张量。 -返回类型:Variable **代码示例** diff --git a/doc/fluid/api_cn/layers_cn/gather_nd_cn.rst b/doc/fluid/api_cn/layers_cn/gather_nd_cn.rst index d43cbf3cf152e01542f305f30ee0cae1ba16a99f..8a570ff2e2840215d6d84712e244c192a2344d22 100644 --- a/doc/fluid/api_cn/layers_cn/gather_nd_cn.rst +++ b/doc/fluid/api_cn/layers_cn/gather_nd_cn.rst @@ -5,6 +5,7 @@ gather_nd .. py:function:: paddle.fluid.layers.gather_nd(input, index, name=None) + 该OP是 :code:`gather` 的高维推广,并且支持多轴同时索引。 :code:`index` 是一个K维度的张量,它可以认为是从 :code:`input` 中取K-1维张量,每一个元素是一个切片: .. math:: @@ -50,18 +51,18 @@ gather_nd 参数: - - **input** (Variable) - 输入张量,数据类型可以是int32,int64,float32,float64, bool。 - - **index** (Variable) - 输入的索引张量,数据类型为非负int32或非负int64。它的维度 :code:`index.rank` 必须大于1,并且 :code:`index.shape[-1] <= input.rank` 。 - - **name** (string) - 该层的名字,默认值为None,表示会自动命名。 + - **input** (Tensor) - 输入Tensor,数据类型可以是int32,int64,float32,float64, bool。 + - **index** (Tensor) - 输入的索引Tensor,其数据类型为int32或者int64。它的维度 :code:`index.rank` 必须大于1,并且 :code:`index.shape[-1] <= input.rank` 。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 返回:shape为index.shape[:-1] + input.shape[index.shape[-1]:]的Tensor|LoDTensor,数据类型与 :code:`input` 一致。 -返回类型:Variable **代码示例**: .. code-block:: python + import paddle import paddle.fluid as fluid x = fluid.layers.data(name='x', shape=[3, 4, 5], dtype='float32') index = fluid.layers.data(name='index', shape=[2, 2], dtype='int32') diff --git a/doc/fluid/api_cn/layers_cn/gather_tree_cn.rst b/doc/fluid/api_cn/layers_cn/gather_tree_cn.rst index af89db7ddad043cfcac2336bd9b4e51a3eb9abd3..0d8354364fcf965fa0e8a1a81d37a188bb8f0f4b 100644 --- a/doc/fluid/api_cn/layers_cn/gather_tree_cn.rst +++ b/doc/fluid/api_cn/layers_cn/gather_tree_cn.rst @@ -5,6 +5,12 @@ gather_tree .. py:function:: paddle.fluid.layers.gather_tree(ids, parents) +:alias_main: paddle.nn.gather_tree +:alias: paddle.nn.gather_tree,paddle.nn.decode.gather_tree +:old_api: paddle.fluid.layers.gather_tree + + + 该OP在整个束搜索(Beam Search)结束后使用。在搜索结束后,可以获得每个时间步选择的的候选词id及其对应的在搜索树中的parent节点, ``ids`` 和 ``parents`` 的形状布局均为 :math:`[max\_time, batch\_size, beam\_size]` ,该OP从最后一个时间步回溯产生完整的id序列。 diff --git a/doc/fluid/api_cn/layers_cn/gaussian_random_batch_size_like_cn.rst b/doc/fluid/api_cn/layers_cn/gaussian_random_batch_size_like_cn.rst deleted file mode 100644 index bc223787946cf7a111ab2ccf1b3c66c7a4dd7cdc..0000000000000000000000000000000000000000 --- a/doc/fluid/api_cn/layers_cn/gaussian_random_batch_size_like_cn.rst +++ /dev/null @@ -1,40 +0,0 @@ -.. _cn_api_fluid_layers_gaussian_random_batch_size_like: - -gaussian_random_batch_size_like -------------------------------- - -.. 
py:function:: paddle.fluid.layers.gaussian_random_batch_size_like(input, shape, input_dim_idx=0, output_dim_idx=0, mean=0.0, std=1.0, seed=0, dtype='float32') - -使用高斯随机发生器初始化张量。高斯分布的默认均值(mean)为0,默认标准差(std)为 1 。用户可以通过输入参数设置 mean 和 std 。 - -参数: - - **input** (Variable)- 其 input_dim_idx'th 维度指定 batch_size 的张量(Tensor)。 - - **shape** (tuple|list)- 输出的形状。 - - **input_dim_idx** (Int)- (默认值0)输入批量大小维度的索引。 - - **output_dim_idx** (Int)- (默认值0)输出批量大小维度的索引。 - - **mean** (float)- (默认值 0.0)高斯分布的平均值(或中心值)。 - - **std** (float)- (默认值 1.0)高斯分布的标准差(std或spread)。 - - **seed** (int)- (默认值为 0)用于随机数发生器的随机种子。0表示使用系统生成的种子。请注意,如果seed不为0,则此算子每次将始终生成相同的随机数。 - - **dtype** (np.dtype | core.VarDesc.VarType | str)- 输出数据的类型,float32、float_16、int 等。 - -返回:指定形状的张量,由从高斯分布抽样产生的随机数所填充。 - -返回类型:Variable - - - -**代码示例:** - -.. code-block:: python - - import paddle.fluid as fluid - input = fluid.layers.data(name="input", shape=[13, 11], dtype='float32') - - out = fluid.layers.gaussian_random_batch_size_like( - input, shape=[-1, 11], mean=1.0, std=2.0) - - - - - - diff --git a/doc/fluid/api_cn/layers_cn/gaussian_random_cn.rst b/doc/fluid/api_cn/layers_cn/gaussian_random_cn.rst index 4ab6da671cfcb437ca0a2dc7ac13963dac70e9e5..059f19be02e3982a43bcec9a3ccbcd25e9bda5fd 100644 --- a/doc/fluid/api_cn/layers_cn/gaussian_random_cn.rst +++ b/doc/fluid/api_cn/layers_cn/gaussian_random_cn.rst @@ -3,27 +3,29 @@ gaussian_random ------------------------------- -.. py:function:: paddle.fluid.layers.gaussian_random(shape, mean=0.0, std=1.0, seed=0, dtype='float32') +.. py:function:: paddle.fluid.layers.gaussian_random(shape, mean=0.0, std=1.0, seed=0, dtype='float32', name=None) -生成数据符合高斯随机分布的 Tensor。 -参数: - - **shape** (Tuple[int] | List[int])- 生成 Tensor 的形状。 - - **mean** (float)- 随机 Tensor 的均值,默认值为 0.0。 - - **std** (float)- 随机 Tensor 的标准差,默认值为 1.0。 - - **seed** (int)- 随机数种子,默认值为 0。注:seed 设置为 0 表示使用系统的随机数种子。注意如果 seed 不为 0,则此算子每次将始终生成相同的随机数。 - - **dtype** (np.dtype | core.VarDesc.VarType | str)- 输出 Tensor 的数据类型,可选值为 float32,float64。 -返回: - - 符合高斯分布的随机 Tensor。形状为 shape,数据类型为 dtype。 +该OP返回数值符合高斯随机分布的Tensor,形状为 ``shape``,数据类型为 ``dtype``。 -返回类型: +参数: + - **shape** (list|tuple|Tensor) - 生成的随机Tensor的形状。如果 ``shape`` 是list、tuple,则其中的元素可以是int,或者是形状为[1]且数据类型为int32、int64的Tensor。如果 ``shape`` 是Tensor,则是数据类型为int32、int64的1-D Tensor。 + - **mean** (float|int, 可选) - 输出Tensor的均值,支持的数据类型:float、int。默认值为0.0。 + - **std** (float|int, 可选) - 输出Tensor的标准差,支持的数据类型:float、int。默认值为1.0。 + - **seed** (int, 可选) - 随机数种子,默认值为 0。注:seed 设置为 0 表示使用系统的随机数种子。注意如果 seed 不为 0,则此算子每次将始终生成相同的随机数。 + - **dtype** (str|np.dtype|core.VarDesc.VarType, 可选) - 输出Tensor的数据类型,支持float32、float64。默认值为float32。 + - **name** (str, 可选) - 输出的名字。一般无需设置,默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 - - Variable +返回: + Tensor:符合高斯随机分布的Tensor,形状为 ``shape``,数据类型为 ``dtype``。 +抛出异常: + - ``TypeError`` - 如果 ``shape`` 的类型不是list、tuple、Tensor。 + - ``TypeError`` - 如果 ``dtype`` 不是float32、float64。 -**代码示例:** +**代码示例**: .. code-block:: python diff --git a/doc/fluid/api_cn/layers_cn/gelu_cn.rst b/doc/fluid/api_cn/layers_cn/gelu_cn.rst index b3a3d06be47156a2f80e2db288e5239040dc8dcd..c234e3b574b2ee089f6cc39b000f28590ad1630d 100644 --- a/doc/fluid/api_cn/layers_cn/gelu_cn.rst +++ b/doc/fluid/api_cn/layers_cn/gelu_cn.rst @@ -5,14 +5,27 @@ gelu .. py:function:: paddle.fluid.layers.gelu(x) +:alias_main: paddle.nn.functional.gelu +:alias: paddle.nn.functional.gelu,paddle.nn.functional.activation.gelu +:old_api: paddle.fluid.layers.gelu + + + 逐元素计算 Gelu激活函数。更多细节请参考 `Gaussian Error Linear Units `_ 。 +如果使用近似计算: + +.. 
math:: + out = 0.5 * x * (1 + tanh(\sqrt{\frac{2}{\pi}} * (x + 0.044715x^{3}))) + +如果不使用近似计算: .. math:: out = 0.5 * x * (1 + erf(\frac{x}{\sqrt{2}})) 参数: - **x** (Variable) - Gelu Op 的输入,多维 Tensor 或 LoDTensor,数据类型为 float32 或 float64。 + - **approximate** (bool, 可选) - 是否使用近似计算,默认值为 False。 返回: - 多维 Tensor 或 LoDTensor, 数据类型为 float32 或 float64, 和输入 x 的数据类型相同,形状和输入 x 相同。 diff --git a/doc/fluid/api_cn/layers_cn/generate_mask_labels_cn.rst b/doc/fluid/api_cn/layers_cn/generate_mask_labels_cn.rst index 5e28303903e7dc8a8c05fb2f109979bb8560785d..51854d3898e3f968a1f6704b7784edbcc6155755 100644 --- a/doc/fluid/api_cn/layers_cn/generate_mask_labels_cn.rst +++ b/doc/fluid/api_cn/layers_cn/generate_mask_labels_cn.rst @@ -5,6 +5,12 @@ generate_mask_labels .. py:function:: paddle.fluid.layers.generate_mask_labels(im_info, gt_classes, is_crowd, gt_segms, rois, labels_int32, num_classes, resolution) +:alias_main: paddle.nn.functional.generate_mask_labels +:alias: paddle.nn.functional.generate_mask_labels,paddle.nn.functional.vision.generate_mask_labels +:old_api: paddle.fluid.layers.generate_mask_labels + + + **为Mask-RCNN生成mask标签** 对于给定的 RoI (Regions of Interest) 和 输入ground truth的分类标签和分割的坐标标签,采样出前景RoI,并返回其在输入 ``rois`` 中索引位置,并对每个RoI生成 :math:`K*M^{2}` 的二值mask标签。K为类别个数,M是RoI特征图大小。这些输出目标一般用于计算mask分支的损失。 diff --git a/doc/fluid/api_cn/layers_cn/generate_proposal_labels_cn.rst b/doc/fluid/api_cn/layers_cn/generate_proposal_labels_cn.rst index 943638a28ab79b8ea65add5f1f5ac50fd1eca6b4..a957d20deaa3b9f05ce21436c49526c760e5094f 100644 --- a/doc/fluid/api_cn/layers_cn/generate_proposal_labels_cn.rst +++ b/doc/fluid/api_cn/layers_cn/generate_proposal_labels_cn.rst @@ -5,6 +5,12 @@ generate_proposal_labels .. py:function:: paddle.fluid.layers.generate_proposal_labels(rpn_rois, gt_classes, is_crowd, gt_boxes, im_info, batch_size_per_im=256, fg_fraction=0.25, fg_thresh=0.25, bg_thresh_hi=0.5, bg_thresh_lo=0.0, bbox_reg_weights=[0.1, 0.1, 0.2, 0.2], class_nums=None, use_random=True, is_cls_agnostic=False, is_cascade_rcnn=False) +:alias_main: paddle.nn.functional.generate_proposal_labels +:alias: paddle.nn.functional.generate_proposal_labels,paddle.nn.functional.vision.generate_proposal_labels +:old_api: paddle.fluid.layers.generate_proposal_labels + + + **注意:该OP无对应的反向OP** 该OP根据RPN预测产出的bounding boxes和groundtruth,抽取出用来计算loss的foreground boxes and background boxes。 diff --git a/doc/fluid/api_cn/layers_cn/generate_proposals_cn.rst b/doc/fluid/api_cn/layers_cn/generate_proposals_cn.rst index 909d969f5e92cc27e54d78a4766ecb44548b9920..7bd143ec306bbeac65430bca235d6dfc7eb32e45 100644 --- a/doc/fluid/api_cn/layers_cn/generate_proposals_cn.rst +++ b/doc/fluid/api_cn/layers_cn/generate_proposals_cn.rst @@ -5,6 +5,12 @@ generate_proposals .. 
py:function:: paddle.fluid.layers.generate_proposals(scores, bbox_deltas, im_info, anchors, variances, pre_nms_top_n=6000, post_nms_top_n=1000, nms_thresh=0.5, min_size=0.1, eta=1.0, name=None) +:alias_main: paddle.nn.functional.generate_proposals +:alias: paddle.nn.functional.generate_proposals,paddle.nn.functional.vision.generate_proposals +:old_api: paddle.fluid.layers.generate_proposals + + + 该OP根据每个检测框为foreground对象的概率,推选生成用于后续检测网络的RoIs。 其中的检测框根据 ``anchors`` 和 ``bbox_deltas`` 计算得到。 diff --git a/doc/fluid/api_cn/layers_cn/get_tensor_from_selected_rows_cn.rst b/doc/fluid/api_cn/layers_cn/get_tensor_from_selected_rows_cn.rst index 4a8f190d3528acebc7668de960be0e31a1ae2aa2..3a9b9268aa345959d50b69667659b77203777a7c 100644 --- a/doc/fluid/api_cn/layers_cn/get_tensor_from_selected_rows_cn.rst +++ b/doc/fluid/api_cn/layers_cn/get_tensor_from_selected_rows_cn.rst @@ -5,6 +5,9 @@ get_tensor_from_selected_rows .. py:function:: paddle.fluid.layers.get_tensor_from_selected_rows(x, name=None) + + + 该OP从SelectedRows类型的输入中获取向量数据,以LoDTensor的形式输出。 diff --git a/doc/fluid/api_cn/layers_cn/greater_equal_cn.rst b/doc/fluid/api_cn/layers_cn/greater_equal_cn.rst index 22f3503105fd6ebc5209855a62c684bf5b68c207..7141718419cc55b0fdb4546dcd16bc89c92a2e35 100644 --- a/doc/fluid/api_cn/layers_cn/greater_equal_cn.rst +++ b/doc/fluid/api_cn/layers_cn/greater_equal_cn.rst @@ -3,7 +3,13 @@ greater_equal ------------------------------- -.. py:function:: paddle.fluid.layers.greater_equal(x, y, cond=None) +.. py:function:: paddle.fluid.layers.greater_equal(x, y, cond=None, name=None) + +:alias_main: paddle.greater_equal +:alias: paddle.greater_equal,paddle.tensor.greater_equal,paddle.tensor.logic.greater_equal +:old_api: paddle.fluid.layers.greater_equal + + 该OP逐元素地返回 :math:`x >= y` 的逻辑值,使用重载算子 `>=` 可以有相同的计算函数效果。 @@ -12,7 +18,7 @@ greater_equal - **x** (Variable) – 进行比较的第一个输入,是一个多维的Tensor,数据类型可以是float32,float64,int32,int64。 - **y** (Variable) – 进行比较的第二个输入,是一个多维的Tensor,数据类型可以是float32,float64,int32,int64。 - **cond** (Variable,可选) – 如果为None,则创建一个Tensor来作为进行比较的输出结果,该Tensor的shape,数据类型和输入x一致;如果不为None,则将Tensor作为该OP的输出,数据shape和数据类型需要和输入x一致。默认值为None。 - - **force_cpu** (bool,可选) – 是否强制将输出Tensor存储在CPU。默认值为None,表示将输出Tensor存储在CPU内存上;如果为False,则将输出Tensor存储在运行设备内存上。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 返回:输出结果的Tensor,数据的shape和输入x一致。 @@ -25,13 +31,11 @@ greater_equal import paddle.fluid as fluid import paddle.fluid.layers as layers import numpy as np - label = fluid.layers.assign(np.array([2, 2], dtype='int32')) - limit = fluid.layers.assign(np.array([2, 3], dtype='int32')) - out_cond =fluid.data(name="input1", shape=[2], dtype='bool') + label = layers.assign(np.array([2, 2], dtype='int32')) + limit = layers.assign(np.array([2, 3], dtype='int32')) out = fluid.layers.greater_equal(x=label, y=limit) #out=[True, False] - out1 = fluid.layers.greater_equal(x=label, y=limit, cond=out_cond) #out1=[True, False], out_cond=[True, False] - out2 = fluid.layers.greater_equal(x=label, y=limit, force_cpu=False) #out2=[True, False] - out3 = label >= limit #out3=[True, False] + out_1 = label >= limit #out1=[True, False] + diff --git a/doc/fluid/api_cn/layers_cn/greater_than_cn.rst b/doc/fluid/api_cn/layers_cn/greater_than_cn.rst index bbb3fa55058b51393c9b501f8bf84bc952789d91..3f208e21ad5433125b3c22fa76ee06968a7c8153 100644 --- a/doc/fluid/api_cn/layers_cn/greater_than_cn.rst +++ b/doc/fluid/api_cn/layers_cn/greater_than_cn.rst @@ -3,7 +3,13 @@ greater_than ------------------------------- -.. 
py:function:: paddle.fluid.layers.greater_than(x, y, cond=None) +.. py:function:: paddle.fluid.layers.greater_than(x, y, cond=None, name=None) + +:alias_main: paddle.greater_than +:alias: paddle.greater_than,paddle.tensor.greater_than,paddle.tensor.logic.greater_than +:old_api: paddle.fluid.layers.greater_than + + 该OP逐元素地返回 :math:`x > y` 的逻辑值,使用重载算子 `>` 可以有相同的计算函数效果。 @@ -11,7 +17,7 @@ greater_than - **x** (Variable) – 进行比较的第一个输入,是一个多维的Tensor,数据类型可以是float32,float64,int32,int64。 - **y** (Variable) – 进行比较的第二个输入,是一个多维的Tensor,数据类型可以是float32,float64,int32,int64。 - **cond** (Variable,可选) – 如果为None,则创建一个Tensor来作为进行比较的输出结果,该Tensor的shape和数据类型和输入x一致;如果不为None,则将Tensor作为该OP的输出,数据类型和数据shape需要和输入x一致。默认值为None。 - - **force_cpu** (bool,可选) – 是否强制将输出Tensor存储在CPU。默认值为None,表示将输出Tensor存储在CPU内存上;如果为False,则将输出Tensor存储在运行设备内存上。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 返回:输出结果的Tensor,数据的shape和输入x一致。 @@ -24,13 +30,11 @@ greater_than import paddle.fluid as fluid import paddle.fluid.layers as layers import numpy as np - label = fluid.layers.assign(np.array([2, 3], dtype='int32')) - limit = fluid.layers.assign(np.array([3, 2], dtype='int32')) - out_cond =fluid.data(name="input1", shape=[2], dtype='bool') + label = layers.assign(np.array([2, 3], dtype='int32')) + limit = layers.assign(np.array([3, 2], dtype='int32')) out = fluid.layers.greater_than(x=label, y=limit) #out=[False, True] - out1 = fluid.layers.greater_than(x=label, y=limit, cond=out_cond) #out1=[False, True], out_cond=[False, True] - out2 = fluid.layers.greater_than(x=label, y=limit, force_cpu=False) #out2=[False, True] - out3 = label > limit #out3=[False, True] + out1 = label > limit #out1=[False, True] + diff --git a/doc/fluid/api_cn/layers_cn/grid_sampler_cn.rst b/doc/fluid/api_cn/layers_cn/grid_sampler_cn.rst index cab3d1e5c9cfbe32119e881cb020c87b6ccd0fbd..8871106520eea576edbddf0b606e8113cfd4e85f 100644 --- a/doc/fluid/api_cn/layers_cn/grid_sampler_cn.rst +++ b/doc/fluid/api_cn/layers_cn/grid_sampler_cn.rst @@ -5,6 +5,12 @@ grid_sampler .. py:function:: paddle.fluid.layers.grid_sampler(x, grid, name=None) +:alias_main: paddle.nn.functional.grid_sampler +:alias: paddle.nn.functional.grid_sampler,paddle.nn.functional.vision.grid_sampler +:old_api: paddle.fluid.layers.grid_sampler + + + 该OP基于flow field网格的对输入X进行双线性插值采样。网格通常由affine_grid生成, shape为[N, H, W, 2],是shape为[N, H, W]的采样点张量的(x, y)坐标。 其中,x坐标是对输入数据X的第四个维度(宽度维度)的索引,y坐标是第三维度(高维度)的索引,最终输出采样值为采样点的4个最接近的角点的双线性插值结果,输出张量的shape为[N, C, H, W]。 diff --git a/doc/fluid/api_cn/layers_cn/group_norm_cn.rst b/doc/fluid/api_cn/layers_cn/group_norm_cn.rst index 91b393788ab27255ae086ca3bc693b3f6ad47f2d..049a9e2f0055ca6ce3220c526f25f0abdd335c3d 100755 --- a/doc/fluid/api_cn/layers_cn/group_norm_cn.rst +++ b/doc/fluid/api_cn/layers_cn/group_norm_cn.rst @@ -3,10 +3,13 @@ group_norm ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.layers.group_norm(input, groups, epsilon=1e-05, param_attr=None, bias_attr=None, act=None, data_layout='NCHW', name=None) +:api_attr: 声明式编程模式(静态图) + + + 参考论文: `Group Normalization `_ 参数: diff --git a/doc/fluid/api_cn/layers_cn/gru_unit_cn.rst b/doc/fluid/api_cn/layers_cn/gru_unit_cn.rst index d4a531b6b10055418ad985f9531fec1f9d5ef225..679e5dfe42bb6c4a3baad252af83d88c3ff737a7 100644 --- a/doc/fluid/api_cn/layers_cn/gru_unit_cn.rst +++ b/doc/fluid/api_cn/layers_cn/gru_unit_cn.rst @@ -3,10 +3,13 @@ gru_unit ------------------------------- -**注意:该API仅支持【静态图】模式** .. 
py:function:: paddle.fluid.layers.gru_unit(input, hidden, size, param_attr=None, bias_attr=None, activation='tanh', gate_activation='sigmoid', origin_mode=False) +:api_attr: 声明式编程模式(静态图) + + + Gated Recurrent Unit(GRU)循环神经网络计算单元。该OP用于完成单个时间步内GRU的计算,支持以下两种计算方式: 如果origin_mode为True,则使用的运算公式来自论文 diff --git a/doc/fluid/api_cn/layers_cn/hard_shrink_cn.rst b/doc/fluid/api_cn/layers_cn/hard_shrink_cn.rst index 4756407f8128d1b32743eff55ba347b523e6e679..d139a6a8fe52833cfa1e682bdcc7940d613b2270 100644 --- a/doc/fluid/api_cn/layers_cn/hard_shrink_cn.rst +++ b/doc/fluid/api_cn/layers_cn/hard_shrink_cn.rst @@ -5,6 +5,12 @@ hard_shrink .. py:function:: paddle.fluid.layers.hard_shrink(x,threshold=None) +:alias_main: paddle.nn.functional.hard_shrink +:alias: paddle.nn.functional.hard_shrink,paddle.nn.functional.activation.hard_shrink +:old_api: paddle.fluid.layers.hard_shrink + + + HardShrink激活函数(HardShrink activation operator) diff --git a/doc/fluid/api_cn/layers_cn/hard_sigmoid_cn.rst b/doc/fluid/api_cn/layers_cn/hard_sigmoid_cn.rst index ef14f86f6c9eef9ce1f8f5e095fccd524313508f..f015a799c57376ee406cbac16d68d8a98b72dda4 100644 --- a/doc/fluid/api_cn/layers_cn/hard_sigmoid_cn.rst +++ b/doc/fluid/api_cn/layers_cn/hard_sigmoid_cn.rst @@ -5,6 +5,12 @@ hard_sigmoid .. py:function:: paddle.fluid.layers.hard_sigmoid(x, slope=0.2, offset=0.5, name=None) +:alias_main: paddle.nn.functional.hard_sigmoid +:alias: paddle.nn.functional.hard_sigmoid,paddle.nn.functional.activation.hard_sigmoid +:old_api: paddle.fluid.layers.hard_sigmoid + + + sigmoid的分段线性逼近激活函数,速度比sigmoid快,详细解释参见 https://arxiv.org/abs/1603.00391。 .. math:: diff --git a/doc/fluid/api_cn/layers_cn/hard_swish_cn.rst b/doc/fluid/api_cn/layers_cn/hard_swish_cn.rst index 0319c855872f0b537d2bf8f66aee912dfa41ce7e..8118ea27a2f2c6f1855e02cd8451a1de6d11151a 100644 --- a/doc/fluid/api_cn/layers_cn/hard_swish_cn.rst +++ b/doc/fluid/api_cn/layers_cn/hard_swish_cn.rst @@ -5,6 +5,12 @@ hard_swish .. py:function:: paddle.fluid.layers.hard_swish(x, threshold=6.0, scale=6.0, offset=3.0, name=None) +:alias_main: paddle.nn.functional.hard_swish +:alias: paddle.nn.functional.hard_swish,paddle.nn.functional.activation.hard_swish +:old_api: paddle.fluid.layers.hard_swish + + + 该OP实现了hard_swish激活函数。hard_swish激活函数在MobileNetV3架构中被提出,相较于swish函数,具有数值稳定性好,计算速度快等优点,具体原理请参考: https://arxiv.org/pdf/1905.02244.pdf :math:`out = \frac{x * (min(max(0, x+offset), threshold))}{scale}` diff --git a/doc/fluid/api_cn/layers_cn/has_inf_cn.rst b/doc/fluid/api_cn/layers_cn/has_inf_cn.rst index 5383e047d8582dd53525c5ef8602c45beac0fe02..8bf1cb7b8bfcdb7e1547f75566f0b641d745baf6 100644 --- a/doc/fluid/api_cn/layers_cn/has_inf_cn.rst +++ b/doc/fluid/api_cn/layers_cn/has_inf_cn.rst @@ -5,6 +5,12 @@ has_inf .. py:function:: paddle.fluid.layers.has_inf(x) +:alias_main: paddle.has_inf +:alias: paddle.has_inf,paddle.tensor.has_inf,paddle.tensor.search.has_inf +:old_api: paddle.fluid.layers.has_inf + + + 检查输入的变量(x)中是否包含无穷数(inf)。 参数: diff --git a/doc/fluid/api_cn/layers_cn/has_nan_cn.rst b/doc/fluid/api_cn/layers_cn/has_nan_cn.rst index c939c816edf2a830e495d5f0515c888c47ba7469..0f66985af75a733ba985155c6fd06255ce46b8cc 100644 --- a/doc/fluid/api_cn/layers_cn/has_nan_cn.rst +++ b/doc/fluid/api_cn/layers_cn/has_nan_cn.rst @@ -5,6 +5,12 @@ has_nan .. 
py:function:: paddle.fluid.layers.has_nan(x) +:alias_main: paddle.has_nan +:alias: paddle.has_nan,paddle.tensor.has_nan,paddle.tensor.search.has_nan +:old_api: paddle.fluid.layers.has_nan + + + 检查输入的变量(x)中是否包含NAN。 参数: diff --git a/doc/fluid/api_cn/layers_cn/hash_cn.rst b/doc/fluid/api_cn/layers_cn/hash_cn.rst index 8f6b1f147ba53888d99dc6153fa0be9ceca61602..a25828a10dc67b9697541d6e83234272ffbbb81a 100644 --- a/doc/fluid/api_cn/layers_cn/hash_cn.rst +++ b/doc/fluid/api_cn/layers_cn/hash_cn.rst @@ -5,6 +5,12 @@ hash .. py:function:: paddle.fluid.layers.hash(input, hash_size, num_hash=1, name=None) +:alias_main: paddle.nn.functional.hash +:alias: paddle.nn.functional.hash,paddle.nn.functional.lod.hash +:old_api: paddle.fluid.layers.hash + + + 该OP将输入 hash 成为一个整数,该数的值小于给定的 ``hash_size`` 。**仅支持输入为LoDTensor**。 该OP使用的哈希算法是:xxHash - `Extremely fast hash algorithm `_ @@ -30,8 +36,8 @@ hash place = fluid.core.CPUPlace() # 构建网络 - x = fluid.layers.data(name="x", shape=[1], dtype="int32", lod_level=1) - res = fluid.layers.hash(name="res",input=x, hash_size=1000, num_hash=4) + x = fluid.data(name="x", shape=[2, 2], dtype="int32", lod_level=1) + res = fluid.layers.hash(name="res", input=x, hash_size=1000, num_hash=4) # 创建CPU执行器 exe = fluid.Executor(place) @@ -39,9 +45,7 @@ hash in1 = np.array([[1,2],[3,4]]).astype("int32") print(in1) - x_i = fluid.core.LoDTensor() - x_i.set(in1,place) - x_i.set_recursive_sequence_lengths([[0,2]]) + x_i = fluid.create_lod_tensor(in1, [[0, 2]], place) res = exe.run(fluid.default_main_program(), feed={'x':x_i}, fetch_list=[res], return_numpy=False) print(np.array(res[0])) # [[[722] diff --git a/doc/fluid/api_cn/layers_cn/hsigmoid_cn.rst b/doc/fluid/api_cn/layers_cn/hsigmoid_cn.rst index 2c3656e21c7600227c0046ee3483d05667bb862c..fa8a9704fe470bea9ceef5c53506ece75b80a44d 100644 --- a/doc/fluid/api_cn/layers_cn/hsigmoid_cn.rst +++ b/doc/fluid/api_cn/layers_cn/hsigmoid_cn.rst @@ -3,10 +3,13 @@ hsigmoid ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.layers.hsigmoid(input, label, num_classes, param_attr=None, bias_attr=None, name=None, path_table=None, path_code=None, is_custom=False, is_sparse=False) +:api_attr: 声明式编程模式(静态图) + + + 层次sigmoid(hierarchical sigmoid),该OP通过构建一个分类二叉树来降低计算复杂度,主要用于加速语言模型的训练过程。 该OP建立的二叉树中每个叶节点表示一个类别(单词),每个非叶子节点代表一个二类别分类器(sigmoid)。对于每个类别(单词),都有一个从根节点到它的唯一路径,hsigmoid累加这条路径上每个非叶子节点的损失得到总损失。 diff --git a/doc/fluid/api_cn/layers_cn/huber_loss_cn.rst b/doc/fluid/api_cn/layers_cn/huber_loss_cn.rst index 62729ffd2114492329bfcbfa0a5bd2a83d769c92..753e38949351d8d6001d3e6df6799a8c27fa68bf 100644 --- a/doc/fluid/api_cn/layers_cn/huber_loss_cn.rst +++ b/doc/fluid/api_cn/layers_cn/huber_loss_cn.rst @@ -5,6 +5,12 @@ huber_loss .. py:function:: paddle.fluid.layers.huber_loss(input, label, delta) +:alias_main: paddle.nn.functional.huber_loss +:alias: paddle.nn.functional.huber_loss,paddle.nn.functional.loss.huber_loss +:old_api: paddle.fluid.layers.huber_loss + + + 该OP计算输入(input)与标签(label)之间的Huber损失。Huber损失是常用的回归损失之一,相较于平方误差损失,Huber损失减小了对异常点的敏感度,更具鲁棒性。 diff --git a/doc/fluid/api_cn/layers_cn/im2sequence_cn.rst b/doc/fluid/api_cn/layers_cn/im2sequence_cn.rst index e60f9f7d6c49087f4f38b70bedb6c7ca920f0d02..88ca3cead90ac752adb9b1326104c0db557c216d 100644 --- a/doc/fluid/api_cn/layers_cn/im2sequence_cn.rst +++ b/doc/fluid/api_cn/layers_cn/im2sequence_cn.rst @@ -3,10 +3,13 @@ im2sequence ------------------------------- -**注意:该API仅支持【静态图】模式** .. 
py:function:: paddle.fluid.layers.im2sequence(input, filter_size=1, stride=1, padding=0, input_image_size=None, out_stride=1, name=None) +:api_attr: 声明式编程模式(静态图) + + + 该OP使用 `filter` 扫描输入的Tensor并将输入Tensor转换成序列,返回值的 `shape={input.batch_size * output_height * output_width, filter_size_height* filter_size_width * input.channels}` 。返回值的timestep的个数为 `output_height * output_width` , 每个timestep的维度是 `filter_size_height* filter_size_width * input.channels` 。其中 `output_height` 和 `output_width` 由以下式计算: diff --git a/doc/fluid/api_cn/layers_cn/image_resize_cn.rst b/doc/fluid/api_cn/layers_cn/image_resize_cn.rst index ed44ceae87b6c27189a2284e9182c9f10409ac48..e0331de812743e569839bb9aa831b4667f0ce6a3 100644 --- a/doc/fluid/api_cn/layers_cn/image_resize_cn.rst +++ b/doc/fluid/api_cn/layers_cn/image_resize_cn.rst @@ -5,6 +5,12 @@ image_resize .. py:function:: paddle.fluid.layers.image_resize(input, out_shape=None, scale=None, name=None, resample='BILINEAR', actual_shape=None, align_corners=True, align_mode=1, data_format='NCHW') +:alias_main: paddle.nn.functional.image_resize +:alias: paddle.nn.functional.image_resize,paddle.nn.functional.vision.image_resize +:old_api: paddle.fluid.layers.image_resize + + + **注意:** 参数 ``actual_shape`` 将被弃用,请使用 ``out_shape`` 替代。 该OP用于调整一个batch中图片的大小。 diff --git a/doc/fluid/api_cn/layers_cn/image_resize_short_cn.rst b/doc/fluid/api_cn/layers_cn/image_resize_short_cn.rst index 438261d30c8b6d537de37cc8a70812ac17c67285..93c8a9583d9329ef27606404b477dacc55bca21d 100644 --- a/doc/fluid/api_cn/layers_cn/image_resize_short_cn.rst +++ b/doc/fluid/api_cn/layers_cn/image_resize_short_cn.rst @@ -5,6 +5,12 @@ image_resize_short .. py:function:: paddle.fluid.layers.image_resize_short(input, out_short_len, resample='BILINEAR') +:alias_main: paddle.nn.functional.image_resize_short +:alias: paddle.nn.functional.image_resize_short,paddle.nn.functional.vision.image_resize_short +:old_api: paddle.fluid.layers.image_resize_short + + + 该OP用于调整一批图片的大小。输入图像的短边将被调整为给定的out_short_len 。输入图像的长边按比例调整大小,最终图像的长宽比保持不变。 参数: diff --git a/doc/fluid/api_cn/layers_cn/increment_cn.rst b/doc/fluid/api_cn/layers_cn/increment_cn.rst index 4a5e20b2151c943ec6261d8235244d3b9e6703bc..8edb5db4ab6fb7c7494c40b7bb9e4f2ebac5e04a 100644 --- a/doc/fluid/api_cn/layers_cn/increment_cn.rst +++ b/doc/fluid/api_cn/layers_cn/increment_cn.rst @@ -5,6 +5,12 @@ increment .. py:function:: paddle.fluid.layers.increment(x, value=1.0, in_place=True) +:alias_main: paddle.increment +:alias: paddle.increment,paddle.tensor.increment,paddle.tensor.math.increment +:old_api: paddle.fluid.layers.increment + + + 使输入Tensor ``x`` 的数据累加 ``value`` , 该OP通常用于循环次数的计数。 参数: diff --git a/doc/fluid/api_cn/layers_cn/inplace_abn_cn.rst b/doc/fluid/api_cn/layers_cn/inplace_abn_cn.rst new file mode 100755 index 0000000000000000000000000000000000000000..11077c5b78fe34fb0387a9d2dcb7bfd3f73c20b0 --- /dev/null +++ b/doc/fluid/api_cn/layers_cn/inplace_abn_cn.rst @@ -0,0 +1,43 @@ +.. _cn_api_fluid_layers_inplace_abn: + +inplace_abn +------------------------------- + +**注意:该API仅支持【静态图】模式** + +.. 
py:function:: paddle.fluid.layers.inplace_abn(input, act=None, is_test=False, momentum=0.9, epsilon=1e-05, param_attr=None, bias_attr=None, data_layout='NCHW', name=None, moving_mean_name=None, moving_variance_name=None, do_model_average_for_mean_and_var=False, use_global_stats=False, act_alpha=1.0) + +就地批正则化激活层(Inplace Activation Batch Normalization Layer) + +此层使用就地内存计算批处理正则化和激活来实现节省内存,有关批量正则化计算,请参见 ``fluid.layers.batch_norm`` ,有关就地激活批正则化的计算,请参考 `In-Place Activated BatchNorm for Memory-Optimized Training of DNNs `_。 + +参数: + - **input** (Variable) - inplace_abn算子的输入特征,是一个Variable类型,输入维度可以是 2, 3, 4, 5。数据类型:float16, float32, float64。 + - **act** (string)- 激活函数类型,可以是leaky_relu、relu、prelu等。默认:None。 + - **is_test** (bool) - 指示它是否在测试阶段,非训练阶段使用训练过程中统计到的全局均值和全局方差。默认:False。 + - **momentum** (float|Variable)- 此值用于计算 moving_mean 和 moving_var,是一个float类型或者一个shape为[1],数据类型为float32的Variable类型。更新公式为: :math:`moving\_mean = moving\_mean * momentum + new\_mean * (1. - momentum)` , :math:`moving\_var = moving\_var * momentum + new\_var * (1. - momentum)` , 默认:0.9。 + - **epsilon** (float)- 加在分母上为了数值稳定的值。默认:1e-5。 + - **param_attr** (ParamAttr|None) :指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。inplace_abn算子默认的权重初始化是1.0。 + - **bias_attr** (ParamAttr|None)- 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。inplace_abn算子默认的偏置初始化是0.0。 + - **data_layout** (string) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 + - **name** (str|None) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值为None。 + - **moving_mean_name** (string)- moving_mean的名称,存储全局均值。如果将其设置为None, ``inplace_abn`` 将随机命名全局均值;否则, ``inplace_abn`` 将命名全局均值为 ``moving_mean_name`` 。默认:None。 + - **moving_variance_name** (string)- moving_variance的名称,存储全局方差。如果将其设置为None, ``inplace_abn`` 将随机命名全局方差;否则, ``inplace_abn`` 将命名全局方差为 ``moving_variance_name`` 。默认:None。 + - **do_model_average_for_mean_and_var** (bool,默认False)- 是否为mean和variance做模型均值。 + - **use_global_stats** (bool) – 是否使用全局均值和方差。 在预测或测试模式下,将use_global_stats设置为True或将is_test设置为True,并且行为是等效的。 在训练模式中,当设置use_global_stats为True时,在训练期间也使用全局均值和方差。默认:False。 + - **act_alpha** (float) – 当 ``act`` 参数为None、leaky-relu、elu时,会使用就地批正则化激活算法,可通过此参数给定leaky-relu、elu的 ``alpha`` 值。默认:1.0。 + + +返回: 维度和输入相同的Tensor,在输入中运用批正则后的结果。 + +返回类型:Variable + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + x = fluid.data(name='x', shape=[3, 7, 3, 7], dtype='float32') + hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w') + hidden2 = fluid.layers.inplace_abn(input=hidden1) + hidden3 = fluid.layers.inplace_abn(input=hidden2, act='leaky_relu', act_alpha=0.2) diff --git a/doc/fluid/api_cn/layers_cn/instance_norm_cn.rst b/doc/fluid/api_cn/layers_cn/instance_norm_cn.rst index d2e819c7a2f916259747633b822d5cfc68e28762..a10ff93900023ad41c2b7e6461764f060eef3293 100644 --- a/doc/fluid/api_cn/layers_cn/instance_norm_cn.rst +++ b/doc/fluid/api_cn/layers_cn/instance_norm_cn.rst @@ -3,10 +3,13 @@ instance_norm ------------------------------- -**注意:该API仅支持【静态图】模式** ..
py:function:: paddle.fluid.layers.instance_norm(input, epsilon=1e-05, param_attr=None, bias_attr=None, name=None) +:api_attr: 声明式编程模式(静态图) + + + 可用作卷积和全连接操作的实例正则化函数,根据每个样本的每个通道的均值和方差信息进行正则化。该层需要的数据格式如下: diff --git a/doc/fluid/api_cn/layers_cn/inverse_time_decay_cn.rst b/doc/fluid/api_cn/layers_cn/inverse_time_decay_cn.rst index 6f98b6de183a63904ce167cc56ba99ca0df01a3a..1b7abf3fab067769c21faf730179f8fc88e32785 100644 --- a/doc/fluid/api_cn/layers_cn/inverse_time_decay_cn.rst +++ b/doc/fluid/api_cn/layers_cn/inverse_time_decay_cn.rst @@ -5,6 +5,12 @@ inverse_time_decay .. py:function:: paddle.fluid.layers.inverse_time_decay(learning_rate, decay_steps, decay_rate, staircase=False) +:alias_main: paddle.nn.functional.inverse_time_decay +:alias: paddle.nn.functional.inverse_time_decay,paddle.nn.functional.learning_rate.inverse_time_decay +:old_api: paddle.fluid.layers.inverse_time_decay + + + 在初始学习率上运用逆时衰减。 训练模型时,最好在训练过程中降低学习率。通过执行该函数,将对初始学习率运用逆时衰减函数。 diff --git a/doc/fluid/api_cn/layers_cn/iou_similarity_cn.rst b/doc/fluid/api_cn/layers_cn/iou_similarity_cn.rst index 4bad4777767080615baa1a51022124bfc6fff5f0..befe69f841dcf40fdf4a70e34a91d59673af978d 100644 --- a/doc/fluid/api_cn/layers_cn/iou_similarity_cn.rst +++ b/doc/fluid/api_cn/layers_cn/iou_similarity_cn.rst @@ -5,6 +5,12 @@ iou_similarity .. py:function:: paddle.fluid.layers.iou_similarity(x, y, box_normalized=True, name=None) +:alias_main: paddle.nn.functional.iou_similarity +:alias: paddle.nn.functional.iou_similarity,paddle.nn.functional.loss.iou_similarity +:old_api: paddle.fluid.layers.iou_similarity + + + **IOU Similarity Operator** 计算两个框列表的intersection-over-union(IOU)。框列表 :math:`X` 应为LoDTensor, :math:`Y` 是普通张量, :math:`X` 成批输入的所有实例共享 :math:`Y` 中的框。给定框A和框B,IOU的运算如下: diff --git a/doc/fluid/api_cn/layers_cn/is_empty_cn.rst b/doc/fluid/api_cn/layers_cn/is_empty_cn.rst index ca6e45c94ca1cf7a5d15db29756299ec89c8f102..90d4eb57a74fc3d0975f29c058236e4ed2c0f7ee 100644 --- a/doc/fluid/api_cn/layers_cn/is_empty_cn.rst +++ b/doc/fluid/api_cn/layers_cn/is_empty_cn.rst @@ -5,6 +5,12 @@ is_empty .. py:function:: paddle.fluid.layers.is_empty(x, cond=None) +:alias_main: paddle.is_empty +:alias: paddle.is_empty,paddle.tensor.is_empty,paddle.tensor.logic.is_empty +:old_api: paddle.fluid.layers.is_empty + + + 测试变量是否为空 参数: diff --git a/doc/fluid/api_cn/layers_cn/isfinite_cn.rst b/doc/fluid/api_cn/layers_cn/isfinite_cn.rst index 4b7c8672a85ec977913c08c551038c2f84ed5966..c9b5474e809b248141ad4632e5a5aedd1e11fec9 100644 --- a/doc/fluid/api_cn/layers_cn/isfinite_cn.rst +++ b/doc/fluid/api_cn/layers_cn/isfinite_cn.rst @@ -5,6 +5,12 @@ isfinite .. py:function:: paddle.fluid.layers.isfinite(x) +:alias_main: paddle.isfinite +:alias: paddle.isfinite,paddle.tensor.isfinite,paddle.tensor.logic.isfinite +:old_api: paddle.fluid.layers.isfinite + + + ``注意:此算子的输入 Tensor / LoDTensor 数据类型必须为 int32 / float / double 之一。`` 测试 x 是否包含无穷值(即 nan 或 inf)。若元素均为有穷数,返回真;否则返回假。 diff --git a/doc/fluid/api_cn/layers_cn/kldiv_loss_cn.rst b/doc/fluid/api_cn/layers_cn/kldiv_loss_cn.rst index 506199f3f85915ccdb25244b37db5a65a3b24581..8fc92afbd9748fe5499eb8e8aa0e9add97d24d8b 100644 --- a/doc/fluid/api_cn/layers_cn/kldiv_loss_cn.rst +++ b/doc/fluid/api_cn/layers_cn/kldiv_loss_cn.rst @@ -5,6 +5,12 @@ kldiv_loss .. 
py:function:: paddle.fluid.layers.kldiv_loss(x, target, reduction='mean', name=None) +:alias_main: paddle.nn.functional.kldiv_loss +:alias: paddle.nn.functional.kldiv_loss,paddle.nn.functional.loss.kldiv_loss +:old_api: paddle.fluid.layers.kldiv_loss + + + 该OP计算输入(X)和输入(Target)之间的Kullback-Leibler散度损失。注意其中输入(X)应为对数概率值,输入(Target)应为概率值。 kL发散损失计算如下: diff --git a/doc/fluid/api_cn/layers_cn/l2_normalize_cn.rst b/doc/fluid/api_cn/layers_cn/l2_normalize_cn.rst index f87458a79eef2d265c8296202ccf51818a1f1fca..f0e50f6af88923236a3ab8661d70ec10fe29ccc0 100644 --- a/doc/fluid/api_cn/layers_cn/l2_normalize_cn.rst +++ b/doc/fluid/api_cn/layers_cn/l2_normalize_cn.rst @@ -5,6 +5,12 @@ l2_normalize .. py:function:: paddle.fluid.layers.l2_normalize(x,axis,epsilon=1e-12,name=None) +:alias_main: paddle.nn.functional.l2_normalize +:alias: paddle.nn.functional.l2_normalize,paddle.nn.functional.norm.l2_normalize +:old_api: paddle.fluid.layers.l2_normalize + + + 该OP计算欧几里得距离之和对x进行归一化。对于1-D张量(系数矩阵的维度固定为0) 计算公式如下: diff --git a/doc/fluid/api_cn/layers_cn/label_smooth_cn.rst b/doc/fluid/api_cn/layers_cn/label_smooth_cn.rst index 488adba84e63c8595f682f1f84227b5cfd2ff608..62292e20d0bbe1245f67a0f58f57a5941f5a9911 100644 --- a/doc/fluid/api_cn/layers_cn/label_smooth_cn.rst +++ b/doc/fluid/api_cn/layers_cn/label_smooth_cn.rst @@ -5,6 +5,12 @@ label_smooth .. py:function:: paddle.fluid.layers.label_smooth(label, prior_dist=None, epsilon=0.1, dtype='float32', name=None) +:alias_main: paddle.nn.functional.label_smooth +:alias: paddle.nn.functional.label_smooth,paddle.nn.functional.common.label_smooth +:old_api: paddle.fluid.layers.label_smooth + + + 该OP实现了标签平滑的功能。标签平滑是一种对分类器层进行正则化的机制,称为标签平滑正则化(LSR)。由于直接优化正确标签的对数似然可能会导致过拟合,降低模型的适应能力,因此提出了标签平滑的方法来降低模型置信度。 标签平滑使用标签 :math:`y` 和一些固定模式随机分布变量 :math:`\mu` 。对 :math:`k` 标签,标签平滑的计算方式如下。 diff --git a/doc/fluid/api_cn/layers_cn/layer_norm_cn.rst b/doc/fluid/api_cn/layers_cn/layer_norm_cn.rst index 3d49372f85e9ac22a480b803fb920c9ddf12e6ee..477b6e6ba82ebca08e62080ee79c7cb788fecbc0 100644 --- a/doc/fluid/api_cn/layers_cn/layer_norm_cn.rst +++ b/doc/fluid/api_cn/layers_cn/layer_norm_cn.rst @@ -3,10 +3,13 @@ layer_norm ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.layers.layer_norm(input, scale=True, shift=True, begin_norm_axis=1, epsilon=1e-05, param_attr=None, bias_attr=None, act=None, name=None) +:api_attr: 声明式编程模式(静态图) + + + 该OP实现了层归一化层(Layer Normalization Layer),其可以应用于小批量输入数据。更多详情请参考:`Layer Normalization `_ 计算公式如下 diff --git a/doc/fluid/api_cn/layers_cn/leaky_relu_cn.rst b/doc/fluid/api_cn/layers_cn/leaky_relu_cn.rst index 635decb48d980f93b448c0c638a5b44bf261ad43..736a39e0dfded848089f7511031addc5cf71dfe4 100644 --- a/doc/fluid/api_cn/layers_cn/leaky_relu_cn.rst +++ b/doc/fluid/api_cn/layers_cn/leaky_relu_cn.rst @@ -5,6 +5,12 @@ leaky_relu .. py:function:: paddle.fluid.layers.leaky_relu(x, alpha=0.02, name=None) +:alias_main: paddle.nn.functional.leaky_relu +:alias: paddle.nn.functional.leaky_relu,paddle.nn.functional.activation.leaky_relu +:old_api: paddle.fluid.layers.leaky_relu + + + LeakyRelu激活函数 .. math:: out=max(x,α∗x) diff --git a/doc/fluid/api_cn/layers_cn/less_equal_cn.rst b/doc/fluid/api_cn/layers_cn/less_equal_cn.rst index dae49dfbb8ff11a5d9336e4f0802972957b21e7e..da8b1b83343ec06b21738177555c79855efbdb2f 100644 --- a/doc/fluid/api_cn/layers_cn/less_equal_cn.rst +++ b/doc/fluid/api_cn/layers_cn/less_equal_cn.rst @@ -3,7 +3,13 @@ less_equal ------------------------------- -.. py:function:: paddle.fluid.layers.less_equal(x, y, cond=None) +.. 
py:function:: paddle.fluid.layers.less_equal(x, y, cond=None, name=None) + +:alias_main: paddle.less_equal +:alias: paddle.less_equal,paddle.tensor.less_equal,paddle.tensor.logic.less_equal +:old_api: paddle.fluid.layers.less_equal + + 该OP逐元素地返回 :math:`x <= y` 的逻辑值,使用重载算子 `<=` 可以有相同的计算函数效果。 @@ -11,7 +17,7 @@ less_equal - **x** (Variable) – 进行比较的第一个输入,是一个多维的Tensor,数据类型可以是float32,float64,int32,int64。 - **y** (Variable) – 进行比较的第二个输入,是一个多维的Tensor,数据类型可以是float32,float64,int32,int64。 - **cond** (Variable,可选) – 如果为None,则创建一个Tensor来作为进行比较的输出结果,该Tensor的shape和数据类型和输入x一致;如果不为None,则将Tensor作为该OP的输出,数据类型和数据shape需要和输入x一致。默认值为None。 - - **force_cpu** (bool,可选) – 是否强制将输出Tensor存储在CPU。默认值为None,表示将输出Tensor存储在CPU内存上;如果为False,则将输出Tensor存储在运行设备内存上。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 返回:输出结果的Tensor,数据的shape和输入x一致。 @@ -24,13 +30,11 @@ less_equal import paddle.fluid as fluid import paddle.fluid.layers as layers import numpy as np - label = fluid.layers.assign(np.array([1, 3], dtype='int32')) - limit = fluid.layers.assign(np.array([1, 2], dtype='int32')) - out_cond =fluid.data(name="input1", shape=[2], dtype='bool') + label = layers.assign(np.array([1, 3], dtype='int32')) + limit = layers.assign(np.array([1, 2], dtype='int32')) out = fluid.layers.less_equal(x=label, y=limit) #out=[True, False] - out1 = fluid.layers.less_equal(x=label, y=limit, cond=out_cond) #out1=[True, False], out_cond=[True, False] - out2 = fluid.layers.less_equal(x=label, y=limit, force_cpu=False) #out2=[True, False] - out3 = label<= limit #out3=[True, False] + out1 = label<= limit #out1=[True, False] + diff --git a/doc/fluid/api_cn/layers_cn/less_than_cn.rst b/doc/fluid/api_cn/layers_cn/less_than_cn.rst index 7d32d6d3f49d52a4bae11925035d1f34edd56014..6ad37577315a293779e5b1da1a9e449179c9b52f 100644 --- a/doc/fluid/api_cn/layers_cn/less_than_cn.rst +++ b/doc/fluid/api_cn/layers_cn/less_than_cn.rst @@ -3,7 +3,13 @@ less_than ------------------------------- -.. py:function:: paddle.fluid.layers.less_than(x, y, force_cpu=None, cond=None) +.. py:function:: paddle.fluid.layers.less_than(x, y, force_cpu=None, cond=None, name=None) + +:alias_main: paddle.less_than +:alias: paddle.less_than,paddle.tensor.less_than,paddle.tensor.logic.less_than +:old_api: paddle.fluid.layers.less_than + + 该OP逐元素地返回 :math:`x < y` 的逻辑值,使用重载算子 `<` 可以有相同的计算函数效果 @@ -14,6 +20,7 @@ less_than - **y** (Variable) - 进行比较的第二个输入,是一个多维的LoDTensor/Tensor,数据类型可以是float32,float64,int32,int64。 - **force_cpu** (bool) – 如果为True则强制将输出变量写入CPU内存中,否则将其写入目前所在的运算设备上。默认值为False。注意:该属性已弃用,其值始终是False。 - **cond** (Variable,可选) – 指定算子输出结果的LoDTensor/Tensor,可以是程序中已经创建的任何Variable。默认值为None,此时将创建新的Variable来保存输出结果。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 返回:输出结果的LoDTensor/Tensor,数据的shape和输入x一致。 diff --git a/doc/fluid/api_cn/layers_cn/linear_chain_crf_cn.rst b/doc/fluid/api_cn/layers_cn/linear_chain_crf_cn.rst index 90aebef0fb8a866a5ec1c69840b34723232e4db9..cf38de480033e68c8388e08990a991cf5792bfef 100755 --- a/doc/fluid/api_cn/layers_cn/linear_chain_crf_cn.rst +++ b/doc/fluid/api_cn/layers_cn/linear_chain_crf_cn.rst @@ -3,10 +3,13 @@ linear_chain_crf ------------------------------- -**注意:该API仅支持【静态图】模式** .. 
py:function:: paddle.fluid.layers.linear_chain_crf(input, label, param_attr=None, length=None) +:api_attr: 声明式编程模式(静态图) + + + 线性链条件随机场(Linear Chain CRF) 条件随机场定义间接概率图,节点代表随机变量,边代表两个变量之间的依赖。CRF学习条件概率 :math:`P\left ( Y|X \right )` , :math:`X = \left ( x_{1},x_{2},...,x_{n} \right )` 是结构性输入,:math:`Y = \left ( y_{1},y_{2},...,y_{n} \right )` 为输入标签。 diff --git a/doc/fluid/api_cn/layers_cn/linear_lr_warmup_cn.rst b/doc/fluid/api_cn/layers_cn/linear_lr_warmup_cn.rst index 59c281d419ad313e08a909b0cd31944ad77407e5..c480c68d074b01191296b96e8ec8d506fbfa437f 100644 --- a/doc/fluid/api_cn/layers_cn/linear_lr_warmup_cn.rst +++ b/doc/fluid/api_cn/layers_cn/linear_lr_warmup_cn.rst @@ -5,6 +5,12 @@ linear_lr_warmup .. py:function:: paddle.fluid.layers.linear_lr_warmup(learning_rate, warmup_steps, start_lr, end_lr) +:alias_main: paddle.nn.functional.linear_lr_warmup +:alias: paddle.nn.functional.linear_lr_warmup,paddle.nn.functional.learning_rate.linear_lr_warmup +:old_api: paddle.fluid.layers.linear_lr_warmup + + + 该OP使用学习率优化策略-线性学习率热身(warm up)对学习率进行初步调整。在正常调整学习率之前,先逐步增大学习率,具体原理可参考: `Bag of Tricks for Image Classification with Convolutional Neural Networks `_ diff --git a/doc/fluid/api_cn/layers_cn/linspace_cn.rst b/doc/fluid/api_cn/layers_cn/linspace_cn.rst index b1f7d2649919a4c00f0a3d891fa9a8b992c3004a..c301233b746178e17db08dacbf927d0b0b5a89f7 100644 --- a/doc/fluid/api_cn/layers_cn/linspace_cn.rst +++ b/doc/fluid/api_cn/layers_cn/linspace_cn.rst @@ -3,19 +3,20 @@ linspace ------------------------------- -.. py:function:: paddle.fluid.layers.linspace(start, stop, num, dtype) +.. py:function:: paddle.fluid.layers.linspace(start, stop, num, dtype=None, name=None) -该OP在给定区间内返回固定数目的均匀间隔的值。 +该OP返回一个Tensor,Tensor的值为在区间start和stop上均匀间隔的num个值,输出Tensor的长度为num。 +**注意:该OP不进行梯度计算** 参数: - - **start** (float|Variable) – start是区间开始的变量,可以是一个浮点标量,或是一个shape为[1]的Tensor,该Tensor的数据类型可以是float32或者是float64。 - - **stop** (float|Variable) – end是区间结束的变量,可以是一个浮点标量,或是一个shape为[1]的Tensor,该Tensor的数据类型可以是float32或者是float64。 - - **num** (int|Variable) – num是给定区间内需要划分的区间数,可以是一个整型标量,或是一个shape为[1]的Tensor,该Tensor的数据类型需为int32。 - - **dtype** (string) – 输出Tensor的数据类型,可以是‘float32’或者是‘float64’。 + - **start** (int|float|Tensor) – ``start`` 是区间开始的变量,可以是一个浮点标量,或是一个shape为[1]的Tensor,该Tensor的数据类型可以是float32,float64,int32 或者int64。 + - **stop** (int|float|Tensor) – ``stop`` 是区间结束的变量,可以是一个浮点标量,或是一个shape为[1]的Tensor,该Tensor的数据类型可以是float32,float64,int32或者int64。 + - **num** (int|Tensor) – ``num`` 是给定区间内需要划分的区间数,可以是一个整型标量,或是一个shape为[1]的Tensor,该Tensor的数据类型需为int32。 + - **dtype** (np.dtype|str, 可选) – 输出Tensor的数据类型,可以是float32,float64, int32或者int64。如果dtype的数据类型为None,输出Tensor数据类型为float32。 + - **name** (str, 可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 返回:表示等间隔划分结果的1-D Tensor,该Tensor的shape大小为 :math:`[num]` ,在mum为1的情况下,仅返回包含start元素值的Tensor。 -返回类型:Variable **代码示例**: diff --git a/doc/fluid/api_cn/layers_cn/load_cn.rst b/doc/fluid/api_cn/layers_cn/load_cn.rst index f3e39758fd05e0824a225c7874bc870f34eaf93d..24760a5325296f9f0b568d2e7f1e4da6d2c35308 100644 --- a/doc/fluid/api_cn/layers_cn/load_cn.rst +++ b/doc/fluid/api_cn/layers_cn/load_cn.rst @@ -5,6 +5,9 @@ load .. 
py:function:: paddle.fluid.layers.load(out, file_path, load_as_fp16=None) + + + 该OP操作将从磁盘文件中加载LoDTensor/SelectedRows变量。 diff --git a/doc/fluid/api_cn/layers_cn/locality_aware_nms_cn.rst b/doc/fluid/api_cn/layers_cn/locality_aware_nms_cn.rst index 33a8d45038904f9c2ea9f5f090d7695ae2b53b76..f85aead2e4ef836512851fac9e34df172dea3550 100644 --- a/doc/fluid/api_cn/layers_cn/locality_aware_nms_cn.rst +++ b/doc/fluid/api_cn/layers_cn/locality_aware_nms_cn.rst @@ -5,6 +5,9 @@ locality_aware_nms .. py:function:: paddle.fluid.layers.locality_aware_nms(bboxes, scores, score_threshold, nms_top_k, keep_top_k, nms_threshold=0.3, normalized=True, nms_eta=1.0, background_label=-1, name=None) + + + **局部感知NMS** `局部感知NMS `_ 用于对边界框(bounding box)和评分(scores)执行局部感知非极大值抑制(LANMS)。 diff --git a/doc/fluid/api_cn/layers_cn/lod_append_cn.rst b/doc/fluid/api_cn/layers_cn/lod_append_cn.rst index ac96e2db264c64b5de324e7d72b11943fea1bac1..2c7f754233a198fdcdc39d4fcb453ad56c1a81d6 100644 --- a/doc/fluid/api_cn/layers_cn/lod_append_cn.rst +++ b/doc/fluid/api_cn/layers_cn/lod_append_cn.rst @@ -5,6 +5,9 @@ lod_append .. py:function:: paddle.fluid.layers.lod_append(x, level) + + + 给 ``x`` 的LoD添加 ``level`` 。 简单示例: diff --git a/doc/fluid/api_cn/layers_cn/lod_reset_cn.rst b/doc/fluid/api_cn/layers_cn/lod_reset_cn.rst index 20d9ea8b4fe46e4772e12aed7187eca00a138fdd..fbb090b26751d1a372911a7d35cf4425a2048e76 100644 --- a/doc/fluid/api_cn/layers_cn/lod_reset_cn.rst +++ b/doc/fluid/api_cn/layers_cn/lod_reset_cn.rst @@ -5,6 +5,9 @@ lod_reset .. py:function:: paddle.fluid.layers.lod_reset(x, y=None, target_lod=None) + + + 根据给定的参数 ``y`` 或 ``target_lod`` ,重设输入 ``x`` (LoDTensor) 的 LoD 信息。 参数: diff --git a/doc/fluid/api_cn/layers_cn/log_cn.rst b/doc/fluid/api_cn/layers_cn/log_cn.rst index 121761713770df69bf941ebf44c1521e9b4edc23..dc8f40a8081d7a7aa2fa6880761031c333edd88d 100644 --- a/doc/fluid/api_cn/layers_cn/log_cn.rst +++ b/doc/fluid/api_cn/layers_cn/log_cn.rst @@ -5,6 +5,12 @@ log .. py:function:: paddle.fluid.layers.log(x, name=None) +:alias_main: paddle.log +:alias: paddle.log,paddle.tensor.log,paddle.tensor.math.log +:old_api: paddle.fluid.layers.log + + + Log激活函数(计算自然对数) diff --git a/doc/fluid/api_cn/layers_cn/log_loss_cn.rst b/doc/fluid/api_cn/layers_cn/log_loss_cn.rst index 3134ba93f444bdcfd0e9b5f439860cb77b204642..aea52ab5f87ec3c61174a6516f10a650037007e5 100644 --- a/doc/fluid/api_cn/layers_cn/log_loss_cn.rst +++ b/doc/fluid/api_cn/layers_cn/log_loss_cn.rst @@ -5,6 +5,12 @@ log_loss .. py:function:: paddle.fluid.layers.log_loss(input, label, epsilon=0.0001, name=None) +:alias_main: paddle.nn.functional.log_loss +:alias: paddle.nn.functional.log_loss,paddle.nn.functional.loss.log_loss +:old_api: paddle.fluid.layers.log_loss + + + **负log loss层** 该 OP 对输入的预测结果和目标标签进行计算,返回负对数损失值。 diff --git a/doc/fluid/api_cn/layers_cn/logical_and_cn.rst b/doc/fluid/api_cn/layers_cn/logical_and_cn.rst index b5bdc229aeb90248c6de565941808e7c8ba20dd3..09e3efbcbcac121b94fc557a2ccee27357bfac80 100644 --- a/doc/fluid/api_cn/layers_cn/logical_and_cn.rst +++ b/doc/fluid/api_cn/layers_cn/logical_and_cn.rst @@ -3,46 +3,35 @@ logical_and ------------------------------- -.. py:function:: paddle.fluid.layers.logical_and(x, y, out=None, name=None) +.. py:function:: paddle.logical_and(x, y, out=None, name=None) -该OP逐元素的对 ``X`` 和 ``Y`` 两LoDTensor/Tensor进行逻辑与运算。 +该OP逐元素的对 ``x`` 和 ``y`` 进行逻辑与运算。 .. 
math:: Out = X \&\& Y -参数: - - **x** (Variable)- 逻辑与运算的第一个输入,是一个多维的LoDTensor/Tensor,数据类型只能是bool。 - - **y** (Variable)- 逻辑与运算的第二个输入,是一个多维的LoDTensor/Tensor,数据类型只能是bool。 - - **out** (Variable,可选)- 指定算子输出结果的LoDTensor/Tensor,可以是程序中已经创建的任何Variable。默认值为None,此时将创建新的Variable来保存输出结果。 - - **name** (str,可选)- 该参数供开发人员打印调试信息时使用,具体用法参见 :ref:`api_guide_Name` ,默认值为None。 - -返回:与 ``x`` 维度相同,数据类型相同的LoDTensor/Tensor。 +.. note:: + ``paddle.logical_and`` 遵守broadcasting,如您想了解更多,请参见 :ref:`cn_user_guide_broadcasting` 。 -返回类型:Variable +参数: + - **x** (Tensor)- 输入的 `Tensor` ,数据类型为:bool。 + - **y** (Tensor)- 输入的 `Tensor` ,数据类型为:bool。 + - **out** (Tensor,可选)- 指定算子输出结果的 `Tensor` ,可以是程序中已经创建的任何Tensor。默认值为None,此时将创建新的Tensor来保存输出结果。 + - **name** (str,可选)- 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 +返回: ``Tensor`` ,维度与 ``x`` 相同,存储运算后的结果。 **代码示例:** .. code-block:: python - import paddle.fluid as fluid - import numpy as np - - # Graph organizing - x = fluid.layers.data(name='x', shape=[2], dtype='bool') - y = fluid.layers.data(name='y', shape=[2], dtype='bool') - res = fluid.layers.logical_and(x=x, y=y) - # The comment lists another available method. - # res = fluid.layers.fill_constant(shape=[2], dtype='bool', value=0) - # fluid.layers.logical_and(x=x, y=y, out=res) - - # Create an executor using CPU as an example - exe = fluid.Executor(fluid.CPUPlace()) - exe.run(fluid.default_startup_program()) - - # Execute - x_i = np.array([[1, 0], [0, 1]]).astype(np.bool) - y_i = np.array([[1, 1], [0, 0]]).astype(np.bool) - res_val, = exe.run(fluid.default_main_program(), feed={'x':x_i, 'y':y_i}, fetch_list=[res]) - print(res_val) # [[True, False], [False, False]] + import paddle + import numpy as np + paddle.disable_static() + x_data = np.array([True], dtype=np.bool) + y_data = np.array([True, False, True, False], dtype=np.bool) + x = paddle.to_tensor(x_data) + y = paddle.to_tensor(y_data) + res = paddle.logical_and(x, y) + print(res.numpy()) # [True False True False] diff --git a/doc/fluid/api_cn/layers_cn/logical_not_cn.rst b/doc/fluid/api_cn/layers_cn/logical_not_cn.rst index 40747ab04482ad36a95e3ed15dfc81434a3108a5..3eaf0f1719abac0ce3e63ed2867026f349e76fba 100644 --- a/doc/fluid/api_cn/layers_cn/logical_not_cn.rst +++ b/doc/fluid/api_cn/layers_cn/logical_not_cn.rst @@ -3,19 +3,25 @@ logical_not ------------------------------- -.. py:function:: paddle.fluid.layers.logical_not(x, out=None, name=None) +.. py:function:: paddle.logical_not(x, out=None, name=None) -该OP逐元素的对 ``X`` LoDTensor/Tensor进行逻辑非运算 +:alias_main: paddle.logical_not +:alias: paddle.logical_not, paddle.tensor.logical_not, paddle.tensor.logic.logical_not +:old_api: paddle.fluid.layers.logical_not + + + +该OP逐元素的对 ``X`` Variable进行逻辑非运算。 .. math:: Out = !X 参数: - - **x** (Variable)- 逻辑非运算的输入,是一个多维的LoDTensor/Tensor,数据类型只能是bool。 - - **out** (Variable,可选)- 指定算子输出结果的LoDTensor/Tensor,可以是程序中已经创建的任何Variable。默认值为None,此时将创建新的Variable来保存输出结果。 + - **x** (Variable)- 逻辑非运算的输入,是一个 Variable,数据类型只能是bool。 + - **out** (Variable,可选)- 指定算子输出结果的 Variable,可以是程序中已经创建的任何 Variable。默认值为None,此时将创建新的Variable来保存输出结果。 - **name** (str,可选)- 该参数供开发人员打印调试信息时使用,具体用法参见 :ref:`api_guide_Name` ,默认值为None。 -返回:与 ``x`` 维度相同,数据类型相同的LoDTensor/Tensor。 +返回:与 ``x`` 维度相同,数据类型相同的 Variable。 返回类型:Variable @@ -23,22 +29,11 @@ logical_not .. code-block:: python - import paddle.fluid as fluid + import paddle import numpy as np - # Graph organizing - x = fluid.layers.data(name='x', shape=[2], dtype='bool') - res = fluid.layers.logical_not(x) - # The comment lists another availble method.
- # res = fluid.layers.fill_constant(shape=[2], dtype='bool', value=0) - # fluid.layers.logical_not(x, out=res) - - # Create an executor using CPU as an example - exe = fluid.Executor(fluid.CPUPlace()) - exe.run(fluid.default_startup_program()) - - # Execute - x_i = np.array([[1, 0]]).astype(np.bool) - res_val, = exe.run(fluid.default_main_program(), feed={'x':x_i}, fetch_list=[res]) - print(res_val) # [[False, True]] - + paddle.enable_imperative() + x_data = np.array([True, False, True, False], dtype=np.bool) + x = paddle.imperative.to_variable(x_data) + res = paddle.logical_not(x) + print(res.numpy()) # [False True False True] diff --git a/doc/fluid/api_cn/layers_cn/logical_or_cn.rst b/doc/fluid/api_cn/layers_cn/logical_or_cn.rst index dac0d971074c323f922db9de159fdd1fe7b0b166..9cb3420ea323948e47209be35dada5a85d2d51ea 100644 --- a/doc/fluid/api_cn/layers_cn/logical_or_cn.rst +++ b/doc/fluid/api_cn/layers_cn/logical_or_cn.rst @@ -3,46 +3,35 @@ logical_or ------------------------------- -.. py:function:: paddle.fluid.layers.logical_or(x, y, out=None, name=None) +.. py:function:: paddle.logical_or(x, y, out=None, name=None) -该OP逐元素的对 ``X`` 和 ``Y`` 两LoDTensor/Tensor进行逻辑或运算。 +该OP逐元素的对 ``X`` 和 ``Y`` 进行逻辑或运算。 .. math:: Out = X || Y -参数: - - **x** (Variable)- 逻辑或运算的第一个输入,是一个多维的LoDTensor/Tensor,数据类型只能是bool。 - - **y** (Variable)- 逻辑或运算的第二个输入,是一个多维的LoDTensor/Tensor,数据类型只能是bool。 - - **out** (Variable,可选)- 指定算子输出结果的LoDTensor/Tensor,可以是程序中已经创建的任何Variable。默认值为None,此时将创建新的Variable来保存输出结果。 - - **name** (str,可选)- 该参数供开发人员打印调试信息时使用,具体用法参见 :ref:`api_guide_Name` ,默认值为None。 - -返回:与 ``x`` 维度相同,数据类型相同的LoDTensor/Tensor。 +.. note:: + ``paddle.logical_or`` 遵守broadcasting,如您想了解更多,请参见 :ref:`cn_user_guide_broadcasting` 。 -返回类型:Variable +参数: + - **x** (Tensor)- 输入的 `Tensor` ,数据类型为:bool。 + - **y** (Tensor)- 输入的 `Tensor` ,数据类型为:bool。 + - **out** (Tensor,可选)- 指定算子输出结果的 `Tensor` ,可以是程序中已经创建的任何Tensor。默认值为None,此时将创建新的Tensor来保存输出结果。 + - **name** (str,可选)- 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 +返回: ``Tensor`` ,维度与 ``x`` 相同,存储运算后的结果。 **代码示例:** .. code-block:: python - import paddle.fluid as fluid - import numpy as np - - # Graph organizing - x = fluid.layers.data(name='x', shape=[2], dtype='bool') - y = fluid.layers.data(name='y', shape=[2], dtype='bool') - res = fluid.layers.logical_or(x=x, y=y) - # The comment lists another available method.
- # res = fluid.layers.fill_constant(shape=[2], dtype='bool', value=0) - # fluid.layers.logical_or(x=x, y=y, out=res) - - # Create an executor using CPU as an example - exe = fluid.Executor(fluid.CPUPlace()) - exe.run(fluid.default_startup_program()) - - # Execute - x_i = np.array([[1, 0], [0, 1]]).astype(np.bool) - y_i = np.array([[1, 1], [0, 0]]).astype(np.bool) - res_val, = exe.run(fluid.default_main_program(), feed={'x':x_i, 'y':y_i}, fetch_list=[res]) - print(res_val) # [[True, True], [False, True]] + import paddle + import numpy as np + paddle.disable_static() + x_data = np.array([True, False], dtype=np.bool).reshape(2, 1) + y_data = np.array([True, False, True, False], dtype=np.bool).reshape(2, 2) + x = paddle.to_tensor(x_data) + y = paddle.to_tensor(y_data) + res = paddle.logical_or(x, y) + print(res.numpy()) # [[ True True] [ True False]] diff --git a/doc/fluid/api_cn/layers_cn/logical_xor_cn.rst b/doc/fluid/api_cn/layers_cn/logical_xor_cn.rst index 2482c32a5b1e4dad4d49b95f8665b9fbbf058128..502a5a60e55ea3384cc5a2b579118085bccb9e1f 100644 --- a/doc/fluid/api_cn/layers_cn/logical_xor_cn.rst +++ b/doc/fluid/api_cn/layers_cn/logical_xor_cn.rst @@ -3,47 +3,35 @@ logical_xor ------------------------------- -.. py:function:: paddle.fluid.layers.logical_xor(x, y, out=None, name=None) +.. py:function:: paddle.logical_xor(x, y, out=None, name=None) -该OP逐元素的对 ``X`` 和 ``Y`` 两LoDTensor/Tensor进行逻辑异或运算。 +该OP逐元素的对 ``X`` 和 ``Y`` 进行逻辑异或运算。 .. math:: Out = (X || Y) \&\& !(X \&\& Y) -参数: - - **x** (Variable)- 逻辑异或运算的第一个输入,是一个多维的LoDTensor/Tensor,数据类型只能是bool。 - - **y** (Variable)- 逻辑异或运算的第二个输入,是一个多维的LoDTensor/Tensor,数据类型只能是bool。 - - **out** (Variable,可选)- 指定算子输出结果的LoDTensor/Tensor,可以是程序中已经创建的任何Variable。默认值为None,此时将创建新的Variable来保存输出结果。 - - **name** (str,可选)- 该参数供开发人员打印调试信息时使用,具体用法参见 :ref:`api_guide_Name` ,默认值为None。 - +.. note:: + ``paddle.logical_xor`` 遵守broadcasting,如您想了解更多,请参见 :ref:`cn_user_guide_broadcasting` 。 -返回:与 ``x`` 维度相同,数据类型相同的LoDTensor/Tensor。 - -返回类型:Variable +参数: + - **x** (Tensor)- 输入的 `Tensor` ,数据类型为:bool。 + - **y** (Tensor)- 输入的 `Tensor` ,数据类型为:bool。 + - **out** (Tensor,可选)- 指定算子输出结果的 `Tensor` ,可以是程序中已经创建的任何Tensor。默认值为None,此时将创建新的Tensor来保存输出结果。 + - **name** (str,可选)- 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 +返回: ``Tensor`` ,维度与 ``x`` 相同,存储运算后的结果。 **代码示例:** .. code-block:: python - import paddle.fluid as fluid - import numpy as np - - # Graph organizing - x = fluid.layers.data(name='x', shape=[2], dtype='bool') - y = fluid.layers.data(name='y', shape=[2], dtype='bool') - res = fluid.layers.logical_xor(x=x, y=y) - # The comment lists another available method.
- # res = fluid.layers.fill_constant(shape=[2], dtype='bool', value=0) - # fluid.layers.logical_xor(x=x, y=y, out=res) - - # Create an executor using CPU as an example - exe = fluid.Executor(fluid.CPUPlace()) - exe.run(fluid.default_startup_program()) - - # Execute - x_i = np.array([[1, 0], [0, 1]]).astype(np.bool) - y_i = np.array([[1, 1], [0, 0]]).astype(np.bool) - res_val, = exe.run(fluid.default_main_program(), feed={'x':x_i, 'y':y_i}, fetch_list=[res]) - print(res_val) # [[False, True], [False, True]] + import paddle + import numpy as np + paddle.disable_static() + x_data = np.array([True, False], dtype=np.bool).reshape([2, 1]) + y_data = np.array([True, False, True, False], dtype=np.bool).reshape([2, 2]) + x = paddle.to_tensor(x_data) + y = paddle.to_tensor(y_data) + res = paddle.logical_xor(x, y) + print(res.numpy()) # [[False, True], [ True, False]] diff --git a/doc/fluid/api_cn/layers_cn/lrn_cn.rst b/doc/fluid/api_cn/layers_cn/lrn_cn.rst index df97c8b979a98df4d1306839ea149ad1d9dbbe79..0465849f1c66e0bb8ce6bbdf3c987e45de01bffb 100644 --- a/doc/fluid/api_cn/layers_cn/lrn_cn.rst +++ b/doc/fluid/api_cn/layers_cn/lrn_cn.rst @@ -5,6 +5,12 @@ lrn .. py:function:: paddle.fluid.layers.lrn(input, n=5, k=1.0, alpha=0.0001, beta=0.75, name=None, data_format='NCHW') +:alias_main: paddle.nn.functional.lrn +:alias: paddle.nn.functional.lrn,paddle.nn.functional.norm.lrn +:old_api: paddle.fluid.layers.lrn + + + 该OP实现了局部响应正则化层(Local Response Normalization Layer),用于对局部输入区域正则化,执行一种侧向抑制(lateral inhibition)。更多详情参考: `ImageNet Classification with Deep Convolutional Neural Networks `_ diff --git a/doc/fluid/api_cn/layers_cn/lstm_cn.rst b/doc/fluid/api_cn/layers_cn/lstm_cn.rst index 206d8227ff84ee97ad1c6f914b4f71e69dca2e68..f42882efaf70c2827f07bc1bbd4f14f0e4710589 100644 --- a/doc/fluid/api_cn/layers_cn/lstm_cn.rst +++ b/doc/fluid/api_cn/layers_cn/lstm_cn.rst @@ -3,14 +3,17 @@ lstm ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.layers.lstm(input, init_h, init_c, max_len, hidden_size, num_layers, dropout_prob=0.0, is_bidirec=False, is_test=False, name=None, default_initializer=None, seed=-1) +:api_attr: 声明式编程模式(静态图) + + + .. note:: 该OP仅支持 GPU 设备运行 -该OP实现了 LSTM,即 Long-Short Term Memory(长短期记忆)运算 - `Hochreiter, S., & Schmidhuber, J. (1997) `_。 +该OP实现了 LSTM,即 Long-Short Term Memory(长短期记忆)运算 - `Hochreiter, S., & Schmidhuber, J. (1997) `_。 该OP的实现不包括 diagonal/peephole 连接,参见 `Gers, F. A., & Schmidhuber, J. 
(2000) `_。 如果需要使用 peephole 连接方法,请使用 :ref:`cn_api_fluid_layers_dynamic_lstm` 。 @@ -57,7 +60,7 @@ lstm 返回: 经过lstm运算输出的三个Tensor的tuple,包括 -- rnn_out:LSTM hidden的输出结果的Tensor,数据类型与input一致,维度为 :math:`[seq\_len, batch\_size, hidden\_size]` 。如果 ``is_bidirec`` 设置为True,则维度为 :math:`[seq\_len, batch\_size, hidden\_size*2]` +- rnn_out:LSTM hidden的输出结果的Tensor,数据类型与input一致,维度为 :math:`[batch\_size, seq\_len, hidden\_size]` 。如果 ``is_bidirec`` 设置为True,则维度为 :math:`[batch\_size, seq\_len, hidden\_size*2]` - last_h:LSTM最后一步的hidden状态的Tensor,数据类型与input一致,维度为 :math:`[num\_layers, batch\_size, hidden\_size]` 。如果 ``is_bidirec`` 设置为True,则维度为 :math:`[num\_layers*2, batch\_size, hidden\_size]` - last_c:LSTM最后一步的cell状态的Tensor,数据类型与input一致,维度为 :math:`[num\_layers, batch\_size, hidden\_size]` 。如果 ``is_bidirec`` 设置为True,则维度为 :math:`[num\_layers*2, batch\_size, hidden\_size]` @@ -73,12 +76,11 @@ lstm emb_dim = 256 vocab_size = 10000 data = fluid.layers.data(name='x', shape=[-1, 100, 1], - dtype='int32') + dtype='int64') emb = fluid.layers.embedding(input=data, size=[vocab_size, emb_dim], is_sparse=True) batch_size = 20 max_len = 100 dropout_prob = 0.2 - seq_len = 100 hidden_size = 150 num_layers = 1 init_h = layers.fill_constant( [num_layers, batch_size, hidden_size], 'float32', 0.0 ) @@ -87,7 +89,7 @@ lstm rnn_out, last_h, last_c = layers.lstm(emb, init_h, init_c, max_len, hidden_size, num_layers, dropout_prob=dropout_prob) rnn_out.shape # (-1, 100, 150) last_h.shape # (1, 20, 150) - layt_c.shape # (1, 20, 150) + last_c.shape # (1, 20, 150) diff --git a/doc/fluid/api_cn/layers_cn/lstm_unit_cn.rst b/doc/fluid/api_cn/layers_cn/lstm_unit_cn.rst index e24a932b38afae0e9bb6e24cf0afa5ddd5afff22..7e33fb3b748456950cbc55ac3fef511965bd82cb 100644 --- a/doc/fluid/api_cn/layers_cn/lstm_unit_cn.rst +++ b/doc/fluid/api_cn/layers_cn/lstm_unit_cn.rst @@ -3,10 +3,13 @@ lstm_unit ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.layers.lstm_unit(x_t, hidden_t_prev, cell_t_prev, forget_bias=0.0, param_attr=None, bias_attr=None, name=None) +:api_attr: 声明式编程模式(静态图) + + + Long-Short Term Memory(LSTM)循环神经网络计算单元。该OP用于完成单个时间步内LSTM的计算,基于论文 `RECURRENT NEURAL NETWORK REGULARIZATION `_ 中的描述实现, @@ -48,26 +51,18 @@ Long-Short Term Memory(LSTM)循环神经网络计算单元。该OP用于完 **代码示例**: .. code-block:: python - + import paddle.fluid as fluid - + dict_dim, emb_dim, hidden_dim = 128, 64, 512 - data = fluid.layers.data(name='step_data', shape=[1], dtype='int32') - x = fluid.layers.embedding(input=data, size=[dict_dim, emb_dim]) - pre_hidden = fluid.layers.data(name='pre_hidden', shape=[hidden_dim], dtype='float32') - pre_cell = fluid.layers.data(name='pre_cell', shape=[hidden_dim], dtype='float32') + data = fluid.data(name='step_data', shape=[None], dtype='int64') + x = fluid.embedding(input=data, size=[dict_dim, emb_dim]) + pre_hidden = fluid.data( + name='pre_hidden', shape=[None, hidden_dim], dtype='float32') + pre_cell = fluid.data( + name='pre_cell', shape=[None, hidden_dim], dtype='float32') hidden = fluid.layers.lstm_unit( x_t=x, hidden_t_prev=pre_hidden, cell_t_prev=pre_cell) - - - - - - - - - - - + diff --git a/doc/fluid/api_cn/layers_cn/margin_rank_loss_cn.rst b/doc/fluid/api_cn/layers_cn/margin_rank_loss_cn.rst index 8837b7293818083480e875d86993c49d944d167b..0412f85fc6f91c9534d485ab7ac7584d74e80251 100644 --- a/doc/fluid/api_cn/layers_cn/margin_rank_loss_cn.rst +++ b/doc/fluid/api_cn/layers_cn/margin_rank_loss_cn.rst @@ -5,6 +5,12 @@ margin_rank_loss .. 
py:function:: paddle.fluid.layers.margin_rank_loss(label, left, right, margin=0.1, name=None) +:alias_main: paddle.nn.functional.margin_rank_loss +:alias: paddle.nn.functional.margin_rank_loss,paddle.nn.functional.loss.margin_rank_loss +:old_api: paddle.fluid.layers.margin_rank_loss + + + margin rank loss(间隔排序损失)层。在排序问题中,它可以比较来自排序网络的输入 ``left`` 和输入 ``right`` 的得分。 可用如下等式定义: diff --git a/doc/fluid/api_cn/layers_cn/matmul_cn.rst b/doc/fluid/api_cn/layers_cn/matmul_cn.rst index 27f71423f2e831034b4a3c23745b1e5132554294..8514a410c290fe9de6004751329d2439772bcd99 100644 --- a/doc/fluid/api_cn/layers_cn/matmul_cn.rst +++ b/doc/fluid/api_cn/layers_cn/matmul_cn.rst @@ -5,6 +5,9 @@ matmul .. py:function:: paddle.fluid.layers.matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None) + + + 输入 ``x`` 和输入 ``y`` 矩阵相乘。 两个输入的形状可为任意维度,但当任一输入维度大于3时,两个输入的维度必须相等。 diff --git a/doc/fluid/api_cn/layers_cn/matrix_nms_cn.rst b/doc/fluid/api_cn/layers_cn/matrix_nms_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f0c8af1e6cb09f7137d40bd4899395a3cb6e4e41 --- /dev/null +++ b/doc/fluid/api_cn/layers_cn/matrix_nms_cn.rst @@ -0,0 +1,59 @@ +.. _cn_api_fluid_layers_matrix_nms: + +matrix_nms +------------------------------- + + +.. py:function:: paddle.fluid.layers.matrix_nms(bboxes, scores, score_threshold, post_threshold, nms_top_k, keep_top_k, use_gaussian=False, gaussian_sigma=2., background_label=0, normalized=True, return_index=False, name=None) + +:alias_main: paddle.nn.functional.matrix_nms +:alias: paddle.nn.functional.matrix_nms,paddle.nn.functional.extension.matrix_nms +:old_api: paddle.fluid.layers.matrix_nms + + + +**Matrix NMS** + +该OP使用Matrix NMS算法对边界框(bounding box)和评分(scores)执行多类非极大值抑制(NMS)。 + +如果提供 ``score_threshold`` 阈值且 ``nms_top_k`` 大于-1,则选择置信度分数最大的k个框。 然后按照Matrix NMS算法对分数进行衰减。经过抑制后,如果 ``keep_top_k`` 大于-1, 则每张图片最终保留 ``keep_top_k`` 个检测框。 + +在NMS步骤后,如果keep_top_k大于-1,则每个图像最多保留keep_top_k个框(bounding box)。 + + +参数: + - **bboxes** (Variable) - 形为[N,M,4]的3-D张量,表示将预测M个边界框的预测位置, N是批大小(batch size)。当边界框(bounding box)大小等于4时,每个边界框有四个坐标值,布局为[xmin,ymin,xmax,ymax]。数据类型为float32或float64。 + - **scores** (Variable) – 形为[N,C,M]的3-D张量,表示预测的置信度。 N是批大小(batch size),C是种类数目,M是边界框bounding box的数量。对于每个类别,存在对应于M个边界框的总M个分数。请注意,M等于bboxes的第二维。数据类型为float32或float64。 + - **score_threshold** (float) – 过滤掉低置信度分数的边界框的阈值。 + - **post_threshold** (float) – 经过NMS衰减后,过滤掉低置信度分数的边界框的阈值。 + - **nms_top_k** (int) – 基于 score_threshold 的过滤检测后,根据置信度保留的最大检测次数。 + - **keep_top_k** (int) – 经过NMS抑制后, 最终保留的最大检测次数。如果设置为 -1 ,则则保留全部。 + - **use_gaussian** (bool) – 是否使用高斯函数衰减。默认值:False 。 + - **gaussian_sigma** (float) – 高斯函数的Sigma值,默认值:2.0 。 + - **background_label** (int) – 背景标签(类别)的索引,如果设置为 0 ,则忽略背景标签(类别)。如果设置为 -1 ,则考虑所有类别。默认值:0 + - **normalized** (bool) – 检测是否已经经过正则化。默认值:True 。 + - **return_index** (bool) – 是否同时返回保留检测框的序号。默认值:False 。 + - **name** (str|None) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值为None。 + +返回: + - **Out** (Variable) - 形为[No,6]的2-D LoDTensor,表示检测结果。每行有6个值:[标签label,置信度confidence,xmin,ymin,xmax,ymax]。或形为[No,10]的2-D LoDTensor,用来表示检测结果。 每行有10个值:[标签label,置信度confidence,x1,y1,x2,y2,x3,y3,x4,y4]。 No是检测的总数。 如果对所有图像都没有检测到的box,则lod将设置为{1},而Out仅包含一个值-1。 (1.3版本之后,当未检测到box时,lod从{0}更改为{1}) + - **Index** (Variable) - 形为[No,1]的2-D LoDTensor,表示检测结果在整个批次中的序号。 + + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + boxes = fluid.data(name='bboxes', shape=[None,81, 4], + dtype='float32', lod_level=1) + scores = fluid.data(name='scores', shape=[None,81], + dtype='float32', lod_level=1) + out = fluid.layers.matrix_nms(bboxes=boxes, + scores=scores, + background_label=0, + score_threshold=0.5, + post_threshold=0.1, + nms_top_k=400, + keep_top_k=200, + normalized=False) diff --git a/doc/fluid/api_cn/layers_cn/maxout_cn.rst b/doc/fluid/api_cn/layers_cn/maxout_cn.rst index af0a58cf72d01f0dc3576bce9e6488310ae174be..2f73289029229de4436d34bb99030a5eb958d1fb 100644 --- a/doc/fluid/api_cn/layers_cn/maxout_cn.rst +++ b/doc/fluid/api_cn/layers_cn/maxout_cn.rst @@ -5,6 +5,12 @@ maxout .. py:function:: paddle.fluid.layers.maxout(x, groups, name=None, axis=1) +:alias_main: paddle.nn.functional.maxout +:alias: paddle.nn.functional.maxout,paddle.nn.functional.activation.maxout +:old_api: paddle.fluid.layers.maxout + + + 假设输入形状为(N, Ci, H, W),输出形状为(N, Co, H, W),则 :math:`Co=Ci/groups` 运算公式如下: .. math:: diff --git a/doc/fluid/api_cn/layers_cn/mean_cn.rst b/doc/fluid/api_cn/layers_cn/mean_cn.rst index 738a19d683524a622881f4f4da067b71a2ed0b83..69363e1c6ba3934a5ea1f5622d5233552b47fc84 100644 --- a/doc/fluid/api_cn/layers_cn/mean_cn.rst +++ b/doc/fluid/api_cn/layers_cn/mean_cn.rst @@ -5,6 +5,12 @@ mean .. py:function:: paddle.fluid.layers.mean(x, name=None) +:alias_main: paddle.mean +:alias: paddle.mean,paddle.tensor.mean,paddle.tensor.stat.mean +:old_api: paddle.fluid.layers.mean + + + 计算 ``x`` 所有元素的平均值。 参数: diff --git a/doc/fluid/api_cn/layers_cn/mean_iou_cn.rst b/doc/fluid/api_cn/layers_cn/mean_iou_cn.rst index 4525b968f021a96eecb3aaa1552dc2d3e1b251f7..254c9d9e5a4200741675d7c926085526bda5862f 100644 --- a/doc/fluid/api_cn/layers_cn/mean_iou_cn.rst +++ b/doc/fluid/api_cn/layers_cn/mean_iou_cn.rst @@ -5,6 +5,9 @@ mean_iou .. py:function:: paddle.fluid.layers.mean_iou(input, label, num_classes) + + + 该OP计算均值IOU, 均值IOU(Mean Intersection-Over-Union)是语义图像分割中的常用的评价指标之一,它首先计算每个类的IOU,然后计算类之间的平均值。IOU定义如下: .. math:: diff --git a/doc/fluid/api_cn/layers_cn/merge_selected_rows_cn.rst b/doc/fluid/api_cn/layers_cn/merge_selected_rows_cn.rst index 9e0dc9cdb719a8159f5278cfe002f6a05d1bc6a7..d44783ca46c84322e25a4d16a767663ccad95d25 100644 --- a/doc/fluid/api_cn/layers_cn/merge_selected_rows_cn.rst +++ b/doc/fluid/api_cn/layers_cn/merge_selected_rows_cn.rst @@ -5,6 +5,9 @@ merge_selected_rows .. py:function:: paddle.fluid.layers.merge_selected_rows(x, name=None) + + + 累加合并 `SelectedRows `_ ( ``x`` ) 中的重复行,并对行值由小到大重新排序。 参数: diff --git a/doc/fluid/api_cn/layers_cn/mse_loss_cn.rst b/doc/fluid/api_cn/layers_cn/mse_loss_cn.rst index ff8f9972bf53ba5db5f3ade1e1022c04e5fc1719..59678570f9e91da8e2ad46212955ea0802f8a462 100644 --- a/doc/fluid/api_cn/layers_cn/mse_loss_cn.rst +++ b/doc/fluid/api_cn/layers_cn/mse_loss_cn.rst @@ -5,6 +5,12 @@ mse_loss .. py:function:: paddle.fluid.layers.mse_loss(input,label) +:alias_main: paddle.nn.functional.mse_loss +:alias: paddle.nn.functional.mse_loss,paddle.nn.functional.loss.mse_loss +:old_api: paddle.fluid.layers.mse_loss + + + 该OP用于计算预测值和目标值的均方差误差。 对于预测值input和目标值label,公式为: diff --git a/doc/fluid/api_cn/layers_cn/mul_cn.rst b/doc/fluid/api_cn/layers_cn/mul_cn.rst index 03993fc7ee30d38bf62ce631949fa188715acf5d..4ce54f1a02584347177f65889ec546d5352cff50 100644 --- a/doc/fluid/api_cn/layers_cn/mul_cn.rst +++ b/doc/fluid/api_cn/layers_cn/mul_cn.rst @@ -5,6 +5,9 @@ mul .. 
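The ``mse_loss`` OP described above reduces to the mean of squared differences between prediction and label; a minimal NumPy sketch of that computation (illustrative only, not the Paddle kernel):

.. code-block:: python

    # mean squared error between a prediction and its label
    import numpy as np

    pred = np.array([1.0, 2.0, 3.0], dtype=np.float32)
    label = np.array([1.5, 2.0, 2.0], dtype=np.float32)
    mse = np.mean((pred - label) ** 2)
    print(mse)  # approximately 0.4167 = (0.25 + 0.0 + 1.0) / 3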
py:function:: paddle.fluid.layers.mul(x, y, x_num_col_dims=1, y_num_col_dims=1, name=None) + + + mul算子 此运算是用于对输入x和y执行矩阵乘法。 公式是: diff --git a/doc/fluid/api_cn/layers_cn/multi_box_head_cn.rst b/doc/fluid/api_cn/layers_cn/multi_box_head_cn.rst index 08c7830002290a9b5d02c60dae0e67aa37ae090d..a8fb7337f826d00f919a382c0377acbba01b1f5c 100644 --- a/doc/fluid/api_cn/layers_cn/multi_box_head_cn.rst +++ b/doc/fluid/api_cn/layers_cn/multi_box_head_cn.rst @@ -3,10 +3,13 @@ multi_box_head ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.layers.multi_box_head(inputs, image, base_size, num_classes, aspect_ratios, min_ratio=None, max_ratio=None, min_sizes=None, max_sizes=None, steps=None, step_w=None, step_h=None, offset=0.5, variance=[0.1, 0.1, 0.2, 0.2], flip=True, clip=False, kernel_size=1, pad=0, stride=1, name=None, min_max_aspect_ratios_order=False) +:api_attr: 声明式编程模式(静态图) + + + 基于SSD(Single Shot MultiBox Detector)算法,在不同层输入特征上提取先验框、计算回归的坐标位置和分类的置信度,并合并到一起作为输出,具体参数解释和输出格式参考下面说明。更详细信息,请参阅SSD论文 `SSD:Single Shot MultiBox Detector `_ 的2.2节。 参数: diff --git a/doc/fluid/api_cn/layers_cn/multiclass_nms_cn.rst b/doc/fluid/api_cn/layers_cn/multiclass_nms_cn.rst index 7dc576160b68917901ddbc71649fc94eb9fb118f..12e3c804d95d81742116abfbf8bc71090621db1c 100644 --- a/doc/fluid/api_cn/layers_cn/multiclass_nms_cn.rst +++ b/doc/fluid/api_cn/layers_cn/multiclass_nms_cn.rst @@ -5,6 +5,12 @@ multiclass_nms .. py:function:: paddle.fluid.layers.multiclass_nms(bboxes, scores, score_threshold, nms_top_k, keep_top_k, nms_threshold=0.3, normalized=True, nms_eta=1.0, background_label=0, name=None) +:alias_main: paddle.nn.functional.multiclass_nms +:alias: paddle.nn.functional.multiclass_nms,paddle.nn.functional.extension.multiclass_nms +:old_api: paddle.fluid.layers.multiclass_nms + + + **多分类NMS** 该OP用于对边界框(bounding box)和评分(scores)执行多类非极大值抑制(NMS)。 diff --git a/doc/fluid/api_cn/layers_cn/multiplex_cn.rst b/doc/fluid/api_cn/layers_cn/multiplex_cn.rst index 03c3d091360bfbf3b606b02f4b5c1041124b6b1d..1cc5bf39c256f14165cbf87df1a15e02cee3a070 100644 --- a/doc/fluid/api_cn/layers_cn/multiplex_cn.rst +++ b/doc/fluid/api_cn/layers_cn/multiplex_cn.rst @@ -5,6 +5,12 @@ multiplex .. py:function:: paddle.fluid.layers.multiplex(inputs, index) +:alias_main: paddle.multiplex +:alias: paddle.multiplex,paddle.tensor.multiplex,paddle.tensor.math.multiplex +:old_api: paddle.fluid.layers.multiplex + + + 根据给定的index参数,该OP从每个输入Tensor中选择特定行构造输出Tensor。 设该OP输入包含 :math:`m` 个Tensor,其中 :math:`I_{i}` 代表第i个输入Tensor,:math:`i` 处于区间 :math:`[0,m)`。 diff --git a/doc/fluid/api_cn/layers_cn/multiply_cn.rst b/doc/fluid/api_cn/layers_cn/multiply_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4bb5ee012326ad9ae7ded391a124312f5654b769 --- /dev/null +++ b/doc/fluid/api_cn/layers_cn/multiply_cn.rst @@ -0,0 +1,79 @@ +.. _cn_api_fluid_layers_multiply: + +multiply +------------------------------- + +.. py:function:: paddle.multiply(x, y, axis=-1, name=None) + +:alias_main: paddle.multiply +:alias: paddle.multiply, paddle.tensor.multiply, paddle.tensor.math.multiply + + + +该OP是逐元素相乘算子,输入 ``x`` 与输入 ``y`` 逐元素相乘,并将各个位置的输出元素保存到返回结果中。 + +等式是: + +.. math:: + Out = X \odot Y + +- :math:`X` :多维Tensor。 +- :math:`Y` :维度必须小于等于X维度的Tensor。 + +对于这个运算算子有2种情况: + 1. :math:`Y` 的 ``shape`` 与 :math:`X` 相同。 + 2. :math:`Y` 的 ``shape`` 是 :math:`X` 的连续子序列。 + +对于情况2: + 1. 用 :math:`Y` 匹配 :math:`X` 的形状(shape),其中 ``axis`` 是 :math:`Y` 在 :math:`X` 上的起始维度的位置。 + 2. 如果 ``axis`` 为-1(默认值),则 :math:`axis= rank(X)-rank(Y)` 。 + 3. 
考虑到子序列, :math:`Y` 的大小为1的尾部维度将被忽略,例如shape(Y)=(2,1)=>(2)。 + +例如: + +.. code-block:: text + + shape(X) = (2, 3, 4, 5), shape(Y) = (,) + shape(X) = (2, 3, 4, 5), shape(Y) = (5,) + shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5), with axis=-1(default) or axis=2 + shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1 + shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0 + shape(X) = (2, 3, 4, 5), shape(Y) = (2, 1), with axis=0 + +参数: + - **x** (Variable)- 多维 ``Tensor`` 或 ``LoDTensor`` 。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64``。 + - **y** (Variable)- 多维 ``Tensor`` 或 ``LoDTensor`` 。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64``。 + - **axis** (int32,可选)- ``y`` 的维度对应到 ``x`` 维度上时的索引。默认值为 -1。 + - **name** (string,可选)- 输出的名字。默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 + + +返回: 维度与 ``x`` 相同的 ``Tensor`` 或 ``LoDTensor`` ,数据类型与 ``x`` 相同。 + +返回类型: Variable。 + +**代码示例 1** + +.. code-block:: python + + import paddle + import numpy as np + paddle.enable_imperative() + x_data = np.array([[1, 2], [3, 4]], dtype=np.float32) + y_data = np.array([[5, 6], [7, 8]], dtype=np.float32) + x = paddle.imperative.to_variable(x_data) + y = paddle.imperative.to_variable(y_data) + res = paddle.multiply(x, y) + print(res.numpy()) # [[5, 12], [21, 32]] + x_data = np.array([[[1, 2, 3], [1, 2, 3]]], dtype=np.float32) + y_data = np.array([1, 2], dtype=np.float32) + x = paddle.imperative.to_variable(x_data) + y = paddle.imperative.to_variable(y_data) + res = paddle.multiply(x, y, axis=1) + print(res.numpy()) # [[[1, 2, 3], [2, 4, 6]]] + + + + + + + diff --git a/doc/fluid/api_cn/layers_cn/natural_exp_decay_cn.rst b/doc/fluid/api_cn/layers_cn/natural_exp_decay_cn.rst index fbc2ca21ee5e9459d17d7e3d237bad2bc32193ec..8fc60de95cc71c793c0d3b825bf63817654fbacc 100644 --- a/doc/fluid/api_cn/layers_cn/natural_exp_decay_cn.rst +++ b/doc/fluid/api_cn/layers_cn/natural_exp_decay_cn.rst @@ -5,6 +5,12 @@ natural_exp_decay .. py:function:: paddle.fluid.layers.natural_exp_decay(learning_rate, decay_steps, decay_rate, staircase=False) +:alias_main: paddle.nn.functional.natural_exp_decay +:alias: paddle.nn.functional.natural_exp_decay,paddle.nn.functional.learning_rate.natural_exp_decay +:old_api: paddle.fluid.layers.natural_exp_decay + + + 将自然指数衰减运用到初始学习率上。 训练模型时,在训练过程中降低学习率。 自然指数衰减使用自然指数来计算衰减倍率,每 ``decay_steps`` 步衰减倍率的自然指数幂次项上增加 ``decay_rate`` 。 diff --git a/doc/fluid/api_cn/layers_cn/nce_cn.rst b/doc/fluid/api_cn/layers_cn/nce_cn.rst index 20b66474c0e1fee0276947b4fdb64e6fcd237cda..ef4532b84619f8444cd7a01c6821f7eb26c2739b 100644 --- a/doc/fluid/api_cn/layers_cn/nce_cn.rst +++ b/doc/fluid/api_cn/layers_cn/nce_cn.rst @@ -3,10 +3,13 @@ nce ------------------------------- -**注意:该API仅支持【静态图】模式** .. 
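The ``axis`` rule of ``paddle.multiply`` described above can be cross-checked with plain NumPy broadcasting: ``y`` is aligned with dimension ``axis`` of ``x`` and trailing size-1 dimensions are appended. The sketch below reproduces the second multiply example and is only an illustration of the documented behaviour.

.. code-block:: python

    # NumPy cross-check of the axis=1 multiply example above
    import numpy as np

    x = np.array([[[1, 2, 3], [1, 2, 3]]], dtype=np.float32)  # shape (1, 2, 3)
    y = np.array([1, 2], dtype=np.float32)                    # shape (2,)
    out = x * y.reshape(1, 2, 1)                              # align y with dim 1 of x
    print(out)  # [[[1. 2. 3.]
                #   [2. 4. 6.]]]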
py:function:: paddle.fluid.layers.nce(input, label, num_total_classes, sample_weight=None, param_attr=None, bias_attr=None, num_neg_samples=None, name=None, sampler='uniform', custom_dist=None, seed=0, is_sparse=False) +:api_attr: 声明式编程模式(静态图) + + + 计算并返回噪音对比估计损失值( noise-contrastive estimation training loss)。 请参考 `Noise-contrastive estimation: A new estimation principle for unnormalized statistical models `_ @@ -40,35 +43,35 @@ nce window_size = 5 words = [] - for i in xrange(window_size): - words.append(fluid.layers.data( - name='word_{0}'.format(i), shape=[1], dtype='int64')) + for i in range(window_size): + words.append(fluid.data( + name='word_{0}'.format(i), shape=[-1, 1], dtype='int64')) dict_size = 10000 label_word = int(window_size / 2) + 1 embs = [] - for i in xrange(window_size): + for i in range(window_size): if i == label_word: continue emb = fluid.layers.embedding(input=words[i], size=[dict_size, 32], - param_attr='embed', is_sparse=True) + param_attr='embed', is_sparse=True) embs.append(emb) embs = fluid.layers.concat(input=embs, axis=1) loss = fluid.layers.nce(input=embs, label=words[label_word], - num_total_classes=dict_size, param_attr='nce.w_0', - bias_attr='nce.b_0') + num_total_classes=dict_size, param_attr='nce.w_0', + bias_attr='nce.b_0') - # 或使用自定义分布 + #or use custom distribution dist = np.array([0.05,0.5,0.1,0.3,0.05]) loss = fluid.layers.nce(input=embs, label=words[label_word], - num_total_classes=5, param_attr='nce.w_1', - bias_attr='nce.b_1', - num_neg_samples=3, - sampler="custom_dist", - custom_dist=dist) + num_total_classes=5, param_attr='nce.w_1', + bias_attr='nce.b_1', + num_neg_samples=3, + sampler="custom_dist", + custom_dist=dist) diff --git a/doc/fluid/api_cn/layers_cn/noam_decay_cn.rst b/doc/fluid/api_cn/layers_cn/noam_decay_cn.rst index 4769b6dd7192b523fad7528a6bc0fe30773a2991..00a7ad2321b6864891b8cd1c8d04a32b39c30b5e 100644 --- a/doc/fluid/api_cn/layers_cn/noam_decay_cn.rst +++ b/doc/fluid/api_cn/layers_cn/noam_decay_cn.rst @@ -3,7 +3,13 @@ noam_decay ------------------------------- -.. py:function:: paddle.fluid.layers.noam_decay(d_model,warmup_steps) +.. 
py:function:: paddle.fluid.layers.noam_decay(d_model, warmup_steps) + +:alias_main: paddle.nn.functional.noam_decay +:alias: paddle.nn.functional.noam_decay,paddle.nn.functional.learning_rate.noam_decay +:old_api: paddle.fluid.layers.noam_decay + + Noam衰减方法 @@ -14,11 +20,12 @@ noam衰减的numpy实现如下: import paddle.fluid as fluid import numpy as np # 设置超参数 + base_lr = 0.01 d_model = 2 current_steps = 20 warmup_steps = 200 # 计算 - lr_value = np.power(d_model, -0.5) * np.min([ + lr_value = base_lr * np.power(d_model, -0.5) * np.min([ np.power(current_steps, -0.5), np.power(warmup_steps, -1.5) * current_steps]) @@ -27,6 +34,7 @@ noam衰减的numpy实现如下: 参数: - **d_model** (Variable|int) - 模型的输入、输出向量特征维度。类型可设置为标量Tensor,或int值。 - **warmup_steps** (Variable|int) - 预热步数,类型可设置为标量Tensor,或int值。 + - **learning_rate** (Variable|float|int,可选) - 初始学习率。如果类型为Variable,则为shape为[1]的Tensor,数据类型为float32或float64;也可以是python的int类型。默认值为1.0。 返回:衰减的学习率 @@ -41,7 +49,8 @@ noam衰减的numpy实现如下: learning_rate = 0.01 lr = fluid.layers.learning_rate_scheduler.noam_decay( 1/(warmup_steps *(learning_rate ** 2)), - warmup_steps) + warmup_steps, + learning_rate) diff --git a/doc/fluid/api_cn/layers_cn/not_equal_cn.rst b/doc/fluid/api_cn/layers_cn/not_equal_cn.rst index f9e59aa65429b2730dd6b8e7cf8beb9e8f99f9a1..5a9cedf60cc03d67d8571424cffc2af62a583184 100644 --- a/doc/fluid/api_cn/layers_cn/not_equal_cn.rst +++ b/doc/fluid/api_cn/layers_cn/not_equal_cn.rst @@ -3,7 +3,13 @@ not_equal ------------------------------- -.. py:function:: paddle.fluid.layers.not_equal(x, y, cond=None) +.. py:function:: paddle.fluid.layers.not_equal(x, y, cond=None, name=None) + +:alias_main: paddle.not_equal +:alias: paddle.not_equal,paddle.tensor.not_equal,paddle.tensor.logic.not_equal +:old_api: paddle.fluid.layers.not_equal + + 该OP逐元素地返回 :math:`x != y` 的逻辑值,使用重载算子 `!=` 可以有相同的计算函数效果。 @@ -11,8 +17,7 @@ not_equal - **x** (Variable) – 进行比较的第一个输入,是一个多维的Tensor,数据类型可以是float32,float64,int32,int64。 - **y** (Variable) – 进行比较的第二个输入,是一个多维的Tensor,数据类型可以是float32,float64,int32,int64。 - **cond** (Variable,可选) – 如果为None,则创建一个Tensor来作为进行比较的输出结果,该Tensor的shape和数据类型和输入x一致;如果不为None,则将Tensor作为该OP的输出,数据类型和数据shape需要和输入x一致。默认值为None。 - - **force_cpu** (bool,可选) – 是否强制将输出Tensor存储在CPU。默认值为None,表示将输出Tensor存储在CPU内存上;如果为False,则将输出Tensor存储在运行设备内存上。 - + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 返回:输出结果的Tensor,数据的shape和输入x一致。 返回类型:变量(Variable),数据类型为bool类型。 @@ -24,13 +29,12 @@ not_equal import paddle.fluid as fluid import paddle.fluid.layers as layers import numpy as np - label = fluid.layers.assign(np.array([3, 3], dtype="int32")) - limit = fluid.layers.assign(np.array([3, 2], dtype="int32")) - out_cond = fluid.layers.assign(np.array([1, 2], dtype="int32")) - out = fluid.layers.not_equal(x=label, y=limit) # out=[False, True] - out1 = fluid.layers.not_equal(x=label, y=limit, cond=out_cond) #out1=[False, True] out_cond=[False, True] - out2 = fluid.layers.not_equal(x=label, y=limit, force_cpu=False) #out2=[False, True] - out3 = label != limit #out3=[False, True] + label = layers.assign(np.array([2, 3], dtype='int32')) + limit = layers.assign(np.array([3, 2], dtype='int32')) + out = fluid.layers.not_equal(x=label, y=limit) #out=[True, True] + out1 = label != limit #out1=[True, True] + + diff --git a/doc/fluid/api_cn/layers_cn/npair_loss_cn.rst b/doc/fluid/api_cn/layers_cn/npair_loss_cn.rst index 7b8eb851431df2bd282b72774b52879f0acee473..2b6c681656b6c7d452fcdf1999d52dd2c2cdcad1 100644 --- a/doc/fluid/api_cn/layers_cn/npair_loss_cn.rst +++ 
b/doc/fluid/api_cn/layers_cn/npair_loss_cn.rst @@ -5,6 +5,12 @@ npair_loss .. py:function:: paddle.fluid.layers.npair_loss(anchor, positive, labels, l2_reg=0.002) +:alias_main: paddle.nn.functional.npair_loss +:alias: paddle.nn.functional.npair_loss,paddle.nn.functional.loss.npair_loss +:old_api: paddle.fluid.layers.npair_loss + + + **Npair Loss Layer** 参考阅读 `Improved Deep Metric Learning with Multi class N pair Loss Objective `_ diff --git a/doc/fluid/api_cn/layers_cn/one_hot_cn.rst b/doc/fluid/api_cn/layers_cn/one_hot_cn.rst index d8f66f65e0838418089e18f77648021c635dc539..2aaaf117e6fc9dc5ab26032db91bef286f14e567 100644 --- a/doc/fluid/api_cn/layers_cn/one_hot_cn.rst +++ b/doc/fluid/api_cn/layers_cn/one_hot_cn.rst @@ -5,6 +5,9 @@ one_hot .. py:function:: paddle.fluid.layers.one_hot(input, depth, allow_out_of_range=False) + + + **注意:此OP要求输入Tensor shape的最后一维必须为1。此OP将在未来的版本中被移除!推荐使用fluid.** :ref:`cn_api_fluid_one_hot` 。 该OP将输入(input)中的每个id转换为一个one-hot向量,其长度为 ``depth`` ,该id对应的向量维度上的值为1,其余维度的值为0。 diff --git a/doc/fluid/api_cn/layers_cn/ones_cn.rst b/doc/fluid/api_cn/layers_cn/ones_cn.rst index 881ebd4546387be2a231166bd14bef7d2eb6c02f..1eb93c5e0a86886cf3d88e93a6cea0c6e8e23e92 100644 --- a/doc/fluid/api_cn/layers_cn/ones_cn.rst +++ b/doc/fluid/api_cn/layers_cn/ones_cn.rst @@ -5,18 +5,18 @@ ones .. py:function:: paddle.fluid.layers.ones(shape,dtype,force_cpu=False) -**ones** - -该OP创建形状为 ``shape`` 、数据类型为 ``dtype`` 且值全为1的Tensor,该OP会将stop_gradient设置为True,即停止梯度更新。 +该OP创建形状为 ``shape`` 、数据类型为 ``dtype`` 且值全为1的Tensor。 参数: - - **shape** (tuple|list) - 输出Tensor的形状。 - - **dtype** (np.dtype|core.VarDesc.VarType|str) - 输出Tensor的数据类型,数据类型必须为float16、float32、float64、int32或int64。 - - **force_cpu** (bool) – 是否强制将输出Tensor写入CPU内存。如果 ``force_cpu`` 为False,则将输出Tensor写入当前所在运算设备的内存,默认为False。 + - **shape** (tuple|list|Tensor) - 输出Tensor的形状, ``shape`` 的数据类型为int32或者int64。 + - **dtype** (np.dtype|str) - 输出Tensor的数据类型,数据类型必须为bool、 float16、float32、float64、int32或int64。 + - **force_cpu** (bool, 可选) – 是否强制将输出Tensor写入CPU内存。如果 ``force_cpu`` 为False,则将输出Tensor写入当前所在运算设备的内存,默认为False。 返回:值全为1的Tensor,数据类型和 ``dtype`` 定义的类型一致。 -返回类型:Variable +抛出异常: + - ``TypeError`` - 当 ``dtype`` 不是bool、 float16、float32、float64、int32、int64和None时。 + - ``TypeError`` - 当 ``shape`` 不是tuple、list、或者Tensor时, 当 ``shape`` 为Tensor,其数据类型不是int32或者int64时。 **代码示例**: diff --git a/doc/fluid/api_cn/layers_cn/ones_like_cn.rst b/doc/fluid/api_cn/layers_cn/ones_like_cn.rst index 06082a02bae8d5b6e1595d3adafe5aea063dd110..5d1e6a89788690d771f0d1cb986e4bcf425e5968 100644 --- a/doc/fluid/api_cn/layers_cn/ones_like_cn.rst +++ b/doc/fluid/api_cn/layers_cn/ones_like_cn.rst @@ -5,6 +5,9 @@ ones_like .. py:function:: paddle.fluid.layers.ones_like(x, out=None) + + + ones_like 该功能创建一个形状与类型与x相似的张量,初始值为1。 diff --git a/doc/fluid/api_cn/layers_cn/pad2d_cn.rst b/doc/fluid/api_cn/layers_cn/pad2d_cn.rst index 9f2cb0673b10e867239dc68763eb396a0318ebbd..08e937f50a7eb33bb21adadc2039984ff221cf31 100644 --- a/doc/fluid/api_cn/layers_cn/pad2d_cn.rst +++ b/doc/fluid/api_cn/layers_cn/pad2d_cn.rst @@ -5,6 +5,12 @@ pad2d .. py:function:: paddle.fluid.layers.pad2d(input, paddings=[0, 0, 0, 0], mode='constant', pad_value=0.0, data_format='NCHW', name=None) +:alias_main: paddle.nn.functional.pad2d +:alias: paddle.nn.functional.pad2d,paddle.nn.functional.common.pad2d +:old_api: paddle.fluid.layers.pad2d + + + 该OP依照 paddings 和 mode 属性对input进行2维 ``pad`` 。 参数: @@ -19,36 +25,34 @@ pad2d 返回类型:Variable -示例: +**示例**: .. 
code-block:: text - 假设X是输入图像: + Input = [[[[1., 2., 3.], + [4., 5., 6.]]]] - X = [[1, 2, 3], - [4, 5, 6]] + Case 0: + paddings = [0, 1, 2, 3], + mode = 'constant' + pad_value = 0 + Out = [[[[0., 0., 1., 2., 3., 0., 0., 0.], + [0., 0., 4., 5., 6., 0., 0., 0.], + [0., 0., 0., 0., 0., 0., 0., 0.]]]] - Case 0: - paddings = [0, 1, 2, 3], - mode = 'constant' - pad_value = 0 - Out = [[0, 0, 1, 2, 3, 0, 0, 0] - [0, 0, 4, 5, 6, 0, 0, 0] - [0, 0, 0, 0, 0, 0, 0, 0]] + Case 1: + paddings = [0, 1, 2, 1], + mode = 'reflect' + Out = [[[[3., 2., 1., 2., 3., 2.], + [6., 5., 4., 5., 6., 5.], + [3., 2., 1., 2., 3., 2.]]]] - Case 1: - paddings = [0, 1, 2, 1], - mode = 'reflect' - Out = [[3, 2, 1, 2, 3, 2] - [6, 5, 4, 5, 6, 5] - [3, 2, 1, 2, 3, 2]] - - Case 2: - paddings = [0, 1, 2, 1], - mode = 'edge' - Out = [[1, 1, 1, 2, 3, 3] - [4, 4, 4, 5, 6, 6] - [4, 4, 4, 5, 6, 6]] + Case 2: + paddings = [0, 1, 2, 1], + mode = 'edge' + Out = [[[[1., 1., 1., 2., 3., 3.], + [4., 4., 4., 5., 6., 6.], + [4., 4., 4., 5., 6., 6.]]]] @@ -56,8 +60,6 @@ pad2d .. code-block:: python - import paddle.fluid as fluid - data = fluid.layers.data(name='data', shape=[3, 32, 32], dtype='float32') - result = fluid.layers.pad2d(input=data, paddings=[1,2,3,4], mode='reflect') - - + import paddle.fluid as fluid + data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32') + result = fluid.layers.pad2d(input=data, paddings=[0, 1, 2, 3], mode='reflect') diff --git a/doc/fluid/api_cn/layers_cn/pad_cn.rst b/doc/fluid/api_cn/layers_cn/pad_cn.rst index 04ff8cd0fc6b0e103dcabeeed99336e552ca5f9f..a9ed06ded042fe47826667b3d70acfe766181c21 100644 --- a/doc/fluid/api_cn/layers_cn/pad_cn.rst +++ b/doc/fluid/api_cn/layers_cn/pad_cn.rst @@ -5,26 +5,30 @@ pad .. py:function:: paddle.fluid.layers.pad(x, paddings, pad_value=0.0, name=None) +:alias_main: paddle.nn.functional.pad +:alias: paddle.nn.functional.pad,paddle.nn.functional.common.pad +:old_api: paddle.fluid.layers.pad + + + 该OP在Tensor上填充一个由 ``pad_value`` 给出的常数值,填充宽度由 ``paddings`` 指定。 其中,维度 ``i`` 中 ``x`` 内容前填充的值个数用 ``paddings[2*i]`` 表示,维度 ``i`` 中 ``x`` 内容后填充的值个数用 ``paddings[2*i+1]`` 表示。 -**样例**: +**示例**: -:: +.. code-block:: text Given: + x = [[1, 2], [3, 4]] - x = [[1, 2], [3, 4]] - - paddings = [0, 1, 1, 2] + paddings = [0, 1, 1, 2] - pad_value = 0 + pad_value = 0 Return: - - out = [[0, 1, 2, 0, 0] - [0, 3, 4, 0, 0] - [0, 0, 0, 0, 0]] + out = [[0, 1, 2, 0, 0] + [0, 3, 4, 0, 0] + [0, 0, 0, 0, 0]] 参数: @@ -44,15 +48,7 @@ pad # x 为一个秩为2的张量 import paddle.fluid as fluid - x = fluid.layers.data(name='data', shape=[224], dtype='float32') + x = fluid.data(name='data', shape=[300, 300], dtype='float32') out = fluid.layers.pad(x=x, paddings=[0, 1, 1, 2], pad_value=0.) - - - - - - - - diff --git a/doc/fluid/api_cn/layers_cn/pad_constant_like_cn.rst b/doc/fluid/api_cn/layers_cn/pad_constant_like_cn.rst index 3172afab76e7bd0343978a7f999a97fd9c89009a..8f0bad53df1e4c53ef64baa921b1153404d98a5b 100644 --- a/doc/fluid/api_cn/layers_cn/pad_constant_like_cn.rst +++ b/doc/fluid/api_cn/layers_cn/pad_constant_like_cn.rst @@ -5,11 +5,17 @@ pad_constant_like .. 
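Case 1 of the ``pad2d`` example above ('reflect' mode) can be reproduced with ``numpy.pad``, which uses the same edge-excluding reflection; the sketch below is only a cross-check of the documented output, with paddings given as [top, bottom, left, right].

.. code-block:: python

    # reproduces pad2d Case 1: paddings = [0, 1, 2, 1], mode='reflect'
    import numpy as np

    x = np.array([[1., 2., 3.],
                  [4., 5., 6.]])
    out = np.pad(x, ((0, 1), (2, 1)), mode='reflect')
    print(out)
    # [[3. 2. 1. 2. 3. 2.]
    #  [6. 5. 4. 5. 6. 5.]
    #  [3. 2. 1. 2. 3. 2.]]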
py:function:: paddle.fluid.layers.pad_constant_like(x, y, pad_value=0.0, name=None) +:alias_main: paddle.nn.functional.pad_constant_like +:alias: paddle.nn.functional.pad_constant_like,paddle.nn.functional.common.pad_constant_like +:old_api: paddle.fluid.layers.pad_constant_like + + + 该OP使用 ``pad_value`` 填充 ``y`` ,填充到每个维度值的数量由x和y的形状而指定,((0,x.shape[0] - y.shape[0]), ..., (0, x.shape[i] - y.shape[i]), ..., (0, x.shape[n] - y.shape[n]))是每个维度填充的宽度,对于维度i,填充宽度 ``(0, x.shape[i] - y.shape[i])`` ,表示在y的第i维开头不填充,而在末尾填充 ``x.shape[i] - y.shape[i]`` 个位置。该OP要求y与x具有相同的秩,并且对每个维度i, ``y.shape[i] <= x.shape[i]`` 。 -**样例** +**示例**: -:: +.. code-block:: text Given: X = [[[[ 0, 1, 2], @@ -24,30 +30,34 @@ pad_constant_like [27, 28, 29]], [[30, 31, 32], [33, 34, 35]]]] + X.shape = (2, 3, 2, 3) Y = [[[[35, 36, 37]], [[38, 39, 40]], [[41, 42, 43]]]] + Y.shape = (1, 3, 1, 3) - and + And pad_value = 0. - Output is: - out = [[[[35, 36, 37], - [0, 0, 0]], + Return: + Out = [[[[35, 36, 37], + [ 0, 0, 0]], [[38, 39, 40], - [0, 0, 0]], + [ 0, 0, 0]], [[41, 42, 43], - [0, 0, 0]]], - [[[0, 0, 0], - [0, 0, 0]], - [[0, 0, 0], - [0, 0, 0]], - [[0, 0, 0], - [0, 0, 0]]]] - out.shape = [2, 3, 2, 3] + [ 0, 0, 0]]], + [[[ 0, 0, 0], + [ 0, 0, 0]], + [[ 0, 0, 0], + [ 0, 0, 0]], + [[ 0, 0, 0], + [ 0, 0, 0]]]] + + Out.shape = [2, 3, 2, 3] + 参数: - **x** (Variable)- 多维Tensor @@ -66,8 +76,8 @@ pad_constant_like # x是秩为4的tensor, x.shape = (2, 3, 2, 3) # y是秩为4的tensor, y.shape = (1, 3, 1, 3) import paddle.fluid as fluid - x = fluid.layers.data(name='x', shape=[2,3,2,3], dtype='float32') - y = fluid.layers.data(name='y', shape=[1,3,1,3], dtype='float32') + x = fluid.data(name='x', shape=[2,3,2,3], dtype='float32') + y = fluid.data(name='y', shape=[1,3,1,3], dtype='float32') out = fluid.layers.pad_constant_like(x=x, y=y, pad_value=0.) # out是秩为4的tensor, out.shape = [2, 3 ,2 , 3] diff --git a/doc/fluid/api_cn/layers_cn/piecewise_decay_cn.rst b/doc/fluid/api_cn/layers_cn/piecewise_decay_cn.rst index 9aeea557f92951e63710679f21ea31ed0a20287f..ecb2cd6274244631a6390f22a3c09aded78fef1a 100644 --- a/doc/fluid/api_cn/layers_cn/piecewise_decay_cn.rst +++ b/doc/fluid/api_cn/layers_cn/piecewise_decay_cn.rst @@ -5,6 +5,12 @@ piecewise_decay .. py:function:: paddle.fluid.layers.piecewise_decay(boundaries,values) +:alias_main: paddle.nn.functional.piecewise_decay +:alias: paddle.nn.functional.piecewise_decay,paddle.nn.functional.learning_rate.piecewise_decay +:old_api: paddle.fluid.layers.piecewise_decay + + + 对初始学习率进行分段衰减。 该算法可用如下代码描述。 diff --git a/doc/fluid/api_cn/layers_cn/pixel_shuffle_cn.rst b/doc/fluid/api_cn/layers_cn/pixel_shuffle_cn.rst index bd6b1cd77283f48943a5c46ff00d18c70309cac8..07b29dd8892aece514291f781a5e3918dc882834 100644 --- a/doc/fluid/api_cn/layers_cn/pixel_shuffle_cn.rst +++ b/doc/fluid/api_cn/layers_cn/pixel_shuffle_cn.rst @@ -5,6 +5,12 @@ pixel_shuffle .. py:function:: paddle.fluid.layers.pixel_shuffle(x, upscale_factor) +:alias_main: paddle.nn.functional.pixel_shuffle +:alias: paddle.nn.functional.pixel_shuffle,paddle.nn.functional.vision.pixel_shuffle +:old_api: paddle.fluid.layers.pixel_shuffle + + + 该OP将一个形为[N, C, H, W]的Tensor重新排列成形为 [N, C/r**2, H*r, W*r] 的Tensor。这样做有利于实现步长(stride)为1/r的高效sub-pixel(亚像素)卷积。详见Shi等人在2016年发表的论文 `Real Time Single Image and Video Super Resolution Using an Efficient Sub Pixel Convolutional Neural Network `_ 。 .. 
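The [N, C, H, W] to [N, C/r**2, H*r, W*r] rearrangement performed by ``pixel_shuffle`` is equivalent to a reshape followed by a transpose. The NumPy sketch below assumes the usual sub-pixel (depth-to-space) layout and illustrates the idea rather than the Paddle kernel.

.. code-block:: python

    # depth-to-space rearrangement with upscale factor r (illustrative sketch)
    import numpy as np

    def pixel_shuffle_np(x, r):
        n, c, h, w = x.shape                        # c must be divisible by r * r
        x = x.reshape(n, c // (r * r), r, r, h, w)
        x = x.transpose(0, 1, 4, 2, 5, 3)           # -> [N, C/r^2, H, r, W, r]
        return x.reshape(n, c // (r * r), h * r, w * r)

    x = np.arange(9 * 4 * 4, dtype=np.float32).reshape(1, 9, 4, 4)
    print(pixel_shuffle_np(x, 3).shape)  # (1, 1, 12, 12)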
code-block:: text diff --git a/doc/fluid/api_cn/layers_cn/polygon_box_transform_cn.rst b/doc/fluid/api_cn/layers_cn/polygon_box_transform_cn.rst index 81621b34a0353275a60ae7888bb4ffcdcd91677d..06fa39efbdc758bc5e03dcf64e9554d2f627086b 100644 --- a/doc/fluid/api_cn/layers_cn/polygon_box_transform_cn.rst +++ b/doc/fluid/api_cn/layers_cn/polygon_box_transform_cn.rst @@ -5,6 +5,12 @@ polygon_box_transform .. py:function:: paddle.fluid.layers.polygon_box_transform(input, name=None) +:alias_main: paddle.nn.functional.polygon_box_transform +:alias: paddle.nn.functional.polygon_box_transform,paddle.nn.functional.extension.polygon_box_transform +:old_api: paddle.fluid.layers.polygon_box_transform + + + **PolygonBoxTransform 算子** 该op用于将偏移坐标改变为真实的坐标。 diff --git a/doc/fluid/api_cn/layers_cn/polynomial_decay_cn.rst b/doc/fluid/api_cn/layers_cn/polynomial_decay_cn.rst index ec04e1964d320095ffe1946e1f5c83cdcd347fb2..e03443b0bd7d02177ef045393595f8d06dc4abf5 100644 --- a/doc/fluid/api_cn/layers_cn/polynomial_decay_cn.rst +++ b/doc/fluid/api_cn/layers_cn/polynomial_decay_cn.rst @@ -5,6 +5,12 @@ polynomial_decay .. py:function:: paddle.fluid.layers.polynomial_decay(learning_rate,decay_steps,end_learning_rate=0.0001,power=1.0,cycle=False) +:alias_main: paddle.nn.functional.polynomial_decay +:alias: paddle.nn.functional.polynomial_decay,paddle.nn.functional.learning_rate.polynomial_decay +:old_api: paddle.fluid.layers.polynomial_decay + + + 对初始学习率使用多项式衰减 .. code-block:: text diff --git a/doc/fluid/api_cn/layers_cn/pool2d_cn.rst b/doc/fluid/api_cn/layers_cn/pool2d_cn.rst index af74397a256a93162f1fec898bdf51c7a9a579a8..d1990be374c9807f6272937e7e6c6d75e3e89062 100644 --- a/doc/fluid/api_cn/layers_cn/pool2d_cn.rst +++ b/doc/fluid/api_cn/layers_cn/pool2d_cn.rst @@ -5,6 +5,12 @@ pool2d .. py:function:: paddle.fluid.layers.pool2d(input, pool_size=-1, pool_type='max', pool_stride=1, pool_padding=0, global_pooling=False, use_cudnn=True, ceil_mode=False, name=None, exclusive=True, data_format="NCHW") +:alias_main: paddle.nn.functional.pool2d +:alias: paddle.nn.functional.pool2d,paddle.nn.functional.pooling.pool2d +:old_api: paddle.fluid.layers.pool2d + + + 该OP使用上述输入参数的池化配置,为二维空间池化操作,根据 ``input`` ,池化核大小 ``pool_size`` ,池化类型 ``pool_type`` ,步长 ``pool_stride`` ,填充 ``pool_padding`` 等参数得到输出。 输入 ``input`` 和输出(out)采用NCHW或NHWC格式,N为批大小,C是通道数,H是特征高度,W是特征宽度。 diff --git a/doc/fluid/api_cn/layers_cn/pool3d_cn.rst b/doc/fluid/api_cn/layers_cn/pool3d_cn.rst index 47e314896d920d28782a0ce0f5f1564dfcf3dce0..d23d77b7247696b0b6e53b269a6d24b6f59e6b9d 100644 --- a/doc/fluid/api_cn/layers_cn/pool3d_cn.rst +++ b/doc/fluid/api_cn/layers_cn/pool3d_cn.rst @@ -5,6 +5,12 @@ pool3d .. 
py:function:: paddle.fluid.layers.pool3d(input, pool_size=-1, pool_type='max', pool_stride=1, pool_padding=0, global_pooling=False, use_cudnn=True, ceil_mode=False, name=None, exclusive=True, data_format="NCDHW") +:alias_main: paddle.nn.functional.pool3d +:alias: paddle.nn.functional.pool3d,paddle.nn.functional.pooling.pool3d +:old_api: paddle.fluid.layers.pool3d + + + 该OP使用上述输入参数的池化配置,为三维空间池化操作,根据 ``input`` ,池化核大小 ``pool_size`` ,池化类型 ``pool_type`` ,步长 ``pool_stride`` 和填充 ``pool_padding`` 等参数计算输出。 输入 ``input`` 和输出(Out)采用NCDHW或NDHWC格式,其中N是批大小,C是通道数,D,H和W分别是特征的深度,高度和宽度。 diff --git a/doc/fluid/api_cn/layers_cn/pow_cn.rst b/doc/fluid/api_cn/layers_cn/pow_cn.rst index 0da2cf5c4df954c5d158a98716338f7d252bf14f..40eaf542138527856d25a002f16a4cf29c891f47 100644 --- a/doc/fluid/api_cn/layers_cn/pow_cn.rst +++ b/doc/fluid/api_cn/layers_cn/pow_cn.rst @@ -3,22 +3,25 @@ pow ------------------------------- -.. py:function:: paddle.fluid.layers.pow(x, factor=1.0, name=None) +.. py:function:: paddle.pow(x, exponent, name=None) + + + 该OP是指数激活算子: .. math:: - out = x^{factor} + out = x^{exponent} **注意:如果需要对输入进行 elementwise_pow 操作,请查使用** :ref:`cn_api_fluid_layers_elementwise_pow` 。 参数: - - **x** (Variable)- 多维 ``Tensor`` 或 ``LoDTensor`` ,数据类型为 ``float32`` 或 ``float64`` 。 - - **factor** (float32|Variable,可选)- ``float32`` 或形状为[1]的 ``Tensor`` 或 ``LoDTensor``,数据类型为 ``float32``。Pow OP的指数因子。默认值:1.0。 + - **x** (Variable)- 多维 ``Variable``,数据类型为 ``float32`` 或 ``float64`` 。 + - **exponent** (float32|Variable)- ``float32`` 或形状为[1]的 ``Variable``,数据类型为 ``float32``。 - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置。默认值: ``None``。 -返回:维度与输入 `x` 相同的 ``Tensor`` 或 ``LoDTensor``,数据类型与 ``x`` 相同。 +返回:维度与输入 `x` 相同的 ``Variable``,数据类型与 ``x`` 相同。 返回类型:Variable。 @@ -27,18 +30,23 @@ pow .. code-block:: python - import paddle.fluid as fluid - - x = fluid.layers.data(name="x", shape=[3,10,32,32], dtype="float32") - - # example 1: argument factor is float - y_1 = fluid.layers.pow(x, factor=2.0) - # y_1 is x^{2.0} + import paddle + import numpy as np + x = fluid.data(name="x", shape=[32,32], dtype="float32") + paddle.enable_imperative() + + # example 1: exponent is a float + x_data = np.array([1, 2, 3]) + exponent = 2 + x = paddle.imperative.to_variable(x_data) + res = paddle.pow(x, exponent) + print(res.numpy()) # [1 4 9] + + # example 2: exponent is a Variable + exponent = paddle.fill_constant(shape=[1], value=2, dtype='float32') + res = paddle.pow(x, exponent) + print(res.numpy()) # [1 4 9] - # example 2: argument factor is Variable - factor_tensor = fluid.layers.fill_constant([1], "float32", 3.0) - y_2 = fluid.layers.pow(x, factor=factor_tensor) - # y_2 is x^{2.0} diff --git a/doc/fluid/api_cn/layers_cn/prelu_cn.rst b/doc/fluid/api_cn/layers_cn/prelu_cn.rst index 81e526344d2df1e10c7995bf07749b6fa4b972ea..b1ea4cfb569fac4c885b8292ba07f022886e0934 100644 --- a/doc/fluid/api_cn/layers_cn/prelu_cn.rst +++ b/doc/fluid/api_cn/layers_cn/prelu_cn.rst @@ -3,10 +3,13 @@ prelu ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.layers.prelu(x, mode, param_attr=None, name=None) +:api_attr: 声明式编程模式(静态图) + + + 等式: .. math:: diff --git a/doc/fluid/api_cn/layers_cn/prior_box_cn.rst b/doc/fluid/api_cn/layers_cn/prior_box_cn.rst index 19998e3a8448e403219c07204530ad1e0c852f7a..85383fbbe6d7f3a60d449c0db069967a12606785 100644 --- a/doc/fluid/api_cn/layers_cn/prior_box_cn.rst +++ b/doc/fluid/api_cn/layers_cn/prior_box_cn.rst @@ -4,6 +4,12 @@ prior_box ------------------------------- .. 
py:function:: paddle.fluid.layers.prior_box(input,image,min_sizes=None,max_sizes=None,aspect_ratios=[1.0],variance=[0.1,0.1,0.2,0.2],flip=False,clip=False,steps=[0.0,0.0],offset=0.5,name=None,min_max_aspect_ratios_order=False) +:alias_main: paddle.nn.functional.prior_box +:alias: paddle.nn.functional.prior_box,paddle.nn.functional.vision.prior_box +:old_api: paddle.fluid.layers.prior_box + + + 该OP为SSD(Single Shot MultiBox Detector)算法生成候选框。输入的每个位产生N个候选框,N由min_sizes,max_sizes和aspect_ratios的数目决定,候选框的尺寸在(min_size,max_size)之间,该尺寸根据aspect_ratios在序列中生成。 参数: diff --git a/doc/fluid/api_cn/layers_cn/prroi_pool_cn.rst b/doc/fluid/api_cn/layers_cn/prroi_pool_cn.rst index 18466f5f0911b65d02752986a36043455563c596..43221ea069434bd83fd066e6f1091ca9332f2e7f 100644 --- a/doc/fluid/api_cn/layers_cn/prroi_pool_cn.rst +++ b/doc/fluid/api_cn/layers_cn/prroi_pool_cn.rst @@ -5,6 +5,12 @@ prroi_pool .. py:function:: paddle.fluid.layers.prroi_pool(input, rois, output_channels, spatial_scale, pooled_height, pooled_width, name=None) +:alias_main: paddle.nn.functional.prroi_pool +:alias: paddle.nn.functional.prroi_pool,paddle.nn.functional.vision.prroi_pool +:old_api: paddle.fluid.layers.prroi_pool + + + PRROIPool运算 精确区域池化方法(Precise region of interest pooling,也称为PRROIPooling)是对输入的 "感兴趣区域"(RoI)执行插值处理,将离散的特征图数据映射到一个连续空间,使用二重积分再求均值的方式实现Pooling。 @@ -28,10 +34,18 @@ PRROIPool运算 .. code-block:: python + ## prroi_pool without batch_roi_num import paddle.fluid as fluid - x = fluid.layers.data(name='x', shape=[490, 28, 28], dtype='float32') - rois = fluid.layers.data(name='rois', shape=[4], lod_level=1, dtype='float32') - pool_out = fluid.layers.prroi_pool(x, rois, 10, 1.0, 7, 7) + x = fluid.data(name='x', shape=[None, 490, 28, 28], dtype='float32') + rois = fluid.data(name='rois', shape=[None, 4], lod_level=1, dtype='float32') + pool_out = fluid.layers.prroi_pool(x, rois, 1.0, 7, 7) + + ## prroi_pool with batch_roi_num + batchsize=4 + x2 = fluid.data(name='x2', shape=[batchsize, 490, 28, 28], dtype='float32') + rois2 = fluid.data(name='rois2', shape=[batchsize, 4], dtype='float32') + batch_rois_num = fluid.data(name='rois_nums', shape=[batchsize], dtype='int64') + pool_out2 = fluid.layers.prroi_pool(x2, rois2, 1.0, 7, 7, batch_roi_nums=batch_rois_num) diff --git a/doc/fluid/api_cn/layers_cn/psroi_pool_cn.rst b/doc/fluid/api_cn/layers_cn/psroi_pool_cn.rst index 329585766688eeff43109760e2113c90387a7fa9..92a246f63c4c04e4955322e73359398f03dd17c6 100644 --- a/doc/fluid/api_cn/layers_cn/psroi_pool_cn.rst +++ b/doc/fluid/api_cn/layers_cn/psroi_pool_cn.rst @@ -5,6 +5,12 @@ psroi_pool .. 
py:function:: paddle.fluid.layers.psroi_pool(input, rois, output_channels, spatial_scale, pooled_height, pooled_width, name=None) +:alias_main: paddle.nn.functional.psroi_pool +:alias: paddle.nn.functional.psroi_pool,paddle.nn.functional.vision.psroi_pool +:old_api: paddle.fluid.layers.psroi_pool + + + **注意 rois必须为2维LoDTensor,lod_level为1** 该OP执行PSROIPooling运算,是位置敏感的感兴趣区域池化方法(Position sensitive region of interest pooling,也称为PSROIPooling)。输入input是位置敏感的评分图,输入rois是感兴趣区域的位置坐标。PSROIPooling不同于普通ROIPooling的地方在于,输入input特征图的不同通道会跟输出特征图上的位置区域相关联,该方法是在R-FCN模型中首次提出来的,更多详细信息请参阅 https://arxiv.org/abs/1605.06409。 diff --git a/doc/fluid/api_cn/layers_cn/py_func_cn.rst b/doc/fluid/api_cn/layers_cn/py_func_cn.rst index dca98b091f3c13f32607009a8bf0c8e41baf615b..79d11adef7a0cf8abfd256ad8d81a58945ab7d86 100644 --- a/doc/fluid/api_cn/layers_cn/py_func_cn.rst +++ b/doc/fluid/api_cn/layers_cn/py_func_cn.rst @@ -3,10 +3,13 @@ py_func ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.layers.py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None) +:api_attr: 声明式编程模式(静态图) + + + PaddlePaddle Fluid通过py_func在Python端注册OP。py_func的设计原理在于Paddle中的LodTensor与numpy数组可以方便的互相转换,从而可使用Python中的numpy API来自定义一个Python OP。 该自定义的Python OP的前向函数是 ``func``, 反向函数是 ``backward_func`` 。 Paddle将在前向部分调用 ``func`` ,并在反向部分调用 ``backward_func`` (如果 ``backward_func`` 不是None)。 ``x`` 为 ``func`` 的输入,必须为LoDTensor类型; ``out`` 为 ``func`` 的输出, 既可以是LoDTensor类型, 也可以是numpy数组。 diff --git a/doc/fluid/api_cn/layers_cn/py_reader_cn.rst b/doc/fluid/api_cn/layers_cn/py_reader_cn.rst index 602a1f8a254e27b95d6fd95fcdec514791f60af2..2f8f6f631479e9220221535b4ead4d0e753ead35 100644 --- a/doc/fluid/api_cn/layers_cn/py_reader_cn.rst +++ b/doc/fluid/api_cn/layers_cn/py_reader_cn.rst @@ -3,10 +3,13 @@ py_reader ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.layers.py_reader(capacity, shapes, dtypes, lod_levels=None, name=None, use_double_buffer=True) +:api_attr: 声明式编程模式(静态图) + + + 创建一个在Python端提供数据的reader @@ -73,70 +76,69 @@ py_reader .. 
code-block:: python - import paddle - import paddle.fluid as fluid - import paddle.dataset.mnist as mnist - - def network(reader): - img, label = fluid.layers.read_file(reader) - # 用户自定义网络,此处以softmax回归为例 - predict = fluid.layers.fc(input=img, size=10, act='softmax') - loss = fluid.layers.cross_entropy(input=predict, label=label) - return fluid.layers.mean(loss) - - # 新建 train_main_prog 和 train_startup_prog - train_main_prog = fluid.Program() - train_startup_prog = fluid.Program() - with fluid.program_guard(train_main_prog, train_startup_prog): - # 使用 fluid.unique_name.guard() 实现与test program的参数共享 - with fluid.unique_name.guard(): - train_reader = fluid.layers.py_reader(capacity=64, - shapes=[(-1, 1, 28, 28), (-1, 1)], - dtypes=['float32', 'int64'], + import paddle + import paddle.fluid as fluid + import paddle.dataset.mnist as mnist + + def network(reader): + img, label = fluid.layers.read_file(reader) + # 用户自定义网络,此处以softmax回归为例 + predict = fluid.layers.fc(input=img, size=10, act='softmax') + loss = fluid.layers.cross_entropy(input=predict, label=label) + return fluid.layers.mean(loss) + + # 新建 train_main_prog 和 train_startup_prog + train_main_prog = fluid.Program() + train_startup_prog = fluid.Program() + with fluid.program_guard(train_main_prog, train_startup_prog): + # 使用 fluid.unique_name.guard() 实现与test program的参数共享 + with fluid.unique_name.guard(): + train_reader = fluid.layers.py_reader(capacity=64, + shapes=[(-1, 1, 28, 28), (-1, 1)], + dtypes=['float32', 'int64'], name='train_reader') - train_reader.decorate_paddle_reader( - paddle.reader.shuffle(paddle.batch(mnist.train(), - batch_size=5), + train_reader.decorate_paddle_reader( + paddle.reader.shuffle(paddle.batch(mnist.train(), + batch_size=5), buf_size=500)) - train_loss = network(train_reader) # 一些网络定义 - adam = fluid.optimizer.Adam(learning_rate=0.01) - adam.minimize(train_loss) - - # Create test_main_prog and test_startup_prog - test_main_prog = fluid.Program() - test_startup_prog = fluid.Program() - with fluid.program_guard(test_main_prog, test_startup_prog): - # 使用 fluid.unique_name.guard() 实现与train program的参数共享 - with fluid.unique_name.guard(): - test_reader = fluid.layers.py_reader(capacity=32, - shapes=[(-1, 1, 28, 28), (-1, 1)], - dtypes=['float32', 'int64'], - name='test_reader') - test_reader.decorate_paddle_reader(paddle.batch(mnist.test(), 512)) - - test_loss = network(test_reader) - - fluid.Executor(fluid.CUDAPlace(0)).run(train_startup_prog) - fluid.Executor(fluid.CUDAPlace(0)).run(test_startup_prog) - - train_exe = fluid.ParallelExecutor(use_cuda=True, - loss_name=train_loss.name, main_program=train_main_prog) - test_exe = fluid.ParallelExecutor(use_cuda=True, - loss_name=test_loss.name, main_program=test_main_prog) - for epoch_id in range(10): - train_reader.start() - try: - while True: - train_exe.run(fetch_list=[train_loss.name]) - except fluid.core.EOFException: - train_reader.reset() - - test_reader.start() - try: - while True: - test_exe.run(fetch_list=[test_loss.name]) - except fluid.core.EOFException: - test_reader.reset() + train_loss = network(train_reader) # 一些网络定义 + adam = fluid.optimizer.Adam(learning_rate=0.01) + adam.minimize(train_loss) + + # Create test_main_prog and test_startup_prog + test_main_prog = fluid.Program() + test_startup_prog = fluid.Program() + with fluid.program_guard(test_main_prog, test_startup_prog): + # 使用 fluid.unique_name.guard() 实现与train program的参数共享 + with fluid.unique_name.guard(): + test_reader = fluid.layers.py_reader(capacity=32, + shapes=[(-1, 1, 28, 28), (-1, 1)], + 
dtypes=['float32', 'int64'], + name='test_reader') + test_reader.decorate_paddle_reader(paddle.batch(mnist.test(), 512)) + test_loss = network(test_reader) + + fluid.Executor(fluid.CUDAPlace(0)).run(train_startup_prog) + fluid.Executor(fluid.CUDAPlace(0)).run(test_startup_prog) + + train_exe = fluid.ParallelExecutor(use_cuda=True, + loss_name=train_loss.name, main_program=train_main_prog) + test_exe = fluid.ParallelExecutor(use_cuda=True, + loss_name=test_loss.name, main_program=test_main_prog) + for epoch_id in range(10): + train_reader.start() + try: + while True: + train_exe.run(fetch_list=[train_loss.name]) + except fluid.core.EOFException: + train_reader.reset() + + test_reader.start() + try: + while True: + test_exe.run(fetch_list=[test_loss.name]) + except fluid.core.EOFException: + test_reader.reset() diff --git a/doc/fluid/api_cn/layers_cn/random_crop_cn.rst b/doc/fluid/api_cn/layers_cn/random_crop_cn.rst index cb00c7c38ac0da9a1ad1de816e8fbd82ea34116c..e9a229030384d8a9dd28ca0308654510f2326008 100644 --- a/doc/fluid/api_cn/layers_cn/random_crop_cn.rst +++ b/doc/fluid/api_cn/layers_cn/random_crop_cn.rst @@ -5,6 +5,12 @@ random_crop .. py:function:: paddle.fluid.layers.random_crop(x, shape, seed=None) +:alias_main: paddle.nn.functional.random_crop +:alias: paddle.nn.functional.random_crop,paddle.nn.functional.extension.random_crop +:old_api: paddle.fluid.layers.random_crop + + + 该操作对batch中每个实例进行随机裁剪,即每个实例的裁剪位置不同,裁剪位置由均匀分布随机数生成器决定。所有裁剪后的实例都具有相同的维度,由 ``shape`` 参数决定。 参数: diff --git a/doc/fluid/api_cn/layers_cn/range_cn.rst b/doc/fluid/api_cn/layers_cn/range_cn.rst index d2a56c31a807fc7732d9903c1e5ab7ad1faaf5e1..519f5e76f72b649cb924adc6c00342b7b5c54929 100644 --- a/doc/fluid/api_cn/layers_cn/range_cn.rst +++ b/doc/fluid/api_cn/layers_cn/range_cn.rst @@ -3,30 +3,32 @@ range ------------------------------- -.. py:function:: paddle.fluid.layers.range(start, end, step, dtype) +.. py:function:: paddle.fluid.layers.range(start, end, step, dtype, name=None) -该API根据step均匀分隔给定数值区间[start, end),并返回该分隔结果。 +注意:推荐使用 paddle.arange -参数: - - **start** (float32 | float64 | int32 | int64 | Variable) - 区间起点,且区间包括此值, 当类型是Variable时,是shape为 `[1]` 的1-D Tensor。 - - **end** (float32 | float64 | int32 | int64 | Variable) - 区间终点,通常区间不包括此值。但当step不是整数,且浮点数取整会影响输出的长度时例外。 - - **step** (float32 | float64 | int32 | int64 | Variable) - 均匀分割的步长。 - - **dtype** (str | core.VarDesc.VarType) - 输出Tensor的数据类型,可为 `'float32'`, `'float64'`, `'int32'`, `'int64'` 。 +该OP返回以步长 ``step`` 均匀分隔给定数值区间[``start``, ``end``)的1-D Tensor,数据类型为 ``dtype``。 + +当 ``dtype`` 表示浮点类型时,为了避免浮点计算误差,建议给 ``end`` 加上一个极小值epsilon,使边界可以更加明确。 -返回:均匀分割给定数值区间后得到的1-D Tensor, 数据类型为输入 `dtype` 。 +参数: + - **start** (float|int|Tensor) - 区间起点(且区间包括此值)。当 ``start`` 类型是Tensor时,是形状为[1]且数据类型为int32、int64、float32、float64的Tensor。 + - **end** (float|int|Tensor) - 区间终点(且通常区间不包括此值)。当 ``end`` 类型是Tensor时,是形状为[1]且数据类型为int32、int64、float32、float64的Tensor。 + - **step** (float|int|Tensor) - 均匀分割的步长。当 ``step`` 类型是Tensor时,是形状为[1]且数据类型为int32、int64、float32、float64的Tensor。 + - **dtype** (str|np.dtype|core.VarDesc.VarType) - 输出Tensor的数据类型,支持int32、int64、float32、float64。 + - **name** (str, 可选) - 输出的名字。一般无需设置,默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 -返回类型:Variable +返回: + Tensor: 以步长 ``step`` 均匀分割给定数值区间[``start``, ``end``)后得到的1-D Tensor, 数据类型为 ``dtype`` 。 +抛出异常: + - ``TypeError`` - 如果 ``dtype`` 不是int32、int64、float32、float64。 -**代码示例**: +代码示例: .. 
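The epsilon advice in the ``range`` description above is a consequence of ordinary floating-point accumulation: whether the last multiple of ``step`` falls inside [start, end) depends on rounding. The NumPy sketch below illustrates why a small explicit offset on ``end`` makes the boundary unambiguous.

.. code-block:: python

    # why a small epsilon on `end` helps with floating-point steps
    import numpy as np

    print(np.arange(0, 0.6, 0.2))         # [0.  0.2 0.4]   3 * 0.2 slightly overshoots 0.6
    print(np.arange(0, 0.6 + 1e-6, 0.2))  # [0.  0.2 0.4 0.6]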
code-block:: python import paddle.fluid as fluid data = fluid.layers.range(0, 10, 2, 'int32') - - - - - + # [0, 2, 4, 6, 8] diff --git a/doc/fluid/api_cn/layers_cn/rank_cn.rst b/doc/fluid/api_cn/layers_cn/rank_cn.rst index c6cad3995f7149364267800367501e5bc2ecdaa0..0d3aee591f86e84bc4a73fe40e2c31f434b558fa 100644 --- a/doc/fluid/api_cn/layers_cn/rank_cn.rst +++ b/doc/fluid/api_cn/layers_cn/rank_cn.rst @@ -5,6 +5,12 @@ rank .. py:function:: paddle.fluid.layers.rank(input) +:alias_main: paddle.rank +:alias: paddle.rank,paddle.tensor.rank,paddle.tensor.attribute.rank +:old_api: paddle.fluid.layers.rank + + + 该OP用于计算输入Tensor的维度(秩)。 参数: diff --git a/doc/fluid/api_cn/layers_cn/rank_loss_cn.rst b/doc/fluid/api_cn/layers_cn/rank_loss_cn.rst index ee8531b88befc7ef1d588188d3892fb10d5a54fc..3f6f5b2a83df84fa477006499891176be909b1bf 100644 --- a/doc/fluid/api_cn/layers_cn/rank_loss_cn.rst +++ b/doc/fluid/api_cn/layers_cn/rank_loss_cn.rst @@ -5,6 +5,12 @@ rank_loss .. py:function:: paddle.fluid.layers.rank_loss(label, left, right, name=None) +:alias_main: paddle.nn.functional.rank_loss +:alias: paddle.nn.functional.rank_loss,paddle.nn.functional.loss.rank_loss +:old_api: paddle.fluid.layers.rank_loss + + + 该OP实现了RankNet模型中的排序损失层。RankNet是一种文档对(pairwise)排序模型,训练样本由一对文档(假设用A、B来表示)组成。标签(假设用P来表示)表示A的排名是否高于B。更多详情请参考:`RankNet `_ 排序损失层有三个输入: :math:`o_i` 、 :math:`o_j` 和 :math:`\tilde{P_{ij}}` ,输入分别表示RankNet模型对文档A、B的输出得分和标签P的值;排序损失层的输入是批输入数据(批大小大于等于1);标签P的取值可以为: {0, 1} 或 {0, 0.5, 1} ,其中,0.5表示输入文档对排序相同。输入数据的排序损失 :math:`C_{i,j}` 计算过程如下: diff --git a/doc/fluid/api_cn/layers_cn/read_file_cn.rst b/doc/fluid/api_cn/layers_cn/read_file_cn.rst index 688a52d359c9553ce3d448ea7315980625f4d642..c2cd521f9376d5e2bc0a255b6a8d47bf183a5ad6 100644 --- a/doc/fluid/api_cn/layers_cn/read_file_cn.rst +++ b/doc/fluid/api_cn/layers_cn/read_file_cn.rst @@ -3,10 +3,13 @@ read_file ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.layers.read_file(reader) +:api_attr: 声明式编程模式(静态图) + + + 从给定的reader中读取数据 reader是一个Variable,它可以是由函数fluid.layers.py_reader()生成的reader,或者是由函数fluid.layers.double_buffer()生成的装饰Variable。 diff --git a/doc/fluid/api_cn/layers_cn/reciprocal_cn.rst b/doc/fluid/api_cn/layers_cn/reciprocal_cn.rst index fa65ac200f513bc9fbfe088d21ce8b6574feefba..a76a495a5112dc3404510b76bc310ad0b0f78e37 100644 --- a/doc/fluid/api_cn/layers_cn/reciprocal_cn.rst +++ b/doc/fluid/api_cn/layers_cn/reciprocal_cn.rst @@ -5,6 +5,12 @@ reciprocal .. py:function:: paddle.fluid.layers.reciprocal(x, name=None) +:alias_main: paddle.reciprocal +:alias: paddle.reciprocal,paddle.tensor.reciprocal,paddle.tensor.math.reciprocal +:old_api: paddle.fluid.layers.reciprocal + + + reciprocal 对输入Tensor取倒数 @@ -23,17 +29,14 @@ reciprocal 对输入Tensor取倒数 .. code-block:: python - import paddle.fluid as fluid - data = fluid.layers.fill_constant(shape=[2], value=4, dtype='float32') #data=[4.0, 4.0] - result = fluid.layers.reciprocal(data) # result=[0.25, 0.25] - - - - - - - + import paddle + import numpy as np + paddle.enable_imperative() + x_data = np.array([1, 2, 3, 4]).astype(np.float32) + x = paddle.imperative.to_variable(x_data) + res = paddle.reciprocal(x) + print(res.numpy()) diff --git a/doc/fluid/api_cn/layers_cn/reduce_all_cn.rst b/doc/fluid/api_cn/layers_cn/reduce_all_cn.rst index dbadb7a968de0a32021b998d7c5c2756e65941be..88a1ba9e3f9081ec3772e88481a15864c41ee86e 100644 --- a/doc/fluid/api_cn/layers_cn/reduce_all_cn.rst +++ b/doc/fluid/api_cn/layers_cn/reduce_all_cn.rst @@ -5,6 +5,12 @@ reduce_all .. 
py:function:: paddle.fluid.layers.reduce_all(input, dim=None, keep_dim=False, name=None) +:alias_main: paddle.reduce_all +:alias: paddle.reduce_all,paddle.tensor.reduce_all,paddle.tensor.logic.reduce_all +:old_api: paddle.fluid.layers.reduce_all + + + 该OP是对指定维度上的Tensor元素进行与逻辑(&)计算,并输出相应的计算结果。 参数: diff --git a/doc/fluid/api_cn/layers_cn/reduce_any_cn.rst b/doc/fluid/api_cn/layers_cn/reduce_any_cn.rst index afe04344a036d160161197983e0a7e6683791a65..13a23b8139886d391641a6a9068879d51b2fd1ee 100644 --- a/doc/fluid/api_cn/layers_cn/reduce_any_cn.rst +++ b/doc/fluid/api_cn/layers_cn/reduce_any_cn.rst @@ -5,6 +5,12 @@ reduce_any .. py:function:: paddle.fluid.layers.reduce_any(input, dim=None, keep_dim=False, name=None) +:alias_main: paddle.reduce_any +:alias: paddle.reduce_any,paddle.tensor.reduce_any,paddle.tensor.logic.reduce_any +:old_api: paddle.fluid.layers.reduce_any + + + 该OP是对指定维度上的Tensor元素进行或逻辑(|)计算,并输出相应的计算结果。 参数: diff --git a/doc/fluid/api_cn/layers_cn/reduce_max_cn.rst b/doc/fluid/api_cn/layers_cn/reduce_max_cn.rst index 723012415b23d80233267a2badd5af6c3cd343f9..37cc3db57a4d12ac01e43fce0e3b9cea8e609b17 100644 --- a/doc/fluid/api_cn/layers_cn/reduce_max_cn.rst +++ b/doc/fluid/api_cn/layers_cn/reduce_max_cn.rst @@ -5,6 +5,12 @@ reduce_max .. py:function:: paddle.fluid.layers.reduce_max(input, dim=None, keep_dim=False, name=None) +:alias_main: paddle.reduce_max +:alias: paddle.reduce_max,paddle.tensor.reduce_max,paddle.tensor.math.reduce_max +:old_api: paddle.fluid.layers.reduce_max + + + 该OP是对指定维度上的Tensor元素求最大值运算,并输出相应的计算结果。 参数: diff --git a/doc/fluid/api_cn/layers_cn/reduce_mean_cn.rst b/doc/fluid/api_cn/layers_cn/reduce_mean_cn.rst index b7b732daded03fdd7d0a334d12d3a1edfdbf3d22..c94fed0a80b6a766a57f10ad6c5357428b0c0bd4 100644 --- a/doc/fluid/api_cn/layers_cn/reduce_mean_cn.rst +++ b/doc/fluid/api_cn/layers_cn/reduce_mean_cn.rst @@ -5,6 +5,12 @@ reduce_mean .. py:function:: paddle.fluid.layers.reduce_mean(input, dim=None, keep_dim=False, name=None) +:alias_main: paddle.reduce_mean +:alias: paddle.reduce_mean,paddle.tensor.reduce_mean,paddle.tensor.stat.reduce_mean +:old_api: paddle.fluid.layers.reduce_mean + + + 该OP是对指定维度上的Tensor元素进行平均值算,并输出相应的计算结果。 参数: diff --git a/doc/fluid/api_cn/layers_cn/reduce_min_cn.rst b/doc/fluid/api_cn/layers_cn/reduce_min_cn.rst index 509768608c270d5d80d6a1f9084f0c5d66c124b8..2517c935e483cff4a2c7686556e6583126e29bb4 100644 --- a/doc/fluid/api_cn/layers_cn/reduce_min_cn.rst +++ b/doc/fluid/api_cn/layers_cn/reduce_min_cn.rst @@ -5,6 +5,12 @@ reduce_min .. py:function:: paddle.fluid.layers.reduce_min(input, dim=None, keep_dim=False, name=None) +:alias_main: paddle.reduce_min +:alias: paddle.reduce_min,paddle.tensor.reduce_min,paddle.tensor.math.reduce_min +:old_api: paddle.fluid.layers.reduce_min + + + 该OP是对指定维度上的Tensor元素求最小值运算,并输出相应的计算结果。 参数: diff --git a/doc/fluid/api_cn/layers_cn/reduce_prod_cn.rst b/doc/fluid/api_cn/layers_cn/reduce_prod_cn.rst index b420857b8131d8e4d4aa3f4b5ff7c64bc92d1b1c..c0a11bc9204a431a4fba0df588189b9d604dcf97 100644 --- a/doc/fluid/api_cn/layers_cn/reduce_prod_cn.rst +++ b/doc/fluid/api_cn/layers_cn/reduce_prod_cn.rst @@ -5,11 +5,17 @@ reduce_prod .. 
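The ``dim``/``keep_dim`` semantics shared by the ``reduce_*`` OPs above map directly onto NumPy's ``axis``/``keepdims`` arguments; the sketch below uses ``reduce_prod`` as the example and is illustrative only.

.. code-block:: python

    # axis/keepdims mirror dim/keep_dim of the reduce_* ops
    import numpy as np

    x = np.array([[1., 2.], [3., 4.]])
    print(np.prod(x))                         # 24.0 -- dim=None reduces every element
    print(np.prod(x, axis=1))                 # [ 2. 12.] -- the reduced axis disappears
    print(np.prod(x, axis=1, keepdims=True))  # shape (2, 1) -- keep_dim=True analogue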
py:function:: paddle.fluid.layers.reduce_prod(input, dim=None, keep_dim=False, name=None) +:alias_main: paddle.reduce_prod +:alias: paddle.reduce_prod,paddle.tensor.reduce_prod,paddle.tensor.math.reduce_prod +:old_api: paddle.fluid.layers.reduce_prod + + + 该OP是对指定维度上的Tensor元素进行求乘积运算,并输出相应的计算结果。 参数: - **input** (Variable)- 输入变量为多维Tensor或LoDTensor,支持数据类型为float32,float64,int32,int64。 - - **dim** (list | int ,可选)- 求乘积运算的维度。如果为None,则计算所有元素的乘积并返回包含单个元素的Tensor变量,否则必须在 :math:`[−rank(input),rank(input)]` 范围内。如果 :math:`dim [i] <0` ,则维度将变为 :math:`rank+dim[i]` ,默认值为None。 + - **dim** (int|list|tuple ,可选)- 求乘积运算的维度。如果为None,则计算所有元素的乘积并返回包含单个元素的Tensor变量,否则必须在 :math:`[−rank(input),rank(input)]` 范围内。如果 :math:`dim [i] <0` ,则维度将变为 :math:`rank+dim[i]` ,默认值为None。 - **keep_dim** (bool)- 是否在输出Tensor中保留减小的维度。如 keep_dim 为true,否则结果张量的维度将比输入张量小,默认值为False。 - **name** (str , 可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 diff --git a/doc/fluid/api_cn/layers_cn/reduce_sum_cn.rst b/doc/fluid/api_cn/layers_cn/reduce_sum_cn.rst index ddabbeab762ebaa5d5835c3c0f18adb4333b86d6..da06baf68be2370b27a1162e24882a615b878589 100644 --- a/doc/fluid/api_cn/layers_cn/reduce_sum_cn.rst +++ b/doc/fluid/api_cn/layers_cn/reduce_sum_cn.rst @@ -5,6 +5,12 @@ reduce_sum .. py:function:: paddle.fluid.layers.reduce_sum(input, dim=None, keep_dim=False, name=None) +:alias_main: paddle.reduce_sum +:alias: paddle.reduce_sum,paddle.tensor.reduce_sum,paddle.tensor.math.reduce_sum +:old_api: paddle.fluid.layers.reduce_sum + + + 该OP是对指定维度上的Tensor元素进行求和运算,并输出相应的计算结果。 参数: diff --git a/doc/fluid/api_cn/layers_cn/relu6_cn.rst b/doc/fluid/api_cn/layers_cn/relu6_cn.rst index bd1ea8709ae0cd1bbc8070a58521851177641ea0..05c83c7e5e48c6fda17a075dd8bbd13f00c62c02 100644 --- a/doc/fluid/api_cn/layers_cn/relu6_cn.rst +++ b/doc/fluid/api_cn/layers_cn/relu6_cn.rst @@ -5,6 +5,12 @@ relu6 .. py:function:: paddle.fluid.layers.relu6(x, threshold=6.0, name=None) +:alias_main: paddle.nn.functional.relu6 +:alias: paddle.nn.functional.relu6,paddle.nn.functional.activation.relu6 +:old_api: paddle.fluid.layers.relu6 + + + relu6激活函数 .. math:: out=min(max(0, x), threshold) diff --git a/doc/fluid/api_cn/layers_cn/relu_cn.rst b/doc/fluid/api_cn/layers_cn/relu_cn.rst index f7cb337b34b1ddff8ac6541d6d75a7bef3eaaf2b..6df2e05bea36cf96f6990269d6bb77952f15549e 100644 --- a/doc/fluid/api_cn/layers_cn/relu_cn.rst +++ b/doc/fluid/api_cn/layers_cn/relu_cn.rst @@ -5,6 +5,9 @@ relu .. py:function:: paddle.fluid.layers.relu(x, name=None) + + + ReLU(Rectified Linear Unit)激活函数 .. math:: Out=max(0,x) diff --git a/doc/fluid/api_cn/layers_cn/reorder_lod_tensor_by_rank_cn.rst b/doc/fluid/api_cn/layers_cn/reorder_lod_tensor_by_rank_cn.rst index 677b110ce2bebf8cc6c6462de28b72e7c0448eeb..7e67ee31003a74f34f64e9403336732388ca0ed2 100644 --- a/doc/fluid/api_cn/layers_cn/reorder_lod_tensor_by_rank_cn.rst +++ b/doc/fluid/api_cn/layers_cn/reorder_lod_tensor_by_rank_cn.rst @@ -6,6 +6,9 @@ reorder_lod_tensor_by_rank .. py:function:: paddle.fluid.layers.reorder_lod_tensor_by_rank(x, rank_table) + + + 该OP根据 ``rank_table`` 中提供的 ``LoDRankTable`` 类型的顺序信息来实现对 ``X`` 的重新排列。 接口参数 ``X`` 是由多个序列(Sequence)组成的的一个批序列(Batch of Sequences), ``rank_table`` 存储着对batch中序列重新排列的 ``LoDRankTable`` 类型的顺序信息。 @@ -20,13 +23,8 @@ reorder_lod_tensor_by_rank 注意:该OP对 ``X`` 进行的排序所依据的 ``LoDRankTable`` 不一定是在 ``X`` 的基础上得出来的。它可以由其他不同的序列得出,并由该OP依据这个 ``LoDRankTable`` 来对 ``X`` 排序。 参数: - - **x** (Variable) - 待根据提供的 ``rank_table`` 进行排序的LoDTensor - - **rank_table** (Variable) - 提供对 ``x`` 重新排列的 ``LoDRankTable`` 类型的顺序信息,构造方法举例如下: - -.. 
code-block:: python - - rank_data = fluid.layers.data(name=data_desc[1][0], shape=data_desc[1][1]) - rank_table = fluid.layers.control_flow.lod_rank_table(rank_data) + - **x** (Variable) - 待根据提供的 ``rank_table`` 进行排序的LoDTensor. + - **rank_table** (Variable) - 提供对 ``x`` 重新排列的 ``LoDRankTable`` 类型的顺序信息. 返回: 重新排列后的LoDTensor @@ -37,15 +35,33 @@ reorder_lod_tensor_by_rank .. code-block:: python + + import numpy as np import paddle.fluid as fluid - data_desc = (['input', [9], 0], ['ref', [5], 1]) - data = fluid.layers.data(name=data_desc[0][0], shape=data_desc[0][1]) - rank_data = fluid.layers.data(name=data_desc[1][0], shape=data_desc[1][1]) - table = fluid.layers.control_flow.lod_rank_table(rank_data) + + rank_data = fluid.layers.data(name='rank_data', shape=[5], dtype='float32', lod_level=2) + table = fluid.layers.control_flow.lod_rank_table(rank_data, level=1) + + data = fluid.layers.data(name='data', shape=[9], lod_level=2) new_data = fluid.layers.reorder_lod_tensor_by_rank( x=data, rank_table=table) + place=fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + + rank_tensor = fluid.create_lod_tensor(np.random.random([14,5]).astype("float32"), [[4,1], [3, 2, 2, 3, 4]], place) + + data_ndarray = np.random.random([27, 9]).astype("float32") + data_lod = [[1, 2, 2, 4, 4], [2, 2, 4, 2, 2, 2, 1, 1, 2, 2, 4, 2, 1]] + data_tensor = fluid.create_lod_tensor(data_ndarray, data_lod, place) + + out = exe.run(fluid.default_main_program(),feed={'data':data_tensor, 'rank_data':rank_tensor}, fetch_list=[new_data], return_numpy=False) + print(out[0]) + # lod: {{0, 4, 5, 9, 11, 13}{0, 2, 6, 8, 9, 11, 13, 14, 15, 17, 19, 23, 25, 27}} + #shape: [27, 9] + diff --git a/doc/fluid/api_cn/layers_cn/reshape_cn.rst b/doc/fluid/api_cn/layers_cn/reshape_cn.rst index c0c8f256e2c76a2a4ea550dcb884bbc1e3832a1e..e2a892314e8361cc76f568014bc32cb0fbb8124c 100644 --- a/doc/fluid/api_cn/layers_cn/reshape_cn.rst +++ b/doc/fluid/api_cn/layers_cn/reshape_cn.rst @@ -5,6 +5,7 @@ reshape .. py:function:: paddle.fluid.layers.reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None) + 该OP在保持输入 ``x`` 数据不变的情况下,改变 ``x`` 的形状。 目标形状可由 ``shape`` 或 ``actual_shape`` 给出。当两个属性同时被指定时,``actual_shape`` 的优先级高于 ``shape`` ,但此时 ``shape`` 只能是整数列表或元组,且在编译时仍然应该正确地设置 ``shape`` 以保证形状推断。 @@ -25,26 +26,21 @@ reshape 2. 给定一个形状为[2,4,6]的三维张量x,目标形状为[2,3,-1,2],则将x变换为形状为[2,3,4,2]的4-D张量,且x的数据保持不变。在这种情况下,目标形状的一个维度被设置为-1,这个维度的值是从x的元素总数和剩余维度推断出来的。 3. 给定一个形状为[2,4,6]的三维张量x,目标形状为[-1,0,3,2],则将x变换为形状为[2,4,3,2]的4-D张量,且x的数据保持不变。在这种情况下,0对应位置的维度值将从x的对应维数中复制,-1对应位置的维度值由x的元素总数和剩余维度推断出来。 -**注意:参数** ``actual_shape`` **之后将被舍弃,只用参数** ``shape`` **来表示目标形状。** +.. 
warning:: +参数 ``actual_shape`` 之后将被舍弃,只用参数 ``shape`` 来表示目标形状。 参数: - - **x** (Variable)- 多维 ``Tensor`` 或 ``LoDTensor``,数据类型为 ``float32``,``float64``,``int32``,或 ``int64``。 - - **shape** (list|tuple|Variable)- 数据类型是 ``int32`` 。定义目标形状。目标形状最多只能有一个维度为-1。如果 ``shape`` 的类型是 list 或 tuple, 它的元素可以是整数或者形状为[1]的 ``Tensor`` 或 ``LoDTensor``。如果 ``shape`` 的类型是 ``Variable``,则是1-D的 ``Tensor`` 或 ``LoDTensor``。 - - **actual_shape** (Variable,可选)- 1-D ``Tensor`` 或 ``LoDTensor``,默认值:`None`。如果 ``actual_shape`` 被提供,``actual_shape`` 具有比 ``shape`` 更高的优先级,此时 ``shape`` 只能是整数列表或元组。更新提示:``actual_shape`` 在未来的版本中将被舍弃,并用 ``shape`` 代替。 + - **x** (Tensor)- N-D ``Tensor``,数据类型为 ``float32``,``float64``,``int32``,或 ``int64``。 + - **shape** (list|tuple|Tensor)- 数据类型是 ``int32`` 。定义目标形状。目标形状最多只能有一个维度为-1。如果 ``shape`` 的类型是 list 或 tuple, 它的元素可以是整数或者形状为[1]的 ``Tensor``。如果 ``shape`` 的类型是 ``Tensor``,则是1-D的 ``Tensor``。 + - **actual_shape** (Tensor,可选)- 1-D ``Tensor``,默认值:`None`。如果 ``actual_shape`` 被提供,``actual_shape`` 具有比 ``shape`` 更高的优先级,此时 ``shape`` 只能是整数列表或元组。更新提示:``actual_shape`` 在未来的版本中将被舍弃,并用 ``shape`` 代替。 - **act** (str,可选)- 对形状改变后的输入变量做非线性激活操作,激活函数类型可以参考 :ref:`api_guide_activations` 。默认值: ``None``。 - **inplace** (bool,可选)- 如果 ``inplace`` 为 ``True``,则 ``layers.reshape`` 的输入和输出是同一个变量,否则 ``layers.reshape`` 的输入和输出是不同的变量。默认值:``False``。请注意,如果 ``x`` 是多个OP的输入,则 ``inplace`` 必须为False。 - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置。默认值: ``None``。 -返回:多维 ``Tensor`` 或 ``LoDTensor``,数据类型与 ``input`` 相同。如果 ``inplace`` 为 ``False``,则返回一个新的变量,否则将改变输入变量 ``x`` 自身。如果 ``act`` 为 ``None``,则直接返回形状改变后的变量,否则返回经过激活函数后的变量。 - -返回类型:Variable。 +返回: +::::::::: +``Tensor``,改变形状后的 ``Tensor``,数据类型与 ``x`` 相同。如果 ``inplace`` 为 ``False``,则返回一个新的变量,否则将改变输入变量 ``x`` 自身。如果 ``act`` 为 ``None``,则直接返回形状改变后的变量,否则返回经过激活函数后的变量。 -抛出异常: - - :code:`TypeError`:``actual_shape`` 的类型应该是 Variable 或 None。 - - :code:`TypeError`:``starts`` 的类型应该是list、tuple 或 Variable。 - - :code:`ValueError`:``shape`` 中至多有一个元素可以是-1。 - - :code:`ValueError`:``shape`` 中的元素为0时,对应的维度应该小于等于``x``的维度。 - - :code:`ValueError`:``shape`` 中的元素除了-1之外,都应该是非负值。 **代码示例** @@ -53,15 +49,15 @@ reshape import paddle.fluid as fluid # example 1: - # attr shape is a list which doesn't contain tensor Variable. - data_1 = fluid.layers.data( - name='data_1', shape=[2, 4, 6], dtype='float32') + # attr shape is a list which doesn't contain Tensors. + data_1 = fluid.data( + name='data_1', shape=[2, 4, 6], dtype='float32') reshaped_1 = fluid.layers.reshape( - x=data_1, shape=[-1, 0, 3, 2], inplace=True) + x=data_1, shape=[-1, 0, 3, 2], inplace=True) # the shape of reshaped_1 is [2,4,3,2]. # example 2: - # attr shape is a list which contains tensor Variable. + # attr shape is a list which contains Tensors. data_2 = fluid.layers.fill_constant([2,25], "int32", 3) dim = fluid.layers.fill_constant([1], "int32", 5) reshaped_2 = fluid.layers.reshape(data_2, shape=[dim, 10]) @@ -69,7 +65,7 @@ reshape # example 3: data_3 = fluid.data( - name="data_3", shape=[2,4,6], dtype='float32') + name="data_3", shape=[2,4,6], dtype='float32') reshaped_3 = fluid.layers.reshape(x=data_3, shape=[6,8]) # the shape of reshaped_3 is [6,8]. diff --git a/doc/fluid/api_cn/layers_cn/resize_bilinear_cn.rst b/doc/fluid/api_cn/layers_cn/resize_bilinear_cn.rst index 233364fa74c22e1cc60e374178ae6f8637ef4420..d0fd0a3f2b04cbf90fe6d4944929b9c93cbaec5f 100644 --- a/doc/fluid/api_cn/layers_cn/resize_bilinear_cn.rst +++ b/doc/fluid/api_cn/layers_cn/resize_bilinear_cn.rst @@ -5,6 +5,12 @@ resize_bilinear .. 
py:function:: paddle.fluid.layers.resize_bilinear(input, out_shape=None, scale=None, name=None, actual_shape=None, align_corners=True, align_mode=1, data_format='NCHW') +:alias_main: paddle.nn.functional.resize_bilinear +:alias: paddle.nn.functional.resize_bilinear,paddle.nn.functional.vision.resize_bilinear +:old_api: paddle.fluid.layers.resize_bilinear + + + **注意:** 参数 ``actual_shape`` 将被弃用,请使用 ``out_shape`` 替代。 该OP应用双线性插值法调整输入图片的大小,输出形状按优先级由actual_shape、out_shape和scale指定。 diff --git a/doc/fluid/api_cn/layers_cn/resize_nearest_cn.rst b/doc/fluid/api_cn/layers_cn/resize_nearest_cn.rst index fc5b282119f1ab85e413493c9fa201623f46dffd..2d4ff2abf242b745703200a0fb7966486f8faefc 100644 --- a/doc/fluid/api_cn/layers_cn/resize_nearest_cn.rst +++ b/doc/fluid/api_cn/layers_cn/resize_nearest_cn.rst @@ -5,6 +5,12 @@ resize_nearest .. py:function:: paddle.fluid.layers.resize_nearest(input, out_shape=None, scale=None, name=None, actual_shape=None, align_corners=True, data_format='NCHW') +:alias_main: paddle.nn.functional.resize_nearest +:alias: paddle.nn.functional.resize_nearest,paddle.nn.functional.vision.resize_nearest +:old_api: paddle.fluid.layers.resize_nearest + + + 该OP对输入图片进行大小调整,在高度方向和宽度方向进行最邻近插值(nearest neighbor interpolation)操作。 输出形状按优先级顺序依据 ``actual_shape`` , ``out_shape`` 和 ``scale`` 而定。 diff --git a/doc/fluid/api_cn/layers_cn/resize_trilinear_cn.rst b/doc/fluid/api_cn/layers_cn/resize_trilinear_cn.rst index a1425cbddf9e9989963c4e77c15ae40fae3474d4..58c62c6ca849b47d12ff1188b88e3cc3797030d2 100644 --- a/doc/fluid/api_cn/layers_cn/resize_trilinear_cn.rst +++ b/doc/fluid/api_cn/layers_cn/resize_trilinear_cn.rst @@ -5,6 +5,12 @@ resize_trilinear .. py:function:: paddle.fluid.layers.resize_trilinear(input, out_shape=None, scale=None, name=None, actual_shape=None, align_corners=True, align_mode=1, data_format='NCDHW') +:alias_main: paddle.nn.functional.resize_trilinear +:alias: paddle.nn.functional.resize_trilinear,paddle.nn.functional.vision.resize_trilinear +:old_api: paddle.fluid.layers.resize_trilinear + + + **注意:** 参数 ``actual_shape`` 将被弃用,请使用 ``out_shape`` 替代。 该层对输入进行放缩,基于给定的由 ``actual_shape`` , ``out_shape`` , ``scale`` 确定的输出shape,进行三线插值。三线插值是包含三个参数的线性插值方程(D方向,H方向, W方向),在一个3D格子上进行三个方向的线性插值。更多细节,请参考维基百科:https://en.wikipedia.org/wiki/Trilinear_interpolation diff --git a/doc/fluid/api_cn/layers_cn/retinanet_detection_output_cn.rst b/doc/fluid/api_cn/layers_cn/retinanet_detection_output_cn.rst index badd3bc07451b013f7130178ef69b680a5e4763a..426ea26b3a675bf121e2ff8e0c8495e715878f52 100644 --- a/doc/fluid/api_cn/layers_cn/retinanet_detection_output_cn.rst +++ b/doc/fluid/api_cn/layers_cn/retinanet_detection_output_cn.rst @@ -5,6 +5,12 @@ retinanet_detection_output .. py:function:: paddle.fluid.layers.retinanet_detection_output(bboxes, scores, anchors, im_info, score_threshold=0.05, nms_top_k=1000, keep_top_k=100, nms_threshold=0.3, nms_eta=1.0) +:alias_main: paddle.nn.functional.retinanet_detection_output +:alias: paddle.nn.functional.retinanet_detection_output,paddle.nn.functional.vision.retinanet_detection_output +:old_api: paddle.fluid.layers.retinanet_detection_output + + + 在 `RetinaNet `_ 中,有多个 `FPN `_ 层会输出用于分类的预测值和位置回归的预测值,该OP通过执行以下步骤将这些预测值转换成最终的检测结果: 1.
在每个FPN层上,先剔除分类预测值小于score_threshold的anchor,然后按分类预测值从大到小排序,选出排名前nms_top_k的anchor,并将这些anchor与其位置回归的预测值做解码操作得到检测框。 @@ -34,27 +40,27 @@ retinanet_detection_output import paddle.fluid as fluid - bboxes_low = fluid.data(name='bboxes_low', shape=[1, 44, 4], - dtype='float32') - bboxes_high = fluid.data(name='bboxes_high', shape=[1, 11, 4], - dtype='float32') - scores_low = fluid.data(name='scores_low', shape=[1, 44, 10], - dtype='float32') - scores_high = fluid.data(name='scores_high', shape=[1, 11, 10], - dtype='float32') - anchors_low = fluid.data(name='anchors_low', shape=[44, 4], - dtype='float32') - anchors_high = fluid.data(name='anchors_high', shape=[11, 4], - dtype='float32') - im_info = fluid.data(name="im_info", shape=[1, 3], - dtype='float32') + bboxes_low = fluid.data( + name='bboxes_low', shape=[1, 44, 4], dtype='float32') + bboxes_high = fluid.data( + name='bboxes_high', shape=[1, 11, 4], dtype='float32') + scores_low = fluid.data( + name='scores_low', shape=[1, 44, 10], dtype='float32') + scores_high = fluid.data( + name='scores_high', shape=[1, 11, 10], dtype='float32') + anchors_low = fluid.data( + name='anchors_low', shape=[44, 4], dtype='float32') + anchors_high = fluid.data( + name='anchors_high', shape=[11, 4], dtype='float32') + im_info = fluid.data( + name="im_info", shape=[1, 3], dtype='float32') nmsed_outs = fluid.layers.retinanet_detection_output( - bboxes=[bboxes_low, bboxes_high], - scores=[scores_low, scores_high], - anchors=[anchors_low, anchors_high], - im_info=im_info, - score_threshold=0.05, - nms_top_k=1000, - keep_top_k=100, - nms_threshold=0.45, - nms_eta=1.) + bboxes=[bboxes_low, bboxes_high], + scores=[scores_low, scores_high], + anchors=[anchors_low, anchors_high], + im_info=im_info, + score_threshold=0.05, + nms_top_k=1000, + keep_top_k=100, + nms_threshold=0.45, + nms_eta=1.0) diff --git a/doc/fluid/api_cn/layers_cn/retinanet_target_assign_cn.rst b/doc/fluid/api_cn/layers_cn/retinanet_target_assign_cn.rst index caa8340ae354cfc0d054d4899e4cb322884dbb1e..cd37a297b2b303429ee17ad9f2f2881245041ebe 100644 --- a/doc/fluid/api_cn/layers_cn/retinanet_target_assign_cn.rst +++ b/doc/fluid/api_cn/layers_cn/retinanet_target_assign_cn.rst @@ -5,6 +5,12 @@ retinanet_target_assign .. py:function:: paddle.fluid.layers.retinanet_target_assign(bbox_pred, cls_logits, anchor_box, anchor_var, gt_boxes, gt_labels, is_crowd, im_info, num_classes=1, positive_overlap=0.5, negative_overlap=0.4) +:alias_main: paddle.nn.functional.retinanet_target_assign +:alias: paddle.nn.functional.retinanet_target_assign,paddle.nn.functional.vision.retinanet_target_assign +:old_api: paddle.fluid.layers.retinanet_target_assign + + + 该OP是从输入anchor中找出训练检测模型 `RetinaNet `_ 所需的正负样本,并为每个正负样本分配用于分类的目标值和位置回归的目标值,同时从全部anchor的类别预测值cls_logits、位置预测值bbox_pred中取出属于各正负样本的部分。 正负样本的查找准则如下: @@ -50,7 +56,6 @@ retinanet_target_assign .. 
code-block:: python import paddle.fluid as fluid - import numpy as np bbox_pred = fluid.data(name='bbox_pred', shape=[1, 100, 4], dtype='float32') @@ -63,11 +68,11 @@ retinanet_target_assign gt_boxes = fluid.data(name='gt_boxes', shape=[10, 4], dtype='float32') gt_labels = fluid.data(name='gt_labels', shape=[10, 1], - dtype='float32') + dtype='int32') is_crowd = fluid.data(name='is_crowd', shape=[1], - dtype='float32') + dtype='int32') im_info = fluid.data(name='im_info', shape=[1, 3], dtype='float32') - score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = + score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \ fluid.layers.retinanet_target_assign(bbox_pred, cls_logits, anchor_box, anchor_var, gt_boxes, gt_labels, is_crowd, im_info, 10) diff --git a/doc/fluid/api_cn/layers_cn/reverse_cn.rst b/doc/fluid/api_cn/layers_cn/reverse_cn.rst index 0ea877075e75037b728d908a64e0a06de1cd1804..a4a552705b1dfc6bb389434a3f0cc771232f91c3 100644 --- a/doc/fluid/api_cn/layers_cn/reverse_cn.rst +++ b/doc/fluid/api_cn/layers_cn/reverse_cn.rst @@ -5,14 +5,41 @@ reverse .. py:function:: paddle.fluid.layers.reverse(x,axis) +:alias_main: paddle.reverse +:alias: paddle.reverse,paddle.tensor.reverse,paddle.tensor.manipulation.reverse +:old_api: paddle.fluid.layers.reverse + + + **reverse** 该OP对输入Tensor ``x`` 在指定轴 ``axis`` 上进行数据的逆序操作。 -参数: - - **x** (Variable) - 多维Tensor,类型必须为int32,int64,float32,float64。 - - **axis** (int|tuple|list) - 指定逆序运算的轴,取值范围是[-R, R),R是输入 ``x`` 的Rank, ``axis`` 为负时与 ``axis`` +R 等价。如果 ``axis`` 是一个元组或列表,则在``axis`` 每个元素值所指定的轴上进行逆序运算。 +:: + + 示例1: + 输入是 LoDTensor 类型: + x = [[0, 1, 2], [3, 4, 5], [6, 7, 8]] + axis = [0, 1] + + 输出: + output = [[8, 7, 6], [5, 4, 3], [2, 1, 0]] + 示例2: + 输入是 LoDTensorArray 类型: + x = {[[0, 1], [2, 3]], + [[4, 5, 6]], + [[7], [8], [9]]} + axis = 0 + + 输出: + output = {[[7], [8], [9]], + [[4, 5, 6]], + [[0, 1], [2, 3]]} + +参数: + - **x** (Variable) - 输入为Tensor或LoDTensorArray,数据类型支持bool,int8,int32,int64,float32和float64。若输入是LoDTensorArray类型,则返回一个逆序的LoDTensorArray,其内部Tensor元素的次序保持不变。 + - **axis** (int|tuple|list) - 指定逆序运算的轴,取值范围是[-R, R),R是输入 ``x`` 的Rank, ``axis`` 为负时与 ``axis`` +R 等价。如果 ``axis`` 是一个元组或列表,则在 ``axis`` 每个元素值所指定的轴上进行逆序运算。如果输入是LoDTensorArray类型,axis须是值为0的int,或shape为[1]的list ``[0]`` 、元组 ``(0,)`` 。 返回:逆序后的Tensor,形状、数据类型和 ``x`` 一致。 返回类型:Variable @@ -26,3 +53,13 @@ reverse data = fluid.layers.assign(np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]], dtype='float32')) # [[0., 1., 2.], [3., 4., 5.], [6., 7., 8.]] result1 = fluid.layers.reverse(data, 0) # [[6., 7., 8.], [3., 4., 5.], [0., 1., 2.]] result2 = fluid.layers.reverse(data, [0, 1]) # [[8., 7., 6.], [5., 4., 3.], [2., 1., 0.]] + + # 输入为LoDTensorArray时 + data1 = fluid.layers.assign(np.array([[0, 1, 2]], dtype='float32')) + data2 = fluid.layers.assign(np.array([[3, 4, 5]], dtype='float32')) + tensor_array = fluid.layers.create_array(dtype='float32') + i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0) + fluid.layers.array_write(data1, i, tensor_array) + fluid.layers.array_write(data2, i+1, tensor_array) + + reversed_tensor_array = fluid.layers.reverse(tensor_array, 0) # {[[3, 4, 5]], [[0, 1, 2]]} diff --git a/doc/fluid/api_cn/layers_cn/rnn_cn.rst b/doc/fluid/api_cn/layers_cn/rnn_cn.rst index bd948e12e34a336ebbf4fc4c56ee0a6e4eb04e95..75faee0a3e3975cc1403cbdec72e187fed7e36ae 100644 --- a/doc/fluid/api_cn/layers_cn/rnn_cn.rst +++ b/doc/fluid/api_cn/layers_cn/rnn_cn.rst @@ -4,9 +4,12 @@ rnn ------------------------------- -**注意:该API仅支持【静态图】模式** .. 
py:method:: paddle.fluid.layers.rnn(cell, inputs, initial_states=None, sequence_length=None, time_major=False, is_reverse=False, **kwargs) + +:api_attr: 声明式编程模式(静态图) + + rnn创建一个由RNNCell :code:`cell` 指定的递归神经网络,该神经网络重复执行 :code:`cell.call()` 直至达到 :code:`inputs` 的最大长度。 diff --git a/doc/fluid/api_cn/layers_cn/roi_align_cn.rst b/doc/fluid/api_cn/layers_cn/roi_align_cn.rst index bc9d09f185d14af07674c17de58303b486b0745a..c5c72638bb5979df1767cf4f7eb765060e245bea 100644 --- a/doc/fluid/api_cn/layers_cn/roi_align_cn.rst +++ b/doc/fluid/api_cn/layers_cn/roi_align_cn.rst @@ -5,6 +5,12 @@ roi_align .. py:function:: paddle.fluid.layers.roi_align(input, rois, pooled_height=1, pooled_width=1, spatial_scale=1.0, sampling_ratio=-1, name=None) +:alias_main: paddle.nn.functional.roi_align +:alias: paddle.nn.functional.roi_align,paddle.nn.functional.vision.roi_align +:old_api: paddle.fluid.layers.roi_align + + + **实现RoIAlign操作。** Region of Interests align(直译:有意义、有价值选区对齐) 用于实现双线性插值,它可以将不均匀大小的输入 diff --git a/doc/fluid/api_cn/layers_cn/roi_perspective_transform_cn.rst b/doc/fluid/api_cn/layers_cn/roi_perspective_transform_cn.rst index 71d75db86e8a2429b7b93ec543c3f6b60d36f0b6..1c97a18e0b7897d2b0299bc33d70f94e508531d3 100644 --- a/doc/fluid/api_cn/layers_cn/roi_perspective_transform_cn.rst +++ b/doc/fluid/api_cn/layers_cn/roi_perspective_transform_cn.rst @@ -5,6 +5,12 @@ roi_perspective_transform .. py:function:: paddle.fluid.layers.roi_perspective_transform(input, rois, transformed_height, transformed_width, spatial_scale=1.0) +:alias_main: paddle.nn.functional.roi_perspective_transform +:alias: paddle.nn.functional.roi_perspective_transform,paddle.nn.functional.vision.roi_perspective_transform +:old_api: paddle.fluid.layers.roi_perspective_transform + + + 该OP对RoI区域做透视变换,将不规则的RoI区域变成固定大小的矩形区域,透视变换是线性代数里面的一种基础变换。 参数: diff --git a/doc/fluid/api_cn/layers_cn/roi_pool_cn.rst b/doc/fluid/api_cn/layers_cn/roi_pool_cn.rst index 16ff9764a7be3f9fea1c3f74434a44f44ac5ac87..616c119d22d5d91e30903d82879975b0733ace87 100644 --- a/doc/fluid/api_cn/layers_cn/roi_pool_cn.rst +++ b/doc/fluid/api_cn/layers_cn/roi_pool_cn.rst @@ -5,6 +5,12 @@ roi_pool .. py:function:: paddle.fluid.layers.roi_pool(input, rois, pooled_height=1, pooled_width=1, spatial_scale=1.0) +:alias_main: paddle.nn.functional.roi_pool +:alias: paddle.nn.functional.roi_pool,paddle.nn.functional.vision.roi_pool +:old_api: paddle.fluid.layers.roi_pool + + + 该OP实现了roi池化操作,对非均匀大小的输入执行最大池化,以获得固定大小的特征映射(例如7*7)。 diff --git a/doc/fluid/api_cn/layers_cn/round_cn.rst b/doc/fluid/api_cn/layers_cn/round_cn.rst index 51ebb254290985796f7a4478edd32f6944e341ee..7de4214b7e9a993490a07b76b28b62142f1d72d2 100644 --- a/doc/fluid/api_cn/layers_cn/round_cn.rst +++ b/doc/fluid/api_cn/layers_cn/round_cn.rst @@ -5,6 +5,12 @@ round .. py:function:: paddle.fluid.layers.round(x, name=None) +:alias_main: paddle.round +:alias: paddle.round,paddle.tensor.round,paddle.tensor.math.round +:old_api: paddle.fluid.layers.round + + + 该OP将输入中的数值四舍五入到最接近的整数数值。 diff --git a/doc/fluid/api_cn/layers_cn/row_conv_cn.rst b/doc/fluid/api_cn/layers_cn/row_conv_cn.rst index 1bca0e610285ad94d67285599a6a4d5c58fc4782..ab16bdbfbb86e42131b331a373b8e66e2a1099c1 100644 --- a/doc/fluid/api_cn/layers_cn/row_conv_cn.rst +++ b/doc/fluid/api_cn/layers_cn/row_conv_cn.rst @@ -3,10 +3,13 @@ row_conv ------------------------------- -**注意:该API仅支持【静态图】模式** .. 
py:function:: paddle.fluid.layers.row_conv(input, future_context_size, param_attr=None, act=None) +:api_attr: 声明式编程模式(静态图) + + + 该接口为行卷积(Row-convolution operator)或称之为超前卷积(lookahead convolution),最早介绍于DeepSpeech2论文中,论文链接: ``_ diff --git a/doc/fluid/api_cn/layers_cn/rpn_target_assign_cn.rst b/doc/fluid/api_cn/layers_cn/rpn_target_assign_cn.rst index df80d16cf277ca7d72dce855a1c6fc190ed4a450..3edc0ee06ede956fc25791c2c833ea6b473c2c9e 100644 --- a/doc/fluid/api_cn/layers_cn/rpn_target_assign_cn.rst +++ b/doc/fluid/api_cn/layers_cn/rpn_target_assign_cn.rst @@ -5,6 +5,12 @@ rpn_target_assign .. py:function:: paddle.fluid.layers.rpn_target_assign(bbox_pred, cls_logits, anchor_box, anchor_var, gt_boxes, is_crowd, im_info, rpn_batch_size_per_im=256, rpn_straddle_thresh=0.0, rpn_fg_fraction=0.5, rpn_positive_overlap=0.7, rpn_negative_overlap=0.3, use_random=True) +:alias_main: paddle.nn.functional.rpn_target_assign +:alias: paddle.nn.functional.rpn_target_assign,paddle.nn.functional.extension.rpn_target_assign +:old_api: paddle.fluid.layers.rpn_target_assign + + + 该OP用于为anchors分配分类标签和回归标签,以便用这些标签对RPN进行训练。 该OP将anchors分为两种类别,正和负。根据Faster-RCNN的paper,正类别anchor包括以下两种anchor: diff --git a/doc/fluid/api_cn/layers_cn/rsqrt_cn.rst b/doc/fluid/api_cn/layers_cn/rsqrt_cn.rst index 6cd896637ba40b17b89dbd0129a345e78e7a4979..0d4a83041be454cdcd2f4e2e6d850d861d3bbbeb 100644 --- a/doc/fluid/api_cn/layers_cn/rsqrt_cn.rst +++ b/doc/fluid/api_cn/layers_cn/rsqrt_cn.rst @@ -5,6 +5,12 @@ rsqrt .. py:function:: paddle.fluid.layers.rsqrt(x, name=None) +:alias_main: paddle.rsqrt +:alias: paddle.rsqrt,paddle.tensor.rsqrt,paddle.tensor.math.rsqrt +:old_api: paddle.fluid.layers.rsqrt + + + 该OP为rsqrt激活函数。 注:输入x应确保为非 **0** 值,否则程序会抛异常退出。 diff --git a/doc/fluid/api_cn/layers_cn/sampled_softmax_with_cross_entropy_cn.rst b/doc/fluid/api_cn/layers_cn/sampled_softmax_with_cross_entropy_cn.rst index 3097aaaab7eafeaa38ceaa49d718363a7fe50e4a..82ca1725079152ea0b411e64ccde808984d8a5a9 100644 --- a/doc/fluid/api_cn/layers_cn/sampled_softmax_with_cross_entropy_cn.rst +++ b/doc/fluid/api_cn/layers_cn/sampled_softmax_with_cross_entropy_cn.rst @@ -5,6 +5,12 @@ sampled_softmax_with_cross_entropy .. 
py:function:: paddle.fluid.layers.sampled_softmax_with_cross_entropy(logits, label, num_samples, num_true=1, remove_accidental_hits=True, use_customized_samples=False, customized_samples=None, customized_probabilities=None, seed=0) +:alias_main: paddle.nn.functional.sampled_softmax_with_cross_entropy +:alias: paddle.nn.functional.sampled_softmax_with_cross_entropy,paddle.nn.functional.loss.sampled_softmax_with_cross_entropy +:old_api: paddle.fluid.layers.sampled_softmax_with_cross_entropy + + + **Sampled Softmax With Cross Entropy Operator** 对于较大的输出类,采样的交叉熵损失Softmax被广泛地用作输出层。该运算符为所有示例采样若干个样本,并计算每行采样张量的SoftMax标准化值,然后计算交叉熵损失。 @@ -37,7 +43,7 @@ sampled_softmax_with_cross_entropy import paddle.fluid as fluid input = fluid.layers.data(name='data', shape=[256], dtype='float32') - label = fluid.layers.data(name='label', shape=[5], dtype='int64') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') fc = fluid.layers.fc(input=input, size=100) out = fluid.layers.sampled_softmax_with_cross_entropy( logits=fc, label=label, num_samples=25) diff --git a/doc/fluid/api_cn/layers_cn/sampling_id_cn.rst b/doc/fluid/api_cn/layers_cn/sampling_id_cn.rst index 637ba97be936623f912f8e1cb3d994bb7198ea01..098bba3e2e75a62a28ea4bd9c040a1960846f858 100644 --- a/doc/fluid/api_cn/layers_cn/sampling_id_cn.rst +++ b/doc/fluid/api_cn/layers_cn/sampling_id_cn.rst @@ -5,6 +5,9 @@ sampling_id .. py:function:: paddle.fluid.layers.sampling_id(x, min=0.0, max=1.0, seed=0, dtype='float32') + + + 该OP从输入的多项分布中进行采样。 参数: diff --git a/doc/fluid/api_cn/layers_cn/scale_cn.rst b/doc/fluid/api_cn/layers_cn/scale_cn.rst index 26b1a42fb4d6ff8465a46e9c5e50a730539ecb6e..6623f9e451b594e71b28235a54dbe858d98ff9c9 100644 --- a/doc/fluid/api_cn/layers_cn/scale_cn.rst +++ b/doc/fluid/api_cn/layers_cn/scale_cn.rst @@ -5,6 +5,12 @@ scale .. py:function:: paddle.fluid.layers.scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None) +:alias_main: paddle.scale +:alias: paddle.scale,paddle.tensor.scale,paddle.tensor.math.scale +:old_api: paddle.fluid.layers.scale + + + 缩放算子。 对输入Tensor进行缩放和偏置,其公式如下: @@ -56,7 +62,7 @@ scale import numpy as np inputs = fluid.layers.data(name="x", shape=[2, 3], dtype='float32') - scale = fluid.layers.data(name="scale", shape=[1], dtype='float32' + scale = fluid.layers.data(name="scale", shape=[1], dtype='float32', append_batch_size=False) output = fluid.layers.scale(inputs, scale = scale, bias = 1.0) diff --git a/doc/fluid/api_cn/layers_cn/scatter_cn.rst b/doc/fluid/api_cn/layers_cn/scatter_cn.rst index 6792679400ea02f7b6845d6e2678be0a2570cbef..38824d7fbdd7d3aa250f24e4b8185b3399f53b5b 100644 --- a/doc/fluid/api_cn/layers_cn/scatter_cn.rst +++ b/doc/fluid/api_cn/layers_cn/scatter_cn.rst @@ -5,6 +5,12 @@ scatter .. py:function:: paddle.fluid.layers.scatter(input, index, updates, name=None, overwrite=True) +:alias_main: paddle.scatter +:alias: paddle.scatter,paddle.tensor.scatter,paddle.tensor.manipulation.scatter +:old_api: paddle.fluid.layers.scatter + + + 该OP根据index中的索引值将updates数据更新到input中。 .. code-block:: python diff --git a/doc/fluid/api_cn/layers_cn/scatter_nd_add_cn.rst b/doc/fluid/api_cn/layers_cn/scatter_nd_add_cn.rst index 3a703fb7a4c04f6dabed1257fa1207472ee4c5b6..95b26e03a214a8cd04a11ef8b6803c246d9cfcb8 100644 --- a/doc/fluid/api_cn/layers_cn/scatter_nd_add_cn.rst +++ b/doc/fluid/api_cn/layers_cn/scatter_nd_add_cn.rst @@ -5,6 +5,12 @@ scatter_nd_add .. 
py:function:: paddle.fluid.layers.scatter_nd_add(ref, index, updates, name=None) +:alias_main: paddle.scatter_nd_add +:alias: paddle.scatter_nd_add,paddle.tensor.scatter_nd_add,paddle.tensor.manipulation.scatter_nd_add +:old_api: paddle.fluid.layers.scatter_nd_add + + + 该OP通过对Variable中的单个值或切片应用稀疏加法,从而得到输出的Variable。 :code:`ref` 是维度为 :code:`R` 的张量。 :code:`index` 是维度为 :code:`K` 的张量。因此, :code:`index` 的形状是 :math:`[i_0, i_1, ..., i_{K-2}, Q]` ,其中 :math:`Q \leq R` 。:code:`updates` 是一个维度为 :math:`K - 1 + R - Q` 的张量,它的形状是 :math:`index.shape[:-1] + ref.shape[index.shape[-1]:]` 。 diff --git a/doc/fluid/api_cn/layers_cn/scatter_nd_cn.rst b/doc/fluid/api_cn/layers_cn/scatter_nd_cn.rst index b0e660b03319f087d7995c9699d117a578c1555e..60a0b84a5d774638cf99589cdc78fb42a0cd7d22 100644 --- a/doc/fluid/api_cn/layers_cn/scatter_nd_cn.rst +++ b/doc/fluid/api_cn/layers_cn/scatter_nd_cn.rst @@ -5,6 +5,12 @@ scatter_nd .. py:function:: paddle.fluid.layers.scatter_nd(index, updates, shape, name=None) +:alias_main: paddle.scatter_nd +:alias: paddle.scatter_nd,paddle.tensor.scatter_nd,paddle.tensor.manipulation.scatter_nd +:old_api: paddle.fluid.layers.scatter_nd + + + 该OP根据 :code:`index` ,将 :code:`updates` 添加到一个新的张量中,从而得到输出的Variable。这个操作与 :code:`scatter_nd_add` 类似,除了形状为 :code:`shape` 的张量是通过零初始化的。相应地, :code:`scatter_nd(index, updates, shape)` 等价于 :code:`scatter_nd_add(fluid.layers.zeros(shape, updates.dtype), index, updates)` 。如果 :code:`index` 有重复元素,则将累积相应的更新,因此,由于数值近似问题,索引中重复元素的顺序不同可能会导致不同的输出结果。具体的计算方法可以参见 :code:`scatter_nd_add` 。该OP是 :code:`gather_nd` 的反函数。 参数: diff --git a/doc/fluid/api_cn/layers_cn/selu_cn.rst b/doc/fluid/api_cn/layers_cn/selu_cn.rst index e76935f82b4a2f7edf075a913b279fb6da906cc2..4aee591bcc8dd87d0df0e9f0ef8faed224d5c5f1 100644 --- a/doc/fluid/api_cn/layers_cn/selu_cn.rst +++ b/doc/fluid/api_cn/layers_cn/selu_cn.rst @@ -5,6 +5,12 @@ selu .. py:function:: paddle.fluid.layers.selu(x, scale=None, alpha=None, name=None) +:alias_main: paddle.nn.functional.selu +:alias: paddle.nn.functional.selu,paddle.nn.functional.activation.selu +:old_api: paddle.fluid.layers.selu + + + SeLU激活函数,其公式如下: .. math:: diff --git a/doc/fluid/api_cn/layers_cn/sequence_concat_cn.rst b/doc/fluid/api_cn/layers_cn/sequence_concat_cn.rst index cac6a04da17c4639ceaddc7c7d0bf0086895a6ad..8bb16a4524400184880e1ee3cd36e570d048901d 100644 --- a/doc/fluid/api_cn/layers_cn/sequence_concat_cn.rst +++ b/doc/fluid/api_cn/layers_cn/sequence_concat_cn.rst @@ -3,10 +3,13 @@ sequence_concat ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.layers.sequence_concat(input, name=None) +:api_attr: 声明式编程模式(静态图) + + + **注意:该OP的输入只能是LoDTensor,如果您需要处理的输入是Tensor类型,请使用concat函数(fluid.layers.** :ref:`cn_api_fluid_layers_concat` **)。** **该OP仅支持LoDTensor** ,通过LoDTensor的LoD信息将输入的多个LoDTensor进行连接(concat),输出连接后的LoDTensor。 diff --git a/doc/fluid/api_cn/layers_cn/sequence_conv_cn.rst b/doc/fluid/api_cn/layers_cn/sequence_conv_cn.rst index 1730822f148df0226e1c718f89e5be8726e5429c..9bf232beca7000c5a42c552814aa5ab70179a559 100644 --- a/doc/fluid/api_cn/layers_cn/sequence_conv_cn.rst +++ b/doc/fluid/api_cn/layers_cn/sequence_conv_cn.rst @@ -3,10 +3,13 @@ sequence_conv ------------------------------- -**注意:该API仅支持【静态图】模式** .. 
py:function:: paddle.fluid.layers.sequence_conv(input, num_filters, filter_size=3, filter_stride=1, padding=True, padding_start=None, bias_attr=None, param_attr=None, act=None, name=None) +:api_attr: 声明式编程模式(静态图) + + + **注意:该OP的输入只能是LoDTensor,如果您需要处理的输入是Tensor类型,请使用conv2d函数(fluid.layers.** :ref:`cn_api_fluid_layers_conv2d` **)。** 该OP在给定的卷积参数下(如卷积核数目、卷积核大小等),对输入的变长序列(sequence)LoDTensor进行卷积操作。默认情况下,该OP会自适应地在每个输入序列的两端等长地填充全0数据,以确保卷积后的序列输出长度和输入长度一致。支持通过配置 ``padding_start`` 参数来指定序列填充的行为。 diff --git a/doc/fluid/api_cn/layers_cn/sequence_enumerate_cn.rst b/doc/fluid/api_cn/layers_cn/sequence_enumerate_cn.rst index 716220f75f202b18151ce1b8916486c0d1ad4b74..5ac0470fa3a199296d246361c7e4c5528a7e37bc 100644 --- a/doc/fluid/api_cn/layers_cn/sequence_enumerate_cn.rst +++ b/doc/fluid/api_cn/layers_cn/sequence_enumerate_cn.rst @@ -3,10 +3,13 @@ sequence_enumerate ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.layers.sequence_enumerate(input, win_size, pad_value=0, name=None) +:api_attr: 声明式编程模式(静态图) + + + 枚举形状为 ``[d_1, 1]`` 的输入序列所有长度为 ``win_size`` 的子序列,生成一个形状为 ``[d_1, win_size]`` 的新序列,需要时以 ``pad_value`` 填充。 注意,该OP的输入 ``input`` 只能是LodTensor。 diff --git a/doc/fluid/api_cn/layers_cn/sequence_expand_as_cn.rst b/doc/fluid/api_cn/layers_cn/sequence_expand_as_cn.rst index a68eee9f3435179983775431c759d0038bd75bac..c76d64fe47934ec7a52a125b07a14ce2e1fd1442 100644 --- a/doc/fluid/api_cn/layers_cn/sequence_expand_as_cn.rst +++ b/doc/fluid/api_cn/layers_cn/sequence_expand_as_cn.rst @@ -3,10 +3,13 @@ sequence_expand_as ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.layers.sequence_expand_as(x, y, name=None) +:api_attr: 声明式编程模式(静态图) + + + Sequence Expand As Layer,该OP根据输入 ``y`` 的第0级lod对输入 ``x`` 进行扩展。当前实现要求 ``y`` 的lod层数(level)必须为1,且 ``x`` 的第一维必须和 ``y`` 的第0层lod大小相同,所以扩展后的LodTensor具有和 ``y`` 相同的lod。扩展结果与输入 ``x`` 的lod无关,所以无需考虑 ``x`` 的lod。 注意,该OP的输入 ``x`` 可以是Tensor或LoDTensor, ``y`` 只能是LodTensor。 diff --git a/doc/fluid/api_cn/layers_cn/sequence_expand_cn.rst b/doc/fluid/api_cn/layers_cn/sequence_expand_cn.rst index 414719d8d2de85a688326dee7a90ac573fe673c6..81bbbe60230a1c14f09e8c066ba957bac8c054b9 100644 --- a/doc/fluid/api_cn/layers_cn/sequence_expand_cn.rst +++ b/doc/fluid/api_cn/layers_cn/sequence_expand_cn.rst @@ -3,10 +3,13 @@ sequence_expand ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.layers.sequence_expand(x, y, ref_level=-1, name=None) +:api_attr: 声明式编程模式(静态图) + + + 序列扩张层(Sequence Expand Layer),根据输入 ``y`` 的第 ``ref_level`` 层lod对输入 ``x`` 进行扩展。 ``x`` 的lod level最多为1,若 ``x`` 的lod level为1,则 ``x`` 的lod大小必须与 ``y`` 的第 ``ref_level`` 层lod大小相等;若 ``x`` 的lod level为0,则 ``x`` 的第一维大小必须与 ``y`` 第 ``ref_level`` 层大小相等。 ``x`` 的秩最少为2,当 ``x`` 的秩大于2时,将被当作是一个二维张量处理。 注意,该OP的输入 ``x`` 可以是Tensor或LodTensor, ``y`` 只能是LodTensor。 diff --git a/doc/fluid/api_cn/layers_cn/sequence_first_step_cn.rst b/doc/fluid/api_cn/layers_cn/sequence_first_step_cn.rst index 5eabcb568b23c2195f7e9113ed0eb6b133febfb9..bdc2afbba3f1ffe770b9dfc5364b248b6d44d6da 100644 --- a/doc/fluid/api_cn/layers_cn/sequence_first_step_cn.rst +++ b/doc/fluid/api_cn/layers_cn/sequence_first_step_cn.rst @@ -3,10 +3,13 @@ sequence_first_step ------------------------------- -**注意:该API仅支持【静态图】模式** .. 
py:function:: paddle.fluid.layers.sequence_first_step(input) +:api_attr: 声明式编程模式(静态图) + + + 该OP **仅支持LoDTensor类型的输入** ,将对输入的LoDTensor,在最后一层lod_level上,选取其每个序列(sequence)的第一个时间步(time_step)的特征向量作为池化后的输出向量。 :: diff --git a/doc/fluid/api_cn/layers_cn/sequence_last_step_cn.rst b/doc/fluid/api_cn/layers_cn/sequence_last_step_cn.rst index 2e0fcffa6435122250fc226caf9ea26699d4ad36..c2f4604c51ef494f4e374ba0efb88d73a1e1b778 100644 --- a/doc/fluid/api_cn/layers_cn/sequence_last_step_cn.rst +++ b/doc/fluid/api_cn/layers_cn/sequence_last_step_cn.rst @@ -3,10 +3,13 @@ sequence_last_step ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.layers.sequence_last_step(input) +:api_attr: 声明式编程模式(静态图) + + + 该OP **仅支持LoDTensor类型的输入** ,将对输入的LoDTensor,在最后一层lod_level上,选取其每个序列(sequence)的最后一个时间步(time-step)的特征向量作为池化后的输出向量。 :: diff --git a/doc/fluid/api_cn/layers_cn/sequence_mask_cn.rst b/doc/fluid/api_cn/layers_cn/sequence_mask_cn.rst index e04c21593d0f129ae21908ee5f848e7949bf909b..ac7fafb2faa1728201dc7452da644c8449a6f728 100644 --- a/doc/fluid/api_cn/layers_cn/sequence_mask_cn.rst +++ b/doc/fluid/api_cn/layers_cn/sequence_mask_cn.rst @@ -5,6 +5,9 @@ sequence_mask .. py:function:: paddle.fluid.layers.sequence_mask(x, maxlen=None, dtype='int64', name=None) + + + 该层根据输入 ``x`` 和 ``maxlen`` 输出一个掩码,数据类型为 ``dtype`` 。 假设 x 是一个形状为 ``[d_1, d_2,…, d_n]`` 的张量, 则输出 y 是一个形状为 ``[d_1, d_2,… ,d_n, maxlen]`` 的掩码,其中: diff --git a/doc/fluid/api_cn/layers_cn/sequence_pad_cn.rst b/doc/fluid/api_cn/layers_cn/sequence_pad_cn.rst index 5f509c960ecc5e5930fe77b7f5e5f1cc3be09801..12ddf6eb2ecd59afe5700187479a636d33cd7fe7 100644 --- a/doc/fluid/api_cn/layers_cn/sequence_pad_cn.rst +++ b/doc/fluid/api_cn/layers_cn/sequence_pad_cn.rst @@ -3,10 +3,13 @@ sequence_pad ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.layers.sequence_pad(x,pad_value,maxlen=None,name=None) +:api_attr: 声明式编程模式(静态图) + + + 序列填充操作符(Sequence Pad Operator),该OP将同一batch中的序列填充到一个一致的长度(由 ``maxlen`` 指定)。填充的新元素的值具体由输入 ``pad_value`` 指定,并会添加到每一个序列的末尾,使得他们最终的长度保持一致。最后返回一个Python tuple ``(Out, Length)`` ,其中LodTensor ``Out`` 为填充后的序列,LodTensor ``Length`` 为填充前的原序列长度信息。 注意,该OP的输入 ``x`` 只能是LodTensor。 diff --git a/doc/fluid/api_cn/layers_cn/sequence_pool_cn.rst b/doc/fluid/api_cn/layers_cn/sequence_pool_cn.rst index e5a1237838851a6e3c0688e9e02d934ab00361ca..480bc75c92f9d5412861391bbcb5ce94361a1701 100644 --- a/doc/fluid/api_cn/layers_cn/sequence_pool_cn.rst +++ b/doc/fluid/api_cn/layers_cn/sequence_pool_cn.rst @@ -3,10 +3,13 @@ sequence_pool ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.layers.sequence_pool(input, pool_type, is_test=False, pad_value=0.0) +:api_attr: 声明式编程模式(静态图) + + + **注意:该OP的输入只能是LoDTensor,如果您需要处理的输入是Tensor类型,请使用pool2d函数(fluid.layers.** :ref:`cn_api_fluid_layers_pool2d` **)。** 该OP **仅支持LoDTensor类型的输入** ,将对输入的LoDTensor进行指定方式的池化(pooling)操作。通过指定pool_type参数,将输入的每个序列(sequence)在最后一层lod_level上或时间步(time-step)上对特征进行诸如sum、average、sqrt等池化操作。 diff --git a/doc/fluid/api_cn/layers_cn/sequence_reshape_cn.rst b/doc/fluid/api_cn/layers_cn/sequence_reshape_cn.rst index 34f80bef63c67113261560df9c8f7e780cd684c9..78385efd975cc5c88768fe787c552b782688e6cb 100644 --- a/doc/fluid/api_cn/layers_cn/sequence_reshape_cn.rst +++ b/doc/fluid/api_cn/layers_cn/sequence_reshape_cn.rst @@ -3,10 +3,13 @@ sequence_reshape ------------------------------- -**注意:该API仅支持【静态图】模式** .. 
py:function:: paddle.fluid.layers.sequence_reshape(input, new_dim) +:api_attr: 声明式编程模式(静态图) + + + **注意:该OP的输入只能是LoDTensor,如果您需要处理的输入是Tensor类型,请使用reshape函数(fluid.layers.** :ref:`cn_api_fluid_layers_reshape` **)。** **该OP仅支持LoDTensor** ,在指定 ``new_dim`` 参数下,通过序列原始长度、和原始shape计算出新的shape,以输出包含新维度(new_dim)下的LoDTensor。目前仅支持1-level LoDTensor,请确保(原长度*原维数)可以除以新的维数,且每个序列没有余数。 diff --git a/doc/fluid/api_cn/layers_cn/sequence_reverse_cn.rst b/doc/fluid/api_cn/layers_cn/sequence_reverse_cn.rst index 675ef36d2ae30e27303b2ee67f34a7685cf85d5f..2bad60302dc081653322dee0bbc23a6c20cc84e9 100644 --- a/doc/fluid/api_cn/layers_cn/sequence_reverse_cn.rst +++ b/doc/fluid/api_cn/layers_cn/sequence_reverse_cn.rst @@ -5,6 +5,9 @@ sequence_reverse .. py:function:: paddle.fluid.layers.sequence_reverse(x, name=None) + + + **注意:该OP的输入只能是LoDTensor,如果您需要处理的输入是Tensor类型,请使用reverse函数(fluid.layers.** :ref:`cn_api_fluid_layers_reverse` **)。** **该OP仅支持LoDTensor** ,对于输入的LoDTensor,在每个序列(sequence)上进行反转。目前仅支持对LoD层次(LoD level)为1的LoDTensor进行反转。该OP在构建反向 :ref:`cn_api_fluid_layers_DynamicRNN` 网络时十分有用。 diff --git a/doc/fluid/api_cn/layers_cn/sequence_scatter_cn.rst b/doc/fluid/api_cn/layers_cn/sequence_scatter_cn.rst index 086a9a41fa6697bca3eebbe486b3a14a422e9edb..aaae0735e5ac837e744f7c87a3c7447a894e8e4c 100644 --- a/doc/fluid/api_cn/layers_cn/sequence_scatter_cn.rst +++ b/doc/fluid/api_cn/layers_cn/sequence_scatter_cn.rst @@ -3,10 +3,13 @@ sequence_scatter ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.layers.sequence_scatter(input, index, updates, name=None) +:api_attr: 声明式编程模式(静态图) + + + .. note:: 该OP的输入index,updates必须是LoDTensor。 @@ -45,7 +48,7 @@ output[i][j]的值取决于能否在index中第i+1个区间中找到对应的数 参数: - **input** (Variable) - 维度为 :math:`[N, k_1 ... k_n]` 的Tensor, 支持的数据类型:float32,float64,int32,int64。 - - **index** (Variable) - 包含index信息的LoDTensor,lod level必须等于1,支持的数据类型:int64。 + - **index** (Variable) - 包含index信息的LoDTensor,lod level必须等于1,支持的数据类型:int32,int64。 - **updates** (Variable) - 包含updates信息的LoDTensor,lod level和index一致,数据类型与input的数据类型一致。支持的数据类型:float32,float64,int32,int64。 - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 diff --git a/doc/fluid/api_cn/layers_cn/sequence_slice_cn.rst b/doc/fluid/api_cn/layers_cn/sequence_slice_cn.rst index 4d67aea9acd1c49738242040d25e3a6b69bb3529..4281741274a644ed90949de219105be69e5729b8 100644 --- a/doc/fluid/api_cn/layers_cn/sequence_slice_cn.rst +++ b/doc/fluid/api_cn/layers_cn/sequence_slice_cn.rst @@ -3,10 +3,13 @@ sequence_slice ------------------------------- -**注意:该API仅支持【静态图】模式** .. 
py:function:: paddle.fluid.layers.sequence_slice(input, offset, length, name=None) +:api_attr: 声明式编程模式(静态图) + + + **实现Sequence Slice(序列切片)运算** **该OP输入只能是LoDTensor, 如果您需要处理的是Tensor类型,请使用 :ref:`cn_api_fluid_layers_slice` 。** @@ -36,7 +39,7 @@ sequence_slice ``offset`` 从0开始。 参数: - - **input** (Variable) – 输入变量,类型为LoDTensor,承载着完整的序列 + - **input** (Variable) – 输入变量,类型为LoDTensor,承载着完整的序列。数据类型为float32,float64,int32或int64。 - **offset** (Variable) – 指定每个序列切片的起始索引,数据类型为int32或int64。 - **length** (Variable) – 指定每个子序列的长度,数据类型为int32或int64。 - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 diff --git a/doc/fluid/api_cn/layers_cn/sequence_softmax_cn.rst b/doc/fluid/api_cn/layers_cn/sequence_softmax_cn.rst index 0638ffb8a2512ce594d23c3805f81c0c9930bdb5..e59c1fbc8d1925860cbe2f38eba343244f20ce68 100644 --- a/doc/fluid/api_cn/layers_cn/sequence_softmax_cn.rst +++ b/doc/fluid/api_cn/layers_cn/sequence_softmax_cn.rst @@ -3,10 +3,13 @@ sequence_softmax ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.layers.sequence_softmax(input, use_cudnn=False, name=None) +:api_attr: 声明式编程模式(静态图) + + + .. note:: 该OP的输入只能是LoDTensor,如果要处理的输入是Tensor类型,请使用 :ref:`cn_api_fluid_layers_softmax` diff --git a/doc/fluid/api_cn/layers_cn/sequence_unpad_cn.rst b/doc/fluid/api_cn/layers_cn/sequence_unpad_cn.rst index 0b00795fa1a81e3f47f6cc78ca8a8d7041a856c2..8a1587c733d3341dcc21009a72c700bfc9fe2fe3 100644 --- a/doc/fluid/api_cn/layers_cn/sequence_unpad_cn.rst +++ b/doc/fluid/api_cn/layers_cn/sequence_unpad_cn.rst @@ -3,10 +3,13 @@ sequence_unpad ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.layers.sequence_unpad(x, length, name=None) +:api_attr: 声明式编程模式(静态图) + + + .. note:: 该OP的输入为Tensor,输出为LoDTensor。该OP用于移除填充元素,与之对应,还存在进行数据填充的OP sequence_pad,详情见: :ref:`cn_api_fluid_layers_sequence_pad` diff --git a/doc/fluid/api_cn/layers_cn/shape_cn.rst b/doc/fluid/api_cn/layers_cn/shape_cn.rst index 44df86e1d2f7db338f038ce25deb37f8856b7b94..0672af0a3a203fd1d2e866705c2b52ee21421fe9 100644 --- a/doc/fluid/api_cn/layers_cn/shape_cn.rst +++ b/doc/fluid/api_cn/layers_cn/shape_cn.rst @@ -5,14 +5,38 @@ shape .. 
py:function:: paddle.fluid.layers.shape(input) +:alias_main: paddle.shape +:alias: paddle.shape,paddle.tensor.shape,paddle.tensor.attribute.shape +:old_api: paddle.fluid.layers.shape + + + shape层。 -获得输入Tensor的shape。 +获得输入Tensor或SelectedRows的shape。 + +:: + + 示例1: + 输入是 N-D Tensor类型: + input = [ [1, 2, 3, 4], [5, 6, 7, 8] ] + + 输出shape: + input.shape = [2, 4] + + 示例2: + 输入是 SelectedRows类型: + input.rows = [0, 4, 19] + input.height = 20 + input.value = [ [1, 2], [3, 4], [5, 6] ] # inner tensor + 输出shape: + input.shape = [3, 2] 参数: - - **input** (Variable)- 输入的多维Tensor,数据类型为float32,float64,int32,int64。 + - **input** (Variable)- 输入的多维Tensor或SelectedRows,数据类型为float16,float32,float64,int32,int64。如果输入是SelectedRows类型,则返回其内部持有Tensor的shape。 + -返回: 一个Tensor,表示输入Tensor的shape。 +返回: 一个Tensor,表示输入Tensor或SelectedRows的shape。 返回类型: Variable(Tensor)。 @@ -23,7 +47,7 @@ shape层。 import paddle.fluid as fluid import numpy as np - inputs = fluid.layers.data(name="x", shape=[3, 100, 100], dtype="float32") + inputs = fluid.data(name="x", shape=[3, 100, 100], dtype="float32") output = fluid.layers.shape(inputs) exe = fluid.Executor(fluid.CPUPlace()) diff --git a/doc/fluid/api_cn/layers_cn/shard_index_cn.rst b/doc/fluid/api_cn/layers_cn/shard_index_cn.rst index 4f454e662fe74e6ae41a1fc7ae068183097df6dc..2bf72254ca86f7559f30f3dcec891248a0a64e6b 100644 --- a/doc/fluid/api_cn/layers_cn/shard_index_cn.rst +++ b/doc/fluid/api_cn/layers_cn/shard_index_cn.rst @@ -5,6 +5,12 @@ shard_index .. py:function:: paddle.fluid.layers.shard_index(input, index_num, nshards, shard_id, ignore_value=-1) +:alias_main: paddle.shard_index +:alias: paddle.shard_index,paddle.tensor.shard_index,paddle.tensor.manipulation.shard_index +:old_api: paddle.fluid.layers.shard_index + + + 该函数对输入的索引根据分片(shard)的偏移量重新计算。 索引长度被均分为N个分片,如果输入索引所在的分片跟分片ID对应,则该索引以分片的偏移量为界重新计算,否则更新为默认值(ignore_value)。具体计算为: :: diff --git a/doc/fluid/api_cn/layers_cn/shuffle_channel_cn.rst b/doc/fluid/api_cn/layers_cn/shuffle_channel_cn.rst index a91ce5ea552a528deca87dcd4b6f8755c32712c3..fffbae0a48f36c1cfa80a37ea91fdddf139d75b9 100644 --- a/doc/fluid/api_cn/layers_cn/shuffle_channel_cn.rst +++ b/doc/fluid/api_cn/layers_cn/shuffle_channel_cn.rst @@ -5,6 +5,12 @@ shuffle_channel .. py:function:: paddle.fluid.layers.shuffle_channel(x, group, name=None) +:alias_main: paddle.nn.functional.shuffle_channel +:alias: paddle.nn.functional.shuffle_channel,paddle.nn.functional.vision.shuffle_channel +:old_api: paddle.fluid.layers.shuffle_channel + + + 该OP将输入 ``x`` 的通道混洗重排。 它将每个组中的输入通道分成 ``group`` 个子组,并通过逐一从每个子组中选择元素来获得新的顺序。 请参阅 https://arxiv.org/pdf/1707.01083.pdf diff --git a/doc/fluid/api_cn/layers_cn/shuffle_cn.rst b/doc/fluid/api_cn/layers_cn/shuffle_cn.rst deleted file mode 100644 index 3be4313d48586820adba07b298dabdac23fc86be..0000000000000000000000000000000000000000 --- a/doc/fluid/api_cn/layers_cn/shuffle_cn.rst +++ /dev/null @@ -1,40 +0,0 @@ -.. _cn_api_fluid_layers_shuffle: - -shuffle -------------------------------- - -.. py:function:: paddle.fluid.layers.shuffle(reader, buffer_size) - -创建一个特殊的数据读取器,它的输出数据会被重洗(shuffle)。由原始读取器创建的迭代器得到的输出将会被暂存到shuffle缓存区,其后 -会对其进行重洗运算。shuffle缓存区的大小由参数 ``buffer_size`` 决定。 - -参数: - - **reader** (callable) – 输出会被shuffle的原始reader - - **buffer_size** (int) – 进行shuffle的buffer的大小 - -返回:其输出会被shuffle的一个reader(读取器) - -返回类型:callable - -**代码示例**: - -.. 
code-block:: python - - import paddle.fluid as fluid - raw_reader = fluid.layers.io.open_files(filenames=['./data1.recordio', - './data2.recordio'], - shapes=[(3,224,224), (1,)], - lod_levels=[0, 0], - dtypes=['float32', 'int64'], - thread_num=2, - buffer_size=2) - batch_reader = fluid.layers.batch(reader=raw_reader, batch_size=5) - shuffle_reader = fluid.layers.shuffle(reader=batch_reader, buffer_size=5000) - - - - - - - - diff --git a/doc/fluid/api_cn/layers_cn/sigmoid_cn.rst b/doc/fluid/api_cn/layers_cn/sigmoid_cn.rst index f510edc7e1eecf4f4e72803124a38ce2e1b2c0d7..fb5ccd21da695a2f3c840663797b66a5ccaff54c 100755 --- a/doc/fluid/api_cn/layers_cn/sigmoid_cn.rst +++ b/doc/fluid/api_cn/layers_cn/sigmoid_cn.rst @@ -5,6 +5,9 @@ sigmoid .. py:function:: paddle.fluid.layers.sigmoid(x, name=None) + + + sigmoid激活函数 .. math:: diff --git a/doc/fluid/api_cn/layers_cn/sigmoid_cross_entropy_with_logits_cn.rst b/doc/fluid/api_cn/layers_cn/sigmoid_cross_entropy_with_logits_cn.rst index bbe525f558f899f632359d93af0a82cdd26d2467..18506b0f583475bad23de7f9b3c1e805ca9ada16 100644 --- a/doc/fluid/api_cn/layers_cn/sigmoid_cross_entropy_with_logits_cn.rst +++ b/doc/fluid/api_cn/layers_cn/sigmoid_cross_entropy_with_logits_cn.rst @@ -5,6 +5,12 @@ sigmoid_cross_entropy_with_logits .. py:function:: paddle.fluid.layers.sigmoid_cross_entropy_with_logits(x, label, ignore_index=-100, name=None, normalize=False) +:alias_main: paddle.nn.functional.sigmoid_cross_entropy_with_logits +:alias: paddle.nn.functional.sigmoid_cross_entropy_with_logits,paddle.nn.functional.loss.sigmoid_cross_entropy_with_logits +:old_api: paddle.fluid.layers.sigmoid_cross_entropy_with_logits + + + 在每个类别独立的分类任务中,该OP可以计算按元素的概率误差。可以将其视为预测数据点的标签,其中标签不是互斥的。例如,一篇新闻文章可以同时关于政治,科技,体育或者同时不包含这些内容。 logistic loss可通过下式计算: diff --git a/doc/fluid/api_cn/layers_cn/sigmoid_focal_loss_cn.rst b/doc/fluid/api_cn/layers_cn/sigmoid_focal_loss_cn.rst index b5e33e3b7385808093a1359c9e36b6ed8453c65e..2f47561a89a4560e0c39553205b4d7fa68c7a841 100644 --- a/doc/fluid/api_cn/layers_cn/sigmoid_focal_loss_cn.rst +++ b/doc/fluid/api_cn/layers_cn/sigmoid_focal_loss_cn.rst @@ -3,7 +3,13 @@ sigmoid_focal_loss ------------------------------- -.. py:function:: paddle.fluid.layers.sigmoid_focal_loss(x, label, fg_num, gamma=2, alpha=0.25) +.. py:function:: paddle.fluid.layers.sigmoid_focal_loss(x, label, fg_num, gamma=2.0, alpha=0.25) + +:alias_main: paddle.nn.functional.sigmoid_focal_loss +:alias: paddle.nn.functional.sigmoid_focal_loss,paddle.nn.functional.loss.sigmoid_focal_loss +:old_api: paddle.fluid.layers.sigmoid_focal_loss + + `Focal Loss `_ 被提出用于解决计算机视觉任务中前景-背景不平衡的问题。该OP先计算输入x中每个元素的sigmoid值,然后计算sigmoid值与类别目标值label之间的Focal Loss。 @@ -28,8 +34,8 @@ Focal Loss的计算过程如下: - **x** (Variable) – 维度为 :math:`[N, C]` 的2-D Tensor,表示全部样本的分类预测值。其中,第一维N是批量内参与训练的样本数量,例如在目标检测中,样本为框级别,N为批量内所有图像的正负样本的数量总和;在图像分类中,样本为图像级别,N为批量内的图像数量总和。第二维:math:`C` 是类别数量( **不包括背景类** )。数据类型为float32或float64。 - **label** (Variable) – 维度为 :math:`[N, 1]` 的2-D Tensor,表示全部样本的分类目标值。其中,第一维N是批量内参与训练的样本数量,第二维1表示每个样本只有一个类别目标值。正样本的目标类别值的取值范围是 :math:`[1, C]` , 负样本的目标类别值是0。数据类型为int32。 - **fg_num** (Variable) – 维度为 :math:`[1]` 的1-D Tensor,表示批量内正样本的数量,需在进入此OP前获取正样本的数量。数据类型为int32。 - - **gamma** (float) – 用于平衡易分样本和难分样本的超参数, 默认值设置为2.0。 - - **alpha** (float) – 用于平衡正样本和负样本的超参数,默认值设置为0.25。 + - **gamma** (int|float) – 用于平衡易分样本和难分样本的超参数, 默认值设置为2.0。 + - **alpha** (int|float) – 用于平衡正样本和负样本的超参数,默认值设置为0.25。 返回: 输入x中每个元素的Focal loss,即维度为 :math:`[N, C]` 的2-D Tensor。 @@ -41,13 +47,70 @@ Focal Loss的计算过程如下: .. 
code-block:: python + import numpy as np import paddle.fluid as fluid - - input = fluid.data(name='data', shape=[10,80], dtype='float32') - label = fluid.data(name='label', shape=[10,1], dtype='int32') - fg_num = fluid.data(name='fg_num', shape=[1], dtype='int32') - loss = fluid.layers.sigmoid_focal_loss(x=input, - label=label, - fg_num=fg_num, - gamma=2., - alpha=0.25) + + num_classes = 10 # exclude background + image_width = 16 + image_height = 16 + batch_size = 32 + max_iter = 20 + + + def gen_train_data(): + x_data = np.random.uniform(0, 255, (batch_size, 3, image_height, + image_width)).astype('float64') + label_data = np.random.randint(0, num_classes, + (batch_size, 1)).astype('int32') + return {"x": x_data, "label": label_data} + + + def get_focal_loss(pred, label, fg_num, num_classes): + pred = fluid.layers.reshape(pred, [-1, num_classes]) + label = fluid.layers.reshape(label, [-1, 1]) + label.stop_gradient = True + loss = fluid.layers.sigmoid_focal_loss( + pred, label, fg_num, gamma=2.0, alpha=0.25) + loss = fluid.layers.reduce_sum(loss) + return loss + + + def build_model(mode='train'): + x = fluid.data(name="x", shape=[-1, 3, -1, -1], dtype='float64') + output = fluid.layers.pool2d(input=x, pool_type='avg', global_pooling=True) + output = fluid.layers.fc( + input=output, + size=num_classes, + # Notice: size is set to be the number of target classes (excluding background) + # because sigmoid activation will be done in the sigmoid_focal_loss op. + act=None) + if mode == 'train': + label = fluid.data(name="label", shape=[-1, 1], dtype='int32') + # Obtain the fg_num needed by the sigmoid_focal_loss op: + # 0 in label represents background, >=1 in label represents foreground, + # find the elements in label which are greater than or equal to 1, then + # compute the number of these elements. + data = fluid.layers.fill_constant(shape=[1], value=1, dtype='int32') + fg_label = fluid.layers.greater_equal(label, data) + fg_label = fluid.layers.cast(fg_label, dtype='int32') + fg_num = fluid.layers.reduce_sum(fg_label) + fg_num.stop_gradient = True + avg_loss = get_focal_loss(output, label, fg_num, num_classes) + return avg_loss + else: + # During the evaluation or testing phase, + # output of the final fc layer should be connected to a sigmoid layer. + pred = fluid.layers.sigmoid(output) + return pred + + + loss = build_model('train') + moment_optimizer = fluid.optimizer.MomentumOptimizer( + learning_rate=0.001, momentum=0.9) + moment_optimizer.minimize(loss) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + for i in range(max_iter): + outs = exe.run(feed=gen_train_data(), fetch_list=[loss.name]) + print(outs) diff --git a/doc/fluid/api_cn/layers_cn/similarity_focus_cn.rst b/doc/fluid/api_cn/layers_cn/similarity_focus_cn.rst index 576cc89e92fa4e0c46e6dd780820c13ec08e2a08..5024493c825ea66ac0ed63ec35f05cc73e2e45e9 100644 --- a/doc/fluid/api_cn/layers_cn/similarity_focus_cn.rst +++ b/doc/fluid/api_cn/layers_cn/similarity_focus_cn.rst @@ -5,6 +5,12 @@ similarity_focus ..
py:function:: paddle.fluid.layers.similarity_focus(input, axis, indexes, name=None) +:alias_main: paddle.nn.functional.similarity_focus +:alias: paddle.nn.functional.similarity_focus,paddle.nn.functional.extension.similarity_focus +:old_api: paddle.fluid.layers.similarity_focus + + + **实现SimilarityFocus(相似度聚焦)运算** 通过以下三个步骤,该层生成一个和输入 ``input`` 同形的 similarity focus mask(相似度聚焦掩码): diff --git a/doc/fluid/api_cn/layers_cn/sin_cn.rst b/doc/fluid/api_cn/layers_cn/sin_cn.rst index 797c86f8ff56bed87a4ab21523f2b68a2c8fa9c6..5dd8bcd2206d95f485e796a3f0958d8fcade98a6 100644 --- a/doc/fluid/api_cn/layers_cn/sin_cn.rst +++ b/doc/fluid/api_cn/layers_cn/sin_cn.rst @@ -5,6 +5,9 @@ sin .. py:function:: paddle.fluid.layers.sin(x, name=None) + + + 计算输入的正弦值。 参数: diff --git a/doc/fluid/api_cn/layers_cn/size_cn.rst b/doc/fluid/api_cn/layers_cn/size_cn.rst index 677e8e3e72be6f2645995a0a2aeb222a4d5f3828..6082053be65007c49e2b1e4e41e8a7cf7164035c 100644 --- a/doc/fluid/api_cn/layers_cn/size_cn.rst +++ b/doc/fluid/api_cn/layers_cn/size_cn.rst @@ -5,6 +5,9 @@ size .. py:function:: paddle.fluid.layers.size(input) + + + 返回张量的单元数量,是一个shape为[1]的int64的张量。 参数: diff --git a/doc/fluid/api_cn/layers_cn/slice_cn.rst b/doc/fluid/api_cn/layers_cn/slice_cn.rst old mode 100644 new mode 100755 index 0a0619109c21b9c281989ab8ccaa6383b5d18d41..4c2214100e77940abb473fa8d5b148187a3a8176 --- a/doc/fluid/api_cn/layers_cn/slice_cn.rst +++ b/doc/fluid/api_cn/layers_cn/slice_cn.rst @@ -5,6 +5,12 @@ slice .. py:function:: paddle.fluid.layers.slice(input, axes, starts, ends) +:alias_main: paddle.slice +:alias: paddle.slice,paddle.tensor.slice,paddle.tensor.manipulation.slice +:old_api: paddle.fluid.layers.slice + + + 该OP沿多个轴生成 ``input`` 的切片。与numpy类似: https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html 该OP使用 ``axes`` 、 ``starts`` 和 ``ends`` 属性来指定轴列表中每个轴的起点和终点位置,并使用此信息来对 ``input`` 切片。如果向 ``starts`` 或 ``ends`` 传递负值如 :math:`-i`,则表示该轴的反向第 :math:`i-1` 个位置(这里以0为初始位置)。如果传递给 ``starts`` 或 ``end`` 的值大于n(维度中的元素数目),则表示n。当切片一个未知数量的维度时,建议传入 ``INT_MAX``。 ``axes`` 、 ``starts`` 和 ``ends`` 三个参数的元素数目必须相等。以下示例将解释切片如何工作: :: @@ -28,7 +34,7 @@ slice 参数: - **input** (Variable)- 多维 ``Tensor`` 或 ``LoDTensor``,数据类型为 ``float16``, ``float32``,``float64``,``int32``,或 ``int64``。 - - **axes** (list|tuple)- 数据类型是 ``int32``。表示进行切片的轴。它是可选的,如果不存在,将被视为 :math:`[0,1,...,len(starts)- 1]`。 + - **axes** (list|tuple)- 数据类型是 ``int32``。表示进行切片的轴。 - **starts** (list|tuple|Variable)- 数据类型是 ``int32``。如果 ``starts`` 的类型是 list 或 tuple,它的元素可以是整数或者形状为[1]的 ``Tensor`` 或 ``LoDTensor``。如果 ``starts`` 的类型是 ``Variable``,则是1-D ``Tensor`` 或 ``LoDTensor``。表示在各个轴上切片的起始索引值。 - **ends** (list|tuple|Variable)- 数据类型是 ``int32``。如果 ``ends`` 的类型是 list 或 tuple,它的元素可以是整数或者形状为[1]的 ``Tensor`` 或 ``LoDTensor``。如果 ``ends`` 的类型是 ``Variable``,则是1-D ``Tensor`` 或 ``LoDTensor``。表示在各个轴上切片的结束索引值。 @@ -39,7 +45,6 @@ slice 抛出异常: - :code:`TypeError`:``starts`` 的类型应该是 list、tuple 或 Variable。 - :code:`TypeError`:``ends`` 的类型应该是 list、tuple 或 Variable。 - **代码示例:** .. code-block:: python diff --git a/doc/fluid/api_cn/layers_cn/smooth_l1_cn.rst b/doc/fluid/api_cn/layers_cn/smooth_l1_cn.rst index 522ccd607183f20143dd195da1d1479770f8f3a1..8b19838d11e7bc0687873b8a880c0d407328cbdd 100755 --- a/doc/fluid/api_cn/layers_cn/smooth_l1_cn.rst +++ b/doc/fluid/api_cn/layers_cn/smooth_l1_cn.rst @@ -5,6 +5,12 @@ smooth_l1 .. 
py:function:: paddle.fluid.layers.smooth_l1(x, y, inside_weight=None, outside_weight=None, sigma=None) +:alias_main: paddle.nn.functional.smooth_l1 +:alias: paddle.nn.functional.smooth_l1,paddle.nn.functional.loss.smooth_l1 +:old_api: paddle.fluid.layers.smooth_l1 + + + 该layer计算变量 ``x`` 和 ``y`` 的smooth L1 loss,它以 ``x`` 和 ``y`` 的第一维大小作为批处理大小。对于每个实例,按元素计算smooth L1 loss,然后计算所有loss。输出变量的形状是[batch_size, 1] diff --git a/doc/fluid/api_cn/layers_cn/soft_relu_cn.rst b/doc/fluid/api_cn/layers_cn/soft_relu_cn.rst index 0226abd43b6ba1e0ff76c37741fc1119c4f81747..dc8912ca4f15478b703df53a3f1a58f7e97c5668 100644 --- a/doc/fluid/api_cn/layers_cn/soft_relu_cn.rst +++ b/doc/fluid/api_cn/layers_cn/soft_relu_cn.rst @@ -5,6 +5,12 @@ soft_relu .. py:function:: paddle.fluid.layers.soft_relu(x, threshold=40.0, name=None) +:alias_main: paddle.nn.functional.soft_relu +:alias: paddle.nn.functional.soft_relu,paddle.nn.functional.activation.soft_relu +:old_api: paddle.fluid.layers.soft_relu + + + SoftReLU 激活函数. .. math:: out=ln(1+exp(max(min(x,threshold),-threshold))) diff --git a/doc/fluid/api_cn/layers_cn/softmax_with_cross_entropy_cn.rst b/doc/fluid/api_cn/layers_cn/softmax_with_cross_entropy_cn.rst index 22fbbaf614173991e49d45a8eefbd72230b0f335..9b3539fac66fb03c84eead625c1096dce81af80c 100644 --- a/doc/fluid/api_cn/layers_cn/softmax_with_cross_entropy_cn.rst +++ b/doc/fluid/api_cn/layers_cn/softmax_with_cross_entropy_cn.rst @@ -5,6 +5,12 @@ softmax_with_cross_entropy .. py:function:: paddle.fluid.layers.softmax_with_cross_entropy(logits, label, soft_label=False, ignore_index=-100, numeric_stable_mode=True, return_softmax=False, axis=-1) +:alias_main: paddle.nn.functional.softmax_with_cross_entropy +:alias: paddle.nn.functional.softmax_with_cross_entropy,paddle.nn.functional.loss.softmax_with_cross_entropy +:old_api: paddle.fluid.layers.softmax_with_cross_entropy + + + 该OP实现了softmax交叉熵损失函数。该函数会将softmax操作、交叉熵损失函数的计算过程进行合并,从而提供了数值上更稳定的梯度值。 因为该运算对 ``logits`` 的 ``axis`` 维执行softmax运算,所以它需要未缩放的 ``logits`` 。该运算不应该对softmax运算的输出进行操作,否则会产生错误的结果。 diff --git a/doc/fluid/api_cn/layers_cn/softplus_cn.rst b/doc/fluid/api_cn/layers_cn/softplus_cn.rst index 6f09408dc0ca5df8e439211c4379deeaf0bbf826..34ffe22854ec38f6e4d6379ff4925f002005901a 100644 --- a/doc/fluid/api_cn/layers_cn/softplus_cn.rst +++ b/doc/fluid/api_cn/layers_cn/softplus_cn.rst @@ -5,6 +5,12 @@ softplus .. py:function:: paddle.fluid.layers.softplus(x,name=None) +:alias_main: paddle.nn.functional.softplus +:alias: paddle.nn.functional.softplus,paddle.nn.functional.activation.softplus +:old_api: paddle.fluid.layers.softplus + + + softplus激活函数 .. math:: diff --git a/doc/fluid/api_cn/layers_cn/softshrink_cn.rst b/doc/fluid/api_cn/layers_cn/softshrink_cn.rst index 9fcf42e7d739a0ee61fc134abf890a0caf5b778b..b3a1af822dfe31a96ad9802e5b4795f1ea10624c 100644 --- a/doc/fluid/api_cn/layers_cn/softshrink_cn.rst +++ b/doc/fluid/api_cn/layers_cn/softshrink_cn.rst @@ -5,6 +5,12 @@ softshrink .. py:function:: paddle.fluid.layers.softshrink(x, alpha=None) +:alias_main: paddle.nn.functional.softshrink +:alias: paddle.nn.functional.softshrink,paddle.nn.functional.activation.softshrink +:old_api: paddle.fluid.layers.softshrink + + + Softshrink激活函数 .. math:: diff --git a/doc/fluid/api_cn/layers_cn/softsign_cn.rst b/doc/fluid/api_cn/layers_cn/softsign_cn.rst index e48af0025b33cb0bcea6731379197dca92961a06..ca46bf270800dd0764d000c9b3275a45553c0bb1 100644 --- a/doc/fluid/api_cn/layers_cn/softsign_cn.rst +++ b/doc/fluid/api_cn/layers_cn/softsign_cn.rst @@ -5,6 +5,12 @@ softsign .. 
py:function:: paddle.fluid.layers.softsign(x,name=None) +:alias_main: paddle.nn.functional.softsign +:alias: paddle.nn.functional.softsign,paddle.nn.functional.activation.softsign +:old_api: paddle.fluid.layers.softsign + + + softsign激活函数 diff --git a/doc/fluid/api_cn/layers_cn/space_to_depth_cn.rst b/doc/fluid/api_cn/layers_cn/space_to_depth_cn.rst index 0acf348857d88d7a557fc19295342b3b6a104905..bfa5dbbf5308f4d347effcbef18f55f131181653 100644 --- a/doc/fluid/api_cn/layers_cn/space_to_depth_cn.rst +++ b/doc/fluid/api_cn/layers_cn/space_to_depth_cn.rst @@ -5,6 +5,12 @@ space_to_depth .. py:function:: paddle.fluid.layers.space_to_depth(x, blocksize, name=None) +:alias_main: paddle.nn.functional.space_to_depth +:alias: paddle.nn.functional.space_to_depth,paddle.nn.functional.vision.space_to_depth +:old_api: paddle.fluid.layers.space_to_depth + + + 该OP对成块的空间数据进行重组,输出一个输入张量的拷贝,其高度和宽度维度上的值移至通道维度。 重组时,依据 ``blocksize`` 指明的数据块大小, 对形为 ``[batch, channel, height, width]`` 的输入张量进行space_to_depth(广度至深度)运算,生成形为 ``[batch, channel * blocksize * blocksize, height/blocksize, width/blocksize]`` 的输出: diff --git a/doc/fluid/api_cn/layers_cn/spectral_norm_cn.rst b/doc/fluid/api_cn/layers_cn/spectral_norm_cn.rst index 5576ecc0fe10405fd75d22ed010917877b0dd142..9ca240e3c7758177fba78b97db6b27d046091340 100644 --- a/doc/fluid/api_cn/layers_cn/spectral_norm_cn.rst +++ b/doc/fluid/api_cn/layers_cn/spectral_norm_cn.rst @@ -3,10 +3,13 @@ spectral_norm ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.layers.spectral_norm(weight, dim=0, power_iters=1, eps=1e-12, name=None) +:api_attr: 声明式编程模式(静态图) + + + **Spectral Normalization Layer** 该OP用于计算了fc、conv1d、conv2d、conv3d层的权重参数的谱正则值,输入权重参数应分别为2-D, 3-D, 4-D, 5-D张量,输出张量与输入张量shape相同。谱特征值计算方式如下。 diff --git a/doc/fluid/api_cn/layers_cn/split_cn.rst b/doc/fluid/api_cn/layers_cn/split_cn.rst index 9285a4b3971c2c1bb0d956dc68323e2dd5c8de74..260756ee823d1b3c341d7f1da5ecf631f692dff4 100644 --- a/doc/fluid/api_cn/layers_cn/split_cn.rst +++ b/doc/fluid/api_cn/layers_cn/split_cn.rst @@ -3,23 +3,21 @@ split ------------------------------- -.. py:function:: paddle.fluid.layers.split(input,num_or_sections,dim=-1,name=None) +.. 
py:function:: paddle.fluid.layers.split(input, num_or_sections, dim=-1, name=None) + + + 该OP将输入Tensor分割成多个子Tensor。 参数: - - **input** (Variable) - 输入变量,数据类型为float32,float64,int32,int64的多维Tensor或者LoDTensor。 + - **input** (Tensor) - 输入变量,数据类型为bool, float16,float32,float64,int32,int64的多维Tensor。 - **num_or_sections** (int|list|tuple) - 如果 ``num_or_sections`` 是一个整数,则表示Tensor平均划分为相同大小子Tensor的数量。如果 ``num_or_sections`` 是一个list或tuple,那么它的长度代表子Tensor的数量,它的元素可以是整数或者形状为[1]的Tensor,依次代表子Tensor需要分割成的维度的大小。list或tuple的长度不能超过输入Tensor待分割的维度的大小。至多有一个元素值为-1,-1表示该值是由 ``input`` 待分割的维度值和 ``num_or_sections`` 的剩余元素推断出来的。 - - **dim** (int|Variable,可选) - 整数或者形状为[1]的Tensor,数据类型为int32或int64。表示需要分割的维度。如果dim < 0,则划分的维度为rank(input) + dim。默认值为-1。 + - **dim** (int|Tensor,可选) - 整数或者形状为[1]的Tensor,数据类型为int32或int64。表示需要分割的维度。如果 ``dim < 0`` ,则划分的维度为 ``rank(input) + dim`` 。默认值为-1。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 返回:分割后的Tensor列表。 -返回类型:列表(Variable(Tensor|LoDTensor)),数据类型为int32,int64,float32,float64。 - -抛出异常: - - :code:`TypeError`:``num_or_sections`` 不是int、list 或 tuple。 - - :code:`TypeError`:``dim`` 不是 int 或 Variable。 **代码示例**: @@ -27,27 +25,31 @@ split import paddle.fluid as fluid - # 输入是维度为[3, 9, 5]的Tensor: + # input is a Tensor whose shape is [3, 9, 5] input = fluid.data( name="input", shape=[3, 9, 5], dtype="float32") - # 传入num_or_sections为一个整数 - x0, x1, x2 = fluid.layers.split(input, num_or_sections=3, dim=1) - x0.shape # [3, 3, 5] - x1.shape # [3, 3, 5] - x2.shape # [3, 3, 5] - - # 传入num_or_sections为一个整数列表 - x0, x1, x2 = fluid.layers.split(input, num_or_sections=[2, 3, 4], dim=1) - x0.shape # [3, 2, 5] - x1.shape # [3, 3, 5] - x2.shape # [3, 4, 5] - - # 传入num_or_sections为一个整数列表,其中有一个元素为-1 - x0, x1, x2 = fluid.layers.split(input, num_or_sections=[2, 3, -1], dim=1) - x0.shape # [3, 2, 5] - x1.shape # [3, 3, 5] - x2.shape # [3, 4, 5] + out0, out1, out2 = fluid.layers.split(input, num_or_sections=3, dim=1) + # out0.shape [3, 3, 5] + # out1.shape [3, 3, 5] + # out2.shape [3, 3, 5] + + out0, out1, out2 = fluid.layers.split(input, num_or_sections=[2, 3, 4], dim=1) + # out0.shape [3, 2, 5] + # out1.shape [3, 3, 5] + # out2.shape [3, 4, 5] + + out0, out1, out2 = fluid.layers.split(input, num_or_sections=[2, 3, -1], dim=1) + # out0.shape [3, 2, 5] + # out1.shape [3, 3, 5] + # out2.shape [3, 4, 5] + + # dim is negative, the real dim is (rank(input) + dim), whose real + # value is 1. + out0, out1, out2 = fluid.layers.split(input, num_or_sections=3, dim=-2) + # out0.shape [3, 3, 5] + # out1.shape [3, 3, 5] + # out2.shape [3, 3, 5] diff --git a/doc/fluid/api_cn/layers_cn/sqrt_cn.rst b/doc/fluid/api_cn/layers_cn/sqrt_cn.rst index ac2d4f1bdcbbe972c3497d5a2f18f9e80ff78ae0..779ab45fbf975d0cc9538cd7f2d68f0194e8bc64 100644 --- a/doc/fluid/api_cn/layers_cn/sqrt_cn.rst +++ b/doc/fluid/api_cn/layers_cn/sqrt_cn.rst @@ -5,6 +5,9 @@ sqrt .. py:function:: paddle.fluid.layers.sqrt(x, name=None) + + + 计算输入的算数平方根。 .. math:: out=\sqrt x=x^{1/2} diff --git a/doc/fluid/api_cn/layers_cn/square_cn.rst b/doc/fluid/api_cn/layers_cn/square_cn.rst index 244476502b9b73637b99b7278fcfd2c46f7aaba2..8c2a73306c51ca5ac4fd3d67f7f54549e7e3d222 100644 --- a/doc/fluid/api_cn/layers_cn/square_cn.rst +++ b/doc/fluid/api_cn/layers_cn/square_cn.rst @@ -5,6 +5,12 @@ square .. py:function:: paddle.fluid.layers.square(x,name=None) +:alias_main: paddle.square +:alias: paddle.square,paddle.tensor.square,paddle.tensor.math.square +:old_api: paddle.fluid.layers.square + + + 该OP执行逐元素取平方运算。 ..
math:: diff --git a/doc/fluid/api_cn/layers_cn/square_error_cost_cn.rst b/doc/fluid/api_cn/layers_cn/square_error_cost_cn.rst index d740d23dd58c7b7726699481db24d15cd3c86e86..8fbec73a269091f9d671e11d1f182a98140272ed 100644 --- a/doc/fluid/api_cn/layers_cn/square_error_cost_cn.rst +++ b/doc/fluid/api_cn/layers_cn/square_error_cost_cn.rst @@ -5,6 +5,12 @@ square_error_cost .. py:function:: paddle.fluid.layers.square_error_cost(input,label) +:alias_main: paddle.nn.functional.square_error_cost +:alias: paddle.nn.functional.square_error_cost,paddle.nn.functional.loss.square_error_cost +:old_api: paddle.fluid.layers.square_error_cost + + + 该OP用于计算预测值和目标值的方差估计。 对于预测值input和目标值label,公式为: diff --git a/doc/fluid/api_cn/layers_cn/squeeze_cn.rst b/doc/fluid/api_cn/layers_cn/squeeze_cn.rst index 24f2fca4e65318faff4637bda16d950f67565211..026f38455ff2e877cb13952e0c446427196ee83a 100644 --- a/doc/fluid/api_cn/layers_cn/squeeze_cn.rst +++ b/doc/fluid/api_cn/layers_cn/squeeze_cn.rst @@ -5,6 +5,9 @@ squeeze .. py:function:: paddle.fluid.layers.squeeze(input, axes, name=None) + + + 该OP会根据axes压缩输入Tensor的维度。如果指定了axes,则会删除axes中指定的维度,axes指定的维度要等于1。如果没有指定axes,那么所有等于1的维度都会被删除。 - 例1: diff --git a/doc/fluid/api_cn/layers_cn/ssd_loss_cn.rst b/doc/fluid/api_cn/layers_cn/ssd_loss_cn.rst index 1db6b6723744cd658ba2f46a044b43bd141e30cb..da9949a0eea3405be4372eecb2e673e0ad0166d0 100644 --- a/doc/fluid/api_cn/layers_cn/ssd_loss_cn.rst +++ b/doc/fluid/api_cn/layers_cn/ssd_loss_cn.rst @@ -5,6 +5,12 @@ ssd_loss .. py:function:: paddle.fluid.layers.ssd_loss(location, confidence, gt_box, gt_label, prior_box, prior_box_var=None, background_label=0, overlap_threshold=0.5, neg_pos_ratio=3.0, neg_overlap=0.5, loc_loss_weight=1.0, conf_loss_weight=1.0, match_type='per_prediction', mining_type='max_negative', normalize=True, sample_size=None) +:alias_main: paddle.nn.functional.ssd_loss +:alias: paddle.nn.functional.ssd_loss,paddle.nn.functional.loss.ssd_loss +:old_api: paddle.fluid.layers.ssd_loss + + + 该OP用于SSD物体检测算法的多窗口损失层 该层用于计算SSD的损失,给定位置偏移预测,置信度预测,候选框和真实框标签,以及难样本挖掘的类型。通过执行以下步骤,返回的损失是本地化损失(或回归损失)和置信度损失(或分类损失)的加权和: diff --git a/doc/fluid/api_cn/layers_cn/stack_cn.rst b/doc/fluid/api_cn/layers_cn/stack_cn.rst index 910084fb075922f5994e129e9b610d34a8213c65..928d3c5bc1164b8fd08cfde572186d70d3a4e93f 100644 --- a/doc/fluid/api_cn/layers_cn/stack_cn.rst +++ b/doc/fluid/api_cn/layers_cn/stack_cn.rst @@ -6,11 +6,14 @@ stack .. py:function:: paddle.fluid.layers.stack(x, axis=0) + + + 该OP沿 ``axis`` 轴对输入 ``x`` 进行堆叠操作。 - 例1: -.. code-block:: python +.. code-block:: text 输入: x[0].shape = [1, 2] @@ -32,7 +35,7 @@ stack - 例2: -.. code-block:: python +.. 
code-block:: text 输入: x[0].shape = [1, 2] @@ -52,7 +55,7 @@ stack [5.0, 6.0] ] ] 参数: - - **x** (Variable|list(Variable)) – 输入 x 可以是单个Tensor,或是多个Tensor组成的列表。如果 x 是一个列表,那么这些Tensor的维度必须相同。 假设输入是N维Tensor :math:`[d_0,d_1,...,d_{n−1}]`,则输出变量的维度为N+1维 :math:`[d_0,d_1,...d_{axis-1},len(x),d_{axis}...,d_{n−1}]` 。支持的数据类型: float32,float64,int32,int64。 + - **x** (list(Variable)|tuple(Variable)) – 输入 x 是多个Tensor,且这些Tensor的维度和数据类型必须相同。支持的数据类型: float32,float64,int32,int64。 - **axis** (int, 可选) – 指定对输入Tensor进行堆叠运算的轴,有效 ``axis`` 的范围是: :math:`[-(R+1), R+1)`,R是输入中第一个Tensor的rank。如果 ``axis`` < 0,则 :math:`axis=axis+rank(x[0])+1` 。axis默认值为0。 返回: 堆叠运算后的Tensor,数据类型与输入Tensor相同。输出维度等于 :math:`rank(x[0])+1` 维。 @@ -65,19 +68,10 @@ stack import paddle.fluid as fluid import paddle.fluid.layers as layers - x1 = layers.data(name='x1', shape=[1, 2], dtype='int32') - x2 = layers.data(name='x2', shape=[1, 2], dtype='int32') - #对Tensor List进行堆叠 - data = layers.stack([x1,x2]) # 沿着第0轴进行堆叠,data.shape=[2, 1, 2] - - data = layers.stack([x1,x2], axis=1) # 沿着第1轴进行堆叠,data.shape=[1, 2, 2] - - #单个Tensor的堆叠 - data = layers.stack(x1) # 沿着第0轴进行堆叠,data.shape=[1, 1, 2] - - - - - - + # set batch size=None + x1 = fluid.data(name='x1', shape=[None, 1, 2], dtype='int32') + x2 = fluid.data(name='x2', shape=[None, 1, 2], dtype='int32') + # stack Tensor list + data = layers.stack([x1,x2]) # stack according to axis 0, data.shape=[2, None, 1, 2] + data = layers.stack([x1,x2], axis=1) # stack according to axis 1, data.shape=[None, 2, 1, 2] diff --git a/doc/fluid/api_cn/layers_cn/stanh_cn.rst b/doc/fluid/api_cn/layers_cn/stanh_cn.rst index 156c9058de1678433973978fbcab59ffe4c591fa..d592060093583d3791ba6d4ed3f0d038c603ded5 100644 --- a/doc/fluid/api_cn/layers_cn/stanh_cn.rst +++ b/doc/fluid/api_cn/layers_cn/stanh_cn.rst @@ -5,6 +5,12 @@ stanh .. py:function:: paddle.fluid.layers.stanh(x, scale_a=0.67, scale_b=1.7159, name=None) +:alias_main: paddle.stanh +:alias: paddle.stanh,paddle.tensor.stanh,paddle.tensor.math.stanh +:old_api: paddle.fluid.layers.stanh + + + STanh 激活算子(STanh Activation Operator.) .. math:: diff --git a/doc/fluid/api_cn/layers_cn/strided_slice_cn.rst b/doc/fluid/api_cn/layers_cn/strided_slice_cn.rst index bef16871f5a672fc149454390a075cfbfeb5c599..164559652c31746cd9e77d182e43263839fc5451 100644 --- a/doc/fluid/api_cn/layers_cn/strided_slice_cn.rst +++ b/doc/fluid/api_cn/layers_cn/strided_slice_cn.rst @@ -3,6 +3,12 @@ strided_slice ------------------------------- .. py:function:: paddle.fluid.layers.strided_slice(input, axes, starts, ends, strides) + +:alias_main: paddle.strided_slice +:alias: paddle.strided_slice,paddle.tensor.strided_slice,paddle.tensor.manipulation.strided_slice +:old_api: paddle.fluid.layers.strided_slice + + strided_slice算子。 该OP沿多个轴生成 ``input`` 的切片,与numpy类似: https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html。该OP使用 ``axes`` 、 ``starts`` 和 ``ends`` 属性来指定轴列表中每个轴的起点和终点位置,并使用此信息来对 ``input`` 切片。如果向 ``starts`` 或 ``ends`` 传递负值如 :math:`-i`,则表示该轴的反向第 :math:`i-1` 个位置(这里以0为初始位置), ``strides`` 表示切片的步长, ``strides`` 如果为负数,则按照反方向进行切片。如果传递给 ``starts`` 或 ``ends`` 的值大于n(维度中的元素数目),则表示n。当切片一个未知数量的维度时,建议传入 ``INT_MAX``。 ``axes`` 、 ``starts`` 和 ``ends`` 以及 ``strides`` 四个参数的元素数目必须相等。以下示例将解释切片如何工作: diff --git a/doc/fluid/api_cn/layers_cn/sum_cn.rst b/doc/fluid/api_cn/layers_cn/sum_cn.rst index 237d1b7e1048d60e4c7a28e13224e778a2010689..0dc93e01cf6a6f44293843654f20a6214009e559 100755 --- a/doc/fluid/api_cn/layers_cn/sum_cn.rst +++ b/doc/fluid/api_cn/layers_cn/sum_cn.rst @@ -5,6 +5,9 @@ sum .. 
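To make the ``axes`` / ``starts`` / ``ends`` / ``strides`` semantics of ``strided_slice`` described above concrete, here is a small sketch; the input shape [3, 4, 5, 6] and the slicing values are illustrative assumptions.

.. code-block:: python

    import paddle.fluid as fluid

    input = fluid.data(name='input', shape=[3, 4, 5, 6], dtype='float32')
    # slice axis 1 over [1, 2) and axis 2 over [0, 3), both with stride 1;
    # axes not listed in `axes` are kept untouched
    out = fluid.layers.strided_slice(
        input, axes=[1, 2], starts=[1, 0], ends=[2, 3], strides=[1, 1])
    # out.shape: [3, 1, 3, 6]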
py:function:: paddle.fluid.layers.sum(x) + + + 该OP用于对输入的一至多个Tensor或LoDTensor求和。如果输入的是LoDTensor,输出仅与第一个输入共享LoD信息(序列信息)。 例1: diff --git a/doc/fluid/api_cn/layers_cn/sums_cn.rst b/doc/fluid/api_cn/layers_cn/sums_cn.rst index 8db8f0b5a3cd7d20fb6777e020b775dd6bafc5ad..7bec1960d2a080198cf59f74ca997883cb935e94 100644 --- a/doc/fluid/api_cn/layers_cn/sums_cn.rst +++ b/doc/fluid/api_cn/layers_cn/sums_cn.rst @@ -5,6 +5,12 @@ sums .. py:function:: paddle.fluid.layers.sums(input,out=None) +:alias_main: paddle.sums +:alias: paddle.sums,paddle.tensor.sums,paddle.tensor.math.sums +:old_api: paddle.fluid.layers.sums + + + 该OP计算多个输入Tensor逐个元素相加的和。 - 示例:3个Tensor求和 diff --git a/doc/fluid/api_cn/layers_cn/swish_cn.rst b/doc/fluid/api_cn/layers_cn/swish_cn.rst index 0ac085c6130d5dba3f52a5f7694cdd30bfb8e8e3..85ab54d4c1830bdac80f80e1e1454dc3e0308808 100644 --- a/doc/fluid/api_cn/layers_cn/swish_cn.rst +++ b/doc/fluid/api_cn/layers_cn/swish_cn.rst @@ -5,6 +5,12 @@ swish .. py:function:: paddle.fluid.layers.swish(x, beta=1.0, name=None) +:alias_main: paddle.nn.functional.swish +:alias: paddle.nn.functional.swish,paddle.nn.functional.activation.swish +:old_api: paddle.fluid.layers.swish + + + 逐元素计算 Swish 激活函数,参考 `Searching for Activation Functions `_ 。 .. math:: diff --git a/doc/fluid/api_cn/layers_cn/switch_case_cn.rst b/doc/fluid/api_cn/layers_cn/switch_case_cn.rst index d3a86c22ef256170fc3fd0ef84cc82a5168c1cd4..a5b2766d722610d14738c437a485a660eb6e1c66 100644 --- a/doc/fluid/api_cn/layers_cn/switch_case_cn.rst +++ b/doc/fluid/api_cn/layers_cn/switch_case_cn.rst @@ -3,10 +3,16 @@ switch_case ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.layers.switch_case(branch_index, branch_fns, default=None, name=None) +:api_attr: 声明式编程模式(静态图) +:alias_main: paddle.nn.switch_case +:alias: paddle.nn.switch_case,paddle.nn.control_flow.switch_case +:old_api: paddle.fluid.layers.switch_case + + + 该OP的运行方式类似于c++的switch/case。 参数: diff --git a/doc/fluid/api_cn/layers_cn/tanh_cn.rst b/doc/fluid/api_cn/layers_cn/tanh_cn.rst index d7be5afb50af993503fa538d844aafb10ab4f09f..c5fbd21242439402ebda6b4fcd5e6aedec699aa7 100644 --- a/doc/fluid/api_cn/layers_cn/tanh_cn.rst +++ b/doc/fluid/api_cn/layers_cn/tanh_cn.rst @@ -6,6 +6,9 @@ tanh .. py:function:: paddle.fluid.layers.tanh(x, name=None) + + + tanh 激活函数 .. math:: diff --git a/doc/fluid/api_cn/layers_cn/tanh_shrink_cn.rst b/doc/fluid/api_cn/layers_cn/tanh_shrink_cn.rst index f2f225ab97dbf2b08f8d56c649d414702546d3e4..6002d1216918b363c9c7a890d40456c37e9b88f5 100644 --- a/doc/fluid/api_cn/layers_cn/tanh_shrink_cn.rst +++ b/doc/fluid/api_cn/layers_cn/tanh_shrink_cn.rst @@ -5,6 +5,12 @@ tanh_shrink .. py:function:: paddle.fluid.layers.tanh_shrink(x, name=None) +:alias_main: paddle.nn.functional.tanh_shrink +:alias: paddle.nn.functional.tanh_shrink,paddle.nn.functional.activation.tanh_shrink +:old_api: paddle.fluid.layers.tanh_shrink + + + tanh_shrink激活函数 .. math:: diff --git a/doc/fluid/api_cn/layers_cn/target_assign_cn.rst b/doc/fluid/api_cn/layers_cn/target_assign_cn.rst index fa8741c6b83570854396478fb8d33999b6ae0a5d..61bd9787dad47e8778d9259dd2d2c4dbbefeddd8 100644 --- a/doc/fluid/api_cn/layers_cn/target_assign_cn.rst +++ b/doc/fluid/api_cn/layers_cn/target_assign_cn.rst @@ -5,6 +5,12 @@ target_assign .. 
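A short sketch of ``sums`` adding three tensors element-wise, using ``fill_constant`` inputs; the shapes and constant values are arbitrary and only meant to show that every output entry is the element-wise total.

.. code-block:: python

    import paddle.fluid as fluid

    x0 = fluid.layers.fill_constant(shape=[2, 3], dtype='int64', value=1)
    x1 = fluid.layers.fill_constant(shape=[2, 3], dtype='int64', value=2)
    x2 = fluid.layers.fill_constant(shape=[2, 3], dtype='int64', value=3)
    # element-wise sum of the three inputs; every entry of `summed` is 6
    summed = fluid.layers.sums(input=[x0, x1, x2])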
py:function:: paddle.fluid.layers.target_assign(input, matched_indices, negative_indices=None, mismatch_value=None, name=None) +:alias_main: paddle.nn.functional.target_assign +:alias: paddle.nn.functional.target_assign,paddle.nn.functional.extension.target_assign +:old_api: paddle.fluid.layers.target_assign + + + 对于每个实例,根据 ``match_indices`` 和 ``negative_indices`` 位置索引, 给输入 ``out`` 和 ``out_weight`` 赋值。输入 ``input`` 和 ``negative_indices`` 均为2-D LoDTensor。假如 ``input`` 中每个实例的行偏移称作lod,该操作计算步骤如下: 1.根据match_indices赋值: diff --git a/doc/fluid/api_cn/layers_cn/teacher_student_sigmoid_loss_cn.rst b/doc/fluid/api_cn/layers_cn/teacher_student_sigmoid_loss_cn.rst index d2ac246563d7a622d9a40537b50a346946be3ce1..84d1000cf3ee645af81f4f1588043c349fe30d0a 100644 --- a/doc/fluid/api_cn/layers_cn/teacher_student_sigmoid_loss_cn.rst +++ b/doc/fluid/api_cn/layers_cn/teacher_student_sigmoid_loss_cn.rst @@ -5,6 +5,12 @@ teacher_student_sigmoid_loss .. py:function:: paddle.fluid.layers.teacher_student_sigmoid_loss(input, label, soft_max_up_bound=15.0, soft_max_lower_bound=-15.0) +:alias_main: paddle.nn.functional.teacher_student_sigmoid_loss +:alias: paddle.nn.functional.teacher_student_sigmoid_loss,paddle.nn.functional.loss.teacher_student_sigmoid_loss +:old_api: paddle.fluid.layers.teacher_student_sigmoid_loss + + + **Teacher Student Log Loss Layer(教师--学生对数损失层)** 定制化需求,用于student萃取teacher的值。此图层接受输入预测和目标标签,并返回teacher_student损失。 diff --git a/doc/fluid/api_cn/layers_cn/temporal_shift_cn.rst b/doc/fluid/api_cn/layers_cn/temporal_shift_cn.rst index 278786d7a529ec89ee821d0720d2e4eb471aaa6d..acde7a2951aeaec7760094c27332bbe023da3cdc 100644 --- a/doc/fluid/api_cn/layers_cn/temporal_shift_cn.rst +++ b/doc/fluid/api_cn/layers_cn/temporal_shift_cn.rst @@ -4,6 +4,12 @@ temporal_shift ------------------------------- .. py:function:: paddle.fluid.layers.temporal_shift(x, seg_num, shift_ratio=0.25, name=None) +:alias_main: paddle.nn.functional.temporal_shift +:alias: paddle.nn.functional.temporal_shift,paddle.nn.functional.extension.temporal_shift +:old_api: paddle.fluid.layers.temporal_shift + + + 该OP用于对输入X做时序通道T上的位移操作,为TSM(Temporal Shift Module)中使用的操作。 输入(X)的形状应为[N*T, C, H, W],N是批大小,T是 ``seg_num`` 指定的时间段号,C是通道号,H和W是特征的高度和宽度。 diff --git a/doc/fluid/api_cn/layers_cn/tensor_array_to_tensor_cn.rst b/doc/fluid/api_cn/layers_cn/tensor_array_to_tensor_cn.rst index f2bbad0957b9bc206855e720df6c96428db96197..777b9a572d2177368b3d2b0f9b091ac426a4d9b6 100644 --- a/doc/fluid/api_cn/layers_cn/tensor_array_to_tensor_cn.rst +++ b/doc/fluid/api_cn/layers_cn/tensor_array_to_tensor_cn.rst @@ -5,6 +5,9 @@ tensor_array_to_tensor .. py:function:: paddle.fluid.layers.tensor_array_to_tensor(input, axis=1, name=None, use_stack=False) + + + 该OP将 ``input`` 这个LoDTensorArray中的所有Tensor沿 ``axis`` 指定的轴进行拼接(concat)或堆叠(stack)。 示例: diff --git a/doc/fluid/api_cn/layers_cn/thresholded_relu_cn.rst b/doc/fluid/api_cn/layers_cn/thresholded_relu_cn.rst index a070b80d6b7f9d572add8b4f2b52f583fb5786dd..e4942697680025e6145c476612d3a1668e643f47 100644 --- a/doc/fluid/api_cn/layers_cn/thresholded_relu_cn.rst +++ b/doc/fluid/api_cn/layers_cn/thresholded_relu_cn.rst @@ -5,6 +5,12 @@ thresholded_relu .. py:function:: paddle.fluid.layers.thresholded_relu(x,threshold=None) +:alias_main: paddle.nn.functional.thresholded_relu +:alias: paddle.nn.functional.thresholded_relu,paddle.nn.functional.activation.thresholded_relu +:old_api: paddle.fluid.layers.thresholded_relu + + + 逐元素计算 ThresholdedRelu激活函数。 .. 
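A minimal sketch of ``thresholded_relu`` as an element-wise activation in a static-graph program; the input shape and the ``threshold=0.1`` value are illustrative assumptions.

.. code-block:: python

    import paddle.fluid as fluid

    x = fluid.data(name='x', shape=[None, 10], dtype='float32')
    # elements greater than the threshold are kept, all others become 0
    y = fluid.layers.thresholded_relu(x, threshold=0.1)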
math:: diff --git a/doc/fluid/api_cn/layers_cn/topk_cn.rst b/doc/fluid/api_cn/layers_cn/topk_cn.rst index 17a1b8efd7e9eceb420d478958e6504dd999136f..acef758bbb5c06c5734742830c2b046f372495af 100644 --- a/doc/fluid/api_cn/layers_cn/topk_cn.rst +++ b/doc/fluid/api_cn/layers_cn/topk_cn.rst @@ -4,6 +4,12 @@ topk ------------------------------- .. py:function:: paddle.fluid.layers.topk(input, k, name=None) +:alias_main: paddle.topk +:alias: paddle.topk,paddle.tensor.topk,paddle.tensor.search.topk +:old_api: paddle.fluid.layers.topk + + + 此OP用于查找输入Tensor的最后一维的前k个最大项,返回它们的值和索引。 如果输入是1-D Tensor,则找到Tensor的前k个最大项,并输出前k个最大项的值和索引。如果输入是更高阶的Tensor,则该OP会基于最后一维计算前k项。 diff --git a/doc/fluid/api_cn/layers_cn/transpose_cn.rst b/doc/fluid/api_cn/layers_cn/transpose_cn.rst index f8fba3a0e57ee9f83847f5a6bdcd3a993ab6984a..b9d6bfb644cbd2210c2ab22818c4ca0609eba906 100644 --- a/doc/fluid/api_cn/layers_cn/transpose_cn.rst +++ b/doc/fluid/api_cn/layers_cn/transpose_cn.rst @@ -5,6 +5,12 @@ transpose .. py:function:: paddle.fluid.layers.transpose(x,perm,name=None) +:alias_main: paddle.transpose +:alias: paddle.transpose,paddle.tensor.transpose,paddle.tensor.linalg.transpose,paddle.tensor.manipulation.transpose +:old_api: paddle.fluid.layers.transpose + + + 该OP根据perm对输入的多维Tensor进行数据重排。返回多维Tensor的第i维对应输入Tensor的perm[i]维。 参数: diff --git a/doc/fluid/api_cn/layers_cn/unfold_cn.rst b/doc/fluid/api_cn/layers_cn/unfold_cn.rst index 663b64807c1f386f38247baa90643f2afe12800f..4422467062eb9865d286f8a08faf834b4d256c76 100644 --- a/doc/fluid/api_cn/layers_cn/unfold_cn.rst +++ b/doc/fluid/api_cn/layers_cn/unfold_cn.rst @@ -5,6 +5,12 @@ unfold .. py:function:: paddle.fluid.layers.unfold(x, kernel_size, strides=1, paddings=0, dilation=1, name=None) +:alias_main: paddle.nn.functional.unfold +:alias: paddle.nn.functional.unfold,paddle.nn.functional.common.unfold +:old_api: paddle.fluid.layers.unfold + + + 该OP实现的功能与卷积中用到的im2col函数一样,通常也被称作为im2col过程。对于每一个卷积核覆盖下的区域,元素会被重新排成一列。当卷积核在整个图片上滑动时,将会形成一系列的列向量。对于每一个输入形状为[N, C, H, W]的 ``x`` ,都将会按照下面公式计算出一个形状为[N, Cout, Lout]的输出。 diff --git a/doc/fluid/api_cn/layers_cn/uniform_random_batch_size_like_cn.rst b/doc/fluid/api_cn/layers_cn/uniform_random_batch_size_like_cn.rst deleted file mode 100644 index a9856492d6e380855b689321fca4cf3b1d9849fb..0000000000000000000000000000000000000000 --- a/doc/fluid/api_cn/layers_cn/uniform_random_batch_size_like_cn.rst +++ /dev/null @@ -1,68 +0,0 @@ -.. _cn_api_fluid_layers_uniform_random_batch_size_like: - -uniform_random_batch_size_like -------------------------------- - -.. 
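The permutation rule of ``transpose`` (output dimension i takes input dimension perm[i]) is easiest to see with a concrete shape; the [2, 3, 4] input below is an assumed example.

.. code-block:: python

    import paddle.fluid as fluid

    x = fluid.data(name='x', shape=[2, 3, 4], dtype='float32')
    # output dimension i corresponds to input dimension perm[i]
    out = fluid.layers.transpose(x, perm=[1, 0, 2])
    # out.shape: [3, 2, 4]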
py:function:: paddle.fluid.layers.uniform_random_batch_size_like(input, shape, dtype='float32', input_dim_idx=0, output_dim_idx=0, min=-1.0, max=1.0, seed=0) - -该OP使用从范围[min,max)内均匀分布采样的随机值初始化一个Tensor,且输出Tensor的 -指定维度将被设置为与输入Tensor指定维度相同的值。 - -:: - - 示例1: - 给定: - input =[[0.946741 , 0.1357001 , 0.38086128]] # input.shape=[1,3] - shape=[2,4] - 此时,output_dim_idx = 0, input_dim_idx = 0,result.shape[0] = input.shape[0] - 则: - result=[[ 0.3443427 , -0.23056602, 0.3477049 , 0.06139076]] # result.shape=[1,4] - - 示例2: - 给定: - input =[[0.946741 , 0.1357001 , 0.38086128]] # input.shape=[1,3] - shape=[2,4] - input_dim_idx=1 - output_dim_idx=1 - 此时,output_dim_idx = 1, input_dim_idx = 1,result.shape[1] = input.shape[1] - 则: - result=[[-0.23133647, -0.84195036, 0.21441269], - [-0.08774924, 0.25605237, -0.09403259]] # result.shape=[2,3] - -参数: - - **input** (Variable)- 输入Tensor,支持的数据类型:float32。 - - **shape** (list|tuple)- 输出Tensor的维度,类型为list或者tuple。支持的数据类型:int。 - - **input_dim_idx** (int,可选)- 输入Tensor指定维度的索引。该参数指定输入Tensor维度的值将用于调整输出Tensor维度的大小。支持的数据类型:int。默认值为0。 - - **output_dim_idx** (int,可选)- 输出Tensor指定维度的索引。该参数指定输出Tensor的维度将被设置为与输入Tensor指定维度相同的值。支持的数据类型:int。默认值为0。 - - **min** (float,可选)- 要生成的随机值范围的下限,min包含在范围中。支持的数据类型:float。默认值为 1.0。 - - **max** (float,可选)- 要生成的随机值范围的上限,max不包含在范围中。支持的数据类型:float。默认值为1.0。 - - **seed** (int,可选)- 用于生成样本的随机种子。0表示使用系统生成的种子,数据类型为int。注意如果seed不为0,则此算子将始终每次生成相同的随机数。支持的数据类型:int。默认值为0。 - - **dtype** (np.dtype | core.VarDesc.VarType | str,可选) - 输出Tensor的数据类型。支持的数据类型:float32, float64,默认值为float32。 - -返回: 表示随机初始化结果的Tensor,数据类型由dtype参数设置,该Tensor的维度由shape参数和输入Tensor的指定维度共同决定。 - -返回类型: Variable - - -**代码示例:** - -.. code-block:: python - - import paddle.fluid as fluid - import paddle.fluid.layers as layers - - - input = fluid.data(name="input", shape=[13, 11], dtype='float32') - # examp 1: - # input_dim_idx和output_dim_idx使用默认值 - out1 = layers.uniform_random_batch_size_like(input, [3, 5]) - out1_shape = layers.shape(out1) # [13,5] - - # example 2: - # input_dim_idx和output_dim_idx使用指定值 - out2=layers.uniform_random_batch_size_like(input, [3, 5], input_dim_idx=1, output_dim_idx=1) - out2_shape = layers.shape(out2) # [3,11] - - - - diff --git a/doc/fluid/api_cn/layers_cn/uniform_random_cn.rst b/doc/fluid/api_cn/layers_cn/uniform_random_cn.rst index 20f19d1c8850c48fc497d66977318d6466857f0e..14e921926463fb4f02aa1b1bf133e2bc2f8c9bd7 100644 --- a/doc/fluid/api_cn/layers_cn/uniform_random_cn.rst +++ b/doc/fluid/api_cn/layers_cn/uniform_random_cn.rst @@ -3,9 +3,12 @@ uniform_random ------------------------------- -.. py:function:: paddle.fluid.layers.uniform_random(shape, dtype='float32', min=-1.0, max=1.0, seed=0) +.. 
py:function:: paddle.fluid.layers.uniform_random(shape, dtype='float32', min=-1.0, max=1.0, seed=0, name=None) -该OP使用从范围[min,max)内均匀分布采样的随机值初始化一个Tensor。 + + + +该OP返回数值服从范围[``min``, ``max``)内均匀分布的随机Tensor,形状为 ``shape``,数据类型为 ``dtype``。 :: @@ -16,18 +19,19 @@ uniform_random result=[[0.8505902, 0.8397286]] 参数: - - **shape** (list|tuple|Variable)-输出Tensor的维度,shape类型支持list,tuple,Variable。如果shape类型是list或者tuple,它的元素可以是整数或者形状为[1]的Tensor,其中整数的数据类型为int,Tensor的数据类型为int32或int64。如果shape的类型是Variable,则是1D的Tensor,Tensor的数据类型为int32或int64。 - - **dtype** (np.dtype|core.VarDesc.VarType|str,可选) – 输出Tensor的数据类型,支持float32(默认), float64。 - - **min** (float,可选)-要生成的随机值范围的下限,min包含在范围中。支持的数据类型:float。默认值为-1.0。 - - **max** (float,可选)-要生成的随机值范围的上限,max不包含在范围中。支持的数据类型:float。默认值为1.0。 - - **seed** (int,可选)-随机种子,用于生成样本。0表示使用系统生成的种子。注意如果种子不为0,该操作符每次都生成同样的随机数。支持的数据类型:int。默认为 0。 + - **shape** (list|tuple|Tensor) - 生成的随机Tensor的形状。如果 ``shape`` 是list、tuple,则其中的元素可以是int,或者是形状为[1]且数据类型为int32、int64的Tensor。如果 ``shape`` 是Tensor,则是数据类型为int32、int64的1-D Tensor。 + - **dtype** (str|np.dtype|core.VarDesc.VarType, 可选) - 输出Tensor的数据类型,支持float32、float64。默认值为float32。 + - **min** (float|int,可选) - 要生成的随机值范围的下限,min包含在范围中。支持的数据类型:float、int。默认值为-1.0。 + - **max** (float|int,可选) - 要生成的随机值范围的上限,max不包含在范围中。支持的数据类型:float、int。默认值为1.0。 + - **seed** (int,可选) - 随机种子,用于生成样本。0表示使用系统生成的种子。注意如果种子不为0,该操作符每次都生成同样的随机数。支持的数据类型:int。默认为 0。 + - **name** (str, 可选) - 输出的名字。一般无需设置,默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 -返回:表示一个随机初始化结果的Tensor,该Tensor的数据类型由dtype参数决定,该Tensor的维度由shape参数决定。 - -返回类型:Variable +返回: + Tensor:数值服从范围[``min``, ``max``)内均匀分布的随机Tensor,形状为 ``shape``,数据类型为 ``dtype``。 抛出异常: - - :code:`TypeError`: shape的类型应该是list、tuple 或 Variable。 + - ``TypeError`` - 如果 ``shape`` 的类型不是list、tuple、Tensor。 + - ``TypeError`` - 如果 ``dtype`` 不是float32、float64。 **代码示例**: @@ -40,17 +44,17 @@ uniform_random train_program = fluid.Program() with fluid.program_guard(train_program, startup_program): # example 1: - # attr shape is a list which doesn't contain tensor Variable. + # attr shape is a list which doesn't contain Tensor. result_1 = fluid.layers.uniform_random(shape=[3, 4]) # example 2: - # attr shape is a list which contains tensor Variable. + # attr shape is a list which contains Tensor. dim_1 = fluid.layers.fill_constant([1],"int64",3) dim_2 = fluid.layers.fill_constant([1],"int32",5) result_2 = fluid.layers.uniform_random(shape=[dim_1, dim_2]) # example 3: - # attr shape is a Variable, the data type must be int32 or int64 + # attr shape is a Tensor, the data type must be int32 or int64 var_shape = fluid.data(name='var_shape', shape=[2], dtype="int64") result_3 = fluid.layers.uniform_random(var_shape) var_shape_int32 = fluid.data(name='var_shape_int32', shape=[2], dtype="int32") diff --git a/doc/fluid/api_cn/layers_cn/unique_cn.rst b/doc/fluid/api_cn/layers_cn/unique_cn.rst index 719f4e15dfc303b6d457aa24dbb7e0f9a3864d6c..362d511d8b2253fcfd9241f9da9b060420cbd8bf 100644 --- a/doc/fluid/api_cn/layers_cn/unique_cn.rst +++ b/doc/fluid/api_cn/layers_cn/unique_cn.rst @@ -8,8 +8,8 @@ unique unique为 ``x`` 返回一个unique张量和一个指向该unique张量的索引。 参数: - - **x** (Variable) - 一个1维输入张量 - - **dtype** (np.dtype|core.VarDesc.VarType|str) – 索引张量的类型,int32,int64。 + - **x** (Tensor) - 一个1维输入张量 + - **dtype** (np.dtype|str, 可选) – 索引张量的类型,应该为int32或者int64。默认:int32. 
返回:元组(out, index)。 ``out`` 为 ``x`` 的指定dtype的unique张量, ``index`` 是一个指向 ``out`` 的索引张量, 用户可以通过该函数来转换原始的 ``x`` 张量的索引。 @@ -21,7 +21,7 @@ unique为 ``x`` 返回一个unique张量和一个指向该unique张量的索引 import numpy as np import paddle.fluid as fluid - x = fluid.assign(np.array([2, 3, 3, 1, 5, 3], dtype='int32')) + x = fluid.layers.assign(np.array([2, 3, 3, 1, 5, 3], dtype='int32')) out, index = fluid.layers.unique(x) # out is [2, 3, 1, 5]; index is [0, 1, 1, 2, 3, 1] diff --git a/doc/fluid/api_cn/layers_cn/unique_with_counts_cn.rst b/doc/fluid/api_cn/layers_cn/unique_with_counts_cn.rst index b0f27acf1f109fd9d7af6fa32bd928f0f92abc6d..906ff316a3f77ab6756b36543bd0ca6e4b55fcf4 100644 --- a/doc/fluid/api_cn/layers_cn/unique_with_counts_cn.rst +++ b/doc/fluid/api_cn/layers_cn/unique_with_counts_cn.rst @@ -4,6 +4,12 @@ unique_with_counts ------------------------------- .. py:function:: paddle.fluid.layers.unique_with_counts(x, dtype='int32') + +:alias_main: paddle.unique_with_counts +:alias: paddle.unique_with_counts,paddle.tensor.unique_with_counts,paddle.tensor.manipulation.unique_with_counts +:old_api: paddle.fluid.layers.unique_with_counts + + 该OP对输入Tensor元素进行去重,获取去重后结果Tensor,同时获取去重后结果在原始输入中的计数Tensor以及在原始输入中的索引Tensor。 注:该OP仅支持 **CPU** ,同时仅支持 **Tensor** diff --git a/doc/fluid/api_cn/layers_cn/unsqueeze_cn.rst b/doc/fluid/api_cn/layers_cn/unsqueeze_cn.rst index 227f447c651433e31afa828cdc383780b04cd56e..3290bebea17c0e989232238b6f5c6c79639467fd 100644 --- a/doc/fluid/api_cn/layers_cn/unsqueeze_cn.rst +++ b/doc/fluid/api_cn/layers_cn/unsqueeze_cn.rst @@ -5,6 +5,9 @@ unsqueeze .. py:function:: paddle.fluid.layers.unsqueeze(input, axes, name=None) + + + 该OP向输入(input)的shape中一个或多个位置(axes)插入维度。 - 示例: diff --git a/doc/fluid/api_cn/layers_cn/unstack_cn.rst b/doc/fluid/api_cn/layers_cn/unstack_cn.rst index 50ac927a2d001334aa1bdb71bebd2b635dce803b..00b8cfe97a1409bc9b845690ba7ec3dd29a77c86 100644 --- a/doc/fluid/api_cn/layers_cn/unstack_cn.rst +++ b/doc/fluid/api_cn/layers_cn/unstack_cn.rst @@ -5,10 +5,16 @@ unstack .. py:function:: paddle.fluid.layers.unstack(x, axis=0, num=None) +:alias_main: paddle.unstack +:alias: paddle.unstack,paddle.tensor.unstack,paddle.tensor.manipulation.unstack +:old_api: paddle.fluid.layers.unstack + + + 该OP将单个dim为 ``D`` 的Tensor沿 ``axis`` 轴unpack为 ``num`` 个dim为 ``(D-1)`` 的Tensor 参数: - - **x** (Variable) – 输入x为 ``dim > 0`` 的Tensor, + - **x** (Tensor) – 输入x为 ``dim > 0`` 的Tensor, 支持的数据类型: float32,float64,int32,int64。 - **axis** (int | 可选) – 输入Tensor进行unpack运算所在的轴,axis的范围为:``[-D, D)`` , @@ -18,7 +24,7 @@ unstack 返回: 长度为num的Tensor列表, 数据类型与输入Tensor相同,dim为 ``(D-1)``。 -返回类型: list(Variable) +返回类型: list(Tensor) 抛出异常: - :code:`ValueError`:``x.shape[axis]`` <= 0 或 ``axis`` 不在[-D, D)范围内 @@ -28,7 +34,7 @@ unstack .. code-block:: python import paddle.fluid as fluid - x = fluid.layers.data(name='x', shape=[2, 3, 5], dtype='float32') #创建一个shape=[2, 3, 5]的Tensor + x = fluid.data(name='x', shape=[2, 3, 5], dtype='float32') #创建一个shape=[2, 3, 5]的Tensor y = fluid.layers.unstack(x, axis=1) #沿着第1轴进行unpack, unpack后为3个shape=[2,5]的Tensor diff --git a/doc/fluid/api_cn/layers_cn/warpctc_cn.rst b/doc/fluid/api_cn/layers_cn/warpctc_cn.rst index f94c4ae13b688f1a42673a4f655de4c589e46278..dbb4c4ca829330844170359127680fe870ac618c 100644 --- a/doc/fluid/api_cn/layers_cn/warpctc_cn.rst +++ b/doc/fluid/api_cn/layers_cn/warpctc_cn.rst @@ -5,6 +5,12 @@ warpctc .. 
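A small sketch pairing ``unsqueeze`` with the earlier ``squeeze`` to show how a size-1 dimension is inserted and removed; the [5, 10] input shape is an assumption for illustration.

.. code-block:: python

    import paddle.fluid as fluid

    x = fluid.data(name='x', shape=[5, 10], dtype='float32')
    # insert a size-1 dimension at position 1 ...
    y = fluid.layers.unsqueeze(input=x, axes=[1])   # y.shape: [5, 1, 10]
    # ... and squeeze it away again
    z = fluid.layers.squeeze(input=y, axes=[1])     # z.shape: [5, 10]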
py:function:: paddle.fluid.layers.warpctc(input, label, blank=0, norm_by_times=False, input_length=None, label_length=None) +:alias_main: paddle.nn.functional.warpctc +:alias: paddle.nn.functional.warpctc,paddle.nn.functional.extension.warpctc +:old_api: paddle.fluid.layers.warpctc + + + 该OP用于计算 `CTC loss `_ 。该OP的底层调用了第三方 `baidu-research::warp-ctc `_ 的实现。 参数: diff --git a/doc/fluid/api_cn/layers_cn/where_cn.rst b/doc/fluid/api_cn/layers_cn/where_cn.rst index 8b4fa63c8011f77d8e29fd183f2f549f7b29e6cb..d483b8fa905522125f7abecdf922ba9fd647fcbd 100644 --- a/doc/fluid/api_cn/layers_cn/where_cn.rst +++ b/doc/fluid/api_cn/layers_cn/where_cn.rst @@ -5,6 +5,9 @@ where .. py:function:: paddle.fluid.layers.where(condition) + + + 该OP计算输入元素中为True的元素在输入中的坐标(index)。 参数: diff --git a/doc/fluid/api_cn/layers_cn/while_loop_cn.rst b/doc/fluid/api_cn/layers_cn/while_loop_cn.rst index 04ce95125f0750cc3e4ed6efda640c2df4d3853e..5cfb1f648a2210683736d8786f55eead04f991db 100644 --- a/doc/fluid/api_cn/layers_cn/while_loop_cn.rst +++ b/doc/fluid/api_cn/layers_cn/while_loop_cn.rst @@ -4,12 +4,21 @@ while_loop ____________________________________ -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.layers.while_loop(cond, body, loop_vars, is_test=False, name=None) +:api_attr: 声明式编程模式(静态图) +:alias_main: paddle.nn.while_loop +:alias: paddle.nn.while_loop,paddle.nn.control_flow.while_loop +:old_api: paddle.fluid.layers.while_loop + + + 该API用于实现类似while的循环控制功能,只要循环条件 ``cond`` 的返回值为True,``while_loop`` 则会循环执行循环体 ``body`` ,直到 ``cond`` 的返回值为False。 +**注意:** + ``body`` 中定义的局部变量无法使用 ``Executor`` 的 ``fetch_list`` 来获取的,变量需在 ``body`` 外定义并将其置于 ``loop_vars`` 中进行循环更新后才可通过 ``fetch_list`` 获取。 + 参数: - **cond** (callable) - 返回boolean类型张量的可调用函数,用以判断循环是否继续执行。 ``cond`` 的参数和 ``loop_vars`` 相对应。 - **body** (callable) - 循环执行的结构体。其返回一个包含tensor或LoDTensorArray的列表或元组,且这些tensor或LoDTensorArray的长度,结构,类型和 ``loop_vars`` 中的相同。 且``body`` 的参数与 ``loop_vars`` 相对应。 @@ -49,8 +58,8 @@ ____________________________________ main_program = fluid.default_main_program() startup_program = fluid.default_startup_program() with fluid.program_guard(main_program, startup_program): - i = layers.fill_constant(shape=[1], dtype='int64', value=0) # 循环计数器 - ten = layers.fill_constant(shape=[1], dtype='int64', value=10) # 循环次数 + i = layers.fill_constant(shape=[1], dtype='int64', value=0) # 循环计数器 + ten = layers.fill_constant(shape=[1], dtype='int64', value=10) # 循环次数 i, ten = layers.while_loop(cond, body, [i, ten]) exe = fluid.Executor(fluid.CPUPlace()) diff --git a/doc/fluid/api_cn/layers_cn/yolo_box_cn.rst b/doc/fluid/api_cn/layers_cn/yolo_box_cn.rst index c128079379940ee08a266b653bff4edc6c4b8dea..5d1d7db4f14d31e5ad33eb6854c21740322a1088 100644 --- a/doc/fluid/api_cn/layers_cn/yolo_box_cn.rst +++ b/doc/fluid/api_cn/layers_cn/yolo_box_cn.rst @@ -5,6 +5,12 @@ yolo_box .. py:function:: paddle.fluid.layers.yolo_box(x, img_size, anchors, class_num, conf_thresh, downsample_ratio, clip_bbox=True,name=None) +:alias_main: paddle.nn.functional.yolo_box +:alias: paddle.nn.functional.yolo_box,paddle.nn.functional.vision.yolo_box +:old_api: paddle.fluid.layers.yolo_box + + + 该运算符基于YOLOv3网络的输出结果,生成YOLO检测框。 diff --git a/doc/fluid/api_cn/layers_cn/yolov3_loss_cn.rst b/doc/fluid/api_cn/layers_cn/yolov3_loss_cn.rst index 6b210e83a985319f78a510a015ceb5275806f2e5..020dd2cc7d0843b62213141a2033cb2dc3834e41 100644 --- a/doc/fluid/api_cn/layers_cn/yolov3_loss_cn.rst +++ b/doc/fluid/api_cn/layers_cn/yolov3_loss_cn.rst @@ -5,6 +5,12 @@ yolov3_loss .. 
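As a sketch of how ``where`` reports the coordinates of True elements, the condition below is built with ``greater_than`` over assumed values; the commented result is what an ``Executor`` run would fetch.

.. code-block:: python

    import numpy as np
    import paddle.fluid as fluid

    x = fluid.layers.assign(np.array([0.0, 1.0, 0.0, 1.0], dtype='float32'))
    zero = fluid.layers.fill_constant(shape=[4], dtype='float32', value=0.0)
    condition = fluid.layers.greater_than(x, zero)   # [False, True, False, True]
    out = fluid.layers.where(condition)              # [[1], [3]]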
py:function:: paddle.fluid.layers.yolov3_loss(x, gt_box, gt_label, anchors, anchor_mask, class_num, ignore_thresh, downsample_ratio, gt_score=None, use_label_smooth=True, name=None) +:alias_main: paddle.nn.functional.yolov3_loss +:alias: paddle.nn.functional.yolov3_loss,paddle.nn.functional.vision.yolov3_loss +:old_api: paddle.fluid.layers.yolov3_loss + + + 该运算通过给定的预测结果和真实框计算yolov3损失。 yolov3 loss前的网络输出形状为[N,C,H,W],H和W应该相同,用来指定网格(grid)大小。每个网格点预测S个边界框(bounding boxes),S由每个尺度中 ``anchors`` 簇的个数指定。在第二维(表示通道的维度)中,C的值应为S *(class_num + 5),class_num是源数据集的对象种类数(如coco中为80),另外,除了存储4个边界框位置坐标x,y,w,h,还包括边界框以及每个anchor框的one-hot关键字的置信度得分。 diff --git a/doc/fluid/api_cn/layers_cn/zeros_cn.rst b/doc/fluid/api_cn/layers_cn/zeros_cn.rst index 702ff0f5dd382f10e622bd587ffe11361e01054f..170007734a9e5e7aa4acd541c032f55bc96cb058 100644 --- a/doc/fluid/api_cn/layers_cn/zeros_cn.rst +++ b/doc/fluid/api_cn/layers_cn/zeros_cn.rst @@ -5,18 +5,18 @@ zeros .. py:function:: paddle.fluid.layers.zeros(shape,dtype,force_cpu=False) -**zeros** - -该OP创建形状为 ``shape`` 、数据类型为 ``dtype`` 且值全为0的Tensor,该OP会将stop_gradient设置为True,即停止梯度更新。 +该OP创建形状为 ``shape`` 、数据类型为 ``dtype`` 且值全为0的Tensor。 参数: - - **shape** (tuple|list) - 输出Tensor的形状。 - - **dtype** (np.dtype|core.VarDesc.VarType|str) - 输出Tensor的数据类型,数据类型必须为float16、float32、float64、int32或int64。 - - **force_cpu** (bool) - 是否强制将输出Tensor写入CPU内存。如果 ``force_cpu`` 为False,则将输出Tensor写入当前所在运算设备的内存,默认为False。 + - **shape** (tuple|list|Tensor) - 输出Tensor的形状, ``shape`` 的数据类型为int32或者int64。 + - **dtype** (np.dtype|str) - 输出Tensor的数据类型,数据类型必须为bool、 float16、float32、float64、int32或int64。 + - **force_cpu** (bool, 可选) - 是否强制将输出Tensor写入CPU内存。如果 ``force_cpu`` 为False,则将输出Tensor写入当前所在运算设备的内存,默认为False。 返回:值全为0的Tensor,数据类型和 ``dtype`` 定义的类型一致。 -返回类型:Variable +抛出异常: + - ``TypeError`` - 当 ``dtype`` 不是bool、 float16、float32、float64、int32、int64。 + - ``TypeError`` - 当 ``shape`` 不是tuple、list、或者Tensor时。 当 ``shape`` 为Tensor,其数据类型不是int32或者int64时。 **代码示例**: diff --git a/doc/fluid/api_cn/layers_cn/zeros_like_cn.rst b/doc/fluid/api_cn/layers_cn/zeros_like_cn.rst index 93c50de034d277c5292831433c04c10489ac0f0f..d75dc7cd1029a8bacaa2e69b28304305c68b5602 100644 --- a/doc/fluid/api_cn/layers_cn/zeros_like_cn.rst +++ b/doc/fluid/api_cn/layers_cn/zeros_like_cn.rst @@ -6,6 +6,9 @@ zeros_like .. py:function:: paddle.fluid.layers.zeros_like(x, out=None) + + + 该OP创建一个和x具有相同的形状和数据类型的全零Tensor。 参数: diff --git a/doc/fluid/api_cn/metric_cn.rst b/doc/fluid/api_cn/metric_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..12ceb76a84cedd332e7ca3cb0b305cfd6a58b5ec --- /dev/null +++ b/doc/fluid/api_cn/metric_cn.rst @@ -0,0 +1,20 @@ +======================= +paddle.metric +======================= + +.. toctree:: + :maxdepth: 1 + + metric_cn/Accuracy_cn.rst + metric_cn/accuracy_cn.rst + metric_cn/Auc_cn.rst + metric_cn/auc_cn.rst + metric_cn/chunk_eval_cn.rst + metric_cn/ChunkEvaluator_cn.rst + metric_cn/CompositeMetric_cn.rst + metric_cn/cos_sim_cn.rst + metric_cn/DetectionMAP_cn.rst + metric_cn/EditDistance_cn.rst + metric_cn/mean_iou_cn.rst + metric_cn/Precision_cn.rst + metric_cn/Recall_cn.rst diff --git a/doc/fluid/api_cn/metric_cn/ChunkEvaluator_cn.rst b/doc/fluid/api_cn/metric_cn/ChunkEvaluator_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5efed6ad082b2304a1af6a646d9ed2d9c28ecbea --- /dev/null +++ b/doc/fluid/api_cn/metric_cn/ChunkEvaluator_cn.rst @@ -0,0 +1,7 @@ +.. 
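A minimal sketch contrasting ``zeros`` (explicit shape and dtype) with ``zeros_like`` (shape and dtype taken from another tensor); the shapes are arbitrary.

.. code-block:: python

    import paddle.fluid as fluid

    # a [2, 3] float32 tensor filled with zeros
    data = fluid.layers.zeros(shape=[2, 3], dtype='float32')

    # same shape and dtype as x, all zeros
    x = fluid.data(name='x', shape=[4], dtype='float32')
    data_like = fluid.layers.zeros_like(x)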
_cn_api_metric_cn_ChunkEvaluator: + +ChunkEvaluator +------------------------------- +:doc_source: paddle.fluid.metrics.ChunkEvaluator + + diff --git a/doc/fluid/api_cn/metric_cn/CompositeMetric_cn.rst b/doc/fluid/api_cn/metric_cn/CompositeMetric_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..74c68b960befda03017d77414a46fa01e04d1cab --- /dev/null +++ b/doc/fluid/api_cn/metric_cn/CompositeMetric_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_metric_cn_CompositeMetric: + +CompositeMetric +------------------------------- +:doc_source: paddle.fluid.metrics.CompositeMetric + + diff --git a/doc/fluid/api_cn/metric_cn/DetectionMAP_cn.rst b/doc/fluid/api_cn/metric_cn/DetectionMAP_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3a859826bac28ef1d6dc1349873529240e22f338 --- /dev/null +++ b/doc/fluid/api_cn/metric_cn/DetectionMAP_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_metric_cn_DetectionMAP: + +DetectionMAP +------------------------------- +:doc_source: paddle.fluid.metrics.DetectionMAP + + diff --git a/doc/fluid/api_cn/metric_cn/EditDistance_cn.rst b/doc/fluid/api_cn/metric_cn/EditDistance_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8a543bb8d3883b6b861cfb54837359203baf96af --- /dev/null +++ b/doc/fluid/api_cn/metric_cn/EditDistance_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_metric_cn_EditDistance: + +EditDistance +------------------------------- +:doc_source: paddle.fluid.metrics.EditDistance + + diff --git a/doc/fluid/api_cn/metric_cn/Precision_cn.rst b/doc/fluid/api_cn/metric_cn/Precision_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e05b455c6a1e5ef9f87371138f318a18d7d43765 --- /dev/null +++ b/doc/fluid/api_cn/metric_cn/Precision_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_metric_cn_Precision: + +Precision +------------------------------- +:doc_source: paddle.fluid.metrics.Precision + + diff --git a/doc/fluid/api_cn/metric_cn/Recall_cn.rst b/doc/fluid/api_cn/metric_cn/Recall_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f3ca58ebd666ff68b5325b55d77d22315bee7f6e --- /dev/null +++ b/doc/fluid/api_cn/metric_cn/Recall_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_metric_cn_Recall: + +Recall +------------------------------- +:doc_source: paddle.fluid.metrics.Recall + + diff --git a/doc/fluid/api_cn/metric_cn/accuracy_cn.rst b/doc/fluid/api_cn/metric_cn/accuracy_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b95167c76515d3c9f8d97d02bc58c3fec3f075de --- /dev/null +++ b/doc/fluid/api_cn/metric_cn/accuracy_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_metric_cn_accuracy: + +accuracy +------------------------------- +:doc_source: paddle.fluid.layers.metric_op.accuracy + + diff --git a/doc/fluid/api_cn/metric_cn/auc_cn.rst b/doc/fluid/api_cn/metric_cn/auc_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0b2ba00dbc45e6c1a9ed6b8c5b7c0f3ea4946248 --- /dev/null +++ b/doc/fluid/api_cn/metric_cn/auc_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_metric_cn_auc: + +auc +------------------------------- +:doc_source: paddle.fluid.layers.metric_op.auc + + diff --git a/doc/fluid/api_cn/metric_cn/chunk_eval_cn.rst b/doc/fluid/api_cn/metric_cn/chunk_eval_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d6ba85326cd266353277ce26f79bd4a336ca905d --- /dev/null +++ b/doc/fluid/api_cn/metric_cn/chunk_eval_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_metric_cn_chunk_eval: + +chunk_eval +------------------------------- +:doc_source: paddle.fluid.layers.nn.chunk_eval + + diff --git a/doc/fluid/api_cn/metric_cn/cos_sim_cn.rst b/doc/fluid/api_cn/metric_cn/cos_sim_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5d5f6e4d5a6a1c219d15816549470a84961dddfc --- /dev/null +++ b/doc/fluid/api_cn/metric_cn/cos_sim_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_metric_cn_cos_sim: + +cos_sim +------------------------------- +:doc_source: paddle.fluid.layers.nn.cos_sim + + diff --git a/doc/fluid/api_cn/metric_cn/mean_iou_cn.rst b/doc/fluid/api_cn/metric_cn/mean_iou_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1bd9371930e95405245444c0c4ca4a032b9d1853 --- /dev/null +++ b/doc/fluid/api_cn/metric_cn/mean_iou_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_metric_cn_mean_iou: + +mean_iou +------------------------------- +:doc_source: paddle.fluid.layers.nn.mean_iou + + diff --git a/doc/fluid/api_cn/metrics_cn/Accuracy_cn.rst b/doc/fluid/api_cn/metrics_cn/Accuracy_cn.rst index 83d4159f333711452726fadc31b5ec96c111103a..0cb6ea087033b21afd2cea5838f6d1366868b92f 100644 --- a/doc/fluid/api_cn/metrics_cn/Accuracy_cn.rst +++ b/doc/fluid/api_cn/metrics_cn/Accuracy_cn.rst @@ -4,6 +4,9 @@ Accuracy ------------------------------- .. py:class:: paddle.fluid.metrics.Accuracy(name=None) + + + 该接口用来计算多个mini-batch的平均准确率。Accuracy对象有两个状态value和weight。Accuracy的定义参照 https://en.wikipedia.org/wiki/Accuracy_and_precision 。 参数: diff --git a/doc/fluid/api_cn/metrics_cn/Auc_cn.rst b/doc/fluid/api_cn/metrics_cn/Auc_cn.rst index a22d57431cd60e3c63b3e2824220c7cbe41f812d..8e6b7bfea5ec381b7af051ba39fc080291b4fcba 100644 --- a/doc/fluid/api_cn/metrics_cn/Auc_cn.rst +++ b/doc/fluid/api_cn/metrics_cn/Auc_cn.rst @@ -4,6 +4,9 @@ Auc ------------------------------- .. py:class:: paddle.fluid.metrics.Auc(name, curve='ROC', num_thresholds=4095) + + + **注意**:目前只用Python实现Auc,可能速度略慢 该接口计算Auc,在二分类(binary classification)中广泛使用。相关定义参考 https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve 。 diff --git a/doc/fluid/api_cn/metrics_cn/ChunkEvaluator_cn.rst b/doc/fluid/api_cn/metrics_cn/ChunkEvaluator_cn.rst index cefbb6696cd26848ca9d5a4387c84f9d84e6f008..c289db61aed83e2e4a33f8603e70c280a63b2fb2 100644 --- a/doc/fluid/api_cn/metrics_cn/ChunkEvaluator_cn.rst +++ b/doc/fluid/api_cn/metrics_cn/ChunkEvaluator_cn.rst @@ -4,6 +4,9 @@ ChunkEvaluator ------------------------------- .. py:class:: paddle.fluid.metrics.ChunkEvaluator(name=None) + + + 该接口使用mini-batch的chunk_eval累计的counter numbers,来计算准确率、召回率和F1值。ChunkEvaluator有三个状态num_infer_chunks,num_label_chunks和num_correct_chunks,分别对应语块数目、标签中的语块数目、正确识别的语块数目。对于chunking的基础知识,请参考 https://www.aclweb.org/anthology/N01-1025 。ChunkEvalEvaluator计算块检测(chunk detection)的准确率,召回率和F1值,支持IOB, IOE, IOBES和IO标注方案。 参数: diff --git a/doc/fluid/api_cn/metrics_cn/CompositeMetric_cn.rst b/doc/fluid/api_cn/metrics_cn/CompositeMetric_cn.rst index 97bfb107f96ed70ee1b3769afe498ccb371749f9..2b9382f7870ca4a104a6e63fb0e0ddb5e67b64d0 100644 --- a/doc/fluid/api_cn/metrics_cn/CompositeMetric_cn.rst +++ b/doc/fluid/api_cn/metrics_cn/CompositeMetric_cn.rst @@ -5,6 +5,9 @@ CompositeMetric .. 
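A short sketch of the value/weight bookkeeping described above for ``fluid.metrics.Accuracy``; the per-batch accuracies and batch sizes are made-up numbers used only to show the weighted average.

.. code-block:: python

    import paddle.fluid as fluid

    acc = fluid.metrics.Accuracy()
    # two mini-batches of 30 samples with accuracies 0.9 and 0.6
    acc.update(value=0.9, weight=30)
    acc.update(value=0.6, weight=30)
    # weighted average over the accumulated state: (0.9*30 + 0.6*30) / 60 = 0.75
    print(acc.eval())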
py:class:: paddle.fluid.metrics.CompositeMetric(name=None) + + + 创建一个可以容纳若干个评价指标(如F1, accuracy, recall等)的容器,评价指标添加完成后,通过调用eval()方法可自动计算该容器内的所有评价指标。 **注意,只有输入参数列表完全相同的评价指标才可被加入到同一个CompositeMetric实例内。** diff --git a/doc/fluid/api_cn/metrics_cn/DetectionMAP_cn.rst b/doc/fluid/api_cn/metrics_cn/DetectionMAP_cn.rst index f7f5123cf12ad737741d9346caf3ddd04cce8806..44d3700e5535c485a78361ba82c97c7c5b81ca87 100644 --- a/doc/fluid/api_cn/metrics_cn/DetectionMAP_cn.rst +++ b/doc/fluid/api_cn/metrics_cn/DetectionMAP_cn.rst @@ -5,6 +5,9 @@ DetectionMAP .. py:class:: paddle.fluid.metrics.DetectionMAP(input, gt_label, gt_box, gt_difficult=None, class_num=None, background_label=0, overlap_threshold=0.5, evaluate_difficult=True, ap_version='integral') + + + 该OP用于计算检测网络的平均精度(mAP)。 mAP是衡量object detectors精度的指标,比如 Faster R-CNN,SSD等。它不同于召回率,它是最大精度的平均值。 通常步骤如下: diff --git a/doc/fluid/api_cn/metrics_cn/EditDistance_cn.rst b/doc/fluid/api_cn/metrics_cn/EditDistance_cn.rst index 9494f7f1eefe24f097f81af6c692f8fad817d33c..9ffd2a0165d7e718d1cf08d15be5e90f64cf916b 100644 --- a/doc/fluid/api_cn/metrics_cn/EditDistance_cn.rst +++ b/doc/fluid/api_cn/metrics_cn/EditDistance_cn.rst @@ -5,6 +5,9 @@ EditDistance .. py:class:: paddle.fluid.metrics.EditDistance(name) + + + 用于管理字符串的编辑距离。编辑距离是通过计算将一个字符串转换为另一个字符串所需的最小编辑操作数(添加、删除或替换)来量化两个字符串(例如单词)彼此不相似的程度一种方法。 参考 https://en.wikipedia.org/wiki/Edit_distance。 **代码示例** diff --git a/doc/fluid/api_cn/metrics_cn/MetricBase_cn.rst b/doc/fluid/api_cn/metrics_cn/MetricBase_cn.rst index d24aa0470860704935e674c336617308bd3fb2c5..91cfa45e4f86514a1873576088dc10bb30af03d9 100644 --- a/doc/fluid/api_cn/metrics_cn/MetricBase_cn.rst +++ b/doc/fluid/api_cn/metrics_cn/MetricBase_cn.rst @@ -5,6 +5,9 @@ MetricBase .. py:class:: paddle.fluid.metrics.MetricBase(name) + + + 在评估神经网络效果的时候,由于我们常常需要把测试数据切分成mini-batch,并逐次将每个mini-batch送入神经网络进行预测和评估,因此我们每次只能获得当前batch下的评估结果,而并不能一次性获得整个测试集的评估结果。paddle.fluid.metrics正是为了解决这些问题而设计的,大部分paddle.fluid.metrics下的类都具有如下功能: 1. 接受模型对一个batch的预测结果(numpy.array)和这个batch的原始标签(numpy.array)作为输入,并进行特定的计算(如计算准确率,召回率等)。 diff --git a/doc/fluid/api_cn/metrics_cn/Precision_cn.rst b/doc/fluid/api_cn/metrics_cn/Precision_cn.rst index 2d07a011182a5e9e1d00a17243183914a395ad02..29a1cb89a9ebc7185c8ddb596fe8e1494ece800d 100644 --- a/doc/fluid/api_cn/metrics_cn/Precision_cn.rst +++ b/doc/fluid/api_cn/metrics_cn/Precision_cn.rst @@ -5,6 +5,9 @@ Precision .. py:class:: paddle.fluid.metrics.Precision(name=None) + + + 精确率Precision(也称为 positive predictive value,正预测值)是被预测为正样例中实际为正的比例。 https://en.wikipedia.org/wiki/Evaluation_of_binary_classifiers 该类管理二分类任务的precision分数。 diff --git a/doc/fluid/api_cn/metrics_cn/Recall_cn.rst b/doc/fluid/api_cn/metrics_cn/Recall_cn.rst index 263152eb9be6ac5d9482aef81ffb4f81b05ce739..3d4975fc55e42f251e983fd551c760511407f003 100644 --- a/doc/fluid/api_cn/metrics_cn/Recall_cn.rst +++ b/doc/fluid/api_cn/metrics_cn/Recall_cn.rst @@ -5,6 +5,9 @@ Recall .. py:class:: paddle.fluid.metrics.Recall(name=None) + + + 召回率Recall(也称为敏感度)是指得到的相关实例数占相关实例总数的比例。https://en.wikipedia.org/wiki/Precision_and_recall 该类管理二分类任务的召回率。 **代码示例** diff --git a/doc/fluid/api_cn/nets_cn/glu_cn.rst b/doc/fluid/api_cn/nets_cn/glu_cn.rst index ade91e1870d2a6d45fe4090c6feb7f7387b7efbc..7da9b516b272355ef2ca2825153cc4c604cc1a99 100644 --- a/doc/fluid/api_cn/nets_cn/glu_cn.rst +++ b/doc/fluid/api_cn/nets_cn/glu_cn.rst @@ -2,10 +2,13 @@ glu ------------------------------- -**注意:该API仅支持【静态图】模式** .. 
py:function:: paddle.fluid.nets.glu(input, dim=-1) +:api_attr: 声明式编程模式(静态图) + + + 门控线性单元 Gated Linear Units (GLU) 由 :ref:`cn_api_fluid_layers_split` ,:ref:`cn_api_fluid_layers_sigmoid` 和 :ref:`cn_api_fluid_layers_elementwise_mul` 组成。特定的,沿着给定维度将输入拆分成两个大小相同的部分,:math:`a` 和 :math:`b` ,按如下方式计算: .. math:: diff --git a/doc/fluid/api_cn/nets_cn/img_conv_group_cn.rst b/doc/fluid/api_cn/nets_cn/img_conv_group_cn.rst index 9b6006778cb884c6b371c62bcce38a4457d9dae0..3647169f78351371afb57920f0b3d91b5dbdd3b6 100644 --- a/doc/fluid/api_cn/nets_cn/img_conv_group_cn.rst +++ b/doc/fluid/api_cn/nets_cn/img_conv_group_cn.rst @@ -3,10 +3,13 @@ img_conv_group ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.nets.img_conv_group(input, conv_num_filter, pool_size, conv_padding=1, conv_filter_size=3, conv_act=None, param_attr=None, conv_with_batchnorm=False, conv_batchnorm_drop_rate=0.0, pool_stride=1, pool_type='max', use_cudnn=True) +:api_attr: 声明式编程模式(静态图) + + + Image Convolution Group由Convolution2d,BatchNorm,DropOut和Pool2d组成。根据输入参数,img_conv_group将使用Convolution2d,BatchNorm,DropOut对Input进行连续计算,得到最后结果。 参数: diff --git a/doc/fluid/api_cn/nets_cn/scaled_dot_product_attention_cn.rst b/doc/fluid/api_cn/nets_cn/scaled_dot_product_attention_cn.rst index 33d3ac3610e99ce186f448726730fb91181c3a39..5356e1ebd230df40ef27786287d71cfeed5a4ff8 100644 --- a/doc/fluid/api_cn/nets_cn/scaled_dot_product_attention_cn.rst +++ b/doc/fluid/api_cn/nets_cn/scaled_dot_product_attention_cn.rst @@ -3,10 +3,13 @@ scaled_dot_product_attention ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.nets.scaled_dot_product_attention(queries, keys, values, num_heads=1, dropout_rate=0.0) +:api_attr: 声明式编程模式(静态图) + + + 该接口实现了的基于点积(并进行了缩放)的多头注意力(Multi-Head Attention)机制。attention可以表述为将一个查询(query)和一组键值对(key-value pair)映射为一个输出;Multi-Head Attention则是使用多路进行attention,而且对attention的输入进行了线性变换。公式如下: @@ -43,31 +46,12 @@ scaled_dot_product_attention .. code-block:: python - import paddle.fluid as fluid - - queries = fluid.layers.data(name="queries", - shape=[3, 5, 9], - dtype="float32", - append_batch_size=False) - queries.stop_gradient = False - keys = fluid.layers.data(name="keys", - shape=[3, 6, 9], - dtype="float32", - append_batch_size=False) - keys.stop_gradient = False - values = fluid.layers.data(name="values", - shape=[3, 6, 10], - dtype="float32", - append_batch_size=False) - values.stop_gradient = False - contexts = fluid.nets.scaled_dot_product_attention(queries, keys, values) - contexts.shape # [3, 5, 10] - - - - - - + import paddle.fluid as fluid + queries = fluid.data(name="queries", shape=[3, 5, 9], dtype="float32") + keys = fluid.data(name="keys", shape=[3, 6, 9], dtype="float32") + values = fluid.data(name="values", shape=[3, 6, 10], dtype="float32") + contexts = fluid.nets.scaled_dot_product_attention(queries, keys, values) + contexts.shape # [3, 5, 10] diff --git a/doc/fluid/api_cn/nets_cn/sequence_conv_pool_cn.rst b/doc/fluid/api_cn/nets_cn/sequence_conv_pool_cn.rst index 592cdb2741406b410272d2842c2dc31124821e7a..2caeffc74a7adf7552ca5cff63f90b20320dfcc3 100644 --- a/doc/fluid/api_cn/nets_cn/sequence_conv_pool_cn.rst +++ b/doc/fluid/api_cn/nets_cn/sequence_conv_pool_cn.rst @@ -3,10 +3,13 @@ sequence_conv_pool ------------------------------- -**注意:该API仅支持【静态图】模式** .. 
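A minimal static-graph sketch of ``glu`` splitting the last dimension in half, as described above; the input width of 6 is an assumption, chosen so that the output's last dimension is 3.

.. code-block:: python

    import paddle.fluid as fluid

    # the last dimension (size 6) is split into halves a and b,
    # and the result is a * sigmoid(b), so the output's last dimension is 3
    x = fluid.data(name='x', shape=[None, 6], dtype='float32')
    out = fluid.nets.glu(input=x, dim=-1)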
py:function:: paddle.fluid.nets.sequence_conv_pool(input, num_filters, filter_size, param_attr=None, act='sigmoid', pool_type='max', bias_attr=None) +:api_attr: 声明式编程模式(静态图) + + + **注意:该OP的输入** ``input`` **必须是2维LoDTensor, lod_level为1,如果输入是Tensor,建议使用** :ref:`cn_api_fluid_nets_simple_img_conv_pool` **代替** 该接口由序列卷积( :ref:`cn_api_fluid_layers_sequence_conv` )和池化( :ref:`cn_api_fluid_layers_sequence_pool` )组成 diff --git a/doc/fluid/api_cn/nets_cn/simple_img_conv_pool_cn.rst b/doc/fluid/api_cn/nets_cn/simple_img_conv_pool_cn.rst index cec9e8befc40d5371729f00234ee0ee98ce1737e..0b48604e608f30ec5631df02a9795434749e1bca 100644 --- a/doc/fluid/api_cn/nets_cn/simple_img_conv_pool_cn.rst +++ b/doc/fluid/api_cn/nets_cn/simple_img_conv_pool_cn.rst @@ -3,10 +3,13 @@ simple_img_conv_pool ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.nets.simple_img_conv_pool(input, num_filters, filter_size, pool_size, pool_stride, pool_padding=0, pool_type='max', global_pooling=False, conv_stride=1, conv_padding=0, conv_dilation=1, conv_groups=1, param_attr=None, bias_attr=None, act=None, use_cudnn=True) +:api_attr: 声明式编程模式(静态图) + + + ``simple_img_conv_pool`` 由一个conv2d( :ref:`cn_api_fluid_layers_conv2d` )和一个pool2d( :ref:`cn_api_fluid_layers_pool2d` ) OP组成。 参数: diff --git a/doc/fluid/api_cn/nn_cn.rst b/doc/fluid/api_cn/nn_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8791b1f922b94dcc9625dabc2c85f2fa06aea38f --- /dev/null +++ b/doc/fluid/api_cn/nn_cn.rst @@ -0,0 +1,205 @@ +======================= +paddle.nn +======================= + + + + +.. toctree:: + :maxdepth: 1 + + nn_cn/Conv1d_cn.rst + nn_cn/Conv2d_cn.rst + nn_cn/Conv3d_cn.rst + nn_cn/ConvTranspose2d_cn.rst + nn_cn/ConvTranspose3d_cn.rst + nn_cn/diag_embed_cn.rst + nn_cn/interpolate_cn.rst + nn_cn/Linear_cn.rst + nn_cn/LogSoftmax_cn.rst + nn_cn/ReLU_cn.rst + nn_cn/Upsample_cn.rst + nn_cn/activation_cn.rst + nn_cn/loss_cn.rst + nn_cn/functional_cn.rst + nn_cn/layer_cn.rst + nn_cn/adaptive_pool2d_cn.rst + nn_cn/adaptive_pool3d_cn.rst + nn_cn/add_position_encoding_cn.rst + nn_cn/affine_channel_cn.rst + nn_cn/affine_grid_cn.rst + nn_cn/anchor_generator_cn.rst + nn_cn/assign_cn.rst + nn_cn/BatchNorm_cn.rst + nn_cn/BatchNorm1d_cn.rst + nn_cn/BatchNorm2d_cn.rst + nn_cn/BatchNorm3d_cn.rst + nn_cn/SyncBatchNorm_cn.rst + nn_cn/beam_search_cn.rst + nn_cn/beam_search_decode_cn.rst + nn_cn/BilinearTensorProduct_cn.rst + nn_cn/bipartite_match_cn.rst + nn_cn/box_clip_cn.rst + nn_cn/box_coder_cn.rst + nn_cn/box_decoder_and_assign_cn.rst + nn_cn/bpr_loss_cn.rst + nn_cn/brelu_cn.rst + nn_cn/case_cn.rst + nn_cn/center_loss_cn.rst + nn_cn/clip_by_norm_cn.rst + nn_cn/clip_cn.rst + nn_cn/collect_fpn_proposals_cn.rst + nn_cn/cond_cn.rst + nn_cn/continuous_value_model_cn.rst + nn_cn/conv2d_cn.rst + nn_cn/conv2d_transpose_cn.rst + nn_cn/conv3d_cn.rst + nn_cn/conv3d_transpose_cn.rst + nn_cn/ConstantPad1d_cn.rst + nn_cn/ConstantPad2d_cn.rst + nn_cn/ConstantPad3d_cn.rst + nn_cn/cosine_decay_cn.rst + nn_cn/cosine_similarity_cn.rst + nn_cn/CosineSimilarity_cn.rst + nn_cn/cross_entropy_cn.rst + nn_cn/data_cn.rst + nn_cn/deformable_roi_pooling_cn.rst + nn_cn/density_prior_box_cn.rst + nn_cn/detection_output_cn.rst + nn_cn/dice_loss_cn.rst + nn_cn/distribute_fpn_proposals_cn.rst + nn_cn/dropout_cn.rst + nn_cn/edit_distance_cn.rst + nn_cn/elu_cn.rst + nn_cn/Embedding_cn.rst + nn_cn/erf_cn.rst + nn_cn/exponential_decay_cn.rst + nn_cn/filter_by_instag_cn.rst + nn_cn/fsp_matrix_cn.rst + nn_cn/gather_tree_cn.rst + nn_cn/gelu_cn.rst + 
nn_cn/generate_mask_labels_cn.rst + nn_cn/generate_proposals_cn.rst + nn_cn/generate_proposal_labels_cn.rst + nn_cn/GradientClipByGlobalNorm_cn.rst + nn_cn/GradientClipByNorm_cn.rst + nn_cn/GradientClipByValue_cn.rst + nn_cn/grid_sampler_cn.rst + nn_cn/GroupNorm_cn.rst + nn_cn/hardshrink_cn.rst + nn_cn/hardtanh_cn.rst + nn_cn/hard_sigmoid_cn.rst + nn_cn/hard_swish_cn.rst + nn_cn/hash_cn.rst + nn_cn/hsigmoid_cn.rst + nn_cn/huber_loss_cn.rst + nn_cn/image_resize_cn.rst + nn_cn/image_resize_short_cn.rst + nn_cn/InstanceNorm1d_cn.rst + nn_cn/InstanceNorm2d_cn.rst + nn_cn/InstanceNorm3d_cn.rst + nn_cn/inverse_time_decay_cn.rst + nn_cn/iou_similarity_cn.rst + nn_cn/kldiv_loss_cn.rst + nn_cn/l2_normalize_cn.rst + nn_cn/label_smooth_cn.rst + nn_cn/LayerList_cn.rst + nn_cn/LayerNorm_cn.rst + nn_cn/Layer_cn.rst + nn_cn/leaky_relu_cn.rst + nn_cn/Linear_cn.rst + nn_cn/linear_lr_warmup_cn.rst + nn_cn/log_loss_cn.rst + nn_cn/log_sigmoid_cn.rst + nn_cn/log_softmax_cn.rst + nn_cn/lrn_cn.rst + nn_cn/margin_ranking_loss_cn.rst + nn_cn/maxout_cn.rst + nn_cn/mse_loss_cn.rst + nn_cn/multiclass_nms_cn.rst + nn_cn/natural_exp_decay_cn.rst + nn_cn/noam_decay_cn.rst + nn_cn/npair_loss_cn.rst + nn_cn/one_hot_cn.rst + nn_cn/pad2d_cn.rst + nn_cn/pad_cn.rst + nn_cn/Pad_cn.rst + nn_cn/pad_constant_like_cn.rst + nn_cn/PairwiseDistance_cn.rst + nn_cn/ParameterList_cn.rst + nn_cn/piecewise_decay_cn.rst + nn_cn/pixel_shuffle_cn.rst + nn_cn/polygon_box_transform_cn.rst + nn_cn/polynomial_decay_cn.rst + nn_cn/pool2d_cn.rst + nn_cn/Pool2D_cn.rst + nn_cn/pool3d_cn.rst + nn_cn/prelu_cn.rst + nn_cn/prior_box_cn.rst + nn_cn/prroi_pool_cn.rst + nn_cn/psroi_pool_cn.rst + nn_cn/random_crop_cn.rst + nn_cn/rank_loss_cn.rst + nn_cn/ReflectionPad1d_cn.rst + nn_cn/ReflectionPad2d_cn.rst + nn_cn/relu6_cn.rst + nn_cn/relu_cn.rst + nn_cn/resize_bilinear_cn.rst + nn_cn/resize_nearest_cn.rst + nn_cn/resize_trilinear_cn.rst + nn_cn/ReplicationPad1d_cn.rst + nn_cn/ReplicationPad2d_cn.rst + nn_cn/ReplicationPad3d_cn.rst + nn_cn/retinanet_detection_output_cn.rst + nn_cn/retinanet_target_assign_cn.rst + nn_cn/roi_align_cn.rst + nn_cn/roi_perspective_transform_cn.rst + nn_cn/roi_pool_cn.rst + nn_cn/row_conv_cn.rst + nn_cn/rpn_target_assign_cn.rst + nn_cn/sampled_softmax_with_cross_entropy_cn.rst + nn_cn/selu_cn.rst + nn_cn/Sequential_cn.rst + nn_cn/shuffle_channel_cn.rst + nn_cn/sigmoid_cross_entropy_with_logits_cn.rst + nn_cn/sigmoid_focal_loss_cn.rst + nn_cn/similarity_focus_cn.rst + nn_cn/smooth_l1_cn.rst + nn_cn/softmax_cn.rst + nn_cn/softmax_with_cross_entropy_cn.rst + nn_cn/softplus_cn.rst + nn_cn/softshrink_cn.rst + nn_cn/softsign_cn.rst + nn_cn/soft_relu_cn.rst + nn_cn/space_to_depth_cn.rst + nn_cn/SpectralNorm_cn.rst + nn_cn/square_error_cost_cn.rst + nn_cn/ssd_loss_cn.rst + nn_cn/swish_cn.rst + nn_cn/switch_case_cn.rst + nn_cn/tanhshrink_cn.rst + nn_cn/target_assign_cn.rst + nn_cn/teacher_student_sigmoid_loss_cn.rst + nn_cn/temporal_shift_cn.rst + nn_cn/thresholded_relu_cn.rst + nn_cn/unfold_cn.rst + nn_cn/warpctc_cn.rst + nn_cn/while_loop_cn.rst + nn_cn/yolov3_loss_cn.rst + nn_cn/yolo_box_cn.rst + nn_cn/loss_cn/MarginRankingLoss_cn.rst + nn_cn/functional_cn/margin_ranking_loss_cn.rst + nn_cn/weight_norm_cn.rst + nn_cn/remove_weight_norm_cn.rst + nn_cn/Dropout_cn.rst + nn_cn/Dropout2D_cn.rst + nn_cn/Dropout3D_cn.rst + nn_cn/AlphaDropout_cn.rst + nn_cn/ZeroPad2d_cn.rst + nn_cn/AdaptiveAvgPool2d_cn.rst + nn_cn/AdaptiveAvgPool3d_cn.rst + nn_cn/AdaptiveAvgPool1d_cn.rst + nn_cn/AdaptiveMaxPool1d_cn.rst + nn_cn/AvgPool1d_cn.rst + 
nn_cn/MaxPool1d_cn.rst + nn_cn/Bilinear_cn.rst diff --git a/doc/fluid/api_cn/nn_cn/AdaptiveAvgPool1d_cn.rst b/doc/fluid/api_cn/nn_cn/AdaptiveAvgPool1d_cn.rst new file mode 100755 index 0000000000000000000000000000000000000000..b7ea3869b9b9cfad8c3c9a77a72aa19b1f80e0a8 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/AdaptiveAvgPool1d_cn.rst @@ -0,0 +1,66 @@ +.. _cn_api_nn_AdaptiveAvgPool1d: + + +AdaptiveAvgPool1d +------------------------------- + +.. py:function:: paddle.nn.AdaptiveAvgPool1d(output_size, name=None) + +该算子根据输入 `x` , `output_size` 等参数对一个输入Tensor计算1D的自适应平均池化。输入和输出都是3-D Tensor, +默认是以 `NCL` 格式表示的,其中 `N` 是 batch size, `C` 是通道数, `L` 是输入特征的长度. + +计算公式如下: + +.. math:: + + lstart &= floor(i * L_{in} / L_{out}) + + lend &= ceil((i + 1) * L_{in} / L_{out}) + + Output(i) &= \frac{sum(Input[lstart:lend])}{(lstart - lend)} + + +参数 +::::::::: + - **output_size** (int): 算子输出特征图的长度,其数据类型为int。 + - **name** (str,可选): 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状 +::::::::: + - **x** (Tensor): 默认形状为(批大小,通道数,输出特征长度),即NCL格式的3-D Tensor。 其数据类型为float32或者float64。 + - **output** (Tensor): 默认形状为(批大小,通道数,输出特征长度),即NCL格式的3-D Tensor。 其数据类型与输入x相同。 + +返回 +::::::::: +计算AdaptiveAvgPool1d的可调用对象 + +抛出异常 +::::::::: + - ``ValueError`` - ``output_size`` 应是一个整数。 + +代码示例 +::::::::: + +.. code-block:: python + + # average adaptive pool1d + # suppose input data in shape of [N, C, L], `output_size` is m or [m], + # output shape is [N, C, m], adaptive pool divide L dimension + # of input data into m grids averagely and performs poolings in each + # grid to get output. + # adaptive avg pool performs calculations as follow: + # + # for i in range(m): + # lstart = floor(i * L / m) + # lend = ceil((i + 1) * L / m) + # output[:, :, i] = sum(input[:, :, lstart: lend])/(lstart - lend) + # + import paddle + import paddle.nn as nn + import numpy as np + paddle.disable_static() + + data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32)) + AdaptiveAvgPool1d = nn.layer.AdaptiveAvgPool1d(output_size=16) + pool_out = AdaptiveAvgPool1d(data) + # pool_out shape: [1, 3, 16] diff --git a/doc/fluid/api_cn/nn_cn/AdaptiveAvgPool2d_cn.rst b/doc/fluid/api_cn/nn_cn/AdaptiveAvgPool2d_cn.rst new file mode 100755 index 0000000000000000000000000000000000000000..6d6eaa5044f9b2781c46f449240e8bf158c3fb50 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/AdaptiveAvgPool2d_cn.rst @@ -0,0 +1,72 @@ +AdaptiveAvgPool2d +------------------------------- + +.. py:function:: paddle.nn.AdaptiveAvgPool2d(output_size, data_format="NCHW", name=None) + +该算子根据输入 `x` , `output_size` 等参数对一个输入Tensor计算2D的自适应平均池化。输入和输出都是4-D Tensor, +默认是以 `NCHW` 格式表示的,其中 `N` 是 batch size, `C` 是通道数, `H` 是输入特征的高度, `H` 是输入特征的宽度。 + +计算公式如下: + +.. 
math:: + + hstart &= floor(i * H_{in} / H_{out}) + + hend &= ceil((i + 1) * H_{in} / H_{out}) + + wstart &= floor(j * W_{in} / W_{out}) + + wend &= ceil((j + 1) * W_{in} / W_{out}) + + Output(i ,j) &= \frac{sum(Input[hstart:hend, wstart:wend])}{(hend - hstart) * (wend - wstart)} + + +参数 +::::::::: + - **output_size** (int|list|turple): 算子输出特征图的尺寸,如果其是list或turple类型的数值,必须包含两个元素,H和W。H和W既可以是int类型值也可以是None,None表示与输入特征尺寸相同。 + - **data_format** (str): 输入和输出的数据格式,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 + - **name** (str,可选): 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状 +::::::::: + - **x** (Tensor): 默认形状为(批大小,通道数,高度,宽度),即NCHW格式的4-D Tensor。 其数据类型为float16, float32, float64, int32或int64。 + - **output** (Tensor): 默认形状为(批大小,通道数,输出特征高度,输出特征宽度),即NCHW格式的4-D Tensor。 其数据类型与输入相同。 + + +返回 +::::::::: +计算AdaptiveAvgPool2d的可调用对象 + +抛出异常 +::::::::: + - ``ValueError`` - 如果 ``data_format`` 既不是"NCHW"也不是"NHWC"。 + +代码示例 +::::::::: + +.. code-block:: python + + # adaptive avg pool2d + # suppose input data in shape of [N, C, H, W], `output_size` is [m, n], + # output shape is [N, C, m, n], adaptive pool divide H and W dimensions + # of input data into m * n grids averagely and performs poolings in each + # grid to get output. + # adaptive avg pool performs calculations as follow: + # + # for i in range(m): + # for j in range(n): + # hstart = floor(i * H / m) + # hend = ceil((i + 1) * H / m) + # wstart = floor(i * W / n) + # wend = ceil((i + 1) * W / n) + # output[:, :, i, j] = avg(input[:, :, hstart: hend, wstart: wend]) + # + import paddle + import numpy as np + paddle.disable_static() + input_data = np.random.rand(2, 3, 32, 32) + x = paddle.to_tensor(input_data) + # x.shape is [2, 3, 32, 32] + adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2d(output_size=3) + pool_out = adaptive_avg_pool(x = x) + # pool_out.shape is [2, 3, 3, 3] \ No newline at end of file diff --git a/doc/fluid/api_cn/nn_cn/AdaptiveAvgPool3d_cn.rst b/doc/fluid/api_cn/nn_cn/AdaptiveAvgPool3d_cn.rst new file mode 100755 index 0000000000000000000000000000000000000000..4315f960cdf53e6f6f25c1b06d600d84d3b03dd2 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/AdaptiveAvgPool3d_cn.rst @@ -0,0 +1,78 @@ +AdaptiveAvgPool3d +------------------------------- + +.. py:function:: paddle.nn.AdaptiveAvgPool3d(output_size, data_format="NCDHW", name=None) + +该算子根据输入 `x` , `output_size` 等参数对一个输入Tensor计算3D的自适应平均池化。输入和输出都是5-D Tensor, +默认是以 `NCDHW` 格式表示的,其中 `N` 是 batch size, `C` 是通道数, `D` 是特征图长度, `H` 是输入特征的高度, `H` 是输入特征的宽度。 + +计算公式如下: + +.. math:: + + dstart &= floor(i * D_{in} / D_{out}) + + dend &= ceil((i + 1) * D_{in} / D_{out}) + + hstart &= floor(j * H_{in} / H_{out}) + + hend &= ceil((j + 1) * H_{in} / H_{out}) + + wstart &= floor(k * W_{in} / W_{out}) + + wend &= ceil((k + 1) * W_{in} / W_{out}) + + Output(i ,j, k) &= \frac{sum(Input[dstart:dend, hstart:hend, wstart:wend])}{(dend - dstart) * (hend - hstart) * (wend - wstart)} + +参数 +::::::::: + - **output_size** (int|list|turple): 算子输出特征图的尺寸,如果其是list或turple类型的数值,必须包含三个元素,D,H和W。D,H和W既可以是int类型值也可以是None,None表示与输入特征尺寸相同。 + - **data_format** (str): 输入和输出的数据格式,可以是"NCDHW"和"NDHWC"。N是批尺寸,C是通道数,D是特征长度,H是特征高度,W是特征宽度。默认值:"NCDHW"。 + - **name** (str,可选): 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状 +::::::::: + - **x** (Tensor): 默认形状为(批大小,通道数,长度,高度,宽度),即NCDHW格式的5-D Tensor。 其数据类型为float16, float32, float64, int32或int64. 
+ - **output** (Tensor): 默认形状为(批大小,通道数,输出特征长度,输出特征高度,输出特征宽度),即NCDHW格式的5-D Tensor。 其数据类型与输入相同。 + + +返回 +::::::::: +计算AdaptiveAvgPool3d的可调用对象 + +抛出异常 +::::::::: + - ``ValueError`` - 如果 ``data_format`` 既不是"NCDHW"也不是"NDHWC"。 + +代码示例 +::::::::: + +.. code-block:: python + + # adaptive avg pool3d + # suppose input data in shape of [N, C, D, H, W], `output_size` is [l, m, n], + # output shape is [N, C, l, m, n], adaptive pool divide D, H and W dimensions + # of input data into l * m * n grids averagely and performs poolings in each + # grid to get output. + # adaptive avg pool performs calculations as follow: + # + # for i in range(l): + # for j in range(m): + # for k in range(n): + # dstart = floor(i * D / l) + # dend = ceil((i + 1) * D / l) + # hstart = floor(j * H / m) + # hend = ceil((j + 1) * H / m) + # wstart = floor(k * W / n) + # wend = ceil((k + 1) * W / n) + # output[:, :, i, j, k] = + # avg(input[:, :, dstart:dend, hstart: hend, wstart: wend]) + import paddle + import numpy as np + paddle.disable_static() + input_data = np.random.rand(2, 3, 8, 32, 32) + x = paddle.to_tensor(input_data) + # x.shape is [2, 3, 8, 32, 32] + adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3d(output_size=3) + pool_out = adaptive_avg_pool(x=x) + # pool_out.shape is [2, 3, 3, 3, 3] diff --git a/doc/fluid/api_cn/nn_cn/AdaptiveMaxPool1d_cn.rst b/doc/fluid/api_cn/nn_cn/AdaptiveMaxPool1d_cn.rst new file mode 100755 index 0000000000000000000000000000000000000000..3d11df5b51d3f6bbede8b92e36091614fd868bc3 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/AdaptiveMaxPool1d_cn.rst @@ -0,0 +1,72 @@ +.. _cn_api_nn_AdaptiveMaxPool1d: + + +AdaptiveMaxPool1d +------------------------------- + +.. py:function:: paddle.nn.AdaptiveMaxPool1d(output_size, return_indices=False, name=None) + +该算子根据输入 `x` , `output_size` 等参数对一个输入Tensor计算1D的自适应最大池化。输入和输出都是3-D Tensor, +默认是以 `NCL` 格式表示的,其中 `N` 是 batch size, `C` 是通道数, `L` 是输入特征的长度。 + +计算公式如下: + +.. math:: + + lstart &= floor(i * L_{in} / L_{out}) + + lend &= ceil((i + 1) * L_{in} / L_{out}) + + Output(i) &= max(Input[lstart:lend]) + + +参数 +::::::::: + - **output_size** (int|list|tuple): 算子输出特征图的长度,其数据类型为int,list或tuple。 + - **return_indices** (bool): 如果设置为True,则会与输出一起返回最大值的索引,默认为False。 + - **name** (str,可选): 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状 +::::::::: + - **x** (Tensor): 默认形状为(批大小,通道数,输入特征长度),即NCL格式的3-D Tensor。 其数据类型为float32或者float64。 + - **output** (Tensor): 默认形状为(批大小,通道数,输出特征长度),即NCL格式的3-D Tensor。 其数据类型与输入x相同。 + +返回 +::::::::: +计算AdaptiveMaxPool1d的可调用对象 + +抛出异常 +::::::::: + - ``ValueError`` - ``output_size`` 应是一个整数,或长度为1的list或tuple。 + +代码示例 +::::::::: + +.. code-block:: python + + # max adaptive pool1d + # suppose input data in shape of [N, C, L], `output_size` is m or [m], + # output shape is [N, C, m], adaptive pool divide L dimension + # of input data into m grids averagely and performs poolings in each + # grid to get output. 
+ # adaptive max pool performs calculations as follow: + # + # for i in range(m): + # lstart = floor(i * L / m) + # lend = ceil((i + 1) * L / m) + # output[:, :, i] = max(input[:, :, lstart: lend]) + # + import paddle + import paddle.nn as nn + import numpy as np + paddle.disable_static() + + data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32)) + AdaptiveMaxPool1d = nn.layer.AdaptiveMaxPool1d(output_size=16) + pool_out = AdaptiveMaxPool1d(data) + # pool_out shape: [1, 3, 16] + + # for return_indices = true + AdaptiveMaxPool1d = nn.layer.AdaptiveMaxPool1d(output_size=16, return_indices=True) + pool_out, indices = AdaptiveMaxPool1d(data) + # pool_out shape: [1, 3, 16], indices shape: [1, 3, 16] diff --git a/doc/fluid/api_cn/nn_cn/AlphaDropout_cn.rst b/doc/fluid/api_cn/nn_cn/AlphaDropout_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8dfdac7935a55010b9891b9835b8b2446629e75c --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/AlphaDropout_cn.rst @@ -0,0 +1,42 @@ +.. _cn_api_nn_AlphaDropout: + +AlphaDropout +------------------------------- + +.. py:function:: paddle.nn.AlphaDropout(p=0.5, name=None) + +AlphaDropout是一种具有自归一化性质的dropout。均值为0,方差为1的输入,经过AlphaDropout计算之后,输出的均值和方差与输入保持一致。AlphaDropout通常与SELU激活函数组合使用。论文请参考: `Self-Normalizing Neural Networks `_ + +在动态图模式下,请使用模型的 `eval()` 方法切换至测试阶段。 + +.. note:: + 对应的 `functional方法` 请参考: :ref:`cn_api_nn_functional_alpha_dropout` 。 + +参数 +::::::::: + - **p** (float): 将输入节点置0的概率,即丢弃概率。默认: 0.5。 + - **name** (str,可选): 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +返回 +::::::::: +经过AlphaDropout之后的结果,与输入x形状相同的 `Tensor` 。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + x = np.array([[-1, 1], [-1, 1]]).astype('float32') + x = paddle.to_tensor(x) + m = paddle.nn.AlphaDropout(p=0.5) + y_train = m(x) + m.eval() # switch the model to test phase + y_test = m(x) + print(x.numpy()) + print(y_train.numpy()) + # [[-0.10721093, 1.6655989 ], [-0.7791938, -0.7791938]] (randomly) + print(y_test.numpy()) diff --git a/doc/fluid/api_cn/nn_cn/AvgPool1d_cn.rst b/doc/fluid/api_cn/nn_cn/AvgPool1d_cn.rst new file mode 100755 index 0000000000000000000000000000000000000000..c24c89ec51ce367586385684a750ef4700aa6b08 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/AvgPool1d_cn.rst @@ -0,0 +1,59 @@ +.. _cn_api_nn_AvgPool1d: + + +AvgPool1d +------------------------------- + +.. py:function:: paddle.nn.AvgPool1d(kernel_size, stride=None, padding=0, count_include_pad=True, ceil_mode=False, name=None) + +该算子根据输入 `x` , `kernel_size` 等参数对一个输入Tensor计算1D的平均池化。输入和输出都是3-D Tensor, +默认是以 `NCL` 格式表示的,其中 `N` 是 batch size, `C` 是通道数, `L` 是输入特征的长度。 + +假设输入形状是(N, C, L),输出形状是 (N, C, L_{out}),卷积核尺寸是k, 1d平均池化计算公式如下: + +.. math:: + + Output(N_i, C_i, l) = mean(Input[N_i, C_i, stride \times l:stride \times l+k]) + +参数 +::::::::: + - **kernel_size** (int|list|tuple): 池化核的尺寸大小. 如果kernel_size为list或tuple类型, 其必须包含一个整数, 最终池化核的大小为该数值。 + - **stride** (int|list|tuple): 池化操作步长. 如果stride为list或tuple类型, 其必须包含一个整数,最终池化操作的步长为该数值。 + - **padding** (string|int|list|tuple): 池化补零的方式. 
如果padding是一个字符串,则必须为 `SAME` 或者 `VALID` 。如果是turple或者list类型, 则应是 `[pad_left, pad_right]` 形式。如果padding是一个非0值,那么表示会在输入的两端都padding上同样长度的0。 + - **count_include_pad** (bool): 是否用额外padding的值计算平均池化结果,默认为True。 + - **ceil_mode** (bool): 是否用ceil函数计算输出的height和width,如果设置为False, 则使用floor函数来计算,默认为False。 + - **name** (str,可选): 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + + +形状 +::::::::: + - **x** (Tensor): 默认形状为(批大小,通道数,长度),即NCL格式的3-D Tensor。 其数据类型为float32或float64. + - **output** (Tensor): 默认形状为(批大小,通道数,输出特征长度),即NCL格式的3-D Tensor。 其数据类型与输入x相同。 + +返回 +::::::::: +计算AvgPool1d的可调用对象 + + +抛出异常 +::::::::: + - ``ValueError`` - 如果 ``padding`` 是字符串但不是 "SAME" 和 "VALID" 。 + - ``ValueError`` - 如果 ``padding`` 是 "VALID" 但 `ceil_mode` 被设置为True。 + - ``ValueError`` - 如果 ``padding`` 是一个长度大于1的list或turple。 + - ``ShapeError`` - 如果输入x不是一个3-D Tensor。 + - ``ShapeError`` - 如果计算得到的输出形状小于等于0。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import paddle.nn as nn + import numpy as np + paddle.disable_static() + + data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32)) + AvgPool1d = nn.layer.AvgPool1d(kernel_size=2, stride=2, padding=0) + pool_out = AvgPool1d(data) + # pool_out shape: [1, 3, 16] diff --git a/doc/fluid/api_cn/nn_cn/BatchNorm1d_cn.rst b/doc/fluid/api_cn/nn_cn/BatchNorm1d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c25ec52a26365319fea221ed14c0afd79eea200c --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/BatchNorm1d_cn.rst @@ -0,0 +1,74 @@ +.. _cn_api_nn_BatchNorm1d: + +BatchNorm1d +------------------------------- + +.. py:class:: paddle.nn.BatchNorm1d(num_features, momentum=0.9, epsilon=1e-05, weight_attr=None, bias_attr=None, data_format='NCL', track_running_stats=True, name=None): + + +该接口用于构建 ``BatchNorm1d`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。可以处理2D或者3D的Tensor, 实现了批归一化层(Batch Normalization Layer)的功能,可用作卷积和全连接操作的批归一化函数,根据当前批次数据按通道计算的均值和方差进行归一化。更多详情请参考 : `Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift `_ + +当训练时 :math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 是minibatch的统计数据。计算公式如下: + +.. math:: + \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \quad &// mini-batch-mean \\ + \sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i - \mu_{\beta})^2 \quad &// mini-batch-variance \\ + +- :math:`x` : 批输入数据 +- :math:`m` : 当前批次数据的大小 + +当预测时,track_running_stats = True :math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 是全局(或运行)统计数据(moving_mean和moving_variance),通常来自预先训练好的模型。计算公式如下: + +.. math:: + + moving\_mean = moving\_mean * momentum + \mu_{\beta} * (1. - momentum) \quad &// global mean \\ + moving\_variance = moving\_variance * momentum + \sigma_{\beta}^{2} * (1. - momentum) \quad &// global variance \\ + +归一化函数公式如下: + +.. 
math:: + + \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\sigma_{\beta}^{2} + \epsilon}} \quad &// normalize \\ + y_i &\gets \gamma \hat{x_i} + \beta \quad &// scale-and-shift \\ + +- :math:`\epsilon` : 添加较小的值到方差中以防止除零 +- :math:`\gamma` : 可训练的比例参数 +- :math:`\beta` : 可训练的偏差参数 + +参数: + - **num_features** (int) - 指明输入 ``Tensor`` 的通道数量。 + - **epsilon** (float, 可选) - 为了数值稳定加在分母上的值。默认值:1e-05。 + - **momentum** (float, 可选) - 此值用于计算 ``moving_mean`` 和 ``moving_var`` 。默认值:0.9。更新公式如上所示。 + - **weight_attr** (ParamAttr|bool, 可选) - 指定权重参数属性的对象。如果为False, 则表示每个通道的伸缩固定为1,不可改变。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 + - **bias_attr** (ParamAttr, 可选) - 指定偏置参数属性的对象。如果为False, 则表示每一个通道的偏移固定为0,不可改变。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 + - **data_format** (string, 可选) - 指定输入数据格式,数据格式可以为“NC"或者"NCL"。默认值:“NCL”。 + - **track_running_stats** (bool, 可选) – 指示是否使用全局均值和方差。在训练时,设置为True表示在训练期间将保存全局均值和方差用于推理。推理时此属性只能设置为True。默认值:True。 + - **name** (string, 可选) – BatchNorm的名称, 默认值为None。更多信息请参见 :ref:`api_guide_Name` 。 + + +返回:无 + +形状: + - input: 形状为(批大小,通道数)的2-D Tensor 或(批大小, 通道数,长度)的3-D Tensor。 + - output: 和输入形状一样。 + +.. note:: +目前训练时设置track_running_stats为False是无效的,实际还是会按照True的方案保存全局均值和方差。之后的版本会修复此问题。 + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + np.random.seed(123) + x_data = np.random.random(size=(2, 1, 3)).astype('float32') + x = paddle.to_tensor(x_data) + batch_norm = paddle.nn.BatchNorm1d(1) + batch_norm_out = batch_norm(x) + + print(batch_norm_out.numpy()) + diff --git a/doc/fluid/api_cn/nn_cn/BatchNorm2d_cn.rst b/doc/fluid/api_cn/nn_cn/BatchNorm2d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..af590ce252fa8c65fe46f01a9138bb4adac2554d --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/BatchNorm2d_cn.rst @@ -0,0 +1,74 @@ +.. _cn_api_nn_BatchNorm2d: + +BatchNorm2d +------------------------------- + +.. py:class:: paddle.nn.BatchNorm2d(num_features, momentum=0.9, epsilon=1e-05, weight_attr=None, bias_attr=None, data_format='NCHW', track_running_stats=True, name=None): + + +该接口用于构建 ``BatchNorm2d`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。可以处理4D的Tensor, 实现了批归一化层(Batch Normalization Layer)的功能,可用作卷积和全连接操作的批归一化函数,根据当前批次数据按通道计算的均值和方差进行归一化。更多详情请参考 : `Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift `_ + +当训练时 :math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 是minibatch的统计数据。计算公式如下: + +.. math:: + \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \quad &// mini-batch-mean \\ + \sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i - \mu_{\beta})^2 \quad &// mini-batch-variance \\ + +- :math:`x` : 批输入数据 +- :math:`m` : 当前批次数据的大小 + +当预测时,track_running_stats = True :math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 是全局(或运行)统计数据(moving_mean和moving_variance),通常来自预先训练好的模型。计算公式如下: + +.. math:: + + moving\_mean = moving\_mean * momentum + \mu_{\beta} * (1. - momentum) \quad &// global mean \\ + moving\_variance = moving\_variance * momentum + \sigma_{\beta}^{2} * (1. - momentum) \quad &// global variance \\ + +归一化函数公式如下: + +.. 
math:: + + \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\sigma_{\beta}^{2} + \epsilon}} \quad &// normalize \\ + y_i &\gets \gamma \hat{x_i} + \beta \quad &// scale-and-shift \\ + +- :math:`\epsilon` : 添加较小的值到方差中以防止除零 +- :math:`\gamma` : 可训练的比例参数 +- :math:`\beta` : 可训练的偏差参数 + +参数: + - **num_features** (int) - 指明输入 ``Tensor`` 的通道数量。 + - **epsilon** (float, 可选) - 为了数值稳定加在分母上的值。默认值:1e-05。 + - **momentum** (float, 可选) - 此值用于计算 ``moving_mean`` 和 ``moving_var`` 。默认值:0.9。更新公式如上所示。 + - **weight_attr** (ParamAttr|bool, 可选) - 指定权重参数属性的对象。如果为False, 则表示每个通道的伸缩固定为1,不可改变。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 + - **bias_attr** (ParamAttr, 可选) - 指定偏置参数属性的对象。如果为False, 则表示每一个通道的偏移固定为0,不可改变。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 + - **data_format** (string, 可选) - 指定输入数据格式,数据格式可以为"NCHW"。默认值:“NCHW”。 + - **track_running_stats** (bool, 可选) – 指示是否使用全局均值和方差。在训练时,设置为True表示在训练期间将保存全局均值和方差用于推理。推理时此属性只能设置为True。默认值:True。 + - **name** (string, 可选) – BatchNorm的名称, 默认值为None。更多信息请参见 :ref:`api_guide_Name` 。 + + +返回:无 + +形状: + - input: 形状为(批大小,通道数, 高度,宽度)的4-D Tensor 或(批大小, 通道数,宽度,高度)的4-D Tensor。 + - output: 和输入形状一样。 + +.. note:: +目前训练时设置track_running_stats为False是无效的,实际还是会按照True的方案保存全局均值和方差。之后的版本会修复此问题。 + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + np.random.seed(123) + x_data = np.random.random(size=(2, 1, 2, 3)).astype('float32') + x = paddle.to_tensor(x_data) + batch_norm = paddle.nn.BatchNorm2d(1) + batch_norm_out = batch_norm(x) + + print(batch_norm_out.numpy()) + diff --git a/doc/fluid/api_cn/nn_cn/BatchNorm3d_cn.rst b/doc/fluid/api_cn/nn_cn/BatchNorm3d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..98152e9f539fee04642390274c00e48a33c33696 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/BatchNorm3d_cn.rst @@ -0,0 +1,74 @@ +.. _cn_api_nn_BatchNorm3d: + +BatchNorm3d +------------------------------- + +.. py:class:: paddle.nn.BatchNorm3d(num_features, momentum=0.9, epsilon=1e-05, weight_attr=None, bias_attr=None, data_format='NCDHW', track_running_stats=True, name=None): + + +该接口用于构建 ``BatchNorm3d`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。可以处理4D的Tensor, 实现了批归一化层(Batch Normalization Layer)的功能,可用作卷积和全连接操作的批归一化函数,根据当前批次数据按通道计算的均值和方差进行归一化。更多详情请参考 : `Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift `_ + +当训练时 :math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 是minibatch的统计数据。计算公式如下: + +.. math:: + \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \quad &// mini-batch-mean \\ + \sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i - \mu_{\beta})^2 \quad &// mini-batch-variance \\ + +- :math:`x` : 批输入数据 +- :math:`m` : 当前批次数据的大小 + +当预测时,track_running_stats = True :math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 是全局(或运行)统计数据(moving_mean和moving_variance),通常来自预先训练好的模型。计算公式如下: + +.. math:: + + moving\_mean = moving\_mean * momentum + \mu_{\beta} * (1. - momentum) \quad &// global mean \\ + moving\_variance = moving\_variance * momentum + \sigma_{\beta}^{2} * (1. - momentum) \quad &// global variance \\ + +归一化函数公式如下: + +.. 
math:: + + \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\sigma_{\beta}^{2} + \epsilon}} \quad &// normalize \\ + y_i &\gets \gamma \hat{x_i} + \beta \quad &// scale-and-shift \\ + +- :math:`\epsilon` : 添加较小的值到方差中以防止除零 +- :math:`\gamma` : 可训练的比例参数 +- :math:`\beta` : 可训练的偏差参数 + +参数: + - **num_features** (int) - 指明输入 ``Tensor`` 的通道数量。 + - **epsilon** (float, 可选) - 为了数值稳定加在分母上的值。默认值:1e-05。 + - **momentum** (float, 可选) - 此值用于计算 ``moving_mean`` 和 ``moving_var`` 。默认值:0.9。更新公式如上所示。 + - **weight_attr** (ParamAttr|bool, 可选) - 指定权重参数属性的对象。如果为False, 则表示每个通道的伸缩固定为1,不可改变。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 + - **bias_attr** (ParamAttr, 可选) - 指定偏置参数属性的对象。如果为False, 则表示每一个通道的偏移固定为0,不可改变。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 + - **data_format** (string, 可选) - 指定输入数据格式,数据格式可以为“NCDHW"。默认值:“NCDHW”。 + - **track_running_stats** (bool, 可选) – 指示是否使用全局均值和方差。在训练时,设置为True表示在训练期间将保存全局均值和方差用于推理。推理时此属性只能设置为True。默认值:True。 + - **name** (string, 可选) – BatchNorm的名称, 默认值为None。更多信息请参见 :ref:`api_guide_Name` 。 + + +返回:无 + +形状: + - input: 形状为(批大小,通道数, 维度,高度,宽度)的5-D Tensor。 + - output: 和输入形状一样。 + +.. note:: +目前训练时设置track_running_stats为False是无效的,实际还是会按照True的方案保存全局均值和方差。之后的版本会修复此问题。 + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + np.random.seed(123) + x_data = np.random.random(size=(2, 1, 2, 2, 3)).astype('float32') + x = paddle.to_tensor(x_data) + batch_norm = paddle.nn.BatchNorm3d(1) + batch_norm_out = batch_norm(x) + + print(batch_norm_out.numpy()) + diff --git a/doc/fluid/api_cn/nn_cn/BatchNorm_cn.rst b/doc/fluid/api_cn/nn_cn/BatchNorm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ce5bfc8381a5805a71eb90906404a36953a3648f --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/BatchNorm_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_BatchNorm: + +BatchNorm +------------------------------- +:doc_source: paddle.fluid.dygraph.BatchNorm + + diff --git a/doc/fluid/api_cn/nn_cn/BilinearTensorProduct_cn.rst b/doc/fluid/api_cn/nn_cn/BilinearTensorProduct_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5a061557d31075bd38378f02f7baa1602e641c33 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/BilinearTensorProduct_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_BilinearTensorProduct: + +BilinearTensorProduct +------------------------------- +:doc_source: paddle.fluid.dygraph.BilinearTensorProduct + + diff --git a/doc/fluid/api_cn/nn_cn/Bilinear_cn.rst b/doc/fluid/api_cn/nn_cn/Bilinear_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..55ccb27c78fdfea567ed8195aa6fd0828ee64cc6 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/Bilinear_cn.rst @@ -0,0 +1,59 @@ +.. _cn_api_nn_Bilinear: + +Bilinear +------------------------------- + +.. py:function:: paddle.nn.Bilinear(in1_features, in2_features, out_features, weight_attr=None, bias_attr=None, name=None) + +该层对两个输入执行双线性张量积。 + +例如: + +.. 
math:: + + out_{i} = x1 * W_{i} * {x2^\mathrm{T}}, i=0,1,...,size-1 + + out = out + b + +在这个公式中: + - :math:`x1`: 第一个输入,包含 :in1_features个元素,形状为 [batch_size, in1_features]。 + - :math:`x2`: 第二个输入,包含 :in2_features个元素,形状为 [batch_size, in2_features]。 + - :math:`W_{i}`: 第 :i个被学习的权重,形状是 [in1_features, in2_features]。 + - :math:`out_{i}`: 输出的第 :i个元素,形状是 [batch_size, out_features]。 + - :math:`b`: 被学习的偏置参数,形状是 [1, out_features]。 + - :math:`x2^\mathrm{T}`: :math:`x2` 的转置。 + +参数 +::::::::: + - **in1_features** (int): 每个 **x1** 元素的维度。 + - **in2_features** (int): 每个 **x2** 元素的维度。 + - **out_features** (int): 输出张量的维度。 + - **weight_attr** (ParamAttr,可选) :指定权重参数属性的对象。默认值为 None,表示使用默认的权重参数属性。 + - **bias_attr** (ParamAttr,可选) : 指定偏置参数属性的对象。默认值为 None,表示使用默认的偏置参数属性,此时bias的元素会被初始化成0。如果设置成False,则不会有bias加到output结果上。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为 None。 + +属性 +::::::::: + - **weight** 本层的可学习参数,类型为 Parameter + - **bias** 本层的可学习偏置,类型为 Parameter + +返回 +::::::::: +``Tensor``,一个形为 [batch_size, out_features] 的 2-D 张量。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy + + paddle.disable_static() + layer1 = numpy.random.random((5, 5)).astype('float32') + layer2 = numpy.random.random((5, 4)).astype('float32') + bilinear = paddle.nn.Bilinear( + in1_features=5, in2_features=4, out_features=1000) + result = bilinear(paddle.to_tensor(layer1), + paddle.to_tensor(layer2)) # result shape [5, 1000] + diff --git a/doc/fluid/api_cn/nn_cn/ConstantPad1d_cn.rst b/doc/fluid/api_cn/nn_cn/ConstantPad1d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d7025d80e873242b892d0e1deda4bcac895b2e37 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/ConstantPad1d_cn.rst @@ -0,0 +1,36 @@ +.. _cn_api_nn_ConstantPad1d: + +ConstantPad1d +------------------------------- +.. py:class:: paddle.nn.ConstantPad1d(padding, value=0.0, data_format="NCL", name=None) + +**ConstantPad1d** + +按照 padding 对输入 以constant模式进行 ``pad``,即填充固定值。 + +参数: + - **padding** (Tensor | List[int32]) - 填充大小。pad的格式为[pad_left, pad_right]。 + - **value** (float32) - 待填充的值,默认值为0.0。 + - **data_format** (str) - 指定input的format,可为 `'NCL'` 或者 `'NLC'`,默认值为`'NCL'`。 + - **name** (str, 可选) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,缺省值为None。 + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle + import paddle.nn as nn + import numpy as np + paddle.disable_static() + + input_shape = (1, 2, 3) + pad = [1, 2] + data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1 + my_pad = nn.ConstantPad1d(padding=pad) + data = paddle.to_tensor(data) + result = my_pad(data) + print(result.numpy()) + # [[[0. 1. 2. 3. 0. 0.] + # [0. 4. 5. 6. 0. 0.]]] diff --git a/doc/fluid/api_cn/nn_cn/ConstantPad2d_cn.rst b/doc/fluid/api_cn/nn_cn/ConstantPad2d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3e8f3e14a80fa3b1e8bd78656fb66b2eb38e9f6d --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/ConstantPad2d_cn.rst @@ -0,0 +1,39 @@ +.. _cn_api_nn_ConstantPad2d: + +ConstantPad2d +------------------------------- +.. 
py:class:: paddle.nn.ConstantPad2d(padding, value=0.0, data_format="NCHW", name=None) + +**ConstantPad2d** + +按照 padding 对输入 以constant模式进行 ``pad``,即填充固定值。 + +参数: + - **padding** (Tensor | List[int32]) - 填充大小。pad的格式为[pad_left, pad_right, pad_top, pad_bottom]。 + - **value** (float32) - 待填充的值,默认值为0.0。 + - **data_format** (str) - 指定input的format,可为 `'NCHW'` 或者 `'NHWC'`,默认值为`'NCHW'`。 + - **name** (str, 可选) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,缺省值为None。 + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle + import paddle.nn as nn + import numpy as np + paddle.disable_static() + + input_shape = (1, 1, 2, 3) + pad = [1, 0, 1, 2] + data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1 + my_pad = nn.ConstantPad2d(padding=pad) + data = paddle.to_tensor(data) + result = my_pad(data) + print(result.numpy()) + # [[[[0. 0. 0. 0.] + # [0. 1. 2. 3.] + # [0. 4. 5. 6.] + # [0. 0. 0. 0.] + # [0. 0. 0. 0.]]]] diff --git a/doc/fluid/api_cn/nn_cn/ConstantPad3d_cn.rst b/doc/fluid/api_cn/nn_cn/ConstantPad3d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..35ad8aa795d3d0704f7dbac79ba06b6cd5223b7c --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/ConstantPad3d_cn.rst @@ -0,0 +1,39 @@ +.. _cn_api_nn_ConstantPad3d: + +ConstantPad3d +------------------------------- +.. py:class:: paddle.nn.ConstantPad3d(padding, value=0.0, data_format="NCDHW", name=None) + +**ConstantPad3d** + +按照 padding 对输入 以constant模式进行 ``pad``,即填充固定值。 + +参数: + - **padding** (Tensor | List[int32]) - 填充大小。pad的格式为[pad_left, pad_right, pad_top, pad_bottom, pad_front, pad_back]。 + - **value** (float32) - 待填充的值,默认值为0.0。 + - **data_format** (str) - 指定input的format,可为 `'NCDHW'` 或者 `'NDHWC'`,默认值为`'NCDHW'`。 + - **name** (str, 可选) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,缺省值为None。 + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle + import paddle.nn as nn + import numpy as np + paddle.disable_static() + + input_shape = (1, 1, 1, 2, 3) + pad = [1, 0, 1, 2, 0, 0] + data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1 + my_pad = nn.ConstantPad3d(padding=pad) + data = paddle.to_tensor(data) + result = my_pad(data) + print(result.numpy()) + # [[[[[0. 0. 0. 0.] + # [0. 1. 2. 3.] + # [0. 4. 5. 6.] + # [0. 0. 0. 0.] + # [0. 0. 0. 0.]]]]] diff --git a/doc/fluid/api_cn/nn_cn/Conv1D_cn.rst b/doc/fluid/api_cn/nn_cn/Conv1D_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c4273335337c0b600b685ba953c4588b5020a406 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/Conv1D_cn.rst @@ -0,0 +1,3 @@ +Conv1D +------------------------------- +**版本升级,文档正在开发中** diff --git a/doc/fluid/api_cn/nn_cn/Conv2d_cn.rst b/doc/fluid/api_cn/nn_cn/Conv2d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ffc23cb83e0769ed83a67ddd6ab3d41a8b44c4e1 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/Conv2d_cn.rst @@ -0,0 +1,103 @@ +Conv2d +------------------------------- + +.. py:class:: paddle.nn.Conv2d(in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, padding_mode='zeros', weight_attr=None, bias_attr=None, data_format="NCHW") + + + +**二维卷积层** + +该OP是二维卷积层(convolution2d layer),根据输入、卷积核、步长(stride)、填充(padding)、空洞大小(dilations)一组参数计算输出特征层大小。输入和输出是NCHW或NHWC格式,其中N是批尺寸,C是通道数,H是特征高度,W是特征宽度。卷积核是MCHW格式,M是输出图像通道数,C是输入图像通道数,H是卷积核高度,W是卷积核宽度。如果组数(groups)大于1,C等于输入图像通道数除以组数的结果。详情请参考UFLDL's : `卷积 `_ 。如果bias_attr不为False,卷积计算会添加偏置项。 + +对每个输入X,有等式: + +.. 
math:: + + Out = \sigma \left ( W * X + b \right ) + +其中: + - :math:`X` :输入值,NCHW或NHWC格式的4-D Tensor + - :math:`W` :卷积核值,MCHW格式的4-D Tensor + - :math:`*` :卷积操作 + - :math:`b` :偏置值,2-D Tensor,形状为 ``[M,1]`` + - :math:`\sigma` :激活函数 + - :math:`Out` :输出值,NCHW或NHWC格式的4-D Tensor, 和 ``X`` 的形状可能不同 + + +参数: + - **in_channels** (int) - 输入图像的通道数。 + - **out_channels** (int) - 由卷积操作产生的输出的通道数。 + - **kernel_size** (int|list|tuple) - 卷积核大小。可以为单个整数或包含两个整数的元组或列表,分别表示卷积核的高和宽。如果为单个整数,表示卷积核的高和宽都等于该整数。 + - **stride** (int|list|tuple,可选) - 步长大小。可以为单个整数或包含两个整数的元组或列表,分别表示卷积沿着高和宽的步长。如果为单个整数,表示沿着高和宽的步长都等于该整数。默认值:1。 + - **padding** (int|list|tuple|str,可选) - 填充大小。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有3种格式:(1)包含4个二元组:当 ``data_format`` 为"NCHW"时为 [[0,0], [0,0], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right]],当 ``data_format`` 为"NHWC"时为[[0,0], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right], [0,0]];(2)包含4个整数值:[padding_height_top, padding_height_bottom, padding_width_left, padding_width_right];(3)包含2个整数值:[padding_height, padding_width],此时padding_height_top = padding_height_bottom = padding_height, padding_width_left = padding_width_right = padding_width。若为一个整数,padding_height = padding_width = padding。默认值:0。 + - **dilation** (int|list|tuple,可选) - 空洞大小。可以为单个整数或包含两个整数的元组或列表,分别表示卷积核中的元素沿着高和宽的空洞。如果为单个整数,表示高和宽的空洞都等于该整数。默认值:1。 + - **groups** (int,可选) - 二维卷积层的组数。根据Alex Krizhevsky的深度卷积神经网络(CNN)论文中的成组卷积:当group=n,输入和卷积核分别根据通道数量平均分为n组,第一组卷积核和第一组输入进行卷积计算,第二组卷积核和第二组输入进行卷积计算,……,第n组卷积核和第n组输入进行卷积计算。默认值:1。 + - **padding_mode** (str, 可选): 填充模式。 包括 ``'zeros'``, ``'reflect'``, ``'replicate'`` 或者 ``'circular'``. 默认值: ``'zeros'`` . + - **weight_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr|bool,可选)- 指定偏置参数属性的对象。若 ``bias_attr`` 为bool类型,只支持为False,表示没有偏置参数。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 + + +属性 +:::::::::::: +.. py:attribute:: weight +本层的可学习参数,类型为 ``Parameter`` + +.. py:attribute:: bias +本层的可学习偏置,类型为 ``Parameter`` + +形状: + - 输入: :math:`(N, C_{in}, H_{in}, W_{in})` + - 输出: :math:`(N, C_{out}, H_{out}, W_{out})` + + 其中: + + .. math:: + H_{out} = \frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (kernel\_size[0] - 1) + 1))}{strides[0]} + 1 + + W_{out} = \frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (kernel\_size[1] - 1) + 1))}{strides[1]} + 1 + + 如果 ``padding`` = "SAME": + + .. math:: + H_{out} = \frac{(H_{in} + stride[0] - 1)}{stride[0]} + + .. math:: + W_{out} = \frac{(W_{in} + stride[1] - 1)}{stride[1]} + + 如果 ``padding`` = "VALID": + + .. math:: + H_{out} = \frac{\left ( H_{in} -\left ( dilation[0]*\left ( kernel\_size[0]-1 \right )+1 \right ) \right )}{stride[0]}+1 + + W_{out} = \frac{\left ( W_{in} -\left ( dilation[1]*\left ( kernel\_size[1]-1 \right )+1 \right ) \right )}{stride[1]}+1 + + +抛出异常: + - ``ValueError`` - 如果 ``data_format`` 既不是"NCHW"也不是"NHWC"。 + - ``ValueError`` - 如果 ``input`` 的通道数未被明确定义。 + - ``ValueError`` - 如果 ``padding`` 是字符串,既不是"SAME"也不是"VALID"。 + - ``ValueError`` - 如果 ``padding`` 含有4个二元组,与批尺寸对应维度的值不为0或者与通道对应维度的值不为0。 + - ``ShapeError`` - 如果输入不是4-D Tensor。 + - ``ShapeError`` - 如果输入和卷积核的维度大小不相同。 + - ``ShapeError`` - 如果输入的维度大小与 ``stride`` 之差不是2。 + - ``ShapeError`` - 如果输出的通道数不能被 ``groups`` 整除。 + + +**代码示例**: + +.. 
code-block:: python + + import numpy as np + import paddle + import paddle.nn as nn + x = np.random.uniform(-1, 1, (2, 4, 8, 8)).astype('float32') + + paddle.disable_static() + x_var = paddle.to_tensor(x) + conv = nn.Conv2d(4, 6, (3, 3)) + y_var = conv(x_var) + y_np = y_var.numpy() + print(y_np.shape) + + # (2, 6, 6, 6) diff --git a/doc/fluid/api_cn/nn_cn/Conv3d_cn.rst b/doc/fluid/api_cn/nn_cn/Conv3d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1f61e1d4e9f808a89ce2bddb74603744d74fdd47 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/Conv3d_cn.rst @@ -0,0 +1,110 @@ +Conv3d +------------------------------- + +.. py:class:: paddle.nn.Conv3d(in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, padding_mode='zeros', weight_attr=None, bias_attr=None, data_format="NCDHW") + + + +**三维卷积层** + +该OP是三维卷积层(convolution3D layer),根据输入、卷积核、步长(stride)、填充(padding)、空洞大小(dilations)一组参数计算得到输出特征层大小。输入和输出是NCDHW或NDWHC格式,其中N是批尺寸,C是通道数,D是特征层深度,H是特征层高度,W是特征层宽度。三维卷积(Convlution3D)和二维卷积(Convlution2D)相似,但多了一维深度信息(depth)。如果bias_attr不为False,卷积计算会添加偏置项。 + +对每个输入X,有等式: + +.. math:: + + Out = \sigma \left ( W * X + b \right ) + +其中: + - :math:`X` :输入值,NCDHW或NDHWC格式的5-D Tensor + - :math:`W` :卷积核值,MCDHW格式的5-D Tensor + - :math:`*` :卷积操作 + - :math:`b` :偏置值,2-D Tensor,形为 ``[M,1]`` + - :math:`\sigma` :激活函数 + - :math:`Out` :输出值, NCDHW或NDHWC格式的5-D Tensor,和 ``X`` 的形状可能不同 + +参数: + - **in_channels** (int) - 输入图像的通道数。 + - **out_channels** (int) - 由卷积操作产生的输出的通道数。 + - **kernel_size** (int|list|tuple) - 卷积核大小。可以为单个整数或包含三个整数的元组或列表,分别表示卷积核的深度,高和宽。如果为单个整数,表示卷积核的深度,高和宽都等于该整数。 + - **stride** (int|list|tuple,可选) - 步长大小。可以为单个整数或包含三个整数的元组或列表,分别表示卷积沿着深度,高和宽的步长。如果为单个整数,表示沿着高和宽的步长都等于该整数。默认值:1。 + - **padding** (int|list|tuple|str,可选) - 填充大小。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有3种格式:(1)包含5个二元组:当 ``data_format`` 为"NCDHW"时为 [[0,0], [0,0], [padding_depth_front, padding_depth_back], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right]],当 ``data_format`` 为"NDHWC"时为[[0,0], [padding_depth_front, padding_depth_back], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right], [0,0]];(2)包含6个整数值:[padding_depth_front, padding_depth_back, padding_height_top, padding_height_bottom, padding_width_left, padding_width_right];(3)包含3个整数值:[padding_depth, padding_height, padding_width],此时 padding_depth_front = padding_depth_back = padding_depth, padding_height_top = padding_height_bottom = padding_height, padding_width_left = padding_width_right = padding_width。若为一个整数,padding_depth = padding_height = padding_width = padding。默认值:0。 + - **dilation** (int|list|tuple,可选) - 空洞大小。可以为单个整数或包含三个整数的元组或列表,分别表示卷积核中的元素沿着深度,高和宽的空洞。如果为单个整数,表示深度,高和宽的空洞都等于该整数。默认值:1。 + - **groups** (int,可选) - 三维卷积层的组数。根据Alex Krizhevsky的深度卷积神经网络(CNN)论文中的成组卷积:当group=n,输入和卷积核分别根据通道数量平均分为n组,第一组卷积核和第一组输入进行卷积计算,第二组卷积核和第二组输入进行卷积计算,……,第n组卷积核和第n组输入进行卷积计算。默认值:1。 + - **padding_mode** (str, 可选): 填充模式。 包括 ``'zeros'``, ``'reflect'``, ``'replicate'`` 或者 ``'circular'``. 默认值: ``'zeros'`` . + - **weight_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr|bool,可选)- 指定偏置参数属性的对象。若 ``bias_attr`` 为bool类型,只支持为False,表示没有偏置参数。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCDHW"和"NDHWC"。N是批尺寸,C是通道数,D是特征深度,H是特征高度,W是特征宽度。默认值:"NCDHW"。 + + +属性 +:::::::::::: +.. 
py:attribute:: weight +本层的可学习参数,类型为 ``Parameter`` + +.. py:attribute:: bias +本层的可学习偏置,类型为 ``Parameter`` + +形状: + + - 输入::math:`(N, C_{in}, D_{in}, H_{in}, W_{in})` + - 输出::math:`(N, C_{out}, D_{out}, H_{out}, W_{out})` + + 其中 + + .. math:: + + D_{out} &= \frac{\left ( D_{in} + padding\_depth\_front + padding\_depth\_back-\left ( dilation[0]*\left ( kernel\_size[0]-1 \right )+1 \right ) \right )}{stride[0]}+1 + + H_{out} &= \frac{\left ( H_{in} + padding\_height\_top + padding\_height\_bottom-\left ( dilation[1]*\left ( kernel\_size[1]-1 \right )+1 \right ) \right )}{stride[1]}+1 + + W_{out} &= \frac{\left ( W_{in} + padding\_width\_left + padding\_width\_right -\left ( dilation[2]*\left ( kernel\_size[2]-1 \right )+1 \right ) \right )}{stride[2]}+1 + + 如果 ``padding`` = "SAME": + + .. math:: + D_{out} = \frac{(D_{in} + stride[0] - 1)}{stride[0]} + + H_{out} = \frac{(H_{in} + stride[1] - 1)}{stride[1]} + + W_{out} = \frac{(W_{in} + stride[2] - 1)}{stride[2]} + + 如果 ``padding`` = "VALID": + + .. math:: + D_{out} = \frac{\left ( D_{in} -\left ( dilation[0]*\left ( kernel\_size[0]-1 \right )+1 \right ) \right )}{stride[0]}+1 + + H_{out} = \frac{\left ( H_{in} -\left ( dilation[1]*\left ( kernel\_size[1]-1 \right )+1 \right ) \right )}{stride[1]}+1 + + W_{out} = \frac{\left ( W_{in} -\left ( dilation[2]*\left ( kernel\_size[2]-1 \right )+1 \right ) \right )}{stride[2]}+1 + +抛出异常: + - ``ValueError`` - 如果 ``data_format`` 既不是"NCDHW"也不是"NDHWC"。 + - ``ValueError`` - 如果 ``input`` 的通道数未被明确定义。 + - ``ValueError`` - 如果 ``padding`` 是字符串,既不是"SAME"也不是"VALID"。 + - ``ValueError`` - 如果 ``padding`` 含有5个二元组,与批尺寸对应维度的值不为0或者与通道对应维度的值不为0。 + - ``ShapeError`` - 如果输入不是5-D Tensor。 + - ``ShapeError`` - 如果输入和卷积核的维度大小不相同。 + - ``ShapeError`` - 如果输入的维度大小与 ``stride`` 之差不是2。 + - ``ShapeError`` - 如果输出的通道数不能被 ``groups`` 整除。 + + +**代码示例**: + +.. code-block:: python + + import numpy as np + + import paddle + import paddle.nn as nn + x = np.random.uniform(-1, 1, (2, 4, 8, 8, 8)).astype('float32') + + paddle.disable_static() + + x_var = paddle.to_tensor(x) + conv = nn.Conv3d(4, 6, (3, 3, 3)) + y_var = conv(x_var) + y_np = y_var.numpy() + print(y_np.shape) + + # (2, 6, 6, 6, 6) diff --git a/doc/fluid/api_cn/nn_cn/ConvTranspose2d_cn.rst b/doc/fluid/api_cn/nn_cn/ConvTranspose2d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7cf28d4fbeb6e4bc25f1080636f57696357c9985 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/ConvTranspose2d_cn.rst @@ -0,0 +1,106 @@ +ConvTranspose2d +------------------------------- + +.. py:class:: paddle.nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride=1, padding=0, output_padding=0, groups=1, dilation=1, weight_attr=None, bias_attr=None, data_format="NCHW") + + +二维转置卷积层(Convlution2d transpose layer) + +该层根据输入(input)、卷积核(kernel)和空洞大小(dilations)、步长(stride)、填充(padding)来计算输出特征层大小或者通过output_size指定输出特征层大小。输入(Input)和输出(Output)为NCHW或NHWC格式,其中N为批尺寸,C为通道数(channel),H为特征层高度,W为特征层宽度。卷积核是MCHW格式,M是输出图像通道数,C是输入图像通道数,H是卷积核高度,W是卷积核宽度。如果组数大于1,C等于输入图像通道数除以组数的结果。转置卷积的计算过程相当于卷积的反向计算。转置卷积又被称为反卷积(但其实并不是真正的反卷积)。欲了解转置卷积层细节,请参考下面的说明和 参考文献_ 。如果参数bias_attr不为False, 转置卷积计算会添加偏置项。 + +.. _参考文献: https://arxiv.org/pdf/1603.07285.pdf + + +输入 :math:`X` 和输出 :math:`Out` 函数关系如下: + +.. 
math:: + Out=\sigma (W*X+b)\\ + +其中: + - :math:`X` : 输入,具有NCHW或NHWC格式的4-D Tensor + - :math:`W` : 卷积核,具有NCHW格式的4-D Tensor + - :math:`*` : 卷积计算(注意:转置卷积本质上的计算还是卷积) + - :math:`b` : 偏置(bias),2-D Tensor,形状为 ``[M,1]`` + - :math:`σ` : 激活函数 + - :math:`Out` : 输出值,NCHW或NHWC格式的4-D Tensor, 和 ``X`` 的形状可能不同 + + +注意: + +如果output_size为None,则 :math:`H_{out}` = :math:`H^\prime_{out}` , :math:`W_{out}` = :math:`W^\prime_{out}` ;否则,指定的output_size_height(输出特征层的高) :math:`H_{out}` 应当介于 :math:`H^\prime_{out}` 和 :math:`H^\prime_{out} + strides[0]` 之间(不包含 :math:`H^\prime_{out} + strides[0]` ), 并且指定的output_size_width(输出特征层的宽) :math:`W_{out}` 应当介于 :math:`W^\prime_{out}` 和 :math:`W^\prime_{out} + strides[1]` 之间(不包含 :math:`W^\prime_{out} + strides[1]` )。 + +由于转置卷积可以当成是卷积的反向计算,而根据卷积的输入输出计算公式来说,不同大小的输入特征层可能对应着相同大小的输出特征层,所以对应到转置卷积来说,固定大小的输入特征层对应的输出特征层大小并不唯一。 + +如果指定了output_size, ``conv2d_transpose`` 可以自动计算卷积核的大小。 + +参数: + - **in_channels** (int) - 输入图像的通道数。 + - **out_channels** (int) - 卷积核的个数,和输出特征图通道数相同。 + - **kernel_size** (int|list|tuple) - 卷积核大小。可以为单个整数或包含两个整数的元组或列表,分别表示卷积核的高和宽。如果为单个整数,表示卷积核的高和宽都等于该整数。 + - **stride** (int|tuple, 可选) - 步长大小。如果 ``stride`` 为元组或列表,则必须包含两个整型数,分别表示垂直和水平滑动步长。否则,表示垂直和水平滑动步长均为 ``stride`` 。默认值:1。 + - **padding** (int|tuple, 可选) - 填充大小。如果 ``padding`` 为元组或列表,则必须包含两个整型数,分别表示竖直和水平边界填充大小。否则,表示竖直和水平边界填充大小均为 ``padding`` 。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考下方形状 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。默认值:0。 + - **output_padding** (int|list|tuple, optional): 输出形状上一侧额外添加的大小. 默认值: 0. + - **groups** (int, 可选) - 二维卷积层的组数。根据Alex Krizhevsky的深度卷积神经网络(CNN)论文中的分组卷积:当group=2,卷积核的前一半仅和输入特征图的前一半连接。卷积核的后一半仅和输入特征图的后一半连接。默认值:1。 + - **dilation** (int|tuple, 可选) - 空洞大小。可以为单个整数或包含两个整数的元组或列表,分别表示卷积核中的元素沿着高和宽的空洞。如果为单个整数,表示高和宽的空洞都等于该整数。默认值:1。 + - **weight_attr** (ParamAttr, 可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr|bool, 可选) - 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 + + +形状: + + - 输入::math:`(N,C_{in}, H_{in}, W_{in})` + + + - 输出::math:`(N,C_{out}, H_{out}, W_{out})` + + 其中 + + .. math:: + + & H'_{out} = (H_{in}-1)*strides[0] - pad\_height\_top - pad\_height\_bottom + dilations[0]*(kernel\_size[0]-1)+1\\ + & W'_{out} = (W_{in}-1)*strides[1]- pad\_width\_left - pad\_width\_right + dilations[1]*(kernel\_size[1]-1)+1 \\ + & H_{out}\in[H'_{out},H'_{out} + strides[0])\\ + & W_{out}\in[W'_{out},W'_{out} + strides[1])\\ + + 如果 ``padding`` = "SAME": + + .. math:: + & H'_{out} = \frac{(H_{in} + stride[0] - 1)}{stride[0]}\\ + & W'_{out} = \frac{(W_{in} + stride[1] - 1)}{stride[1]}\\ + + 如果 ``padding`` = "VALID": + + .. math:: + & H'_{out} = (H_{in}-1)*strides[0] + dilations[0]*(kernel\_size[0]-1)+1\\ + & W'_{out} = (W_{in}-1)*strides[1] + dilations[1]*(kernel\_size[1]-1)+1 \\ + +抛出异常: + - ``ValueError`` : 如果输入的shape、filter_size、stride、padding和groups不匹配,抛出ValueError + - ``ValueError`` - 如果 ``data_format`` 既不是"NCHW"也不是"NHWC"。 + - ``ValueError`` - 如果 ``padding`` 是字符串,既不是"SAME"也不是"VALID"。 + - ``ValueError`` - 如果 ``padding`` 含有4个二元组,与批尺寸对应维度的值不为0或者与通道对应维度的值不为0。 + - ``ValueError`` - 如果 ``output_size`` 和 ``filter_size`` 同时为None。 + - ``ShapeError`` - 如果输入不是4-D Tensor。 + - ``ShapeError`` - 如果输入和卷积核的维度大小不相同。 + - ``ShapeError`` - 如果输入的维度大小与 ``stride`` 之差不是2。 + +**代码示例** + +.. 
code-block:: python + + import numpy as np + import paddle + import paddle.nn as nn + + x = np.random.uniform(-1, 1, (2, 4, 8, 8)).astype('float32') + + paddle.disable_static() + + x_var = paddle.to_tensor(x) + conv = nn.ConvTranspose2d(4, 6, (3, 3)) + y_var = conv(x_var) + y_np = y_var.numpy() + print(y_np.shape) + + # (2, 6, 10, 10) diff --git a/doc/fluid/api_cn/nn_cn/ConvTranspose3d_cn.rst b/doc/fluid/api_cn/nn_cn/ConvTranspose3d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..67c8fb483903219f88b96d84b5032f61fd1df4f0 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/ConvTranspose3d_cn.rst @@ -0,0 +1,106 @@ +ConvTranspose3d +------------------------------- + +.. py:class:: paddle.nn.ConvTranspose3d(in_channels, out_channels, kernel_size, stride=1, padding=0, output_padding=0, groups=1, dilation=1, weight_attr=None, bias_attr=None, data_format="NCDHW") + + +三维转置卷积层(Convlution3d transpose layer) + +该层根据输入(input)、卷积核(kernel)和卷积核空洞大小(dilations)、步长(stride)、填充(padding)来计算输出特征层大小或者通过output_size指定输出特征层大小。输入(Input)和输出(Output)为NCDHW或者NDHWC格式。其中N为批尺寸,C为通道数(channel),D为特征深度,H为特征层高度,W为特征层宽度。转置卷积的计算过程相当于卷积的反向计算。转置卷积又被称为反卷积(但其实并不是真正的反卷积)。欲了解卷积转置层细节,请参考下面的说明和 参考文献_ 。如果参数bias_attr不为False, 转置卷积计算会添加偏置项。 + +.. _参考文献: http://www.matthewzeiler.com/wp-content/uploads/2017/07/cvpr2010.pdf + +输入 :math:`X` 和输出 :math:`Out` 函数关系如下: + +.. math:: + \\Out=\sigma (W*X+b)\\ + +其中: + - :math:`X` : 输入,具有NCDHW或NDHWC格式的5-D Tensor + - :math:`W` : 卷积核,具有NCDHW格式的5-D Tensor + - :math:`*` : 卷积操作(注意:转置卷积本质上的计算还是卷积) + - :math:`b` : 偏置(bias),2-D Tensor,形状为 ``[M,1]`` + - :math:`σ` : 激活函数 + - :math:`Out` : 输出值,NCDHW或NDHWC格式的5-D Tensor,和 ``X`` 的形状可能不同 + + +注意: + +如果output_size为None,则 :math:`H_{out}` = :math:`H^\prime_{out}` , :math:`W_{out}` = :math:`W^\prime_{out}` ;否则,指定的output_size_height(输出特征层的高) :math:`H_{out}` 应当介于 :math:`H^\prime_{out}` 和 :math:`H^\prime_{out} + strides[0]` 之间(不包含 :math:`H^\prime_{out} + strides[0]` ), 并且指定的output_size_width(输出特征层的宽) :math:`W_{out}` 应当介于 :math:`W^\prime_{out}` 和 :math:`W^\prime_{out} + strides[1]` 之间(不包含 :math:`W^\prime_{out} + strides[1]` )。 + +由于转置卷积可以当成是卷积的反向计算,而根据卷积的输入输出计算公式来说,不同大小的输入特征层可能对应着相同大小的输出特征层,所以对应到转置卷积来说,固定大小的输入特征层对应的输出特征层大小并不唯一。 + +如果指定了output_size, 该算子可以自动计算卷积核的大小。 + +参数: + - **in_channels** (int) - 输入图像的通道数。 + - **out_channels** (int) - 卷积核的个数,和输出特征图个数相同。 + - **kernel_size** (int|list|tuple) - 卷积核大小。可以为单个整数或包含三个整数的元组或列表,分别表示卷积核的深度,高和宽。如果为单个整数,表示卷积核的深度,高和宽都等于该整数。默认:None。output_size和kernel_size不能同时为None。 + - **stride** (int|tuple, 可选) - 步长大小。如果 ``stride`` 为元组或列表,则必须包含三个整型数,分别表示深度,垂直和水平滑动步长。否则,表示深度,垂直和水平滑动步长均为 ``stride`` 。默认值:1。 + - **padding** (int|tuple, 可选) - 填充大小。如果 ``padding`` 为元组或列表,则必须包含三个整型数,分别表示深度,竖直和水平边界填充大小。否则,表示深度,竖直和水平边界填充大小均为 ``padding`` 。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考下方形状 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。默认值:0。 + - **output_padding** (int|list|tuple, optional): 输出形状上一侧额外添加的大小. 默认值: 0. 
+ - **groups** (int, 可选) - 二维卷积层的组数。根据Alex Krizhevsky的深度卷积神经网络(CNN)论文中的分组卷积:当group=2,卷积核的前一半仅和输入特征图的前一半连接。卷积核的后一半仅和输入特征图的后一半连接。默认值:1。 + - **dilation** (int|tuple, 可选) - 空洞大小。可以为单个整数或包含三个整数的元组或列表,分别表示卷积核中的元素沿着深度,高和宽的空洞。如果为单个整数,表示深度,高和宽的空洞都等于该整数。默认值:1。 + - **weight_attr** (ParamAttr, 可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr|bool, 可选) - 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCDHW"。 + +形状: + + - 输入::math:`(N,C_{in}, H_{in}, W_{in})` + + + - 输出::math:`(N,C_{out}, H_{out}, W_{out})` + + 其中 + + .. math:: + + & D'_{out}=(D_{in}-1)*strides[0] - pad\_depth\_front - pad\_depth\_back + dilations[0]*(kernel\_size[0]-1)+1\\ + & H'_{out} = (H_{in}-1)*strides[1] - pad\_height\_top - pad\_height\_bottom + dilations[1]*(kernel\_size[1]-1)+1\\ + & W'_{out} = (W_{in}-1)*strides[2]- pad\_width\_left - pad\_width\_right + dilations[2]*(kernel\_size[2]-1)+1 \\ + & D_{out}\in[D'_{out},D'_{out} + strides[0])\\ + & H_{out}\in[H'_{out},H'_{out} + strides[1])\\ + & W_{out}\in[W'_{out},W'_{out} + strides[2])\\ + + 如果 ``padding`` = "SAME": + + .. math:: + & D'_{out} = \frac{(D_{in} + stride[0] - 1)}{stride[0]}\\ + & H'_{out} = \frac{(H_{in} + stride[1] - 1)}{stride[1]}\\ + & W'_{out} = \frac{(W_{in} + stride[2] - 1)}{stride[2]}\\ + + 如果 ``padding`` = "VALID": + + .. math:: + & D'_{out} = (D_{in}-1)*strides[0] + dilations[0]*(kernel\_size[0]-1)+1\\ + & H'_{out} = (H_{in}-1)*strides[1] + dilations[1]*(kernel\_size[1]-1)+1\\ + & W'_{out} = (W_{in}-1)*strides[2] + dilations[2]*(kernel\_size[2]-1)+1 \\ + +抛出异常: + - ``ValueError`` : 如果输入的shape、kernel_size、stride、padding和groups不匹配,抛出ValueError + - ``ValueError`` - 如果 ``data_format`` 既不是"NCHW"也不是"NHWC"。 + - ``ValueError`` - 如果 ``padding`` 是字符串,既不是"SAME"也不是"VALID"。 + - ``ValueError`` - 如果 ``padding`` 含有4个二元组,与批尺寸对应维度的值不为0或者与通道对应维度的值不为0。 + - ``ValueError`` - 如果 ``output_size`` 和 ``filter_size`` 同时为None。 + - ``ShapeError`` - 如果输入不是4-D Tensor。 + - ``ShapeError`` - 如果输入和卷积核的维度大小不相同。 + - ``ShapeError`` - 如果输入的维度大小与 ``stride`` 之差不是2。 + +**代码示例** + +.. code-block:: python + + import numpy as np + import paddle + import paddle.nn as nn + x = np.random.uniform(-1, 1, (2, 4, 8, 8, 8)).astype('float32') + + paddle.disable_static() + x_var = paddle.to_tensor(x) + conv = nn.ConvTranspose3d(4, 6, (3, 3, 3)) + y_var = conv(x_var) + y_np = y_var.numpy() + print(y_np.shape) + + # (2, 6, 10, 10, 10) diff --git a/doc/fluid/api_cn/nn_cn/CosineSimilarity_cn.rst b/doc/fluid/api_cn/nn_cn/CosineSimilarity_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a11bb606ad14afd7c91871c10b618e5255209bde --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/CosineSimilarity_cn.rst @@ -0,0 +1,35 @@ +.. _cn_api_nn_CosineSimilarity: + +CosineSimilarity +------------------------------- +.. py:class:: paddle.nn.CosineSimilarity(axis=1, eps=1e-8) + +**CosineSimilarity** + +计算x1与x2沿axis维度的余弦相似度。 + +参数: + - **axis** (int) - 指定计算的维度,会在该维度上计算余弦相似度,默认值为1。 + - **eps** (float) - 很小的值,防止计算时分母为0,默认值为1e-8。 + +返回:无 + +**代码示例** + +.. 
code-block:: python + + import paddle + import paddle.nn as nn + import numpy as np + paddle.disable_static() + + np.random.seed(0) + x1 = np.random.rand(2,3) + x2 = np.random.rand(2,3) + x1 = paddle.to_tensor(x1) + x2 = paddle.to_tensor(x2) + + cos_sim_func = nn.CosineSimilarity(axis=0) + result = cos_sim_func(x1, x2) + print(result.numpy()) + # [0.99806249 0.9817672 0.94987036] diff --git a/doc/fluid/api_cn/nn_cn/Dropout2D_cn.rst b/doc/fluid/api_cn/nn_cn/Dropout2D_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6b04409a2f1632652c58e66fbba82ddd9c851c99 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/Dropout2D_cn.rst @@ -0,0 +1,43 @@ +.. _cn_api_nn_Dropout2D: + +Dropout2D +------------------------------- + +.. py:function:: paddle.nn.Dropout2D(p=0.5, data_format='NCHW', name=None) + +根据丢弃概率 `p` ,在训练过程中随机将某些通道特征图置0(对一个形状为 `NCHW` 的4维张量,通道特征图指的是其中的形状为 `HW` 的2维特征图)。Dropout2D可以提高通道特征图之间的独立性。论文请参考: `Efficient Object Localization Using Convolutional Networks `_ + +在动态图模式下,请使用模型的 `eval()` 方法切换至测试阶段。 + +.. note:: + 对应的 `functional方法` 请参考: :ref:`cn_api_nn_functional_dropout2d` 。 + +参数 +::::::::: + - **p** (float): 将输入通道置0的概率, 即丢弃概率。默认: 0.5。 + - **data_format** (str): 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是 `NCHW` 和 `NHWC` 。其中 `N` 是批尺寸, `C` 是通道数, `H` 是特征高度, `W` 是特征宽度。默认值: `NCHW` 。 + - **name** (str,可选): 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +形状 +::::::::: + - **输入** : 4-D `Tensor` 。 + - **输出** : 4-D `Tensor` ,形状与输入相同。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + x = np.random.random(size=(2, 3, 4, 5)).astype('float32') + x = paddle.to_tensor(x) + m = paddle.nn.Dropout2D(p=0.5) + y_train = m(x) + m.eval() # switch the model to test phase + y_test = m(x) + print(x.numpy()) + print(y_train.numpy()) + print(y_test.numpy()) diff --git a/doc/fluid/api_cn/nn_cn/Dropout3D_cn.rst b/doc/fluid/api_cn/nn_cn/Dropout3D_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..56fc7b4fbbe1cd472a6fec368b685668d8808513 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/Dropout3D_cn.rst @@ -0,0 +1,43 @@ +.. _cn_api_nn_Dropout3D: + +Dropout3D +------------------------------- + +.. py:function:: paddle.nn.Dropout3D(p=0.5, data_format='NCDHW', name=None) + +根据丢弃概率 `p` ,在训练过程中随机将某些通道特征图置0(对一个形状为 `NCDHW` 的5维张量,通道特征图指的是其中的形状为 `DHW` 的3维特征图)。Dropout3D可以提高通道特征图之间的独立性。论文请参考: `Efficient Object Localization Using Convolutional Networks `_ + +在动态图模式下,请使用模型的 `eval()` 方法切换至测试阶段。 + +.. note:: + 对应的 `functional方法` 请参考: :ref:`cn_api_nn_functional_dropout3d` 。 + +参数 +::::::::: + - **p** (float): 将输入通道置0的概率, 即丢弃概率。默认: 0.5。 + - **data_format** (str): 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是 `NCDHW` 和 `NDHWC` 。其中 `N` 是批尺寸, `C` 是通道数, `D` 是特征深度, `H` 是特征高度, `W` 是特征宽度。默认值: `NCDHW` 。 + - **name** (str,可选): 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +形状 +::::::::: + - **输入** : 5-D `Tensor` 。 + - **输出** : 5-D `Tensor` ,形状与输入相同。 + +代码示例 +::::::::: + +.. 
code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + x = np.random.random(size=(2, 3, 4, 5, 6)).astype('float32') + x = paddle.to_tensor(x) + m = paddle.nn.Dropout3D(p=0.5) + y_train = m(x) + m.eval() # switch the model to test phase + y_test = m(x) + print(x.numpy()) + print(y_train.numpy()) + print(y_test.numpy()) diff --git a/doc/fluid/api_cn/nn_cn/Embedding_cn.rst b/doc/fluid/api_cn/nn_cn/Embedding_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b7e9b85897ce8f3a3fab0c67ebfeb529ff9b582a --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/Embedding_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_Embedding: + +Embedding +------------------------------- +:doc_source: paddle.fluid.dygraph.Embedding + + diff --git a/doc/fluid/api_cn/nn_cn/GradientClipByGlobalNorm_cn.rst b/doc/fluid/api_cn/nn_cn/GradientClipByGlobalNorm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3d3996ba299dc16b775990b2dfdc9cd0b15b04bb --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/GradientClipByGlobalNorm_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_GradientClipByGlobalNorm: + +GradientClipByGlobalNorm +------------------------------- +:doc_source: paddle.fluid.clip.GradientClipByGlobalNorm + + diff --git a/doc/fluid/api_cn/nn_cn/GradientClipByNorm_cn.rst b/doc/fluid/api_cn/nn_cn/GradientClipByNorm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..542ef51264478e78335376c5287c139588ba4454 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/GradientClipByNorm_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_GradientClipByNorm: + +GradientClipByNorm +------------------------------- +:doc_source: paddle.fluid.clip.GradientClipByNorm + + diff --git a/doc/fluid/api_cn/nn_cn/GradientClipByValue_cn.rst b/doc/fluid/api_cn/nn_cn/GradientClipByValue_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7c1be9d0de2b39a15e4147222ee811aa6917f186 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/GradientClipByValue_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_GradientClipByValue: + +GradientClipByValue +------------------------------- +:doc_source: paddle.fluid.clip.GradientClipByValue + + diff --git a/doc/fluid/api_cn/nn_cn/GroupNorm_cn.rst b/doc/fluid/api_cn/nn_cn/GroupNorm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4628093cc07d714743d24e26a1ef34832f4bca67 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/GroupNorm_cn.rst @@ -0,0 +1,41 @@ +.. _cn_api_nn_GroupNorm: + +GroupNorm +------------------------------- + +.. py:class:: paddle.nn.GroupNorm(num_groups, num_channels, epsilon=1e-05, weight_attr=None, bias_attr=None, data_layout='NCHW, 'name=None) + +**Group Normalization层** + +该接口用于构建 ``GroupNorm`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。其中实现了组归一化层的功能。更多详情请参考: `Group Normalization `_ 。 + +参数: + - **num_groups** (int) - 从通道中分离出来的 ``group`` 的数目。 + - **num_channels** (int) - 输入的通道数。 + - **epsilon** (float, 可选) - 为防止方差除零,增加一个很小的值。默认值:1e-05。 + - **weight_attr** (ParamAttr|bool, 可选) - 指定权重参数属性的对象。如果为False, 表示参数不学习。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr|bool, 可选) - 指定偏置参数属性的对象。如果为False, 表示参数不学习。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **data_format** (string, 可选) - 只支持“NCHW”(num_batches,channels,height,width)格式。默认值:“NCHW”。 + - **name** (string, 可选) – GroupNorm的名称, 默认值为None。更多信息请参见 :ref:`api_guide_Name` 。 + +返回:无 + +形状: + - input: 形状为(批大小,通道数, 高度,宽度)的4-D Tensor。 + - output: 和输入形状一样。 + +**代码示例** + +.. 
code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + np.random.seed(123) + x_data = np.random.random(size=(2, 6, 2, 2)).astype('float32') + x = paddle.to_tensor(x_data) + group_norm = paddle.nn.GroupNorm(num_channels=6, num_groups=6) + group_norm_out = group_norm(x) + + print(group_norm_out.numpy()) diff --git a/doc/fluid/api_cn/nn_cn/InstanceNorm1d_cn.rst b/doc/fluid/api_cn/nn_cn/InstanceNorm1d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c77698a5506d9483d091474b60430e18b8d4f45b --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/InstanceNorm1d_cn.rst @@ -0,0 +1,60 @@ +.. _cn_api_nn_InstanceNorm1d: + +InstanceNorm1d +------------------------------- + +.. py:class:: paddle.nn.InstanceNorm1d(num_features, epsilon=1e-05, momentum=0.9, weight_attr=None, bias_attr=None, track_running_stats=True, data_format="NCL", name=None): + + +该接口用于构建 ``InstanceNorm1d`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。可以处理2D或者3D的Tensor, 实现了实例归一化层(Instance Normalization Layer)的功能。更多详情请参考 : Instance Normalization: The Missing Ingredient for Fast Stylization . + +``input`` 是mini-batch的输入。 + +.. math:: + \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \quad &// mean \\ + \sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i - \mu_{\beta})^2 \quad &// variance \\ + \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\sigma_{\beta}^{2} + \epsilon}} \quad &// normalize \\ + y_i &\gets \gamma \hat{x_i} + \beta \quad &// scale-and-shift + + +Note: + `H` 是高度, `W` 是宽度. + + +参数: + - **num_features** (int) - 指明输入 ``Tensor`` 的通道数量。 + - **epsilon** (float, 可选) - 为了数值稳定加在分母上的值。默认值:1e-05。 + - **momentum** (float, 可选) - 此值用于计算 ``moving_mean`` 和 ``moving_var`` 。默认值:0.9。更新公式如上所示。 + - **weight_attr** (ParamAttr|bool, 可选) - 指定权重参数属性的对象。如果为False, 则表示每个通道的伸缩固定为1,不可改变。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 + - **bias_attr** (ParamAttr, 可选) - 指定偏置参数属性的对象。如果为False, 则表示每一个通道的偏移固定为0,不可改变。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 + - **track_running_stats** (bool, 可选) – 指示是否使用全局均值和方差。在训练时,设置为True表示在训练期间将保存全局均值和方差用于推理。推理时此属性只能设置为True。默认值:True。 + - **data_format** (string, 可选) - 指定输入数据格式,数据格式可以为“NC"或者"NCL"。默认值:“NCL”。 + - **name** (string, 可选) – InstanceNorm的名称, 默认值为None。更多信息请参见 :ref:`api_guide_Name` 。 + + +返回:无 + +形状: + - input: 形状为(批大小,通道数)的2-D Tensor 或(批大小, 通道数,长度)的3-D Tensor。 + - output: 和输入形状一样。 + +.. note:: +目前设置track_running_stats和momentum是无效的。之后的版本会修复此问题。 + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + np.random.seed(123) + x_data = np.random.random(size=(2, 2, 3)).astype('float32') + x = paddle.to_tensor(x_data) + instance_norm = paddle.nn.InstanceNorm1d(2) + instance_norm_out = instance_norm(x) + + print(instance_norm_out.numpy()) + diff --git a/doc/fluid/api_cn/nn_cn/InstanceNorm2d_cn.rst b/doc/fluid/api_cn/nn_cn/InstanceNorm2d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..cfb2571820a236e2991529841a984e8ae7a09ba3 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/InstanceNorm2d_cn.rst @@ -0,0 +1,60 @@ +.. _cn_api_nn_cn_InstanceNorm2d: + +InstanceNorm2d +------------------------------- + +.. py:class:: paddle.nn.InstanceNorm2d(num_features, epsilon=1e-05, momentum=0.9, weight_attr=None, bias_attr=None, track_running_stats=True, data_format="NCHW", name=None): + + +该接口用于构建 ``InstanceNorm2d`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。可以处理2D或者3D的Tensor, 实现了实例归一化层(Instance Normalization Layer)的功能。更多详情请参考 : Instance Normalization: The Missing Ingredient for Fast Stylization . 
+ +``input`` 是mini-batch的输入。 + +.. math:: + \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \quad &// mean \\ + \sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i - \mu_{\beta})^2 \quad &// variance \\ + \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\sigma_{\beta}^{2} + \epsilon}} \quad &// normalize \\ + y_i &\gets \gamma \hat{x_i} + \beta \quad &// scale-and-shift + +Note: + `H` 是高度, `W` 是宽度. + + +参数: + - **num_features** (int) - 指明输入 ``Tensor`` 的通道数量。 + - **epsilon** (float, 可选) - 为了数值稳定加在分母上的值。默认值:1e-05。 + - **momentum** (float, 可选) - 此值用于计算 ``moving_mean`` 和 ``moving_var`` 。默认值:0.9。更新公式如上所示。 + - **weight_attr** (ParamAttr|bool, 可选) - 指定权重参数属性的对象。如果为False, 则表示每个通道的伸缩固定为1,不可改变。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 + - **bias_attr** (ParamAttr, 可选) - 指定偏置参数属性的对象。如果为False, 则表示每一个通道的偏移固定为0,不可改变。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 + - **track_running_stats** (bool, 可选) – 指示是否使用全局均值和方差。在训练时,设置为True表示在训练期间将保存全局均值和方差用于推理。推理时此属性只能设置为True。默认值:True。 + - **data_format** (string, 可选) - 指定输入数据格式,数据格式可以为“NCHW"。默认值:“NCHW”。 + - **name** (string, 可选) – InstanceNorm的名称, 默认值为None。更多信息请参见 :ref:`api_guide_Name` 。 + + +返回:无 + +形状: + - input: 形状为(批大小,通道数,高度,宽度)的4-D Tensor。 + - output: 和输入形状一样。 + +.. note:: +目前设置track_running_stats和momentum是无效的。之后的版本会修复此问题。 + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + np.random.seed(123) + x_data = np.random.random(size=(2, 2, 2, 3)).astype('float32') + x = paddle.to_tensor(x_data) + instance_norm = paddle.nn.InstanceNorm2d(2) + instance_norm_out = instance_norm(x) + + print(instance_norm_out.numpy()) + + diff --git a/doc/fluid/api_cn/nn_cn/InstanceNorm3d_cn.rst b/doc/fluid/api_cn/nn_cn/InstanceNorm3d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e1c4ab4a9e547f99e7f8451fa6c2747887c9c10d --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/InstanceNorm3d_cn.rst @@ -0,0 +1,58 @@ +.. _cn_api_nn_cn_InstanceNorm3d: + +InstanceNorm3d +------------------------------- + +.. py:class:: paddle.nn.InstanceNorm3d(num_features, epsilon=1e-05, momentum=0.9, weight_attr=None, bias_attr=None, track_running_stats=True, data_format="NCDHW", name=None): + +该接口用于构建 ``InstanceNorm3d`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。可以处理5D的Tensor, 实现了实例归一化层(Instance Normalization Layer)的功能。更多详情请参考 : Instance Normalization: The Missing Ingredient for Fast Stylization . + +``input`` 是mini-batch的输入。 + +.. math:: + \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \quad &// mean \\ + \sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i - \mu_{\beta})^2 \quad &// variance \\ + \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\sigma_{\beta}^{2} + \epsilon}} \quad &// normalize \\ + y_i &\gets \gamma \hat{x_i} + \beta \quad &// scale-and-shift + +Note: + `H` 是高度, `W` 是宽度. 
+ + +参数: + - **num_features** (int) - 指明输入 ``Tensor`` 的通道数量。 + - **epsilon** (float, 可选) - 为了数值稳定加在分母上的值。默认值:1e-05。 + - **momentum** (float, 可选) - 此值用于计算 ``moving_mean`` 和 ``moving_var`` 。默认值:0.9。更新公式如上所示。 + - **weight_attr** (ParamAttr|bool, 可选) - 指定权重参数属性的对象。如果为False, 则表示每个通道的伸缩固定为1,不可改变。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 + - **bias_attr** (ParamAttr, 可选) - 指定偏置参数属性的对象。如果为False, 则表示每一个通道的偏移固定为0,不可改变。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 + - **track_running_stats** (bool, 可选) – 指示是否使用全局均值和方差。在训练时,设置为True表示在训练期间将保存全局均值和方差用于推理。推理时此属性只能设置为True。默认值:True。 + - **data_format** (string, 可选) - 指定输入数据格式,数据格式可以为"NCDHW"。默认值:“NCDHW”。 + - **name** (string, 可选) – InstanceNorm的名称, 默认值为None。更多信息请参见 :ref:`api_guide_Name` 。 + + +返回:无 + +形状: + - input: 形状为5-D Tensor。 + - output: 和输入形状一样。 + +.. note:: +目前设置track_running_stats和momentum是无效的。之后的版本会修复此问题。 + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + np.random.seed(123) + x_data = np.random.random(size=(2, 2, 2, 2, 3)).astype('float32') + x = paddle.to_tensor(x_data) + instance_norm = paddle.nn.InstanceNorm3d(2) + instance_norm_out = instance_norm(x) + + print(instance_norm_out.numpy()) + diff --git a/doc/fluid/api_cn/nn_cn/LayerList_cn.rst b/doc/fluid/api_cn/nn_cn/LayerList_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5546f5bb9155e442fc28ff04a5edc551f06775ef --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/LayerList_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_LayerList: + +LayerList +------------------------------- +:doc_source: paddle.fluid.dygraph.container.LayerList + + diff --git a/doc/fluid/api_cn/nn_cn/LayerNorm_cn.rst b/doc/fluid/api_cn/nn_cn/LayerNorm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6329fc658a85c632010a57ccbb5cfe671fbdb379 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/LayerNorm_cn.rst @@ -0,0 +1,54 @@ +.. _cn_api_nn_LayerNorm: + +LayerNorm +------------------------------- + +.. py:class:: paddle.nn.LayerNorm(normalized_shape, epsilon=1e-05, weight_attr=None, bias_attr=None, name=None) + +该接口用于构建 ``LayerNorm`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。其中实现了层归一化层(Layer Normalization Layer)的功能,其可以应用于小批量输入数据。更多详情请参考:`Layer Normalization `_ + +计算公式如下 + +.. math:: + \\\mu=\frac{1}{H}\sum_{i=1}^{H}x_i\\ + + \\\sigma=\sqrt{\frac{1}{H}\sum_i^H{(x_i-\mu)^2} + \epsilon}\\ + + \\y=f(\frac{g}{\sigma}(x-\mu) + b)\\ + +- :math:`x` : 该层神经元的向量表示 +- :math:`H` : 层中隐藏神经元个数 +- :math:`\epsilon` : 添加较小的值到方差中以防止除零 +- :math:`g` : 可训练的比例参数 +- :math:`b` : 可训练的偏差参数 + + +参数: + - **normalized_shape** (int 或 list 或 tuple) – 需规范化的shape,期望的输入shape为 ``[*, normalized_shape[0], normalized_shape[1], ..., normalized_shape[-1]]`` 。如果是单个整数,则此模块将在最后一个维度上规范化(此时最后一维的维度需与该参数相同)。 + - **epsilon** (float, 可选) - 指明在计算过程中是否添加较小的值到方差中以防止除零。默认值:1e-05。 + - **weight_attr** (ParamAttr|bool, 可选) - 指定权重参数属性的对象。如果为False固定为1,不进行学习。默认值为None, 表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr, 可选) - 指定偏置参数属性的对象。如果为False固定为0,不进行学习。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **name** (string, 可选) – LayerNorm的名称, 默认值为None。更多信息请参见 :ref:`api_guide_Name` 。 + +返回:无 + +形状: + - input: 2-D, 3-D, 4-D或5D 的Tensor。 + - output: 和输入形状一样。 + +**代码示例** + +.. 
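作为补充(非官方示例),可以先用 numpy 按上述公式直接验证 LayerNorm 的计算:对 ``normalized_shape`` 覆盖的全部维度求均值和方差后做标准化,这里假设 :math:`g=1` 、:math:`b=0` 的默认初始值。

.. code-block:: python

    import paddle
    import numpy as np

    paddle.disable_static()
    np.random.seed(123)
    x_data = np.random.random(size=(2, 2, 2, 3)).astype('float32')
    x = paddle.to_tensor(x_data)

    out = paddle.nn.LayerNorm(x_data.shape[1:])(x).numpy()

    # 对每个样本,在 normalized_shape 覆盖的所有维度上求均值和方差,epsilon 取默认值 1e-5
    flat = x_data.reshape(x_data.shape[0], -1)
    mu = flat.mean(axis=1).reshape(-1, 1, 1, 1)
    var = flat.var(axis=1).reshape(-1, 1, 1, 1)
    manual = (x_data - mu) / np.sqrt(var + 1e-5)

    print(np.allclose(out, manual, atol=1e-5))  # 若上述默认参数假设成立,预期输出 True

下面是官方代码示例。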
code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + np.random.seed(123) + x_data = np.random.random(size=(2, 2, 2, 3)).astype('float32') + x = paddle.to_tensor(x_data) + layer_norm = paddle.nn.LayerNorm(x_data.shape[1:]) + layer_norm_out = layer_norm(x) + + print(layer_norm_out.numpy()) + diff --git a/doc/fluid/api_cn/nn_cn/Layer_cn.rst b/doc/fluid/api_cn/nn_cn/Layer_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..357167c5276d083feea6a8e5c381dfeb4e33a395 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/Layer_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_Layer: + +Layer +------------------------------- +:doc_source: paddle.fluid.dygraph.layers.Layer + + diff --git a/doc/fluid/api_cn/nn_cn/Linear_cn.rst b/doc/fluid/api_cn/nn_cn/Linear_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..47aef1f7cf3f86d5e02eda4da802a000df8e1483 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/Linear_cn.rst @@ -0,0 +1,57 @@ +.. _cn_api_fluid_dygraph_Linear: + +Linear +------------------------------- + +.. py:class:: paddle.nn.Linear(input_dim, output_dim, param_attr=None, bias_attr=None, act=None, dtype='float32') + + +**线性变换层:** + +.. math:: + + \\Out = Act({XW + b})\\ + +其中,:math:`X` 为输入的 Tensor, :math:`W` 和 :math:`b` 分别为权重和偏置。 + +Linear 层只接受一个 Tensor 的输入。 +Linear 层将输入 Tensor 与权重矩阵 :math:`W` 相乘,然后生成形状为 :math:`[N,*,output_dim]` 的输出张量, +其中 :math:`N` 是批量大小,:math:`*` 表示任意数量的附加尺寸。 +如果 bias_attr 不是 None,则将创建一个 bias 变量并将其添加到输出中。 +最后,如果激活 act 不是 None,则相应激活函数也将应用于输出上。 + +参数: + - **input_dim** (int) – 线性变换层输入单元的数目。 + - **output_dim** (int) – 线性变换层输出单元的数目。 + - **param_attr** (ParamAttr, 可选) – 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr, 可选) – 指定偏置参数属性的对象,若 `bias_attr` 为bool类型,如果设置为False,表示不会为该层添加偏置;如果设置为True,表示使用默认的偏置参数属性。默认值为None,表示使用默认的偏置参数属性。默认的偏置参数属性将偏置参数的初始值设为0。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **act** (str, 可选) – 应用于输出上的激活函数,如tanh、softmax、sigmoid,relu等,支持列表请参考 :ref:`api_guide_activations` ,默认值为None。 + - **dtype** (str, 可选) – 权重的数据类型,可以为float32或float64。默认为float32。 + +返回:无 + +**代码示例** + +.. code-block:: python + + from paddle.fluid.dygraph.base import to_variable + import paddle + import paddle.fluid as fluid + import numpy as np + + data = np.random.uniform( -1, 1, [30, 10, 32] ).astype('float32') + with fluid.dygraph.guard(): + linear = paddle.nn.Linear(32, 64) + data = to_variable(data) + res = linear(data) # [30, 10, 64] + +属性 +:::::::::::: +.. py:attribute:: weight + +本层的可学习参数,类型为 ``Parameter`` + +.. py:attribute:: bias + +本层的可学习偏置,类型为 ``Parameter`` + diff --git a/doc/fluid/api_cn/nn_cn/MaxPool1d_cn.rst b/doc/fluid/api_cn/nn_cn/MaxPool1d_cn.rst new file mode 100755 index 0000000000000000000000000000000000000000..e6cddc468a33abc836c54038ed9f8b1c78dc7b3a --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/MaxPool1d_cn.rst @@ -0,0 +1,62 @@ +.. _cn_api_nn_MaxPool1d: + + +MaxPool1d +------------------------------- + +.. py:function:: paddle.nn.MaxPool1d(kernel_size, stride=None, padding=0, return_indices=False, ceil_mode=False, name=None) + +该算子根据输入 `x` , `kernel_size` 等参数对一个输入Tensor计算1D的最大值池化。输入和输出都是3-D Tensor, +默认是以 `NCL` 格式表示的,其中 `N` 是 batch size, `C` 是通道数, `L` 是输入特征的长度。 + +假设输入形状是(N, C, L),输出形状是 (N, C, L_{out}),卷积核尺寸是k, 1d最大值池化计算公式如下: + +.. math:: + + Output(N_i, C_i, l) = max(Input[N_i, C_i, stride \times l:stride \times l+k]) + +参数 +::::::::: + - **kernel_size** (int|list|tuple): 池化核的尺寸大小. 
如果kernel_size为list或tuple类型, 其必须包含一个整数, 最终池化核的大小为该数值。 + - **stride** (int|list|tuple): 池化操作步长. 如果stride为list或tuple类型, 其必须包含一个整数,最终池化操作的步长为该数值。 + - **padding** (string|int|list|tuple): 池化补零的方式. 如果padding是一个字符串,则必须为 `SAME` 或者 `VALID` 。 如果是turple或者list类型, 则应是 `[pad_left, pad_right]` 形式。如果padding是一个非0值,那么表示会在输入的两端都padding上同样长度的0。 + - **return_indices** (bool): 是否返回最大值的索引,默认为False。 + - **ceil_mode** (bool): 是否用ceil函数计算输出的height和width,如果设置为False, 则使用floor函数来计算,默认为False。 + - **name** (str,可选): 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + + +形状 +::::::::: + - **x** (Tensor): 默认形状为(批大小,通道数,长度),即NCL格式的3-D Tensor。 其数据类型为float32或float64. + - **output** (Tensor): 默认形状为(批大小,通道数,输出特征长度),即NCL格式的3-D Tensor。 其数据类型与输入x相同。 + +返回 +::::::::: +计算MaxPool1d的可调用对象 + +抛出异常 +::::::::: + - ``ValueError`` - 如果 ``padding`` 是字符串但不是 "SAME" 和 "VALID" 。 + - ``ValueError`` - 如果 ``padding`` 是 "VALID" 但 `ceil_mode` 被设置为True。 + - ``ValueError`` - 如果 ``padding`` 是一个长度大于1的list或turple。 + - ``ShapeError`` - 如果输入x不是一个3-D Tensor。 + - ``ShapeError`` - 如果计算得到的输出形状小于等于0。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import paddle.nn as nn + import numpy as np + paddle.disable_static() + + data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32)) + MaxPool1d = nn.layer.MaxPool1d(kernel_size=2, stride=2, padding=0) + pool_out = MaxPool1d(data) + # pool_out shape: [1, 3, 16] + + MaxPool1d = nn.layer.MaxPool1d(kernel_size=2, stride=2, padding=0, return_indices=True) + pool_out, indices = MaxPool1d(data) + # pool_out shape: [1, 3, 16], indices shape: [1, 3, 16] diff --git a/doc/fluid/api_cn/nn_cn/PairwiseDistance_cn.rst b/doc/fluid/api_cn/nn_cn/PairwiseDistance_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8f9e38dfe6b9b550189d78d53be8fb85dd7a5f03 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/PairwiseDistance_cn.rst @@ -0,0 +1,39 @@ +.. _cn_api_nn_PairwiseDistance: + +PairwiseDistance +------------------------------- + +.. py:class:: paddle.nn.PairwiseDistance(p=2., epsilon=1e-6, keepdim=False, name=None) + +该OP计算两个向量(输入 ``x``、``y`` )之间pairwise的距离。该距离通过p范数计算: + + .. math:: + + \Vert x \Vert _p = \left( \sum_{i=1}^n \vert x_i \vert ^ p \right ) ^ {1/p}. + +参数 +:::::::: + - **p** (float,可选)- 指定p阶的范数。默认值为2。 + - **epsilon** (float,可选)- 添加到分母的一个很小值,避免发生除零错误。默认值为1e-6。 + - **keepdim** (bool,可选)- 是否保留输出张量减少的维度。输出结果相对于 ``|x-y|`` 的结果减少一维,除非 :attr:`keepdim` 为True,默认值为False。 + - **name** (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +形状 +:::::::: + - **x** (Tensor) - :math:`(N, D)` ,其中D是向量的维度,数据类型为float32或float64。 + - **y** (Tensor) - :math:`(N, D)` ,与 ``x`` 的形状、数据类型相同。 + - **output** (Tensor) - :math:`(N)` ,如果 :attr:`keepdim` 为True,则形状为 :math:`(N, 1)` 。数据类型与 ``x``、 ``y`` 相同。 + +代码示例 +:::::::: + +.. code-block:: python + + import paddle + paddle.disable_static() + x = paddle.to_tensor([[1., 3.], [3., 5.]], dtype='float64') + y = paddle.to_tensor([[5., 6.], [7., 8.]], dtype='float64') + dist = paddle.nn.PairwiseDistance() + distance = dist(x, y) + print(distance.numpy()) # [5. 5.] + diff --git a/doc/fluid/api_cn/nn_cn/ParameterList_cn.rst b/doc/fluid/api_cn/nn_cn/ParameterList_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..fbfecd9374c75e2c3e723f59b54703452f113dd6 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/ParameterList_cn.rst @@ -0,0 +1,7 @@ +.. 
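(补充说明,非官方示例,承接上文 ``PairwiseDistance`` )p 范数距离也可以按公式直接用 numpy 计算,结果应与上面示例中的 ``[5. 5.]`` 一致(这里忽略极小的 ``epsilon`` 项)。

.. code-block:: python

    import numpy as np

    x = np.array([[1., 3.], [3., 5.]])
    y = np.array([[5., 6.], [7., 8.]])

    # p=2 时即逐行计算欧氏距离
    diff = x - y
    print(np.sqrt((diff ** 2).sum(axis=1)))  # [5. 5.]

下文继续是 ParameterList 的文档。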
_cn_api_nn_cn_ParameterList: + +ParameterList +------------------------------- +:doc_source: paddle.fluid.dygraph.container.ParameterList + + diff --git a/doc/fluid/api_cn/nn_cn/Pool2D_cn.rst b/doc/fluid/api_cn/nn_cn/Pool2D_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b059976544f8d23d5f6d34af9dca70cb397602f1 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/Pool2D_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_Pool2D: + +Pool2D +------------------------------- +:doc_source: paddle.fluid.dygraph.Pool2D + + diff --git a/doc/fluid/api_cn/nn_cn/ReflectionPad1d_cn.rst b/doc/fluid/api_cn/nn_cn/ReflectionPad1d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..bba4b39464fe5e8bdb4ee0626d912ad8f9e2e858 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/ReflectionPad1d_cn.rst @@ -0,0 +1,35 @@ +.. _cn_api_nn_ReflectionPad1d: + +ReflectionPad1d +------------------------------- +.. py:class:: paddle.nn.ReflectionPad1d(padding, data_format="NCL", name=None) + +**ReflectionPad1d** + +按照 padding 对输入 以reflection模式进行 ``pad``,即填充以输入边界值为轴的映射 。 + +参数: + - **padding** (Tensor | List[int32]) - 填充大小。pad的格式为[pad_left, pad_right]。 + - **data_format** (str) - 指定input的format,可为 `'NCL'` 或者 `'NLC'`,默认值为`'NCL'`。 + - **name** (str, 可选) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,缺省值为None。 + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle + import paddle.nn as nn + import numpy as np + paddle.disable_static() + + input_shape = (1, 2, 3) + pad = [1, 2] + data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1 + my_pad = nn.ReflectionPad1d(padding=pad) + data = paddle.to_tensor(data) + result = my_pad(data) + print(result.numpy()) + # [[[2. 1. 2. 3. 2. 1.] + # [5. 4. 5. 6. 5. 4.]]] diff --git a/doc/fluid/api_cn/nn_cn/ReflectionPad2d_cn.rst b/doc/fluid/api_cn/nn_cn/ReflectionPad2d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..35fdb4bc0f2af5d55629ba5cb9f101dd8f3f2150 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/ReflectionPad2d_cn.rst @@ -0,0 +1,40 @@ +.. _cn_api_nn_ReflectionPad2d: + +ReflectionPad2d +------------------------------- +.. py:class:: paddle.nn.ReflectionPad2d(padding, data_format="NCHW", name=None) + +**ReflectionPad2d** + +按照 padding 对输入 以reflection模式进行 ``pad``,即填充以输入边界值为轴的映射 。 + +参数: + - **padding** (Tensor | List[int32]) - 填充大小。pad的格式为[pad_left, pad_right, pad_top, pad_bottom]。。 + - **data_format** (str) - 指定input的format,可为 `'NCHW'` 或者 `'NHWC'`,默认值为`'NCHW'`。 + - **name** (str, 可选) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,缺省值为None。 + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle + import paddle.nn as nn + import numpy as np + paddle.disable_static() + + input_shape = (1, 1, 4, 3) + pad = [1, 0, 1, 2] + data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1 + my_pad = nn.ReflectionPad2d(padding=pad) + data = paddle.to_tensor(data) + result = my_pad(data) + print(result.numpy()) + # [[[[ 5. 4. 5. 6.] + # [ 2. 1. 2. 3.] + # [ 5. 4. 5. 6.] + # [ 8. 7. 8. 9.] + # [11. 10. 11. 12.] + # [ 8. 7. 8. 9.] + # [ 5. 4. 5. 6.]]]] \ No newline at end of file diff --git a/doc/fluid/api_cn/nn_cn/ReplicationPad1d_cn.rst b/doc/fluid/api_cn/nn_cn/ReplicationPad1d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..aade886104421dcec986096a6288846f54828576 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/ReplicationPad1d_cn.rst @@ -0,0 +1,35 @@ +.. _cn_api_nn_ReplicationPad1d: + +ReplicationPad1d +------------------------------- +.. 
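(补充说明,非官方示例,承接上文的 ReflectionPad 系列)reflection 填充与 numpy 的 ``reflect`` 模式含义一致,可用下面的代码对照上文 ``ReflectionPad1d`` 示例的输出。

.. code-block:: python

    import numpy as np

    # 与上文 ReflectionPad1d 示例相同的输入:形状 (1, 2, 3),pad=[1, 2]
    data = np.arange(6, dtype=np.float32).reshape(1, 2, 3) + 1
    padded = np.pad(data, ((0, 0), (0, 0), (1, 2)), mode='reflect')
    print(padded)
    # [[[2. 1. 2. 3. 2. 1.]
    #   [5. 4. 5. 6. 5. 4.]]]

下文继续是 ReplicationPad1d 的文档。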
py:class:: paddle.nn.ReplicationPad1d(padding, data_format="NCL", name=None) + +**ReplicationPad1d** + +按照 padding 对输入 以replicate模式进行 ``pad``,即填充输入的边界值。 + +参数: + - **padding** (Tensor | List[int32]) - 填充大小。pad的格式为[pad_left, pad_right]。 + - **data_format** (str) - 指定input的format,可为 `'NCL'` 或者 `'NLC'`,默认值为`'NCL'`。 + - **name** (str, 可选) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,缺省值为None。 + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle + import paddle.nn as nn + import numpy as np + paddle.disable_static() + + input_shape = (1, 2, 3) + pad = [1, 2] + data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1 + my_pad = nn.ReplicationPad1d(padding=pad) + data = paddle.to_tensor(data) + result = my_pad(data) + print(result.numpy()) + # [[[1. 1. 2. 3. 3. 3.] + # [4. 4. 5. 6. 6. 6.]]] diff --git a/doc/fluid/api_cn/nn_cn/ReplicationPad2d_cn.rst b/doc/fluid/api_cn/nn_cn/ReplicationPad2d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..97b41a220b9ccfe3813cdcfd26029126d4b4be22 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/ReplicationPad2d_cn.rst @@ -0,0 +1,38 @@ +.. _cn_api_nn_ReplicationPad2d: + +ReplicationPad2d +------------------------------- +.. py:class:: paddle.nn.ReplicationPad2d(padding, data_format="NCHW", name=None) + +**ReplicationPad2d** + +按照 padding 对输入 以replicate模式进行 ``pad``,即填充输入的边界值。 + +参数: + - **padding** (Tensor | List[int32]) - 填充大小。pad的格式为[pad_left, pad_right, pad_top, pad_bottom]。 + - **data_format** (str) - 指定input的format,可为 `'NCHW'` 或者 `'NHWC'`,默认值为`'NCHW'`。 + - **name** (str, 可选) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,缺省值为None。 + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle + import paddle.nn as nn + import numpy as np + paddle.disable_static() + + input_shape = (1, 1, 2, 3) + pad = [1, 0, 1, 2] + data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1 + my_pad = nn.ReplicationPad2d(padding=pad) + data = paddle.to_tensor(data) + result = my_pad(data) + print(result.numpy()) + # [[[[1. 1. 2. 3.] + # [1. 1. 2. 3.] + # [4. 4. 5. 6.] + # [4. 4. 5. 6.] + # [4. 4. 5. 6.]]]] diff --git a/doc/fluid/api_cn/nn_cn/ReplicationPad3d_cn.rst b/doc/fluid/api_cn/nn_cn/ReplicationPad3d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4d379c21297d0602c97cfeba3f81e06585cc1026 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/ReplicationPad3d_cn.rst @@ -0,0 +1,38 @@ +.. _cn_api_nn_ReplicationPad3d: + +ReplicationPad3d +------------------------------- +.. py:class:: paddle.nn.ReplicationPad3d(padding, data_format="NCDHW", name=None) + +**ReplicationPad3d** + +按照 padding 对输入 以replicate模式进行 ``pad``,即填充输入的边界值。 + +参数: + - **padding** (Tensor | List[int32]) - 填充大小。pad的格式为[pad_left, pad_right, pad_top, pad_bottom, pad_front, pad_back]。 + - **data_format** (str) - 指定input的format,可为 `'NCDHW'` 或者 `'NDHWC'`,默认值为`'NCDHW'`。 + - **name** (str, 可选) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,缺省值为None。 + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle + import paddle.nn as nn + import numpy as np + paddle.disable_static() + + input_shape = (1, 1, 1, 2, 3) + pad = [1, 0, 1, 2, 0, 0] + data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1 + my_pad = nn.ReplicationPad3d(padding=pad) + data = paddle.to_tensor(data) + result = my_pad(data) + print(result.numpy()) + # [[[[[1. 1. 2. 3.] + # [1. 1. 2. 3.] + # [4. 4. 5. 6.] + # [4. 4. 5. 6.] + # [4. 4. 5. 
6.]]]]] diff --git a/doc/fluid/api_cn/nn_cn/Sequential_cn.rst b/doc/fluid/api_cn/nn_cn/Sequential_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ad9eaa147ea2c48c364f612401d7847919eeb540 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/Sequential_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_Sequential: + +Sequential +------------------------------- +:doc_source: paddle.fluid.dygraph.container.Sequential + + diff --git a/doc/fluid/api_cn/nn_cn/SpectralNorm_cn.rst b/doc/fluid/api_cn/nn_cn/SpectralNorm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..bcc69f819beef2ee9b7c197fb1c710e1ca64c405 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/SpectralNorm_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_SpectralNorm: + +SpectralNorm +------------------------------- +:doc_source: paddle.fluid.dygraph.SpectralNorm + + diff --git a/doc/fluid/api_cn/nn_cn/SyncBatchNorm_cn.rst b/doc/fluid/api_cn/nn_cn/SyncBatchNorm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6e6b4615c7db330eaf0379762ffd61d0022680d2 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/SyncBatchNorm_cn.rst @@ -0,0 +1,88 @@ +.. _cn_api_nn_SyncBatchNorm: + +SyncBatchNorm +------------------------------- + +.. py:class:: paddle.nn.SyncBatchNorm(num_features, epsilon=1e-5, momentum=0.9, track_running_stats=True, weight_attr=None, bias_attr=None, data_format='NCHW', name=None) + +该接口用于构建 ``SyncBatchNorm`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。实现了跨卡GPU同步的批归一化(Cross-GPU Synchronized Batch Normalization Layer)的功能,可用在其他层(类似卷积层和全连接层)之后进行归一化操作。根据所有GPU同一批次的数据按照通道计算的均值和方差进行归一化。更多详情请参考 : `Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift `_ + +当模型处于训练模式时,:math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 是所有GPU上同一minibatch的统计数据。计算公式如下: + +.. math:: + \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \quad &// mini-batch-mean \\ + \sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i - \mu_{\beta})^2 \quad &// mini-batch-variance \\ + +- :math:`x` : 所有GPU上同一批输入数据 +- :math:`m` : 所有GPU上同一批次数据的大小 + +当模型处于评估模式时,:math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 是全局(或运行)统计数据(moving_mean和moving_variance, 这两个统计量通常来自预先训练好的模型)。计算公式如下: + +.. math:: + + moving\_mean = moving\_mean * momentum + \mu_{\beta} * (1. - momentum) \quad &// global mean \\ + moving\_variance = moving\_variance * momentum + \sigma_{\beta}^{2} * (1. - momentum) \quad &// global variance \\ + +归一化函数公式如下: + +.. math:: + + \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\sigma_{\beta}^{2} + \epsilon}} \quad &// normalize \\ + y_i &\gets \gamma \hat{x_i} + \beta \quad &// scale-and-shift \\ + +- :math:`\epsilon` : 添加较小的值到方差中以防止除零 +- :math:`\gamma` : 可训练的比例参数 +- :math:`\beta` : 可训练的偏差参数 + +参数: + - **num_features** (int) - 指明输入 ``Tensor`` 的通道数量。 + - **epsilon** (float, 可选) - 为了数值稳定加在分母上的值。默认值:1e-05。 + - **momentum** (float, 可选) - 此值用于计算 ``moving_mean`` 和 ``moving_var`` 。默认值:0.9。更新公式如上所示。 + - **weight_attr** (ParamAttr|bool, 可选) - 指定权重参数属性的对象。如果设置为 ``False`` ,则表示本层没有可训练的权重参数。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr|bool, 可选) - 指定偏置参数属性的对象。如果设置为 ``False`` ,则表示本层没有可训练的偏置参数。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **track_running_stats** (bool, 可选)- 是否计算全局均值和方差。默认: ``True`` ,表示计算全局均值和方差。 + +形状: + - input: 一个二维到五维的 ``Tensor`` 。 + - output: 和input 相同形状的 ``Tensor`` 。 + +**代码示例** + +.. 
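作为补充(非官方示例),单卡情形下同步批归一化使用的统计量就是这一批数据按通道计算的均值和方差,可以用 numpy 按上述公式直接复算(假设 :math:`\gamma=1` 、:math:`\beta=0` 的默认初始值),其结果应与下面官方示例打印的数值一致。

.. code-block:: python

    import numpy as np

    x = np.array([[[[0.3, 0.4], [0.3, 0.07]],
                   [[0.83, 0.37], [0.18, 0.93]]]]).astype('float32')

    # 按通道(除 axis=1 以外的维度)计算 mini-batch 均值和方差后做标准化,epsilon 取默认值 1e-5
    mu = x.mean(axis=(0, 2, 3), keepdims=True)
    var = x.var(axis=(0, 2, 3), keepdims=True)
    print((x - mu) / np.sqrt(var + 1e-5))
    # 与下方官方示例打印的数值一致(约 0.2682、1.0936、0.2682、-1.6301、0.8096、-0.6653、-1.2745、1.1302)

下面是官方代码示例。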
code-block:: python + + import paddle + import paddle.nn as nn + import numpy as np + x = np.array([[[[0.3, 0.4], [0.3, 0.07]], [[0.83, 0.37], [0.18, 0.93]]]]).astype('float32') + paddle.disable_static() + x = paddle.to_tensor(x) + if paddle.fluid.is_compiled_with_cuda(): + sync_batch_norm = nn.SyncBatchNorm(2) + hidden1 = sync_batch_norm(x) + print(hidden1.numpy()) + # [[[[0.26824948, 1.0936325],[0.26824948, -1.6301316]],[[ 0.8095662, -0.665287],[-1.2744656, 1.1301866 ]]]] + + +方法 +::::::::: +convert_sync_batchnorm(layer) +''''''''''''''''''''''''''''' + +该接口用于把 ``BatchNorm*d`` 层转换为 ``SyncBatchNorm`` 层。 + +参数: + - **layer** (paddle.nn.Layer) - 包含一个或多个 ``BatchNorm*d`` 层的模型。 + +返回: + 如果原始模型中有 ``BatchNorm*d`` 层, 则把 ``BatchNorm*d`` 层转换为 ``SyncBatchNorm`` 层的原始模型。 + +**代码示例** + +.. code-block:: python + + import paddle + import paddle.nn as nn + paddle.disable_static() + model = nn.Sequential(nn.Conv2d(3, 5, 3), nn.BatchNorm2d(5)) + sync_model = nn.SyncBatchNorm.convert_sync_batchnorm(model) + diff --git a/doc/fluid/api_cn/nn_cn/Upsample_cn.rst b/doc/fluid/api_cn/nn_cn/Upsample_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3038eb1827983331dfac7c4c7ab43381587e8ae5 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/Upsample_cn.rst @@ -0,0 +1,194 @@ +.. _cn_api_paddle_nn_UpSample: + +UpSample +------------------------------- + +.. py:class:: paddle.nn.UpSample(out_shape=None, scale=None, resample='BILINEAR', actual_shape=None, align_corners=True, align_mode=1, data_format='NCHW') + +**注意:** 参数 ``actual_shape`` 将被弃用,请使用 ``out_shape`` 替代。 + +该OP用于调整一个batch中图片的大小。 + +输入为3-D Tensor时形状为(num_batches, channels, in_w),输入为4-D Tensor时形状为(num_batches, channels, in_h, in_w)或者(num_batches, in_h, in_w, channels),输入为5-D Tensor时形状为(num_batches, channels, in_d, in_h, in_w)或者(num_batches, in_d, in_h, in_w, channels),并且调整大小只适用于深度,高度和宽度对应的维度。 + +支持的插值方法: + NEAREST:最近邻插值 + + LINEAR:线性插值 + + BILINEAR:双线性插值 + + TRALINEAR:三线性插值 + + BICUBIC:双三次插值 + + +最近邻插值是在输入张量的高度和宽度上进行最近邻插值。 + +线性插值是用一条线连接两个已知量来确定两个已知量之间的一个未知量的值的方法。 + +双线性插值是线性插值的扩展,用于在直线2D网格上插值两个变量(例如,该操作中的H方向和W方向)的函数。 关键思想是首先在一个方向上执行线性插值,然后在另一个方向上再次执行线性插值。 + +三线插值是线性插值的一种扩展,是3参数的插值方程(比如op里的D,H,W方向),在三个方向上进行线性插值。 + +双三次插值是在二维网格上对数据点进行插值的三次插值的扩展,它能创造出比双线性和最近临插值更为光滑的图像边缘。 + +Align_corners和align_mode是可选参数,插值的计算方法可以由它们选择。 + +示例: + +:: + + For scale: + + if align_corners = True && out_size > 1 : + + scale_factor = (in_size-1.0)/(out_size-1.0) + + else: + + scale_factor = float(in_size/out_size) + + + Nearest neighbor interpolation: + + if: + align_corners = False + + input : (N,C,H_in,W_in) + output: (N,C,H_out,W_out) where: + + H_out = \left \lfloor {H_{in} * scale_{}factor}} \right \rfloor + W_out = \left \lfloor {W_{in} * scale_{}factor}} \right \rfloor + + else: + align_corners = True + + input : (N,C,H_in,W_in) + output: (N,C,H_out,W_out) where: + + H_out = round(H_{in} * scale_{factor}) + W_out = round(W_{in} * scale_{factor}) + + Bilinear interpolation: + + if: + align_corners = False , align_mode = 0 + + input : (N,C,H_in,W_in) + output: (N,C,H_out,W_out) where: + + H_out = (H_{in}+0.5) * scale_{factor} - 0.5 + W_out = (W_{in}+0.5) * scale_{factor} - 0.5 + + + else: + + input : (N,C,H_in,W_in) + output: (N,C,H_out,W_out) where: + + H_out = H_{in} * scale_{factor} + W_out = W_{in} * scale_{factor} + + Bicubic interpolation: + + if: + align_corners = False + + input : (N,C,H_in,W_in) + output: (N,C,H_out,W_out) where: + + H_out = (H_{in}+0.5) * scale_{factor} - 0.5 + W_out = (W_{in}+0.5) * scale_{factor} - 0.5 + + else: + + input : 
(N,C,H_in,W_in) + output: (N,C,H_out,W_out) where: + + H_out = H_{in} * scale_{factor} + W_out = W_{in} * scale_{factor} + + Trilinear interpolation: + + if: + align_corners = False , align_mode = 0 + + input : (N,C,D_in,H_in,W_in) + output: (N,C,D_out,H_out,W_out) where: + + D_out = (D_{in}+0.5) * scale_{factor} - 0.5 + H_out = (H_{in}+0.5) * scale_{factor} - 0.5 + W_out = (W_{in}+0.5) * scale_{factor} - 0.5 + + + else: + + input : (N,C,D_in,H_in,W_in) + output: (N,C,D_out,H_out,W_out) where: + + D_out = D_{in} * scale_{factor} + H_out = H_{in} * scale_{factor} + W_out = W_{in} * scale_{factor} + + +有关最近邻插值的详细信息,请参阅维基百科: +https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation + +有关线性插值的详细信息,请参阅维基百科: +https://en.wikipedia.org/wiki/Linear_interpolation + +有关双线性插值的详细信息,请参阅维基百科: +https://en.wikipedia.org/wiki/Bilinear_interpolation + +有关三线插值的详细信息,请参阅维基百科: +https://en.wikipedia.org/wiki/Trilinear_interpolation + +有关双三次插值的详细信息,请参阅维基百科: +https://en.wikipedia.org/wiki/Bicubic_interpolation + +参数: + - **input** (Variable) - 3-D,4-D或5-D Tensor,数据类型为float32、float64或uint8,其数据格式由参数 ``data_format`` 指定。 + - **out_shape** (list|tuple|Variable|None) - 输出Tensor,输入为3D张量时,形状为为(out_w)的1-D Tensor。输入为4D张量时,形状为为(out_h, out_w)的2-D Tensor。输入为5-D Tensor时,形状为(out_d, out_h, out_w)的3-D Tensor。如果 :code:`out_shape` 是列表,每一个元素可以是整数或者形状为[1]的变量。如果 :code:`out_shape` 是变量,则其维度大小为1。默认值为None。 + - **scale** (float|Variable|None)-输入的高度或宽度的乘数因子 。 out_shape和scale至少要设置一个。out_shape的优先级高于scale。默认值为None。 + - **name** (str|None) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。默认值为None。 + - **resample** (str) - 插值方法。支持“线性”,“双线性”,“三线性”,“临近插值”,"双三次"。默认值为双线性插值。 + - **align_corners** (bool)- 一个可选的bool型参数,如果为True,则将输入和输出张量的4个角落像素的中心对齐,并保留角点像素的值。 默认值为True + - **align_mode** (int)- 双线性插值的可选项。 可以是 '0' 代表src_idx = scale *(dst_indx + 0.5)-0.5;如果为'1' ,代表src_idx = scale * dst_index。 + - **data_format** (str,可选)- 指定输入的数据格式,输出的数据格式将与输入保持一致。对于3-D Tensor,支持 NCHW(num_batches, channels, width),对于4-D Tensor,支持 NCHW(num_batches, channels, height, width) 或者 NHWC(num_batches, height, width, channels),对于5-D Tensor,支持 NCDHW(num_batches, channels, depth, height, width)或者 NDHWC(num_batches, depth, height, width, channels),默认值:'NCHW'。 + +返回:3-D Tensor,形状为 (num_batches, channels, out_w) ;4-D Tensor,形状为 (num_batches, channels, out_h, out_w) 或 (num_batches, out_h, out_w, channels);或者5-D Tensor,形状为 (num_batches, channels, out_d, out_h, out_w) 或 (num_batches, out_d, out_h, out_w, channels)。 + +返回类型: 变量(variable) + +抛出异常: + - :code:`TypeError` - out_shape应该是一个列表、元组或变量。 + - :code:`TypeError` - actual_shape应该是变量或None。 + - :code:`ValueError` - image_resize的"resample"只能是"LINEAR"或"BILINEAR"或"TRILINEAR"或"NEAREST"或"BICUBIC"。 + - :code:`ValueError` - out_shape 和 scale 不可同时为 None。 + - :code:`ValueError` - out_shape 的长度必须为2如果输入是4D张量。 + - :code:`ValueError` - out_shape 的长度必须为3如果输入是5D张量。 + - :code:`ValueError` - scale应大于0。 + - :code:`TypeError` - align_corners 应为bool型。 + - :code:`ValueError` - align_mode 只能取 ‘0’ 或 ‘1’。 + - :code:`ValueError` - data_format 只能取 ‘NCW’、 ‘NCHW’、‘NHWC’、‘NCDHW’ 或者 ‘NDHWC’。 + + +**代码示例** + +.. 
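作为补充(非官方示例),上文 ``For scale`` 部分给出的 ``scale_factor`` 计算方式可以直接代入数字验证。

.. code-block:: python

    # 按上文公式计算 scale_factor(输入尺寸 6,输出尺寸 12)
    in_size, out_size = 6, 12
    align_corners = True

    if align_corners and out_size > 1:
        scale_factor = (in_size - 1.0) / (out_size - 1.0)
    else:
        scale_factor = float(in_size) / out_size

    print(scale_factor)  # align_corners=True 时约为 0.4545;若为 False 则是 0.5

下面是官方代码示例。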
code-block:: python + + import paddle + import numpy as np + + upsample_op = paddle.nn.UpSample(out_shape=[12,12]) + input_data = np.random.rand(2,3,6,10).astype("float32") + + #imperative mode + import paddle.fluid.dygraph as dg + with dg.guard(place) as g: + input = dg.to_variable(input_data) + output = upsample_op(input=input) + + print(output.shape) + # [2L, 3L, 12L, 12L] \ No newline at end of file diff --git a/doc/fluid/api_cn/nn_cn/ZeroPad2d_cn.rst b/doc/fluid/api_cn/nn_cn/ZeroPad2d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ed6a69b86fdb93f4172a14d638abd08022910e1b --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/ZeroPad2d_cn.rst @@ -0,0 +1,38 @@ +.. _cn_api_nn_ZeroPad2d: + +ZeroPad2d +------------------------------- +.. py:class:: paddle.nn.ZeroPad2d(padding, data_format="NCHW", name=None) + +**ZeroPad2d** + +按照 padding 对输入填充固定值0。 + +参数: + - **padding** (Tensor | List[int32]) - 填充大小。pad的格式为[pad_left, pad_right, pad_top, pad_bottom]。 + - **data_format** (str) - 指定input的format,可为 `'NCHW'` 或者 `'NHWC'`,默认值为`'NCHW'`。 + - **name** (str, 可选) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,缺省值为None。 + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle + import paddle.nn as nn + import numpy as np + paddle.disable_static() + + input_shape = (1, 1, 2, 3) + pad = [1, 0, 1, 2] + data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1 + my_pad = nn.ZeroPad2d(padding=pad) + data = paddle.to_tensor(data) + result = my_pad(data) + print(result.numpy()) + # [[[[0. 0. 0. 0.] + # [0. 1. 2. 3.] + # [0. 4. 5. 6.] + # [0. 0. 0. 0.] + # [0. 0. 0. 0.]]]] diff --git a/doc/fluid/api_cn/nn_cn/activation_cn.rst b/doc/fluid/api_cn/nn_cn/activation_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..934b8022273b68e5558408d0bc4ffa32d97baafa --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/activation_cn.rst @@ -0,0 +1,28 @@ +======================= +activation +======================= + + + + +.. toctree:: + :maxdepth: 1 + + activation_cn/ELU_cn.rst + activation_cn/GELU_cn.rst + activation_cn/Hardshrink_cn.rst + activation_cn/Tanh_cn.rst + activation_cn/Hardtanh_cn.rst + activation_cn/LeakyReLU_cn.rst + activation_cn/LogSigmoid_cn.rst + activation_cn/LogSoftmax_cn.rst + activation_cn/PReLU_cn.rst + activation_cn/ReLU_cn.rst + activation_cn/ReLU6_cn.rst + activation_cn/SELU_cn.rst + activation_cn/Sigmoid_cn.rst + activation_cn/Softmax_cn.rst + activation_cn/Softplus_cn.rst + activation_cn/Softshrink_cn.rst + activation_cn/Softsign_cn.rst + activation_cn/Tanhshrink_cn.rst diff --git a/doc/fluid/api_cn/nn_cn/activation_cn/ELU_cn.rst b/doc/fluid/api_cn/nn_cn/activation_cn/ELU_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f2cb3cfb242282ccca4ffa6b7355adb2a9e9e1ca --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/activation_cn/ELU_cn.rst @@ -0,0 +1,41 @@ +.. _cn_api_nn_ELU: + +ELU +------------------------------- +.. py:class:: paddle.nn.ELU(alpha=1.0, name=None) + +ELU激活层(ELU Activation Operator) + +根据 `Exponential Linear Units `_ 对输入Tensor中每个元素应用以下计算。 + +.. math:: + + ELU(x) = max(0, x) + min(0, \alpha * (e^{x} − 1)) + +其中,:math:`x` 为输入的 Tensor + +参数 +:::::::::: + - alpha (float, 可选) - ELU的alpha值,默认值为1.0。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状: +:::::::::: + - input: 任意形状的Tensor。 + - output: 和input具有相同形状的Tensor。 + +代码示例 +::::::::: + +.. 
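作为补充(非官方示例),ELU 的输出可以按上面的公式用 numpy 直接复算,结果应与下面官方示例中 ``alpha=0.2`` 的输出一致。

.. code-block:: python

    import numpy as np

    x = np.array([[-1., 6.], [1., 15.6]])
    alpha = 0.2

    # ELU(x) = max(0, x) + min(0, alpha * (exp(x) - 1))
    out = np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x) - 1))
    print(out)
    # [[-0.12642411  6.        ]
    #  [ 1.         15.6       ]]

下面是官方代码示例。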
code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([[-1, 6],[1, 15.6]])) + m = paddle.nn.ELU(0.2) + out = m(x) + # [[-0.12642411 6. ] + # [ 1. 15.6 ]] diff --git a/doc/fluid/api_cn/nn_cn/activation_cn/GELU_cn.rst b/doc/fluid/api_cn/nn_cn/activation_cn/GELU_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3493bf7f6ea266f1c392908fc933c25a664af549 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/activation_cn/GELU_cn.rst @@ -0,0 +1,50 @@ +.. _cn_api_nn_GELU: + +GELU +------------------------------- +.. py:class:: paddle.nn.GELU(approximate=False, name=None) + +GELU激活层(GELU Activation Operator) + +逐元素计算 GELU激活函数。更多细节请参考 `Gaussian Error Linear Units `_ 。 + +如果使用近似计算: + +.. math:: + GELU(x) = 0.5 * x * (1 + tanh(\sqrt{\frac{2}{\pi}} * (x + 0.044715x^{3}))) + +如果不使用近似计算: + +.. math:: + GELU(x) = 0.5 * x * (1 + erf(\frac{x}{\sqrt{2}})) + + +其中,:math:`x` 为输入的 Tensor + +参数 +:::::::::: + - approximate (bool, 可选) - 是否使用近似计算,默认值为 False,即不使用近似计算。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状: +:::::::::: + - input: 任意形状的Tensor。 + - output: 和input具有相同形状的Tensor。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([[-1, 0.5],[1, 1.5]])) + + m = paddle.nn.GELU() + out = m(x) # [-0.158655 0.345731 0.841345 1.39979] + + m = paddle.nn.GELU(True) + out = m(x) # [-0.158808 0.345714 0.841192 1.39957] diff --git a/doc/fluid/api_cn/nn_cn/activation_cn/Hardshrink_cn.rst b/doc/fluid/api_cn/nn_cn/activation_cn/Hardshrink_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0dea4122ddfa5349bb64cedb9a1583afaf680f0c --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/activation_cn/Hardshrink_cn.rst @@ -0,0 +1,43 @@ +.. _cn_api_nn_Hardshrink: + +Hardshrink +------------------------------- +.. py:class:: paddle.nn.Hardshrink(threshold=0.5, name=None) + +Hardshrink激活层 + +.. math:: + + Hardshrink(x)= + \left\{ + \begin{aligned} + &x, & & if \ x > threshold \\ + &x, & & if \ x < -threshold \\ + &0, & & if \ others + \end{aligned} + \right. + +其中,:math:`x` 为输入的 Tensor + +参数 +:::::::::: + - threshold (float, 可选) - Hardshrink激活计算公式中的threshold值。默认值为0.5。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状: + - input: 任意形状的Tensor。 + - output: 和input具有相同形状的Tensor。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor([-1, 0.3, 2.5]) + m = paddle.nn.Hardshrink() + out = m(x) # [-1., 0., 2.5] diff --git a/doc/fluid/api_cn/nn_cn/activation_cn/Hardtanh_cn.rst b/doc/fluid/api_cn/nn_cn/activation_cn/Hardtanh_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d3955f41d3d8a9177ec1d0cb42a8103fa39a83ea --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/activation_cn/Hardtanh_cn.rst @@ -0,0 +1,45 @@ +.. _cn_api_nn_Hardtanh: + +Hardtanh +------------------------------- +.. py:class:: paddle.nn.Hardtanh(min=-1.0, max=1.0, name=None) + +Hardtanh激活层(Hardtanh Activation Operator)。计算公式如下: + +.. math:: + + Hardtanh(x)= + \left\{ + \begin{aligned} + &max, & & if \ x > max \\ + &min, & & if \ x < min \\ + &x, & & if \ others + \end{aligned} + \right. 
+ +其中,:math:`x` 为输入的 Tensor + +参数 +:::::::::: + - min (float, 可选) - Hardtanh激活计算公式中的min值。默认值为-1。 + - max (float, 可选) - Hardtanh激活计算公式中的max值。默认值为1。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状: +:::::::::: + - input: 任意形状的Tensor。 + - output: 和input具有相同形状的Tensor。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([-1.5, 0.3, 2.5])) + m = paddle.nn.Hardtanh() + out = m(x) # # [-1., 0.3, 1.] diff --git a/doc/fluid/api_cn/nn_cn/activation_cn/LeakyReLU_cn.rst b/doc/fluid/api_cn/nn_cn/activation_cn/LeakyReLU_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..62d710903d488957b9360f88a448c4c267212023 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/activation_cn/LeakyReLU_cn.rst @@ -0,0 +1,42 @@ +.. _cn_api_nn_LeakyReLU: + +LeakyReLU +------------------------------- +.. py:class:: paddle.nn.LeakyReLU(negative_slope=0.01, name=None) + +LeakyReLU 激活层 + +.. math:: + + LeakyReLU(x)= + \left\{ + \begin{aligned} + &x, & & if \ x >= 0 \\ + &negative\_slope * x, & & otherwise \\ + \end{aligned} + \right. \\ + +其中,:math:`x` 为输入的 Tensor + +参数 +:::::::::: + - negative_slope (float,可选) - :math:`x < 0` 时的斜率。默认值为0.01。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状: + - input: 任意形状的Tensor。 + - output: 和input具有相同形状的Tensor。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + m = paddle.nn.LeakyReLU() + x = paddle.to_tensor(np.array([-2, 0, 1], 'float32')) + out = m(x) # [-0.02, 0., 1.] diff --git a/doc/fluid/api_cn/nn_cn/activation_cn/LogSigmoid_cn.rst b/doc/fluid/api_cn/nn_cn/activation_cn/LogSigmoid_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..754f59fce5b6ab07dd2c0c699d7f86bff9bd93bb --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/activation_cn/LogSigmoid_cn.rst @@ -0,0 +1,36 @@ +.. _cn_api_nn_LogSigmoid: + +LogSigmoid +------------------------------- +.. py:class:: paddle.nn.LogSigmoid(name=None) + +LogSigmoid激活层。计算公式如下: + +.. math:: + + LogSigmoid(x) = \log \frac{1}{1 + e^{-x}} + +其中,:math:`x` 为输入的 Tensor + +参数 +:::::::::: + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状: +:::::::::: + - input: 任意形状的Tensor。 + - output: 和input具有相同形状的Tensor。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([1.0, 2.0, 3.0, 4.0])) + m = paddle.nn.LogSigmoid() + out = m(x) # [-0.313262 -0.126928 -0.0485874 -0.0181499] diff --git a/doc/fluid/api_cn/nn_cn/activation_cn/LogSoftmax_cn.rst b/doc/fluid/api_cn/nn_cn/activation_cn/LogSoftmax_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..96bbc3a886f9535bf02cb1954645d5182e669120 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/activation_cn/LogSoftmax_cn.rst @@ -0,0 +1,47 @@ +.. _cn_api_nn_LogSoftmax: + +LogSoftmax +------------------------------- +.. py:class:: paddle.nn.LogSoftmax(axis=-1, name=None) + +LogSoftmax激活层,计算公式如下: + +.. math:: + + Out[i, j] = log(softmax(x)) + = log(\\frac{\exp(X[i, j])}{\sum_j(exp(X[i, j])}) + +参数 +:::::::::: + - axis (int, 可选) - 指定对输入Tensor进行运算的轴。``axis`` 的有效范围是[-D, D),D是输入Tensor的维度, ``axis`` 为负值时与 :math:`axis + D` 等价。默认值为-1。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状: + - input: 任意形状的Tensor。 + - output: 和input具有相同形状的Tensor。 + +代码示例 +::::::::: + +.. 
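作为补充(非官方示例),log softmax 可以写成 :math:`x - \log\sum_j e^{x_j}` ,数值计算时通常先减去每行的最大值保证稳定性;下面用 numpy 对官方示例中第一行数据做验证。

.. code-block:: python

    import numpy as np

    x = np.array([[-2.0, 3.0, -4.0, 5.0]], dtype='float64')

    # 先减去每行最大值,再按 x - log(sum(exp(x))) 计算
    shifted = x - x.max(axis=-1, keepdims=True)
    out = shifted - np.log(np.exp(shifted).sum(axis=-1, keepdims=True))
    print(out)
    # 约为 [[-7.1278395 -2.1278395 -9.1278395 -0.1278395]]

下面是官方代码示例。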
code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + x = np.array([[[-2.0, 3.0, -4.0, 5.0], + [3.0, -4.0, 5.0, -6.0], + [-7.0, -8.0, 8.0, 9.0]], + [[1.0, -2.0, -3.0, 4.0], + [-5.0, 6.0, 7.0, -8.0], + [6.0, 7.0, 8.0, 9.0]]], 'float32') + m = paddle.nn.LogSoftmax() + x = paddle.to_tensor(x) + out = m(x) + # [[[ -7.1278396 -2.1278396 -9.127839 -0.12783948] + # [ -2.1270514 -9.127051 -0.12705144 -11.127051 ] + # [-16.313261 -17.313261 -1.3132617 -0.31326184]] + # [[ -3.0518122 -6.051812 -7.051812 -0.051812 ] + # [-12.313267 -1.3132664 -0.3132665 -15.313267 ] + # [ -3.4401896 -2.4401896 -1.4401896 -0.44018966]]] diff --git a/doc/fluid/api_cn/nn_cn/activation_cn/PReLU_cn.rst b/doc/fluid/api_cn/nn_cn/activation_cn/PReLU_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d36cee798c13583d8abbe05aacc8d3e44c791bbc --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/activation_cn/PReLU_cn.rst @@ -0,0 +1,54 @@ +.. _cn_api_nn_PReLU: + +PReLU +------------------------------- +.. py:class:: paddle.nn.PReLU(num_parameters=1, init=0.25, weight_attr=None, name=None) + +PReLU激活层(PReLU Activation Operator)。计算公式如下: + +如果使用近似计算: + +.. math:: + + PReLU(x) = max(0, x) + weight * min(0, x) + +其中,:math:`x` 为输入的 Tensor + +参数 +:::::::::: + - num_parameters (int, 可选) - 可训练`weight`数量,支持2种输入:1 - 输入中的所有元素使用同一个`weight`值; 输入的通道数 - 在同一个通道中的元素使用同一个`weight`值。默认为1。 + - init (float, 可选) - `weight`的初始值。默认为0.25。 + - weight_attr (ParamAttr, 可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状: +:::::::::: + - input: 任意形状的Tensor,默认数据类型为float32。 + - output: 和input具有相同形状的Tensor。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + paddle.set_default_dtype("float64") + + data = np.array([[[[-2.0, 3.0, -4.0, 5.0], + [ 3.0, -4.0, 5.0, -6.0], + [-7.0, -8.0, 8.0, 9.0]], + [[ 1.0, -2.0, -3.0, 4.0], + [-5.0, 6.0, 7.0, -8.0], + [ 6.0, 7.0, 8.0, 9.0]]]], 'float64') + x = paddle.to_tensor(data) + m = paddle.nn.PReLU(1, 0.25) + out = m(x) + # [[[[-0.5 , 3. , -1. , 5. ], + # [ 3. , -1. , 5. , -1.5 ], + # [-1.75, -2. , 8. , 9. ]], + # [[ 1. , -0.5 , -0.75, 4. ], + # [-1.25, 6. , 7. , -2. ], + # [ 6. , 7. , 8. , 9. ]]]] \ No newline at end of file diff --git a/doc/fluid/api_cn/nn_cn/activation_cn/ReLU6_cn.rst b/doc/fluid/api_cn/nn_cn/activation_cn/ReLU6_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0ba5f2af0ec631c33f97e3091044309d20906868 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/activation_cn/ReLU6_cn.rst @@ -0,0 +1,36 @@ +.. _cn_api_nn_ReLU6: + +ReLU6 +------------------------------- +.. py:class:: paddle.nn.ReLU6(name=None) + +ReLU6激活层 + +.. math:: + + ReLU6(x) = min(max(0,x), 6) + +其中,:math:`x` 为输入的 Tensor + +参数 +:::::::::: + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状: +:::::::::: + - input: 任意形状的Tensor。 + - output: 和input具有相同形状的Tensor。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([-1, 0.3, 6.5])) + m = paddle.nn.ReLU6() + out = m(x) # [0, 0.3, 6] diff --git a/doc/fluid/api_cn/nn_cn/activation_cn/ReLU_cn.rst b/doc/fluid/api_cn/nn_cn/activation_cn/ReLU_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ac39225bf61db1653fc7d62a7339a824f992aa0e --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/activation_cn/ReLU_cn.rst @@ -0,0 +1,36 @@ +.. 
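(补充说明,非官方示例,承接上文 ``ReLU6`` )ReLU6 相当于把 ReLU 的输出再截断到 6,可用 numpy 按公式直接验证上面示例的结果。

.. code-block:: python

    import numpy as np

    x = np.array([-1., 0.3, 6.5])
    # ReLU6(x) = min(max(0, x), 6)
    print(np.minimum(np.maximum(0., x), 6.))  # [0.  0.3 6. ]

下文是 ReLU 的文档。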
_cn_api_nn_ReLU: + +ReLU +------------------------------- +.. py:class:: paddle.nn.ReLU(name=None) + +ReLU激活层(Rectified Linear Unit)。计算公式如下: + +.. math:: + + ReLU(x) = max(0, x) + +其中,:math:`x` 为输入的 Tensor + +参数 +:::::::::: + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状: +:::::::::: + - input: 任意形状的Tensor。 + - output: 和input具有相同形状的Tensor。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([-2, 0, 1]).astype('float32')) + m = paddle.nn.ReLU() + out = m(x) # [0., 0., 1.] diff --git a/doc/fluid/api_cn/nn_cn/activation_cn/SELU_cn.rst b/doc/fluid/api_cn/nn_cn/activation_cn/SELU_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2bd2ce11b2bf60c8e1aafd5f1fe2d44c51ee1f30 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/activation_cn/SELU_cn.rst @@ -0,0 +1,42 @@ +.. _cn_api_nn_SELU: + +SELU +------------------------------- +.. py:class:: paddle.nn.SELU(scale=1.0507009873554804934193349852946, alpha=1.6732632423543772848170429916717, name=None) + +SELU激活层 + +.. math:: + + SELU(x)= scale * + \begin{cases} + x, \text{if } x > 0 \\ + alpha * e^{x} - alpha, \text{if } x <= 0 + \end{cases} + +其中,:math:`x` 为输入的 Tensor + +参数 +:::::::::: + - scale (float, 可选) - SELU激活计算公式中的scale值,必须大于1.0。默认值为1.0507009873554804934193349852946。 + - alpha (float, 可选) - SELU激活计算公式中的alpha值,必须大于等于零。默认值为1.6732632423543772848170429916717。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状: +:::::::::: + - input: 任意形状的Tensor。 + - output: 和input具有相同形状的Tensor。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([[0.0, 1.0],[2.0, 3.0]])) + m = paddle.nn.SELU() + out = m(x) # [[0, 1.050701],[2.101402, 3.152103]] diff --git a/doc/fluid/api_cn/nn_cn/activation_cn/Softmax_cn.rst b/doc/fluid/api_cn/nn_cn/activation_cn/Softmax_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ba6f60076ea114a210ac1d0a4b919a0b99a02a0f --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/activation_cn/Softmax_cn.rst @@ -0,0 +1,117 @@ +.. _cn_api_nn_Softmax: + +Softmax +------------------------------- +.. py:class:: paddle.nn.Softmax(axis=-1, name=None) + +Softmax激活层,OP的计算过程如下: + +步骤1:输入 ``x`` 的 ``axis`` 维会被置换到最后一维; + +步骤2:将输入 ``x`` 在逻辑上变换为二维矩阵。二维矩阵第一维(列长度)是输入除最后一维之外的其他维度值的乘积,第二维(行长度)和输入 ``axis`` 维的长度相同;对于矩阵的每一行,softmax操作对其进行重新缩放,使得该行的每个元素在 \[0,1\] 范围内,并且总和为1; + +步骤3:softmax操作执行完成后,执行步骤1和步骤2的逆运算,将二维矩阵恢复至和输入 ``x`` 相同的维度。 + +上述步骤2中softmax操作计算过程如下: + + - 对于二维矩阵的每一行,计算K维向量(K是输入第 ``axis`` 维的长度)中指定位置的指数值和全部位置指数值的和。 + + - 指定位置指数值与全部位置指数值之和的比值就是softmax操作的输出。 + +对于二维矩阵中的第i行和第j列有: + +.. math:: + + Softmax[i, j] = \frac{\exp(x[i, j])}{\sum_j(exp(x[i, j])} + +- 示例1(矩阵一共有三维。axis = -1,表示沿着最后一维(即第三维)做softmax操作) + +.. code-block:: text + + # input + + x.shape = [2, 3, 4] + + x.data = [[[2.0, 3.0, 4.0, 5.0], + [3.0, 4.0, 5.0, 6.0], + [7.0, 8.0, 8.0, 9.0]], + [[1.0, 2.0, 3.0, 4.0], + [5.0, 6.0, 7.0, 8.0], + [6.0, 7.0, 8.0, 9.0]]] + + axis = -1 + + # output + + out.shape = [2, 3, 4] + + out.data = [[[0.0320586 , 0.08714432, 0.23688282, 0.64391426], + [0.0320586 , 0.08714432, 0.23688282, 0.64391426], + [0.07232949, 0.19661193, 0.19661193, 0.53444665]], + [[0.0320586 , 0.08714432, 0.23688282, 0.64391426], + [0.0320586 , 0.08714432, 0.23688282, 0.64391426], + [0.0320586 , 0.08714432, 0.23688282, 0.64391426]]] + +- 示例2(矩阵一共有三维。axis = 1,表示沿着第二维做softmax操作) + +.. 
code-block:: text + + # input + + x.shape = [2, 3, 4] + + x.data = [[[2.0, 3.0, 4.0, 5.0], + [3.0, 4.0, 5.0, 6.0], + [7.0, 8.0, 8.0, 9.0]], + [[1.0, 2.0, 3.0, 4.0], + [5.0, 6.0, 7.0, 8.0], + [6.0, 7.0, 8.0, 9.0]]] + + axis = 1 + + # output + + out.shape = [2, 3, 4] + + out.data = [[[0.00657326, 0.00657326, 0.01714783, 0.01714783], + [0.01786798, 0.01786798, 0.04661262, 0.04661262], + [0.97555875, 0.97555875, 0.93623955, 0.93623955]], + [[0.00490169, 0.00490169, 0.00490169, 0.00490169], + [0.26762315, 0.26762315, 0.26762315, 0.26762315], + [0.72747516, 0.72747516, 0.72747516, 0.72747516]]] + +参数 +:::::::::: + - axis (int, 可选) - 指定对输入Tensor进行运算的轴。``axis`` 的有效范围是[-D, D),D是输入Tensor的维度, ``axis`` 为负值时与 :math:`axis + D` 等价。默认值为-1。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状: +:::::::::: + - input: 任意形状的Tensor。 + - output: 和input具有相同形状的Tensor。 + +代码示例 +:::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + x = np.array([[[-2.0, 3.0, -4.0, 5.0], + [3.0, -4.0, 5.0, -6.0], + [-7.0, -8.0, 8.0, 9.0]], + [[1.0, -2.0, -3.0, 4.0], + [-5.0, 6.0, 7.0, -8.0], + [6.0, 7.0, 8.0, 9.0]]], 'float32') + x = paddle.to_tensor(x) + m = paddle.nn.Softmax() + out = m(x) + # [[[0.0320586 , 0.08714432, 0.23688282, 0.64391426], + # [0.0320586 , 0.08714432, 0.23688282, 0.64391426], + # [0.07232949, 0.19661193, 0.19661193, 0.53444665]], + # [[0.0320586 , 0.08714432, 0.23688282, 0.64391426], + # [0.0320586 , 0.08714432, 0.23688282, 0.64391426], + # [0.0320586 , 0.08714432, 0.23688282, 0.64391426]]] diff --git a/doc/fluid/api_cn/nn_cn/activation_cn/Softplus_cn.rst b/doc/fluid/api_cn/nn_cn/activation_cn/Softplus_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..cce23fcb7046fbdc66eb61a099517077a6378750 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/activation_cn/Softplus_cn.rst @@ -0,0 +1,39 @@ +.. _cn_api_nn_Softplus: + +Softplus +------------------------------- +.. py:class:: paddle.nn.Softplus(beta=1, threshold=20, name=None) + +Softplus激活层 + +.. math:: + + Softplus(x) = \frac{1}{beta} * \log(1 + e^{beta * x}) \\ + \text{为了保证数值稳定性, 当}\,beta * x > threshold\,\text{时,函数转变为线性函数x}. + +其中,:math:`x` 为输入的 Tensor + +参数 +:::::::::: + - beta (float, 可选) - Softplus激活计算公式中的beta值。默认值为1。 + - threshold (float, 可选) - Softplus激活计算公式中的threshold值。默认值为20。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状: +:::::::::: + - input: 任意形状的Tensor。 + - output: 和input具有相同形状的Tensor。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3])) + m = paddle.nn.Softplus() + out = m(x) # [0.513015, 0.598139, 0.744397, 0.854355] diff --git a/doc/fluid/api_cn/nn_cn/activation_cn/Softshrink_cn.rst b/doc/fluid/api_cn/nn_cn/activation_cn/Softshrink_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d04cf74e560a9176824a324d7ecd340e227b59dc --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/activation_cn/Softshrink_cn.rst @@ -0,0 +1,41 @@ +.. _cn_api_nn_Softshrink: + +Softshrink +------------------------------- +.. py:class:: paddle.nn.Softshrink(threshold=0.5, name=None) + +Softshrink激活层 + +.. 
math:: + + Softshrink(x)= \begin{cases} + x - threshold, \text{if } x > threshold \\ + x + threshold, \text{if } x < -threshold \\ + 0, \text{otherwise} + \end{cases} + +其中,:math:`x` 为输入的 Tensor + +参数 +:::::::::: + - threshold (float, 可选) - Softshrink激活计算公式中的threshold值,必须大于等于零。默认值为0.5。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状: +:::::::::: + - input: 任意形状的Tensor。 + - output: 和input具有相同形状的Tensor。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([-0.9, -0.2, 0.1, 0.8])) + m = paddle.nn.Softshrink() + out = m(x) # [-0.4, 0, 0, 0.3] diff --git a/doc/fluid/api_cn/nn_cn/activation_cn/Softsign_cn.rst b/doc/fluid/api_cn/nn_cn/activation_cn/Softsign_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8029d97d80dc9bcf506bb127fc2499f7564029de --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/activation_cn/Softsign_cn.rst @@ -0,0 +1,36 @@ +.. _cn_api_nn_Softsign: + +Softsign +------------------------------- +.. py:class:: paddle.nn.Softsign(name=None) + +Softsign激活层 + +.. math:: + + Softsign(x) = \frac{x}{1 + |x|} + +其中,:math:`x` 为输入的 Tensor + +参数 +:::::::::: + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状: +:::::::::: + - input: 任意形状的Tensor。 + - output: 和input具有相同形状的Tensor。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3])) + m = paddle.nn.Softsign() + out = m(x) # [-0.285714, -0.166667, 0.0909091, 0.230769] diff --git a/doc/fluid/api_cn/nn_cn/activation_cn/Tanh_cn.rst b/doc/fluid/api_cn/nn_cn/activation_cn/Tanh_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4f97cdb57f053dd24bca94e945afae7ffa6fd04a --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/activation_cn/Tanh_cn.rst @@ -0,0 +1,35 @@ +.. _cn_api_nn_Tanh: + +Tanh +------------------------------- +.. py:class:: paddle.nn.Tanh(name=None) + +Tanh激活层 + +.. math:: + Tanh(x) = \frac{e^{x} - e^{-x}}{e^{x} + e^{-x}} + + +参数 +:::::::::: + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状: + - input: 任意形状的Tensor。 + - output: 和input具有相同形状的Tensor。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3])) + m = paddle.nn.Tanh() + out = m(x) + print(out.numpy()) + # [-0.37994896 -0.19737532 0.09966799 0.29131261] \ No newline at end of file diff --git a/doc/fluid/api_cn/nn_cn/activation_cn/Tanhshrink_cn.rst b/doc/fluid/api_cn/nn_cn/activation_cn/Tanhshrink_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6c657fd800a7d4a475bb0277df42bf9dfea7eeb2 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/activation_cn/Tanhshrink_cn.rst @@ -0,0 +1,36 @@ +.. _cn_api_nn_Tanhshrink: + +Tanhshrink +------------------------------- +.. py:class:: paddle.nn.Tanhshrink(name=None) + +Tanhshrink激活层 + +.. math:: + + Tanhshrink(x) = x - tanh(x) + +其中,:math:`x` 为输入的 Tensor + +参数 +:::::::::: + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状: +:::::::::: + - input: 任意形状的Tensor。 + - output: 和input具有相同形状的Tensor。 + +代码示例 +::::::::: + +.. 
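作为补充(非官方示例),Tanhshrink 的定义非常直接,可以用 numpy 按 :math:`x - \tanh(x)` 复算并与下面官方示例的输出对照。

.. code-block:: python

    import numpy as np

    x = np.array([-0.4, -0.2, 0.1, 0.3])
    # Tanhshrink(x) = x - tanh(x)
    print(x - np.tanh(x))
    # [-0.02005104 -0.00262468  0.00033201  0.00868739]

下面是官方代码示例。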
code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3])) + m = paddle.nn.Tanhshrink() + out = m(x) # [-0.020051, -0.00262468, 0.000332005, 0.00868739] diff --git a/doc/fluid/api_cn/nn_cn/adaptive_pool2d_cn.rst b/doc/fluid/api_cn/nn_cn/adaptive_pool2d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..dcdc8930fc625af519f39f71870e7a08e248ecdf --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/adaptive_pool2d_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_adaptive_pool2d: + +adaptive_pool2d +------------------------------- +:doc_source: paddle.fluid.layers.adaptive_pool2d + + diff --git a/doc/fluid/api_cn/nn_cn/adaptive_pool3d_cn.rst b/doc/fluid/api_cn/nn_cn/adaptive_pool3d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3ba26ad441a247c282ebba92bfdf3bda86a24557 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/adaptive_pool3d_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_adaptive_pool3d: + +adaptive_pool3d +------------------------------- +:doc_source: paddle.fluid.layers.adaptive_pool3d + + diff --git a/doc/fluid/api_cn/nn_cn/add_position_encoding_cn.rst b/doc/fluid/api_cn/nn_cn/add_position_encoding_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1533f06101a513771bd88c87cb1f9893172d878f --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/add_position_encoding_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_add_position_encoding: + +add_position_encoding +------------------------------- +:doc_source: paddle.fluid.layers.add_position_encoding + + diff --git a/doc/fluid/api_cn/nn_cn/affine_channel_cn.rst b/doc/fluid/api_cn/nn_cn/affine_channel_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..44b8c7b89618a3f0a59a3c044040f348ca7cf62f --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/affine_channel_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_affine_channel: + +affine_channel +------------------------------- +:doc_source: paddle.fluid.layers.affine_channel + + diff --git a/doc/fluid/api_cn/nn_cn/affine_grid_cn.rst b/doc/fluid/api_cn/nn_cn/affine_grid_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3393b0adc62c76e3854fa335f45c2f324beb0837 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/affine_grid_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_affine_grid: + +affine_grid +------------------------------- +:doc_source: paddle.fluid.layers.affine_grid + + diff --git a/doc/fluid/api_cn/nn_cn/anchor_generator_cn.rst b/doc/fluid/api_cn/nn_cn/anchor_generator_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..321b61d38400944e5fa0a1899808a66f949e9198 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/anchor_generator_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_anchor_generator: + +anchor_generator +------------------------------- +:doc_source: paddle.fluid.layers.anchor_generator + + diff --git a/doc/fluid/api_cn/nn_cn/assign_cn.rst b/doc/fluid/api_cn/nn_cn/assign_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..cfd3d8e5f03645cf5beea6022590e984f5c03f49 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/assign_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_nn_cn_assign: + +assign +------------------------------- +:doc_source: paddle.fluid.layers.assign + + diff --git a/doc/fluid/api_cn/nn_cn/beam_search_cn.rst b/doc/fluid/api_cn/nn_cn/beam_search_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..958198a9106d6e2b6529f352a2a52f0d0ac5a9d9 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/beam_search_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_beam_search: + +beam_search +------------------------------- +:doc_source: paddle.fluid.layers.beam_search + + diff --git a/doc/fluid/api_cn/nn_cn/beam_search_decode_cn.rst b/doc/fluid/api_cn/nn_cn/beam_search_decode_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a5f94592aa29f3ae98f87918b99ed5b170d49011 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/beam_search_decode_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_beam_search_decode: + +beam_search_decode +------------------------------- +:doc_source: paddle.fluid.layers.beam_search_decode + + diff --git a/doc/fluid/api_cn/nn_cn/bipartite_match_cn.rst b/doc/fluid/api_cn/nn_cn/bipartite_match_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7226f75ec520e60bc7f2208e2acb1f9a8158b786 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/bipartite_match_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_bipartite_match: + +bipartite_match +------------------------------- +:doc_source: paddle.fluid.layers.bipartite_match + + diff --git a/doc/fluid/api_cn/nn_cn/box_clip_cn.rst b/doc/fluid/api_cn/nn_cn/box_clip_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0177e8884e8f3050d7028462cad2aab83eaf378b --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/box_clip_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_box_clip: + +box_clip +------------------------------- +:doc_source: paddle.fluid.layers.box_clip + + diff --git a/doc/fluid/api_cn/nn_cn/box_coder_cn.rst b/doc/fluid/api_cn/nn_cn/box_coder_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2ee7e683d5c343e24e22aca2483f90b7bc9b4ca0 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/box_coder_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_box_coder: + +box_coder +------------------------------- +:doc_source: paddle.fluid.layers.box_coder + + diff --git a/doc/fluid/api_cn/nn_cn/box_decoder_and_assign_cn.rst b/doc/fluid/api_cn/nn_cn/box_decoder_and_assign_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..97a508a91298a0eabd972a933cf7452eb5f39070 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/box_decoder_and_assign_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_box_decoder_and_assign: + +box_decoder_and_assign +------------------------------- +:doc_source: paddle.fluid.layers.box_decoder_and_assign + + diff --git a/doc/fluid/api_cn/nn_cn/bpr_loss_cn.rst b/doc/fluid/api_cn/nn_cn/bpr_loss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a584a3e24bd24860020d14c4460c87f15e403582 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/bpr_loss_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_bpr_loss: + +bpr_loss +------------------------------- +:doc_source: paddle.fluid.layers.bpr_loss + + diff --git a/doc/fluid/api_cn/nn_cn/brelu_cn.rst b/doc/fluid/api_cn/nn_cn/brelu_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..29468ae9246417b8cbd18ff39007edbfecbd357c --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/brelu_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_nn_cn_brelu: + +brelu +------------------------------- +:doc_source: paddle.fluid.layers.brelu + + diff --git a/doc/fluid/api_cn/nn_cn/case_cn.rst b/doc/fluid/api_cn/nn_cn/case_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..424bcc1439dfa2b88e75f35e8304c5b0aa203a3b --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/case_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_case: + +case +------------------------------- +:doc_source: paddle.fluid.layers.case + + diff --git a/doc/fluid/api_cn/nn_cn/center_loss_cn.rst b/doc/fluid/api_cn/nn_cn/center_loss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e59aab849316ba98eb5636691868fa29656258f0 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/center_loss_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_center_loss: + +center_loss +------------------------------- +:doc_source: paddle.fluid.layers.center_loss + + diff --git a/doc/fluid/api_cn/nn_cn/clip_by_norm_cn.rst b/doc/fluid/api_cn/nn_cn/clip_by_norm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..edb354bbdc7b36ac5472a819597bd328c55701cf --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/clip_by_norm_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_clip_by_norm: + +clip_by_norm +------------------------------- +:doc_source: paddle.fluid.layers.clip_by_norm + + diff --git a/doc/fluid/api_cn/nn_cn/clip_cn.rst b/doc/fluid/api_cn/nn_cn/clip_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5948b5966ce227f51fc5f50912f70806ec0d6254 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/clip_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_clip: + +clip +------------------------------- +:doc_source: paddle.fluid.layers.clip + + diff --git a/doc/fluid/api_cn/nn_cn/collect_fpn_proposals_cn.rst b/doc/fluid/api_cn/nn_cn/collect_fpn_proposals_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d34c97e8f92ff3d1613de09d141ce5c9da941a76 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/collect_fpn_proposals_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_collect_fpn_proposals: + +collect_fpn_proposals +------------------------------- +:doc_source: paddle.fluid.layers.collect_fpn_proposals + + diff --git a/doc/fluid/api_cn/nn_cn/cond_cn.rst b/doc/fluid/api_cn/nn_cn/cond_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a50a8dd0d4ac57c7b52d269d8c78b4c52b8a2ca6 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/cond_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_cond: + +cond +------------------------------- +:doc_source: paddle.fluid.layers.cond + + diff --git a/doc/fluid/api_cn/nn_cn/continuous_value_model_cn.rst b/doc/fluid/api_cn/nn_cn/continuous_value_model_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..de904d4ce1b520887794189865676f19a04ce16f --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/continuous_value_model_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_continuous_value_model: + +continuous_value_model +------------------------------- +:doc_source: paddle.fluid.layers.continuous_value_model + + diff --git a/doc/fluid/api_cn/nn_cn/cosine_decay_cn.rst b/doc/fluid/api_cn/nn_cn/cosine_decay_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..640bfabad91bdbba4bc47916bc14bf17848305f2 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/cosine_decay_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_nn_cn_cosine_decay: + +cosine_decay +------------------------------- +:doc_source: paddle.fluid.layers.cosine_decay + + diff --git a/doc/fluid/api_cn/nn_cn/cosine_similarity_cn.rst b/doc/fluid/api_cn/nn_cn/cosine_similarity_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c2f4109534b5485e5154cc140a1661d94b3945f8 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/cosine_similarity_cn.rst @@ -0,0 +1,42 @@ +.. _cn_api_paddle_nn_cosine_similarity: + +cosine_similarity +------------------------------- + +.. py:function:: paddle.nn.functional.cosine_similarity(x1, x2, axis=1, eps=1e-8) + +该OP用于计算x1与x2沿axis维度的余弦相似度。 + +参数: + - **x1** (Tensor) - Tensor,数据类型支持float32, float64。 + - **x2** (Tensor) - Tensor,数据类型支持float32, float64。 + - **axis** (int) - 指定计算的维度,会在该维度上计算余弦相似度,默认值为1。 + - **eps** (float) - 很小的值,防止计算时分母为0,默认值为1e-8。 + + +返回: 余弦相似度的计算结果,数据类型与x1, x2相同。 + +返回类型:Tensor + + + +**代码示例:** + +.. code-block:: python + + import paddle + import paddle.nn as nn + import numpy as np + paddle.disable_static() + + np.random.seed(0) + x1 = np.random.rand(2,3) + x2 = np.random.rand(2,3) + x1 = paddle.to_tensor(x1) + x2 = paddle.to_tensor(x2) + result = paddle.nn.functional.cosine_similarity(x1, x2, axis=0) + print(result.numpy()) + # [0.99806249 0.9817672 0.94987036] + + + diff --git a/doc/fluid/api_cn/nn_cn/cross_entropy_cn.rst b/doc/fluid/api_cn/nn_cn/cross_entropy_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..72019f12865e83bece88f6c1fae4a3b4aa3d6934 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/cross_entropy_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_cross_entropy: + +cross_entropy +------------------------------- +:doc_source: paddle.fluid.layers.cross_entropy + + diff --git a/doc/fluid/api_cn/nn_cn/data_cn.rst b/doc/fluid/api_cn/nn_cn/data_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7504500332158f909aef8c89eabb6ba1235931f2 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/data_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_data: + +data +------------------------------- +:doc_source: paddle.fluid.data + + diff --git a/doc/fluid/api_cn/nn_cn/deformable_roi_pooling_cn.rst b/doc/fluid/api_cn/nn_cn/deformable_roi_pooling_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..db8ae054603ee29204417ffa37537f524412d8d7 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/deformable_roi_pooling_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_deformable_roi_pooling: + +deformable_roi_pooling +------------------------------- +:doc_source: paddle.fluid.layers.deformable_roi_pooling + + diff --git a/doc/fluid/api_cn/nn_cn/density_prior_box_cn.rst b/doc/fluid/api_cn/nn_cn/density_prior_box_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8e319438325bb1a72984d957ed6a7650d91dc8cf --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/density_prior_box_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_density_prior_box: + +density_prior_box +------------------------------- +:doc_source: paddle.fluid.layers.density_prior_box + + diff --git a/doc/fluid/api_cn/nn_cn/detection_output_cn.rst b/doc/fluid/api_cn/nn_cn/detection_output_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ca670eb0ca14afb6a34c8710023d232df4873f1b --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/detection_output_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_nn_cn_detection_output: + +detection_output +------------------------------- +:doc_source: paddle.fluid.layers.detection_output + + diff --git a/doc/fluid/api_cn/nn_cn/diag_embed_cn.rst b/doc/fluid/api_cn/nn_cn/diag_embed_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f41a82046486ffc2f9229c5d20def7a8bf648d4b --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/diag_embed_cn.rst @@ -0,0 +1,78 @@ +.. _cn_api_functional_diag_embed: + +diag_embed +------------------------------- + +.. py:function:: paddle.functional.diag_embed(input, offset=0, dim1=-2, dim2=-1): + +:alias_main: paddle.nn.functional.diag_embed +:alias: paddle.nn.functional.diag_embed,paddle.nn.functional.extension.diag_embed + + + + 该 OP 创建一个 Tensor,其在指定的 2D 平面(由 ``dim1`` 和 ``dim2`` 指定)上的对角线由输入 ``input`` 填充。 + 默认的,指定的 2D 平面由返回 Tensor 的最后两维组成。 + + 参数 ``offset`` 确定在指定的二维平面中填充对角线的位置: + + - 如果 offset = 0,则填充主对角线。 + - 如果 offset > 0,则填充主对角线右上的对角线。 + - 如果 offset < 0,则填充主对角线左下的对角线。 + +参数: + - **input** (Variable|numpy.ndarray)- 输入变量,至少为 1D 数组,支持数据类型为 float32,float64,int32,int64。 + - **offset** (int ,可选)- 从指定的二维平面中获取对角线的位置,默认值为 0,既主对角线。 + - **dim1** (int , 可选)- 填充对角线的二维平面的第一维,默认值为 -2。 + - **dim2** (int , 可选)- 填充对角线的二维平面的第二维,默认值为 -1。 + +返回: 指定二维平面填充了对角线的 Tensor。数据类型和输入数据类型一致。 + +返回类型: 变量(Variable) + +**代码示例** + +.. code-block:: python + + import paddle.nn.functional as F + import paddle.fluid.dygraph as dg + import numpy as np + + diag_embed = np.random.randn(2, 3).astype('float32') + # [[ 0.7545889 , -0.25074545, 0.5929117 ], + # [-0.6097662 , -0.01753256, 0.619769 ]] + with dg.guard(): + data1 = F.diag_embed(diag_embed) + data1.numpy() + # [[[ 0.7545889 , 0. , 0. ], + # [ 0. , -0.25074545, 0. ], + # [ 0. , 0. , 0.5929117 ]], + + # [[-0.6097662 , 0. , 0. ], + # [ 0. , -0.01753256, 0. ], + # [ 0. , 0. , 0.619769 ]]] + + data2 = F.diag_embed(diag_embed, offset=-1, dim1=0, dim2=2) + data2.numpy() + # [[[ 0. , 0. , 0. , 0. ], + # [ 0.7545889 , 0. , 0. , 0. ], + # [ 0. , -0.25074545, 0. , 0. ], + # [ 0. , 0. , 0.5929117 , 0. ]], + # + # [[ 0. , 0. , 0. , 0. ], + # [-0.6097662 , 0. , 0. , 0. ], + # [ 0. , -0.01753256, 0. , 0. ], + # [ 0. , 0. , 0.619769 , 0. ]]] + + data3 = F.diag_embed(diag_embed, offset=1, dim1=0, dim2=2) + data3.numpy() + # [[[ 0. , 0.7545889 , 0. , 0. ], + # [ 0. , -0.6097662 , 0. , 0. ]], + # + # [[ 0. , 0. , -0.25074545, 0. ], + # [ 0. , 0. , -0.01753256, 0. ]], + # + # [[ 0. , 0. , 0. , 0.5929117 ], + # [ 0. , 0. , 0. , 0.619769 ]], + # + # [[ 0. , 0. , 0. , 0. ], + # [ 0. , 0. , 0. , 0. ]]] diff --git a/doc/fluid/api_cn/nn_cn/dice_loss_cn.rst b/doc/fluid/api_cn/nn_cn/dice_loss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9c16a643f401f6aa31af6a02ca29dd60a7923510 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/dice_loss_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_dice_loss: + +dice_loss +------------------------------- +:doc_source: paddle.fluid.layers.dice_loss + + diff --git a/doc/fluid/api_cn/nn_cn/distribute_fpn_proposals_cn.rst b/doc/fluid/api_cn/nn_cn/distribute_fpn_proposals_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c26b3bd3331a268e848f80eb3684007d263ee87c --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/distribute_fpn_proposals_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_nn_cn_distribute_fpn_proposals: + +distribute_fpn_proposals +------------------------------- +:doc_source: paddle.fluid.layers.distribute_fpn_proposals + + diff --git a/doc/fluid/api_cn/nn_cn/edit_distance_cn.rst b/doc/fluid/api_cn/nn_cn/edit_distance_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a58140feef68af71ef17f453715532d52ba6406a --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/edit_distance_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_edit_distance: + +edit_distance +------------------------------- +:doc_source: paddle.fluid.layers.edit_distance + + diff --git a/doc/fluid/api_cn/nn_cn/elu_cn.rst b/doc/fluid/api_cn/nn_cn/elu_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c0cf22a5123d75696dc8bdbd7fb78e2d33314c0c --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/elu_cn.rst @@ -0,0 +1,44 @@ +.. _cn_api_nn_cn_elu: + +elu +------------------------------- + +.. py:function:: paddle.nn.functional.elu(x, alpha=1.0, name=None) + +elu激活层(ELU Activation Operator) + +根据 `Exponential Linear Units `_ 对输入Tensor中每个元素应用以下计算。 + +.. math:: + + elu(x) = max(0, x) + min(0, \alpha * (e^{x} − 1)) + +其中,:math:`x` 为输入的 Tensor + +参数: +:::::::::: + - x (Tensor) - 输入的 ``Tensor`` ,数据类型为:float32、float64。 + - alpha (float, 可选) - elu的alpha值,默认值为1.0。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +:::::::::: + ``Tensor`` ,数据类型和形状同 ``x`` 一致。 + +代码示例 +:::::::::: + +.. code-block:: python + + import paddle + import paddle.nn.functional as F + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([[-1,6],[1,15.6]])) + out = F.elu(x, alpha=0.2) + # [[-0.12642411 6. ] + # [ 1. 15.6 ]] + + diff --git a/doc/fluid/api_cn/nn_cn/erf_cn.rst b/doc/fluid/api_cn/nn_cn/erf_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f8b8972049c6802429b27b2921959e1e0c41f2e1 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/erf_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_erf: + +erf +------------------------------- +:doc_source: paddle.fluid.layers.erf + + diff --git a/doc/fluid/api_cn/nn_cn/exponential_decay_cn.rst b/doc/fluid/api_cn/nn_cn/exponential_decay_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..42bbbaee3dc64726286d35d6ca6066fa16919675 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/exponential_decay_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_exponential_decay: + +exponential_decay +------------------------------- +:doc_source: paddle.fluid.layers.exponential_decay + + diff --git a/doc/fluid/api_cn/nn_cn/filter_by_instag_cn.rst b/doc/fluid/api_cn/nn_cn/filter_by_instag_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..14cd721f7d3c75a6e7c5bbed46b5ff3a37ad4329 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/filter_by_instag_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_filter_by_instag: + +filter_by_instag +------------------------------- +:doc_source: paddle.fluid.layers.filter_by_instag + + diff --git a/doc/fluid/api_cn/nn_cn/fsp_matrix_cn.rst b/doc/fluid/api_cn/nn_cn/fsp_matrix_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1e602e71236eef663383487b2fe4e2666a7981c9 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/fsp_matrix_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_nn_cn_fsp_matrix: + +fsp_matrix +------------------------------- +:doc_source: paddle.fluid.layers.fsp_matrix + + diff --git a/doc/fluid/api_cn/nn_cn/functional_cn.rst b/doc/fluid/api_cn/nn_cn/functional_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d5e5c8e4b94c07d8172a798231e03182aedf693c --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/functional_cn.rst @@ -0,0 +1,42 @@ +======================= +functional +======================= + + + + +.. toctree:: + :maxdepth: 1 + + functional_cn/binary_cross_entropy_cn.rst + functional_cn/binary_cross_entropy_with_logits_cn.rst + functional_cn/l1_loss_cn.rst + functional_cn/nll_loss_cn.rst + functional_cn/normalize_cn.rst + functional_cn/batch_norm_cn.rst + functional_cn/instance_norm_cn.rst + functional_cn/layer_norm_cn.rst + functional_cn/margin_ranking_loss_cn.rst + functional_cn/mse_loss_cn.rst + functional_cn/ctc_loss_cn.rst + functional_cn/sigmoid_cn.rst + functional_cn/one_hot_cn.rst + functional_cn/dropout_cn.rst + functional_cn/dropout2d_cn.rst + functional_cn/dropout3d_cn.rst + functional_cn/alpha_dropout_cn.rst + functional_cn/mse_loss_cn.rst + functional_cn/adaptive_avg_pool2d_cn.rst + functional_cn/adaptive_avg_pool3d_cn.rst + functional_cn/kl_div_cn.rst + functional_cn/conv2d_cn.rst + functional_cn/conv3d_cn.rst + functional_cn/conv_transpose2d_cn.rst + functional_cn/conv_transpose3d_cn.rst + functional_cn/sigmoid_cn.rst + functional_cn/adaptive_avg_pool1d_cn.rst + functional_cn/adaptive_max_pool1d_cn.rst + functional_cn/avg_pool1d_cn.rst + functional_cn/max_pool1d_cn.rst + functional_cn/cross_entropy_loss_cn.rst + functional_cn/bilinear_cn.rst diff --git a/doc/fluid/api_cn/nn_cn/functional_cn/adaptive_avg_pool1d_cn.rst b/doc/fluid/api_cn/nn_cn/functional_cn/adaptive_avg_pool1d_cn.rst new file mode 100755 index 0000000000000000000000000000000000000000..ae3683263fc3f3ea659259873028b7116d483c02 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/functional_cn/adaptive_avg_pool1d_cn.rst @@ -0,0 +1,54 @@ +.. _cn_api_nn_functional_adaptive_avg_pool1d: + + +adaptive_avg_pool1d +------------------------------- + +.. py:function:: paddle.nn.functional.adaptive_avg_pool1d(x, output_size, name=None) + +该算子根据输入 `x` , `output_size` 等参数对一个输入Tensor计算1D的自适应平均池化。输入和输出都是3-D Tensor, +默认是以 `NCL` 格式表示的,其中 `N` 是 batch size, `C` 是通道数, `L` 是输入特征的长度. + +.. note:: + 详细请参考对应的 `Class` 请参考: :ref:`cn_api_nn_AdaptiveAvgPool1d` 。 + + +参数 +::::::::: + - **x** (Tensor): 当前算子的输入, 其是一个形状为 `[N, C, L]` 的3-D Tensor。其中 `N` 是batch size, `C` 是通道数, `L` 是输入特征的长度。 其数据类型为float32或者float64。 + - **output_size** (int): 算子输出特征图的长度,其数据类型为int。 + - **name** (str,可选): 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +::::::::: +``Tensor``, 输入 `x` 经过自适应池化计算得到的目标3-D Tensor,其数据类型与输入相同。 + +抛出异常 +::::::::: + - ``ValueError`` - ``output_size`` 应是一个整数。 + +代码示例 +::::::::: + +.. code-block:: python + + # average adaptive pool1d + # suppose input data in shape of [N, C, L], `output_size` is m, + # output shape is [N, C, m], adaptive pool divide L dimension + # of input data into m grids averagely and performs poolings in each + # grid to get output. 
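+ # note: the grid boundaries below are computed with floor/ceil, so neighbouring + # grids may overlap by one element when L is not evenly divisible by m.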
+ # adaptive avg pool performs calculations as follow: + # + # for i in range(m): + # lstart = floor(i * L / m) + # lend = ceil((i + 1) * L / m) + # output[:, :, i] = sum(input[:, :, lstart: lend])/(lend - lstart) + # + import paddle + import paddle.nn.functional as F + import numpy as np + paddle.disable_static() + + data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32)) + pool_out = F.adaptive_avg_pool1d(data, output_size=16) + # pool_out shape: [1, 3, 16] diff --git a/doc/fluid/api_cn/nn_cn/functional_cn/adaptive_avg_pool2d_cn.rst b/doc/fluid/api_cn/nn_cn/functional_cn/adaptive_avg_pool2d_cn.rst new file mode 100755 index 0000000000000000000000000000000000000000..cd5e3a087a4e187c9015c2a19a96112295012c15 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/functional_cn/adaptive_avg_pool2d_cn.rst @@ -0,0 +1,68 @@ +adaptive_avg_pool2d +------------------------------- + +.. py:function:: paddle.nn.functional.adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None) + +该算子根据输入 `x` , `output_size` 等参数对一个输入Tensor计算2D的自适应平均池化。输入和输出都是4-D Tensor, +默认是以 `NCHW` 格式表示的,其中 `N` 是 batch size, `C` 是通道数, `H` 是输入特征的高度, `W` 是输入特征的宽度。 + +计算公式如下: + +.. math:: + + hstart &= floor(i * H_{in} / H_{out}) + + hend &= ceil((i + 1) * H_{in} / H_{out}) + + wstart &= floor(j * W_{in} / W_{out}) + + wend &= ceil((j + 1) * W_{in} / W_{out}) + + Output(i, j) &= \frac{sum(Input[hstart:hend, wstart:wend])}{(hend - hstart) * (wend - wstart)} + + +参数 +::::::::: + - **x** (Tensor): 默认形状为(批大小,通道数,高度,宽度),即NCHW格式的4-D Tensor。 其数据类型为float16, float32, float64, int32或int64。 + - **output_size** (int|list|tuple): 算子输出特征图的尺寸,如果其是list或tuple类型的数值,必须包含两个元素,H和W。H和W既可以是int类型值也可以是None,None表示与输入特征尺寸相同。 + - **data_format** (str): 输入和输出的数据格式,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 + - **name** (str,可选): 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +::::::::: +``Tensor``, 默认形状为(批大小,通道数,输出特征高度,输出特征宽度),即NCHW格式的4-D Tensor,其数据类型与输入相同。 + +抛出异常 +::::::::: + - ``ValueError`` - 如果 ``data_format`` 既不是"NCHW"也不是"NHWC"。 + +代码示例 +::::::::: + +.. code-block:: python + + # adaptive avg pool2d + # suppose input data in shape of [N, C, H, W], `output_size` is [m, n], + # output shape is [N, C, m, n], adaptive pool divide H and W dimensions + # of input data into m * n grids averagely and performs poolings in each + # grid to get output. + # adaptive avg pool performs calculations as follow: + # + # for i in range(m): + # for j in range(n): + # hstart = floor(i * H / m) + # hend = ceil((i + 1) * H / m) + # wstart = floor(j * W / n) + # wend = ceil((j + 1) * W / n) + # output[:, :, i, j] = avg(input[:, :, hstart: hend, wstart: wend]) + # + import paddle + import numpy as np + paddle.disable_static() + input_data = np.random.rand(2, 3, 32, 32) + x = paddle.to_tensor(input_data) + # x.shape is [2, 3, 32, 32] + pool_out = paddle.nn.functional.adaptive_avg_pool2d( + x = x, + output_size=[3, 3]) + # pool_out.shape is [2, 3, 3, 3] \ No newline at end of file diff --git a/doc/fluid/api_cn/nn_cn/functional_cn/adaptive_avg_pool3d_cn.rst b/doc/fluid/api_cn/nn_cn/functional_cn/adaptive_avg_pool3d_cn.rst new file mode 100755 index 0000000000000000000000000000000000000000..756f9d01ef220e7c42253d2a53572dba5f619f43 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/functional_cn/adaptive_avg_pool3d_cn.rst @@ -0,0 +1,76 @@ +adaptive_avg_pool3d +------------------------------- + +.. 
py:function:: paddle.nn.functional.adaptive_avg_pool3d(x, output_size, data_format='NCDHW', name=None) + +该算子根据输入 `x` , `output_size` 等参数对一个输入Tensor计算3D的自适应平均池化。输入和输出都是5-D Tensor, +默认是以 `NCDHW` 格式表示的,其中 `N` 是 batch size, `C` 是通道数, `D` 是特征图长度, `H` 是输入特征的高度, `H` 是输入特征的宽度。 + +计算公式如下: + +.. math:: + + dstart &= floor(i * D_{in} / D_{out}) + + dend &= ceil((i + 1) * D_{in} / D_{out}) + + hstart &= floor(j * H_{in} / H_{out}) + + hend &= ceil((j + 1) * H_{in} / H_{out}) + + wstart &= floor(k * W_{in} / W_{out}) + + wend &= ceil((k + 1) * W_{in} / W_{out}) + + Output(i ,j, k) &= \frac{sum(Input[dstart:dend, hstart:hend, wstart:wend])}{(dend - dstart) * (hend - hstart) * (wend - wstart)} + + + +参数 +::::::::: + - **x** (Tensor): 默认形状为(批大小,通道数,长度,高度,宽度),即NCDHW格式的5-D Tensor。 其数据类型为float16, float32, float64, int32或int64. + - **output_size** (int|list|turple): 算子输出特征图的尺寸,如果其是list或turple类型的数值,必须包含三个元素,D,H和W。D,H和W既可以是int类型值也可以是None,None表示与输入特征尺寸相同。 + - **data_format** (str): 输入和输出的数据格式,可以是"NCDHW"和"NDHWC"。N是批尺寸,C是通道数,D是特征长度,H是特征高度,W是特征宽度。默认值:"NCDHW"。 + - **name** (str,可选): 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +::::::::: +``Tensor``, 默认形状为(批大小,通道数,输出特征长度,输出特征高度,输出特征宽度),即NCDHW格式的5-D Tensor,其数据类型与输入相同。 + +抛出异常 +::::::::: + - ``ValueError`` - 如果 ``data_format`` 既不是"NCDHW"也不是"NDHWC"。 + +代码示例 +::::::::: + +.. code-block:: python + + # adaptive avg pool3d + # suppose input data in shape of [N, C, D, H, W], `output_size` is [l, m, n], + # output shape is [N, C, l, m, n], adaptive pool divide D, H and W dimensions + # of input data into l * m * n grids averagely and performs poolings in each + # grid to get output. + # adaptive avg pool performs calculations as follow: + # + # for i in range(l): + # for j in range(m): + # for k in range(n): + # dstart = floor(i * D / l) + # dend = ceil((i + 1) * D / l) + # hstart = floor(j * H / m) + # hend = ceil((j + 1) * H / m) + # wstart = floor(k * W / n) + # wend = ceil((k + 1) * W / n) + # output[:, :, i, j, k] = + # avg(input[:, :, dstart:dend, hstart: hend, wstart: wend]) + import paddle + import numpy as np + paddle.disable_static() + input_data = np.random.rand(2, 3, 8, 32, 32) + x = paddle.to_tensor(input_data) + # x.shape is [2, 3, 8, 32, 32] + pool_out = paddle.nn.functional.adaptive_avg_pool3d( + x = x, + output_size=[3, 3, 3]) + # pool_out.shape is [2, 3, 3, 3, 3] \ No newline at end of file diff --git a/doc/fluid/api_cn/nn_cn/functional_cn/adaptive_max_pool1d_cn.rst b/doc/fluid/api_cn/nn_cn/functional_cn/adaptive_max_pool1d_cn.rst new file mode 100755 index 0000000000000000000000000000000000000000..f88c16aa13961842246fcb6d87b5b8b7e675b929 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/functional_cn/adaptive_max_pool1d_cn.rst @@ -0,0 +1,55 @@ +.. _cn_api_nn_functional_adaptive_max_pool1d: + + +adaptive_max_pool1d +------------------------------- + +.. py:function:: paddle.nn.functional.adaptive_max_pool1d(x, output_size, return_indices=False, name=None) + +该算子根据输入 `x` , `output_size` 等参数对一个输入Tensor计算1D的自适应最大值池化。输入和输出都是3-D Tensor, +默认是以 `NCL` 格式表示的,其中 `N` 是 batch size, `C` 是通道数, `L` 是输入特征的长度. + +.. 
note:: + 详细请参考对应的 `Class` 请参考: :ref:`cn_api_nn_AdaptiveMaxPool1d` 。 + + +参数 +::::::::: + - **x** (Tensor): 当前算子的输入, 其是一个形状为 `[N, C, L]` 的3-D Tensor。其中 `N` 是batch size, `C` 是通道数, `L` 是输入特征的长度。 其数据类型为float32或者float64。 + - **output_size** (int|list|tuple): 算子输出特征图的长度,其数据类型为int或list,tuple。 + - **return_indices** (bool): 如果设置为True,则会与输出一起返回最大值的索引,默认为False。 + - **name** (str,可选): 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +::::::::: +``Tensor``, 输入 `x` 经过自适应池化计算得到的目标3-D Tensor,其数据类型与输入相同。 + +抛出异常 +::::::::: + - ``ValueError`` - 如果 ``output_size`` 不是int类型值。 + +代码示例 +::::::::: + +.. code-block:: python + + # max adaptive pool1d + # suppose input data in shape of [N, C, L], `output_size` is m, + # output shape is [N, C, m], adaptive pool divide L dimension + # of input data into m grids averagely and performs poolings in each + # grid to get output. + # adaptive max pool performs calculations as follow: + # + # for i in range(m): + # lstart = floor(i * L / m) + # lend = ceil((i + 1) * L / m) + # output[:, :, i] = max(input[:, :, lstart: lend]) + # + import paddle + import paddle.nn.functional as F + import numpy as np + paddle.disable_static() + + data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32)) + pool_out = F.adaptive_max_pool1d(data, output_size=16) + # pool_out shape: [1, 3, 16]) diff --git a/doc/fluid/api_cn/nn_cn/functional_cn/alpha_dropout_cn.rst b/doc/fluid/api_cn/nn_cn/functional_cn/alpha_dropout_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..cad8c3bb7d12fb6a9f9db481103d8e949ca0c688 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/functional_cn/alpha_dropout_cn.rst @@ -0,0 +1,37 @@ +.. _cn_api_nn_functional_alpha_dropout: + +alpha_dropout +------------------------------- + +.. py:function:: paddle.nn.functional.alpha_dropout(x, p=0.5, training=True, name=None) + +alpha_dropout是一种具有自归一化性质的dropout。均值为0,方差为1的输入,经过alpha_dropout计算之后,输出的均值和方差与输入保持一致。alpha_dropout通常与SELU激活函数组合使用。 + +参数 +::::::::: + - **x** (Tensor): 输入的多维 `Tensor` ,数据类型为:float32、float64。 + - **p** (float): 将输入节点置0的概率,即丢弃概率。默认: 0.5。 + - **training** (bool): 标记是否为训练阶段。 默认: True。 + - **name** (str,可选): 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +返回 +::::::::: +经过alpha_dropout之后的结果,与输入x形状相同的 `Tensor` 。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + x = np.array([[-1, 1], [-1, 1]]).astype('float32') + x = paddle.to_tensor(x) + y_train = paddle.nn.functional.alpha_dropout(x, 0.5) + y_test = paddle.nn.functional.alpha_dropout(x, 0.5, training=False) + print(x.numpy()) + print(y_train.numpy()) + # [[-0.10721093, 1.6655989 ], [-0.7791938, -0.7791938]] (randomly) + print(y_test.numpy()) diff --git a/doc/fluid/api_cn/nn_cn/functional_cn/avg_pool1d_cn.rst b/doc/fluid/api_cn/nn_cn/functional_cn/avg_pool1d_cn.rst new file mode 100755 index 0000000000000000000000000000000000000000..0db33aac3c91669b90656ec32a05e1ba875ee843 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/functional_cn/avg_pool1d_cn.rst @@ -0,0 +1,53 @@ +.. _cn_api_nn_functional_avg_pool1d: + + +avg_pool1d +------------------------------- + +.. py:function:: paddle.nn.functional.avg_pool1d(x, kernel_size, stride=None, padding=0, count_include_pad=True, ceil_mode=False, name=None) + +该算子根据输入 `x` , `kernel_size` 等参数对一个输入Tensor计算1D的平均池化。输入和输出都是3-D Tensor, +默认是以 `NCL` 格式表示的,其中 `N` 是 batch size, `C` 是通道数, `L` 是输入特征的长度。 + +.. 
note:: + 详细内容请参考对应的 `Class` : :ref:`cn_api_nn_AvgPool1d` 。 + + +参数 +::::::::: + - **x** (Tensor): 当前算子的输入, 其是一个形状为 `[N, C, L]` 的3-D Tensor。其中 `N` 是batch size, `C` 是通道数, `L` 是输入特征的长度。 其数据类型为float32或者float64。 + - **kernel_size** (int|list|tuple): 池化核的尺寸大小. 如果kernel_size为list或tuple类型, 其必须包含一个整数. + - **stride** (int|list|tuple): 池化操作步长. 如果stride为list或tuple类型, 其必须包含一个整数. + - **padding** (string|int|list|tuple): 池化补零的方式. 如果padding是一个字符串,则必须为 `SAME` 或者 `VALID` 。 如果是tuple或者list类型, 则应是 `[pad_left, pad_right]` 形式。如果padding是一个非0值,那么表示会在输入的两端都padding上同样长度的0。 + - **count_include_pad** (bool): 是否用额外padding的值计算平均池化结果,默认为True。 + - **ceil_mode** (bool): 是否用ceil函数计算输出的height和width,如果设置为False, 则使用floor函数来计算,默认为False。 + - **name** (str,可选): 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + + +返回 +::::::::: +``Tensor``, 输入 `x` 经过平均池化计算得到的目标3-D Tensor,其数据类型与输入相同。 + +抛出异常 +::::::::: + - ``ValueError`` - 如果 ``padding`` 是字符串但不是 "SAME" 和 "VALID" 。 + - ``ValueError`` - 如果 ``padding`` 是 "VALID" 但 `ceil_mode` 被设置为True。 + - ``ValueError`` - 如果 ``padding`` 是一个长度大于1的list或tuple。 + - ``ShapeError`` - 如果输入x不是一个3-D Tensor。 + - ``ShapeError`` - 如果计算得到的输出形状小于等于0。 + + + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import paddle.nn.functional as F + import numpy as np + paddle.disable_static() + + data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32)) + pool_out = F.avg_pool1d(data, kernel_size=2, stride=2, padding=0) + # pool_out shape: [1, 3, 16] diff --git a/doc/fluid/api_cn/nn_cn/functional_cn/batch_norm_cn.rst b/doc/fluid/api_cn/nn_cn/functional_cn/batch_norm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e9fee412f3a8fa00a8197f8be3502d0a9206eafc --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/functional_cn/batch_norm_cn.rst @@ -0,0 +1,47 @@ +.. _cn_api_nn_functional_batch_norm: + +batch_norm +------------------------------- + +.. py:function:: paddle.nn.functional.batch_norm(x, running_mean, running_var, weight, bias, training=False, momentum=0.9, epsilon=1e-05, data_format='NCHW', name=None) + +推荐使用 nn.BatchNorm1d、nn.BatchNorm2d、nn.BatchNorm3d,它们在内部调用此方法。 + +详情见 :ref:`cn_api_nn_BatchNorm1d` 。 + +参数: + - **x** (Tensor) - 输入,数据类型为float32, float64。 + - **running_mean** (Tensor) - 均值的Tensor。 + - **running_var** (Tensor) - 方差的Tensor。 + - **weight** (Tensor) - 权重的Tensor。 + - **bias** (Tensor) - 偏置的Tensor。 + - **training** (bool, 可选) - 标记是否为训练阶段。默认值:False。 + - **momentum** (float, 可选) - 此值用于计算 ``moving_mean`` 和 ``moving_var`` 。默认值:0.9。 + - **epsilon** (float, 可选) - 为了数值稳定加在分母上的值。默认值:1e-05。 + - **data_format** (string, 可选) - 指定输入数据格式,数据格式可以为"NC", "NCL", "NCHW" 或者"NCDHW"。默认值:"NCHW"。 + - **name** (string, 可选) – BatchNorm的名称, 默认值为None。更多信息请参见 :ref:`api_guide_Name` 。 + + +返回:``Tensor``,批归一化计算后的输出,数据类型和形状与 ``x`` 一致。 + + +**代码示例** + +.. 
code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + np.random.seed(123) + x = np.random.random(size=(2, 1, 2, 3)).astype('float32') + running_mean = np.random.random(size=1).astype('float32') + running_variance = np.random.random(size=1).astype('float32') + weight_data = np.random.random(size=1).astype('float32') + bias_data = np.random.random(size=1).astype('float32') + x = paddle.to_tensor(x) + rm = paddle.to_tensor(running_mean) + rv = paddle.to_tensor(running_variance) + w = paddle.to_tensor(weight_data) + b = paddle.to_tensor(bias_data) + batch_norm_out = paddle.nn.functional.batch_norm(x, rm, rv, w, b) + print(batch_norm_out.numpy()) diff --git a/doc/fluid/api_cn/nn_cn/functional_cn/bilinear_cn.rst b/doc/fluid/api_cn/nn_cn/functional_cn/bilinear_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6872bd957cd65779d834d2bda44774f9a04e2c36 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/functional_cn/bilinear_cn.rst @@ -0,0 +1,41 @@ +.. _cn_api_nn_functional_bilinear: + +bilinear +------------------------------- + + +.. py:function:: paddle.nn.functional.bilinear(x1, x2, weight, bias=None, name=None) + +该层对两个输入执行双线性张量积。 +详细的计算和返回值维度请参见 :ref:`cn_api_nn_Bilinear` 。 + +参数 +::::::::: + - **x1** (Tensor): 第一个输入的 `Tensor` ,数据类型为:float32、float64。 + - **x2** (Tensor): 第二个输入的 `Tensor` ,数据类型为:float32、float64。 + - **weight** (Parameter) :本层的可学习参数。形状是 [out_features, in1_features, in2_features]。 + - **bias** (Parameter, 可选) : 本层的可学习偏置。形状是 [1, out_features]。默认值为None,如果被设置成None,则不会有bias加到output结果上。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为 None。 + +返回 +::::::::: +``Tensor``,一个形为 [batch_size, out_features] 的 2-D 张量。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy + import paddle.nn.functional as F + + paddle.disable_static() + x1 = numpy.random.random((5, 5)).astype('float32') + x2 = numpy.random.random((5, 4)).astype('float32') + w = numpy.random.random((1000, 5, 4)).astype('float32') + b = numpy.random.random((1, 1000)).astype('float32') + + result = F.bilinear(paddle.to_tensor(x1), paddle.to_tensor(x2), paddle.to_tensor(w), paddle.to_tensor(b)) # result shape [5, 1000] + + diff --git a/doc/fluid/api_cn/nn_cn/functional_cn/binary_cross_entropy_cn.rst b/doc/fluid/api_cn/nn_cn/functional_cn/binary_cross_entropy_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..11f77916759816ce1ddb451263e3bb42106ee49a --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/functional_cn/binary_cross_entropy_cn.rst @@ -0,0 +1,61 @@ +.. _cn_api_nn_functional_binary_cross_entropy: + +binary_cross_entropy +------------------------------- + +.. py:function:: paddle.nn.functional.binary_cross_entropy(input, label, weight=None, reduction='mean', name=None) + +该函数用于计算输入 ``input`` 和标签 ``label`` 之间的二值交叉熵损失值。二值交叉熵损失函数公式如下: + +当 `weight` 不为空时,公式为: + +.. math:: + Out = -1 * weight * (label * log(input) + (1 - label) * log(1 - input)) + +当 `weight` 为空时,公式为: + +.. math:: + Out = -1 * (label * log(input) + (1 - label) * log(1 - input)) + +当 `reduction` 为 `none` 时,直接返回最原始的 `Out` 结果。 + +当 `reduction` 为 `mean` 时,最终的输出结果为: + +.. math:: + Out = MEAN(Out) + +当 `reduction` 为 `sum` 时,最终的输出结果为: + +.. math:: + Out = SUM(Out) + + +.. 
note:: + 输入数据 ``input`` 一般是 ``sigmoid`` 的输出。因为是二分类,所以标签值 ``label`` 应该是0或者1。 + +参数 +::::::::: + - **input** (Tensor) - :math:`[N, *]` , 其中N是batch_size, `*` 是任意其他维度。输入数据 ``input`` 一般是 ``sigmoid`` 的输出。数据类型是float32、float64。 + - **label** (Tensor) - :math:`[N, *]` ,标签 ``label`` 的维度、数据类型与输入 ``input`` 相同。 + - **weight** (Tensor,可选) - 手动指定每个batch二值交叉熵的权重,如果指定的话,维度必须是一个batch的数据的维度。数据类型是float32, float64。默认值是:None。 + - **reduction** (str,可选) - 指定应用于输出结果的计算方式,可选值有: ``'none'``, ``'mean'``, ``'sum'`` 。默认为 ``'mean'``,计算 `BCELoss` 的均值;设置为 ``'sum'`` 时,计算 `BCELoss` 的总和;设置为 ``'none'`` 时,则返回bce_loss。 + - **name** (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +返回 +::::::::: + - 输出的结果Tensor。如果 :attr:`reduction` 是 ``'none'``, 则输出的维度为 :math:`[N, *]` , 与输入 ``input`` 的形状相同。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``, 则输出的维度为 :math:`[1]` 。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import paddle.nn.functional as F + + paddle.disable_static() + input = paddle.to_tensor([0.5, 0.6, 0.7], dtype='float32') + label = paddle.to_tensor([1.0, 0.0, 1.0], dtype='float32') + output = F.binary_cross_entropy(input, label) + print(output.numpy()) # [0.65537095] + diff --git a/doc/fluid/api_cn/nn_cn/functional_cn/binary_cross_entropy_with_logits_cn.rst b/doc/fluid/api_cn/nn_cn/functional_cn/binary_cross_entropy_with_logits_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..18279319b1c0c6f9568e0af6e93bf52bb4cf0b22 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/functional_cn/binary_cross_entropy_with_logits_cn.rst @@ -0,0 +1,59 @@ +.. _cn_api_paddle_nn_functional_binary_cross_entropy_with_logits: + +binary_cross_entropy_with_logits +------------------------------- + +.. py:function:: paddle.nn.functional.binary_cross_entropy_with_logits(logit, label, weight=None, reduction='mean', pos_weight=None, name=None) + +该OP用于计算输入 `logit` 和标签 `label` 间的 `binary cross entropy with logits loss` 损失。 + +该OP结合了 `sigmoid` 操作和 :ref:`api_nn_loss_BCELoss` 操作。同时,我们也可以认为该OP是 ``sigmoid_cross_entrop_with_logits`` 和一些 `reduce` 操作的组合。 + +在每个类别独立的分类任务中,该OP可以计算按元素的概率误差。可以将其视为预测数据点的标签,其中标签不是互斥的。例如,一篇新闻文章可以同时关于政治,科技,体育或者同时不包含这些内容。 + +首先,该OP可通过下式计算损失函数: + +.. math:: + Out = -Labels * \log(\sigma(Logit)) - (1 - Labels) * \log(1 - \sigma(Logit)) + +其中 :math:`\sigma(Logit) = \frac{1}{1 + e^{-Logit}}` , 代入上方计算公式中: + +.. math:: + Out = Logit - Logit * Labels + \log(1 + e^{-Logit}) + +为了计算稳定性,防止当 :math:`Logit<0` 时, :math:`e^{-Logit}` 溢出,loss将采用以下公式计算: + +.. 
math:: + Out = \max(Logit, 0) - Logit * Labels + \log(1 + e^{-\|Logit\|}) + +然后,当 ``weight`` or ``pos_weight`` 不为None的时候,该算子会在输出Out上乘以相应的权重。张量 ``weight`` 给Batch中的每一条数据赋予不同权重,张量 ``pos_weight`` 给每一类的正例添加相应的权重。 + +最后,该算子会添加 `reduce` 操作到前面的输出Out上。当 `reduction` 为 `none` 时,直接返回最原始的 `Out` 结果。当 `reduction` 为 `mean` 时,返回输出的均值 :math:`Out = MEAN(Out)` 。当 `reduction` 为 `sum` 时,返回输出的求和 :math:`Out = SUM(Out)` 。 + +**注意: 因为是二分类任务,所以标签值应该是0或者1。 + +参数 +::::::::: + - **logit** (Tensor) - :math:`[N, *]` , 其中N是batch_size, `*` 是任意其他维度。输入数据 ``logit`` 一般是线性层的输出,不需要经过 ``sigmoid`` 层。数据类型是float32、float64。 + - **label** (Tensor) - :math:`[N, *]` ,标签 ``label`` 的维度、数据类型与输入 ``logit`` 相同。 + - **weight** (Tensor,可选) - 手动指定每个batch二值交叉熵的权重,如果指定的话,维度必须是一个batch的数据的维度。数据类型是float32, float64。默认值是:None。 + - **reduction** (str,可选) - 指定应用于输出结果的计算方式,可选值有: ``'none'``, ``'mean'``, ``'sum'`` 。默认为 ``'mean'``,计算 `BCELoss` 的均值;设置为 ``'sum'`` 时,计算 `BCELoss` 的总和;设置为 ``'none'`` 时,则返回原始loss。 + - **pos_weight** (Tensor,可选) - 手动指定正类的权重,必须是与类别数相等长度的向量。数据类型是float32, float64。默认值是:None。 + - **name** (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +返回 +::::::::: + - Tensor,输出的Tensor。如果 :attr:`reduction` 是 ``'none'``, 则输出的维度为 :math:`[N, *]` , 与输入 ``input`` 的形状相同。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``, 则输出的维度为 :math:`[1]` 。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + paddle.disable_static() + logit = paddle.to_tensor([5.0, 1.0, 3.0], dtype="float32") + label = paddle.to_tensor([1.0, 0.0, 1.0], dtype="float32") + output = paddle.nn.functional.binary_cross_entropy_with_logits(logit, label) + print(output.numpy()) # [0.45618808] + diff --git a/doc/fluid/api_cn/nn_cn/functional_cn/conv2d_cn.rst b/doc/fluid/api_cn/nn_cn/functional_cn/conv2d_cn.rst new file mode 100755 index 0000000000000000000000000000000000000000..aa03a61971f36611643c570e689bc254cb86e126 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/functional_cn/conv2d_cn.rst @@ -0,0 +1,106 @@ +conv2d +------------------------------- + +.. py:function:: paddle.nn.functional.conv2d(x, weight, bias=None, stride=1, padding=0, dilation=1, groups=1, data_format="NCHW", name=None) + +该OP是二维卷积层(convolution2d layer),根据输入、卷积核、步长(stride)、填充(padding)、空洞大小(dilations)一组参数计算输出特征层大小。输入和输出是NCHW或NHWC格式,其中N是批尺寸,C是通道数,H是特征高度,W是特征宽度。卷积核是MCHW格式,M是输出图像通道数,C是输入图像通道数,H是卷积核高度,W是卷积核宽度。如果组数(groups)大于1,C等于输入图像通道数除以组数的结果。详情请参考UFLDL's : `卷积 `_ 。如果bias_attr不为False,卷积计算会添加偏置项。如果指定了激活函数类型,相应的激活函数会作用在最终结果上。 + +对每个输入X,有等式: + +.. math:: + + Out = \sigma \left ( W * X + b \right ) + +其中: + - :math:`X` :输入值,NCHW或NHWC格式的4-D Tensor + - :math:`W` :卷积核值,MCHW格式的4-D Tensor + - :math:`*` :卷积操作 + - :math:`b` :偏置值,2-D Tensor,形状为 ``[M,1]`` + - :math:`\sigma` :激活函数 + - :math:`Out` :输出值,NCHW或NHWC格式的4-D Tensor, 和 ``X`` 的形状可能不同 + +**示例** + +- 输入: + + 输入形状::math:`(N,C_{in},H_{in},W_{in})` + + 卷积核形状: :math:`(C_{out},C_{in},H_{f},W_{f})` + +- 输出: + + 输出形状: :math:`(N,C_{out},H_{out},W_{out})` + +其中 + +.. math:: + + H_{out} &= \frac{\left ( H_{in} + padding\_height\_top + padding\_height\_bottom-\left ( dilation[0]*\left ( H_{f}-1 \right )+1 \right ) \right )}{stride[0]}+1 + + W_{out} &= \frac{\left ( W_{in} + padding\_width\_left + padding\_width\_right -\left ( dilation[1]*\left ( W_{f}-1 \right )+1 \right ) \right )}{stride[1]}+1 + +如果 ``padding`` = "SAME": + +.. math:: + H_{out} = \frac{(H_{in} + stride[0] - 1)}{stride[0]} + +.. math:: + W_{out} = \frac{(W_{in} + stride[1] - 1)}{stride[1]} + +如果 ``padding`` = "VALID": + +.. 
math:: + H_{out} = \frac{\left ( H_{in} -\left ( dilation[0]*\left ( H_{f}-1 \right )+1 \right ) \right )}{stride[0]}+1 + + W_{out} = \frac{\left ( W_{in} -\left ( dilation[1]*\left ( W_{f}-1 \right )+1 \right ) \right )}{stride[1]}+1 + + +参数: + - **x** (Tensor) - 输入是形状为 :math:`[N, C, H, W]` 或 :math:`[N, H, W, C]` 的4-D Tensor,N是批尺寸,C是通道数,H是特征高度,W是特征宽度,数据类型为float16, float32或float64。 + - **weight** (Tensor)) - 形状为 :math:`[M, C/g, kH, kW]` 的卷积核。 M是输出通道数, g是分组的个数,kH是卷积核的高度,kW是卷积核的宽度。 + - **bias** (int|list|tuple) - 偏置项,形状为: :math:`[M,]` 。 + - **stride** (int|list|tuple,可选) - 步长大小。卷积核和输入进行卷积计算时滑动的步长。如果它是一个列表或元组,则必须包含两个整型数:(stride_height,stride_width)。若为一个整数,stride_height = stride_width = stride。默认值:1。 + - **padding** (int|list|tuple|str,可选) - 填充大小。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有3种格式:(1)包含4个二元组:当 ``data_format`` 为"NCHW"时为 [[0,0], [0,0], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right]],当 ``data_format`` 为"NHWC"时为[[0,0], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right], [0,0]];(2)包含4个整数值:[padding_height_top, padding_height_bottom, padding_width_left, padding_width_right];(3)包含2个整数值:[padding_height, padding_width],此时padding_height_top = padding_height_bottom = padding_height, padding_width_left = padding_width_right = padding_width。若为一个整数,padding_height = padding_width = padding。默认值:0。 + - **dilation** (int|list|tuple,可选) - 空洞大小。空洞卷积时会使用该参数,卷积核对输入进行卷积时,感受野里每相邻两个特征点之间的空洞信息。如果空洞大小为列表或元组,则必须包含两个整型数:(dilation_height,dilation_width)。若为一个整数,dilation_height = dilation_width = dilation。默认值:1。 + - **groups** (int,可选) - 二维卷积层的组数。根据Alex Krizhevsky的深度卷积神经网络(CNN)论文中的成组卷积:当group=n,输入和卷积核分别根据通道数量平均分为n组,第一组卷积核和第一组输入进行卷积计算,第二组卷积核和第二组输入进行卷积计算,……,第n组卷积核和第n组输入进行卷积计算。默认值:1。 + - **weight_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr|bool,可选)- 指定偏置参数属性的对象。若 ``bias_attr`` 为bool类型,只支持为False,表示没有偏置参数。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 + - **name** (str,可选) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值:None。 + +返回:4-D Tensor,数据类型与 ``x`` 一致。返回卷积的结果。 + +返回类型:Tensor。 + +抛出异常: + - ``ValueError`` - 如果 ``data_format`` 既不是"NCHW"也不是"NHWC"。 + - ``ValueError`` - 如果 ``input`` 的通道数未被明确定义。 + - ``ValueError`` - 如果 ``padding`` 是字符串,既不是"SAME"也不是"VALID"。 + - ``ValueError`` - 如果 ``padding`` 含有4个二元组,与批尺寸对应维度的值不为0或者与通道对应维度的值不为0。 + - ``ShapeError`` - 如果输入不是4-D Tensor。 + - ``ShapeError`` - 如果输入和卷积核的维度大小不相同。 + - ``ShapeError`` - 如果输入的维度大小与 ``stride`` 之差不是2。 + - ``ShapeError`` - 如果输出的通道数不能被 ``groups`` 整除。 + + +**代码示例**: + +.. 
code-block:: python + + import paddle + import paddle.nn.functional as F + import numpy as np + + x = np.random.randn(2, 3, 8, 8).astype(np.float32) + w = np.random.randn(6, 3, 3, 3).astype(np.float32) + + paddle.disable_static() + + x_var = paddle.to_tensor(x) + w_var = paddle.to_tensor(w) + y_var = F.conv2d(x_var, w_var) + y_np = y_var.numpy() + + print(y_np.shape) + + # (2, 6, 6, 6) \ No newline at end of file diff --git a/doc/fluid/api_cn/nn_cn/functional_cn/conv3d_cn.rst b/doc/fluid/api_cn/nn_cn/functional_cn/conv3d_cn.rst new file mode 100755 index 0000000000000000000000000000000000000000..a96871b1ca9f0540cdadae0e1ba1ce90b5178aac --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/functional_cn/conv3d_cn.rst @@ -0,0 +1,83 @@ +conv3d +------------------------------- + +.. py:function:: paddle.nn.functional.conv3d(x, weight, bias=None, stride=1, padding=0, dilation=1, groups=1, data_format="NCHW", name=None) + +该OP是三维卷积层(convolution3D layer),根据输入、卷积核、步长(stride)、填充(padding)、空洞大小(dilations)一组参数计算得到输出特征层大小。输入和输出是NCDHW或NDWHC格式,其中N是批尺寸,C是通道数,D是特征层深度,H是特征层高度,W是特征层宽度。三维卷积(Convlution3D)和二维卷积(Convlution2D)相似,但多了一维深度信息(depth)。如果bias_attr不为False,卷积计算会添加偏置项。 + +对每个输入X,有等式: + +.. math:: + + Out = \sigma \left ( W * X + b \right ) + +其中: + - :math:`X` :输入值,NCDHW或NDHWC格式的5-D Tensor + - :math:`W` :卷积核值,MCDHW格式的5-D Tensor + - :math:`*` :卷积操作 + - :math:`b` :偏置值,2-D Tensor,形为 ``[M,1]`` + - :math:`\sigma` :激活函数 + - :math:`Out` :输出值, NCDHW或NDHWC格式的5-D Tensor,和 ``X`` 的形状可能不同 + +**示例** + +- 输入: + + 输入形状: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})` + + 卷积核形状: :math:`(C_{out}, C_{in}, D_f, H_f, W_f)` + +- 输出: + + 输出形状: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})` + +参数: + - **x** (Tensor) - 输入是形状为 :math:`[N, C, H, W]` 或 :math:`[N, H, W, C]` 的4-D Tensor,N是批尺寸,C是通道数,H是特征高度,W是特征宽度,数据类型为float16, float32或float64。 + - **weight** (Tensor) - 形状为 :math:`[M, C/g, kH, kW]` 的卷积核(卷积核)。 M是输出通道数, g是分组的个数,kH是卷积核的高度,kW是卷积核的宽度。 + - **bias** (int|list|tuple) - 偏置项,形状为: :math:`[M,]` 。 + - **stride** (int|list|tuple,可选) - 步长大小。卷积核和输入进行卷积计算时滑动的步长。如果它是一个列表或元组,则必须包含两个整型数:(stride_height,stride_width)。若为一个整数,stride_height = stride_width = stride。默认值:1。 + - **padding** (int|list|tuple|str,可选) - 填充大小。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有3种格式:(1)包含5个二元组:当 ``data_format`` 为"NCDHW"时为 [[0,0], [0,0], [padding_depth_front, padding_depth_back], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right]],当 ``data_format`` 为"NDHWC"时为[[0,0], [padding_depth_front, padding_depth_back], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right], [0,0]];(2)包含6个整数值:[padding_depth_front, padding_depth_back, padding_height_top, padding_height_bottom, padding_width_left, padding_width_right];(3)包含3个整数值:[padding_depth, padding_height, padding_width],此时 padding_depth_front = padding_depth_back = padding_depth, padding_height_top = padding_height_bottom = padding_height, padding_width_left = padding_width_right = padding_width。若为一个整数,padding_depth = padding_height = padding_width = padding。默认值:0。 + - **dilation** (int|list|tuple,可选) - 空洞大小。空洞卷积时会使用该参数,卷积核对输入进行卷积时,感受野里每相邻两个特征点之间的空洞信息。如果空洞大小为列表或元组,则必须包含两个整型数:(dilation_height,dilation_width)。若为一个整数,dilation_height = dilation_width = dilation。默认值:1。 + - **groups** (int,可选) - 二维卷积层的组数。根据Alex Krizhevsky的深度卷积神经网络(CNN)论文中的成组卷积:当group=n,输入和卷积核分别根据通道数量平均分为n组,第一组卷积核和第一组输入进行卷积计算,第二组卷积核和第二组输入进行卷积计算,……,第n组卷积核和第n组输入进行卷积计算。默认值:1。 + - **weight_attr** (ParamAttr,可选) - 
指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr|bool,可选)- 指定偏置参数属性的对象。若 ``bias_attr`` 为bool类型,只支持为False,表示没有偏置参数。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 + - **name** (str,可选) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值:None。 + +返回:5-D Tensor,数据类型与 ``input`` 一致。返回卷积计算的结果。 + +返回类型:Tensor。 + +抛出异常: + - ``ValueError`` - 如果 ``use_cudnn`` 不是bool值。 + - ``ValueError`` - 如果 ``data_format`` 既不是"NCDHW"也不是"NDHWC"。 + - ``ValueError`` - 如果 ``input`` 的通道数未被明确定义。 + - ``ValueError`` - 如果 ``padding`` 是字符串,既不是"SAME"也不是"VALID"。 + - ``ValueError`` - 如果 ``padding`` 含有5个二元组,与批尺寸对应维度的值不为0或者与通道对应维度的值不为0。 + - ``ShapeError`` - 如果输入不是5-D Tensor。 + - ``ShapeError`` - 如果输入和卷积核的维度大小不相同。 + - ``ShapeError`` - 如果输入的维度大小与 ``stride`` 之差不是2。 + - ``ShapeError`` - 如果输出的通道数不能被 ``groups`` 整除。 + + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle + import paddle.nn.functional as F + + x = np.random.randn(2, 3, 8, 8, 8).astype(np.float32) + w = np.random.randn(6, 3, 3, 3, 3).astype(np.float32) + + paddle.disable_static() + + x_var = paddle.to_tensor(x) + w_var = paddle.to_tensor(w) + y_var = F.conv3d(x_var, w_var) + + y_np = y_var.numpy() + print(y_np.shape) + + # (2, 6, 6, 6, 6) \ No newline at end of file diff --git a/doc/fluid/api_cn/nn_cn/functional_cn/conv_transpose2d_cn.rst b/doc/fluid/api_cn/nn_cn/functional_cn/conv_transpose2d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b2b5b68eb5b2fd410d051b05af1fe5b124aec74b --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/functional_cn/conv_transpose2d_cn.rst @@ -0,0 +1,120 @@ + +conv_transpose2d +------------------------------- + + +.. py:function:: paddle.nn.functional.conv_transpose2d(x, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1, data_format='NCHW', output_size=None, name=None) + + + +二维转置卷积层(Convlution2D transpose layer) + +该层根据输入(input)、卷积核(kernel)和空洞大小(dilations)、步长(stride)、填充(padding)来计算输出特征层大小或者通过output_size指定输出特征层大小。输入(Input)和输出(Output)为NCHW或NHWC格式,其中N为批尺寸,C为通道数(channel),H为特征层高度,W为特征层宽度。卷积核是MCHW格式,M是输出图像通道数,C是输入图像通道数,H是卷积核高度,W是卷积核宽度。如果组数大于1,C等于输入图像通道数除以组数的结果。转置卷积的计算过程相当于卷积的反向计算。转置卷积又被称为反卷积(但其实并不是真正的反卷积)。欲了解转置卷积层细节,请参考下面的说明和 参考文献_ 。如果参数bias_attr不为False, 转置卷积计算会添加偏置项。如果act不为None,则转置卷积计算之后添加相应的激活函数。 + +.. _参考文献: https://arxiv.org/pdf/1603.07285.pdf + + +输入 :math:`X` 和输出 :math:`Out` 函数关系如下: + +.. math:: + Out=\sigma (W*X+b)\\ + +其中: + - :math:`X` : 输入,具有NCHW或NHWC格式的4-D Tensor + - :math:`W` : 卷积核,具有NCHW格式的4-D Tensor + - :math:`*` : 卷积计算(注意:转置卷积本质上的计算还是卷积) + - :math:`b` : 偏置(bias),2-D Tensor,形状为 ``[M,1]`` + - :math:`σ` : 激活函数 + - :math:`Out` : 输出值,NCHW或NHWC格式的4-D Tensor, 和 ``X`` 的形状可能不同 + +**示例** + +- 输入: + + 输入Tensor的形状: :math:`(N,C_{in}, H_{in}, W_{in})` + + 卷积核的形状 : :math:`(C_{in}, C_{out}, H_f, W_f)` + +- 输出: + + 输出Tensor的形状 : :math:`(N,C_{out}, H_{out}, W_{out})` + +其中 + +.. math:: + + & H'_{out} = (H_{in}-1)*strides[0] - pad\_height\_top - pad\_height\_bottom + dilations[0]*(H_f-1)+1\\ + & W'_{out} = (W_{in}-1)*strides[1]- pad\_width\_left - pad\_width\_right + dilations[1]*(W_f-1)+1 \\ + & H_{out}\in[H'_{out},H'_{out} + strides[0])\\ + & W_{out}\in[W'_{out},W'_{out} + strides[1])\\ + +如果 ``padding`` = "SAME": + +.. math:: + & H'_{out} = \frac{(H_{in} + stride[0] - 1)}{stride[0]}\\ + & W'_{out} = \frac{(W_{in} + stride[1] - 1)}{stride[1]}\\ + +如果 ``padding`` = "VALID": + +.. 
math:: + & H'_{out} = (H_{in}-1)*strides[0] + dilations[0]*(H_f-1)+1\\ + & W'_{out} = (W_{in}-1)*strides[1] + dilations[1]*(W_f-1)+1 \\ + +注意: + +如果output_size为None,则 :math:`H_{out}` = :math:`H^\prime_{out}` , :math:`W_{out}` = :math:`W^\prime_{out}` ;否则,指定的output_size_height(输出特征层的高) :math:`H_{out}` 应当介于 :math:`H^\prime_{out}` 和 :math:`H^\prime_{out} + strides[0]` 之间(不包含 :math:`H^\prime_{out} + strides[0]` ), 并且指定的output_size_width(输出特征层的宽) :math:`W_{out}` 应当介于 :math:`W^\prime_{out}` 和 :math:`W^\prime_{out} + strides[1]` 之间(不包含 :math:`W^\prime_{out} + strides[1]` )。 + +由于转置卷积可以当成是卷积的反向计算,而根据卷积的输入输出计算公式来说,不同大小的输入特征层可能对应着相同大小的输出特征层,所以对应到转置卷积来说,固定大小的输入特征层对应的输出特征层大小并不唯一。 + +如果指定了output_size, ``conv2d_transpose`` 可以自动计算卷积核的大小。 + +参数: + - **x** (Tensor) - 输入是形状为 :math:`[N, C, H, W]` 或 :math:`[N, H, W, C]` 的4-D Tensor,N是批尺寸,C是通道数,H是特征高度,W是特征宽度,数据类型为float16, float32或float64。 + - **weight** (Tensor) - 形状为 :math:`[C, M/g, kH, kW]` 的卷积核(卷积核)。 M是输出通道数, g是分组的个数,kH是卷积核的高度,kW是卷积核的宽度。 + - **bias** (int|list|tuple) - 偏置项,形状为: :math:`[M,]` 。 + - **stride** (int|list|tuple,可选) - 步长大小。如果 ``stride`` 为元组,则必须包含两个整型数,分别表示垂直和水平滑动步长。否则,表示垂直和水平滑动步长均为 ``stride`` 。默认值:1。 + - **padding** (int|list|tuple|str,可选) - 填充大小。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有3种格式:(1)包含4个二元组:当 ``data_format`` 为"NCHW"时为 [[0,0], [0,0], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right]],当 ``data_format`` 为"NHWC"时为[[0,0], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right], [0,0]];(2)包含4个整数值:[padding_height_top, padding_height_bottom, padding_width_left, padding_width_right];(3)包含2个整数值:[padding_height, padding_width],此时padding_height_top = padding_height_bottom = padding_height, padding_width_left = padding_width_right = padding_width。若为一个整数,padding_height = padding_width = padding。默认值:0。 + - **output_padding** (int|list|tuple, optional): 输出形状上一侧额外添加的大小. 默认值: 0. + - **dilation** (int|list|tuple,可选) - 空洞大小。空洞卷积时会使用该参数,卷积核对输入进行卷积时,感受野里每相邻两个特征点之间的空洞信息。如果空洞大小为列表或元组,则必须包含两个整型数:(dilation_height,dilation_width)。若为一个整数,dilation_height = dilation_width = dilation。默认值:1。 + - **groups** (int,可选) - 二维卷积层的组数。根据Alex Krizhevsky的深度卷积神经网络(CNN)论文中的成组卷积:当group=n,输入和卷积核分别根据通道数量平均分为n组,第一组卷积核和第一组输入进行卷积计算,第二组卷积核和第二组输入进行卷积计算,……,第n组卷积核和第n组输入进行卷积计算。默认值:1。 + - **weight_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr|bool,可选)- 指定偏置参数属性的对象。若 ``bias_attr`` 为bool类型,只支持为False,表示没有偏置参数。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 + - **name** (str,可选) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值:None。 + + +返回:4-D Tensor,数据类型与 ``input`` 一致。如果未指定激活层,则返回转置卷积计算的结果,如果指定激活层,则返回转置卷积和激活计算之后的最终结果。 + +返回类型:Variable + +抛出异常: + - ``ValueError`` : 如果输入的shape、kernel_size、stride、padding和groups不匹配,抛出ValueError + - ``ValueError`` - 如果 ``data_format`` 既不是"NCHW"也不是"NHWC"。 + - ``ValueError`` - 如果 ``padding`` 是字符串,既不是"SAME"也不是"VALID"。 + - ``ValueError`` - 如果 ``padding`` 含有4个二元组,与批尺寸对应维度的值不为0或者与通道对应维度的值不为0。 + - ``ValueError`` - 如果 ``output_size`` 和 ``filter_size`` 同时为None。 + - ``ShapeError`` - 如果输入不是4-D Tensor。 + - ``ShapeError`` - 如果输入和卷积核的维度大小不相同。 + - ``ShapeError`` - 如果输入的维度大小与 ``stride`` 之差不是2。 + +**代码示例** + +.. 
code-block:: python + + import numpy as np + import paddle + import paddle.nn.functional as F + + x = np.random.randn(2, 3, 8, 8).astype(np.float32) + w = np.random.randn(3, 6, 3, 3).astype(np.float32) + + paddle.disable_static() + x_var = paddle.to_tensor(x) + w_var = paddle.to_tensor(w) + y_var = F.conv_transpose2d(x_var, w_var) + y_np = y_var.numpy() + print(y_np.shape) + + # (2, 6, 10, 10) + + diff --git a/doc/fluid/api_cn/nn_cn/functional_cn/conv_transpose3d_cn.rst b/doc/fluid/api_cn/nn_cn/functional_cn/conv_transpose3d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4c78cc746ea70c3e2cfebb2c30da4c4c19b84899 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/functional_cn/conv_transpose3d_cn.rst @@ -0,0 +1,124 @@ +conv_transpose3d +------------------------------- + + +.. py:function:: paddle.nn.functional.conv_transpose3d(x, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1, data_format='NCHW', output_size=None, name=None) + + + + +三维转置卷积层(Convlution3d transpose layer) + +该层根据输入(input)、卷积核(kernel)和卷积核空洞大小(dilations)、步长(stride)、填充(padding)来计算输出特征层大小或者通过output_size指定输出特征层大小。输入(Input)和输出(Output)为NCDHW或者NDHWC格式。其中N为批尺寸,C为通道数(channel),D为特征深度,H为特征层高度,W为特征层宽度。转置卷积的计算过程相当于卷积的反向计算。转置卷积又被称为反卷积(但其实并不是真正的反卷积)。欲了解卷积转置层细节,请参考下面的说明和 参考文献_ 。如果参数bias_attr不为False, 转置卷积计算会添加偏置项。 + +.. _参考文献: http://www.matthewzeiler.com/wp-content/uploads/2017/07/cvpr2010.pdf + +输入 :math:`X` 和输出 :math:`Out` 函数关系如下: + +.. math:: + \\Out=\sigma (W*X+b)\\ + +其中: + - :math:`X` : 输入,具有NCDHW或NDHWC格式的5-D Tensor + - :math:`W` : 卷积核,具有NCDHW格式的5-D Tensor + - :math:`*` : 卷积操作(注意:转置卷积本质上的计算还是卷积) + - :math:`b` : 偏置(bias),2-D Tensor,形状为 ``[M,1]`` + - :math:`σ` : 激活函数 + - :math:`Out` : 输出值,NCDHW或NDHWC格式的5-D Tensor,和 ``X`` 的形状可能不同 + +**示例** + +输入: + + 输入的shape::math:`(N,C_{in}, D_{in}, H_{in}, W_{in})` + + 卷积核的shape::math:`(C_{in}, C_{out}, D_f, H_f, W_f)` + +输出: + + 输出的shape::math:`(N,C_{out}, D_{out}, H_{out}, W_{out})` + + +其中: + +.. math:: + + & D'_{out}=(D_{in}-1)*strides[0] - pad\_depth\_front - pad\_depth\_back + dilations[0]*(D_f-1)+1\\ + & H'_{out}=(H_{in}-1)*strides[1] - pad\_height\_top - pad\_height\_bottom + dilations[1]*(H_f-1)+1\\ + & W'_{out}=(W_{in}-1)*strides[2] - pad\_width\_left - pad\_width\_right + dilations[2]*(W_f-1)+1\\ + & D_{out}\in[D'_{out},D'_{out} + strides[0])\\ + & H_{out}\in[H'_{out},H'_{out} + strides[1])\\ + & W_{out}\in[W'_{out},W'_{out} + strides[2])\\ + +如果 ``padding`` = "SAME": + +.. math:: + D'_{out} = \frac{(D_{in} + stride[0] - 1)}{stride[0]}\\ + H'_{out} = \frac{(H_{in} + stride[1] - 1)}{stride[1]}\\ + W'_{out} = \frac{(W_{in} + stride[2] - 1)}{stride[2]}\\ + +如果 ``padding`` = "VALID": + +.. 
math:: + D'_{out}=(D_{in}-1)*strides[0] + dilations[0]*(D_f-1)+1\\ + H'_{out}=(H_{in}-1)*strides[1] + dilations[1]*(H_f-1)+1\\ + W'_{out}=(W_{in}-1)*strides[2] + dilations[2]*(W_f-1)+1\\ + +注意: + +如果output_size为None,则 :math:`D_{out}` = :math:`D^\prime_{out}` , :math:`H_{out}` = :math:`H^\prime_{out}` , :math:`W_{out}` = :math:`W^\prime_{out}` ;否则,指定的output_size_depth(输出特征层的深度) :math:`D_{out}` 应当介于 :math:`D^\prime_{out}` 和 :math:`D^\prime_{out} + strides[0]` 之间(不包含 :math:`D^\prime_{out} + strides[0]` ),指定的output_size_height(输出特征层的高) :math:`H_{out}` 应当介于 :math:`H^\prime_{out}` 和 :math:`H^\prime_{out} + strides[1]` 之间(不包含 :math:`H^\prime_{out} + strides[1]` ), 并且指定的output_size_width(输出特征层的宽) :math:`W_{out}` 应当介于 :math:`W^\prime_{out}` 和 :math:`W^\prime_{out} + strides[2]` 之间(不包含 :math:`W^\prime_{out} + strides[2]` )。 + +由于转置卷积可以当成是卷积的反向计算,而根据卷积的输入输出计算公式来说,不同大小的输入特征层可能对应着相同大小的输出特征层,所以对应到转置卷积来说,固定大小的输入特征层对应的输出特征层大小并不唯一。 + +如果指定了output_size, ``conv_transpose3d`` 可以自动计算卷积核的大小。 + +参数: + - **x** (Tensor) - 形状为 :math:`[N, C, D, H, W]` 或 :math:`[N, D, H, W, C]` 的5-D Tensor,N是批尺寸,C是通道数,D是特征深度,H是特征高度,W是特征宽度,数据类型:float32或float64。 + - **weight** (Tensor) - 形状为 :math:`[C, M/g, kD, kH, kW]` 的卷积核。 M是输出通道数, g是分组的个数,kD是卷积核的深度,kH是卷积核的高度,kW是卷积核的宽度。 + - **bias** (int|list|tuple) - 偏置项,形状为: :math:`[M,]` 。 + - **stride** (int|list|tuple,可选) - 步长大小。如果 ``stride`` 为元组或列表,则必须包含三个整型数,分别表示深度,垂直和水平滑动步长。否则,表示深度,垂直和水平滑动步长均为 ``stride`` 。默认值:1。 + - **padding** (int|list|tuple|str,可选) - 填充padding大小。padding参数在输入特征层每边添加 ``dilation * (kernel_size - 1) - padding`` 个0。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有3种格式:(1)包含5个二元组:当 ``data_format`` 为"NCDHW"时为 [[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]],当 ``data_format`` 为"NDHWC"时为[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]];(2)包含6个整数值:[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right];(3)包含3个整数值:[pad_depth, pad_height, pad_width],此时 pad_depth_front = pad_depth_back = pad_depth, pad_height_top = pad_height_bottom = pad_height, pad_width_left = pad_width_right = pad_width。若为一个整数,pad_depth = pad_height = pad_width = padding。默认值:0。 + - **output_padding** (int|list|tuple, optional): 输出形状上一侧额外添加的大小. 默认值: 0. 
+ - **dilation** (int|list|tuple,可选) - 空洞大小。空洞卷积时会使用该参数,卷积核对输入进行卷积时,感受野里每相邻两个特征点之间的空洞信息。如果空洞大小为列表或元组,则必须包含两个整型数:(dilation_height,dilation_width)。若为一个整数,dilation_height = dilation_width = dilation。默认值:1。 + - **groups** (int,可选) - 三维转置卷积层的组数。从Alex Krizhevsky的CNN Deep论文中的群卷积中受到启发,当group=2时,输入和卷积核分别根据通道数量平均分为两组,第一组卷积核和第一组输入进行卷积计算,第二组卷积核和第二组输入进行卷积计算。默认:group = 1。 + - **weight_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr|bool,可选)- 指定偏置参数属性的对象。若 ``bias_attr`` 为bool类型,只支持为False,表示没有偏置参数。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 + - **name** (str,可选) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值:None。 + + +返回:5-D Tensor,数据类型与 ``input`` 一致。如果未指定激活层,则返回转置卷积计算的结果,如果指定激活层,则返回转置卷积和激活计算之后的最终结果。 + +返回类型:Tensor + +抛出异常: + - ``ValueError`` - 如果输入的shape、kernel_size、stride、padding和groups不匹配。 + - ``ValueError`` - 如果 ``data_format`` 既不是"NCDHW"也不是"NDHWC"。 + - ``ValueError`` - 如果 ``padding`` 是字符串,既不是"SAME"也不是"VALID"。 + - ``ValueError`` - 如果 ``padding`` 含有5个二元组,与批尺寸对应维度的值不为0或者与通道对应维度的值不为0。 + - ``ValueError`` - 如果 ``output_size`` 和 ``filter_size`` 同时为None。 + - ``ShapeError`` - 如果输入不是5-D Tensor。 + - ``ShapeError`` - 如果输入和卷积核的维度大小不相同。 + - ``ShapeError`` - 如果输入的维度大小与 ``stride`` 之差不是2。 + +**代码示例** + +.. code-block:: python + + import numpy as np + + import paddle + import paddle.nn.functional as F + + x = np.random.randn(2, 3, 8, 8, 8).astype(np.float32) + w = np.random.randn(3, 6, 3, 3, 3).astype(np.float32) + + paddle.disable_static() + + x_var = paddle.to_tensor(x) + w_var = paddle.to_tensor(w) + y_var = F.conv_transpose3d(x_var, w_var) + y_np = y_var.numpy() + print(y_np.shape) + + # (2, 6, 10, 10, 10) diff --git a/doc/fluid/api_cn/nn_cn/functional_cn/cross_entropy_loss_cn.rst b/doc/fluid/api_cn/nn_cn/functional_cn/cross_entropy_loss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3b2bbf0311e59ad86f3279dfb9508198d4c88b77 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/functional_cn/cross_entropy_loss_cn.rst @@ -0,0 +1,51 @@ +.. _cn_api_nn_functional_cross_entropy_loss: + +cross_entropy_loss +------------------------------- +.. py:function:: paddle.nn.functional.cross_entropy_loss(input, label, weight=None, ignore_index=-100, reduction='mean') + +该接口计算输入input和标签label间的交叉熵损失 ,它结合了 `LogSoftmax` 和 `NLLLoss` 的计算,可用于训练一个 `n` 类分类器。 + +如果提供 `weight` 参数的话,它是一个 `1-D` 的tensor, 每个值对应每个类别的权重。 +该损失函数的数学计算公式如下: + + .. math:: + loss_j = -\text{input[class]} + + \log\left(\sum_{i=0}^{K}\exp(\text{input}_i)\right), j = 1,..., K + +当 `weight` 不为 `none` 时,损失函数的数学计算公式为: + + .. 
math:: + loss_j = \text{weight[class]}(-\text{input[class]} + + \log\left(\sum_{i=0}^{K}\exp(\text{input}_i)\right)), j = 1,..., K + + +参数 +::::::::: + - **input** (Tensor): - 输入 `Tensor` ,数据类型为float32或float64。其形状为 :math:`[N, C]` , 其中 `C` 为类别数。对于多维度的情形下,它的形状为 :math:`[N, C, d_1, d_2, ..., d_k]` ,k >= 1。 + - **label** (Tensor): - 输入input对应的标签值,数据类型为int64。其形状为 :math:`[N]` ,每个元素符合条件:0 <= label[i] <= C-1。对于多维度的情形下,它的形状为 :math:`[N, d_1, d_2, ..., d_k]` ,k >= 1。 + - **weight** (Tensor, 可选): - 指定每个类别的权重。其默认为 `None` 。如果提供该参数的话,维度必须为 `C` (类别数)。数据类型为float32或float64。 + - **ignore_index** (int64, 可选): - 指定一个忽略的标签值,此标签值不参与计算。默认值为-100。数据类型为int64。 + - **reduction** (str, 可选): - 指定应用于输出结果的计算方式,数据类型为string,可选值有: `none` , `mean` , `sum` 。默认为 `mean` ,计算 `mini-batch` loss均值。设置为 `sum` 时,计算 `mini-batch` loss的总和。设置为 `none` 时,则返回loss Tensor。 + +返回 +::::::::: + `Tensor` , 返回计算 `cross_entropy_loss` 交叉熵后的损失值。 + + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + paddle.disable_static() + input_data = np.random.random([5, 100]).astype("float64") + label_data = np.random.randint(0, 100, size=(5)).astype(np.int64) + weight_data = np.random.random([100]).astype("float64") + input = paddle.to_tensor(input_data) + label = paddle.to_tensor(label_data) + weight = paddle.to_tensor(weight_data) + loss = paddle.nn.functional.cross_entropy(input=input, label=label, weight=weight) + print(loss.numpy()) + diff --git a/doc/fluid/api_cn/nn_cn/functional_cn/ctc_loss_cn.rst b/doc/fluid/api_cn/nn_cn/functional_cn/ctc_loss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2722525a61feefc4ea1bb3263e7eec0f88462796 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/functional_cn/ctc_loss_cn.rst @@ -0,0 +1,80 @@ +ctc_loss +------------------------------- + +.. py:function:: paddle.nn.functional.ctc_loss(log_probs, labels, input_lengths, label_lengths, blank=0, reduction='mean') + +该接口用于计算 CTC loss。该接口的底层调用了第三方 baidu-research::warp-ctc 的实现。 +也可以叫做 softmax with CTC,因为 Warp-CTC 库中插入了 softmax 激活函数来对输入的值进行归一化。 + +参数 +::::::::: + - **log_probs** (Tensor): - 经过 padding 的概率序列,其 shape 必须是 [max_logit_length, batch_size, num_classes + 1]。其中 max_logit_length 是最长输入序列的长度。该输入不需要经过 softmax 操作,因为该 OP 的内部对 input 做了 softmax 操作。数据类型仅支持float32。 + - **labels** (Tensor): - 经过 padding 的标签序列,其 shape 为 [batch_size, max_label_length],其中 max_label_length 是最长的 label 序列的长度。数据类型支持int32。 + - **input_lengths** (Tensor): - 表示输入 ``log_probs`` 数据中每个序列的长度,shape为 [batch_size] 。数据类型支持int64。 + - **label_lengths** (Tensor): - 表示 label 中每个序列的长度,shape为 [batch_size] 。数据类型支持int64。 + - **blank** (int,可选): - 空格标记的 ID 值,其取值范围为 [0,num_classes+1) 。数据类型支持int32。默认值为0。 + - **reduction** (string,可选): - 指定应用于输出结果的计算方式,可选值有: ``'none'``, ``'mean'``, ``'sum'``。设置为 ``'mean'`` 时,对 loss 值除以 label_lengths,并返回所得商的均值;设置为 ``'sum'`` 时,返回 loss 值的总和;设置为 ``'none'`` 时,则直接返回输出的 loss 值。默认值为 ``'mean'``。 + +返回 +::::::::: +``Tensor``,输入 ``log_probs`` 和标签 ``labels`` 间的 `ctc loss`。如果 :attr:`reduction` 是 ``'none'``, 则输出 loss 的维度为 [batch_size]。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``,则输出Loss的维度为 [1]。数据类型与输入 ``log_probs`` 一致。 + + +代码示例 +::::::::: + +.. 
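+先给出一个只关注输入形状约定的最小示例(随机数据,示意性写法),完整的数值示例紧随其后:
+
+.. code-block:: python
+
+    import numpy as np
+    import paddle
+    import paddle.nn.functional as F
+
+    paddle.disable_static()
+
+    # log_probs 的形状为 [max_logit_length, batch_size, num_classes + 1]
+    log_probs = paddle.to_tensor(np.random.rand(5, 2, 3).astype("float32"))
+    labels = paddle.to_tensor(np.array([[1, 2, 2], [1, 2, 2]]).astype("int32"))
+    input_lengths = paddle.to_tensor(np.array([5, 5]).astype("int64"))
+    label_lengths = paddle.to_tensor(np.array([3, 3]).astype("int64"))
+
+    loss = F.ctc_loss(log_probs, labels, input_lengths, label_lengths, blank=0)
+    print(loss.numpy())  # reduction 默认为 'mean',输出形状为 [1]
+
+.. 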
code-block:: python + + # declarative mode + import paddle.nn.functional as F + import numpy as np + import paddle + + # length of the longest logit sequence + max_seq_length = 4 + #length of the longest label sequence + max_label_length = 3 + # number of logit sequences + batch_size = 2 + # class num + class_num = 3 + + np.random.seed(1) + log_probs = np.array([[[4.17021990e-01, 7.20324516e-01, 1.14374816e-04], + [3.02332580e-01, 1.46755889e-01, 9.23385918e-02]], + + [[1.86260208e-01, 3.45560730e-01, 3.96767467e-01], + [5.38816750e-01, 4.19194520e-01, 6.85219526e-01]], + + [[2.04452246e-01, 8.78117442e-01, 2.73875929e-02], + [6.70467496e-01, 4.17304814e-01, 5.58689833e-01]], + + [[1.40386939e-01, 1.98101491e-01, 8.00744593e-01], + [9.68261600e-01, 3.13424170e-01, 6.92322612e-01]], + + [[8.76389146e-01, 8.94606650e-01, 8.50442126e-02], + [3.90547849e-02, 1.69830427e-01, 8.78142476e-01]]]).astype("float32") + labels = np.array([[1, 2, 2], + [1, 2, 2]]).astype("int32") + input_lengths = np.array([5, 5]).astype("int64") + label_lengths = np.array([3, 3]).astype("int64") + + paddle.disable_static() + log_probs = paddle.to_tensor(log_probs) + labels = paddle.to_tensor(labels) + input_lengths = paddle.to_tensor(input_lengths) + label_lengths = paddle.to_tensor(label_lengths) + + loss = F.ctc_loss(log_probs, labels, + input_lengths, + label_lengths, + blank=0, + reduction='none') + print(loss.numpy()) #[3.9179852 2.9076521] + + loss = F.ctc_loss(log_probs, labels, + input_lengths, + label_lengths, + blank=0, + reduction='mean') + print(loss.numpy()) #[1.1376063] \ No newline at end of file diff --git a/doc/fluid/api_cn/nn_cn/functional_cn/dropout2d_cn.rst b/doc/fluid/api_cn/nn_cn/functional_cn/dropout2d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..35c61c137a4842bc17f7c43eea4eb14d3a595e81 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/functional_cn/dropout2d_cn.rst @@ -0,0 +1,41 @@ +.. _cn_api_nn_functional_dropout2d: + +dropout2d +------------------------------- + +.. py:function:: paddle.nn.functional.dropout2d(x, p=0.5, training=True, name=None) + +该算子根据丢弃概率 `p` ,在训练过程中随机将某些通道特征图置0(对一个形状为 `NCHW` 的4维张量,通道特征图指的是其中的形状为 `HW` 的2维特征图)。 + +.. note:: + 该op基于 ``paddle.nn.functional.dropout`` 实现,如您想了解更多,请参见 :ref:`cn_api_nn_functional_dropout` 。 + +参数 +::::::::: + - **x** (Tensor): 形状为[N, C, H, W]或[N, H, W, C]的4D `Tensor` ,数据类型为float32或float64。 + - **p** (float): 将输入通道置0的概率,即丢弃概率。默认: 0.5。 + - **training** (bool): 标记是否为训练阶段。 默认: True。 + - **name** (str,可选): 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +返回 +::::::::: +经过dropout2d之后的结果,与输入x形状相同的 `Tensor` 。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + x = np.random.random(size=(2, 3, 4, 5)).astype('float32') + x = paddle.to_tensor(x) + y_train = paddle.nn.functional.dropout2d(x) #train + y_test = paddle.nn.functional.dropout2d(x, training=False) + for i in range(2): + for j in range(3): + print(x.numpy()[i,j,:,:]) + print(y_train.numpy()[i,j,:,:]) + print(y_test.numpy()[i,j,:,:]) diff --git a/doc/fluid/api_cn/nn_cn/functional_cn/dropout3d_cn.rst b/doc/fluid/api_cn/nn_cn/functional_cn/dropout3d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0fc0ab0ee6a5cf37b81319673b9db8c1c131081c --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/functional_cn/dropout3d_cn.rst @@ -0,0 +1,39 @@ +.. _cn_api_nn_functional_dropout3d: + +dropout3d +------------------------------- + +.. 
py:function:: paddle.nn.functional.dropout3d(x, p=0.5, training=True, name=None) + +该算子根据丢弃概率 `p` ,在训练过程中随机将某些通道特征图置0(对一个形状为 `NCDHW` 的5维张量,通道指的是其中的形状为 `DHW` 的3维特征图)。 + +.. note:: + 该op基于 ``paddle.nn.functional.dropout`` 实现,如您想了解更多,请参见 :ref:`cn_api_nn_functional_dropout` 。 + +参数 +::::::::: + - **x** (Tensor): 形状为[N, C, D, H, W]或[N, D, H, W, C]的5D `Tensor` ,数据类型为float32或float64。 + - **p** (float): 将输入通道置0的概率,即丢弃概率。默认: 0.5。 + - **training** (bool): 标记是否为训练阶段。 默认: True。 + - **name** (str,可选): 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +返回 +::::::::: +经过dropout3d之后的结果,与输入x形状相同的 `Tensor` 。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + x = np.random.random(size=(2, 3, 4, 5, 6)).astype('float32') + x = paddle.to_tensor(x) + y_train = paddle.nn.functional.dropout3d(x) #train + y_test = paddle.nn.functional.dropout3d(x, training=False) + print(x.numpy()[0,0,:,:,:]) + print(y_train.numpy()[0,0,:,:,:]) + print(y_test.numpy()[0,0,:,:,:]) diff --git a/doc/fluid/api_cn/nn_cn/functional_cn/dropout_cn.rst b/doc/fluid/api_cn/nn_cn/functional_cn/dropout_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a349e66aeb5471ffe5810e2c5fe480d8f736515e --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/functional_cn/dropout_cn.rst @@ -0,0 +1,134 @@ +.. _cn_api_nn_functional_dropout: + +dropout +------------------------------- + +.. py:function:: paddle.nn.functional.dropout(x, p=0.5, axis=None, training=True, mode="upscale_in_train”, name=None) + +Dropout是一种正则化手段,该算子根据给定的丢弃概率 `p` ,在训练过程中随机将一些神经元输出设置为0,通过阻止神经元节点间的相关性来减少过拟合。 + +参数 +::::::::: + - **x** (Tensor): 输入的多维 `Tensor` ,数据类型为:float32、float64。 + - **p** (float): 将输入节点置0的概率,即丢弃概率。默认: 0.5。 + - **axis** (int|list): 指定对输入 `Tensor` 进行dropout操作的轴。默认: None。 + - **training** (bool): 标记是否为训练阶段。 默认: True。 + - **mode** (str): 丢弃单元的方式,有两种'upscale_in_train'和'downscale_in_infer',默认: 'upscale_in_train'。计算方法如下: + + 1. upscale_in_train, 在训练时增大输出结果。 + + - train: out = input * mask / ( 1.0 - p ) + - inference: out = input + + 2. downscale_in_infer, 在预测时减小输出结果 + + - train: out = input * mask + - inference: out = input * (1.0 - p) + + - **name** (str,可选): 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +返回 +::::::::: +经过dropout之后的结果,与输入x形状相同的 `Tensor` 。 + +使用示例1 +::::::::: +axis参数的默认值为None。当 ``axis=None`` 时,dropout的功能为: 对输入张量x中的任意元素,以丢弃概率p随机将一些元素输出置0。这是我们最常见的dropout用法。 + + - 下面以一个示例来解释它的实现逻辑,同时展示其它参数的含义。 + +.. code-block:: text + + 假定x是形状为2*3的2维张量: + [[1 2 3] + [4 5 6]] + 在对x做dropout时,程序会先生成一个和x相同形状的mask张量,mask中每个元素的值为0或1。 + 每个元素的具体值,则是依据丢弃概率从伯努利分布中随机采样得到。 + 比如,我们可能得到下面这样一个2*3的mask: + [[0 1 0] + [1 0 1]] + 将输入x和生成的mask点积,就得到了随机丢弃部分元素之后的结果: + [[0 2 0] + [4 0 6]] + 假定dropout的概率使用默认值,即 ``p=0.5`` ,若mode参数使用默认值,即 ``mode='upscale_in_train'`` , + 则在训练阶段,最终增大后的结果为: + [[0 4 0 ] + [8 0 12]] + 在测试阶段,输出跟输入一致: + [[1 2 3] + [4 5 6]] + 若参数mode设置为'downscale_in_infer',则训练阶段的输出为: + [[0 2 0] + [4 0 6]] + 在测试阶段,缩小后的输出为: + [[0.5 1. 1.5] + [2. 2.5 3. ]] + +使用示例2 +::::::::: +若参数axis不为None,dropout的功能为:以一定的概率从图像特征或语音序列中丢弃掉整个通道。 + + - axis应设置为: ``[0,1,...,ndim(x)-1]`` 的子集(ndim(x)为输入x的维度),例如: + + - 若x的维度为2,参数axis可能的取值有4种: ``None``, ``[0]``, ``[1]``, ``[0,1]`` + - 若x的维度为3,参数axis可能的取值有8种: ``None``, ``[0]``, ``[1]``, ``[2]``, ``[0,1]``, ``[0,2]``, ``[1,2]``, ``[0,1,2]`` + + - 下面以维度为2的输入张量展示axis参数的用法: + +.. 
code-block:: text + + 假定x是形状为2*3的2维Tensor: + [[1 2 3] + [4 5 6]] + (1) 若 ``axis=[0]`` , 则表示只在第0个维度做dropout。这时生成mask的形状为2*1。 + 例如,我们可能会得到这样的mask: + [[1] + [0]] + 这个2*1的mask在和x做点积的时候,会首先广播成一个2*3的矩阵: + [[1 1 1] + [0 0 0]] + 点积所得的结果为: + [[1 2 3] + [0 0 0]] + 之后依据其它参数的设置,得到最终的输出结果。 + + (2) 若 ``axis=[1]`` ,则表示只在第1个维度做dropout。这时生成的mask形状为1*3。 + 例如,我们可能会得到这样的mask: + [[1 0 1]] + 这个1*3的mask在和x做点积的时候,会首先广播成一个2*3的矩阵: + [[1 0 1] + [1 0 1]] + 点积所得结果为: + [[1 0 3] + [4 0 6]] + (3) 若 ``axis=[0, 1]`` ,则表示在第0维和第1维上做dropout。此时与默认设置 ``axis=None`` 的作用一致。 + +若输入x为4维张量,形状为 `NCHW` , 当设置 ``axis=[0,1]`` 时,则只会在通道 `N` 和 `C` 上做dropout,通道 `H` 和 `W` 的元素是绑定在一起的,即: ``paddle.nn.functional.dropout(x, p, axis=[0,1])`` , 此时对4维张量中的某个2维特征图(形状 `HW` ),或者全部置0,或者全部保留,这便是dropout2d的实现。详情参考 :ref:`cn_api_nn_functional_dropout2d` 。 + +类似的,若输入x为5维张量,形状为 `NCDHW` , 当设置 ``axis=[0,1]`` 时,便可实现dropout3d。详情参考 :ref:`cn_api_nn_functional_dropout3d` 。 + +.. note:: + 关于广播(broadcasting)机制,如您想了解更多,请参见 :ref:`cn_user_guide_broadcasting` 。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + x = np.array([[1,2,3], [4,5,6]]).astype('float32') + x = paddle.to_tensor(x) + y_train = paddle.nn.functional.dropout(x, 0.5) + y_test = paddle.nn.functional.dropout(x, 0.5, training=False) #test + y_0 = paddle.nn.functional.dropout(x, axis=0) + y_1 = paddle.nn.functional.dropout(x, axis=1) + y_01 = paddle.nn.functional.dropout(x, axis=[0,1]) + print(x.numpy()) + print(y_train.numpy()) + print(y_test.numpy()) + print(y_0.numpy()) + print(y_1.numpy()) + print(y_01.numpy()) diff --git a/doc/fluid/api_cn/nn_cn/functional_cn/instance_norm_cn.rst b/doc/fluid/api_cn/nn_cn/functional_cn/instance_norm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..97a49237f9605b595464027b8e137408fd645483 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/functional_cn/instance_norm_cn.rst @@ -0,0 +1,47 @@ +.. cn_api_nn_functional_instance_norm: + +instance_norm +------------------------------- + +.. py:class:: paddle.nn.functional.instance_norm(x, running_mean, running_var, weight, bias, training=False, epsilon=1e-05, momentum=0.9, use_input_stats=True, data_format='NCHW', name=None): + +推荐使用nn.InstanceNorm1d,nn.InstanceNorm2d, nn.InstanceNorm3d,由内部调用此方法。 + +详情见 :ref:`cn_api_nn_InstanceNorm1d` 。 + +参数: + - **x** (int) - 输入,数据类型为float32, float64。 + - **running_mean** (Tensor) - 均值的Tensor。 + - **running_var** (Tensor) - 方差的Tensor。 + - **weight** (Tensor) - 权重的Tensor。 + - **bias** (Tensor) - 偏置的Tensor。 + - **epsilon** (float, 可选) - 为了数值稳定加在分母上的值。默认值:1e-05。 + - **momentum** (float, 可选) - 此值用于计算 ``moving_mean`` 和 ``moving_var`` 。默认值:0.9。更新公式如上所示。 + - **use_input_stats** (bool, 可选) - 默认是True. + - **data_format** (string, 可选) - 指定输入数据格式,数据格式可以为“NC", "NCL", "NCHW" 或者"NCDHW"。默认值:"NCHW"。 + - **name** (string, 可选) – InstanceNorm的名称, 默认值为None。更多信息请参见 :ref:`api_guide_Name` 。 + +返回:无 + + +**代码示例** + +.. 
code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + x = np.random.seed(123) + x = np.random.random(size=(2, 1, 2, 3)).astype('float32') + running_mean = np.random.random(size=1).astype('float32') + running_variance = np.random.random(size=1).astype('float32') + weight_data = np.random.random(size=1).astype('float32') + bias_data = np.random.random(size=1).astype('float32') + x = paddle.to_tensor(x) + rm = paddle.to_tensor(running_mean) + rv = paddle.to_tensor(running_variance) + w = paddle.to_tensor(weight_data) + b = paddle.to_tensor(bias_data) + instance_norm_out = paddle.nn.functional.instance_norm(x, rm, rv, w, b) + print(instance_norm_out.numpy()) diff --git a/doc/fluid/api_cn/nn_cn/functional_cn/kl_div_cn.rst b/doc/fluid/api_cn/nn_cn/functional_cn/kl_div_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..87aeca4641aa449058405e393e9518ce1e98c9c8 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/functional_cn/kl_div_cn.rst @@ -0,0 +1,65 @@ +kl_div +------------------------------- + +.. py:function:: paddle.nn.functional.kl_div(input, label, reduction='mean', name=None) + +该算子计算输入(Input)和输入(Label)之间的Kullback-Leibler散度损失。注意其中输入(Input)应为对数概率值,输入(Label)应为概率值。 + +kL发散损失计算如下: + +.. math:: + + l(input, label) = label * (log(label) - input) + + +当 ``reduction`` 为 ``none`` 时,输出损失与输入(x)形状相同,各点的损失单独计算,不会对结果做reduction 。 + +当 ``reduction`` 为 ``mean`` 时,输出损失为[1]的形状,输出为所有损失的平均值。 + +当 ``reduction`` 为 ``sum`` 时,输出损失为[1]的形状,输出为所有损失的总和。 + +当 ``reduction`` 为 ``batchmean`` 时,输出损失为[N]的形状,N为批大小,输出为所有损失的总和除以批量大小。 + +参数: + - **input** (Tensor) - KL散度损失算子的输入张量。维度为[N, \*]的多维Tensor,其中N是批大小,\*表示任何数量的附加维度,数据类型为float32或float64。 + - **label** (Tensor) - KL散度损失算子的张量。与输入 ``input`` 的维度和数据类型一致的多维Tensor。 + - **reduction** (str,可选) - 要应用于输出的reduction类型,可用类型为‘none’ | ‘batchmean’ | ‘mean’ | ‘sum’,‘none’表示无reduction,‘batchmean’ 表示输出的总和除以批大小,‘mean’ 表示所有输出的平均值,‘sum’表示输出的总和。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置。默认值:None。 + +返回:Tensor KL散度损失。 + + +**代码示例:** + +.. code-block:: python + + import paddle + import numpy as np + import paddle.nn.functional as F + + paddle.disable_static() + + shape = (5, 20) + input = np.random.uniform(-10, 10, shape).astype('float32') + target = np.random.uniform(-10, 10, shape).astype('float32') + + # 'batchmean' reduction, loss shape will be [N] + pred_loss = F.kl_div(paddle.to_tensor(input), + paddle.to_tensor(target), reduction='batchmean') + # shape=[5] + + # 'mean' reduction, loss shape will be [1] + pred_loss = F.kl_div(paddle.to_tensor(input), + paddle.to_tensor(target), reduction='mean') + # shape=[1] + + # 'sum' reduction, loss shape will be [1] + pred_loss = F.kl_div(paddle.to_tensor(input), + paddle.to_tensor(target), reduction='sum') + # shape=[1] + + # 'none' reduction, loss shape is same with input shape + pred_loss = F.kl_div(paddle.to_tensor(input), + paddle.to_tensor(target), reduction='none') + # shape=[5, 20] + diff --git a/doc/fluid/api_cn/nn_cn/functional_cn/l1_loss_cn.rst b/doc/fluid/api_cn/nn_cn/functional_cn/l1_loss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7e2e408c23fde0f605b44f3b68a394fe74d35a0a --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/functional_cn/l1_loss_cn.rst @@ -0,0 +1,60 @@ +l1_loss +------------------------------- + +.. py:function:: paddle.nn.functional.l1_loss(input, label, reduction='mean', name=None) + +该接口计算输入 ``input`` 和标签 ``label`` 间的 `L1 loss` 损失。 + +该损失函数的数学计算公式如下: + +当 `reduction` 设置为 ``'none'`` 时, + + .. 
math:: + Out = \lvert input - label\rvert + +当 `reduction` 设置为 ``'mean'`` 时, + + .. math:: + Out = MEAN(\lvert input - label\rvert) + +当 `reduction` 设置为 ``'sum'`` 时, + + .. math:: + Out = SUM(\lvert input - label\rvert) + + +参数 +::::::::: + - **input** (Tensor): - 输入的Tensor,维度是[N, *], 其中N是batch size, `*` 是任意数量的额外维度。数据类型为:float32、float64、int32、int64。 + - **label** (Tensor): - 标签,维度是[N, *], 与 ``input`` 相同。数据类型为:float32、float64、int32、int64。 + - **reduction** (str, 可选): - 指定应用于输出结果的计算方式,可选值有: ``'none'``, ``'mean'``, ``'sum'`` 。默认为 ``'mean'``,计算 `L1Loss` 的均值;设置为 ``'sum'`` 时,计算 `L1Loss` 的总和;设置为 ``'none'`` 时,则返回 `L1Loss`。 + - **name** (str,可选): - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +::::::::: +``Tensor``, 输入 ``input`` 和标签 ``label`` 间的 `L1 loss` 损失。如果 `reduction` 是 ``'none'``, 则输出Loss的维度为 [N, *], 与输入 ``input`` 相同。如果 `reduction` 是 ``'mean'`` 或 ``'sum'``, 则输出Loss的维度为 [1]。 + + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + paddle.disable_static() + + input = paddle.to_tensor([[1.5, 0.8], [0.2, 1.3]]) + label = paddle.to_tensor([[1.7, 1], [0.4, 0.5]]) + + l1_loss = paddle.nn.functional.l1_loss(input, label) + print(l1_loss.numpy()) + # [0.35] + + l1_loss = paddle.nn.functional.l1_loss(input, label, reduction='none') + print(l1_loss.numpy()) + # [[0.20000005 0.19999999] + # [0.2 0.79999995]] + + l1_loss = paddle.nn.functional.l1_loss(input, label, reduction='sum') + print(l1_loss.numpy()) + # [1.4] diff --git a/doc/fluid/api_cn/nn_cn/functional_cn/layer_norm_cn.rst b/doc/fluid/api_cn/nn_cn/functional_cn/layer_norm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e91822acf6299380ab132fe7d945ac3fc3875487 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/functional_cn/layer_norm_cn.rst @@ -0,0 +1,37 @@ +.. _cn_api_nn_functional_layer_norm: + +layer_norm +------------------------------- + +.. py:class:: paddle.nn.functional.layer_norm(x, normalized_shape, weight=None, bias=None, epsilon=1e-05, name=None): + +推荐使用nn.LayerNorm。 + +详情见 :ref:`cn_api_nn_LayerNorm` . + +参数: + - **x** (int) - 输入,数据类型为float32, float64。 + - **normalized_shape** (int|list|tuple) - 期望的输入是 :math:`[*, normalized_shape[0], normalized_shape[1], ..., normalized_shape[-1]]` ,如果是一个整数,会作用在最后一个维度。 + - **weight** (Tensor) - 权重的Tensor, 默认为None。 + - **bias** (Tensor) - 偏置的Tensor, 默认为None。 + - **epsilon** (float, 可选) - 为了数值稳定加在分母上的值。默认值:1e-05。 + - **name** (string, 可选) – LayerNorm的名称, 默认值为None。更多信息请参见 :ref:`api_guide_Name` 。 + + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + np.random.seed(123) + x_data = np.random.random(size=(2, 2, 2, 3)).astype('float32') + x = paddle.to_tensor(x_data) + layer_norm_out = paddle.nn.functional.layer_norm(x, x.shape[1:]) + + print(layer_norm_out.numpy()) + diff --git a/doc/fluid/api_cn/nn_cn/functional_cn/margin_ranking_loss_cn.rst b/doc/fluid/api_cn/nn_cn/functional_cn/margin_ranking_loss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..febba0d453d55964da83b2ef54e47df456b147e9 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/functional_cn/margin_ranking_loss_cn.rst @@ -0,0 +1,50 @@ +.. _cn_api_nn_cn_margin_ranking_loss: + +margin_ranking_loss +------------------------------- + +.. py:function:: paddle.nn.functional.margin_ranking_loss(input, other, label, margin=0.0, reduction='mean', name=None) + +该算子计算输入input,other 和 标签label间的 `margin rank loss` 损失。该损失函数的数学计算公式如下: + + .. 
math:: + margin\_rank\_loss = max(0, -label * (input - other) + margin) + +当 `reduction` 设置为 ``'mean'`` 时, + + .. math:: + Out = MEAN(margin\_rank\_loss) + +当 `reduction` 设置为 ``'sum'`` 时, + + .. math:: + Out = SUM(margin\_rank\_loss) + +当 `reduction` 设置为 ``'none'`` 时,直接返回最原始的 `margin_rank_loss` 。 + +参数 +:::::::: + - **input** (Tensor):第一个输入的 `Tensor` ,数据类型为:float32、float64。 + - **other** (Tensor):第二个输入的 `Tensor` ,数据类型为:float32、float64。 + - **label** (Tensor):训练数据的标签,数据类型为:float32, float64。 + - **margin** (float,可选): - 用于加和的margin值,默认值为0。 + - **reduction** (string,可选): - 指定应用于输出结果的计算方式,可选值有: ``'none'`` 、 ``'mean'`` 、 ``'sum'`` 。如果设置为 ``'none'`` ,则直接返回 最原始的 ``margin_rank_loss`` 。如果设置为 ``'sum'`` ,则返回 ``margin_rank_loss`` 的总和。如果设置为 ``'mean'`` ,则返回 ``margin_rank_loss`` 的平均值。默认值为 ``'none'`` 。 + - **name** (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +:::::::: +Tensor, 如果 :attr:`reduction` 为 ``'sum'`` 或者是 ``'mean'`` ,则形状为 :math:`[1]` ,否则shape和输入 `input` 保持一致 。数据类型与 ``input``、 ``other`` 相同。 + +代码示例 +:::::::: + +.. code-block:: python + + import paddle + paddle.disable_static() + + input = paddle.to_tensor([[1, 2], [3, 4]], dtype='float32') + other = paddle.to_tensor([[2, 1], [2, 4]], dtype='float32') + label = paddle.to_tensor([[1, -1], [-1, -1]], dtype='float32') + loss = paddle.nn.functional.margin_ranking_loss(input, other, label) + print(loss.numpy()) # [0.75] diff --git a/doc/fluid/api_cn/nn_cn/functional_cn/max_pool1d_cn.rst b/doc/fluid/api_cn/nn_cn/functional_cn/max_pool1d_cn.rst new file mode 100755 index 0000000000000000000000000000000000000000..3ddefaa2d5f1a0c5b010cb3e3894fd3c1154656d --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/functional_cn/max_pool1d_cn.rst @@ -0,0 +1,56 @@ +.. _cn_api_nn_functional_max_pool1d: + + +max_pool1d +------------------------------- + +.. py:function:: paddle.nn.functional.max_pool1d(x, kernel_size, stride=None, padding=0, return_indices=False, ceil_mode=False, name=None) + +该算子根据输入 `x` , `kernel_size` 等参数对一个输入Tensor计算1D的最大值池化。输入和输出都是3-D Tensor, +默认是以 `NCL` 格式表示的,其中 `N` 是 batch size, `C` 是通道数, `L` 是输入特征的长度。 + +.. note:: + 详细请参考对应的 `Class` 请参考: :ref:`cn_api_nn_MaxPool1d` 。 + + +参数 +::::::::: + - **x** (Tensor): 当前算子的输入, 其是一个形状为 `[N, C, L]` 的3-D Tensor。其中 `N` 是batch size, `C` 是通道数, `L` 是输入特征的长度。 其数据类型为float32或者float64。 + - **kernel_size** (int|list|tuple): 池化核的尺寸大小. 如果kernel_size为list或tuple类型, 其必须包含一个整数. + - **stride** (int|list|tuple): 池化操作步长. 如果stride为list或tuple类型, 其必须包含一个整数. + - **padding** (string|int|list|tuple): 池化补零的方式. 如果padding是一个字符串,则必须为 `SAME` 或者 `VALID` 。如果是turple或者list类型, 则应是 `[pad_left, pad_right]` 形式。如果padding是一个非0值,那么表示会在输入的两端都padding上同样长度的0。 + - **return_indices** (bool): 是否返回最大值的索引,默认为False。 + - **ceil_mode** (bool): 是否用ceil函数计算输出的height和width,如果设置为False, 则使用floor函数来计算,默认为False。 + - **name** (str,可选): 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + + +返回 +::::::::: +``Tensor``, 输入 `x` 经过最大值池化计算得到的目标3-D Tensor,其数据类型与输入相同。 + + +抛出异常 +::::::::: + - ``ValueError`` - 如果 ``padding`` 是字符串但不是 "SAME" 和 "VALID" 。 + - ``ValueError`` - 如果 ``padding`` 是 "VALID" 但 `ceil_mode` 被设置为True。 + - ``ValueError`` - 如果 ``padding`` 是一个长度大于1的list或turple。 + - ``ShapeError`` - 如果输入x不是一个3-D Tensor。 + - ``ShapeError`` - 如果计算得到的输出形状小于等于0。 + + +代码示例 +::::::::: + +.. 
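+先给出一个针对字符串形式 ``padding`` 的补充示例(示意性写法,假设 ``padding`` 按上文说明支持 "SAME" 取值),基本用法见其后的示例:
+
+.. code-block:: python
+
+    import paddle
+    import paddle.nn.functional as F
+    import numpy as np
+    paddle.disable_static()
+
+    data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
+    # padding="SAME" 时,输出长度为 L / stride = 32 / 2 = 16
+    pool_out = F.max_pool1d(data, kernel_size=2, stride=2, padding="SAME")
+    # pool_out shape: [1, 3, 16]
+
+.. 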
code-block:: python + + import paddle + import paddle.nn.functional as F + import numpy as np + paddle.disable_static() + + data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32)) + pool_out = F.max_pool1d(data, kernel_size=2, stride=2, padding=0) + # pool_out shape: [1, 3, 16] + + pool_out, indices = F.max_pool1d(data, kernel_size=2, stride=2, padding=0, return_indices=True) + # pool_out shape: [1, 3, 16], indices shape: [1, 3, 16] diff --git a/doc/fluid/api_cn/nn_cn/functional_cn/mse_loss_cn.rst b/doc/fluid/api_cn/nn_cn/functional_cn/mse_loss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b5213836569abff9e4f78e04f603f1b727183467 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/functional_cn/mse_loss_cn.rst @@ -0,0 +1,66 @@ +mse_loss +------------------------------- + +.. py:function:: paddle.nn.functional.mse_loss(input, label, reduction='mean', name=None) + +该OP用于计算预测值和目标值的均方差误差。 + +对于预测值input和目标值label,公式为: + +当 `reduction` 设置为 ``'none'`` 时, + + .. math:: + Out = (input - label)^2 + +当 `reduction` 设置为 ``'mean'`` 时, + + .. math:: + Out = \operatorname{mean}((input - label)^2) + +当 `reduction` 设置为 ``'sum'`` 时, + + .. math:: + Out = \operatorname{sum}((input - label)^2) + + +参数: +::::::::: + - **input** (Tensor) - 预测值,维度为 :math:`[N_1, N_2, ..., N_k]` 的多维Tensor。数据类型为float32或float64。 + - **label** (Tensor) - 目标值,维度为 :math:`[N_1, N_2, ..., N_k]` 的多维Tensor。数据类型为float32或float64。 + +返回 +::::::::: +``Tensor``, 输入 ``input`` 和标签 ``label`` 间的 `mse loss` 损失。 + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle + # static graph mode + paddle.enable_static() + mse_loss = paddle.nn.loss.MSELoss() + input = paddle.data(name="input", shape=[1]) + label = paddle.data(name="label", shape=[1]) + place = paddle.CPUPlace() + input_data = np.array([1.5]).astype("float32") + label_data = np.array([1.7]).astype("float32") + output = mse_loss(input,label) + exe = paddle.static.Executor(place) + exe.run(paddle.static.default_startup_program()) + output_data = exe.run( + paddle.static.default_main_program(), + feed={"input":input_data, "label":label_data}, + fetch_list=[output], + return_numpy=True) + print(output_data) + # [array([0.04000002], dtype=float32)] + # dynamic graph mode + paddle.disable_static() + input = paddle.to_tensor(input_data) + label = paddle.to_tensor(label_data) + output = mse_loss(input, label) + print(output.numpy()) + # [0.04000002] + diff --git a/doc/fluid/api_cn/nn_cn/functional_cn/nll_loss_cn.rst b/doc/fluid/api_cn/nn_cn/functional_cn/nll_loss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4840c999342c5e17715105ea5963dfff7b89112d --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/functional_cn/nll_loss_cn.rst @@ -0,0 +1,45 @@ +.. _cn_api_nn_functional_nll_loss: + +nll_loss +------------------------------- +.. 
py:function:: paddle.nn.functional.nll_loss(input, label, weight=None, ignore_index=-100, reduction='mean', name=None) + +该接口返回 `negative log likelihood` 。可在 :ref:`cn_api_nn_loss_NLLLoss` 查看详情。 + +参数 +::::::::: + - **input** (Tensor): - 输入 `Tensor`, 其形状为 :math:`[N, C]` , 其中 `C` 为类别数。但是对于多维度的情形下,它的形状为 :math:`[N, C, d_1, d_2, ..., d_K]` 。数据类型为float32或float64。 + - **label** (Tensor): - 输入x对应的标签值。其形状为 :math:`[N,]` 或者 :math:`[N, d_1, d_2, ..., d_K]`, 数据类型为int64。 + - **weight** (Tensor, 可选): - 手动指定每个类别的权重。其默认为 `None` 。如果提供该参数的话,长度必须为 `num_classes` 。数据类型为float32或float64。 + - **ignore_index** (int64, 可选): - 指定一个忽略的标签值,此标签值不参与计算。默认值为-100。数据类型为int64。 + - **reduction** (str, 可选): - 指定应用于输出结果的计算方式,可选值有: `none`, `mean`, `sum` 。默认为 `mean` ,计算 `mini-batch` loss均值。设置为 `sum` 时,计算 `mini-batch` loss的总和。设置为 `none` 时,则返回loss Tensor。数据类型为string。 + - **name** (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +返回 +::::::::: +`Tensor` ,返回存储表示 `negative log likelihood loss` 的损失值。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + from paddle.nn.functional import nll_loss + log_softmax = paddle.nn.LogSoftmax(axis=1) + + input_np = np.array([[0.88103855, 0.9908683 , 0.6226845 ], + [0.53331435, 0.07999352, 0.8549948 ], + [0.25879037, 0.39530203, 0.698465 ], + [0.73427284, 0.63575995, 0.18827209], + [0.05689114, 0.0862954 , 0.6325046 ]]).astype(np.float32) + label_np = np.array([0, 2, 1, 1, 0]).astype(np.int64) + + place = paddle.CPUPlace() + paddle.disable_static(place) + input = paddle.to_tensor(input_np) + log_out = log_softmax(input) + label = paddle.to_tensor(label_np) + result = nll_loss(log_out, label) + print(result.numpy()) # [1.0720209] diff --git a/doc/fluid/api_cn/nn_cn/functional_cn/normalize_cn.rst b/doc/fluid/api_cn/nn_cn/functional_cn/normalize_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..703a22ddef7b99fe18a9e8d52fb34747ed6cc106 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/functional_cn/normalize_cn.rst @@ -0,0 +1,59 @@ +normalize +------------------------------- + +.. py:function:: paddle.nn.functional.normalize(x, p=2, axis=1, epsilon=1e-12, name=None) + +该接口使用 :math:`L_p` 范数沿维度 ``axis`` 对 ``x`` 进行归一化。计算公式如下: + +.. math:: + + y = \frac{x}{ \max\left( \lvert \lvert x \rvert \rvert_p, epsilon\right) } + +.. math:: + \lvert \lvert x \rvert \rvert_p = \left(\sum_i {\lvert x_i\rvert^p} \right)^{1/p} + +其中 :math:`\sum_i{\lvert x_i\rvert^p}` 沿维度 ``axis`` 进行计算。 + + +参数 +::::::::: + - **x** (Tensor) - 输入可以是N-D Tensor。数据类型为:float32、float64。 + - **p** (float|int, 可选) - 范数公式中的指数值。默认值:2 + - **axis** (int, 可选)- 要进行归一化的轴。如果 ``x`` 是1-D Tensor,轴固定为0。如果 `axis < 0`,轴为 `x.ndim + axis`。-1表示最后一维。 + - **epsilon** (float,可选) - 添加到分母上的值以防止分母除0。默认值为1e-12。 + - **name** (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +::::::::: +``Tensor``, 输出的形状和数据类型和 ``x`` 相同。 + +抛出异常: +::::::::: + - ``TypeError`` - 当参数 ``p`` 或者 ``axis`` 的类型不符合要求时。或者当参数 ``x`` 的类型或数据类型不符合要求时。 + +代码示例 +::::::::: + +.. code-block:: python + + import numpy as np + import paddle + import paddle.nn.functional as F + + paddle.disable_static() + x = np.arange(6, dtype=np.float32).reshape(2,3) + x = paddle.to_tensor(x) + y = F.normalize(x) + print(y.numpy()) + # [[0. 0.4472136 0.8944272 ] + # [0.42426404 0.5656854 0.7071067 ]] + + y = F.normalize(x, p=1.5) + print(y.numpy()) + # [[0. 0.40862012 0.81724024] + # [0.35684016 0.4757869 0.5947336 ]] + + y = F.normalize(x, axis=0) + print(y.numpy()) + # [[0. 0.24253564 0.37139067] + # [1. 
0.97014254 0.9284767 ]] diff --git a/doc/fluid/api_cn/nn_cn/functional_cn/one_hot_cn.rst b/doc/fluid/api_cn/nn_cn/functional_cn/one_hot_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9df99a99421004e31491c28c6223d3b0bfc3f752 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/functional_cn/one_hot_cn.rst @@ -0,0 +1,60 @@ +.. _cn_api_nn_functional_one_hot: + +one_hot +------------------------------- + +.. py:function:: paddle.nn.functional.one_hot(x, num_classes, name=None) +该OP将输入'x'中的每个id转换为一个one-hot向量,其长度为 ``num_classes`` ,该id对应的向量维度上的值为1,其余维度的值为0。 + +输出的Tensor的shape是在输入shape的最后一维后面添加了num_classes的维度。 + +- 示例1: + +.. code-block:: text + + 输入: + X.shape = [4] + X.data = [1, 1, 3, 0] + num_classes = 4 + + 输出: + Out.shape = [4, 4] + Out.data = [[0., 1., 0., 0.], + [0., 1., 0., 0.], + [0., 0., 0., 1.], + [1., 0., 0., 0.]] + +- 示例2: + +.. code-block:: text + + 输入: + X.shape = [4] + X.data = [1, 1, 5, 0] + num_classes = 4 + + 输出:抛出 Illegal value 的异常 + X中第2维的值是5,超过了num_classes,因此抛异常。 + + +参数: + - **x** (Tensor) - 维度为 :math:`[N_1, ..., N_n]` 的多维Tensor,维度至少1维。数据类型为int32或int64。 + - **num_classes** (int) - 用于定义一个one-hot向量的长度。若输入为词id,则 ``num_classes`` 通常取值为词典大小。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:Tensor,转换后的one_hot Tensor,数据类型为float32。 + +**代码示例**: + +.. code-block:: python + + import paddle + label = paddle.data(name="label", shape=[4], dtype="int64") + # label.shape = [4] + # label.data = [1, 1, 3, 0] + one_hot_label = paddle.nn.functional.one_hot(x=label, num_classes=4) + # one_hot_label.shape = [4, 4] + # one_hot_label.data = [[0., 1., 0., 0.], + # [0., 1., 0., 0.], + # [0., 0., 0., 1.], + # [1., 0., 0., 0.]] \ No newline at end of file diff --git a/doc/fluid/api_cn/nn_cn/functional_cn/sigmoid_cn.rst b/doc/fluid/api_cn/nn_cn/functional_cn/sigmoid_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d58730bb9732ac4edb8fc2bfeeab60ae3ea5fafd --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/functional_cn/sigmoid_cn.rst @@ -0,0 +1,33 @@ +.. _cn_api_nn_functional_sigmoid: + +sigmoid +------------------------------- + +.. py:function:: paddle.nn.functional.sigmoid(x, name=None) + +sigmoid 激活函数。 + + .. math:: + + output = \frac{1}{1 + e^{-x}} + +参数 +:::::::: + **x** (Tensor) - 输入的 ``Tensor`` ,数据类型为:float32、float64、int32、int64。 + **name** (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +返回 +:::::::: + ``Tensor``, 经过 ``sigmoid`` 计算后的结果, 和输入 `x` 有一样的shape和数据类型。 + +代码示例 +::::::::: +.. code-block:: python + + import paddle + import paddle.nn.functional as F + + paddle.disable_static() + x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0]) + output = F.sigmoid(x) + print(output.numpy()) # [0.7310586, 0.880797, 0.95257413, 0.98201376] diff --git a/doc/fluid/api_cn/nn_cn/gather_tree_cn.rst b/doc/fluid/api_cn/nn_cn/gather_tree_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..529e087426b8d29654c2d17b936573441f4739dc --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/gather_tree_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_gather_tree: + +gather_tree +------------------------------- +:doc_source: paddle.fluid.layers.gather_tree + + diff --git a/doc/fluid/api_cn/nn_cn/gelu_cn.rst b/doc/fluid/api_cn/nn_cn/gelu_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..586cd3677d7fddeeddccb47df01b46125dddba08 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/gelu_cn.rst @@ -0,0 +1,48 @@ +.. _cn_api_nn_cn_gelu: + +gelu +------------------------------- + +.. 
py:function:: paddle.nn.functional.gelu(x, approximate=False, name=None) + +gelu激活层(GELU Activation Operator) + +逐元素计算 gelu激活函数。更多细节请参考 `Gaussian Error Linear Units `_ 。 + +如果使用近似计算: + +.. math:: + gelu(x) = 0.5 * x * (1 + tanh(\sqrt{\frac{2}{\pi}} * (x + 0.044715x^{3}))) + +如果不使用近似计算: + +.. math:: + gelu(x) = 0.5 * x * (1 + erf(\frac{x}{\sqrt{2}})) + +其中,:math:`x` 为输入的 Tensor + +参数: +:::::::::: + - x (Tensor) - 输入的 ``Tensor`` ,数据类型为:float32、float64。 + - approximate (bool, 可选) - 是否使用近似计算,默认值为 False,表示不使用近似计算。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +:::::::::: + ``Tensor`` ,数据类型和形状同 ``x`` 一致。 + +代码示例 +:::::::::: + +.. code-block:: python + + import paddle + import paddle.nn.functional as F + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([[-1, 0.5],[1, 1.5]])) + out1 = F.gelu(x) # [-0.158655 0.345731 0.841345 1.39979] + out2 = F.gelu(x, True) # [-0.158808 0.345714 0.841192 1.39957] + diff --git a/doc/fluid/api_cn/nn_cn/generate_mask_labels_cn.rst b/doc/fluid/api_cn/nn_cn/generate_mask_labels_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..33ffeead7de1ac9473ec6bef011a3a468bd16b36 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/generate_mask_labels_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_generate_mask_labels: + +generate_mask_labels +------------------------------- +:doc_source: paddle.fluid.layers.generate_mask_labels + + diff --git a/doc/fluid/api_cn/nn_cn/generate_proposal_labels_cn.rst b/doc/fluid/api_cn/nn_cn/generate_proposal_labels_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e5ac9b376be27e33fce9398dc57185b3f5d50ff7 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/generate_proposal_labels_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_generate_proposal_labels: + +generate_proposal_labels +------------------------------- +:doc_source: paddle.fluid.layers.generate_proposal_labels + + diff --git a/doc/fluid/api_cn/nn_cn/generate_proposals_cn.rst b/doc/fluid/api_cn/nn_cn/generate_proposals_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8632332ed052f66a5c1541409aceb5226785c870 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/generate_proposals_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_generate_proposals: + +generate_proposals +------------------------------- +:doc_source: paddle.fluid.layers.generate_proposals + + diff --git a/doc/fluid/api_cn/nn_cn/grid_sampler_cn.rst b/doc/fluid/api_cn/nn_cn/grid_sampler_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1fddbc280fe6c41aa2fb54c1e468cc2acec103c5 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/grid_sampler_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_grid_sampler: + +grid_sampler +------------------------------- +:doc_source: paddle.fluid.layers.grid_sampler + + diff --git a/doc/fluid/api_cn/nn_cn/hard_sigmoid_cn.rst b/doc/fluid/api_cn/nn_cn/hard_sigmoid_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..07eb5f5aa7e2c4dbf7542672d5400d14f5e38829 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/hard_sigmoid_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_hard_sigmoid: + +hard_sigmoid +------------------------------- +:doc_source: paddle.fluid.layers.hard_sigmoid + + diff --git a/doc/fluid/api_cn/nn_cn/hard_swish_cn.rst b/doc/fluid/api_cn/nn_cn/hard_swish_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b708d0657cdb979f43bdb0134cc6b7beba9016e9 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/hard_swish_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_nn_cn_hard_swish: + +hard_swish +------------------------------- +:doc_source: paddle.fluid.layers.hard_swish + + diff --git a/doc/fluid/api_cn/nn_cn/hardshrink_cn.rst b/doc/fluid/api_cn/nn_cn/hardshrink_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f849d89d5d77b1dbd8c7056333755413ef1e59d9 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/hardshrink_cn.rst @@ -0,0 +1,40 @@ +.. _cn_api_nn_cn_hard_shrink: + +hardshrink +------------------------------- +.. py:function:: paddle.nn.functional.hardshrink(x, threshold=0.5, name=None) + +hardshrink激活层。计算公式如下: + +.. math:: + + hardshrink(x)= + \left\{ + \begin{aligned} + &x, & & if \ x > threshold \\ + &x, & & if \ x < -threshold \\ + &0, & & if \ others + \end{aligned} + \right. + +其中,:math:`x` 为输入的 Tensor + +参数 +:::::::::: + - x (Tensor) - 输入的 ``Tensor`` ,数据类型为:float32、float64。 + - threshold (float, 可选) - hard_shrink激活计算公式中的threshold值。默认值为0.5。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +:::::::::: + ``Tensor`` ,数据类型和形状同 ``x`` 一致。 + +代码示例 +:::::::::: + + import paddle + import paddle.nn.functional as F + + paddle.disable_static() + x = paddle.to_tensor([-1, 0.3, 2.5]) + out = F.hardshrink(x) # [-1., 0., 2.5] diff --git a/doc/fluid/api_cn/nn_cn/hardtanh_cn.rst b/doc/fluid/api_cn/nn_cn/hardtanh_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..fa18b323674f9f5f9fac51b41f32c804b2c1852b --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/hardtanh_cn.rst @@ -0,0 +1,45 @@ +.. _cn_api_nn_cn_hardtanh: + +hardtanh +------------------------------- +.. py:function:: paddle.nn.functional.hardtanh(x, min=-1.0, max=1.0, name=None): + +hardtanh激活层(Hardtanh Activation Operator)。计算公式如下: + +.. math:: + + hardtanh(x)= + \left\{ + \begin{aligned} + &max, & & if \ x > max \\ + &min, & & if \ x < min \\ + &x, & & if \ others + \end{aligned} + \right. + +其中,:math:`x` 为输入的 Tensor + +参数 +:::::::::: + - x (Tensor) - 输入的 ``Tensor`` ,数据类型为:float32、float64。 + - min (float, 可选) - hardtanh激活计算公式中的min值。默认值为-1。 + - max (float, 可选) - hardtanh激活计算公式中的max值。默认值为1。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +:::::::::: + ``Tensor`` ,数据类型和形状同 ``x`` 一致。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import paddle.nn.functional as F + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([-1.5, 0.3, 2.5])) + out = F.hardtanh(x) # [-1., 0.3, 1.] diff --git a/doc/fluid/api_cn/nn_cn/hash_cn.rst b/doc/fluid/api_cn/nn_cn/hash_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6d0caf1dc81244228583df6a40bb02868b2039fc --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/hash_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_hash: + +hash +------------------------------- +:doc_source: paddle.fluid.layers.hash + + diff --git a/doc/fluid/api_cn/nn_cn/hsigmoid_cn.rst b/doc/fluid/api_cn/nn_cn/hsigmoid_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0bf7580b3d2b8c56326e31022c15bcbdf49c44a3 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/hsigmoid_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_hsigmoid: + +hsigmoid +------------------------------- +:doc_source: paddle.fluid.layers.hsigmoid + + diff --git a/doc/fluid/api_cn/nn_cn/huber_loss_cn.rst b/doc/fluid/api_cn/nn_cn/huber_loss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..30d1f873c2491039e238ccf43d0e4d2f919dfaa1 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/huber_loss_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_nn_cn_huber_loss: + +huber_loss +------------------------------- +:doc_source: paddle.fluid.layers.huber_loss + + diff --git a/doc/fluid/api_cn/nn_cn/image_resize_cn.rst b/doc/fluid/api_cn/nn_cn/image_resize_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..583b740ed797456be628525fa2404778a87cbd12 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/image_resize_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_image_resize: + +image_resize +------------------------------- +:doc_source: paddle.fluid.layers.image_resize + + diff --git a/doc/fluid/api_cn/nn_cn/image_resize_short_cn.rst b/doc/fluid/api_cn/nn_cn/image_resize_short_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f38ba9a9299b2c7f7b3aa329184cb254e35dd242 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/image_resize_short_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_image_resize_short: + +image_resize_short +------------------------------- +:doc_source: paddle.fluid.layers.image_resize_short + + diff --git a/doc/fluid/api_cn/nn_cn/initializer_cn.rst b/doc/fluid/api_cn/nn_cn/initializer_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e29d90765a1e232a4aa83320f643f7dc1d71d3ac --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/initializer_cn.rst @@ -0,0 +1,85 @@ +======================= +paddle.nn.initializer +======================= + +.. toctree:: + :maxdepth: 1 + +initializer_cn/Bilinear_cn.rst +initializer_cn/Bilinear_cn.rst +initializer_cn/Bilinear_cn.rst +initializer_cn/Bilinear_cn.rst +initializer_cn/Bilinear_cn.rst +initializer_cn/Bilinear_cn.rst +initializer_cn/Bilinear_cn.rst +initializer_cn/Bilinear_cn.rst +initializer_cn/Bilinear_cn.rst +initializer_cn/Bilinear_cn.rst +initializer_cn/Bilinear_cn.rst + +initializer_cn/Constant_cn.rst +initializer_cn/Constant_cn.rst +initializer_cn/Constant_cn.rst +initializer_cn/Constant_cn.rst +initializer_cn/Constant_cn.rst +initializer_cn/Constant_cn.rst +initializer_cn/Constant_cn.rst +initializer_cn/Constant_cn.rst +initializer_cn/Constant_cn.rst +initializer_cn/Constant_cn.rst +initializer_cn/Constant_cn.rst +initializer_cn/MSRA_cn.rst +initializer_cn/MSRA_cn.rst +initializer_cn/MSRA_cn.rst +initializer_cn/MSRA_cn.rst +initializer_cn/MSRA_cn.rst +initializer_cn/MSRA_cn.rst +initializer_cn/MSRA_cn.rst +initializer_cn/MSRA_cn.rst +initializer_cn/MSRA_cn.rst +initializer_cn/MSRA_cn.rst +initializer_cn/MSRA_cn.rst +initializer_cn/Normal_cn.rst +initializer_cn/Normal_cn.rst +initializer_cn/Normal_cn.rst +initializer_cn/Normal_cn.rst +initializer_cn/Normal_cn.rst +initializer_cn/Normal_cn.rst +initializer_cn/Normal_cn.rst +initializer_cn/Normal_cn.rst +initializer_cn/Normal_cn.rst +initializer_cn/Normal_cn.rst +initializer_cn/Normal_cn.rst +initializer_cn/TruncatedNormal_cn.rst +initializer_cn/TruncatedNormal_cn.rst +initializer_cn/TruncatedNormal_cn.rst +initializer_cn/TruncatedNormal_cn.rst +initializer_cn/TruncatedNormal_cn.rst +initializer_cn/TruncatedNormal_cn.rst +initializer_cn/TruncatedNormal_cn.rst +initializer_cn/TruncatedNormal_cn.rst +initializer_cn/TruncatedNormal_cn.rst +initializer_cn/TruncatedNormal_cn.rst +initializer_cn/TruncatedNormal_cn.rst +initializer_cn/Uniform_cn.rst +initializer_cn/Uniform_cn.rst +initializer_cn/Uniform_cn.rst +initializer_cn/Uniform_cn.rst +initializer_cn/Uniform_cn.rst +initializer_cn/Uniform_cn.rst +initializer_cn/Uniform_cn.rst +initializer_cn/Uniform_cn.rst +initializer_cn/Uniform_cn.rst +initializer_cn/Uniform_cn.rst +initializer_cn/Uniform_cn.rst +initializer_cn/Xavier_cn.rst 
+initializer_cn/Xavier_cn.rst +initializer_cn/Xavier_cn.rst +initializer_cn/Xavier_cn.rst +initializer_cn/Xavier_cn.rst +initializer_cn/Xavier_cn.rst +initializer_cn/Xavier_cn.rst +initializer_cn/Xavier_cn.rst +initializer_cn/Xavier_cn.rst +initializer_cn/Xavier_cn.rst +initializer_cn/Xavier_cn.rst diff --git a/doc/fluid/api_cn/nn_cn/initializer_cn/Bilinear_cn.rst b/doc/fluid/api_cn/nn_cn/initializer_cn/Bilinear_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..04a59a88dd6e50d711815beb069183966a8950cd --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/initializer_cn/Bilinear_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn/initializer_cn_Bilinear: + +Bilinear +------------------------------- +:doc_source: paddle.fluid.initializer.Bilinear + + diff --git a/doc/fluid/api_cn/nn_cn/initializer_cn/Constant_cn.rst b/doc/fluid/api_cn/nn_cn/initializer_cn/Constant_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..fb86cdb81894240990d088fdeadccdbc08cb8db9 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/initializer_cn/Constant_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn/initializer_cn_Constant: + +Constant +------------------------------- +:doc_source: paddle.fluid.initializer.Constant + + diff --git a/doc/fluid/api_cn/nn_cn/initializer_cn/MSRA_cn.rst b/doc/fluid/api_cn/nn_cn/initializer_cn/MSRA_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..abb4b94e20a73f521bd2368190c2217fe9dfafc7 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/initializer_cn/MSRA_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn/initializer_cn_MSRA: + +MSRA +------------------------------- +:doc_source: paddle.fluid.initializer.MSRA + + diff --git a/doc/fluid/api_cn/nn_cn/initializer_cn/Normal_cn.rst b/doc/fluid/api_cn/nn_cn/initializer_cn/Normal_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1f8f789de2a3cbad006448b484334dbed8acad4c --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/initializer_cn/Normal_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn/initializer_cn_Normal: + +Normal +------------------------------- +:doc_source: paddle.fluid.initializer.Normal + + diff --git a/doc/fluid/api_cn/nn_cn/initializer_cn/TruncatedNormal_cn.rst b/doc/fluid/api_cn/nn_cn/initializer_cn/TruncatedNormal_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..30c6e11aa150fffc6a3c01b9a7cdbb7d72c8ff89 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/initializer_cn/TruncatedNormal_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn/initializer_cn_TruncatedNormal: + +TruncatedNormal +------------------------------- +:doc_source: paddle.fluid.initializer.TruncatedNormal + + diff --git a/doc/fluid/api_cn/nn_cn/initializer_cn/Uniform_cn.rst b/doc/fluid/api_cn/nn_cn/initializer_cn/Uniform_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b75c7ae0bde0f14633abcad05ab85b4b15582eed --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/initializer_cn/Uniform_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn/initializer_cn_Uniform: + +Uniform +------------------------------- +:doc_source: paddle.fluid.initializer.Uniform + + diff --git a/doc/fluid/api_cn/nn_cn/initializer_cn/Xavier_cn.rst b/doc/fluid/api_cn/nn_cn/initializer_cn/Xavier_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7e42c2e87e6924f84d9e75acb57ff1872f8bd5b4 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/initializer_cn/Xavier_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_nn_cn/initializer_cn_Xavier: + +Xavier +------------------------------- +:doc_source: paddle.fluid.initializer.Xavier + + diff --git a/doc/fluid/api_cn/nn_cn/interpolate_cn.rst b/doc/fluid/api_cn/nn_cn/interpolate_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1b1c1e3a629fcb38be1ec0c49fb580e1c8313962 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/interpolate_cn.rst @@ -0,0 +1,208 @@ +.. _cn_api_paddle_nn_functioanl_interpolate: + +Inerpolate +------------------------------- + +.. py:function:: paddle.nn.functioanl.interpolate(input, out_shape=None, scale=None, name=None, resample='BILINEAR', actual_shape=None, align_corners=True, align_mode=1, data_format='NCHW') + +:alias_main: paddle.nn.functional.interpolate +:alias: paddle.nn.functional.interpolate,paddle.nn.functional.common.interpolate + + + +**注意:** 参数 ``actual_shape`` 将被弃用,请使用 ``out_shape`` 替代。 + +该OP用于调整一个batch中图片的大小。 + +输入为4-D Tensor时形状为(num_batches, channels, in_h, in_w)或者(num_batches, in_h, in_w, channels),输入为5-D Tensor时形状为(num_batches, channels, in_d, in_h, in_w)或者(num_batches, in_d, in_h, in_w, channels),并且调整大小只适用于深度,高度和宽度对应的维度。 + +支持的插值方法: + + NEAREST:最近邻插值 + + BILINEAR:双线性插值 + + TRALINEAR:三线性插值 + + BICUBIC:双三次插值 + + + + +最近邻插值是在输入张量的高度和宽度上进行最近邻插值。 + +双线性插值是线性插值的扩展,用于在直线2D网格上插值两个变量(例如,该操作中的H方向和W方向)的函数。 关键思想是首先在一个方向上执行线性插值,然后在另一个方向上再次执行线性插值。 + +三线插值是线性插值的一种扩展,是3参数的插值方程(比如op里的D,H,W方向),在三个方向上进行线性插值。 + +双三次插值是在二维网格上对数据点进行插值的三次插值的扩展,它能创造出比双线性和最近临插值更为光滑的图像边缘。 + +Align_corners和align_mode是可选参数,插值的计算方法可以由它们选择。 + +示例: + +:: + + + scale 计算方法: + + if align_corners = True && out_size > 1 : + + scale_factor = (in_size-1.0)/(out_size-1.0) + + else: + + scale_factor = float(in_size/out_size) + + + 不同插值方式的输出纬度计算规则: + + Nearest neighbor interpolation: + + if: + align_corners = False + + input : (N,C,H_in,W_in) + output: (N,C,H_out,W_out) where: + + H_out = \left \lfloor {H_{in} * scale_{}factor}} \right \rfloor + W_out = \left \lfloor {W_{in} * scale_{}factor}} \right \rfloor + + else: + align_corners = True + + input : (N,C,H_in,W_in) + output: (N,C,H_out,W_out) where: + + H_out = round(H_{in} * scale_{factor}) + W_out = round(W_{in} * scale_{factor}) + + Bilinear interpolation: + + if: + align_corners = False , align_mode = 0 + + input : (N,C,H_in,W_in) + output: (N,C,H_out,W_out) where: + + H_out = (H_{in}+0.5) * scale_{factor} - 0.5 + W_out = (W_{in}+0.5) * scale_{factor} - 0.5 + + + else: + + input : (N,C,H_in,W_in) + output: (N,C,H_out,W_out) where: + + H_out = H_{in} * scale_{factor} + W_out = W_{in} * scale_{factor} + + Bicubic interpolation: + + if: + align_corners = False + + input : (N,C,H_in,W_in) + output: (N,C,H_out,W_out) where: + + H_out = (H_{in}+0.5) * scale_{factor} - 0.5 + W_out = (W_{in}+0.5) * scale_{factor} - 0.5 + + else: + + input : (N,C,H_in,W_in) + output: (N,C,H_out,W_out) where: + + H_out = H_{in} * scale_{factor} + W_out = W_{in} * scale_{factor} + + Trilinear interpolation: + + if: + align_corners = False , align_mode = 0 + + input : (N,C,D_in,H_in,W_in) + output: (N,C,D_out,H_out,W_out) where: + + D_out = (D_{in}+0.5) * scale_{factor} - 0.5 + H_out = (H_{in}+0.5) * scale_{factor} - 0.5 + W_out = (W_{in}+0.5) * scale_{factor} - 0.5 + + + else: + + input : (N,C,D_in,H_in,W_in) + output: (N,C,D_out,H_out,W_out) where: + + D_out = D_{in} * scale_{factor} + H_out = H_{in} * scale_{factor} + W_out = W_{in} * scale_{factor} + + +有关最近邻插值的详细信息,请参阅维基百科: +https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation + +有关双线性插值的详细信息,请参阅维基百科: 
+https://en.wikipedia.org/wiki/Bilinear_interpolation + +有关三线插值的详细信息,请参阅维基百科: +https://en.wikipedia.org/wiki/Trilinear_interpolation + +有关双三次插值的详细信息,请参阅维基百科: +https://en.wikipedia.org/wiki/Bicubic_interpolation + +参数: + - **input** (Variable) - 4-D或5-D Tensor,数据类型为float32、float64或uint8,其数据格式由参数 ``data_format`` 指定。 + - **out_shape** (list|tuple|Variable|None) - 输出Tensor,输入为4D张量时,形状为为(out_h, out_w)的2-D Tensor。输入为5-D Tensor时,形状为(out_d, out_h, out_w)的3-D Tensor。如果 :code:`out_shape` 是列表,每一个元素可以是整数或者形状为[1]的变量。如果 :code:`out_shape` 是变量,则其维度大小为1。默认值为None。 + - **scale** (float|Variable|None)-输入的高度或宽度的乘数因子 。 out_shape和scale至少要设置一个。out_shape的优先级高于scale。默认值为None。 + - **name** (str|None) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。默认值为None。 + - **resample** (str) - 插值方法。支持“双线性”,“三线性”,“临近插值”,"双三次"。默认值为双线性插值。 + - **actual_shape** (Variable) - 可选输入,用于动态指定输出形状。如果指定actual_shape,图像将根据给定的形状调整大小,而不是根据指定形状的 :code:`out_shape` 和 :code:`scale` 进行调整。也就是说, :code:`actual_shape` 具有最高的优先级。如果希望动态指定输出形状,建议使用 :code:`out_shape` ,因为 :code:`actual_shape` 未来将被弃用。在使用actual_shape指定输出形状时,还需要设置out_shape和scale之一,否则在图形构建阶段会出现错误。默认值:None + - **align_corners** (bool)- 一个可选的bool型参数,如果为True,则将输入和输出张量的4个角落像素的中心对齐,并保留角点像素的值。 默认值为True + - **align_mode** (int)- 双线性插值的可选项。 可以是 '0' 代表src_idx = scale *(dst_indx + 0.5)-0.5;如果为'1' ,代表src_idx = scale * dst_index。 + - **data_format** (str,可选)- 指定输入的数据格式,输出的数据格式将与输入保持一致。对于4-D Tensor,支持 NCHW(num_batches, channels, height, width) 或者 NHWC(num_batches, height, width, channels),对于5-D Tensor,支持 NCDHW(num_batches, channels, depth, height, width)或者 NDHWC(num_batches, depth, height, width, channels),默认值:'NCHW'。 + +返回:4-D Tensor,形状为 (num_batches, channels, out_h, out_w) 或 (num_batches, out_h, out_w, channels);或者5-D Tensor,形状为 (num_batches, channels, out_d, out_h, out_w) 或 (num_batches, out_d, out_h, out_w, channels)。 + +返回类型: 变量(variable) + +抛出异常: + - :code:`TypeError` - out_shape应该是一个列表、元组或变量。 + - :code:`TypeError` - actual_shape应该是变量或None。 + - :code:`ValueError` - image_resize的"resample"只能是"BILINEAR"或"TRILINEAR"或"NEAREST"或"BICUBIC"。 + - :code:`ValueError` - out_shape 和 scale 不可同时为 None。 + - :code:`ValueError` - out_shape 的长度必须为2如果输入是4D张量。 + - :code:`ValueError` - out_shape 的长度必须为3如果输入是5D张量。 + - :code:`ValueError` - scale应大于0。 + - :code:`TypeError` - align_corners 应为bool型。 + - :code:`ValueError` - align_mode 只能取 ‘0’ 或 ‘1’。 + - :code:`ValueError` - data_format 只能取 ‘NCHW’、‘NHWC’、‘NCDHW’ 或者 ‘NDHWC’。 + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + input = fluid.data(name="input", shape=[None,3,6,10]) + output = paddle.nn.functional.interpolate(input=input,out_shape=[12,12]) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + input_data = np.random.rand(2,3,6,10).astype("float32") + output_data = exe.run(fluid.default_main_program(), + feed={"input":input_data}, + fetch_list=[output], + return_numpy=True) + print(output_data[0].shape) + # (2, 3, 12, 12) + + #imperative mode + import paddle.fluid.dygraph as dg + with dg.guard(place) as g: + input = dg.to_variable(input_data) + output = paddle.nn.functional.interpolate(input=input, out_shape=[12,12]) + print(output.shape) + # [2L, 3L, 12L, 12L] + diff --git a/doc/fluid/api_cn/nn_cn/inverse_time_decay_cn.rst b/doc/fluid/api_cn/nn_cn/inverse_time_decay_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..67129f0ed67ff4d447bade80509e63ae1fc6f8d5 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/inverse_time_decay_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_nn_cn_inverse_time_decay: + +inverse_time_decay +------------------------------- +:doc_source: paddle.fluid.layers.inverse_time_decay + + diff --git a/doc/fluid/api_cn/nn_cn/iou_similarity_cn.rst b/doc/fluid/api_cn/nn_cn/iou_similarity_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..73123a935d07910d0900a103c11e741920553296 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/iou_similarity_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_iou_similarity: + +iou_similarity +------------------------------- +:doc_source: paddle.fluid.layers.iou_similarity + + diff --git a/doc/fluid/api_cn/nn_cn/l2_normalize_cn.rst b/doc/fluid/api_cn/nn_cn/l2_normalize_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2fd7f7efc501fd55be25e7e02d16d27c3b81a398 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/l2_normalize_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_l2_normalize: + +l2_normalize +------------------------------- +:doc_source: paddle.fluid.layers.l2_normalize + + diff --git a/doc/fluid/api_cn/nn_cn/label_smooth_cn.rst b/doc/fluid/api_cn/nn_cn/label_smooth_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a57b2f99093614c96efdcba45d11da2fbde09d61 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/label_smooth_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_label_smooth: + +label_smooth +------------------------------- +:doc_source: paddle.fluid.layers.label_smooth + + diff --git a/doc/fluid/api_cn/nn_cn/layer_cn.rst b/doc/fluid/api_cn/nn_cn/layer_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..357167c5276d083feea6a8e5c381dfeb4e33a395 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/layer_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_Layer: + +Layer +------------------------------- +:doc_source: paddle.fluid.dygraph.layers.Layer + + diff --git a/doc/fluid/api_cn/nn_cn/layer_cn/Sigmoid_cn.rst b/doc/fluid/api_cn/nn_cn/layer_cn/Sigmoid_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ad3d29982aa4276beacf09c57f723f82201e86b7 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/layer_cn/Sigmoid_cn.rst @@ -0,0 +1,38 @@ +.. _cn_api_nn_layer_Sigmoid: + +Sigmoid +------------------------------- + +.. py:class:: paddle.nn.layer.Sigmoid(name=None) + +该接口用于创建一个 ``Sigmoid`` 的可调用类。 这个类可以计算输入 `x` 经过激活函数 `sigmoid` 之后的值。 + + .. math:: + + output = \frac{1}{1 + e^{-x}} + +参数 +:::::::: + - **name** (str,可选)- 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +形状 +:::::::: + - **x** (Tensor)- N-D tensor, 可以支持的数据类型是float16,float32,float64。 + +返回 +:::::::: + 返回计算 ``Sigmoid`` 的可调用对象。 + + +代码示例 +:::::::: + +.. code-block:: python + + import paddle + + paddle.disable_static() + m = paddle.nn.Sigmoid() + x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0]) + output = m(x) + print(output.numpy()) # [0.7310586, 0.880797, 0.95257413, 0.98201376 diff --git a/doc/fluid/api_cn/nn_cn/leaky_relu_cn.rst b/doc/fluid/api_cn/nn_cn/leaky_relu_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a0bb19d6ec7f383294f7943389fdd962a6d94bbb --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/leaky_relu_cn.rst @@ -0,0 +1,43 @@ +.. _cn_api_nn_cn_leaky_relu: + +leaky_relu +------------------------------- +.. py:function:: paddle.nn.functional.leaky_relu(x, negative_slope=0.01, name=None) + +leaky_relu激活层。计算公式如下: + +.. math:: + + LeakyReLU(x)= + \left\{ + \begin{aligned} + &x, & & if \ x >= 0 \\ + &negative\_slope * x, & & otherwise \\ + \end{aligned} + \right. 
\\ + +其中,:math:`x` 为输入的 Tensor + +参数 +:::::::::: + - x (Tensor) - 输入的Tensor,数据类型为:float32、float64。 + - negative_slope (float,可选) - :math:`x < 0` 时的斜率。默认值为0.01。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +:::::::::: + ``Tensor`` ,数据类型和形状同 ``x`` 一致。 + +代码示例 +:::::::::: + +.. code-block:: python + + import paddle + import paddle.nn.functional as F + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([-2, 0, 1], 'float32')) + out = F.leaky_relu(x) # [-0.02, 0., 1.] diff --git a/doc/fluid/api_cn/nn_cn/linear_lr_warmup_cn.rst b/doc/fluid/api_cn/nn_cn/linear_lr_warmup_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..115e46d33178a49b368228270c85fe8fc11a4fa4 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/linear_lr_warmup_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_linear_lr_warmup: + +linear_lr_warmup +------------------------------- +:doc_source: paddle.fluid.layers.linear_lr_warmup + + diff --git a/doc/fluid/api_cn/nn_cn/log_loss_cn.rst b/doc/fluid/api_cn/nn_cn/log_loss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9ac27c7643d05c63bb684bd7eb1f2c9e05a3535f --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/log_loss_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_log_loss: + +log_loss +------------------------------- +:doc_source: paddle.fluid.layers.log_loss + + diff --git a/doc/fluid/api_cn/nn_cn/log_sigmoid_cn.rst b/doc/fluid/api_cn/nn_cn/log_sigmoid_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..bf9b685eae444306229bd8968d413042a6008fe8 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/log_sigmoid_cn.rst @@ -0,0 +1,36 @@ +.. _cn_api_nn_cn_log_sigmoid: + +log_sigmoid +------------------------------- + +.. py:function:: paddle.nn.functional.log_sigmoid(x, name=None) + +log_sigmoid激活层。计算公式如下: + +.. math:: + + log\_sigmoid(x) = \log \frac{1}{1 + e^{-x}} + +其中,:math:`x` 为输入的 Tensor + +参数 +:::::::::: + - x (Tensor) - 输入的 ``Tensor`` ,数据类型为:float32、float64。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +:::::::::: + ``Tensor`` ,数据类型和形状同 ``x`` 一致。 + +代码示例 +:::::::::: + +.. code-block:: python + + import paddle + import paddle.nn.functional as F + + paddle.disable_static() + + x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0]) + out = F.log_sigmoid(x) # [-0.313262 -0.126928 -0.0485874 -0.0181499] diff --git a/doc/fluid/api_cn/nn_cn/log_softmax_cn.rst b/doc/fluid/api_cn/nn_cn/log_softmax_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5509a6a4f18b928ffa4426b7bedfda88926f5017 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/log_softmax_cn.rst @@ -0,0 +1,51 @@ +.. _cn_api_nn_cn_log_softmax: + +log_softmax +------------------------------- +.. py:function:: paddle.nn.functional.log_softmax(x, axis=-1, dtype=None, name=None) + +该OP实现了log_softmax层。OP的计算公式如下: + +.. math:: + + Out[i, j] = log(softmax(x)) = log(\frac{\exp(X[i, j])}{\sum_j(exp(X[i, j])}) + +参数 +:::::::::: + - x (Tensor) - 输入的 ``Tensor`` ,数据类型为:float32、float64。 + - axis (int, 可选) - 指定对输入 ``x`` 进行运算的轴。``axis`` 的有效范围是[-D, D),D是输入 ``x`` 的维度, ``axis`` 为负值时与 :math:`axis + D` 等价。默认值为-1。 + - dtype (str|np.dtype|core.VarDesc.VarType, 可选) - 输入Tensor的数据类型。如果指定了 ``dtype`` ,则输入Tensor的数据类型会在计算前转换到 ``dtype`` 。``dtype``可以用来避免数据溢出。如果 ``dtype`` 为None,则输出Tensor的数据类型和 ``x`` 相同。默认值为None。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +:::::::::: + ``Tensor`` ,形状和 ``x`` 相同,数据类型为 ``dtype`` 或者和 ``x`` 相同。 + +代码示例 +:::::::::: + +.. 
code-block:: python + + import paddle + import paddle.nn.functional as F + import numpy as np + + paddle.disable_static() + + x = np.array([[[-2.0, 3.0, -4.0, 5.0], + [3.0, -4.0, 5.0, -6.0], + [-7.0, -8.0, 8.0, 9.0]], + [[1.0, -2.0, -3.0, 4.0], + [-5.0, 6.0, 7.0, -8.0], + [6.0, 7.0, 8.0, 9.0]]]).astype('float32') + x = paddle.to_tensor(x) + out1 = F.log_softmax(x) + out2 = F.log_softmax(x, dtype='float64') + # out1's data type is float32; out2's data type is float64 + # out1 and out2's value is as follows: + # [[[ -7.1278396 -2.1278396 -9.127839 -0.12783948] + # [ -2.1270514 -9.127051 -0.12705144 -11.127051 ] + # [-16.313261 -17.313261 -1.3132617 -0.31326184]] + # [[ -3.0518122 -6.051812 -7.051812 -0.051812 ] + # [-12.313267 -1.3132664 -0.3132665 -15.313267 ] + # [ -3.4401896 -2.4401896 -1.4401896 -0.44018966]]] diff --git a/doc/fluid/api_cn/nn_cn/loss_cn.rst b/doc/fluid/api_cn/nn_cn/loss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..24081ffd6a342bfd826af4be21efc866fb120147 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/loss_cn.rst @@ -0,0 +1,20 @@ +======================= +loss +======================= + + + + +.. toctree:: + :maxdepth: 1 + + loss_cn/BCELoss_cn.rst + loss_cn/BCEWithLogitsLoss_cn.rst + loss_cn/CrossEntropyLoss_cn.rst + loss_cn/L1Loss_cn.rst + loss_cn/MarginRankingLoss_cn.rst + loss_cn/MSELoss_cn.rst + loss_cn/NLLLoss_cn.rst + loss_cn/KLDivLoss_cn.rst + loss_cn/SmoothL1Loss.rst + loss_cn/CTCLoss_cn.rst diff --git a/doc/fluid/api_cn/nn_cn/loss_cn/BCELoss_cn.rst b/doc/fluid/api_cn/nn_cn/loss_cn/BCELoss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0231b6eb5a19db9448376be8f7106dfe083604b7 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/loss_cn/BCELoss_cn.rst @@ -0,0 +1,65 @@ +.. _cn_api_paddle_nn_BCELoss: + +BCELoss +------------------------------- + +.. py:class:: paddle.nn.BCELoss(weight=None, reduction='mean', name=None) + +该接口用于创建一个BCELoss的可调用类,用于计算输入 ``input`` 和标签 ``label`` 之间的二值交叉熵损失值。二值交叉熵损失函数公式如下: + +当 `weight` 不为空时,公式为: + +.. math:: + Out = -1 * weight * (label * log(input) + (1 - label) * log(1 - input)) + +当 `weight` 为空时,公式为: + +.. math:: + Out = -1 * (label * log(input) + (1 - label) * log(1 - input)) + +当 `reduction` 为 `none` 时,直接返回最原始的 `Out` 结果。 + +当 `reduction` 为 `mean` 时,最终的输出结果为: + +.. math:: + Out = MEAN(Out) + +当 `reduction` 为 `sum` 时,最终的输出结果为: + +.. math:: + Out = SUM(Out) + + +.. note:: + 输入数据 ``input`` 一般是 ``sigmoid`` 的输出。因为是二分类,所以标签值 ``label`` 应该是0或者1。 + +参数 +::::::::: + - **weight** (Tensor,可选) - 手动指定每个batch二值交叉熵的权重,如果指定的话,维度必须是一个batch的数据的维度。数据类型是float32, float64。默认值是:None。 + - **reduction** (str,可选) - 指定应用于输出结果的计算方式,可选值有: ``'none'``, ``'mean'``, ``'sum'`` 。默认为 ``'mean'``,计算 `BCELoss` 的均值;设置为 ``'sum'`` 时,计算 `BCELoss` 的总和;设置为 ``'none'`` 时,则返回bce_loss。 + - **name** (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +形状 +::::::::: + - **input** (Tensor) - :math:`(N, *)` , 其中N是batch_size, `*` 是任意其他维度。输入数据 ``input`` 一般是 ``sigmoid`` 的输出。数据类型是float32、float64。 + - **label** (Tensor) - :math:`(N, *)` ,标签 ``label`` 的维度、数据类型与输入 ``input`` 相同。 + - **output** (Tensor) - 输出的Tensor。如果 :attr:`reduction` 是 ``'none'``, 则输出的维度为 :math:`(N, *)` , 与输入 ``input`` 的形状相同。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``, 则输出的维度为 :math:`[1]` 。 + +返回 +::::::::: + 返回计算BCELoss的可调用对象。 + +代码示例 +:::::::::: + +.. 
code-block:: python + + import paddle + + paddle.disable_static() + input = paddle.to_tensor([0.5, 0.6, 0.7], dtype='float32') + label = paddle.to_tensor([1.0, 0.0, 1.0], dtype='float32') + bce_loss = paddle.nn.loss.BCELoss() + output = bce_loss(input, label) + print(output.numpy()) # [0.65537095] + diff --git a/doc/fluid/api_cn/nn_cn/loss_cn/BCEWithLogitsLoss_cn.rst b/doc/fluid/api_cn/nn_cn/loss_cn/BCEWithLogitsLoss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..aef2aae152051cadf3ffca5f19568d8e68f73c3b --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/loss_cn/BCEWithLogitsLoss_cn.rst @@ -0,0 +1,64 @@ +.. _cn_api_paddle_nn_BCEWithLogitsLoss: + +BCEWithLogitsLoss +------------------------------- + +.. py:class:: paddle.nn.BCEWithLogitsLoss(weight=None, reduction='mean', pos_weight=None, name=None) + +该OP可创建一个BCEWithLogitsLoss的可调用类,计算输入 `logit` 和标签 `label` 间的 `binary cross entropy with logits loss` 损失。 + +该OP结合了 `sigmoid` 操作和 :ref:`api_nn_loss_BCELoss` 操作。同时,我们也可以认为该OP是 ``sigmoid_cross_entrop_with_logits`` 和一些 `reduce` 操作的组合。 + +在每个类别独立的分类任务中,该OP可以计算按元素的概率误差。可以将其视为预测数据点的标签,其中标签不是互斥的。例如,一篇新闻文章可以同时关于政治,科技,体育或者同时不包含这些内容。 + +首先,该OP可通过下式计算损失函数: + +.. math:: + Out = -Labels * \log(\sigma(Logit)) - (1 - Labels) * \log(1 - \sigma(Logit)) + +其中 :math:`\sigma(Logit) = \frac{1}{1 + e^{-Logit}}` , 代入上方计算公式中: + +.. math:: + Out = Logit - Logit * Labels + \log(1 + e^{-Logit}) + +为了计算稳定性,防止当 :math:`Logit<0` 时, :math:`e^{-Logit}` 溢出,loss将采用以下公式计算: + +.. math:: + Out = \max(Logit, 0) - Logit * Labels + \log(1 + e^{-\|Logit\|}) + +然后,当 ``weight`` or ``pos_weight`` 不为None的时候,该算子会在输出Out上乘以相应的权重。张量 ``weight`` 给Batch中的每一条数据赋予不同权重,张量 ``pos_weight`` 给每一类的正例添加相应的权重。 + +最后,该算子会添加 `reduce` 操作到前面的输出Out上。当 `reduction` 为 `none` 时,直接返回最原始的 `Out` 结果。当 `reduction` 为 `mean` 时,返回输出的均值 :math:`Out = MEAN(Out)` 。当 `reduction` 为 `sum` 时,返回输出的求和 :math:`Out = SUM(Out)` 。 + +**注意: 因为是二分类任务,所以标签值应该是0或者1。 + +参数 +::::::::: + - **weight** (Tensor,可选) - 手动指定每个batch二值交叉熵的权重,如果指定的话,维度必须是一个batch的数据的维度。数据类型是float32, float64。默认值是:None。 + - **reduction** (str,可选) - 指定应用于输出结果的计算方式,可选值有: ``'none'``, ``'mean'``, ``'sum'`` 。默认为 ``'mean'``,计算 `BCELoss` 的均值;设置为 ``'sum'`` 时,计算 `BCELoss` 的总和;设置为 ``'none'`` 时,则返回原始loss。 + - **pos_weight** (Tensor,可选) - 手动指定正类的权重,必须是与类别数相等长度的向量。数据类型是float32, float64。默认值是:None。 + - **name** (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +形状 +::::::::: + - **logit** (Tensor) - :math:`[N, *]` , 其中N是batch_size, `*` 是任意其他维度。输入数据 ``logit`` 一般是线性层的输出,不需要经过 ``sigmoid`` 层。数据类型是float32、float64。 + - **label** (Tensor) - :math:`[N, *]` ,标签 ``label`` 的维度、数据类型与输入 ``logit`` 相同。 + - **output** (Tensor) - 输出的Tensor。如果 :attr:`reduction` 是 ``'none'``, 则输出的维度为 :math:`[N, *]` , 与输入 ``input`` 的形状相同。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``, 则输出的维度为 :math:`[1]` 。 + +返回 +::::::::: + 返回计算BCEWithLogitsLoss的可调用对象。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + paddle.disable_static() + logit = paddle.to_tensor([5.0, 1.0, 3.0], dtype="float32") + label = paddle.to_tensor([1.0, 0.0, 1.0], dtype="float32") + bce_logit_loss = paddle.nn.BCEWithLogitsLoss() + output = bce_logit_loss(logit, label) + print(output.numpy()) # [0.45618808] + diff --git a/doc/fluid/api_cn/nn_cn/loss_cn/CTCLoss_cn.rst b/doc/fluid/api_cn/nn_cn/loss_cn/CTCLoss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a2ad0c28a4058d8cbdfc447b9f890e0ce9d24185 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/loss_cn/CTCLoss_cn.rst @@ -0,0 +1,78 @@ +CTCLoss +------------------------------- + +.. 
py:class:: paddle.nn.loss.CTCLoss(blank=0, reduction='mean') + +该接口用于计算 CTC loss。该接口的底层调用了第三方 baidu-research::warp-ctc 的实现。 +也可以叫做 softmax with CTC,因为 Warp-CTC 库中插入了 softmax 激活函数来对输入的值进行归一化。 + +参数 +::::::::: + - **blank** (int,可选): - 空格标记的 ID 值,其取值范围为 [0,num_classes+1) 。数据类型支持int32。默认值为0。 + - **reduction** (string,可选): - 指定应用于输出结果的计算方式,可选值有: ``'none'``, ``'mean'``, ``'sum'`` 。设置为 ``'mean'`` 时,对 loss 值除以 label_lengths,并返回所得商的均值;设置为 ``'sum'`` 时,返回 loss 值的总和;设置为 ``'none'`` 时,则直接返回输出的 loss 值。默认值为 ``'mean'``。 + +形状 +::::::::: + - **log_probs** (Tensor): - 经过 padding 的概率序列,其 shape 必须是 [max_logit_length, batch_size, num_classes + 1]。其中 max_logit_length 是最长输入序列的长度。该输入不需要经过 softmax 操作,因为该 OP 的内部对 input 做了 softmax 操作。数据类型仅支持float32。 + - **labels** (Tensor): - 经过 padding 的标签序列,其 shape 为 [batch_size, max_label_length],其中 max_label_length 是最长的 label 序列的长度。数据类型支持int32。 + - **input_lengths** (Tensor): - 表示输入 ``log_probs`` 数据中每个序列的长度,shape为 [batch_size] 。数据类型支持int64。 + - **label_lengths** (Tensor): - 表示 label 中每个序列的长度,shape为 [batch_size] 。数据类型支持int64。 + +返回 +::::::::: +``Tensor``,输入 ``log_probs`` 和标签 ``labels`` 间的 `ctc loss`。如果 :attr:`reduction` 是 ``'none'``,则输出 loss 的维度为 [batch_size]。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``, 则输出Loss的维度为 [1]。数据类型与输入 ``log_probs`` 一致。 + +代码示例 +::::::::: + +.. code-block:: python + + # declarative mode + import numpy as np + import paddle + + # length of the longest logit sequence + max_seq_length = 4 + #length of the longest label sequence + max_label_length = 3 + # number of logit sequences + batch_size = 2 + # class num + class_num = 3 + + np.random.seed(1) + log_probs = np.array([[[4.17021990e-01, 7.20324516e-01, 1.14374816e-04], + [3.02332580e-01, 1.46755889e-01, 9.23385918e-02]], + + [[1.86260208e-01, 3.45560730e-01, 3.96767467e-01], + [5.38816750e-01, 4.19194520e-01, 6.85219526e-01]], + + [[2.04452246e-01, 8.78117442e-01, 2.73875929e-02], + [6.70467496e-01, 4.17304814e-01, 5.58689833e-01]], + + [[1.40386939e-01, 1.98101491e-01, 8.00744593e-01], + [9.68261600e-01, 3.13424170e-01, 6.92322612e-01]], + + [[8.76389146e-01, 8.94606650e-01, 8.50442126e-02], + [3.90547849e-02, 1.69830427e-01, 8.78142476e-01]]]).astype("float32") + labels = np.array([[1, 2, 2], + [1, 2, 2]]).astype("int32") + input_lengths = np.array([5, 5]).astype("int64") + label_lengths = np.array([3, 3]).astype("int64") + + paddle.disable_static() + log_probs = paddle.to_tensor(log_probs) + labels = paddle.to_tensor(labels) + input_lengths = paddle.to_tensor(input_lengths) + label_lengths = paddle.to_tensor(label_lengths) + + loss = paddle.nn.CTCLoss(blank=0, reduction='none')(log_probs, labels, + input_lengths, + label_lengths) + print(loss.numpy()) #[3.9179852 2.9076521] + + loss = paddle.nn.CTCLoss(blank=0, reduction='mean')(log_probs, labels, + input_lengths, + label_lengths) + print(loss.numpy()) #[1.1376063] + diff --git a/doc/fluid/api_cn/nn_cn/loss_cn/CrossEntropyLoss_cn.rst b/doc/fluid/api_cn/nn_cn/loss_cn/CrossEntropyLoss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0535e8690f6902f24fb31e39fa06d61bf4c88117 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/loss_cn/CrossEntropyLoss_cn.rst @@ -0,0 +1,54 @@ +.. _cn_api_nn_loss_CrossEntropyLoss: + +CrossEntropyLoss +------------------------------- + +.. py:function:: paddle.nn.loss.CrossEntropyLoss(weight=None, ignore_index=-100, reduction='mean') + +该OP计算输入input和标签label间的交叉熵损失 ,它结合了 `LogSoftmax` 和 `NLLLoss` 的OP计算,可用于训练一个 `n` 类分类器。 + +如果提供 `weight` 参数的话,它是一个 `1-D` 的tensor, 每个值对应每个类别的权重。 +该损失函数的数学计算公式如下: + + .. 
math:: + loss_j = -\text{input[class]} + + \log\left(\sum_{i=0}^{K}\exp(\text{input}_i)\right), j = 1,..., K + +当 `weight` 不为 `none` 时,损失函数的数学计算公式为: + + .. math:: + loss_j = \text{weight[class]}(-\text{input[class]} + + \log\left(\sum_{i=0}^{K}\exp(\text{input}_i)\right)), j = 1,..., K + + +参数 +::::::::: + - **weight** (Tensor, 可选): - 指定每个类别的权重。其默认为 `None` 。如果提供该参数的话,维度必须为 `C` (类别数)。数据类型为float32或float64。 + - **ignore_index** (int64, 可选): - 指定一个忽略的标签值,此标签值不参与计算。默认值为-100。数据类型为int64。 + - **reduction** (str, 可选): - 指定应用于输出结果的计算方式,数据类型为string,可选值有: `none`, `mean`, `sum` 。默认为 `mean` ,计算 `mini-batch` loss均值。设置为 `sum` 时,计算 `mini-batch` loss的总和。设置为 `none` 时,则返回loss Tensor。 + +形状 +::::::::: + - **input** (Tensor): - 输入 `Tensor` ,数据类型为float32或float64。其形状为 :math:`[N, C]` , 其中 `C` 为类别数。对于多维度的情形下,它的形状为 :math:`[N, C, d_1, d_2, ..., d_k]` ,k >= 1。 + - **label** (Tensor): - 输入input对应的标签值,数据类型为int64。其形状为 :math:`[N]` ,每个元素符合条件:0 <= label[i] <= C-1。对于多维度的情形下,它的形状为 :math:`[N, d_1, d_2, ..., d_k]` ,k >= 1。 + - **output** (Tensor): - 计算 `CrossEntropyLoss` 交叉熵后的损失值。 + + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + input_data = np.random.random([5, 100]).astype("float64") + label_data = np.random.randint(0, 100, size=(5)).astype(np.int64) + weight_data = np.random.random([100]).astype("float64") + input = paddle.to_tensor(input_data) + label = paddle.to_tensor(label_data) + weight = paddle.to_tensor(weight_data) + ce_loss = paddle.nn.loss.CrossEntropyLoss(weight=weight, reduction='mean') + output = ce_loss(input, label) + print(output.numpy()) + diff --git a/doc/fluid/api_cn/nn_cn/loss_cn/KLDivLoss_cn.rst b/doc/fluid/api_cn/nn_cn/loss_cn/KLDivLoss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..49267b8a1afc0b755a06881067949cdd79ebdae2 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/loss_cn/KLDivLoss_cn.rst @@ -0,0 +1,68 @@ +KLDivLoss +------------------------------- + +.. py:class:: paddle.nn.loss.KLDivLoss(reduction='mean') + +该算子计算输入(Input)和输入(Label)之间的Kullback-Leibler散度损失。注意其中输入(Input)应为对数概率值,输入(Label)应为概率值。 + +kL发散损失计算如下: + +.. math:: + + l(input, label) = label * (log(label) - input) + + +当 ``reduction`` 为 ``none`` 时,输出损失与输入(input)形状相同,各点的损失单独计算,不会对结果做reduction 。 + +当 ``reduction`` 为 ``mean`` 时,输出损失为[1]的形状,输出为所有损失的平均值。 + +当 ``reduction`` 为 ``sum`` 时,输出损失为[1]的形状,输出为所有损失的总和。 + +当 ``reduction`` 为 ``batchmean`` 时,输出损失为[N]的形状,N为批大小,输出为所有损失的总和除以批量大小。 + +参数: + - **reduction** (str,可选) - 要应用于输出的reduction类型,可用类型为‘none’ | ‘batchmean’ | ‘mean’ | ‘sum’,‘none’表示无reduction,‘batchmean’ 表示输出的总和除以批大小,‘mean’ 表示所有输出的平均值,‘sum’表示输出的总和。 + +形状: + - **input** (Tensor): - 输入的Tensor,维度是[N, *], 其中N是batch size, `*` 是任意数量的额外维度。数据类型为:float32、float64。 + - **label** (Tensor): - 标签,维度是[N, *], 与 ``input`` 相同。数据类型为:float32、float64。 + - **output** (Tensor): - 输入 ``input`` 和标签 ``label`` 间的kl散度。如果 `reduction` 是 ``'none'``, 则输出Loss的维度为 [N, *], 与输入 ``input`` 相同。如果 `reduction` 是 ``'batchmean'`` 、 ``'mean'`` 或 ``'sum'``, 则输出Loss的维度为 [1]。 + +**代码示例:** + +.. 
code-block:: python + + import paddle + import numpy as np + import paddle.nn as nn + + paddle.disable_static() + + shape = (5, 20) + x = np.random.uniform(-10, 10, shape).astype('float32') + target = np.random.uniform(-10, 10, shape).astype('float32') + + # 'batchmean' reduction, loss shape will be [N] + kldiv_criterion = nn.KLDivLoss(reduction='batchmean') + pred_loss = kldiv_criterion(paddle.to_tensor(x), + paddle.to_tensor(target)) + # shape=[5] + + # 'mean' reduction, loss shape will be [1] + kldiv_criterion = nn.KLDivLoss(reduction='mean') + pred_loss = kldiv_criterion(paddle.to_tensor(x), + paddle.to_tensor(target)) + # shape=[1] + + # 'sum' reduction, loss shape will be [1] + kldiv_criterion = nn.KLDivLoss(reduction='sum') + pred_loss = kldiv_criterion(paddle.to_tensor(x), + paddle.to_tensor(target)) + # shape=[1] + + # 'none' reduction, loss shape is same with X shape + kldiv_criterion = nn.KLDivLoss(reduction='none') + pred_loss = kldiv_criterion(paddle.to_tensor(x), + paddle.to_tensor(target)) + # shape=[5, 20] + diff --git a/doc/fluid/api_cn/nn_cn/loss_cn/L1Loss_cn.rst b/doc/fluid/api_cn/nn_cn/loss_cn/L1Loss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..050a1c0b19073d294bb519a447b0ec2f284bae57 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/loss_cn/L1Loss_cn.rst @@ -0,0 +1,63 @@ +L1Loss +------------------------------- + +.. py:class:: paddle.nn.loss.L1Loss(reduction='mean', name=None) + +该接口用于创建一个L1Loss的可调用类,L1Loss计算输入input和标签label间的 `L1 loss` 损失。 + +该损失函数的数学计算公式如下: + +当 `reduction` 设置为 ``'none'`` 时, + + .. math:: + Out = \lvert input - label\rvert + +当 `reduction` 设置为 ``'mean'`` 时, + + .. math:: + Out = MEAN(\lvert input - label\rvert) + +当 `reduction` 设置为 ``'sum'`` 时, + + .. math:: + Out = SUM(\lvert input - label\rvert) + + +参数 +::::::::: + - **reduction** (str, 可选): - 指定应用于输出结果的计算方式,可选值有: ``'none'``, ``'mean'``, ``'sum'`` 。默认为 ``'mean'``,计算 `L1Loss` 的均值;设置为 ``'sum'`` 时,计算 `L1Loss` 的总和;设置为 ``'none'`` 时,则返回 `L1Loss`。 + - **name** (str,可选): - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状 +::::::::: + - **input** (Tensor): - 输入的Tensor,维度是[N, *], 其中N是batch size, `*` 是任意数量的额外维度。数据类型为:float32、float64、int32、int64。 + - **label** (Tensor): - 标签,维度是[N, *], 与 ``input`` 相同。数据类型为:float32、float64、int32、int64。 + - **output** (Tensor): - 输入 ``input`` 和标签 ``label`` 间的 `L1 loss` 损失。如果 `reduction` 是 ``'none'``, 则输出Loss的维度为 [N, *], 与输入 ``input`` 相同。如果 `reduction` 是 ``'mean'`` 或 ``'sum'``, 则输出Loss的维度为 [1]。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + + paddle.disable_static() + input = paddle.to_tensor([[1.5, 0.8], [0.2, 1.3]]) + label = paddle.to_tensor([[1.7, 1], [0.4, 0.5]]) + + l1_loss = paddle.nn.loss.L1Loss() + output = l1_loss(input, label) + print(output.numpy()) + # [0.35] + + l1_loss = paddle.nn.loss.L1Loss(reduction='sum') + output = l1_loss(input, label) + print(output.numpy()) + # [1.4] + + l1_loss = paddle.nn.loss.L1Loss(reduction='none') + output = l1_loss(input, label) + print(output.numpy()) + # [[0.20000005 0.19999999] + # [0.2 0.79999995]] + diff --git a/doc/fluid/api_cn/nn_cn/loss_cn/MSELoss_cn.rst b/doc/fluid/api_cn/nn_cn/loss_cn/MSELoss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d147671a0d703dbbc4e57bed5d335c5b13ebd43c --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/loss_cn/MSELoss_cn.rst @@ -0,0 +1,70 @@ +MSELoss +------------------------------- + +.. py:function:: paddle.nn.loss.MSELoss(reduction='mean') + +该OP用于计算预测值和目标值的均方差误差。 + +对于预测值input和目标值label: + +当reduction为'none'时: + +.. 
math:: + Out = (input - label)^2 + +当`reduction`为`'mean'`时: + +.. math:: + Out = \operatorname{mean}((input - label)^2) + +当`reduction`为`'sum'`时: + +.. math:: + Out = \operatorname{sum}((input - label)^2) + +参数: + - **reduction** (str, 可选) - 约简方式,可以是 'none' | 'mean' | 'sum'。设为'none'时不使用约简,设为'mean'时返回loss的均值,设为'sum'时返回loss的和。 + +形状: + - **input** (Tensor) - 预测值,维度为 :math:`[N_1, N_2, ..., N_k]` 的多维Tensor。数据类型为float32或float64。 + - **label** (Tensor) - 目标值,维度为 :math:`[N_1, N_2, ..., N_k]` 的多维Tensor。数据类型为float32或float64。 + + +返回:变量(Tensor), 预测值和目标值的均方差, 数值类型与输入相同 + + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle + + + # static graph mode + paddle.enable_static() + mse_loss = paddle.nn.loss.MSELoss() + input = paddle.data(name="input", shape=[1]) + label = paddle.data(name="label", shape=[1]) + place = paddle.CPUPlace() + input_data = np.array([1.5]).astype("float32") + label_data = np.array([1.7]).astype("float32") + + output = mse_loss(input,label) + exe = paddle.static.Executor(place) + exe.run(paddle.static.default_startup_program()) + output_data = exe.run( + paddle.static.default_main_program(), + feed={"input":input_data, "label":label_data}, + fetch_list=[output], + return_numpy=True) + print(output_data) + # [array([0.04000002], dtype=float32)] + + # dynamic graph mode + paddle.disable_static() + input = paddle.to_tensor(input_data) + label = paddle.to_tensor(label_data) + output = mse_loss(input, label) + print(output.numpy()) + # [0.04000002] diff --git a/doc/fluid/api_cn/nn_cn/loss_cn/MarginRankingLoss_cn.rst b/doc/fluid/api_cn/nn_cn/loss_cn/MarginRankingLoss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e82ea4b0bca147c1439e3bef4330321a410ddcc1 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/loss_cn/MarginRankingLoss_cn.rst @@ -0,0 +1,57 @@ +.. _cn_api_nn_loss_MarginRankingLoss: + +MarginRankingLoss +------------------------------- + +.. py:class:: paddle.nn.loss.MarginRankingLoss(margin=0.0, reduction='mean', name=None) + +该接口用于创建一个 ``MarginRankingLoss`` 的可调用类,计算输入input,other 和 标签label间的 `margin rank loss` 损失。 + +该损失函数的数学计算公式如下: + + .. math:: + margin\_rank\_loss = max(0, -label * (input - other) + margin) + +当 `reduction` 设置为 ``'mean'`` 时, + + .. math:: + Out = MEAN(margin\_rank\_loss) + +当 `reduction` 设置为 ``'sum'`` 时, + + .. math:: + Out = SUM(margin\_rank\_loss) + +当 `reduction` 设置为 ``'none'`` 时,直接返回最原始的 `margin_rank_loss` 。 + +参数 +:::::::: + - **margin** (float,可选): - 用于加和的margin值,默认值为0。 + - **reduction** (string,可选): - 指定应用于输出结果的计算方式,可选值有: ``'none'`` 、 ``'mean'`` 、 ``'sum'`` 。如果设置为 ``'none'`` ,则直接返回 最原始的 ``margin_rank_loss`` 。如果设置为 ``'sum'`` ,则返回 ``margin_rank_loss`` 的总和。如果设置为 ``'mean'`` ,则返回 ``margin_rank_loss`` 的平均值。默认值为 ``'none'`` 。 + - **name** (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状 +:::::::: + - **input** - N-D Tensor, 维度是[N,*] 其中N 是batch size,`*` 是任意数量的额外维度,数据类型为float32或float64。 + - **other** - 与 ``input`` 的形状、数据类型相同。 + - **label** - 与 ``input`` 的形状、数据类型相同。 + - **output** - 如果 :attr:`reduction` 为 ``'sum'`` 或者是 ``'mean'`` ,则形状为 :math:`[1]` ,否则shape和输入 `input` 保持一致 。数据类型与 ``input``、 ``other`` 相同。 + +返回 +:::::::: +返回计算MarginRankingLoss的可调用对象。 + +代码示例 +:::::::: + +.. 
code-block:: python + + import paddle + paddle.disable_static() + + input = paddle.to_tensor([[1, 2], [3, 4]], dtype='float32') + other = paddle.to_tensor([[2, 1], [2, 4]], dtype='float32') + label = paddle.to_tensor([[1, -1], [-1, -1]], dtype='float32') + margin_rank_loss = paddle.nn.MarginRankingLoss() + loss = margin_rank_loss(input, other, label) + print(loss.numpy()) # [0.75] diff --git a/doc/fluid/api_cn/nn_cn/loss_cn/NLLLoss_cn.rst b/doc/fluid/api_cn/nn_cn/loss_cn/NLLLoss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..93172ab232dc3384e931743394f2f5d72bdc5990 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/loss_cn/NLLLoss_cn.rst @@ -0,0 +1,70 @@ +.. _cn_api_nn_loss_NLLLoss: + +NLLLoss +------------------------------- + +.. py:class:: paddle.nn.loss.NLLLoss(weight=None, ignore_index=-100, reduction='mean', name=None) + +该接口可创建一个NLLLoss可调用类,计算输入x和标签label间的 `negative log likelihood loss` 损失 ,可用于训练一个 `n` 类分类器。 + +如果提供 `weight` 参数的话,它是一个 `1-D` 的tensor, 里面的值对应类别的权重。当你的训练集样本 +不均衡的话,使用这个参数是非常有用的。 + +该损失函数的数学计算公式如下: + +当 `reduction` 设置为 `none` 时,损失函数的数学计算公式为: + + .. math:: + \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad + l_n = - w_{y_n} x_{n,y_n}, \quad + w_{c} = \text{weight}[c] \cdot \mathbb{1}\{c \not= \text{ignore_index}\}, + +其中 `N` 表示 `batch_size` 。如果 `reduction` 的值不是 `none` (默认为 `mean`),那么此时损失函数 +的数学计算公式为: + + .. math:: + \ell(x, y) = \begin{cases} + \sum_{n=1}^N \frac{1}{\sum_{n=1}^N w_{y_n}} l_n, & + \text{if reduction} = \text{'mean';}\\ + \sum_{n=1}^N l_n, & + \text{if reduction} = \text{'sum'.} + \end{cases} + +参数 +::::::::: + - **weight** (Tensor, 可选): - 手动指定每个类别的权重。其默认为 `None` 。如果提供该参数的话,长度必须为 `num_classes` 。数据类型为float32或float64。 + - **ignore_index** (int64, 可选): - 指定一个忽略的标签值,此标签值不参与计算。默认值为-100。数据类型为int64。 + - **reduction** (str, 可选): - 指定应用于输出结果的计算方式,可选值有: `none`, `mean`, `sum` 。默认为 `mean` ,计算 `mini-batch` loss均值。设置为 `sum` 时,计算 `mini-batch` loss的总和。设置为 `none` 时,则返回loss Tensor。数据类型为string。 + - **name** (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +形状 +::::::::: + - **input** (Tensor): - 输入 `Tensor`, 其形状为 :math:`[N, C]` , 其中 `C` 为类别数。但是对于多维度的情形下,它的形状为 :math:`[N, C, d_1, d_2, ..., d_K]` 。数据类型为float32或float64。 + - **label** (Tensor): - 输入 `input` 对应的标签值。其形状为 :math:`[N,]` 或者 :math:`[N, d_1, d_2, ..., d_K]`, 数据类型为int64。 + - **output** (Tensor): - 输入 `input` 和 `label` 间的 `negative log likelihood loss` 损失。如果 `reduction` 为 `'none'` ,则输出Loss形状为 `[N, *]` 。 如果 `reduction` 为 `'sum'` 或者 `'mean'` ,则输出Loss形状为 `'[1]'` 。 + +代码示例 +::::::::: + +.. 
code-block:: python

+    import paddle
+    import numpy as np
+
+    nll_loss = paddle.nn.loss.NLLLoss()
+    log_softmax = paddle.nn.LogSoftmax(axis=1)
+
+    input_np = np.array([[0.88103855, 0.9908683 , 0.6226845 ],
+                         [0.53331435, 0.07999352, 0.8549948 ],
+                         [0.25879037, 0.39530203, 0.698465  ],
+                         [0.73427284, 0.63575995, 0.18827209],
+                         [0.05689114, 0.0862954 , 0.6325046 ]]).astype(np.float32)
+    label_np = np.array([0, 2, 1, 1, 0]).astype(np.int64)
+
+    place = paddle.CPUPlace()
+    paddle.disable_static(place)
+    input = paddle.to_tensor(input_np)
+    log_out = log_softmax(input)
+    label = paddle.to_tensor(label_np)
+    result = nll_loss(log_out, label)
+    print(result.numpy()) # [1.0720209]
 diff --git a/doc/fluid/api_cn/nn_cn/loss_cn/SmoothL1Loss_cn.rst b/doc/fluid/api_cn/nn_cn/loss_cn/SmoothL1Loss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8a5fee1e07cb45a055fc2eb65b45233d1ca6cbdd --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/loss_cn/SmoothL1Loss_cn.rst
+SmoothL1Loss
+-------------------------------
+
+.. py:class:: paddle.nn.loss.SmoothL1Loss(reduction='mean', delta=1.0, name=None)
+
+该OP计算输入input和标签label间的SmoothL1损失:当逐元素绝对误差低于 delta 时使用平方项,否则退化为L1(线性)项。它在某些情况下可以防止梯度爆炸,也称为Huber损失。该损失函数的数学计算公式如下:
+
+    .. math::
+        loss(x, y) = \frac{1}{n}\sum_{i}z_i
+
+`z_i` 的计算公式如下:
+
+    .. math::
+
+        z_i = \left\{\begin{array}{rcl}
+        0.5(x_i - y_i)^2 & & {if\ |x_i - y_i| < delta} \\
+        delta * |x_i - y_i| - 0.5 * delta^2 & & {otherwise}
+        \end{array}\right.
+
+参数
+::::::::::
+    - **reduction** (str, 可选): - 指定应用于输出结果的计算方式,数据类型为string,可选值有: `none`, `mean`, `sum` 。默认为 `mean` ,计算 `mini-batch` loss均值。设置为 `sum` 时,计算 `mini-batch` loss的总和。设置为 `none` 时,则返回loss Tensor。
+    - **delta** (float, 可选): SmoothL1Loss损失的阈值参数,用于控制Huber损失对线性误差或平方误差的侧重。数据类型为float32。默认值为1.0。
+    - **name** (str, 可选): - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。
+
+调用参数
+::::::::::
+    - **input** (Tensor): 输入 `Tensor`, 数据类型为float32。其形状为 :math:`[N, C]` , 其中 `C` 为类别数。对于多维度的情形下,它的形状为 :math:`[N, C, d_1, d_2, ..., d_k]`,k >= 1。
+    - **label** (Tensor): 输入input对应的标签值,数据类型为float32。数据类型和input相同。
+
+
+
+返回:返回计算 `SmoothL1Loss` 后的损失值。
+
+返回类型:Tensor
+
+**代码示例**
+
+.. code-block:: python
+
+    import paddle
+    import numpy as np
+
+    paddle.disable_static()
+    input_data = np.random.rand(3, 3).astype("float32")
+    label_data = np.random.rand(3, 3).astype("float32")
+    input = paddle.to_tensor(input_data)
+    label = paddle.to_tensor(label_data)
+    loss = paddle.nn.SmoothL1Loss()
+    output = loss(input, label)
+    print(output.numpy())
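+下面再给出一个 ``reduction`` 取不同值的补充示意(仅作演示,基于上文参数说明;随机输入下不约定具体数值):
+
+.. code-block:: python
+
+    import paddle
+    import numpy as np
+
+    paddle.disable_static()
+    input = paddle.to_tensor(np.random.rand(3, 3).astype("float32"))
+    label = paddle.to_tensor(np.random.rand(3, 3).astype("float32"))
+
+    # reduction='mean'(默认)返回所有元素损失的均值,'sum' 返回其总和
+    loss_mean = paddle.nn.SmoothL1Loss()(input, label)
+    loss_sum = paddle.nn.SmoothL1Loss(reduction='sum')(input, label)
+    print(loss_mean.numpy(), loss_sum.numpy())
+
 diff --git a/doc/fluid/api_cn/nn_cn/lrn_cn.rst b/doc/fluid/api_cn/nn_cn/lrn_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..159805388517550fc0e41f53a55a2c21f1821232 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/lrn_cn.rst
+.. _cn_api_nn_cn_lrn:
+
+lrn
+-------------------------------
+:doc_source: paddle.fluid.layers.lrn
+
+
 diff --git a/doc/fluid/api_cn/nn_cn/matrix_nms_cn.rst b/doc/fluid/api_cn/nn_cn/matrix_nms_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..44eb539c03b92839d202f3fed0ac8b37879dbb7f --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/matrix_nms_cn.rst
+.. 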
_cn_api_nn_cn_matrix_nms: + +matrix_nms +------------------------------- +:doc_source: paddle.fluid.layers.matrix_nms diff --git a/doc/fluid/api_cn/nn_cn/maxout_cn.rst b/doc/fluid/api_cn/nn_cn/maxout_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4105eaf3de95d29d36372677412b0b7e65d99315 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/maxout_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_maxout: + +maxout +------------------------------- +:doc_source: paddle.fluid.layers.maxout + + diff --git a/doc/fluid/api_cn/nn_cn/mse_loss_cn.rst b/doc/fluid/api_cn/nn_cn/mse_loss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..504fa424330b8aef355617a5f7aa0f5d10bc2897 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/mse_loss_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_mse_loss: + +mse_loss +------------------------------- +:doc_source: paddle.fluid.layers.mse_loss + + diff --git a/doc/fluid/api_cn/nn_cn/multiclass_nms_cn.rst b/doc/fluid/api_cn/nn_cn/multiclass_nms_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f4908cc23d9b6336bf48ff3144daaa214ba42a50 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/multiclass_nms_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_multiclass_nms: + +multiclass_nms +------------------------------- +:doc_source: paddle.fluid.layers.multiclass_nms + + diff --git a/doc/fluid/api_cn/nn_cn/natural_exp_decay_cn.rst b/doc/fluid/api_cn/nn_cn/natural_exp_decay_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..977ff5bed4e1a1284f3ace60fb649d155e85620d --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/natural_exp_decay_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_natural_exp_decay: + +natural_exp_decay +------------------------------- +:doc_source: paddle.fluid.layers.natural_exp_decay + + diff --git a/doc/fluid/api_cn/nn_cn/noam_decay_cn.rst b/doc/fluid/api_cn/nn_cn/noam_decay_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..28abc482544daef0ff0d93c777b52dcb1e9c7233 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/noam_decay_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_noam_decay: + +noam_decay +------------------------------- +:doc_source: paddle.fluid.layers.noam_decay + + diff --git a/doc/fluid/api_cn/nn_cn/npair_loss_cn.rst b/doc/fluid/api_cn/nn_cn/npair_loss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..067aaa5ef4bc71bea9a0a439a1c53de011307d25 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/npair_loss_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_npair_loss: + +npair_loss +------------------------------- +:doc_source: paddle.fluid.layers.npair_loss + + diff --git a/doc/fluid/api_cn/nn_cn/one_hot_cn.rst b/doc/fluid/api_cn/nn_cn/one_hot_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1b014f78f253639e97fe56822d6d78f0423959c5 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/one_hot_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_one_hot: + +one_hot +------------------------------- +:doc_source: paddle.fluid.one_hot + + diff --git a/doc/fluid/api_cn/nn_cn/pad2d_cn.rst b/doc/fluid/api_cn/nn_cn/pad2d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f38cc50e520f71bef67c0ce50ef3c49da054d3fe --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/pad2d_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_nn_cn_pad2d: + +pad2d +------------------------------- +:doc_source: paddle.fluid.layers.pad2d + + diff --git a/doc/fluid/api_cn/nn_cn/pad_cn.rst b/doc/fluid/api_cn/nn_cn/pad_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d4d7b998b550be9cf85b9c02ab2e05b6414a68fe --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/pad_cn.rst @@ -0,0 +1,97 @@ +.. _cn_api_nn_cn_pad: + +pad +------------------------------- + +.. py:function:: paddle.nn.functional.pad(x, pad, mode="constant", value=0.0, data_format="NCHW", name=None) + +该OP依照 pad 和 mode 属性对input进行 ``pad`` 。 + +参数: + - **x** (Tensor) - Tensor,format可以为 `'NCL'`, `'NLC'`, `'NCHW'`, `'NHWC'`, `'NCDHW'` + 或 `'NDHWC'`,默认值为`'NCHW'`,数据类型支持float16, float32, float64, int32, int64。 + - **pad** (Tensor | List[int32]) - 填充大小。当输入维度为3时,pad的格式为[pad_left, pad_right]; + 当输入维度为4时,pad的格式为[pad_left, pad_right, pad_top, pad_bottom]; + 当输入维度为5时,pad的格式为[pad_left, pad_right, pad_top, pad_bottom, pad_front, pad_back]。 + - **mode** (str) - padding的四种模式,分别为 `'constant'`, `'reflect'`, `'replicate'` 和`'circular'`。 + `'constant'` 表示填充常数 `value`;`'reflect'` 表示填充以input边界值为轴的映射;`'replicate'` 表示 + 填充input边界值;`'circular'`为循环填充input。具体结果可见以下示例。默认值为 `'constant'` 。 + - **value** (float32) - 以 `'constant'` 模式填充区域时填充的值。默认值为0.0。 + - **data_format** (str) - 指定input的format,可为 `'NCL'`, `'NLC'`, `'NCHW'`, `'NHWC'`, `'NCDHW'` + 或 `'NDHWC'`,默认值为`'NCHW'`。 + - **name** (str, 可选) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,缺省值为None。 +返回: 对input进行``pad`` 的结果,数据类型和input相同。 + +返回类型:Tensor + +**示例**: + +.. code-block:: text + + x = [[[[[1., 2., 3.], + [4., 5., 6.]]]]] + + Case 0: + pad = [2, 2, 1, 1, 0, 0], + mode = 'constant' + pad_value = 0 + Out = [[[[[0. 0. 0. 0. 0. 0. 0.] + [0. 0. 1. 2. 3. 0. 0.] + [0. 0. 4. 5. 6. 0. 0.] + [0. 0. 0. 0. 0. 0. 0.]]]]] + + Case 1: + pad = [2, 2, 1, 1, 0, 0], + mode = 'reflect' + Out = [[[[[6. 5. 4. 5. 6. 5. 4.] + [3. 2. 1. 2. 3. 2. 1.] + [6. 5. 4. 5. 6. 5. 4.] + [3. 2. 1. 2. 3. 2. 1.]]]]] + + Case 2: + pad = [2, 2, 1, 1, 0, 0], + mode = 'replicate' + Out = [[[[[1. 1. 1. 2. 3. 3. 3.] + [1. 1. 1. 2. 3. 3. 3.] + [4. 4. 4. 5. 6. 6. 6.] + [4. 4. 4. 5. 6. 6. 6.]]]]] + + Case 3: + pad = [2, 2, 1, 1, 0, 0], + mode = 'circular' + Out = [[[[[5. 6. 4. 5. 6. 4. 5.] + [2. 3. 1. 2. 3. 1. 2.] + [5. 6. 4. 5. 6. 4. 5.] + [2. 3. 1. 2. 3. 1. 2.]]]]] + +**代码示例:** + +.. code-block:: python + + import numpy as np + import paddle + import paddle.nn.functional as F + + paddle.disable_static() + + # example 1 + x_shape = (1, 1, 3) + x = np.arange(np.prod(x_shape), dtype=np.float32).reshape(x_shape) + 1 + tensor_x = paddle.to_tensor(x) + y = F.pad(tensor_x, [2, 3], value=1, mode='constant') + print(y.numpy()) + # [[[1. 1. 1. 2. 3. 1. 1. 1.]]] + + # example 2 + x_shape = (1, 1, 2, 3) + x = np.arange(np.prod(x_shape), dtype=np.float32).reshape(x_shape) + 1 + tensor_x = paddle.to_tensor(x) + y = F.pad(tensor_x, [1, 2, 1, 1], value=1, mode='circular') + print(y.numpy()) + # [[[[6. 4. 5. 6. 4. 5.] + # [3. 1. 2. 3. 1. 2.] + # [6. 4. 5. 6. 4. 5.] + # [3. 1. 2. 3. 1. 2.]]]] + + + diff --git a/doc/fluid/api_cn/nn_cn/pad_constant_like_cn.rst b/doc/fluid/api_cn/nn_cn/pad_constant_like_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f313f9abbe8ba8c71f123182b5c0e3310f615f38 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/pad_constant_like_cn.rst @@ -0,0 +1,7 @@ +.. 
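+作为上文 ``paddle.nn.functional.pad`` 的补充,下面的示意代码演示 ``'reflect'`` 模式在4-D NCHW输入上的效果(仅作演示,注释中的结果按上文 Case 1 的反射规则推算):
+
+.. code-block:: python
+
+    import numpy as np
+    import paddle
+    import paddle.nn.functional as F
+
+    paddle.disable_static()
+
+    x_shape = (1, 1, 2, 3)
+    x = np.arange(np.prod(x_shape), dtype=np.float32).reshape(x_shape) + 1
+    tensor_x = paddle.to_tensor(x)
+    # pad = [左, 右, 上, 下],reflect 模式以边界为轴做镜像,不重复边界值
+    y = F.pad(tensor_x, [1, 1, 1, 1], mode='reflect')
+    print(y.numpy())
+    # [[[[5. 4. 5. 6. 5.]
+    #    [2. 1. 2. 3. 2.]
+    #    [5. 4. 5. 6. 5.]
+    #    [2. 1. 2. 3. 2.]]]]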
_cn_api_nn_cn_pad_constant_like: + +pad_constant_like +------------------------------- +:doc_source: paddle.fluid.layers.pad_constant_like + + diff --git a/doc/fluid/api_cn/nn_cn/piecewise_decay_cn.rst b/doc/fluid/api_cn/nn_cn/piecewise_decay_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..83776f427873ba63a5c1ff55fd3656256790f388 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/piecewise_decay_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_piecewise_decay: + +piecewise_decay +------------------------------- +:doc_source: paddle.fluid.layers.piecewise_decay + + diff --git a/doc/fluid/api_cn/nn_cn/pixel_shuffle_cn.rst b/doc/fluid/api_cn/nn_cn/pixel_shuffle_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1987ae08b1f81ebbe0c1c2b0827a9a9b9e5db4c1 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/pixel_shuffle_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_pixel_shuffle: + +pixel_shuffle +------------------------------- +:doc_source: paddle.fluid.layers.pixel_shuffle + + diff --git a/doc/fluid/api_cn/nn_cn/polygon_box_transform_cn.rst b/doc/fluid/api_cn/nn_cn/polygon_box_transform_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..10048525e6fadeeb643160271a5d1ad7628b06ea --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/polygon_box_transform_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_polygon_box_transform: + +polygon_box_transform +------------------------------- +:doc_source: paddle.fluid.layers.polygon_box_transform + + diff --git a/doc/fluid/api_cn/nn_cn/polynomial_decay_cn.rst b/doc/fluid/api_cn/nn_cn/polynomial_decay_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..af59b6a4049186f232df7fd109cc71f9085bf796 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/polynomial_decay_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_polynomial_decay: + +polynomial_decay +------------------------------- +:doc_source: paddle.fluid.layers.polynomial_decay + + diff --git a/doc/fluid/api_cn/nn_cn/pool3d_cn.rst b/doc/fluid/api_cn/nn_cn/pool3d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..811d442825c122486287db32604969c9a5bbba53 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/pool3d_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_pool3d: + +pool3d +------------------------------- +:doc_source: paddle.fluid.layers.pool3d + + diff --git a/doc/fluid/api_cn/nn_cn/prelu_cn.rst b/doc/fluid/api_cn/nn_cn/prelu_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..caa1681a91964f84679793a7a795a812bc839a2a --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/prelu_cn.rst @@ -0,0 +1,50 @@ +.. _cn_api_nn_cn_prelu: + +prelu +------------------------------- +.. py:function:: paddle.nn.functional.prelu(x, weight, name=None): + +prelu激活层(PRelu Activation Operator)。计算公式如下: + +.. math:: + + prelu(x) = max(0, x) + weight * min(0, x) + +其中,:math:`x` 和 `weight` 为输入的 Tensor + +参数 +:::::::::: + - x (Tensor) - 输入的 ``Tensor`` ,数据类型为:float32、float64。 + - weight (Tensor) - 可训练参数,数据类型同``x`` 一致,形状支持2种:[1] 或者 [in],其中`in`为输入的通道数。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +:::::::::: + ``Tensor`` ,数据类型和形状同 ``x`` 一致。 + +代码示例 +::::::::: + +.. 
code-block:: python + + import paddle + import paddle.nn.functional as F + import numpy as np + + paddle.disable_static() + + data = np.array([[[[-2.0, 3.0, -4.0, 5.0], + [ 3.0, -4.0, 5.0, -6.0], + [-7.0, -8.0, 8.0, 9.0]], + [[ 1.0, -2.0, -3.0, 4.0], + [-5.0, 6.0, 7.0, -8.0], + [ 6.0, 7.0, 8.0, 9.0]]]], 'float32') + x = paddle.to_tensor(data) + w = paddle.to_tensor(np.array([0.25]).astype('float32')) + out = F.prelu(x, w) + # [[[[-0.5 , 3. , -1. , 5. ], + # [ 3. , -1. , 5. , -1.5 ], + # [-1.75, -2. , 8. , 9. ]], + # [[ 1. , -0.5 , -0.75, 4. ], + # [-1.25, 6. , 7. , -2. ], + # [ 6. , 7. , 8. , 9. ]]]] \ No newline at end of file diff --git a/doc/fluid/api_cn/nn_cn/prior_box_cn.rst b/doc/fluid/api_cn/nn_cn/prior_box_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1aa197c3fa3d9fc94f0da17bc65c60278c65e8a7 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/prior_box_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_prior_box: + +prior_box +------------------------------- +:doc_source: paddle.fluid.layers.prior_box + + diff --git a/doc/fluid/api_cn/nn_cn/prroi_pool_cn.rst b/doc/fluid/api_cn/nn_cn/prroi_pool_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..734231b3146ac8a0dccc120415aeb327d3556ce3 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/prroi_pool_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_prroi_pool: + +prroi_pool +------------------------------- +:doc_source: paddle.fluid.layers.prroi_pool + + diff --git a/doc/fluid/api_cn/nn_cn/psroi_pool_cn.rst b/doc/fluid/api_cn/nn_cn/psroi_pool_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1984083ffb06b24472d0afedc1edbdacf95ff2c1 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/psroi_pool_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_psroi_pool: + +psroi_pool +------------------------------- +:doc_source: paddle.fluid.layers.psroi_pool + + diff --git a/doc/fluid/api_cn/nn_cn/random_crop_cn.rst b/doc/fluid/api_cn/nn_cn/random_crop_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f6f8864221c2b1800e987ab41666dc14ea484991 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/random_crop_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_random_crop: + +random_crop +------------------------------- +:doc_source: paddle.fluid.layers.random_crop + + diff --git a/doc/fluid/api_cn/nn_cn/rank_loss_cn.rst b/doc/fluid/api_cn/nn_cn/rank_loss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b25b23637ffc6eaf1e951ccb89c27c6b46e24eb7 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/rank_loss_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_rank_loss: + +rank_loss +------------------------------- +:doc_source: paddle.fluid.layers.rank_loss + + diff --git a/doc/fluid/api_cn/nn_cn/relu6_cn.rst b/doc/fluid/api_cn/nn_cn/relu6_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b4c98f8fce5e63674f35bda7951018e9eb2d1fa7 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/relu6_cn.rst @@ -0,0 +1,37 @@ +.. _cn_api_nn_cn_relu6: + +relu6 +------------------------------- + +.. py:function:: paddle.nn.functional.relu6(x, name=None) + +relu6激活层 + +.. math:: + + relu6(x) = min(max(0,x), 6) + +其中,:math:`x` 为输入的 Tensor + +参数: +:::::::::: + - x (Tensor) - 输入的 ``Tensor`` ,数据类型为:float32、float64。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +:::::::::: + ``Tensor`` ,数据类型和形状同 ``x`` 一致。 + +代码示例 +:::::::::: + +.. 
code-block:: python + + import paddle + import paddle.nn.functional as F + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([-1, 0.3, 6.5])) + out = F.relu6(x) # [0, 0.3, 6] diff --git a/doc/fluid/api_cn/nn_cn/relu_cn.rst b/doc/fluid/api_cn/nn_cn/relu_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..447d0bb01514d42aa74f6265d52cd8ed42c40880 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/relu_cn.rst @@ -0,0 +1,38 @@ +.. _cn_api_nn_cn_relu: + +relu +------------------------------- + +.. py:function:: paddle.nn.functional.relu(x, name=None) + +relu激活层(Rectified Linear Unit)。计算公式如下: + +.. math:: + + relu(x) = max(0, x) + +其中,:math:`x` 为输入的 Tensor + + +参数 +:::::::::: + - x (Tensor) - 输入的 ``Tensor`` ,数据类型为:float32、float64。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +:::::::::: + ``Tensor`` ,数据类型和形状同 ``x`` 一致。 + +代码示例 +:::::::::: + +.. code-block:: python + + import paddle + import paddle.nn.functional as F + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([-2, 0, 1]).astype('float32')) + out = F.relu(x) # [0., 0., 1.] diff --git a/doc/fluid/api_cn/nn_cn/remove_weight_norm_cn.rst b/doc/fluid/api_cn/nn_cn/remove_weight_norm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3c14b5b625d9cdff1024ae9fcafb0a5057cdd6a0 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/remove_weight_norm_cn.rst @@ -0,0 +1,29 @@ +.. _cn_api_nn_cn_remove_weight_norm: + +remove_weight_norm +------------------------------- + +.. py:function:: paddle.nn.utils.remove_weight_norm(layer, name='weight') + +移除传入 ``layer`` 中的权重归一化。 + +参数: + - **layer** (paddle.nn.Layer) - 要添加权重归一化的层。 + - **name** (str, 可选) - 权重参数的名字。默认:'weight'. + +返回: + ``Layer`` , 移除权重归一化hook之后的层 + +**代码示例** + +.. code-block:: python + + import paddle + from paddle.nn import Conv2d + from paddle.nn.utils import weight_norm, remove_weight_norm + paddle.disable_static() + conv = Conv2d(3, 5, 3) + wn = weight_norm(conv) + remove_weight_norm(conv) + # print(conv.weight_g) + # AttributeError: 'Conv2D' object has no attribute 'weight_g' diff --git a/doc/fluid/api_cn/nn_cn/resize_bilinear_cn.rst b/doc/fluid/api_cn/nn_cn/resize_bilinear_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..df8ffc7437119cf45e3861b979eb6b4569421502 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/resize_bilinear_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_resize_bilinear: + +resize_bilinear +------------------------------- +:doc_source: paddle.fluid.layers.resize_bilinear + + diff --git a/doc/fluid/api_cn/nn_cn/resize_nearest_cn.rst b/doc/fluid/api_cn/nn_cn/resize_nearest_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7e0bf96341cbb8a69d886e9de6d2cda77428ea20 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/resize_nearest_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_resize_nearest: + +resize_nearest +------------------------------- +:doc_source: paddle.fluid.layers.resize_nearest + + diff --git a/doc/fluid/api_cn/nn_cn/resize_trilinear_cn.rst b/doc/fluid/api_cn/nn_cn/resize_trilinear_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8bee153d126d4790c9c3ac8548fe2504cdeb300c --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/resize_trilinear_cn.rst @@ -0,0 +1,7 @@ +.. 
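+作为上文 ``remove_weight_norm`` 的补充,下面的示意代码演示权重归一化添加与移除前后参数的变化(仅作演示,属性名 weight_g/weight_v 以上文示例中的注释为准):
+
+.. code-block:: python
+
+    import paddle
+    from paddle.nn import Conv2d
+    from paddle.nn.utils import weight_norm, remove_weight_norm
+
+    paddle.disable_static()
+    conv = Conv2d(3, 5, 3)
+    weight_norm(conv)
+    # 施加权重归一化后,权重被分解为 weight_g 和 weight_v 两个参数
+    print(hasattr(conv, 'weight_g'), hasattr(conv, 'weight_v'))  # True True
+
+    remove_weight_norm(conv)
+    # 移除权重归一化后,重新合并为单个 weight 参数
+    print(hasattr(conv, 'weight_g'))  # False
+    print(conv.weight.shape)          # 合并回的权重形状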
_cn_api_nn_cn_resize_trilinear: + +resize_trilinear +------------------------------- +:doc_source: paddle.fluid.layers.resize_trilinear + + diff --git a/doc/fluid/api_cn/nn_cn/retinanet_detection_output_cn.rst b/doc/fluid/api_cn/nn_cn/retinanet_detection_output_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d87740dfa1c6860826031fe47cee104f126fbe21 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/retinanet_detection_output_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_retinanet_detection_output: + +retinanet_detection_output +------------------------------- +:doc_source: paddle.fluid.layers.retinanet_detection_output + + diff --git a/doc/fluid/api_cn/nn_cn/retinanet_target_assign_cn.rst b/doc/fluid/api_cn/nn_cn/retinanet_target_assign_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..111957f4200fd6474d5cdeae86f58d58b18041cd --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/retinanet_target_assign_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_retinanet_target_assign: + +retinanet_target_assign +------------------------------- +:doc_source: paddle.fluid.layers.retinanet_target_assign + + diff --git a/doc/fluid/api_cn/nn_cn/roi_align_cn.rst b/doc/fluid/api_cn/nn_cn/roi_align_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1a1e1ae2c40f1b173f7c1ee23d47e7627c8a2ce7 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/roi_align_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_roi_align: + +roi_align +------------------------------- +:doc_source: paddle.fluid.layers.roi_align + + diff --git a/doc/fluid/api_cn/nn_cn/roi_perspective_transform_cn.rst b/doc/fluid/api_cn/nn_cn/roi_perspective_transform_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5a760666721bde44c33411d6ef0a6e07624eed4c --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/roi_perspective_transform_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_roi_perspective_transform: + +roi_perspective_transform +------------------------------- +:doc_source: paddle.fluid.layers.roi_perspective_transform + + diff --git a/doc/fluid/api_cn/nn_cn/roi_pool_cn.rst b/doc/fluid/api_cn/nn_cn/roi_pool_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c14cf783069d3149387c1630e17930b8aac712ab --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/roi_pool_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_roi_pool: + +roi_pool +------------------------------- +:doc_source: paddle.fluid.layers.roi_pool + + diff --git a/doc/fluid/api_cn/nn_cn/row_conv_cn.rst b/doc/fluid/api_cn/nn_cn/row_conv_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..163e1d19cf71221c9d7dcd2e54f89b9cf2446137 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/row_conv_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_row_conv: + +row_conv +------------------------------- +:doc_source: paddle.fluid.layers.row_conv + + diff --git a/doc/fluid/api_cn/nn_cn/rpn_target_assign_cn.rst b/doc/fluid/api_cn/nn_cn/rpn_target_assign_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c2513ed65923dd453d9bf8b7bc8f11f63e5d0dc2 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/rpn_target_assign_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_nn_cn_rpn_target_assign: + +rpn_target_assign +------------------------------- +:doc_source: paddle.fluid.layers.rpn_target_assign + + diff --git a/doc/fluid/api_cn/nn_cn/sampled_softmax_with_cross_entropy_cn.rst b/doc/fluid/api_cn/nn_cn/sampled_softmax_with_cross_entropy_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..19e5a6abe90ac24cdc744ac11b7503d3c2dab699 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/sampled_softmax_with_cross_entropy_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_sampled_softmax_with_cross_entropy: + +sampled_softmax_with_cross_entropy +------------------------------- +:doc_source: paddle.fluid.layers.sampled_softmax_with_cross_entropy + + diff --git a/doc/fluid/api_cn/nn_cn/selu_cn.rst b/doc/fluid/api_cn/nn_cn/selu_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a52d54bad21e955dbbd74dcd853d54aae32e1a56 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/selu_cn.rst @@ -0,0 +1,43 @@ +.. _cn_api_nn_cn_selu: + +selu +------------------------------- + +.. py:function:: paddle.nn.functional.selu(x, scale=1.0507009873554804934193349852946, alpha=1.6732632423543772848170429916717, name=None) + +selu激活层 + +.. math:: + + selu(x)= scale * + \begin{cases} + x, \text{if } x > 0 \\ + alpha * e^{x} - alpha, \text{if } x <= 0 + \end{cases} + +其中,:math:`x` 为输入的 Tensor + +参数: +:::::::::: + - x (Tensor) - 输入的 ``Tensor`` ,数据类型为:float32、float64。 + - scale (float, 可选) - selu激活计算公式中的scale值,必须大于1.0。默认值为1.0507009873554804934193349852946。 + - alpha (float, 可选) - selu激活计算公式中的alpha值,必须大于等于零。默认值为1.6732632423543772848170429916717。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +:::::::::: + ``Tensor`` ,数据类型和形状同 ``x`` 一致。 + +代码示例 +:::::::::: + +.. code-block:: python + + import paddle + import paddle.nn.functional as F + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([[0.0, 1.0],[2.0, 3.0]])) + out = F.selu(x) # [[0, 1.050701],[2.101402, 3.152103]] diff --git a/doc/fluid/api_cn/nn_cn/shuffle_channel_cn.rst b/doc/fluid/api_cn/nn_cn/shuffle_channel_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..287657413a2cbcd7e5a44ec1500c98d239d5e4b2 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/shuffle_channel_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_shuffle_channel: + +shuffle_channel +------------------------------- +:doc_source: paddle.fluid.layers.shuffle_channel + + diff --git a/doc/fluid/api_cn/nn_cn/sigmoid_cross_entropy_with_logits_cn.rst b/doc/fluid/api_cn/nn_cn/sigmoid_cross_entropy_with_logits_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0f98b42aef9faf9039444f12abbac01e1b95298c --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/sigmoid_cross_entropy_with_logits_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_sigmoid_cross_entropy_with_logits: + +sigmoid_cross_entropy_with_logits +------------------------------- +:doc_source: paddle.fluid.layers.sigmoid_cross_entropy_with_logits + + diff --git a/doc/fluid/api_cn/nn_cn/sigmoid_focal_loss_cn.rst b/doc/fluid/api_cn/nn_cn/sigmoid_focal_loss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..874993be90f4848002f08ff9d97e218c3e28b067 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/sigmoid_focal_loss_cn.rst @@ -0,0 +1,7 @@ +.. 
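+作为上文 ``selu`` 的补充,下面的示意代码使用自定义的 ``scale`` 与 ``alpha``(仅作演示,取值为示例假设,需满足 scale > 1.0、alpha >= 0 的约束):
+
+.. code-block:: python
+
+    import paddle
+    import paddle.nn.functional as F
+    import numpy as np
+
+    paddle.disable_static()
+
+    x = paddle.to_tensor(np.array([[0.0, 1.0], [2.0, 3.0]]))
+    # 正数部分为 scale * x;x <= 0 部分为 scale * (alpha * e^x - alpha)
+    out = F.selu(x, scale=1.2, alpha=1.0)  # [[0., 1.2], [2.4, 3.6]]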
_cn_api_nn_cn_sigmoid_focal_loss:
+
+sigmoid_focal_loss
+-------------------------------
+:doc_source: paddle.fluid.layers.sigmoid_focal_loss
+
+
 diff --git a/doc/fluid/api_cn/nn_cn/similarity_focus_cn.rst b/doc/fluid/api_cn/nn_cn/similarity_focus_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1f0b2f2eb30a05bf3250cd6112d3b60ab78dd156 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/similarity_focus_cn.rst
+.. _cn_api_nn_cn_similarity_focus:
+
+similarity_focus
+-------------------------------
+:doc_source: paddle.fluid.layers.similarity_focus
+
+
 diff --git a/doc/fluid/api_cn/nn_cn/smooth_l1_cn.rst b/doc/fluid/api_cn/nn_cn/smooth_l1_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0d4431b8cfc1b4f5039c6c9eff67066d83666e24 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/smooth_l1_cn.rst
+.. _cn_api_nn_cn_smooth_l1:
+
+smooth_l1
+-------------------------------
+:doc_source: paddle.fluid.layers.smooth_l1
+
+
 diff --git a/doc/fluid/api_cn/nn_cn/smooth_l1_loss_cn.rst b/doc/fluid/api_cn/nn_cn/smooth_l1_loss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..50e419b69b7e9d55ff26b5668b3de02e7b098128 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/smooth_l1_loss_cn.rst
+smooth_l1_loss
+-------------------------------
+
+.. py:function:: paddle.nn.functional.smooth_l1_loss(input, label, reduction='mean', delta=1.0, name=None)
+
+该OP计算输入input和标签label间的SmoothL1损失:当逐元素绝对误差低于 delta 时使用平方项,否则退化为L1(线性)项。它在某些情况下可以防止梯度爆炸,也称为Huber损失。该损失函数的数学计算公式如下:
+
+    .. math::
+        loss(x, y) = \frac{1}{n}\sum_{i}z_i
+
+`z_i` 的计算公式如下:
+
+    .. math::
+
+        z_i = \left\{\begin{array}{rcl}
+        0.5(x_i - y_i)^2 & & {if\ |x_i - y_i| < delta} \\
+        delta * |x_i - y_i| - 0.5 * delta^2 & & {otherwise}
+        \end{array}\right.
+
+参数
+::::::::::
+    - **input** (Tensor): 输入 `Tensor`, 数据类型为float32。其形状为 :math:`[N, C]` , 其中 `C` 为类别数。对于多维度的情形下,它的形状为 :math:`[N, C, d_1, d_2, ..., d_k]`,k >= 1。
+    - **label** (Tensor): 输入input对应的标签值,数据类型为float32。数据类型和input相同。
+    - **reduction** (str, 可选): - 指定应用于输出结果的计算方式,数据类型为string,可选值有: `none`, `mean`, `sum` 。默认为 `mean` ,计算 `mini-batch` loss均值。设置为 `sum` 时,计算 `mini-batch` loss的总和。设置为 `none` 时,则返回loss Tensor。
+    - **delta** (float, 可选): SmoothL1Loss损失的阈值参数,用于控制Huber损失对线性误差或平方误差的侧重。数据类型为float32。默认值为1.0。
+    - **name** (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。
+
+
+
+返回:返回计算 `smooth_l1_loss` 后的损失值。
+
+返回类型:Tensor
+
+**代码示例**
+
+.. code-block:: python
+
+    import paddle
+    import numpy as np
+
+    paddle.disable_static()
+    input_data = np.random.rand(3, 3).astype("float32")
+    label_data = np.random.rand(3, 3).astype("float32")
+    input = paddle.to_tensor(input_data)
+    label = paddle.to_tensor(label_data)
+    output = paddle.nn.functional.smooth_l1_loss(input, label)
+    print(output.numpy())
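+上文 ``delta`` 参数的作用可以用下面的补充示意来观察:``delta`` 越小,越早从平方项切换到线性(L1)项(仅作演示,随机输入下不约定具体数值):
+
+.. code-block:: python
+
+    import paddle
+    import numpy as np
+
+    paddle.disable_static()
+    input = paddle.to_tensor(np.random.rand(3, 3).astype("float32"))
+    label = paddle.to_tensor(np.random.rand(3, 3).astype("float32"))
+
+    out_default = paddle.nn.functional.smooth_l1_loss(input, label)
+    out_small_delta = paddle.nn.functional.smooth_l1_loss(input, label, delta=0.5)
+    print(out_default.numpy(), out_small_delta.numpy())
+
 diff --git a/doc/fluid/api_cn/nn_cn/soft_relu_cn.rst b/doc/fluid/api_cn/nn_cn/soft_relu_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..31b24cc252c0d32c00da833ba839e2457cca8269 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/soft_relu_cn.rst
+.. _cn_api_nn_cn_soft_relu:
+
+soft_relu
+-------------------------------
+:doc_source: paddle.fluid.layers.soft_relu
+
+
 diff --git a/doc/fluid/api_cn/nn_cn/softmax_cn.rst b/doc/fluid/api_cn/nn_cn/softmax_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..74b1605f2b433c57bfae76b4629341450d451677 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/softmax_cn.rst
+.. 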
_cn_api_nn_cn_softmax: + +softmax +------------------------------- +.. py:function:: paddle.nn.functional.softmax(x, axis=-1, dtype=None, name=None) + + +该OP实现了softmax层。OP的计算过程如下: + +步骤1:输入 ``x`` 的 ``axis`` 维会被置换到最后一维; + +步骤2:将输入 ``x`` 在逻辑上变换为二维矩阵。二维矩阵第一维(列长度)是输入除最后一维之外的其他维度值的乘积,第二维(行长度)和输入 ``axis`` 维的长度相同;对于矩阵的每一行,softmax操作对其进行重新缩放,使得该行的每个元素在 \[0,1\] 范围内,并且总和为1; + +步骤3:softmax操作执行完成后,执行步骤1和步骤2的逆运算,将二维矩阵恢复至和输入 ``x`` 相同的维度。 + +上述步骤2中softmax操作计算过程如下: + + - 对于二维矩阵的每一行,计算K维向量(K是输入第 ``axis`` 维的长度)中指定位置的指数值和全部位置指数值的和。 + + - 指定位置指数值与全部位置指数值之和的比值就是softmax操作的输出。 + +对于二维矩阵中的第i行和第j列有: + +.. math:: + + softmax[i, j] = \frac{\exp(x[i, j])}{\sum_j(exp(x[i, j])} + +- 示例1(矩阵一共有三维。axis = -1,表示沿着最后一维(即第三维)做softmax操作) + +.. code-block:: text + + # input + + x.shape = [2, 3, 4] + + x.data = [[[2.0, 3.0, 4.0, 5.0], + [3.0, 4.0, 5.0, 6.0], + [7.0, 8.0, 8.0, 9.0]], + [[1.0, 2.0, 3.0, 4.0], + [5.0, 6.0, 7.0, 8.0], + [6.0, 7.0, 8.0, 9.0]]] + + axis = -1 + + # output + + out.shape = [2, 3, 4] + + out.data = [[[0.0320586 , 0.08714432, 0.23688282, 0.64391426], + [0.0320586 , 0.08714432, 0.23688282, 0.64391426], + [0.07232949, 0.19661193, 0.19661193, 0.53444665]], + [[0.0320586 , 0.08714432, 0.23688282, 0.64391426], + [0.0320586 , 0.08714432, 0.23688282, 0.64391426], + [0.0320586 , 0.08714432, 0.23688282, 0.64391426]]] + +- 示例2(矩阵一共有三维。axis = 1,表示沿着第二维做softmax操作) + +.. code-block:: text + + # input + + x.shape = [2, 3, 4] + + x.data = [[[2.0, 3.0, 4.0, 5.0], + [3.0, 4.0, 5.0, 6.0], + [7.0, 8.0, 8.0, 9.0]], + [[1.0, 2.0, 3.0, 4.0], + [5.0, 6.0, 7.0, 8.0], + [6.0, 7.0, 8.0, 9.0]]] + + axis = 1 + + # output + + out.shape = [2, 3, 4] + + out.data = [[[0.00657326, 0.00657326, 0.01714783, 0.01714783], + [0.01786798, 0.01786798, 0.04661262, 0.04661262], + [0.97555875, 0.97555875, 0.93623955, 0.93623955]], + [[0.00490169, 0.00490169, 0.00490169, 0.00490169], + [0.26762315, 0.26762315, 0.26762315, 0.26762315], + [0.72747516, 0.72747516, 0.72747516, 0.72747516]]] + + +参数 +:::::::::: + - x (Tensor) - 输入的 ``Tensor`` ,数据类型为:float32、float64。 + - axis (int, 可选) - 指定对输入 ``x`` 进行运算的轴。``axis`` 的有效范围是[-D, D),D是输入 ``x`` 的维度, ``axis`` 为负值时与 :math:`axis + D` 等价。默认值为-1。 + - dtype (str|np.dtype|core.VarDesc.VarType, 可选) - 输入Tensor的数据类型。如果指定了 ``dtype`` ,则输入Tensor的数据类型会在计算前转换到 ``dtype`` 。``dtype``可以用来避免数据溢出。如果 ``dtype`` 为None,则输出Tensor的数据类型和 ``x`` 相同。默认值为None。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +:::::::::: + ``Tensor`` ,形状和 ``x`` 相同,数据类型为 ``dtype`` 或者和 ``x`` 相同。 + +代码示例 +:::::::::: + +.. 
code-block:: python + + import paddle + import paddle.nn.functional as F + import numpy as np + + paddle.disable_static() + + x = np.array([[[2.0, 3.0, 4.0, 5.0], + [3.0, 4.0, 5.0, 6.0], + [7.0, 8.0, 8.0, 9.0]], + [[1.0, 2.0, 3.0, 4.0], + [5.0, 6.0, 7.0, 8.0], + [6.0, 7.0, 8.0, 9.0]]], 'float32') + x = paddle.to_tensor(x) + out1 = F.softmax(x) + out2 = F.softmax(x, dtype='float64') + # out1's data type is float32; out2's data type is float64 + # out1 and out2's value is as follows: + # [[[0.0320586 , 0.08714432, 0.23688282, 0.64391426], + # [0.0320586 , 0.08714432, 0.23688282, 0.64391426], + # [0.07232949, 0.19661193, 0.19661193, 0.53444665]], + # [[0.0320586 , 0.08714432, 0.23688282, 0.64391426], + # [0.0320586 , 0.08714432, 0.23688282, 0.64391426], + # [0.0320586 , 0.08714432, 0.23688282, 0.64391426]]] diff --git a/doc/fluid/api_cn/nn_cn/softmax_with_cross_entropy_cn.rst b/doc/fluid/api_cn/nn_cn/softmax_with_cross_entropy_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f8b85908c16f4651c0ea9019d08ed695c1b7aec0 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/softmax_with_cross_entropy_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_softmax_with_cross_entropy: + +softmax_with_cross_entropy +------------------------------- +:doc_source: paddle.fluid.layers.softmax_with_cross_entropy + + diff --git a/doc/fluid/api_cn/nn_cn/softplus_cn.rst b/doc/fluid/api_cn/nn_cn/softplus_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..dabf852e2ade608c739fa9e0900935132c7b7b2d --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/softplus_cn.rst @@ -0,0 +1,40 @@ +.. _cn_api_nn_cn_softplus: + +softplus +------------------------------- + +.. py:function:: paddle.nn.functional.softplus(x, beta=1, threshold=20, name=None) + +softplus激活层 + +.. math:: + + softplus(x) = \frac{1}{beta} * \log(1 + e^{beta * x}) \\ + \text{为了保证数值稳定性, 当}\,beta * x > threshold\,\text{时,函数转变为线性函数x}. + +其中,:math:`x` 为输入的 Tensor + +参数: +:::::::::: + - x (Tensor) - 输入的 ``Tensor`` ,数据类型为:float32、float64。 + - beta (float, 可选) - Softplus激活计算公式中的beta值。默认值为1。 + - threshold (float, 可选) - Softplus激活计算公式中的threshold值。默认值为20。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +:::::::::: + ``Tensor`` ,数据类型和形状同 ``x`` 一致。 + +代码示例 +:::::::::: + +.. code-block:: python + + import paddle + import paddle.nn.functional as F + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3])) + out = F.softplus(x) # [0.513015, 0.598139, 0.744397, 0.854355] diff --git a/doc/fluid/api_cn/nn_cn/softshrink_cn.rst b/doc/fluid/api_cn/nn_cn/softshrink_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d630a80d553919f7814247ac5b6049b1f3146f84 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/softshrink_cn.rst @@ -0,0 +1,42 @@ +.. _cn_api_nn_cn_softshrink: + +softshrink +------------------------------- + +.. py:function:: paddle.nn.functional.softshrink(x, threshold=0.5, name=None) + +softshrink激活层 + +.. math:: + + softshrink(x)= \begin{cases} + x - threshold, \text{if } x > threshold \\ + x + threshold, \text{if } x < -threshold \\ + 0, \text{otherwise} + \end{cases} + +其中,:math:`x` 为输入的 Tensor + +参数: +:::::::::: + - x (Tensor) - 输入的 ``Tensor`` ,数据类型为:float32、float64。 + - threshold (float, 可选) - softshrink激活计算公式中的threshold值,必须大于等于零。默认值为0.5。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +:::::::::: + ``Tensor`` ,数据类型和形状同 ``x`` 一致。 + +代码示例 +:::::::::: + +.. 
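The softshrink cases listed above can be written out directly in NumPy (the helper name softshrink_np is made up for illustration; the runnable Paddle example follows):

.. code-block:: python

    import numpy as np

    def softshrink_np(x, threshold=0.5):
        # x - threshold  if x >  threshold
        # x + threshold  if x < -threshold
        # 0              otherwise
        return np.where(x > threshold, x - threshold,
                        np.where(x < -threshold, x + threshold, 0.0))

    x = np.array([-0.9, -0.2, 0.1, 0.8], dtype='float32')
    print(softshrink_np(x))  # [-0.4  0.   0.   0.3] -- matches the documented output

+.. 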
code-block:: python + + import paddle + import paddle.nn.functional as F + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([-0.9, -0.2, 0.1, 0.8])) + out = F.softshrink(x) # [-0.4, 0, 0, 0.3] diff --git a/doc/fluid/api_cn/nn_cn/softsign_cn.rst b/doc/fluid/api_cn/nn_cn/softsign_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..24c34bdbd2e89e28cc09e3ce421763afbafda529 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/softsign_cn.rst @@ -0,0 +1,37 @@ +.. _cn_api_nn_cn_softsign: + +softsign +------------------------------- + +.. py:function:: paddle.nn.functional.softsign(x, name=None) + +softsign激活层 + +.. math:: + + softsign(x) = \frac{x}{1 + |x|} + +其中,:math:`x` 为输入的 Tensor + +参数: +:::::::::: + - x (Tensor) - 输入的 ``Tensor`` ,数据类型为:float32、float64。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +:::::::::: + ``Tensor`` ,数据类型和形状同 ``x`` 一致。 + +代码示例 +:::::::::: + +.. code-block:: python + + import paddle + import paddle.nn.functional as F + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3])) + out = F.softsign(x) # [-0.285714, -0.166667, 0.0909091, 0.230769] diff --git a/doc/fluid/api_cn/nn_cn/space_to_depth_cn.rst b/doc/fluid/api_cn/nn_cn/space_to_depth_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1e594f6f215b0da47363e53bd023679caf4caca6 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/space_to_depth_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_space_to_depth: + +space_to_depth +------------------------------- +:doc_source: paddle.fluid.layers.space_to_depth + + diff --git a/doc/fluid/api_cn/nn_cn/square_error_cost_cn.rst b/doc/fluid/api_cn/nn_cn/square_error_cost_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e59e57331517ecb51a4ae43739d9ffcd354a08b1 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/square_error_cost_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_square_error_cost: + +square_error_cost +------------------------------- +:doc_source: paddle.fluid.layers.square_error_cost + + diff --git a/doc/fluid/api_cn/nn_cn/ssd_loss_cn.rst b/doc/fluid/api_cn/nn_cn/ssd_loss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b9e194baacbf4811646ed1927cc0799ca2c0b23e --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/ssd_loss_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_ssd_loss: + +ssd_loss +------------------------------- +:doc_source: paddle.fluid.layers.ssd_loss + + diff --git a/doc/fluid/api_cn/nn_cn/swish_cn.rst b/doc/fluid/api_cn/nn_cn/swish_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..fc4e2116c68cf7a515e1b59db31a857068b3e555 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/swish_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_swish: + +swish +------------------------------- +:doc_source: paddle.fluid.layers.swish + + diff --git a/doc/fluid/api_cn/nn_cn/switch_case_cn.rst b/doc/fluid/api_cn/nn_cn/switch_case_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..84d160847bfb6039a48b2fa85da129ae113f0aad --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/switch_case_cn.rst @@ -0,0 +1,7 @@ +.. 
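A quick NumPy check of the softsign formula documented above (a throwaway snippet, not part of the documented API):

.. code-block:: python

    import numpy as np

    x = np.array([-0.4, -0.2, 0.1, 0.3], dtype='float32')
    out = x / (1.0 + np.abs(x))  # softsign(x) = x / (1 + |x|)
    print(out)  # ~[-0.285714, -0.166667, 0.090909, 0.230769], same values as the example above

+.. 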
_cn_api_nn_cn_switch_case: + +switch_case +------------------------------- +:doc_source: paddle.fluid.layers.switch_case + + diff --git a/doc/fluid/api_cn/nn_cn/tanhshrink_cn.rst b/doc/fluid/api_cn/nn_cn/tanhshrink_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2e49b2b5156ab0d27298fbeefdc0802061cb09ca --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/tanhshrink_cn.rst @@ -0,0 +1,37 @@ +.. _cn_api_nn_cn_tanhshrink: + +tanhshrink +------------------------------- + +.. py:function:: paddle.nn.functional.tanhshrink(x, name=None) + +tanhshrink激活层 + +.. math:: + + tanhshrink(x) = x - tanh(x) + +其中,:math:`x` 为输入的 Tensor + +参数: +:::::::::: + - x (Tensor) - 输入的 ``Tensor`` ,数据类型为:float32、float64。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +:::::::::: + ``Tensor`` ,数据类型和形状同 ``x`` 一致。 + +代码示例 +:::::::::: + +.. code-block:: python + + import paddle + import paddle.nn.functional as F + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3])) + out = F.tanhshrink(x) # [-0.020051, -0.00262468, 0.000332005, 0.00868739] diff --git a/doc/fluid/api_cn/nn_cn/target_assign_cn.rst b/doc/fluid/api_cn/nn_cn/target_assign_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..754b01b0d3b0d13cb6d0eb2daafa3af105065a7f --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/target_assign_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_target_assign: + +target_assign +------------------------------- +:doc_source: paddle.fluid.layers.target_assign + + diff --git a/doc/fluid/api_cn/nn_cn/teacher_student_sigmoid_loss_cn.rst b/doc/fluid/api_cn/nn_cn/teacher_student_sigmoid_loss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d33b958d5b7d7af7bcac2d0494b305318ff672ee --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/teacher_student_sigmoid_loss_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_teacher_student_sigmoid_loss: + +teacher_student_sigmoid_loss +------------------------------- +:doc_source: paddle.fluid.layers.teacher_student_sigmoid_loss + + diff --git a/doc/fluid/api_cn/nn_cn/temporal_shift_cn.rst b/doc/fluid/api_cn/nn_cn/temporal_shift_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..52e24ff3f3dc13d2e1ab7c8345b8ad34c0c0cb0c --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/temporal_shift_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_temporal_shift: + +temporal_shift +------------------------------- +:doc_source: paddle.fluid.layers.temporal_shift + + diff --git a/doc/fluid/api_cn/nn_cn/thresholded_relu_cn.rst b/doc/fluid/api_cn/nn_cn/thresholded_relu_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1a981064484c3f63b6a1253c229e9b4676ea0263 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/thresholded_relu_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_thresholded_relu: + +thresholded_relu +------------------------------- +:doc_source: paddle.fluid.layers.thresholded_relu + + diff --git a/doc/fluid/api_cn/nn_cn/unfold_cn.rst b/doc/fluid/api_cn/nn_cn/unfold_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2ccd367b03b7e4a5ff48a8070c5f02aef1b63852 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/unfold_cn.rst @@ -0,0 +1,7 @@ +.. 
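Likewise, the output values in the tanhshrink example above follow directly from the formula x - tanh(x); a NumPy check (illustrative only):

.. code-block:: python

    import numpy as np

    x = np.array([-0.4, -0.2, 0.1, 0.3], dtype='float32')
    out = x - np.tanh(x)  # tanhshrink(x) = x - tanh(x)
    print(out)  # ~[-0.020051, -0.00262468, 0.000332005, 0.00868739]

+.. 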
_cn_api_nn_cn_unfold: + +unfold +------------------------------- +:doc_source: paddle.fluid.layers.unfold + + diff --git a/doc/fluid/api_cn/nn_cn/warpctc_cn.rst b/doc/fluid/api_cn/nn_cn/warpctc_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a8b0f21ce29ab956237cdc851db1a1274b24222b --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/warpctc_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_warpctc: + +warpctc +------------------------------- +:doc_source: paddle.fluid.layers.warpctc + + diff --git a/doc/fluid/api_cn/nn_cn/weight_norm_cn.rst b/doc/fluid/api_cn/nn_cn/weight_norm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c427c956be98b56fcffb88fbf5b1fcb12da9dc65 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/weight_norm_cn.rst @@ -0,0 +1,38 @@ +.. _cn_api_nn_cn_weight_norm: + +weight_norm +------------------------------- + +.. py:function:: paddle.nn.utils.weight_norm(layer, name='weight', dim=0) + +该接口根据以下公式对传入的 ``layer`` 中的权重参数进行归一化: + +.. math:: + \mathbf{w} = g \dfrac{v}{\|v\|} + +权重归一化可以将神经网络中权重向量的长度与其方向解耦,权重归一化可以用两个变量(例如: 代表长度的变量 `weight_g` 和代表方向的变量 `weight_v`)来代替由名字(例如: `weight`)指定的变量。详细可以参考论文: `A Simple Reparameterization to Accelerate Training of Deep Neural Networks `_ + +参数: + - **layer** (paddle.nn.Layer) - 要添加权重归一化的层。 + - **name** (str, 可选) - 权重参数的名字。默认:'weight'. + - **dim** (int|None, 可选) - 进行归一化操作的切片所在维度,是小于权重Tensor rank的非负数。比如卷积的权重shape是 [cout,cin,kh,kw] , rank是4,则dim可以选0,1,2,3;fc的权重shape是 [cout,cin] ,rank是2,dim可以选0,1。 如果为None就对所有维度上的元素做归一化。默认:0。 + +返回: + ``Layer`` , 添加了权重归一化hook的层 + +**代码示例** + +.. code-block:: python + + import numpy as np + import paddle + from paddle.nn import Conv2d + from paddle.nn.utils import weight_norm + x = np.array([[[[0.3, 0.4], [0.3, 0.07]], [[0.83, 0.37], [0.18, 0.93]]]]).astype('float32') + paddle.disable_static() + conv = Conv2d(3, 5, 3) + wn = weight_norm(conv) + print(conv.weight_g.shape) + # [5] + print(conv.weight_v.shape) + # [5, 3, 3, 3] diff --git a/doc/fluid/api_cn/nn_cn/while_loop_cn.rst b/doc/fluid/api_cn/nn_cn/while_loop_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..909755fd39385d98b1c4f1410ca01e800162b75c --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/while_loop_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_while_loop: + +while_loop +------------------------------- +:doc_source: paddle.fluid.layers.while_loop + + diff --git a/doc/fluid/api_cn/nn_cn/yolo_box_cn.rst b/doc/fluid/api_cn/nn_cn/yolo_box_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7f1e99c33ee0d8b4e50cb209e592cf85c7ff9f00 --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/yolo_box_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_nn_cn_yolo_box: + +yolo_box +------------------------------- +:doc_source: paddle.fluid.layers.yolo_box + + diff --git a/doc/fluid/api_cn/nn_cn/yolov3_loss_cn.rst b/doc/fluid/api_cn/nn_cn/yolov3_loss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e5625a31052fce3a9169c2b8e1b70780a5c8df5f --- /dev/null +++ b/doc/fluid/api_cn/nn_cn/yolov3_loss_cn.rst @@ -0,0 +1,7 @@ +.. 
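A NumPy sketch of the w = g * v / ||v|| reparameterization described in the weight_norm entry above, using the same [5, 3, 3, 3] convolution weight shape as its example (hand-rolled illustration; the arrays g and v here are plain NumPy stand-ins for the weight_g / weight_v parameters):

.. code-block:: python

    import numpy as np

    # A conv weight of shape [cout, cin, kh, kw] = [5, 3, 3, 3], normalized over dim=0 slices.
    v = np.random.randn(5, 3, 3, 3).astype('float32')        # direction variable (weight_v)
    norm = np.linalg.norm(v.reshape(5, -1), axis=1)           # per-slice L2 norm, shape [5]
    g = norm.copy()                                           # length variable (weight_g)
    # Reconstruct the effective weight w = g * v / ||v||.
    w = g[:, None, None, None] * v / norm[:, None, None, None]
    print(g.shape)             # (5,)  -- matches the weight_g shape printed in the example
    print(np.allclose(w, v))   # True here, because g was initialised to ||v||

+.. 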
_cn_api_nn_cn_yolov3_loss: + +yolov3_loss +------------------------------- +:doc_source: paddle.fluid.layers.yolov3_loss + + diff --git a/doc/fluid/api_cn/optimizer_cn.rst b/doc/fluid/api_cn/optimizer_cn.rst index 56c9c5286b0a739eabcf86aa1454347bf6ec543f..54d246789c4ca65e12549f1667263a872a5eddf4 100644 --- a/doc/fluid/api_cn/optimizer_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn.rst @@ -1,5 +1,5 @@ ======================= -fluid.optimizer +paddle.optimizer ======================= @@ -14,8 +14,7 @@ fluid.optimizer optimizer_cn/AdagradOptimizer_cn.rst optimizer_cn/Adam_cn.rst optimizer_cn/Adamax_cn.rst - optimizer_cn/AdamaxOptimizer_cn.rst - optimizer_cn/AdamOptimizer_cn.rst + optimizer_cn/AdamW_cn.rst optimizer_cn/DecayedAdagrad_cn.rst optimizer_cn/DecayedAdagradOptimizer_cn.rst optimizer_cn/DGCMomentumOptimizer_cn.rst @@ -31,8 +30,20 @@ fluid.optimizer optimizer_cn/ModelAverage_cn.rst optimizer_cn/Momentum_cn.rst optimizer_cn/MomentumOptimizer_cn.rst - optimizer_cn/PipelineOptimizer_cn.rst optimizer_cn/RecomputeOptimizer_cn.rst - optimizer_cn/RMSPropOptimizer_cn.rst + optimizer_cn/RMSProp_cn.rst optimizer_cn/SGD_cn.rst optimizer_cn/SGDOptimizer_cn.rst + optimizer_cn/Optimizer_cn.rst + optimizer_cn/lr_scheduler_cn/CosineAnnealingLR_cn.rst + optimizer_cn/lr_scheduler_cn/ExponentialLR_cn.rst + optimizer_cn/lr_scheduler_cn/InverseTimeLR_cn.rst + optimizer_cn/lr_scheduler_cn/LambdaLR_cn.rst + optimizer_cn/lr_scheduler_cn/MultiStepLR_cn.rst + optimizer_cn/lr_scheduler_cn/NaturalExpLR_cn.rst + optimizer_cn/lr_scheduler_cn/NoamLR_cn.rst + optimizer_cn/lr_scheduler_cn/PiecewiseLR_cn.rst + optimizer_cn/lr_scheduler_cn/PolynomiaLR_cn.rst + optimizer_cn/lr_scheduler_cn/ReduceLROnPlateauLR_cn.rst + optimizer_cn/lr_scheduler_cn/StepLR_cn.rst + optimizer_cn/lr_scheduler_cn/LinearLrWarmup_cn.rst diff --git a/doc/fluid/api_cn/optimizer_cn/AdadeltaOptimizer_cn.rst b/doc/fluid/api_cn/optimizer_cn/AdadeltaOptimizer_cn.rst index dcd4fa67e18c2ac5ddc56af1db1d5a752ba850fd..e1d5168744d910a7b930eb1f2ff1dc532707dd49 100644 --- a/doc/fluid/api_cn/optimizer_cn/AdadeltaOptimizer_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/AdadeltaOptimizer_cn.rst @@ -3,7 +3,10 @@ AdadeltaOptimizer ------------------------------- -.. py:class:: paddle.fluid.optimizer.AdadeltaOptimizer(learning_rate, epsilon=1.0e-6, rho=0.95, parameter_list=None, regularization=None, name=None) +.. 
py:class:: paddle.fluid.optimizer.AdadeltaOptimizer(learning_rate, epsilon=1.0e-6, rho=0.95, parameter_list=None, regularization=None, grad_clip=None, name=None) + + + **注意:此接口不支持稀疏参数更新。** @@ -23,7 +26,11 @@ Adadelta优化器,具体细节可参考论文 `ADADELTA: AN ADAPTIVE LEARNING - **epsilon** (float) - 维持数值稳定性的浮点型值,默认值为1.0e-6。 - **rho** (float) - 算法中的衰减率,默认值为0.95。 - **parameter_list** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 - - **regularization** (WeightDecayRegularizer,可选) - 正则化方法,例如fluid.regularizer.L2DecayRegularizer等。默认值为None,表示无正则化。 + - **regularization** (WeightDecayRegularizer,可选) - 正则化方法。支持两种正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 + :ref:`cn_api_fluid_regularizer_L2Decay` 。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; + 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 + - **grad_clip** (GradientClipBase, 可选) – 梯度裁剪的策略,支持三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。 + 默认值为None,此时将不进行梯度裁剪。 - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 **代码示例** @@ -40,7 +47,7 @@ Adadelta优化器,具体细节可参考论文 `ADADELTA: AN ADAPTIVE LEARNING optimizer_ops, params_grads = optimizer.minimize(cost) -.. py:method:: minimize(loss, startup_program=None, parameter_list=None, no_grad_set=None, grad_clip=None) +.. py:method:: minimize(loss, startup_program=None, parameter_list=None, no_grad_set=None) 为训练网络添加反向和参数优化部分,进而使损失最小化。 @@ -49,9 +56,8 @@ Adadelta优化器,具体细节可参考论文 `ADADELTA: AN ADAPTIVE LEARNING - **startup_program** (Program,可选) – 参数所在的startup program。默认值为None,表示 :ref:`cn_api_fluid_default_startup_program` 。 - **parameter_list** (list,可选) – 待更新的Parameter或者Parameter.name组成的列表。默认值为None,表示所有参数均需要更新。 - **no_grad_set** (set,可选) – 不需要更新的Parameter或者Parameter.name组成的集合。默认值为None。 - - **grad_clip** (GradClipBase,可选) – 梯度裁剪的策略,目前仅在动态图模式下有效。 -返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。 +返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 返回类型: tuple @@ -95,6 +101,49 @@ Adadelta优化器,具体细节可参考论文 `ADADELTA: AN ADAPTIVE LEARNING optimizer.minimize(out) optimizer.clear_gradients() +.. py:method:: set_lr() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +手动设置当前 ``optimizer`` 的学习率。当使用LearningRateDecay时,无法使用该API手动设置学习率,因为这将导致冲突。 + +参数: + value (float|Variable) - 需要设置的学习率的值。 + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + with fluid.dygraph.guard(): + linear = fluid.dygraph.nn.Linear(10, 10) + adam = fluid.optimizer.Adam(0.1, parameter_list=linear.parameters()) + # 通过Python float数值手动设置学习率 + lr_list = [0.2, 0.3, 0.4, 0.5, 0.6] + for i in range(5): + adam.set_lr(lr_list[i]) + print("current lr is {}".format(adam.current_step_lr())) + # 打印结果: + # current lr is 0.2 + # current lr is 0.3 + # current lr is 0.4 + # current lr is 0.5 + # current lr is 0.6 + + + # 通过 框架的Variable 设置学习率 + lr_var = fluid.layers.create_global_var(shape=[1], value=0.7, dtype='float32') + adam.set_lr(lr_var) + print("current lr is {}".format(adam.current_step_lr())) + # 打印结果: + # current lr is 0.7 + + .. 
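A minimal static-graph sketch that exercises the newly documented regularization and grad_clip arguments of AdadeltaOptimizer (a sketch only, assuming the fluid 1.x-style API used elsewhere on this page; the network and sizes are arbitrary):

.. code-block:: python

    import paddle.fluid as fluid

    image = fluid.data(name='image', shape=[None, 28], dtype='float32')
    fc = fluid.layers.fc(image, size=10)
    cost = fluid.layers.reduce_mean(fc)

    optimizer = fluid.optimizer.AdadeltaOptimizer(
        learning_rate=0.0003,
        epsilon=1.0e-6,
        rho=0.95,
        regularization=fluid.regularizer.L2Decay(1e-4),                # L2 weight decay
        grad_clip=fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0))  # clip by global norm
    optimizer.minimize(cost)

.. 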
py:method:: current_step_lr() diff --git a/doc/fluid/api_cn/optimizer_cn/Adadelta_cn.rst b/doc/fluid/api_cn/optimizer_cn/Adadelta_cn.rst index 30ffded73505f783cf5270691872e89482ec9da5..fabe7b81fa54401bea6e21de5cfa5cce4bc5a701 100644 --- a/doc/fluid/api_cn/optimizer_cn/Adadelta_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/Adadelta_cn.rst @@ -5,6 +5,9 @@ Adadelta .. py:attribute:: paddle.fluid.optimizer.Adadelta + + + ``AdadeltaOptimizer`` 的别名 diff --git a/doc/fluid/api_cn/optimizer_cn/AdagradOptimizer_cn.rst b/doc/fluid/api_cn/optimizer_cn/AdagradOptimizer_cn.rst index 53b5b9774cc7d8177493b4f1af31c4950e5b37a0..0837c86c5a6298dde6b75e9c937699fa7e2c91c8 100644 --- a/doc/fluid/api_cn/optimizer_cn/AdagradOptimizer_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/AdagradOptimizer_cn.rst @@ -3,7 +3,10 @@ AdagradOptimizer ------------------------------- -.. py:class:: paddle.fluid.optimizer.AdagradOptimizer(learning_rate, epsilon=1e-06, parameter_list=None, regularization=None, name=None, initial_accumulator_value=0.0) +.. py:class:: paddle.fluid.optimizer.AdagradOptimizer(learning_rate, epsilon=1e-06, parameter_list=None, regularization=None, grad_clip=None, name=None, initial_accumulator_value=0.0) + + + Adaptive Gradient 优化器(自适应梯度优化器,简称Adagrad)可以针对不同参数样本数不平均的问题,自适应地为各个参数分配不同的学习率。 @@ -25,7 +28,11 @@ Adaptive Gradient 优化器(自适应梯度优化器,简称Adagrad)可以针 - **learning_rate** (float|Variable) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个值为浮点型的Variable - **epsilon** (float, 可选) - 维持数值稳定性的浮点型值,默认值为1e-06 - **parameter_list** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 - - **regularization** (WeightDecayRegularizer, 可选) - 正则化函数,用于减少泛化误差。例如可以是 :ref:`cn_api_fluid_regularizer_L2DecayRegularizer` ,默认值为None + - **regularization** (WeightDecayRegularizer,可选) - 正则化方法。支持两种正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 + :ref:`cn_api_fluid_regularizer_L2Decay` 。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; + 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 + - **grad_clip** (GradientClipBase, 可选) – 梯度裁剪的策略,支持三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。 + 默认值为None,此时将不进行梯度裁剪。 - **name** (str, 可选) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None - **initial_accumulator_value** (float, 可选) - moment累加器的初始值,默认值为0.0 @@ -50,7 +57,7 @@ Adaptive Gradient 优化器(自适应梯度优化器,简称Adagrad)可以针 feed={"inp": np_inp}, fetch_list=[out.name]) -.. py:method:: minimize(loss, startup_program=None, parameter_list=None, no_grad_set=None, grad_clip=None) +.. 
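To make Adagrad's per-parameter learning-rate adaptation concrete, a NumPy sketch of the textbook accumulator update (assuming the standard form moment += grad^2; param -= lr * grad / (sqrt(moment) + epsilon); not the Paddle kernel):

.. code-block:: python

    import numpy as np

    param = np.zeros(3)
    moment = np.zeros(3)   # squared-gradient accumulator, cf. initial_accumulator_value
    lr, epsilon = 0.2, 1e-6

    grads = [np.array([1.0, 0.1, 0.0]),
             np.array([1.0, 0.0, 0.0]),
             np.array([1.0, 0.1, 0.0])]
    for grad in grads:
        moment += grad ** 2                                # accumulate squared gradients per element
        param -= lr * grad / (np.sqrt(moment) + epsilon)   # larger accumulator => smaller effective step
    print(param)  # the frequently-updated first coordinate has had its effective step shrunk the most

+.. 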
py:method:: minimize(loss, startup_program=None, parameter_list=None, no_grad_set=None) 为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameter_list中的Parameters,最小化网络损失值loss。 @@ -59,9 +66,8 @@ Adaptive Gradient 优化器(自适应梯度优化器,简称Adagrad)可以针 - **startup_program** (Program, 可选) – 用于初始化parameter_list中参数的 :ref:`cn_api_fluid_Program` , 默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program` - **parameter_list** (list, 可选) – 待更新的Parameter或者Parameter.name组成的列表, 默认值为None,此时将更新所有的Parameter - **no_grad_set** (set, 可选) – 不需要更新的Parameter或者Parameter.name组成的集合。默认值为None - - **grad_clip** (GradClipBase, 可选) – 梯度裁剪的策略,静态图模式不需要使用本参数,当前本参数只支持在dygraph模式下的梯度裁剪,未来本参数可能会调整,默认值为None - -返回: (optimize_ops, params_grads),数据类型为(list, list),其中optimize_ops是minimize接口为网络添加的OP列表,params_grads是一个由(param, grad)变量对组成的列表,param是Parameter,grad是该Parameter对应的梯度值 + +返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 返回类型: tuple @@ -114,6 +120,49 @@ Adaptive Gradient 优化器(自适应梯度优化器,简称Adagrad)可以针 optimizer.minimize(out) optimizer.clear_gradients() +.. py:method:: set_lr() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +手动设置当前 ``optimizer`` 的学习率。当使用LearningRateDecay时,无法使用该API手动设置学习率,因为这将导致冲突。 + +参数: + value (float|Variable) - 需要设置的学习率的值。 + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + with fluid.dygraph.guard(): + linear = fluid.dygraph.nn.Linear(10, 10) + adam = fluid.optimizer.Adam(0.1, parameter_list=linear.parameters()) + # 通过Python float数值手动设置学习率 + lr_list = [0.2, 0.3, 0.4, 0.5, 0.6] + for i in range(5): + adam.set_lr(lr_list[i]) + print("current lr is {}".format(adam.current_step_lr())) + # 打印结果: + # current lr is 0.2 + # current lr is 0.3 + # current lr is 0.4 + # current lr is 0.5 + # current lr is 0.6 + + + # 通过 框架的Variable 设置学习率 + lr_var = fluid.layers.create_global_var(shape=[1], value=0.7, dtype='float32') + adam.set_lr(lr_var) + print("current lr is {}".format(adam.current_step_lr())) + # 打印结果: + # current lr is 0.7 + + .. py:method:: current_step_lr() diff --git a/doc/fluid/api_cn/optimizer_cn/Adagrad_cn.rst b/doc/fluid/api_cn/optimizer_cn/Adagrad_cn.rst index a266464f71724b2050163e8e30f8b2c530222df1..f4304ba8b4d89f0d1b446cf31e487e6c9ebb4c34 100644 --- a/doc/fluid/api_cn/optimizer_cn/Adagrad_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/Adagrad_cn.rst @@ -5,6 +5,9 @@ Adagrad .. py:attribute:: paddle.fluid.optimizer.Adagrad + + + ``AdagradOptimizer`` 的别名 diff --git a/doc/fluid/api_cn/optimizer_cn/AdamOptimizer_cn.rst b/doc/fluid/api_cn/optimizer_cn/AdamOptimizer_cn.rst deleted file mode 100644 index 4b7683ffc2bdb8d50750c6aafc5d6012b2a2efb9..0000000000000000000000000000000000000000 --- a/doc/fluid/api_cn/optimizer_cn/AdamOptimizer_cn.rst +++ /dev/null @@ -1,240 +0,0 @@ -.. _cn_api_fluid_optimizer_AdamOptimizer: - -AdamOptimizer -------------------------------- - -.. py:class:: paddle.fluid.optimizer.AdamOptimizer(learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08, parameter_list=None, regularization=None, name=None, lazy_mode=False) - -Adam优化器出自 `Adam论文 `_ 的第二节,能够利用梯度的一阶矩估计和二阶矩估计动态调整每个参数的学习率。 - -其参数更新的计算公式如下: - -.. math:: - \\t = t + 1 -.. math:: - moment\_1\_out=\beta_1∗moment\_1+(1−\beta_1)∗grad -.. math:: - moment\_2\_out=\beta_2∗moment\_2+(1−\beta_2)∗grad*grad -.. math:: - learning\_rate=\frac{learning\_rate}{1-\beta_1^t} -.. 
math:: - param\_out=param-learning\_rate*\frac{moment\_1}{\sqrt{moment\_2}+\epsilon}\\ - -相关论文:`Adam: A Method for Stochastic Optimization `_ - -参数: - - **learning_rate** (float|Variable,可选) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个值为浮点型的Variable,默认值为0.001 - - **parameter_list** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 - - **beta1** (float|Variable, 可选) - 一阶矩估计的指数衰减率,是一个float类型或者一个shape为[1],数据类型为float32的Variable类型。默认值为0.9 - - **beta2** (float|Variable, 可选) - 二阶矩估计的指数衰减率,是一个float类型或者一个shape为[1],数据类型为float32的Variable类型。默认值为0.999 - - **epsilon** (float, 可选) - 保持数值稳定性的短浮点类型值,默认值为1e-08 - - **regularization** (WeightDecayRegularizer, 可选) - 正则化函数,用于减少泛化误差。例如可以是 :ref:`cn_api_fluid_regularizer_L2DecayRegularizer` ,默认值为None - - **name** (str, 可选)- 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None - - **lazy_mode** (bool, 可选) - 设为True时,仅更新当前具有梯度的元素。官方Adam算法有两个移动平均累加器(moving-average accumulators)。累加器在每一步都会更新。在密集模式和稀疏模式下,两条移动平均线的每个元素都会更新。如果参数非常大,那么更新可能很慢。 lazy mode仅更新当前具有梯度的元素,所以它会更快。但是这种模式与原始的算法有不同的描述,可能会导致不同的结果,默认为False - - -**代码示例** - -.. code-block:: python - - import paddle - import paddle.fluid as fluid - - place = fluid.CPUPlace() - main = fluid.Program() - with fluid.program_guard(main): - x = fluid.layers.data(name='x', shape=[13], dtype='float32') - y = fluid.layers.data(name='y', shape=[1], dtype='float32') - y_predict = fluid.layers.fc(input=x, size=1, act=None) - cost = fluid.layers.square_error_cost(input=y_predict, label=y) - avg_cost = fluid.layers.mean(cost) - adam_optimizer = fluid.optimizer.AdamOptimizer(0.01) - adam_optimizer.minimize(avg_cost) - - fetch_list = [avg_cost] - train_reader = paddle.batch( - paddle.dataset.uci_housing.train(), batch_size=1) - feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) - for data in train_reader(): - exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list) - -.. 
code-block:: python - - # Adam with beta1/beta2 as Variable - import paddle - import paddle.fluid as fluid - import paddle.fluid.layers.learning_rate_scheduler as lr_scheduler - - place = fluid.CPUPlace() - main = fluid.Program() - with fluid.program_guard(main): - x = fluid.data(name='x', shape=[None, 13], dtype='float32') - y = fluid.data(name='y', shape=[None, 1], dtype='float32') - y_predict = fluid.layers.fc(input=x, size=1, act=None) - cost = fluid.layers.square_error_cost(input=y_predict, label=y) - avg_cost = fluid.layers.mean(cost) - - # define beta decay variable - def get_decayed_betas(beta1_init, beta2_init, decay_steps, decay_rate) - global_step = lr_scheduler._decay_step_counter() - - beta1 = fluid.layers.create_global_var( - shape=[1], - value=float(beta1_init), - dtype='float32', - # set persistable for save checkpoints and resume - persistable=True, - name="beta1") - beta2 = fluid.layers.create_global_var( - shape=[1], - value=float(beta2_init), - dtype='float32', - # set persistable for save checkpoints and resume - persistable=True, - name="beta2") - - div_res = global_step / decay_steps - decayed_beta1 = beta1_init * (decay_rate**div_res) - decayed_beta2 = beta2_init * (decay_rate**div_res) - fluid.layers.assign(decayed_beta1, beta1) - fluid.layers.assign(decayed_beta2, beta2) - - return beta1, beta2 - - beta1, beta2 = get_decayed_betas(0.9, 0.99, 1e5, 0.9) - adam_optimizer = fluid.optimizer.AdamOptimizer( - learning_rate=0.01, - beta1=beta1 - beta2=beta2) - adam_optimizer.minimize(avg_cost) - - fetch_list = [avg_cost] - train_reader = paddle.batch( - paddle.dataset.uci_housing.train(), batch_size=1) - feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) - for data in train_reader(): - exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list) - - -.. py:method:: minimize(loss, startup_program=None, parameter_list=None, no_grad_set=None, grad_clip=None) - -为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameter_list中的Parameters,最小化网络损失值loss。 - -参数: - - **loss** (Variable) – 需要最小化的损失值变量 - - **startup_program** (Program, 可选) – 用于初始化parameter_list中参数的 :ref:`cn_api_fluid_Program` , 默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program` - - **parameter_list** (list, 可选) – 待更新的Parameter或者Parameter.name组成的列表, 默认值为None,此时将更新所有的Parameter - - **no_grad_set** (set, 可选) – 不需要更新的Parameter或者Parameter.name组成的集合,默认值为None - - **grad_clip** (GradClipBase, 可选) – 梯度裁剪的策略,静态图模式不需要使用本参数,当前本参数只支持在dygraph模式下的梯度裁剪,未来本参数可能会调整,默认值为None - -返回: (optimize_ops, params_grads),数据类型为(list, list),其中optimize_ops是minimize接口为网络添加的OP列表,params_grads是一个由(param, grad)变量对组成的列表,param是Parameter,grad是该Parameter对应的梯度值 - -返回类型: tuple - -**代码示例** - -.. code-block:: python - - import numpy - import paddle.fluid as fluid - - x = fluid.layers.data(name='X', shape=[13], dtype='float32') - y = fluid.layers.data(name='Y', shape=[1], dtype='float32') - y_predict = fluid.layers.fc(input=x, size=1, act=None) - cost = fluid.layers.square_error_cost(input=y_predict, label=y) - loss = fluid.layers.mean(cost) - adam = fluid.optimizer.AdamOptimizer(learning_rate=0.2) - adam.minimize(loss) - - place = fluid.CPUPlace() # fluid.CUDAPlace(0) - exe = fluid.Executor(place) - - x = numpy.random.random(size=(10, 13)).astype('float32') - y = numpy.random.random(size=(10, 1)).astype('float32') - exe.run(fluid.default_startup_program()) - outs = exe.run(program=fluid.default_main_program(), - feed={'X': x, 'Y': y}, - fetch_list=[loss.name]) - - -.. 
py:method:: clear_gradients() - -**注意:** - - **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** - - -清除需要优化的参数的梯度。 - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - import numpy as np - - with fluid.dygraph.guard(): - value = np.arange(26).reshape(2, 13).astype("float32") - a = fluid.dygraph.to_variable(value) - linear = fluid.Linear(13, 5, dtype="float32") - optimizer = fluid.optimizer.Adam(learning_rate=0.02, - parameter_list=linear.parameters()) - out = linear(a) - out.backward() - optimizer.minimize(out) - optimizer.clear_gradients() - - -.. py:method:: current_step_lr() - -**注意:** - - **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** - -获取当前步骤的学习率。当不使用LearningRateDecay时,每次调用的返回值都相同,否则返回当前步骤的学习率。 - -返回:当前步骤的学习率。 - -返回类型:float - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - import numpy as np - - # example1: LearningRateDecay is not used, return value is all the same - with fluid.dygraph.guard(): - emb = fluid.dygraph.Embedding([10, 10]) - adam = fluid.optimizer.Adam(0.001, parameter_list = emb.parameters()) - lr = adam.current_step_lr() - print(lr) # 0.001 - - # example2: PiecewiseDecay is used, return the step learning rate - with fluid.dygraph.guard(): - inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") - linear = fluid.dygraph.nn.Linear(10, 10) - inp = fluid.dygraph.to_variable(inp) - out = linear(inp) - loss = fluid.layers.reduce_mean(out) - - bd = [2, 4, 6, 8] - value = [0.2, 0.4, 0.6, 0.8, 1.0] - adam = fluid.optimizer.Adam(fluid.dygraph.PiecewiseDecay(bd, value, 0), - parameter_list=linear.parameters()) - - # first step: learning rate is 0.2 - np.allclose(adam.current_step_lr(), 0.2, rtol=1e-06, atol=0.0) # True - - # learning rate for different steps - ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0] - for i in range(12): - adam.minimize(loss) - lr = adam.current_step_lr() - np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True - diff --git a/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst b/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..352cfcf892d1b450585a74651b78e5017d98aef3 --- /dev/null +++ b/doc/fluid/api_cn/optimizer_cn/AdamW_cn.rst @@ -0,0 +1,241 @@ +.. _cn_api_paddle_optimizer_AdamW: + +AdamW +------------------------------- + +.. py:class:: paddle.optimizer.AdamW(learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08, parameters=None, weight_decay=0.01, apply_decay_param_fun=None, grad_clip=None, name=None, lazy_mode=False) + + + + +AdamW优化器出自 `DECOUPLED WEIGHT DECAY REGULARIZATION 论文 `,用来解决Adam优化器中L2正则化失效的问题。 + +其参数更新的计算公式如下: + +.. math:: + \\t = t + 1 +.. math:: + moment\_1\_out=\beta_1∗moment\_1+(1−\beta_1)∗grad +.. math:: + moment\_2\_out=\beta_2∗moment\_2+(1−\beta_2)∗grad*grad +.. math:: + learning\_rate=learning\_rate*\frac{\sqrt{1-\beta_2^t}}{1-\beta_1^t} +.. 
math:: + param\_out=param-learning\_rate*(\frac{moment\_1}{\sqrt{moment\_2}+\epsilon} + \lambda * param) + +相关论文:`Adam: A Method for Stochastic Optimization `_ + +参数: + - **learning_rate** (float|_LRScheduler) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个_LRScheduler类,默认值为0.001 + - **beta1** (float|Tensor, 可选) - 一阶矩估计的指数衰减率,是一个float类型或者一个shape为[1],数据类型为float32的Tensor类型。默认值为0.9 + - **beta2** (float|Tensor, 可选) - 二阶矩估计的指数衰减率,是一个float类型或者一个shape为[1],数据类型为float32的Tensor类型。默认值为0.999 + - **epsilon** (float, 可选) - 保持数值稳定性的短浮点类型值,默认值为1e-08 + - **parameters** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 + - **weight_decay** (float|Tensor, 可选) - 权重衰减系数,是一个float类型或者shape为[1] ,数据类型为float32的Tensor类型。默认值为0.01 + - **apply_decay_param_fun** (function|None, 可选): 传入函数时,只有可以使 apply_decay_param_fun(Tensor)==True的Tensor会更新参数。只有在想要指定要更新的参数时使用。默认值为None + - **grad_clip** (GradientClipBase, 可选) – 梯度裁剪的策略,支持三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。 + 默认值为None,此时将不进行梯度裁剪。 + - **name** (str, 可选)- 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None + - **lazy_mode** (bool, 可选) - 设为True时,仅更新当前具有梯度的元素。官方Adam算法有两个移动平均累加器(moving-average accumulators)。累加器在每一步都会更新。在密集模式和稀疏模式下,两条移动平均线的每个元素都会更新。如果参数非常大,那么更新可能很慢。 lazy mode仅更新当前具有梯度的元素,所以它会更快。但是这种模式与原始的算法有不同的描述,可能会导致不同的结果,默认为False + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.mean(out) + adam = paddle.optimizer.AdamW(weight_decay=0.01, learning_rate=0.1, + parameters=linear.parameters()) + out.backward() + adam.step() + adam.clear_grad() + + +.. py:method:: step() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +执行一次优化器并进行参数更新。 + +返回:None。 + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + value = np.arange(26).reshape(2, 13).astype("float32") + a = paddle.to_tensor(value) + linear = paddle.nn.Linear(13, 5) + adam = paddle.optimizer.AdamW(learning_rate = 0.01, + weight_decay = 0.01, + parameters = linear.parameters()) + out = linear(a) + out.backward() + adam.step() + adam.clear_grad() + +.. py:method:: minimize(loss, startup_program=None, parameters=None, no_grad_set=None) + +为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameters中的Parameters,最小化网络损失值loss。 + +参数: + - **loss** (Tensor) – 需要最小化的损失值变量 + - **startup_program** (Program, 可选) – 用于初始化parameters中参数的 :ref:`cn_api_fluid_Program` , 默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program` + - **parameters** (list, 可选) – 待更新的Parameter或者Parameter.name组成的列表, 默认值为None,此时将更新所有的Parameter + - **no_grad_set** (set, 可选) – 不需要更新的Parameter或者Parameter.name组成的集合,默认值为None + +返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 + + +**代码示例** + +.. 
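To see what the extra lambda * param term in the update above does, a single-step NumPy comparison of a plain Adam step and an AdamW step (illustrative arithmetic only, not the Paddle implementation):

.. code-block:: python

    import numpy as np

    param = np.array([1.0, -2.0])
    grad = np.array([0.001, -0.002])   # small task gradient, sizeable weights
    m, v = np.zeros(2), np.zeros(2)    # moment_1, moment_2
    beta1, beta2, eps, lr, wd, t = 0.9, 0.999, 1e-8, 0.1, 0.01, 1

    m = beta1 * m + (1 - beta1) * grad
    v = beta2 * v + (1 - beta2) * grad ** 2
    lr_t = lr * np.sqrt(1 - beta2 ** t) / (1 - beta1 ** t)  # bias-corrected learning rate

    adam_out = param - lr_t * m / (np.sqrt(v) + eps)                  # no explicit shrinkage of the weights
    adamw_out = param - lr_t * (m / (np.sqrt(v) + eps) + wd * param)  # decoupled decay pulls weights toward 0
    print(adam_out)
    print(adamw_out)  # slightly closer to zero than adam_out in every coordinate

+.. 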
code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.mean(out) + + beta1 = paddle.to_tensor([0.9], dtype="float32") + beta2 = paddle.to_tensor([0.99], dtype="float32") + + adam = paddle.optimizer.Adam(learning_rate=0.1, + parameters=linear.parameters(), + weight_decay=0.01) + out.backward() + adam.minimize(loss) + adam.clear_grad() + +.. py:method:: clear_grad() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + + +清除需要优化的参数的梯度。 + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + value = np.arange(26).reshape(2, 13).astype("float32") + a = paddle.to_tensor(value) + linear = paddle.nn.Linear(13, 5) + optimizer = paddle.optimizer.AdamW(weight_decay=0.01, + learning_rate=0.02, + parameters=linear.parameters()) + out = linear(a) + out.backward() + optimizer.step() + optimizer.clear_grad() + +.. py:method:: set_lr(value) + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +手动设置当前 ``optimizer`` 的学习率。当使用_LRScheduler时,无法使用该API手动设置学习率,因为这将导致冲突。 + +参数: + value (float) - 需要设置的学习率的值。 + +返回:None + +**代码示例** + +.. code-block:: python + + import paddle + paddle.disable_static() + linear = paddle.nn.Linear(10, 10) + + adam = paddle.optimizer.AdamW(weight_decay=0.01, + learning_rate=0.1, parameters=linear.parameters()) + + # set learning rate manually by python float value + lr_list = [0.2, 0.3, 0.4, 0.5, 0.6] + for i in range(5): + adam.set_lr(lr_list[i]) + lr = adam.get_lr() + print("current lr is {}".format(lr)) + # Print: + # current lr is 0.2 + # current lr is 0.3 + # current lr is 0.4 + # current lr is 0.5 + # current lr is 0.6 + +.. py:method:: get_lr() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +获取当前步骤的学习率。当不使用_LRScheduler时,每次调用的返回值都相同,否则返回当前步骤的学习率。 + +返回:float,当前步骤的学习率。 + + +**代码示例** + +.. 
code-block:: python + + import numpy as np + import paddle + # example1: _LRScheduler is not used, return value is all the same + paddle.disable_static() + emb = paddle.nn.Embedding(10, 10, sparse=False) + adam = paddle.optimizer.AdamW(learning_rate=0.001, parameters = emb.parameters(),weight_decay=0.01) + lr = adam.get_lr() + print(lr) # 0.001 + + # example2: PiecewiseLR is used, return the step learning rate + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.reduce_mean(out) + + bd = [2, 4, 6, 8] + value = [0.2, 0.4, 0.6, 0.8, 1.0] + scheduler = paddle.optimizer.PiecewiseLR(bd, value, 0) + adam = paddle.optimizer.AdamW(scheduler, + parameters=linear.parameters(), + weight_decay=0.01) + + # first step: learning rate is 0.2 + np.allclose(adam.get_lr(), 0.2, rtol=1e-06, atol=0.0) # True + + # learning rate for different steps + ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0] + for i in range(12): + adam.step() + lr = adam.get_lr() + scheduler.step() + np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True diff --git a/doc/fluid/api_cn/optimizer_cn/Adam_cn.rst b/doc/fluid/api_cn/optimizer_cn/Adam_cn.rst index b62de8a70e59d21bd5ee6ec6d736e4ddd2b4363d..6c57911eddad02aa0b43c62bb6230fd6a188112e 100644 --- a/doc/fluid/api_cn/optimizer_cn/Adam_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/Adam_cn.rst @@ -1,13 +1,262 @@ -.. _cn_api_fluid_optimizer_Adam: +.. _cn_api_paddle_optimizer_Adam: Adam ------------------------------- -.. py:attribute:: paddle.fluid.optimizer.Adam +.. py:class:: paddle.optimizer.Adam(learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08, parameters=None, weight_decay=None, grad_clip=None, name=None, lazy_mode=False) -``AdamOptimizer`` 的别名 +Adam优化器出自 `Adam论文 `_ 的第二节,能够利用梯度的一阶矩估计和二阶矩估计动态调整每个参数的学习率。 +其参数更新的计算公式如下: +.. math:: + \\t = t + 1 +.. math:: + moment\_1\_out=\beta_1∗moment\_1+(1−\beta_1)∗grad +.. math:: + moment\_2\_out=\beta_2∗moment\_2+(1−\beta_2)∗grad*grad +.. math:: + learning\_rate=learning\_rate*\frac{\sqrt{1-\beta_2^t}}{1-\beta_1^t} +.. 
math:: + param\_out=param-learning\_rate*\frac{moment\_1}{\sqrt{moment\_2}+\epsilon}\\ + +相关论文:`Adam: A Method for Stochastic Optimization `_ + +参数: + - **learning_rate** (float|_LRScheduler) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个_LRScheduler类,默认值为0.001 + - **beta1** (float|Tensor, 可选) - 一阶矩估计的指数衰减率,是一个float类型或者一个shape为[1],数据类型为float32的Tensor类型。默认值为0.9 + - **beta2** (float|Tensor, 可选) - 二阶矩估计的指数衰减率,是一个float类型或者一个shape为[1],数据类型为float32的Tensor类型。默认值为0.999 + - **epsilon** (float, 可选) - 保持数值稳定性的短浮点类型值,默认值为1e-08 + - **parameters** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 + - **weight_decay** (float|WeightDecayRegularizer,可选) - 正则化方法。可以是float类型的L2正则化系数或者正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 + :ref:`cn_api_fluid_regularizer_L2Decay` 。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; + 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 + - **grad_clip** (GradientClipBase, 可选) – 梯度裁剪的策略,支持三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。 + 默认值为None,此时将不进行梯度裁剪。 + - **name** (str, 可选)- 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None + - **lazy_mode** (bool, 可选) - 设为True时,仅更新当前具有梯度的元素。官方Adam算法有两个移动平均累加器(moving-average accumulators)。累加器在每一步都会更新。在密集模式和稀疏模式下,两条移动平均线的每个元素都会更新。如果参数非常大,那么更新可能很慢。 lazy mode仅更新当前具有梯度的元素,所以它会更快。但是这种模式与原始的算法有不同的描述,可能会导致不同的结果,默认为False + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.mean(out) + adam = paddle.optimizer.Adam(learning_rate=0.1, + parameters=linear.parameters()) + out.backward() + adam.step() + adam.clear_grad() + +.. code-block:: python + + # Adam with beta1/beta2 as Tensor and weight_decay as float + import paddle + import numpy as np + + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.mean(out) + + beta1 = paddle.to_tensor([0.9], dtype="float32") + beta2 = paddle.to_tensor([0.99], dtype="float32") + + adam = paddle.optimizer.Adam(learning_rate=0.1, + parameters=linear.parameters(), + beta1=beta1, + beta2=beta2, + weight_decay=0.01) + out.backward() + adam.step() + adam.clear_grad() + +.. py:method:: step() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +执行一次优化器并进行参数更新。 + +返回:None。 + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + value = np.arange(26).reshape(2, 13).astype("float32") + a = paddle.to_tensor(value) + linear = paddle.nn.Linear(13, 5) + adam = paddle.optimizer.Adam(learning_rate = 0.01, + parameters = linear.parameters()) + out = linear(a) + out.backward() + adam.step() + adam.clear_grad() + +.. 
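The role of the sqrt(1 - beta2^t) / (1 - beta1^t) factor in the formulas above can be traced with a few lines of NumPy (a worked illustration, not the Paddle kernel):

.. code-block:: python

    import numpy as np

    param = np.array([1.0, -2.0])
    grad = np.array([0.1, 0.3])        # constant gradient, for illustration
    m, v = np.zeros(2), np.zeros(2)    # moment_1 and moment_2 start at zero
    beta1, beta2, eps, lr = 0.9, 0.999, 1e-8, 0.001

    for t in range(1, 4):
        m = beta1 * m + (1 - beta1) * grad
        v = beta2 * v + (1 - beta2) * grad ** 2
        lr_t = lr * np.sqrt(1 - beta2 ** t) / (1 - beta1 ** t)  # the corrected learning_rate in the formula
        param = param - lr_t * m / (np.sqrt(v) + eps)
        print(t, lr_t, param)
    # The correction keeps the effective step on the order of lr from the very first iteration;
    # using the raw, zero-initialised m and v alone would distort the early step sizes.

+.. 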
py:method:: minimize(loss, startup_program=None, parameters=None, no_grad_set=None) + +为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameters中的Parameters,最小化网络损失值loss。 + +参数: + - **loss** (Tensor) – 需要最小化的损失值变量 + - **startup_program** (Program, 可选) – 用于初始化parameters中参数的 :ref:`cn_api_fluid_Program` , 默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program` + - **parameters** (list, 可选) – 待更新的Parameter或者Parameter.name组成的列表, 默认值为None,此时将更新所有的Parameter + - **no_grad_set** (set, 可选) – 不需要更新的Parameter或者Parameter.name组成的集合,默认值为None + +返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.mean(out) + + beta1 = paddle.to_tensor([0.9], dtype="float32") + beta2 = paddle.to_tensor([0.99], dtype="float32") + + adam = paddle.optimizer.Adam(learning_rate=0.1, + parameters=linear.parameters(), + weight_decay=0.01) + out.backward() + adam.minimize(loss) + adam.clear_grad() + +.. py:method:: clear_grad() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + + +清除需要优化的参数的梯度。 + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + value = np.arange(26).reshape(2, 13).astype("float32") + a = paddle.to_tensor(value) + linear = paddle.nn.Linear(13, 5) + optimizer = paddle.optimizer.Adam(learning_rate=0.02, + parameters=linear.parameters()) + out = linear(a) + out.backward() + optimizer.step() + optimizer.clear_grad() + +.. py:method:: set_lr(value) + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +手动设置当前 ``optimizer`` 的学习率。当使用_LRScheduler时,无法使用该API手动设置学习率,因为这将导致冲突。 + +参数: + value (float) - 需要设置的学习率的值。 + +返回:None + +**代码示例** + +.. code-block:: python + + import paddle + paddle.disable_static() + linear = paddle.nn.Linear(10, 10) + + adam = paddle.optimizer.Adam(0.1, parameters=linear.parameters()) + + # set learning rate manually by python float value + lr_list = [0.2, 0.3, 0.4, 0.5, 0.6] + for i in range(5): + adam.set_lr(lr_list[i]) + lr = adam.get_lr() + print("current lr is {}".format(lr)) + # Print: + # current lr is 0.2 + # current lr is 0.3 + # current lr is 0.4 + # current lr is 0.5 + # current lr is 0.6 + +.. py:method:: get_lr() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +获取当前步骤的学习率。当不使用_LRScheduler时,每次调用的返回值都相同,否则返回当前步骤的学习率。 + +返回:float,当前步骤的学习率。 + + +**代码示例** + +.. 
code-block:: python + + import numpy as np + import paddle + # example1: _LRScheduler is not used, return value is all the same + paddle.disable_static() + emb = paddle.nn.Embedding(10, 10, sparse=False) + adam = paddle.optimizer.Adam(0.001, parameters = emb.parameters()) + lr = adam.get_lr() + print(lr) # 0.001 + + # example2: PiecewiseLR is used, return the step learning rate + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.reduce_mean(out) + + bd = [2, 4, 6, 8] + value = [0.2, 0.4, 0.6, 0.8, 1.0] + scheduler = paddle.optimizer.PiecewiseLR(bd, value, 0) + adam = paddle.optimizer.Adam(scheduler, + parameters=linear.parameters()) + + # first step: learning rate is 0.2 + np.allclose(adam.get_lr(), 0.2, rtol=1e-06, atol=0.0) # True + + # learning rate for different steps + ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0] + for i in range(12): + adam.step() + lr = adam.get_lr() + scheduler.step() + np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True diff --git a/doc/fluid/api_cn/optimizer_cn/AdamaxOptimizer_cn.rst b/doc/fluid/api_cn/optimizer_cn/AdamaxOptimizer_cn.rst deleted file mode 100644 index 4dcfd2c789150771d427f73fded8a5706f07bbfb..0000000000000000000000000000000000000000 --- a/doc/fluid/api_cn/optimizer_cn/AdamaxOptimizer_cn.rst +++ /dev/null @@ -1,180 +0,0 @@ -.. _cn_api_fluid_optimizer_AdamaxOptimizer: - -AdamaxOptimizer -------------------------------- - -.. py:class:: paddle.fluid.optimizer.AdamaxOptimizer(learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08, parameter_list=None, regularization=None, name=None) - -Adamax优化器是参考 `Adam论文 `_ 第7节Adamax优化相关内容所实现的。Adamax算法是基于无穷大范数的 `Adam `_ 算法的一个变种,使学习率更新的算法更加稳定和简单。 - -其参数更新的计算公式如下: - -.. math:: - \\t = t + 1 -.. math:: - moment\_out=\beta_1∗moment+(1−\beta_1)∗grad -.. math:: - inf\_norm\_out=\max{(\beta_2∗inf\_norm+\epsilon, \left|grad\right|)} -.. math:: - learning\_rate=\frac{learning\_rate}{1-\beta_1^t} -.. math:: - param\_out=param−learning\_rate*\frac{moment\_out}{inf\_norm\_out}\\ - -相关论文:`Adam: A Method for Stochastic Optimization `_ - -论文中没有 ``epsilon`` 参数。但是,为了保持数值稳定性, 避免除0错误, 此处增加了这个参数。 - -参数: - - **learning_rate** (float|Variable,可选) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个值为浮点型的Variable,默认值为0.001 - - **beta1** (float, 可选) - 一阶矩估计的指数衰减率,默认值为0.9 - - **beta2** (float, 可选) - 二阶矩估计的指数衰减率,默认值为0.999 - - **epsilon** (float, 可选) - 保持数值稳定性的短浮点类型值,默认值为1e-08 - - **parameter_list** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 - - **regularization** (WeightDecayRegularizer, 可选) - 正则化函数,用于减少泛化误差。例如可以是 :ref:`cn_api_fluid_regularizer_L2DecayRegularizer` ,默认值为None - - **name** (str, 可选)- 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None - -.. note:: - 目前 ``AdamaxOptimizer`` 不支持 Sparse Parameter Optimization(稀疏参数优化)。 - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - import numpy - - # First create the Executor. - place = fluid.CPUPlace() # fluid.CUDAPlace(0) - exe = fluid.Executor(place) - - train_program = fluid.Program() - startup_program = fluid.Program() - with fluid.program_guard(train_program, startup_program): - data = fluid.layers.data(name='X', shape=[1], dtype='float32') - hidden = fluid.layers.fc(input=data, size=10) - loss = fluid.layers.mean(hidden) - adam = fluid.optimizer.AdamaxOptimizer(learning_rate=0.2) - adam.minimize(loss) - - # Run the startup program once and only once. 
- exe.run(startup_program) - - x = numpy.random.random(size=(10, 1)).astype('float32') - outs = exe.run(program=train_program, - feed={'X': x}, - fetch_list=[loss.name]) - -.. py:method:: minimize(loss, startup_program=None, parameter_list=None, no_grad_set=None, grad_clip=None) - -为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameter_list中的Parameters,最小化网络损失值loss。 - -参数: - - **loss** (Variable) – 需要最小化的损失值变量 - - **startup_program** (Program, 可选) – 用于初始化parameter_list中参数的 :ref:`cn_api_fluid_Program` , 默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program` - - **parameter_list** (list, 可选) – 待更新的Parameter或者Parameter.name组成的列表, 默认值为None,此时将更新所有的Parameter - - **no_grad_set** (set, 可选) – 不需要更新的Parameter或者Parameter.name组成集合,默认值为None - - **grad_clip** (GradClipBase, 可选) – 梯度裁剪的策略,静态图模式不需要使用本参数,当前本参数只支持在dygraph模式下的梯度裁剪,未来本参数可能会调整,默认值为None - -返回: (optimize_ops, params_grads),数据类型为(list, list),其中optimize_ops是minimize接口为网络添加的OP列表,params_grads是一个由(param, grad)变量对组成的列表,param是Parameter,grad是该Parameter对应的梯度值 - -**代码示例**: - -.. code-block:: python - - import numpy - import paddle.fluid as fluid - - data = fluid.layers.data(name='X', shape=[1], dtype='float32') - hidden = fluid.layers.fc(input=data, size=10) - loss = fluid.layers.mean(hidden) - adam = fluid.optimizer.Adamax(learning_rate=0.2) - adam.minimize(loss) - - place = fluid.CPUPlace() # fluid.CUDAPlace(0) - exe = fluid.Executor(place) - - x = numpy.random.random(size=(10, 1)).astype('float32') - exe.run(fluid.default_startup_program()) - outs = exe.run(program=fluid.default_main_program(), - feed={'X': x}, - fetch_list=[loss.name]) - - - -.. py:method:: clear_gradients() - -**注意:** - - **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** - - -清除需要优化的参数的梯度。 - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - import numpy as np - - with fluid.dygraph.guard(): - value = np.arange(26).reshape(2, 13).astype("float32") - a = fluid.dygraph.to_variable(value) - linear = fluid.Linear(13, 5, dtype="float32") - optimizer = fluid.optimizer.AdamaxOptimizer(learning_rate=0.2, - parameter_list=linear.parameters()) - out = linear(a) - out.backward() - optimizer.minimize(out) - optimizer.clear_gradients() - - -.. py:method:: current_step_lr() - -**注意:** - - **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** - -获取当前步骤的学习率。当不使用LearningRateDecay时,每次调用的返回值都相同,否则返回当前步骤的学习率。 - -返回:当前步骤的学习率。 - -返回类型:float - -**代码示例** - -.. 
code-block:: python - - import paddle.fluid as fluid - import numpy as np - - # example1: LearningRateDecay is not used, return value is all the same - with fluid.dygraph.guard(): - emb = fluid.dygraph.Embedding([10, 10]) - adam = fluid.optimizer.Adam(0.001, parameter_list = emb.parameters()) - lr = adam.current_step_lr() - print(lr) # 0.001 - - # example2: PiecewiseDecay is used, return the step learning rate - with fluid.dygraph.guard(): - inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") - linear = fluid.dygraph.nn.Linear(10, 10) - inp = fluid.dygraph.to_variable(inp) - out = linear(inp) - loss = fluid.layers.reduce_mean(out) - - bd = [2, 4, 6, 8] - value = [0.2, 0.4, 0.6, 0.8, 1.0] - adam = fluid.optimizer.Adam(fluid.dygraph.PiecewiseDecay(bd, value, 0), - parameter_list=linear.parameters()) - - # first step: learning rate is 0.2 - np.allclose(adam.current_step_lr(), 0.2, rtol=1e-06, atol=0.0) # True - - # learning rate for different steps - ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0] - for i in range(12): - adam.minimize(loss) - lr = adam.current_step_lr() - np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True - diff --git a/doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst b/doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst index 3eb6c0a8cdfe4bf2e109923edc8c2bbf029a5f54..b38c446571272905ab42e25ce18a463846c300cf 100644 --- a/doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/Adamax_cn.rst @@ -1,14 +1,242 @@ -.. _cn_api_fluid_optimizer_Adamax: +.. _cn_api_paddle_optimizer_Adamax: Adamax ------------------------------- -.. py:attribute:: paddle.fluid.optimizer.Adamax +.. py:class:: paddle.optimizer.Adamax(learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08, parameters=None, weight_decay=None, grad_clip=None, name=None) -``AdamaxOptimizer`` 的别名 +Adamax优化器是参考 `Adam论文 `_ 第7节Adamax优化相关内容所实现的。Adamax算法是基于无穷大范数的 `Adam `_ 算法的一个变种,使学习率更新的算法更加稳定和简单。 +其参数更新的计算公式如下: +.. math:: + \\t = t + 1 +.. math:: + moment\_out=\beta_1∗moment+(1−\beta_1)∗grad +.. math:: + inf\_norm\_out=\max{(\beta_2∗inf\_norm+\epsilon, \left|grad\right|)} +.. math:: + learning\_rate=\frac{learning\_rate}{1-\beta_1^t} +.. math:: + param\_out=param−learning\_rate*\frac{moment\_out}{inf\_norm\_out}\\ +相关论文:`Adam: A Method for Stochastic Optimization `_ + +论文中没有 ``epsilon`` 参数。但是,为了保持数值稳定性, 避免除0错误, 此处增加了这个参数。 + +参数: + - **learning_rate** (float|_LRScheduler) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个_LRScheduler类,默认值为0.001 + - **beta1** (float, 可选) - 一阶矩估计的指数衰减率,默认值为0.9 + - **beta2** (float, 可选) - 二阶矩估计的指数衰减率,默认值为0.999 + - **epsilon** (float, 可选) - 保持数值稳定性的短浮点类型值,默认值为1e-08 + - **parameters** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 + - **weight_decay** (float|WeightDecayRegularizer,可选) - 正则化方法。可以是float类型的L2正则化系数或者正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 + :ref:`cn_api_fluid_regularizer_L2Decay` 。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; + 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 + - **grad_clip** (GradientClipBase, 可选) – 梯度裁剪的策略,支持三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。 + 默认值为None,此时将不进行梯度裁剪。 + - **name** (str, 可选)- 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None + +.. note:: + 目前 ``Adamax`` 不支持 Sparse Parameter Optimization(稀疏参数优化)。 + +**代码示例** + +.. 
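A NumPy walk-through of the infinity-norm update documented above (illustrative arithmetic only; the runnable Paddle example follows):

.. code-block:: python

    import numpy as np

    param = np.array([1.0, -2.0])
    moment = np.zeros(2)
    inf_norm = np.zeros(2)
    beta1, beta2, eps, lr = 0.9, 0.999, 1e-8, 0.001

    grads = [np.array([0.3, -0.1]), np.array([0.05, -0.4])]
    for t, grad in enumerate(grads, start=1):
        moment = beta1 * moment + (1 - beta1) * grad
        inf_norm = np.maximum(beta2 * inf_norm + eps, np.abs(grad))  # running infinity norm of the gradients
        lr_t = lr / (1 - beta1 ** t)                                 # bias-corrected learning rate, as documented
        param = param - lr_t * moment / inf_norm
        print(t, param)

+.. 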
code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.mean(out) + adam = paddle.optimizer.Adamax(learning_rate=0.1, + parameters=linear.parameters()) + out.backward() + adam.step() + adam.clear_grad() + + +.. py:method:: step() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +执行一次优化器并进行参数更新。 + +返回:None。 + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + value = np.arange(26).reshape(2, 13).astype("float32") + a = paddle.to_tensor(value) + linear = paddle.nn.Linear(13, 5) + adam = paddle.optimizer.Adam(learning_rate = 0.01, + parameters = linear.parameters()) + out = linear(a) + out.backward() + adam.step() + adam.clear_grad() + +.. py:method:: minimize(loss, startup_program=None, parameters=None, no_grad_set=None) + +为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameters中的Parameters,最小化网络损失值loss。 + +参数: + - **loss** (Tensor) – 需要最小化的损失值变量 + - **startup_program** (Program, 可选) – 用于初始化parameters中参数的 :ref:`cn_api_fluid_Program` , 默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program` + - **parameters** (list, 可选) – 待更新的Parameter或者Parameter.name组成的列表, 默认值为None,此时将更新所有的Parameter + - **no_grad_set** (set, 可选) – 不需要更新的Parameter或者Parameter.name组成集合,默认值为None + +返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.mean(out) + + beta1 = paddle.to_tensor([0.9], dtype="float32") + beta2 = paddle.to_tensor([0.99], dtype="float32") + + adam = paddle.optimizer.Adamax(learning_rate=0.1, + parameters=linear.parameters(), + weight_decay=0.01) + out.backward() + adam.minimize(loss) + adam.clear_grad() + + +.. py:method:: clear_grad() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + + +清除需要优化的参数的梯度。 + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + value = np.arange(26).reshape(2, 13).astype("float32") + a = paddle.to_tensor(value) + linear = paddle.nn.Linear(13, 5) + optimizer = paddle.optimizer.Adamax(learning_rate=0.02, + parameters=linear.parameters()) + out = linear(a) + out.backward() + optimizer.step() + optimizer.clear_grad() + +.. py:method:: set_lr(value) + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +手动设置当前 ``optimizer`` 的学习率。当使用_LRScheduler时,无法使用该API手动设置学习率,因为这将导致冲突。 + +参数: + value (float) - 需要设置的学习率的值。 + +返回:None + +**代码示例** + +.. 
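Putting the methods above together, a minimal dygraph training-loop sketch built on the documented backward() / step() / clear_grad() cycle (assumes the 2.0-rc style API used throughout this page; the data and network are arbitrary):

.. code-block:: python

    import numpy as np
    import paddle

    paddle.disable_static()
    linear = paddle.nn.Linear(13, 1)
    opt = paddle.optimizer.Adamax(learning_rate=0.01, parameters=linear.parameters())

    for it in range(3):
        x = paddle.to_tensor(np.random.rand(4, 13).astype('float32'))
        y = paddle.to_tensor(np.random.rand(4, 1).astype('float32'))
        diff = linear(x) - y
        loss = paddle.mean(diff * diff)   # simple squared-error loss
        loss.backward()                   # accumulate gradients
        opt.step()                        # apply one Adamax update
        opt.clear_grad()                  # reset gradients before the next iteration
        print(it, float(loss.numpy()))

+.. 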
code-block:: python + + import paddle + paddle.disable_static() + linear = paddle.nn.Linear(10, 10) + + adam = paddle.optimizer.Adamax(0.1, parameters=linear.parameters()) + + # set learning rate manually by python float value + lr_list = [0.2, 0.3, 0.4, 0.5, 0.6] + for i in range(5): + adam.set_lr(lr_list[i]) + lr = adam.get_lr() + print("current lr is {}".format(lr)) + # Print: + # current lr is 0.2 + # current lr is 0.3 + # current lr is 0.4 + # current lr is 0.5 + # current lr is 0.6 + +.. py:method:: get_lr() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +获取当前步骤的学习率。当不使用_LRScheduler时,每次调用的返回值都相同,否则返回当前步骤的学习率。 + +返回:float,当前步骤的学习率。 + + +**代码示例** + +.. code-block:: python + + + import numpy as np + import paddle + # example1: _LRScheduler is not used, return value is all the same + paddle.disable_static() + emb = paddle.nn.Embedding(10, 10, sparse=False) + adam = paddle.optimizer.Adamax(0.001, parameters = emb.parameters()) + lr = adam.get_lr() + print(lr) # 0.001 + + # example2: PiecewiseLR is used, return the step learning rate + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.reduce_mean(out) + + bd = [2, 4, 6, 8] + value = [0.2, 0.4, 0.6, 0.8, 1.0] + scheduler = paddle.optimizer.PiecewiseLR(bd, value, 0) + adam = paddle.optimizer.Adamax(scheduler, + parameters=linear.parameters()) + + # first step: learning rate is 0.2 + np.allclose(adam.get_lr(), 0.2, rtol=1e-06, atol=0.0) # True + + # learning rate for different steps + ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0] + for i in range(12): + adam.step() + lr = adam.get_lr() + scheduler.step() + np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True diff --git a/doc/fluid/api_cn/optimizer_cn/DGCMomentumOptimizer_cn.rst b/doc/fluid/api_cn/optimizer_cn/DGCMomentumOptimizer_cn.rst index fc09d52ef887f3b056814e15b4fe78cca53d8176..f3b97033584826804284250a97d134650b813b17 100644 --- a/doc/fluid/api_cn/optimizer_cn/DGCMomentumOptimizer_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/DGCMomentumOptimizer_cn.rst @@ -3,9 +3,12 @@ DGCMomentumOptimizer ------------------------------- -**注意:该API仅支持【静态图】模式** -.. py:class:: paddle.fluid.optimizer.DGCMomentumOptimizer(learning_rate, momentum, rampup_begin_step, rampup_step=1, sparsity=[0.999], use_nesterov=False, local_grad_clip_norm=None, num_trainers=None, regularization=None, name=None) +.. 
py:class:: paddle.fluid.optimizer.DGCMomentumOptimizer(learning_rate, momentum, rampup_begin_step, rampup_step=1, sparsity=[0.999], use_nesterov=False, local_grad_clip_norm=None, num_trainers=None, regularization=None, grad_clip=None, name=None) + +:api_attr: 声明式编程模式(静态图) + + DGC(深度梯度压缩)Momentum 优化器。原始论文: https://arxiv.org/abs/1712.01887 @@ -33,7 +36,10 @@ DGC还使用动量因子掩藏(momentum factor masking)和预训练(warm-u - **use_nesterov** (bool) - 启用Nesterov momentum。 True意味着使用Nesterov。默认值False。 - **local_grad_clip_norm** (float,可选) - 局部梯度裁减标准值。可选,默认为None,表示不需要裁减。 - **num_trainers** (int,可选) - 训练节点的数量。可选,默认为None。 - - **regularization** (WeightDecayRegularizer,可选) - 正则器, 如 :ref:`cn_api_fluid_regularizer_L2DecayRegularizer`。可选,默认为None。 + - **regularization** (WeightDecayRegularizer,可选) - 正则化方法。支持两种正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 + :ref:`cn_api_fluid_regularizer_L2Decay` 。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; + 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 + - **grad_clip** (GradientClipByNorm, 可选) – 梯度裁剪的策略,``DGCMomentumOptimizer`` 仅支持 :ref:`cn_api_fluid_clip_GradientClipByNorm` 裁剪策略,如果不为该类型,将会抛出类型异常。默认值为None,此时将不进行梯度裁剪。 - **name** (str,可选) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None。 **代码示例** @@ -67,6 +73,29 @@ DGC还使用动量因子掩藏(momentum factor masking)和预训练(warm-u .. code-block:: python import paddle.fluid as fluid + + def network(): + x = fluid.layers.data(name='x', shape=[1], dtype='int64', lod_level=0) + y = fluid.layers.data(name='y', shape=[1], dtype='int64', lod_level=0) + emb_x = fluid.layers.embedding( + input=x, + size=[10, 2], + is_sparse=False) + emb_y = fluid.layers.embedding( + input=y, + size=[10, 2], + is_sparse=False) + + concat = fluid.layers.concat([emb_x, emb_y], axis=1) + + fc = fluid.layers.fc(input=concat, + name="fc", + size=1, + num_flatten_dims=1, + bias_attr=False) + loss = fluid.layers.reduce_mean(fc) + return loss + loss = network() optimizer = fluid.optimizer.SGD(learning_rate=0.1) params_grads = optimizer.backward(loss) @@ -107,7 +136,7 @@ DGC还使用动量因子掩藏(momentum factor masking)和预训练(warm-u 详见apply_gradients的示例 -.. py:method:: minimize(loss, startup_program=None, parameter_list=None, no_grad_set=None, grad_clip=None) +.. py:method:: minimize(loss, startup_program=None, parameter_list=None, no_grad_set=None) 通过更新parameter_list来添加操作,进而使损失最小化。 @@ -119,9 +148,8 @@ DGC还使用动量因子掩藏(momentum factor masking)和预训练(warm-u - **startup_program** (Program) – 用于初始化在parameter_list中参数的startup_program - **parameter_list** (list) – 待更新的Variables组成的列表 - **no_grad_set** (set|None) – 应该被无视的Variables集合 - - **grad_clip** (GradClipBase|None) – 梯度裁剪的策略 - -返回: (optimize_ops, params_grads),分别为附加的算子列表;一个由(param, grad) 变量对组成的列表,用于优化 + +返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 返回类型: tuple diff --git a/doc/fluid/api_cn/optimizer_cn/DecayedAdagradOptimizer_cn.rst b/doc/fluid/api_cn/optimizer_cn/DecayedAdagradOptimizer_cn.rst index 6341be07028835b0d1f941cc55f37d9fcd349384..7777b674d3aa0c0cebd4f3a39c3a53f00634e239 100644 --- a/doc/fluid/api_cn/optimizer_cn/DecayedAdagradOptimizer_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/DecayedAdagradOptimizer_cn.rst @@ -3,7 +3,10 @@ DecayedAdagradOptimizer ------------------------------- -.. 
py:class:: paddle.fluid.optimizer.DecayedAdagradOptimizer(learning_rate, decay=0.95, epsilon=1e-06, parameter_list=None, regularization=None, name=None) +.. py:class:: paddle.fluid.optimizer.DecayedAdagradOptimizer(learning_rate, decay=0.95, epsilon=1e-06, parameter_list=None, regularization=None, grad_clip=None, name=None) + + + Decayed Adagrad优化器,可以看做是引入了衰减率的 `Adagrad `_ 算法,用于解决使用 :ref:`cn_api_fluid_optimizer_AdagradOptimizer` 优化器时,在模型训练中后期学习率急剧下降的问题。 @@ -23,7 +26,11 @@ Decayed Adagrad优化器,可以看做是引入了衰减率的 `Adagrad `_ **模式下生效** + +手动设置当前 ``optimizer`` 的学习率。当使用LearningRateDecay时,无法使用该API手动设置学习率,因为这将导致冲突。 + +参数: + value (float|Variable) - 需要设置的学习率的值。 + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + with fluid.dygraph.guard(): + linear = fluid.dygraph.nn.Linear(10, 10) + adam = fluid.optimizer.Adam(0.1, parameter_list=linear.parameters()) + # 通过Python float数值手动设置学习率 + lr_list = [0.2, 0.3, 0.4, 0.5, 0.6] + for i in range(5): + adam.set_lr(lr_list[i]) + print("current lr is {}".format(adam.current_step_lr())) + # 打印结果: + # current lr is 0.2 + # current lr is 0.3 + # current lr is 0.4 + # current lr is 0.5 + # current lr is 0.6 + + + # 通过 框架的Variable 设置学习率 + lr_var = fluid.layers.create_global_var(shape=[1], value=0.7, dtype='float32') + adam.set_lr(lr_var) + print("current lr is {}".format(adam.current_step_lr())) + # 打印结果: + # current lr is 0.7 + + .. py:method:: current_step_lr() diff --git a/doc/fluid/api_cn/optimizer_cn/DecayedAdagrad_cn.rst b/doc/fluid/api_cn/optimizer_cn/DecayedAdagrad_cn.rst index 8cf6f966b68de7714e807fedd7912f8bcf0f185a..215758e1f13843b4f60d299c229bac6b7721c5c4 100644 --- a/doc/fluid/api_cn/optimizer_cn/DecayedAdagrad_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/DecayedAdagrad_cn.rst @@ -5,6 +5,9 @@ DecayedAdagrad .. py:attribute:: paddle.fluid.optimizer.DecayedAdagrad + + + ``DecayedAdagradOptimizer`` 的别名 diff --git a/doc/fluid/api_cn/optimizer_cn/DpsgdOptimizer_cn.rst b/doc/fluid/api_cn/optimizer_cn/DpsgdOptimizer_cn.rst index 19a7e4c1f459c191a4ed4ad418c845963d78b32a..6bfeeec2248ee46f5f118aa73905996fd2982bac 100644 --- a/doc/fluid/api_cn/optimizer_cn/DpsgdOptimizer_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/DpsgdOptimizer_cn.rst @@ -5,6 +5,9 @@ DpsgdOptimizer .. py:class:: paddle.fluid.optimizer.DpsgdOptimizer(learning_rate=0.001, clip=0.9, batch_size=0.999, sigma=1e-8) + + + Dpsgd优化器是参考CCS16论文 `《Deep Learning with Differential Privacy》 `_ 相关内容实现的。 其参数更新的计算公式如下: @@ -54,7 +57,7 @@ Dpsgd优化器是参考CCS16论文 `《Deep Learning with Differential Privacy feed={'X': x}, fetch_list=[loss.name]) -.. py:method:: minimize(loss, startup_program=None, parameter_list=None, no_grad_set=None, grad_clip=None) +.. 
py:method:: minimize(loss, startup_program=None, parameter_list=None, no_grad_set=None) 为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameter_list中的Parameters,最小化网络损失值loss。 @@ -63,9 +66,8 @@ Dpsgd优化器是参考CCS16论文 `《Deep Learning with Differential Privacy - **startup_program** (Program, 可选) – 用于初始化parameter_list中参数的 :ref:`cn_api_fluid_Program` , 默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program` - **parameter_list** (list, 可选) – 待更新的Parameter或者Parameter.name组成的列表, 默认值为None,此时将更新所有的Parameter - **no_grad_set** (set, 可选) – 不需要更新的Parameter或者Parameter.name组成集合,默认值为None - - **grad_clip** (GradClipBase, 可选) – 梯度裁剪的策略,静态图模式不需要使用本参数,当前本参数只支持在dygraph模式下的梯度裁剪,未来本参数可能会调整,默认值为None - -返回: (optimize_ops, params_grads),数据类型为(list, list),其中optimize_ops是minimize接口为网络添加的OP列表,params_grads是一个由(param, grad)变量对组成的列表,param是Parameter,grad是该Parameter对应的梯度值 + +返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 **代码示例**: diff --git a/doc/fluid/api_cn/optimizer_cn/Dpsgd_cn.rst b/doc/fluid/api_cn/optimizer_cn/Dpsgd_cn.rst index cea679cd94b82e1960f66f60a27cb402889cf708..a6fde680ba5c3c0e82913e85b1bac83aa8d7623c 100644 --- a/doc/fluid/api_cn/optimizer_cn/Dpsgd_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/Dpsgd_cn.rst @@ -5,6 +5,9 @@ Dpsgd .. py:attribute:: paddle.fluid.optimizer.Dpsgd + + + ``DpsgdOptimizer`` 的别名 diff --git a/doc/fluid/api_cn/optimizer_cn/ExponentialMovingAverage_cn.rst b/doc/fluid/api_cn/optimizer_cn/ExponentialMovingAverage_cn.rst index c10b81866feb9d874341d8feee1b2b7e6df8e719..f2e6bfa7d4533a26c8c7d83f4b15f72fead6b459 100644 --- a/doc/fluid/api_cn/optimizer_cn/ExponentialMovingAverage_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/ExponentialMovingAverage_cn.rst @@ -3,10 +3,13 @@ ExponentialMovingAverage ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:class:: paddle.fluid.optimizer.ExponentialMovingAverage(decay=0.999, thres_steps=None, name=None) +:api_attr: 声明式编程模式(静态图) + + + 用指数衰减计算参数的滑动平均值。给定参数 :math:`\theta` ,它的指数滑动平均值 (exponential moving average, EMA) 为 .. math:: diff --git a/doc/fluid/api_cn/optimizer_cn/FtrlOptimizer_cn.rst b/doc/fluid/api_cn/optimizer_cn/FtrlOptimizer_cn.rst index cbdd0923095afa5ec09811734e680828495372b8..149c890ea37a12d5983a34d3a6ee0a677d7ad65a 100644 --- a/doc/fluid/api_cn/optimizer_cn/FtrlOptimizer_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/FtrlOptimizer_cn.rst @@ -3,7 +3,10 @@ FtrlOptimizer ------------------------------- -.. py:class:: paddle.fluid.optimizer.FtrlOptimizer(learning_rate, l1=0.0, l2=0.0, lr_power=-0.5, parameter_list=None, regularization=None, name=None) +.. py:class:: paddle.fluid.optimizer.FtrlOptimizer(learning_rate, l1=0.0, l2=0.0, lr_power=-0.5, parameter_list=None, regularization=None, grad_clip=None, name=None) + + + 该接口实现FTRL (Follow The Regularized Leader) Optimizer. 
@@ -34,7 +37,11 @@ FTRL 原始论文: ( `https://www.eecs.tufts.edu/~dsculley/papers/ad-click-predi - **l1** (float,可选) - L1 regularization strength,默认值0.0。 - **l2** (float,可选) - L2 regularization strength,默认值0.0。 - **lr_power** (float,可选) - 学习率降低指数,默认值-0.5。 - - **regularization** - 正则化器,例如 ``fluid.regularizer.L2DecayRegularizer`` 。 + - **regularization** (WeightDecayRegularizer,可选) - 正则化方法。支持两种正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 + :ref:`cn_api_fluid_regularizer_L2Decay` 。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; + 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 + - **grad_clip** (GradientClipBase, 可选) – 梯度裁剪的策略,支持三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。 + 默认值为None,此时将不进行梯度裁剪。 - **name** (str, 可选) - 可选的名称前缀,一般无需设置,默认值为None。 抛出异常: @@ -73,7 +80,7 @@ FTRL 原始论文: ( `https://www.eecs.tufts.edu/~dsculley/papers/ad-click-predi **注意:目前, FtrlOptimizer 不支持 sparse parameter optimization。** -.. py:method:: minimize(loss, startup_program=None, parameter_list=None, no_grad_set=None, grad_clip=None) +.. py:method:: minimize(loss, startup_program=None, parameter_list=None, no_grad_set=None) 通过更新parameter_list来添加操作,进而使损失最小化。 @@ -85,9 +92,8 @@ FTRL 原始论文: ( `https://www.eecs.tufts.edu/~dsculley/papers/ad-click-predi - **startup_program** (Program, 可选) – 用于初始化parameter_list中参数的 :ref:`cn_api_fluid_Program` , 默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program` - **parameter_list** (list, 可选) – 待更新的Parameter或者Parameter.name组成的列表, 默认值为None,此时将更新所有的Parameter - **no_grad_set** (set, 可选) – 不需要更新的Parameter或者Parameter.name组成的集合,默认值为None - - **grad_clip** (GradClipBase, 可选) – 梯度裁剪的策略,静态图模式不需要使用本参数,当前本参数只支持在dygraph模式下的梯度裁剪,未来本参数可能会调整,默认值为None - -返回: (optimize_ops, params_grads),数据类型为(list, list),其中optimize_ops是minimize接口为网络添加的OP列表,params_grads是一个由(param, grad)变量对组成的列表,param是Parameter,grad是该Parameter对应的梯度值 + +返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 返回类型: tuple @@ -119,6 +125,48 @@ FTRL 原始论文: ( `https://www.eecs.tufts.edu/~dsculley/papers/ad-click-predi optimizer.minimize(out) optimizer.clear_gradients() +.. py:method:: set_lr() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +手动设置当前 ``optimizer`` 的学习率。当使用LearningRateDecay时,无法使用该API手动设置学习率,因为这将导致冲突。 + +参数: + value (float|Variable) - 需要设置的学习率的值。 + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + with fluid.dygraph.guard(): + linear = fluid.dygraph.nn.Linear(10, 10) + adam = fluid.optimizer.Adam(0.1, parameter_list=linear.parameters()) + # 通过Python float数值手动设置学习率 + lr_list = [0.2, 0.3, 0.4, 0.5, 0.6] + for i in range(5): + adam.set_lr(lr_list[i]) + print("current lr is {}".format(adam.current_step_lr())) + # 打印结果: + # current lr is 0.2 + # current lr is 0.3 + # current lr is 0.4 + # current lr is 0.5 + # current lr is 0.6 + + + # 通过 框架的Variable 设置学习率 + lr_var = fluid.layers.create_global_var(shape=[1], value=0.7, dtype='float32') + adam.set_lr(lr_var) + print("current lr is {}".format(adam.current_step_lr())) + # 打印结果: + # current lr is 0.7 + .. 
py:method:: current_step_lr() diff --git a/doc/fluid/api_cn/optimizer_cn/Ftrl_cn.rst b/doc/fluid/api_cn/optimizer_cn/Ftrl_cn.rst index ccdc903919647ac25cbf0e8c7966517596549ada..51856bf25d8165751d74df062408e21a517af929 100644 --- a/doc/fluid/api_cn/optimizer_cn/Ftrl_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/Ftrl_cn.rst @@ -5,6 +5,9 @@ Ftrl .. py:attribute:: paddle.fluid.optimizer.Ftrl + + + ``FtrlOptimizer`` 的别名 diff --git a/doc/fluid/api_cn/optimizer_cn/LambOptimizer_cn.rst b/doc/fluid/api_cn/optimizer_cn/LambOptimizer_cn.rst index 60dd8f4db8a428fbc61de426267ef680dff0c811..0be07027ecd6260fc691b11ee1cb38fbf72ba143 100644 --- a/doc/fluid/api_cn/optimizer_cn/LambOptimizer_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/LambOptimizer_cn.rst @@ -3,7 +3,10 @@ LambOptimizer ------------------------------- -.. py:class:: paddle.fluid.optimizer.LambOptimizer(learning_rate=0.001, lamb_weight_decay=0.01, beta1=0.9, beta2=0.999, epsilon=1e-06, parameter_list=None, regularization=None, exclude_from_weight_decay_fn=None, name=None) +.. py:class:: paddle.fluid.optimizer.LambOptimizer(learning_rate=0.001, lamb_weight_decay=0.01, beta1=0.9, beta2=0.999, epsilon=1e-06, parameter_list=None, regularization=None, grad_clip=None, exclude_from_weight_decay_fn=None, name=None) + + + LAMB(Layer-wise Adaptive Moments optimizer for Batching training)优化器 LAMB的优化器旨在不降低精度的前提下增大训练的批量大小,其支持自适应的逐元素更新和精确的分层校正。 更多信息请参考 `Large Batch Optimization for @@ -30,7 +33,11 @@ Deep Learning: Training BERT in 76 minutes `_ **模式下生效** + +手动设置当前 ``optimizer`` 的学习率。当使用LearningRateDecay时,无法使用该API手动设置学习率,因为这将导致冲突。 + +参数: + value (float|Variable) - 需要设置的学习率的值。 + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + with fluid.dygraph.guard(): + linear = fluid.dygraph.nn.Linear(10, 10) + adam = fluid.optimizer.Adam(0.1, parameter_list=linear.parameters()) + # 通过Python float数值手动设置学习率 + lr_list = [0.2, 0.3, 0.4, 0.5, 0.6] + for i in range(5): + adam.set_lr(lr_list[i]) + print("current lr is {}".format(adam.current_step_lr())) + # 打印结果: + # current lr is 0.2 + # current lr is 0.3 + # current lr is 0.4 + # current lr is 0.5 + # current lr is 0.6 + + + # 通过 框架的Variable 设置学习率 + lr_var = fluid.layers.create_global_var(shape=[1], value=0.7, dtype='float32') + adam.set_lr(lr_var) + print("current lr is {}".format(adam.current_step_lr())) + # 打印结果: + # current lr is 0.7 + + .. py:method:: current_step_lr() diff --git a/doc/fluid/api_cn/optimizer_cn/LarsMomentumOptimizer_cn.rst b/doc/fluid/api_cn/optimizer_cn/LarsMomentumOptimizer_cn.rst index bec1c01565a9180fa5c91c236df0ffd3f24604b9..63f22d05a09267924e4e5081fec6d2e4b757910d 100644 --- a/doc/fluid/api_cn/optimizer_cn/LarsMomentumOptimizer_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/LarsMomentumOptimizer_cn.rst @@ -3,7 +3,10 @@ LarsMomentumOptimizer ------------------------------- -.. py:class:: paddle.fluid.optimizer.LarsMomentumOptimizer(learning_rate, momentum, lars_coeff=0.001, lars_weight_decay=0.0005, parameter_list=None, regularization=None, name=None) +.. 
py:class:: paddle.fluid.optimizer.LarsMomentumOptimizer(learning_rate, momentum, lars_coeff=0.001, lars_weight_decay=0.0005, parameter_list=None, regularization=None, grad_clip=None, name=None) + + + 该接口实现LARS支持的Momentum优化器 @@ -22,7 +25,11 @@ LarsMomentumOptimizer - **parameter_list** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 - **lars_coeff** (float,可选) - 定义LARS本地学习率的权重,默认值0.001。 - **lars_weight_decay** (float,可选) - 使用LARS进行衰减的权重衰减系数,默认值0.0005。 - - **regularization** - 正则化函数,例如 :code:`fluid.regularizer.L2DecayRegularizer`。 + - **regularization** (WeightDecayRegularizer,可选) - 正则化方法。支持两种正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 + :ref:`cn_api_fluid_regularizer_L2Decay` 。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; + 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 + - **grad_clip** (GradientClipBase, 可选) – 梯度裁剪的策略,支持三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。 + 默认值为None,此时将不进行梯度裁剪。 - **name** (str, 可选) - 可选的名称前缀,一般无需设置,默认值为None。 @@ -31,6 +38,7 @@ LarsMomentumOptimizer .. code-block:: python import paddle.fluid as fluid + import numpy as np np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32) inp = fluid.layers.data( @@ -48,7 +56,7 @@ LarsMomentumOptimizer -.. py:method:: minimize(loss, startup_program=None, parameter_list=None, no_grad_set=None, grad_clip=None) +.. py:method:: minimize(loss, startup_program=None, parameter_list=None, no_grad_set=None) 通过更新parameter_list来添加操作,进而使损失最小化。 @@ -60,9 +68,8 @@ LarsMomentumOptimizer - **startup_program** (Program, 可选) – 用于初始化parameter_list中参数的 :ref:`cn_api_fluid_Program` , 默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program` - **parameter_list** (list, 可选) – 待更新的Parameter或者Parameter.name组成的列表, 默认值为None,此时将更新所有的Parameter - **no_grad_set** (set, 可选) – 不需要更新的Parameter或者Parameter.name组成的的集合,默认值为None - - **grad_clip** (GradClipBase, 可选) – 梯度裁剪的策略,静态图模式不需要使用本参数,当前本参数只支持在dygraph模式下的梯度裁剪,未来本参数可能会调整,默认值为None - -返回: (optimize_ops, params_grads),数据类型为(list, list),其中optimize_ops是minimize接口为网络添加的OP列表,params_grads是一个由(param, grad)变量对组成的列表,param是Parameter,grad是该Parameter对应的梯度值 + +返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 返回类型: tuple @@ -94,6 +101,49 @@ LarsMomentumOptimizer optimizer.minimize(out) optimizer.clear_gradients() +.. py:method:: set_lr() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +手动设置当前 ``optimizer`` 的学习率。当使用LearningRateDecay时,无法使用该API手动设置学习率,因为这将导致冲突。 + +参数: + value (float|Variable) - 需要设置的学习率的值。 + +返回:无 + +**代码示例** + +.. 
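code-block:: python
+
+    # 补充示例(假设性演示):直接在 LarsMomentumOptimizer 上调用 set_lr,
+    # 并演示构造时传入本页新增的 grad_clip 参数(这里以 GradientClipByGlobalNorm 为例);
+    # 更完整的 set_lr 用法见下一个代码块。
+    import paddle.fluid as fluid
+
+    with fluid.dygraph.guard():
+        linear = fluid.dygraph.nn.Linear(10, 10)
+        clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0)
+        lars = fluid.optimizer.LarsMomentumOptimizer(
+            learning_rate=0.1,
+            momentum=0.9,
+            parameter_list=linear.parameters(),
+            grad_clip=clip)
+        lars.set_lr(0.05)
+        print("current lr is {}".format(lars.current_step_lr()))
+        # current lr is 0.05
+
+.. 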
code-block:: python + + import paddle.fluid as fluid + + with fluid.dygraph.guard(): + linear = fluid.dygraph.nn.Linear(10, 10) + adam = fluid.optimizer.Adam(0.1, parameter_list=linear.parameters()) + # 通过Python float数值手动设置学习率 + lr_list = [0.2, 0.3, 0.4, 0.5, 0.6] + for i in range(5): + adam.set_lr(lr_list[i]) + print("current lr is {}".format(adam.current_step_lr())) + # 打印结果: + # current lr is 0.2 + # current lr is 0.3 + # current lr is 0.4 + # current lr is 0.5 + # current lr is 0.6 + + + # 通过 框架的Variable 设置学习率 + lr_var = fluid.layers.create_global_var(shape=[1], value=0.7, dtype='float32') + adam.set_lr(lr_var) + print("current lr is {}".format(adam.current_step_lr())) + # 打印结果: + # current lr is 0.7 + + .. py:method:: current_step_lr() diff --git a/doc/fluid/api_cn/optimizer_cn/LarsMomentum_cn.rst b/doc/fluid/api_cn/optimizer_cn/LarsMomentum_cn.rst index 4042f077eac6ffb0094d09c514decd11d2f638bf..d5d4db1b2d8fc6eb306b40ab6d24a58418aa69e2 100644 --- a/doc/fluid/api_cn/optimizer_cn/LarsMomentum_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/LarsMomentum_cn.rst @@ -5,6 +5,9 @@ LarsMomentum .. py:attribute:: paddle.fluid.optimizer.LarsMomentum + + + ``fluid.optimizer.LarsMomentumOptimizer`` 的别名 diff --git a/doc/fluid/api_cn/optimizer_cn/LookaheadOptimizer_cn.rst b/doc/fluid/api_cn/optimizer_cn/LookaheadOptimizer_cn.rst index e41690bef88c5c0341a4a7316d14c9f0c3a38d95..2ea449f453bdeecc0e2d9e9e473218b04cfb767e 100644 --- a/doc/fluid/api_cn/optimizer_cn/LookaheadOptimizer_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/LookaheadOptimizer_cn.rst @@ -3,10 +3,13 @@ LookaheadOptimizer ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:class:: paddle.fluid.optimizer.LookaheadOptimizer(inner_optimizer, alpha=0.5, k=5) +:api_attr: 声明式编程模式(静态图) + + + 本类实现了Lookahead优化算法:https://arxiv.org/abs/1907.08610。Lookahead优化算法在内存中保存两部分参数:快参数和慢参数。每个训练步次,inner_optimizer都更新快参数;每隔k个训练步次,Lookahead更新慢参数,如下: .. math:: @@ -26,7 +29,7 @@ LookaheadOptimizer import paddle import paddle.fluid as fluid - import numpy as np + import numpy.random as random x = fluid.layers.data(name='x', shape=[2], dtype='float32') label = fluid.layers.data(name="label", shape=[1], dtype="int64") @@ -43,11 +46,14 @@ LookaheadOptimizer exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) + def train_reader(limit=5): + for i in range(limit): + yield random.random([2]).astype('float32'), random.random([1]).astype('int64') + feeder = fluid.DataFeeder(feed_list=[x, label], place=place) - - step = 0 - while(step < 10): - step += 1 + reader = paddle.batch(paddle.reader.shuffle(train_reader, buf_size=50000),batch_size=1) + + for batch_data in reader(): exe.run(fluid.default_main_program(), feed=feeder.feed(batch_data)) diff --git a/doc/fluid/api_cn/optimizer_cn/ModelAverage_cn.rst b/doc/fluid/api_cn/optimizer_cn/ModelAverage_cn.rst index c9fb4e1b06c1531f74ac484b42e0cef9bcbab1e2..9ef6dfeae328669f5f385d7db0bb8a2249f5f2ad 100644 --- a/doc/fluid/api_cn/optimizer_cn/ModelAverage_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/ModelAverage_cn.rst @@ -3,10 +3,13 @@ ModelAverage ------------------------------- -**注意:该API仅支持【静态图】模式** .. 
py:class:: paddle.fluid.optimizer.ModelAverage(average_window_rate, min_average_window=10000, max_average_window=10000, regularization=None, name=None) +:api_attr: 声明式编程模式(静态图) + + + ModelAverage优化器,在训练过程中累积特定连续的历史Parameters,累积的历史范围可以用传入的average_window参数来控制,在预测时使用平均后的Parameters,通常可以提高预测的精度。 在滑动窗口中累积Parameters的平均值,将结果将保存在临时变量中,通过调用 ``apply()`` 方法可应用于当前模型的Parameters,使用 ``restore()`` 方法恢复当前模型Parameters的值。 @@ -26,7 +29,9 @@ ModelAverage优化器,在训练过程中累积特定连续的历史Parameters - **average_window_rate** (float) – 相对于Parameters更新次数的窗口长度计算比率 - **min_average_window** (int, 可选) – 平均值计算窗口长度的最小值,默认值为10000 - **max_average_window** (int, 可选) – 平均值计算窗口长度的最大值,推荐设置为一轮训练中mini-batchs的数目,默认值为10000 - - **regularization** (WeightDecayRegularizer, 可选) – 正则化函数,用于减少泛化误差。例如可以是 :ref:`cn_api_fluid_regularizer_L2DecayRegularizer` ,默认值为None + - **regularization** (WeightDecayRegularizer,可选) - 正则化方法。支持两种正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 + :ref:`cn_api_fluid_regularizer_L2Decay` 。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; + 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 - **name** (str, 可选)– 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None **代码示例** diff --git a/doc/fluid/api_cn/optimizer_cn/MomentumOptimizer_cn.rst b/doc/fluid/api_cn/optimizer_cn/MomentumOptimizer_cn.rst index 2feb44a3a6a2eb9dfefa624647d95cb333a9b57c..c2c39c5a8fad49c25a80ba2668eb0a332698dda7 100644 --- a/doc/fluid/api_cn/optimizer_cn/MomentumOptimizer_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/MomentumOptimizer_cn.rst @@ -3,7 +3,10 @@ MomentumOptimizer ------------------------------- -.. py:class:: paddle.fluid.optimizer.MomentumOptimizer(learning_rate, momentum, parameter_list=None, use_nesterov=False, regularization=None, name=None) +.. py:class:: paddle.fluid.optimizer.MomentumOptimizer(learning_rate, momentum, parameter_list=None, use_nesterov=False, regularization=None, grad_clip=None, name=None) + + + 该接口实现含有速度状态的Simple Momentum 优化器 @@ -20,7 +23,11 @@ MomentumOptimizer - **momentum** (float) - 动量因子。 - **parameter_list** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 - **use_nesterov** (bool,可选) - 赋能牛顿动量,默认值False。 - - **regularization** - 正则化函数,,例如 :code:`fluid.regularizer.L2DecayRegularizer`,默认值None。 + - **regularization** (WeightDecayRegularizer,可选) - 正则化方法。支持两种正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 + :ref:`cn_api_fluid_regularizer_L2Decay` 。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; + 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 + - **grad_clip** (GradientClipBase, 可选) – 梯度裁剪的策略,支持三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。 + 默认值为None,此时将不进行梯度裁剪。 - **name** (str, 可选) - 可选的名称前缀,一般无需设置,默认值为None。 **代码示例**: @@ -54,7 +61,7 @@ MomentumOptimizer -.. py:method:: minimize(loss, startup_program=None, parameter_list=None, no_grad_set=None, grad_clip=None) +.. 
py:method:: minimize(loss, startup_program=None, parameter_list=None, no_grad_set=None) 为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameter_list中的Parameters,最小化网络损失值loss。 @@ -63,9 +70,8 @@ MomentumOptimizer - **startup_program** (Program, 可选) – 用于初始化parameter_list中参数的 :ref:`cn_api_fluid_Program` , 默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program` - **parameter_list** (list, 可选) – 待更新的Parameter或者Parameter.name组成的列表, 默认值为None,此时将更新所有的Parameter - **no_grad_set** (set, 可选) – 不需要更新的Parameter或者Parameter.name组成的集合,默认值为None - - **grad_clip** (GradClipBase, 可选) – 梯度裁剪的策略,静态图模式不需要使用本参数,当前本参数只支持在dygraph模式下的梯度裁剪,未来本参数可能会调整,默认值为None - -返回: (optimize_ops, params_grads),数据类型为(list, list),其中optimize_ops是minimize接口为网络添加的OP列表,params_grads是一个由(param, grad)变量对组成的列表,param是Parameter,grad是该Parameter对应的梯度值 + +返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 返回类型: tuple @@ -128,6 +134,50 @@ MomentumOptimizer optimizer.clear_gradients() +.. py:method:: set_lr() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +手动设置当前 ``optimizer`` 的学习率。当使用LearningRateDecay时,无法使用该API手动设置学习率,因为这将导致冲突。 + +参数: + value (float|Variable) - 需要设置的学习率的值。 + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + with fluid.dygraph.guard(): + linear = fluid.dygraph.nn.Linear(10, 10) + adam = fluid.optimizer.Adam(0.1, parameter_list=linear.parameters()) + # 通过Python float数值手动设置学习率 + lr_list = [0.2, 0.3, 0.4, 0.5, 0.6] + for i in range(5): + adam.set_lr(lr_list[i]) + print("current lr is {}".format(adam.current_step_lr())) + # 打印结果: + # current lr is 0.2 + # current lr is 0.3 + # current lr is 0.4 + # current lr is 0.5 + # current lr is 0.6 + + + # 通过 框架的Variable 设置学习率 + lr_var = fluid.layers.create_global_var(shape=[1], value=0.7, dtype='float32') + adam.set_lr(lr_var) + print("current lr is {}".format(adam.current_step_lr())) + # 打印结果: + # current lr is 0.7 + + + .. py:method:: current_step_lr() **注意:** diff --git a/doc/fluid/api_cn/optimizer_cn/Momentum_cn.rst b/doc/fluid/api_cn/optimizer_cn/Momentum_cn.rst index bc4d35164a531f1a9e535643542bd2b1322d8d8a..80e940e19f2fbc1a92e60f20fe32a3200c8ba94b 100644 --- a/doc/fluid/api_cn/optimizer_cn/Momentum_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/Momentum_cn.rst @@ -5,6 +5,9 @@ Momentum .. py:attribute:: paddle.fluid.optimizer.Momentum + + + ``MomentumOptimizer`` 的别名 diff --git a/doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst b/doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5aa31be296502db20d199843a4613537185caa1d --- /dev/null +++ b/doc/fluid/api_cn/optimizer_cn/Optimizer_cn.rst @@ -0,0 +1,220 @@ +.. _cn_api_paddle_optimizer_Optimizer: + +Optimizer +------------------------------- + +.. 
py:class:: paddle.optimizer.Optimizer(learning_rate=0.001, epsilon=1e-08, parameters=None, weight_decay=None, grad_clip=None, name=None) + + + +优化器的基类。 + +参数: + - **learning_rate** (float|_LRSeduler) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个_LRScheduler类,默认值为0.001 + - **parameters** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 + - **weight_decay** (float|WeightDecayRegularizer,可选) - 正则化方法。可以是float类型的L2正则化系数或者正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 + :ref:`cn_api_fluid_regularizer_L2Decay` 。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; + 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 + - **grad_clip** (GradientClipBase, 可选) – 梯度裁剪的策略,支持三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。 + 默认值为None,此时将不进行梯度裁剪。 + - **name** (str, 可选)- 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None + + +**代码示例** + +.. code-block:: python + + #以子类Adam为例 + import paddle + import numpy as np + + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.mean(out) + adam = paddle.optimizer.Adam(learning_rate=0.1, + parameters=linear.parameters()) + out.backward() + adam.step() + adam.clear_grad() + +.. py:method:: step() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +执行一次优化器并进行参数更新。 + +返回:None。 + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + value = np.arange(26).reshape(2, 13).astype("float32") + a = paddle.to_tensor(value) + linear = paddle.nn.Linear(13, 5) + # This can be any optimizer supported by dygraph. + adam = paddle.optimizer.Adam(learning_rate = 0.01, + parameters = linear.parameters()) + out = linear(a) + out.backward() + adam.step() + adam.clear_grad() + +.. py:method:: minimize(loss, startup_program=None, parameters=None, no_grad_set=None) + +为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameters中的Parameters,最小化网络损失值loss。 + +参数: + - **loss** (Tensor) – 需要最小化的损失值变量 + - **startup_program** (Program, 可选) – 用于初始化parameters中参数的 :ref:`cn_api_fluid_Program` , 默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program` + - **parameters** (list, 可选) – 待更新的Parameter或者Parameter.name组成的列表, 默认值为None,此时将更新所有的Parameter + - **no_grad_set** (set, 可选) – 不需要更新的Parameter或者Parameter.name组成的集合,默认值为None + +返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.mean(out) + + beta1 = paddle.to_tensor([0.9], dtype="float32") + beta2 = paddle.to_tensor([0.99], dtype="float32") + + adam = paddle.optimizer.Adam(learning_rate=0.1, + parameters=linear.parameters(), + weight_decay=0.01) + out.backward() + adam.minimize(loss) + adam.clear_grad() + +.. py:method:: clear_grad() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + + +清除需要优化的参数的梯度。 + +**代码示例** + +.. 
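code-block:: python
+
+    # 补充示例(假设性演示):在多个 batch 的训练循环中,每次参数更新后调用
+    # clear_grad() 清空梯度,避免梯度在不同 batch 之间累积;单步用法见下一个代码块。
+    import paddle
+    import numpy as np
+
+    paddle.disable_static()
+    linear = paddle.nn.Linear(13, 5)
+    adam = paddle.optimizer.Adam(learning_rate=0.01,
+                                 parameters=linear.parameters())
+    for batch_id in range(3):
+        value = np.random.uniform(-1, 1, [2, 13]).astype("float32")
+        out = linear(paddle.to_tensor(value))
+        loss = paddle.mean(out)
+        loss.backward()
+        adam.step()
+        adam.clear_grad()
+
+.. 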
code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + value = np.arange(26).reshape(2, 13).astype("float32") + a = paddle.to_tensor(value) + linear = paddle.nn.Linear(13, 5) + optimizer = paddle.optimizer.Adam(learning_rate=0.02, + parameters=linear.parameters()) + out = linear(a) + out.backward() + optimizer.step() + optimizer.clear_grad() + +.. py:method:: set_lr(value) + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +手动设置当前 ``optimizer`` 的学习率。当使用_LRScheduler时,无法使用该API手动设置学习率,因为这将导致冲突。 + +参数: + value (float) - 需要设置的学习率的值。 + +返回:None + +**代码示例** + +.. code-block:: python + + import paddle + paddle.disable_static() + linear = paddle.nn.Linear(10, 10) + + adam = paddle.optimizer.Adam(0.1, parameters=linear.parameters()) + + # set learning rate manually by python float value + lr_list = [0.2, 0.3, 0.4, 0.5, 0.6] + for i in range(5): + adam.set_lr(lr_list[i]) + lr = adam.get_lr() + print("current lr is {}".format(lr)) + # Print: + # current lr is 0.2 + # current lr is 0.3 + # current lr is 0.4 + # current lr is 0.5 + # current lr is 0.6 + +.. py:method:: get_lr() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +获取当前步骤的学习率。当不使用_LRScheduler时,每次调用的返回值都相同,否则返回当前步骤的学习率。 + +返回:float,当前步骤的学习率。 + + +**代码示例** + +.. code-block:: python + + import numpy as np + import paddle + # example1: _LRScheduler is not used, return value is all the same + paddle.disable_static() + emb = paddle.nn.Embedding(10, 10, sparse=False) + adam = paddle.optimizer.Adam(0.001, parameters = emb.parameters()) + lr = adam.get_lr() + print(lr) # 0.001 + + # example2: PiecewiseLR is used, return the step learning rate + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.reduce_mean(out) + + bd = [2, 4, 6, 8] + value = [0.2, 0.4, 0.6, 0.8, 1.0] + scheduler = paddle.optimizer.PiecewiseLR(bd, value, 0) + adam = paddle.optimizer.Adam(scheduler, + parameters=linear.parameters()) + + # first step: learning rate is 0.2 + np.allclose(adam.get_lr(), 0.2, rtol=1e-06, atol=0.0) # True + + # learning rate for different steps + ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0] + for i in range(12): + adam.step() + lr = adam.get_lr() + scheduler.step() + np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True + diff --git a/doc/fluid/api_cn/optimizer_cn/PipelineOptimizer_cn.rst b/doc/fluid/api_cn/optimizer_cn/PipelineOptimizer_cn.rst deleted file mode 100644 index e0dae3a34fd1a615be6b29cbb143677ee8cf447d..0000000000000000000000000000000000000000 --- a/doc/fluid/api_cn/optimizer_cn/PipelineOptimizer_cn.rst +++ /dev/null @@ -1,67 +0,0 @@ -.. _cn_api_fluid_optimizer_PipelineOptimizer: - -PipelineOptimizer -------------------------------- - -**注意:该API仅支持【静态图】模式** - -.. py:class:: paddle.fluid.optimizer.PipelineOptimizer(optimizer, cut_list=None, place_list=None, concurrency_list=None, queue_size=30, sync_steps=1, start_cpu_core_id=0) - -使用流水线模式进行训练。 -Program会根据切分列表cut_list进行分割。如果cut_list的长度是k,则整个program(包括反向部分)将被分割为2*k-1个section。 所以place_list和concurrency_list的长度也必须是2*k-1。 - -.. 
note:: - - 虽然我们在流水线训练模式中采用异步更新的方式来加速,但最终的效果会依赖于每条流水线的训练进程。我们将在未来尝试同步模式。 - -参数: - - **optimizer** (Optimizer) - 基础优化器,如SGD - - **cut_list** (list of Variable list) - main_program的cut变量列表 - - **place_list** (list of Place) - 对应section运行所在的place - - **concurrency_list** (list of int) - 指定每个section的并发度列表 - - **queue_size** (int) - 每个section都会消费其输入队列(in-scope queue)中的scope,并向输出队列(out-scope queue)产出scope。 此参数的作用就是指定队列的大小。 可选,默认值:30 - - **sync_steps** (int) - 不同显卡之间的同步周期数。可选,默认值:1 - - **start_cpu_core_id** (int) - 指定所使用的第一个CPU核的id。可选,默认值:0 - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - import paddle.fluid.layers as layers - x = fluid.layers.data(name='x', shape=[1], dtype='int64', lod_level=0) - y = fluid.layers.data(name='y', shape=[1], dtype='int64', lod_level=0) - emb_x = layers.embedding(input=x, param_attr=fluid.ParamAttr(name="embx"), size=[10,2], is_sparse=False) - emb_y = layers.embedding(input=y, param_attr=fluid.ParamAttr(name="emby",learning_rate=0.9), size=[10,2], is_sparse=False) - concat = layers.concat([emb_x, emb_y], axis=1) - fc = layers.fc(input=concat, name="fc", size=1, num_flatten_dims=1, bias_attr=False) - loss = layers.reduce_mean(fc) - optimizer = fluid.optimizer.SGD(learning_rate=0.5) - optimizer = fluid.optimizer.PipelineOptimizer(optimizer, - cut_list=[[emb_x, emb_y], [loss]], - place_list=[fluid.CPUPlace(), fluid.CUDAPlace(0), fluid.CPUPlace()], - concurrency_list=[1, 1, 4], - queue_size=2, - sync_steps=1, - ) - optimizer.minimize(loss) - place = fluid.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) - filelist = [] # you should set your own filelist, e.g. filelist = ["dataA.txt"] - dataset = fluid.DatasetFactory().create_dataset("FileInstantDataset") - dataset.set_use_var([x,y]) - dataset.set_batch_size(batch_size) - dataset.set_filelist(filelist) - exe.train_from_dataset( - fluid.default_main_program(), - dataset, - thread=2, - debug=False, - fetch_list=[], - fetch_info=[], - print_period=1) - - - - diff --git a/doc/fluid/api_cn/optimizer_cn/RMSPropOptimizer_cn.rst b/doc/fluid/api_cn/optimizer_cn/RMSPropOptimizer_cn.rst deleted file mode 100644 index 459ca943d644aa9c74bbbc154cc565f94ad9aa71..0000000000000000000000000000000000000000 --- a/doc/fluid/api_cn/optimizer_cn/RMSPropOptimizer_cn.rst +++ /dev/null @@ -1,197 +0,0 @@ -.. _cn_api_fluid_optimizer_RMSPropOptimizer: - -RMSPropOptimizer -------------------------------- - -.. py:class:: paddle.fluid.optimizer.RMSPropOptimizer(learning_rate, rho=0.95, epsilon=1e-06, momentum=0.0, centered=False, parameter_list=None, regularization=None, name=None) - -该接口实现均方根传播(RMSProp)法,是一种未发表的,自适应学习率的方法。原演示幻灯片中提出了RMSProp:[http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf]中的第29张。等式如下所示: - -.. math:: - r(w, t) & = \rho r(w, t-1) + (1 - \rho)(\nabla Q_{i}(w))^2\\ - w & = w - \frac{\eta} {\sqrt{r(w,t) + \epsilon}} \nabla Q_{i}(w) - -第一个等式计算每个权重平方梯度的移动平均值,然后将梯度除以 :math:`sqrtv(w,t)` 。 - -.. math:: - r(w, t) & = \rho r(w, t-1) + (1 - \rho)(\nabla Q_{i}(w))^2\\ - v(w, t) & = \beta v(w, t-1) +\frac{\eta} {\sqrt{r(w,t) +\epsilon}} \nabla Q_{i}(w)\\ - w & = w - v(w, t) - -如果居中为真: - -.. 
math:: - r(w, t) & = \rho r(w, t-1) + (1 - \rho)(\nabla Q_{i}(w))^2\\ - g(w, t) & = \rho g(w, t-1) + (1 -\rho)\nabla Q_{i}(w)\\ - v(w, t) & = \beta v(w, t-1) + \frac{\eta} {\sqrt{r(w,t) - (g(w, t))^2 +\epsilon}} \nabla Q_{i}(w)\\ - w & = w - v(w, t) - -其中, :math:`ρ` 是超参数,典型值为0.9,0.95等。 :math:`beta` 是动量术语。 :math:`epsilon` 是一个平滑项,用于避免除零,通常设置在1e-4到1e-8的范围内。 - -参数: - - **learning_rate** (float) - 全局学习率。 - - **parameter_list** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 - - **rho** (float,可选) - rho是等式中的 :math:`rho` ,默认值0.95。 - - **epsilon** (float,可选) - 等式中的epsilon是平滑项,避免被零除,默认值1e-6。 - - **momentum** (float,可选) - 方程中的β是动量项,默认值0.0。 - - **centered** (bool,可选) - 如果为True,则通过梯度的估计方差,对梯度进行归一化;如果False,则由未centered的第二个moment归一化。将此设置为True有助于模型训练,但会消耗额外计算和内存资源。默认为False。 - - **regularization** - 正则器项,如 ``fluid.regularizer.L2DecayRegularizer`` 。 - - **name** (str, 可选) - 可选的名称前缀,一般无需设置,默认值为None。 - -抛出异常: - - ``ValueError`` -如果 ``learning_rate`` , ``rho`` , ``epsilon`` , ``momentum`` 为None。 - -**示例代码** - -.. code-block:: python - - import paddle - import paddle.fluid as fluid - import numpy as np - - place = fluid.CPUPlace() - main = fluid.Program() - with fluid.program_guard(main): - x = fluid.layers.data(name='x', shape=[13], dtype='float32') - y = fluid.layers.data(name='y', shape=[1], dtype='float32') - y_predict = fluid.layers.fc(input=x, size=1, act=None) - cost = fluid.layers.square_error_cost(input=y_predict, label=y) - avg_cost = fluid.layers.mean(cost) - - rms_optimizer = fluid.optimizer.RMSProp(learning_rate=0.1) - rms_optimizer.minimize(avg_cost) - - fetch_list = [avg_cost] - train_reader = paddle.batch( - paddle.dataset.uci_housing.train(), batch_size=1) - feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) - for data in train_reader(): - exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list) - - - -.. py:method:: minimize(loss, startup_program=None, parameter_list=None, no_grad_set=None, grad_clip=None) - -为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameter_list中的Parameters,最小化网络损失值loss。 - -参数: - - **loss** (Variable) – 需要最小化的损失值变量 - - **startup_program** (Program, 可选) – 用于初始化parameter_list中参数的 :ref:`cn_api_fluid_Program` , 默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program` - - **parameter_list** (list, 可选) – 待更新的Parameter或者Parameter.name组成的列表, 默认值为None,此时将更新所有的Parameter - - **no_grad_set** (set, 可选) – 不需要更新的Parameter或者Parameter.name组成的集合,默认值为None - - **grad_clip** (GradClipBase, 可选) – 梯度裁剪的策略,静态图模式不需要使用本参数,当前本参数只支持在dygraph模式下的梯度裁剪,未来本参数可能会调整,默认值为None - -返回: (optimize_ops, params_grads),数据类型为(list, list),其中optimize_ops是minimize接口为网络添加的OP列表,params_grads是一个由(param, grad)变量对组成的列表,param是Parameter,grad是该Parameter对应的梯度值 - -返回类型: tuple - -**示例代码** - -.. 
code-block:: python - - import paddle - import paddle.fluid as fluid - import numpy as np - - place = fluid.CPUPlace() - main = fluid.Program() - with fluid.program_guard(main): - x = fluid.layers.data(name='x', shape=[13], dtype='float32') - y = fluid.layers.data(name='y', shape=[1], dtype='float32') - y_predict = fluid.layers.fc(input=x, size=1, act=None) - cost = fluid.layers.square_error_cost(input=y_predict, label=y) - avg_cost = fluid.layers.mean(cost) - - rms_optimizer = fluid.optimizer.RMSProp(learning_rate=0.1) - rms_optimizer.minimize(avg_cost) - - fetch_list = [avg_cost] - train_reader = paddle.batch( - paddle.dataset.uci_housing.train(), batch_size=1) - feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) - for data in train_reader(): - exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list) - - - -.. py:method:: clear_gradients() - -**注意:** - - **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** - - -清除需要优化的参数的梯度。 - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - import numpy as np - - with fluid.dygraph.guard(): - value = np.arange(26).reshape(2, 13).astype("float32") - a = fluid.dygraph.to_variable(value) - linear = fluid.Linear(13, 5, dtype="float32") - optimizer = fluid.optimizer.RMSPropOptimizer(learning_rate=0.01, - parameter_list=linear.parameters()) - out = linear(a) - out.backward() - optimizer.minimize(out) - optimizer.clear_gradients() - - -.. py:method:: current_step_lr() - -**注意:** - - **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** - -获取当前步骤的学习率。当不使用LearningRateDecay时,每次调用的返回值都相同,否则返回当前步骤的学习率。 - -返回:当前步骤的学习率。 - -返回类型:float - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - import numpy as np - - # example1: LearningRateDecay is not used, return value is all the same - with fluid.dygraph.guard(): - emb = fluid.dygraph.Embedding([10, 10]) - adam = fluid.optimizer.Adam(0.001, parameter_list = emb.parameters()) - lr = adam.current_step_lr() - print(lr) # 0.001 - - # example2: PiecewiseDecay is used, return the step learning rate - with fluid.dygraph.guard(): - inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") - linear = fluid.dygraph.nn.Linear(10, 10) - inp = fluid.dygraph.to_variable(inp) - out = linear(inp) - loss = fluid.layers.reduce_mean(out) - - bd = [2, 4, 6, 8] - value = [0.2, 0.4, 0.6, 0.8, 1.0] - adam = fluid.optimizer.Adam(fluid.dygraph.PiecewiseDecay(bd, value, 0), - parameter_list=linear.parameters()) - - # first step: learning rate is 0.2 - np.allclose(adam.current_step_lr(), 0.2, rtol=1e-06, atol=0.0) # True - - # learning rate for different steps - ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0] - for i in range(12): - adam.minimize(loss) - lr = adam.current_step_lr() - np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True - diff --git a/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst b/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2439a95494df7f5199e877cf9f66841dba5fc8a0 --- /dev/null +++ b/doc/fluid/api_cn/optimizer_cn/RMSProp_cn.rst @@ -0,0 +1,251 @@ +.. _cn_api_paddle_optimizer_RMSProp: + +RMSProp +------------------------------- + +.. 
py:class:: paddle.optimizer.RMSProp(learning_rate, rho=0.95, epsilon=1e-06, momentum=0.0, centered=False, parameters=None, weight_decay=None, grad_clip=None, name=None) + + + + +该接口实现均方根传播(RMSProp)法,是一种未发表的,自适应学习率的方法。原演示幻灯片中提出了RMSProp:[http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf]中的第29张。等式如下所示: + +.. math:: + r(w, t) & = \rho r(w, t-1) + (1 - \rho)(\nabla Q_{i}(w))^2\\ + w & = w - \frac{\eta} {\sqrt{r(w,t) + \epsilon}} \nabla Q_{i}(w) + +第一个等式计算每个权重平方梯度的移动平均值,然后将梯度除以 :math:`sqrtv(w,t)` 。 + +.. math:: + r(w, t) & = \rho r(w, t-1) + (1 - \rho)(\nabla Q_{i}(w))^2\\ + v(w, t) & = \beta v(w, t-1) +\frac{\eta} {\sqrt{r(w,t) +\epsilon}} \nabla Q_{i}(w)\\ + w & = w - v(w, t) + +如果居中为真: + +.. math:: + r(w, t) & = \rho r(w, t-1) + (1 - \rho)(\nabla Q_{i}(w))^2\\ + g(w, t) & = \rho g(w, t-1) + (1 -\rho)\nabla Q_{i}(w)\\ + v(w, t) & = \beta v(w, t-1) + \frac{\eta} {\sqrt{r(w,t) - (g(w, t))^2 +\epsilon}} \nabla Q_{i}(w)\\ + w & = w - v(w, t) + +其中, :math:`ρ` 是超参数,典型值为0.9,0.95等。 :math:`beta` 是动量术语。 :math:`epsilon` 是一个平滑项,用于避免除零,通常设置在1e-4到1e-8的范围内。 + +参数: + - **learning_rate** (float) - 全局学习率。 + - **rho** (float,可选) - rho是等式中的 :math:`rho` ,默认值0.95。 + - **epsilon** (float,可选) - 等式中的epsilon是平滑项,避免被零除,默认值1e-6。 + - **momentum** (float,可选) - 方程中的β是动量项,默认值0.0。 + - **centered** (bool,可选) - 如果为True,则通过梯度的估计方差,对梯度进行归一化;如果False,则由未centered的第二个moment归一化。将此设置为True有助于模型训练,但会消耗额外计算和内存资源。默认为False。 + - **parameters** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 + - **weight_decay** (float|WeightDecayRegularizer,可选) - 正则化方法。可以是float类型的L2正则化系数或者正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 + :ref:`cn_api_fluid_regularizer_L2Decay` 。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; + 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 + - **grad_clip** (GradientClipBase, 可选) – 梯度裁剪的策略,支持三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。 + 默认值为None,此时将不进行梯度裁剪。 + - **name** (str, 可选) - 可选的名称前缀,一般无需设置,默认值为None。 + +抛出异常: + - ``ValueError`` -如果 ``learning_rate`` , ``rho`` , ``epsilon`` , ``momentum`` 为None。 + +**示例代码** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.mean(out) + + beta1 = paddle.to_tensor([0.9], dtype="float32") + beta2 = paddle.to_tensor([0.99], dtype="float32") + + adam = paddle.optimizer.RMSProp(learning_rate=0.1, + parameters=linear.parameters(), + weight_decay=0.01) + out.backward() + adam.step() + adam.clear_grad() + +.. py:method:: step() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +执行一次优化器并进行参数更新。 + +返回:None。 + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + value = np.arange(26).reshape(2, 13).astype("float32") + a = paddle.to_tensor(value) + linear = paddle.nn.Linear(13, 5) + adam = paddle.optimizer.RMSProp(learning_rate = 0.01, + parameters = linear.parameters()) + out = linear(a) + out.backward() + adam.step() + adam.clear_grad() + +.. 
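note::
+    下面是一段补充性的数值演示(假设性代码,非框架内部实现),用 numpy 按上文
+    非 centered 情形的公式手动计算一步 RMSProp 更新,便于理解 r(w,t) 与 v(w,t) 的含义:
+
+    .. code-block:: python
+
+        import numpy as np
+
+        rho, eta, beta, epsilon = 0.95, 0.1, 0.0, 1e-6
+        w = np.array([0.5, -0.3], dtype="float32")
+        grad = np.array([0.1, -0.2], dtype="float32")
+        r = np.zeros_like(w)
+        v = np.zeros_like(w)
+
+        r = rho * r + (1 - rho) * grad ** 2
+        v = beta * v + eta / np.sqrt(r + epsilon) * grad
+        w = w - v
+        print(w)  # 更新后的参数
+
+.. 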
py:method:: minimize(loss, startup_program=None, parameters=None, no_grad_set=None) + +为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameters中的Parameters,最小化网络损失值loss。 + +参数: + - **loss** (Tensor) – 需要最小化的损失值变量 + - **startup_program** (Program, 可选) – 用于初始化parameters中参数的 :ref:`cn_api_fluid_Program` , 默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program` + - **parameters** (list, 可选) – 待更新的Parameter或者Parameter.name组成的列表, 默认值为None,此时将更新所有的Parameter + - **no_grad_set** (set, 可选) – 不需要更新的Parameter或者Parameter.name组成的集合,默认值为None + +返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 + + +**示例代码** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.mean(out) + + beta1 = paddle.to_tensor([0.9], dtype="float32") + beta2 = paddle.to_tensor([0.99], dtype="float32") + + adam = paddle.optimizer.RMSProp(learning_rate=0.1, + parameters=linear.parameters(), + weight_decay=0.01) + out.backward() + adam.minimize(loss) + adam.clear_grad() + +.. py:method:: clear_gradients() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + + +清除需要优化的参数的梯度。 + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + value = np.arange(26).reshape(2, 13).astype("float32") + a = paddle.to_tensor(value) + linear = paddle.nn.Linear(13, 5) + optimizer = paddle.optimizer.RMSProp(learning_rate=0.02, + parameters=linear.parameters()) + out = linear(a) + out.backward() + optimizer.step() + optimizer.clear_gradients() + +.. py:method:: set_lr(value) + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +手动设置当前 ``optimizer`` 的学习率。当使用_LRScheduler时,无法使用该API手动设置学习率,因为这将导致冲突。 + +参数: + value (float) - 需要设置的学习率的值。 + +返回:None + +**代码示例** + +.. code-block:: python + + + import paddle + paddle.disable_static() + linear = paddle.nn.Linear(10, 10) + + adam = paddle.optimizer.RMSProp(0.1, parameters=linear.parameters()) + + # set learning rate manually by python float value + lr_list = [0.2, 0.3, 0.4, 0.5, 0.6] + for i in range(5): + adam.set_lr(lr_list[i]) + lr = adam.get_lr() + print("current lr is {}".format(lr)) + # Print: + # current lr is 0.2 + # current lr is 0.3 + # current lr is 0.4 + # current lr is 0.5 + # current lr is 0.6 + +.. py:method:: get_lr() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +获取当前步骤的学习率。当不使用_LRScheduler时,每次调用的返回值都相同,否则返回当前步骤的学习率。 + +返回:float,当前步骤的学习率。 + + +**代码示例** + +.. 
code-block:: python + + import numpy as np + import paddle + # example1: _LRScheduler is not used, return value is all the same + paddle.disable_static() + emb = paddle.nn.Embedding(10, 10, sparse=False) + adam = paddle.optimizer.RMSProp(0.001, parameters = emb.parameters()) + lr = adam.get_lr() + print(lr) # 0.001 + + # example2: PiecewiseDecay is used, return the step learning rate + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.reduce_mean(out) + + bd = [2, 4, 6, 8] + value = [0.2, 0.4, 0.6, 0.8, 1.0] + scheduler = paddle.optimizer.PiecewiseLR(bd, value, 0) + adam = paddle.optimizer.RMSProp(scheduler, + parameters=linear.parameters()) + + # first step: learning rate is 0.2 + np.allclose(adam.get_lr(), 0.2, rtol=1e-06, atol=0.0) # True + + # learning rate for different steps + ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0] + for i in range(12): + adam.step() + lr = adam.get_lr() + scheduler.step() + np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True diff --git a/doc/fluid/api_cn/optimizer_cn/RecomputeOptimizer_cn.rst b/doc/fluid/api_cn/optimizer_cn/RecomputeOptimizer_cn.rst index 44eb5acc817b82daf734a48c3b0f454a4659ad2c..d521b74471a50245f85085500ca9f716e9ba5d62 100644 --- a/doc/fluid/api_cn/optimizer_cn/RecomputeOptimizer_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/RecomputeOptimizer_cn.rst @@ -3,10 +3,13 @@ RecomputeOptimizer ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:class:: paddle.fluid.optimizer.RecomputeOptimizer(optimizer) +:api_attr: 声明式编程模式(静态图) + + + 通常来讲,一个深度学习的训练流程包含了三个子步骤:首先,运行前向算子来计算Variable和loss的值;其次,运行反向算子来计算参数的梯度;最后,应用优化算法以更新参数值。 在前向运算过程中,反向运算会用到的Variable都会保存在内存中,当模型深度很深时,这会占用大量的内存。 @@ -93,8 +96,7 @@ RecomputeOptimizer cost, startup_program=None, parameter_list=None, - no_grad_set=None, - checkpoints=[fc_1, pred]) + no_grad_set=None) program = cost.block.program with framework.program_guard(program, None): @@ -139,8 +141,7 @@ RecomputeOptimizer cost, startup_program=None, parameter_list=None, - no_grad_set=None, - checkpoints=[fc_1, pred]) + no_grad_set=None) optimize_ops = sgd.apply_optimize( cost, startup_program=None, params_grads=params_grads) @@ -188,45 +189,8 @@ RecomputeOptimizer cost, startup_program=None, parameter_list=None, - no_grad_set=None, - checkpoints=[fc_1, pred]) + no_grad_set=None) print("Finished backward") -.. py:method:: load(stat_dict) - -Recompute Optimizer 目前不支持load函数 - -参数: - - **stat_dict** – load_persistable方法加载的dict - -**代码示例** - -.. 
code-block:: python - - - import paddle.fluid as fluid - import paddle.compat as cpt - - def mlp(input_x, input_y, hid_dim=128, label_dim=2): - fc_1 = fluid.layers.fc(input=input_x, size=hid_dim) - prediction = fluid.layers.fc(input=[fc_1], size=label_dim, act='softmax') - cost = fluid.layers.cross_entropy(input=prediction, label=input_y) - sum_cost = fluid.layers.reduce_mean(cost) - return sum_cost, fc_1, prediction - - input_x = fluid.layers.data(name="x", shape=[32], dtype='float32') - input_y = fluid.layers.data(name="y", shape=[1], dtype='int64') - cost, fc_1, pred = mlp(input_x, input_y) - print("Finished FF") - - sgd = fluid.optimizer.Adam(learning_rate=0.01) - sgd = fluid.optimizer.RecomputeOptimizer(sgd) - sgd._set_checkpoints([fc_1, pred]) - try: - stat_dict = {} - sgd.load(stat_dict) - except NotImplementedError as e: - print(cpt.get_exception_message(e)) - diff --git a/doc/fluid/api_cn/optimizer_cn/SGDOptimizer_cn.rst b/doc/fluid/api_cn/optimizer_cn/SGDOptimizer_cn.rst index 1c95ff00db1983ac1ba28d78fc5c96f59ed44a72..582c590bb04393acd13add208c5b75b2032d0167 100644 --- a/doc/fluid/api_cn/optimizer_cn/SGDOptimizer_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/SGDOptimizer_cn.rst @@ -3,7 +3,10 @@ SGDOptimizer ------------------------------- -.. py:class:: paddle.fluid.optimizer.SGDOptimizer(learning_rate, parameter_list=None, regularization=None, name=None) +.. py:class:: paddle.fluid.optimizer.SGDOptimizer(learning_rate, parameter_list=None, regularization=None, grad_clip=None, name=None) + + + 该接口实现随机梯度下降算法的优化器 @@ -14,9 +17,13 @@ SGDOptimizer 参数: - **learning_rate** (float|Variable) - 用于更新参数的学习率。可以是浮点值,也可以是具有一个浮点值作为数据元素的变量。 - **parameter_list** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 - - **regularization** - 一个正则化器,例如 ``fluid.regularizer.L2DecayRegularizer`` 。 + - **regularization** (WeightDecayRegularizer,可选) - 正则化方法。支持两种正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 + :ref:`cn_api_fluid_regularizer_L2Decay` 。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; + 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 + - **grad_clip** (GradientClipBase, 可选) – 梯度裁剪的策略,支持三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。 + 默认值为None,此时将不进行梯度裁剪。 - **name** (str, 可选) - 可选的名称前缀,一般无需设置,默认值为None。 - + **代码示例** @@ -49,7 +56,7 @@ SGDOptimizer -.. py:method:: minimize(loss, startup_program=None, parameter_list=None, no_grad_set=None, grad_clip=None) +.. 
py:method:: minimize(loss, startup_program=None, parameter_list=None, no_grad_set=None) 为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameter_list中的Parameters,最小化网络损失值loss。 @@ -58,10 +65,8 @@ SGDOptimizer - **startup_program** (Program, 可选) – 用于初始化parameter_list中参数的 :ref:`cn_api_fluid_Program` , 默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program` - **parameter_list** (list, 可选) – 待更新的Parameter或者Parameter.name组成的列表, 默认值为None,此时将更新所有的Parameter - **no_grad_set** (set, 可选) – 不需要更新的Parameter或者Parameter.name组成的集合,默认值为None - - **grad_clip** (GradClipBase, 可选) – 梯度裁剪的策略,静态图模式不需要使用本参数,当前本参数只支持在dygraph模式下的梯度裁剪,未来本参数可能会调整,默认值为None - -返回: (optimize_ops, params_grads),数据类型为(list, list),其中optimize_ops是minimize接口为网络添加的OP列表,params_grads是一个由(param, grad)变量对组成的列表,param是Parameter,grad是该Parameter对应的梯度值 - + +返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 返回类型: tuple **代码示例** @@ -122,6 +127,48 @@ SGDOptimizer optimizer.minimize(out) optimizer.clear_gradients() +.. py:method:: set_lr() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +手动设置当前 ``optimizer`` 的学习率。当使用LearningRateDecay时,无法使用该API手动设置学习率,因为这将导致冲突。 + +参数: + value (float|Variable) - 需要设置的学习率的值。 + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + with fluid.dygraph.guard(): + linear = fluid.dygraph.nn.Linear(10, 10) + adam = fluid.optimizer.Adam(0.1, parameter_list=linear.parameters()) + # 通过Python float数值手动设置学习率 + lr_list = [0.2, 0.3, 0.4, 0.5, 0.6] + for i in range(5): + adam.set_lr(lr_list[i]) + print("current lr is {}".format(adam.current_step_lr())) + # 打印结果: + # current lr is 0.2 + # current lr is 0.3 + # current lr is 0.4 + # current lr is 0.5 + # current lr is 0.6 + + + # 通过 框架的Variable 设置学习率 + lr_var = fluid.layers.create_global_var(shape=[1], value=0.7, dtype='float32') + adam.set_lr(lr_var) + print("current lr is {}".format(adam.current_step_lr())) + # 打印结果: + # current lr is 0.7 + .. py:method:: current_step_lr() diff --git a/doc/fluid/api_cn/optimizer_cn/SGD_cn.rst b/doc/fluid/api_cn/optimizer_cn/SGD_cn.rst index 3079189e675fbc695e91ed5f8ac38cde6b4aa2cb..8810f79d267ab312ae682332240e047ad10771e6 100644 --- a/doc/fluid/api_cn/optimizer_cn/SGD_cn.rst +++ b/doc/fluid/api_cn/optimizer_cn/SGD_cn.rst @@ -5,6 +5,9 @@ SGD .. py:attribute:: paddle.fluid.optimizer.SGD + + + ``SGDOptimizer`` 的别名 diff --git a/doc/fluid/api_cn/optimizer_cn/lr_scheduler_cn/CosineAnnealingLR_cn.rst b/doc/fluid/api_cn/optimizer_cn/lr_scheduler_cn/CosineAnnealingLR_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..177ee846f284d232209f27cfd35a51938a527a5c --- /dev/null +++ b/doc/fluid/api_cn/optimizer_cn/lr_scheduler_cn/CosineAnnealingLR_cn.rst @@ -0,0 +1,100 @@ +.. _cn_api_paddle_optimizer_CosineAnnealingLR: + +CosineAnnealingLR +----------------------------------- + +.. py:class:: paddle.optimizer.lr_scheduler.CosineAnnealingLR(learning_rate, T_max, eta_min=0, last_epoch=-1, verbose=False) + +该接口使用 ``cosine annealing`` 方式来动态调整学习率。 + +.. 
math:: + \begin{aligned} + \eta_t & = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})\left(1 + + \cos\left(\frac{T_{cur}}{T_{max}}\pi\right)\right), + & T_{cur} \neq (2k+1)T_{max}; \\ + \eta_{t+1} & = \eta_{t} + \frac{1}{2}(\eta_{max} - \eta_{min}) + \left(1 - \cos\left(\frac{1}{T_{max}}\pi\right)\right), + & T_{cur} = (2k+1)T_{max}. + \end{aligned} + + +:math:`\eta_{max}` 的初始值为 ``learning_rate``, :math:`T_{cur}` 是SGDR(重启训练SGD)训练过程中的当前训练轮数。SGDR的训练方法可以参考文档 `SGDR: Stochastic Gradient Descent with Warm Restarts `_. +这里只是实现了 ``cosine annealing`` 动态学习率,热启训练部分没有实现。 + + +参数 +::::::::: + - **learning_rate** (float):初始学习率,可以是Python的float。 + - **T_max** (float|int):训练的上限轮数,是学习率衰减周期的一半。 + - **eta_min** (float|int, 可选):学习率的下限,即公式中的 :math:`\eta_{min}` 。默认值为0。 + - **last_epoch** (int,可选): 上一轮的轮数,重启训练时设置为上一轮的epoch数。默认值为 -1,则为初始学习率。 + - **verbose** (bool,可选):如果是 `True` ,则在每一轮更新时在标准输出 `stdout` 输出一条信息。默认值为 ``False`` 。 + +返回 +::::::::: + 返回计算CosineAnnealingLR的可调用对象。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + # train on default imperative mode + paddle.disable_static() + x = np.random.uniform(-1, 1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + scheduler = paddle.optimizer.lr_scheduler.CosineAnnealingLR(learning_rate=0.5, T_max=10, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameter_list=linear.parameters()) + for epoch in range(20): + for batch_id in range(2): + x = paddle.to_tensor(x) + out = linear(x) + loss = paddle.reduce_mean(out) + loss.backward() + sgd.minimize(loss) + linear.clear_gradients() + scheduler.step() + + # train on static mode + paddle.enable_static() + main_prog = paddle.static.Program() + start_prog = paddle.static.Program() + with paddle.static.program_guard(main_prog, start_prog): + x = paddle.static.data(name='x', shape=[None, 4, 5]) + y = paddle.static.data(name='y', shape=[None, 4, 5]) + z = paddle.static.nn.fc(x, 100) + loss = paddle.mean(z) + scheduler = paddle.optimizer.lr_scheduler.CosineAnnealingLR(learning_rate=0.5, T_max=10, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler) + sgd.minimize(loss) + + exe = paddle.static.Executor() + exe.run(start_prog) + for epoch in range(20): + for batch_id in range(2): + out = exe.run( + main_prog, + feed={ + 'x': np.random.randn(3, 4, 5).astype('float32'), + 'y': np.random.randn(3, 4, 5).astype('float32') + }, + fetch_list=loss.name) + scheduler.step() + +.. py:method:: step(epoch=None) + +step函数需要在优化器的 `step()` 函数之后调用,调用之后将会根据epoch数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 + +参数: + - **epoch** (int,可选)- 指定具体的epoch数。默认值None,此时将会从-1自动累加 ``epoch`` 数。 + +返回: + 无。 + +**代码示例** : + + 参照上述示例代码。 + diff --git a/doc/fluid/api_cn/optimizer_cn/lr_scheduler_cn/ExponentialLR_cn.rst b/doc/fluid/api_cn/optimizer_cn/lr_scheduler_cn/ExponentialLR_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7360bd3f3c758f7f5a4c71f73b1e6ef12cfcae14 --- /dev/null +++ b/doc/fluid/api_cn/optimizer_cn/lr_scheduler_cn/ExponentialLR_cn.rst @@ -0,0 +1,92 @@ +.. _cn_api_paddle_optimizer_ExponentialLR: + +ExponentialLR +----------------------------------- + +.. py:class:: paddle.optimizer.lr_scheduler.ExponentialLR(learning_rate, gamma, last_epoch=-1, verbose=False) + +该接口提供一种学习率按指数函数衰减的功能。 + +衰减函数可以用以下公式表示: + +.. 
math:: + + new\_learning\_rate = last\_learning\_rate * gamma + +参数 +::::::::: + - **learning_rate** (float) - 初始学习率,数据类型为Python float。 + - **gamma** (float):衰减率,new_lr = origin_lr * gamma。 + - **last_epoch** (int,可选): 上一轮的轮数,重启训练时设置为上一轮的epoch数。默认值为 -1,则为初始学习率。 + - **verbose** (bool,可选):如果是 `True` ,则在每一轮更新时在标准输出 `stdout` 输出一条信息。默认值为 ``False`` 。 + +返回 +::::::::: +返回计算ExponentialLR的可调用对象。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + # train on default dygraph mode + paddle.disable_static() + x = np.random.uniform(-1, 1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + scheduler = paddle.optimizer.lr_scheduler.ExponentialLR(learning_rate=0.5, gamma=0.9, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameter_list=linear.parameters()) + for epoch in range(20): + for batch_id in range(2): + x = paddle.to_tensor(x) + out = linear(x) + loss = paddle.reduce_mean(out) + loss.backward() + sgd.minimize(loss) + linear.clear_gradients() + scheduler.step() + + # train on static mode + paddle.enable_static() + main_prog = paddle.static.Program() + start_prog = paddle.static.Program() + with paddle.static.program_guard(main_prog, start_prog): + x = paddle.static.data(name='x', shape=[None, 4, 5]) + y = paddle.static.data(name='y', shape=[None, 4, 5]) + z = paddle.static.nn.fc(x, 100) + loss = paddle.mean(z) + scheduler = paddle.optimizer.lr_scheduler.ExponentialLR(learning_rate=0.5, gamma=0.9, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler) + sgd.minimize(loss) + + exe = paddle.static.Executor() + exe.run(start_prog) + for epoch in range(20): + for batch_id in range(2): + out = exe.run( + main_prog, + feed={ + 'x': np.random.randn(3, 4, 5).astype('float32'), + 'y': np.random.randn(3, 4, 5).astype('float32') + }, + fetch_list=loss.name) + scheduler.step() + + +.. py:method:: step(epoch=None) + +step函数需要在优化器的 `step()` 函数之后调用,调用之后将会根据epoch数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 + +参数: + - **epoch** (int,可选)- 指定具体的epoch数。默认值None,此时将会从-1自动累加 ``epoch`` 数。 + +返回: + 无。 + +**代码示例** : + + 参照上述示例代码。 + + diff --git a/doc/fluid/api_cn/optimizer_cn/lr_scheduler_cn/InverseTimeLR_cn.rst b/doc/fluid/api_cn/optimizer_cn/lr_scheduler_cn/InverseTimeLR_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7738cf5c504c5533ae9a69ebf262b9c18195c27b --- /dev/null +++ b/doc/fluid/api_cn/optimizer_cn/lr_scheduler_cn/InverseTimeLR_cn.rst @@ -0,0 +1,94 @@ +.. _cn_api_paddle_optimizer_InverseTimeLR: + +InverseTimeLR +------------------------------- + + +.. py:class:: paddle.optimizer.lr_scheduler.InverseTimeLR(learning_rate, gamma, last_epoch=-1, verbose=False) + + +该接口提供反时限学习率衰减的功能。 + +反时限学习率衰减的计算公式为: + +.. math:: + + new\_learning\_rate = \frac{learning\_rate}{1 + gamma * epoch} + + +参数 +::::::::: + - **learning_rate** (float) - 初始学习率,数据类型为Python float。 + - **gamma** (float):衰减率,学习率按 learning_rate / (1 + gamma * epoch) 的方式衰减。 + - **last_epoch** (int,可选): 上一轮的轮数,重启训练时设置为上一轮的epoch数。默认值为 -1,则为初始学习率。 + - **verbose** (bool,可选):如果是 `True` ,则在每一轮更新时在标准输出 `stdout` 输出一条信息。默认值为 ``False`` 。 + +返回 +::::::::: +返回计算InverseTimeLR的可调用对象。 + +代码示例 +::::::::: + +.. 
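code-block:: python + + # 下面是一个纯Python的数值示意(并非该API的实现),用于直观展示上述反时限衰减公式; + # 其中 base_lr=0.5、gamma=0.1 仅为假设的示例取值,完整的训练用法见下方示例代码。 + base_lr, gamma = 0.5, 0.1 + for epoch in range(5): + lr = base_lr / (1 + gamma * epoch) + print(epoch, lr) + # 依次输出约为 0.5, 0.4545, 0.4167, 0.3846, 0.3571 + +.. 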
code-block:: python + + import paddle + import numpy as np + + # train on default dygraph mode + paddle.disable_static() + x = np.random.uniform(-1, 1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + scheduler = paddle.optimizer.lr_scheduler.InverseTimeLR(learning_rate=0.5, gamma=0.1, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameter_list=linear.parameters()) + for epoch in range(20): + for batch_id in range(2): + x = paddle.to_tensor(x) + out = linear(x) + loss = paddle.reduce_mean(out) + loss.backward() + sgd.minimize(loss) + linear.clear_gradients() + scheduler.step() + + # train on static mode + paddle.enable_static() + main_prog = paddle.static.Program() + start_prog = paddle.static.Program() + with paddle.static.program_guard(main_prog, start_prog): + x = paddle.static.data(name='x', shape=[None, 4, 5]) + y = paddle.static.data(name='y', shape=[None, 4, 5]) + z = paddle.static.nn.fc(x, 100) + loss = paddle.mean(z) + scheduler = paddle.optimizer.lr_scheduler.InverseTimeLR(learning_rate=0.5, gamma=0.1, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler) + sgd.minimize(loss) + + exe = paddle.static.Executor() + exe.run(start_prog) + for epoch in range(20): + for batch_id in range(2): + out = exe.run( + main_prog, + feed={ + 'x': np.random.randn(3, 4, 5).astype('float32'), + 'y': np.random.randn(3, 4, 5).astype('float32') + }, + fetch_list=loss.name) + scheduler.step() + +.. py:method:: step(epoch=None) + +step函数需要在优化器的 `step()` 函数之后调用,调用之后将会根据epoch数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 + +参数: + - **epoch** (int,可选)- 指定具体的epoch数。默认值None,此时将会从-1自动累加 ``epoch`` 数。 + +返回: + 无。 + +**代码示例** : + + 参照上述示例代码。 diff --git a/doc/fluid/api_cn/optimizer_cn/lr_scheduler_cn/LambdaLR_cn.rst b/doc/fluid/api_cn/optimizer_cn/lr_scheduler_cn/LambdaLR_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5ae95b411a617ea8b6b41e5042a96ad88ea872be --- /dev/null +++ b/doc/fluid/api_cn/optimizer_cn/lr_scheduler_cn/LambdaLR_cn.rst @@ -0,0 +1,95 @@ +.. _cn_api_paddle_optimizer_LambdaLR: + +LambdaLR +----------------------------------- + +.. py:class:: paddle.optimizer.lr_scheduler.LambdaLR(learning_rate, lr_lambda, last_epoch=-1, verbose=False) + +该接口提供 ``lambda`` 函数设置学习率的功能。 ``lr_lambda`` 为一个 ``lambda`` 函数,其通过 ``epoch`` 计算出一个因子,该因子会乘以初始学习率。。 + +衰减过程可以参考以下代码: + +.. code-block:: python + + learning_rate = 0.5 # init learning_rate + lr_lambda = lambda epoch: 0.95 ** epoch + learning_rate = 0.5 # epoch 0 + learning_rate = 0.475 # epoch 1 + learning_rate = 0.45125 # epoch 2 + + +参数 +::::::::: + - **learning_rate** (float) - 初始学习率,数据类型为Python float。 + - **lr_lambda** (function):lr_lambda 为一个lambda函数,其通过 epoch 计算出一个因子,该因子会乘以初始学习率。 + - **last_epoch** (int,可选): 上一轮的轮数,重启训练时设置为上一轮的epoch数。默认值为 -1,则为初始学习率 。 + - **verbose** (bool,可选):如果是 `True` ,则在每一轮更新时在标准输出 `stdout` 输出一条信息。默认值为 ``False`` 。 + +返回 +::::::::: + 返回计算LambdaLR的可调用对象。 + +代码示例 +::::::::: + +.. 
code-block:: python + + import paddle + import numpy as np + + # train on default dygraph mode + paddle.disable_static() + x = np.random.uniform(-1, 1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + scheduler = paddle.optimizer.lr_scheduler.LambdaLR(learning_rate=0.5, lr_lambda=lambda x:0.95**x, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameter_list=linear.parameters()) + for epoch in range(20): + for batch_id in range(2): + x = paddle.to_tensor(x) + out = linear(x) + loss = paddle.reduce_mean(out) + loss.backward() + sgd.minimize(loss) + linear.clear_gradients() + scheduler.step() + + # train on static mode + paddle.enable_static() + main_prog = paddle.static.Program() + start_prog = paddle.static.Program() + with paddle.static.program_guard(main_prog, start_prog): + x = paddle.static.data(name='x', shape=[None, 4, 5]) + y = paddle.static.data(name='y', shape=[None, 4, 5]) + z = paddle.static.nn.fc(x, 100) + loss = paddle.mean(z) + scheduler = paddle.optimizer.lr_scheduler.LambdaLR(learning_rate=0.5, lr_lambda=lambda x:0.95**x, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler) + sgd.minimize(loss) + + exe = paddle.static.Executor() + exe.run(start_prog) + for epoch in range(20): + for batch_id in range(2): + out = exe.run( + main_prog, + feed={ + 'x': np.random.randn(3, 4, 5).astype('float32'), + 'y': np.random.randn(3, 4, 5).astype('float32') + }, + fetch_list=loss.name) + scheduler.step() + +.. py:method:: step(epoch=None) + +step函数需要在优化器的 `step()` 函数之后调用,调用之后将会根据epoch数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 + +参数: + - **epoch** (int,可选)- 指定具体的epoch数。默认值None,此时将会从-1自动累加 ``epoch`` 数。 + +返回: + 无。 + +**代码示例** : + + 参照上述示例代码。 + diff --git a/doc/fluid/api_cn/optimizer_cn/lr_scheduler_cn/LinearLrWarmup_cn.rst b/doc/fluid/api_cn/optimizer_cn/lr_scheduler_cn/LinearLrWarmup_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a998567058693d62903618246658883b2f9310d9 --- /dev/null +++ b/doc/fluid/api_cn/optimizer_cn/lr_scheduler_cn/LinearLrWarmup_cn.rst @@ -0,0 +1,105 @@ +.. _cn_api_paddle_optimizer_LinearLrWarmup: + +LinearLrWarmup +----------------------------------- + +.. py:class:: paddle.optimizer.lr_scheduler.LinearLrWarmup(learning_rate, warmup_steps, start_lr, end_lr, last_epoch=-1, verbose=False) + +该接口提供一种线性学习率热身(warm up)策略:在正常调整学习率之前,先逐步增大学习率。 + +当训练步数小于热身步数(warmup_steps)时,学习率lr按如下方式更新: + +.. code-block:: text + + linear_step = end_lr - start_lr + lr = start_lr + linear_step * (epoch / warmup_steps) + +当训练步数大于等于热身步数(warmup_steps)时,学习率lr为: + +.. code-block:: text + + lr = learning_rate + +其中learning_rate为热身之后的学习率。 + +参数 +::::::::: + - **learning_rate** (float|_LRScheduler):热身(warm up)之后的学习率,可以是Python的float或_LRScheduler子类。 + - **warmup_steps** (int):进行warm up过程的步数。 + - **start_lr** (float):warm up的起始学习率。 + - **end_lr** (float):warm up的最终学习率。 + - **last_epoch** (int,可选): 上一轮的轮数,重启训练时设置为上一轮的epoch数。默认值为 -1,则为初始学习率。 + - **verbose** (bool,可选):如果是 `True` ,则在每一轮更新时在标准输出 `stdout` 输出一条信息。默认值为 ``False`` 。 + + +返回 +::::::::: +返回计算LinearLrWarmup的可调用对象。 + +代码示例 +::::::::: + +.. 
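code-block:: python + + # 下面是warm up公式的纯Python数值示意(并非该API的实现), + # 其中 warmup_steps=5、start_lr=0.0、end_lr=0.5 仅为假设的示例取值,完整的训练用法见下方示例代码。 + warmup_steps, start_lr, end_lr = 5, 0.0, 0.5 + for epoch in range(8): + lr = start_lr + (end_lr - start_lr) * min(epoch, warmup_steps) / warmup_steps # 超过warmup_steps后保持在end_lr(即此处假设的learning_rate) + print(epoch, lr) + # epoch 0~4 学习率从0.0线性增长到0.4,epoch 5 及之后保持0.5 + +.. 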
code-block:: python + + import paddle + import numpy as np + + # train on default dygraph mode + paddle.disable_static() + x = np.random.uniform(-1, 1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + scheduler = paddle.optimizer.LinearLrWarmup( + learning_rate=0.5, warmup_steps=20, start_lr=0, end_lr=0.5, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameter_list=linear.parameters()) + for epoch in range(20): + for batch_id in range(2): + x = paddle.to_tensor(x) + out = linear(x) + loss = paddle.reduce_mean(out) + loss.backward() + sgd.minimize(loss) + linear.clear_gradients() + scheduler.step() + + # train on static mode + paddle.enable_static() + main_prog = paddle.static.Program() + start_prog = paddle.static.Program() + with paddle.static.program_guard(main_prog, start_prog): + x = paddle.static.data(name='x', shape=[None, 4, 5]) + y = paddle.static.data(name='y', shape=[None, 4, 5]) + z = paddle.static.nn.fc(x, 100) + loss = paddle.mean(z) + scheduler = paddle.optimizer.lr_scheduler.LinearLrWarmup( + learning_rate=0.5, warmup_steps=20, start_lr=0, end_lr=0.5, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler) + sgd.minimize(loss) + + exe = paddle.static.Executor() + exe.run(start_prog) + for epoch in range(20): + for batch_id in range(2): + out = exe.run( + main_prog, + feed={ + 'x': np.random.randn(3, 4, 5).astype('float32'), + 'y': np.random.randn(3, 4, 5).astype('float32') + }, + fetch_list=loss.name) + scheduler.step() + +.. py:method:: step(epoch=None) + +step函数需要在优化器的 `step()` 函数之后调用,调用之后将会根据epoch数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 + +参数: + - **epoch** (int,可选)- 指定具体的epoch数。默认值None,此时将会从-1自动累加 ``epoch`` 数。 + +返回: + 无。 + +**代码示例** : + + 参照上述示例代码。 + + diff --git a/doc/fluid/api_cn/optimizer_cn/lr_scheduler_cn/MultiStepLR_cn.rst b/doc/fluid/api_cn/optimizer_cn/lr_scheduler_cn/MultiStepLR_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e424b3153122a05f63202c5559d0ecd0f3825096 --- /dev/null +++ b/doc/fluid/api_cn/optimizer_cn/lr_scheduler_cn/MultiStepLR_cn.rst @@ -0,0 +1,100 @@ +.. _cn_api_paddle_optimizer_MultiStepLR: + +MultiStepLR +----------------------------------- + +.. py:class:: paddle.optimizer.lr_scheduler.MultiStepLR(learning_rate, milestones, gamma=0.1, last_epoch=-1, verbose=False) + +该接口提供一种学习率按指定轮数衰减的功能。 + +衰减过程可以参考以下代码: + +.. code-block:: text + + learning_rate = 0.5 + milestones = [30, 50] + gamma = 0.1 + if epoch < 30: + learning_rate = 0.5 + elif epoch < 50: + learning_rate = 0.05 # 0.5 * 0.1 + else: + learning_rate = 0.005 # 0.05 * 0.1 + +参数 +::::::::: + - **learning_rate** (float) - 初始学习率,数据类型为Python float。 + - **milestones** :(list):轮数下标列表。必须递增。 + - **gamma** (float,可选):衰减率,new_lr = origin_lr * gamma, 衰减率必须小于等于1.0,默认值为0.1。 + - **last_epoch** (int,可选): 上一轮的轮数,重启训练时设置为上一轮的epoch数。默认值为 -1,则为初始学习率 。 + - **verbose** (bool,可选):如果是 `True` ,则在每一轮更新时在标准输出 `stdout` 输出一条信息。默认值为 ``False`` 。 + + +返回 +::::::::: +返回计算MultiStepLR的可调用对象。 + +代码示例 +::::::::: + +.. 
code-block:: python + + import paddle + import numpy as np + + # train on default dygraph mode + paddle.disable_static() + x = np.random.uniform(-1, 1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + scheduler = paddle.optimizer.lr_scheduler.MultiStepLR(learning_rate=0.5, milestones=[2, 4, 6], gamma=0.8, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameter_list=linear.parameters()) + for epoch in range(20): + for batch_id in range(2): + x = paddle.to_tensor(x) + out = linear(x) + loss = paddle.reduce_mean(out) + loss.backward() + sgd.minimize(loss) + linear.clear_gradients() + scheduler.step() + + # train on static mode + paddle.enable_static() + main_prog = paddle.static.Program() + start_prog = paddle.static.Program() + with paddle.static.program_guard(main_prog, start_prog): + x = paddle.static.data(name='x', shape=[None, 4, 5]) + y = paddle.static.data(name='y', shape=[None, 4, 5]) + z = paddle.static.nn.fc(x, 100) + loss = paddle.mean(z) + scheduler = paddle.optimizer.lr_scheduler.MultiStepLR(learning_rate=0.5, milestones=[2, 4, 6], gamma=0.8, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler) + sgd.minimize(loss) + + exe = paddle.static.Executor() + exe.run(start_prog) + for epoch in range(20): + for batch_id in range(2): + out = exe.run( + main_prog, + feed={ + 'x': np.random.randn(3, 4, 5).astype('float32'), + 'y': np.random.randn(3, 4, 5).astype('float32') + }, + fetch_list=loss.name) + scheduler.step() + +.. py:method:: step(epoch=None) + +step函数需要在优化器的 `step()` 函数之后调用,调用之后将会根据epoch数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 + +参数: + - **epoch** (int,可选)- 指定具体的epoch数。默认值None,此时将会从-1自动累加 ``epoch`` 数。 + +返回: + 无。 + +**代码示例** : + + 参照上述示例代码。 + diff --git a/doc/fluid/api_cn/optimizer_cn/lr_scheduler_cn/NaturalExpLR_cn.rst b/doc/fluid/api_cn/optimizer_cn/lr_scheduler_cn/NaturalExpLR_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c89cd03a7aee7356d7315518c69c59d4c87793aa --- /dev/null +++ b/doc/fluid/api_cn/optimizer_cn/lr_scheduler_cn/NaturalExpLR_cn.rst @@ -0,0 +1,91 @@ +.. _cn_api_paddle_optimizer_NaturalExpLR: + +NaturalExpLR +------------------------------- + + +.. py:class:: paddle.optimizer.lr_scheduler.NaturalExpLR(learning_rate, gamma, last_epoch=-1, verbose=False) + +该接口提供按自然指数衰减学习率的功能。 + +自然指数衰减的计算方式如下: + +.. math:: + + decayed\_learning\_rate = learning\_rate * e^{- gamma * epoch} + +参数 +::::::::: + - **learning_rate** (float) - 初始学习率,数据类型为Python float。 + - **gamma** (float):衰减率,即公式中指数部分的系数。 + - **last_epoch** (int,可选): 上一轮的轮数,重启训练时设置为上一轮的epoch数。默认值为 -1,则为初始学习率。 + - **verbose** (bool,可选):如果是 `True` ,则在每一轮更新时在标准输出 `stdout` 输出一条信息。默认值为 ``False`` 。 + +返回 +::::::::: +返回计算NaturalExpLR的可调用对象。 + +代码示例 +::::::::: + +.. 
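code-block:: python + + import math + # 下面是自然指数衰减公式的纯Python数值示意(并非该API的实现), + # 其中 base_lr=0.5、gamma=0.1 仅为假设的示例取值,完整的训练用法见下方示例代码。 + base_lr, gamma = 0.5, 0.1 + for epoch in range(5): + lr = base_lr * math.exp(-gamma * epoch) + print(epoch, lr) + # 依次输出约为 0.5, 0.4524, 0.4094, 0.3704, 0.3352 + +.. 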
code-block:: python + + import paddle + import numpy as np + + # train on default dygraph mode + paddle.disable_static() + x = np.random.uniform(-1, 1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + scheduler = paddle.optimizer.lr_scheduler.NaturalExpLR(learning_rate=0.5, gamma=0.1, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameter_list=linear.parameters()) + for epoch in range(20): + for batch_id in range(2): + x = paddle.to_tensor(x) + out = linear(x) + loss = paddle.reduce_mean(out) + loss.backward() + sgd.minimize(loss) + linear.clear_gradients() + scheduler.step() + + # train on static mode + paddle.enable_static() + main_prog = paddle.static.Program() + start_prog = paddle.static.Program() + with paddle.static.program_guard(main_prog, start_prog): + x = paddle.static.data(name='x', shape=[None, 4, 5]) + y = paddle.static.data(name='y', shape=[None, 4, 5]) + z = paddle.static.nn.fc(x, 100) + loss = paddle.mean(z) + scheduler = paddle.optimizer.lr_scheduler.NaturalExpLR(learning_rate=0.5, gamma=0.1, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler) + sgd.minimize(loss) + + exe = paddle.static.Executor() + exe.run(start_prog) + for epoch in range(20): + for batch_id in range(2): + out = exe.run( + main_prog, + feed={ + 'x': np.random.randn(3, 4, 5).astype('float32'), + 'y': np.random.randn(3, 4, 5).astype('float32') + }, + fetch_list=loss.name) + scheduler.step() + +.. py:method:: step(epoch=None) + +step函数需要在优化器的 `step()` 函数之后调用,调用之后将会根据epoch数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 + +参数: + - **epoch** (int,可选)- 指定具体的epoch数。默认值None,此时将会从-1自动累加 ``epoch`` 数。 + +返回: + 无。 + +**代码示例** : + + 参照上述示例代码。 + diff --git a/doc/fluid/api_cn/optimizer_cn/lr_scheduler_cn/NoamLR_cn.rst b/doc/fluid/api_cn/optimizer_cn/lr_scheduler_cn/NoamLR_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..eadb423134cadd459d1719b658756c1bb2cfcfb3 --- /dev/null +++ b/doc/fluid/api_cn/optimizer_cn/lr_scheduler_cn/NoamLR_cn.rst @@ -0,0 +1,98 @@ +.. _cn_api_paddle_optimizer_NoamLR: + +NoamLR +------------------------------- + + +.. py:class:: paddle.optimizer.lr_scheduler.NoamLR(d_model, warmup_steps, learning_rate=1.0, last_epoch=-1, verbose=False) + + +该接口提供Noam衰减学习率的功能。 + +Noam衰减的计算方式如下。 + +.. math:: + + decayed\_learning\_rate = learning\_rate * d_{model}^{-0.5} * min(epoch^{-0.5}, epoch * warmup\_steps^{-1.5}) + +关于Noam衰减的更多细节请参考 `attention is all you need `_ + +参数 +::::::::: + - **d$_{model}$** (int) - 模型的输入、输出向量特征维度,为超参数。数据类型为Python int。 + - **warmup_steps** (int) - 预热步数,为超参数。数据类型为Python int。 + - **learning_rate** (float) - 初始学习率,数据类型为Python float。默认值为1.0。 + - **last_epoch** (int,可选): 上一轮的轮数,重启训练时设置为上一轮的epoch数。默认值为 -1,则为初始学习率。 + - **verbose** (bool,可选):如果是 `True` ,则在每一轮更新时在标准输出 `stdout` 输出一条信息。默认值为 ``False`` 。 + +返回 +::::::::: +返回计算NoamLR的可调用对象。 + +代码示例 +::::::::: + +.. 
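code-block:: python + + # 下面是Noam衰减公式的纯Python数值示意(并非该API的实现), + # 其中 d_model=512、warmup_steps=8、base_lr=1.0 仅为假设的示例取值,完整的训练用法见下方示例代码。 + d_model, warmup_steps, base_lr = 512, 8, 1.0 + for epoch in range(1, 13): + lr = base_lr * d_model ** (-0.5) * min(epoch ** (-0.5), epoch * warmup_steps ** (-1.5)) + print(epoch, lr) + # 在warmup_steps之前学习率线性上升,之后按 epoch**-0.5 衰减 + +.. 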
code-block:: python + + import paddle + import numpy as np + + # train on default dygraph mode + paddle.disable_static() + x = np.random.uniform(-1, 1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + scheduler = paddle.optimizer.lr_scheduler.NoamLR(d_model=0.01, warmup_steps=100, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameter_list=linear.parameters()) + for epoch in range(20): + for batch_id in range(2): + x = paddle.to_tensor(x) + out = linear(x) + loss = paddle.reduce_mean(out) + loss.backward() + sgd.minimize(loss) + linear.clear_gradients() + scheduler.step() + + # train on static mode + paddle.enable_static() + main_prog = paddle.static.Program() + start_prog = paddle.static.Program() + with paddle.static.program_guard(main_prog, start_prog): + x = paddle.static.data(name='x', shape=[None, 4, 5]) + y = paddle.static.data(name='y', shape=[None, 4, 5]) + z = paddle.static.nn.fc(x, 100) + loss = paddle.mean(z) + scheduler = paddle.optimizer.lr_scheduler.NoamLR(d_model=0.01, warmup_steps=100, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler) + sgd.minimize(loss) + + exe = paddle.static.Executor() + exe.run(start_prog) + for epoch in range(20): + for batch_id in range(2): + out = exe.run( + main_prog, + feed={ + 'x': np.random.randn(3, 4, 5).astype('float32'), + 'y': np.random.randn(3, 4, 5).astype('float32') + }, + fetch_list=loss.name) + scheduler.step() + + + +.. py:method:: step(epoch=None) + +step函数需要在优化器的 `step()` 函数之后调用,调用之后将会根据epoch数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 + +参数: + - **epoch** (int,可选)- 指定具体的epoch数。默认值None,此时将会从-1自动累加 ``epoch`` 数。 + +返回: + 无。 + +**代码示例** : + + 参照上述示例代码。 + + diff --git a/doc/fluid/api_cn/optimizer_cn/lr_scheduler_cn/PiecewiseLR_cn.rst b/doc/fluid/api_cn/optimizer_cn/lr_scheduler_cn/PiecewiseLR_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5f22bd57cf2646c2246b9238e28ffb548ab819db --- /dev/null +++ b/doc/fluid/api_cn/optimizer_cn/lr_scheduler_cn/PiecewiseLR_cn.rst @@ -0,0 +1,95 @@ +.. _cn_api_paddle_optimizer_PiecewiseLR: + +PiecewiseLR +------------------------------- + + +.. py:class:: paddle.optimizer.lr_scheduler.PiecewiseLR(boundaries, values, last_epoch=-1, verbose=False) + + +该接口提供对初始学习率进行分段(piecewise)常数衰减的功能。 + +分段常数衰减的过程举例描述如下。 + +.. code-block:: text + + 例如,设定的boundaries列表为[100, 200],候选学习率常量列表values为[1.0, 0.5, 0.1],则: + 1、在当前训练步数epoch小于100步,学习率值为1.0。 + 2、在当前训练步数epoch大于或等于100步,并且小于200步时,学习率值为0.5。 + 3、在当前训练步数epoch大于或等于200步时,学习率值为0.1。 + + +参数 +::::::::: + - **boundaries** (list):指定衰减的步数边界。列表的数据元素为Python int类型。 + - **values** (list) :备选学习率列表。数据元素类型为Python float的列表。与边界值列表有对应的关系。 + - **last_epoch** (int,可选):上一轮的轮数,重启训练时设置为上一轮的epoch数。默认值为 -1,则为初始学习率 。 + - **verbose** (bool,可选):如果是 `True` ,则在每一轮更新时在标准输出 `stdout` 输出一条信息。默认值为 ``False`` 。 + +返回 +::::::::: +返回计算PiecewiseLR的可调用对象。 + +代码示例 +::::::::: + +.. 
code-block:: python + + import paddle + import numpy as np + + # train on default dygraph mode + paddle.disable_static() + x = np.random.uniform(-1, 1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + scheduler = paddle.optimizer.lr_scheduler.PiecewiseLR(boundaries=[3, 6, 9], values=[0.1, 0.2, 0.3, 0.4], verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameter_list=linear.parameters()) + for epoch in range(20): + for batch_id in range(2): + x = paddle.to_tensor(x) + out = linear(x) + loss = paddle.reduce_mean(out) + loss.backward() + sgd.minimize(loss) + linear.clear_gradients() + scheduler.step() + + # train on static mode + paddle.enable_static() + main_prog = paddle.static.Program() + start_prog = paddle.static.Program() + with paddle.static.program_guard(main_prog, start_prog): + x = paddle.static.data(name='x', shape=[None, 4, 5]) + y = paddle.static.data(name='y', shape=[None, 4, 5]) + z = paddle.static.nn.fc(x, 100) + loss = paddle.mean(z) + scheduler = paddle.optimizer.lr_scheduler.PiecewiseLR(boundaries=[3, 6, 9], values=[0.1, 0.2, 0.3, 0.4], verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler) + sgd.minimize(loss) + + exe = paddle.static.Executor() + exe.run(start_prog) + for epoch in range(20): + for batch_id in range(2): + out = exe.run( + main_prog, + feed={ + 'x': np.random.randn(3, 4, 5).astype('float32'), + 'y': np.random.randn(3, 4, 5).astype('float32') + }, + fetch_list=loss.name) + scheduler.step() + +.. py:method:: step(epoch=None) + +step函数需要在优化器的 `step()` 函数之后调用,调用之后将会根据epoch数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 + +参数: + - **epoch** (int,可选)- 指定具体的epoch数。默认值None,此时将会从-1自动累加 ``epoch`` 数。 + +返回: + 无。 + +**代码示例** : + + 参照上述示例代码。 diff --git a/doc/fluid/api_cn/optimizer_cn/lr_scheduler_cn/PolynomiaLR_cn.rst b/doc/fluid/api_cn/optimizer_cn/lr_scheduler_cn/PolynomiaLR_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9b42b12ff71274afc9c83eb54158fb2e7512cb19 --- /dev/null +++ b/doc/fluid/api_cn/optimizer_cn/lr_scheduler_cn/PolynomiaLR_cn.rst @@ -0,0 +1,107 @@ +.. _cn_api_paddle_optimizer_PolynomialLR: + +PolynomialLR +------------------------------- + + +.. py:class:: paddle.optimizer.lr_scheduler.PolynomialLR(learning_rate, decay_steps, end_lr=0.0001, power=1.0, cycle=False, last_epoch=-1, verbose=False) + + +该接口提供学习率按多项式衰减的功能。通过多项式衰减函数,使得学习率值逐步从初始的 ``learning_rate``,衰减到 ``end_learning_rate`` 。 + +计算方式如下。 + +若cycle为True,则计算公式为: + +.. math:: + + decay\_steps &= decay\_steps * math.ceil(\frac{epoch}{decay\_steps}) \\ + decayed\_learning\_rate &= (learning\_rate-end\_learning\_rate)*(1-\frac{epoch}{decay\_steps})^{power}+end\_learning\_rate + +若cycle为False,则计算公式为: + +.. math:: + + epoch &= min(epoch, decay\_steps) \\ + decayed\_learning\_rate &= (learning\_rate-end\_learning\_rate)*(1-\frac{epoch}{decay\_steps})^{power}+end\_learning\_rate + + +参数 +::::::::: + - **learning_rate** (float) - 初始学习率,数据类型为Python float。 + - **decay_steps** (int):进行衰减的步长,这个决定了衰减周期。 + - **end_lr** (float,可选)- 最小的最终学习率。默认值为0.0001。 + - **power** (float,可选)- 多项式的幂。默认值为1.0。 + - **cycle** (bool,可选)- 学习率下降后是否重新上升。若为True,则学习率衰减到最低学习率值时,会出现上升。若为False,则学习率曲线则单调递减。默认值为False。 + - **last_epoch** (int,可选): 上一轮的轮数,重启训练时设置为上一轮的epoch数。默认值为 -1,则为初始学习率。 + - **verbose** (bool,可选):如果是 `True` ,则在每一轮更新时在标准输出 `stdout` 输出一条信息。默认值为 ``False`` 。 + +返回 +::::::::: +返回计算PolynomialLR的可调用对象。 + + +代码示例 +::::::::: + +.. 
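code-block:: python + + # 下面是多项式衰减公式(cycle=False)的纯Python数值示意(并非该API的实现), + # 其中 base_lr=0.5、decay_steps=4、end_lr=0.0001、power=1.0 仅为假设的示例取值,完整的训练用法见下方示例代码。 + base_lr, decay_steps, end_lr, power = 0.5, 4, 0.0001, 1.0 + for epoch in range(7): + e = min(epoch, decay_steps) + lr = (base_lr - end_lr) * (1 - e / decay_steps) ** power + end_lr + print(epoch, lr) + # 学习率从0.5按多项式(此处power=1.0,即线性)衰减,第4轮之后保持end_lr=0.0001 + +.. 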
code-block:: python + + import paddle + import numpy as np + + # train on default dygraph mode + paddle.disable_static() + x = np.random.uniform(-1, 1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + scheduler = paddle.optimizer.lr_scheduler.PolynomialLR(learning_rate=0.5, decay_steps=20, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameter_list=linear.parameters()) + for epoch in range(20): + for batch_id in range(2): + x = paddle.to_tensor(x) + out = linear(x) + loss = paddle.reduce_mean(out) + loss.backward() + sgd.minimize(loss) + linear.clear_gradients() + scheduler.step() + + # train on static mode + paddle.enable_static() + main_prog = paddle.static.Program() + start_prog = paddle.static.Program() + with paddle.static.program_guard(main_prog, start_prog): + x = paddle.static.data(name='x', shape=[None, 4, 5]) + y = paddle.static.data(name='y', shape=[None, 4, 5]) + z = paddle.static.nn.fc(x, 100) + loss = paddle.mean(z) + scheduler = paddle.optimizer.lr_scheduler.PolynomialLR(learning_rate=0.5, decay_steps=20, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler) + sgd.minimize(loss) + + exe = paddle.static.Executor() + exe.run(start_prog) + for epoch in range(20): + for batch_id in range(2): + out = exe.run( + main_prog, + feed={ + 'x': np.random.randn(3, 4, 5).astype('float32'), + 'y': np.random.randn(3, 4, 5).astype('float32') + }, + fetch_list=loss.name) + scheduler.step() + +.. py:method:: step(epoch=None) + +step函数需要在优化器的 `step()` 函数之后调用,调用之后将会根据epoch数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 + +参数: + - **epoch** (int,可选)- 指定具体的epoch数。默认值None,此时将会从-1自动累加 ``epoch`` 数。 + +返回: + 无。 + +**代码示例** : + + 参照上述示例代码。 + diff --git a/doc/fluid/api_cn/optimizer_cn/lr_scheduler_cn/ReduceLROnPlateauLR_cn.rst b/doc/fluid/api_cn/optimizer_cn/lr_scheduler_cn/ReduceLROnPlateauLR_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..bdd446e15433f670fd267712678f3542779056fe --- /dev/null +++ b/doc/fluid/api_cn/optimizer_cn/lr_scheduler_cn/ReduceLROnPlateauLR_cn.rst @@ -0,0 +1,94 @@ +.. _cn_api_paddle_optimizer_ReduceLROnPlateau: + +ReduceLROnPlateau +----------------------------------- + +.. 
py:class:: paddle.optimizer.lr_scheduler.ReduceLROnPlateau(learning_rate, mode='min', factor=0.1, patience=10, threshold=1e-4, threshold_mode='rel', cooldown=0, min_lr=0, epsilon=1e-8, verbose=False) + +该API为 ``loss`` 自适应的学习率衰减策略。默认情况下,当 ``loss`` 停止下降时,降低学习率(如果将 mode 设置为 'max' ,此时判断逻辑相反,loss 停止上升时降低学习率)。其思想是:一旦模型表现不再提升,将学习率降低2-10倍对模型的训练往往有益。 +loss 是传入到该类方法 ``step`` 中的参数,其必须是shape为[1]的1-D Tensor。 如果 loss 停止下降(mode 为 min 时)超过 ``patience`` 个epoch,学习率将会减小为 learning_rate * factor。 +此外,每降低一次学习率后,将会进入一个时长为 cooldown 个epoch的冷静期,在冷静期内,将不会监控 loss 的变化情况,也不会衰减。 在冷静期之后,会继续监控 loss 的上升或下降。 + + +参数 +::::::::: + - **learning_rate** (float) - 初始学习率,数据类型为Python float。 + - **mode** (str,可选)'min' 和 'max' 之一。通常情况下,为 'min' ,此时当 loss 停止下降时学习率将减小。默认:'min' 。 (注意:仅在特殊用法时,可以将其设置为 'max' ,此时判断逻辑相反, loss 停止上升学习率才减小) + - **factor** (float,可选) - 学习率衰减的比例。new_lr = origin_lr * factor,它是值小于1.0的float型数字,默认: 0.1。 + - **patience** (int,可选)- 当 loss 连续 patience 个epoch没有下降(mode: 'min')或上升(mode: 'max')时,学习率才会减小。默认:10。 + - **threshold** (float,可选)- threshold 和 threshold_mode 两个参数将会决定 loss 最小变化的阈值。小于该阈值的变化 将会被忽视。默认:1e-4。 + - **threshold_mode** (str,可选)- 'rel' 和 'abs' 之一。在 'rel' 模式下, loss 最小变化的阈值是 last_loss * threshold , 其中 last_loss 是 loss 在上个epoch的值。在 'abs' 模式下,loss 最小变化的阈值是 threshold 。 默认:'rel'。 + - **cooldown** (int,可选)- 在学习速率每次减小之后,会进入时长为 ``cooldown`` 个 step 的冷静期。默认:0。 + - **min_lr** (float,可选) - 最小的学习率。减小后的学习率最低下界限。默认:0。 + - **epsilon** (float,可选)- 如果新旧学习率间的差异小于epsilon ,则不会更新。默认值:1e-8。 + - **verbose** (bool,可选):如果是 `True` ,则在每一轮更新时在标准输出 `stdout` 输出一条信息。默认值为 ``False`` 。 + +返回 +::::::::: + 返回计算ReduceLROnPlateau的可调用对象。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + # train on default dygraph mode + paddle.disable_static() + x = np.random.uniform(-1, 1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + scheduler = paddle.optimizer.lr_scheduler.ReduceLROnPlateau(learning_rate=1.0, factor=0.5, patience=5, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameter_list=linear.parameters()) + for epoch in range(20): + for batch_id in range(2): + x = paddle.to_tensor(x) + out = linear(x) + loss = paddle.reduce_mean(out) + loss.backward() + sgd.minimize(loss) + linear.clear_gradients() + scheduler.step(loss) + + # train on static mode + paddle.enable_static() + main_prog = paddle.static.Program() + start_prog = paddle.static.Program() + with paddle.static.program_guard(main_prog, start_prog): + x = paddle.static.data(name='x', shape=[None, 4, 5]) + y = paddle.static.data(name='y', shape=[None, 4, 5]) + z = paddle.static.nn.fc(x, 100) + loss = paddle.mean(z) + scheduler = paddle.optimizer.lr_scheduler.ReduceLROnPlateau(learning_rate=1.0, factor=0.5, patience=5, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler) + sgd.minimize(loss) + + exe = paddle.static.Executor() + exe.run(start_prog) + for epoch in range(20): + for batch_id in range(2): + out = exe.run( + main_prog, + feed={ + 'x': np.random.randn(3, 4, 5).astype('float32'), + 'y': np.random.randn(3, 4, 5).astype('float32') + }, + fetch_list=loss.name) + scheduler.step(out[0]) + + +.. 
py:method:: step(metrics, epoch=None) + +step函数需要在优化器的 `step()` 函数之后调用,其根据传入的 metrics 调整optimizer中的学习率,调整后的学习率将会在下一个 ``step`` 时生效。 + +参数: + metrics (Tensor|numpy.ndarray|float)-用来判断是否需要降低学习率。如果 loss 连续 patience 个 ``steps`` 没有下降, 将会降低学习率。可以是Tensor或者numpy.array,但是shape必须为[1] 。 + - **epoch** (int,可选)- 指定具体的epoch数。默认值None,此时将会从-1自动累加 ``epoch`` 数。 + +返回: + 无 + +**代码示例**: + + 参照上述示例代码。 diff --git a/doc/fluid/api_cn/optimizer_cn/lr_scheduler_cn/StepLR_cn.rst b/doc/fluid/api_cn/optimizer_cn/lr_scheduler_cn/StepLR_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..cf4ffa74295a0ca21d5ebf9b905c4c40263edd19 --- /dev/null +++ b/doc/fluid/api_cn/optimizer_cn/lr_scheduler_cn/StepLR_cn.rst @@ -0,0 +1,100 @@ +.. _cn_api_paddle_optimizer_StepLR: + +StepLR +----------------------------------- + +.. py:class:: paddle.optimizer.lr_scheduler.StepLR(learning_rate, step_size, gamma=0.1, last_epoch=-1, verbose=False) + +该接口提供一种学习率按指定 `间隔` 轮数衰减的功能。 + +衰减过程可以参考以下代码: + +.. code-block:: text + + learning_rate = 0.5 + step_size = 30 + gamma = 0.1 + if epoch < 30: + learning_rate = 0.5 + elif epoch < 60: + learning_rate = 0.05 # 0.5 * 0.1 + else: + learning_rate = 0.005 # 0.05 * 0.1 + +参数 +::::::::: + - **learning_rate** (float) - 初始学习率,数据类型为Python float。 + - **step_size** :(int):学习率衰减轮数间隔。 + - **gamma** (float, 可选):衰减率,new_lr = origin_lr * gamma, 衰减率必须小于等于1.0,默认值为0.1。 + - **last_epoch** (int,可选): 上一轮的轮数,重启训练时设置为上一轮的epoch数。默认值为 -1,则为初始学习率 。 + - **verbose** (bool,可选):如果是 `True` ,则在每一轮更新时在标准输出 `stdout` 输出一条信息。默认值为 ``False`` 。 + + +返回 +::::::::: +返回计算StepLR的可调用对象。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + # train on default dygraph mode + paddle.disable_static() + x = np.random.uniform(-1, 1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + scheduler = paddle.optimizer.lr_scheduler.StepLR(learning_rate=0.5, step_size=5, gamma=0.8, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameter_list=linear.parameters()) + for epoch in range(20): + for batch_id in range(2): + x = paddle.to_tensor(x) + out = linear(x) + loss = paddle.reduce_mean(out) + loss.backward() + sgd.minimize(loss) + linear.clear_gradients() + scheduler.step() + + # train on static mode + paddle.enable_static() + main_prog = paddle.static.Program() + start_prog = paddle.static.Program() + with paddle.static.program_guard(main_prog, start_prog): + x = paddle.static.data(name='x', shape=[None, 4, 5]) + y = paddle.static.data(name='y', shape=[None, 4, 5]) + z = paddle.static.nn.fc(x, 100) + loss = paddle.mean(z) + scheduler = paddle.optimizer.lr_scheduler.StepLR(learning_rate=0.5, step_size=5, gamma=0.8, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler) + sgd.minimize(loss) + + exe = paddle.static.Executor() + exe.run(start_prog) + for epoch in range(20): + for batch_id in range(2): + out = exe.run( + main_prog, + feed={ + 'x': np.random.randn(3, 4, 5).astype('float32'), + 'y': np.random.randn(3, 4, 5).astype('float32') + }, + fetch_list=loss.name) + scheduler.step() + +.. 
py:method:: step(epoch=None) + +step函数需要在优化器的 `step()` 函数之后调用,调用之后将会根据epoch数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 + +参数: + - **epoch** (int,可选)- 指定具体的epoch数。默认值None,此时将会从-1自动累加 ``epoch`` 数。 + +返回: + 无。 + +**代码示例** : + + 参照上述示例代码。 + diff --git a/doc/fluid/api_cn/paddle_cn.rst b/doc/fluid/api_cn/paddle_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2ce64a6a74f554cfe10673682a13ce8eaf7628d5 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn.rst @@ -0,0 +1,184 @@ +======================= +paddle +======================= + +.. toctree:: + :maxdepth: 1 + + paddle_cn/abs_cn.rst + paddle_cn/acos_cn.rst + paddle_cn/addcmul_cn.rst + paddle_cn/addmm_cn.rst + paddle_cn/allclose_cn.rst + paddle_cn/append_backward_cn.rst + paddle_cn/argmax_cn.rst + paddle_cn/argmin_cn.rst + paddle_cn/argsort_cn.rst + paddle_cn/asin_cn.rst + paddle_cn/atan_cn.rst + paddle_cn/bmm_cn.rst + paddle_cn/BuildStrategy_cn.rst + paddle_cn/cast_cn.rst + paddle_cn/ceil_cn.rst + paddle_cn/cholesky_cn.rst + paddle_cn/chunk_cn.rst + paddle_cn/clamp_cn.rst + paddle_cn/clip_cn.rst + paddle_cn/CompiledProgram_cn.rst + paddle_cn/concat_cn.rst + paddle_cn/cond_cn.rst + paddle_cn/cos_cn.rst + paddle_cn/CPUPlace_cn.rst + paddle_cn/create_global_var_cn.rst + paddle_cn/create_parameter_cn.rst + paddle_cn/create_tensor_cn.rst + paddle_cn/crop_tensor_cn.rst + paddle_cn/cross_cn.rst + paddle_cn/CUDAPinnedPlace_cn.rst + paddle_cn/CUDAPlace_cn.rst + paddle_cn/cumsum_cn.rst + paddle_cn/default_main_program_cn.rst + paddle_cn/default_startup_program_cn.rst + paddle_cn/diag_cn.rst + paddle_cn/disable_dygraph_cn.rst + paddle_cn/dist_cn.rst + paddle_cn/distribution_cn.rst + paddle_cn/dot_cn.rst + paddle_cn/elementwise_add_cn.rst + paddle_cn/elementwise_div_cn.rst + paddle_cn/elementwise_floordiv_cn.rst + paddle_cn/elementwise_mod_cn.rst + paddle_cn/elementwise_mul_cn.rst + paddle_cn/elementwise_pow_cn.rst + paddle_cn/elementwise_sub_cn.rst + paddle_cn/elementwise_sum_cn.rst + paddle_cn/enable_dygraph_cn.rst + paddle_cn/equal_cn.rst + paddle_cn/erf_cn.rst + paddle_cn/ExecutionStrategy_cn.rst + paddle_cn/Executor_cn.rst + paddle_cn/expand_as_cn.rst + paddle_cn/expand_cn.rst + paddle_cn/exp_cn.rst + paddle_cn/eye_cn.rst + paddle_cn/fill_constant_cn.rst + paddle_cn/flatten_cn.rst + paddle_cn/flip_cn.rst + paddle_cn/floor_cn.rst + paddle_cn/full_cn.rst + paddle_cn/full_like_cn.rst + paddle_cn/gather_cn.rst + paddle_cn/gather_nd_cn.rst + paddle_cn/global_scope_cn.rst + paddle_cn/gradients_cn.rst + paddle_cn/greater_equal_cn.rst + paddle_cn/greater_than_cn.rst + paddle_cn/has_inf_cn.rst + paddle_cn/has_nan_cn.rst + paddle_cn/increment_cn.rst + paddle_cn/index_sample_cn.rst + paddle_cn/index_select_cn.rst + paddle_cn/inverse_cn.rst + paddle_cn/in_dygraph_mode_cn.rst + paddle_cn/isfinite_cn.rst + paddle_cn/is_empty_cn.rst + paddle_cn/kron_cn.rst + paddle_cn/l2_normalize_cn.rst + paddle_cn/less_equal_cn.rst + paddle_cn/less_than_cn.rst + paddle_cn/linspace_cn.rst + paddle_cn/load_cn.rst + paddle_cn/log1p_cn.rst + paddle_cn/logical_and_cn.rst + paddle_cn/logical_not_cn.rst + paddle_cn/logical_or_cn.rst + paddle_cn/logical_xor_cn.rst + paddle_cn/logsumexp_cn.rst + paddle_cn/log_cn.rst + paddle_cn/manual_seed_cn.rst + paddle_cn/get_cuda_rng_state_cn.rst + paddle_cn/set_cuda_rng_state_cn.rst + paddle_cn/masked_select_cn.rst + paddle_cn/matmul_cn.rst + paddle_cn/max_cn.rst + paddle_cn/maximum_cn.rst + paddle_cn/mean_cn.rst + paddle_cn/meshgrid_cn.rst + paddle_cn/min_cn.rst + paddle_cn/minimum_cn.rst + paddle_cn/multiplex_cn.rst + 
paddle_cn/mul_cn.rst + paddle_cn/name_scope_cn.rst + paddle_cn/no_grad_cn.rst + paddle_cn/nonzero_cn.rst + paddle_cn/normal_cn.rst + paddle_cn/not_equal_cn.rst + paddle_cn/numel_cn.rst + paddle_cn/ones_cn.rst + paddle_cn/ones_like_cn.rst + paddle_cn/ParallelExecutor_cn.rst + paddle_cn/ParamAttr_cn.rst + paddle_cn/pow_cn.rst + paddle_cn/Print_cn.rst + paddle_cn/Program_cn.rst + paddle_cn/program_guard_cn.rst + paddle_cn/py_func_cn.rst + paddle_cn/randint_cn.rst + paddle_cn/randn_cn.rst + paddle_cn/randperm_cn.rst + paddle_cn/rand_cn.rst + paddle_cn/range_cn.rst + paddle_cn/rank_cn.rst + paddle_cn/reciprocal_cn.rst + paddle_cn/reduce_all_cn.rst + paddle_cn/reduce_any_cn.rst + paddle_cn/reduce_max_cn.rst + paddle_cn/reduce_mean_cn.rst + paddle_cn/reduce_min_cn.rst + paddle_cn/reduce_prod_cn.rst + paddle_cn/reduce_sum_cn.rst + paddle_cn/reshape_cn.rst + paddle_cn/reverse_cn.rst + paddle_cn/roll_cn.rst + paddle_cn/round_cn.rst + paddle_cn/rsqrt_cn.rst + paddle_cn/save_cn.rst + paddle_cn/scale_cn.rst + paddle_cn/scatter_cn.rst + paddle_cn/scatter_nd_add_cn.rst + paddle_cn/scatter_nd_cn.rst + paddle_cn/scope_guard_cn.rst + paddle_cn/shape_cn.rst + paddle_cn/shard_index_cn.rst + paddle_cn/shuffle_cn.rst + paddle_cn/sign_cn.rst + paddle_cn/sin_cn.rst + paddle_cn/slice_cn.rst + paddle_cn/split_cn.rst + paddle_cn/sqrt_cn.rst + paddle_cn/square_cn.rst + paddle_cn/squeeze_cn.rst + paddle_cn/stack_cn.rst + paddle_cn/standard_normal.rst + paddle_cn/stanh_cn.rst + paddle_cn/std_cn.rst + paddle_cn/strided_slice_cn.rst + paddle_cn/sums_cn.rst + paddle_cn/tanh_cn.rst + paddle_cn/tile_cn.rst + paddle_cn/topk_cn.rst + paddle_cn/trace_cn.rst + paddle_cn/transpose_cn.rst + paddle_cn/tril_cn.rst + paddle_cn/triu_cn.rst + paddle_cn/t_cn.rst + paddle_cn/unbind_cn.rst + paddle_cn/unique_cn.rst + paddle_cn/unique_with_counts_cn.rst + paddle_cn/unsqueeze_cn.rst + paddle_cn/unstack_cn.rst + paddle_cn/Variable_cn.rst + paddle_cn/var_cn.rst + paddle_cn/WeightNormParamAttr_cn.rst + paddle_cn/zeros_cn.rst + paddle_cn/zeros_like_cn.rst diff --git a/doc/fluid/api_cn/paddle_cn/BuildStrategy_cn.rst b/doc/fluid/api_cn/paddle_cn/BuildStrategy_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..edd934c300240b123f517efa2a3c21ad969a858a --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/BuildStrategy_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_BuildStrategy: + +BuildStrategy +------------------------------- +:doc_source: paddle.fluid.compiler.BuildStrategy + + diff --git a/doc/fluid/api_cn/paddle_cn/CPUPlace_cn.rst b/doc/fluid/api_cn/paddle_cn/CPUPlace_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..72c05c06b811d1d5cca3c1761111b6c27bbb28f5 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/CPUPlace_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_CPUPlace: + +CPUPlace +------------------------------- +:doc_source: paddle.fluid.core.CPUPlace + + diff --git a/doc/fluid/api_cn/paddle_cn/CUDAPinnedPlace_cn.rst b/doc/fluid/api_cn/paddle_cn/CUDAPinnedPlace_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..78e4d2089530156509b491aae435e38e6976f299 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/CUDAPinnedPlace_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_paddle_cn_CUDAPinnedPlace: + +CUDAPinnedPlace +------------------------------- +:doc_source: paddle.fluid.core.CUDAPinnedPlace + + diff --git a/doc/fluid/api_cn/paddle_cn/CUDAPlace_cn.rst b/doc/fluid/api_cn/paddle_cn/CUDAPlace_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e121ebfa2e9dda8eee82e7e25631762cd2d4fc95 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/CUDAPlace_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_CUDAPlace: + +CUDAPlace +------------------------------- +:doc_source: paddle.fluid.core.CUDAPlace + + diff --git a/doc/fluid/api_cn/paddle_cn/CompiledProgram_cn.rst b/doc/fluid/api_cn/paddle_cn/CompiledProgram_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..68a1411137d74308f725d44a06ec2b278db30fc4 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/CompiledProgram_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_CompiledProgram: + +CompiledProgram +------------------------------- +:doc_source: paddle.fluid.compiler.CompiledProgram + + diff --git a/doc/fluid/api_cn/paddle_cn/ExecutionStrategy_cn.rst b/doc/fluid/api_cn/paddle_cn/ExecutionStrategy_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1e929c0114cfaf4324bf3d43f9473fd0895ad7e7 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/ExecutionStrategy_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_ExecutionStrategy: + +ExecutionStrategy +------------------------------- +:doc_source: paddle.fluid.ExecutionStrategy + + diff --git a/doc/fluid/api_cn/paddle_cn/Executor_cn.rst b/doc/fluid/api_cn/paddle_cn/Executor_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e24ad85c7393b403a641d6d88ea0c1c096738b75 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/Executor_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_Executor: + +Executor +------------------------------- +:doc_source: paddle.fluid.executor.Executor + + diff --git a/doc/fluid/api_cn/paddle_cn/ParallelExecutor_cn.rst b/doc/fluid/api_cn/paddle_cn/ParallelExecutor_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..355655e3b19304e6c5a09f1fe6a4fcc9392a6318 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/ParallelExecutor_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_ParallelExecutor: + +ParallelExecutor +------------------------------- +:doc_source: paddle.fluid.parallel_executor.ParallelExecutor + + diff --git a/doc/fluid/api_cn/paddle_cn/ParamAttr_cn.rst b/doc/fluid/api_cn/paddle_cn/ParamAttr_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f8b75a19c306f4aa96ec768a7c9a7fa6feeec84d --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/ParamAttr_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_ParamAttr: + +ParamAttr +------------------------------- +:doc_source: paddle.fluid.param_attr.ParamAttr + + diff --git a/doc/fluid/api_cn/paddle_cn/Print_cn.rst b/doc/fluid/api_cn/paddle_cn/Print_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d670d3f66ea0282f31119cb829a546c587ec6967 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/Print_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_Print: + +Print +------------------------------- +:doc_source: paddle.fluid.layers.control_flow.Print + + diff --git a/doc/fluid/api_cn/paddle_cn/Program_cn.rst b/doc/fluid/api_cn/paddle_cn/Program_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..03b2c8ad18be4d2695de6f649c4c270d52e14c4d --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/Program_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_paddle_cn_Program: + +Program +------------------------------- +:doc_source: paddle.fluid.framework.Program + + diff --git a/doc/fluid/api_cn/paddle_cn/Variable_cn.rst b/doc/fluid/api_cn/paddle_cn/Variable_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..04ec02466852f0f08443edb8d697b82ae86db273 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/Variable_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_Variable: + +Variable +------------------------------- +:doc_source: paddle.fluid.framework.Variable + + diff --git a/doc/fluid/api_cn/paddle_cn/WeightNormParamAttr_cn.rst b/doc/fluid/api_cn/paddle_cn/WeightNormParamAttr_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c1c85ecc48f3b7d7548749440861a174a2ebb8b8 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/WeightNormParamAttr_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_WeightNormParamAttr: + +WeightNormParamAttr +------------------------------- +:doc_source: paddle.fluid.param_attr.WeightNormParamAttr + + diff --git a/doc/fluid/api_cn/paddle_cn/abs_cn.rst b/doc/fluid/api_cn/paddle_cn/abs_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/fluid/api_cn/paddle_cn/acos_cn.rst b/doc/fluid/api_cn/paddle_cn/acos_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..50f398f9a71730c0e7e276c73033b651a9e62893 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/acos_cn.rst @@ -0,0 +1,5 @@ +.. _cn_api_paddle_cn_acos: + +acos +------------------------------- +:doc_source: paddle.fluid.layers.acos diff --git a/doc/fluid/api_cn/paddle_cn/add_cn.rst b/doc/fluid/api_cn/paddle_cn/add_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8fe64a011995437277129bd9070b2a53b6a56543 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/add_cn.rst @@ -0,0 +1,5 @@ +.. _cn_api_paddle_cn_add: + +add +------------------------------- +:doc_source: paddle.tensor.add diff --git a/doc/fluid/api_cn/paddle_cn/addcmul_cn.rst b/doc/fluid/api_cn/paddle_cn/addcmul_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8e5f47366e408bb1be1f91fcf41739bf2a6f84a6 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/addcmul_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_addcmul: + +addcmul +------------------------------- +:doc_source: paddle.tensor.addcmul + + diff --git a/doc/fluid/api_cn/paddle_cn/addmm_cn.rst b/doc/fluid/api_cn/paddle_cn/addmm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0f433263ab45345472d15064ab74e44338645874 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/addmm_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_addmm: + +addmm +------------------------------- +:doc_source: paddle.tensor.addmm + + diff --git a/doc/fluid/api_cn/paddle_cn/allclose_cn.rst b/doc/fluid/api_cn/paddle_cn/allclose_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f18de5eff6c9f0acbd6c2ce3edb22f4feec76240 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/allclose_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_allclose: + +allclose +------------------------------- +:doc_source: paddle.tensor.allclose + + diff --git a/doc/fluid/api_cn/paddle_cn/append_backward_cn.rst b/doc/fluid/api_cn/paddle_cn/append_backward_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c35ec3cb85e9945553116869b2306042c5a294a6 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/append_backward_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_paddle_cn_append_backward: + +append_backward +------------------------------- +:doc_source: paddle.fluid.backward.append_backward + + diff --git a/doc/fluid/api_cn/paddle_cn/arange_cn.rst b/doc/fluid/api_cn/paddle_cn/arange_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..68cbf48ead4f21ce1d88a04627af1e97f2eaf6ce --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/arange_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_arange: + +arange +------------------------------- +:doc_source: paddle.fluid.layers.range + + diff --git a/doc/fluid/api_cn/paddle_cn/argmax_cn.rst b/doc/fluid/api_cn/paddle_cn/argmax_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..bf9d65bcfb92733f68a101326869668e9fff5c9a --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/argmax_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_argmax: + +argmax +------------------------------- +:doc_source: paddle.fluid.layers.argmax + + diff --git a/doc/fluid/api_cn/paddle_cn/argmin_cn.rst b/doc/fluid/api_cn/paddle_cn/argmin_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0a37230d014df85624eb44deade2cedeb4b7ec80 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/argmin_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_argmin: + +argmin +------------------------------- +:doc_source: paddle.fluid.layers.argmin + + diff --git a/doc/fluid/api_cn/paddle_cn/argsort_cn.rst b/doc/fluid/api_cn/paddle_cn/argsort_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e9f128a9ae18a775306eb869b4e3e769270ed1e3 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/argsort_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_argsort: + +argsort +------------------------------- +:doc_source: paddle.tensor.argsort + + diff --git a/doc/fluid/api_cn/paddle_cn/asin_cn.rst b/doc/fluid/api_cn/paddle_cn/asin_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..76457e520512ca23924f8ee26a8284aa8b6b3fe2 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/asin_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_asin: + +asin +------------------------------- +:doc_source: paddle.fluid.layers.asin + + diff --git a/doc/fluid/api_cn/paddle_cn/atan_cn.rst b/doc/fluid/api_cn/paddle_cn/atan_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..60c1e9f3e5f9b26313803ffe03ec9a091c86c2da --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/atan_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_atan: + +atan +------------------------------- +:doc_source: paddle.fluid.layers.atan + + diff --git a/doc/fluid/api_cn/paddle_cn/bmm_cn.rst b/doc/fluid/api_cn/paddle_cn/bmm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7344ed881ec1e85bb345fe68999f4ff406bd5024 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/bmm_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_bmm: + +bmm +------------------------------- +:doc_source: paddle.tensor.bmm + + diff --git a/doc/fluid/api_cn/paddle_cn/cast_cn.rst b/doc/fluid/api_cn/paddle_cn/cast_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6749316932820213964361f025fa2610a59f841a --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/cast_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_paddle_cn_cast: + +cast +------------------------------- +:doc_source: paddle.fluid.layers.cast + + diff --git a/doc/fluid/api_cn/paddle_cn/ceil_cn.rst b/doc/fluid/api_cn/paddle_cn/ceil_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9fbef712bf487b458490218e17d44ac7f6ef0ccd --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/ceil_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_ceil: + +ceil +------------------------------- +:doc_source: paddle.fluid.layers.ceil + + diff --git a/doc/fluid/api_cn/paddle_cn/cholesky_cn.rst b/doc/fluid/api_cn/paddle_cn/cholesky_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..65616f69d4cc3d1d293d5cdc8d04532817f8b8b9 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/cholesky_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_cholesky: + +cholesky +------------------------------- +:doc_source: paddle.tensor.cholesky + + diff --git a/doc/fluid/api_cn/paddle_cn/clip_cn.rst b/doc/fluid/api_cn/paddle_cn/clip_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..bef260e12c8b4eb2389bda00a4def1348b0c4eac --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/clip_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_clip: + +clip +------------------------------- +:doc_source: paddle.tensor.clip + + diff --git a/doc/fluid/api_cn/paddle_cn/concat_cn.rst b/doc/fluid/api_cn/paddle_cn/concat_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8ecc87904b69e24c5e32fe9818b75df5c7984705 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/concat_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_concat: + +concat +------------------------------- +:doc_source: paddle.fluid.layers.concat + + diff --git a/doc/fluid/api_cn/paddle_cn/cos_cn.rst b/doc/fluid/api_cn/paddle_cn/cos_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e64217eb117e16a5550522bba0caf95742112e23 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/cos_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_cos: + +cos +------------------------------- +:doc_source: paddle.fluid.layers.cos + + diff --git a/doc/fluid/api_cn/paddle_cn/create_global_var_cn.rst b/doc/fluid/api_cn/paddle_cn/create_global_var_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..84d78df3c34888595e444bb4eacaffd4b611205a --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/create_global_var_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_create_global_var: + +create_global_var +------------------------------- +:doc_source: paddle.fluid.layers.tensor.create_global_var + + diff --git a/doc/fluid/api_cn/paddle_cn/create_parameter_cn.rst b/doc/fluid/api_cn/paddle_cn/create_parameter_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5571c52b9824c719be69118972aaf785542e2f88 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/create_parameter_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_create_parameter: + +create_parameter +------------------------------- +:doc_source: paddle.fluid.layers.create_parameter + + diff --git a/doc/fluid/api_cn/paddle_cn/create_tensor_cn.rst b/doc/fluid/api_cn/paddle_cn/create_tensor_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..29bcfdec169f0e48899bc59a923725584f2d011a --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/create_tensor_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_paddle_cn_create_tensor: + +create_tensor +------------------------------- +:doc_source: paddle.fluid.layers.create_tensor + + diff --git a/doc/fluid/api_cn/paddle_cn/crop_tensor_cn.rst b/doc/fluid/api_cn/paddle_cn/crop_tensor_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..93e0d76ff87e0a62e58ff2de600d9c993011db79 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/crop_tensor_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_crop_tensor: + +crop_tensor +------------------------------- +:doc_source: paddle.fluid.layers.crop_tensor + + diff --git a/doc/fluid/api_cn/paddle_cn/cross_cn.rst b/doc/fluid/api_cn/paddle_cn/cross_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..cf6859f3c3f0037223514fe6fbefda2e6470829d --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/cross_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_cross: + +cross +------------------------------- +:doc_source: paddle.tensor.cross + + diff --git a/doc/fluid/api_cn/paddle_cn/cumsum_cn.rst b/doc/fluid/api_cn/paddle_cn/cumsum_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..89fbc20a319e12b7ffff36e2b241d316f9616dbe --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/cumsum_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_cumsum: + +cumsum +------------------------------- +:doc_source: paddle.tensor.cumsum + + diff --git a/doc/fluid/api_cn/paddle_cn/default_main_program_cn.rst b/doc/fluid/api_cn/paddle_cn/default_main_program_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3afd8c3c8ffd713dd5f4329a7fd36f041d48d0c0 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/default_main_program_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_default_main_program: + +default_main_program +------------------------------- +:doc_source: paddle.fluid.framework.default_main_program + + diff --git a/doc/fluid/api_cn/paddle_cn/default_startup_program_cn.rst b/doc/fluid/api_cn/paddle_cn/default_startup_program_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0e3afd4c5b7d133b1d0cd9d086f8e4eb33fdcd7b --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/default_startup_program_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_default_startup_program: + +default_startup_program +------------------------------- +:doc_source: paddle.fluid.framework.default_startup_program + + diff --git a/doc/fluid/api_cn/paddle_cn/diag_cn.rst b/doc/fluid/api_cn/paddle_cn/diag_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f525efb5890531e85eee24367e96b3f62c8fb0ea --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/diag_cn.rst @@ -0,0 +1,84 @@ +.. _cn_api_paddle_cn_diag: + +diag +------------------------------- + +.. py:function:: paddle.diag(x, offset=0, padding_value=0, name=None) + + +如果 ``x`` 是向量(1-D张量),则返回带有 ``x`` 元素作为对角线的2-D方阵。 + +如果 ``x`` 是矩阵(2-D张量),则提取 ``x`` 的对角线元素,以1-D张量返回。 + +参数 ``offset`` 控制对角线偏移量: + +- 如果 ``offset`` = 0,则为主对角线。 +- 如果 ``offset`` > 0,则为上对角线。 +- 如果 ``offset`` < 0,则为下对角线。 + +参数 +::::::::: + - x(Tensor):输入的 `Tensor`。它的形状可以是一维或二维。其数据类型应为float32,float64,int32,int64。 + - offset(int,可选):对角线偏移量。正值表示上对角线,0表示主对角线,负值表示下对角线。 + - padding_value(int|float,可选):使用此值来填充指定对角线以外的区域。仅在输入为一维张量时生效。默认值为0。 + - name (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +::::::::: +``Tensor``,方阵或向量。输出数据类型与输入数据类型相同。 + + +代码示例 1 +::::::::: + +.. 
code-block:: python + + import paddle + + paddle.disable_static() + x = paddle.to_tensor([1, 2, 3]) + y = paddle.diag(x) + print(y.numpy()) + # [[1 0 0] + # [0 2 0] + # [0 0 3]] + + y = paddle.diag(x, offset=1) + print(y.numpy()) + # [[0 1 0 0] + # [0 0 2 0] + # [0 0 0 3] + # [0 0 0 0]] + + y = paddle.diag(x, padding_value=6) + print(y.numpy()) + # [[1 6 6] + # [6 2 6] + # [6 6 3]] + + +代码示例 2 +::::::::: + +.. code-block:: python + + import paddle + + paddle.disable_static() + x = paddle.to_tensor([[1, 2, 3], [4, 5, 6]]) + y = paddle.diag(x) + print(y.numpy()) + # [1 5] + + y = paddle.diag(x, offset=1) + print(y.numpy()) + # [2 6] + + y = paddle.diag(x, offset=-1) + print(y.numpy()) + # [4] + + + + + diff --git a/doc/fluid/api_cn/paddle_cn/disable_imperative_cn.rst b/doc/fluid/api_cn/paddle_cn/disable_imperative_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6437fbf3f7d35edd79ae4ac636ae07e9bbe4a269 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/disable_imperative_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_disable_imperative: + +disable_imperative +------------------------------- +:doc_source: paddle.fluid.dygraph.base.disable_dygraph + + diff --git a/doc/fluid/api_cn/paddle_cn/dist_cn.rst b/doc/fluid/api_cn/paddle_cn/dist_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..91407a83b91efc9545c11928693ae23be3b65e4b --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/dist_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_dist: + +dist +------------------------------- +:doc_source: paddle.tensor.dist + + diff --git a/doc/fluid/api_cn/paddle_cn/distribution_cn.rst b/doc/fluid/api_cn/paddle_cn/distribution_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..acbdd00b7a187372a5c74f5f39ba238984331025 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/distribution_cn.rst @@ -0,0 +1,10 @@ +============ +distribution +============ + +.. toctree:: + :maxdepth: 1 + + distribution_cn/Distribution_cn.rst + distribution_cn/Normal_cn.rst + distribution_cn/Uniform_cn.rst diff --git a/doc/fluid/api_cn/paddle_cn/distribution_cn/Distribution_cn.rst b/doc/fluid/api_cn/paddle_cn/distribution_cn/Distribution_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c513c281df548f83a62a5183a6ef2b9f19e04ca9 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/distribution_cn/Distribution_cn.rst @@ -0,0 +1,49 @@ +.. _cn_api_distribution_Distribution: + +Distribution +------------------------------- + +.. py:class:: paddle.distribution.Distribution() + + + + +概率分布的抽象基类,在具体的分布中实现具体功能。 + + +.. py:function:: sample() + +从分布中采样 + +.. py:function:: entropy() + +分布的信息熵 + +.. py:function:: log_prob(value) + +对数概率密度函数 + +参数: + - **value** (Tensor) - 输入张量。 + +.. py:function:: probs(value) + +概率密度函数 + +参数: + - **value** (Tensor) - 输入张量。 + +.. py:function:: kl_divergence(other) + +两个分布之间的KL散度。 + +参数: + - **other** (Distribution) - Distribution的实例。 + + + + + + + + diff --git a/doc/fluid/api_cn/paddle_cn/distribution_cn/Normal_cn.rst b/doc/fluid/api_cn/paddle_cn/distribution_cn/Normal_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c1bc0c8b57648787594e35f9ae9c9469e8eca81d --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/distribution_cn/Normal_cn.rst @@ -0,0 +1,128 @@ +.. _cn_api_distribution_Normal: + +Normal +------------------------------- + +.. py:class:: paddle.distribution.Normal(loc, scale, name=None) + + + + +正态分布 + +数学公式: + +.. 
math:: + + pdf(x; \mu, \sigma) = \frac{1}{Z}e^{\frac {-0.5 (x - \mu)^2} {\sigma^2} } + + Z = (2 \pi \sigma^2)^{0.5} + +上面的数学公式中: + +:math:`loc = \mu` : 平均值。 +:math:`scale = \sigma` : 标准差。 +:math:`Z`: 正态分布常量。 + +参数: + - **loc** (int|float|list|numpy.ndarray|Tensor) - 正态分布平均值。数据类型为int、float32、list、numpy.ndarray或Tensor。 + - **scale** (int|float|list|numpy.ndarray|Tensor) - 正态分布标准差。数据类型为int、float32、list、numpy.ndarray或Tensor。 + - **name** (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle + from paddle.distribution import Normal + + paddle.disable_static() + # Define a single scalar Normal distribution. + dist = Normal(loc=0., scale=3.) + # Define a batch of two scalar valued Normals. + # The first has mean 1 and standard deviation 11, the second 2 and 22. + dist = Normal(loc=[1., 2.], scale=[11., 22.]) + # Get 3 samples, returning a 3 x 2 tensor. + dist.sample([3]) + + # Define a batch of two scalar valued Normals. + # Both have mean 1, but different standard deviations. + dist = Normal(loc=1., scale=[11., 22.]) + + # Complete example + value_npdata = np.array([0.8], dtype="float32") + value_tensor = paddle.to_tensor(value_npdata) + + normal_a = Normal([0.], [1.]) + normal_b = Normal([0.5], [2.]) + sample = normal_a.sample([2]) + # a random tensor created by normal distribution with shape: [2, 1] + entropy = normal_a.entropy() + # [1.4189385] with shape: [1] + lp = normal_a.log_prob(value_tensor) + # [-1.2389386] with shape: [1] + p = normal_a.probs(value_tensor) + # [0.28969154] with shape: [1] + kl = normal_a.kl_divergence(normal_b) + # [0.34939718] with shape: [1] + + +.. py:function:: sample(shape, seed=0) + +生成指定维度的样本 + +参数: + - **shape** (list) - 1维列表,指定生成样本的维度。数据类型为int32。 + - **seed** (int) - 长整型数。 + +返回:预先设计好维度的张量, 数据类型为float32 + +返回类型:Tensor + +.. py:function:: entropy() + +信息熵 + +返回:正态分布的信息熵, 数据类型为float32 + +返回类型:Tensor + +.. py:function:: log_prob(value) + +对数概率密度函数 + +参数: + - **value** (Tensor) - 输入张量。数据类型为float32或float64。 + +返回:对数概率, 数据类型与value相同 + +返回类型:Tensor + +.. py:function:: probs(value) + +概率密度函数 + +参数: + - **value** (Tensor) - 输入张量。数据类型为float32或float64。 + +返回:概率, 数据类型与value相同 + +返回类型:Tensor + +.. py:function:: kl_divergence(other) + +两个正态分布之间的KL散度。 + +参数: + - **other** (Normal) - Normal的实例。 + +返回:两个正态分布之间的KL散度, 数据类型为float32 + +返回类型:Tensor + + + + + + diff --git a/doc/fluid/api_cn/paddle_cn/distribution_cn/Uniform_cn.rst b/doc/fluid/api_cn/paddle_cn/distribution_cn/Uniform_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..382d1e9c66ff2691e045caf41f37241f21ba5978 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/distribution_cn/Uniform_cn.rst @@ -0,0 +1,115 @@ +.. _cn_api_distribution_Uniform: + +Uniform +------------------------------- + +.. py:class:: paddle.distribution.Uniform(low, high, name=None) + + + + +均匀分布 + +概率密度函数(pdf)为: + +.. math:: + + pdf(x; a, b) = \frac{1}{Z}, a <=x < b + + Z = b - a + +上面的数学公式中: + +:math:`low = a` 。 +:math:`high = b` 。 +:math:`Z`: 正态分布常量。 + +参数low和high的维度必须能够支持广播。 + +参数: + - **low** (int|float|list|numpy.ndarray|Tensor) - 均匀分布的下边界。数据类型为int、float32、list、numpy.ndarray或Tensor。 + - **high** (int|float|list|numpy.ndarray|Tensor) - 均匀分布的上边界。数据类型为int、float32、list、numpy.ndarray或Tensor。 + - **name** (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +**代码示例**: + +.. 
code-block:: python + + import numpy as np + import paddle + from paddle.distribution import Uniform + + paddle.disable_static() + # Without broadcasting, a single uniform distribution [3, 4]: + u1 = Uniform(low=3.0, high=4.0) + # 2 distributions [1, 3], [2, 4] + u2 = Uniform(low=[1.0, 2.0], high=[3.0, 4.0]) + # 4 distributions + u3 = Uniform(low=[[1.0, 2.0], [3.0, 4.0]], + high=[[1.5, 2.5], [3.5, 4.5]]) + + # With broadcasting: + u4 = Uniform(low=3.0, high=[5.0, 6.0, 7.0]) + + # Complete example + value_npdata = np.array([0.8], dtype="float32") + value_tensor = paddle.to_tensor(value_npdata) + + uniform = Uniform([0.], [2.]) + + sample = uniform.sample([2]) + # a random tensor created by uniform distribution with shape: [2, 1] + entropy = uniform.entropy() + # [0.6931472] with shape: [1] + lp = uniform.log_prob(value_tensor) + # [-0.6931472] with shape: [1] + p = uniform.probs(value_tensor) + # [0.5] with shape: [1] + + +.. py:function:: sample(shape, seed=0) + +生成指定维度的样本 + +参数: + - **shape** (list) - 1维列表,指定生成样本的维度。数据类型为int32。 + - **seed** (int) - 长整型数。 + +返回:预先设计好维度的张量, 数据类型为float32 + +返回类型:Tensor + +.. py:function:: entropy() + +信息熵 + +返回:均匀分布的信息熵, 数据类型为float32 + +返回类型:Tensor + +.. py:function:: log_prob(value) + +对数概率密度函数 + +参数: + - **value** (Tensor) - 输入张量。数据类型为float32或float64。 + +返回:对数概率, 数据类型与value相同 + +返回类型:Tensor + +.. py:function:: probs(value) + +概率密度函数 + +参数: + - **value** (Tensor) - 输入张量。数据类型为float32或float64。 + +返回:概率, 数据类型与value相同 + +返回类型:Tensor + + + + + diff --git a/doc/fluid/api_cn/paddle_cn/div_cn.rst b/doc/fluid/api_cn/paddle_cn/div_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..df2cdf040bc862e26f0ed39913c254e0d707cc2b --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/div_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_div: + +div +------------------------------- +:doc_source: paddle.fluid.layers.elementwise_div + + diff --git a/doc/fluid/api_cn/paddle_cn/dot_cn.rst b/doc/fluid/api_cn/paddle_cn/dot_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5b318d40c29e3c3eb0a8171d2efce7ff7f9a9c4b --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/dot_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_dot: + +dot +------------------------------- +:doc_source: paddle.tensor.dot + + diff --git a/doc/fluid/api_cn/paddle_cn/elementwise_add_cn.rst b/doc/fluid/api_cn/paddle_cn/elementwise_add_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d93922f6951573505e0764ca571cc59d94949f8e --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/elementwise_add_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_elementwise_add: + +elementwise_add +------------------------------- +:doc_source: paddle.fluid.layers.elementwise_add + + diff --git a/doc/fluid/api_cn/paddle_cn/elementwise_div_cn.rst b/doc/fluid/api_cn/paddle_cn/elementwise_div_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..81954791543e2a3c279d54146b9cbffc3c29749d --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/elementwise_div_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_elementwise_div: + +elementwise_div +------------------------------- +:doc_source: paddle.fluid.layers.elementwise_div + + diff --git a/doc/fluid/api_cn/paddle_cn/elementwise_floordiv_cn.rst b/doc/fluid/api_cn/paddle_cn/elementwise_floordiv_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3dda78b70c32f8f5c745fcd9fd2e74d9d434542a --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/elementwise_floordiv_cn.rst @@ -0,0 +1,7 @@ +.. 
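The ``Distribution`` page above documents the shared interface (``sample``, ``entropy``, ``log_prob``, ``probs``, ``kl_divergence``) but gives no example of its own, while ``Normal`` and ``Uniform`` each show only their own usage. The following is a minimal sketch of exercising that shared interface polymorphically, assuming only the ``paddle.distribution`` classes and calls shown in the docs above; the ``summarize`` helper is illustrative and not part of the API:

.. code-block:: python

    import numpy as np
    import paddle
    from paddle.distribution import Normal, Uniform

    paddle.disable_static()

    value = paddle.to_tensor(np.array([0.8], dtype="float32"))

    def summarize(dist, value):
        # Illustrative helper: works for any concrete Distribution subclass,
        # because the abstract base class defines sample/entropy/log_prob/probs.
        return {
            "sample": dist.sample([2]),        # random tensor with shape [2, 1]
            "entropy": dist.entropy(),         # information entropy of the distribution
            "log_prob": dist.log_prob(value),  # log probability density at `value`
            "probs": dist.probs(value),        # probability density at `value`
        }

    normal = Normal(loc=[0.], scale=[1.])
    uniform = Uniform(low=[0.], high=[2.])

    print(summarize(normal, value))
    print(summarize(uniform, value))

    # kl_divergence compares two distributions of the same family,
    # e.g. two Normals as in the Normal example above.
    other = Normal(loc=[0.5], scale=[2.])
    print(normal.kl_divergence(other))

The point of the sketch is the design of the base class: code written against ``Distribution`` works unchanged for ``Normal`` and ``Uniform``, so only family-specific operations such as ``kl_divergence`` need to know the concrete type.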
_cn_api_paddle_cn_elementwise_floordiv: + +elementwise_floordiv +------------------------------- +:doc_source: paddle.fluid.layers.elementwise_floordiv + + diff --git a/doc/fluid/api_cn/paddle_cn/elementwise_mod_cn.rst b/doc/fluid/api_cn/paddle_cn/elementwise_mod_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b42a38ddf522015ea335b1d660edbc2efab56452 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/elementwise_mod_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_elementwise_mod: + +elementwise_mod +------------------------------- +:doc_source: paddle.fluid.layers.elementwise_mod + + diff --git a/doc/fluid/api_cn/paddle_cn/elementwise_mul_cn.rst b/doc/fluid/api_cn/paddle_cn/elementwise_mul_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1e1c6c168fa453d90b7b356cfc9abb167e316ea2 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/elementwise_mul_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_elementwise_mul: + +elementwise_mul +------------------------------- +:doc_source: paddle.fluid.layers.elementwise_mul + + diff --git a/doc/fluid/api_cn/paddle_cn/elementwise_pow_cn.rst b/doc/fluid/api_cn/paddle_cn/elementwise_pow_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b6c976b6828be1b3479af5b275bb89d4a7c9d260 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/elementwise_pow_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_elementwise_pow: + +elementwise_pow +------------------------------- +:doc_source: paddle.fluid.layers.elementwise_pow + + diff --git a/doc/fluid/api_cn/paddle_cn/elementwise_sub_cn.rst b/doc/fluid/api_cn/paddle_cn/elementwise_sub_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ba65fd95f372d2cb60d79f7a871c195e884153f5 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/elementwise_sub_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_elementwise_sub: + +elementwise_sub +------------------------------- +:doc_source: paddle.fluid.layers.elementwise_sub + + diff --git a/doc/fluid/api_cn/paddle_cn/elementwise_sum_cn.rst b/doc/fluid/api_cn/paddle_cn/elementwise_sum_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b33ccb531256d53f38bb516a673f370d6f0921a6 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/elementwise_sum_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_elementwise_sum: + +elementwise_sum +------------------------------- +:doc_source: paddle.tensor.elementwise_sum + + diff --git a/doc/fluid/api_cn/paddle_cn/enable_imperative_cn.rst b/doc/fluid/api_cn/paddle_cn/enable_imperative_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a198179f2f14989312c246563e309254c0fb3f22 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/enable_imperative_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_enable_imperative: + +enable_imperative +------------------------------- +:doc_source: paddle.fluid.dygraph.base.enable_dygraph + + diff --git a/doc/fluid/api_cn/paddle_cn/equal_all_cn.rst b/doc/fluid/api_cn/paddle_cn/equal_all_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..72c333de05f5fc7ea25242077485211fe07c8e8e --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/equal_all_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_paddle_cn_equal_all: + +equal_all +------------------------------- +:doc_source: paddle.tensor.equal_all + + diff --git a/doc/fluid/api_cn/paddle_cn/equal_cn.rst b/doc/fluid/api_cn/paddle_cn/equal_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b0ffbbc3368a6b29d9f81fb63ddb1e762add623d --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/equal_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_equal: + +equal +------------------------------- +:doc_source: paddle.tensor.equal + + diff --git a/doc/fluid/api_cn/paddle_cn/erf_cn.rst b/doc/fluid/api_cn/paddle_cn/erf_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..bb6d44c5498047047804190c93cf0d6e047527e8 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/erf_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_erf: + +erf +------------------------------- +:doc_source: paddle.fluid.layers.erf + + diff --git a/doc/fluid/api_cn/paddle_cn/exp_cn.rst b/doc/fluid/api_cn/paddle_cn/exp_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1eafe55de63a49992fb59654d79ef49dc878b6fb --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/exp_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_exp: + +exp +------------------------------- +:doc_source: paddle.fluid.layers.exp + + diff --git a/doc/fluid/api_cn/paddle_cn/expand_as_cn.rst b/doc/fluid/api_cn/paddle_cn/expand_as_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..448bb6afbc2c509d6245c066c634326a74e68958 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/expand_as_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_expand_as: + +expand_as +------------------------------- +:doc_source: paddle.tensor.expand_as + + diff --git a/doc/fluid/api_cn/paddle_cn/expand_cn.rst b/doc/fluid/api_cn/paddle_cn/expand_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..cdff8439ea3a3fb6e36436957c875552ee72caab --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/expand_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_expand: + +expand +------------------------------- +:doc_source: paddle.tensor.expand + + diff --git a/doc/fluid/api_cn/paddle_cn/eye_cn.rst b/doc/fluid/api_cn/paddle_cn/eye_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a46b9de5ca06901b315c2a872a6eef7fc5ce75d6 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/eye_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_eye: + +eye +------------------------------- +:doc_source: paddle.fluid.layers.eye + + diff --git a/doc/fluid/api_cn/paddle_cn/fill_constant_cn.rst b/doc/fluid/api_cn/paddle_cn/fill_constant_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f3ab99dfd87ec16f0d86b51a1598a6735246c581 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/fill_constant_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_fill_constant: + +fill_constant +------------------------------- +:doc_source: paddle.fluid.layers.fill_constant + + diff --git a/doc/fluid/api_cn/paddle_cn/flatten_cn.rst b/doc/fluid/api_cn/paddle_cn/flatten_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6c4aa8e486500bbf29f08afb7d60b068d4f29058 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/flatten_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_paddle_cn_flatten: + +flatten +------------------------------- +:doc_source: paddle.fluid.layers.flatten + + diff --git a/doc/fluid/api_cn/paddle_cn/flip_cn.rst b/doc/fluid/api_cn/paddle_cn/flip_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d8bfe40878a72bbc2f1ccb7819a4b9e3fa1ae6ad --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/flip_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_flip: + +flip +------------------------------- +:doc_source: paddle.tensor.flip + + diff --git a/doc/fluid/api_cn/paddle_cn/floor_cn.rst b/doc/fluid/api_cn/paddle_cn/floor_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..46880fc3213f569283f412d8211785ec4d7584ee --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/floor_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_floor: + +floor +------------------------------- +:doc_source: paddle.fluid.layers.floor + + diff --git a/doc/fluid/api_cn/paddle_cn/full_cn.rst b/doc/fluid/api_cn/paddle_cn/full_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8a90e43fd2007486379d3f2acbeaae9cae0152cf --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/full_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_full: + +full +------------------------------- +:doc_source: paddle.fluid.layers.fill_constant + + diff --git a/doc/fluid/api_cn/paddle_cn/full_like_cn.rst b/doc/fluid/api_cn/paddle_cn/full_like_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..895e80ad4167609db0e3ef0d89c5ea7a7663565e --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/full_like_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_full_like: + +full_like +------------------------------- +:doc_source: paddle.tensor.full_like + + diff --git a/doc/fluid/api_cn/paddle_cn/gather_cn.rst b/doc/fluid/api_cn/paddle_cn/gather_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..83347aa1ce6c6d9c1ab805b8c294a1ac944bfae3 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/gather_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_gather: + +gather +------------------------------- +:doc_source: paddle.fluid.layers.gather + + diff --git a/doc/fluid/api_cn/paddle_cn/gather_nd_cn.rst b/doc/fluid/api_cn/paddle_cn/gather_nd_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..da486af1c7eae83f4ecf623f538547e1495b42ca --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/gather_nd_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_gather_nd: + +gather_nd +------------------------------- +:doc_source: paddle.fluid.layers.gather_nd + + diff --git a/doc/fluid/api_cn/paddle_cn/get_cuda_rng_state_cn.rst b/doc/fluid/api_cn/paddle_cn/get_cuda_rng_state_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..92d9ee8452636caaf9bdb815f5089a03e472ead4 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/get_cuda_rng_state_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_get_cuda_rng_state: + +get_cuda_rng_state +------------------------------- +:doc_source: paddle.framework.get_cuda_rng_state + + diff --git a/doc/fluid/api_cn/paddle_cn/global_scope_cn.rst b/doc/fluid/api_cn/paddle_cn/global_scope_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b56a54a80292913ed341c9d5b39f9da068bf5541 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/global_scope_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_paddle_cn_global_scope: + +global_scope +------------------------------- +:doc_source: paddle.fluid.executor.global_scope + + diff --git a/doc/fluid/api_cn/paddle_cn/gradients_cn.rst b/doc/fluid/api_cn/paddle_cn/gradients_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a951414c84c303dfd6e123a3e18af3c49f46e366 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/gradients_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_gradients: + +gradients +------------------------------- +:doc_source: paddle.fluid.backward.gradients + + diff --git a/doc/fluid/api_cn/paddle_cn/greater_equal_cn.rst b/doc/fluid/api_cn/paddle_cn/greater_equal_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1f4367acce3364471e5c5ab8cb5a91ede96de6a2 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/greater_equal_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_greater_equal: + +greater_equal +------------------------------- +:doc_source: paddle.tensor.greater_equal + + diff --git a/doc/fluid/api_cn/paddle_cn/greater_than_cn.rst b/doc/fluid/api_cn/paddle_cn/greater_than_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f15865d063c8f972e040d24c3d508a6dd11d2264 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/greater_than_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_greater_than: + +greater_than +------------------------------- +:doc_source: paddle.tensor.greater_than + + diff --git a/doc/fluid/api_cn/paddle_cn/has_inf_cn.rst b/doc/fluid/api_cn/paddle_cn/has_inf_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..38962e52b0a74c9cc2a3cace7e036afb60730580 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/has_inf_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_has_inf: + +has_inf +------------------------------- +:doc_source: paddle.fluid.layers.has_inf + + diff --git a/doc/fluid/api_cn/paddle_cn/has_nan_cn.rst b/doc/fluid/api_cn/paddle_cn/has_nan_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..60a60e17b9d43686cdf01f2c6499386b2e8ef6cc --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/has_nan_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_has_nan: + +has_nan +------------------------------- +:doc_source: paddle.fluid.layers.has_nan + + diff --git a/doc/fluid/api_cn/paddle_cn/in_imperative_mode_cn.rst b/doc/fluid/api_cn/paddle_cn/in_imperative_mode_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..451aac266289f290a21f6def2899fcf7b3663885 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/in_imperative_mode_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_in_imperative_mode: + +in_imperative_mode +------------------------------- +:doc_source: paddle.fluid.framework.in_dygraph_mode + + diff --git a/doc/fluid/api_cn/paddle_cn/increment_cn.rst b/doc/fluid/api_cn/paddle_cn/increment_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9bb1e82b113f039706a5833e694c79ee1233b06d --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/increment_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_increment: + +increment +------------------------------- +:doc_source: paddle.fluid.layers.increment + + diff --git a/doc/fluid/api_cn/paddle_cn/index_sample_cn.rst b/doc/fluid/api_cn/paddle_cn/index_sample_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ad3f6089f2a527a774389e603a85fa69bbc536df --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/index_sample_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_paddle_cn_index_sample: + +index_sample +------------------------------- +:doc_source: paddle.tensor.index_sample + + diff --git a/doc/fluid/api_cn/paddle_cn/index_select_cn.rst b/doc/fluid/api_cn/paddle_cn/index_select_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..dc82f1e52e05d08ac8b567f4f6e2fcf6d7ae6120 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/index_select_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_index_select: + +index_select +------------------------------- +:doc_source: paddle.tensor.index_select + + diff --git a/doc/fluid/api_cn/paddle_cn/inverse_cn.rst b/doc/fluid/api_cn/paddle_cn/inverse_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1c0fa6cee9dd2b61eebf36eb465aecb5e083531d --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/inverse_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_inverse: + +inverse +------------------------------- +:doc_source: paddle.tensor.inverse + + diff --git a/doc/fluid/api_cn/paddle_cn/is_empty_cn.rst b/doc/fluid/api_cn/paddle_cn/is_empty_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f72778f5f6b9af555c6fda122648c3df583bed54 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/is_empty_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_is_empty: + +is_empty +------------------------------- +:doc_source: paddle.fluid.layers.is_empty + + diff --git a/doc/fluid/api_cn/paddle_cn/isfinite_cn.rst b/doc/fluid/api_cn/paddle_cn/isfinite_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0ffdec05b552c6f38c6dc06f9ed1be89caed0039 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/isfinite_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_isfinite: + +isfinite +------------------------------- +:doc_source: paddle.fluid.layers.isfinite + + diff --git a/doc/fluid/api_cn/paddle_cn/kron_cn.rst b/doc/fluid/api_cn/paddle_cn/kron_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..703dc72df762a0ab47e150d74fa6966f6740ad6d --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/kron_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_kron: + +kron +------------------------------- +:doc_source: paddle.tensor.kron + + diff --git a/doc/fluid/api_cn/paddle_cn/less_equal_cn.rst b/doc/fluid/api_cn/paddle_cn/less_equal_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..481e3f2863f10cee11765e97eb46ae5f252a3357 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/less_equal_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_less_equal: + +less_equal +------------------------------- +:doc_source: paddle.tensor.less_equal + + diff --git a/doc/fluid/api_cn/paddle_cn/less_than_cn.rst b/doc/fluid/api_cn/paddle_cn/less_than_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3f0802843bed23a6547b36efb3abe1d51a8e1519 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/less_than_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_less_than: + +less_than +------------------------------- +:doc_source: paddle.tensor.less_than + + diff --git a/doc/fluid/api_cn/paddle_cn/linspace_cn.rst b/doc/fluid/api_cn/paddle_cn/linspace_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..065a2f077c980be5513b0fdfd19a36c20b91f87e --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/linspace_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_paddle_cn_linspace: + +linspace +------------------------------- +:doc_source: paddle.fluid.layers.linspace + + diff --git a/doc/fluid/api_cn/paddle_cn/load_cn.rst b/doc/fluid/api_cn/paddle_cn/load_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..18fefd04b7f78734193a3ae31fa9e2f7bb4ff577 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/load_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_load: + +load +------------------------------- +:doc_source: paddle.fluid.io.load + + diff --git a/doc/fluid/api_cn/paddle_cn/log1p_cn.rst b/doc/fluid/api_cn/paddle_cn/log1p_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..67c86cc925b38b49ec481b0bc2f31482daf807b2 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/log1p_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_log1p: + +log1p +------------------------------- +:doc_source: paddle.tensor.log1p + + diff --git a/doc/fluid/api_cn/paddle_cn/log_cn.rst b/doc/fluid/api_cn/paddle_cn/log_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..71b7fda304585346303cd7996f576e04f09312a8 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/log_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_log: + +log +------------------------------- +:doc_source: paddle.fluid.layers.log + + diff --git a/doc/fluid/api_cn/paddle_cn/logical_and_cn.rst b/doc/fluid/api_cn/paddle_cn/logical_and_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..545ec836b524bd752d1462a958394b7d4a36d016 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/logical_and_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_logical_and: + +logical_and +------------------------------- +:doc_source: paddle.fluid.layers.logical_and + + diff --git a/doc/fluid/api_cn/paddle_cn/logical_not_cn.rst b/doc/fluid/api_cn/paddle_cn/logical_not_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..91a2ff8d06c959dd726f9b7ee02c57b67fe8eb96 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/logical_not_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_logical_not: + +logical_not +------------------------------- +:doc_source: paddle.fluid.layers.logical_not + + diff --git a/doc/fluid/api_cn/paddle_cn/logical_or_cn.rst b/doc/fluid/api_cn/paddle_cn/logical_or_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c3c2945da4606304a43603991b8721cfcdecb70b --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/logical_or_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_logical_or: + +logical_or +------------------------------- +:doc_source: paddle.fluid.layers.logical_or + + diff --git a/doc/fluid/api_cn/paddle_cn/logical_xor_cn.rst b/doc/fluid/api_cn/paddle_cn/logical_xor_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9f491d426bc96ab99dec9bff14af6618cfcbc414 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/logical_xor_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_logical_xor: + +logical_xor +------------------------------- +:doc_source: paddle.fluid.layers.logical_xor + + diff --git a/doc/fluid/api_cn/paddle_cn/logsumexp_cn.rst b/doc/fluid/api_cn/paddle_cn/logsumexp_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8df6a472f3f328dc1831200d3fac0f08560787d8 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/logsumexp_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_paddle_cn_logsumexp: + +logsumexp +------------------------------- +:doc_source: paddle.tensor.logsumexp + + diff --git a/doc/fluid/api_cn/paddle_cn/manual_seed_cn.rst b/doc/fluid/api_cn/paddle_cn/manual_seed_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2a1a1232c9ba6c2b019baaa347e6d74bb53741e0 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/manual_seed_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_manual_seed: + +manual_seed +------------------------------- +:doc_source: paddle.framework.manual_seed + + diff --git a/doc/fluid/api_cn/paddle_cn/matmul_cn.rst b/doc/fluid/api_cn/paddle_cn/matmul_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0c506dc32a680927683deda652af90174d702586 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/matmul_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_matmul: + +matmul +------------------------------- +:doc_source: paddle.tensor.matmul + + diff --git a/doc/fluid/api_cn/paddle_cn/max_cn.rst b/doc/fluid/api_cn/paddle_cn/max_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..abff79ad0e6ec9ae57dfa964c4af1cbba2829bd5 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/max_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_max: + +max +------------------------------- +:doc_source: paddle.fluid.layers.reduce_max + + diff --git a/doc/fluid/api_cn/paddle_cn/mean_cn.rst b/doc/fluid/api_cn/paddle_cn/mean_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..add1cc00729e6ec12497bed8b058f20ef6b79852 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/mean_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_mean: + +mean +------------------------------- +:doc_source: paddle.fluid.layers.mean + + diff --git a/doc/fluid/api_cn/paddle_cn/meshgrid_cn.rst b/doc/fluid/api_cn/paddle_cn/meshgrid_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9e331eb0530c37b959cdf1ebd52d06aaa5a2927c --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/meshgrid_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_meshgrid: + +meshgrid +------------------------------- +:doc_source: paddle.tensor.meshgrid + + diff --git a/doc/fluid/api_cn/paddle_cn/min_cn.rst b/doc/fluid/api_cn/paddle_cn/min_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e2125373ff7bd2395011fabee95d67f269572e51 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/min_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_min: + +min +------------------------------- +:doc_source: paddle.fluid.layers.reduce_min + + diff --git a/doc/fluid/api_cn/paddle_cn/mm_cn.rst b/doc/fluid/api_cn/paddle_cn/mm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3f20a8b9f1a15e797617ce8766717d4ede0e98a3 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/mm_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_mm: + +mm +------------------------------- +:doc_source: paddle.fluid.layers.matmul + + diff --git a/doc/fluid/api_cn/paddle_cn/mul_cn.rst b/doc/fluid/api_cn/paddle_cn/mul_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3d604ba2606fc7cdc28ed2db0f74e2f3a6c4f8c7 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/mul_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_paddle_cn_mul: + +mul +------------------------------- +:doc_source: paddle.fluid.layers.mul + + diff --git a/doc/fluid/api_cn/paddle_cn/multiplex_cn.rst b/doc/fluid/api_cn/paddle_cn/multiplex_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3886abb5f53604c07822bf3acd0540b4d721abdc --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/multiplex_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_multiplex: + +multiplex +------------------------------- +:doc_source: paddle.fluid.layers.multiplex + + diff --git a/doc/fluid/api_cn/paddle_cn/name_scope_cn.rst b/doc/fluid/api_cn/paddle_cn/name_scope_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a64be807bf8e4d2fe91a7b81700c3a2d9b62c411 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/name_scope_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_name_scope: + +name_scope +------------------------------- +:doc_source: paddle.fluid.framework.name_scope + + diff --git a/doc/fluid/api_cn/paddle_cn/no_grad_cn.rst b/doc/fluid/api_cn/paddle_cn/no_grad_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..bb9748f3d7adce6d1539c82568f6f5828133f73f --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/no_grad_cn.rst @@ -0,0 +1,49 @@ +.. _cn_api_paddle_no_grad: + +no_grad +------------------------------- + +.. py:class:: paddle.fluid.dygraph.no_grad_ + +:api_attr: 命令式编程模式(动态图) + + +创建一个上下文来禁用动态图梯度计算。在此模式下,每次计算的结果都将具有stop_gradient=True。 + +也可以用作一个装饰器(需要创建实例对象作为装饰器)。 + +**代码示例** + +.. code-block:: python + + import numpy as np + import paddle + + paddle.disable_static() + + # 用作生成器 + + data = np.array([[2, 3], [4, 5]]).astype('float32') + l0 = paddle.nn.Linear(2, 2) # l0.weight.gradient() is None + l1 = paddle.nn.Linear(2, 2) + with paddle.no_grad(): + # l1.weight.stop_gradient is False + tmp = l1.weight * 2 # tmp.stop_gradient is True + x = paddle.to_tensor(data) + y = l0(x) + tmp + o = l1(y) + o.backward() + print(tmp.gradient() is None) # True + print(l0.weight.gradient() is None) # False + + # 用作装饰器 + @paddle.no_grad() + def test_layer(): + inp = np.ones([3, 1024], dtype='float32') + t = paddle.to_tensor(inp) + linear1 = paddle.nn.Linear(1024, 4, bias_attr=False) + linear2 = paddle.nn.Linear(4, 4) + ret = linear1(t) + dy_ret = linear2(ret) + + test_layer() diff --git a/doc/fluid/api_cn/paddle_cn/nonzero_cn.rst b/doc/fluid/api_cn/paddle_cn/nonzero_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f207da073ac241ddeaae54a93afc1c351ef58747 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/nonzero_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_nonzero: + +nonzero +------------------------------- +:doc_source: paddle.tensor.nonzero + + diff --git a/doc/fluid/api_cn/paddle_cn/norm_cn.rst b/doc/fluid/api_cn/paddle_cn/norm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ed5a9a73e8dd8ed2eaac253fb5e8123196e44fe8 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/norm_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_norm: + +norm +------------------------------- +:doc_source: paddle.fluid.layers.l2_normalize + + diff --git a/doc/fluid/api_cn/paddle_cn/normal_cn.rst b/doc/fluid/api_cn/paddle_cn/normal_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..637b99d5d9f486ad13c7fe0434516f2fdde8cf39 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/normal_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_paddle_cn_normal: + +normal +------------------------------- +:doc_source: paddle.tensor.normal + + diff --git a/doc/fluid/api_cn/paddle_cn/not_equal_cn.rst b/doc/fluid/api_cn/paddle_cn/not_equal_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..872637f9e816a7fd1fa910d91441994b58c884f8 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/not_equal_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_not_equal: + +not_equal +------------------------------- +:doc_source: paddle.tensor.not_equal + + diff --git a/doc/fluid/api_cn/paddle_cn/ones_cn.rst b/doc/fluid/api_cn/paddle_cn/ones_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..51d5ba8f81b5afdce93d6c8331734bb1d7671e3b --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/ones_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_ones: + +ones +------------------------------- +:doc_source: paddle.fluid.layers.ones + + diff --git a/doc/fluid/api_cn/paddle_cn/ones_like_cn.rst b/doc/fluid/api_cn/paddle_cn/ones_like_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..017398cc6a190ab2e66fdbd6a283359f9cb17a32 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/ones_like_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_ones_like: + +ones_like +------------------------------- +:doc_source: paddle.fluid.layers.ones_like + + diff --git a/doc/fluid/api_cn/paddle_cn/pow_cn.rst b/doc/fluid/api_cn/paddle_cn/pow_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7d86955d9eee58af3a93a11485c385afd7ff5da4 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/pow_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_pow: + +pow +------------------------------- +:doc_source: paddle.fluid.layers.pow + + diff --git a/doc/fluid/api_cn/paddle_cn/program_guard_cn.rst b/doc/fluid/api_cn/paddle_cn/program_guard_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1efc04947caf3c1ae156b0147bf465f0e48a8181 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/program_guard_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_program_guard: + +program_guard +------------------------------- +:doc_source: paddle.fluid.framework.program_guard + + diff --git a/doc/fluid/api_cn/paddle_cn/py_func_cn.rst b/doc/fluid/api_cn/paddle_cn/py_func_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..da78c2a5e0212efa9df0cebe2e86678e44d24b08 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/py_func_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_py_func: + +py_func +------------------------------- +:doc_source: paddle.fluid.layers.nn.py_func + + diff --git a/doc/fluid/api_cn/paddle_cn/rand_cn.rst b/doc/fluid/api_cn/paddle_cn/rand_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d6311e9ef1d0f3be5ad418ec8e44cbc129f3478a --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/rand_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_rand: + +rand +------------------------------- +:doc_source: paddle.tensor.rand + + diff --git a/doc/fluid/api_cn/paddle_cn/randint_cn.rst b/doc/fluid/api_cn/paddle_cn/randint_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..85703b832f1527778e65e9d52c7afc7887f4d759 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/randint_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_paddle_cn_randint: + +randint +------------------------------- +:doc_source: paddle.tensor.randint + + diff --git a/doc/fluid/api_cn/paddle_cn/randn_cn.rst b/doc/fluid/api_cn/paddle_cn/randn_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3f8916869dd757fb57afccf1f476f18f6128be6b --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/randn_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_randn: + +randn +------------------------------- +:doc_source: paddle.tensor.randn + + diff --git a/doc/fluid/api_cn/paddle_cn/randperm_cn.rst b/doc/fluid/api_cn/paddle_cn/randperm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3ce37a4e80ac933c7bc380f19b1575e09fb6bcef --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/randperm_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_randperm: + +randperm +------------------------------- +:doc_source: paddle.tensor.randperm + + diff --git a/doc/fluid/api_cn/paddle_cn/rank_cn.rst b/doc/fluid/api_cn/paddle_cn/rank_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f90f6b3626c4996380b66c73495bf3eddee8e1d5 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/rank_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_rank: + +rank +------------------------------- +:doc_source: paddle.fluid.layers.rank + + diff --git a/doc/fluid/api_cn/paddle_cn/reciprocal_cn.rst b/doc/fluid/api_cn/paddle_cn/reciprocal_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1867a2d3064a95629f49cd94440d29aad48c6316 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/reciprocal_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_reciprocal: + +reciprocal +------------------------------- +:doc_source: paddle.fluid.layers.reciprocal + + diff --git a/doc/fluid/api_cn/paddle_cn/reduce_all_cn.rst b/doc/fluid/api_cn/paddle_cn/reduce_all_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ca6aff3be83a3c6aaba451bcca6d864e552cff1f --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/reduce_all_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_reduce_all: + +reduce_all +------------------------------- +:doc_source: paddle.fluid.layers.reduce_all + + diff --git a/doc/fluid/api_cn/paddle_cn/reduce_any_cn.rst b/doc/fluid/api_cn/paddle_cn/reduce_any_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2391139fcbe7db59ee2ac1ae5341483e78f2da96 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/reduce_any_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_reduce_any: + +reduce_any +------------------------------- +:doc_source: paddle.fluid.layers.reduce_any + + diff --git a/doc/fluid/api_cn/paddle_cn/reduce_max_cn.rst b/doc/fluid/api_cn/paddle_cn/reduce_max_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2a4f995c929a5edece1525114f2fcb104c4c8556 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/reduce_max_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_reduce_max: + +reduce_max +------------------------------- +:doc_source: paddle.fluid.layers.reduce_max + + diff --git a/doc/fluid/api_cn/paddle_cn/reduce_mean_cn.rst b/doc/fluid/api_cn/paddle_cn/reduce_mean_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..70712bdc09977822b3ad1bacbb660dd7b4f4521f --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/reduce_mean_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_paddle_cn_reduce_mean: + +reduce_mean +------------------------------- +:doc_source: paddle.fluid.layers.reduce_mean + + diff --git a/doc/fluid/api_cn/paddle_cn/reduce_min_cn.rst b/doc/fluid/api_cn/paddle_cn/reduce_min_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..72e38e8811ef2761c73ed02980e1fa9dcc82e0b0 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/reduce_min_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_reduce_min: + +reduce_min +------------------------------- +:doc_source: paddle.fluid.layers.reduce_min + + diff --git a/doc/fluid/api_cn/paddle_cn/reduce_prod_cn.rst b/doc/fluid/api_cn/paddle_cn/reduce_prod_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..043245a92ae0424be0dfbe4718ca25b481b8cf80 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/reduce_prod_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_reduce_prod: + +reduce_prod +------------------------------- +:doc_source: paddle.fluid.layers.reduce_prod + + diff --git a/doc/fluid/api_cn/paddle_cn/reduce_sum_cn.rst b/doc/fluid/api_cn/paddle_cn/reduce_sum_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9e6e626106c3d47dc6e8905b07727fe2af6d37fe --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/reduce_sum_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_reduce_sum: + +reduce_sum +------------------------------- +:doc_source: paddle.fluid.layers.reduce_sum + + diff --git a/doc/fluid/api_cn/paddle_cn/reshape_cn.rst b/doc/fluid/api_cn/paddle_cn/reshape_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7fb169f7f764c49d3f745b008ad073c548251a0c --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/reshape_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_reshape: + +reshape +------------------------------- +:doc_source: paddle.fluid.layers.reshape + + diff --git a/doc/fluid/api_cn/paddle_cn/reverse_cn.rst b/doc/fluid/api_cn/paddle_cn/reverse_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a92f274d13641271c20d41a18fb47c1407a8d9ea --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/reverse_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_reverse: + +reverse +------------------------------- +:doc_source: paddle.fluid.layers.reverse + + diff --git a/doc/fluid/api_cn/paddle_cn/roll_cn.rst b/doc/fluid/api_cn/paddle_cn/roll_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..331a0822e5482945a9d873b9d1999a08783e3ff2 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/roll_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_roll: + +roll +------------------------------- +:doc_source: paddle.tensor.roll + + diff --git a/doc/fluid/api_cn/paddle_cn/round_cn.rst b/doc/fluid/api_cn/paddle_cn/round_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..78ced08e88f157d87d19dd8201e3ca2fccf17dd4 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/round_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_round: + +round +------------------------------- +:doc_source: paddle.fluid.layers.round + + diff --git a/doc/fluid/api_cn/paddle_cn/rsqrt_cn.rst b/doc/fluid/api_cn/paddle_cn/rsqrt_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9af5286077536e1355311a24c8cfd63c5e691428 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/rsqrt_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_paddle_cn_rsqrt: + +rsqrt +------------------------------- +:doc_source: paddle.fluid.layers.rsqrt + + diff --git a/doc/fluid/api_cn/paddle_cn/save_cn.rst b/doc/fluid/api_cn/paddle_cn/save_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e6a28b44062350cc85d0d9f0ea87d7f8e739da53 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/save_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_save: + +save +------------------------------- +:doc_source: paddle.fluid.save + + diff --git a/doc/fluid/api_cn/paddle_cn/scale_cn.rst b/doc/fluid/api_cn/paddle_cn/scale_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3899f101df34613ec35cea7a977c07852e45b35b --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/scale_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_scale: + +scale +------------------------------- +:doc_source: paddle.fluid.layers.scale + + diff --git a/doc/fluid/api_cn/paddle_cn/scatter_cn.rst b/doc/fluid/api_cn/paddle_cn/scatter_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..619013d41ee2601a6b22ea153239f2a56294960e --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/scatter_cn.rst @@ -0,0 +1,3 @@ +scatter +------------------------------- +:doc_source: paddle.tensor.scatter \ No newline at end of file diff --git a/doc/fluid/api_cn/paddle_cn/scatter_nd_add_cn.rst b/doc/fluid/api_cn/paddle_cn/scatter_nd_add_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..deec614290f474d012a315d3665313ae75590aeb --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/scatter_nd_add_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_scatter_nd_add: + +scatter_nd_add +------------------------------- +:doc_source: paddle.fluid.layers.scatter_nd_add + + diff --git a/doc/fluid/api_cn/paddle_cn/scatter_nd_cn.rst b/doc/fluid/api_cn/paddle_cn/scatter_nd_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..53295606800f8f251bf0e04cfcc89bcb79e9a58f --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/scatter_nd_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_scatter_nd: + +scatter_nd +------------------------------- +:doc_source: paddle.fluid.layers.scatter_nd + + diff --git a/doc/fluid/api_cn/paddle_cn/scope_guard_cn.rst b/doc/fluid/api_cn/paddle_cn/scope_guard_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e3ddb0048dd84112cedb7de353749709460ed83b --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/scope_guard_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_scope_guard: + +scope_guard +------------------------------- +:doc_source: paddle.fluid.executor.scope_guard + + diff --git a/doc/fluid/api_cn/paddle_cn/set_cuda_rng_state_cn.rst b/doc/fluid/api_cn/paddle_cn/set_cuda_rng_state_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f628ae3c361e81600f992b16e89e9f4f703a63de --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/set_cuda_rng_state_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_set_cuda_rng_state: + +set_cuda_rng_state +------------------------------- +:doc_source: paddle.framework.set_cuda_rng_state + + diff --git a/doc/fluid/api_cn/paddle_cn/shape_cn.rst b/doc/fluid/api_cn/paddle_cn/shape_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..24d0f1fee0cda331bc569181ef62e69232e1a24d --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/shape_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_paddle_cn_shape: + +shape +------------------------------- +:doc_source: paddle.fluid.layers.shape + + diff --git a/doc/fluid/api_cn/paddle_cn/shard_index_cn.rst b/doc/fluid/api_cn/paddle_cn/shard_index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..032b21bcf84a0c35f044bf27dcaad1a9433be024 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/shard_index_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_shard_index: + +shard_index +------------------------------- +:doc_source: paddle.fluid.layers.shard_index + + diff --git a/doc/fluid/api_cn/paddle_cn/shuffle_cn.rst b/doc/fluid/api_cn/paddle_cn/shuffle_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..19d295b154811f2543934a7afe98f812bdbe8bea --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/shuffle_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_shuffle: + +shuffle +------------------------------- +:doc_source: paddle.fluid.io.shuffle + + diff --git a/doc/fluid/api_cn/paddle_cn/sign_cn.rst b/doc/fluid/api_cn/paddle_cn/sign_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d091e89f3f1525d2af5e7e90bb89a90582ec49cf --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/sign_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_sign: + +sign +------------------------------- +:doc_source: paddle.fluid.layers.sign + + diff --git a/doc/fluid/api_cn/paddle_cn/sin_cn.rst b/doc/fluid/api_cn/paddle_cn/sin_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e9495ad34731fd985243cff36dfa40a068803f56 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/sin_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_sin: + +sin +------------------------------- +:doc_source: paddle.fluid.layers.sin + + diff --git a/doc/fluid/api_cn/paddle_cn/slice_cn.rst b/doc/fluid/api_cn/paddle_cn/slice_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..88ab4afaf405eb7834cbeaadaf7539db8ef3764f --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/slice_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_slice: + +slice +------------------------------- +:doc_source: paddle.fluid.layers.slice + + diff --git a/doc/fluid/api_cn/paddle_cn/sort_cn.rst b/doc/fluid/api_cn/paddle_cn/sort_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d4ee20b7e89a316cb2d7e72f54c2d58ec6191fc5 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/sort_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_sort: + +sort +------------------------------- +:doc_source: paddle.tensor.sort + + diff --git a/doc/fluid/api_cn/paddle_cn/split_cn.rst b/doc/fluid/api_cn/paddle_cn/split_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5a6389aed71a8239ccd72a6d0b50cea541392349 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/split_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_split: + +split +------------------------------- +:doc_source: paddle.fluid.layers.split + + diff --git a/doc/fluid/api_cn/paddle_cn/sqrt_cn.rst b/doc/fluid/api_cn/paddle_cn/sqrt_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d11e40e259f97b20184a49dba66628357a67397b --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/sqrt_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_paddle_cn_sqrt: + +sqrt +------------------------------- +:doc_source: paddle.fluid.layers.sqrt + + diff --git a/doc/fluid/api_cn/paddle_cn/square_cn.rst b/doc/fluid/api_cn/paddle_cn/square_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ebcc5415146372918168d0bcc6aca4346f45e742 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/square_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_square: + +square +------------------------------- +:doc_source: paddle.fluid.layers.square + + diff --git a/doc/fluid/api_cn/paddle_cn/squeeze_cn.rst b/doc/fluid/api_cn/paddle_cn/squeeze_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3980a524dc5783e70295bcf7f0b4fa5c23f9c378 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/squeeze_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_squeeze: + +squeeze +------------------------------- +:doc_source: paddle.fluid.layers.squeeze + + diff --git a/doc/fluid/api_cn/paddle_cn/stack_cn.rst b/doc/fluid/api_cn/paddle_cn/stack_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7668680d2b3d9c6988ba71e932affa46613da467 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/stack_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_stack: + +stack +------------------------------- +:doc_source: paddle.fluid.layers.stack + + diff --git a/doc/fluid/api_cn/paddle_cn/standard_normal_cn.rst b/doc/fluid/api_cn/paddle_cn/standard_normal_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..43d8419574c17089c3ab231915aa9bce8c5cddd5 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/standard_normal_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_standard_normal: + +standard_normal +------------------------------- +:doc_source: paddle.tensor.standard_normal + + diff --git a/doc/fluid/api_cn/paddle_cn/stanh_cn.rst b/doc/fluid/api_cn/paddle_cn/stanh_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8a6bed539ccc4105ad0a0ce11e1f77dc67e5be25 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/stanh_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_stanh: + +stanh +------------------------------- +:doc_source: paddle.fluid.layers.stanh + + diff --git a/doc/fluid/api_cn/paddle_cn/std_cn.rst b/doc/fluid/api_cn/paddle_cn/std_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d3e99d47ab9f6b442d89e609c23fdeae70f1f943 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/std_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_std: + +std +------------------------------- +:doc_source: paddle.tensor.std + + diff --git a/doc/fluid/api_cn/paddle_cn/strided_slice_cn.rst b/doc/fluid/api_cn/paddle_cn/strided_slice_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..55c2f6ac32e525512266bb3086165361e59a54ae --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/strided_slice_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_strided_slice: + +strided_slice +------------------------------- +:doc_source: paddle.fluid.layers.strided_slice + + diff --git a/doc/fluid/api_cn/paddle_cn/sum_cn.rst b/doc/fluid/api_cn/paddle_cn/sum_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..724e8e83b9b4afbddaabee3d270943b521351b11 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/sum_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_paddle_cn_sum: + +sum +------------------------------- +:doc_source: paddle.fluid.layers.reduce_sum + + diff --git a/doc/fluid/api_cn/paddle_cn/sums_cn.rst b/doc/fluid/api_cn/paddle_cn/sums_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c13aa06a2b0b61458208872626d23449d35261c1 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/sums_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_sums: + +sums +------------------------------- +:doc_source: paddle.fluid.layers.sums + + diff --git a/doc/fluid/api_cn/paddle_cn/t_cn.rst b/doc/fluid/api_cn/paddle_cn/t_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2c4a9e3ed9b62caabe902f074b15de6dd6d8a30b --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/t_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_t: + +t +------------------------------- +:doc_source: paddle.tensor.t + + diff --git a/doc/fluid/api_cn/paddle_cn/tanh_cn.rst b/doc/fluid/api_cn/paddle_cn/tanh_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..cd1b2a854d1e790ed99e6fa3901a5154662bd2e4 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/tanh_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_tanh: + +tanh +------------------------------- +:doc_source: paddle.fluid.layers.tanh + + diff --git a/doc/fluid/api_cn/paddle_cn/tile_cn.rst b/doc/fluid/api_cn/paddle_cn/tile_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6d74f6795a6d484ddcf04c0d859ddda06c9af460 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/tile_cn.rst @@ -0,0 +1,6 @@ +.. _cn_api_paddle_cn_tile: + +tile +------------------------------- +:doc_source: paddle.tensor.tile + diff --git a/doc/fluid/api_cn/paddle_cn/topk_cn.rst b/doc/fluid/api_cn/paddle_cn/topk_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0214051799daf0c3cb883b3252677e9797c48bb3 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/topk_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_topk: + +topk +------------------------------- +:doc_source: paddle.fluid.layers.topk + + diff --git a/doc/fluid/api_cn/paddle_cn/trace_cn.rst b/doc/fluid/api_cn/paddle_cn/trace_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5c8d80f5673e4defd6e00fc9a2d94b2c02387601 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/trace_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_trace: + +trace +------------------------------- +:doc_source: paddle.tensor.trace + + diff --git a/doc/fluid/api_cn/paddle_cn/transpose_cn.rst b/doc/fluid/api_cn/paddle_cn/transpose_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..26dc3e5e3e0fc038be3a6390937608715b884e7c --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/transpose_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_transpose: + +transpose +------------------------------- +:doc_source: paddle.fluid.layers.transpose + + diff --git a/doc/fluid/api_cn/paddle_cn/tril_cn.rst b/doc/fluid/api_cn/paddle_cn/tril_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9a6e9ac60b898b7b27f3ca7ee32747812f0698bf --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/tril_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_tril: + +tril +------------------------------- +:doc_source: paddle.tensor.tril + + diff --git a/doc/fluid/api_cn/paddle_cn/triu_cn.rst b/doc/fluid/api_cn/paddle_cn/triu_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..66a032873dbf3c26b40deaa558e274bb4c63e402 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/triu_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_paddle_cn_triu: + +triu +------------------------------- +:doc_source: paddle.tensor.triu + + diff --git a/doc/fluid/api_cn/paddle_cn/unbind_cn.rst b/doc/fluid/api_cn/paddle_cn/unbind_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e9b35c5b5ba055f5e397de822a39137daaf0a6af --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/unbind_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_unbind: + +unbind +------------------------------- +:doc_source: paddle.tensor.unbind + + diff --git a/doc/fluid/api_cn/paddle_cn/unique_cn.rst b/doc/fluid/api_cn/paddle_cn/unique_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..bfb9216ea2220202909dc019ede1df04046d7d8f --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/unique_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_unique: + +unique +------------------------------- +:doc_source: paddle.tensor.unique + + diff --git a/doc/fluid/api_cn/paddle_cn/unique_with_counts_cn.rst b/doc/fluid/api_cn/paddle_cn/unique_with_counts_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8194cab68ce858c88db0b74923f7dcc8398f09b6 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/unique_with_counts_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_unique_with_counts: + +unique_with_counts +------------------------------- +:doc_source: paddle.fluid.layers.unique_with_counts + + diff --git a/doc/fluid/api_cn/paddle_cn/unsqueeze_cn.rst b/doc/fluid/api_cn/paddle_cn/unsqueeze_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7d8a6cbaeaebdf7945d82282ada550542cefee42 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/unsqueeze_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_unsqueeze: + +unsqueeze +------------------------------- +:doc_source: paddle.fluid.layers.unsqueeze + + diff --git a/doc/fluid/api_cn/paddle_cn/unstack_cn.rst b/doc/fluid/api_cn/paddle_cn/unstack_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..db732870975e470256892aa32d9960b99582b3b3 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/unstack_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_unstack: + +unstack +------------------------------- +:doc_source: paddle.fluid.layers.unstack + + diff --git a/doc/fluid/api_cn/paddle_cn/var_cn.rst b/doc/fluid/api_cn/paddle_cn/var_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f59ff94d4010fdcaaa2ea92b6980788687daee25 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/var_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_var: + +var +------------------------------- +:doc_source: paddle.tensor.var + + diff --git a/doc/fluid/api_cn/paddle_cn/where_cn.rst b/doc/fluid/api_cn/paddle_cn/where_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c584798539aaf679e0aca09b02fd4cba55ba5434 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/where_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_where: + +where +------------------------------- +:doc_source: paddle.fluid.layers.cond + + diff --git a/doc/fluid/api_cn/paddle_cn/zeros_cn.rst b/doc/fluid/api_cn/paddle_cn/zeros_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4789e28426d573ceb6a1fa67450759268df2b2b5 --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/zeros_cn.rst @@ -0,0 +1,7 @@ +.. 
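The stubs above for ``unbind``, ``unique``, ``unsqueeze``, ``var`` and the other redirected ops follow the same pattern. A second minimal sketch under the same assumption (paddle 2.0-style imperative mode); the values are again only illustrative:

.. code-block:: python

    import numpy as np
    import paddle

    paddle.disable_static()
    x = paddle.to_tensor(np.array([[1.0, 2.0],
                                   [3.0, 4.0]], dtype=np.float32))

    y = paddle.unsqueeze(x, axis=0)          # shape becomes [1, 2, 2]
    row0, row1 = paddle.unbind(x, axis=0)    # two tensors of shape [2]
    u = paddle.unique(paddle.to_tensor(np.array([2, 1, 2, 3], dtype=np.int64)))
    print(y.shape, row0.numpy(), u.numpy())  # [1, 2, 2] [1. 2.] [1 2 3]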
_cn_api_paddle_cn_zeros: + +zeros +------------------------------- +:doc_source: paddle.fluid.layers.zeros + + diff --git a/doc/fluid/api_cn/paddle_cn/zeros_like_cn.rst b/doc/fluid/api_cn/paddle_cn/zeros_like_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5130519800e3c43fe1f37c29388a2e477d27c54f --- /dev/null +++ b/doc/fluid/api_cn/paddle_cn/zeros_like_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_paddle_cn_zeros_like: + +zeros_like +------------------------------- +:doc_source: paddle.fluid.layers.zeros_like + + diff --git a/doc/fluid/api_cn/profiler_cn/cuda_profiler_cn.rst b/doc/fluid/api_cn/profiler_cn/cuda_profiler_cn.rst index d10f39ceade0509fb4ef9458c60616afe30d11df..ff292887d1b39cf7d11cd06569a22f18ae8d1505 100644 --- a/doc/fluid/api_cn/profiler_cn/cuda_profiler_cn.rst +++ b/doc/fluid/api_cn/profiler_cn/cuda_profiler_cn.rst @@ -1,46 +1,49 @@ -.. _cn_api_fluid_profiler_cuda_profiler: - -cuda_profiler -------------------------------- - -.. py:function:: paddle.fluid.profiler.cuda_profiler(output_file, output_mode=None, config=None) - - -CUDA性能分析器。该分析器通过调用CUDA运行时编程接口,对CUDA程序进行性能分析,并将分析结果写入输出文件output_file。输出格式由output_mode参数控制,性能分析配置选项由config参数控制。得到输出文件后,用户可使用 `NVIDIA Visual Profiler `_ 工具来加载这个输出文件以获得可视化结果。 - - -参数: - - **output_file** (str) – 输出文件名称, 输出结果将会写入该文件。 - - **output_mode** (str,可选) – 输出格式,有两种可以选择,分别是 key-value 键值对格式'kvp' 和 逗号分割的格式'csv'(默认格式)。 - - **config** (list, 可选) – NVIDIA性能分析配置列表,默认值为None时会选择以下配置:['gpustarttimestamp', 'gpuendtimestamp', 'gridsize3d', 'threadblocksize', 'streamid', 'enableonstart 0', 'conckerneltrace']。上述每个配置的含义和更多配置选项,请参考 `Compute Command Line Profiler User Guide `_ 。 - -抛出异常: - - ``ValueError`` - 如果输出格式output_mode不是'kvp'、'csv'两者之一,会抛出异常。 - -返回: 无 - -**代码示例** - - -.. code-block:: python - - import paddle.fluid as fluid - import paddle.fluid.profiler as profiler - import numpy as np - - epoc = 8 - dshape = [4, 3, 28, 28] - data = fluid.layers.data(name='data', shape=[3, 28, 28], dtype='float32') - conv = fluid.layers.conv2d(data, 20, 3, stride=[1, 1], padding=[1, 1]) - - place = fluid.CUDAPlace(0) - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) - - output_file = 'cuda_profiler.txt' - with profiler.cuda_profiler(output_file, 'csv') as nvprof: - for i in range(epoc): - input = np.random.random(dshape).astype('float32') - exe.run(fluid.default_main_program(), feed={'data': input}) - - # 之后可以使用 NVIDIA Visual Profile 可视化结果 +.. _cn_api_fluid_profiler_cuda_profiler: + +cuda_profiler +------------------------------- + +.. py:function:: paddle.fluid.profiler.cuda_profiler(output_file, output_mode=None, config=None) + + + + + +CUDA性能分析器。该分析器通过调用CUDA运行时编程接口,对CUDA程序进行性能分析,并将分析结果写入输出文件output_file。输出格式由output_mode参数控制,性能分析配置选项由config参数控制。得到输出文件后,用户可使用 `NVIDIA Visual Profiler `_ 工具来加载这个输出文件以获得可视化结果。 + + +参数: + - **output_file** (str) – 输出文件名称, 输出结果将会写入该文件。 + - **output_mode** (str,可选) – 输出格式,有两种可以选择,分别是 key-value 键值对格式'kvp' 和 逗号分割的格式'csv'(默认格式)。 + - **config** (list, 可选) – NVIDIA性能分析配置列表,默认值为None时会选择以下配置:['gpustarttimestamp', 'gpuendtimestamp', 'gridsize3d', 'threadblocksize', 'streamid', 'enableonstart 0', 'conckerneltrace']。上述每个配置的含义和更多配置选项,请参考 `Compute Command Line Profiler User Guide `_ 。 + +抛出异常: + - ``ValueError`` - 如果输出格式output_mode不是'kvp'、'csv'两者之一,会抛出异常。 + +返回: 无 + +**代码示例** + + +.. 
code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.profiler as profiler + import numpy as np + + epoc = 8 + dshape = [4, 3, 28, 28] + data = fluid.data(name='data', shape=[None, 3, 28, 28], dtype='float32') + conv = fluid.layers.conv2d(data, 20, 3, stride=[1, 1], padding=[1, 1]) + + place = fluid.CUDAPlace(0) + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + + output_file = 'cuda_profiler.txt' + with profiler.cuda_profiler(output_file, 'csv') as nvprof: + for i in range(epoc): + input = np.random.random(dshape).astype('float32') + exe.run(fluid.default_main_program(), feed={'data': input}) + + # 之后可以使用 NVIDIA Visual Profile 可视化结果 diff --git a/doc/fluid/api_cn/profiler_cn/profiler_cn.rst b/doc/fluid/api_cn/profiler_cn/profiler_cn.rst index 72901c5d01407385e2cfcd8bd81c74e2d7b73e65..c3ae718c596611f11731ad4cef6b993f66bfaf62 100644 --- a/doc/fluid/api_cn/profiler_cn/profiler_cn.rst +++ b/doc/fluid/api_cn/profiler_cn/profiler_cn.rst @@ -1,73 +1,77 @@ -.. _cn_api_fluid_profiler_profiler: - -profiler -------------------------------- - -.. py:function:: paddle.fluid.profiler.profiler(state, sorted_key=None, profile_path='/tmp/profile') - -通用性能分析器 。与 :ref:`cn_api_fluid_profiler_cuda_profiler` 不同,此分析器可用于分析CPU和GPU程序。 - -参数: - - **state** (str) – 性能分析状态, 取值为 'CPU' 或 'GPU' 或 'All'。'CPU'表示只分析CPU上的性能;'GPU'表示同时分析CPU和GPU上的性能;'All'表示除了同时分析CPU和GPU上的性能外,还将生成 `性能分析的时间轴信息 <../../advanced_usage/development/profiling/timeline_cn.html>`_ 。 - - **sorted_key** (str,可选) – 性能分析结果的打印顺序,取值为None、'call'、'total'、'max'、'min'、'ave'之一。默认值为None,表示按照第一次结束时间顺序打印;'call'表示按调用的数量进行排序;'total'表示按总执行时间排序;'max'表示按最大执行时间排序;'min'表示按最小执行时间排序;'ave'表示按平均执行时间排序。 - - **profile_path** (str,可选) – 如果性能分析状态为'All', 将生成的时间轴信息写入profile_path,默认输出文件为 ``/tmp/profile`` 。 - -抛出异常: - - ``ValueError`` – 如果state取值不在 ['CPU', 'GPU', 'All']中,或sorted_key取值不在 [None, 'calls', 'total', 'max', 'min', 'ave']中,则抛出异常。 - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - import paddle.fluid.profiler as profiler - import numpy as np - - epoc = 8 - dshape = [4, 3, 28, 28] - data = fluid.layers.data(name='data', shape=[3, 28, 28], dtype='float32') - conv = fluid.layers.conv2d(data, 20, 3, stride=[1, 1], padding=[1, 1]) - - place = fluid.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) - - with profiler.profiler('CPU', 'total', '/tmp/profile') as prof: - for i in range(epoc): - input = np.random.random(dshape).astype('float32') - exe.run(fluid.default_main_program(), feed={'data': input}) - -**结果示例** - -.. code-block:: python - - #### sorted_key = 'total', 'calls', 'max', 'min', 'ave' 结果 #### - # 示例结果中,除了Sorted by number of xxx in descending order in the same thread 这句随着sorted_key变化而不同,其余均相同。 - # 原因是,示例结果中,上述5列都已经按从大到小排列了。 - -------------------------> Profiling Report <------------------------- - - Place: CPU - Time unit: ms - Sorted by total time in descending order in the same thread - #Sorted by number of calls in descending order in the same thread - #Sorted by number of max in descending order in the same thread - #Sorted by number of min in descending order in the same thread - #Sorted by number of avg in descending order in the same thread - - Event Calls Total Min. Max. Ave. Ratio. 
- thread0::conv2d 8 129.406 0.304303 127.076 16.1758 0.983319 - thread0::elementwise_add 8 2.11865 0.193486 0.525592 0.264832 0.016099 - thread0::feed 8 0.076649 0.006834 0.024616 0.00958112 0.000582432 - - #### sorted_key = None 结果 #### - # 示例结果中,是按照Op结束时间顺序打印,因此打印顺序为feed->conv2d->elementwise_add - -------------------------> Profiling Report <------------------------- - - Place: CPU - Time unit: ms - Sorted by event first end time in descending order in the same thread - - Event Calls Total Min. Max. Ave. Ratio. - thread0::feed 8 0.077419 0.006608 0.023349 0.00967738 0.00775934 - thread0::conv2d 8 7.93456 0.291385 5.63342 0.99182 0.795243 - thread0::elementwise_add 8 1.96555 0.191884 0.518004 0.245693 0.196998 +.. _cn_api_fluid_profiler_profiler: + +profiler +------------------------------- + +.. py:function:: paddle.fluid.profiler.profiler(state, sorted_key=None, profile_path='/tmp/profile', tracer_option='Default') + + + + +通用性能分析器 。与 :ref:`cn_api_fluid_profiler_cuda_profiler` 不同,此分析器可用于分析CPU和GPU程序。 + +参数: + - **state** (str) – 性能分析状态, 取值为 'CPU' 或 'GPU' 或 'All'。'CPU'表示只分析CPU上的性能;'GPU'表示同时分析CPU和GPU上的性能;'All'表示除了同时分析CPU和GPU上的性能外,还将生成 `性能分析的时间轴信息 <../../advanced_usage/development/profiling/timeline_cn.html>`_ 。 + - **sorted_key** (str,可选) – 性能分析结果的打印顺序,取值为None、'call'、'total'、'max'、'min'、'ave'之一。默认值为None,表示按照第一次结束时间顺序打印;'call'表示按调用的数量进行排序;'total'表示按总执行时间排序;'max'表示按最大执行时间排序;'min'表示按最小执行时间排序;'ave'表示按平均执行时间排序。 + - **profile_path** (str,可选) – 如果性能分析状态为'All', 将生成的时间轴信息写入profile_path,默认输出文件为 ``/tmp/profile`` 。 + - **tracer_option** (str,可选) – 性能分析选项取值为 'Default' 或 'OpDetail' 或 'AllOpDetail', 此选项用于设置性能分析层次并打印不同层次的性能分析结果, `Default` 选项打印不同Op类型的性能分析结果, `OpDetail` 则会打印不同OP类型更详细的性能分析结果,比如compute和data transform。 `AllOpDetail` 和 `OpDetail` 类似,但是打印的是不同Op名字的性能分析结果。 + +抛出异常: + - ``ValueError`` – 如果state取值不在 ['CPU', 'GPU', 'All']中,或sorted_key取值不在 [None, 'calls', 'total', 'max', 'min', 'ave']中,则抛出异常。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.profiler as profiler + import numpy as np + + epoc = 8 + dshape = [4, 3, 28, 28] + data = fluid.layers.data(name='data', shape=[3, 28, 28], dtype='float32') + conv = fluid.layers.conv2d(data, 20, 3, stride=[1, 1], padding=[1, 1]) + + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + + with profiler.profiler('CPU', 'total', '/tmp/profile') as prof: + for i in range(epoc): + input = np.random.random(dshape).astype('float32') + exe.run(fluid.default_main_program(), feed={'data': input}) + +**结果示例** + +.. code-block:: text + + #### sorted_key = 'total', 'calls', 'max', 'min', 'ave' 结果 #### + # 示例结果中,除了Sorted by number of xxx in descending order in the same thread 这句随着sorted_key变化而不同,其余均相同。 + # 原因是,示例结果中,上述5列都已经按从大到小排列了。 + -------------------------> Profiling Report <------------------------- + + Place: CPU + Time unit: ms + Sorted by total time in descending order in the same thread + #Sorted by number of calls in descending order in the same thread + #Sorted by number of max in descending order in the same thread + #Sorted by number of min in descending order in the same thread + #Sorted by number of avg in descending order in the same thread + + Event Calls Total Min. Max. Ave. Ratio. 
+ thread0::conv2d 8 129.406 0.304303 127.076 16.1758 0.983319 + thread0::elementwise_add 8 2.11865 0.193486 0.525592 0.264832 0.016099 + thread0::feed 8 0.076649 0.006834 0.024616 0.00958112 0.000582432 + + #### sorted_key = None 结果 #### + # 示例结果中,是按照Op结束时间顺序打印,因此打印顺序为feed->conv2d->elementwise_add + -------------------------> Profiling Report <------------------------- + + Place: CPU + Time unit: ms + Sorted by event first end time in descending order in the same thread + + Event Calls Total Min. Max. Ave. Ratio. + thread0::feed 8 0.077419 0.006608 0.023349 0.00967738 0.00775934 + thread0::conv2d 8 7.93456 0.291385 5.63342 0.99182 0.795243 + thread0::elementwise_add 8 1.96555 0.191884 0.518004 0.245693 0.196998 diff --git a/doc/fluid/api_cn/profiler_cn/reset_profiler_cn.rst b/doc/fluid/api_cn/profiler_cn/reset_profiler_cn.rst index 53ba5e8b094cb09b7249803adf3c4902dde6287b..575f408a74a153885468fb0ef527972273f1d049 100644 --- a/doc/fluid/api_cn/profiler_cn/reset_profiler_cn.rst +++ b/doc/fluid/api_cn/profiler_cn/reset_profiler_cn.rst @@ -1,20 +1,23 @@ -.. _cn_api_fluid_profiler_reset_profiler: - -reset_profiler -------------------------------- - -.. py:function:: paddle.fluid.profiler.reset_profiler() - -清除之前的性能分析记录。此接口不能和 :ref:`cn_api_fluid_profiler_cuda_profiler` 一起使用 ,但它可以和 :ref:`cn_api_fluid_profiler_start_profiler` 、:ref:`cn_api_fluid_profiler_stop_profiler` 和 :ref:`cn_api_fluid_profiler_profiler` 一起使用。 - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - import paddle.fluid.profiler as profiler - with profiler.profiler('CPU', 'total', '/tmp/profile'): - for iter in range(10): - if iter == 2: - profiler.reset_profiler() - # ... +.. _cn_api_fluid_profiler_reset_profiler: + +reset_profiler +------------------------------- + +.. py:function:: paddle.fluid.profiler.reset_profiler() + + + + +清除之前的性能分析记录。此接口不能和 :ref:`cn_api_fluid_profiler_cuda_profiler` 一起使用 ,但它可以和 :ref:`cn_api_fluid_profiler_start_profiler` 、:ref:`cn_api_fluid_profiler_stop_profiler` 和 :ref:`cn_api_fluid_profiler_profiler` 一起使用。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.profiler as profiler + with profiler.profiler('CPU', 'total', '/tmp/profile'): + for iter in range(10): + if iter == 2: + profiler.reset_profiler() + # ... diff --git a/doc/fluid/api_cn/profiler_cn/start_profiler_cn.rst b/doc/fluid/api_cn/profiler_cn/start_profiler_cn.rst index 00a54c9d7cd32c0c3298abdbe6c696598db2b100..0eaca4a57366e23575b8d4bf0cc9752f4e10ce4d 100644 --- a/doc/fluid/api_cn/profiler_cn/start_profiler_cn.rst +++ b/doc/fluid/api_cn/profiler_cn/start_profiler_cn.rst @@ -1,28 +1,32 @@ -.. _cn_api_fluid_profiler_start_profiler: - -start_profiler -------------------------------- - -.. py:function:: paddle.fluid.profiler.start_profiler(state) - -激活使用性能分析器。除了 :ref:`cn_api_fluid_profiler_profiler` 外,用户还可以使用 :ref:`cn_api_fluid_profiler_start_profiler` 和 :ref:`cn_api_fluid_profiler_stop_profiler` 来激活和停止使用性能分析器。 - -参数: - - **state** (str) – 性能分析状态, 取值为 'CPU' 或 'GPU' 或 'All'。'CPU'表示只分析CPU上的性能;'GPU'表示同时分析CPU和GPU上的性能;'All'表示除了同时分析CPU和GPU上的性能外,还将生成性能分析的时间轴信息 :ref:`fluid_timeline` 。 - -抛出异常: - - ``ValueError`` – 如果state取值不在 ['CPU', 'GPU', 'All']中,则抛出异常。 - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - import paddle.fluid.profiler as profiler - - profiler.start_profiler('GPU') - for iter in range(10): - if iter == 2: - profiler.reset_profiler() - # except each iteration - profiler.stop_profiler('total', '/tmp/profile') +.. 
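Both ``profiler.profiler`` above and ``start_profiler`` below now document a ``tracer_option`` argument ('Default', 'OpDetail', 'AllOpDetail'), but none of the code examples exercises it. A minimal sketch that reuses the toy program from the ``profiler`` example above and only adds the documented keyword; the choice of 'OpDetail' is illustrative:

.. code-block:: python

    import numpy as np
    import paddle.fluid as fluid
    import paddle.fluid.profiler as profiler

    data = fluid.layers.data(name='data', shape=[3, 28, 28], dtype='float32')
    conv = fluid.layers.conv2d(data, 20, 3, stride=[1, 1], padding=[1, 1])

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())

    # 'OpDetail' additionally breaks every Op down into stages such as compute
    # and data transform, as described in the tracer_option parameter above.
    with profiler.profiler('CPU', 'total', '/tmp/profile',
                           tracer_option='OpDetail') as prof:
        for i in range(4):
            feed_np = np.random.random([4, 3, 28, 28]).astype('float32')
            exe.run(fluid.default_main_program(), feed={'data': feed_np})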
_cn_api_fluid_profiler_start_profiler: + +start_profiler +------------------------------- + +.. py:function:: paddle.fluid.profiler.start_profiler(state, tracer_option='Default') + + + + +激活使用性能分析器。除了 :ref:`cn_api_fluid_profiler_profiler` 外,用户还可以使用 :ref:`cn_api_fluid_profiler_start_profiler` 和 :ref:`cn_api_fluid_profiler_stop_profiler` 来激活和停止使用性能分析器。 + +参数: + - **state** (str) – 性能分析状态, 取值为 'CPU' 或 'GPU' 或 'All'。'CPU'表示只分析CPU上的性能;'GPU'表示同时分析CPU和GPU上的性能;'All'表示除了同时分析CPU和GPU上的性能外,还将生成性能分析的时间轴信息 :ref:`fluid_timeline` 。 + - **tracer_option** (str,可选) – 性能分析选项取值为 'Default' 或 'OpDetail' 或 'AllOpDetail', 此选项用于设置性能分析层次并打印不同层次的性能分析结果, `Default` 选项打印不同Op类型的性能分析结果, `OpDetail` 则会打印不同OP类型更详细的性能分析结果,比如compute和data transform。 `AllOpDetail` 和 `OpDetail` 类似,但是打印的是不同Op名字的性能分析结果。 + +抛出异常: + - ``ValueError`` – 如果state取值不在 ['CPU', 'GPU', 'All']中或者tracer_option取值不在['Default', 'OpDetail', 'AllOpDetail']中,则抛出异常 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.profiler as profiler + + profiler.start_profiler('GPU') + for iter in range(10): + if iter == 2: + profiler.reset_profiler() + # except each iteration + profiler.stop_profiler('total', '/tmp/profile') diff --git a/doc/fluid/api_cn/profiler_cn/stop_profiler_cn.rst b/doc/fluid/api_cn/profiler_cn/stop_profiler_cn.rst index e78ab2fc31079e853d11f2d9479db84e0b15686f..85f2cbb116cc88d9adfcfb830d6b9f0d203292c5 100644 --- a/doc/fluid/api_cn/profiler_cn/stop_profiler_cn.rst +++ b/doc/fluid/api_cn/profiler_cn/stop_profiler_cn.rst @@ -1,30 +1,33 @@ -.. _cn_api_fluid_profiler_stop_profiler: - -stop_profiler -------------------------------- - -.. py:function:: paddle.fluid.profiler.stop_profiler(sorted_key=None, profile_path='/tmp/profile') - -停止使用性能分析器。除了 :ref:`cn_api_fluid_profiler_profiler` 外,用户还可以使用 :ref:`cn_api_fluid_profiler_start_profiler` 和 :ref:`cn_api_fluid_profiler_stop_profiler` 来激活和停止使用性能分析器。 - -参数: - - **sorted_key** (str,可选) – 性能分析结果的打印顺序,取值为None、'call'、'total'、'max'、'min'、'ave'之一。默认值为None,表示按照第一次结束时间顺序打印;'call'表示按调用的数量进行排序;'total'表示按总执行时间排序;'max'表示按最大执行时间排序;'min'表示按最小执行时间排序;'ave'表示按平均执行时间排序。 - - **profile_path** (str,可选) – 如果性能分析状态为'All', 将生成的时间轴信息写入profile_path,默认输出文件为 ``/tmp/profile`` 。 - - -抛出异常: - - ``ValueError`` – 如果sorted_key取值不在 [None, 'calls', 'total', 'max', 'min', 'ave']中,则抛出异常。 - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - import paddle.fluid.profiler as profiler - - profiler.start_profiler('GPU') - for iter in range(10): - if iter == 2: - profiler.reset_profiler() - # except each iteration - profiler.stop_profiler('total', '/tmp/profile') +.. _cn_api_fluid_profiler_stop_profiler: + +stop_profiler +------------------------------- + +.. py:function:: paddle.fluid.profiler.stop_profiler(sorted_key=None, profile_path='/tmp/profile') + + + + +停止使用性能分析器。除了 :ref:`cn_api_fluid_profiler_profiler` 外,用户还可以使用 :ref:`cn_api_fluid_profiler_start_profiler` 和 :ref:`cn_api_fluid_profiler_stop_profiler` 来激活和停止使用性能分析器。 + +参数: + - **sorted_key** (str,可选) – 性能分析结果的打印顺序,取值为None、'call'、'total'、'max'、'min'、'ave'之一。默认值为None,表示按照第一次结束时间顺序打印;'call'表示按调用的数量进行排序;'total'表示按总执行时间排序;'max'表示按最大执行时间排序;'min'表示按最小执行时间排序;'ave'表示按平均执行时间排序。 + - **profile_path** (str,可选) – 如果性能分析状态为'All', 将生成的时间轴信息写入profile_path,默认输出文件为 ``/tmp/profile`` 。 + + +抛出异常: + - ``ValueError`` – 如果sorted_key取值不在 [None, 'calls', 'total', 'max', 'min', 'ave']中,则抛出异常。 + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.profiler as profiler + + profiler.start_profiler('GPU') + for iter in range(10): + if iter == 2: + profiler.reset_profiler() + # except each iteration + profiler.stop_profiler('total', '/tmp/profile') diff --git a/doc/fluid/api_cn/regularizer_cn/L1DecayRegularizer_cn.rst b/doc/fluid/api_cn/regularizer_cn/L1DecayRegularizer_cn.rst index 72067682559e251e997167de3dce4e6fbe63d58e..90ebd61ec981aaaf1889ed3f7b3a66949591f1b7 100644 --- a/doc/fluid/api_cn/regularizer_cn/L1DecayRegularizer_cn.rst +++ b/doc/fluid/api_cn/regularizer_cn/L1DecayRegularizer_cn.rst @@ -1,11 +1,14 @@ -.. _cn_api_fluid_regularizer_L1DecayRegularizer: - -L1DecayRegularizer -------------------------------- - -.. py:class:: paddle.fluid.regularizer.L1DecayRegularizer(regularization_coeff=0.0) - -**注意:paddle.fluid.regularizer.L1DecayRegularizer是paddle.fluid.regularizer.L1Decay的别名,推荐使用paddle.fluid.regularizer.L1Decay。** - -详见 :ref:`cn_api_fluid_regularizer_L1Decay` 接口的使用文档。 - +.. _cn_api_fluid_regularizer_L1DecayRegularizer: + +L1DecayRegularizer +------------------------------- + +.. py:class:: paddle.fluid.regularizer.L1DecayRegularizer(regularization_coeff=0.0) + + + + +**注意:paddle.fluid.regularizer.L1DecayRegularizer是paddle.fluid.regularizer.L1Decay的别名,推荐使用paddle.fluid.regularizer.L1Decay。** + +详见 :ref:`cn_api_fluid_regularizer_L1Decay` 接口的使用文档。 + diff --git a/doc/fluid/api_cn/regularizer_cn/L1Decay_cn.rst b/doc/fluid/api_cn/regularizer_cn/L1Decay_cn.rst index 9cf0766e5447f6de12201f9b9a206dc6ee3d2fa5..d5b0dc6f548ce0708004df7adcf06522886fdc17 100644 --- a/doc/fluid/api_cn/regularizer_cn/L1Decay_cn.rst +++ b/doc/fluid/api_cn/regularizer_cn/L1Decay_cn.rst @@ -1,41 +1,74 @@ - -.. _cn_api_fluid_regularizer_L1Decay: - -L1Decay -------------------------------- - -.. py:attribute:: paddle.fluid.regularizer.L1Decay(regularization_coeff=0.0) - -L1Decay实现L1权重衰减正则化,用于模型训练,使得权重矩阵稀疏。 - -具体实现中,L1权重衰减正则化的计算公式如下: - -.. math:: - \\L1WeightDecay=reg\_coeff∗sign(parameter)\\ - -参数: - - **regularization_coeff** (float) – L1正则化系数,默认值为0.0。 - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - - main_prog = fluid.Program() - startup_prog = fluid.Program() - with fluid.program_guard(main_prog, startup_prog): - data = fluid.layers.data(name='image', shape=[3, 28, 28], dtype='float32') - label = fluid.layers.data(name='label', shape=[1], dtype='int64') - hidden = fluid.layers.fc(input=data, size=128, act='relu') - prediction = fluid.layers.fc(input=hidden, size=10, act='softmax') - loss = fluid.layers.cross_entropy(input=prediction, label=label) - avg_loss = fluid.layers.mean(loss) - optimizer = fluid.optimizer.Adagrad( - learning_rate=1e-4, - regularization=fluid.regularizer.L1Decay( - regularization_coeff=0.1)) - optimizer.minimize(avg_loss) - - - + +.. _cn_api_fluid_regularizer_L1Decay: + +L1Decay +------------------------------- + +.. py:attribute:: paddle.fluid.regularizer.L1Decay(regularization_coeff=0.0) + + + + +L1Decay实现L1权重衰减正则化,用于模型训练,使得权重矩阵稀疏。 + +该类生成的实例对象,需要设置在 :ref:`cn_api_fluid_ParamAttr` 或者 ``optimizer`` +(例如 :ref:`cn_api_fluid_optimizer_SGDOptimizer` )中,在 ``ParamAttr`` 中设置时, +只对该网络层中的参数生效;在 ``optimizer`` 中设置时,会对所有的参数生效;如果同时设置, +在 ``ParamAttr`` 中设置的优先级会高于在 ``optimizer`` 中设置。 + +具体实现中,L1权重衰减正则化的计算公式如下: + +.. math:: + \\L1WeightDecay=reg\_coeff∗sign(parameter)\\ + +参数: + - **regularization_coeff** (float) – L1正则化系数,默认值为0.0。 + +**代码示例1** + +.. 
code-block:: python + + import paddle.fluid as fluid + + main_prog = fluid.Program() + startup_prog = fluid.Program() + with fluid.program_guard(main_prog, startup_prog): + data = fluid.layers.data(name='image', shape=[3, 28, 28], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + hidden = fluid.layers.fc(input=data, size=128, act='relu') + prediction = fluid.layers.fc(input=hidden, size=10, act='softmax') + loss = fluid.layers.cross_entropy(input=prediction, label=label) + avg_loss = fluid.layers.mean(loss) + optimizer = fluid.optimizer.Adagrad( + learning_rate=1e-4, + regularization=fluid.regularizer.L1Decay( + regularization_coeff=0.1)) + optimizer.minimize(avg_loss) + + +**代码示例2** + +.. code-block:: python + + # 在 ParamAttr 和 optimizer 中同时设置正则化 + import paddle.fluid as fluid + l1 = fluid.regularizer.L1Decay(regularization_coeff=0.1) + l2 = fluid.regularizer.L2Decay(regularization_coeff=0.1) + x = fluid.layers.uniform_random([3,4]) + + # 在ParamAttr中设置L1正则化 + w_param = fluid.ParamAttr(regularizer=l1) + hidden1 = fluid.layers.fc(x, 8, param_attr=w_param) # fc_0.w_0(L1), fc_0.b_0 + hidden2 = fluid.layers.fc(hidden1, 16, param_attr=w_param) # fc_1.w_0(L1), fc_1.b_0 + predict = fluid.layers.fc(hidden2, 32) # fc_3.w_0, fc_3.b_0 + avg_loss = fluid.layers.mean(predict) + + # 在optimizer中设置L2正则化 + optimizer = fluid.optimizer.SGD(learning_rate=1e-4, regularization=l2) + optimizer.minimize(avg_loss) + + # 将会打印出提示信息: + # Regularization of [fc_0.w_0, fc_1.w_0] have been set by ParamAttr or WeightNormParamAttr already. + # So, the Regularization of Optimizer will not take effect for these parameters! + + diff --git a/doc/fluid/api_cn/regularizer_cn/L2DecayRegularizer_cn.rst b/doc/fluid/api_cn/regularizer_cn/L2DecayRegularizer_cn.rst index 4c6d2a4d22512aee3a686464534e0969e5cd68cf..6e72e53138d365fee1af9b07cb8bce36147999fe 100644 --- a/doc/fluid/api_cn/regularizer_cn/L2DecayRegularizer_cn.rst +++ b/doc/fluid/api_cn/regularizer_cn/L2DecayRegularizer_cn.rst @@ -1,12 +1,15 @@ -.. _cn_api_fluid_regularizer_L2DecayRegularizer: - -L2DecayRegularizer -------------------------------- - -.. py:class:: paddle.fluid.regularizer.L2DecayRegularizer(regularization_coeff=0.0) - -**注意:paddle.fluid.regularizer.L2DecayRegularizer是paddle.fluid.regularizer.L2Decay的别名,推荐使用paddle.fluid.regularizer.L2Decay。** - -详见 :ref:`cn_api_fluid_regularizer_L2Decay` 接口的使用文档。 - - +.. _cn_api_fluid_regularizer_L2DecayRegularizer: + +L2DecayRegularizer +------------------------------- + +.. py:class:: paddle.fluid.regularizer.L2DecayRegularizer(regularization_coeff=0.0) + + + + +**注意:paddle.fluid.regularizer.L2DecayRegularizer是paddle.fluid.regularizer.L2Decay的别名,推荐使用paddle.fluid.regularizer.L2Decay。** + +详见 :ref:`cn_api_fluid_regularizer_L2Decay` 接口的使用文档。 + + diff --git a/doc/fluid/api_cn/regularizer_cn/L2Decay_cn.rst b/doc/fluid/api_cn/regularizer_cn/L2Decay_cn.rst index 1de52411dc4404de435973ab142455f1755c44d9..d9c55c53f8c838965b7e5e588d9de4a2a6cce5d4 100644 --- a/doc/fluid/api_cn/regularizer_cn/L2Decay_cn.rst +++ b/doc/fluid/api_cn/regularizer_cn/L2Decay_cn.rst @@ -1,40 +1,72 @@ -.. _cn_api_fluid_regularizer_L2Decay: - -L2Decay -------------------------------- - -.. py:attribute:: paddle.fluid.regularizer.L2Decay - -L2Decay实现L2权重衰减正则化,用于模型训练,有助于防止模型对训练数据过拟合。 - -具体实现中,L2权重衰减正则化的计算公式如下: - -.. math:: - \\L2WeightDecay=reg\_coeff*parameter\\ - -参数: - - **regularization_coeff** (float) – 正则化系数,默认值为0.0。 - -**代码示例** - -.. 
code-block:: python - - import paddle.fluid as fluid - - main_prog = fluid.Program() - startup_prog = fluid.Program() - with fluid.program_guard(main_prog, startup_prog): - data = fluid.layers.data(name='image', shape=[3, 28, 28], dtype='float32') - label = fluid.layers.data(name='label', shape=[1], dtype='int64') - hidden = fluid.layers.fc(input=data, size=128, act='relu') - prediction = fluid.layers.fc(input=hidden, size=10, act='softmax') - loss = fluid.layers.cross_entropy(input=prediction, label=label) - avg_loss = fluid.layers.mean(loss) - optimizer = fluid.optimizer.Adagrad( - learning_rate=1e-4, - regularization=fluid.regularizer.L2Decay( - regularization_coeff=0.1)) - optimizer.minimize(avg_loss) - - - +.. _cn_api_fluid_regularizer_L2Decay: + +L2Decay +------------------------------- + +.. py:attribute:: paddle.fluid.regularizer.L2Decay + + + + +L2Decay实现L2权重衰减正则化,用于模型训练,有助于防止模型对训练数据过拟合。 + +该类生成的实例对象,需要设置在 :ref:`cn_api_fluid_ParamAttr` 或者 ``optimizer`` +(例如 :ref:`cn_api_fluid_optimizer_SGDOptimizer` )中,在 ``ParamAttr`` 中设置时, +只对该网络层中的参数生效;在 ``optimizer`` 中设置时,会对所有的参数生效;如果同时设置, +在 ``ParamAttr`` 中设置的优先级会高于在 ``optimizer`` 中设置。 + +具体实现中,L2权重衰减正则化的计算公式如下: + +.. math:: + \\L2WeightDecay=reg\_coeff*parameter\\ + +参数: + - **regularization_coeff** (float) – 正则化系数,默认值为0.0。 + +**代码示例1** + +.. code-block:: python + + import paddle.fluid as fluid + + main_prog = fluid.Program() + startup_prog = fluid.Program() + with fluid.program_guard(main_prog, startup_prog): + data = fluid.layers.data(name='image', shape=[3, 28, 28], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + hidden = fluid.layers.fc(input=data, size=128, act='relu') + prediction = fluid.layers.fc(input=hidden, size=10, act='softmax') + loss = fluid.layers.cross_entropy(input=prediction, label=label) + avg_loss = fluid.layers.mean(loss) + optimizer = fluid.optimizer.Adagrad( + learning_rate=1e-4, + regularization=fluid.regularizer.L2Decay( + regularization_coeff=0.1)) + optimizer.minimize(avg_loss) + + +**代码示例2** + +.. code-block:: python + + # 在 ParamAttr 和 optimizer 中同时设置正则化 + import paddle.fluid as fluid + l1 = fluid.regularizer.L1Decay(regularization_coeff=0.1) + l2 = fluid.regularizer.L2Decay(regularization_coeff=0.1) + x = fluid.layers.uniform_random([3,4]) + + # 在ParamAttr中设置L1正则化 + w_param = fluid.ParamAttr(regularizer=l1) + hidden1 = fluid.layers.fc(x, 8, param_attr=w_param) # fc_0.w_0(L1), fc_0.b_0 + hidden2 = fluid.layers.fc(hidden1, 16, param_attr=w_param) # fc_1.w_0(L1), fc_1.b_0 + predict = fluid.layers.fc(hidden2, 32) # fc_3.w_0, fc_3.b_0 + avg_loss = fluid.layers.mean(predict) + + # 在optimizer中设置L2正则化 + optimizer = fluid.optimizer.SGD(learning_rate=1e-4, regularization=l2) + optimizer.minimize(avg_loss) + + # 将会打印出提示信息: + # Regularization of [fc_0.w_0, fc_1.w_0] have been set by ParamAttr or WeightNormParamAttr already. + # So, the Regularization of Optimizer will not take effect for these parameters! + diff --git a/doc/fluid/api_cn/static_cn.rst b/doc/fluid/api_cn/static_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8df002eb6b7724f4cabd9a80b11b1f3e6806242e --- /dev/null +++ b/doc/fluid/api_cn/static_cn.rst @@ -0,0 +1,13 @@ +======================= +paddle.static +======================= + + + + +.. 
toctree:: + :maxdepth: 1 + + static_cn/data_cn.rst + static_cn/InputSpec_cn.rst + diff --git a/doc/fluid/api_cn/static_cn/InputSpec_cn.rst b/doc/fluid/api_cn/static_cn/InputSpec_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e431a1a11943ef9b9d98d2f135824f23956a016a --- /dev/null +++ b/doc/fluid/api_cn/static_cn/InputSpec_cn.rst @@ -0,0 +1,126 @@ +.. _cn_api_static_cn_InputSpec: + +InputSpec +------------------------------- + + +.. py:class:: paddle.static.InputSpec(shape=None, dtype='float32', name=None) +用于描述模型输入的签名信息,包括shape、dtype和name。 + +此接口常用于指定高层API中模型的输入张量信息,或动态图转静态图时,指定被 ``paddle.jit.to_static`` 装饰的forward函数每个输入参数的张量信息。 + +参数: + - **shape** (list|tuple)- 声明维度信息的list或tuple,默认值为None。 + - **dtype** (np.dtype|VarType|str,可选)- 数据类型,支持bool,float16,float32,float64,int8,int16,int32,int64,uint8。默认值为float32。 + - **name** (str)- 被创建对象的名字,具体用法请参见 :ref:`api_guide_Name` 。 + +返回:初始化后的 ``InputSpec`` 对象 + +返回类型:InputSpec + +**代码示例** + +.. code-block:: python + + from paddle.static import InputSpec + + input = InputSpec([None, 784], 'float32', 'x') + label = InputSpec([None, 1], 'int64', 'label') + print(input) # InputSpec(shape=(-1, 784), dtype=VarType.FP32, name=x) + print(label) # InputSpec(shape=(-1, 1), dtype=VarType.INT64, name=label) + + +.. py:method:: from_tensor(tensor, name=None) +该接口将根据输入Tensor的shape、dtype等信息构建InputSpec对象。 + +参数: + - **tensor** (Tensor) - 用于构建InputSpec的源Tensor + - **name** (str): 被创建对象的名字,具体用法请参见 :ref:`api_guide_Name` 。 默认为:None。 + + +返回:根据Tensor信息构造的 ``InputSpec`` 对象 + +返回类型:InputSpec + + +**代码示例** + +.. code-block:: python + + import numpy as np + import paddle + from paddle.static import InputSpec + + paddle.disable_static() + + x = paddle.to_tensor(np.ones([2, 2], np.float32)) + x_spec = InputSpec.from_tensor(x, name='x') + print(x_spec) # InputSpec(shape=(2, 2), dtype=VarType.FP32, name=x) + + +.. py:method:: from_numpy(ndarray, name=None) +该接口将根据输入numpy ndarray的shape、dtype等信息构建InputSpec对象。 + +参数: + - **ndarray** (Tensor) - 用于构建InputSpec的numpy ndarray + - **name** (str): 被创建对象的名字,具体用法请参见 :ref:`api_guide_Name` 。 默认为:None。 + + +返回:根据ndarray信息构造的 ``InputSpec`` 对象 + +返回类型:InputSpec + + +**代码示例** + +.. code-block:: python + + import numpy as np + from paddle.static import InputSpec + + x = np.ones([2, 2], np.float32) + x_spec = InputSpec.from_numpy(x, name='x') + print(x_spec) # InputSpec(shape=(2, 2), dtype=VarType.FP32, name=x) + + +.. py:method:: batch(batch_size) +该接口将batch_size插入到当前InputSpec对象的shape元组最前面。 + +参数: + - **batch_size** (int) - 被插入的batch size整型数值 + +返回: 更新shape信息后的 ``InputSpec`` 对象 + +返回类型:InputSpec + + +**代码示例** + +.. code-block:: python + + from paddle.static import InputSpec + + x_spec = InputSpec(shape=[64], dtype='float32', name='x') + x_spec.batch(4) + print(x_spec) # InputSpec(shape=(4, 64), dtype=VarType.FP32, name=x) + + +.. py:method:: unbatch() +该接口将当前InputSpec对象shape[0]值移除。 + + +返回: 更新shape信息后的 ``InputSpec`` 对象 + +返回类型:InputSpec + + +**代码示例** + +.. code-block:: python + + from paddle.static import InputSpec + + x_spec = InputSpec(shape=[4, 64], dtype='float32', name='x') + x_spec.unbatch() + print(x_spec) # InputSpec(shape=(64,), dtype=VarType.FP32, name=x) + diff --git a/doc/fluid/api_cn/static_cn/data_cn.rst b/doc/fluid/api_cn/static_cn/data_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..860068cbaf6259ccac54f619fdfbf3b48e3d3c87 --- /dev/null +++ b/doc/fluid/api_cn/static_cn/data_cn.rst @@ -0,0 +1,54 @@ +.. _cn_api_static_cn_data: + +data +------------------------------- + + +.. 
py:function:: paddle.static.data(name, shape, dtype=None, lod_level=0) + + + + +该OP会在全局block中创建变量(Variable),该全局变量可被计算图中的算子(operator)访问。该变量可作为占位符用于数据输入。例如用执行器(Executor)feed数据进该变量,当 ``dtype`` 为None时, ``dtype`` 将通过 ``paddle.get_default_dtype()`` 获取全局类型。 + + +参数: + - **name** (str)- 被创建的变量的名字,具体用法请参见 :ref:`api_guide_Name` 。 + - **shape** (list|tuple)- 声明维度信息的list或tuple。可以在某个维度上设置None或-1,以指示该维度可以是任何大小。例如,将可变batchsize设置为None或-1。 + - **dtype** (np.dtype|str,可选)- 数据类型,支持bool,float16,float32,float64,int8,int16,int32,int64,uint8。默认值为None。当 ``dtype`` 为None时, ``dtype`` 将通过 ``paddle.get_default_dtype()`` 获取全局类型。 + - **lod_level** (int,可选)- LoDTensor变量的LoD level数,LoD level是PaddlePaddle的高级特性,一般任务中不会需要更改此默认值,关于LoD level的详细适用场景和用法请见 :ref:`cn_user_guide_lod_tensor` 。默认值为0。 + +返回:全局变量,可进行数据访问 + +返回类型:Variable + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle.fluid as fluid + import paddle + # Creates a variable with fixed size [3, 2, 1] + # User can only feed data of the same shape to x + # the dtype is not set, so it will set "float32" by + # paddle.get_default_dtype(). You can use paddle.get_default_dtype() to + # change the global dtype + x = paddle.static.data(name='x', shape=[3, 2, 1]) + # Creates a variable with changeable batch size -1. + # Users can feed data of any batch size into y, + # but size of each data sample has to be [2, 1] + y = paddle.static.data(name='y', shape=[-1, 2, 1], dtype='float32') + z = x + y + # In this example, we will feed x and y with np-ndarray "1" + # and fetch z, like implementing "1 + 1 = 2" in PaddlePaddle + feed_data = np.ones(shape=[3, 2, 1], dtype=np.float32) + exe = fluid.Executor(fluid.CPUPlace()) + out = exe.run(fluid.default_main_program(), + feed={ + 'x': feed_data, + 'y': feed_data + }, + fetch_list=[z.name]) + # np-ndarray of shape=[3, 2, 1], dtype=float32, whose elements are 2 + print(out) diff --git a/doc/fluid/api_cn/tensor_cn.rst b/doc/fluid/api_cn/tensor_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c2d1d9fcb985c1cd050abb076d8de9446dad18ab --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn.rst @@ -0,0 +1,178 @@ +======================= +paddle.tensor +======================= + + + + +.. 
toctree:: + :maxdepth: 1 + + tensor_cn/acos_cn.rst + tensor_cn/add_cn.rst + tensor_cn/addcmul_cn.rst + tensor_cn/addmm_cn.rst + tensor_cn/allclose_cn.rst + tensor_cn/arange_cn.rst + tensor_cn/argmax_cn.rst + tensor_cn/argmin_cn.rst + tensor_cn/argsort_cn.rst + tensor_cn/asin_cn.rst + tensor_cn/atan_cn.rst + tensor_cn/bernoulli_cn.rst + tensor_cn/bmm_cn.rst + tensor_cn/cast_cn.rst + tensor_cn/ceil_cn.rst + tensor_cn/cholesky_cn.rst + tensor_cn/chunk_cn.rst + tensor_cn/clamp_cn.rst + tensor_cn/clip_cn.rst + tensor_cn/concat_cn.rst + tensor_cn/cond_cn.rst + tensor_cn/cos_cn.rst + tensor_cn/create_tensor_cn.rst + tensor_cn/crop_tensor_cn.rst + tensor_cn/cross_cn.rst + tensor_cn/cumsum_cn.rst + tensor_cn/diag_cn.rst + tensor_cn/dist_cn.rst + tensor_cn/divide_cn.rst + tensor_cn/floor_divide_cn.rst + tensor_cn/remainder_cn.rst + tensor_cn/mod_cn.rst + tensor_cn/floor_mod_cn.rst + tensor_cn/dot_cn.rst + tensor_cn/einsum_cn.rst + tensor_cn/elementwise_add_cn.rst + tensor_cn/elementwise_div_cn.rst + tensor_cn/elementwise_floordiv_cn.rst + tensor_cn/elementwise_mod_cn.rst + tensor_cn/elementwise_mul_cn.rst + tensor_cn/elementwise_pow_cn.rst + tensor_cn/elementwise_sub_cn.rst + tensor_cn/elementwise_sum_cn.rst + tensor_cn/equal_cn.rst + tensor_cn/equal_all_cn.rst + tensor_cn/erf_cn.rst + tensor_cn/exp_cn.rst + tensor_cn/expand_as_cn.rst + tensor_cn/expand_cn.rst + tensor_cn/eye_cn.rst + tensor_cn/fill_constant_cn.rst + tensor_cn/flatten_cn.rst + tensor_cn/flip_cn.rst + tensor_cn/floor_cn.rst + tensor_cn/full_cn.rst + tensor_cn/full_like_cn.rst + tensor_cn/gather_cn.rst + tensor_cn/gather_nd_cn.rst + tensor_cn/greater_equal_cn.rst + tensor_cn/greater_than_cn.rst + tensor_cn/has_inf_cn.rst + tensor_cn/has_nan_cn.rst + tensor_cn/histogram_cn.rst + tensor_cn/increment_cn.rst + tensor_cn/index_sample_cn.rst + tensor_cn/index_select_cn.rst + tensor_cn/inverse_cn.rst + tensor_cn/is_empty_cn.rst + tensor_cn/isfinite_cn.rst + tensor_cn/isinf_cn.rst + tensor_cn/isnan_cn.rst + tensor_cn/kron_cn.rst + tensor_cn/l2_normalize_cn.rst + tensor_cn/less_equal_cn.rst + tensor_cn/less_than_cn.rst + tensor_cn/linspace_cn.rst + tensor_cn/load_cn.rst + tensor_cn/log_cn.rst + tensor_cn/log1p_cn.rst + tensor_cn/logical_and_cn.rst + tensor_cn/logical_not_cn.rst + tensor_cn/logical_or_cn.rst + tensor_cn/logical_xor_cn.rst + tensor_cn/logsumexp_cn.rst + tensor_cn/masked_select_cn.rst + tensor_cn/math_cn.rst + tensor_cn/matmul_cn.rst + tensor_cn/max_cn.rst + tensor_cn/maximum_cn.rst + tensor_cn/mean_cn.rst + tensor_cn/meshgrid_cn.rst + tensor_cn/min_cn.rst + tensor_cn/minimum_cn.rst + tensor_cn/mm_cn.rst + tensor_cn/mul_cn.rst + tensor_cn/multiplex_cn.rst + tensor_cn/multiply_cn.rst + tensor_cn/nonzero_cn.rst + tensor_cn/norm_cn.rst + tensor_cn/normal_cn.rst + tensor_cn/not_equal_cn.rst + tensor_cn/numel_cn.rst + tensor_cn/ones_cn.rst + tensor_cn/ones_like_cn.rst + tensor_cn/pow_cn.rst + tensor_cn/prod_cn.rst + tensor_cn/rand_cn.rst + tensor_cn/randint_cn.rst + tensor_cn/randn_cn.rst + tensor_cn/randperm_cn.rst + tensor_cn/range_cn.rst + tensor_cn/rank_cn.rst + tensor_cn/reciprocal_cn.rst + tensor_cn/reduce_all_cn.rst + tensor_cn/reduce_any_cn.rst + tensor_cn/reduce_max_cn.rst + tensor_cn/reduce_mean_cn.rst + tensor_cn/reduce_min_cn.rst + tensor_cn/reduce_prod_cn.rst + tensor_cn/reduce_sum_cn.rst + tensor_cn/reshape_cn.rst + tensor_cn/reverse_cn.rst + tensor_cn/roll_cn.rst + tensor_cn/round_cn.rst + tensor_cn/rsqrt_cn.rst + tensor_cn/save_cn.rst + tensor_cn/scale_cn.rst + tensor_cn/scatter_cn.rst + tensor_cn/scatter_nd_add_cn.rst 
+ tensor_cn/scatter_nd_cn.rst + tensor_cn/shape_cn.rst + tensor_cn/shard_index_cn.rst + tensor_cn/shuffle_cn.rst + tensor_cn/sign_cn.rst + tensor_cn/sin_cn.rst + tensor_cn/slice_cn.rst + tensor_cn/sort_cn.rst + tensor_cn/split_cn.rst + tensor_cn/sqrt_cn.rst + tensor_cn/square_cn.rst + tensor_cn/squeeze_cn.rst + tensor_cn/stack_cn.rst + tensor_cn/standard_normal_cn.rst + tensor_cn/stanh_cn.rst + tensor_cn/std_cn.rst + tensor_cn/strided_slice_cn.rst + tensor_cn/sum_cn.rst + tensor_cn/sums_cn.rst + tensor_cn/t_cn.rst + tensor_cn/tanh_cn.rst + tensor_cn/tensordot_cn.rst + tensor_cn/tile_cn.rst + tensor_cn/topk_cn.rst + tensor_cn/trace_cn.rst + tensor_cn/transpose_cn.rst + tensor_cn/tril_cn.rst + tensor_cn/triu_cn.rst + tensor_cn/unbind_cn.rst + tensor_cn/uniform_cn.rst + tensor_cn/unique_cn.rst + tensor_cn/unique_with_counts_cn.rst + tensor_cn/unsqueeze_cn.rst + tensor_cn/unstack_cn.rst + tensor_cn/var_cn.rst + tensor_cn/where_cn.rst + tensor_cn/zeros_cn.rst + tensor_cn/zeros_like_cn.rst + diff --git a/doc/fluid/api_cn/tensor_cn/abs_cn.rst b/doc/fluid/api_cn/tensor_cn/abs_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d895035ad2302814fe46ddd297c040eca84ad327 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/abs_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_abs: + +abs +------------------------------- +:doc_source: paddle.fluid.layers.abs + + diff --git a/doc/fluid/api_cn/tensor_cn/acos_cn.rst b/doc/fluid/api_cn/tensor_cn/acos_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1d7dd21942f22e227d821997f2a95d151c4ebbac --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/acos_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_acos: + +acos +------------------------------- +:doc_source: paddle.fluid.layers.acos + + diff --git a/doc/fluid/api_cn/tensor_cn/add_cn.rst b/doc/fluid/api_cn/tensor_cn/add_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..94162e5c8419121731a5dc89905c2e5bd9b1d898 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/add_cn.rst @@ -0,0 +1,49 @@ +.. _cn_api_tensor_add: + +add +------------------------------- + +.. py:function:: paddle.add(x, y, name=None) + +:alias_main: paddle.add +:alias: paddle.add,paddle.tensor.add,paddle.tensor.math.add +:update_api: paddle.fluid.layers.elementwise_add + + +该OP是逐元素相加算子,输入 ``x`` 与输入 ``y`` 逐元素相加,并将各个位置的输出元素保存到返回结果中。 + +输入 ``x`` 与输入 ``y`` 必须可以广播为相同形状, 关于广播规则,请参考 :ref:`use_guide_broadcasting` + +等式为: + +.. math:: + Out = X + Y + +- :math:`X` :多维Tensor。 +- :math:`Y` :多维Tensor。 + +参数: + - x (Tensor) - 输入的Tensor,数据类型为:float32、float64、int32、int64。 + - y (Tensor) - 输入的Tensor,数据类型为:float32、float64、int32、int64。 + - name (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回: 多维Tensor, 数据类型与 ``x`` 相同, 维度为广播后的形状。 + +返回类型: Tensor + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.enable_imperative() + np_x = np.array([2, 3, 4]).astype('float64') + np_y = np.array([1, 5, 2]).astype('float64') + x = paddle.imperative.to_variable(np_x) + y = paddle.imperative.to_variable(np_y) + z = paddle.add(x, y) + np_z = z.numpy() + print(np_z) # [3., 8., 6. ] diff --git a/doc/fluid/api_cn/tensor_cn/addcmul_cn.rst b/doc/fluid/api_cn/tensor_cn/addcmul_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..53ac51e9c7e9b13e2d1731c468681dbab981fb56 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/addcmul_cn.rst @@ -0,0 +1,42 @@ +.. _cn_api_tensor_addcmul: + +addcmul +------------------------------- + +.. 
py:function:: paddle.addcmul(input, tensor1, tensor2, value=1.0, out=None, name=None) + +:alias_main: paddle.addcmul +:alias: paddle.addcmul,paddle.tensor.addcmul,paddle.tensor.math.addcmul + + + +计算tensor1和tensor2的逐元素乘积,然后将结果乘以标量value,再加到input上输出。其中input, tensor1, tensor2的维度必须是可广播的。 + +计算过程的公式为: +.. math:: + out = input + value * tensor1 * tensor2 + +参数: + - **input** (Variable) : 输入Tensor input,数据类型支持float32, float64, int32, int64。 + - **itensor1** (Variable) : 输入Tensor tensor1,数据类型支持float32, float64, int32, int64。 + - **itensor2** (Variable) : 输入Tensor tensor2,数据类型支持float32, float64, int32, int64。 + - **value** (int|float) : 乘以tensor1*tensor2的标量。如果输入input类型为float32或float64,value类型必须为float,如果输入input类型为int32或int64,value类型必须为int。 + - **out** (Variable, 可选) – 指定存储运算结果的Tensor。如果设置为None或者不设置,将创建新的Tensor存储运算结果,默认值为None。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:计算得到的Tensor。Tensor数据类型与输入input数据类型一致。 + +返回类型:变量(Variable) + +**代码示例**: + +.. code-block:: python + + import paddle + import paddle.fluid as fluid + + input = fluid.data(name='input', dtype='float32', shape=[3, 4]) + tensor1 = fluid.data(name='tenosr1', dtype='float32', shape=[1, 4]) + tensor2 = fluid.data(name='tensor2', dtype='float32', shape=[3, 4]) + data = paddle.addcmul(input, tensor1, tensor2, value=1.0) + diff --git a/doc/fluid/api_cn/tensor_cn/addmm_cn.rst b/doc/fluid/api_cn/tensor_cn/addmm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0b2fc8802e137c328f4b37643b900d5417bdd733 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/addmm_cn.rst @@ -0,0 +1,57 @@ +.. _cn_api_tensor_addmm: + + +addmm +------------------------------- + +.. py:function:: paddle.addmm(input, x, y, alpha=1.0, beta=1.0, name=None) + +:alias_main: paddle.addmm +:alias: paddle.addmm,paddle.tensor.addmm,paddle.tensor.math.addmm + + + +计算x和y的乘积,将结果乘以标量alpha,再加上input与beta的乘积,得到输出。其中input与x、y乘积的维度必须是可广播的。 + +计算过程的公式为: + +.. math:: + out = alpha * x * y + beta * input + +参数: + - **input** (Variable):输入Tensor input,数据类型支持float32, float64。 + - **x** (Variable):输入Tensor x,数据类型支持float32, float64。 + - **y** (Variable):输入Tensor y,数据类型支持float32, float64。 + - **alpha** (float,可选):乘以x*y的标量,数据类型支持float32, float64,默认值为1.0。 + - **beta** (float,可选):乘以input的标量,数据类型支持float32, float64,默认值为1.0。 + - **name** (str,可选):具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:计算得到的Tensor。Tensor数据类型与输入input数据类型一致。 + +返回类型:变量(Variable) + + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle + import paddle.fluid as fluid + + input = fluid.data(name='input', shape=[2, 2], dtype='float32') + x = fluid.data(name='x', shape=[2, 2], dtype='float32') + y = fluid.data(name='y', shape=[2, 2], dtype='float32') + out = paddle.addmm( input=input, x=x, y=y, alpha=5.0, beta=0.5 ) + + data_x = np.ones((2, 2)).astype(np.float32) + data_y = np.ones((2, 2)).astype(np.float32) + data_input = np.ones((2, 2)).astype(np.float32) + + place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + exe = fluid.Executor(place) + results = exe.run(fluid.default_main_program(), + fetch_list=[out], feed={"input": data_input, 'x': data_x, "y": data_y}) + print(np.array(results[0])) + # [[10.5 10.5] + # [10.5 10.5]] diff --git a/doc/fluid/api_cn/tensor_cn/allclose_cn.rst b/doc/fluid/api_cn/tensor_cn/allclose_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c483e3a112f2513f8db0bb7095dc1f99e7a4abd3 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/allclose_cn.rst @@ -0,0 +1,58 @@ +.. 
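To make the ``addcmul`` formula documented above concrete (``addmm`` works analogously with its ``alpha`` / ``beta`` scalars), here is a minimal dygraph sketch, assuming the ``fluid.dygraph.guard()`` mode used by other examples in this diff; the constants are only illustrative:

.. code-block:: python

    import numpy as np
    import paddle
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        input = fluid.dygraph.to_variable(np.full([3, 4], 2.0, dtype=np.float32))
        tensor1 = fluid.dygraph.to_variable(np.full([1, 4], 3.0, dtype=np.float32))  # broadcasts over rows
        tensor2 = fluid.dygraph.to_variable(np.full([3, 4], 4.0, dtype=np.float32))

        out = paddle.addcmul(input, tensor1, tensor2, value=0.5)
        # out = input + value * tensor1 * tensor2, so every element is
        # 2.0 + 0.5 * 3.0 * 4.0 = 8.0
        print(out.numpy())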
_cn_api_tensor_allclose: + +allclose +------------------------------- + +.. py:function:: paddle.allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None) + +逐个检查x和y的所有元素是否均满足如下条件: + +.. math:: + \left| x - y \right| \leq atol + rtol \times \left| y \right| + +该API的行为类似于 :math:`numpy.allclose` ,即当两个待比较Tensor的所有元素均在一定容忍误差范围内视为相等则该API返回True值。 + +参数: + - **x** (Tensor) - 输入的 `Tensor` ,数据类型为:float32、float64。 + - **y** (Tensor) - 输入的 `Tensor` ,数据类型为:float32、float64。 + - **rtol** (float,可选) - 相对容忍误差,默认值为1e-5。 + - **atol** (float,可选) - 绝对容忍误差,默认值为1e-8。 + - **equal_nan** (bool,可选) - 如果设置为True,则两个NaN数值将被视为相等,默认值为False。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:计算得到的布尔类型单值Tensor。 + +**代码示例**: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + np_x = np.array([10000., 1e-07]).astype("float32") + np_y = np.array([10000.1, 1e-08]).astype("float32") + x = paddle.to_tensor (np_x) + y = paddle.to_tensor (np_y) + result1 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08, + equal_nan=False, name="ignore_nan") + np_result1 = result1.numpy() + # [False] + result2 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08, + equal_nan=True, name="equal_nan") + np_result2 = result2.numpy() + # [False] + + np_x = np.array([1.0, float('nan')]).astype("float32") + np_y = np.array([1.0, float('nan')]).astype("float32") + x = paddle.to_tensor (np_x) + y = paddle.to_tensor (np_y) + result1 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08, + equal_nan=False, name="ignore_nan") + np_result1 = result1.numpy() + # [False] + result2 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08, + equal_nan=True, name="equal_nan") + np_result2 = result2.numpy() + # [True] diff --git a/doc/fluid/api_cn/tensor_cn/arange_cn.rst b/doc/fluid/api_cn/tensor_cn/arange_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..10f343f3399364e490f0ec6c2ebd87a2dca62c59 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/arange_cn.rst @@ -0,0 +1,55 @@ +.. _cn_api_paddle_tensor_arange + +arange +------------------------------- + +.. py:function:: paddle.arange(start=0, end=None, step=1, dtype=None, name=None) + +:alias_main: paddle.arange +:alias: paddle.tensor.arange, paddle.tensor.creation.arange + + + +该OP返回以步长 ``step`` 均匀分隔给定数值区间[``start``, ``end``)的1-D Tensor,数据类型为 ``dtype``。 + +当 ``dtype`` 表示浮点类型时,为了避免浮点计算误差,建议给 ``end`` 加上一个极小值epsilon,使边界可以更加明确。 + +参数 +:::::::::: + - **start** (float|int|Tensor) - 区间起点(且区间包括此值)。当 ``start`` 类型是Tensor时,是形状为[1]且数据类型为int32、int64、float32、float64的Tensor。如果仅指定 ``start`` ,而 ``end`` 为None,则区间为[0, ``start``)。默认值为0。 + - **end** (float|int|Tensor, 可选) - 区间终点(且通常区间不包括此值)。当 ``end`` 类型是Tensor时,是形状为[1]且数据类型为int32、int64、float32、float64的Tensor。默认值为None。 + - **step** (float|int|Tensor, 可选) - 均匀分割的步长。当 ``step`` 类型是Tensor时,是形状为[1]且数据类型为int32、int64、float32、float64的Tensor。默认值为1。 + - **dtype** (str|np.dtype|core.VarDesc.VarType, 可选) - 输出Tensor的数据类型,支持int32、int64、float32、float64。当该参数值为None时, 输出Tensor的数据类型为int64。默认值为None. + - **name** (str, 可选) - 输出的名字。一般无需设置,默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 + +返回 +:::::::::: + Tensor: 以步长 ``step`` 均匀分割给定数值区间[``start``, ``end``)后得到的1-D Tensor, 数据类型为 ``dtype`` 。 + +抛出异常 +:::::::::: + - ``TypeError`` - 如果 ``dtype`` 不是int32、int64、float32、float64。 + +代码示例 +:::::::::: + +.. 
code-block:: python + + import paddle + import numpy as np + + paddle.enable_imperative() + + out1 = paddle.arange(5) + # [0, 1, 2, 3, 4] + + out2 = paddle.arange(3, 9, 2.0) + # [3, 5, 7] + + # use 4.999 instead of 5.0 to avoid floating point rounding errors + out3 = paddle.arange(4.999, dtype='float32') + # [0., 1., 2., 3., 4.] + + start_var = paddle.imperative.to_variable(np.array([3])) + out4 = paddle.arange(start_var, 7) + # [3, 4, 5, 6] diff --git a/doc/fluid/api_cn/tensor_cn/argmax_cn.rst b/doc/fluid/api_cn/tensor_cn/argmax_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d2fb738b4cd99c1cf6d458ed994aae2b7186f09d --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/argmax_cn.rst @@ -0,0 +1,44 @@ +.. _cn_api_tensor_argmax: + +argmax +------------------------------- + +.. py:function:: paddle.argmax(x, axis=None, keepdim=False, dtype='int64', name=None) + + +该OP沿 ``axis`` 计算输入 ``x`` 的最大元素的索引。 + +参数 +:::::::: + - **x** (Tensor) - 输入的多维 ``Tensor`` ,支持的数据类型:float32、float64、int16、int32、int64、uint8。 + - **axis** (int,可选) - 指定对输入Tensor进行运算的轴, ``axis`` 的有效范围是[-R, R),R是输入 ``x`` 的维度个数, ``axis`` 为负数时,进行计算的 ``axis`` 与 ``axis`` + R 一致。默认值为None, 将会对输入的 `x` 进行平铺展开,返回最大值的索引。 + - **keepdim** (bool,可选)- 是否保留进行最大值索引操作的轴,默认值为False。 + - **dtype** (np.dtype|str,可选)- 输出Tensor的数据类型,可选值为int32,int64,默认值为int64,将返回int64类型的结果。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回 +:::::::: +``Tensor``, 如果设置 :attr:`dtype` 为 `int32` 时,返回的tensor的数据类型为 `int32` ,其它情况将返回的tensor的数据类型为 `int64` 。 + + +示例代码 +:::::::: + +.. code-block:: python + + import numpy as np + import paddle + + paddle.disable_static() + data = [[5,8,9,5], + [0,0,1,7], + [6,9,2,4]] + x = paddle.to_tensor(data) + out1 = paddle.argmax(x) + print(out1.numpy()) # 2 + out2 = paddle.argmax(x, axis=1) + print(out2.numpy()) + # [2 3 1] + out3 = paddle.argmax(x, axis=-1) + print(out3.numpy()) + # [2 3 1] diff --git a/doc/fluid/api_cn/tensor_cn/argmin_cn.rst b/doc/fluid/api_cn/tensor_cn/argmin_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4fd851764cbffe8a5ba93e3605034a9c55854e47 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/argmin_cn.rst @@ -0,0 +1,43 @@ +.. _cn_api_tensor_argmin: + +argmin +------------------------------- + +.. py:function:: paddle.argmin(x, axis=None, keepdim=False, dtype='int64', name=None) + + +该OP沿 ``axis`` 计算输入 ``x`` 的最小元素的索引。 + +参数 +:::::::: + - **x** (Tensor) - 输入的多维 ``Tensor`` ,支持的数据类型:float32、float64、int16、int32、int64、uint8。 + - **axis** (int,可选) - 指定对输入Tensor进行运算的轴, ``axis`` 的有效范围是[-R, R),R是输入 ``x`` 的维度个数, ``axis`` 为负数时,进行计算的 ``axis`` 与 ``axis`` + R 一致。默认值为None, 将会对输入的 `x` 进行平铺展开,返回最小值的索引。 + - **keepdim** (bool,可选)- 是否保留进行最小值索引操作的轴,默认值为False。 + - **dtype** (np.dtype|str, 可选)- 输出Tensor的数据类型,可选值为int32,int64,默认值为'int64',将返回int64类型的结果。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回 +:::::::: +``Tensor``, 如果设置 :attr:`dtype` 为 `int32` 时,返回的tensor的数据类型为 `int32` ,其它情况将返回的tensor的数据类型为 `int64` 。 + +示例代码 +:::::::: + +.. 
code-block:: python + + import numpy as np + import paddle + + paddle.disable_static() + data = [[5,8,9,5], + [0,0,1,7], + [6,9,2,4]] + x = paddle.to_tensor(data) + out1 = paddle.argmin(x) + print(out1.numpy()) # 4 + out2 = paddle.argmin(x, axis=1) + print(out2.numpy()) + # [0 0 2] + out3 = paddle.argmin(x, axis=-1) + print(out3.numpy()) + # [0 0 2] diff --git a/doc/fluid/api_cn/tensor_cn/argsort_cn.rst b/doc/fluid/api_cn/tensor_cn/argsort_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..43d2dd420eb8fa255e35ff229272f7b3aeb50df0 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/argsort_cn.rst @@ -0,0 +1,62 @@ +.. _cn_api_tensor_cn_argsort: + +argsort +------------------------------- + +.. py:function:: paddle.argsort(x, axis=-1, descending=False, name=None) + +:alias_main: paddle.argsort +:alias: paddle.argsort,paddle.tensor.argsort,paddle.tensor.search.argsort + +对输入变量沿给定轴进行排序,输出排序好的数据的相应索引,其维度和输入相同。默认升序排列,如果需要降序排列设置 ``descending=True`` 。 + + +参数: + - **x** (Tensor) - 输入的多维 ``Tensor`` ,支持的数据类型:float32、float64、int16、int32、int64、uint8。 + - **axis** (int,可选) - 指定对输入Tensor进行运算的轴, ``axis`` 的有效范围是[-R, R),R是输入 ``x`` 的Rank, ``axis`` 为负时与 ``axis`` +R 等价。默认值为-1。 + - **descending** (bool,可选) - 指定算法排序的方向。如果设置为True,算法按照降序排序。如果设置为False或者不设置,按照升序排序。默认值为False。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:Tensor, 排序后索引信息(与 ``x`` 维度信息一致),数据类型为int64。 + + +**代码示例**: + +.. code-block:: python + + import paddle + import paddle.imperative as imperative + import numpy as np + + paddle.enable_imperative() + input_array = np.array([[[5,8,9,5], + [0,0,1,7], + [6,9,2,4]], + [[5,2,4,2], + [4,7,7,9], + [1,7,0,6]]]).astype(np.float32) + x = imperative.to_variable(input_array) + out1 = paddle.argsort(x=x, axis=-1) + out2 = paddle.argsort(x=x, axis=0) + out3 = paddle.argsort(x=x, axis=1) + print(out1.numpy()) + #[[[0 3 1 2] + # [0 1 2 3] + # [2 3 0 1]] + # [[1 3 2 0] + # [0 1 2 3] + # [2 0 3 1]]] + print(out2.numpy()) + #[[[0 1 1 1] + # [0 0 0 0] + # [1 1 1 0]] + # [[1 0 0 0] + # [1 1 1 1] + # [0 0 0 1]]] + print(out3.numpy()) + #[[[1 1 1 2] + # [0 0 2 0] + # [2 2 0 1]] + # [[2 0 2 0] + # [1 1 0 2] + # [0 2 1 1]]] diff --git a/doc/fluid/api_cn/tensor_cn/asin_cn.rst b/doc/fluid/api_cn/tensor_cn/asin_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f4a15d787c10ac39ea7ed58203f0e2c639726def --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/asin_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_asin: + +asin +------------------------------- +:doc_source: paddle.fluid.layers.asin + + diff --git a/doc/fluid/api_cn/tensor_cn/atan_cn.rst b/doc/fluid/api_cn/tensor_cn/atan_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..382f6b09b17eefdb766665dd5f21857c3fda9822 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/atan_cn.rst @@ -0,0 +1,38 @@ +.. _cn_api_tensor_atan: + +atan +------------------------------- + +.. py:function:: paddle.atan(x, name=None, out=None) + +arctan 激活函数(反正切函数)。 + +.. math:: + out = tan^{-1}(x) + +参数: + - **x(Variable)** - atan的输入Tensor,数据类型为 float32 或 float64 + - **name** (str|None) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值为None。 + - **out** (Variable, 可选) – 指定存储运算结果的Tensor。如果设置为None或者不设置,将创建新的Tensor存储运算结果,默认值为None。 + +返回:返回类型为Variable(Tensor|LoDTensor), 数据类型同输入一致。 + +**代码示例**: + +.. 
code-block:: python + + import numpy as np + import paddle + import paddle.fluid as fluid + + inputs = fluid.layers.data(name="x", shape = [3], dtype='float32') + output = paddle.atan(inputs) + + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(fluid.default_startup_program()) + + img = np.array([-0.8183, 0.4912, -0.6444, 0.0371]).astype(np.float32) + + res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output]) + print(res) + #[array([-0.6858003, 0.45658287, -0.5724284, 0.03708299], dtype=float32)] diff --git a/doc/fluid/api_cn/tensor_cn/bernoulli_cn.rst b/doc/fluid/api_cn/tensor_cn/bernoulli_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a18096b54472967d22d7707b2198581770569a0e --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/bernoulli_cn.rst @@ -0,0 +1,49 @@ +.. _cn_api_tensor_bernoulli: + +bernoulli +------------------------------- + +.. py:function:: paddle.bernoulli(x, name=None) + + + + +该OP以输入 ``x`` 为概率,生成一个伯努利分布(0-1分布)的Tensor,输出Tensor的形状和数据类型与输入 ``x`` 相同。 + +.. math:: + out_i \sim Bernoulli(p = x_i) + +参数: + - **x** (Tensor) - 输入的概率值。数据类型为 ``float32`` 、``float64`` . + - **name** (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回: + Tensor:伯努利分布的随机Tensor,形状和数据类型为与输入 ``x`` 相同。 + + +**代码示例**: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + x = paddle.rand([2, 3]) + print(x.numpy()) + # [[0.11272584 0.3890902 0.7730957 ] + # [0.10351662 0.8510418 0.63806665]] + + out = paddle.bernoulli(x) + print(out.numpy()) + # [[0. 0. 1.] + # [0. 0. 1.]] + + + + + + + + diff --git a/doc/fluid/api_cn/tensor_cn/bmm_cn.rst b/doc/fluid/api_cn/tensor_cn/bmm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8e83f59cf3477c007cdfd8f5b8aef0f56c0f7be4 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/bmm_cn.rst @@ -0,0 +1,51 @@ +.. _cn_api_paddle_tensor_bmm: + +bmm +------------------------------- + +.. py:function:: paddle.tensor.bmm(x, y, name=None): + +:alias_main: paddle.bmm +:alias: paddle.bmm,paddle.tensor.bmm,paddle.tensor.linalg.bmm + + + +对输入x及输入y进行矩阵相乘。 + +两个输入的维度必须等于3,并且矩阵x和矩阵y的第一维必须相等 + +同时矩阵x的第二维必须等于矩阵y的第三维 + +例如:若x和y分别为(b, m, k)和 (b, k, n)的矩阵,则函数的输出为一个(b, m, n)的矩阵 + +**参数**: + + -**x** (Variable) : 输入变量,类型为 Tensor 或 LoDTensor。 + -**y** (Variable) : 输入变量,类型为 Tensor 或 LoDTensor。 + -**name** (str|None) : 该层名称(可选),如果设置为空,则自动为该层命名。 + +**返回**: + - Variable (Tensor / LoDTensor),矩阵相乘后的结果。 + +**返回类型**: + - Variable(变量)。 + + +**示例**: + +.. code-block:: python + + import paddle + import paddle.fluid as fluid + # size input1: (2, 2, 3) and input2: (2, 3, 2) + input1 = np.array([[[1.0, 1.0, 1.0],[2.0, 2.0, 2.0]],[[3.0, 3.0, 3.0],[4.0, 4.0, 4.0]]]) + input2 = np.array([[[1.0, 1.0],[2.0, 2.0],[3.0, 3.0]],[[4.0, 4.0],[5.0, 5.0],[6.0, 6.0]]]) + with fluid.dygraph.guard(): + x = fluid.dygraph.to_variable(input1) + y = fluid.dygraph.to_variable(input2) + out = paddle.bmm(x, y) + #output size: (2, 2, 2) + #output value: + #[[[6.0, 6.0],[12.0, 12.0]],[[45.0, 45.0],[60.0, 60.0]]] + out_np = out.numpy() + diff --git a/doc/fluid/api_cn/tensor_cn/cast_cn.rst b/doc/fluid/api_cn/tensor_cn/cast_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b831b5b66927fec1ca85da3491e3a64a0faad2b0 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/cast_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_tensor_cn_cast: + +cast +------------------------------- +:doc_source: paddle.fluid.layers.cast + + diff --git a/doc/fluid/api_cn/tensor_cn/ceil_cn.rst b/doc/fluid/api_cn/tensor_cn/ceil_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2a8d815a223d1adbca8f07097b13d58d63d22b4c --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/ceil_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_ceil: + +ceil +------------------------------- +:doc_source: paddle.fluid.layers.ceil + + diff --git a/doc/fluid/api_cn/tensor_cn/cholesky_cn.rst b/doc/fluid/api_cn/tensor_cn/cholesky_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5c2b0b8e07a99d837333e389ef9d61eef338689a --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/cholesky_cn.rst @@ -0,0 +1,42 @@ +.. _cn_api_tensor_cholesky: + +cholesky +------------------------------- + +.. py:function:: paddle.cholesky(x, upper=False, name=None) + +:alias_main: paddle.cholesky +:alias: paddle.cholesky, paddle.tensor.cholesky, paddle.tensor.linalg.cholesky + + + +计算一个对称正定矩阵或一批对称正定矩阵的Cholesky分解。如果 `upper` 是 `True` , +则分解形式为 :math:`A = U ^ {T} U` , 返回的矩阵U是上三角矩阵。 +否则,分解形式为 :math:`A = LL ^ {T}` ,并返回矩阵 :math:`L` 是下三角矩阵。 + +参数: + - **x** (Variable)- 输入变量为多维Tensor,它的维度应该为 `[*, M, N]` ,其中*为零或更大的批次尺寸,并且最里面的两个维度上的矩阵都应为对称的正定矩阵,支持数据类型为float32,float64。 + - **upper** (bool)- 指示是否返回上三角矩阵或下三角矩阵。默认值:False。 + - **name** (str , 可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回: 与 `x` 具有相同形状和数据类型的Tensor。它代表了Cholesky分解生成的三角矩阵。 + +返回类型: 变量(Variable) + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.enable_imperative() + a = np.random.rand(3, 3) + a_t = np.transpose(a, [1, 0]) + x_data = np.matmul(a, a_t) + 1e-03 + x = paddle.imperative.to_variable(x_data) + out = paddle.cholesky(x, upper=False) + print(out.numpy()) + # [[1.190523 0. 0. ] + # [0.9906703 0.27676893 0. ] + # [1.25450498 0.05600871 0.06400121]] diff --git a/doc/fluid/api_cn/tensor_cn/chunk_cn.rst b/doc/fluid/api_cn/tensor_cn/chunk_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..da78f9e2951ff2e4fce59a3ab55b087ce9d87f94 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/chunk_cn.rst @@ -0,0 +1,42 @@ +.. _cn_api_tensor_cn_chunk: + +chunk +------------------------------- + +.. py:function:: paddle.chunk(x, chunks, axis=0, name=None) + +该OP将输入Tensor分割成多个子Tensor。 + +**参数**: + - **x** (Tensor) - 输入变量,数据类型为bool, float16, float32,float64,int32,int64的多维Tensor。 + - **chunks** (int) - ``chunks`` 是一个整数,表示将输入Tensor划分成多少个相同大小的子Tensor。 + - **axis** (int|Tensor,可选) - 整数或者形状为[1]的Tensor,数据类型为int32或int64。表示需要分割的维度。如果 ``axis < 0`` ,则划分的维度为 ``rank(x) + axis`` 。默认值为0。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:分割后的Tensor列表。 + + +**代码示例** + +.. code-block:: python + + import numpy as np + import paddle + + paddle.disable_static() + # x is a Tensor which shape is [3, 9, 5] + x_np = np.random.random([3, 9, 5]).astype("int32") + x = paddle.to_tensor(x_np) + + out0, out1, out2 = paddle.chunk(x, chunks=3, axis=1) + # out0.shape [3, 3, 5] + # out1.shape [3, 3, 5] + # out2.shape [3, 3, 5] + + + # axis is negative, the real axis is (rank(x) + axis) which real + # value is 1. 
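+    # e.g. rank(x) is 3 for this input, so axis=-2 resolves to 3 + (-2) = 1,
+    # i.e. the same split as chunks=3 along axis=1 above.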
+ out0, out1, out2 = paddle.chunk(x, chunks=3, axis=-2) + # out0.shape [3, 3, 5] + # out1.shape [3, 3, 5] + # out2.shape [3, 3, 5] diff --git a/doc/fluid/api_cn/tensor_cn/clip_cn.rst b/doc/fluid/api_cn/tensor_cn/clip_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a84d2a57ccd7362d6211198756b3b4dc89bfdf5d --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/clip_cn.rst @@ -0,0 +1,46 @@ +.. _cn_api_tensor_clip: + +clip +------------------------------- + +.. py:function:: paddle.clip(x, min=None, max=None, name=None) + +:alias_main: paddle.clip +:alias: paddle.clip,paddle.tensor.clip,paddle.tensor.math.clip + + + +该OP将输入的所有元素进行剪裁,使得输出元素限制在[min, max]内,具体公式如下: + +.. math:: + + Out = MIN(MAX(x, min), max) + +参数: + - x (Tensor) - 输入的Tensor,数据类型为:float32、float64。 + - min (float32|Tensor, 可选) - 裁剪的最小值,输入中小于该值的元素将由该元素代替,若参数为空,则不对输入的最小值做限制。数据类型可以是float32或形状为[1]的Tensor,类型可以为int32,float32,float64,默认值为None。 + - max (float32|Tensor, 可选) - 裁剪的最大值,输入中大于该值的元素将由该元素代替,若参数为空,则不对输入的最大值做限制。数据类型可以是float32或形状为[1]的Tensor,类型可以为int32,float32,float64,默认值为None。 + - name (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回:输出Tensor,与 ``x`` 维度相同、数据类型相同。 + +返回类型:Tensor + +**代码示例**: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + x = np.array([[1.2,3.5], [4.5,6.4]]).astype('float32') + x1 = paddle.to_tensor(x) + out1 = paddle.clip(x1, min=3.5, max=5.0) + out2 = paddle.clip(x1, min=2.5) + print(out1.numpy()) + # [[3.5, 3.5] + # [4.5, 5.0]] + print(out2.numpy()) + # [[2.5, 3.5] + # [[4.5, 6.4] diff --git a/doc/fluid/api_cn/tensor_cn/concat_cn.rst b/doc/fluid/api_cn/tensor_cn/concat_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..548f46e7804dd538369b580f52e351c34e3df91c --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/concat_cn.rst @@ -0,0 +1,49 @@ +.. _cn_api_tensor_concat: + +concat +------------------------------- + +.. py:function:: paddle.tensor.concat(x, axis=0, name=None) + + +该OP对输入沿 ``axis`` 轴进行联结,返回一个新的Tensor。 + +参数: + - **x** (list|tuple) - 待联结的Tensor list或者Tensor tuple ,支持的数据类型为:bool, float16, float32、float64、int32、int64, ``x`` 中所有Tensor的数据类型应该一致。 + - **axis** (int|Tensor,可选) - 指定对输入 ``x`` 进行运算的轴,可以是整数或者形状为[1]的Tensor,数据类型为int32或者int64。 ``axis`` 的有效范围是[-R, R),R是输入 ``x`` 中Tensor的维度, ``axis`` 为负值时与 :math:`axis + R` 等价。默认值为0。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:联结后的Tensor ,数据类型和 ``x`` 中的Tensor相同。 + + +**代码示例**: + +.. 
code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() # Now we are in imperative mode + in1 = np.array([[1, 2, 3], + [4, 5, 6]]) + in2 = np.array([[11, 12, 13], + [14, 15, 16]]) + in3 = np.array([[21, 22], + [23, 24]]) + x1 = paddle.to_tensor(in1) + x2 = paddle.to_tensor(in2) + x3 = paddle.to_tensor(in3) + zero = paddle.full(shape=[1], dtype='int32', fill_value=0) + # When the axis is negative, the real axis is (axis + Rank(x)) + # As follow, axis is -1, Rank(x) is 2, the real axis is 1 + out1 = paddle.concat(x=[x1, x2, x3], axis=-1) + out2 = paddle.concat(x=[x1, x2], axis=0) + out3 = paddle.concat(x=[x1, x2], axis=zero) + # out1 + # [[ 1 2 3 11 12 13 21 22] + # [ 4 5 6 14 15 16 23 24]] + # out2 out3 + # [[ 1 2 3] + # [ 4 5 6] + # [11 12 13] + # [14 15 16]] diff --git a/doc/fluid/api_cn/tensor_cn/cos_cn.rst b/doc/fluid/api_cn/tensor_cn/cos_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..08bb5e8d0a08d232d869777a00b7c755e601c02a --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/cos_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_cos: + +cos +------------------------------- +:doc_source: paddle.fluid.layers.cos + + diff --git a/doc/fluid/api_cn/tensor_cn/create_tensor_cn.rst b/doc/fluid/api_cn/tensor_cn/create_tensor_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..cae378880062e99cb8cc258a879de31d1d2102da --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/create_tensor_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_create_tensor: + +create_tensor +------------------------------- +:doc_source: paddle.fluid.layers.create_tensor + + diff --git a/doc/fluid/api_cn/tensor_cn/crop_tensor_cn.rst b/doc/fluid/api_cn/tensor_cn/crop_tensor_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..fcf5a7d3f273a4ebd55f941796bef463d0d886b3 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/crop_tensor_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_crop_tensor: + +crop_tensor +------------------------------- +:doc_source: paddle.fluid.layers.crop_tensor + + diff --git a/doc/fluid/api_cn/tensor_cn/cross_cn.rst b/doc/fluid/api_cn/tensor_cn/cross_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4bfb4ed980490ece02796281afd9a7b527367ac7 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/cross_cn.rst @@ -0,0 +1,57 @@ +.. _cn_api_tensor_linalg_cross: + +cross +------------------------------- + +.. py:function:: paddle.cross(x, y, axis=None, name=None) + +:alias_main: paddle.cross +:alias: paddle.cross,paddle.tensor.cross,paddle.tensor.linalg.cross + + + +计算张量 ``x`` 和 ``y`` 在 ``axis`` 维度上的向量积(叉积)。 ``x`` 和 ``y`` 必须有相同的形状, +且指定的 ``axis`` 的长度必须为3. 如果未指定 ``axis`` ,默认选取第一个长度为3的 ``axis`` . + +**参数**: + - **x** (Variable)– 第一个输入张量。 + - **y** (Variable)– 第二个输入张量。 + - **axis** (int, optional) – 沿着此维进行向量积操作。默认选取第一个长度为3的 ``axis`` . + - **name** (str,可选)- 输出的名字。默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 + +**返回**:向量积的结果。 + +**返回类型**:Variable + +**代码示例**: + +.. code-block:: python + + import paddle + from paddle.imperative import to_variable + import numpy as np + + paddle.enable_imperative() + + data_x = np.array([[1.0, 1.0, 1.0], + [2.0, 2.0, 2.0], + [3.0, 3.0, 3.0]]) + data_y = np.array([[1.0, 1.0, 1.0], + [1.0, 1.0, 1.0], + [1.0, 1.0, 1.0]]) + x = to_variable(data_x) + y = to_variable(data_y) + + z1 = paddle.cross(x, y) + print(z1.numpy()) + # [[-1. -1. -1.] + # [ 2. 2. 2.] + # [-1. -1. -1.]] + + z2 = paddle.cross(x, y, axis=1) + print(z2.numpy()) + # [[0. 0. 0.] + # [0. 0. 0.] + # [0. 0. 
0.]] + + diff --git a/doc/fluid/api_cn/tensor_cn/cumsum_cn.rst b/doc/fluid/api_cn/tensor_cn/cumsum_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..71896b1bc17808d6b70873ef45a5587d1ad6cce1 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/cumsum_cn.rst @@ -0,0 +1,54 @@ +.. _cn_api_tensor_cn_cumsum: + +cumsum +------------------------------- + +.. py:function:: paddle.cumsum(x, axis=None, dtype=None, name=None) + + + +沿给定 ``axis`` 计算张量 ``x`` 的累加和。结果的第一个元素和输入的第一个元素相同。 + +参数: + - **x** (Tensor) - 累加的输入,需要进行累加操作的Tensor. + - **axis** (int,可选) - 指明需要累加的维度。-1代表最后一维。默认:None,将输入展开为一维变量再进行累加计算。 + - **dtype** (str,可选) - 输出Tensor的数据类型,支持int32、int64、float32、float64. 如果指定了,那么在执行操作之前,输入张量将被转换为dtype. 这对于防止数据类型溢出非常有用。默认为:None. + - **name** (str,可选)- 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +返回:累加的结果,即累加器的输出。 + +返回类型:Tensor + +**代码示例**: + +.. code-block:: python + + import paddle + from paddle.imperative import to_variable + import numpy as np + + paddle.enable_imperative() + data_np = np.arange(12).reshape(3, 4) + data = to_variable(data_np) + + y = paddle.cumsum(data) + print(y.numpy()) + # [ 0 1 3 6 10 15 21 28 36 45 55 66] + + y = paddle.cumsum(data, axis=0) + print(y.numpy()) + # [[ 0 1 2 3] + # [ 4 6 8 10] + # [12 15 18 21]] + + y = paddle.cumsum(data, axis=-1) + print(y.numpy()) + # [[ 0 1 3 6] + # [ 4 9 15 22] + # [ 8 17 27 38]] + + y = paddle.cumsum(data, dtype='float64') + print(y.dtype) + # VarType.FP64 + + diff --git a/doc/fluid/api_cn/tensor_cn/diag_cn.rst b/doc/fluid/api_cn/tensor_cn/diag_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c00f7afc075d78fa8a3cda554c5ebd052e1419f1 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/diag_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_diag: + +diag +------------------------------- +:doc_source: paddle.fluid.layers.diag + + diff --git a/doc/fluid/api_cn/tensor_cn/dist_cn.rst b/doc/fluid/api_cn/tensor_cn/dist_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..836567b87ca004af13417245badff3c8014dd0ac --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/dist_cn.rst @@ -0,0 +1,80 @@ +.. _cn_api_tensor_linalg_dist: + +dist +------------------------------- + +.. py:function:: paddle.tensor.linalg.dist(x, y, p=2) + +:alias_main: paddle.dist +:alias: paddle.dist,paddle.tensor.dist,paddle.tensor.linalg.dist + + + +该OP用于计算 `(x-y)` 的 p 范数(p-norm),需要注意这不是严格意义上的范数,仅作为距离的度量。输入 `x` 和 `y` 的形状(shape)必须是可广播的(broadcastable)。其含义如下,详情请参考 `numpy的广播概念 `_ : + +- 每个输入都至少有1维 +- 对两个输入的维度从后向前匹配,两个输入每一维的大小需要满足3个条件中的任意一个:相等、其中一个为1或者其中一个不存在。 + +定义 `z = x - y` ,`x` 和 `y` 的形状是可广播的,那么 `z` 的形状可按照下列步骤得到: + +(1) 如果 `x` 和 `y` 的维数不同,先对维数较少的这个输入的维度往前补1。 + +例如,`x` 的形状为[8, 1, 6, 1],`y` 的形状为[7, 1, 5],对 `y` 的维度补1, + +x (4-D Tensor): 8 x 1 x 6 x 1 + +y (4-D Tensor): 1 x 7 x 1 x 5 + +(2) 确定输出 `z` 每一维度的大小:从两个输入的维度中选取最大值。 + +z (4-D Tensor): 8 x 7 x 6 x 5 + +若两个输入的维数相同,则输出的大小可直接用步骤2确定。以下是 `p` 取不同值时,范数的计算公式: + +当 `p = 0` ,定义 $0^0 = 0$,则 z 的零范数是 `z` 中非零元素的个数。 + +.. math:: + ||z||_{0}=\lim_{p \rightarrow 0}\sum_{i=1}^{m}|z_i|^{p} + +当 `p = inf` ,`z` 的无穷范数是 `z` 所有元素中的最大值。 + +.. math:: + ||z||_\infty=\max_i |z_i| + +当 `p = -inf` ,`z` 的负无穷范数是 `z` 所有元素中的最小值。 + +.. math:: + ||z||_{-\infty}=\min_i |z_i| + +其他情况下,`z` 的 `p` 范数使用以下公式计算: + +.. math:: + ||z||_{p}=(\sum_{i=1}^{m}|z_i|^p)^{\frac{1}{p}} + +参数: + - **x** (Variable): 1-D 到 6-D Tensor,数据类型为float32或float64。 + - **y** (Variable): 1-D 到 6-D Tensor,数据类型为float32或float64。 + - **p** (float, optional): 用于设置需要计算的范数,数据类型为float32或float64。默认值为2. 
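+
+补充说明：结合下方代码示例可以验证上述公式。若 x = [[3, 3], [3, 3]]，y = [[3, 3], [3, 1]]，则 z = x - y = [[0, 0], [0, 2]]，因此 p=0 时范数为 1（非零元素个数），p=2 时为 sqrt(4) = 2，p=inf 时为 2（最大绝对值），p=-inf 时为 0（最小绝对值）。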
+ +返回: `(x-y)` 的 `p` 范数。 + +返回类型: Variable + +**代码示例**: + +.. code-block:: python + + import paddle + import paddle.fluid as fluid + import numpy as np + with fluid.dygraph.guard(): + x = fluid.dygraph.to_variable(np.array([[3, 3],[3, 3]]).astype(np.float32)) + y = fluid.dygraph.to_variable(np.array([[3, 3],[3, 1]]).astype(np.float32)) + out = paddle.dist(x, y, 0) + print(out.numpy()) # out = [1.] + out = paddle.dist(x, y, 2) + print(out.numpy()) # out = [2.] + out = paddle.dist(x, y, float("inf")) + print(out.numpy()) # out = [2.] + out = paddle.dist(x, y, float("-inf")) + print(out.numpy()) # out = [0.] diff --git a/doc/fluid/api_cn/tensor_cn/divide_cn.rst b/doc/fluid/api_cn/tensor_cn/divide_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d0b0113c24644f5e5e55b4a3a73722da63610df4 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/divide_cn.rst @@ -0,0 +1,43 @@ +.. _cn_api_tensor_divide: + +divide +------------------------------- + +.. py:function:: paddle.divide(x, y, name=None) + +该OP是逐元素相除算子,输入 ``x`` 与输入 ``y`` 逐元素相除,并将各个位置的输出元素保存到返回结果中。 +输入 ``x`` 与输入 ``y`` 必须可以广播为相同形状, 关于广播规则,请参考 :ref:`use_guide_broadcasting` + +等式为: + +.. math:: + Out = X / Y + +- :math:`X` :多维Tensor。 +- :math:`Y` :多维Tensor。 + +参数: + - x(Tensor)- 多维Tensor。数据类型为float32 、float64、int32或int64。 + - y(Tensor)- 多维Tensor。数据类型为float32 、float64、int32或int64。 + - name(str,可选)- 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + + +返回: 多维 Tensor, 数据类型与 ``x`` 相同,维度为广播后的形状。 + +返回类型: Tensor + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + np_x = np.array([2, 3, 4]).astype('float64') + np_y = np.array([1, 5, 2]).astype('float64') + x = paddle.to_tensor(np_x) + y = paddle.to_tensor(np_y) + z = paddle.divide(x, y) + print(z.numpy()) # [2., 0.6, 2.] diff --git a/doc/fluid/api_cn/tensor_cn/dot_cn.rst b/doc/fluid/api_cn/tensor_cn/dot_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5914eb30d807572d0cc439690ea834c89a11a355 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/dot_cn.rst @@ -0,0 +1,37 @@ +.. _cn_api_paddle_tensor_linalg_dot: + +dot +------------------------------- + +.. py:function:: paddle.tensor.linalg.dot(x, y, name=None) + + +该OP计算向量的内积 + +.. note:: + + 支持1维和2维Tensor. + +参数: + - **x** (Tensor)- 1维或2维 ``Tensor`` 。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64`` 。 + - **y** (Tensor)- 1维或2维 ``Tensor`` 。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64`` 。 + - **name** (str,可选)- 输出的名字。默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 + + +返回: ``Tensor`` ,数据类型与 ``x`` 相同。 + +返回类型: Tensor。 + +**代码示例** + +.. 
code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32) + y_data = np.random.uniform(1, 3, [10]).astype(np.float32) + x = paddle.to_tensor(x_data) + y = paddle.to_tensor(y_data) + z = paddle.dot(x, y) + print(z.numpy()) diff --git a/doc/fluid/api_cn/tensor_cn/einsum_cn.rst b/doc/fluid/api_cn/tensor_cn/einsum_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7ea2cdba41f1769bee383153f5969f527b0e9baa --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/einsum_cn.rst @@ -0,0 +1,3 @@ +einsum +------------------------------- +**版本升级,文档正在开发中** diff --git a/doc/fluid/api_cn/tensor_cn/elementwise_add_cn.rst b/doc/fluid/api_cn/tensor_cn/elementwise_add_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..57d82904493922c294f87139b5166299a550b8eb --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/elementwise_add_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_elementwise_add: + +elementwise_add +------------------------------- +:doc_source: paddle.fluid.layers.elementwise_add + + diff --git a/doc/fluid/api_cn/tensor_cn/elementwise_div_cn.rst b/doc/fluid/api_cn/tensor_cn/elementwise_div_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9f4e53a0e1ecce56f9074403ef852df90b0eedfb --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/elementwise_div_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_elementwise_div: + +elementwise_div +------------------------------- +:doc_source: paddle.fluid.layers.elementwise_div + + diff --git a/doc/fluid/api_cn/tensor_cn/elementwise_floordiv_cn.rst b/doc/fluid/api_cn/tensor_cn/elementwise_floordiv_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ebbd1e44fc4b2235d1e04bd52a8298b1bd96aad9 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/elementwise_floordiv_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_elementwise_floordiv: + +elementwise_floordiv +------------------------------- +:doc_source: paddle.fluid.layers.elementwise_floordiv + + diff --git a/doc/fluid/api_cn/tensor_cn/elementwise_mod_cn.rst b/doc/fluid/api_cn/tensor_cn/elementwise_mod_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6e55a50eb7d0f3b29d133da9f6cde18883fac2ce --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/elementwise_mod_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_elementwise_mod: + +elementwise_mod +------------------------------- +:doc_source: paddle.fluid.layers.elementwise_mod + + diff --git a/doc/fluid/api_cn/tensor_cn/elementwise_mul_cn.rst b/doc/fluid/api_cn/tensor_cn/elementwise_mul_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6846ff371e62edf690a2da2047ae563d742a2816 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/elementwise_mul_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_elementwise_mul: + +elementwise_mul +------------------------------- +:doc_source: paddle.fluid.layers.elementwise_mul + + diff --git a/doc/fluid/api_cn/tensor_cn/elementwise_pow_cn.rst b/doc/fluid/api_cn/tensor_cn/elementwise_pow_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..40b64919adc1f8712c7069e3f2cd25969877e6e9 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/elementwise_pow_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_tensor_cn_elementwise_pow: + +elementwise_pow +------------------------------- +:doc_source: paddle.fluid.layers.elementwise_pow + + diff --git a/doc/fluid/api_cn/tensor_cn/elementwise_sub_cn.rst b/doc/fluid/api_cn/tensor_cn/elementwise_sub_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4f01a2f869c852db917160d0ecb45263f7fd079b --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/elementwise_sub_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_elementwise_sub: + +elementwise_sub +------------------------------- +:doc_source: paddle.fluid.layers.elementwise_sub + + diff --git a/doc/fluid/api_cn/tensor_cn/elementwise_sum_cn.rst b/doc/fluid/api_cn/tensor_cn/elementwise_sum_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0fa2c794769ce9cf1abc202ac13babaf7dbec557 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/elementwise_sum_cn.rst @@ -0,0 +1,80 @@ +.. _cn_api_tensor_elementwise_sum: + +elementwise_sum +------------------------------- + +.. py:function:: paddle.elementwise_sum(inputs, name=None) + +:alias_main: paddle.elementwise_sum +:alias: paddle.elementwise_sum,paddle.tensor.elementwise_sum,paddle.tensor.math.elementwise_sum + + + +该OP用于对输入的一至多个Tensor或LoDTensor求和。如果输入的是LoDTensor,输出仅与第一个输入共享LoD信息(序列信息)。 + +例1: +:: + 输入: + input.shape = [2, 3] + input = [[1, 2, 3], + [4, 5, 6]] + + 输出: + output.shape = [2, 3] + output = [[1, 2, 3], + [4, 5, 6]] + +例2: +:: + 输入: + 第一个输入: + input1.shape = [2, 3] + input1 = [[1, 2, 3], + [4, 5, 6]] + + 第二个输入: + input2.shape = [2, 3] + input2 = [[7, 8, 9], + [10, 11, 12]] + + 输出: + output.shape = [2, 3] + output = [[8, 10, 12], + [14, 16, 18]] + +参数: + - **inputs** (Variable|list(Variable)) - 输入的一至多个Variable。如果输入了多个Variable,则不同Variable间的shape和数据类型应保持一致。Variable为多维Tensor或LoDTensor,数据类型支持:float32,float64,int32,int64。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:对输入 ``inputs`` 中的Variable求和后的结果,shape和数据类型与 ``inputs`` 一致。 + +返回类型:Variable + + +**代码示例:** + +.. code-block:: python + + import paddle + import paddle.fluid as fluid + input0 = fluid.layers.fill_constant(shape=[2, 3], dtype='int64', value=5) + input1 = fluid.layers.fill_constant(shape=[2, 3], dtype='int64', value=3) + sum = paddle.elementwise_sum([input0, input1]) + + #用户可以通过executor打印出求和的结果 + out = fluid.layers.Print(sum, message="the sum of input0 and input1: ") + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(fluid.default_main_program()) + + #打印出的数据为: + 1570701754 the sum of input0 and input1: The place is:CPUPlace + Tensor[elementwise_sum_0.tmp_0] + shape: [2,3,] + dtype: l + data: 8,8,8,8,8,8, + + #输出了shape为[2,3]的Tensor,与输入的shape一致 + #dtype为对应C++数据类型,在不同环境下可能显示值不同,但本质相同 + #例如:如果Tensor中数据类型是int64,则对应的C++数据类型为int64_t,所以dtype值为typeid(int64_t).name(), + # 其在MacOS下为'x',linux下为'l',Windows下为'__int64',都表示64位整型变量 + diff --git a/doc/fluid/api_cn/tensor_cn/equal_all_cn.rst b/doc/fluid/api_cn/tensor_cn/equal_all_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b1269776011012b2c33df1101ad6c97948728a3d --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/equal_all_cn.rst @@ -0,0 +1,40 @@ +.. _cn_api_tensor_equal_all: + +equal_all +------------------------------- + +.. 
py:function:: paddle.equal_all(x, y, name=None) + +:alias_main: paddle.equal_all +:alias: paddle.equal_all,paddle.tensor.equal_all,paddle.tensor.logic.equal_all + +该OP返回:返回的结果只有一个元素值,如果所有相同位置的元素相同返回True,否则返回False。 + +**注:该OP输出的结果不返回梯度。** + + +参数: + - **x** (Tensor) - 输入Tensor,支持的数据类型包括 float32, float64,int32, int64。 + - **y** (Tensor) - 输入Tensor,支持的数据类型包括 float32, float64, int32, int64。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:输出结果为Tensor,Tensor数据类型为bool。 + +返回类型:变量(Tensor) + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle + import paddle.imperative as imperative + paddle.enable_imperative() + paddle.enable_imperative() + x = imperative.to_variable(np.array([1, 2, 3])) + y = imperative.to_variable(np.array([1, 2, 3])) + z = imperative.to_variable(np.array([1, 4, 3])) + result1 = paddle.equal_all(x, y) + print(result1.numpy()) # result1 = [True ] + result2 = paddle.equal_all(x, z) + print(result2.numpy()) # result2 = [False ] diff --git a/doc/fluid/api_cn/tensor_cn/equal_cn.rst b/doc/fluid/api_cn/tensor_cn/equal_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e8b1dff6087609b56b406dc5680e176a48faca22 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/equal_cn.rst @@ -0,0 +1,36 @@ +.. _cn_api_tensor_equal: + +equal +------------------------------- +.. py:function:: paddle.equal(x, y, name=None) + +:alias_main: paddle.equal +:alias: paddle.equal,paddle.tensor.equal,paddle.tensor.logic.equal + +该OP返回 :math:`x==y` 逐元素比较x和y是否相等,相同位置的元素相同则返回True,否则返回False。使用重载算子 `==` 可以有相同的计算函数效果 + +**注:该OP输出的结果不返回梯度。** + +参数: + - **x** (Tensor) - 输入Tensor,支持的数据类型包括 float32, float64,int32, int64。 + - **y** (Tensor) - 输入Tensor,支持的数据类型包括 float32, float64, int32, int64。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + + +返回:输出结果的Tensor,输出Tensor的shape和输入一致,Tensor数据类型为bool。 + +返回类型:变量(Tensor) + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle + import paddle.imperative as imperative + paddle.enable_imperative() + x = imperative.to_variable(np.array([1, 2, 3])) + y = imperative.to_variable(np.array([1, 3, 2])) + result1 = paddle.equal(x, y) + print(result1.numpy()) # result1 = [True False False] + diff --git a/doc/fluid/api_cn/tensor_cn/erf_cn.rst b/doc/fluid/api_cn/tensor_cn/erf_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b56832508a4ff7985dd237d60d7bc34d69486724 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/erf_cn.rst @@ -0,0 +1,35 @@ +.. _cn_api_tensor_erf: + +erf +------------------------------- + +.. py:function:: paddle.erf(x, name=None) + + + +逐元素计算 Erf 激活函数。更多细节请参考 `Error function `_ 。 + + +.. math:: + out = \frac{2}{\sqrt{\pi}} \int_{0}^{x}e^{- \eta^{2}}d\eta + +参数: + - **x** (Tensor) - 输入的 `Tensor` ,数据类型为: float16, float32, float64。 + - **name** (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +返回: + - Tensor,对输入x进行erf激活后的Tensor,形状、数据类型与输入 x 一致。 + + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle + paddle.disable_static() + x_data = np.array([-0.4, -0.2, 0.1, 0.3]) + x = paddle.to_tensor(x_data) + out = paddle.erf(x) + print(out.numpy()) + # [-0.42839236 -0.22270259 0.11246292 0.32862676] diff --git a/doc/fluid/api_cn/tensor_cn/exp_cn.rst b/doc/fluid/api_cn/tensor_cn/exp_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c0046a97a25f300d5894b31130f2a5a0459c2901 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/exp_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_tensor_cn_exp: + +exp +------------------------------- +:doc_source: paddle.fluid.layers.exp + + diff --git a/doc/fluid/api_cn/tensor_cn/expand_as_cn.rst b/doc/fluid/api_cn/tensor_cn/expand_as_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2f62f67233c8e52f87f77e703c3e4499cb8fc40c --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/expand_as_cn.rst @@ -0,0 +1,38 @@ +.. _cn_api_tensor_expand_as: + +expand_as +------------------------------- + +.. py:function:: paddle.expand_as(x, y, name=None) + +根据 ``y`` 的形状扩展 ``x`` ,扩展后, ``x`` 的形状和 ``y`` 的形状相同。 + +``x`` 的维数和 ``y`` 的维数应小于等于6,并且 ``y`` 的维数应该大于等于 ``x`` 的维数。扩展的维度的维度值应该为1。 + +参数 +::::::::: + - x (Tensor) - 输入的Tensor,数据类型为:bool、float32、float64、int32或int64。 + - y (Tensor) - 给定输入 ``x`` 扩展后的形状。 + - name (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +返回 +::::::::: +``Tensor`` ,数据类型与 ``x`` 相同。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + np_data_x = np.array([1, 2, 3]).astype('int32') + np_data_y = np.array([[1, 2, 3], [4, 5, 6]]).astype('int32') + data_x = paddle.to_tensor(np_data_x) + data_y = paddle.to_tensor(np_data_y) + out = paddle.expand_as(data_x, data_y) + np_out = out.numpy() + # [[1, 2, 3], [1, 2, 3]] + diff --git a/doc/fluid/api_cn/tensor_cn/expand_cn.rst b/doc/fluid/api_cn/tensor_cn/expand_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..94af4e93fe13c2d5ff220c50b6a18f37e0491851 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/expand_cn.rst @@ -0,0 +1,36 @@ +.. _cn_api_tensor_expand: + +expand +------------------------------- + +.. py:function:: paddle.expand(x, shape, name=None) + +根据 ``shape`` 指定的形状扩展 ``x`` ,扩展后, ``x`` 的形状和 ``shape`` 指定的形状一致。 + +``x`` 的维数和 ``shape`` 的元素数应小于等于6,并且 ``shape`` 中的元素数应该大于等于 ``x`` 的维数。扩展的维度的维度值应该为1。 + +参数 +::::::::: + - x (Tensor) - 输入的Tensor,数据类型为:bool、float32、float64、int32或int64。 + - shape (tuple|list|Tensor) - 给定输入 ``x`` 扩展后的形状,若 ``shape`` 为list或者tuple,则其中的元素值应该为整数或者1-D Tensor,若 ``shape`` 类型为Tensor,则其应该为1-D Tensor。 + - name (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +返回 +::::::::: +``Tensor`` ,数据类型与 ``x`` 相同。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + np_data = np.array([1, 2, 3]).astype('int32') + data = paddle.to_tensor(np_data) + out = paddle.expand(data, [2, 3]) + np_out = out.numpy() + # [[1, 2, 3], [1, 2, 3]] + diff --git a/doc/fluid/api_cn/tensor_cn/eye_cn.rst b/doc/fluid/api_cn/tensor_cn/eye_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c105c4b06b517488c1f0100ffb4cd4ee26ac8f89 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/eye_cn.rst @@ -0,0 +1,39 @@ +.. _cn_api_paddle_tensor_eye: + +eye +------------------------------- + +.. py:function:: paddle.tensor.eye(num_rows, num_columns=None, dtype=None, name=None) + +该OP用来构建二维Tensor(主对角线元素为1,其他元素为0)。 + +参数: + - **num_rows** (int) - 生成2-D Tensor的行数,数据类型为非负int32。 + - **num_columns** (int,可选) - 生成2-D Tensor的列数,数据类型为非负int32。若为None,则默认等于num_rows。 + - **dtype** (np.dtype|str, 可选) - 返回Tensor的数据类型,可为float16,float32,float64, int32, int64。若为None, 则默认等于float32。 + - **name** (str, 可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回: ``shape`` 为 [num_rows, num_columns]的Tensor。 + + +抛出异常: + - ``TypeError``: - 如果 ``dtype`` 的类型不是float16, float32, float64, int32, int64其中之一。 + - ``TypeError``: - 如果 ``num_columns`` 不是非负整数或者 ``num_rows`` 不是非负整数。 + +**代码示例**: + +.. 
code-block:: python + + import paddle + paddle.disable_static() # Now we are in imperative mode + data = paddle.eye(3, dtype='int32') + # [[1 0 0] + # [0 1 0] + # [0 0 1]] + data = paddle.eye(2, 3, dtype='int32') + # [[1 0 0] + # [0 1 0]] + + + + diff --git a/doc/fluid/api_cn/tensor_cn/fill_constant_cn.rst b/doc/fluid/api_cn/tensor_cn/fill_constant_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0c6579b68ce55e05eb34203fb055ff367d5e380f --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/fill_constant_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_fill_constant: + +fill_constant +------------------------------- +:doc_source: paddle.fluid.layers.fill_constant + + diff --git a/doc/fluid/api_cn/tensor_cn/flatten_cn.rst b/doc/fluid/api_cn/tensor_cn/flatten_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..359bedd2da518e1a286e96e387a640380f51c396 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/flatten_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_flatten: + +flatten +------------------------------- +:doc_source: paddle.fluid.layers.flatten + + diff --git a/doc/fluid/api_cn/tensor_cn/flip_cn.rst b/doc/fluid/api_cn/tensor_cn/flip_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d8f1f7efd1b9a2c74d9902ae4c0c58d6dafa6f22 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/flip_cn.rst @@ -0,0 +1,43 @@ +.. _cn_api_tensor_flip: + +flip +------------------------------- + +.. py:function:: paddle.flip(x, axis, name=None): + +:alias_main: paddle.flip +:alias: paddle.flip, paddle.tensor.flip, paddle.tensor.manipulation.flip + + + +该OP沿指定轴反转n维tensor. + +参数: + - **x** (Variable) - 输入张量。维度为多维,数据类型为bool, int32, int64, float32或float64。 + - **axis** (list) - 需要翻转的轴。当 ``axis[i] < 0`` 时,实际的计算维度为 ndim(x) + axis[i],其中i为axis的索引。 + - **name** (str|None) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。默认值为None。 + +返回:在指定axis上翻转后的张量,与输入x数据类型相同。 + +返回类型:Variable,与输入x数据类型相同。 + +抛出异常: + - ``TypeError`` - 当输出 ``out`` 和输入 ``x`` 数据类型不一致时候。 + - ``ValueError`` - 当参数 ``axis`` 不合法时。 + +**代码示例1**: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.enable_imperative() + + image_shape=(3, 2, 2) + x = np.arange(image_shape[0] * image_shape[1] * image_shape[2]).reshape(image_shape) + x = x.astype('float32') + img = paddle.imperative.to_variable(x) + out = paddle.flip(img, [0,1]) + print(out) # [[[10,11][8, 9]],[[6, 7],[4, 5]] [[2, 3],[0, 1]]] + diff --git a/doc/fluid/api_cn/tensor_cn/floor_cn.rst b/doc/fluid/api_cn/tensor_cn/floor_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4737b238da51cec5c033c1ecf053ed49f0df985d --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/floor_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_floor: + +floor +------------------------------- +:doc_source: paddle.fluid.layers.floor + + diff --git a/doc/fluid/api_cn/tensor_cn/floor_divide_cn.rst b/doc/fluid/api_cn/tensor_cn/floor_divide_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c63ae4fd10451f280169d67c8156fa30f92649d7 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/floor_divide_cn.rst @@ -0,0 +1,43 @@ +.. _cn_api_tensor_floor_divide: + +floor_divide +------------------------------- + +.. py:function:: paddle.floor_divide(x, y, name=None) + +该OP是逐元素整除算子,输入 ``x`` 与输入 ``y`` 逐元素整除,并将各个位置的输出元素保存到返回结果中。 +输入 ``x`` 与输入 ``y`` 必须可以广播为相同形状, 关于广播规则,请参考 :ref:`use_guide_broadcasting` + +等式为: + +.. 
math:: + Out = X // Y + +- :math:`X` :多维Tensor。 +- :math:`Y` :多维Tensor。 + +参数: + - x(Tensor)- 多维Tensor。数据类型为int32或int64。 + - y(Tensor)- 多维Tensor。数据类型为int32或int64。 + - name(str,可选)- 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + + +返回: 多维 Tensor, 数据类型与 ``x`` 相同,维度为广播后的形状。 + +返回类型: Tensor + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + np_x = np.array([2, 3, 8, 7]) + np_y = np.array([1, 5, 3, 3]) + x = paddle.to_tensor(np_x) + y = paddle.to_tensor(np_y) + z = paddle.floor_divide(x, y) + print(z.numpy()) # [2, 0, 2, 2] \ No newline at end of file diff --git a/doc/fluid/api_cn/tensor_cn/floor_mod_cn.rst b/doc/fluid/api_cn/tensor_cn/floor_mod_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e9b9773c09dfef8d15f85f62edd4ebcb09c86b4b --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/floor_mod_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_floor_mod: + +floor_mod +------------------------------- +:doc_source: paddle.tensor.remainder + + diff --git a/doc/fluid/api_cn/tensor_cn/full_cn.rst b/doc/fluid/api_cn/tensor_cn/full_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c1d47170014bed30360eae294d3a8ebc8767452c --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/full_cn.rst @@ -0,0 +1,47 @@ +.. _cn_api_tensor_full: + +full +------------------------------- + +.. py:function:: paddle.full(shape, fill_value, dtype=None, name=None) + + + +该OP创建形状大小为 ``shape`` 并且数据类型为 ``dtype`` 的Tensor,其中元素值均为 ``fill_value`` 。 + +参数: + - **shape** (list|tuple|Tensor) – 指定创建Tensor的形状(shape), 数据类型为int32 或者int64。 + - **fill_value** (bool|float|int|Tensor) - 用于初始化输出Tensor的常量数据的值。注意:该参数不可超过输出变量数据类型的表示范围。 + - **dtype** (np.dtype|str, 可选)- 输出变量的数据类型。若为None,则输出变量的数据类型和输入变量相同,默认值为None。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:返回一个存储结果的Tensor,数据类型和dtype相同。 + + +**代码示例**: + +.. code-block:: python + + import paddle + + paddle.disable_static() # Now we are in imperative mode + data1 = paddle.full(shape=[2,1], fill_value=0, dtype='int64') + #[[0] + # [0]] + + # attr shape is a list which contains Tensor. + positive_2 = paddle.fill_constant([1], "int32", 2) + data3 = paddle.full(shape=[1, positive_2], dtype='float32', fill_value=1.5) + # [[1.5 1.5]] + + # attr shape is a Tensor. + shape = paddle.fill_constant([2], "int32", 2) + data4 = paddle.full(shape=shape, dtype='bool', fill_value=True) + # [[True True] + # [True True]] + + # attr fill_value is a Tensor. + val = paddle.fill_constant([1], "float32", 2.0) + data5 = paddle.full(shape=[2,1], fill_value=val, dtype='float32') + # [[2.0] + # [2.0]] diff --git a/doc/fluid/api_cn/tensor_cn/full_like_cn.rst b/doc/fluid/api_cn/tensor_cn/full_like_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..48b2a38cd67faa56b87942ac7aeea70d360eb25a --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/full_like_cn.rst @@ -0,0 +1,32 @@ +.. _cn_api_tensor_full_like: + +full_like +------------------------------- + +.. 
py:function:: paddle.full_like(x, fill_value, dtype=None, name=None) + + +该OP创建一个和 ``x`` 具有相同的形状并且数据类型为 ``dtype`` 的Tensor,其中元素值均为 ``fill_value`` , 当 ``dtype`` 为None的时候,Tensor数据类型和输入 ``x`` 相同。 + +参数: + - **x** (Tensor) – 输入Tensor, 输出Tensor和x具有相同的形状,x的数据类型可以是bool,float16,float32,float64,int32,int64。 + - **fill_value** (bool|float|int) - 用于初始化输出张量的常量数据的值。注意:该参数不可超过输出变量数据类型的表示范围。 + - **dtype** (np.dtype|str, 可选)- 输出变量的数据类型。若参数为None,则输出变量的数据类型和输入变量相同,默认值为None。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:返回一个存储结果的Tensor,数据类型和dtype相同。 + + + **代码示例**: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() # Now we are in imperative mode + input = paddle.full(shape=[2, 3], fill_value=0.0, dtype='float32', name='input') + output = paddle.full_like(input, 2.0) + # [[2. 2. 2.] + # [2. 2. 2.]] + diff --git a/doc/fluid/api_cn/tensor_cn/gather_cn.rst b/doc/fluid/api_cn/tensor_cn/gather_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ed1288b473a546be541f7ac8284d207f5324723a --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/gather_cn.rst @@ -0,0 +1,48 @@ +.. _cn_api_paddle_tensor_gather + +gather +------------------------------- + +.. py:function:: paddle.gather(x, index, axis=None, name=None) + +根据索引 index 获取输入 ``x`` 的指定 ``aixs`` 维度的条目,并将它们拼接在一起。 + +.. code-block:: text + + X = [[1, 2], + [3, 4], + [5, 6]] + + Index = [1, 2] + + axis = 0 + + Then: + + Out = [[3, 4], + [5, 6]] + +**参数**: + - **x** (Tensor) - 输入 Tensor, 秩 ``rank >= 1`` , 支持的数据类型包括 int32、int64、float32、float64 和 uint8 (CPU)、float16(GPU) 。 + - **index** (Tensor) - 索引 Tensor,秩 ``rank = 1``, 数据类型为 int32 或 int64。 + - **axis** (Tensor) - 指定index 获取输入的维度, ``axis`` 的类型可以是int或者Tensor,当 ``axis`` 为Tensor的时候其数据类型为int32 或者int64。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +**返回**:和输入的秩相同的输出Tensor。 + + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle + + paddle.disable_static() + input_1 = np.array([[1,2],[3,4],[5,6]]) + index_1 = np.array([0,1]) + input = paddle.to_tensor(input_1) + index = paddle.to_tensor(index_1) + output = paddle.gather(input, index, axis=0) + # expected output: [[1,2],[3,4]] + diff --git a/doc/fluid/api_cn/tensor_cn/gather_nd_cn.rst b/doc/fluid/api_cn/tensor_cn/gather_nd_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8aa603b1e614d02f64900271827991a5a3edd6cd --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/gather_nd_cn.rst @@ -0,0 +1,76 @@ +.. _cn_api_tensor_cn_gather_nd: + +gather_nd +------------------------------- +.. py:function:: paddle.gather_nd(x, index, name=None) + + +该OP是 :code:`gather` 的高维推广,并且支持多轴同时索引。 :code:`index` 是一个K维度的张量,它可以认为是从 :code:`x` 中取K-1维张量,每一个元素是一个切片: + +.. 
math:: + output[(i_0, ..., i_{K-2})] = x[index[(i_0, ..., i_{K-2})]] + +显然, :code:`index.shape[-1] <= x.rank` 并且输出张量的维度是 :code:`index.shape[:-1] + x.shape[index.shape[-1]:]` 。 + +示例: + +:: + + 给定: + x = [[[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]], + [[12, 13, 14, 15], + [16, 17, 18, 19], + [20, 21, 22, 23]]] + x.shape = (2, 3, 4) + + - 案例 1: + index = [[1]] + + gather_nd(x, index) + = [x[1, :, :]] + = [[12, 13, 14, 15], + [16, 17, 18, 19], + [20, 21, 22, 23]] + + - 案例 2: + + index = [[0,2]] + gather_nd(x, index) + = [x[0, 2, :]] + = [8, 9, 10, 11] + + - 案例 3: + + index = [[1, 2, 3]] + gather_nd(x, index) + = [x[1, 2, 3]] + = [23] + + +参数: + - **x** (Tensor) - 输入Tensor,数据类型可以是int32,int64,float32,float64, bool。 + - **index** (Tensor) - 输入的索引Tensor,其数据类型int32或者int64。它的维度 :code:`index.rank` 必须大于1,并且 :code:`index.shape[-1] <= x.rank` 。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:shape为index.shape[:-1] + x.shape[index.shape[-1]:]的Tensor,数据类型与 :code:`x` 一致。 + + +**代码示例**: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + np_x = np.array([[[1, 2], [3, 4], [5, 6]], + [[7, 8], [9, 10], [11, 12]]]) + np_index = [[0, 1]] + x = paddle.to_tensor(np_x) + index = paddle.to_tensor(np_index) + + output = paddle.gather_nd(x, index) #[[3, 4]] + + diff --git a/doc/fluid/api_cn/tensor_cn/greater_equal_cn.rst b/doc/fluid/api_cn/tensor_cn/greater_equal_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6eedd6c3bdcfd5fe971dad36fea0452eea45b10b --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/greater_equal_cn.rst @@ -0,0 +1,37 @@ +.. _cn_api_tensor_cn_greater_equal: + +greater_equal +------------------------------- +.. py:function:: paddle.greater_equal(x, y, name=None) + +:alias_main: paddle.greater_equal +:alias: paddle.greater_equal,paddle.tensor.greater_equal,paddle.tensor.logic.greater_equal + +该OP逐元素地返回 :math:`x >= y` 的逻辑值,相同位置前者输入大于等于后者输入则返回True,否则返回False。使用重载算子 `>=` 可以有相同的计算函数效果。 + +**注:该OP输出的结果不返回梯度。** + +参数: + - **x** (Tensor) - 输入Tensor,支持的数据类型包括 float32, float64,int32, int64。 + - **y** (Tensor) - 输入Tensor,支持的数据类型包括 float32, float64, int32, int64。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + + +返回:输出结果的Tensor,输出Tensor的shape和输入一致,Tensor数据类型为bool。 + +返回类型:变量(Tensor) + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle + import paddle.imperative as imperative + paddle.enable_imperative() + x = imperative.to_variable(np.array([1, 2, 3])) + y = imperative.to_variable(np.array([1, 3, 2])) + result1 = paddle.greater_equal(x, y) + print(result1.numpy()) # result1 = [True False True] + + diff --git a/doc/fluid/api_cn/tensor_cn/greater_than_cn.rst b/doc/fluid/api_cn/tensor_cn/greater_than_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..33df3e31c019f4b0ce943f40d5f838cd3d22b19a --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/greater_than_cn.rst @@ -0,0 +1,35 @@ +.. _cn_api_tensor_cn_greater_than: + +greater_than +------------------------------- +.. 
py:function:: paddle.greater_than(x, y, name=None) + +:alias_main: paddle.greater_than +:alias: paddle.greater_than,paddle.tensor.greater_than,paddle.tensor.logic.greater_than + +该OP返回 :math:`x>y` 逐元素比较x和y是否相等,相同位置前者输入大于等于后者输入则返回True,否则返回False。使用重载算子 `>` 可以有相同的计算函数效果 + +**注:该OP输出的结果不返回梯度。** + +参数: + - **x** (Tensor) - 输入Tensor,支持的数据类型包括 float32, float64,int32, int64。 + - **y** (Tensor) - 输入Tensor,支持的数据类型包括 float32, float64, int32, int64。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + + +返回:输出结果的Tensor,输出Tensor的shape和输入一致,Tensor数据类型为bool。 + +返回类型:变量(Tensor) + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle + import paddle.imperative as imperative + paddle.enable_imperative() + x = imperative.to_variable(np.array([1, 2, 3])) + y = imperative.to_variable(np.array([1, 3, 2])) + result1 = paddle.greater_than(x, y) + print(result1.numpy()) # result1 = [False False True] diff --git a/doc/fluid/api_cn/tensor_cn/has_inf_cn.rst b/doc/fluid/api_cn/tensor_cn/has_inf_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b575614f60b20b8dd35d8b8521cacb2a9d1fd95a --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/has_inf_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_has_inf: + +has_inf +------------------------------- +:doc_source: paddle.fluid.layers.has_inf + + diff --git a/doc/fluid/api_cn/tensor_cn/has_nan_cn.rst b/doc/fluid/api_cn/tensor_cn/has_nan_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..636a16c97b0ecc88575f53328ffc4c8aa97f5218 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/has_nan_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_has_nan: + +has_nan +------------------------------- +:doc_source: paddle.fluid.layers.has_nan + + diff --git a/doc/fluid/api_cn/tensor_cn/histogram_cn.rst b/doc/fluid/api_cn/tensor_cn/histogram_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d57084fa6f76f8640688f8d2fb0b2a84d81a8994 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/histogram_cn.rst @@ -0,0 +1,53 @@ +.. _cn_api_tensor_histogram: + +histogram +------------------------------- + +.. py:function:: paddle.histogram(input, bins=100, min=0, max=0): + +计算输入张量的直方图。以min和max为range边界,将其均分成bins个直条,然后将排序好的数据划分到各个直条(bins)中。如果min和max都为0, 则利用数据中的最大最小值作为边界。 + +参数: + - **input** (Variable) - 输入Tensor。维度为多维,数据类型为int32, int64, float32或float64。 + - **bins** (int) - 直方图 bins(直条)的个数,默认为100。 + - **min** (int) - range的下边界(包含),默认为0。 + - **max** (int) - range的上边界(包含),默认为0。 + +返回:直方图。 + +返回类型:Variable,数据为int64类型,维度为(nbins,)。 + +抛出异常: + - ``ValueError`` - 当输入 ``bin``, ``min``, ``max``不合法时。 + +**代码示例1**: + +.. code-block:: python + + import paddle + import numpy as np + startup_program = paddle.Program() + train_program = paddle.Program() + with paddle.program_guard(train_program, startup_program): + inputs = paddle.data(name='input', dtype='int32', shape=[2,3]) + output = paddle.histogram(inputs, bins=5, min=1, max=5) + place = paddle.CPUPlace() + exe = paddle.Executor(place) + exe.run(startup_program) + img = np.array([[2, 4, 2], [2, 5, 4]]).astype(np.int32) + res = exe.run(train_program, + feed={'input': img}, + fetch_list=[output]) + print(np.array(res[0])) # [0, 3, 0, 2, 1] + +**代码示例2**: + +.. 
code-block:: python + + import paddle + import numpy as np + with paddle.imperative.guard(paddle.CPUPlace()): + inputs_np = np.array([0.5, 1.5, 2.5]).astype(np.float) + inputs = paddle.imperative.to_variable(inputs_np) + result = paddle.histogram(inputs, bins=5, min=1, max=5) + print(result) # [1, 1, 0, 0, 0] diff --git a/doc/fluid/api_cn/tensor_cn/increment_cn.rst b/doc/fluid/api_cn/tensor_cn/increment_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ce87c53a762b0b2ae5662bce4bc2bf0b3948eccd --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/increment_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_increment: + +increment +------------------------------- +:doc_source: paddle.fluid.layers.increment + + diff --git a/doc/fluid/api_cn/tensor_cn/index_sample_cn.rst b/doc/fluid/api_cn/tensor_cn/index_sample_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d11aef519753bf0543b984b4b76539e5fd76f8ce --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/index_sample_cn.rst @@ -0,0 +1,75 @@ +.. _cn_api_tensor_search_index_sample: + +index_sample +------------------------------- + +.. py:function:: paddle.index_sample(x, index) + +:alias_main: paddle.index_sample +:alias: paddle.index_sample,paddle.tensor.index_sample,paddle.tensor.search.index_sample + + + +该OP实现对输入 ``x`` 中的元素进行批量抽样,取 ``index`` 指定的对应下标的元素,按index中出现的先后顺序组织,填充为一个新的张量。 + +该OP中 ``x`` 与 ``index`` 都是 ``2-D`` 张量。 ``index`` 的第一维度与输入 ``x`` 的第一维度必须相同, ``index`` 的第二维度没有大小要求,可以重复索引相同下标元素。 + +**参数**: + - **x** (Variable)– 输入的二维张量,数据类型为 int32,int64,float32,float64。 + - **index** (Variable)– 包含索引下标的二维张量。数据类型为 int32,int64。 + +**返回**: + -**Variable** ,数据类型与输入 ``x`` 相同,维度与 ``index`` 相同。 + +**代码示例**: + +.. code-block:: python + + import paddle + import paddle.fluid as fluid + import numpy as np + + data = np.array([[1.0, 2.0, 3.0, 4.0], + [5.0, 6.0, 7.0, 8.0], + [9.0, 10.0, 11.0, 12.0]]).astype('float32') + + data_index = np.array([[0, 1, 2], + [1, 2, 3], + [0, 0, 0]]).astype('int32') + + target_data = np.array([[100, 200, 300, 400], + [500, 600, 700, 800], + [900, 1000, 1100, 1200]]).astype('int32') + + + with fluid.dygraph.guard(): + x = fluid.dygraph.to_variable(data) + index = fluid.dygraph.to_variable(data_index) + target = fluid.dygraph.to_variable(target_data) + + out_z1 = paddle.index_sample(x, index) + print(out_z1.numpy()) + #[[1. 2. 3.] + # [6. 7. 8.] + # [9. 9. 9.]] + + # 巧妙用法:使用topk op产出的top元素的下标 + # 在另一个tensor中索引对应位置的元素 + top_value, top_index = fluid.layers.topk(x, k=2) + out_z2 = paddle.index_sample(target, top_index) + print(top_value.numpy()) + #[[ 4. 3.] + # [ 8. 7.] + # [12. 11.]] + + print(top_index.numpy()) + #[[3 2] + # [3 2] + # [3 2]] + + print(out_z2.numpy()) + #[[ 400 300] + # [ 800 700] + # [1200 1100]] + + diff --git a/doc/fluid/api_cn/tensor_cn/index_select_cn.rst b/doc/fluid/api_cn/tensor_cn/index_select_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..fe60cbc5aec076d5e8c0c9235330fbecd33da432 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/index_select_cn.rst @@ -0,0 +1,45 @@ +.. _cn_api_tensor_search_index_select: + +index_select +------------------------------- + +.. 
py:function:: paddle.index_select(x, index, axis=0, name=None) + + + +该OP沿着指定轴 ``axis`` 对输入 ``x`` 进行索引,取 ``index`` 中指定的相应项,创建并返回到一个新的Tensor。这里 ``index`` 是一个 ``1-D`` Tensor。除 ``axis`` 轴外,返回的Tensor其余维度大小和输入 ``x`` 相等 , ``axis`` 维度的大小等于 ``index`` 的大小。 + +**参数**: + - **x** (Tensor)– 输入Tensor。 ``x`` 的数据类型可以是float32,float64,int32,int64。 + - **index** (Tensor)– 包含索引下标的1-D Tensor。 + - **axis** (int, 可选) – 索引轴,若未指定,则默认选取第0维。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +**返回**: + -**Tensor**: 返回一个数据类型同输入的Tensor。 + + +**代码示例**: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() # Now we are in imperative mode + data = np.array([[1.0, 2.0, 3.0, 4.0], + [5.0, 6.0, 7.0, 8.0], + [9.0, 10.0, 11.0, 12.0]]) + data_index = np.array([0, 1, 1]).astype('int32') + + x = paddle.to_tensor(data) + index = paddle.to_tensor(data_index) + out_z1 = paddle.index_select(x=x, index=index) + #[[1. 2. 3. 4.] + # [5. 6. 7. 8.] + # [5. 6. 7. 8.]] + out_z2 = paddle.index_select(x=x, index=index, axis=1) + #[[ 1. 2. 2.] + # [ 5. 6. 6.] + # [ 9. 10. 10.]] + diff --git a/doc/fluid/api_cn/tensor_cn/inverse_cn.rst b/doc/fluid/api_cn/tensor_cn/inverse_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a8f79737f9f3e71afbd4f1e4896ed6c3b0e6e5da --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/inverse_cn.rst @@ -0,0 +1,38 @@ +.. _cn_api_tensor_inverse: + +inverse +------------------------------- + +.. py:function:: paddle.inverse(x, name=None) + +:alias_main: paddle.inverse +:alias: paddle.inverse, paddle.tensor.inverse, paddle.tensor.math.inverse + + + +计算方阵的逆。方阵是行数和列数相等的矩阵。输入可以是一个方阵(2-D张量),或者是批次方阵(维数大于2时)。 + +**参数**: + - **x** (Variable) – 输入张量,最后两维的大小必须相等。如果输入张量的维数大于2,则被视为2-D矩阵的批次(batch)。支持的数据类型:float32,float64。 + - **name** (str,可选) – 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None。 + +**返回**: 数据类型同输入。 + +返回类型: Variable + +抛出异常: + - :code:`TypeError` ,x不是Variable类型,或者数据类型不是float32、float64时 + - :code:`ValueError` ,x的维数小于2时 + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle + + mat_np = np.array([[2, 0], [0, 2]]).astype("float32") + paddle.enable_imperative() + mat = paddle.imperative.to_variable(mat_np) + inv = paddle.inverse(mat) + print(inv) # [[0.5, 0], [0, 0.5]] diff --git a/doc/fluid/api_cn/tensor_cn/is_empty_cn.rst b/doc/fluid/api_cn/tensor_cn/is_empty_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..432e2fcf4a055b4d9983993abcf45c93a641ba87 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/is_empty_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_is_empty: + +is_empty +------------------------------- +:doc_source: paddle.fluid.layers.is_empty + + diff --git a/doc/fluid/api_cn/tensor_cn/isfinite_cn.rst b/doc/fluid/api_cn/tensor_cn/isfinite_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0cc20e3e2335cc100d6275c6936d7bc347b17454 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/isfinite_cn.rst @@ -0,0 +1,30 @@ +.. _cn_api_tensor_isfinite: + +isfinite +----------------------------- + +.. py:function:: paddle.tensor.isfinite(x, name=None) + +返回输入tensor的每一个值是否为 `Finite` (既非 `+/-INF` 也非 `+/-NaN` )。 + +参数 +::::::::: + - **x** (Tensor): 输入的 `Tensor` ,数据类型为:float16、float32、float64、int32、int64。 + - **name** (str, 可选): 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +返回 +::::::::: +``Tensor``, 每个元素是一个bool值,表示输入 `x` 的每个元素是否为 `Finite` (既非 `+/-INF` 也非 `+/-NaN` )。 + +代码示例 +::::::::: + +.. 
code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + x_np = np.array([float('-inf'), -2, 3.6, float('inf'), 0, float('-nan'), float('nan')]) + x = paddle.to_tensor(x_np) + out = paddle.tensor.isfinite(x) + print(out.numpy()) # [False True True False True False False] diff --git a/doc/fluid/api_cn/tensor_cn/isinf_cn.rst b/doc/fluid/api_cn/tensor_cn/isinf_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1fdd20485f6c504d5dfcbd18198a5475eac79872 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/isinf_cn.rst @@ -0,0 +1,30 @@ +.. _cn_api_tensor_isinf: + +isinf +----------------------------- + +.. py:function:: paddle.tensor.isinf(x, name=None) + +返回输入tensor的每一个值是否为 `+/-INF` 。 + +参数 +::::::::: + - **x** (Tensor): 输入的 `Tensor` ,数据类型为:float16、float32、float64、int32、int64。 + - **name** (str, 可选): 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +返回 +::::::::: +``Tensor``, 每个元素是一个bool值,表示输入 `x` 的每个元素是否为 `+/-INF` 。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + x_np = np.array([float('-inf'), -2, 3.6, float('inf'), 0, float('-nan'), float('nan')]) + x = paddle.to_tensor(x_np) + out = paddle.tensor.isinf(x) + print(out.numpy()) # [ True False False True False False False] diff --git a/doc/fluid/api_cn/tensor_cn/isnan_cn.rst b/doc/fluid/api_cn/tensor_cn/isnan_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5765ebf184254b87a72c9b5eb8a142d6cef879b1 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/isnan_cn.rst @@ -0,0 +1,30 @@ +.. _cn_api_tensor_isnan: + +isnan +----------------------------- + +.. py:function:: paddle.tensor.isnan(x, name=None) + +返回输入tensor的每一个值是否为 `+/-NaN` 。 + +参数 +::::::::: + - **x** (Tensor): 输入的 `Tensor` ,数据类型为:float16、float32、float64、int32、int64。 + - **name** (str, 可选): 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +返回 +::::::::: +``Tensor``, 每个元素是一个bool值,表示输入 `x` 的每个元素是否为 `+/-NaN` 。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + x_np = np.array([float('-inf'), -2, 3.6, float('inf'), 0, float('-nan'), float('nan')]) + x = paddle.to_tensor(x_np) + out = paddle.tensor.isnan(x) + print(out.numpy()) # [False False False False False True True] diff --git a/doc/fluid/api_cn/tensor_cn/kron_cn.rst b/doc/fluid/api_cn/tensor_cn/kron_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..74cd64a5c59cef931a5727dff05d9c181853cec4 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/kron_cn.rst @@ -0,0 +1,78 @@ +.. _cn_api_paddle_tensor_kron: + +kron +------------------------------- + +.. py:function:: paddle.tensor.kron(x, y, out=None, name=None) + +:alias_main: paddle.kron +:alias: paddle.kron,paddle.tensor.kron,paddle.tensor.math.kron + + + + +Kronecker Product 算子。 + +该 OP 计算两个张量的克罗内克积,结果是一个合成的张量,由第二个张量经过第一个张量中的元素缩放 +后的组块构成。 + + +这个 OP 预设两个张量 $X$ 和 $Y$ 的秩 (rank) 相同,如有必要,将会在秩较小的张量的形状前面补 +上 1。令 $X$ 的形状是 [$r_0$, $r_1$, ..., $r_N$],$Y$ 的形状是 +[$s_0$, $s_1$, ..., $s_N$],那么输出张量的形状是 +[$r_{0}s_{0}$, $r_{1}s_{1}$, ..., $r_{N}s_{N}$]. 其中的元素是 $X$ 和 $Y$ 中的元素 +的乘积。 + +公式为 + +.. math:: + + output[k_{0}, k_{1}, ..., k_{N}] = X[i_{0}, i_{1}, ..., i_{N}] * + Y[j_{0}, j_{1}, ..., j_{N}] + + +其中 + +.. 
math:: + + k_{t} = i_{t} * s_{t} + j_{t}, t = 0, 1, ..., N + + +参数: + - **x** (Variable) – Kron OP 的第一个输入。多维 Tensor,数据类型为 float16, float32, float64, int32 或 int64。 + - **y** (Variable) – Kron OP 的第二个输入。多维 Tensor,数据类型为 float16, float32, float64, int32 或 int64,与 x 相同。 + - **out** (Variable, 可选) - 指定算子输出结果的 Tensor,可以是程序中已经创建的任何 Variable。默认值为 None,此时将创建新的 Variable 来保存输出结果。 + - **name** (str,可选) – 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为 None。 + +返回: + - Kron OP 的输出。多维 Tensor,数据类型为 float16, float32, float64, int32 或 int64,与 x 一致。 + +返回类型: Variable + + +**代码示例** + +.. code-block:: python + + import paddle + from paddle import fluid + import paddle.fluid.dygraph as dg + import numpy as np + + a = np.arange(1, 5).reshape(2, 2).astype(np.float32) + b = np.arange(1, 10).reshape(3, 3).astype(np.float32) + + place = fluid.CPUPlace() + with dg.guard(place): + a_var = dg.to_variable(a) + b_var = dg.to_variable(b) + c_var = paddle.kron(a_var, b_var) + c_np = c_var.numpy() + print(c_np) + + #[[ 1. 2. 3. 2. 4. 6.] + # [ 4. 5. 6. 8. 10. 12.] + # [ 7. 8. 9. 14. 16. 18.] + # [ 3. 6. 9. 4. 8. 12.] + # [12. 15. 18. 16. 20. 24.] + # [21. 24. 27. 28. 32. 36.]] diff --git a/doc/fluid/api_cn/tensor_cn/less_equal_cn.rst b/doc/fluid/api_cn/tensor_cn/less_equal_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..63427b1442ca0cab965045eff3b7bde02fee137e --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/less_equal_cn.rst @@ -0,0 +1,37 @@ +.. _cn_api_tensor_cn_less_equal: + +less_equal +------------------------------- +.. py:function:: paddle.less_equal(x, y, name=None) + +:alias_main: paddle.less_equal +:alias: paddle.less_equal,paddle.tensor.less_equal,paddle.tensor.logic.less_equal + +该OP逐元素地返回 :math:`x <= y` 的逻辑值,相同位置前者输入小于等于后者输入则返回True,否则返回False。使用重载算子 `<=` 可以有相同的计算函数效果。 + +**注:该OP输出的结果不返回梯度。** + +参数: + - **x** (Tensor) - 输入Tensor,支持的数据类型包括 float32, float64,int32, int64。 + - **y** (Tensor) - 输入Tensor,支持的数据类型包括 float32, float64, int32, int64。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + + +返回:输出结果的Tensor,输出Tensor的shape和输入一致,Tensor数据类型为bool。 + +返回类型:变量(Tensor) + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle + import paddle.imperative as imperative + paddle.enable_imperative() + x = imperative.to_variable(np.array([1, 2, 3])) + y = imperative.to_variable(np.array([1, 3, 2])) + result1 = paddle.less_equal(x, y) + print(result1.numpy()) # result1 = [True True False] + + diff --git a/doc/fluid/api_cn/tensor_cn/less_than_cn.rst b/doc/fluid/api_cn/tensor_cn/less_than_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e49b092cc2d5ce062cfac8551026c616d7befca2 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/less_than_cn.rst @@ -0,0 +1,37 @@ +.. _cn_api_tensor_cn_less_than: + +less_than +------------------------------- +.. py:function:: paddle.less_than(x, y, name=None) + +:alias_main: paddle.less_than +:alias: paddle.less_than,paddle.tensor.less_than,paddle.tensor.logic.less_than + +该OP逐元素地返回 :math:`x < y` 的逻辑值,相同位置前者输入小于后者输入则返回True,否则返回False。使用重载算子 `<` 可以有相同的计算函数效果。 + +**注:该OP输出的结果不返回梯度。** + +参数: + - **x** (Tensor) - 输入Tensor,支持的数据类型包括 float32, float64,int32, int64。 + - **y** (Tensor) - 输入Tensor,支持的数据类型包括 float32, float64, int32, int64。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + + +返回:输出结果的Tensor,输出Tensor的shape和输入一致,Tensor数据类型为bool。 + +返回类型:变量(Tensor) + +**代码示例**: + +.. 
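+补充示例(示意代码):按上文所述,重载算子 `<` 与 ``paddle.less_than`` 的计算结果一致,下面用同样的输入做一个对照,注释中的输出为预期结果。
+
+.. code-block:: python
+
+    import numpy as np
+    import paddle
+    import paddle.imperative as imperative
+    paddle.enable_imperative()
+    x = imperative.to_variable(np.array([1, 2, 3]))
+    y = imperative.to_variable(np.array([1, 3, 2]))
+    # 直接使用重载算子 `<`,与 paddle.less_than(x, y) 等价
+    result2 = x < y
+    print(result2.numpy()) # 预期与 paddle.less_than 相同: [False True False]
+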
code-block:: python + + import numpy as np + import paddle + import paddle.imperative as imperative + paddle.enable_imperative() + x = imperative.to_variable(np.array([1, 2, 3])) + y = imperative.to_variable(np.array([1, 3, 2])) + result1 = paddle.less_than(x, y) + print(result1.numpy()) # result1 = [False True False] + + diff --git a/doc/fluid/api_cn/tensor_cn/linspace_cn.rst b/doc/fluid/api_cn/tensor_cn/linspace_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..70c12181b9a95b0803ed2154352284855d5240d0 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/linspace_cn.rst @@ -0,0 +1,30 @@ +.. _cn_api_tensor_linspace: + +linspace +------------------------------- + +.. py:function:: paddle.linspace(start, stop, num, dtype=None, name=None) + +该OP返回一个Tensor,Tensor的值为在区间start和stop上均匀间隔的num个值,输出Tensor的长度为num。 +**注意:该OP不进行梯度计算** + +参数: + - **start** (int|float|Tensor) – ``start`` 是区间开始的变量,可以是一个浮点标量,或是一个shape为[1]的Tensor,该Tensor的数据类型可以是float32,float64,int32 或者int64。 + - **stop** (int|float|Tensor) – ``stop`` 是区间结束的变量,可以是一个浮点标量,或是一个shape为[1]的Tensor,该Tensor的数据类型可以是float32,float64,int32或者int64。 + - **num** (int|Tensor) – ``num`` 是给定区间内需要划分的区间数,可以是一个整型标量,或是一个shape为[1]的Tensor,该Tensor的数据类型需为int32。 + - **dtype** (np.dtype|str, 可选) – 输出Tensor的数据类型。数据类型可以是float32,float64,int32,int64。如果dtype的数据类型为None,输出Tensor数据类型为float32。 + - **name** (str, 可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:表示等间隔划分结果的1-D Tensor,该Tensor的shape大小为 :math:`[num]` ,在num为1的情况下,仅返回包含start元素值的Tensor。 + + +**代码示例**: + +.. code-block:: python + + import paddle + + paddle.disable_static() + data = paddle.linspace(0, 10, 5, dtype='float32') # [0.0, 2.5, 5.0, 7.5, 10.0] + data = paddle.linspace(0, 10, 1, dtype='float32') # [0.0] + diff --git a/doc/fluid/api_cn/tensor_cn/load_cn.rst b/doc/fluid/api_cn/tensor_cn/load_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..578340664a1a74a4015500b1523fd1a0a857cf30 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/load_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_load: + +load +------------------------------- +:doc_source: paddle.fluid.io.load + + diff --git a/doc/fluid/api_cn/tensor_cn/log1p_cn.rst b/doc/fluid/api_cn/tensor_cn/log1p_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ff96c608014e706910dbfc30fd11df02995717a8 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/log1p_cn.rst @@ -0,0 +1,44 @@ +.. _cn_api_paddle_tensor_log1p: + +log1p +------------------------------- + +.. py:function:: paddle.log1p(x, name=None) + +:alias_main: paddle.log1p +:alias: paddle.log1p,paddle.tensor.log1p,paddle.tensor.math.log1p + + + + +该OP计算Log1p(加一的自然对数)结果。 + +.. math:: + \\Out=ln(x+1)\\ + + +参数: + - **x** (Tensor) – 指定输入为一个多维的Tensor。数据类型为float32,float64。 + - **name** (str,可选) – 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None。 + +返回:Log1p算子自然对数输出 + +返回类型: Tensor - 该OP的输出为一个多维的Tensor,数据类型为输入一致。 + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.enable_imperative() + x = np.array([[1, 2], [3, 4]]).astype('float32') + x1 = paddle.imperative.to_variable(x) + + out1 = paddle.log1p(x1) + print(out1.numpy()) + # [[0.6931472 1.0986123] + # [1.3862944 1.609438 ]] + diff --git a/doc/fluid/api_cn/tensor_cn/log_cn.rst b/doc/fluid/api_cn/tensor_cn/log_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5cfc4f49472eda096bafea023bc329d6a8934943 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/log_cn.rst @@ -0,0 +1,44 @@ +.. 
_cn_api_tensor_cn_log: + +log +------------------------------- + +.. py:function:: paddle.log(x, name=None) + +:alias_main: paddle.log +:alias: paddle.log,paddle.tensor.log,paddle.tensor.math.log +:old_api: paddle.fluid.layers.log + + + + +Log激活函数(计算自然对数) + +.. math:: + \\Out=ln(x)\\ + + +参数: + - **x** (Tensor) – 指定输入为一个多维的Tensor。数据类型为float32,float64。 + - **name** (str,可选) – 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None。 + +返回:Log算子自然对数输出 + +返回类型: Tensor - 该OP的输出为一个多维的Tensor,数据类型为输入一致。 + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.enable_imperative() + x = np.array([[1, 2], [3, 4]]).astype('float32') + x1 = paddle.imperative.to_variable(x) + + out1 = paddle.log(x1) + print(out1.numpy()) + # [[0. 0.6931472] + # [1.0986123 1.3862944]] diff --git a/doc/fluid/api_cn/tensor_cn/logical_and_cn.rst b/doc/fluid/api_cn/tensor_cn/logical_and_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d070a69d29890d9e1cced5ffc180d954852294b4 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/logical_and_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_logical_and: + +logical_and +------------------------------- +:doc_source: paddle.fluid.layers.logical_and + + diff --git a/doc/fluid/api_cn/tensor_cn/logical_not_cn.rst b/doc/fluid/api_cn/tensor_cn/logical_not_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2e149a4ad61112a17082bbc2df712594718f749d --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/logical_not_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_logical_not: + +logical_not +------------------------------- +:doc_source: paddle.fluid.layers.logical_not + + diff --git a/doc/fluid/api_cn/tensor_cn/logical_or_cn.rst b/doc/fluid/api_cn/tensor_cn/logical_or_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c1fef616d202a0318ce68e44ba4ff62e5665db2f --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/logical_or_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_logical_or: + +logical_or +------------------------------- +:doc_source: paddle.fluid.layers.logical_or + + diff --git a/doc/fluid/api_cn/tensor_cn/logical_xor_cn.rst b/doc/fluid/api_cn/tensor_cn/logical_xor_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..35b34f4d3b58a89603d111152d2d848d2fc7beef --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/logical_xor_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_logical_xor: + +logical_xor +------------------------------- +:doc_source: paddle.fluid.layers.logical_xor + + diff --git a/doc/fluid/api_cn/tensor_cn/logsumexp_cn.rst b/doc/fluid/api_cn/tensor_cn/logsumexp_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c95d8d611840f53c99f2205dad88139a0633694f --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/logsumexp_cn.rst @@ -0,0 +1,37 @@ +.. _cn_api_paddle_tensor_math_logsumexp: + +logsumexp +------------------------------- + +.. py:function:: paddle.tensor.math.logsumexp(x, axis=None, keepdim=False, name=None) + +该OP沿着 ``axis`` 计算 ``x`` 的以e为底的指数的和的自然对数。计算公式如下: + +.. 
math:: + logsumexp(x) = \log\sum exp(x) + +参数 +:::::::::: + - x (Tensor) - 输入的Tensor,数据类型为:float32、float64 。 + - axis (int|list|tuple, 可选) - 指定对 ``x`` 进行计算的轴。``axis`` 可以是int、list(int)、tuple(int)。如果 ``axis`` 包含多个维度,则沿着 ``axis`` 中的所有轴进行计算。``axis`` 或者其中的元素值应该在范围[-D, D)内,D是 ``x`` 的维度。如果 ``axis`` 或者其中的元素值小于0,则等价于 :math:`axis + D` 。如果 ``axis`` 是None,则对 ``x`` 的全部元素计算平均值。默认值为None。 + - keepdim (bool, 可选) - 是否在输出Tensor中保留减小的维度。如果 ``keepdim`` 为True,则输出Tensor和 ``x`` 具有相同的维度(减少的维度除外,减少的维度的大小为1)。否则,输出Tensor的形状会在 ``axis`` 上进行squeeze操作。默认值为False。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +:::::::::: + ``Tensor`` ,沿着 ``axis`` 进行logsumexp计算的结果,数据类型和 ``x`` 相同。 + +代码示例 +:::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + x = np.array([[-1.5, 0., 2.], [3., 1.2, -2.4]]) + x = paddle.to_tensor(x) + out1 = paddle.logsumexp(x) # [3.4691226] + out2 = paddle.logsumexp(x, 1) # [2.15317821, 3.15684602] diff --git a/doc/fluid/api_cn/tensor_cn/masked_select_cn.rst b/doc/fluid/api_cn/tensor_cn/masked_select_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..cecffda2c283dde127660a0a1b7083f64dbe102d --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/masked_select_cn.rst @@ -0,0 +1,43 @@ +.. _cn_api_tensor_masked_select: + +masked_select +------------------------------- + +.. py:function:: paddle.masked_select(x, mask, name=None) + + + +该OP返回一个1-D 的Tensor, Tensor的值是根据 ``mask`` 对输入 ``x`` 进行选择的, ``mask`` 的数据类型是bool 。 + +参数: + - **x** (Tensor) - 输入Tensor, 数据类型为float32, float64,int32 或者int64。 + - **mask** (Tensor) - 用于索引的二进制掩码的Tensor,数据类型维bool。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:返回一个根据 ``mask`` 选择的的Tensor + + +抛出异常: + - ``TypeError``: - 如果 ``x`` 不是 Tensor 或者 ``x`` 是Tensor的时候的数据类型不是 float32, float64, int32, int64其中之一。 + - ``TypeError``: - 如果 ``mask`` 不是 Tensor 或者 ``mask`` 是Tensor的时候的数据类型不是 bool。 + +**代码示例**: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + data = np.array([[1.0, 2.0, 3.0, 4.0], + [5.0, 6.0, 7.0, 8.0], + [9.0, 10.0, 11.0, 12.0]]).astype('float32') + + mask_data = np.array([[True, False, False, False], + [True, True, False, False], + [True, False, False, False]]).astype('bool') + x = paddle.to_tensor(data) + mask = paddle.to_tensor(mask_data) + out = paddle.masked_select(x, mask) + #[1.0 5.0 6.0 9.0] + diff --git a/doc/fluid/api_cn/tensor_cn/math_cn.rst b/doc/fluid/api_cn/tensor_cn/math_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d16bb3003b2ae0b2aa4fe65255a045fc4a1f420a --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/math_cn.rst @@ -0,0 +1,3 @@ +math +------------------------------- +**版本升级,文档正在开发中** diff --git a/doc/fluid/api_cn/tensor_cn/matmul_cn.rst b/doc/fluid/api_cn/tensor_cn/matmul_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c12ac055bb88cb94f32f91554f72684a3852da70 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/matmul_cn.rst @@ -0,0 +1,93 @@ +.. _cn_api_tensor_matmul: + +matmul +------------------------------- + +.. 
py:function:: paddle.matmul(x, y, transpose_x=False, transpose_y=False, name=None) + +该op是计算两个Tensor的乘积,遵循完整的广播规则,关于广播规则,请参考 :ref:`use_guide_broadcasting` 。 +并且其行为与 ``numpy.matmul`` 一致。目前,输入张量的维数可以是任意数量, ``matmul`` 可以用于 +实现 ``dot`` , ``matmul`` 和 ``batchmatmul`` 。实际行为取决于输入 ``x`` 、输入 ``y`` 、 ``transpose_x`` , +``transpose_y`` 。具体如下: + +- 如果 ``transpose`` 为真,则对应 Tensor 的后两维会转置。如果Tensor的一维,则转置无效。假定 ``x`` 是一个 shape=[D] 的一维 Tensor,则 ``x`` 视为 [1, D]。然而, ``y`` 是一个shape=[D]的一维Tensor,则视为[D, 1]。 + +乘法行为取决于 ``x`` 和 ``y`` 的尺寸。 具体如下: + +- 如果两个张量均为一维,则获得点积结果。 + +- 如果两个张量都是二维的,则获得矩阵与矩阵的乘积。 + +- 如果 ``x`` 是1维的,而 ``y`` 是2维的,则将1放在 ``x`` 维度之前,以进行矩阵乘法。矩阵相乘后,将删除前置尺寸。 + +- 如果 ``x`` 是2维的,而 ``y`` 是1维的,获得矩阵与向量的乘积。 + +- 如果两个输入至少为一维,且至少一个输入为N维(其中N> 2),则将获得批矩阵乘法。 如果第一个自变量是一维的,则将1放在其维度的前面,以便进行批量矩阵的乘法运算,然后将其删除。 如果第二个参数为一维,则将1附加到其维度后面,以实现成批矩阵倍数的目的,然后将其删除。 根据广播规则广播非矩阵维度(不包括最后两个维度)。 例如,如果输入 ``x`` 是(j,1,n,m)Tensor,另一个 ``y`` 是(k,m,p)Tensor,则out将是(j,k,n,p)张量。 + +参数 +::::::::: + - **x** (Tensor) : 输入变量,类型为 Tensor,数据类型为float32, float64。 + - **y** (Tensor) : 输入变量,类型为 Tensor,数据类型为float32, float64。 + - **transpose_x** (bool,可选) : 相乘前是否转置 x,默认值为False。 + - **transpose_y** (bool,可选) : 相乘前是否转置 y,默认值为False。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回: +::::::::: + + - Tensor,矩阵相乘后的结果,数据类型和输入数据类型一致。 + +代码示例 +:::::::::: + +.. code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + + # vector * vector + x_data = np.random.random([10]).astype(np.float32) + y_data = np.random.random([10]).astype(np.float32) + x = paddle.to_tensor(x_data) + y = paddle.to_tensor(y_data) + z = paddle.matmul(x, y) + print(z.numpy().shape) + # [1] + + # matrix * vector + x_data = np.random.random([10, 5]).astype(np.float32) + y_data = np.random.random([5]).astype(np.float32) + x = paddle.to_tensor(x_data) + y = paddle.to_tensor(y_data) + z = paddle.matmul(x, y) + print(z.numpy().shape) + # [10] + + # batched matrix * broadcasted vector + x_data = np.random.random([10, 5, 2]).astype(np.float32) + y_data = np.random.random([2]).astype(np.float32) + x = paddle.to_tensor(x_data) + y = paddle.to_tensor(y_data) + z = paddle.matmul(x, y) + print(z.numpy().shape) + # [10, 5] + + # batched matrix * batched matrix + x_data = np.random.random([10, 5, 2]).astype(np.float32) + y_data = np.random.random([10, 2, 5]).astype(np.float32) + x = paddle.to_tensor(x_data) + y = paddle.to_tensor(y_data) + z = paddle.matmul(x, y) + print(z.numpy().shape) + # [10, 5, 5] + + # batched matrix * broadcasted matrix + x_data = np.random.random([10, 1, 5, 2]).astype(np.float32) + y_data = np.random.random([1, 3, 2, 5]).astype(np.float32) + x = paddle.to_tensor(x_data) + y = paddle.to_tensor(y_data) + z = paddle.matmul(x, y) + print(z.numpy().shape) + # [10, 3, 5, 5] + diff --git a/doc/fluid/api_cn/tensor_cn/max_cn.rst b/doc/fluid/api_cn/tensor_cn/max_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e0a3520d5490dd79e937172842d6546fe2904cd1 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/max_cn.rst @@ -0,0 +1,62 @@ +.. _cn_api_paddle_tensor_max: + +max +------------------------------- + +.. 
py:function:: paddle.tensor.max(x, axis=None, keepdim=False, name=None) + +:alias_main: paddle.max +:alias: paddle.max,paddle.tensor.max,paddle.tensor.math.max + +该OP是对指定维度上的Tensor元素求最大值运算,并输出相应的计算结果。 + +参数 +::::::::: + - **x** (Tensor)- Tensor,支持数据类型为float32,float64,int32,int64。 + - **axis** (list | int ,可选)- 求最大值运算的维度。如果为None,则计算所有元素的最大值并返回包含单个元素的Tensor变量,否则必须在 :math:`[-x.ndim, x.ndim]` 范围内。如果 :math:`axis[i] <0` ,则维度将变为 :math:`x.ndim+axis[i]` ,默认值为None。 + - **keepdim** (bool)- 是否在输出Tensor中保留减小的维度。如果keepdim 为 False,结果张量的维度将比输入张量的小,默认值为False。 + - **name** (str, 可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回 +::::::::: + Tensor, 在指定axis上进行求最大值运算的Tensor,数据类型和输入数据类型一致。 + + +代码示例 +:::::::::: + +.. code-block:: python + + import numpy as np + import paddle + + paddle.disable_static() + + # data_x is a variable with shape [2, 4] + # the axis is a int element + x = paddle.to_tensor([[0.2, 0.3, 0.5, 0.9], + [0.1, 0.2, 0.6, 0.7]]) + result1 = paddle.max(x) + print(result1.numpy()) + #[0.9] + result2 = paddle.max(x, axis=0) + print(result2.numpy()) + #[0.2 0.3 0.6 0.9] + result3 = paddle.max(x, axis=-1) + print(result3.numpy()) + #[0.9 0.7] + result4 = paddle.max(x, axis=1, keepdim=True) + print(result4.numpy()) + #[[0.9] + # [0.7]] + + # data_y is a variable with shape [2, 2, 2] + # the axis is list + y = paddle.to_tensor([[[1.0, 2.0], [3.0, 4.0]], + [[5.0, 6.0], [7.0, 8.0]]]) + result5 = paddle.max(y, axis=[1, 2]) + print(result5.numpy()) + #[4. 8.] + result6 = paddle.max(y, axis=[0, 1]) + print(result6.numpy()) + #[7. 8.] diff --git a/doc/fluid/api_cn/tensor_cn/maximum_cn.rst b/doc/fluid/api_cn/tensor_cn/maximum_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..bad60bbd1d2efadbfd46e257665757523c1e4a1e --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/maximum_cn.rst @@ -0,0 +1,87 @@ +.. _cn_api_paddle_tensor_maximum: + +maximum +------------------------------- + +.. py:function:: paddle.tensor.maximum(x, y, axis=-1, name=None) + +:alias_main: paddle.maximum +:alias: paddle.maximum,paddle.tensor.maximum,paddle.tensor.math.maximum + +该OP逐元素对比输入的两个多维Tensor,并且把各个位置更大的元素保存到返回结果中。 + +等式是: + +.. math:: + Out = max(X, Y) + +- :math:`X` :多维Tensor。 +- :math:`Y` :多维Tensor。 + +此运算算子有两种情况: + 1. :math:`Y` 的 ``shape`` 与 :math:`X` 相同。 + 2. :math:`Y` 的 ``shape`` 是 :math:`X` 的连续子序列。 + +对于情况2: + 1. 用 :math:`Y` 的 ``shape`` 匹配 :math:`X` 的 ``shape``,其中 ``axis`` 是 :math:`Y` 在 :math:`X` 上的起始维度的位置。 + 2. 如果 ``axis`` < 0(默认值为-1),则 :math:`axis = abs(X.ndim - Y.ndim) - axis - 1` 。 + 3. 考虑到子序列, :math:`Y` 的大小为1的尾部维度将被忽略,例如shape(Y)=(2,1)=>(2)。 + +例如: + +.. code-block:: text + + shape(X) = (2, 3, 4, 5), shape(Y) = (,) + shape(X) = (2, 3, 4, 5), shape(Y) = (5,) + shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5), with axis=-1(default) or axis=2 + shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1 + shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0 + shape(X) = (2, 3, 4, 5), shape(Y) = (2, 1), with axis=0 + +具体的飞桨的广播(broadcasting)机制可以参考 `<> `_ 。 + +参数 +::::::::: + - **x** (Tensor)- 多维Tensor。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64`` 。 + - **y** (Tensor)- 多维Tensor。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64`` 。 + - **axis** (int32, 可选)- Y的维度对应到X维度上时的索引。默认值为 -1。 + - **name** (string, 可选)- 输出的名字。默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 + +返回 +::::::::: + Tensor,维度和数据类型与 ``x`` 相同的多维Tensor。 + +代码示例 +:::::::::: + +.. 
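+补充示例(示意代码,仅用于演示上文“情况2”的形状规则):取 shape(X) = (2, 3, 4, 5)、shape(Y) = (3, 4)、axis=1 的组合,数值为随意构造的演示数据,注释为预期结果。
+
+.. code-block:: python
+
+    import numpy as np
+    import paddle
+    paddle.disable_static()
+
+    # y 的形状 (3, 4) 从 axis=1 开始与 x 的形状 (2, 3, 4, 5) 对齐并广播
+    x = paddle.to_tensor(np.zeros([2, 3, 4, 5]).astype('float32'))
+    y = paddle.to_tensor(np.ones([3, 4]).astype('float32'))
+    res = paddle.maximum(x, y, axis=1)
+    print(res.numpy().shape) # 预期输出形状: (2, 3, 4, 5)
+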
code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + + x = paddle.to_tensor([[1, 2], [3, 4]]) + y = paddle.to_tensor([[5, 6], [7, 8]]) + res = paddle.maximum(x, y) + print(res.numpy()) + #[[5. 6.] + # [7. 8.]] + + x = paddle.to_tensor([[[1, 2, 3], [1, 2, 3]]]) + y = paddle.to_tensor([1, 2]) + res = paddle.maximum(x, y, axis=1) + print(res.numpy()) + #[[[1. 2. 3.] + # [2. 2. 3.]]] + + x = paddle.to_tensor([2, 3, 5], dtype='float32') + y = paddle.to_tensor([1, 4, np.nan], dtype='float32') + res = paddle.maximum(x, y) + print(res.numpy()) + #[ 2. 4. nan] + + x = paddle.to_tensor([5, 3, np.inf], dtype='float32') + y = paddle.to_tensor([1, 4, 5], dtype='float32') + res = paddle.maximum(x, y) + print(res.numpy()) + #[ 5. 4. inf] diff --git a/doc/fluid/api_cn/tensor_cn/mean_cn.rst b/doc/fluid/api_cn/tensor_cn/mean_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8ac774a6b4471daca40ba4ab7ee8308fe3539b84 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/mean_cn.rst @@ -0,0 +1,53 @@ +.. _cn_api_tensor_cn_mean: + +mean +------------------------------- + +.. py:function:: paddle.mean(x, axis=None, keepdim=False, name=None) + + + +该OP沿 ``axis`` 计算 ``x`` 的平均值。 + +参数 +:::::::::: + - x (Tensor) - 输入的Tensor,数据类型为:float32、float64。 + - axis (int|list|tuple, 可选) - 指定对 ``x`` 进行计算的轴。``axis`` 可以是int、list(int)、tuple(int)。如果 ``axis`` 包含多个维度,则沿着 ``axis`` 中的所有轴进行计算。``axis`` 或者其中的元素值应该在范围[-D, D)内,D是 ``x`` 的维度。如果 ``axis`` 或者其中的元素值小于0,则等价于 :math:`axis + D` 。如果 ``axis`` 是None,则对 ``x`` 的全部元素计算平均值。默认值为None。 + - keepdim (bool, 可选) - 是否在输出Tensor中保留减小的维度。如果 ``keepdim`` 为True,则输出Tensor和 ``x`` 具有相同的维度(减少的维度除外,减少的维度的大小为1)。否则,输出Tensor的形状会在 ``axis`` 上进行squeeze操作。默认值为False。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +:::::::::: + ``Tensor`` ,沿着 ``axis`` 进行平均值计算的结果,数据类型和 ``x`` 相同。 + +代码示例 +:::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + x = np.array([[[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12]], + [[13, 14, 15, 16], + [17, 18, 19, 20], + [21, 22, 23, 24]]], 'float32') + x = paddle.to_tensor(x) + out1 = paddle.mean(x) + # [12.5] + out2 = paddle.mean(x, axis=-1) + # [[ 2.5 6.5 10.5] + # [14.5 18.5 22.5]] + out3 = paddle.mean(x, axis=-1, keepdim=True) + # [[[ 2.5] + # [ 6.5] + # [10.5]] + # [[14.5] + # [18.5] + # [22.5]]] + out4 = paddle.mean(x, axis=[0, 2]) + # [ 8.5 12.5 16.5] diff --git a/doc/fluid/api_cn/tensor_cn/meshgrid_cn.rst b/doc/fluid/api_cn/tensor_cn/meshgrid_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6c8aedc1a1aa24a952e668c1f5c0ce53756f8d9e --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/meshgrid_cn.rst @@ -0,0 +1,43 @@ + +.. _cn_api_paddle_tensor_meshgrid: + +meshgrid +------------------------------- + +.. py:function:: paddle.tensor.meshgrid(*args, **kargs) + +:alias_main: paddle.meshgrid +:alias: paddle.meshgrid, paddle.tensor.meshgrid, paddle.tensor.creation.meshgrid + + + +该OP的输入是张量或者包含张量的列表, 包含 k 个一维张量,对每个张量做扩充操作,输出 k 个 k 维张量。 + +参数: + - \* **args** (Variable|Variable数组)- 输入变量为 k 个一维张量,形状分别为(N1,), (N2,), ..., (Nk, )。支持数据类型为float32,float64,int32,int64。 + - ** **kargs** (可选)- 目前只接受name参数(str),具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回: +k 个 k 维张量,每个张量的形状均为(N1, N2, ..., Nk)。 + +返回类型: 变量(Variable) + +**代码示例** + + + +.. 
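+补充示例(示意代码):用两个很小的一维张量直观展示扩充结果;注释中的输出为预期值,这里假设其扩充方式与 NumPy 的 ``meshgrid(indexing='ij')`` 一致。
+
+.. code-block:: python
+
+    import numpy as np
+    import paddle
+
+    paddle.enable_imperative()
+
+    x = paddle.imperative.to_variable(np.array([1, 2, 3]).astype('int32'))
+    y = paddle.imperative.to_variable(np.array([4, 5]).astype('int32'))
+    grid_x, grid_y = paddle.tensor.meshgrid(x, y)
+    print(grid_x.numpy())
+    # 预期:
+    # [[1 1]
+    #  [2 2]
+    #  [3 3]]
+    print(grid_y.numpy())
+    # 预期:
+    # [[4 5]
+    #  [4 5]
+    #  [4 5]]
+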
code-block:: python + + #动态图示例 + import paddle + import numpy as np + + paddle.enable_imperative() + + input_3 = np.random.randint(0, 100, [100, ]).astype('int32') + input_4 = np.random.randint(0, 100, [200, ]).astype('int32') + tensor_3 = paddle.imperative.to_variable(input_3) + tensor_4 = paddle.imperative.to_variable(input_4) + grid_x, grid_y = paddle.tensor.meshgrid(tensor_3, tensor_4) + #the shape of grid_x is (100, 200) + #the shape of grid_y is (100, 200) diff --git a/doc/fluid/api_cn/tensor_cn/min_cn.rst b/doc/fluid/api_cn/tensor_cn/min_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d3034d0d0054712bb80de2b2de69e7aa0a75ae18 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/min_cn.rst @@ -0,0 +1,57 @@ +.. _cn_api_paddle_tensor_min: + +min +------------------------------- + +.. py:function:: paddle.tensor.min(x, axis=None, keepdim=False, name=None) + +:alias_main: paddle.min +:alias: paddle.min,paddle.tensor.min,paddle.tensor.math.min + +该OP是对指定维度上的Tensor元素求最小值运算,并输出相应的计算结果。 + +参数 +::::::::: + - **x** (Tensor)- Tensor,支持数据类型为float32,float64,int32,int64。 + - **axis** (list | int ,可选)- 求最小值运算的维度。如果为None,则计算所有元素的最小值并返回包含单个元素的Tensor变量,否则必须在 :math:`[−x.ndim, x.ndim]` 范围内。如果 :math:`axis[i] < 0` ,则维度将变为 :math:`x.ndim+axis[i]` ,默认值为None。 + - **keepdim** (bool)- 是否在输出Tensor中保留减小的维度。如果keepdim 为False,结果张量的维度将比输入张量的小,默认值为False。 + - **name** (str, 可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回 +::::::::: + Tensor,在指定axis上进行求最小值运算的Tensor,数据类型和输入数据类型一致。 + + +代码示例 +:::::::::: +.. code-block:: python + + import paddle + paddle.disable_static() + + # the axis is a int element + x = paddle.to_tensor([[0.2, 0.3, 0.5, 0.9], + [0.1, 0.2, 0.6, 0.7]]) + result1 = paddle.min(x) + print(result1.numpy()) + #[0.1] + result2 = paddle.min(x, axis=0) + print(result2.numpy()) + #[0.1 0.2 0.5 0.7] + result3 = paddle.min(x, axis=-1) + print(result3.numpy()) + #[0.2 0.1] + result4 = paddle.min(x, axis=1, keepdim=True) + print(result4.numpy()) + #[[0.2] + # [0.1]] + + # the axis is list + y = paddle.to_tensor([[[1.0, 2.0], [3.0, 4.0]], + [[5.0, 6.0], [7.0, 8.0]]]) + result5 = paddle.min(y, axis=[1, 2]) + print(result5.numpy()) + #[1. 5.] + result6 = paddle.min(y, axis=[0, 1]) + print(result6.numpy()) + #[1. 2.] diff --git a/doc/fluid/api_cn/tensor_cn/minimum_cn.rst b/doc/fluid/api_cn/tensor_cn/minimum_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1a8fda137aed190fdc730fa0ff07a1b21536a154 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/minimum_cn.rst @@ -0,0 +1,88 @@ +.. _cn_api_paddle_tensor_minimum: + +minimum +------------------------------- + +.. py:function:: paddle.tensor.minimum(x, y, axis=-1, name=None) + +:alias_main: paddle.minimum +:alias: paddle.minimum,paddle.tensor.minimum,paddle.tensor.math.minimum + +该OP逐元素对比输入的两个多维Tensor,并且把各个位置更小的元素保存到返回结果中。 + +等式是: + +.. math:: + Out = min(X, Y) + +- :math:`X` :多维Tensor。 +- :math:`Y` :多维Tensor。 + +此运算算子有两种情况: + 1. :math:`Y` 的 ``shape`` 与 :math:`X` 相同。 + 2. :math:`Y` 的 ``shape`` 是 :math:`X` 的连续子序列。 + +对于情况2: + 1. 用 :math:`Y` 的 ``shape`` 匹配 :math:`X` 的 ``shape``,其中 ``axis`` 是 :math:`Y` 在 :math:`X` 上的起始维度的位置。 + 2. 如果 ``axis`` < 0(默认值为-1),则 :math:`axis = abs(X.ndim - Y.ndim) - axis - 1` 。 + 3. 考虑到子序列, :math:`Y` 的大小为1的尾部维度将被忽略,例如shape(Y)=(2,1)=>(2)。 + +例如: + +.. 
code-block:: text
+
+    shape(X) = (2, 3, 4, 5), shape(Y) = (,)
+    shape(X) = (2, 3, 4, 5), shape(Y) = (5,)
+    shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5), with axis=-1(default) or axis=2
+    shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1
+    shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0
+    shape(X) = (2, 3, 4, 5), shape(Y) = (2, 1), with axis=0
+
+具体的飞桨的广播(broadcasting)机制可以参考 :ref:`use_guide_broadcasting` 。
+
+参数
+:::::::::
+    - **x** (Tensor)- 多维Tensor。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64`` 。
+    - **y** (Tensor)- 多维Tensor。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64`` 。
+    - **axis** (int32, 可选)- Y的维度对应到X维度上时的索引。默认值为 -1。
+    - **name** (string, 可选)- 输出的名字。默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。
+
+返回
+:::::::::
+    Tensor,维度和数据类型与 ``x`` 相同的多维Tensor。
+
+
+代码示例
+::::::::::
+
+.. code-block:: python
+
+    import paddle
+    import numpy as np
+    paddle.disable_static()
+
+    x = paddle.to_tensor([[1, 2], [3, 4]], dtype='float32')
+    y = paddle.to_tensor([[5, 6], [7, 8]], dtype='float32')
+    res = paddle.minimum(x, y)
+    print(res.numpy())
+    #[[1. 2.]
+    # [3. 4.]]
+
+    x = paddle.to_tensor([[[1, 2, 3], [1, 2, 3]]], dtype='float32')
+    y = paddle.to_tensor([1, 2], dtype='float32')
+    res = paddle.minimum(x, y, axis=1)
+    print(res.numpy())
+    #[[[1. 1. 1.]
+    #  [1. 2. 2.]]]
+
+    x = paddle.to_tensor([2, 3, 5], dtype='float32')
+    y = paddle.to_tensor([1, 4, np.nan], dtype='float32')
+    res = paddle.minimum(x, y)
+    print(res.numpy())
+    #[ 1. 3. nan]
+
+    x = paddle.to_tensor([5, 3, np.inf], dtype='float32')
+    y = paddle.to_tensor([1, 4, 5], dtype='float32')
+    res = paddle.minimum(x, y)
+    print(res.numpy())
+    #[1. 3. 5.]
diff --git a/doc/fluid/api_cn/tensor_cn/mm_cn.rst b/doc/fluid/api_cn/tensor_cn/mm_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..083f02ea991a5d6aff06198a41dc127cd9f5f4c3
--- /dev/null
+++ b/doc/fluid/api_cn/tensor_cn/mm_cn.rst
@@ -0,0 +1,80 @@
+.. _cn_api_tensor_mm:
+
+mm
+-------------------------------
+
+.. py:function:: paddle.mm(input, mat2, out=None, name=None)
+
+:alias_main: paddle.mm
+:alias: paddle.mm,paddle.tensor.mm,paddle.tensor.math.mm
+:update_api: paddle.fluid.layers.matmul
+
+
+
+用于两个输入矩阵的相乘。
+
+两个输入的形状可为任意维度,但当任一输入维度大于3时,两个输入的维度必须相等。
+
+如果原始 Tensor input 或 mat2 的秩为 1 且未转置,则矩阵相乘后的前置或附加维度 1 将移除。
+
+参数:
+    - **input** (Variable) : 输入变量,类型为 Tensor 或 LoDTensor。
+    - **mat2** (Variable) : 输入变量,类型为 Tensor 或 LoDTensor。
+    - **out** (Variable, 可选) – 指定存储运算结果的Tensor。如果设置为None或者不设置,将创建新的Tensor存储运算结果,默认值为None。
+    - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。
+
+返回:
+    - Variable (Tensor / LoDTensor),矩阵相乘后的结果。
+
+返回类型:
+    - Variable(变量)。
+
+::
+
+    * 例 1:
+
+      input: [B, ..., M, K], mat2: [B, ..., K, N]
+      out: [B, ..., M, N]
+
+    * 例 2:
+
+      input: [B, M, K], mat2: [B, K, N]
+      out: [B, M, N]
+
+    * 例 3:
+
+      input: [B, M, K], mat2: [K, N]
+      out: [B, M, N]
+
+    * 例 4:
+
+      input: [M, K], mat2: [K, N]
+      out: [M, N]
+
+    * 例 5:
+
+      input: [B, M, K], mat2: [K]
+      out: [B, M]
+
+    * 例 6:
+
+      input: [K], mat2: [K]
+      out: [1]
+
+    * 例 7:
+
+      input: [M], mat2: [N]
+      out: [M, N]
+
+
+**代码示例**:
+
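+下面先给出一个动态图下的补充示例(示意代码,对应上文“例 2”的形状组合,输入为随机构造的演示数据,注释为预期形状):
+
+.. code-block:: python
+
+    import numpy as np
+    import paddle
+    import paddle.fluid as fluid
+
+    with fluid.dygraph.guard():
+        # input: [B, M, K] 与 mat2: [B, K, N] 相乘,得到 [B, M, N]
+        x = fluid.dygraph.to_variable(np.random.random([2, 3, 4]).astype('float32'))
+        y = fluid.dygraph.to_variable(np.random.random([2, 4, 5]).astype('float32'))
+        out = paddle.mm(x, y)
+        print(out.shape) # 预期输出形状: [2, 3, 5]
+
+静态图(组网)方式的示例如下:
+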
+.. code-block:: python
+
+    import paddle
+    import paddle.fluid as fluid
+
+    input = fluid.data(name='input', shape=[2, 3], dtype='float32')
+    mat2 = fluid.data(name='mat2', shape=[3, 2], dtype='float32')
+    out = paddle.mm(input, mat2) # out shape is [2, 2]
+
diff --git a/doc/fluid/api_cn/tensor_cn/mod_cn.rst b/doc/fluid/api_cn/tensor_cn/mod_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b7f34c81e5937ac5ed96e7b63a9d2cc080c5b01d
--- /dev/null
+++ b/doc/fluid/api_cn/tensor_cn/mod_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_tensor_cn_mod:
+
+mod
+-------------------------------
+:doc_source: paddle.tensor.remainder
+
+
diff --git a/doc/fluid/api_cn/tensor_cn/mul_cn.rst b/doc/fluid/api_cn/tensor_cn/mul_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6e5fea3571adf9ca98948b6bbb38c44cfd44ad9d
--- /dev/null
+++ b/doc/fluid/api_cn/tensor_cn/mul_cn.rst
@@ -0,0 +1,51 @@
+.. _cn_api_tensor_mul:
+
+mul
+-------------------------------
+
+.. py:function:: paddle.mul(x, y, x_num_col_dims=1, y_num_col_dims=1, out=None, name=None)
+
+:alias_main: paddle.mul
+:alias: paddle.mul,paddle.tensor.mul,paddle.tensor.math.mul
+:update_api: paddle.fluid.layers.mul
+
+
+
+mul算子
+此运算用于对输入x和y执行矩阵乘法。
+公式是:
+
+.. math::
+        out = x * y
+
+输入x和y都可以携带LoD(详细程度)信息。但输出仅与输入x共享LoD信息。
+
+参数:
+    - **x** (Variable) - 乘法运算的第一个输入张量Tensor/LoDTensor。
+    - **y** (Variable) - 乘法运算的第二个输入张量Tensor/LoDTensor。
+    - **x_num_col_dims** (int,可选) - 默认值1, 可以将具有两个以上维度的张量作为输入。如果输入x是具有多于两个维度的张量,则输入x将先展平为二维矩阵。展平规则是:前 ``num_col_dims`` 将被展平成最终矩阵的第一个维度(矩阵的高度),其余的 rank(x) - num_col_dims 维度被展平成最终矩阵的第二个维度(矩阵的宽度)。结果是展平矩阵的高度等于x的前 ``x_num_col_dims`` 维数的乘积,展平矩阵的宽度等于x的最后一个 rank(x)- ``num_col_dims`` 个剩余维度的维数的乘积。例如,假设x是一个5-D张量,形状为(2,3,4,5,6),并且 ``x_num_col_dims`` 的值为3,则扁平化后的张量形状即为(2x3x4,5x6)=(24,30)。
+    - **y_num_col_dims** (int,可选) - 默认值1, 可以将具有两个以上维度的张量作为输入。如果输入y是具有多于两个维度的张量,则y将首先展平为二维矩阵。 ``y_num_col_dims`` 属性确定y的展平方式。有关更多详细信息,请参阅 ``x_num_col_dims`` 的注释。
+    - **out** (Variable, 可选) - 默认值None,如果out不为空,则矩阵乘法运算结果存储在out变量中。
+    - **name** (str,可选) - 默认值None,输出的名称。该参数供开发人员打印调试信息时使用,具体用法参见 :ref:`api_guide_Name`。当out和name同时不为空时,结果输出变量名与out保持一致。
+
+返回:Variable(Tensor)乘法运算输出张量。
+
+返回类型:变量(Variable)。
+
+**代码示例**
+
+.. code-block:: python
+
+    import paddle
+    import paddle.fluid as fluid
+    dataX = fluid.data(name="dataX", shape=[2, 5], dtype="float32")
+    dataY = fluid.data(name="dataY", shape=[5, 3], dtype="float32")
+
+    res = fluid.data(name="output", shape=[2, 3], dtype="float32")
+    output = paddle.mul(dataX, dataY,
+                        x_num_col_dims = 1,
+                        y_num_col_dims = 1,
+                        out=res)
+
+
diff --git a/doc/fluid/api_cn/tensor_cn/multiplex_cn.rst b/doc/fluid/api_cn/tensor_cn/multiplex_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..dce5624bcfc347807431d8ba8fa7839fa7cc5a36
--- /dev/null
+++ b/doc/fluid/api_cn/tensor_cn/multiplex_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_tensor_cn_multiplex:
+
+multiplex
+-------------------------------
+:doc_source: paddle.fluid.layers.multiplex
+
+
diff --git a/doc/fluid/api_cn/tensor_cn/mutiply_cn.rst b/doc/fluid/api_cn/tensor_cn/mutiply_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6ae587b7227a313b847e9cca7ba4948cf2335f63
--- /dev/null
+++ b/doc/fluid/api_cn/tensor_cn/mutiply_cn.rst
@@ -0,0 +1,41 @@
+.. _cn_api_tensor_multiply:
+
+multiply
+--------
+
+.. py:function:: paddle.multiply(x, y, name=None)
+
+该OP是逐元素相乘算子,输入 ``x`` 与输入 ``y`` 逐元素相乘,并将各个位置的输出元素保存到返回结果中。
+输入 ``x`` 与输入 ``y`` 必须可以广播为相同形状, 关于广播规则,请参考 :ref:`use_guide_broadcasting`
+
+等式为:
+
+.. math::
+        Out = X * Y
+
+- :math:`X` :多维Tensor。
+- :math:`Y` :多维Tensor。
+
+参数:
+    - x(Tensor)- 多维Tensor。数据类型为float32 、float64、int32或int64。
+    - y(Tensor)- 多维Tensor。数据类型为float32 、float64、int32或int64。
+    - name(str,可选)- 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。
+
+
+返回: 多维 Tensor, 数据类型与 ``x`` 相同,维度为广播后的形状。
+
+
+**代码示例**
+
+.. code-block:: python
+
+    import paddle
+    import numpy as np
+
+    paddle.disable_static()
+    np_x = np.array([2, 3, 4]).astype('float64')
+    np_y = np.array([1, 5, 2]).astype('float64')
+    x = paddle.to_tensor(np_x)
+    y = paddle.to_tensor(np_y)
+    z = paddle.multiply(x, y)
+    print(z.numpy()) # [2., 15., 8.]
diff --git a/doc/fluid/api_cn/tensor_cn/nonzero_cn.rst b/doc/fluid/api_cn/tensor_cn/nonzero_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..79cdb2e56c7c56af7192fe2ece7af56e42b3a6f1
--- /dev/null
+++ b/doc/fluid/api_cn/tensor_cn/nonzero_cn.rst
@@ -0,0 +1,72 @@
+.. _cn_api_tensor_search_nonzero:
+
+nonzero
+-------------------------------
+
+.. py:function:: paddle.nonzero(input, as_tuple=False)
+
+:alias_main: paddle.nonzero
+:alias: paddle.nonzero,paddle.tensor.nonzero,paddle.tensor.search.nonzero
+
+
+
+该OP返回输入 ``input`` 中非零元素的坐标。如果输入 ``input`` 有 ``n`` 维,共包含 ``z`` 个非零元素,当 ``as_tuple = False`` 时,
+返回结果是一个 ``shape`` 等于 ``[z x n]`` 的 ``Tensor`` , 第 ``i`` 行代表输入中第 ``i`` 个非零元素的坐标;当 ``as_tuple = True`` 时,
+返回结果是由 ``n`` 个大小为 ``z`` 的 ``1-D Tensor`` 构成的元组,第 ``i`` 个 ``1-D Tensor`` 记录输入的非零元素在第 ``i`` 维的坐标。
+
+**参数**:
+    - **input** (Variable)– 输入张量。
+    - **as_tuple** (bool, optional) - 返回格式。是否以 ``1-D Tensor`` 构成的元组格式返回。
+
+**返回**:
+    - **Variable** (Tensor or tuple(1-D Tensor)),数据类型为 **INT64** 。
+
+**代码示例**:
+
+.. code-block:: python
+
+    import paddle
+    import paddle.fluid as fluid
+    import numpy as np
+
+    data1 = np.array([[1.0, 0.0, 0.0],
+                      [0.0, 2.0, 0.0],
+                      [0.0, 0.0, 3.0]])
+    data2 = np.array([0.0, 1.0, 0.0, 3.0])
+    data3 = np.array([0.0, 0.0, 0.0])
+    with fluid.dygraph.guard():
+        x1 = fluid.dygraph.to_variable(data1)
+        x2 = fluid.dygraph.to_variable(data2)
+        x3 = fluid.dygraph.to_variable(data3)
+        out_z1 = paddle.nonzero(x1)
+        print(out_z1.numpy())
+        #[[0 0]
+        # [1 1]
+        # [2 2]]
+        out_z1_tuple = paddle.nonzero(x1, as_tuple=True)
+        for out in out_z1_tuple:
+            print(out.numpy())
+        #[[0]
+        # [1]
+        # [2]]
+        #[[0]
+        # [1]
+        # [2]]
+        out_z2 = paddle.nonzero(x2)
+        print(out_z2.numpy())
+        #[[1]
+        # [3]]
+        out_z2_tuple = paddle.nonzero(x2, as_tuple=True)
+        for out in out_z2_tuple:
+            print(out.numpy())
+        #[[1]
+        # [3]]
+        out_z3 = paddle.nonzero(x3)
+        print(out_z3.numpy())
+        #[]
+        out_z3_tuple = paddle.nonzero(x3, as_tuple=True)
+        for out in out_z3_tuple:
+            print(out.numpy())
+        #[]
+
+
diff --git a/doc/fluid/api_cn/tensor_cn/norm_cn.rst b/doc/fluid/api_cn/tensor_cn/norm_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..ea6ebd82d2aff71ea5164ce22063a5bf5723cdd7
--- /dev/null
+++ b/doc/fluid/api_cn/tensor_cn/norm_cn.rst
@@ -0,0 +1,43 @@
+.. _cn_api_tensor_norm:
+
+norm
+-------------------------------
+
+.. py:function:: paddle.norm(input, p='fro', axis=None, keepdim=False, out=None, name=None)
+
+:alias_main: paddle.norm
+:alias: paddle.norm,paddle.tensor.norm,paddle.tensor.linalg.norm
+
+
+
+该OP将计算给定Tensor的矩阵范数(Frobenius 范数)和向量范数(向量1范数、2范数或者通常的p范数)。
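+下面先给出一个计算向量2范数的补充示例(示意代码,假设在动态图模式下运行,数值为演示数据;参数的详细说明见下文):
+
+.. code-block:: python
+
+    import numpy as np
+    import paddle
+    import paddle.fluid as fluid
+
+    with fluid.dygraph.guard():
+        x = fluid.dygraph.to_variable(np.array([[3.0, 4.0], [6.0, 8.0]]).astype('float32'))
+        # 沿最后一维计算2范数: sqrt(3^2 + 4^2) = 5, sqrt(6^2 + 8^2) = 10
+        out_pnorm = paddle.norm(x, p=2, axis=-1)
+        print(out_pnorm.numpy()) # 预期: [ 5. 10.]
+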
+ +参数: + - **input** (Variable) - 输入Tensor。维度为多维,数据类型为float32或float64。 + - **p** (float|string, 可选) - 范数的种类。目前支持的值为 `fro`、 `1`、 `2`,和任何正实数p对应的p范数。 + - **axis** (int|list, 可选) - 使用范数计算的轴。如果 ``axis`` 为int或者只有一个元素的list,``norm`` API会计算输入Tensor的向量范数。如果axis为包含两个元素的list,API会计算输入Tensor的矩阵范数。 当 ``axis < 0`` 时,实际的计算维度为 rank(input) + axis。 + - **keepdim** (bool,可选) - 是否在输出的Tensor中保留和输入一样的维度,默认值为False。当 :attr:`keepdim` 为False时,输出的Tensor会比输入 :attr:`input` 的维度少一些。 + - **out** (Variable,可选) - 指定输出的Tensor,默认值为None。out的数据类型必须与输入 ``input`` 一致。 + - **name** (str|None) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。默认值为None。 + + 返回:在指定axis上进行范数计算的Tensor,与输入input数据类型相同。 + + 返回类型:Variable,与输入input数据类型相同。 + +抛出异常: + - ``TypeError`` - 当输出 ``out`` 和输入 ``input`` 数据类型不一致时候。 + - ``ValueError`` - 当参数 ``p`` 或者 ``axis`` 不合法时。 + +**代码示例**: + +.. code-block:: python + + import paddle + import paddle.fluid as fluid + x = fluid.data(name='x', shape=[2, 3, 5], dtype='float64') + + # compute frobenius norm along last two dimensions. + out_fro = paddle.norm(x, p='fro', axis=[1,2]) + + # compute 2-order vector norm along last dimension. + out_pnorm = paddle.norm(x, p=2, axis=-1) diff --git a/doc/fluid/api_cn/tensor_cn/normal_cn.rst b/doc/fluid/api_cn/tensor_cn/normal_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..bcefb2f566e339cbc7fa220f33cda0f7e9e21749 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/normal_cn.rst @@ -0,0 +1,48 @@ +.. _cn_api_tensor_normal: + +normal +------------------------------- + +.. py:function:: paddle.normal(mean=0.0, std=1.0, shape=None, name=None) + + +该OP返回符合正态分布(均值为 ``mean`` ,标准差为 ``std`` 的正态随机分布)的随机Tensor。 + +如果 ``mean`` 是Tensor,则输出Tensor和 ``mean`` 具有相同的形状和数据类型。 +如果 ``mean`` 不是Tensor,且 ``std`` 是Tensor,则输出Tensor和 ``std`` 具有相同的形状和数据类型。 +如果 ``mean`` 和 ``std`` 都不是Tensor,则输出Tensor的形状为 ``shape``,数据类型为float32。 + +如果 ``mean`` 和 ``std`` 都是Tensor,则 ``mean`` 和 ``std`` 的元素个数应该相同。 + +参数 +:::::::::: + - mean (float|Tensor, 可选) - 输出Tensor的正态分布的平均值。如果 ``mean`` 是float,则表示输出Tensor中所有元素的正态分布的平均值。如果 ``mean`` 是Tensor(支持的数据类型为float32、float64),则表示输出Tensor中每个元素的正态分布的平均值。默认值为0.0 + - std (float|Tensor, 可选) - 输出Tensor的正态分布的标准差。如果 ``std`` 是float,则表示输出Tensor中所有元素的正态分布的标准差。如果 ``std`` 是Tensor(支持的数据类型为float32、float64),则表示输出Tensor中每个元素的正态分布的标准差。默认值为0.0 + - shape (list|tuple|Tensor, 可选) - 生成的随机Tensor的形状。如果 ``shape`` 是list、tuple,则其中的元素可以是int,或者是形状为[1]且数据类型为int32、int64的Tensor。如果 ``shape`` 是Tensor,则是数据类型为int32、int64的1-D Tensor。如果 ``mean`` 或者 ``std`` 是Tensor,输出Tensor的形状和 ``mean`` 或者 ``std`` 相同(此时 ``shape`` 无效)。默认值为None。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +:::::::::: + Tensor:符合正态分布(均值为 ``mean`` ,标准差为 ``std`` 的正态随机分布)的随机Tensor。 + +示例代码 +:::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + out1 = paddle.normal(shape=[2, 3]) + # [[ 0.17501129 0.32364586 1.561118 ] + # [-1.7232178 1.1545963 -0.76156676]] + + mean_tensor = paddle.to_tensor(np.array([1.0, 2.0, 3.0])) + out2 = paddle.normal(mean=mean_tensor) + # [ 0.18644847 -1.19434458 3.93694787] + + std_tensor = paddle.to_tensor(np.array([1.0, 2.0, 3.0])) + out3 = paddle.normal(mean=mean_tensor, std=std_tensor) + # [1.00780561 3.78457445 5.81058198] diff --git a/doc/fluid/api_cn/tensor_cn/not_equal_cn.rst b/doc/fluid/api_cn/tensor_cn/not_equal_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8a81f4f08ff2b31d4f434c169be51d3ca3703103 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/not_equal_cn.rst @@ -0,0 +1,37 @@ +.. 
_cn_api_tensor_not_equal: + +not_equal +------------------------------- +.. py:function:: paddle.not_equal(x, y, name=None) + +:alias_main: paddle.not_equal +:alias: paddle.not_equal,paddle.tensor.not_equal,paddle.tensor.logic.not_equal + +该OP返回 :math:`x!=y` 逐元素比较x和y是否相等,相同位置的元素不相同则返回True,否则返回False。使用重载算子 `!=` 可以有相同的计算函数效果 + +**注:该OP输出的结果不返回梯度。** + +参数: + - **x** (Tensor) - 输入Tensor,支持的数据类型包括 float32, float64,int32, int64。 + - **y** (Tensor) - 输入Tensor,支持的数据类型包括 float32, float64, int32, int64。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + + +返回:输出结果的Tensor,输出Tensor的shape和输入一致,Tensor数据类型为bool。 + +返回类型:变量(Tensor) + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle + import paddle.imperative as imperative + paddle.enable_imperative() + x = imperative.to_variable(np.array([1, 2, 3])) + y = imperative.to_variable(np.array([1, 3, 2])) + result1 = paddle.not_equal(x, y) + print(result1.numpy()) # result1 = [False True True] + + diff --git a/doc/fluid/api_cn/tensor_cn/numel_cn.rst b/doc/fluid/api_cn/tensor_cn/numel_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c22bee48074fc0c538f137f1ab6c5b56d8205864 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/numel_cn.rst @@ -0,0 +1,26 @@ +.. _cn_api_tensor_numel: + +numel +------------------------------- + +.. py:function:: paddle.numel(x) + + +该OP返回一个长度为1并且元素值为输入 ``x`` 元素个数的Tensor。 + +参数: + - **x** (Tensor) - 输入Tensor,数据类型为float16, float32, float64, int32, int64 。 + +返回: 返回长度为1并且元素值为 ``x`` 元素个数的Tensor。 + + +**代码示例**: + +.. code-block:: python + + import paddle + + paddle.disable_static() + x = paddle.full(shape=[4, 5, 7], fill_value=0, dtype='int32') + numel = paddle.numel(x) # 140 + diff --git a/doc/fluid/api_cn/tensor_cn/ones_cn.rst b/doc/fluid/api_cn/tensor_cn/ones_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..05e9ded48eab47f1a2530e8f45f11a727fd71796 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/ones_cn.rst @@ -0,0 +1,46 @@ +.. _cn_api_tensor_ones: + +ones +------------------------------- + +.. py:function:: paddle.ones(shape, dtype=None) + + + +该OP创建形状为 ``shape`` 、数据类型为 ``dtype`` 且值全为1的Tensor。 + +参数: + - **shape** (tuple|list|Tensor) - 输出Tensor的形状, ``shape`` 的数据类型为int32或者int64。 + - **dtype** (np.dtype|str, 可选) - 输出Tensor的数据类型,数据类型必须为bool、 float16、float32、float64、int32或int64。如果 ``dtype`` 为None,默认数据类型为float32。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:值全为1的Tensor,数据类型和 ``dtype`` 定义的类型一致。 + + +抛出异常: + - ``TypeError`` - 当 ``dtype`` 不是bool、 float16、float32、float64、int32、int64和None时。 + - ``TypeError`` - 当 ``shape`` 不是tuple、list、或者Tensor的时, 当 ``shape`` 为Tensor时,其数据类型不是int32或者int64。 + +**代码示例**: + +.. code-block:: python + + import paddle + + paddle.disable_static() + + #default dtype for ones OP + data1 = paddle.ones(shape=[3, 2]) + # [[1. 1.] + # [1. 1.] + # [1. 1.]] + data2 = paddle.ones(shape=[2, 2], dtype='int32') + # [[1 1] + # [1 1]] + + #attr shape is a Variable Tensor + shape = paddle.fill_constant(shape=[2], dtype='int32', value=2) + data3 = paddle.ones(shape=shape, dtype='int32') + # [[1 1] + # [1 1]] + diff --git a/doc/fluid/api_cn/tensor_cn/ones_like_cn.rst b/doc/fluid/api_cn/tensor_cn/ones_like_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..33b189c7aaeebf5cc9f54f9f3d4fca38cb5de752 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/ones_like_cn.rst @@ -0,0 +1,39 @@ +.. _cn_api_tensor_ones_like: + +ones_like +------------------------------- + +.. 
py:function:: paddle.ones_like(x, dtype=None, name=None) + +:alias_main: paddle.ones_like +:alias: paddle.tensor.ones_like, paddle.tensor.creation.ones_like + +该OP返回一个和 ``x`` 具有相同形状的数值都为1的Tensor,数据类型为 ``dtype`` 或者和 ``x`` 相同。 + +参数 +:::::::::: + - **x** (Tensor) – 输入的Tensor,数据类型可以是bool,float16, float32,float64,int32,int64。输出Tensor的形状和 ``x`` 相同。如果 ``dtype`` 为None,则输出Tensor的数据类型与 ``x`` 相同。 + - **dtype** (str|np.dtype|core.VarDesc.VarType, 可选) - 输出Tensor的数据类型,支持bool,float16, float32,float64,int32,int64。当该参数值为None时, 输出Tensor的数据类型与 ``x`` 相同。默认值为None. + - **name** (str, 可选) - 输出的名字。一般无需设置,默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 + +返回 +:::::::::: + Tensor:和 ``x`` 具有相同形状的数值都为1的Tensor,数据类型为 ``dtype`` 或者和 ``x`` 相同。 + +抛出异常 +:::::::::: + - ``TypeError`` - 如果 ``dtype`` 不是bool、float16、float32、float64、int32、int64。 + +代码示例 +:::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.enable_imperative() + + x = paddle.imperative.to_variable(np.array([1,2,3], dtype='float32')) + out1 = paddle.ones_like(x) # [1., 1., 1.] + out2 = paddle.ones_like(x, dtype='int32') # [1, 1, 1] diff --git a/doc/fluid/api_cn/tensor_cn/pow_cn.rst b/doc/fluid/api_cn/tensor_cn/pow_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..34052fbc5e659dfcdace901325ab1387963e07a6 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/pow_cn.rst @@ -0,0 +1,42 @@ +.. _cn_api_tensor_argmax: + +pow +------------------------------- + +.. py:function:: paddle.pow(x, y, name=None): + + +该OP是指数激活算子: + +.. math:: + out = x^{y} + +参数: + - **x** (Tensor)- 多维 ``Tensor``,数据类型为 ``float32`` 或 ``float64`` 或 ``int32`` 或 ``int64``。 + - **y** (Tensor)- 多维 ``Tensor``,数据类型为 ``float32`` 或 ``float64`` 或 ``int32`` 或 ``int64``。Pow OP的指数因子。默认值:1.0。 + - **name** (str) - 默认值None,输出的名称。该参数供开发人员打印调试信息时使用,具体用法参见 :ref:`api_guide_name`。 + +返回: Tensor,数据类型和input ``x`` 一致。 + +**代码示例:** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + # 示例1: 参数y是个浮点数 + x_data = np.array([1, 2, 3]) + y = 2 + x = paddle.to_tensor(x_data) + res = paddle.pow(x, y) + # print(res.numpy()) # [1 4 9] + + # 示例2: 参数y是个Tensor + y = paddle.fill_constant(shape=[1], value=2, dtype='float32') + res = paddle.pow(x, y) + print(res.numpy()) # [1 4 9] + + \ No newline at end of file diff --git a/doc/fluid/api_cn/tensor_cn/prod_cn.rst b/doc/fluid/api_cn/tensor_cn/prod_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..64058ada576905be353bdb8f97ad50369db56688 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/prod_cn.rst @@ -0,0 +1,69 @@ +.. _cn_api_tensor_cn_prod: + +prod +------------------------------- + +.. py:function:: paddle.prod(x, axis=None, keepdim=False, dtype=None, name=None) + + + +对指定维度上的Tensor元素进行求乘积运算,并输出相应的计算结果。 + +参数: + - **x** (Tensor) - 输入的 `Tensor` ,数据类型为:float32、float64、int32、int64。 + - **axis** (int|list|tuple,可选) - 求乘积运算的维度。如果是None,则计算所有元素的乘积并返回包含单个元素的Tensor,否则该参数必须在 :math:`[-x.ndim, x.ndim)` 范围内。如果 :math:`axis[i] < 0` ,则维度将变为 :math:`x.ndim + axis[i]` ,默认为None。 + - **keepdim** (bool,可选) - 是否在输出 `Tensor` 中保留减小的维度。如 `keepdim` 为True,否则结果张量的维度将比输入张量小,默认值为False。 + - **dtype** (str,可选) - 输出Tensor的数据类型,支持int32、int64、float32、float64。如果指定了该参数,那么在执行操作之前,输入Tensor将被转换为dtype类型. 这对于防止数据类型溢出非常有用。若参数为空,则输出变量的数据类型和输入变量相同,默认为:None。 + - **name** (str,可选)- 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +返回:指定axis上累乘的结果的Tensor。 + + +**代码示例**: + +.. 
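+补充示例(示意代码,用于说明 ``dtype`` 参数的用途):对较大的整数先指定 ``dtype='int64'`` 再累乘,可以降低按 int32 计算时溢出的风险;数值为随意构造的演示数据,注释为预期结果。
+
+.. code-block:: python
+
+    import numpy as np
+    import paddle
+
+    paddle.disable_static()
+
+    x = paddle.to_tensor(np.array([100000, 100000, 1000]).astype('int32'))
+    # 100000 * 100000 * 1000 = 10**13,超出 int32 的表示范围,因此指定 dtype='int64'
+    out = paddle.prod(x, dtype='int64')
+    print(out.numpy()) # 预期: [10000000000000]
+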
code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + # the axis is a int element + data_x = np.array([[0.2, 0.3, 0.5, 0.9], + [0.1, 0.2, 0.6, 0.7]]).astype(np.float32) + x = paddle.to_tensor(data_x) + out1 = paddle.prod(x) + print(out1.numpy()) + # [0.0002268] + + out2 = paddle.prod(x, -1) + print(out2.numpy()) + # [0.027 0.0084] + + out3 = paddle.prod(x, 0) + print(out3.numpy()) + # [0.02 0.06 0.3 0.63] + print(out3.numpy().dtype) + # float32 + + out4 = paddle.prod(x, 0, keepdim=True) + print(out4.numpy()) + # [[0.02 0.06 0.3 0.63]] + + out5 = paddle.prod(x, 0, dtype='int64') + print(out5.numpy()) + # [0 0 0 0] + print(out5.numpy().dtype) + # int64 + + # the axis is list + data_y = np.array([[[1.0, 2.0], [3.0, 4.0]], + [[5.0, 6.0], [7.0, 8.0]]]) + y = paddle.to_tensor(data_y) + out6 = paddle.prod(y, [0, 1]) + print(out6.numpy()) + # [105. 384.] + + out7 = paddle.prod(y, (1, 2)) + print(out7.numpy()) + # [ 24. 1680.] diff --git a/doc/fluid/api_cn/tensor_cn/rand_cn.rst b/doc/fluid/api_cn/tensor_cn/rand_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..36fc3f1a6fdecf051a212e7099382e75c859702d --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/rand_cn.rst @@ -0,0 +1,59 @@ +.. _cn_api_tensor_random_rand: + +rand +---------------------- + +.. py:function:: paddle.rand(shape, dtype=None, name=None) + +:alias_main: paddle.rand +:alias: paddle.tensor.rand, paddle.tensor.random.rand + + + +该OP返回符合均匀分布的,范围在[0, 1)的Tensor,形状为 ``shape``,数据类型为 ``dtype``。 + +参数 +:::::::::: + - **shape** (list|tuple|Tensor) - 生成的随机Tensor的形状。如果 ``shape`` 是list、tuple,则其中的元素可以是int,或者是形状为[1]且数据类型为int32、int64的Tensor。如果 ``shape`` 是Tensor,则是数据类型为int32、int64的1-D Tensor。 + - **dtype** (str|np.dtype|core.VarDesc.VarType, 可选) - 输出Tensor的数据类型,支持float32、float64。当该参数值为None时, 输出Tensor的数据类型为float32。默认值为None. + - **name** (str, 可选) - 输出的名字。一般无需设置,默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 + +返回 +:::::::::: + Tensor: 符合均匀分布的范围为[0, 1)的随机Tensor,形状为 ``shape``,数据类型为 ``dtype``。 + +抛出异常 +:::::::::: + - ``TypeError`` - 如果 ``shape`` 的类型不是list、tuple、Tensor。 + - ``TypeError`` - 如果 ``dtype`` 不是float32、float64。 + +示例代码 +:::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.enable_imperative() + # example 1: attr shape is a list which doesn't contain Tensor. + result_1 = paddle.rand(shape=[2, 3]) + # [[0.451152 , 0.55825245, 0.403311 ], + # [0.22550228, 0.22106001, 0.7877319 ]] + + # example 2: attr shape is a list which contains Tensor. + dim_1 = paddle.fill_constant([1], "int64", 2) + dim_2 = paddle.fill_constant([1], "int32", 3) + result_2 = paddle.rand(shape=[dim_1, dim_2, 2]) + # [[[0.8879919 0.25788337] + # [0.28826773 0.9712097 ] + # [0.26438272 0.01796806]] + # [[0.33633623 0.28654453] + # [0.79109055 0.7305809 ] + # [0.870881 0.2984597 ]]] + + # example 3: attr shape is a Tensor, the data type must be int64 or int32. + var_shape = paddle.imperative.to_variable(np.array([2, 3])) + result_3 = paddle.rand(var_shape) + # [[0.22920267 0.841956 0.05981819] + # [0.4836288 0.24573246 0.7516129 ]] diff --git a/doc/fluid/api_cn/tensor_cn/randint_cn.rst b/doc/fluid/api_cn/tensor_cn/randint_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e2ef78ff5d2294b795bce9e136f039a5270a15dd --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/randint_cn.rst @@ -0,0 +1,72 @@ +.. _cn_api_tensor_randint: + +randint +------------------------------- + +.. 
py:function:: paddle.randint(low=0, high=None, shape=[1], dtype=None, name=None) + +:alias_main: paddle.randint +:alias: paddle.tensor.randint, paddle.tensor.random.randint + + + +该OP返回服从均匀分布的、范围在[``low``, ``high``)的随机Tensor,形状为 ``shape``,数据类型为 ``dtype``。当 ``high`` 为None时(默认),均匀采样的区间为[0, ``low``)。 + +参数 +:::::::::: + - **low** (int) - 要生成的随机值范围的下限,``low`` 包含在范围中。当 ``high`` 为None时,均匀采样的区间为[0, ``low``)。默认值为0。 + - **high** (int, 可选) - 要生成的随机值范围的上限,``high`` 不包含在范围中。默认值为None,此时范围是[0, ``low``)。 + - **shape** (list|tuple|Tensor) - 生成的随机Tensor的形状。如果 ``shape`` 是list、tuple,则其中的元素可以是int,或者是形状为[1]且数据类型为int32、int64的Tensor。如果 ``shape`` 是Tensor,则是数据类型为int32、int64的1-D Tensor。。默认值为[1]。 + - **dtype** (str|np.dtype|core.VarDesc.VarType, 可选) - 输出Tensor的数据类型,支持int32、int64。当该参数值为None时, 输出Tensor的数据类型为int64。默认值为None. + - **name** (str, 可选) - 输出的名字。一般无需设置,默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 + +返回 +:::::::::: + Tensor:从区间[``low``,``high``)内均匀分布采样的随机Tensor,形状为 ``shape``,数据类型为 ``dtype``。 + +抛出异常 +:::::::::: + - ``TypeError`` - 如果 ``shape`` 的类型不是list、tuple、Tensor。 + - ``TypeError`` - 如果 ``dtype`` 不是int32、int64。 + - ``ValueError`` - 如果 ``high`` 不大于 ``low``;或者 ``high`` 为None,且 ``low`` 不大于0。 + +代码示例 +::::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.enable_imperative() + + # example 1: + # attr shape is a list which doesn't contain Tensor. + result_1 = paddle.randint(low=-5, high=5, shape=[3]) + # [0, -3, 2] + + # example 2: + # attr shape is a list which contains Tensor. + dim_1 = paddle.fill_constant([1], "int64", 2) + dim_2 = paddle.fill_constant([1], "int32", 3) + result_2 = paddle.randint(low=-5, high=5, shape=[dim_1, dim_2], dtype="int32") + print(result_2.numpy()) + # [[ 0, -1, -3], + # [ 4, -2, 0]] + + # example 3: + # attr shape is a Tensor + var_shape = paddle.imperative.to_variable(np.array([3])) + result_3 = paddle.randint(low=-5, high=5, shape=var_shape) + # [-2, 2, 3] + + # example 4: + # date type is int32 + result_4 = paddle.randint(low=-5, high=5, shape=[3], dtype='int32') + # [-5, 4, -4] + + # example 5: + # Input only one parameter + # low=0, high=10, shape=[1], dtype='int64' + result_5 = paddle.randint(10) + # [7] diff --git a/doc/fluid/api_cn/tensor_cn/randn_cn.rst b/doc/fluid/api_cn/tensor_cn/randn_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e465b0c75eb5dbb06386abfc5d6f41746b6e967b --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/randn_cn.rst @@ -0,0 +1,60 @@ +.. _cn_api_tensor_random_randn: + +randn +------------------------------- + +.. py:function:: paddle.randn(shape, dtype=None, name=None) + +:alias_main: paddle.randn +:alias: paddle.tensor.randn, paddle.tensor.random.randn + + + +该OP返回符合标准正态分布(均值为0,标准差为1的正态随机分布)的随机Tensor,形状为 ``shape``,数据类型为 ``dtype``。 + +参数 +:::::::::: + - **shape** (list|tuple|Tensor) - 生成的随机Tensor的形状。如果 ``shape`` 是list、tuple,则其中的元素可以是int,或者是形状为[1]且数据类型为int32、int64的Tensor。如果 ``shape`` 是Tensor,则是数据类型为int32、int64的1-D Tensor。 + - **dtype** (str|np.dtype|core.VarDesc.VarType, 可选) - 输出Tensor的数据类型,支持float32、float64。当该参数值为None时, 输出Tensor的数据类型为float32。默认值为None. + - **name** (str, 可选) - 输出的名字。一般无需设置,默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 + +返回 +:::::::::: + Tensor:符合标准正态分布的随机Tensor,形状为 ``shape``,数据类型为 ``dtype``。 + +抛出异常 +:::::::::: + - ``TypeError`` - 如果 ``shape`` 的类型不是list、tuple、Tensor。 + - ``TypeError`` - 如果 ``dtype`` 不是float32、float64。 + +示例代码 +:::::::::: + +.. 
code-block:: python + + import paddle + import numpy as np + + paddle.enable_imperative() + + # example 1: attr shape is a list which doesn't contain Tensor. + result_1 = paddle.randn(shape=[2, 3]) + # [[-2.923464 0.11934398 -0.51249987] + # [ 0.39632758 0.08177969 0.2692008 ]] + + # example 2: attr shape is a list which contains Tensor. + dim_1 = paddle.fill_constant([1], "int64", 2) + dim_2 = paddle.fill_constant([1], "int32", 3) + result_2 = paddle.randn(shape=[dim_1, dim_2, 2]) + # [[[-2.8852394 -0.25898588] + # [-0.47420555 0.17683524] + # [-0.7989969 0.00754541]] + # [[ 0.85201347 0.32320443] + # [ 1.1399018 0.48336947] + # [ 0.8086993 0.6868893 ]]] + + # example 3: attr shape is a Tensor, the data type must be int64 or int32. + var_shape = paddle.imperative.to_variable(np.array([2, 3])) + result_3 = paddle.randn(var_shape) + # [[-2.878077 0.17099959 0.05111201] + # [-0.3761474 -1.044801 1.1870178 ]] diff --git a/doc/fluid/api_cn/tensor_cn/randperm_cn.rst b/doc/fluid/api_cn/tensor_cn/randperm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d3c756a0fc9ded21c9b78d95dd532b9ba4aa26e9 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/randperm_cn.rst @@ -0,0 +1,41 @@ +.. _cn_api_tensor_random_randperm: + +randperm +------------------------------- + +.. py:function:: paddle.randperm(n, dtype="int64", name=None) + +:alias_main: paddle.randperm +:alias: paddle.tensor.randperm, paddle.tensor.random.randperm + +该OP返回一个数值在0到n-1、随机排列的1-D Tensor,数据类型为 ``dtype``。 + +参数: +:::::::::: + - **n** (int) - 随机序列的上限(不包括在序列中),应该大于0。 + - **dtype** (str|np.dtype|core.VarDesc.VarType, 可选) - 输出Tensor的数据类型,支持int32、int64、float32、float64。默认值为"int64". + - **name** (str, 可选) - 输出的名字。一般无需设置,默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 + +返回 +:::::::::: + Tensor:一个数值在0到n-1、随机排列的1-D Tensor,数据类型为 ``dtype`` 。 + +抛出异常 +:::::::::: + - ValueError - 如果 ``n`` 不大于0. + - TypeError - 如果 ``dtype`` 不是int32、int64、float32、float64. + +代码示例 +:::::::::: + +.. code-block:: python + + import paddle + + paddle.enable_imperative() + + result_1 = paddle.randperm(5) + # [4 1 2 3 0] + + result_2 = paddle.randperm(7, 'int32') + # [1 6 2 0 4 3 5] diff --git a/doc/fluid/api_cn/tensor_cn/rank_cn.rst b/doc/fluid/api_cn/tensor_cn/rank_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..26afba8595a166109ecda36fa31178edbdc90eab --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/rank_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_rank: + +rank +------------------------------- +:doc_source: paddle.fluid.layers.rank + + diff --git a/doc/fluid/api_cn/tensor_cn/reciprocal_cn.rst b/doc/fluid/api_cn/tensor_cn/reciprocal_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a8a2e24a93a3ad3be62ce060e3c59e23793d1cff --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/reciprocal_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_reciprocal: + +reciprocal +------------------------------- +:doc_source: paddle.fluid.layers.reciprocal + + diff --git a/doc/fluid/api_cn/tensor_cn/reduce_all_cn.rst b/doc/fluid/api_cn/tensor_cn/reduce_all_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..07706d214831043e9839567eab6523cbdca8a99f --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/reduce_all_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_tensor_cn_reduce_all: + +reduce_all +------------------------------- +:doc_source: paddle.fluid.layers.reduce_all + + diff --git a/doc/fluid/api_cn/tensor_cn/reduce_any_cn.rst b/doc/fluid/api_cn/tensor_cn/reduce_any_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..436fad60d8a46f835c0e82fbc00bc06ea8b3aad0 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/reduce_any_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_reduce_any: + +reduce_any +------------------------------- +:doc_source: paddle.fluid.layers.reduce_any + + diff --git a/doc/fluid/api_cn/tensor_cn/reduce_max_cn.rst b/doc/fluid/api_cn/tensor_cn/reduce_max_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3d46442896e49fdcb6f7ab488cb55da22d4e706e --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/reduce_max_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_reduce_max: + +reduce_max +------------------------------- +:doc_source: paddle.fluid.layers.reduce_max + + diff --git a/doc/fluid/api_cn/tensor_cn/reduce_mean_cn.rst b/doc/fluid/api_cn/tensor_cn/reduce_mean_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d94ec2323b1691d50402b97acf4e20521d7e533f --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/reduce_mean_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_reduce_mean: + +reduce_mean +------------------------------- +:doc_source: paddle.fluid.layers.reduce_mean + + diff --git a/doc/fluid/api_cn/tensor_cn/reduce_min_cn.rst b/doc/fluid/api_cn/tensor_cn/reduce_min_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..89f8b08f8062a545b5a745fdde3ea8085c189b7f --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/reduce_min_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_reduce_min: + +reduce_min +------------------------------- +:doc_source: paddle.fluid.layers.reduce_min + + diff --git a/doc/fluid/api_cn/tensor_cn/reduce_prod_cn.rst b/doc/fluid/api_cn/tensor_cn/reduce_prod_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f0ffde3fc18f6ab958c4020cb8400ae6f16a1de4 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/reduce_prod_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_reduce_prod: + +reduce_prod +------------------------------- +:doc_source: paddle.fluid.layers.reduce_prod + + diff --git a/doc/fluid/api_cn/tensor_cn/reduce_sum_cn.rst b/doc/fluid/api_cn/tensor_cn/reduce_sum_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..657c15cb385cc0d46ed69ef9fc80c216638960cc --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/reduce_sum_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_reduce_sum: + +reduce_sum +------------------------------- +:doc_source: paddle.fluid.layers.reduce_sum + + diff --git a/doc/fluid/api_cn/tensor_cn/remainder_cn.rst b/doc/fluid/api_cn/tensor_cn/remainder_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ccf124eb8a344c65e216f368d8b748c20c484a0f --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/remainder_cn.rst @@ -0,0 +1,42 @@ +.. _cn_api_tensor_remainder: + +remainder +------------------------------- + +.. py:function:: paddle.remainder(x, y, name=None) + +该OP是逐元素取模算子,输入 ``x`` 与输入 ``y`` 逐元素取模,并将各个位置的输出元素保存到返回结果中。 +输入 ``x`` 与输入 ``y`` 必须可以广播为相同形状, 关于广播规则,请参考 :ref:`use_guide_broadcasting` + +等式为: + +.. 
math:: + Out = X \% Y + +- :math:`X` :多维Tensor。 +- :math:`Y` :多维Tensor。 + +参数: + - x(Tensor)- 多维Tensor。数据类型为float32 、float64、int32或int64。 + - y(Tensor)- 多维Tensor。数据类型为float32 、float64、int32或int64。 + - name(str,可选)- 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + + +返回: 多维 Tensor, 数据类型与 ``x`` 相同,维度为广播后的形状。 + +返回类型: Tensor + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + np_x = np.array([2, 3, 8, 7]) + np_y = np.array([1, 5, 3, 3]) + x = paddle.to_tensor(np_x) + y = paddle.to_tensor(np_y) + z = paddle.remainder(x, y) + print(z.numpy()) # [0, 3, 2, 1] \ No newline at end of file diff --git a/doc/fluid/api_cn/tensor_cn/reshape_cn.rst b/doc/fluid/api_cn/tensor_cn/reshape_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..08a2f9a6dff3b5fdb542cc278e346173482167ac --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/reshape_cn.rst @@ -0,0 +1,63 @@ +.. _cn_api_tensor_cn_reshape: + +reshape +------------------------------- + +.. py:function:: paddle.reshape(x, shape, name=None) + +:alias_main: paddle.reshape +:alias: paddle.reshape,paddle.tensor.reshape,paddle.tensor.manipulation.reshape + + +该OP在保持输入 ``x`` 数据不变的情况下,改变 ``x`` 的形状。 + +在指定目标shape时存在一些技巧: + +.. code-block:: text + + 1. -1 表示这个维度的值是从x的元素总数和剩余维度推断出来的。因此,有且只有一个维度可以被设置为-1。 + 2. 0 表示实际的维数是从x的对应维数中复制出来的,因此shape中0的索引值不能超过x的维度。 + + +这里有一些例子来解释它们: + +.. code-block:: text + + 1. 给定一个形状为[2,4,6]的三维张量x,目标形状为[6,8],则将x变换为形状为[6,8]的2-D张量,且x的数据保持不变。 + 2. 给定一个形状为[2,4,6]的三维张量x,目标形状为[2,3,-1,2],则将x变换为形状为[2,3,4,2]的4-D张量,且x的数据保持不变。在这种情况下,目标形状的一个维度被设置为-1,这个维度的值是从x的元素总数和剩余维度推断出来的。 + 3. 给定一个形状为[2,4,6]的三维张量x,目标形状为[-1,0,3,2],则将x变换为形状为[2,4,3,2]的4-D张量,且x的数据保持不变。在这种情况下,0对应位置的维度值将从x的对应维数中复制,-1对应位置的维度值由x的元素总数和剩余维度推断出来。 + + +参数: + - **x** (Tensor)- 多维 ``Tensor``,数据类型为 ``float32``,``float64``,``int32``,或 ``int64``。 + - **shape** (list|tuple|Tensor)- 数据类型是 ``int32`` 。定义目标形状。目标形状最多只能有一个维度为-1。如果 ``shape`` 的类型是 list 或 tuple, 它的元素可以是整数或者形状为[1]的 ``Tensor`` 或 ``LoDTensor``。如果 ``shape`` 的类型是 ``Tensor``,则是1-D的 ``Tensor`` 或 ``LoDTensor``。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置。默认值: ``None``。 + +返回: +::::::::: +``Tensor``, 改变形状后的 ``Tensor``,数据类型与 ``x`` 相同。 + + +**代码示例** + +.. code-block:: python + + import numpy as np + import paddle + + paddle.disable_static() + + data = np.random.random([2, 4, 6]).astype("float32") + x = paddle.to_tensor(data) + + positive_four = paddle.fill_constant([1], "int32", 4) + + out_1 = paddle.reshape(x, [-1, 0, 3, 2]) + # the shape of out_1 is [2,4,3,2]. + + out_2 = paddle.reshape(x, shape=[positive_four, 12]) + # the shape of out_2 is [4, 12]. + + shape_tensor = paddle.to_tensor(np.array([8, 6]).astype("int32")) + out_3 = paddle.reshape(x, shape=shape_tensor) + # the shape of out_2 is [8, 6]. diff --git a/doc/fluid/api_cn/tensor_cn/reverse_cn.rst b/doc/fluid/api_cn/tensor_cn/reverse_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7ccc107b12747768af593c1be064ec0bc8f4795e --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/reverse_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_reverse: + +reverse +------------------------------- +:doc_source: paddle.fluid.layers.reverse + + diff --git a/doc/fluid/api_cn/tensor_cn/roll_cn.rst b/doc/fluid/api_cn/tensor_cn/roll_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2b5f2685ef1fbeeeda52030f99065e83c76723c5 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/roll_cn.rst @@ -0,0 +1,47 @@ +.. _cn_api_tensor_manipulation_roll: + +roll +------------------------------- + +.. 
py:function:: paddle.roll(x, shifts, axis=None, name=None): + +:alias_main: paddle.roll +:alias: paddle.roll, paddle.tensor.roll, paddle.tensor.manipulation.roll + + + +该OP沿着指定维度 ``axis`` 对输入 ``x`` 进行循环滚动,当元素移动到最后位置时,会从第一个位置重新插入。如果 ``axis`` 为 ``None`` ,则输入在被循环滚动之前,会先展平成 ``1-D Tensor`` ,滚动操作完成后恢复成原来的形状。 + +**参数**: + - **x** (Variable)– 输入张量。 + - **shifts** (int|list|tuple) - 滚动位移。如果 ``shifts`` 是一个元组或者列表,则 ``axis`` 必须是相同大小的元组或者列表,输入张量将依次沿着每个维度滚动相应的数值。 + - **axis** (int|list|tuple, optinal) – 滚动轴。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +**返回**: + - **Variable**,数据类型同输入。 + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle + + data = np.array([[1.0, 2.0, 3.0], + [4.0, 5.0, 6.0], + [7.0, 8.0, 9.0]]) + paddle.enable_imperative() + x = paddle.imperative.to_variable(data) + out_z1 = paddle.roll(x, shifts=1) + print(out_z1.numpy()) + #[[9. 1. 2.] + # [3. 4. 5.] + # [6. 7. 8.]] + out_z2 = paddle.roll(x, shifts=1, axis=0) + print(out_z2.numpy()) + #[[7. 8. 9.] + # [1. 2. 3.] + # [4. 5. 6.]] + + diff --git a/doc/fluid/api_cn/tensor_cn/round_cn.rst b/doc/fluid/api_cn/tensor_cn/round_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0e5eed214ae6a74521e030803a36525c21e2820b --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/round_cn.rst @@ -0,0 +1,31 @@ +.. _cn_api_tensor_cn_round: + +round +------------------------------- + +.. py:function:: paddle.round(x, name=None) + + + +该OP将输入中的数值四舍五入到最接近的整数数值。 + +参数: + - **x** (Tensor) - 输入的 `Tensor` ,数据类型为: float16, float32, float64。 + - **name** (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +返回: + - Tensor,对输入x四舍五入后的Tensor,形状、数据类型与输入x一致。 + + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle + paddle.disable_static() + x_data = np.array([-0.5, -0.2, 0.6, 1.5]) + x = paddle.to_tensor(x_data) + out = paddle.round(x) + print(out.numpy()) + # [-1. -0. 1. 2.] diff --git a/doc/fluid/api_cn/tensor_cn/rsqrt_cn.rst b/doc/fluid/api_cn/tensor_cn/rsqrt_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..98bb2483cb9055a8c1010eeec753286902ce4ab5 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/rsqrt_cn.rst @@ -0,0 +1,40 @@ +.. _cn_api_tensor_cn_rsqrt: + +rsqrt +------------------------------- + +.. py:function:: paddle.rsqrt(x, name=None) + + + + +该OP为rsqrt激活函数。 + +注:输入x应确保为非 **0** 值,否则程序会抛异常退出。 + +其运算公式如下: + +.. math:: + out = \frac{1}{\sqrt{x}} + + +参数: + - **x** (Tensor) - 输入的 `Tensor` ,数据类型为:float32、float64。 + - **name** (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +返回: + - Tensor,对输入x进行rsqrt激活后的Tensor,形状、数据类型与输入x一致。 + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle + paddle.disable_static() + x_data = np.array([0.1, 0.2, 0.3, 0.4]) + x = paddle.to_tensor(x_data) + out = paddle.rsqrt(x) + print(out.numpy()) + # [3.16227766 2.23606798 1.82574186 1.58113883] + diff --git a/doc/fluid/api_cn/tensor_cn/save_cn.rst b/doc/fluid/api_cn/tensor_cn/save_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..88752be5c7e38af0a5611a62542edfdc8f2aebf3 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/save_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_tensor_cn_save: + +save +------------------------------- +:doc_source: paddle.fluid.save + + diff --git a/doc/fluid/api_cn/tensor_cn/scale_cn.rst b/doc/fluid/api_cn/tensor_cn/scale_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d4537eda340ec9b2dc7dadfb2d3f34a22affe8a2 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/scale_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_scale: + +scale +------------------------------- +:doc_source: paddle.fluid.layers.scale + + diff --git a/doc/fluid/api_cn/tensor_cn/scatter_cn.rst b/doc/fluid/api_cn/tensor_cn/scatter_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..43a9aad6d7d9f4d06a6cd3f544ed13af6428c1bd --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/scatter_cn.rst @@ -0,0 +1,78 @@ +.. _cn_api_paddle_cn_scatter: + +scatter +------------------------------- +.. py:function:: paddle.scatter(x, index, updates, overwrite=True, name=None) + + +通过基于 ``updates`` 来更新选定索引 ``index`` 上的输入来获得输出。具体行为如下: + + .. code-block:: python + + import numpy as np + #input: + x = np.array([[1, 1], [2, 2], [3, 3]]) + index = np.array([2, 1, 0, 1]) + # shape of updates should be the same as x + # shape of updates with dim > 1 should be the same as input + updates = np.array([[1, 1], [2, 2], [3, 3], [4, 4]]) + overwrite = False + # calculation: + if not overwrite: + for i in range(len(index)): + x[index[i]] = np.zeros((2)) + for i in range(len(index)): + if (overwrite): + x[index[i]] = updates[i] + else: + x[index[i]] += updates[i] + # output: + out = np.array([[3, 3], [6, 6], [1, 1]]) + out.shape # [3, 2] + +**Notice:** +因为 ``updates`` 的应用顺序是不确定的,因此,如果索引 ``index`` 包含重复项,则输出将具有不确定性。 + + +参数: + - **x** (Tensor) - ndim> = 1的输入N-D张量。 数据类型可以是float32,float64。 + - **index** (Tensor)- 一维Tensor。 数据类型可以是int32,int64。 ``index`` 的长度不能超过 ``updates`` 的长度,并且 ``index`` 中的值不能超过输入的长度。 + - **updates** (Tensor)- 根据 ``index`` 使用 ``update`` 参数更新输入 ``x`` 。 形状应与输入 ``x`` 相同,并且dim>1的dim值应与输入 ``x`` 相同。 + - **overwrite** (bool,可选)- 指定索引 ``index`` 相同时,更新输出的方式。如果为True,则使用覆盖模式更新相同索引的输出,如果为False,则使用累加模式更新相同索引的输出。默认值为True。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:Tensor,与x有相同形状和数据类型。 + + +**代码示例:** + .. code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + x_data = np.array([[1, 1], [2, 2], [3, 3]]).astype(np.float32) + index_data = np.array([2, 1, 0, 1]).astype(np.int64) + updates_data = np.array([[1, 1], [2, 2], [3, 3], [4, 4]]).astype(np.float32) + + x = paddle.to_tensor(x_data) + index = paddle.to_tensor(index_data) + updates = paddle.to_tensor(updates_data) + + output1 = paddle.scatter(x, index, updates, overwrite=False) + # [[3., 3.], + # [6., 6.], + # [1., 1.]] + output2 = paddle.scatter(x, index, updates, overwrite=True) + # CPU device: + # [[3., 3.], + # [4., 4.], + # [1., 1.]] + # GPU device maybe have two results because of the repeated numbers in index + # result 1: + # [[3., 3.], + # [4., 4.], + # [1., 1.]] + # result 2: + # [[3., 3.], + # [2., 2.], + # [1., 1.]] diff --git a/doc/fluid/api_cn/tensor_cn/scatter_nd_add_cn.rst b/doc/fluid/api_cn/tensor_cn/scatter_nd_add_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..cb60233684857fcf28f9cb7693c0dc11973b1693 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/scatter_nd_add_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_tensor_cn_scatter_nd_add: + +scatter_nd_add +------------------------------- +:doc_source: paddle.fluid.layers.scatter_nd_add + + diff --git a/doc/fluid/api_cn/tensor_cn/scatter_nd_cn.rst b/doc/fluid/api_cn/tensor_cn/scatter_nd_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b256aa9c17aa8e1ac8f61f04044f2e93190ecd1a --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/scatter_nd_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_scatter_nd: + +scatter_nd +------------------------------- +:doc_source: paddle.fluid.layers.scatter_nd + + diff --git a/doc/fluid/api_cn/tensor_cn/shape_cn.rst b/doc/fluid/api_cn/tensor_cn/shape_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..82d6ac2f1d5cb28c211a4c66e929a3f04203857a --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/shape_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_shape: + +shape +------------------------------- +:doc_source: paddle.fluid.layers.shape + + diff --git a/doc/fluid/api_cn/tensor_cn/shard_index_cn.rst b/doc/fluid/api_cn/tensor_cn/shard_index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..62256a197bcc4d8b9c2ea7274c31565f3b81db14 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/shard_index_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_shard_index: + +shard_index +------------------------------- +:doc_source: paddle.fluid.layers.shard_index + + diff --git a/doc/fluid/api_cn/tensor_cn/shuffle_cn.rst b/doc/fluid/api_cn/tensor_cn/shuffle_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8556ac592a86ac2ea13812fb07a5abab77c1995c --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/shuffle_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_shuffle: + +shuffle +------------------------------- +:doc_source: paddle.fluid.io.shuffle + + diff --git a/doc/fluid/api_cn/tensor_cn/sign_cn.rst b/doc/fluid/api_cn/tensor_cn/sign_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e1443fc2dfa3aedf65a2997f6b10bd91593d6325 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/sign_cn.rst @@ -0,0 +1,30 @@ +.. _cn_api_tensor_sign: + +sign +------------------------------- + +.. py:function:: paddle.sign(x, name=None) + +此OP对输入x中每个元素进行正负判断,并且输出正负判断值:1代表正,-1代表负,0代表零。 + +参数: + - **x** (Tensor) – 进行正负值判断的多维Tensor,数据类型为 float16, float32或float64。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:输出正负号Tensor,数据的shape大小及数据类型和输入 ``x`` 一致。 + +返回类型:Tensor + +**代码示例** + +.. code-block:: python + + import numpy as np + import paddle + + data = np.array([3.0, 0.0, -2.0, 1.7], dtype='float32') + paddle.disable_static() + x = paddle.to_tensor(data) + out = paddle.sign(x=x) + print(out) # [1.0, 0.0, -1.0, 1.0] + diff --git a/doc/fluid/api_cn/tensor_cn/sin_cn.rst b/doc/fluid/api_cn/tensor_cn/sin_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..36f6255bb9a046f97b4463ab17ac18a0b439d4aa --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/sin_cn.rst @@ -0,0 +1,34 @@ +.. _cn_api_tensor_sin: + +sin +------------------------------- + +.. py:function:: paddle.sin(x, name=None) + + + +计算输入的正弦值。 + +.. math:: + out = sin(x) + +参数: + - **x** (Tensor) - 输入的 `Tensor` ,数据类型为: float16, float32, float64。 + - **name** (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +返回: + - Tensor,对输入x计算sin值后的Tensor,形状、数据类型同输入x一致。 + + +**代码示例**: + +.. 
code-block:: python + + import numpy as np + import paddle + paddle.disable_static() + x_data = np.array([-0.4, -0.2, 0.1, 0.3]) + x = paddle.to_tensor(x_data) + out = paddle.sin(x) + print(out.numpy()) + # [-0.38941834 -0.19866933 0.09983342 0.29552021] diff --git a/doc/fluid/api_cn/tensor_cn/slice_cn.rst b/doc/fluid/api_cn/tensor_cn/slice_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4b43fa4b148dcda6adb05574d1564ed24924d53e --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/slice_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_slice: + +slice +------------------------------- +:doc_source: paddle.fluid.layers.slice + + diff --git a/doc/fluid/api_cn/tensor_cn/sort_cn.rst b/doc/fluid/api_cn/tensor_cn/sort_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..dc791485e0327e588091e628b3efdb8e3b04fe00 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/sort_cn.rst @@ -0,0 +1,63 @@ +.. _cn_api_tensor_sort: + +sort +------------------------------- + +.. py:function:: paddle.sort(x, axis=-1, descending=False, name=None) + +:alias_main: paddle.sort +:alias: paddle.sort,paddle.tensor.sort,paddle.tensor.search.sort + + +对输入变量沿给定轴进行排序,输出排序好的数据,其维度和输入相同。默认升序排列,如果需要降序排列设置 ``descending=True`` 。 + + +参数: + - **x** (Tensor) - 输入的多维 ``Tensor`` ,支持的数据类型:float32、float64、int16、int32、int64、uint8。 + - **axis** (int,可选) - 指定对输入Tensor进行运算的轴, ``axis`` 的有效范围是[-R, R),R是输入 ``x`` 的Rank, ``axis`` 为负时与 ``axis`` +R 等价。默认值为0。 + - **descending** (bool,可选) - 指定算法排序的方向。如果设置为True,算法按照降序排序。如果设置为False或者不设置,按照升序排序。默认值为False。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:Tensor, 排序后的输出(与 ``x`` 维度相同、数据类型相同)。 + + +**代码示例**: + +.. code-block:: python + + import paddle + import paddle.imperative as imperative + import numpy as np + + paddle.enable_imperative() + input_array = np.array([[[5,8,9,5], + [0,0,1,7], + [6,9,2,4]], + [[5,2,4,2], + [4,7,7,9], + [1,7,0,6]]]).astype(np.float32) + x = imperative.to_variable(input_array) + out1 = paddle.sort(x=x, axis=-1) + out2 = paddle.sort(x=x, axis=0) + out3 = paddle.sort(x=x, axis=1) + print(out1.numpy()) + #[[[5. 5. 8. 9.] + # [0. 0. 1. 7.] + # [2. 4. 6. 9.]] + # [[2. 2. 4. 5.] + # [4. 7. 7. 9.] + # [0. 1. 6. 7.]]] + print(out2.numpy()) + #[[[5. 2. 4. 2.] + # [0. 0. 1. 7.] + # [1. 7. 0. 4.]] + # [[5. 8. 9. 5.] + # [4. 7. 7. 9.] + # [6. 9. 2. 6.]]] + print(out3.numpy()) + #[[[0. 0. 1. 4.] + # [5. 8. 2. 5.] + # [6. 9. 9. 7.]] + # [[1. 2. 0. 2.] + # [4. 7. 4. 6.] + # [5. 7. 7. 9.]]] diff --git a/doc/fluid/api_cn/tensor_cn/split_cn.rst b/doc/fluid/api_cn/tensor_cn/split_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d7421732749cd2cb0892d98371d2ca6d0da50540 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/split_cn.rst @@ -0,0 +1,52 @@ +.. _cn_api_paddle_tensor_split +split +------------------------------- + +.. 
py:function:: paddle.tensor.split(x, num_or_sections, axis=0, name=None) + + + +该OP将输入Tensor分割成多个子Tensor。 + +**参数**: + - **x** (Tensor) - 输入变量,数据类型为bool, float16, float32,float64,int32,int64的多维Tensor。 + - **num_or_sections** (int|list|tuple) - 如果 ``num_or_sections`` 是一个整数,则表示Tensor平均划分为相同大小子Tensor的数量。如果 ``num_or_sections`` 是一个list或tuple,那么它的长度代表子Tensor的数量,它的元素可以是整数或者形状为[1]的Tensor,依次代表子Tensor需要分割成的维度的大小。list或tuple的长度不能超过输入Tensor待分割的维度的大小。在list或tuple中,至多有一个元素值为-1,表示该值是由 ``x`` 的维度和其他 ``num_or_sections`` 中元素推断出来的。例如对一个维度为[4,6,6]Tensor的第三维进行分割时,指定 ``num_or_sections=[2,-1,1]`` ,输出的三个Tensor维度分别为:[4,6,2],[4,6,3],[4,6,1]。 + - **axis** (int|Tensor,可选) - 整数或者形状为[1]的Tensor,数据类型为int32或int64。表示需要分割的维度。如果 ``axis < 0`` ,则划分的维度为 ``rank(x) + axis`` 。默认值为0。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:分割后的Tensor列表。 + + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle + + paddle.disable_static() + # x is a Tensor whose shape is [3, 9, 5] + x_np = np.random.random([3, 9, 5]).astype("int32") + x = paddle.to_tensor(x_np) + + out0, out1, out2 = paddle.split(x, num_or_sections=3, axis=1) + # out0.shape [3, 3, 5] + # out1.shape [3, 3, 5] + # out2.shape [3, 3, 5] + + out0, out1, out2 = paddle.split(x, num_or_sections=[2, 3, 4], axis=1) + # out0.shape [3, 2, 5] + # out1.shape [3, 3, 5] + # out2.shape [3, 4, 5] + + out0, out1, out2 = paddle.split(x, num_or_sections=[2, 3, -1], axis=1) + # out0.shape [3, 2, 5] + # out1.shape [3, 3, 5] + # out2.shape [3, 4, 5] + + # axis is negative, the real axis is (rank(x) + axis) which real + # value is 1. + out0, out1, out2 = paddle.split(x, num_or_sections=3, axis=-2) + # out0.shape [3, 3, 5] + # out1.shape [3, 3, 5] + # out2.shape [3, 3, 5] diff --git a/doc/fluid/api_cn/tensor_cn/sqrt_cn.rst b/doc/fluid/api_cn/tensor_cn/sqrt_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..18067626a15099296f4580d110863fc7f639f907 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/sqrt_cn.rst @@ -0,0 +1,38 @@ +.. _cn_api_tensor_sqrt: + +sqrt +------------------------------- + +.. py:function:: paddle.sqrt(x, name=None) + +:alias_main: paddle.sqrt +:alias: paddle.sqrt,paddle.tensor.sqrt,paddle.tensor.math.sqrt +:update_api: paddle.fluid.layers.sqrt + + + +计算输入的算术平方根。 + +.. math:: + out=\sqrt x=x^{1/2} + +.. note:: + 请确保输入中的数值是非负数。 + +参数: + + - **x** (Tensor) - 支持任意维度的Tensor。数据类型为float32,float64或float16。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:返回类型为Tensor, 数据类型同输入一致。 + +**代码示例**: + +.. code-block:: python + + import paddle + paddle.disable_static() + x = paddle.to_tensor([0.1, 0.2, 0.3, 0.4]) + out = paddle.sqrt(x) + print(out.numpy()) + # [0.31622777 0.4472136 0.54772256 0.63245553] diff --git a/doc/fluid/api_cn/tensor_cn/square_cn.rst b/doc/fluid/api_cn/tensor_cn/square_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..be30b04f93f3d5f874d7a79abb3d7182d18f8abe --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/square_cn.rst @@ -0,0 +1,35 @@ +.. _cn_api_tensor_cn_square: + +square +------------------------------- + +.. py:function:: paddle.square(x,name=None) + + + + +该OP执行逐元素取平方运算。 + +.. math:: + out = x^2 + +参数: + - **x** (Tensor) - 输入的 `Tensor` ,数据类型为:float32、float64, float16, int32, int64。 + - **name** (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +返回: + - Tensor,对输入x取平方后的Tensor,形状、数据类型与输入x一致。 + + +**代码示例**: + +.. 
code-block:: python + + import numpy as np + import paddle + paddle.disable_static() + x_data = np.array([-0.4, -0.2, 0.1, 0.3]) + x = paddle.to_tensor(x_data) + out = paddle.square(x) + print(out.numpy()) + # [0.16 0.04 0.01 0.09] diff --git a/doc/fluid/api_cn/tensor_cn/squeeze_cn.rst b/doc/fluid/api_cn/tensor_cn/squeeze_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a188e8c431546227dd7246d7684331271dcb185d --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/squeeze_cn.rst @@ -0,0 +1,63 @@ +.. _cn_api_paddle_tensor_squeeze +squeeze +------------------------------- + +.. py:function:: paddle.tensor.squeeze(x, axis=None, name=None) + + +该OP会删除输入Tensor的Shape中尺寸为1的维度。如果指定了axis,则会删除axis中指定的尺寸为1的维度。如果没有指定axis,那么所有等于1的维度都会被删除。 + +.. code-block:: text + + Case 1: + + Input: + x.shape = [1, 3, 1, 5] # If axis is not provided, all dims equal of size 1 will be removed. + axis = None + Output: + out.shape = [3, 5] + + Case 2: + + Input: + x.shape = [1, 3, 1, 5] # If axis is provided, it will remove the dimension(s) by given axis that of size 1. + axis = 0 + Output: + out.shape = [3, 1, 5] + + Case 3: + + Input: + x.shape = [1, 3, 1, 5] # If the dimension of one given axis (3) is not of size 1, the dimension remain unchanged. + axis = [0, 2, 3] + Output: + out.shape = [3, 5] + + Case 4: + + Input: + x.shape = [1, 3, 1, 5] # If axis is negative, axis = axis + ndim (number of dimensions in x). + axis = [-2] + Output: + out.shape = [1, 3, 5] + +**参数**: + - **x** (Tensor) - 输入的 `Tensor` ,数据类型为:float32、float64、bool、int8、int32、int64。 + - **axis** (int|list|tuple, 可选) - 输入一个或一列整数,代表要压缩的轴。axis的范围: [−ndim(x), ndim(x))] 。 如果axis为负数, 则axis=axis+ndim(x) 。 + - **name** (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +**返回**:返回对维度进行压缩后的Tensor,数据类型与输入Tensor一致。 + +**返回类型**:Tensor + +**代码示例**: + +.. code-block:: python + + import paddle + + paddle.disable_static() + + x = paddle.rand([5, 1, 10]) + output = paddle.squeeze(x, axis=1) + # output.shape [5, 10] diff --git a/doc/fluid/api_cn/tensor_cn/stack_cn.rst b/doc/fluid/api_cn/tensor_cn/stack_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..33953b9821290741b713d1a2eedcee1432522075 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/stack_cn.rst @@ -0,0 +1,80 @@ +.. _cn_api_paddle_tensor_arange +stack +------------------------------- + +.. py:function:: paddle.tensor.stack(x, axis=0, name=None) + + + +该OP沿 axis 轴对输入 x 进行堆叠操作。要求所有输入Tensor有相同的Shape和数据类型。 +例如,输入 x 为 N 个 Shape 为 [A, B]的 Tensor, 如果 ``axis==0`` , 则输出 Tensor 的 Shape 为 [N, A, B]; 如果 ``axis==1`` , 则输出 Tensor 的 Shape 为 [A, N, B]; 以此类推。 + +.. code-block:: text + + Case 1: + + Input: + x[0].shape = [1, 2] + x[0].data = [ [1.0 , 2.0 ] ] + x[1].shape = [1, 2] + x[1].data = [ [3.0 , 4.0 ] ] + x[2].shape = [1, 2] + x[2].data = [ [5.0 , 6.0 ] ] + + Attrs: + axis = 0 + + Output: + Out.dims = [3, 1, 2] + Out.data =[ [ [1.0, 2.0] ], + [ [3.0, 4.0] ], + [ [5.0, 6.0] ] ] + + + Case 2: + + Input: + x[0].shape = [1, 2] + x[0].data = [ [1.0 , 2.0 ] ] + x[1].shape = [1, 2] + x[1].data = [ [3.0 , 4.0 ] ] + x[2].shape = [1, 2] + x[2].data = [ [5.0 , 6.0 ] ] + + + Attrs: + axis = 1 or axis = -2 # If axis = -2, axis = axis+ndim(x[0])+1 = -2+2+1 = 1. 
+ + Output: + Out.shape = [1, 3, 2] + Out.data =[ [ [1.0, 2.0] + [3.0, 4.0] + [5.0, 6.0] ] ] + +**参数**: + - **x** (list[Tensor]|tuple[Tensor]) – 输入 x 是多个Tensor,且这些Tensor的维度和数据类型必须相同。支持的数据类型: float32,float64,int32,int64。 + + - **axis** (int, 可选) – 指定对输入Tensor进行堆叠运算的轴,有效 axis 的范围是: [−(R+1),R+1],R是输入中第一个Tensor的维数。如果 axis < 0,则 axis=axis+R+1 。默认值为0。 + + - **name** (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +**返回**:堆叠运算后的Tensor,数据类型与输入Tensor相同。 + +**返回类型**:Variable + +**代码示例**: + +.. code-block:: python + + import paddle + paddle.disable_static() + x1 = paddle.to_tensor([[1.0, 2.0]]) + x2 = paddle.to_tensor([[3.0, 4.0]]) + x3 = paddle.to_tensor([[5.0, 6.0]]) + + out = paddle.stack([x1, x2, x3], axis=0) + print(out.shape) # [3, 1, 2] + print(out.numpy()) + # [[[1., 2.]], + # [[3., 4.]], + # [[5., 6.]]] diff --git a/doc/fluid/api_cn/tensor_cn/standard_normal_cn.rst b/doc/fluid/api_cn/tensor_cn/standard_normal_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ef21c863e5e9c7608cfa439d3c79f85150a942dc --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/standard_normal_cn.rst @@ -0,0 +1,56 @@ +.. _cn_api_tensor_random_standard_normal: + +standard_normal +------------------------------- + +.. py:function:: paddle.standard_normal(shape, dtype=None, name=None) + + +该OP返回符合标准正态分布(均值为0,标准差为1的正态随机分布)的随机Tensor,形状为 ``shape``,数据类型为 ``dtype``。 + +参数 +:::::::::: + - **shape** (list|tuple|Tensor) - 生成的随机Tensor的形状。如果 ``shape`` 是list、tuple,则其中的元素可以是int,或者是形状为[1]且数据类型为int32、int64的Tensor。如果 ``shape`` 是Tensor,则是数据类型为int32、int64的1-D Tensor。 + - **dtype** (str|np.dtype|core.VarDesc.VarType, 可选) - 输出Tensor的数据类型,支持float32、float64。当该参数值为None时, 输出Tensor的数据类型为float32。默认值为None. + - **name** (str, 可选) - 输出的名字。一般无需设置,默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 + +返回 +:::::::::: + Tensor:符合标准正态分布的随机Tensor,形状为 ``shape``,数据类型为 ``dtype``。 + +抛出异常 +:::::::::: + - ``TypeError`` - 如果 ``shape`` 的类型不是list、tuple、Tensor。 + - ``TypeError`` - 如果 ``dtype`` 不是float32、float64。 + +示例代码 +:::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + # example 1: attr shape is a list which doesn't contain Tensor. + result_1 = paddle.standard_normal(shape=[2, 3]) + # [[-2.923464 , 0.11934398, -0.51249987], + # [ 0.39632758, 0.08177969, 0.2692008 ]] + + # example 2: attr shape is a list which contains Tensor. + dim_1 = paddle.fill_constant([1], "int64", 2) + dim_2 = paddle.fill_constant([1], "int32", 3) + result_2 = paddle.standard_normal(shape=[dim_1, dim_2, 2]) + # [[[-2.8852394 , -0.25898588], + # [-0.47420555, 0.17683524], + # [-0.7989969 , 0.00754541]], + # [[ 0.85201347, 0.32320443], + # [ 1.1399018 , 0.48336947], + # [ 0.8086993 , 0.6868893 ]]] + + # example 3: attr shape is a Tensor, the data type must be int64 or int32. + var_shape = paddle.to_tensor(np.array([2, 3])) + result_3 = paddle.standard_normal(var_shape) + # [[-2.878077 , 0.17099959, 0.05111201] + # [-0.3761474, -1.044801 , 1.1870178 ]] diff --git a/doc/fluid/api_cn/tensor_cn/stanh_cn.rst b/doc/fluid/api_cn/tensor_cn/stanh_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..34f1ec15cf5129524881896991d0c7b73cf9fa4f --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/stanh_cn.rst @@ -0,0 +1,7 @@ +.. 
_cn_api_tensor_cn_stanh: + +stanh +------------------------------- +:doc_source: paddle.fluid.layers.stanh + + diff --git a/doc/fluid/api_cn/tensor_cn/std_cn.rst b/doc/fluid/api_cn/tensor_cn/std_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..eb30280cc8b88e4ced5e54e1ad62ecda4af5f5d1 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/std_cn.rst @@ -0,0 +1,37 @@ +.. _cn_api_tensor_cn_std: + +std +------------------------------- + +.. py:function:: paddle.std(x, axis=None, unbiased=True, keepdim=False, name=None) + +沿给定的轴 ``axis`` 计算 ``x`` 中元素的标准差。 + +参数 +:::::::::: + - x (Tensor) - 输入的Tensor,数据类型为:float32、float64。 + - axis (int|list|tuple, 可选) - 指定对 ``x`` 进行计算的轴。``axis`` 可以是int、list(int)、tuple(int)。如果 ``axis`` 包含多个维度,则沿着 ``axis`` 中的所有轴进行计算。``axis`` 或者其中的元素值应该在范围[-D, D)内,D是 ``x`` 的维度。如果 ``axis`` 或者其中的元素值小于0,则等价于 :math:`axis + D` 。如果 ``axis`` 是None,则对 ``x`` 的全部元素计算标准差。默认值为None。 + - unbiased (bool, 可选) - 是否使用无偏估计来计算标准差。使用 :math:`N` 来代表在 axis 上的维度,如果 ``unbiased`` 为True, 则在计算中使用 :math:`N - 1` 作为除数。为 False 时将使用 :math:`N` 作为除数。默认值为True。 + - keepdim (bool, 可选) - 是否在输出Tensor中保留减小的维度。如果 ``keepdim`` 为True,则输出Tensor和 ``x`` 具有相同的维度(减少的维度除外,减少的维度的大小为1)。否则,输出Tensor的形状会在 ``axis`` 上进行squeeze操作。默认值为False。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +:::::::::: + ``Tensor`` ,沿着 ``axis`` 进行标准差计算的结果,数据类型和 ``x`` 相同。 + +代码示例 +:::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + x = np.array([[1.0, 2.0, 3.0], [1.0, 4.0, 5.0]]) + x = paddle.to_tensor(x) + out1 = paddle.std(x) + # [1.63299316] + out2 = paddle.std(x, axis=1) + # [1. 2.081666] diff --git a/doc/fluid/api_cn/tensor_cn/strided_slice_cn.rst b/doc/fluid/api_cn/tensor_cn/strided_slice_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b4d897da20af7eb1a82dd3c1b073f0cf1e56bb77 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/strided_slice_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_strided_slice: + +strided_slice +------------------------------- +:doc_source: paddle.fluid.layers.strided_slice + + diff --git a/doc/fluid/api_cn/tensor_cn/sum_cn.rst b/doc/fluid/api_cn/tensor_cn/sum_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..47afc379c37747e1c87c2e53d0eea7276899818b --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/sum_cn.rst @@ -0,0 +1,47 @@ +.. _cn_api_tensor_sum: + +sum +------------------------------- + +.. py:function:: paddle.sum(x, axis=None, dtype=None, keepdim=False, name=None) + +该OP是对指定维度上的Tensor元素进行求和运算,并输出相应的计算结果。 + +参数: + - **x** (Tensor)- 输入变量为多维Tensor,支持数据类型为float32,float64,int32,int64。 + - **axis** (int | list | tuple ,可选)- 求和运算的维度。如果为None,则计算所有元素的和并返回包含单个元素的Tensor变量,否则必须在 :math:`[−rank(x),rank(x)]` 范围内。如果 :math:`axis[i] < 0` ,则维度将变为 :math:`rank+axis[i]` ,默认值为None。 + - **dtype** (str , 可选)- 输出变量的数据类型。若参数为空,则输出变量的数据类型和输入变量相同,默认值为None。 + - **keepdim** (bool,可选)- 是否在输出Tensor中保留减小的维度。如果 ``keepdim`` 为True,则保留被求和的维度(其大小为1);否则输出Tensor的维度将比输入Tensor小。默认值为False。 + - **name** (str , 可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回: + ``Tensor``,在指定维度上进行求和运算的Tensor,数据类型和输入数据类型一致。 + + +**代码示例** + +.. code-block:: python + + import numpy as np + import paddle + paddle.disable_static() + + # x is a Tensor variable with following elements: + # [[0.2, 0.3, 0.5, 0.9] + # [0.1, 0.2, 0.6, 0.7]] + # Each example is followed by the corresponding output tensor. 
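+    # Illustrative note (not part of the original sample): the `dtype` argument documented above can widen the accumulation type, e.g. `out = paddle.sum(x, dtype='float64')`.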
+ x_data = np.array([[0.2, 0.3, 0.5, 0.9],[0.1, 0.2, 0.6, 0.7]]).astype('float32') + x = paddle.to_tensor(x_data) + out1 = paddle.sum(x) # [3.5] + out2 = paddle.sum(x, axis=0) # [0.3, 0.5, 1.1, 1.6] + out3 = paddle.sum(x, axis=-1) # [1.9, 1.6] + out4 = paddle.sum(x, axis=1, keepdim=True) # [[1.9], [1.6]] + + # y is a Tensor variable with shape [2, 2, 2] and elements as below: + # [[[1, 2], [3, 4]], + # [[5, 6], [7, 8]]] + # Each example is followed by the corresponding output tensor. + y_data = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]).astype('float32') + y = paddle.to_tensor(y_data) + out5 = paddle.sum(y, axis=[1, 2]) # [10, 26] + out6 = paddle.sum(y, axis=[0, 1]) # [16, 20] diff --git a/doc/fluid/api_cn/tensor_cn/sums_cn.rst b/doc/fluid/api_cn/tensor_cn/sums_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7ed2833cafe7e189053c14e087214fb49b0877ef --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/sums_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_sums: + +sums +------------------------------- +:doc_source: paddle.fluid.layers.sums + + diff --git a/doc/fluid/api_cn/tensor_cn/t_cn.rst b/doc/fluid/api_cn/tensor_cn/t_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..cc36c5729ba55a76687836d71c577a7bdbf415ba --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/t_cn.rst @@ -0,0 +1,53 @@ +.. _cn_api_paddle_tensor_t: + +t +------------------------------- + +.. py:function:: paddle.tensor.t(input, name=None) + +:alias_main: paddle.t +:alias: paddle.t,paddle.tensor.t,paddle.tensor.linalg.t + + + +该OP对小于等于2维的Tensor进行数据转置。0维和1维Tensor返回本身,2维Tensor等价于perm设置为0,1的 :ref:`cn_api_fluid_layers_transpose` 函数。 + +参数: + - **input** (Variable) - 输入:N维(N<=2)Tensor,可选的数据类型为float16, float32, float64, int32, int64。 + - **name** (str, 可选)- 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None + +返回: N维Tensor + +返回类型:Variable + +**示例**: + +.. code-block:: python + + # 例1 (0-D tensor) + x = tensor([0.79]) + paddle.t(x) = tensor([0.79]) + + # 例2 (1-D tensor) + x = tensor([0.79, 0.84, 0.32]) + paddle.t(x) = tensor([0.79, 0.84, 0.32]) + + # 例3 (2-D tensor) + x = tensor([0.79, 0.84, 0.32], + [0.64, 0.14, 0.57]) + paddle.t(x) = tensor([0.79, 0.64], + [0.84, 0.14], + [0.32, 0.57]) + + +**代码示例**: + +.. code-block:: python + + import paddle + import paddle.fluid as fluid + x = fluid.data(name='x', shape=[2, 3], dtype='float32') + x_transposed = paddle.t(x) # paddle.t 等价于 paddle.tensor.t + print(x_transposed.shape) + #(3L, 2L) + diff --git a/doc/fluid/api_cn/tensor_cn/tanh_cn.rst b/doc/fluid/api_cn/tensor_cn/tanh_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ee81756c3cf07006510b44204f56ebdc43ffa442 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/tanh_cn.rst @@ -0,0 +1,37 @@ +.. _cn_api_tensor_tanh: + +tanh +------------------------------- + +.. py:function:: paddle.tanh(x, name=None) + + +tanh 激活函数 + +.. math:: + out = \frac{e^{x} - e^{-x}}{e^{x} + e^{-x}} + +参数: + + - **x** (Tensor) - Tanh算子的输入, 多维Tensor,数据类型为 float16,float32或float64。 + - **name** (str, 可选) - 该层名称(可选,默认为None)。具体用法请参见 :ref:`api_guide_Name`。 + +返回: tanh的输出Tensor,和输入有着相同类型和shape。 + +返回类型: Tensor + +**代码示例**: + +.. 
code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + x_data = np.array([-0.4, -0.2, 0.1, 0.3]) + x = paddle.to_tensor(x_data) + out = paddle.tanh(x) + print(out.numpy()) + # [-0.37994896 -0.19737532 0.09966799 0.29131261] + diff --git a/doc/fluid/api_cn/tensor_cn/tensordot_cn.rst b/doc/fluid/api_cn/tensor_cn/tensordot_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c6fc45ede4fe40099f5b3a0e1876728c1ad92f6b --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/tensordot_cn.rst @@ -0,0 +1,3 @@ +tensordot +------------------------------- +**版本升级,文档正在开发中** diff --git a/doc/fluid/api_cn/tensor_cn/tile_cn.rst b/doc/fluid/api_cn/tensor_cn/tile_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3d8e989d4fbf50ce7e87148fd101adc417b567e5 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/tile_cn.rst @@ -0,0 +1,45 @@ +.. _cn_api_tensor_tile: + +tile +------------------------------- + +.. py:function:: paddle.tile(x, repeat_times, name=None) + +根据参数 ``repeat_times`` 对输入 ``x`` 的各维度进行复制。 + +``x`` 的维数和 ``repeat_times`` 中的元素数量应小于等于6,并且repeat_times中的元素数量应该小于等于6。 + +参数 +::::::::: + - x (Tensor) - 输入的Tensor,数据类型为:bool、float32、float64、int32或int64。 + - repeat_times (list|tuple|Tensor) - 指定输入 ``x`` 每个维度的复制次数。如果 ``repeat_times`` 的类型是list或tuple,它的元素可以是整数或者数据类型为int32的1-D Tensor。如果 ``repeat_times`` 的类型是Tensor,则是数据类型为int32的1-D Tensor。 + - name (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +返回 +::::::::: +``Tensor`` ,数据类型与 ``x`` 相同。返回值的第i维的大小等于 ``x[i] * repeat_times[i]`` 。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + np_data = np.array([1, 2, 3]).astype('int32') + data = paddle.to_tensor(np_data) + out = paddle.tile(data, repeat_times=[2, 1]) + np_out = out.numpy() + # [[1, 2, 3], [1, 2, 3]] + + out = paddle.tile(data, repeat_times=[2, 2]) + np_out = out.numpy() + # [[1, 2, 3, 1, 2, 3], [1, 2, 3, 1, 2, 3]] + + np_repeat_times = np.array([2, 1]).astype("int32") + repeat_times = paddle.to_tensor(np_repeat_times) + out = paddle.tile(data, repeat_times=repeat_times) + np_out = out.numpy() + # [[1, 2, 3], [1, 2, 3]] diff --git a/doc/fluid/api_cn/tensor_cn/topk_cn.rst b/doc/fluid/api_cn/tensor_cn/topk_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..47d73f11b02224ad073f1b5fe53fd9a85cddfe8b --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/topk_cn.rst @@ -0,0 +1,63 @@ +.. _cn_api_tensor_cn_topk: + +topk +------------------------------- + +.. py:function:: paddle.topk(x, k, axis=None, largest=True, sorted=True, name=None) + +该OP沿着可选的 ``axis`` 查找topk最大或者最小的结果和结果所在的索引信息。 +如果是一维Tensor,则直接返回topk查询的结果。如果是多维Tensor,则在指定的轴上查询topk的结果。 + +参数 +::::::::: + - **x** (Tensor) - 输入的多维 ``Tensor`` ,支持的数据类型:float32、float64、int32、int64。 + - **k** (int,Tensor) - 在指定的轴上进行top寻找的数量。 + - **axis** (int,可选) - 指定对输入Tensor进行运算的轴, ``axis`` 的有效范围是[-R, R),R是输入 ``x`` 的Rank, ``axis`` 为负时与 ``axis`` + R 等价。默认值为-1。 + - **largest** (bool,可选) - 指定算法排序的方向。如果设置为True,排序算法按照降序的算法排序,否则按照升序排序。默认值为True。 + - **sorted** (bool,可选) - 控制返回的结果是否按照有序返回,默认为True。在gpu上总是返回有序的结果。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回 +::::::::: +tuple(Tensor), 返回topk的结果和结果的索引信息。结果的数据类型和输入 ``x`` 一致。索引的数据类型是int64。 + +代码示例 +::::::::: + + +.. 
code-block:: python + + import numpy as np + import paddle + + paddle.disable_static() + + data_1 = np.array([1, 4, 5, 7]) + tensor_1 = paddle.to_tensor(data_1) + value_1, indices_1 = paddle.topk(tensor_1, k=1) + print(value_1.numpy()) + # [7] + print(indices_1.numpy()) + # [3] + data_2 = np.array([[1, 4, 5, 7], [2, 6, 2, 5]]) + tensor_2 = paddle.to_tensor(data_2) + value_2, indices_2 = paddle.topk(tensor_2, k=1) + print(value_2.numpy()) + # [[7] + # [6]] + print(indices_2.numpy()) + # [[3] + # [1]] + value_3, indices_3 = paddle.topk(tensor_2, k=1, axis=-1) + print(value_3.numpy()) + # [[7] + # [6]] + print(indices_3.numpy()) + # [[3] + # [1]] + value_4, indices_4 = paddle.topk(tensor_2, k=1, axis=0) + print(value_4.numpy()) + # [[2 6 5 7]] + print(indices_4.numpy()) + # [[1 1 0 0]] + diff --git a/doc/fluid/api_cn/tensor_cn/trace_cn.rst b/doc/fluid/api_cn/tensor_cn/trace_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..53fb3edc54ffac22508d792ea34971c85d50b471 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/trace_cn.rst @@ -0,0 +1,53 @@ +.. _cn_api_tensor_trace: + +trace +------------------------------- + +.. py:function:: paddle.trace(x, offset=0, axis1=0, axis2=1, name=None) + +:alias_main: paddle.trace +:alias: paddle.trace, paddle.tensor.trace, paddle.tensor.math.trace + + + +该 OP 计算输入 Tensor 在指定平面上的对角线元素之和,并输出相应的计算结果。 + +如果输入是 2D Tensor,则返回对角线元素之和。 + +如果输入的维度大于 2D,则返回一个由对角线元素之和组成的数组,其中对角线从由 axis1 和 axis2 指定的二维平面中获得。默认由输入的前两维组成获得对角线的 2D 平面。 + +参数 ``offset`` 确定从指定的二维平面中获取对角线的位置: + + - 如果 offset = 0,则取主对角线。 + - 如果 offset > 0,则取主对角线右上的对角线。 + - 如果 offset < 0,则取主对角线左下的对角线。 + +参数: + - **x** (Variable)- 输入张量,至少为 2D 数组,支持数据类型为 float32,float64,int32,int64。 + - **offset** (int ,可选)- 从指定的二维平面中获取对角线的位置,默认值为 0,即主对角线。 + - **axis1** (int , 可选)- 获取对角线的二维平面的第一维,默认值为 0。 + - **axis2** (int , 可选)- 获取对角线的二维平面的第二维,默认值为 1。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回: 指定二维平面的对角线元素之和。数据类型和输入数据类型一致。 + +返回类型: 变量(Variable) + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + case1 = np.random.randn(2, 3).astype('float32') + case2 = np.random.randn(3, 10, 10).astype('float32') + case3 = np.random.randn(3, 10, 5, 10).astype('float32') + + paddle.enable_imperative() + case1 = paddle.imperative.to_variable(case1) + case2 = paddle.imperative.to_variable(case2) + case3 = paddle.imperative.to_variable(case3) + data1 = paddle.trace(case1) # data1.shape = [1] + data2 = paddle.trace(case2, offset=1, axis1=1, axis2=2) # data2.shape = [3] + data3 = paddle.trace(case3, offset=-3, axis1=1, axis2=-1) # data3.shape = [3, 5] diff --git a/doc/fluid/api_cn/tensor_cn/transpose_cn.rst b/doc/fluid/api_cn/tensor_cn/transpose_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7ade0a6924cb2857cac0104137db82c026753cf9 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/transpose_cn.rst @@ -0,0 +1,3 @@ +transpose +------------------------------- +**版本升级,文档正在开发中** diff --git a/doc/fluid/api_cn/tensor_cn/tril_cn.rst b/doc/fluid/api_cn/tensor_cn/tril_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6ee8e4195d54432c58e7a024743ceda84334a23c --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/tril_cn.rst @@ -0,0 +1,63 @@ +.. _cn_api_tensor_tril: + +tril +------------------------------- + +.. 
py:function:: paddle.tensor.tril(input, diagonal=0, name=None) + +:alias_main: paddle.tril +:alias: paddle.tril,paddle.tensor.tril,paddle.tensor.creation.tril + + + +返回输入矩阵 `input` 的下三角部分,其余部分被设为0。 +矩形的下三角部分被定义为对角线上和下方的元素。 + +参数: + - **input** (Variable) : 输入Tensor input,数据类型支持 `float32`, `float64`, `int32`, `int64` 。 + - **diagonal** (int,可选) : 指定的对角线,默认值为0。如果diagonal = 0,表示主对角线; 如果diagonal是正数,表示主对角线之上的对角线; 如果diagonal是负数,表示主对角线之下的对角线。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:计算得到的Tensor。Tensor数据类型与输入 `input` 数据类型一致。 + +返回类型:Variable + + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle.tensor as tensor + import paddle.fluid as fluid + + data = np.arange(1, 13, dtype="int64").reshape(3,-1) + # array([[ 1, 2, 3, 4], + # [ 5, 6, 7, 8], + # [ 9, 10, 11, 12]]) + x = fluid.data(shape=(-1, 4), dtype='int64', name='x') + exe = fluid.Executor(fluid.CPUPlace()) + + # example 1, default diagonal + tril = tensor.tril(x) + tril_out, = exe.run(fluid.default_main_program(), feed={"x": data}, + fetch_list=[tril], return_numpy=True) + # array([[ 1, 0, 0, 0], + # [ 5, 6, 0, 0], + # [ 9, 10, 11, 0]]) + + # example 2, positive diagonal value + tril = tensor.tril(x, diagonal=2) + tril_out, = exe.run(fluid.default_main_program(), feed={"x": data}, + fetch_list=[tril], return_numpy=True) + # array([[ 1, 2, 3, 0], + # [ 5, 6, 7, 8], + # [ 9, 10, 11, 12]]) + + # example 3, negative diagonal value + tril = tensor.tril(x, diagonal=-1) + tril_out, = exe.run(fluid.default_main_program(), feed={"x": data}, + fetch_list=[tril], return_numpy=True) + # array([[ 0, 0, 0, 0], + # [ 5, 0, 0, 0], + # [ 9, 10, 0, 0]]) diff --git a/doc/fluid/api_cn/tensor_cn/triu_cn.rst b/doc/fluid/api_cn/tensor_cn/triu_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..06775bf379c63cf31ec540bb28e62c9909636e37 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/triu_cn.rst @@ -0,0 +1,64 @@ +.. _cn_api_tensor_triu: + +triu +------------------------------- + +.. py:function:: paddle.tensor.triu(input, diagonal=0, name=None) + +:alias_main: paddle.triu +:alias: paddle.triu,paddle.tensor.triu,paddle.tensor.creation.triu + + + +返回输入矩阵 `input` 的上三角部分,其余部分被设为0。 +矩形的上三角部分被定义为对角线上和上方的元素。 + +参数: + - **input** (Variable) : 输入Tensor input,数据类型支持 `float32`, `float64`, `int32`, `int64` 。 + - **diagonal** (int,可选) : 指定的对角线,默认值为0。如果diagonal = 0,表示主对角线; 如果diagonal是正数,表示主对角线之上的对角线; 如果diagonal是负数,表示主对角线之下的对角线。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:计算得到的Tensor。Tensor数据类型与输入 `input` 数据类型一致。 + +返回类型:Variable + + +**代码示例**: + +.. 
code-block:: python + + import numpy as np + import paddle.fluid as fluid + import paddle.tensor as tensor + + data = np.arange(1, 13, dtype="int64").reshape(3,-1) + # array([[ 1, 2, 3, 4], + # [ 5, 6, 7, 8], + # [ 9, 10, 11, 12]]) + x = fluid.data(shape=(-1, 4), dtype='int64', name='x') + exe = fluid.Executor(fluid.CPUPlace()) + + # example 1, default diagonal + triu = tensor.triu(x) + triu_out, = exe.run(fluid.default_main_program(), feed={"x": data}, + fetch_list=[triu], return_numpy=True) + # array([[ 1, 2, 3, 4], + # [ 0, 6, 7, 8], + # [ 0, 0, 11, 12]]) + + # example 2, positive diagonal value + triu = tensor.triu(x, diagonal=2) + triu_out, = exe.run(fluid.default_main_program(), feed={"x": data}, + fetch_list=[triu], return_numpy=True) + # array([[0, 0, 3, 4], + # [0, 0, 0, 8], + # [0, 0, 0, 0]]) + + # example 3, negative diagonal value + triu = tensor.triu(x, diagonal=-1) + triu_out, = exe.run(fluid.default_main_program(), feed={"x": data}, + fetch_list=[triu], return_numpy=True) + # array([[ 1, 2, 3, 4], + # [ 5, 6, 7, 8], + # [ 0, 10, 11, 12]]) + diff --git a/doc/fluid/api_cn/tensor_cn/unbind_cn.rst b/doc/fluid/api_cn/tensor_cn/unbind_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1ed191175d7e85592225ffc3c19c8faeda8c83d2 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/unbind_cn.rst @@ -0,0 +1,38 @@ +.. _cn_api_paddle_tensor_unbind +unbind +------------------------------- + +.. py:function:: paddle.tensor.unbind(input, axis=0) + +:alias_main: paddle.unbind +:alias: paddle.unbind,paddle.tensor.unbind,paddle.tensor.manipulation.unbind + + + +该OP将输入Tensor按照指定的维度分割成多个子Tensor。 + +**参数**: + - **input** (Variable) - 输入变量,数据类型为float32,float64,int32,int64的多维Tensor。 + - **axis** (int32|int64,可选) - 数据类型为int32或int64,表示需要分割的维度。如果axis < 0,则划分的维度为rank(input) + axis。默认值为0。 + +**返回**:分割后的Tensor列表。 + +**返回类型**:列表(Variable),数据类型为int32,int64,float32,float64。 + +**代码示例**: + +.. code-block:: python + + import paddle + # input is a variable which shape is [3, 4, 5] + input = paddle.fluid.data( + name="input", shape=[3, 4, 5], dtype="float32") + [x0, x1, x2] = paddle.tensor.unbind(input, axis=0) + # x0.shape [4, 5] + # x1.shape [4, 5] + # x2.shape [4, 5] + [x0, x1, x2, x3] = paddle.tensor.unbind(input, axis=1) + # x0.shape [3, 5] + # x1.shape [3, 5] + # x2.shape [3, 5] + # x3.shape [3, 5] diff --git a/doc/fluid/api_cn/tensor_cn/uniform_cn.rst b/doc/fluid/api_cn/tensor_cn/uniform_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f5996a72d4f88a38ad0d71fc84b002c3a69bbd2d --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/uniform_cn.rst @@ -0,0 +1,77 @@ +.. _cn_api_tensor_uniform: + +uniform +------------------------------- + +.. 
py:function:: paddle.uniform(shape, dtype='float32', min=-1.0, max=1.0, seed=0, name=None) + + + + +该OP返回数值服从范围[``min``, ``max``)内均匀分布的随机Tensor,形状为 ``shape``,数据类型为 ``dtype``。 + +:: + + 示例1: + 给定: + shape=[1,2] + 则输出为: + result=[[0.8505902, 0.8397286]] + +参数: + - **shape** (list|tuple|Tensor) - 生成的随机Tensor的形状。如果 ``shape`` 是list、tuple,则其中的元素可以是int,或者是形状为[1]且数据类型为int32、int64的Tensor。如果 ``shape`` 是Tensor,则是数据类型为int32、int64的1-D Tensor。 + - **dtype** (str|np.dtype, 可选) - 输出Tensor的数据类型,支持float32、float64。默认值为float32。 + - **min** (float|int,可选) - 要生成的随机值范围的下限,min包含在范围中。支持的数据类型:float、int。默认值为-1.0。 + - **max** (float|int,可选) - 要生成的随机值范围的上限,max不包含在范围中。支持的数据类型:float、int。默认值为1.0。 + - **seed** (int,可选) - 随机种子,用于生成样本。0表示使用系统生成的种子。注意如果种子不为0,该操作符每次都生成同样的随机数。支持的数据类型:int。默认为 0。 + - **name** (str, 可选) - 输出的名字。一般无需设置,默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 + +返回: + Tensor:数值服从范围[``min``, ``max``)内均匀分布的随机Tensor,形状为 ``shape``,数据类型为 ``dtype``。 + +抛出异常: + - ``TypeError`` - 如果 ``shape`` 的类型不是list、tuple、Tensor。 + - ``TypeError`` - 如果 ``dtype`` 不是float32、float64。 + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle + + paddle.disable_static() + + # example 1: + # attr shape is a list which doesn't contain Tensor. + result_1 = paddle.uniform(shape=[3, 4]) + # [[ 0.84524226, 0.6921872, 0.56528175, 0.71690357], + # [-0.34646994, -0.45116323, -0.09902662, -0.11397249], + # [ 0.433519, 0.39483607, -0.8660099, 0.83664286]] + + # example 2: + # attr shape is a list which contains Tensor. + dim_1 = paddle.fill_constant([1], "int64", 2) + dim_2 = paddle.fill_constant([1], "int32", 3) + result_2 = paddle.uniform(shape=[dim_1, dim_2]) + # [[-0.9951253, 0.30757582, 0.9899647 ], + # [ 0.5864527, 0.6607096, -0.8886161 ]] + + # example 3: + # attr shape is a Tensor, the data type must be int64 or int32. + shape = np.array([2, 3]) + shape_tensor = paddle.to_tensor(shape) + + result_3 = paddle.uniform(shape_tensor) + # if shape_tensor's value is [2, 3] + # result_3 is: + # [[-0.8517412, -0.4006908, 0.2551912 ], + # [ 0.3364414, 0.36278176, -0.16085452]] + + + + + + + + diff --git a/doc/fluid/api_cn/tensor_cn/unique_cn.rst b/doc/fluid/api_cn/tensor_cn/unique_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f824b73ee70c000258ed8bcd2b83e8e270ba406f --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/unique_cn.rst @@ -0,0 +1,53 @@ +.. _cn_api_tensor_cn_unique: + +unique +------------------------------- + +.. py:function:: paddle.unique(x, return_index=False, return_inverse=False, return_counts=False, axis=None, dtype="int64", name=None) + +返回Tensor按升序排序后的独有元素。 + +参数: + - **x** (Tensor) - 输入的 `Tensor` ,数据类型为:float32、float64、int32、int64。 + - **return_index** (bool, 可选) - 如果为True,则还返回独有元素在输入Tensor中的索引。 + - **return_inverse** (bool, 可选) - 如果为True,则还返回输入Tensor的元素对应在独有元素中的索引,该索引可用于重构输入Tensor。 + - **return_counts** (bool, 可选) - 如果为True,则还返回每个独有元素在输入Tensor中的个数。 + - **axis** (int, 可选) - 指定选取独有元素的轴。默认值为None,将输入平铺为1-D的Tensor后再选取独有元素。 + - **dtype** (np.dtype|str, 可选) - 用于设置 `index`,`inverse` 或者 `counts` 的类型,应该为int32或者int64。默认:int64. + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回: + - **out** (Tensor) - 独有元素构成的Tensor,数据类型与输入一致。 + - **index** (Tensor, 可选) - 独有元素在输入Tensor中的索引,仅在 `return_index` 为True时返回。 + - **inverse** (Tensor, 可选) - 输入Tensor的元素对应在独有元素中的索引,仅在 `return_inverse` 为True时返回。 + - **counts** (Tensor, 可选) - 每个独有元素在输入Tensor中的个数,仅在 `return_counts` 为True时返回。 + +**代码示例**: + +.. 
code-block:: python + + import numpy as np + import paddle + + paddle.disable_static() + x_data = np.array([2, 3, 3, 1, 5, 3]) + x = paddle.to_tensor(x_data) + unique = paddle.unique(x) + np_unique = unique.numpy() # [1 2 3 5] + _, indices, inverse, counts = paddle.unique(x, return_index=True, return_inverse=True, return_counts=True) + np_indices = indices.numpy() # [3 0 1 4] + np_inverse = inverse.numpy() # [1 2 2 0 3 2] + np_counts = counts.numpy() # [1 1 3 1] + + x_data = np.array([[2, 1, 3], [3, 0, 1], [2, 1, 3]]) + x = paddle.to_tensor(x_data) + unique = paddle.unique(x) + np_unique = unique.numpy() # [0 1 2 3] + + unique = paddle.unique(x, axis=0) + np_unique = unique.numpy() + # [[2 1 3] + # [3 0 1]] + + + diff --git a/doc/fluid/api_cn/tensor_cn/unique_with_counts_cn.rst b/doc/fluid/api_cn/tensor_cn/unique_with_counts_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..54761def0f7d336f5d7b63a2ad93e2665bc55b66 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/unique_with_counts_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_unique_with_counts: + +unique_with_counts +------------------------------- +:doc_source: paddle.fluid.layers.unique_with_counts + + diff --git a/doc/fluid/api_cn/tensor_cn/unsqueeze_cn.rst b/doc/fluid/api_cn/tensor_cn/unsqueeze_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e8dbe24681b39af44735fa7a651bc85f09c8f9dc --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/unsqueeze_cn.rst @@ -0,0 +1,36 @@ +.. _cn_api_paddle_tensor_unsqueeze +unsqueeze +------------------------------- + +.. py:function:: paddle.tensor.unsqueeze(x, axis, name=None) + +该OP向输入Tensor的Shape中一个或多个位置(axis)插入尺寸为1的维度。 + +**参数**: + - **x** (Variable)- 输入的 `Tensor` ,数据类型为:float32、float64、bool、int8、int32、int64。 + - **axis** (int|list|tuple|Tensor) - 表示要插入维度的位置。数据类型是 int32 。如果 axis 的类型是 list 或 tuple,它的元素可以是整数或者形状为[1]的 Tensor 。如果 axes 的类型是 Tensor,则是1-D Tensor。如果 axis 是负数,则 axis=axis+ndim(x)+1 。 + - **name** (str,可选)- 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +**返回**:扩展维度后的多维Tensor,数据类型与输入Tensor一致。 + +**返回类型**:Tensor + +**代码示例**: + +.. code-block:: python + + import paddle + + paddle.disable_static() + x = paddle.rand([5, 10]) + print(x.shape) # [5, 10] + + out1 = paddle.unsqueeze(x, axis=0) + print(out1.shape) # [1, 5, 10] + + out2 = paddle.unsqueeze(x, axis=[0, 2]) + print(out2.shape) # [1, 5, 1, 10] + + axis = paddle.fluid.dygraph.to_variable([0, 1, 2]) + out3 = paddle.unsqueeze(x, axis=axis) + print(out3.shape) # [1, 1, 1, 5, 10] diff --git a/doc/fluid/api_cn/tensor_cn/unstack_cn.rst b/doc/fluid/api_cn/tensor_cn/unstack_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a85a894bcd8ca537e1d7c1e47f267a8ca5be097f --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/unstack_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_unstack: + +unstack +------------------------------- +:doc_source: paddle.fluid.layers.unstack + + diff --git a/doc/fluid/api_cn/tensor_cn/var_cn.rst b/doc/fluid/api_cn/tensor_cn/var_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c44af2301a4ee0d192ec66738433014fced92eb2 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/var_cn.rst @@ -0,0 +1,37 @@ +.. _cn_api_tensor_cn_var: + +var +------------------------------- + +.. 
py:function:: paddle.var(x, axis=None, unbiased=True, keepdim=False, name=None) + +沿给定的轴 ``axis`` 计算 ``x`` 中元素的方差。 + +参数 +:::::::::: + - x (Tensor) - 输入的Tensor,数据类型为:float32、float64。 + - axis (int|list|tuple, 可选) - 指定对 ``x`` 进行计算的轴。``axis`` 可以是int、list(int)、tuple(int)。如果 ``axis`` 包含多个维度,则沿着 ``axis`` 中的所有轴进行计算。``axis`` 或者其中的元素值应该在范围[-D, D)内,D是 ``x`` 的维度。如果 ``axis`` 或者其中的元素值小于0,则等价于 :math:`axis + D` 。如果 ``axis`` 是None,则对 ``x`` 的全部元素计算方差。默认值为None。 + - unbiased (bool, 可选) - 是否使用无偏估计来计算方差。使用 :math:`N` 来代表在 axis 上的维度,如果 ``unbiased`` 为True, 则在计算中使用 :math:`N - 1` 作为除数。为 False 时将使用 :math:`N` 作为除数。默认值为True。 + - keepdim (bool, 可选) - 是否在输出Tensor中保留减小的维度。如果 ``keepdim`` 为True,则输出Tensor和 ``x`` 具有相同的维度(减少的维度除外,减少的维度的大小为1)。否则,输出Tensor的形状会在 ``axis`` 上进行squeeze操作。默认值为False。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +:::::::::: + ``Tensor`` ,沿着 ``axis`` 进行方差计算的结果,数据类型和 ``x`` 相同。 + +代码示例 +:::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + x = np.array([[1.0, 2.0, 3.0], [1.0, 4.0, 5.0]]) + x = paddle.to_tensor(x) + out1 = paddle.var(x) + # [2.66666667] + out2 = paddle.var(x, axis=1) + # [1. 4.33333333] diff --git a/doc/fluid/api_cn/tensor_cn/where_cn.rst b/doc/fluid/api_cn/tensor_cn/where_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..bcfabfab33a23f1e56e23bc922c59b965564155e --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/where_cn.rst @@ -0,0 +1,53 @@ +.. _cn_api_tensor_where: + +where +------------------------------- + +.. py:function:: paddle.where(condition, x, y, name=None) + +:alias_main: paddle.where +:alias: paddle.where,paddle.tensor.where,paddle.tensor.search.where +:update_api: paddle.fluid.layers.cond + + + +该OP返回一个根据输入 ``condition``, 选择 ``x`` 或 ``y`` 的元素组成的多维 ``Tensor`` : + +.. math:: + Out_i = + \left\{ + \begin{aligned} + &X_i, & & if \ cond_i \ is \ True \\ + &Y_i, & & if \ cond_i \ is \ False \\ + \end{aligned} + \right. + +参数: + - **condition** (Variable)- 选择 ``x`` 或 ``y`` 元素的条件 。 + - **x** (Variable)- 多维 ``Tensor`` ,数据类型为 ``float32`` 或 ``float64`` 或 ``int32`` 或 ``int64`` 。 + - **y** (Variable)- 多维 ``Tensor`` ,数据类型为 ``float32`` 或 ``float64`` 或 ``int32`` 或 ``int64`` 。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:数据类型与 ``x`` 相同的 ``Tensor`` 。 + +返回类型:Variable。 + + +**代码示例:** + +.. code-block:: python + + import paddle + import numpy as np + import paddle.fluid as fluid + + x_i = np.array([0.9383, 0.1983, 3.2, 1.2]).astype("float32") + y_i = np.array([1.0, 1.0, 1.0, 1.0]).astype("float32") + + with fluid.dygraph.guard(): + x = fluid.dygraph.to_variable(x_i) + y = fluid.dygraph.to_variable(y_i) + out = paddle.where(x>1, x, y) + + print(out.numpy()) + #out: [1.0, 1.0, 3.2, 1.2] diff --git a/doc/fluid/api_cn/tensor_cn/zeros_cn.rst b/doc/fluid/api_cn/tensor_cn/zeros_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..40e54e0daaeb91e9bf2d15b4f32ce65a987748e7 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/zeros_cn.rst @@ -0,0 +1,43 @@ +.. _cn_api_tensor_zeros: + +zeros +------------------------------- + +.. 
py:function:: paddle.zeros(shape, dtype=None, name=None) + + + +该OP创建形状为 ``shape`` 、数据类型为 ``dtype`` 且值全为0的Tensor。 + +参数: + - **shape** (tuple|list|Tensor) - 输出Tensor的形状, ``shape`` 的数据类型为int32或者int64。 + - **dtype** (np.dtype|core.VarDesc.VarType|str,可选) - 输出Tensor的数据类型,数据类型必须为bool、float16、float32、float64、int32或int64。若为None,数据类型为float32, 默认为None。 + - **name** (str, 可选) - 输出的名字。一般无需设置,默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 + +返回:值全为0的Tensor,数据类型和 ``dtype`` 定义的类型一致。 + +抛出异常: + - ``TypeError`` - 当 ``dtype`` 不是bool、 float16、float32、float64、int32、int64和None时。 + - ``TypeError`` - 当 ``shape`` 不是tuple、list、或者Tensor时, 当 ``shape`` 为Tensor,其数据类型不是int32或者int64时。 + +**代码示例**: + +.. code-block:: python + + import paddle + paddle.disable_static() # Now we are in imperative mode + data = paddle.zeros(shape=[3, 2], dtype='float32') + # [[0. 0.] + # [0. 0.] + # [0. 0.]] + + data = paddle.zeros(shape=[2, 2]) + # [[0. 0.] + # [0. 0.]] + + # shape is a Tensor + shape = paddle.fill_constant(shape=[2], dtype='int32', value=2) + data3 = paddle.zeros(shape=shape, dtype='int32') + # [[0 0] + # [0 0]] + diff --git a/doc/fluid/api_cn/tensor_cn/zeros_like_cn.rst b/doc/fluid/api_cn/tensor_cn/zeros_like_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7cdacfeb44ff8553cc98230f10e309340dab5cd7 --- /dev/null +++ b/doc/fluid/api_cn/tensor_cn/zeros_like_cn.rst @@ -0,0 +1,40 @@ +.. _cn_api_tensor_zeros_like: + +zeros_like +------------------------------- + +.. py:function:: paddle.zeros_like(x, dtype=None, name=None) + +:alias_main: paddle.zeros_like +:alias: paddle.tensor.zeros_like, paddle.tensor.creation.zeros_like +:update_api: paddle.fluid.layers.zeros_like + +该OP返回一个和 ``x`` 具有相同的形状的全零Tensor,数据类型为 ``dtype`` 或者和 ``x`` 相同。 + +参数 +:::::::::: + - **x** (Tensor) – 输入的多维Tensor,数据类型可以是bool,float16, float32,float64,int32,int64。输出Tensor的形状和 ``x`` 相同。如果 ``dtype`` 为None,则输出Tensor的数据类型与 ``x`` 相同。 + - **dtype** (str|np.dtype|core.VarDesc.VarType, 可选) - 输出Tensor的数据类型,支持bool,float16, float32,float64,int32,int64。当该参数值为None时, 输出Tensor的数据类型与 ``x`` 相同。默认值为None. + - **name** (str, 可选) - 输出的名字。一般无需设置,默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 + +返回 +:::::::::: + Tensor:和 ``x`` 具有相同的形状全零Tensor,数据类型为 ``dtype`` 或者和 ``x`` 相同。 + +抛出异常 +:::::::::: + - ``TypeError`` - 如果 ``dtype`` 不是bool、float16、float32、float64、int32、int64。 + +代码示例 +:::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.enable_imperative() + + x = paddle.imperative.to_variable(np.array([1,2,3], dtype='float32')) + out1 = paddle.zeros_like(x) # [0., 0., 0.] 
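+    # descriptive note: out1 keeps x's dtype (float32) because dtype is None, as described in the dtype parameter above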
+ out2 = paddle.zeros_like(x, dtype='int32') # [0, 0, 0] diff --git a/doc/fluid/api_cn/transpiler_cn.rst b/doc/fluid/api_cn/transpiler_cn.rst index 4a84c5fbc465f0f8be342a0abcf07dc886b1eea5..ebf4661d09d8b21dba989e162e13fed5a9bda454 100644 --- a/doc/fluid/api_cn/transpiler_cn.rst +++ b/doc/fluid/api_cn/transpiler_cn.rst @@ -13,4 +13,3 @@ fluid.transpiler transpiler_cn/HashName_cn.rst transpiler_cn/memory_optimize_cn.rst transpiler_cn/release_memory_cn.rst - transpiler_cn/RoundRobin_cn.rst diff --git a/doc/fluid/api_cn/transpiler_cn/DistributeTranspilerConfig_cn.rst b/doc/fluid/api_cn/transpiler_cn/DistributeTranspilerConfig_cn.rst index d1d9cf407f1ccf6b69586904cfce513a74a64f82..4a95daa6428ad1d9be20ae1edc6dd1a0a0549920 100644 --- a/doc/fluid/api_cn/transpiler_cn/DistributeTranspilerConfig_cn.rst +++ b/doc/fluid/api_cn/transpiler_cn/DistributeTranspilerConfig_cn.rst @@ -3,10 +3,13 @@ DistributeTranspilerConfig ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:class:: paddle.fluid.transpiler.DistributeTranspilerConfig +:api_attr: 声明式编程模式(静态图) + + + 单机任务切换为分布式任务的配置类,用户可根据需求进行配置,如指定同步/异步训练,指定节点个数及模型切分逻辑。 返回:None diff --git a/doc/fluid/api_cn/transpiler_cn/DistributeTranspiler_cn.rst b/doc/fluid/api_cn/transpiler_cn/DistributeTranspiler_cn.rst index 85d76cbbeea74afdddd249bf17be8604f8c434f2..92ea6dfbfd198334767f5f0afc6c958dd38d23ea 100644 --- a/doc/fluid/api_cn/transpiler_cn/DistributeTranspiler_cn.rst +++ b/doc/fluid/api_cn/transpiler_cn/DistributeTranspiler_cn.rst @@ -3,10 +3,13 @@ DistributeTranspiler ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:class:: paddle.fluid.transpiler.DistributeTranspiler (config=None) +:api_attr: 声明式编程模式(静态图) + + + 该类可以把fluid program转变为分布式数据并行计算的program, 有PServer和NCCL2两种模式。 在Pserver(全称:parameter server)模式下, 通过 ``transpile`` 将用于单机训练的 ``program`` 转译为可用于parameter server的分布式架构(即PServer,参数服务器)来进行训练的program。 diff --git a/doc/fluid/api_cn/transpiler_cn/HashName_cn.rst b/doc/fluid/api_cn/transpiler_cn/HashName_cn.rst index d9e35ee53f9a5539d4c2791c216a05e78a226f25..9b4d019cbec11dba1a818c95afe81342e2e284b6 100644 --- a/doc/fluid/api_cn/transpiler_cn/HashName_cn.rst +++ b/doc/fluid/api_cn/transpiler_cn/HashName_cn.rst @@ -3,10 +3,13 @@ HashName ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:class:: paddle.fluid.transpiler.HashName(pserver_endpoints) +:api_attr: 声明式编程模式(静态图) + + + 该方法使用 python ``Hash()`` 函数将变量散列到多个parameter server节点。 参数: diff --git a/doc/fluid/api_cn/transpiler_cn/RoundRobin_cn.rst b/doc/fluid/api_cn/transpiler_cn/RoundRobin_cn.rst deleted file mode 100644 index 0eebe7b926e2c4f7ac443d90c47c6061648161dc..0000000000000000000000000000000000000000 --- a/doc/fluid/api_cn/transpiler_cn/RoundRobin_cn.rst +++ /dev/null @@ -1,69 +0,0 @@ -.. _cn_api_fluid_transpiler_RoundRobin: - -RoundRobin -------------------------------- - -**注意:该API仅支持【静态图】模式** - -.. py:class:: paddle.fluid.transpiler.RoundRobin(pserver_endpoints) - -该方法使用 ``RoundRobin`` 的方式将变量散列到多个parameter server终端。 - -`RondRobin `_ - -参数: - - **pserver_endpoints** (list) - endpoint (ip:port)的 list - -返回:实例化后的RoundRobin的对象 - -返回类型:RoundRobin - -**代码示例** - -.. code-block:: python - - import paddle.fluid.transpiler.RoundRobin as RoundRobin - - pserver_endpoints = [“127.0.0.1:6007”, “127.0.0.1:6008”] - vars = [“var1”,”var2”,”var3”,”var4”,”var5”] - rr = RoundRobin(pserver_endpoints) - rr.dispatch(vars) - - -.. 
py:method:: dispatch(varlist) - -该方法使用RoundRobin的方式将多个参数散列到多个parameter Server终端。 - -参数: - - **varlist** (list) - 参数 (var1, var2, var3) 的 list - -返回:基于varlist中var的顺序,返回参数服务器(ip:port)的列表, 列表中的数据量和varlist的数据量一致。 - -返回类型:list - -**代码示例** - -.. code-block:: python - - pserver_endpoints = [“127.0.0.1:6007”, “127.0.0.1:6008”] - vars = [“var1”,”var2”,”var3”,”var4”,”var5”] - rr = RoundRobin(pserver_endpoints) - rr.dispatch(vars) - - -.. py:method:: reset() - -该方法将重置RoundRobin内置的计数, 计数将重置为0。 - -返回:无。 - -**代码示例** - -.. code-block:: python - - pserver_endpoints = [“127.0.0.1:6007”, “127.0.0.1:6008”] - vars = [“var1”,”var2”,”var3”,”var4”,”var5”] - rr = RoundRobin(pserver_endpoints) - rr.reset() - - diff --git a/doc/fluid/api_cn/transpiler_cn/memory_optimize_cn.rst b/doc/fluid/api_cn/transpiler_cn/memory_optimize_cn.rst index 6df2a4bf9faf5225c24e380b45c38e262a8cd053..d9a5a2b023fc6f1d578d254b5057a9c01d6940b9 100644 --- a/doc/fluid/api_cn/transpiler_cn/memory_optimize_cn.rst +++ b/doc/fluid/api_cn/transpiler_cn/memory_optimize_cn.rst @@ -3,8 +3,11 @@ memory_optimize ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.transpiler.memory_optimize(input_program, skip_opt_set=None, print_log=False, level=0, skip_grads=True) +:api_attr: 声明式编程模式(静态图) + + + **从1.6版本开始此接口不再推荐使用,请不要在新写的代码中使用它,1.6+版本已默认开启更优的存储优化策略** \ No newline at end of file diff --git a/doc/fluid/api_cn/transpiler_cn/release_memory_cn.rst b/doc/fluid/api_cn/transpiler_cn/release_memory_cn.rst index 7387741f9dcf98c2265884e9cac2ae81e8052961..724589bf79cd77f11c3ec9431019d31f08ffd74f 100644 --- a/doc/fluid/api_cn/transpiler_cn/release_memory_cn.rst +++ b/doc/fluid/api_cn/transpiler_cn/release_memory_cn.rst @@ -3,8 +3,11 @@ release_memory ------------------------------- -**注意:该API仅支持【静态图】模式** .. py:function:: paddle.fluid.transpiler.release_memory(input_program, skip_opt_set=None) +:api_attr: 声明式编程模式(静态图) + + + **从1.6版本开始此接口不再推荐使用,请不要在新写的代码中使用它,1.6+版本已默认开启更优的存储优化策略** \ No newline at end of file diff --git a/doc/fluid/api_cn/unique_name_cn/generate_cn.rst b/doc/fluid/api_cn/unique_name_cn/generate_cn.rst index bbc987eb5d58769eb87334d5fba34c06a31fe2b3..4eb8a68c09bf029f5363f04ff9461b5e5aa9ecff 100644 --- a/doc/fluid/api_cn/unique_name_cn/generate_cn.rst +++ b/doc/fluid/api_cn/unique_name_cn/generate_cn.rst @@ -5,6 +5,9 @@ generate .. py:function:: paddle.fluid.unique_name.generate(key) + + + 该接口产生以前缀key开头的唯一名称。目前,Paddle通过从0开始的编号对相同前缀key的名称进行区分。例如,使用key=fc连续调用该接口会产生fc_0, fc_1, fc_2等不同名称。 参数: diff --git a/doc/fluid/api_cn/unique_name_cn/guard_cn.rst b/doc/fluid/api_cn/unique_name_cn/guard_cn.rst index ebbd81fef40f9cde05b86eb52a0674127881aed8..7c457eb15118baf6823be9e404fc2fa053b6a2f6 100644 --- a/doc/fluid/api_cn/unique_name_cn/guard_cn.rst +++ b/doc/fluid/api_cn/unique_name_cn/guard_cn.rst @@ -5,6 +5,9 @@ guard .. py:function:: paddle.fluid.unique_name.guard(new_generator=None) + + + 该接口用于更改命名空间,与with语句一起使用。使用后,在with语句的上下文中使用新的命名空间,调用generate接口时相同前缀的名称将从0开始重新编号。 参数: diff --git a/doc/fluid/api_cn/unique_name_cn/switch_cn.rst b/doc/fluid/api_cn/unique_name_cn/switch_cn.rst index 98fa9e1994d05cbb71dd7a46499ce2b1a0c0bd86..93da7cc1dd97162fbc09b6ae811c2a8f0f11a958 100644 --- a/doc/fluid/api_cn/unique_name_cn/switch_cn.rst +++ b/doc/fluid/api_cn/unique_name_cn/switch_cn.rst @@ -5,6 +5,9 @@ switch .. 
py:function:: paddle.fluid.unique_name.switch(new_generator=None) + + + 该接口将当前上下文的命名空间切换到新的命名空间。该接口与guard接口都可用于更改命名空间,推荐使用guard接口,配合with语句管理命名空间上下文。 参数: diff --git a/doc/fluid/api_guides/X2Paddle/TensorFlow-Fluid.rst b/doc/fluid/api_guides/X2Paddle/TensorFlow-Fluid.rst index 20ee91c8cd7c19c6521b68962647f46620b436aa..e219eb272ed508e70350ed3a0dfa69f416337a1d 100644 --- a/doc/fluid/api_guides/X2Paddle/TensorFlow-Fluid.rst +++ b/doc/fluid/api_guides/X2Paddle/TensorFlow-Fluid.rst @@ -4,151 +4,152 @@ TensorFlow-Fluid常用接口对应表 ############################### -本文档基于TensorFlow v1.13梳理了常用API与PaddlePaddle API对应关系和差异分析。根据文档对应关系,有TensorFlow使用经验的用户,可根据对应关系,快速熟悉PaddlePaddle的接口使用。 +本文档基于TensorFlow v1.15梳理了常用API与PaddlePaddle API对应关系和差异分析。根据文档对应关系,有TensorFlow使用经验的用户,可根据对应关系,快速熟悉PaddlePaddle的接口使用。 .. csv-table:: :header: "序号", "TensorFlow接口", "Fluid接口", "备注" :widths: 1, 8, 8, 3 - "1", "`tf.abs `_", ":ref:`cn_api_fluid_layers_abs`", "功能一致" - "2", "`tf.add `_", ":ref:`cn_api_fluid_layers_elementwise_add`", "功能一致" - "3", "`tf.argmax `_", ":ref:`cn_api_fluid_layers_argmax`", "功能一致" - "4", "`tf.argmin `_", ":ref:`cn_api_fluid_layers_argmin`", "功能一致" - "5", "`tf.assign `_", ":ref:`cn_api_fluid_layers_assign`", "功能一致" - "6", "`tf.assign_add `_", ":ref:`cn_api_fluid_layers_increment`", "功能一致" - "7", "`tf.case `_", ":ref:`cn_api_fluid_layers_Switch`", "`差异对比 `_" - "8", "`tf.cast `_", ":ref:`cn_api_fluid_layers_cast`", "功能一致" - "9", "`tf.clip_by_global_norm `_", ":ref:`cn_api_fluid_clip_GradientClipByGlobalNorm`", "`差异对比 `_" - "10", "`tf.clip_by_norm `_", ":ref:`cn_api_fluid_layers_clip_by_norm`", "`差异对比 `_" - "11", "`tf.clip_by_value `_", ":ref:`cn_api_fluid_layers_clip`", "功能一致" - "12", "`tf.concat `_", ":ref:`cn_api_fluid_layers_concat`", "功能一致" - "13", "`tf.cond `_", ":ref:`cn_api_fluid_layers_ifElse`", "功能一致" - "14", "`tf.constant `_", ":ref:`cn_api_fluid_layers_fill_constant`", "功能一致" - "15", "`tf.contrib.layers.batch_norm `_", ":ref:`cn_api_fluid_layers_batch_norm`", "功能一致" - "16", "`tf.contrib.layers.flatten `_", ":ref:`cn_api_fluid_layers_flatten`", "`差异对比 `_" - "17", "`tf.contrib.layers.fully_connected `_", ":ref:`cn_api_fluid_layers_fc`", "功能一致" - "18", "`tf.contrib.layers.one_hot_encoding `_", ":ref:`cn_api_fluid_layers_one_hot`", "功能一致" - "19", "`tf.contrib.layers.softmax `_", ":ref:`cn_api_fluid_layers_softmax`", "功能一致" - "20", "`tf.contrib.layers.xavier_initializer `_", ":ref:`cn_api_fluid_initializer_Xavier`", "功能一致" - "21", "`tf.nn.rnn.GRUCell `_", ":ref:`cn_api_fluid_layers_gru_unit`", "`差异对比 `_" - "22", "`tf.nn.rnn.MultiRNNCell `_", "无相应接口", "`Fluid实现 `_" - "23", "`tf.nn.rnn.static_rnn `_", ":ref:`cn_api_fluid_layers_DynamicRNN`", "功能一致" - "24", "`tf.convert_to_tensor `_", ":ref:`cn_api_fluid_layers_assign`", "功能一致" - "25", "`tf.cos `_", ":ref:`cn_api_fluid_layers_cos`", "功能一致" - "26", "`tf.div `_", ":ref:`cn_api_fluid_layers_elementwise_div`", "功能一致" - "27", "`tf.divide `_", ":ref:`cn_api_fluid_layers_elementwise_div`", "功能一致" - "28", "`tf.dropout `_", ":ref:`cn_api_fluid_layers_dropout`", "`差异对比 `_" - "29", "`tf.equal `_", "`运算符== `_", "功能一致" - "30", "`tf.exp `_", ":ref:`cn_api_fluid_layers_exp`", "功能一致" - "31", "`tf.expand_dims `_", ":ref:`cn_api_fluid_layers_unsqueeze`", "`差异对比 `_" - "32", "`tf.fill `_", ":ref:`cn_api_fluid_layers_fill_constant`", "功能一致" - "33", "`tf.floor `_", ":ref:`cn_api_fluid_layers_floor`", "功能一致" - "34", "`tf.gather `_", ":ref:`cn_api_fluid_layers_gather`", "功能一致" - "35", "`tf.greater `_", "`运算符> `_", "功能一致" - "36", "`tf.greater_equal `_", "`运算符>= `_", "功能一致" - "37", 
"`tf.image.non_max_suppression `_", ":ref:`cn_api_fluid_layers_multiclass_nms`", "`差异对比 `_" - "38", "`tf.image.resize_bilinear `_", ":ref:`cn_api_fluid_layers_resize_bilinear`", "功能一致" - "39", "`tf.image.resize_images `_", ":ref:`cn_api_fluid_layers_image_resize`", "`差异对比 `_" - "40", "`tf.image.resize_nearest_neighbor `_", ":ref:`cn_api_fluid_layers_resize_nearest`", "功能一致" - "41", "`tf.is_finite `_", ":ref:`cn_api_fluid_layers_isfinite`", "`差异对比 `_" - "42", "`tf.layers.batch_normalization `_", ":ref:`cn_api_fluid_layers_batch_norm`", "功能一致" - "43", "`tf.layers.conv2d `_", ":ref:`cn_api_fluid_layers_conv2d`", "`差异对比 `_" - "44", "`tf.layers.dense `_", ":ref:`cn_api_fluid_layers_fc`", "`差异对比 `_" - "45", "`tf.layers.dropout `_", ":ref:`cn_api_fluid_layers_dropout`", "功能一致" - "46", "`tf.layers.Dropout `_", ":ref:`cn_api_fluid_layers_dropout`", "功能一致" - "47", "`tf.layers.flatten `_", ":ref:`cn_api_fluid_layers_flatten`", "功能一致" - "48", "`tf.less `_", "`运算符< `_", "功能一致" - "49", "`tf.less_equal `_", "`运算符<= `_", "功能一致" - "50", "`tf.log `_", ":ref:`cn_api_fluid_layers_log`", "功能一致" - "51", "`tf.logical_and `_", ":ref:`cn_api_fluid_layers_logical_and`", "功能一致" - "52", "`tf.logical_not `_", ":ref:`cn_api_fluid_layers_logical_not`", "功能一致" - "53", "`tf.logical_or `_", ":ref:`cn_api_fluid_layers_logical_or`", "功能一致" - "54", "`tf.losses.mean_squared_error `_", ":ref:`cn_api_fluid_layers_square_error_cost`", "`差异对比 `_" - "55", "`tf.losses.sigmoid_cross_entropy `_", ":ref:`cn_api_fluid_layers_sigmoid_cross_entropy_with_logits`", "`差异对比 `_" - "56", "`tf.losses.softmax_cross_entropy `_", ":ref:`cn_api_fluid_layers_softmax_with_cross_entropy`", "功能一致" - "57", "`tf.matmul `_", ":ref:`cn_api_fluid_layers_matmul`", "`差异对比 `_" - "58", "`tf.maximum `_", ":ref:`cn_api_fluid_layers_elementwise_max`", "功能一致" - "59", "`tf.metrics.accuracy `_", ":ref:`cn_api_fluid_layers_accuracy`", "功能一致" - "60", "`tf.metrics.mean `_", ":ref:`cn_api_fluid_layers_mean`", "功能一致" - "61", "`tf.minimum `_", ":ref:`cn_api_fluid_layers_elementwise_min`", "功能一致" - "62", "`tf.multiply `_", ":ref:`cn_api_fluid_layers_elementwise_mul`", "功能一致" - "63", "`tf.nn.avg_pool `_", ":ref:`cn_api_fluid_layers_pool2d`", "`差异对比 `_" - "64", "`tf.nn.batch_normalization `_", ":ref:`cn_api_fluid_layers_batch_norm`", "功能一致" - "65", "`tf.nn.bidirectional_dynamic_rnn `_", "无相应接口", "`Fluid实现 `_" - "66", "`tf.nn.conv2d `_", ":ref:`cn_api_fluid_layers_conv2d`", "`差异对比 `_" - "67", "`tf.nn.conv2d_transpose `_", ":ref:`cn_api_fluid_layers_conv2d_transpose`", "`差异对比 `_" - "68", "`tf.nn.conv3d_transpose `_", ":ref:`cn_api_fluid_layers_conv3d_transpose`", "`差异对比 `_" - "69", "`tf.nn.depthwise_conv2d `_", ":ref:`cn_api_fluid_layers_conv2d`", "`差异对比 `_" - "70", "`tf.nn.dynamic_rnn `_", ":ref:`cn_api_fluid_layers_DynamicRNN`", "`差异对比 `_" - "71", "`tf.nn.l2_normalize `_", ":ref:`cn_api_fluid_layers_l2_normalize`", "`差异对比 `_" - "72", "`tf.nn.leaky_relu `_", ":ref:`cn_api_fluid_layers_leaky_relu`", "功能一致" - "73", "`tf.nn.lrn `_", ":ref:`cn_api_fluid_layers_lrn`", "`差异对比 `_" - "74", "`tf.nn.max_pool `_", ":ref:`cn_api_fluid_layers_pool2d`", "`差异对比 `_" - "75", "`tf.nn.relu `_", ":ref:`cn_api_fluid_layers_relu`", "功能一致" - "76", "`tf.nn.relu6 `_", ":ref:`cn_api_fluid_layers_relu6`", "功能一致" - "77", "`tf.nn.rnn_cell.LSTMCell `_", ":ref:`cn_api_fluid_layers_lstm_unit`", "`差异对比 `_" - "78", "`tf.nn.separable_conv2d `_", "无相应接口", "`Fluid实现 `_" - "79", "`tf.nn.sigmoid `_", ":ref:`cn_api_fluid_layers_sigmoid`", "功能一致" - "80", "`tf.nn.sigmoid_cross_entropy_with_logits `_", 
":ref:`cn_api_fluid_layers_sigmoid_cross_entropy_with_logits`", "功能一致" - "81", "`tf.nn.softmax `_", ":ref:`cn_api_fluid_layers_softmax`", "功能一致" - "82", "`tf.nn.softmax_cross_entropy_with_logits `_", ":ref:`cn_api_fluid_layers_softmax_with_cross_entropy`", "`差异对比 `_" - "83", "`tf.nn.softplus `_", ":ref:`cn_api_fluid_layers_softplus`", "功能一致" - "84", "`tf.nn.softsign `_", ":ref:`cn_api_fluid_layers_softsign`", "功能一致" - "85", "`tf.nn.tanh `_", ":ref:`cn_api_fluid_layers_tanh`", "功能一致" - "86", "`tf.one_hot `_", ":ref:`cn_api_fluid_layers_one_hot`", "`差异对比 `_" - "87", "`tf.ones `_", ":ref:`cn_api_fluid_layers_ones`", "功能一致" - "88", "`tf.intializers.ones `_", ":ref:`cn_api_fluid_initializer_Constant`", "功能一致" - "89", "`tf.pad `_", ":ref:`cn_api_fluid_layers_pad`", "`差异对比 `_" - "90", "`tf.placeholder `_", ":ref:`cn_api_fluid_layers_data`", "`差异对比 `_" - "91", "`tf.pow `_", ":ref:`cn_api_fluid_layers_pow`", "`差异对比 `_" - "92", "`tf.print `_", ":ref:`cn_api_fluid_layers_print`", "`差异对比 `_" - "93", "`tf.py_func `_", ":ref:`cn_api_fluid_layers_py_func`", "功能一致" - "94", "`tf.random_normal `_", ":ref:`cn_api_fluid_layers_gaussian_random`", "功能一致" - "95", "`tf.random_normal_initializer `_", ":ref:`cn_api_fluid_initializer_Normal`", "功能一致" - "96", "`tf.random_uniform `_", ":ref:`cn_api_fluid_layers_uniform_random`", "功能一致" - "97", "`tf.random_uniform_initializer `_", ":ref:`cn_api_fluid_initializer_UniformInitializer`", "功能一致" - "98", "`tf.reduce_logsumexp `_", "无相应接口", "`Fluid实现 `_" - "99", "`tf.reduce_max `_", ":ref:`cn_api_fluid_layers_reduce_max`", "功能一致" - "100", "`tf.reduce_mean `_", ":ref:`cn_api_fluid_layers_reduce_mean`", "功能一致" - "101", "`tf.reduce_min `_", ":ref:`cn_api_fluid_layers_reduce_min`", "功能一致" - "102", "`tf.reduce_sum `_", ":ref:`cn_api_fluid_layers_reduce_sum`", "功能一致" - "103", "`tf.reshape `_", ":ref:`cn_api_fluid_layers_reshape`", "`差异对比 `_" - "104", "`tf.reverse `_", ":ref:`cn_api_fluid_layers_reverse`", "功能一致" - "105", "`tf.reverse_sequence `_", ":ref:`cn_api_fluid_layers_sequence_reverse`", "`差异对比 `_" - "106", "`tf.reverse_v2 `_", ":ref:`cn_api_fluid_layers_reverse`", "功能一致" - "107", "`tf.round `_", ":ref:`cn_api_fluid_layers_round`", "功能一致" - "108", "`tf.rsqrt `_", ":ref:`cn_api_fluid_layers_rsqrt`", "功能一致" - "109", "`tf.scalar_mul `_", ":ref:`cn_api_fluid_layers_scale`", "功能一致" - "110", "`tf.scatter_update `_", ":ref:`cn_api_fluid_layers_scatter`", "`差异对比 `_" - "111", "`tf.sequence_mask `_", ":ref:`cn_api_fluid_layers_sequence_mask`", "功能一致" - "112", "`tf.shape `_", ":ref:`cn_api_fluid_layers_shape`", "功能一致" - "113", "`tf.sigmoid `_", ":ref:`cn_api_fluid_layers_sigmoid`", "功能一致" - "114", "`tf.sin `_", ":ref:`cn_api_fluid_layers_sin`", "功能一致" - "115", "`tf.slice `_", ":ref:`cn_api_fluid_layers_slice`", "`差异对比 `_" - "116", "`tf.split `_", ":ref:`cn_api_fluid_layers_split`", "`差异对比 `_" - "117", "`tf.sqrt `_", ":ref:`cn_api_fluid_layers_sqrt`", "功能一致" - "118", "`tf.square `_", ":ref:`cn_api_fluid_layers_square`", "功能一致" - "119", "`tf.squared_difference `_", "无相应接口", "`Fluid实现 `_" - "120", "`tf.squeeze `_", ":ref:`cn_api_fluid_layers_squeeze`", "功能一致" - "121", "`tf.stack `_", ":ref:`cn_api_fluid_layers_stack`", "功能一致" - "122", "`tf.stop_gradient `_", "无相应接口", "`Fluid实现 `_" - "123", "`tf.subtract `_", ":ref:`cn_api_fluid_layers_elementwise_sub`", "功能一致" - "124", "`tf.tanh `_", ":ref:`cn_api_fluid_layers_tanh`", "功能一致" - "125", "`tf.tile `_", ":ref:`cn_api_fluid_layers_expand`", "功能一致" - "126", "`tf.top_k `_", ":ref:`cn_api_fluid_layers_topk`", "`差异对比 `_" - "127", 
"`tf.train.AdagradOptimizer `_", ":ref:`cn_api_fluid_optimizer_AdagradOptimizer`", "功能一致" - "128", "`tf.train.AdamOptimizer `_", ":ref:`cn_api_fluid_optimizer_Adam`", "功能一致" - "129", "`tf.train.exponential_decay `_", ":ref:`cn_api_fluid_layers_exponential_decay`", "功能一致" - "130", "`tf.train.GradientDescentOptimizer `_", ":ref:`cn_api_fluid_optimizer_SGDOptimizer`", "功能一致" - "131", "`tf.train.MomentumOptimizer `_", ":ref:`cn_api_fluid_optimizer_MomentumOptimizer`", "功能一致" - "132", "`tf.train.polynomial_decay `_", ":ref:`cn_api_fluid_layers_polynomial_decay`", "功能一致" - "133", "`tf.train.RMSPropOptimizer `_", ":ref:`cn_api_fluid_optimizer_RMSPropOptimizer`", "功能一致" - "134", "`tf.transpose `_", ":ref:`cn_api_fluid_layers_transpose`", "功能一致" - "135", "`tf.truediv `_", ":ref:`cn_api_fluid_layers_elementwise_div`", "功能一致" - "136", "`tf.truncated_normal `_", ":ref:`cn_api_fluid_initializer_TruncatedNormal`", "功能一致" - "137", "`tf.truncated_normal_initializer `_", ":ref:`cn_api_fluid_initializer_TruncatedNormal`", "功能一致" - "138", "`tf.unstack `_", ":ref:`cn_api_fluid_layers_unstack`", "功能一致" - "139", "`tf.Variable `_", ":ref:`cn_api_fluid_layers_create_parameter`", "功能一致" - "140", "`tf.while_loop `_", ":ref:`cn_api_fluid_layers_While`", "`差异对比 `_" - "141", "`tf.zeros `_", ":ref:`cn_api_fluid_layers_zeros`", "功能一致" - "142", "`tf.zeros_initializer `_", ":ref:`cn_api_fluid_initializer_Constant`", "功能一致" + "1", "`tf.abs `_", ":ref:`cn_api_fluid_layers_abs`", "功能一致" + "2", "`tf.add `_", ":ref:`cn_api_fluid_layers_elementwise_add`", "功能一致" + "3", "`tf.argmax `_", ":ref:`cn_api_fluid_layers_argmax`", "功能一致" + "4", "`tf.argmin `_", ":ref:`cn_api_fluid_layers_argmin`", "功能一致" + "5", "`tf.assign `_", ":ref:`cn_api_fluid_layers_assign`", "功能一致" + "6", "`tf.assign_add `_", ":ref:`cn_api_fluid_layers_increment`", "功能一致" + "7", "`tf.case `_", ":ref:`cn_api_fluid_layers_Switch`", "`差异对比 `_" + "8", "`tf.cast `_", ":ref:`cn_api_fluid_layers_cast`", "功能一致" + "9", "`tf.clip_by_global_norm `_", ":ref:`cn_api_fluid_clip_GradientClipByGlobalNorm`", "`差异对比 `_" + "10", "`tf.clip_by_norm `_", ":ref:`cn_api_fluid_layers_clip_by_norm`", "`差异对比 `_" + "11", "`tf.clip_by_value `_", ":ref:`cn_api_fluid_layers_clip`", "功能一致" + "12", "`tf.concat `_", ":ref:`cn_api_fluid_layers_concat`", "功能一致" + "13", "`tf.cond `_", ":ref:`cn_api_fluid_layers_ifElse`", "功能一致" + "14", "`tf.constant `_", ":ref:`cn_api_fluid_layers_fill_constant`", "功能一致" + "15", "`tf.contrib.layers.batch_norm `_", ":ref:`cn_api_fluid_layers_batch_norm`", "功能一致" + "16", "`tf.contrib.layers.flatten `_", ":ref:`cn_api_fluid_layers_flatten`", "`差异对比 `_" + "17", "`tf.contrib.layers.fully_connected `_", ":ref:`cn_api_fluid_layers_fc`", "功能一致" + "18", "`tf.contrib.layers.one_hot_encoding `_", ":ref:`cn_api_fluid_layers_one_hot`", "功能一致" + "19", "`tf.contrib.layers.softmax `_", ":ref:`cn_api_fluid_layers_softmax`", "功能一致" + "20", "`tf.contrib.layers.xavier_initializer `_", ":ref:`cn_api_fluid_initializer_Xavier`", "功能一致" + "21", "`tf.nn.rnn.GRUCell `_", ":ref:`cn_api_fluid_layers_gru_unit`", "`差异对比 `_" + "22", "`tf.nn.rnn.MultiRNNCell `_", "无相应接口", "`Fluid实现 `_" + "23", "`tf.nn.rnn.static_rnn `_", ":ref:`cn_api_fluid_layers_DynamicRNN`", "功能一致" + "24", "`tf.convert_to_tensor `_", ":ref:`cn_api_fluid_layers_assign`", "功能一致" + "25", "`tf.cos `_", ":ref:`cn_api_fluid_layers_cos`", "功能一致" + "26", "`tf.div `_", ":ref:`cn_api_fluid_layers_elementwise_div`", "功能一致" + "27", "`tf.divide `_", ":ref:`cn_api_fluid_layers_elementwise_div`", "功能一致" + "28", "`tf.dropout `_", 
":ref:`cn_api_fluid_layers_dropout`", "`差异对比 `_" + "29", "`tf.equal `_", "`运算符== `_", "功能一致" + "30", "`tf.exp `_", ":ref:`cn_api_fluid_layers_exp`", "功能一致" + "31", "`tf.expand_dims `_", ":ref:`cn_api_fluid_layers_unsqueeze`", "`差异对比 `_" + "32", "`tf.fill `_", ":ref:`cn_api_fluid_layers_fill_constant`", "功能一致" + "33", "`tf.floor `_", ":ref:`cn_api_fluid_layers_floor`", "功能一致" + "34", "`tf.gather `_", ":ref:`cn_api_fluid_layers_gather`", "功能一致" + "35", "`tf.greater `_", "`运算符> `_", "功能一致" + "36", "`tf.greater_equal `_", "`运算符>= `_", "功能一致" + "37", "`tf.image.non_max_suppression `_", ":ref:`cn_api_fluid_layers_multiclass_nms`", "`差异对比 `_" + "38", "`tf.image.resize_bilinear `_", ":ref:`cn_api_fluid_layers_resize_bilinear`", "功能一致" + "39", "`tf.image.resize_images `_", ":ref:`cn_api_fluid_layers_image_resize`", "`差异对比 `_" + "40", "`tf.image.resize_nearest_neighbor `_", ":ref:`cn_api_fluid_layers_resize_nearest`", "功能一致" + "41", "`tf.is_finite `_", ":ref:`cn_api_fluid_layers_isfinite`", "`差异对比 `_" + "42", "`tf.layers.batch_normalization `_", ":ref:`cn_api_fluid_layers_batch_norm`", "功能一致" + "43", "`tf.layers.conv2d `_", ":ref:`cn_api_fluid_layers_conv2d`", "`差异对比 `_" + "44", "`tf.layers.dense `_", ":ref:`cn_api_fluid_layers_fc`", "`差异对比 `_" + "45", "`tf.layers.dropout `_", ":ref:`cn_api_fluid_layers_dropout`", "功能一致" + "46", "`tf.layers.Dropout `_", ":ref:`cn_api_fluid_layers_dropout`", "功能一致" + "47", "`tf.layers.flatten `_", ":ref:`cn_api_fluid_layers_flatten`", "功能一致" + "48", "`tf.less `_", "`运算符< `_", "功能一致" + "49", "`tf.less_equal `_", "`运算符<= `_", "功能一致" + "50", "`tf.log `_", ":ref:`cn_api_fluid_layers_log`", "功能一致" + "51", "`tf.logical_and `_", ":ref:`cn_api_fluid_layers_logical_and`", "功能一致" + "52", "`tf.logical_not `_", ":ref:`cn_api_fluid_layers_logical_not`", "功能一致" + "53", "`tf.logical_or `_", ":ref:`cn_api_fluid_layers_logical_or`", "功能一致" + "54", "`tf.losses.mean_squared_error `_", ":ref:`cn_api_fluid_layers_square_error_cost`", "`差异对比 `_" + "55", "`tf.losses.sigmoid_cross_entropy `_", ":ref:`cn_api_fluid_layers_sigmoid_cross_entropy_with_logits`", "`差异对比 `_" + "56", "`tf.losses.softmax_cross_entropy `_", ":ref:`cn_api_fluid_layers_softmax_with_cross_entropy`", "功能一致" + "57", "`tf.matmul `_", ":ref:`cn_api_fluid_layers_matmul`", "`差异对比 `_" + "58", "`tf.maximum `_", ":ref:`cn_api_fluid_layers_elementwise_max`", "功能一致" + "59", "`tf.metrics.accuracy `_", ":ref:`cn_api_fluid_layers_accuracy`", "功能一致" + "60", "`tf.metrics.mean `_", ":ref:`cn_api_fluid_layers_mean`", "功能一致" + "61", "`tf.minimum `_", ":ref:`cn_api_fluid_layers_elementwise_min`", "功能一致" + "62", "`tf.multiply `_", ":ref:`cn_api_fluid_layers_elementwise_mul`", "功能一致" + "63", "`tf.nn.avg_pool `_", ":ref:`cn_api_fluid_layers_pool2d`", "`差异对比 `_" + "64", "`tf.nn.batch_normalization `_", ":ref:`cn_api_fluid_layers_batch_norm`", "功能一致" + "65", "`tf.nn.bidirectional_dynamic_rnn `_", "无相应接口", "`Fluid实现 `_" + "66", "`tf.nn.conv2d `_", ":ref:`cn_api_fluid_layers_conv2d`", "`差异对比 `_" + "67", "`tf.nn.conv2d_transpose `_", ":ref:`cn_api_fluid_layers_conv2d_transpose`", "`差异对比 `_" + "68", "`tf.nn.conv3d_transpose `_", ":ref:`cn_api_fluid_layers_conv3d_transpose`", "`差异对比 `_" + "69", "`tf.nn.depthwise_conv2d `_", ":ref:`cn_api_fluid_layers_conv2d`", "`差异对比 `_" + "70", "`tf.nn.dynamic_rnn `_", ":ref:`cn_api_fluid_layers_DynamicRNN`", "`差异对比 `_" + "71", "`tf.nn.l2_normalize `_", ":ref:`cn_api_fluid_layers_l2_normalize`", "`差异对比 `_" + "72", "`tf.nn.leaky_relu `_", ":ref:`cn_api_fluid_layers_leaky_relu`", "功能一致" + "73", "`tf.nn.lrn `_", 
":ref:`cn_api_fluid_layers_lrn`", "`差异对比 `_" + "74", "`tf.nn.max_pool `_", ":ref:`cn_api_fluid_layers_pool2d`", "`差异对比 `_" + "75", "`tf.nn.relu `_", ":ref:`cn_api_fluid_layers_relu`", "功能一致" + "76", "`tf.nn.relu6 `_", ":ref:`cn_api_fluid_layers_relu6`", "功能一致" + "77", "`tf.nn.rnn_cell.LSTMCell `_", ":ref:`cn_api_fluid_layers_lstm_unit`", "`差异对比 `_" + "78", "`tf.nn.separable_conv2d `_", "无相应接口", "`Fluid实现 `_" + "79", "`tf.nn.sigmoid `_", ":ref:`cn_api_fluid_layers_sigmoid`", "功能一致" + "80", "`tf.nn.sigmoid_cross_entropy_with_logits `_", ":ref:`cn_api_fluid_layers_sigmoid_cross_entropy_with_logits`", "功能一致" + "81", "`tf.nn.softmax `_", ":ref:`cn_api_fluid_layers_softmax`", "功能一致" + "82", "`tf.nn.softmax_cross_entropy_with_logits `_", ":ref:`cn_api_fluid_layers_softmax_with_cross_entropy`", "`差异对比 `_" + "83", "`tf.nn.softplus `_", ":ref:`cn_api_fluid_layers_softplus`", "功能一致" + "84", "`tf.nn.softsign `_", ":ref:`cn_api_fluid_layers_softsign`", "功能一致" + "85", "`tf.nn.tanh `_", ":ref:`cn_api_fluid_layers_tanh`", "功能一致" + "86", "`tf.one_hot `_", ":ref:`cn_api_fluid_layers_one_hot`", "`差异对比 `_" + "87", "`tf.ones `_", ":ref:`cn_api_fluid_layers_ones`", "功能一致" + "88", "`tf.intializers.ones `_", ":ref:`cn_api_fluid_initializer_Constant`", "功能一致" + "89", "`tf.pad `_", ":ref:`cn_api_fluid_layers_pad`", "`差异对比 `_" + "90", "`tf.placeholder `_", ":ref:`cn_api_fluid_layers_data`", "`差异对比 `_" + "91", "`tf.pow `_", ":ref:`cn_api_fluid_layers_pow`", "`差异对比 `_" + "92", "`tf.print `_", ":ref:`cn_api_fluid_layers_print`", "`差异对比 `_" + "93", "`tf.py_func `_", ":ref:`cn_api_fluid_layers_py_func`", "功能一致" + "94", "`tf.random_normal `_", ":ref:`cn_api_fluid_layers_gaussian_random`", "功能一致" + "95", "`tf.random_normal_initializer `_", ":ref:`cn_api_fluid_initializer_Normal`", "功能一致" + "96", "`tf.random_uniform `_", ":ref:`cn_api_fluid_layers_uniform_random`", "功能一致" + "97", "`tf.random_uniform_initializer `_", ":ref:`cn_api_fluid_initializer_UniformInitializer`", "功能一致" + "98", "`tf.reduce_logsumexp `_", "无相应接口", "`Fluid实现 `_" + "99", "`tf.reduce_max `_", ":ref:`cn_api_fluid_layers_reduce_max`", "功能一致" + "100", "`tf.reduce_mean `_", ":ref:`cn_api_fluid_layers_reduce_mean`", "功能一致" + "101", "`tf.reduce_min `_", ":ref:`cn_api_fluid_layers_reduce_min`", "功能一致" + "102", "`tf.reduce_sum `_", ":ref:`cn_api_fluid_layers_reduce_sum`", "功能一致" + "103", "`tf.reshape `_", ":ref:`cn_api_fluid_layers_reshape`", "`差异对比 `_" + "104", "`tf.reverse `_", ":ref:`cn_api_fluid_layers_reverse`", "功能一致" + "105", "`tf.reverse_sequence `_", ":ref:`cn_api_fluid_layers_sequence_reverse`", "`差异对比 `_" + "106", "`tf.reverse_v2 `_", ":ref:`cn_api_fluid_layers_reverse`", "功能一致" + "107", "`tf.round `_", ":ref:`cn_api_fluid_layers_round`", "功能一致" + "108", "`tf.rsqrt `_", ":ref:`cn_api_fluid_layers_rsqrt`", "功能一致" + "109", "`tf.scalar_mul `_", ":ref:`cn_api_fluid_layers_scale`", "功能一致" + "110", "`tf.scatter_update `_", ":ref:`cn_api_fluid_layers_scatter`", "`差异对比 `_" + "111", "`tf.sequence_mask `_", ":ref:`cn_api_fluid_layers_sequence_mask`", "功能一致" + "112", "`tf.shape `_", ":ref:`cn_api_fluid_layers_shape`", "功能一致" + "113", "`tf.sigmoid `_", ":ref:`cn_api_fluid_layers_sigmoid`", "功能一致" + "114", "`tf.sin `_", ":ref:`cn_api_fluid_layers_sin`", "功能一致" + "115", "`tf.slice `_", ":ref:`cn_api_fluid_layers_slice`", "`差异对比 `_" + "116", "`tf.split `_", ":ref:`cn_api_fluid_layers_split`", "`差异对比 `_" + "117", "`tf.sqrt `_", ":ref:`cn_api_fluid_layers_sqrt`", "功能一致" + "118", "`tf.square `_", ":ref:`cn_api_fluid_layers_square`", "功能一致" + "119", 
"`tf.squared_difference `_", "无相应接口", "`Fluid实现 `_" + "120", "`tf.squeeze `_", ":ref:`cn_api_fluid_layers_squeeze`", "功能一致" + "121", "`tf.stack `_", ":ref:`cn_api_fluid_layers_stack`", "功能一致" + "122", "`tf.stop_gradient `_", "无相应接口", "`Fluid实现 `_" + "123", "`tf.subtract `_", ":ref:`cn_api_fluid_layers_elementwise_sub`", "功能一致" + "124", "`tf.tanh `_", ":ref:`cn_api_fluid_layers_tanh`", "功能一致" + "125", "`tf.tile `_", ":ref:`cn_api_fluid_layers_expand`", "功能一致" + "126", "`tf.top_k `_", ":ref:`cn_api_fluid_layers_topk`", "`差异对比 `_" + "127", "`tf.train.AdagradOptimizer `_", ":ref:`cn_api_fluid_optimizer_AdagradOptimizer`", "功能一致" + "128", "`tf.train.AdamOptimizer `_", ":ref:`cn_api_fluid_optimizer_Adam`", "功能一致" + "129", "`tf.train.exponential_decay `_", ":ref:`cn_api_fluid_layers_exponential_decay`", "功能一致" + "130", "`tf.train.GradientDescentOptimizer `_", ":ref:`cn_api_fluid_optimizer_SGDOptimizer`", "功能一致" + "131", "`tf.train.MomentumOptimizer `_", ":ref:`cn_api_fluid_optimizer_MomentumOptimizer`", "功能一致" + "132", "`tf.train.polynomial_decay `_", ":ref:`cn_api_fluid_layers_polynomial_decay`", "功能一致" + "133", "`tf.train.RMSPropOptimizer `_", ":ref:`cn_api_fluid_optimizer_RMSPropOptimizer`", "功能一致" + "134", "`tf.transpose `_", ":ref:`cn_api_fluid_layers_transpose`", "功能一致" + "135", "`tf.truediv `_", ":ref:`cn_api_fluid_layers_elementwise_div`", "功能一致" + "136", "`tf.truncated_normal `_", ":ref:`cn_api_fluid_initializer_TruncatedNormal`", "功能一致" + "137", "`tf.truncated_normal_initializer `_", ":ref:`cn_api_fluid_initializer_TruncatedNormal`", "功能一致" + "138", "`tf.unstack `_", ":ref:`cn_api_fluid_layers_unstack`", "功能一致" + "139", "`tf.Variable `_", ":ref:`cn_api_fluid_layers_create_parameter`", "功能一致" + "140", "`tf.while_loop `_", ":ref:`cn_api_fluid_layers_While`", "`差异对比 `_" + "141", "`tf.zeros `_", ":ref:`cn_api_fluid_layers_zeros`", "功能一致" + "142", "`tf.zeros_initializer `_", ":ref:`cn_api_fluid_initializer_Constant`", "功能一致" + diff --git a/doc/fluid/api_guides/low_level/layers/tensor.rst b/doc/fluid/api_guides/low_level/layers/tensor.rst index e24c1e5f68df25f2a35c0bddd801e6e305b87718..87f953b5a0199fc26f73c9bc020bdb76dedf09f8 100644 --- a/doc/fluid/api_guides/low_level/layers/tensor.rst +++ b/doc/fluid/api_guides/low_level/layers/tensor.rst @@ -56,64 +56,56 @@ Fluid 使用 :code:`sums` 执行对输入数据的加和。 API reference 请参考::ref:`cn_api_fluid_layers_sums` - -7. fill_constant_batch_size_like ---------------------------------- - -Fluid 使用 :code:`fill_constant_batch_size_like` 创建一个具有特定形状、类型和 batch_size 的 Tensor。并且该Tensor的初始值可以被指定为任意常数。其中 batch_size 信息由该tensor的 :code:`input_dim_idx` 和 :code:`output_dim_idx` 确定。 - -API reference 请参考::ref:`cn_api_fluid_layers_fill_constant_batch_size_like` - -8. fill_constant +7. fill_constant ----------------- Fluid 使用 :code:`fill_constant` 创建一个具有特定形状和类型的 Tensor。可以通过 :code:`value` 设置该变量的初始值。 API reference 请参考: :ref:`cn_api_fluid_layers_fill_constant` -9. assign +8. assign --------------- Fluid 使用 :code:`assign` 复制一个变量。 API reference 请参考::ref:`cn_api_fluid_layers_assign` -10. argmin +9. argmin -------------- Fluid 使用 :code:`argmin` 计算输入 Tensor 指定轴上最小元素的索引。 API reference 请参考::ref:`cn_api_fluid_layers_assign` -11. argmax +10. argmax ----------- Fluid 使用 :code:`argmax` 计算输入 Tensor 指定轴上最大元素的索引。 API reference 请参考::ref:`cn_api_fluid_layers_argmax` -12. argsort +11. argsort ------------ Fluid 使用 :code:`argsort` 对输入 Tensor 在指定轴上进行排序,并返回排序后的数据变量及其对应的索引值。 API reference 请参考: :ref:`cn_api_fluid_layers_argsort` -13. ones +12. 
ones ------------- Fluid 使用 :code:`ones` 创建一个指定大小和数据类型的Tensor,且初始值为1。 API reference 请参考: :ref:`cn_api_fluid_layers_ones` -14. zeros +13. zeros --------------- Fluid 使用 :code:`zeros` 创建一个指定大小和数据类型的Tensor,且初始值为0。 API reference 请参考: :ref:`cn_api_fluid_layers_zeros` -15. reverse +14. reverse ------------------- Fluid 使用 :code:`reverse` 沿指定轴反转 Tensor。 @@ -146,4 +138,4 @@ API reference 请参考: :ref:`cn_api_fluid_create_random_int_lodtensor` Fluid 使用 :code:`reorder_lod_tensor_by_rank` 对输入 LoD_Tensor 的序列信息按指定顺序重拍。 -API reference 请参考::ref:`cn_api_fluid_layers_reorder_lod_tensor_by_rank` \ No newline at end of file +API reference 请参考::ref:`cn_api_fluid_layers_reorder_lod_tensor_by_rank` diff --git a/doc/fluid/api_guides/low_level/layers/tensor_en.rst b/doc/fluid/api_guides/low_level/layers/tensor_en.rst index 0f1e2e088866c1770b2015fffdb4dc2e99d082dc..4a14864d2e6a10d6b8e3fb10ef6769b4cfb3ed52 100755 --- a/doc/fluid/api_guides/low_level/layers/tensor_en.rst +++ b/doc/fluid/api_guides/low_level/layers/tensor_en.rst @@ -56,64 +56,56 @@ Fluid uses :code:`sums` to sum up the input data. API reference : :ref:`api_fluid_layers_sums` - -7. fill_constant_batch_size_like ---------------------------------- - -Fluid uses :code:`fill_constant_batch_size_like` to create a Tensor with a specific shape, type, and batch_size. And the initial value of the Tensor can be specified as an arbitrary constant. The batch_size information is determined by the tensor's :code:`input_dim_idx` and :code:`output_dim_idx`. - -API reference : :ref:`api_fluid_layers_fill_constant_batch_size_like` - -8. fill_constant +7. fill_constant ----------------- Fluid uses :code:`fill_constant` to create a Tensor with a specific shape and type. The initial value of this variable can be set via :code:`value`. API reference : :ref:`api_fluid_layers_fill_constant` -9. assign +8. assign --------------- Fluid uses :code:`assign` to duplicate a variable. API reference : :ref:`api_fluid_layers_assign` -10. argmin +9. argmin -------------- Fluid uses :code:`argmin` to calculate the index of the smallest element on the specified axis of Tensor. API reference : :ref:`api_fluid_layers_argmin` -11. argmax +10. argmax ----------- Fluid uses :code:`argmax` to calculate the index of the largest element on the specified axis of Tensor. API reference : :ref:`api_fluid_layers_argmax` -12. argsort +11. argsort ------------ Fluid uses :code:`argsort` to sort the input Tensor on the specified axis and it will return the sorted data variables and their corresponding index values. API reference : :ref:`api_fluid_layers_argsort` -13. ones +12. ones ------------- Fluid uses :code:`ones` to create a Tensor of the specified size and data type with an initial value of 1. API reference : :ref:`api_fluid_layers_ones` -14. zeros +13. zeros --------------- Fluid uses :code:`zeros` to create a Tensor of the specified size and data type with an initial value of zero. API reference : :ref:`api_fluid_layers_zeros` -15. reverse +14. reverse ------------------- Fluid uses :code:`reverse` to invert Tensor along the specified axis. @@ -146,4 +138,4 @@ API reference : :ref:`api_fluid_create_random_int_lodtensor` Fluid uses :code:`reorder_lod_tensor_by_rank` to reorder the sequence information of the input LoD_Tensor in the specified order. 
-API reference : :ref:`api_fluid_layers_reorder_lod_tensor_by_rank` \ No newline at end of file +API reference : :ref:`api_fluid_layers_reorder_lod_tensor_by_rank` diff --git a/doc/fluid/api_guides/low_level/program.rst b/doc/fluid/api_guides/low_level/program.rst index 16ad7e55f5163fef36055614cff6d71bc1745669..f51b7468a339721ec95fdfb77507f10a08e9d5e3 100644 --- a/doc/fluid/api_guides/low_level/program.rst +++ b/doc/fluid/api_guides/low_level/program.rst @@ -75,7 +75,7 @@ Name Fluid 中部分网络层里包含了 :code:`name` 参数,如 :ref:`cn_api_fluid_layers_fc` 。此 :code:`name` 一般用来作为网络层输出、权重的前缀标识,具体规则如下: -* 用于网络层输出的前缀标识。若网络层中指定了 :code:`name` 参数,Fluid 将以 ``name值_数字.tmp_数字`` 作为唯一标识对网络层输出进行命名;未指定 :code:`name` 参数时,则以 ``OP名_数字.tmp_数字`` 的方式进行命名,其中的数字会自动递增,以区分同名OP下的不同网络层。 +* 用于网络层输出的前缀标识。若网络层中指定了 :code:`name` 参数,Fluid 将以 ``name值.tmp_数字`` 作为唯一标识对网络层输出进行命名;未指定 :code:`name` 参数时,则以 ``OP名_数字.tmp_数字`` 的方式进行命名,其中的数字会自动递增,以区分同名OP下的不同网络层。 * 用于权重或偏置变量的前缀标识。若在网络层中通过 ``param_attr`` 和 ``bias_attr`` 创建了权重变量或偏置变量, 如 :ref:`cn_api_fluid_layers_embedding` 、 :ref:`cn_api_fluid_layers_fc` ,则 Fluid 会自动生成 ``前缀.w_数字`` 或 ``前缀.b_数字`` 的唯一标识对其进行命名,其中 ``前缀`` 为用户指定的 :code:`name` 或自动生成的 ``OP名_数字`` 。若在 ``param_attr`` 和 ``bias_attr`` 中指定了 :code:`name` ,则用此 :code:`name` ,不再自动生成。细节请参考示例代码。 @@ -95,7 +95,7 @@ Fluid 中部分网络层里包含了 :code:`name` 参数,如 :ref:`cn_api_flui # default name fc_none = fluid.layers.fc(input=emb, size=1) # fc_0.w_0, fc_0.b_0 fc_none = fluid.layers.Print(fc_none) # Tensor[fc_0.tmp_1] - + fc_none1 = fluid.layers.fc(input=emb, size=1) # fc_1.w_0, fc_1.b_0 fc_none1 = fluid.layers.Print(fc_none1) # Tensor[fc_1.tmp_1] diff --git a/doc/fluid/api_guides/low_level/program_en.rst b/doc/fluid/api_guides/low_level/program_en.rst index efdbb2f87fd885fb790ba96f971950048160ab56..3a380229560796e4d9347211196ed58969c8f45f 100644 --- a/doc/fluid/api_guides/low_level/program_en.rst +++ b/doc/fluid/api_guides/low_level/program_en.rst @@ -74,7 +74,7 @@ Name In Fluid, some layers contain the parameter :code:`name` , such as :ref:`api_fluid_layers_fc` . This :code:`name` is generally used as the prefix identification of output and weight in network layers. The specific rules are as follows: -* Prefix identification for output of layers. If :code:`name` is specified in the layer, Fluid will name the output with ``nameValue_number.tmp_number`` . If the :code:`name` is not specified, ``OPName_number.tmp_number`` is automatically generated to name the layer. The numbers are automatically incremented to distinguish different network layers under the same operator. +* Prefix identification for output of layers. If :code:`name` is specified in the layer, Fluid will name the output with ``nameValue.tmp_number`` . If the :code:`name` is not specified, ``OPName_number.tmp_number`` is automatically generated to name the layer. The numbers are automatically incremented to distinguish different network layers under the same operator. * Prefix identification for weight or bias variable. If the weight and bias variables are created by ``param_attr`` and ``bias_attr`` in operator, such as :ref:`api_fluid_layers_embedding` 、 :ref:`api_fluid_layers_fc` , Fluid will generate ``prefix.w_number`` or ``prefix.b_number`` as unique identifier to name them, where the ``prefix`` is :code:`name` specified by users or ``OPName_number`` generated by default. If :code:`name` is specified in ``param_attr`` and ``bias_attr`` , the :code:`name` is no longer generated automatically. Refer to the sample code for details. 
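The naming rules above are easier to see with a concrete snippet. The sketch below is illustrative only (it is not part of this patch) and assumes the static-graph ``fluid.layers.fc`` API used in the surrounding examples; the exact ``.tmp_`` suffix numbers depend on how many intermediate variables each op creates, so the comments only indicate the prefixes.

.. code-block:: python

    import paddle.fluid as fluid

    data = fluid.data(name="img", shape=[None, 784], dtype="float32")

    # No name given: output prefix "fc_0", parameters "fc_0.w_0" / "fc_0.b_0"
    fc_default = fluid.layers.fc(input=data, size=10)

    # name="my_fc": output named "my_fc.tmp_*", parameters "my_fc.w_0" / "my_fc.b_0"
    fc_named = fluid.layers.fc(input=data, size=10, name="my_fc")

    # A name set in param_attr is used as-is, so this weight is created as "w_shared"
    fc_custom = fluid.layers.fc(
        input=data, size=10,
        param_attr=fluid.ParamAttr(name="w_shared"))

    print(fc_default.name, fc_named.name, fc_custom.name)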
diff --git a/doc/fluid/beginners_guide/basic_concept/broadcasting.rst b/doc/fluid/beginners_guide/basic_concept/broadcasting.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9750bebe8d4750a72430148e2b77188ef4daa87d
--- /dev/null
+++ b/doc/fluid/beginners_guide/basic_concept/broadcasting.rst
@@ -0,0 +1,99 @@
+.. _cn_user_guide_broadcasting:
+
+==================
+广播 (broadcasting)
+==================
+
+飞桨(PaddlePaddle,以下简称Paddle)和其他框架一样,提供的一些API支持广播(broadcasting)机制,允许在一些运算时使用不同形状的张量。
+通常来讲,如果有一个形状较小和一个形状较大的张量,我们希望多次使用较小的张量来对较大的张量执行一些操作,看起来像是较小形状的张量的形状首先被扩展到和较大形状的张量一致,然后做运算。
+值得注意的是,这期间并没有对较小形状张量的数据拷贝操作。
+
+飞桨的广播机制主要遵循如下规则(参考 Numpy 广播机制):如果两个张量的形状满足以下条件,我们认为这两个张量是可广播的:
+
+1. 每个张量至少为一维张量
+2. 从后往前比较张量的形状,当前维度的大小要么相等,要么其中一个等于1,要么其中一个不存在
+
+例如:
+
+.. code-block:: python
+
+    import paddle
+    import numpy as np
+
+    paddle.enable_imperative()
+
+    x = paddle.imperative.to_variable(np.ones((2,3,4), np.float32))
+    y = paddle.imperative.to_variable(np.ones((2,3,4), np.float32))
+    # 两个张量形状一致,可以广播
+
+    x = paddle.imperative.to_variable(np.ones((2,3,1,5), np.float32))
+    y = paddle.imperative.to_variable(np.ones((3,4,1), np.float32))
+    # 从后向前依次比较:
+    # 第一次:y的维度大小是1
+    # 第二次:x的维度大小是1
+    # 第三次:x和y的维度大小相等
+    # 第四次:y的维度不存在
+    # 所以 x和y是可以广播的
+
+    # 相反
+    x = paddle.imperative.to_variable(np.ones((2,3,4), np.float32))
+    y = paddle.imperative.to_variable(np.ones((2,3,6), np.float32))
+    # 此时x和y是不可广播的,因为第一次比较时4不等于6
+
+现在我们知道什么情况下两个张量是可以广播的,两个张量进行广播语义后的结果张量的形状计算规则如下:
+
+1. 如果两个张量的形状的长度不一致,那么需要在形状较短的张量前面补1,直到两个张量的形状长度相等。
+2. 保证两个张量形状长度相等之后,每个维度上的结果维度就是当前维度上较大的那个。
+
+例如:
+
+.. code-block:: python
+
+    import paddle
+    import numpy as np
+
+    paddle.enable_imperative()
+
+    x = paddle.imperative.to_variable(np.ones((2,1,4), np.float32))
+    y = paddle.imperative.to_variable(np.ones((3,1), np.float32))
+    z = x+y
+    print(z.shape)
+    # z的形状: [2,3,4]
+
+    x = paddle.imperative.to_variable(np.ones((2,1,4), np.float32))
+    y = paddle.imperative.to_variable(np.ones((3,2), np.float32))
+    z = x+y
+    print(z.shape)
+    # InvalidArgumentError: Broadcast dimension mismatch.
+
+除此之外,飞桨的elementwise系列API针对广播机制增加了axis参数。当使用形状较小的y来匹配形状较大的x、且y的形状长度小于x的形状长度时,
+axis表示y在x上应用广播机制时的起始维度的位置。当设置了axis参数后,张量的维度比较顺序变成了从axis开始,从前向后比较。当axis=-1时,axis = rank(x) - rank(y),
+同时y的大小为1的尾部维度将被忽略。
+
+例如:
+
+.. code-block:: python
+
+    import paddle
+    import numpy as np
+
+    paddle.enable_imperative()
+
+    x = paddle.imperative.to_variable(np.ones((2,1,4), np.float32))
+    y = paddle.imperative.to_variable(np.ones((3,1), np.float32))
+    z = paddle.elementwise_add(x,y,axis=1)
+    # z的形状 [2, 3, 4]
+
+    x = paddle.imperative.to_variable(np.ones((2,3,4,5), np.float32))
+    y = paddle.imperative.to_variable(np.ones((4,5), np.float32))
+    z = paddle.elementwise_add(x,y,axis=1)
+    print(z.shape)
+    # InvalidArgumentError: Broadcast dimension mismatch.
+    # 因为指定了axis之后,计算广播的维度从axis开始从前向后比较
+
+    x = paddle.imperative.to_variable(np.ones((2,3,4,5), np.float32))
+    y = paddle.imperative.to_variable(np.ones((3), np.float32))
+    z = paddle.elementwise_add(x,y,axis=1)
+    print(z.shape)
+    # z的形状 [2, 3, 4, 5]
+    # 因为此时是从axis=1的维度开始,从前向后比较维度进行广播
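上面两条结果形状的计算规则也可以用几行纯 Python 代码表达出来,便于与 NumPy 的行为相互印证。下面是一个示意性的实现(其中 ``broadcast_shape`` 是为了说明规则而假设的辅助函数,并非 Paddle 提供的 API):

.. code-block:: python

    import numpy as np

    def broadcast_shape(shape_x, shape_y):
        # 规则1:在形状较短的张量前面补1,直到两个形状长度相等
        ndim = max(len(shape_x), len(shape_y))
        shape_x = (1,) * (ndim - len(shape_x)) + tuple(shape_x)
        shape_y = (1,) * (ndim - len(shape_y)) + tuple(shape_y)

        out = []
        for dx, dy in zip(shape_x, shape_y):
            # 可广播的条件:两个维度相等,或其中一个为1
            if dx != dy and dx != 1 and dy != 1:
                raise ValueError("Broadcast dimension mismatch: %d vs %d" % (dx, dy))
            # 规则2:结果的维度取两者中较大的那个
            out.append(max(dx, dy))
        return tuple(out)

    print(broadcast_shape((2, 1, 4), (3, 1)))                        # (2, 3, 4)
    print(np.broadcast(np.ones((2, 1, 4)), np.ones((3, 1))).shape)   # (2, 3, 4),与 NumPy 一致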
diff --git a/doc/fluid/beginners_guide/basic_concept/broadcasting_en.rst b/doc/fluid/beginners_guide/basic_concept/broadcasting_en.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d5cfc104e6fbad17f6f20b5c494e601390ee7718
--- /dev/null
+++ b/doc/fluid/beginners_guide/basic_concept/broadcasting_en.rst
@@ -0,0 +1,101 @@
+.. _user_guide_broadcasting:
+
+==================
+Broadcasting
+==================
+
+PaddlePaddle provides broadcasting semantics in some APIs like other deep learning frameworks, which allows using tensors with different shapes while operating.
+In general, broadcasting is the rule of how the smaller tensor is "broadcast" across the larger tensor so that both end up with the same shape.
+Note that no copy happens while broadcasting.
+
+In PaddlePaddle, two tensors are broadcastable when the following rules hold (ref: Numpy Broadcasting):
+
+1. each tensor has at least one dimension.
+2. when comparing their shapes element-wise from backward to forward, two dimensions are compatible when
+they are equal, or one of them is 1, or one of them does not exist.
+
+For example:
+
+.. code-block:: python
+
+    import paddle
+    import numpy as np
+
+    paddle.enable_imperative()
+
+    x = paddle.imperative.to_variable(np.ones((2,3,4), np.float32))
+    y = paddle.imperative.to_variable(np.ones((2,3,4), np.float32))
+    # The two tensors have the same shape, so they are broadcastable
+
+    x = paddle.imperative.to_variable(np.ones((2,3,1,5), np.float32))
+    y = paddle.imperative.to_variable(np.ones((3,4,1), np.float32))
+    # compare from backward to forward:
+    # 1st step: y's dimension is 1
+    # 2nd step: x's dimension is 1
+    # 3rd step: the two dimensions are equal
+    # 4th step: y's dimension does not exist
+    # So, x and y are broadcastable
+
+    # In contrast
+    x = paddle.imperative.to_variable(np.ones((2,3,4), np.float32))
+    y = paddle.imperative.to_variable(np.ones((2,3,6), np.float32))
+    # x and y are not broadcastable, because in the first step from the tail x's dimension 4 is not equal to y's dimension 6
+
+Now that we know in which condition two tensors are broadcastable, the shape of the resulting tensor is calculated as follows:
+
+1. If the numbers of dimensions of x and y are not equal, prepend 1 to the dimensions of the tensor with fewer dimensions to make them equal length.
+2. Then, for each dimension size, the resulting dimension size is the max of the sizes of x and y along that dimension.
+
+For example:
+
+.. code-block:: python
+
+    import paddle
+    import numpy as np
+
+    paddle.enable_imperative()
+
+    x = paddle.imperative.to_variable(np.ones((2,1,4), np.float32))
+    y = paddle.imperative.to_variable(np.ones((3,1), np.float32))
+    z = x+y
+    print(z.shape)
+    # z's shape: [2,3,4]
+
+    x = paddle.imperative.to_variable(np.ones((2,1,4), np.float32))
+    y = paddle.imperative.to_variable(np.ones((3,2), np.float32))
+    z = x+y
+    print(z.shape)
+    # InvalidArgumentError: Broadcast dimension mismatch.
+
+In addition, an axis argument is introduced to PaddlePaddle's broadcasting semantics. When using a smaller tensor y to broadcast against a larger tensor x,
+and y's number of dimensions is smaller than x's, we can specify an axis to indicate the starting dimension to do broadcasting.
+In this case, the comparison of dimensions runs from forward to backward, starting at axis. When axis=-1, axis = rank(x) - rank(y),
+and y's trailing dimensions of size 1 will be ignored.
+
+For example:
+
+..
code-block:: python + + import paddle + import numpy as np + + paddle.enable_imperative() + + x = paddle.imperative.to_variable(np.ones((2,1,4), np.float32)) + y = paddle.imperative.to_variable(np.ones((3,1), np.float32)) + z = paddle.elementwise_add(x,y,axis=1) + # z'shape [2, 3, 4] + + x = paddle.imperative.to_variable(np.ones((2,3,4,5), np.float32)) + y = paddle.imperative.to_variable(np.ones((4,5), np.float32)) + z = paddle.elementwise_add(x,y,axis=1) + print(z.shape) + # InvalidArgumentError: Broadcast dimension mismatch. + # axis is indicated, comparation between dimentions starts at axis. + + x = paddle.imperative.to_variable(np.ones((2,3,4,5), np.float32)) + y = paddle.imperative.to_variable(np.ones((3), np.float32)) + z = paddle.elementwise_add(x,y,axis=1) + print(z.shape) + # z'shape [2, 3, 4, 5] + # Start comparation at axis=1 from forward to backward. diff --git a/doc/fluid/beginners_guide/basic_concept/dygraph/DyGraph.md b/doc/fluid/beginners_guide/basic_concept/dygraph/DyGraph.md deleted file mode 100644 index 81233965bf85f4b7b9f37248f3a30a7436d511a7..0000000000000000000000000000000000000000 --- a/doc/fluid/beginners_guide/basic_concept/dygraph/DyGraph.md +++ /dev/null @@ -1,860 +0,0 @@ -# 动态图机制-DyGraph - -PaddlePaddle的DyGraph模式是一种动态的图执行机制,可以立即执行结果,无需构建整个图。同时,和以往静态的执行计算图不同,DyGraph模式下您的所有操作可以立即获得执行结果,而不必等待所构建的计算图全部执行完成,这样可以让您更加直观地构建PaddlePaddle下的深度学习任务,以及进行模型的调试,同时还减少了大量用于构建静态计算图的代码,使得您编写、调试网络的过程变得更加便捷。 - -PaddlePaddle DyGraph是一个更加灵活易用的模式,可提供: - -* 更加灵活便捷的代码组织结构:使用python的执行控制流程和面向对象的模型设计 -* 更加便捷的调试功能:直接使用python的打印方法即时打印所需要的结果,从而检查正在运行的模型结果便于测试更改 -* 和静态执行图通用的模型代码:同样的模型代码可以使用更加便捷的DyGraph调试,执行,同时也支持使用原有的静态图模式执行 - -有关的动态图机制更多的实际模型示例请参考[Paddle/models/dygraph](https://github.com/PaddlePaddle/models/tree/develop/dygraph) - -## 设置和基本用法 - -1. 升级到最新的PaddlePaddle 1.6.0: - - ``` - pip install -q --upgrade paddlepaddle==1.6.0 - ``` - -2. 使用`fluid.dygraph.guard(place=None)` 上下文: - - ```python - import paddle.fluid as fluid - with fluid.dygraph.guard(): - # write your executable dygraph code here - ``` - -现在您就可以在`fluid.dygraph.guard()`上下文环境中使用DyGraph的模式运行网络了,DyGraph将改变以往PaddlePaddle的执行方式: 现在他们将会立即执行,并且将计算结果返回给Python。 - -Dygraph将非常适合和Numpy一起使用,使用`fluid.dygraph.to_variable(x)`将会将ndarray转换为`fluid.Variable`,而使用`fluid.Variable.numpy()`将可以把任意时刻获取到的计算结果转换为Numpy`ndarray`: - -```python -x = np.ones([2, 2], np.float32) -with fluid.dygraph.guard(): - inputs = [] - for _ in range(10): - inputs.append(fluid.dygraph.to_variable(x)) - ret = fluid.layers.sums(inputs) - print(ret.numpy()) -``` - -得到输出: - -``` -[[10. 10.] -[10. 10.]] -``` - - -> 这里创建了一系列`ndarray`的输入,执行了一个`sum`操作之后,我们可以直接将运行的结果打印出来 - -然后通过调用`reduce_sum`后使用`Variable.backward()`方法执行反向,使用`Variable.gradient()`方法即可获得反向网络执行完成后的梯度值的`ndarray`形式: - - -```python -loss = fluid.layers.reduce_sum(ret) -loss.backward() -print(loss.gradient()) -``` - - -得到输出 : - -``` -[1.] -``` - -## 基于DyGraph构建网络 - -1. 编写一段用于DyGraph执行的Object-Oriented-Designed, PaddlePaddle模型代码主要由以下**两部分**组成: **请注意,如果您设计的这一层结构是包含参数的,则必须要使用继承自`fluid.dygraph.Layer`的Object-Oriented-Designed的类来描述该层的行为。** - - 1. 建立一个可以在DyGraph模式中执行的,Object-Oriented的网络,需要继承自`fluid.dygraph.Layer`,其中需要调用基类的`__init__`方法,在构造函数中,我们通常会执行一些例如参数初始化,子网络初始化的操作,执行这些操作时不依赖于输入的动态信息: - - ```python - class MyLayer(fluid.dygraph.Layer): - def __init__(self, input_size): - super(MyLayer, self).__init__() - self.linear = fluid.dygraph.nn.Linear(input_size, 12) - ``` - - 2. 
实现一个`forward(self, *inputs)`的执行函数,该函数将负责执行实际运行时网络的执行逻辑, 该函数将会在每一轮训练/预测中被调用,这里我们将执行一个简单的 `linear` -> `relu` -> `elementwise add` -> `reduce sum`: - - ```python - def forward(self, inputs): - x = self.linear(inputs) - x = fluid.layers.relu(inputs) - self._x_for_debug = x - x = fluid.layers.elementwise_mul(x, x) - x = fluid.layers.reduce_sum(x) - return [x] - ``` - -2. 在`fluid.dygraph.guard()`中执行: - - 1. 使用Numpy构建输入: - - ```python - np_inp = np.array([1.0, 2.0, -1.0], dtype=np.float32) - ``` - - 2. 转换输入的`ndarray`为`Variable`, 并执行前向网络获取返回值: 使用`fluid.dygraph.to_variable(np_inp)`转换Numpy输入为DyGraph接收的输入,然后使用`my_layer(var_inp)[0]`调用callable object并且获取了`x`作为返回值,利用`x.numpy()`方法直接获取了执行得到的`x`的`ndarray`返回值。 - - ```python - with fluid.dygraph.guard(): - var_inp = fluid.dygraph.to_variable(np_inp) - my_layer = MyLayer(np_inp.shape[-1]) - x = my_layer(var_inp)[0] - dy_out = x.numpy() - ``` - - 3. 计算梯度:自动微分对于实现机器学习算法(例如用于训练神经网络的反向传播)来说很有用, 使用`x.backward()`方法可以从某个`fluid.Varaible`开始执行反向网络,同时利用`my_layer._x_for_debug.gradient()`获取了网络中`x`梯度的`ndarray` 返回值: - - ```python - x.backward() - dy_grad = my_layer._x_for_debug.gradient() - ``` - -完整代码如下: - -```python -import paddle.fluid as fluid -import numpy as np - - -class MyLayer(fluid.dygraph.Layer): - def __init__(self, input_size): - super(MyLayer, self).__init__() - self.linear = fluid.dygraph.nn.Linear(input_size, 12) - - def forward(self, inputs): - x = self.linear(inputs) - x = fluid.layers.relu(x) - self._x_for_debug = x - x = fluid.layers.elementwise_mul(x, x) - x = fluid.layers.reduce_sum(x) - return [x] - - -if __name__ == '__main__': - np_inp = np.array([[1.0, 2.0, -1.0]], dtype=np.float32) - with fluid.dygraph.guard(): - var_inp = fluid.dygraph.to_variable(np_inp) - my_layer = MyLayer(np_inp.shape[-1]) - x = my_layer(var_inp)[0] - dy_out = x.numpy() - x.backward() - dy_grad = my_layer._x_for_debug.gradient() - my_layer.clear_gradients() # 将参数梯度清零以保证下一轮训练的正确性 -``` - -### 关于自动剪枝 - -每个 ``Variable`` 都有一个 ``stop_gradient`` 属性,可以用于细粒度地在反向梯度计算时排除部分子图,以提高效率。 - -如果OP只要有一个输入需要梯度,那么该OP的输出也需要梯度。 -相反,只有当OP的所有输入都不需要梯度时,该OP的输出也不需要梯度。 -在所有的 ``Variable`` 都不需要梯度的子图中,反向计算就不会进行计算了。 - -在动态图模式下,除参数以外的所有 ``Variable`` 的 ``stop_gradient`` 属性默认值都为 ``True``,而参数的 ``stop_gradient`` 属性默认值为 ``False``。 -该属性用于自动剪枝,避免不必要的反向运算。 - -例如: - -```python -import paddle.fluid as fluid -import numpy as np - -with fluid.dygraph.guard(): - x = fluid.dygraph.to_variable(np.random.randn(5, 5)) # 默认stop_gradient=True - y = fluid.dygraph.to_variable(np.random.randn(5, 5)) # 默认stop_gradient=True - z = fluid.dygraph.to_variable(np.random.randn(5, 5)) - z.stop_gradient = False - a = x + y - a.stop_gradient # True - b = a + z - b.stop_gradient # False -``` - -当你想冻结你的模型的一部分,或者你事先知道你不会使用某些参数的梯度的时候,这个功能是非常有用的。 - -例如: - -```python -import paddle.fluid as fluid -import numpy as np - -with fluid.dygraph.guard(): - value0 = np.arange(26).reshape(2, 13).astype("float32") - value1 = np.arange(6).reshape(2, 3).astype("float32") - value2 = np.arange(10).reshape(2, 5).astype("float32") - fc = fluid.Linear(13, 5, dtype="float32") - fc2 = fluid.Linear(3, 3, dtype="float32") - a = fluid.dygraph.to_variable(value0) - b = fluid.dygraph.to_variable(value1) - c = fluid.dygraph.to_variable(value2) - out1 = fc(a) - out2 = fc2(b) - out1.stop_gradient = True # 将不会对out1这部分子图做反向计算 - out = fluid.layers.concat(input=[out1, out2, c], axis=1) - out.backward() - # 可以发现这里fc参数的梯度都为0 - assert (fc.weight.gradient() == 0).all() - assert (out1.gradient() == 0).all() -``` - -## 使用DyGraph训练模型 - 
-接下来我们将以“手写数字识别”这个最基础的模型为例,展示如何利用DyGraph模式搭建并训练一个模型: - -有关手写数字识别的相关理论知识请参考[PaddleBook](https://github.com/PaddlePaddle/book/tree/develop/02.recognize_digits)中的内容,我们在这里默认您已经了解了该模型所需的深度学习理论知识。 - -1. 准备数据,我们使用`paddle.dataset.mnist`作为训练所需要的数据集: - - ```python - train_reader = paddle.batch( - paddle.dataset.mnist.train(), batch_size=BATCH_SIZE, drop_last=True) - ``` - -2. 构建网络,虽然您可以根据之前的介绍自己定义所有的网络结构,但是您也可以直接使用`fluid.dygraph.Layer`当中我们为您定制好的一些基础网络结构,这里我们利用`fluid.dygraph.Conv2D`以及`fluid.dygraph.Pool2d`构建了基础的`SimpleImgConvPool`: - - ```python - class SimpleImgConvPool(fluid.dygraph.Layer): - def __init__(self, - num_channels, - num_filters, - filter_size, - pool_size, - pool_stride, - pool_padding=0, - pool_type='max', - global_pooling=False, - conv_stride=1, - conv_padding=0, - conv_dilation=1, - conv_groups=1, - act=None, - use_cudnn=False, - param_attr=None, - bias_attr=None): - super(SimpleImgConvPool, self).__init__() - - self._conv2d = fluid.dygraph.Conv2D( - num_channels=num_channels, - num_filters=num_filters, - filter_size=filter_size, - stride=conv_stride, - padding=conv_padding, - dilation=conv_dilation, - groups=conv_groups, - param_attr=param_attr, - bias_attr=bias_attr, - act=act, - use_cudnn=use_cudnn) - - self._pool2d = fluid.dygraph.Pool2D( - pool_size=pool_size, - pool_type=pool_type, - pool_stride=pool_stride, - pool_padding=pool_padding, - global_pooling=global_pooling, - use_cudnn=use_cudnn) - - def forward(self, inputs): - x = self._conv2d(inputs) - x = self._pool2d(x) - return x - ``` - - > 注意: 构建网络时子网络的定义和使用请在`__init__`中进行, 而子网络的执行则在`forward`函数中进行 - -3. 利用已经构建好的`SimpleImgConvPool`组成最终的`MNIST`网络: - - ```python - class MNIST(fluid.dygraph.Layer): - def __init__(self): - super(MNIST, self).__init__() - - self._simple_img_conv_pool_1 = SimpleImgConvPool( - 1, 20, 5, 2, 2, act="relu") - - self._simple_img_conv_pool_2 = SimpleImgConvPool( - 20, 50, 5, 2, 2, act="relu") - - self.pool_2_shape = 50 * 4 * 4 - SIZE = 10 - scale = (2.0 / (self.pool_2_shape**2 * SIZE))**0.5 - self._fc = fluid.dygraph.Linear( - self.pool_2_shape, - 10, - param_attr=fluid.param_attr.ParamAttr( - initializer=fluid.initializer.NormalInitializer( - loc=0.0, scale=scale)), - act="softmax") - - def forward(self, inputs, label=None): - x = self._simple_img_conv_pool_1(inputs) - x = self._simple_img_conv_pool_2(x) - x = fluid.layers.reshape(x, shape=[-1, self.pool_2_shape]) - x = self._fc(x) - if label is not None: - acc = fluid.layers.accuracy(input=x, label=label) - return x, acc - else: - return x - ``` - -4. 在`fluid.dygraph.guard()`中定义配置好的`MNIST`网络结构,此时即使没有训练也可以在`fluid.dygraph.guard()`中调用模型并且检查输出: - - ```python - with fluid.dygraph.guard(): - mnist = MNIST() - train_reader = paddle.batch( - paddle.dataset.mnist.train(), batch_size=32, drop_last=True) - id, data = list(enumerate(train_reader()))[0] - dy_x_data = np.array( - [x[0].reshape(1, 28, 28) - for x in data]).astype('float32') - img = fluid.dygraph.to_variable(dy_x_data) - print("result is: {}".format(mnist(img).numpy())) - ``` - - 输出: - - ``` - result is: [[0.10135901 0.1051138 0.1027941 ... 0.0972859 0.10221873 0.10165327] - [0.09735426 0.09970362 0.10198303 ... 0.10134517 0.10179105 0.10025002] - [0.09539858 0.10213123 0.09543551 ... 0.10613529 0.10535969 0.097991 ] - ... - [0.10120598 0.0996111 0.10512722 ... 0.10067689 0.10088114 0.10071224] - [0.09889644 0.10033772 0.10151272 ... 0.10245881 0.09878646 0.101483 ] - [0.09097178 0.10078511 0.10198414 ... 0.10317434 0.10087223 0.09816764]] - ``` - -5. 
构建训练循环,在每一轮参数更新完成后我们调用`mnist.clear_gradients()`来重置梯度: - - ```python - with fluid.dygraph.guard(): - epoch_num = 5 - BATCH_SIZE = 64 - train_reader = paddle.batch( - paddle.dataset.mnist.train(), batch_size=32, drop_last=True) - mnist = MNIST() - adam = fluid.optimizer.AdamOptimizer(learning_rate=0.001, parameter_list=mnist.parameters()) - for epoch in range(epoch_num): - for batch_id, data in enumerate(train_reader()): - dy_x_data = np.array([x[0].reshape(1, 28, 28) - for x in data]).astype('float32') - y_data = np.array( - [x[1] for x in data]).astype('int64').reshape(-1, 1) - - img = fluid.dygraph.to_variable(dy_x_data) - label = fluid.dygraph.to_variable(y_data) - - cost = mnist(img) - - loss = fluid.layers.cross_entropy(cost, label) - avg_loss = fluid.layers.mean(loss) - - if batch_id % 100 == 0 and batch_id is not 0: - print("epoch: {}, batch_id: {}, loss is: {}".format(epoch, batch_id, avg_loss.numpy())) - avg_loss.backward() - adam.minimize(avg_loss) - mnist.clear_gradients() - ``` - -6. 变量及优化器 - - 模型的参数或者任何您希望检测的值可以作为变量封装在类中,然后通过对象获取并使用`numpy()`方法获取其`ndarray`的输出, 在训练过程中您可以使用`mnist.parameters()`来获取到网络中所有的参数,也可以指定某一个`Layer`的某个参数或者`parameters()`来获取该层的所有参数,使用`numpy()`方法随时查看参数的值 - - 反向运行后调用之前定义的`Adam`优化器对象的`minimize`方法进行参数更新: - - ```python - with fluid.dygraph.guard(): - epoch_num = 5 - BATCH_SIZE = 64 - - mnist = MNIST() - adam = fluid.optimizer.AdamOptimizer(learning_rate=0.001, parameter_list=mnist.parameters()) - train_reader = paddle.batch( - paddle.dataset.mnist.train(), batch_size= BATCH_SIZE, drop_last=True) - - np.set_printoptions(precision=3, suppress=True) - for epoch in range(epoch_num): - for batch_id, data in enumerate(train_reader()): - dy_x_data = np.array( - [x[0].reshape(1, 28, 28) - for x in data]).astype('float32') - y_data = np.array( - [x[1] for x in data]).astype('int64').reshape(BATCH_SIZE, 1) - - img = fluid.dygraph.to_variable(dy_x_data) - label = fluid.dygraph.to_variable(y_data) - label.stop_gradient = True - - cost = mnist(img) - loss = fluid.layers.cross_entropy(cost, label) - avg_loss = fluid.layers.mean(loss) - - dy_out = avg_loss.numpy() - - avg_loss.backward() - adam.minimize(avg_loss) - mnist.clear_gradients() - - dy_param_value = {} - for param in mnist.parameters(): - dy_param_value[param.name] = param.numpy() - - if batch_id % 20 == 0: - print("Loss at step {}: {}".format(batch_id, avg_loss.numpy())) - print("Final loss: {}".format(avg_loss.numpy())) - print("_simple_img_conv_pool_1_conv2d W's mean is: {}".format(mnist._simple_img_conv_pool_1._conv2d._filter_param.numpy().mean())) - print("_simple_img_conv_pool_1_conv2d Bias's mean is: {}".format(mnist._simple_img_conv_pool_1._conv2d._bias_param.numpy().mean())) - ``` - - 输出: - - ``` - Loss at step 0: [2.302] - Loss at step 20: [1.616] - Loss at step 40: [1.244] - Loss at step 60: [1.142] - Loss at step 80: [0.911] - Loss at step 100: [0.824] - Loss at step 120: [0.774] - Loss at step 140: [0.626] - Loss at step 160: [0.609] - Loss at step 180: [0.627] - Loss at step 200: [0.466] - Loss at step 220: [0.499] - Loss at step 240: [0.614] - Loss at step 260: [0.585] - Loss at step 280: [0.503] - Loss at step 300: [0.423] - Loss at step 320: [0.509] - Loss at step 340: [0.348] - Loss at step 360: [0.452] - Loss at step 380: [0.397] - Loss at step 400: [0.54] - Loss at step 420: [0.341] - Loss at step 440: [0.337] - Loss at step 460: [0.155] - Final loss: [0.164] - _simple_img_conv_pool_1_conv2d W's mean is: 0.00606656912714 - _simple_img_conv_pool_1_conv2d Bias's mean is: -3.4576318285e-05 - ``` - -7. 
性能 - -在使用`fluid.dygraph.guard()`时可以通过传入`fluid.CUDAPlace(0)`或者`fluid.CPUPlace()`来选择执行DyGraph的设备,通常如果不做任何处理将会自动适配您的设备。 - -## 使用多卡训练模型 - -目前PaddlePaddle支持通过多进程方式进行多卡训练,即每个进程对应一张卡。训练过程中,在第一次执行前向操作时,如果该操作需要参数,则会将0号卡的参数Broadcast到其他卡上,确保各个卡上的参数一致;在计算完反向操作之后,将产生的参数梯度在所有卡之间进行聚合;最后在各个GPU卡上分别进行参数更新。 - -```python -place = fluid.CUDAPlace(fluid.dygraph.parallel.Env().dev_id) -with fluid.dygraph.guard(place): - strategy = fluid.dygraph.parallel.prepare_context() - epoch_num = 5 - BATCH_SIZE = 64 - mnist = MNIST() - adam = fluid.optimizer.AdamOptimizer(learning_rate=0.001, parameter_list=mnist.parameters()) - mnist = fluid.dygraph.parallel.DataParallel(mnist, strategy) - - train_reader = paddle.batch( - paddle.dataset.mnist.train(), batch_size=BATCH_SIZE, drop_last=True) - train_reader = fluid.contrib.reader.distributed_batch_reader( - train_reader) - - for epoch in range(epoch_num): - for batch_id, data in enumerate(train_reader()): - dy_x_data = np.array([x[0].reshape(1, 28, 28) - for x in data]).astype('float32') - y_data = np.array( - [x[1] for x in data]).astype('int64').reshape(-1, 1) - - img = fluid.dygraph.to_variable(dy_x_data) - label = fluid.dygraph.to_variable(y_data) - label.stop_gradient = True - - cost, acc = mnist(img, label) - - loss = fluid.layers.cross_entropy(cost, label) - avg_loss = fluid.layers.mean(loss) - - avg_loss = mnist.scale_loss(avg_loss) - avg_loss.backward() - mnist.apply_collective_grads() - - adam.minimize(avg_loss) - mnist.clear_gradients() - if batch_id % 100 == 0 and batch_id is not 0: - print("epoch: {}, batch_id: {}, loss is: {}".format(epoch, batch_id, avg_loss.numpy())) -``` - -动态图单卡训练转多卡训练需要修改的地方主要有四处: -1. 需要从环境变量获取设备的ID,即: - - ```python - place = fluid.CUDAPlace(fluid.dygraph.parallel.Env().dev_id) - ``` - -2. 需要对原模型做一些预处理,即: - - ```python - strategy = fluid.dygraph.parallel.prepare_context() - mnist = MNIST() - adam = AdamOptimizer(learning_rate=0.001, parameter_list=mnist.parameters()) - mnist = fluid.dygraph.parallel.DataParallel(mnist, strategy) - ``` - -3. 数据读取,必须确保每个进程读取的数据是不同的,即所有进程读取数据的交集为空,所有进程读取数据的并集是完整的数据集: - - ```python - train_reader = paddle.batch( - paddle.dataset.mnist.train(), batch_size=BATCH_SIZE, drop_last=True) - train_reader = fluid.contrib.reader.distributed_batch_reader( - train_reader) - ``` - -4. 需要对loss进行调整,以及对参数的梯度进行聚合,即: - - ```python - avg_loss = mnist.scale_loss(avg_loss) - avg_loss.backward() - mnist.apply_collective_grads() - ``` - -Paddle动态图多进程多卡模型训练启动时需要指定使用的GPU,即如果使用`0,1,2,3`卡,启动方式如下: - -``` -python -m paddle.distributed.launch --selected_gpus=0,1,2,3 --log_dir ./mylog train.py -``` - -输出结果为: - -``` ------------ Configuration Arguments ----------- -cluster_node_ips: 127.0.0.1 -log_dir: ./mylog -node_ip: 127.0.0.1 -print_config: True -selected_gpus: 0,1,2,3 -started_port: 6170 -training_script: train.py -training_script_args: ['--use_data_parallel', '1'] -use_paddlecloud: True ------------------------------------------------- -trainers_endpoints: 127.0.0.1:6170,127.0.0.1:6171,127.0.0.1:6172,127.0.0.1:6173 , node_id: 0 , current_node_ip: 127.0.0.1 , num_nodes: 1 , node_ips: ['127.0.0.1'] , nranks: 4 -``` - -此时,程序会将每个进程的输出log导出到./mylog路径下: - -``` -. 
-├── mylog -│ ├── workerlog.0 -│ ├── workerlog.1 -│ ├── workerlog.2 -│ └── workerlog.3 -└── train.py -``` - -如果不指定`--log_dir`,程序会将打印出所有进程的输出,即: - -``` ------------ Configuration Arguments ----------- -cluster_node_ips: 127.0.0.1 -log_dir: None -node_ip: 127.0.0.1 -print_config: True -selected_gpus: 0,1,2,3 -started_port: 6170 -training_script: train.py -training_script_args: ['--use_data_parallel', '1'] -use_paddlecloud: True ------------------------------------------------- -trainers_endpoints: 127.0.0.1:6170,127.0.0.1:6171,127.0.0.1:6172,127.0.0.1:6173 , node_id: 0 , current_node_ip: 127.0.0.1 , num_nodes: 1 , node_ips: ['127.0.0.1'] , nranks: 4 -grep: warning: GREP_OPTIONS is deprecated; please use an alias or script -grep: warning: GREP_OPTIONS is deprecated; please use an alias or script -grep: warning: GREP_OPTIONS is deprecated; please use an alias or script -grep: warning: GREP_OPTIONS is deprecated; please use an alias or script -I0923 09:32:36.423513 56410 nccl_context.cc:120] init nccl context nranks: 4 local rank: 1 gpu id: 1 -I0923 09:32:36.425287 56411 nccl_context.cc:120] init nccl context nranks: 4 local rank: 2 gpu id: 2 -I0923 09:32:36.429337 56409 nccl_context.cc:120] init nccl context nranks: 4 local rank: 0 gpu id: 0 -I0923 09:32:36.429440 56412 nccl_context.cc:120] init nccl context nranks: 4 local rank: 3 gpu id: 3 -W0923 09:32:42.594097 56412 device_context.cc:198] Please NOTE: device: 3, CUDA Capability: 70, Driver API Version: 9.0, Runtime API Version: 9.0 -W0923 09:32:42.605836 56412 device_context.cc:206] device: 3, cuDNN Version: 7.5. -W0923 09:32:42.632463 56410 device_context.cc:198] Please NOTE: device: 1, CUDA Capability: 70, Driver API Version: 9.0, Runtime API Version: 9.0 -W0923 09:32:42.637948 56410 device_context.cc:206] device: 1, cuDNN Version: 7.5. -W0923 09:32:42.648674 56411 device_context.cc:198] Please NOTE: device: 2, CUDA Capability: 70, Driver API Version: 9.0, Runtime API Version: 9.0 -W0923 09:32:42.654021 56411 device_context.cc:206] device: 2, cuDNN Version: 7.5. -W0923 09:32:43.048696 56409 device_context.cc:198] Please NOTE: device: 0, CUDA Capability: 70, Driver API Version: 9.0, Runtime API Version: 9.0 -W0923 09:32:43.053236 56409 device_context.cc:206] device: 0, cuDNN Version: 7.5. -start data reader (trainers_num: 4, trainer_id: 2) -start data reader (trainers_num: 4, trainer_id: 3) -start data reader (trainers_num: 4, trainer_id: 1) -start data reader (trainers_num: 4, trainer_id: 0) -Loss at epoch 0 step 0: [0.57390565] -Loss at epoch 0 step 0: [0.57523954] -Loss at epoch 0 step 0: [0.575606] -Loss at epoch 0 step 0: [0.5767452] -``` - -## 模型参数的保存 - -动态图由于模型和优化器在不同的对象中存储,模型参数和优化器信息要分别存储。 - -
在模型训练中可以使用 `paddle.fluid.dygraph.save_dygraph(state_dict, model_path)` 来保存模型参数的dict或优化器信息的dict。 - -同样可以使用 `paddle.fluid.dygraph.load_dygraph(model_path)` 获取保存的模型参数的dict和优化器信息的dict。 - -再使用`your_modle_object.set_dict(para_dict)`接口来恢复保存的模型参数从而达到继续训练的目的。 - -以及使用`your_optimizer_object.set_dict(opti_dict)`接口来恢复保存的优化器中的`learning rate decay`值。 - -下面的代码展示了如何在“手写数字识别”任务中保存参数并且读取已经保存的参数来继续训练。 - -```python -import paddle.fluid as fluid - -with fluid.dygraph.guard(): - epoch_num = 5 - BATCH_SIZE = 64 - - mnist = MNIST() - adam = fluid.optimizer.Adam(learning_rate=0.001, parameter_list=mnist.parameters()) - train_reader = paddle.batch( - paddle.dataset.mnist.train(), batch_size= BATCH_SIZE, drop_last=True) - - np.set_printoptions(precision=3, suppress=True) - dy_param_init_value={} - for epoch in range(epoch_num): - for batch_id, data in enumerate(train_reader()): - dy_x_data = np.array( - [x[0].reshape(1, 28, 28) - for x in data]).astype('float32') - y_data = np.array( - [x[1] for x in data]).astype('int64').reshape(BATCH_SIZE, 1) - - img = fluid.dygraph.to_variable(dy_x_data) - label = fluid.dygraph.to_variable(y_data) - label.stop_gradient = True - - cost = mnist(img) - loss = fluid.layers.cross_entropy(cost, label) - avg_loss = fluid.layers.mean(loss) - - dy_out = avg_loss.numpy() - - avg_loss.backward() - adam.minimize(avg_loss) - if batch_id == 20: - fluid.dygraph.save_dygraph(mnist.state_dict(), "paddle_dy") - mnist.clear_gradients() - - if batch_id == 20: - for param in mnist.parameters(): - dy_param_init_value[param.name] = param.numpy() - model, _ = fluid.dygraph.load_dygraph("paddle_dy") - mnist.set_dict(model) - break - if epoch == 0: - break - restore = mnist.parameters() - # check save and load - - success = True - for value in restore: - if (not np.array_equal(value.numpy(), dy_param_init_value[value.name])) or (not np.isfinite(value.numpy().all())) or (np.isnan(value.numpy().any())): - success = False - print("model save and load success? 
{}".format(success)) -``` - -需要注意的是,如果采用多卡训练,只需要一个进程对模型参数进行保存,因此在保存模型参数时,需要进行指定保存哪个进程的参数,比如 - -```python - if fluid.dygraph.parallel.Env().local_rank == 0: - fluid.dygraph.save_dygraph(mnist.state_dict(), "paddle_dy") -``` - -## 模型评估 - -当我们需要在DyGraph模式下利用搭建的模型进行预测任务,请在`fluid.dygraph.guard()`上下文中调用一次`YourModel.eval()`接口来切换到预测模式。例如,在之前的手写数字识别模型中我们可以使用`mnist.eval()`来切换到预测模式。需要显示地调用`YourModel.eval()`切换到预测模式的原因是,我们默认在`fluid.dygraph.guard()`上下文中是训练模式,训练模式下DyGraph在运行前向网络的时候会自动求导,添加反向网络;而在预测时,DyGraph只需要执行前向的预测网络,不需要进行自动求导并执行反向网络。 - -**请注意,如果您在`GPU`设备中运行`YourModel`模型,并且未调用`loss.backward`(通常来说,是进行预测时),则必须调用`YourModel.eval()`,以避免构建反向网络,否则有可能会导致显存不足。** - -下面的代码展示了如何使用DyGraph模式训练一个用于执行“手写数字识别”任务的模型并保存,并且利用已经保存好的模型进行预测。 - -我们在`fluid.dygraph.guard()`上下文中进行了模型的保存和训练,值得注意的是,当我们需要在训练的过程中进行预测时需要使用`YourModel.eval()`切换到预测模式,并且在预测完成后使用`YourModel.train()`切换回训练模式继续训练。 - -我们在`inference_mnist `中启用另一个`fluid.dygraph.guard()`,并在其上下文中`load`之前保存的`checkpoint`进行预测,同样的在执行预测前需要使用`YourModel.eval()`来切换到预测模式。 - -```python -def test_mnist(reader, model, batch_size): - acc_set = [] - avg_loss_set = [] - for batch_id, data in enumerate(reader()): - dy_x_data = np.array([x[0].reshape(1, 28, 28) - for x in data]).astype('float32') - y_data = np.array( - [x[1] for x in data]).astype('int64').reshape(batch_size, 1) - - img = fluid.dygraph.to_variable(dy_x_data) - label = fluid.dygraph.to_variable(y_data) - label.stop_gradient = True - prediction, acc = model(img, label) - loss = fluid.layers.cross_entropy(input=prediction, label=label) - avg_loss = fluid.layers.mean(loss) - acc_set.append(float(acc.numpy())) - avg_loss_set.append(float(avg_loss.numpy())) - - # get test acc and loss - acc_val_mean = np.array(acc_set).mean() - avg_loss_val_mean = np.array(avg_loss_set).mean() - - return avg_loss_val_mean, acc_val_mean - - -def inference_mnist(): - with fluid.dygraph.guard(): - mnist_infer = MNIST() - # load checkpoint - model_dict, _ = fluid.dygraph.load_dygraph("paddle_dy") - mnist_infer.load_dict(model_dict) - print("checkpoint loaded") - - # start evaluate mode - mnist_infer.eval() - - def load_image(file): - im = Image.open(file).convert('L') - im = im.resize((28, 28), Image.ANTIALIAS) - im = np.array(im).reshape(1, 1, 28, 28).astype(np.float32) - im = im / 255.0 * 2.0 - 1.0 - return im - - cur_dir = os.path.dirname(os.path.realpath(__file__)) - tensor_img = load_image(cur_dir + '/image/infer_3.png') - - results = mnist_infer(fluid.dygraph.to_variable(tensor_img)) - lab = np.argsort(results.numpy()) - print("Inference result of image/infer_3.png is: %d" % lab[0][-1]) - -with fluid.dygraph.guard(): - epoch_num = 1 - BATCH_SIZE = 64 - mnist = MNIST() - adam = fluid.optimizer.AdamOptimizer(learning_rate=0.001, parameter_list=mnist.parameters()) - test_reader = paddle.batch( - paddle.dataset.mnist.test(), batch_size=BATCH_SIZE, drop_last=True) - - train_reader = paddle.batch( - paddle.dataset.mnist.train(), - batch_size=BATCH_SIZE, - drop_last=True) - - for epoch in range(epoch_num): - for batch_id, data in enumerate(train_reader()): - dy_x_data = np.array([x[0].reshape(1, 28, 28) - for x in data]).astype('float32') - y_data = np.array( - [x[1] for x in data]).astype('int64').reshape(-1, 1) - - img = fluid.dygraph.to_variable(dy_x_data) - label = fluid.dygraph.to_variable(y_data) - label.stop_gradient = True - - cost, acc = mnist(img, label) - - loss = fluid.layers.cross_entropy(cost, label) - avg_loss = fluid.layers.mean(loss) - - - avg_loss.backward() - - adam.minimize(avg_loss) - # save checkpoint - mnist.clear_gradients() - if 
batch_id % 100 == 0: - print("Loss at epoch {} step {}: {:}".format( - epoch, batch_id, avg_loss.numpy())) - - mnist.eval() - test_cost, test_acc = test_mnist(test_reader, mnist, BATCH_SIZE) - mnist.train() - print("Loss at epoch {} , Test avg_loss is: {}, acc is: {}".format( - epoch, test_cost, test_acc)) - - fluid.dygraph.save_dygraph(mnist.state_dict(), "paddle_dy") - print("checkpoint saved") - - inference_mnist() -``` - -输出: - -``` -Loss at epoch 0 step 0: [2.2991252] -Loss at epoch 0 step 100: [0.15491392] -Loss at epoch 0 step 200: [0.13315125] -Loss at epoch 0 step 300: [0.10253005] -Loss at epoch 0 step 400: [0.04266362] -Loss at epoch 0 step 500: [0.08894891] -Loss at epoch 0 step 600: [0.08999012] -Loss at epoch 0 step 700: [0.12975612] -Loss at epoch 0 step 800: [0.15257305] -Loss at epoch 0 step 900: [0.07429226] -Loss at epoch 0 , Test avg_loss is: 0.05995981965082674, acc is: 0.9794671474358975 -checkpoint saved -No optimizer loaded. If you didn't save optimizer, please ignore this. The program can still work with new optimizer. -checkpoint loaded -Inference result of image/infer_3.png is: 3 -``` - -## 编写兼容的模型 - -以上一步中手写数字识别的例子为例,动态图的模型代码可以直接用于静态图中作为模型代码,执行时,直接使用PaddlePaddle静态图执行方式即可,这里以静态图中的`executor`为例, 模型代码可以直接使用之前的模型代码,执行时使用`Executor`执行即可 - -```python -epoch_num = 1 -BATCH_SIZE = 64 -exe = fluid.Executor(fluid.CPUPlace()) - -mnist = MNIST() -sgd = fluid.optimizer.SGDOptimizer(learning_rate=1e-3, parameter_list=mnist.parameters()) -train_reader = paddle.batch( - paddle.dataset.mnist.train(), batch_size=BATCH_SIZE, drop_last=True) -img = fluid.layers.data( - name='pixel', shape=[1, 28, 28], dtype='float32') -label = fluid.layers.data(name='label', shape=[1], dtype='int64') -cost = mnist(img) -loss = fluid.layers.cross_entropy(cost, label) -avg_loss = fluid.layers.mean(loss) -sgd.minimize(avg_loss) - -out = exe.run(fluid.default_startup_program()) - -for epoch in range(epoch_num): - for batch_id, data in enumerate(train_reader()): - static_x_data = np.array( - [x[0].reshape(1, 28, 28) - for x in data]).astype('float32') - y_data = np.array( - [x[1] for x in data]).astype('int64').reshape([BATCH_SIZE, 1]) - - fetch_list = [avg_loss.name] - out = exe.run( - fluid.default_main_program(), - feed={"pixel": static_x_data, - "label": y_data}, - fetch_list=fetch_list) - - static_out = out[0] - - if batch_id % 100 == 0 and batch_id is not 0: - print("epoch: {}, batch_id: {}, loss: {}".format(epoch, batch_id, static_out)) -``` diff --git a/doc/fluid/beginners_guide/basic_concept/index_cn.rst b/doc/fluid/beginners_guide/basic_concept/index_cn.rst index 19a2f61accb351a50b1efa9b7ab568a9df0ba8bb..3f70df0caecfad6314f4d1b91018778d2da80d15 100644 --- a/doc/fluid/beginners_guide/basic_concept/index_cn.rst +++ b/doc/fluid/beginners_guide/basic_concept/index_cn.rst @@ -11,8 +11,7 @@ - `Operator `_ : Operator表示对数据的操作。 - `Program `_ : Program表示对计算过程的描述。 - `Executor `_ : Executor表示执行引擎。 -- `动态图机制-DyGraph <./dygraph/DyGraph.html>`_ : 介绍飞桨动态图执行机制。 - +- `Broadcasting `_ : Paddle对广播支持的说明。 .. 
toctree:: :hidden: @@ -23,5 +22,4 @@ operator.rst program.rst executor.rst - dygraph/DyGraph.md - + broadcasting.rst diff --git a/doc/fluid/beginners_guide/basic_concept/index_en.rst b/doc/fluid/beginners_guide/basic_concept/index_en.rst index 8f8924576f6d9dec56dc3c78977e2bc024fcc8d4..7dea4c748aeb55fac24efca0b14f75d252288eb3 100644 --- a/doc/fluid/beginners_guide/basic_concept/index_en.rst +++ b/doc/fluid/beginners_guide/basic_concept/index_en.rst @@ -6,13 +6,13 @@ This paper introduces the basic concepts of Paddle: - `Guide to Fluid Programming <./programming_guide/programming_guide_en.html>`_ :introduces the basic concept and usage of Paddle. - `LoD-Tensor User Guide `_ : LoD-Tensor is a high-level feature of Paddle. It adds sequence information on the basis of tensor and supports processing variable length data. - +- `Broadcasting `_ : introduces Paddle provides broadcasting semantics. .. toctree:: :hidden: programming_guide/programming_guide_en.md lod_tensor_en.rst - + broadcasting_en.rst diff --git a/doc/fluid/beginners_guide/basic_concept/operator.rst b/doc/fluid/beginners_guide/basic_concept/operator.rst index 2d7199380429d5fc482ffcd6178a38e0a221d051..cdb567a085ebffd189925fe88921e0c7e7ae041e 100644 --- a/doc/fluid/beginners_guide/basic_concept/operator.rst +++ b/doc/fluid/beginners_guide/basic_concept/operator.rst @@ -1,8 +1,8 @@ .. _cn_user_guide_Operator: -======= +========= Operator -======= +========= 在飞桨(PaddlePaddle,以下简称Paddle)中,所有对数据的操作都由Operator表示 diff --git a/doc/fluid/beginners_guide/basic_concept/programming_guide/programming_guide.md b/doc/fluid/beginners_guide/basic_concept/programming_guide/programming_guide.md index 73d49cd538db661b3b3f43aea7d2489428d6c1bb..cc010e7e00a2c7015777dec6870d1340b482248b 100644 --- a/doc/fluid/beginners_guide/basic_concept/programming_guide/programming_guide.md +++ b/doc/fluid/beginners_guide/basic_concept/programming_guide/programming_guide.md @@ -1,10 +1,10 @@ # 编程指南 -目前飞桨(PaddlePaddle,以下简称Paddle)已经同时支持动态图和静态图两种编程方式, -本文主要侧重于介绍静态图的编程方法,关于动态图编程方法,请参考[动态图机制-DyGraph](../dygraph/DyGraph.html)。 +目前飞桨(PaddlePaddle,以下简称Paddle)已经同时支持命令式编程模式(动态图)和声明式编程模式(静态图)两种编程方式, +本文主要侧重于介绍声明式编程模式的编程方法,关于命令式编程模式编程方法,请参考[命令式编程模式机制-DyGraph](../dygraph/DyGraph.html)。 -阅读完本文档,您将了解在Paddle静态图编程方式中,如何表示和定义数据变量,以及如何完整的组建一个深度学习网络并进行训练。 +阅读完本文档,您将了解在Paddle声明式编程模式编程方式中,如何表示和定义数据变量,以及如何完整的组建一个深度学习网络并进行训练。 ## 数据的表示和定义 @@ -38,7 +38,7 @@ data = fluid.layers.fill_constant(shape=[3, 4], value=16, dtype='int64') 以上例子中,我们只使用了一种数据类型"int64",即有符号64位整数数据类型,更多Paddle目前支持的数据类型请查看:[支持的数据类型](../../../advanced_guide/data_preparing/feeding_data.html#fluid)。 -需要注意的是,在静态图编程方式中,上述定义的Tensor并不具有值(即使创建常量的时候指定了value), +需要注意的是,在声明式编程模式编程方式中,上述定义的Tensor并不具有值(即使创建常量的时候指定了value), 它们仅表示将要执行的操作,在网络执行时(训练或者预测)才会进行真正的赋值操作, 如您直接打印上例代码中的data将会得对其信息的描述: @@ -166,7 +166,7 @@ print outs ## 组建更加复杂的网络 -某些场景下,用户需要根据当前网络中的某些状态,来具体决定后续使用哪一种操作,或者重复执行某些操作。在动态图中,可以方便的使用Python的控制流语句(如for,if-else等)来进行条件判断,但是在静态图中,由于组网阶段并没有实际执行操作,也没有产生中间计算结果,因此无法使用Python的控制流语句来进行条件判断,为此静态图提供了多个控制流API来实现条件判断。这里以[fluid.layers.while_loop](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api_cn/layers_cn/while_loop_cn.html)为例来说明如何在静态图中实现条件循环的操作。 +某些场景下,用户需要根据当前网络中的某些状态,来具体决定后续使用哪一种操作,或者重复执行某些操作。在命令式编程模式中,可以方便的使用Python的控制流语句(如for,if-else等)来进行条件判断,但是在声明式编程模式中,由于组网阶段并没有实际执行操作,也没有产生中间计算结果,因此无法使用Python的控制流语句来进行条件判断,为此声明式编程模式提供了多个控制流API来实现条件判断。这里以[fluid.layers.while_loop](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api_cn/layers_cn/while_loop_cn.html)为例来说明如何在声明式编程模式中实现条件循环的操作。 while_loop 
API用于实现类似while/for的循环控制功能,使用一个callable的方法cond作为参数来表示循环的条件,只要cond的返回值为True,while_loop就会循环执行循环体body(也是一个callable的方法),直到 cond 的返回值为False。对于while_loop API的详细定义和具体说明请参考文档[fluid.layers.while_loop](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api_cn/layers_cn/while_loop_cn.html)。 @@ -180,7 +180,7 @@ while i < ten: print('i =', i) ``` -在静态图中使用while_loop API实现以上代码的逻辑: +在声明式编程模式中使用while_loop API实现以上代码的逻辑: ```python # 该代码要求安装飞桨1.7+版本 @@ -191,7 +191,7 @@ import paddle.fluid.layers as layers # 定义cond方法,作为while_loop的判断条件 def cond(i, ten): - return i < ten + return i < ten # 定义body方法,作为while_loop的执行体,只要cond返回值为True,while_loop就会一直调用该方法进行计算 # 由于在使用while_loop OP时,cond和body的参数都是由while_loop的loop_vars参数指定的,所以cond和body必须有相同数量的参数列表,因此body中虽然只需要i这个参数,但是仍然要保持参数列表个数为2,此处添加了一个dummy参数来进行"占位" @@ -210,7 +210,7 @@ print(res) #[array([10])] ``` -限于篇幅,上面仅仅用一个最简单的例子来说明如何在静态图中实现循环操作。循环操作在很多应用中都有着重要作用,比如NLP中常用的Transformer模型,在解码(生成)阶段的Beam Search算法中,需要使用循环操作来进行候选的选取与生成,可以参考[Transformer](https://github.com/PaddlePaddle/models/tree/develop/PaddleNLP/PaddleMT/transformer)模型的实现来进一步学习while_loop在复杂场景下的用法。 +限于篇幅,上面仅仅用一个最简单的例子来说明如何在声明式编程模式中实现循环操作。循环操作在很多应用中都有着重要作用,比如NLP中常用的Transformer模型,在解码(生成)阶段的Beam Search算法中,需要使用循环操作来进行候选的选取与生成,可以参考[Transformer](https://github.com/PaddlePaddle/models/tree/develop/PaddleNLP/PaddleMT/transformer)模型的实现来进一步学习while_loop在复杂场景下的用法。 除while_loop之外,飞桨还提供fluid.layers.cond API来实现条件分支的操作,以及fluid.layers.switch_case和fluid.layers.case API来实现分支控制功能,具体用法请参考文档:[cond](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api_cn/layers_cn/cond_cn.html),[switch_case](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api_cn/layers_cn/switch_case_cn.html)和[case](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api_cn/layers_cn/case_cn.html#case) @@ -218,7 +218,7 @@ print(res) #[array([10])] 一个典型的模型通常包含4个部分,分别是:输入数据定义,搭建网络(模型前向计算逻辑),定义损失函数,以及选择优化算法。 -下面我们通过一个非常简单的数据预测网络(线性回归),来完整的展示如何使用Paddle静态图方式完成一个深度学习模型的组建和训练。 +下面我们通过一个非常简单的数据预测网络(线性回归),来完整的展示如何使用Paddle声明式编程模式方式完成一个深度学习模型的组建和训练。 问题描述:给定一组数据 $$,求解出函数 $f$,使得 $y=f(x)$,其中$X$,$Y$均为一维张量。最终网络可以依据输入$x$,准确预测出$y_{\_predict}$。 diff --git a/doc/fluid/beginners_guide/basic_concept/variable.rst b/doc/fluid/beginners_guide/basic_concept/variable.rst index eeeba512e55422864692b191f247945205d43f4f..22edb42b3a0caa0e0d5647899420a617fc30b7f7 100644 --- a/doc/fluid/beginners_guide/basic_concept/variable.rst +++ b/doc/fluid/beginners_guide/basic_concept/variable.rst @@ -32,7 +32,7 @@ Paddle 为大部分常见的神经网络基本计算模块都提供了封装。 **2. 
占位 Variable** -在静态图模式下,组网的时候通常不知道实际输入的信息,此刻需要一个占位的 :code:`Variable`,表示一个待提供输入的 :code:`Variable` +在声明式编程模式(静态图)模式下,组网的时候通常不知道实际输入的信息,此刻需要一个占位的 :code:`Variable`,表示一个待提供输入的 :code:`Variable` Paddle 中使用 :code:`fluid.data` 来接收输入数据, :code:`fluid.data` 需要提供输入 Tensor 的形状信息,当遇到无法确定的维度时,相应维度指定为 None ,如下面的代码片段所示: diff --git a/doc/fluid/beginners_guide/coding_practice/index_cn.rst b/doc/fluid/beginners_guide/coding_practice/index_cn.rst index da748889d53242ffdf2ec8a8499490880dd17a2e..e1f5f5c5c8782c3d69c64437fd90c964d80bf374 100644 --- a/doc/fluid/beginners_guide/coding_practice/index_cn.rst +++ b/doc/fluid/beginners_guide/coding_practice/index_cn.rst @@ -9,6 +9,5 @@ configure_simple_model/index_cn.rst single_node.rst - test_while_training.rst save_load_variables.rst diff --git a/doc/fluid/beginners_guide/coding_practice/save_load_variables.rst b/doc/fluid/beginners_guide/coding_practice/save_load_variables.rst index 6ce016d84eb8fded69675d4689fb684527ed608e..40bb9b4841005dceab3dcbc0c5b67c15ee68d427 100644 --- a/doc/fluid/beginners_guide/coding_practice/save_load_variables.rst +++ b/doc/fluid/beginners_guide/coding_practice/save_load_variables.rst @@ -204,8 +204,8 @@ save_vars、save_params、save_persistables 以及 save_inference_model的区别 path = "./models" startup_prog = fluid.default_startup_program() exe.run(startup_prog) - fluid.io.load_persistables(exe, path, startup_prog) main_prog = fluid.default_main_program() + fluid.io.load_persistables(exe, path, main_prog) exe.run(main_prog) 上面的例子中,通过调用 :code:`fluid.io.load_persistables` 函数,PaddlePaddle Fluid会从默认 diff --git a/doc/fluid/beginners_guide/coding_practice/save_load_variables_en.rst b/doc/fluid/beginners_guide/coding_practice/save_load_variables_en.rst index 7130cb03b579dd97ad64f02aa401ebd188047393..7d987087fa55a69719ea3557023504b6237182ba 100644 --- a/doc/fluid/beginners_guide/coding_practice/save_load_variables_en.rst +++ b/doc/fluid/beginners_guide/coding_practice/save_load_variables_en.rst @@ -27,6 +27,23 @@ How to save model variables The model variables we need to save are different depending on the application. For example, if we just want to save the model for future predictions, just saving the model parameters will be enough. But if we need to save a checkpoint for future recovery of current training, then we should save all the persistable variables, and even record the current epoch and step id. It is because even though some model variables are not parameters, they are still essential for model training. + +Difference between save_vars、save_params、save_persistables and save_inference_model +########################################################################## +1. :code:`save_inference_model` will prune the inference model based on :code:`feeded_var_names` and :code:`target_vars` , this method will save the ``__model__`` file of the pruned program and the persistable variables in the program. + +2. :code:`save_persistables` this method will not save model, it will save all the persistable variables in the program. + +3. :code:`save_params` this method will not save model, it will save all the parameters in the program. + +4. :code:`save_vars` this method will not save model, it will save the given parameter by user. + + :code:`save_persistables` this method is useful for increment training or checkpoint training, it can save persistable variables in program comprehensively, such as parameter variables, optimizer variables, if you need increment training or checkpoint training, please choose this one. 
+ :code:`save_inference_model` this method is useful for inference, it will save persistable variables and pruned program, if you need program and variables for follow-up high performance inference, please choose this one. + + :code:`save_vars 和 save_params` there methods are only needed in particular cases, we suppose you already know the purpose of there APIs, there are not recommended for use normally. + + Save the model to make prediction for new samples =================================================== @@ -126,8 +143,8 @@ In the above example, by calling the :code:`fluid.io.save_persistables` function path = "./models" startup_prog = fluid.default_startup_program() exe.run(startup_prog) - fluid.io.load_persistables(exe, path, startup_prog) main_prog = fluid.default_main_program() + fluid.io.load_persistables(exe, path, main_prog) exe.run(main_prog) In the above example, by calling the :code:`fluid.io.load_persistables` function, PaddlePaddle Fluid will find persistable variables from all model variables in the default :code:`fluid.Program` , e.t. :code:`prog` . and load them one by one from the specified :code:`path` directory to continue training. diff --git a/doc/fluid/beginners_guide/coding_practice/single_node.rst b/doc/fluid/beginners_guide/coding_practice/single_node.rst index db763891546fbe9e16fd9efbc7304276debf15b7..d63379fc6fdc949441d8961004472429b6ca5d0b 100644 --- a/doc/fluid/beginners_guide/coding_practice/single_node.rst +++ b/doc/fluid/beginners_guide/coding_practice/single_node.rst @@ -17,8 +17,8 @@ import paddle.fluid as fluid - image = fluid.layers.data(name="image", shape=[784]) - label = fluid.layers.data(name="label", shape=[1]) + image = fluid.data(name="image", shape=[None, 784], dtype='float32') + label = fluid.data(name="label", shape=[None, 1], dtype='int64') hidden = fluid.layers.fc(input=image, size=100, act='relu') prediction = fluid.layers.fc(input=hidden, size=10, act='softmax') loss = fluid.layers.cross_entropy(input=prediction, label=label) @@ -60,7 +60,7 @@ 执行单卡训练可以使用 :code:`fluid.Executor()` 中的 :code:`run()` 方法,运行训练\ :code:`fluid.Program` 即可。在运行的时候,用户可以通过 :code:`run(feed=...)`\ -参数传入数据;用户可以通过 :code:`run(fetch=...)` 获取持久的数据。例如:\ +参数传入数据;用户可以通过 :code:`run(fetch=...)` 获取输出数据。例如:\ .. code-block:: python @@ -70,7 +70,7 @@ train_program = fluid.Program() startup_program = fluid.Program() with fluid.program_guard(train_program, startup_program): - data = fluid.layers.data(name='X', shape=[1], dtype='float32') + data = fluid.data(name='X', shape=[None, 1], dtype='float32') hidden = fluid.layers.fc(input=data, size=10) loss = fluid.layers.mean(hidden) sgd = fluid.optimizer.SGD(learning_rate=0.001) @@ -92,14 +92,14 @@ fetch_list=[loss.name]) # Or use CompiledProgram: - compiled_prog = compiler.CompiledProgram(train_program) + compiled_prog = fluid.CompiledProgram(train_program) loss_data, = exe.run(compiled_prog, feed={"X": x}, fetch_list=[loss.name]) 多卡训练 ####################### -在多卡训练中,你可以使用 :code:`fluid.compiler.CompiledProgram` 来编译 :code:`fluid.Program` ,然后调用 :code:`with_data_parallel` 。例如: +在多卡训练中,你可以使用 :code:`fluid.CompiledProgram` 来编译 :code:`fluid.Program` ,然后调用 :code:`with_data_parallel` 。例如: .. code-block:: python @@ -112,7 +112,7 @@ if not use_cuda: os.environ['CPU_NUM'] = str(2) - compiled_prog = compiler.CompiledProgram( + compiled_prog = fluid.CompiledProgram( train_program).with_data_parallel( loss_name=loss.name) loss_data, = exe.run(compiled_prog, @@ -129,3 +129,8 @@ 进阶使用 ############### +.. 
toctree:: + :maxdepth: 2 + + test_while_training.rst + diff --git a/doc/fluid/beginners_guide/coding_practice/single_node_en.rst b/doc/fluid/beginners_guide/coding_practice/single_node_en.rst index 04105f37b93c6f8750b0a2edf82d17a70cd52e3f..3e69c281c55e1b5367a9795a83d4f8825e3d607e 100644 --- a/doc/fluid/beginners_guide/coding_practice/single_node_en.rst +++ b/doc/fluid/beginners_guide/coding_practice/single_node_en.rst @@ -13,8 +13,8 @@ For example: import paddle.fluid as fluid - image = fluid.layers.data(name="image", shape=[784]) - label = fluid.layers.data(name="label", shape=[1]) + image = fluid.data(name="image", shape=[None, 784], dtype='float32') + label = fluid.data(name="label", shape=[None, 1], dtype='int64') hidden = fluid.layers.fc(input=image, size=100, act='relu') prediction = fluid.layers.fc(input=hidden, size=10, act='softmax') loss = fluid.layers.cross_entropy(input=prediction, label=label) @@ -51,7 +51,7 @@ Single-card Training ##################### Single-card training can be performed through calling :code:`run()` of :code:`fluid.Executor()` to run training :code:`fluid.Program` . -In the runtime, feed data with :code:`run(feed=...)` and get persistable data with :code:`run(fetch=...)` . For example: +In the runtime, users can feed data with :code:`run(feed=...)` and get output data with :code:`run(fetch=...)` . For example: .. code-block:: python @@ -61,7 +61,7 @@ In the runtime, feed data with :code:`run(feed=...)` and get persistable data wi train_program = fluid.Program() startup_program = fluid.Program() with fluid.program_guard(train_program, startup_program): - data = fluid.layers.data(name='X', shape=[1], dtype='float32') + data = fluid.data(name='X', shape=[None, 1], dtype='float32') hidden = fluid.layers.fc(input=data, size=10) loss = fluid.layers.mean(hidden) sgd = fluid.optimizer.SGD(learning_rate=0.001) @@ -81,21 +81,16 @@ In the runtime, feed data with :code:`run(feed=...)` and get persistable data wi loss_data, = exe.run(train_program, feed={"X": x}, fetch_list=[loss.name]) - # Or - # compiled_prog = compiler.CompiledProgram(train_program) - # loss_data, = exe.run(compiled_prog, - # feed={"X": x}, - # fetch_list=[loss.name]) - -Notes: + # Or use CompiledProgram: + compiled_prog = fluid.CompiledProgram(train_program) + loss_data, = exe.run(compiled_prog, + feed={"X": x}, + fetch_list=[loss.name]) -1. About data type supported by feed, please refer to the article :ref:`user_guide_feed_data_to_executor_en`. -2. The return value of :code:`Executor.run` is the variable value of :code:`fetch_list=[...]` .The fetched Variable must be persistable. :code:`fetch_list` can be fed with either Variable list or name list of variables . :code:`Executor.run` returns Fetch result list. -3. If the fetched data contain sequence information, you can set :code:`exe.run(return_numpy=False, ...)` to directly get :code:`fluid.LoDTensor` . You can directly access the information in :code:`fluid.LoDTensor` . Multi-card Training ####################### -In multi-card training, you can use :code:`fluid.compiler.CompiledProgram` to compile the :code:`fluid.Program`, and then call :code:`with_data_parallel`. For example: +In multi-card training, you can use :code:`fluid.CompiledProgram` to compile the :code:`fluid.Program`, and then call :code:`with_data_parallel`. For example: .. 
code-block:: python @@ -108,7 +103,7 @@ In multi-card training, you can use :code:`fluid.compiler.CompiledProgram` to co if not use_cuda: os.environ['CPU_NUM'] = str(2) - compiled_prog = compiler.CompiledProgram( + compiled_prog = fluid.CompiledProgram( train_program).with_data_parallel( loss_name=loss.name) loss_data, = exe.run(compiled_prog, diff --git a/doc/fluid/beginners_guide/coding_practice/test_while_training.rst b/doc/fluid/beginners_guide/coding_practice/test_while_training.rst index 28015902f4fa9abc08a4aba4f3bc1a614b1bc877..612d8b36a25188dc1cf18427e1df99d667037302 100644 --- a/doc/fluid/beginners_guide/coding_practice/test_while_training.rst +++ b/doc/fluid/beginners_guide/coding_practice/test_while_training.rst @@ -19,20 +19,21 @@ 通过克隆训练 :code:`fluid.Program` 生成测试 :code:`fluid.Program` ======================================================================= -用:code:`Program.clone()` 方法可以复制出新的 :code:`fluid.Program` 。 通过设置 +用 :code:`Program.clone()` 方法可以复制出新的 :code:`fluid.Program` 。 通过设置 :code:`Program.clone(for_test=True)` 复制含有用于测试的操作 :code:`fluid.Program` 。简单的使用方法如下: .. code-block:: python import paddle.fluid as fluid - img = fluid.layers.data(name="image", shape=[784]) + image = fluid.data(name="image", shape=[None, 784], dtype='float32') + label = fluid.data(name="label", shape=[None, 1], dtype="int64") + prediction = fluid.layers.fc( - input=fluid.layers.fc(input=img, size=100, act='relu'), + input=fluid.layers.fc(input=image, size=100, act='relu'), size=10, act='softmax' ) - label = fluid.layers.data(name="label", shape=[1], dtype="int64") loss = fluid.layers.mean(fluid.layers.cross_entropy(input=prediction, label=label)) acc = fluid.layers.accuracy(input=prediction, label=label) @@ -64,9 +65,9 @@ PaddlePaddle Fluid中使用 :code:`fluid.unique_name` 包来随机初始化用 import paddle.fluid as fluid def network(is_test): - file_obj = fluid.layers.open_files(filenames=["test.recordio"] if is_test else ["train.recordio"], ...) - img, label = fluid.layers.read_file(file_obj) - hidden = fluid.layers.fc(input=img, size=100, act="relu") + image = fluid.data(name="image", shape=[None, 784], dtype='float32') + label = fluid.data(name="label", shape=[None, 1], dtype="int64") + hidden = fluid.layers.fc(input=image, size=100, act="relu") hidden = fluid.layers.batch_norm(input=hidden, is_test=is_test) ... 
return loss @@ -78,7 +79,7 @@ PaddlePaddle Fluid中使用 :code:`fluid.unique_name` 包来随机初始化用 test_program = fluid.Program() with fluid.unique_name.guard(): - with fluid.program_gurad(test_program, fluid.Program()): + with fluid.program_guard(test_program, fluid.Program()): test_loss = network(is_test=True) # fluid.default_main_program() is the train program diff --git a/doc/fluid/beginners_guide/coding_practice/test_while_training_en.rst b/doc/fluid/beginners_guide/coding_practice/test_while_training_en.rst index 2f68ea4ee8a16b52c35a621e64a531d87238c415..a8bebd78c89f51273dfc3f342b84490c4908c65e 100644 --- a/doc/fluid/beginners_guide/coding_practice/test_while_training_en.rst +++ b/doc/fluid/beginners_guide/coding_practice/test_while_training_en.rst @@ -25,13 +25,13 @@ Generate test :code:`fluid.Program` by cloning training :code:`fluid.Program` import paddle.fluid as fluid - img = fluid.layers.data(name="image", shape=[784]) + image = fluid.data(name="image", shape=[None, 784], dtype='float32') + label = fluid.data(name="label", shape=[None, 1], dtype="int64") prediction = fluid.layers.fc( - input=fluid.layers.fc(input=img, size=100, act='relu'), + input=fluid.layers.fc(input=image, size=100, act='relu'), size=10, act='softmax' ) - label = fluid.layers.data(name="label", shape=[1], dtype="int64") loss = fluid.layers.mean(fluid.layers.cross_entropy(input=prediction, label=label)) acc = fluid.layers.accuracy(input=prediction, label=label) @@ -56,9 +56,9 @@ For example: import paddle.fluid as fluid def network(is_test): - file_obj = fluid.layers.open_files(filenames=["test.recordio"] if is_test else ["train.recordio"], ...) - img, label = fluid.layers.read_file(file_obj) - hidden = fluid.layers.fc(input=img, size=100, act="relu") + image = fluid.data(name="image", shape=[None, 784], dtype='float32') + label = fluid.data(name="label", shape=[None, 1], dtype="int64") + hidden = fluid.layers.fc(input=image, size=100, act="relu") hidden = fluid.layers.batch_norm(input=hidden, is_test=is_test) ... return loss @@ -70,7 +70,7 @@ For example: test_program = fluid.Program() with fluid.unique_name.guard(): - with fluid.program_gurad(test_program, fluid.Program()): + with fluid.program_guard(test_program, fluid.Program()): test_loss = network(is_test=True) # fluid.default_main_program() is the train program diff --git a/doc/fluid/beginners_guide/dygraph/DyGraph.md b/doc/fluid/beginners_guide/dygraph/DyGraph.md new file mode 100644 index 0000000000000000000000000000000000000000..6d1089d50a84d0789a362ea5ae4068f8dcc2c414 --- /dev/null +++ b/doc/fluid/beginners_guide/dygraph/DyGraph.md @@ -0,0 +1,814 @@ +# 命令式编程使用教程 + +从编程范式上说,飞桨兼容支持声明式编程和命令式编程,通俗地讲即静态图和动态图。其实飞桨本没有图的概念,在飞桨的设计中,把一个神经网络定义成一段类似程序的描述,也就是用户在写程序的过程中,就定义了模型表达及计算。在声明式编程的控制流实现方面,飞桨借助自己实现的控制流OP而不是python原生的if else和for循环,这使得在飞桨中的定义的program即一个网络模型,可以有一个内部的表达,是可以全局优化编译执行的。考虑对开发者来讲,更愿意使用python原生控制流,飞桨也做了支持,并通过解释方式执行,这就是命令式编程。但整体上,这两种编程范式是相对兼容统一的。飞桨将持续发布更完善的命令式编程功能,同时保持更强劲的性能。 + +飞桨平台中,将神经网络抽象为计算表示**Operator**(算子,常简称OP)和数据表示**Variable**(变量),如 图1 所示。神经网络的每层操作均由一个或若干**Operator**组成,每个**Operator**接受一系列的**Variable**作为输入,经计算后输出一系列的**Variable**。 +
+图1 Operator和Variable关系示意图
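+
+为了更直观地理解 Operator 与 Variable 的关系,这里先给出一小段示意代码(仅作演示,所用的 paddle.enable_imperative、to_variable、matmul 等接口会在下文详细介绍):to_variable 创建的是 Variable(数据),matmul 则是作用在 Variable 上的一个 Operator(计算)。
+
+```python
+import numpy as np
+import paddle
+
+# 开启命令式编程模式,详见下文第 1 节
+paddle.enable_imperative()
+
+# Variable:数据表示,这里由 numpy 数组转换得到
+x = paddle.imperative.to_variable(np.ones([2, 3], dtype='float32'))
+w = paddle.imperative.to_variable(np.ones([3, 4], dtype='float32'))
+
+# Operator:计算表示,matmul 接受两个 Variable 作为输入,输出一个新的 Variable
+y = paddle.matmul(x, w)
+print(y.shape)    # 形状为 [2, 4]
+print(y.numpy())  # 每个元素均为 3.0
+```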
+ +根据**Operator**解析执行方式不同,飞桨支持如下两种编程范式: +* **声明式编程范式(静态图模式)**:先编译后执行的方式。用户需预先定义完整的网络结构,再对网络结构进行编译优化后,才能执行获得计算结果。 +* **命令式编程范式(动态图模式)**:解析式的执行方式。用户无需预先定义完整的网络结构,每写一行网络代码,即可同时获得计算结果。 + +举例来说,假设用户写了一行代码:y=x+1,在声明式编程下,运行此代码只会往计算图中插入一个Tensor加1的**Operator**,此时**Operator**并未真正执行,无法获得y的计算结果。但在命令式编程下,所有**Operator**均是即时执行的,运行完此代码后**Operator**已经执行完毕,用户可直接获得y的计算结果。 + +## 为什么命令式编程越来越流行? + +声明式编程作为较早提出的一种编程范式,提供丰富的 API ,能够快速的实现各种模型;并且可以利用全局的信息进行图优化,优化性能和显存占用;在预测部署方面也可以实现无缝衔接。 但具体实践中声明式编程存在如下问题: +1. 采用先编译后执行的方式,组网阶段和执行阶段割裂,导致调试不方便。 +2. 属于一种符号化的编程方式,要学习新的编程方式,有一定的入门门槛。 +3. 网络结构固定,对于一些树结构的任务支持的不够好。 + +命令式编程的出现很好的解决了这些问题,存在以下优势: +1. 代码运行完成后,可以立马获取结果,支持使用 IDE 断点调试功能,使得调试更方便。 +2. 属于命令式的编程方式,与编写Python的方式类似,更容易上手。 +3. 网络的结构在不同的层次中可以变化,使用更灵活。 + + +综合以上优势,使得命令式编程越来越受开发者的青睐,本章侧重介绍在飞桨中命令式编程的编程方法,包括如下几部分: +1. 如何开启命令式编程 +2. 如何使用命令式编程进行模型训练 +3. 如何基于命令式编程进行多卡训练 +4. 如何部署命令式的模型 +5. 命令式编程常见的使用技巧,如中间变量值/梯度打印、断点调试、阻断反向传递,以及某些场景下如何改写为声明式模式运行。 + + +## 1. 开启命令式编程 + +此文档介绍的内容是基于2.0 alpha,请安装2.0 alpha 版本,安装方式如下: + +``` +pip install -q --upgrade paddlepaddle==2.0.0a0 +``` + +目前飞桨默认的模式是声明式编程,可以通过paddle.enable_imperative()开启命令式编程(也可以通过with paddle.imperative.guard()的方式启动): +``` +paddle.enable_imperative() +``` + +我们先通过一个实例,观察一下命令式编程开启前后执行方式的差别: + + +```python +import numpy as np +import paddle +from paddle.imperative import to_variable + +data = np.ones([2, 2], np.float32) +x = paddle.static.data(name='x', shape=[2,2], dtype='float32') +x += 10 +exe = paddle.Executor() +exe.run(paddle.default_startup_program()) +out = exe.run(fetch_list=[x], feed={'x': data}) +print("result", out) #[[11, 11], [11, 11]] + +# 命令式编程 +paddle.enable_imperative() +x = paddle.imperative.to_variable(data) +x += 10 +print('result', x.numpy()) #[[11, 11], [11, 11]] + +``` +* 命令式编程下,所有操作在运行时就已经完成,更接近我们平时的编程方式,可以随时获取每一个操作的执行结果。 +* 声明式编程下,过程中并没有实际执行操作,上述例子中可以看到只能打印声明的类型,最后需要调用执行器来统一执行所有操作,计算结果需要通过执行器统一返回。 + +## 2. 
使用命令式编程进行模型训练 + +接下来我们以一个简单的手写体识别任务为例,说明如何使用飞桨的命令式编程来进行模型的训练。包括如下步骤: + +* 2.1 定义数据读取器:读取数据和预处理操作。 +* 2.2 定义模型和优化器:搭建神经网络结构。 +* 2.3 训练:配置优化器、学习率、训练参数。循环调用训练过程,循环执行“前向计算 + 损失函数 + 反向传播”。 +* 2.4 评估测试:将训练好的模型保存并评估测试。 + +最后介绍一下: +* 2.5 模型参数的保存和加载方法。 + +在前面章节我们已经了解到,“手写数字识别”的任务是:根据一个28 * 28像素的图像,识别图片中的数字。可采用MNIST数据集进行训练。 +![](https://ai-studio-static-online.cdn.bcebos.com/f8ffb092f6354d8c9c0219224db0e87b5490c5715cc346cf87b7098b2c3c2069) + +有关该任务和数据集的详细介绍,可参考:[初识飞桨手写数字识别模型](https://aistudio.baidu.com/aistudio/projectdetail/224342) + +### 2.1 定义数据读取器 + +飞桨提供了多个封装好的数据集API,本任务我们可以通过调用 [paddle.dataset.mnist](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/data/dataset_cn.html) 的 train 函数和 test 函数,直接获取处理好的 MNIST 训练集和测试集;然后调用 [paddle.batch](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/io_cn/batch_cn.html#batch) 接口返回 reader 的装饰器,该 reader 将输入 reader 的数据打包成指定 BATCH_SIZE 大小的批处理数据。 + + +```python +import paddle + +# 定义批大小 +BATCH_SIZE = 64 + +# 通过调用paddle.dataset.mnist的train函数和test函数来构造reader +train_reader = paddle.batch( + paddle.dataset.mnist.train(), batch_size=BATCH_SIZE, drop_last=True) +test_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=BATCH_SIZE, drop_last=True) +``` + + + +### 2.2 定义模型和优化器 + +本节我们采用如下网络模型,该模型可以很好的完成“手写数字识别”的任务。模型由卷积层 -> 池化层 -> 卷积层 -> 池化层 -> 全连接层组成,池化层即降采样层。 + +![](https://ai-studio-static-online.cdn.bcebos.com/f9e59d727d68437aaaad8cee410e564c7a80063367bd4fcd9f710a1480ee338c) + + +在开始构建网络模型前,需要了解如下信息: + +> 在命令式编程中,参数和变量的存储管理方式与声明式编程不同。命令式编程下,网络中学习的参数和中间变量,生命周期和 Python 对象的生命周期是一致的。简单来说,一个 Python 对象的生命周期结束,相应的存储空间就会释放。 + +对于一个网络模型,在模型学习的过程中参数会不断更新,所以参数需要在整个学习周期内一直保持存在,因此需要一个机制来保持网络的所有的参数不被释放,飞桨的命令式编程采用了继承自 [paddle.nn.Layer](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/Layer_cn.html#layer) 的面向对象设计的方法来管理所有的参数,该方法也更容易模块化组织代码。 + +下面介绍如何通过继承 paddle.nn.Layer 实现一个简单的ConvPool层;该层由一个 [卷积层](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/Conv2D_cn.html#conv2d) 和一个 [池化层](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/Pool2D_cn.html#pool2d) 组成。 + + +```python +import paddle +from paddle.nn import Conv2D, Pool2D + +# 定义SimpleImgConvPool网络,必须继承自paddle.nn.Layer +# 该网络由一个卷积层和一个池化层组成 + +class SimpleImgConvPool(paddle.nn.Layer): + def __init__(self, + num_channels, + num_filters, + filter_size, + pool_size, + pool_stride, + pool_padding=0, + pool_type='max', + global_pooling=False, + conv_stride=1, + conv_padding=0, + conv_dilation=1, + conv_groups=1, + act=None, + use_cudnn=False, + param_attr=None, + bias_attr=None): + super(SimpleImgConvPool, self).__init__() + + self._conv2d = Conv2D( + num_channels=num_channels, + num_filters=num_filters, + filter_size=filter_size, + stride=conv_stride, + padding=conv_padding, + dilation=conv_dilation, + groups=conv_groups, + param_attr=None, + bias_attr=None, + act=act, + use_cudnn=use_cudnn) + + self._pool2d = Pool2D( + pool_size=pool_size, + pool_type=pool_type, + pool_stride=pool_stride, + pool_padding=pool_padding, + global_pooling=global_pooling, + use_cudnn=use_cudnn) + + def forward(self, inputs): + x = self._conv2d(inputs) + x = self._pool2d(x) + return x +``` + +可以看出实现一个 ConvPool 层(即SimpleImgConvPool)分为两个步骤: +1. 定义 \_\_init\_\_ 构造函数。 + +在 \_\_init\_\_ 构造函数中,通常会执行变量初始化、参数初始化、子网络初始化等操作,执行这些操作时不依赖于输入的动态信息。这里我们对子网络(卷积层和池化层)做了初始化操作。 + +2. 
定义 forward 函数。 + +该函数负责定义网络运行时的执行逻辑,将会在每一轮训练/预测中被调用。上述示例中,forward 函数的逻辑是先执行一个卷积操作,然后执行一个池化操作。 + + +接下来我们介绍如何利用子网络组合出MNIST网络,该网络由两个 SimpleImgConvPool 子网络和一个全连接层组成。 + + +```python +# 定义MNIST网络,必须继承自paddle.nn.Layer +# 该网络由两个SimpleImgConvPool子网络、reshape层、matmul层、softmax层、accuracy层组成 +class MNIST(paddle.nn.Layer): + def __init__(self): + super(MNIST, self).__init__() + self._simple_img_conv_pool_1 = SimpleImgConvPool( + 1, 20, 5, 2, 2, act="relu") + self._simple_img_conv_pool_2 = SimpleImgConvPool( + 20, 50, 5, 2, 2, act="relu") + + self.pool_2_shape = 50 * 4 * 4 + SIZE = 10 + self.output_weight = self.create_parameter( + [self.pool_2_shape, 10]) + + def forward(self, inputs, label=None): + x = self._simple_img_conv_pool_1(inputs) + x = self._simple_img_conv_pool_2(x) + x = paddle.reshape(x, shape=[-1, self.pool_2_shape]) + x = paddle.matmul(x, self.output_weight) + x = paddle.nn.functional.softmax(x) + if label is not None: + acc = paddle.metric.accuracy(input=x, label=label) + return x, acc + else: + return x +``` + +在这个复杂的 Layer 的 \_\_init\_\_ 构造函数中,包含了更多基础的操作: +1. 变量的初始化:self.pool_2_shape = 50 * 4 * 4 +2. 全连接层参数的创建,通过调用 [Layer](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/Layer_cn.html#layer) 的 [create_parameter](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/Layer_cn.html#create_parameter) 接口:self.output_weight = self.create_parameter( [ self.pool_2_shape, 10]) +3. 子 Layer 的构造:self._simple_img_conv_pool_1、self._simple_img_conv_pool_2 + +forward 函数的实现和 前面SimpleImgConvPool 类中的实现方式类似。 + +接下来定义MNIST类的对象,以及优化器。这里优化器我们选择 [AdamOptimizer](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/optimizer_cn/AdamOptimizer_cn.html#adamoptimizer) ,通过 [Layer](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/Layer_cn.html#layer) 的 [parameters](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/Layer_cn.html#parameters) 接口来读取该网络的全部参数,实现如下: + + +```python +import numpy as np +from paddle.optimizer import AdamOptimizer +from paddle.imperative import to_variable + +paddle.enable_imperative() +# 定义MNIST类的对象 +mnist = MNIST() +# 定义优化器为AdamOptimizer,学习旅learning_rate为0.001 +# 注意命令式编程下必须传入parameter_list参数,该参数为需要优化的网络参数,本例需要优化mnist网络中的所有参数 +adam = AdamOptimizer(learning_rate=0.001, parameter_list=mnist.parameters()) +``` + +### 2.3 训练 + +当我们定义好上述网络结构之后,就可以进行训练了。 + +实现如下: +* 数据读取:读取每批数据,通过 [to_variable](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/to_variable_cn.html#to-variable) 接口将 numpy.ndarray 对象转换为 [Variable](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/fluid_cn/Variable_cn.html#variable) 类型的对象。 +* 网络正向执行:在正向执行时,用户构造出img和label之后,可利用类似函数调用的方式(如:mnist(img, label))传递参数执行对应网络的 forward 函数。 +* 计算损失值:根据网络返回的计算结果,计算损失值,便于后续执行反向计算。 +* 执行反向计算:需要用户主动调用 [backward](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/fluid_cn/Variable_cn.html#backward) 接口来执行反向计算。 +* 参数更新:调用优化器的 [minimize](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/optimizer_cn/AdamOptimizer_cn.html#minimize) 接口对参数进行更新。 +* 梯度重置:将本次计算的梯度值清零,以便进行下一次迭代和梯度更新。 +* 保存训练好的模型:通过 [Layer](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/Layer_cn.html#layer) 的 [state_dict](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/Layer_cn.html#state_dict) 获取模型的参数;通过 [save_dygraph](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/save_dygraph_cn.html#save-dygraph) 对模型参数进行保存。 + + +```python +import numpy as np +from paddle.optimizer import 
AdamOptimizer +from paddle.imperative import to_variable + +paddle.enable_imperative() +# 定义MNIST类的对象 +mnist = MNIST() +# 定义优化器为AdamOptimizer,学习旅learning_rate为0.001 +# 注意命令式编程下必须传入parameter_list参数,该参数为需要优化的网络参数,本例需要优化mnist网络中的所有参数 +adam = AdamOptimizer(learning_rate=0.001, parameter_list=mnist.parameters()) + +# 设置全部样本的训练次数 +epoch_num = 5 + +for epoch in range(epoch_num): + for batch_id, data in enumerate(train_reader()): + dy_x_data = np.array([x[0].reshape(1, 28, 28) for x in data]).astype('float32') + y_data = np.array([x[1] for x in data]).astype('int64').reshape(-1, 1) + + img = to_variable(dy_x_data) + label = to_variable(y_data) + + cost, acc = mnist(img, label) + + loss = paddle.nn.functional.cross_entropy(cost, label) + avg_loss = paddle.mean(loss) + avg_loss.backward() + adam.minimize(avg_loss) + mnist.clear_gradients() + + if batch_id % 100 == 0: + print("Loss at epoch {} step {}: {:}".format( + epoch, batch_id, avg_loss.numpy())) + +model_dict = mnist.state_dict() +paddle.imperative.save(model_dict, "save_temp") +``` + + +### 2.4 评估测试 + +模型训练完成,我们已经保存了训练好的模型,接下来进行评估测试。某些OP(如 dropout、batch_norm)需要区分训练模式和评估模式,以标识不同的执行状态。飞桨中OP默认采用的是训练模式(train mode),可通过如下方法切换: + + ``` +model.eval() #切换到评估模式 +model.train() #切换到训练模式 +``` + + +模型评估测试的实现如下: +* 首先定义 MNIST 类的对象 mnist_eval,然后通过 [load_dygraph](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/load_dygraph_cn.html#load-dygraph) 接口加载保存好的模型参数,通过 [Layer](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/Layer_cn.html#layer) 的 [set_dict](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/Layer_cn.html#set_dict) 接口将参数导入到模型中,通过 [Layer](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/Layer_cn.html#layer) 的 eval 接口切换到预测评估模式。 +* 读取测试数据执行网络正向计算,进行评估测试,输出不同 batch 数据下损失值和准确率的平均值。 + + +```python +paddle.enable_imperative() +mnist_eval = MNIST() +model_dict, _ = paddle.imperative.load("save_temp") +mnist_eval.set_dict(model_dict) +print("checkpoint loaded") + +mnist_eval.eval() + +acc_set = [] +avg_loss_set = [] +for batch_id, data in enumerate(test_reader()): + dy_x_data = np.array([x[0].reshape(1, 28, 28) + for x in data]).astype('float32') + y_data = np.array( + [x[1] for x in data]).astype('int64').reshape(-1, 1) + + img = to_variable(dy_x_data) + label = to_variable(y_data) + + prediction, acc = mnist_eval(img, label) + + loss = paddle.nn.functional.cross_entropy(input=prediction, label=label) + avg_loss = paddle.mean(loss) + acc_set.append(float(acc.numpy())) + avg_loss_set.append(float(avg_loss.numpy())) + +acc_val_mean = np.array(acc_set).mean() +avg_loss_val_mean = np.array(avg_loss_set).mean() +print("Eval avg_loss is: {}, acc is: {}".format(avg_loss_val_mean, acc_val_mean)) +``` + +### 2.5 模型参数的保存和加载 + +在命令式编程下,模型和优化器在不同的模块中,所以模型和优化器分别在不同的对象中存储,使得模型参数和优化器信息需分别存储。 +因此模型的保存需要单独调用模型和优化器中的 state_dict() 接口,同样模型的加载也需要单独进行处理。 + +保存模型 : +1. 保存模型参数:首先通过 minist.state_dict 函数获取 mnist 网络的所有参数,然后通过 paddle.imperative.save 函数将获得的参数保存至以 save_path 为前缀的文件中。 +1. 
保存优化器信息:首先通过 adam.state_dict 函数获取 adam 优化器的信息,然后通过 paddle.imperative.save 函数将获得的参数保存至以 save_path 为前缀的文件中。 + * [Layer](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/Layer_cn.html#layer) 的 [state_dict](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/Layer_cn.html#state_dict) 接口:该接口可以获取当前层及其子层的所有参数,并将参数存放在 dict 结构中。 + * [Optimizer](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/optimizer_cn/AdamOptimizer_cn.html#adamoptimizer) 的 state_dict 接口:该接口可以获取优化器的信息,并将信息存放在 dict 结构中。其中包含优化器使用的所有变量,例如对于 Adam 优化器,包括 beta1、beta2、momentum 等信息。注意如果该优化器的 minimize 函数没有被调用过,则优化器的信息为空。 + * [paddle.imperative.save](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/save_dygraph_cn.html#save-dygraph) 接口:该接口将传入的参数或优化器的 dict 保存到磁盘上。 +``` +# 保存模型参数 +1. paddle.imperative.save(minist.state_dict(), "save_path") +# 保存优化器信息 +2. paddle.imperative.save(adam.state_dict(), "save_path") +``` +加载模型: +1. 通过 paddle.imperative.load 函数获取模型参数信息 model_state 和优化器信息 opt_state; +1. 通过 mnist.set_dict 函数用获取的模型参数信息设置 mnist 网络的参数 +1. 通过 adam.set_dict 函数用获取的优化器信息设置 adam 优化器信息。 + * [Layer](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/Layer_cn.html#layer) 的 [set_dict](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/Layer_cn.html#set_dict) 接口:该接口根据传入的 dict 结构设置参数,所有参数将由 dict 结构中的 Tensor 设置。 + * [Optimizer](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/optimizer_cn/AdamOptimizer_cn.html#adamoptimizer) 的 set_dict 接口:该接口根据传入的 dict 结构设置优化器信息,例如对于 Adam 优化器,包括 beta1、beta2、momentum 等信息。如果使用了 LearningRateDecay ,则 global_step 信息也将会被设置。 + * [paddle.imperative.load](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/load_dygraph_cn.html#load-dygraph) 接口:该接口尝试从磁盘中加载参数或优化器的 dict 。 +``` +# 获取模型参数和优化器信息 +1. model_state, opt_state= paddle.imperative.load(“save_path”) +# 加载模型参数 +2. mnist.set_dict(model_state) +# 加载优化器信息 +3. adam.set_dict(opt_state) +``` + + +## 3. 多卡训练 + +针对数据量、计算量较大的任务,我们需要多卡并行训练,以提高训练效率。目前命令式编程可支持GPU的单机多卡训练方式,在命令式编程中多卡的启动和单卡略有不同,多卡通过 Python 基础库 subprocess.Popen 在每一张 GPU 上启动单独的 Python 程序的方式,每张卡的程序独立运行,只是在每一轮梯度计算完成之后,所有的程序进行梯度的同步,然后更新训练的参数。 + +我们通过一个实例了解如何进行多卡训练: +>由于AI Studio上未配置多卡环境,所以本实例需在本地构建多卡环境后运行。 + +1. 
本实例仍然采用前面定义的 MNIST 网络,可将前面定义的 SimpleImgConvPool、MNIST 网络结构、相关的库导入代码、以及下面多卡训练的示例代码拷贝至本地文件 train.py 中。 + + +```python +import numpy as np +import paddle +from paddle.optimizer import AdamOptimizer +from paddle.imperative import to_variable + +place = paddle.CUDAPlace(paddle.imperative.ParallelEnv().dev_id) +paddle.enable_imperative(place) +strategy = paddle.imperative.prepare_context() +epoch_num = 5 +BATCH_SIZE = 64 +mnist = MNIST() +adam = AdamOptimizer(learning_rate=0.001, parameter_list=mnist.parameters()) +mnist = paddle.imperative.DataParallel(mnist, strategy) + +train_reader = paddle.batch( + paddle.dataset.mnist.train(), batch_size=BATCH_SIZE, drop_last=True) +train_reader = paddle.incubate.reader.distributed_batch_reader( + train_reader) + +for epoch in range(epoch_num): + for batch_id, data in enumerate(train_reader()): + dy_x_data = np.array([x[0].reshape(1, 28, 28) + for x in data]).astype('float32') + y_data = np.array( + [x[1] for x in data]).astype('int64').reshape(-1, 1) + + img = to_variable(dy_x_data) + label = to_variable(y_data) + label.stop_gradient = True + + cost, acc = mnist(img, label) + + loss = paddle.nn.functional.cross_entropy(cost, label) + avg_loss = paddle.mean(loss) + + avg_loss = mnist.scale_loss(avg_loss) + avg_loss.backward() + mnist.apply_collective_grads() + + adam.minimize(avg_loss) + mnist.clear_gradients() + + if batch_id % 100 == 0 and batch_id is not 0: + print("epoch: {}, batch_id: {}, loss is: {}".format(epoch, batch_id, avg_loss.numpy())) + +if paddle.imperative.ParallelEnv().local_rank == 0: + paddle.imperative.save(mnist.state_dict(), "work_0") +``` + +2、飞桨命令式编程多进程多卡模型训练启动时,需要指定使用的 GPU,比如使用 0,1 卡,可执行如下命令启动训练: + + +``` +CUDA_VISIBLE_DEVICES=0,1 python -m paddle.distributed.launch --log_dir ./mylog train.py +``` +其中 log_dir 为存放 log 的地址,train.py 为程序名。 +执行结果如下: + +``` +----------- Configuration Arguments ----------- +cluster_node_ips: 127.0.0.1 +log_dir: ./mylog +node_ip: 127.0.0.1 +print_config: True +selected_gpus: 0,1 +started_port: 6170 +training_script: train.py +training_script_args: [] +use_paddlecloud: False +------------------------------------------------ +trainers_endpoints: 127.0.0.1:6170,127.0.0.1:6171 , node_id: 0 , current_node_ip: 127.0.0.1 , num_nodes: 1 , node_ips: ['127.0.0.1'] , nranks: 2 +``` + +此时,程序会将每个进程的输出 log 导出到 ./mylog 路径下,可以打开 workerlog.0 和 workerlog.1 来查看结果: + +``` +. +├── mylog +│ ├── workerlog.0 +│ └── workerlog.1 +└── train.py +``` + +总结一下,多卡训练相比单卡训练,有如下步骤不同: +1. 通过 ParallelEnv() 的 dev_id 设置程序运行的设备。 +``` +place = paddle.CUDAPlace(paddle.imperative.ParallelEnv().dev_id) +paddle.enable_imperative(place): +``` +2. 准备多卡环境。 +``` +strategy = paddle.imperative.prepare_context() +``` +3. 数据并行模块。 + +在数据并行的时候,我们需要存储和初始化一些多卡相关的信息,这些信息和操作放在 DataParallel 类中,使用的时候,我们需要利用 model(定义的模型) 和 strategy(第二步得到的多卡环境) 信息初始化 DataParallel。 +``` +mnist = paddle.imperative.DataParallel(mnist, strategy) +``` +4. 数据切分。 + +数据切分是一个非常重要的流程,是为了防止每张卡在每一轮训练见到的数据都一样,可以使用 distributed_batch_reader 对单卡的 reader 进行进行切分处理。 用户也可以其他的策略来达到数据切分的目的,比如事先分配好每张卡的数据,这样就可以使用单卡的 reader ,不使用 distributed_batch_reader。 + +``` +train_reader = paddle.incubate.reader.distributed_batch_reader(train_reader) +``` + +5. 单步训练。 + +首先对 loss 进行归一化,然后计算单卡的梯度,最终将所有的梯度聚合。 +``` +avg_loss = mnist.scale_loss(avg_loss) +avg_loss.backward() +mnist.apply_collective_grads() +``` +6. 模型保存。 + +和单卡不同,多卡训练时需逐个进程执行保存操作,多个进程同时保存会使模型文件格式出错。 +``` +if paddle.imperative.ParallelEnv().local_rank == 0: + paddle.imperative.save(mnist.state_dict(), "worker_0") +``` +7. 
评估测试。 + +对模型进行评估测试时,如果需要加载模型,须确保评估和保存的操作在同一个进程中,否则可能出现模型尚未保存完成,即启动评估,造成加载出错的问题。如果不需要加载模型,则没有这个问题,在一个进程或多个进程中评估均可。 + +## 4. 模型部署 + +### 4.1 动转静部署 +命令式编程虽然有非常多的优点,但是如果用户希望使用 C++ 部署已经训练好的模型,会存在一些不便利。比如,命令式编程中可使用 Python 原生的控制流,包含 if/else、switch、for/while,这些控制流需要通过一定的机制才能映射到 C++ 端,实现在 C++ 端的部署。 + +
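+在选择部署方案前,可以先判断模型中的控制流是否依赖输入。下面是一段示意代码(仅用于说明概念,其中的常量 USE_EXTRA_BIAS 为演示用的假设名称,接口用法与前文一致):if 分支由 Python 常量决定,与输入的值和 shape 均无关,属于下述第一类可以直接使用 TracedLayer 的场景;依赖输入 shape 的控制流示例见本节后文。
+
+```python
+import numpy as np
+import paddle
+
+paddle.enable_imperative()
+
+USE_EXTRA_BIAS = True  # Python 常量(假设名称),与输入无关
+
+x = paddle.imperative.to_variable(np.ones([4, 10], dtype='float32'))
+fc = paddle.nn.Linear(10, 1, dtype="float32")
+
+out = fc(x)
+if USE_EXTRA_BIAS:     # 分支条件与输入的值和 shape 都无关,可用 TracedLayer 方案
+    out = out + 1.0
+print(out.numpy())
+```
+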
+* 如果用户使用的 if/else、switch、for/while 与输入(包括输入的值和 shape)无关,则可以使用如下命令式模型部署方案:
+    * 使用 TracedLayer 将前向命令式模型转换为声明式模型,可以将模型保存后做在线 C++ 预测。
+    * 所有的 TracedLayer 对象均不应通过构造函数创建,而需通过调用静态方法 TracedLayer.trace(layer, inputs) 创建。
+    * TracedLayer 使用 Executor 和 CompiledProgram 运行声明式模型。
+
+ + + +```python +from paddle.imperative import TracedLayer + +paddle.enable_imperative() +# 定义MNIST类的对象 +mnist = MNIST() +in_np = np.random.random([10, 1, 28, 28]).astype('float32') +# 将numpy的ndarray类型的数据转换为Variable类型 +input_var = paddle.imperative.to_variable(in_np) +# 通过 TracerLayer.trace 接口将命令式模型转换为声明式模型 +out_dygraph, static_layer = TracedLayer.trace(mnist, inputs=[input_var]) +save_dirname = './saved_infer_model' +# 将转换后的模型保存 +static_layer.save_inference_model(save_dirname, feed=[0], fetch=[0]) +``` + + +```python +# 声明式编程中需要使用执行器执行之前已经定义好的网络 +place = paddle.CPUPlace() +exe = paddle.Executor(place) +program, feed_vars, fetch_vars = paddle.io.load_inference_model(save_dirname, exe) +# 声明式编程中需要调用执行器的run方法执行计算过程 +fetch, = exe.run(program, feed={feed_vars[0]: in_np}, fetch_list=fetch_vars) +``` + +以上示例中,通过 [TracerLayer.trace](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/TracedLayer_cn.html#trace) 接口来运行命令式模型并将其转换为声明式模型,该接口需要传入命令式模型 mnist 和输入变量列表 [input_var];然后调用 [save_inference_model](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/TracedLayer_cn.html#save_inference_model) 接口将声明式模型保存为用于预测部署的模型,之后利用 [load_inference_model](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/io_cn/load_inference_model_cn.html) 接口将保存的模型加载,并使用 [Executor](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/executor_cn/Executor_cn.html#executor) 执行,检查结果是否正确。 + +[save_inference_model](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api_cn/dygraph_cn/TracedLayer_cn.html#save_inference_model) 保存的下来的模型,同样可以使用 C++ 加载部署,具体的操作请参考:[C++ 预测 API介绍](https://www.paddlepaddle.org.cn/documentation/docs/zh/advanced_guide/inference_deployment/inference/native_infer.html) + +* 如果任务中包含了依赖数据的控制流,比如下面这个示例中if条件的判断依赖输入的shape。针对这种场景,可以使用基于ProgramTranslator的方式转成声明式编程的program,通过save_inference_model 接口将声明式模型保存为用于预测部署的模型,之后利用 [load_inference_model](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/io_cn/load_inference_model_cn.html) 接口将保存的模型加载,并使用 [Executor](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/executor_cn/Executor_cn.html#executor) 执行,检查结果是否正确。 + +保存的下来的模型,同样可以使用 C++ 加载部署,具体的操作请参考:[C++ 预测 API介绍](https://www.paddlepaddle.org.cn/documentation/docs/zh/advanced_guide/inference_deployment/inference/native_infer.html) + + +```python +paddle.enable_imperative() +in_np = np.array([-2]).astype('int') +# 将numpy的ndarray类型的数据转换为Variable类型 +input_var = paddle.imperative.to_variable(in_np) +# if判断与输入input_var的shape有关 +if input_var.shape[0] > 1: + print("input_var's shape[0] > 1") +else: + print("input_var's shape[1] < 1") +``` + +* 针对依赖数据的控制流,解决流程如下 1. 添加declarative装饰器; 2. 
1) 添加 declarative 装饰器

首先需要给 MNIST 类的 forward 函数添加一个 declarative 装饰器,来标记需要转换的代码块(注:需要在最外层 class 的 forward 函数中添加)。

```python
from paddle.imperative import declarative

# 定义MNIST网络,必须继承自paddle.nn.Layer
# 该网络由两个SimpleImgConvPool子网络、reshape层、matmul层、softmax层、accuracy层组成
class MNIST(paddle.nn.Layer):
    def __init__(self):
        super(MNIST, self).__init__()
        self._simple_img_conv_pool_1 = SimpleImgConvPool(
            1, 20, 5, 2, 2, act="relu")
        self._simple_img_conv_pool_2 = SimpleImgConvPool(
            20, 50, 5, 2, 2, act="relu")

        self.pool_2_shape = 50 * 4 * 4
        SIZE = 10
        self.output_weight = self.create_parameter(
            [self.pool_2_shape, 10])

    @declarative
    def forward(self, inputs, label=None):
        x = self._simple_img_conv_pool_1(inputs)
        x = self._simple_img_conv_pool_2(x)
        x = paddle.reshape(x, shape=[-1, self.pool_2_shape])
        x = paddle.matmul(x, self.output_weight)
        x = paddle.nn.functional.softmax(x)
        if label is not None:
            acc = paddle.metric.accuracy(input=x, label=label)
            return x, acc
        else:
            return x
```

2) 利用 ProgramTranslator 进行转换

```python
import paddle

paddle.enable_imperative()
prog_trans = paddle.imperative.ProgramTranslator()
mnist = MNIST()

in_np = np.random.random([10, 1, 28, 28]).astype('float32')
label_np = np.random.randint(0, 10, size=(10, 1)).astype("int64")
input_var = paddle.imperative.to_variable(in_np)
label_var = paddle.imperative.to_variable(label_np)

out = mnist(input_var, label_var)

prog_trans.save_inference_model("./mnist_dy2stat", fetch=[0, 1])
```

### 4.2 动转静训练

命令式编程在执行时存在 Python 与 C++ 的交互以及计算图的构建开销,这会导致部分 RNN 相关任务在命令式编程下的性能比声明式编程要差。为了提升这类任务的性能,可以采用将命令式模型转换为声明式模型的方式进行训练。转换方式非常简单,仅需要给 MNIST 类的 forward 函数添加一个 declarative 装饰器,来标记需要转换的代码块。

```python
from paddle.imperative import declarative

# 定义MNIST网络,必须继承自paddle.nn.Layer
# 该网络由两个SimpleImgConvPool子网络、reshape层、matmul层、softmax层、accuracy层组成
class MNIST(paddle.nn.Layer):
    def __init__(self):
        super(MNIST, self).__init__()
        self._simple_img_conv_pool_1 = SimpleImgConvPool(
            1, 20, 5, 2, 2, act="relu")
        self._simple_img_conv_pool_2 = SimpleImgConvPool(
            20, 50, 5, 2, 2, act="relu")

        self.pool_2_shape = 50 * 4 * 4
        SIZE = 10
        self.output_weight = self.create_parameter(
            [self.pool_2_shape, 10])

    @declarative
    def forward(self, inputs, label=None):
        x = self._simple_img_conv_pool_1(inputs)
        x = self._simple_img_conv_pool_2(x)
        x = paddle.reshape(x, shape=[-1, self.pool_2_shape])
        x = paddle.matmul(x, self.output_weight)
        x = paddle.nn.functional.softmax(x)
        if label is not None:
            acc = paddle.metric.accuracy(input=x, label=label)
            return x, acc
        else:
            return x
```

## 5. 使用技巧

### 5.1 中间变量值、梯度打印

1. 用户想要查看任意变量的值,可以使用 [numpy](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/fluid_cn/Variable_cn.html#numpy) 接口直接打印变量的值:

```
x = y * 10
print(x.numpy())
```

2. 查看反向的值:在执行 backward 之后,可以通过 [gradient](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/fluid_cn/Variable_cn.html#gradient) 接口查看任意变量的梯度,并直接打印反向梯度的值:

```
x = y * 10
x.backward()
print(y.gradient())
```

### 5.2 断点调试

因为采用了命令式的编程方式,程序在执行之后可以立刻获取到执行结果,因此在命令式编程中,用户可以利用 IDE 提供的断点调试功能,通过查看 Variable 的 shape、真实值等信息,帮助发现程序中的问题。

1.
如下图所示,在示例程序中设置两个断点,执行到第一个断点的位置,我们可以观察变量 x 和 linear1 的信息。 + +![](https://ai-studio-static-online.cdn.bcebos.com/b9bade026bea4ae797d26dcd4590452d0d563574df6b4e1cbedd0645dcbcb349) +![](https://ai-studio-static-online.cdn.bcebos.com/c2a9096e653044849b98d94758a4ac3a77025351c1134453b2c8d18dc8ad8a73) + +2. 同时可以观察 linear1 中的权重值。 + +![](https://ai-studio-static-online.cdn.bcebos.com/e46576c64de84fa780830e1146afda0acc67fb20ea43452dadfc4949a3aad684) +![](https://ai-studio-static-online.cdn.bcebos.com/c00a6152805a492485ba0bdde773b2ac7f544f56a0364038aa2d0681ed8d0483) +![](https://ai-studio-static-online.cdn.bcebos.com/f9bc8a52eaa24181a6a6832e992feb9e726afa17764146c38fd69e8d008e7994) + + +### 5.3 阻断反向传递 + +在一些任务中,只希望拿到正向预测的值,但是不希望更新参数,或者在反向的时候剪枝,减少计算量,阻断反向的传播, Paddle提供了两种解决方案: [detach](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/fluid_cn/Variable_cn.html#detach) 接口和 [stop_gradient](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/fluid_cn/Variable_cn.html#stop_gradient) 接口,建议用户使用 detach 接口。 + +1. detach接口(建议用法) +使用方式如下: + +``` +fw_out = fw_out.detach() +``` + +detach() 接口会产生一个新的、和当前计算图分离的,但是拥有当前变量内容的临时变量。 + +通过该接口可以阻断反向的梯度传递。 + + +```python +import paddle +import numpy as np + +paddle.enable_imperative() +value0 = np.arange(26).reshape(2, 13).astype("float32") +value1 = np.arange(6).reshape(2, 3).astype("float32") +value2 = np.arange(10).reshape(2, 5).astype("float32") + +# 将ndarray类型的数据转换为Variable类型 +a = paddle.imperative.to_variable(value0) +b = paddle.imperative.to_variable(value1) +c = paddle.imperative.to_variable(value2) + +# 构造fc、fc2层 +fc = paddle.nn.Linear(13, 5, dtype="float32") +fc2 = paddle.nn.Linear(3, 3, dtype="float32") + +# 对fc、fc2层执行前向计算 +out1 = fc(a) +out2 = fc2(b) + +# 将不会对out1这部分子图做反向计算 +out1 = out1.detach() + +out = paddle.concat(input=[out1, out2, c], axis=1) +out.backward() + +# 可以发现这里out1.gradient()的值都为0,同时使得fc.weight的grad没有初始化 +assert (out1.gradient() == 0).all() +``` + +2. 
stop_gradient 接口 + +每个 Variable 都有一个 stop_gradient 属性,可以用于细粒度地在反向梯度计算时排除部分子图,以提高效率。 + +如果OP只要有一个输入需要梯度,那么该OP的输出也需要梯度。相反,只有当OP的所有输入都不需要梯度时,该OP的输出也不需要梯度。在所有的 Variable 都不需要梯度的子图中,反向计算就不会进行计算了。 + +在命令式编程下,除参数以外的所有 Variable 的 stop_gradient 属性默认值都为 True,而参数的 stop_gradient 属性默认值为 False。 该属性用于自动剪枝,避免不必要的反向运算。 + +使用方式如下: + +``` +fw_out.stop_gradient = True +``` + +通过将 Variable 的 stop_gradient 属性设置为 True,当 stop_gradient 设置为 True 时,梯度在反向传播时,遇到该 Variable,就不会继续传递。 + + +```python +import paddle +import numpy as np + +paddle.enable_imperative() +value0 = np.arange(26).reshape(2, 13).astype("float32") +value1 = np.arange(6).reshape(2, 3).astype("float32") +value2 = np.arange(10).reshape(2, 5).astype("float32") + +# 将ndarray类型的数据转换为Variable类型 +a = paddle.imperative.to_variable(value0) +b = paddle.imperative.to_variable(value1) +c = paddle.imperative.to_variable(value2) + +# 构造fc、fc2层 +fc = paddle.nn.Linear(13, 5, dtype="float32") +fc2 = paddle.nn.Linear(3, 3, dtype="float32") + +# 对fc、fc2层执行前向计算 +out1 = fc(a) +out2 = fc2(b) + +# 将不会对out1这部分子图做反向计算 +out1.stop_gradient = True + +out = paddle.concat(input=[out1, out2, c], axis=1) +out.backward() + +# 可以发现这里out1.gradient()的值都为0,同时使得fc.weight的grad没有初始化 +assert (out1.gradient() == 0).all() +``` diff --git a/doc/fluid/beginners_guide/hapi.md b/doc/fluid/beginners_guide/hapi.md new file mode 100644 index 0000000000000000000000000000000000000000..42d74904124dbcf9202d1f328ab2b9227ae3b57d --- /dev/null +++ b/doc/fluid/beginners_guide/hapi.md @@ -0,0 +1,199 @@ +# 高层API介绍 + +## 简介 + +PaddleHapi是飞桨新推出的高层API,PaddleHapi是对飞桨API的进一步封装与升级,提供了更加简洁易用的API,进一步提升了飞桨的易学易用性,并增强飞桨的功能。 + +PaddleHapi面向从深度学习小白到资深开发者的所有人群,对于AI初学者来说,使用PaddleHapi可以简单快速的构建深度学习项目,对于资深开发者来说,可以使用PaddleHapi快速完成算法迭代。 + +PaddleHapi具有以下特点: +- 易学易用: 高层API是对普通动态图API的进一步封装和优化,同时保持与普通API的兼容性,高层API使用更加易学易用,同样的实现使用高层API可以节省大量的代码。 +- 低代码开发: 使用飞桨高层API的一个明显特点是,用户可编程代码量大大缩减。 +- 动静转换: 高层API支持动静转换,用户只需要改一行代码即可实现将动态图代码在静态图模式下训练,既方便用户使用动态图调试模型,又提升了模型训练效率。 + + + +在功能增强与使用方式上,高层API有以下升级: +1. 模型训练方式升级: 高层API中封装了Model类,继承了Model类的神经网络可以仅用几行代码完成模型的训练。 +2. 新增图像处理模块transform: 飞桨新增了图像预处理模块,其中包含十数种数据处理函数,基本涵盖了常用的数据处理、数据增强方法。 +3. 提供常用的神经网络模型可供调用: 高层API中集成了计算机视觉领域和自然语言处理领域常用模型,包括但不限于mobilenet、resnet、yolov3、cyclegan、bert、transformer、seq2seq等等。同时发布了对应模型的预训练模型,用户可以直接使用这些模型或者在此基础上完成二次开发。 + + +![](https://raw.githubusercontent.com/PaddlePaddle/FluidDoc/hapi/doc/fluid/beginners_guide/image/hapi_gif.gif) + + +## 目录 + +* [特性](#1) +* [快速使用](#2) +* [新增功能](#3) +* [使用示例](#4) + + +##

特性

### 易学易用

高层API基于飞桨动态图实现,兼容飞桨动态图的所有功能,既秉承了动态图易学、易用、易调试的特点,又对飞桨的动态图做了进一步的封装与优化。

### 低代码开发

相比于动态图的算法实现,使用高层API实现的算法可编程代码量更少:原始的动态图训练代码需要20多行代码才能完成模型的训练,使用高层API后,仅用8行代码即可实现相同的功能。

使用普通API与高层API实现手写字符识别的对比如下图,左边是普通动态图API的实现,右边是使用高层API的实现,可以明显发现,使用高层API的代码量更少。

![](https://raw.githubusercontent.com/PaddlePaddle/FluidDoc/hapi/doc/fluid/beginners_guide/image/new_hapi.png)

### 动静统一

高层API中实现了动静统一,用户无需感知静态图、动态图的区别,只需要改一行代码即可实现将动态图代码在静态图模式下训练。动态图更方便调试模型,静态图的训练方式训练效率更高。

高层API默认采用静态图的训练方式,我们可以使用 fluid.enable_dygraph() 切换到动态图模式下运行。

```
# 指定运行设备
place = fluid.CUDAPlace(0)
# 一行代码切换动态图训练模式
fluid.enable_dygraph(place)

# 声明网络结构
model = Mnist("mnist")
# 定义优化器
optimizer = fluid.optimizer.SGDOptimizer(learning_rate=0.001, parameter_list=model.parameters())
# 调用prepare() 完成训练的配置
model.prepare(optimizer, CrossEntropy(), Accuracy(), inputs, labels, device='gpu')
# 调用 fit(),启动模型的训练
model.fit(train_dataset, val_dataset, batch_size=100, epochs=1, log_freq=100, save_dir="./output/")
```

## 快速使用
以mnist手写字符识别为例,介绍飞桨高层API的使用方式。

### 1. 搭建网络结构

使用高层API组建网络与动态图的组网方式基本相同,唯一的区别在于,使用高层API组建网络需要继承 Model 这个类,而普通的动态图组网需要继承 dygraph.Layer 类。

高层API组网方式如下:

```
from paddle.fluid.dygraph import Linear
from paddle.incubate.hapi.model import Model, Input
from paddle.incubate.hapi.loss import CrossEntropy

class Mnist(Model):
    def __init__(self, name_scope):
        super(Mnist, self).__init__()
        self.fc = Linear(input_dim=784, output_dim=10, act="softmax")

    # 定义网络结构的前向计算过程
    def forward(self, inputs):
        outputs = self.fc(inputs)
        return outputs
```

### 2. 训练准备

在开始训练前,需要定义优化器、损失函数、度量函数,准备数据等。这些过程均可以在高层API Model 类的 prepare 函数中完成。

```
# 定义输入数据格式
inputs = [Input([None, 784], 'float32', name='image')]
labels = [Input([None, 1], 'int64', name='label')]

# 声明网络结构
model = Mnist("mnist")
optimizer = fluid.optimizer.SGDOptimizer(learning_rate=0.001, parameter_list=model.parameters())
# 使用高层API,prepare() 完成训练的配置
model.prepare(optimizer, CrossEntropy(), Accuracy(), inputs, labels, device='gpu')
```

### 3. 启动训练

使用高层API完成训练迭代过程时,使用一行代码即可构建双层循环程序,去控制训练的轮数和数据读取过程。

```
from paddle.incubate.hapi.datasets.mnist import MNIST as MnistDataset
# 定义数据读取器
train_dataset = MnistDataset(mode='train')
val_dataset = MnistDataset(mode='test')
# 启动训练
model.fit(train_dataset, val_dataset, batch_size=100, epochs=10, log_freq=100, save_dir="./output/")
```

高层API中通过 fit 函数完成训练的循环过程,只需要设置训练的数据读取器、batch_size 大小、迭代轮数 epoch、训练日志打印频率 log_freq 以及保存模型的路径即可。

## 新增功能
除了使用高层API实现一行代码启动训练外,还新增了以下功能:
- transform 数据增强模块
- paddlevision 模型调用模块

### transform

图像预处理模块 vision.transform 包括一系列的图像增强与图像处理实现,对处理计算机视觉相关的任务有很大帮助。

下表列出了 transform 支持的数据处理和数据增强API:

| transform的数据处理实现 | 函数功能 |
| :-------- | :----- |
| Compose | 组合多种数据变换 |
| Resize | 将图像转换为固定大小 |
| RandomResizedCrop | 根据输入比例对图像做随机剪切,然后resize到指定大小 |
| CenterCrop | 以图像的中心为中心对图像做剪切 |
| CenterCropResize | 对图像做padding,padding后的图像做centercrop,然后resize到指定大小 |
| RandomHorizontalFlip | 随机对图像做水平翻转 |
| RandomVerticalFlip | 随机对图像做垂直翻转 |
| Permute | 将数据的维度换位 |
| Normalize | 用指定的均值和标准差对数据做归一化 |
| GaussianNoise | 给数据增加高斯噪声 |
| BrightnessTransform | 调整输入图像的亮度 |
| SaturationTransform | 调整输入图像的饱和度 |
| ContrastTransform | 调整输入图像的对比度 |
| HueTransform | 调整图像的色调 |
| ColorJitter | 随机调整图像的亮度、饱和度、对比度和色调 |

使用方法如下:

```
from paddle.incubate.hapi.vision.transforms import transforms
import cv2

img_path = "./output/sample.jpg"
img = cv2.imread(img_path)

# 使用Compose可以将多个数据增强函数组合在一起
trans_funcs = transforms.Compose([transforms.RandomResizedCrop(224),
                                  transforms.RandomHorizontalFlip(),
                                  transforms.BrightnessTransform(0.2)])
label = None
img_processed, label = trans_funcs(img, label)
```

上述代码的效果图如下:

![](https://raw.githubusercontent.com/PaddlePaddle/FluidDoc/hapi/doc/fluid/beginners_guide/image/hapi_transform.png)

### paddlevision

paddlevision中包含了高层API对常用模型的封装,包括ResNet、VGG、MobileNet、yoloV3、darknet、BMN、transformer等等。使用这些现有的模型,可以快速地完成神经网络的训练、finetune等。

使用paddlevision中的模型可以简单快速地构建一个深度学习任务,比如13行代码即可实现resnet在Imagenet数据集上的训练:

![](https://raw.githubusercontent.com/PaddlePaddle/FluidDoc/hapi/doc/fluid/beginners_guide/image/paddlevision.png)

## 更多使用示例
+ +更多的高层API使用示例请参考: +- [bert](https://github.com/PaddlePaddle/hapi/tree/master/examples/bert) +- [image classification](https://github.com/PaddlePaddle/hapi/tree/master/examples/image_classification) +- [BMN](https://github.com/PaddlePaddle/hapi/tree/master/examples/bmn) +- [cycleGAN](https://github.com/PaddlePaddle/hapi/tree/master/examples/cyclegan) +- [ocr](https://github.com/PaddlePaddle/hapi/tree/master/examples/ocr) +- [TSM](https://github.com/PaddlePaddle/hapi/tree/master/examples/tsm) +- [yolov3](https://github.com/PaddlePaddle/hapi/tree/master/examples/yolov3) +- [transformer](https://github.com/PaddlePaddle/hapi/tree/master/examples/transformer) +- [seq2seq](https://github.com/PaddlePaddle/hapi/tree/master/examples/seq2seq) +- [style-transfer](https://github.com/PaddlePaddle/hapi/tree/master/examples/style-transfer) diff --git a/doc/fluid/beginners_guide/image/hapi_gif.gif b/doc/fluid/beginners_guide/image/hapi_gif.gif new file mode 100644 index 0000000000000000000000000000000000000000..5d261d2f375e1800858c71bf12158a73edb7d5e9 Binary files /dev/null and b/doc/fluid/beginners_guide/image/hapi_gif.gif differ diff --git a/doc/fluid/beginners_guide/image/hapi_transform.png b/doc/fluid/beginners_guide/image/hapi_transform.png new file mode 100644 index 0000000000000000000000000000000000000000..c93947320c4d8d4a32135ce0873f45a5f5b8e9f1 Binary files /dev/null and b/doc/fluid/beginners_guide/image/hapi_transform.png differ diff --git a/doc/fluid/beginners_guide/image/new_hapi.png b/doc/fluid/beginners_guide/image/new_hapi.png new file mode 100644 index 0000000000000000000000000000000000000000..c0ea7e407b2f6b38a116ca96008d4f84ca21d88f Binary files /dev/null and b/doc/fluid/beginners_guide/image/new_hapi.png differ diff --git a/doc/fluid/beginners_guide/image/paddlevision.png b/doc/fluid/beginners_guide/image/paddlevision.png new file mode 100644 index 0000000000000000000000000000000000000000..eaef430b14bf1f107396b083d38d4a952f1744f2 Binary files /dev/null and b/doc/fluid/beginners_guide/image/paddlevision.png differ diff --git a/doc/fluid/beginners_guide/index_cn.rst b/doc/fluid/beginners_guide/index_cn.rst index 12563074273cb55741fb4232fc3780aa028cdf70..7f751487e98a40297c272bf1ba13077a61161953 100644 --- a/doc/fluid/beginners_guide/index_cn.rst +++ b/doc/fluid/beginners_guide/index_cn.rst @@ -1,22 +1,197 @@ -######## 快速上手 -######## +=========== -PaddlePaddle (PArallel Distributed Deep LEarning)是一个易用、高效、灵活、可扩展的深度学习框架。 +飞桨2.0概述 +----------- +在保持1.x版本工业级大规模高效训练和多平台快速部署优势的前提,飞桨2.0版本重点提升了框架的易用性,主要在用户交互层进行了优化,降低学习门槛,提升开发效率。不管对于初学者还是资深专家,都能够更容易地使用飞桨进行深度学习任务开发,加速前沿算法研究和工业级任务开发。 -您可参考PaddlePaddle的 `Github `_ 了解详情,也可阅读 `版本说明 <../release_note_cn.html>`_ 了解新版本的特性。 +此版本为测试版,还在迭代开发中,目前还没有稳定,后续API会根据反馈有可能进行不兼容的升级。对于想要体验飞桨最新特性的开发者,欢迎试用此版本;对稳定性要求高的工业级应用场景推荐使用Paddle +1.8稳定版本。此版本主推命令式(imperative)开发模式,并提供了高层API的封装。命令式开发模式具有很好的灵活性,高层API可以大幅减少重复代码。对于初学者或基础的任务场景,推荐使用高层API的开发方式,简单易用;对于资深开发者想要实现复杂的功能,推荐使用动态图的API,灵活高效。 -让我们从学习PaddlePaddle基本概念这里开始: +跟1.x版本对比,飞桨2.0版本的重要升级如下: -- `基本概念 <../beginners_guide/basic_concept/index_cn.html>`_:介绍 Paddle的基本概念和使用方法 ++------------+--------------------------------------+-----------------------------------------+ +| | 飞桨1.x版本 | 飞桨2.0版本 | ++============+======================================+=========================================+ +| 开发模式 | 推荐声明式(declarative) | 推荐命令式(imperative) | ++------------+--------------------------------------+-----------------------------------------+ +| 组网方式 | 推荐函数式组网 | 推荐面向对象式组网 | 
++------------+--------------------------------------+-----------------------------------------+ +| 高层API | 无 | 封装常见的操作,实现低代码开发 | ++------------+--------------------------------------+-----------------------------------------+ +| 基础API | fluid目录,结构不清晰,存在过时API | paddle目录,整体结构调整,清理废弃API | ++------------+--------------------------------------+-----------------------------------------+ -如果您已经掌握了飞桨的基本概念,期望可以针对实际问题建模、搭建自己网络,编程实践中提供了一些 Paddle 的使用细节供您参考: +开发模式 +-------- -- `编程实践 <../beginners_guide/coding_practice/index_cn.html>`_:介绍如何针对实际问题建模、搭建自己网络 +飞桨同时支持声明式和命令式这两种开发模式,兼具声明式编程的高效和命令式编程的灵活。 +声明式编程模式(通常也被称为静态模式或define-and-run模式),程序可以明确分为网络结构定义和执行这两个阶段。定义阶段声明网络结构,此时并未传入具体的训练数据;执行阶段需要用户通过feed的方式传入具体数据,完成计算后,通过fetch的方式返回计算结果。示例如下: + +.. code:: python + + import numpy + import paddle + # 定义输入数据占位符 + a = paddle.static.data(name="a", shape=[1], dtype='int64') + b = paddle.static.data(name="b", shape=[1], dtype='int64') + # 组建网络(此处网络仅由一个操作构成,即elementwise_add) + result = paddle.elementwise_add(a, b) + # 准备运行网络 + cpu = paddle.CPUPlace() # 定义运算设备,这里选择在CPU下训练 + exe = paddle.Executor(cpu) # 创建执行器 + # 创建输入数据 + x = numpy.array([2]) + y = numpy.array([3]) + # 运行网络 + outs = exe.run( + feed={'a':x, 'b':y}, # 将输入数据x, y分别赋值给变量a,b + fetch_list=[result] # 通过fetch_list参数指定需要获取的变量结果 + ) + #输出运行结果 + print (outs) + #[array([5], dtype=int64)] + +声明式开发模式的优点为在程序执行之前,可以拿到全局的组网信息,方便对计算图进行全局的优化,提升性能;并且由于全局计算图的存在,方便将计算图导出到文件,方便部署到非python语言的开发环境中,比如:C/C++/JavaScript等。声明式开发模式的缺点为,由于网络定义和执行阶段分离,在定义的时候并不知道所执行的具体的数据,程序的开发和调试会比较困难。 + +命令式编程模式(通常也被称为动态模式、eager模式或define-by-run模式),程序在网络结构定义的同时立即执行,能够实时的到执行结果。示例如下: + +.. code:: python + + import numpy + import paddle + from paddle.imperative import to_variable + + # 切换命令式编程模式 + paddle.enable_imperative() + + # 创建数据 + x = to_variable(numpy.array([2])) + y = to_variable(numpy.array([3])) + # 定义运算并执行 + z = paddle.elementwise_add(x, y) + # 输出执行结果 + print (z.numpy()) + +飞桨2.0推荐开发者使用命令式编程,可以使用原生python控制流API,具有灵活,容易开发调试的优点;同时为了兼具声明式编程在性能和部署方面的优势,飞桨提供了自动转换功能,可以将包含python控制流的代码,转换为Program,通过底层的Executor进行执行。 + +组网方式 +-------- + +飞桨1.x大量使用函数式的组网方式,这种方法的好处是写法很简洁,但表达能力偏弱,比如:如果我们想要查看fc隐含的参数的值或者想要对某一个参数进行裁剪时,会很困难,我们需要操作隐含的参数名才能访问。比如: + +.. code:: python + + import paddle.fluid as fluid + + data = fluid.layers.data(name="data", shape=[32, 32], dtype="float32") + fc = fluid.layers.fc(input=data, size=1000, act="tanh") + +飞桨2.0推荐使用面向对象式的组网方式,需要通过继承\ ``paddle.nn.Layer``\ 类的\ ``__init__``\ 和\ ``forward``\ 函数实现网络结构自定义,这种方式通过类的成员变量,方便地访问到每个类的成员,比如: + +.. code:: python + + import paddle + + class SimpleNet(paddle.nn.Layer): + def __init__(self, in_size, out_size): + super(SimpleNet, self).__init__() + self._linear = paddle.nn.Linear(in_size, out_size) + + def forward(self, x): + y = self._linear(x) + return y + +高层API +------- + +使用飞桨进行深度学习任务的开发,整体过程包括数据处理、组网、训练、评估、模型导出、预测部署这些基本的操作。这些基本操作在不同的任务中会反复出现,使用基础API进行开发时,需要开发者重复地写这些基础操作的代码,增加了模型开发的工作量。高层API针对这些基础操作进行了封装,提供更高层的开发接口,开发者只需要关心数据处理和自定义组网,其他工作可以通过调用高层API来完成。在MNIST手写数字识别任务中,对比动态图基础API的实现方式,通过使用高层API可以减少80%的非组网类代码。 + +使用高层API的另外一个好处是,可以通过一行代码\ ``paddle.enable_imperative``\ ,切换命令式编程模式和声明式编程模式。在开发阶段,可以使用的命令式编程模式,方便调试;开发完成后,可以切换到声明式编程模式,加速训练和方便部署。兼具了命令式编程实时执行,容易调试的优点,以及声明式编程全局优化和容易部署的优点。 + +以下为高层API的一个基础示例 + +.. 
code:: python + + import numpy as np + import paddle + import paddle.nn.functional as F + from paddle.incubate.hapi.model import Model, Input, Loss + from paddle.incubate.hapi.loss import CrossEntropy + + #高层API的组网方式需要继承Model,Model类实现了模型执行所需的逻辑 + class SimpleNet(Model): + def __init__(self, in_size, out_size): + super(SimpleNet, self).__init__() + self._linear = paddle.nn.Linear(in_size, out_size) + def forward(self, x): + y = self._linear(x) + z = self._linear(y) + pred = F.softmax(z) + return pred + + #兼容声明式开发模式,定义数据形状类型,如果不使用声明式编程模式,可以不定义数据占位符 + inputs = [Input([None, 8], 'float32', name='image')] + labels = [Input([None, 1], 'int64', name='labels')] + + #定义模型网络结构,包括指定损失函数和优化算法 + model = SimpleNet(8, 8) + optimizer = paddle.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=model.parameters()) + model.prepare(optimizer, CrossEntropy(), None, inputs, labels, device='cpu') + + #切换执行模式 + paddle.enable_imperative(paddle.CPUPlace()) + + #基于batch的训练 + batch_num = 10 + x = np.random.random((4, 8)).astype('float32') + y = np.random.randint(0, 8, (4, 1)).astype('int64') + for i in range(batch_num): + model.train_batch(inputs=x, labels=y) + +更多高层API开发的模型和示例请参考github Repo: +`hapi `__ + +基础API +------- + +飞桨2.0提供了新的API,可以同时支持声明式和命令式两种开发模式,比如paddle.nn.Linear,避免在两种模式下使用不同的API造成困惑。原飞桨1.x的API位于paddle.fluid目录下,其中部分组网类的API,只能用于声明式开发,比如:fluid.layers.fc,无法用于命令式开发。 + +飞桨2.0对API的目录结构进行了调整,从原来的paddle.fluid目录调整到paddle目录下,使得开发接口更加清晰,调整后的目录结构如下: + ++---------------------+-----------------------------------------------------------------------------------------------------------+ +| 目录 | 功能和包含API | ++=====================+===========================================================================================================+ +| paddle.\* | paddle根目录下保留了常用API的别名,当前包括:paddle.tensor, paddle.framework目录下的所有API | ++---------------------+-----------------------------------------------------------------------------------------------------------+ +| paddle.tensor | 跟tensor操作相关的API,比如:创建zeros, 矩阵运算matmul, 变换concat, 计算elementwise\_add, 查找argmax等 | ++---------------------+-----------------------------------------------------------------------------------------------------------+ +| paddle.nn | 跟组网相关的API,比如:输入占位符data/Input,控制流while\_loop/cond,损失函数,卷积,LSTM等,激活函数等 | ++---------------------+-----------------------------------------------------------------------------------------------------------+ +| paddle.framework | 基础框架相关的API,比如:Variable, Program, Executor等 | ++---------------------+-----------------------------------------------------------------------------------------------------------+ +| paddle.imperative | imprerative模式专用的API,比如:to\_variable, prepare\_context等 | ++---------------------+-----------------------------------------------------------------------------------------------------------+ +| paddle.optimizer | 优化算法相关API,比如:SGD,Adagrad, Adam等 | ++---------------------+-----------------------------------------------------------------------------------------------------------+ +| paddle.metric | 评估指标计算相关的API,比如:accuracy, cos\_sim等 | ++---------------------+-----------------------------------------------------------------------------------------------------------+ +| paddle.io | 数据输入输出相关API,比如:save, load, Dataset, DataLoader等 | ++---------------------+-----------------------------------------------------------------------------------------------------------+ +| paddle.device | 设备管理相关API,比如:CPUPlace, CUDAPlace等 | 
++---------------------+-----------------------------------------------------------------------------------------------------------+ +| paddle.fleet | 分布式相关API | ++---------------------+-----------------------------------------------------------------------------------------------------------+ + +同时飞桨2.0对部分Paddle +1.x版本的API进行了清理,删除了部分不再推荐使用的API,具体信息请参考Release +Note。 .. toctree:: :hidden: basic_concept/index_cn.rst - coding_practice/index_cn.rst + dygraph/DyGraph.md + hapi.md + diff --git a/doc/fluid/beginners_guide/index_en.rst b/doc/fluid/beginners_guide/index_en.rst deleted file mode 100644 index 1467d9928c4c7b3c63e29b87723a8964f3d3f0ac..0000000000000000000000000000000000000000 --- a/doc/fluid/beginners_guide/index_en.rst +++ /dev/null @@ -1,23 +0,0 @@ -################ -Beginner's Guide -################ - -PaddlePaddle (PArallel Distributed Deep LEarning) is a -simple, efficient and extensible deep learning framework. - -Please refer to `PaddlePaddle Github `_ for details, and `Release Note <../release_note_en.html>`_ for features incorporated in current version. - -Let's start with studying basic concept of PaddlePaddle: - -- `Basic Concept <../beginners_guide/basic_concept/index_en.html>`_ : introduce the basic concept and usage of Paddle - -If you have mastered the basic concept of Paddle and you expect to model and build your own network according to the actual problems, you can refer to some details of the use of paddle in the Coding Practice : - -- `Coding Practice <../beginners_guide/coding_practice/index_en.html>`_ : introduce how to model and build your own network for practical problems - - -.. toctree:: - :hidden: - - basic_concept/index_en.rst - coding_practice/index_en.rst diff --git a/doc/fluid/beginners_guide/install/Tables_en.md b/doc/fluid/beginners_guide/install/Tables_en.md deleted file mode 100755 index ddbda3c18ebf4e5859fe95e2b7ffb75202334ba5..0000000000000000000000000000000000000000 --- a/doc/fluid/beginners_guide/install/Tables_en.md +++ /dev/null @@ -1,601 +0,0 @@ -*** - -# Appendix - - -## Compile Dependency Table - -

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Dependency package name Version Description Installation command
CMake 3.4
GCC 4.8 / 5.4 recommends using devtools2 for CentOS
Python 2.7.x. depends on libpython2.7.so apt install python-dev or yum install python-devel
SWIG at least 2.0 apt install swig or yum install swig
wget any apt install wget or yum install wget
openblas any
pip at least 9.0.1 apt install python-pip or yum install Python-pip
numpy >=1.12.0 pip install numpy==1.14.0
protobuf 3.1.0 pip install protobuf==3.1.0
wheel any pip install wheel
patchELF any apt install patchelf or read github patchELF official documentation
go >=1.8 optional
-

- - -*** - -

-## **Compile Option Table** - -

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Option Description Default
WITH_GPU Whether to support GPU ON
WITH_C_API Whether to compile CAPI OFF
WITH_DOUBLE Whether to use double precision floating point numeber OFF
WITH_DSO whether to load CUDA dynamic libraries dynamically at runtime, instead of statically loading CUDA dynamic libraries. ON
WITH_AVX whether to compile PaddlePaddle binaries file containing the AVX instruction set ON
WITH_PYTHON Whether the PYTHON interpreter is embedded ON
WITH_STYLE_CHECK Whether to perform code style checking at compile time ON
WITH_TESTING Whether to turn on unit test OFF
WITH_DOC Whether to compile Chinese and English documents OFF
WITH_SWIG_PY Whether to compile PYTHON's SWIG interface, which can be used for predicting and customizing training Auto
WITH_GOLANG Whether to compile the fault-tolerant parameter server of the go language OFF
WITH_MKL Whether to use the MKL math library, if not,using OpenBLAS ON
WITH_SYSTEM_BLAS Whether to use the system's BLAS OFF
WITH_DISTRIBUTE Whether to Compile with distributed version OFF
WITH_RDMA Whether to compile the relevant parts that supports RDMA OFF
WITH_BRPC_RDMA Whether to use BRPC RDMA as RPC protocol OFF
ON_INFER Whether to turn on prediction optimization OFF
DWITH_ANAKIN Whether to Compile ANAKIN OFF
CUDA_ARCH_NAME Build for which GPU architecture All:all available GPU architectures Auto:Automatically detect current GPU architecture
TENSORRT_ROOT Assign TensoRRT path If this flag is not assigned, Paddle will detect TensorRT automatically.
-

- - -**BLAS** - -PaddlePaddle supports two BLAS libraries, [MKL](https://software.intel.com/en-us/mkl) and [OpenBlAS](http://www.openblas.net/). MKL is used by default. If you use MKL and the machine contains the AVX2 instruction set, you will also download the MKL-DNN math library, for details please refer to [here](https://github.com/PaddlePaddle/Paddle/tree/release/0.11.0/doc/design/mkldnn#cmake). - -If you close MKL, OpenBLAS will be used as the BLAS library. - -**CUDA/cuDNN** - -PaddlePaddle automatically finds the CUDA and cuDNN libraries installed in the system for compilation and execution at compile time/runtime. Use the parameter `-DCUDA_ARCH_NAME=Auto` to specify to enable automatic detection of the SM architecture and speed up compilation. - -PaddlePaddle can be compiled and run using any version after cuDNN v5.1, but try to keep the same version of cuDNN in the compiling and running processes. We recommend using the latest version of cuDNN. - -**Configure Compile Options** - -PaddePaddle implements references to various BLAS/CUDA/cuDNN libraries by specifying paths at compile time. When cmake compiles, it first searches the system paths ( `/usr/liby` and `/usr/local/lib` ) for these libraries, and also reads the relevant path variables for searching. Can be set by using the `-D` command, for example: - -> `Cmake .. -DWITH_GPU=ON -DWITH_TESTING=OFF -DCUDNN_ROOT=/opt/cudnnv5` - -**Note**: The settings introduced here for these compilation options are only valid for the first cmake. If you want to reset it later, it is recommended to clean up the entire build directory ( rm -rf ) and then specify it. - - -*** - -

-## **Installation Package List** - - -

- - - - - - - - - - - - - - - - - - - - - -
Version Number Release Discription
paddlepaddle==[version code] such as paddlepaddle==1.5.1 Only support the corresponding version of the CPU PaddlePaddle, please refer to Pypi for the specific version.
paddlepaddle-gpu==1.5.1 Using version 1.5.1 compiled with CUDA 9.0 and cuDNN 7
paddlepaddle-gpu==1.5.1.post87 Using version 1.5.1 compiled with CUDA 8.0 and cuDNN 7
-

- - - -You can find various distributions of PaddlePaddle-gpu in [the Release History](https://pypi.org/project/paddlepaddle-gpu/#history). - -Please note that: paddlepaddle-gpu in windows, will download package compiled with CUDA 8.0 and cuDNN 7 - -*** - -

-## Installation Mirrors and Introduction - -

- - - - - - - - - - - - - - - - - - - - - - - - - -
Version Number Release Description
hub.baidubce.com/paddlepaddle/paddle:latest The latest pre-installed image of the PaddlePaddle CPU version
hub.baidubce.com/paddlepaddle/paddle:latest-dev The latest PaddlePaddle development environment
hub.baidubce.com/paddlepaddle/paddle:[Version] Replace version with a specific version, preinstalled PaddlePaddle image in historical version
hub.baidubce.com/paddlepaddle/paddle:latest-gpu The latest pre-installed image of the PaddlePaddle GPU version
-

- - - -You can find the docker image for each release of PaddlePaddle in the [DockerHub](https://hub.docker.com/r/paddlepaddle/paddle/tags/). - -*** - -

- -## **Multi-version whl package list - Release** - -

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Release Instruction cp27-cp27mu cp27-cp27m cp35-cp35m cp36-cp36m cp37-cp37m
cpu-mkl - paddlepaddle-1.5.1-cp27-cp27mu-linux_x86_64.whl - paddlepaddle-1.5.1-cp27-cp27m-linux_x86_64.whl - paddlepaddle-1.5.1-cp35-cp35m-linux_x86_64.whl - paddlepaddle-1.5.1-cp36-cp36m-linux_x86_64.whl - paddlepaddle-1.5.1-cp37-cp37m-linux_x86_64.whl
cpu-openblas - paddlepaddle-1.5.1-cp27-cp27mu-linux_x86_64.whl paddlepaddle-1.5.1-cp27-cp27m-linux_x86_64.whl - paddlepaddle-1.5.1-cp35-cp35m-linux_x86_64.whl - paddlepaddle-1.5.1-cp36-cp36m-linux_x86_64.whl - paddlepaddle-1.5.1-cp37-cp37m-linux_x86_64.whl
cuda8-cudnn7-openblas paddlepaddle_gpu-1.5.1-cp27-cp27mu-linux_x86_64.whl paddlepaddle_gpu-1.5.1-cp27-cp27m-linux_x86_64.whl paddlepaddle_gpu-1.5.1-cp35-cp35m-linux_x86_64.whl paddlepaddle_gpu-1.5.1-cp36-cp36m-linux_x86_64.whl paddlepaddle_gpu-1.5.1-cp37-cp37m-linux_x86_64.whl
cuda8-cudnn7-mkl paddlepaddle_gpu-1.5.1-cp27-cp27mu-linux_x86_64.whl paddlepaddle_gpu-1.5.1-cp27-cp27m-linux_x86_64.whl paddlepaddle_gpu-1.5.1-cp35-cp35m-linux_x86_64.whl paddlepaddle_gpu-1.5.1-cp36-cp36m-linux_x86_64.whl paddlepaddle_gpu-1.5.1-cp37-cp37m-linux_x86_64.whl
cuda9-cudnn7-mkl paddlepaddle_gpu-1.5.1-cp27-cp27mu-linux_x86_64.whl paddlepaddle_gpu-1.5.1-cp27-cp27m-linux_x86_64.whl paddlepaddle_gpu-1.5.1-cp35-cp35m-linux_x86_64.whl paddlepaddle_gpu-1.5.1-cp36-cp36m-linux_x86_64.whl paddlepaddle_gpu-1.5.1-cp37-cp37m-linux_x86_64.whl
cuda10_cudnn7-mkl paddlepaddle_gpu-1.5.1-cp27-cp27mu-linux_x86_64.whl paddlepaddle_gpu-1.5.1-cp27-cp27m-linux_x86_64.whl paddlepaddle_gpu-1.5.1-cp35-cp35m-linux_x86_64.whl - paddlepaddle_gpu-1.5.1-cp36-cp36m-linux_x86_64.whl - paddlepaddle_gpu-1.5.1-cp37-cp37m-linux_x86_64.whl
win_cpu_openblas - - paddlepaddle-1.5.1-cp27-cp27m-win_amd64.whl - paddlepaddle-1.5.1-cp35-cp35m-win_amd64.whl - paddlepaddle-1.5.1-cp36-cp36m-win_amd64.whl - paddlepaddle-1.5.1-cp37-cp37m-win_amd64.whl
win_cuda8_cudnn7_openblas - - paddlepaddle_gpu-1.5.1-cp27-cp27m-win_amd64.whl - paddlepaddle_gpu-1.5.1-cp35-cp35m-win_amd64.whl - paddlepaddle_gpu-1.5.1-cp36-cp36m-win_amd64.whl - paddlepaddle_gpu-1.5.1-cp37-cp37m-win_amd64.whl
win_cuda9_cudnn7_openblas - - paddlepaddle_gpu-1.5.1-cp27-cp27m-win_amd64.whl - paddlepaddle_gpu-1.5.1-cp35-cp35m-win_amd64.whl - paddlepaddle_gpu-1.5.1-cp36-cp36m-win_amd64.whl - paddlepaddle_gpu-1.5.1-cp37-cp37m-win_amd64.whl
mac_cpu - - paddlepaddle-1.5.1-cp27-cp27m-macosx_10_6_intel.whl - paddlepaddle-1.5.1-cp35-cp35m-macosx_10_6_intel.whl - paddlepaddle-1.5.1-cp36-cp36m-macosx_10_6_intel.whl - paddlepaddle-1.5.1-cp37-cp37m-macosx_10_6_intel.whl
-

- - - -

- -## **Multi-version whl package list - dev** - - -

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Release Instruction cp27-cp27mu cp27-cp27m cp35-cp35m cp36-cp36m cp37-cp37m
cpu-mkl - paddlepaddle-latest-cp27-cp27mu-linux_x86_64.whl - paddlepaddle-latest-cp27-cp27m-linux_x86_64.whl - paddlepaddle-latest-cp35-cp35m-linux_x86_64.whl - paddlepaddle-latest-cp36-cp36m-linux_x86_64.whl - paddlepaddle-latest-cp37-cp37m-linux_x86_64.whl
cpu-openblas - paddlepaddle-latest-cp27-cp27mu-linux_x86_64.whl paddlepaddle-latest-cp27-cp27m-linux_x86_64.whl - paddlepaddle-latest-cp35-cp35m-linux_x86_64.whl - paddlepaddle-latest-cp36-cp36m-linux_x86_64.whl - paddlepaddle-latest-cp37-cp37m-linux_x86_64.whl
cuda8-cudnn7-openblas paddlepaddle_gpu-latest-cp27-cp27mu-linux_x86_64.whl paddlepaddle_gpu-latest-cp27-cp27m-linux_x86_64.whl paddlepaddle_gpu-latest-cp35-cp35m-linux_x86_64.whl paddlepaddle_gpu-latest-cp36-cp36m-linux_x86_64.whl paddlepaddle_gpu-latest-cp37-cp37m-linux_x86_64.whl
cuda8-cudnn7-mkl paddlepaddle_gpu-latest-cp27-cp27mu-linux_x86_64.whl paddlepaddle_gpu-latest-cp27-cp27m-linux_x86_64.whl paddlepaddle_gpu-latest-cp35-cp35m-linux_x86_64.whl paddlepaddle_gpu-latest-cp36-cp36m-linux_x86_64.whl paddlepaddle_gpu-latest-cp37-cp37m-linux_x86_64.whl
cuda9-cudnn7-mkl paddlepaddle_gpu-latest-cp27-cp27mu-linux_x86_64.whl paddlepaddle_gpu-latest-cp27-cp27m-linux_x86_64.whl paddlepaddle_gpu-latest-cp35-cp35m-linux_x86_64.whl paddlepaddle_gpu-latest-cp36-cp36m-linux_x86_64.whl paddlepaddle_gpu-latest-cp37-cp37m-linux_x86_64.whl
cuda10_cudnn7-mkl paddlepaddle_gpu-latest-cp27-cp27mu-linux_x86_64.whl paddlepaddle_gpu-latest-cp27-cp27m-linux_x86_64.whl paddlepaddle_gpu-latest-cp35-cp35m-linux_x86_64.whl - paddlepaddle_gpu-latest-cp36-cp36m-linux_x86_64.whl - paddlepaddle_gpu-latest-cp37-cp37m-linux_x86_64.whl
-

- - -

- -## Execute the PaddlePaddle training program in Docker - -*** - -Suppose you have written a PaddlePaddle program in the current directory (such as /home/work): `train.py` ( refer to [PaddlePaddleBook](https://github.com/PaddlePaddle/book/blob/develop/01.fit_a_line/README.cn.md) to write), you can start the training with the following command: - - - cd /home/work - docker run -it -v $PWD:/work hub.baidubce.com/paddlepaddle/paddle /work/train.py - - -In the above commands, the `-it` parameter indicates that the container has been run interactively; `-v $PWD:/work` specifies that the current path (the absolute path where the PWD variable in Linux will expand to the current path) is mounted to the `:/work` directory inside the container: `Hub.baidubce.com/paddlepaddle/paddle` specifies the container to be used; finally `/work/train.py` is the command executed inside the container, ie. the training program. - -Of course, you can also enter into the Docker container and execute or debug your code interactively: - - - docker run -it -v $PWD:/work hub.baidubce.com/paddlepaddle/paddle /bin/bash - cd /work - python train.py - - -**Note: In order to reduce the size, vim is not installed in PaddlePaddle Docker image by default. You can edit the code in the container after executing ** `apt-get install -y vim` **(which installs vim for you) in the container.** - -

- -## Start PaddlePaddle Book tutorial with Docker - -*** - -Use Docker to quickly launch a local Jupyter Notebook containing the PaddlePaddle official Book tutorial, which can be viewed on the web. PaddlePaddle Book is an interactive Jupyter Notebook for users and developers. If you want to learn more about deep learning, PaddlePaddle Book is definitely your best choice. You can read tutorials or create and share interactive documents with code, formulas, charts, and text. - -We provide a Docker image that can run the PaddlePaddle Book directly, running directly: - -`docker run -p 8888:8888 hub.baidubce.com/paddlepaddle/book` - -Domestic users can use the following image source to speed up access: - -`docker run -p 8888:8888 hub.baidubce.com/paddlepaddle/book` - -Then enter the following URL in your browser: - -`http://localhost:8888/` - -It's that simple and bon voyage! For further questions, please refer to the [FAQ](#FAQ). - - -

-## Perform GPU training using Docker - -*** - -In order to ensure that the GPU driver works properly in the image, we recommend using [nvidia-docker](https://github.com/NVIDIA/nvidia-docker) to run the image. Don't forget to install the latest GPU drivers on your physical machine in advance. - -`Nvidia-docker run -it -v $PWD:/work hub.baidubce.com/paddlepaddle/paddle:latest-gpu /bin/bash` - -**Note: If you don't have nvidia-docker installed, you can try the following to mount the CUDA library and Linux devices into the Docker container:** - - - export CUDA_SO="$(\ls /usr/lib64/libcuda* | xargs -I{} echo '-v {}:{}') \ - $(\ls /usr/lib64/libnvidia* | xargs -I{} echo '-v {}:{}')" - export DEVICES=$(\ls /dev/nvidia* | xargs -I{} echo '--device {}:{}') - docker run ${CUDA_SO} \ - ${DEVICES} -it hub.baidubce.com/paddlepaddle/paddle:latest-gpu - - - -**About AVX:** - -AVX is a set of CPU instructions that speeds up the calculation of PaddlePaddle. The latest PaddlePaddle Docker image is enabled by default for AVX compilation, so if your computer does not support AVX, you need to compile PaddlePaddle to no-avx version separately. - -The following instructions can check if the Linux computer supports AVX: - -`if cat /proc/cpuinfo | grep -i avx; then echo Yes; else echo No; fi` - -If the output is No, you need to choose a mirror that uses no-AVX. diff --git a/doc/fluid/design/mkldnn/inplace/images/inplace.svg b/doc/fluid/design/mkldnn/inplace/images/inplace.svg new file mode 100644 index 0000000000000000000000000000000000000000..69439d30643fd9b40e74498a3b94401413c5c181 --- /dev/null +++ b/doc/fluid/design/mkldnn/inplace/images/inplace.svg @@ -0,0 +1,120 @@ + + + + + + +G + + +cluster_0 + +in-placed + + + +e1 + +relu + + + +b + +b + + + +e1->b + + + + + +e2 + +elementwise_add + + + +e + +b + + + +e2->e + + + + + +e3 + +elementwise_mul + + + +g + +g + + + +e3->g + + + + + +a + +a + + + +a->e1 + + + + + +b->e2 + + + + + +e->e3 + + + + + +d + +d + + + +d->e2 + + + + + +f + +f + + + +f->e3 + + + + + diff --git a/doc/fluid/design/mkldnn/inplace/images/multi-output-inplace.svg b/doc/fluid/design/mkldnn/inplace/images/multi-output-inplace.svg new file mode 100644 index 0000000000000000000000000000000000000000..c046424e5197498689b3db1e776415fca32e5033 --- /dev/null +++ b/doc/fluid/design/mkldnn/inplace/images/multi-output-inplace.svg @@ -0,0 +1,265 @@ + + + + + + +G + + +cluster_before + +before + + +cluster_0 + +to be in-placed + + +cluster_after + +after + + +cluster_0b + +applied in-placed + + + +op1 + +elementwise_add + + + +c + +c + + + +op1->c + + + + + +op2 + +top_k +inputs_vars{c} + + + +d + +d + + + +op2->d + + + + + +e + +e + + + +op2->e + + + + + +op3 + +top_k +inputs_vars{c} + + + +g + +g + + + +op3->g + + + + + +h + +h + + + +op3->h + + + + + +c->op2 + + + + + +c->op3 + + + + + +a + +a + + + +a->op1 + + + + + +b + +b + + + +b->op1 + + + + + +op1b + +elementwise_add + + + +cb + +a + + + +op1b->cb + + + + + +op2b + +top_k +input_vars{a} + + + +db + +d + + + +op2b->db + + + + + +eb + +e + + + +op2b->eb + + + + + +op3b + +top_k +input_vars{a} + + + +gb + +g + + + +op3b->gb + + + + + +hb + +h + + + +op3b->hb + + + + + +cb->op2b + + + + + +cb->op3b + + + + + +ab + +a + + + +ab->op1b + + + + + +bb + +b + + + +bb->op1b + + + + + diff --git a/doc/fluid/design/mkldnn/inplace/images/unwanted-inplace.svg b/doc/fluid/design/mkldnn/inplace/images/unwanted-inplace.svg new file mode 100644 index 0000000000000000000000000000000000000000..2ee14e458f630021559996dc63209793cd7400a2 --- /dev/null +++ 
b/doc/fluid/design/mkldnn/inplace/images/unwanted-inplace.svg @@ -0,0 +1,74 @@ + + + + + + +G + + +cluster_0 + +in-placed + + + +e1 + +softmax +<oneDNN> + + + +c + +b + + + +e1->c + + + + + +e2 + +layer_norm +<Paddle CPU> + + + +e + +a + + + +e2->e + + + + + +c->e2 + + + + + +a + +a + + + +a->e1 + + + + + diff --git a/doc/fluid/design/mkldnn/inplace/index_en.rst b/doc/fluid/design/mkldnn/inplace/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..7c9c2f838e7f5138d4ed3e2157aa9c0a4de81f5d --- /dev/null +++ b/doc/fluid/design/mkldnn/inplace/index_en.rst @@ -0,0 +1,7 @@ +MKL-DNN IN-PLACE execution support +-------------------------------------- + +.. toctree:: + :maxdepth: 1 + + inplace.md diff --git a/doc/fluid/design/mkldnn/inplace/inplace.md b/doc/fluid/design/mkldnn/inplace/inplace.md new file mode 100644 index 0000000000000000000000000000000000000000..c0b86d80725e3b7ee319754189aa839be89dfe59 --- /dev/null +++ b/doc/fluid/design/mkldnn/inplace/inplace.md @@ -0,0 +1,97 @@ +## Introduction + +PaddlePaddle is implementing concept of in-place execution of some of operators. +The idea of in-place execution is present on following picture: + +![](images/inplace.svg) + +Exemplary graph presents three operators where one of them (type of elementwise_add) is to be performing in-place computation. In-place computation means that input variable (Tensor) is used for both input and output. This means that one of inputs will be overwritten with computational results. In presented picture in-place operator (elementwise_add) is +having two input nodes: *b* and *d* and output *b*. So *b* is used for input and output and underneath it is represented by a one, shared Tensor. So this means that variable *b* is initially holding some input data and after the operator computation, input data is lost and replaced by computation's result. + +Currently assumption is that if operator can have in-place processing then all its kernel (including oneDNN) should be able to work properly in in-place mode. To match this functionality oneDNN integration was extended to support in-place execution for some of its operators: +- activations +- softmax +- elementwise_add +- gelu* +- sum** + +Adventages of in-place computation are: +* lower memory usage +* improved performance of operators + +To have in-place computation, we need to analyze graph to search for where in-place execution could happen +and then make some of variables to be shared by input and output of in-place capable operator. + +Hence there are two parts of in-place support: +- in-place execution support within an operator +- oneDNN inplace C-API pass + +#### in-place execution support within an operator +For in-place execution, oneDNN primitive needs to have the same oneDNN memory object passed as input (src) and output (dst). More precisely, we check if pointers to allocated buffers are the same for input and output +and this indicates if we use one oneDNN memory object or two. For example: + +`auto src_memory_p = handler.AcquireSrcMemory(x);` + +`auto dst_memory_p = x->IsSharedBufferWith(*y) ? + src_memory_p : handler.AcquireDstMemory(y);` + +#### oneDNN in-place pass +As mentioned earlier, idea of in-place pass is to locate operators with oneDNN kerenels that can perform in-place execution and then modify output node's variables to match input node's variable of the operator. 
##### Identifying operators with oneDNN kernels capable of in-place execution

This identification is the result of two checks:
- whether the operator has an *InplaceInferer* structure, and
- whether the operator is on the list of oneDNN's in-place supported operators.

*InplaceInferer* is a struct that declares a mapping (one of the inputs to one of the outputs) indicating that the considered operator can perform in-place execution and that both vars (the input and output mentioned in *InplaceInferer*) will share a tensor. This is not enough for oneDNN in-place C-API execution, as the oneDNN library may not provide in-place computation for all operators that PaddlePaddle requires to run in-place, and some operators would have to simulate in-place computation through an external buffer, which would not bring any benefits; so there is no point in enabling those in-place computations for C-API inference.

##### Restrictions

The oneDNN in-place pass takes advantage of the graph pattern detector. The pattern consists of:

Node (Var 1) -> Node (oneDNN op to be in-placed) -> Node (Var 2) -> Node (next op - any type, oneDNN/native CPU - after the in-placed one) -> Node (Var 3)

The pattern is restricted so that the op to be in-placed is of oneDNN type. Due to the fact that some operators have more than one input and their output may be consumed by more than one operator, it is expected that the pattern may be detected multiple times for the same operator, e.g. once for one input, then for the second input, etc.

Just having a oneDNN operator capable of in-place execution is not enough to have in-place execution enabled, hence the following rules are checked by the oneDNN in-place pass:
1. If the input node to the in-place operator is also an input to a different operator, then in-place computation cannot be performed, as there is a risk that the other operator consuming the in-placed op's input will be executed after the in-placed operator and therefore get invalid input data (overwritten by the in-place computation).
2. If after the in-placed operator there is another operator that is reusing the in-place op's input var, then in-place cannot happen unless the next op can perform in-place computation. The next picture presents the idea.

![](images/unwanted-inplace.svg)

In the picture we can see that the in-place pass is considering enabling in-place execution for the softmax oneDNN kernel. All is fine, but the next operator after softmax is layer_norm (non-oneDNN). Layer_norm is already reusing the input of softmax due to some earlier memory optimization pass being applied. If we make the softmax op perform in-place computation, then it will also make layer_norm work in-place (b -> a). The thing is that layer_norm cannot work in-place (InplaceInferer is not present), so if we force it to do so, layer_norm will produce an invalid result.

##### In-place pass modification to graph when applied

When a sub-graph complies with the restrictions, in-place computation can be enabled. This is done by the following steps (a toy sketch of the renaming appears after this list):
1. Changing the name of the output node of the in-place op to match the input node of the in-place op.
2. Renaming the output var in the output list of the node representing the operator.
3. Changing the name of the input var in the next op's input list.
4. If the next op is performing in-place computation, then we need to update the next op's output as well, so as not to break its in-place computation.
5. If there are multiple operators after our in-place operator, then we need to update all of them (their input vars).
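For illustration only, the following is a minimal Python sketch of these renaming steps on a toy, dict-based graph. It is not Paddle's actual pass (which is implemented in C++ over the SSA graph using the pattern detector described above); the op and variable names follow the example in the picture below.

```python
# Toy sketch of the in-place renaming steps; operates on plain dicts, not Paddle's IR.

def apply_inplace(ops, inplace_op_idx, in_var, out_var):
    """Make the op at `inplace_op_idx` write to `in_var` instead of `out_var`
    and patch all later ops that referenced `out_var`."""
    inplace_op = ops[inplace_op_idx]
    # Steps 1-2: the in-placed op's output is renamed to its input variable.
    inplace_op["outputs"] = [in_var if v == out_var else v
                             for v in inplace_op["outputs"]]
    # Steps 3-5: every later op that consumed the old output var is updated;
    # an output equal to the old name (an in-place chain) is renamed as well.
    for op in ops[inplace_op_idx + 1:]:
        op["inputs"] = [in_var if v == out_var else v for v in op["inputs"]]
        op["outputs"] = [in_var if v == out_var else v for v in op["outputs"]]
    return ops


# Toy graph: elementwise_add writes c, which is consumed by two top_k ops.
ops = [
    {"type": "elementwise_add", "inputs": ["a", "b"], "outputs": ["c"]},
    {"type": "top_k", "inputs": ["c"], "outputs": ["d", "e"]},
    {"type": "top_k", "inputs": ["c"], "outputs": ["g", "h"]},
]
apply_inplace(ops, inplace_op_idx=0, in_var="a", out_var="c")
for op in ops:
    print(op)  # both top_k ops now read "a" instead of "c"
```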
Idea is presented in the following picture: + +![](images/multi-output-inplace.svg) + +We can see that there are two *top_k* operators after *elementwise_add* operator that is set to work in-placed. Each of *top_k* is having its own list of input vars, so we need to rename relevant input var to new name. As in-place pattern +consists of: input node -> in-place op -> output node -> next op -> next op's output. For presented graph, there will be 8 patterns detected: +- b -> elementwise_add -> c -> top_k (left one) -> d +- b -> elementwise_add -> c -> top_k (left one) -> e +- b -> elementwise_add -> c -> top_k (right one) -> g +- b -> elementwise_add -> c -> top_k (right one) -> h +- a -> elementwise_add -> c -> top_k (left one) -> d +- a -> elementwise_add -> c -> top_k (left one) -> e +- a -> elementwise_add -> c -> top_k (right one) -> g +- a -> elementwise_add -> c -> top_k (right one) -> h + +Important thing is to remember original name of output, before it is renamed, so later we can +replace this original name in all of next op instances. + +\* oneDNN gelu kernel is able to perform in-place execution, but currently gelu op does not support in-place execution. + +\*\* sum kernel is using oneDNN sum primitive that does not provide in-place exection, so in-place computation is done faked through external buffer. So it was not added into oneDNN inplace pass. diff --git a/doc/fluid/design/mkldnn/inplace/scripts/inplace.dot b/doc/fluid/design/mkldnn/inplace/scripts/inplace.dot new file mode 100644 index 0000000000000000000000000000000000000000..7a5d22bb2922d6977e23c7dca30676d848180bd4 --- /dev/null +++ b/doc/fluid/design/mkldnn/inplace/scripts/inplace.dot @@ -0,0 +1,23 @@ +digraph G { + overlap=false + e1[label="relu"] + e2[label="elementwise_add"] + e3[label="elementwise_mul"] + + a -> e1 + e1 -> b + b[label="b"] + e[label="b"] + + subgraph cluster_0 { + label="in-placed" + b -> e2 + d -> e2 + e2 -> e + } + + + e -> e3 + f -> e3 -> g + +} diff --git a/doc/fluid/design/mkldnn/inplace/scripts/multi-output-inplace.dot b/doc/fluid/design/mkldnn/inplace/scripts/multi-output-inplace.dot new file mode 100644 index 0000000000000000000000000000000000000000..9778f5f18726422befc06125b1030facc7fb22a2 --- /dev/null +++ b/doc/fluid/design/mkldnn/inplace/scripts/multi-output-inplace.dot @@ -0,0 +1,63 @@ +digraph G { +subgraph cluster_before { + label="before" + style=dotted + op1[label="elementwise_add"] + op2[label="top_k\ninputs_vars{c}"] + op3[label="top_k\ninputs_vars{c}"] + + + c[label="c"] + + subgraph cluster_0 { + style=solid + label="to be in-placed" + a -> op1 + b-> op1 + op1 -> c + } + + + c -> op2 + c -> op3 + + op2 -> d + op2 -> e + op3 -> g + op3 -> h +} +subgraph cluster_after { + label="after" + style=dotted + op1b[label="elementwise_add"] + op2b[label="top_k\ninput_vars{a}"] + op3b[label="top_k\ninput_vars{a}"] + + + cb[label="a"] + ab[label="a"] + bb[label="b"] + db[label="d"] + eb[label="e"] + gb[label="g"] + hb[label="h"] + + subgraph cluster_0b { + style=solid + label="applied in-placed" + ab -> op1b + bb-> op1b + op1b -> cb + } + + + cb -> op2b + cb -> op3b + + op2b -> db + op2b -> eb + op3b -> gb + op3b -> hb +} + +} diff --git a/doc/fluid/design/mkldnn/inplace/scripts/unwanted-inplace.dot b/doc/fluid/design/mkldnn/inplace/scripts/unwanted-inplace.dot new file mode 100644 index 0000000000000000000000000000000000000000..a772736c31fb6a520c8e2761326550211a2edf54 --- /dev/null +++ b/doc/fluid/design/mkldnn/inplace/scripts/unwanted-inplace.dot @@ -0,0 +1,17 @@ +digraph G { + + 
e1[label="softmax\n"] + e2[label="layer_norm\n"] + + c[label="b"] + e[label="a"] + subgraph cluster_0 { + label="in-placed" + a -> e1 + e1 -> c + } + + c -> e2 + e2 -> e + +} diff --git a/doc/fluid/faq/install_cn.md b/doc/fluid/faq/install_cn.md index 2ee368b7b95e939097b80e7b55ebe8fd7a047077..b39873bc233a4d69e5b4eda1681561e7159a6a6c 100644 --- a/doc/fluid/faq/install_cn.md +++ b/doc/fluid/faq/install_cn.md @@ -230,7 +230,7 @@ MacOS本机直接通过源码编译的方式安装PaddlePaddle出现`[paddle/flu + 问题解答 -使用cmake版本为3.4则可。自行编译建议GCC版本:4.8、5.4以及更高。 +CMake我们支持3.10以上版本,但GPU编译时3.12/3.13/3.14版本存在官方[Bug](https://cmake.org/pipermail/cmake/2018-September/068195.html),我们建议您使用CMake3.16版本。自行编译建议GCC版本:4.8、5.4以及更高。 ##### Q: `wget: command not found` diff --git a/doc/fluid/index_cn.rst b/doc/fluid/index_cn.rst index b1c54fd7e9ae02a61e4cd05b9eb86f06b7545914..9241825ba1aade26582dc728f5978444c4989fd8 100644 --- a/doc/fluid/index_cn.rst +++ b/doc/fluid/index_cn.rst @@ -12,8 +12,6 @@ install/index_cn.rst beginners_guide/index_cn.rst - user_guides/index_cn.rst advanced_guide/index_cn.rst api_cn/index_cn.rst - faq/index_cn.rst release_note_cn.md diff --git a/doc/fluid/index_en.rst b/doc/fluid/index_en.rst index 40a4e96e5cd7d293b525f38e862c503915bc8510..706929c4866d64be1c881f762d5871a3926cd13c 100644 --- a/doc/fluid/index_en.rst +++ b/doc/fluid/index_en.rst @@ -6,8 +6,6 @@ install/index_en.rst beginners_guide/index_en.rst - user_guides/index_en.rst advanced_guide/index_en.rst api/index_en.rst - faq/index_en.rst release_note_en.md diff --git a/doc/fluid/install/Tables.md b/doc/fluid/install/Tables.md index 8c3a100d2e45bdded2a5f0f480a06b84ef105843..25969a18ad3cb7ec3b76fb0b214f9b088994bad6 100644 --- a/doc/fluid/install/Tables.md +++ b/doc/fluid/install/Tables.md @@ -5,106 +5,106 @@

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
依赖包名称 版本 说明 安装命令
CMake 3.4
GCC (Linux Only) 4.8 / 5.4 推荐使用CentOS的devtools2
Clang (MacOS Only) 9.0及以上 通常使用MacOS 10.11及以上的系统对应的Clang版本即可
Python(64 bit) 2.7.x. 或 3.5+.x 依赖libpython2.7.so 或 libpython3.5+.so apt install python-dev yum install python-devel 如果安装python3请访问Python 官网
SWIG 最低 2.0 apt install swig yum install swig
wget any apt install wget yum install wget
openblas any 可选
pip >=9.0.1 apt install python-pip yum install python-pip
numpy >=1.12.0 pip install numpy
protobuf >=3.1.0 pip install protobuf
wheel any pip install wheel
patchELF any apt install patchelf 或参见github patchELF 官方文档
go >=1.8 可选
setuptools >= 28.0.0
unrar brew install unrar (For MacOS), apt-get install unrar (For Ubuntu)
依赖包名称 版本 说明 安装命令
CMake 3.10, 3.11, 3.15, 3.16(推荐),3.17 3.12/3.13/3.14 版本存在官方Bug,请跳过该版本
GCC (Linux Only) 4.8 / 5.4 推荐使用CentOS的devtools2
Clang (MacOS Only) 9.0及以上 通常使用MacOS 10.11及以上的系统对应的Clang版本即可
Python(64 bit) 2.7.x. 或 3.5+.x 依赖libpython2.7.so 或 libpython3.5+.so apt install python-dev yum install python-devel 如果安装python3请访问Python 官网
SWIG 最低 2.0 apt install swig yum install swig
wget any apt install wget yum install wget
openblas any 可选
pip >=9.0.1 apt install python-pip yum install python-pip
numpy >=1.12.0 pip install numpy
protobuf >=3.1.0 pip install protobuf
wheel any pip install wheel
patchELF any apt install patchelf 或参见github patchELF 官方文档
go >=1.8 可选
setuptools >= 28.0.0
unrar brew install unrar (For MacOS), apt-get install unrar (For Ubuntu)

@@ -115,76 +115,71 @@

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
选项 说明 默认值
WITH_GPU 是否支持GPU ON
WITH_DSO 是否运行时动态加载CUDA动态库,而非静态加载CUDA动态库 ON
WITH_AVX 是否编译含有AVX指令集的PaddlePaddle二进制文件 ON
WITH_PYTHON 是否内嵌PYTHON解释器 ON
WITH_TESTING 是否开启单元测试 OFF
WITH_MKL 是否使用MKL数学库,如果为否则是用OpenBLAS ON
WITH_SYSTEM_BLAS 是否使用系统自带的BLAS OFF
WITH_DISTRIBUTE 是否编译带有分布式的版本 OFF
WITH_BRPC_RDMA 是否使用BRPC RDMA作为RPC协议 OFF
ON_INFER 是否打开预测优化 OFF
CUDA_ARCH_NAME 是否只针对当前CUDA架构编译 All:编译所有可支持的CUDA架构 可选:Auto 自动识别当前环境的架构编译
TENSORRT_ROOT 指定TensorRT路径 Windows下默认值为'/',Linux下默认值为 '/usr/'
选项 说明 默认值
WITH_GPU 是否支持GPU ON
WITH_AVX 是否编译含有AVX指令集的PaddlePaddle二进制文件 ON
WITH_PYTHON 是否内嵌PYTHON解释器 ON
WITH_TESTING 是否开启单元测试 OFF
WITH_MKL 是否使用MKL数学库,如果为否则是用OpenBLAS ON
WITH_SYSTEM_BLAS 是否使用系统自带的BLAS OFF
WITH_DISTRIBUTE 是否编译带有分布式的版本 OFF
WITH_BRPC_RDMA 是否使用BRPC RDMA作为RPC协议 OFF
ON_INFER 是否打开预测优化 OFF
CUDA_ARCH_NAME 是否只针对当前CUDA架构编译 All:编译所有可支持的CUDA架构 可选:Auto 自动识别当前环境的架构编译
TENSORRT_ROOT 指定TensorRT路径 Windows下默认值为'/',Linux下默认值为 '/usr/'

@@ -220,25 +215,21 @@ PaddePaddle通过编译时指定路径来实现引用各种BLAS/CUDA/cuDNN库。

- - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + +
版本号 版本说明
paddlepaddle==[版本号] 例如 paddlepaddle==1.7.0 只支持CPU对应版本的PaddlePaddle,具体版本请参见Pypi
paddlepaddle-gpu==[版本号] 例如 paddlepaddle-gpu==1.7.0 默认安装支持CUDA 10.0和cuDNN 7的对应[版本号]的PaddlePaddle安装包
paddlepaddle-gpu==[版本号].postXX 例如 paddlepaddle-gpu==1.7.0.post97 支持CUDA 9.0和cuDNN 7的对应PaddlePaddle版本的安装包
版本号 版本说明
paddlepaddle==[版本号] 例如 paddlepaddle==2.0.0a0 只支持CPU对应版本的PaddlePaddle,具体版本请参见Pypi
paddlepaddle-gpu==[版本号] 例如 paddlepaddle-gpu==2.0.0a0 默认安装支持CUDA 10.0和cuDNN 7的对应[版本号]的PaddlePaddle安装包

@@ -246,7 +237,7 @@ PaddePaddle通过编译时指定路径来实现引用各种BLAS/CUDA/cuDNN库。 您可以在 [Release History](https://pypi.org/project/paddlepaddle-gpu/#history) 中找到PaddlePaddle-gpu的各个发行版本。 > 其中`postXX` 对应的是CUDA和cuDNN的版本,`postXX`之前的数字代表Paddle的版本 -需要注意的是,命令中 paddlepaddle-gpu 在windows环境下,会默认安装支持CUDA 10.0和cuDNN 7的对应[版本号]的PaddlePaddle安装包 +需要注意的是,命令中 paddlepaddle-gpu==2.0.0a0 在windows环境下,会默认安装支持CUDA 10.0和cuDNN 7的对应[版本号]的PaddlePaddle安装包

@@ -255,140 +246,79 @@ PaddePaddle通过编译时指定路径来实现引用各种BLAS/CUDA/cuDNN库。

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
**Removed rows (1.7.0 packages):**

| Version description | cp27-cp27mu | cp27-cp27m | cp35-cp35m | cp36-cp36m | cp37-cp37m |
|---|---|---|---|---|---|
| cpu-mkl | paddlepaddle-1.7.0-cp27-cp27mu-linux_x86_64.whl | paddlepaddle-1.7.0-cp27-cp27m-linux_x86_64.whl | paddlepaddle-1.7.0-cp35-cp35m-linux_x86_64.whl | paddlepaddle-1.7.0-cp36-cp36m-linux_x86_64.whl | paddlepaddle-1.7.0-cp37-cp37m-linux_x86_64.whl |
| cpu-openblas | paddlepaddle-1.7.0-cp27-cp27mu-linux_x86_64.whl | paddlepaddle-1.7.0-cp27-cp27m-linux_x86_64.whl | paddlepaddle-1.7.0-cp35-cp35m-linux_x86_64.whl | paddlepaddle-1.7.0-cp36-cp36m-linux_x86_64.whl | paddlepaddle-1.7.0-cp37-cp37m-linux_x86_64.whl |
| cuda9-cudnn7-avx-openblas | paddlepaddle_gpu-1.7.0-cp27-cp27mu-linux_x86_64.whl | paddlepaddle_gpu-1.7.0-cp27-cp27m-linux_x86_64.whl | paddlepaddle_gpu-1.7.0-cp35-cp35m-linux_x86_64.whl | paddlepaddle_gpu-1.7.0-cp36-cp36m-linux_x86_64.whl | paddlepaddle_gpu-1.7.0-cp37-cp37m-linux_x86_64.whl |
| cuda9-cudnn7-mkl | paddlepaddle_gpu-1.7.0-cp27-cp27mu-linux_x86_64.whl | paddlepaddle_gpu-1.7.0-cp27-cp27m-linux_x86_64.whl | paddlepaddle_gpu-1.7.0-cp35-cp35m-linux_x86_64.whl | paddlepaddle_gpu-1.7.0-cp36-cp36m-linux_x86_64.whl | paddlepaddle_gpu-1.7.0-cp37-cp37m-linux_x86_64.whl |
| cuda10_cudnn7-mkl | paddlepaddle_gpu-1.7.0-cp27-cp27mu-linux_x86_64.whl | paddlepaddle_gpu-1.7.0-cp27-cp27m-linux_x86_64.whl | paddlepaddle_gpu-1.7.0-cp35-cp35m-linux_x86_64.whl | paddlepaddle_gpu-1.7.0-cp36-cp36m-linux_x86_64.whl | paddlepaddle_gpu-1.7.0-cp37-cp37m-linux_x86_64.whl |
| win_cpu_mkl | - | paddlepaddle-1.7.0-cp27-cp27m-win_amd64.whl | paddlepaddle-1.7.0-cp35-cp35m-win_amd64.whl | paddlepaddle-1.7.0-cp36-cp36m-win_amd64.whl | paddlepaddle-1.7.0-cp37-cp37m-win_amd64.whl |
| win_cuda9_cudnn7_mkl | - | paddlepaddle_gpu-1.7.0-cp27-cp27m-win_amd64.whl | paddlepaddle_gpu-1.7.0-cp35-cp35m-win_amd64.whl | paddlepaddle_gpu-1.7.0-cp36-cp36m-win_amd64.whl | paddlepaddle_gpu-1.7.0-cp37-cp37m-win_amd64.whl |
| win_cuda10_cudnn7_mkl | - | paddlepaddle_gpu-1.7.0-cp27-cp27m-win_amd64.whl | paddlepaddle_gpu-1.7.0-cp35-cp35m-win_amd64.whl | paddlepaddle_gpu-1.7.0-cp36-cp36m-win_amd64.whl | paddlepaddle_gpu-1.7.0-cp37-cp37m-win_amd64.whl |
| win_cpu_openblas | - | paddlepaddle-1.7.0-cp27-cp27m-win_amd64.whl | paddlepaddle-1.7.0-cp35-cp35m-win_amd64.whl | paddlepaddle-1.7.0-cp36-cp36m-win_amd64.whl | paddlepaddle-1.7.0-cp37-cp37m-win_amd64.whl |
| win_cuda9_cudnn7_openblas | - | paddlepaddle_gpu-1.7.0-cp27-cp27m-win_amd64.whl | paddlepaddle_gpu-1.7.0-cp35-cp35m-win_amd64.whl | paddlepaddle_gpu-1.7.0-cp36-cp36m-win_amd64.whl | paddlepaddle_gpu-1.7.0-cp37-cp37m-win_amd64.whl |
| mac_cpu | - | paddlepaddle-1.7.0-cp27-cp27m-macosx_10_6_intel.whl | paddlepaddle-1.7.0-cp35-cp35m-macosx_10_6_intel.whl | paddlepaddle-1.7.0-cp36-cp36m-macosx_10_6_intel.whl | paddlepaddle-1.7.0-cp37-cp37m-macosx_10_6_intel.whl |

**Added rows (2.0.0a0 packages):**

| Version description | cp27-cp27mu | cp27-cp27m | cp35-cp35m | cp36-cp36m | cp37-cp37m |
|---|---|---|---|---|---|
| cpu-mkl | paddlepaddle-2.0.0a0-cp27-cp27mu-linux_x86_64.whl | paddlepaddle-2.0.0a0-cp27-cp27m-linux_x86_64.whl | paddlepaddle-2.0.0a0-cp35-cp35m-linux_x86_64.whl | paddlepaddle-2.0.0a0-cp36-cp36m-linux_x86_64.whl | paddlepaddle-2.0.0a0-cp37-cp37m-linux_x86_64.whl |
| cuda10_cudnn7-mkl | paddlepaddle_gpu-2.0.0a0-cp27-cp27mu-linux_x86_64.whl | paddlepaddle_gpu-2.0.0a0-cp27-cp27m-linux_x86_64.whl | paddlepaddle_gpu-2.0.0a0-cp35-cp35m-linux_x86_64.whl | paddlepaddle_gpu-2.0.0a0-cp36-cp36m-linux_x86_64.whl | paddlepaddle_gpu-2.0.0a0-cp37-cp37m-linux_x86_64.whl |
| win_cpu_mkl | - | paddlepaddle-2.0.0a0-cp27-cp27m-win_amd64.whl | paddlepaddle-2.0.0a0-cp35-cp35m-win_amd64.whl | paddlepaddle-2.0.0a0-cp36-cp36m-win_amd64.whl | paddlepaddle-2.0.0a0-cp37-cp37m-win_amd64.whl |
| win_cuda10_cudnn7_mkl | - | paddlepaddle_gpu-2.0.0a0-cp27-cp27m-win_amd64.whl | paddlepaddle_gpu-2.0.0a0-cp35-cp35m-win_amd64.whl | paddlepaddle_gpu-2.0.0a0-cp36-cp36m-win_amd64.whl | paddlepaddle_gpu-2.0.0a0-cp37-cp37m-win_amd64.whl |
| mac_cpu | - | paddlepaddle-2.0.0a0-cp27-cp27m-macosx_10_6_intel.whl | paddlepaddle-2.0.0a0-cp35-cp35m-macosx_10_6_intel.whl | paddlepaddle-2.0.0a0-cp36-cp36m-macosx_10_6_intel.whl | paddlepaddle-2.0.0a0-cp37-cp37m-macosx_10_6_intel.whl |
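Once one of the wheels above has been downloaded, it is installed by filename; a minimal sketch using one of the listed examples:

```bash
# Install a locally downloaded wheel from the table above
pip install -U paddlepaddle_gpu-2.0.0a0-cp37-cp37m-linux_x86_64.whl
```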

@@ -399,12 +329,6 @@ PaddlePaddle references the various BLAS/CUDA/cuDNN libraries through paths specified at compile time.
 cpu-mkl: supports CPU training and inference, using the Intel MKL math library
-cpu-openblas: supports CPU training and inference, using the OpenBLAS math library
-
-cuda9-cudnn7-openblas: supports GPU training and inference, using the OpenBLAS math library
-
-cuda9_cudnn7-mkl: supports GPU training and inference, using the Intel MKL math library
-
 cuda10_cudnn7-mkl: supports GPU training and inference, using the Intel MKL math library
@@ -437,106 +361,108 @@ platform tag: similar to 'linux_x86_64', 'any'
 ## **Multi-version whl package list - dev**

| Version description | cp27-cp27mu | cp27-cp27m | cp35-cp35m | cp36-cp36m | cp37-cp37m |
|---|---|---|---|---|---|
| cpu-mkl | paddlepaddle-latest-cp27-cp27mu-linux_x86_64.whl | paddlepaddle-latest-cp27-cp27m-linux_x86_64.whl | paddlepaddle-latest-cp35-cp35m-linux_x86_64.whl | paddlepaddle-latest-cp36-cp36m-linux_x86_64.whl | paddlepaddle-latest-cp37-cp37m-linux_x86_64.whl |
| cpu-openblas | paddlepaddle-latest-cp27-cp27mu-linux_x86_64.whl | paddlepaddle-latest-cp27-cp27m-linux_x86_64.whl | paddlepaddle-latest-cp35-cp35m-linux_x86_64.whl | paddlepaddle-latest-cp36-cp36m-linux_x86_64.whl | paddlepaddle-latest-cp37-cp37m-linux_x86_64.whl |
| cuda9-cudnn7-openblas | paddlepaddle_gpu-latest-cp27-cp27mu-linux_x86_64.whl | paddlepaddle_gpu-latest-cp27-cp27m-linux_x86_64.whl | paddlepaddle_gpu-latest-cp35-cp35m-linux_x86_64.whl | paddlepaddle_gpu-latest-cp36-cp36m-linux_x86_64.whl | paddlepaddle_gpu-latest-cp37-cp37m-linux_x86_64.whl |
| cuda9-cudnn7-mkl | paddlepaddle_gpu-latest-cp27-cp27mu-linux_x86_64.whl | paddlepaddle_gpu-latest-cp27-cp27m-linux_x86_64.whl | paddlepaddle_gpu-latest-cp35-cp35m-linux_x86_64.whl | paddlepaddle_gpu-latest-cp36-cp36m-linux_x86_64.whl | paddlepaddle_gpu-latest-cp37-cp37m-linux_x86_64.whl |
| cuda10-cudnn7-mkl | paddlepaddle_gpu-latest-cp27-cp27mu-linux_x86_64.whl | paddlepaddle_gpu-latest-cp27-cp27m-linux_x86_64.whl | paddlepaddle_gpu-latest-cp35-cp35m-linux_x86_64.whl | paddlepaddle_gpu-latest-cp36-cp36m-linux_x86_64.whl | paddlepaddle_gpu-latest-cp37-cp37m-linux_x86_64.whl |


-## **Multi-version whl package list (gcc8.2) - release**
+## **Multi-version whl package list (gcc8.2) - develop**

**Removed rows (release, gcc 8.2):**

| Version description | cp27-cp27mu | cp27-cp27m | cp35-cp35m | cp36-cp36m | cp37-cp37m |
|---|---|---|---|---|---|
| cuda10.1-cudnn7-mkl | paddlepaddle_gpu-1.7.0.gcc8.post107-cp27-cp27mu-linux_x86_64.whl | paddlepaddle_gpu-1.7.0.gcc8.post107-cp27-cp27m-linux_x86_64.whl | paddlepaddle_gpu-1.7.0.gcc8.post107-cp35-cp35m-linux_x86_64.whl | paddlepaddle_gpu-1.7.0.gcc8.post107-cp36-cp36m-linux_x86_64.whl | paddlepaddle_gpu-1.7.0.gcc8.post107-cp37-cp37m-linux_x86_64.whl |

**Added rows (develop, gcc 8.2):**

| Version description | cp27-cp27mu | cp27-cp27m | cp35-cp35m | cp36-cp36m | cp37-cp37m |
|---|---|---|---|---|---|
| cuda10.1-cudnn7-mkl | paddlepaddle_gpu-0.0.0-cp27-cp27mu-linux_x86_64.whl | paddlepaddle_gpu-0.0.0-cp27-cp27m-linux_x86_64.whl | paddlepaddle_gpu-0.0.0-cp35-cp35m-linux_x86_64.whl | paddlepaddle_gpu-0.0.0-cp36-cp36m-linux_x86_64.whl | paddlepaddle_gpu-0.0.0-cp37-cp37m-linux_x86_64.whl |


@@ -604,4 +530,3 @@ PaddlePaddle Book是为用户和开发者制作的一个交互式的Jupyter Note export DEVICES=$(\ls /dev/nvidia* | xargs -I{} echo '--device {}:{}') docker run ${CUDA_SO} \ ${DEVICES} -it hub.baidubce.com/paddlepaddle/paddle:latest-gpu - diff --git a/doc/fluid/install/Tables_en.md b/doc/fluid/install/Tables_en.md index a1b1d851f61fd5bf4f8104c75aca5f105e10569d..ec27275a1b43187c5867001ceeb8cbceec94c928 100644 --- a/doc/fluid/install/Tables_en.md +++ b/doc/fluid/install/Tables_en.md @@ -1,4 +1,3 @@ -*** # Appendix @@ -18,8 +17,8 @@ CMake - 3.4 - + 3.10, 3.11, 3.15, 3.16(Recommend),3.17 + There is an official bug in version 3.12/3.13/3.14, please skip this version @@ -27,12 +26,18 @@ 4.8 / 5.4 recommends using devtools2 for CentOS + + + Clang (MacOS Only) + 9.0 and above + Usually use the clang version of MacOS 10.11 and above + - Python - 2.7.x. - depends on libpython2.7.so - apt install python-dev or yum install python-devel + Python(64 bit) + 2.7.x. or 3.5+.x + depends on libpython2.7.so or libpython3.5+.so + apt install python-dev or yum install python-devel if installing python3, please go to Python official website SWIG @@ -49,7 +54,7 @@ openblas any - + optional @@ -62,13 +67,13 @@ numpy >=1.12.0 - pip install numpy==1.14.0 + pip install numpy protobuf - 3.1.0 + >=3.1.0 - pip install protobuf==3.1.0 + pip install protobuf wheel @@ -88,12 +93,23 @@ optional + + setuptools + >= 28.0.0 + + + + + unrar + + + brew install unrar (For MacOS), apt-get install unrar (For Ubuntu) +

-***

## **Compile Option Table** @@ -113,21 +129,6 @@ Whether to support GPU ON - - WITH_C_API - Whether to compile CAPI - OFF - - - WITH_DOUBLE - Whether to use double precision floating point numeber - OFF - - - WITH_DSO - whether to load CUDA dynamic libraries dynamically at runtime, instead of statically loading CUDA dynamic libraries. - ON - WITH_AVX whether to compile PaddlePaddle binaries file containing the AVX instruction set @@ -138,30 +139,11 @@ Whether the PYTHON interpreter is embedded ON - - WITH_STYLE_CHECK - Whether to perform code style checking at compile time - ON - WITH_TESTING Whether to turn on unit test OFF - - WITH_DOC - Whether to compile Chinese and English documents - OFF - - - WITH_SWIG_PY - Whether to compile PYTHON's SWIG interface, which can be used for predicting and customizing training - Auto - - WITH_GOLANG - Whether to compile the fault-tolerant parameter server of the go language - OFF - WITH_MKL Whether to use the MKL math library, if not,using OpenBLAS @@ -177,11 +159,6 @@ Whether to Compile with distributed version OFF - - WITH_RDMA - Whether to compile the relevant parts that supports RDMA - OFF - WITH_BRPC_RDMA Whether to use BRPC RDMA as RPC protocol @@ -193,13 +170,18 @@ OFF - DWITH_ANAKIN - Whether to Compile ANAKIN - OFF + + CUDA_ARCH_NAME + Compile only for current CUDA schema or not + All:Compile all supported CUDA architectures optional: Auto automatically recognizes the schema compilation of the current environment + + + + TENSORRT_ROOT + Specify TensorRT path + The default value under windows is '/', The default value under windows is '/usr/' - - -

+
 **BLAS**
@@ -223,7 +205,6 @@ PaddlePaddle implements references to various BLAS/CUDA/cuDNN libraries by specif
 **Note**: The settings introduced here for these compilation options are only valid for the first cmake. If you want to reset them later, it is recommended to clean up the entire build directory (rm -rf) and then run cmake again with the new options.
-***

## **Installation Package List** @@ -239,71 +220,29 @@ PaddePaddle implements references to various BLAS/CUDA/cuDNN libraries by specif - paddlepaddle==[version code] such as paddlepaddle==1.5.1 + paddlepaddle==[version code] such as paddlepaddle==2.0.0a0 Only support the corresponding version of the CPU PaddlePaddle, please refer to Pypi for the specific version. - paddlepaddle-gpu==1.5.1 - Using version 1.5.1 compiled with CUDA 9.0 and cuDNN 7 - - - paddlepaddle-gpu==1.5.1.post87 - Using version 1.5.1 compiled with CUDA 8.0 and cuDNN 7 + paddlepaddle-gpu==[version code], such as paddlepaddle-gpu==2.0.0a0 + The default installation supports the PaddlePaddle installation package corresponding to [version number] of CUDA 10.0 and cuDNN 7

-
-
 You can find various distributions of PaddlePaddle-gpu in [the Release History](https://pypi.org/project/paddlepaddle-gpu/#history).
+> `postXX` corresponds to the CUDA and cuDNN versions, and the number before `postXX` represents the version of Paddle
-Please note that: paddlepaddle-gpu on Windows will download the package compiled with CUDA 8.0 and cuDNN 7
+Please note that: in the commands, paddlepaddle-gpu==2.0.0a0 will by default install the PaddlePaddle package that supports CUDA 10.0 and cuDNN 7 under the Windows environment.
-***
-
-

-## Installation Mirrors and Introduction
-

| Version Number | Release Description |
|---|---|
| hub.baidubce.com/paddlepaddle/paddle:latest | The latest pre-installed image of the PaddlePaddle CPU version |
| hub.baidubce.com/paddlepaddle/paddle:latest-dev | The latest PaddlePaddle development environment |
| hub.baidubce.com/paddlepaddle/paddle:[Version] | Replace [Version] with a specific version to get the pre-installed PaddlePaddle image of that historical release |
| hub.baidubce.com/paddlepaddle/paddle:latest-gpu | The latest pre-installed image of the PaddlePaddle GPU version |
-

-
-
-
-You can find the docker image for each release of PaddlePaddle in the [DockerHub](https://hub.docker.com/r/paddlepaddle/paddle/tags/).
-
-***

## **Multi-version whl package list - Release** +

@@ -319,127 +258,115 @@ You can find the docker image for each release of PaddlePaddle in the [DockerHub
cpu-mkl - paddlepaddle-1.5.1-cp27-cp27mu-linux_x86_64.whl - paddlepaddle-1.5.1-cp27-cp27m-linux_x86_64.whl - paddlepaddle-1.5.1-cp35-cp35m-linux_x86_64.whl - paddlepaddle-1.5.1-cp36-cp36m-linux_x86_64.whl - paddlepaddle-1.5.1-cp37-cp37m-linux_x86_64.whl
cpu-openblas - paddlepaddle-1.5.1-cp27-cp27mu-linux_x86_64.whl paddlepaddle-1.5.1-cp27-cp27m-linux_x86_64.whl - paddlepaddle-1.5.1-cp35-cp35m-linux_x86_64.whl - paddlepaddle-1.5.1-cp36-cp36m-linux_x86_64.whl - paddlepaddle-1.5.1-cp37-cp37m-linux_x86_64.whl
cuda8-cudnn7-openblas paddlepaddle_gpu-1.5.1-cp27-cp27mu-linux_x86_64.whl paddlepaddle_gpu-1.5.1-cp27-cp27m-linux_x86_64.whl paddlepaddle_gpu-1.5.1-cp35-cp35m-linux_x86_64.whl paddlepaddle_gpu-1.5.1-cp36-cp36m-linux_x86_64.whl paddlepaddle_gpu-1.5.1-cp37-cp37m-linux_x86_64.whl
cuda8-cudnn7-mkl paddlepaddle_gpu-1.5.1-cp27-cp27mu-linux_x86_64.whl paddlepaddle_gpu-1.5.1-cp27-cp27m-linux_x86_64.whl paddlepaddle_gpu-1.5.1-cp35-cp35m-linux_x86_64.whl paddlepaddle_gpu-1.5.1-cp36-cp36m-linux_x86_64.whl paddlepaddle_gpu-1.5.1-cp37-cp37m-linux_x86_64.whl
cuda9-cudnn7-mkl paddlepaddle_gpu-1.5.1-cp27-cp27mu-linux_x86_64.whl paddlepaddle_gpu-1.5.1-cp27-cp27m-linux_x86_64.whl paddlepaddle_gpu-1.5.1-cp35-cp35m-linux_x86_64.whl paddlepaddle_gpu-1.5.1-cp36-cp36m-linux_x86_64.whl paddlepaddle_gpu-1.5.1-cp37-cp37m-linux_x86_64.whl + paddlepaddle-2.0.0a0-cp27-cp27mu-linux_x86_64.whl + paddlepaddle-2.0.0a0-cp27-cp27m-linux_x86_64.whl + paddlepaddle-2.0.0a0-cp35-cp35m-linux_x86_64.whl + paddlepaddle-2.0.0a0-cp36-cp36m-linux_x86_64.whl + paddlepaddle-2.0.0a0-cp37-cp37m-linux_x86_64.whl
cuda10_cudnn7-mkl paddlepaddle_gpu-1.5.1-cp27-cp27mu-linux_x86_64.whl paddlepaddle_gpu-1.5.1-cp27-cp27m-linux_x86_64.whl paddlepaddle_gpu-1.5.1-cp35-cp35m-linux_x86_64.whl - paddlepaddle_gpu-1.5.1-cp36-cp36m-linux_x86_64.whl - paddlepaddle_gpu-1.5.1-cp37-cp37m-linux_x86_64.whl
win_cpu_openblas + paddlepaddle_gpu-2.0.0a0-cp27-cp27mu-linux_x86_64.whl + paddlepaddle_gpu-2.0.0a0-cp27-cp27m-linux_x86_64.whl + paddlepaddle_gpu-2.0.0a0-cp35-cp35m-linux_x86_64.whl + paddlepaddle_gpu-2.0.0a0-cp36-cp36m-linux_x86_64.whl + paddlepaddle_gpu-2.0.0a0-cp37-cp37m-linux_x86_64.whl
win_cpu_mkl - - paddlepaddle-1.5.1-cp27-cp27m-win_amd64.whl - paddlepaddle-1.5.1-cp35-cp35m-win_amd64.whl - paddlepaddle-1.5.1-cp36-cp36m-win_amd64.whl - paddlepaddle-1.5.1-cp37-cp37m-win_amd64.whl + paddlepaddle-2.0.0a0-cp27-cp27m-win_amd64.whl + paddlepaddle-2.0.0a0-cp35-cp35m-win_amd64.whl + paddlepaddle-2.0.0a0-cp36-cp36m-win_amd64.whl + paddlepaddle-2.0.0a0-cp37-cp37m-win_amd64.whl
win_cuda8_cudnn7_openblas win_cuda10_cudnn7_mkl - - paddlepaddle_gpu-1.5.1-cp27-cp27m-win_amd64.whl - paddlepaddle_gpu-1.5.1-cp35-cp35m-win_amd64.whl - paddlepaddle_gpu-1.5.1-cp36-cp36m-win_amd64.whl - paddlepaddle_gpu-1.5.1-cp37-cp37m-win_amd64.whl + paddlepaddle_gpu-2.0.0a0-cp27-cp27m-win_amd64.whl + paddlepaddle_gpu-2.0.0a0-cp35-cp35m-win_amd64.whl + paddlepaddle_gpu-2.0.0a0-cp36-cp36m-win_amd64.whl + paddlepaddle_gpu-2.0.0a0-cp37-cp37m-win_amd64.whl
win_cuda9_cudnn7_openblas - - paddlepaddle_gpu-1.5.1-cp27-cp27m-win_amd64.whl - paddlepaddle_gpu-1.5.1-cp35-cp35m-win_amd64.whl - paddlepaddle_gpu-1.5.1-cp36-cp36m-win_amd64.whl - paddlepaddle_gpu-1.5.1-cp37-cp37m-win_amd64.whl
mac_cpu - - paddlepaddle-1.5.1-cp27-cp27m-macosx_10_6_intel.whl - paddlepaddle-1.5.1-cp35-cp35m-macosx_10_6_intel.whl - paddlepaddle-1.5.1-cp36-cp36m-macosx_10_6_intel.whl - paddlepaddle-1.5.1-cp37-cp37m-macosx_10_6_intel.whl + paddlepaddle-2.0.0a0-cp27-cp27m-macosx_10_6_intel.whl + paddlepaddle-2.0.0a0-cp35-cp35m-macosx_10_6_intel.whl + paddlepaddle-2.0.0a0-cp36-cp36m-macosx_10_6_intel.whl + paddlepaddle-2.0.0a0-cp37-cp37m-macosx_10_6_intel.whl


+### Table instruction -## **Multi-version whl package list - dev** +- Vertical axis + +cpu-mkl: Support CPU training and prediction, use Intel MKL math library + +cuda10_cudnn7-mkl: Support GPU training and prediction, use Intel MKL math library + + +- Transverse axis + +Generally, it is similar to "cp27-cp27mu", in which: + +27:python tag, refers to python2. Similarly, there are "35", "36", "37", etc + +mu:refers to unicode version python, if it is m, refers to non Unicode version Python + +- Installation package naming rules +Each installation package has a unique name. They are named according to the official rules of Python. The form is as follows: +{distribution}-{version}(-{build tag})?-{python tag}-{abi tag}-{platform tag}.whl + +The build tag can be missing, and other parts cannot be missing + +distribution: wheel name + +version: Version, for example 0.14.0 (must be in numeric format) + +python tag: similar to 'py27', 'py2', 'py3', used to indicate the corresponding Python version + +abi tag: similar to 'cp33m', 'abi3', 'none' + +platform tag: similar to 'linux_x86_64', 'any' + + + +
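As a rough illustration of the naming rule just described, the fields of one of the wheels listed in this document can be split apart like this (no build tag in this example, so exactly five '-'-separated fields remain):

```bash
# Split a wheel filename into the parts defined by the naming rule above
whl="paddlepaddle_gpu-2.0.0a0-cp37-cp37m-linux_x86_64.whl"
IFS='-' read -r distribution version python_tag abi_tag platform_tag <<< "${whl%.whl}"
echo "distribution=$distribution version=$version python tag=$python_tag abi tag=$abi_tag platform tag=$platform_tag"
```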

+## **Multi-version whl package list - dev**

Release Instruction version number cp27-cp27mu cp27-cp27m cp35-cp35m
cpu-mkl + paddlepaddle-latest-cp27-cp27mu-linux_x86_64.whl + paddlepaddle-latest-cp27-cp27m-linux_x86_64.whl + paddlepaddle-latest-cp35-cp35m-linux_x86_64.whl + paddlepaddle-latest-cp36-cp36m-linux_x86_64.whl + paddlepaddle-latest-cp37-cp37m-linux_x86_64.whl
cpu-openblas + paddlepaddle-latest-cp27-cp27mu-linux_x86_64.whl paddlepaddle-latest-cp27-cp27m-linux_x86_64.whl + paddlepaddle-latest-cp27-cp27m-linux_x86_64.whl paddlepaddle-latest-cp35-cp35m-linux_x86_64.whl + paddlepaddle-latest-cp36-cp36m-linux_x86_64.whl + paddlepaddle-latest-cp37-cp37m-linux_x86_64.whl
cuda8-cudnn7-openblas paddlepaddle_gpu-latest-cp27-cp27mu-linux_x86_64.whl paddlepaddle_gpu-latest-cp27-cp27m-linux_x86_64.whl paddlepaddle_gpu-latest-cp35-cp35m-linux_x86_64.whl paddlepaddle_gpu-latest-cp36-cp36m-linux_x86_64.whl paddlepaddle_gpu-latest-cp37-cp37m-linux_x86_64.whl cuda9-cudnn7-openblas paddlepaddle_gpu-latest-cp27-cp27mu-linux_x86_64.whl paddlepaddle_gpu-latest-cp27-cp27m-linux_x86_64.whl paddlepaddle_gpu-latest-cp35-cp35m-linux_x86_64.whl paddlepaddle_gpu-latest-cp36-cp36m-linux_x86_64.whl paddlepaddle_gpu-latest-cp37-cp37m-linux_x86_64.whl
cuda8-cudnn7-mkl paddlepaddle_gpu-latest-cp27-cp27mu-linux_x86_64.whl paddlepaddle_gpu-latest-cp27-cp27m-linux_x86_64.whl paddlepaddle_gpu-latest-cp35-cp35m-linux_x86_64.whl paddlepaddle_gpu-latest-cp36-cp36m-linux_x86_64.whl paddlepaddle_gpu-latest-cp37-cp37m-linux_x86_64.whl cuda9-cudnn7-mkl paddlepaddle_gpu-latest-cp27-cp27mu-linux_x86_64.whl paddlepaddle_gpu-latest-cp27-cp27m-linux_x86_64.whl paddlepaddle_gpu-latest-cp35-cp35m-linux_x86_64.whl paddlepaddle_gpu-latest-cp36-cp36m-linux_x86_64.whl paddlepaddle_gpu-latest-cp37-cp37m-linux_x86_64.whl
cuda10-cudnn7-mkl paddlepaddle_gpu-latest-cp27-cp27mu-linux_x86_64.whl paddlepaddle_gpu-latest-cp27-cp27m-linux_x86_64.whl paddlepaddle_gpu-latest-cp35-cp35m-linux_x86_64.whl + paddlepaddle_gpu-latest-cp36-cp36m-linux_x86_64.whl + paddlepaddle_gpu-latest-cp37-cp37m-linux_x86_64.whl
+


+## **Multi-version whl package list(gcc8.2)-develop** +

**Removed rows (old dev table, "latest" wheels):**

| Version description | cp27-cp27mu | cp27-cp27m | cp35-cp35m | cp36-cp36m | cp37-cp37m |
|---|---|---|---|---|---|
| cuda9-cudnn7-mkl | paddlepaddle_gpu-latest-cp27-cp27mu-linux_x86_64.whl | paddlepaddle_gpu-latest-cp27-cp27m-linux_x86_64.whl | paddlepaddle_gpu-latest-cp35-cp35m-linux_x86_64.whl | paddlepaddle_gpu-latest-cp36-cp36m-linux_x86_64.whl | paddlepaddle_gpu-latest-cp37-cp37m-linux_x86_64.whl |
| cuda10_cudnn7-mkl | paddlepaddle_gpu-latest-cp27-cp27mu-linux_x86_64.whl | paddlepaddle_gpu-latest-cp27-cp27m-linux_x86_64.whl | paddlepaddle_gpu-latest-cp35-cp35m-linux_x86_64.whl | paddlepaddle_gpu-latest-cp36-cp36m-linux_x86_64.whl | paddlepaddle_gpu-latest-cp37-cp37m-linux_x86_64.whl |

**Added rows (gcc8.2 develop table):**

| Version description | cp27-cp27mu | cp27-cp27m | cp35-cp35m | cp36-cp36m | cp37-cp37m |
|---|---|---|---|---|---|
| cuda10.1-cudnn7-mkl | paddlepaddle_gpu-0.0.0-cp27-cp27mu-linux_x86_64.whl | paddlepaddle_gpu-0.0.0-cp27-cp27m-linux_x86_64.whl | paddlepaddle_gpu-0.0.0-cp35-cp35m-linux_x86_64.whl | paddlepaddle_gpu-0.0.0-cp36-cp36m-linux_x86_64.whl | paddlepaddle_gpu-0.0.0-cp37-cp37m-linux_x86_64.whl |


+ ## Execute the PaddlePaddle training program in Docker -*** Suppose you have written a PaddlePaddle program in the current directory (such as /home/work): `train.py` ( refer to [PaddlePaddleBook](https://github.com/PaddlePaddle/book/blob/develop/01.fit_a_line/README.cn.md) to write), you can start the training with the following command: @@ -541,7 +496,6 @@ Of course, you can also enter into the Docker container and execute or debug you ## Start PaddlePaddle Book tutorial with Docker -*** Use Docker to quickly launch a local Jupyter Notebook containing the PaddlePaddle official Book tutorial, which can be viewed on the web. PaddlePaddle Book is an interactive Jupyter Notebook for users and developers. If you want to learn more about deep learning, PaddlePaddle Book is definitely your best choice. You can read tutorials or create and share interactive documents with code, formulas, charts, and text. @@ -563,7 +517,6 @@ It's that simple and bon voyage! For further questions, please refer to the [FAQ

## Perform GPU training using Docker -*** In order to ensure that the GPU driver works properly in the image, we recommend using [nvidia-docker](https://github.com/NVIDIA/nvidia-docker) to run the image. Don't forget to install the latest GPU drivers on your physical machine in advance. @@ -577,15 +530,3 @@ In order to ensure that the GPU driver works properly in the image, we recommend export DEVICES=$(\ls /dev/nvidia* | xargs -I{} echo '--device {}:{}') docker run ${CUDA_SO} \ ${DEVICES} -it hub.baidubce.com/paddlepaddle/paddle:latest-gpu - - - -**About AVX:** - -AVX is a set of CPU instructions that speeds up the calculation of PaddlePaddle. The latest PaddlePaddle Docker image is enabled by default for AVX compilation, so if your computer does not support AVX, you need to compile PaddlePaddle to no-avx version separately. - -The following instructions can check if the Linux computer supports AVX: - -`if cat /proc/cpuinfo | grep -i avx; then echo Yes; else echo No; fi` - -If the output is No, you need to choose a mirror that uses no-AVX. diff --git a/doc/fluid/install/compile/compile_CentOS.md b/doc/fluid/install/compile/compile_CentOS.md index 74b8a05f7ff2232dec6a3879f8e96a58c83b16fa..d9a434e79fd30c2b9ad6677322a14d010d81132c 100644 --- a/doc/fluid/install/compile/compile_CentOS.md +++ b/doc/fluid/install/compile/compile_CentOS.md @@ -25,7 +25,6 @@ wget http://developer.download.nvidia.com/compute/machine-learning/repos/rhel7/x86_64/nvidia-machine-learning-repo-rhel7-1.0.0-1.x86_64.rpm rpm -i nvidia-machine-learning-repo-rhel7-1.0.0-1.x86_64.rpm - sudo apt-get install -y libnccl2=2.3.7-1+cuda9.0 libnccl-dev=2.3.7-1+cuda9.0 yum update -y yum install -y libnccl-2.3.7-2+cuda9.0 libnccl-devel-2.3.7-2+cuda9.0 libnccl-static-2.3.7-2+cuda9.0 @@ -89,7 +88,7 @@ > -it 与宿主机保持交互状态,`hub.baidubce.com/paddlepaddle/paddle:latest-dev` 使用名为`hub.baidubce.com/paddlepaddle/paddle:latest-dev`的镜像创建Docker容器,/bin/bash 进入容器后启动/bin/bash命令。 - > 注意:hub.baidubce.com/paddlepaddle/paddle:latest-dev内部安装CUDA 8.0。 + > 注意:hub.baidubce.com/paddlepaddle/paddle:latest-dev内部安装CUDA 10.0。 4. 进入Docker后进入paddle目录下: @@ -119,7 +118,7 @@ > 安装protobuf。 - `apt install patchelf` + `yum install patchelf` > 安装patchelf,PatchELF 是一个小而实用的程序,用于修改ELF可执行文件的动态链接器和RPATH。 @@ -153,7 +152,7 @@ 恭喜,至此您已完成PaddlePaddle的编译安装。您只需要进入Docker容器后运行PaddlePaddle,即可开始使用。更多Docker使用请参见[Docker官方文档](https://docs.docker.com) -> 注:PaddlePaddle Docker镜像为了减小体积,默认没有安装`vim`,您可以在容器中执行 `apt-get install -y vim` 来安装 +> 注:PaddlePaddle Docker镜像为了减小体积,默认没有安装`vim`,您可以在容器中执行 `yum install -y vim` 来安装 ### **本机编译** @@ -206,7 +205,7 @@ * 这里特别提供`patchELF`的安装方法,其他的依赖可以使用`yum install`或者`pip install`/`pip3 install` 后跟依赖名称和版本安装: `yum install patchelf` - > 不能使用apt安装的用户请参见patchElF github[官方文档](https://gist.github.com/ruario/80fefd174b3395d34c14) + > 不能使用yum安装的用户请参见patchElF github[官方文档](https://gist.github.com/ruario/80fefd174b3395d34c14) 7. 将PaddlePaddle的源码clone在当下目录下的Paddle的文件夹中,并进入Padde目录下: diff --git a/doc/fluid/install/compile/compile_CentOS_en.md b/doc/fluid/install/compile/compile_CentOS_en.md index 564ac3d4bdffbee586cd8f1c7f5748e4b82e39f8..1d92dc4a472546728ee1a802c5451193c9ab66fa 100644 --- a/doc/fluid/install/compile/compile_CentOS_en.md +++ b/doc/fluid/install/compile/compile_CentOS_en.md @@ -1,31 +1,53 @@ -*** # **Compile on CentOS from Source Code** -This instruction will show you how to compile PaddlePaddle on a 64-bit desktop or laptop and CentOS. 
The Centos systems we support must meet the following requirements: +## Environment preparation -* CentOS 7 / 6 (this involves whether the related tools can be installed normally) +* **CentOS version (64 bit)** + * **CentOS 6 (not recommended, no official support for compilation problems)** + * **CentOS 7 (GPU version supports CUDA 9.0/9.1/9.2/10.0/10.1 CUDA 9.1, only support single-card mode)** +* **Python version 2.7.15+/3.5.1+/3.6/3.7 (64 bit)** +* **pip or pip3 version 9.0.1+ (64 bit)** -## Determine which version to compile +## Choose CPU/GPU -* **Only PaddlePaddle for CPU is supported.** +* If your computer doesn't have NVIDIA® GPU, please install CPU version of PaddlePaddle -## Choose a compilation method +* If your computer has NVIDIA® GPU, and the following conditions are met,GPU version of PaddlePaddle is recommended. -We provide two compilation methods under the CentOS system: + * **CUDA toolkit 10.0 with cuDNN v7.3+(for multi card support, NCCL2.3.7 or higher)** + * **CUDA toolkit 9.0 with cuDNN v7.3+(for multi card support, NCCL2.3.7 or higher)** + * **Hardware devices with GPU computing power over 1.0** -* Docker source code compilation (the CentOS 6 / 7 GPU version is not supported) (this image already contains python2.7, python3.6, python3.7 environment) -* Direct native source code compilation (does not support all versions of CentOS 6 and GPU versions of CentOS 7) + You can refer to NVIDIA official documents for installation process and configuration method of CUDA and cudnn. Please refer to[CUDA](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/),[cuDNN](https://docs.nvidia.com/deeplearning/sdk/cudnn-install/) -We recommend using **Docker for compilation** because we are installing both the tools and the configuration in a Docker image. This way, if you encounter problems, others can reproduce the problem to help. In addition, for developers accustomed to using Windows and MacOS, there is no need to configure a cross-compilation environment using Docker. It should be emphasized that Docker does not virtualize any hardware. The compiler tools running in the Docker container are actually running directly on the native CPU and operating system. The performance is the same as installing the compiler on the machine. +* * If you need to use multi card environment, please make sure that you have installed nccl2 correctly, or install nccl2 according to the following instructions (here is the installation instructions of nccl2 under ubuntu 16.04, CUDA9 and cuDNN7). For more version of installation information, please refer to NVIDIA[official website](https://developer.nvidia.com/nccl): -Also for those who can't install Docker for a variety of reasons, we also provide a way to **compile directly from sources**, but since the situation on host machine is more complicated, we only support specific systems. + wget http://developer.download.nvidia.com/compute/machine-learning/repos/rhel7/x86_64/nvidia-machine-learning-repo-rhel7-1.0.0-1.x86_64.rpm + rpm -i nvidia-machine-learning-repo-rhel7-1.0.0-1.x86_64.rpm + yum update -y + yum install -y libnccl-2.3.7-2+cuda9.0 libnccl-devel-2.3.7-2+cuda9.0 libnccl-static-2.3.7-2+cuda9.0 -### ***Compile with Docker*** -In order to better use Docker and avoid problems, we recommend using **the highest version of Docker**. For details on **installing and using Docker**, please refer to the [official Docker documentation](https://docs.docker.com/install/). 
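Before choosing between the CPU and GPU package as described above, it can help to confirm what the machine actually provides; a quick, hedged check (tool availability and CUDA install paths vary between systems):

```bash
# Rough pre-flight check before picking the CPU or GPU build
lspci | grep -i nvidia || echo "no NVIDIA GPU detected - install the CPU package"
nvidia-smi                                    # driver and GPU status, if the NVIDIA driver is present
cat /usr/local/cuda/version.txt 2>/dev/null   # CUDA toolkit version, if installed in the default location
```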
+## Installation steps -Once you have **properly installed Docker**, you can start **compiling PaddlePaddle with Docker**: +There are two compilation methods under CentOS system: + +* Compile with Docker(GPU version only supports CentOS 7) +* Local compilation (no official support for compilation problems under CentOS 6) + + +### **Compile with Docker** + +[Docker](https://docs.docker.com/install/) is an open source application container engine. Using docker, you can not only isolate the installation and use of paddlepaddle from the system environment, but also share GPU, network and other resources with the host + +Compiling PaddlePaddle with Docker,you need: + +- On the local host [Install Docker](https://hub.docker.com/search/?type=edition&offering=community) + +- To enable GPU support on Linux, please [Install nvidia-docker](https://github.com/NVIDIA/nvidia-docker) + +Please follow the steps below to install: 1. First select the path where you want to store PaddlePaddle, then use the following command to clone PaddlePaddle's source code from github to a folder named Paddle in the local current directory: @@ -33,44 +55,86 @@ Once you have **properly installed Docker**, you can start **compiling PaddlePad 2. Go to the Paddle directory: `cd Paddle` -3. Take advantage of the image we provided (with this command you don't have to download the image in advance): +3. Create and enter a Docker container that meets the compilation environment: + + * Compile CPU version of PaddlePaddle: + + + + `docker run --name paddle-test -v $PWD:/paddle --network=host -it hub.baidubce.com/paddlepaddle/paddle:latest-dev /bin/bash` + + > --name paddle-test names the Docker container you created as paddle-test; + + + > -v $PWD:/paddle mount the current directory to the /paddle directory in the docker container (PWD variable in Linux will be expanded to [absolute path](https://baike.baidu.com/item/绝对路径/481185) of the current path); + - `docker run --name paddle-test -v $PWD:/paddle --network=host -it` `hub.baidubce.com/paddlepaddle/paddle:latest-dev /bin/bash` + > -it keeps interaction with the host,`hub.baidubce.com/paddlepaddle/paddle:latest-dev` use the image named `hub.baidubce.com/paddlepaddle/paddle:latest-dev` to create Docker container, /bin/bash start the /bin/bash command after entering the container. + + + + * Compile GPU version of PaddlePaddle (only supports CentOS 7): + + + + `nvidia-docker run --name paddle-test -v $PWD:/paddle --network=host -it hub.baidubce.com/paddlepaddle/paddle:latest-dev /bin/bash` + + > --name paddle-test names the Docker container you created as paddle-test; + + + > -v $PWD:/paddle mount the current directory to the /paddle directory in the docker container (PWD variable in Linux will be expanded to [absolute path](https://baike.baidu.com/item/绝对路径/481185) of the current path); + + + + > -it keeps interaction with the host,`hub.baidubce.com/paddlepaddle/paddle:latest-dev` use the image named `hub.baidubce.com/paddlepaddle/paddle:latest-dev` to create Docker container, /bin/bash start the /bin/bash command after entering the container. + + + > Note: hub.baidubce.com/paddlepaddle/paddle:latest-dev internally install CUDA 10.0. 
- > `--name paddle-test` names the Docker container you created as paddle-test, `-v $PWD:/paddle` mounts the current directory to the /paddle directory in the Docker container (the PWD variable in Linux will expand to the current [Absolute path](https://baike.baidu.com/item/%E7%BB%9D%E5%AF%B9%E8%B7%AF%E5%BE%84/481185)), `-it` keeps interacting with the host, `hub.baidubce.com/paddlepaddle/paddle` creates a Docker container with an image called `hub.baidubce.com/paddlepaddle/paddle:latest-dev`, /bin/bash enters the container After starting the `/bin/bash` command. 4. After entering Docker, go to the paddle directory: `cd paddle` 5. Switch to a more stable version to compile: - `git checkout v1.1` + `git checkout [name of the branch]` + + For example: + + `git checkout release/1.5` + + Note: python3.6、python3.7 version started supporting from release/1.2 branch 6. Create and enter the /paddle/build path: `mkdir -p /paddle/build && cd /paddle/build` -7. Use the following command to install the dependencies: (For Python3: Please select the pip for the python version you wish to use, such as pip3.5, pip3.6) +7. Use the following command to install the dependencies: - For Python2: pip install protobuf==3.1.0 - For Python3: pip3.5 install protobuf==3.1.0 + For Python2: pip install protobuf + For Python3: pip3.5 install protobuf + Note: We used Python3.5 command as an example above, if the version of your Python is 3.6/3.7, please change Python3.5 in the commands to Python3.6/Python3.7 > Install protobuf 3.1.0 - `apt install patchelf` + `yum install patchelf` > Installing patchelf, PatchELF is a small and useful program for modifying the dynamic linker and RPATH of ELF executables. 8. Execute cmake: > For details on the compilation options, see the [compilation options table](../Tables.html/#Compile). + > Please attention to modify parameters `-DPY_VERSION` for the version of Python you want to compile with, for example `-DPY_VERSION=3.5` means the version of python is 3.5.x * For users who need to compile the **CPU version PaddlePaddle**: - `cmake .. -DWITH_FLUID_ONLY=ON -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release` + `cmake .. -DPY_VERSION=3.5 -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release` + + * For users who need to compile the **GPU version PaddlePaddle**: + `cmake .. -DPY_VERSION=3.5 -DWITH_GPU=ON -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release` - > We currently do not support the compilation of the GPU version PaddlePaddle under CentOS. + > We currently do not support the compilation of the GPU version PaddlePaddle under CentOS 6. 9. Execute compilation: @@ -80,30 +144,23 @@ Once you have **properly installed Docker**, you can start **compiling PaddlePad 10. After compiling successfully, go to the `/paddle/build/python/dist` directory and find the generated `.whl` package: `cd /paddle/build/python/dist` -11. Install the compiled `.whl` package on the current machine or target machine: (For Python3: Please select the pip corresponding to the python version you wish to use, such as pip3.5, pip3.6) +11. 
Install the compiled `.whl` package on the current machine or target machine: + For Python2: pip install -U (whl package name) + For Python3: pip3.5 install -U (whl package name) - For Python2: pip install (whl package name) - For Python3: pip3.5 install (whl package name) + Note: For the command involving Python 3, we use Python 3.5 as an example above, if the version of your Python is 3.6/3.7, please change Python3.5 in the commands to Python3.6/Python3.7 +Congratulations, now that you have successfully installed PaddlePaddle using Docker, you only need to run PaddlePaddle after entering the Docker container. For more Docker usage, please refer to the [official Docker documentation](https://docs.docker.com/). -Now that you have successfully installed PaddlePaddle using Docker, you only need to run PaddlePaddle after entering the Docker container. For more Docker usage, please refer to the [official Docker documentation](https://docs.docker.com/). - -> Notes: In order to reduce the size, `vim` is not installed in PaddlePaddle Docker image by default. You can edit the code in the container after executing `apt-get install -y vim` in the container. - -Congratulations, you have now completed the process of compiling PaddlePaddle using Docker. - - - -

-### *Local compilation* - -**Please strictly follow the order of the following instructions** +> Note: In order to reduce the size, `vim` is not installed in PaddlePaddle Docker image by default. You can edit the code in the container after executing `yum install -y vim` in the container. + +### **Local compilation** 1. Check that your computer and operating system meet the compilation standards we support: `uname -m && cat /etc/*release` -2. Update the source of `yum`: `yum update`, and add the necessary yum source: `yum install -y epel-release`, and install openCV in advance +2. Update the source of `yum`: `yum update`, and add the necessary yum source: `yum install -y epel-release`, and install [OpenCV](https://opencv.org/releases.html) in advance 3. Install the necessary tools `bzip2` and `make`: `yum install -y bzip2 `, `yum install -y make` @@ -143,11 +200,12 @@ Congratulations, you have now completed the process of compiling PaddlePaddle us 3. Find `virtualenvwrapper.sh`: `find / -name virtualenvwrapper.sh` (please find the corresponding Python version of `virtualenvwrapper.sh`) - 4. See the installation method in `virtualenvwrapper.sh`: `cat vitualenvwrapper.sh` + 4. See the installation method in `virtualenvwrapper.sh`: `cat vitualenvwrapper.sh`, this shell file describes the steps and commands - 5. Install `virtualwrapper` + 5. Install `virtualwrapper` as described in `virtualenvwrapper.sh` - 6. Create a virtual environment called `paddle-venv`: `mkvirtualenv paddle-venv` + 6. Set VIRTUALENVWRAPPER_PYTHON:`export VIRTUALENVWRAPPER_PYTHON=[python-lib-path]:$PATH` (here replace the last two levels content of [python-lib-path] with /bin/) + 7. Create virtual environment named `paddle-venv`: `mkvirtualenv paddle-venv` 5. Enter the virtual environment: `workon paddle-venv` @@ -156,7 +214,7 @@ Congratulations, you have now completed the process of compiling PaddlePaddle us * Here is the installation method for `patchELF`. Other dependencies can be installed using `yum install` or `pip install`/`pip3 install` followed by the name and version: `yum install patchelf` - > Users who can't use apt installation can refer to patchElF [github official documentation](https://gist.github.com/ruario/80fefd174b3395d34c14). + > Users who can't use yum installation can refer to patchElF github [official documentation](https://gist.github.com/ruario/80fefd174b3395d34c14). 7. Put the PaddlePaddle source cloned in the Paddle folder in the current directory and go to the Paddle directory: @@ -166,7 +224,11 @@ Congratulations, you have now completed the process of compiling PaddlePaddle us 8. Switch to a more stable release branch for compilation (support for Python 3.6 and 3.7 is added from the 1.2 branch): - - `git checkout release/1.2` + - `git checkout [name of target branch]` + + For example: + + `git checkout release/1.5` 9. And please create and enter a directory called build: @@ -179,30 +241,57 @@ Congratulations, you have now completed the process of compiling PaddlePaddle us * For users who need to compile the **CPU version PaddlePaddle**: - For Python2: cmake .. -DWITH_FLUID_ONLY=ON -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release + For Python2: cmake .. -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release For Python3: cmake .. 
-DPY_VERSION=3.5 -DPYTHON_INCLUDE_DIR=${PYTHON_INCLUDE_DIRS} \ - -DPYTHON_LIBRARY=${PYTHON_LIBRARY} -DWITH_FLUID_ONLY=ON -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release + -DPYTHON_LIBRARY=${PYTHON_LIBRARY} -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release > If you encounter `Could NOT find PROTOBUF (missing: PROTOBUF_LIBRARY PROTOBUF_INCLUDE_DIR)`, you can re-execute the cmake command. > Please note that the PY_VERSION parameter is replaced with the python version you need. + + * For users who need to compile the **GPU version PaddlePaddle**: + + 1. Please make sure that you have installed nccl2 correctly, or install nccl2 according to the following instructions (here is ubuntu 16.04, CUDA9, ncDNN7 nccl2 installation instructions), for more information on the installation information please refer to the [NVIDIA official website](https://developer.nvidia.com/nccl/nccl-download): + + i. `wget http://developer.download.nvidia.com/compute/machine-learning/repos/rhel7/x86_64/nvidia-machine-learning-repo-rhel7-1.0.0-1.x86_64.rpm` + + ii. `rpm -i nvidia-machine-learning-repo-rhel7-1.0.0-1.x86_64.rpm` + + iii. `yum install -y libnccl-2.3.7-2+cuda9.0 libnccl-devel-2.3.7-2+cuda9.0 libnccl-static-2.3.7-2+cuda9.0` + + 2. If you have already installed `nccl2` correctly, you can start cmake: *(For Python3: Please configure the correct python version for the PY_VERSION parameter)* + + For Python2: cmake .. -DWITH_GPU=ON -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release + For Python3: cmake .. -DPYTHON_EXECUTABLE:FILEPATH=[您可执行的Python3的路径] -DPYTHON_INCLUDE_DIR:PATH=[之前的PYTHON_INCLUDE_DIRS] -DPYTHON_LIBRARY:FILEPATH=[之前的PYTHON_LIBRARY] -DWITH_GPU=ON -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release + + + Note: For the command involving Python 3, we use Python 3.5 as an example above, if the version of your Python is 3.6/3.7, please change Python3.5 in the commands to Python3.6/Python3.7 + + + 11. Compile with the following command: `make -j$(nproc)` + > Use multicore compilation + + > If “Too many open files” error is displayed during compilation, please use the instruction ulimit -n 8192 to increase the number of files allowed to be opened by the current process. Generally speaking, 8192 can ensure the completion of compilation. + 12. After compiling successfully, go to the `/paddle/build/python/dist `directory and find the generated `.whl` package: `cd /paddle/build/python/dist` 13. Install the compiled `.whl` package on the current machine or target machine: - `Pip install (whl package name) `or `pip3 install (whl package name)` + `Pip install -U (whl package name) `or `pip3 install -U (whl package name)` Congratulations, now you have completed the process of compiling PaddlePaddle natively.
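Pulled together, the CPU-only local build walked through above boils down to roughly the following sequence; this is a sketch that assumes the `release/1.5` branch and a Python 2 toolchain, so adjust the cmake flags and `pip` for your Python version:

```bash
# Condensed sketch of the local CPU build described above
git clone https://github.com/PaddlePaddle/Paddle.git
cd Paddle
git checkout release/1.5
mkdir -p build && cd build
cmake .. -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release
make -j$(nproc)
cd python/dist
pip install -U *.whl
```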

 ### ***Verify installation***
-After the installation is complete, you can use `python` to enter the Python interpreter and then use `import paddle.fluid` to verify that the installation was successful.
+After the installation is complete, you can use `python` or `python3` to enter the Python interpreter and then use `import paddle.fluid as fluid` and then `fluid.install_check.run_check()` to verify that the installation was successful.
+
+If `Your Paddle Fluid is installed succesfully!` appears, it means the installation was successful.
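The same check can be run non-interactively; a one-line form of what is described above (use `python3` if that is the interpreter PaddlePaddle was installed against):

```bash
python -c "import paddle.fluid as fluid; fluid.install_check.run_check()"
```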

### ***How to uninstall*** @@ -210,3 +299,6 @@ After the installation is complete, you can use `python` to enter the Python int Please use the following command to uninstall PaddlePaddle (users who use Docker to install PaddlePaddle should use the following command in the container containing PaddlePaddle. Please use the corresponding version of pip): * ***CPU version of PaddlePaddle***: `pip uninstall paddlepaddle` or `pip3 uninstall paddlepaddle` +* ***GPU version of PaddlePaddle***: `pip uninstall paddlepaddle-gpu` or `pip3 uninstall paddlepaddle-gpu` + +Users installing PaddlePaddle with Docker, please use above commands in the container involved PaddlePaddle and attention to use the corresponding version of Pip diff --git a/doc/fluid/install/compile/compile_MacOS.md b/doc/fluid/install/compile/compile_MacOS.md index 9e924abdff5db7759bf341df93f3ee85fdb8eae2..426cf3ba696169aa1051e4de164031860f356aa5 100644 --- a/doc/fluid/install/compile/compile_MacOS.md +++ b/doc/fluid/install/compile/compile_MacOS.md @@ -149,9 +149,10 @@ - a. 这里特别说明一下**CMake**的安装: - 由于我们使用的是CMake3.4请根据以下步骤: + + CMake我们支持3.10以上版本,推荐使用CMake3.16,请根据以下步骤安装: - 1. 从CMake[官方网站](https://cmake.org/files/v3.4/cmake-3.4.3-Darwin-x86_64.dmg)下载CMake镜像并安装 + 1. 从CMake[官方网站](https://cmake.org/files/v3.16/cmake-3.16.0-Darwin-x86_64.dmg)下载CMake镜像并安装 2. 在控制台输入`sudo "/Applications/CMake.app/Contents/bin/cmake-gui" –install` - b. 如果您不想使用系统默认的blas而希望使用自己安装的OPENBLAS请参见[FAQ](../FAQ.html/#OPENBLAS) diff --git a/doc/fluid/install/compile/compile_MacOS_en.md b/doc/fluid/install/compile/compile_MacOS_en.md index 10fee4d91371764c39c6e32c075241105e35b404..b00557ea5f01cf9b611c2649d259941170b600b2 100644 --- a/doc/fluid/install/compile/compile_MacOS_en.md +++ b/doc/fluid/install/compile/compile_MacOS_en.md @@ -1,34 +1,33 @@ -*** # **Compile on MacOS from Source Code** -This instruction will show you how to compile PaddlePaddle on *64-bit desktops or laptops* and MacOS systems. The MacOS systems we support need to meet the following requirements: +## Environment preparation -* MacOS 10.12/10.13/10.14 (this involves whether the related tools can be installed normally) +* **MacOS version 10.11/10.12/10.13/10.14 (64 bit) (not support GPU version)** +* **Python version 2.7.15+/3.5.1+/3.6/3.7 (64 bit)** +* **pip or pip3 version 9.0.1+ (64 bit)** -## Determine which version to compile +## Choose CPU/GPU -* **Only PaddlePaddle for CPU is supported.** +* Currently, only PaddlePaddle for CPU is supported. -## Choose a compilation method +## Installation steps +There are two compilation methods in MacOS system: -Under the MacOS 10.12/10.13/10.14 system we offer 2 ways to compile: +* Compile with docker +* Local compilation -* Docker source compilation (this image already contains python2.7, python3.6, python3.7 environment) -* Direct source code compilation - - -We recommend **using Docker for compilation** because we are installing both the tools and the configuration in a Docker image. This way, if you encounter problems, others can reproduce the problem to help. In addition, for developers accustomed to using Windows and MacOS, there is no need to configure a cross-compilation environment using Docker. It should be emphasized that Docker does not virtualize any hardware. The compiler tools running in the Docker container are actually running directly on the native CPU and operating system. The performance is the same as installing the compiler on the machine. 
+ +### ***Compile with Docker*** -Also for those who can't install Docker for a variety of reasons, we also provide a way to **compile directly from local sources**, but since the situation on this machine is more complicated, we only support specific systems. +[Docker](https://docs.docker.com/install/) is an open source application container engine. Using docker, you can not only isolate the installation and use of paddlepaddle from the system environment, but also share GPU, network and other resources with the host -

-### ***Compile with Docker*** +Compiling PaddlePaddle with Docker,you need: -In order to better use Docker and avoid problems, we recommend using **the highest version of Docker**. For details on **installing and using Docker**, please refer to the [official Docker documentation](https://docs.docker.com/install/). +- On the local host [Install Docker](https://hub.docker.com/search/?type=edition&offering=community) -> Please note that running Docker on MacOS requires logging in with your dockerID, otherwise an Authenticate Failed error will occur. +- Log in to Docker with Docker ID to avoid `Authenticate Failed` error -Once you have **properly installed Docker**, you can start **compiling PaddlePaddle with Docker**: +Please follow the steps below to install: 1. Enter the terminal of the Mac @@ -38,27 +37,40 @@ Once you have **properly installed Docker**, you can start **compiling PaddlePad 3. Go to the Paddle directory: `cd Paddle` -4. Take advantage of the image we provided (with this command you don't have to download the image in advance): +4. Create and enter a Docker container that meets the compilation environment: - `docker run --name paddle-test -v $PWD:/paddle --network=host -it` `hub.baidubce.com/paddlepaddle/paddle:latest-dev /bin/bash` + `docker run --name paddle-test -v $PWD:/paddle --network=host -it hub.baidubce.com/paddlepaddle/paddle:latest-dev /bin/bash` - > --name paddle-test Name the Docker container you created as paddle-test, -v $PWD:/paddle mount the current directory to the /paddle directory in the Docker container (the PWD variable in Linux will expand to the current path's [Absolute path](https://baike.baidu.com/item/绝对路径/481185)), -it keeps interacting with the host, `hub.baidubce.com/paddlepaddle/paddle:latest-dev` creates a Docker container with a mirror named `hub.baidubce.com/paddlepaddle/paddle:latest-dev`, /bin /bash starts the /bin/bash command after entering the container. + > --name paddle-test name the Docker container you created as paddle-test, -5. After entering Docker, go to the paddle directory: `cd paddle` + > -v $PWD:/paddle mount the current directory to the /paddle directory in the Docker container (the PWD variable in Linux will expand to the current path's [Absolute path](https://baike.baidu.com/item/绝对路径/481185)), + + > -it keeps interacting with the host, `hub.baidubce.com/paddlepaddle/paddle:latest-dev` creates a Docker container with a mirror named `hub.baidubce.com/paddlepaddle/paddle:latest-dev`, /bin /bash starts the /bin/bash command after entering the container. + +5. After entering Docker, go to the paddle directory: + + `cd paddle` 6. Switch to a more stable version to compile: - `git checkout v1.1` + `git checkout [name of the branch]` + + For example: + + `git checkout release/1.5` + + Note: python3.6、python3.7 version started supporting from release/1.2 branch 7. Create and enter the /paddle/build path: `mkdir -p /paddle/build && cd /paddle/build` -8. Use the following command to install the dependencies: (For Python3: Please select the pip for the python version you wish to use, such as pip3.5, pip3.6) +8. Use the following command to install the dependencies: + For Python2: pip install protobuf==3.1.0 + For Python3: pip3.5 install protobuf==3.1.0 - For Python2: pip install protobuf==3.1.0 - For Python3: pip3.5 install protobuf==3.1.0 + Note: We used Python3.5 command as an example above, if the version of your Python is 3.6/3.7, please change Python3.5 in the commands to Python3.6/Python3.7 > Install protobuf 3.1.0. 
@@ -69,12 +81,13 @@ Once you have **properly installed Docker**, you can start **compiling PaddlePad 9. Execute cmake: > For details on the compilation options, see the [compilation options table](../Tables_en.html/#Compile). + > Please attention to modify parameters `-DPY_VERSION` for the version of Python you want to compile with, for example `-DPY_VERSION=3.5` means the version of python is 3.5.x * For users who need to compile the **CPU version PaddlePaddle**: - `cmake .. -DWITH_FLUID_ONLY=ON -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release` + `cmake .. -DPY_VERSION=3.5 -DWITH_GPU=OFF -DWITH_TESTING=OFF -DWITH_AVX=OFF -DCMAKE_BUILD_TYPE=Release` - > We currently do not support the compilation of the GPU version PaddlePaddle under CentOS. + > We currently do not support the compilation of the GPU version PaddlePaddle under CentOS. 10. Execute compilation: @@ -87,23 +100,22 @@ Once you have **properly installed Docker**, you can start **compiling PaddlePad 12. Install the compiled `.whl` package on the current machine or target machine: (For Python3: Please select the pip corresponding to the python version you wish to use, such as pip3.5, pip3.6) - For Python2: pip install (whl package name) - For Python3: pip3.5 install (whl package name) + For Python2: pip install -U (whl package name) + For Python3: pip3.5 install -U (whl package name) + Note: We used Python3.5 command as an example above, if the version of your Python is 3.6/3.7, please change Python3.5 in the commands to Python3.6/Python3.7 -Now that you have successfully installed PaddlePaddle using Docker, you only need to run PaddlePaddle after entering the Docker container. For more Docker usage, please refer to the [official Docker documentation](https://docs.docker.com/). +Congratulations, now that you have successfully installed PaddlePaddle using Docker, you only need to run PaddlePaddle after entering the Docker container. For more Docker usage, please refer to the [official Docker documentation](https://docs.docker.com/). > Note: In order to reduce the size, `vim` is not installed in PaddlePaddle Docker image by default. You can edit the code in the container after executing `apt-get install -y vim` in the container. -Congratulations, you have now completed the process of compiling PaddlePaddle using Docker. - - +

-### ***Native compilation*** +### ***Local compilation*** **Please strictly follow the order of the following instructions** -1. Check that your computer and operating system meet our supported compilation standards: `uname -m` and view the system version `about this Mac`. And install openCV in advance. +1. Check that your computer and operating system meet our supported compilation standards: `uname -m` and view the system version `about this Mac`. And install [OpenCV](https://opencv.org/releases.html) in advance. 2. Install python and pip: @@ -132,7 +144,7 @@ Congratulations, you have now completed the process of compiling PaddlePaddle us - d. Set PYTHON_INCLUDE_DIR: `export PYTHON_INCLUDE_DIRS=[python-include-path]` - - e. Set the system environment variable path: `export PATH=[python-bin-path]:$PATH` (here [python-bin-path] is the result of replacing the last two levels of [python-lib-path] with the path after /bin/ ) + - e. Set the system environment variable path: `export PATH=[python-bin-path]:$PATH` (here [python-bin-path] is the result of replacing the last two levels of [python-lib-path] with the path after /bin/) - f. Set the dynamic library link: `export LD_LIBRARY_PATH=[python-ld-path]` and `export DYLD_LIBRARY_PATH=[python-ld-path]` (here [python-ld-path] is the [python-bin-path]'s parent directory ) @@ -161,7 +173,13 @@ Congratulations, you have now completed the process of compiling PaddlePaddle us 7. Switch to a more stable release branch to compile: (Note that python 3.6, python 3.7 version are supported from the 1.2 branch) - `git checkout release/1.2` + `git checkout [name of the branch]` + + For example: + + `git checkout release/1.5` + + Note: python3.6、python3.7 version started supporting from release/1.2 branch 8. And please create and enter a directory called build: @@ -175,8 +193,8 @@ Congratulations, you have now completed the process of compiling PaddlePaddle us For Python2: cmake .. -DWITH_FLUID_ONLY=ON -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release - For Python3: cmake .. -DPY_VERSION=3.5 -DPYTHON_INCLUDE_DIR=${PYTHON_INCLUDE_DIRS} \-DPYTHON_LIBRARY=${PYTHON_LIBRARY} -DWITH_FLUID_ONLY=ON -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release - + For Python3: cmake .. -DPY_VERSION=3.5 -DPYTHON_INCLUDE_DIR=${PYTHON_INCLUDE_DIRS} \ + -DPYTHON_LIBRARY=${PYTHON_LIBRARY} -DWITH_FLUID_ONLY=ON -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release > ``-DPY_VERSION=3.5`` Please change to the Python version of the installation environment. @@ -188,7 +206,7 @@ Congratulations, you have now completed the process of compiling PaddlePaddle us 12. Install the compiled `.whl` package on the current machine or target machine: - `pip install (whl package name)` or `pip3 install (whl package name)` + `pip install -U (whl package name)` or `pip3 install -U (whl package name)` > If you have multiple python environments and pips installed on your computer, please see the [FAQ](../Tables.html/#MACPRO). @@ -197,7 +215,9 @@ Congratulations, now you have completed the process of compiling PaddlePaddle us

### ***Verify installation*** -After the installation is complete, you can use `python` to enter the Python interpreter and then use `import paddle.fluid` to verify that the installation was successful. +After the installation is complete, you can use `python` or `python3` to enter the Python interpreter, run `import paddle.fluid as fluid`, and then run `fluid.install_check.run_check()` to verify that the installation was successful. + +If `Your Paddle Fluid is installed succesfully!` appears, the installation was successful.
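If you prefer a one-shot check over an interactive interpreter session, the same verification can be run directly from the shell (use `python` or the `python3.x` you installed PaddlePaddle into):

    # Non-interactive form of the verification described above
    python3 -c "import paddle.fluid as fluid; fluid.install_check.run_check()"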

### ***How to uninstall*** @@ -205,3 +225,5 @@ After the installation is complete, you can use `python` to enter the Python int Please use the following command to uninstall PaddlePaddle (users who use Docker to install PaddlePaddle should use the following command in the container containing PaddlePaddle. Please use the corresponding version of pip): * ***CPU version of PaddlePaddle***: `pip uninstall paddlepaddle` or `pip3 uninstall paddlepaddle` + +Users installing PaddlePaddle with Docker, please use above commands in the container involved PaddlePaddle and attention to use the corresponding version of Pip diff --git a/doc/fluid/install/compile/compile_Ubuntu.md b/doc/fluid/install/compile/compile_Ubuntu.md index 74954df2a1db1f79cc64643aeaa08c8dc64c8f0e..9feea65fd3fdf394ad00eff5436f6a9e65711b60 100644 --- a/doc/fluid/install/compile/compile_Ubuntu.md +++ b/doc/fluid/install/compile/compile_Ubuntu.md @@ -86,7 +86,7 @@ > -it 与宿主机保持交互状态,`hub.baidubce.com/paddlepaddle/paddle:latest-dev` 使用名为`hub.baidubce.com/paddlepaddle/paddle:latest-dev`的镜像创建Docker容器,/bin/bash 进入容器后启动/bin/bash命令。 - > 注意:hub.baidubce.com/paddlepaddle/paddle:latest-dev内部安装CUDA 8.0。 + > 注意:hub.baidubce.com/paddlepaddle/paddle:latest-dev内部安装CUDA 10.0。 4. 进入Docker后进入paddle目录下: diff --git a/doc/fluid/install/compile/compile_Ubuntu_en.md b/doc/fluid/install/compile/compile_Ubuntu_en.md index 37f36b4c829382b71910497e65a9259bd6d58bed..9a1faf3a577acdbfebb99a69ad4cf4507ff3c9ec 100644 --- a/doc/fluid/install/compile/compile_Ubuntu_en.md +++ b/doc/fluid/install/compile/compile_Ubuntu_en.md @@ -1,109 +1,159 @@ -*** # **Compile on Ubuntu from Source Code** -This instruction describes how to compile PaddlePaddle on *64-bit desktops or laptops* and Ubuntu systems. The Ubuntu systems we support must meet the following requirements: +## Environment preparation -* Ubuntu 14.04/16.04/18.04 (this involves whether the related tools can be installed successfully) +* **Ubuntu version (64 bit)** + * **Ubuntu 14.04 (GPU version supports CUDA 10.0/10.1)** + * **Ubuntu 16.04 (GPU version supports CUDA 9.0/9.1/9.2/10.0/10.1)** + * **Ubuntu 18.04 (GPU version supports CUDA 10.0/10.1)** +* **Python version 2.7.15+/3.5.1+/3.6/3.7 (64 bit)** +* **pip or pip3 version 9.0.1+ (64 bit)** -## Determine which version to compile +## Choose CPU/GPU -* **CPU version of PaddlePaddle**, if your system does not have an NVIDIA® GPU, you must install this version. This version is easier than the GPU version. So even if you have a GPU on your computer, we recommend that you first install the CPU version of PaddlePaddle to check if your local environment is suitable. +* If your computer doesn't have NVIDIA® GPU, please install CPU version of PaddlePaddle -* **GPU version of PaddlePaddle**, in order to make the PaddlePaddle program run more quickly, we usually use the GPU to accelerate the PaddlePaddle program, but the GPU version of PaddlePaddle needs to have the NVIDIA® GPU that meets the following conditions (see NVIDIA for the specific installation process and configuration). Official documentation: [For CUDA](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/), For [cuDNN](https://docs.nvidia.com/deeplearning/sdk/cudnn-install/)) +* If your computer has NVIDIA® GPU, and the following conditions are met,GPU version of PaddlePaddle is recommended. 
+ * **CUDA toolkit 10.0 with cuDNN v7.3+(for multi card support, NCCL2.3.7 or higher)** + * **CUDA toolkit 9.0 with cuDNN v7.3+(for multi card support, NCCL2.3.7 or higher)** + * **CUDA toolkit 8.0 with cuDNN v7.1+(for multi card support, NCCL2.1.15-2.2.13)** + * **Hardware devices with GPU computing power over 1.0** - * *CUDA Toolkit 9.0 with cuDNN v7* - * *CUDA Toolkit 8.0 with cuDNN v7* - * *Hardware devices with GPU compute capability exceeding 1.0* + You can refer to NVIDIA official documents for installation process and configuration method of CUDA and cudnn. Please refer to[CUDA](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/),[cuDNN](https://docs.nvidia.com/deeplearning/sdk/cudnn-install/) -## Choose a compilation method +* * If you need to use multi card environment, please make sure that you have installed nccl2 correctly, or install nccl2 according to the following instructions (here is the installation instructions of nccl2 under ubuntu 16.04, CUDA9 and cuDNN7). For more version of installation information, please refer to NVIDIA[official website](https://developer.nvidia.com/nccl): -Under Ubuntu's system we offer 2 ways to compile: -* Docker source compilation (this image already contains python2.7, python3.6, python3.7 environment) + wget https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1604/x86_64/nvidia-machine-learning-repo-ubuntu1604_1.0.0-1_amd64.deb + dpkg -i nvidia-machine-learning-repo-ubuntu1604_1.0.0-1_amd64.deb + sudo apt-get install -y libnccl2=2.3.7-1+cuda9.0 libnccl-dev=2.3.7-1+cuda9.0 -* Direct native source code compilation (does not support GPU version under ubuntu18.04) +## Installation steps -We recommend using **Docker for compilation** because we are installing both the tools and the configuration in a Docker image. This way, if you encounter problems, others can reproduce the problem to help. In addition, for developers accustomed to using Windows and MacOS, there is no need to configure a cross-compilation environment using Docker. Someone uses a virtual machine to analogize to Docker. It should be emphasized that Docker does not virtualize any hardware. The compiler tools running in the Docker container are actually running directly on the native CPU and operating system. The performance is the same as installing the compiler on the machine. +There are two compilation methods in Ubuntu system: -We also provide methods that can be **compiled from local source code**, but since the situation on host machine is more complicated, we only provide support for specific systems. +* Compile with docker (GPU version under Ubuntu 18.04 is not supported temporarily) +* Local compilation -
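Relating to the multi-card NCCL2 note earlier in this section, a quick sanity check (not part of the official steps) is to ask the package manager whether the expected packages were registered:

    # Optional: confirm that the NCCL2 packages from the note above are present
    dpkg -l | grep nccl    # should list libnccl2 and libnccl-dev, e.g. version 2.3.7-1+cuda9.0 in the CUDA 9 example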

-## ***Compile with Docker*** + +### **Compile with docker** + +[Docker](https://docs.docker.com/install/) is an open source application container engine. Using docker, you can not only isolate the installation and use of paddlepaddle from the system environment, but also share GPU, network and other resources with the host + +Compiling PaddlePaddle with Docker,you need: -In order to better use Docker and avoid problems, we recommend using **the highest version of Docker**. For details on **installing and using Docker**, please refer to [the official Docker documentation](https://docs.docker.com/install/). +- On the local host [Install Docker](https://hub.docker.com/search/?type=edition&offering=community) -> Please note that to install and use the PaddlePaddle version that supports GPU, you must first install nvidia-docker +- To enable GPU support on Linux, please [Install nvidia-docker](https://github.com/NVIDIA/nvidia-docker) -Once you have **properly installed Docker**, you can start **compiling PaddlePaddle with Docker**: +Please follow the steps below to install: -1. First select the path where you want to store PaddlePaddle, then use the following command to clone PaddlePaddle's source code from github to a folder named Paddle in the local current directory: +1. First, select the path where you want to store PaddlePaddle, and then use the following command to clone the source code of PaddlePaddle from GitHub to the folder named Paddle under the current local directory: `git clone https://github.com/PaddlePaddle/Paddle.git` -2. Go to the Paddle directory: `cd Paddle` +2. Enter the Paddle Directory: `cd Paddle` + +3. Create and enter a Docker container that meets the compilation environment: + + * Compile CPU version of PaddlePaddle: + + + + `docker run --name paddle-test -v $PWD:/paddle --network=host -it hub.baidubce.com/paddlepaddle/paddle:latest-dev /bin/bash` + + > --name paddle-test names the Docker container you created as paddle-test; + + + > -v $PWD:/paddle mount the current directory to the /paddle directory in the docker container (PWD variable in Linux will be expanded to [absolute path](https://baike.baidu.com/item/绝对路径/481185) of the current path); + + + > -it keeps interaction with the host,`hub.baidubce.com/paddlepaddle/paddle:latest-dev` use the image named `hub.baidubce.com/paddlepaddle/paddle:latest-dev` to create Docker container, /bin/bash start the /bin/bash command after entering the container. + + + * Compile GPU version of PaddlePaddle: + + + + `nvidia-docker run --name paddle-test -v $PWD:/paddle --network=host -it hub.baidubce.com/paddlepaddle/paddle:latest-dev /bin/bash` + + > --name paddle-test names the Docker container you created as paddle-test; -3. 
Take advantage of the image we provided (with this command you don't have to download the image in advance): - `docker run --name paddle-test -v $PWD:/paddle --network=host -it hub.baidubce.com/paddlepaddle/paddle:latest-dev /bin/bash` + > -v $PWD:/paddle mount the current directory to the /paddle directory in the docker container (PWD variable in Linux will be expanded to [absolute path](https://baike.baidu.com/item/绝对路径/481185) of the current path); - > --name paddle-test names the Docker container you created as paddle-test, -v $PWD:/paddle mounts the current directory to the /paddle directory in the Docker container (the PWD variable in Linux will expand to the current path's [absolute path](https://baike.baidu.com/item/%E7%BB%9D%E5%AF%B9%E8%B7%AF%E5%BE%84/481185)), -it keeps interacting with the host, `hub.baidubce.com/paddlepaddle/paddle:latest-dev` creates a Docker container with a mirror named `hub.baidubce.com/paddlepaddle/paddle:latest-dev`, /bin /bash Starts the /bin/bash command after entering the container. -4. After entering Docker, go to the paddle directory: `cd paddle` + > -it keeps interaction with the host,`hub.baidubce.com/paddlepaddle/paddle:latest-dev` use the image named `hub.baidubce.com/paddlepaddle/paddle:latest-dev` to create Docker container, /bin/bash start the /bin/bash command after entering the container. -5. Switch to a more stable release branch to compile: (Note that python 3.6, python 3.7 version are supported from the 1.2 branch) - `git checkout release/1.2` + > Note: hub.baidubce.com/paddlepaddle/paddle:latest-dev internally install CUDA 10.0. -6. Create and enter the /paddle/build path: +4. After entering Docker, enter the Paddle Directory: + + `cd paddle` + +5. Switch to a more stable release branch for compilation: + + `git checkout [name of the branch]` + + For example: + + `git checkout release/1.5` + + Note: python3.6、python3.7 version started supporting from release/1.2 branch + +6. Create and enter /paddle/build Directory: `mkdir -p /paddle/build && cd /paddle/build` -7. Use the following command to install the dependencies: (For Python3: Please select the pip for the python version you wish to use, such as pip3.5, pip3.6) +7. Use the following command to install dependencies: - For Python2: pip install protobuf==3.1.0 - For Python3: pip3.5 install protobuf==3.1.0 + For Python2: pip install protobuf + For Python3: pip3.5 install protobuf + Note: We used Python3.5 command as an example above, if the version of your Python is 3.6/3.7, please change Python3.5 in the commands to Python3.6/Python3.7 - > Install protobuf 3.1.0. + > Install protobuf `apt install patchelf` - > Installing patchelf, PatchELF is a small and useful program for modifying the dynamic linker and RPATH of ELF executables. + > Install patchelf + This is a small but useful program, it can be used to modify dynamic linker and RPATH of ELF executable -8. Execute cmake: +8. Execute cmake: - > For the meaning of the specific compiling options, [compilation options table](../Tables.html/#Compile) is your resort. Please note that the parameter `-DPY_VERSION` is the python version used in your current environment. + > For the specific meaning of compilation options, you can read [Compile options table](../Tables.html#Compile) - * For users who need to compile the **CPU version PaddlePaddle**: + > Please attention to modify parameters `-DPY_VERSION` for the version of Python you want to compile with, for example `-DPY_VERSION=3.5` means the version of python is 3.5.x - `cmake .. 
-DPY_VERSION=3.5 -DWITH_FLUID_ONLY=ON -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release` + * Compile**CPU version of PaddlePaddle**: - * For users who need to compile the **GPU version PaddlePaddle**: + `cmake .. -DPY_VERSION=3.5 -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release` - `cmake .. -DPY_VERSION=3.5 -DWITH_FLUID_ONLY=ON -DWITH_GPU=ON -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release` + * Compile**GPU version of PaddlePaddle**: -9. Execute compilation: + `cmake .. -DPY_VERSION=3.5 -DWITH_GPU=ON -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release` + +9. Execute compiling: `make -j$(nproc)` > Use multicore compilation -10. After compiling successfully, go to the `/paddle/build/python/dist` directory and find the generated `.whl` package: `cd /paddle/build/python/dist` +10. after compiling successful, enter `/paddle/build/python/dist` Directory and find generated `.whl` package: `cd /paddle/build/python/dist` -11. Install the compiled `.whl` package on the current machine or target machine: (For Python3: Please select the pip corresponding to the python version you wish to use, such as pip3.5, pip3.6) +11. Install the compiled `.whl` package on the current machine or target machine: - For Python2: pip install (whl package name) - For Python3: pip3.5 install (whl package name) + For Python2: pip install -U(whl package name) + For Python3: pip3.5 install -U(whl package name) + Note: For the command involving Python 3, we use Python 3.5 as an example above, if the version of your Python is 3.6/3.7, please change Python3.5 in the commands to Python3.6/Python3.7 -Now that you have successfully installed PaddlePaddle using Docker, you only need to run PaddlePaddle after entering the Docker container. For more Docker usage, please refer to the [official Docker documentation](https://docs.docker.com/). +Congratulations, now you have completed the compilation and installation of PaddlePaddle. You only need to enter the Docker container and run PaddlePaddle to start using. For more Docker usage, please refer to [official docker documentation](https://docs.docker.com) > Note: In order to reduce the size, `vim` is not installed in PaddlePaddle Docker image by default. You can edit the code in the container after executing `apt-get install -y vim` in the container. -Congratulations, you have now completed the process of compiling PaddlePaddle using Docker. - - -
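Putting the Docker steps above together, a condensed sketch of the GPU build could look like this. Python 3.5 and the release/1.5 branch are only examples taken from the steps shown, and `*.whl` stands for whatever package name your build produces:

    # On the host: start the build container (GPU variant, step 3)
    nvidia-docker run --name paddle-test -v $PWD:/paddle --network=host -it hub.baidubce.com/paddlepaddle/paddle:latest-dev /bin/bash
    # Inside the container (steps 4-11):
    cd /paddle
    git checkout release/1.5
    pip3.5 install protobuf
    apt install patchelf
    mkdir -p /paddle/build && cd /paddle/build
    cmake .. -DPY_VERSION=3.5 -DWITH_GPU=ON -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release
    make -j$(nproc)
    cd /paddle/build/python/dist
    pip3.5 install -U *.whl    # use the pip that matches the Python version you compiled for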

+ ### ***Local compilation*** **Please strictly follow the following instructions step by step** @@ -114,26 +164,29 @@ Congratulations, you have now completed the process of compiling PaddlePaddle us 3. We support compiling and installing with virtualenv. First, create a virtual environment called `paddle-venv` with the following command: - * a. Install Python-dev: (Please install python3.x-dev that matches the current environment python version) + * a. Install Python-dev: (Please note that gcc4.8 is not supported in python2.7 under Ubuntu 16.04, please use gcc5.4 to compile Paddle) For Python2: apt install python-dev For Python3: apt install python3.5-dev - * b. Install pip: (Please ensure that pip version is 9.0.1 and above ): (Please note that the version corresponding to python3 is modified) + * b. Install pip: (Please ensure that pip version is 9.0.1 and above ): For Python2: apt install python-pip For Python3: apt-get udpate && apt-get install -y software-properties-common && add-apt-repository ppa:deadsnakes/ppa && apt install curl && curl https://bootstrap.pypa.io/get-pip. Py -o - | python3.5 && easy_install pip - * c. Install the virtual environment `virtualenv` and `virtualenvwrapper` and create a virtual environment called `paddle-venv` (please note the python version) : + * c. Install the virtual environment `virtualenv` and `virtualenvwrapper` and create a virtual environment called `paddle-venv` : - 1. `apt install virtualenv` or `pip install virtualenv` or `pip3 install virtualenv` + 1. `apt install virtualenv` or `pip install virtualenv` or `pip3 install virtualenv` 2. `apt install virtualenvwrapper` or `pip install virtualenvwrapper` or `pip3 install virtualenvwrapper` 3. Find `virtualenvwrapper.sh`: `find / -name virtualenvwrapper.sh` 4. (Only for Python3) Set the interpreter path for the virtual environment: `export VIRTUALENVWRAPPER_PYTHON=/usr/bin/python3.5` - 5. See the installation method in `virtualenvwrapper.sh`: `cat virtualenvwrapper.sh` + 5. See the installation method in `virtualenvwrapper.sh`: `cat virtualenvwrapper.sh`, this shell file describes the steps and commands 6. Install `virtualwrapper` according to the installation method in `virtualenvwrapper.sh` - 7. Create a virtual environment called `paddle-venv`: `mkvirtualenv paddle-venv` + 7. Set VIRTUALENVWRAPPER_PYTHON:`export VIRTUALENVWRAPPER_PYTHON=[python-lib-path]:$PATH` (Here, replace the last two directories of [python-lib-path] with /bin/) + 8. Create a virtual environment called `paddle-venv`: `mkvirtualenv paddle-venv` + + Note: for the above commands involving Python 3, we use Python 3.5 as an example. If your Python version is 3.6 / 3.7, please change Python 3.5 in the above commands to Python 3.6 / Python 3.7 4. Enter the virtual environment: `workon paddle-venv` @@ -155,6 +208,10 @@ Congratulations, you have now completed the process of compiling PaddlePaddle us - `git checkout [name of target branch]` + For example: + + `git checkout release/1.5` + 8. And please create and enter a directory called build: `mkdir build && cd build` @@ -185,12 +242,16 @@ Congratulations, you have now completed the process of compiling PaddlePaddle us For Python3: cmake .. 
-DPY_VERSION=3.5 -DWITH_FLUID_ONLY=ON -DWITH_GPU=ON -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release - > `-DPY_VERSION=3.5` Please change to the Python version of the installation environment + Note: We used Python3.5 command as an example above, if the version of your Python is 3.6/3.7, please change Python3.5 in the commands to Python3.6/Python3.7 10. Compile with the following command: `make -j$(nproc)` + > compile using multi-core + + > If “Too many open files” error is displayed during compilation, please use the instruction ulimit -n 8192 to increase the number of files allowed to be opened by the current process. Generally speaking, 8192 can ensure the completion of compilation. + 11. After compiling successfully, go to the `/paddle/build/python/dist `directory and find the generated `.whl` package: `cd /paddle/build/python/dist` 12. Install the compiled `.whl` package on the current machine or target machine: @@ -202,12 +263,16 @@ Congratulations, now you have completed the process of compiling PaddlePaddle na
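If the compilation aborts with a "Too many open files" error, the note above suggests raising the per-process limit and re-running the build, for example:

    ulimit -n 8192     # raise the open-file limit for the current shell, as suggested in the note above
    make -j$(nproc)    # then re-run the compilation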

### ***Verify installation*** -After the installation is complete, you can use `python` or `python3` to enter the Python interpreter and then use `import paddle.fluid` to verify that the installation was successful. +After the installation is complete, you can use `python` or `python3` to enter the Python interpreter, run `import paddle.fluid as fluid`, and then run `fluid.install_check.run_check()` to verify that the installation was successful. + +If `Your Paddle Fluid is installed succesfully!` appears, the installation was successful.
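Besides `run_check()`, it can be handy to confirm which version was actually installed; printing `paddle.__version__` (available in recent releases) is one way to do that, shown here only as a supplementary check rather than an official step:

    # Optional supplementary check: print the installed PaddlePaddle version
    python3 -c "import paddle; print(paddle.__version__)"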

### ***How to uninstall*** -Please use the following command to uninstall PaddlePaddle (users who use Docker to install PaddlePaddle should use the following command in the container containing PaddlePaddle. Please use the corresponding version of pip): +Please use the following command to uninstall PaddlePaddle: - ***CPU version of PaddlePaddle***: `pip uninstall paddlepaddle` or `pip3 uninstall paddlepaddle` - ***GPU version of PaddlePaddle***: `pip uninstall paddlepaddle-gpu` or `pip3 uninstall paddlepaddle-gpu` + +Users installing PaddlePaddle with Docker, please use above commands in the container involved PaddlePaddle and attention to use the corresponding version of Pip diff --git a/doc/fluid/install/compile/compile_Windows.md b/doc/fluid/install/compile/compile_Windows.md index deaa800ea4b95877ee91b198966c6d195c3f9ca7..a7c551c4179b49abbed6c4da90110cc74399f5f5 100644 --- a/doc/fluid/install/compile/compile_Windows.md +++ b/doc/fluid/install/compile/compile_Windows.md @@ -2,9 +2,10 @@ ## 环境准备 -* **Windows 7/8/10 专业版/企业版 (64bit) (GPU版本支持CUDA 9.0/10.0, 且仅支持单卡)** -* **Python 版本 2.7/3.5.1+/3.6/3.7 (64 bit)** -* **pip 或 pip3 版本 9.0.1+ (64 bit)** +* **Windows 7/8/10 专业版/企业版 (64bit)** + * **GPU版本支持CUDA 9.0/9.1/9.2/10.0/10.1,且仅支持单卡** +* **Python 版本 2.7.15+/3.5.1+/3.6/3.7 (64 bit)** +* **pip 版本 9.0.1+ (64 bit)** * **Visual Studio 2015 Update3** ## 选择CPU/GPU @@ -12,7 +13,7 @@ * 如果您的计算机没有 NVIDIA® GPU,请编译CPU版的PaddlePaddle * 如果您的计算机有NVIDIA® GPU,并且满足以下条件,推荐编译GPU版的PaddlePaddle - * **CUDA 工具包9.0/10.0配合cuDNN v7.3+** + * **CUDA 工具包 9.0/9.1/9.2/10.0/10.1 配合 cuDNN v7.3+** * **GPU运算能力超过1.0的硬件设备** ## 安装步骤 @@ -26,19 +27,21 @@ 1. 安装必要的工具 cmake,git 以及 python: - > cmake 需要 3.5 及以上版本, 可在官网[下载](https://cmake.org/download/),并添加到环境变量中。 + > cmake我们支持3.10以上版本,但GPU编译时3.12/3.13/3.14版本存在官方[Bug](https://cmake.org/pipermail/cmake/2018-September/068195.html),我们建议您使用CMake3.16版本,可在官网[下载](https://cmake.org/download/),并添加到环境变量中。 > python 需要 2.7 及以上版本, 可在官网[下载](https://www.python.org/download/releases/2.7/)。 - > 需要安装`numpy, protobuf, wheel` 。python2.7下, 请使用`pip`命令; 如果是python3.x, 请使用`pip3`命令。 + * 安装完python 后请通过 `python --version` 检查python版本是否是预期版本,因为您的计算机可能安装有多个python,您可通过修改环境变量的顺序来处理多个python时的冲突。 - * 安装 numpy 包可以通过命令 `pip install numpy` 或 `pip3 install numpy` - * 安装 protobuf 包可以通过命令 `pip install protobuf` 或 `pip3 install protobuf` - * 安装 wheel 包可以通过命令 `pip install wheel` 或 `pip3 install wheel` + > 需要安装`numpy, protobuf, wheel` 。 请使用`pip`命令; + + * 安装 numpy 包可以通过命令 `pip install numpy` + * 安装 protobuf 包可以通过命令 `pip install protobuf` + * 安装 wheel 包可以通过命令 `pip install wheel` > git可以在官网[下载](https://gitforwindows.org/),并添加到环境变量中。 -2. 将PaddlePaddle的源码clone在当下目录下的Paddle的文件夹中,并进入Padde目录下: +2. 将PaddlePaddle的源码clone在当前目录下的Paddle的文件夹中,并进入Padde目录下: - `git clone https://github.com/PaddlePaddle/Paddle.git` - `cd Paddle` @@ -49,7 +52,7 @@ 例如: - `git checkout release/1.5` + `git checkout release/1.7` 注意:python3.6、python3.7版本从release/1.2分支开始支持 @@ -76,9 +79,9 @@ 如果你的设备信息包含多个Python或CUDA版本,你也可以通过设置路径变量,来指定特定版本的Python或CUDA: - > -DPYTHON_EXECUTABLE 为python的可执行程序(python.exe)的路径 + > -DPYTHON_EXECUTABLE: python的安装目录 - > -DCUDA_TOOLKIT_ROOT_DIR 为cuda安装目录的根路径 + > -DCUDA_TOOLKIT_ROOT_DIR: cuda的安装目录 例如:(仅作示例,请根据你的设备路径信息进行设置) @@ -90,9 +93,9 @@ `cd \Paddle\build\python\dist` -8. 在当前机器或目标机器安装编译好的 `.whl` 包: +8. 
安装编译好的 `.whl` 包: - `pip install -U(whl包的名字)` 或 `pip3 install -U(whl包的名字)` + `pip install -U(whl包的名字)` 恭喜,至此您已完成PaddlePaddle的编译安装 @@ -105,8 +108,7 @@ ## **如何卸载** 请使用以下命令卸载PaddlePaddle: -* **CPU版本的PaddlePaddle**: `pip uninstall paddlepaddle` 或 `pip3 uninstall paddlepaddle` +* **CPU版本的PaddlePaddle**: `python -m pip uninstall paddlepaddle` -* **GPU版本的PaddlePaddle**: `pip uninstall paddlepaddle-gpu` 或 `pip3 uninstall paddlepaddle-gpu` +* **GPU版本的PaddlePaddle**: `python -m pip uninstall paddlepaddle-gpu` -使用Docker安装PaddlePaddle的用户,请进入包含PaddlePaddle的容器中使用上述命令,注意使用对应版本的pip diff --git a/doc/fluid/install/compile/compile_Windows_en.md b/doc/fluid/install/compile/compile_Windows_en.md index d0f32024ae740ade747084d9ce10afe5e88ca93d..6af8dfb3310e0b86085368c031eae7ee05e718c9 100644 --- a/doc/fluid/install/compile/compile_Windows_en.md +++ b/doc/fluid/install/compile/compile_Windows_en.md @@ -1,99 +1,118 @@ # **Compile on Windows from Source Code** -This instruction will show you how to compile PaddlePaddle on a *64-bit desktop or laptop* and Windows 10. The Windows systems we support must meet the following requirements: +## Environment preparation -* Windows 10 Professional Edition / Enterprise Edition -* Visual Studio 2015 Update3 +* **Windows 7/8/10 Pro/Enterprise(64bit)** + * **GPU Version support CUDA 9.0/9.1/9.2/10.0/10.1, and only support single GPU** +* **Python version 2.7.15+/3.5.1+/3.6/3.7(64bit)** +* **pip version 9.0.1+(64bit)** +* **Visual Studio 2015 Update3** -## Choose a compilation method +## Choose CPU/GPU -We provide one compilation method under the Windows system: +* If your computer doesn't have NVIDIA® GPU, please install CPU version of PaddlePaddle -* Direct source code compilation +* If your computer has NVIDIA® GPU, and the following conditions are met,GPU version of PaddlePaddle is recommended. + * **CUDA toolkit 9.0/9.1/9.2/10.0/10.1 with cuDNN v7.3+** + * **GPU's computing capability exceeds 1.0** -Since the situation on host machine is more complicated, we only support specific systems. +## Installation steps -Please note: The current version does not support NCCL and distributed related functions. +There is one compilation methods in Windows system: +* Direct native source code compilation(NCCL, distributed and other related functions are not supported temporarily) -### ***Local compilation*** + +### ***Direct native source code compilation*** **Please strictly follow the following instructions step by step** -1. Check that your computer and operating system meet our supported compilation standards +1. Install the necessary tools i.e. cmake, git and python: - * Windows 10 Professional Edition / Enterprise Edition + > CMake requires version 3.10 and above, but there are official [Bug](https://cmake.org/pipermail/cmake/2018-September/068195.html) versions of 3.12/3.13/3.14 when the GPU is compiled, we recommend that you use CMake3. 16 version, available on the official website [download] (https://cmake.org/download/), and add to the ring Environment variables. - * Visual Studio 2015 Update3 + > Python requires version 2.7 and above, which can be downloaded from the [official website](https://www.python.org/download/releases/2.7/). -2. Install the necessary tools i.e. cmake, git and python : + * After installing python, please check whether the python version is the expected version by `python-version`, because you may have more than one python installed on your computer. You can handle conflicts of multiple pythons by changing the order of the environment variables. 
- > Cmake requires version 3.0 and above, which can be downloaded from the official website and added to the environment variable. [Download here](https://cmake.org/download/). + > `numpy, protobuf, wheel` are needed to be installed. Use the 'pip' command. - > Git can be downloaded on the official website and added to the environment variable. [Download here](https://gitforwindows.org/). + * To Install numpy package you can use command `pip install numpy` - > Python requires version 2.7 and above, and ensure that modules such as numpy, protobuf, wheel are installed. [Download here](https://www.python.org/download/releases/2.7/). + * To Install protobuf package you can use command `pip install protobuf` + * To Install Wheel package you can use command `pip install wheel` - * To Install numpy package you can use command `pip install numpy` or command `pip3 install numpy` + > Git can be downloaded on the [official website](https://gitforwindows.org/) and added to the environment variable. - * To Install protobuf package you can use command `pip install protobuf` or command `pip3 install protobuf` +2. Clone the PaddlePaddle source code to the Paddle subdirectories of the current directory and go to the Paddle subdirectories: - * To Install Wheel package you can use command `pip install wheel` or `pip3 install wheel` + - `git clone https://github.com/PaddlePaddle/Paddle.git` + - `cd Paddle` +3. Switch to a more stable release branch for compilation: -3. Clone the PaddlePaddle source in the Paddle folder in the current directory and go to the Paddle directory: + `git checkout [name of the branch]` - - `git clone https://github.com/PaddlePaddle/Paddle.git` - - `cd Paddle` + For example: -4. Switch to a more stable release branch for compilation : + `git checkout release/1.7` - - `git checkout release/1.5` + Note: python3.6、python3.7 version started supporting from release/1.2 -5. Create a directory called build and enter it: +4. Create a directory called build and enter it: - `mkdir build` - `cd build` -6. Execute cmake: +5. Execute cmake: > For details on the compilation options, see [the compilation options list](../Tables.html/#Compile). * For users who need to compile **the CPU version PaddlePaddle**: - For Python2:`cmake .. -G "Visual Studio 14 2015 Win64" -DPYTHON_INCLUDE_DIR = $ {PYTHON_INCLUDE_DIRS} - -DPYTHON_LIBRARY = $ {PYTHON_LIBRARY} - -DPYTHON_EXECUTABLE = $ {PYTHON_EXECUTABLE} -DWITH_FLUID_ONLY = ON -DWITH_GPU = OFF -DWITH_TESTING = OFF -DCMAKE_BUILD_TYPE =Release` + `cmake .. -G "Visual Studio 14 2015 Win64" -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release` + * For users who need to compile **the GPU version PaddlePaddle**: - For Python3: `cmake .. -G "Visual Studio 14 2015 Win64" -DPY_VERSION = 3.5 -DPYTHON_INCLUDE_DIR = $ {PYTHON_INCLUDE_DIRS} - -DPYTHON_LIBRARY = $ {PYTHON_LIBRARY} - -DPYTHON_EXECUTABLE = $ {PYTHON_EXECUTABLE} -DWITH_FLUID_ONLY = ON -DWITH_GPU = OFF -DWITH_TESTING =OFF -DCMAKE_BUILD_TYPE=Release` + `cmake .. -G "Visual Studio 14 2015 Win64" -DWITH_GPU=ON -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release` + Python2 by default,Python3 please add: -7. Some third-party dependencies (openblas, snappystream) currently require users to provide pre-compiled versions, or download pre-compiled files from `https://github.com/wopeizl/Paddle_deps` and place the entire `third_party` folder in the `build` directory. + > -DPY_VERSION=3 (or 3.5、3.6、3.7) -8. 
Use Blend for Visual Studio 2015 to open `paddle.sln` file, select the platform `x64`, configure with `Release`, then begin to compile + If your device information contains multiple Python or CUDA, you can also specify a specific version of Python or CUDA by setting the corresponding compile options: -9. Having compiled successfully, go to the `\paddle\build\python\dist`directory and find the generated `.whl` package: + > -DPYTHON_EXECUTABLE: the installation path of python - `cd \paddle\build\python\dist` + > -DCUDA_TOOLKIT_ROOT_DIR: the installation path of CUDA + + For example: (for instance only, please set it according to your actual installation path) -10. Install the compiled `.whl` package on the current machine or target machine: + `cmake .. -G "Visual Studio 14 2015 Win64" -DCMAKE_BUILD_TYPE=Release -DWITH_GPU=ON -DWITH_TESTING=OFF -DPYTHON_EXECUTABLE=C:\\Python36\\python.exe -DCUDA_TOOLKIT_ROOT_DIR="C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\v10.0"` - `pip install (whl package name)` or `pip3 install (whl package name)` +6. Use Blend for Visual Studio 2015 to open `paddle.sln` file, select the platform `x64`, configure with `Release`, then begin to compile -Congratulations, now you have completed the process of compiling PaddlePaddle natively. +7. After compilation successfully, go to the `\paddle\build\python\dist` directory and find the generated `.whl` package: + `cd \paddle\build\python\dist` + +8. Install the generated `.whl` package: + + `pip install -U (whl package name)` + +Congratulations, you have completed the process of compiling PaddlePaddle successfully! ### ***Verify installation*** -After the installation is complete, you can use: `python` to enter the Python interpreter and then use `import paddle.fluid`. If there is no error prompted, the installation is successful. +After the compilation and installation is completed, you can use `python` to enter the Python interface, input `import paddle.fluid as fluid` and then `fluid.install_check.run_check()` to verify that the installation was successful. + +If `Your Paddle Fluid is installed succesfully!` appears, it means the compilation and installation was successful. 
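As a compact recap of steps 7 and 8 together with the verification above, the following can be run from the Windows command prompt; the `(whl package name)` placeholder must be replaced with the file your build generated:

    rem Recap of steps 7-8 plus the verification described above (Windows command prompt)
    cd \paddle\build\python\dist
    dir *.whl
    rem replace (whl package name) below with the file name listed by dir
    pip install -U (whl package name)
    python -c "import paddle.fluid as fluid; fluid.install_check.run_check()"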
+ ### ***How to uninstall*** Please use the following command to uninstall PaddlePaddle: -* ***CPU version of PaddlePaddle*** : `pip uninstall paddlepaddle` or `pip3 uninstall paddlepaddle` +* ***CPU version of PaddlePaddle*** : `pip uninstall paddlepaddle` -* ***GPU version of PaddlePaddle*** : `pip uninstall paddlepaddle-gpu` or `pip3 uninstall paddlepaddle-gpu` +* ***GPU version of PaddlePaddle*** : `pip uninstall paddlepaddle-gpu` diff --git a/doc/fluid/install/index_cn.rst b/doc/fluid/install/index_cn.rst index a64cfb79562f5865ef29689b3656b5f777b098b0..d6a6f0b2cd0d3f60fefb7e2a44f7d9445fc3466b 100644 --- a/doc/fluid/install/index_cn.rst +++ b/doc/fluid/install/index_cn.rst @@ -32,7 +32,7 @@ ================================= * 目前 **PaddlePaddle** 仅支持 **NVIDIA** 显卡的 **CUDA** 驱动 -* 需要安装 `cuDNN `_ ,版本要求 7.3+(For CUDA9/10) +* 需要安装 `cuDNN `_ ,版本要求 7.6+(For CUDA9/10) * 如果您需要 GPU 多卡模式,需要安装 `NCCL 2 `_ * 仅 Ubuntu/CentOS 支持 NCCL 2 技术 @@ -181,20 +181,13 @@ 如果您是使用 Python 2,安装CPU版本的命令为: :: - python -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple + python -m pip install paddlepaddle==2.0.0a0 -i https://mirror.baidu.com/pypi/simple 或 - python -m pip install paddlepaddle -i https://pypi.tuna.tsinghua.edu.cn/simple - - 如果您是使用 Python 3,安装CPU版本的命令为: - :: - - python3 -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple + python -m pip install paddlepaddle==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple - 或 - - python3 -m pip install paddlepaddle -i https://pypi.tuna.tsinghua.edu.cn/simple + 如果您是使用 Python 3,请将上述命令中的 **python** 更换为 **python3** 进行安装 (2). **GPU版本** :如果您想使用GPU版本请参考如下命令安装 @@ -202,32 +195,14 @@ * 需要您确认您的 GPU 满足上方列出的要求 - 如果您是使用 Python2,请注意用以下指令安装的PaddlePaddle在Windows、Ubuntu、CentOS下默认支持CUDA10.0: + 如果您是使用 Python2,请注意用以下指令安装的PaddlePaddle在Windows、Ubuntu、CentOS下只支持CUDA10.0: :: - python -m pip install paddlepaddle-gpu -i https://mirror.baidu.com/pypi/simple + python -m pip install paddlepaddle-gpu==2.0.0a0 -i https://mirror.baidu.com/pypi/simple 或 - python -m pip install paddlepaddle-gpu -i https://pypi.tuna.tsinghua.edu.cn/simple - - 如果您是使用 Python 2,CUDA 9,cuDNN 7.3+,安装GPU版本的命令为: - :: - - python -m pip install paddlepaddle-gpu==1.7.0.post97 -i https://mirror.baidu.com/pypi/simple - - 或 - - python -m pip install paddlepaddle-gpu==1.7.0.post97 -i https://pypi.tuna.tsinghua.edu.cn/simple - - 如果您是使用 Python 2,CUDA 10.0,cuDNN 7.3+,安装GPU版本的命令为: - :: - - python -m pip install paddlepaddle-gpu==1.7.0.post107 -i https://mirror.baidu.com/pypi/simple - - 或 - - python -m pip install paddlepaddle-gpu==1.7.0.post107 -i https://pypi.tuna.tsinghua.edu.cn/simple + python -m pip install paddlepaddle-gpu==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple 如果您是使用 Python 3,请将上述命令中的 **python** 更换为 **python3** 进行安装。 @@ -248,275 +223,10 @@ `Windows下安装 `_ -第二种安装方式:使用 conda 安装 -================================ - -您可以选择“使用pip安装”、“使用conda安装”、“使用docker安装”、“从源码编译安装” 四种方式中的任意一种方式进行安装。 - -本节将介绍使用 conda 的安装方式。 - -1. 需要您确认您的 操作系统 满足上方列出的要求 - -2. 需要您确认您的 处理器 满足上方列出的要求 - -3. 对于国内用户无法连接到Anaconda官方源的可以按照以下命令添加清华源进行安装。 - - :: - - conda config --add channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free/ - conda config --add channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/main/ - conda config --add channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/Paddle/ - conda config --set show_channel_urls yes - -4. 
如果您需要新建 conda 的虚拟环境专门给 Paddle 使用(--name后边的环境名称,您可以自己选择): - - 如果您是使用 Python2 并且在 Window 环境下 - - :: - - conda create --name paddle python=2.7 - activate paddle - - 如果您是使用 Python2 并且在 MacOS/Linux 环境下 - - :: - - conda create --name paddle python=2.7 - conda activate paddle - - 如果您是使用 Python3 并且在 Window 环境下,注意:python3版本可以是3.5.1+/3.6/3.7 - - :: - - conda create --name paddle python=3.7 - activate paddle - - 如果您是使用 Python3 并且在 MacOS/Linux 环境下,注意:python3版本可以是3.5.1+/3.6/3.7 - - :: - - conda create --name paddle python=3.7 - conda activate paddle - -5. 确认您需要安装 PaddlePaddle 的 Python 是您预期的位置,因为您计算机可能有多个 Python,进入 Anaconda 的命令行终端,输入以下指令确认 Python 位置 - - 如果您是使用 Python 2,使用以下命令输出 Python 路径,根据您的环境您可能需要将说明中所有命令行中的 python 替换为具体的 Python 路径 - - 在 Windows 环境下,输出 Python 路径的命令为: - - :: - - where python - - 在 MacOS/Linux 环境下,输出 Python 路径的命令为: - - :: - - which python - - 如果您是使用 Python 3,使用以下命令输出 Python 路径,根据您的环境您可能需要将说明中所有命令行中的 python3 替换为 python 或者替换为具体的 Python 路径 - - 在 Windows 环境下,输出 Python 路径的命令为: - - :: - - where python3 - - 在 MacOS/Linux 环境下,输出 Python 路径的命令为: - - :: - - which python3 - -6. 检查 Python 的版本 - - 如果您是使用 Python 2,使用以下命令确认是 2.7.15+ - :: - - python --version - - 如果您是使用 Python 3,使用以下命令确认是 3.5.1+/3.6/3.7 - :: - - python3 --version - -7. 检查 pip 的版本,确认是 9.0.1+ - - 如果您是使用 Python 2 - :: - - python -m ensurepip - python -m pip --version - - 如果您是使用 Python 3 - :: - - python3 -m ensurepip - python3 -m pip --version - -8. 确认 Python 和 pip 是 64 bit,并且处理器架构是x86_64(或称作 x64、Intel 64、AMD64)架构,目前PaddlePaddle不支持arm64架构。下面的第一行输出的是 "64bit" ,第二行输出的是 "x86_64" 、 "x64" 或 "AMD64" 即可: - - 如果您是使用 Python 2 - :: - - python -c "import platform;print(platform.architecture()[0]);print(platform.machine())" - - 如果您是使用 Python 3 - :: - - python3 -c "import platform;print(platform.architecture()[0]);print(platform.machine())" - -9. 如果您希望使用 conda 进行安装PaddlePaddle可以直接使用以下命令: - - (1). **CPU版本** :如果您只是想安装CPU版本请参考如下命令安装 - - :: - - conda install paddlepaddle - - - (2). **GPU版本** :如果您想使用GPU版本请参考如下命令安装 - - 注意: - - * 需要您确认您的 GPU 满足上方列出的要求 - - 如果您是使用 CUDA 8,cuDNN 7.1+,安装GPU版本的命令为: - :: - - conda install paddlepaddle-gpu cudatoolkit=8.0 - - 如果您是使用 CUDA 9,cuDNN 7.3+,安装GPU版本的命令为: - :: - - conda install paddlepaddle-gpu cudatoolkit=9.0 - - - 如果您是使用 CUDA 10.0,cuDNN 7.3+,安装GPU版本的命令为: - :: - - conda install paddlepaddle-gpu cudatoolkit=10.0 - -10. 验证安装 - - 使用 python 或 python3 进入python解释器,输入import paddle.fluid ,再输入 paddle.fluid.install_check.run_check()。 - - 如果出现 Your Paddle Fluid is installed succesfully!,说明您已成功安装。 - -11. 更多帮助信息请参考: - - `conda下安装 `_ - - -第三种安装方式:使用 docker 安装 -================================ - -您可以选择“使用pip安装”、“使用conda安装”、“使用docker安装”、“从源码编译安装” 四种方式中的任意一种方式进行安装。 - -本节将介绍使用 docker 的安装方式。 - -如果您希望使用 `docker `_ 安装PaddlePaddle,可以使用以下命令: - -1. **CPU 版本** - - (1). 首先需要安装 `docker `_ - - 注意: - - * CentOS 6 不支持 docker 方式安装 - - * 处理器需要支持 MKL - - (2). 拉取预安装 PaddlePaddle 的镜像: - :: - - docker pull hub.baidubce.com/paddlepaddle/paddle:1.7.0 - - (3). 用镜像构建并进入Docker容器: - :: - - docker run --name paddle -it -v dir1:dir2 hub.baidubce.com/paddlepaddle/paddle:1.7.0 /bin/bash - - > --name [Name of container] 设定Docker的名称; - - > -it 参数说明容器已和本机交互式运行; - - > -v 参数用于宿主机与容器里文件共享;其中dir1为宿主机目录,dir2为挂载到容器内部的目录,用户可以通过设定dir1和dir2自定义自己的挂载目录;例如:$PWD:/paddle 指定将宿主机的当前路径(Linux中PWD变量会展开为当前路径的绝对路径)挂载到容器内部的 /paddle 目录; - - > hub.baidubce.com/paddlepaddle/paddle:1.7.0 是需要使用的image名称;/bin/bash是在Docker中要执行的命令 - -2. **GPU 版本** - - (1). 
首先需要安装 `nvidia-docker `_ - - 注意: - - * 处理器需要支持 MKL - - * 您的计算机需要具有支持 CUDA 驱动的 NVIDIA 显卡 - - * 需要安装 `cuDNN `_ ,版本要求 7.3+(For CUDA9/10), 7.1+(For CUDA 8) - - * 如果您需要 GPU 多卡模式,需要安装 `NCCL 2 `_ - - * 仅 Ubuntu/CentOS 支持 NCCL 2 技术 - - * 需要安装 `CUDA `_ ,根据您系统不同,对 CUDA 版本要求不同: - - * Ubuntu/CentOS 7 ,如果您是使用 nvidia-docker 安装,支持 CUDA 8.0/9.0/9.1/9.2/10.0 - - * Windows/MacOS/CentOS 6 不支持 nvidia-docker 方式安装 - - - (2). 拉取支持 CUDA 10.0 , cuDNN 7.3+ 预安装 PaddlePaddle 的镜像: - :: - - nvidia-docker pull hub.baidubce.com/paddlepaddle/paddle:1.7.0-gpu-cuda10.0-cudnn7 - - (3). 用镜像构建并进入Docker容器: - :: - - nvidia-docker run --name paddle -it -v dir1:dir2 hub.baidubce.com/paddlepaddle/paddle:1.7.0-gpu-cuda10.0-cudnn7 /bin/bash - - > --name [Name of container] 设定Docker的名称; - - > -it 参数说明容器已和本机交互式运行; - - > -v 参数用于宿主机与容器里文件共享;其中dir1为宿主机目录,dir2为挂载到容器内部的目录,用户可以通过设定dir1和dir2自定义自己的挂载目录;例如:$PWD:/paddle 指定将宿主机的当前路径(Linux中PWD变量会展开为当前路径的绝对路径)挂载到容器内部的 /paddle 目录; - - > hub.baidubce.com/paddlepaddle/paddle:1.7.0-gpu-cuda10.0-cudnn7 是需要使用的image名称;/bin/bash是在Docker中要执行的命令 - - 或如果您需要支持 **CUDA 9** 的版本,将上述命令的 **cuda10.0** 替换成 **cuda9.0** 即可 - -3. 如果您的机器不在中国大陆地区,可以直接从DockerHub拉取镜像: - - :: - - docker run --name paddle -it -v dir1:dir2 paddlepaddle/paddle:1.7.0 /bin/bash - - > --name [Name of container] 设定Docker的名称; - - > -it 参数说明容器已和本机交互式运行; - - > -v 参数用于宿主机与容器里文件共享;其中dir1为宿主机目录,dir2为挂载到容器内部的目录,用户可以通过设定dir1和dir2自定义自己的挂载目录;例如:$PWD:/paddle 指定将宿主机的当前路径(Linux中PWD变量会展开为当前路径的绝对路径)挂载到容器内部的 /paddle 目录; - - > paddlepaddle/paddle:1.7.0 是需要使用的image名称;/bin/bash是在Docker中要执行的命令 - -4. 验证安装 - - 使用 python 或 python3 进入python解释器,输入import paddle.fluid ,再输入 paddle.fluid.install_check.run_check()。 - - 如果出现 Your Paddle Fluid is installed succesfully!,说明您已成功安装。 - -5. 更多帮助信息请参考: - - `使用Docker安装 `_ - - -第四种安装方式:使用源代码编译安装 +第二种安装方式:使用源代码编译安装 ==================================== -- 如果您只是使用 PaddlePaddle ,建议从 **pip** 和 **conda** 、 **docker** 三种安装方式中选取一种进行安装即可。 +- 如果您只是使用 PaddlePaddle ,建议使用 **pip** 安装即可。 - 如果您有开发PaddlePaddle的需求,请参考:`从源码编译 `_ .. toctree:: @@ -526,8 +236,6 @@ install_CentOS.md install_MacOS.md install_Windows.md - install_Conda.md - install_Docker.md compile/fromsource.rst Tables.md diff --git a/doc/fluid/install/index_en.rst b/doc/fluid/install/index_en.rst index d5eeed5197ff76b414182a7b3e367d751f1ef350..2c08e52ceacdabbdde459d6d4a020f121fff8dcf 100644 --- a/doc/fluid/install/index_en.rst +++ b/doc/fluid/install/index_en.rst @@ -3,37 +3,235 @@ Installation Manuals ====================== -The manuals will guide you to install and build PaddlePaddle on your 64-bit desktop or laptop. +The manuals will guide you to build and install PaddlePaddle on your 64-bit desktop or laptop. -The versions of Python currently supported: Python 2.7-3.7 +1. Operating system requirements: +============================ -PaddlePaddle currently supports the following environments: +* Windows 7 / 8 / 10, Pro/Enterprise +* Ubuntu 14.04 / 16.04 / 18.04 +* CentOS 6 / 7 +* MacOS 10.11 / 10.12 / 10.13 / 10.14 +* 64-bit operating system is required -* *Ubuntu 14.04 /16.04 /18.04* -* *CentOS 7 / 6* -* *MacOS 10.11 / 10.12 / 10.13 / 10.14* -* *Windows7 / 8/ 10(Pro/Enterprise)* +2. Processor requirements: +============================ +* Processor supports MKL +* The processor architecture is x86_64(or called x64, Intel 64, AMD64). Currently, PaddlePaddle does not support arm64. -Please make sure your environment meets the conditions above. 
-And the installation assumes your computer possesses 64-bit operating system, and AVX instruction set is supported by the processor, otherwise you should use the version of :code:`no_avx` in `whl package list - Dev `_ . +3. Version requirements of python and pip: +============================ +* Python 2 requires version 2.7.15+ +* Python 3 requires version 3.5.1+/3.6/3.7 +* Python needs pip, and pip requires version 9.0.1+ +* Python and pip requires 64-bit -- If you are planning to use `pip `_ to install PaddlePaddle, please type the following commands directly: +4. PaddlePaddle's support for GPU: +================================= -:code:`pip install paddlepaddle` (latest CPU version of PaddlePaddle) +* Currently, **PaddlePaddle** only supports **CUDA** driver of **NVIDIA** graphics card. +* You need to install `cuDNN `_ , and version 7.6+ is required(For CUDA9/10) +* If you need GPU multi-card mode, you need to install `NCCL 2 `_ -:code:`pip install paddlepaddle-gpu` (latest GPU version of PaddlePaddle) + * Only Ubuntu/CentOS support NCCL 2 +* You need to install `CUDA `_ , depending on your system, there are different requirements for CUDA version: -:code:`pip install paddlepaddle==[pip version]` + * Windows install GPU version - where [pip version] can be looked up in `PyPi.org `_ + * Windows 7 / 8 / 10 support CUDA 9.0 / 10.0 single-card mode, but don't support CUDA 9.1/9.2/10.1 + * don't support install using **nvidia-docker** + * Ubuntu install GPU version -- If you are planning to use `docker `_ to install PaddlePaddle, please type the following commands directly: + * Ubuntu 14.04 supports CUDA 10.0/10.1, but doesn't support CUDA 9.0/9.1/9.2 + * Ubuntu 16.04 supports CUDA 9.0/9.1/9.2/10.0/10.1 + * Ubuntu 18.04 supports CUDA 10.0/10.1, but doesn't support CUDA 9.0/9.1/9.2 + * If you install using **nvidia-docker** , it supports CUDA 9.0/9.1/9.2/10.0/10.1 + * CentOS install GPU version -:code:`docker run --name [Name of container] -it -v $PWD:/paddle paddlepaddle/paddle:[docker version] /bin/bash` + * If you install using native **pip** : - where [docker version] can be looked up in `DockerHub `_ + * CentOS 7 supports CUDA 9.0/9.1/9.2/10.0/10.1, CUDA 9.1 supports single-card mode only + * CentOS 6 supports CUDA 9.0/9.1/9.2/10.0/10.1 single-card mode + * If you compile and install using native source code: + + * CentOS 7 supports CUDA 9.0/9.1/9.2/10.0/10.1, CUDA 9.1 supports single-card mode only + * CentOS 6 is not recommended, we don't provide official support in case of compilation problems + * If you install using **nvidia-docker** , CentOS 7 supports CUDA 9.0/9.1/9.2/10.0/10.1 + * MacOS isn't supported: PaddlePaddle has no GPU support in Mac OS platform + +Please make sure your environment meets the above conditions. If you have other requirements, please refer to `Appendix `_ . + +5. 
PaddlePaddle's support for NCCL: +================================= + +* Support for Windows + + * not support NCCL +* Support for Ubuntu + + * Ubuntu 14.04: + + * support NCCL v2.4.2-v2.4.8 under CUDA10.1 + * support NCCL v2.3.7-v2.4.8 under CUDA10.0 + * Ubuntu 16.04: + + * support NCCL v2.4.2-v2.4.8 under CUDA10.1 + * support NCCL v2.3.7-v2.4.8 under CUDA10.0/9.2/9.0 + * support NCCL v2.1.15 under CUDA9.1 + * Ubuntu 18.04: + + * support v2.4.2-v2.4.8 under CUDA10.1 + * support NCCL v2.3.7-v2.4.8 under CUDA10.0 +* Support for CentOS + + * CentOS 6: not support NCCL + * CentOS 7: + + * support NCCL v2.4.2-v2.4.8 under CUDA10.1 + * support NCCL v2.3.7-v2.4.8 under CUDA10.0/9.2/9.0 +* Support for MacOS + + * not support NCCL + + +The first way to install: use pip to install +================================ + +You can choose any of the four ways to install: "use pip to install", "use Conda to install", "use Docker to install", "compiling from the source code" + +This section describes how to use pip to install. + +1. You need to confirm that your operating system meets the requirements listed above + +2. You need to confirm that your processor meets the requirements listed above + +3. Confirm that the Python where you need to install PaddlePaddle is your expected location, because your computer may have multiple Python + + If you are using Python 2, use the following command to output Python path. Depending on your environment, you may need to replace Python in all command lines in the description with specific Python path + + In the Windows environment, the command to output Python path is: + + :: + + where python + + In the MacOS/Linux environment, the command to output Python path is: + + :: + + which python + + If you are using Python 3, use the following command to output Python path. Depending on your environment, you may need to replace Python in all command lines in the description with specific Python path + + In the Windows environment, the command to output Python path is: + + :: + + where python3 + + In the MacOS/Linux environment, the command to output Python path is: + + :: + + which python3 + +4. Check the version of Python + + If you are using Python 2,confirm it is 2.7.15+ using command + :: + + python --version + + If you are using Python 3,confirm it is 3.5.1+/3.6/3.7 using command + :: + + python3 --version + +5. Check the version of pip and confirm it is 9.0.1+ + + If you are using Python 2 + :: + + python -m ensurepip + python -m pip --version + + If you are using Python 3 + :: + + python3 -m ensurepip + python3 -m pip --version + +6. Confirm that Python and pip is 64 bit,and the processor architecture is x86_64(or called x64、Intel 64、AMD64)architecture. Currently, PaddlePaddle doesn't support arm64 architecture. The first line below outputs "64bit", and the second line outputs "x86_64", "x64" or "AMD64" : + + If you use Python 2 + :: + + python -c "import platform;print(platform.architecture()[0]);print(platform.machine())" + + If you use Python 3 + :: + + python3 -c "import platform;print(platform.architecture()[0]);print(platform.machine())" + +7. If you want to use `pip `_ to install PaddlePaddle, you can use the command below directly: + + (1). 
**CPU version** : If you only want to install CPU version, please refer to command below + + If you are using Python 2, command to install CPU version is: + :: + + python -m pip install paddlepaddle==2.0.0a0 -i https://mirror.baidu.com/pypi/simple + + or + + python -m pip install paddlepaddle==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple + + If you are using Python 3, please change **python** in the above command to **python3** and install. + + + (2). **GPU version** : If you only want to install GPU version, please refer to command below + + + Note: + + * You need to confirm that your GPU meets the requirements listed above + + If you are using Python2, please attention that PaddlePaddle installed through command below only supports CUDA10.0 under Windows、Ubuntu、CentOS: + :: + + python -m pip install paddlepaddle-gpu==2.0.0a0 -i https://mirror.baidu.com/pypi/simple + + or + + python -m pip install paddlepaddle-gpu==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple + + If you are using Python 3, please change **python** in the above command to **python3** and install. + +8. Verify installation + + After the installation is complete, you can use `python` or `python3` to enter the Python interpreter and then use `import paddle.fluid as fluid` and then `fluid.install_check.run_check()` to verify that the installation was successful. + + If `Your Paddle Fluid is installed succesfully!` appears, it means the installation was successful. + + +9. For more information to help, please refer to: + + `install under Ubuntu `_ + + `install under CentOS `_ + + `install under MacOS `_ + + `install under Windows `_ + + +The second way to install: compile and install with source code +==================================== + +- If you use PaddlePaddle only, we suggest you installation methods **pip** to install. +- If you need to develop PaddlePaddle, please refer to `compile from source code `_ .. 
toctree:: :hidden: diff --git a/doc/fluid/install/install_CentOS.md b/doc/fluid/install/install_CentOS.md index 82afa784da00f4c433ab661358aafdadb55a19df..b10573a1774c2624252e4ebeb69501678b964744 100644 --- a/doc/fluid/install/install_CentOS.md +++ b/doc/fluid/install/install_CentOS.md @@ -56,7 +56,7 @@ python3 -c "import platform;print(platform.architecture()[0]);print(platform.machine())" * 默认提供的安装包需要计算机支持MKL -* 如果您对机器环境不了解,请下载使用[快速安装脚本](https://fast-install.bj.bcebos.com/fast_install.sh),配套说明请参考[这里](https://github.com/PaddlePaddle/FluidDoc/tree/develop/doc/fluid/beginners_guide/install/install_script.md)。 +* 如果您对机器环境不了解,请下载使用[快速安装脚本](https://fast-install.bj.bcebos.com/fast_install.sh),配套说明请参考[这里](https://github.com/PaddlePaddle/FluidDoc/tree/develop/doc/fluid/install/install_script.md)。 ## 选择CPU/GPU @@ -64,8 +64,8 @@ * 如果您的计算机有NVIDIA® GPU,请确保满足以下条件并且安装GPU版PaddlePaddle - * **CUDA 工具包10.0配合cuDNN v7.3+(如需多卡支持,需配合NCCL2.3.7及更高)** - * **CUDA 工具包9.0配合cuDNN v7.3+(如需多卡支持,需配合NCCL2.3.7及更高)** + * **CUDA 工具包10.0配合cuDNN v7.6+(如需多卡支持,需配合NCCL2.3.7及更高)** + * **CUDA 工具包9.0配合cuDNN v7.6+(如需多卡支持,需配合NCCL2.3.7及更高)** * **GPU运算能力超过1.0的硬件设备** 您可参考NVIDIA官方文档了解CUDA和CUDNN的安装流程和配置方法,请见[CUDA](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/),[cuDNN](https://docs.nvidia.com/deeplearning/sdk/cudnn-install/) @@ -81,11 +81,9 @@ ## 安装方式 -CentOS系统下有5种安装方式: +CentOS系统下有3种安装方式: * pip安装(推荐) -* [conda安装](./install_Conda.html) -* [Docker安装](./install_Docker.html) * [源码编译安装](./compile/compile_CentOS.html#ct_source) * [Docker源码编译安装](./compile/compile_CentOS.html#ct_docker) @@ -94,11 +92,11 @@ CentOS系统下有5种安装方式: ## 安装步骤 * CPU版PaddlePaddle: - * 对于Python 2: `python -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple` 或 `python -m pip install paddlepaddle -i https://pypi.tuna.tsinghua.edu.cn/simple` - * 对于Python 3: `python3 -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple` 或 `python3 -m pip install paddlepaddle -i https://pypi.tuna.tsinghua.edu.cn/simple` + * 对于Python 2: `python -m pip install paddlepaddle==2.0.0a0 -i https://mirror.baidu.com/pypi/simple` 或 `python -m pip install paddlepaddle==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` + * 对于Python 3: `python3 -m pip install paddlepaddle==2.0.0a0 -i https://mirror.baidu.com/pypi/simple` 或 `python3 -m pip install paddlepaddle==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` * GPU版PaddlePaddle: - * 对于Python 2: `python -m pip install paddlepaddle-gpu -i https://mirror.baidu.com/pypi/simple` 或 `python -m pip install paddlepaddle-gpu -i https://pypi.tuna.tsinghua.edu.cn/simple` - * 对于Python 3: `python3 -m pip install paddlepaddle-gpu -i https://mirror.baidu.com/pypi/simple` 或 `python3 -m pip install paddlepaddle-gpu -i https://pypi.tuna.tsinghua.edu.cn/simple` + * 对于Python 2: `python -m pip install paddlepaddle-gpu==2.0.0a0 -i https://mirror.baidu.com/pypi/simple` 或 `python -m pip install paddlepaddle-gpu==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` + * 对于Python 3: `python3 -m pip install paddlepaddle-gpu==2.0.0a0 -i https://mirror.baidu.com/pypi/simple` 或 `python3 -m pip install paddlepaddle-gpu==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` 您可[验证是否安装成功](#check),如有问题请查看[FAQ](./FAQ.html) @@ -107,7 +105,7 @@ CentOS系统下有5种安装方式: * 如果是python2.7, 建议使用`python`命令; 如果是python3.x, 则建议使用`python3`命令 -* `python -m pip install paddlepaddle-gpu -i https://pypi.tuna.tsinghua.edu.cn/simple` 此命令将安装支持CUDA 10.0 cuDNN v7的PaddlePaddle,如您对CUDA或cuDNN版本有不同要求,可用`python -m pip install paddlepaddle-gpu==[版本号] -i 
https://pypi.tuna.tsinghua.edu.cn/simple`或 `python3 -m pip install paddlepaddle-gpu==[版本号] -i https://pypi.tuna.tsinghua.edu.cn/simple`命令来安装,版本号请见[这里](https://pypi.org/project/paddlepaddle-gpu#history), 关于paddlepaddle与CUDA, cuDNN版本的对应关系请见[安装包列表](./Tables.html#whls) +* `python -m pip install paddlepaddle-gpu==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` 此命令将安装支持CUDA 10.0 cuDNN v7的PaddlePaddle。 * 默认下载最新稳定版的安装包,如需获取开发版安装包,请参考[这里](./Tables.html#ciwhls) diff --git a/doc/fluid/install/install_CentOS_en.md b/doc/fluid/install/install_CentOS_en.md index d0105c04793bcd61e8bcce43841105702c74bea6..b21c1bb23dfea2ede0c2e06f9c83ec3fe91a8fe8 100644 --- a/doc/fluid/install/install_CentOS_en.md +++ b/doc/fluid/install/install_CentOS_en.md @@ -1,148 +1,128 @@ -*** - # **Install on CentOS** -This note will show you how to install PaddlePaddle on a *64-bit desktop or laptop* and CentOS. The CentOS system we support needs to meet the following requirements: - -Please note: Attempts on other systems may cause the installation to fail. Please ensure that your environment meets the conditions. The installation we provide by default requires your computer processor to support the AVX instruction set. Otherwise, please select the version of `no_avx` in [the latest Release installation package list](./Tables.html/#ciwhls-release). - -Under CentOS you can use `cat /proc/cpuinfo | grep avx` to check if your processor supports the AVX instruction set. - -* CentOS 6 / 7 - -## Determine which version to install - -* Only PaddlePaddle for CPU is supported. If your computer does not have an NVIDIA® GPU, you can only install this version. If your computer has a GPU, it is recommended that you install the CPU version of PaddlePaddle first to check if your local environment is suitable. - -* PaddlePaddle with GPU support, in order to make the PaddlePaddle program run more quickly, we accelerate the PaddlePaddle program through the GPU, but the GPU version of PaddlePaddle needs to have the NVIDIA® GPU that meets the following conditions (see the NVIDIA official for the specific installation process and configuration). Documentation: [For CUDA](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/), [For cuDNN](https://docs.nvidia.com/deeplearning/sdk/cudnn-install/)) - - * *CUDA Toolkit 9.0 with cuDNN v7* - * *CUDA Toolkit 8.0 with cuDNN v7* - * *Hardware devices with GPU compute capability exceeding 1.0* - -## Choose an installation method +## Environmental preparation -We offer 4 installation methods under the CentOS system: +* **CentOS Version(64 bit)** + * **CentOS 6 (GPU version supports CUDA 9.0/9.1/9.2/10.0/10.1, only supports single card)** + * **CentOS 7 (GPU version supports CUDA 9.0/9.1/9.2/10.0/10.1, CUDA 9.1 only supports single card)** +* **Python version 2.7.15+/3.5.1+/3.6/3.7 (64 bit)** +* **pip or pip3 version 9.0.1+ (64 bit)** -* Pip installation -* Docker installation (the GPU version is not supported) (the version of python in the image is 2.7) -* Source code compilation and installation (all versions of CentOS 6 and GPU version of CentOS 7 are not supported) -* Docker source compilation and installation (not supported for GPU version) (Python version 2.7, 3.5, 3.6, 3.7 in image) +### Note -**With pip installation** (the easiest way to install), we offer you a pip installation method, but it depends more on your native environment and may have some issues related to your local environment. 
+* You can use`uname -m && cat /etc/*release` to view the local operating system and bit information +* Confirm that the Python where you need to install PaddlePaddle is your expected location, because your computer may have multiple Python -**Use Docker for installation** (the safest way to install), because we are installing the tools and configuration in a Docker image so that if something goes wrong, others can reproduce the problem for help. In addition, for developers accustomed to using Windows and MacOS, there is no need to configure a cross-compilation environment using Docker. It should be emphasized that Docker does not virtualize any hardware. The compiler tools running in the Docker container are actually run directly on the native CPU and operating system. The performance is the same as installing the compiler on the machine. + * If you are using Python 2, use the following command to output Python path. Depending on the environment, you may need to replace Python in all command lines in the description with specific Python path -Compile and install from [**source**](#ct_source) and [**use Docker**](#ct_docker). This is a process of compiling the PaddlePaddle source code into a binary file and then installing the binary file. Compared with the binary form of PaddlePaddle that has been successfully tested and compiled for you, this manual compilation is more complicated, and we will answer you in detail at the end of this tutorial. + which python + * If you are using Python 3, use the following command to output Python path. Depending on your environment, you may need to replace Python 3 in all command lines in the instructions with Python or specific Python path -

-## ***Install PaddlePaddle using pip*** + which python3 -First, we use the following commands to check if **the environment of this machine** is suitable for installing PaddlePaddle: +* You need to confirm whether the version of Python meets the requirements -`Uname -m && cat /etc/*release` + * If you are using Python 2, use the following command to confirm that it is 2.7.15+ -> The above command will display the operating system and processing bits of the machine. Please make sure your computer is consistent with the requirements of this tutorial. + python --version -Second, your computer needs to meet the following requirements: + * If you are using Python 3, use the following command to confirm that it is 3.5.1+/3.6/3.7 -* Python2.7.x (devel), Pip >= 9.0.1 + python3 --version - > CentOS6 needs to compile Python 2.7 into a [shared library](./FAQ.html/#FAQ). +* It is required to confirm whether the version of pip meets the requirements. The version of pip is required to be 9.0.1+ -* Python3.5+.x (devel), Pip3 >= 9.0.1 + * If you are using Python 2 - > You may have installed pip on your CentOS. Please use pip -V to confirm that we recommend using pip 9.0.1 or higher to install. + python -m ensurepip - Update the source of yum: `yum update` and install the extension source to install pip: `yum install -y epel-release` + python -m pip --version - Use the following command to install or upgrade Python and pip to the required version: + * If you are using Python 3 + python3 -m ensurepip - - For Python2: `sudo yum install python-devel python-pip` - - For Python3: (Please refer to the official Python installation, and pay attention to whether the python3 version is consistent with the python version corresponding to the pip3 command. If there are multiple python3 versions, please specify the pip version such as pip3.7, or add soft link from pip3 to the python version you use. ) + python3 -m pip --version +* You need to confirm that Python and pip are 64bit, and the processor architecture is x86_64(or called x64、Intel 64、AMD64). Currently, paddlepaddle does not support arm64 architecture. The first line below outputs "64bit", and the second line outputs "x86_64", "x64" or "AMD64": + * If you are using Python 2 + python -c "import platform;print(platform.architecture()[0]);print(platform.machine())" - > Even if you already have `Python` in your environment, you need to install the `python develop` package. + * If you are using Python 2 -Here's how to install PaddlePaddle: + python3 -c "import platform;print(platform.architecture()[0]);print(platform.machine())" -1. Use pip install to install PaddlePaddle: +* The installation package provided by default requires computer support for MKL +* If you do not know the machine environment, please download and use[Quick install script](https://fast-install.bj.bcebos.com/fast_install.sh), for instructions please refer to[here](https://github.com/PaddlePaddle/FluidDoc/tree/develop/doc/fluid/install/install_script.md)。 - * For users who need **the CPU version PaddlePaddle**: `pip install paddlepaddle` or `pip3 install paddlepaddle` +## Choose CPU/GPU - * For users who need **the GPU version PaddlePaddle**: `pip install paddlepaddle-gpu` or `pip3 install paddlepaddle-gpu` +* If your computer doesn't have NVIDIA® GPU, please install the CPU version of PaddlePaddle - > 1 . In order to prevent problem "nccl.h cannot be found", please first install nccl2 according to the instructions of [NVIDIA official website](https://developer.nvidia.com/nccl/nccl-download). 
+* If your computer has NVIDIA® GPU, please make sure that the following conditions are met and install the GPU version of PaddlePaddle - > 2 . If you do not specify the pypi package version number, we will by default provide you with a version of PaddlePaddle that supports Cuda 9/cuDNN v7. + * **CUDA toolkit 10.0 with cuDNN v7.6+(for multi card support, NCCL2.3.7 or higher)** + * **CUDA toolkit 9.0 with cuDNN v7.6+(for multi card support, NCCL2.3.7 or higher)** + * **Hardware devices with GPU computing power over 1.0** - * For users with `Cannot uninstall 'six'.` problems, the probable reason is the existing Python installation issues in your system. In this case, use `pip install paddlepaddle --ignore-installed six`(CPU) or `pip install paddlepaddle-gpu -- Ignore-installed six` (GPU) to resolve. + You can refer to NVIDIA official documents for installation process and configuration method of CUDA and cudnn. Please refer to [CUDA](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/),[cuDNN](https://docs.nvidia.com/deeplearning/sdk/cudnn-install/) - * For users with **other requirements**: `pip install paddlepaddle==[version number]` or `pip3 install paddlepaddle==[version number]` +* If you need to use a multi-card environment, please make sure that you have installed nccl2 correctly, or install nccl2 according to the following instructions (here are the installation instructions of nccl2 under CentOS 7, CUDA 9 and cuDNN 7). For more versions of the installation information, please refer to the NVIDIA [official website](https://developer.nvidia.com/nccl): - > For `the version number`, please refer to [the latest Release installation package list](./Tables.html/#whls). If you need to obtain and install **the latest PaddlePaddle development branch**, you can download and install the latest whl installation package and c-api development package from [the latest dev installation package list](./Tables.html/#ciwhls) or our [CI system](https://paddleci.ngrok.io/project.html?projectId=Manylinux1&tab=projectOverview). To log in, click on "Log in as guest". -Now you have completed the process of installing PaddlePaddle via `pip install`. + wget http://developer.download.nvidia.com/compute/machine-learning/repos/rhel7/x86_64/nvidia-machine-learning-repo-rhel7-1.0.0-1.x86_64.rpm + rpm -i nvidia-machine-learning-repo-rhel7-1.0.0-1.x86_64.rpm + yum update -y + yum install -y libnccl-2.3.7-2+cuda9.0 libnccl-devel-2.3.7-2+cuda9.0 libnccl-static-2.3.7-2+cuda9.0 +## Installation method -

-## *Install using Docker* +There are three installation methods under CentOS system: -In order to better use Docker and avoid problems, we recommend using **the highest version of Docker**. For details on installing and using Docker, please refer to [the official Docker documentation](https://docs.docker.com/install/). +* pip installation (recommended) +* [Compile From Source Code](./compile/compile_CentOS_en.html#ct_source) +* [Compile From Docker Source Code](./compile/compile_CentOS_en.html#ct_docker) -> Please note that to install and use the PaddlePaddle version that supports GPU, you must first install [nvidia-docker](https://github.com/NVIDIA/nvidia-docker). +Here we introduce the pip installation -Once you have **properly installed Docker**, you can start **installing PaddlePaddle with Docker**. +## Installation steps -1. Use the following command to pull the image we pre-installed for PaddlePaddle: +* CPU version of PaddlePaddle: + * For Python 2: `python -m pip install paddlepaddle==2.0.0a0 -i https://mirror.baidu.com/pypi/simple` or `python -m pip install paddlepaddle==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` + * For Python 3: `python3 -m pip install paddlepaddle==2.0.0a0 -i https://mirror.baidu.com/pypi/simple` or `python3 -m pip install paddlepaddle==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` +* GPU version of PaddlePaddle: + * For Python 2: `python -m pip install paddlepaddle-gpu==2.0.0a0 -i https://mirror.baidu.com/pypi/simple` or `python -m pip install paddlepaddle-gpu==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` + * For Python 3: `python3 -m pip install paddlepaddle-gpu==2.0.0a0 -i https://mirror.baidu.com/pypi/simple` or `python3 -m pip install paddlepaddle-gpu==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` - * For users who need a **CPU version of PaddlePaddle**, use the following command to pull the image we pre-installed for your *PaddlePaddle For CPU*: +You can [verify whether the installation succeeded](#check); if you have any questions, you can refer to the [FAQ](./FAQ.html) - `Docker pull hub.baidubce.com/paddlepaddle/paddle: 1.2` - * You can also pull any of our Docker images by following the instructions below: +Note: - `Docker pull hub.baidubce.com/paddlepaddle/paddle:[tag]` +* If it is python2.7, it is recommended to use the `python` command; if it is python3.x, it is recommended to use the `python3` command - > (Please replace [tag] with the contents of [the mirror table](./Tables.html/#dockers)) +* `python -m pip install paddlepaddle-gpu==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` This command will install PaddlePaddle with support for CUDA 10.0 and cuDNN v7. -2. Use the following command to build from the already pulled image and enter the Docker container: - `Docker run --name [Name of container] -it -v $PWD:/paddle /bin/bash` +* The latest stable installation package is downloaded by default. For the development installation package, please refer to [here](./Tables.html#ciwhls) - > In the above command, --name [Name of container] sets the name of the Docker; the -it parameter indicates that the container is running interactively with the host machine; -v $PWD:/paddle specifies the current path (the PWD variable in Linux will expand to [The absolute path](https://baike.baidu.com/item/%E7%BB%9D%E5%AF%B9%E8%B7%AF%E5%BE%84/481185) of the current path ) which is mounted to the /paddle directory inside the container; `` specifies the name of the image to use, if you need to use our image please use `hub.baidubce.com/paddlepaddle/paddle:[tag]`. 
Note: The meaning of the tag is the same as the second step. /bin/bash is the command to be executed in Docker. - -3. (Optional: When you need to enter the Docker container a second time) re-enter the PaddlePaddle container with the following command: - - `Docker start [Name of container]` - - > start the container created previously - - `Docker attach [Name of container]` - - > Enter the started container in the last step. - -Now that you have successfully installed PaddlePaddle using Docker, you only need to run PaddlePaddle after entering the Docker container. For more Docker usage, please refer to [the official Docker documentation](https://docs.docker.com/). - -> Note: In order to reduce the size, `vim` is not installed in PaddlePaddle Docker image by default. You can edit the code in the container after executing `apt-get install -y vim` in the container. + +## ***Verify installation*** +After the installation is complete, you can use `python` or `python3` to enter the Python interpreter and then use `import paddle.fluid as fluid` and `fluid.install_check.run_check()`. -

-## ***Verify installation*** +If `Your Paddle Fluid is installed succesfully!` appears, it means the installation was successful. -After the installation is complete, you can use `python` or `python3` to enter the Python interpreter and then use `import paddle.fluid` to verify that the installation was successful. -
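The same check can also be run without opening an interactive interpreter. Below is a minimal sketch, assuming the wheel from the steps above is already installed and that `python3` resolves to the interpreter you installed it into (use `python` for Python 2):

```
# run PaddlePaddle's built-in installation check in one line
python3 -c "import paddle.fluid as fluid; fluid.install_check.run_check()"
```

A successful run ends with the `Your Paddle Fluid is installed succesfully!` message quoted above.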

## ***How to uninstall*** -Please use the following command to uninstall PaddlePaddle (users who use Docker to install PaddlePaddle should use the following command in the container containing PaddlePaddle. Please use the corresponding version of pip): +Please use the following command to uninstall PaddlePaddle: -* ***CPU version of PaddlePaddle***: `pip uninstall paddlepaddle` or `pip3 uninstall paddlepaddle` +* ***CPU version of PaddlePaddle***: `python -m pip uninstall paddlepaddle` or `python3 -m pip uninstall paddlepaddle` -* ***GPU version of PaddlePaddle***: `pip uninstall paddlepaddle-gpu` or `pip3 uninstall paddlepaddle-gpu` +* ***GPU version of PaddlePaddle***: `python -m pip uninstall paddlepaddle-gpu` or `python3 -m pip uninstall paddlepaddle-gpu` diff --git a/doc/fluid/install/install_Conda.md b/doc/fluid/install/install_Conda.md deleted file mode 100644 index 2d4157f44e91055c583e3348f3b1bbfd38fe7472..0000000000000000000000000000000000000000 --- a/doc/fluid/install/install_Conda.md +++ /dev/null @@ -1,102 +0,0 @@ -# **使用Conda安装** - -[Anaconda](https://www.anaconda.com/)是一个免费开源的Python和R语言的发行版本,用于计算科学,Anaconda致力于简化包管理和部署。Anaconda的包使用软件包管理系统Conda进行管理。Conda是一个开源包管理系统和环境管理系统,可在Windows、macOS和Linux上运行。 - -## 环境准备 - -在进行PaddlePaddle安装之前请确保您的Anaconda软件环境已经正确安装。软件下载和安装参见Anaconda官网(https://www.anaconda.com/)。在您已经正确安装Anaconda的情况下请按照下列步骤安装PaddlePaddle。 - -## 安装步骤 - -1. 创建虚拟环境 - - 首先根据具体的Python版本创建Anaconda虚拟环境,前PaddlePaddle的Anaconda安装支持以下四种Python安装环境。 - - 如果您想使用的python版本为2.7: - - conda create -n paddle_env python=2.7 - - 如果您想使用的python版本为3.5: - - conda create -n paddle_env python=3.5 - - 如果您想使用的python版本为3.6: - - conda create -n paddle_env python=3.6 - - 如果您想使用的python版本为3.7: - - conda create -n paddle_env python=3.7 - - activate paddle_env (for Windows) 或 conda activate paddle_env (for MacOS/Linux) 命令进入Anaconda虚拟环境。 - -2. 确认您的conda虚拟环境和需要安装PaddlePaddle的Python是您预期的位置,因为您计算机可能有多个Python。进入Anaconda的命令行终端,输入以下指令确认Python位置。 - - 如果您是使用 Python 2,使用以下命令输出 Python 路径,根据您的环境您可能需要将说明中所有命令行中的 python 替换为具体的 Python 路径 - - 在 Windows 环境下,输出 Python 路径的命令为: - - where python - - 在 MacOS/Linux 环境下,输出 Python 路径的命令为: - - which python - - 如果您是使用 Python 3,使用以下命令输出 Python 路径,根据您的环境您可能需要将说明中所有命令行中的 python3 替换为 python 或者替换为具体的 Python 路径 - - 在 Windows 环境下,输出 Python 路径的命令为: - - where python3 - - 在 MacOS/Linux 环境下,输出 Python 路径的命令为: - - which python3 - -3. 检查Python的版本 - - 如果您是使用 Python 2,使用以下命令确认是 2.7.15+ - - python --version - - 如果您是使用 Python 3,使用以下命令确认是 3.5.1+/3.6/3.7 - - python3 --version - -4. 确认Python和pip是64bit,并且处理器架构是x86_64(或称作x64、Intel 64、AMD64)架构,目前PaddlePaddle不支持arm64架构。下面的第一行输出的是"64bit",第二行输出的是"x86_64(或x64、AMD64)"即可: - - 如果您是使用 Python 2 - - python -c "import platform;print(platform.architecture()[0]);print(platform.machine())" - - 如果您是使用 Python 3 - - python3 -c "import platform;print(platform.architecture()[0]);print(platform.machine())" - -5. 安装PaddlePaddle - - (1). **CPU版本**:如果您只是想安装CPU版本请参考如下命令安装 - - conda install paddlepaddle - - (2). **GPU版本**:如果您想使用GPU版本请参考如下命令安装 - - 如果您是使用 CUDA 9,cuDNN 7.3+,安装GPU版本的命令为: - - conda install paddlepaddle-gpu cudatoolkit=9.0 - - 如果您是使用 CUDA 10.0,cuDNN 7.3+,安装GPU版本的命令为: - - conda install paddlepaddle-gpu cudatoolkit=10.0 - -6. 
安装环境验证 - - 使用python进入python解释器,输入import paddle.fluid,再输入 paddle.fluid.install_check.run_check()。如果出现“Your Paddle Fluid is installed succesfully!”,说明您已成功安装。 - -## 注意 - -对于国内用户无法连接到Anaconda官方源的可以按照以下命令添加清华源进行安装。 - - conda config --add channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free/ - conda config --add channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/main/ - conda config --add channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/Paddle/ - conda config --set show_channel_urls yes diff --git a/doc/fluid/install/install_Docker.md b/doc/fluid/install/install_Docker.md deleted file mode 100644 index 8a9963e2d70acbc9efad8b29be0ceaee2bf4d240..0000000000000000000000000000000000000000 --- a/doc/fluid/install/install_Docker.md +++ /dev/null @@ -1,128 +0,0 @@ -# **使用Docker安装** - -[Docker](https://docs.docker.com/install/)是一个开源的应用容器引擎。使用Docker,既可以将PaddlePaddle的安装&使用与系统环境隔离,也可以与主机共享GPU、网络等资源 - -## 环境准备 - -- 目前支持的系统类型,请见[安装说明](./index_cn.html),请注意目前暂不支持在CentOS 6使用Docker - -- 在本地主机上[安装Docker](https://hub.docker.com/search/?type=edition&offering=community) - -- 如需在Linux开启GPU支持,请[安装nvidia-docker](https://github.com/NVIDIA/nvidia-docker) - -## 安装步骤 - -1. 拉取PaddlePaddle镜像 - - * CPU版的PaddlePaddle: `docker pull hub.baidubce.com/paddlepaddle/paddle:[版本号]` - - * GPU版的PaddlePaddle: `docker pull hub.baidubce.com/paddlepaddle/paddle:[版本号]-gpu-cuda9.0-cudnn7` - - 如果您的机器不在中国大陆地区,可以直接从DockerHub拉取镜像: - - * CPU版的PaddlePaddle: `docker pull paddlepaddle/paddle:[版本号]` - - * GPU版的PaddlePaddle: `docker pull paddlepaddle/paddle:[版本号]-gpu-cuda9.0-cudnn7` - - 在`:`后请您填写PaddlePaddle版本号,例如当前版本,更多请见[镜像简介](#dockers),上例中,`cuda9.0-cudnn7` 也仅作示意用,您可以访问[DockerHub](https://hub.docker.com/r/paddlepaddle/paddle/tags/)获取与您机器适配的镜像。 - -2. 构建、进入Docker容器 - - * 使用CPU版本的PaddlePaddle: - - - - `docker run --name [Name of container] -it -v $PWD:/paddle /bin/bash` - - > --name [Name of container] 设定Docker的名称; - - - > -it 参数说明容器已和本机交互式运行; - - - > -v $PWD:/paddle 指定将当前路径(Linux中PWD变量会展开为当前路径的绝对路径)挂载到容器内部的 /paddle 目录; - - > `` 指定需要使用的image名称,您可以通过`docker images`命令查看;/bin/bash是在Docker中要执行的命令 - - - - * 使用GPU版本的PaddlePaddle: - - - - `nvidia-docker run --name [Name of container] -it -v $PWD:/paddle /bin/bash` - - > --name [Name of container] 设定Docker的名称; - - - > -it 参数说明容器已和本机交互式运行; - - - > -v $PWD:/paddle 指定将当前路径(Linux中PWD变量会展开为当前路径的绝对路径)挂载到容器内部的 /paddle 目录; - - > `` 指定需要使用的image名称,您可以通过`docker images`命令查看;/bin/bash是在Docker中要执行的命令 - - - -至此,您已经成功使用Docker安装PaddlePaddle,更多Docker使用请参见[Docker官方文档](https://docs.docker.com) - - -

-### **镜像简介** -

镜像源 镜像说明
hub.baidubce.com/paddlepaddle/paddle:[Version] 安装了指定版本PaddlePaddle
hub.baidubce.com/paddlepaddle/paddle:latest 安装了开发版PaddlePaddle。注意:此版本可能包含尚未发布的特性和不稳定的功能,因此不推荐常规用户或在生产环境中使用。
hub.baidubce.com/paddlepaddle/paddle:latest-gpu 安装了开发版PaddlePaddle(支持GPU)。注意:此版本可能包含尚未发布的特性和不稳定的功能,因此不推荐常规用户或在生产环境中使用。
hub.baidubce.com/paddlepaddle/paddle:latest-dev 安装了PaddlePaddle最新的开发环境

- -您可以在 [DockerHub](https://hub.docker.com/r/paddlepaddle/paddle/tags/) 中找到PaddlePaddle的各个发行的版本的docker镜像。 - -### 注意事项 - -* 镜像中Python版本为2.7 -* PaddlePaddle Docker镜像为了减小体积,默认没有安装`vim`,您可以在容器中执行 `apt-get install -y vim` 安装后,在容器中编辑代码 - -### 补充说明 - -* 当您需要第二次进入Docker容器中,使用如下命令: -``` - #启动之前创建的容器 - docker start [Name of container] - - #进入启动的容器 - docker attach [Name of container] -``` -* 如您是Docker新手,您可以参考互联网上的资料学习,例如[Docker教程](http://www.runoob.com/docker/docker-hello-world.html) - -## 如何卸载 - -请您进入Docker容器后,执行如下命令 - -* **CPU版本的PaddlePaddle**: `pip uninstall paddlepaddle` - -* **GPU版本的PaddlePaddle**: `pip uninstall paddlepaddle-gpu` - -或通过`docker rm [Name of container]`来直接删除Docker容器 diff --git a/doc/fluid/install/install_MacOS.md b/doc/fluid/install/install_MacOS.md index 24af303300efb4437599cb47f55a97ab3506085a..d7d386558547203af51bfcf7e99cc530b75862dd 100644 --- a/doc/fluid/install/install_MacOS.md +++ b/doc/fluid/install/install_MacOS.md @@ -60,11 +60,9 @@ ## 安装方式 -MacOS系统下有5种安装方式: +MacOS系统下有3种安装方式: * pip安装(推荐) -* [conda安装](./install_Conda.html) -* [Docker安装](./install_Docker.html) * [源码编译安装](./compile/compile_MacOS.html#mac_source) * [Docker源码编译安装](./compile/compile_MacOS.html#mac_docker) @@ -74,8 +72,8 @@ MacOS系统下有5种安装方式: ## 安装步骤 * CPU版PaddlePaddle: - * 对于Python 2: `python -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple` 或 `python -m pip install paddlepaddle -i https://pypi.tuna.tsinghua.edu.cn/simple` - * 对于Python 3: `python3 -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple` 或 `python3 -m pip install paddlepaddle -i https://pypi.tuna.tsinghua.edu.cn/simple` + * 对于Python 2: `python -m pip install paddlepaddle==2.0.0a0 -i https://mirror.baidu.com/pypi/simple` 或 `python -m pip install paddlepaddle==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` + * 对于Python 3: `python3 -m pip install paddlepaddle==2.0.0a0 -i https://mirror.baidu.com/pypi/simple` 或 `python3 -m pip install paddlepaddle==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` 您可[验证是否安装成功](#check),如有问题请查看[FAQ](./FAQ.html) diff --git a/doc/fluid/install/install_MacOS_en.md b/doc/fluid/install/install_MacOS_en.md index f511486490a9ebc84739c3d32f17b0e48b815ff3..8e819e7974f5338c5e272e9e5d8585d709698015 100644 --- a/doc/fluid/install/install_MacOS_en.md +++ b/doc/fluid/install/install_MacOS_en.md @@ -1,124 +1,101 @@ *** # **Install on MacOS** -This instruction will show you how to install PaddlePaddle on a *64-bit desktop or laptop* and MacOS system. The MacOS system we support must meet the following requirements. +## Environment preparation -Please note: Attempts on other systems may cause the installation to fail. +* **MacOS version 10.11/10.12/10.13/10.14 (64 bit)(not support GPU version)** +* **Python version 2.7.15+/3.5.1+/3.6/3.7 (64 bit)** +* **pip or pip3 version 9.0.1+ (64 bit)** -* MacOS 10.11/10.12/10.13/10.14 +### Note -## Determine which version to install +* Confirm that the Python where you need to install PaddlePaddle is your expected location, because your computer may have multiple Python -* Only PaddlePaddle for CPU is supported. + * If you are using Python 2, use the following command to output Python path. Depending on the environment, you may need to replace `python` in all command lines + which python + * If you are using Python 3, use the following command to output Python path. 
Depending on the environment, you may need to replace `python` in all command lines -## Choose an installation method - -Under the MacOS system we offer 3 installation methods: - -* Pip installation (not supported for GPU version) (distributed architecture is not supported under python3) -* Docker installation (the GPU version is not supported) (the version of python in the image is 2.7) -* Docker source compilation and installation (not supported for GPU version) (Python version 2.7, 3.5, 3.6, 3.7 in image) - -**With pip installation** (the easiest way to install), we offer you a pip installation method, but it depends more on your local environment and may have some issues related to your local environment. - -**Use Docker for installation** (the safest way to install), because we have installed the tools and configuration in a Docker image so that if something goes wrong, others can reproduce the problem for help. In addition, for developers accustomed to using Windows and MacOS, there is no need to configure a cross-compilation environment using Docker. It should be emphasized that Docker does not virtualize any hardware. The compiler tools running in the Docker container are actually running directly on the local CPU and operating system. The performance is the same as installing the compiler on the machine. - + which python3 -

-### ***Install using pip*** - -Due to the large difference in Python situation in MacOS, we do not provide quick installation commands. Please follow the steps below to install. - -First, **check whether your computer and operating system** meet our supported compilation standards or not by `uname -m` and view the system version `About This Mac` in the Apple menu. - -Second, your computer needs to meet the following requirements: +* You need to confirm whether the version of Python meets the requirements -> **Please do not use the Python originally provided by MacOS**. For **Python 2**, we recommend Python2.7.15 provided by [Homebrew](https://brew.sh/) or [Python.org](https://www.python.org/ftp/python/2.7.15/python-2.7.15-macosx10.9.pkg). For Python3, please use python3.5.x, Python3.6.x or python3.7.x provided by [Python.org](https://www.python.org/downloads/mac-osx/). + * If you are using Python 2, use the following command to confirm that it is 2.7.15+ + python --version - For python2: brew install python@2 or use Python officially downloaded python2.7.15 - For python3: Use python3.5.x, python3.6.x or python3.7.x downloaded from Python official site + * If you are using Python 3, use the following command to confirm that it is 3.5.1+/3.6/3.7 + python3 --version -* Python2.7.x, Pip >= 9.0.1 +* It is required to confirm whether the pip version meets the requirements. The pip version is required to be 9.0.1+ -* Python3.5.x, Pip3 >= 9.0.1 + * If you are using Python 2 -* Python3.6.x, Pip3 >= 9.0.1 + python -m ensurepip -* Python3.7.x, Pip3 >= 9.0.1 + python -m pip --version - > Note: You may have installed pip on your MacOS. Please use pip -V to confirm that its version is the recommended pip 9.0.1 or higher. + * If you are using Python 3 + python3 -m ensurepip -Here's how to install PaddlePaddle: + python3 -m pip --version -1. Use pip install to install PaddlePaddle: +* Confirm that Python and pip is 64 bit,and the processor architecture is x86_64(or x64、Intel 64、AMD64)architecture. Currently, PaddlePaddle doesn't support arm64 architecture. The first line of output from the following command should be "64bit", and the second line should be "x86_64", "x64" or "AMD64". - * For users who need **the CPU version PaddlePaddle**: `pip install paddlepaddle` or `pip3 install paddlepaddle` + * If you are using Python 2 - * For users with **other requirements**: `pip install paddlepaddle==[version number]` or `pip3 install paddlepaddle==[version number]` + python -c "import platform;print(platform.architecture()[0]);print(platform.machine())" - > For `the version number`, please refer to [the latest Release installation package list](./Tables.html/#ciwhls-release). If you need to obtain and install **the latest PaddlePaddle development branch**, you can download the latest whl installation package and c-api development package from [the CI system](https://paddleci.ngrok.io/project.html?projectId=Manylinux1&tab=projectOverview) and install it. To log in, click on "Log in as guest". + * If you are using Python 3 + python3 -c "import platform;print(platform.architecture()[0]);print(platform.machine())" +* The installation package provided by default requires computer support for MKL -Now you have completed the process of installing PaddlePaddle via `pip install`. - - -

-### ***Install using Docker*** +## Choose CPU/GPU -In order to better use Docker and avoid problems, we recommend using **the highest version of Docker**. For details on **installing and using Docker**, please refer to [the official Docker documentation](https://docs.docker.com/install/). +* Currently, only the CPU version of PaddlePaddle is supported in the MacOS environment -Please note that running docker on MacOS requires logging in with your dockerID, otherwise an `Authenticate Failed` error will occur. - - -If Docker is **properly installed**, you can start **using Docker to install PaddlePaddle**. - -1. Use the following command to pull the image we pre-installed for PaddlePaddle: - - * For users who need **the CPU version of PaddlePaddle**, use the following command to pull the image we pre-installed for your *PaddlePaddle For CPU*: - - `Docker pull hub.baidubce.com/paddlepaddle/paddle: 1.2` - - * You can also pull any of our Docker images by following the instructions below: - - `Docker pull hub.baidubce.com/paddlepaddle/paddle:[tag]` - - > (Please replace [tag] with the contents of [the mirror table](./Tables.html/#dockers)) - -2. Use the following command to build from the already pulled image and enter the Docker container: - - `Docker run --name [Name of container] -it -v $PWD:/paddle /bin/bash` +## Choose an installation method - > In the above command, --name [Name of container] sets the name of the Docker; the -it parameter indicates that the container is running interactively with the host machine; -v $PWD:/paddle specifies the current path (the PWD variable in Linux will expand to [The absolute path](https://baike.baidu.com/item/绝对路径/481185) ) of the current path is mounted to the /paddle directory inside the container; `` specifies the name of the image to use, if you need to use our image please use `hub.baidubce.com/paddlepaddle/paddle:[tag]`. Note: The meaning of tag is the same as the second step; /bin/bash is the command to be executed in Docker. +Under the MacOS system we offer 3 installation methods: -3. (Optional: When you need to enter the Docker container a second time) re-enter the PaddlePaddle container with the following command: +* Pip installation (recommend) +* [Source code compilation and installation](./compile/compile_MacOS.html#mac_source) +* [Docker source code compilation and installation](./compile/compile_MacOS.html#mac_docker) - `Docker start [Name of container]` - > start the container created previously. +We will introduce pip installation here. - `Docker attach [Name of container]` +## Installation steps - > Enter the started container. +* CPU version of PaddlePaddle: + * For Python 2: `python -m pip install paddlepaddle==2.0.0a0 -i https://mirror.baidu.com/pypi/simple` or `python -m pip install paddlepaddle==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` + * For Python 3: `python3 -m pip install paddlepaddle==2.0.0a0 -i https://mirror.baidu.com/pypi/simple` or `python3 -m pip install paddlepaddle==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` -Now that you have successfully installed PaddlePaddle using Docker, you only need to run PaddlePaddle after entering the Docker container. For more Docker usage, please refer to [the official Docker documentation](https://docs.docker.com/). +You can[Verify installation succeeded or not](#check), if you have any questions, please check[FAQ](./FAQ.html) -> Note: In order to reduce the size, `vim` is not installed in PaddlePaddle Docker image by default. 
You can edit the code in the container after executing `apt-get install -y vim` in the container. +Note: +* On MacOS you need to install unrar to support PaddlePaddle; you can install it with the command `brew install unrar` +* For python2.7, we suggest the `python` command; for python3.x, we suggest the `python3` command +* The latest release installation package is downloaded by default. To obtain the development installation package, please refer to [here](./Tables.html#ciwhls) +* Using the Python that ships with MacOS can cause installation failures. For **Python2**, we recommend using [Homebrew](https://brew.sh) or python2.7.15 provided by [Python.org](https://www.python.org/ftp/python/2.7.15/python-2.7.15-macosx10.9.pkg); for **Python3**, please use python3.5.x, python3.6.x or python3.7.x provided by [Python.org](https://www.python.org/downloads/mac-osx/). +
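Putting the notes above together, here is a minimal sketch of a CPU-only install on MacOS with Python 3. It assumes Homebrew is available and that `python3` and pip 9.0.1+ are already set up as described:

```
# unrar is needed by PaddlePaddle on MacOS
brew install unrar
# install the CPU wheel from the Baidu mirror (the Tsinghua mirror shown above works the same way)
python3 -m pip install paddlepaddle==2.0.0a0 -i https://mirror.baidu.com/pypi/simple
```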

## ***Verify installation*** -After the installation is complete, you can use `python` or `python3` to enter the python interpreter and then use `import paddle.fluid` to verify that the installation was successful. +After the installation is complete, you can use `python` or `python3` to enter the Python interpreter, input `import paddle.fluid as fluid` and then `fluid.install_check.run_check()` to verify that the installation was successful. + +If `Your Paddle Fluid is installed succesfully!` appears, it means the installation was successful.

## ***How to uninstall*** -Please use the following command to uninstall PaddlePaddle (Users who use Docker to install PaddlePaddle should use the following command in the container containing PaddlePaddle. Please use the corresponding version of pip): +Please use the following command to uninstall PaddlePaddle: -* ***CPU version of PaddlePaddle***: `pip uninstall paddlepaddle` or `pip3 uninstall paddlepaddle` +* `python -m pip uninstall paddlepaddle` or `python3 -m pip uninstall paddlepaddle` diff --git a/doc/fluid/install/install_Ubuntu.md b/doc/fluid/install/install_Ubuntu.md index 4f74eeed638931bcca1b1a958a630abdf7ee333f..8004e674aecb0a8a073578a035e41a05fc584206 100644 --- a/doc/fluid/install/install_Ubuntu.md +++ b/doc/fluid/install/install_Ubuntu.md @@ -57,15 +57,15 @@ python3 -c "import platform;print(platform.architecture()[0]);print(platform.machine())" * 默认提供的安装包需要计算机支持MKL -* 如果您对机器环境不了解,请下载使用[快速安装脚本](https://fast-install.bj.bcebos.com/fast_install.sh),配套说明请参考[这里](https://github.com/PaddlePaddle/FluidDoc/tree/develop/doc/fluid/beginners_guide/install/install_script.md)。 +* 如果您对机器环境不了解,请下载使用[快速安装脚本](https://fast-install.bj.bcebos.com/fast_install.sh),配套说明请参考[这里](https://github.com/PaddlePaddle/FluidDoc/tree/develop/doc/fluid/install/install_script.md)。 ## 选择CPU/GPU * 如果您的计算机没有 NVIDIA® GPU,请安装CPU版的PaddlePaddle * 如果您的计算机有 NVIDIA® GPU,并且满足以下条件,推荐安装GPU版的PaddlePaddle - * **CUDA 工具包10.0配合cuDNN v7.3+(如需多卡支持,需配合NCCL2.3.7及更高)** - * **CUDA 工具包9.0配合cuDNN v7.3+(如需多卡支持,需配合NCCL2.3.7及更高)** + * **CUDA 工具包10.0配合cuDNN v7.6+(如需多卡支持,需配合NCCL2.3.7及更高)** + * **CUDA 工具包9.0配合cuDNN v7.6+(如需多卡支持,需配合NCCL2.3.7及更高)** * **GPU运算能力超过1.0的硬件设备** @@ -82,11 +82,9 @@ ## 安装方式 -Ubuntu系统下有5种安装方式: +Ubuntu系统下有3种安装方式: * pip安装(推荐) -* [conda安装](./install_Conda.html) -* [Docker安装](./install_Docker.html) * [源码编译安装](./compile/compile_Ubuntu.html#ubt_source) * [Docker源码编译安装](./compile/compile_Ubuntu.html#ubt_docker) @@ -95,12 +93,12 @@ Ubuntu系统下有5种安装方式: ## 安装步骤 * CPU版PaddlePaddle: - * 对于Python 2: `python -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple` 或 `python -m pip install paddlepaddle -i https://pypi.tuna.tsinghua.edu.cn/simple` - * 对于Python 3: `python3 -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple` 或 `python3 -m pip install paddlepaddle -i https://pypi.tuna.tsinghua.edu.cn/simple` + * 对于Python 2: `python -m pip install paddlepaddle==2.0.0a0 -i https://mirror.baidu.com/pypi/simple` 或 `python -m pip install paddlepaddle==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` + * 对于Python 3: `python3 -m pip install paddlepaddle==2.0.0a0 -i https://mirror.baidu.com/pypi/simple` 或 `python3 -m pip install paddlepaddle==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` * GPU版PaddlePaddle: - * 对于Python 2: `python -m pip install paddlepaddle-gpu -i https://mirror.baidu.com/pypi/simple` 或 `python -m pip install paddlepaddle-gpu -i https://pypi.tuna.tsinghua.edu.cn/simple` - * 对于Python 3: `python3 -m pip install paddlepaddle-gpu -i https://mirror.baidu.com/pypi/simple` 或 `python3 -m pip install paddlepaddle-gpu -i https://pypi.tuna.tsinghua.edu.cn/simple` + * 对于Python 2: `python -m pip install paddlepaddle-gpu==2.0.0a0 -i https://mirror.baidu.com/pypi/simple` 或 `python -m pip install paddlepaddle-gpu==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` + * 对于Python 3: `python3 -m pip install paddlepaddle-gpu==2.0.0a0 -i https://mirror.baidu.com/pypi/simple` 或 `python3 -m pip install paddlepaddle-gpu==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` 
您可[验证是否安装成功](#check),如有问题请查看[FAQ](./FAQ.html) @@ -108,7 +106,7 @@ Ubuntu系统下有5种安装方式: * 如果是python2.7, 建议使用`python`命令; 如果是python3.x, 则建议使用`python3`命令 -* `python -m pip install paddlepaddle-gpu -i https://pypi.tuna.tsinghua.edu.cn/simple` 此命令将安装支持CUDA 10.0 cuDNN v7的PaddlePaddle,如您对CUDA或cuDNN版本有不同要求,可用`python -m pip install paddlepaddle-gpu==[版本号] -i https://pypi.tuna.tsinghua.edu.cn/simple`或 `python3 -m pip install paddlepaddle-gpu==[版本号] -i https://pypi.tuna.tsinghua.edu.cn/simple`命令来安装,版本号请见[这里](https://pypi.org/project/paddlepaddle-gpu#history),关于paddlepaddle与CUDA, cuDNN版本的对应关系请见[安装包列表](./Tables.html#whls) +* `python -m pip install paddlepaddle-gpu==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` 此命令将安装支持CUDA 10.0 cuDNN v7的PaddlePaddle。 * 默认下载最新稳定版的安装包,如需获取开发版安装包,请参考[这里](./Tables.html#ciwhls) diff --git a/doc/fluid/install/install_Ubuntu_en.md b/doc/fluid/install/install_Ubuntu_en.md index 0a3e230c688ff82edd7d6f2179ddd64730d17296..2a08b8c54ca6e10431ee5913b9eca0c7423efc72 100644 --- a/doc/fluid/install/install_Ubuntu_en.md +++ b/doc/fluid/install/install_Ubuntu_en.md @@ -1,153 +1,123 @@ -*** - # **Install on Ubuntu** -This instruction describes how to install PaddlePaddle on a *64-bit desktop or laptop* and Ubuntu system. The Ubuntu systems we support must meet the following requirements: - -Please note: Attempts on other systems may cause the installation to fail. Please ensure that your environment meets the conditions. The installation we provide by default requires your computer processor to support the AVX instruction set. Otherwise, please select the version of `no_avx` in the [latest Release installation package list](./Tables_en.html/#ciwhls-release). - -Under Ubuntu, you can use `cat /proc/cpuinfo | grep avx` to check if your processor supports the AVX instruction set. - -* Ubuntu 14.04 /16.04 /18.04 - -## Determine which version to install - -* PaddlePaddle for CPU is supported. If your computer does not have an NVIDIA® GPU, you can only install this version. If your computer has a GPU, it is also recommended that you install the CPU version of PaddlePaddle first to check if your local environment is suitable. - -* PaddlePaddle for GPU is supported. 
In order to make the PaddlePaddle program run more quickly, we accelerate the PaddlePaddle program through the GPU, but the GPU version of the PaddlePaddle needs to have the NVIDIA® GPU that meets the following conditions (see the NVIDIA official documentation for the specific installation process and configuration: [For CUDA](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/), [For cuDNN](https://docs.nvidia.com/deeplearning/sdk/cudnn-install/)) +## Environment preparation - * *CUDA Toolkit 9.0 with cuDNN v7* - * *CUDA Toolkit 8.0 with cuDNN v7* - * *Hardware devices with GPU computing power exceeding 1.0* +* **Ubuntu version (64 bit)** + * **Ubuntu 14.04 (GPU version supports CUDA 10.0/10.1)** + * **Ubuntu 16.04 (GPU version supports CUDA 9.0/9.1/9.2/10.0/10.1)** + * **Ubuntu 18.04 (GPU version supports CUDA 10.0/10.1)** +* **Python version 2.7.15+/3.5.1+/3.6/3.7 (64 bit)** +* **pip or pip3 version 9.0.1+ (64 bit)** +### Note -## Choose an installation method - -Under the Ubuntu system, we offer 4 installation methods: - -* Pip installation -* Docker installation (the version of python in the image is 2.7) -* Source code compilation and installation -* Docker source code compilation and installation (the python version in the image is 2.7, 3.5, 3.6, 3.7) - +* You can use `uname -m && cat /etc/*release` view the operating system and digit information of the machine +* Confirm that the Python where you need to install PaddlePaddle is your expected location, because your computer may have multiple Python -**With pip installation** (the easiest way to install), we offer you a pip installation method, but it depends more on your native environment and may have some issues related to your local environment. + * If you are using Python 2, use the following command to output Python path. Depending on the environment, you may need to replace Python in all command lines in the description with specific Python path -**Use Docker for installation** (the safest way to install), because we have installed the tools and configuration in a Docker image so that if something goes wrong, others can reproduce the problem for help. In addition, for developers accustomed to using Windows and MacOS, there is no need to configure a cross-compilation environment using Docker. It should be emphasized that Docker does not virtualize any hardware. The compiler tools running in the Docker container are actually running directly on the native CPU and operating system. The performance is the same as installing the compiler on the machine. - -Compile and install from [**source**](#ubt_source) and [**use Docker**](#ubt_docker). This is a process of compiling the PaddlePaddle source code into a binary file and then installing the binary file. Compared to the binary PaddlePaddle that has been successfully tested and compiled for you, the manual compilation is more complicated, we will answer you in detail at the end of the description. - -

-### ***Install using pip*** + which python + * If you are using Python 3, use the following command to output Python path. Depending on the environment, you may need to replace Python 3 in all command lines in the description with Python or specific Python path -First, we use the following commands to **check if the environment of this machine** is suitable for installing PaddlePaddle: + which python3 - uname -m && cat /etc/*release +* You need to confirm that the version of Python meets the requirements + * If you are using Python 2,use the following command to confirm it is 2.7.15+ ->The above command will display the operating system and processing bits of the machine. Please make sure your computer is consistent with the requirements of this tutorial. + python --version -Second, your computer needs to meet any of the following requirements: + * If you are using Python 3,use the following command to confirm it is 3.5.1+/3.6/3.7 -* Python2.7.x (dev), Pip >= 9.0.1 -* Python3.5+.x (dev), Pip3 >= 9.0.1 + python3 --version ->You may have installed pip on your Ubuntu. Please use pip -V or pip3 -V to confirm its version is the recommended pip 9.0.1 or higher. +* You need to confirm that the version of pip meets the requirements, pip version is required 9.0.1+ - Update apt source: `apt update` + * If you are using Python 2 -Use the following command to install or upgrade Python and pip to the required version: (pip and dev installation in python3.6, python3.7 differs greatly across different Ubuntu versions, thus the steps are omitted here) + python -m ensurepip - - For python2: `sudo apt install python-dev python-pip` + python -m pip --version - - For python3.5: `sudo apt install python3.5-dev and curl https://bootstrap.pypa.io/get-pip.py -o - | python3.5 && easy_install pip` + * If you are using Python 3 - - For python3.6, python3.7: We assumed that python3.6 (3.7) and the corresponding versions of dev and pip3 are properly installed by yourself. + python3 -m ensurepip + python3 -m pip --version ->Even if you already have Python 2 or Python 3 in your environment, you need to install Python-dev or Python 3.5 (3.6, 3.7) -dev. +* Confirm that Python and pip is 64 bit,and the processor architecture is x86_64(or called x64、Intel 64、AMD64)architecture. Currently, PaddlePaddle doesn't support arm64 architecture. The first line below outputs "64bit", and the second line outputs "x86_64", "x64" or "AMD64" : -Now let's install PaddlePaddle: + * If you are using Python 2 + python -c "import platform;print(platform.architecture()[0]);print(platform.machine())" + * If you are using Python 3 -1. Use pip install to install PaddlePaddle + python3 -c "import platform;print(platform.architecture()[0]);print(platform.machine())" +* The installation package provided by default requires computer support for MKL +* If you do not know the machine environment, please download and use[Quick install script](https://fast-install.bj.bcebos.com/fast_install.sh), please refer to[here](https://github.com/PaddlePaddle/FluidDoc/tree/develop/doc/fluid/install/install_script.md). 
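The environment checks listed above can be run as one script. This is only a convenience sketch for a Python 3 setup; it assumes `python3` is the interpreter you intend to install PaddlePaddle into:

```
# operating system and architecture of the machine
uname -m && cat /etc/*release
# interpreter location and version (expects 3.5.1+/3.6/3.7)
which python3
python3 --version
# pip version (expects 9.0.1+)
python3 -m ensurepip
python3 -m pip --version
# should print "64bit" followed by "x86_64", "x64" or "AMD64"
python3 -c "import platform;print(platform.architecture()[0]);print(platform.machine())"
```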
- * For users who need **the CPU version PaddlePaddle**: `pip install paddlepaddle` or `pip3 install paddlepaddle` +## Choose CPU/GPU - * For users who need **the GPU version PaddlePaddle**: `pip install paddlepaddle-gpu` or `pip3 install paddlepaddle-gpu` +* If your computer doesn't have NVIDIA® GPU, please install CPU version of PaddlePaddle - > 1.In order to prevent problem "nccl.h cannot be found", please first install nccl2 according to the following command (here is ubuntu 16.04, CUDA9, ncDNN v7 nccl2 installation instructions), for more information about the installation information, please refer to [the NVIDIA official website](https://developer.nvidia.com/nccl/nccl-download): +* If your computer has NVIDIA® GPU, and meet the following conditions, we command you to install PaddlePaddle + * **CUDA toolkit 10.0 with cuDNN v7.6+(for multi card support, NCCL2.3.7 or higher)** + * **CUDA toolkit 9.0 with cuDNN v7.6+(for multi card support, NCCL2.3.7 or higher)** + * **Hardware devices with GPU computing power over 1.0** - i. `Wget http://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1604/x86_64/nvidia-machine-learning-repo-ubuntu1604_1.0.0-1_amd64.deb` - ii. `dpkg -i nvidia-machine- Learning-repo-ubuntu1604_1.0.0-1_amd64.deb` - iii. `sudo apt-get install -y libnccl2=2.2.13-1+cuda9.0 libnccl-dev=2.2.13-1+cuda9.0` - > 2.If you do not specify the pypi package version number, we will by default provide you with a version of PaddlePaddle that supports Cuda 9/cuDNN v7. + You can refer to NVIDIA official documents for installation process and configuration method of CUDA and cudnn. Please refer to[CUDA](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/),[cuDNN](https://docs.nvidia.com/deeplearning/sdk/cudnn-install/) - * For users with `Cannot uninstall 'six'.` problems, the probable reason is the existing Python installation issues in your system. In this case, use `pip install paddlepaddle --ignore-installed six`(CPU) or `pip install paddlepaddle-gpu -- Ignore-installed six` (GPU) to resolve. +* If you need to use multi card environment, please make sure that you have installed nccl2 correctly, or install nccl2 according to the following instructions (here is the installation instructions of nccl2 under ubuntu 16.04, CUDA9 and cuDNN7). For more version of installation information, please refer to NVIDIA[official website](https://developer.nvidia.com/nccl): - * For users with **other requirements**: `pip install paddlepaddle==[version number]` or `pip3 install paddlepaddle==[version number]` - > For `the version number`, please refer to [the latest Release installation package list](./Tables.html/#whls). If you need to obtain and install **the latest PaddlePaddle development branch**, you can download and install the latest whl installation package and c-api development package from [the latest dev installation package list](./Tables.html/#ciwhls) or our [CI system](https://paddleci.ngrok.io/project.html?projectId=Manylinux1&tab=projectOverview). To log in, click on "Log in as guest". + wget https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1604/x86_64/nvidia-machine-learning-repo-ubuntu1604_1.0.0-1_amd64.deb + dpkg -i nvidia-machine-learning-repo-ubuntu1604_1.0.0-1_amd64.deb + sudo apt-get install -y libnccl2=2.3.7-1+cuda9.0 libnccl-dev=2.3.7-1+cuda9.0 -Now you have completed the process of installing PaddlePaddle using `pip install`. +## Choose an installation method -

-### ***Install using Docker*** - -In order to better use Docker and avoid problems, we recommend using **the highest version of Docker**. For details on **installing and using Docker**, please refer to [the official Docker documentation](https://docs.docker.com/install/). - -> Please note that to install and use the PaddlePaddle version that supports GPU, you must first install [nvidia-docker](https://github.com/NVIDIA/nvidia-docker) - -If Docker is **properly installed**, you can start **installing PaddlePaddle using Docker**. - -1. Use the following command to pull the image we pre-installed for PaddlePaddle: - - * For users who need **a CPU version of PaddlePaddle**, use the following command to pull the image we pre-installed for your *PaddlePaddle For CPU*: - - `Docker pull hub.baidubce.com/paddlepaddle/paddle: 1.2` - - * For users who need **a GPU version of PaddlePaddle**, use the following command to pull the image we pre-installed for your *PaddlePaddle For GPU*: - - `Docker pull hub.baidubce.com/paddlepaddle/paddle:1.2-gpu-cuda9.0-cudnn7` - - * You can also pull any of our Docker images by following the instructions below: - - `Docker pull hub.baidubce.com/paddlepaddle/paddle:[tag]` +Under the Ubuntu system, we offer 3 installation methods: - > (Please replace [tag] with the contents of [the mirror table](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/beginners_guide/install/Tables.html/#dockers)) +* Pip installation (recommended) +* [Source code compilation and installation](./compile/compile_Ubuntu.html#ubt_source) +* [Docker source code compilation and installation](./compile/compile_Ubuntu.html#ubt_docker) -2. Use the following command to build from the already pulled image and enter the Docker container: +We will introduce pip installation here. - `Docker run --name [Name of container] -it -v $PWD:/paddle /bin/bash` +## Installation steps - > In the above command, --name [Name of container] sets the name of the Docker; the `-it` parameter indicates that the container is running interactively with the host machine; -v $PWD:/paddle specifies the current path (the PWD variable in Linux will expand to The absolute path of the current path) is mounted to the /paddle directory inside the container; `` specifies the name of the image to use, if you need to use our image please use `hub.baidubce.com/paddlepaddle/paddle:[tag]`. Note: The meaning of tag is the same as the second step; /bin/bash is the command to be executed in Docker. +* CPU version of PaddlePaddle: + * For Python 2: `python -m pip install paddlepaddle==2.0.0a0 -i https://mirror.baidu.com/pypi/simple` or `python -m pip install paddlepaddle==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` + * For Python 3: `python3 -m pip install paddlepaddle==2.0.0a0 -i https://mirror.baidu.com/pypi/simple` or `python3 -m pip install paddlepaddle==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` -3. 
(Optional: When you need to enter the Docker container a second time) re-enter the PaddlePaddle container with the following command: +* GPU version PaddlePaddle: + * For Python 2: `python -m pip install paddlepaddle-gpu==2.0.0a0 -i https://mirror.baidu.com/pypi/simple` or `python -m pip install paddlepaddle-gpu==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` + * For Python 3: `python3 -m pip install paddlepaddle-gpu==2.0.0a0 -i https://mirror.baidu.com/pypi/simple` or `python3 -m pip install paddlepaddle-gpu==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` - `Docker start [Name of container]` +You can [verify whether the installation is successful](#check), if you have any questions please see [FAQ](./FAQ.html) - > start the container created previously. +Note: - `Docker attach [Name of container]` +* For python2.7, we recommend to use `python` command; For python3.x, we recommend to use `python3` command. - > Enter the started container. +* `python -m pip install paddlepaddle-gpu==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` This command will install PaddlePaddle supporting CUDA 10.0 cuDNN v7. -Now that you have successfully installed PaddlePaddle using Docker, you only need to run PaddlePaddle after entering the Docker container. For more Docker usage, please refer to [the official Docker documentation](https://docs.docker.com/). ->Note: In order to reduce the size, `vim` is not installed in PaddlePaddle Docker image by default. You can edit the code in the container after executing `apt-get install -y vim` in the container. +* Download the latest stable installation package by default. For development installation package, please refer to[here](./Tables.html#ciwhls) +
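As a worked example of the GPU steps above, here is a sketch for Ubuntu 16.04 with CUDA 9.0 and Python 3. It assumes the NVIDIA driver, the CUDA toolkit and cuDNN are already installed, and it reuses the nccl2 packages from the instructions above (sudo is added where root privileges are needed):

```
# nccl2 for multi-card support (Ubuntu 16.04 / CUDA 9.0 repository)
wget https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1604/x86_64/nvidia-machine-learning-repo-ubuntu1604_1.0.0-1_amd64.deb
sudo dpkg -i nvidia-machine-learning-repo-ubuntu1604_1.0.0-1_amd64.deb
sudo apt-get install -y libnccl2=2.3.7-1+cuda9.0 libnccl-dev=2.3.7-1+cuda9.0
# GPU build of PaddlePaddle
python3 -m pip install paddlepaddle-gpu==2.0.0a0 -i https://mirror.baidu.com/pypi/simple
```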

## ***Verify installation*** -After the installation is complete, you can use `python` or `python3` to enter the python interpreter and then use `import paddle.fluid` to verify that the installation was successful. - + After the installation is complete, you can use `python` or `python3` to enter the Python interpreter, then use `import paddle.fluid as fluid` followed by `fluid.install_check.run_check()` to verify that the installation was successful. + If `Your Paddle Fluid is installed succesfully!` appears, it means the installation was successful.

## ***How to uninstall*** diff --git a/doc/fluid/install/install_Windows.md b/doc/fluid/install/install_Windows.md index 5dc81152668f1cfdb2655bc9f4caaf2a36715ec3..0091a8c5f5d963dcac2a32aac9af74d4d6ac9512 100644 --- a/doc/fluid/install/install_Windows.md +++ b/doc/fluid/install/install_Windows.md @@ -2,55 +2,44 @@ ## 环境准备 -* **Windows 7/8/10 专业版/企业版 (64bit) (GPU版本支持CUDA 9.0/10.0,且仅支持单卡)** +* **Windows 7/8/10 专业版/企业版 (64bit)** + * **GPU版本支持CUDA 9.0/9.1/9.2/10.0/10.1,且仅支持单卡** * **Python 版本 2.7.15+/3.5.1+/3.6/3.7 (64 bit)** -* **pip 或 pip3 版本 9.0.1+ (64 bit)** +* **pip 版本 9.0.1+ (64 bit)** ### 注意事项 -* 确认需要安装 PaddlePaddle 的 Python 是您预期的位置,因为您计算机可能有多个 Python +* 确认您安装PaddlePaddle的 Python 是您预期的版本,因为您计算机可能有多个 Python,使用以下命令 - * 如果您是使用 Python 2,使用以下命令输出 Python 路径,根据的环境您可能需要将说明中所有命令行中的 python 替换为具体的 Python 路径 + python --version - where python + * 如果您是使用 Python 2,输出应是 2.7.15+ - * 如果您是使用 Python 3,使用以下命令输出 Python 路径,根据您的环境您可能需要将说明中所有命令行中的 python3 替换为 python 或者替换为具体的 Python 路径 + * 如果您是使用 Python 3,输出应是 3.5.1+/3.6+/3.7+ - where python3 +* 如果不符合您预期的版本,使用以下命令查看python的路径是否是您预期的位置 -* 需要确认python的版本是否满足要求 + where python - * 如果您是使用 Python 2,使用以下命令确认是 2.7.15+ + * 如果您是使用 Python 2, python2.7的安装目录应位于第一行 - python --version + * 如果您是使用 Python 3, python3.5.1+/3.6+/3.7+的安装目录应位于第一行 - * 如果您是使用 Python 3,使用以下命令确认是 3.5.1+/3.6/3.7 + * 您可以通过以下任意方法进行调整: - python3 --version + * 使用具体的Python路径来执行命令(例如C:\Python36\python.exe对应 Python 3,C:\Python27\python.exe对应 Python 2) + * 在环境变量中,将您预期的安装路径设置在第一顺序位(请在控制面板->系统属性->环境变量->PATH中修改) * 需要确认pip的版本是否满足要求,要求pip版本为9.0.1+ - * 如果您是使用 Python 2 + python -m ensurepip - python -m ensurepip - - python -m pip --version - - * 如果您是使用 Python 3 - - python3 -m ensurepip - - python3 -m pip --version + python -m pip --version * 需要确认Python和pip是64bit,并且处理器架构是x86_64(或称作x64、Intel 64、AMD64)架构,目前PaddlePaddle不支持arm64架构。下面的第一行输出的是"64bit",第二行输出的是"x86_64"、"x64"或"AMD64"即可: - * 如果您是使用 Python 2 + python -c "import platform;print(platform.architecture()[0]);print(platform.machine())" - python -c "import platform;print(platform.architecture()[0]);print(platform.machine())" - - * 如果您是使用 Python 3 - - python3 -c "import platform;print(platform.architecture()[0]);print(platform.machine())" * 默认提供的安装包需要计算机支持MKL,如果您的环境不支持,请在[这里](./Tables.html#ciwhls-release)下载`openblas`版本的安装包 * 当前版本暂不支持NCCL,分布式等相关功能 @@ -60,7 +49,7 @@ * 如果您的计算机没有 NVIDIA® GPU,请安装CPU版的PaddlePaddle * 如果您的计算机有 NVIDIA® GPU,并且满足以下条件,推荐安装GPU版的PaddlePaddle - * **CUDA 工具包9.0/10.0配合cuDNN v7.3+** + * **CUDA 工具包 9.0/10.0 配合 cuDNN v7.3+** * **GPU运算能力超过1.0的硬件设备** 注: 目前官方发布的windows安装包仅包含 CUDA 9.0/10.0 的单卡模式,不包含 CUDA 9.1/9.2/10.1,如需使用,请通过源码自行编译。 @@ -69,10 +58,9 @@ ## 安装方式 -Windows系统下有3种安装方式: +Windows系统下有2种安装方式: * pip安装(推荐) -* [conda安装](./install_Conda.html) * [源码编译安装](./compile/compile_Windows.html#win_source) 这里为您介绍pip安装方式 @@ -80,32 +68,27 @@ Windows系统下有3种安装方式: ## 安装步骤 * CPU版PaddlePaddle: - * 对于Python 2: `python -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple` 或 `python -m pip install paddlepaddle -i https://pypi.tuna.tsinghua.edu.cn/simple` - * 对于Python 3: `python3 -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple` 或 `python3 -m pip install paddlepaddle -i https://pypi.tuna.tsinghua.edu.cn/simple` + * `python -m pip install paddlepaddle==2.0.0a0 -i https://mirror.baidu.com/pypi/simple`(推荐使用百度源) 或 `python -m pip install paddlepaddle==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` * GPU版PaddlePaddle: - * 对于Python 2: `python -m pip install paddlepaddle-gpu -i https://mirror.baidu.com/pypi/simple` 或 `python -m pip install paddlepaddle-gpu 
-i https://pypi.tuna.tsinghua.edu.cn/simple` - * 对于Python 3: `python3 -m pip install paddlepaddle-gpu -i https://mirror.baidu.com/pypi/simple` 或 `python3 -m pip install paddlepaddle-gpu -i https://pypi.tuna.tsinghua.edu.cn/simple` + * `python -m pip install paddlepaddle-gpu==2.0.0a0 -i https://mirror.baidu.com/pypi/simple` 或 `python -m pip install paddlepaddle-gpu==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` 您可[验证是否安装成功](#check),如有问题请查看[FAQ](./FAQ.html) 注: -* 如果是python2.7, 建议使用`python`命令; 如果是python3.x, 则建议使用`python3`命令 - - -* `python -m pip install paddlepaddle-gpu -i https://pypi.tuna.tsinghua.edu.cn/simple` 此命令将安装支持CUDA 10.0(配合cuDNN v7.3+)的PaddlePaddle,如您对CUDA或cuDNN版本有不同要求,可用`python -m pip install paddlepaddle-gpu==[版本号] -i https://pypi.tuna.tsinghua.edu.cn/simple`或 `python3 -m pip install paddlepaddle-gpu==[版本号] -i https://pypi.tuna.tsinghua.edu.cn/simple`命令来安装,版本号请见[这里](https://pypi.org/project/paddlepaddle-gpu#history), 关于paddlepaddle与CUDA, cuDNN版本的对应关系请见[安装包列表](./Tables.html#whls) +* `python -m pip install paddlepaddle-gpu==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` 此命令将安装支持CUDA 10.0(配合cuDNN v7.3+)的PaddlePaddle。 ## 验证安装 -安装完成后您可以使用 `python` 或 `python3` 进入python解释器,输入`import paddle.fluid as fluid` ,再输入 +安装完成后您可以使用 `python` 进入python解释器,输入`import paddle.fluid as fluid` ,再输入 `fluid.install_check.run_check()` 如果出现`Your Paddle Fluid is installed succesfully!`,说明您已成功安装。 ## 如何卸载 -* **CPU版本的PaddlePaddle**: `python -m pip uninstall paddlepaddle` 或 `python3 -m pip uninstall paddlepaddle` +* **CPU版本的PaddlePaddle**: `python -m pip uninstall paddlepaddle` -* **GPU版本的PaddlePaddle**: `python -m pip uninstall paddlepaddle-gpu` 或 `python3 -m pip uninstall paddlepaddle-gpu` +* **GPU版本的PaddlePaddle**: `python -m pip uninstall paddlepaddle-gpu` diff --git a/doc/fluid/install/install_Windows_en.md b/doc/fluid/install/install_Windows_en.md index f06bea82c198dc967fe5e2dcdfc9fc3be543f8c6..4f9ea010bf8f7236525961b5fd88d98b69a5b928 100644 --- a/doc/fluid/install/install_Windows_en.md +++ b/doc/fluid/install/install_Windows_en.md @@ -1,58 +1,92 @@ # **Installation on Windows** -## Operating Environment +## Environment Preparation -* *Windows 7/8/10 Pro/Enterprise(64bit)(CUDA 8.0/9.0/10.0 are supported, and only single GPU is supported)* -* *Python 2.7.15+/3.5.1+/3.6/3.7(64bit)* -* *pip or pip3 9.0.1+(64bit)* +* **Windows 7/8/10 Pro/Enterprise(64bit)** + * **GPU Version support CUDA 9.0/9.1/9.2/10.0/10.1, and only support single GPU** +* **Python version 2.7.15+/3.5.1+/3.6/3.7(64bit)** +* **pip version 9.0.1+(64bit)** ### Precautions -* The default installation package requires your computer to support AVX instruction set and MKL. If your environment doesn’t support AVX instruction set and MKL, please download [these](./Tables.html/#ciwhls-release) `no-avx`, `openblas` versions of installation package. -* The current version doesn’t support functions related to NCCL and distributed learning. 
+* Confirm that the Python used to install PaddlePaddle is the version you expect, because your computer may have more than one Python; use the following command: + + python --version + + * If you are using Python 2, the output should be 2.7.15+ + + * If you are using Python 3, the output should be 3.5.1+/3.6+/3.7+ + +* If Python doesn't match your expected version, use the following command to see whether Python's path is where you expect it to be: + + where python + + * If you are using Python 2, the installation directory of python2.7 should be on the first line + + * If you are using Python 3, the installation directory of python3.5.1+/3.6+/3.7+ should be on the first line + + * You can adjust it in either of the following ways: + + * Use a specific Python path to execute the commands (e.g. C:\Python36\python.exe for Python 3, C:\Python27\python.exe for Python 2) + * Put your expected installation path first in the PATH environment variable (modify it in Control Panel -> System Properties -> Environment Variables -> PATH) + +* Confirm whether the pip version meets the requirement: pip 9.0.1+ is required + + python -m ensurepip + + python -m pip --version + +* Confirm that Python and pip are 64-bit, and that the processor architecture is x86_64 (also called x64, Intel 64 or AMD64). Currently, PaddlePaddle does not support the arm64 architecture. The first line of output from the following command should be "64bit", and the second line should be "x86_64", "x64" or "AMD64": + + python -c "import platform;print(platform.architecture()[0]);print(platform.machine())" + + +* The installation package provided by default requires the computer to support MKL. If your environment does not support MKL, please download the `openblas` version of the installation package from [here](./Tables.html#ciwhls-release) +* NCCL, distributed training and other related functions are not supported in the current version. + ## CPU or GPU * If your computer doesn’t have NVIDIA® GPU, please install the CPU version of PaddlePaddle * If your computer has NVIDIA® GPU, and it satisfies the following requirements, we recommend you to install the GPU version of PaddlePaddle - * *CUDA Toolkit 8.0 with cuDNN v7.1+, or 9.0/10.0 with cuDNN v7.3+* + * *CUDA Toolkit 9.0/10.0 with cuDNN v7.3+* * *GPU's computing capability exceeds 1.0* -Note: currently, the official Windows installation package only support CUDA 8.0/9.0/10.0 with single GPU, and don't support CUDA 9.1/9.2/10.1. if you need to use, please compile by yourself through the source code. +Note: currently, the official Windows installation package only supports CUDA 9.0/10.0 with a single GPU and doesn't include CUDA 9.1/9.2/10.1. If you need them, please compile PaddlePaddle from the source code yourself. Please refer to the NVIDIA official documents for the installation process and the configuration methods of [CUDA](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/) and [cuDNN](https://docs.nvidia.com/deeplearning/sdk/cudnn-install/). ## Installation Method -There are 3 ways to install PaddlePaddle on Windows: +There are 2 ways to install PaddlePaddle on Windows: * pip installation (recommended) -* [Docker installation](./install_Docker.html) * [source code compilation and installation](./compile/compile_Windows.html/#win_source) We would like to introduce the pip installation here. 
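Before moving on to the installation steps, the precaution checks above can also be bundled into a single script; the sketch below is only an illustration for this guide (not an official PaddlePaddle tool) and prints the same information: interpreter path, version, bitness, machine architecture, and pip version.

```python
# Illustrative helper that bundles the precaution checks above.
from __future__ import print_function  # keeps the output readable on Python 2.7

import platform
import subprocess
import sys


def check_environment():
    # Compare this path with the output of `where python`.
    print("Python executable:", sys.executable)
    # Expected: 2.7.15+ for Python 2, or 3.5.1+/3.6/3.7 for Python 3.
    print("Python version:", platform.python_version())
    # Expected: "64bit" and "x86_64"/"x64"/"AMD64" (arm64 is not supported).
    print("Bitness:", platform.architecture()[0])
    print("Machine:", platform.machine())
    # Same as running `python -m pip --version`; pip must be 9.0.1+.
    subprocess.call([sys.executable, "-m", "pip", "--version"])


if __name__ == "__main__":
    check_environment()
```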
-## Installation Process +## Installation Steps -* CPU version of PaddlePaddle: `pip install paddlepaddle` or `pip3 install paddlepaddle` -* GPU version of PaddlePaddle: `pip install paddlepaddle-gpu` or `pip3 install paddlepaddle-gpu` +* CPU version of PaddlePaddle: + * `python -m pip install paddlepaddle==2.0.0a0 -i https://mirror.baidu.com/pypi/simple` (Baidu source is recommended) or `python -m pip install paddlepaddle==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` -There is a checking function below for [verifyig whether the installation is successful](#check). If you have any further questions, please check the [FAQ part](./FAQ.html). +* GPU version of PaddlePaddle: + * `python -m pip install paddlepaddle-gpu==2.0.0a0 -i https://mirror.baidu.com/pypi/simple` (Baidu source is recommended) or `python -m pip install paddlepaddle-gpu==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` -Notice: +There is a checking function below for [verifying whether the installation is successful](#check). If you have any further questions, please check the [FAQ](./FAQ.html). -* The version of pip and the version of python should be corresponding: python2.7 corresponds to `pip`; python3.x corresponds to `pip3`. -* `pip install paddlepaddle-gpu` This command will install PaddlePaddle that supports CUDA 8.0(with cuDNN v7.1+), or CUDA 9.0/10.0(with cuDNN v7.3+). +Notice: +* `python -m pip install paddlepaddle-gpu==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` This command will install PaddlePaddle that supports CUDA 10.0 (with cuDNN v7.3+). ## Installation Verification -After completing the installation process, you can use `python` or `python3` to enter python interpreter and input `import paddle.fluid as fluid` and then `fluid.install_check.run_check()` to check whether the installation is successful. +After completing the installation process, you can use `python` to enter the python interpreter and input `import paddle.fluid as fluid` and then `fluid.install_check.run_check()` to check whether the installation is successful. If you see `Your Paddle Fluid is installed succesfully!`, your installation is verified successful. ## Uninstall PaddlePaddle -* ***CPU version of PaddlePaddle***: `pip uninstall paddlepaddle` or `pip3 uninstall paddlepaddle` +* ***CPU version of PaddlePaddle***: `python -m pip uninstall paddlepaddle` -* ***GPU version of PaddlePaddle***: `pip uninstall paddlepaddle-gpu` or `pip3 uninstall paddlepaddle-gpu` +* ***GPU version of PaddlePaddle***: `python -m pip uninstall paddlepaddle-gpu` diff --git a/doc/fluid/install/install_script.md b/doc/fluid/install/install_script.md index 536179acc59a00297ec953b1a09b0adf45bd62d2..f35ded4316b9d30f5c9efc23e239d8b8cf8d8ebd 100644 --- a/doc/fluid/install/install_script.md +++ b/doc/fluid/install/install_script.md @@ -17,7 +17,7 @@ 检测您的机器是否安装我们支持的CUDA,cuDNN,具体地: - 1. 在`/usr/local/` 及其子目录下寻找 `cuda/cuda8/cuda9` 目录下的`version.txt`文件(通常如果您以默认方式安装了CUDA)。 如果提示未找到CUDA请使用命令`find / -name version.txt`找到您所需要的CUDA目录下的“version.txt”路径,然后按照提示输入。 + 1. 在`/usr/local/` 及其子目录下寻找 `cuda/cuda8/cuda9/cuda10` 目录下的`version.txt`文件(通常如果您以默认方式安装了CUDA)。 如果提示未找到CUDA请使用命令`find / -name version.txt`找到您所需要的CUDA目录下的“version.txt”路径,然后按照提示输入。 2. 
在`/usr` 及其子目录下寻找文件 `cudnn.h` , 如果您的cuDNN未安装在默认路径请使用命令`find / -name cudnn.h`寻找您希望使用的cuDNN版本的`cudnn.h`路径并按提示输入 如果未找到相应文件,则会安装CPU版本的PaddlePaddle diff --git a/doc/fluid/release_note_cn.md b/doc/fluid/release_note_cn.md index 44748ac6685f4dec4891d1547708c235f3f8dc3c..86e340a195b164bc95b5baddd8f21eec7807efbf 100644 --- a/doc/fluid/release_note_cn.md +++ b/doc/fluid/release_note_cn.md @@ -1,340 +1,71 @@ +# Release Note -Release Notes -============== -## 重要更新 +## 重要声明 -本版本对框架功能层面进行了重点增强,预测部署能力全面提升,分布式发布PLSC支持超大规模分类,并对参数服务器模式进行优化整合。对编译选项、编译依赖以及代码库进行了全面清理优化。模型库持续完善,优化了整体层次结构,增加了动态图模型实现。端到端开发套件和工具组件进一步完善。 + - 此版本为测试版,还在迭代中,目前还没有稳定,后续API会根据反馈有可能进行不兼容的升级。对于想要体验飞桨最新特性的开发者,欢迎试用此版本;对稳定性要求高的工业级应用场景推荐使用Paddle 1.8稳定版本。 -**训练框架**:增加自动混合精度训练AMP接口和新控制流接口;优化Tensor使用方式和显存分配策略;新增支持Nvidia DALI GPU数据预处理库;持续优化基础OP的功能和性能;动态图的功能进一步完善,性能大幅提升,对data independent的动态图模型提供转为静态图可预测部署模型的功能;框架调试分析功能和易用性全面提升。 + - 此版本主推命令式编程模式(动态图)的开发方式,并提供了高层API的封装。命令式编程模式具有很好的灵活性,高层API可以大幅减少重复代码。对于初学者或基础的任务场景,推荐使用高层API的开发方式,简单易用;对于资深开发者想要实现复杂的功能,推荐使用命令式编程模式的API,灵活高效。 -**预测部署**:服务器端预测库的Python API大幅优化,新增R语言、Go语言调用预测库的使用方法和示例,强化了量化支持能力;Paddle Lite支持无校准数据的训练后量化方法生成的模型,加强对OpenCL的支持,支持昆仑XPU的预测;模型压缩库PaddleSlim重构裁剪、量化、蒸馏、搜索接口,新增大规模可扩展知识蒸馏框架 Pantheon,与模型库充分打通。 + - 此版本同时对飞桨的API目录体系做了优化,原目录下API会建立alias仍然可用,但建议新的程序使用新目录结构。 -**分布式方面**:参数服务器模式下针对transpiler的同步、半异步、全异步三种模式,后端实现上统一到communicator中,前端接口统一到fleet中,通过fleet strategy灵活选择不同模式;发布大规模分类库PLSC,通过模型并行支持超多类别的分类任务。 +## 基础框架 -**基础模型库**:发布语音合成库Parakeet,包括多个前沿合成算法;PaddleCV新增14个图像分类预训练模型,3D和跟踪方向模型持续丰富;PaddleNLP的分词和词性标注模型支持jieba分词;PaddleRec增加多任务模型MMoE。模型库整体增加了广泛的动态图模型实现。模型库整体层次结构做了调整优化。 +### 基础API -**端到端开发套件**:PaddleDetection和PaddleSeg新增大量模型实现及预训练模型,典型模型的训练速度和精度提升,模型压缩和部署能力大幅提升,使用体验全面优化。发布ElasticRec推荐排序系统,通过K8S进行部署,支持流式训练和在线预测服务。 +- 组网类API实现动静统一,支持在命令式编程模式和声明式编程模式(静态图)两种模式下运行 +- API目录结构调整,Paddle 1.x 版本的API主要位于paddle.fluid目录,本版本对API目录结构进行调整,使得分类更为合理,具体调整规则如下: + - 原fluid.layers下跟tensor操作相关的API移动到paddle.tensor目录 + - 原fluid.layers下跟组网相关的操作移动到paddle.nn目录,带有参数的类型放到paddle.nn.layers目录,函数式的API放到paddle.nn.functional目录 + - 原fluid.dygraph下命令式编程模式专用API移动到paddle.imperative目录 + - 创建paddle.framework目录,用来存放跟框架相关的Program, Executor等API + - 创建paddle.distributed目录,用来存放分布式相关的API + - 创建paddle.optimizer目录,用来存放优化算法相关的API + - 创建paddle.metric目录,用来创建评估指标计算相关的API + - 创建paddle.incubate目录,用来存放孵化中的代码,其中的API有可能会发生调整,该目录存放了复数计算complex和高层API相关的代码 + - 所有在paddle.tensor和paddle.framework目录下的API,在paddle目录下创建别名,比如:paddle.tensor.creation.ones可以使用paddle.ones别名 -**工具组件**:PaddleHub新增52个预训练模型,总数超过100,功能和体验持续优化;多任务学习框架PALM升级内核,开放API调用,支持更多的任务类型;联邦学习PaddleFL新增公开数据集。 +- 新增API如下: + - 在paddle.nn目录新增8个组网类的API: interpolate, LogSoftmax, ReLU, Sigmoid, loss.BCELoss, loss.L1Loss, loss.MSELoss, loss.NLLLoss + - 在paddle.tensor目录新增59个Tensor相关API:add, addcmul, addmm, allclose, arange, argmax, atan, bmm, cholesky, clamp, cross, diag_embed, dist, div, dot, elementwise_equal, elementwise_sum, equal, eye, flip, full, full_like, gather, index_sample, index_select, linspace, log1p, logsumexp, matmul, max, meshgrid, min, mm, mul, nonzero, norm, ones, ones_like, pow, randint, randn, randperm, roll, sin, sort, split, sqrt, squeeze, stack, std, sum, t, tanh, tril, triu, unsqueeze, where, zeros, zeros_like + - 新增device_guard用来指定设备,新增manual_seed用来初始化随机数种子 +- 部分原fluid目录下API,并没有迁移到paddle目录下 + - 原fluid.contrib目录下API,保留在原位置,未迁移:BasicGRUUnit, BasicLSTMUnit, BeamSearchDecoder, Compressor, HDFSClient, InitState, QuantizeTranspiler, StateCell, TrainingDecoder, basic_gru, basic_lstm, convert_dist_to_sparse_program, ctr_metric_bundle, extend_with_decoupled_weight_decay, fused_elemwise_activation, 
fused_embedding_seq_pool, load_persistables_for_increment, load_persistables_for_inference, match_matrix_tensor, memory_usage, mixed_precision.AutoMixedPrecisionLists, mixed_precision.decorate, multi_download, multi_upload, multiclass_nms2, op_freq_statistic, search_pyramid_hash, sequence_topk_avg_pooling, shuffle_batch, tree_conv, var_conv_2d + - 原LodTensor相关的API,目前还在开发中,暂未迁移:LoDTensor, LoDTensorArray, create_lod_tensor, create_random_int_lodtensor, DynamicRNN, array_length, array_read, array_write, create_array, ctc_greedy_decoder, dynamic_gru, dynamic_lstm, dynamic_lstmp, im2sequence, linear_chain_crf, lod_append, lod_reset, sequence_concat, sequence_conv, sequence_enumerate, sequence_expand, sequence_expand_as, sequence_first_step, sequence_last_step, sequence_mask, sequence_pad, sequence_pool, sequence_reshape, sequence_reverse, sequence_scatter, sequence_slice, sequence_softmax, sequence_unpad, tensor_array_to_tensor + - 原fluid下分布式相关API,目前还在开发中,暂未迁移 + - 原fluid目录以下API,将在高层API中重新实现,未迁移:nets.glu, nets.img_conv_group, nets.scaled_dot_product_attention, nets.sequence_conv_pool, nets.simple_img_conv_pool + - 原fluid目录以下API,有待进一步完善,暂未迁移:dygraph.GRUUnit, layers.DecodeHelper, layers.GreedyEmbeddingHelper, layers.SampleEmbeddingHelper, layers.TrainingHelper, layers.autoincreased_step_counter, profiler.cuda_profiler, profiler.profiler, profiler.reset_profiler, profiler.start_profiler, profiler.stop_profiler + - 原fluid目录以下API不再推荐使用,未迁移:DataFeedDesc, DataFeeder, clip.ErrorClipByValue, clip.set_gradient_clip, dygraph_grad_clip.GradClipByGlobalNorm, dygraph_grad_clip.GradClipByNorm, dygraph_grad_clip.GradClipByValue, initializer.force_init_on_cpu, initializer.init_on_cpu, io.ComposeNotAligned.with_traceback, io.PyReader, io.load_params, io.load_persistables, io.load_vars, io.map_readers, io.multiprocess_reader, io.save_params, io.save_persistables, io.save_vars, io.xmap_readers, layers.BasicDecoder, layers.BeamSearchDecoder, layers.Decoder, layers.GRUCell, layers.IfElse, layers.LSTMCell, layers.RNNCell, layers.StaticRNN, layers.Switch, layers.While, layers.create_py_reader_by_data, layers.crop, layers.data, layers.double_buffer, layers.embedding, layers.fill_constant_batch_size_like, layers.gaussian_random_batch_size_like, layers.get_tensor_from_selected_rows, layers.load, layers.merge_selected_rows, layers.one_hot, layers.py_reader, layers.read_file, layers.reorder_lod_tensor_by_rank, layers.rnn, layers.uniform_random_batch_size_like, memory_optimize, release_memory, transpiler.memory_optimize, transpiler.release_memory +### 高层API -## 训练框架 +- 新增paddle.incubate.hapi目录,对模型开发过程中常见的组网、训练、评估、预测、存取等操作进行封装,实现低代码开发,MNIST手写数字识别任务对比命令式编程模式实现方式,高层API可减少80%执行类代码。 +- 新增Model类封装,继承Layer类,封装模型开发过程中常用的基础功能,包括: + - 提供prepare接口,用于指定损失函数和优化算法 + - 提供fit接口,实现训练和评估,可通过callback方式实现训练过程中执行自定义功能,比如模型存储等 + - 提供evaluate接口,实现评估集上的预测和评估指标计算 + - 提供predict接口,实现特定的测试数据推理预测 + - 提供train_batch接口,实现单batch数据的训练 +- 新增Dataset接口,对常用数据集进行封装,支持数据的随机访问 +- 新增常见Loss和Metric类型的封装 +- 新增CV领域Resize, Normalize等16种常见的数据处理接口 +- 新增CV领域lenet, vgg, resnet, mobilenetv1, mobilenetv2图像分类骨干网络 +- 新增NLP领域MultiHeadAttention, BeamSearchDecoder, TransformerEncoder, TransformerDecoder , DynamicDecode API接口 +- 发布基于高层API实现的12个模型,Transformer,Seq2seq, LAC,BMN, ResNet, YOLOv3, , VGG, MobileNet, TSM, CycleGAN, Bert, OCR -- API - - 增加自动混合精度训练AMP接口:能以通用的方式把一个网络转成混合精度训练,同时保证精度波动在正常范围内 - - 增加新的控制流接口并推荐使用:新增while_loop(循环控制功能)、cond(条件分支功能)、case和switch_case(分支控制功能)4个控制流OP,更加易用,且支持如下新增功能: - - 支持使用python callable作为控制条件或执行体 - - 支持控制流中的不同分支使用不同loss或optimizer - - 
支持控制流中的condition部分使用CPU数据或GPU数据 - - 部分API参数支持使用变量列表:针对部分API的parameter_list或no_grad_set参数只支持使用字符串列表的情况,增加对变量列表的支持,使用如下API时不再需要提前获取相关变量的name属性: - - fluid.backward.append_backward(loss, parameter_list=None, no_grad_set=None, callbacks=None) - - fluid.backward.gradients(targets, inputs, target_gradients=None, no_grad_set=None) - - 各种Optimizer的minimize方法,如Adam的minimize:minimize(loss, startup_program=None, parameter_list=None, no_grad_set=None, grad_clip=None) -- 基础功能优化 - - 支持使用numpy的float16类型设置Tensor数据,无需先转换为uint16类型。 - - 支持直接使用负号,得到Tensor的相反数。 - - 显存分配策略: - - 默认策略变为AutoGrowth:在不影响训练速度的情况下,按需申请显存。规避之前的默认显存预分配策略下难以在同一张GPU卡上再起新任务的问题。 - - 多卡任务显存分配调整:将不同GPU卡上的显存分配器设置为Lazy初始化的方式。若用户不使用某张卡,则不会在该卡上申请显存。避免当其他GPU卡上有显存占用时,在空闲GPU卡上跑任务若不设置CUDA_VISIBLE_DEVICES导致显存OOM的问题。 - - OP功能升级 - - elu:该激活函数支持计算二阶梯度。 - - prroi_pool:rois参数可以接受Tensor或LoDTensor类型。 - - conv2d,pool2d,batch_norm,lrn:反向计算全部支持使用MKL-DNN高性能计算库。 - - argsort:支持降序排序(新增descending参数,默认值False)。 -- 基础性能优化 - - DALI预处理加速 - - 增加对Nvidia DALI GPU数据预处理库的支持,可用于加速图片,视频,语音等数据预处理。 - - 自动混合精度训练优化 - - 实现如下优化策略,并配合DALI数据预处理,ResNet50模型训练吞吐大幅提升:V100单卡混合精度训练吞吐从600+ images/sec提升到1000+ images/sec;单机8卡吞吐达到7840 image/sec,4机32卡吞吐达到28594 images/sec。 - - 增强batch_norm和conv2d等op对NHWC数据布局输入的支持,以使用Tensor Core技术加速fp16计算。 - - 基于IR Pass机制对模型中的部分op pattern进行融合,如batch_norm和relu等。 - - 优化elementwise(add,mul)等op的计算kernel。 - - 优化RecomputeOptimizer提升batchsize, 在Bert-large模型上最大batchsize比不使用RecomputeOptimizer增大533.62%,比上一版本提升一倍。 - - OP性能优化 - - 实现embedding和sequence_pool的融合算子fuse_emb_seq_pool,优化bloom_filter中的murmurhash3_x64_128,有效提升部分NLP模型的训练速度。 - - 优化了mean op的GPU性能,输入数据为32*32*8*8的Tensor时,前向计算速度提升2.7倍。 - - 优化assign、lod_reset op,避免不需要的显存拷贝和data transform。 - - 优化了stack OP的kernel实现,XLnet/Ernie模型GPU单卡性能提升4.1%。 -- 动态图 - - 功能优化 - - 移除了动态图Layers 中的 name_scope 参数,使得用户更方便继承和调用。 - - 移除to_variable接口中的block参数,简化了API的使用。 - - 针对模型参数依赖数据的问题,移除了 build_once设计,使得Layers在 **init** 执行完成之后就可以获取到所有的参数表,方便save load、参数初始化、参数debug、参数优化等。 - - 完善自动剪枝,方便用户组网并减少反向计算量。 - - 支持 SelectedRows 操作,使 Embedding 层支持单卡的稀疏更新。 - - 针对框架缺少容器类的问题,新增ParameterList、LayerList、Sequencial功能,方便用户组网。 - - 支持named_sublayers、named_parameters功能,方便用户编程。 - - 支持Linear lr warmup decay策略。 - - 性能优化 - - 优化了python 与c++ 交互,GradMaker、OperatorBase、allocator等。基于LSTM的语言模型任务p在P40机器上性能提升提升270%。 - - 针对optimize中多次调用optimized_guard无用代码导致的性能问题,移除了冗余代码。Transformer模型(batch_size=64)在P40机器上,SGD、Adam等优化器有5%~8%%的性能提升。 - - 针对AdamOptimizer中额外添加scale_op更新beta参数对性能的影响,将beta更新逻辑融合到adam_op中,减少op kernel调用开销。Dialogue-PLATO模型P40机器上性能提升9.67%。 - - 优化动态图异步DataLoader,在Mnist、ResNet、等模型上整体训练速度提升约30%。 - - 新增numpy bridge功能,支持在cpu模式下Tensor和ndarray之间共享底层数据,避免创建Variable时numpy输入需要拷贝的问题,提升效率。 - - 显存优化:提前删除反向不需要Tensor Buffer的前向变量空间的优化策略,在ResNet等模型上最大batch size提升20%-30%以上。 - - 动态图部署 - - 支持TracedLayer接口,实现 data independent的动态图模型转为静态图可预测部署的模型。 -- 调试分析 - - 报错信息优化 :对框架报错信息整体归类,实现报错信息的体系化,同时完成文案优化,帮助用户更快速、准确的定位和解决问题。 - - 优化性能分析profile 功能 - - 增强profiler的功能和准确性,支持不同级别的profile选项,能够在profile数据中记录事件的调用关系并打印出来。 - - 优化nan inf检查调试(通过FLAGS_check_nan_inf生效),性能、功能及输出信息均有较大提升: - - 速度上,v100测试ResNet50模型相比原工具组件约有1000倍性能提升,保持正常训练80%以上的效率。 - - 功能上,增加fp16的支持,可设置环境变量跳过op、op_role、op_var的检查,方便fp16模型的调试。 - - 输出信息更加翔实,除出错的op及tensor名称外,还会打印出错的nan、inf及正常数值的数量以便于调试。 -- 发布cpu训练和预测的轻量级安装包paddlepaddle-tiny,支持window/linux/Mac操作系统以及python27/python35/python36/python37: - - 编译选项:no avx, no ml, no gpu, no unittest - - 裁剪掉slim和部分dataset。 - - linux包体积从90M减小到37M;windows包体积从50.8M减小到9.6M;mac包体积从59M减小到19.8M。 - - 安装requirements依赖从15个减小到7个。 +### 性能优化 -## 预测部署 +- 新增`reshape+transpose+matmul` fuse,使得Ernie量化后 INT8 
模型在原来基础上性能提升~4%(在6271机器上)。量化后INT8模型相比未经过DNNL优化(包括fuses等)和量化的FP32模型提速~6.58倍 -- 服务器端预测库 - - Python API - - 支持从内存读写模型,以满足模型加密的需求。 - - 不再在预测模型最后添加 Scale 算子。 - - 新增对ZeroCopy预测的支持,与C++接口基本一致,支持以numpy.ndarray作为输入和输出,在Python端使用更加方便。 - - 在AnalysisConfig中增加多个接口,完整覆盖C++预测的功能,包括删除pass、禁用预测glog等。 - - 其他编程语言的支持 - - 新增R语言、Go语言调用预测库的使用方法和示例 - - 对外提供 ProtoBuf 对应的头文件,方便用户解析模型结构的需求。 - - 带TRT编译的预测库不再从thrid_party中提供TensorRT库,需要用户自行到https://developer.nvidia.com/tensorrt 下载 - - 功能增强: - - 打通Paddle Lite以子图方式接入,已验证 ResNet50。 - - 新增MKL-DNN FC INT8 kernel的支持 - - Paddle-TensorRT支持Ernie模型,Ernie模型(seq length=128) 在T4卡上fp16预测速度为3.6ms, 比fp32加速37%。 - - 量化:在ERNIE INT8精度相比于FP32 精度提升2%下,ERNIE INT8在第二代至强可扩展平台6271上单线程性能优化提升2.70倍,多线程性能提升1.79倍 -- 移动/嵌入式端Paddle Lite(https://github.com/PaddlePaddle/Paddle-Lite) - - 对应发布v2.3版本。 - - model_optimize_tool多项功能升级。 - - 支持“无校准数据的训练后量化方法”,减小模型存储空间(2~4倍)。 - - OpenCL:完成30个Image2D Kernel迁移,涵盖14个OP。 - - 对FPGA、NPU的支持进一步加强;支持昆仑XPU的预测。 - - 发布全新官网文档;新增“无校准数据的训练后量化方法”使用文档。 -- Paddle Serving(https://github.com/PaddlePaddle/Serving): - - 发布bert类语义理解模型的远程文本向量表示预测服务。 - - 发布了paddle-gpu-serving whl包,通过pip安装和Python代码即可部署和使用预测服务; - - 支持Paddlehub中的13种语义理解模型,支持单机多卡,使用Ernie_tiny模型在单张P4 GPU下平均样本长度为7时预测速度为869.56样本每秒。 -- PaddleSlim(https://github.com/PaddlePaddle/PaddleSlim): - - 拆分PaddleSlim为独立repo。 - - 重构裁剪、量化、蒸馏、搜索接口,对用户开放底层接口。 - - 量化: - - 新增基于KL散度的离线量化功能,支持对Embedding层量化。 - - 新增对FC的QAT MKL-DNN量化策略支持 - - 新增PostTrainingQuantization,完整实现训练后量化功能:支持量化30种OP,支持灵活设置需要量化的OP,生成统一格式的量化模型,具有耗时短、易用性强、精度损失较小的优点。 - - 量化训练支持设定需要量化的OP类型。 - - 裁剪: 重构剪裁实现,方便扩展支持更多类型的网络。 - - 搜索: - - 支持SA搜索,增加更多的搜索空间,支持用户自定义搜索空间。 - - 新增one-shot搜索算法,搜索速度比上个版本快20倍。 - - 新增大规模可扩展知识蒸馏框架 Pantheon - - student 与 teacher 、teacher与 teacher 模型之间充分解耦,可分别独立运行在不同的物理设备上,便于充分利用计算资源; - - 支持 teacher 模型的单节点多设备大规模预测,在 BERT 等模型上测试加速比达到线性; - - 用 TCP/IP 协议实现在线蒸馏模式的通信,支持在同一网络环境下,运行在任意两个物理设备上的 teacher 模型和 student 模型之间进行知识传输; - - 统一在线和离线两种蒸馏模式的 API 接口,不同的 teacher 模型可以工作在不同的模式下; - - 在 student 端自动完成知识的归并与知识数据的 batch 重组,便于多 teacher 模型的知识融合。 - - 模型库: - - 发布ResNet50、MobileNet模型的压缩benchmark - - 打通检测库,并发布YOLOv3系列模型的压缩benchmark - - 打通分割库,并发布Deepabv3+系列分割模型的压缩benchmark - - 完善文档: - - 补充API文档;新增入门教程和高级教程;增加ModelZoo文档,覆盖分类、检测、分割任务。所有文档包含中、英文。 +### 调试分析 -## 分布式 +- 针对Program打印内容过于冗长,在调试中利用效率不高的问题,大幅简化Program、Block、Operator、Variable等对象的打印字符串,不损失有效信息的同时提升调试效率 +- 针对第三方库接口`boost::get`不安全,运行中抛出异常难以调试的问题,增加`BOOST_GET`系列宏替换了Paddle中600余处存在风险的`boost::get`,丰富出现`boost::bad_get`异常时的报错信息,具体增加了C++报错信息栈,出错文件及行号、期望输出类型和实际类型等,提升调试体验 -- 参数服务器模式: - - 大幅降低训练过程中的内存占用,在1亿规模embedding任务上,Trainer端内存可以降低90% - - 大幅降低分布式保存模型、加载模型的内存占用, Pserver端内存峰值最大可降低为原先的$1/N,N$为Pserver节点个数。 - - 优化GEO-SGD 稠密参数通信 - - 支持分布式AUC指标计算 - - 新增分布式Barrier功能 - - 非Fleet的transpiler API加入过期警示, 该API计划在PaddlePaddle-Fluid 2.0中移除 - - Communicator加入半异步模式和同步模式 - - TrainFromDataset训练接口支持半异步模式和同步模式 - - Fleet加入DistributedStrategy, 进一步提升分布式易用性, 整合目前分布式相关FLAG - - Fleet pslib模式支持一个program多loss训练,优化训练性能 - - 千亿稀疏模式支持k8s环境。 -- 大规模分类库PLSC:支持受限于显存容量数据并行无法处理的大规模分类问题(https://github.com/PaddlePaddle/PLSC) - - 内建ResNet50、ResNet101和ResNet152三种模型,并支持自定义模型;单机8张V100 GPU配置下,ResNet50模型百万类别训练速度2,122.56 images/s,相比标准ResNet50模型加速倍1.3倍; - - 发布模型在线预测服务plsc-serving whl包,预测人脸识别模型的图片语义向量表示,支持使用用户训练的模型进行预测。ResNet50模型(batch size=256)在单张V100 GPU下预测速度为523.47 images/s; - - 发布基于ResNet50网络和MS1M-ArcFace数据集的预训练模型:https://plsc.bj.bcebos.com/pretrained_model/resnet50_distarcface_ms1mv2.tar.gz。 -- 发布ResNet50混合精度训练benchmark(单卡、多卡、多机)。 +## Bug修复 -## 基础模型库 -(https://github.com/PaddlePaddle/models) + - 修复while loop中存在slice操作时计算结果错误的bug + - 修复inplace ops引起的transformer 模型性能下降问题 + - 通过完善cache key, 
解决Ernie精度测试最后一个batch运行失败的问题 + - 修复fluid.dygraph.guard等context中出现异常时无法正确退出的问题 -- PaddleNLP - - seq2seq支持RL和GAN等训练模式 - - 发布分词和词性标注训练模型,利用知识蒸馏框架 Pantheon,在自有数据集上比paddleNLP上LAC上F1值提升1%;合入jieba分词,通过加入use_paddle标签来开启深度学习模型模式;并在在jieba加入paddle版本检测和回退机制,保障用户体验。 - - 增加动态图模型实现:word2vec、senta、transformer、bert、seq2seq、LAC。 -- PaddleSpeech - - 语音合成:发布合成库Parakeet - - 实现语音合成模型数据预处理、训练和合成等的标准工作流 - - 提供对常见数据集的开箱即用的预处理实现 - - 提供语音合成领域常用模型组件,为实现模型提供支持 - - 发布语音合成模型 DeepVoice3、ClarinNet 、TransformerTTS、FastSpeech、WaveNet、WaveFlow - -- PaddleCV - - 图像分类: - - 新增预训练模型SENet-vd、Res2Net、HRNet系列模型总共14个: - - SE_ResNet18_vd,SE_ResNet34_vd,SE_ResNeXt50_vd_32x4d,ResNeXt152_vd_32x4d - - Res2Net50_26w_4s,Res2Net50_14w_8s,Res2Net50_vd_26w_4s - - HRNet_W18_C,HRNet_W30_C,HRNet_W32_C,HRNet_W40_C,HRNet_W44_C,HRNet_W48_C,HRNet_W64_C - - 支持使用DALI加速数据预处理,在ImageNet训练上获得1.5倍(ResNet50) 至3倍以上(ShuffleNet))加速,并大幅提升GPU利用率。 - - 3D方向: - - 发布模型PointNet++、PointRCNN。 - - 跟踪模型库 : - - 发布模型SiamFC、SiamRPN、SiamMASK、ATOM、ATP。 - - 增加动态图模型实现: MobileNet-v1/v2、YOLOv3、FasterRCNN、MaskRCNN、视频分类TSM模型、视频动作定位BMN模型。 - -- PaddleRec - - 发布推荐领域多任务模型MMoE, 适用于工业界大规模多任务联合训练。 - - 增加动态图模型实现:gru4rec、deepfm。 - -## 端到端开发套件 - -- PaddleDetection(https://github.com/PaddlePaddle/PaddleDetection) - - 进一步提升YOLOv3模型精度,COCO数据上精度达到43.2%,相比上个版本绝对提升1.4%。 - - 新增模型实现及预训练模型: - - 新增Google AI Open Images 2019-Object Detction比赛中的最佳单模型CascadeCARCNN-FPN-Dcnv2-Nonlocal ResNet200-vd,同时也发布此算法基于Objects365数据的预训练模型。 - - 新增backbone为CBResNet、Res2Net、HRNet的系列预训练模型。 - - 新增LibraRCNN算法及预训练模型。 - - FasterRCNN R50 FPN模型新增基于GIoU、DIoU、CIoU loss的预训练模型,不降低预测速度的情况下,在COCO数据上精度分别提升1.1%,0.9%,1.3%。 - - 新增模块: - - 主干网络: 新增CBResNet、Res2Net、HRNet。 - - Loss模块: 新增GIoU loss、 DIoU loss、CIoU loss,以及Libra loss,YOLOv3的loss支持细粒度op组合。 - - 后处理模块: 新增softnms,DIOU nms模块。 - - 正则模块: 新增DropBlock模块。 - - 功能优化和改进: - - 加速YOLOv3数据预处理,整体训练提速40%。 - - 优化数据预处理逻辑。 - - 增加人脸检测预测benchmark数据。 - - 增加Paddle预测库Python API下的预测示例。 - - 检测模型压缩 : - - 裁剪: 发布MobileNet-YOLOv3裁剪方案和模型,在VOC数据集上FLOPs - 69.6%, mAP + 1.4%,在COCO数据集上FLOPS-28.8%, mAP + 0.9%; 发布ResNet50vd-dcn-YOLOv3裁剪方案和模型,在COCO数据集上FLOPS - 18.4%, mAP + 0.8%。 - - 蒸馏: 发布MobileNet-YOLOv3蒸馏方案和模型,在VOC数据上mAP + 2.8%,在COCO数据上mAP + 2.1%。 - - 量化: 发布YOLOv3-MobileNet和BlazeFace的量化模型。 - - 裁剪+蒸馏: 发布MobileNet-YOLOv3裁剪+蒸馏方案和模型,在COCO数据集上FLOPS - 69.6%,GPU下预测加速64.5%,mAP - 0.3 %; 发布ResNet50vd-dcn-YOLOv3裁剪+蒸馏方案和模型,基于COCO数据FLOPS - 43.7%,GPU下预测加速24.0%,mAP + 0.6 %。 - - 搜索: 开源BlazeFace-Nas的完整搜索方案。 - - 预测部署: - - 适配Paddle预测库对TensorRT的支持、对FP16精度的支持。 - - 文档: - - 新增数据预处理模块介绍文档、实现自定义数据Reader的文档。 - - 新增如何新增算法模型的文档。 - - 文档部署到网站: https://paddledetection.readthedocs.io/zh/latest/ - -- PaddleSeg(https://github.com/PaddlePaddle/PaddleSeg) - - 新增模型 - - 适用于车道线分割场景的LaneNet模型。 - - 适用于轻量级Fast-SCNN模型。 - - 适用于高精度场景的HRNet语义分割模型 。 - - 发布基于PaddleSlim的多种模型压缩方案: - - 基于Cityscape的Fast-SCNN裁剪方案和模型。 - - 基于Cityscape的Deeplabv3p-Xception和Deeplabv3p-MobilenetV2蒸馏方案。 - - 基于Cityscape的Deeplabv3p-MobilenetV2搜索方案。 - - 基于Cityscape的Deeplabv3p-Mobilenet量化方案和模型。 - - 预测部署能力提升 - - 新增Python轻量级部署。 - - 新增对 FP16、Int8量化模型的TensorRT预测加速支持。 - - 新增DeepLabv3p-MobileNetV2的人像分割Paddle-Lite移动端部署教程和案例。 - - 优化模型导出环节,支持图像预处理和后处理的GPU化,性能提升10%~20%。 - - 提供U-Net, ICNet, PSPNet, DeepLabv3+等模型的在不同尺寸图像的预测性能Benchmark,便于用户根据性能进行模型选型。 - - 体验优化 - - 新增学习率warmup功能,支持与不同的学习率Decay策略配合使用,提升Fine-tuning的稳定性。 - - 支持对标注图使用伪彩色图像格式的保存,提升标注图片的预览体验。 - - 新增自动保存mIoU最优模型的功能。 - - 全面优化文档逻辑,提供如工业质检、眼底筛查等工业场景的AIStudio实战教程。 - -- ElasticRec(https://github.com/PaddlePaddle/ElasticRec) - - - - 发布了ElasticRec推荐排序系统,通过K8S进行部署,支持流式训练和在线预测服务。 - -## 工具组件 - -- PaddleHub(https://github.com/PaddlePaddle/PaddleHub) - - 
预训练模型丰富,新增52个预训练模型,目前预训练模型总数100+: - - 语义模型:新增RoBERTa_wwm、BERT_wwm、ERNIE-Tiny等5个语义模型 - - 文本分类:新增黄反鉴别模型3个。 - - 图像分类:新增ResNext-WSL、EfficientNet等共36个图像分类模型。 - - 目标检测:新增行人检测,车辆检测等共5个检测模型。 - - 关键点检测:新增人脸关键点检测和人体姿态关键点检测模型2个。 - - 人脸口罩检测:新增基于PyramidBox-Lite的人脸口罩检测模型2个。 - - 通用人脸检测:新增Ultra Light Fast Generic Face Detector、PyramidBox-Lite等通用人脸检测模型4个。 - - 功能: - - 新增基于Paddle Serving的Bert Service文本向量表示服务。 - - Task灵活性增强,新增Hook机制可以支持用户自定义代码加载。 - - 新增彩色Colorlog,修复日志重复打印问题。 - - 优化代码结果,命令行执行速度提升50% 。 - - 重构Dataset、Reader,适配自定义数据集代码量降低60%。 - - 优化AutoFinetune接口,支持多实验的可视化效果显示。 - - 体验优化 - - 逻辑全面优化,新增丰富的AIStudio教程内容。 - - 官网落地页全新升级,提供在线快速体验和教程指导的功能。 - -- 多任务学习框架PALM(https://github.com/PaddlePaddle/PALM) - - 支持python3和windows - - 升级框架内核和多任务底层机制,开放API调用 - - 灵活的模型保存机制,支持单任务保存和全图保存 - - 支持连续训练和连续预测,单次执行下可自由切换数据集文件 - - 新增模型定制化/自定义功能 - - 重构多任务底层kernel,修复若干影响通用性和稳定性的bugs - - 强化多任务学习能力 - - 支持多任务场景下每个任务有不同的batch size和sequence length - - 修复了多任务多卡训练时,各个显卡上任务不一致的问题 - - 优化了多任务学习调度和终止策略,普遍提升模型泛化能力 - - 强化支持的任务的功能和类型 - - 匹配任务支持增强,支持pairwise learning和多类别(如NLI句子关系判断)。 - - 机器阅读理解任务支持增强,新增用户可控的预处理超参数。 - - 新增支持序列标注任务。 - - 强化大规模训练/推理能力 - - 新增自动多卡预测能力 - - 重构异步reader,多卡场景下支持变长padding - - 新增预训练模型管理和下载模块 - - 支持BERT、ERNIE、RoBERTa等各预训练模型的管理和下载 - - 新增RoBERTa中文预训练模型 - -- 联邦学习PaddleFL(https://github.com/PaddlePaddle/PaddleFL): - - 新增scheduler与submitter功能:scheduler可用于在训练过程中控制trainer是否参加更新 。submitter可用于完成在MPI集群提交paddleFL任务的功能 - - 新增LEAF dataset联邦学习公开数据集,并添加api,用于设置benchmark。支持图像分类,情感分析,字符预测等领域的经典数据集,如MNIST,Sentiment140 - - 根据新增组件,在example中修改了原有的样例,并添加了femnist_demo, submitter_demo样例 - - 优化fl_distribute_transpiler,使FedAvg strategy新增对adam optimizer支持; - - 新增SecAgg strategy(Secure Aggregation),用于实现安全的参数聚合; - -## 代码重构和升级 - -- 编译 - - 增加WITH_NCCL编译选项,单卡用户可显示指定WITH_NCCL=OFF加速编译。 - - 新增编译选项WITH_TP_CACHE,缓存第三方源码,避免重复下载,Windows用户可将其设置为ON,加快编译速度并提高编译稳定性。 - - `CUDA_ARCH_NAME`默认值设成`Auto`(`All`表示编译所有gpu架构,`Auto`表示只编译当前机器gpu架构),对开发者来说,使用`Auto`比`All`节省非常多的编译时间,提高开发效率。 - - 减少了冗余的link环节与产物、多余的文件拷贝,加快了Windows下的编译速度。 -- 外部依赖库 - - 升级MKL-DNN到最新1.1版本。 - - 将预测库与`third_party` 解耦,重构了28个第三方依赖的编译代码,便于统一管理外部依赖。 - - 移除了第三方依赖的私人仓库2个、无用依赖1个、无用的patch下代码2000+行,提高仓库质量。 -- 代码清理、重构和优化 - - 去掉无用的`contrib/float16`目录,删除BRPC下无用的snappy/snappystream依赖。 - - 从 `python/paddle/fluid/layers/nn.py`中,根据API功能拆出`loss.py`和`sequence_lod.py`,减少`nn.py`的代码量,便于阅读。 - - 修复`-Wno-error=sign-compare`的warning对应的代码(共100多处),后续所有该类warning会在编译时报错,提高代码质量 - - 去掉WindowsMSVC编译的`WarningLnk4006/WarningLnk4221`(共约300处),提高仓库质量。 - - 减少reduce_op, expand_op, expand_as_op模版类数量,加速GPU编译和减少whl包70M的空间。 - - 动态图下通过代码自动生成每个OP的pybind函数,用于在layers中直接调用,提高动态图性能并减少与静态图的耦合度。 - -## BUG修复 - -- 修复基于PaddleDetection的 Faster-RCNN使用Python API预测时MKL-DNN报错问题。 -- 修复sum op的GPU实现中,由于部分Tensor没有初始化引起训练挂掉的问题。 -- 修复fill_constant中,value设置为大整数时精度损失的问题。 -- 修复softmax_with_cross_entropy_op在CUDA上的精度不一致问题。 -- 修复clone program时program中的stop_gradient属性不能拷贝到新program的问题。 -- 修复elementwise_pow op在整数上的精度损失问题。 -- 修复一些 GFLAGS 不能在预测库外进行指定的问题。 -- 修复 Analysistor 多线程下若干 Pass 导致预测随机 core 的问题。(fc_gru_fuse_pass,seqconv_eltadd_relu_fuse_pass,attention_lstm_fuse_pass,embedding_fc_lstm_fuse_pass,fc_lstm_fuse_pass,seq_concat_fc_fuse_pass) -- 修复了在使用 NativePredictor 指定使用 CPU 预测后,在同一进程内使用 AnalysisConfig 指定 GPU 不生效的错误。 -- 修复-DWITH_MKL=OFF时编译报错(setup.py拷贝与op_function_cmd出错)的bug。 -- 修复py_func OP无法输入tuple(Variable) 的bug,新增如何写PythonOP的代码示例。 -- 修复sigmoid cudnn kernel错调用成tanh cudnn kernel的问题。 -- 修复部分动态图模式下reshape、depthwiseconv相关的bug;修复网络中部分参数无梯度,导致程序crash 的bug。 -- 修复GradientClip在参数服务器模式下运行错误的BUG。 -- 修复参数服务器全异步模式下内存泄露的问题。 diff --git a/doc/fluid/release_note_en.md b/doc/fluid/release_note_en.md 
index d48271b633acb52bfdd93d261eb0fba28041d310..05517456150cb454abb4c4c742b5d139a5610df4 100644 --- a/doc/fluid/release_note_en.md +++ b/doc/fluid/release_note_en.md @@ -1,350 +1,71 @@ +# Release Note -Release Notes -============== +## Important Statements -## Important Updates +- This version is a beta version. It is still in iteration and is not stable at present. Incompatible upgrade may be subsequently performed on APIs based on the feedback. For developers who want to experience the latest features of Paddle, welcome to this version. For industrial application scenarios requiring high stability, the stable Paddle Version 1.8 is recommended. +- This version mainly popularizes the imperative programming development method and provides the encapsulation of high-level APIs. The imperative programming(dynamic graph) mode has great flexibility and high-level APIs can greatly reduces duplicated codes. For beginners or basic task scenarios, the high-level API development method is recommended because it is simple and easy to use. For senior developers who want to implement complex functions, the imperative programming API is commended because it is flexible and efficient. +- This version also optimizes the Paddle API directory system. The APIs in the original directory can create an alias and are still available, but it is recommended that new programs use the new directory structure. -In this version, the authors focus on enhancing the framework function level, the forecast deployment capability is fully improved, the distributed release PLSC supports the super-large-scale classification, and the parameter server mode is optimized and integrated. The compilation options, the compilation dependence, and the code library are fully cleaned up and optimized. The model library is continuously improved, the overall hierarchy is optimized, and the implementation of the dynamic graph model is added. The end-to-end development kits and utility components are further perfected. +## Basic Framework -**Training Framework**: An AMP interface and a new control flow interface are added. The tensor usage method and the GPU memory allocation strategy are optimized. A library that supports the Nvidia DALI GPU data preprocessing is added. The function and performance of the basic OP are continually optimized. The function of the dynamic graph is further perfected and the performance is greatly improved. A function that converts the data independent dynamic graph model into the static graph predictable deployment model is provided. The framework debugging analysis function and the ease of use are fully enhanced. +### Basic APIs -**Forecast Deployment**: The Python API of the server-side forecast library is significantly optimized. A usage method and example of the R language and Go language call forecast library are added. The quantification support capability is strengthened. Paddle Lite supports a model generated by the post-training quantification method without calibration data. Tailoring, quantification, distillation, and search interfaces are reconstructed for the model compression library PaddleSlim. A large-scale scalable knowledge distillation framework Pantheon is added to fully connect to the model library. 
+- Networking APIs achieve dynamic and static unity and support operation in imperative programming and declarative programming modes(static graph) -**Distributed Aspect**: In parameter server mode, the back-end implementation is united into the communicator and the front-end interface is united into the fleet for the synchronous, semi-asynchronous, and fully asynchronous modes of the transpiler. Different modes are flexibly selected using the fleet strategy. A large-scale classification library PLSC is released and the classification tasks of a great many classes are supported using model parallel. +- The API directory structure is adjusted. In the Paddle Version 1.x, the APIs are mainly located in the paddle.fluid directory. This version adjusts the API directory structure so that the classification is more reasonable. The specific adjustment rules are as follows: -**Basic Model Library**: A speech synthesis library Parakeet is released, including several leading-edge synthesis algorithms. 14 image classification pre-training models are added in PaddleCV. The 3D and tracking direction model continues to be enriched. The participle and part-of-speech tagging model of PaddleNLP supports a jieba participle. A multi-task model MMoE is added in PaddleRec. Extensive dynamic graph model implementations are added in the model library as a whole. The overall hierarchy of the model library is adjusted and optimized. + - Moves the APIs related to the tensor operations in the original fluid.layers directory to the paddle.tensor directory + - Moves the networking-related operations in the original fluid.layers directory to the paddle.nn directory. Puts the types with parameters in the paddle.nn.layers directory and the functional APIs in the paddle.nn.functional directory + - Moves the special API for imperative programming in the original fluid.dygraph directory to the paddle.imperative directory + - Creates a paddle.framework directory that is used to store framework-related program, executor, and other APIs + - Creates a paddle.distributed directory that is used to store distributed related APIs + - Creates a paddle.optimizer directory that is used to store APIs related to optimization algorithms + - Creates a paddle.metric directory that is used to create APIs related to evaluation index calculation + - Creates a paddle.incubate directory that is used to store incubating codes. APIs may be adjusted. This directory stores codes related to complex number computation and high-level APIs + - Creates an alias in the paddle directory for all APIs in the paddle.tensor and paddle.framework directories. For example, paddle.tensor.creation.ones can use paddle.ones as an alias -**End-to-End Development Kits**: A large number of model implementations and pre-training models are added in PaddleDetection and PaddleSeg. The training speed and accuracy of typical models are enhanced. The model compression and deployment capabilities are significantly improved. The user experience is fully optimized. A recommended sorting system ElasticRec is released. Deployment is performed via K8S. Streaming training and online forecast services are supported. +- The added APIs are as follows: -**Utility Components**: 52 pre-training models are added in PaddleHub, with a total of more than 100. The function and experience are continuously optimized. The kernel of the multi-task learning framework PALM is upgraded. The API call is open. More task types are supported. An open dataset is added in the federated learning PaddleFL. 
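As a small illustration of the alias rule described above, both spellings below refer to the same creation API. This is only a hedged sketch: it assumes the beta wheel described in this note (e.g. `paddlepaddle==2.0.0a0`) is installed, and uses the `paddle.tensor.creation.ones` path exactly as named in the note.

```python
# Hedged sketch of the documented alias: paddle.ones is the alias of
# paddle.tensor.creation.ones in the new directory structure.
import paddle

a = paddle.tensor.creation.ones(shape=[2, 3], dtype='float32')  # full path from the note
b = paddle.ones(shape=[2, 3], dtype='float32')                  # documented alias
print(a, b)
```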
+ - Adds eight networking APIs in the paddle.nn directory: interpolate, LogSoftmax, ReLU, Sigmoid, loss.BCELoss, loss.L1Loss, loss.MSELoss, and loss.NLLLoss + - Adds 59 tensor-related APIs in the paddle.tensor directory: add, addcmul, addmm, allclose, arange, argmax, atan, bmm, cholesky, clamp, cross, diag\_embed, dist, div, dot, elementwise\_equal, elementwise\_sum, equal, eye, flip, full, full\_like, gather, index\_sample, index\_select, linspace, log1p, logsumexp, matmul, max, meshgrid, min, mm, mul, nonzero, norm, ones, ones\_like, pow, randint, randn, randperm, roll, sin, sort, split, sqrt, squeeze, stack, std, sum, t, tanh, tril, triu, unsqueeze, where, zeros, and zeros\_like + - Adds device\_guard that is used to specify a device. Adds manual\_seed that is used to initialize a random number seed -## Training Framework +- Some of the APIs in the original fluid directory have not been migrated to the paddle directory + - The following API under fluid.contrib directory are kept in the original location, not migrated:BasicGRUUnit, BasicLSTMUnit, BeamSearchDecoder, Compressor, HDFSClient, InitState, QuantizeTranspiler, StateCell, TrainingDecoder, basic_gru, basic_lstm, convert_dist_to_sparse_program, ctr_metric_bundle, extend_with_decoupled_weight_decay, fused_elemwise_activation, fused_embedding_seq_pool, load_persistables_for_increment, load_persistables_for_inference, match_matrix_tensor, memory_usage, mixed_precision.AutoMixedPrecisionLists, mixed_precision.decorate, multi_download, multi_upload, multiclass_nms2, op_freq_statistic, search_pyramid_hash, sequence_topk_avg_pooling, shuffle_batch, tree_conv, var_conv_2d + - The following APIs related to LodTensor are still under development and have not been migrated yet:LoDTensor, LoDTensorArray, create_lod_tensor, create_random_int_lodtensor, DynamicRNN, array_length, array_read, array_write, create_array, ctc_greedy_decoder, dynamic_gru, dynamic_lstm, dynamic_lstmp, im2sequence, linear_chain_crf, lod_append, lod_reset, sequence_concat, sequence_conv, sequence_enumerate, sequence_expand, sequence_expand_as, sequence_first_step, sequence_last_step, sequence_mask, sequence_pad, sequence_pool, sequence_reshape, sequence_reverse, sequence_scatter, sequence_slice, sequence_softmax, sequence_unpad, tensor_array_to_tensor + - The following APIs related to distributed training are still under development, not migrated yet + - The following APIs in fluid.nets directory will be implemented with high level API, not migrated:nets.glu, nets.img_conv_group, nets.scaled_dot_product_attention, nets.sequence_conv_pool, nets.simple_img_conv_pool + - The following APIs are to be improved, not migrated:dygraph.GRUUnit, layers.DecodeHelper, layers.GreedyEmbeddingHelper, layers.SampleEmbeddingHelper, layers.TrainingHelper, layers.autoincreased_step_counter, profiler.cuda_profiler, profiler.profiler, profiler.reset_profiler, profiler.start_profiler, profiler.stop_profiler + - The following APIs are no longer recommended and are not migrated:DataFeedDesc, DataFeeder, clip.ErrorClipByValue, clip.set_gradient_clip, dygraph_grad_clip.GradClipByGlobalNorm, dygraph_grad_clip.GradClipByNorm, dygraph_grad_clip.GradClipByValue, initializer.force_init_on_cpu, initializer.init_on_cpu, io.ComposeNotAligned.with_traceback, io.PyReader, io.load_params, io.load_persistables, io.load_vars, io.map_readers, io.multiprocess_reader, io.save_params, io.save_persistables, io.save_vars, io.xmap_readers, layers.BasicDecoder, layers.BeamSearchDecoder, layers.Decoder, layers.GRUCell, 
layers.IfElse, layers.LSTMCell, layers.RNNCell, layers.StaticRNN, layers.Switch, layers.While, layers.create_py_reader_by_data, layers.crop, layers.data, layers.double_buffer, layers.embedding, layers.fill_constant_batch_size_like, layers.gaussian_random_batch_size_like, layers.get_tensor_from_selected_rows, layers.load, layers.merge_selected_rows, layers.one_hot, layers.py_reader, layers.read_file, layers.reorder_lod_tensor_by_rank, layers.rnn, layers.uniform_random_batch_size_like, memory_optimize, release_memory, transpiler.memory_optimize, transpiler.release_memory -- API - - An AMP interface is added: A network can be converted into mixed accuracy training in a general way while the accuracy fluctuation is ensured to be within the normal range. - - A new control flow interface is added and recommended: Four control flow Ops including while\_loop (loop control function), cond (conditional branch function), case, and switch\_case (branch control function) are added for the ease of use and the following new functions are supported: - - Python callable is used as a control condition or executive. - - Different branches in the control flow use different losses or optimizers. - - Conditions in the control flow partially use CPU or GPU data. - - Parameters of some APIs support the use of a variable list: Support for a variable list is added according to the case that the parameter\_list or no\_grad\_set parameter of some APIs supports only the use of a string list. It is no longer necessary to obtain the name attribute of related variables in advance when using the following APIs: - - fluid.backward.append\_backward(loss, parameter\_list=None, no\_grad\_set=None, callbacks=None) - - fluid.backward.gradients(targets, inputs, target\_gradients=None, no\_grad\_set=None) - - The minimize methods of various optimizers, such as Adam’s minimize: minimize(loss, startup\_program=None, parameter\_list=None, no\_grad\_set=None, grad\_clip=None) -- Basic Function Optimization - - The float16 type of numpy is used to set to Tensor data without the necessity of conversion into the uint16 type first. - - The minus sign is directly used to get the opposite number of Tensor. - - GPU memory Allocation Strategy: - - The default policy is changed to AutoGrowth: The GPU memory is applied for as needed without affecting the training speed. This avoids the problem that it is difficult to restart a new task on the same GPU card under the previous default GPU memory pre-allocation strategy. - - GPU memory allocation adjustment for multi-card tasks: The GPU memory allocators on different GPU cards are set to the Lazy initialization mode. If a user does not use a card, no GPU memory will be applied for on this card. This avoids the GPU memory OOM problem caused by running tasks on idle GPU cards without setting CUDA\_VISIBLE\_DEVICES when any GPU memory is occupied on other GPU cards. - - OP Function Upgrade - - elu: This activation function supports the calculation of second-order gradients. - - Prroi\_pool: The rois parameter may accept the Tensor or LoDTensor type. - - Conv2d, pool2d, batch\_norm, LRN: All reverse calculations support the use of the MKL-DNN high-performance calculation library. - - argsort: The descending sort is supported (A descending parameter is added. The default is False). 
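For reference, a couple of the newly added paddle.tensor APIs listed earlier (matmul, add) can be exercised through their paddle.* aliases. The sketch below is only an illustration and assumes the beta wheel described in this note is installed; it runs in imperative (dynamic graph) mode via the existing `fluid.dygraph` entry points so the results are materialized immediately.

```python
# Hedged sketch exercising two of the newly added tensor APIs (matmul, add)
# through their paddle.* aliases, in imperative (dynamic graph) mode.
import numpy as np
import paddle
import paddle.fluid as fluid

with fluid.dygraph.guard():
    x = fluid.dygraph.to_variable(np.ones((2, 3), dtype='float32'))
    y = fluid.dygraph.to_variable(np.ones((3, 2), dtype='float32'))
    z = paddle.matmul(x, y)   # 2x2 result, every entry equals 3.0
    s = paddle.add(z, z)      # elementwise add: every entry equals 6.0
    print(s.numpy())
```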
-- Basic Performance Optimization - - DALI Preprocessing Acceleration - - The support for the Nvidia DALI GPU data preprocessing library is added, which can be used to accelerate the preprocessing of data such as images, videos, and speeches. - - Automatic Mixed Precision Training Optimization - - With the implementation of the following optimization strategy as well as DALI data preprocessing, the training throughput of the ResNet50 model is increased substantially: The mixed accuracy training throughput of a single V100 card is increased to 1,000+ images/s from 600+ images/s. The throughput of 8 cards for a single machine is 7,840 image/s. The throughput of 32 cards for 4 machines is 28,594 images/s. - - The support of batch\_norm, conv2d, and other ops for NHWC data layout input is enhanced to accelerate fp16 calculation using Tensor Core technology. - - Some op patterns in the model such as batch\_norm and relu are fused based on the IR Pass mechanism. - - The kernel of elementwise (add, mul) and other ops is optimized. - - RecomputeOptimizer is optimized to improve the batchsize. In the bert-large model, the maximum batchsize is increased by 533.62% compared with that without using RecomputeOptimizer, doubling the maximum batchsize of the previous version. - - OP Performance Optimization - - The fusion operator fuse\_emb\_seq\_pool of embedding and sequence\_pool is implemented and murmurhash3\_x64\_128 in bloom\_filter is optimized. The training speed of some NLP models is effectively improved. - - The GPU performance of mean op is optimized. When the input data is 32328\*8 Tensor, the forward calculation speed is increased by 2.7 times. - - Optimize assign and lod\_reset op are optimized to avoid unwanted GPU memory copy and data transform. - - The kernel implementation of stack OP is optimized. The performance of a single card of GPU in the XLnet/Ernie model is improved by 4.1%. -- Dynamic Graph - - Function Optimization - - The name\_scope parameter in the dynamic graph Layers is removed to make it easier for users to inherit and call. - - The block parameter in the to\_variable interface is removed to simplify the use of the API. - - As for the problem that model parameters depend on data, the build\_once design is removed so that Layers can get all the parameter tables at the end of **init** execution, which is convenient for load saving, parameter initialization, parameter debugging, and parameter optimization. - - Automatic pruning is improved to facilitate user networking and reduce the reverse calculation amount. - - The SelectedRows operation is supported so that the Embedding layer supports sparse update of a single card. - - As for the problem that the framework lacks containers, ParameterList, LayerList, and Sequencial functions are added to facilitate user networking. - - Named\_sublayers and named\_parameters functions are supported to facilitate user programming. - - The Linear lr warmup decay strategy is supported. - - Performance Optimization - - The interaction of python with c++, GradMaker, OperatorBase, and allocator are optimized. For the LSTM-based language model task p on the P40 machine, the performance is improved by 270%. - - Redundant codes are removed for performance problems caused by calling dead codes of optimized\_guard in optimize for many times. For the Transformer model (batch\_size=64) on the P40 machine, the performance of optimizers such as SGD and Adam is improved by 5% to 8%. 
- - For the performance impact caused by adding scale\_op extra to update the beta parameter in AdamOptimizer, the beta updating logic is fused into adam\_op to reduce the call overhead of the op kernel. For the Dialogue-PLATO model on the P40 machine, the performance is improved by 9.67%. - - The asynchronous DataLoader of the dynamic graph is optimized. The overall training speed is improved by about 30% in the Mnist, ResNet, and other models. - - The numpy bridge function is added. Sharing the underlying data between Tensor and ndarray in CPU mode is supported to avoid the problem of needing to copy a numpy input when creating variables, and to improve efficiency. - - GPU memory optimization: Optimization strategy of deleting in advance the forward variable space that does not require Tensor Buffer in reverse. The maximum batch size is increased by more than 20%-30% in the ResNet and other models. - - Dynamic Graph Deployment - - The TracedLayer interface is supported. The conversion of the dynamic graph model into the static graph predictable deployment model is implemented. -- Debugging Analysis - - Error message optimization: Framework error messages are classified as a whole to achieve the , systematization of error messages. Copywriting optimization is finished to help users locate and solve problems more quickly and accurately. - - Optimization of the Performance Analysis Profile Function - - The function and accuracy of the profiler is enhanced. Profile options at different levels are supported. The call relation of events can be recorded in the profile data and printed. - - The nan inf check and debugging are optimized (effective through FLAGS\_check\_nan\_inf) and the performance, function, and output information are all greatly improved: - - In terms of speed, the v100 test ResNet50 model has a performance improvement of about 1000 times compared with the original utility components, and maintains an over 80% efficiency for normal training. - - In terms of function, the support for fp16 is added and environment variables can be set to skip the inspection of op, op\_role, and op\_var to facilitate the debugging of the fp16 model. - - The output information is detailed and accurate. Besides wrong op and tensor names, the quantity of wrong nan, inf, and normal numerical values are printed to facilitate debugging. -- A lightweight installation package paddlepaddle-tiny for CPU training and forecast is released and the window/linux/Mac operating system and python27/python35/python36/python37 are supported: - - The following options are compiled: no avx, no ml, no gpu, no unittest - - The slim and some datasets are pruned off. - - The Linux package size is reduced to 37 M from 90 M. The Windows package size is reduced to 9.6 M from 50.8 M. The MAC package size is reduced to 19.8 M from 59 M. - - The number of installation requirement dependencies are reduced to 7 from 15. +### High-level APIs -## Forecast Deployment +- Adds a paddle.incubate.hapi directory. Encapsulates common operations such as networking, training, evaluation, inference, and access during the model development process. Implements low-code development. Uses the imperative programming implementation mode of MNIST task comparison. High-level APIs can reduce 80% of executable codes. +- Adds model-type encapsulation. Inherits the layer type. 
Encapsulates common basic functions during the model development process, including: + - Provides a prepare API that is used to specify a loss function and an optimization algorithm + - Provides a fit API to implement training and evaluation. Implements the execution of model storage and other user-defined functions during the training process by means of callback + - Provides an evaluate interface to implement the inference and evaluation index calculation on the evaluation set + - Provides a predict interface to implement specific test data inference + - Provides a train\_batch interface to implement the training of single-batch data +- Adds a dataset interface to encapsulate commonly-used data sets and supports random access to data +- Adds encapsulation of common Loss and Metric types +- Adds 16 common data processing interfaces including Resize and Normalize in the CV field +- Adds lenet, vgg, resnet, mobilenetv1, and mobilenetv2 image classification backbone networks in the CV field +- Adds MultiHeadAttention, BeamSearchDecoder, TransformerEncoder, TransformerDecoder, and DynamicDecode APIs in the NLP field +- Releases 12 models based on high-level API implementation, including Transformer, Seq2seq, LAC, BMN, ResNet, YOLOv3, VGG, MobileNet, TSM, CycleGAN, Bert, and OCR -- Server-side Forecast Library - - Python API - - The read and write model from the memory is supported to meet the model encryption requirements. - - The Scale operator is no longer added at the end of the forecast model. - - The support for ZeroCopy forecast is added. The interface is basically the same as the C++ interface and supports numpy.ndarray as input and output. It is easier to use on the Python side. - - Multiple interfaces are added in AnalysisConfig to completely cover the C++ forecast functions, including removing pass and disabling forecast glog. - - Support for Other Programming Languages - - The usage method and example of the R language and Go language call forecast library are added. - - The corresponding header file of ProtoBuf is provided to external users to facilitate users to analyze the requirements for the model structure. - - For a forecast library with TRT compilation, a TensorRT library is not provided from thrid\_party any more and needs to be downloaded by users at https://developer.nvidia.com/tensorrt. - - Function Enhancement: - - Access to Paddle Lite using a submap is achieved and ResNet50 has been verified. - - The support for MKL-DNN FC INT8 kernel is added. - - Paddle-TensorRT supports the Ernie model. For the Ernie model (seq length = 128) on the T4 card, the fp16 forecast speed is 3.6 ms, which is faster than the fp32 forecast speed by 37%. - - Quantification: Under the 2% improvement of the ERNIE INT8 accuracy compared with the FP32 accuracy, the single-threaded performance and the multi-threaded performance are improved by 2.79 times and 1.79 times for ERNIE INT8 on the second-generation Xeon scalable platform 6271 respectively. -- Mobile/Embedded End-side Paddle Lite (https://github.com/PaddlePaddle/Paddle-Lite) - - Version v2.3 is released. - - Multiple functions of Model\_optimize\_tool are upgraded. - - “The post-training quantification method without calibration data” is supported. The model storage space is reduced (by 2 to 4 times). - - OpenCL: The migration of 30 Image2D Kernels are finished and 14 Ops are covered. - - The support for FPGA and NPU is further strengthened. The forecast of Kunlun XPU is supported. - - A new official website document is released. 
A "post-training quantification method without calibration data" usage document is added. -- Paddle Serving (https://github.com/PaddlePaddle/Serving): - - The forecast service of remote text vector representation of the bert-type semantic understanding model is released. - - A paddle-gpu-serving WHL package is released. The forecast service can be deployed and used through pip installation and Python codes. - - 13 semantic understanding models in Paddlehub are supported. The single-machine multi-card mode is supported. The forecast speed is 869.56 samples/s when the average sample length is 7 under a single P4 GPU using the Ernie\_tiny model. -- PaddleSlim (https://github.com/PaddlePaddle/PaddleSlim): - - PaddleSlim is split into independent repo. - - The tailoring, quantification, distillation and search interfaces are reconstructed. The underlying interfaces are open to users. - - Quantification: - - An offline quantification function based on KL divergence is added. The quantification of the Embedding layer is supported. - - The QAT MKL-DNN quantification strategy support for FC is added. - - PostTrainingQuantization is added to fully implement the post-training quantification function: The quantization of 30 kinds of Ops is supported. The flexible setting of OPs to be quantified is supported. Quantitative models are generated in a unified format . It has the advantages of short time consumption, ease of use, and small precision loss. - - Quantitative training supports setting the type of OP to be quantified. - - Tailoring: The tailoring implementation is reconstructed to support more types of networks. - - Search: - - SA search is supported. More search space is added. User-defined search space is supported. - - A one-shot search algorithm is added. The search speed is 20 times faster than that of the previous version. - - A large-scale scalable knowledge distillation framework Pantheon is added. - - Full decoupling is achieved between student and teacher models and between teacher models. They can independently run on different physical devices respectively to make full use of computing resources. - - The single-node multi-device large-scale forecast of the teacher model is supported. The acceleration ratio is tested to be linear on BERT and other models. - - TCP/IP protocol is used to achieve communication in online distillation mode. Knowledge transmission between teacher and student models running on any two physical devices in the same network environment is supported. - - API interfaces in online and offline distillation modes are unified. Different teacher models may operate in different modes. - - The merging of knowledge and the batch reorganization of knowledge data are completed automatically on the student side to facilitate the knowledge fusion of the multi-teacher model. - - Model Library: - - The compression benchmark of ResNet50 and MobileNet models is released. - - The detection library is connected and the compression benchmark for the YOLOv3 series of models is released. - - The segmentation library is connected and the compression benchmark for the Deepabv3+ series of segmentation models is released. - - Document Improvement: - - An API document is supplemented. An introductory tutorial and an advanced tutorial are added. A ModelZoo document is added to cover classification, detection, and segmentation tasks. All documents contain Chinese and English. 
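To give a feel for the high-level API workflow summarized above (prepare/fit/evaluate/predict), here is a schematic, hedged sketch of an MNIST training loop. The exact import paths and argument lists below (`paddle.incubate.hapi.model.Model`/`Input`, `loss.CrossEntropy`, `metrics.Accuracy`, `datasets.MNIST`, `vision.models.LeNet`) are assumptions inferred from the directory and class names in this note and may differ slightly in the actual beta wheel.

```python
# Schematic sketch of the high-level API flow described above.
# Module paths are assumptions based on the paddle.incubate.hapi directory
# names in this note; check the installed wheel for the exact locations.
import paddle.fluid as fluid
from paddle.incubate.hapi.model import Input            # assumed path
from paddle.incubate.hapi.loss import CrossEntropy      # assumed path
from paddle.incubate.hapi.metrics import Accuracy       # assumed path
from paddle.incubate.hapi.datasets import MNIST         # assumed path
from paddle.incubate.hapi.vision.models import LeNet    # assumed path

train_dataset = MNIST(mode='train')   # Dataset wrapper mentioned in this note
model = LeNet()                       # lenet backbone mentioned in this note

inputs = [Input([None, 1, 28, 28], 'float32', name='image')]
labels = [Input([None, 1], 'int64', name='label')]

optimizer = fluid.optimizer.Adam(
    learning_rate=0.001, parameter_list=model.parameters())

# prepare() binds the optimizer, loss and metric; fit() runs training/evaluation.
model.prepare(optimizer, CrossEntropy(), Accuracy(), inputs, labels)
model.fit(train_dataset, epochs=1, batch_size=64)
```

After `fit`, the same `Model` object exposes `evaluate` and `predict` for the evaluation and inference steps listed above.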
+### Performance Optimization -## Distributed +- Adds a `reshape+transpose+matmul` fuse so that the performance of the INT8 model is improved by about 4% (on the 6271 machine) after Ernie quantization. After the quantization, the speed of the INT8 model is increased by about 6.58 times compared with the FP32 model on which DNNL optimization (including fuses) and quantization are not performed -- Parameter Server Mode: - - The memory usage is greatly reduced during training. On 100 million embedding tasks, the Trainer-side memory can be reduced by 90%. - - The memory usage of distributed saving and loading models is greatly reduced. The Pserver-side memory peak value can be minimized to $1/N of the original value, where N$ is the number of Pserver nodes. - - The geo-sgd dense parameter communication is optimized. - - The distributed AUC index calculation is supported. - - A distributed barrier function is added. - - An overdue warning is added in the non-Fleet transpiler API. This API is planned to be removed in PaddlePaddle-Fluid 2.0。 - - Semi-asynchronous and synchronous modes are added in Communicator. - - The TrainFromDataset training interface supports semi-asynchronous and synchronous modes. - - DistributedStrategy is added in Fleet to further improve the distributed ease of use and integrate the current distributed related flags. - - The Fleet pslib mode supports single-program multi-loss training to optimize the training performance. - - 100 billion sparse mode supports the k8s environment. -- Large-scale classification library PLSC: It supports the large-scale classification problem that data parallel cannot solve due to the limitation of video memory capacity (https://github.com/PaddlePaddle/PLSC). - - Three built-in models ResNet50, ResNet101, and ResNet152 are available and User-defined models are supported. Under the single-machine eight-V100 GPU configuration, the ResNet50 model has a million-class training speed of 2,122.56 images/s, which is 1.3 times faster than that of the standard ResNet50 model. - - A plsc-serving whl package for model online forecast service is released to forecasts the image semantic vector representation of the face recognition model. Making a forecast using a user-trained model is supported. The forecast speed of the ResNet50 model (batch size=256) under a single V100 GPU is 523.47 images/s. - - A pre-training model based on the ResNet50 network and the MS1M-ArcFace dataset is released: https://plsc.bj.bcebos.com/pretrained\_model/resnet50\_distarcface\_ms1mv2.tar.gz. -- The benchmark for ResNet50 mixed precision training (single-card, multi-card, and multi-machine) is released. +### Debugging Analysis -## Basic Model Library - -(https://github.com/PaddlePaddle/models) - -- PaddleNLP - - - Seq2seq supports training modes such as RL and GAN. - - A training model for participle and part-of-speech tagging is released. A knowledge distillation framework Pantheon is used. The F1 value for its own dataset is 1% more than that of paddleNLP LAC. Jieba participles are incorporated. The deep learning model mode is enabled by adding a use\_paddle label. In addition, the paddle version detection and rollback mechanism is added in jieba to ensure user experience. - - Dynamic graph model implementations are added: word2vec, senta, transformer, Bert, seq2seq, LAC. - -- PaddleSpeech - - - Speech synthesis: A synthesis library Parakeet is released. - - A standard workflow for data preprocessing, training, and synthesis of the speech synthesis model is implemented. 
- - The out-of-the-box pre-processing implementation of typical datasets is provided. - - Commonly-used model components in the speech synthesis field are provided to support the model implementation. - - Speech synthesis models DeepVoice3, ClarinNet, TransformerTTS, FastSpeech, WaveNet, and WaveFlow are released. - -- PaddleCV - - - Image Classification: - - A total of 14 pre-training models including SENet-vd, Res2Net, and HRNet series of models are added: - - SE\_ResNet18\_vd, SE\_ResNet34\_vd, SE\_ResNeXt50\_vd\_32x4d, ResNeXt152\_vd\_32x4d - - Res2Net50\_26w\_4s, Res2Net50\_14w\_8s, Res2Net50\_vd\_26w\_4s - - HRNet\_W18\_C, HRNet\_W30\_C, HRNet\_W32\_C, HRNet\_W40\_C, HRNet\_W44\_C, HRNet\_W48\_C, HRNet\_W64\_C - - Accelerating data preprocessing by using DALI is supported. On the ImageNet training, 1.5 times (ResNet50) to more than 3 times (ShuffleNet) the acceleration is obtained and the GPU utilization is greatly improved. - - 3D Direction: - - The models PointNet++ and PointRCNN are released. - - Tracking Model Library: - - The models SiamFC, SiamRPN, SiamMASK, ATOM, and ATP are released. - - Dynamic graph model implementations are added: MobileNet-v1/v2, YOLOv3, FasterRCNN, MaskRCNN, video classification TSM model, and video motion positioning BMN model. - -- PaddleRec - - - A multi-task model MMoE for the recommended field is released and applies to large-scale multi-task joint training in the industrial circles. - - Dynamic graph model implementations are added: gru4rec, deepfm. - -## End-To-End Development Kits - -- PaddleDetection (https://github.com/PaddlePaddle/PaddleDetection) - - - The precision of the YOLOv3 model is further improved. The precision for the COCO data reaches 43.2%, an absolute increase of 1.4% compared with the previous version. - - Model implementations and pre-training models are added: - - The best single model CascadeCARCNN-FPN-Dcnv2-Nonlocal ResNet200-vd in the Google AI Open Images 2019-Object Detction competition is added. A pre-training model of this algorithm based on Objects365 data is also released. - - Backbone is added as CBResNet, Res2Net, and HRNet series of pre-training models. - - A LibraRCNN algorithm and a pre-training model are added. - - GIoU, DIoU, and CIoU loss-based pre-training models are added in the FasterRCNN R50 FPN model. Without reducing the forecast speed, the precision for the COCO data is improved by 1.1%, 0.9%, and 1.3% respectively. - - Added Modules: - - Backbone network: CBResNet, Res2Net, and HRNet are added. - - Loss modules: GIoU loss, DIoU loss, and CIoU loss are added. Libra loss and YOLOv3 loss support a fine-grained op combination. - - Postprocessing modules: The softnms and DIOU nms modules are added. - - Regular module: A DropBlock module is added. - - Functional Optimization and Improvement: - - YOLOv3 data preprocessing is accelerated. The overall training speeds up by 40%. - - The data preprocessing logic is optimized. - - The benchmark data for face detection forecast is added. - - Forecast examples under the Paddle forecast library Python API are added. - - Detection Model Compression: - - Tailoring: A Mobilenet-yolov3MobileNet-YOLOv3 tailoring solution and model are released, with FLOPs - 69.6%, mAP + 1.4% for the VOC dataset, and FLOPS - 28.8%, mAP + 0.9% for the COCO dataset. A ResNet50vd-dcn-YOLOv3 tailoring solution and model are released, with FLOPs - 18.4%, mAP + 0.8% for the COCO dataset. 
- - Distillation: A MobileNet-YOLOv3 distillation solution and model are released, with mAP + 2.8% for the VOC data and mAP + 2.1% for the COCO data. - - Quantification: YOLOv3-MobileNet and BlazeFace quantitative models are released. - - Tailoring + Distillation: A MobileNet-YOLOv3 tailoring + distillation solution and model are released, with FLOPS - 69.6%, forecast speedup 64.5% under the GPU, mAP - 0.3 % for the COCO dataset. A ResNet50vd-dcn-YOLOv3 tailoring + distillation solution and model are released, with FLOPS - 43.7%, forecast speedup 24.0% under the GPU, mAP + 0.6 % based on the COCO data. - - Search: A complete search solution for the open source blazeface-nas. - - Forecast Deployment: - - The support of the Paddle forecast library for TensorRT and FP16 precision is adapted. - - Documents: - - A document for introducing the data preprocessing module and a document for implementing the user-defined data Reader are added. - - A document about how to add an algorithm model is added. - - Documents are deployed to the website: https://paddledetection.readthedocs.io/zh/latest/ - -- PaddleSeg (https://github.com/PaddlePaddle/PaddleSeg) - - - Added Models - - LaneNet model applicable to lane segmentation scenarios. - - Fast-SCNN model applicable to the lightweight. - - HRNet semantic segmentation model applicable to high-precision scenarios. - - Multiple PaddleSlim-based model compression solutions are released: - - Cityscape-based Fast-SCNN tailoring solution and model. - - Cityscape-based Deeplabv3p-Xception and Deeplabv3p-MobilenetV2 distillation solutions. - - Cityscape-based Deeplabv3p-MobilenetV2 search solution. - - Cityscape-based Deeplabv3p-Mobilenet quantitative solution and model. - - Enhancement of the Forecast Deployment Capability - - Lightweight deployment of Python is added. - - The TensorRT forecast acceleration support for FP16 and Int8 quantitative models is added. - - Tutorials and cases for portrait segmentation Paddle-Lite mobile-side deployment of DeepLabv3p-MobileNetV2 are added. - - Model export is optimized. GPU implementation of image preprocessing and postprocessing is supported. The performance is improved by 10%-20%. - - The benchmark for the forecast performance of U-Net, ICNet, PSPNet, DeepLabv3+, and other models for images of different sizes is provided to facilitate users to select models based on performance. - - Experience Optimization - - A learning rate warmup function is added. It supports the use with different learning rate decay strategies to improve Fine-tuning stability. - - Marked imaged can be saved in pseudo-color image format to improve their preview experience. - - The function of automatically saving an optimal mIoU model is added. - - The document logic is comprehensively optimized. An AIStudio practical tutorial on industrial scenarios such as industrial quality inspection and fundus screening is provided. - -- ElasticRec (https://github.com/PaddlePaddle/ElasticRec) - - - - An ElasticRec recommended sorting system is released. It is deployed through K8S. Streaming training and online forecast service are supported. - -## Utility Components - -- PaddleHub (https://github.com/PaddlePaddle/PaddleHub) - - - The pre-training models are rich, with 52 added pre-training models. Currently, the total number of pre-training models is 100+: - - Semantic models: Five semantic models such as RoBERTa\_wwm, BERT\_wwm, and ERNIE-Tiny are added. - - Text classification: Three yellow anti-identification models are added. 
- - Image classification: A total of 36 image classification models such as ResNext-WSL and EfficientNet are added. - - Target detection: Five detection models such as pedestrian detection and vehicle detection are added. - - Key point detection: Two models for key point detection of face and body posture are added. - - Face mask detection: Two PyramidBox-Lite-based face mask detection models are added. - - Universal face detection: Four universal Face detection models such as Ultra Light Fast Generic Face Detector and PyramidBox-Lite are added. - - Function: - - A Bert Service text vector representation service based on Paddle Serving is added. - - Task flexibility is enhanced. An added hook mechanism supports the loading of user-defined codes. - - A color Colorlog is added. The problem on the repeated printing of logs is fixed. - - Code results are optimized. The command line execution speed is increased by 50%. - - Dataset and Reader are reconstructed. The quantity of adaptive user-defined dataset codes is reduced by 60%. - - The AutoFinetune interface is optimized. Multi-experiment visualization effect display is supported. - - Experience Optimization - - The logic is fully optimized. Rich AIStudio tutorial contents are added. - - The landing page of the official website has been fully upgraded to provide the function of quick online experience and tutorial guidance. - -- Multi-task learning framework PALM (https://github.com/PaddlePaddle/PALM) - - - Python3 and Windows are supported. - - The framework kernel and the multi-tasking underlying mechanism, are upgraded. The API call is open. - - The flexible model saving mechanism supports single-task saving and full-image saving. - - Continuous training and forecast are supported. Dataset files can be switched over freely under a single execution. - - A model customization/self-definition function is added. - - The multi-task underlying kernel is reconstructed. Some bugs that affect universality and stability are fixed. - - The multi-task learning ability is strengthened. - - It is supported that every task has a different batch size and sequence length under a multi-task scenario. - - The problem on inconsistent tasks on each video card during multi-task multi-card training is fixed. - - The multi-task learning scheduling and termination strategies are optimized to generally improve the model generalization ability. - - The function and type of supported tasks are strengthened. - - Matching task support is enhanced. Pairwise learning and multiple categories (e.g. NLI sentence relation judgment) are supported. - - The support for machine reading comprehension tasks is enhanced. User controllable preprocessing hyper-parameters are added. - - The support for sequence labeling tasks is added. - - The large-scale training/inferential capability is strengthened. - - The automatic multi-card forecast capability is added. - - An asynchronous reader is supported. A variable-length padding is supported in multi-card scenarios. - - A module for the management and downloading of pre-training models is added. - - The management and downloading of pre-training models such as BERT, ERNIE, and RoBERTa are supported. - - A RoBERTa Chinese pre-training model is added. - -- Federated Learning PaddleFL (https://github.com/PaddlePaddle/PaddleFL): - - - The scheduler and submitter functions are added: The scheduler is used to control whether the trainer participates in update during training. 
The submitter is used to complete the function of submitting paddleFL tasks in the MPI cluster. - - A LEAF dataset federated learning open dataset is added. An API is added to set a benchmark. Classical datasets in the image classification, emotion analysis, character forecast, and other fields , such as MNIST and Sentiment140, are supported. - - According to the added components, the original samples are modified in example and the femnist\_demo and submitter\_demo examples are added - - Fl\_distribute\_transpiler is optimized to add the support of FedAvg strategy for the adam optimizer. - - SecAgg strategy (Secure Aggregation) is added to achieve secure parameter aggregation. - -## Code Reconstruction and Upgrade - -- Compilation - - A compilation option WITH\_NCCL is added. Single-card users can display and specify WITH\_NCCL=OFF to accelerate compilation. - - A compilation option WITH\_TP\_CACHE is added to cache third-party source codes to avoid repeated downloading. Windows users can set it to ON to speed up compilation and improve compilation stability. - - The `CUDA_ARCH_NAME` default value is set to `Auto` (`All` indicates compiling all GPU architectures and `Auto` indicates compiling only the current machine GPU architecture). For developers, a lot of compilation time is saved using `Auto` than using `All`, thus improving development efficiency. - - Redundant links and products and needless file copying are reduced, thus speeding up the compilation in Windows. -- External Dependency Library - - MKL-DNN is upgraded to the latest Version 1.1. - - The forecast library is decoupled from `third_party` and 28 third-party-dependent compilation codes are refactored to facilitate the unified management of external dependencies. - - Two third-party-dependent private warehouses, one unnecessary dependency, and 2000+ lines of unnecessary codes under the patch are removed to improve the warehouse quality. -- Code Cleanup, Refactoring, and Optimization - - The unnecessary `contrib/float16` directory is removed. The unnecessary snappy/snappystream dependency under the BRPC is deleted. - - `loss.py` and `sequence_lod.py` are split out of `python/paddle/fluid/layers/nn.py` according to the API functions, thus reducing the code quantity of `nn.py` and facilitating reading. - - The codes corresponding to the warnings of `-Wno-error=sign-compare` (at a total of more than 100 points) are fixed. An error will be reported for all subsequent warnings of this kind during compilation, thus improving the code quality. - - `WarningLnk4006/WarningLnk4221` compiled by WindowsMSVC (at a total of about 300 points) is removed to improve the warehouse quality. - - The quantity of reduce\_op, expand\_op, and expand\_as\_op templates is reduced to accelerate GPU compilation and reduce whl package space by 70 M. - - The pybind function of every OP is automatically generated under the dynamic graph using codes and directly called in layers to improve the dynamic graph performance and reduce the coupling degree with the static graph. 
+- To solve the problem that printed program contents are too lengthy and inefficient to use during debugging, considerably simplifies the printed strings of objects such as programs, blocks, operators, and variables, thus improving the debugging efficiency without losing effective information
+- To solve the problem that the third-party library API `boost::get` is unsafe and hard to debug when exceptions are thrown at runtime, adds the `BOOST_GET` series of macros to replace over 600 risky `boost::get` calls in Paddle. Enriches the error messages for `boost::bad_get` exceptions: specifically, adds the C++ error stack, the error file and line number, the expected output type, and the actual type, thus improving the debugging experience
## Bug Fixes
-- Fix the problem of MKL-DNN error when PaddleDetection-based Faster-RCNN uses the Python API to make a forecast.
-- Fix the problem of training suspension in the GPU implementation of sum op because some Tensors are not initialized.
-- Fix the problem of precision loss when the value in fill\_constant is set to a large integer.
-- Fix the problem of precision inconsistency of softmax\_with\_cross\_entropy\_op with regard to the CUDA.
-- Fix the problem that when a clone program is fixed, the stop\_gradient attribute in the program can not be copied to a new program.
-- Fix the problem of precision loss of elementwise\_pow op with regard to integers.
-- Fixed the problem that some GFLAGSs cannot perform specifying outside the forecast library.
-- Fix the problem of random forecast core caused by some passes in Analysistor multithreading. (fc\_gru\_fuse\_pass, seqconv\_eltadd\_relu\_fuse\_pass, attention\_lstm\_fuse\_pass, embedding\_fc\_lstm\_fuse\_pass, fc\_lstm\_fuse\_pass, seq\_concat\_fc\_fuse\_pass)
-- Fix the error that specifying a GPU in the same process using AnalysisConfig does not take effect after NativePredictor is used to specify the use of CPU forecast.
-- Fix the bug of compilation error (setup.py copy and op\_function\_cmd error) in the case of -DWITH\_MKL=OFF.
-- Fix the bug that tuple (Variable) cannot be entered in the py\_func OP; add an example of how to write PythonOP codes.
-- Fix the problem of the sigmoid cudnn kernel being called as the tanh cudnn kernel by mistake.
-- Fix some bugs related to reshape and depthwiseconv in dynamic graph mode; fix the problem of some parameters in the network having no gradient, causing the bug of program crash.
-- Fix the bug of running error of GradientClip in parameter server mode.
-- Fix the problem of memory leak in full asynchronous mode of of the parameter server.
+- Fix the bug of wrong computation results when any slice operation exists in the while loop +- Fix the problem of degradation of the transformer model caused by inplace ops +- Fix the problem of running failure of the last batch in the Ernie precision test +- Fix the problem of failure to correctly exit when exceptions occur in context of fluid.dygraph.guard diff --git a/doc/fluid/user_guides/cv_case/gan/README.cn.md b/doc/fluid/user_guides/cv_case/gan/README.cn.md index 4c0fd2298b34f8fcb87202039a9bf1d9542d279e..40e378c0eb5af72bda915cc7a1467d2bea66ba88 100644 --- a/doc/fluid/user_guides/cv_case/gan/README.cn.md +++ b/doc/fluid/user_guides/cv_case/gan/README.cn.md @@ -285,10 +285,11 @@ with fluid.program_guard(dg_program): dg_logit = D(g_img) # 计算生成图片被判别为真实样本的loss + noise_shape = fluid.layers.shape(noise) dg_loss = loss( dg_logit, - fluid.layers.fill_constant_batch_size_like( - input=noise, dtype='float32', shape=[-1, 1], value=1.0)) + fluid.layers.fill_constant( + dtype='float32', shape=[noise_shape[0], 1], value=1.0)) ``` 使用adam作为优化器,分别优化判别真实图片的loss和判别生成图片的loss。 @@ -311,8 +312,8 @@ batch是一个特殊的decorator,它的输入是一个reader,输出是一个 ```python batch_size = 128 # Minibatch size -train_reader = paddle.batch( - paddle.reader.shuffle( +train_reader = fluid.io.batch( + fluid.io.shuffle( paddle.dataset.mnist.train(), buf_size=60000), batch_size=batch_size) ``` diff --git a/doc/fluid/user_guides/cv_case/gan/README.md b/doc/fluid/user_guides/cv_case/gan/README.md new file mode 100644 index 0000000000000000000000000000000000000000..b23f1589526b53023e52f70f3e05f4fdb872212b --- /dev/null +++ b/doc/fluid/user_guides/cv_case/gan/README.md @@ -0,0 +1,450 @@ +# Generative Adversarial Network + +The source code for this tutorial is in book/09.gan,For the first time to use , please refer to the instruction manual of the Book document. + +### Description: ### +1. Hardware environment requirements: +This article can support running under CPU and GPU +2. CUDA / cuDNN version supported by docker image: +If docker is used to run book, please note that the GPU environment of the default image provided here is CUDA 8 / cuDNN 5. For GPUs requiring CUDA 9 such as NVIDIA Tesla V100, using this image may fail. +3. Consistency of code in documents and scripts: +Please note: to make this article easier to read and use, we split and adjusted the code of dc_gan.py and put it in this article. The code in this article is consistent with the running result of dc_gan.py, which can be verified by running [train.py](https://github.com/PaddlePaddle/book/blob/develop/01.fit_a_line/train.py). + +## Background + +GAN(Generative Adversarial Network \[[1](#Reference)\],GAN for short) is a method of unsupervised learning, learn by two neural networks contest with each other in a game. This method was originally proposed by lan·Goodfellow and others in 2014. The origin paper is [Generative Adversarial Network](https://arxiv.org/abs/1406.2661)。 + +The Generative Adversarial Network consists of a generative network and a discriminative network. Using random sampling from the latent space as input, the output of the generative network needs to imitate the real samples in the training set as much as possible. The input of the discriminative network is the real sample or the output of the generative network. The purpose is to distinguish the output of the generative network from the real samples as much as possible. The two networks oppose each other and continuously adjust the parameters. 
+The ultimate purpose is that the samples generated by the generative network can no longer be distinguished from the real samples \[[2](#References)\].
+
+GAN is often used to generate fake images \[[3](#References)\]. In addition, the method is also used to reconstruct 3D models of objects from images, to model patterns of motion in video, and so on.
+
+## Result
+
+In this tutorial, we use the MNIST dataset as the training input. After 19 rounds of training, we can see that the generated images are very close to the real images. In the following figure, the first 8 rows show real images and the last 8 rows show images generated by the network:
+
+figure 1. handwritten digits generated by GAN
+
+## Model Overview
+
+### GAN
+
+GAN is a way to learn a generative model of the data distribution through an adversarial process. Here, "adversarial" refers to the mutual confrontation between the Generator and the Discriminator. Taking image generation as an example:
+
+- The generative network (G) receives a random noise z and generates an image that imitates the real samples as closely as possible, denoted as G(z).
+- The discriminative network (D) receives an input image x and tries to distinguish whether it is a real sample or a fake sample produced by the generative network. Its output D(x) represents the probability that x is a real image: D(x) = 1 means the discriminative network considers the input certainly real, while D(x) = 0 means it considers the input certainly fake.
+
+During training, the two networks fight against each other and finally reach a dynamic balance. The above process can be described by the formula below.
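+
+Following the notation of the original paper \[[1](#Reference)\], where $p_{data}$ denotes the distribution of real samples and $p_{z}$ the prior distribution of the input noise $z$, the objective is
+
+$$\min_{G}\max_{D} V(D, G) = \mathbb{E}_{x\sim p_{data}(x)}\left[\log D(x)\right] + \mathbb{E}_{z\sim p_{z}(z)}\left[\log\left(1 - D(G(z))\right)\right]$$
+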
+In the best case, G can generate an image G(z) that is so similar to the real images that it is difficult for D to judge whether the generated picture is real or fake, so D can only make a random guess, that is, D(G(z)) = 0.5.
+
+The following figure shows the training process of GAN. In the figure, the real image distribution, the generated image distribution, and the discriminative model are drawn as the black, green, and blue lines respectively. At the beginning of training, the discriminative model cannot separate the real images from the generated ones. Then, when we fix the generative model and optimize the discriminative model, we obtain the result shown in the second panel: the discriminative model can now distinguish the generated data from the real data. The third step is to fix the discriminative model and optimize the generative model, trying to make the discriminative model unable to distinguish the generated images from the real ones. In this step, the distribution of the generated images moves closer to the distribution of the real images. Such iterations continue until convergence, at which point the generated distribution coincides with the real distribution and the discriminative model can no longer tell the real images from the generated ones.
+
+figure 2. GAN training process
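+
+For a fixed generator, the discriminator that maximizes the value function above has a closed form; this is the standard argument from \[[1](#Reference)\], sketched here for reference:
+
+$$D^{*}(x) = \frac{p_{data}(x)}{p_{data}(x) + p_{g}(x)}$$
+
+where $p_{g}$ denotes the distribution of generated samples. At the equilibrium $p_{g} = p_{data}$, the optimal discriminator outputs $D^{*}(x) = 0.5$ for every input, which is exactly the $D(G(z)) = 0.5$ state described above.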
+But in practice it is difficult to reach this perfect equilibrium point, and the convergence theory of GAN is still under active research.
+
+### DCGAN
+
+[DCGAN](https://arxiv.org/abs/1511.06434) \[[4](#Reference)\] combines deep convolutional networks with GAN. Its basic principle is the same as that of GAN, but the generative and discriminative networks are replaced by convolutional networks (CNN). To improve the quality of the generated images and the convergence speed of the network, DCGAN makes several changes to the network structure:
+
+- Remove pooling layers: all pooling layers are replaced by strided convolutions (discriminator) and fractional-strided convolutions (generator); a quick output-size check for the latter is sketched below.
+- Add batch normalization: batchnorm is added in both the generator and the discriminator.
+- Use a fully convolutional network: the FC layers are removed to realize a deeper network structure.
+- Activation functions: in the generator (G), Tanh is used in the last layer and ReLU in the other layers; in the discriminator (D), LeakyReLU is used as the activation function.
+
+The structure of the generator (G) in DCGAN is as follows:
+
+figure 3. Generator (G) in DCGAN
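+
+As a minimal sketch (not part of `dc_gan.py`; the helper below is made up for illustration), the output-size arithmetic behind those fractional-strided (transpose) convolutions can be checked as follows, assuming the common convention that any size from the minimal output up to `minimal + stride - 1` may be requested explicitly:
+
+```python
+def deconv_output_range(in_size, kernel=5, stride=2, padding=2):
+    # minimal output size of a transpose convolution, and the largest size
+    # that can still be requested for the same input, stride, and padding
+    minimal = (in_size - 1) * stride - 2 * padding + kernel
+    return minimal, minimal + stride - 1
+
+print(deconv_output_range(7))   # (13, 14): the generator below requests output_size=[14, 14]
+print(deconv_output_range(14))  # (27, 28): it then requests output_size=[28, 28]
+```
+
+This matches the `deconv` calls in the generator defined later in this tutorial, which pass explicit `output_size=[14, 14]` and `output_size=[28, 28]` when upsampling the 7x7 feature maps to the final 28x28 output.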
+ + +## Dataset prepare + +In this tutorial, we use MNIST to train generator and discriminator, and the dataset can be downloaded to the local automatically through the paddle.dataset module. +For detailed introduction of MNIST, please refer to[recognize_digits](https://github.com/PaddlePaddle/book/tree/develop/02.recognize_digits)。 + +## Model Training + + `09.gan/dc_gan.py` shows the whole process of training. + +### Import dependency + +First import necessary dependency. + +```python +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import sys +import os +import matplotlib +import PIL +import six +import numpy as np +import math +import time +import paddle +import paddle.fluid as fluid + +matplotlib.use('agg') +import matplotlib.pyplot as plt +import matplotlib.gridspec as gridspec +``` +### Defining auxiliary tool + +Define plot function to visualize the process of image generated. + +```python +def plot(gen_data): + pad_dim = 1 + paded = pad_dim + img_dim + gen_data = gen_data.reshape(gen_data.shape[0], img_dim, img_dim) + n = int(math.ceil(math.sqrt(gen_data.shape[0]))) + gen_data = (np.pad( + gen_data, [[0, n * n - gen_data.shape[0]], [pad_dim, 0], [pad_dim, 0]], + 'constant').reshape((n, n, paded, paded)).transpose((0, 2, 1, 3)) + .reshape((n * paded, n * paded))) + fig = plt.figure(figsize=(8, 8)) + plt.axis('off') + plt.imshow(gen_data, cmap='Greys_r', vmin=-1, vmax=1) + return fig +``` + +### Define hyper-parameter + +```python +gf_dim = 64 # the number of basic channels of the generator's feature map. The number of all the channels of feature maps in the generator is a multiple of the number of basic channels +df_dim = 64 # the number of basic channels of the discriminator's feature map. The number of all the channels of feature maps in the discriminator is a multiple of the number of basic channels +gfc_dim = 1024 * 2 # the dimension of full connection layer of generator +dfc_dim = 1024 # the dimension of full connection layer of discriminator +img_dim = 28 # size of the input picture + +NOISE_SIZE = 100 # dimension of input noise +LEARNING_RATE = 2e-4 # learning rate of training + +epoch = 20 # epoch number of training +output = "./output_dcgan" # storage path of model and test results +use_cudnn = False # use cuDNN or not +use_gpu=False # use GPU or not +``` + +### Define network architecture + +- Batch Normalization layer + +Call `fluid.layers.batch_norm` to implement the bn layer. The activation function uses ReLu by default. +```python +def bn(x, name=None, act='relu'): + return fluid.layers.batch_norm( + x, + param_attr=name + '1', + bias_attr=name + '2', + moving_mean_name=name + '3', + moving_variance_name=name + '4', + name=name, + act=act) +``` + +- Convolution layer + +Call `fluid.nets.simple_img_conv_pool` to get the result of convolution and pooling. The kernel size of convolution is 5x5, the pooling window size is 2x2, the window sliding step size is 2, and the activation function type is specified by the specific network structure. 
+ +```python +def conv(x, num_filters, name=None, act=None): + return fluid.nets.simple_img_conv_pool( + input=x, + filter_size=5, + num_filters=num_filters, + pool_size=2, + pool_stride=2, + param_attr=name + 'w', + bias_attr=name + 'b', + use_cudnn=use_cudnn, + act=act) +``` + +- Fully Connected layer + +```python +def fc(x, num_filters, name=None, act=None): + return fluid.layers.fc(input=x, + size=num_filters, + act=act, + param_attr=name + 'w', + bias_attr=name + 'b') +``` + +- Transpose Convolution Layer + +In the generator, we need to generate a full-scale image by random sampling values. DCGAN uses the transpose convolution layer for upsampling. In fluid, we call `fluid.layers.conv2d_transpose` to realize transpose convolution. + +```python +def deconv(x, + num_filters, + name=None, + filter_size=5, + stride=2, + dilation=1, + padding=2, + output_size=None, + act=None): + return fluid.layers.conv2d_transpose( + input=x, + param_attr=name + 'w', + bias_attr=name + 'b', + num_filters=num_filters, + output_size=output_size, + filter_size=filter_size, + stride=stride, + dilation=dilation, + padding=padding, + use_cudnn=use_cudnn, + act=act) +``` + +- Discriminator + +The discriminator uses the real dataset and the fake images generated by the generator to train, and in the training process, try to make the output result of the real data close to 1 and the output result of the fake image close to 0 as far as possible. The discriminator implemented in this tutorial is composed of two convolution_pooling layers and two fully connected layers. The number of neurons in the last fully connected layer is 1, and a binary classification result is output. + +```python +def D(x): + x = fluid.layers.reshape(x=x, shape=[-1, 1, 28, 28]) + x = conv(x, df_dim, act='leaky_relu',name='conv1') + x = bn(conv(x, df_dim * 2,name='conv2'), act='leaky_relu',name='bn1') + x = bn(fc(x, dfc_dim,name='fc1'), act='leaky_relu',name='bn2') + x = fc(x, 1, act='sigmoid',name='fc2') + return x +``` + +- Generator + +The generator consists of two groups of fully connected layers with BN and two groups of transpose convolution layers. The network input is random noise data. The convolution kernel number of the last layer of transposed convolution is 1, indicating that the output is a gray-scale picture. 
+ +```python +def G(x): + x = bn(fc(x, gfc_dim,name='fc3'),name='bn3') + x = bn(fc(x, gf_dim * 2 * img_dim // 4 * img_dim // 4,name='fc4'),name='bn4') + x = fluid.layers.reshape(x, [-1, gf_dim * 2, img_dim // 4, img_dim // 4]) + x = deconv(x, gf_dim * 2, act='relu', output_size=[14, 14],name='deconv1') + x = deconv(x, num_filters=1, filter_size=5, padding=2, act='tanh', output_size=[28, 28],name='deconv2') + x = fluid.layers.reshape(x, shape=[-1, 28 * 28]) + return x +``` +### Loss function + +Loss function uses `sigmoid_cross_entropy_with_logits` + +```python +def loss(x, label): + return fluid.layers.mean( + fluid.layers.sigmoid_cross_entropy_with_logits(x=x, label=label)) +``` + + +### Create Program + +```python +d_program = fluid.Program() +dg_program = fluid.Program() + +# Define the program to distinguish the real picture +with fluid.program_guard(d_program): + # size of the input picture is28*28=784 + img = fluid.data(name='img', shape=[None, 784], dtype='float32') + # label shape=1 + label = fluid.data(name='label', shape=[None, 1], dtype='float32') + d_logit = D(img) + d_loss = loss(d_logit, label) + +# Define the program to distinguish the generated pictures +with fluid.program_guard(dg_program): + noise = fluid.data( + name='noise', shape=[None, NOISE_SIZE], dtype='float32') + # Noise data as input to generate image + g_img = G(x=noise) + + g_program = dg_program.clone() + g_program_test = dg_program.clone(for_test=True) + + # Judge the probability that the generated image is a real sample + dg_logit = D(g_img) + + # Calculate the loss of the generated image as the real sample + noise_shape = fluid.layers.shape(noise) + dg_loss = loss( + dg_logit, + fluid.layers.fill_constant( + dtype='float32', shape=[noise_shape[0], 1], value=1.0)) + +``` +Adam is used as the optimizer to distinguish the loss of the real picture and the loss of the generated picture. + +```python +opt = fluid.optimizer.Adam(learning_rate=LEARNING_RATE) +opt.minimize(loss=d_loss) +parameters = [p.name for p in g_program.global_block().all_parameters()] +opt.minimize(loss=dg_loss, parameter_list=parameters) +``` + +### Dataset Feeders configuration + +Next, we start the training process. paddle.dataset.mnist.train() is used as training dataset. This function returns a reader. The reader in is a python function, which returns one Python yield generator every time it is called. + +The shuffle below is a reader decorator. It accepts a reader A and returns another reader B. Reader B reads the buffer_size training data into a buffer every time, then randomly scrambles its order and outputs it one by one. + +Batch is a special decorator. Its input is a reader and its output is a batched reader. In PaddlePaddle, a reader yields one piece of training data at a time, while a batched reader yields one minibatch at a time. + +```python +batch_size = 128 # Minibatch size + +train_reader = fluid.io.batch( + fluid.io.shuffle( + paddle.dataset.mnist.train(), buf_size=60000), + batch_size=batch_size) +``` + +### Create actuator + +```python +if use_gpu: + exe = fluid.Executor(fluid.CUDAPlace(0)) +else: + exe = fluid.Executor(fluid.CPUPlace()) + +exe.run(fluid.default_startup_program()) +``` + +### Start training + +For each iteration in the training process, the generator and the discriminator set their own iteration times respectively. In order to avoid the discriminator converging to 0 rapidly. In this tutorial, by default, every iteration, the discriminator are trained once and generator twice. 
+ +```python +t_time = 0 +losses = [[], []] + +# The number of iterations of the discriminator +NUM_TRAIN_TIMES_OF_DG = 2 + +# Noise data of final generated image +const_n = np.random.uniform( + low=-1.0, high=1.0, + size=[batch_size, NOISE_SIZE]).astype('float32') + +for pass_id in range(epoch): + for batch_id, data in enumerate(train_reader()): + if len(data) != batch_size: + continue + + # Generating noise data during training + noise_data = np.random.uniform( + low=-1.0, high=1.0, + size=[batch_size, NOISE_SIZE]).astype('float32') + + # Real image + real_image = np.array(list(map(lambda x: x[0], data))).reshape( + -1, 784).astype('float32') + # Real label + real_labels = np.ones( + shape=[real_image.shape[0], 1], dtype='float32') + # Fake label + fake_labels = np.zeros( + shape=[real_image.shape[0], 1], dtype='float32') + total_label = np.concatenate([real_labels, fake_labels]) + s_time = time.time() + + # Fake image + generated_image = exe.run(g_program, + feed={'noise': noise_data}, + fetch_list=[g_img])[0] + + total_images = np.concatenate([real_image, generated_image]) + + # D loss of judging fake pictures as fake + d_loss_1 = exe.run(d_program, + feed={ + 'img': generated_image, + 'label': fake_labels, + }, + fetch_list=[d_loss])[0][0] + + # D loss of judging true pictures as true + d_loss_2 = exe.run(d_program, + feed={ + 'img': real_image, + 'label': real_labels, + }, + fetch_list=[d_loss])[0][0] + + d_loss_n = d_loss_1 + d_loss_2 + losses[0].append(d_loss_n) + + # Training generator + for _ in six.moves.xrange(NUM_TRAIN_TIMES_OF_DG): + noise_data = np.random.uniform( + low=-1.0, high=1.0, + size=[batch_size, NOISE_SIZE]).astype('float32') + dg_loss_n = exe.run(dg_program, + feed={'noise': noise_data}, + fetch_list=[dg_loss])[0][0] + losses[1].append(dg_loss_n) + t_time += (time.time() - s_time) + if batch_id % 10 == 0 : + if not os.path.exists(output): + os.makedirs(output) + # Results of each round + generated_images = exe.run(g_program_test, + feed={'noise': const_n}, + fetch_list=[g_img])[0] + # Connect real pictures to generated pictures + total_images = np.concatenate([real_image, generated_images]) + fig = plot(total_images) + msg = "Epoch ID={0} Batch ID={1} D-Loss={2} DG-Loss={3}\n ".format( + pass_id, batch_id, + d_loss_n, dg_loss_n) + print(msg) + plt.title(msg) + plt.savefig( + '{}/{:04d}_{:04d}.png'.format(output, pass_id, + batch_id), + bbox_inches='tight') + plt.close(fig) +``` + +Print the results of a specific round: + +```python +def display_image(epoch_no,batch_id): + return PIL.Image.open('output_dcgan/{:04d}_{:04d}.png'.format(epoch_no,batch_id)) + +# Observe the generated images of the 10th epoch and 460 batches: +display_image(10,460) +``` + + +## Summary + +DCGAN use a random noise vector as the input, the input is amplified into two-dimensional data through a similar but opposite structure to CNN. By using the generative model of this structure and the discriminative model of CNN structure, DCGAN can achieve considerable results in image generative. In this case, we use DCGAN to generate handwritten digital images. You can try to change dataset to generate images that meet your personal needs, or try to modify the network structure to observe different generative effects. + + +## Reference +[1] Goodfellow, Ian J.; Pouget-Abadie, Jean; Mirza, Mehdi; Xu, Bing; Warde-Farley, David; Ozair, Sherjil; Courville, Aaron; Bengio, Yoshua. Generative Adversarial Networks. 2014. arXiv:1406.2661 [stat.ML]. 
+ +[2] Andrej Karpathy, Pieter Abbeel, Greg Brockman, Peter Chen, Vicki Cheung, Rocky Duan, Ian Goodfellow, Durk Kingma, Jonathan Ho, Rein Houthooft, Tim Salimans, John Schulman, Ilya Sutskever, And Wojciech Zaremba, Generative Models, OpenAI, [April 7, 2016] + +[3] alimans, Tim; Goodfellow, Ian; Zaremba, Wojciech; Cheung, Vicki; Radford, Alec; Chen, Xi. Improved Techniques for Training GANs. 2016. arXiv:1606.03498 [cs.LG]. + +[4] Radford A, Metz L, Chintala S. Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks[J]. Computer Science, 2015. diff --git a/doc/fluid/user_guides/cv_case/gan/dc_gan.py b/doc/fluid/user_guides/cv_case/gan/dc_gan.py index fdab6fcdd8dc8623a2f6179e10d40494579469ba..650897a2803cacc4fb31f339f8fb7958a52359ff 100644 --- a/doc/fluid/user_guides/cv_case/gan/dc_gan.py +++ b/doc/fluid/user_guides/cv_case/gan/dc_gan.py @@ -74,11 +74,11 @@ def train(args): g_program_test = dg_program.clone(for_test=True) dg_logit = D(g_img) + noise_shape = fluid.layers.shape(noise) dg_loss = loss(dg_logit, - fluid.layers.fill_constant_batch_size_like( - input=noise, + fluid.layers.fill_constant( dtype='float32', - shape=[-1, 1], + shape=[noise_shape[0], 1], value=1.0)) opt = fluid.optimizer.Adam(learning_rate=LEARNING_RATE) @@ -93,13 +93,12 @@ def train(args): exe = fluid.Executor(fluid.CUDAPlace(0)) exe.run(fluid.default_startup_program()) if args.enable_ce: - train_reader = paddle.batch( + train_reader = fluid.io.batch( paddle.dataset.mnist.train(), batch_size=args.batch_size) else: - train_reader = paddle.batch( - paddle.reader.shuffle( - paddle.dataset.mnist.train(), buf_size=60000), + train_reader = fluid.io.batch( + fluid.io.shuffle(paddle.dataset.mnist.train(), buf_size=60000), batch_size=args.batch_size) NUM_TRAIN_TIMES_OF_DG = 2 diff --git a/doc/fluid/user_guides/cv_case/gan/index.cn.html b/doc/fluid/user_guides/cv_case/gan/index.cn.html index a6b88e8ae0ad5a09bacae26410dba829a55c0c05..625073db5f907b674747138fef927dfa073c93a2 100644 --- a/doc/fluid/user_guides/cv_case/gan/index.cn.html +++ b/doc/fluid/user_guides/cv_case/gan/index.cn.html @@ -327,10 +327,11 @@ with fluid.program_guard(dg_program): dg_logit = D(g_img) # 计算生成图片被判别为真实样本的loss + noise_shape = fluid.layers.shape(noise) dg_loss = loss( dg_logit, - fluid.layers.fill_constant_batch_size_like( - input=noise, dtype='float32', shape=[-1, 1], value=1.0)) + fluid.layers.fill_constant( + dtype='float32', shape=[noise_shape[0], 1], value=1.0)) ``` 使用adam作为优化器,分别优化判别真实图片的loss和判别生成图片的loss。 @@ -353,8 +354,8 @@ batch是一个特殊的decorator,它的输入是一个reader,输出是一个 ```python batch_size = 128 # Minibatch size -train_reader = paddle.batch( - paddle.reader.shuffle( +train_reader = fluid.io.batch( + fluid.io.shuffle( paddle.dataset.mnist.train(), buf_size=60000), batch_size=batch_size) ``` diff --git a/doc/fluid/user_guides/cv_case/gan/index.html b/doc/fluid/user_guides/cv_case/gan/index.html new file mode 100644 index 0000000000000000000000000000000000000000..f04971c70cf9e3a9a240877d13540b64484e5a74 --- /dev/null +++ b/doc/fluid/user_guides/cv_case/gan/index.html @@ -0,0 +1,514 @@ + + + + + + + + + + + + + + + + + +
+
+ + + + + + + diff --git a/doc/fluid/user_guides/cv_case/gan/network.py b/doc/fluid/user_guides/cv_case/gan/network.py index 8323881dbdc5e2250d3b609812c83944d08a8764..6f0e85f4a81ff6938ec20f2cc4b8c63dffdc235a 100644 --- a/doc/fluid/user_guides/cv_case/gan/network.py +++ b/doc/fluid/user_guides/cv_case/gan/network.py @@ -89,8 +89,9 @@ def deconv(x, def conv_cond_concat(x, y): """Concatenate conditioning vector on feature map axis.""" - ones = fluid.layers.fill_constant_batch_size_like( - x, [-1, y.shape[1], x.shape[2], x.shape[3]], "float32", 1.0) + x_shape = fluid.layers.shape(x) + ones = fluid.layers.fill_constant( + [x_shape[0], y.shape[1], x.shape[2], x.shape[3]], "float32", 1.0) return fluid.layers.concat([x, ones * y], 1) diff --git a/doc/fluid/user_guides/simple_case/image_classification/.gitignore b/doc/fluid/user_guides/cv_case/image_classification/.gitignore similarity index 100% rename from doc/fluid/user_guides/simple_case/image_classification/.gitignore rename to doc/fluid/user_guides/cv_case/image_classification/.gitignore diff --git a/doc/fluid/user_guides/simple_case/image_classification/.run_ce.sh b/doc/fluid/user_guides/cv_case/image_classification/.run_ce.sh similarity index 100% rename from doc/fluid/user_guides/simple_case/image_classification/.run_ce.sh rename to doc/fluid/user_guides/cv_case/image_classification/.run_ce.sh diff --git a/doc/fluid/user_guides/simple_case/image_classification/README.cn.md b/doc/fluid/user_guides/cv_case/image_classification/README.cn.md similarity index 100% rename from doc/fluid/user_guides/simple_case/image_classification/README.cn.md rename to doc/fluid/user_guides/cv_case/image_classification/README.cn.md diff --git a/doc/fluid/user_guides/simple_case/image_classification/README.md b/doc/fluid/user_guides/cv_case/image_classification/README.md similarity index 100% rename from doc/fluid/user_guides/simple_case/image_classification/README.md rename to doc/fluid/user_guides/cv_case/image_classification/README.md diff --git a/doc/fluid/user_guides/simple_case/image_classification/_ce.py b/doc/fluid/user_guides/cv_case/image_classification/_ce.py similarity index 100% rename from doc/fluid/user_guides/simple_case/image_classification/_ce.py rename to doc/fluid/user_guides/cv_case/image_classification/_ce.py diff --git a/doc/fluid/user_guides/simple_case/image_classification/image/cifar.png b/doc/fluid/user_guides/cv_case/image_classification/image/cifar.png similarity index 100% rename from doc/fluid/user_guides/simple_case/image_classification/image/cifar.png rename to doc/fluid/user_guides/cv_case/image_classification/image/cifar.png diff --git a/doc/fluid/user_guides/simple_case/image_classification/image/dog.png b/doc/fluid/user_guides/cv_case/image_classification/image/dog.png similarity index 100% rename from doc/fluid/user_guides/simple_case/image_classification/image/dog.png rename to doc/fluid/user_guides/cv_case/image_classification/image/dog.png diff --git a/doc/fluid/user_guides/simple_case/image_classification/image/dog_cat.png b/doc/fluid/user_guides/cv_case/image_classification/image/dog_cat.png similarity index 100% rename from doc/fluid/user_guides/simple_case/image_classification/image/dog_cat.png rename to doc/fluid/user_guides/cv_case/image_classification/image/dog_cat.png diff --git a/doc/fluid/user_guides/simple_case/image_classification/image/fea_conv0.png b/doc/fluid/user_guides/cv_case/image_classification/image/fea_conv0.png similarity index 100% rename from 
doc/fluid/user_guides/simple_case/image_classification/image/fea_conv0.png rename to doc/fluid/user_guides/cv_case/image_classification/image/fea_conv0.png diff --git a/doc/fluid/user_guides/simple_case/image_classification/image/flowers.png b/doc/fluid/user_guides/cv_case/image_classification/image/flowers.png similarity index 100% rename from doc/fluid/user_guides/simple_case/image_classification/image/flowers.png rename to doc/fluid/user_guides/cv_case/image_classification/image/flowers.png diff --git a/doc/fluid/user_guides/simple_case/image_classification/image/googlenet.jpeg b/doc/fluid/user_guides/cv_case/image_classification/image/googlenet.jpeg similarity index 100% rename from doc/fluid/user_guides/simple_case/image_classification/image/googlenet.jpeg rename to doc/fluid/user_guides/cv_case/image_classification/image/googlenet.jpeg diff --git a/doc/fluid/user_guides/simple_case/image_classification/image/ilsvrc.png b/doc/fluid/user_guides/cv_case/image_classification/image/ilsvrc.png similarity index 100% rename from doc/fluid/user_guides/simple_case/image_classification/image/ilsvrc.png rename to doc/fluid/user_guides/cv_case/image_classification/image/ilsvrc.png diff --git a/doc/fluid/user_guides/simple_case/image_classification/image/inception.png b/doc/fluid/user_guides/cv_case/image_classification/image/inception.png similarity index 100% rename from doc/fluid/user_guides/simple_case/image_classification/image/inception.png rename to doc/fluid/user_guides/cv_case/image_classification/image/inception.png diff --git a/doc/fluid/user_guides/simple_case/image_classification/image/inception_en.png b/doc/fluid/user_guides/cv_case/image_classification/image/inception_en.png similarity index 100% rename from doc/fluid/user_guides/simple_case/image_classification/image/inception_en.png rename to doc/fluid/user_guides/cv_case/image_classification/image/inception_en.png diff --git a/doc/fluid/user_guides/simple_case/image_classification/image/lenet.png b/doc/fluid/user_guides/cv_case/image_classification/image/lenet.png similarity index 100% rename from doc/fluid/user_guides/simple_case/image_classification/image/lenet.png rename to doc/fluid/user_guides/cv_case/image_classification/image/lenet.png diff --git a/doc/fluid/user_guides/simple_case/image_classification/image/lenet_en.png b/doc/fluid/user_guides/cv_case/image_classification/image/lenet_en.png similarity index 100% rename from doc/fluid/user_guides/simple_case/image_classification/image/lenet_en.png rename to doc/fluid/user_guides/cv_case/image_classification/image/lenet_en.png diff --git a/doc/fluid/user_guides/simple_case/image_classification/image/plot.png b/doc/fluid/user_guides/cv_case/image_classification/image/plot.png similarity index 100% rename from doc/fluid/user_guides/simple_case/image_classification/image/plot.png rename to doc/fluid/user_guides/cv_case/image_classification/image/plot.png diff --git a/doc/fluid/user_guides/simple_case/image_classification/image/plot_en.png b/doc/fluid/user_guides/cv_case/image_classification/image/plot_en.png similarity index 100% rename from doc/fluid/user_guides/simple_case/image_classification/image/plot_en.png rename to doc/fluid/user_guides/cv_case/image_classification/image/plot_en.png diff --git a/doc/fluid/user_guides/simple_case/image_classification/image/resnet.png b/doc/fluid/user_guides/cv_case/image_classification/image/resnet.png similarity index 100% rename from doc/fluid/user_guides/simple_case/image_classification/image/resnet.png rename to 
doc/fluid/user_guides/cv_case/image_classification/image/resnet.png diff --git a/doc/fluid/user_guides/simple_case/image_classification/image/resnet_block.jpg b/doc/fluid/user_guides/cv_case/image_classification/image/resnet_block.jpg similarity index 100% rename from doc/fluid/user_guides/simple_case/image_classification/image/resnet_block.jpg rename to doc/fluid/user_guides/cv_case/image_classification/image/resnet_block.jpg diff --git a/doc/fluid/user_guides/simple_case/image_classification/image/train_and_test.png b/doc/fluid/user_guides/cv_case/image_classification/image/train_and_test.png similarity index 100% rename from doc/fluid/user_guides/simple_case/image_classification/image/train_and_test.png rename to doc/fluid/user_guides/cv_case/image_classification/image/train_and_test.png diff --git a/doc/fluid/user_guides/simple_case/image_classification/image/variations.png b/doc/fluid/user_guides/cv_case/image_classification/image/variations.png similarity index 100% rename from doc/fluid/user_guides/simple_case/image_classification/image/variations.png rename to doc/fluid/user_guides/cv_case/image_classification/image/variations.png diff --git a/doc/fluid/user_guides/simple_case/image_classification/image/variations_en.png b/doc/fluid/user_guides/cv_case/image_classification/image/variations_en.png similarity index 100% rename from doc/fluid/user_guides/simple_case/image_classification/image/variations_en.png rename to doc/fluid/user_guides/cv_case/image_classification/image/variations_en.png diff --git a/doc/fluid/user_guides/simple_case/image_classification/image/vgg16.png b/doc/fluid/user_guides/cv_case/image_classification/image/vgg16.png similarity index 100% rename from doc/fluid/user_guides/simple_case/image_classification/image/vgg16.png rename to doc/fluid/user_guides/cv_case/image_classification/image/vgg16.png diff --git a/doc/fluid/user_guides/simple_case/image_classification/index.cn.html b/doc/fluid/user_guides/cv_case/image_classification/index.cn.html similarity index 100% rename from doc/fluid/user_guides/simple_case/image_classification/index.cn.html rename to doc/fluid/user_guides/cv_case/image_classification/index.cn.html diff --git a/doc/fluid/user_guides/simple_case/image_classification/index.html b/doc/fluid/user_guides/cv_case/image_classification/index.html similarity index 100% rename from doc/fluid/user_guides/simple_case/image_classification/index.html rename to doc/fluid/user_guides/cv_case/image_classification/index.html diff --git a/doc/fluid/user_guides/simple_case/image_classification/resnet.py b/doc/fluid/user_guides/cv_case/image_classification/resnet.py similarity index 100% rename from doc/fluid/user_guides/simple_case/image_classification/resnet.py rename to doc/fluid/user_guides/cv_case/image_classification/resnet.py diff --git a/doc/fluid/user_guides/simple_case/image_classification/train.py b/doc/fluid/user_guides/cv_case/image_classification/train.py similarity index 100% rename from doc/fluid/user_guides/simple_case/image_classification/train.py rename to doc/fluid/user_guides/cv_case/image_classification/train.py diff --git a/doc/fluid/user_guides/simple_case/image_classification/vgg.py b/doc/fluid/user_guides/cv_case/image_classification/vgg.py similarity index 100% rename from doc/fluid/user_guides/simple_case/image_classification/vgg.py rename to doc/fluid/user_guides/cv_case/image_classification/vgg.py diff --git a/doc/fluid/user_guides/cv_case/index_cn.rst b/doc/fluid/user_guides/cv_case/index_cn.rst index 
8efd1d4d8dced4ee736f0a4522d69a31771ebeb3..6d1b108f45407bd37111bd611e3a5c0663609c32 100644 --- a/doc/fluid/user_guides/cv_case/index_cn.rst +++ b/doc/fluid/user_guides/cv_case/index_cn.rst @@ -2,9 +2,13 @@ 计算机视觉 ################ +.. todo:: + +计算机视觉是一门关于如何运用照相机和计算机来获取我们所需的,被拍摄对象的数据与信息的学问。在这里PaddlePaddle为大家提供了两篇cv的教程供大家学习: .. toctree:: :titlesonly: + image_classification/README.cn.md gan/README.cn.md diff --git a/doc/fluid/user_guides/cv_case/index_en.rst b/doc/fluid/user_guides/cv_case/index_en.rst index fa597ee13258083f41ab5f31f5d111e2583314b6..0523bfb6bd5455557ef3282bdfbcf63b1bccac7f 100644 --- a/doc/fluid/user_guides/cv_case/index_en.rst +++ b/doc/fluid/user_guides/cv_case/index_en.rst @@ -6,5 +6,6 @@ Computer Vision .. toctree:: :titlesonly: + image_classification/README.md gan/README.md diff --git a/doc/fluid/user_guides/index_cn.rst b/doc/fluid/user_guides/index_cn.rst index 20fdb1b9d10d942558b5d785d62f56becd61646c..be7d31882574834be02c9f4a74cc8ee4a5a1ebd9 100644 --- a/doc/fluid/user_guides/index_cn.rst +++ b/doc/fluid/user_guides/index_cn.rst @@ -21,8 +21,6 @@ - `自然语言处理 <../user_guides/nlp_case/index_cn.html>`_: 介绍使用 Paddle 实现自然语言处理方向的案例 - `推荐 <../user_guides/rec_case/index_cn.html>`_:介绍如何使用 Paddle 完成推荐领域任务的案例 - - - `模型库 <../user_guides/models/index_cn.html>`_:介绍了 Paddle 经典的模型库 - `工具组件 <../user_guides/tools/index_cn.html>`_:介绍在 Paddle 工具组件的使用案例 @@ -33,7 +31,6 @@ cv_case/index_cn.rst nlp_case/index_cn.rst rec_case/index_cn.rst - models/index_cn.rst tools/index_cn.rst diff --git a/doc/fluid/user_guides/index_en.rst b/doc/fluid/user_guides/index_en.rst index ad6ad2b41db00ee9c7b0ca5b1adebc49b084c1ae..6af142262b52098706a684ea499dfaba246550e6 100644 --- a/doc/fluid/user_guides/index_en.rst +++ b/doc/fluid/user_guides/index_en.rst @@ -19,6 +19,8 @@ Overview - `Simple Case <../user_guides/simple_case/index_en.html>`_ :introduces basic cases of Paddle + - `Computer Vision <../user_guides/cv_case/index_en.html>`_ :introduces cases of using paddle to realize Computer Vision task + - `Natural Language Processing <../user_guides/nlp_case/index_en.html>`_:introduces cases of using paddle to realize Natural Language Processing tasks - `Recommend <../user_guides/rec_case/index_en.html>`_:introduces cases of using paddle to realize Recommend tasks @@ -29,6 +31,7 @@ Overview :hidden: simple_case/index_en.rst + cv_case/index_en.rst nlp_case/index_en.rst rec_case/index_en.rst models/index_cn.rst diff --git a/doc/fluid/user_guides/nlp_case/index_cn.rst b/doc/fluid/user_guides/nlp_case/index_cn.rst index 8905cb8fc100dfe1a64ed9cde05efeaa88615413..dc001b9fc4734c4c9df59c7bcce7c7f7deffa782 100644 --- a/doc/fluid/user_guides/nlp_case/index_cn.rst +++ b/doc/fluid/user_guides/nlp_case/index_cn.rst @@ -2,6 +2,10 @@ 自然语言处理 ################ +.. todo:: + +自然语言处理(Natural Language Processing)是人工智能和语言学领域的分支学科。此领域探讨如何处理及运用自然语言,特别是如何编程计算机以成功处理大量的自然语言数据。在这里PaddlePaddle为大家提供了三篇NLP领域的学习教程: + .. 
toctree:: :titlesonly: diff --git a/doc/fluid/user_guides/nlp_case/label_semantic_roles/README.md b/doc/fluid/user_guides/nlp_case/label_semantic_roles/README.md index 3801b0df873fa20868a778a85a4c07f2e1646032..00c5f627fb7eaeea22b37a2fb5744c617f72ab41 100644 --- a/doc/fluid/user_guides/nlp_case/label_semantic_roles/README.md +++ b/doc/fluid/user_guides/nlp_case/label_semantic_roles/README.md @@ -388,8 +388,8 @@ The data introduction section mentions the payment of the CoNLL 2005 training se crf_decode = fluid.layers.crf_decoding( input=feature_out, param_attr=fluid.ParamAttr(name='crfw')) -train_data = paddle.batch( - paddle.reader.shuffle( +train_data = fluid.io.batch( + fluid.io.shuffle( paddle.dataset.conll05.test(), buf_size=8192), batch_size=BATCH_SIZE) diff --git a/doc/fluid/user_guides/nlp_case/label_semantic_roles/train.py b/doc/fluid/user_guides/nlp_case/label_semantic_roles/train.py index 70d05946e4736647a4e402013c49c99c96810567..5ce21ffe0ab380d1030ac1e0d48a32c423fe50ca 100644 --- a/doc/fluid/user_guides/nlp_case/label_semantic_roles/train.py +++ b/doc/fluid/user_guides/nlp_case/label_semantic_roles/train.py @@ -143,8 +143,8 @@ def train(use_cuda, save_dirname=None, is_local=True): # define network topology feature_out = db_lstm(**locals()) - target = fluid.layers.data( - name='target', shape=[1], dtype='int64', lod_level=1) + target = fluid.data( + name='target', shape=[None, 1], dtype='int64', lod_level=1) crf_cost = fluid.layers.linear_chain_crf( input=feature_out, label=target, @@ -165,11 +165,11 @@ def train(use_cuda, save_dirname=None, is_local=True): input=feature_out, param_attr=fluid.ParamAttr(name='crfw')) if args.enable_ce: - train_data = paddle.batch( + train_data = fluid.io.batch( paddle.dataset.conll05.test(), batch_size=BATCH_SIZE) else: - train_data = paddle.batch( - paddle.reader.shuffle( + train_data = fluid.io.batch( + fluid.io.shuffle( paddle.dataset.conll05.test(), buf_size=8192), batch_size=BATCH_SIZE) diff --git a/doc/fluid/user_guides/nlp_case/machine_translation/README.cn.md b/doc/fluid/user_guides/nlp_case/machine_translation/README.cn.md index 0b709b78c98d09df62894fc9c630460cefc5031a..b255601f1742828862e78207315f8544b1c8f2d6 100644 --- a/doc/fluid/user_guides/nlp_case/machine_translation/README.cn.md +++ b/doc/fluid/user_guides/nlp_case/machine_translation/README.cn.md @@ -5,7 +5,7 @@ ### 说明 1. 硬件要求 本文可支持在CPU、GPU下运行 2. 对docker file cuda/cudnn的支持 如果您使用了本文配套的docker镜像,请注意:该镜像对GPU的支持仅限于CUDA 8,cuDNN 5 -3. 文档中代码和seq2seq.py不一致的问题 请注意:为使本文更加易读易用,我们拆分、调整了seq2seq.py的代码并放入本文。本文中代码与seq2seq.py的运行结果一致,如希望直接看到训练脚本输出效果,可运行[seq2seq.py](https://github.com/PaddlePaddle/book/blob/develop/08.machine_translation/seq2seq.py)。 +3. 文档中代码和seq2seq.py不一致的问题 请注意:为使本文更加易读易用,我们拆分、调整了seq2seq.py的代码并放入本文。本文中代码与seq2seq.py的运行结果一致,如希望直接看到训练脚本输出效果,可运行[seq2seq.py](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/user_guides/nlp_case/machine_translation/seq2seq.py)。 ## 背景介绍 @@ -52,7 +52,7 @@ GRU\[[2](#参考文献)\]是Cho等人在LSTM上提出的简化版本,也是RNN - 重置门(reset gate):如果重置门关闭,会忽略掉历史信息,即历史不相干的信息不会影响未来的输出。 - 更新门(update gate):将LSTM的输入门和遗忘门合并,用于控制历史信息对当前时刻隐层输出的影响。如果更新门接近1,会把历史信息传递下去。

-
+
图2. GRU(门控循环单元)

@@ -84,11 +84,11 @@ GRU\[[2](#参考文献)\]是Cho等人在LSTM上提出的简化版本,也是RNN 编码阶段分为三步: -1. one-hot vector表示:将源语言句子$x=\left \{ x_1,x_2,...,x_T \right \}$的每个词$x_i$表示成一个列向量$w_i\epsilon \left \{ 0,1 \right \}^{\left | V \right |},i=1,2,...,T$。这个向量$w_i$的维度与词汇表大小$\left | V \right |$ 相同,并且只有一个维度上有值1(该位置对应该词在词汇表中的位置),其余全是0。 +1. one-hot vector表示:将源语言句子$x = \left(x_1,x_2,...,x_T\right)$的每个词$x_i$表示成一个列向量$w_i$。这个向量$w_i$的维度与词汇表大小$\left | V \right |$ 相同,并且只有一个维度上有值1(该位置对应该词在词汇表中的位置),其余全是0。 2. 映射到低维语义空间的词向量:one-hot vector表示存在两个问题,1)生成的向量维度往往很大,容易造成维数灾难;2)难以刻画词与词之间的关系(如语义相似性,也就是无法很好地表达语义)。因此,需再one-hot vector映射到低维的语义空间,由一个固定维度的稠密向量(称为词向量)表示。记映射矩阵为$C\epsilon R^{K\times \left | V \right |}$,用$s_i=Cw_i$表示第$i$个词的词向量,$K$为向量维度。 -3. 用RNN编码源语言词序列:这一过程的计算公式为$h_i=\varnothing _\theta \left ( h_{i-1}, s_i \right )$,其中$h_0$是一个全零的向量,$\varnothing _\theta$是一个非线性激活函数,最后得到的$\mathbf{h}=\left \{ h_1,..., h_T \right \}$就是RNN依次读入源语言$T$个词的状态编码序列。整句话的向量表示可以采用$\mathbf{h}$在最后一个时间步$T$的状态编码,或使用时间维上的池化(pooling)结果。 +3. 用RNN编码源语言词序列:这一过程的计算公式为$h_i=\phi_{\theta} \left ( h_{i-1}, s_i \right )$,其中$h_0$是一个全零的向量,$\phi _{\theta}$是一个非线性激活函数,最后得到的$\mathbf{h}=\left(h_1,..., h_T \right)$就是RNN依次读入源语言$T$个词的状态编码序列。整句话的向量表示可以采用$\mathbf{h}$在最后一个时间步$T$的状态编码,或使用时间维上的池化(pooling)结果。 第3步也可以使用双向循环神经网络实现更复杂的句编码表示,具体可以用双向GRU实现。前向GRU按照词序列$(x_1,x_2,...,x_T)$的顺序依次编码源语言端词,并得到一系列隐层状态$(\overrightarrow{h_1},\overrightarrow{h_2},...,\overrightarrow{h_T})$。类似的,后向GRU按照$(x_T,x_{T-1},...,x_1)$的顺序依次编码源语言端词,得到$(\overleftarrow{h_1},\overleftarrow{h_2},...,\overleftarrow{h_T})$。最后对于词$x_i$,通过拼接两个GRU的结果得到它的隐层状态,即$h_i=\left [ \overrightarrow{h_i^T},\overleftarrow{h_i^T} \right ]^{T}$。
@@ -107,7 +107,7 @@ GRU\[[2](#参考文献)\]是Cho等人在LSTM上提出的简化版本,也是RNN 其中$\phi _{\theta '}$是一个非线性激活函数;$c$是源语言句子的上下文向量,在不使用注意力机制时,如果[编码器](#编码器)的输出是源语言句子编码后的最后一个元素,则可以定义$c=h_T$;$u_i$是目标语言序列的第$i$个单词,$u_0$是目标语言序列的开始标记``,表示解码开始;$z_i$是$i$时刻解码RNN的隐层状态,$z_0$是一个全零的向量。 -1. 将$z_{i+1}$通过`softmax`归一化,得到目标语言序列的第$i+1$个单词的概率分布$p_{i+1}$。概率分布公式如下: +2. 将$z_{i+1}$通过`softmax`归一化,得到目标语言序列的第$i+1$个单词的概率分布$p_{i+1}$。概率分布公式如下:
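按上文的记号,该概率分布大致可写成如下形式(此处为示意性写法,偏置项等细节以原文公式为准):

$$p_{i+1}=\mathrm{softmax}(W_s z_{i+1}+b_z)$$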

@@ -115,9 +115,9 @@ GRU\[[2](#参考文献)\]是Cho等人在LSTM上提出的简化版本,也是RNN 其中$W_sz_{i+1}+b_z$是对每个可能的输出单词进行打分,再用softmax归一化就可以得到第$i+1$个词的概率$p_{i+1}$。 -1. 根据$p_{i+1}$和$u_{i+1}$计算代价。 +3. 根据$p_{i+1}$和$u_{i+1}$计算代价。 -2. 重复步骤1~3,直到目标语言序列中的所有词处理完毕。 +4. 重复步骤1~3,直到目标语言序列中的所有词处理完毕。 机器翻译任务的生成过程,通俗来讲就是根据预先训练的模型来翻译源语言句子。生成过程中的解码阶段和上述训练过程的有所差异,具体介绍请见[柱搜索算法](#柱搜索算法)。 @@ -146,7 +146,7 @@ GRU\[[2](#参考文献)\]是Cho等人在LSTM上提出的简化版本,也是RNN 其中,$align$可以看作是一个对齐模型,用来衡量目标语言中第$i$个词和源语言中第$j$个词的匹配程度。具体而言,这个程度是通过解码RNN的第$i$个隐层状态$z_i$和源语言句子的第$j$个上下文片段$h_j$计算得到的。传统的对齐模型中,目标语言的每个词明确对应源语言的一个或多个词(hard alignment);而在注意力模型中采用的是soft alignment,即任何两个目标语言和源语言词间均存在一定的关联,且这个关联强度是由模型计算得到的实数,因此可以融入整个NMT框架,并通过反向传播算法进行训练。

图6. 基于注意力机制的解码器
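结合上文的 $align$ 记号,注意力权重与上下文向量的一种常见计算方式可示意如下(仅作说明,具体形式以模型实现为准):

$$e_{ij}=align(z_i,h_j),\qquad a_{ij}=\frac{\exp(e_{ij})}{\sum_{k}\exp(e_{ik})},\qquad c_i=\sum_{j}a_{ij}h_j$$

即先对解码状态 $z_i$ 与每个源端片段 $h_j$ 打分,再经 softmax 得到软对齐权重 $a_{ij}$,最后加权求和得到该时刻送入解码器的上下文向量 $c_i$。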

@@ -213,7 +213,7 @@ max_length = 256 # 解码生成句子的最大长度 beam_size = 4 # beam search的柱宽度 batch_size = 64 # batch 中的样本数 -model_save_dir = "machine_translation.inference.model" +model_file = "machine_translation" ``` 接着定义所需要的数据输入: @@ -436,15 +436,16 @@ def loss_func(logits, label, trg_sequence_length): return avg_cost def optimizer_func(): - # 设置梯度裁剪 - fluid.clip.set_gradient_clip( - clip=fluid.clip.GradientClipByGlobalNorm(clip_norm=5.0)) + # 定义梯度裁剪策略 + clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=5.0) # 定义先增后降的学习率策略 lr_decay = fluid.layers.learning_rate_scheduler.noam_decay(hidden_dim, 1000) + # 定义优化器 return fluid.optimizer.Adam( learning_rate=lr_decay, regularization=fluid.regularizer.L2DecayRegularizer( - regularization_coeff=1e-4)) + regularization_coeff=1e-4), + grad_clip=clip) ``` ## 训练模型 @@ -538,7 +539,7 @@ for pass_id in six.moves.xrange(EPOCH_NUM): (pass_id, batch_id, loss_val)) batch_id += 1 # 保存模型 - fluid.io.save_params(exe, model_save_dir, main_program=train_prog) + fluid.save(train_prog, model_file) ``` ## 应用模型 @@ -572,7 +573,7 @@ loader.set_batch_generator(inputs_generator(batch_size, # 定义执行器,加载参数并绑定Program exe = fluid.Executor(places[0]) exe.run(startup_prog) -fluid.io.load_params(exe, model_save_dir, main_program=infer_prog) +fluid.load(infer_prog, model_file, exe) prog = fluid.CompiledProgram(infer_prog).with_data_parallel() ``` @@ -608,7 +609,7 @@ for data in loader(): ```txt Original sentence: A man in an orange hat starring at something . -Translated score and sentence: +Translated sentence: Ein Mann mit einem orangen Schutzhelm starrt auf etwas . Ein Mann mit einem gelben Schutzhelm starrt auf etwas . Ein Mann mit einem gelben Schutzhelm starrt etwas an . diff --git a/doc/fluid/user_guides/nlp_case/machine_translation/README.md b/doc/fluid/user_guides/nlp_case/machine_translation/README.md index 327dd5aaa9bcaf4becabe4ffe214dc8b9f5f7c7a..3d3796f4c6cf5136092aca69f8d0045403f4c236 100644 --- a/doc/fluid/user_guides/nlp_case/machine_translation/README.md +++ b/doc/fluid/user_guides/nlp_case/machine_translation/README.md @@ -155,15 +155,6 @@ import paddle.fluid.layers as pd from paddle.fluid.executor import Executor from functools import partial import os -try: - from paddle.fluid.contrib.trainer import * - from paddle.fluid.contrib.inferencer import * -except ImportError: - print( - "In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib", - file=sys.stderr) - from paddle.fluid.trainer import * - from paddle.fluid.inferencer import * dict_size = 30000 # dictionary dimension source_dict_dim = target_dict_dim = dict_size # source/target language dictionary dimension diff --git a/doc/fluid/user_guides/nlp_case/machine_translation/index.cn.html b/doc/fluid/user_guides/nlp_case/machine_translation/index.cn.html index 61a683444976ce59a5acab8eae1dd6566c62c44d..38a1cb1abf02d1da3640e5dc677586df10614e12 100644 --- a/doc/fluid/user_guides/nlp_case/machine_translation/index.cn.html +++ b/doc/fluid/user_guides/nlp_case/machine_translation/index.cn.html @@ -47,7 +47,7 @@ ### 说明 1. 硬件要求 本文可支持在CPU、GPU下运行 2. 对docker file cuda/cudnn的支持 如果您使用了本文配套的docker镜像,请注意:该镜像对GPU的支持仅限于CUDA 8,cuDNN 5 -3. 文档中代码和seq2seq.py不一致的问题 请注意:为使本文更加易读易用,我们拆分、调整了seq2seq.py的代码并放入本文。本文中代码与seq2seq.py的运行结果一致,如希望直接看到训练脚本输出效果,可运行[seq2seq.py](https://github.com/PaddlePaddle/book/blob/develop/08.machine_translation/seq2seq.py)。 +3. 
文档中代码和seq2seq.py不一致的问题 请注意:为使本文更加易读易用,我们拆分、调整了seq2seq.py的代码并放入本文。本文中代码与seq2seq.py的运行结果一致,如希望直接看到训练脚本输出效果,可运行[seq2seq.py](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/user_guides/nlp_case/machine_translation/seq2seq.py)。 ## 背景介绍 @@ -94,7 +94,7 @@ GRU\[[2](#参考文献)\]是Cho等人在LSTM上提出的简化版本,也是RNN - 重置门(reset gate):如果重置门关闭,会忽略掉历史信息,即历史不相干的信息不会影响未来的输出。 - 更新门(update gate):将LSTM的输入门和遗忘门合并,用于控制历史信息对当前时刻隐层输出的影响。如果更新门接近1,会把历史信息传递下去。

图2. GRU(门控循环单元)

@@ -126,11 +126,11 @@ GRU\[[2](#参考文献)\]是Cho等人在LSTM上提出的简化版本,也是RNN 编码阶段分为三步: -1. one-hot vector表示:将源语言句子$x=\left \{ x_1,x_2,...,x_T \right \}$的每个词$x_i$表示成一个列向量$w_i\epsilon \left \{ 0,1 \right \}^{\left | V \right |},i=1,2,...,T$。这个向量$w_i$的维度与词汇表大小$\left | V \right |$ 相同,并且只有一个维度上有值1(该位置对应该词在词汇表中的位置),其余全是0。 +1. one-hot vector表示:将源语言句子$x = \left(x_1,x_2,...,x_T\right)$的每个词$x_i$表示成一个列向量$w_i$。这个向量$w_i$的维度与词汇表大小$\left | V \right |$ 相同,并且只有一个维度上有值1(该位置对应该词在词汇表中的位置),其余全是0。 2. 映射到低维语义空间的词向量:one-hot vector表示存在两个问题,1)生成的向量维度往往很大,容易造成维数灾难;2)难以刻画词与词之间的关系(如语义相似性,也就是无法很好地表达语义)。因此,需再one-hot vector映射到低维的语义空间,由一个固定维度的稠密向量(称为词向量)表示。记映射矩阵为$C\epsilon R^{K\times \left | V \right |}$,用$s_i=Cw_i$表示第$i$个词的词向量,$K$为向量维度。 -3. 用RNN编码源语言词序列:这一过程的计算公式为$h_i=\varnothing _\theta \left ( h_{i-1}, s_i \right )$,其中$h_0$是一个全零的向量,$\varnothing _\theta$是一个非线性激活函数,最后得到的$\mathbf{h}=\left \{ h_1,..., h_T \right \}$就是RNN依次读入源语言$T$个词的状态编码序列。整句话的向量表示可以采用$\mathbf{h}$在最后一个时间步$T$的状态编码,或使用时间维上的池化(pooling)结果。 +3. 用RNN编码源语言词序列:这一过程的计算公式为$h_i=\phi_{\theta} \left ( h_{i-1}, s_i \right )$,其中$h_0$是一个全零的向量,$\phi _{\theta}$是一个非线性激活函数,最后得到的$\mathbf{h}=\left(h_1,..., h_T \right)$就是RNN依次读入源语言$T$个词的状态编码序列。整句话的向量表示可以采用$\mathbf{h}$在最后一个时间步$T$的状态编码,或使用时间维上的池化(pooling)结果。 第3步也可以使用双向循环神经网络实现更复杂的句编码表示,具体可以用双向GRU实现。前向GRU按照词序列$(x_1,x_2,...,x_T)$的顺序依次编码源语言端词,并得到一系列隐层状态$(\overrightarrow{h_1},\overrightarrow{h_2},...,\overrightarrow{h_T})$。类似的,后向GRU按照$(x_T,x_{T-1},...,x_1)$的顺序依次编码源语言端词,得到$(\overleftarrow{h_1},\overleftarrow{h_2},...,\overleftarrow{h_T})$。最后对于词$x_i$,通过拼接两个GRU的结果得到它的隐层状态,即$h_i=\left [ \overrightarrow{h_i^T},\overleftarrow{h_i^T} \right ]^{T}$。
@@ -149,7 +149,7 @@ GRU\[[2](#参考文献)\]是Cho等人在LSTM上提出的简化版本,也是RNN 其中$\phi _{\theta '}$是一个非线性激活函数;$c$是源语言句子的上下文向量,在不使用注意力机制时,如果[编码器](#编码器)的输出是源语言句子编码后的最后一个元素,则可以定义$c=h_T$;$u_i$是目标语言序列的第$i$个单词,$u_0$是目标语言序列的开始标记``,表示解码开始;$z_i$是$i$时刻解码RNN的隐层状态,$z_0$是一个全零的向量。 -1. 将$z_{i+1}$通过`softmax`归一化,得到目标语言序列的第$i+1$个单词的概率分布$p_{i+1}$。概率分布公式如下: +2. 将$z_{i+1}$通过`softmax`归一化,得到目标语言序列的第$i+1$个单词的概率分布$p_{i+1}$。概率分布公式如下:

@@ -157,9 +157,9 @@ GRU\[[2](#参考文献)\]是Cho等人在LSTM上提出的简化版本,也是RNN 其中$W_sz_{i+1}+b_z$是对每个可能的输出单词进行打分,再用softmax归一化就可以得到第$i+1$个词的概率$p_{i+1}$。 -1. 根据$p_{i+1}$和$u_{i+1}$计算代价。 +3. 根据$p_{i+1}$和$u_{i+1}$计算代价。 -2. 重复步骤1~3,直到目标语言序列中的所有词处理完毕。 +4. 重复步骤1~3,直到目标语言序列中的所有词处理完毕。 机器翻译任务的生成过程,通俗来讲就是根据预先训练的模型来翻译源语言句子。生成过程中的解码阶段和上述训练过程的有所差异,具体介绍请见[柱搜索算法](#柱搜索算法)。 @@ -188,7 +188,7 @@ GRU\[[2](#参考文献)\]是Cho等人在LSTM上提出的简化版本,也是RNN 其中,$align$可以看作是一个对齐模型,用来衡量目标语言中第$i$个词和源语言中第$j$个词的匹配程度。具体而言,这个程度是通过解码RNN的第$i$个隐层状态$z_i$和源语言句子的第$j$个上下文片段$h_j$计算得到的。传统的对齐模型中,目标语言的每个词明确对应源语言的一个或多个词(hard alignment);而在注意力模型中采用的是soft alignment,即任何两个目标语言和源语言词间均存在一定的关联,且这个关联强度是由模型计算得到的实数,因此可以融入整个NMT框架,并通过反向传播算法进行训练。

图6. 基于注意力机制的解码器

@@ -255,7 +255,7 @@ max_length = 256 # 解码生成句子的最大长度 beam_size = 4 # beam search的柱宽度 batch_size = 64 # batch 中的样本数 -model_save_dir = "machine_translation.inference.model" +model_file = "machine_translation" ``` 接着定义所需要的数据输入: @@ -478,15 +478,16 @@ def loss_func(logits, label, trg_sequence_length): return avg_cost def optimizer_func(): - # 设置梯度裁剪 - fluid.clip.set_gradient_clip( - clip=fluid.clip.GradientClipByGlobalNorm(clip_norm=5.0)) + # 定义梯度裁剪策略 + clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=5.0) # 定义先增后降的学习率策略 lr_decay = fluid.layers.learning_rate_scheduler.noam_decay(hidden_dim, 1000) + # 定义优化器 return fluid.optimizer.Adam( learning_rate=lr_decay, regularization=fluid.regularizer.L2DecayRegularizer( - regularization_coeff=1e-4)) + regularization_coeff=1e-4), + grad_clip=clip) ``` ## 训练模型 @@ -580,7 +581,7 @@ for pass_id in six.moves.xrange(EPOCH_NUM): (pass_id, batch_id, loss_val)) batch_id += 1 # 保存模型 - fluid.io.save_params(exe, model_save_dir, main_program=train_prog) + fluid.save(train_prog, model_file) ``` ## 应用模型 @@ -614,7 +615,7 @@ loader.set_batch_generator(inputs_generator(batch_size, # 定义执行器,加载参数并绑定Program exe = fluid.Executor(places[0]) exe.run(startup_prog) -fluid.io.load_params(exe, model_save_dir, main_program=infer_prog) +fluid.load(infer_prog, model_file, exe) prog = fluid.CompiledProgram(infer_prog).with_data_parallel() ``` @@ -650,7 +651,7 @@ for data in loader(): ```txt Original sentence: A man in an orange hat starring at something . -Translated score and sentence: +Translated sentence: Ein Mann mit einem orangen Schutzhelm starrt auf etwas . Ein Mann mit einem gelben Schutzhelm starrt auf etwas . Ein Mann mit einem gelben Schutzhelm starrt etwas an . diff --git a/doc/fluid/user_guides/nlp_case/machine_translation/index.html b/doc/fluid/user_guides/nlp_case/machine_translation/index.html index 8663a0fb928ca60b95c4184fc5f0983f06ac5ff7..e1bd7d27c1cec1f3e47765b72869583e3941bc65 100644 --- a/doc/fluid/user_guides/nlp_case/machine_translation/index.html +++ b/doc/fluid/user_guides/nlp_case/machine_translation/index.html @@ -197,15 +197,6 @@ import paddle.fluid.layers as pd from paddle.fluid.executor import Executor from functools import partial import os -try: - from paddle.fluid.contrib.trainer import * - from paddle.fluid.contrib.inferencer import * -except ImportError: - print( - "In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib", - file=sys.stderr) - from paddle.fluid.trainer import * - from paddle.fluid.inferencer import * dict_size = 30000 # dictionary dimension source_dict_dim = target_dict_dim = dict_size # source/target language dictionary dimension diff --git a/doc/fluid/user_guides/nlp_case/machine_translation/seq2seq.py b/doc/fluid/user_guides/nlp_case/machine_translation/seq2seq.py index e4c118e54ad6b7880347e02d0549d11c3c6dc9c8..a398c93ff879e0f2cc64a5b4d6ad13b3b660ab49 100644 --- a/doc/fluid/user_guides/nlp_case/machine_translation/seq2seq.py +++ b/doc/fluid/user_guides/nlp_case/machine_translation/seq2seq.py @@ -31,7 +31,7 @@ max_length = 256 beam_size = 4 batch_size = 64 -model_save_dir = "machine_translation.inference.model" +model_file = "machine_translation" class DecoderCell(layers.RNNCell): @@ -216,14 +216,14 @@ def loss_func(logits, label, trg_sequence_length): def optimizer_func(): - fluid.clip.set_gradient_clip(clip=fluid.clip.GradientClipByGlobalNorm( - clip_norm=5.0)) + clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=5.0) lr_decay = fluid.layers.learning_rate_scheduler.noam_decay(hidden_dim, 1000) return 
fluid.optimizer.Adam( learning_rate=lr_decay, regularization=fluid.regularizer.L2DecayRegularizer( - regularization_coeff=1e-4)) + regularization_coeff=1e-4), + grad_clip=clip) def inputs_generator(batch_size, pad_id, is_train=True): @@ -289,7 +289,7 @@ def train(use_cuda): print('pass_id: %d, batch_id: %d, loss: %f' % (pass_id, batch_id, loss_val)) batch_id += 1 - fluid.io.save_params(exe, model_save_dir, main_program=train_prog) + fluid.save(train_prog, model_file) def infer(use_cuda): @@ -312,7 +312,7 @@ def infer(use_cuda): exe = fluid.Executor(places[0]) exe.run(startup_prog) - fluid.io.load_params(exe, model_save_dir, main_program=infer_prog) + fluid.load(infer_prog, model_file, exe) prog = fluid.CompiledProgram(infer_prog).with_data_parallel() for data in loader(): diff --git a/doc/fluid/user_guides/nlp_case/understand_sentiment/README.cn.md b/doc/fluid/user_guides/nlp_case/understand_sentiment/README.cn.md index e09036cd42293ace4239b9e1424d5fedce0b6a43..199e18ed8854138e962327e9734021ab56807deb 100644 --- a/doc/fluid/user_guides/nlp_case/understand_sentiment/README.cn.md +++ b/doc/fluid/user_guides/nlp_case/understand_sentiment/README.cn.md @@ -151,7 +151,7 @@ BATCH_SIZE = 128 #batch的大小 ```python #文本卷积神经网络 def convolution_net(data, input_dim, class_dim, emb_dim, hid_dim): - emb = fluid.layers.embedding( + emb = fluid.embedding( input=data, size=[input_dim, emb_dim], is_sparse=True) conv_3 = fluid.nets.sequence_conv_pool( input=emb, @@ -183,7 +183,7 @@ def convolution_net(data, input_dim, class_dim, emb_dim, hid_dim): def stacked_lstm_net(data, input_dim, class_dim, emb_dim, hid_dim, stacked_num): #计算词向量 - emb = fluid.layers.embedding( + emb = fluid.embedding( input=data, size=[input_dim, emb_dim], is_sparse=True) #第一层栈 @@ -268,12 +268,12 @@ print("Loading IMDB word dict....") word_dict = paddle.dataset.imdb.word_dict() print ("Reading training data....") -train_reader = paddle.batch( - paddle.reader.shuffle( +train_reader = fluid.io.batch( + fluid.io.shuffle( paddle.dataset.imdb.train(word_dict), buf_size=25000), batch_size=BATCH_SIZE) print("Reading testing data....") -test_reader = paddle.batch( +test_reader = fluid.io.batch( paddle.dataset.imdb.test(word_dict), batch_size=BATCH_SIZE) ``` word_dict是一个字典序列,是词和label的对应关系,运行下一行可以看到具体内容: @@ -394,7 +394,7 @@ inference_scope = fluid.core.Scope() ```python reviews_str = [ - 'read the book forget the movie', 'this is a great movie', 'this is very bad' + b'read the book forget the movie', b'this is a great movie', b'this is very bad' ] reviews = [c.split() for c in reviews_str] @@ -404,6 +404,7 @@ for c in reviews: lod.append([word_dict.get(words, UNK) for words in c]) base_shape = [[len(c) for c in lod]] +lod = np.array(sum(lod, []), dtype=np.int64) tensor_words = fluid.create_lod_tensor(lod, base_shape, place) ``` diff --git a/doc/fluid/user_guides/nlp_case/understand_sentiment/README.md b/doc/fluid/user_guides/nlp_case/understand_sentiment/README.md index 0220bfe7141f3dff79a4995413e1a3b6b8d1c4cc..00ed9efb77e0d45bc15c212d1d6a67a06aebf07f 100644 --- a/doc/fluid/user_guides/nlp_case/understand_sentiment/README.md +++ b/doc/fluid/user_guides/nlp_case/understand_sentiment/README.md @@ -140,7 +140,7 @@ Note that `fluid.nets.sequence_conv_pool` contains both convolution and pooling ```python #Textconvolution neural network def convolution_net(data, input_dim, class_dim, emb_dim, hid_dim): - emb = fluid.layers.embedding( + emb = fluid.embedding( input=data, size=[input_dim, emb_dim], is_sparse=True) conv_3 = fluid.nets.sequence_conv_pool( 
input=emb, @@ -172,7 +172,7 @@ The code of the stack bidirectional LSTM `stacked_lstm_net` is as follows: def stacked_lstm_net(data, input_dim, class_dim, emb_dim, hid_dim, stacked_num): # Calculate word vectorvector - emb = fluid.layers.embedding( + emb = fluid.embedding( input=data, size=[input_dim, emb_dim], is_sparse=True) #First stack @@ -257,12 +257,12 @@ print("Loading IMDB word dict....") word_dict = paddle.dataset.imdb.word_dict() print ("Reading training data....") -train_reader = paddle.batch( - paddle.reader.shuffle( +train_reader = fluid.io.batch( + fluid.io.shuffle( paddle.dataset.imdb.train(word_dict), buf_size=25000), batch_size=BATCH_SIZE) print("Reading testing data....") -test_reader = paddle.batch( +test_reader = fluid.io.batch( paddle.dataset.imdb.test(word_dict), batch_size=BATCH_SIZE) ``` Word_dict is a dictionary sequence, which is the correspondence between words and labels. You can see it specifically by running the next code: @@ -383,7 +383,7 @@ Then we use `create_lod_tensor` to create the tensor of the detail level. For a ```python reviews_str = [ - 'read the book forget the movie', 'this is a great movie', 'this is very bad' + b'read the book forget the movie', b'this is a great movie', b'this is very bad' ] reviews = [c.split() for c in reviews_str] @@ -393,6 +393,7 @@ for c in reviews: lod.append([word_dict.get(words, UNK) for words in c]) base_shape = [[len(c) for c in lod]] +lod = np.array(sum(lod, []), dtype=np.int64) tensor_words = fluid.create_lod_tensor(lod, base_shape, place) ``` diff --git a/doc/fluid/user_guides/nlp_case/understand_sentiment/index.cn.html b/doc/fluid/user_guides/nlp_case/understand_sentiment/index.cn.html index d111e50a2292a010db8ee89d98b3237ff14790d2..a1d945d359e945f60d16bb135c6e47cf24a555d5 100644 --- a/doc/fluid/user_guides/nlp_case/understand_sentiment/index.cn.html +++ b/doc/fluid/user_guides/nlp_case/understand_sentiment/index.cn.html @@ -193,7 +193,7 @@ BATCH_SIZE = 128 #batch的大小 ```python #文本卷积神经网络 def convolution_net(data, input_dim, class_dim, emb_dim, hid_dim): - emb = fluid.layers.embedding( + emb = fluid.embedding( input=data, size=[input_dim, emb_dim], is_sparse=True) conv_3 = fluid.nets.sequence_conv_pool( input=emb, @@ -225,7 +225,7 @@ def convolution_net(data, input_dim, class_dim, emb_dim, hid_dim): def stacked_lstm_net(data, input_dim, class_dim, emb_dim, hid_dim, stacked_num): #计算词向量 - emb = fluid.layers.embedding( + emb = fluid.embedding( input=data, size=[input_dim, emb_dim], is_sparse=True) #第一层栈 @@ -310,12 +310,12 @@ print("Loading IMDB word dict....") word_dict = paddle.dataset.imdb.word_dict() print ("Reading training data....") -train_reader = paddle.batch( - paddle.reader.shuffle( +train_reader = fluid.io.batch( + fluid.io.shuffle( paddle.dataset.imdb.train(word_dict), buf_size=25000), batch_size=BATCH_SIZE) print("Reading testing data....") -test_reader = paddle.batch( +test_reader = fluid.io.batch( paddle.dataset.imdb.test(word_dict), batch_size=BATCH_SIZE) ``` word_dict是一个字典序列,是词和label的对应关系,运行下一行可以看到具体内容: @@ -436,7 +436,7 @@ inference_scope = fluid.core.Scope() ```python reviews_str = [ - 'read the book forget the movie', 'this is a great movie', 'this is very bad' + b'read the book forget the movie', b'this is a great movie', b'this is very bad' ] reviews = [c.split() for c in reviews_str] @@ -446,6 +446,7 @@ for c in reviews: lod.append([word_dict.get(words, UNK) for words in c]) base_shape = [[len(c) for c in lod]] +lod = np.array(sum(lod, []), dtype=np.int64) tensor_words = 
fluid.create_lod_tensor(lod, base_shape, place) ``` diff --git a/doc/fluid/user_guides/nlp_case/understand_sentiment/index.html b/doc/fluid/user_guides/nlp_case/understand_sentiment/index.html index 94ce282506488958d2b8783290f60aa41eea2040..ecd861f300028e4deacb742dbaffffef3c947556 100644 --- a/doc/fluid/user_guides/nlp_case/understand_sentiment/index.html +++ b/doc/fluid/user_guides/nlp_case/understand_sentiment/index.html @@ -182,7 +182,7 @@ Note that `fluid.nets.sequence_conv_pool` contains both convolution and pooling ```python #Textconvolution neural network def convolution_net(data, input_dim, class_dim, emb_dim, hid_dim): - emb = fluid.layers.embedding( + emb = fluid.embedding( input=data, size=[input_dim, emb_dim], is_sparse=True) conv_3 = fluid.nets.sequence_conv_pool( input=emb, @@ -214,7 +214,7 @@ The code of the stack bidirectional LSTM `stacked_lstm_net` is as follows: def stacked_lstm_net(data, input_dim, class_dim, emb_dim, hid_dim, stacked_num): # Calculate word vectorvector - emb = fluid.layers.embedding( + emb = fluid.embedding( input=data, size=[input_dim, emb_dim], is_sparse=True) #First stack @@ -299,12 +299,12 @@ print("Loading IMDB word dict....") word_dict = paddle.dataset.imdb.word_dict() print ("Reading training data....") -train_reader = paddle.batch( - paddle.reader.shuffle( +train_reader = fluid.io.batch( + fluid.io.shuffle( paddle.dataset.imdb.train(word_dict), buf_size=25000), batch_size=BATCH_SIZE) print("Reading testing data....") -test_reader = paddle.batch( +test_reader = fluid.io.batch( paddle.dataset.imdb.test(word_dict), batch_size=BATCH_SIZE) ``` Word_dict is a dictionary sequence, which is the correspondence between words and labels. You can see it specifically by running the next code: @@ -425,7 +425,7 @@ Then we use `create_lod_tensor` to create the tensor of the detail level. 
For a ```python reviews_str = [ - 'read the book forget the movie', 'this is a great movie', 'this is very bad' + b'read the book forget the movie', b'this is a great movie', b'this is very bad' ] reviews = [c.split() for c in reviews_str] @@ -435,6 +435,7 @@ for c in reviews: lod.append([word_dict.get(words, UNK) for words in c]) base_shape = [[len(c) for c in lod]] +lod = np.array(sum(lod, []), dtype=np.int64) tensor_words = fluid.create_lod_tensor(lod, base_shape, place) ``` diff --git a/doc/fluid/user_guides/nlp_case/understand_sentiment/train_conv.py b/doc/fluid/user_guides/nlp_case/understand_sentiment/train_conv.py index 471ea93de2f6cab1eced5022d9610962a6b2725f..456b127b92c6814fa44e7bc579e6e2f95110cb33 100644 --- a/doc/fluid/user_guides/nlp_case/understand_sentiment/train_conv.py +++ b/doc/fluid/user_guides/nlp_case/understand_sentiment/train_conv.py @@ -42,7 +42,7 @@ def parse_args(): def convolution_net(data, input_dim, class_dim, emb_dim, hid_dim): - emb = fluid.layers.embedding( + emb = fluid.embedding( input=data, size=[input_dim, emb_dim], is_sparse=True) conv_3 = fluid.nets.sequence_conv_pool( input=emb, @@ -88,16 +88,16 @@ def train(use_cuda, params_dirname): print("Reading training data....") if args.enable_ce: - train_reader = paddle.batch( + train_reader = fluid.io.batch( paddle.dataset.imdb.train(word_dict), batch_size=BATCH_SIZE) else: - train_reader = paddle.batch( - paddle.reader.shuffle( + train_reader = fluid.io.batch( + fluid.io.shuffle( paddle.dataset.imdb.train(word_dict), buf_size=25000), batch_size=BATCH_SIZE) print("Reading testing data....") - test_reader = paddle.batch( + test_reader = fluid.io.batch( paddle.dataset.imdb.test(word_dict), batch_size=BATCH_SIZE) feed_order = ['words', 'label'] @@ -205,8 +205,8 @@ def infer(use_cuda, params_dirname=None): # length 3, 4 and 2, respectively. # Note that lod info should be a list of lists. 
reviews_str = [ - 'read the book forget the movie', 'this is a great movie', - 'this is very bad' + b'read the book forget the movie', b'this is a great movie', + b'this is very bad' ] reviews = [c.split() for c in reviews_str] @@ -216,6 +216,7 @@ def infer(use_cuda, params_dirname=None): lod.append([np.int64(word_dict.get(words, UNK)) for words in c]) base_shape = [[len(c) for c in lod]] + lod = np.array(sum(lod, []), dtype=np.int64) tensor_words = fluid.create_lod_tensor(lod, base_shape, place) assert feed_target_names[0] == "words" diff --git a/doc/fluid/user_guides/nlp_case/understand_sentiment/train_dyn_rnn.py b/doc/fluid/user_guides/nlp_case/understand_sentiment/train_dyn_rnn.py index 6328d906618bcd34bb0f9d17c21827194ae1c328..b782f6435a68c0f01fae9e03568a8dc9946bee70 100644 --- a/doc/fluid/user_guides/nlp_case/understand_sentiment/train_dyn_rnn.py +++ b/doc/fluid/user_guides/nlp_case/understand_sentiment/train_dyn_rnn.py @@ -42,7 +42,7 @@ def parse_args(): def dynamic_rnn_lstm(data, input_dim, class_dim, emb_dim, lstm_size): - emb = fluid.layers.embedding( + emb = fluid.embedding( input=data, size=[input_dim, emb_dim], is_sparse=True) sentence = fluid.layers.fc(input=emb, size=lstm_size * 4, act='tanh') @@ -79,16 +79,16 @@ def train(use_cuda, params_dirname): print("Reading training data....") if args.enable_ce: - train_reader = paddle.batch( + train_reader = fluid.io.batch( paddle.dataset.imdb.train(word_dict), batch_size=BATCH_SIZE) else: - train_reader = paddle.batch( - paddle.reader.shuffle( + train_reader = fluid.io.batch( + fluid.io.shuffle( paddle.dataset.imdb.train(word_dict), buf_size=25000), batch_size=BATCH_SIZE) print("Reading testing data....") - test_reader = paddle.batch( + test_reader = fluid.io.batch( paddle.dataset.imdb.test(word_dict), batch_size=BATCH_SIZE) feed_order = ['words', 'label'] @@ -194,8 +194,8 @@ def infer(use_cuda, params_dirname=None): # length 3, 4 and 2, respectively. # Note that lod info should be a list of lists. 
reviews_str = [ - 'read the book forget the movie', 'this is a great movie', - 'this is very bad' + b'read the book forget the movie', b'this is a great movie', + b'this is very bad' ] reviews = [c.split() for c in reviews_str] @@ -205,6 +205,7 @@ def infer(use_cuda, params_dirname=None): lod.append([np.int64(word_dict.get(words, UNK)) for words in c]) base_shape = [[len(c) for c in lod]] + lod = np.array(sum(lod, []), dtype=np.int64) tensor_words = fluid.create_lod_tensor(lod, base_shape, place) assert feed_target_names[0] == "words" diff --git a/doc/fluid/user_guides/nlp_case/understand_sentiment/train_stacked_lstm.py b/doc/fluid/user_guides/nlp_case/understand_sentiment/train_stacked_lstm.py index cf392f5ebccbbe514ee30b054a2cf46877c94e4b..6db40f42a3fbda12c764d76151db21a1c3e4df04 100644 --- a/doc/fluid/user_guides/nlp_case/understand_sentiment/train_stacked_lstm.py +++ b/doc/fluid/user_guides/nlp_case/understand_sentiment/train_stacked_lstm.py @@ -47,7 +47,7 @@ def stacked_lstm_net(data, input_dim, class_dim, emb_dim, hid_dim, stacked_num): assert stacked_num % 2 == 1 - emb = fluid.layers.embedding( + emb = fluid.embedding( input=data, size=[input_dim, emb_dim], is_sparse=True) fc1 = fluid.layers.fc(input=emb, size=hid_dim) @@ -99,11 +99,11 @@ def train(use_cuda, params_dirname): print("Reading training data....") if args.enable_ce: - train_reader = paddle.batch( + train_reader = fluid.io.batch( paddle.dataset.imdb.train(word_dict), batch_size=BATCH_SIZE) else: - train_reader = paddle.batch( - paddle.reader.shuffle( + train_reader = fluid.io.batch( + fluid.io.shuffle( paddle.dataset.imdb.train(word_dict), buf_size=25000), batch_size=BATCH_SIZE) @@ -216,8 +216,8 @@ def infer(use_cuda, params_dirname=None): # length 3, 4 and 2, respectively. # Note that lod info should be a list of lists. reviews_str = [ - 'read the book forget the movie', 'this is a great movie', - 'this is very bad' + b'read the book forget the movie', b'this is a great movie', + b'this is very bad' ] reviews = [c.split() for c in reviews_str] @@ -227,6 +227,7 @@ def infer(use_cuda, params_dirname=None): lod.append([np.int64(word_dict.get(words, UNK)) for words in c]) base_shape = [[len(c) for c in lod]] + lod = np.array(sum(lod, []), dtype=np.int64) tensor_words = fluid.create_lod_tensor(lod, base_shape, place) assert feed_target_names[0] == "words" diff --git a/doc/fluid/user_guides/rec_case/index_cn.rst b/doc/fluid/user_guides/rec_case/index_cn.rst index ee8e55abb7f6c00a4f306113ab5bb367456fbdf2..0894710bf03dce07f72ac071e471811ac222a5c7 100644 --- a/doc/fluid/user_guides/rec_case/index_cn.rst +++ b/doc/fluid/user_guides/rec_case/index_cn.rst @@ -2,6 +2,10 @@ 推荐 ################ +.. todo:: + +推荐系统是利用电子商务网站向客户提供商品信息和建议,帮助用户决定应该购买什么产品,模拟销售人员帮助客户完成购买过程。在这里PaddlePaddle为大家提供了一篇个性化推荐的案例详解: + .. 
toctree:: :titlesonly: diff --git a/doc/fluid/user_guides/simple_case/fit_a_line/README.cn.md b/doc/fluid/user_guides/simple_case/fit_a_line/README.cn.md index 69cc494df6608d9e1026efbe618e532b18fba8f3..eb5b8c3446b97dae8fbe3e7879140216513a1120 100644 --- a/doc/fluid/user_guides/simple_case/fit_a_line/README.cn.md +++ b/doc/fluid/user_guides/simple_case/fit_a_line/README.cn.md @@ -143,13 +143,13 @@ import sys ```python BATCH_SIZE = 20 -train_reader = paddle.batch( - paddle.reader.shuffle( +train_reader = fluid.io.batch( + fluid.io.shuffle( paddle.dataset.uci_housing.train(), buf_size=500), batch_size=BATCH_SIZE) -test_reader = paddle.batch( - paddle.reader.shuffle( +test_reader = fluid.io.batch( + fluid.io.shuffle( paddle.dataset.uci_housing.test(), buf_size=500), batch_size=BATCH_SIZE) ``` @@ -179,13 +179,13 @@ def reader_creator(train_data): yield d[:-1], d[-1:] return reader -train_reader = paddle.batch( - paddle.reader.shuffle( +train_reader = fluid.io.batch( + fluid.io.shuffle( reader_creator(train_data), buf_size=500), batch_size=BATCH_SIZE) -test_reader = paddle.batch( - paddle.reader.shuffle( +test_reader = fluid.io.batch( + fluid.io.shuffle( reader_creator(test_data), buf_size=500), batch_size=BATCH_SIZE) ``` @@ -349,7 +349,7 @@ with fluid.scope_guard(inference_scope): fetch_targets] = fluid.io.load_inference_model(params_dirname, infer_exe) # 载入预训练模型 batch_size = 10 - infer_reader = paddle.batch( + infer_reader = fluid.io.batch( paddle.dataset.uci_housing.test(), batch_size=batch_size) # 准备测试集 infer_data = next(infer_reader()) diff --git a/doc/fluid/user_guides/simple_case/fit_a_line/README.md b/doc/fluid/user_guides/simple_case/fit_a_line/README.md index 8760bbf0c6ac488f183a3adedb7bf6cde4058942..2bc2b24c4d8fa3a4bff0ff29d8fb02f4437e8e95 100644 --- a/doc/fluid/user_guides/simple_case/fit_a_line/README.md +++ b/doc/fluid/user_guides/simple_case/fit_a_line/README.md @@ -145,13 +145,13 @@ Next we define the data feeder for training. 
The data feeder reads a batch of da ```python BATCH_SIZE = 20 -train_reader = paddle.batch( - paddle.reader.shuffle( +train_reader = fluid.io.batch( + fluid.io.shuffle( paddle.dataset.uci_housing.train(), buf_size=500), batch_size=BATCH_SIZE) -test_reader = paddle.batch( - paddle.reader.shuffle( +test_reader = fluid.io.batch( + fluid.io.shuffle( paddle.dataset.uci_housing.test(), buf_size=500), batch_size=BATCH_SIZE) ``` @@ -181,13 +181,13 @@ def reader_creator(train_data): yield d[:-1], d[-1:] return reader -train_reader = paddle.batch( - paddle.reader.shuffle( +train_reader = fluid.io.batch( + fluid.io.shuffle( reader_creator(train_data), buf_size=500), batch_size=BATCH_SIZE) -test_reader = paddle.batch( - paddle.reader.shuffle( +test_reader = fluid.io.batch( + fluid.io.shuffle( reader_creator(test_data), buf_size=500), batch_size=BATCH_SIZE) ``` @@ -351,7 +351,7 @@ with fluid.scope_guard(inference_scope): fetch_targets] = fluid.io.load_inference_model(params_dirname, infer_exe) # load pre-predict model batch_size = 10 - infer_reader = paddle.batch( + infer_reader = fluid.io.batch( paddle.dataset.uci_housing.test(), batch_size=batch_size) # prepare test dataset infer_data = next(infer_reader()) diff --git a/doc/fluid/user_guides/simple_case/fit_a_line/index.cn.html b/doc/fluid/user_guides/simple_case/fit_a_line/index.cn.html index e47cda5f7e74d4e41d0d341fab126712dc78434a..02117e6d41fdcfdae5262cba59f2b8d1ee92a79d 100644 --- a/doc/fluid/user_guides/simple_case/fit_a_line/index.cn.html +++ b/doc/fluid/user_guides/simple_case/fit_a_line/index.cn.html @@ -185,13 +185,13 @@ import sys ```python BATCH_SIZE = 20 -train_reader = paddle.batch( - paddle.reader.shuffle( +train_reader = fluid.io.batch( + fluid.io.shuffle( paddle.dataset.uci_housing.train(), buf_size=500), batch_size=BATCH_SIZE) -test_reader = paddle.batch( - paddle.reader.shuffle( +test_reader = fluid.io.batch( + fluid.io.shuffle( paddle.dataset.uci_housing.test(), buf_size=500), batch_size=BATCH_SIZE) ``` @@ -221,13 +221,13 @@ def reader_creator(train_data): yield d[:-1], d[-1:] return reader -train_reader = paddle.batch( - paddle.reader.shuffle( +train_reader = fluid.io.batch( + fluid.io.shuffle( reader_creator(train_data), buf_size=500), batch_size=BATCH_SIZE) -test_reader = paddle.batch( - paddle.reader.shuffle( +test_reader = fluid.io.batch( + fluid.io.shuffle( reader_creator(test_data), buf_size=500), batch_size=BATCH_SIZE) ``` @@ -391,7 +391,7 @@ with fluid.scope_guard(inference_scope): fetch_targets] = fluid.io.load_inference_model(params_dirname, infer_exe) # 载入预训练模型 batch_size = 10 - infer_reader = paddle.batch( + infer_reader = fluid.io.batch( paddle.dataset.uci_housing.test(), batch_size=batch_size) # 准备测试集 infer_data = next(infer_reader()) diff --git a/doc/fluid/user_guides/simple_case/fit_a_line/index.html b/doc/fluid/user_guides/simple_case/fit_a_line/index.html index 70e7b0eab98652434f4e030e7888d07290176ef3..3496bd7c54d28fa5c96a6e197bec906548698dee 100644 --- a/doc/fluid/user_guides/simple_case/fit_a_line/index.html +++ b/doc/fluid/user_guides/simple_case/fit_a_line/index.html @@ -187,13 +187,13 @@ Next we define the data feeder for training. 
The data feeder reads a batch of da ```python BATCH_SIZE = 20 -train_reader = paddle.batch( - paddle.reader.shuffle( +train_reader = fluid.io.batch( + fluid.io.shuffle( paddle.dataset.uci_housing.train(), buf_size=500), batch_size=BATCH_SIZE) -test_reader = paddle.batch( - paddle.reader.shuffle( +test_reader = fluid.io.batch( + fluid.io.shuffle( paddle.dataset.uci_housing.test(), buf_size=500), batch_size=BATCH_SIZE) ``` @@ -223,13 +223,13 @@ def reader_creator(train_data): yield d[:-1], d[-1:] return reader -train_reader = paddle.batch( - paddle.reader.shuffle( +train_reader = fluid.io.batch( + fluid.io.shuffle( reader_creator(train_data), buf_size=500), batch_size=BATCH_SIZE) -test_reader = paddle.batch( - paddle.reader.shuffle( +test_reader = fluid.io.batch( + fluid.io.shuffle( reader_creator(test_data), buf_size=500), batch_size=BATCH_SIZE) ``` @@ -393,7 +393,7 @@ with fluid.scope_guard(inference_scope): fetch_targets] = fluid.io.load_inference_model(params_dirname, infer_exe) # load pre-predict model batch_size = 10 - infer_reader = paddle.batch( + infer_reader = fluid.io.batch( paddle.dataset.uci_housing.test(), batch_size=batch_size) # prepare test dataset infer_data = next(infer_reader()) diff --git a/doc/fluid/user_guides/simple_case/fit_a_line/train.py b/doc/fluid/user_guides/simple_case/fit_a_line/train.py index 41dde8b852a01233f2d2bcc695344b3620a39e18..20fe8592da09af8b339df492622c536fde66e1ce 100644 --- a/doc/fluid/user_guides/simple_case/fit_a_line/train.py +++ b/doc/fluid/user_guides/simple_case/fit_a_line/train.py @@ -74,18 +74,16 @@ def main(): batch_size = 20 if args.enable_ce: - train_reader = paddle.batch( + train_reader = fluid.io.batch( paddle.dataset.uci_housing.train(), batch_size=batch_size) - test_reader = paddle.batch( + test_reader = fluid.io.batch( paddle.dataset.uci_housing.test(), batch_size=batch_size) else: - train_reader = paddle.batch( - paddle.reader.shuffle( - paddle.dataset.uci_housing.train(), buf_size=500), + train_reader = fluid.io.batch( + fluid.io.shuffle(paddle.dataset.uci_housing.train(), buf_size=500), batch_size=batch_size) - test_reader = paddle.batch( - paddle.reader.shuffle( - paddle.dataset.uci_housing.test(), buf_size=500), + test_reader = fluid.io.batch( + fluid.io.shuffle(paddle.dataset.uci_housing.test(), buf_size=500), batch_size=batch_size) # feature vector of length 13 @@ -172,7 +170,7 @@ def main(): ] = fluid.io.load_inference_model(params_dirname, infer_exe) batch_size = 10 - infer_reader = paddle.batch( + infer_reader = fluid.io.batch( paddle.dataset.uci_housing.test(), batch_size=batch_size) infer_data = next(infer_reader()) diff --git a/doc/fluid/user_guides/simple_case/index_cn.rst b/doc/fluid/user_guides/simple_case/index_cn.rst index 4d44cf7e7f2c66890c7ad8b5f5e4a62fe326a06d..4b954ebfdc2449a33e7607d7516ac64c4cce3543 100644 --- a/doc/fluid/user_guides/simple_case/index_cn.rst +++ b/doc/fluid/user_guides/simple_case/index_cn.rst @@ -2,10 +2,13 @@ 简单案例 ################ +.. todo:: + +这里是基于PaddlePaddle实现的简单深度学习入门案例,帮助您更快速的了解飞桨的使用方法,并解决简单深度学习问题,以下是具体的案例详解: + .. 
toctree:: :titlesonly: fit_a_line/README.cn.md recognize_digits/README.cn.md - image_classification/README.cn.md word2vec/README.cn.md diff --git a/doc/fluid/user_guides/simple_case/index_en.rst b/doc/fluid/user_guides/simple_case/index_en.rst index 91ebfc14573d5ea04a0263cbceb31636928ffe88..bccd0a4b83aed64ab7501460fb6770767755ec0a 100644 --- a/doc/fluid/user_guides/simple_case/index_en.rst +++ b/doc/fluid/user_guides/simple_case/index_en.rst @@ -8,6 +8,5 @@ Simple Case fit_a_line/README.md recognize_digits/README.md - image_classification/README.md word2vec/README.md diff --git a/doc/fluid/user_guides/simple_case/word2vec/README.md b/doc/fluid/user_guides/simple_case/word2vec/README.md index 8d7acaea35d2f4a81e898533c07e525f11dc354f..6393aec48987aaa61f94216858c5da5f67a7fc68 100644 --- a/doc/fluid/user_guides/simple_case/word2vec/README.md +++ b/doc/fluid/user_guides/simple_case/word2vec/README.md @@ -289,15 +289,15 @@ def optimizer_func(): - Now we can start training. This version is much simpler than before. We have ready-made training and test sets: `paddle.dataset.imikolov.train()` and `paddle.dataset.imikolov.test()`. Both will return a reader. In PaddlePaddle, the reader is a Python function that reads the next piece of data when called each time . It is a Python generator. -`paddle.batch` will read in a reader and output a batched reader. We can also output the training of each step and batch during the training process. +`fluid.io.batch` will read in a reader and output a batched reader. We can also output the training of each step and batch during the training process. ```python def train(if_use_cuda, params_dirname, is_sparse=True): place = fluid.CUDAPlace(0) if if_use_cuda else fluid.CPUPlace() - train_reader = paddle.batch( + train_reader = fluid.io.batch( paddle.dataset.imikolov.train(word_dict, N), BATCH_SIZE) - test_reader = paddle.batch( + test_reader = fluid.io.batch( paddle.dataset.imikolov.test(word_dict, N), BATCH_SIZE) first_word = fluid.data(name='firstw', shape=[None, 1], dtype='int64') diff --git a/doc/fluid/user_guides/simple_case/word2vec/train.py b/doc/fluid/user_guides/simple_case/word2vec/train.py index d3c9a985c839498c73bcd2a4c053ed4f6cb2ec7a..beed63eb77425165a603fd45d3d9447f2d7bd963 100644 --- a/doc/fluid/user_guides/simple_case/word2vec/train.py +++ b/doc/fluid/user_guides/simple_case/word2vec/train.py @@ -98,9 +98,9 @@ def optimizer_func(): def train(if_use_cuda, params_dirname, is_sparse=True): place = fluid.CUDAPlace(0) if if_use_cuda else fluid.CPUPlace() - train_reader = paddle.batch( + train_reader = fluid.io.batch( paddle.dataset.imikolov.train(word_dict, N), BATCH_SIZE) - test_reader = paddle.batch( + test_reader = fluid.io.batch( paddle.dataset.imikolov.test(word_dict, N), BATCH_SIZE) first_word = fluid.data(name='firstw', shape=[None, 1], dtype='int64') diff --git a/doc/fluid/user_guides/tools/deploy_ctr_on_baidu_cloud_cn.rst b/doc/fluid/user_guides/tools/deploy_ctr_on_baidu_cloud_cn.rst deleted file mode 100644 index f679394fe8e0d5c6894fe4f729606e032d8ce21e..0000000000000000000000000000000000000000 --- a/doc/fluid/user_guides/tools/deploy_ctr_on_baidu_cloud_cn.rst +++ /dev/null @@ -1,588 +0,0 @@ -.. role:: raw-html-m2r(raw) - :format: html - - -ELASTIC CTR -=========== - -——百度云分布式训练CTR预估任务和Serving流程一键部署 - - -* `1. 总体概览 <#head1>`_ -* `2. 前置需求 <#head2>`_ -* `3. 分布式训练+Serving方案一键部署 <#head3>`_ -* `4. 查看结果 <#head4>`_ -* `5. 二次开发指南 <#head5>`_ - -:raw-html-m2r:`1. 
总体概览` -------------- - -本项目提供了端到端的CTR训练和二次开发的解决方案,主要特点: - - -* 整体方案在k8s环境一键部署,可快速搭建与验证效果 -* 基于Paddle transpiler模式的大规模分布式高速训练 -* 训练资源弹性伸缩 -* 工业级稀疏参数Serving组件,高并发条件下单位时间吞吐总量是Redis的13倍 [\ `注1 <#annotation_1>`_\ ] - -本方案整体流程如下图所示: - - -.. image:: src/overview.png - :target: src/overview.png - :alt: image - - -其中: - - -* trainer/pserver: 训练环节采用PaddlePaddle parameter server模式,对应trainer和pserver角色。分布式训练使用\ `volcano `_\ 做批量任务管理工具 -* file server: 训练产出的模型文件,托管到File Server,供下游模块下载;训练产出的文件包括:ProgramDesc和模型参数,模型参数中最大的embedding由工具转换为seqfile格式,经过一系列流程配送到Cube分布式稀疏参数服务,其余模型参数保持不变,配送到Paddle Serving模块 -* cube-transfer: 负责监控上游训练作业产出的模型文件(hadoop sequence file)变化,拉取到本地,并调用cube-builder构建Cube字典文件;通知cube-agent节点拉取最新的字典文件,并维护各个cube-server上版本一致性 -* cube-builder: 负责将训练作业产出的模型文件(hadoop sequence file格式)转换成可以被cube-server加载的字典文件。字典文件具有特定的数据结构,针对尺寸和内存中访问做了高度优化 -* cube-server: 提供分片kv读写能力的服务节点 -* cube-agent: 与cube-server同机部署,接收cube-transfer下发的字典文件更新命令,拉取数据到本地,通知cube-server进行更新 -* Paddle Serving: 加载CTR预估任务模型ProgramDesc和dense参数,提供预测服务 -* Client: CTR预估任务的demo客户端 - -以上组件串联完成从训练到预测部署的所有流程。本文档所提供的一键部署脚本\ `paddle-suite.sh `_\ 可一键部署上述所有组件。 - -用户可以参考本部署方案,将基于PaddlePaddle的分布式训练和Serving应用到业务环境,也可以在本方案基础上做功能增强和改进,直接使用。具体的,用户可以: - - -* 指定数据集的输入和读取方式,来feed不同的数据集和数据集格式;相应的修改Serving代码以适应新模型 -* 指定训练的规模,包括参数服务器的数量和训练节点的数量 -* 指定Cube参数服务器的分片数量和副本数量 - -在本文第4部分会详细解释以上二次开发的实际操作。 - -本文主要内容: - -**第2节 前置需求** 指导用户从零开始,在百度云上申请BCE集群,并部署volcano工具。本方案需使用\ `volcano `_\ 做训练环节批量任务管理工具,目前在百度云上验证通过 - -**第3节 分布式训练+Serving方案部署** 使用paddle-suite.sh,一键部署分布式训练+serving完整流程;并详细解释脚本每一步的工作和含义 - -**第4节 查看结果** 根据各个pod输出,验证一键安装状态 - -**第5节 二次开发** 提出本一键部署方案可定制改善的部分,给出具体修改位置等 - -:raw-html-m2r:`2. 前置需求` ------------- - -运行本方案前,需要用户已经搭建好k8s集群,并安装好volcano组件。k8s环境部署比较复杂,本文不涉及。百度智能云CCE容器引擎申请后即可使用,仅以百度云上创建k8s为例。 - -2.1 创建k8s集群 -^^^^^^^^^^^^ - -请参考 -`百度智能云CCE容器引擎帮助文档-创建集群 `_\ ,在百度智能云上建立一个集群,节点配置需要满足如下要求 - - -* CPU核数 > 4 - -申请容器引擎示例: - - -.. image:: src/ctr_node.png - :target: src/ctr_node.png - :alt: image - - -创建完成后,即可参考\ `百度智能云CCE容器引擎帮助文档-查看集群 `_\ ,查看刚刚申请的集群信息。 - -2.2 如何操作集群 -^^^^^^^^^^^^^ - -集群的操作可以通过百度云web或者通过kubectl工具进行,推荐用kubectl工具。 - -对于百度云k8s集群,客户端kubectl需要和百度云上kubernetes版本对应,请参考\ `百度智能云CCE容器引擎帮助文档-kubectl管理配置 `_\ 查看当前所用的kubernetes版本,并参考kubernetes官方文档下载对应版本的kubectrl版本进行安装。 - -* 注意: 本操作指南给出的操作步骤都是基于linux操作环境的。 - - -* - 首先请参考\ `官方安装说明 `_\ ,安装和百度云kubernetes版本对应的的kubectl。 - -* - 配置kubectl,下载集群凭证。在集群界面下载集群配置文件,放在kubectl的默认配置路径(请检查~/.kube目录是否存在,若没有请创建) - -.. code-block:: bash - - $ mv kubectl.conf ~/.kube/config - - -* 配置完成后,您即可以使用kubectl从本地计算机访问Kubernetes集群 - -.. code-block:: bash - - $ kubectl get node - - -* 关于kubectl的其他信息,可以参考\ `Overview of kubectl `_\ 。 - -2.3 设置访问权限 -^^^^^^^^^^ - -建立分布式任务需要pod间有API互相访问的权限,可以按如下步骤 - -.. code-block:: bash - - $ kubectl create rolebinding default-view --clusterrole=view --serviceaccount=default:default --namespace=default - -注意: --namespace 指定的default 为创建集群时候的名称 - -2.4 安装Volcano -^^^^^^^^^^ - -我们使用volcano作为训练阶段的批量任务管理工具。关于volcano的详细信息,请参考\ `官方网站 `_\ 的Documentation。 - -执行以下命令安装volcano到k8s集群: - -.. code-block:: bash - - $ kubectl apply -f https://raw.githubusercontent.com/volcano-sh/volcano/master/installer/volcano-development.yaml - - -.. image:: src/ctr_volcano_install.png - :target: src/ctr_volcano_install.png - :alt: image - - -3. :raw-html-m2r:`分布式训练+Serving方案一键部署` ---------------------------------- - -3.1 下载部署方案脚本文件 -^^^^^^^^^^^^ - -请将\ `本方案所需所有脚本文件 `_\ 下载到本地 - -3.2 一键部署 -^^^^^^^^^^^ - -执行以下脚本,一键将所有组件部署到k8s集群。 - -.. 
code-block:: bash - - $ bash paddle-suite.sh - -请参考\ **3.3-3.8节**\ 验证每一步的安装是否正确,\ **第4节**\ 验证训练过程和预测服务结果。 - -任务的所有脚本文件可以访问\ `这里 `_\ 获取。 - -**注**\ :以下\ **3.3-3.8节所述内容已经在一键部署脚本中包含,无需手动执行**\ 。但为方便理解,将该脚本的每一步执行过程给出说明。 - -3.3 选择一个node作为输出节点 -^^^^^^^^^^^^^^^^ - -.. code-block:: bash - - $ kubectl label nodes $NODE_NAME nodeType=model - -这句话的意思是给这个node做一个标记,之后的文件服务和模型产出都被强制分配在这个node上进行,把NAME的一串字符替换 \$NODE_NAME即可。 - -3.4 启动文件服务器 -^^^^^^^^^^^^^^ - -.. code-block:: bash - - $ kubectl apply -f fileserver.yaml - -运行file server的启动脚本kubectl apply -f ftp.yaml,启动文件服务器 - -验证:通过\ ``kubectl get pod``\ 命令查看是否file-server这个pod已经running,通过\ ``kubectl get service``\ 命令查看是否file-server service是否存在: - -.. code-block:: bash - - $ kubectl get pod - - -.. image:: src/file_server_pod.png - :target: src/file_server_pod.png - :alt: image - - -.. code-block:: - - $ kubectl get service - - -.. image:: src/file_server_svc.png - :target: src/file_server_svc.png - :alt: image - - -3.5 启动Cube稀疏参数服务器 -^^^^^^^^^^^^^^^^ - -.. code-block:: bash - - $ kubectl apply -f cube.yaml - -验证:通过\ ``kubectl get service``\ 命令查看是否cube-0和cube-1这2个service存在,则说明cube server/agent启动成功。 - -.. code-block:: - - $ kubectl get service - - -.. image:: src/cube.png - :target: src/cube.png - :alt: image - - -**注**\ :分片数量可根据稀疏字典大小灵活修改,参考5.3节。 - -3.6 启动Paddle Serving -^^^^^^^^^^^^^^^ - -.. code-block:: bash - - $ kubectl apply -f paddleserving.yaml - -验证:通过\ ``kubectl get pod``\ 查看serving pod是否running状态;通过\ ``kubectl get service``\ 查看paddleserving服务是否存在: - -.. code-block:: bash - - $ kubectl get pod - - -.. image:: src/paddleserving_pod.png - :target: src/paddleserving_pod.png - :alt: image - - -.. code-block:: bash - - $ kubectl get service - - -.. image:: src/paddleserving_svc.png - :target: src/paddleserving_svc.png - :alt: image - - -3.7 启动Cube稀疏参数服务器配送工具 -^^^^^^^^^^^^^^^^^^^^^^^^ - -.. code-block:: bash - - $ kubectl apply -f transfer.yaml - -验证:通过\ ``kubectl get pod``\ 查看cube-transfer这个pod是否是running状态 - -.. code-block:: bash - - $ kubectl get pod - -这个cube-transfer配送工具会把训练好的模型从下面要介绍的edl-demo-trainer-0上通过file-server服务拉取到本地,经过cube-builder做格式转换,配送给各个分片cube-server,最终目的是给PaddleServing来进行稀疏参数查询。 - -**在训练任务结束前,cube-transfer会一直等待上游数据产出。直到检测到上游模型文件生成后,开始启动配送。可通过日志观察cube-transfer的工作状态:** - -.. code-block:: - - $ kubectl logs cube-transfer - -如果出现最后wait 5min这样的字样,说明上一轮的模型已经配送成功了,接下来就可以做最后PaddleServing的测试了。 - - -.. image:: src/transfer.png - :target: src/transfer.png - :alt: image - - -3.8 执行Paddle CTR分布式训练 -^^^^^^^^^^^^^^^^^^^^^^^ - -.. code-block:: bash - - $ kubectl apply -f ctr.yaml - -验证:通过\ ``kubectl get pod``\ 查看edl-demo-trainer-0/edl-demo-trainer-1, edl-demo-pserver-0/edl-demo-pserver-1/edl-demo-pserver-2, edl-demo-model-out-trainer-0等pod是否是running状态 - -.. code-block:: bash - - $ kubectl get pod - -我们可以通过\ ``kubectl logs edl-demo-trainer-0``\ 来查看训练的进度,如果pass一直为0就继续等待,通常需要大概3-5分钟的之间会完成第一轮pass,这时候就会生成inference_model。 - - -.. image:: src/ctr.png - :target: src/ctr.png - :alt: image - - -4. :raw-html-m2r:``\ 查看结果\ :raw-html-m2r:`` -------------------------------------------- - -4.1 查看训练日志 -^^^^^^^^^^^^^ - -百度云容器引擎CCE提供了web操作台方便查看pod的运行状态。 - -本次训练任务将启动3个pserver节点,3个trainer节点。 - -可以通过检查pserver和trainer的log来检查任务运行状态。 Trainer日志示例: - - -.. image:: src/ctr_trainer_log.png - :target: src/ctr_trainer_log.png - :alt: image - - -pserver日志示例: - - -.. image:: src/ctr_pserver_log.png - :target: src/ctr_pserver_log.png - :alt: image - - -4.2 验证Paddle Serving预测结果 -^^^^^^^^^^^^^^^^^^^ - -执行 - -.. 
code-block:: bash - - $ kubectl apply -f paddleclient.yaml - -用如下命令进入容器内,在/client/ctr_prediction目录下,启动CTR预估任务客户端,并通过日志查看预测结果 - -.. code-block:: bash - - # 进入容器 - $ kubectl exec -ti pdservingclient /bin/bash - - # 此命令在容器内执行 - $ bin/ctr_prediction - -如果运行正常的话,会在一段时间后退出,紧接着就可以在log/ctr_prediction.INFO的最后几行看到类似于这样的日志 - - -.. image:: src/paddleclient.png - :target: src/paddleclient.png - :alt: image - - -5. :raw-html-m2r:`二次开发指南` ------------------------------ - -5.1 指定数据集的输入和读取方式 -^^^^^^^^^^^^^^^^^^^ - -现有的数据的输入是从edldemo镜像当中的/workspace/ctr/data/download.sh目录进行下载。下载之后会解压在/workspace/ctr/data/raw文件夹当中,包含train.txt和test.txt。所有的数据的每一行通过空格隔开40个属性。 - -然后在train.py当中给出数据集的读取方式 - - -.. image:: src/pyreader.png - :target: src/pyreader.png - :alt: image - - -这里面包含了连续数据和离散数据。 连续数据是index [1,14),离散数据是index [14, 40),label是index 0,分别对应最后yield[dense_feature] + sparse_feature +[label]。当离散的数据和连续的数据格式和样例有不同,需要用户在这里进行指定,并且可以在__init__函数当中参考样例的写法对连续数据进行归一化。 - -对于数据的来源,文章给出的是download.sh从Criteo官方去下载数据集,然后解压后放在raw文件夹。 - -可以用HDFS/AFS或是其他方式来配送数据集,在启动项中加入相关命令。 - -在改动之后,记得保存相关的docker镜像并推送到云端 - -.. code-block:: bash - - $ docker commit ${DOCKER_CONTAINER_NAME} ${DOCKER_IMAGE_NAME} - $ docker push ${DOCKER_IMAGE_NAME} - -也可以在Dockerfile当中进行修改 - -.. code-block:: bash - - $ docker build -t ${DOCKER_IMAGE_NAME} . - $ docker push ${DOCKER_IMAGE_NAME} - -推荐使用百度云提供的镜像仓库,这里是说明文档\ `推送镜像到镜像仓库 `_\ - -5.2 指定训练规模 -^^^^^^^^^^^^^^ - -在ctr.yaml文件当中,我们会发现这个是在volcano的框架下定义的Job。在Job里面,我们给出了很多Pserver和Trainer的定义,在总体的Job也给出了MinAvailable数量的定义。Pserver和Trainer下面有自己的Replicas,环境变量当中有PSERVER_NUM和TRAINER_MODEL和TRAINER_NUM的数量。通常MinAvailable= PServer Num + Trainer Num,这样我们就可以启动相应的服务。 - - -.. image:: src/ctryaml1.png - :target: src/ctryaml1.png - :alt: image - - -如上图所示,我们需要在min_available处设置合理的数字。例如一个POD占用一个CPU,那么我们就要对集群的总CPU数有一个预估,不要过于接近或事超过集群CPU总和的上限。否则无法满足Volcano的Gang-Schedule机制,就会出现无法分配资源,一直处于Pending的情况。然后第二个红框当中是 - - -.. image:: src/ctryaml2.png - :target: src/ctryaml2.png - :alt: image - - -如上图所示,这个部分是用来专门做模型的输出,这里我们不需要做任何的改动,只要保留一个副本就可以。 - - -.. image:: src/ctryaml3.png - :target: src/ctryaml3.png - :alt: image - - -如上图所示 - -5.3 指定Cube参数服务器的分片数量和副本数量 -^^^^^^^^^^^^^^^^^^^^ - -在cube.yaml文件当中,我们可以看到每一个Cube的节点的定义,有一个\ ``cube server pod``\ 和\ ``cube server service``\ 。如果我们需要增加cube的副本数和分片数,只需要在yaml文件中复制相关的定义和环境变量即可。 - - -.. image:: src/cube_config1.png - :target: src/cube_config1.png - :alt: image - - - -.. image:: src/cube_config2.png - :target: src/cube_config2.png - :alt: image - - -以上两个图片,一个是对Cube POD的定义,一个是对CubeSERVICE的定义。如果需要扩展Cube分片数量,可以复制POD和SERVICE的定义,并重命名它们。示例程序给出的是2个分片,复制之后第3个可以命名为cube-2。 - -5.4 Serving适配新的模型 -^^^^^^^^^^^^^^^^^^^ - -在本示例中,我们如果按照5.1节的方式,修改了CTR模型训练脚本的feed数据格式,就需要相应修改Serving的代码,以适应新的feed样例字段数量和数据类型。 - -本部署方案中Paddle Serving的的预测服务和客户端代码分别为: - -服务端: https://github.com/PaddlePaddle/Serving/blob/develop/demo-serving/op/ctr_prediction_op.cpp - -客户端:https://github.com/PaddlePaddle/Serving/blob/develop/demo-client/src/ctr_prediction.cpp - -用户可在此基础上进行修改。 - -关于Paddle Serving的完整开发模式,可参考\ `Serving从零开始写一个预测服务 `_\ ,以及\ `Paddle Serving的其他文档 `_ - - -注释 ----------- - -注1. :raw-html-m2r:`Cube和Redis性能对比测试环境` ------------------------------------------------------------------------------------ - -Cube和Redis均在百度云环境上部署,测试时只测试单个Cube server和Redis server节点的性能。 - -client端和server端分别位于2台独立的云主机,机器间ping延时为0.3ms-0.5ms。 - -机器配置:Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz 32核 - -Cube测试环境 -^^^^^^^^^^^^ - -测试key 64bit整数,value为10个float (40字节) - -首先用本方案一键部署脚本部署完成。 - -用Paddle Serving的Cube客户端SDK,编写测试代码 - -基本原理,启动k个线程,每个线程访问M次Cube server,每次批量获取N个key,总时间加和求平均。 - -.. 
list-table:: - :header-rows: 1 - - * - 并发数 (压测线程数) - - batch size - - 平均响应时间 (us) - - total qps - * - 1 - - 1000 - - 1312 - - 762 - * - 4 - - 1000 - - 1496 - - 2674 - * - 8 - - 1000 - - 1585 - - 5047 - * - 16 - - 1000 - - 1866 - - 8574 - * - 24 - - 1000 - - 2236 - - 10733 - * - 32 - - 1000 - - 2602 - - 12298 - - -Redis测试环境 -^^^^^^^^^^^^^ - -测试key 1-1000000之间随机整数,value为40字节字符串 - -server端部署Redis-server (latest stable 5.0.6) - -client端为基于\ `redisplusplus `_\ 编写的客户端\ `get_values.cpp `_ - -基本原理:启动k个线程,每个线程访问M次Redis server,每次用mget批量获取N个key。总时间加和求平均。 - -调用方法: - -.. code-block:: bash - - $ ./get_values -h 192.168.1.1 -t 3 -r 10000 -b 1000 - -其中 --h server所在主机名 --t 并发线程数 --r 每线程请求次数 --b 每个mget请求的key个数 - -.. list-table:: - :header-rows: 1 - - * - 并发数 (压测线程数) - - batch size - - 平均响应时间 (us) - - total qps - * - 1 - - 1000 - - 1159 - - 862 - * - 4 - - 1000 - - 3537 - - 1079 - * - 8 - - 1000 - - 7726 - - 1073 - * - 16 - - 1000 - - 15440 - - 1034 - * - 24 - - 1000 - - 24279 - - 1004 - * - 32 - - 1000 - - 32570 - - 996 - - -测试结论 -^^^^^^^^ - -由于Redis高效的时间驱动模型和全内存操作,在单并发时,Redis平均响应时间比Cube少接近50% (1100us vs. 1680us) - -在扩展性方面,Redis受制于单线程模型,随并发数增加,响应时间加倍增加,而总吞吐在1000qps左右即不再上涨;而Cube则随着压测并发数增加,总的qps一直上涨,说明Cube能够较好处理并发请求,具有良好的扩展能力。 - -RocksDB在线程数较少的时候,平均响应时间和qps慢于Redis,但是在16以及更多线程的测试当中,RocksDB提供了更快的响应时间和更大的qps。 \ No newline at end of file diff --git a/doc/fluid/user_guides/tools/elastic_ctr/deploy_ctr_on_baidu_cloud_cn.md b/doc/fluid/user_guides/tools/elastic_ctr/deploy_ctr_on_baidu_cloud_cn.md new file mode 100644 index 0000000000000000000000000000000000000000..15431e72c484d3bbc3538927ae163fd2aa4447a6 --- /dev/null +++ b/doc/fluid/user_guides/tools/elastic_ctr/deploy_ctr_on_baidu_cloud_cn.md @@ -0,0 +1,440 @@ +ELASTIC CTR +=================== + +——百度云分布式训练CTR预估任务和Serving流程一键部署 + + +* [1. 总体概览](#head1) +* [2. 前置需求](#head2) +* [3. 分布式训练+serving方案一键部署](#head3) +* [4. 查看结果](#head4) +* [5. 二次开发指南](#head5) + + +## 1. 
总体概览 + +本项目提供了端到端的CTR训练和二次开发的解决方案,主要特点: + +- 整体方案在k8s环境一键部署,可快速搭建与验证效果 +- 基于Paddle transpiler模式的大规模分布式高速训练 +- 训练资源弹性伸缩 +- 工业级稀疏参数Serving组件,高并发条件下单位时间吞吐总量是redis的13倍 \[[注1](#annotation_1)\] + +本方案整体流程如下图所示: + +![image](elastic_ctr/overview.png) + +其中: + +- trainer/pserver: 训练环节采用PaddlePaddle parameter server模式,对应trainer和pserver角色。分布式训练使用[volcano](https://volcano.sh/)做批量任务管理工具 +- file server: 训练产出的模型文件,托管到File Server,供下游模块下载;训练产出的文件包括:ProgramDesc和模型参数,模型参数中最大的embedding由工具转换为seqfile格式,经过一系列流程配送到cube分布式稀疏参数服务,其余模型参数保持不变,配送到Paddle Serving模块 +- cube-transfer: 负责监控上游训练作业产出的模型文件(hadoop sequence file)变化,拉取到本地,并调用cube-builder构建cube字典文件;通知cube-agent节点拉取最新的字典文件,并维护各个cube-server上版本一致性 +- cube-builder: 负责将训练作业产出的模型文件(hadoop sequence file格式)转换成可以被cube-server加载的字典文件。字典文件具有特定的数据结构,针对尺寸和内存中访问做了高度优化 +- Cube-Server: 提供分片kv读写能力的服务节点 +- Cube-agent: 与cube-server同机部署,接收cube-transfer下发的字典文件更新命令,拉取数据到本地,通知cube-server进行更新 +- Paddle Serving: 加载CTR预估任务模型ProgramDesc和dense参数,提供预测服务 +- Client: CTR预估任务的demo客户端 + +以上组件串联完成从训练到预测部署的所有流程。本文档所提供的一键部署脚本[paddle-suite.sh](https://github.com/PaddlePaddle/Serving/blob/master/doc/resource/paddle-suite.sh)可一键部署上述所有组件。 + +用户可以参考本部署方案,将基于PaddlePaddle的分布式训练和Serving应用到业务环境,也可以在本方案基础上做功能增强和改进,直接使用。具体的,用户可以: + +- 指定数据集的输入和读取方式,来feed不同的数据集和数据集格式;相应的修改Serving代码以适应新模型 +- 指定训练的规模,包括参数服务器的数量和训练节点的数量 +- 指定Cube参数服务器的分片数量和副本数量 + +在本文第5节会详细解释以上二次开发的实际操作。 + +本文主要内容: + +**第2节 前置需求** 指导用户从零开始,在百度云上申请BCE集群,并部署volcano工具。本方案需使用[volcano](https://volcano.sh/)做训练环节批量任务管理工具,目前在百度云上验证通过 + +**第3节 分布式训练+serving方案部署** 使用paddle-suite.sh,一键部署分布式训练+serving完整流程;并详细解释脚本每一步的工作和含义 + +**第4节 查看结果** 根据各个pod输出,验证一键安装状态 + +**第5节 二次开发** 提出本一键部署方案可定制改善的部分,给出具体修改位置等 + +## 2. 前置需求 + +运行本方案前,需要用户已经搭建好k8s集群,并安装好volcano组件。k8s环境部署比较复杂,本文不涉及。百度智能云CCE容器引擎申请后即可使用,仅以百度云上创建k8s为例。 + +### 2.1 创建k8s集群 + +请参考 +[百度智能云CCE容器引擎帮助文档-创建集群](https://cloud.baidu.com/doc/CCE/GettingStarted/24.5C.E5.88.9B.E5.BB.BA.E9.9B.86.E7.BE.A4.html#.E6.93.8D.E4.BD.9C.E6.AD.A5.E9.AA.A4),在百度智能云上建立一个集群,节点配置需要满足如下要求 + +- CPU核数 \> 4 + +申请容器引擎示例: + +![image](elastic_ctr/ctr_node.png) + +创建完成后,即可参考[百度智能云CCE容器引擎帮助文档-查看集群](https://cloud.baidu.com/doc/CCE/GettingStarted.html#.E6.9F.A5.E7.9C.8B.E9.9B.86.E7.BE.A4),查看刚刚申请的集群信息。 + +### 2.2 如何操作集群 + +集群的操作可以通过百度云web或者通过kubectl工具进行,推荐用kubectl工具。 + +对于百度云k8s集群,客户端kubectl需要和百度云上kubernetes版本对应,请参考[百度智能云CCE容器引擎帮助文档-kubectl管理配置](https://cloud.baidu.com/doc/CCE/Developer-GettingStarted.html#.84.1C.DF.97.63.35.64.3B.1A.6E.7D.B1.E4.5B.E3.66)查看当前所用的kubernetes版本,并参考kubernetes官方文档下载对应版本的kubectrl版本进行安装。 + +\* 注意: 本操作指南给出的操作步骤都是基于linux操作环境的。 + +- 首先请参考[官方安装说明](https://kubernetes.io/docs/tasks/tools/install-kubectl/),安装和百度云kubernetes版本对应的的kubectl。 + +- 配置kubectl,下载集群凭证。在集群界面下载集群配置文件,放在kubectl的默认配置路径(请检查\~/.kube目录是否存在,若没有请创建) + +```bash +$ mv kubectl.conf ~/.kube/config +``` + +- 配置完成后,您即可以使用kubectl从本地计算机访问Kubernetes集群 + +```bash +$ kubectl get node +``` + +- 关于kubectl的其他信息,可以参考[Overview of kubectl](https://kubernetes.io/docs/reference/kubectl/overview/)。 + +### 2.3 设置访问权限 + +建立分布式任务需要pod间有API互相访问的权限,可以按如下步骤 + +```bash +$ kubectl create rolebinding default-view --clusterrole=view --serviceaccount=default:default --namespace=default +``` + +注意: --namespace 指定的default 为创建集群时候的名称 + +## 2.4 安装Volcano + +我们使用volcano作为训练阶段的批量任务管理工具。关于volcano的详细信息,请参考[官方网站](https://volcano.sh/)的Documentation。 + +执行以下命令安装volcano到k8s集群: + +```bash +$ kubectl apply -f https://raw.githubusercontent.com/volcano-sh/volcano/master/installer/volcano-development.yaml +``` + +![image](elastic_ctr/ctr_volcano_install.png) + + +## 3. 
分布式训练+serving方案一键部署 + +### 3.1 下载部署方案脚本文件 + +请将[本方案所需所有脚本文件](https://github.com/PaddlePaddle/Serving/tree/master/doc/resource)下载到本地 + +### 3.2 一键部署 + +执行以下脚本,一键将所有组件部署到k8s集群。 + +```bash +$ bash paddle-suite.sh +``` + +请参考**3.3-3.8节**验证每一步的安装是否正确,**第4节**验证训练过程和预测服务结果。 + +**[注意!!!]**:以下**3.3-3.8节所述内容已经在一键部署脚本中包含,无需手动执行**。但为方便理解,将该脚本的每一步执行过程给出说明。 + +### 3.3 选择一个node作为输出节点 + +```bash +$ kubectl label nodes $NODE_NAME nodeType=model +``` + +这句话的意思是给这个node做一个标记,之后的文件服务和模型产出都被强制分配在这个node上进行,把NAME的一串字符替换 \$NODE\_NAME即可。 + +### 3.4 启动文件服务器 + +```bash +$ kubectl apply -f fileserver.yaml +``` + +运行file server的启动脚本kubectl apply -f ftp.yaml,启动文件服务器 + +验证:通过`kubectl get pod`命令查看是否file-server这个pod已经running,通过`kubectl get service`命令查看是否file-server service是否存在: +```bash +$ kubectl get pod +``` +![image](elastic_ctr/file_server_pod.png) + +``` +$ kubectl get service +``` + +![image](elastic_ctr/file_server_svc.png) + + +### 3.5 启动Cube稀疏参数服务器 + +```bash +$ kubectl apply -f cube.yaml +``` + +验证:通过`kubectl get service`命令查看是否cube-0和cube-1这2个service存在,则说明cube server/agent启动成功。 + +``` +$ kubectl get service +``` + +![image](elastic_ctr/cube.png) + +**注**:分片数量可根据稀疏字典大小灵活修改,参考5.3节。 + +### 3.6 启动Paddle Serving + +```bash +$ kubectl apply -f paddleserving.yaml +``` + +验证:通过`kubectl get pod`查看serving pod是否running状态;通过`kubectl get service`查看paddleserving服务是否存在: + +```bash +$ kubectl get pod +``` +![image](elastic_ctr/paddleserving_pod.png) + +```bash +$ kubectl get service +``` +![image](elastic_ctr/paddleserving_svc.png) + +### 3.7 启动Cube稀疏参数服务器配送工具 + +```bash +$ kubectl apply -f transfer.yaml +``` + +验证:通过`kubectl get pod`查看cube-transfer这个pod是否是running状态 + +```bash +$ kubectl get pod +``` + +这个cube-transfer配送工具会把训练好的模型从下面要介绍的edl-demo-trainer-0上通过file-server服务拉取到本地,经过cube-builder做格式转换,配送给各个分片cube-server,最终目的是给PaddleServing来进行稀疏参数查询。 + +**在训练任务结束前,cube-transfer会一直等待上游数据产出。直到检测到上游模型文件生成后,开始启动配送。可通过日志观察cube-transfer的工作状态:** + +``` +$ kubectl logs cube-transfer +``` + +如果出现最后wait 5min这样的字样,说明上一轮的模型已经配送成功了,接下来就可以做最后PaddleServing的测试了。 + +![image](elastic_ctr/transfer.png) + + +### 3.8 执行Paddle CTR分布式训练 + +```bash +$ kubectl apply -f ctr.yaml +``` +验证:通过`kubectl get pod`查看edl-demo-trainer-0/edl-demo-trainer-1, edl-demo-pserver-0/edl-demo-pserver-1/edl-demo-pserver-2, edl-demo-model-out-trainer-0等pod是否是running状态 + +```bash +$ kubectl get pod +``` + +我们可以通过`kubectl logs edl-demo-trainer-0`来查看训练的进度,如果pass一直为0就继续等待,通常需要大概3-5分钟的之间会完成第一轮pass,这时候就会生成inference\_model。 + +![image](elastic_ctr/ctr.png) + +## 4. 查看结果 + +### 4.1 查看训练日志 + +百度云容器引擎CCE提供了web操作台方便查看pod的运行状态。 + +本次训练任务将启动3个pserver节点,3个trainer节点。 + +可以通过检查pserver和trainer的log来检查任务运行状态。 Trainer日志示例: + +![image](elastic_ctr/ctr_trainer_log.png) + +pserver日志示例: + +![image](elastic_ctr/ctr_pserver_log.png) + +### 4.2 验证Paddle Serving预测结果 + +执行 + +```bash +$ kubectl apply -f paddleclient.yaml +``` + +用如下命令进入容器内,在/client/ctr\_prediction目录下,启动CTR预估任务客户端,并通过日志查看预测结果 + +```bash +# 进入容器 +$ kubectl exec -ti pdservingclient /bin/bash + +# 此命令在容器内执行 +$ bin/ctr_prediction +``` + +如果运行正常的话,会在一段时间后退出,紧接着就可以在log/ctr\_prediction.INFO的最后几行看到类似于这样的日志 + +![image](elastic_ctr/paddleclient.png) + +## 5. 
二次开发指南 + +### 5.1 指定数据集的输入和读取方式 + +现有的数据的输入是从edldemo镜像当中的/workspace/ctr/data/download.sh目录进行下载。下载之后会解压在/workspace/ctr/data/raw文件夹当中,包含train.txt和test.txt。所有的数据的每一行通过空格隔开40个属性。 + +然后在train.py当中给出数据集的读取方式 + +![image](elastic_ctr/pyreader.png) + +这里面包含了连续数据和离散数据。 连续数据是index [1,14),离散数据是index [14, 40),label是index 0,分别对应最后yield[dense\_feature] + sparse\_feature +[label]。当离散的数据和连续的数据格式和样例有不同,需要用户在这里进行指定,并且可以在\_\_init\_\_函数当中参考样例的写法对连续数据进行归一化。 + +对于数据的来源,文章给出的是download.sh从Criteo官方去下载数据集,然后解压后放在raw文件夹。 + +可以用HDFS/AFS或是其他方式来配送数据集,在启动项中加入相关命令。 + +在改动之后,记得保存相关的docker镜像并推送到云端 + +```bash +$ docker commit ${DOCKER_CONTAINER_NAME} ${DOCKER_IMAGE_NAME} +$ docker push ${DOCKER_IMAGE_NAME} +``` + +也可以在Dockerfile当中进行修改 + +```bash +$ docker build -t ${DOCKER_IMAGE_NAME} . +$ docker push ${DOCKER_IMAGE_NAME} +``` + +推荐使用百度云提供的镜像仓库,这里是说明文档[推送镜像到镜像仓库](https://cloud.baidu.com/doc/CCE/s/Yjxppt74z/#%E6%8E%A8%E9%80%81%E9%95%9C%E5%83%8F%E5%88%B0%E9%95%9C%E5%83%8F%E4%BB%93%E5%BA%93) + +### 5.2 指定训练规模 + +在ctr.yaml文件当中,我们会发现这个是在volcano的框架下定义的Job。在Job里面,我们给出了很多Pserver和Trainer的定义,在总体的Job也给出了MinAvailable数量的定义。Pserver和Trainer下面有自己的Replicas,环境变量当中有PSERVER\_NUM和TRAINER\_MODEL和TRAINER\_NUM的数量。通常MinAvailable= PServer Num + Trainer Num,这样我们就可以启动相应的服务。 + +![image](elastic_ctr/ctryaml1.png) + +如上图所示,我们需要在min\_available处设置合理的数字。例如一个POD占用一个CPU,那么我们就要对集群的总CPU数有一个预估,不要过于接近或事超过集群CPU总和的上限。否则无法满足Volcano的Gang-Schedule机制,就会出现无法分配资源,一直处于Pending的情况。然后第二个红框当中是 + +![image](elastic_ctr/ctryaml2.png) + +如上图所示,这个部分是用来专门做模型的输出,这里我们不需要做任何的改动,只要保留一个副本就可以。 + +![image](elastic_ctr/ctryaml3.png) + +如上图所示 + +### 5.3 指定cube参数服务器的分片数量和副本数量 + +在cube.yaml文件当中,我们可以看到每一个cube的节点的定义,有一个`cubeserver pod`和`cube serverservice`。如果我们需要增加cube的副本数和分片数,只需要在yaml文件中复制相关的定义和环境变量即可。 + +![image](elastic_ctr/cube_config1.png) + +![image](elastic_ctr/cube_config2.png) + +以上两个图片,一个是对cube POD的定义,一个是对cubeSERVICE的定义。如果需要扩展Cube分片数量,可以复制POD和SERVICE的定义,并重命名它们。示例程序给出的是2个分片,复制之后第3个可以命名为cube-2。 + +### 5.4 Serving适配新的模型 + +在本示例中,我们如果按照5.1节的方式,修改了CTR模型训练脚本的feed数据格式,就需要相应修改Serving的代码,以适应新的feed样例字段数量和数据类型。 + +本部署方案中Paddle Serving的的预测服务和客户端代码分别为: + +服务端: https://github.com/PaddlePaddle/Serving/blob/develop/demo-serving/op/ctr_prediction_op.cpp + +客户端:https://github.com/PaddlePaddle/Serving/blob/develop/demo-client/src/ctr_prediction.cpp + +用户可在此基础上进行修改。 + + +关于Paddle Serving的完整开发模式,可参考[Paddle Serving文档](https://github.com/PaddlePaddle/Serving/tree/develop/doc) + +## 注释 + +## 注1. 
Cube和redis性能对比测试环境 + +Cube和Redis均在百度云环境上部署,测试时只测试单个cube server和redis server节点的性能。 + +client端和server端分别位于2台独立的云主机,机器间ping延时为0.3ms-0.5ms。 + +机器配置:Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz 32核 + + +### Cube测试环境 + +测试key 64bit整数,value为10个float (40字节) + +首先用本方案一键部署脚本部署完成。 + +用Paddle Serving的cube客户端SDK,编写测试代码 + +基本原理,启动k个线程,每个线程访问M次cube server,每次批量获取N个key,总时间加和求平均。 + +并发数 (压测线程数) | batch size | 平均响应时间 (us) | total qps +-------|------------|-------------|--------------------------- +1 | 1000 | 1312 | 762 +4 | 1000 | 1496 | 2674 +8 | 1000 | 1585 | 5047 +16 | 1000 | 1866 | 8574 +24 | 1000 | 2236 | 10733 +32 | 1000 | 2602 | 12298 + +### Redis测试环境 + +测试key 1-1000000之间随机整数,value为40字节字符串 + +server端部署redis-server (latest stable 5.0.6) + +client端为基于[redisplusplus](https://github.com/sewenew/redis-plus-plus)编写的客户端[get_values.cpp](https://github.com/PaddlePaddle/Serving/blob/master/doc/resource/get_value.cpp) + +基本原理:启动k个线程,每个线程访问M次redis server,每次用mget批量获取N个key。总时间加和求平均。 + +调用方法: + +```bash +$ ./get_values -h 192.168.1.1 -t 3 -r 10000 -b 1000 +``` + +其中 +\-h server所在主机名 +\-t 并发线程数 +\-r 每线程请求次数 +\-b 每个mget请求的key个数 + +并发数 (压测线程数) | batch size | 平均响应时间 (us) | total qps +-------|------------|-------------|--------------------------- +1 | 1000 | 1643 | 608 +4 | 1000 | 4878 | 819 +8 | 1000 | 9870 | 810 +16 | 1000 | 22177 | 721 +24 | 1000 | 30620 | 783 +32 | 1000 | 37668 | 849 + + +### RocksDB测试环境 + +测试key 1-1000000之间随机整数,value为40字节字符串 + +基本原理:启动k个线程,每个线程访问M次rocksDB,每次用mget批量获取N个key。总时间加和求平均。 + +并发数 (压测线程数) | batch size | 平均响应时间 (us) | total qps +-------|------------|-------------|--------------------------- +1 | 1000 | 11345 | 88 +4 | 1000 | 11210 | 357 +8 | 1000 | 11475 | 697 +16 | 1000 | 12822 | 1248 +24 | 1000 | 14220 | 1688 +32 | 1000 | 17256 | 1854 + + +### 测试结论 + +由于Redis高效的时间驱动模型和全内存操作,在单并发时,redis平均响应时间与cube相差不多% (1643us vs. 
1312us) + +在扩展性方面,redis受制于单线程模型,随并发数增加,响应时间加倍增加,而总吞吐在1000qps左右即不再上涨;而cube则随着压测并发数增加,总的qps一直上涨,说明cube能够较好处理并发请求,具有良好的扩展能力。 + +RocksDB在线程数较少的时候,平均响应时间和qps慢于Redis,但是在16以及更多线程的测试当中,RocksDB提供了更快的响应时间和更大的qps。 + + diff --git a/doc/fluid/user_guides/tools/src/baidu_cloud/cluster-info.png b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/cluster-info.png similarity index 100% rename from doc/fluid/user_guides/tools/src/baidu_cloud/cluster-info.png rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/cluster-info.png diff --git a/doc/fluid/user_guides/tools/src/baidu_cloud/concole.png b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/concole.png similarity index 100% rename from doc/fluid/user_guides/tools/src/baidu_cloud/concole.png rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/concole.png diff --git a/doc/fluid/user_guides/tools/src/baidu_cloud/conf-download.png b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/conf-download.png similarity index 100% rename from doc/fluid/user_guides/tools/src/baidu_cloud/conf-download.png rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/conf-download.png diff --git a/doc/fluid/user_guides/tools/src/baidu_cloud/ctr-models.png b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/ctr-models.png similarity index 100% rename from doc/fluid/user_guides/tools/src/baidu_cloud/ctr-models.png rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/ctr-models.png diff --git a/doc/fluid/user_guides/tools/src/baidu_cloud/ctr-prediction-end-to-end-deployment.png b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/ctr-prediction-end-to-end-deployment.png similarity index 100% rename from doc/fluid/user_guides/tools/src/baidu_cloud/ctr-prediction-end-to-end-deployment.png rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/ctr-prediction-end-to-end-deployment.png diff --git a/doc/fluid/user_guides/tools/src/baidu_cloud/ctr-running.png b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/ctr-running.png similarity index 100% rename from doc/fluid/user_guides/tools/src/baidu_cloud/ctr-running.png rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/ctr-running.png diff --git a/doc/fluid/user_guides/tools/src/baidu_cloud/eip.png b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/eip.png similarity index 100% rename from doc/fluid/user_guides/tools/src/baidu_cloud/eip.png rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/eip.png diff --git a/doc/fluid/user_guides/tools/src/baidu_cloud/file_server.png b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/file_server.png similarity index 100% rename from doc/fluid/user_guides/tools/src/baidu_cloud/file_server.png rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/file_server.png diff --git a/doc/fluid/user_guides/tools/src/baidu_cloud/helm-version.png b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/helm-version.png similarity index 100% rename from doc/fluid/user_guides/tools/src/baidu_cloud/helm-version.png rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/helm-version.png diff --git a/doc/fluid/user_guides/tools/src/baidu_cloud/kubectl-version.png b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/kubectl-version.png similarity index 100% rename from 
doc/fluid/user_guides/tools/src/baidu_cloud/kubectl-version.png rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/kubectl-version.png diff --git a/doc/fluid/user_guides/tools/src/baidu_cloud/load_balancer.png b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/load_balancer.png similarity index 100% rename from doc/fluid/user_guides/tools/src/baidu_cloud/load_balancer.png rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/load_balancer.png diff --git a/doc/fluid/user_guides/tools/src/baidu_cloud/pserver-log.png b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/pserver-log.png similarity index 100% rename from doc/fluid/user_guides/tools/src/baidu_cloud/pserver-log.png rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/pserver-log.png diff --git a/doc/fluid/user_guides/tools/src/baidu_cloud/tiller.png b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/tiller.png similarity index 100% rename from doc/fluid/user_guides/tools/src/baidu_cloud/tiller.png rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/tiller.png diff --git a/doc/fluid/user_guides/tools/src/baidu_cloud/trainer-log.png b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/trainer-log.png similarity index 100% rename from doc/fluid/user_guides/tools/src/baidu_cloud/trainer-log.png rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/trainer-log.png diff --git a/doc/fluid/user_guides/tools/src/baidu_cloud/volcano.png b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/volcano.png similarity index 100% rename from doc/fluid/user_guides/tools/src/baidu_cloud/volcano.png rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/volcano.png diff --git a/doc/fluid/user_guides/tools/src/baidu_cloud/wget_example.png b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/wget_example.png similarity index 100% rename from doc/fluid/user_guides/tools/src/baidu_cloud/wget_example.png rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/wget_example.png diff --git a/doc/fluid/user_guides/tools/src/baidu_cloud/workload.png b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/workload.png similarity index 100% rename from doc/fluid/user_guides/tools/src/baidu_cloud/workload.png rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/workload.png diff --git a/doc/fluid/user_guides/tools/src/create_gpu_machine.png b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/create_gpu_machine.png similarity index 100% rename from doc/fluid/user_guides/tools/src/create_gpu_machine.png rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/create_gpu_machine.png diff --git a/doc/fluid/user_guides/tools/src/create_image.png b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/create_image.png similarity index 100% rename from doc/fluid/user_guides/tools/src/create_image.png rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/create_image.png diff --git a/doc/fluid/user_guides/tools/src/create_more_nodes.png b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/create_more_nodes.png similarity index 100% rename from doc/fluid/user_guides/tools/src/create_more_nodes.png rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/create_more_nodes.png diff --git a/doc/fluid/user_guides/tools/src/ctr.png b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/ctr.png similarity 
index 100% rename from doc/fluid/user_guides/tools/src/ctr.png rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/ctr.png diff --git a/doc/fluid/user_guides/tools/src/ctr_kubectl_download.png b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/ctr_kubectl_download.png similarity index 100% rename from doc/fluid/user_guides/tools/src/ctr_kubectl_download.png rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/ctr_kubectl_download.png diff --git a/doc/fluid/user_guides/tools/src/ctr_node.png b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/ctr_node.png similarity index 100% rename from doc/fluid/user_guides/tools/src/ctr_node.png rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/ctr_node.png diff --git a/doc/fluid/user_guides/tools/src/ctr_pods.png b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/ctr_pods.png similarity index 100% rename from doc/fluid/user_guides/tools/src/ctr_pods.png rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/ctr_pods.png diff --git a/doc/fluid/user_guides/tools/src/ctr_pserver_log.png b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/ctr_pserver_log.png similarity index 100% rename from doc/fluid/user_guides/tools/src/ctr_pserver_log.png rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/ctr_pserver_log.png diff --git a/doc/fluid/user_guides/tools/src/ctr_trainer_log.png b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/ctr_trainer_log.png similarity index 100% rename from doc/fluid/user_guides/tools/src/ctr_trainer_log.png rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/ctr_trainer_log.png diff --git a/doc/fluid/user_guides/tools/src/ctr_volcano_install.png b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/ctr_volcano_install.png similarity index 100% rename from doc/fluid/user_guides/tools/src/ctr_volcano_install.png rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/ctr_volcano_install.png diff --git a/doc/fluid/user_guides/tools/src/ctryaml1.png b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/ctryaml1.png similarity index 100% rename from doc/fluid/user_guides/tools/src/ctryaml1.png rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/ctryaml1.png diff --git a/doc/fluid/user_guides/tools/src/ctryaml2.png b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/ctryaml2.png similarity index 100% rename from doc/fluid/user_guides/tools/src/ctryaml2.png rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/ctryaml2.png diff --git a/doc/fluid/user_guides/tools/src/ctryaml3.png b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/ctryaml3.png similarity index 100% rename from doc/fluid/user_guides/tools/src/ctryaml3.png rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/ctryaml3.png diff --git a/doc/fluid/user_guides/tools/src/cube.png b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/cube.png similarity index 100% rename from doc/fluid/user_guides/tools/src/cube.png rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/cube.png diff --git a/doc/fluid/user_guides/tools/src/cube_config1.png b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/cube_config1.png similarity index 100% rename from doc/fluid/user_guides/tools/src/cube_config1.png rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/cube_config1.png diff --git a/doc/fluid/user_guides/tools/src/cube_config2.png b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/cube_config2.png similarity index 100% rename from 
doc/fluid/user_guides/tools/src/cube_config2.png rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/cube_config2.png diff --git a/doc/fluid/user_guides/tools/src/dist_train_demo.py b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/dist_train_demo.py similarity index 100% rename from doc/fluid/user_guides/tools/src/dist_train_demo.py rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/dist_train_demo.py diff --git a/doc/fluid/user_guides/tools/src/dist_train_nccl2.graffle b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/dist_train_nccl2.graffle similarity index 100% rename from doc/fluid/user_guides/tools/src/dist_train_nccl2.graffle rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/dist_train_nccl2.graffle diff --git a/doc/fluid/user_guides/tools/src/dist_train_nccl2.png b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/dist_train_nccl2.png similarity index 100% rename from doc/fluid/user_guides/tools/src/dist_train_nccl2.png rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/dist_train_nccl2.png diff --git a/doc/fluid/user_guides/tools/src/dist_train_pserver.graffle b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/dist_train_pserver.graffle similarity index 100% rename from doc/fluid/user_guides/tools/src/dist_train_pserver.graffle rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/dist_train_pserver.graffle diff --git a/doc/fluid/user_guides/tools/src/dist_train_pserver.png b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/dist_train_pserver.png similarity index 100% rename from doc/fluid/user_guides/tools/src/dist_train_pserver.png rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/dist_train_pserver.png diff --git a/doc/fluid/user_guides/tools/src/file_server_pod.png b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/file_server_pod.png similarity index 100% rename from doc/fluid/user_guides/tools/src/file_server_pod.png rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/file_server_pod.png diff --git a/doc/fluid/user_guides/tools/src/file_server_svc.png b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/file_server_svc.png similarity index 100% rename from doc/fluid/user_guides/tools/src/file_server_svc.png rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/file_server_svc.png diff --git a/doc/fluid/user_guides/tools/src/overview.png b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/overview.png similarity index 100% rename from doc/fluid/user_guides/tools/src/overview.png rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/overview.png diff --git a/doc/fluid/user_guides/tools/src/paddleclient.png b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/paddleclient.png similarity index 100% rename from doc/fluid/user_guides/tools/src/paddleclient.png rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/paddleclient.png diff --git a/doc/fluid/user_guides/tools/src/paddleserving_pod.png b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/paddleserving_pod.png similarity index 100% rename from doc/fluid/user_guides/tools/src/paddleserving_pod.png rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/paddleserving_pod.png diff --git a/doc/fluid/user_guides/tools/src/paddleserving_svc.png b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/paddleserving_svc.png similarity index 100% rename from doc/fluid/user_guides/tools/src/paddleserving_svc.png rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/paddleserving_svc.png diff --git 
a/doc/fluid/user_guides/tools/src/parallelism.png b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/parallelism.png similarity index 100% rename from doc/fluid/user_guides/tools/src/parallelism.png rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/parallelism.png diff --git a/doc/fluid/user_guides/tools/src/pyreader.png b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/pyreader.png similarity index 100% rename from doc/fluid/user_guides/tools/src/pyreader.png rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/pyreader.png diff --git a/doc/fluid/user_guides/tools/src/release.png b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/release.png similarity index 100% rename from doc/fluid/user_guides/tools/src/release.png rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/release.png diff --git a/doc/fluid/user_guides/tools/src/transfer.png b/doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/transfer.png similarity index 100% rename from doc/fluid/user_guides/tools/src/transfer.png rename to doc/fluid/user_guides/tools/elastic_ctr/elastic_ctr/transfer.png diff --git a/doc/fluid/user_guides/tools/elastic_ctr/index.cn.html b/doc/fluid/user_guides/tools/elastic_ctr/index.cn.html new file mode 100644 index 0000000000000000000000000000000000000000..37508f86c47683ca6e97b32424210631f5dc24cb --- /dev/null +++ b/doc/fluid/user_guides/tools/elastic_ctr/index.cn.html @@ -0,0 +1,539 @@ + + + + + + + + + + + + + + + +
+
+ + + + + + + diff --git a/doc/fluid/user_guides/tools/index_cn.rst b/doc/fluid/user_guides/tools/index_cn.rst index 74cf0b44577228aebf929371476587411f7d75ad..b7ad3e355eea252609c156138af56df8b449b0ce 100644 --- a/doc/fluid/user_guides/tools/index_cn.rst +++ b/doc/fluid/user_guides/tools/index_cn.rst @@ -2,7 +2,13 @@ 工具组件 ################ +.. todo:: + +这里PaddlePaddle为大家提供了两篇案例文章:百度云分布式训练CTR预估任务和Serving流程一键部署的案例文章,以及飞桨大规模分类库使用的案例文章。 + + .. toctree:: :titlesonly: - deploy_ctr_on_baidu_cloud_cn.rst + elastic_ctr/deploy_ctr_on_baidu_cloud_cn.rst + plsc/plsc_guider_cn.rst diff --git a/doc/fluid/user_guides/tools/plsc/plsc_guider_cn.rst b/doc/fluid/user_guides/tools/plsc/plsc_guider_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f95cf8abf6e0d29586eba8740e557ee26deae7bb --- /dev/null +++ b/doc/fluid/user_guides/tools/plsc/plsc_guider_cn.rst @@ -0,0 +1,247 @@ + +飞桨大规模分类库简介 +=================== + +图像分类技术日趋成熟,ResNet网络在ImageNet数据集上的top5准确率已超过96%。然而,如何高效地完成百万类别甚至是更大规模的分类任务,则是一个极具挑战性的课题。 + +从多分类神经网络的实现角度分析,其最后一层通常是由全连接层和Softmax构成的组合层,全连接层输出结点数挂钩分类任务的类别数,所以对应的参数量随分类类别数的增长而线性增长。因此,当类别数非常大时,神经网络训练过程占用的显存空间也会很大,甚至是超出单张GPU卡的显存容量,导致神经网络模型无法训练。 + +以新闻推荐系统为例,假设要对百万类细分类别的新闻条目进行分类,那么仅存储全连接层参数就需要约2GB的显存空间(这里假设神经网络最后一层隐层的输出结点的维度为512,并假设以32比特浮点数表示数据)。再考虑神经网络训练过程中生成的数量庞多的中间变量,那么训练过程中需要的存储总量往往会超出单张GPU卡的显存容量。 + +该如何解决这个问题呢?常用的做法是“拆分”。考虑到全连接层的线性可分性,可以将全连接层参数切分到多张GPU卡,采用模型并行方案,减少每张GPU卡的参数存储量。 + +以下图为例,全连接层参数按行切分到不同的GPU卡上。每次训练迭代过程中,各张GPU卡分别以各自的训练数据计算隐层的输出特征(feature),并通过集合通信操作AllGather得到汇聚后的特征。接着,各张GPU卡以汇聚后的特征和部分全连接层参数计算部分logit值(partial logit),并基于此计算神经网络的损失值。 + + +.. image:: ./plsc_overview.png + :target: ./plsc_overview.png + :alt: plsc_overview + :width: 400px + + +这个方案可以有效解决全连接层参数量随分类类别数线性增长导致的显存空间不足的问题。然而,为了实现这一方案,开发者需要基于现有的深度学习平台设计和实现上例描述的所有操作,包括全连接层参数的切分和集合通信等,动辄需要数百行实现代码,大大增加了开发者的负担。 + +现在,开发者的福音来了,飞桨近期开源了基于核心框架构建的大规模分类库(PLSC: PaddlePaddle Large Scale Classification),为用户提供了大规模分类任务从训练到部署的全流程解决方案。只需数行代码,即可实现千万类别分类的神经网络。并且,通过PLSC库提供的serving功能用户可以快速部署模型,提供一站式服务。 + +简单易用,五行代码实现千万类别神经网络 +-------------------------------------- + +飞桨大规模分类库PLSC(以下简称PLSC)封装了大规模分类神经网络实现,提供简洁易用的高层API,用户通过五行代码即可实现千万类别分类神经网络。 + +安装飞桨 +^^^^^^^^ + +可以参考官网下载并安装飞桨: `飞桨安装文档 `_。 + + + +安装PLSC +^^^^^^^^ + +执行下面的命令安装PLSC。 + +.. code-block:: shell + + pip install plsc + +准备模型训练配置代码,保存为train.py文件 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +使用PLSC组建分类神经网络主要包括下面三个步骤: + + +#. + 从plsc包导入Entry类,Entry类封装PLSC所有API的接口类; + +#. + 实例化Entry类的对象; + +#. + 调用Entry类的train方法,开始训练过程。 + +默认情况下,该训练脚本使用的loss值计算方法为'dist_arcface',即将全连接层参数切分到多张GPU卡的模型并行方案,需要使用两张或以上的GPU卡。 + +.. code-block:: python + + from plsc import Entry + if __name__ == "main": + ins = Entry() + ins.set_class_num(1000000) #设置分类类别数 + ins.train() + +启动训练任务 +^^^^^^^^^^^^ + +可以使用下面的命令行启动训练任务,其中selected_gpus参数用于指定训练中使用的GPU卡。 + +.. code-block:: shell + + python -m paddle.distributed.launch \ + --selected_gpus=0,1,2,3,4,5,6,7 \ + train.py + +PLSC训练效果达到SOTA精度 +------------------------ + +PLSC库在多个数据集上可以取得SOTA的训练精度,下表列出PLSC库分别使用MS1M-ArcFace和CASIA数据集作为训练数据,在不同验证数据集上取得的精度。 + +.. 
list-table:: + :header-rows: 1 + + * - 模型 + - 训练集 + - lfw + - agendb_30 + - cfp_ff + - cfp_fp + - MegaFace (Id/Ver) + * - ResNet50 + - MS1M-ArcFace + - 0.99817 + - 0.99827 + - 0.99857 + - 0.96314 + - 0.980/0.993 + * - ResNet50 + - CASIA + - 0.98950 + - 0.90950 + - 0.99057 + - 0.91500 + - N/A + + +备注:上述模型训练使用的loss_type为'dist_arcface'。更多关于ArcFace的内容请参考 + +**ArcFace:** Additive Angular Margin Loss for Deep Face Recognition + +https://arxiv.org/abs/1801.07698 + +LSC支持多机分布式训练和千万规模分类 +----------------------------------- + +PLSC支持多机分布式训练。一方面,通过多机分布式训练可以将全连接层参数切分到更多的GPU卡,从而支持千万类别分类,并且飞桨大规模分类库理论上支持的分类类别数随着使用的GPU卡数的增加而增加。例如,单机8张V100 GPU配置下支持的最大分类类别数相比不使用PLSC扩大2.52倍。 + +另一方面,使用多机分布式训练可以有效提升训练速度。 + +通过下面几行命令即可启动多机分布式训练。其中,cluster_node_ips参数用于指定所有训练节点的ip地址列表,node_ip参数用于指定当前训练节点的ip地址。 + +.. code-block:: shel + + python -m paddle.distributed.launch \ + --cluster_node_ips="127.0.0.1,127.0.0.2" \ + --node_ip="127.0.0.1" \ + --selected_gpus=0,1,2,3,4,5,6,7 \ + train.py + +下图给出使用不同数量的节点时的训练速度(吞吐)。实验中使用的训练数据集为MS1M-ArcFace,分类类别数为85742,每个节点配备8张NVIDIA V100 GPUs,backbone模型为ResNet50。如图所示,使用飞桨大规模分类库可以取得近似线性的加速比。 + + +.. image:: ./plsc_performance.png + :target: ./plsc_performance.png + :alt: performance + + +PLSC提供从训练到部署的全流程解决方案 +------------------------------------ + +用户完成分类神经网络训练后,通常要基于得到的预训练模型部署预测服务。通过飞桨大规模分类库提供的serving功能可实现快速部署。 + +飞桨大规模分类库提供支持预测服务部署的serving端和client端。serving端基于飞桨服务器端部署库Paddle Serving开发,使用serving端功能可以基于预训练模型快速部署预测服务。client端则提供了和serving端的交互功能,用户通过client端提交查询请求并获取预测结果。只需三步即可完成部署。 + +安装serving端和client端 +^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: shell + + pip install plsc-serving ujson + +通过下面的脚本部署serving端 +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: python + + from plsc_serving.run import PLSCServer + fs = PLSCServer() + # 设定使用的模型路径 + fs.with_model(model_path = '/XXX/XXX') + # gpu_index指定使用的gpu,port指定使用的端口 + fs.run(gpu_index = 0, port = 8010) + +通过下面的脚本使用client端功能 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: python + + from face_service import FaceService + with open('./data/00000000.jpg', 'rb') as f: + image = f.read() + fc = FaceService() + # 添加server端连接 + fc.connect('127.0.0.1:8010') + #调用server端预测 + result = fc.encode([image]) + print(result[0]) + fc.close() + +PLSC支持混合精度训练 +-------------------- + +单机8张Nvidia Tesla v100 GPU配置下,混合精度比常规单精度训练速度提升42%。 + +使用混合精度训练可以提升训练的速度,同时减少训练使用的显存开销。开启混合精度训练方法如下: + +.. code-block:: python + + from plsc import Entry + + def main(): + ins = Entry() + ins.set_mixed_precision(True) + ins.train() + if __name__ == "__main__": + main() + +在单机8张Nvidia Tesla v100 GPU配置下,对比resnet50模型单精度训练和混合精度训练的效果,混合精度训练速度可提升42%: + +.. 
list-table:: + :header-rows: 1 + + * - 模型 + - 单精度训练 + - 混合精度训练 + - 加速比 + * - ResNet50 + - 2567 images/s + - 3643 images/s + - 1.42 + + +关于混合精度训练的内容请参考: + +https://arxiv.org/abs/1710.03740 + +PLSC支持Base64格式图像数据预处理 +-------------------------------- + +实际业务中,一种常见的数据存储格式是将图像数据编码为base64格式,训练数据文件的每一行存储一张base64格式编码的图像数据和该图像的标签,并通常以制表符('\t')分隔图像数据和图像标签。 + +神经网络训练过程中,通常需要对训练数据做全局shuffle。此外,需要切分训练数据,确保每张GPU卡使用相同数量的训练数据。对Base64格式的数据做全局shuffle的开销较大,若在训练过程中执行全局shuffle,会严重影响训练速度。 + +飞桨大规模分类库内置Base64格式数据预处理工具,可以对训练数据做全局shuffle,并将训练数据均分到多个数据文件,确保数据文件的数量和训练中使用的GPU卡数相同,且每个数据文档包含相同数量的训练数据。训练效率显著提升。 + +PLSC支持fine-tuning训练时GPU卡数的动态调整 +------------------------------------------ + +我们有时需要基于预训练模型做fine-tuning这种场景下,fine-tuning阶段的训练GPU卡数和预训练阶段使用的GPU卡数可能不同,尤其是当预训练和fine-tuning是分别由不同的组织执行时。考虑全连接层参数是根据使用的GPU卡数切分的这一情形,当fine-tuning阶段和预训练阶段使用不同的GPU卡数时,在加载模型参数前,用户需要重构模型参数,以适应fine-tuning阶段的GPU卡数。为了简化用户操作,飞桨大规模分类库提供了自动化的模型参数重构功能。当fine-tuning阶段使用的GPU卡数和预训练阶段不同时,飞桨大规模分类库在加载预训练模型参数时会自动根据fine-tuning阶段使用的GPU卡数重构预训练模型参数,以适应fine-tuning阶段的GPU卡数。 + +PLSC助力百度AI口罩检测方案快速上线 +---------------------------------- + +面对疫情,百度近期攻克了戴口罩人脸识别技术难关,快速上线了AI口罩检测方案,并在地铁、园区、厂区等场所上线,高效保障防疫工作。 + +百度AI口罩检测方案采用百度最新的PyramidBox-lite检测算法,加入超过10万张口罩人脸训练数据。为了解决数百万ID数据训练问题,采用飞桨大规模分类库PLSC实现了快速训练。在准确率不变的情况下,召回率提升30%,佩戴口罩的人脸检测准确率超过99%。 + +更多飞桨PLSC的应用方法,欢迎访问飞桨PLSC项目地址: + +https://github.com/PaddlePaddle/PLSC diff --git a/doc/fluid/user_guides/tools/plsc/plsc_overview.png b/doc/fluid/user_guides/tools/plsc/plsc_overview.png new file mode 100644 index 0000000000000000000000000000000000000000..c16aab9609f32dc8d1357a0619ea40ae471fc47a Binary files /dev/null and b/doc/fluid/user_guides/tools/plsc/plsc_overview.png differ diff --git a/doc/fluid/user_guides/tools/plsc/plsc_performance.png b/doc/fluid/user_guides/tools/plsc/plsc_performance.png new file mode 100644 index 0000000000000000000000000000000000000000..a7b2ec22fe879347de8f7dc6413d554634d9d282 Binary files /dev/null and b/doc/fluid/user_guides/tools/plsc/plsc_performance.png differ diff --git a/doc/paddle/advanced_guide/addon_development/contribute-docs/tracing.jpeg b/doc/paddle/advanced_guide/addon_development/contribute-docs/tracing.jpeg new file mode 100644 index 0000000000000000000000000000000000000000..3a49fc4f8a401a9463b0157e2f38c164ca02dcc5 Binary files /dev/null and b/doc/paddle/advanced_guide/addon_development/contribute-docs/tracing.jpeg differ diff --git a/doc/paddle/advanced_guide/addon_development/contribute-docs/write_docs_cn.md b/doc/paddle/advanced_guide/addon_development/contribute-docs/write_docs_cn.md new file mode 100644 index 0000000000000000000000000000000000000000..a88838e6d94926e11fb0502ac3ec3d927a04a1e2 --- /dev/null +++ b/doc/paddle/advanced_guide/addon_development/contribute-docs/write_docs_cn.md @@ -0,0 +1,208 @@ +# 如何贡献文档 + +PaddlePaddle非常欢迎您贡献文档。如果您撰写/翻译的文档满足我们的要求,您的文档将会呈现在paddlapaddle.org网站和Github上供PaddlePaddle的用户阅读。 + +Paddle的文档主要分为以下几个模块: + +- 新手入门:包括安装说明、深度学习基础知识、学习资料等,旨在帮助用户快速安装和入门; + +- 使用指南:包括数据准备、网络配置、训练、Debug、预测部署和模型库文档,旨在为用户提供PaddlePaddle基本用法讲解; + +- 进阶使用:包括服务器端和移动端部署、如何贡献代码/文档、如何性能调优等,旨在满足开发者的需求; + +我们的文档支持[reStructured Text](http://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html)和[Markdown](https://guides.github.com/features/mastering-markdown/) (GitHub风格)格式的内容贡献。 + +撰写文档完成后,您可以使用预览工具查看文档在官网显示的效果,以验证您的文档是否能够在官网正确显示。 + + +## 如何使用预览工具 + +如果您正在修改代码文档(即API),并在Docker容器中使用PaddlePaddle,请在您相应的docker容器中执行下列步骤。因为API的文档生成器依赖于PaddlePaddle。 + +如果您只改进了文本/媒体内容(不需要安装或构建PaddlePaddle),或者正在主机上构建PaddlePaddle,请继续在主机上执行下列步骤。 + +### 1. 
Clone你希望更新或测试的相关仓库: + +首先下载完整的文档存储仓库,其中`--recurse-submodules`会同步更新FluidDoc中的submodule(所有的submodule均在`FluidDoc/external`中),以保证所有文档可以正常显示: + +``` +git clone --recurse-submodules https://github.com/PaddlePaddle/FluidDoc +``` + +其他可拉取的存储库有: + + +``` +git clone https://github.com/PaddlePaddle/book.git +git clone https://github.com/PaddlePaddle/models.git +git clone https://github.com/PaddlePaddle/Mobile.git + +``` + +您可以将这些本地副本放在电脑的任意目录下,稍后我们会在启动 PaddlePaddle.org时指定这些仓库的位置。 + +### 2. 在新目录下拉取 PaddlePaddle.org 并安装其依赖项 + +在此之前,请确认您的操作系统安装了python的依赖项 + +以ubuntu系统为例,运行: + +``` +sudo apt-get update && apt-get install -y python-dev build-essential +``` + +然后: + +``` +git clone https://github.com/PaddlePaddle/PaddlePaddle.org.git +cd PaddlePaddle.org/portal +``` + +之后需要安装依赖库,请确保在python 2.7.15 或2.7.16 环境下安装。推荐使用Anaconda或virtualenv创建合适的虚拟环境后安装依赖库。 + +安装依赖库: + +``` +pip install -r requirements.txt +``` + +**可选项**:如果你希望实现中英网站转换,以改善PaddlePaddle.org,请安装[GNU gettext](https://www.gnu.org/software/gettext/) + +### 3. 在本地运行 PaddlePaddle.org + +添加您希望加载和构建内容的目录列表(选项包括:--paddle,--book,--models,--mobile) + +运行: + +``` +./runserver --paddle +``` + +**注意:** ``为第一步中paddle副本在您本机的存储地址。 + +如果您需要处理依赖于`book`、`models`或`mobile`存储库内容的文档,您可以添加一个或多个可选项: + +``` +./runserver --paddle \ + --book /external/book \ + --models /external/models \ + --mobile /external/mobile +``` +然后:打开浏览器并导航到http://localhost:8000。 + +>*网站可能需要几秒钟才能成功加载,因为构建需要一定的时间* + +>*如果您是在docker环境下运行的这些步骤,请检查ip确保可以将端口8000映射到您的主机* + +## 贡献新文档或更新API + +所有内容都应该以[Markdown](https://guides.github.com/features/mastering-markdown/) (GitHub风格)的形式编写(尽管在文档中有一些使用.rst格式的遗留内容)。 + + +在完成安装步骤后,您还需要完成下列操作: + + - 在你开始写作之前,我们建议你回顾一下这些关于贡献内容的指南 + + --- + + **贡献新文档** + + + - 创建一个新的` .md` 文件或者在您当前操作的仓库中修改已存在的文章 + - 将新增的文档名,添加到对应的index文件中 + + --- + + **贡献或修改Python API** + + + 在编译代码的docker容器内,或主机的对应位置: + + - 运行脚本 `paddle/scripts/paddle_build.sh`(在 Paddle repo 下) + + ```bash + # 编译paddle的python库 + cd Paddle + ./paddle/scripts/paddle_docker_build.sh gen_doc_lib full + cd .. 
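+ # 说明:以上命令会在 Paddle/build/python/ 下生成 paddle 的 python 库,
+ # 预览工具依赖该库,后文“设置环境变量”一步会将 PYTHONPATH 指向这个目录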
+ ``` + + - 运行预览工具 + + ``` + # 在编译paddle的对应docker镜像中运行预览工具 + + docker run -it -v /Users/xxxx/workspace/paddlepaddle_workplace:/workplace -p 8000:8000 [images_id] /bin/bash + ``` + + > 其中`/Users/xxxx/workspace/paddlepaddle_workplace`请替换成您本机的paddle工作环境,`/workplace`请替换成您相应的 docker 下的工作环境,这一映射会保证我们同时完成编译python库、修改FluidDoc和使用预览工具。 + + > [images_id]为docker中您使用的paddlepaddle的镜像id。 + + - 设置环境变量 + + ``` + # 在docker环境中 + # 设置环境变量`PYTHONPATH`使预览工具可以找到 paddle 的 python 库 + export PYTHONPATH=/workplace/Paddle/build/python/ + ``` + + - 清理旧文件 + + ``` + # 清除历史生成的文件,如果是第一次使用预览工具可以跳过这一步 + rm -rf /workplace/FluidDoc/doc/fluid/menu.json /workplace/FluidDoc/doc/fluid/api/menu.json /tmp/docs/ /tmp/api/ + ``` + + - 启动预览工具 + + ``` + cd /workplace/PaddlePaddle.org/portal + pip install -r requirements.txt + ./runserver --paddle /workplace/FluidDoc/ + ``` + +--- + + **预览修改** + + + + 打开浏览器并导航到http://localhost:8000。 + + 在要更新的页面上,单击右上角的Refresh Content + + 进入使用文档单元后,API部分并不包含内容,希望预览API文档需要点击API目录,几分钟后您将看到生成的 API reference。 + + +## 提交修改 + +如果您希望修改代码,请在`Paddle`仓库下参考[如何贡献代码](../contribute_code/index_cn.html)执行操作。 + +如果您仅修改文档: + + - 修改的内容在`doc`文件夹内,您只需要在`FluidDoc`仓库下提交`PR` + + - 修改的内容在`external`文件夹内: + + 1.在您修改的仓库下提交PR。这是因为:`FluidDoc`仓库只是一个包装器,将其他仓库的链接(git术语的“submodule”)集合在了一起。 + + 2.当您的修改被认可后,更新FluidDoc中对应的`submodule`到源仓库最新的commit-id。 + + > 例如,您更新了book仓库中的develop分支下的文档: + + + > - 进入`FluidDoc/external/book`目录 + > - 更新 commit-id 到最新的提交:`git pull origin develop` + > - 在`FluidDoc`中提交你的修改 + + 3.在`FluidDoc`仓库下为您的修改提交PR + +提交修改与PR的步骤可以参考[如何贡献代码](../contribute_code/index_cn.html) + +## 帮助改进预览工具 + +我们非常欢迎您对平台和支持内容的各个方面做出贡献,以便更好地呈现这些内容。您可以Fork或Clone这个存储库,或者提出问题并提供反馈,以及在issues上提交bug信息。详细内容请参考[开发指南](https://github.com/PaddlePaddle/PaddlePaddle.org/blob/develop/DEVELOPING.md)。 + +## 版权和许可 +PaddlePaddle.org在Apache-2.0的许可下提供。 diff --git a/doc/paddle/advanced_guide/addon_development/contribute-docs/write_docs_en.md b/doc/paddle/advanced_guide/addon_development/contribute-docs/write_docs_en.md new file mode 100644 index 0000000000000000000000000000000000000000..98b9d61df5ce64263a9fd4d2d41af5bf0c8eb266 --- /dev/null +++ b/doc/paddle/advanced_guide/addon_development/contribute-docs/write_docs_en.md @@ -0,0 +1,209 @@ +# How to contribute documentation + +PaddlePaddle encourages you to contribute documentation. If your written or translated documents meet our requirements, your documents will be available on the paddlapaddle.org website and on Github for PaddlePaddle users. + +Paddle's documentation is mainly divided into the following modules: + +- Beginner's Guide: It includes installation instructions, basic knowledge of deep learning, learning materials, etc., designed to help users get started and inspired; + +- User Guides: It includes data preparation, network configuration, training, debug, predictive deployment, and model library to provide users with a tutorial of basic operations in PaddlePaddle; + +- Advanced User Guides: It includes server-side and mobile-end deployment, how to contribute code or documentation, how to optimize performance, etc., designed to meet the needs of developers; + +Our documentation supports contributions in format of [reStructured Text](http://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html) and [Markdown](https://guides.github.com/features/ Mastering-markdown/) (GitHub style) . + +Once the document is written, you can use the preview tool to check how the document appears on the official website to verify that your document is displayed correctly on the official website. 
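+For quick reference, the preview workflow described in the sections below can be condensed into the following shell sketch. Treat it only as an outline under the assumptions noted in the comments (a Python 2.7.15/2.7.16 environment, and `<path-to-FluidDoc>` standing in for the location of your local FluidDoc copy); the sections that follow explain each step and its options in detail.
+
+```bash
+# Fetch the documentation repository together with its submodules
+git clone --recurse-submodules https://github.com/PaddlePaddle/FluidDoc
+
+# Fetch the preview tool and install its dependencies
+git clone https://github.com/PaddlePaddle/PaddlePaddle.org.git
+cd PaddlePaddle.org/portal
+pip install -r requirements.txt
+
+# Serve the documentation locally; <path-to-FluidDoc> is a placeholder for your local copy
+./runserver --paddle <path-to-FluidDoc>
+```
+
+Once the server starts, open http://localhost:8000 in your browser to preview the pages.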
+ + +## How to use the preview tool + +If you are modifying a code document (i.e. API) and using PaddlePaddle in a Docker container, perform the following steps in your corresponding docker container. Because the API's document generator relies on PaddlePaddle. + +If you have only improved text or media content (you don't need to install or build PaddlePaddle), or are building PaddlePaddle on the host machine, please continue with the following steps on the host. + +### 1. Clone related repository that you wish to update or test: + +First download the full document repository, where `--recurse-submodules` will update the submodules in FluidDoc (all submodules are in `FluidDoc/external`) to ensure that all documents will be displayed properly: + +``` +git clone --recurse-submodules https://github.com/PaddlePaddle/FluidDoc +``` + +Other pullable repositories are: + + +``` +git clone https://github.com/PaddlePaddle/book.git +git clone https://github.com/PaddlePaddle/models.git +git clone https://github.com/PaddlePaddle/Mobile.git + +``` + +You can place these local copies in any directory on your computer, and we will specify the location of these repositories when we start PaddlePaddle.org. + +### 2. Pull PaddlePaddle.org in a new directory and install its dependencies + +Before doing this, please make sure your operating system has python dependencies installed. + +Take the ubuntu system as an example, run: + +``` +sudo apt-get update && apt-get install -y python-dev build-essential +``` + +Then: + +``` +git clone https://github.com/PaddlePaddle/PaddlePaddle.org.git +cd PaddlePaddle.org/portal +``` + +Then install requirements. Please make sure that you install with python 2.7.15 or 2.7.16. We recommend you to use Anaconda or virtualenv to create an appropriate virtual environment first. + +Install requirements: + + +``` +pip install -r requirements.txt +``` + +**Optional**: If you wish to implement a Chinese-English website conversion to improve PaddlePaddle.org, please install [GNU gettext](https://www.gnu.org/software/gettext/) + +### 3. Run PaddlePaddle.org locally + +Add a list of options which you want to load and build (options include: --paddle, --book, --models, --mobile) + +run: + +``` +./runserver --paddle +``` + +**Note:** `` is the directory of the local FluidDoc copy specified in the first step. + +If you need to work with documents that depend on the contents of the `book`, `models`, or `mobile` repositories, you can add one or more options: + +``` +./runserver --paddle \ + --book /external/book \ + --models /external/models \ + --mobile /external/mobile +``` +Then: open your browser and navigate to http://localhost:8000. + +>* The site may take a few seconds to load because the building takes a certain amount of time* + +>* If you are running these steps in a docker environment, please check ip to make sure port 8000 can be mapped to your host* + +## Contribute new documentation or update API + +All content should be written in [Markdown](https://guides.github.com/features/mastering-markdown/) (GitHub style) (although there are some legacy content with the .rst format in the documentation). + + +After completing the installation, you will also need to do: + + - Before you start writing, we suggest that you review the following tips for contributing content. 
+ + --- + + **Contribute new documents** + + + - Create a new `.md` file or modify an existing article in the repository you are currently working on + - Add the new document name to the corresponding index file + + --- + + **Contribute or modify the Python API** + + + In the docker container that compiles the code, or the corresponding location in the host machine: + + - Run the script `paddle/scripts/paddle_build.sh` (under Paddle repo) + + ```bash + # Compile paddle's python library + cd Paddle + ./paddle/scripts/paddle_docker_build.sh gen_doc_lib full + cd .. + ``` + + - Run the preview tool + + ``` + # Run the preview tool in docker image which compiled paddle + + docker run -it -v /Users/xxxx/workspace/paddlepaddle_workplace:/workplace -p 8000:8000 [images_id] /bin/bash + ``` + + > Where `/Users/xxxx/workspace/paddlepaddle_workplace` should be replaced with your local host paddle workspace directory, `/workplace` should be replaced with the working directory in the docker. This mapping will ensure that we compile the python library, modify FluidDoc and use the preview tool at the same time. + + > [images_id] is the image id of the paddlepaddle you use in docker. + + - Set environment variables + + ``` + # In docker environment + # Set the environment variable `PYTHONPATH` so that the preview tool can find the python library for paddle + export PYTHONPATH=/workplace/Paddle/build/python/ + ``` + + - Clean up old files + + ``` + # Clear the previously generated file, if you are using the preview tool for the first time, you can skip this step + rm -rf /workplace/FluidDoc/doc/fluid/menu.json /workplace/FluidDoc/doc/fluid/api/menu.json /tmp/docs/ /tmp/api/ + ``` + + - Launch preview tool + + ``` + cd /workplace/PaddlePaddle.org/portal + pip install -r requirements.txt + ./runserver --paddle /workplace/FluidDoc/ + ``` + +--- + + **Preview modification** + + + + Open your browser and navigate to http://localhost:8000 . + + On the page to be updated, click Refresh Content at the top right corner. + + After entering documentation page, the API section does not contain content. To preview the API document, please click on the API directory and you will see the generated API reference after a few minutes. + + +## Submit changes + +If you wish to modify the code, please refer to [How to contribute code](../contribute_code/index_en.html) under the `Paddle` repository. + +If you just modify the document: + + - The modified content is in the `doc` folder, you only need to submit `PR` in the `FluidDoc` repository. + + - The modified content is in the `external` folder: + + 1. Submit the PR in the repostory you modified. This is because the `FluidDoc` repository is just a wrapper that brings together the links of other repositories (namely, the "submodules" in git context). + + 2. When your changes are approved, update the corresponding `submodule` in FluidDoc to the latest commit-id of the source repository. + + > For example, you updated the document on the develop branch in the book repository: + + + > - Go to the `FluidDoc/external/book` directory + > - Update commit-id to the latest commit: `git pull origin develop` + > - Commit your changes in `FluidDoc` + +3. Pull Request for your changes in the `FluidDoc` repository + +The steps to submit changes and PR can refer to [How to contribute code](../contribute_code/index_en.html) + +## Help improve preview tool + +We encourage your contributions to all aspects of the platform and supportive contents. 
You can Fork or Clone repository, ask questions and feedback, or submit bugs on issues. For details, please refer to the [Development Guide](https://github.com/PaddlePaddle/PaddlePaddle.org/blob/develop/DEVELOPING.md). + +## Copyright and Licensing +PaddlePaddle.org is available under the Apache-2.0 license. diff --git a/doc/paddle/advanced_guide/addon_development/contribute_code/faq.rst b/doc/paddle/advanced_guide/addon_development/contribute_code/faq.rst new file mode 100644 index 0000000000000000000000000000000000000000..1c98336d19832ee5bf3ad063b05cf987180923ad --- /dev/null +++ b/doc/paddle/advanced_guide/addon_development/contribute_code/faq.rst @@ -0,0 +1,42 @@ +.. _contribute_to_paddle_faq: + +################### +FAQ +################### + +.. contents:: + +1. CLA签署不成功,怎么办? +--------------------------- + +由于 `CLA `_ 是第三方开源库,有时候会不稳定。如果确定自己已签署CLA,但CLA没触发成功,可尝试: + +* 关闭并重新开启本PR,来重新触发CLA。点击 :code:`Close pull request` ,再点击 :code:`Reopen pull request` ,并等待几分钟。 +* 如果上述操作重复2次仍未生效,请重新提一个PR或评论区留言。 + +2. CI没有触发,怎么办? +------------------------ + +* 请在commit信息中添加正确的CI触发规则: + + * develop分支请添加 :code:`test=develop` + * release分支请添加如 :code:`test=release/1.4` 来触发release/1.4分支 + * 文档预览请添加 :code:`test=document_preview` + +* 该CI触发规则以commit为单位,即对同一个PR来说,不管前面的commit是否已经添加,如果新commit想继续触发CI,那么仍然需要添加。 +* 添加CI触发规则后,仍有部分CI没有触发:请关闭并重新开启本PR,来重新触发CI。 + + +3. CI随机挂,即错误信息与本PR无关,怎么办? +-------------------------------------- + +由于develop分支代码的不稳定性,CI可能会随机挂。 +如果确定CI错误和本PR无关,请在评论区贴上错误截图和错误链接。 + +4. 如何修改API.spec? +----------------------- + +为了保证API接口/文档的稳定性,我们对API进行了监控,即API.spec文件。 +修改方法请参考 `diff_api.py `_ 。 + +**注意**:提交PR后请查看下diff,不要改到非本PR修改的API上。 diff --git a/doc/paddle/advanced_guide/addon_development/contribute_code/img/cla_unsigned.png b/doc/paddle/advanced_guide/addon_development/contribute_code/img/cla_unsigned.png new file mode 100644 index 0000000000000000000000000000000000000000..7223f42f360e796e53aef025c48a3256db4e0fcf Binary files /dev/null and b/doc/paddle/advanced_guide/addon_development/contribute_code/img/cla_unsigned.png differ diff --git a/doc/paddle/advanced_guide/addon_development/contribute_code/img/sign_cla.png b/doc/paddle/advanced_guide/addon_development/contribute_code/img/sign_cla.png new file mode 100644 index 0000000000000000000000000000000000000000..6d0ed79474fbdd4dc58adf42ed6a18586a656eaa Binary files /dev/null and b/doc/paddle/advanced_guide/addon_development/contribute_code/img/sign_cla.png differ diff --git a/doc/paddle/advanced_guide/addon_development/contribute_code/index_cn.rst b/doc/paddle/advanced_guide/addon_development/contribute_code/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7d0ae291fa93cab2120677da9c14ab568e348de0 --- /dev/null +++ b/doc/paddle/advanced_guide/addon_development/contribute_code/index_cn.rst @@ -0,0 +1,10 @@ +############ +如何贡献代码 +############ + +.. toctree:: + :maxdepth: 1 + + local_dev_guide.md + submit_pr_guide.md + faq.rst diff --git a/doc/paddle/advanced_guide/addon_development/contribute_code/index_en.rst b/doc/paddle/advanced_guide/addon_development/contribute_code/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..aad872200f5d43199d3fda76b1a557851dbd7c08 --- /dev/null +++ b/doc/paddle/advanced_guide/addon_development/contribute_code/index_en.rst @@ -0,0 +1,9 @@ +################################# +How to contribute codes to Paddle +################################# + +.. 
toctree:: + :maxdepth: 1 + + local_dev_guide_en.md + submit_pr_guide_en.md diff --git a/doc/paddle/advanced_guide/addon_development/contribute_code/local_dev_guide.md b/doc/paddle/advanced_guide/addon_development/contribute_code/local_dev_guide.md new file mode 100644 index 0000000000000000000000000000000000000000..a381ebe8ad3cc5cf1fc9a16628fb09926b8749cd --- /dev/null +++ b/doc/paddle/advanced_guide/addon_development/contribute_code/local_dev_guide.md @@ -0,0 +1,156 @@ +# 本地开发指南 + +本文将指导您如何在本地进行代码开发 + +## 代码要求 +- 代码注释请遵守 [Doxygen](http://www.doxygen.nl/) 的样式。 +- 确保编译器选项 `WITH_STYLE_CHECK` 已打开,并且编译能通过代码样式检查。 +- 所有代码必须具有单元测试。 +- 通过所有单元测试。 +- 请遵守[提交代码的一些约定](#提交代码的一些约定)。 + +以下教程将指导您提交代码。 +## [Fork](https://help.github.com/articles/fork-a-repo/) + +跳转到[PaddlePaddle](https://github.com/PaddlePaddle/Paddle) GitHub首页,然后单击 `Fork` 按钮,生成自己目录下的仓库,比如 。 + +## 克隆(Clone) + +将远程仓库 clone 到本地: + +```bash +➜ git clone https://github.com/USERNAME/Paddle +➜ cd Paddle +``` + + +## 创建本地分支 + +Paddle 目前使用[Git流分支模型](http://nvie.com/posts/a-successful-git-branching-model/)进行开发,测试,发行和维护,具体请参考 [Paddle 分支规范](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/others/releasing_process.md)。 + +所有的 feature 和 bug fix 的开发工作都应该在一个新的分支上完成,一般从 `develop` 分支上创建新分支。 + +使用 `git checkout -b` 创建并切换到新分支。 + +```bash +➜ git checkout -b my-cool-stuff +``` + +值得注意的是,在 checkout 之前,需要保持当前分支目录 clean,否则会把 untracked 的文件也带到新分支上,这可以通过 `git status` 查看。 + +## 使用 `pre-commit` 钩子 + +Paddle 开发人员使用 [pre-commit](http://pre-commit.com/) 工具来管理 Git 预提交钩子。 它可以帮助我们格式化源代码(C++,Python),在提交(commit)前自动检查一些基本事宜(如每个文件只有一个 EOL,Git 中不要添加大文件等)。 + +`pre-commit`测试是 Travis-CI 中单元测试的一部分,不满足钩子的 PR 不能被提交到 Paddle,首先安装并在当前目录运行它: + +```bash +➜ pip install pre-commit +➜ pre-commit install +``` + +Paddle 使用 `clang-format` 来调整 C/C++ 源代码格式,请确保 `clang-format` 版本在 3.8 以上。 + +注:通过`pip install pre-commit`和`conda install -c conda-forge pre-commit`安装的`yapf`稍有不同的,Paddle 开发人员使用的是`pip install pre-commit`。 + +## 开始开发 + +在本例中,我删除了 README.md 中的一行,并创建了一个新文件。 + +通过 `git status` 查看当前状态,这会提示当前目录的一些变化,同时也可以通过 `git diff` 查看文件具体被修改的内容。 + +```bash +➜ git status +On branch test +Changes not staged for commit: + (use "git add ..." to update what will be committed) + (use "git checkout -- ..." to discard changes in working directory) + + modified: README.md + +Untracked files: + (use "git add ..." to include in what will be committed) + + test + +no changes added to commit (use "git add" and/or "git commit -a") +``` + +## 编译和单元测试 + +关于编译 PaddlePaddle 的源码,请参见[从源码编译](../../../install/compile/fromsource.html) 选择对应的操作系统。 +关于单元测试,可参考[Op单元测试](../new_op/new_op.html#id7) 的运行方法。 + +## 提交(commit) + +接下来我们取消对 README.md 文件的改变,然后提交新添加的 test 文件。 + +```bash +➜ git checkout -- README.md +➜ git status +On branch test +Untracked files: + (use "git add ..." 
to include in what will be committed) + + test + +nothing added to commit but untracked files present (use "git add" to track) +➜ git add test +``` + +Git 每次提交代码,都需要写提交说明,这可以让其他人知道这次提交做了哪些改变,这可以通过`git commit` 完成。 + +```bash +➜ git commit +CRLF end-lines remover...............................(no files to check)Skipped +yapf.................................................(no files to check)Skipped +Check for added large files..............................................Passed +Check for merge conflicts................................................Passed +Check for broken symlinks................................................Passed +Detect Private Key...................................(no files to check)Skipped +Fix End of Files.....................................(no files to check)Skipped +clang-formater.......................................(no files to check)Skipped +[my-cool-stuff c703c041] add test file + 1 file changed, 0 insertions(+), 0 deletions(-) + create mode 100644 233 +``` + + +## 保持本地仓库最新 + +在准备发起 Pull Request 之前,需要同步原仓库()最新的代码。 + +首先通过 `git remote` 查看当前远程仓库的名字。 + +```bash +➜ git remote +origin +➜ git remote -v +origin https://github.com/USERNAME/Paddle (fetch) +origin https://github.com/USERNAME/Paddle (push) +``` + +这里 origin 是我们 clone 的远程仓库的名字,也就是自己用户名下的 Paddle,接下来我们创建一个原始 Paddle 仓库的远程主机,命名为 upstream。 + +```bash +➜ git remote add upstream https://github.com/PaddlePaddle/Paddle +➜ git remote +origin +upstream +``` + +获取 upstream 的最新代码并更新当前分支。 + +```bash +➜ git fetch upstream +➜ git pull upstream develop +``` + +## Push 到远程仓库 + +将本地的修改推送到 GitHub 上,也就是 https://github.com/USERNAME/Paddle。 + +```bash +# 推送到远程仓库 origin 的 my-cool-stuff 分支上 +➜ git push origin my-cool-stuff +``` diff --git a/doc/paddle/advanced_guide/addon_development/contribute_code/local_dev_guide_en.md b/doc/paddle/advanced_guide/addon_development/contribute_code/local_dev_guide_en.md new file mode 100644 index 0000000000000000000000000000000000000000..52c04f2341a5cbb0da9cd7e4510b80657a7fd0ab --- /dev/null +++ b/doc/paddle/advanced_guide/addon_development/contribute_code/local_dev_guide_en.md @@ -0,0 +1,154 @@ +# Guide of local development + +You will learn how to develop programs in local environment under the guidelines of this document. + +## Requirements of coding +- Please refer to the coding comment format of [Doxygen](http://www.doxygen.nl/) +- Make sure that option of builder `WITH_STYLE_CHECK` is on and the build could pass through the code style check. +- Unit test is needed for all codes. +- Pass through all unit tests. +- Please follow [regulations of submitting codes](#regulations of submitting codes). + +The following guidiance tells you how to submit code. +## [Fork](https://help.github.com/articles/fork-a-repo/) + +Transfer to the home page of Github [PaddlePaddle](https://github.com/PaddlePaddle/Paddle) ,and then click button `Fork` to generate the git under your own file directory,such as 。 + +## Clone + +Clone remote git to local: + +```bash +➜ git clone https://github.com/USERNAME/Paddle +➜ cd Paddle +``` + + +## Create local branch + +At present [Git stream branch model](http://nvie.com/posts/a-successful-git-branching-model/) is applied to Paddle to undergo task of development,test,release and maintenance.Please refer to [branch regulation of Paddle](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/others/releasing_process.md) about details。 + +All development tasks of feature and bug fix should be finished in a new branch which is extended from `develop` branch. 
+ +Create and switch to a new branch with command `git checkout -b`. + + +```bash +➜ git checkout -b my-cool-stuff +``` + +It is worth noting that before the checkout, you need to keep the current branch directory clean, otherwise the untracked file will be brought to the new branch, which can be viewed by `git status` . + + +## Use `pre-commit` hook + +Paddle developers use the [pre-commit](http://pre-commit.com/) tool to manage Git pre-commit hooks. It helps us format the source code (C++, Python) and automatically check some basic things before committing (such as having only one EOL per file, not adding large files in Git, etc.). + +The `pre-commit` test is part of the unit test in Travis-CI. A PR that does not satisfy the hook cannot be submitted to Paddle. Install `pre-commit` first and then run it in current directory: + + +```bash +➜ pip install pre-commit +➜ pre-commit install +``` + +Paddle modify the format of C/C++ source code with `clang-format` .Make sure the version of `clang-format` is above 3.8. + +Note:There are differences between the installation of `yapf` with `pip install pre-commit` and that with `conda install -c conda-forge pre-commit` . Paddle developers use `pip install pre-commit` 。 + +## Start development + +I delete a line of README.md and create a new file in the case. + +View the current state via `git status` , which will prompt some changes to the current directory, and you can also view the file's specific changes via `git diff` . + + +```bash +➜ git status +On branch test +Changes not staged for commit: + (use "git add ..." to update what will be committed) + (use "git checkout -- ..." to discard changes in working directory) + modified: README.md +Untracked files: + (use "git add ..." to include in what will be committed) + test +no changes added to commit (use "git add" and/or "git commit -a") +``` + +## Build and test + +Please refer to [Compile From Source Code](../../../install/compile/fromsource_en.html) about more information of building PaddlePaddle source codes. +Please refer to [Op Unit Tests](../new_op/new_op_en.html#unit-tests) about more information of running unit tests. + +## Commit + +Next we cancel the modification of README.md,and submit new added test file. + +```bash +➜ git checkout -- README.md +➜ git status +On branch test +Untracked files: + (use "git add ..." to include in what will be committed) + test +nothing added to commit but untracked files present (use "git add" to track) +➜ git add test +``` + +It's required that the commit message is also given on every Git commit, through which other developers will be notified of what changes have been made. Type `git commit` to realize it. 
+ +```bash +➜ git commit +CRLF end-lines remover...............................(no files to check)Skipped +yapf.................................................(no files to check)Skipped +Check for added large files..............................................Passed +Check for merge conflicts................................................Passed +Check for broken symlinks................................................Passed +Detect Private Key...................................(no files to check)Skipped +Fix End of Files.....................................(no files to check)Skipped +clang-formater.......................................(no files to check)Skipped +[my-cool-stuff c703c041] add test file + 1 file changed, 0 insertions(+), 0 deletions(-) + create mode 100644 233 +``` + + +## Keep the latest local repository + +It needs to keep up with the latest code of original repository()before Pull Request. + +Check the name of current remote repository with `git remote`. + +```bash +➜ git remote +origin +➜ git remote -v +origin https://github.com/USERNAME/Paddle (fetch) +origin https://github.com/USERNAME/Paddle (push) +``` + +origin is the name of remote repository that we clone,which is also the Paddle under your own account. Next we create a remote host of an original Paddle and name it upstream. + +```bash +➜ git remote add upstream https://github.com/PaddlePaddle/Paddle +➜ git remote +origin +upstream +``` + +Get the latest code of upstream and update current branch. + +```bash +➜ git fetch upstream +➜ git pull upstream develop +``` + +## Push to remote repository + +Push local modification to GitHub(https://github.com/USERNAME/Paddle). + +```bash +# submit it to remote git the branch my-cool-stuff of origin +➜ git push origin my-cool-stuff +``` diff --git a/doc/paddle/advanced_guide/addon_development/contribute_code/submit_pr_guide.md b/doc/paddle/advanced_guide/addon_development/contribute_code/submit_pr_guide.md new file mode 100644 index 0000000000000000000000000000000000000000..06cb8c07caa63adbe3198d69c3727b2a0ba2ba11 --- /dev/null +++ b/doc/paddle/advanced_guide/addon_development/contribute_code/submit_pr_guide.md @@ -0,0 +1,111 @@ +# 提交PR注意事项 + +## 建立 Issue 并完成 Pull Request + +建立一个 Issue 描述问题,并记录它的编号。 + +切换到所建分支,然后点击 `New pull request`。 + +screen shot 2017-04-26 at 9 09 28 pm + +选择目标分支: + +screen shot 2017-04-26 at 9 11 52 pm + +在 PR 的描述说明中,填写 `resolve #Issue编号` 可以在这个 PR 被 merge 后,自动关闭对应的 Issue,具体请见[这里](https://help.github.com/articles/closing-issues-via-commit-messages/)。 + +接下来等待 review,如果有需要修改的地方,参照上述步骤更新 origin 中的对应分支即可。 + +## 签署CLA协议和通过单元测试 + +### 签署CLA + +在首次向PaddlePaddle提交Pull Request时,您需要您签署一次CLA(Contributor License Agreement)协议,以保证您的代码可以被合入,具体签署方式如下: + +- 请您查看PR中的Check部分,找到license/cla,并点击右侧detail,进入CLA网站 + +
+ (截图:PR 的 Check 列表中 license/cla 检查项的示意图)
+ +- 请您点击CLA网站中的“Sign in with GitHub to agree”,点击完成后将会跳转回您的Pull Request页面 + +
+ (截图:CLA 网站的“Sign in with GitHub to agree”签署页面)
+ + +### 通过单元测试 + +您在Pull Request中每提交一次新的commit后,会触发CI单元测试,请确认您的commit message中已加入必要的说明,请见[提交(commit)](local_dev_guide.html#permalink-8--commit-) + +请您关注您Pull Request中的CI单元测试进程,它将会在几个小时内完成 + +您仅需要关注和自己提交的分支相关的CI项目,例如您向develop分支提交代码,则无需关注release/1.1一栏是否通过测试 + +当所需的测试后都出现了绿色的对勾,表示您本次commit通过了各项单元测试 + +如果所需的测试后出现了红色叉号,代表您本次的commit未通过某项单元测试,在这种情况下,请您点击detail查看报错详情,并将报错原因截图,以评论的方式添加在您的Pull Request中,我们的工作人员将帮您查看 + + +## 删除远程分支 + +在 PR 被 merge 进主仓库后,我们可以在 PR 的页面删除远程仓库的分支。 + +screen shot 2017-04-26 at 9 18 24 pm + +也可以使用 `git push origin :分支名` 删除远程分支,如: + +```bash +➜ git push origin :my-cool-stuff +``` + +## 删除本地分支 + +最后,删除本地分支。 + +```bash +# 切换到 develop 分支 +➜ git checkout develop + +# 删除 my-cool-stuff 分支 +➜ git branch -D my-cool-stuff +``` + +至此,我们就完成了一次代码贡献的过程。 + +## 提交代码的一些约定 + +为了使评审人在评审代码时更好地专注于代码本身,请您每次提交代码时,遵守以下约定: + +1)请保证Travis-CI 中单元测试能顺利通过。如果没过,说明提交的代码存在问题,评审人一般不做评审。 + +2)提交PUll Request前: + +- 请注意commit的数量: + +原因:如果仅仅修改一个文件但提交了十几个commit,每个commit只做了少量的修改,这会给评审人带来很大困扰。评审人需要逐一查看每个commit才能知道做了哪些修改,且不排除commit之间的修改存在相互覆盖的情况。 + +建议:每次提交时,保持尽量少的commit,可以通过`git commit --amend`补充上次的commit。对已经Push到远程仓库的多个commit,可以参考[squash commits after push](http://stackoverflow.com/questions/5667884/how-to-squash-commits-in-git-after-they-have-been-pushed)。 + +- 请注意每个commit的名称:应能反映当前commit的内容,不能太随意。 + +3)如果解决了某个Issue的问题,请在该PUll Request的**第一个**评论框中加上:`fix #issue_number`,这样当该PUll Request被合并后,会自动关闭对应的Issue。关键词包括:close, closes, closed, fix, fixes, fixed, resolve, resolves, resolved,请选择合适的词汇。详细可参考[Closing issues via commit messages](https://help.github.com/articles/closing-issues-via-commit-messages)。 + +此外,在回复评审人意见时,请您遵守以下约定: + +1)评审人的每个意见都必须回复(这是开源社区的基本礼貌,别人帮了忙,应该说谢谢): + + - 对评审意见同意且按其修改完的,给个简单的`Done`即可; + + - 对评审意见不同意的,请给出您自己的反驳理由。 + +2)如果评审意见比较多: + + - 请给出总体的修改情况。 + + - 请采用[start a review](https://help.github.com/articles/reviewing-proposed-changes-in-a-pull-request/)进行回复,而非直接回复的方式。原因是每个回复都会发送一封邮件,会造成邮件灾难。 diff --git a/doc/paddle/advanced_guide/addon_development/contribute_code/submit_pr_guide_en.md b/doc/paddle/advanced_guide/addon_development/contribute_code/submit_pr_guide_en.md new file mode 100644 index 0000000000000000000000000000000000000000..ee28c9f91eb62e710f58342044277a45898ee7eb --- /dev/null +++ b/doc/paddle/advanced_guide/addon_development/contribute_code/submit_pr_guide_en.md @@ -0,0 +1,110 @@ +# Guide of submitting PR to Github + +## Create an Issue and finish Pull Request + +Create an Issue to describe your problem and keep its number. + +Switch to the branch you have created and click `New pull request`。 + +screen shot 2017-04-26 at 9 09 28 pm + +Switch to targeted branch: + +screen shot 2017-04-26 at 9 11 52 pm + +A note of `resolve #Issue number` in PR description results in automatic close of corresponding Issue after the merge of PR.More details can be viewed [here](https://help.github.com/articles/closing-issues-via-commit-messages/)。 + +Then please wait for review.If there is any need to make a modification,you can update corresponding branch in origin following the steps above. + +## Sign CLA and pass unit tests + +### Sign CLA + +For the first time to submit Pull Request,you need to sign CLA(Contributor License Agreement) to ensure merge of your code.Specific steps are listed as follows: + +- Please check the Check in PR to find license/cla and click detail on the right to change into CLA website. + +
+ + + +
+ +- Please click “Sign in with GitHub to agree” on the CLA website. After you click it, you will be redirected back to your Pull Request page. + +
+ + + +
+ + +### Pass unit tests + +Every new commit in your Pull Request will trigger CI unit tests,so please make sure that necessary comments have been included in your commit message.Please refer to [commit](local_dev_guide.html#permalink-8--commit-) + +Please note the procedure of CI unit tests in your Pull Request which will be finished in several hours. + +You only need to focus on CI projects associated with your submitted branch.For example,there is no need to check whether release/1.1 pass test or not if you submit code to develop branch. + +Green ticks after all tests means that your commit has passed all unit tests. + +Red cross after the tests means your commit hasn't passed certain unit test.Please click detail to view bug details and make a screenshot of bug,then add it as a comment in your Pull Request.Our stuff will help you check it. + + +## Delete remote branch + +We can delete branches of remote repository in PR page after your PR is successfully merged into master repository. + +screen shot 2017-04-26 at 9 18 24 pm + +We can also delete the branch of remote repository with `git push origin :the_branch_name`,such as: + +```bash +➜ git push origin :my-cool-stuff +``` + +## Delete local branch + +Finally,we delete local branch + +```bash +# Switch to develop branch +➜ git checkout develop +# delete my-cool-stuff branch +➜ git branch -D my-cool-stuff +``` + +And now we finish a full process of code contribution + +## Certain regulations about submitting code + +In order that reviewers focus on code in the code review,please follow these rules every time you submit your code: + +1)Make sure that unit tests in Travis-CI pass through successfully.If it fails,it means problems have been found in submitted code which will not be reviewed by reviewer. + +2)Before the submit of PUll Request: + +- Please note the number of commit: + +Reason:It will bother reviewers a lot if a dozen of commits are submitted after modification of only one file and only a few modifications are updated in every commit.Reviewers have to check commit one by one to figure out the modification.And sometimes it needs to take the overlap among commits into consideration. + +Suggestion:Keep commit concise as much as possible at every submit.You can make a supplyment to the previous commit with `git commit --amend`.About several commits having been pushed to remote repository,you can refer to [squash commits after push](http://stackoverflow.com/questions/5667884/how-to-squash-commits-in-git-after-they-have-been-pushed)。 + +- Pay attention to the name of every commit:It would be better to abstract the content of present commit and be not too arbitrary. + +3)If you have tackled with problems of an Issue,please add `fix #issue_number` to the *first* comment area of PULL Request.Then the corresponding Issue will be closed automatically after the merge of PULL Request.Keywords are including:close, closes, closed, fix, fixes, fixed, resolve, resolves, resolved.Please select appropriate word.Please refer to [Closing issues via commit messages](https://help.github.com/articles/closing-issues-via-commit-messages) for more details. + +In addition,please follow the following regulations in response to the suggestion of reviewers: + +1)A reply to every comment of reviewers(It's a fundamental complimentary conduct in open source community.An expression of appreciation is a need for help from others): + + - If you adopt the suggestion of reviewer and make a modification accordingly, it's courteous to reply with a simple `Done` . 
+ + - Please clarify your reason to the disagreenment + +2)If there are many suggestions + + - Please show general modification + + - Please follow [start a review](https://help.github.com/articles/reviewing-proposed-changes-in-a-pull-request/) to give your reply,instead of directly replying for that every comment will result in sending an email causing email disaster. diff --git a/doc/paddle/advanced_guide/addon_development/design_idea/fluid_design_idea.md b/doc/paddle/advanced_guide/addon_development/design_idea/fluid_design_idea.md new file mode 100644 index 0000000000000000000000000000000000000000..d6ef41bb31335dcea999bf8b37e45be2b9b86e13 --- /dev/null +++ b/doc/paddle/advanced_guide/addon_development/design_idea/fluid_design_idea.md @@ -0,0 +1,364 @@ +# 设计思想 + +## 简介 + +本篇文档主要介绍飞桨(PaddlePaddle,以下简称Paddle)底层的设计思想,帮助用户更好的理解框架运作过程。 + +阅读本文档,您将了解: + +- Paddle 内部的执行流程 +- Program 如何描述模型 +- Executor 如何执行运算 + + +## 1. Paddle内部执行流程 + +Paddle使用一种编译器式的执行流程,分为编译时和运行时两个部分,具体包括:编译器定义 Program ,创建Executor 运行 Program 。 + +本地训练任务执行流程图如下所示: +

+ +

+ + 1. 编译时,用户编写一段python程序,通过调用 Paddle 提供的算子,向一段 Program 中添加变量(Tensor)以及对变量的操作(Operators 或者 Layers)。用户只需要描述核心的前向计算,不需要关心反向计算、分布式下以及异构设备下如何计算。 + + 2. 原始的 Program 在框架内部转换为中间描述语言: `ProgramDesc`。 + + 3. `Transpiler` 接受一段 `ProgramDesc` ,输出一段变化后的 `ProgramDesc` ,作为后端 `Executor` 最终需要执行的 Program 。 `Transpiler` 并非必需步骤。 + + 4. 执行 `ProgramDesc` 中定义的 Operator(可以类比为程序语言中的指令),在执行过程中会为 Operator 创建所需的输入输出并进行管理。 + + +## 2. Program设计思想 + +用户完成网络定义后,一段 Paddle 程序中通常存在 2 个 Program: + + 1. fluid.default_startup_program:定义了模型参数初始化、优化器参数初始化、reader初始化等各种操作。 + + default_startup_program 可以由框架自动生成,使用时无需显式地创建 + + 如果调用修改了参数的默认初始化方式,框架会自动的将相关的修改加入default_startup_program + + 2. fluid.default_main_program :定义了神经网络模型,前向反向计算,以及模型参数更新、优化器参数更新等各种操作。 + + 使用Paddle的核心就是构建起 default_main_program + + + +### Programs and Blocks +Paddle 的 Program 的基本结构是一些嵌套 blocks,形式上类似一段 C++ 或 Java 程序。 + +blocks中包含: + +- 本地变量的定义 +- 一系列的operator + +block的概念与通用程序一致,例如在下列这段C++代码中包含三个block: + +``` cpp +#include + +int main() { + int x = 5; // block 0 + int y = 4; // block 0 + int out; // block 0 + + if (x < y) { // block 0 + out = 1; // block 1 + } else { + out = 0; // block 2 + } + + std::cout << out << std::endl; + return 0; +} +``` + +类似的,在下列 Paddle 的 Program 包含3段block: + +```python +import paddle.fluid as fluid + +x = fluid.data(name='x', shape=[1], dtype='int64') # block 0 +y = fluid.data(name='y', shape=[1], dtype='int64') # block 0 + +def true_block(): + return fluid.layers.fill_constant(dtype='int64', value=1, shape=[1]) # block 1 + +def false_block(): + return fluid.layers.fill_constant(dtype='int64', value=0, shape=[1]) # block 2 + +condition = fluid.layers.less_than(x, y) # block 0 + +out = fluid.layers.cond(condition, true_block, false_block) # block 0 +``` +### BlockDesc and ProgramDesc + +用户描述的block与program信息在Paddle中以[protobuf](https://en.wikipedia.org/wiki/Protocol_Buffers) 格式保存,所有的`protobuf`信息被定义在`framework.proto`中,在Paddle中被称为BlockDesc和ProgramDesc。ProgramDesc和BlockDesc的概念类似于一个[抽象语法树](https://en.wikipedia.org/wiki/Abstract_syntax_tree)。 + +`BlockDesc`中包含本地变量的定义 [vars](../../api_guides/low_level/program.html#variable),和一系列的operator`ops`: + +```cpp +message BlockDesc { + required int32 idx = 1; + required int32 parent_idx = 2; + repeated VarDesc vars = 3; + repeated OpDesc ops = 4; +} + +``` +parent_idx表示父块,因此block中的操作符可以引用本地定义的变量,也可以引用祖先块中定义的变量。 + +Program 中的每层 block 都被压平并存储在数组中。blocks ID是这个数组中块的索引。 + +```cpp +message ProgramDesc { + repeated BlockDesc blocks = 1; +} +``` + +### 使用Blocks的Operator + +[Programs and Blocks](#ProgramsAndBlocks)的例子中,IfElseOp这个Operator包含了两个block——true分支和false分支。 + +下述OpDesc的定义过程描述了一个operator可以包含哪些属性: + +```cpp +message OpDesc { + AttrDesc attrs = 1; + ... +} +``` +属性可以是block的类型,实际上就是上面描述的block ID: +```cpp +message AttrDesc { + required string name = 1; + + enum AttrType { + INT = 1, + STRING = 2, + ... + BLOCK = ... + } + required AttrType type = 2; + + optional int32 block = 10; // when type == BLOCK + ... +} +``` + +## 3. Executor设计思想 + +Executor 在运行时将接受一个`ProgramDesc`、一个`block_id`和一个`Scope`。`ProgramDesc`是`block`的列表,每一项包含`block`中所有参数和`operator`的`protobuf`定义;`block_id`指定入口块;`Scope`是所有变量实例的容器。 + +其中 `Scope` 包含了 `name` 与 `Variable` 的映射,所有变量都被定义在 `Scope` 里。大部分API会默认使用 `global_scope` ,例如 `Executor.run` ,您也可以指定网络运行在某个特定的 `Scope` 中,一个网络可以在不同的 `Scope`内运行,并在该 `Scope` 内更新不同的 `Variable`。 + +完成的编译执行的具体过程如下图所示: + +

+ +

+ +1. Executor 为每一个block创建一个Scope,Block是可嵌套的,因此Scope也是可嵌套的。 +2. 创建所有Scope中的变量。 +3. 创建并执行所有operator。 + + + + +Executor的C++实现代码如下: + +```cpp +class Executor{ + public: + void Run(const ProgramDesc& pdesc, + Scope* scope, + int block_id) { + auto& block = pdesc.Block(block_id); + + //创建所有变量 + for (auto& var : block.AllVars()) + scope->Var(Var->Name()); + } + + //创建OP并执行 + for (auto& op_desc : block.AllOps()){ + auto op = CreateOp(*op_desc); + op->Run(*local_scope, place_); + } + }; +``` + +**创建Executor** + +Paddle中使用fluid.Executor(place)创建Executor,place属性由用户定义,代表程序将在哪里执行。 + +下例代码表示创建一个Executor,其运行场所在CPU内: + +```python +cpu=fluid.CPUPlace() +exe = fluid.Executor(cpu) +``` + +**运行Executor** + +Paddle使用Executor.run来运行程序。定义中通过Feed映射获取数据,通过fetch\_list获取结果: + +```python +... +x = numpy.random.random(size=(10, 1)).astype('float32') +outs = exe.run( + feed={'X': x}, + fetch_list=[loss.name]) +``` + + +## 代码实例 +本节通过[编程指南](../../../beginners_guide/basic_concept/programming_guide/programming_guide.html)中简单的线性回归例子,为您介绍上述内容如何在代码中实现。 + +**定义Program** + +您可以随意定义自己的数据和网络结构,定义的结果都将作为一段 Program 被 Paddle 接收,Program 的基本结构是一些 blocks,本节的 Program 仅包含一个 block 0: + +```python +#加载函数库 +import paddle.fluid as fluid #block 0 +import numpy + +#定义数据 +train_data=numpy.array([[1.0],[2.0],[3.0],[4.0]]).astype('float32') +y_true = numpy.array([[2.0],[4.0],[6.0],[8.0]]).astype('float32') +#定义网络 +x = fluid.data(name="x",shape=[None, 1],dtype='float32') +y = fluid.data(name="y",shape=[None, 1],dtype='float32') +y_predict = fluid.layers.fc(input=x,size=1,act=None) +#定义损失函数 +cost = fluid.layers.square_error_cost(input=y_predict,label=y) +avg_cost = fluid.layers.mean(cost) +#定义优化方法 +sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.01) +sgd_optimizer.minimize(avg_cost) +``` + +完成上述定义,也就是完成了 fluid.default_main_program 的构建过程,fluid.default_main_program 中承载着神经网络模型,前向反向计算,以及优化算法对网络中可学习参数的更新。 + +此时可以输出这段 Program 观察定义好的网络形态: +```python +print(fluid.default_main_program().to_string(True)) +``` +完整ProgramDesc可以在本地查看,本次仅节选前三个变量的结果如下: +``` +blocks { + idx: 0 + parent_idx: -1 + vars { + name: "mean_1.tmp_0" + type { + type: LOD_TENSOR + lod_tensor { + tensor { + data_type: FP32 + dims: 1 + } + } + } + persistable: false + } + vars { + name: "square_error_cost_1.tmp_1" + type { + type: LOD_TENSOR + lod_tensor { + tensor { + data_type: FP32 + dims: -1 + dims: 1 + } + lod_level: 0 + } + } + persistable: false + } + vars { + name: "square_error_cost_1.tmp_0" + type { + type: LOD_TENSOR + lod_tensor { + tensor { + data_type: FP32 + dims: -1 + dims: 1 + } + lod_level: 0 + } + } + persistable: false + ... 
+``` +从输出结果中可以看到,整个定义过程在框架内部转化为了一段ProgramDesc,以block idx为索引。本次线性回归模型中仅有1个block,ProgramDesc中也仅有block 0一段BlockDesc。 + +BlockDesc中包含定义的 vars 和一系列的 ops,以输入x为例,python代码中定义 x 是一个数据类型为"float32"的1维数据: +```python +x = fluid.data(name="x",shape=[None, 1],dtype='float32') +``` +在BlockDesc中,变量x被描述为: +``` +vars { + name: "x" + type { + type: LOD_TENSOR + lod_tensor { + tensor { + data_type: FP32 + dims: -1 + dims: 1 + } + lod_level: 0 + } + } + persistable: false +``` +在Paddle中所有的数据类型都为LoD-Tensor,对于不存在序列信息的数据(如此处的变量X),其lod_level=0。 + +dims表示数据的维度,这里表示 x 的维度为[-1,1],其中-1是batch的维度,无法确定具体数值时,Paddle 自动用 -1 占位。 + +参数`persistable`表示该变量在整个训练过程中是否为持久化变量。 + +**创建Executor** + +Paddle使用Executor来执行网络训练,Executor运行细节请参考[Executor设计思想](#Executor设计思想)的介绍。作为使用者,实际并不需要了解内部机制。 + +创建Executor只需调用 fluid.Executor(place) 即可,在此之前请您依据训练场所定义place变量: +```python + #在CPU内执行训练 + cpu = fluid.CPUPlace() + #创建Executor + exe = fluid.Executor(cpu) +``` +**运行Executor** + +Paddle使用Executor.run来运行一段Program。 + +正式进行网络训练前,需先执行参数初始化。其中 defalut_startup_program 中定义了模型参数初始化、优化器参数初始化、reader初始化等各种操作。 +```python + #参数初始化 + exe.run(fluid.default_startup_program()) +``` +由于传入数据与传出数据存在多列,因此 Paddle 通过 feed 映射定义数据的传输数据,通过 fetch_list 取出期望结果: +```python +#开始训练 + outs = exe.run( + feed={'x':train_data,'y':y_true}, + fetch_list=[y_predict.name,avg_cost.name]) +``` +上述代码段中定义了train_data传入x变量,y_true传入y变量,输出y的预测值和最后一轮cost值。 + +输出结果为: +``` +[array([[1.5248038], + [3.0496075], + [4.5744114], + [6.099215 ]], dtype=float32), array([1.6935859], dtype=float32)] +``` + +至此您已经了解了Paddle内部的执行流程的核心概念,更多框架使用细节可以参考[典型案例](../../../user_guides/index_cn.html)。 diff --git a/doc/paddle/advanced_guide/addon_development/design_idea/fluid_design_idea_en.md b/doc/paddle/advanced_guide/addon_development/design_idea/fluid_design_idea_en.md new file mode 100644 index 0000000000000000000000000000000000000000..e12eea430d61e94a38cfa22a1ad6ab932962b2a6 --- /dev/null +++ b/doc/paddle/advanced_guide/addon_development/design_idea/fluid_design_idea_en.md @@ -0,0 +1,365 @@ +# Design Principles of Fluid + +## Introduction + +This document mainly introduces the underlying design principles Fluid to help users better understand the operation process of the framework. + +After reading this document, you will learn about: + +- internal execution process of Fluid +- How does Program express the model +- How does Executor perform operations + + +## 1. internal execution process of Fluid + +Fluid uses a compiler-like execution process, which is divided into compile-time and runtime. Specifically, it includes : compiler defines Program, and an Executor is created to run the Program. + +The flow chart of executing a local training task is as follows: +

+ +

+ + 1. At compile time, the user writes a python program that adds variables (Tensor) and operations on variables (Operators or Layers) to a Program by calling the operators provided by Fluid. Users only need to describe the core forward calculations, and do not need to care about backward computing, distributed computing, and calculations on heterogeneous devices. + + 2. The original Program is converted to an intermediate descriptive language ``ProgramDesc`` within the platform: + + 3. The most important functional modules at compile time is ``Transpiler`` . ``Transpiler`` accepts a segment of `ProgramDesc` and outputs a segment of *transformed* ``ProgramDesc`` as the Program to be executed ultimately by backend ``Executor`` . + + 4. The backend ``Executor`` accepts the Program output from ``Transpiler`` , and executes the Operators in the Program in sequence (which can be analogized to the instructions in the programming language). During the execution, the Executor creates the required input and output for the Operator and manages them. + + + + +## 2. Design Principles of Program + +After completing the network definition, there are usually 2 Programs in a Fluid program: + + 1. ``fluid.default_startup_program`` : defines various operations such as creating model parameters, input and output, and initialization of learnable parameters in the model. + + default_startup_program can be automatically generated by the framework and can be used without visible manual creation. + + If the call changes the default initialization method of the parameter, the framework will automatically add the relevant changes to the default_startup_program + + 2. ``fluid.default_main_program`` : defines the neural network model, forward and backward calculations, and updates to the learnable parameters of the network by the optimization algorithm. + + The core of using Fluid is to build ``default_main_program`` + + + +### Programs and Blocks + +The basic structure of Fluid's Program is some nested blocks that are similar in form to a C++ or Java program. + +The blocks contain: + +- Definition of local variables +- A series of operators + +The concept of block is the same with that in generic programs. For example, there are three blocks in the following C++ code: + +``` cpp +#include + +int main() { + int x = 5; // block 0 + int y = 4; // block 0 + int out; // block 0 + + if (x < y) { // block 0 + out = 1; // block 1 + } else { + out = 0; // block 2 + } + + std::cout << out << std::endl; + return 0; +} +``` + +Similarly, the following Program contains 3 blocks: + +```python +import paddle.fluid as fluid + +x = fluid.data(name='x', shape=[1], dtype='int64') # block 0 +y = fluid.data(name='y', shape=[1], dtype='int64') # block 0 + +def true_block(): + return fluid.layers.fill_constant(dtype='int64', value=1, shape=[1]) # block 1 + +def false_block(): + return fluid.layers.fill_constant(dtype='int64', value=0, shape=[1]) # block 2 + +condition = fluid.layers.less_than(x, y) # block 0 + +out = fluid.layers.cond(condition, true_block, false_block) # block 0 +``` +### BlockDesc and ProgramDesc + +The block and program information described by the user is saved in Fluid in [protobuf](https://en.wikipedia.org/wiki/Protocol_Buffers) format, and all ``protobuf`` information is defined in ``framework.proto`` . In Fluid it is called BlockDesc and ProgramDesc. The concepts of ProgramDesc and BlockDesc are similar to an [abstract syntax tree](https://en.wikipedia.org/wiki/Abstract_syntax_tree). 
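As a quick illustration, the serialized structure can be inspected from the Python side. The sketch below is minimal and assumes `Program.num_blocks` and `Program.to_string` as the available introspection helpers (the latter is also used later in this document):

```python
import paddle.fluid as fluid

# Build a tiny network; each layer call appends VarDescs/OpDescs to block 0.
x = fluid.data(name='x', shape=[None, 1], dtype='float32')
y = fluid.layers.fc(input=x, size=1)

prog = fluid.default_main_program()
print(prog.num_blocks)       # number of BlockDesc entries in the ProgramDesc
print(prog.to_string(True))  # human-readable dump of the serialized ProgramDesc
```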
+ +`BlockDesc` contains the definition of the local variables `vars`, and a series of operators `ops`: + +```cpp +message BlockDesc { + required int32 idx = 1; + required int32 parent_idx = 2; + repeated VarDesc vars = 3; + repeated OpDesc ops = 4; +} +``` +The parent ID represents the parent block, so the operators in the block can not only reference local variables, but also reference variables defined in the ancestor block. + +Each block in the Program is flattened and stored in an array. The blocks ID is the index of the block in this array. + +```cpp +message ProgramDesc { + repeated BlockDesc blocks = 1; +} +``` + +### the Operator using Blocks + +In the example of [Programs and Blocks](#Programs and Blocks), the IfElseOp operator contains two blocks -- the true branch and the false branch. + +The following OpDesc definition process describes what attributes an operator contains: + +```cpp +message OpDesc { + AttrDesc attrs = 1; + ... +} +``` +The attribute can be the type of block, which is actually the block ID described above: +```cpp +message AttrDesc { + required string name = 1; + + enum AttrType { + INT = 1, + STRING = 2, + ... + BLOCK = ... + } + required AttrType type = 2; + + optional int32 block = 10; // when type == BLOCK + ... +} +``` + +## 3. Design Principles of Executor + +Executor will accept a `ProgramDesc`, a `block_id` and a `Scope` at runtime. `ProgramDesc` is a list of `block`, each containing all the parameters in `block` and the `protobuf` definition of `operator`; `block_id` specifying the entry block; `Scope` is the container for all variable instances. + +The specific process of compilation and execution is shown in the following figure: + +

+ +

+ +1. Executor creates a Scope for each block. Block is nestable, so Scope is nestable +2. Create all variables in Scope +3. Create and Run all operators in order + + + + +C++ implementation code of Executor is as follows: + +```cpp +class Executor{ + public: + void Run(const ProgramDesc& pdesc, + scope* scope, + int block_id) { + auto& block = pdesc.Block(block_id); + + // Create all variables + for (auto& var : block.AllVars()) + scope->Var(Var->Name()); + } + + // Create OP and execute in order + for (auto& op_desc : block.AllOps()){ + auto op = CreateOp(*op_desc); + op->Run(*local_scope, place_); + } + } + }; +``` + +**Create Executor** + +Fluid uses Fluid.Executor(place) to create an Executor. The place attribute is defined by user and represents where the program will be run. + +The following code example creates an Executor that runs on CPU: + +```python +cpu=fluid.CPUPlace() +exe = fluid.Executor(cpu) +``` + +**Run Executor** + +Fluid uses Executor.run to run the program. In the definition, the data is obtained through the ``Feed`` dict, and the result is obtained through ``fetch_list``: + +```python +... +x = numpy.random.random(size=(10, 1)).astype('float32') +outs = exe.run( + feed={'X': x}, + fetch_list=[loss.name]) +``` + + +## Code Instance +This section introduces how the above is implemented in your code through a simple linear regression example in the [Fluid Programming Guide](../../../beginners_guide/basic_concept/programming_guide/programming_guide_en.html). + +**Define Program** + +You can freely define your own data and network structure, which will be received as a Program by Fluid. The basic structure of Program is some blocks. The Program in this section contains only block 0: + +```python +#Load function library +import paddle.fluid as fluid #block 0 +import numpy + +# Define data +train_data=numpy.array([[1.0],[2.0],[3.0],[4.0]]).astype('float32') +y_true = numpy.array([[2.0],[4.0],[6.0],[8.0]]).astype('float32') +# Define the network +x = fluid.data(name="x",shape=[None, 1],dtype='float32') +y = fluid.data(name="y",shape=[None, 1],dtype='float32') +y_predict = fluid.layers.fc(input=x,size=1,act=None) +#definition loss function +cost = fluid.layers.square_error_cost(input=y_predict,label=y) +avg_cost = fluid.layers.mean(cost) +#defined optimization method +sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.01) +sgd_optimizer.minimize(avg_cost) +``` + +Finishing the above definition is at the same time finishing the construction process of fluid.default_main_program. fluid.default_main_program carries the neural network model, forward and backward calculation, and the optimization algorithm which updates the learnable parameters in the network. + +At this point you can output this Program to observe the defined network: +```python +print(fluid.default_main_program().to_string(True)) +``` +The complete ProgramDesc can be viewed locally. The first three variables are excerpted and displayed as follows: +``` +blocks { + idx: 0 + parent_idx: -1 + vars { + name: "mean_1.tmp_0" + type { + type: LOD_TENSOR + lod_tensor { + tensor { + data_type: FP32 + dims: 1 + } + } + } + persistable: false + } + vars { + name: "square_error_cost_1.tmp_1" + type { + type: LOD_TENSOR + lod_tensor { + tensor { + data_type: FP32 + dims: -1 + dims: 1 + } + lod_level: 0 + } + } + persistable: false + } + vars { + name: "square_error_cost_1.tmp_0" + type { + type: LOD_TENSOR + lod_tensor { + tensor { + data_type: FP32 + dims: -1 + dims: 1 + } + lod_level: 0 + } + } + persistable: false + ... 
+``` +As you can see from the output, the entire definition process is transformed into a ProgramDesc inside the framework, indexed by block idx. There is only one block in this linear regression model, and ProgramDesc has only one piece of BlockDesc, namely Block 0. + +BlockDesc contains defined vars and a series of ops. Take input x as an example. In python code, x is 1D data of data type "float 32": +```python +x = fluid.data(name="x",shape=[None, 1],dtype='float32') +``` +In BlockDesc, the variable x is described as: +``` +vars { + name: "x" + type { + type: LOD_TENSOR + lod_tensor { + tensor { + data_type: FP32 + dims: -1 + dims: 1 + } + lod_level: 0 + } + } + persistable: false +``` +All data in Fluid are LoD-Tensor, and for data without sequence information (such as variable X here), lod_level=0. + +``dims`` represents the dimension of the data, and in the example x has the dimension of [-1,1], where -1 is the dimension of the batch. When the specific value cannot be determined, Fluid automatically uses the -1 as placeholder. + +The parameter ``persistable`` indicates whether the variable is a persistent variable throughout the training process. + +**Create Executor** + +Fluid uses Executor to perform network training. For details on Executor operation, please refer to [Executor Design Principles](#Executor Design Ideas). As a user, there is actually no need to understand the internal mechanism. + +To create an Executor, simply call fluid.Executor(place). Before that, please define a place variable based on the training site: +```python + #Execute training on CPU + cpu = fluid.CPUPlace() + #Create Executor + exe = fluid.Executor(cpu) +``` +**Run Executor** + +Fluid uses Executor.run to run a program. + +Before the network training is actually performed, parameter initialization must be performed first. Among them, default_startup_program defines various operations such as creating model parameters, input and output, and initialization of learnable parameters in the model. +```python + #Parameter initialization + exe.run(fluid.default_startup_program()) +``` +Since there are multiple columns of incoming and outgoing data, fluid defines transferred data through the feed mapping, and fetch_list takes out the expected result: +```python +# Start training + outs = exe.run( + feed={'x':train_data,'y':y_true}, + fetch_list=[y_predict.name,avg_cost.name]) +``` +The above code defines that train_data is to be passed into the x variable, y_true is to be passed into the y variable, and output the predicted value of y and the last round value of cost. + +The output is: +``` +[array([[1.5248038], + [3.0496075], + [4.5744114], + [6.099215 ]], dtype=float32), array([1.6935859], dtype=float32)] +``` + +Till now you have already be notified of the core concepts of the internal execution process of Fluid. For more details on the usage of the framework, please refer to the [User Guide](../../../user_guides/index_en.html). 
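As a recap, the fragments above can be assembled into one runnable script. This is only a sketch that concatenates the snippets already shown in this section, under the same 1.x `fluid` API assumed throughout this document:

```python
import numpy
import paddle.fluid as fluid

# Training data for the toy linear-regression problem.
train_data = numpy.array([[1.0], [2.0], [3.0], [4.0]]).astype('float32')
y_true = numpy.array([[2.0], [4.0], [6.0], [8.0]]).astype('float32')

# Build default_main_program: forward pass, loss, and optimizer update.
x = fluid.data(name="x", shape=[None, 1], dtype='float32')
y = fluid.data(name="y", shape=[None, 1], dtype='float32')
y_predict = fluid.layers.fc(input=x, size=1, act=None)
cost = fluid.layers.square_error_cost(input=y_predict, label=y)
avg_cost = fluid.layers.mean(cost)
fluid.optimizer.SGD(learning_rate=0.01).minimize(avg_cost)

# Create the Executor and run the startup program once to initialize parameters.
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())

# Run one iteration of default_main_program and fetch the results.
outs = exe.run(feed={'x': train_data, 'y': y_true},
               fetch_list=[y_predict.name, avg_cost.name])
print(outs)
```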
diff --git a/doc/paddle/advanced_guide/addon_development/design_idea/image/executor_design.png b/doc/paddle/advanced_guide/addon_development/design_idea/image/executor_design.png new file mode 100644 index 0000000000000000000000000000000000000000..ae0cf2f161da7d0700ecb4f63772d2bfc4c26de0 Binary files /dev/null and b/doc/paddle/advanced_guide/addon_development/design_idea/image/executor_design.png differ diff --git a/doc/paddle/advanced_guide/addon_development/design_idea/image/fluid_process.png b/doc/paddle/advanced_guide/addon_development/design_idea/image/fluid_process.png new file mode 100644 index 0000000000000000000000000000000000000000..5031103e41b811b27071e303dcf750f50a9f7f6c Binary files /dev/null and b/doc/paddle/advanced_guide/addon_development/design_idea/image/fluid_process.png differ diff --git a/doc/paddle/advanced_guide/addon_development/index_cn.rst b/doc/paddle/advanced_guide/addon_development/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d9a12cb14fc1ac6bbf0a73aefd57f5ca5496cc20 --- /dev/null +++ b/doc/paddle/advanced_guide/addon_development/index_cn.rst @@ -0,0 +1,14 @@ +.. _addon_development: + +######## +二次开发 +######## + +.. toctree:: + :maxdepth: 1 + + design_idea/fluid_design_idea.md + new_op/index_cn.rst + contribute_code/index_cn.rst + + diff --git a/doc/paddle/advanced_guide/addon_development/index_en.rst b/doc/paddle/advanced_guide/addon_development/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..0f3b9dd71aeb54634db22074bfd8dcbf60f69576 --- /dev/null +++ b/doc/paddle/advanced_guide/addon_development/index_en.rst @@ -0,0 +1,16 @@ +.. _addon_development_en: + + +################ +Addon Development +################ + +.. toctree:: + :maxdepth: 1 + + design_idea/fluid_design_idea_en.md + new_op/index_en.rst + contribute_code/index_en.rst + + + diff --git a/doc/paddle/advanced_guide/addon_development/new_op/custom_op.md b/doc/paddle/advanced_guide/addon_development/new_op/custom_op.md new file mode 100644 index 0000000000000000000000000000000000000000..2da40b3e11151141fad43cd36e3578a6a503b6f5 --- /dev/null +++ b/doc/paddle/advanced_guide/addon_development/new_op/custom_op.md @@ -0,0 +1,341 @@ +# 如何在框架外部自定义C++ OP + + + +通常,如果PaddlePaddle的Operator(OP)库中没有您所需要的操作,建议先尝试使用已有的OP组合,如果无法组合出您需要的操作,可以尝试使用`fluid.layers.py_func`,也可以按照这篇教程自定义C++ OP。当然,如果用若干OP组合出来的OP性能无法满足您的要求,也可以自定义C++ OP。 + +自定义OP需要以下几个步骤: + +1. 实现OP和注册OP,和在框架内部写OP完全相同,遵守"如何写新的C++ OP"的规范和步骤。当然,实现Gradient OP是可选的。 +2. 编译出动态库。 +3. 封装该OP的Python接口。 +4. 写OP的单测。 + + + +下面通过一个具体的例子来详细的介绍,一步一步教会您如何实现。下面通过实现relu op来介绍。 + + + +## 自定义OP的实现 + +OP的实现与"如何写新的C++ OP"的教程相同,简答的说需要: 1). 定义OP的ProtoMaker,即描述OP的输入、输出、属性信息;2). 实现OP的定义和InferShape,以及OP的kernel函数,反向OP类似。3). 注册OP,以及OP的计算函数。 + +ReLU OP的CPU实现, ``relu_op.cc`` 文件: + +``` +// relu_op.cc +#include "paddle/fluid/framework/op_registry.h" + +namespace paddle { +namespace operators { + +// 前向OP的输入X、输出Y、属性 +class Relu2OpMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() override { + AddInput("X", "The input tensor."); + AddOutput("Y", "Output of relu_op"); + AddComment(R"DOC( +Relu Operator. 
+Y = max(X, 0) +)DOC"); + } +}; + +// 前向OP的定义和InferShape实现,设置输出Y的shape +class Relu2Op : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext* ctx) const override { + auto in_dims = ctx->GetInputDim("X"); + ctx->SetOutputDim("Y", in_dims); + } +}; + +// 实现前向OP的Kernel计算函数: Y = max(0, X) +using Tensor = framework::Tensor; +template +class Relu2Kernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto* in_t = ctx.Input("X"); + auto* out_t = ctx.Output("Y"); + auto x = in_t->data(); + // mutable_data分配内存、获取指针 + auto y = out_t->mutable_data(ctx.GetPlace()); + for (int i = 0; i < in_t->numel(); ++i) { + y[i] = std::max(static_cast(0.), x[i]); + } + } +}; + +// 定义反向OP的输入Y和dY、输出dX、属性: +template +class Relu2GradMaker : public framework::SingleGradOpMaker { + public: + using framework::SingleGradOpMaker::SingleGradOpMaker; + + void Apply(GradOpPtr op) const override { + op->SetType("relu2_grad"); + op->SetInput("Y", this->Output("Y")); + op->SetInput(framework::GradVarName("Y"), this->OutputGrad("Y")); + op->SetAttrMap(this->Attrs()); + op->SetOutput(framework::GradVarName("X"), this->InputGrad("X")); + } +}; + +// 定义反向OP和InferShape实现,设置dX的shape +class Relu2GradOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext* ctx) const override { + auto in_dims = ctx->GetInputDim(framework::GradVarName("Y")); + ctx->SetOutputDim(framework::GradVarName("X"), in_dims); + } +}; + +// 实现反向OP的kernel函数 dx = dy * ( y > 0. ? 1. : 0) +template +class Relu2GradKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto* dy_t = ctx.Input(framework::GradVarName("Y")); + auto* y_t = ctx.Input("Y"); + auto* dx_t = ctx.Output(framework::GradVarName("X")); + + auto dy = dy_t->data(); + auto y = y_t->data(); + auto dx = dx_t->mutable_data(ctx.GetPlace()); + + for (int i = 0; i < y_t->numel(); ++i) { + dx[i] = dy[i] * (y[i] > static_cast(0) ? 1. 
: 0.); + } + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +using CPU = paddle::platform::CPUDeviceContext; +// 注册前向和反向op +// 为了和框架内部的relu区分,这里注册的OP type为relu2 +REGISTER_OPERATOR(relu2, + ops::Relu2Op, + ops::Relu2OpMaker, + ops::Relu2GradMaker, + ops::Relu2GradMaker); +REGISTER_OPERATOR(relu2_grad, ops::Relu2GradOp); +// 注册CPU的Kernel +REGISTER_OP_CPU_KERNEL(relu2, + ops::Relu2Kernel, + ops::Relu2Kernel); +REGISTER_OP_CPU_KERNEL(relu2_grad, + ops::Relu2GradKernel, + ops::Relu2GradKernel); +``` + + + +ReLU OP的GPU实现, ``relu_op.cu`` 文件: + +``` +// relu_op.cu +#include "paddle/fluid/framework/op_registry.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; + +template +__global__ void KeRelu2(const T* x, const int num, T* y) { + int gid = blockIdx.x * blockDim.x + threadIdx.x; + for (int i = gid; i < num; i += blockDim.x * gridDim.x) { + y[i] = max(x[i], static_cast(0.)); + } +} + +// 前向OP的kernel的GPU实现 +template +class Relu2CUDAKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto* in_t = ctx.Input("X"); + auto* out_t = ctx.Output("Y"); + auto x = in_t->data(); + auto y = out_t->mutable_data(ctx.GetPlace()); + + auto& dev_ctx = ctx.template device_context(); + + int num = in_t->numel(); + int block = 512; + int grid = (num + block - 1) / block; + KeRelu2<<>>(x, num, y); + } +}; + +template +__global__ void KeRelu2Grad(const T* y, const T* dy, const int num, T* dx) { + int gid = blockIdx.x * blockDim.x + threadIdx.x; + for (int i = gid; i < num; i += blockDim.x * gridDim.x) { + dx[i] = dy[i] * (y[i] > 0 ? 1. : 0.); + } +} + +// 反向OP的kernel的GPU实现 +template +class Relu2GradCUDAKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto* dy_t = ctx.Input(framework::GradVarName("Y")); + auto* y_t = ctx.Input("Y"); + auto* dx_t = ctx.Output(framework::GradVarName("X")); + + auto dy = dy_t->data(); + auto y = y_t->data(); + auto dx = dx_t->mutable_data(ctx.GetPlace()); + + auto& dev_ctx = ctx.template device_context(); + + int num = dy_t->numel(); + int block = 512; + int grid = (num + block - 1) / block; + KeRelu2Grad<<>>(y, dy, num, dx); + } +}; + +} // namespace operators +} // namespace paddle + +using CUDA = paddle::platform::CUDADeviceContext; +// 注册前向的GPU Kernel +REGISTER_OP_CUDA_KERNEL(relu2, + paddle::operators::Relu2CUDAKernel, + paddle::operators::Relu2CUDAKernel); +// 注册反向的GPU Kernel +REGISTER_OP_CUDA_KERNEL(relu2_grad, + paddle::operators::Relu2GradCUDAKernel, + paddle::operators::Relu2GradCUDAKernel); +``` + +注意点: + +1. 
OP的type不能和PaddlePaddle已有的OP type相同,否则在Python中使用时会报错。 + + + +## 自定义OP的编译 + +需要将实现的C++、CUDA代码编译成动态库,下面通过g++/nvcc编译,当然您也可以写Makefile或者CMake。 + + + +编译需要include PaddlePaddle的相关头文件,如上面代码 `paddle/fluid/framework/op_registry.h` ,需要链接PaddlePaddle的lib库。 可通过下面命令获取到: + +``` +# python +>>> import paddle +>>> print(paddle.sysconfig.get_include()) +/paddle/pyenv/local/lib/python2.7/site-packages/paddle/include +>>> print(paddle.sysconfig.get_lib()) +/paddle/pyenv/local/lib/python2.7/site-packages/paddle/libs +``` + +下面命令可编译出动态库: + +``` +include_dir=$( python -c 'import paddle; print(paddle.sysconfig.get_include())' ) +lib_dir=$( python -c 'import paddle; print(paddle.sysconfig.get_lib())' ) + +echo $include_dir +echo $lib_dir + +# PaddlePaddel >=1.6.1, 仅需要include ${include_dir} 和 ${include_dir}/third_party +nvcc relu_op.cu -c -o relu_op.cu.o -ccbin cc -DPADDLE_WITH_CUDA -DEIGEN_USE_GPU -DPADDLE_USE_DSO -DPADDLE_WITH_MKLDNN -Xcompiler -fPIC -std=c++11 -Xcompiler -fPIC -w --expt-relaxed-constexpr -O3 -DNVCC \ + -I ${include_dir} \ + -I ${include_dir}/third_party \ + +g++ relu_op.cc relu_op.cu.o -o relu2_op.so -shared -fPIC -std=c++11 -O3 -DPADDLE_WITH_MKLDNN \ + -I ${include_dir} \ + -I ${include_dir}/third_party \ + -L /usr/local/cuda/lib64 \ + -L ${lib_dir} -lpaddle_framework -lcudart +``` + + + +注意点: + +1. 通过NVCC编译CUDA源文件时,需要加编译选项 `-DPADDLE_WITH_CUDA -DEIGEN_USE_GPU -DPADDLE_USE_DSO`,在框架源码中会使用这些宏定义进行条件编译。用户自定义的C++ OP实现编译时,选项的开启状态需要和核心框架编译行为一致。如`EIGEN_USE_GPU`是使用Eigen数学库的GPU实现时需要增加的编译选项。 +2. 如果飞桨安装包中不包含MKLDNN库,则需要去掉编译选项`-DPADDLE_WITH_MKLDNN`。核心框架源码中(比如tensor.h)有使用此宏定义进行条件编译,该选项是否打开同样需要和核心框架编译行为保持一致。默认的飞桨安装包中含有MKLDNN库。 +3. 可多个OP编译到同一个动态库中。 +4. 通过pip方式安装的PaddlePaddle由GCC 4.8编译得到,由于GCC 4.8和GCC 5以上**C++11 ABI不兼容**,您编写的自定义OP,需要通过GCC 4.8编译。若是GCC 5及以上的环境上使用自定义OP,推荐使用[Docker安装PaddlePaddle](https://www.paddlepaddle.org.cn/install/doc/docker),使得编Paddle和编译自定义OP的GCC版本相同。 + + + +## 封装Python Layer接口 + +需要使用 `fluid.load_op_library` 接口调用加载动态库,使得PaddlePaddle的主进程中可以使用用户自定义的OP。 + +``` +# custom_op.py +import paddle.fluid as fluid +# 调用load_op_library加载动态库 +fluid.load_op_library('relu2_op.so') + +from paddle.fluid.layer_helper import LayerHelper + +def relu2(x, name=None): + # relu2的type和在OP中定义的type相同 + helper = LayerHelper("relu2", **locals()) + # 创建输出Variable + out = helper.create_variable_for_type_inference(dtype=x.dtype) + helper.append_op(type="relu2", inputs={"X": x}, outputs={"Y": out}) + return out +``` + +注意点: + +1. 一个动态库只需使用`fluid.load_op_library`在`paddle.fluid` import之后加载一次即可。 +2. Python接口的封装和PaddlePaddle框架内部的封装相同,更多的示例也可以阅读源码中 `python/paddle/fluid/layers/nn.py`的代码示例。 + +## 单测测试 + + 可以写个简单的Python程序测试计算的正确性: + +``` +import numpy as np +import paddle.fluid as fluid +from custom_op import relu2 + +data = fluid.layers.data(name='data', shape=[32], dtype='float32') +relu = relu2(data) +use_gpu = True # or False +place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace() +exe = fluid.Executor(place) + +x = np.random.uniform(-1, 1, [4, 32]).astype('float32') +out, = exe.run(feed={'data': x}, fetch_list=[relu]) +np.allclose(out, np.maximum(x,0.)) +``` + +接下来可以在模型中使用您自定义的OP了! + +## 如何在C++预测库中使用 + +暂时不支持在C++预测库中使用,后续会补充在C++预测库中的使用示例。 + +## FAQ + +1. 
Q: 如果出现类似错误: `relu2_op.so: cannot open shared object file: No such file or directory` 以及 `libpaddle_framework.so: cannot open shared object file: No such file or directory`。 + + A: 需要将`relu2_op.so`所在路径以及`libpaddle_framework.so`路径(即`paddle.sysconfig.get_lib()`得到路径)设置到环境变量LD_LIBRARY_PATH中: + + ``` + # 假如relu2_op.so路径是:`paddle/test`,对于Linux环境设置: + export LD_LIBRARY_PATH=paddle/test:$( python -c 'import paddle; print(paddle.sysconfig.get_lib())'):$LD_LIBRARY_PATH + ``` diff --git a/doc/paddle/advanced_guide/addon_development/new_op/index_cn.rst b/doc/paddle/advanced_guide/addon_development/new_op/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..75b1c4ab086176ebcee9816541e0a7870950de1b --- /dev/null +++ b/doc/paddle/advanced_guide/addon_development/new_op/index_cn.rst @@ -0,0 +1,22 @@ +############# +新增OP +############# + +本部分将指导您如何新增Operator,也包括一些必要的注意事项 + +- `如何写新的C++ op <./new_op.html>`_ + +- `C++ op相关注意事项 <./op_notes.html>`_ + +- `如何写新的Python op <./new_python_op.html>`_ + +- `如何在框架外部自定义C++ op <./custom_op.html>`_ + +.. toctree:: + :hidden: + + new_op.md + op_notes.md + new_python_op.md + custom_op.md + diff --git a/doc/paddle/advanced_guide/addon_development/new_op/index_en.rst b/doc/paddle/advanced_guide/addon_development/new_op/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..081de2dbcedf408df22b94af6c4ee1ee1e888287 --- /dev/null +++ b/doc/paddle/advanced_guide/addon_development/new_op/index_en.rst @@ -0,0 +1,15 @@ +################### +Write New Operators +################### + +This section will guide you how to add an operator, and it also includes some necessary notes. + +- `How to write new operator `_ :guides to write new operators + +- `op notes `_ :notes on developing new operators + +.. toctree:: + :hidden: + + new_op_en.md + op_notes_en.md diff --git a/doc/paddle/advanced_guide/addon_development/new_op/new_op.md b/doc/paddle/advanced_guide/addon_development/new_op/new_op.md new file mode 100644 index 0000000000000000000000000000000000000000..7b22163538445e9929ff3e2684c0efd41a536ec3 --- /dev/null +++ b/doc/paddle/advanced_guide/addon_development/new_op/new_op.md @@ -0,0 +1,715 @@ +# 如何写新的C++ OP + +## 概念简介 + +简单介绍需要用到基类,详细介绍请参考[设计文档](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/motivation/refactorization.md#operatoropwithkernelopkernel)。 + +- `framework::OperatorBase`: Operator(简写,Op)基类。 +- `framework::OpKernel`: Op计算函数的基类,称作Kernel。 +- `framework::OperatorWithKernel`:继承自OperatorBase,Op有计算函数,称作有Kernel。 +- `framework::OpProtoAndCheckerMaker`:描述该Op的输入、输出、属性、注释,主要用于Python API接口生成。 + +根据是否包含Kernel,可以将Op分为两种:包含Kernel的Op和不包含kernel的Op: + +- 包含Kernel的Op继承自`OperatorWithKernel`,这类Op的功能实现与输入的数据类型、数据布局、数据所在的设备以及Op实现所调用第三方库等有关。比如ConvOp,如果使用CPU计算,一般通过调用mkl库中的矩阵乘操作实现,如果使用GPU计算,一般通过调用cublas库中的矩阵乘操作实现,或者直接调用cudnn库中的卷积操作。 +- 不包含Kernel的Op继承自`OperatorBase`,因为这类Op的功能实现与设备以及输入的数据不相关。比如WhileOp、IfElseOp等。 + +本教程主要介绍带Kernel的Op如何写,简单总结Op需要包含的内容如下: + + + + + + + + + + + + + + + + + + + + + + + + + + +
| 内容 | 定义位置 |
| ------------ | -------- |
| OpProtoMake定义 | .cc 文件 |
| Op定义 | .cc 文件 |
| Kernel实现 | CPU、CUDA共享Kernel实现在.h 文件中,否则,CPU 实现在.cc 文件中,CUDA 实现在.cu 文件中。 |
| 注册Op | Op注册实现在.cc 文件;Kernel注册CPU实现在.cc 文件中,CUDA实现在.cu 文件中 |
+ +实现新的op都添加至目录[paddle/fluid/operators](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/fluid/operators)下,文件命名以`*_op.h`(如有)、`*_op.cc` 、`*_op.cu`(如有)结尾。**系统会根据文件名自动构建op和其对应的Python扩展。** + +下面以矩阵乘操作,即[MulOp](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/mul_op.cc)为例来介绍如何写带Kernel的Operator。 + +## 实现C++类 +### 定义ProtoMaker类 + +矩阵乘法的公式:$Out = X * Y$, 可见该计算由两个输入,一个输出组成。 + +首先定义`ProtoMaker`来描述该Op的输入、输出,并添加注释: + +```cpp +class MulOpMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() override { + AddInput("X", "(Tensor), The first input tensor of mul op."); + AddInput("Y", "(Tensor), The second input tensor of mul op."); + AddOutput("Out", "(Tensor), The output tensor of mul op."); + AddAttr("use_mkldnn", + "(bool, default false) Only used in mkldnn kernel") + .SetDefault(false); + AddAttr( + "x_num_col_dims", + R"DOC((int, default 1), The mul_op can take tensors with more than two + dimensions as its inputs. If the input $X$ is a tensor with more + than two dimensions, $X$ will be flattened into a two-dimensional + matrix first. The flattening rule is: the first `num_col_dims` + will be flattened to form the first dimension of the final matrix + (the height of the matrix), and the rest `rank(X) - num_col_dims` + dimensions are flattened to form the second dimension of the final + matrix (the width of the matrix). As a result, height of the + flattened matrix is equal to the product of $X$'s first + `x_num_col_dims` dimensions' sizes, and width of the flattened + matrix is equal to the product of $X$'s last `rank(x) - num_col_dims` + dimensions' size. For example, suppose $X$ is a 6-dimensional + tensor with the shape [2, 3, 4, 5, 6], and `x_num_col_dims` = 3. + Thus, the flattened matrix will have a shape [2 x 3 x 4, 5 x 6] = + [24, 30]. + )DOC") + .SetDefault(1) + .EqualGreaterThan(1); + AddAttr( + "y_num_col_dims", + R"DOC((int, default 1), The mul_op can take tensors with more than two, + dimensions as its inputs. If the input $Y$ is a tensor with more + than two dimensions, $Y$ will be flattened into a two-dimensional + matrix first. The attribute `y_num_col_dims` determines how $Y$ is + flattened. See comments of `x_num_col_dims` for more details. + )DOC") + .SetDefault(1) + .EqualGreaterThan(1); + AddAttr( + "scale_x", + "scale_x to be used for int8 mul input data x. scale_x has the" + "same purpose as scale_in in OPs that support quantization." + "Only to be used with MKL-DNN INT8") + .SetDefault(1.0f); + AddAttr>( + "scale_y", + "scale_y to be used for int8 mul input data y. scale_y has the" + "same purpose as scale_weights in OPs that support quantization." + "Only to be used with MKL-DNN INT8") + .SetDefault({1.0f}); + AddAttr("scale_out", + "scale_out to be used for int8 output data." + "Only used with MKL-DNN INT8") + .SetDefault(1.0f); + AddAttr( + "force_fp32_output", + "(bool, default false) Force quantize kernel output FP32, only " + "used in quantized MKL-DNN.") + .SetDefault(false); + AddComment(R"DOC( +Mul Operator. +This operator is used to perform matrix multiplication for input $X$ and $Y$. +The equation is: +$$Out = X * Y$$ +Both the input $X$ and $Y$ can carry the LoD (Level of Details) information, +or not. But the output only shares the LoD information with input $X$. 
+)DOC"); + } +}; +``` + +[`MulOpMaker`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/mul_op.cc)继承自`framework::OpProtoAndCheckerMaker`。 + +开发者通过覆盖`framework::OpProtoAndCheckerMaker`中的`Make`函数来定义Op所对应的Proto,通过`AddInput`添加输入参数,通过`AddOutput`添加输出参数,通过`AddAttr`添加属性参数,通过`AddComment`添加Op的注释。这些函数会将对应内容添加到`OpProto`中。 + +上面的代码在`MulOp`中添加两个输入`X`和`Y`,添加了一个输出`Out`,以及`use_mkldnn`等属性,并解释了各自含义,命名请遵守[命名规范](https://github.com/PaddlePaddle/FluidDoc/blob/release/1.2/doc/fluid/dev/name_convention.md)。 + +### 定义GradOpMaker类 +通常情况下,大部分Op只有一个对应的反向Op,每个Op的会有一个对应的`GradOpMaker`。为方便代码编写,fluid为只有提供了一个模板类[`SingleGradOpMaker`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/framework/grad_op_desc_maker.h#L188)。`MulOp`的`GradOpMaker`需要继承这个模板类,并在`Apply()`方法中设置反向Op的输入、输出和属性。此外,fluid还提供了一个默认的`GradOpMaker`, +[`DefaultGradOpMaker`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/framework/grad_op_desc_maker.h#L227),该模板类会使用前向Op的全部输入(`Input`)输出(`Output`)以及输出变量所对应的梯度(`Output@Grad`)作为反向Op的输入,将前向Op的输入变量所对应的的梯度(`Input@Grad`)作为输出。 + +**注意:** +不要将反向Op不会用到的变量放到反向Op的输入列表中,这样会导致这些不会被反向Op用到的变量的空间不能够及时回收,进而有可能导致用到该Op的模型可以设置的batch_size较低。 +比如`relu`操作的前向操作为:`out.device(d) = x.cwiseMax(static_cast(0));`反向操作为:`dx.device(d) = dout * (out > static_cast(0)).template cast();`。显然,反向操作中只是用到了`out`、`dout`、`dx`,没有用到`x`。因此,通常不建议使用默认的`DefaultGradOpMaker`。 + + +下面示例定义了`MulOp`的`GradOpMaker`。 + +```cpp +template +class MulOpGradMaker : public framework::SingleGradOpMaker { + public: + using framework::SingleGradOpMaker::SingleGradOpMaker; + + protected: + void Apply(GradOpPtr retv) const override { + retv->SetType("mul_grad"); + retv->SetInput("X", this->Input("X")); + retv->SetInput("Y", this->Input("Y")); + retv->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out")); + retv->SetOutput(framework::GradVarName("X"), this->InputGrad("X")); + retv->SetOutput(framework::GradVarName("Y"), this->InputGrad("Y")); + retv->SetAttrMap(this->Attrs()); + } +}; +``` + +**注意:** + +- 有些Op的前向逻辑和反向逻辑是一样的,比如[`ScaleOp`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/scale_op.cc).这种情况下,前向Op和反向Op的Kernel可以为同一个。 +- 有些前向Op所对应的反向Op可能有多个,比如[`SumOp`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/sum_op.cc),这种情况下,`GradMaker`需要继承`framework::GradOpDescMakerBase`。 +- 有些Op的反向对应另一个Op的前向,比如[`SplitOp`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/split_op.h),这种情况下,[`SplitGradMaker`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/split_op.h#L157)中定义的`SplitOp`反向Op的Type就是`concat`, +- 为高效地同时支持命令式编程模式(动态图)和声明式编程模式(静态图),`SingleGradOpMaker`是一个模板类,在注册Operator时需要同时注册`MulOpGradMaker`(声明式编程模式使用)和`MulOpGradMaker`(命令式编程模式使用)。 + +### 定义Operator类 + +下面实现了MulOp的定义: + +```cpp +class MulOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE_EQ( + ctx->HasInput("X"), true, + platform::errors::NotFound("Input(X) of MulOp should not be null.")); + PADDLE_ENFORCE_EQ( + ctx->HasInput("Y"), true, + platform::errors::NotFound("Input(Y) of MulOp should not be null.")); + PADDLE_ENFORCE_EQ( + ctx->HasOutput("Out"), true, + platform::errors::NotFound("Output(Out) of MulOp should not be null.")); + + auto x_dims = ctx->GetInputDim("X"); + auto y_dims = ctx->GetInputDim("Y"); + + int x_num_col_dims = ctx->Attrs().Get("x_num_col_dims"); + int y_num_col_dims = 
ctx->Attrs().Get("y_num_col_dims"); + + VLOG(3) << "mul operator x.shape=" << x_dims << " y.shape=" << y_dims + << " x_num_col_dims=" << x_num_col_dims + << " y_num_col_dims=" << y_num_col_dims; + + PADDLE_ENFORCE_NE(framework::product(y_dims), 0, + platform::errors::PreconditionNotMet( + "The Input variable Y(%s) has not " + "been initialized. You may need to confirm " + "if you put exe.run(startup_program) " + "after optimizer.minimize function.", + ctx->Inputs("Y").front())); + PADDLE_ENFORCE_GT( + x_dims.size(), x_num_col_dims, + platform::errors::InvalidArgument( + "The input tensor X's dimensions of MulOp " + "should be larger than x_num_col_dims. But received X's " + "dimensions = %d, X's shape = [%s], x_num_col_dims = %d.", + x_dims.size(), x_dims, x_num_col_dims)); + PADDLE_ENFORCE_GT( + y_dims.size(), y_num_col_dims, + platform::errors::InvalidArgument( + "The input tensor Y's dimensions of MulOp " + "should be larger than y_num_col_dims. But received Y's " + "dimensions = %d, Y's shape = [%s], y_num_col_dims = %d.", + y_dims.size(), y_dims, y_num_col_dims)); + + auto x_mat_dims = framework::flatten_to_2d(x_dims, x_num_col_dims); + auto y_mat_dims = framework::flatten_to_2d(y_dims, y_num_col_dims); + + PADDLE_ENFORCE_EQ( + x_mat_dims[1], y_mat_dims[0], + platform::errors::InvalidArgument( + "After flatten the input tensor X and Y to 2-D dimensions " + "matrix X1 and Y1, the matrix X1's width must be equal with matrix " + "Y1's height. But received X's shape = [%s], X1's shape = [%s], " + "X1's " + "width = %s; Y's shape = [%s], Y1's shape = [%s], Y1's height = " + "%s.", + x_dims, x_mat_dims, x_mat_dims[1], y_dims, y_mat_dims, + y_mat_dims[0])); + std::vector output_dims; + output_dims.reserve( + static_cast(x_num_col_dims + y_dims.size() - y_num_col_dims)); + + for (int i = 0; i < x_num_col_dims; ++i) { + output_dims.push_back(x_dims[i]); + } + + for (int i = y_num_col_dims; i < y_dims.size(); ++i) { + output_dims.push_back(y_dims[i]); + } + + ctx->SetOutputDim("Out", framework::make_ddim(output_dims)); + ctx->ShareLoD("X", /*->*/ "Out"); + } + + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const { + framework::LibraryType library = framework::LibraryType::kPlain; + framework::DataLayout layout = framework::DataLayout::kAnyLayout; + int customized_type_value = + framework::OpKernelType::kDefaultCustomizedTypeValue; + auto input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X"); +#ifdef PADDLE_WITH_MKLDNN + if (library == framework::LibraryType::kPlain && + platform::CanMKLDNNBeUsed(ctx)) { + library = framework::LibraryType::kMKLDNN; + layout = framework::DataLayout::kMKLDNN; + + if (input_data_type == framework::DataTypeTrait::DataType() || + input_data_type == framework::DataTypeTrait::DataType()) { + customized_type_value = kMULMKLDNNINT8; + } + } +#endif + + return framework::OpKernelType(input_data_type, ctx.GetPlace(), layout, + library, customized_type_value); + } +}; +``` + +[`MulOp`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/mul_op.cc#L30)继承自`OperatorWithKernel`。`public`成员: + +```cpp +using framework::OperatorWithKernel::OperatorWithKernel; +``` + +这句表示使用基类`OperatorWithKernel`的构造函数,也可写成: + +```cpp +MulOp(const std::string &type, const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : OperatorWithKernel(type, inputs, outputs, attrs) {} +``` + 
+此外,Operator类通常需要重写`InferShape`接口,并在有必要时重写`GetExpectedKernelType`接口。`InferShape`为const函数,不能修改Op的成员变量,参数为`framework::InferShapeContext* ctx`,通过该参数可获取到输入输出以及属性。它的功能是: + + - 做检查, 尽早报错:检查输入数据维度、类型等是否合法。 + - 设置输出Tensor的形状以及LoD信息。 + +`GetExpectedKernelType`接口OperatorWithKernel类中用于获取指定设备(例如CPU,GPU)上指定数据类型(例如double,float)的OpKernel的方法。该方法的重写可见请参考[写C++ OP相关注意事项](op_notes.html#getexpectedkerneltype)。 + +通常`OpProtoMaker`和`Op`类的定义写在`.cc`文件中,和下面将要介绍的注册函数一起放在`.cc`中 + +### InferShape区分 compile time 和 run time +在我们的声明式编程模式网络中,`InferShape`操作在[编译时(compile time)和运行时(run time)](https://github.com/PaddlePaddle/FluidDoc/blob/release/1.2/doc/fluid/getstarted/Developer's_Guide_to_Paddle_Fluid.md#%E8%AE%A9%E6%88%91%E4%BB%AC%E5%9C%A8fluid%E7%A8%8B%E5%BA%8F%E5%AE%9E%E4%BE%8B%E4%B8%AD%E5%8C%BA%E5%88%86%E7%BC%96%E8%AF%91%E6%97%B6%E5%92%8C%E8%BF%90%E8%A1%8C%E6%97%B6)都会被调用,在compile time时,由于真实的维度未知,框架内部用-1来表示,在run time时,用实际的维度表示,因此维度的值在compile time和 run time时可能不一致,如果存在维度的判断和运算操作,InferShape就需要区分compile time 和 run time。 + +以下两种情况需要区分compile time和 run time。 + +**1.检查** + +如以下代码: +```cpp +auto x_dim = ctx->GetInputDim("X"); +int i = xxx; +PADDLE_ENFORCE_GT( x_dim[i] , 10) +``` + +在compile time的时候,x_dim[i]可能等于-1,导致这个PADDLE_ENFORCE_GT报错退出。 + +如果用了以下paddle中定义的宏进行判断: +```cpp +PADDLE_ENFORCE_EQ ( x_dim[i] , 10) +PADDLE_ENFORCE_NE ( x_dim[i] , 10) +PADDLE_ENFORCE_GT ( x_dim[i] , 10) +PADDLE_ENFORCE_GE ( x_dim[i] , 10) +PADDLE_ENFORCE_LT ( x_dim[i] , 10) +PADDLE_ENFORCE_LE ( x_dim[i] , 10) +``` +都需要区分compile time和run time + +**2. 运算** + +如以下代码: +```cpp +auto x_dim = ctx->GetInputDim("X"); +int i = xxx; +y_dim[0] = x_dim[i] + 10 +``` + +在compile time的时候,x_dim[i]可能等于-1,得到的 y_dim[0] 等于 9,是不符合逻辑的 + +如果用到了类似以下的运算操作 +```cpp +y_dim[i] = x_dim[i] + 10 +y_dim[i] = x_dim[i] - 10 +y_dim[i] = x_dim[i] * 10 +y_dim[i] = x_dim[i] / 10 +y_dim[i] = x_dim[i] + z_dim[i] +``` +都需要区分compile time和run time + +**处理的标准**: +- 检查: compile time的时候不判断维度等于-1的情况,但在runtime的时候检查 +- 运算: -1和其他数做任何运算都要等于-1 + +**参考代码** +1. 判断的实现方法可以参考[cross_entropy_op](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/cross_entropy_op.cc#L39),cross_entropy_op 要求X和labels的两个输入,除了最后一维以外,其他的维度完全一致 + +```cpp + bool contain_unknown_dim = framework::contain_unknown_dim(x_dims) || + framework::contain_unknown_dim(label_dims); + bool check = ctx->IsRuntime() || !contain_unknown_dim; + if (check) { + PADDLE_ENFORCE_EQ(framework::slice_ddim(x_dims, 0, rank - 1), + framework::slice_ddim(label_dims, 0, rank - 1), + "Input(X) and Input(Label) shall have the same shape " + "except the last dimension."); + } +``` + +2. 运算的实现可以参考[concat_op](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/concat_op.cc#L59),concat在InferShape判断时,调用`ComputeAndCheckShape`,除了进行concat轴之外,其他的维度完全一致;在生成output的维度时,把concat轴的维度求和,其他的维度和输入保持一致。 + +```cpp + const size_t n = inputs_dims.size(); + auto out_dims = inputs_dims[0]; + size_t in_zero_dims_size = out_dims.size(); + for (size_t i = 1; i < n; i++) { + for (size_t j = 0; j < in_zero_dims_size; j++) { + if (j == axis) { + if (is_runtime) { + out_dims[axis] += inputs_dims[i][j]; + } else { + if (inputs_dims[i][j] == -1) { + out_dims[axis] = -1; + } else { + out_dims[axis] += inputs_dims[i][j]; + } + } + } else { + bool check_shape = + is_runtime || (out_dims[j] > 0 && inputs_dims[i][j] > 0); + if (check_shape) { + // check all shape in run time + PADDLE_ENFORCE_EQ( + inputs_dims[0][j], inputs_dims[i][j], + "ShapeError: Dimension %d in inputs' shapes must be equal. 
" + "But recevied input[0]'s shape = " + "[%s], input[%d]'s shape = [%s].", + j, inputs_dims[0], i, inputs_dims[i]); + } + } + } + } +``` + + +### 定义OpKernel类 + +`MulKernel`继承自`framework::OpKernel`,带有下面两个模板参数: + +- `typename DeviceContext`: 表示设备类型。不同设备(CPU、CUDA)共享同一个Kernel时,需加该模板参数;不共享则不加,一个不共享的例子是[`SGDOpKernel`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/optimizers/sgd_op.h)。 + +- `typename T` : 表示数据类型,如`float`, `double`, `int16`等。 + +需要为`MulKernel`类重写`Compute`接口。 + +- `Compute`接受一个输入参数:`const framework::ExecutionContext& context`。 + +- 与`InferShapeContext`相比,`ExecutionContext`增加了设备类型,同样可获取到输入输出和属性参数。 + +- `Compute`函数里实现`OpKernel`的具体计算逻辑。 + +Op的输入和输出可分别通过`ExecutionContext::Input()`和`ExecutionContext::Output()`获得。 + +**注意:** 若op的输入/输出的变量类型是`LoDTensor`(fluid默认所有的`Tensor`默认都是`LoDTensor`类型),请写成`ExecutionContext::Input()`和`ExecutionContext::Output()`,不要写`ExecutionContext::Input()`和`ExecutionContext::Output()`。因为若实际的变量类型为`SelectedRows`,`Input()`和`Output()`方法会将`SelectedRows`类型特化为`Tensor`,导致潜在的错误。 + +下面是 `MulKernel` `Compute`的实现: + +```cpp +template +class MulKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + const Tensor* x = context.Input("X"); + const Tensor* y = context.Input("Y"); + Tensor* z = context.Output("Out"); + const Tensor x_matrix = + x->dims().size() > 2 + ? framework::ReshapeToMatrix( + *x, context.template Attr("x_num_col_dims")) + : *x; + const Tensor y_matrix = + y->dims().size() > 2 + ? framework::ReshapeToMatrix( + *y, context.template Attr("y_num_col_dims")) + : *y; + + z->mutable_data(context.GetPlace()); + auto z_dim = z->dims(); + if (z_dim.size() != 2) { + z->Resize({x_matrix.dims()[0], y_matrix.dims()[1]}); + } + + auto blas = math::GetBlas(context); + + blas.MatMul(x_matrix, y_matrix, z); + if (z_dim.size() != 2) { + z->Resize(z_dim); + } + } +}; +``` + +需要注意:**不同设备(CPU、CUDA)共享一个Op定义,是否则共享同一个`OpKernel`,取决于`Compute`调用的函数是否支持不同设备。** + +`MulOp`的CPU、CUDA实现共享同一个`Kernel`。`OpKernel`不共享的例子可以参考:[`SGDOpKernel`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/optimizers/sgd_op.h)。 + +为了使`OpKernel`的计算过程书写更加简单,并且CPU、CUDA的代码可以复用,我们通常借助 Eigen unsupported Tensor模块来实现`Compute`接口。关于在PaddlePaddle中如何使用Eigen库,请参考[使用文档](https://github.com/PaddlePaddle/FluidDoc/blob/release/1.2/doc/fluid/dev/use_eigen_cn.md)。 + +到此,前向Op实现完成。接下来,需要在`.cc`文件中注册该op和kernel。 +反向Op类的定义,反向OpKernel的定义与前向Op类似,这里不再赘述。 + +### 注册Operator + +- 在`.cc`文件中注册前向、反向Op类,注册CPU Kernel。 + + ```cpp + namespace ops = paddle::operators; + REGISTER_OPERATOR(mul, ops::MulOp, ops::MulOpMaker, ops::MulOpInferVarType, + ops::MulOpGradMaker, + ops::MulOpGradMaker); + + REGISTER_OPERATOR(mul_grad, ops::MulGradOp); + + REGISTER_OP_CPU_KERNEL(mul, + ops::MulKernel, + ops::MulKernel); + REGISTER_OP_CPU_KERNEL(mul_grad, + ops::MulGradKernel, + ops::MulGradKernel); + ``` + + 在上面的代码中,使用`REGISTER_OPERATOR`注册了`ops::MulOp`类,类型名为`mul`,该类的`ProtoMaker`为`ops::MulOpMaker`,其`GradOpMaker`分别是`ops::MulOpGradMaker`(声明式编程模式使用)和`ops::MulOpGradMaker`(命令式编程模式使用),并使用`REGISTER_OPERATOR`注册`ops::MulGradOp`,类型名为`mul_grad`。然后,使用`REGISTER_OP_CPU_KERNEL`注册了`ops::MulKernel`类,并特化模板参数为设备为`paddle::platform::CPUPlace`、数据类型为`float`类型和`double`类型;同理,注册`ops::MulGradKernel`类。 + + +- 在 `.cu`文件中注册CUDA Kernel。 + - 请注意,如果CUDA Kernel的实现基于Eigen unsupported模块,那么在 `.cu`的开始请加上宏定义 `#define EIGEN_USE_GPU`,代码示例如下: + + + ```cpp + // if use Eigen unsupported module before include head files + #define EIGEN_USE_GPU + + namespace ops = paddle::operators; + 
REGISTER_OP_CUDA_KERNEL(mul, + ops::MulKernel, + ops::MulKernel); + REGISTER_OP_CUDA_KERNEL(mul_grad, + ops::MulGradKernel, + ops::MulGradKernel); + ``` + +**注意:** + +在运行Op时,框架系统会根据输入数据所在的设备、输入数据的类型等信息自动的选择合适的OpKernel,比如输入的数据是在GPU上,并且为`float`类型,框架系统会选择由`REGISTER_OP_CUDA_KERNEL`注册的`ops::MulKernel`。如果用户希望指定运行时可被调用的OpKernel,用户需要覆盖`framework::OperatorWithKernel`中的`GetExpectedKernelType`函数,比如`MulOp`会根据属性`use_mkldnn`为`false`还是为`true`决定是否调用mkldnn库来完成计算。 + + +### 编译 + +在`build/paddle/fluid/operators`目录下,运行下面命令可以进行编译: + +``` +make mul_op +``` + +## 绑定Python + +系统会对新增的op自动绑定Python,并链接到生成的lib库中。 + +### 使用mul操作在Python端构建Layer + +在Python端,`mul`操作用于构建FC层,即: + +$$Out = Act({X*W + b})$$ + +具体实现方式可参考[FC层的实现代码](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/layers/nn.py#L205)。 + +## 实现单元测试 + +单测包括对比前向Op不同设备(CPU、CUDA)的实现、对比反向OP不同设备(CPU、CUDA)的实现、反向Op的梯度测试。下面介绍介绍[`MulOp`的单元测试](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/tests/unittests/test_mul_op.py)。 + +**注意:** + +单测中的测试用例需要尽可能的覆盖Op中的所有分支。 + +### 前向Operator单测 + +Op单元测试继承自`OpTest`。各项具体的单元测试在`TestMulOp`里完成。测试Operator,需要: + +1. 在`setUp`函数定义输入、输出,以及相关的属性参数。 + + + > 注意:输入输出请以`ndarray`的类型配置输入/输出,如果需要配置一个带`LOD`的输入/输出,请以`tuple`的形式传入,`tuple`中应该有两个类型为`ndarray`的元素,第一个是实际的数据,第二个是`LOD` + + +2. 生成随机的输入数据。 +3. 在Python脚本中实现与前向operator相同的计算逻辑,得到输出值,与operator前向计算的输出进行对比。 +4. 反向计算已经自动集成进测试框架,直接调用相应接口即可。 + + + ```python + import unittest + import numpy as np + from op_test import OpTest + + + class TestMulOp(OpTest): + def setUp(self): + self.op_type = "mul" + self.inputs = { + 'X': np.random.random((32, 84)).astype("float32"), + 'Y': np.random.random((84, 100)).astype("float32") + } + self.outputs = {'Out': np.dot(self.inputs['X'], self.inputs['Y'])} + + def test_check_output(self): + self.check_output() + + def test_check_grad_normal(self): + self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.5) + + def test_check_grad_ingore_x(self): + self.check_grad( + ['Y'], 'Out', max_relative_error=0.5, no_grad_set=set("X")) + + def test_check_grad_ingore_y(self): + self.check_grad( + ['X'], 'Out', max_relative_error=0.5, no_grad_set=set('Y')) + ``` + + 上面的代码首先导入依赖的包,下面是对`setUp`函数中操作的重要变量的详细解释: + + - `self.op_type = "mul" ` : 定义类型,与operator注册时注册的类型一致。 + - `self.inputs` : 定义输入,类型为`numpy.array`,并初始化。 + - `self.outputs` : 定义输出,并在Python脚本中完成与operator同样的计算逻辑,返回Python端的计算结果。 + +### 反向operator单测 + +而反向测试中: + +- `test_check_grad_normal`中调用`check_grad`使用数值法检测梯度正确性和稳定性。 + - 第一个参数`["X", "Y"]` : 指定对输入变量`X`、`Y`做梯度检测。 + - 第二个参数`"Out"` : 指定前向网络最终的输出目标变量`Out`。 + - 第三个参数`max_relative_error`:指定检测梯度时能容忍的最大错误值。 + +- `test_check_grad_ingore_x`和`test_check_grad_ingore_y`分支用来测试只需要计算一个输入梯度的情况。 + + +### 编译和执行 + +`python/paddle/fluid/tests/unittests/` 目录下新增的 `test_*.py` 单元测试会被自动加入工程进行编译。 + +请注意,**运行单元测试测时需要编译整个工程**,并且编译时需要打开`WITH_TESTING`, 即`cmake -DWITH_TESTING=ON ..`。编译成功后,执行下面的命令来运行单元测试: + +```bash +make test ARGS="-R test_mul_op -V" +``` + +或者: + +```bash +ctest -R test_mul_op +``` + +## 注意事项 + +- 注册Op时的类型名,需要和该Op的名字一样。即不允许在`A_op.cc`里面,注册`REGISTER_OPERATOR(B, ...)`等,这将会导致单元测试出错。 +- 如果Op没有实现CUDA Kernel,请不要创建空的`*_op.cu`,这将会导致单元测试出错。 +- 如果多个Op依赖一些共用的函数,可以创建非`*_op.*`格式的文件来存放,如`gather.h`文件。 + +### PADDLE_ENFORCE使用注意 + +实现Op时检查数据的合法性需要使用PADDLE_ENFORCE以及PADDLE_ENFORCE_EQ等宏定义,基本格式如下: + +``` +PADDLE_ENFORCE(表达式, 错误提示信息) +PADDLE_ENFORCE_EQ(比较对象A, 比较对象B, 错误提示信息) +``` + +如果表达式为真,或者比较对象A=B,则检查通过,否则会终止程序运行,向用户反馈相应的错误提示信息。 +为了确保提示友好易懂,开发者需要注意其使用方法。 + +#### 总体原则 + +任何使用了PADDLE_ENFORCE与PADDLE_ENFORCE_XX检查的地方,必须有详略得当的备注解释!**错误提示信息不能为空!** + +#### 提示信息书写标准 + +1. 
[required] 哪里错了?为什么错了? + + - 例如:`ValueError: Mismatched label shape` + +2. [optional] 期望的输入是什么样的?实际的输入是怎样的? + + - 例如:`Expected labels dimension=1. Received 4.` + +3. [optional] 能否给出修改意见? + + - 例如:`Suggested Fix:If your classifier expects one-hot encoding label,check your n_classes argument to the estimatorand/or the shape of your label.Otherwise, check the shape of your label.` + +如果并非必要或者简洁的描述即可表达清楚以上要点,根据情况书写亦可。 + +#### FAQ 典型问题 + +1. 无报错信息或报错信息过于简单,不能给用户提供有效的提示! + + 问题示例1 :未写提示信息 + ``` + PADDLE_ENFORCE(ctx->HasInput("X"), ""); + ``` + 问题示例2 :提示信息过于简单 + ``` + PADDLE_ENFORCE(i != nullptr, "i must be set"); // i是什么? + ``` + +2. 在报错信息中使用开发人员定义的变量缩写,不易理解! + + 问题示例: + ``` + PADDLE_ENFORCE(forward_pd != nullptr, + "Fail to find eltwise_fwd_pd in device context"); //eltwise_fwd_pd用户可能看不懂 + ``` + +3. OP内部调用非法接口:Op内部如果出现Output = ShareDataWith(Input) + 问题示例: + ```cpp + auto *out = ctx.Output("Out"); + auto *in = ctx.Input("X"); + out->ShareDataWith(*in); + ``` + Op内部如果出现Output = ShareDataWith(Input),相当于operator图的中有一条隐藏边,连接了Input和Output,这条边无法在图分析中表达,引发基于图优化的错误。 + +4. OP实现的性能实践 + 调用了eigen的broadcast, chop等操作,性能会比手写cuda kernel差几倍以上。此时cpu的实现可以复用eigen,gpu实现可以实现cuda kernel. + + +#### OP InferShape检查提示信息特别说明 + +- 检查输入输出变量,请统一遵循以下格式 +`Input(变量名) of OP名 operator should not be null.` + + 正确示例: + ``` + PADDLE_ENFORCE(ctx->HasInput("Input"), + "Input(Input) of LSTMP operator should not be null."); + ``` + +- 反向Op的输入输出检查,要写明反向Op的名字 + + 正确示例: + ``` + PADDLE_ENFORCE(ctx->HasInput("X"), + "Input(X) of LoDResetGrad opreator should not be null."); + ``` diff --git a/doc/paddle/advanced_guide/addon_development/new_op/new_op_en.md b/doc/paddle/advanced_guide/addon_development/new_op/new_op_en.md new file mode 100644 index 0000000000000000000000000000000000000000..eab7b50c7447e20c972b62c4cc9b307fb81e9114 --- /dev/null +++ b/doc/paddle/advanced_guide/addon_development/new_op/new_op_en.md @@ -0,0 +1,478 @@ +# How to write a new operator + + +## Background + +Here are the base types needed. For details, please refer to the design docs. + +- `class OpProtoAndCheckerMaker`: Describes an Operator's input, output, attributes and description, mainly used to interface with Python API. +- `framework::OperatorBase`: Operator (Op)base class. +- `framework::OpKernel`: Base class for Op computation kernel. +- `framework::OperatorWithKernel`: Inherited from OperatorBase, describing an operator with computation kernels. + + +Operators can be categorized into two groups: operator with kernel(s) and operator without kernel(s). An operator with kernel(s) inherits from `OperatorWithKernel` while the one without kernel(s) inherits from `OperatorBase`. This tutorial focuses on implementing operators with kernels. In short, an operator includes the following information: + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+<tr><th>Information</th><th>Where is it defined</th></tr>
+<tr><td>OpProtoMake definition</td><td>`.cc` files; a backward Op does not need an OpProtoMake interface.</td></tr>
+<tr><td>Op definition</td><td>`.cc` files</td></tr>
+<tr><td>Kernel implementation</td><td>The kernel methods shared between CPU and CUDA are defined in `.h` files. CPU-specific kernels live in `.cc` files, while CUDA-specific kernels are implemented in `.cu` files.</td></tr>
+<tr><td>Registering the Op</td><td>Ops are registered in `.cc` files; for kernel registration, `.cc` files contain the CPU implementation, while `.cu` files contain the CUDA implementation.</td></tr>
+ + +New Operator implementations are added to the list [paddle/operators](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/fluid/operators), with file names in the format `*_op.h` (if applicable), `*_op.cc`, `*_op.cu` (if applicable).** The system will use the naming scheme to automatically build operators and their corresponding Python extensions.** + + +Let's take matrix multiplication operator, [MulOp](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/mul_op.cc), as an example to introduce the writing of an Operator with Kernel. + + +## Implementing C++ Types + + +### Defining ProtoMaker + +Matrix Multiplication can be written as $Out = X * Y$, meaning that the operation consists of two inputs and one output. + +First, define `ProtoMaker` to describe the Operator's input, output, and additional comments: + +```cpp +class MulOpMaker : public framework::OpProtoAndCheckerMaker { + public: + MulOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "(Tensor), 2D tensor of size (M x K)"); + AddInput("Y", "(Tensor), 2D tensor of size (K x N)"); + AddOutput("Out", "(Tensor), 2D tensor of size (M x N)"); + AddComment(R"DOC( +Two Element Mul Operator. +The equation is: Out = X * Y +)DOC"); + } +}; +``` + +[`MulOpMaker`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/mul_op.cc#L76-L127)is inherited from`framework::OpProtoAndCheckerMaker`, consisting of 2 variables in the constructor: + + - `framework::OpProto` stores Operator input and variable attribute, used for generating Python API interfaces. + - `framework::OpAttrChecker` is used to validate variable attributes. + +The constructor utilizes `AddInput` to add input parameter, `AddOutput` to add output parameter, and `AddComment` to add comments for the Op, so that the corresponding information will be added to `OpProto`. + +The code above adds two inputs `X` and `Y` to `MulOp`, an output `Out`, and their corresponding descriptions. Names are given in accordance to Paddle's [naming convention](https://github.com/PaddlePaddle/FluidDoc/blob/release/1.2/doc/fluid/dev/name_convention.md). + + +An additional example [`ScaleOp`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/scale_op.cc#L38-L55) is implemented as follows: + + +```cpp +template +class ScaleOpMaker : public framework::OpProtoAndCheckerMaker { + public: + ScaleOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "(Tensor) Input tensor of scale operator."); + AddOutput("Out", "(Tensor) Output tensor of scale operator."); + AddComment(R"DOC( +Scale operator +$$Out = scale*X$$ +)DOC"); + AddAttr("scale", + "(float, default 1.0)" + "The scaling factor of the scale operator.") + .SetDefault(1.0); + } +}; +``` + +Note `AddAttr("scale", "...").SetDefault(1.0);` adds `scale`constant as an attribute, and sets the default value to 1.0. + + +### Defining the GradProtoMaker class + +Each Op must have a corresponding GradProtoMaker. If GradProtoMaker corresponding to the forward Op is not customized, Fluid provides DefaultGradProtoMaker. The default registration will use all input and output, including Input, Output, Output@Grad and so on. Using unnecessary variables will cause waste of memory. +The following example defines ScaleOp's GradProtoMaker. 
+ +```cpp +class ScaleGradMaker : public framework::SingleGradOpDescMaker { + public: + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + std::unique_ptr Apply() const override { + auto *grad_op = new framework::OpDesc(); + grad_op->SetType("scale"); + grad_op->SetInput("X", OutputGrad("Out")); + grad_op->SetOutput("Out", InputGrad("X")); + grad_op->SetAttr("scale", GetAttr("scale")); + return std::unique_ptr(grad_op); + } +}; +``` + +### Defining Operator + +The following code defines the interface for MulOp: + +```cpp +class MulOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(const framework::InferShapeContext &ctx) const override { + //never use Input or Output if you want a to get a LoDTensor. + auto dim0 = ctx.Input("X")->dims(); + auto dim1 = ctx.Input("Y")->dims(); + PADDLE_ENFORCE_EQ(dim0.size(), 2, + "input X(%s) should be a tensor with 2 dims, a matrix", + ctx.op_.Input("X")); + PADDLE_ENFORCE_EQ(dim1.size(), 2, + "input Y(%s) should be a tensor with 2 dims, a matrix", + ctx.op_.Input("Y")); + PADDLE_ENFORCE_EQ( + dim0[1], dim1[0], + "First matrix's width must be equal with second matrix's height."); + ctx.Output("Out")->Resize({dim0[0], dim1[1]}); + } +}; +``` + +[`MulOp`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/mul_op.cc#L24) is inherited from `OperatorWithKernel`. Its `public` member + +```cpp +using framework::OperatorWithKernel::OperatorWithKernel; +``` + +expresses an operator constructor using base class `OperatorWithKernel`, alternatively written as + +```cpp +MulOp(const std::string &type, const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : OperatorWithKernel(type, inputs, outputs, attrs) {} +``` + +`InferShape` interface needs to be re-written.`InferShape` is a const method and cannot modify Op's member variables. Its constant member `const framework::InferShapeContext &ctx` can be used to extract input, output, and attributes. Its functions are + + - 1). validate and error out early: it checks input data dimensions and types. + - 2). configures the tensor shape in the output. + +Usually `OpProtoMaker` and `Op` definitions are written in `.cc` files, which also include the registration methods introduced later. + + +### Defining OpKernel + +`MulKernel` is derived from `framework::OpKernel`, which includes the following templates: + +- `typename DeviceContext` denotes device context type. When different devices, namely the CPU and the CUDA, share the same kernel, this template needs to be added. If they don't share kernels, this must not be added. An example of a non-sharing kernel is [`OnehotCrossEntropyOpKernel`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/cross_entropy_op.h#L43). + +- `typename T` denotes data type, such as `float` or `double`. + +`MulKernel` types need to rewrite the interface for `Compute`. + +- `Compute` takes one input parameter: `const framework::ExecutionContext& context`. +- Compared with `InferShapeContext`, `ExecutionContext` includes device types, and can similarly extract input, output, and attribute variables. +- `Compute` function implements the computation logics of an `OpKernel`. + +The input and output of Op can be obtained by `ExecutionContext::Input()` and `ExecutionContext::Output()` respectively. 
+ +**Note:** If the input/output variable type of op is `LoDTensor` (In Fluid, all Tensors are LoDTensor type by default), please write `ExecutionContext::Input()` and `ExecutionContext:: Output()`, do not write `ExecutionContext::Input()` and `ExecutionContext::Output()`. Because if the actual variable type is `SelectedRows`, the `Input()` and `Output()` methods will specialize the `SelectedRows` type to `Tensor`, causing a potential error. + + +`MulKernel`'s implementation of `Compute` is as follows: + +```cpp +template +class MulKernel : public framework::OpKernel { +public: +void Compute(const framework::ExecutionContext& context) const override { + auto* X = context.Input("X"); + auto* Y = context.Input("Y"); + auto* Z = context.Output("Out"); + Z->mutable_data(context.GetPlace()); + auto& device_context = context.template device_context(); + math::matmul(*X, false, *Y, false, 1, Z, 0, device_context); +} +}; +``` + +Note that **different devices (CPU, CUDA)share one Op definition; whether or not they share the same `OpKernel` depends on whether functions called by `Compute`can support both devices.** + +`MulOp`'s CPU and CUDA share the same `Kernel`. A non-sharing `OpKernel` example can be seen in [`OnehotCrossEntropyOpKernel`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/cross_entropy_op.cc). + +To ease the writing of `OpKernel` compute, and for reusing code cross-device, [`Eigen-unsupported Tensor`](https://bitbucket.org/eigen/eigen/src/default/unsupported/Eigen/CXX11/src/Tensor/README.md?fileviewer=file-view-default) module is used to implement `Compute` interface. To learn about how the Eigen library is used in PaddlePaddle, please see [usage document](https://github.com/PaddlePaddle/FluidDoc/blob/release/1.2/doc/fluid/dev/use_eigen_cn.md). + + +This concludes the forward implementation of an operator. Next its operation and kernel need to be registered in a `.cc` file. + +The definition of its corresponding backward operator, if applicable, is similar to that of an forward operator. **Note that a backward operator does not include a `ProtoMaker`**. + + + +### Registering Operator and OpKernel + +- In `.cc` files, register forward and backward operator classes and the CPU kernel. + + ```cpp + namespace ops = paddle::operators; + REGISTER_OPERATOR(mul, ops::MulOp, ops::MulOpMaker, + paddle::framework::DefaultGradOpDescMaker) + REGISTER_OPERATOR(mul_grad, ops::MulGradOp) + REGISTER_OP_CPU_KERNEL(mul, ops::MulKernel); + REGISTER_OP_CPU_KERNEL(mul_grad, + ops::MulGradKernel); + ``` + + In that code block, + + - `REGISTER_OPERATOR` registers the `ops::MulOp` class, with the type named `mul`. Its `ProtoMaker` is `ops::MulOpMaker`. Register `ops::MulOpGrad` as type named `mul_grad`. + - `REGISTER_OP_CPU_KERNEL` registers `ops::MulKernel` class and specializes template parameters as type `paddle::platform::CPUPlace` and `float`, and also registers `ops::MulGradKernel`. + + +- Registering CUDA Kernel in `.cu` files + - Note that if CUDA Kernel is implemented using the `Eigen unsupported` module, then on top of `.cu`, a macro definition `#define EIGEN_USE_GPU` is needed, such as + + ```cpp + // if use Eigen unsupported module before include head files + #define EIGEN_USE_GPU + + namespace ops = paddle::operators; + REGISTER_OP_CUDA_KERNEL(mul, ops::MulKernel); + REGISTER_OP_CUDA_KERNEL(mul_grad, + ops::MulGradKernel); + + ``` + + +### Compilation + +In folder `build/paddle/fluid/operators`, run the following commands to compile. 
+ +``` +make mul_op +``` + + +## Python Binding + +The system will automatically bind the new op to Python and link it to a generated library. + + +## Unit Tests + +Unit tests for an operator include + +1. comparing a forward operator's implementations on different devices (CPU, CUDA) + +2. comparing a backward operator's implementation on different devices (CPU, CUDA) + +3. a gradient test for the backward operator. + +Here, we introduce the [unit tests for `MulOp`](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/tests/unittests/test_mul_op.py). + + + +### Unit Test for Forward Operators + +The Op unit test is inherited from `OpTest`. More specific unit tests are done in `TestMulOp`. To test the Operator, you need to: + +1. Define input, output, and related property parameters in the `setUp` function. +2. Generate random input data. +3. Implement the same calculation logic as the forward operator in the Python script to get the output, which is to be compared with the output of the forward operator calculation. +4. The backward calculation has been automatically integrated into the test framework and the corresponding interface can be called directly. + + ```python + import unittest + import numpy as np + from op_test import OpTest + + + class TestMulOp(OpTest): + def setUp(self): + self.op_type = "mul" + self.inputs = { + 'X': np.random.random((32, 84)).astype("float32"), + 'Y': np.random.random((84, 100)).astype("float32") + } + self.outputs = {'Out': np.dot(self.inputs['X'], self.inputs['Y'])} + + def test_check_output(self): + self.check_output() + + def test_check_grad_normal(self): + self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.5) + + def test_check_grad_ingore_x(self): + self.check_grad( + ['Y'], 'Out', max_relative_error=0.5, no_grad_set=set("X")) + + def test_check_grad_ingore_y(self): + self.check_grad( + ['X'], 'Out', max_relative_error=0.5, no_grad_set=set('Y')) + ``` + + +The code above first loads required packages. In addition, we have + +- `self.op_type = "mul" ` defines the type that is identical to what the operator's registered type. +- `self.inputs` defines input, with type `numpy.array` and initializes it. +- `self.outputs` defines output and completes the same operator computation in the Python script, and returns its result from the Python script. + + +### Unit Test for Backward Operators + +In the backward operator test: + +- `check_grad` is called in `test_check_grad_normal` to use numerical methods to detect gradient correctness and stability. +- The first parameter `["X", "Y"]` : specifies gradient check for the input variables `X`, `Y`. +- The second parameter `"Out"` : specifies the final output target variable `Out` of the forward network. +- The third parameter `max_relative_error`: specifies the maximum error value that can be tolerated when checking gradients. +- The `test_check_grad_ingore_x` and `test_check_grad_ingore_y` branches are used to test cases where only one input gradient needs to be calculated. + + + +### Compiling and Running + + +Any new unit testing file of the format `test_*.py` added to the directory `python/paddle/fluid/tests/unittests/` is automatically added to the project to compile. + +Note that **running unit tests requires compiling the entire project** and requires compiling with flag `WITH_TESTING` on i.e. `cmake paddle_dir -DWITH_TESTING=ON`. 
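+
+The following is a minimal sketch of that configure-and-build step, assuming an out-of-source build directory named `build` (the directory name and the `-j` parallelism level are illustrative, not mandated by the framework):
+
+```bash
+# configure with unit tests enabled, then build
+mkdir -p build && cd build
+cmake .. -DWITH_TESTING=ON
+make -j$(nproc)   # adjust parallelism as appropriate
+```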
+ +After successfully compiling the project, run the following command to run unit tests: + +```bash +make test ARGS="-R test_mul_op -V" +``` + +Or, + +```bash +ctest -R test_mul_op +``` + + + +## Remarks + +- The type with which an operator is registered needs to be identical to the Op's name. Registering `REGISTER_OPERATOR(B, ...)` in `A_op.cc` will cause unit testing failures. +- If the operator does not implement a CUDA kernel, please refrain from creating an empty `*_op.cu` file, or else unit tests will fail. +- If multiple operators rely on some shared methods, a file NOT named `*_op.*` can be created to store them, such as `gather.h`. + + + + + +### PADDLE_ENFORCE Usage Note + +To check the validity of data when implementing Op, you need to use macro definitions such as PADDLE_ENFORCE and PADDLE_ENFORCE_EQ. The basic format is as follows: + +``` +PADDLE_ENFORCE (expression, error message) +PADDLE_ENFORCE_EQ (comparison object A, comparison object B, error message) +``` + +If the expression is true, or the comparison object A=B, the check will be passed, otherwise the program will be terminated and the corresponding error message will be fed back to the user. +In order to ensure that the feedbacks are user-friendly and easy to understand, developers need to pay attention to how to use them. + + + +#### General Principles + +Any place where PADDLE_ENFORCE and PADDLE_ENFORCE_EQ are used must have a properly detailed explanation of the comments! **Error message** can't be empty! + + + +#### Error Message Standard + +1. [required] Where does it go wrong? Why is it wrong? + + - For example: `ValueError: Mismatched label shape` + +2. [optional] What is the expected input? What is the actual input? + + - For example: `Expected labels dimension=1. Received 4.` + +3. [optional] Can you come up with a suggestion? + + - For example: `Suggested Fix: If your classifier expects one-hot encoding label, check your n_classes argument to the estimatorand/or the shape of your label.Otherwise, check the shape of your label.` + +If it is not necessary or concise description is enough to clearly express the above points, just write based on actual needs. + + + +#### Typical Problems + + +1.No error message exists or error message is too short to provide effective notification to the user. + + Problem example 1: Absent message + ``` + PADDLE_ENFORCE(ctx->HasInput("X"), ""); + ``` + Problem example 2: The prompt message is too short + ``` + PADDLE_ENFORCE(i != nullptr, "i must be set"); // What is i? + ``` + +2.Using developer-defined variable abbreviations in error messages is not easy to understand. + + Example of the problem: + ``` + PADDLE_ENFORCE(forward_pd != nullptr, + "Fail to find eltwise_fwd_pd in device context"); //eltwise_fwd_pduser may not be understood + ``` + +3.The OP internally calls the illegal interface: If Op appears inside Output = ShareDataWith(Input) + Example of the problem: + ```cpp + auto *out = ctx.Output("Out"); + auto *in = ctx.Input("X"); + out->ShareDataWith(*in); + ``` + + If there is Output = ShareDataWith(Input) inside Op, it will equivalently indicate a hidden edge in the operator graph, which connects Input and Output. This edge cannot be expressed in graph analysis, causing error based on graph optimization. + +4.Performance of OP implementation. It called eigen's broadcast, chop and other operations, the performance will be over several times worse than the handwritten cuda kernel. 
At this point, the implementation of cpu can reuse eigen, and the gpu implementation can implement cuda kernel. + + + + +#### Special Instructions for OP InferShape Check Message + +- Check input and output variables, please follow the following format +`Input(variable name) of OP name operator should not be null.` + + The correct example: + ``` + PADDLE_ENFORCE(ctx->HasInput("Input"), + "Input(Input) of LSTMP operator should not be null."); + ``` + +- Backward Op input and output check, to write the name of the backward Op + + The correct example: + ``` + PADDLE_ENFORCE(ctx->HasInput("X"), + "Input(X) of LoDResetGrad opreator should not be null."); + ``` diff --git a/doc/paddle/advanced_guide/addon_development/new_op/new_python_op.md b/doc/paddle/advanced_guide/addon_development/new_op/new_python_op.md new file mode 100644 index 0000000000000000000000000000000000000000..06dfa94e899d7fa8e6bfb68a02f5a1f2be7281ce --- /dev/null +++ b/doc/paddle/advanced_guide/addon_development/new_op/new_python_op.md @@ -0,0 +1,140 @@ +# 如何写新的Python OP + +PaddlePaddle Fluid通过 `py_func` 接口支持在Python端自定义OP。 py_func的设计原理在于Paddle中的LodTensor可以与numpy数组可以方便的互相转换,从而可以使用Python中的numpy API来自定义一个Python OP。 + + +## py_func接口概述 + +`py_func` 具体接口为: + +```Python +def py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None): + pass +``` + +其中, + +- `x` 是Python Op的输入变量,可以是单个 `Variable` | `tuple[Variable]` | `list[Variable]` 。多个Variable以tuple[Variable]或list[Variale]的形式传入,其中Variable为LoDTensor或Tenosr。 +- `out` 是Python Op的输出变量,可以是单个 `Variable` | `tuple[Variable]` | `list[Variable]` 。其中Variable既可以为LoDTensor或Tensor,也可以为numpy数组。 +- `func` 是Python Op的前向函数。在运行网络前向时,框架会调用 `out = func(*x)` ,根据前向输入 `x` 和前向函数 `func` 计算前向输出 `out`。在 ``func`` 建议先主动将LoDTensor转换为numpy数组,方便灵活的使用numpy相关的操作,如果未转换成numpy,则可能某些操作无法兼容。 +- `backward_func` 是Python Op的反向函数。若 `backward_func` 为 `None` ,则该Python Op没有反向计算逻辑; + 若 `backward_func` 不为 `None`,则框架会在运行网路反向时调用 `backward_func` 计算前向输入 `x` 的梯度。 +- `skip_vars_in_backward_input` 为反向函数 `backward_func` 中不需要的输入,可以是单个 `Variable` | `tuple[Variable]` | `list[Variable]` 。 + + +## 如何使用py_func编写Python Op + +以下以tanh为例,介绍如何利用 `py_func` 编写Python Op。 + +- 第一步:定义前向函数和反向函数 + +前向函数和反向函数均由Python编写,可以方便地使用Python与numpy中的相关API来实现一个自定义的OP。 + +若前向函数的输入为 `x_1`, `x_2`, ..., `x_n` ,输出为`y_1`, `y_2`, ..., `y_m`,则前向函数的定义格式为: +```Python +def foward_func(x_1, x_2, ..., x_n): + ... + return y_1, y_2, ..., y_m +``` + +默认情况下,反向函数的输入参数顺序为:所有前向输入变量 + 所有前向输出变量 + 所有前向输出变量的梯度,因此对应的反向函数的定义格式为: +```Python +def backward_func(x_1, x_2, ..., x_n, y_1, y_2, ..., y_m, dy_1, dy_2, ..., dy_m): + ... 
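+    # dx_i 是前向输入 x_i 对应的梯度,返回顺序需与 x_1, ..., x_n 一一对应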
+ return dx_1, dx_2, ..., dx_n +``` + +若反向函数不需要某些前向输入变量或前向输出变量,可设置 `skip_vars_in_backward_input` 进行排除(步骤三中会叙述具体的排除方法)。 + +注:,x_1, ..., x_n为输入的多个LodTensor,请以tuple(Variable)或list[Variable]的形式在py_func中传入。建议先主动将LodTensor通过numpy.array转换为数组,否则Python与numpy中的某些操作可能无法兼容使用在LodTensor上。 + +此处我们利用numpy的相关API完成tanh的前向函数和反向函数编写。下面给出多个前向与反向函数定义的示例: + +```Python +import numpy as np + +# 前向函数1:模拟tanh激活函数 +def tanh(x): + # 可以直接将LodTensor作为np.tanh的输入参数 + return np.tanh(x) + +# 前向函数2:将两个2-D LodTenosr相加,输入多个LodTensor以list[Variable]或tuple(Variable)形式 +def element_wise_add(x, y): + # 必须先手动将LodTensor转换为numpy数组,否则无法支持numpy的shape操作 + x = np.array(x) + y = np.array(y) + + if x.shape != y.shape: + raise AssertionError("the shape of inputs must be the same!") + + result = np.zeros(x.shape, dtype='int32') + for i in range(len(x)): + for j in range(len(x[0])): + result[i][j] = x[i][j] + y[i][j] + + return result + +# 前向函数3:可用于调试正在运行的网络(打印值) +def debug_func(x): + # 可以直接将LodTensor作为print的输入参数 + print(x) + +# 前向函数1对应的反向函数,默认的输入顺序为:x、out、out的梯度 +def tanh_grad(x, y, dy): + # 必须先手动将LodTensor转换为numpy数组,否则"+/-"等操作无法使用 + return np.array(dy) * (1 - np.square(np.array(y))) +``` + +注意,前向函数和反向函数的输入均是 `LoDTensor` 类型,输出可以是Numpy Array或 `LoDTensor`。 +由于 `LoDTensor` 实现了Python的buffer protocol协议,因此即可通过 `numpy.array` 直接将 `LoDTensor` 转换为numpy Array来进行操作,也可直接将 `LoDTensor` 作为numpy函数的输入参数。但建议先主动转换为numpy Array,则可以任意的使用python与numpy中的所有操作(例如"numpy array的+/-/shape")。 + +tanh的反向函数不需要前向输入x,因此我们可定义一个不需要前向输入x的反向函数,并在后续通过 `skip_vars_in_backward_input` 进行排除 : + +```Python +def tanh_grad_without_x(y, dy): + return np.array(dy) * (1 - np.square(np.array(y))) +``` + +- 第二步:创建前向输出变量 + +我们需调用 `Program.current_block().create_var` 创建前向输出变量。在创建前向输出变量时,必须指明变量的名称name、数据类型dtype和维度shape。 + +```Python +import paddle.fluid as fluid + +def create_tmp_var(program, name, dtype, shape): + return program.current_block().create_var(name=name, dtype=dtype, shape=shape) + +in_var = fluid.layers.data(name='input', dtype='float32', shape=[-1, 28, 28]) + +# 手动创建前向输出变量 +out_var = create_tmp_var(fluid.default_main_program(), name='output', dtype='float32', shape=[-1, 28, 28]) +``` + +- 第三步:调用 `py_func` 组建网络 + +`py_func` 的调用方式为: + +```Python +fluid.layers.py_func(func=tanh, x=in_var, out=out_var, backward_func=tanh_grad) +``` + +若我们不希望在反向函数输入参数中出现前向输入,则可使用 `skip_vars_in_backward_input` 进行排查,简化反向函数的参数列表。 + +```Python +fluid.layers.py_func(func=tanh, x=in_var, out=out_var, backward_func=tanh_grad_without_x, + skip_vars_in_backward_input=in_var) +``` + +至此,使用 `py_func` 编写Python Op的步骤结束。我们可以与使用其他Op一样进行网路训练/预测。 + + +## 注意事项 + +- `py_func` 的前向函数和反向函数内部不应调用 `fluid.layers.xxx` ,因为前向函数和反向函数是在网络运行时调用的,且输入参数均为C++端的 `LoDTensor` ; + 而 `fluid.layers.xxx` 是在组建网络的阶段调用的,且输入参数为Python端的 `Variable` 。 + +- `skip_vars_in_backward_input` 只能跳过前向输入变量和前向输出变量,不能跳过前向输出的梯度。 + +- 若某个前向输出变量没有梯度,则 `backward_func` 将接收到 `None` 的输入。若某个前向输入变量没有梯度,则我们应在 `backward_func` 中主动返回 + `None`。 diff --git a/doc/paddle/advanced_guide/addon_development/new_op/op_inheritance_relation_diagram.png b/doc/paddle/advanced_guide/addon_development/new_op/op_inheritance_relation_diagram.png new file mode 100644 index 0000000000000000000000000000000000000000..ab8926b486986be7c1981d50096827ae24978a00 Binary files /dev/null and b/doc/paddle/advanced_guide/addon_development/new_op/op_inheritance_relation_diagram.png differ diff --git a/doc/paddle/advanced_guide/addon_development/new_op/op_notes.md b/doc/paddle/advanced_guide/addon_development/new_op/op_notes.md new file mode 100644 index 
0000000000000000000000000000000000000000..ddae81c39873944f11f3ee227ebd51e785943df6 --- /dev/null +++ b/doc/paddle/advanced_guide/addon_development/new_op/op_notes.md @@ -0,0 +1,364 @@ +# C++ OP相关注意事项 + +## Fluid中Op的构建逻辑 +### 1.Fluid中Op的构建逻辑 +Fluid中所有的Op都继承自`OperatorBase`,且所有的Op都是无状态的,每个Op包含的成员变量只有四个:type、inputs、outputs、attribute。 + +Op的核心方法是Run,Run方法需要两方面的资源:数据资源和计算资源,这两个资源分别通过`Scope`和`Place`获取。框架内部有一个全局的`DeviceContextPool`,用来记录`Place`和`DeviceContext`之间的对应的关系,即每个`Place`有且仅有一个`DeviceContext`与之对应,`DeviceContext`中存放了当前设备的计算资源。比如对于GPU,这些资源包括`cudnn_handle`、`cublas_handle`、`stream`等,**Op内部所有的计算(数据拷贝和CUDA Kernel等)都必须在`DeviceContext`中进行**。 + +Fluid框架的设计理念是可以在多种设备及第三方库上运行,有些Op的实现可能会因为设备或者第三方库的不同而不同。为此,Fluid引入了OpKernel的方式,即一个Op可以有多个OpKernel,这类Op继承自`OperatorWithKernel`,这类Op的代表是conv_op,conv_op的OpKernel有:`GemmConvKernel`、`CUDNNConvOpKernel`、`ConvMKLDNNOpKernel`,且每个OpKernel都有double和float两种数据类型。不需要OpKernel的代表有`WhileOp`等。 + +Operator继承关系图: +![op_inheritance_relation_diagram](./op_inheritance_relation_diagram.png) + +进一步了解可参考:[multi_devices](https://github.com/PaddlePaddle/FluidDoc/tree/develop/doc/fluid/design/multi_devices),[scope](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/concepts/scope.md),[Developer's_Guide_to_Paddle_Fluid](https://github.com/PaddlePaddle/FluidDoc/blob/release/1.2/doc/fluid/getstarted/Developer's_Guide_to_Paddle_Fluid.md) + +### 2.Op的注册逻辑 +每个Operator的注册项包括: + ```C++ + OpCreator creator_; + GradOpMakerFN grad_op_maker_; + proto::OpProto* proto_{nullptr}; + OpAttrChecker* checker_{nullptr}; + InferVarTypeFN infer_var_type_; + InferShapeFN infer_shape_; + ``` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+<tr><th>注册项</th><th>类型</th><th>说明</th><th>调用</th></tr>
+<tr><td>proto::OpProto</td><td>Class</td><td>存放Op的输入/输出/属性/Op类型</td><td>编译时调用</td></tr>
+<tr><td>GradOpMakerFN</td><td>Functor</td><td>返回当前Op对应的反向Op的一组OpDesc,因为正向Op的反向可能由多个Op构成</td><td>编译时调用</td></tr>
+<tr><td>OpAttrChecker</td><td>Class</td><td>对Op的attr进行check</td><td>编译时调用</td></tr>
+<tr><td>InferVarTypeFN</td><td>Functor</td><td>用于推断输出Var的Type,比如是LoDTensor还是SelectedRows,或者其他</td><td>编译时调用</td></tr>
+<tr><td>InferShapeFN</td><td>Functor</td><td>用于推断Output的Shape</td><td>分为编译时和运行时:编译时在Python端调用;如果Op继承自OperatorWithKernel,运行时在op.run中调用</td></tr>
+<tr><td>OpCreator</td><td>Functor</td><td>每次调用都会创建一个新的OperatorBase</td><td>运行时调用</td></tr>
+ +通常Op注释时需要调用REGISTER_OPERATOR,即: + ``` + REGISTER_OPERATOR(op_type, + OperatorBase + op_maker_and_checker_maker, + op_grad_opmaker, + op_infer_var_shape, + op_infer_var_type) + ``` + +**注意:** + +1. 对于所有Op,前三个参数是必须的,op_type指明op的名字,OperatorBase是该Op的对象,op_maker_and_checker_maker是op的maker以及Op中attr的checker。 +2. 如果该Op有反向,则必须要有op_grad_opmaker,因为在backward会根据正向的Op中获取反向Op的Maker。 +3. 框架提供了一个默认的op_grad_opmaker:`DefaultGradOpDescMaker`,这个Maker会将前向Op的输入和输出都作为反向Op的输入,将前向Op的输入的梯度作为反向Op的输出,并将前向Op的属性拷贝过来。**注意:DefaultGradOpDescMaker会将前向Op的所有输入输出都做反向Op的输入,即使这个输入是没有必要的,这将会导致无法对没有用到的变量做内存优化**。 +4. 框架没有提供默认的op_infer_var_shape方法。如果该Op是无OpKernel的,通常需要用户添加对应的op_infer_var_shape方法;如果该Op是有OpKernel的,需要实现`OperatorWithKernel`中的`InferShape`方法,此时不需要提供op_infer_var_shape方法。具体实现可参考[while_op.cc](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/controlflow/while_op.cc),[conv_op.cc](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/conv_op.cc)。 +5. 框架没有提供默认的op_infer_var_type方法,用户需要根据实际情况添加op_infer_var_type。严格来说每个Op都应该注册一个InferVarType,op_infer_var_type根据输入的Var的type和dtype推断输出Var的type和dtype。**注意:在Python端的LayerHelper中create_variable_for_type_inference操作返回的Variable里面是LoDTensor,C++端的InferVarType可以修改`Variable`的type和dtype**。 + + + +更多内容请参考: [如何写新的Op](new_op.html) + +## 写Op注意事项 +### 1.Op可以支持输入输出类型 +Fluid的Op的输入输出都是`Variable`,从设计上讲,`Variable`中可以存放任意类型,Op的输入输出`Variable`可能是是任意类型,通常情况下`Variable`中存放的是`LoDTensor`、`SelectedRows`。 + +**注意:** + +- 代码中经常出现`context.Input("Input")`,并不表示"Input"的`Variable`是`Tensor`,而是从"Input"的`Variable`的`LoDTensor`中获取`Tensor`。如果"Input"的`Variable`是`SelectedRows`,则会报错。 +- 如果”Input”是`SelectedRows`,`context->GetInputDim("Input")`返回的是`var->Get().GetCompleteDims()`,而不是`SelectedRows`中`Tensor`的Dim。 + +### 2.在Op内部不能对输入的数据做任何的改写 +在Op内部绝不允许对输入数据做任何改写,因为可能存在其他Op需要读这个数据。 + +### 3.OpKernel需要注册的数据类型 +目前要求所有OpKernel都要注册double和float数据类型。 + +### 4.GetExpectedKernelType方法重写 +GetExpectedKernelType方法是OperatorWithKernel类中用于获取指定设备(例如CPU,GPU)上指定数据类型(例如double,float)的OpKernel的方法。该方法通过获取输入变量内部的Tensor数据类型得知需要的Kernel数据类型,但是由于Tensor在此处可能尚未被初始化,所以在该方法内使用输入变量时需要进行必要的初始化检查。在新增含Kernel的Op的时候,关于该方法的重写需要注意以下两点。 + +#### 4.1 仅在必要时重写此方法 + +基类OperatorWithKernel中的GetExpectedKernelType方法对于派生类Op的所有输入变量进行了完备的初始化检查,建议在新增的Op中直接使用基类的此方法,例如: + +- [MeanOp](https://github.com/PaddlePaddle/Paddle/blob/3556514e971bdbb98fdf0f556371c527f4dfa98c/paddle/fluid/operators/mean_op.cc#L39):该Op的所有输入变量在Run之前应该全部被初始化,初始化检查是必要且合理的 + +但是在一些情况下,直接使用基类的GetExpectedKernelType方法无法满足需求,则需要对该方法进行重写,具体情况及示例如下: + +1. OP的输入有多个,且数据类型不同,例如 [AccuracyOp](https://github.com/PaddlePaddle/Paddle/blob/370f0345b6d35a513c8e64d519a0edfc96b9276c/paddle/fluid/operators/metrics/accuracy_op.cc#L80),需要重写GetExpectedKernelType方法,指定用某一输入变量获取kernel类型 + +2. Op包含Dispensable的输入变量,该类输入变量是可选的,当用户未输入时,该类变量未被初始化属于合理情况,例如 [ConvOp](https://github.com/PaddlePaddle/Paddle/blob/250e72d254ccbe3521c29aa2801a1cb15b75ea73/paddle/fluid/operators/conv_op.cc#L206),存在Bias等可选的输入变量,需要重写GetExpectedKernelType方法,指定用必须提供的输入变量获取kernel类型 + +3. Op的部分输入变量即使未被初始化也属于合理情况,例如 [ConcatOp](https://github.com/PaddlePaddle/Paddle/blob/250e72d254ccbe3521c29aa2801a1cb15b75ea73/paddle/fluid/operators/concat_op.cc#L90),输入变量X中有个Tensor需要连接,其中可能包含未被初始化的Tensor,需要重写GetExpectedKernelType方法,使用输入变量X获取kernel的过程中,合理忽略掉部分Tensor为空的情况 + +4. OP的Kernel类型与输入变量无关(可能由其他参数指定),例如 [FillOp](https://github.com/PaddlePaddle/Paddle/blob/efbdad059634bef022d4a3f5b00aef6ef8e88ed6/paddle/fluid/operators/one_hot_op.cc#L72),该Op没有输入,Kernel类型通过Op的dtype参数指定,因此需要重写GetExpectedKernelType方法,用参数指定的数据类型获取kernel类型 + +5. 
Op Kernel的部分参数在使用某些库时,需要指定为相应的值,因此需要重写GetExpectedKernelType方法,覆盖默认参数 + - 使用CUDNN库:需要指定OpKernel的LibraryType为kCUDNN,例如 [AffineGridOp](https://github.com/PaddlePaddle/Paddle/blob/370f0345b6d35a513c8e64d519a0edfc96b9276c/paddle/fluid/operators/affine_grid_op.cc#L78) + - 使用MKLDNN库:需要指定OpKernel的LibraryType和DataLayout为kMKLDNN [MulOp](https://github.com/PaddlePaddle/Paddle/blob/250e72d254ccbe3521c29aa2801a1cb15b75ea73/paddle/fluid/operators/mul_op.cc#L89) + +#### 4.2 重写此方法时需要对输入变量进行初始化检查 + +在需要重写GetExpectedKernelType方法时,一般会根据某一输入变量获取Kernel的数据类型,此时请使用`OperatorWithKernel::IndicateVarDataType`接口获取变量的dtype,该方法对指定的输入变量进行了必要的初始化检查,详见[Paddle PR #20044](https://github.com/PaddlePaddle/Paddle/pull/20044),实现示例如下,: + +``` + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + return framework::OpKernelType( + OperatorWithKernel::IndicateVarDataType(ctx, "X"), ctx.GetPlace()); + } +``` + +如果未使用带有初始化检查的方法,直接使用了`Tensor->type()`,可能会导致报出`holder_ should not be null. Tensor not initialized yet when Tensor::type()`的错误,例如[Paddle issue #19522](https://github.com/PaddlePaddle/Paddle/issues/19522) ,用户仅凭该错误信息将无法得知具体出错的Op,不利于调试。 + +### 5.Op兼容性问题 +对Op的修改需要考虑兼容性问题,要保证Op修改之后,之前的模型都能够正常加载及运行,即新版本的Paddle预测库能成功加载运行旧版本训练的模型。**所以,需要保证Op的Input、Output和Attribute不能被修改(文档除外)或删除,可以新增Input、Output和Attribute,但是新增的Input,Output必须设置AsDispensable,新增的Attribute必须设置默认值。更多详细内容请参考[OP修改规范:Input/Output/Attribute只能做兼容修改](https://github.com/PaddlePaddle/Paddle/wiki/OP-Input-Output-Attribute-Compatibility-Modification)** 。 + +### 6.ShareDataWith的调用 +ShareDataWith的功能是使两个Tensor共享底层buffer,在调用这个操作的时候需要特别注意,在Op内部不能将ShareDataWith作用在Op的输出上,即Op输出的Tensor必须是Malloc出来的。 + +### 7.稀疏梯度参数更新方法 +目前稀疏梯度在做更新的时候会先对梯度做merge,即对相同参数的梯度做累加,然后做参数以及附加参数(如velocity)的更新。 + +### 8.显存优化 + +#### 8.1 为可原位计算的Op注册Inplace +有些Op的计算逻辑中,输出可以复用输入的显存空间,也可称为原位计算。例如[`reshape_op`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/reshape_op.cc)中,输出`Out`可以复用输入`X`的显存空间,因为该Op的计算逻辑不会改变`X`的实际数据,只是修改它的shape,输出和输入复用同一块显存空间不影响结果。对于这类OP,可以注册`Inlace`,从而让框架在运行时自动地进行显存优化。 + +fluid提供了`DECLARE_INPLACE_OP_INFERER`宏用于注册`Inplace`,该宏第一个参数是一个类名,如`ReshapeOpInplaceInToOut`;第二个参数是一对复用的输入输出,以`{"X", "Out"}`的形式给出。在`REGISTER_OPERATOR`时, +可以将类名传传入,从而为该Op注册`Inplace`。 + +``` +DECLARE_INPLACE_OP_INFERER(ReshapeOpInplaceInToOut, {"X", "Out"}); + +REGISTER_OPERATOR( + reshape, ops::ReshapeOp, ops::ReshapeOpMaker, + paddle::framework::DefaultGradOpMaker, + paddle::framework::DefaultGradOpMaker, + ops::ReshapeOpInplaceInToOut); +``` + +#### 8.2 减少OP中的无关变量 +通常反向Op会依赖于前向Op的某些输入(Input)、输出(Output),以供反向Op计算使用。但有些情况下,反向Op不需要前向Op的所有输入和输出;有些情况下,反向Op只需要前向Op的部分输入和输出;有些情况下,反向Op只需要使用前向Op中输入和输出变量的Shape和LoD信息。若Op开发者在注册反向Op时,将不必要的前向Op输入和输出作为反向Op的输入,会导致这部分显存无法被框架现有的显存优化策略优化,从而导致模型显存占用过高。 + +所以在写注册反向Op时需要注意以下几点: + +- Fluid提供的`DefaultGradOpMaker`,默认会将前向op的所有输入(`Input`)、输出(`Output`)以及输出变量所对应的梯度(`Output@Grad`)作为反向Op的输入,将前向Op输入所对应的梯度(`Input@Grad`)作为反向Op的输出。所以在使用`DefaultGradOpMaker`时需要考虑是否有些变量在计算中不被用到。 +- 如果`DefaultGradOpMaker`不能够满足需求,需要用户自己手动构建`GradOpMaker`,具体实现请参考[相关文档](new_op.html#gradopmaker); +- 如果有些反向Op需要依赖前向Op的输入或输出变量的的Shape或LoD,但不依赖于变量中Tensor的Buffer,且不能根据其他变量推断出该Shape和LoD,则可以通过`DECLARE_NO_NEED_BUFFER_VARS_INFERER`接口对该变量(以下称该变量为`X`)在反向Op中进行注册`NoNeedBufferVars`。**一旦注册了`NoNeedBufferVars`,反向op中就不能读写该变量对应的Tensor中的buffer,只能调用Tensor的dims()和lod()方法,同时,反向Op中的`GetExpectedKernelType()`必须要重写,并且`GetExpectedKernelType()`中不能访问`X`变量中Tensor的type()方法**。比如在`SliceOpGrad`中只会用到`Input`中变量的Shape信息,所以需要为对`Input`在`SliceOpGrad`上进行注册: +``` +namespace paddle { +namespace operators { +// ... 
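+// 说明:SliceOpGrad 只用到 "Input" 的 Shape/LoD 信息,不读取其 Tensor buffer,
+// 因此在下方通过 DECLARE_NO_NEED_BUFFER_VARS_INFERER 为 "Input" 注册了 NoNeedBufferVars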
+class SliceOpGrad : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext* ctx) const override { + // ... + } + + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + // Note: don't get data type from ctx.Input("Input"); + auto dtype = ctx.Input(framework::GradVarName("Out"))->type(); + return framework::OpKernelType( dtype, ctx.GetPlace()); + } +}; + + +template +class SliceOpGradMaker : public framework::SingleGradOpMaker { + public: + using framework::SingleGradOpMaker::SingleGradOpMaker; + + protected: + void Apply(GradOpPtr bind) const override { + bind->SetInput("Input", this->Input("Input")); + if (this->HasInput("StartsTensor")) { + bind->SetInput("StartsTensor", this->Input("StartsTensor")); + } + if (this->HasInput("EndsTensor")) { + bind->SetInput("EndsTensor", this->Input("EndsTensor")); + } + if (this->HasInput("StartsTensorList")) { + bind->SetInput("StartsTensorList", this->Input("StartsTensorList")); + } + if (this->HasInput("EndsTensorList")) { + bind->SetInput("EndsTensorList", this->Input("EndsTensorList")); + } + bind->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out")); + bind->SetOutput(framework::GradVarName("Input"), this->InputGrad("Input")); + bind->SetAttrMap(this->Attrs()); + bind->SetType("slice_grad"); + } +}; + +DECLARE_NO_NEED_BUFFER_VARS_INFERER(SliceOpGradNoNeedBufferVarsInference, + "Input"); +} // namespace operators +} // namespace paddle +namespace ops = paddle::operators; +REGISTER_OPERATOR(slice, ops::SliceOp, ops::SliceOpMaker, + ops::SliceOpGradMaker, + ops::SliceOpGradMaker); +REGISTER_OPERATOR(slice_grad, ops::SliceOpGrad, + ops::SliceDoubleOpGradMaker, + ops::SliceDoubleOpGradMaker, + ops::SliceOpGradNoNeedBufferVarsInference); +``` + +### 9.混合设备调用 +由于GPU是异步执行的,当CPU调用返回之后,GPU端可能还没有真正的执行,所以如果在Op中创建了GPU运行时需要用到的临时变量,当GPU开始运行的时候,该临时变量可能在CPU端已经被释放,这样可能会导致GPU计算出错。 + +关于GPU中的一些同步和异步操作: +``` +The following device operations are asynchronous with respect to the host: + Kernel launches; + Memory copies within a single device's memory; + Memory copies from host to device of a memory block of 64 KB or less; + Memory copies performed by functions that are suffixed with Async; + Memory set function calls. +``` + +关于cudaMemCpy和cudaMemCpyAsync注意事项: + +- 如果数据传输是从GPU端到非页锁定的CPU端,数据传输将是同步,即使调用的是异步拷贝操作。 +- 如果数据传输是从CPU端到CPU端,数据传输将是同步的,即使调用的是异步拷贝操作。 + +更多内容可参考:[Asynchronous Concurrent Execution](https://docs.nvidia.com/cuda/cuda-c-programming-guide/#asynchronous-concurrent-execution),[API synchronization behavior](https://docs.nvidia.com/cuda/cuda-runtime-api/api-sync-behavior.html#api-sync-behavior) + +### 10. LoD 在 Op 内部的传导规范 + +[LoD](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/concepts/lod_tensor.md) 是 Paddle Fluid 框架用来表示变长序列数据的属性,除了仅支持输入是 padding data 的 Op 外,所有 Op 的实现都要考虑 LoD 的传导问题。 + +根据 OP 的计算过程中是否用到 LoD,我们可以将涉及到 LoD 传导问题的 OP 分为两类: LoD-Transparent 与 LoD-Based。 + + + + + + + + + + + + + + + + + + + + + +
+<tr><th>类型</th><th>特点</th><th>示例</th></tr>
+<tr><td>LoD-Transparent</td><td>计算过程不依赖 LoD,输入是否有 LoD 不会影响计算的结果,通常是 position-wise 的计算</td><td>conv2d_op、batch_norm_op、dropout_op 等</td></tr>
+<tr><td>LoD-Based</td><td>计算以序列为单位,计算过程依赖 LoD</td><td>lstm_op、gru_op、sequence_ops 等</td></tr>
+ +这两类 OP 的 LoD 传导需要考虑前向和反向两个过程。 + +#### 前向传导 + +在前向传导过程,与输入的 LoD 相比较,Op 输出的 LoD 可能出现不变、改变和消失这三种情况: + + - 不变:适用于所有的 LoD-Transparent OP 与部分的 LoD-Based OP。可以在`InferShape` 中调用 `ShareLoD()` 直接将输入 Var 的 LoD 共享给输出 Var, 可参考 [lstm_op](https://github.com/PaddlePaddle/Paddle/blob/a88a1faa48a42a8c3737deb0f05da968d200a7d3/paddle/fluid/operators/lstm_op.cc#L92); 如果有多个输入且都可能存在 LoD 的情况,通常默认共享第一个输入, 例如 [elementwise_ops forward](https://github.com/PaddlePaddle/Paddle/blob/5d6a1fcf16bcb48d2e66306b27d9994d9b07433c/paddle/fluid/operators/elementwise/elementwise_op.h#L69); + + - 改变:适用于部分 LoD-Based OP。在实现 OpKernel 时需考虑输出 LoD 的正确计算,真实的 LoD 在前向计算结束后才能确定,此时仍需要在`InferShape` 中调用 `ShareLoD()`,以确保CompileTime 时对 LoD Level 做了正确的传导,可参考 [sequence_expand_op](https://github.com/PaddlePaddle/Paddle/blob/565d30950138b9f831caa33904d9016cf53c6c2e/paddle/fluid/operators/sequence_ops/sequence_expand_op.cc); + + - 消失:适用于输出不再是序列数据的 LoD-Based OP。此时不用再考虑前向的 LoD 传导问题,可参考 [sequence_pool_op](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/sequence_ops/sequence_pool_op.cc); + +其它重要的注意事项: + + - 实现 LoD-Based OP 时,需要处理好 LoD 传导的边界情况,例如对长度为零的输入的支持,并完善相应的单测,单测 case 覆盖空序列出现在 batch 开头、中间和末尾等位置的情况,可参考 [test_lstm_op.py](https://github.com/PaddlePaddle/Paddle/blob/4292bd8687ababc7737cffbddc0d38ead2138c00/python/paddle/fluid/tests/unittests/test_lstm_op.py#L203-L216) + + - 对 LoD Level 有明确要求的 OP,推荐的做法是在 `InferShape` 中即完成 LoD Level的检查,例如 [sequence_pad_op](https://github.com/PaddlePaddle/Paddle/blob/4292bd8687ababc7737cffbddc0d38ead2138c00/paddle/fluid/operators/sequence_ops/sequence_pad_op.cc#L79)。 + + +#### 反向传导 + +通常来讲,OP 的某个输入 Var 所对应的梯度 GradVar 的 LoD 应该与 Var 自身相同,所以应直接将 Var 的 LoD 共享给 GradVar,可以参考 [elementwise ops 的 backward](https://github.com/PaddlePaddle/Paddle/blob/a88a1faa48a42a8c3737deb0f05da968d200a7d3/paddle/fluid/operators/elementwise/elementwise_op.h#L189-L196) + + +## Op性能优化 +### 1.第三方库的选择 +在写Op过程中优先使用高性能(如cudnn、mkldnn、mklml、eigen等)中提供的操作,但是一定要做benchmark,有些库中的操作在深度学习任务中可能会比较慢。因为高性能库(如eigen等)中提供的操作为了更为通用,在性能方面可能并不是很好,通常深度学习模型中数据量较小,所以有些情况下可能高性能库中提供的某些操作速度较慢。比如Elementwise系列的所有Op(前向和反向),Elementwise操作在模型中调用的次数比较多,尤其是Elementwise_add,在很多操作之后都需要添加偏置项。在之前的实现中Elementwise_op直接调用Eigen库,由于Elementwise操作在很多情况下需要对数据做Broadcast,而实验发现Eigen库做Broadcast的速度比较慢,慢的原因在这个PR[#6229](https://github.com/PaddlePaddle/Paddle/pull/6229)中有描述。 + +### 2.Op性能优化 +Op的计算速度与输入的数据量有关,对于某些Op可以根据输入数据的Shape和Op的属性参数来选择不同的计算方式。比如concat_op,当axis>=1时,在对多个tensor做拼接过程中需要对每个tensor做很多次拷贝,如果是在GPU上,需要调用cudaMemCopy。相对CPU而言,GPU属于外部设备,所以每次调用GPU的操作都会有一定的额外开销,并且当需要拷贝的次数较多时,这种开销就更为凸现。目前concat_op的实现会根据输入数据的Shape以及axis值来选择不同的调用方式,如果输入的tensor较多,且axis不等于0,则将多次拷贝操作转换成一个CUDA Kernel来完成;如果输入tensor较少,且axis等于0,使用直接进行拷贝。相关实验过程在该PR([#8669](https://github.com/PaddlePaddle/Paddle/pull/8669))中有介绍。 + +由于CUDA Kernel的调用有一定的额外开销,所以如果Op中出现多次调用CUDA Kernel,可能会影响Op的执行速度。比如之前的sequence_expand_op中包含很多CUDA Kernel,通常这些CUDA Kernel处理的数据量较小,所以频繁调用这样的Kernel会影响Op的计算速度,这种情况下最好将这些小的CUDA Kernel合并成一个。在优化sequence_expand_op过程(相关PR[#9289](https://github.com/PaddlePaddle/Paddle/pull/9289))中就是采用这种思路,优化后的sequence_expand_op比之前的实现平均快出约1倍左右,相关实验细节在该PR([#9289](https://github.com/PaddlePaddle/Paddle/pull/9289))中有介绍。 + +减少CPU与GPU之间的拷贝和同步操作的次数。比如fetch操作,在每个迭代之后都会对模型参数进行更新并得到一个loss,并且数据从GPU端到没有页锁定的CPU端的拷贝是同步的,所以频繁的fetch多个参数会导致模型训练速度变慢。 + +## Op数值稳定性问题 +### 1.有些Op存在数值稳定性问题 +出现数值稳定性的主要原因程序在多次运行时,对浮点型数据施加操作的顺序可能不同,进而导致最终计算结果不同。而GPU是通过多线程并行计算的方式来加速计算的,所以很容易出现对浮点数施加操作的顺序不固定现象。 + +目前发现cudnn中的卷积操作、cudnn中的MaxPooling、CUDA中CudaAtomicXX、ParallelExecutor的Reduce模式下参数梯度的聚合等操作运行结果是非确定的。 + 
+为此Fluid中添加了一些FLAGS,比如使用FLAGS_cudnn_deterministic来强制cudnn使用确定性算法、FLAGS_cpu_deterministic强制CPU端的计算使用确定性方法。 + +### 2.WITH_FAST_MATH的开与关 +如果WITH_FAST_MATH是ON,NVCC在编译Paddle和Egien的时候会使用--use_fast_math,这样可能会使CUDA中的一些操作在损失一定精度的情况下变快,比如log、exp、tanh等,但也会使一些操作的计算结果是错的,比如pow操作,具体原因请查看[torch/DEPRECEATED-torch7-distro#132](https://github.com/torch/DEPRECEATED-torch7-distro/issues/132)。 + +## 其他 +### 1.报错信息 +Enforce提示信息不能为空,并且需要写明,因为报错信息可以更快更方便地分析出错误的原因。 + +### 2.Op的数学公式 +如果Op有数学公式,一定要在代码中将数学公式写明,并在Python API的Doc中显示,因为用户在对比不同框架的计算结果时可能需要了解Paddle对Op是怎么实现的。 + +**注意:**在merge到develop分支之前一定进行公式预览。可参考[dynamic_lstmp](../../../api_cn/layers_cn/nn_cn.html#dynamic-lstmp)。 + +### 3.Op变量名的命名要规范 +在定义Op时,Op的输入输出以及属性的命名需要符合规范,具体命名规则请参考:[`name_convention`](https://github.com/PaddlePaddle/FluidDoc/blob/release/1.2/doc/fluid/dev/name_convention.md)。 + +### 4.Python端Op接口中参数的顺序 +Python API中参数的顺序一般按照重要性来排,以fc为例: +``` +def fc(input, + size, + num_flatten_dims=1, + param_attr=None, + bias_attr=None, + act=None, + is_test=False, + name=None) +``` diff --git a/doc/paddle/advanced_guide/addon_development/new_op/op_notes_en.md b/doc/paddle/advanced_guide/addon_development/new_op/op_notes_en.md new file mode 100644 index 0000000000000000000000000000000000000000..0efef6015801234b5318a36a08125403572df8c4 --- /dev/null +++ b/doc/paddle/advanced_guide/addon_development/new_op/op_notes_en.md @@ -0,0 +1,186 @@ +# Notes on operator development + +## Building logic of Fluid's op +### 1.Building logic of Fluid's op +All Ops in Fluid are derived from `OperatorBase` , and all Ops are stateless. Each Op contains only four variable members: type, inputs, outputs, and attribute. + +The core method of Op is Run. The Run method requires two resources: data resources and computing resources. These two resources are obtained respectively from `Scope` and `Place`. Inside the framework, there is a global `DeviceContextPool`, which is used to record the mapping relationship between `Place` and `DeviceContext`, which means each `Place` has only one `DeviceContext` corresponding to it, and `DeviceContext` stores the computing resources of the current device. For example, for GPU, these resources include `cudnn_handle`, `cublas_handle`, `stream`, and so on. All the internal calculations (data copy and CUDA Kernel, etc.) of Op must be done in `DeviceContext`. + +The Fluid framework is designed to run on a variety of devices and third-party libraries, and some Op implementations may vary on different the devices or third-party libraries. Therefore, Fluid introduced the OpKernel's approach, which means an Op can have multiple OpKernels. Such Ops are derived from `OperatorWithKernel`, and the representative of such Ops is conv, the OpKernels of conv_op are: `GemmConvKernel`, `CUDNNConvOpKernel`, `ConvMKLDNNOpKernel`, and each OpKernel has two data types, double and float. Ops that do not need OpKernel inclue `WhileOp` and so on. 
+ +Operator inheritance diagram: +![op_inheritance_relation_diagram](./op_inheritance_relation_diagram.png) + +For further information, please refer to: [multi_devices](https://github.com/PaddlePaddle/FluidDoc/tree/develop/doc/fluid/design/multi_devices) , [scope](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/concepts/scope.md) , [Developer's_Guide_to_Paddle_Fluid](https://github.com/PaddlePaddle/FluidDoc/blob/release/1.2/doc/fluid/getstarted/Developer's_Guide_to_Paddle_Fluid.md) + +### 2.Op's registration logic +The registration entries for each Operator include: + ```C++ + OpCreator creator_; + GradOpMakerFN grad_op_maker_; + proto::OpProto* proto_{nullptr}; + OpAttrChecker* checker_{nullptr}; + InferVarTypeFN infer_var_type_; + InferShapeFN infer_shape_; + ``` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+<tr><th>Registration Entry</th><th>Type</th><th>Description</th><th>Usage</th></tr>
+<tr><td>proto::OpProto</td><td>Class</td><td>Stores the input/output/attributes/type of the Op</td><td>Called at compile time</td></tr>
+<tr><td>GradOpMakerFN</td><td>Functor</td><td>Returns the set of OpDescs of the backward Op corresponding to the current Op, because the backward of a forward Op may consist of multiple Ops</td><td>Called at compile time</td></tr>
+<tr><td>OpAttrChecker</td><td>Class</td><td>Checks the Op's attributes</td><td>Called at compile time</td></tr>
+<tr><td>InferVarTypeFN</td><td>Functor</td><td>Used to infer the type of the output Var, such as LoDTensor, SelectedRows, or others</td><td>Called at compile time</td></tr>
+<tr><td>InferShapeFN</td><td>Functor</td><td>Used to infer the shape of the output</td><td>Usage differs between compile time and runtime: at compile time it is called on the Python side; if the Op is derived from OperatorWithKernel, at runtime it is called in op.run</td></tr>
+<tr><td>OpCreator</td><td>Functor</td><td>Creates a new OperatorBase on each call</td><td>Called at runtime</td></tr>
+ +Usually you need to call REGISTER_OPERATOR when you make comments on Op, which is: + ``` + REGISTER_OPERATOR(op_type, + OperatorBase + Op_maker_and_checker_maker, + Op_grad_opmaker, + Op_infer_var_shape, + Op_infer_var_type) + ``` + +**Note:** + +1. For all Op, the first three parameters are required, op_type specifies the name of op, OperatorBase is the object instance of this Op, op_maker_and_checker_maker is the maker of op and the checker of attr in op. +2. If the Op has a reverse, it must have op_grad_opmaker, because in backward, the reverse Op's Maker will be obtained from the forward Op. +3. The framework provides a default op_grad_opmaker:`DefaultGradOpDescMaker`, which will use the input and output of the forward Op as the input of the reverse Op, and the gradients of the input to forward Op's as the output of the reverse Op, and copy the attributes of the forward Op to it. **Note:** DefaultGradOpDescMaker will take all the input and output of the forward Op as the reverse Op input. Even if this input is not necessary, the absence of this will prevent us from doing memory optimization for the unused variables. +4. The framework does not provide a default op_infer_var_shape method. If the Op has no OpKernel, you usually need to add the corresponding op_infer_var_shape method. If the Op has OpKernel, you need to implement the `InferShape` method of `OperatorWithKernel`. You don't need to provide the op_infer_var_shape method. For details, refer to [while_op.cc](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/controlflow/while_op.cc), [conv_op.cc](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/conv_op.cc). +5. The framework does not provide a default op_infer_var_type method, the user needs to add op_infer_var_type according to the actual situation. Strictly speaking, every Op should register an InferVarType, and op_infer_var_type infers the type and dtype of the output Var according to the type and dtype of the input Var. **Note:** In the Python-side LayerHelper, the create_variable_for_type_inference operation returns a Variable which is a LoDTensor. The C++-side InferVarType can modify the type and dtype of the `Variable`. + + +For more details, please refer to: [How to write a new Op](new_op_en.html) + +## Notes on Writing an Op +### 1. input and output types supported by Op +The input and output of Fluid's Ops are `Variable`. In design, `Variable` can store any type. Op's input and output `Variable` may be of any type, and usually the `Variable` stores `LoDTensor` and `SelectedRows` . + +**Note:** + +- `context.Input("Input")` often appears in the code. It does not mean that the `Variable` of "Input" is `Tensor`, but indicates that the `Tensor` is obtained from `LoDTensor` in the `Variable` of the "Input". If the `Variable` of "Input" is `SelectedRows`, an error will be reported. +- If "Input" is `SelectedRows`, `context->GetInputDim("Input")` will return `var->Get().GetCompleteDims()` instead of Dim of `Tensor` in `SelectedRows` . + +### 2. Do not modify the input data inside Op. +Never make any modification of the input data inside Op, as there may be other Ops that need to read this input. + +### 3. The data type needs to be registered for OpKernel +Currently all OpKernel are required to register double and float data types. + +### 4.Op compatibility issue +The modification of Op needs to consider the compatibility problem. 
Please ensure that the previous model can be loaded and run normally after the modification of Op which means that the model trained by the old version can be loaded and run with Paddle inference library of new version. **So developers should ensure that the Input, Output and Attribute of OPs cannot be modified (except for documents) or deleted. And developers can add Input, Output and Attribute, but the added Input and Output must be set to be dispensable, and the default value of added Attribute must be set. For more details, please refer to [OP Input/Output/Attribute Compatibility Modification](https://github.com/PaddlePaddle/Paddle/wiki/OP-Input-Output-Attribute-Compatibility-Modification(English-Version))**. + +### 5.Call ShareDataWith +The function of ShareDataWith is to make the two Tensors share the underlying buffer. When calling this operation, special attention should be paid. In the Op, the ShareDataWith cannot be applied to the output of Op. In other words, the Tensor of the Op output must be from Malloc. + +### 6. Sparse gradient parameter's update method +At present, the sparse gradient will first merge the gradient when updating, which is to add up the gradients of the same parameter, and then update the parameters and additional parameters (such as velocity). + +### 7. (Video) Memory optimization +If the reverse of Op does not require all of the input and output of the forward op as its input, please do not use `DefaultGradOpDescMaker`, which will prevent Memory/Video Memory optimization for unused variables. + +### 8. Calls made on Hybrid device +Since the GPU is executed asynchronously, the GPU side may not be actually executed after the CPU call returns. Therefore, if you create a temporary variable in Op that you need to use at the GPU runtime, when the GPU starts running, the temporary variable may have been released on the CPU side, which may cause GPU calculation errors. + +Some of the synchronous and asynchronous operations in the GPU: +``` +The following device operations are asynchronous with respect to the host: + Kernel launches; + Memory copies within a single device's memory; + Memory copies from host to device of a memory block of 64 KB or less; + Memory copies performed by functions that are suffixed with Async; + Memory set function calls. +``` + +Note on cudaMemCpy and cudaMemCpyAsync: + +- If the data transfer is from the GPU side to the CPU side with non-pinned memory , the data transfer will be synchronous, even if an asynchronous copy operation is called. +- If the data is transferred from the CPU side to the CPU side, the data transfer will be synchronous, even if an asynchronous copy operation is called. + +For more information, please refer to: [Asynchronous Concurrent Execution](https://docs.nvidia.com/cuda/cuda-c-programming-guide/#asynchronous-concurrent-execution) , [API synchronization behavior](https://Docs.nvidia.com/cuda/cuda-runtime-api/api-sync-behavior.html#api-sync-behavior) + +## Op Performance Optimization +### 1. Selection of third-party libraries +In the process of writing Op, the operations provided by high-performance libraries (such as cudnn, mkldnn, mklml, eigen, etc.) are preferred, but the benchmark must be done. Some operations in the library may be slower in deep learning tasks. Because the operations provided in high-performance libraries (such as eigen, etc.) are more generalized and in terms of performance, they may not be sufficient. 
Usually the amount of data in the deep learning model is small, so in some cases some of the high-performance libraries may be compromised to a slower speed. For example, all Op (forward and reverse) of the Elementwise set. The Elementwise operation is called relatively frequently in the model. Especially Elementwise_add, which is used to add offset to many operations. In the previous implementation, Elementwise_op directly calls the Eigen library. Since the Elementwise operation needs to broadcast the data in many cases, and the experiment finds that the Eigen library is slower to broadcast, whose reason is in this PR[#6229](https://github.com/PaddlePaddle/Paddle/pull/6229). + +### 2.Op performance optimization +The calculation speed of Op is related to the amount of data input. For some Op, different calculation methods can be selected according to the attribute parameters in Op and Shape of the input data. For example, concat_op, when axis>=1, in the process of concatenating multiple tensors, you need to make many copies for each tensor. If it is on GPU, you need to call cudaMemCopy. Relative to the CPU, the GPU is an external device. So each time the GPU is called, there will a certain overhead. And when more times of copying are required, the overhead is more prominent. At present, the implementation of concat_op will select different calling methods according to the Shape and axis values of the input data. If there are a relatively large number of input tensors, and the axis is not equal to 0, the multiple copy operations will be converted into a CUDA Kernel to complete the process; if input tensor are less, and the axis is equal to 0, direct copy will be used. The relevant experiment is described in this PR ([#8669](https://github.com/PaddlePaddle/Paddle/pull/8669)) . + +Since the call of CUDA Kernel has a certain overhead, multiple calls of the CUDA Kernel in Op may affect the execution speed of Op. For example, the previous sequence_expand_op contains many CUDA Kernels. Usually, these CUDA Kernels process a small amount of data, so frequent calls to such Kernels will affect the calculation speed of Op. In this case, it is better to combine these small CUDA Kernels into one. This idea is used in the optimization of the sequence_expand_op procedure (related PR[#9289](https://github.com/PaddlePaddle/Paddle/pull/9289)). The optimized sequence_expand_op is about twice as fast as the previous implementation, the relevant experiments are introduced in the PR ([#9289](https://github.com/PaddlePaddle/Paddle/pull/9289)). + +Reduce the number of copy and sync operations between the CPU and the GPU. For example, the fetch operation will update the model parameters and get a loss after each iteration, and the copy of the data from the GPU to the Non-Pinned-Memory CPU is synchronous, so frequent fetching for multiple parameters will reduce the model training speed. + +## Op numerical stability +### 1. Some Ops have numerical stability problems +The main reason for numerical stability is that when the program is run multiple times, the order in which the floating-point data is processed may be different, resulting in different final calculation results. The GPU is accelerated by multi-threaded parallel computing, so it is commonplace that the order of operations on floating-point numbers is not fixed. 
+
+At present, the results of the convolution operation in cuDNN, MaxPooling in cuDNN, CudaAtomicXX in CUDA, and the aggregation of parameter gradients in the Reduce mode of ParallelExecutor are known to be non-deterministic.
+
+For this reason, some FLAGS have been added to Fluid. For example, FLAGS_cudnn_deterministic forces cuDNN to use deterministic algorithms, and FLAGS_cpu_deterministic forces the CPU-side calculation to use deterministic methods.
+
+### 2. Turning WITH_FAST_MATH on or off
+If WITH_FAST_MATH is ON, NVCC will use --use_fast_math when compiling Paddle and Eigen. This makes some CUDA operations, such as log, exp and tanh, faster at the cost of some precision, but it may also produce wrong results for some operations, such as the pow operation. Please read [torch/DEPRECEATED-torch7-distro#132](https://github.com/torch/DEPRECEATED-torch7-distro/issues/132) for the specific reasons.
+
+## Other
+### 1. Error message
+The message of an Enforce check must not be empty and needs to be written carefully, because a meaningful error message makes it much quicker and easier to analyze the cause of an error.
+
+### 2. Op's mathematical formula
+If an Op has a mathematical formula, be sure to write the formula in the code and display it in the Doc of the Python API, because users may need to understand how Paddle implements the Op when comparing calculation results across frameworks.
+
+**Note:** The formula preview must be checked before merging into the develop branch. Example: [dynamic_lstmp](../../../api/layers/nn.html#dynamic-lstmp).
+
+### 3. The order of parameters in the Python-side Op interface
+The parameters of the Python API are generally ordered by importance. Take fc as an example:
+```
+def fc(input,
+       size,
+       num_flatten_dims=1,
+       param_attr=None,
+       bias_attr=None,
+       act=None,
+       is_test=False,
+       name=None)
+```
diff --git a/doc/paddle/advanced_guide/addon_development/tracing.jpeg b/doc/paddle/advanced_guide/addon_development/tracing.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..3a49fc4f8a401a9463b0157e2f38c164ca02dcc5
Binary files /dev/null and b/doc/paddle/advanced_guide/addon_development/tracing.jpeg differ
diff --git a/doc/paddle/advanced_guide/data_preparing/feeding_data.rst b/doc/paddle/advanced_guide/data_preparing/feeding_data.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c0749a418e1e380e748d0c1e4cfc0993a0b73d04
--- /dev/null
+++ b/doc/paddle/advanced_guide/data_preparing/feeding_data.rst
@@ -0,0 +1,137 @@
+.. _user_guide_use_numpy_array_as_train_data:
+
+##############
+同步数据读取
+##############
+
+PaddlePaddle Fluid支持使用 :code:`fluid.data()` 配置数据层;
+再使用 Numpy Array 或者直接使用Python创建C++的
+:code:`fluid.LoDTensor` , 通过 :code:`Executor.run(feed=...)` 传给
+:code:`fluid.Executor` 或 :code:`fluid.ParallelExecutor` 。
+
+数据层配置
+##########
+
+通过 :code:`fluid.data()` 可以配置神经网络中需要的数据层。具体方法为:
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+
+    image = fluid.data(name="image", shape=[None, 3, 224, 224])
+    label = fluid.data(name="label", shape=[None, 1], dtype="int64")
+
+    # use image/label as layer input
+    prediction = fluid.layers.fc(input=image, size=1000, act="softmax")
+    loss = fluid.layers.cross_entropy(input=prediction, label=label)
+    ...
+
+上段代码中,:code:`image` 和 :code:`label` 是通过 :code:`fluid.data`
+创建的两个输入数据层。其中 :code:`image` 是 :code:`[None, 3, 224, 224]` 维度的浮点数据;
+:code:`label` 是 :code:`[None, 1]` 维度的整数数据。这里需要注意的是:
+
+1.
Executor在执行的时候,会检查定义的数据层数据和feed的数据的 :code:`shape` 和 :code:`dtype` 是否一致,如果不一致,程序会报错退出。对于一些任务,在不同的轮数,数据的某些维度会变化,可以将维度的值设置为None,例如第0维会变化,可以将 :code:`shape` 设置为 :code:`[None, 3, 224, 224]` 。 + + +2. Fluid中用来做类别标签的数据类型是 :code:`int64`,并且标签从0开始。可用数据类型请参考 :ref:`user_guide_paddle_support_data_types`。 + +.. _user_guide_feed_data_to_executor: + +传递训练数据给执行器 +#################### + +:code:`Executor.run` 和 :code:`ParallelExecutor.run` 都接受一个 :code:`feed` 参数。 +这个参数是一个Python的字典。它的键是数据层的名字,例如上文代码中的 :code:`image`。 +它的值是对应的numpy array。 + +例如: + +.. code-block:: python + + exe = fluid.Executor(fluid.CPUPlace()) + # init Program + exe.run(fluid.default_startup_program()) + exe.run(feed={ + "image": numpy.random.random(size=(32, 3, 224, 224)).astype('float32'), + "label": numpy.random.random(size=(32, 1)).astype('int64') + }) + +进阶使用 +######## + +如何传入序列数据 +---------------- + +序列数据是PaddlePaddle Fluid支持的特殊数据类型,可以使用 :code:`LoDTensor` 作为 +输入数据类型。它需要用户: 1. 传入一个mini-batch需要被训练的所有数据; +2.每个序列的长度信息。 +用户可以使用 :code:`fluid.create_lod_tensor` 来创建 :code:`LoDTensor` 。 + +传入序列信息的时候,需要设置序列嵌套深度,:code:`lod_level` 。 +例如训练数据是词汇组成的句子,:code:`lod_level=1` ;训练数据是 词汇先组成了句子, +句子再组成了段落,那么 :code:`lod_level=2` 。 + +例如: + +.. code-block:: python + + sentence = fluid.data(name="sentence", dtype="int64", shape=[None, 1], lod_level=1) + + ... + + exe.run(feed={ + "sentence": create_lod_tensor( + data=numpy.array([1, 3, 4, 5, 3, 6, 8], dtype='int64').reshape(-1, 1), + recursive_seq_lens=[[4, 1, 2]], + place=fluid.CPUPlace() + ) + }) + +训练数据 :code:`sentence` 包含三个样本,他们的长度分别是 :code:`4, 1, 2` 。 +他们分别是 :code:`data[0:4]`, :code:`data[4:5]` 和 :code:`data[5:7]` 。 + +如何分别设置ParallelExecutor中每个设备的训练数据 +------------------------------------------------ + +用户将数据传递给使用 :code:`ParallelExecutor.run(feed=...)` 时, +可以显示指定每一个训练设备(例如GPU)上的数据。 +用户需要将一个列表传递给 :code:`feed` 参数,列表中的每一个元素都是一个字典。 +这个字典的键是数据层的名字,值是数据层的值。 + +例如: + +.. code-block:: python + + parallel_executor = fluid.ParallelExecutor() + parallel_executor.run( + feed=[ + { + "image": numpy.random.random(size=(32, 3, 224, 224)).astype('float32'), + "label": numpy.random.random(size=(32, 1)).astype('int64') + }, + { + "image": numpy.random.random(size=(16, 3, 224, 224)).astype('float32'), + "label": numpy.random.random(size=(16, 1)).astype('int64') + }, + ] + ) + +上述代码中,GPU0会训练 32 个样本,而 GPU1训练 16 个样本。 + + +.. _user_guide_paddle_support_data_types: + +Fluid目前支持的数据类型 +----------------------- + +PaddlePaddle Fluid目前支持的数据类型包括: + + * float16: 部分操作支持 + * float32: 主要实数类型 + * float64: 次要实数类型,支持大部分操作 + * int32: 次要标签类型 + * int64: 主要标签类型 + * uint64: 次要标签类型 + * bool: 控制流数据类型 + * int16: 次要标签类型 + * uint8: 输入数据类型,可用于图像像素 diff --git a/doc/paddle/advanced_guide/data_preparing/feeding_data_en.rst b/doc/paddle/advanced_guide/data_preparing/feeding_data_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..9afedfa1082232d4a972343a9dbf8df88af8ee8e --- /dev/null +++ b/doc/paddle/advanced_guide/data_preparing/feeding_data_en.rst @@ -0,0 +1,140 @@ +.. _user_guide_use_numpy_array_as_train_data_en: + +################################# +Take Numpy Array as Training Data +################################# + +PaddlePaddle Fluid supports configuring data layer with :code:`fluid.data()` . +Then you can use Numpy Array or directly use Python to create C++ +:code:`fluid.LoDTensor` , and then feed it to :code:`fluid.Executor` or :code:`fluid.ParallelExecutor` +through :code:`Executor.run(feed=...)` . 
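+
+The rest of this document describes the two steps (configuring the data layer and feeding the data) separately. As a quick orientation, a self-contained minimal sketch combining both steps might look as follows; the network, shapes and random inputs below are arbitrary and chosen only for illustration:
+
+.. code-block:: python
+
+    import numpy
+    import paddle.fluid as fluid
+
+    image = fluid.data(name="image", shape=[None, 784], dtype="float32")
+    fc = fluid.layers.fc(input=image, size=10)
+
+    exe = fluid.Executor(fluid.CPUPlace())
+    exe.run(fluid.default_startup_program())
+
+    # Feed a numpy array whose key matches the name of the data layer.
+    out, = exe.run(
+        feed={"image": numpy.random.random(size=(8, 784)).astype('float32')},
+        fetch_list=[fc])
+    print(out.shape)  # (8, 10)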
+ +Configure Data Layer +############################ + +With :code:`fluid.data()` , you can configure data layer in neural network. Details are as follows: + +.. code-block:: python + + import paddle.fluid as fluid + + image = fluid.data(name="image", shape=[None, 3, 224, 224]) + label = fluid.data(name="label", shape=[None, 1], dtype="int64") + + # use image/label as layer input + prediction = fluid.layers.fc(input=image, size=1000, act="softmax") + loss = fluid.layers.cross_entropy(input=prediction, label=label) + ... + +In the code above, :code:`image` and :code:`label` are two input data layers created by :code:`fluid.data` . :code:`image` is float data of shape :code:`[None, 3, 224, 224]` ; :code:`label` is the int data of shape :code:`[None, 1]` . Note that: + +1. When the program is executing, executor will check whether the :code:`shape` and :code:`dtype` defined and feeded are consistent. If they are not consistent, the program will exit with an error. In some tasks, the dimension will change in different training steps. For this case, the value of the dimension can be set to None. For example, the :code:`shape` can be set to :code:`[None, 3, 224, 224]` when the 0th dimension will change. + +2. Data type of category labels in Fluid is :code:`int64` and the label starts from 0. About the supported data types,please refer to :ref:`user_guide_paddle_support_data_types_en` . + +.. _user_guide_feed_data_to_executor_en: + +Transfer Train Data to Executor +################################ + +Both :code:`Executor.run` and :code:`ParallelExecutor.run` receive a parameter :code:`feed` . +The parameter is a dict in Python. Its key is the name of data layer,such as :code:`image` in code above. And its value is the corresponding numpy array. + +For example: + +.. code-block:: python + + exe = fluid.Executor(fluid.CPUPlace()) + # init Program + exe.run(fluid.default_startup_program()) + exe.run(feed={ + "image": numpy.random.random(size=(32, 3, 224, 224)).astype('float32'), + "label": numpy.random.random(size=(32, 1)).astype('int64') + }) + +Advanced Usage +############### + +How to feed Sequence Data +-------------------------- + +Sequence data is a unique data type supported by PaddlePaddle Fluid. You can take :code:`LoDTensor` as input data type. + +You need to: + +1. Feed all data to be trained in a mini-batch. + +2. Get the length of each sequence. + +You can use :code:`fluid.create_lod_tensor` to create :code:`LoDTensor` . + +To feed sequence information, it is necessary to set the sequence nested depth :code:`lod_level` . + +For instance, if the training data are sentences consisting of words, :code:`lod_level=1`; if train data are paragraphs which consists of sentences that consists of words, :code:`lod_level=2` . + +For example: + +.. code-block:: python + + sentence = fluid.data(name="sentence", dtype="int64", shape=[None, 1], lod_level=1) + + ... + + exe.run(feed={ + "sentence": create_lod_tensor( + data=numpy.array([1, 3, 4, 5, 3, 6, 8], dtype='int64').reshape(-1, 1), + recursive_seq_lens=[[4, 1, 2]], + place=fluid.CPUPlace() + ) + }) + +Training data :code:`sentence` contain three samples, the lengths of which are :code:`4, 1, 2` respectively. + +They are :code:`data[0:4]`, :code:`data[4:5]` and :code:`data[5:7]` respectively. 
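+
+For the nested case mentioned above (:code:`lod_level=2` , e.g. paragraphs made of sentences made of words), :code:`recursive_seq_lens` takes one list per nesting level: the outer list gives the number of sentences in each paragraph, and the inner list gives the number of words in each sentence. A hypothetical sketch (all lengths and ids below are made up purely for illustration):
+
+.. code-block:: python
+
+    paragraph = fluid.data(name="paragraph", dtype="int64", shape=[None, 1], lod_level=2)
+
+    ...
+
+    exe.run(feed={
+        "paragraph": fluid.create_lod_tensor(
+            # 9 word ids in total
+            data=numpy.arange(9, dtype='int64').reshape(-1, 1),
+            # 2 paragraphs: the first has 2 sentences, the second has 1;
+            # the 3 sentences contain 3, 2 and 4 words respectively.
+            recursive_seq_lens=[[2, 1], [3, 2, 4]],
+            place=fluid.CPUPlace()
+        )
+    })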
+ +How to prepare training data for every device in ParallelExecutor +------------------------------------------------------------------- + +When you feed data to :code:`ParallelExecutor.run(feed=...)` , +you can explicitly assign data for every training device (such as GPU). + +You need to feed a list to :code:`feed` . Each element of the list is a dict. + +The key of the dict is name of data layer and the value of dict is value of data layer. + +For example: + +.. code-block:: python + + parallel_executor = fluid.ParallelExecutor() + parallel_executor.run( + feed=[ + { + "image": numpy.random.random(size=(32, 3, 224, 224)).astype('float32'), + "label": numpy.random.random(size=(32, 1)).astype('int64') + }, + { + "image": numpy.random.random(size=(16, 3, 224, 224)).astype('float32'), + "label": numpy.random.random(size=(16, 1)).astype('int64') + }, + ] + ) + +In the code above, GPU0 will train 32 samples and GPU1 will train 16 samples. + +.. _user_guide_paddle_support_data_types_en: + +Data types supported by Fluid +------------------------------- + +Data types supported by PaddlePaddle Fluid contains: + + * float16: supported by part of operations + * float32: major data type of real number + * float64: minor data type of real number, supported by most operations + * int32: minor data type of labels + * int64: major data type of labels + * uint64: minor data type of labels + * bool: type of control flow data + * int16: minor type of labels + * uint8: input data type, used for pixel of picture diff --git a/doc/paddle/advanced_guide/data_preparing/index_cn.rst b/doc/paddle/advanced_guide/data_preparing/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6cff321413c2e826ae6d04f98ebf0cf03e6ecf7b --- /dev/null +++ b/doc/paddle/advanced_guide/data_preparing/index_cn.rst @@ -0,0 +1,15 @@ +.. _user_guide_prepare_data: + +######## +准备数据 +######## + +本章详细介绍了如何为神经网络提供数据,包括数据的前期处理与后期的同步、异步读取。 + +.. toctree:: + :maxdepth: 1 + + prepare_steps.rst + reader_cn.md + use_py_reader.rst + feeding_data.rst \ No newline at end of file diff --git a/doc/paddle/advanced_guide/data_preparing/index_en.rst b/doc/paddle/advanced_guide/data_preparing/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..b4d7a337949047ddde5848fa94194ad19174d4b3 --- /dev/null +++ b/doc/paddle/advanced_guide/data_preparing/index_en.rst @@ -0,0 +1,15 @@ +.. _user_guide_prepare_data_en: + +############# +Prepare Data +############# + +This document mainly introduces how to provide data for the network, including Synchronous-method and Asynchronous-method. + +.. toctree:: + :maxdepth: 1 + + prepare_steps_en.rst + reader.md + use_py_reader_en.rst + feeding_data_en.rst \ No newline at end of file diff --git a/doc/paddle/advanced_guide/data_preparing/prepare_steps.rst b/doc/paddle/advanced_guide/data_preparing/prepare_steps.rst new file mode 100644 index 0000000000000000000000000000000000000000..3c6242e093b13b87f7bebb97e41f803fb724bafa --- /dev/null +++ b/doc/paddle/advanced_guide/data_preparing/prepare_steps.rst @@ -0,0 +1,100 @@ +.. 
_user_guide_prepare_steps: + +######## +准备步骤 +######## + +使用PaddlePaddle Fluid准备数据分为三个步骤: + +Step 1: 自定义Reader生成训练/预测数据 +################################### + +生成的数据类型可以为Numpy Array或LoDTensor。根据Reader返回的数据形式的不同,可分为Batch级的Reader和Sample(样本)级的Reader。 + +Batch级的Reader每次返回一个Batch的数据,Sample级的Reader每次返回单个样本的数据 + +如果您的数据是Sample级的数据,我们提供了一个可以数据预处理和组建batch的工具::code:`Python Reader` 。 + + +Step 2: 在网络配置中定义数据层变量 +################################### +用户需使用 :code:`fluid.data` 在网络中定义数据层变量。定义数据层变量时需指明数据层的名称name、数据类型dtype和维度shape。例如: + +.. code-block:: python + + import paddle.fluid as fluid + + image = fluid.data(name='image', dtype='float32', shape=[None, 28, 28]) + label = fluid.data(name='label', dtype='int64', shape=[None, 1]) + +其中,None表示不确定的维度。此例子中None的含义为batch size。 + +Step 3: 将数据送入网络进行训练/预测 +################################### + +Fluid提供两种方式,分别是异步DataLoader接口方式或同步Feed方式,具体介绍如下: + +- 异步DataLoader接口方式 + +用户需要先使用 :code:`fluid.io.DataLoader` 定义DataLoader对象,然后通过DataLoader对象的set方法设置数据源。 +使用DataLoader接口时,数据传入与模型训练/预测过程是异步进行的,效率较高,推荐使用。 + +- 同步Feed方式 + +用户自行构造输入数据,并在 :code:`fluid.Executor` 或 :code:`fluid.ParallelExecutor` +中使用 :code:`executor.run(feed=...)` 传入训练数据。数据准备和模型训练/预测的过程是同步进行的, +效率较低。 + + +这两种准备数据方法的比较如下: + +======== ================================= ===================================== +对比项 同步Feed方式 异步DataLoader接口方式 +======== ================================= ===================================== +API接口 :code:`executor.run(feed=...)` :code:`fluid.io.DataLoader` +数据格式 Numpy Array或LoDTensor Numpy Array或LoDTensor +数据增强 Python端使用其他库完成 Python端使用其他库完成 +速度 慢 快 +推荐用途 调试模型 工业训练 +======== ================================= ===================================== + +Reader数据类型对使用方式的影响 +########################### + +根据Reader数据类型的不同,上述步骤的具体操作将有所不同,具体介绍如下: + +读取Sample级Reader数据 ++++++++++++++++++++++ + +若自定义的Reader每次返回单个样本的数据,用户需通过以下步骤完成数据送入: + +Step 1. 组建数据 +================ + +调用Fluid提供的Reader相关接口完成组batch和部分的数据预处理功能,具体请参见: `数据预处理工具 <./reader_cn.html>`_ 。 + +Step 2. 送入数据 +================ + +若使用异步DataLoader接口方式送入数据,请调用 :code:`set_sample_generator` 或 :code:`set_sample_list_generator` 接口完成,具体请参见: :ref:`user_guides_use_py_reader` 。 + +若使用同步Feed方式送入数据,请使用DataFeeder接口将Reader数据转换为LoDTensor格式后送入网络,具体请参见 :ref:`cn_api_fluid_DataFeeder` 。 + +读取Batch级Reader数据 +++++++++++++++++++++ + +Step 1. 组建数据 +================ + +由于Batch已经组好,已经满足了Step 1的条件,可以直接进行Step 2。 + +Step 2. 送入数据 +================ + +若使用异步DataLoader接口方式送入数据,请调用DataLoader的 :code:`set_batch_generator` 接口完成,具体方式请参见: :ref:`user_guides_use_py_reader` 。 + +若使用同步Feed方式送入数据,具体请参见: :ref:`user_guide_use_numpy_array_as_train_data` 。 + + + + diff --git a/doc/paddle/advanced_guide/data_preparing/prepare_steps_en.rst b/doc/paddle/advanced_guide/data_preparing/prepare_steps_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..609a6ac188b292a3d93aafcd002755ac145a70ab --- /dev/null +++ b/doc/paddle/advanced_guide/data_preparing/prepare_steps_en.rst @@ -0,0 +1,95 @@ +.. _user_guide_prepare_steps_en: + +############# +Prepare Steps +############# + +Data preparation in PaddlePaddle Fluid can be separated into 3 steps. + +Step 1: Define a reader to generate training/testing data +########################################################## + +The generated data type can be Numpy Array or LoDTensor. According to the different data formats returned by the reader, it can be divided into Batch Reader and Sample Reader. + +The batch reader yields a mini-batch data for each, while the sample reader yields a sample data for each. 
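+
+As a schematic illustration (the random data below is made up and only the yielded structure matters), the two kinds of reader can be sketched as follows:
+
+.. code-block:: python
+
+    import numpy
+
+    # A sample reader yields one sample (here a feature/label pair) per iteration.
+    def sample_reader():
+        for _ in range(8):
+            yield numpy.random.random(size=(784, )).astype('float32'), 0
+
+    # A batch reader yields a whole mini-batch (a list of samples) per iteration.
+    def batch_reader():
+        for _ in range(2):
+            yield [(numpy.random.random(size=(784, )).astype('float32'), 0)
+                   for _ in range(4)]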
+ +If your reader yields a sample data, we provide a data augmentation and batching tool for you: :code:`Python Reader` . + +Step 2: Define data layer variables in network +############################################### + +Users should use :code:`fluid.data` to define data layer variables. Name, dtype and shape are required when defining. For example, + +.. code-block:: python + + import paddle.fluid as fluid + + image = fluid.data(name='image', dtype='float32', shape=[None, 28, 28]) + label = fluid.data(name='label', dtype='int64', shape=[None, 1]) + +None means that the dimension is uncertain. In this example, None means the batch size. + +Step 3: Send the data to network for training/testing +###################################################### + +PaddlePaddle Fluid provides 2 methods for sending data to the network: Asynchronous DataLoader API, and Synchronous Feed Method. + +- Asynchronous DataLoader API + +User should use :code:`fluid.io.DataLoader` to define a DataLoader object and use its setter method to set the data source. +When using DataLoader API, the process of data sending works asynchronously with network training/testing. +It is an efficient way for sending data and recommended to use. + +- Synchronous Feed Method + +User should create the feeding data beforehand and use :code:`executor.run(feed=...)` to send the data to :code:`fluid.Executor` or :code:`fluid.ParallelExecutor` . +Data preparation and network training/testing work synchronously, which is less efficient. + +Comparison of these 2 methods are as follows: + +========================== ================================= ====================================== +Comparison item Synchronous Feed Method Asynchronous DataLoader API +========================== ================================= ====================================== +API :code:`executor.run(feed=...)` :code:`fluid.io.DataLoader` +Data type Numpy Array or LoDTensor Numpy Array or LoDTensor +Data augmentation use Python for data augmentation use Python for data augmentation +Speed slow rapid +Recommended applications model debugging industrial training +========================== ================================= ====================================== + +Choose different usages for different data formats +################################################### + +According to the different data formats of reader, users should choose different usages for data preparation. + +Read data from sample reader ++++++++++++++++++++++++++++++ + +If user-defined reader is a sample reader, users should use the following steps: + +Step 1. Batching +================= + +Use the data reader interfaces in PaddlePaddle Fluid for data augmentation and batching. Please refer to `Python Reader <./reader.html>`_ for details. + +Step 2. Sending data +===================== + +If using Asynchronous DataLoader API, please use :code:`set_sample_generator` or :code:`set_sample_list_generator` to set the data source for DataLoader. Please refer to :ref:`user_guide_use_py_reader_en` for details. + +If using Synchronous Feed Method, please use DataFeeder to convert the reader data to LoDTensor before sending to the network. Please refer to :ref:`api_fluid_DataFeeder` for details. + +Read data from sample reader ++++++++++++++++++++++++++++++ + +Step 1. Batching +================= + +Since the reader has been a batch reader, this step can be skipped. + +Step 2. 
Sending data +===================== + +If using Asynchronous DataLoader API, please use :code:`set_batch_generator` to set the data source for DataLoader. Please refer to :ref:`user_guide_use_py_reader_en` for details. + +If using Synchronous Feed Method, please refer to :ref:`user_guide_use_numpy_array_as_train_data_en` for details. \ No newline at end of file diff --git a/doc/paddle/advanced_guide/data_preparing/reader.md b/doc/paddle/advanced_guide/data_preparing/reader.md new file mode 100644 index 0000000000000000000000000000000000000000..8647dd45bef5be20b41ded78e70e850bc98c2c7d --- /dev/null +++ b/doc/paddle/advanced_guide/data_preparing/reader.md @@ -0,0 +1,195 @@ +# Python Reader + +During the training and testing phases, PaddlePaddle programs need to read data. To help the users write code that performs reading input data, we define the following: + +- A *reader*: A function that reads data (from file, network, random number generator, etc) and yields the data items. +- A *reader creator*: A function that returns a reader function. +- A *reader decorator*: A function, which takes in one or more readers, and returns a reader. +- A *batch reader*: A function that reads data (from *reader*, file, network, random number generator, etc) and yields a batch of data items. + +and also provide a function which can convert a reader to a batch reader, frequently used reader creators and reader decorators. + +## Data Reader Interface + +*Data reader* doesn't have to be a function that reads and yields data items. It can just be any function without any parameters that creates an iterable (anything can be used in `for x in iterable`) as follows: + +``` +iterable = data_reader() +``` + +The item produced from the iterable should be a **single** entry of data and **not** a mini batch. The entry of data could be a single item or a tuple of items. Item should be of one of the supported types (e.g., numpy 1d array of float32, int, list of int etc.) + +An example implementation for single item data reader creator is as follows: + +```python +def reader_creator_random_image(width, height): + def reader(): + while True: + yield numpy.random.uniform(-1, 1, size=width*height) + return reader +``` + +An example implementation for multiple item data reader creator is as follows: +```python +def reader_creator_random_image_and_label(width, height, label): + def reader(): + while True: + yield numpy.random.uniform(-1, 1, size=width*height), label + return reader +``` + +## Batch Reader Interface + +*Batch reader* can be any function without any parameters that creates an iterable (anything can be used in `for x in iterable`). The output of the iterable should be a batch (list) of data items. Each item inside the list should be a tuple. + +Here are some valid outputs: + +```python +# a mini batch of three data items. Each data item consist three columns of data, each of which is 1. +[(1, 1, 1), +(2, 2, 2), +(3, 3, 3)] + +# a mini batch of three data items, each data item is a list (single column). +[([1,1,1],), +([2,2,2],), +([3,3,3],)] +``` + +Please note that each item inside the list must be a tuple, below is an invalid output: +```python + # wrong, [1,1,1] needs to be inside a tuple: ([1,1,1],). + # Otherwise it is ambiguous whether [1,1,1] means a single column of data [1, 1, 1], + # or three columns of data, each of which is 1. 
+[[1,1,1], +[2,2,2], +[3,3,3]] +``` + +It is easy to convert from a reader to a batch reader: + +```python +mnist_train = paddle.dataset.mnist.train() +mnist_train_batch_reader = paddle.batch(mnist_train, 128) +``` + +It is also straight forward to create a custom batch reader: + +```python +def custom_batch_reader(): + while True: + batch = [] + for i in xrange(128): + batch.append((numpy.random.uniform(-1, 1, 28*28),)) # note that it's a tuple being appended. + yield batch + +mnist_random_image_batch_reader = custom_batch_reader +``` + +## Usage + +Following is how we can use the reader with PaddlePaddle: +The batch reader, a mapping from item(s) to data layer, the batch size and the number of total passes will be passed into `paddle.train` as follows: + +```python +# two data layer is created: +image_layer = paddle.layer.data("image", ...) +label_layer = paddle.layer.data("label", ...) + +# ... +batch_reader = paddle.batch(paddle.dataset.mnist.train(), 128) +paddle.train(batch_reader, {"image":0, "label":1}, 128, 10, ...) +``` + +## Data Reader Decorator + +The *Data reader decorator* takes in a single reader or multiple data readers and returns a new data reader. It is similar to a [python decorator](https://wiki.python.org/moin/PythonDecorators), but it does not use `@` in the syntax. + +Since we have a strict interface for data readers (no parameters and return a single data item), a data reader can be used in a flexible way using data reader decorators. Following are a few examples: + +### Prefetch Data + +Since reading data may take some time and training can not proceed without data, it is generally a good idea to prefetch the data. + +Use `paddle.reader.buffered` to prefetch data: + +```python +buffered_reader = paddle.reader.buffered(paddle.dataset.mnist.train(), 100) +``` + +`buffered_reader` will try to buffer (prefetch) `100` data entries. + +### Compose Multiple Data Readers + +For example, if we want to use a source of real images (say reusing mnist dataset), and a source of random images as input for [Generative Adversarial Networks](https://arxiv.org/abs/1406.2661). + +We can do the following : + +```python +def reader_creator_random_image(width, height): + def reader(): + while True: + yield numpy.random.uniform(-1, 1, size=width*height) + return reader + +def reader_creator_bool(t): + def reader(): + while True: + yield t + return reader + +true_reader = reader_creator_bool(True) +false_reader = reader_creator_bool(False) + +reader = paddle.reader.compose(paddle.dataset.mnist.train(), reader_creator_random_image(20, 20), true_reader, false_reader) +# Skipped 1 because paddle.dataset.mnist.train() produces two items per data entry. +# And we don't care about the second item at this time. +paddle.train(paddle.batch(reader, 128), {"true_image":0, "fake_image": 2, "true_label": 3, "false_label": 4}, ...) +``` + +### Shuffle + +Given the shuffle buffer size `n`, `paddle.reader.shuffle` returns a data reader that buffers `n` data entries and shuffles them before a data entry is read. + +Example: +```python +reader = paddle.reader.shuffle(paddle.dataset.mnist.train(), 512) +``` + +## Q & A + +### Why does a reader return only a single entry, and not a mini batch? + +Returning a single entry makes reusing existing data readers much easier (for example, if an existing reader returns 3 entries instead of a single entry, the training code will be more complicated because it needs to handle cases like a batch size 2). 
+ +We provide a function: `paddle.batch` to turn (a single entry) reader into a batch reader. + +### Why do we need a batch reader, isn't is sufficient to give the reader and batch_size as arguments during training ? + +In most of the cases, it would be sufficient to give the reader and batch_size as arguments to the train method. However sometimes the user wants to customize the order of data entries inside a mini batch, or even change the batch size dynamically. For these cases using a batch reader is very efficient and helpful. + +### Why use a dictionary instead of a list to provide mapping? + +Using a dictionary (`{"image":0, "label":1}`) instead of a list (`["image", "label"]`) gives the advantage that the user can easily reuse the items (e.g., using `{"image_a":0, "image_b":0, "label":1}`) or even skip an item (e.g., using `{"image_a":0, "label":2}`). + +### How to create a custom data reader creator ? + +```python +def image_reader_creator(image_path, label_path, n): + def reader(): + f = open(image_path) + l = open(label_path) + images = numpy.fromfile( + f, 'ubyte', count=n * 28 * 28).reshape((n, 28 * 28)).astype('float32') + images = images / 255.0 * 2.0 - 1.0 + labels = numpy.fromfile(l, 'ubyte', count=n).astype("int") + for i in xrange(n): + yield images[i, :], labels[i] # a single entry of data is created each time + f.close() + l.close() + return reader + +# images_reader_creator creates a reader +reader = image_reader_creator("/path/to/image_file", "/path/to/label_file", 1024) +paddle.train(paddle.batch(reader, 128), {"image":0, "label":1}, ...) +``` diff --git a/doc/paddle/advanced_guide/data_preparing/reader_cn.md b/doc/paddle/advanced_guide/data_preparing/reader_cn.md new file mode 100644 index 0000000000000000000000000000000000000000..fc44cc298c0c04fda2dea0a4f561463c0f188978 --- /dev/null +++ b/doc/paddle/advanced_guide/data_preparing/reader_cn.md @@ -0,0 +1,189 @@ +# 数据预处理工具 + +在模型训练和预测阶段,PaddlePaddle程序需要读取训练或预测数据。为了帮助您编写数据读取的代码,我们提供了如下接口: + +- *reader*: 样本级的reader,用于读取数据的函数,数据可来自于文件、网络、随机数生成器等,函数每次返回一个样本数据项。 +- *reader creator*: 接受一个或多个reader作为参数、返回一个新reader的函数。 +- *reader decorator*: 一个函数,接受一个或多个reader,并返回一个reader。 +- *batch reader*: 用于读取数据的函数,数据可来自于文件、网络、随机数生成器等,函数每次返回一个batch大小的数据项。 + +此外,还提供了将reader转换为batch reader的函数,会频繁用到reader creator和reader decorator。 + +## Data Reader 接口 +Data reader不一定要求为读取和遍历数据项的函数。它可以是返回iterable对象(即可以用于`for x in iterable`的任意对象)的任意不带参数的函数: + +``` +iterable = data_reader() +``` + +Iterable对象应产生单项或tuple形式的数据,而不是一个mini batch的数据。产生的数据项应在[支持的类型](./feeding_data.html#fluid) 中,例如float32,int类型的numpy一维矩阵,int类型的列表等。 + +以下是实现单项数据reader creator的示例: + +```python +def reader_creator_random_image(width, height): + def reader(): + while True: + yield numpy.random.uniform(-1, 1, size=width*height) + return reader +``` + +以下是实现多项数据reader creator的示例: + +```python +def reader_creator_random_image_and_label(width, height, label): + def reader(): + while True: + yield numpy.random.uniform(-1, 1, size=width*height), label + return reader +``` + +## Batch Reader 接口 +*Batch reader*可以是返回iterable对象(即可以用于`for x in iterable`的任意对象)的任意不带参数的函数。Iterable的输出应为一个batch(list)的数据项。list中的每个数据项均为一个tuple元组。 + +这里是一些有效输出: + +```python +# 三个数据项组成一个mini batch。每个数据项有三列,每列数据项为1。 +[(1, 1, 1), +(2, 2, 2), +(3, 3, 3)] + +# 三个数据项组成一个mini batch。每个数据项是一个列表(单列)。 +[([1,1,1],), +([2,2,2],), +([3,3,3],)] +``` + +请注意列表里的每个项必须为tuple,下面是一个无效输出: +```python + # 错误, [1,1,1]需在一个tuple内: ([1,1,1],). 
+ # 否则产生歧义,[1,1,1]是否表示数据[1, 1, 1]整体作为单一列。 + # 或者数据的三列,每一列为1。 +[[1,1,1], +[2,2,2], +[3,3,3]] +``` + +很容易将reader转换成batch reader: + +```python +mnist_train = paddle.dataset.mnist.train() +mnist_train_batch_reader = paddle.batch(mnist_train, 128) +``` + +也可以直接创建一个自定义batch reader: + +```python +def custom_batch_reader(): + while True: + batch = [] + for i in xrange(128): + batch.append((numpy.random.uniform(-1, 1, 28*28),)) # note that it's a tuple being appended. + yield batch + +mnist_random_image_batch_reader = custom_batch_reader +``` + +## 使用 +以下是我们如何用PaddlePaddle的reader: + +batch reader是从数据项到数据层(data layer)的映射,batch_size和总pass数通过以下方式传给`paddle.train`: + +```python +# 创建两个数据层: +image_layer = paddle.layer.data("image", ...) +label_layer = paddle.layer.data("label", ...) + +# ... +batch_reader = paddle.batch(paddle.dataset.mnist.train(), 128) +paddle.train(batch_reader, {"image":0, "label":1}, 128, 10, ...) +``` + +## Data Reader装饰器 +*Data reader decorator*接收一个或多个reader对象作为参数,返回一个新的reader对象。它类似于[python decorator](https://wiki.python.org/moin/PythonDecorators) ,但在语法上不需要写`@`。 + +我们对data reader接口有严格限制(无参数并返回单个数据项),data reader可灵活地搭配data reader decorators使用。以下是一些示例: + +### 预取回数据(缓存数据) +由于读数据需要一些时间,而没有数据无法进行训练,因此一般而言数据预读取会是一个很好的方法。 + +用`paddle.reader.buffered`预读取数据: + +```python +buffered_reader = paddle.reader.buffered(paddle.dataset.mnist.train(), 100) +``` + +`buffered_reader`将尝试缓存(预读取)`100`个数据项。 + +### 组成多个Data Reader +例如,如果我们想用实际图像源(也就是复用mnist数据集),和随机图像源作为[Generative Adversarial Networks](https://arxiv.org/abs/1406.2661)的输入。 + +我们可以参照如下: + +```python +def reader_creator_random_image(width, height): + def reader(): + while True: + yield numpy.random.uniform(-1, 1, size=width*height) + return reader + +def reader_creator_bool(t): + def reader(): + while True: + yield t + return reader + +true_reader = reader_creator_bool(True) +false_reader = reader_creator_bool(False) + +reader = paddle.reader.compose(paddle.dataset.mnist.train(), reader_creator_random_image(20, 20), true_reader, false_reader) +# 跳过1因为paddle.dataset.mnist.train()为每个数据项生成两个项。 +# 并且这里我们暂时不考虑第二项。 +paddle.train(paddle.batch(reader, 128), {"true_image":0, "fake_image": 2, "true_label": 3, "false_label": 4}, ...) +``` + +### 随机排序 +给定大小为`n`的随机排序缓存, `paddle.reader.shuffle`返回一个data reader ,缓存`n`个数据项,并在读取一个数据项前进行随机排序。 + +示例: +```python +reader = paddle.reader.shuffle(paddle.dataset.mnist.train(), 512) +``` + +## Q & A + +### 为什么一个reader只返回单项而不是mini batch? + +返回单项,可以更容易地复用已有的data reader,例如如果一个已有的reader返回3项而不是一个单项,这样训练代码会更复杂,因为需要处理像batch_size为2这样的例子。 + +我们提供一个函数来将一个单项reader转换成一个batch reader。 + +### 为什么需要一个batch reader,在训练过程中给出reader和batch_size参数这样不足够吗? + +在大多数情况下,在训练方法中给出reader和batch_size参数是足够的。但有时用户想自定义mini batch里数据项的顺序,或者动态改变batch_size。在这些情况下用batch reader会非常高效有用。 + +### 为什么用字典而不是列表进行映射? + +使用字典(`{"image":0, "label":1}`)而不是列表`["image", "label"]`)有利于用户易于复用数据项,例如使用`{"image_a":0, "image_b":0, "label":1}`,或者甚至跳过数据项,例如使用`{"image_a":0, "label":2}`。 + + +### 如何创建一个自定义data reader? 
+```python +def image_reader_creator(image_path, label_path, n): + def reader(): + f = open(image_path) + l = open(label_path) + images = numpy.fromfile( + f, 'ubyte', count=n * 28 * 28).reshape((n, 28 * 28)).astype('float32') + images = images / 255.0 * 2.0 - 1.0 + labels = numpy.fromfile(l, 'ubyte', count=n).astype("int") + for i in xrange(n): + yield images[i, :], labels[i] # a single entry of data is created each time + f.close() + l.close() + return reader + +# images_reader_creator创建一个reader +reader = image_reader_creator("/path/to/image_file", "/path/to/label_file", 1024) +``` diff --git a/doc/paddle/advanced_guide/data_preparing/use_py_reader.rst b/doc/paddle/advanced_guide/data_preparing/use_py_reader.rst new file mode 100644 index 0000000000000000000000000000000000000000..851d3c9c0dff209235c0d7fab024f5529f253ae5 --- /dev/null +++ b/doc/paddle/advanced_guide/data_preparing/use_py_reader.rst @@ -0,0 +1,267 @@ +.. _user_guides_use_py_reader: + +############# +异步数据读取 +############# + +除同步Feed方式外,我们提供了DataLoader。DataLoader的性能比 :ref:`user_guide_use_numpy_array_as_train_data` 更好,因为DataLoader的数据读取和模型训练过程是异步进行的,且能与 :code:`double_buffer_reader` 配合以进一步提高数据读取性能。此外, :code:`double_buffer_reader` 负责异步完成CPU Tensor到GPU Tensor的转换,一定程度上提升了数据读取效率。 + +创建DataLoader对象 +################################ + +创建DataLoader对象的方式为: + +.. code-block:: python + + import paddle.fluid as fluid + + image = fluid.data(name='image', dtype='float32', shape=[None, 784]) + label = fluid.data(name='label', dtype='int64', shape=[None, 1]) + + ITERABLE = True + + data_loader = fluid.io.DataLoader.from_generator( + feed_list=[image, label], capacity=64, use_double_buffer=True, iterable=ITERABLE) + +其中, + +- feed_list为需要输入的数据层变量列表; +- capacity为DataLoader对象的缓存区大小,单位为batch数量; +- use_double_buffer默认为True,表示使用 :code:`double_buffer_reader` 。建议开启,可提升数据读取速度; +- iterable默认为True,表示该DataLoader对象是可For-Range迭代的。推荐设置iterable=True。当iterable=True时,DataLoader与Program解耦,定义DataLoader对象不会改变Program;当iterable=False时,DataLoader会在Program中插入数据读取相关的op。 + +需要注意的是:`Program.clone()` (参见 :ref:`cn_api_fluid_Program` )不能实现DataLoader对象的复制。如果您要创建多个不同DataLoader对象(例如训练和预测阶段需创建两个不同的DataLoader),则需重定义两个DataLoader对象。 +若需要共享训练阶段和测试阶段的模型参数,您可以通过 :code:`fluid.unique_name.guard()` 的方式来实现。 +注:Paddle采用变量名区分不同变量,且变量名是根据 :code:`unique_name` 模块中的计数器自动生成的,每生成一个变量名计数值加1。 :code:`fluid.unique_name.guard()` 的作用是重置 :code:`unique_name` 模块中的计数器,保证多次调用 :code:`fluid.unique_name.guard()` 配置网络时对应变量的变量名相同,从而实现参数共享。 + +下面是一个使用DataLoader配置训练阶段和测试阶段网络的例子: + +.. 
code-block:: python + + import paddle + import paddle.fluid as fluid + import paddle.dataset.mnist as mnist + + def network(): + image = fluid.data(name='image', dtype='float32', shape=[None, 784]) + label = fluid.data(name='label', dtype='int64', shape=[None, 1]) + loader = fluid.io.DataLoader.from_generator(feed_list=[image, label], capacity=64) + + # Definition of models + fc = fluid.layers.fc(image, size=10) + xe = fluid.layers.softmax_with_cross_entropy(fc, label) + loss = fluid.layers.reduce_mean(xe) + return loss , loader + + # Create main program and startup program for training + train_prog = fluid.Program() + train_startup = fluid.Program() + + with fluid.program_guard(train_prog, train_startup): + # Use fluid.unique_name.guard() to share parameters with test network + with fluid.unique_name.guard(): + train_loss, train_loader = network() + adam = fluid.optimizer.Adam(learning_rate=0.01) + adam.minimize(train_loss) + + # Create main program and startup program for testing + test_prog = fluid.Program() + test_startup = fluid.Program() + with fluid.program_guard(test_prog, test_startup): + # Use fluid.unique_name.guard() to share parameters with train network + with fluid.unique_name.guard(): + test_loss, test_loader = network() + +设置DataLoader对象的数据源 +################################ + +DataLoader对象通过 :code:`set_sample_generator()` , :code:`set_sample_list_generator` 和 :code:`set_batch_generator()` 方法设置其数据源。 +这三个方法均接收Python生成器 :code:`generator` 作为参数,其区别在于: + +- :code:`set_sample_generator()` 要求 :code:`generator` 返回的数据格式为[img_1, label_1],其中img_1和label_1为单个样本的Numpy Array类型数据。 + +- :code:`set_sample_list_generator()` 要求 :code:`generator` 返回的数据格式为[(img_1, label_1), (img_2, label_2), ..., (img_n, label_n)],其中img_i和label_i均为每个样本的Numpy Array类型数据,n为batch size。 + +- :code:`set_batch_generator()` 要求 :code:`generator` 返回的数据的数据格式为[batched_imgs, batched_labels],其中batched_imgs和batched_labels为batch级的Numpy Array或LoDTensor类型数据。 + +值得注意的是,使用DataLoader做多GPU卡(或多CPU核)训练时,实际的总batch size为用户传入的 :code:`generator` 的batch size乘以设备数量。 + +当DataLoader的iterable=True(默认)时,必须给这三个方法传 :code:`places` 参数, +指定将读取的数据转换为CPU Tensor还是GPU Tensor。当DataLoader的iterable=False时,不需传places参数。 + +例如,假设我们有两个reader,其中fake_sample_reader每次返回一个sample的数据,fake_batch_reader每次返回一个batch的数据。 + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + # sample级reader + def fake_sample_reader(): + for _ in range(100): + sample_image = np.random.random(size=(784, )).astype('float32') + sample_label = np.random.random_integers(size=(1, ), low=0, high=9).astype('int64') + yield sample_image, sample_label + + + # batch级reader + def fake_batch_reader(): + batch_size = 32 + for _ in range(100): + batch_image = np.random.random(size=(batch_size, 784)).astype('float32') + batch_label = np.random.random_integers(size=(batch_size, 1), low=0, high=9).astype('int64') + yield batch_image, batch_label + + image1 = fluid.data(name='image1', dtype='float32', shape=[None, 784]) + label1 = fluid.data(name='label1', dtype='int64', shape=[None, 1]) + + image2 = fluid.data(name='image2', dtype='float32', shape=[None, 784]) + label2 = fluid.data(name='label2', dtype='int64', shape=[None, 1]) + + image3 = fluid.data(name='image3', dtype='float32', shape=[None, 784]) + label3 = fluid.data(name='label3', dtype='int64', shape=[None, 1]) + +对应的DataLoader设置如下: + +.. 
code-block:: python + + import paddle + import paddle.fluid as fluid + + ITERABLE = True + USE_CUDA = True + USE_DATA_PARALLEL = True + + if ITERABLE: + # 若DataLoader可迭代,则必须设置places参数 + if USE_DATA_PARALLEL: + # 若进行多GPU卡训练,则取所有的CUDAPlace + # 若进行多CPU核训练,则取多个CPUPlace,本例中取了8个CPUPlace + places = fluid.cuda_places() if USE_CUDA else fluid.cpu_places(8) + else: + # 若进行单GPU卡训练,则取单个CUDAPlace,本例中0代表0号GPU卡 + # 若进行单CPU核训练,则取单个CPUPlace,本例中1代表1个CPUPlace + places = fluid.cuda_places(0) if USE_CUDA else fluid.cpu_places(1) + else: + # 若DataLoader不可迭代,则不需要设置places参数 + places = None + + # 使用sample级的reader作为DataLoader的数据源 + data_loader1 = fluid.io.DataLoader.from_generator(feed_list=[image1, label1], capacity=10, iterable=ITERABLE) + data_loader1.set_sample_generator(fake_sample_reader, batch_size=32, places=places) + + # 使用sample级的reader + fluid.io.batch设置DataLoader的数据源 + data_loader2 = fluid.io.DataLoader.from_generator(feed_list=[image2, label2], capacity=10, iterable=ITERABLE) + sample_list_reader = fluid.io.batch(fake_sample_reader, batch_size=32) + sample_list_reader = fluid.io.shuffle(sample_list_reader, buf_size=64) # 还可以进行适当的shuffle + data_loader2.set_sample_list_generator(sample_list_reader, places=places) + + # 使用batch级的reader作为DataLoader的数据源 + data_loader3 = fluid.io.DataLoader.from_generator(feed_list=[image3, label3], capacity=10, iterable=ITERABLE) + data_loader3.set_batch_generator(fake_batch_reader, places=places) + +使用DataLoader进行模型训练和测试 +################################ + +使用DataLoader进行模型训练和测试的例程如下。 + +- 第一步,我们需组建训练网络和预测网络,并定义相应的DataLoader对象,设置好DataLoader对象的数据源。 + +.. code-block:: python + + import paddle + import paddle.fluid as fluid + import paddle.dataset.mnist as mnist + import six + + ITERABLE = True + + def network(): + # 创建数据层对象 + image = fluid.data(name='image', dtype='float32', shape=[None, 784]) + label = fluid.data(name='label', dtype='int64', shape=[None, 1]) + + # 创建DataLoader对象 + reader = fluid.io.DataLoader.from_generator(feed_list=[image, label], capacity=64, iterable=ITERABLE) + + # Definition of models + fc = fluid.layers.fc(image, size=10) + xe = fluid.layers.softmax_with_cross_entropy(fc, label) + loss = fluid.layers.reduce_mean(xe) + return loss , reader + + # 创建训练的main_program和startup_program + train_prog = fluid.Program() + train_startup = fluid.Program() + + # 定义训练网络 + with fluid.program_guard(train_prog, train_startup): + # fluid.unique_name.guard() to share parameters with test network + with fluid.unique_name.guard(): + train_loss, train_loader = network() + adam = fluid.optimizer.Adam(learning_rate=0.01) + adam.minimize(train_loss) + + # 创建预测的main_program和startup_program + test_prog = fluid.Program() + test_startup = fluid.Program() + + # 定义预测网络 + with fluid.program_guard(test_prog, test_startup): + # Use fluid.unique_name.guard() to share parameters with train network + with fluid.unique_name.guard(): + test_loss, test_loader = network() + + place = fluid.CUDAPlace(0) + exe = fluid.Executor(place) + + # 运行startup_program进行初始化 + exe.run(train_startup) + exe.run(test_startup) + + # Compile programs + train_prog = fluid.CompiledProgram(train_prog).with_data_parallel(loss_name=train_loss.name) + test_prog = fluid.CompiledProgram(test_prog).with_data_parallel(share_vars_from=train_prog) + + # 设置DataLoader的数据源 + places = fluid.cuda_places() if ITERABLE else None + + train_loader.set_sample_list_generator( + fluid.io.shuffle(fluid.io.batch(mnist.train(), 512), buf_size=1024), places=places) + + test_loader.set_sample_list_generator(fluid.io.batch(mnist.test(), 
512), places=places) + +- 第二步:根据DataLoader对象是否iterable,选用不同的方式运行网络。 + +若iterable=True,则DataLoader对象是一个Python的生成器,可直接for-range迭代。for-range返回的结果通过exe.run的feed参数传入执行器。 + +.. code-block:: python + + def run_iterable(program, exe, loss, data_loader): + for data in data_loader(): + loss_value = exe.run(program=program, feed=data, fetch_list=[loss]) + print('loss is {}'.format(loss_value)) + + for epoch_id in six.moves.range(10): + run_iterable(train_prog, exe, train_loss, train_loader) + run_iterable(test_prog, exe, test_loss, test_loader) + +若iterable=False,则需在每个epoch开始前,调用 :code:`start()` 方法启动DataLoader对象;并在每个epoch结束时,exe.run会抛出 :code:`fluid.core.EOFException` 异常,在捕获异常后调用 :code:`reset()` 方法重置DataLoader对象的状态, +以便启动下一轮的epoch。iterable=False时无需给exe.run传入feed参数。具体方式为: + +.. code-block:: python + + def run_non_iterable(program, exe, loss, data_loader): + data_loader.start() + try: + while True: + loss_value = exe.run(program=program, fetch_list=[loss]) + print('loss is {}'.format(loss_value)) + except fluid.core.EOFException: + print('End of epoch') + data_loader.reset() + + for epoch_id in six.moves.range(10): + run_non_iterable(train_prog, exe, train_loss, train_loader) + run_non_iterable(test_prog, exe, test_loss, test_loader) + diff --git a/doc/paddle/advanced_guide/data_preparing/use_py_reader_en.rst b/doc/paddle/advanced_guide/data_preparing/use_py_reader_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..b14b4a531ac961a6d43793c93c57931b5e7a0a06 --- /dev/null +++ b/doc/paddle/advanced_guide/data_preparing/use_py_reader_en.rst @@ -0,0 +1,266 @@ +.. _user_guide_use_py_reader_en: + +############################################ +Asynchronous Data Reading +############################################ + +Besides synchronous data reading, we provide DataLoader. The performance of DataLoader is better than :ref:`user_guide_use_numpy_array_as_train_data_en` , because data reading and model training process is asynchronous +when DataLoader is in use, and it can cooperate with :code:`double_buffer_reader` to improve the performance of reading data. What's more, :code:`double_buffer_reader` can achieve the asynchronous transformation from CPU Tensor to GPU Tensor, which improves the efficiency of reading data to some extent. + +Create DataLoader Object +################################ + +You can create DataLoader object as follows: + +.. code-block:: python + + import paddle.fluid as fluid + + image = fluid.data(name='image', dtype='float32', shape=[None, 784]) + label = fluid.data(name='label', dtype='int64', shape=[None, 1]) + + ITERABLE = True + + data_loader = fluid.io.DataLoader.from_generator( + feed_list=[image, label], capacity=64, use_double_buffer=True, iterable=ITERABLE) + +In the code, + +- ``feed_list`` is the list of input variables; +- ``capacity`` is the buffer size of the DataLoader object in batches; +- ``use_double_buffer`` is True by default, which means ``double_buffer_reader`` is used. It is recommended, because it can improve data reading speed; +- ``iterable`` is True by default, which means the DataLoader object is For-Range iterative. When ``iterable = True`` , DataLoader decouples from the Program, which means defining DataLoader objects does not change Program; when When ``iterable = False`` , DataLoader inserts operators related to data reading into Program. + + +Attention: ``Program.clone()`` (reference to :ref:`api_fluid_Program` )can't copy DataLoader objects. 
+If you want to create multiple DataLoader objects(such as two different DataLoaders in training and inference period respectively), you have to define different DataLoader objects. +While using DataLoader, if you need to share the model parameters of training and testing periods, you can use :code:`fluid.unique_name.guard()`. + +Notes: Paddle use different names to distinguish different variables, and the names are generated by the counter in :code:`unique_name` module, which rises by one every time a variable name is generated. :code:`fluid.unique_name.guard()` aims to reset the counter in :code:`unique_name` module, in order to ensure that the variable names are the same when calling :code:`fluid.unique_name.guard()` repeatedly, so that parameters can be shared. + + +An example of configuring networks during the training and testing periods by DataLoader is as follows: + +.. code-block:: python + + import paddle + import paddle.fluid as fluid + import paddle.dataset.mnist as mnist + + def network(): + image = fluid.data(name='image', dtype='float32', shape=[None, 784]) + label = fluid.data(name='label', dtype='int64', shape=[None, 1]) + loader = fluid.io.DataLoader.from_generator(feed_list=[image, label], capacity=64) + + # Define model. + fc = fluid.layers.fc(image, size=10) + xe = fluid.layers.softmax_with_cross_entropy(fc, label) + loss = fluid.layers.reduce_mean(xe) + return loss , loader + + # Create main program and startup program for training. + train_prog = fluid.Program() + train_startup = fluid.Program() + + with fluid.program_guard(train_prog, train_startup): + # Use fluid.unique_name.guard() to share parameters with test network. + with fluid.unique_name.guard(): + train_loss, train_loader = network() + adam = fluid.optimizer.Adam(learning_rate=0.01) + adam.minimize(train_loss) + + # Create main program and startup program for testing. + test_prog = fluid.Program() + test_startup = fluid.Program() + with fluid.program_guard(test_prog, test_startup): + # Use fluid.unique_name.guard() to share parameters with train network + with fluid.unique_name.guard(): + test_loss, test_loader = network() + + +Configure data source of DataLoader object +########################################## +DataLoader object sets the data source by :code:`set_sample_generator()`, :code:`set_sample_list_generator()` or :code:`set_batch_generator()` . These three methods all receive the Python generator :code:`generator` as parameters. The differences of are: + + - :code:`generator` of :code:`set_sample_generator()` should return data of :code:`[img_1, label_1]` type, in which ``img_1`` and ``label_1`` is one sample's data of Numpy array type. + + - :code:`generator` of :code:`set_sample_list_generator()` should return data of :code:`[(img_1, label_1), (img_2, label_2), ..., (img_n, label_n)]` type, in which ``img_i`` and ``label_i`` is one sample's data of Numpy array type, and ``n`` is batch size. + + - :code:`generator` of :code:`set_batch_generator()` should return data of :code:`[batched_imgs, batched_labels]` type, in which ``batched_imgs`` and ``batched_labels`` is one batch's data of Numpy array or LoDTensor type. + +Please note that, when using DataLoader for multi-GPU card (or multi-CPU core) training, the actual total batch size is the batch size of incoming user generator multiplied by the number of devices. + +When :code:`iterable = True` (default) of DataLoader, ``places`` parameters must be passed to these three methods, specifying whether to convert data to CPU Tensor or GPU Tensor. 
When :code:`iterable = False` of DataLoader, there is no need to pass the ``places`` parameter. + +For example, suppose we have two readers, ``fake_sample_reader`` returns one sample's data at a time and ``fake_batch_reader`` returns one batch's data at a time. + + .. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + # Declare sample reader. + def fake_sample_reader(): + for _ in range(100): + sample_image = np.random.random(size=(784, )).astype('float32') + sample_label = np.random.random_integers(size=(1, ), low=0, high=9).astype('int64') + yield sample_image, sample_label + + + # Declare batch reader. + def fake_batch_reader(): + batch_size = 32 + for _ in range(100): + batch_image = np.random.random(size=(batch_size, 784)).astype('float32') + batch_label = np.random.random_integers(size=(batch_size, 1), low=0, high=9).astype('int64') + yield batch_image, batch_label + + image1 = fluid.data(name='image1', dtype='float32', shape=[None, 784]) + label1 = fluid.data(name='label1', dtype='int64', shape=[None, 1]) + + image2 = fluid.data(name='image2', dtype='float32', shape=[None, 784]) + label2 = fluid.data(name='label2', dtype='int64', shape=[None, 1]) + + image3 = fluid.data(name='image3', dtype='float32', shape=[None, 784]) + label3 = fluid.data(name='label3', dtype='int64', shape=[None, 1]) + +The corresponding DataLoader are defined as follows: + +.. code-block:: python + + import paddle + import paddle.fluid as fluid + + ITERABLE = True + USE_CUDA = True + USE_DATA_PARALLEL = True + + if ITERABLE: + # If DataLoader is iterable, places should be set. + if USE_DATA_PARALLEL: + # Use all GPU cards or 8 CPU cores to train. + places = fluid.cuda_places() if USE_CUDA else fluid.cpu_places(8) + else: + # Use single GPU card or CPU core. + places = fluid.cuda_places(0) if USE_CUDA else fluid.cpu_places(1) + else: + # If DataLoader is not iterable, places shouldn't be set. + places = None + + # Use sample reader to configure data source of DataLoader. + data_loader1 = fluid.io.DataLoader.from_generator(feed_list=[image1, label1], capacity=10, iterable=ITERABLE) + data_loader1.set_sample_generator(fake_sample_reader, batch_size=32, places=places) + + # Use sample reader + fluid.io.batch to configure data source of DataLoader. + data_loader2 = fluid.io.DataLoader.from_generator(feed_list=[image2, label2], capacity=10, iterable=ITERABLE) + sample_list_reader = fluid.io.batch(fake_sample_reader, batch_size=32) + sample_list_reader = fluid.io.shuffle(sample_list_reader, buf_size=64) # Shuffle data if needed. + data_loader2.set_sample_list_generator(sample_list_reader, places=places) + + # Use batch to configure data source of DataLoader. + data_loader3 = fluid.io.DataLoader.from_generator(feed_list=[image3, label3], capacity=10, iterable=ITERABLE) + data_loader3.set_batch_generator(fake_batch_reader, places=places) + +Train and test model with DataLoader +################################## + +Examples of using DataLoader to train and test models are as follows: + +- Step 1, we need to set up training network and testing network, define the corresponding DataLoader object, and configure the data source of DataLoader object. + +.. code-block:: python + + import paddle + import paddle.fluid as fluid + import paddle.dataset.mnist as mnist + import six + + ITERABLE = True + + def network(): + # Create data holder. 
+ image = fluid.data(name='image', dtype='float32', shape=[None, 784]) + label = fluid.data(name='label', dtype='int64', shape=[None, 1]) + + # Create DataLoader object. + reader = fluid.io.DataLoader.from_generator(feed_list=[image, label], capacity=64, iterable=ITERABLE) + + # Define model. + fc = fluid.layers.fc(image, size=10) + xe = fluid.layers.softmax_with_cross_entropy(fc, label) + loss = fluid.layers.reduce_mean(xe) + return loss , reader + + # Create main program and startup program for training. + train_prog = fluid.Program() + train_startup = fluid.Program() + + # Define training network. + with fluid.program_guard(train_prog, train_startup): + # fluid.unique_name.guard() to share parameters with test network + with fluid.unique_name.guard(): + train_loss, train_loader = network() + adam = fluid.optimizer.Adam(learning_rate=0.01) + adam.minimize(train_loss) + + # Create main program and startup program for testing. + test_prog = fluid.Program() + test_startup = fluid.Program() + + # Define testing network. + with fluid.program_guard(test_prog, test_startup): + # Use fluid.unique_name.guard() to share parameters with train network + with fluid.unique_name.guard(): + test_loss, test_loader = network() + + place = fluid.CUDAPlace(0) + exe = fluid.Executor(place) + + # Run startup_program for initialization. + exe.run(train_startup) + exe.run(test_startup) + + # Compile programs. + train_prog = fluid.CompiledProgram(train_prog).with_data_parallel(loss_name=train_loss.name) + test_prog = fluid.CompiledProgram(test_prog).with_data_parallel(share_vars_from=train_prog) + + # Configure data source of DataLoader. + places = fluid.cuda_places() if ITERABLE else None + + train_loader.set_sample_list_generator( + fluid.io.shuffle(fluid.io.batch(mnist.train(), 512), buf_size=1024), places=places) + + test_loader.set_sample_list_generator(fluid.io.batch(mnist.test(), 512), places=places) + +- Step 2, we choose different ways to run the network according to whether the DataLoader object is iterable or not. + +If :code:`iterable = True`, the DataLoader object is a Python generator that can iterate directly using for-range. The results returned by for-range are passed to the executor through the ``feed`` parameter of ``exe.run()``. + +.. code-block:: python + + def run_iterable(program, exe, loss, data_loader): + for data in data_loader(): + loss_value = exe.run(program=program, feed=data, fetch_list=[loss]) + print('loss is {}'.format(loss_value)) + + for epoch_id in six.moves.range(10): + run_iterable(train_prog, exe, train_loss, train_loader) + run_iterable(test_prog, exe, test_loss, test_loader) + +If :code:`iterable = False`, call the ``start()`` method to start the DataLoader object before each epoch starts, and call the ``reset()`` method to reset the status of the DataLoader object after catching the exception to start the iteration of next epoch, since ``exe.run()`` throws a ``fluid.core.EOFException`` exception at the end of each epoch. When :code:`iterable = False`, there is no need to pass ``feed`` parameter to ``exe.run()``. The specific ways are as follows: + +.. 
code-block:: python + + def run_non_iterable(program, exe, loss, data_loader): + data_loader.start() + try: + while True: + loss_value = exe.run(program=program, fetch_list=[loss]) + print('loss is {}'.format(loss_value)) + except fluid.core.EOFException: + print('End of epoch') + data_loader.reset() + + for epoch_id in six.moves.range(10): + run_non_iterable(train_prog, exe, train_loss, train_loader) + run_non_iterable(test_prog, exe, test_loss, test_loader) \ No newline at end of file diff --git a/doc/paddle/advanced_guide/distributed_training/cluster_howto.rst b/doc/paddle/advanced_guide/distributed_training/cluster_howto.rst new file mode 100644 index 0000000000000000000000000000000000000000..0c428942065c1bb1d2b6742d7d75d7638d8b404b --- /dev/null +++ b/doc/paddle/advanced_guide/distributed_training/cluster_howto.rst @@ -0,0 +1,271 @@ +.. _cluster_howto: + +分布式训练使用手册 +==================== + +分布式训练基本思想 +--------------- + +分布式深度学习训练通常分为两种并行化方法:数据并行,模型并行,参考下图: + +.. image:: src/parallelism.png + +在模型并行方式下,模型的层和参数将被分布在多个节点上,模型在一个mini-batch的前向和反向训练中,将经过多次跨\ +节点之间的通信。每个节点只保存整个模型的一部分;在数据并行方式下,每个节点保存有完整的模型的层和参数,每个节点\ +独自完成前向和反向计算,然后完成梯度的聚合并同步的更新所有节点上的参数。Fluid目前版本仅提供数据并行方式,另外\ +诸如模型并行的特例实现(超大稀疏模型训练)功能将在后续的文档中予以说明。 + +在数据并行模式的训练中,Fluid使用了两种通信模式,用于应对不同训练任务对分布式训练的要求,分别为RPC通信和Collective +通信。其中RPC通信方式使用 `gRPC `_ ,Collective通信方式使用 +`NCCL2 `_ 。 + +**RPC通信和Collective通信的横向对比如下:** + +.. csv-table:: + :header: "Feature", "Collective", "RPC" + + "Ring-Based通信", "Yes", "No" + "异步训练", "Yes", "Yes" + "分布式模型", "No", "Yes" + "容错训练", "No", "Yes" + "性能", "Faster", "Fast" + +- RPC通信方式的结构: + + .. image:: src/dist_train_pserver.png + + 使用RPC通信方式的数据并行分布式训练,会启动多个pserver进程和多个trainer进程,每个pserver进程\ + 会保存一部分模型参数,并负责接收从trainer发送的梯度并更新这些模型参数;每个trainer进程会保存一份\ + 完整的模型,并使用一部分数据进行训练,然后向pserver发送梯度,最后从pserver拉取更新后的参数。 + + pserver进程可以在和trainer完全不同的计算节点上,也可以和trainer公用节点。一个分布式任务所需要的\ + pserver进程个数通常需要根据实际情况调整,以达到最佳的性能,然而通常来说pserver的进程不会比trainer\ + 更多。 + + **注:** 在使用GPU训练时,pserver可以选择使用GPU或只使用CPU,如果pserver也使用GPU,则会增加一次从CPU拷贝\ + 接收到的梯度数据到GPU的开销,在某些情况下会导致整体训练性能降低。 + + **注:** 在使用GPU训练时,如果每个trainer节点有多个GPU卡,则会先在每个trainer节点的多个卡之间执行\ + NCCL2通信方式的梯度聚合,然后再通过pserver聚合多个节点的梯度。 + +- NCCL2通信方式的结构: + + .. image:: src/dist_train_nccl2.png + + 使用NCCL2(Collective通信方式)进行分布式训练,是不需要启动pserver进程的,每个trainer进程都保存\ + 一份完整的模型参数,在完成计算梯度之后通过trainer之间的相互通信,Reduce梯度数据到所有节点的所有设备\ + 然后每个节点在各自完成参数更新。 + +使用parameter server方式的训练 +------------------------------ + +使用 :code:`transpiler` API可以把单机可以执行的程序快速转变成可以分布式执行的程序。在不同的服务器节点 +上,通过传给 :code:`transpiler` 对应的参数,以获取当前节点需要执行的 :code:`Program` 。 + +需要配置参数包括 +++++++++++++++++++ +.. csv-table:: + :header: "参数", "说明" + + "role", "\ **必选**\ 区分作为pserver启动还是trainer启动,不传给transpile,也可以用其他的变量名或环境变量" + "trainer_id", "\ **必选**\ 如果是trainer进程,用于指定当前trainer在任务中的唯一id,从0开始,在一个任务中需保证不重复" + "pservers", "\ **必选**\ 当前任务所有pserver的ip:port列表字符串,形式比如:127.0.0.1:6170,127.0.0.1:6171" + "trainers", "\ **必选**\ trainer节点的个数" + "sync_mode", "\ **可选**\ True为同步模式,False为异步模式" + "startup_program", "\ **可选**\ 如果startup_program不是默认的fluid.default_startup_program(),需要传入此参数" + "current_endpoint", "\ **可选**\ 只有NCCL2模式需要传这个参数" + +一个例子,假设有两个节点,分别是 :code:`192.168.1.1` 和 :code:`192.168.1.2` ,使用端口6170,启动4个trainer, +则代码可以写成: + +.. 
code-block:: python + + role = "PSERVER" + trainer_id = 0 # get actual trainer id from cluster + pserver_endpoints = "192.168.1.1:6170,192.168.1.2:6170" + current_endpoint = "192.168.1.1:6170" # get actual current endpoint + trainers = 4 + t = fluid.DistributeTranspiler() + t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers) + if role == "PSERVER": + pserver_prog = t.get_pserver_program(current_endpoint) + pserver_startup = t.get_startup_program(current_endpoint, + pserver_prog) + exe.run(pserver_startup) + exe.run(pserver_prog) + elif role == "TRAINER": + train_loop(t.get_trainer_program()) + + +选择同步或异步训练 +++++++++++++++++++ + +Fluid分布式任务可以支持同步训练或异步训练,在同步训练方式下,所有的trainer节点,会在每个mini-batch +同步地合并所有节点的梯度数据并发送给parameter server完成更新,在异步训练方式下,每个trainer没有相互\ +同步等待的过程,可以独立地更新parameter server的参数。通常情况下,使用异步训练方式,可以在trainer节点\ +更多的时候比同步训练方式有更高的总体吞吐量。 + +在调用 :code:`transpile` 函数时,默认会生成同步训练的分布式程序,通过指定 :code:`sync_mode=False` +参数即可生成异步训练的程序: + +.. code-block:: python + + t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers, sync_mode=False) + + + +选择是否使用分布式embedding表进行训练 +++++++++++++++++++++++++++++++++++++ + +embedding被广泛应用在各种网络结构中,尤其是文本处理相关的模型。在某些场景,例如推荐系统或者搜索引擎中, +embedding的feature id可能会非常多,当feature id达到一定数量时,embedding参数会变得很大,一方面可能 +单机内存无法存放导致无法训练,另一方面普通的训练模式每一轮迭代都需要同步完整的参数,参数太大会让通信变得 +非常慢,进而影响训练速度。 + +Fluid支持千亿量级超大规模稀疏特征embedding的训练,embedding参数只会保存在parameter server上,通过 +参数prefetch和梯度稀疏更新的方法,大大减少通信量,提高通信速度。 + +该功能只对分布式训练有效,单机无法使用。 +需要配合稀疏更新一起使用。 + +使用方法,在配置embedding的时候,加上参数 :code:`is_distributed=True` 以及 :code:`is_sparse=True` 即可。 +参数 :code:`dict_size` 定义数据中总的id的数量,id可以是int64范围内的任意值,只要总id个数小于等于dict_size就可以支持。 +所以配置之前需要预估一下数据中总的feature id的数量。 + +.. code-block:: python + + emb = fluid.layers.embedding( + is_distributed=True, + input=input, + size=[dict_size, embedding_width], + is_sparse=True) + + +选择参数分布方法 +++++++++++++++++ + +参数 :code:`split_method` 可以指定参数在parameter server上的分布方式。 + +Fluid默认使用 `RoundRobin `_ +方式将参数分布在多个parameter server上。此方式在默认未关闭参数切分的情况下,参数会较平均的分布在所有的 +parameter server上。如果需要使用其他,可以传入其他的方法,目前可选的方法有: :code:`RoundRobin` 和 +:code:`HashName` 。也可以使用自定义的分布方式,只需要参考 +`这里 `_ +编写自定义的分布函数。 + + +关闭切分参数 +++++++++++++ + +参数 :code:`slice_var_up` 指定是否将较大(大于8192个元素)的参数切分到多个parameter server以均衡计算负载,默认为开启。 + +当模型中的可训练参数体积比较均匀或者使用自定义的参数分布方法是参数均匀分布在多个parameter server上, +可以选择关闭切分参数,这样可以降低切分和重组带来的计算和拷贝开销: + +.. code-block:: python + + t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers, slice_var_up=False) + + +开启内存优化 +++++++++++++ + +在parameter server分布式训练模式下,要开启内存优化 :code:`memory_optimize` 和单机相比,需要注意按照下面的规则配置: + +* 在pserver端,\ **不要**\ 执行 :code:`memory_optimize` +* 在trainer端,先执行 :code:`fluid.memory_optimize` 再执行 :code:`t.transpile()` +* 在trainer端,调用 :code:`memory_optimize` 需要增加 :code:`skip_grads=True` 确保发送的梯度不会被重命名: :code:`fluid.memory_optimize(input_program, skip_grads=True)` + +示例: + +.. 
code-block:: python + + if role == "TRAINER": + fluid.memory_optimize(fluid.default_main_program(), skip_grads=True) + t = fluid.DistributeTranspiler() + t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers) + if role == "PSERVER": + # start pserver here + elif role == "TRAINER": + # start trainer here + + +使用NCCL2通信方式的训练 +-------------------- + +NCCL2模式的分布式训练,由于没有parameter server角色,是trainer之间互相通信,使用时注意: + +* 配置 :code:`fluid.DistributeTranspilerConfig` 中 :code:`mode="nccl2"` 。 +* 调用 :code:`transpile` 时,:code:`trainers` 传入所有trainer节点的endpoint,并且传入参数 :code:`current_endpoint` 。 + 在此步骤中,会在 :code:`startup program` 中增加 :code:`gen_nccl_id_op` 用于在多机程序初始化时同步NCCLID信息。 +* 初始化 :code:`ParallelExecutor` 时传入 :code:`num_trainers` 和 :code:`trainer_id` 。 + 在此步骤中,:code:`ParallelExecutor` 会使用多机方式初始化NCCL2并可以开始在多个节点对每个参数对应的梯度执行跨节点的 + :code:`allreduce` 操作,执行多机同步训练 + +一个例子: + +.. code-block:: python + + trainer_id = 0 # get actual trainer id here + trainers = "192.168.1.1:6170,192.168.1.2:6170" + current_endpoint = "192.168.1.1:6170" + config = fluid.DistributeTranspilerConfig() + config.mode = "nccl2" + t = fluid.DistributeTranspiler(config=config) + t.transpile(trainer_id, trainers=trainers, current_endpoint=current_endpoint) + exe = fluid.ParallelExecutor(use_cuda, + loss_name=loss_name, num_trainers=len(trainers.split(",")), trainer_id=trainer_id) + ... + +NCCL2模式必要参数说明 +++++++++++++++++++++++++++++++++++++++ +.. csv-table:: + :header: "参数", "说明" + + "trainer_id", "(int) 任务中每个trainer节点的唯一ID,从0开始,不能有重复" + "trainers", "(int) 任务中所有trainer节点的endpoint,用于在NCCL2初始化时,广播NCCL ID" + "current_endpoint", "(string) 当前节点的endpoint" + +目前使用NCCL2进行分布式训练仅支持同步训练方式。使用NCCL2方式的分布式训练,更适合模型体积较大,并需要使用\ +同步训练和GPU训练,如果硬件设备支持RDMA和GPU Direct,可以达到很高的分布式训练性能。 + +启动多进程模式 NCCL2 分布式训练作业 ++++++++++++++++++++++++++++++++++ + +通常情况下使用多进程模式启动 NCCL2 分布式训练作业可以获得更好多训练性能,Paddle 提供了 +:code:`paddle.distributed.launch` 模块可以方便地启动多进程作业,启动后每个训练进程将会使用一块独立的 GPU 设备。 +使用时需要注意: + +* 设置节点数:通过环境变量 :code:`PADDLE_NUM_TRAINERS` 设置作业的节点数,此环境变量也会被设置在每个训练进程中。 +* 设置每个节点的设备数:通过启动参数 :code:`--gpus` 可以设置每个节点的 GPU 设备数量,每个进程的序号将会被自动设置在环境变量 + :code:`PADDLE_TRAINER_ID` 中。 +* 数据切分: 多进程模式是每个设备一个进程,一般来说需要每个进程处理一部分训练数据,并且保证所有进程能够处理完整的数据集。 +* 入口文件:入口文件为实际启动的训练脚本。 +* 日志:每个训练进程的日志默认会保存在 :code:`./mylog` 目录下,您也可以通过参数 :code:`--log_dir` 进行指定。 + +启动样例: + +.. code-block:: bash + + > PADDLE_NUM_TRAINERS= python -m paddle.distributed.launch --gpus --arg1 --arg2 ... + + +NCCL2分布式训练注意事项 ++++++++++++++++++++++ + +**注意:** 使用NCCL2模式分布式训练时,需要确保每个节点训练等量的数据,防止在最后一轮训练中任务不退出。通常有两种方式: + +- 随机采样一些数据,补全分配到较少数据的节点上。(推荐使用这种方法,以训练完整的数据集)。 +- 在python代码中,每个节点每个pass只训练固定的batch数,如果这个节点数据较多,则不训练这些多出来的数据。 + +**说明:** 使用NCCL2模式分布式训练时,如果只希望使用一个节点上的部分卡,可以通过配置环境变量::code:`export CUDA_VISIBLE_DEVICES=0,1,2,3` 指定。 + +**注意:** 如果系统中有多个网络设备,需要手动指定NCCL2使用的设备,假设需要使用 :code:`eth2` 为通信设备,需要设定如下环境变量: + + +.. code-block:: bash + + export NCCL_SOCKET_IFNAME=eth2 + +另外NCCL2提供了其他的开关环境变量,比如指定是否开启GPU Direct,是否使用RDMA等,详情可以参考 +`ncclknobs `_ 。 diff --git a/doc/paddle/advanced_guide/distributed_training/cluster_howto_en.rst b/doc/paddle/advanced_guide/distributed_training/cluster_howto_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..6f282c21a6807bfdaedc961eb9510888444cf593 --- /dev/null +++ b/doc/paddle/advanced_guide/distributed_training/cluster_howto_en.rst @@ -0,0 +1,252 @@ +.. 
_cluster_howto_en:
+
+Manual for Distributed Training with Fluid
+==========================================
+
+Basic Idea Of Distributed Training
+-------------------------------------
+
+Distributed deep learning training is usually divided into two parallelization methods: data parallelism and model parallelism. Refer to the following figure:
+
+.. image:: src/parallelism.png
+
+In the model parallelism mode, the layers and parameters of the model are distributed over multiple nodes, and the model goes through multiple cross-node communications during the forward and backward propagation of one mini-batch. Each node only saves a part of the entire model.
+
+In the data parallelism mode, each node holds the complete layers and parameters of the model, performs the forward and backward computation on its own, and then aggregates the gradients and updates the parameters on all nodes synchronously.
+
+The current version of Fluid only provides the data parallelism mode. Implementations of special cases of model parallelism (e.g. very large sparse model training) will be explained in subsequent documents.
+
+For training in the data parallelism mode, Fluid provides two communication modes to meet the requirements of different training tasks, namely RPC communication and Collective communication. The RPC communication method uses `gRPC `_ , and the Collective communication method uses `NCCL2 `_ .
+
+.. csv-table:: A horizontal comparison of RPC communication and Collective communication
+   :header: "Feature", "Collective", "RPC"
+
+   "Ring-Based Communication", "Yes", "No"
+   "Asynchronous Training", "Yes", "Yes"
+   "Distributed Model", "No", "Yes"
+   "Fault-tolerant Training", "No", "Yes"
+   "Performance", "Faster", "Fast"
+
+- Structure of the RPC communication method:
+
+  .. image:: src/dist_train_pserver.png
+
+  Data-parallel distributed training in RPC communication mode starts multiple pserver processes and multiple trainer processes. Each pserver process holds a part of the model parameters and is responsible for receiving the gradients sent from the trainers and updating these parameters; each trainer process holds a copy of the complete model, trains on a part of the data, sends the gradients to the pservers, and finally pulls the updated parameters from the pservers.
+
+  The pserver processes can run on compute nodes that are completely separate from the trainers, or they can share nodes with the trainers. The number of pserver processes required for a distributed task usually needs to be tuned to the actual situation to achieve the best performance; however, there are usually no more pserver processes than trainer processes.
+
+  **Note:** When training with GPUs, the pserver can use either the GPU or only the CPU. If the pserver also uses the GPU, the gradient data received on the CPU has to be copied to the GPU, and in some cases this extra overhead degrades the overall training performance.
+
+  **Note:** When training with GPUs, if each trainer node has multiple GPU cards, gradients are first aggregated among the cards within each trainer node using NCCL2, and then aggregated across nodes through the pservers.
+
+- Structure of the NCCL2 communication method:
+
+  .. image:: src/dist_train_nccl2.png
+
+Distributed training with NCCL2 (the Collective communication method) does not need pserver processes.
Each trainer process holds a complete copy of the model parameters. After the gradients are computed, the trainers reduce the gradient data to all devices on all nodes through mutual communication, and then each node updates its own parameters.
+
+Training in the Parameter Server Manner
+----------------------------------------------
+
+Use the :code:`transpiler` API to quickly convert a program that can be executed on a single machine into a program that can be executed in a distributed manner. On each server node, pass the corresponding arguments to :code:`transpiler` to get the :code:`Program` that the current node is to execute:
+
+
+.. csv-table:: required configuration parameters
+   :header: "parameter", "description"
+
+   "role", "\ **required**\ distinguishes whether to start as pserver or trainer; this argument is not passed into ``transpile``, and you can also use other variable names or environment variables"
+   "trainer_id", "\ **required**\ if it is a trainer process, it specifies the unique id of the current trainer in the task, starting from 0, and it must not be repeated within one task"
+   "pservers", "\ **required**\ ip:port list string of all pservers in the current task, for example: 127.0.0.1:6170,127.0.0.1:6171"
+   "trainers", "\ **required**\ the number of trainer nodes"
+   "sync_mode", "\ **optional**\ True for synchronous mode, False for asynchronous mode"
+   "startup_program", "\ **optional**\ if startup_program is not the default fluid.default_startup_program(), this parameter needs to be passed in"
+   "current_endpoint", "\ **optional**\ this parameter is only required for NCCL2 mode"
+
+For example, suppose there are two nodes, :code:`192.168.1.1` and :code:`192.168.1.2`, that use port 6170 and start 4 trainers.
+Then the code can be written as:
+
+.. code-block:: python
+
+    role = "PSERVER"
+    trainer_id = 0  # get actual trainer id from cluster
+    pserver_endpoints = "192.168.1.1:6170,192.168.1.2:6170"
+    current_endpoint = "192.168.1.1:6170"  # get actual current endpoint
+    trainers = 4
+    t = fluid.DistributeTranspiler()
+    t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers)
+    if role == "PSERVER":
+        pserver_prog = t.get_pserver_program(current_endpoint)
+        pserver_startup = t.get_startup_program(current_endpoint, pserver_prog)
+        exe.run(pserver_startup)
+        exe.run(pserver_prog)
+    elif role == "TRAINER":
+        train_loop(t.get_trainer_program())
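+
+The role, ids and endpoints in the example above are hard-coded placeholders. Below is a minimal sketch of one way to fill them in from environment variables; the variable names used here (``TRAINING_ROLE``, ``TRAINER_ID``, ``PSERVER_ENDPOINTS``, ``CURRENT_ENDPOINT``, ``TRAINERS_NUM``) are illustrative assumptions rather than a fixed Paddle convention, so adapt them to whatever your cluster scheduler actually exports.
+
+.. code-block:: python
+
+    import os
+
+    # Illustrative only: these environment variable names are assumptions,
+    # not a fixed Paddle convention.
+    role = os.getenv("TRAINING_ROLE", "TRAINER")        # "PSERVER" or "TRAINER"
+    trainer_id = int(os.getenv("TRAINER_ID", "0"))      # unique id, starting from 0
+    pserver_endpoints = os.getenv("PSERVER_ENDPOINTS",
+                                  "192.168.1.1:6170,192.168.1.2:6170")
+    current_endpoint = os.getenv("CURRENT_ENDPOINT", "192.168.1.1:6170")
+    trainers = int(os.getenv("TRAINERS_NUM", "4"))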
+
+
+Choose Synchronous Or Asynchronous Training
++++++++++++++++++++++++++++++++++++++++++++++
+
+Fluid distributed tasks support synchronous training or asynchronous training.
+
+In the synchronous training mode, all trainer nodes merge the gradient data of all nodes synchronously per mini-batch and send it to the parameter servers to complete the update.
+
+In the asynchronous mode, the trainers do not wait for each other and independently update the parameters on the parameter servers.
+
+In general, asynchronous training can achieve a higher overall throughput than synchronous training when there are many trainer nodes.
+
+By default, calling the :code:`transpile` function generates a synchronous training program. An asynchronous training program can be generated by specifying the :code:`sync_mode=False` parameter:
+
+.. code-block:: python
+
+    t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers, sync_mode=False)
+
+
+
+Whether To Use The Distributed Embedding Table For Training
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+Embedding is widely used in various network structures, especially in text processing related models.
+In some scenarios, such as recommendation systems or search engines, the number of embedding feature ids may be very large. When it reaches a certain number, the embedding parameter becomes very large:
+on the one hand, a single machine may not have enough memory to hold it, making training impossible;
+on the other hand, the normal training mode needs to synchronize the complete parameter in every iteration, and a parameter that is too large makes the communication very slow, which in turn slows down training.
+
+Fluid supports training very large scale sparse-feature embeddings with up to hundreds of billions of ids. The embedding parameter is only saved on the parameter servers, and parameter prefetching and sparse gradient updates greatly reduce the traffic and improve the communication speed.
+
+This feature is only valid for distributed training and cannot be used on a single machine. It must be used together with sparse updates.
+
+Usage: when configuring the embedding, add the parameters :code:`is_distributed=True` and :code:`is_sparse=True`.
+The parameter :code:`dict_size` defines the total number of ids in the data. The ids can be any values in the int64 range, as long as the total number of ids is less than or equal to dict_size. So before configuring, you need to estimate the total number of feature ids in the data.
+
+.. code-block:: python
+
+    emb = fluid.layers.embedding(
+        is_distributed=True,
+        input=input,
+        size=[dict_size, embedding_width],
+        is_sparse=True)
+
+
+Select Parameter Distribution Method
+++++++++++++++++++++++++++++++++++++++
+
+The parameter :code:`split_method` specifies how the parameters are distributed over the parameter servers.
+
+Fluid uses `RoundRobin `_ by default to scatter parameters to multiple parameter servers.
+In this case, and with parameter slicing not turned off (the default), the parameters are distributed fairly evenly over all parameter servers.
+If you need a different behavior, you can pass in another method. The currently available methods are :code:`RoundRobin` and :code:`HashName` . You can also use a customized distribution method; just refer to `here `_
+to write a customized distribution function.
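+
+As a rough, standalone illustration of the idea behind :code:`HashName`-style placement (hashing a variable name to pick a pserver endpoint), a sketch is given below. It is **not** the interface that :code:`split_method` expects; the actual base class and method signature are defined in the source linked above, so treat this only as a conceptual example.
+
+.. code-block:: python
+
+    # Conceptual sketch only: deterministic, hash-based placement of
+    # parameters onto pserver endpoints.
+    import hashlib
+
+    def place_by_name(var_names, pserver_endpoints):
+        placement = {}
+        for name in var_names:
+            digest = int(hashlib.md5(name.encode("utf-8")).hexdigest(), 16)
+            placement[name] = pserver_endpoints[digest % len(pserver_endpoints)]
+        return placement
+
+    print(place_by_name(["fc_0.w_0", "fc_0.b_0"],
+                        ["192.168.1.1:6170", "192.168.1.2:6170"]))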
+
+
+Turn Off the Slicing of Parameters
+++++++++++++++++++++++++++++++++++++++
+
+The parameter :code:`slice_var_up` specifies whether to slice large parameters (more than 8192 elements) across multiple parameter servers to balance the computational load. It is on by default.
+
+When the sizes of the trainable parameters in the model are relatively uniform, or a customized parameter distribution method that evenly distributes the parameters over multiple parameter servers is used, you can turn off the slicing function, which reduces the computational and copying overhead of slicing and reassembly:
+
+.. code-block:: python
+
+    t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers, slice_var_up=False)
+
+
+Turn On Memory Optimization
++++++++++++++++++++++++++++++++
+
+In the parameter server distributed training mode, enabling memory optimization :code:`memory_optimize` requires a few extra rules compared with single-machine training:
+
+- On the pserver side, **do not** execute :code:`memory_optimize`
+- On the trainer side, execute :code:`fluid.memory_optimize` first and then execute :code:`t.transpile()`
+- On the trainer side, call :code:`memory_optimize` with :code:`skip_grads=True` to ensure that the gradients to be sent are not renamed: :code:`fluid.memory_optimize(input_program, skip_grads=True)`
+
+Example:
+
+.. code-block:: python
+
+    if role == "TRAINER":
+        fluid.memory_optimize(fluid.default_main_program(), skip_grads=True)
+    t = fluid.DistributeTranspiler()
+    t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers)
+    if role == "PSERVER":
+        # start pserver here
+    elif role == "TRAINER":
+        # start trainer here
+
+
+Training Using NCCL2 Communication
+------------------------------------
+
+In NCCL2 mode there is no parameter server role; the trainers communicate with each other directly. Pay attention to the following when using it:
+
+* Configure :code:`mode="nccl2"` in :code:`fluid.DistributeTranspilerConfig` .
+* When calling :code:`transpile`, pass the endpoints of all trainer nodes in :code:`trainers` and also pass the argument :code:`current_endpoint` .
+  In this step, a :code:`gen_nccl_id_op` is added to the :code:`startup program` to synchronize the NCCL ID information during multi-node program initialization.
+* Initialize :code:`ParallelExecutor` with :code:`num_trainers` and :code:`trainer_id` .
+  In this step, :code:`ParallelExecutor` initializes NCCL2 in the multi-node way and performs the cross-node :code:`allreduce` operation on the gradient of every parameter to execute multi-node synchronous training.
+
+For example:
+
+.. code-block:: python
+
+    trainer_id = 0  # get actual trainer id here
+    trainers = "192.168.1.1:6170,192.168.1.2:6170"
+    current_endpoint = "192.168.1.1:6170"
+    config = fluid.DistributeTranspilerConfig()
+    config.mode = "nccl2"
+    t = fluid.DistributeTranspiler(config=config)
+    t.transpile(trainer_id, trainers=trainers, current_endpoint=current_endpoint)
+    exe = fluid.ParallelExecutor(use_cuda,
+        loss_name=loss_name, num_trainers=len(trainers.split(",")), trainer_id=trainer_id)
+    ...
+
+.. csv-table:: Description of the necessary parameters for NCCL2 mode
+   :header: "parameter", "description"
+
+   "trainer_id", "(int) The unique ID of each trainer node in the task, starting at 0; there cannot be any duplication"
+   "trainers", "(string) endpoints of all trainer nodes in the task, used to broadcast NCCL IDs when NCCL2 is initialized"
+   "current_endpoint", "(string) endpoint of the current node"
+
+Currently, distributed training using NCCL2 only supports synchronous training. NCCL2 mode is more suitable for models that are relatively large and need synchronous GPU training. If the hardware supports RDMA and GPU Direct, it can achieve very high distributed training performance.
+
+Start Up NCCL2 Distributed Training in Multi-Process Mode
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+Usually you can get better training performance by starting an NCCL2 distributed training job in multi-process mode. Paddle provides the :code:`paddle.distributed.launch` module to conveniently start multi-process jobs; after startup, each training process uses an independent GPU device.
+
+Pay attention to the following during usage:
+
+* Set the number of nodes: set the number of nodes of a job through the environment variable :code:`PADDLE_NUM_TRAINERS` ; this variable will also be set in every training process.
+* Set the number of devices of each node: the launch argument :code:`--gpus` sets the number of GPU devices of each node, and the sequence number of each process is automatically set in the environment variable :code:`PADDLE_TRAINER_ID` .
+* Data sharding: multi-process mode runs one process per device. Generally, each process should handle a part of the training data, and all processes together should cover the complete dataset.
+* Entry file: the entry file is the training script that is actually started.
+* Logs: the log of each training process is saved in the :code:`./mylog` directory by default; you can change this with the :code:`--log_dir` argument.
+
+Startup example:
+
+.. code-block:: bash
+
+    > PADDLE_NUM_TRAINERS= python -m paddle.distributed.launch --gpus --arg1 --arg2 ...
+
+Important Notes on NCCL2 Distributed Training
+++++++++++++++++++++++++++++++++++++++++++++++
+
+**Note:** When using distributed training in NCCL2 mode, if you only want to use some of the cards in one node, you can specify them with the environment variable :code:`export CUDA_VISIBLE_DEVICES=0,1,2,3` .
+
+**Note:** Please ensure that each node trains the same amount of data in NCCL2 mode distributed training, which prevents the job from hanging at the final iteration. There are two common ways:
+
+- Randomly sample some data to pad the nodes that were assigned less data. (This method is recommended, so that the complete dataset is trained.)
+- Each node only trains a fixed number of batches per pass, which is controlled in Python code; if a node has more data than this fixed amount, the surplus data is simply not trained (see the sketch below).
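+
+A minimal sketch of the second approach is given below, assuming an iterable DataLoader (or any other generator of batches). The batch limit and the names in the commented usage lines (``train_loader``, ``compiled_prog``, ``loss``) are illustrative placeholders that you would replace with the values from your own training loop.
+
+.. code-block:: python
+
+    # Illustrative sketch: cap every node at the same number of batches per
+    # pass so that no trainer keeps running after the others have finished.
+    BATCHES_PER_PASS = 100  # assumed to be <= the smallest per-node batch count
+
+    def bounded_batches(data_loader):
+        for batch_id, data in enumerate(data_loader()):
+            if batch_id >= BATCHES_PER_PASS:
+                break  # drop the surplus batches on nodes that own more data
+            yield data
+
+    # for data in bounded_batches(train_loader):
+    #     exe.run(compiled_prog, feed=data, fetch_list=[loss])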
+
+**Note:** If there are multiple network devices in the system, you need to manually specify the device used by NCCL2.
+
+Assuming you need to use :code:`eth2` as the communication device, you need to set the following environment variable:
+
+.. code-block:: bash
+
+    export NCCL_SOCKET_IFNAME=eth2
+
+In addition, NCCL2 provides other switch environment variables, such as whether to enable GPU Direct and whether to use RDMA. For details, please refer to
+`ncclknobs `_ .
diff --git a/doc/paddle/advanced_guide/distributed_training/cluster_quick_start.rst b/doc/paddle/advanced_guide/distributed_training/cluster_quick_start.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1988aee0ae578f584b723bdf38010945b264320d
--- /dev/null
+++ b/doc/paddle/advanced_guide/distributed_training/cluster_quick_start.rst
@@ -0,0 +1,137 @@
+.. _cluster_quick_start:
+
+分布式训练快速开始
+==================
+
+使用Fleet API进行分布式训练
+---------------------------
+
+从Paddle Fluid `Release 1.5.1 `_ 开始,官方推荐使用Fleet API进行分布式训练,关于Fleet API的介绍可以参考 `Fleet Design Doc `_
+
+
+准备条件
+^^^^^^^^
+
+
+*
+  [x] 成功安装Paddle Fluid,如果尚未安装,请参考 `快速开始 `_
+
+*
+  [x] 学会最基本的单机训练方法,请参考 `单机训练 `_ 中描述的单卡训练,进行学习
+
+点击率预估任务
+^^^^^^^^^^^^^^
+
+本文使用一个简单的示例,点击率预估任务,来说明如何使用Fleet API进行分布式训练的配置方法,并利用单机环境模拟分布式环境给出运行示例。示例的源码来自 `CTR with Fleet `_
+
+
+为了方便学习,这里给出的示例是单机与多机混合的代码,用户可以通过不同的启动命令进行单机或多机任务的启动。获取数据的部分,以及对数据预处理的逻辑可以参考 `CTR with Fleet `_ 的源码和说明,这里不做过多描述。
+
+.. 
code-block:: python + + from __future__ import print_function + from args import parse_args + import os + import paddle.fluid as fluid + import sys + from network_conf import ctr_dnn_model_dataset + import paddle.fluid.incubate.fleet.base.role_maker as role_maker + + from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet + from paddle.fluid.transpiler.distribute_transpiler import DistributeTranspilerConfig + + dense_feature_dim = 13 + sparse_feature_dim = 10000001 + batch_size = 100 + thread_num = 10 + embedding_size = 10 + args = parse_args() + + def main_function(is_local): + # common code for local training and distributed training + dense_input = fluid.layers.data( + name="dense_input", shape=[dense_feature_dim], dtype='float32') + + sparse_input_ids = [ + fluid.layers.data(name="C" + str(i), shape=[1], lod_level=1, + dtype="int64") for i in range(1, 27)] + + label = fluid.layers.data(name="label", shape=[1], dtype="int64") + dataset = fluid.DatasetFactory().create_dataset() + dataset.set_use_var([dense_input] + sparse_input_ids + [label]) + pipe_command = "python criteo_reader.py %d" % sparse_feature_dim + dataset.set_pipe_command(pipe_command) + dataset.set_batch_size(batch_size) + dataset.set_thread(thread_num) + + whole_filelist = ["raw_data/part-%d" % x + for x in range(len(os.listdir("raw_data")))] + + dataset.set_filelist(whole_filelist) + loss, auc_var, batch_auc_var = ctr_dnn_model_dataset( + dense_input, sparse_input_ids, label, embedding_size, + sparse_feature_dim) + + exe = fluid.Executor(fluid.CPUPlace()) + def train_loop(epoch=20): + for i in range(epoch): + exe.train_from_dataset(program=fluid.default_main_program(), + dataset=dataset, + fetch_list=[auc_var], + fetch_info=["auc"], + debug=False) + # local training + def local_train(): + optimizer = fluid.optimizer.SGD(learning_rate=1e-4) + optimizer.minimize(loss) + exe.run(fluid.default_startup_program()) + train_loop() + + # distributed training + def dist_train(): + role = role_maker.PaddleCloudRoleMaker() + fleet.init(role) + strategy = DistributeTranspilerConfig() + strategy.sync_mode = False + optimizer = fluid.optimizer.SGD(learning_rate=1e-4) + optimizer = fleet.distributed_optimizer(optimizer, strategy) + optimizer.minimize(loss) + + if fleet.is_server(): + fleet.init_server() + fleet.run_server() + elif fleet.is_worker(): + fleet.init_worker() + exe.run(fluid.default_startup_program()) + train_loop() + if is_local: + local_train() + else: + dist_train() + + if __name__ == '__main__': + main_function(args.is_local) + + +* 说明:示例中使用的IO方法是dataset,想了解具体的文档和用法请参考 `Dataset API `_ 。示例中使用的 ``train_from_dataset`` 接口,想了解具体的文档和使用方法请参考 `Executor API `_ 。示例中的 ``from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet`` 表示引入参数服务器架构进行分布式训练,如果想更进一步了解Fleet API的更多选项和示例,请参考 `Fleet API `_ + + +单机训练启动命令 +~~~~~~~~~~~~~~~~ + +.. code-block:: bash + + python train.py --is_local 1 + +单机模拟分布式训练的启动命令 +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +在单机模拟多机训练的启动命令,这里我们用到了paddle内置的一个启动器launch_ps,用户可以指定worker和server的数量进行参数服务器任务的启动 + +.. 
code-block:: bash + + python -m paddle.distributed.launch_ps --worker_num 2 --server_num 2 train.py + +任务运行的日志在工作目录的logs目录下可以查看,当您能够使用单机模拟分布式训练,可以进行真正的多机分布式训练。我们建议用户直接参考 `百度云运行分布式任务的示例 `_ + + diff --git a/doc/paddle/advanced_guide/distributed_training/cluster_quick_start_en.rst b/doc/paddle/advanced_guide/distributed_training/cluster_quick_start_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..ff8ea39c02200f8397c9d3bd9454fd6d01214f51 --- /dev/null +++ b/doc/paddle/advanced_guide/distributed_training/cluster_quick_start_en.rst @@ -0,0 +1,159 @@ +Quick start for distributed training +==================================== + +Distributed training with Fleet API +----------------------------------- + +Since Paddle Fluid `Release +1.5.1 `__, +it is officially recommended to use the Fleet API for distributed +training. For the introduction of the Fleet API, please refer to `Fleet +Design Doc `__. + +Preparation +~~~~~~~~~~~ + +- [x] Install Paddle Fluid. If not already installed, please refer to + `Beginner’s + Guide `__. +- [x] Master the most basic single node training method. Please refer + to the single card training described in `Single-node + training `__. + +Click-through rate prediction +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Here, we will use a simple example, click-through rate prediction task, +to illustrate how to configure Fleet API for distributed training, and +gives an example by using a single node environment to simulate the +distributed environment. The source code of the example comes from `CTR +with +Fleet `__. + +In order to facilitate learning, the example given here is a mixed code +of single node and multi node. You can start single node or multi node +tasks through different startup commands. For the part of obtaining data +and the logic of data preprocessing, please refer to the source code and +description of `CTR with +Fleet `__. + +.. 
code:: python + + from __future__ import print_function + from args import parse_args + import os + import paddle.fluid as fluid + import sys + from network_conf import ctr_dnn_model_dataset + import paddle.fluid.incubate.fleet.base.role_maker as role_maker + + from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet + from paddle.fluid.transpiler.distribute_transpiler import DistributeTranspilerConfig + + dense_feature_dim = 13 + sparse_feature_dim = 10000001 + batch_size = 100 + thread_num = 10 + embedding_size = 10 + args = parse_args() + + def main_function(is_local): + # common code for local training and distributed training + dense_input = fluid.layers.data( + name="dense_input", shape=[dense_feature_dim], dtype='float32') + + sparse_input_ids = [ + fluid.layers.data(name="C" + str(i), shape=[1], lod_level=1, + dtype="int64") for i in range(1, 27)] + + label = fluid.layers.data(name="label", shape=[1], dtype="int64") + dataset = fluid.DatasetFactory().create_dataset() + dataset.set_use_var([dense_input] + sparse_input_ids + [label]) + pipe_command = "python criteo_reader.py %d" % sparse_feature_dim + dataset.set_pipe_command(pipe_command) + dataset.set_batch_size(batch_size) + dataset.set_thread(thread_num) + + whole_filelist = ["raw_data/part-%d" % x + for x in range(len(os.listdir("raw_data")))] + + dataset.set_filelist(whole_filelist) + loss, auc_var, batch_auc_var = ctr_dnn_model_dataset( + dense_input, sparse_input_ids, label, embedding_size, + sparse_feature_dim) + + exe = fluid.Executor(fluid.CPUPlace()) + def train_loop(epoch=20): + for i in range(epoch): + exe.train_from_dataset(program=fluid.default_main_program(), + dataset=dataset, + fetch_list=[auc_var], + fetch_info=["auc"], + debug=False) + # local training + def local_train(): + optimizer = fluid.optimizer.SGD(learning_rate=1e-4) + optimizer.minimize(loss) + exe.run(fluid.default_startup_program()) + train_loop() + + # distributed training + def dist_train(): + role = role_maker.PaddleCloudRoleMaker() + fleet.init(role) + strategy = DistributeTranspilerConfig() + strategy.sync_mode = False + optimizer = fluid.optimizer.SGD(learning_rate=1e-4) + optimizer = fleet.distributed_optimizer(optimizer, strategy) + optimizer.minimize(loss) + + if fleet.is_server(): + fleet.init_server() + fleet.run_server() + elif fleet.is_worker(): + fleet.init_worker() + exe.run(fluid.default_startup_program()) + train_loop() + if is_local: + local_train() + else: + dist_train() + + if __name__ == '__main__': + main_function(args.is_local) + +- Note: The IO method used in this example is dataset, please refer to + `Dataset + API `__ + for specific documents and usage. For the ``train_from_dataset`` + interface, please refer to `Executor + API `__. + ``from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet`` + in this example means to introduce parameter server architecture for + distributed training, which you can refer to `Fleet + API `__ + for getting more about the options and examples of Fleet API. + +Start command of single node training +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code:: bash + + python train.py --is_local 1 + +Start command of single machine simulation distributed training +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Here we use launch\_ps, a built-in launcher of paddle, which users can +specify the number of workers and servers to start the parameter server +tasks. + +.. 
code:: bash + + python -m paddle.distributed.launch_ps --worker_num 2 --server_num 2 train.py + +The task running log can be viewed in the logs directory of the working +directory. When you can use a single machine to simulate distributed +training, you can perform true multi node distributed training. We +recommend that users refer directly to +`百度云运行分布式任务的示例 `__. diff --git a/doc/paddle/advanced_guide/distributed_training/fleet_api_howto_cn.rst b/doc/paddle/advanced_guide/distributed_training/fleet_api_howto_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..21f3ea861452ee7bf1dc12fdd65bd8fdcbc8ea6c --- /dev/null +++ b/doc/paddle/advanced_guide/distributed_training/fleet_api_howto_cn.rst @@ -0,0 +1,324 @@ + +使用FleetAPI进行分布式训练 +========================== + +FleetAPI 设计说明 +----------------- + +Fleet是PaddlePaddle分布式训练的高级API。Fleet的命名出自于PaddlePaddle,象征一个舰队中的多只双桨船协同工作。Fleet的设计在易用性和算法可扩展性方面做出了权衡。用户可以很容易从单机版的训练程序,通过添加几行代码切换到分布式训练程序。此外,分布式训练的算法也可以通过Fleet +API接口灵活定义。具体的设计原理可以参考\ `Fleet +API设计文档 `_\ 。当前FleetAPI还处于paddle.fluid.incubate目录下,未来功能完备后会放到paddle.fluid目录中,欢迎持续关注。 + +Fleet API快速上手示例 +--------------------- + +下面会针对Fleet +API最常见的两种使用场景,用一个模型做示例,目的是让用户有快速上手体验的模板。快速上手的示例源代码可以在\ `Fleet Quick Start `_ 找到。 + + +* + 假设我们定义MLP网络如下: + + .. code-block:: python + + import paddle.fluid as fluid + + def mlp(input_x, input_y, hid_dim=128, label_dim=2): + fc_1 = fluid.layers.fc(input=input_x, size=hid_dim, act='tanh') + fc_2 = fluid.layers.fc(input=fc_1, size=hid_dim, act='tanh') + prediction = fluid.layers.fc(input=[fc_2], size=label_dim, act='softmax') + cost = fluid.layers.cross_entropy(input=prediction, label=input_y) + avg_cost = fluid.layers.mean(x=cost) + return avg_cost + +* + 定义一个在内存生成数据的Reader如下: + + .. code-block:: python + + import numpy as np + + def gen_data(): + return {"x": np.random.random(size=(128, 32)).astype('float32'), + "y": np.random.randint(2, size=(128, 1)).astype('int64')} + +* + 单机Trainer定义 + + .. code-block:: python + + import paddle.fluid as fluid + from nets import mlp + from utils import gen_data + + input_x = fluid.data(name="x", shape=[None, 32], dtype='float32') + input_y = fluid.data(name="y", shape=[None, 1], dtype='int64') + + cost = mlp(input_x, input_y) + optimizer = fluid.optimizer.SGD(learning_rate=0.01) + optimizer.minimize(cost) + place = fluid.CUDAPlace(0) + + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + step = 1001 + for i in range(step): + cost_val = exe.run(feed=gen_data(), fetch_list=[cost.name]) + print("step%d cost=%f" % (i, cost_val[0])) + +* + Parameter Server训练方法 + + 参数服务器方法对于大规模数据,简单模型的并行训练非常适用,我们基于单机模型的定义给出使用Parameter Server进行训练的示例如下: + + .. 
code-block:: python + + import paddle.fluid as fluid + from nets import mlp + from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet + from paddle.fluid.incubate.fleet.base import role_maker + from utils import gen_data + + input_x = fluid.data(name="x", shape=[None, 32], dtype='float32') + input_y = fluid.data(name="y", shape=[None, 1], dtype='int64') + + cost = mlp(input_x, input_y) + optimizer = fluid.optimizer.SGD(learning_rate=0.01) + + role = role_maker.PaddleCloudRoleMaker() + fleet.init(role) + optimizer = fleet.distributed_optimizer(optimizer) + optimizer.minimize(cost) + + if fleet.is_server(): + fleet.init_server() + fleet.run_server() + elif fleet.is_worker(): + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + step = 1001 + for i in range(step): + cost_val = exe.run( + program=fluid.default_main_program(), + feed=gen_data(), + fetch_list=[cost.name]) + print("worker_index: %d, step%d cost = %f" % + (fleet.worker_index(), i, cost_val[0])) + +* + Collective训练方法 + + Collective Training通常在GPU多机多卡训练中使用,一般在复杂模型的训练中比较常见,我们基于上面的单机模型定义给出使用Collective方法进行分布式训练的示例如下: + + .. code-block:: python + + import paddle.fluid as fluid + from nets import mlp + from paddle.fluid.incubate.fleet.collective import fleet + from paddle.fluid.incubate.fleet.base import role_maker + from utils import gen_data + + input_x = fluid.data(name="x", shape=[None, 32], dtype='float32') + input_y = fluid.data(name="y", shape=[None, 1], dtype='int64') + + cost = mlp(input_x, input_y) + optimizer = fluid.optimizer.SGD(learning_rate=0.01) + role = role_maker.PaddleCloudRoleMaker(is_collective=True) + fleet.init(role) + + optimizer = fleet.distributed_optimizer(optimizer) + optimizer.minimize(cost) + place = fluid.CUDAPlace(0) + + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + step = 1001 + for i in range(step): + cost_val = exe.run( + program=fluid.default_main_program(), + feed=gen_data(), + fetch_list=[cost.name]) + print("worker_index: %d, step%d cost = %f" % + (fleet.worker_index(), i, cost_val[0])) + +更多使用示例 +------------ + +`点击率预估 `_ + +`语义匹配 `_ + +`向量学习 `_ + +`基于Resnet50的图像分类 `_ + +`基于Transformer的机器翻译 `_ + +`基于Bert的语义表示学习 `_ + +Fleet API相关的接口说明 +----------------------- + +Fleet API接口 +^^^^^^^^^^^^^ + + +* init(role_maker=None) + + * fleet初始化,需要在使用fleet其他接口前先调用,用于定义多机的环境配置 + +* is_worker() + + * Parameter Server训练中使用,判断当前节点是否是Worker节点,是则返回True,否则返回False + +* is_server(model_dir=None) + + * Parameter Server训练中使用,判断当前节点是否是Server节点,是则返回True,否则返回False + +* init_server() + + * Parameter Server训练中,fleet加载model_dir中保存的模型相关参数进行parameter + server的初始化 + +* run_server() + + * Parameter Server训练中使用,用来启动server端服务 + +* init_worker() + + * Parameter Server训练中使用,用来启动worker端服务 + +* stop_worker() + + * 训练结束后,停止worker + +* distributed_optimizer(optimizer, strategy=None) + + * 分布式优化算法装饰器,用户可带入单机optimizer,并配置分布式训练策略,返回一个分布式的optimizer + +RoleMaker +^^^^^^^^^ + + +* + MPISymetricRoleMaker + + + * + 描述:MPISymetricRoleMaker会假设每个节点启动两个进程,1worker+1pserver,这种RoleMaker要求用户的集群上有mpi环境。 + + * + 示例: + + .. code-block:: python + + from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet + from paddle.fluid.incubate.fleet.base import role_maker + + role = role_maker.MPISymetricRoleMaker() + fleet.init(role) + + * + 启动方法: + + .. 
code-block:: python + + mpirun -np 2 python trainer.py + +* + PaddleCloudRoleMaker + + + * + 描述:PaddleCloudRoleMaker是一个高级封装,支持使用paddle.distributed.launch或者paddle.distributed.launch_ps启动脚本 + + * + Parameter Server训练示例: + + .. code-block:: python + + from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet + from paddle.fluid.incubate.fleet.base import role_maker + + role = role_maker.PaddleCloudRoleMaker() + fleet.init(role) + + * + 启动方法: + + .. code-block:: python + + python -m paddle.distributed.launch_ps --worker_num 2 --server_num 2 trainer.py + + * + Collective训练示例: + + .. code-block:: python + + from paddle.fluid.incubate.fleet.collective import fleet + from paddle.fluid.incubate.fleet.base import role_maker + + role = role_maker.PaddleCloudRoleMaker(is_collective=True) + fleet.init(role) + + * + 启动方法: + + .. code-block:: python + + python -m paddle.distributed.launch trainer.py + +* + UserDefinedRoleMaker + + + * + 描述:用户自定义节点的角色信息,IP和端口信息 + + * + 示例: + + .. code-block:: python + + from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet + from paddle.fluid.incubate.fleet.base import role_maker + + role = role_maker.UserDefinedRoleMaker( + current_id=int(os.getenv("CURRENT_ID")), + role=role_maker.Role.WORKER if bool(int(os.getenv("IS_WORKER"))) + else role_maker.Role.SERVER, + worker_num=int(os.getenv("WORKER_NUM")), + server_endpoints=pserver_endpoints) + fleet.init(role) + +Strategy +^^^^^^^^ + + +* Parameter Server Training + + * Sync_mode + +* Collective Training + + * LocalSGD + * ReduceGrad + +Fleet Mode +^^^^^^^^^^ + + +* + Parameter Server Training + + .. code-block:: python + + from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet + +* + Collective Training + + .. code-block:: python + + from paddle.fluid.incubate.fleet.collective import fleet diff --git a/doc/paddle/advanced_guide/distributed_training/index_cn.rst b/doc/paddle/advanced_guide/distributed_training/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1d2ad003bfd174ebd5c903e68acf3b3001fe3dfb --- /dev/null +++ b/doc/paddle/advanced_guide/distributed_training/index_cn.rst @@ -0,0 +1,9 @@ +########## +分布式训练 +########## + +.. toctree:: + :maxdepth: 1 + + cluster_quick_start.rst + fleet_api_howto_cn.rst diff --git a/doc/paddle/advanced_guide/distributed_training/index_en.rst b/doc/paddle/advanced_guide/distributed_training/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..81e8c515b69fb73873d09a9a7c405550f35a12fe --- /dev/null +++ b/doc/paddle/advanced_guide/distributed_training/index_en.rst @@ -0,0 +1,12 @@ +.. _user_guide_distribute_en: + +###################### +Distributed Training +###################### + +.. toctree:: + :maxdepth: 1 + + cluster_quick_start_en.rst + cluster_howto_en.rst + diff --git a/doc/paddle/advanced_guide/distributed_training/multi_node.rst b/doc/paddle/advanced_guide/distributed_training/multi_node.rst new file mode 100644 index 0000000000000000000000000000000000000000..45e6bd696c5d4b62b72676459d17408ab849a738 --- /dev/null +++ b/doc/paddle/advanced_guide/distributed_training/multi_node.rst @@ -0,0 +1,10 @@ +######## +多机训练 +######## + +.. 
toctree:: + :maxdepth: 1 + + cluster_quick_start.rst + cluster_howto.rst + fleet_api_howto_cn.rst diff --git a/doc/paddle/advanced_guide/distributed_training/multi_node_en.rst b/doc/paddle/advanced_guide/distributed_training/multi_node_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..e604994a877072f5878a4f606bc239d909afc5b5 --- /dev/null +++ b/doc/paddle/advanced_guide/distributed_training/multi_node_en.rst @@ -0,0 +1,10 @@ +#################### +Multi-node Training +#################### + +.. toctree:: + :maxdepth: 1 + + cluster_quick_start_en.rst + cluster_howto_en.rst + train_on_baidu_cloud_en.rst diff --git a/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/cluster-info.png b/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/cluster-info.png new file mode 100644 index 0000000000000000000000000000000000000000..c89880277afa2460f92838ce2301e12eb192a417 Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/cluster-info.png differ diff --git a/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/concole.png b/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/concole.png new file mode 100644 index 0000000000000000000000000000000000000000..7db02be1bd8b66418015939e594812f08f35ff58 Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/concole.png differ diff --git a/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/conf-download.png b/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/conf-download.png new file mode 100644 index 0000000000000000000000000000000000000000..f10b62a4da76a6726c5de87d31aaeae27c2982f0 Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/conf-download.png differ diff --git a/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/ctr-models.png b/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/ctr-models.png new file mode 100644 index 0000000000000000000000000000000000000000..df41a239c38f54d26e3693fe3fc7853cbef12879 Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/ctr-models.png differ diff --git a/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/ctr-prediction-end-to-end-deployment.png b/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/ctr-prediction-end-to-end-deployment.png new file mode 100644 index 0000000000000000000000000000000000000000..bf2ac77c4e92f8a13639054b92bb1398046882ba Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/ctr-prediction-end-to-end-deployment.png differ diff --git a/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/ctr-running.png b/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/ctr-running.png new file mode 100644 index 0000000000000000000000000000000000000000..87992d109fe4c513dec088e2cd20b360c5627b60 Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/ctr-running.png differ diff --git a/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/eip.png b/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/eip.png new file mode 100644 index 0000000000000000000000000000000000000000..cae4175df0a5bd9836e4974599b35b4b0aa278d6 Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/eip.png differ diff --git 
a/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/file_server.png b/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/file_server.png new file mode 100644 index 0000000000000000000000000000000000000000..0389e5785720d14d1d7132f136debf54a0de79fd Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/file_server.png differ diff --git a/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/helm-version.png b/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/helm-version.png new file mode 100644 index 0000000000000000000000000000000000000000..cae626abe9234a9c04989d31b51ff9c1a3104244 Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/helm-version.png differ diff --git a/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/kubectl-version.png b/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/kubectl-version.png new file mode 100644 index 0000000000000000000000000000000000000000..1b073ffd20f742353238a7ba24078a7e562627ba Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/kubectl-version.png differ diff --git a/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/load_balancer.png b/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/load_balancer.png new file mode 100644 index 0000000000000000000000000000000000000000..7d1042e5953a487ad438485d5f1af912a97b7ff2 Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/load_balancer.png differ diff --git a/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/pserver-log.png b/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/pserver-log.png new file mode 100644 index 0000000000000000000000000000000000000000..fa2a6bd436dcedb40b72e5a2b84d7a91afc1f010 Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/pserver-log.png differ diff --git a/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/tiller.png b/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/tiller.png new file mode 100644 index 0000000000000000000000000000000000000000..b7b8c83613db5f886aa9ecced6567b7379a86246 Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/tiller.png differ diff --git a/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/trainer-log.png b/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/trainer-log.png new file mode 100644 index 0000000000000000000000000000000000000000..b05a0ec5a4dc7ec164e96458a7adda61b82c3606 Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/trainer-log.png differ diff --git a/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/volcano.png b/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/volcano.png new file mode 100644 index 0000000000000000000000000000000000000000..1d55f044ec6f04e6181a192aca8cc32834ef9196 Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/volcano.png differ diff --git a/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/wget_example.png b/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/wget_example.png new file mode 100644 index 0000000000000000000000000000000000000000..5e87da2c71e17eadb27ba8a810c35fa413f5afb1 Binary files /dev/null and 
b/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/wget_example.png differ diff --git a/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/workload.png b/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/workload.png new file mode 100644 index 0000000000000000000000000000000000000000..d45e0b667e1e4c1f6f0b4fe9c000e1359b11ad06 Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/baidu_cloud/workload.png differ diff --git a/doc/paddle/advanced_guide/distributed_training/src/create_gpu_machine.png b/doc/paddle/advanced_guide/distributed_training/src/create_gpu_machine.png new file mode 100644 index 0000000000000000000000000000000000000000..8b98ce5bdf0c1f9921eac1f4f55d31bec028d650 Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/create_gpu_machine.png differ diff --git a/doc/paddle/advanced_guide/distributed_training/src/create_image.png b/doc/paddle/advanced_guide/distributed_training/src/create_image.png new file mode 100644 index 0000000000000000000000000000000000000000..b9a26de49a6ec33707199d2dfde8a741c4222581 Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/create_image.png differ diff --git a/doc/paddle/advanced_guide/distributed_training/src/create_more_nodes.png b/doc/paddle/advanced_guide/distributed_training/src/create_more_nodes.png new file mode 100644 index 0000000000000000000000000000000000000000..656cf6f49bd7e239bfbbd305dc87a5a73a6100d1 Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/create_more_nodes.png differ diff --git a/doc/paddle/advanced_guide/distributed_training/src/ctr.png b/doc/paddle/advanced_guide/distributed_training/src/ctr.png new file mode 100644 index 0000000000000000000000000000000000000000..fea2d8e2591ba4ce05ffdffcf95526d1d5dada0e Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/ctr.png differ diff --git a/doc/paddle/advanced_guide/distributed_training/src/ctr_kubectl_download.png b/doc/paddle/advanced_guide/distributed_training/src/ctr_kubectl_download.png new file mode 100644 index 0000000000000000000000000000000000000000..b87395a1f19e90d30644a1e28b9e434dda3545ab Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/ctr_kubectl_download.png differ diff --git a/doc/paddle/advanced_guide/distributed_training/src/ctr_node.png b/doc/paddle/advanced_guide/distributed_training/src/ctr_node.png new file mode 100644 index 0000000000000000000000000000000000000000..9a43c4257316e3bd2879f10a49d0edb74a41d7d7 Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/ctr_node.png differ diff --git a/doc/paddle/advanced_guide/distributed_training/src/ctr_pods.png b/doc/paddle/advanced_guide/distributed_training/src/ctr_pods.png new file mode 100644 index 0000000000000000000000000000000000000000..5e836b2490a978dd3d3664d08e5a4c056fed52a7 Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/ctr_pods.png differ diff --git a/doc/paddle/advanced_guide/distributed_training/src/ctr_pserver_log.png b/doc/paddle/advanced_guide/distributed_training/src/ctr_pserver_log.png new file mode 100644 index 0000000000000000000000000000000000000000..189b40e4f65c49c6e1bfec219759433c683b1ee4 Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/ctr_pserver_log.png differ diff --git a/doc/paddle/advanced_guide/distributed_training/src/ctr_trainer_log.png 
b/doc/paddle/advanced_guide/distributed_training/src/ctr_trainer_log.png new file mode 100644 index 0000000000000000000000000000000000000000..303e2b1ee95802b16daa35e4cad5a283922504b9 Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/ctr_trainer_log.png differ diff --git a/doc/paddle/advanced_guide/distributed_training/src/ctr_volcano_install.png b/doc/paddle/advanced_guide/distributed_training/src/ctr_volcano_install.png new file mode 100644 index 0000000000000000000000000000000000000000..536fd4b3a68184316b9b96c488b48b7e6403a28b Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/ctr_volcano_install.png differ diff --git a/doc/paddle/advanced_guide/distributed_training/src/ctryaml1.png b/doc/paddle/advanced_guide/distributed_training/src/ctryaml1.png new file mode 100644 index 0000000000000000000000000000000000000000..d5268a27a4ab4de76c5383d95fd5625f7ace4de3 Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/ctryaml1.png differ diff --git a/doc/paddle/advanced_guide/distributed_training/src/ctryaml2.png b/doc/paddle/advanced_guide/distributed_training/src/ctryaml2.png new file mode 100644 index 0000000000000000000000000000000000000000..d93e55ec07ec289b8c4d008a311f477e6cfd539a Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/ctryaml2.png differ diff --git a/doc/paddle/advanced_guide/distributed_training/src/ctryaml3.png b/doc/paddle/advanced_guide/distributed_training/src/ctryaml3.png new file mode 100644 index 0000000000000000000000000000000000000000..c0b75395924719b16d68c3eb124d90f7497ee300 Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/ctryaml3.png differ diff --git a/doc/paddle/advanced_guide/distributed_training/src/cube.png b/doc/paddle/advanced_guide/distributed_training/src/cube.png new file mode 100644 index 0000000000000000000000000000000000000000..0757421c20c84c1e0df61454902e4c24cd655df7 Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/cube.png differ diff --git a/doc/paddle/advanced_guide/distributed_training/src/cube_config1.png b/doc/paddle/advanced_guide/distributed_training/src/cube_config1.png new file mode 100644 index 0000000000000000000000000000000000000000..188c4214460814a67d0eafa3cf1af18ded4340fa Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/cube_config1.png differ diff --git a/doc/paddle/advanced_guide/distributed_training/src/cube_config2.png b/doc/paddle/advanced_guide/distributed_training/src/cube_config2.png new file mode 100644 index 0000000000000000000000000000000000000000..9b5a171e39303f373dbf31f6204baa913cec7130 Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/cube_config2.png differ diff --git a/doc/paddle/advanced_guide/distributed_training/src/dist_train_demo.py b/doc/paddle/advanced_guide/distributed_training/src/dist_train_demo.py new file mode 100644 index 0000000000000000000000000000000000000000..f7209ed2b871197ee55089411c3d1ab6b323fbba --- /dev/null +++ b/doc/paddle/advanced_guide/distributed_training/src/dist_train_demo.py @@ -0,0 +1,111 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import paddle.fluid.core as core +import math +import os +import sys + +import numpy + +import paddle +import paddle.fluid as fluid + +BATCH_SIZE = 64 +PASS_NUM = 1 + + +def loss_net(hidden, label): + prediction = fluid.layers.fc(input=hidden, size=10, act='softmax') + loss = fluid.layers.cross_entropy(input=prediction, label=label) + avg_loss = fluid.layers.mean(loss) + acc = fluid.layers.accuracy(input=prediction, label=label) + return prediction, avg_loss, acc + + +def conv_net(img, label): + conv_pool_1 = fluid.nets.simple_img_conv_pool( + input=img, + filter_size=5, + num_filters=20, + pool_size=2, + pool_stride=2, + act="relu") + conv_pool_1 = fluid.layers.batch_norm(conv_pool_1) + conv_pool_2 = fluid.nets.simple_img_conv_pool( + input=conv_pool_1, + filter_size=5, + num_filters=50, + pool_size=2, + pool_stride=2, + act="relu") + return loss_net(conv_pool_2, label) + + +def train(use_cuda, role, endpoints, current_endpoint, trainer_id, trainers): + if use_cuda and not fluid.core.is_compiled_with_cuda(): + return + img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + prediction, avg_loss, acc = conv_net(img, label) + + test_program = fluid.default_main_program().clone(for_test=True) + + optimizer = fluid.optimizer.Adam(learning_rate=0.001) + optimizer.minimize(avg_loss) + + t = fluid.DistributeTranspiler() + t.transpile(trainer_id, pservers=endpoints, trainers=trainers) + if role == "pserver": + prog = t.get_pserver_program(current_endpoint) + startup = t.get_startup_program(current_endpoint, pserver_program=prog) + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(startup) + exe.run(prog) + elif role == "trainer": + prog = t.get_trainer_program() + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + exe = fluid.Executor(place) + + train_reader = paddle.batch( + paddle.reader.shuffle(paddle.dataset.mnist.train(), buf_size=500), + batch_size=BATCH_SIZE) + test_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=BATCH_SIZE) + feeder = fluid.DataFeeder(feed_list=[img, label], place=place) + exe.run(fluid.default_startup_program()) + for pass_id in range(PASS_NUM): + for batch_id, data in enumerate(train_reader()): + acc_np, avg_loss_np = exe.run( + prog, feed=feeder.feed(data), fetch_list=[acc, avg_loss]) + if (batch_id + 1) % 10 == 0: + print( + 'PassID {0:1}, BatchID {1:04}, Loss {2:2.2}, Acc {3:2.2}'. 
+ format(pass_id, batch_id + 1, + float(avg_loss_np.mean()), float( + acc_np.mean()))) + + +if __name__ == '__main__': + if len(sys.argv) != 6: + print( + "Usage: python %s role endpoints current_endpoint trainer_id trainers" + % sys.argv[0]) + exit(0) + role, endpoints, current_endpoint, trainer_id, trainers = \ + sys.argv[1:] + train(True, role, endpoints, current_endpoint, + int(trainer_id), int(trainers)) diff --git a/doc/paddle/advanced_guide/distributed_training/src/dist_train_nccl2.graffle b/doc/paddle/advanced_guide/distributed_training/src/dist_train_nccl2.graffle new file mode 100644 index 0000000000000000000000000000000000000000..16f6b8835c4ffb82babca56b62ba44494fd6a947 Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/dist_train_nccl2.graffle differ diff --git a/doc/paddle/advanced_guide/distributed_training/src/dist_train_nccl2.png b/doc/paddle/advanced_guide/distributed_training/src/dist_train_nccl2.png new file mode 100644 index 0000000000000000000000000000000000000000..587a1a48affdde6809d7f8bf77e1055db7cd8c14 Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/dist_train_nccl2.png differ diff --git a/doc/paddle/advanced_guide/distributed_training/src/dist_train_pserver.graffle b/doc/paddle/advanced_guide/distributed_training/src/dist_train_pserver.graffle new file mode 100644 index 0000000000000000000000000000000000000000..046c4903231e8ca441884674c08b381766c0bbae Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/dist_train_pserver.graffle differ diff --git a/doc/paddle/advanced_guide/distributed_training/src/dist_train_pserver.png b/doc/paddle/advanced_guide/distributed_training/src/dist_train_pserver.png new file mode 100644 index 0000000000000000000000000000000000000000..cd2f92ad1a14ac12efc2c257c8aa3d1ae403b2b1 Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/dist_train_pserver.png differ diff --git a/doc/paddle/advanced_guide/distributed_training/src/file_server_pod.png b/doc/paddle/advanced_guide/distributed_training/src/file_server_pod.png new file mode 100644 index 0000000000000000000000000000000000000000..8086e889ee41d0525025edc4873a0024e4478ae7 Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/file_server_pod.png differ diff --git a/doc/paddle/advanced_guide/distributed_training/src/file_server_svc.png b/doc/paddle/advanced_guide/distributed_training/src/file_server_svc.png new file mode 100644 index 0000000000000000000000000000000000000000..90bfd0c8f1e378874e6dd5859dbda76ecd554265 Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/file_server_svc.png differ diff --git a/doc/paddle/advanced_guide/distributed_training/src/overview.png b/doc/paddle/advanced_guide/distributed_training/src/overview.png new file mode 100644 index 0000000000000000000000000000000000000000..7e94548457ceb9378019f4ae6c3c1af9502a97fe Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/overview.png differ diff --git a/doc/paddle/advanced_guide/distributed_training/src/paddleclient.png b/doc/paddle/advanced_guide/distributed_training/src/paddleclient.png new file mode 100644 index 0000000000000000000000000000000000000000..69157cd8327712a860e47934a124233bc88ffe60 Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/paddleclient.png differ diff --git a/doc/paddle/advanced_guide/distributed_training/src/paddleserving_pod.png 
b/doc/paddle/advanced_guide/distributed_training/src/paddleserving_pod.png new file mode 100644 index 0000000000000000000000000000000000000000..6dfddfd17059f1583f6219a4fca56280ef5089c8 Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/paddleserving_pod.png differ diff --git a/doc/paddle/advanced_guide/distributed_training/src/paddleserving_svc.png b/doc/paddle/advanced_guide/distributed_training/src/paddleserving_svc.png new file mode 100644 index 0000000000000000000000000000000000000000..e4f34095053692ff3b01bd0aa4e40b2830b145d8 Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/paddleserving_svc.png differ diff --git a/doc/paddle/advanced_guide/distributed_training/src/parallelism.png b/doc/paddle/advanced_guide/distributed_training/src/parallelism.png new file mode 100644 index 0000000000000000000000000000000000000000..c787907397acb78d5e8ce31481e44dac2f774a4e Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/parallelism.png differ diff --git a/doc/paddle/advanced_guide/distributed_training/src/pyreader.png b/doc/paddle/advanced_guide/distributed_training/src/pyreader.png new file mode 100644 index 0000000000000000000000000000000000000000..2c887f5705e17eafcd0bf58c045721572c252a24 Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/pyreader.png differ diff --git a/doc/paddle/advanced_guide/distributed_training/src/release.png b/doc/paddle/advanced_guide/distributed_training/src/release.png new file mode 100644 index 0000000000000000000000000000000000000000..75dfd7f0dce96ab57d4a471728e471b0d2b6e5f4 Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/release.png differ diff --git a/doc/paddle/advanced_guide/distributed_training/src/transfer.png b/doc/paddle/advanced_guide/distributed_training/src/transfer.png new file mode 100644 index 0000000000000000000000000000000000000000..4a48e4313dd7cadf399ca7c7d056eeacffc66465 Binary files /dev/null and b/doc/paddle/advanced_guide/distributed_training/src/transfer.png differ diff --git a/doc/paddle/advanced_guide/evaluation_debugging/debug/index_cn.rst b/doc/paddle/advanced_guide/evaluation_debugging/debug/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c08ca2c7c7827f6ac83f51c3f9fd5640fd2b4d40 --- /dev/null +++ b/doc/paddle/advanced_guide/evaluation_debugging/debug/index_cn.rst @@ -0,0 +1,16 @@ +.. PaddlePaddle Fluid documentation master file, created by + sphinx-quickstart on Thu Jun 7 17:04:53 2018. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +############## +VisualDL 工具 +############## + +.. toctree:: + :maxdepth: 1 + + + visualdl.md + visualdl_usage.md + diff --git a/doc/paddle/advanced_guide/evaluation_debugging/debug/index_en.rst b/doc/paddle/advanced_guide/evaluation_debugging/debug/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..840edeb8e5ed6369dacba32f0471e1c0edc53938 --- /dev/null +++ b/doc/paddle/advanced_guide/evaluation_debugging/debug/index_en.rst @@ -0,0 +1,8 @@ +VisualDL Tools +========================== + +.. 
toctree:: + :maxdepth: 1 + + visualdl_en.md + visualdl_usage_en.md diff --git a/doc/paddle/advanced_guide/evaluation_debugging/debug/visualdl.md b/doc/paddle/advanced_guide/evaluation_debugging/debug/visualdl.md new file mode 100644 index 0000000000000000000000000000000000000000..df0149674045239242735bad0c778d74f9d17811 --- /dev/null +++ b/doc/paddle/advanced_guide/evaluation_debugging/debug/visualdl.md @@ -0,0 +1,300 @@ +# VisualDL 工具简介 + + +

+ +

+ + + +VisualDL是飞桨可视化分析工具,以丰富的图表呈现训练参数变化趋势、模型结构、数据样本、直方图、PR曲线及高维数据分布。可帮助用户更清晰直观地理解深度学习模型训练过程及模型结构,进而实现高效的模型优化。 + +具体功能使用方式请参见**VisualDL使用指南**。项目正处于高速迭代中,敬请期待新组件的加入。 + +VisualDL支持浏览器种类:Chrome(81和83)、Safari 13、FireFox(77和78)、Edge(Chromium版)。 + +VisualDL原生支持python的使用, 通过在模型的Python配置中添加几行代码,便可为训练过程提供丰富的可视化支持。 + + + +## 目录 + +* [核心亮点](#核心亮点) +* [安装方式](#安装方式) +* [使用方式](#使用方式) +* [可视化功能概览](#可视化功能概览) +* [开源贡献](#开源贡献) +* [更多细节](#更多细节) +* [技术交流](#技术交流) + + + +## 核心亮点 + +### 简单易用 + +API设计简洁易懂,使用简单。模型结构一键实现可视化。 + +### 功能丰富 + +功能覆盖标量、数据样本、图结构、直方图、PR曲线及数据降维可视化。 + +### 高兼容性 + +全面支持Paddle、ONNX、Caffe等市面主流模型结构可视化,广泛支持各类用户进行可视化分析。 + +### 全面支持 + +与飞桨服务平台及工具组件全面打通,为您在飞桨生态系统中提供最佳使用体验。 + + + +## 安装方式 + +### 使用pip安装 + +```shell +pip install --upgrade --pre visualdl +``` + +### 使用代码安装 + +``` +git clone https://github.com/PaddlePaddle/VisualDL.git +cd VisualDL + +python setup.py bdist_wheel +pip install --upgrade dist/visualdl-*.whl +``` + +需要注意,官方自2020年1月1日起不再维护Python2,为了保障代码可用性,VisualDL现仅支持Python3 + +## 使用方式 + +VisualDL将训练过程中的数据、参数等信息储存至日志文件中后,启动面板即可查看可视化结果。 + +### 1. 记录日志 + +VisualDL的后端提供了Python SDK,可通过LogWriter定制一个日志记录器,接口如下: + +```python +class LogWriter(logdir=None, + comment='', + max_queue=10, + flush_secs=120, + filename_suffix='', + write_to_disk=True, + **kwargs) +``` + +#### 接口参数 + +| 参数 | 格式 | 含义 | +| --------------- | ------- | ------------------------------------------------------------ | +| logdir | string | 日志文件所在的路径,VisualDL将在此路径下建立日志文件并进行记录,如果不填则默认为`runs/${CURRENT_TIME}` | +| comment | string | 为日志文件夹名添加后缀,如果制定了logdir则此项无效 | +| max_queue | int | 日志记录消息队列的最大容量,达到此容量则立即写入到日志文件 | +| flush_secs | int | 日志记录消息队列的最大缓存时间,达到此时间则立即写入到日志文件 | +| filename_suffix | string | 为默认的日志文件名添加后缀 | +| write_to_disk | boolean | 是否写入到磁盘 | + +#### 示例 + +设置日志文件并记录标量数据: + +```python +from visualdl import LogWriter + +# 在`./log/scalar_test/train`路径下建立日志文件 +with LogWriter(logdir="./log/scalar_test/train") as writer: + # 使用scalar组件记录一个标量数据 + writer.add_scalar(tag="acc", step=1, value=0.5678) + writer.add_scalar(tag="acc", step=2, value=0.6878) + writer.add_scalar(tag="acc", step=3, value=0.9878) +``` + +### 2. 
启动面板 + +在上述示例中,日志已记录三组标量数据,现可启动VisualDL面板查看日志的可视化结果,共有两种启动方式: + +#### 在命令行启动 + +使用命令行启动VisualDL面板,命令格式如下: + +```python +visualdl --logdir --host --port --cache-timeout --language --public-path --api-only +``` + +参数详情: + +| 参数 | 意义 | +| --------------- | ------------------------------------------------------------ | +| --logdir | 设定日志所在目录,可以指定多个目录,VisualDL将遍历并且迭代寻找指定目录的子目录,将所有实验结果进行可视化 | +| --model | 设定模型文件路径(非文件夹路径),VisualDL将在此路径指定的模型文件进行可视化,目前可支持PaddlePaddle、ONNX、Keras、Core ML、Caffe等多种模型结构,详情可查看[graph支持模型种类]([https://github.com/PaddlePaddle/VisualDL/blob/develop/docs/components/README.md#Graph--%E7%BD%91%E7%BB%9C%E7%BB%93%E6%9E%84%E7%BB%84%E4%BB%B6](https://github.com/PaddlePaddle/VisualDL/blob/develop/docs/components/README.md#Graph--网络结构组件)) | +| --host | 设定IP,默认为`127.0.0.1` | +| --port | 设定端口,默认为`8040` | +| --cache-timeout | 后端缓存时间,在缓存时间内前端多次请求同一url,返回的数据从缓存中获取,默认为20秒 | +| --language | VisualDL面板语言,可指定为'EN'或'ZH',默认为浏览器使用语言 | +| --public-path | VisualDL面板URL路径,默认是'/app',即访问地址为'http://<host>:<port>/app' | +| --api-only | 是否只提供API,如果设置此参数,则VisualDL不提供页面展示,只提供API服务,此时API地址为'http://<host>:<port>/<public_path>/api';若没有设置public_path参数,则默认为'http://<host>:<port>/api' | + +针对上一步生成的日志,启动命令为: + +``` +visualdl --logdir ./log +``` + +#### 在Python脚本中启动 + +支持在Python脚本中启动VisualDL面板,接口如下: + +```python +visualdl.server.app.run(logdir, + host="127.0.0.1", + port=8080, + cache_timeout=20, + language=None, + public_path=None, + api_only=False, + open_browser=False) +``` + +请注意:除`logdir`外,其他参数均为不定参数,传递时请指明参数名。 + +接口参数具体如下: + +| 参数 | 格式 | 含义 | +| ------------- | ------------------------------------------------ | ------------------------------------------------------------ | +| logdir | string或list[string_1, string_2, ... , string_n] | 日志文件所在的路径,VisualDL将在此路径下递归搜索日志文件并进行可视化,可指定单个或多个路径 | +| model | string | 模型文件路径(非文件夹路径),VisualDL将在此路径指定的模型文件进行可视化 | +| host | string | 指定启动服务的ip,默认为`127.0.0.1` | +| port | int | 启动服务端口,默认为`8040` | +| cache_timeout | int | 后端缓存时间,在缓存时间内前端多次请求同一url,返回的数据从缓存中获取,默认为20秒 | +| language | string | VisualDL面板语言,可指定为'en'或'zh',默认为浏览器使用语言 | +| public_path | string | VisualDL面板URL路径,默认是'/app',即访问地址为'http://:/app' | +| api_only | boolean | 是否只提供API,如果设置此参数,则VisualDL不提供页面展示,只提供API服务,此时API地址为'http://://api';若没有设置public_path参数,则默认为http://:/api' | +| open_browser | boolean | 是否打开浏览器,设置为True则在启动后自动打开浏览器并访问VisualDL面板,若设置api_only,则忽略此参数 | + +针对上一步生成的日志,我们的启动脚本为: + +```python +from visualdl.server import app + +app.run(logdir="./log") +``` + +在使用任意一种方式启动VisualDL面板后,打开浏览器访问VisualDL面板,即可查看日志的可视化结果,如图: + +

+ +
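+
+如需在启动时同时指定端口、面板语言等可选项,可在 `app.run` 中以关键字参数形式传入。下面是一个带可选参数的启动示意(参数均为上表中已说明的参数,取值仅作演示,可按需调整):
+
+```python
+from visualdl.server import app
+
+# 示意:指定 IP、端口与面板语言,并在启动后自动打开浏览器(取值仅供参考)
+app.run(logdir="./log",
+        host="127.0.0.1",
+        port=8040,
+        language="zh",
+        open_browser=True)
+```
+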

+ + + +## 可视化功能概览 + +### Scalar + +以图表形式实时展示训练过程参数,如loss、accuracy。让用户通过观察单组或多组训练参数变化,了解训练过程,加速模型调优。具有两大特点: + +#### 动态展示 + +在启动VisualDL后,LogReader将不断增量的读取日志中数据并供前端调用展示,因此能够在训练中同步观测指标变化,如下图: + +

+ +

+ + + +#### 多实验对比 + +只需在启动VisualDL时将每个实验日志所在路径同时传入即可,每个实验中相同tag的指标将绘制在一张图中同步呈现,如下图: + +

+ +
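+
+多实验对比的一个最小示意(完整示例见《VisualDL使用指南》的多组实验对比部分):为每个实验分别建立日志目录,并使用相同的 tag 记录指标,启动面板时将公共父目录传入 `--logdir` 即可在同一张图中对比。其中目录名 `exp1`、`exp2` 仅为演示用的假设名称:
+
+```python
+from visualdl import LogWriter
+
+# 实验一与实验二写入不同的子目录,但使用相同的 tag
+with LogWriter(logdir="./log/exp1") as writer:
+    writer.add_scalar(tag="train/loss", step=1, value=0.9)
+
+with LogWriter(logdir="./log/exp2") as writer:
+    writer.add_scalar(tag="train/loss", step=1, value=0.8)
+
+# 启动面板:visualdl --logdir ./log
+```
+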

+ + + +### Image + +实时展示训练过程中的图像数据,用于观察不同训练阶段的图像变化,进而深入了解训练过程及效果。 + +

+ +

+ + + +### Audio + +实时查看训练过程中的音频数据,监控语音识别与合成等任务的训练过程。 + +

+ +

+ + + +### Graph + +一键可视化模型的网络结构。可查看模型属性、节点信息、节点输入输出等,并支持节点搜索,辅助用户快速分析模型结构与了解数据流向。 + +

+ +

+ + + +### Histogram + +以直方图形式展示Tensor(weight、bias、gradient等)数据在训练过程中的变化趋势。深入了解模型各层效果,帮助开发者精准调整模型结构。 + +- Offset模式 + +

+ +

+ + + +- Overlay模式 + +

+ +

+ + + +### PR Curve + +精度-召回率曲线,帮助开发者权衡模型精度和召回率之间的平衡,设定最佳阈值。 + +

+ +

+ + +### High Dimensional + +将高维数据进行降维展示,目前支持T-SNE、PCA两种降维方式,用于深入分析高维数据间的关系,方便用户根据数据特征进行算法优化。 + +

+ +

+ +## 开源贡献 + +VisualDL 是由 [PaddlePaddle](https://www.paddlepaddle.org/) 和 [ECharts](https://echarts.apache.org/) 合作推出的开源项目。 +Graph 相关功能由 [Netron](https://github.com/lutzroeder/netron) 提供技术支持。 +欢迎所有人使用,提意见以及贡献代码。 + + +## 更多细节 + +想了解更多关于VisualDL可视化功能的使用详情介绍,请查看**VisualDL使用指南**。 + +## 技术交流 + +欢迎您加入VisualDL官方QQ群:1045783368 与飞桨团队以及其他用户共同针对VisualDL进行讨论与交流。 diff --git a/doc/paddle/advanced_guide/evaluation_debugging/debug/visualdl_en.md b/doc/paddle/advanced_guide/evaluation_debugging/debug/visualdl_en.md new file mode 100644 index 0000000000000000000000000000000000000000..dbabfd8b4b1fd2b1cf69a14c33570ce8388d249d --- /dev/null +++ b/doc/paddle/advanced_guide/evaluation_debugging/debug/visualdl_en.md @@ -0,0 +1,264 @@ +# Introduction to VisualDL Toolset + +

+ +

+ +## Introduction +VisualDL is a deep learning visualization tool that can help design deep learning jobs. +It includes features such as scalar, parameter distribution, model structure and image visualization. +Currently it is being developed at a high pace. +New features will be continuously added. + +At present, most DNN frameworks use Python as their primary language. VisualDL supports Python by nature. +Users can get plentiful visualization results by simply adding a few lines of Python code into their model before training. + +Besides the Python SDK, the low-level core of VisualDL is written in C++. It also provides a C++ SDK that +can be integrated into other platforms. + + +## Component +VisualDL provides the following components: + +- scalar +- histogram +- image +- audio +- graph +- high dimensional + +### Scalar +Scalar can be used to show the trends of error during training. + +

+ +

+ +### Histogram +Histogram can be used to visualize parameter distribution and trends for any tensor. + +

+ +

+ +### Image +Image can be used to visualize any tensor or intermediate generated image. + +

+ +

+ +### Audio +Audio can be used to play input audio samples or generated audio samples. + +### Graph + +The VisualDL graph component supports displaying Paddle models and is also compatible with ONNX ([Open Neural Network Exchange](https://github.com/onnx/onnx)). +Combined with the Python SDK, VisualDL can work with most major DNN frameworks, including +PaddlePaddle, PyTorch and MXNet. + +

+ +

+ +To display a Paddle model, all you have to do is: + +1. call the `fluid.io.save_inference_model()` interface to save the Paddle model +2. use `visualdl --model_pb [paddle_model_dir]` to load the Paddle model on the command line + + +### High Dimensional +High Dimensional can be used to visualize data embeddings by projecting high-dimensional data into 2D / 3D. + +

+ +

+ +## Quick Start +To give the VisualDL a quick test, please use the following commands. + +``` +# Install the VisualDL. Preferably under a virtual environment or anaconda. +pip install --upgrade visualdl + +# run a demo, vdl_create_scratch_log will create logs for testing. +vdl_create_scratch_log +visualdl --logdir=scratch_log --port=8080 + +# visit http://127.0.0.1:8080 +``` + +If you encounter the error `TypeError: __init__() got an unexpected keyword argument 'file'`, that is due to protobuf version is not 3.5+,simply run `pip install --upgrade protobuf` will fix the issue. + +If you run into any other issues in above steps, it could be error caused by environmental issues by different python or pip versions. +Following installation methods might fix the issues. + +## Install with Virtualenv + +[Virtualenv](https://virtualenv.pypa.io/en/stable/) creates isolated Python environment that prevents interfering +by other Python programs on the same machine and make sure Python and pip are located properly. + +On macOS, install pip and virtualenv by: +``` +sudo easy_install pip +pip install --upgrade virtualenv +``` + +On Linux, install pip and virtualenv by: +``` +sudo apt-get install python3-pip python3-dev python-virtualenv +``` + +Then create a Virtualenv environment by one of following command: +``` +virtualenv ~/vdl # for Python2.7 +virtualenv -p python3 ~/vdl for Python 3.x +``` + +```~/vdl``` will be your Virtualenv directory, you may choose to install anywhere. + +Activate your Virtualenv environment by: +``` +source ~/vdl/bin/activate +``` + +Now you should be able to install VisualDL and run our demo: + +``` +pip install --upgrade visualdl + +# run a demo, vdl_create_scratch_log will create logs for testing. +vdl_create_scratch_log +visualdl --logdir=scratch_log --port=8080 + +# visit http://127.0.0.1:8080 +``` + +If you still have issues installing VisualDL from Virtualenv, try following installation method. + + +## Install with Anaconda + +Anaconda is a python distribution, with installation and package management tools. Also it is an environment manager, +which provides the facility to create different python environments, each with their own settings. + +Follow the instructions on the [Anaconda download site](https://www.anaconda.com/download) to download and install Anaconda. +Download Python 3.6 version command-Line installer. + +Create a conda environment named ```vdl``` or anything you want by: +``` +conda create -n vdl pip python=2.7 # or python=3.3, etc. +``` + +Activate the conda environment by: +``` +source activate vdl +``` + +Now you should be able to install VisualDL and run our demo: + +``` +pip install --upgrade visualdl + +# run a demo, vdl_create_scratch_log will create logs for testing. +vdl_create_scratch_log +visualdl --logdir=scratch_log --port=8080 + +# visit http://127.0.0.1:8080 +``` + +If you still have issues installing VisualDL, try installing from sources as in following section. + + +### Install from source +``` +#Preferably under a virtualenv or anaconda. +git clone https://github.com/PaddlePaddle/VisualDL.git +cd VisualDL + +python setup.py bdist_wheel +pip install --upgrade dist/visualdl-*.whl +``` + +If there are still issues regarding the ```pip install```, you can still start Visual DL by starting the dev server +[here](https://github.com/PaddlePaddle/VisualDL/blob/develop/docs/develop/how_to_dev_frontend_en.md) + + +## SDK +VisualDL provides both Python SDK and C++ SDK in order to fit more use cases. 
+ + +### Python SDK +VisualDL now supports both Python 2 and Python 3. +Below is an example of creating a simple Scalar component and inserting data from different timestamps: + +```python +import random +from visualdl import LogWriter + +logdir = "./tmp" +logger = LogWriter(logdir, sync_cycle=10000) + +# mark the components with 'train' label. +with logger.mode("train"): + # create a scalar component called 'scalars/scalar0' + scalar0 = logger.scalar("scalars/scalar0") + +# add some records during DL model running. +for step in range(100): + scalar0.add_record(step, random.random()) +``` + +### C++ SDK +Here is the C++ SDK identical to the Python SDK example above: + +```c++ +#include +#include +#include "visualdl/logic/sdk.h" + +namespace vs = visualdl; +namespace cp = visualdl::components; + +int main() { + const std::string dir = "./tmp"; + vs::LogWriter logger(dir, 10000); + + logger.SetMode("train"); + auto tablet = logger.AddTablet("scalars/scalar0"); + + cp::Scalar scalar0(tablet); + + for (int step = 0; step < 1000; step++) { + float v = (float)std::rand() / RAND_MAX; + scalar0.AddRecord(step, v); + } + + return 0; +} +``` + +## Launch Visual DL +After some logs have been generated during training, users can launch Visual DL application to see real-time data visualization by: + + +``` +visualdl --logdir +``` + +visualDL also supports following optional parameters: + +- `--host` set IP +- `--port` set port +- `-m / --model_pb` specify ONNX format for model file to view graph + + +### Contribute + +VisualDL is initially created by [PaddlePaddle](http://www.paddlepaddle.org/) and +[ECharts](http://echarts.baidu.com/). +We welcome everyone to use, comment and contribute to VisualDL :) + +## More details + +For more details about how to use VisualDL, please take a look at [documents](https://github.com/PaddlePaddle/VisualDL/tree/develop/demo) diff --git a/doc/paddle/advanced_guide/evaluation_debugging/debug/visualdl_usage.md b/doc/paddle/advanced_guide/evaluation_debugging/debug/visualdl_usage.md new file mode 100644 index 0000000000000000000000000000000000000000..e6a6445e3d4a89501f236bba6cf5623304ab3024 --- /dev/null +++ b/doc/paddle/advanced_guide/evaluation_debugging/debug/visualdl_usage.md @@ -0,0 +1,774 @@ +# VisualDL 使用指南 + +### 概述 + +VisualDL 是一个面向深度学习任务设计的可视化工具。VisualDL 利用了丰富的图表来展示数据,用户可以更直观、清晰地查看数据的特征与变化趋势,有助于分析数据、及时发现错误,进而改进神经网络模型的设计。 + +目前,VisualDL 支持 scalar, image, audio, graph, histogram, pr curve, high dimensional 七个组件,项目正处于高速迭代中,敬请期待新组件的加入。 + +| 组件名称 | 展示图表 | 作用 | +| :-------------------------------------------------: | :--------: | :----------------------------------------------------------- | +| [ Scalar](#Scalar--标量组件) | 折线图 | 动态展示损失函数值、准确率等标量数据 | +| [Image](#Image--图片可视化组件) | 图片可视化 | 显示图片,可显示输入图片和处理后的结果,便于查看中间过程的变化 | +| [Audio](#Audio--音频播放组件) | 音频播放 | 播放训练过程中的音频数据,监控语音识别与合成等任务的训练过程 | +| [Graph](#Graph--网络结构组件) | 网络结构 | 展示网络结构、节点属性及数据流向,辅助学习、优化网络结构 | +| [Histogram](#Histogram--直方图组件) | 直方图 | 展示训练过程中权重、梯度等张量的分布 | +| [PR Curve](#PR-Curve--PR曲线组件) | 折线图 | 权衡精度与召回率之间的平衡关系,便于选择最佳阈值 | +| [High Dimensional](#High-Dimensional--数据降维组件) | 数据降维 | 将高维数据映射到 2D/3D 空间来可视化嵌入,便于观察不同数据的相关性 | + +## Scalar -- 折线图组件 + +### 介绍 + +Scalar 组件的输入数据类型为标量,该组件的作用是将训练参数以折线图形式呈现。将损失函数值、准确率等标量数据作为参数传入 scalar 组件,即可画出折线图,便于观察变化趋势。 + +### 记录接口 + +Scalar 组件的记录接口如下: + +```python +add_scalar(tag, value, step, walltime=None) +``` + +接口参数说明如下: + +| 参数 | 格式 | 含义 | +| -------- | ------ | ------------------------------------------- | +| tag | string | 记录指标的标志,如`train/loss`,不能含有`%` | +| value | float | 要记录的数据值 
| +| step | int | 记录的步数 | +| walltime | int | 记录数据的时间戳,默认为当前时间戳 | + +### Demo + +- 基础使用 + +下面展示了使用 Scalar 组件记录数据的示例,代码文件请见[Scalar组件](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/components/scalar_test.py) + +```python +from visualdl import LogWriter + +if __name__ == '__main__': + value = [i/1000.0 for i in range(1000)] + # 初始化一个记录器 + with LogWriter(logdir="./log/scalar_test/train") as writer: + for step in range(1000): + # 向记录器添加一个tag为`acc`的数据 + writer.add_scalar(tag="acc", step=step, value=value[step]) + # 向记录器添加一个tag为`loss`的数据 + writer.add_scalar(tag="loss", step=step, value=1/(value[step] + 1)) +``` + +运行上述程序后,在命令行执行 + +```shell +visualdl --logdir ./log --port 8080 +``` + +接着在浏览器打开`http://127.0.0.1:8080`,即可查看以下折线图。 + +

+ +
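+
+除 `logdir` 外,`LogWriter` 还支持 `max_queue`、`flush_secs`、`filename_suffix` 等可选参数(各参数含义见《VisualDL 工具简介》中的接口参数表)。下面是一个带可选参数的初始化示意,取值仅作演示,可按需调整:
+
+```python
+from visualdl import LogWriter
+
+# 示意:显式设置消息队列容量、缓存时间与日志文件名后缀(取值仅供参考)
+with LogWriter(logdir="./log/scalar_test/train",
+               max_queue=20,
+               flush_secs=60,
+               filename_suffix=".demo") as writer:
+    writer.add_scalar(tag="acc", step=1, value=0.5678)
+```
+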

+ + + +- 多组实验对比 + +下面展示了使用Scalar组件实现多组实验对比 + +多组实验对比的实现分为两步: + +1. 创建子日志文件储存每组实验的参数数据 +2. 将数据写入scalar组件时,**使用相同的tag**,即可实现对比**不同实验**的**同一类型参数** + +```python +from visualdl import LogWriter + +if __name__ == '__main__': + value = [i/1000.0 for i in range(1000)] + # 步骤一:创建父文件夹:log与子文件夹:scalar_test + with LogWriter(logdir="./log/scalar_test") as writer: + for step in range(1000): + # 步骤二:向记录器添加一个tag为`train/acc`的数据 + writer.add_scalar(tag="train/acc", step=step, value=value[step]) + # 步骤二:向记录器添加一个tag为`train/loss`的数据 + writer.add_scalar(tag="train/loss", step=step, value=1/(value[step] + 1)) + # 步骤一:创建第二个子文件夹scalar_test2 + value = [i/500.0 for i in range(1000)] + with LogWriter(logdir="./log/scalar_test2") as writer: + for step in range(1000): + # 步骤二:在同样名为`train/acc`下添加scalar_test2的accuracy的数据 + writer.add_scalar(tag="train/acc", step=step, value=value[step]) + # 步骤二:在同样名为`train/loss`下添加scalar_test2的loss的数据 + writer.add_scalar(tag="train/loss", step=step, value=1/(value[step] + 1)) +``` + +运行上述程序后,在命令行执行 + +```shell +visualdl --logdir ./log --port 8080 +``` + +接着在浏览器打开`http://127.0.0.1:8080`,即可查看以下折线图,对比「scalar_test」和「scalar_test2」的Accuracy和Loss。 + +

+ +

+ + +*多组实验对比的应用案例可参考AI Studio项目:[VisualDL 2.0--眼疾识别训练可视化](https://aistudio.baidu.com/aistudio/projectdetail/502834) + + +### 功能操作说明 + +* 支持数据卡片「最大化」、「还原」、「坐标系转化」(y轴对数坐标)、「下载」折线图 + +

+ +

+ + + + + +* 数据点Hover展示详细信息 + +

+ +

+ + + + + +* 可搜索卡片标签,展示目标图像 + +

+ +

+ + + + + +* 可搜索打点数据标签,展示特定数据 + +

+ +

+ + + + +* X轴有三种衡量尺度 + +1. Step:迭代次数 +2. Walltime:训练绝对时间 +3. Relative:训练时长 + +

+ +

+ + +* 可调整曲线平滑度,以便更好的展现参数整体的变化趋势 + +

+ +

+ + + + +## Image -- 图片可视化组件 + +### 介绍 + +Image 组件用于显示图片数据随训练的变化。在模型训练过程中,将图片数据传入 Image 组件,就可在 VisualDL 的前端网页查看相应图片。 + +### 记录接口 + +Image 组件的记录接口如下: + +```python +add_image(tag, img, step, walltime=None) +``` + +接口参数说明如下: + +| 参数 | 格式 | 含义 | +| -------- | ------------- | ------------------------------------------- | +| tag | string | 记录指标的标志,如`train/loss`,不能含有`%` | +| img | numpy.ndarray | 以ndarray格式表示的图片 | +| step | int | 记录的步数 | +| walltime | int | 记录数据的时间戳,默认为当前时间戳 | + +### Demo + +下面展示了使用 Image 组件记录数据的示例,代码文件请见[Image组件](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/components/image_test.py) + +```python +import numpy as np +from PIL import Image +from visualdl import LogWriter + + +def random_crop(img): + """获取图片的随机 100x100 分片 + """ + img = Image.open(img) + w, h = img.size + random_w = np.random.randint(0, w - 100) + random_h = np.random.randint(0, h - 100) + r = img.crop((random_w, random_h, random_w + 100, random_h + 100)) + return np.asarray(r) + + +if __name__ == '__main__': + # 初始化一个记录器 + with LogWriter(logdir="./log/image_test/train") as writer: + for step in range(6): + # 添加一个图片数据 + writer.add_image(tag="eye", + img=random_crop("../../docs/images/eye.jpg"), + step=step) +``` + +运行上述程序后,在命令行执行 + +```shell +visualdl --logdir ./log --port 8080 +``` + +在浏览器输入`http://127.0.0.1:8080`,即可查看图片数据。 + +

+ +

+ + + + +### 功能操作说明 + +可搜索图片标签显示对应图片数据 + +

+ +

+ + + + +支持滑动Step/迭代次数查看不同迭代次数下的图片数据 + +

+ +

+ + + + +## Audio--音频播放组件 + +### 介绍 + +Audio组件实时查看训练过程中的音频数据,监控语音识别与合成等任务的训练过程。 + +### 记录接口 + +Audio 组件的记录接口如下: + +```python +add_audio(tag, audio_array, step, sample_rate) +``` + +接口参数说明如下: + +| 参数 | 格式 | 含义 | +| ----------- | ------------- | ------------------------------------------ | +| tag | string | 记录指标的标志,如`audio_tag`,不能含有`%` | +| audio_arry | numpy.ndarray | 以ndarray格式表示的音频 | +| step | int | 记录的步数 | +| sample_rate | int | 采样率,**注意正确填写对应音频的原采样率** | + +### Demo + +下面展示了使用 Audio 组件记录数据的示例,代码文件请见[Audio组件](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/components/audio_test.py) + +```python +from visualdl import LogWriter +import numpy as np +import wave + + +def read_audio_data(audio_path): + """ + Get audio data. + """ + CHUNK = 4096 + f = wave.open(audio_path, "rb") + wavdata = [] + chunk = f.readframes(CHUNK) + while chunk: + data = np.frombuffer(chunk, dtype='uint8') + wavdata.extend(data) + chunk = f.readframes(CHUNK) + # 8k sample rate, 16bit frame, 1 channel + shape = [8000, 2, 1] + return shape, wavdata + + +if __name__ == '__main__': + with LogWriter(logdir="./log") as writer: + audio_shape, audio_data = read_audio_data("./testing.wav") + audio_data = np.array(audio_data) + writer.add_audio(tag="audio_tag", + audio_array=audio_data, + step=0, + sample_rate=8000) +``` + +运行上述程序后,在命令行执行 + +```shell +visualdl --logdir ./log --port 8080 +``` + +在浏览器输入`http://127.0.0.1:8080`,即可查看音频数据。 + +

+ +

+ + + +### 功能操作说明 + +- 可搜索音频标签显示对应音频数据 + +

+ +

+ + + +- 支持滑动Step/迭代次数试听不同迭代次数下的音频数据 + +

+ +

+ + + +- 支持播放/暂停音频数据 + +

+ +

+ + + +- 支持音量调节 + +

+ +

+ + + +- 支持音频下载 + +

+ +

+ + + + +## Graph--网络结构组件 + +### 介绍 + +Graph组件一键可视化模型的网络结构。用于查看模型属性、节点信息、节点输入输出等,并进行节点搜索,协助开发者们快速分析模型结构与了解数据流向。 + +### Demo + +共有两种启动方式: + +- 前端模型文件拖拽上传: + + - 如只需使用Graph组件,则无需添加任何参数,在命令行执行`visualdl`后即可启动面板进行上传。 + - 如果同时需使用其他功能,在命令行指定日志文件路径(以`./log`为例)即可启动面板进行上传: + + ```shell + visualdl --logdir ./log --port 8080 + ``` + +

+ +

+ + + +- 后端启动Graph: + + - 在命令行加入参数`--model`并指定**模型文件**路径(非文件夹路径),即可启动并查看网络结构可视化: + + ```shell + visualdl --model ./log/model --port 8080 + ``` + +

+ +

+ + + +### 功能操作说明 + +- 一键上传模型 + - 支持模型格式:PaddlePaddle、ONNX、Keras、Core ML、Caffe、Caffe2、Darknet、MXNet、ncnn、TensorFlow Lite + - 实验性支持模型格式:TorchScript、PyTorch、Torch、 ArmNN、BigDL、Chainer、CNTK、Deeplearning4j、MediaPipe、ML.NET、MNN、OpenVINO、Scikit-learn、Tengine、TensorFlow.js、TensorFlow + +

+ +

+ + + +- 支持上下左右任意拖拽模型、放大和缩小模型 + +

+ +

+ + + +- 搜索定位到对应节点 + +

+ +

+ + + +- 点击查看模型属性 + +

+ +

+ + + +

+ +

+ + + +- 支持选择模型展示的信息 + +

+ +

+ + + +- 支持以PNG、SVG格式导出模型结构图 + +

+ +

+ + + +- 点击节点即可展示对应属性信息 + +

+ +

+ + + +- 支持一键更换模型 + +

+ +

+ + + +## Histogram--直方图组件 + +### 介绍 + +Histogram组件以直方图形式展示Tensor(weight、bias、gradient等)数据在训练过程中的变化趋势。深入了解模型各层效果,帮助开发者精准调整模型结构。 + +### 记录接口 + +Histogram 组件的记录接口如下: + +```python +add_histogram(tag, values, step, walltime=None, buckets=10) +``` + +接口参数说明如下: + +| 参数 | 格式 | 含义 | +| -------- | --------------------- | ------------------------------------------- | +| tag | string | 记录指标的标志,如`train/loss`,不能含有`%` | +| values | numpy.ndarray or list | 以ndarray或list格式表示的数据 | +| step | int | 记录的步数 | +| walltime | int | 记录数据的时间戳,默认为当前时间戳 | +| buckets | int | 生成直方图的分段数,默认为10 | + +### Demo + +下面展示了使用 Histogram组件记录数据的示例,代码文件请见[Histogram组件](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/components/histogram_test.py) + +```python +from visualdl import LogWriter +import numpy as np + + +if __name__ == '__main__': + values = np.arange(0, 1000) + with LogWriter(logdir="./log/histogram_test/train") as writer: + for index in range(1, 101): + interval_start = 1 + 2 * index / 100.0 + interval_end = 6 - 2 * index / 100.0 + data = np.random.uniform(interval_start, interval_end, size=(10000)) + writer.add_histogram(tag='default tag', + values=data, + step=index, + buckets=10) +``` + +运行上述程序后,在命令行执行 + +```shell +visualdl --logdir ./log --port 8080 +``` + +在浏览器输入`http://127.0.0.1:8080`,即可查看训练参数直方图。 + +### 功能操作说明 + +- 支持数据卡片「最大化」、直方图「下载」 + +

+ +

+ +- 可选择Offset或Overlay模式 + +

+ +

+ + + - Offset模式 + +

+ +

+ + + + - Overlay模式 + +

+ +

+ + +- 数据点Hover展示参数值、训练步数、频次 + + - 在第240次训练步数时,权重为-0.0031,且出现的频次是2734次 + +

+ +

+ +- 可搜索卡片标签,展示目标直方图 + +

+ +

+ +- 可搜索打点数据标签,展示特定数据流 + +

+ +

+ +## PR Curve--PR曲线组件 + +### 介绍 + +PR Curve以折线图形式呈现精度与召回率的权衡分析,清晰直观了解模型训练效果,便于分析模型是否达到理想标准。 + +### 记录接口 + +PR Curve组件的记录接口如下: + +```python +add_pr_curve(tag, labels, predictions, step=None, num_thresholds=10) +``` + +接口参数说明如下: + +| 参数 | 格式 | 含义 | +| -------------- | --------------------- | ------------------------------------------- | +| tag | string | 记录指标的标志,如`train/loss`,不能含有`%` | +| labels | numpy.ndarray or list | 以ndarray或list格式表示的实际类别 | +| predictions | numpy.ndarray or list | 以ndarray或list格式表示的预测类别 | +| step | int | 记录的步数 | +| num_thresholds | int | 阈值设置的个数,默认为10,最大值为127 | + +### Demo + +下面展示了使用 PR Curve 组件记录数据的示例,代码文件请见[PR Curve组件](#https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/components/pr_curve_test.py) + +```python +from visualdl import LogWriter +import numpy as np + +with LogWriter("./log/pr_curve_test/train") as writer: + for step in range(3): + labels = np.random.randint(2, size=100) + predictions = np.random.rand(100) + writer.add_pr_curve(tag='pr_curve', + labels=labels, + predictions=predictions, + step=step, + num_thresholds=5) +``` + +运行上述程序后,在命令行执行 + +```shell +visualdl --logdir ./log --port 8080 +``` + +接着在浏览器打开`http://127.0.0.1:8080`,即可查看PR Curve + +

+ +

+ + + +### 功能操作说明 + +- 支持数据卡片「最大化」,「还原」、「下载」PR曲线 + +

+ +

+ +- 数据点Hover展示详细信息:阈值对应的TP、TN、FP、FN + +

+ +

+ +- 可搜索卡片标签,展示目标图表 + +

+ +

+ +- 可搜索打点数据标签,展示特定数据 + +

+ +

+ + +- 支持查看不同训练步数下的PR曲线 + +

+ +

+ +- X轴-时间显示类型有三种衡量尺度 + + - Step:迭代次数 + - Walltime:训练绝对时间 + - Relative:训练时长 + +

+ +

+ +## High Dimensional -- 数据降维组件 + +### 介绍 + +High Dimensional 组件将高维数据进行降维展示,用于深入分析高维数据间的关系。目前支持以下两种降维算法: + + - PCA : Principle Component Analysis 主成分分析 + - t-SNE : t-distributed stochastic neighbor embedding t-分布式随机领域嵌入 + +### 记录接口 + +High Dimensional 组件的记录接口如下: + +```python +add_embeddings(tag, labels, hot_vectors, walltime=None) +``` + +接口参数说明如下: + +| 参数 | 格式 | 含义 | +| ----------- | ------------------- | ---------------------------------------------------- | +| tag | string | 记录指标的标志,如`default`,不能含有`%` | +| labels | numpy.array 或 list | 一维数组表示的标签,每个元素是一个string类型的字符串 | +| hot_vectors | numpy.array or list | 与labels一一对应,每个元素可以看作是某个标签的特征 | +| walltime | int | 记录数据的时间戳,默认为当前时间戳 | + +### Demo + +下面展示了使用 High Dimensional 组件记录数据的示例,代码文件请见[High Dimensional组件](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/components/high_dimensional_test.py) + +```python +from visualdl import LogWriter + + +if __name__ == '__main__': + hot_vectors = [ + [1.3561076367500755, 1.3116267195134017, 1.6785401875616097], + [1.1039614644440658, 1.8891609992484688, 1.32030488587171], + [1.9924524852447711, 1.9358920727142739, 1.2124401279391606], + [1.4129542689796446, 1.7372166387197474, 1.7317806077076527], + [1.3913371800587777, 1.4684674577930312, 1.5214136352476377]] + + labels = ["label_1", "label_2", "label_3", "label_4", "label_5"] + # 初始化一个记录器 + with LogWriter(logdir="./log/high_dimensional_test/train") as writer: + # 将一组labels和对应的hot_vectors传入记录器进行记录 + writer.add_embeddings(tag='default', + labels=labels, + hot_vectors=hot_vectors) +``` + +运行上述程序后,在命令行执行 + +```shell +visualdl --logdir ./log --port 8080 +``` + +接着在浏览器打开`http://127.0.0.1:8080`,即可查看降维后的可视化数据。 + +

+ +

+ + + + + +# diff --git a/doc/paddle/advanced_guide/evaluation_debugging/debug/visualdl_usage_en.md b/doc/paddle/advanced_guide/evaluation_debugging/debug/visualdl_usage_en.md new file mode 100644 index 0000000000000000000000000000000000000000..c76a9cd5a2cdc665f48de0a695734e9c8095a89f --- /dev/null +++ b/doc/paddle/advanced_guide/evaluation_debugging/debug/visualdl_usage_en.md @@ -0,0 +1,633 @@ +# VisualDL user guide + +## Overview + +VisualDL is a toolkit to visualize data generated in deep learning tasks. VisualDL make use of [ECharts](https://echarts.apache.org/en/feature.html) to display the distribution and change tendency of data, so that users can view data more clearly and intuitively. + +To be conductive to analyze the characteristics of data, detect errors, and optimize the neural network model, VisualDL provides seven functional components, including scalar, histogram, image, text, audio, high dimensional and graph. + +| Component name | Display chart | Function of component | +|:----:|:----:|:---| +|scalar| Line Chart | Dynamically display scalar data, such as loss, accuracy, etc.| +|histogram| Histogram | Dynamically display the numerical distribution and change tendency of parameters (such as weight matrix, offset, gradient, etc)| +|image| Image | Dynamically display images, including input images and convolution results, it is conveniently to view the change tendency of intermediate process| +|text| Text | Dynamically display text | +|audio| Audio | Dynamically display audio, users can play directly or choose to download| +|high dimensional| Coordinate | Map high dimensional data into 2D/3D space, for making it easy to observe the correlation of different data| +|graph| Directed Graph | Display the neural networks | + +## Toolkits of adding data + +The six components (scalar, histogram, image, text, audio and high dimensional) are used to add data during program running. Class LogWriter must be initialized before adding data, in order to set the storage path and synchronization cycle. The input parameters of each components will be saved as log file in disk, after that the log file will be loaded into front end to display. + +### LogWriter + +LogWriter is a Python wrapper to write data to log file with the data format defined as in protobuf file [storage.proto](https://github.com/PaddlePaddle/VisualDL/blob/develop/visualdl/storage/storage.proto). + +The definition of LogWriter : + +```python +class LogWriter(dir, sync_cycle) +``` + +> :param dir : the directory path to the saved log files. +> :param sync_cycle : specify how often should the system store data into the file system, that is, system will save the data into the file system once operations count reaches sync_cycle. +> :return: a new LogWriter instance. + +Demo 1. Create a LogWriter instance + +```python +# Create a LogWriter instance named log_writer +log_writer = LogWriter("./log", sync_cycle=10) +``` + +class LogWriter include the following member functions: + +* `mode()` +* `scalar()`, `histogram()`, `image()`, `text()`, `audio()`, `embedding()` + +The member function mode() is used to specify the phase of program running. The input string is customized, such as `test`, `validation`, `test`, `conv_layer1`. Components with same mode are grouped together, so users can choose different modes to display on the frontend webpage. + +The member functions scalar(), histogram(), image(), text(), audio() and embedding() are used to create component instance。 + +Demo 2. 
Use LogWriter instance to create component instance + +```python +# Set the name of mode to "train", and create a scalar component instance +with log_writer.mode("train") as logger: + train_scalar = logger.scalar("acc") + +# Set the name of mode to "test", and create an image component instance +with log_writer.mode("test") as shower: + test_image = shower.image("conv_image", 10, 1) +``` + +### scalar -- component to draw line charts + +The scalar component is used to draw line charts. By passing scalar data such as loss value, accuracy as input parameters into the scalar() function, the frontend webpage will display the data in the form of line charts. It can facilitate users to grasp the changing tendency of training process. + +The first step of using scalar component is initializing the member function scalar() of LogWriter instance, then you can add data through the member function add_record() of ScalarWriter instance. + +* The member function `scalar()` of LogWriter instance : + +```python +def scalar(tag, type) +``` + +> :param tag : The scalar writer will label the data with tag. +> :param type : Data type, optional choice is limited to “float”, "double", "int", the default setting is "float". +> :return : A ScalarWriter instance to handle step and value records. + +* The member function `add_record()` of ScalarWriter instance : + +```python +def add_record(step, value) +``` + +> :param step : Step number. +> :param value : Input data. + +Demo 3. scalar demo program[Github](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/component/scalar-demo.py) + +```python +# coding=utf-8 +from visualdl import LogWriter + +# Create a LogWriter instance +log_writer = LogWriter("./log", sync_cycle=20) + +# Create two ScalarWriter instances, whose mode is set to be "train" +with log_writer.mode("train") as logger: + train_acc = logger.scalar("acc") + train_loss = logger.scalar("loss") + +# Create a ScalarWriter instance, whose mode is set to be "test" +with log_writer.mode("test") as logger: + test_acc = logger.scalar("acc") + +value = [i/1000.0 for i in range(1000)] +for step in range(1000): + # Add data + train_acc.add_record(step, value[step]) + train_loss.add_record(step, 1 / (value[step] + 1)) + test_acc.add_record(step, 1 - value[step]) +``` + +After running the demo program above, you can start the flask server with command ``visualdl`` : + +```shell +visualdl --logdir ./log --host 0.0.0.0 --port 8080 +``` + +By opening the URL [http://0.0.0.0:8080](http://0.0.0.0:8080) in your browser,you will see the interface below. + +

+
+Figure 1. scalar component displays line charts
+

+ +The right sidebar of VisualDL has adjustment options for each component, take scalar component as example: + +* Smoothing : To adjust the smoothness of the line charts. +* X-axis : The horizontal ordinate of line charts, optional choice : Step, Relative, Wall Time. +* Tooltip sorting : Sorting method of tag, optional choice : default, descending, ascending, nearest. + +There is also a ``RUNNING`` button at the bottom of the right sidebar, the frontend webpage will send request to the flask server for data synchronization. Switching to ``Stopped``, it will pause the data update. + +### histogram -- component to display data distribution + +The histogram component is used to draw histogram for displaying the distribution of input data. By passing some parameters of model training, such as weight matrices, biases, gradient, as input parameters into the `histogram()` function, the frontend webpage will display the data in the form of histogram. It can facilitate users to view the change tendency of parameters distribution. + +The first step of using histogram component is initializing the member function `histogram()` of LogWriter instance, then you can add data through the member function `add_record()` of HistogramWriter instance. + +* The member function histogram() of LogWriter instance : + +```python +def histogram(tag, num_buckets, type) +``` + +> :param tag : The histogram writer will label the data with tag. +> :param num_buckets : The number of pillar in the histogram. +> :param type : Data type, optional choice is limited to “float”, "double", "int", the default setting is "float". +> :return : A HistogramWriter instance to record distribution. + +* The member function add_record() of HistogramWriter instance : + +```python +def add_record(step, value) +``` + +> :param step : Step number. +> :param value : Input data, type is list[]. + +Demo 4. histogram demo program [Github](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/component/histogram-demo.py) + +```python +# coding=utf-8 +import numpy as np +from visualdl import LogWriter + +# Create a LogWriter instance +log_writer = LogWriter('./log', sync_cycle=10) + +# Create a HistogramWriter instance, whose mode is set to be "train" +with log_writer.mode("train") as logger: + param1_histogram = logger.histogram("param1", num_buckets=100) + +# Loop +for step in range(1, 101): + # Create input data + interval_start = 1 + 2 * step/100.0 + interval_end = 6 - 2 * step/100.0 + data = np.random.uniform(interval_start, interval_end, size=(10000)) + + # Use member function add_record() to add data + param1_histogram.add_record(step, data) +``` + +After running the demo program above, you can start the flask server with command ``visualdl`` : + +```shell +visualdl --logdir ./log --host 0.0.0.0 --port 8080 +``` + +By opening the URL [http://0.0.0.0:8080](http://0.0.0.0:8080) in your browser,you will see the interface below. + +

+
+Figure 2. histogram component displays histograms
+

+ +### image -- component to display image + +The image component is used to visualize the image data. By passing the image data (type numpy.ndarray) into the image() function, the frontend webpage will display the image directly. + +The first step of using image component is initializing the member function image() of LogWriter instance. Then you can add data through the member functions start_sampling(), is_sample_taken(), set_sample(), and finish_sample() of ImageWriter instance. + +* The member function image() of LogWriter instance : + +```python +def image(tag, num_samples, step_cycle) +``` + +> :param tag : The image writer will label the image with tag. +> :param num_samples : Appoint the number of samples to take in a step. +> :param step_cycle : Store every `step_cycle` as a record, the default value is 1. +> :return: A ImageWriter instance to sample images. + +* Start a new sampling cycle, allocate memory space for the sampled data + +```python +def start_sampling() +``` + +* Determine whether the picture should be sampled or not. If the return value is -1, it means no sampling, otherwise it should be sampled : + +```python +def is_sample_taken() +``` + +* Add image data : + +```python +def set_sample(index, image_shape, image_data) +``` + +> :param index : Combined with tag, used to determine the sub-frame of the image display. +> :param image_shape : The shape of image, [weight, height, channel(RGB is 3, GrayScale is 1)]. +> :param image_data : Image data with type numpy.ndarray, member function flatten() can turn the shape to row vector. + +* End the current sampling period, load the sampled data into disk, and release the memory space : + +```python +def finish_sample() +``` + +Demo 5. image demo program [Github](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/component/image-demo.py) + +```python +# coding=utf-8 +import numpy as np +from visualdl import LogWriter +from PIL import Image + + +def random_crop(img): + ''' + This function is used to get a random block (100*100 pixels) of data img. + ''' + img = Image.open(img) + w, h = img.size + random_w = np.random.randint(0, w - 100) + random_h = np.random.randint(0, h - 100) + return img.crop((random_w, random_h, random_w + 100, random_h + 100)) + + +# Create a LogWriter instance +log_writer = LogWriter("./log", sync_cycle=10) + +# Create a ImageWriter instance +ns = 2 +with log_writer.mode("train") as logger: + input_image = logger.image(tag="test", num_samples=ns) + +# The variable sample_num is used to record the number of image data that have been sampled +sample_num = 0 + +for step in range(6): + # Set the condition of start_sampling() + if sample_num == 0: + input_image.start_sampling() + + idx = input_image.is_sample_taken() + # if idx != -1,sample this data, otherwise skip + if idx != -1: + # Get image data + image_path = "test.jpg" + image_data = np.array(random_crop(image_path)) + + # Add data + input_image.set_sample(idx, image_data.shape, image_data.flatten()) + sample_num += 1 + + # If sampling of the present period have been completed, call finish_sample() + if sample_num % ns == 0: + input_image.finish_sampling() + sample_num = 0 +``` + +After running the demo program above, you can start the flask server with command ``visualdl`` : + +```shell +visualdl --logdir ./log --host 0.0.0.0 --port 8080 +``` + +By opening the URL [http://0.0.0.0:8080](http://0.0.0.0:8080) in your browser,then click the ``SAMPLES`` option at the top of the webpage, you will see the interface below. + +

+
+Figure 3. image component displays images
+

+ +Each subgraph has a horizontal axis which can be dragged to display images of different steps. + +### text -- component to display text + +The text component is used to visualize the text data. By passing the text data (type string) into the text() function, the frontend webpage will display the image directly. + +The first step of using text component is initializing the member function text() of LogWriter instance, then you can add data through the member function add_record() of TextWriter instance. + +* The member function text() of LogWriter instance : + +```python +def text(tag) +``` + +> :param tag : Combined with tag, used to determine the sub-frame of the image display. + +* The member function add_record() of TextWriter instance : + +```python +def add_record(step, str) +``` + +> :param step : Step number. +> :param value : Input data, type is string. + +Demo 6. text demo program [Github](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/component/text-demo.py) + +```python +# coding=utf-8 +from visualdl import LogWriter + +# create a LogWriter instance +log_writter = LogWriter("./log", sync_cycle=10) + +# Create a TextWriter instance +with log_writter.mode("train") as logger: + vdl_text_comp = logger.text(tag="test") + +# Use member function add_record() to add data +for i in range(1, 6): + vdl_text_comp.add_record(i, "这是第 %d 个 step 的数据。" % i) + vdl_text_comp.add_record(i, "This is data %d ." % i) +``` + +After running the demo program above, you can start the flask server with command ``visualdl`` : + +```shell +visualdl --logdir ./log --host 0.0.0.0 --port 8080 +``` + +By opening the URL [http://0.0.0.0:8080](http://0.0.0.0:8080) in your browser,then click the ``SAMPLES`` option at the top of the webpage, you will see the interface below. + +

+
+Figure 4. text component displays texts
+

+ +Each subgraph has a horizontal axis which can be dragged to display text of different steps. + +### audio -- component to play audio + +The audio component is used to play audio. By passing the audio data (type numpy.ndarray) into the audio() function, users can play audio directly, or choose to download. + +The first step of using audio component is initializing the member function audio() of LogWriter instance. Then you can add data through the member functions start_sampling(), is_sample_taken(), set_sample(), and finish_sample() of AudioWriter instance. + +* The member function audio() of LogWriter instance : + +```python +def audio(tag, num_samples, step_cycle) +``` + +> :param tag : The audio writer will label the audio with tag. +> :param num_samples : Appoint the number of samples to take in a step. +> :param step_cycle : Store every `step_cycle` as a record, the default value is 1. +> :return: An AudioWriter instance to sample images. + +* Start a new sampling cycle, allocate memory space for the sampled data : + +```python +def start_sampling() +``` + +* Determine whether the audio should be sampled or not. If the return value is -1, it means no sampling, otherwise it should be sampled : + +```python +def is_sample_taken() +``` + +* Add audio data : + +```python +def set_sample(index, audio_params, audio_data) +``` + +> :param index : Combined with tag, used to determine the sub-frame of the audio. +> :param audio_params : The parameters of audio, [sample rate, sample width, channels]. +> :param audio_data : Audio data with type numpy.ndarray, member function flatten() can turn the shape to row vector. + +* End the current sampling period, load the sampled data into disk, and release the memory space : + +```python +def finish_sample() +``` + +Demo 7. 
audio demo program [Github](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/component/audio-demo.py) + +```python +# coding=utf-8 +import numpy as np +import wave +from visualdl import LogWriter + + +def read_audio_data(audio_path): + """ + Read audio data + """ + CHUNK = 4096 + f = wave.open(audio_path, "rb") + wavdata = [] + chunk = f.readframes(CHUNK) + + while chunk: + data = np.fromstring(chunk, dtype='uint8') + wavdata.extend(data) + chunk = f.readframes(CHUNK) + + # 8k sample rate, 16bit frame, 1 channel + shape = [8000, 2, 1] + + return shape, wavdata + + +# Create a LogWriter instance +log_writter = LogWriter("./log", sync_cycle=10) + +# Create an AudioWriter instance +ns = 2 +with log_writter.mode("train") as logger: + input_audio = logger.audio(tag="test", num_samples=ns) + +# The variable sample_num is used to record the number of audio data that have been sampled +audio_sample_num = 0 + +for step in range(9): +# Set the condition of start_sampling() +if audio_sample_num == 0: + input_audio.start_sampling() + + # Get idx + idx = input_audio.is_sample_taken() + # if idx != -1,sample this data, otherwise skip + if idx != -1: + # Read audio data + audio_path = "test.wav" + audio_shape, audio_data = read_audio_data(audio_path) + + # Add data through member function set_samle() + input_audio.set_sample(idx, audio_shape, audio_data) + audio_sample_num += 1 + + # If sampling of the present period have been completed, call finish_sample() + if audio_sample_num % ns ==0: + input_audio.finish_sampling() + audio_sample_num = 0 +``` + +After running the demo program above, you can start the flask server with command ``visualdl`` : + +```shell +visualdl --logdir ./log --host 0.0.0.0 --port 8080 +``` + +By opening the URL [http://0.0.0.0:8080](http://0.0.0.0:8080) in your browser,then click the ``SAMPLES`` option at the top of the webpage, you will see the interface below. + +

+
+Figure 5. audio component displays audios
+

+ +Each subgraph has a horizontal axis which can be dragged to play audio of different steps. + +### high dimensional -- component of dimensionality reduction + +The role of high dimensional component is to map data into 2D or 3D space for embedding visualization, which is helpful for users to understand the relevance of different data. + +The high dimensional component supports the following two dimensionality reduction algorithms : + +* PCA : Principle Component Analysis +* [t-SNE](https://lvdmaaten.github.io/tsne/) : t-distributed stochastic neighbor embedding + +The first step of using audio component is initializing the member function embedding() of LogWriter instance. Then you can add data through the member functions add_embeddings_with_word_dict() of EmbeddingWriter instance. + + +* The member function embedding() of LogWriter instance + +```python +def embedding() +``` + +* The member function add_embeddings_with_word_dict() of EmbeddingWriter instance : + +```python +def add_embeddings_with_word_dict(data, Dict) +``` + +> :param data : input data , type List[List(float)]. +> :param Dict : dictionary, type Dict[str, int]. + +Demo 8. high dimensional demo program [Github](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/component/embedding-demo.py) + +```python +# coding=utf-8 +import numpy as np +from visualdl import LogWriter + +# Create a LogWriter instance +log_writer = LogWriter("./log", sync_cycle=10) + +# Create an EmbeddingWriter instance +with log_writer.mode("train") as logger: + train_embedding = logger.embedding() + +# Initialize data List[List(float)] +hot_vectors = np.random.uniform(1, 2, size=(10, 3)) +word_dict = { + "label_1": 5, + "label_2": 4, + "label_3": 3, + "label_4": 2, + "label_5": 1,} + +# Add data through member function add_embeddings_with_word_dict(data, Dict) +train_embedding.add_embeddings_with_word_dict(hot_vectors, word_dict) +``` + +After running the demo program above, you can start the flask server with command ``visualdl`` : + +```shell +visualdl --logdir ./log --host 0.0.0.0 --port 8080 +``` + +By opening the URL [http://0.0.0.0:8080](http://0.0.0.0:8080) in your browser,then click the ``HIGHDIMENSIONAL`` option at the top of the webpage, you will see the interface below. + +

+
+Figure 6. high dimensional component displays plane coordinates
+

+ +

+
+Figure 7. High dimensional component displays Cartesian coordinates
+

+ +## graph -- component to visualize neural network + +The role of graph component is to visualize neural network. This component can display models with +Paddle format or [ONNX](https://onnx.ai) format. The graph component can help users understand the model structure of the neural network, and also help to troubleshoot neural network configuration errors. + +Unlike other components that need to record data, the only one prerequisite for using graph component is specifying the storage path of the model file. That is, adding the option --model_pb to the command ``visualdl`` to specify the storage path of the model file, then you can see the corresponding neural network in the frontend webpage. + +Demo 9. graph demo program(How to save a Lenet-5 model by Paddle)[Github](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/component/graph-demo.py) + +```python +# coding=utf-8 +import paddle.fluid as fluid + + +def lenet_5(img): + ''' + Define the Lenet-5 model + ''' + conv1 = fluid.nets.simple_img_conv_pool( + input=img, + filter_size=5, + num_filters=20, + pool_size=2, + pool_stride=2, + act="relu") + + conv1_bn = fluid.layers.batch_norm(input=conv1) + + conv2 = fluid.nets.simple_img_conv_pool( + input=conv1_bn, + filter_size=5, + num_filters=50, + pool_size=2, + pool_stride=2, + act="relu") + + predition = fluid.layers.fc(input=conv2, size=10, act="softmax") + return predition + + +# Variable assignment +image = fluid.layers.data(name="img", shape=[1, 28, 28], dtype="float32") +predition = lenet_5(image) + +place = fluid.CPUPlace() +exe = fluid.Executor(place=place) +exe.run(fluid.default_startup_program()) + +# save the result to "./paddle_lenet_5_model" +fluid.io.save_inference_model( + "./paddle_lenet_5_model", + feeded_var_names=[image.name], + target_vars=[predition], + executor=exe) +``` + +After running the demo program above, you can start the flask server with command ``visualdl`` : + +```shell +visualdl --logdir ./log --host 0.0.0.0 --port 8080 --model_pb paddle_lenet_5_model +``` + +By opening the URL [http://0.0.0.0:8080](http://0.0.0.0:8080) in your browser,then click the `GRAPHS` option at the top of the webpage, you will see the interface below. + +

+
+Figure 8. graph component displays the model structure of Lenet-5
+

diff --git a/doc/paddle/advanced_guide/evaluation_debugging/evaluation/metrics.rst b/doc/paddle/advanced_guide/evaluation_debugging/evaluation/metrics.rst new file mode 100644 index 0000000000000000000000000000000000000000..0f46dced718ddf34cdab23eae029254f904ae3e7 --- /dev/null +++ b/doc/paddle/advanced_guide/evaluation_debugging/evaluation/metrics.rst @@ -0,0 +1,77 @@ +############ +模型评估 +############ + +模型评估是指用评价函数(metrics)来评估模型的好坏,可作为在训练中调整超参数、评估模型效果的重要依据。不同类型的模型任务会选取不同评价函数,常见的如回归类任务会用均方差(MSE),二分类任务会用AUC (Area Under Curve)值等。 + +评价函数和loss函数非常相似,但不参与模型的训练优化。 + + +评价函数的输入为模型的预测值(preds)和标注值(labels),并返回计算后的评价指标。 + +paddle.fluid.metrics模块提供了一系列常用的模型评价指标; 用户也可以通过Python接口定制评价指标,或者通过定制C++ Operator的方式,在GPU上加速评价指标的计算。 + +常用指标 +############ + +不同类型的任务,会选用不同的评价指标。 + +回归问题通常会用RMSE(均方根误差)、MAE(平均绝对误差)、R-Square(R平方)等 + +AUC(Area Under Cure)指标则常被用在分类任务(classification)上 + +目标检测任务(Object Detection)则经常会用到mAP(Mean Average Precision) + +paddle.fluid.metrics中包含了一些常用分类指标,例如Precision, Recall, Accuracy等 + +下面是使用Precision指标的示例: + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + metric = fluid.metrics.Precision() + + # generate the preds and labels + + preds = [[0.1], [0.7], [0.8], [0.9], [0.2], + [0.2], [0.3], [0.5], [0.8], [0.6]] + + labels = [[0], [1], [1], [1], [1], + [0], [0], [0], [0], [0]] + + preds = np.array(preds) + labels = np.array(labels) + + metric.update(preds=preds, labels=labels) + numpy_precision = metric.eval() + + print("expect precision: %.2f and got %.2f" % (3.0 / 5.0, numpy_precision)) + + +自定义指标 +############ +Fluid支持自定义指标,可灵活支持各类计算任务。下面是一个自定义的简单计数器评价函数示例: + +其中preds是模型预测值,labels是标注值。 + +.. code-block:: python + + class MyMetric(MetricBase): + def __init__(self, name=None): + super(MyMetric, self).__init__(name) + self.counter = 0 # simple counter + + def reset(self): + self.counter = 0 + + def update(self, preds, labels): + if not _is_numpy_(preds): + raise ValueError("The 'preds' must be a numpy ndarray.") + if not _is_numpy_(labels): + raise ValueError("The 'labels' must be a numpy ndarray.") + self.counter += sum(preds == labels) + + def eval(self): + return self.counter diff --git a/doc/paddle/advanced_guide/evaluation_debugging/evaluation/metrics_en.rst b/doc/paddle/advanced_guide/evaluation_debugging/evaluation/metrics_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..62e75e6909e9e1ffee116aa6a295db221aa5b6f3 --- /dev/null +++ b/doc/paddle/advanced_guide/evaluation_debugging/evaluation/metrics_en.rst @@ -0,0 +1,70 @@ +################ +Model Evaluation +################ + +Model evaluation is to use metrics to reflect the accuracy of the model under the expected target. The metrics are determined by model tasks. Model evaluation is an important basis for adjusting the super-parameters in training and evaluating the effect of the model. The input to the metric function is the predicted preds and labels for the current model, and the output is customized. The metric function is very similar to the loss function, but metric is not a component of the model training network. + +Users can get the current predicted preds and labels through training network, and customize the metric function on the Python side, or accelerate the metric calculation on the GPU by customizing the C++ Operator. + +The ``paddle.fluid.metrics`` module contains this feature. + + +Common metrics +################## + +The metric function varies with different model tasks, and so does the metric construction. 
+ +The labels in regression task are real numbers, you can refer to the MSE (Mean Squared Error) method for help. +The commonly used metrics for classification tasks are classification metrics. The metric function mentioned in this paper is generally metrics of binary classification. For details of metrics for multi-category and multi-label tasks, please read the corresponding API documents. For example, the ranking metric auc function works for multi-classification tasks because these tasks can be used as a 0,1 classification task. +Fluid contains common classification metrics, such as Precision, Recall, Accuracy, etc. Please read the API documentation for more. Take ``Precision`` as an example, the specific method is + +.. code-block:: python + + + import paddle.fluid as fluid + import numpy as np + + metric = fluid.metrics.Precision() + + # generate the preds and labels + + preds = [[0.1], [0.7], [0.8], [0.9], [0.2], + [0.2], [0.3], [0.5], [0.8], [0.6]] + + labels = [[0], [1], [1], [1], [1], + [0], [0], [0], [0], [0]] + + preds = np.array(preds) + labels = np.array(labels) + + metric.update(preds=preds, labels=labels) + numpy_precision = metric.eval() + + print("expect precision: %.2f and got %.2f" % (3.0 / 5.0, numpy_precision)) + + +As for other tasks such as MultiTask Learning, Metric Learning, and Learning To Rank, please refer to the API documentation for their various metric construction methods. + +Custom metrics +################ +Fluid supports custom metrics and is flexible enough to support a wide range of computing tasks. The evaluation of the model is implemented below with a metric function composed of a simple counter, where ``preds`` is the prediction values and ``labels`` is the given labels. + +.. code-block:: python + + class MyMetric(MetricBase): + def __init__(self, name=None): + super(MyMetric, self).__init__(name) + self.counter = 0 # simple counter + + def reset(self): + self.counter = 0 + + def update(self, preds, labels): + if not _is_numpy_(preds): + raise ValueError("The 'preds' must be a numpy ndarray.") + if not _is_numpy_(labels): + raise ValueError("The 'labels' must be a numpy ndarray.") + self.counter += sum(preds == labels) + + def eval(self): + return self.counter diff --git a/doc/paddle/advanced_guide/evaluation_debugging/index_cn.rst b/doc/paddle/advanced_guide/evaluation_debugging/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..fd0685530d31f6bd138a2c6e549264af52f0d38c --- /dev/null +++ b/doc/paddle/advanced_guide/evaluation_debugging/index_cn.rst @@ -0,0 +1,15 @@ +############### +模型评估/调试 +############### + +本部分包括两篇文档: + +- `模型评估 `_:介绍常用模型评估指标的构造方法 + +- `Visual DL 工具 `_:介绍如何利用 Visual DL 工具可视化训练过程 + +.. toctree:: + :hidden: + + evaluation/metrics.rst + debug/index_cn.rst diff --git a/doc/paddle/advanced_guide/evaluation_debugging/index_en.rst b/doc/paddle/advanced_guide/evaluation_debugging/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..7def64512edbe6365d2c67694ae45fb3144a4cae --- /dev/null +++ b/doc/paddle/advanced_guide/evaluation_debugging/index_en.rst @@ -0,0 +1,17 @@ +############################### +Model Evaluation and Debugging +############################### + + + +There are two articles in this section: + +- `Model Evaluation `_:This section will introduce the construction of common metrics. + +- `VisualDL Tools `_:How to use Visual DL to visualise training process. + +.. 
toctree:: + :hidden: + + evaluation/metrics_en.rst + debug/index_en.rst diff --git a/doc/paddle/advanced_guide/flags/check_nan_inf_cn.md b/doc/paddle/advanced_guide/flags/check_nan_inf_cn.md new file mode 100644 index 0000000000000000000000000000000000000000..96d35852b3f0010c15021743f2da2c03ed18fa46 --- /dev/null +++ b/doc/paddle/advanced_guide/flags/check_nan_inf_cn.md @@ -0,0 +1,74 @@ +# check nan inf工具 + +check nan inf工具用于检查Operator的结果是否含有nan(not a number,非有效数)或inf(infinite,无穷大数)。支持float32、double、float16三类浮点型,整型由于不存在nan、inf不作检查。 + +## 使用 + +#### 1. 使用方法 +设置环境变量为FLAGS_check_nan_inf为True或者1即可。 +``` +export FLAGS_check_nan_inf=1 # 或者=True +``` + +#### 2. 进阶使用 +添加上述环境变量后,可以通过设置环境变量跳过op、op类型及op变量的检查。设置的格式如下: +``` +PADDLE_INF_NAN_SKIP_OP="op0,op1,op2" +PADDLE_INF_NAN_SKIP_ROLE="role1,role2,role3" +PADDLE_INF_NAN_SKIP_VAR="op0:var0,op0:var1,op1:var0" +``` +其中上面三个环境变量分别表示跳过op、op类型和op里变量的检查。 +##### 2.1 跳过op检查 +如下设置中前一个只跳过mul op的nan inf检查,后一个设置则跳过mul、softmax_with_cross_entropy这两个op的检查。 +`注意`:op跳过只接受精准匹配,要跳过softmax_with_cross_entropy的检查,不能设置环境变量为softmax_with或者with_cross进行模糊匹配,必须设置softmax_with_cross_entropy全名。 +``` +export PADDLE_INF_NAN_SKIP_OP="mul" +export PADDLE_INF_NAN_SKIP_OP="mul,softmax_with_cross_entropy" +``` +##### 2.2 跳过op类型检查 +目前接受的类型有: forward、backward、optimize、rpc、dist、lrsched、loss、default。正常fp32训练中,不需要跳过op类型进行nan inf检查。但在`fp16`中,在反向过程出现inf会对其进行修正,所以一般需要跳过backward的检查,这也是添加该功能的缘由。 +如下设置中前一个只跳过backward的检查,后一个设置跳过backward、optimize两种类型的检查。同上,op类型跳过也只支持精准匹配。 +``` +export PADDLE_INF_NAN_SKIP_ROLE="backward" +export PADDLE_INF_NAN_SKIP_ROLE="backward,optimize" +``` +##### 2.3 跳过指定op中变量的检查 +如下设置中前一个跳过mul op中fc_0.tmp_0变量,后一个设置则跳过mul op中fc_0.tmp_0和fc_0.tmp_1变量及 dropout op的new_relative变量。 +``` +export PADDLE_INF_NAN_SKIP_VAR="mul:fc_0.tmp_0" +export PADDLE_INF_NAN_SKIP_VAR="mul:fc_0.tmp_0,mul:fc_0.tmp_1,dropout:new_relative" +``` +`注意`:指定op变量检查中,对于op只接受精准匹配,对于变量则为模糊匹配,如上述的mlu op中的fc_0.tmp_0和fc_0.tmp_1变量可用c_0.tmp进行匹配。 + +## 试用 +可以使用单测中的[check_nan_inf_base.py](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/tests/unittests/check_nan_inf_base.py)文件进行试用。该脚本已设置FLAGS_check_nan_inf=1打开check nan inf功能。直接python check_nan_inf_base.py执行即可。 +#### 1. GPU日志信息 +其中GPU的check nan信息由于在GPU中打印,所以nan inf信息会出现在出错信息栈前面。工具中会打印出现inf、nan的op及tensor名称,每个block会打印nan、inf、num中的3个值,并打印各自block中nan、inf、num的数量。 +![gpu_nan_inf.png](check_nan_inf_files/gpu_nan_inf.png) +#### 2. CPU日志信息 +CPU中打印的nan、inf、num会在出错信息栈前面显示,同样打印了nan、inf、num中的三个值,并打印nan、inf、num的数量。check nan信息中op及tensor的名称会在最后显示。 + +![cpu_nan_inf.png](check_nan_inf_files/cpu_nan_inf.png) + +![cpu_nan_inf_op_var.png](check_nan_inf_files/cpu_nan_inf_op_var.png) + +## 速度 +测试环境:v100 32G单卡测试,Resnet50模型,imagenet数据集。`不同环境模型数据集下速度可能不同,以下速度仅供参考` +>不检查nan inf速度,每张卡307.7 images/s。 +检查nan inf速度,每张卡250.2 images/s。 + +## 原理 +#### 1. 
工具原理 +对于浮点类型操作,正常数值num,无穷大inf,非数值nan有如下运行关系。更详细可查看[INF, NAN, and NULL](https://wiki.analytica.com/index.php?title=INF,_NAN,_and_NULL_-_Exception_values&title=INF,_NAN,_and_NULL_-_Exception_values) +``` +nan - nan = nan, inf - inf = nan, num - num = 0, +nan + nan = nan, inf + inf = inf, nan + 0 = nan, +inf + 0 = inf, nan + inf = nan, 0 + 0 = 0 +``` +基于此使用如下操作仅需最后检查sum是否为nan或者inf就行了。 +``` +for(value:values): sum += (value-value) +``` + +***`注意`:本文档的进阶使用、速度、原理目前仅在develop版本的paddle生效,并将随1.7版本的paddle发布。 +此前版本的check nan inf工具在GPU上不推荐使用,旧工具速度为0.25 images/s,测试会拖慢1000多倍的训练速度。*** diff --git a/doc/paddle/advanced_guide/flags/check_nan_inf_en.md b/doc/paddle/advanced_guide/flags/check_nan_inf_en.md new file mode 100644 index 0000000000000000000000000000000000000000..046e608cfae90e89dfe06c8ed1001c6cca431bd2 --- /dev/null +++ b/doc/paddle/advanced_guide/flags/check_nan_inf_en.md @@ -0,0 +1,87 @@ +# check nan inf tool + +The check nan inf tool is used to check whether the result of the Operator contains nan(not a number) or inf(infinite number). +Float32, double, and float16 are supported. Integers are not checked because there is no nan or inf. + +## Use +#### 1. Method of use +Set the environment variable FLAGS_check_nan_inf to True or 1. +``` +export FLAGS_check_nan_inf=1 # or set =True +``` + +#### 2. Advanced use +After adding the above environment variables, you can skip the check of op, op role, and variables by setting environment variables. +The format of the setting is as follows: +``` +PADDLE_INF_NAN_SKIP_OP="op0,op1,op2" +PADDLE_INF_NAN_SKIP_ROLE="role1,role2,role3" +PADDLE_INF_NAN_SKIP_VAR="op0:var0,op0:var1,op1:var0" +``` +The three above environment variables respectively indicate skipping the checks of op, op role, and variables in op. +##### 2.1 Skip op check +In the following settings, the previous one only skips the nan inf check of the mul op, and the latter setting skips the check of mul and softmax_with_cross_entropy op. +`Note`: Op skip only accepts exact matches. To skip the softmax_with_cross_entropy check, you cannot set the environment variable to softmax_with or with_cross for fuzzy matching. +You must set the full softmax_with_cross_entropy name. +``` +export PADDLE_INF_NAN_SKIP_OP="mul" +export PADDLE_INF_NAN_SKIP_OP="mul,softmax_with_cross_entropy" +``` +##### 2.2 Skip op role check +The currently accepted types are: forward, backward, optimize, rpc, dist, lrsched, loss, default. +In fp32 training, it is not necessary to skip the nan inf check of the op role. +However in `fp16` training, inf will be corrected in the backpropagation, so it is generally necessary to skip the backward check, which is why this feature is added. +In the following setting, the previous setting only skips the backward check, and the latter setting skips both the backward and optimize checks. +Same as above, the op role skipping only supports exact matching. +``` +export PADDLE_INF_NAN_SKIP_ROLE="backward" +export PADDLE_INF_NAN_SKIP_ROLE="backward,optimize" +``` +##### 2.3 Skip the checking of variables in the specified op +In the following setting, the former skip the fc_0.tmp_0 variable in mul op, and the latter setting skips the fc_0.tmp_0 and fc_0.tmp_1 variables in mul op and the new_relative variable in dropout op. +``` +export PADDLE_INF_NAN_SKIP_VAR="mul:fc_0.tmp_0" +export PADDLE_INF_NAN_SKIP_VAR="mul:fc_0.tmp_0,mul:fc_0.tmp_1,dropout:new_relative" +``` +`Note`: In the specified op variable check, only exact matching is accepted for op, and fuzzy matching is used for variables. 
+For example, the fc_0.tmp_0 and fc_0.tmp_1 variables in mul op mentioned above can be matched by c_0.tmp + +## Test +You can use the [check_nan_inf_base.py](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/tests/unittests/check_nan_inf_base.py) file for test. +The script has set FLAGS_check_nan_inf=1 to enable the nan inf check. Just execute `python check_nan_inf_base.py` to test. + +#### 1. GPU log information +The check information of the GPU is printed in the GPU, so the nan inf information appears in front of the error information stack. +The tool will print the name of the op and tensor which find inf or nan. Each block will print the three values of nan, inf, and num. +And will print the number of nan, inf, and num in the respective block. +![gpu_nan_inf.png](check_nan_inf_files/gpu_nan_inf.png) + +#### 2. CPU log information +The nan, inf, and num printed in the CPU are displayed in front of the error message stack. +The three values of nan, inf, and num are also printed, and the number of nan, inf, and num is printed. +The name of the op and tensor which has nan or inf will be displayed in the end. +![cpu_nan_inf.png](check_nan_inf_files/cpu_nan_inf.png) +![cpu_nan_inf_op_var.png](check_nan_inf_files/cpu_nan_inf_op_var.png) + +## Speed +Test environment: v100 32G single card, Resnet50 model, Imagenet dataset. +`The speed may be different under different environments and different model datasets. The following speeds are only for reference` +> Without check nan inf speed, 307.7 images/s per card. +Check nan inf speed, 250.2 images/s per card. + +## Principle +#### 1. Tool principle +For floating-point operations, num(normal numeric), inf(infinite), and nan(not a number) have the following relations. +More details can be found in [INF, NAN, and NULL](https://wiki.analytica.com/index.php?title=INF,_NAN,_and_NULL_-_Exception_values&title=INF,_NAN,_and_NULL_-_Exception_values) +``` +nan - nan = nan, inf - inf = nan, num - num = 0, +nan + nan = nan, inf + inf = inf, nan + 0 = nan, +inf + 0 = inf, nan + inf = nan, 0 + 0 = 0 +``` +Based on this, using the following operation and only check the sum is nan or inf is enough. +``` +for(value:values): sum += (value-value) +``` + +***`Note`: The Advanced use, Speed, and Principles of this document are currently only effective in the develop version of the Paddle, and will be released with the 1.7 version of the Paddle. 
+It is not recommended to use the previous version of the check nan inf tool on the GPU, the speed of old tools is 0.25 images/s,will slow down the training speed by a thousand times.*** diff --git a/doc/paddle/advanced_guide/flags/check_nan_inf_files/cpu_nan_inf.png b/doc/paddle/advanced_guide/flags/check_nan_inf_files/cpu_nan_inf.png new file mode 100644 index 0000000000000000000000000000000000000000..79fd14c363fc34ce88a46e38ab5c99eecd169bb2 Binary files /dev/null and b/doc/paddle/advanced_guide/flags/check_nan_inf_files/cpu_nan_inf.png differ diff --git a/doc/paddle/advanced_guide/flags/check_nan_inf_files/cpu_nan_inf_op_var.png b/doc/paddle/advanced_guide/flags/check_nan_inf_files/cpu_nan_inf_op_var.png new file mode 100644 index 0000000000000000000000000000000000000000..ed7b2f1c7717c68c1651f103a080e384c6a99a74 Binary files /dev/null and b/doc/paddle/advanced_guide/flags/check_nan_inf_files/cpu_nan_inf_op_var.png differ diff --git a/doc/paddle/advanced_guide/flags/check_nan_inf_files/gpu_nan_inf.png b/doc/paddle/advanced_guide/flags/check_nan_inf_files/gpu_nan_inf.png new file mode 100644 index 0000000000000000000000000000000000000000..7a9de8f37f19760753e309a8235d26037294d5db Binary files /dev/null and b/doc/paddle/advanced_guide/flags/check_nan_inf_files/gpu_nan_inf.png differ diff --git a/doc/paddle/advanced_guide/flags/cudnn_cn.rst b/doc/paddle/advanced_guide/flags/cudnn_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..23ce85c89c5c7093660c07c19c1f8c4ffbf771f6 --- /dev/null +++ b/doc/paddle/advanced_guide/flags/cudnn_cn.rst @@ -0,0 +1,71 @@ + +cudnn +================== + + +FLAGS_conv_workspace_size_limit +******************************************* +(始于0.13.0) + +用于选择cuDNN卷积算法的工作区限制大小(单位为MB)。cuDNN的内部函数在这个内存限制范围内获得速度最快的匹配算法。通常,在较大的工作区内可以选择更快的算法,但同时也会显著增加内存空间。用户需要在内存和速度之间进行权衡。 + +取值范围 +--------------- +Uint64型,缺省值为512。即512MB显存工作区。 + +示例 +------- +FLAGS_conv_workspace_size_limit=1024 - 将用于选择cuDNN卷积算法的工作区限制大小设置为1024MB。 + + +FLAGS_cudnn_batchnorm_spatial_persistent +******************************************* +(始于1.4.0) + +表示是否在batchnorm中使用新的批量标准化模式CUDNN_BATCHNORM_SPATIAL_PERSISTENT函数。 + +取值范围 +--------------- +Bool型,缺省值为False。 + +示例 +------- +FLAGS_cudnn_batchnorm_spatial_persistent=True - 开启CUDNN_BATCHNORM_SPATIAL_PERSISTENT模式。 + +注意 +------- +此模式在某些任务中可以更快,因为将为CUDNN_DATA_FLOAT和CUDNN_DATA_HALF数据类型选择优化路径。我们默认将其设置为False的原因是此模式可能使用原子整数缩减(scaled atomic integer reduction)而导致某些输入数据范围的数字溢出。 + + +FLAGS_cudnn_deterministic +******************************************* +(始于0.13.0) + +cuDNN对于同一操作有几种算法,一些算法结果是非确定性的,如卷积算法。该flag用于调试。它表示是否选择cuDNN中的确定性函数。 + +取值范围 +--------------- +Bool型,缺省值为False。 + +示例 +------- +FLAGS_cudnn_deterministic=True - 选择cuDNN中的确定性函数。 + +注意 +------- +现在,在cuDNN卷积和池化Operator中启用此flag。确定性算法速度可能较慢,因此该flag通常用于调试。 + + +FLAGS_cudnn_exhaustive_search +******************************************* +(始于1.2.0) + +表示是否使用穷举搜索方法来选择卷积算法。在cuDNN中有两种搜索方法,启发式搜索和穷举搜索。穷举搜索尝试所有cuDNN算法以选择其中最快的算法。此方法非常耗时,所选择的算法将针对给定的层规格进行缓存。 一旦更改了图层规格(如batch大小,feature map大小),它将再次搜索。 + +取值范围 +--------------- +Bool型,缺省值为False。 + +示例 +------- +FLAGS_cudnn_exhaustive_search=True - 使用穷举搜索方法来选择卷积算法。 \ No newline at end of file diff --git a/doc/paddle/advanced_guide/flags/cudnn_en.rst b/doc/paddle/advanced_guide/flags/cudnn_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..4e47049057279a6de7042c6eff954f6c24dad257 --- /dev/null +++ b/doc/paddle/advanced_guide/flags/cudnn_en.rst @@ -0,0 +1,71 @@ +================== +cudnn +================== + + 
+FLAGS_conv_workspace_size_limit +******************************************* +(since 0.13.0) + +The workspace limit size in MB unit for choosing cuDNN convolution algorithms. The inner funciton of cuDNN obtain the fastest suited algorithm that fits within this memory limit. Usually, large workspace size may lead to choose faster algorithms, but significant increasing memory workspace. Users need to trade-off between memory and speed. + +Values accepted +--------------- +Uint64. The default value is 512. That is to say, 512MB memory workspace. + +Example +------- +FLAGS_conv_workspace_size_limit=1024 set the workspace limit size for choosing cuDNN convolution algorithms to 1024MB. + + +FLAGS_cudnn_batchnorm_spatial_persistent +******************************************* +(since 1.4.0) + +Indicates whether to use the new batch normalization mode CUDNN_BATCHNORM_SPATIAL_PERSISTENT function in batchnorm. + +Values accepted +--------------- +Bool. The default value is False. + +Example +------- +FLAGS_cudnn_batchnorm_spatial_persistent=True will enable the CUDNN_BATCHNORM_SPATIAL_PERSISTENT mode. + +Note +------- +This mode can be faster in some tasks because an optimized path will be selected for CUDNN_DATA_FLOAT and CUDNN_DATA_HALF data types. The reason we set it to False by default is that this mode may use scaled atomic integer reduction which may cause a numerical overflow for some input data range. + + +FLAGS_cudnn_deterministic +******************************************* +(since 0.13.0) + +For one operation, cuDNN has several algorithms, some algorithm results are non-deterministic, like convolution algorithms. This flag is used for debugging. It indicates whether to choose the deterministic in cuDNN. + +Values accepted +--------------- +Bool. The default value is False. + +Example +------- +FLAGS_cudnn_deterministic=True will choose the deterministic in cuDNN. + +Note +------- +Now this flag is enabled in cuDNN convolution and pooling operator. The deterministic algorithms may slower, so this flag is generally used for debugging. + + +FLAGS_cudnn_exhaustive_search +******************************************* +(since 1.2.0) + +Whether to use exhaustive search method to choose convolution algorithms. There are two search methods, heuristic search and exhaustive search in cuDNN. The exhaustive search attempts all cuDNN algorithms to choose the fastest algorithm. This method is time-consuming, the choosed algorithm will be cached for the given layer specifications. Once the layer specifications (like batch size, feature map size) are changed, it will search again. + +Values accepted +--------------- +Bool. The default value is False. + +Example +------- +FLAGS_cudnn_exhaustive_search=True will use exhaustive search method to choose convolution algorithms. 
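+
+
+Usage sketch
+-------------
+The flags above are plain environment variables that PaddlePaddle typically picks up when it initializes. The snippet below is only a minimal, non-authoritative sketch (the concrete values are placeholders) of setting them from Python through ``os.environ`` before the first paddle import:
+
+.. code-block:: python
+
+    import os
+
+    # Set the cuDNN-related flags before paddle is imported, so that they
+    # are already present in the environment when the framework initializes.
+    os.environ['FLAGS_conv_workspace_size_limit'] = '1024'  # 1024 MB cuDNN workspace
+    os.environ['FLAGS_cudnn_exhaustive_search'] = 'True'    # try all conv algorithms once per layer spec
+    os.environ['FLAGS_cudnn_deterministic'] = 'False'       # keep the faster non-deterministic kernels
+
+    import paddle.fluid as fluid  # the flags take effect from here on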
diff --git a/doc/paddle/advanced_guide/flags/data_cn.rst b/doc/paddle/advanced_guide/flags/data_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..db4bd5e3cd30298d10ad9c89d0f38e2407e037b5 --- /dev/null +++ b/doc/paddle/advanced_guide/flags/data_cn.rst @@ -0,0 +1,46 @@ + +数值计算 +================== + + +FLAGS_enable_cublas_tensor_op_math +******************************************* +(始于1.2.0) + +该flag表示是否使用Tensor Core,但可能会因此降低部分精确度。 + +取值范围 +--------------- +Bool型,缺省值为False。 + +示例 +------- +FLAGS_enable_cublas_tensor_op_math=True - 使用Tensor Core。 + + +FLAGS_use_mkldnn +******************************************* +(始于0.13.0) + +在预测或训练过程中,可以通过该选项选择使用Intel MKL-DNN(https://github.com/intel/mkl-dnn)库运行。 +“用于深度神经网络的英特尔(R)数学核心库(Intel(R) MKL-DNN)”是一个用于深度学习应用程序的开源性能库。该库加速了英特尔(R)架构上的深度学习应用程序和框架。Intel MKL-DNN包含矢量化和线程化构建建块,您可以使用它们来实现具有C和C ++接口的深度神经网络(DNN)。 + +取值范围 +--------------- +Bool型,缺省值为False。 + +示例 +------- +FLAGS_use_mkldnn=True - 开启使用MKL-DNN运行。 + +注意 +------- +FLAGS_use_mkldnn仅用于python训练和预测脚本。要在CAPI中启用MKL-DNN,请设置选项 -DWITH_MKLDNN=ON。 +英特尔MKL-DNN支持英特尔64架构和兼容架构。 +该库对基于以下设备的系统进行了优化: +英特尔SSE4.1支持的英特尔凌动(R)处理器; +第4代,第5代,第6代,第7代和第8代英特尔(R)Core(TM)处理器; +英特尔(R)Xeon(R)处理器E3,E5和E7系列(原Sandy Bridge,Ivy Bridge,Haswell和Broadwell); +英特尔(R)Xeon(R)可扩展处理器(原Skylake和Cascade Lake); +英特尔(R)Xeon Phi(TM)处理器(原Knights Landing and Knights Mill); +兼容处理器。 \ No newline at end of file diff --git a/doc/paddle/advanced_guide/flags/data_en.rst b/doc/paddle/advanced_guide/flags/data_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..c156a37dd044b644078b838e0705b977cd86d43b --- /dev/null +++ b/doc/paddle/advanced_guide/flags/data_en.rst @@ -0,0 +1,45 @@ + +data processing +================== + +FLAGS_enable_cublas_tensor_op_math +******************************************* +(since 1.2.0) + +This Flag indicates whether to use Tensor Core, but it may lose some precision. + +Values accepted +--------------- +Bool. The default value is False. + +Example +------- +FLAGS_enable_cublas_tensor_op_math=True will use Tensor Core. + + +FLAGS_use_mkldnn +******************************************* +(since 0.13.0) + +Give a choice to run with Intel MKL-DNN (https://github.com/intel/mkl-dnn) library on inference or training. + +Intel(R) Math Kernel Library for Deep Neural Networks (Intel(R) MKL-DNN) is an open-source performance library for deep-learning applications. The library accelerates deep-learning applications and frameworks on Intel(R) architecture. Intel MKL-DNN contains vectorized and threaded building blocks that you can use to implement deep neural networks (DNN) with C and C++ interfaces. + +Values accepted +--------------- +Bool. The default value is False. + +Example +------- +FLAGS_use_mkldnn=True will enable running with MKL-DNN support. + +Note +------- +FLAGS_use_mkldnn is only used for python training and inference scripts. To enable MKL-DNN in CAPI, set build option -DWITH_MKLDNN=ON +Intel MKL-DNN supports Intel 64 architecture and compatible architectures. The library is optimized for the systems based on: +Intel Atom(R) processor with Intel SSE4.1 support +4th, 5th, 6th, 7th, and 8th generation Intel(R) Core(TM) processor +Intel(R) Xeon(R) processor E3, E5, and E7 family (formerly Sandy Bridge, Ivy Bridge, Haswell, and Broadwell) +Intel(R) Xeon(R) Scalable processors (formerly Skylake and Cascade Lake) +Intel(R) Xeon Phi(TM) processors (formerly Knights Landing and Knights Mill) +and compatible processors. 
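+
+
+Usage sketch
+-------------
+As described in the FLAGS usage page of this guide, flags can also be inspected and set from Python through ``fluid.get_flags`` and ``fluid.set_flags``. The snippet below is only an illustrative sketch and assumes both flags are exposed through that API; if a flag cannot be set this way, fall back to exporting it as an environment variable before the program starts:
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+
+    # Turn on MKL-DNN kernels and Tensor Core math, then read the values
+    # back to confirm that the flags were accepted.
+    fluid.set_flags({'FLAGS_use_mkldnn': True,
+                     'FLAGS_enable_cublas_tensor_op_math': True})
+    print(fluid.get_flags(['FLAGS_use_mkldnn',
+                           'FLAGS_enable_cublas_tensor_op_math']))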
diff --git a/doc/paddle/advanced_guide/flags/debug_cn.rst b/doc/paddle/advanced_guide/flags/debug_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..63eb1bec4b09d02ee7293889cef4e63733cb258e --- /dev/null +++ b/doc/paddle/advanced_guide/flags/debug_cn.rst @@ -0,0 +1,87 @@ + +调试 +================== + + +FLAGS_check_nan_inf +******************** +(始于0.13.0) + +用于调试。它用于检查Operator的结果是否含有Nan或Inf。 + +取值范围 +--------------- +Bool型,缺省值为False。 + +示例 +------- +FLAGS_check_nan_inf=True - 检查Operator的结果是否含有Nan或Inf。 + + +FLAGS_cpu_deterministic +******************************************* +(始于0.15.0) + +该flag用于调试。它表示是否在CPU侧确定计算结果。 在某些情况下,不同求和次序的结果可能不同,例如,`a+b+c+d` 的结果可能与 `c+a+b+d` 的结果不同。 + +取值范围 +--------------- +Bool型,缺省值为False。 + +示例 +------- +FLAGS_cpu_deterministic=True - 在CPU侧确定计算结果。 + + +FLAGS_enable_rpc_profiler +******************************************* +(始于1.0.0) + +是否启用RPC分析器。 + +取值范围 +---------------- +Bool型,缺省值为False。 + +示例 +------- +FLAGS_enable_rpc_profiler=True - 启用RPC分析器并在分析器文件中记录时间线。 + + +FLAGS_multiple_of_cupti_buffer_size +******************************************* +(始于1.4.0) + +该flag用于分析。它表示CUPTI设备缓冲区大小的倍数。如果在profiler过程中程序挂掉或者在chrome://tracing中加载timeline文件时出现异常,请尝试增大此值。 + +取值范围 +--------------- +Int32型,缺省值为1。 + +示例 +------- +FLAGS_multiple_of_cupti_buffer_size=1 - 将CUPTI设备缓冲区大小的倍数设为1。 + + +FLAGS_reader_queue_speed_test_mode +******************************************* +(始于1.1.0) + +将pyreader数据队列设置为测试模式。在测试模式下,pyreader将缓存一些数据,然后执行器将读取缓存的数据,因此阅读器不会成为瓶颈。 + +取值范围 +--------------- +Bool型,缺省值为False。 + +示例 +------- +FLAGS_reader_queue_speed_test_mode=True - 启用pyreader测试模式。 + +注意 +------- +仅当使用py_reader时该flag才有效。 + +.. toctree:: + :hidden: + + check_nan_inf_cn.md diff --git a/doc/paddle/advanced_guide/flags/debug_en.rst b/doc/paddle/advanced_guide/flags/debug_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..713e7d95482483028ce2243459259a85a8696afc --- /dev/null +++ b/doc/paddle/advanced_guide/flags/debug_en.rst @@ -0,0 +1,86 @@ + +debug +================== + +FLAGS_check_nan_inf +************************************** +(since 0.13.0) + +This Flag is used for debugging. It is used to check whether the result of the Operator has Nan or Inf. + +Values accepted +--------------- +Bool. The default value is False. + +Example +------- +FLAGS_check_nan_inf=True will check the result of Operator whether the result has Nan or Inf. + + +FLAGS_cpu_deterministic +******************************************* +(since 0.15.0) + +This Flag is used for debugging. It indicates whether to make the result of computation deterministic in CPU side. In some case, the result of the different order of summing maybe different,for example, the result of `a+b+c+d` may be different with the result of `c+a+b+d`. + +Values accepted +--------------- +Bool. The default value is False. + +Example +------- +FLAGS_cpu_deterministic=True will make the result of computation deterministic in CPU side. + + +FLAGS_enable_rpc_profiler +******************************************* +(Since 1.0.0) + +Enable RPC profiler or not. + +Values accepted +---------------- +Bool. The default value is False. + +Example +------- +FLAGS_enable_rpc_profiler=True will enable rpc profiler and record the timeline to profiler file. + + +FLAGS_multiple_of_cupti_buffer_size +******************************************* +(since 1.4.0) + +This Flag is used for profiling. It indicates the multiple of the CUPTI device buffer size. 
When you are profiling, if the program breaks down or bugs rise when loading timeline file in chrome://traxing, try increasing this value. + +Values accepted +--------------- +Int32. The default value is 1. + +Example +------- +FLAGS_multiple_of_cupti_buffer_size=1 set the multiple of the CUPTI device buffer size to 1. + + +FLAGS_reader_queue_speed_test_mode +******************************************* +(since 1.1.0) + +Set the pyreader data queue to test mode. In test mode, pyreader will cache some data, executor will then read the cached data, so reader will not be the bottleneck. + +Values accepted +--------------- +Bool. The default value is False. + +Example +------- +FLAGS_reader_queue_speed_test_mode=True will enable the pyreader test mode. + +Note +------- +This flag will work only when you are using py_reader. + +.. toctree:: + :hidden: + + check_nan_inf_en.md \ No newline at end of file diff --git a/doc/paddle/advanced_guide/flags/device_cn.rst b/doc/paddle/advanced_guide/flags/device_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0bed575e98c8fead62ecdc0b71f5ec9922a4fbff --- /dev/null +++ b/doc/paddle/advanced_guide/flags/device_cn.rst @@ -0,0 +1,37 @@ + +设备管理 +================== + + +FLAGS_paddle_num_threads +******************************************* +(始于0.15.0) + +控制每个paddle实例的线程数。 + +取值范围 +--------------- +Int32型,缺省值为1。 + +示例 +------- +FLAGS_paddle_num_threads=2 - 将每个实例的最大线程数设为2。 + + +FLAGS_selected_gpus +******************************************* +(始于1.3) + +设置用于训练或预测的GPU设备。 + +取值范围 +--------------- +以逗号分隔的设备ID列表,其中每个设备ID是一个非负整数,且应小于您的机器拥有的GPU设备总数。 + +示例 +------- +FLAGS_selected_gpus=0,1,2,3,4,5,6,7 - 令0-7号GPU设备用于训练和预测。 + +注意 +------- +使用该flag的原因是我们希望在GPU设备之间使用聚合通信,但通过CUDA_VISIBLE_DEVICES只能使用共享内存。 \ No newline at end of file diff --git a/doc/paddle/advanced_guide/flags/device_en.rst b/doc/paddle/advanced_guide/flags/device_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..5397ee9fc9c4bd6baba3682eaffc8bf3cd66e37c --- /dev/null +++ b/doc/paddle/advanced_guide/flags/device_en.rst @@ -0,0 +1,37 @@ + +device management +================== + + +FLAGS_paddle_num_threads +******************************************* +(since 0.15.0) + +Control the number of threads of each paddle instance. + +Values accepted +--------------- +Int32. The default value is 1. + +Example +------- +FLAGS_paddle_num_threads=2 will enable 2 threads as max number of threads for each instance. + + +FLAGS_selected_gpus +******************************************* +(since 1.3) + +Set the GPU devices used for training or inference. + +Values accepted +--------------- +A comma-separated list of device IDs, where each device ID is a nonnegative integer less than the number of GPU devices your machine have. + +Example +------- +FLAGS_selected_gpus=0,1,2,3,4,5,6,7 makes GPU devices 0-7 to be used for training or inference. + +Note +------- +The reason for using this flag is that we want to use collective communication between GPU devices, but with CUDA_VISIBLE_DEVICES can only use share-memory. 
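+
+
+Usage sketch
+-------------
+The following is a small, illustrative sketch (not an official recipe) of restricting a run to two GPUs by setting the flag before paddle is imported, and then letting ``fluid.cuda_places()`` pick the selected devices up from it:
+
+.. code-block:: python
+
+    import os
+
+    # Use only GPU 0 and GPU 1 for this process.
+    os.environ['FLAGS_selected_gpus'] = '0,1'
+
+    import paddle.fluid as fluid
+
+    # With FLAGS_selected_gpus set, cuda_places() returns the listed devices.
+    places = fluid.cuda_places()
+    print(places)  # two places, one for each selected GPU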
\ No newline at end of file diff --git a/doc/paddle/advanced_guide/flags/distributed_cn.rst b/doc/paddle/advanced_guide/flags/distributed_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8c869ab46eb7dc79c9e6963bad8dda4c313914d8 --- /dev/null +++ b/doc/paddle/advanced_guide/flags/distributed_cn.rst @@ -0,0 +1,241 @@ + +分布式 +================== + + +FLAGS_communicator_fake_rpc +********************** +(始于1.5.0) + +当设为True时,通信器不会实际进行rpc调用,因此速度不会受到网络通信的影响。该flag用于调试。 + +取值范围 +--------------- +Bool型,缺省值为False。 + +示例 +------- +FLAGS_communicator_fake_rpc=True - 启用通信器fake模式。 + +注意 +------- +该flag仅用于paddlepaddle的开发者,普通用户不应对其设置。 + + +FLAGS_communicator_independent_recv_thread +************************************** +(始于1.5.0) + +使用独立线程以从参数服务器接收参数。 + +取值范围 +--------------- +Bool型,缺省值为True。 + +示例 +------- +FLAGS_communicator_independent_recv_thread=True - 使用独立线程以从参数服务器接收参数。 + +注意 +------- +开发者使用该flag进行框架的调试与优化,普通用户不应对其设置。 + + +FLAGS_communicator_max_merge_var_num +************************************** +(始于1.5.0) + +要通过通信器合并为一个梯度并发送的最大梯度数。训练器将所有梯度放入队列,然后通信器将从队列中取出梯度并在合并后发送。 + +取值范围 +--------------- +Int32型,缺省值为20。 + +示例 +------- +FLAGS_communicator_max_merge_var_num=16 - 将要通过通信器合并为一个梯度并发送的最大梯度数设为16。 + +注意 +------- +该flag和训练器线程数有着密切关联,缺省值应和线程数一致。 + + +FLAGS_communicator_merge_sparse_grad +******************************************* +(始于1.5.0) + +在发送之前,合并稀疏梯度。 + +取值范围 +--------------- +Bool型,缺省值true。 + +示例 +------- +FLAGS_communicator_merge_sparse_grad=true - 设置合并稀疏梯度。 + +注意 +------- +合并稀疏梯度会耗费时间。如果重复ID较多,内存占用会变少,通信会变快;如果重复ID较少,则并不会节约内存。 + + +FLAGS_communicator_min_send_grad_num_before_recv +******************************************* +(始于1.5.0) + +在通信器中,有一个发送线程向参数服务器发送梯度,一个接收线程从参数服务器接收参数,且它们之间彼此独立。该flag用于控制接收线程的频率。 仅当发送线程至少发送FLAGS_communicator_min_send_grad_num_before_recv数量的梯度时,接收线程才会从参数服务器接收参数。 + +取值范围 +--------------- +Int32型,缺省值为20。 + +示例 +------- +FLAGS_communicator_min_send_grad_num_before_recv=10 - 在接收线程从参数服务器接收参数之前,发送线程发送的梯度数为10。 + +注意 +------- +由于该flag和训练器的训练线程数强相关,而每个训练线程都会发送其梯度,所以缺省值应和线程数一致。 + + +FLAGS_communicator_send_queue_size +******************************************* +(始于1.5.0) + +每个梯度的队列大小。训练器将梯度放入队列,然后通信器将其从队列中取出并发送出去。 当通信器很慢时,队列可能会满,训练器在队列有空间之前被持续阻塞。它用于避免训练比通信快得多,以致太多的梯度没有及时发出的情况。 + +取值范围 +--------------- +Int32型,缺省值为20。 + +示例 +------- +FLAGS_communicator_send_queue_size=10 - 设置每个梯度的队列大小为10。 + +注意 +------- +该flag会影响训练速度,若队列大小过大,速度会变快但结果可能会变差。 + + +FLAGS_communicator_send_wait_times +******************************************* +(始于1.5.0) + +合并数没有达到max_merge_var_num的情况下发送线程等待的次数。 + +取值范围 +--------------- +Int32型,缺省值为5。 + +示例 +------- +FLAGS_communicator_send_wait_times=5 - 将合并数没有达到max_merge_var_num的情况下发送线程等待的次数设为5。 + + +FLAGS_communicator_thread_pool_size +******************************************* +(始于1.5.0) + +设置用于发送梯度和接收参数的线程池大小。 + +取值范围 +--------------- +Int32型,缺省值为5。 + +示例 +------- +FLAGS_communicator_thread_pool_size=10 - 设置线程池大小为10。 + +注意 +------- +大部分情况下,用户不需要设置该flag。 + + +FLAGS_dist_threadpool_size +******************************************* +(始于1.0.0) + +控制用于分布式模块的线程数。如果未设置,则将其设置为硬线程。 + +取值范围 +--------------- +Int32型,缺省值为0。 + +示例 +------- +FLAGS_dist_threadpool_size=10 - 将用于分布式模块的最大线程数设为10。 + + +FLAGS_rpc_deadline +******************************************* +(始于1.0.0) + +它控制rpc通信的deadline超时。 + +取值范围 +--------------- +Int32型,缺省值为180000,单位为ms。 + +示例 +------- +FLAGS_rpc_deadline=180000 - 将deadline超时设为3分钟。 + + +FLAGS_rpc_disable_reuse_port +******************************************* +(始于1.2.0) + +FLAGS_rpc_disable_reuse_port为True时,grpc的 
GRPC_ARG_ALLOW_REUSEPORT会被设置为False以禁用SO_REUSEPORT。 + +取值范围 +--------------- +Bool型,缺省值为False。 + +示例 +------- +FLAGS_rpc_disable_reuse_port=True - 禁用SO_REUSEPORT。 + + +FLAGS_rpc_get_thread_num +******************************************* +(始于1.0.0) + +它控制用于从参数服务器获取参数的线程数。 + +取值范围 +--------------- +Int32型,缺省值为12。 + +示例 +------- +FLAGS_rpc_get_thread_num=6 - 将从参数服务器获取参数的线程数设为6。 + + +FLAGS_rpc_send_thread_num +******************************************* +(始于1.0.0) + +它控制用于发送rpc的线程数。 + +取值范围 +--------------- +Int32型,缺省值为12。 + +示例 +------- +FLAGS_rpc_send_thread_num=6 - 将用于发送的线程数设为6。 + + +FLAGS_rpc_server_profile_path +******************************************* +since(v0.15.0) + +设置分析器输出日志文件路径前缀。完整路径为FLAGS_rpc_server_profile_path_listener_id,其中listener_id为随机数。 + +取值范围 +--------------- +String型,缺省值为"./profile_ps"。 + +示例 +------- +FLAGS_rpc_server_profile_path="/tmp/pserver_profile_log" - 在"/tmp/pserver_profile_log_listener_id"中生成配置日志文件。 \ No newline at end of file diff --git a/doc/paddle/advanced_guide/flags/distributed_en.rst b/doc/paddle/advanced_guide/flags/distributed_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..d71803cc62e02b8695e2baceb64eb147011716f1 --- /dev/null +++ b/doc/paddle/advanced_guide/flags/distributed_en.rst @@ -0,0 +1,241 @@ + +distributed +================== + +FLAGS_communicator_fake_rpc +************************************** +(since 1.5.0) + +When set true, communicator will not really do rpc call, so the speed will not be affected by network communication. This flag is used for debugging purpose. + +Values accepted +--------------- +Bool. The default value is false. + +Example +------- +FLAGS_communicator_fake_rpc=True will enable communicator fake mode. + +Note +------- +This flag is only for developer of paddlepaddle, user should not set it. + + +FLAGS_communicator_independent_recv_thread +************************************** +(since 1.5.0) + +use an independent thread to receive parameter from parameter server + +Values accepted +--------------- +Bool. The default value is True. + +Example +------- +FLAGS_communicator_independent_recv_thread=True will use an independent thread to receive parameter from parameter server. + +Note +------- +This flag is for developer to debug and optimize the framework. User should not set it. + + +FLAGS_communicator_max_merge_var_num +************************************** +(since 1.5.0) + +max gradient number to merge and send as one gradient by communicator. Trainer will put all gradients into a queue, then communicator will take the gradients out from the queue and merge them before send. + +Values accepted +--------------- +Int32. The default value is 20. + +Example +------- +FLAGS_communicator_max_merge_var_num=16 will set the max gradient number to merge and send as one gradient to 16. + +Note +------- +This flag has strong relationship with trainer thread num. The default value should be the same with thread num. + + +FLAGS_communicator_merge_sparse_grad +******************************* +(since 1.5.0) + +merge sparse gradient before sending. + +Values accepted +--------------- +Bool. The default value is True. + +Example +------- +FLAGS_communicator_merge_sparse_grad=True will merge sparse gradient before sending. + +Note +------- +Merging sparse gradient would be time-consuming. If the sparse gradient has many duplicated ids, it will save memory and communication could be much faster. Otherwise it will not save memory. 
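+
+A short, purely illustrative sketch of how a trainer process might wire several communicator flags together is shown below; the value 16 is a placeholder and, as the notes above suggest, would normally be chosen to match the number of training threads:
+
+.. code-block:: python
+
+    import os
+
+    # Configure the communicator before paddle is imported so that the
+    # flags are visible when the distributed runtime initializes.
+    os.environ['FLAGS_communicator_max_merge_var_num'] = '16'    # merge up to 16 gradients per send
+    os.environ['FLAGS_communicator_send_queue_size'] = '16'      # queue capacity per gradient
+    os.environ['FLAGS_communicator_merge_sparse_grad'] = 'True'  # merge sparse gradients before sending
+
+    import paddle.fluid as fluid  # build the distributed program after this point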
+ + +FLAGS_communicator_min_send_grad_num_before_recv +******************************************* +(since 1.5.0) + +In communicator, there is one send thread that send gradient to parameter server and one receive thread that receive parameter from parameter server. They work independently. This flag is used to control the frequency of receive thread. Only when the send thread send at least FLAGS_communicator_min_send_grad_num_before_recv gradients will the receive thread receive parameter from parameter server. + +Values accepted +--------------- +Int32. The default value is 20. + +Example +------- +FLAGS_communicator_min_send_grad_num_before_recv=10 will set the number of gradients sent by the send thread to 10 before the receive thread receive parameter from parameter server. + +Note +------- +This flag has strong relation with the training threads of trainer. because each training thread will send it's grad. So the default value should be training thread num. + + +FLAGS_communicator_send_queue_size +******************************************* +(since 1.5.0) + +The queue size for each gradient. Trainer will put gradient into a queue, and communicator will take gradient out from the queue and then send them out. When communicator is slow, the queue may be full and then the trainer will be blocked until the queue has space. It's used to avoid the situation that training is much more faster than communication. There will be too much gradients that is not sent out in time. + +Values accepted +--------------- +Int32. The default value is 20. + +Example +------- +FLAGS_communicator_send_queue_size=10 will set the queue size for each gradient to 10. + +Note +------- +This flag will affect the training speed, if the queue size is larger, the speed may be faster, but may make the result worse. + + +FLAGS_communicator_send_wait_times +******************************************* +(since 1.5.0) + +times that send thread will wait if merge number does not reach max_merge_var_num. + +Values accepted +--------------- +Int32. The default value is 5. + +Example +------- +FLAGS_communicator_send_wait_times=5 set the times that send thread will wait if merge number does not reach max_merge_var_num to 5. + + +FLAGS_communicator_thread_pool_size +******************************************* +(since 1.5.0) + +Set the thread pool size that used to do gradient send and parameter receive. + +Values accepted +--------------- +Int32. The default value is 5. + +Example +------- +FLAGS_communicator_thread_pool_size=10 set the thread pool size to 10. + +Note +------- +Most of time user does not need to set this flag. + + +FLAGS_dist_threadpool_size +******************************************* +(Since 1.0.0) + +Control the number of thread used for distributed module. If it's not set, it will be set to hardware threads. + +Values accepted +--------------- +Int32. The default value is 0. + +Example +------- +FLAGS_dist_threadpool_size=10 will enable 10 threads as max number of thread used for distributed module. + + +FLAGS_rpc_deadline +******************************************* +(Since 1.0.0) + +It controls the deadline timeout of the rpc communication. + +Values accepted +--------------- +Int32. The default value is 180000 in ms. + +Example +------- +FLAGS_rpc_deadline=180000 will set deadline timeout to 3 minute. 
+ + +FLAGS_rpc_disable_reuse_port +******************************************* +(since 1.2.0) + +When FLAGS_rpc_disable_reuse_port is true, the flag of grpc GRPC_ARG_ALLOW_REUSEPORT will be set to false to +disable the use of SO_REUSEPORT if it's available. + +Values accepted +--------------- +Bool. The default value is False. + +Example +------- +FLAGS_rpc_disable_reuse_port=True will disable the use of SO_REUSEPORT. + + +FLAGS_rpc_get_thread_num +******************************************* +(Since 1.0.0) + +It controls the number of threads used to get parameter from parameter server. + +Values accepted +--------------- +Int32. The default value is 12. + +Example +------- +FLAGS_rpc_get_thread_num=6 will use 6 threads to get parameter from parameter server. + + +FLAGS_rpc_send_thread_num +******************************************* +(Since 1.0.0) + +It controls the number of threads used for send rpc. + +Values accepted +--------------- +Int32. The default value is 12. + +Example +------- +FLAGS_rpc_send_thread_num=6 will set number thread used for send to 6. + + +FLAGS_rpc_server_profile_path +******************************************* +since(v0.15.0) + +Set the profiler output log file path prefix. The complete path will be FLAGS_rpc_server_profile_path_listener_id, listener_id is a random number. + +Values accepted +--------------- +String. The default value is "./profile_ps". + +Example +------- +FLAGS_rpc_server_profile_path="/tmp/pserver_profile_log" generate profile log file at "/tmp/pserver_profile_log_listener_id". diff --git a/doc/paddle/advanced_guide/flags/executor_cn.rst b/doc/paddle/advanced_guide/flags/executor_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..56c3c7b04dc71029d8d4b42a04f07bf752de3181 --- /dev/null +++ b/doc/paddle/advanced_guide/flags/executor_cn.rst @@ -0,0 +1,67 @@ + +执行器 +================== + + +FLAGS_enable_parallel_graph +******************************************* +(始于1.2.0) + +该flag用于ParallelExecutor以禁用并行图执行模式。 + +取值范围 +--------------- +Bool型,缺省值为False。 + +示例 +------- +FLAGS_enable_parallel_graph=False - 通过ParallelExecutor强制禁用并行图执行模式。 + + +FLAGS_pe_profile_fname +******************************************* +(始于1.3.0) + +该flag用于ParallelExecutor的调试。ParallelExecutor会通过gpertools生成配置文件结果,并将结果存储在FLAGS_pe_profile_fname指定的文件中。仅在编译选项选择 `WITH_PRIFILER=ON` 时有效。如果禁用则设为empty。 + +取值范围 +--------------- +String型,缺省值为empty ("")。 + +示例 +------- +FLAGS_pe_profile_fname="./parallel_executor.perf" - 将配置文件结果存储在parallel_executor.perf中。 + + +FLAGS_print_sub_graph_dir +******************************************* +(始于1.2.0) + +该flag用于调试。如果程序中转换图的某些子图失去连接,则结果可能会出错。我们可以将这些断开连接的子图打印到该flag指定的文件中。如果禁用则设为empty。 + +取值范围 +--------------- +String型,缺省值为empty ("")。 + +示例 +------- +FLAGS_print_sub_graph_dir="./sub_graphs.txt" - 将断开连接的子图打印到"./sub_graphs.txt"。 + + +FLAGS_use_ngraph +******************************************* +(始于1.4.0) + +在预测或训练过程中,可以通过该选项选择使用英特尔nGraph(https://github.com/NervanaSystems/ngraph)引擎。它将在英特尔Xeon CPU上获得很大的性能提升。 + +取值范围 +--------------- +Bool型,缺省值为False。 + +示例 +------- +FLAGS_use_ngraph=True - 开启使用nGraph运行。 + +注意 +------- +英特尔nGraph目前仅在少数模型中支持。我们只验证了[ResNet-50](https://github.com/PaddlePaddle/models/blob/develop/PaddleCV/image_classification/README_ngraph.md)的训练和预测。 \ No newline at end of file diff --git a/doc/paddle/advanced_guide/flags/executor_en.rst b/doc/paddle/advanced_guide/flags/executor_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..7a262c001639a90b86595cd2d4a5607d75f80d59 --- /dev/null +++ 
b/doc/paddle/advanced_guide/flags/executor_en.rst @@ -0,0 +1,67 @@ + +executor +================== + + +FLAGS_enable_parallel_graph +******************************************* +(since 1.2.0) + +This Flag is used for ParallelExecutor to disable parallel graph execution mode. + +Values accepted +--------------- +Bool. The default value is False. + +Example +------- +FLAGS_enable_parallel_graph=False will force disable parallel graph execution mode by ParallelExecutor. + + +FLAGS_pe_profile_fname +******************************************* +(since 1.3.0) + +This Flag is used for debugging for ParallelExecutor. The ParallelExecutor will generate the profile result by gperftools, and the profile result will be stored in the file which is specified by FLAGS_pe_profile_fname. Only valid when compiled `WITH_PRIFILER=ON`. Empty if disable. + +Values accepted +--------------- +String. The default value is empty (""). + +Example +------- +FLAGS_pe_profile_fname="./parallel_executor.perf" will store the profile result to parallel_executor.perf. + + +FLAGS_print_sub_graph_dir +******************************************* +(since 1.2.0) + +This Flag is used for debugging. If some subgraphs of the transformed graph from the program are disconnected, the result may be problematic. We can print these disconnected subgraphs to a file specified by the flag. Empty if disable. + +Values accepted +--------------- +String. The default value is empty (""). + +Example +------- +FLAGS_print_sub_graph_dir="./sub_graphs.txt" will print the disconnected subgraphs to "./sub_graphs.txt". + + +FLAGS_use_ngraph +******************************************* +(since 1.4.0) + +Give a choice to run with Intel nGraph(https://github.com/NervanaSystems/ngraph) engine on inference or training. This will obtain much performance boost on Intel Xeon CPU. + +Values accepted +--------------- +Bool. The default value is False. + +Example +------- +FLAGS_use_ngraph=True will enable running with nGraph support. + +Note +------- +Intel nGraph is only supported in few models yet. We have only verified [ResNet-50](https://github.com/PaddlePaddle/models/blob/develop/PaddleCV/image_classification/README_ngraph.md) training and inference. \ No newline at end of file diff --git a/doc/paddle/advanced_guide/flags/flags_cn.rst b/doc/paddle/advanced_guide/flags/flags_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5d0d414725666c1d90f5d58c26dc4536f08f439f --- /dev/null +++ b/doc/paddle/advanced_guide/flags/flags_cn.rst @@ -0,0 +1,28 @@ + +环境变量FLAGS +================== + +调用说明 +---------- + +PaddlePaddle中的环境变量FLAGS支持两种设置方式。 + +- 通过export来设置环境变量,如 :code:`export FLAGS_eager_delete_tensor_gb = 1.0` 。 + +- 通过API::code:`get_flag` 和 :code:`set_flags` 来打印和设置环境变量FLAGS。API使用详情请参考 :ref:`cn_api_fluid_get_flags` 与 :ref:`cn_api_fluid_set_flags` 。 + + +环境变量FLAGS功能分类 +---------------------- + +.. toctree:: + :maxdepth: 1 + + cudnn_cn.rst + data_cn.rst + debug_cn.rst + device_cn.rst + distributed_cn.rst + executor_cn.rst + memory_cn.rst + others_cn.rst diff --git a/doc/paddle/advanced_guide/flags/flags_en.rst b/doc/paddle/advanced_guide/flags/flags_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..b24c551c78d7bc74a76901c717b792f78b4237e3 --- /dev/null +++ b/doc/paddle/advanced_guide/flags/flags_en.rst @@ -0,0 +1,74 @@ +================== +FLAGS +================== + +Usage +------ +These FLAGS in PaddlePaddle can be set in two ways. + +- Set the FLAGS through export. 
For example: :code:`export FLAGS_eager_delete_tensor_gb = 1.0` . + +- Through :code:`get_flags` and :code:`set_flags` to print and set the environment variables. For more information of using these API, please refer to :ref:`api_fluid_get_flags` and :ref:`api_fluid_get_flags` . + + +FLAGS Quick Search +------------------ + +.. toctree:: + :maxdepth: 1 + + + cudnn_en.rst + data_en.rst + debug_en.rst + device_en.rst + distributed_en.rst + executor_en.rst + memory_en.rst + others_en.rst + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/doc/paddle/advanced_guide/flags/memory_cn.rst b/doc/paddle/advanced_guide/flags/memory_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..94676721c2d0baca9a2d744e7dbc7064c7eed279 --- /dev/null +++ b/doc/paddle/advanced_guide/flags/memory_cn.rst @@ -0,0 +1,261 @@ + +存储管理 +================== + + +FLAGS_allocator_strategy +******************** +(始于1.2) + +用于选择PaddlePaddle的分配器策略。 + +取值范围 +--------------- +String型,['naive_best_fit', 'auto_growth']中的一个。缺省值如果编译Paddle CMake时使用-DON_INFER=ON为'naive_best_fit'。 +其他默认情况为'auto_growth'。PaddlePaddle pip安装包的默认策略也是'auto_growth' + +示例 +-------- +FLAGS_allocator_strategy=naive_best_fit - 使用预分配best fit分配器,PaddlePaddle会先占用大多比例的可用内存/显存,在Paddle具体数据使用时分配,这种方式预占空间较大,但内存/显存碎片较少(比如能够支持模型的最大batch size会变大)。 + +FLAGS_allocator_strategy=auto_growth - 使用auto growth分配器。PaddlePaddle会随着真实数据需要再占用内存/显存,但内存/显存可能会产生碎片(比如能够支持模型的最大batch size会变小)。 + + +FLAGS_eager_delete_scope +******************************************* +(始于0.12.0) + +同步局域删除。设置后,它将降低GPU内存使用量,但同时也会减慢销毁变量的速度(性能损害约1%)。 + +取值范围 +--------------- +Bool型,缺省值为True。 + +示例 +------- +FLAGS_eager_delete_scope=True - 同步局域删除。 + + +FLAGS_eager_delete_tensor_gb +******************************************* +(始于1.0.0) + +表示是否使用垃圾回收策略来优化网络的内存使用。如果FLAGS_eager_delete_tensor_gb < 0,则禁用垃圾回收策略。如果FLAGS_eager_delete_tensor_gb >= 0,则启用垃圾回收策略,并在运行网络时回收内存垃圾,这有利于节省内存使用量。它仅在您使用Executor运行程序、编译程序或使用并行数据编译程序时才有用。垃圾回收器直到垃圾的内存大小达到FLAGS_eager_delete_tensor_gb GB时才会释放内存垃圾。 + +取值范围 +--------------- +Double型,单位为GB,缺省值为0.0。 + +示例 +------- +FLAGS_eager_delete_tensor_gb=0.0 - 垃圾占用大小达到0.0GB时释放内存垃圾,即一旦出现垃圾则马上释放。 + +FLAGS_eager_delete_tensor_gb=1.0 - 垃圾占用内存大小达到1.0GB时释放内存垃圾。 + +FLAGS_eager_delete_tensor_gb=-1.0 - 禁用垃圾回收策略。 + +注意 +------- +建议用户在训练大型网络时设置FLAGS_eager_delete_tensor_gb=0.0以启用垃圾回收策略。 + + +FLAGS_fast_eager_deletion_mode +******************************************* +(始于1.3) + +是否使用快速垃圾回收策略。如果未设置,则在CUDA内核结束时释放gpu内存。否则gpu内存将在CUDA内核尚未结束的情况下被释放,从而使垃圾回收策略更快。仅在启用垃圾回收策略时有效。 + +取值范围 +--------------- +Bool型,缺省值为True。 + +示例 +------- +FLAGS_fast_eager_deletion_mode=True - 启用快速垃圾回收策略。 + +FLAGS_fast_eager_deletion_mode=False - 禁用快速垃圾回收策略。 + + +FLAGS_fraction_of_cpu_memory_to_use +******************************************* +(始于1.2.0) + +表示分配的内存块占CPU总内存大小的比例。将来的内存使用将从该内存块分配。 如果内存块没有足够的cpu内存,将从cpu请求分配与内存块相同大小的新的内存块,直到cpu没有足够的内存为止。 + +取值范围 +--------------- +Double型,范围[0, 1],表示初始分配的内存块占CPU内存的比例。缺省值为1.0。 + +示例 +------- +FLAGS_fraction_of_cpu_memory_to_use=0.1 - 分配总CPU内存大小的10%作为初始CPU内存块。 + + +FLAGS_fraction_of_cuda_pinned_memory_to_use +******************************************* +(始于1.2.0) + +表示分配的CUDA Pinned内存块占CPU总内存大小的比例。将来的CUDA Pinned内存使用将从该内存块分配。 如果内存块没有足够的cpu内存,将从cpu请求分配与内存块相同大小的新的内存块,直到cpu没有足够的内存为止。 + +取值范围 +--------------- +Double型,范围[0, 1],表示初始分配的内存块占CPU内存的比例。缺省值为0.5。 + +示例 +------- +FLAGS_fraction_of_cuda_pinned_memory_to_use=0.1 - 分配总CPU内存大小的10%作为初始CUDA Pinned内存块。 + + +FLAGS_fraction_of_gpu_memory_to_use +******************************************* 
+(始于1.2.0) + +表示分配的显存块占GPU总可用显存大小的比例。将来的显存使用将从该显存块分配。 如果显存块没有足够的gpu显存,将从gpu请求分配与显存块同样大小的新的显存块,直到gpu没有足够的显存为止。 + +取值范围 +--------------- +Double型,范围[0, 1],表示初始分配的显存块占GPU可用显存的比例。 + +示例 +------- +FLAGS_fraction_of_gpu_memory_to_use=0.1 - 分配GPU总可用显存大小的10%作为初始GPU显存块。 + +注意 +------- +Windows系列平台会将FLAGS_fraction_of_gpu_memory_to_use默认设为0.5,Linux则会默认设为0.92。 + + +FLAGS_fuse_parameter_groups_size +******************************************* +(始于1.4.0) + +FLAGS_fuse_parameter_groups_size表示每一组中参数的个数。缺省值是一个经验性的结果。如果fuse_parameter_groups_size为1,则表示组的大小和参数梯度的数目一致。 如果fuse_parameter_groups_size为-1,则表示只有一个组。缺省值为3,这只是一个经验值。 + +取值范围 +--------------- +Int32型,缺省值为3。 + +示例 +------- +FLAGS_fuse_parameter_groups_size=3 - 将单组参数的梯度大小设为3。 + + +FLAGS_fuse_parameter_memory_size +******************************************* +(始于1.5.0) + +FLAGS_fuse_parameter_memory_size表示作为通信调用输入(例如NCCLAllReduce)的单组参数梯度的上限内存大小。默认值为-1.0,表示不根据memory_size设置组。单位是MB。 + +取值范围 +--------------- +Double型,缺省值为-1.0。 + +示例 +------- +FLAGS_fuse_parameter_memory_size=16 - 将单组参数梯度的上限大小设为16MB。 + + +FLAGS_init_allocated_mem +******************************************* +(始于0.15.0) + +是否对分配的内存进行非零值初始化。该flag用于调试,以防止某些Ops假定已分配的内存都是初始化为零的。 + +取值范围 +--------------- +Bool型,缺省值为False。 + +示例 +------- +FLAGS_init_allocated_mem=True - 对分配的内存进行非零初始化。 + +FLAGS_init_allocated_mem=False - 不会对分配的内存进行非零初始化。 + + +FLAGS_initial_cpu_memory_in_mb +******************************************* +(始于0.14.0) + +初始PaddlePaddle分配器的CPU内存块大小,单位为MB。分配器将FLAGS_initial_cpu_memory_in_mb和FLAGS_fraction_of_cpu_memory_to_use*(总物理内存)的最小值作为内存块大小。 + +取值范围 +--------------- +Uint64型,缺省值为500,单位为MB。 + +示例 +------- +FLAGS_initial_cpu_memory_in_mb=100 - 在FLAGS_fraction_of_cpu_memory_to_use*(总物理内存)大于100MB的情况下,首次提出分配请求时,分配器预先分配100MB内存,并在预分配的内存耗尽时再次分配100MB。 + + +FLAGS_initial_gpu_memory_in_mb +******************************************* +(始于1.4.0) + +预分配一块指定大小的GPU显存块。之后的显存使用将从该显存块分配。如果显存块没有足够的显存,将从GPU请求大小为FLAGS_reallocate_gpu_memory_in_mb的显存块,直到GPU没有剩余显存为止。 + +取值范围 +--------------- +Uint64型,大于0,为初始GPU显存大小,单位为MB。 + +示例 +------- +FLAGS_initial_gpu_memory_in_mb=4096 - 分配4GB作为初始GPU显存块大小。 + +注意 +------- +如果设置该flag,则FLAGS_fraction_of_gpu_memory_to_use设置的显存大小将被该flag覆盖。PaddlePaddle将用该flag指定的值分配初始GPU显存。 +如果未设置该flag,即flag默认值为0时,会关闭此显存策略。PaddlePaddle会使用FLAGS_fraction_of_gpu_memory_to_use的策略来分配初始显存块。 + + +FLAGS_memory_fraction_of_eager_deletion +******************************************* +(始于1.4) + +垃圾回收策略释放变量的内存大小百分比。如果FLAGS_memory_fraction_of_eager_deletion = 1.0,则将释放网络中的所有临时变量。如果FLAGS_memory_fraction_of_eager_deletion = 0.0,则不会释放网络中的任何临时变量。如果0.0= 0, garbage collection strategy would be enabled, and collect memory garbages when running network, which is beneficial to saving memory usage. It is only useful when you use Executor to run program, or compile program, or compile program with data parallel. Garbage collector would not release memory garbages until the memory size of garbages reaches FLAGS_eager_delete_tensor_gb GB. + +Values accepted +--------------- +Double, in GB unit. The default value is 0.0. + +Example +------- +FLAGS_eager_delete_tensor_gb=0.0 would make memory garbage release till the memory size of garbages reaches 0.0GB, i.e., release immediately once there is any garbage. + +FLAGS_eager_delete_tensor_gb=1.0 would make memory garbage release till the memory size of garbages reaches 1.0GB. + +FLAGS_eager_delete_tensor_gb=-1.0 would disable garbage collection strategy. 
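+
+As an additional, non-authoritative illustration, the same flag can also be changed at runtime through ``fluid.set_flags`` (the API referenced in the FLAGS usage page of this guide); the value 0.0 below simply enables eager garbage collection as in the first example above:
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+
+    # Enable the garbage collection strategy: release temporary variables
+    # as soon as they become garbage (threshold 0.0 GB).
+    fluid.set_flags({'FLAGS_eager_delete_tensor_gb': 0.0})
+    print(fluid.get_flags(['FLAGS_eager_delete_tensor_gb']))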
+ +Note +------- +It is recommended that users enable garbage collection strategy by setting FLAGS_eager_delete_tensor_gb=0.0 when training large network. + + +FLAGS_fast_eager_deletion_mode +******************************************* +(since 1.3) + +Whether to use fast garbage collection strategy. If not set, gpu memory would be released when CUDA kernel ends. Otherwise, gpu memory would be released without waiting CUDA kernel ends, making garbage collection strategy faster. Only valid when garbage collection strategy is enabled. + +Values accepted +--------------- +Bool. The default value is True. + +Example +------- +FLAGS_fast_eager_deletion_mode=True would turn on fast garbage collection strategy. + +FLAGS_fast_eager_deletion_mode=False would turn off fast garbage collection strategy. + +FLAGS_fraction_of_cpu_memory_to_use +******************************************* +(since 1.2.0) + +Allocate a chunk of cpu memory that is this fraction of the total cpu memory size. Future memory usage will be allocated from the chunk. If the chunk doesn't have enough cpu memory, additional chunks of the same size will be requested from cpu until the cpu has no memory left for another chunk. + +Values accepted +--------------- +Double value in range [0, 1] which is the initial CPU memory percentage. The default value is 1.0. + +Example +------- +FLAGS_fraction_of_cpu_memory_to_use=0.1 will allocate 10% total cpu memory size as initial CPU chunk. + + +FLAGS_fraction_of_cuda_pinned_memory_to_use +******************************************* +(since 1.2.0) + +Allocate a chunk of CUDA pinned memory that is this fraction of the total cpu memory size. Future memory usage will be allocated from the chunk. If the chunk doesn't have enough cpu memory, additional chunks of the same size will be requested from cpu until the cpu has no memory left for another chunk. + +Values accepted +--------------- +Double value in range [0, 1] which is the initial CUDA pinned memory percentage. The default value is 0.5. + +Example +------- +FLAGS_fraction_of_cuda_pinned_memory_to_use=0.1 will allocate 10% total cpu memory size as initial CUDA Pinned chunk. + + +FLAGS_fraction_of_gpu_memory_to_use +******************************************* +(since 1.2.0) + +Allocate a chunk of gpu memory that is this fraction of the available gpu memory size. Future memory usage will be allocated from the chunk. If the chunk doesn't have enough gpu memory, additional chunks of the same size will be requested from gpu until the gpu has no memory left for another chunk. + +Values accepted +--------------- +Double value in range [0, 1] which is the initial GPU memory percentage. + +Example +------- +FLAGS_fraction_of_gpu_memory_to_use=0.1 will allocate 10% available gpu memory size as initial GPU chunk. + +Note +------- +Windows series platform will set FLAGS_fraction_of_gpu_memory_to_use to 0.5 by default. +Linux will set FLAGS_fraction_of_gpu_memory_to_use to 0.92 by default. + + +FLAGS_fuse_parameter_groups_size +******************************************* +(since 1.4.0) + +FLAGS_fuse_parameter_groups_size is the size of one group parameters' gradient. The default value is an empirical result. If the fuse_parameter_groups_size is 1, it means that the groups' size is the number of parameters' gradient. If the fuse_parameter_groups_size is -1, it means that there is only one group. The default value is 3, it is an empirical value. + +Values accepted +--------------- +Int32. The default value is 3. 
+ +Example +------- +FLAGS_fuse_parameter_groups_size=3 will set the size of one group parameters' gradient to 3. + + + +FLAGS_fuse_parameter_memory_size +******************************************* +(since 1.5.0) + +FLAGS_fuse_parameter_memory_size indicates the up limited memory size of one group parameters' gradient which is the input of communication calling ( e.g NCCLAllReduce). The default value is -1.0, it means that not set group according to memory_size. The unit is Megabyte. + +Values accepted +--------------- +Double. The default value is -1.0. + +Example +------- +FLAGS_fuse_parameter_memory_size=16 set the up limited memory size of one group parameters' gradient to 16 Megabytes. + + +FLAGS_init_allocated_mem +******************************************* +(since 0.15.0) + +Whether to initialize the allocated memory by some non-zero values. This flag is for debug use to prevent that some ops assumes that the memory allocated is initialized to be zero. + +Values accepted +--------------- +Bool. The default value is False. + +Example +------- +FLAGS_init_allocated_mem=True will make the allocated memory initialize as a non-zero value. + +FLAGS_init_allocated_mem=False will not initialize the allocated memory. + + +FLAGS_initial_cpu_memory_in_mb +******************************************* +(since 0.14.0) + +Initial CPU memory chunk size in MB of PaddlePaddle allocator. Allocator would take the minimal value of FLAGS_initial_cpu_memory_in_mb and FLAGS_fraction_of_cpu_memory_to_use*(total physical memory) as the memory chunk size. + +Values accepted +--------------- +Uint64. The default value is 500 with unit MB. + +Example +------- +FLAGS_initial_cpu_memory_in_mb=100, if FLAGS_fraction_of_cpu_memory_to_use*(total physical memory) > 100MB, then allocator will pre-allocate 100MB when first allocation request raises, and re-allocate 100MB again when the pre-allocated memory is exhaustive. + + +FLAGS_initial_gpu_memory_in_mb +******************************************* +(since 1.4.0) + +Allocate a chunk of GPU memory whose byte size is specified by the flag. Future memory usage will be allocated from the chunk. If the chunk doesn't have enough GPU memory, additional chunks of the GPU memory will be requested from GPU with size specified by FLAGS_reallocate_gpu_memory_in_mb until the GPU has no memory left for the additional chunk. + +Values accepted +--------------- +Uint64 value greater than 0 which is the initial GPU memory size in MB. + +Example +------- +FLAGS_initial_gpu_memory_in_mb=4096 will allocate 4 GB as initial GPU chunk. + +Note +------- +If you set this flag, the memory size set by FLAGS_fraction_of_gpu_memory_to_use will be overrided by this flag, PaddlePaddle will allocate the initial gpu memory with size specified by this flag. +If you don't set this flag, the dafault value 0 will disable this GPU memory strategy. PaddlePaddle will use FLAGS_fraction_of_gpu_memory_to_use to allocate the initial GPU chunk. + + + +FLAGS_memory_fraction_of_eager_deletion +******************************************* +(since 1.4) + +A memory size percentage when garbage collection strategy decides which variables should be released. If FLAGS_memory_fraction_of_eager_deletion=1.0, all temporary variables in the network would be released. If FLAGS_memory_fraction_of_eager_deletion=0.0, all temporary variables in the network would not be released. If 0.0`_ :介绍如何应用训练好的模型进行预测 + +.. 
toctree:: + :hidden: + + inference_deployment/index_cn.rst + flags/flags_cn.rst diff --git a/doc/paddle/advanced_guide/index_en.rst b/doc/paddle/advanced_guide/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..d6401bceb56b9c185ee0559aeb9aa3234436d9c8 --- /dev/null +++ b/doc/paddle/advanced_guide/index_en.rst @@ -0,0 +1,20 @@ +.. _user_guide_en_: + +#################### +Advanced User Guides +#################### + +.. todo:: + +So far you have already been familiar with PaddlePaddle. And the next expectation, read more on: + + + - `Deploy Inference Model `_ :How to deploy the trained network to perform practical inference + + +.. toctree:: + :hidden: + + inference_deployment/index_en.rst + flags/flags_en.rst + diff --git a/doc/paddle/advanced_guide/inference_deployment/index_cn.rst b/doc/paddle/advanced_guide/inference_deployment/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3a9b202b5702ec4cc6442d745a5ed209742f363c --- /dev/null +++ b/doc/paddle/advanced_guide/inference_deployment/index_cn.rst @@ -0,0 +1,16 @@ +######## +预测部署 +######## + +- `服务器端部署 `_ :介绍了如何在服务器端将模型部署上线 + +- `移动端部署 `_ :介绍了 PaddlePaddle 组织下的嵌入式平台深度学习框架Paddle-Lite + +- `模型压缩 `_ :简要介绍了PaddleSlim模型压缩工具库的特点以及使用说明。 + +.. toctree:: + :hidden: + + inference/index_cn.rst + mobile/index_cn.rst + paddleslim/paddle_slim.md diff --git a/doc/paddle/advanced_guide/inference_deployment/index_en.rst b/doc/paddle/advanced_guide/inference_deployment/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..29ac0e214bae28c61780f68bd9e6acfdc94bbb20 --- /dev/null +++ b/doc/paddle/advanced_guide/inference_deployment/index_en.rst @@ -0,0 +1,13 @@ +####################### +Deploy Inference Model +####################### + +- `Server side Deployment `_ : This section illustrates the method how to deploy and release the trained models on the servers + +- `Model Compression `_ : Introduce the features and usage of PaddleSlim which is a toolkit for model compression. + +.. toctree:: + :hidden: + + inference/index_en.rst + paddleslim/paddle_slim_en.rst diff --git a/doc/paddle/advanced_guide/inference_deployment/inference/build_and_install_lib_cn.rst b/doc/paddle/advanced_guide/inference_deployment/inference/build_and_install_lib_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c1bfba460db6c12651ac6a04f823812642490c9f --- /dev/null +++ b/doc/paddle/advanced_guide/inference_deployment/inference/build_and_install_lib_cn.rst @@ -0,0 +1,190 @@ +.. _install_or_build_cpp_inference_lib: + +安装与编译 Linux 预测库 +=========================== + +直接下载安装 +------------- + +.. 
csv-table:: + :header: "版本说明", "预测库(1.8.3版本)", "预测库(develop版本)" + :widths: 3, 2, 2 + + "ubuntu14.04_cpu_avx_mkl", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_" + "ubuntu14.04_cpu_avx_openblas", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_" + "ubuntu14.04_cpu_noavx_openblas", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_" + "ubuntu14.04_cuda9.0_cudnn7_avx_mkl", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_" + "ubuntu14.04_cuda10.0_cudnn7_avx_mkl", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_" + "ubuntu14.04_cuda10.1_cudnn7.6_avx_mkl_trt6", "`fluid_inference.tgz `_", + "nv-jetson-cuda10-cudnn7.5-trt5", "`fluid_inference.tar.gz `_", + + +从源码编译 +---------- +用户也可以从 PaddlePaddle 核心代码编译C++预测库,只需在编译时配制下面这些编译选项: + +============================ ============= ================== +选项 值 说明 +============================ ============= ================== +CMAKE_BUILD_TYPE Release 编译方式,仅使用预测库设为Release即可 +FLUID_INFERENCE_INSTALL_DIR 安装路径 预测库安装路径 +WITH_PYTHON OFF(推荐) 编译python预测库与whl包 +ON_INFER ON(推荐) 预测时使用,必须设为ON +WITH_GPU ON/OFF 编译支持GPU的预测库 +WITH_MKL ON/OFF 编译支持MKL的预测库 +WITH_MKLDNN ON/OFF 编译支持MKLDNN的预测库 +WITH_XBYAK ON 使用XBYAK编译,在jetson硬件上编译需要设置为OFF +WITH_NV_JETSON OFF 在NV Jetson硬件上编译时需要设为ON +============================ ============= ================== + +建议按照推荐值设置,以避免链接不必要的库。其它可选编译选项按需进行设定。 + +首先从github拉取最新代码 + +.. code-block:: bash + + git clone https://github.com/paddlepaddle/Paddle + cd Paddle + # 建议使用git checkout切换到Paddle稳定的版本,如: + git checkout v1.7.2 + +**note**: 如果您是多卡机器,建议安装NCCL;如果您是单卡机器则可以在编译时显示指定WITH_NCCL=OFF来跳过这一步。注意如果WITH_NCCL=ON,且没有安装NCCL,则编译会报错。 + +.. code-block:: bash + + git clone https://github.com/NVIDIA/nccl.git + cd nccl + make -j4 + make install + + +**Server端预测库源码编译** + +下面的代码片段配制编译选项并进行编译(需要将PADDLE_ROOT替换为PaddlePaddle预测库的安装路径,WITH_NCCL根据实际情况进行修改): + + .. code-block:: bash + + PADDLE_ROOT=/path/of/paddle + cd Paddle + mkdir build + cd build + cmake -DFLUID_INFERENCE_INSTALL_DIR=$PADDLE_ROOT \ + -DCMAKE_BUILD_TYPE=Release \ + -DWITH_PYTHON=OFF \ + -DWITH_MKL=OFF \ + -DWITH_GPU=OFF \ + -DON_INFER=ON \ + -DWITH_NCCL=OFF \ + .. + make + make inference_lib_dist + +**NVIDIA Jetson嵌入式硬件预测库源码编译** + +NVIDIA Jetson是NVIDIA推出的嵌入式AI平台,Paddle Inference支持在 NVIDIA Jetson平台上编译预测库。具体步骤如下: + + 1. 准备环境 + + 开启硬件性能模式 + + .. code-block:: bash + + sudo nvpmodel -m 0 && sudo jetson_clocks + + 如果硬件为Nano,增加swap空间 + + .. code-block:: bash + + #增加DDR可用空间,Xavier默认内存为16G,所以内存足够,如想在Nano上尝试,请执行如下操作。 + sudo fallocate -l 5G /var/swapfile + sudo chmod 600 /var/swapfile + sudo mkswap /var/swapfile + sudo swapon /var/swapfile + sudo bash -c 'echo "/var/swapfile swap swap defaults 0 0" >> /etc/fstab' + + 2. 编译Paddle Inference预测库 + .. code-block:: bash + + cd Paddle + mkdir build + cd build + cmake .. \ + -DWITH_CONTRIB=OFF \ + -DWITH_MKL=OFF \ + -DWITH_MKLDNN=OFF \ + -DWITH_TESTING=OFF \ + -DCMAKE_BUILD_TYPE=Release \ + -DON_INFER=ON \ + -DWITH_PYTHON=OFF \ + -DWITH_XBYAK=OFF \ + -DWITH_NV_JETSON=ON + make -j4 + # 生成预测lib + make inference_lib_dist -j4 + + 3. 样例测试 + 请参照官网样例:https://www.paddlepaddle.org.cn/documentation/docs/zh/advanced_guide/performance_improving/inference_improving/paddle_tensorrt_infer.html#id2 + + **FAQ** + + 1. 报错: + + .. code-block:: bash + + ERROR: ../aarch64-linux-gpn/crtn.o: Too many open files. + + 则增加系统同一时间最多可开启的文件数至2048 + + .. code-block:: bash + + ulimit -n 2048 + + 2. 编译卡住 + 可能是下载第三方库较慢的原因,耐心等待或kill掉编译进程重新编译 + + 3. 使用TensorRT报错IPluginFactory或IGpuAllocator缺少虚析构函数 + 下载安装TensorRT后,在NvInfer.h文件中为class IPluginFactory和class IGpuAllocator分别添加虚析构函数: + + .. 
code-block:: bash + + virtual ~IPluginFactory() {}; + virtual ~IGpuAllocator() {}; + + +成功编译后,使用C++预测库所需的依赖(包括:(1)编译出的PaddlePaddle预测库和头文件;(2)第三方链接库和头文件;(3)版本信息与编译选项信息) +均会存放于PADDLE_ROOT目录中。目录结构如下: + + .. code-block:: text + + PaddleRoot/ + ├── CMakeCache.txt + ├── paddle + │   ├── include + │   │   ├── paddle_anakin_config.h + │   │   ├── paddle_analysis_config.h + │   │   ├── paddle_api.h + │   │   ├── paddle_inference_api.h + │   │   ├── paddle_mkldnn_quantizer_config.h + │   │   └── paddle_pass_builder.h + │   └── lib + │   ├── libpaddle_fluid.a + │   └── libpaddle_fluid.so + ├── third_party + │   └── install + │   ├── gflags + │   ├── glog + │   ├── mkldnn + │   ├── mklml + │   └── protobuf + └── version.txt + +version.txt 中记录了该预测库的版本信息,包括Git Commit ID、使用OpenBlas或MKL数学库、CUDA/CUDNN版本号,如: + + .. code-block:: text + + GIT COMMIT ID: 0231f58e592ad9f673ac1832d8c495c8ed65d24f + WITH_MKL: ON + WITH_MKLDNN: ON + WITH_GPU: ON + CUDA version: 10.1 + CUDNN version: v7 diff --git a/doc/paddle/advanced_guide/inference_deployment/inference/build_and_install_lib_en.rst b/doc/paddle/advanced_guide/inference_deployment/inference/build_and_install_lib_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..545aba61360b0018e3d3a1c28f4e56f4f6005925 --- /dev/null +++ b/doc/paddle/advanced_guide/inference_deployment/inference/build_and_install_lib_en.rst @@ -0,0 +1,204 @@ +.. _install_or_build_cpp_inference_lib_en: + +Install and Compile C++ Inference Library on Linux +============================================= + +Direct Download and Installation +--------------------------------- + +.. csv-table:: c++ inference library list + :header: "version description", "inference library(1.8.3 version)", "inference library(develop version)" + :widths: 3, 2, 2 + + "ubuntu14.04_cpu_avx_mkl", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_" + "ubuntu14.04_cpu_avx_openblas", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_" + "ubuntu14.04_cpu_noavx_openblas", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_" + "ubuntu14.04_cuda9.0_cudnn7_avx_mkl", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_" + "ubuntu14.04_cuda10.0_cudnn7_avx_mkl", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_" + "ubuntu14.04_cuda10.1_cudnn7.6_avx_mkl_trt6", "`fluid_inference.tgz `_", + "nv-jetson-cuda10-cudnn7.5-trt5", "`fluid_inference.tar.gz `_", + +Build from Source Code +----------------------- + +Users can also compile C++ inference libraries from the PaddlePaddle core code by specifying the following compile options at compile time: + +============================ =============== ================== +Option Value Description +============================ =============== ================== +CMAKE_BUILD_TYPE Release cmake build type, set to Release if debug messages are not needed +FLUID_INFERENCE_INSTALL_DIR path install path of inference libs +WITH_PYTHON OFF(recomended) build python libs and whl package +ON_INFER ON(recomended) build with inference settings +WITH_GPU ON/OFF build inference libs on GPU +WITH_MKL ON/OFF build inference libs supporting MKL +WITH_MKLDNN ON/OFF build inference libs supporting MKLDNN +WITH_XBYAK ON build with XBYAK, must be OFF when building on NV Jetson platforms +WITH_NV_JETSON OFF build inference libs on NV Jetson platforms +============================ =============== ================== + +It is recommended to configure options according to the recommended values to avoid linking unnecessary libraries. Other options can be set if it is necessary. 
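+
+As a quick reference, each option in the table above is passed to cmake as a ``-D`` argument. A minimal sketch of a CPU-only configuration following the recommended values is shown below; the install path is a placeholder, and the full step-by-step build commands follow in the next sections.
+
+.. code-block:: bash
+
+    # Illustrative only: recommended values from the table above, passed as -D arguments.
+    # /path/to/paddle_inference_install_dir is a placeholder.
+    cmake .. -DFLUID_INFERENCE_INSTALL_DIR=/path/to/paddle_inference_install_dir \
+             -DCMAKE_BUILD_TYPE=Release \
+             -DON_INFER=ON \
+             -DWITH_PYTHON=OFF \
+             -DWITH_GPU=OFF \
+             -DWITH_MKL=ON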
+ + +Firstly we pull the latest code from github. + +.. code-block:: bash + + git clone https://github.com/paddlepaddle/Paddle + cd Paddle + # Use git checkout to switch to stable versions such as v1.7.2 + git checkout v1.7.2 + + +**note**: If your environment is a multi-card machine, it is recommended to install nccl; otherwise, you can skip this step by specifying WITH_NCCL = OFF during compilation. Note that if WITH_NCCL = ON, and NCCL is not installed, the compiler will report an error. + +.. code-block:: bash + + git clone https://github.com/NVIDIA/nccl.git + cd nccl + make -j4 + make install + + +**build inference libs on server** + +Following codes set the configurations and execute building(PADDLE_ROOT should be set to the actual installing path of inference libs, WITH_NCCL should be modified according to the actual environment.). + + .. code-block:: bash + + PADDLE_ROOT=/path/of/capi + git clone https://github.com/PaddlePaddle/Paddle.git + cd Paddle + mkdir build + cd build + cmake -DFLUID_INFERENCE_INSTALL_DIR=$PADDLE_ROOT \ + -DCMAKE_BUILD_TYPE=Release \ + -DWITH_PYTHON=OFF \ + -DWITH_MKL=OFF \ + -DWITH_GPU=OFF \ + -DON_INFER=ON \ + -DWITH_NCCL=OFF \ + .. + make + make inference_lib_dist + +**build inference libs on NVIDIA Jetson platforms** + +NVIDIA Jetson is an AI computing platform in embedded systems introduced by NVIDIA. Paddle Inference supports building inference libs on NVIDIA Jetson platforms. The steps are as following. + + 1. Prepare environments + + Turn on hardware performance mode + + .. code-block:: bash + + sudo nvpmodel -m 0 && sudo jetson_clocks + + if building on Nano hardwares, increase swap memory + + .. code-block:: bash + + # Increase DDR valid space. Default memory allocated is 16G, which is enough for Xavier. Following steps are for Nano hardwares. + sudo fallocate -l 5G /var/swapfile + sudo chmod 600 /var/swapfile + sudo mkswap /var/swapfile + sudo swapon /var/swapfile + sudo bash -c 'echo "/var/swapfile swap swap defaults 0 0" >> /etc/fstab' + + 2. Build paddle inference libs + + .. code-block:: bash + + cd Paddle + mkdir build + cd build + cmake .. \ + -DWITH_CONTRIB=OFF \ + -DWITH_MKL=OFF \ + -DWITH_MKLDNN=OFF \ + -DWITH_TESTING=OFF \ + -DCMAKE_BUILD_TYPE=Release \ + -DON_INFER=ON \ + -DWITH_PYTHON=OFF \ + -DWITH_XBYAK=OFF \ + -DWITH_NV_JETSON=ON + make -j4 + # Generate inference libs + make inference_lib_dist -j4 + + 3. Test with samples + Please refer to samples on https://www.paddlepaddle.org.cn/documentation/docs/zh/advanced_guide/performance_improving/inference_improving/paddle_tensorrt_infer.html#id2 + + **FAQ** + + 1. Error: + + .. code-block:: bash + + ERROR: ../aarch64-linux-gpn/crtn.o: Too many open files. + + Fix this by increasing the number of files the system can open at the same time to 2048. + + .. code-block:: bash + + ulimit -n 2048 + + 2. The building process hangs. + Might be downloading third-party libs. Wait or kill the building process and start again. + + 3. Lacking virtual destructors for IPluginFactory or IGpuAllocator when using TensorRT. + After downloading and installing TensorRT, add virtual destructors for IPluginFactory and IGpuAllocator in NvInfer.h: + + .. code-block:: bash + + virtual ~IPluginFactory() {}; + virtual ~IGpuAllocator() {}; + + +After successful compilation, dependencies required by the C++ inference library Will be stored in the PADDLE_ROOT directory. 
(dependencies including: (1) compiled PaddlePaddle inference library and header files; (2) third-party link libraries and header files; (3) version information and compilation option information) + +The directory structure is: + + .. code-block:: text + + PaddleRoot/ + ├── CMakeCache.txt + ├── paddle + │ ├── include + │ │ ├── paddle_anakin_config.h + │ │ ├── paddle_analysis_config.h + │ │ ├── paddle_api.h + │ │ ├── paddle_inference_api.h + │   │   ├── paddle_mkldnn_quantizer_config.h + │ │ └── paddle_pass_builder.h + │ └── lib + │ ├── libpaddle_fluid.a + │ └── libpaddle_fluid.so + ├── third_party + │ ├── boost + │ │ └── boost + │ ├── eigen3 + │ │ ├── Eigen + │ │ └── unsupported + │ └── install + │ ├── gflags + │ ├── glog + │ ├── mkldnn + │ ├── mklml + │ ├── protobuf + │ ├── snappy + │ ├── snappystream + │ ├── xxhash + │ └── zlib + └── version.txt + +The version information of the inference library is recorded in version.txt, including Git Commit ID, version of OpenBlas, MKL math library, or CUDA/CUDNN. For example: + + .. code-block:: text + + GIT COMMIT ID: cc9028b90ef50a825a722c55e5fda4b7cd26b0d6 + WITH_MKL: ON + WITH_MKLDNN: ON + WITH_GPU: ON + CUDA version: 8.0 + CUDNN version: v7 diff --git a/doc/paddle/advanced_guide/inference_deployment/inference/c_infer_cn.md b/doc/paddle/advanced_guide/inference_deployment/inference/c_infer_cn.md new file mode 100644 index 0000000000000000000000000000000000000000..7ac0ba24cf426ebb64a87e6ed494da19df12a31c --- /dev/null +++ b/doc/paddle/advanced_guide/inference_deployment/inference/c_infer_cn.md @@ -0,0 +1,280 @@ +# C 预测 API介绍 + +Fluid提供了高度优化的[C++预测库](./native_infer.html),为了方便使用,我们也提供了封装了C++预测库对应的C接口。C接口的使用方式,首先是需要`#include paddle_c_api.h`,头文件`paddle_c_api.h`可以在Paddle的仓库中的`paddle/fluid/inference/capi/paddle_c_api.h`找到,或是在编译Paddle的`Paddle/build/`路径下,`build/fluid_inference_c_install_dir/paddle/include/`路径下找到。此外,使用 CAPI 还需要在编译项目的时候,链接相关的编译的库`libpaddle_fluid_c.so`。下面是详细的使用说明。 + +需要说明的是,与 C++ API 不同,C API 为了兼顾多语言封装的需要,将不会再设置默认参数,即使用时,所有的参数都需要用户显式地提供。 + + +## C预测相关数据结构 + +使用C预测API与C++预测API不完全一样,C预测主要包括`PD_AnalysisConfig`, `PD_DataType`, `PD_Predictor`, `PD_Buffer`和`PD_ZeroCopyTensor`。接下来将会进一步详细地介绍这些数据结构以及使用的方法,并提供相应的示例。 + +### PD_AnalysisConfig + +`PD_AnalysisConfig`是创建预测引擎的配置,提供了模型路径设置、预测引擎运行设备选择以及多种优化预测流程的选项,主要包括以下方法 + +* `PD_AnalysisConfig* PD_NewAnalysisConfig()`: 新建一个`PD_AnalysisConfig`的指针。 +* `void PD_DeleteAnalysisConfig(PD_AnalysisConfig* config)`: 删除一个`PD_AnalysisConfig`的指针。 +* `void PD_SetModel(PD_AnalysisConfig* config, const char* model_dir, const char* params_path)`: 设置模型的路径,输入的参数包括`PD_AnalysisConfig`,`model_dir`,`params_path`,其中`model_dir`是指的是模型保存位置的路径,一般不用包括文件名,`params_path`为可选参数,注意: + - 如果不给定`params_path`,即`params_path`为`NULL`,则认为该模型的参数存储路径与`model_dir`一致,且模型文件和参数文件是按照默认的文件名存储的,此时参数文件可能有多个。此时,需要用户输入参数与模型文件的`model_dir`,即模型和参数保存的路径名,不需要指定文件名,同时,需要显式地设置`params_path`为`NULL`。 + - 如果提供了`params_path`,为了方便用户的自定义,则在指明`model_dir`路径最后需要加上模型文件的文件名传入,即`model_dir`传入对应的模型文件的路径,`params_path`传入对应的模型参数文件的路径,需要指定文件名。 +* `const char* PD_ModelDir(const PD_AnalysisConfig* config)`: 如果未指明`PD_SetModel()`的`params_path`,则可以返回模型文件夹路径。 +* `const char* PD_ProgFile(const PD_AnalysisConfig* config)`: 如果是指明`PD_SetModel()`的`params_path`,则可以返回模型文件路径。 +* `const char* PD_ParamsFile(const PD_AnalysisConfig* config)`: 如果是指明`PD_SetModel()`的`params_path`,则可以返回参数文件路径。 +* `void PD_SwitchSpecifyInputNames(PD_AnalysisConfig* config, bool x)`: 设置为`true`是指模型运算在读取输入的时候,依据名称来确定不同的输入,否则根据输入的顺序。使用`PD_ZeroCopyTensor`并且是多输入的情况,建议设置为`true`。 +* `void PD_SwitchUseFeedFetchOps(PD_AnalysisConfig* 
config, bool x)`: 设置是否使用`feed`,`fetch` op。在使用`PD_ZeroCopyTensor`必须设置该选项为`false`。 +* `void PD_EnableUseGpu(PD_AnalysisConfig* config, uint64_t memory_pool_init_size_mb, int device_id)`: 设置开启GPU,并且设定GPU显存(单位M)和设备的Device ID。 +* `void PD_DisableGpu(PD_AnalysisConfig* config)`: 禁用GPU。 +* `int PD_GpuDeviceId(const PD_AnalysisConfig* config)`: 返回使用的GPU设备的ID。 +* `void PD_SwitchIrOptim(PD_AnalysisConfig* config, bool x)`: 设置预测是否开启IR优化。 +* `void PD_EnableTensorRtEngine(PD_AnalysisConfig* config, int workspace_size, int max_batch_size, int min_subgraph_size, Precision precision, bool use_static, bool use_calib_mode)`: 开启TensorRT。关于参数的解释,详见[使用Paddle-TensorRT库预测](../../performance_improving/inference_improving/paddle_tensorrt_infer.html)。 +* `void PD_EnableMKLDNN(PD_AnalysisConfig* config)`: 开启MKLDNN。 + +#### 代码示例 +首先,新建一个`PD_AnalysisConfig`的指针。 +``` C +PD_AnalysisConfig* config = PD_NewAnalysisConfig(); +``` +如前文所述,设置模型和参数路径有两种形式: +* 当模型文件夹下存在一个以默认文件名保存的模型文件和多个参数文件时,传入模型文件夹路径,模型文件名默认为`__model__`,需要显式地设置`params_path`为`NULL`,不需要指定文件名。 +``` C +const char* model_dir = "./model/"; +PD_SetModel(config, model_dir, NULL); +``` +* 当模型文件夹下只有一个模型文件和一个参数文件,传入模型文件和参数文件,需要指定文件名。 +``` C +const char* model_path = "./model/model"; +const char* params_path = "./params/params"; +PD_SetModel(config, model_path, params_path); +``` + +其他预测引擎配置选项示例如下 +``` C +PD_EnableUseGpu(config, 100, 0); // 初始化100M显存,使用的gpu id为0 +PD_GpuDeviceId(config); // 返回正在使用的gpu id +PD_DisableGpu(config); // 禁用gpu +PD_SwitchIrOptim(config, true); // 开启IR优化 +PD_EnableMKLDNN(config); // 开启MKLDNN +PD_SwitchSpecifyInputNames(config, true); +PD_SwitchUseFeedFetchOps(config, false); +``` + +### PD_ZeroCopyTensor + +`PD_ZeroCopyTensor`是设置数据传入预测运算的数据结构。包括一下成员: + +* `data - (PD_Buffer)`: 设置传入数据的值。 +* `shape - (PD_Buffer)`: 设置传入数据的形状(shape)。 +* `lod - (PD_Buffer)`: 设置数据的`lod`,目前只支持一阶的`lod`。 +* `dtype - (PD_DataType)`: 设置传入数据的数据类型,用枚举`PD_DataType`表示。 +* `name - (char*)`: 设置传入数据的名称。 + +涉及使用`PD_ZeroCopyTensor`有以下方法: + +* `PD_ZeroCopyTensor* PD_NewZeroCopyTensor()`: 新创建一个`PD_ZeroCopyTensor`的指针。 +* `void PD_DeleteZeroCopyTensor(PD_ZeroCopyTensor*)`: 删除一个`PD_ZeroCopyTensor`的指针。 +* `void PD_InitZeroCopyTensor(PD_ZeroCopyTensor*)`: 使用默认初始化一个`PD_ZeroCopyTensor`的指针并分配的内存空间。 +* `void PD_DestroyZeroCopyTensor(PD_ZeroCopyTensor*)`: 删除`PD_ZeroCopyTensor`指针中,`data`,`shape`,`lod`的`PD_Buffer`的变量。 + +### PD_DataType + +`PD_DataType`是一个提供给用户的枚举,用于设定存有用户数据的`PD_ZeroCopyTensor`的数据类型。包括以下成员: + +* `PD_FLOAT32`: 32位浮点型 +* `PD_INT32`: 32位整型 +* `PD_INT64`: 64位整型 +* `PD_UINT8`: 8位无符号整型 + +#### 代码示例 +首先可以新建一个`PD_ZeroCopyTensor`。 +``` C +PD_ZeroCopyTensor input; +PD_InitZeroCopyTensor(&input); +``` +调用设置`PD_ZeroCopyTensor`的数据类型的方式如下: +``` C +input.dtype = PD_FLOAT32; +``` + +### PD_Buffer + +`PD_Buffer`可以用于设置`PD_ZeroCopyTensor`数据结构中,数据的`data`,`shape`和`lod`。包括以下成员: + +* `data`: 输入的数据,类型是`void*`,用于存储数据开始的地址。 +* `length`: 输入数据的实际的字节长度。 +* `capacity`: 为数据分配的内存大小,必定大于等于`length`。 + +### 示例代码 +``` C +PD_ZeroCopyTensor input; +PD_InitZeroCopyTensor(&input); +// 设置输入的名称 +input.name = "data"; +// 设置输入的数据大小 +input.data.capacity = sizeof(float) * 1 * 3 * 300 * 300; +input.data.length = input.data.capacity; +input.data.data = malloc(input.data.capacity); +// 设置数据的输入的形状 shape +int shape[] = {1, 3, 300, 300}; +input.shape.data = (int *)shape; +input.shape.capacity = sizeof(shape); +input.shape.length = sizeof(shape); +// 设置输入数据的类型 +input.dtype = PD_FLOAT32; +``` + +### PD_Predictor + +`PD_Predictor`是一个高性能预测引擎,该引擎通过对计算图的分析,可以完成对计算图的一系列的优化(如OP的融合、内存/显存的优化、 MKLDNN,TensorRT 等底层加速库的支持等)。主要包括一下函数: + +* 
`PD_Predictor* PD_NewPredictor(const PD_AnalysisConfig* config)`: 创建一个新的`PD_Predictor`的指针。 +* `void PD_DeletePredictor(PD_Predictor* predictor)`: 删除一个`PD_Predictor`的指针。 +* `int PD_GetInputNum(const PD_Predictor* predictor)`: 获取模型输入的个数。 +* `int PD_GetOutputNum(const PD_Predictor* predictor)`: 获取模型输出的个数。 +* `const char* PD_GetInputName(const PD_Predictor* predictor, int n)`: 获取模型第`n`个输入的名称。 +* `const char* PD_GetOutputName(const PD_Predictor* predictor, int n)`: 获取模型第`n`个输出的名称。 +* `void PD_SetZeroCopyInput(PD_Predictor* predictor, const PD_ZeroCopyTensor* tensor)`: 使用`PD_ZeroCopyTensor`数据结构设置模型输入的具体值、形状、lod等信息。目前只支持一阶lod。 +* `void PD_GetZeroCopyOutput(PD_Predictor* predictor, PD_ZeroCopyTensor* tensor)`: 使用`PD_ZeroCopyTensor`数据结构获取模型输出的具体值、形状、lod等信息。目前只支持一阶lod。 +* `void PD_ZeroCopyRun(PD_Predictor* predictor)`: 运行预测的引擎,完成模型由输入到输出的计算。 + +#### 代码示例 + +如前文所述,当完成网络配置`PD_AnalysisConfig`以及输入`PD_ZeroCopyTensor`的设置之后,只需要简单的几行代码就可以获得模型的输出。 + +首先完成`PD_AnalysisConfig`的设置,设置的方式与相关的函数如前文所述,这里同样给出了示例。 + +``` C +PD_AnalysisConfig* config = PD_NewAnalysisConfig(); +const char* model_dir = "./model/"; +PD_SetModel(config, model_dir, NULL); +PD_DisableGpu(config); +PD_SwitchSpecifyInputNames(config, true); // 使用PD_ZeroCopyTensor并且是多输入建议设置。 +PD_SwitchUseFeedFetchOps(config, false); // 使用PD_ZeroCopyTensor一定需要设置为false。 +``` + +其次,完成相应的输入的设置,设置的方式如前文所述,这里同样给出了示例。 + +``` C +PD_ZeroCopyTensor input; +PD_InitZeroCopyTensor(&input); +// 设置输入的名称 +input.name = (char *)(PD_GetInputName(predictor, 0)); +// 设置输入的数据大小 +input.data.capacity = sizeof(float) * 1 * 3 * 300 * 300; +input.data.length = input.data.capacity; +input.data.data = malloc(input.data.capacity); +// 设置数据的输入的形状(shape) +int shape[] = {1, 3, 300, 300}; +input.shape.data = (int *)shape; +input.shape.capacity = sizeof(shape); +input.shape.length = sizeof(shape); +// 设置输入数据的类型 +input.dtype = PD_FLOAT32; +``` + +最后,执行预测引擎,完成计算的步骤。 + +``` C +PD_Predictor *predictor = PD_NewPredictor(config); + +int input_num = PD_GetInputNum(predictor); +printf("Input num: %d\n", input_num); +int output_num = PD_GetOutputNum(predictor); +printf("Output num: %d\n", output_num); + +PD_SetZeroCopyInput(predictor, &input); // 这里只有一个输入,根据多输入情况,可以传入一个数组 + +PD_ZeroCopyRun(predictor); // 执行预测引擎 + +PD_ZeroCopyTensor output; +PD_InitZeroCopyTensor(&output); +output.name = (char *)(PD_GetOutputName(predictor, 0)); +PD_GetZeroCopyOutput(predictor, &output); +``` + +最后,可以根据前文所述的`PD_ZeroCopyTensor`的数据结构,获得返回的数据的值等信息。 + +## 完整使用示例 + +下面是使用Fluid C API进行预测的一个完整示例,使用resnet50模型 + +下载[resnet50模型](http://paddle-inference-dist.bj.bcebos.com/resnet50_model.tar.gz)并解压,运行如下代码将会调用预测引擎。 + +``` C +#include "paddle_c_api.h" +#include +#include + +/* + * The main procedures to run a predictor according to c-api: + * 1. Create config to set how to process the inference. + * 2. Prepare the input PD_ZeroCopyTensor for the inference. + * 3. Set PD_Predictor. + * 4. Call PD_ZeroCopyRun() to start. + * 5. Obtain the output. + * 6. According to the size of the PD_PaddleBuf's data's size, print all the output data. 
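+ * Note: cleanup is omitted in this sketch; in real code, release the tensor and
+ * predictor resources (e.g. PD_DestroyZeroCopyTensor, PD_DeletePredictor) when
+ * they are no longer needed.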
+ */ +int main() { + // 配置 PD_AnalysisConfig + PD_AnalysisConfig* config = PD_NewAnalysisConfig(); + PD_DisableGpu(config); + const char* model_path = "./model/model"; + const char* params_path = "./model/params"; + PD_SetModel(config, model_path, params_path); + PD_SwitchSpecifyInputNames(config, true); + PD_SwitchUseFeedFetchOps(config, false); + + // 新建一个 PD_Predictor 的指针 + PD_Predictor *predictor = PD_NewPredictor(config); + // 获取输入输出的个数 + int input_num = PD_GetInputNum(predictor); + printf("Input num: %d\n", input_num); + int output_num = PD_GetOutputNum(predictor); + printf("Output num: %d\n", output_num); + + // 设置输入的数据结构 + PD_ZeroCopyTensor input; + PD_InitZeroCopyTensor(&input); + // 设置输入的名称 + input.name = (char *)(PD_GetInputName(predictor, 0)); + // 设置输入的数据大小 + input.data.capacity = sizeof(float) * 1 * 3 * 318 * 318; + input.data.length = input.data.capacity; + input.data.data = malloc(input.data.capacity); + memset(input.data.data, 0, (sizeof(float) * 3 * 318 * 318)); + + // 设置数据的输入的形状(shape) + int shape[] = {1, 3, 318, 318}; + input.shape.data = (int *)shape; + input.shape.capacity = sizeof(shape); + input.shape.length = sizeof(shape); + // 设置输入数据的类型 + input.dtype = PD_FLOAT32; + + PD_SetZeroCopyInput(predictor, &input); + + // 执行预测引擎 + PD_ZeroCopyRun(predictor); + + // 获取预测输出 + PD_ZeroCopyTensor output; + PD_InitZeroCopyTensor(&output); + output.name = (char *)(PD_GetOutputName(predictor, 0)); + // 获取 output 之后,可以通过该数据结构,读取到 data, shape 等信息 + PD_GetZeroCopyOutput(predictor, &output); + + float* result = (float *)(output.data.data); + int result_length = output.data.length / sizeof(float); + + return 0; +} +``` + +运行以上代码,需要将 paddle_c_api.h 拷贝到指定位置,确保编译时可以找到这个头文件。同时,需要将 libpaddle_fluid_c.so 的路径加入环境变量。 + +最后可以使用 gcc 命令编译。 + +``` shell +gcc ${SOURCE_NAME} \ + -lpaddle_fluid_c +``` diff --git a/doc/paddle/advanced_guide/inference_deployment/inference/image/image1.png b/doc/paddle/advanced_guide/inference_deployment/inference/image/image1.png new file mode 100644 index 0000000000000000000000000000000000000000..04e91da704b07fb68e2d7825e80d384bbfd5ba09 Binary files /dev/null and b/doc/paddle/advanced_guide/inference_deployment/inference/image/image1.png differ diff --git a/doc/paddle/advanced_guide/inference_deployment/inference/image/image2.png b/doc/paddle/advanced_guide/inference_deployment/inference/image/image2.png new file mode 100644 index 0000000000000000000000000000000000000000..2d4ca01ebbffaaad14a6a5eade02baaec4e732f2 Binary files /dev/null and b/doc/paddle/advanced_guide/inference_deployment/inference/image/image2.png differ diff --git a/doc/paddle/advanced_guide/inference_deployment/inference/image/image3.png b/doc/paddle/advanced_guide/inference_deployment/inference/image/image3.png new file mode 100644 index 0000000000000000000000000000000000000000..7eb8c16146175f9d28e0a216ac18788f601e600c Binary files /dev/null and b/doc/paddle/advanced_guide/inference_deployment/inference/image/image3.png differ diff --git a/doc/paddle/advanced_guide/inference_deployment/inference/image/image4.png b/doc/paddle/advanced_guide/inference_deployment/inference/image/image4.png new file mode 100644 index 0000000000000000000000000000000000000000..34a0c21880e29abb1cfbacba0ff8a1c2dde2e757 Binary files /dev/null and b/doc/paddle/advanced_guide/inference_deployment/inference/image/image4.png differ diff --git a/doc/paddle/advanced_guide/inference_deployment/inference/image/image5.png b/doc/paddle/advanced_guide/inference_deployment/inference/image/image5.png new file mode 100644 index 
0000000000000000000000000000000000000000..4aa8529185854877e1b3c6bc6236fd8f9902a884 Binary files /dev/null and b/doc/paddle/advanced_guide/inference_deployment/inference/image/image5.png differ diff --git a/doc/paddle/advanced_guide/inference_deployment/inference/image/image6.png b/doc/paddle/advanced_guide/inference_deployment/inference/image/image6.png new file mode 100644 index 0000000000000000000000000000000000000000..499b1dc265d0101515183d0ff78ba6004ad82b07 Binary files /dev/null and b/doc/paddle/advanced_guide/inference_deployment/inference/image/image6.png differ diff --git a/doc/paddle/advanced_guide/inference_deployment/inference/image/image7.png b/doc/paddle/advanced_guide/inference_deployment/inference/image/image7.png new file mode 100644 index 0000000000000000000000000000000000000000..a9f40af362a6e9e507ca549ae846bb6fee28387a Binary files /dev/null and b/doc/paddle/advanced_guide/inference_deployment/inference/image/image7.png differ diff --git a/doc/paddle/advanced_guide/inference_deployment/inference/image/image8.png b/doc/paddle/advanced_guide/inference_deployment/inference/image/image8.png new file mode 100644 index 0000000000000000000000000000000000000000..6db078a7ae6efd8544ee37b4d57f21d0e111fe0c Binary files /dev/null and b/doc/paddle/advanced_guide/inference_deployment/inference/image/image8.png differ diff --git a/doc/paddle/advanced_guide/inference_deployment/inference/image/image9.png b/doc/paddle/advanced_guide/inference_deployment/inference/image/image9.png new file mode 100644 index 0000000000000000000000000000000000000000..f0dea70856a87854e20a5629093ca5ef7b9d0e51 Binary files /dev/null and b/doc/paddle/advanced_guide/inference_deployment/inference/image/image9.png differ diff --git a/doc/paddle/advanced_guide/inference_deployment/inference/image/model_graph_original.png b/doc/paddle/advanced_guide/inference_deployment/inference/image/model_graph_original.png new file mode 100644 index 0000000000000000000000000000000000000000..c1ce03d1cd77f7a8d07ccbca3964642f2faefe00 Binary files /dev/null and b/doc/paddle/advanced_guide/inference_deployment/inference/image/model_graph_original.png differ diff --git a/doc/paddle/advanced_guide/inference_deployment/inference/image/model_graph_trt.png b/doc/paddle/advanced_guide/inference_deployment/inference/image/model_graph_trt.png new file mode 100644 index 0000000000000000000000000000000000000000..6db0d35f0a9bdd7ec9376eb71f69b0ab16924181 Binary files /dev/null and b/doc/paddle/advanced_guide/inference_deployment/inference/image/model_graph_trt.png differ diff --git a/doc/paddle/advanced_guide/inference_deployment/inference/image/project_property.png b/doc/paddle/advanced_guide/inference_deployment/inference/image/project_property.png new file mode 100644 index 0000000000000000000000000000000000000000..194b20402b0b73011abbb1e78a0a5fe90e2f5aac Binary files /dev/null and b/doc/paddle/advanced_guide/inference_deployment/inference/image/project_property.png differ diff --git a/doc/paddle/advanced_guide/inference_deployment/inference/image/runtime_library.png b/doc/paddle/advanced_guide/inference_deployment/inference/image/runtime_library.png new file mode 100644 index 0000000000000000000000000000000000000000..75dcbf5fad03c8af00e5af9690267e72a571ca47 Binary files /dev/null and b/doc/paddle/advanced_guide/inference_deployment/inference/image/runtime_library.png differ diff --git a/doc/paddle/advanced_guide/inference_deployment/inference/index_cn.rst b/doc/paddle/advanced_guide/inference_deployment/inference/index_cn.rst new file mode 
100644 index 0000000000000000000000000000000000000000..00103ec8f89c39328e28016a47e7115118dec3d6 --- /dev/null +++ b/doc/paddle/advanced_guide/inference_deployment/inference/index_cn.rst @@ -0,0 +1,15 @@ +############ +服务器端部署 +############ + +PaddlePaddle 提供了C++,C和Python的API来支持模型的部署上线。 + +.. toctree:: + :titlesonly: + + build_and_install_lib_cn.rst + windows_cpp_inference.md + native_infer.md + c_infer_cn.md + python_infer_cn.md + diff --git a/doc/paddle/advanced_guide/inference_deployment/inference/index_en.rst b/doc/paddle/advanced_guide/inference_deployment/inference/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..20e2f32f7cda12fe4513c23ec70387a5d8d6f943 --- /dev/null +++ b/doc/paddle/advanced_guide/inference_deployment/inference/index_en.rst @@ -0,0 +1,13 @@ +###################### +Server-side Deployment +###################### + +PaddlePaddle provides various methods to support deployment and release of trained models. + +.. toctree:: + :titlesonly: + + build_and_install_lib_en.rst + windows_cpp_inference_en.md + native_infer_en.md + paddle_gpu_benchmark_en.md diff --git a/doc/paddle/advanced_guide/inference_deployment/inference/native_infer.md b/doc/paddle/advanced_guide/inference_deployment/inference/native_infer.md new file mode 100644 index 0000000000000000000000000000000000000000..0c7d29ff2837b0dc54657dc45eb37250c7c90501 --- /dev/null +++ b/doc/paddle/advanced_guide/inference_deployment/inference/native_infer.md @@ -0,0 +1,243 @@ +# C++ 预测 API介绍 + +为了更简单方便地预测部署,PaddlePaddle 提供了一套高层 C++ API 预测接口。 + +下面是详细介绍。 + + +## 内容 + +- [使用AnalysisPredictor进行高性能预测](#使用AnalysisPredictor进行高性能预测) +- [使用AnalysisConfig管理预测配置](#使用AnalysisConfig管理预测配置) +- [使用ZeroCopyTensor管理输入/输出](#使用ZeroCopyTensor管理输入/输出) +- [C++预测样例编译测试](#C++预测样例编译测试) +- [性能调优](#性能调优) + + + +## 使用AnalysisPredictor进行高性能预测 +Paddle Fluid采用 AnalysisPredictor 进行预测。AnalysisPredictor 是一个高性能预测引擎,该引擎通过对计算图的分析,完成对计算图的一系列的优化(如OP的融合、内存/显存的优化、 MKLDNN,TensorRT 等底层加速库的支持等),能够大大提升预测性能。 + +为了展示完整的预测流程,下面是一个使用 AnalysisPredictor 进行预测的完整示例,其中涉及到的具体概念和配置会在后续部分展开详细介绍。 + +#### AnalysisPredictor 预测示例 + +``` c++ +#include "paddle_inference_api.h" + +namespace paddle { +void CreateConfig(AnalysisConfig* config, const std::string& model_dirname) { + // 模型从磁盘进行加载 + config->SetModel(model_dirname + "/model", + model_dirname + "/params"); + // config->SetModel(model_dirname); + // 如果模型从内存中加载,可以使用SetModelBuffer接口 + // config->SetModelBuffer(prog_buffer, prog_size, params_buffer, params_size); + config->EnableUseGpu(100 /*设定GPU初始显存池为MB*/, 0 /*设定GPU ID为0*/); //开启GPU预测 + + /* for cpu + config->DisableGpu(); + config->EnableMKLDNN(); // 开启MKLDNN加速 + config->SetCpuMathLibraryNumThreads(10); + */ + + // 使用ZeroCopyTensor,此处必须设置为false + config->SwitchUseFeedFetchOps(false); + // 若输入为多个,此处必须设置为true + config->SwitchSpecifyInputNames(true); + config->SwitchIrDebug(true); // 可视化调试选项,若开启,则会在每个图优化过程后生成dot文件 + // config->SwitchIrOptim(false); // 默认为true。如果设置为false,关闭所有优化 + // config->EnableMemoryOptim(); // 开启内存/显存复用 +} + +void RunAnalysis(int batch_size, std::string model_dirname) { + // 1. 创建AnalysisConfig + AnalysisConfig config; + CreateConfig(&config, model_dirname); + + // 2. 根据config 创建predictor,并准备输入数据,此处以全0数据为例 + auto predictor = CreatePaddlePredictor(config); + int channels = 3; + int height = 224; + int width = 224; + float input[batch_size * channels * height * width] = {0}; + + // 3. 
创建输入 + // 使用了ZeroCopy接口,可以避免预测中多余的CPU copy,提升预测性能 + auto input_names = predictor->GetInputNames(); + auto input_t = predictor->GetInputTensor(input_names[0]); + input_t->Reshape({batch_size, channels, height, width}); + input_t->copy_from_cpu(input); + + // 4. 运行预测引擎 + CHECK(predictor->ZeroCopyRun()); + + // 5. 获取输出 + std::vector out_data; + auto output_names = predictor->GetOutputNames(); + auto output_t = predictor->GetOutputTensor(output_names[0]); + std::vector output_shape = output_t->shape(); + int out_num = std::accumulate(output_shape.begin(), output_shape.end(), 1, std::multiplies()); + + out_data.resize(out_num); + output_t->copy_to_cpu(out_data.data()); +} +} // namespace paddle + +int main() { + // 模型下载地址 http://paddle-inference-dist.cdn.bcebos.com/tensorrt_test/mobilenet.tar.gz + paddle::RunAnalysis(1, "./mobilenet"); + return 0; +} + + +``` + +## 使用AnalysisConfig管理预测配置 + +AnalysisConfig管理AnalysisPredictor的预测配置,提供了模型路径设置、预测引擎运行设备选择以及多种优化预测流程的选项。配置方法如下: + +#### 通用优化配置 +``` c++ +config->SwitchIrOptim(true); // 开启计算图分析优化,包括OP融合等 +config->EnableMemoryOptim(); // 开启内存/显存复用 +``` +**Note:** 使用ZeroCopyTensor必须设置: +``` c++ +config->SwitchUseFeedFetchOps(false); // 关闭feed和fetch OP使用,使用ZeroCopy接口必须设置此项 +``` + +#### 设置模型和参数路径 +从磁盘加载模型时,根据模型和参数文件存储方式不同,设置AnalysisConfig加载模型和参数的路径有两种形式: + +* 非combined形式:模型文件夹`model_dir`下存在一个模型文件和多个参数文件时,传入模型文件夹路径,模型文件名默认为`__model__`。 +``` c++ +config->SetModel("./model_dir"); +``` + +* combined形式:模型文件夹`model_dir`下只有一个模型文件`model`和一个参数文件`params`时,传入模型文件和参数文件路径。 +``` c++ +config->SetModel("./model_dir/model", "./model_dir/params"); +``` + + +#### 配置CPU预测 + +``` c++ +config->DisableGpu(); // 禁用GPU +config->EnableMKLDNN(); // 开启MKLDNN,可加速CPU预测 +config->SetCpuMathLibraryNumThreads(10); // 设置CPU Math库线程数,CPU核心数支持情况下可加速预测 +``` +#### 配置GPU预测 +``` c++ +config->EnableUseGpu(100, 0); // 初始化100M显存,使用GPU ID为0 +config->GpuDeviceId(); // 返回正在使用的GPU ID +// 开启TensorRT预测,可提升GPU预测性能,需要使用带TensorRT的预测库 +config->EnableTensorRtEngine(1 << 20 /*workspace_size*/, + batch_size /*max_batch_size*/, + 3 /*min_subgraph_size*/, + AnalysisConfig::Precision::kFloat32 /*precision*/, + false /*use_static*/, + false /*use_calib_mode*/); +``` + + +## 使用ZeroCopyTensor管理输入/输出 + +ZeroCopyTensor是AnalysisPredictor的输入/输出数据结构。ZeroCopyTensor的使用可以避免预测时候准备输入以及获取输出时多余的数据copy,提高预测性能。 + +**Note:** 使用ZeroCopyTensor,务必在创建config时设置`config->SwitchUseFeedFetchOps(false);`。 + +``` c++ +// 通过创建的AnalysisPredictor获取输入和输出的tensor +auto input_names = predictor->GetInputNames(); +auto input_t = predictor->GetInputTensor(input_names[0]); +auto output_names = predictor->GetOutputNames(); +auto output_t = predictor->GetOutputTensor(output_names[0]); + +// 对tensor进行reshape +input_t->Reshape({batch_size, channels, height, width}); + +// 通过copy_from_cpu接口,将cpu数据输入;通过copy_to_cpu接口,将输出数据copy到cpu +input_t->copy_from_cpu(input_data /*数据指针*/); +output_t->copy_to_cpu(out_data /*数据指针*/); + +// 设置LOD +std::vector> lod_data = {{0}, {0}}; +input_t->SetLoD(lod_data); + +// 获取Tensor数据指针 +float *input_d = input_t->mutable_data(PaddlePlace::kGPU); // CPU下使用PaddlePlace::kCPU +int output_size; +float *output_d = output_t->data(PaddlePlace::kGPU, &output_size); +``` + +## C++预测样例编译测试 +1. 下载或编译paddle预测库,参考[安装与编译C++预测库](./build_and_install_lib_cn.html)。 +2. 
下载[预测样例](https://paddle-inference-dist.bj.bcebos.com/tensorrt_test/paddle_inference_sample_v1.7.tar.gz)并解压,进入`sample/inference`目录下。 + + `inference` 文件夹目录结构如下: + + ``` shell + inference + ├── CMakeLists.txt + ├── mobilenet_test.cc + ├── thread_mobilenet_test.cc + ├── mobilenetv1 + │ ├── model + │ └── params + ├── run.sh + └── run_impl.sh + ``` + + - `mobilenet_test.cc` 为单线程预测的C++源文件 + - `thread_mobilenet_test.cc` 为多线程预测的C++源文件 + - `mobilenetv1` 为模型文件夹 + - `run.sh` 为预测运行脚本文件 + +3. 配置编译与运行脚本 + + 编译运行预测样例之前,需要根据运行环境配置编译与运行脚本`run.sh`。`run.sh`的选项与路径配置的部分如下: + + ``` shell + # 设置是否开启MKL、GPU、TensorRT,如果要使用TensorRT,必须打开GPU + WITH_MKL=ON + WITH_GPU=OFF + USE_TENSORRT=OFF + + # 按照运行环境设置预测库路径、CUDA库路径、CUDNN库路径、TensorRT路径、模型路径 + LIB_DIR=YOUR_LIB_DIR + CUDA_LIB_DIR=YOUR_CUDA_LIB_DIR + CUDNN_LIB_DIR=YOUR_CUDNN_LIB_DIR + TENSORRT_ROOT_DIR=YOUR_TENSORRT_ROOT_DIR + MODEL_DIR=YOUR_MODEL_DIR + ``` + + 按照实际运行环境配置`run.sh`中的选项开关和所需lib路径。 + +4. 编译与运行样例 + + ``` shell + sh run.sh + ``` + +## 性能调优 +### CPU下预测 +1. 在CPU型号允许的情况下,尽量使用带AVX和MKL的版本。 +2. 可以尝试使用Intel的 MKLDNN 加速。 +3. 在CPU可用核心数足够时,可以将设置`config->SetCpuMathLibraryNumThreads(num);`中的num值调高一些。 + +### GPU下预测 +1. 可以尝试打开 TensorRT 子图加速引擎, 通过计算图分析,Paddle可以自动将计算图中部分子图融合,并调用NVIDIA的 TensorRT 来进行加速,详细内容可以参考 [使用Paddle-TensorRT库预测](../../performance_improving/inference_improving/paddle_tensorrt_infer.html)。 + +### 多线程预测 +Paddle Fluid支持通过在不同线程运行多个AnalysisPredictor的方式来优化预测性能,支持CPU和GPU环境。 + +使用多线程预测的样例详见[C++预测样例编译测试](#C++预测样例编译测试)中下载的[预测样例](https://paddle-inference-dist.bj.bcebos.com/tensorrt_test/paddle_inference_sample_v1.7.tar.gz)中的 +`thread_mobilenet_test.cc`文件。可以将`run.sh`中`mobilenet_test`替换成`thread_mobilenet_test`再执行 + +``` +sh run.sh +``` + +即可运行多线程预测样例。 diff --git a/doc/paddle/advanced_guide/inference_deployment/inference/native_infer_en.md b/doc/paddle/advanced_guide/inference_deployment/inference/native_infer_en.md new file mode 100644 index 0000000000000000000000000000000000000000..94aa40495f64307ed8d05ee7e1b0156fc2282ba8 --- /dev/null +++ b/doc/paddle/advanced_guide/inference_deployment/inference/native_infer_en.md @@ -0,0 +1,226 @@ +# Introduction to C++ Inference API + +To make the deployment of inference model more convenient, a set of high-level APIs are provided in Fluid to hide diverse optimization processes in low level. + +Details are as follows: + +## Use AnalysisPredictor to perform high-performance inference +Paddy fluid uses AnalysisPredictor to perform inference. AnalysisPredictor is a high-performance inference engine. Through the analysis of the calculation graph, the engine completes a series of optimization of the calculation graph (such as the integration of OP, the optimization of memory / graphic memory, the support of MKLDNN, TensorRT and other underlying acceleration libraries), which can greatly improve the inference performance. + +In order to show the complete inference process, the following is a complete example of using AnalysisPredictor. The specific concepts and configurations involved will be detailed in the following sections. 
+ +#### AnalysisPredictor sample + +``` c++ +#include "paddle_inference_api.h" + +namespace paddle { +void CreateConfig(AnalysisConfig* config, const std::string& model_dirname) { + // load model from disk + config->SetModel(model_dirname + "/model", + model_dirname + "/params"); + // config->SetModel(model_dirname); + // use SetModelBuffer if load model from memory + // config->SetModelBuffer(prog_buffer, prog_size, params_buffer, params_size); + config->EnableUseGpu(100 /*init graphic memory by 100MB*/, 0 /*set GPUID to 0*/); + + /* for cpu + config->DisableGpu(); + config->EnableMKLDNN(); // enable MKLDNN + config->SetCpuMathLibraryNumThreads(10); + */ + + config->SwitchUseFeedFetchOps(false); + // set to true if there are multiple inputs + config->SwitchSpecifyInputNames(true); + config->SwitchIrDebug(true); // If the visual debugging option is enabled, a dot file will be generated after each graph optimization process + // config->SwitchIrOptim(false); // The default is true. Turn off all optimizations if set to false + // config->EnableMemoryOptim(); // Enable memory / graphic memory reuse +} + +void RunAnalysis(int batch_size, std::string model_dirname) { + // 1. create AnalysisConfig + AnalysisConfig config; + CreateConfig(&config, model_dirname); + + // 2. create predictor based on config, and prepare input data + auto predictor = CreatePaddlePredictor(config); + int channels = 3; + int height = 224; + int width = 224; + float input[batch_size * channels * height * width] = {0}; + + // 3. build inputs + // uses ZeroCopy API here to avoid extra copying from CPU, improving performance + auto input_names = predictor->GetInputNames(); + auto input_t = predictor->GetInputTensor(input_names[0]); + input_t->Reshape({batch_size, channels, height, width}); + input_t->copy_from_cpu(input); + + // 4. run inference + CHECK(predictor->ZeroCopyRun()); + + // 5. get outputs + std::vector out_data; + auto output_names = predictor->GetOutputNames(); + auto output_t = predictor->GetOutputTensor(output_names[0]); + std::vector output_shape = output_t->shape(); + int out_num = std::accumulate(output_shape.begin(), output_shape.end(), 1, std::multiplies()); + + out_data.resize(out_num); + output_t->copy_to_cpu(out_data.data()); +} +} // namespace paddle + +int main() { + // the model can be downloaded from http://paddle-inference-dist.cdn.bcebos.com/tensorrt_test/mobilenet.tar.gz + paddle::RunAnalysis(1, "./mobilenet"); + return 0; +} + +``` + +## Use AnalysisConfig to manage inference configurations + +AnalysisConfig manages the inference configuration of AnalysisPredictor, providing model path setting, inference engine running device selection, and a variety of options to optimize the inference process. The configuration method is as follows: + +#### General optimizing configuration +``` c++ +config->SwitchIrOptim(true); // Enable analysis and optimization of calculation graph,including OP fusion, etc +config->EnableMemoryOptim(); // Enable memory / graphic memory reuse +``` +**Note:** Using ZeroCopyTensor requires following setting: +``` c++ +config->SwitchUseFeedFetchOps(false); // disable feed and fetch OP +``` + +#### set model and param path +When loading the model from disk, there are two ways to set the path of AnalysisConfig to load the model and parameters according to the storage mode of the model and parameter file: + +* Non combined form: when there is a model file and multiple parameter files under the model folder 'model_dir', the path of the model folder is passed in. 
The default name of the model file is'__model_'. +``` c++ +config->SetModel("./model_dir"); +``` + +* Combined form: when there is only one model file 'model' and one parameter file 'params' under the model folder' model_dir ', the model file and parameter file path are passed in. +``` c++ +config->SetModel("./model_dir/model", "./model_dir/params"); +``` + +At compile time, it is proper to co-build with `libpaddle_fluid.a/.so` . + +#### Configure CPU inference + +``` c++ +config->DisableGpu(); // disable GPU +config->EnableMKLDNN(); // enable MKLDNN, accelerating CPU inference +config->SetCpuMathLibraryNumThreads(10); // set number of threads of CPU Math libs, accelerating CPU inference if CPU cores are adequate +``` +#### Configure GPU inference +``` c++ +config->EnableUseGpu(100, 0); // initialize 100M graphic memory, using GPU ID 0 +config->GpuDeviceId(); // Returns the GPU ID being used +// Turn on TRT to improve GPU performance. You need to use library with tensorrt +config->EnableTensorRtEngine(1 << 20 /*workspace_size*/, + batch_size /*max_batch_size*/, + 3 /*min_subgraph_size*/, + AnalysisConfig::Precision::kFloat32 /*precision*/, + false /*use_static*/, + false /*use_calib_mode*/); +``` +## Use ZeroCopyTensor to manage I/O + +ZeroCopyTensor is the input / output data structure of AnalysisPredictor. The use of zerocopytensor can avoid redundant data copy when preparing input and obtaining output, and improve inference performance. + +**Note:** Using zerocopytensor, be sure to set `config->SwitchUseFeedFetchOps(false);`. + +``` c++ +// get input/output tensor +auto input_names = predictor->GetInputNames(); +auto input_t = predictor->GetInputTensor(input_names[0]); +auto output_names = predictor->GetOutputNames(); +auto output_t = predictor->GetOutputTensor(output_names[0]); + +// reshape tensor +input_t->Reshape({batch_size, channels, height, width}); + +// Through the copy_from_cpu interface, the CPU data is prepared; through the copy_to_cpu interface, the output data is copied to the CPU +input_t->copy_from_cpu(input_data /*data pointer*/); +output_t->copy_to_cpu(out_data /*data pointer*/); + +// set LOD +std::vector> lod_data = {{0}, {0}}; +input_t->SetLoD(lod_data); + +// get Tensor data pointer +float *input_d = input_t->mutable_data(PaddlePlace::kGPU); // use PaddlePlace::kCPU when running inference on CPU +int output_size; +float *output_d = output_t->data(PaddlePlace::kGPU, &output_size); +``` + +## C++ inference sample +1. Download or compile C++ Inference Library, refer to [Install and Compile C++ Inference Library](./build_and_install_lib_en.html). +2. Download [C++ inference sample](https://paddle-inference-dist.bj.bcebos.com/tensorrt_test/paddle_inference_sample_v1.7.tar.gz) and uncompress it , then enter `sample/inference` directory. + + `inference` directory structure is as following: + + ``` shell + inference + ├── CMakeLists.txt + ├── mobilenet_test.cc + ├── thread_mobilenet_test.cc + ├── mobilenetv1 + │ ├── model + │ └── params + ├── run.sh + └── run_impl.sh + ``` + + - `mobilenet_test.cc` is the source code for single-thread inference. + - `thread_mobilenet_test.cc` is the source code for multi-thread inference. + - `mobilenetv1` is the model directory. + - `run.sh` is the script for running inference. + +3. Configure script: + + Before running, we need to configure script `run.sh` as following: + + ``` shell + # set whether to enable MKL, GPU or TensorRT. 
Enabling TensorRT requires WITH_GPU being ON + WITH_MKL=ON + WITH_GPU=OFF + USE_TENSORRT=OFF + + # set path to CUDA lib dir, CUDNN lib dir, TensorRT root dir and model dir + LIB_DIR=YOUR_LIB_DIR + CUDA_LIB_DIR=YOUR_CUDA_LIB_DIR + CUDNN_LIB_DIR=YOUR_CUDNN_LIB_DIR + TENSORRT_ROOT_DIR=YOUR_TENSORRT_ROOT_DIR + MODEL_DIR=YOUR_MODEL_DIR + ``` + + Please configure `run.sh` depending on your environment. + +4. Build and run the sample. + + ``` shell + sh run.sh + ``` + +## Performance tuning +### Tuning on CPU +1. If the CPU model allows, try to use the version with AVX and MKL. +2. You can try to use Intel's MKLDNN acceleration. +3. When the number of CPU cores available is enough, you can increase the num value in the setting `config->SetCpuMathLibraryNumThreads(num);`. + +### Tuning on GPU +1. You can try to open the TensorRT subgraph acceleration engine. Through the graph analysis, Paddle can automatically fuse certain subgraphs, and call NVIDIA's TensorRT for acceleration. For details, please refer to [Use Paddle-TensorRT Library for inference](../../performance_improving/inference_improving/paddle_tensorrt_infer_en.html)。 + +### Tuning with multi-thread +Paddle Fluid supports optimizing prediction performance by running multiple AnalysisPredictors on different threads, and supports CPU and GPU environments. + +sample of using multi-threads is `thread_mobilenet_test.cc` downloaded from [sample](https://paddle-inference-dist.bj.bcebos.com/tensorrt_test/paddle_inference_sample_v1.7.tar.gz). You can change `mobilenet_test` in `run.sh` to `thread_mobilenet_test` to run inference with multi-thread. + +``` +sh run.sh +``` diff --git a/doc/paddle/advanced_guide/inference_deployment/inference/paddle_gpu_benchmark_en.md b/doc/paddle/advanced_guide/inference_deployment/inference/paddle_gpu_benchmark_en.md new file mode 100644 index 0000000000000000000000000000000000000000..538251c033dc5fd964e0ad9326c113bedad69dc1 --- /dev/null +++ b/doc/paddle/advanced_guide/inference_deployment/inference/paddle_gpu_benchmark_en.md @@ -0,0 +1,44 @@ +# Performance Profiling for TensorRT Library + +## Test Environment +- CPU:Intel(R) Xeon(R) Gold 5117 CPU @ 2.00GHz GPU:Tesla P4 +- TensorRT4.0, CUDA8.0, CUDNNV7 +- Test model ResNet50, MobileNet, ResNet101, Inception V3. + +## Test Targets +**PaddlePaddle, Pytorch, Tensorflow** + +- In test, PaddlePaddle adopts subgraph optimization to integrate TensorRT [model](https://github.com/PaddlePaddle/models/tree/develop/fluid/PaddleCV/image_classification/models) . +- Native implementation is used in Pytorch. Model [address 1](https://github.com/pytorch/vision/tree/master/torchvision/models) , [address 2](https://github.com/marvis/pytorch-mobilenet) . +- Test for TensorFlow contains test for native TF and TF—TRT. **Test for TF—TRT hasn't reached expectation wihch will be complemented later**. Model [address](https://github.com/tensorflow/models) . 
+ + +### ResNet50 + +|batch_size|PaddlePaddle(ms)|Pytorch(ms)|TensorFlow(ms)| +|---|---|---|---| +|1|4.64117 |16.3|10.878| +|5|6.90622| 22.9 |20.62| +|10|7.9758 |40.6|34.36| + +### MobileNet +|batch_size|PaddlePaddle(ms)|Pytorch(ms)|TensorFlow(ms)| +|---|---|---|---| +|1| 1.7541 | 7.8 |2.72| +|5| 3.04666 | 7.8 |3.19| +|10|4.19478 | 14.47 |4.25| + +### ResNet101 +|batch_size|PaddlePaddle(ms)|Pytorch(ms)|TensorFlow(ms)| +|---|---|---|---| +|1|8.95767| 22.48 |18.78| +|5|12.9811 | 33.88 |34.84| +|10|14.1463| 61.97 |57.94| + + +### Inception v3 +|batch_size|PaddlePaddle(ms)|Pytorch(ms)|TensorFlow(ms)| +|---|---|---|---| +|1|15.1613 | 24.2 |19.1| +|5|18.5373 | 34.8 |27.2| +|10|19.2781| 54.8 |36.7| diff --git a/doc/paddle/advanced_guide/inference_deployment/inference/python_infer_cn.md b/doc/paddle/advanced_guide/inference_deployment/inference/python_infer_cn.md new file mode 100644 index 0000000000000000000000000000000000000000..c947b3d8ea9eec7a034a6f2d701c38036ec518ff --- /dev/null +++ b/doc/paddle/advanced_guide/inference_deployment/inference/python_infer_cn.md @@ -0,0 +1,352 @@ +# Python 预测 API介绍 + +Fluid提供了高度优化的[C++预测库](./native_infer.html),为了方便使用,我们也提供了C++预测库对应的Python接口,下面是详细的使用说明。 + + + +## Python预测相关数据结构 + +使用Python预测API与C++预测API相似,主要包括`PaddleTensor`, `PaddleDType`, `AnalysisConfig`和`PaddlePredictor`,分别对应于C++ API中同名的类型。 + +### PaddleTensor + +class paddle.fluid.core.PaddleTensor + +`PaddleTensor`是预测库输入和输出的数据结构,包括以下字段 + +* `name`(str): 指定输入的名称 +* `shape`(tuple|list): Tensor的shape +* `data`(numpy.ndarray): Tensor的数据,可在PaddleTensor构造的时候用`numpy.ndarray`直接传入 +* `dtype`(PaddleDType): Tensor的类型 +* `lod`(List[List[int]]): [LoD](../../../user_guides/howto/basic_concept/lod_tensor.html)信息 + +`PaddleTensor`包括以下方法 + +* `as_ndarray`: 返回`data`对应的numpy数组 + +#### 代码示例 +``` python +tensor = PaddleTensor(name="tensor", data=numpy.array([1, 2, 3], dtype="int32")) +``` +调用`PaddleTensor`的成员字段和方法输出如下: +``` python +>>> tensor.name +'tensor' +>>> tensor.shape +[3] +>>> tensor.dtype +PaddleDType.INT32 +>>> tensor.lod +[] +>>> tensor.as_ndarray() +array([1, 2, 3], dtype=int32) +``` + + +### PaddleDType + +class paddle.fluid.core.PaddleTensor + +`PaddleDType`定义了`PaddleTensor`的数据类型,由传入`PaddleTensor`的numpy数组类型确定,包括以下成员 + +* `INT64`: 64位整型 +* `INT32`: 32位整型 +* `FLOAT32`: 32位浮点型 + +### AnalysisConfig + +class paddle.fluid.core.AnalysisConfig + +`AnalysisConfig`是创建预测引擎的配置,提供了模型路径设置、预测引擎运行设备选择以及多种优化预测流程的选项,主要包括以下方法 + +* `set_model`: 设置模型的路径 +* `model_dir`: 返回模型文件夹路径 +* `prog_file`: 返回模型文件路径 +* `params_file`: 返回参数文件路径 +* `enable_use_gpu`: 设置GPU显存(单位M)和Device ID +* `disable_gpu`: 禁用GPU +* `gpu_device_id`: 返回使用的GPU ID +* `switch_ir_optim`: IR优化(默认开启) +* `enable_tensorrt_engine`: 开启TensorRT +* `enable_mkldnn`: 开启MKLDNN +* `disable_glog_info`: 禁用预测中的glog日志 +* `delete_pass`: 预测的时候删除指定的pass +#### 代码示例 +设置模型和参数路径有两种形式: +* 当模型文件夹下存在一个模型文件和多个参数文件时,传入模型文件夹路径,模型文件名默认为`__model__` +``` python +config = AnalysisConfig("./model") +``` +* 当模型文件夹下只有一个模型文件和一个参数文件时,传入模型文件和参数文件路径 +``` python +config = AnalysisConfig("./model/model", "./model/params") +``` +使用`set_model`方法设置模型和参数路径方式同上 + +其他预测引擎配置选项示例如下 +``` python +config.enable_use_gpu(100, 0) # 初始化100M显存,使用gpu id为0 +config.gpu_device_id() # 返回正在使用的gpu id +config.disable_gpu() # 禁用gpu +config.switch_ir_optim(True) # 开启IR优化 +config.enable_tensorrt_engine(precision_mode=AnalysisConfig.Precision.Float32, + use_calib_mode=True) # 开启TensorRT预测,精度为fp32,开启int8离线量化 +config.enable_mkldnn() # 开启MKLDNN +``` + + + +### PaddlePredictor + +class paddle.fluid.core.PaddlePredictor + 
+`PaddlePredictor`是运行预测的引擎,由`paddle.fluid.core.create_paddle_predictor(config)`创建,主要提供以下方法 + +* `run`: 输入和返回值均为`PaddleTensor`列表类型,功能为运行预测引擎,返回预测结果 + +#### 代码示例 + +``` python +# 设置完AnalysisConfig后创建预测引擎PaddlePredictor +predictor = create_paddle_predictor(config) + +# 设置输入 +x = numpy.array([1, 2, 3], dtype="int64") +x_t = fluid.core.PaddleTensor(x) + +y = numpy.array([4], dtype = "int64") +y_t = fluid.core.PaddleTensor(y) + +# 运行预测引擎得到结果,返回值是一个PaddleTensor的列表 +results = predictor.run([x_t, y_t]) + +# 获得预测结果,并应用到自己的应用中 +``` + +### 使用ZeroCopyTensor管理输入/输出 + +`ZeroCopyTensor`是`AnalysisPredictor`的一种输入/输出数据结构,与`PaddleTensor`等同。`ZeroCopyTensor`相比于`PaddleTensor`,可以避免预测时候准备输入以及获取输出时多余的数据拷贝,提高预测性能。 + +注意: 需要注意的是,使用`ZeroCopyTensor`,务必在创建`config`时设置`config.switch_use_feed_fetch_ops(False)`用于显式地在模型运行的时候删去`feed`和`fetch`ops,不会影响模型的效果,但是能提升性能。 + +``` python +# 创建predictor +predictor = create_paddle_predictor(config) + +# 获取输入的名称 +input_names = predictor.get_input_names() +input_tensor = predictor.get_input_tensor(input_names[0]) + +# 设置输入 +fake_input = numpy.random.randn(1, 3, 318, 318).astype("float32") +input_tensor.copy_from_cpu(fake_input) + +# 运行predictor +predictor.zero_copy_run() + +# 获取输出 +output_names = predictor.get_output_names() +output_tensor = predictor.get_output_tensor(output_names[0]) +output_data = output_tensor.copy_to_cpu() # numpy.ndarray类型 +``` + +### AnalysisPredictor + +class paddle.fluid.core.AnalysisPredictor + +`AnalysisPredictor`是运行预测的引擎,继承于`PaddlePredictor`,同样是由`paddle.fluid.core.create_paddle_predictor(config)`创建,主要提供以下方法 + +* `zero_copy_run()`: 运行预测引擎,返回预测结果 +* `get_input_names()`: 获取输入的名称 +* `get_input_tensor(input_name: str)`: 根据输入的名称获取对应的`ZeroCopyTensor` +* `get_output_names()`: 获取输出的名称 +* `get_output_tensor(output_name: str)`: 根据输出的名称获取对应的`ZeroCopyTensor` + +#### 代码示例 + +``` python +# 设置完AnalysisConfig后创建预测引擎PaddlePredictor +predictor = create_paddle_predictor(config) + +# 获取输入的名称 +input_names = predictor.get_input_names() +input_tensor = predictor.get_input_tensor(input_names[0]) + +# 设置输入 +fake_input = numpy.random.randn(1, 3, 318, 318).astype("float32") +input_tensor.reshape([1, 3, 318, 318]) +input_tensor.copy_from_cpu(fake_input) + +# 运行predictor +predictor.zero_copy_run() + +# 获取输出 +output_names = predictor.get_output_names() +output_tensor = predictor.get_output_tensor(output_names[0]) +``` + +## 支持方法列表 +* PaddleTensor + * `as_ndarray() -> numpy.ndarray` +* ZeroCopyTensor + * `copy_from_cpu(input: numpy.ndarray) -> None` + * `copy_to_cpu() -> numpy.ndarray` + * `reshape(input: numpy.ndarray|List[int]) -> None` + * `shape() -> List[int]` + * `set_lod(input: numpy.ndarray|List[List[int]]) -> None` + * `lod() -> List[List[int]]` + * `type() -> PaddleDType` +* AnalysisConfig + * `set_model(model_dir: str) -> None` + * `set_model(prog_file: str, params_file: str) -> None` + * `model_dir() -> str` + * `prog_file() -> str` + * `params_file() -> str` + * `enable_use_gpu(memory_pool_init_size_mb: int, device_id: int) -> None` + * `gpu_device_id() -> int` + * `switch_ir_optim(x: bool = True) -> None` + * `enable_tensorrt_engine(workspace_size: int = 1 << 20, + max_batch_size: int, + min_subgraph_size: int, + precision_mode: AnalysisConfig.precision, + use_static: bool, + use_calib_mode: bool) -> None` + * `enable_mkldnn() -> None` + * `disable_glog_info() -> None` + * `delete_pass(pass_name: str) -> None` +* PaddlePredictor + * `run(input: List[PaddleTensor]) -> List[PaddleTensor]` +* AnalysisPredictor + * `zero_copy_run() -> None` + * `get_input_names() -> List[str]` + * 
`get_input_tensor(input_name: str) -> ZeroCopyTensor` + * `get_output_names() -> List[str]` + * `get_output_tensor(output_name: str) -> ZeroCopyTensor` + +可参考对应的[C++预测接口](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/pybind/inference_api.cc),其中定义了每个接口的参数和返回值 + +## 完整使用示例 + +下面是使用Fluid Python API进行预测的一个完整示例,使用resnet50模型 + +下载[resnet50模型](http://paddle-inference-dist.bj.bcebos.com/resnet50_model.tar.gz)并解压,运行如下命令将会调用预测引擎 + +``` bash +python resnet50_infer.py --model_file ./model/model --params_file ./model/params --batch_size 2 +``` + +`resnet50_infer.py` 的内容是 + +### PaddleTensor的完整使用示例 + +``` python +import argparse +import numpy as np + +from paddle.fluid.core import PaddleTensor +from paddle.fluid.core import AnalysisConfig +from paddle.fluid.core import create_paddle_predictor + + +def main(): + args = parse_args() + + # 设置AnalysisConfig + config = AnalysisConfig(args.model_file, args.params_file) + config.disable_gpu() + + # 创建PaddlePredictor + predictor = create_paddle_predictor(config) + + # 设置输入,此处以随机输入为例,用户可自行输入真实数据 + inputs = fake_input(args.batch_size) + + # 运行预测引擎 + outputs = predictor.run(inputs) + output_num = 512 + + # 获得输出并解析 + output = outputs[0] + print(output.name) + output_data = output.as_ndarray() #return numpy.ndarray + assert list(output_data.shape) == [args.batch_size, output_num] + for i in range(args.batch_size): + print(np.argmax(output_data[i])) + + +def fake_input(batch_size): + shape = [batch_size, 3, 318, 318] + data = np.random.randn(*shape).astype("float32") + image = PaddleTensor(data) + return [image] + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("--model_file", type=str, help="model filename") + parser.add_argument("--params_file", type=str, help="parameter filename") + parser.add_argument("--batch_size", type=int, default=1, help="batch size") + + return parser.parse_args() + + +if __name__ == "__main__": + main() +``` + +### ZeroCopyTensor的完整使用示例 + +``` python +import argparse +import numpy as np +from paddle.fluid.core import AnalysisConfig +from paddle.fluid.core import create_paddle_predictor + + +def main(): + args = parse_args() + + # 设置AnalysisConfig + config = set_config(args) + + # 创建PaddlePredictor + predictor = create_paddle_predictor(config) + + # 获取输入的名称 + input_names = predictor.get_input_names() + input_tensor = predictor.get_input_tensor(input_names[0]) + + # 设置输入 + fake_input = np.random.randn(1, 3, 318, 318).astype("float32") + input_tensor.reshape([1, 3, 318, 318]) + input_tensor.copy_from_cpu(fake_input) + + # 运行predictor + predictor.zero_copy_run() + + # 获取输出 + output_names = predictor.get_output_names() + output_tensor = predictor.get_output_tensor(output_names[0]) + output_data = output_tensor.copy_to_cpu() # numpy.ndarray类型 + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("--model_file", type=str, help="model filename") + parser.add_argument("--params_file", type=str, help="parameter filename") + parser.add_argument("--batch_size", type=int, default=1, help="batch size") + + return parser.parse_args() + + +def set_config(args): + config = AnalysisConfig(args.model_file, args.params_file) + config.disable_gpu() + config.switch_use_feed_fetch_ops(False) + config.switch_specify_input_names(True) + return config + + +if __name__ == "__main__": + main() +``` diff --git a/doc/paddle/advanced_guide/inference_deployment/inference/windows_cpp_inference.md b/doc/paddle/advanced_guide/inference_deployment/inference/windows_cpp_inference.md new file mode 
100644 index 0000000000000000000000000000000000000000..417eaf1e182535b69596876be2ca8cfb3304f6bd --- /dev/null +++ b/doc/paddle/advanced_guide/inference_deployment/inference/windows_cpp_inference.md @@ -0,0 +1,241 @@ + +安装与编译 Windows 预测库 +=========================== + +下载安装包与对应的测试环境 +------------- + +| 版本说明 | 预测库(1.8.3版本) | 编译器 | 构建工具 | cuDNN | CUDA | +|:---------|:-------------------|:-------------------|:----------------|:--------|:-------| +| cpu_avx_mkl | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.8.3/win-infer/mkl/cpu/fluid_inference_install_dir.zip) | MSVC 2015 update 3| CMake v3.16.0 | +| cpu_avx_openblas | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.8.3/win-infer/open/cpu/fluid_inference_install_dir.zip) | MSVC 2015 update 3| CMake v3.16.0 | +| cuda9.0_cudnn7_avx_mkl | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.8.3/win-infer/mkl/post97/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 | 7.3.1 | 9.0 | +| cuda9.0_cudnn7_avx_openblas | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.8.3/win-infer/open/post97/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 | 7.3.1 | 9.0 | +| cuda10.0_cudnn7_avx_mkl | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.8.3/win-infer/mkl/post107/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 | 7.4.1 | 10.0 | + +### 硬件环境 + +测试环境硬件配置: + +| CPU | I7-8700K | +|:---------|:-------------------| +| 内存 | 16G | +| 硬盘 | 1T hdd + 256G ssd | +| 显卡 | GTX1080 8G | + +测试环境操作系统使用 win10 家庭版本 + +从源码编译预测库 +-------------- +用户也可以从 PaddlePaddle 核心代码编译C++预测库,只需在编译时配制下面这些编译选项: + +|选项 |说明 | 值 | +|:-------------|:-------|:------------| +|CMAKE_BUILD_TYPE | 配置生成器上的构建类型,windows预测库目前只支持Release | Release | +|ON_INFER | 是否生成预测库,编译预测库时必须设置为ON | ON | +|WITH_GPU | 是否支持GPU | ON/OFF | +|WITH_MKL | 是否使用Intel MKL(数学核心库) | ON/OFF | +|WITH_PYTHON | 是否内嵌PYTHON解释器 | OFF(推荐) | +|MSVC_STATIC_CRT|是否使用/MT 模式进行编译,Windows默认使用 /MT 模式进行编译 |ON/OFF| +|CUDA_TOOKIT_ROOT_DIR|编译GPU预测库时,需设置CUDA的根目录|YOUR_CUDA_PATH| + +请按照推荐值设置,以避免链接不必要的库。其它可选编译选项按需进行设定。 + +更多具体编译选项含义请参见[编译选项表](../../../beginners_guide/install/Tables.html/#Compile) + +Windows下安装与编译预测库步骤:(在Windows命令提示符下执行以下指令) + +1. 将PaddlePaddle的源码clone在当下目录的Paddle文件夹中,并进入Paddle目录: + ```bash + git clone https://github.com/PaddlePaddle/Paddle.git + cd Paddle + ``` + +2. 执行cmake: + - 编译CPU预测 + ```bash + # 创建并进入build目录 + mkdir build + cd build + + cmake .. -G "Visual Studio 14 2015" -A x64 -T host=x64 -DCMAKE_BUILD_TYPE=Release -DWITH_MKL=OFF -DWITH_GPU=OFF -DON_INFER=ON -DWITH_PYTHON=OFF + + # Windows默认使用 /MT 模式进行编译,如果想使用 /MD 模式,请使用以下命令。如不清楚两者的区别,请使用上面的命令 + cmake .. -G "Visual Studio 14 2015" -A x64 -T host=x64 -DCMAKE_BUILD_TYPE=Release -DWITH_MKL=OFF -DWITH_GPU=OFF -DON_INFER=ON -DWITH_PYTHON=OFF -DMSVC_STATIC_CRT=OFF + ``` + - 编译GPU预测库: + ```bash + # -DCUDA_TOOKIT_ROOT_DIR 为cuda根目录,例如-DCUDA_TOOKIT_ROOT_DIR="D:\\cuda" + cmake .. -G "Visual Studio 14 2015" -A x64 -T host=x64 -DCMAKE_BUILD_TYPE=Release -DWITH_MKL=ON -DWITH_GPU=ON -DON_INFER=ON -DWITH_PYTHON=OFF -DCUDA_TOOKIT_ROOT_DIR=YOUR_CUDA_PATH + ``` + +3. 
使用Blend for Visual Studio 2015 打开 `paddle.sln` 文件,选择平台为`x64`,配置为`Release`,编译inference_lib_dist项目。 + 操作方法:在Visual Studio中选择相应模块,右键选择"生成"(或者"build") + +编译成功后,使用C++预测库所需的依赖(包括:(1)编译出的PaddlePaddle预测库和头文件;(2)第三方链接库和头文件;(3)版本信息与编译选项信息) +均会存放于`fluid_inference_install_dir`目录中。 + +version.txt 中记录了该预测库的版本信息,包括Git Commit ID、使用OpenBlas或MKL数学库、CUDA/CUDNN版本号,如: + + + GIT COMMIT ID: cc9028b90ef50a825a722c55e5fda4b7cd26b0d6 + WITH_MKL: ON + WITH_MKLDNN: ON + WITH_GPU: ON + CUDA version: 8.0 + CUDNN version: v7 + + +编译预测demo +------------- + +### 硬件环境 + +测试环境硬件配置: + +| CPU | I7-8700K | +|:---------|:-------------------| +| 内存 | 16G | +| 硬盘 | 1T hdd + 256G ssd | +| 显卡 | GTX1080 8G | + +测试环境操作系统使用 win10 家庭版本。 + +### 软件要求 + +**请您严格按照以下步骤进行安装,否则可能会导致安装失败!** + +**安装Visual Studio 2015 update3** + +安装Visual Studio 2015,安装选项中选择安装内容时勾选自定义,选择安装全部关于c,c++,vc++的功能。 + +### 其他要求 + +1. 你需要直接下载Windows预测库或者从Paddle源码编译预测库,确保windows预测库存在。 + +2. 你需要下载Paddle源码,确保demo文件和脚本文件存在: +```bash +git clone https://github.com/PaddlePaddle/Paddle.git +``` +### 编译demo +Windows下编译预测demo步骤:(在Windows命令提示符下执行以下指令) +#### 使用脚本编译运行 + +进入到demo_ci目录,运行脚本`run_windows_demo.bat`,根据提示按需输入参数: +```dos +# path为下载Paddle的目录 +cd path\Paddle\paddle\fluid\inference\api\demo_ci +run_windows_demo.bat +``` + +其中,run_windows_demo.bat 的部分选项如下: + +```dos +gpu_inference=Y #是否使用GPU预测库,默认使用CPU预测库 +use_mkl=Y #该预测库是否使用MKL,默认为Y +use_gpu=Y #是否使用GPU进行预测,默认为N。使用GPU预测需要下载GPU版本预测库 + +paddle_inference_lib=path\fluid_inference_install_dir #设置paddle预测库的路径 +cuda_lib_dir=path\lib\x64 #设置cuda库的路径 +vcvarsall_dir=path\vc\vcvarsall.bat #设置visual studio #本机工具命令提示符路径 +``` +#### 手动编译运行 + +1. 进入demo_ci目录,创建并进入build目录 + ```dos + # path为下载Paddle的目录 + cd path\Paddle\paddle\fluid\inference\api\demo_ci + mkdir build + cd build + ``` + +2. 执行cmake(cmake可以在[官网进行下载](https://cmake.org/download/),并添加到环境变量中): + - 使用CPU预测库编译demo + ```dos + # -DDEMO_NAME 是要编译的文件 + # -DDPADDLE_LIB是预测库目录,例如-DPADDLE_LIB=D:\fluid_inference_install_dir + cmake .. -G "Visual Studio 14 2015" -A x64 -T host=x64 -DWITH_GPU=OFF -DWITH_MKL=ON -DWITH_STATIC_LIB=ON ^ + -DCMAKE_BUILD_TYPE=Release -DDEMO_NAME=simple_on_word2vec -DPADDLE_LIB=path_to_the_paddle_lib -DMSVC_STATIC_CRT=ON + ``` + - 使用GPU预测库编译demo + ```dos + # -DCUDA_LIB CUDA的库目录,例如-DCUDA_LIB=D:\cuda\lib\x64 + cmake .. -G "Visual Studio 14 2015" -A x64 -T host=x64 -DWITH_GPU=ON -DWITH_MKL=ON -DWITH_STATIC_LIB=ON ^ + -DCMAKE_BUILD_TYPE=Release -DDEMO_NAME=simple_on_word2vec -DPADDLE_LIB=path_to_the_paddle_lib -DMSVC_STATIC_CRT=ON -DCUDA_LIB=YOUR_CUDA_LIB + ``` +3. 使用Blend for Visual Studio 2015 打开 `cpp_inference_demo.sln` 文件,选择平台为`x64`,配置为`Release`,编译simple_on_word2vec项目。 + 操作方法: 在Visual Studio中选择相应模块,右键选择"生成"(或者"build") + +4. [下载模型](http://paddle-inference-dist.bj.bcebos.com/word2vec.inference.model.tar.gz)并解压到当前目录,执行命令: + ```dos + # 开启GLOG + set GLOG_v=100 + # 进行预测,path为模型解压后的目录 + Release\simple_on_word2vec.exe --dirname=path\word2vec.inference.model + ``` + +### 实现一个简单预测demo + +[完整的代码示例](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/inference/api/demo_ci/windows_mobilenet.cc) + +本示例使用了AnalysisConfig管理AnalysisPredictor的预测配置,提供了模型路径设置、预测引擎运行设备选择以及使用ZeroCopyTensor管理输入/输出的设置。具体步骤如下: + +1. 创建AnalysisConfig + ```C++ + AnalysisConfig config; + config->SwitchUseFeedFetchOps(false); // 关闭feed和fetch OP使用,使用ZeroCopy接口必须设置此项 + // config->EnableUseGpu(100 /*设定GPU初始显存池为MB*/, 0 /*设定GPU ID为0*/); //开启GPU预测 + ``` + +2. 
在config中设置模型和参数路径 + + 从磁盘加载模型时,根据模型和参数文件存储方式不同,设置AnalysisConfig加载模型和参数的路径有两种形式,此处使用combined形式: + - 非combined形式:模型文件夹`model_dir`下存在一个模型文件和多个参数文件时,传入模型文件夹路径,模型文件名默认为`__model__`。 + ``` c++ + config->SetModel("path\\model_dir\\__model__") + ``` + - combined形式:模型文件夹`model_dir`下只有一个模型文件`__model__`和一个参数文件`__params__`时,传入模型文件和参数文件路径。 + ```C++ + config->SetModel("path\\model_dir\\__model__", "path\\model_dir\\__params__"); + ``` +3. 创建predictor,准备输入数据 + ```C++ + std::unique_ptr predictor = CreatePaddlePredictor(config); + int batch_size = 1; + int channels = 3; // channels,height,width三个参数必须与模型中对应输入的shape一致 + int height = 300; + int width = 300; + int nums = batch_size * channels * height * width; + + float* input = new float[nums]; + for (int i = 0; i < nums; ++i) input[i] = 0; + ``` +4. 使用ZeroCopyTensor管理输入 + ```C++ + // 通过创建的AnalysisPredictor获取输入Tensor,该Tensor为ZeroCopyTensor + auto input_names = predictor->GetInputNames(); + auto input_t = predictor->GetInputTensor(input_names[0]); + + // 对Tensor进行reshape,将准备好的输入数据从CPU拷贝到ZeroCopyTensor中 + input_t->Reshape({batch_size, channels, height, width}); + input_t->copy_from_cpu(input); + ``` + +5. 运行预测引擎 + ```C++ + predictor->ZeroCopyRun(); + ``` + +6. 使用ZeroCopyTensor管理输出 + ```C++ + auto output_names = predictor->GetOutputNames(); + auto output_t = predictor->GetOutputTensor(output_names[0]); + std::vector output_shape = output_t->shape(); + int out_num = std::accumulate(output_shape.begin(), output_shape.end(), 1, + std::multiplies()); + + out_data.resize(out_num); + output_t->copy_to_cpu(out_data.data()); // 将ZeroCopyTensor中数据拷贝到cpu中,得到输出数据 + delete[] input; + ``` +**Note:** 关于AnalysisPredictor的更多介绍,请参考[C++预测API介绍](./native_infer.html) diff --git a/doc/paddle/advanced_guide/inference_deployment/inference/windows_cpp_inference_en.md b/doc/paddle/advanced_guide/inference_deployment/inference/windows_cpp_inference_en.md new file mode 100644 index 0000000000000000000000000000000000000000..e25ae184810153421013c60c96c9533b00261ae0 --- /dev/null +++ b/doc/paddle/advanced_guide/inference_deployment/inference/windows_cpp_inference_en.md @@ -0,0 +1,248 @@ + +Install and Compile C++ Inference Library on Windows +=========================== + +Direct Download and Install +------------- + +| Version | Inference Libraries(v1.8.3) | Compiler | Build tools | cuDNN | CUDA | +|:---------|:-------------------|:-------------------|:----------------|:--------|:-------| +| cpu_avx_mkl | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.8.3/win-infer/mkl/cpu/fluid_inference_install_dir.zip) | MSVC 2015 update 3| CMake v3.16.0 | +| cpu_avx_openblas | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.8.3/win-infer/open/cpu/fluid_inference_install_dir.zip) | MSVC 2015 update 3| CMake v3.16.0 | +| cuda9.0_cudnn7_avx_mkl | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.8.3/win-infer/mkl/post97/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 | 7.3.1 | 9.0 | +| cuda9.0_cudnn7_avx_openblas | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.8.3/win-infer/open/post97/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 | 7.3.1 | 9.0 | +| cuda10.0_cudnn7_avx_mkl | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.8.3/win-infer/mkl/post107/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 | 7.4.1 | 10.0 | + +### Hardware Environment + +Hardware Configuration of the experimental environment: + +| CPU | I7-8700K | +|:--------------|:-------------------| +| Memory | 16G | +| Hard 
Disk | 1T hdd + 256G ssd | +| Graphics Card | GTX1080 8G | + +The operating system is win10 family version in the experimental environment. + +Build From Source Code +-------------- + +Users can also compile C++ inference libraries from the PaddlePaddle core code by specifying the following compile options at compile time: + +|Option | Description | Value | +|:-------------|:-----|:--------------| +|CMAKE_BUILD_TYPE|Specifies the build type on single-configuration generators, Windows inference library currently only supports Release| Release | +|ON_INFER|Whether to generate the inference library. Must be set to ON when compiling the inference library. | ON | +|WITH_GPU|Whether to support GPU | ON/OFF | +|WITH_MKL|Whether to support MKL | ON/OFF | +|WITH_PYTHON|Whether the PYTHON interpreter is embedded | OFF | +|MSVC_STATIC_CRT|Whether to compile with / MT mode | ON | +|CUDA_TOOKIT_ROOT_DIR | When compiling the GPU inference library, you need to set the CUDA root directory | YOUR_CUDA_PATH | + +For details on the compilation options, see [the compilation options list](../../../beginners_guide/install/Tables_en.html/#Compile) + +**Paddle Windows Inference Library Compilation Steps** + +1. Clone Paddle source code from GitHub: + ```bash + git clone https://github.com/PaddlePaddle/Paddle.git + cd Paddle + ``` + +2. Run Cmake command + + - compile CPU inference library + ```bash + # create build directory + mkdir build + + # change to the build directory + cd build + cmake .. -G "Visual Studio 14 2015" -A x64 -T host=x64 -DCMAKE_BUILD_TYPE=Release -DWITH_MKL=OFF -DWITH_GPU=OFF -DON_INFER=ON -DWITH_PYTHON=OFF + # use -DWITH_MKL to select math library: Intel MKL or OpenBLAS + + # By default on Windows we use /MT for C Runtime Library, If you want to use /MD, please use the below command + # If you have no ideas the differences between the two, use the above one + cmake .. -G "Visual Studio 14 2015" -A x64 -T host=x64 -DCMAKE_BUILD_TYPE=Release -DWITH_MKL=OFF -DWITH_GPU=OFF -DON_INFER=ON -DWITH_PYTHON=OFF -DMSVC_STATIC_CRT=OFF + ``` + - compile GPU inference library + ```bash + # -DCUDA_TOOKIT_ROOT_DIR is cuda root directory, such as -DCUDA_TOOKIT_ROOT_DIR="D:\\cuda" + cmake .. -G "Visual Studio 14 2015" -A x64 -T host=x64 -DCMAKE_BUILD_TYPE=Release -DWITH_MKL=ON -DWITH_GPU=ON -DON_INFER=ON -DWITH_PYTHON=OFF -DCUDA_TOOKIT_ROOT_DIR=YOUR_CUDA_PATH + ``` + +3. Open the `paddle.sln` using VisualStudio 2015, choose the`x64` for Slution Platforms, and `Release` for Solution Configurations, then build the `inference_lib_dist` project in the Solution Explorer(Rigth click the project and click Build). + +The inference library will be installed in `fluid_inference_install_dir`. + +version.txt constains the detailed configurations about the library, including git commit ID、math library, CUDA, CUDNN versions: + + + GIT COMMIT ID: cc9028b90ef50a825a722c55e5fda4b7cd26b0d6 + WITH_MKL: ON + WITH_MKLDNN: ON + WITH_GPU: ON + CUDA version: 8.0 + CUDNN version: v7 + + +Inference Demo Compilation +------------------- + +### Hardware Environment + +Hardware Configuration of the experimental environment: + +| CPU | I7-8700K | +|:--------------|:-------------------| +| Memory | 16G | +| Hard Disk | 1T hdd + 256G ssd | +| Graphics Card | GTX1080 8G | + +The operating system is win10 family version in the experimental environment. 
+ +### Steps to Configure Environment + +**Please strictly follow the subsequent steps to install, otherwise the installation may fail** + +**Install Visual Studio 2015 update3** + +Install Visual Studio 2015. Please choose "customize" for the options of contents to be installed and choose to install all functions relevant to c, c++ and vc++. + +### Other requirements + +1. You need to download the Windows inference library or compile the inference library from Paddle source code. + +2. You need to run the command to get the Paddle source code. +```bash +git clone https://github.com/PaddlePaddle/Paddle.git +``` + +### Usage of Inference demo + +#### Compile with script + +Open the windows command line and run the `run_windows_demo.bat`, and input parameters as required according to the prompts. +```dos +# Path is the directory of Paddle you downloaded. +cd path\Paddle\paddle\fluid\inference\api\demo_ci +run_windows_demo.bat +``` +Some options of the script are as follows: + +```dos +gpu_inference=Y # Use gpu_inference_lib or not(Y/N), default: N. +use_mkl=Y # Use MKL or not(Y/N), default: Y. +use_gpu=Y # Whether to use GPU for prediction, defalut: N. + +paddle_inference_lib=path\fluid_inference_install_dir # Set the path of paddle inference library. +cuda_lib_dir=path\lib\x64 # Set the path of cuda library. +vcvarsall_dir=path\vc\vcvarsall.bat # Set the path of visual studio command prompt. +``` + +#### Compile manually + +1. Create and change to the build directory + ```dos + # path is the directory where Paddle is downloaded + cd path\Paddle\paddle\fluid\inference\api\demo_ci + mkdir build + cd build + ``` +2. Run Cmake command, cmake can be [downloaded at official site](https://cmake.org/download/) and added to environment variables. + - compile inference demo with CPU inference library + ```dos + # Path is the directory where you downloaded paddle. + # -DDEMO_NAME is the file to be built + # DPADDLE_LIB is the path of fluid_install_dir, for example: DPADDLE_LIB=D:\fluid_install_dir + + cmake .. -G "Visual Studio 14 2015" -A x64 -T host=x64 -DWITH_GPU=OFF -DWITH_MKL=OFF -DWITH_STATIC_LIB=ON -DCMAKE_BUILD_TYPE=Release -DDEMO_NAME=simple_on_word2vec -DPADDLE_LIB=path_to_the_paddle_lib -DMSVC_STATIC_CRT=ON + ``` + - compile inference demo with GPU inference library + ```dos + cmake .. -G "Visual Studio 14 2015" -A x64 -T host=x64 -DWITH_GPU=ON -DWITH_MKL=ON -DWITH_STATIC_LIB=ON ^ + -DCMAKE_BUILD_TYPE=Release -DDEMO_NAME=simple_on_word2vec -DPADDLE_LIB=path_to_the_paddle_lib -DMSVC_STATIC_CRT=ON -DCUDA_LIB=YOUR_CUDA_LIB + ``` +3. Open the `cpp_inference_demo.sln` using VisualStudio 2015, choose the`x64` for Slution Platforms, and `Release` for Solution Configurations, then build the `simple_on_word2vec` project in the Solution Explorer(Rigth click the project and click Build). + + In the dependent packages provided, please copy openblas and model files under Release directory to the directory of Release built and generated. + +

+ +

+ +4. [Download model](http://paddle-inference-dist.bj.bcebos.com/word2vec.inference.model.tar.gz) and decompress it to the current directory. Run the command: + ```dos + # Open GLOG + set GLOG_v=100 + + # Start inference, path is the directory where you decompres model + Release\simple_on_word2vec.exe --dirname=path\word2vec.inference.model + ``` + +### Implementing a simple inference demo + +[Complete code example](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/inference/api/demo_ci/windows_mobilenet.cc) + +This example uses Analysisconfig to manage the Analysispredictor prediction configuration. The configuration method is as follows: + +1. Create AnalysisConfig + ``` c++ + AnalysisConfig config; + config->SwitchUseFeedFetchOps(false); // Turn off the use of feed and fetch OP, this must be set when using the ZeroCopy interface. + // config->EnableUseGpu(100 /*Set the GPU initial memory pool to 100MB*/, 0 /*Set GPU ID to 0*/); // Turn on GPU prediction + ``` + +2. Set path of models and parameters + - When there is a model file and multiple parameter files under the model folder `model_dir`, the model folder path is passed in, and the model file name defaults to `__model__`. + ``` c++ + config->SetModel("path\\model_dir\\__model__", "path\\model_dir\\__params__"); + ``` + + - When there is only one model file `__model__` and one parameter file `__params__` in the model folder `model_dir`, the model file and parameter file path are passed in. + ```C++ + config->SetModel("path\\model_dir\\__model__", "path\\model_dir\\__params__"); + ``` + +3. Create predictor and prepare input data + ``` C++ + std::unique_ptr predictor = CreatePaddlePredictor(config); + int batch_size = 1; + int channels = 3; // The parameters of channels, height, and width must be the same as those required by the input in the model. + int height = 300; + int width = 300; + int nums = batch_size * channels * height * width; + + float* input = new float[nums]; + for (int i = 0; i < nums; ++i) input[i] = 0; + ``` + +4. Manage input with ZeroCopyTensor + ```C++ + auto input_names = predictor->GetInputNames(); + auto input_t = predictor->GetInputTensor(input_names[0]); + + // Reshape the input tensor, copy the prepared input data from the CPU to ZeroCopyTensor + input_t->Reshape({batch_size, channels, height, width}); + input_t->copy_from_cpu(input); + ``` + +5. Run prediction engine + ```C++ + predictor->ZeroCopyRun(); + ``` + +6. Manage input with ZeroCopyTensor + ```C++ + auto output_names = predictor->GetOutputNames(); + auto output_t = predictor->GetOutputTensor(output_names[0]); + std::vector output_shape = output_t->shape(); + int out_num = std::accumulate(output_shape.begin(), output_shape.end(), 1, + std::multiplies()); + + out_data.resize(out_num); + output_t->copy_to_cpu(out_data.data()); // Copy data from ZeroCopyTensor to cpu + delete[] input; + ``` +**Note:** For more introduction to AnalysisPredictor, please refer to the [introduction of C++ Prediction API](./native_infer_en.html). 
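Putting the six steps above together, a minimal end-to-end sketch could look like the following. It only assembles the calls already shown on this page; the header name `paddle_inference_api.h`, the `paddle` namespace and the model paths are assumptions that may need adjusting to your inference library layout and release.

```c++
#include <functional>
#include <iostream>
#include <memory>
#include <numeric>
#include <vector>

#include "paddle_inference_api.h"  // assumed header name, adjust to your library layout

int main() {
  // 1. Configure the predictor (CPU here; call EnableUseGpu(...) to switch to GPU).
  paddle::AnalysisConfig config;
  config.SetModel("path\\model_dir\\__model__", "path\\model_dir\\__params__");  // placeholder paths
  config.SwitchUseFeedFetchOps(false);  // required when using the ZeroCopy interface

  // 2. Create the predictor.
  auto predictor = paddle::CreatePaddlePredictor(config);

  // 3. Prepare an all-zero input with the shape expected by the model.
  const int batch_size = 1, channels = 3, height = 300, width = 300;
  std::vector<float> input(batch_size * channels * height * width, 0.f);

  // 4. Feed the input through a ZeroCopyTensor.
  auto input_names = predictor->GetInputNames();
  auto input_t = predictor->GetInputTensor(input_names[0]);
  input_t->Reshape({batch_size, channels, height, width});
  input_t->copy_from_cpu(input.data());

  // 5. Run the prediction engine.
  predictor->ZeroCopyRun();

  // 6. Fetch the output through a ZeroCopyTensor.
  auto output_names = predictor->GetOutputNames();
  auto output_t = predictor->GetOutputTensor(output_names[0]);
  std::vector<int> output_shape = output_t->shape();
  int out_num = std::accumulate(output_shape.begin(), output_shape.end(), 1,
                                std::multiplies<int>());
  std::vector<float> out_data(out_num);
  output_t->copy_to_cpu(out_data.data());

  std::cout << "number of output elements: " << out_num << std::endl;
  return 0;
}
```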
diff --git a/doc/paddle/advanced_guide/inference_deployment/mobile/for_developer.md b/doc/paddle/advanced_guide/inference_deployment/mobile/for_developer.md new file mode 100644 index 0000000000000000000000000000000000000000..b0ead46d464931d1c0cef26b1514a5bb92770c19 --- /dev/null +++ b/doc/paddle/advanced_guide/inference_deployment/mobile/for_developer.md @@ -0,0 +1,253 @@ +# 开发者文档 + +## 基本概念 + +### Place + +`Place`类确定了kernel运行时的上下文信息,其中包含了kernel运行时所在的平台,执行运算数据的精度以及数据的布局等信息,使得MIR的分析更加清晰准确。它主要的成员变量如下: + +* `TargetType target`: kernel运行时所在的平台,如X86/CUDA/ARM等; +* `PrecisionType precision`: kernel执行运算的数据的精度,如Float, Int8, Fp16等; +* `DataLayoutType layout`: kernel执行运算的数据的布局,如NCHW, NHWC等; + +### OpLite + +`Oplite`类负责协助kernel计算,本身不具备计算功能,主要的接口功能包括: + +* `CheckShape`: 用于检查op的输入/输出参数维度、类型是否合法,以及属性信息是否符合设计; +* `InferShape`: 用于设置输出Tensor的形状信息; +* `CreateKernels`: 创建相关的kernel; +* `Attach`: 用于从`Scope`和`OpDesc`中获取参数的指针,并传递给kernel; + +重要方法及声明如下: + +```c++ +class OpLite : public Registry { + public: + OpLite() = default; + explicit OpLite(const std::string &type) : op_type_(type) {} + explicit OpLite(const std::vector &valid_places) + : valid_places_(valid_places) {} + + void SetValidPlaces(const std::vector &places) { + VLOG(3) << "valid places " << valid_places_.size(); + valid_places_ = places; + } + // Set supported places + const std::vector &valid_places() const { return valid_places_; } + // Check the shape. + virtual bool CheckShape() const { return true; } + // Inference the outputs' shape. + virtual bool InferShape() const { return true; } + // Run this operator. + virtual bool Run(); + + // Link the external execution environ to internal context. + bool Attach(const cpp::OpDesc &opdesc, lite::Scope *scope); + + // Create all the kernels for the valid targets. + std::vector> CreateKernels( + const std::vector &places, const std::string &kernel_type = ""); + + // Assign op param to kernel. + virtual void AttachKernel(KernelBase *kernel) = 0; +}; +``` + +### KernelLite + +为了提升kernel对`Target`, `Precision`, `DataLayout`等多种执行模式的支持,引入了`KernelLite`的概念,它主要有以下特点: + +* 可以通过模版特化不同`Place`和kernel的实现,加强对不同执行模式的支持; +* 轻量级,`KernelLite`类似functor,只有执行的职能,执行效率更高; +* 每个kernel有明确执行的模式,并且可以在analysis time参与分析; +* 依赖简单,便于部署到mobile执行; +* 硬件调度信息等`context`跟具体的kernel绑定,方便定制不同kernel的行为。 + +重要的方法及声明如下: + +```c++ +template +class KernelLite : public KernelBase { + public: + // Run the kernel. 
+ virtual void Run() { CHECK(false) << "Not Implemented"; } + // Set target + TargetType target() const override { return Target; } + // Set precision + PrecisionType precision() const override { return Precision; } + // Set data layout + DataLayoutType layout() const override { return DataLayout; } + Place place() const override { return Place{Target, Precision, DataLayout}; } + void Touch() {} + + KernelLite() = default; + virtual ~KernelLite() = default; +}; +``` + + + +## 架构简介 + +Mobile 在这次升级为 lite 架构, 侧重多硬件、高性能的支持,其主要设计思想如下 + +- 引入 Type system,强化多硬件、量化方法、data layout 的混合调度能力 +- 硬件细节隔离,通过不同编译开关,对支持的任何硬件可以自由插拔 +- 引入 MIR(Machine IR) 的概念,强化带执行环境下的优化支持 +- 优化期和执行期严格隔离,保证预测时轻量和高效率 + +架构图如下 + +![Paddle Inference Refactor1.0](https://github.com/Superjomn/_tmp_images/raw/master/images/lite.jpg) + + + +## 增加新 Kernel的方法 + +下面主要介绍op新增kernel如何写,简单总结新增kernel的实现需要包含如下内容: + +- kernel实现:继承自`KernelLite`类的对应op的Compute类定义与实现,根据输入的数据类型,数据布局,数据所在的设备以及运行时所调用的第三方库的不同实现不同的kernel;server端CPU kernel实现在.h文件中。 +- kernel注册:server端CPU kernel注册实现在.cc文件。 + +## 实现C++类 + +以mul op的CPU Kernel实现为例,mul kernel执行运算的矩阵乘法的公式为*Out* = *X* * *Y*, 可见该计算由两个输入,一个输出组成; 输入输出参数分别从OP的param中获取,如mul op的param定义如下: + +```c++ +struct MulParam { + const lite::Tensor* x{}; + const lite::Tensor* y{}; + lite::Tensor* output{}; + int x_num_col_dims{1}; + int y_num_col_dims{1}; +}; +``` + +下面开始定义`MulCompute`类的实现: + +```c++ +template +class MulCompute : public KernelLite { + public: + using param_t = operators::MulParam; + + void Run() override { + auto& context = ctx_->As(); + auto& param = *param_.get_mutable(); + CHECK(context.x86_device_context()); + + //1. 为output分配内存 + param.output->template mutable_data(); + + // 2. 获取计算用的输入输出 + auto* x = ¶m.x->raw_tensor(); + auto* y = ¶m.y->raw_tensor(); + + auto* z = ¶m.output->raw_tensor(); + + //3. 对输入输出数据进行需要的处理... + Tensor x_matrix, y_matrix; + if (x->dims().size() > 2) { + x_matrix = framework::ReshapeToMatrix(*x, param.x_num_col_dims); + } else { + x_matrix = *x; + } + + //4. 调用数学库进行矩阵的运算... + auto blas = paddle::operators::math::GetBlas( + *context.x86_device_context()); + + blas.MatMul(x_matrix, y_matrix, z); + } + + virtual ~MulCompute() = default; +}; +``` + +`MulCompute`类继承自`kernelLite`, 带有下面两个模版参数: + +- `TARGET(kX86)`: `Target`代表的是硬件信息,如CUDA/X86/ARM/…,表示该kernel运行的硬件平台,在该示例中我们写的是kX86,表示mul这个kernel运行在X86平台上; + +- `PRECISION(kFloat)`:`Precision`代表该kernel运算支持的数据精度信息,示例中写的是`kFloat`, 表示mul这个kernel支持Float数据的运算; + + 需要为`MulCompute`类重写`Run`接口, kernel 的输入和输出分别通过`MulParam`获得,输入/输出的变量类型是`lite::Tensor`。 + +到此,前向mul kernel的实现完成,接下来需要在.cc文件中注册该kernel。 + +## 注册kernel + +在.cc文件中注册实现的kernel: + +```c++ +REGISTER_LITE_KERNEL(mul, kX86, kFloat, kNCHW, + paddle::lite::kernels::x86::MulCompute, def) + .BindInput("X", {LiteType::GetTensorTy(TARGET(kX86))}) + .BindInput("Y", {LiteType::GetTensorTy(TARGET(kX86))}) + .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kX86))}) + .Finalize(); +``` + +在上面的代码中; + +- `REGISTER_LITE_KERNEL`: 注册MulCompute类,并特化模版参数为float类型, 类型名为mul, 运行的平台为X86, 数据精度为float, 数据布局为NCHW; +- 在运行时,框架系统根据输入数据所在的设备,输入数据的类型,数据布局等信息静态的选择合适的kernel执行运算。 + +## 开发环境 + +### Mobile端开发和测试 + +我们提供了移动端开发所需的docker镜像环境,在`paddle/fluid/lite/tools/Dockerfile.mobile`,可以直接通过 +`docker build --file paddle/fluid/lite/tools/Dockerfile.mobile --tag paddle-lite-mobile:latest . 
`生成镜像文件。 + +该镜像中提供了 + + - Android端的交叉编译环境 + - ARM Linux端的交叉编译环境 + - Android端的模拟器环境 + - 开发所需的格式检查工具 + +#### 相关的cmake选项 + +目前支持如下的编译配置,以生成不同目标上的程序。 + +- `ARM_TARGET_OS` 代表目标操作系统, 目前支持 "android" "armlinux", 默认是Android +- `ARM_TARGET_ARCH_ABI` 代表ARCH,支持输入"armv8"和"armv7",针对OS不一样选择不一样。 + - `-DARM_TARGET_OS="android"` 时 + - "armv8", 等效于 "arm64-v8a"。 default值为这个。 + - "armv7", 等效于 "armeabi-v7a"。 + - `-DARM_TARGET_OS="armlinux"` 时 + - "armv8", 等效于 "arm64"。 default值为这个。 + - "armv7hf", 等效于使用`eabihf`且`-march=armv7-a -mfloat-abi=hard -mfpu=neon-vfpv4 `。 + - "armv7", 等效于使用`eabi`且`-march=armv7-a -mfloat-abi=softfp -mfpu=neon-vfpv4`。 +- `ARM_TARGET_LANG` 代表目标编译的语言, 默认为gcc,支持 gcc和clang两种。 + +注意: ARM Linux当前仅支持在armv8上编译并测试。 + +#### 开发 + +添加新的ARM端kernel,主要分为3部分: + +1. 添加具体的数学计算,在`paddle/fluid/lite/arm/math`中添加对应的数学函数,侧重点在于代码本身的优化,充分利用NEON指令发挥其优势。 +2. 添加kernel声明和调用实例,在`paddle/fluid/lite/kernels/arm`中添加对应kernel的框架声明和调用,侧重点在于每种kernel严格对应输入输出的类型。 +3. 添加单元测试,在`paddle/fluid/lite/kernels/arm`中添加相应的单元测试,并保持其在模拟器或者真机中可以通过。 + +#### 测试 + +我们在镜像开发环境中添加了`arm64-v8a`和`armeabi-v7a`的Android模拟环境,在没有真机环境下,可以很方便的用于测试对应平台上的单元测试。 + +常用步骤如下 + +```shell +# 创建Android avd (armv8) +$ echo n | avdmanager create avd -f -n paddle-armv8 -k "system-images;android-24;google_apis;arm64-v8a" + +# 启动Android armv8 emulator +$ ${ANDROID_HOME}/emulator/emulator -avd paddle-armv8 -noaudio -no-window -gpu off -verbose & + +# 其他正常测试步骤 + +# 关闭所有模拟器 +$ adb devices | grep emulator | cut -f1 | while read line; do adb -s $line emu kill; done +``` diff --git a/doc/paddle/advanced_guide/inference_deployment/mobile/images/Paddle Inference Refactor1.0.jpg b/doc/paddle/advanced_guide/inference_deployment/mobile/images/Paddle Inference Refactor1.0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0e6caa88a932553e212cbd899515ef4f5366839a Binary files /dev/null and b/doc/paddle/advanced_guide/inference_deployment/mobile/images/Paddle Inference Refactor1.0.jpg differ diff --git a/doc/paddle/advanced_guide/inference_deployment/mobile/images/lite-process.png b/doc/paddle/advanced_guide/inference_deployment/mobile/images/lite-process.png new file mode 100644 index 0000000000000000000000000000000000000000..705eb30b4de3f7b1e6ea14136fb083e5e5f8916d Binary files /dev/null and b/doc/paddle/advanced_guide/inference_deployment/mobile/images/lite-process.png differ diff --git a/doc/paddle/advanced_guide/inference_deployment/mobile/images/lite_train_process.png b/doc/paddle/advanced_guide/inference_deployment/mobile/images/lite_train_process.png new file mode 100644 index 0000000000000000000000000000000000000000..d6c45763fb0f7ebf2590807b34aba72038d51892 Binary files /dev/null and b/doc/paddle/advanced_guide/inference_deployment/mobile/images/lite_train_process.png differ diff --git a/doc/paddle/advanced_guide/inference_deployment/mobile/images/op-kernel-relation.png b/doc/paddle/advanced_guide/inference_deployment/mobile/images/op-kernel-relation.png new file mode 100644 index 0000000000000000000000000000000000000000..b5d4eb63fcb75905f2783ac87df742d17553b2d0 Binary files /dev/null and b/doc/paddle/advanced_guide/inference_deployment/mobile/images/op-kernel-relation.png differ diff --git a/doc/paddle/advanced_guide/inference_deployment/mobile/index_cn.rst b/doc/paddle/advanced_guide/inference_deployment/mobile/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..924d920ee8ceae59fc5de78904f14327a2f96d95 --- /dev/null +++ b/doc/paddle/advanced_guide/inference_deployment/mobile/index_cn.rst @@ -0,0 +1,12 @@ +########## +移动端部署 
+########## + +本模块介绍了飞桨的端侧推理引擎Paddle-Lite: + +* `Paddle Lite `_:简要介绍了 Paddle-Lite 特点以及使用说明。 + +.. toctree:: + :hidden: + + mobile_index.md diff --git a/doc/paddle/advanced_guide/inference_deployment/mobile/index_en.rst b/doc/paddle/advanced_guide/inference_deployment/mobile/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..0c30f14a6e32266d01c6a9806286aae9853498ac --- /dev/null +++ b/doc/paddle/advanced_guide/inference_deployment/mobile/index_en.rst @@ -0,0 +1,4 @@ +################# +Mobile Deployment +################# + diff --git a/doc/paddle/advanced_guide/inference_deployment/mobile/mobile_index.md b/doc/paddle/advanced_guide/inference_deployment/mobile/mobile_index.md new file mode 100644 index 0000000000000000000000000000000000000000..e977a8d7d4e95d50a0ecad3235bb89a8fc9ef7b5 --- /dev/null +++ b/doc/paddle/advanced_guide/inference_deployment/mobile/mobile_index.md @@ -0,0 +1,58 @@ +# Paddle-Lite + +Paddle-Lite为Paddle-Mobile的升级版,定位支持包括手机移动端在内更多场景的轻量化高效预测,支持更广泛的硬件和平台,是一个高性能、轻量级的深度学习预测引擎。在保持和PaddlePaddle无缝对接外,也兼容支持其他训练框架产出的模型。 + +完整使用文档位于 [Paddle-Lite 文档](https://paddle-lite.readthedocs.io/zh/latest/) 。 + +## 特性 + +### 轻量级 +执行阶段和计算优化阶段实现良好解耦拆分,移动端可以直接部署执行阶段,无任何第三方依赖。 +包含完整的80个 Op+85个 Kernel 的动态库,对于ARMV7只有800K,ARMV8下为1.3M,并可以裁剪到更低。 +在应用部署时,载入模型即可直接预测,无需额外分析优化。 + +### 高性能 +极致的 ARM CPU 性能优化,针对不同微架构特点实现kernel的定制,最大发挥计算性能,在主流模型上展现出领先的速度优势。 +支持量化模型,结合[PaddleSlim 模型压缩工具](https://github.com/PaddlePaddle/models/tree/v1.5/PaddleSlim) 中量化功能,可以提供高精度高性能的预测能力。 +在Huawei NPU, FPGA上也具有有很好的性能表现。 + +最新性能数据位于 [Benchmark 文档](https://paddle-lite.readthedocs.io/zh/latest/benchmark/benchmark.html)。 + +### 通用性 +硬件方面,Paddle-Lite 的架构设计为多硬件兼容支持做了良好设计。除了支持ARM CPU、Mali GPU、Adreno GPU,还特别支持了华为 NPU,以及 FPGA 等边缘设备广泛使用的硬件。即将支持支持包括寒武纪、比特大陆等AI芯片,未来会增加对更多硬件的支持。 + +模型支持方面,Paddle-Lite和PaddlePaddle训练框架的Op对齐,提供更广泛的模型支持能力。目前已严格验证18个模型85个OP的精度和性能,对视觉类模型做到了较为充分的支持,覆盖分类、检测和定位,包含了特色的OCR模型的支持。未来会持续增加更多模型的支持验证。 + +框架兼容方面:除了PaddlePaddle外,对其他训练框架也提供兼容支持。当前,支持Caffe 和 TensorFlow 训练出来的模型,通过[X2Paddle] (https://github.com/PaddlePaddle/X2Paddle) 转换工具实现。接下来将会对ONNX等格式模型提供兼容支持。 + +## 架构 + +Paddle-Lite 的架构设计着重考虑了对多硬件和平台的支持,并且强化了多个硬件在一个模型中混合执行的能力,多个层面的性能优化处理,以及对端侧应用的轻量化设计。 + +![](https://github.com/Superjomn/_tmp_images/raw/master/images/paddle-lite-architecture.png) + +其中,Analysis Phase 包括了 MIR(Machine IR) 相关模块,能够对原有的模型的计算图针对具体的硬件列表进行算子融合、计算裁剪 在内的多种优化。Execution Phase 只涉及到Kernel 的执行,且可以单独部署,以支持极致的轻量级部署。 + + +## Paddle-Mobile升级为Paddle-Lite的说明 +原Paddle-Mobile作为一个致力于嵌入式平台的PaddlePaddle预测引擎,已支持多种硬件平台,包括ARM CPU、 Mali GPU、Adreno GPU,以及支持苹果设备的GPU Metal实现、ZU5、ZU9等FPGA开发板、树莓派等arm-linux开发板。在百度内已经过广泛业务场景应用验证。对应设计文档可参考: [mobile/README](https://github.com/PaddlePaddle/Paddle-Lite/blob/develop/mobile/README.md) + +Paddle-Mobile 整体升级重构并更名为Paddle-Lite后,原paddle-mobile 的底层能力大部分已集成到[新架构 ](https://github.com/PaddlePaddle/Paddle-Lite/tree/develop/lite)下。作为过渡,暂时保留原Paddle-mobile代码。 主体代码位于 `mobile/` 目录中,后续一段时间会继续维护,并完成全部迁移。新功能会统一到[新架构 ](https://github.com/PaddlePaddle/Paddle-Lite/tree/develop/lite)下开发。 + +metal, web的模块相对独立,会继续在 `./metal` 和 `./web` 目录下开发和维护。对苹果设备的GPU Metal实现的需求及web前端预测需求,可以直接进入这两个目录。 + +## 致谢 +Paddle-Lite 借鉴了以下开源项目: + +- [ARM compute library](https://github.com/ARM-software/ComputeLibrary) +- [Anakin](https://github.com/PaddlePaddle/Anakin) ,Anakin对应底层的一些优化实现已被集成到Paddle-Lite。Anakin作为PaddlePaddle组织下的一个高性能预测项目,极具前瞻性,对Paddle-Lite有重要贡献。Anakin已和本项目实现整合。之后,Anakin不再升级。 + +## 交流与反馈 +* 欢迎您通过Github Issues来提交问题、报告与建议 +* 微信公众号:飞桨PaddlePaddle +* QQ群: 696965088 + +

     

+

(二维码图片:微信公众号 / 官方技术交流QQ群)

+ +* 论坛: 欢迎大家在[PaddlePaddle论坛](https://ai.baidu.com/forum/topic/list/168)分享在使用PaddlePaddle中遇到的问题和经验, 营造良好的论坛氛围 diff --git a/doc/paddle/advanced_guide/inference_deployment/mobile/pics/anakin_fm_ch.png b/doc/paddle/advanced_guide/inference_deployment/mobile/pics/anakin_fm_ch.png new file mode 100644 index 0000000000000000000000000000000000000000..52d4992a22397119af949aa7c11a9ea6365c167c Binary files /dev/null and b/doc/paddle/advanced_guide/inference_deployment/mobile/pics/anakin_fm_ch.png differ diff --git a/doc/paddle/advanced_guide/inference_deployment/mobile/pics/int8_design.png b/doc/paddle/advanced_guide/inference_deployment/mobile/pics/int8_design.png new file mode 100644 index 0000000000000000000000000000000000000000..d6feafbd3e35841ab964be3364a641adce2d9892 Binary files /dev/null and b/doc/paddle/advanced_guide/inference_deployment/mobile/pics/int8_design.png differ diff --git a/doc/paddle/advanced_guide/inference_deployment/paddleslim/paddle_slim.md b/doc/paddle/advanced_guide/inference_deployment/paddleslim/paddle_slim.md new file mode 100644 index 0000000000000000000000000000000000000000..8e8bc669c6023bc655e753c4895aa443187063fc --- /dev/null +++ b/doc/paddle/advanced_guide/inference_deployment/paddleslim/paddle_slim.md @@ -0,0 +1,93 @@ +# 模型压缩 + +PaddleSlim是一个模型压缩工具库,包含模型剪裁、定点量化、知识蒸馏、超参搜索和模型结构搜索等一系列模型压缩策略。 + +对于业务用户,PaddleSlim提供完整的模型压缩解决方案,可用于图像分类、检测、分割等各种类型的视觉场景。 +同时也在持续探索NLP领域模型的压缩方案。另外,PaddleSlim提供且在不断完善各种压缩策略在经典开源任务的benchmark, +以便业务用户参考。 + +对于模型压缩算法研究者或开发者,PaddleSlim提供各种压缩策略的底层辅助接口,方便用户复现、调研和使用最新论文方法。 +PaddleSlim会从底层能力、技术咨询合作和业务场景等角度支持开发者进行模型压缩策略相关的创新工作。 + +## 功能 + +- 模型剪裁 + - 卷积通道均匀剪裁 + - 基于敏感度的卷积通道剪裁 + - 基于进化算法的自动剪裁 + +- 定点量化 + - 在线量化训练(training aware) + - 离线量化(post training) + +- 知识蒸馏 + - 支持单进程知识蒸馏 + - 支持多进程分布式知识蒸馏 + +- 神经网络结构自动搜索(NAS) + - 支持基于进化算法的轻量神经网络结构自动搜索 + - 支持One-Shot网络结构自动搜索 + - 支持 FLOPS / 硬件延时约束 + - 支持多平台模型延时评估 + - 支持用户自定义搜索算法和搜索空间 + +## 安装 + +依赖: + +Paddle >= 1.7.0 + +```bash +pip install paddleslim -i https://pypi.org/simple +``` + +## 使用 + +- [快速开始](https://paddlepaddle.github.io/PaddleSlim/quick_start/index.html):通过简单示例介绍如何快速使用PaddleSlim。 +- [进阶教程](https://paddlepaddle.github.io/PaddleSlim/tutorials/index.html):PaddleSlim高阶教程。 +- [模型库](https://paddlepaddle.github.io/PaddleSlim/model_zoo.html):各个压缩策略在图像分类、目标检测和图像语义分割模型上的实验结论,包括模型精度、预测速度和可供下载的预训练模型。 +- [API文档](https://paddlepaddle.github.io/PaddleSlim/api_cn/index.html) +- [Paddle检测库](https://github.com/PaddlePaddle/PaddleDetection/tree/master/slim):介绍如何在检测库中使用PaddleSlim。 +- [Paddle分割库](https://github.com/PaddlePaddle/PaddleSeg/tree/develop/slim):介绍如何在分割库中使用PaddleSlim。 +- [PaddleLite](https://paddlepaddle.github.io/Paddle-Lite/):介绍如何使用预测库PaddleLite部署PaddleSlim产出的模型。 + +## 部分压缩策略效果 + +### 分类模型 + +数据: ImageNet2012; 模型: MobileNetV1; + +|压缩策略 |精度收益(baseline: 70.91%) |模型大小(baseline: 17.0M)| +|:---:|:---:|:---:| +| 知识蒸馏(ResNet50)| **+1.06%** |-| +| 知识蒸馏(ResNet50) + int8量化训练 |**+1.10%**| **-71.76%**| +| 剪裁(FLOPs-50%) + int8量化训练|**-1.71%**|**-86.47%**| + + +### 图像检测模型 + +#### 数据:Pascal VOC;模型:MobileNet-V1-YOLOv3 + +| 压缩方法 | mAP(baseline: 76.2%) | 模型大小(baseline: 94MB) | +| :---------------------: | :------------: | :------------:| +| 知识蒸馏(ResNet34-YOLOv3) | **+2.8%** | - | +| 剪裁 FLOPs -52.88% | **+1.4%** | **-67.76%** | +|知识蒸馏(ResNet34-YOLOv3)+剪裁(FLOPs-69.57%)| **+2.6%**|**-67.00%**| + + +#### 数据:COCO;模型:MobileNet-V1-YOLOv3 + +| 压缩方法 | mAP(baseline: 29.3%) | 模型大小| +| :---------------------: | :------------: | :------:| +| 知识蒸馏(ResNet34-YOLOv3) | **+2.1%** |-| +| 知识蒸馏(ResNet34-YOLOv3)+剪裁(FLOPs-67.56%) | **-0.3%** | 
**-66.90%**| + +### 搜索 + +数据:ImageNet2012; 模型:MobileNetV2 + +|硬件环境 | 推理耗时 | Top1准确率(baseline:71.90%) | +|:---------------:|:---------:|:--------------------:| +| RK3288 | **-23%** | +0.07% | +| Android cellphone | **-20%** | +0.16% | +| iPhone 6s | **-17%** | +0.32% | diff --git a/doc/paddle/advanced_guide/inference_deployment/paddleslim/paddle_slim_en.rst b/doc/paddle/advanced_guide/inference_deployment/paddleslim/paddle_slim_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..8897b24530119c5b2d136b0d23a33dbf3fd06361 --- /dev/null +++ b/doc/paddle/advanced_guide/inference_deployment/paddleslim/paddle_slim_en.rst @@ -0,0 +1,126 @@ +Model Compression +================== + +PaddleSlim is a toolkit for model compression. It contains a collection of compression strategies, such as pruning, fixed point quantization, knowledge distillation, hyperparameter searching and neural architecture search. + +PaddleSlim provides solutions of compression on computer vision models, such as image classification, object detection and semantic segmentation. Meanwhile, PaddleSlim Keeps exploring advanced compression strategies for language model. Furthermore, benckmark of compression strategies on some open tasks is available for your reference. + +PaddleSlim also provides auxiliary and primitive API for developer and researcher to survey, implement and apply the method in latest papers. PaddleSlim will support developer in ability of framework and technology consulting. + +Features +---------- + +Pruning ++++++++++ + + - Uniform pruning of convolution + - Sensitivity-based prunning + - Automated pruning based evolution search strategy + - Support pruning of various deep architectures such as VGG, ResNet, and MobileNet. + - Support self-defined range of pruning, i.e., layers to be pruned. + +Fixed Point Quantization +++++++++++++++++++++++++ + + - Training aware + - Dynamic strategy: During inference, we quantize models with hyperparameters dynamically estimated from small batches of samples. + - Static strategy: During inference, we quantize models with the same hyperparameters estimated from training data. + - Support layer-wise and channel-wise quantization. + - Post training + +Knowledge Distillation ++++++++++++++++++++++++ + + - Naive knowledge distillation: transfers dark knowledge by merging the teacher and student model into the same Program + - Paddle large-scale scalable knowledge distillation framework Pantheon: a universal solution for knowledge distillation, more flexible than the naive knowledge distillation, and easier to scale to the large-scale applications. + + - Decouple the teacher and student models --- they run in different processes in the same or different nodes, and transfer knowledge via TCP/IP ports or local files; + - Friendly to assemble multiple teacher models and each of them can work in either online or offline mode independently; + - Merge knowledge from different teachers and make batch data for the student model automatically; + - Support the large-scale knowledge prediction of teacher models on multiple devices. + +Neural Architecture Search ++++++++++++++++++++++++++++ + + - Neural architecture search based on evolution strategy. + - Support distributed search. + - One-Shot neural architecture search. + - Support FLOPs and latency constrained search. + - Support the latency estimation on different hardware and platforms. + +Install +-------- + +Requires: + +Paddle >= 1.7.0 + +.. 
code-block:: bash + + pip install paddleslim -i https://pypi.org/simple + + +Usage +------ + +- `QuickStart `_ : Introduce how to use PaddleSlim by simple examples. +- `Advanced Tutorials `_ : Tutorials about advanced usage of PaddleSlim. +- `Model Zoo `_ : Benchmark and pretrained models. +- `API Documents `_ +- `PaddleDetection `_ : Introduce how to use PaddleSlim in PaddleDetection library. +- `PaddleSeg `_ : Introduce how to use PaddleSlim in PaddleSeg library. +- `PaddleLite `_ : How to use PaddleLite to deploy models generated by PaddleSlim. + +Performance +------------ + +Image Classification ++++++++++++++++++++++ + +Dataset: ImageNet2012; Model: MobileNetV1; + + +===================================================== =========================== ============================ +Method Accuracy(baseline: 70.91%) Model Size(baseline: 17.0M) +===================================================== =========================== ============================ +Knowledge Distillation(ResNet50) +1.06% - +Knowledge Distillation(ResNet50) + int8 quantization +1.10% -71.76% +Pruning(FLOPs-50%) + int8 quantization -1.71% -86.47% +===================================================== =========================== ============================ + +Object Detection ++++++++++++++++++ + +Dataset: Pascal VOC; Model: MobileNet-V1-YOLOv3 + + +============================================================== ===================== =========================== +Method mAP(baseline: 76.2%) Model Size(baseline: 94MB) +============================================================== ===================== =========================== +Knowledge Distillation(ResNet34-YOLOv3) +2.8% - +Pruning(FLOPs -52.88%) +1.4% -67.76% +Knowledge DistillationResNet34-YOLOv3)+Pruning(FLOPs-69.57%) +2.6% -67.00% +============================================================== ===================== =========================== + + +Dataset: COCO; Model: MobileNet-V1-YOLOv3 + +============================================================== ===================== =========================== +Method mAP(baseline: 29.3%) Model Size| +============================================================== ===================== =========================== +Knowledge Distillation(ResNet34-YOLOv3) +2.1% - +Knowledge Distillation(ResNet34-YOLOv3)+Pruning(FLOPs-67.56%) -0.3% -66.90%| +============================================================== ===================== =========================== + +NAS +++++++ + +Dataset: ImageNet2012; Model: MobileNetV2 + +=================== ================ =============================== +Device Infer time cost Top1 accuracy(baseline:71.90%) +=================== ================ =============================== +RK3288 -23% +0.07% +Android cellphone -20% +0.16% +iPhone 6s -17% +0.32% +=================== ================ =============================== diff --git a/doc/paddle/advanced_guide/performance_improving/amp/amp.md b/doc/paddle/advanced_guide/performance_improving/amp/amp.md new file mode 100644 index 0000000000000000000000000000000000000000..3a41a447f78cf3bc119abb7754292edbbc23050a --- /dev/null +++ b/doc/paddle/advanced_guide/performance_improving/amp/amp.md @@ -0,0 +1,171 @@ +# 混合精度训练最佳实践 + +Automatic Mixed Precision (AMP) 是一种自动混合使用半精度(FP16)和单精度(FP32)来加速模型训练的技术。AMP技术可方便用户快速将使用 FP32 训练的模型修改为使用混合精度训练,并通过黑白名单和动态`loss scaling`来保证训练时的数值稳定性进而避免梯度Infinite或者NaN(Not a Number)。借力于新一代NVIDIA GPU中Tensor Cores的计算性能,PaddlePaddle AMP技术在ResNet50、Transformer等模型上训练速度相对于FP32训练加速比可达1.5~2.9。 + +### 半精度浮点类型FP16 + +如图 1 所示,半精度(Float 
Precision16,FP16)是一种相对较新的浮点类型,在计算机中使用2字节(16位)存储。在IEEE 754-2008标准中,它亦被称作binary16。与计算中常用的单精度(FP32)和双精度(FP64)类型相比,FP16更适于在精度要求不高的场景中使用。 + +
图 1. 半精度和单精度数据示意图
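下面给出一段与框架无关的示意代码(假设环境中已安装 NumPy,示例本身并非 PaddlePaddle API),用来直观展示 FP16 与 FP32 在存储开销、精度和数值范围上的差异:

``` python
import numpy as np

# FP16 每个元素占 2 字节,FP32 占 4 字节
x16 = np.ones(1024, dtype=np.float16)
x32 = np.ones(1024, dtype=np.float32)
print(x16.nbytes, x32.nbytes)                 # 2048 4096

# FP16 尾数只有 10 位,较小的增量会被舍入掉
print(np.float16(1.0) + np.float16(0.0001))   # 仍为 1.0
print(np.float32(1.0) + np.float32(0.0001))   # 1.0001

# FP16 可表示的最大正数远小于 FP32
print(np.finfo(np.float16).max)               # 65504.0
print(np.finfo(np.float32).max)               # 约 3.4e38
```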
+ +### 英伟达GPU的FP16算力 + +在使用相同的超参数下,混合精度训练使用半精度浮点(FP16)和单精度(FP32)浮点即可达到与使用纯单精度训练相同的准确率,并可加速模型的训练速度。这主要得益于英伟达推出的Volta及Turing架构GPU在使用FP16计算时具有如下特点: + +* FP16可降低一半的内存带宽和存储需求,这使得在相同的硬件条件下研究人员可使用更大更复杂的模型以及更大的batch size大小。 +* FP16可以充分利用英伟达Volta及Turing架构GPU提供的Tensor Cores技术。在相同的GPU硬件上,Tensor Cores的FP16计算吞吐量是FP32的8倍。 + +### PaddlePaddle AMP功能——牛刀小试 + +如前文所述,使用FP16数据类型可能会造成计算精度上的损失,但对深度学习领域而言,并不是所有计算都要求很高的精度,一些局部的精度损失对最终训练效果影响很微弱,却能使吞吐和训练速度带来大幅提升。因此,混合精度计算的需求应运而生。具体而言,训练过程中将一些对精度损失不敏感且能利用Tensor Cores进行加速的运算使用半精度处理,而对精度损失敏感部分依然保持FP32计算精度,用以最大限度提升访存和计算效率。 + +为了避免对每个具体模型人工地去设计和尝试精度混合的方法,PaddlePaadle框架提供自动混合精度训练(AMP)功能,解放"炼丹师"的双手。在PaddlePaddle中使用AMP训练是一件十分容易的事情,用户只需要增加一行代码即可将原有的FP32训练转变为AMP训练。下面以`MNIST`为例介绍PaddlePaddle AMP功能的使用示例。 + +**MNIST网络定义** + +```python +import paddle.fluid as fluid + +def MNIST(data, class_dim): + conv1 = fluid.layers.conv2d(data, 16, 5, 1, act=None, data_format='NHWC') + bn1 = fluid.layers.batch_norm(conv1, act='relu', data_layout='NHWC') + pool1 = fluid.layers.pool2d(bn1, 2, 'max', 2, data_format='NHWC') + conv2 = fluid.layers.conv2d(pool1, 64, 5, 1, act=None, data_format='NHWC') + bn2 = fluid.layers.batch_norm(conv2, act='relu', data_layout='NHWC') + pool2 = fluid.layers.pool2d(bn2, 2, 'max', 2, data_format='NHWC') + fc1 = fluid.layers.fc(pool2, size=64, act='relu') + fc2 = fluid.layers.fc(fc1, size=class_dim, act='softmax') + return fc2 +``` + +针对CV(Computer Vision)类模型组网,为获得更高的训练性能需要注意如下三点: + +* `conv2d`、`batch_norm`以及`pool2d`等需要将数据布局设置为`NHWC`,这样有助于使用TensorCore技术加速计算过程1。 +* Tensor Cores要求在使用FP16加速卷积运算时conv2d的输入/输出通道数为8的倍数2,因此设计网络时推荐将conv2d层的输入/输出通道数设置为8的倍数。 +* Tensor Cores要求在使用FP16加速矩阵乘运算时矩阵行数和列数均为8的倍数3,因此设计网络时推荐将fc层的size参数设置为8的倍数。 + + +**FP32 训练** + +为了训练 MNIST 网络,还需要定义损失函数来更新权重参数,此处使用的优化器是SGDOptimizer。为了简化说明,这里省略了迭代训练的相关代码,仅体现损失函数及优化器定义相关的内容。 + +```python +import paddle +import numpy as np + +data = fluid.layers.data( + name='image', shape=[None, 28, 28, 1], dtype='float32') +label = fluid.layers.data(name='label', shape=[None, 1], dtype='int64') + +out = MNIST(data, class_dim=10) +loss = fluid.layers.cross_entropy(input=out, label=label) +avg_loss = fluid.layers.mean(loss) + +sgd = fluid.optimizer.SGDOptimizer(learning_rate=1e-3) +sgd.minimize(avg_loss) +``` + +**AMP训练** + +与FP32训练相比,用户仅需使用PaddlePaddle提供的`fluid.contrib.mixed_precision.decorate` 函数将原来的优化器SGDOptimizer进行封装,然后使用封装后的优化器(mp_sgd)更新参数梯度即可完成向AMP训练的转换,代码如下所示: + +```python +sgd = SGDOptimizer(learning_rate=1e-3) +# 此处只需要使用fluid.contrib.mixed_precision.decorate将sgd封装成AMP训练所需的 +# 优化器mp_sgd,并使用mp_sgd.minimize(avg_loss)代替原来的sgd.minimize(avg_loss)语句即可。 +mp_sgd = fluid.contrib.mixed_precision.decorator.decorate(sgd) +mp_sgd.minimize(avg_loss) +``` + +运行上述混合精度训练python脚本时为得到更好的执行性能可配置如下环境参数,并保证cudnn版本在7.4.1及以上。 + +```shell +export FLAGS_conv_workspace_size_limit=1024 # MB,根据所使用的GPU显存容量及模型特点设置数值,值越大越有可能选择到更快的卷积算法 +export FLAGS_cudnn_exhaustive_search=1 # 使用穷举搜索方法来选择快速卷积算法 +export FLAGS_cudnn_batchnorm_spatial_persistent=1 # 用于触发batch_norm和relu的融合 +``` + +上述即为最简单的PaddlePaddle AMP功能使用方法。ResNet50模型的AMP训练示例可[点击此处](https://github.com/PaddlePaddle/models/blob/develop/PaddleCV/image_classification/README.md#%E6%B7%B7%E5%90%88%E7%B2%BE%E5%BA%A6%E8%AE%AD%E7%BB%83)查看,其他模型使用PaddlePaddle AMP的方法也与此类似。若AMP训练过程中出现连续的loss nan等不收敛现象,可尝试使用[check nan inf工具](https://www.paddlepaddle.org.cn/documentation/docs/zh/advanced_guide/flags/check_nan_inf_cn.html#span-id-speed-span)进行调试。 + + +### PaddlePaddle AMP功能——进阶使用 + +上一小节所述均为默认AMP训练行为,用户当然也可以改变一些默认的参数设置来满足特定的模型训练场景需求。接下来的章节将介绍PaddlePaddle AMP功能使用中用户可配置的参数行为,即进阶使用技巧。 + +#### 自定义黑白名单 + +PaddlePaddle 
AMP功能实现中根据FP16数据类型计算稳定性和加速效果在框架内部定义了算子(Op)的黑白名单。具体来说,将对FP16计算友好且能利用Tensor Cores的Op归类于白名单,将使用FP16计算会导致数值不稳定的Op归类于黑名单,将对FP16计算没有多少影响的Op归类于灰名单。然而,框架开发人员不可能考虑到所有的网络模型情况,尤其是那些特殊场景中使用到的模型。用户可以在使用`fluid.contrib.mixed_precision.decorate` 函数时通过指定自定义的黑白名单列表来改变默认的FP16计算行为。 + +```python +sgd = SGDOptimizer(learning_rate=1e-3) +# list1是白名单op列表,list2是黑名单op列表,list3是黑名单var_name列表(凡是以这些黑名单var_name为输入或输出的op均会被视为黑名单op) +amp_list = AutoMixedPrecisionLists(custom_white_list=list1, custom_black_list=list2, custom_black_varnames=list3) +mp_sgd = fluid.contrib.mixed_precision.decorator.decorate(sgd, amp_list) +mp_sgd.minimize(avg_loss) +``` + +#### 自动loss scaling + +为了避免梯度Infinite或者NAN,PaddlePaddle AMP功能支持根据训练过程中梯度的数值自动调整loss scale值。用户在使用`fluid.contrib.mixed_precision.decorate` 函数时也可以改变与loss scaling相关的参数设置,示例如下: + +```python +sgd = SGDOptimizer(learning_rate=1e-3) +mp_sgd = fluid.contrib.mixed_precision.decorator.decorate(sgd, + amp_lists=None, + init_loss_scaling=2**8, + incr_every_n_steps=500, + decr_every_n_nan_or_inf=4, + incr_ratio=2.0, + decr_ratio=0.5, + use_dynamic_loss_scaling=True) +mp_sgd.minimize(avg_loss) +``` + +`init_loss_scaling `、`incr_every_n_steps` 以及`decr_every_n_nan_or_inf`等参数控制着自动loss scaling的行为。它们仅当 `use_dynamic_loss_scaling`设置为True时有效。下面详述这些参数的意义: + +* init_loss_scaling(float):初始loss scaling值。 +* incr_every_n_steps(int):每经过incr_every_n_steps个连续的正常梯度值才会增大loss scaling值。 +* decr_every_n_nan_or_inf(int):每经过decr_every_n_nan_or_inf个连续的无效梯度值(nan或者inf)才会减小loss scaling值。 +* incr_ratio(float):每次增大loss scaling值的扩增倍数,其为大于1的浮点数。 +* decr_ratio(float):每次减小loss scaling值的比例系数,其为小于1的浮点数。 + +### 多卡GPU训练的优化 + +PaddlePaddle AMP功能对多卡GPU训练进行了深度优化。如图 2 所示,优化之前的参数梯度更新特点:梯度计算时虽然使用的是FP16数据类型,但是不同GPU卡之间的梯度传输数据类型仍为FP32。 + +
图 2. 不同GPU卡之间传输梯度使用FP32数据类型(优化前)
为了降低GPU多卡之间的梯度传输带宽,我们将梯度传输提前至`Cast`操作之前,而每个GPU卡在得到对应的FP16梯度后再执行`Cast`操作将其转变为FP32类型,具体操作详见图3。这一优化在训练大模型时对减少带宽占用尤其有效,如多卡训练BERT-Large模型。
图 3. 不同GPU卡之间传输梯度使用FP16数据类型(优化后)
### 训练性能对比(AMP VS FP32)

PaddlePaddle AMP技术在ResNet50、Transformer等模型上的训练速度相对于FP32训练均有可观的加速比,下面是ResNet50和ERNIE Large模型的AMP训练相对于FP32训练的加速效果。

图 4. Paddle AMP训练加速效果(横坐标为卡数,如8*8代表8机8卡)
从图 4 所示的图表可以看出,ResNet50的AMP训练相对于FP32训练的加速比可达 $2.8 \times$ 以上,而ERNIE Large的AMP训练相对于FP32训练的加速比亦可达 $1.7 \times \sim 2.1 \times$。

### 参考文献

* Mixed Precision Training
* 使用自动混合精度加速 PaddlePaddle 训练
* Tensor Layouts In Memory: NCHW vs NHWC
* Channels In And Out Requirements
* Matrix-Matrix Multiplication Requirements
diff --git a/doc/paddle/advanced_guide/performance_improving/analysis_tools/benchmark_cn.md b/doc/paddle/advanced_guide/performance_improving/analysis_tools/benchmark_cn.md new file mode 100644 index 0000000000000000000000000000000000000000..0aa3147114f101da547473fd6451a15e0ee0e212 --- /dev/null +++ b/doc/paddle/advanced_guide/performance_improving/analysis_tools/benchmark_cn.md @@ -0,0 +1,90 @@ +如何进行基准测试 +=============== +本文介绍如何给深度学习框架做基准测试。基准测试主要包含验证模型的精度和性能两方面,下文包含搭建测试环境,选择基准测试模型,验证测试结果等几方面内容。 + +验证深度学习框架,可分为训练和测试两个阶段, 验证指标略有不同,本文只介绍训练阶段的指标验证。训练阶段关注的是模型训练集上的精度,训练集是完备的,因此关注大batch\_size下的训练速度,关注吞吐量,例如图像模型常用的batch\_size=128, 多卡情况下会加大;预测阶段关注的是在测试集上的精度,线上服务测试数据不能提前收集,因此关注小batch\_size下的预测速度,关注延迟,例如预测服务常用的batch\_size=1, 4等。 + +[Fluid](https://github.com/PaddlePaddle/Paddle>)是PaddlePaddle从0.11.0版本开始引入的设计,本文的基准测试在该版本上完成。 + + +环境搭建 +======== + +基准测试中模型精度和硬件、框架无关,由模型结构和数据共同决定;性能方面由测试硬件和框架性能决定。框架基准测试为了对比框架之间的差异,控制硬件环境,系统库等版本一致。下文中的对比实验都在相同的硬件条件和系统环境条件下进行. + + +不同架构的GPU卡性能差异巨大,在验证模型在GPU上训练性能时,可使用NVIDIA提供的命令:```nvidia-smi``` 检验当前使用的GPU型号,如果测试多卡训练性能,需确认硬件连接是 [nvlink](https://zh.wikipedia.org/zh/NVLink)或 [PCIe](https://zh.wikipedia.org/zh-hans/PCI_Express)。 同样地,CPU型号会极大影响模型在CPU上的训练性能。可读取`/proc/cpuinfo`中的参数,确认当前正在使用的CPU型号。 + +下载GPU对应的Cuda Tool Kit和 Cudnn,或者使用NVIDIA官方发布的nvidia-docker镜像 [nvidia-docker](https://github.com/NVIDIA/nvidia-docker), 镜像内包含了Cuda和Cudnn,本文采用这种方式。 Cuda Tool Kit包含了GPU代码使用到的基础库,影响在此基础上编译出的Fluid二进制运行性能。 + +准备好Cuda环境后,从github上下载Paddle代码并编译,会生成对应的最适合当前GPU的sm\_arch二进制[sm\_arch](https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html)。另外,cudnn对卷积类任务影响巨大,在基准测试中需要小版本一致,例如Cudnn7.0.2与Cudnn7.1.4在Resnet上有5%以上差异。 + + +选择基准模型 +============ + +对框架做基准测试,需要覆盖不同训练任务和不同大小的模型,本文中选取了图像和NLP的最为常用的5个模型。 + +任务种类| 模型名称| 网络结构| 数据集 +:---:|:--:|:---:|:---: +图像生成| CycleGAN| GAN| horse2zebra +图像分类| SE-ResNeXt50| Resnet-50| image-net +语义分割| DeepLab_V3+| ResNets| cityscapes +自然语言| Bert| Transformer| Wikipedia +机器翻译| Transformer| Attention| Wikipedia + +CycleGAN, SE-ResNeXt50, DeepLab_V3+属于CNN模型, Bert, Transformer是一种比传统RNN模型更好的NLP模型。 +[benchmark](https://github.com/PaddlePaddle/Paddle/tree/develop/benchmark/fluid) +基准模型测试脚本中,均跳过了前几个batch的训练过程,原因是加载数据和分配显存受系统当前运行情况影响,会导致统计性能不准确。运行完若干个轮次后,统计对应指标。 + + +基准模型的数据的选择方面,数据量大且验证效果多的公开数据集为首选。图像模型CycleGAN选择了horse2zebra数据集,SE-ResNeXt50选择了[image-net](http://www.image-net.org/challenges/LSVRC/2012/nnoupb)数据集,图像大小预处理为和Imagenet相同大小,因此性能可直接对比。 +NLP模型的公开且影响力大数据集较少,Bert和Transformer模型都选择了[Wikipedia](https://dumps.wikimedia.org/enwiki/latest/enwiki-latest-pages-articles.xml.bz2)数据集。 + + +注意,图像模型每条样本大小相同,图像经过变换后大小一致,因此经过的计算路径基本相同,计算速度和显存占用波动较小,可以从若干个batch的数据中采样得到当前的训练性能数据。而NLP模型由于样本长度不定,计算路径和显存占用也不相同,因此只能完整运行若干个轮次后,统计速度和显存消耗。 +显存分配是特别耗时的操作,因此Fluid默认会占用所有可用显存空间形成显存池,用以加速计算过程中的显存分配。如果需要统计模型真实显存消耗,可设置环境变量`FLAGS_fraction_of_gpu_memory_to_use=0.0`,观察最大显存开销。 + + +测试过程 +======== + +- GPU 单机单卡测试 + +本教程使用了Cuda9, Cudnn7.0.1。来源为:```nvidia/cuda:9.0-cudnn7-devel-ubuntu16.04``` + +``` + nvidia-docker run -it --name CASE_NAME --security-opt seccomp=unconfined -v $PWD/benchmark:/benchmark -v /usr/lib/x86_64-linux-gnu:/usr/lib/x86_64-linux-gnu paddlepaddle/paddle:latest-dev /bin/bash +``` +在单卡上测试,设置CUDA的环境变量使用一块GPU,``CUDA_VISIBLE_DEVICES=0`` +然后代码中设置为使用CUDAPlace,如果使用Paddle代码库中的脚本,只需要命令行参数传入 use_gpu=True即可。 + +``` + >>> import paddle.fluid as fluid + >>> place = fluid.CUDAPlace(0) // 0 指第0块GPU +``` + +测试结果 +======== + +本教程对比相同环境下的Fluid1.4, Pytorch1.1.0和TensorFlow1.12.0的性能表现。 +硬件环境为 CPU: Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz, GPU: Tesla v100(volta) 21729MiB x 1, Nvidia-Driver 384.66。 
+系统环境为Ubuntu 16.04.3 LTS, 本文中采用了docker环境,系统版本为nvidia-docker17.05.0-ce。 +测试的Fluid版本为[v.1.4.1](https://github.com/PaddlePaddle/Paddle/tree/v1.4.1) 。 +TensorFlow版本为[v.1.12.0-rc2](https://github.com/tensorflow/tensorflow/tree/v1.12.0-rc2)。 +Pytorch版本为[v.1.1.0](https://github.com/pytorch/pytorch/tree/v1.1.0)。 +使用的脚本和配置见[benchmark](https://github.com/PaddlePaddle/Paddle/tree/develop/benchmark/fluid) 。 +SE-ResNeXt50对比的框架是Pytorch,因为tensorflow上没有对应的模型。 +图表中统计单位为samples/秒。 + + + +- GPU 单机单卡测试结果 + + Model|Fluid GPU| TensorFlow/Pytorch GPU + :---:|:--:|:---: + CycleGAN| 7.3 samples/s| 6.1 samples/s + SE-ResNeXt50| 169.4 samples/s | 153.1 samples/s + DeepLab_V3+| 12.8 samples/s | 6.4 samples/s + Bert| 4.0 samples/s | 3.4 samples/s + Transformer| 4.9 samples/s | 4.7 samples/s diff --git a/doc/paddle/advanced_guide/performance_improving/analysis_tools/cpu_profiling_cn.md b/doc/paddle/advanced_guide/performance_improving/analysis_tools/cpu_profiling_cn.md new file mode 100644 index 0000000000000000000000000000000000000000..198a05a79e19227e90eaafe116217a164cd51a7d --- /dev/null +++ b/doc/paddle/advanced_guide/performance_improving/analysis_tools/cpu_profiling_cn.md @@ -0,0 +1,183 @@ +# CPU性能调优 + +此教程会介绍如何使用Python的cProfile包、Python库yep、Google perftools来进行性能分析 (profiling) 与调优(performance tuning)。 + +Profling 指发现性能瓶颈。系统中的瓶颈可能和程序员开发过程中想象的瓶颈相去甚远。Tuning 指消除瓶颈。性能优化的过程通常是不断重复地 profiling 和 tuning。 + +PaddlePaddle 用户一般通过调用 Python API 编写深度学习程序。大部分 Python API 调用用 C++ 写的 libpaddle.so。所以 PaddlePaddle 的性能分析与调优分为两个部分: + +* Python 代码的性能分析 +* Python 与 C++ 混合代码的性能分析 + + +## Python代码的性能分析 + +### 生成性能分析文件 + +Python标准库中提供了性能分析的工具包,[cProfile](https://docs.python.org/2/library/profile.html)。生成Python性能分析的命令如下: + +```bash +python -m cProfile -o profile.out main.py +``` + +其中 `main.py` 是我们要分析的程序,`-o`标识了一个输出的文件名,用来存储本次性能分析的结果。如果不指定这个文件,`cProfile`会打印到标准输出。 + +### 查看性能分析文件 + +`cProfile` 在main.py 运行完毕后输出`profile.out`。我们可以使用[`cprofilev`](https://github.com/ymichael/cprofilev)来查看性能分析结果。`cprofilev`是一个Python的第三方库。使用它会开启一个HTTP服务,将性能分析结果以网页的形式展示出来: + +```bash +cprofilev -a 0.0.0.0 -p 3214 -f profile.out main.py +``` + +其中`-a`标识HTTP服务绑定的IP。使用`0.0.0.0`允许外网访问这个HTTP服务。`-p`标识HTTP服务的端口。`-f`标识性能分析的结果文件。`main.py`标识被性能分析的源文件。 + +用Web浏览器访问对应网址,即可显示性能分析的结果: + +``` + ncalls tottime percall cumtime percall filename:lineno(function) + 1 0.284 0.284 29.514 29.514 main.py:1() + 4696 0.128 0.000 15.748 0.003 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/executor.py:20(run) + 4696 12.040 0.003 12.040 0.003 {built-in method run} + 1 0.144 0.144 6.534 6.534 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/__init__.py:14() +``` + +每一列的含义是: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+| 列名 | 含义 |
+| :--- | :--- |
+| ncalls | 函数的调用次数 |
+| tottime | 函数实际使用的总时间。该时间去除掉本函数调用其他函数的时间 |
+| percall | tottime 的每次调用平均时间 |
+| cumtime | 函数总时间。包含这个函数调用其他函数的时间 |
+| percall | cumtime 的每次调用平均时间 |
+| filename:lineno(function) | 文件名、行号、函数名 |
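+
+如果不方便启动 HTTP 服务,也可以直接用 Python 标准库中的 `pstats` 模块读取同一个分析文件。下面是一个仅作演示的最小示例,文件名沿用上文的 `profile.out`,热点函数以后文提到的 `sync_with_cpp` 为例:
+
+```python
+import pstats
+
+# 读取 `python -m cProfile -o profile.out main.py` 生成的分析文件
+stats = pstats.Stats("profile.out")
+
+# 按函数自身耗时(即表中的 tottime,排序键为 "time")降序,打印最耗时的 10 个函数
+stats.sort_stats("time").print_stats(10)
+
+# 查看某个热点函数(例如 sync_with_cpp)被哪些函数调用,辅助分析调用关系
+stats.print_callers("sync_with_cpp")
+```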
+ + +### 寻找性能瓶颈 + +通常`tottime`和`cumtime`是寻找瓶颈的关键指标。这两个指标代表了某一个函数真实的运行时间。 + +将性能分析结果按照tottime排序,效果如下: + +```text + 4696 12.040 0.003 12.040 0.003 {built-in method run} + 300005 0.874 0.000 1.681 0.000 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/dataset/mnist.py:38(reader) + 107991 0.676 0.000 1.519 0.000 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/framework.py:219(__init__) + 4697 0.626 0.000 2.291 0.000 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/framework.py:428(sync_with_cpp) + 1 0.618 0.618 0.618 0.618 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/__init__.py:1() +``` + +可以看到最耗时的函数是C++端的`run`函数。这需要联合我们第二节`Python`与`C++`混合代码的性能分析来进行调优。而`sync_with_cpp`函数的总共耗时很长,每次调用的耗时也很长。于是我们可以点击`sync_with_cpp`的详细信息,了解其调用关系。 + +```text +Called By: + + Ordered by: internal time + List reduced from 4497 to 2 due to restriction <'sync_with_cpp'> + +Function was called by... + ncalls tottime cumtime +/home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/framework.py:428(sync_with_cpp) <- 4697 0.626 2.291 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/framework.py:562(sync_with_cpp) +/home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/framework.py:562(sync_with_cpp) <- 4696 0.019 2.316 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/framework.py:487(clone) + 1 0.000 0.001 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/framework.py:534(append_backward) + + +Called: + + Ordered by: internal time + List reduced from 4497 to 2 due to restriction <'sync_with_cpp'> +``` + +通常观察热点函数间的调用关系,和对应行的代码,就可以了解到问题代码在哪里。当我们做出性能修正后,再次进行性能分析(profiling)即可检查我们调优后的修正是否能够改善程序的性能。 + + + +## Python与C++混合代码的性能分析 + +### 生成性能分析文件 + +C++的性能分析工具非常多。常见的包括`gprof`, `valgrind`, `google-perftools`。但是调试Python中使用的动态链接库与直接调试原始二进制相比增加了很多复杂度。幸而Python的一个第三方库`yep`提供了方便的和`google-perftools`交互的方法。于是这里使用`yep`进行Python与C++混合代码的性能分析 + +使用`yep`前需要安装`google-perftools`与`yep`包。ubuntu下安装命令为 + +```bash +apt update +apt install libgoogle-perftools-dev +pip install yep +``` + +安装完毕后,我们可以通过 + +```bash +python -m yep -v main.py +``` + +生成性能分析文件。生成的性能分析文件为`main.py.prof`。 + +命令行中的`-v`指定在生成性能分析文件之后,在命令行显示分析结果。我们可以在命令行中简单的看一下生成效果。因为C++与Python不同,编译时可能会去掉调试信息,运行时也可能因为多线程产生混乱不可读的性能分析结果。为了生成更可读的性能分析结果,可以采取下面几点措施: + +1. 编译时指定`-g`生成调试信息。使用cmake的话,可以将CMAKE_BUILD_TYPE指定为`RelWithDebInfo`。 +2. 编译时一定要开启优化。单纯的`Debug`编译性能会和`-O2`或者`-O3`有非常大的差别。`Debug`模式下的性能测试是没有意义的。 +3. 
运行性能分析的时候,先从单线程开始,再开启多线程,进而多机。毕竟单线程调试更容易。可以设置`OMP_NUM_THREADS=1`这个环境变量关闭openmp优化。 + +### 查看性能分析文件 + +在运行完性能分析后,会生成性能分析结果文件。我们可以使用[`pprof`](https://github.com/google/pprof)来显示性能分析结果。注意,这里使用了用`Go`语言重构后的`pprof`,因为这个工具具有web服务界面,且展示效果更好。 + +安装`pprof`的命令和一般的`Go`程序是一样的,其命令如下: + +```bash +go get github.com/google/pprof +``` + +进而我们可以使用如下命令开启一个HTTP服务: + +```bash +pprof -http=0.0.0.0:3213 `which python` ./main.py.prof +``` + +这行命令中,`-http`指开启HTTP服务。`which python`会产生当前Python二进制的完整路径,进而指定了Python可执行文件的路径。`./main.py.prof`输入了性能分析结果。 + +访问对应的网址,我们可以查看性能分析的结果。结果如下图所示: + +![result](./pprof_1.png) + + +### 寻找性能瓶颈 + +与寻找Python代码的性能瓶颈类似,寻找Python与C++混合代码的性能瓶颈也是要看`tottime`和`cumtime`。而`pprof`展示的调用图也可以帮助我们发现性能中的问题。 + +例如下图中, + +![kernel_perf](./pprof_2.png) + +在一次训练中,乘法和乘法梯度的计算占用2%-4%左右的计算时间。而`MomentumOp`占用了17%左右的计算时间。显然,`MomentumOp`的性能有问题。 + +在`pprof`中,对于性能的关键路径都做出了红色标记。先检查关键路径的性能问题,再检查其他部分的性能问题,可以更有次序的完成性能的优化。 diff --git a/doc/paddle/advanced_guide/performance_improving/analysis_tools/cpu_profiling_en.md b/doc/paddle/advanced_guide/performance_improving/analysis_tools/cpu_profiling_en.md new file mode 100644 index 0000000000000000000000000000000000000000..216694965b3c878a8a5f3ccd2a0cba8d21d9ce05 --- /dev/null +++ b/doc/paddle/advanced_guide/performance_improving/analysis_tools/cpu_profiling_en.md @@ -0,0 +1,224 @@ +# Tune CPU performance + +This tutorial introduces techniques we use to profile and tune the +CPU performance of PaddlePaddle. We will use Python packages +`cProfile` and `yep`, and Google's `perftools`. + +Profiling is the process that reveals performance bottlenecks, +which could be very different from what's in the developers' mind. +Performance tuning is done to fix these bottlenecks. Performance optimization +repeats the steps of profiling and tuning alternatively. + +PaddlePaddle users program AI applications by calling the Python API, which calls +into `libpaddle.so.` written in C++. In this tutorial, we focus on +the profiling and tuning of + +1. the Python code and +1. the mixture of Python and C++ code. + +## Profiling the Python Code + +### Generate the Performance Profiling File + +We can use Python standard +package, [`cProfile`](https://docs.python.org/2/library/profile.html), +to generate Python profiling file. For example: + +```bash +python -m cProfile -o profile.out main.py +``` + +where `main.py` is the program we are going to profile, `-o` specifies +the output file. Without `-o`, `cProfile` would outputs to standard +output. + +### Look into the Profiling File + +`cProfile` generates `profile.out` after `main.py` completes. We can +use [`cprofilev`](https://github.com/ymichael/cprofilev) to look into +the details: + +```bash +cprofilev -a 0.0.0.0 -p 3214 -f profile.out main.py +``` + +where `-a` specifies the HTTP IP, `-p` specifies the port, `-f` +specifies the profiling file, and `main.py` is the source file. 
+ +Open the Web browser and points to the local IP and the specifies +port, we will see the output like the following: + +``` + ncalls tottime percall cumtime percall filename:lineno(function) + 1 0.284 0.284 29.514 29.514 main.py:1() + 4696 0.128 0.000 15.748 0.003 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/executor.py:20(run) + 4696 12.040 0.003 12.040 0.003 {built-in method run} + 1 0.144 0.144 6.534 6.534 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/__init__.py:14() +``` + +where each line corresponds to Python function, and the meaning of +each column is as follows: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+| column | meaning |
+| :--- | :--- |
+| ncalls | the number of calls into a function |
+| tottime | the total execution time of the function, not including the execution time of other functions called by the function |
+| percall | tottime divided by ncalls |
+| cumtime | the total execution time of the function, including the execution time of other functions being called |
+| percall | cumtime divided by ncalls |
+| filename:lineno(function) | where the function is defined |
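+
+If you would rather not start an HTTP service, the standard-library `pstats` module can read the same profiling file directly. The snippet below is a minimal sketch for illustration only; it reuses the `profile.out` file generated above and the hot function `sync_with_cpp` discussed below:
+
+```python
+import pstats
+
+# Load the file produced by `python -m cProfile -o profile.out main.py`
+stats = pstats.Stats("profile.out")
+
+# Sort by internal time (the tottime column; the sort key is "time")
+# and print the 10 most expensive functions.
+stats.sort_stats("time").print_stats(10)
+
+# Show which functions call a given hot spot, e.g. sync_with_cpp.
+stats.print_callers("sync_with_cpp")
+```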
+ +### Identify Performance Bottlenecks + +Usually, `tottime` and the related `percall` time is what we want to +focus on. We can sort above profiling file by tottime: + +```text + 4696 12.040 0.003 12.040 0.003 {built-in method run} + 300005 0.874 0.000 1.681 0.000 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/dataset/mnist.py:38(reader) + 107991 0.676 0.000 1.519 0.000 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/framework.py:219(__init__) + 4697 0.626 0.000 2.291 0.000 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/framework.py:428(sync_with_cpp) + 1 0.618 0.618 0.618 0.618 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/__init__.py:1() +``` + +We can see that the most time-consuming function is the `built-in +method run`, which is a C++ function in `libpaddle.so`. We will +explain how to profile C++ code in the next section. At this +moment, let's look into the third function `sync_with_cpp`, which is a +Python function. We can click it to understand more about it: + +``` +Called By: + + Ordered by: internal time + List reduced from 4497 to 2 due to restriction <'sync_with_cpp'> + +Function was called by... + ncalls tottime cumtime +/home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/framework.py:428(sync_with_cpp) <- 4697 0.626 2.291 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/framework.py:562(sync_with_cpp) +/home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/framework.py:562(sync_with_cpp) <- 4696 0.019 2.316 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/framework.py:487(clone) + 1 0.000 0.001 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/framework.py:534(append_backward) + + +Called: + + Ordered by: internal time + List reduced from 4497 to 2 due to restriction <'sync_with_cpp'> +``` + +The lists of the callers of `sync_with_cpp` might help us understand +how to improve the function definition. + +## Profiling Python and C++ Code + +### Generate the Profiling File + +To profile a mixture of Python and C++ code, we can use a Python +package, `yep`, that can work with Google's `perftools`, which is a +commonly-used profiler for C/C++ code. + +In Ubuntu systems, we can install `yep` and `perftools` by running the +following commands: + +```bash +apt update +apt install libgoogle-perftools-dev +pip install yep +``` + +Then we can run the following command + +```bash +python -m yep -v main.py +``` + +to generate the profiling file. The default filename is +`main.py.prof`. + +Please be aware of the `-v` command line option, which prints the +analysis results after generating the profiling file. By examining the + the print result, we'd know that if we stripped debug +information from `libpaddle.so` at build time. The following hints +help make sure that the analysis results are readable: + +1. Use GCC command line option `-g` when building `libpaddle.so` so to + include the debug information. The standard building system of + PaddlePaddle is CMake, so you might want to set + `CMAKE_BUILD_TYPE=RelWithDebInfo`. + +1. Use GCC command line option `-O2` or `-O3` to generate optimized + binary code. It doesn't make sense to profile `libpaddle.so` + without optimization, because it would anyway run slowly. + +1. Profiling the single-threaded binary file before the + multi-threading version, because the latter often generates tangled + profiling analysis result. 
You might want to set environment + variable `OMP_NUM_THREADS=1` to prevents OpenMP from automatically + starting multiple threads. + +### Examining the Profiling File + +The tool we used to examine the profiling file generated by +`perftools` is [`pprof`](https://github.com/google/pprof), which +provides a Web-based GUI like `cprofilev`. + +We can rely on the standard Go toolchain to retrieve the source code +of `pprof` and build it: + +```bash +go get github.com/google/pprof +``` + +Then we can use it to profile `main.py.prof` generated in the previous +section: + +```bash +pprof -http=0.0.0.0:3213 `which python` ./main.py.prof +``` + +Where `-http` specifies the IP and port of the HTTP service. +Directing our Web browser to the service, we would see something like +the following: + +![result](./pprof_1.png) + +### Identifying the Performance Bottlenecks + +Similar to how we work with `cprofilev`, we'd focus on `tottime` and +`cumtime`. + +![kernel_perf](./pprof_2.png) + +We can see that the execution time of multiplication and the computing +of the gradient of multiplication takes 2% to 4% of the total running +time, and `MomentumOp` takes about 17%. Obviously, we'd want to +optimize `MomentumOp`. + +`pprof` would mark performance critical parts of the program in +red. It's a good idea to follow the hints. diff --git a/doc/paddle/advanced_guide/performance_improving/analysis_tools/host_memory_profiling_en.md b/doc/paddle/advanced_guide/performance_improving/analysis_tools/host_memory_profiling_en.md new file mode 100644 index 0000000000000000000000000000000000000000..b1dbee1bd45efebdc23987231144bf5746baf8e1 --- /dev/null +++ b/doc/paddle/advanced_guide/performance_improving/analysis_tools/host_memory_profiling_en.md @@ -0,0 +1,87 @@ +# Heap Memory Profiling and Optimization + +Any computer program has the danger of memory leak. Generally, **Memory Leak** is caused by the unreleased heap memory allocated by the program. As the memory occupied by the program becomes larger and larger, it will affect the stability of the program, which may make the running speed slower or give rise to OoM(Out of Memory). It even compromises the stability of the machine in use, and leads to *downtime* . + + +There are many memory leak analysis tools at present. Typical ones include, [valgrind](http://valgrind.org/docs/manual/quick-start.html#quick-start.intro), [gperftools](https://gperftools.github.io/gperftools/). + +Because Fluid runs in C++ core driven by Python, It is very difficult for valgrind to analyze directly. You need to compile the debug version and dedicated Python version with valgrind support, and most of the output information is Python's own symbols and call information. In addition, valgrind will make the program run very slowly, so it is not recommended. + +Here we mainly introduce the use of [gperftools](https://gperftools.github.io/gperftools/) . + +gperftool mainly supports four functions: + +- thread-caching malloc +- heap-checking using tcmalloc +- heap-profiling using tcmalloc +- CPU profiler + +Paddle also provides a [tutorial on CPU performance analysis](./cpu_profiling_en.html) based on gperftool. + +For the analysis for heap, we mainly use thread-caching malloc and heap-profiling using tcmalloc. + +## Environment + +This tutorial is based on the Docker development environment paddlepaddle/paddle:latest-dev provided by paddle, based on the Ubuntu 16.04.4 LTS environment. 
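+
+In the steps below, `trainer.py` stands for whatever training script is being profiled. Any Paddle program will do; purely for illustration, a minimal hypothetical stand-in (modeled on the fluid examples elsewhere in this guide) could look like:
+
+```python
+import paddle.fluid as fluid
+
+# A tiny network, only meant to give the heap profiler something to run.
+data = fluid.layers.fill_constant(shape=[1, 32], value=0.5, dtype='float32')
+label = fluid.layers.fill_constant(shape=[1, 1], value=1.0, dtype='float32')
+fc = fluid.layers.fc(input=data, size=1)
+cost = fluid.layers.square_error_cost(input=fc, label=label)
+avg_cost = fluid.layers.mean(cost)
+fluid.optimizer.SGD(learning_rate=0.01).minimize(avg_cost)
+
+place = fluid.CPUPlace()
+exe = fluid.Executor(place)
+exe.run(fluid.default_startup_program())
+
+# Run long enough for several heap snapshots to be dumped.
+for i in range(10000):
+    exe.run(fluid.default_main_program(), fetch_list=[avg_cost])
+```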
+ +## Manual + +- Install google-perftools + +``` +apt-get install libunwind-dev +apt-get install google-perftools +``` + +- Install pprof + +``` +go get -u github.com/google/pprof +``` + +- Configure Running Environment + +``` +export PPROF_PATH=/root/gopath/bin/pprof +export PPROF_BINARY_PATH=/root/gopath/bin/pprof +export LD_PRELOAD=/usr/lib/libtcmalloc.so.4 +``` + +- Use heap profile to run python program. The essence of it is to get a snapshot of the heap allocation periodically. + +``` +# HEAPPROFILE sets the directory and file prefix of the generated heap analysis file +# HEAP_PROFILE_ALLOCATION_INTERVAL Sets how many storage dumps are allocated for each dump, default 1GB +env HEAPPROFILE="./perf_log/test.log" HEAP_PROFILE_ALLOCATION_INTERVAL=209715200 python trainer.py +``` + +As the program runs, a lot of files will be generated in the perf_log folder as follows: + +``` +-rw-r--r-- 1 root root 1.0M Jun 1 15:00 test.log.0001.heap +-rw-r--r-- 1 root root 1.0M Jun 1 15:00 test.log.0002.heap +-rw-r--r-- 1 root root 1.0M Jun 1 15:00 test.log.0003.heap +-rw-r--r-- 1 root root 1.0M Jun 1 15:00 test.log.0004.heap +-rw-r--r-- 1 root root 1.0M Jun 1 15:00 test.log.0005.heap +-rw-r--r-- 1 root root 1.0M Jun 1 15:00 test.log.0006.heap +``` + +- Analyze the heap files with pprof. There are two modes of analysis: + - Complete mode. An analysis of the current heap is performed, showing some of the call paths for the current allocation of memory. + + ``` + pprof --pdf python test.log.0012.heap + ``` + The command above will generate a file of profile00x.pdf, which can be opened directly, for example, [memory_cpu_allocator](https://github.com/jacquesqiao/Paddle/blob/bd2ea0e1f84bb6522a66d44a072598153634cade/doc/fluid/howto/optimization/memory_cpu_allocator.pdf). As demonstrated in the chart below, during the running of the CPU version fluid, the module CPUAllocator is allocated with most memory. Other modules are allocated with relatively less memory, so they are ignored. It is very inconvenient for inspecting memory leak for memory leak is a chronic process which cannot be inspected in this picture. + ![result](https://user-images.githubusercontent.com/3048612/40964027-a54033e4-68dc-11e8-836a-144910c4bb8c.png) + + - Diff mode. You can do diff on the heap at two moments, which removes some modules whose memory allocation has not changed, and displays the incremental part. + ``` + pprof --pdf --base test.log.0010.heap python test.log.1045.heap + ``` + The generated result: [`memory_leak_protobuf`](https://github.com/jacquesqiao/Paddle/blob/bd2ea0e1f84bb6522a66d44a072598153634cade/doc/fluid/howto/optimization/memory_leak_protobuf.pdf) + + As shown from the figure: The structure of ProgramDesc has increased by 200MB+ between the two versions, so there is a large possibility that memory leak happens here, and the final result does prove a leak here. + + ![result](https://user-images.githubusercontent.com/3048612/40964057-b434d5e4-68dc-11e8-894b-8ab62bcf26c2.png) + ![result](https://user-images.githubusercontent.com/3048612/40964063-b7dbee44-68dc-11e8-9719-da279f86477f.png) diff --git a/doc/paddle/advanced_guide/performance_improving/analysis_tools/index_cn.rst b/doc/paddle/advanced_guide/performance_improving/analysis_tools/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3bb5ba2c568fc5e6c78485c3cc60a66e3a2841bf --- /dev/null +++ b/doc/paddle/advanced_guide/performance_improving/analysis_tools/index_cn.rst @@ -0,0 +1,18 @@ +.. 
_api_guide_analysis_tools: + +############### +性能优化分析及工具 +############### + +.. toctree:: + :hidden: + + cpu_profiling_cn.md + host_memory_profiling_cn.md + timeline_cn.md + +本模块介绍 Fluid 使用过程中的调优方法,包括: + +- `CPU性能调优 `_:介绍如何使用 cProfile 包、yep库、Google perftools 进行性能分析与调优 +- `堆内存分析和优化 `_:介绍如何使用 gperftool 进行堆内存分析和优化,以解决内存泄漏的问题 +- `Timeline工具简介 `_ :介绍如何使用 Timeline 工具进行性能分析和调优 diff --git a/doc/paddle/advanced_guide/performance_improving/analysis_tools/index_en.rst b/doc/paddle/advanced_guide/performance_improving/analysis_tools/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..d83eeaeec51a54ec132a0a36de443f4a2903f026 --- /dev/null +++ b/doc/paddle/advanced_guide/performance_improving/analysis_tools/index_en.rst @@ -0,0 +1,20 @@ +####################################### +Performance Profiling and Optimization +####################################### + +.. toctree:: + :hidden: + + + cpu_profiling_en.md + host_memory_profiling_en.md + timeline_en.md + +This section illustrates how to optimize performance of Fluid: + + +- `CPU profiling `_:How to use cProfile, yep, and Google perftools to profile and optimize model performance +- `Heap Memory Profiling and Optimization `_:Use gperftool to perform Heap Memory Profiling and Optimization to solve memory leaks. +- `How to use timeline tool to do profiling `_ :How to use timeline tool to do profile and optimization + + diff --git a/doc/paddle/advanced_guide/performance_improving/analysis_tools/nvvp1.png b/doc/paddle/advanced_guide/performance_improving/analysis_tools/nvvp1.png new file mode 100644 index 0000000000000000000000000000000000000000..1af23ac3c52929b2b0645d2f9fa4d4c6db1f6e77 Binary files /dev/null and b/doc/paddle/advanced_guide/performance_improving/analysis_tools/nvvp1.png differ diff --git a/doc/paddle/advanced_guide/performance_improving/analysis_tools/nvvp2.png b/doc/paddle/advanced_guide/performance_improving/analysis_tools/nvvp2.png new file mode 100644 index 0000000000000000000000000000000000000000..177c9db708da6863d1075f3e615f5962dbe18b29 Binary files /dev/null and b/doc/paddle/advanced_guide/performance_improving/analysis_tools/nvvp2.png differ diff --git a/doc/paddle/advanced_guide/performance_improving/analysis_tools/nvvp3.png b/doc/paddle/advanced_guide/performance_improving/analysis_tools/nvvp3.png new file mode 100644 index 0000000000000000000000000000000000000000..d8f393667d6569b6f1e61ffccac43fae5888b6db Binary files /dev/null and b/doc/paddle/advanced_guide/performance_improving/analysis_tools/nvvp3.png differ diff --git a/doc/paddle/advanced_guide/performance_improving/analysis_tools/nvvp4.png b/doc/paddle/advanced_guide/performance_improving/analysis_tools/nvvp4.png new file mode 100644 index 0000000000000000000000000000000000000000..51f2f3e183295de6cf8ddaf2b3b8a0862aa35f01 Binary files /dev/null and b/doc/paddle/advanced_guide/performance_improving/analysis_tools/nvvp4.png differ diff --git a/doc/paddle/advanced_guide/performance_improving/analysis_tools/pprof_1.png b/doc/paddle/advanced_guide/performance_improving/analysis_tools/pprof_1.png new file mode 100644 index 0000000000000000000000000000000000000000..8e9edbf377672d0ef40f2fc7bd39e746923550cb Binary files /dev/null and b/doc/paddle/advanced_guide/performance_improving/analysis_tools/pprof_1.png differ diff --git a/doc/paddle/advanced_guide/performance_improving/analysis_tools/pprof_2.png b/doc/paddle/advanced_guide/performance_improving/analysis_tools/pprof_2.png new file mode 100644 index 
0000000000000000000000000000000000000000..172ba20399ba974d27f4c072425277b69b02520b Binary files /dev/null and b/doc/paddle/advanced_guide/performance_improving/analysis_tools/pprof_2.png differ diff --git a/doc/paddle/advanced_guide/performance_improving/analysis_tools/timeline.jpeg b/doc/paddle/advanced_guide/performance_improving/analysis_tools/timeline.jpeg new file mode 100644 index 0000000000000000000000000000000000000000..38ec3f80c982857531f30a8bb0fa26ea5bf05385 Binary files /dev/null and b/doc/paddle/advanced_guide/performance_improving/analysis_tools/timeline.jpeg differ diff --git a/doc/paddle/advanced_guide/performance_improving/analysis_tools/timeline_cn.md b/doc/paddle/advanced_guide/performance_improving/analysis_tools/timeline_cn.md new file mode 100644 index 0000000000000000000000000000000000000000..e40afcf3f4cc311747de9be5cbe9eacc2ca44175 --- /dev/null +++ b/doc/paddle/advanced_guide/performance_improving/analysis_tools/timeline_cn.md @@ -0,0 +1,77 @@ +# timeline工具简介 + +## 本地使用 + +1. 在训练的主循环外加上`profiler.start_profiler(...)`和`profiler.stop_profiler(...)`。运行之后,代码会在`/tmp/profile`目录下生成一个profile的记录文件。 + + **提示:** + 请不要在timeline记录信息时运行太多次迭代,因为timeline中的记录数量和迭代次数是成正比的。 + + ```python + import numpy as np + import paddle + import paddle.fluid as fluid + from paddle.fluid import profiler + + place = fluid.CPUPlace() + + def reader(): + for i in range(100): + yield [np.random.random([4]).astype('float32'), np.random.random([3]).astype('float32')], + + main_program = fluid.Program() + startup_program = fluid.Program() + + with fluid.program_guard(main_program, startup_program): + data_1 = fluid.layers.data(name='data_1', shape=[1, 2, 2]) + data_2 = fluid.layers.data(name='data_2', shape=[1, 1, 3]) + out = fluid.layers.fc(input=[data_1, data_2], size=2) + # ... + + feeder = fluid.DataFeeder([data_1, data_2], place) + exe = fluid.Executor(place) + exe.run(startup_program) + pass_num = 10 + + for pass_id in range(pass_num): + for batch_id, data in enumerate(reader()): + if pass_id == 0 and batch_id == 5: + profiler.start_profiler("All") + elif pass_id == 0 and batch_id == 10: + profiler.stop_profiler("total", "/tmp/profile") + outs = exe.run(program=main_program, + feed=feeder.feed(data), + fetch_list=[out]) + + ``` + +1. 运行`python paddle/tools/timeline.py`来处理`/tmp/profile`,这个程序默认会生成一个`/tmp/timeline`文件,你也可以用命令行参数来修改这个路径,请参考[timeline.py](https://github.com/PaddlePaddle/Paddle/blob/develop/tools/timeline.py)。 +```python +python Paddle/tools/timeline.py --profile_path=/tmp/profile --timeline_path=timeline +``` + +1. 打开chrome浏览器,访问,用`load`按钮来加载生成的`timeline`文件。 + + +1. 结果如下图所示,可以放大来查看timeline的细节信息。 + + ![chrome timeline](./timeline.jpeg) + +## 分布式使用 +一般来说,分布式的训练程序都会有两种程序:pserver和trainer。我们提供了把pserver和trainer的profile日志用timeline来显示的方式。 + +1. trainer打开方式与[本地使用](#local)部分的第1步相同 + +1. pserver可以通过加两个环境变量打开profile,例如: +``` +FLAGS_rpc_server_profile_period=10 FLAGS_rpc_server_profile_path=./tmp/pserver python train.py +``` + +3. 把pserver和trainer的profile文件生成一个timeline文件,例如: +``` +python /paddle/tools/timeline.py + --profile_path trainer0=local_profile_10_pass0_0,trainer1=local_profile_10_pass0_1,pserver0=./pserver_0,pserver1=./pserver_1 + --timeline_path ./dist.timeline +``` + +4. 
在chrome中加载dist.timeline文件,方法和[本地使用](#local)第4步相同。 diff --git a/doc/paddle/advanced_guide/performance_improving/analysis_tools/timeline_en.md b/doc/paddle/advanced_guide/performance_improving/analysis_tools/timeline_en.md new file mode 100644 index 0000000000000000000000000000000000000000..fb51802a168452a0649ebbcd0a6f4d37c07ea823 --- /dev/null +++ b/doc/paddle/advanced_guide/performance_improving/analysis_tools/timeline_en.md @@ -0,0 +1,79 @@ +# How to use timeline tool to do profile + +## Local + +1. Add `profiler.start_profiler(...)` and `profiler.stop_profiler(...)` to the main training loop. After run, the code will generate a profile record file `/tmp/profile`. **Warning**: Please do not run too many batches when use profiler to record timeline information, for the profile record will grow with the batch number. + + ```python + + import numpy as np + import paddle + import paddle.fluid as fluid + from paddle.fluid import profiler + + place = fluid.CPUPlace() + + def reader(): + for i in range(100): + yield [np.random.random([4]).astype('float32'), np.random.random([3]).astype('float32')], + + main_program = fluid.Program() + startup_program = fluid.Program() + + with fluid.program_guard(main_program, startup_program): + data_1 = fluid.layers.data(name='data_1', shape=[1, 2, 2]) + data_2 = fluid.layers.data(name='data_2', shape=[1, 1, 3]) + out = fluid.layers.fc(input=[data_1, data_2], size=2) + # ... + + feeder = fluid.DataFeeder([data_1, data_2], place) + exe = fluid.Executor(place) + exe.run(startup_program) + pass_num = 10 + + for pass_id in range(pass_num): + for batch_id, data in enumerate(reader()): + if pass_id == 0 and batch_id == 5: + profiler.start_profiler("All") + elif pass_id == 0 and batch_id == 10: + profiler.stop_profiler("total", "/tmp/profile") + outs = exe.run(program=main_program, + feed=feeder.feed(data), + fetch_list=[out]) + + ``` + +2. Run `python paddle/tools/timeline.py` to process `/tmp/profile`, it will generate another +file `/tmp/timeline` by default. You can change the path by cmd parameter, please take a look at +[timeline.py](https://github.com/PaddlePaddle/Paddle/blob/develop/tools/timeline.py) for details. +```python +python Paddle/tools/timeline.py --profile_path=/tmp/profile --timeline_path=timeline +``` + +3. Open chrome and visit , use `load` button to load the generated `timeline` file. + + + + +4. The result timeline should be like: + + ![chrome timeline](./timeline.jpeg) + +## Distributed +This tool can support distributed train programs(pserver and trainer) too. + +1. Open traniner profiler just like how to use in [local](#local). + +2. Open pserver profiler: add two environment variables, e.g.: +``` +FLAGS_rpc_server_profile_period=10 FLAGS_rpc_server_profile_path=./tmp/pserver python train.py +``` + +3. Merge pservers' and trainers' profiler file, e.g.: +``` +python /paddle/tools/timeline.py + --profile_path trainer0=local_profile_10_pass0_0,trainer1=local_profile_10_pass0_1,pserver0=./pserver_0,pserver1=./pserver_1 + --timeline_path ./dist.timeline +``` + +4. 
Load `dist.timeline` in chrome just like the [fourth step in Local](#local_step_4) diff --git a/doc/paddle/advanced_guide/performance_improving/device_switching/device_switching.md b/doc/paddle/advanced_guide/performance_improving/device_switching/device_switching.md new file mode 100644 index 0000000000000000000000000000000000000000..2e46207b43854c8e08217c1cb8b6673ed3ff0c96 --- /dev/null +++ b/doc/paddle/advanced_guide/performance_improving/device_switching/device_switching.md @@ -0,0 +1,199 @@ +# 运行时设备切换 + +Paddle提供了[fluid.CUDAPlace](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/fluid_cn/CUDAPlace_cn.html)以及[fluid.CPUPlace](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/fluid_cn/CPUPlace_cn.html)用于指定运行时的设备。这两个接口用于指定全局的设备,从1.8版本开始,Paddle提供了[device_guard](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api_cn/fluid_cn/device_guard_cn.html)接口,用于指定部分OP的运行设备,此教程会介绍device_guard的使用场景,以及如何使用该接口对模型进行优化。 + +如果使用了`fluid.CUDAPlace`设置了全局的执行设备,框架将尽可能地将OP设置在GPU上执行,因此有可能会遇到显存不够的情况。`device_guard`可以用于设置OP的执行设备,如果将部分层设置在CPU上运行,就能够充分利用CPU大内存的优势,避免显存超出。 + +有时尽管指定了全局的执行设备为GPU,但框架在自动分配OP执行设备时,可能会将部分OP设置在CPU上执行。另外,个别OP会将输出存储在CPU上。在以上的场景中,常常会发生不同设备间的数据传输,可能会影响模型的性能。使用`device_guard`可以避免模型运行中不必要的数据传输。在下面的内容中,将会详细介绍如何通过[profile](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api_cn/profiler_cn.html)工具分析数据传输开销,以及如何使用`device_guard`避免不必要的数据传输,从而提升模型性能。 + +## 如何避免显存超出 + +下面示例代码中的`embedding`层,其参数`size`包含两个元素,第一个元素为`vocab_size` (词表大小), 第二个为`emb_size`(`embedding`层维度)。实际场景中,词表可能会非常大。示例代码中,词表大小被设置为10000000。如果在GPU模式下运行,该层创建的权重矩阵的大小为(10000000, 150),仅这一层就需要5.59G的显存,如果词表大小继续增加,极有可能会导致显存超出。 + +```python +import paddle.fluid as fluid + +data = fluid.layers.fill_constant(shape=[1], value=128, dtype='int64') +label = fluid.layers.fill_constant(shape=[1, 150], value=0.5, dtype='float32') +emb = fluid.embedding(input=data, size=(10000000, 150), dtype='float32') +out = fluid.layers.l2_normalize(x=emb, axis=-1) + +cost = fluid.layers.square_error_cost(input=out, label=label) +avg_cost = fluid.layers.mean(cost) +sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) +sgd_optimizer.minimize(avg_cost) + +place = fluid.CUDAPlace(0) +exe = fluid.Executor(place) +exe.run(fluid.default_startup_program()) +result = exe.run(fluid.default_main_program(), fetch_list=[avg_cost]) +``` + +`embedding`是根据`input`中的`id`信息从`embedding`矩阵中查询对应`embedding`信息,在CPU上进行计算,其速度也是可接受的。因此,可以参考如下代码,使用`device_guard`将`embedding`层设置在CPU上,以利用CPU内存资源。那么,除了`embedding`层,其他各层都会在GPU上运行。 + +```python +import paddle.fluid as fluid + +data = fluid.layers.fill_constant(shape=[1], value=128, dtype='int64') +label = fluid.layers.fill_constant(shape=[1, 150], value=0.5, dtype='float32') +with fluid.device_guard("cpu"): + emb = fluid.embedding(input=data, size=(10000000, 150), dtype='float32') +out = fluid.layers.l2_normalize(x=emb, axis=-1) + +cost = fluid.layers.square_error_cost(input=out, label=label) +avg_cost = fluid.layers.mean(cost) +sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) +sgd_optimizer.minimize(avg_cost) + +place = fluid.CUDAPlace(0) +exe = fluid.Executor(place) +exe.run(fluid.default_startup_program()) +result = exe.run(fluid.default_main_program(), fetch_list=[avg_cost]) +``` + +在显存足够的情况下,可不必进行这样的设置。 + +## 如何减少数据传输 +### 使用profile工具确认是否发生了数据传输 +首先对模型的性能数据进行分析,找到发生数据传输的原因。如下列代码所示,可以利用[profile](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/profiler_cn.html)工具进行分析。 + +```python +import paddle.fluid as fluid +import paddle.fluid.compiler as compiler +import paddle.fluid.profiler as 
profiler + +data1 = fluid.layers.fill_constant(shape=[1, 3, 8, 8], value=0.5, dtype='float32') +data2 = fluid.layers.fill_constant(shape=[1, 3, 5, 5], value=0.5, dtype='float32') +shape = fluid.layers.shape(data2) +shape = fluid.layers.slice(shape, axes=[0], starts=[0], ends=[4]) +out = fluid.layers.crop_tensor(data1, shape=shape) +place = fluid.CUDAPlace(0) +exe = fluid.Executor(place) +exe.run(fluid.default_startup_program()) +compiled_prog = compiler.CompiledProgram(fluid.default_main_program()) +with profiler.profiler('All', 'total') as prof: + for i in range(10): + result = exe.run(program=compiled_prog, fetch_list=[out]) +``` + +在程序运行结束后,将会自动地打印出profile report。在下面的profile report中,可以看到 `GpuMemCpy Summary`中给出了2项数据传输的调用耗时。在OP执行过程中,如果输入Tensor所在的设备与OP执行的设备不同,就会发生`GpuMemcpySync`,通常我们可以直接优化的就是这一项。进一步分析,可以看到`slice`和`crop_tensor`执行中都发生了`GpuMemcpySync`。尽管我们在程序中设置了GPU模式运行,但是框架中有些OP,例如shape,会将输出结果放在CPU上。 + +```text +-------------------------> Profiling Report <------------------------- + +Note! This Report merge all thread info into one. +Place: All +Time unit: ms +Sorted by total time in descending order in the same thread + +Total time: 26.6328 + Computation time Total: 13.3133 Ratio: 49.9884% + Framework overhead Total: 13.3195 Ratio: 50.0116% + +------------------------- GpuMemCpy Summary ------------------------- + +GpuMemcpy Calls: 30 Total: 1.47508 Ratio: 5.5386% + GpuMemcpyAsync Calls: 10 Total: 0.443514 Ratio: 1.66529% + GpuMemcpySync Calls: 20 Total: 1.03157 Ratio: 3.87331% + +------------------------- Event Summary ------------------------- + +Event Calls Total CPU Time (Ratio) GPU Time (Ratio) Min. Max. Ave. Ratio. +FastThreadedSSAGraphExecutorPrepare 10 9.16493 9.152509 (0.998645) 0.012417 (0.001355) 0.025192 8.85968 0.916493 0.344122 +shape 10 8.33057 8.330568 (1.000000) 0.000000 (0.000000) 0.030711 7.99849 0.833057 0.312793 +fill_constant 20 4.06097 4.024522 (0.991025) 0.036449 (0.008975) 0.075087 0.888959 0.203049 0.15248 +slice 10 1.78033 1.750439 (0.983212) 0.029888 (0.016788) 0.148503 0.290851 0.178033 0.0668471 + GpuMemcpySync:CPU->GPU 10 0.45524 0.446312 (0.980388) 0.008928 (0.019612) 0.039089 0.060694 0.045524 0.0170932 +crop_tensor 10 1.67658 1.620542 (0.966578) 0.056034 (0.033422) 0.143906 0.258776 0.167658 0.0629515 + GpuMemcpySync:GPU->CPU 10 0.57633 0.552906 (0.959357) 0.023424 (0.040643) 0.050657 0.076322 0.057633 0.0216398 +Fetch 10 0.919361 0.895201 (0.973721) 0.024160 (0.026279) 0.082935 0.138122 0.0919361 0.0345199 + GpuMemcpyAsync:GPU->CPU 10 0.443514 0.419354 (0.945526) 0.024160 (0.054474) 0.040639 0.059673 0.0443514 0.0166529 +ScopeBufferedMonitor::post_local_exec_scopes_process 10 0.341999 0.341999 (1.000000) 0.000000 (0.000000) 0.028436 0.057134 0.0341999 0.0128413 +eager_deletion 30 0.287236 0.287236 (1.000000) 0.000000 (0.000000) 0.005452 0.022696 0.00957453 0.010785 +ScopeBufferedMonitor::pre_local_exec_scopes_process 10 0.047864 0.047864 (1.000000) 0.000000 (0.000000) 0.003668 0.011592 0.0047864 0.00179718 +InitLocalVars 1 0.022981 0.022981 (1.000000) 0.000000 (0.000000) 0.022981 0.022981 0.022981 0.000862883 +``` +### 通过log查看发生数据传输的具体位置 + +以上的示例程序比较简单,我们只用看profile report就能知道具体是哪些算子发生了数据传输。但是当模型比较复杂时,可能需要去查看更加详细的调试信息,可以打印出运行时的log去确定发生数据传输的具体位置。依然以上述程序为例,执行`GLOG_vmodule=operator=3 python test_case.py`,会得到如下log信息,会发现发生了2次数据传输: + +- `shape`输出的结果在CPU上,在`slice`运行时,`shape`的输出被拷贝到GPU上 +- `slice`执行完的结果在GPU上,当`crop_tensor`执行时,它会被拷贝到CPU上。 + +```text +I0406 14:56:23.286592 17516 operator.cc:180] CUDAPlace(0) Op(shape), inputs:{Input[fill_constant_1.tmp_0:float[1, 3, 
5, 5]({})]}, outputs:{Out[shape_0.tmp_0:int[4]({})]}. +I0406 14:56:23.286628 17516 eager_deletion_op_handle.cc:107] Erase variable fill_constant_1.tmp_0 on CUDAPlace(0) +I0406 14:56:23.286725 17516 operator.cc:1210] Transform Variable shape_0.tmp_0 from data_type[int]:data_layout[NCHW]:place[CPUPlace]:library_type[PLAIN] to data_type[int]:data_layout[ANY_LAYOUT]:place[CUDAPlace(0)]:library_type[PLAIN] +I0406 14:56:23.286763 17516 scope.cc:169] Create variable shape_0.tmp_0 +I0406 14:56:23.286784 17516 data_device_transform.cc:21] DeviceTransform in, src_place CPUPlace dst_place: CUDAPlace(0) +I0406 14:56:23.286867 17516 tensor_util.cu:129] TensorCopySync 4 from CPUPlace to CUDAPlace(0) +I0406 14:56:23.287099 17516 operator.cc:180] CUDAPlace(0) Op(slice), inputs:{EndsTensor[], EndsTensorList[], Input[shape_0.tmp_0:int[4]({})], StartsTensor[], StartsTensorList[]}, outputs:{Out[slice_0.tmp_0:int[4]({})]}. +I0406 14:56:23.287140 17516 eager_deletion_op_handle.cc:107] Erase variable shape_0.tmp_0 on CUDAPlace(0) +I0406 14:56:23.287220 17516 tensor_util.cu:129] TensorCopySync 4 from CUDAPlace(0) to CPUPlace +I0406 14:56:23.287473 17516 operator.cc:180] CUDAPlace(0) Op(crop_tensor), inputs:{Offsets[], OffsetsTensor[], Shape[slice_0.tmp_0:int[4]({})], ShapeTensor[], X[fill_constant_0.tmp_0:float[1, 3, 8, 8]({})]}, outputs:{Out[crop_tensor_0.tmp_0:float[1, 3, 5, 5]({})]}. +``` + +### 使用device_guard避免不必要的数据传输 + +在上面的例子中,`shape`输出的是一个1-D的Tensor,因此对于`slice`而言计算量很小。这种情况下如果将`slice`设置在CPU上运行,就可以避免2次数据传输。修改后的程序如下: + +```python +import paddle.fluid as fluid +import paddle.fluid.compiler as compiler +import paddle.fluid.profiler as profiler + +data1 = fluid.layers.fill_constant(shape=[1, 3, 8, 8], value=0.5, dtype='float32') +data2 = fluid.layers.fill_constant(shape=[1, 3, 5, 5], value=0.5, dtype='float32') +shape = fluid.layers.shape(data2) +with fluid.device_guard("cpu"): + shape = fluid.layers.slice(shape, axes=[0], starts=[0], ends=[4]) +out = fluid.layers.crop_tensor(data1, shape=shape) +place = fluid.CUDAPlace(0) +exe = fluid.Executor(place) +exe.run(fluid.default_startup_program()) +compiled_prog = compiler.CompiledProgram(fluid.default_main_program()) +with profiler.profiler('All', 'total') as prof: + for i in range(10): + result = exe.run(program=compiled_prog, fetch_list=[out]) +``` +再次观察profile report中`GpuMemCpy Summary`的内容,可以看到`GpuMemCpySync`已经被消除。在实际的模型中,若`GpuMemCpySync` 调用耗时占比较大,并且可以通过设置`device_guard`避免,那么就能够带来一定的性能提升。 + +```text +-------------------------> Profiling Report <------------------------- + +Note! This Report merge all thread info into one. +Place: All +Time unit: ms +Sorted by total time in descending order in the same thread + +Total time: 14.5345 + Computation time Total: 4.47587 Ratio: 30.7948% + Framework overhead Total: 10.0586 Ratio: 69.2052% + +------------------------- GpuMemCpy Summary ------------------------- + +GpuMemcpy Calls: 10 Total: 0.457033 Ratio: 3.14447% + GpuMemcpyAsync Calls: 10 Total: 0.457033 Ratio: 3.14447% + +------------------------- Event Summary ------------------------- + +Event Calls Total CPU Time (Ratio) GPU Time (Ratio) Min. Max. Ave. Ratio. 
+FastThreadedSSAGraphExecutorPrepare 10 7.70113 7.689066 (0.998433) 0.012064 (0.001567) 0.032657 7.39363 0.770113 0.529852 +fill_constant 20 2.62299 2.587022 (0.986287) 0.035968 (0.013713) 0.071097 0.342082 0.13115 0.180466 +shape 10 1.93504 1.935040 (1.000000) 0.000000 (0.000000) 0.026774 1.6016 0.193504 0.133134 +Fetch 10 0.880496 0.858512 (0.975032) 0.021984 (0.024968) 0.07392 0.140896 0.0880496 0.0605797 + GpuMemcpyAsync:GPU->CPU 10 0.457033 0.435049 (0.951898) 0.021984 (0.048102) 0.037836 0.071424 0.0457033 0.0314447 +crop_tensor 10 0.705426 0.671506 (0.951916) 0.033920 (0.048084) 0.05841 0.123901 0.0705426 0.0485346 +slice 10 0.324241 0.324241 (1.000000) 0.000000 (0.000000) 0.024299 0.07213 0.0324241 0.0223084 +eager_deletion 30 0.250524 0.250524 (1.000000) 0.000000 (0.000000) 0.004171 0.016235 0.0083508 0.0172365 +ScopeBufferedMonitor::post_local_exec_scopes_process 10 0.047794 0.047794 (1.000000) 0.000000 (0.000000) 0.003344 0.014131 0.0047794 0.00328831 +InitLocalVars 1 0.034629 0.034629 (1.000000) 0.000000 (0.000000) 0.034629 0.034629 0.034629 0.00238254 +ScopeBufferedMonitor::pre_local_exec_scopes_process 10 0.032231 0.032231 (1.000000) 0.000000 (0.000000) 0.002952 0.004076 0.0032231 0.00221755 +``` + +### 总结 + +- 使用profile工具对模型进行分析,看是否存在GpuMemcpySync的调用耗时。若存在,则进一步分析发生数据传输的原因。 +- 可以通过profile report找到发生GpuMemcpySync的OP。如果需要,可以通过打印log,找到GpuMemcpySync发生的具体位置。 +- 尝试使用`device_guard`设置部分OP的运行设备,来减少GpuMemcpySync的调用。 +- 最后可以通过比较修改前后模型的profile report,或者其他用来衡量性能的指标,确认修改后是否带来了性能提升。 diff --git a/doc/paddle/advanced_guide/performance_improving/index_cn.rst b/doc/paddle/advanced_guide/performance_improving/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b50f091f8c70328d37c7cf3dc92a5b0f14a08f33 --- /dev/null +++ b/doc/paddle/advanced_guide/performance_improving/index_cn.rst @@ -0,0 +1,16 @@ +######## +性能调优 +######## + +.. toctree:: + :maxdepth: 1 + + singlenode_training_improving/training_best_practice.rst + singlenode_training_improving/memory_optimize.rst + device_switching/device_switching.md + amp/amp.md + multinode_training_improving/cpu_train_best_practice.rst + multinode_training_improving/dist_training_gpu.rst + multinode_training_improving/gpu_training_with_recompute.rst + inference_improving/paddle_tensorrt_infer.md + analysis_tools/index_cn.rst diff --git a/doc/paddle/advanced_guide/performance_improving/index_en.rst b/doc/paddle/advanced_guide/performance_improving/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..f57e2a3d060daabf6733c969a9e85de69bc5ae24 --- /dev/null +++ b/doc/paddle/advanced_guide/performance_improving/index_en.rst @@ -0,0 +1,12 @@ +############### +Practice Improving +############### + +.. 
toctree:: + :maxdepth: 1 + + singlenode_training_improving/memory_optimize_en.rst + multinode_training_improving/cpu_train_best_practice_en.rst + multinode_training_improving/gpu_training_with_recompute_en.rst + inference_improving/paddle_tensorrt_infer_en.md + analysis_tools/index_en.rst diff --git a/doc/paddle/advanced_guide/performance_improving/inference_improving/paddle_tensorrt_infer.md b/doc/paddle/advanced_guide/performance_improving/inference_improving/paddle_tensorrt_infer.md new file mode 100644 index 0000000000000000000000000000000000000000..9b231f9430f3d8da1198fc01b1c72abd26b74521 --- /dev/null +++ b/doc/paddle/advanced_guide/performance_improving/inference_improving/paddle_tensorrt_infer.md @@ -0,0 +1,209 @@ +# 使用Paddle-TensorRT库预测 + +NVIDIA TensorRT 是一个高性能的深度学习预测库,可为深度学习推理应用程序提供低延迟和高吞吐量。PaddlePaddle 采用子图的形式对TensorRT进行了集成,即我们可以使用该模块来提升Paddle模型的预测性能。该模块依旧在持续开发中,目前支持的模型如下表所示: + +|分类模型|检测模型|分割模型| +|---|---|---| +|mobilenetv1|yolov3|ICNET| +|resnet50|SSD|| +|vgg16|mask-rcnn|| +|resnext|faster-rcnn|| +|AlexNet|cascade-rcnn|| +|Se-ResNext|retinanet|| +|GoogLeNet|mobilenet-SSD|| +|DPN||| + +在这篇文档中,我们将会对Paddle-TensorRT库的获取、使用和原理进行介绍。 + +**Note:** + +1. 从源码编译时,TensorRT预测库目前仅支持使用GPU编译,且需要设置编译选项TENSORRT_ROOT为TensorRT所在的路径。 +2. Windows支持需要TensorRT 版本5.0以上。 +3. Paddle-TRT目前仅支持固定输入shape。 +4. 下载安装TensorRT后,需要手动在`NvInfer.h`文件中为`class IPluginFactory`和`class IGpuAllocator`分别添加虚析构函数: + ``` c++ + virtual ~IPluginFactory() {}; + virtual ~IGpuAllocator() {}; + ``` + +## 内容 +- [Paddle-TRT使用介绍](#Paddle-TRT使用介绍) +- [Paddle-TRT样例编译测试](#Paddle-TRT样例编译测试) +- [Paddle-TRT INT8使用](#Paddle-TRT_INT8使用) +- [Paddle-TRT子图运行原理](#Paddle-TRT子图运行原理) +- [Paddle-TRT性能测试](#Paddle-TRT性能测试) + +## Paddle-TRT使用介绍 + +在使用AnalysisPredictor时,我们通过配置AnalysisConfig中的接口 + +``` c++ +config->EnableTensorRtEngine(1 << 20 /* workspace_size*/, + batch_size /* max_batch_size*/, + 3 /* min_subgraph_size*/, + AnalysisConfig::Precision::kFloat32 /* precision*/, + false /* use_static*/, + false /* use_calib_mode*/); +``` +的方式来指定使用Paddle-TRT子图方式来运行。 +该接口中的参数的详细介绍如下: + +- **`workspace_size`**,类型:int,默认值为1 << 20。指定TensorRT使用的工作空间大小,TensorRT会在该大小限制下筛选合适的kernel执行预测运算。 +- **`max_batch_size`**,类型:int,默认值为1。需要提前设置最大的batch大小,运行时batch大小不得超过此限定值。 +- **`min_subgraph_size`**,类型:int,默认值为3。Paddle-TRT是以子图的形式运行,为了避免性能损失,当子图内部节点个数大于`min_subgraph_size`的时候,才会使用Paddle-TRT运行。 +- **`precision`**,类型:`enum class Precision {kFloat32 = 0, kHalf, kInt8,};`, 默认值为`AnalysisConfig::Precision::kFloat32`。指定使用TRT的精度,支持FP32(kFloat32),FP16(kHalf),Int8(kInt8)。若需要使用Paddle-TRT int8离线量化校准,需设定`precision`为 `AnalysisConfig::Precision::kInt8`, 且设置`use_calib_mode` 为true。 +- **`use_static`**,类型:bool, 默认值为false。如果指定为true,在初次运行程序的时候会将TRT的优化信息进行序列化到磁盘上,下次运行时直接加载优化的序列化信息而不需要重新生成。 +- **`use_calib_mode`**,类型:bool, 默认值为false。若要运行Paddle-TRT int8离线量化校准,需要将此选项设置为true。 + +**Note:** Paddle-TRT目前只支持固定shape的输入,不支持变化shape的输入。 + +## Paddle-TRT样例编译测试 + +1. 下载或编译带有 TensorRT 的paddle预测库,参考[安装与编译C++预测库](../../inference_deployment/inference/build_and_install_lib_cn.html)。 +2. 从[NVIDIA官网](https://developer.nvidia.com/nvidia-tensorrt-download)下载对应本地环境中cuda和cudnn版本的TensorRT,需要登陆NVIDIA开发者账号。 +3. 
下载[预测样例](https://paddle-inference-dist.bj.bcebos.com/tensorrt_test/paddle_inference_sample_v1.7.tar.gz)并解压,进入`sample/paddle-TRT`目录下。 + + `paddle-TRT` 文件夹目录结构如下: + + ``` + paddle-TRT + ├── CMakeLists.txt + ├── mobilenet_test.cc + ├── fluid_generate_calib_test.cc + ├── fluid_int8_test.cc + ├── mobilenetv1 + │ ├── model + │ └── params + ├── run.sh + └── run_impl.sh + ``` + + - `mobilenet_test.cc` 为使用paddle-TRT预测的C++源文件 + - `fluid_generate_calib_test.cc` 为使用TRT int8离线量化校准的C++源文件 + - `fluid_int8_test.cc` 为使用TRT执行int8预测的C++源文件 + - `mobilenetv1` 为模型文件夹 + - `run.sh` 为预测运行脚本文件 + + 在这里假设样例所在的目录为 `SAMPLE_BASE_DIR/sample/paddle-TRT` + +4. 配置编译与运行脚本 + + 编译运行预测样例之前,需要根据运行环境配置编译与运行脚本`run.sh`。`run.sh`的选项与路径配置的部分如下: + + ```shell + # 设置是否开启MKL、GPU、TensorRT,如果要使用TensorRT,必须打开GPU + WITH_MKL=ON + WITH_GPU=ON + USE_TENSORRT=ON + + # 按照运行环境设置预测库路径、CUDA库路径、CUDNN库路径、TensorRT路径、模型路径 + LIB_DIR=YOUR_LIB_DIR + CUDA_LIB_DIR=YOUR_CUDA_LIB_DIR + CUDNN_LIB_DIR=YOUR_CUDNN_LIB_DIR + TENSORRT_ROOT_DIR=YOUR_TENSORRT_ROOT_DIR + MODEL_DIR=YOUR_MODEL_DIR + ``` + + 按照实际运行环境配置`run.sh`中的选项开关和所需lib路径。 + +5. 编译与运行样例 + + +## Paddle-TRT INT8使用 + +1. Paddle-TRT INT8 简介 + 神经网络的参数在一定程度上是冗余的,在很多任务上,我们可以在保证模型精度的前提下,将Float32的模型转换成Int8的模型。目前,Paddle-TRT支持离线将预训练好的Float32模型转换成Int8的模型,具体的流程如下: + + 1) **生成校准表**(Calibration table):我们准备500张左右的真实输入数据,并将数据输入到模型中去,Paddle-TRT会统计模型中每个op输入和输出值的范围信息,并将其记录到校准表中,这些信息有效减少了模型转换时的信息损失。 + + 2) 生成校准表后,再次运行模型,**Paddle-TRT会自动加载校准表**,并进行INT8模式下的预测。 + +2. 编译测试INT8样例 + 将`run.sh`文件中的`mobilenet_test`改为`fluid_generate_calib_test`,运行 + + ``` shell + sh run.sh + ``` + + 即可执行生成校准表样例,在该样例中,我们随机生成了500个输入来模拟这一过程,在实际业务中,建议大家使用真实样例。运行结束后,在 `SAMPLE_BASE_DIR/sample/paddle-TRT/build/mobilenetv1/_opt_cache` 模型目录下会多出一个名字为trt_calib_*的文件,即校准表。 + + 生成校准表后,将带校准表的模型文件拷贝到特定地址 + + ``` shell + cp -rf SAMPLE_BASE_DIR/sample/paddle-TRT/build/mobilenetv1/ SAMPLE_BASE_DIR/sample/paddle-TRT/mobilenetv1_calib + ``` + + 将`run.sh`文件中的`fluid_generate_calib_test`改为`fluid_int8_test`,将模型路径改为`SAMPLE_BASE_DIR/sample/paddle-TRT/mobilenetv1_calib`,运行 + + ``` shell + sh run.sh + ``` + + 即可执行int8预测样例。 + +## Paddle-TRT子图运行原理 + + PaddlePaddle采用子图的形式对TensorRT进行集成,当模型加载后,神经网络可以表示为由变量和运算节点组成的计算图。Paddle TensorRT实现的功能是对整个图进行扫描,发现图中可以使用TensorRT优化的子图,并使用TensorRT节点替换它们。在模型的推断期间,如果遇到TensorRT节点,Paddle会调用TensorRT库对该节点进行优化,其他的节点调用Paddle的原生实现。TensorRT在推断期间能够进行Op的横向和纵向融合,过滤掉冗余的Op,并对特定平台下的特定的Op选择合适的kernel等进行优化,能够加快模型的预测速度。 + +下图使用一个简单的模型展示了这个过程: + +**原始网络** +
+ +**转换的网络** +
+ + + 我们可以在原始模型网络中看到,绿色节点表示可以被TensorRT支持的节点,红色节点表示网络中的变量,黄色表示Paddle只能被Paddle原生实现执行的节点。那些在原始网络中的绿色节点被提取出来汇集成子图,并由一个TensorRT节点代替,成为转换后网络中的`block-25` 节点。在网络运行过程中,如果遇到该节点,Paddle将调用TensorRT库来对其执行。 + +## Paddle-TRT性能测试 + +### 测试环境 +- CPU:Intel(R) Xeon(R) Gold 5117 CPU @ 2.00GHz GPU:Tesla P4 +- TensorRT4.0, CUDA8.0, CUDNNV7 +- 测试模型 ResNet50,MobileNet,ResNet101, Inception V3. + +### 测试对象 +**PaddlePaddle, Pytorch, Tensorflow** + +- 在测试中,PaddlePaddle使用子图优化的方式集成了TensorRT, 模型[地址](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification/models)。 +- Pytorch使用了原生的实现, 模型[地址1](https://github.com/pytorch/vision/tree/master/torchvision/models)、[地址2](https://github.com/marvis/pytorch-mobilenet)。 +- 对TensorFlow测试包括了对TF的原生的测试,和对TF—TRT的测试,**对TF—TRT的测试并没有达到预期的效果,后期会对其进行补充**, 模型[地址](https://github.com/tensorflow/models)。 + + +#### ResNet50 + +|batch_size|PaddlePaddle(ms)|Pytorch(ms)|TensorFlow(ms)| +|---|---|---|---| +|1|4.64117 |16.3|10.878| +|5|6.90622| 22.9 |20.62| +|10|7.9758 |40.6|34.36| + +#### MobileNet + +|batch_size|PaddlePaddle(ms)|Pytorch(ms)|TensorFlow(ms)| +|---|---|---|---| +|1| 1.7541 | 7.8 |2.72| +|5| 3.04666 | 7.8 |3.19| +|10|4.19478 | 14.47 |4.25| + +#### ResNet101 + +|batch_size|PaddlePaddle(ms)|Pytorch(ms)|TensorFlow(ms)| +|---|---|---|---| +|1|8.95767| 22.48 |18.78| +|5|12.9811 | 33.88 |34.84| +|10|14.1463| 61.97 |57.94| + + +#### Inception v3 + +|batch_size|PaddlePaddle(ms)|Pytorch(ms)|TensorFlow(ms)| +|---|---|---|---| +|1|15.1613 | 24.2 |19.1| +|5|18.5373 | 34.8 |27.2| +|10|19.2781| 54.8 |36.7| diff --git a/doc/paddle/advanced_guide/performance_improving/inference_improving/paddle_tensorrt_infer_en.md b/doc/paddle/advanced_guide/performance_improving/inference_improving/paddle_tensorrt_infer_en.md new file mode 100644 index 0000000000000000000000000000000000000000..cc002e8b5bd835536bd3bd4702c00e6aef5dfe74 --- /dev/null +++ b/doc/paddle/advanced_guide/performance_improving/inference_improving/paddle_tensorrt_infer_en.md @@ -0,0 +1,200 @@ +# Use Paddle-TensorRT Library for inference + +NVIDIA TensorRT is a is a platform for high-performance deep learning inference. It delivers low latency and high throughput for deep learning inference application. +Subgraph is used in PaddlePaddle to preliminarily integrate TensorRT, which enables TensorRT module to enhance inference performance of paddle models. The module is still under development. Currently supported models are as following: + +|classification|detection|segmentation| +|---|---|---| +|mobilenetv1|yolov3|ICNET| +|resnet50|SSD|| +|vgg16|mask-rcnn|| +|resnext|faster-rcnn|| +|AlexNet|cascade-rcnn|| +|Se-ResNext|retinanet|| +|GoogLeNet|mobilenet-SSD|| +|DPN||| + +We will introduce the obtaining, usage and theory of Paddle-TensorRT library in this documentation. + +**Note:** + +1. When compiling from source, TensorRT library currently only supports GPU compilation, and you need to set the compilation option TensorRT_ROOT to the path where tensorrt is located. +2. Windows support requires TensorRT version 5.0 or higher. +3. Paddle-TRT currently only supports fixed input shape. +4. 
After downloading and installing tensorrt, you need to manually add virtual destructors for `class IPluginFactory` and `class IGpuAllocator` in the `NvInfer.h` file: + ``` c++ + virtual ~IPluginFactory() {}; + virtual ~IGpuAllocator() {}; + ``` + +## Paddle-TRT interface usage + +When using AnalysisPredictor, we enable Paddle-TRT by setting + +``` c++ +config->EnableTensorRtEngine(1 << 20 /* workspace_size*/, + batch_size /* max_batch_size*/, + 3 /* min_subgraph_size*/, + AnalysisConfig::Precision::kFloat32 /* precision*/, + false /* use_static*/, + false /* use_calib_mode*/); +``` +The details of this interface is as following: + +- **`workspace_size`**: type:int, default is 1 << 20. Sets the max workspace size of TRT. TensorRT will choose kernels under this constraint. +- **`max_batch_size`**: type:int, default is 1. Sets the max batch size. Batch sizes during runtime cannot exceed this value. +- **`min_subgraph_size`**: type:int, default is 3. Subgraph is used to integrate TensorRT in PaddlePaddle. To avoid low performance, Paddle-TRT is only enabled when th number of nodes in th subgraph is more than `min_subgraph_size`. +- **`precision`**: type:`enum class Precision {kFloat32 = 0, kHalf, kInt8,};`, default is `AnalysisConfig::Precision::kFloat32`. Sets the precision of TRT, supporting FP32(kFloat32), FP16(kHalf), Int8(kInt8). Using Paddle-TRT int8 calibration requires setting `precision` to `AnalysisConfig::Precision::kInt8`, and `use_calib_mode` to true. +- **`use_static`**: type:bool, default is false. If set to true, Paddle-TRT will serialize optimization information to disk, to deserialize next time without optimizing again. +- **`use_calib_mode`**: type:bool, default is false. Using Paddle-TRT int8 calibration requires setting this option to true. + +**Note:** Paddle-TRT currently only supports fixed input shape. + +## Paddle-TRT example compiling test + +1. Download or compile Paddle Inference with TensorRT support, refer to [Install and Compile C++ Inference Library](../../inference_deployment/inference/build_and_install_lib_en.html). +2. Download NVIDIA TensorRT(with consistent version of cuda and cudnn in local environment) from [NVIDIA TensorRT](https://developer.nvidia.com/nvidia-tensorrt-download) with an NVIDIA developer account. +3. Download [Paddle Inference sample](https://paddle-inference-dist.bj.bcebos.com/tensorrt_test/paddle_inference_sample_v1.7.tar.gz) and uncompress, and enter `sample/paddle-TRT` directory. + + `paddle-TRT` directory structure is as following: + + ``` + paddle-TRT + ├── CMakeLists.txt + ├── mobilenet_test.cc + ├── fluid_generate_calib_test.cc + ├── fluid_int8_test.cc + ├── mobilenetv1 + │ ├── model + │ └── params + ├── run.sh + └── run_impl.sh + ``` + + - `mobilenet_test.cc` is the c++ source code of inference using Paddle-TRT + - `fluid_generate_calib_test.cc` is the c++ source code of inference using Paddle-TRT int8 calibration to generate calibration table + - `fluid_int8_test.cc` is the c++ source code of inference using Paddle-TRT int8 + - `mobilenetv1` is the model dir + - `run.sh` is the script for running inference + + Here we assume that the current directory is `SAMPLE_BASE_DIR/sample/paddle-TRT`. + + ``` shell + # set whether to enable MKL, GPU or TensorRT. 
Enabling TensorRT requires WITH_GPU being ON + WITH_MKL=ON + WITH_GPU=OFF + USE_TENSORRT=OFF + + # set path to CUDA lib dir, CUDNN lib dir, TensorRT root dir and model dir + LIB_DIR=YOUR_LIB_DIR + CUDA_LIB_DIR=YOUR_CUDA_LIB_DIR + CUDNN_LIB_DIR=YOUR_CUDNN_LIB_DIR + TENSORRT_ROOT_DIR=YOUR_TENSORRT_ROOT_DIR + MODEL_DIR=YOUR_MODEL_DIR + ``` + + Please configure `run.sh` depending on your environment. + +4. Build and run the sample. + + ``` shell + sh run.sh + ``` + +## Paddle-TRT INT8 usage + +1. Paddle-TRT INT8 introduction + The parameters of the neural network are redundant to some extent. In many tasks, we can turn the Float32 model into Int8 model on the premise of precision. At present, Paddle-TRT supports to turn the trained Float32 model into Int8 model off line. The specific processes are as follows: + + 1)**Create the calibration table**. We prepare about 500 real input data, and input the data to the model. Paddle-TRT will count the range information of each op input and output value in the model, and record in the calibration table. The information can reduce the information loss during model transformation. + + 2)After creating the calibration table, run the model again, **Paddle-TRT will load the calibration table automatically**, and conduct the inference in the INT8 mode. + +2. compile and test the INT8 example + + change the `mobilenet_test` in `run.sh` to `fluid_generate_calib_test` and run + + ``` shell + sh run.sh + ``` + + We generate 500 input data to simulate the process, and it's suggested that you use real example for experiment. After the running period, there will be a new file named trt_calib_* under the `SAMPLE_BASE_DIR/sample/paddle-TRT/build/mobilenetv1/_opt_cache` model directory, which is the calibration table. + + Then copy the model dir with calibration infomation to path + + ``` shell + cp -rf SAMPLE_BASE_DIR/sample/paddle-TRT/build/mobilenetv1/ SAMPLE_BASE_DIR/sample/paddle-TRT/mobilenetv1_calib + ``` + + change `fluid_generate_calib_test` in `run.sh` to `fluid_int8_test`, and change model dir path to `SAMPLE_BASE_DIR/sample/paddle-TRT/mobilenetv1_calib` and run + + ``` shell + sh run.sh + ``` + +## Paddle-TRT subgraph operation principle + + Subgraph is used to integrate TensorRT in PaddlePaddle. After model is loaded, neural network can be represented as a computing graph composed of variables and computing nodes. Functions Paddle TensorRT implements are to scan the whole picture, discover subgraphs that can be optimized with TensorRT and replace them with TensorRT nodes. During the inference of model, Paddle will call TensorRT library to optimize TensorRT nodes and call native library of Paddle to optimize other nodes. During the inference, TensorRT can integrate Op horizonally and vertically to filter redundant Ops and is able to choose appropriate kernel for specific Op in specific platform to speed up the inference of model. + + +A simple model expresses the process : + +**Original Network** +
+ +**Transformed Network** +
+ + We can see in the Original Network that the green nodes represent nodes supported by TensorRT, the red nodes represent variables in network and yellow nodes represent nodes which can only be operated by native functions in Paddle. Green nodes in original network are extracted to compose subgraph which is replaced by a single TensorRT node to be transformed into `block-25` node in network. When such nodes are encountered during the runtime, TensorRT library will be called to execute them. + +## Paddle-TRT benchmark + +### Test Environment +- CPU:Intel(R) Xeon(R) Gold 5117 CPU @ 2.00GHz GPU:Tesla P4 +- TensorRT 4.0, CUDA 8.0, CUDNN V7 +- models: ResNet50,MobileNet,ResNet101, Inception V3. + +### Test set +**PaddlePaddle, Pytorch, Tensorflow** + +- PaddlePaddle integrates TensorRT with subgraph, model[link](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification/models)。 +- Pytorch uses original kernels, model[link1](https://github.com/pytorch/vision/tree/master/torchvision/models), [link2](https://github.com/marvis/pytorch-mobilenet)。 +- We tested TF original and TF-TRT**对TF—TRT的测试并没有达到预期的效果,后期会对其进行补充**, model[link](https://github.com/tensorflow/models)。 + + +#### ResNet50 + +|batch_size|PaddlePaddle(ms)|Pytorch(ms)|TensorFlow(ms)| +|---|---|---|---| +|1|4.64117 |16.3|10.878| +|5|6.90622| 22.9 |20.62| +|10|7.9758 |40.6|34.36| + +#### MobileNet + +|batch_size|PaddlePaddle(ms)|Pytorch(ms)|TensorFlow(ms)| +|---|---|---|---| +|1| 1.7541 | 7.8 |2.72| +|5| 3.04666 | 7.8 |3.19| +|10|4.19478 | 14.47 |4.25| + +#### ResNet101 + +|batch_size|PaddlePaddle(ms)|Pytorch(ms)|TensorFlow(ms)| +|---|---|---|---| +|1|8.95767| 22.48 |18.78| +|5|12.9811 | 33.88 |34.84| +|10|14.1463| 61.97 |57.94| + + +#### Inception v3 + +|batch_size|PaddlePaddle(ms)|Pytorch(ms)|TensorFlow(ms)| +|---|---|---|---| +|1|15.1613 | 24.2 |19.1| +|5|18.5373 | 34.8 |27.2| +|10|19.2781| 54.8 |36.7| diff --git a/doc/paddle/advanced_guide/performance_improving/multinode_training_improving/cpu_train_best_practice.rst b/doc/paddle/advanced_guide/performance_improving/multinode_training_improving/cpu_train_best_practice.rst new file mode 100644 index 0000000000000000000000000000000000000000..2386890a13fe133c45a6495cc274827389f4a875 --- /dev/null +++ b/doc/paddle/advanced_guide/performance_improving/multinode_training_improving/cpu_train_best_practice.rst @@ -0,0 +1,161 @@ +.. _api_guide_cpu_training_best_practice: + +#################### +分布式CPU训练优秀实践 +#################### + +提高CPU分布式训练的训练速度,主要要从四个方面来考虑: +1)提高训练速度,主要是提高CPU的使用率;2)提高通信速度,主要是减少通信传输的数据量;3)提高数据IO速度;4)更换分布式训练策略,提高分布式训练速度。 + +提高CPU的使用率 +============= + +提高CPU使用率主要依赖 :code:`ParallelExecutor`,可以充分利用多个CPU的计算能力来加速计算。 + +API详细使用方法参考 :ref:`cn_api_fluid_ParallelExecutor` ,简单实例用法: + +.. 
code-block:: python + + # 配置执行策略,主要是设置线程数 + exec_strategy = fluid.ExecutionStrategy() + exec_strategy.num_threads = 8 + + # 配置构图策略,对于CPU训练而言,应该使用Reduce模式进行训练 + build_strategy = fluid.BuildStrategy() + if int(os.getenv("CPU_NUM")) > 1: + build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce + + pe = fluid.ParallelExecutor( + use_cuda=False, + loss_name=avg_cost.name, + main_program=main_program, + build_strategy=build_strategy, + exec_strategy=exec_strategy) + +以上参数中: + +- :code:`num_threads` : 模型训练使用的线程数,最好和训练所在机器的物理CPU核数接近 +- :code:`reduce_strategy` : 对于CPU训练而言,应该选择 fluid.BuildStrategy.ReduceStrategy.Reduce + + +通用环境变量配置: + +- :code:`CPU_NUM` :模型副本replica的个数,最好和num_threads一致 + + +提高通信速度 +========== + +要减少通信数据量,提高通信速度,主要是使用稀疏更新 ,目前支持 :ref:`api_guide_sparse_update` 的主要是 :ref:`cn_api_fluid_layers_embedding` 。 + +.. code-block:: python + + data = fluid.layers.data(name='ids', shape=[1], dtype='int64') + fc = fluid.layers.embedding(input=data, size=[dict_size, 16], is_sparse=True) + +以上参数中: + +- :code:`is_sparse` : 配置embedding使用稀疏更新,如果embedding的dict_size很大,而每次数据data很少,建议使用sparse更新方式。 + + +提高数据IO速度 +========== + +要提高CPU分布式的数据IO速度,可以首先考虑使用dataset API进行数据读取。 dataset是一种多生产者多消费者模式的数据读取方法,默认情况下耦合数据读取线程与训练线程,在多线程的训练中,dataset表现出极高的性能优势。 + +API接口介绍可以参考:https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dataset_cn/QueueDataset_cn.html + +结合实际的网络,比如CTR-DNN模型,引入的方法可以参考:https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleRec/ctr/dnn + +最后使用 :code:`train_from_dataset` 接口来进行网络的训练: + +.. code-block:: python + + dataset = fluid.DatasetFactory().create_dataset() + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(fluid.default_startup_program()) + exe.train_from_dataset(program=fluid.default_main_program(),dataset=dataset) + + +更换分布式训练策略 +========== + +CPU分布式训练速度进一步提高的核心在于选择合适的分布式训练策略,比如定义通信策略、编译策略、执行策略等等。paddlepaddle于v1.7版本发布了 :code:`DistributedStrategy` 功能,可以十分灵活且方便的指定分布式运行策略。 + +首先需要在代码中引入相关库: + +.. code-block:: python + + from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet + import paddle.fluid.incubate.fleet.base.role_maker as role_maker + from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy_factory import DistributedStrategyFactory + +然后指定CPU分布式运行的训练策略,目前可选配置有四种:同步训练(Sync)、异步训练(Async)、半异步训练(Half-Async)以及GEO训练。不同策略的细节,可以查看设计文档: +https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler/transpiler_cpu.md + +通过如下代码引入上述策略的默认配置,并进行CPU分布式训练: + +.. code-block:: python + + # step1: 引入CPU分布式训练策略 + # 同步训练策略 + strategy = DistributedStrategyFactory.create_sync_strategy() + # 半异步训练策略 + strategy = DistributedStrategyFactory.create_half_async_strategy() + # 异步训练策略 + strategy = DistributedStrategyFactory.create_async_strategy() + # GEO训练策略 + strategy = DistributedStrategyFactory.create_geo_strategy(update_frequency=400) + + # step2: 定义节点角色 + role = role_maker.PaddleCloudRoleMaker() + fleet.init(role) + + # step3: 分布式训练program构建 + optimizer = fluid.optimizer.SGD(learning_rate) # 以SGD优化器为例 + optimizer = fleet.distributed_optimizer(optimizer, strategy) + optimizer.minimize(loss) + + # step4.1: 启动参数服务器节点(Server) + if fleet.is_server(): + fleet.init_server() + fleet.run_server() + + # step4.2: 启动训练节点(Trainer) + elif fleet.is_worker(): + fleet.init_worker() + exe.run(fleet.startup_program) + # Do training + exe.run(fleet.main_program) + fleet.stop_worker() + + +paddlepaddle支持对训练策略中的细节进行调整: + +- 创建compiled_program所需的build_strategy及exec_strategy可以直接基于strategy获得 + +.. 
code-block:: python + + compiled_program = fluid.compiler.CompiledProgram(fleet.main_program).with_data_parallel( + loss_name=loss.name, + build_strategy=strategy.get_build_strategy(), + exec_strategy=strategy.get_execute_strategy()) + + +- 自定义训练策略细节,支持对DistributeTranspilerConfig、TrainerRuntimeConfig、ServerRuntimeConfig、fluid.ExecutionStrategy、fluid.BuildStrategy进行自定义配置。以DistributeTranspilerConfig为例,修改方式如下所示: + +.. code-block:: python + + strategy = DistributedStrategyFactory.create_sync_strategy() + + # 方式一(推荐): + config = strategy.get_program_config() + config.min_block_size = 81920 + + + # 方式二:调用set_program_config修改组网相关配置,支持DistributeTranspilerConfig和dict两种数据类型 + config = DistributeTranspilerConfig() + config.min_block_size = 81920 + # config = dict() + # config['min_block_size'] = 81920 + strategy.set_program_config(config) \ No newline at end of file diff --git a/doc/paddle/advanced_guide/performance_improving/multinode_training_improving/cpu_train_best_practice_en.rst b/doc/paddle/advanced_guide/performance_improving/multinode_training_improving/cpu_train_best_practice_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..1ff88252456cd3f00bc268e22159efad9f1529b1 --- /dev/null +++ b/doc/paddle/advanced_guide/performance_improving/multinode_training_improving/cpu_train_best_practice_en.rst @@ -0,0 +1,164 @@ +.. _api_guide_cpu_training_best_practice_en: + +###################################################### +Best practices of distributed training on CPU +###################################################### + +To improve the training speed of CPU distributed training, we must consider two aspects: + +1. Improve the training speed mainly by improving utilization rate of CPU; +2. Improve the communication speed mainly by reducing the amount of data transmitted in the communication; +3. Improve the data IO speed by dataset API; +4. Improve the distributed training speed by changing distributed training strategy. + +Improve CPU utilization +============================= + +The CPU utilization mainly depends on :code:`ParallelExecutor`, which can make full use of the computing power of multiple CPUs to speed up the calculation. + +For detailed API usage, please refer to :ref:`api_fluid_ParallelExecutor` . A simple example: + +.. code-block:: python + + # Configure the execution strategy, mainly to set the number of threads + exec_strategy = fluid.ExecutionStrategy() + exec_strategy.num_threads = 8 + + # Configure the composition strategy, for CPU training, you should use the Reduce mode for training. + build_strategy = fluid.BuildStrategy() + if int(os.getenv("CPU_NUM")) > 1: + build_strategy.reduce_strategy=fluid.BuildStrategy.ReduceStrategy.Reduce + + pe = fluid.ParallelExecutor( + use_cuda=False, + loss_name=avg_cost.name, + main_program=main_program, + build_strategy=build_strategy, + exec_strategy=exec_strategy) + +Among the parameters above: + +- :code:`num_threads` : the number of threads used by the model training. It is preferably close to the number of the physical CPU cores of the machine where the training is performed. 
+- :code:`reduce_strategy` : For CPU training, you should choose fluid.BuildStrategy.ReduceStrategy.Reduce + + +Configuration of general environment variables: + +- :code:`CPU_NUM`: The number of replicas of the model, preferably the same as num_threads + + +Improve communication speed +============================== + +To reduce the amount of communication data and improve communication speed is achieved mainly by using sparse updates, the current support for `sparse update <../layers/sparse_update_en.html>`_ is mainly :ref:`api_fluid_layers_embedding`. + +.. code-block:: python + + data = fluid.layers.data(name='ids', shape=[1], dtype='int64') + fc = fluid.layers.embedding(input=data, size=[dict_size, 16], is_sparse=True) + +Among the parameters above: + +- :code:`is_sparse`: Use sparse updates to configure embedding. If the dict_size of embedding is large but the number of data are very small each time, it is recommended to use the sparse update method. + + +Improve data IO speed +============================== + +To improve the CPU's distributed training speed, you can first consider using the dataset API as data reader. Dataset is a multi producer and multi consumer data reading method. By default, data reading thread and training thread are coupled. In multi-threaded training, dataset shows a high performance advantage. + +Refer to this page for API introduction: https://www.paddlepaddle.org.cn/documentation/docs/en/api/dataset/QueueDataset.html + +Combined with the actual model CTR-DNN, you can learn more about how to use dataset: https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleRec/ctr/dnn + +Using :code:`train_from_dataset` for network training. + +.. code-block:: python + + dataset = fluid.DatasetFactory().create_dataset() + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(fluid.default_startup_program()) + exe.train_from_dataset(program=fluid.default_main_program(),dataset=dataset) + + +Change distributed training strategy +============================== + +The core of improving CPU distributed training speed is to choose appropriate distributed training strategy, such as defining communication strategy, compiling strategy, executing strategy and so on. PaddlePaddle released :code:`DistributedStrategy` API in V1.7 version , which can be very flexible and convenient to specify distributed operation strategy. + +First, we need to introduce relevant libraries into the code: + +.. code-block:: python + + from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet + import paddle.fluid.incubate.fleet.base.role_maker as role_maker + from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy_factory import DistributedStrategyFactory + +At present, there are four kinds of training strategies: synchronous training, asynchronous, half asynchronous training and GEO training. For details of different strategies, you can view the design documents: +https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler/transpiler_cpu.md + +The default configuration of the above policy is introduced by the following code: + +.. 
code-block:: python + + # step1: get distributed strategy + # Sync + strategy = DistributedStrategyFactory.create_sync_strategy() + # Half-Async + strategy = DistributedStrategyFactory.create_half_async_strategy() + # Async + strategy = DistributedStrategyFactory.create_async_strategy() + # GEO + strategy = DistributedStrategyFactory.create_geo_strategy(update_frequency=400) + + # step2: define role of node + role = role_maker.PaddleCloudRoleMaker() + fleet.init(role) + + # step3: get distributed training program + optimizer = fluid.optimizer.SGD(learning_rate) # 以SGD优化器为例 + optimizer = fleet.distributed_optimizer(optimizer, strategy) + optimizer.minimize(loss) + + # step4.1: run parameter server node + if fleet.is_server(): + fleet.init_server() + fleet.run_server() + + # step4.2: run worker node + elif fleet.is_worker(): + fleet.init_worker() + exe.run(fleet.startup_program) + # Do training + exe.run(fleet.main_program) + fleet.stop_worker() + +PaddlePaddle supports adjusting the details of the training strategy: + +- The build_strategy and exec_strategy which used to create compiled_program can generate from strategy: + +.. code-block:: python + + compiled_program = fluid.compiler.CompiledProgram(fleet.main_program).with_data_parallel( + loss_name=loss.name, + build_strategy=strategy.get_build_strategy(), + exec_strategy=strategy.get_execute_strategy()) + + +- Training strategy details can be customized, Paddlepaddle supports customized configuration of distributetranspierconfig, trainerruntimeconfig, serverruntimeconfig, fluid.executionstrategy and fluid.buildstrategy. Take distributetranspillerconfig as an example. The modification method is as follows: + +.. code-block:: python + + strategy = DistributedStrategyFactory.create_sync_strategy() + + # Mode 1 (recommended): + config = strategy.get_program_config() + config.min_block_size = 81920 + + + # Mode 2 + config = DistributeTranspilerConfig() + config.min_block_size = 81920 + # config = dict() + # config['min_block_size'] = 81920 + strategy.set_program_config(config) \ No newline at end of file diff --git a/doc/paddle/advanced_guide/performance_improving/multinode_training_improving/dist_training_gpu.rst b/doc/paddle/advanced_guide/performance_improving/multinode_training_improving/dist_training_gpu.rst new file mode 100644 index 0000000000000000000000000000000000000000..ebe02dc6f577f47f323da7d0a967d952f0e5124e --- /dev/null +++ b/doc/paddle/advanced_guide/performance_improving/multinode_training_improving/dist_training_gpu.rst @@ -0,0 +1,133 @@ +.. _best_practice_dist_training_gpu: + +##################### +分布式GPU训练优秀实践 +##################### + +开始优化您的GPU分布式训练任务 +--------------------------- + +PaddlePaddle Fluid支持在现代GPU [#]_ 服务器集群上完成高性能分布式训练。通常可以通过以下方法优化在多机多卡环境训练性能,建议在进行性能优化时,检查每项优化点并验证对应提升,从而提升最终的性能。 + +一个简单的验证当前的训练程序是否需要进一步优化性能的方法,是查看GPU的计算利用率 [#]_ ,通常用 :code:`nvidia-smi` 命令查看。如果GPU利用率较低,则可能存在较大的优化空间。下面主要从数据准备、训练策略设置和训练方式三个方面介绍GPU分布式训练中常用的优化方法。 + +1、数据准备 +=========== + +数据读取的优化在GPU训练中至关重要,尤其在不断增加batch_size提升吞吐时,计算对reader性能会有更高对要求,优化reader性能需要考虑的点包括: + + - 使用 :code:`DataLoader` 。参考 `这里 `_ 使用DataLoader,并建议开启 :code:`use_double_buffer` 。 + - reader返回uint8类型数据。图片在解码后一般会以uint8类型存储,如果在reader中转换成float类型数据,会将数据体积扩大4倍。直接返回uint8数据,然后在GPU上转化成float类型进行训练可以提升数据读取效率。 + - 减少reader初始化时间 (infinite read)。在训练任务开始执行第一轮训练时,reader开始不断异步地从磁盘或其他存储中读取数据并执行预处理,然后将处理好的数据填充到队列中供计算使用。从0开始填充这个队列直到数据可以源源不断供给计算,需要一定时间的预热。所以,如果每轮训练都重新填充队列,会产生一些时间的开销。所以,在使用DataLoader时,可以让reader函数不断地产生数据,直到训练循环结束: + + .. 
code-block:: python + :linenos: + + def infinite_reader(file_path): + while True: + with open(file_path) as fn: + for line in fn: + yield process(line) + + def train(): + ... + for pass_id in xrange(NUM_PASSES): + if pass_id == 0: + data_loader.start() + for batch_id in (iters_per_pass): + exe.run() + data_loader.reset() + + +另外,可以使用DALI库提升数据处理性能。DALI是NVIDIA开发的数据加载库,更多内容请参考 `官网文档 `_ 。飞桨中如何结合使用DALI库请参考 `使用示例 `_ 。 + +2、训练策略设置 +=========== + +训练参数设置表 + +.. csv-table:: + :header: "选项", "类型", "默认值", "说明" + :widths: 3, 3, 3, 5 + + ":code:`num_threads`", "int", "1", "CPU线程数" + ":code:`nccl_comm_num`", "int", "1", "nccl通信器数量" + ":code:`fuse_all_reduce_ops`", "bool", "False", "多卡训练时,将AllReduce操纵进行融合" + ":code:`use_hierarchical_allreduce` ", "bool", "False", "分级式reduce" + ":code:`num_iteration_per_drop_scope`", "int", "1", "scope drop频率,设置每隔几个batch的迭代之后执行一次清理scope" + ":code:`fetch_frequency`", "int", "1", "fetch的刷新频率" + ":code:`fuse_bn_act_ops`", "bool", "False", "是否开启batch normalization和激活函数的融合" + ":code:`fuse_elewise_add_act_ops`", "bool", "False", "是否开启elementwise add函数和激活函数的融合" + +说明: + +- 关于设置合适的CPU线程数 :code:`num_threads` 和nccl通信器数量 :code:`nccl_comm_num` 。PaddlePaddle Fluid使用“线程池” [#]_ 模型调度并执行Op,Op在启动GPU计算之前,通常需要CPU的协助,然而如果Op本身占用时间很小,“线程池”模型下又会带来额外的调度开销。使用多进程模式时,如果神经网络的计算图 [#]_ 节点间有较高的并发度,即使每个进程只在一个GPU上运行,使用多个线程可以更大限度的提升GPU利用率。nccl通信器数量 :code:`nccl_comm_num` 可以加快GPU之间的通信效率,建议单机设置为1,多机设置为2。针对CPU线程数 :code:`num_threads` ,建议单机设置为1,多机设置为 :code:`nccl_comm_num` +1。 +- 关于AllReduce融合 :code:`fuse_all_reduce_ops` ,默认情况下会将同一layer中参数的梯度的AllReduce操作合并成一个,比如对于 :code:`fluid.layers.fc` 中有Weight和Bias两个参数,打开该选项之后,原本需要两次AllReduce操作,现在只用一次AllReduce 操作。此外,为支持更大粒度的参数梯度融合,Paddle提供了 :code:`FLAGS_fuse_parameter_memory_size` 和 :code:`FLAGS_fuse_parameter_groups_size` 两个环境变量选项。用户可以指定融合AllReduce操作之后,每个AllReduce操作的梯度字节数,比如希望每次AllReduce调用传输16MB的梯度,:code:`export FLAGS_fuse_parameter_memory_size=16` ,经验值为总通信量的十分之一。可以指定每次AllReduce操作的最大层数,即到达该层数就进行AllReduce,如指定50层 :code:`export FLAGS_fuse_parameter_groups_size=50` 。注意:目前不支持sparse参数梯度。 +- 关于使用分级式reduce :code:`use_hierarchical_allreduce` 。对于多机模式,针对小数据量的通信,Ring AllReduce通信效率低,采用Hierarchical AllReduce可以解决该问题。 +- 关于降低scope drop频率 :code:`num_iteration_per_drop_scope` 和fetch频率 :code:`fetch_frequency` 。减少scope drop和fetch频率,可以减少频繁的变量内存申请、释放和拷贝,从而提升性能。 +- 关于操作融合:通过参数融合可以提升训练性能。 + +设置这些参数可以参考: + +.. code-block:: python + :linenos: + + dist_strategy = DistributedStrategy() + dist_strategy.nccl_comm_num = 2 #建议多机设置为2,单机设置为1 + exec_strategy = fluid.ExecutionStrategy() + exe_st.num_threads = 3 #建议多机设置为nccl_comm_num+1,单机设置为1 + exec_strategy.num_iteration_per_drop_scope = 30 #scope drop频率 + dist_strategy.exec_strategy = exec_strategy + dist_strategy.fuse_all_reduce_ops = True #AllReduce是否融合 + ... + with fluid.program_guard(main_prog, startup_prog): #组网 + params = model.params + optimizer = optimizer_setting(params) + dist_optimizer = fleet.distributed_optimizer(optimizer, strategy=dist_strategy) + dist_optimizer.minimize(avg_cost) + ... + for pass_id in range(PASS_NUM): + batch_id = 0 + while True: + if batch_id % fetch_frequency == 0: #fetch频率 + fetched = exe.run(main_prog, fetch_list) + else: + exe.run([]) + + +3、训练方式 +=========== + +1、Local SGD + +GPU多机多卡同步训练过程中存在慢trainer现象,即每步中训练快的trainer的同步通信需要等待训练慢的trainer。由于每步中慢trainer的rank具有随机性,因此我们使用局部异步训练的方式——LocalSGD,通过多步异步训练(无通信阻塞)实现慢trainer时间均摊,从而提升同步训练性能。Local SGD训练方式主要有三个参数,分别是: + +.. 
csv-table:: + :header: "选项", "类型", "可选值", "说明" + :widths: 3, 3, 3, 5 + + ":code:`use_local_sgd`", "bool", "False/True", "是否开启Local SGD,默认不开启" + ":code:`local_sgd_is_warm_steps`", "int", "大于0", "训练多少轮之后才使用Local SGD方式训练" + ":code:`local_sgd_steps`", "int", "大于0", "Local SGD的步长" + +说明: + +- Local SGD的warmup步长 :code:`local_sgd_is_warm_steps` 影响最终模型的泛化能力,一般需要等到模型参数稳定之后在进行Local SGD训练,经验值可以将学习率第一次下降时的epoch作为warmup步长,之后再进行Local SGD训练。 +- Local SGD步长 :code:`local_sgd_steps` ,一般该值越大,通信次数越少,训练速度越快,但随之而来的时模型精度下降。经验值设置为2或者4。 + +具体的Local SGD的训练代码可以参考:https://github.com/PaddlePaddle/Fleet/tree/develop/examples/local_sgd/resnet + + +2、使用混合精度训练 + +V100 GPU提供了 `Tensor Core `_ 可以在混合精度计算场景极大的提升性能。使用混合精度计算的例子可以参考:https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification#using-mixed-precision-training + +目前Paddle只提供在两个模型(ResNet, BERT)的混合精度计算实现并支持static loss scaling,其他模型使用混合精度也可以参考以上的实现完成验证。 + +附录 +---- + +.. [#] 现代GPU:指至少支持运行 `CUDA `_ 版本7.5以上的GPU +.. [#] GPU利用率:这里指GPU计算能力被使用部分所占的百分比 +.. [#] https://en.wikipedia.org/wiki/Thread_pool +.. [#] https://en.wikipedia.org/wiki/Data-flow_diagram diff --git a/doc/paddle/advanced_guide/performance_improving/multinode_training_improving/gpu_training_with_low_bandwidth_dgc.md b/doc/paddle/advanced_guide/performance_improving/multinode_training_improving/gpu_training_with_low_bandwidth_dgc.md new file mode 100644 index 0000000000000000000000000000000000000000..33fef6f7ac5855639948476f844cc489b0dd7de0 --- /dev/null +++ b/doc/paddle/advanced_guide/performance_improving/multinode_training_improving/gpu_training_with_low_bandwidth_dgc.md @@ -0,0 +1,123 @@ +# 低配网络的分布式GPU训练 + +## 1. 背景 + 大规模分布式训练需要较高的网络带宽以便进行梯度的聚合更新,这限制了多节点训练时的可扩展性同时也需要昂贵的高带宽设备。在低带宽云网络等环境下进行分布式训练会变得更加糟糕。现有[Deep Gradient Compression](https://arxiv.org/abs/1712.01887)研究表明,分布式SGD中有99.9%的梯度交换都是冗余的,可以使用深度梯度压缩选择重要梯度进行通信来减少通信量,降低对通信带宽的依赖。Paddle目前实现了DGC的稀疏通信方式,可有效在低配网络下进行GPU分布式训练。下面将介绍DGC稀疏通信方式的使用方法、适用场景及基本原理。 + +## 2. 使用方法 +`注意:使用DGC请使用1.6.2及其之后版本,之前版本存在有若干bug。` +DGC稀疏通信算法以DGCMomentumOptimizer接口的形式提供,目前只支持GPU多卡及GPU多机分布式,由于现有fuse策略会造成DGC失效,所以使用DGC时需设置`strategy.fuse_all_reduce_ops=False`关闭fuse。DGC只支持Momentum优化器,使用时把当前代码中的Momentum优化器替换为DGCMomentumOptimizer,并添加DGC所需参数即可。如下代码所示,其中rampup_begin_step表示从第几步开始使用DGC,更详细参数可见[api文档](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/optimizer_cn/DGCMomentumOptimizer_cn.html#dgcmomentumoptimizer)。 +``` python +import paddle.fluid as fluid +# optimizer = fluid.optimizer.Momentum(learning_rate=0.001, momentum=0.9) +# 替换Momentum优化器,添加DGC所需参数 +optimizer = fluid.optimizer.DGCMomentumOptimizer( + learning_rate=0.001, momentum=0.9, rampup_begin_step=0) +optimizer.minimize(cost) +``` +在fleet中我们提供了[DGC的示例](https://github.com/PaddlePaddle/Fleet/tree/develop/examples/dgc_example)。示例中以数字手写体识别为例,将程序移植为分布式版本(注:DGC亦支持单机多卡),再加上DGC优化器。可参照此示例将单机单卡程序迁移到DGC。在单机单卡迁移到DGC过程中,一般需要先对齐多机Momentum的精度,再对齐DGC的精度。 + +## 3. 调参&适用场景 +### 3.1 预热调参 +对于正常的训练,使用DGC一般需进行预热训练,否则可能会有精度损失。如下图是ResNet50模型Imagenet数据集的训练结果,未进行预热训练的DGC最终损失了约0.3%的精度。 +
+![DGC Resnet50 acc1](images/dgc_resnet50_acc1.png) +
+ +预热训练调参可参照论文的设置。对图像分类,论文在Cifar10和ImageNet数据集上共164和90个epochs的训练中都采用了4个epochs的预热训练。在语言模型PTB数据集上,在共40个epochs的训练中选择了1个epoch进行预热训练。在语音识别AN4数据集上,80个epochs中选择1个epoch进行预热训练。 +论文中使用了75%, 93.75%, 98.4375%, 99.6%, 99.9%稀疏度逐渐提升的策略。由于paddle稀疏梯度聚合通信使用了AllGather,通信量会随卡数增加而增长,所以在卡数较多时不推荐较低稀疏度的预热训练。如75%稀疏度时每张卡会选择25%的梯度进行通信,卡数为32时通信量是正常dense通信的32\*(1-0.75)=8倍,所以前几个epoch使用正常的dense通信为佳。可参照如下写法 +``` python +# 1. 以1252个step为一个epoch,前2个epochs使用正常dense通信,后3个epochs逐步提升稀疏度为99.9% +optimizer = fluid.optimizer.DGCMomentumOptimizer( + learning_rate=0.001, momentum=0.9, rampup_begin_step=1252*2, + rampup_step=1252*3, sparsity=[0.984375, 0.996, 0.999]) +# 2. 前面4个epochs都使用dense通信,之后默认0.999稀疏度运行 +optimizer = fluid.optimizer.DGCMomentumOptimizer( + learning_rate=0.001, momentum=0.9, rampup_begin_step=1252*4) +``` +对于Fine-tuning训练,现测试可无需预热训练,从第0个epoch直接使用DGC即可。 +``` python +# 从第0步开始DGC稀疏通信 +optimizer = fluid.optimizer.DGCMomentumOptimizer( + learning_rate=0.001, momentum=0.9, rampup_begin_step=0) +``` +### 3.2 适用场景 +DGC稀疏通信在低带宽通信瓶颈时会有较大的性能提升,但在单机多卡及RDMA网络通信并非瓶颈情况下,并不会带来性能上的提升。同时由于AllGather的通信量会随卡数的增多而增大,所以DGC的多机训练规模也不宜过大。故DGC适用于低配网络,同时节点规模不宜过大,如>128张卡。在云网络或高带宽网络设备昂贵时,DGC可有效降低训练成本。 + +## 4. 原理 +本节原理部分基本来自[Deep Gradient Compression](https://arxiv.org/abs/1712.01887)论文,本文进行了部分理解翻译,英文较好者建议直接阅读论文。 +### 4.1 梯度稀疏 +DGC的基本思路是通过只传送重要梯度,即只发送大于给定阈值的梯度来减少通信带宽的使用。为避免信息的丢失,DGC会将剩余梯度在局部累加起来,最终这些梯度会累加大到足以传输。 +换个角度,从理论依据上来看,局部梯度累加等同于随时间推移增加batch size,(DGC相当于每一个梯度有自己的batch size)。设定 $F(w)$ 为需要优化的loss函数,则有着N个训练节点的同步分布式SGD更新公式如下 +$$ +F(w)=\\frac{1}{\|\\chi\|}\\sum\_{x\\in\\chi}f(x, w), \\qquad w\_{t+1}=w\_{t}-\\eta\\frac{1}{N b}\\sum\_{k=0}^{N}\\sum\_{x\\in\\mathcal{B}\_{k,t}}\\nabla f\\left(x, w\_{t}\\right) \\tag{1} +$$ +其中$\chi$是训练集,$w$是网络权值,$f(x, w)$是每个样本$x \in \chi$的loss,$\eta$是学习率,N是训练节点个数,$\mathcal{B}_{k, t}$代表第$k$个节点在第$t$个迭代时的minibatch,大小为b。 +考虑权重的第i个值,在T次迭代后,可获得 +$$ +w\_{t+T}^{(i)}=w\_{t}^{(i)}-\\eta T \\cdot \\frac{1}{N b T} \\sum\_{k=1}^{N}\\left(\\sum\_{\\tau=0}^{T-1} \\sum\_{x \\in \\mathcal{B}\_{k, t+\\tau}} \\nabla^{(i)} f\\left(x, w\_{t+\\tau}\\right)\\right) \\tag{2} +$$ +等式2表明局部梯度累加可以被认为batch size从$Nb$增大为$NbT$,其中T是$w^{(i)}$两次更新的稀疏通信间隔。 +### 4.2 局部梯度累加改进 +正常情况,稀疏更新会严重影响收敛性。DGC中采用动量修正(Momentum Correction)和局部梯度裁减(local gradient clipping)来解决这个问题。 +#### 4.2.1 动量修正 +有着N个节点分布式训练中vanilla momentum SGD公式, +$$ +u\_{t}=m u\_{t-1}+\\sum\_{k=1}^{N}\\left(\\nabla\_{k, t}\\right), \\quad w\_{t+1}=w\_{t}-\\eta u\_{t} \\tag{3} +$$ +其中$m$是动量因子,$N$是节点数,$\nabla_{k, t}=\frac{1}{N b} \sum_{x \in \mathcal{B}_{k, t}} \nabla f\left(x, w_{t}\right)$。 +考虑第i个权重$w^{(i)}$,在T次迭代后,权重更新公式如下, +$$ +w\_{t+T}^{(i)}=w\_{t}^{(i)}-\\eta\\left[\\cdots+\\left(\\sum\_{\\tau=0}^{T-2} m^{\\tau}\\right) \\nabla\_{k, t+1}^{(i)}+\\left(\\sum\_{\\tau=0}^{T-1} m^{\\tau}\\right) \\nabla\_{k, t}^{(i)}\\right] \\tag{4} +$$ +如果直接应用动量SGD到稀疏梯度更新中,则有公式, +$$ +v_{k, t}=v_{k, t-1}+\\nabla_{k, t}, \\quad u_{t}=m u_{t-1}+\\sum_{k=1}^{N} \\operatorname{sparse}\\left(v_{k, t}\\right), \\quad w_{t+1}=w_{t}-\\eta u_{t} \\tag{5} +$$ +其中$v_k$是训练节点k上的局部梯度累加项,一旦$v_k$大于某一阈值,则会在第二项中压缩梯度进行动量更新,并使用sparse()函数获得mask清空大于阈值的梯度。 +$w^{(i)}$在T次稀疏更新后的权重为, +$$ +w_{t+T}^{(i)}=w_{t}^{(i)}-\\eta\\left(\\cdots+\\nabla_{k, t+1}^{(i)}+\\nabla_{k, t}^{(i)}\\right) \\tag{6} +$$ +相比传统动量SGD,方程6缺失了累积衰减因子$\sum_{\tau=0}^{T-1} m^{\tau}$,会导致收敛精度的损失。如下图A,正常梯度更新从A点到B点,但是方程6则从A点到C点。当稀疏度很高时,会显著降低模型性能,所以需要在方程5基础上对梯度进行修正。 +
+![without momentum correction](images/dgc_without_momentum_correction.png)
+![with momentum correction](images/dgc_with_momentum_correction.png)
+
+若将方程3中速度项$u_t$当作“梯度”,则方程3第二项可认为是在”梯度“$u_t$上应用传统SGD,前面已经证明了局部梯度累加在传统SGD上是有效的。因此,可以使用方程3局部累加速度项$u_t$而非累加真实的梯度$\nabla_{k, t}$来修正方程5, +$$ +u_{k, t}=m u_{k, t-1}+\\nabla_{k, t}, \\quad v_{k, t}=v_{k, t-1}+u_{k, t}, \\quad w_{t+1}=w_{t}-\\eta \\sum_{k=1}^{N} \\operatorname{sparse}\\left(v_{k, t}\\right) \\tag{7} +$$ +修正后,如上图(b),方程可正常从A点到B点。除了传统动量方程修正,论文还给出了Nesterov动量SGD的修正方程。 +#### 4.2.2 局部梯度修剪 +梯度修剪是防止梯度爆炸的常用方法。这方法由Pascanu等人在2013年提出,当梯度的l2-norms和大于给定阈值时,就对梯度rescale。正常梯度修剪在梯度聚合后使用,而DGC因为每个节点独立的进行局部梯度累加,所以DGC在使用$G_t$累加前对其进行局部梯度修剪。阈值缩放为原来的$N^{-1/2}$ +$$ +thr_{G^{k}}=N^{-1 / 2} \\cdot thr_{G} \\tag{8} +$$ +### 4.3 克服迟滞效应 +因为推迟了较小梯度更新权重的时间,所以会有权重陈旧性问题。稀疏度为99.9%时大部分参数需600到1000步更新一次。迟滞效应会减缓收敛并降低模型精度。DGC中采用动量因子掩藏和预热训练来解决这问题。 +#### 4.3.1 动量因子掩藏 +DGC中使用下面方程来掩藏动量因子减缓陈旧性问题。 +$$ +Mask \\leftarrow\\left|v_{k, t}\\right|>t h r, \\quad v_{k, t} \\leftarrow v_{k, t} \\odot \\neg Mask, \\quad u_{k, t} \\leftarrow u_{k, t} \\odot \\neg Mask \\tag{9} +$$ +此掩码可以停止延迟梯度产生的动量,防止陈旧梯度把权重引入错误的方向。 + +#### 4.3.2 预热训练 +在训练初期,梯度变动剧烈,需要及时更新权重,此时迟滞效应影响会很大。为此DGC采用预热训练的方法,在预热期间使用更小的学习率来减缓网络的变化速度,并使用较小的稀疏度来减少需推迟更新的梯度数量。预热期间会线性增大学习率,指数型增加稀疏度到最终值。 + +### 4.4 正则化(Weight Decay)项修正 +Paddle框架以Weight Decay的形式实现正则化。以L2Decay为例,公式(3)中传统momentum添加weight decay后公式为 +$$ +G_{t}=\\sum_{k=1}^{N}\\left(\\nabla_{k, t}\\right)+\\lambda w_{t}, \\quad u_{t}=m u_{t-1}+G_{t}, \\quad w_{t+1}=w_{t}-\\eta u_{t} \\tag{10} +$$ +其中$\lambda$为Weight Decay系数,$G_{t}$为添加L2Decay项之后的聚合梯度。由于在公式7中进行了局部动量修正,所以按照相同思路在局部梯度上运用修正的Weight Decay项。如下公式在局部梯度上添加局部Weight Decay项即可。 +$$ +\\nabla_{k, t}=\\nabla_{k, t}+\\frac{\\lambda}{N} w_{t} \\tag{11} +$$ +在模型实际训练中,通常会设置weight decay的系数$\lambda=10^{-4}$,在卡数较多如4机32卡的情况下局部weight decay系数为$\frac{\lambda}{N}=\frac{10^{-4}}{32}=3.125*10^{-6}$,在数值精度上偏低,测试训练时会损失一定精度。为此还需对局部weight decay项进行数值修正。如下公式, +$$ +\\nabla_{k, t}^{'}=N \\nabla_{k, t}+\\lambda w_{t}, \\quad +G_{t}^{'}=\\sum_{k=1}^{N}\\left(\\nabla_{k, t}^{'}\\right)=N\\sum_{k=1}^{N}\\left(\\nabla_{k, t}\\right)+N\\lambda w_{t}, \\quad +G_{t}=\\frac{G_{t}^{'}}{N}=\\sum_{k=1}^{N}\\left(\\nabla_{k, t}\\right)+\\lambda w_{t} \\tag{12} +$$ +具体做法为对局部梯度乘以卡数求得$\nabla_{k, t}^{'}$,此时$\lambda$项则无需除以卡数,聚合梯度求得$G_{t}^{'}$再对聚合梯度除以卡数得到$G_{t}$即可。 diff --git a/doc/paddle/advanced_guide/performance_improving/multinode_training_improving/gpu_training_with_recompute.rst b/doc/paddle/advanced_guide/performance_improving/multinode_training_improving/gpu_training_with_recompute.rst new file mode 100644 index 0000000000000000000000000000000000000000..07b880616b31ddbd3508c66c604ea1c93e647ee8 --- /dev/null +++ b/doc/paddle/advanced_guide/performance_improving/multinode_training_improving/gpu_training_with_recompute.rst @@ -0,0 +1,160 @@ + +重计算:大Batch训练特性 +============= + +背景 +--------- + +随着训练数据规模的逐渐增加,训练更大、更深的深度学习模型成为一个主流趋势。目前的深度学习模型训练,通常要求保留前向计算的隐层结果,并且需要保存结果的数量会随着模型层数的增加线性增加,这对于目前能够使用的AI芯片的内存大小是个挑战。Forward Recomputation Backpropagation(FRB)可以在额外增加少量计算的情况下,显著增加模型的层数和宽度,同时也可以显著提升模型训练的batch大小。 + +原理 +--------- + +我们知道,深度学习网络的一次训练迭代包含三个步骤: + +- **前向计算**:运行前向算子(Operator) 来计算中间隐层(Variable)的值 +- **反向计算**:运行反向算子来计算参数(Parameter)的梯度 +- **优化**:应用优化算法以更新参数值 + +在前向计算过程中,前向算子会输出大量的中间计算结果,在Paddle中,使用 +Variable来存储这些隐层的中间结果。当模型层数加深时,其数量可达成千上万个, +占据大量的内存。Paddle的 `显存回收机制 `_ +会及时清除无用的中间结果,以节省存储。 +然而,有些中间结果是反向算子的输入,这些Variable必须存储在内存中,直到相应的反向算子计算完毕。 + +举个简单的例子, 我们定义一个由mul算子构成的网络,其前向计算为: + +.. math:: + + y = W_1 * x + + z = W_2 * y + +其中 :math:`x, y, z` 为向量, :math:`W_1, W_2` 为矩阵。容易知道,求 :math:`W_2` 梯度的反向计算为: + +.. 
math:: + W_{2}^{'} = z^{'} / y + +可以看到反向计算中用到了前向计算生成的变量 :math:`y` ,因此变量 :math:`y` 必须存储在内存中,直到这个反向算子计算完毕。当模型加深时,我们会有大量的“ :math:`y` ”,占据了大量的内存。 + +Forward Recomputation Backpropagation(FRB)的思想是将深度学习网络切分为k个部分(segments)。对每个segment而言:前向计算时,除了小部分必须存储在内存中的Variable外(我们后续会讨论这些特殊Variable),其他中间结果都将被删除;在反向计算中,首先重新计算一遍前向算子,以获得中间结果,再运行反向算子。简而言之,FRB和普通的网络迭代相比,多计算了一遍前向算子。 + +我们把切分网络的变量叫做checkpoints。 +那么问题来了,如何选择checkpoints呢?自从FRB方法提出以来 \ :sup:`[1], [2]`,大量学者在研究这一关键问题。 +我们知道深度学习网络通常是由一个个模块串联得到的,比如ResNet-50由16个block串联而成, +Bert-Large由24个transformer串联而成,以两个子模块中间的变量作为切分点就是一个很好的选择。 +对于非串联的网络(比如含有大量shortcut结构的网络),FRB也支持对其做切分, +只是可能多耗费一点内存(用于存储shortcut的Variable)。 +Mitsuru Kusumoto \ :sup:`[3]` 等提出了一种基于动态规划的算法, +可以根据指定的内存自动搜索合适的checkpoints,支持各种各样的网络结构。 + +下图是由4个fc Layer、3个relu Layer、1个sigmoid Layer和1个log-loss Layer串联而成的一个网络:最左侧为其前向计算流程、中间是普通的前向计算和反向计算流程、最右侧为添加FRB后的前向计算和反向计算流程。其中方框代表算子(Operator),红点代表前向计算的中间结果、蓝点代表checkpoints。 + +.. image:: images/recompute.png + +注:该例子完整代码位于 `source `_ + +添加FRB后,前向计算中需要存储的中间Variable从4个(红点)变为2个(蓝点), +从而节省了这部分内存。当然了,重计算的部分也产生了新的中间变量, +这就需要根据实际情况来做权衡了。这个例子里的网络比较浅,通常来讲, +对层数较深的网络,FRB节省的内存要远多于新增加的内存。 + +使用方法 +--------- + +我们实现了基于Paddle的FRB算法,叫做RecomputeOptimizer, +您可以根据其 `源码 `_ +与 +`文档 `_ +更深入地了解这一算法。我们为用户提供了两个使用RecomputeOptimizer的方法: +直接调用与Fleet API中使用。在单机单卡或者CPU训练中建议您直接调用RecomputeOptimizer, +在多卡训练或者多机训练任务上建议您在Fleet API中使用Recompute。 + +**1. 直接调用** + +直接调用RecomputeOptimizer非常简单,首先要定义一个经典的Optimizer,比如Adam; +然后在外面包一层RecomputeOptimizer;最后设置checkpoints即可。 + +.. code-block:: python + + import paddle.fluid as fluid + # 定义网络 + def mlp(input_x, input_y, hid_dim=128, label_dim=2): + print(input_x) + fc_1 = fluid.layers.fc(input=input_x, size=hid_dim) + prediction = fluid.layers.fc(input=[fc_1], size=label_dim, act='softmax') + cost = fluid.layers.cross_entropy(input=prediction, label=input_y) + sum_cost = fluid.layers.reduce_mean(cost) + return sum_cost, fc_1, prediction + input_x = fluid.layers.data(name="x", shape=[32], dtype='float32') + input_y = fluid.layers.data(name="y", shape=[1], dtype='int64') + cost, fc_1, pred = mlp(input_x, input_y) + # 定义RecomputeOptimizer + sgd = fluid.optimizer.Adam(learning_rate=0.01) + sgd = fluid.optimizer.RecomputeOptimizer(sgd) + # 设置checkpoints + sgd._set_checkpoints([fc_1, pred]) + # 运行优化算法 + sgd.minimize(cost) + +Recompute原则上适用于所有Optimizer。 + +**2. 在Fleet API中使用Recompute** + +`Fleet API `_ +是基于Fluid的分布式计算高层API。在Fleet API中添加RecomputeOptimizer +仅需要2步: + +- 设置dist_strategy.forward_recompute为True; + +- 设置dist_strategy.recompute_checkpoints。 + +.. code-block:: python + + from paddle.fluid.incubate.fleet.collective import fleet, DistributedStrategy + dist_strategy = DistributedStrategy() + dist_strategy.forward_recompute = True + dist_strategy.recompute_checkpoints=checkpoints + optimizer = fleet.distributed_optimizer(optimizer, strategy=dist_strategy) + optimizer.minimize(loss) + +为了帮助您快速地用Fleet API使用Recompute任务,我们提供了一些例子, +并且给出了这些例子的计算速度、效果和显存节省情况: + +- 用Recompute做Bert Fine-tuning: `source `_ + +- 用Recompute做目标检测:开发中. 
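+
+结合下文Q&A中“将子网络连接部分的变量添加为checkpoints”的建议,这里补充一个最小示意(网络结构与变量名均为假设,仅用于说明checkpoints的选取方式,所用接口与上文“直接调用”一节完全相同):
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+
+    # 示意网络:三个fc子模块串联,以子模块边界的输出作为checkpoints
+    def stacked_mlp(input_x, input_y, hid_dim=128, label_dim=2):
+        fc_1 = fluid.layers.fc(input=input_x, size=hid_dim)
+        fc_2 = fluid.layers.fc(input=fc_1, size=hid_dim)
+        prediction = fluid.layers.fc(input=fc_2, size=label_dim, act='softmax')
+        cost = fluid.layers.cross_entropy(input=prediction, label=input_y)
+        avg_cost = fluid.layers.reduce_mean(cost)
+        return avg_cost, fc_1, fc_2, prediction
+
+    input_x = fluid.layers.data(name="x", shape=[32], dtype='float32')
+    input_y = fluid.layers.data(name="y", shape=[1], dtype='int64')
+    cost, fc_1, fc_2, pred = stacked_mlp(input_x, input_y)
+
+    # 与前文相同:先定义基础优化器,再用RecomputeOptimizer包裹
+    optimizer = fluid.optimizer.Adam(learning_rate=0.01)
+    optimizer = fluid.optimizer.RecomputeOptimizer(optimizer)
+    # 将两个子模块边界的变量设置为checkpoints
+    optimizer._set_checkpoints([fc_1, fc_2])
+    optimizer.minimize(cost)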
+ +Q&A +------- + +- **是否支持带有随机性的Op?** + + 目前Paddle中带随机性的Op有:dropout,Recompute支持 + dropout Operator,可以保证重计算与初次计算结果保持一致。 + +- **有没有更多Recompute的官方例子?** + + 更多Recompute的例子将更新在 `examples `_ + 和 `Fleet `_ 库下,欢迎关注。 + +- **有没有添加checkpoints的建议?** + + 我们建议将子网络连接部分的变量添加为checkpoints,即: + 如果一个变量能将网络完全分为前后两部分,那么建议将其加入checkpoints。 + checkpoints的数目会影响内存的消耗:如果checkpoints很少, + 那么Recompute起的作用有限;如果checkpoints数量过多, + 那么checkpoints本身占用的内存量就较大,内存消耗可能不降反升。 + + 我们后续会添加一个估算内存用量的工具, + 可以对每个Operator运算前后的显存用量做可视化, + 帮助用户定位问题。 + +[1] Tianqi Chen, Bing Xu, Chiyuan Zhang, and Carlos Guestrin . Training deep nets with sublinear memory cost. +arXiv preprint, arXiv:1604.06174, 2016. + +[2] Audrunas Gruslys , Rémi Munos , Ivo Danihelka , Marc Lanctot , and Alex Graves. Memory efficient +backpropagation through time. In Advances in Neural Information Processing Systems (NIPS), pages 4125 4133, +2016. + +[3] Kusumoto, Mitsuru, et al. "A Graph Theoretic Framework of Recomputation Algorithms for Memory-Efficient Backpropagation." arXiv preprint arXiv:1905.11722 (2019). diff --git a/doc/paddle/advanced_guide/performance_improving/multinode_training_improving/gpu_training_with_recompute_en.rst b/doc/paddle/advanced_guide/performance_improving/multinode_training_improving/gpu_training_with_recompute_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..708ab04ffa7056a8458092514b2f9df3345b988d --- /dev/null +++ b/doc/paddle/advanced_guide/performance_improving/multinode_training_improving/gpu_training_with_recompute_en.rst @@ -0,0 +1,196 @@ + +Recompute: Training with bigger batch size +============= + +Context +--------- + +As the amount of training data increases, training deeper neural network models becomes more and more popular. Current deep-learning training usually keeps the hidden layer outputs in memory during the forward propagation, +and the number of outputs increases linearly with +the increase of the number of model layers, +which becomes a challenge of the memory size +for common devices. + + +Theory +--------- + +As we know, a training process of a deep-learning network contains 3 steps: + +- **Forward Propagation**:Running forward operators and generate temporary variables as output +- **Backward Propagation**:Running backward operators to compute gradients of parameters +- **Optimization**:Applying optimization algorithm to update parameters + +When the model becomes deeper, the number of temporary variables +generated in the forward propagation process can reach tens +of thousands, occupying a large amount of memory. +The `Garbage Collection mechanism `_ +in Paddle can delete useless variables for the sake of saving memory. +However, some variables serve as inputs of backward operators, +they must be kept in memory until particular operator finish. + +Take a simple example, define a network contains two `mul` operators, +the forward propagation works as follows: + +.. math:: + + y = W_1 * x + + z = W_2 * y + +where :math:`x, y, z` are vectors, :math:`W_1, W_2` are matrix。It is easy to conduct that the gradient of :math:`W_2` is: + +.. math:: + W_{2}^{'} = z^{'} / y + +We can see that :math:`y` is used in the backward propagation process, +thus it must be kept in the memory during the whole forward propagation. +When network grows deeper, more 'y's need to be stored, +adding more requirements to the memory. + +Forward Recomputation Backpropagation(FRB) splits a deep network to k segments. 
+For each segment, in forward propagation, +most of the temporary variables are erased in time, +except for some special variables (we will talk about that later); +in backward propagation, the forward operators will be recomputed +to get these temporary variables before running backward operators. +In short, FBR runs forward operators twice. + +But how to split the network? A deep learning network usually consists +of connecting modules in series: +ResNet-50 contains 16 blocks and Bert-Large contains 24 transformers. +It is a good choice to treat such modules as segments. +The variables among segments are +called as checkpoints. + +The following picture is a network with 4 fc layers, 3 relu layers, +1 sigmoid layer and 1 log-loss layer in series. +The left column is the forward propagation, +the middle column is the normal backward propagation, +and the right column is the FRB. +Rectangular boxes represent the operators, red dots represent +the intermediate variables in forward computation, blue dots +represent checkpoints and arrows represent the dependencies between operators. + +.. image:: images/recompute.png + +Note: the complete source code of this example: `source `_ + +After applying FBR, the forward computation only needs to store +2 variables (the blue dots) instead of 4 variables (the red +dots), saving the corresponding memories. It is notable that +recomputing operators generate new intermediate variables at the same time, +a trade-off needs to be considered in this situation. +While according to our experiments, +FBR usually saves rather than increase the memory load. + +Usage +--------- + +We have implemented the FRB algorithm named "RecomputeOptimizer" +based on Paddle. More information about this algorithm can +be learned by the `source code `_ +and the +`document `_ +of RecomputeOptimizer. + +There are 2 methods to apply RecomputeOptimizer in your Paddle +program: call RecomputeOptimizer directly or use it with Fleet +API. For single-GPU card training or CPU training, we recommend +directly calling; For multi-GPU training, we +recommend using with Fleet API. + +**1. Directly calling** + +Calling RecomputeOptimizer is very easy: first, define a classic +optimizer, such as Adam; second, wrap it with RecomputeOptimizer; +third, set the checkpoints. + +.. code-block:: python + + import paddle.fluid as fluid + # Define the network + def mlp(input_x, input_y, hid_dim=128, label_dim=2): + print(input_x) + fc_1 = fluid.layers.fc(input=input_x, size=hid_dim) + prediction = fluid.layers.fc(input=[fc_1], size=label_dim, act='softmax') + cost = fluid.layers.cross_entropy(input=prediction, label=input_y) + sum_cost = fluid.layers.reduce_mean(cost) + return sum_cost, fc_1, prediction + input_x = fluid.layers.data(name="x", shape=[32], dtype='float32') + input_y = fluid.layers.data(name="y", shape=[1], dtype='int64') + cost, fc_1, pred = mlp(input_x, input_y) + # define RecomputeOptimizer + sgd = fluid.optimizer.Adam(learning_rate=0.01) + sgd = fluid.optimizer.RecomputeOptimizer(sgd) + # set checkpoints + sgd._set_checkpoints([fc_1, pred]) + # apply optimization + sgd.minimize(cost) + +In principle, recompute is for all kinds of optimizers in Paddle. + +**2. Using Recompute in Fleet API** + +`Fleet API `_ +is a high-level API for distributed training in Fluid. Adding +RecomputeOptimizer to Fluid takes two steps: + +- set dist_strategy.forward_recompute to True + +- set dist_strategy.recompute_checkpoints + +.. 
code-block:: python + + from paddle.fluid.incubate.fleet.collective import fleet, DistributedStrategy + dist_strategy = DistributedStrategy() + dist_strategy.forward_recompute = True + dist_strategy.recompute_checkpoints=checkpoints + optimizer = fleet.distributed_optimizer(optimizer, strategy=dist_strategy) + optimizer.minimize(loss) + +We supply some examples of using recompute in Fleet API for users. +We also post corresponding training speed, +test results and memory usages of these examples for reference. + + +- Fine-tuning Bert Large model with recomputing: `source `_ + +- Training object detection models with recomputing:developing. + +Q&A +------- + +- **Does RecomputeOptimizer support operators with random outputs?** + +We currently found that the dropout operator has random results +and RecomputeOptimizer is able to keep the outputs of +first-computation and recomputation consistent. + + +- **Are there more official examples of Recompute?** + + More examples will be updated at `examples `_ +and `Fleet `_ . Feel free to +raise issues if you get any problem with these examples. + +- **How should I set checkpoints?** + +The position of checkpoints is important: +we suggest setting the variable between the sub-model as checkpoints, +that is, set a variable as a checkpoint if it +can separate the network into two parts without short-cut connections. +The number of checkpoints is also important: +too few checkpoints will reduce the memory saved by recomputing while +too many checkpoints will occupy a lot of memory themselves. +We will add a tool to estimate the memory usage with specific checkpoints, +helping users to choose checkpointing variables. + +[1] Tianqi Chen, Bing Xu, Chiyuan Zhang, and Carlos Guestrin . Training deep nets with sublinear memory cost. +arXiv preprint, arXiv:1604.06174, 2016. + +[2] Audrunas Gruslys , Rémi Munos , Ivo Danihelka , Marc Lanctot , and Alex Graves. Memory efficient +backpropagation through time. In Advances in Neural Information Processing Systems (NIPS), pages 4125 4133, +2016. + +[3] Kusumoto, Mitsuru, et al. "A Graph Theoretic Framework of Recomputation Algorithms for Memory-Efficient Backpropagation." arXiv preprint arXiv:1905.11722 (2019). 
diff --git a/doc/paddle/advanced_guide/performance_improving/multinode_training_improving/images/dgc_resnet50_acc1.png b/doc/paddle/advanced_guide/performance_improving/multinode_training_improving/images/dgc_resnet50_acc1.png new file mode 100644 index 0000000000000000000000000000000000000000..6fe02f64a5ef4ae6cda7204445532396da5e7e6d Binary files /dev/null and b/doc/paddle/advanced_guide/performance_improving/multinode_training_improving/images/dgc_resnet50_acc1.png differ diff --git a/doc/paddle/advanced_guide/performance_improving/multinode_training_improving/images/dgc_with_momentum_correction.png b/doc/paddle/advanced_guide/performance_improving/multinode_training_improving/images/dgc_with_momentum_correction.png new file mode 100644 index 0000000000000000000000000000000000000000..22f169ab479a14f3d2dc73b91b6538941c825cab Binary files /dev/null and b/doc/paddle/advanced_guide/performance_improving/multinode_training_improving/images/dgc_with_momentum_correction.png differ diff --git a/doc/paddle/advanced_guide/performance_improving/multinode_training_improving/images/dgc_without_momentum_correction.png b/doc/paddle/advanced_guide/performance_improving/multinode_training_improving/images/dgc_without_momentum_correction.png new file mode 100644 index 0000000000000000000000000000000000000000..533a4c293dfbc7742bfcebbdd4eb2b38586aaa4f Binary files /dev/null and b/doc/paddle/advanced_guide/performance_improving/multinode_training_improving/images/dgc_without_momentum_correction.png differ diff --git a/doc/paddle/advanced_guide/performance_improving/multinode_training_improving/images/recompute.png b/doc/paddle/advanced_guide/performance_improving/multinode_training_improving/images/recompute.png new file mode 100644 index 0000000000000000000000000000000000000000..11e9778305c8fc11b659bfdcf965117149b0a317 Binary files /dev/null and b/doc/paddle/advanced_guide/performance_improving/multinode_training_improving/images/recompute.png differ diff --git a/doc/paddle/advanced_guide/performance_improving/singlenode_training_improving/memory_optimize.rst b/doc/paddle/advanced_guide/performance_improving/singlenode_training_improving/memory_optimize.rst new file mode 100644 index 0000000000000000000000000000000000000000..ae9238813a614c7e3c022e06c59995e21f589c10 --- /dev/null +++ b/doc/paddle/advanced_guide/performance_improving/singlenode_training_improving/memory_optimize.rst @@ -0,0 +1,158 @@ +.. _api_guide_memory_optimize: + +########### +存储分配与优化 +########### + +1. PaddlePaddle的显存分配策略 +=========================== + +1.1. 显存自增长AutoGrowth策略 +-------------------------- +自1.6+的版本起,PaddlePaddle支持显存自增长AutoGrowth策略,按需分配显存,且已于1.7+版本中默认开启,方便用户在同一张GPU卡上同时运行多个任务。 + +由于原生的CUDA系统调用 :code:`cudaMalloc` 和 :code:`cudaFree` 均是同步操作,非常耗时。 +因此显存自增长AutoGrowth策略会缓存已分配到的显存,供后续分配使用,具体方式为: + +- 在前几次显存分配时,框架会调用 :code:`cudaMalloc` 按需分配,但释放时不会调用 :code:`cudaFree` 返回给GPU,而是在框架内部缓存起来。 + +- 在随后的显存分配时,框架会首先检查缓存的显存中是否有合适的块,若有则从中分割出所需的显存空间返回,否则才调用 :code:`cudaMalloc` 直接从GPU中分配。随后的显存释放亦会缓存起来供后续分配使用。 + +因此,显存自增长AutoGrowth策略会在前几个batch训练时分配较慢(因为频繁调用 :code:`cudaMalloc` ),在随后训练过程中基本不会影响模型训练速度。 + +1.2. 显存预分配策略 +---------------- + +除了显存自增长AutoGrowth策略以外,PaddlePaddle还提供了显存预分配策略。显存预分配策略是PaddlePaddle 1.7版本前的默认显存分配策略。 + +显存预分配策略会在第一次分配时分配很大chunk_size的显存块,随后的显存分配大多从预分配的显存块中切分获得。 +其中,chunk_size由环境变量 :code:`FLAGS_fraction_of_gpu_memory_to_use` 确定,chunk_size的计算公式为: + +.. 
code-block:: python + + chunk_size = FLAGS_fraction_of_gpu_memory_to_use * 单张GPU卡的当前可用显存值 + +:code:`FLAGS_fraction_of_gpu_memory_to_use` 的默认值为0.92,即框架预先分配显卡92%的当前可用显存值。 + +显存预分配策略分配显存的具体方式为: + +- 在分配requested_size大小的显存时, + - 若requested_size <= chunk_size,则框架会预先分配chunk_size大小的显存池chunk,并从chunk中分出requested_size大小的块返回。之后每次申请显存都会从chunk中分配。 + - 若requested_size > chunk_size,则框架会直接调用 :code:`cudaMalloc` 分配requested_size大小的显存返回。 + +- 在释放free_size大小的显存时, + - 若free_size <= chunk_size,则框架会将该显存放回预分配的chunk中,而不是直接返回给CUDA。 + - 若free_size > chunk_size,则框架会直接调用 :code:`cudaFree` 将显存返回给CUDA。 + +若你的GPU卡上有其他任务占用显存,你可以适当将 :code:`FLAGS_fraction_of_gpu_memory_to_use` 减少,保证框架能预分配到合适的显存块,例如: + +.. code-block:: shell + + export FLAGS_fraction_of_gpu_memory_to_use=0.4 # 预先40%的GPU显存 + +若 :code:`FLAGS_fraction_of_gpu_memory_to_use` 设为0,则每次显存分配和释放均会调用 :code:`cudaMalloc` 和 :code:`cudaFree` ,会严重影响性能,不建议你使用。 +只有当你想测量网络的实际显存占用量时,你可以设置 :code:`FLAGS_fraction_of_gpu_memory_to_use` 为0,观察nvidia-smi显示的显存占用情况。 + +1.3. 显存分配策略的选择方式 +----------------------- +自1.6+版本起,PaddlePaddle同时支持显存自增长AutoGrowth策略和显存预分配策略,并通过环境变量 :code:`FLAGS_allocator_strategy` 控制。 + +选择显存自增长AutoGrowth的方式为: + +.. code-block:: shell + + export FLAGS_allocator_strategy=auto_growth # 选择显存自增长AutoGrowth策略 + +选择显存预分配策略的方式为: + +.. code-block:: shell + + export FLAGS_allocator_strategy=naive_best_fit # 选择显存预分配策略 + +此外,自1.7.2+版本起,PaddlePaddle提供了环境变量 :code:`FLAGS_gpu_memory_limit_mb` ,用于控制单个任务进程可分配的最大显存,单位是MB。默认值是0,表示没有限制,可分配全部显存。如果设置为大于0的值,则会在分配的显存超过限制时报错,即使此时系统还存在空闲的显存空间。 + +2. PaddlePaddle的存储优化策略 +=========================== + +PaddlePaddle提供了多种通用存储优化方法,优化你的网络的存储占用(包括显存和内存)。 + +2.1. GC策略: 存储垃圾及时回收 +------------------------- + +GC(Garbage Collection)的原理是在网络运行阶段及时释放无用变量的存储空间,达到节省存储空间的目的。GC适用于使用Executor,ParallelExecutor做模型训练/预测的场合,但不适用于C++预测库接口。 + +**GC策略已于1.6+版本中默认开启。** + +GC策略由三个环境变量控制: + + +- :code:`FLAGS_eager_delete_tensor_gb` + +GC策略的使能开关,double类型,在<1.6的版本中默认值为-1,在1.6+版本中默认值为0。GC策略会积攒一定大小的存储垃圾后再统一释放,:code:`FLAGS_eager_delete_tensor_gb` 控制的是存储垃圾的阈值,单位是GB。**建议用户设置** :code:`FLAGS_eager_delete_tensor_gb=0` 。 + +若 :code:`FLAGS_eager_delete_tensor_gb=0` ,则一旦有存储垃圾则马上回收,最为节省存储空间。 + +若 :code:`FLAGS_eager_delete_tensor_gb=1` ,则存储垃圾积攒到1G后才触发回收。 + +若 :code:`FLAGS_eager_delete_tensor_gb<0` ,则GC策略关闭。 + + +- :code:`FLAGS_memory_fraction_of_eager_deletion` + +GC策略的调节flag,double类型,默认值为1,范围为[0,1],仅适用于使用ParallelExecutor或CompiledProgram+with_data_parallel的场合。 +GC内部会根据变量占用的存储空间大小,对变量进行降序排列,且仅回收前 :code:`FLAGS_memory_fraction_of_eager_deletion` 大的变量的存储空间。**建议用户维持默认值**,即 :code:`FLAGS_memory_fraction_of_eager_deletion=1` 。 + +若 :code:`FLAGS_memory_fraction_of_eager_deletion=0.6` ,则表示仅回收存储占用60%大的变量的存储空间。 + +若 :code:`FLAGS_memory_fraction_of_eager_deletion=0` ,则表示不回收任何变量的存储空间,GC策略关闭。 + +若 :code:`FLAGS_memory_fraction_of_eager_deletion=1` ,则表示回收所有变量的存储空间。 + + +- :code:`FLAGS_fast_eager_deletion_mode` + +快速GC策略的开关,bool类型,默认值为True,表示使用快速GC策略。快速GC策略会不等待CUDA Kernel结束直接释放显存。**建议用户维持默认值**,即 :code:`FLAGS_fast_eager_deletion_mode=True` 。 + + +2.2. Inplace策略: Op内部的输出复用输入 +---------------------------------- + +Inplace策略的原理是Op的输出复用Op输入的存储空间。例如,reshape操作的输出和输入可复用同一片存储空间。 + +Inplace策略适用于使用ParallelExecutor或CompiledProgram+with_data_parallel的场合,通过 :code:`BuildStrategy` 设置。此策略不支持使用Executor+Program做单卡训练、使用C++预测库接口等场合。 + +**Inplace策略已于1.6+版本中默认开启。** + +具体方式为: + +.. 
code-block:: python + + build_strategy = fluid.BuildStrategy() + build_strategy.enable_inplace = True # 开启Inplace策略 + + compiled_program = fluid.CompiledProgram(train_program) + .with_data_parallel(loss_name=loss.name, build_strategy=build_strategy) + + +在<1.6的版本中,由于设计上的一些问题,在开启Inplace策略后,必须保证后续exe.run中fetch_list的变量是persistable的,即假如你后续需要fetch的变量为loss和acc,则必须设置: + +.. code-block:: python + + loss.persistable = True + acc.persistable = True + + +**在1.6+的版本中,无需设置fetch变量为persistable。** + + +3. 存储优化Best Practice +======================= + +我们推荐你的最佳存储优化策略为: + +- 开启GC策略:设置 :code:`FLAGS_eager_delete_tensor_gb=0` 。 + +- 开启Inplace策略:设置 :code:`build_strategy.enable_inplace = True` ,并在<1.6版本中设置fetch_list中的 :code:`var.persistable = True` 。 + +**在1.6+的版本中,上述最佳策略均已默认打开,无需手动配置,亦无需设置fetch_list变量为persistable。** + diff --git a/doc/paddle/advanced_guide/performance_improving/singlenode_training_improving/memory_optimize_en.rst b/doc/paddle/advanced_guide/performance_improving/singlenode_training_improving/memory_optimize_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..2a1e3ecb0c52cc198c8e6c6e54f8a175222434a6 --- /dev/null +++ b/doc/paddle/advanced_guide/performance_improving/singlenode_training_improving/memory_optimize_en.rst @@ -0,0 +1,178 @@ +.. _api_guide_memory_optimize_en: + +########### +Memory Allocation and Optimization +########### + +1. Memory Allocation Strategy +=========================== + +1.1. AutoGrowth Strategy +-------------------------- + +Since version 1.6+, PaddlePaddle supports the AutoGrowth strategy, which allocates memory on demand. +AutoGrowth strategy has been enabled by default in version 1.7+, making it convenient for users to +run multiple tasks on the same GPU card at the same time. + +Because the native CUDA system calls :code:`cudaMalloc` and :code:`cudaFree` are synchronous operations, +which are very time-consuming, the AutoGrowth strategy will cache the allocated memory for subsequent allocation. +The specific methods are as follows: + +- In the first few memory allocations, PaddlePaddle framework will call :code:`cudaMalloc` and allocate memory on demand. When releasing the allocated memory, it will not call :code:`cudaFree` to return the memory to GPU, but cache the memory inside the framework. + +- In the subsequent allocations, PaddlePaddle framework will first check if there is a fit block (block size larger than the required memory size) in the cached memory. If there is, it will split the required memory from the fit block and return. Otherwise, it will call :code:`cudaMalloc` to allocate memory from GPU. The allocated memory are also cached when being released for subsequent allocation. + +Therefore, the AutoGrowth strategy may slow the speed in the first few batches of model training, +but will not affect the speed in the subsequent training process. + +1.2. Pre-Allocation Strategy +---------------- + +In addition to the AutoGrowth strategy, paddlepaddle also provides a Pre-Allocation strategy, +which is the default memory allocation strategy before paddlepaddle 1.7. + +The Pre-Allocation strategy allocates a large size chunk at the first allocation, and the subsequent memory allocation is mostly obtained from the pre allocated memory chunk. +Among them, the chunk size is determined by the environment variable :code:`FLAGS_fraction_of_gpu_memory_to_use`, and the calculation formula of chunk size is: + +.. 
code-block:: python + + chunk_size = FLAGS_fraction_of_gpu_memory_to_use * number of current available memory of a single GPU card + +The default value of :code:`FLAGS_fraction_of_gpu_memory_to_use` is 0.92, that is, the framework will pre allocates +92% of the currently available memory of the GPU card. + +The specific way of Pre-Allocation strategy to allocate GPU memory is: + +- When allocating memory of requested_size, + - If requested_size <= chunk_size, the framework will first allocate a memory chunk of chunk_size, then split a block of requested_size and return the block. Every subsequent memory allocation will be performed on the chunk. + - If requested_size > chunk_size, the framework will call :code:`cudaMalloc` to allocate memory block of requested_size and return. + +- When freeing memory of requested_size, + - If free_size <= chunk_size, the framework will put the memory block back into the pre-allocated chunk, instead of returning back to GPU. + - If free_size > chunk_size, the framework will call :code:`cudaFree` and return the memory back to GPU. + +If there are other tasks on your GPU card that occupy the memory, you can appropriately decrease :code:`FLAGS_fraction_of_gpu_memory_to_use` +to ensure that the framework can pre-allocate the memory block of appropriate size, for example + +.. code-block:: shell + + export FLAGS_fraction_of_gpu_memory_to_use=0.4 # Pre-allocate 40% memory of a single GPU card + +If :code:`FLAGS_fraction_of_gpu_memory_to_use` is set to 0, the framework will call :code:`cudaMalloc` and :code:`cudaFree` every time the memory is allocated and released, which will seriously affect the performance and is not recommended. Only when you want to measure the actual memory usage of the network, you could set :code:`FLAGS_fraction_of_gpu_memory_to_use` to 0, and observe the memory usage of command nvidia-smi display. + +1.3. Configuration of memory allocation strategy +----------------------- +Since version 1.6+, PaddlePaddle supports both the AutoGrowth strategy and the Pre-Allocation Strategy, and control the strategy used in framework by +the environment variable :code:`FLAGS_allocator_strategy`. + +Use AutoGrowth strategy: + +.. code-block:: shell + + export FLAGS_allocator_strategy=auto_growth # Use AutoGrowth strategy + +Use Pre-Allocation strategy: + +.. code-block:: shell + + export FLAGS_allocator_strategy=naive_best_fit # Use Pre-Allocation strategy + +Plus, since version 1.7.2+, PaddlePaddle provides an environment variable :code:`FLAGS_gpu_memory_limit_mb`, which controls the maximum gpu memory limit that the process can allocate. +If it is equal to 0, there would be no limit and all gpu memory would be available to the process. If it is larger than 0, the process would raise out of memory error if the allocated +memory exceeds the limit even though there is available memory on the gpu card. The unit is MB and default value is 0. + +2. Memory Optimization Strategy +=========================== + +Paddlepaddle provides several general memory optimization methods to optimize the memory usage of your network (including general memory and GPU memory). + +2.1. GC Strategy: memory garbage eager collection +------------------------- + +The principle of GC(Garbage Collection)is to release the memory space of useless variables eagerly during network running, +in order to save memory space. GC is suitable for training and inference using Executor or ParallelExecutor, but it is not suitable for C++ inference library. 
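+
+As a quick reference, the recommended configuration (each of the three flags is described in detail below) can be applied by exporting the corresponding environment variables in the shell, or, as in the minimal sketch below, from Python, assuming the flags are set before :code:`paddle.fluid` is first imported in the process:
+
+.. code-block:: python
+
+    import os
+
+    # A minimal sketch: setting the GC-related flags in the environment before
+    # importing paddle.fluid is equivalent to exporting them in the shell.
+    os.environ['FLAGS_eager_delete_tensor_gb'] = '0'             # collect garbage immediately
+    os.environ['FLAGS_memory_fraction_of_eager_deletion'] = '1'  # keep the default: consider all variables
+    os.environ['FLAGS_fast_eager_deletion_mode'] = 'true'        # keep the default: fast GC
+
+    import paddle.fluid as fluid  # the flags above take effect from this import on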
+ +**Since version 1.6+, GC Strategy is enabled by default.** + +GC Strategy is controlled by 3 environment variable: + + +- :code:`FLAGS_eager_delete_tensor_gb` + +Variable to enable GC, its data type is double. The default value is -1 in PaddlePaddle with version < 1.6, +and is 0 in PaddlePaddle with version >= 1.6. GC Strategy will cache a certain amount of memory garbage and release it uniformly. +:code:`FLAGS_eager_delete_tensor_gb` means the threshold of cached memory garbage, the unit of which is GB. **It is recommended to set** :code:`FLAGS_eager_delete_tensor_gb=0`. + +If :code:`FLAGS_eager_delete_tensor_gb=0`, once there is memory garbage, it will be collected immediately to save memory. + +If :code:`FLAGS_eager_delete_tensor_gb=1`, the memory garbage is collected when the cached amount of garbage reaches 1GB. + +If :code:`FLAGS_eager_delete_tensor_gb<0`, GC Strategy is disabled. + + +- :code:`FLAGS_memory_fraction_of_eager_deletion` + +Variable to control GC Strategy, its data type is double. The default value is 1, range [0,1]. It is only suitable for ParallelExecutor or CompiledProgram+with_data_parallel. +GC will sort the variables in descending order according to the memory space occupied by the variables, +and only collect the memory space of top :code:`FLAGS_memory_fraction_of_eager_deletion` variables. +**It is recommended to remain default value**, that is :code:`FLAGS_memory_fraction_of_eager_deletion=1`. + +If :code:`FLAGS_memory_fraction_of_eager_deletion=0.6`, top 60% variables will be collected. + +If :code:`FLAGS_memory_fraction_of_eager_deletion=0`, no variable will be collected, GC Strategy is disabled. + +If :code:`FLAGS_memory_fraction_of_eager_deletion=1`, all variables will be collected. + + +- :code:`FLAGS_fast_eager_deletion_mode` + +Variable to enable fast GC Strategy, its type is bool. The default value is True, which means use fast GC Strategy. +Fast GC Strategy will collect the memory garbage immediately instead of waiting for CUDA Kernel finish. **It is recommended to remain default value**, that is :code:`FLAGS_fast_eager_deletion_mode=True`. + + +2.2. Inplace Strategy: output reuses input inside operator +---------------------------------- + +The principle of Inplace strategy is that the output of some operators can reuses the memory space of input. +For example, the output and input of operator :code:`reshape` can reuse the same memory space. + +Inplace Strategy is suitable for ParallelExecutor or CompiledProgram+with_data_parallel, which can be set through :code:`BuildStrategy`. +The Strategy is not suitable for Executor+Program or C++ inference library. + +**Since version 1.6+, Inplace Strategy is enabled by default.** + +The specific way of Inplace strategy is: + +.. code-block:: python + + build_strategy = fluid.BuildStrategy() + build_strategy.enable_inplace = True # Enable Inplace Strategy + + compiled_program = fluid.CompiledProgram(train_program) + .with_data_parallel(loss_name=loss.name, build_strategy=build_strategy) + + +In PaddlePaddle with version < 1.6, due to of some design problems, when the Inplace Strategy is enabled, +the variable in fetch_list in the subsequent :code:`exe.run` must be persistent. +That is, if you the variables you want to fetch are loss and acc, you must set: + +.. code-block:: python + + loss.persistable = True + acc.persistable = True + + +**Since version 1.6+, setting variables in fetch_list to persistable is not needed.** + + +3. 
Memory Optimization Best Practice +======================= + +We recommend the best memory optimization strategy as: + +- Enable GC strategy:set :code:`FLAGS_eager_delete_tensor_gb=0`. + +- Enable Inplace strategy:set :code:`build_strategy.enable_inplace = True`, and set variables in fetch_list to persistable using :code:`var.persistable = True` when the version of PaddlePaddle < 1.6. + +**Since version 1.6+, the above optimal strategy have been enabled by default and setting variables in fetch_list to persistable is not needed.** + diff --git a/doc/paddle/advanced_guide/performance_improving/singlenode_training_improving/training_best_practice.rst b/doc/paddle/advanced_guide/performance_improving/singlenode_training_improving/training_best_practice.rst new file mode 100644 index 0000000000000000000000000000000000000000..95e71abd70e3605f94f3faa0aa1367db499b022b --- /dev/null +++ b/doc/paddle/advanced_guide/performance_improving/singlenode_training_improving/training_best_practice.rst @@ -0,0 +1,333 @@ +.. _api_guide_singlenode_training_best_practice: + + +##################### +单机训练优秀实践 +##################### + +开始优化您的单机训练任务 +------------------------- + +PaddlePaddle Fluid可以支持在现代CPU、GPU平台上进行训练。如果您发现Fluid进行单机训练的速度较慢,您可以根据这篇文档的建议对您的Fluid程序进行优化。 + +神经网络训练代码通常由三个步骤组成:网络构建、数据准备、模型训练。这篇文档将分别从这三个方向介绍Fluid训练中常用的优化方法。 + + +1. 网络构建过程中的配置优化 +================== + +这部分优化与具体的模型有关,在这里,我们列举出一些优化过程中遇到过的一些示例。 + +1.1 cuDNN操作的选择 +^^^^^^^^^^^^^^^^ + +cuDNN是NVIDIA提供的深度神经网络计算库,其中包含了很多神经网络中常用算子,Paddle中的部分Op底层调用的是cuDNN库,例如 :code:`conv2d` : + +.. code-block:: python + + paddle.fluid.layers.conv2d(input, + num_filters, + filter_size, + stride=1, + padding=0, + dilation=1, + groups=None, + param_attr=None, + bias_attr=None, + use_cudnn=True, + act=None, + name=None, + data_format="NCHW") + +在 :code:`use_cudnn=True` 时,框架底层调用的是cuDNN中的卷积操作。 + +通常cuDNN库提供的操作具有很好的性能表现,其性能明显优于Paddle原生的CUDA实现,比如 :code:`conv2d` 。但是cuDNN中有些操作的性能较差,比如: :code:`conv2d_transpose` 在 :code:`batch_size=1` 时、:code:`pool2d` 在 :code:`global_pooling=True` 时等,这些情况下,cuDNN实现的性能差于Paddle的CUDA实现,建议手动设置 :code:`use_cudnn=False` 。 + +1.2 减少模型中Layer的个数 +^^^^^^^^^^^^^^^^^^ + +为方便用户使用,飞桨提供一些不同粒度的Layer,其中有些Layer的组合可以通过单个Layer完成。比如: + +(1) :code:`fluid.layers.softmax_with_cross_entropy` ,该操作其实是 :code:`fluid.layers.softmax` 和 :code:`fluid.layers.cross_entropy` 的组合,因此如果模型中有出现 + +.. code-block:: python + + logits = fluid.layers.softmax(logits) + loss = fluid.layers.cross_entropy(logits, label, ignore_index=255) + +可以直接替换成 + +.. code-block:: python + + loss = fluid.layers.softmax_with_cross_entropy(logits, label, ignore_index=255, numeric_stable_mode=True) + + +(2) 如果模型中需要对数据进行标准化,可以直接使用 :code:`fluid.layers.data_norm` ,而不用通过一系列layer组合出数据的标准化操作。 + +因此,建议在构建模型时优先使用飞桨提供的单个Layer完成所需操作,这样减少模型中Layer的个数,并因此加速模型训练。 + + +2. 数据准备优化 +============= + +数据准备通常分为两部分:第一部分是数据加载,即程序从磁盘中加载训练/预测数据;第二部分是数据预处理,程序对加载的数据进行预处理,比如图像任务通常需要进行数据增强、Shuffle等。 +这两部分需要用户根据自己的模型需要进行设置,只需要最后得到Data Reader接口即可。Data Reader返回iterable对象,可以每次返回一条样本或者一组样本。代码示例如下: + +.. code-block:: python + + def data_reader(width, height): + def reader(): + while True: + yield np.random.uniform(-1, 1,size=width*height), np.random.randint(0,10) + return reader + train_data_reader = data_reader(32, 32) + + +Paddle提供了两种方式从Data Reader中读取数据: :ref:`user_guide_use_numpy_array_as_train_data` 和 :ref:`user_guides_use_py_reader` ,详情请参考文档 :ref:`user_guide_prepare_data` 。 + +2.1 同步数据读取 +^^^^^^^^^^^^^^^^ + +同步数据读取是一种简单并且直观的数据准备方式,代码示例如下: + +.. 
+.. code-block:: python
+
+    image = fluid.data(name="image", shape=[None, 1, 28, 28], dtype="float32")
+    label = fluid.data(name="label", shape=[None, 1], dtype="int64")
+    # 模型定义
+    # ……
+    prediction = fluid.layers.fc(input=image, size=10)
+    loss = fluid.layers.cross_entropy(input=prediction, label=label)
+    avg_loss = fluid.layers.mean(loss)
+    # ……
+    # 读取数据
+    # paddle.dataset.mnist.train()返回数据读取的Reader,每次可以从Reader中读取一条样本,batch_size为128
+    train_reader = paddle.batch(paddle.dataset.mnist.train(), 128)
+
+    # 读取数据
+    end = time.time()
+    for batch_id, batch in enumerate(train_reader):
+        data_time = time.time() - end
+        # 训练网络
+        executor.run(feed={...}, fetch_list=[...])
+        batch_time = time.time() - end
+        end = time.time()
+
+
+用户首先需要通过 :code:`fluid.data` 定义模型的输入,然后根据输入构建模型,最后从事先自定义的Reader函数中获取一个batch的数据,并将数据传递给执行器。
+
+采用同步数据读取方式时,用户可通过加入Python计时函数 :code:`time.time()` 来统计数据准备部分和执行部分所占用的时间。
+由于数据准备和执行是顺序进行的,所以程序的执行速度可能较慢。如果用户想进行模型调试的话,同步数据读取是一个不错的选择。
+
+
+2.2 异步数据读取
+^^^^^^^^^^^^^^^^
+
+Paddle里面使用 paddle.fluid.io. :ref:`cn_api_fluid_io_DataLoader` 接口来实现异步数据读取,代码示例如下:
+
+.. code-block:: python
+
+    image = fluid.data(name="image", shape=[None, 1, 28, 28], dtype="float32")
+    label = fluid.data(name="label", shape=[None, 1], dtype="int64")
+    data_loader = fluid.io.DataLoader.from_generator(
+        feed_list=[image, label],
+        capacity=64,
+        iterable=False,
+        use_double_buffer=True)
+    # 模型定义
+    # ……
+    prediction = fluid.layers.fc(input=image, size=10)
+    loss = fluid.layers.cross_entropy(input=prediction, label=label)
+    avg_loss = fluid.layers.mean(loss)
+    # ……
+    # 读取数据
+    train_reader = paddle.batch(paddle.dataset.mnist.train(), 128)
+    # places 为实际使用的设备列表,例如 fluid.cuda_places() 或 fluid.cpu_places()
+    data_loader.set_batch_generator(train_reader, places=places)
+
+    # 启动data_loader
+    data_loader.start()
+    batch_id = 0
+    try:
+        end = time.time()
+        while True:
+            print("queue size: ", data_loader.queue.size())
+            loss, = executor.run(fetch_list=[...])
+            # ...
+            batch_time = time.time() - end
+            end = time.time()
+            batch_id += 1
+    except fluid.core.EOFException:
+        data_loader.reset()
+
+用户首先需要通过 :code:`fluid.io.DataLoader.from_generator` 定义DataLoader对象,并使用 :code:`set_batch_generator` 方法将自定义的Reader与DataLoader绑定。
+若DataLoader被定义成不可迭代的( :code:`iterable=False` ),在训练开始之前,需要通过调用 :code:`start()` 方法来启动数据读取。
+在数据读取结束之后, :code:`executor.run` 会抛出 :code:`fluid.core.EOFException` ,表示训练已经遍历完Reader中的所有数据。
+
+采用异步数据读取时,Python端和C++端共同维护一个数据队列,Python端启动一个线程,负责向队列中插入数据,C++端在训练/预测过程中,从数据队列中获取数据,并将该数据从队列中移除。
+用户可以在程序运行过程中,监测数据队列是否为空,如果队列始终不为空,表明数据准备的速度比模型执行的速度快,这种情况下数据读取可能不是瓶颈。
+
+另外,Paddle提供的一些FLAGS也能很好的帮助分析性能。如果用户希望评估一下在完全没有数据读取开销情况下模型的性能,可以设置环境变量 :code:`FLAGS_reader_queue_speed_test_mode` ,在该变量为True的情况下,C++端从数据队列中获取数据之后,不会将该数据从队列中移除,这样能够保证数据队列始终不为空,从而避免了C++端读取数据时的等待开销。
+
+**需要特别注意的是,** :code:`FLAGS_reader_queue_speed_test_mode` **只能在性能分析的时候打开,正常训练模型时需要关闭。**
+
+为降低训练的整体时间,建议用户使用异步数据读取的方式,并开启 :code:`use_double_buffer=True` 。用户可根据模型的实际情况设置数据队列的大小。
+如果数据准备的时间大于模型执行的时间,或者出现了数据队列为空的情况,就需要考虑对数据读取Reader进行加速。
+常用的方法是 **使用Python多进程准备数据** ,一个简单的使用多进程准备数据的示例,可以参考 `YOLOv3 `_ 。
+
+Python端的数据预处理,都是使用CPU完成。如果Paddle提供了相应功能的API,可将这部分预处理功能写到模型配置中,如此Paddle就可以使用GPU来完成该预处理功能,这样也可以减轻CPU预处理数据的负担,提升总体训练速度。
+
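+作为参考,下面给出一个仅用于性能分析的最小示意(其中 train.py 为假设的训练入口脚本,实际名称请以您的工程为准):在启动训练前通过环境变量打开上文提到的 :code:`FLAGS_reader_queue_speed_test_mode` ,即可评估完全没有数据读取开销时的模型执行速度;分析结束后务必关闭该模式再进行正常训练:
+
+.. code-block:: bash
+
+    # 仅用于性能分析:C++端取数后不从队列中移除,保证数据队列始终非空
+    export FLAGS_reader_queue_speed_test_mode=True
+    python train.py
+
+    # 正常训练前务必关闭该模式
+    unset FLAGS_reader_queue_speed_test_mode
+    python train.py
+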
+
+3. 模型训练相关优化
+=============
+
+3.1 执行器介绍
+^^^^^^^^^^^^^^^^
+
+目前Paddle的Python API中提供了 :code:`fluid.compiler.CompiledProgram` 的概念,用户可以通过 :code:`CompiledProgram` 将传入的program进行编译。
+如果希望采用数据并行模式训练,只需要对 :code:`CompiledProgram` 返回的对象调用一下 :code:`with_data_parallel` 即可,最后统一通过 :code:`executor.run(…)` 执行compiled_program。
+
+虽然统一通过 :code:`executor.run(…)` 接口来执行,实际底层的执行策略有两种,对应C++部分的两个执行器,即 :code:`Executor` 和 :code:`ParallelExecutor` ,如果用户采用数据并行模式,C++部分使用的是 :code:`ParallelExecutor` ,除此之外都是使用 :code:`Executor` 。
+这两个执行器的差别:
+
+.. csv-table::
+    :header: "执行器 ", "执行对象", "执行策略"
+    :widths: 3, 3, 5
+
+    ":code:`Executor`", ":code:`Program`", "根据 :code:`Program` 中Operator定义的先后顺序依次运行。"
+    ":code:`ParallelExecutor`", "SSA Graph", "根据Graph中各个节点之间的依赖关系,通过多线程运行。"
+
+
+可以看出, :code:`Executor` 的内部逻辑非常简单,但性能可能会弱一些,因为 :code:`Executor` 对于program中的操作是串行执行的。
+而 :code:`ParallelExecutor` 首先会将program转变为计算图,并分析计算图中节点间的连接关系,对图中没有相互依赖的节点(OP),通过多线程并行执行。
+
+因此, :code:`Executor` 是一个轻量级的执行器,目前主要用于参数初始化、模型保存、模型加载。
+:code:`ParallelExecutor` 是 :code:`Executor` 的升级版本,目前 :code:`ParallelExecutor` 主要用于模型训练,包括单机单卡、单机多卡以及多机多卡训练。
+
+:code:`ParallelExecutor` 执行计算图之前,可以对计算图进行一些优化,比如使计算图中的一些操作是In-place的、将计算图中的参数更新操作进行融合等。
+用户还可以调整 :code:`ParallelExecutor` 执行过程中的一些配置,比如执行计算图的线程数等。这些配置分别是通过构建策略(BuildStrategy)和执行策略(ExecutionStrategy)的参数来设置的。
+
+
+一个简单的使用示例如下:
+
+.. code-block:: python
+
+    build_strategy = fluid.BuildStrategy()
+    build_strategy.enable_inplace = True
+    build_strategy.fuse_all_optimizer_ops = True
+
+    exec_strategy = fluid.ExecutionStrategy()
+    exec_strategy.num_threads = 4
+
+    train_program = fluid.compiler.CompiledProgram(main_program).with_data_parallel(
+        loss_name=loss.name,
+        build_strategy=build_strategy,
+        exec_strategy=exec_strategy)
+
+    place = fluid.CUDAPlace(0)
+    exe = fluid.Executor(place)
+    # 使用DataLoader读取数据,因此执行时不需要设置feed
+    fetch_outs = exe.run(train_program, fetch_list=[loss.name])
+
+
+
+3.2 构建策略(BuildStrategy)配置参数介绍
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+BuildStrategy中提供了一些关于计算图优化的策略,这些策略可以在不同程度上提升模型的训练速度,但是其中一些策略与模型的结构有关,比如 :code:`fuse_all_optimizer_ops` 不支持sparse梯度,我们正在积极的完善这些策略,并在下一个版本将这些策略默认打开。
+
+构建策略的详细介绍如下:
+
+.. csv-table::
+    :header: "选项", "类型", "默认值", "说明"
+    :widths: 3, 3, 3, 5
+
+    ":code:`reduce_strategy`", ":code:`fluid.BuildStrategy.ReduceStrategy`", ":code:`fluid.BuildStrategy.ReduceStrategy.AllReduce`", "使用数据并行训练模型时选用 :code:`AllReduce` 模式训练还是 :code:`Reduce` 模式训练。"
+    ":code:`enable_backward_optimizer_op_deps`", "bool", "True", "在反向操作和参数更新操作之间添加依赖,保证在所有的反向操作都运行结束之后才开始运行参数更新操作。"
+    ":code:`fuse_all_optimizer_ops`", "bool", "False", "对模型中的参数更新算法进行融合。"
+    ":code:`fuse_all_reduce_ops`", "bool", "False", "多卡训练时,将all_reduce操作进行融合。"
+    ":code:`fuse_relu_depthwise_conv`", "bool", "False", "如果模型中存在relu和depthwise_conv,并且是连接的,即relu->depthwise_conv,该选项可以将这两个操作合并为一个。"
+    ":code:`fuse_broadcast_ops`", "bool", "False", "在 :code:`Reduce` 模式下,将最后的多个Broadcast操作融合为一个。"
+    ":code:`mkldnn_enabled_op_types`", "list", "{}", "如果是CPU训练,可以用 :code:`mkldnn_enabled_op_types` 指明模型中的哪些操作可以使用MKLDNN库。默认情况下,模型中用到的操作如果在Paddle目前支持的可以使用mkldnn库计算的列表中,这些操作都会调用mkldnn库的接口进行计算。"
+    ":code:`debug_graphviz_path`", "str", "{}", "将Graph以graphviz格式输出到debug_graphviz_path所指定的文件中。"
+
+参数说明:
+
+(1) 关于 :code:`reduce_strategy` , :code:`ParallelExecutor` 对于数据并行支持两种参数更新模式: :code:`AllReduce` 和 :code:`Reduce` 。在 :code:`AllReduce` 模式下,各个节点上计算得到梯度之后,调用 :code:`AllReduce` 操作,梯度在各个节点上聚合,然后各个节点分别进行参数更新。在 :code:`Reduce` 模式下,参数的更新操作被均匀的分配到各个节点上,即各个节点计算得到梯度之后,将梯度在指定的节点上进行 :code:`Reduce` ,然后在该节点上进行参数更新,最后将更新之后的参数Broadcast到其他节点。即:如果模型中有100个参数需要更新,训练时使用的是4个节点,在 :code:`AllReduce` 模式下,各个节点需要分别对这100个参数进行更新;在 :code:`Reduce` 模式下,各个节点只需要分别对其中的25个参数进行更新,最后将更新后的参数Broadcast到其他节点上。注意:如果是使用CPU进行数据并行训练,在Reduce模式下,不同CPUPlace上的参数是共享的,所以在各个CPUPlace上完成参数更新之后不用将更新后的参数Broadcast到其他CPUPlace。
+
+(2) 关于 :code:`enable_backward_optimizer_op_deps` ,在多卡训练时,打开该选项可能会提升训练速度。
+
+(3) 关于 :code:`fuse_all_optimizer_ops` ,目前只支持SGD、Adam和Momentum算法。 **注意:目前不支持sparse参数梯度** 。
+
+(4) 关于 :code:`fuse_all_reduce_ops` ,多GPU训练时,可以对 :code:`AllReduce` 操作进行融合,以减少 :code:`AllReduce` 的调用次数。默认情况下会将同一layer中参数的梯度的 :code:`AllReduce` 操作合并成一个,比如对于 :code:`fluid.layers.fc` 中有Weight和Bias两个参数,打开该选项之后,原本需要两次 :code:`AllReduce` 操作,现在只用一次 :code:`AllReduce` 操作。此外,为支持更大粒度的参数梯度融合,Paddle提供了 :code:`FLAGS_fuse_parameter_memory_size` 选项,用户可以指定融合AllReduce操作之后,每个 :code:`AllReduce` 操作的梯度字节数,比如希望每次 :code:`AllReduce` 调用传输64MB的梯度,可以设置 :code:`export FLAGS_fuse_parameter_memory_size=64` 。 **注意:目前不支持sparse参数梯度** 。
+
+(5) 关于 :code:`mkldnn_enabled_op_types` ,目前Paddle的Op中可以使用mkldnn库计算的操作包括:transpose、sum、softmax、requantize、quantize、pool2d、lrn、gaussian_random、fc、dequantize、conv2d_transpose、conv2d、conv3d、concat、batch_norm、relu、tanh、sqrt、abs。
+
+
+3.3 执行策略(ExecutionStrategy)配置参数介绍
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ExecutionStrategy中提供了关于计算图执行时的一些配置,这些配置可能会影响模型的训练速度。同时,这些配置与模型的结构有关,如果用户希望模型训练速度更快,可以调整一下这些配置。在后续的优化中,我们会对这部分进行优化,根据输入的模型结构动态调整这些设置。
+
+ExecutionStrategy配置选项说明:
+
+.. csv-table::
+    :header: "选项", "类型", "默认值", "说明"
+    :widths: 3, 3, 5, 5
+
+    ":code:`num_iteration_per_drop_scope`", "INT", "100", "经过多少次迭代之后清理一次local execution scope"
+    ":code:`num_threads`", "INT", "对于CPU:2*dev_count;对于GPU:4*dev_count(这是一个经验值)", ":code:`ParallelExecutor` 中执行所有Op使用的线程池大小"
+
+说明:
+
+(1) 关于 :code:`num_iteration_per_drop_scope` ,框架在运行过程中会产生一些临时变量,默认每经过一个batch就要清理一下临时变量。由于GPU是异步设备,在清理之前需要对所有的GPU调用一次同步操作,因此耗费的时间较长。为此我们在execution_strategy中添加了 :code:`num_iteration_per_drop_scope` 选项,用户可以指定经过多少次迭代之后清理一次。
+
+(2) 关于 :code:`num_threads` ,:code:`ParallelExecutor` 根据Op之间的依赖关系确定Op的执行顺序,即:当Op的输入都已经变为ready状态之后,该Op会被放到一个队列中,等待被执行。 :code:`ParallelExecutor` 内部有一个任务调度线程和一个线程池,任务调度线程从队列中取出所有Ready的Op,并将其放到线程队列中。 :code:`num_threads` 表示线程池的大小。根据以往的经验,对于CPU任务,:code:`num_threads=2*dev_count` 时性能较好,对于GPU任务,:code:`num_threads=4*dev_count` 时性能较好。 **注意:线程池不是越大越好** 。
+
+
+4. 运行时FLAGS设置优化
+=================
+
+Paddle中有一些FLAGS可以有助于性能优化:
+
+(1) :code:`FLAGS_cudnn_exhaustive_search` 表示在调用cuDNN中的卷积操作时,根据输入数据的shape等信息,采取穷举搜索的策略从算法库中选取到更快的卷积算法,进而实现对模型中卷积操作的加速。需要注意的是:
+    - 在搜索算法过程中需要使用较多的显存,如果用户的模型中卷积操作较多,或者GPU卡显存较小,可能会出现显存不足问题。
+    - 通过穷举搜索选择好算法之后,该算法会进入Cache,以便下次运行时,如果输入数据的shape等信息不变,直接使用Cache中的算法。
+
+(2) :code:`FLAGS_enable_cublas_tensor_op_math` 表示是否使用TensorCore加速cuBLAS等NV提供的库中的操作。需要注意的是,这个环境变量只在Tesla V100以及更新的GPU上适用,且可能会带来一定的精度损失,通常该损失不会影响模型的收敛性。
+
+
+5. 优秀实践
+=================
+
+(1) 尽可能的使用飞桨提供的单个layer实现所需操作。
+(2) 采用异步数据读取。
+(3) 模型训练相关优化:
+
+    - 使用ParallelExecutor作为底层执行器。单卡训练,也可以调用with_data_parallel方法。代码示例:
+
+      .. code-block:: python
+
+          compiled_prog = fluid.compiler.CompiledProgram(
+              fluid.default_main_program()).with_data_parallel(
+                  loss_name=loss.name)
+
+    - 如果模型中参数的梯度都是非sparse的,可以打开fuse_all_optimizer_ops选项,将多个参数更新操作融合为一个。
+    - 如果是多卡训练,可以打开enable_backward_optimizer_op_deps、fuse_all_reduce_ops选项。如果想指定每次AllReduce操作的数据大小,可以设置 :code:`FLAGS_fuse_parameter_memory_size` ,比如 :code:`export FLAGS_fuse_parameter_memory_size=1` ,表示每次AllReduce调用传输1MB的梯度。
+    - 使用CPU做数据并行训练时,推荐使用Reduce模式,因为在使用CPU进行数据并行训练时,在Reduce模式下,不同CPUPlace上的参数是共享的,所以在各个CPUPlace上完成参数更新之后不用将更新后的参数Broadcast到其他CPUPlace上,这对提升速度也有很大帮助。
+    - 如果是Reduce模式,可打开fuse_broadcast_ops选项。
+    - 如果用户的模型较小,比如mnist、language_model等,可以将num_threads设为1。
+    - 在显存足够的前提下,建议将 :code:`exec_strategy.num_iteration_per_drop_scope` 设置成一个较大的值,比如设置为100,这样可以避免反复地申请和释放内存。
+
+目前我们正在推进这些配置自动化的工作:即根据输入的模型结构自动配置这些选项,争取在下一个版本中实现,敬请期待。
+
+(4) FLAGS设置
+
+.. code-block:: bash
+
+    export FLAGS_cudnn_exhaustive_search=True
+    export FLAGS_enable_cublas_tensor_op_math=True
+
+
+6. 使用Profile工具进行性能分析
+======================
+
+为方便用户更好的发现程序中的性能瓶颈,Paddle提供了多种Profile工具,这些工具的详细介绍和使用说明请参考 :ref:`api_guide_analysis_tools` 。
diff --git a/doc/paddle/api/.rst b/doc/paddle/api/.rst
new file mode 100644
index 0000000000000000000000000000000000000000..ca0df950ff465362e6b822d95f5f5a902b7ee1a0
--- /dev/null
+++ b/doc/paddle/api/.rst
@@ -0,0 +1,7 @@
+=====
+fluid
+=====
+
+..
toctree:: + :maxdepth: 1 + diff --git a/doc/paddle/api/alias_api_mapping b/doc/paddle/api/alias_api_mapping new file mode 100644 index 0000000000000000000000000000000000000000..3a43a1cdf51facf13c9a1f8c255d5fd3228bfec4 --- /dev/null +++ b/doc/paddle/api/alias_api_mapping @@ -0,0 +1,572 @@ +paddle.tensor.math.divide paddle.divide,paddle.tensor.divide +paddle.fluid.layers.sigmoid_focal_loss paddle.nn.functional.sigmoid_focal_loss,paddle.nn.functional.loss.sigmoid_focal_loss +paddle.fluid.layers.smooth_l1 paddle.nn.functional.smooth_l1,paddle.nn.functional.loss.smooth_l1 +paddle.nn.functional.loss.l1_loss paddle.nn.functional.l1_loss +paddle.nn.functional.loss.margin_ranking_loss paddle.nn.functional.margin_ranking_loss +paddle.nn.layer.pooling.AdaptiveAvgPool3d paddle.nn.AdaptiveAvgPool3d,paddle.nn.layer.AdaptiveAvgPool3d +paddle.nn.functional.common.alpha_dropout paddle.nn.functional.alpha_dropout +paddle.nn.functional.activation.log_sigmoid paddle.nn.functional.log_sigmoid +paddle.fluid.executor.Executor paddle.static.Executor +paddle.nn.functional.pooling.avg_pool2d paddle.nn.functional.avg_pool2d +paddle.fluid.dygraph.checkpoint.load_dygraph paddle.load,paddle.framework.load +paddle.fluid.dygraph.container.Sequential paddle.nn.Sequential +paddle.fluid.dygraph.BilinearTensorProduct paddle.nn.BilinearTensorProduct,paddle.nn.layer.BilinearTensorProduct,paddle.nn.layer.common.BilinearTensorProduct +paddle.fluid.layers.box_coder paddle.nn.functional.box_coder,paddle.nn.functional.vision.box_coder +paddle.fluid.layers.roi_perspective_transform paddle.nn.functional.roi_perspective_transform,paddle.nn.functional.vision.roi_perspective_transform +paddle.fluid.layers.stanh paddle.stanh,paddle.tensor.stanh,paddle.tensor.math.stanh +paddle.fluid.dygraph.base.enable_dygraph paddle.disable_static +paddle.tensor.math.kron paddle.kron,paddle.tensor.kron +paddle.nn.functional.pooling.avg_pool3d paddle.nn.functional.avg_pool3d +paddle.fluid.layers.crop_tensor paddle.crop_tensor,paddle.tensor.crop_tensor,paddle.tensor.creation.crop_tensor +paddle.fluid.layers.lrn paddle.nn.functional.lrn,paddle.nn.functional.norm.lrn +paddle.tensor.logic.less_than paddle.less_than,paddle.tensor.less_than +paddle.nn.layer.loss.NLLLoss paddle.nn.NLLLoss,paddle.nn.layer.NLLLoss +paddle.fluid.layers.sigmoid paddle.nn.functional.sigmoid,paddle.nn.functional.activation.sigmoid +paddle.fluid.layers.nn.py_func paddle.static.py_func +paddle.nn.layer.norm.GroupNorm paddle.nn.GroupNorm,paddle.nn.layer.GroupNorm +paddle.tensor.stat.mean paddle.mean,paddle.tensor.mean +paddle.fluid.layers.cosine_decay paddle.nn.functional.cosine_decay,paddle.nn.functional.learning_rate.cosine_decay +paddle.fluid.dygraph.BatchNorm paddle.nn.BatchNorm,paddle.nn.layer.BatchNorm,paddle.nn.layer.norm.BatchNorm +paddle.tensor.manipulation.unbind paddle.unbind,paddle.tensor.unbind +paddle.fluid.dygraph.jit.TracedLayer paddle.jit.TracedLayer +paddle.tensor.search.nonzero paddle.nonzero,paddle.tensor.nonzero +paddle.fluid.layers.similarity_focus paddle.nn.functional.similarity_focus,paddle.nn.functional.extension.similarity_focus +paddle.nn.layer.common.UpsamplingBilinear2d paddle.nn.UpsamplingBilinear2d,paddle.nn.layer.UpsamplingBilinear2d +paddle.nn.layer.norm.InstanceNorm paddle.nn.InstanceNorm,paddle.nn.layer.InstanceNorm +paddle.fluid.dygraph.SpectralNorm paddle.nn.SpectralNorm,paddle.nn.layer.SpectralNorm,paddle.nn.layer.norm.SpectralNorm +paddle.fluid.dygraph.ProgramTranslator paddle.jit.ProgramTranslator +paddle.tensor.linalg.t paddle.t,paddle.tensor.t 
+paddle.tensor.math.floor_divide paddle.floor_divide,paddle.tensor.floor_divide +paddle.fluid.layers.prelu paddle.static.nn.prelu +paddle.tensor.manipulation.chunk paddle.chunk,paddle.tensor.chunk +paddle.tensor.manipulation.reshape paddle.reshape,paddle.tensor.reshape +paddle.fluid.layers.increment paddle.increment,paddle.tensor.increment,paddle.tensor.math.increment +paddle.fluid.compiler.CompiledProgram paddle.static.CompiledProgram +paddle.tensor.manipulation.flip paddle.flip,paddle.reverse,paddle.tensor.flip,paddle.tensor.reverse +paddle.fluid.dygraph.parallel.ParallelEnv paddle.distributed.ParallelEnv +paddle.fluid.layers.hash paddle.nn.functional.hash,paddle.nn.functional.lod.hash +paddle.nn.functional.activation.selu paddle.nn.functional.selu +paddle.nn.functional.input.embedding paddle.nn.functional.embedding +paddle.nn.layer.pooling.AdaptiveMaxPool1d paddle.nn.AdaptiveMaxPool1d,paddle.nn.layer.AdaptiveMaxPool1d +paddle.fluid.layers.control_flow.Print paddle.static.Print +paddle.fluid.layers.roi_pool paddle.nn.functional.roi_pool,paddle.nn.functional.vision.roi_pool +paddle.fluid.layers.beam_search paddle.nn.beam_search,paddle.nn.decode.beam_search +paddle.fluid.layers.sin paddle.sin,paddle.tensor.sin,paddle.tensor.math.sin +paddle.fluid.core.CUDAPinnedPlace paddle.CUDAPinnedPlace,paddle.framework.CUDAPinnedPlace +paddle.fluid.layers.row_conv paddle.static.nn.row_conv +paddle.fluid.dygraph.learning_rate_scheduler.NoamDecay paddle.NoamDecay,paddle.framework.NoamDecay +paddle.tensor.math.logsumexp paddle.logsumexp,paddle.tensor.logsumexp +paddle.fluid.layers.elementwise_sub paddle.elementwise_sub,paddle.tensor.elementwise_sub,paddle.tensor.math.elementwise_sub +paddle.tensor.search.index_select paddle.index_select,paddle.tensor.index_select +paddle.tensor.manipulation.squeeze paddle.squeeze,paddle.tensor.squeeze +paddle.nn.functional.common.interpolate paddle.nn.functional.interpolate +paddle.nn.layer.common.ConstantPad1d paddle.nn.ConstantPad1d,paddle.nn.layer.ConstantPad1d +paddle.fluid.layers.cos paddle.cos,paddle.tensor.cos,paddle.tensor.math.cos +paddle.nn.functional.activation.tanhshrink paddle.nn.functional.tanhshrink +paddle.fluid.layers.random_crop paddle.nn.functional.random_crop,paddle.nn.functional.extension.random_crop +paddle.fluid.layers.anchor_generator paddle.nn.functional.anchor_generator,paddle.nn.functional.vision.anchor_generator +paddle.fluid.dygraph.base.grad paddle.grad,paddle.framework.grad +paddle.tensor.linalg.bmm paddle.bmm,paddle.tensor.bmm +paddle.nn.layer.activation.SELU paddle.nn.SELU +paddle.nn.layer.pooling.AdaptiveMaxPool2d paddle.nn.AdaptiveMaxPool2d,paddle.nn.layer.AdaptiveMaxPool2d +paddle.fluid.layers.sinh paddle.sinh,paddle.tensor.sinh,paddle.tensor.math.sinh +paddle.fluid.layers.has_nan paddle.has_nan,paddle.tensor.has_nan,paddle.tensor.search.has_nan +paddle.tensor.manipulation.expand_as paddle.expand_as,paddle.tensor.expand_as +paddle.fluid.layers.crf_decoding paddle.static.nn.crf_decoding +paddle.fluid.framework.default_main_program paddle.static.default_main_program +paddle.fluid.framework.Variable paddle.Variable,paddle.framework.Variable +paddle.fluid.layers.l2_normalize paddle.nn.functional.l2_normalize,paddle.nn.functional.norm.l2_normalize +paddle.fluid.layers.log_loss paddle.nn.functional.log_loss,paddle.nn.functional.loss.log_loss +paddle.fluid.layers.softmax_with_cross_entropy paddle.nn.functional.softmax_with_cross_entropy,paddle.nn.functional.loss.softmax_with_cross_entropy +paddle.nn.layer.common.ConstantPad2d 
paddle.nn.ConstantPad2d,paddle.nn.layer.ConstantPad2d +paddle.fluid.layers.sampled_softmax_with_cross_entropy paddle.nn.functional.sampled_softmax_with_cross_entropy,paddle.nn.functional.loss.sampled_softmax_with_cross_entropy +paddle.fluid.layers.elementwise_floordiv paddle.elementwise_floordiv,paddle.tensor.elementwise_floordiv,paddle.tensor.math.elementwise_floordiv +paddle.nn.functional.pooling.adaptive_max_pool1d paddle.nn.functional.adaptive_max_pool1d +paddle.tensor.creation.zeros_like paddle.zeros_like,paddle.tensor.zeros_like +paddle.tensor.creation.zeros paddle.zeros,paddle.tensor.zeros +paddle.nn.layer.norm.SyncBatchNorm paddle.nn.SyncBatchNorm,paddle.nn.layer.SyncBatchNorm +paddle.nn.layer.pooling.AdaptiveMaxPool3d paddle.nn.AdaptiveMaxPool3d,paddle.nn.layer.AdaptiveMaxPool3d +paddle.tensor.linalg.histogram paddle.histogram,paddle.tensor.histogram +paddle.fluid.dygraph.learning_rate_scheduler.CosineDecay paddle.CosineDecay,paddle.framework.CosineDecay +paddle.fluid.dygraph.jit.declarative paddle.jit.to_static +paddle.tensor.logic.equal_all paddle.equal_all,paddle.tensor.equal_all +paddle.nn.functional.activation.softsign paddle.nn.functional.softsign +paddle.fluid.layers.density_prior_box paddle.nn.functional.density_prior_box,paddle.nn.functional.vision.density_prior_box +paddle.fluid.layers.ssd_loss paddle.nn.functional.ssd_loss,paddle.nn.functional.loss.ssd_loss +paddle.nn.layer.common.ConstantPad3d paddle.nn.ConstantPad3d,paddle.nn.layer.ConstantPad3d +paddle.fluid.backward.append_backward paddle.static.append_backward +paddle.nn.functional.pooling.adaptive_max_pool2d paddle.nn.functional.adaptive_max_pool2d +paddle.fluid.layers.generate_mask_labels paddle.nn.functional.generate_mask_labels,paddle.nn.functional.vision.generate_mask_labels +paddle.tensor.math.prod paddle.prod,paddle.tensor.prod +paddle.fluid.layers.abs paddle.abs,paddle.tensor.abs,paddle.tensor.math.abs +paddle.fluid.dygraph.base.no_grad_ paddle.no_grad +paddle.fluid.layers.grid_sampler paddle.nn.functional.vision.grid_sampler +paddle.tensor.linalg.cholesky paddle.cholesky,paddle.tensor.cholesky +paddle.tensor.math.sum paddle.sum,paddle.tensor.sum,paddle.nn.functional.common.sum +paddle.fluid.dygraph.learning_rate_scheduler.PiecewiseDecay paddle.PiecewiseDecay,paddle.framework.PiecewiseDecay +paddle.tensor.search.masked_select paddle.masked_select,paddle.tensor.masked_select +paddle.fluid.layers.scatter_nd_add paddle.scatter_nd_add,paddle.tensor.scatter_nd_add,paddle.tensor.manipulation.scatter_nd_add +paddle.fluid.layers.strided_slice paddle.strided_slice,paddle.tensor.strided_slice,paddle.tensor.manipulation.strided_slice +paddle.nn.functional.activation.gelu paddle.nn.functional.gelu +paddle.fluid.layers.shape paddle.shape,paddle.tensor.shape,paddle.tensor.attribute.shape +paddle.fluid.layers.gather_tree paddle.nn.gather_tree,paddle.nn.decode.gather_tree +paddle.fluid.layers.natural_exp_decay paddle.nn.functional.natural_exp_decay,paddle.nn.functional.learning_rate.natural_exp_decay +paddle.tensor.manipulation.flatten paddle.flatten,paddle.tensor.flatten +paddle.fluid.initializer.Bilinear paddle.nn.initializer.Bilinear +paddle.fluid.backward.gradients paddle.static.gradients +paddle.nn.layer.activation.Tanhshrink paddle.nn.Tanhshrink +paddle.fluid.core.CPUPlace paddle.CPUPlace,paddle.framework.CPUPlace +paddle.nn.functional.activation.softmax paddle.nn.functional.softmax +paddle.nn.functional.pooling.adaptive_max_pool3d paddle.nn.functional.adaptive_max_pool3d +paddle.fluid.layers.yolo_box 
paddle.nn.functional.yolo_box,paddle.nn.functional.vision.yolo_box +paddle.nn.functional.activation.hardtanh paddle.nn.functional.hardtanh +paddle.fluid.layers.distribute_fpn_proposals paddle.nn.functional.distribute_fpn_proposals,paddle.nn.functional.vision.distribute_fpn_proposals +paddle.tensor.math.max paddle.max,paddle.tensor.max +paddle.fluid.layers.thresholded_relu paddle.nn.functional.thresholded_relu,paddle.nn.functional.activation.thresholded_relu +paddle.tensor.math.minimum paddle.minimum,paddle.tensor.minimum +paddle.fluid.layers.linspace paddle.linspace,paddle.tensor.linspace,paddle.tensor.creation.linspace +paddle.nn.functional.pooling.adaptive_avg_pool1d paddle.nn.functional.adaptive_avg_pool1d +paddle.fluid.layers.square paddle.square,paddle.tensor.square,paddle.tensor.math.square +paddle.tensor.math.isinf paddle.isinf,paddle.tensor.isinf +paddle.fluid.layers.huber_loss paddle.nn.functional.huber_loss,paddle.nn.functional.loss.huber_loss +paddle.nn.layer.conv.ConvTranspose1d paddle.nn.ConvTranspose1d,paddle.nn.layer.ConvTranspose1d +paddle.fluid.dygraph.base.disable_dygraph paddle.enable_static +paddle.nn.layer.loss.MarginRankingLoss paddle.nn.MarginRankingLoss,paddle.nn.layer.MarginRankingLoss +paddle.nn.functional.loss.cross_entropy paddle.nn.functional.cross_entropy +paddle.framework.random.get_cuda_rng_state paddle.get_cuda_rng_state +paddle.tensor.search.index_sample paddle.index_sample,paddle.tensor.index_sample +paddle.fluid.layers.rank paddle.rank,paddle.tensor.rank,paddle.tensor.attribute.rank +paddle.fluid.layers.data_norm paddle.static.nn.data_norm +paddle.fluid.layers.slice paddle.slice,paddle.tensor.slice,paddle.tensor.manipulation.slice +paddle.nn.layer.pooling.AvgPool1d paddle.nn.AvgPool1d,paddle.nn.layer.AvgPool1d +paddle.fluid.layers.deformable_roi_pooling paddle.nn.functional.deformable_roi_pooling,paddle.nn.functional.vision.deformable_roi_pooling +paddle.fluid.layers.resize_nearest paddle.nn.functional.resize_nearest,paddle.nn.functional.vision.resize_nearest +paddle.fluid.dygraph.jit.set_verbosity paddle.jit.set_verbosity +paddle.tensor.math.mm paddle.mm,paddle.tensor.mm +paddle.fluid.layers.elementwise_pow paddle.elementwise_pow,paddle.tensor.elementwise_pow,paddle.tensor.math.elementwise_pow +paddle.fluid.layers.scatter_nd paddle.scatter_nd,paddle.tensor.scatter_nd,paddle.tensor.manipulation.scatter_nd +paddle.nn.functional.pooling.adaptive_avg_pool2d paddle.nn.functional.adaptive_avg_pool2d +paddle.fluid.layers.bilinear_tensor_product paddle.static.nn.bilinear_tensor_product +paddle.fluid.framework.name_scope paddle.static.name_scope +paddle.fluid.layers.is_empty paddle.is_empty,paddle.tensor.is_empty,paddle.tensor.logic.is_empty +paddle.tensor.math.multiply paddle.multiply,paddle.tensor.multiply +paddle.tensor.creation.Tensor paddle.Tensor +paddle.tensor.creation.to_tensor paddle.to_tensor,paddle.tensor.to_tensor +paddle.fluid.initializer.Normal paddle.nn.initializer.Normal +paddle.nn.layer.common.AlphaDropout paddle.nn.AlphaDropout,paddle.nn.layer.AlphaDropout +paddle.nn.layer.loss.BCELoss paddle.nn.BCELoss,paddle.nn.layer.BCELoss +paddle.nn.layer.conv.ConvTranspose2d paddle.nn.ConvTranspose2d,paddle.nn.layer.ConvTranspose2d +paddle.fluid.dygraph.container.LayerList paddle.nn.LayerList +paddle.fluid.compiler.ExecutionStrategy paddle.static.ExecutionStrategy +paddle.fluid.layers.ceil paddle.ceil,paddle.tensor.ceil,paddle.tensor.math.ceil +paddle.nn.layer.activation.ReLU paddle.nn.layer.ReLU +paddle.tensor.logic.greater_equal 
paddle.greater_equal,paddle.tensor.greater_equal +paddle.nn.layer.pooling.AvgPool2d paddle.nn.AvgPool2d,paddle.nn.layer.AvgPool2d +paddle.tensor.math.floor_mod paddle.floor_mod,paddle.tensor.floor_mod +paddle.nn.functional.pooling.adaptive_avg_pool3d paddle.nn.functional.adaptive_avg_pool3d +paddle.tensor.math.inverse paddle.inverse,paddle.tensor.inverse +paddle.fluid.layers.label_smooth paddle.nn.functional.label_smooth,paddle.nn.functional.common.label_smooth +paddle.nn.layer.activation.Sigmoid paddle.nn.Sigmoid,paddle.nn.layer.Sigmoid +paddle.fluid.layers.hard_sigmoid paddle.nn.functional.hard_sigmoid,paddle.nn.functional.activation.hard_sigmoid +paddle.nn.layer.norm.BatchNorm1d paddle.nn.BatchNorm1d +paddle.nn.layer.common.ReflectionPad1d paddle.nn.ReflectionPad1d,paddle.nn.layer.ReflectionPad1d +paddle.fluid.layers.fc paddle.static.nn.fc +paddle.framework.random.set_cuda_rng_state paddle.set_cuda_rng_state +paddle.nn.layer.conv.ConvTranspose3d paddle.nn.ConvTranspose3d,paddle.nn.layer.ConvTranspose3d +paddle.fluid.dygraph.layers.Layer paddle.nn.Layer +paddle.tensor.logic.greater_than paddle.greater_than,paddle.tensor.greater_than +paddle.fluid.dygraph.jit.set_code_level paddle.jit.set_code_level +paddle.fluid.layers.hard_swish paddle.nn.functional.hard_swish,paddle.nn.functional.activation.hard_swish +paddle.tensor.math.remainder paddle.remainder,paddle.tensor.remainder +paddle.fluid.layers.filter_by_instag paddle.nn.functional.filter_by_instag,paddle.nn.functional.extension.filter_by_instag +paddle.fluid.layers.shard_index paddle.shard_index,paddle.tensor.shard_index,paddle.tensor.manipulation.shard_index +paddle.nn.layer.pooling.AvgPool3d paddle.nn.AvgPool3d,paddle.nn.layer.AvgPool3d +paddle.fluid.layers.rank_loss paddle.nn.functional.rank_loss,paddle.nn.functional.loss.rank_loss +paddle.fluid.dygraph.jit.save paddle.jit.save +paddle.nn.functional.norm.instance_norm paddle.nn.functional.instance_norm +paddle.nn.functional.conv.conv_transpose1d paddle.nn.functional.conv_transpose1d +paddle.fluid.io.shuffle paddle.shuffle,paddle.tensor.shuffle,paddle.tensor.random.shuffle +paddle.nn.layer.norm.BatchNorm2d paddle.nn.BatchNorm2d +paddle.nn.layer.common.ReflectionPad2d paddle.nn.ReflectionPad2d,paddle.nn.layer.ReflectionPad2d +paddle.nn.layer.conv.Conv1d paddle.nn.Conv1d,paddle.nn.layer.Conv1d +paddle.fluid.param_attr.ParamAttr paddle.ParamAttr,paddle.framework.ParamAttr +paddle.fluid.layers.retinanet_target_assign paddle.nn.functional.retinanet_target_assign,paddle.nn.functional.vision.retinanet_target_assign +paddle.fluid.initializer.Xavier paddle.nn.initializer.Xavier +paddle.fluid.dygraph.parallel.prepare_context paddle.distributed.prepare_context +paddle.tensor.math.pow paddle.pow,paddle.tensor.pow +paddle.fluid.layers.bipartite_match paddle.nn.functional.bipartite_match,paddle.nn.functional.vision.bipartite_match +paddle.fluid.input.embedding paddle.static.nn.embedding +paddle.fluid.clip.GradientClipByGlobalNorm paddle.nn.GradientClipByGlobalNorm,paddle.nn.clip.GradientClipByGlobalNorm +paddle.tensor.math.trace paddle.trace,paddle.tensor.trace +paddle.fluid.layers.reduce_prod paddle.reduce_prod,paddle.tensor.reduce_prod,paddle.tensor.math.reduce_prod +paddle.nn.layer.loss.L1Loss paddle.nn.L1Loss,paddle.nn.layer.L1Loss +paddle.fluid.dygraph.io.TranslatedLayer paddle.jit.TranslatedLayer +paddle.nn.functional.conv.conv_transpose2d paddle.nn.functional.conv_transpose2d +paddle.tensor.manipulation.split paddle.split,paddle.tensor.split +paddle.fluid.layers.tensor.create_parameter 
paddle.create_parameter,paddle.framework.create_parameter +paddle.nn.layer.activation.Softsign paddle.nn.Softsign +paddle.nn.layer.loss.CrossEntropyLoss paddle.nn.CrossEntropyLoss,paddle.nn.layer.CrossEntropyLoss +paddle.nn.layer.norm.BatchNorm3d paddle.nn.BatchNorm3d +paddle.nn.layer.conv.Conv2d paddle.nn.Conv2d,paddle.nn.layer.Conv2d +paddle.fluid.layers.group_norm paddle.static.nn.group_norm +paddle.nn.layer.activation.LeakyReLU paddle.nn.LeakyReLU,paddle.nn.layer.LeakyReLU +paddle.tensor.search.argmax paddle.argmax,paddle.tensor.argmax +paddle.nn.layer.distance.PairwiseDistance paddle.nn.PairwiseDistance,paddle.nn.layer.PairwiseDistance +paddle.fluid.layers.box_decoder_and_assign paddle.nn.functional.box_decoder_and_assign,paddle.nn.functional.vision.box_decoder_and_assign +paddle.fluid.executor.global_scope paddle.static.global_scope +paddle.fluid.param_attr.WeightNormParamAttr paddle.static.WeightNormParamAttr +paddle.tensor.math.addmm paddle.addmm,paddle.tensor.addmm +paddle.fluid.framework.Program paddle.static.Program +paddle.fluid.layers.round paddle.round,paddle.tensor.round,paddle.tensor.math.round +paddle.tensor.creation.ones paddle.ones,paddle.tensor.ones +paddle.tensor.random.randint paddle.randint,paddle.tensor.randint +paddle.tensor.search.where paddle.where,paddle.tensor.where +paddle.fluid.layers.pool2d paddle.nn.functional.pool2d,paddle.nn.functional.pooling.pool2d +paddle.nn.functional.norm.batch_norm paddle.nn.functional.batch_norm +paddle.nn.layer.common.UpsamplingNearest2d paddle.nn.UpsamplingNearest2d,paddle.nn.layer.UpsamplingNearest2d +paddle.nn.functional.conv.conv_transpose3d paddle.nn.functional.conv_transpose3d +paddle.fluid.layers.switch_case paddle.nn.switch_case,paddle.nn.control_flow.switch_case +paddle.fluid.layers.image_resize paddle.nn.functional.image_resize,paddle.nn.functional.vision.image_resize +paddle.nn.layer.conv.Conv3d paddle.nn.Conv3d,paddle.nn.layer.Conv3d +paddle.fluid.layers.logical_or paddle.logical_or,paddle.tensor.logical_or,paddle.tensor.logic.logical_or +paddle.fluid.layers.brelu paddle.nn.functional.brelu,paddle.nn.functional.activation.brelu +paddle.tensor.search.argsort paddle.argsort,paddle.tensor.argsort +paddle.tensor.manipulation.gather paddle.gather,paddle.tensor.gather +paddle.fluid.layers.sigmoid_cross_entropy_with_logits paddle.nn.functional.sigmoid_cross_entropy_with_logits,paddle.nn.functional.loss.sigmoid_cross_entropy_with_logits +paddle.fluid.layers.exponential_decay paddle.nn.functional.exponential_decay,paddle.nn.functional.learning_rate.exponential_decay +paddle.fluid.layers.conv2d_transpose paddle.static.nn.conv2d_transpose +paddle.nn.layer.pooling.MaxPool1d paddle.nn.MaxPool1d,paddle.nn.layer.MaxPool1d +paddle.nn.functional.activation.softshrink paddle.nn.functional.softshrink +paddle.nn.functional.activation.log_softmax paddle.nn.functional.log_softmax +paddle.nn.functional.common.bilinear paddle.nn.functional.bilinear +paddle.fluid.initializer.TruncatedNormal paddle.nn.initializer.TruncatedNormal +paddle.fluid.layers.pool3d paddle.nn.functional.pool3d,paddle.nn.functional.pooling.pool3d +paddle.tensor.linalg.norm paddle.norm,paddle.tensor.norm +paddle.fluid.layers.roi_align paddle.nn.functional.roi_align,paddle.nn.functional.vision.roi_align +paddle.nn.functional.common.dropout2d paddle.nn.functional.dropout2d +paddle.nn.functional.loss.smooth_l1_loss paddle.nn.functional.smooth_l1_loss +paddle.fluid.layers.multiclass_nms paddle.nn.functional.multiclass_nms,paddle.nn.functional.extension.multiclass_nms 
+paddle.fluid.layers.teacher_student_sigmoid_loss paddle.nn.functional.teacher_student_sigmoid_loss,paddle.nn.functional.loss.teacher_student_sigmoid_loss +paddle.nn.functional.activation.prelu paddle.nn.functional.prelu +paddle.tensor.linalg.matmul paddle.matmul,paddle.tensor.matmul +paddle.fluid.layers.generate_proposals paddle.nn.functional.generate_proposals,paddle.nn.functional.vision.generate_proposals +paddle.nn.layer.loss.SmoothL1Loss paddle.nn.SmoothL1Loss,paddle.nn.layer.SmoothL1Loss +paddle.fluid.dygraph.checkpoint.save_dygraph paddle.save,paddle.framework.save +paddle.fluid.core paddle.framework.core +paddle.nn.functional.vision.grid_sample paddle.nn.functional.grid_sample +paddle.tensor.random.rand paddle.rand,paddle.tensor.rand +paddle.fluid.layers.cond paddle.nn.cond,paddle.nn.control_flow.cond +paddle.fluid.layers.square_error_cost paddle.nn.functional.square_error_cost,paddle.nn.functional.loss.square_error_cost +paddle.nn.layer.pooling.MaxPool2d paddle.nn.MaxPool2d,paddle.nn.layer.MaxPool2d +paddle.fluid.layers.multiplex paddle.multiplex,paddle.tensor.multiplex,paddle.tensor.math.multiplex +paddle.fluid.layers.unsqueeze paddle.nn.functional.common.unsqueeze +paddle.nn.layer.common.Pad2D paddle.nn.Pad2D,paddle.nn.layer.Pad2D +paddle.fluid.layers.conv2d paddle.static.nn.conv2d +paddle.fluid.layers.create_parameter paddle.static.nn.create_parameter +paddle.tensor.creation.ones_like paddle.ones_like,paddle.tensor.ones_like +paddle.fluid.layers.sums paddle.sums,paddle.tensor.sums,paddle.tensor.math.sums +paddle.fluid.layers.rnn.birnn paddle.nn.functional.birnn,paddle.nn.functional.rnn.birnn +paddle.fluid.dygraph.base.no_grad paddle.framework.no_grad +paddle.nn.functional.common.dropout3d paddle.nn.functional.dropout3d +paddle.fluid.layers.polygon_box_transform paddle.nn.functional.polygon_box_transform,paddle.nn.functional.extension.polygon_box_transform +paddle.fluid.layers.rnn.rnn paddle.nn.functional.rnn,paddle.nn.functional.rnn.rnn +paddle.tensor.search.sort paddle.sort,paddle.tensor.sort +paddle.tensor.logic.equal paddle.equal,paddle.tensor.equal +paddle.fluid.layers.swish paddle.nn.functional.swish,paddle.nn.functional.activation.swish +paddle.nn.layer.common.ReplicationPad1d paddle.nn.ReplicationPad1d,paddle.nn.layer.ReplicationPad1d +paddle.tensor.math.min paddle.min,paddle.tensor.min +paddle.fluid.dygraph.container.ParameterList paddle.nn.ParameterList +paddle.fluid.layers.reciprocal paddle.reciprocal,paddle.tensor.reciprocal,paddle.tensor.math.reciprocal +paddle.fluid.layers.reduce_mean paddle.reduce_mean,paddle.tensor.reduce_mean,paddle.tensor.stat.reduce_mean +paddle.framework.get_default_dtype paddle.get_default_dtype +paddle.fluid.layers.atan paddle.atan,paddle.tensor.atan,paddle.tensor.math.atan +paddle.fluid.layers.pad2d paddle.nn.functional.pad2d,paddle.nn.functional.common.pad2d +paddle.nn.layer.pooling.MaxPool3d paddle.nn.MaxPool3d,paddle.nn.layer.MaxPool3d +paddle.fluid.io.load paddle.static.load,paddle.tensor.load,paddle.tensor.io.load +paddle.fluid.dygraph.learning_rate_scheduler.ExponentialDecay paddle.ExponentialDecay,paddle.framework.ExponentialDecay +paddle.fluid.layers.conv3d paddle.static.nn.conv3d +paddle.nn.layer.activation.Softmax paddle.nn.Softmax +paddle.fluid.initializer.MSRA paddle.nn.initializer.MSRA +paddle.tensor.random.randn paddle.randn,paddle.tensor.randn +paddle.tensor.logic.less_equal paddle.less_equal,paddle.tensor.less_equal +paddle.nn.layer.loss.CTCLoss paddle.nn.CTCLoss,paddle.nn.layer.CTCLoss +paddle.fluid.layers.reduce_sum 
paddle.reduce_sum,paddle.tensor.reduce_sum,paddle.tensor.math.reduce_sum +paddle.nn.functional.extension.diag_embed paddle.nn.functional.diag_embed +paddle.nn.layer.common.ReplicationPad2d paddle.nn.ReplicationPad2d,paddle.nn.layer.ReplicationPad2d +paddle.nn.functional.norm.layer_norm paddle.nn.functional.layer_norm +paddle.nn.functional.activation.leaky_relu paddle.nn.functional.leaky_relu +paddle.fluid.layers.target_assign paddle.nn.functional.target_assign,paddle.nn.functional.extension.target_assign +paddle.tensor.creation.arange paddle.arange,paddle.tensor.arange +paddle.nn.functional.conv.conv1d paddle.nn.functional.conv1d +paddle.nn.functional.norm.normalize paddle.nn.functional.normalize +paddle.fluid.layers.fsp_matrix paddle.nn.functional.fsp_matrix,paddle.nn.functional.vision.fsp_matrix +paddle.tensor.creation.tril paddle.tril,paddle.tensor.tril +paddle.nn.layer.common.Dropout paddle.nn.Dropout,paddle.nn.layer.Dropout +paddle.fluid.clip.GradientClipByValue paddle.nn.GradientClipByValue,paddle.nn.clip.GradientClipByValue +paddle.nn.layer.activation.Softshrink paddle.nn.Softshrink +paddle.fluid.clip.GradientClipByNorm paddle.nn.GradientClipByNorm,paddle.nn.clip.GradientClipByNorm +paddle.tensor.creation.triu paddle.triu,paddle.tensor.triu +paddle.fluid.layers.reduce_all paddle.reduce_all,paddle.tensor.reduce_all,paddle.tensor.logic.reduce_all +paddle.fluid.layers.rpn_target_assign paddle.nn.functional.rpn_target_assign,paddle.nn.functional.extension.rpn_target_assign +paddle.fluid.layers.reduce_max paddle.reduce_max,paddle.tensor.reduce_max,paddle.tensor.math.reduce_max +paddle.tensor.random.randperm paddle.randperm,paddle.tensor.randperm +paddle.fluid.layers.deformable_conv paddle.static.nn.deformable_conv +paddle.tensor.manipulation.unique paddle.unique,paddle.tensor.unique +paddle.tensor.linalg.dot paddle.dot,paddle.tensor.dot +paddle.nn.layer.common.ReplicationPad3d paddle.nn.ReplicationPad3d,paddle.nn.layer.ReplicationPad3d +paddle.fluid.layers.erf paddle.erf,paddle.tensor.erf,paddle.tensor.math.erf,paddle.nn.functional.erf,paddle.nn.functional.activation.erf +paddle.fluid.layers.image_resize_short paddle.nn.functional.image_resize_short,paddle.nn.functional.vision.image_resize_short +paddle.tensor.manipulation.gather_nd paddle.gather_nd,paddle.tensor.gather_nd +paddle.nn.functional.activation.softplus paddle.nn.functional.softplus +paddle.nn.layer.activation.HSigmoid paddle.nn.HSigmoid,paddle.nn.layer.HSigmoid +paddle.fluid.layers.has_inf paddle.has_inf,paddle.tensor.has_inf,paddle.tensor.search.has_inf +paddle.fluid.layers.logical_not paddle.logical_not,paddle.tensor.logical_not,paddle.tensor.logic.logical_not +paddle.tensor.stat.var paddle.var,paddle.tensor.var +paddle.fluid.layers.npair_loss paddle.nn.functional.npair_loss,paddle.nn.functional.loss.npair_loss +paddle.nn.functional.conv.conv2d paddle.nn.functional.conv2d +paddle.fluid.layers.logical_xor paddle.logical_xor,paddle.tensor.logical_xor,paddle.tensor.logic.logical_xor +paddle.fluid.layers.pad_constant_like paddle.nn.functional.pad_constant_like,paddle.nn.functional.common.pad_constant_like +paddle.fluid.framework.in_dygraph_mode paddle.in_dynamic_mode +paddle.tensor.math.addcmul paddle.addcmul,paddle.tensor.addcmul +paddle.tensor.creation.diag paddle.diag,paddle.tensor.diag +paddle.fluid.dygraph.learning_rate_scheduler.InverseTimeDecay paddle.InverseTimeDecay,paddle.framework.InverseTimeDecay +paddle.framework.set_default_dtype paddle.set_default_dtype +paddle.fluid.layers.collect_fpn_proposals 
paddle.nn.functional.collect_fpn_proposals,paddle.nn.functional.vision.collect_fpn_proposals +paddle.fluid.layers.elementwise_div paddle.elementwise_div,paddle.tensor.elementwise_div,paddle.tensor.math.elementwise_div +paddle.framework.random.manual_seed paddle.manual_seed +paddle.tensor.random.uniform paddle.uniform,paddle.tensor.uniform +paddle.fluid.layers.asin paddle.asin,paddle.tensor.asin,paddle.tensor.math.asin +paddle.fluid.initializer.Constant paddle.nn.initializer.Constant +paddle.fluid.layers.retinanet_detection_output paddle.nn.functional.retinanet_detection_output,paddle.nn.functional.vision.retinanet_detection_output +paddle.fluid.layers.isfinite paddle.tensor.logic.isfinite +paddle.nn.functional.conv.conv3d paddle.nn.functional.conv3d +paddle.nn.functional.loss.nll_loss paddle.nn.functional.nll_loss +paddle.fluid.initializer.Uniform paddle.nn.initializer.Uniform +paddle.tensor.manipulation.roll paddle.roll,paddle.tensor.roll +paddle.nn.layer.common.ZeroPad2d paddle.nn.ZeroPad2d,paddle.nn.layer.ZeroPad2d +paddle.nn.layer.norm.LayerNorm paddle.nn.LayerNorm,paddle.nn.layer.LayerNorm +paddle.fluid.layers.elementwise_mod paddle.elementwise_mod,paddle.tensor.elementwise_mod,paddle.tensor.math.elementwise_mod +paddle.tensor.math.clip paddle.clip,paddle.tensor.clip +paddle.static.input.InputSpec paddle.static.InputSpec +paddle.fluid.layers.acos paddle.acos,paddle.tensor.acos,paddle.tensor.math.acos +paddle.tensor.search.topk paddle.topk,paddle.tensor.topk +paddle.fluid.dygraph.Pool2D paddle.nn.Pool2D,paddle.nn.layer.Pool2D,paddle.nn.layer.common.Pool2D +paddle.tensor.math.isfinite paddle.isfinite,paddle.tensor.isfinite +paddle.fluid.framework.program_guard paddle.static.program_guard +paddle.fluid.layers.reduce_any paddle.reduce_any,paddle.tensor.reduce_any,paddle.tensor.logic.reduce_any +paddle.fluid.layers.inverse_time_decay paddle.nn.functional.inverse_time_decay,paddle.nn.functional.learning_rate.inverse_time_decay +paddle.fluid.layers.space_to_depth paddle.nn.functional.space_to_depth,paddle.nn.functional.vision.space_to_depth +paddle.nn.layer.common.CosineSimilarity paddle.nn.CosineSimilarity,paddle.nn.layer.CosineSimilarity +paddle.nn.layer.extension.RowConv paddle.nn.RowConv,paddle.nn.layer.RowConv +paddle.fluid.layers.shuffle_channel paddle.nn.functional.shuffle_channel,paddle.nn.functional.vision.shuffle_channel +paddle.nn.layer.loss.MSELoss paddle.nn.MSELoss,paddle.nn.layer.MSELoss +paddle.nn.functional.vision.pixel_shuffle paddle.nn.functional.pixel_shuffle +paddle.tensor.math.tanh paddle.tanh,paddle.tensor.tanh,paddle.nn.functional.tanh,paddle.nn.functional.activation.tanh +paddle.fluid.dygraph.jit.load paddle.jit.load +paddle.fluid.layers.batch_norm paddle.static.nn.batch_norm +paddle.fluid.layers.cosh paddle.cosh,paddle.tensor.cosh,paddle.tensor.math.cosh +paddle.nn.layer.common.UpSample paddle.nn.UpSample,paddle.nn.layer.UpSample +paddle.fluid.layers.instance_norm paddle.static.nn.instance_norm +paddle.fluid.layers.temporal_shift paddle.nn.functional.temporal_shift,paddle.nn.functional.extension.temporal_shift +paddle.nn.functional.activation.hardshrink paddle.nn.functional.hardshrink +paddle.fluid.one_hot paddle.nn.functional.common.one_hot +paddle.fluid.layers.clip paddle.nn.clip,paddle.nn.clip.clip +paddle.fluid.framework.ComplexVariable paddle.framework.ComplexVariable +paddle.tensor.search.argmin paddle.argmin,paddle.tensor.argmin +paddle.nn.layer.common.Dropout2d paddle.nn.Dropout2d,paddle.nn.layer.Dropout2d +paddle.tensor.math.elementwise_sum 
paddle.elementwise_sum,paddle.tensor.elementwise_sum +paddle.tensor.manipulation.concat paddle.concat,paddle.tensor.concat +paddle.tensor.stat.std paddle.std,paddle.tensor.std +paddle.fluid.layers.dice_loss paddle.nn.functional.dice_loss,paddle.nn.functional.loss.dice_loss +paddle.nn.functional.loss.binary_cross_entropy paddle.nn.functional.binary_cross_entropy +paddle.fluid.dygraph.Linear paddle.nn.Linear,paddle.nn.layer.Linear,paddle.nn.layer.common.Linear +paddle.fluid.layers.box_clip paddle.nn.functional.box_clip,paddle.nn.functional.vision.box_clip +paddle.nn.layer.activation.ReLU6 paddle.nn.ReLU6 +paddle.static.input.data paddle.static.data +paddle.fluid.layers.edit_distance paddle.nn.functional.edit_distance,paddle.nn.functional.loss.edit_distance +paddle.tensor.math.maximum paddle.maximum,paddle.tensor.maximum +paddle.fluid.layers.add_position_encoding paddle.nn.functional.add_position_encoding,paddle.nn.functional.extension.add_position_encoding +paddle.nn.functional.common.cosine_similarity paddle.nn.functional.cosine_similarity +paddle.fluid.layers.floor paddle.floor,paddle.tensor.floor,paddle.tensor.math.floor +paddle.tensor.math.mod paddle.mod,paddle.tensor.mod +paddle.nn.functional.loss.mse_loss paddle.nn.functional.mse_loss +paddle.tensor.math.log1p paddle.log1p,paddle.tensor.log1p +paddle.fluid.layers.continuous_value_model paddle.nn.functional.continuous_value_model,paddle.nn.functional.extension.continuous_value_model +paddle.fluid.layers.unstack paddle.unstack,paddle.tensor.unstack,paddle.tensor.manipulation.unstack +paddle.fluid.layers.rsqrt paddle.rsqrt,paddle.tensor.rsqrt,paddle.tensor.math.rsqrt +paddle.tensor.creation.eye paddle.eye,paddle.tensor.eye +paddle.fluid.layers.unique_with_counts paddle.unique_with_counts,paddle.tensor.unique_with_counts,paddle.tensor.manipulation.unique_with_counts +paddle.nn.layer.common.Dropout3d paddle.nn.Dropout3d,paddle.nn.layer.Dropout3d +paddle.nn.layer.vision.PixelShuffle paddle.nn.layer.PixelShuffle +paddle.fluid.layers.while_loop paddle.nn.while_loop,paddle.nn.control_flow.while_loop +paddle.fluid.dygraph.learning_rate_scheduler.PolynomialDecay paddle.PolynomialDecay,paddle.framework.PolynomialDecay +paddle.nn.functional.loss.ctc_loss paddle.nn.functional.ctc_loss +paddle.nn.layer.common.Embedding paddle.nn.Embedding,paddle.nn.layer.Embedding +paddle.tensor.math.isnan paddle.isnan,paddle.tensor.isnan +paddle.nn.functional.activation.hsigmoid paddle.nn.functional.hsigmoid +paddle.nn.functional.loss.kl_div paddle.nn.functional.kl_div +paddle.tensor.manipulation.expand paddle.expand,paddle.tensor.expand +paddle.fluid.dygraph.jit.SaveLoadConfig paddle.SaveLoadConfig,paddle.framework.SaveLoadConfig +paddle.nn.functional.common.dropout paddle.nn.functional.dropout +paddle.nn.functional.common.linear paddle.nn.functional.linear +paddle.nn.layer.activation.LogSoftmax paddle.nn.LogSoftmax,paddle.nn.layer.LogSoftmax +paddle.nn.layer.loss.KLDivLoss paddle.nn.KLDivLoss,paddle.nn.layer.KLDivLoss +paddle.fluid.core.VarBase paddle.framework.VarBase +paddle.tensor.math.sign paddle.sign,paddle.tensor.sign +paddle.fluid.layers.prior_box paddle.nn.functional.prior_box,paddle.nn.functional.vision.prior_box +paddle.fluid.layers.adaptive_pool2d paddle.nn.functional.adaptive_pool2d,paddle.nn.functional.pooling.adaptive_pool2d +paddle.fluid.layers.iou_similarity paddle.nn.functional.iou_similarity,paddle.nn.functional.loss.iou_similarity +paddle.fluid.layers.transpose 
paddle.transpose,paddle.tensor.transpose,paddle.tensor.linalg.transpose,paddle.tensor.manipulation.transpose +paddle.nn.functional.activation.elu paddle.nn.functional.elu +paddle.tensor.manipulation.broadcast_to paddle.broadcast_to,paddle.tensor.broadcast_to +paddle.fluid.layers.tensor.create_global_var paddle.create_global_var,paddle.framework.create_global_var +paddle.fluid.layers.elementwise_add paddle.elementwise_add,paddle.tensor.elementwise_add,paddle.tensor.math.elementwise_add +paddle.fluid.layers.affine_channel paddle.nn.functional.affine_channel,paddle.nn.functional.vision.affine_channel +paddle.tensor.creation.full_like paddle.full_like,paddle.tensor.full_like +paddle.fluid.parallel_executor.ParallelExecutor paddle.static.ParallelExecutor +paddle.tensor.logic.not_equal paddle.not_equal,paddle.tensor.not_equal +paddle.fluid.layers.layer_norm paddle.static.nn.layer_norm +paddle.fluid.layers.nce paddle.static.nn.nce +paddle.nn.functional.activation.relu paddle.nn.functional.relu +paddle.nn.layer.norm.InstanceNorm1d paddle.nn.InstanceNorm1d +paddle.nn.functional.common.pad paddle.nn.functional.pad +paddle.fluid.layers.adaptive_pool3d paddle.nn.functional.adaptive_pool3d,paddle.nn.functional.pooling.adaptive_pool3d +paddle.fluid.layers.psroi_pool paddle.nn.functional.psroi_pool,paddle.nn.functional.vision.psroi_pool +paddle.fluid.layers.spectral_norm paddle.static.nn.spectral_norm +paddle.nn.functional.pooling.max_pool1d paddle.nn.functional.max_pool1d +paddle.fluid.dygraph.learning_rate_scheduler.NaturalExpDecay paddle.NaturalExpDecay,paddle.framework.NaturalExpDecay +paddle.tensor.math.sqrt paddle.nn.functional.common.sqrt +paddle.nn.layer.activation.Softplus paddle.nn.Softplus +paddle.fluid.dygraph.parallel.DataParallel paddle.DataParallel,paddle.framework.DataParallel +paddle.fluid.layers.exp paddle.exp,paddle.tensor.exp,paddle.tensor.math.exp +paddle.tensor.manipulation.scatter paddle.scatter,paddle.tensor.scatter +paddle.fluid.layers.detection_output paddle.nn.functional.detection_output,paddle.nn.functional.vision.detection_output +paddle.fluid.layers.bpr_loss paddle.nn.functional.bpr_loss,paddle.nn.functional.loss.bpr_loss +paddle.fluid.layers.conv3d_transpose paddle.static.nn.conv3d_transpose +paddle.tensor.stat.numel paddle.numel,paddle.tensor.numel +paddle.nn.functional.activation.relu6 paddle.nn.functional.relu6 +paddle.tensor.math.cumsum paddle.cumsum,paddle.tensor.cumsum +paddle.fluid.layers.resize_trilinear paddle.nn.functional.resize_trilinear,paddle.nn.functional.vision.resize_trilinear +paddle.fluid.save paddle.static.save,paddle.tensor.save,paddle.tensor.io.save +paddle.fluid.layers.scale paddle.scale,paddle.tensor.scale,paddle.tensor.math.scale +paddle.fluid.framework.default_startup_program paddle.static.default_startup_program +paddle.fluid.layers.noam_decay paddle.nn.functional.noam_decay,paddle.nn.functional.learning_rate.noam_decay +paddle.fluid.layers.yolov3_loss paddle.nn.functional.yolov3_loss,paddle.nn.functional.vision.yolov3_loss +paddle.fluid.layers.piecewise_decay paddle.nn.functional.piecewise_decay,paddle.nn.functional.learning_rate.piecewise_decay +paddle.tensor.linalg.cross paddle.cross,paddle.tensor.cross +paddle.fluid.layers.maxout paddle.nn.functional.maxout,paddle.nn.functional.activation.maxout +paddle.nn.layer.norm.InstanceNorm2d paddle.nn.InstanceNorm2d +paddle.fluid.layers.assign paddle.nn.functional.assign,paddle.nn.functional.common.assign +paddle.fluid.layers.case paddle.nn.case,paddle.nn.control_flow.case +paddle.fluid.core.CUDAPlace 
paddle.CUDAPlace,paddle.framework.CUDAPlace +paddle.nn.functional.pooling.max_pool2d paddle.nn.functional.max_pool2d +paddle.fluid.layers.resize_bilinear paddle.nn.functional.resize_bilinear,paddle.nn.functional.vision.resize_bilinear +paddle.fluid.layers.clip_by_norm paddle.nn.clip_by_norm,paddle.nn.clip.clip_by_norm +paddle.nn.functional.loss.binary_cross_entropy_with_logits paddle.nn.functional.binary_cross_entropy_with_logits +paddle.fluid.layers.reduce_min paddle.reduce_min,paddle.tensor.reduce_min,paddle.tensor.math.reduce_min +paddle.fluid.layers.fill_constant paddle.fill_constant,paddle.tensor.fill_constant,paddle.tensor.creation.fill_constant +paddle.nn.layer.pooling.AdaptiveAvgPool1d paddle.nn.AdaptiveAvgPool1d,paddle.nn.layer.AdaptiveAvgPool1d +paddle.fluid.layers.squeeze paddle.nn.functional.common.squeeze +paddle.fluid.layers.cast paddle.cast,paddle.tensor.cast,paddle.tensor.manipulation.cast +paddle.fluid.layers.log paddle.log,paddle.tensor.log,paddle.tensor.math.log +paddle.fluid.layers.sqrt paddle.sqrt,paddle.tensor.sqrt,paddle.tensor.math.sqrt +paddle.fluid.layers.elementwise_mul paddle.tensor.elementwise_mul,paddle.tensor.math.elementwise_mul,paddle.nn.functional.common.elementwise_mul +paddle.tensor.manipulation.stack paddle.stack,paddle.tensor.stack +paddle.tensor.manipulation.unsqueeze paddle.unsqueeze,paddle.tensor.unsqueeze +paddle.fluid.layers.hsigmoid paddle.static.nn.hsigmoid +paddle.tensor.math.add paddle.add,paddle.tensor.add +paddle.nn.layer.common.Bilinear paddle.nn.Bilinear,paddle.nn.layer.Bilinear +paddle.nn.functional.extension.row_conv paddle.nn.functional.row_conv +paddle.nn.layer.loss.BCEWithLogitsLoss paddle.nn.BCEWithLogitsLoss,paddle.nn.layer.BCEWithLogitsLoss +paddle.nn.layer.norm.InstanceNorm3d paddle.nn.InstanceNorm3d +paddle.tensor.manipulation.tile paddle.tile,paddle.tensor.tile +paddle.fluid.layers.polynomial_decay paddle.nn.functional.polynomial_decay,paddle.nn.functional.learning_rate.polynomial_decay +paddle.tensor.creation.meshgrid paddle.meshgrid,paddle.tensor.meshgrid +paddle.fluid.layers.prroi_pool paddle.nn.functional.prroi_pool,paddle.nn.functional.vision.prroi_pool +paddle.nn.functional.pooling.max_pool3d paddle.nn.functional.max_pool3d +paddle.tensor.logic.allclose paddle.allclose,paddle.tensor.allclose +paddle.fluid.layers.beam_search_decode paddle.nn.beam_search_decode,paddle.nn.decode.beam_search_decode +paddle.tensor.linalg.dist paddle.dist,paddle.tensor.dist +paddle.fluid.compiler.BuildStrategy paddle.static.BuildStrategy +paddle.fluid.layers.multi_box_head paddle.static.nn.multi_box_head +paddle.fluid.layers.unfold paddle.nn.functional.unfold,paddle.nn.functional.common.unfold +paddle.nn.layer.pooling.AdaptiveAvgPool2d paddle.nn.AdaptiveAvgPool2d,paddle.nn.layer.AdaptiveAvgPool2d +paddle.fluid.layers.logical_and paddle.logical_and,paddle.tensor.logical_and,paddle.tensor.logic.logical_and +paddle.fluid.layers.linear_lr_warmup paddle.nn.functional.linear_lr_warmup,paddle.nn.functional.learning_rate.linear_lr_warmup +paddle.nn.functional.vision.affine_grid paddle.nn.functional.affine_grid +paddle.fluid.layers.generate_proposal_labels paddle.nn.functional.generate_proposal_labels,paddle.nn.functional.vision.generate_proposal_labels +paddle.fluid.dygraph.Flatten paddle.nn.Flatten,paddle.nn.layer.Flatten,paddle.nn.layer.common.Flatten +paddle.nn.functional.pooling.avg_pool1d paddle.nn.functional.avg_pool1d +paddle.fluid.layers.warpctc paddle.nn.functional.warpctc,paddle.nn.functional.extension.warpctc 
+paddle.fluid.executor.scope_guard paddle.static.scope_guard +paddle.fluid.layers.center_loss paddle.nn.functional.center_loss,paddle.nn.functional.loss.center_loss +paddle.nn.functional.input.one_hot paddle.nn.functional.one_hot +paddle.tensor.creation.full paddle.full,paddle.tensor.full +paddle.fluid.layers.soft_relu paddle.nn.functional.soft_relu,paddle.nn.functional.activation.soft_relu +paddle.hapi.model.Model paddle.Model +paddle.hapi.callbacks.Callback paddle.callbacks.Callback +paddle.hapi.callbacks.ProgBarLogger paddle.callbacks.ProgBarLogger +paddle.hapi.callbacks.ModelCheckpoint paddle.callbacks.ModelCheckpoint +paddle.hapi.model_summary.summary paddle.summary +paddle.vision.models.resnet.ResNet paddle.vision.models.ResNet,paddle.vision.ResNet +paddle.vision.models.resnet.resnet18 paddle.vision.models.resnet18,paddle.vision.resnet18 +paddle.vision.models.resnet.resnet34 paddle.vision.models.resnet34,paddle.vision.resnet34 +paddle.vision.models.resnet.resnet50 paddle.vision.models.resnet50,paddle.vision.resnet50 +paddle.vision.models.resnet.resnet101 paddle.vision.models.resnet101,paddle.vision.resnet101 +paddle.vision.models.resnet.resnet152 paddle.vision.models.resnet152,paddle.vision.resnet152 +paddle.vision.models.vgg.VGG paddle.vision.models.VGG,paddle.vision.VGG +paddle.vision.models.vgg.vgg11 paddle.vision.models.vgg11,paddle.vision.vgg11 +paddle.vision.models.vgg.vgg13 paddle.vision.models.vgg13,paddle.vision.vgg13 +paddle.vision.models.vgg.vgg16 paddle.vision.models.vgg16,paddle.vision.vgg16 +paddle.vision.models.vgg.vgg19 paddle.vision.models.vgg19,paddle.vision.vgg19 +paddle.vision.models.mobilenetv1.MobileNetV1 paddle.vision.models.MobileNetV1,paddle.vision.MobileNetV1 +paddle.vision.models.mobilenetv1.mobilenet_v1 paddle.vision.models.mobilenet_v1,paddle.vision.mobilenet_v1 +paddle.vision.models.mobilenetv2.MobileNetV2 paddle.vision.models.MobileNetV2,paddle.vision.MobileNetV2 +paddle.vision.models.mobilenetv2.mobilenet_v2 paddle.vision.models.mobilenet_v2,paddle.vision.mobilenet_v2 +paddle.vision.models.lenet.LeNet paddle.vision.models.LeNet,paddle.vision.LeNet +paddle.vision.transforms.transforms.Compose paddle.vision.transforms.Compose,paddle.vision.Compose +paddle.vision.transforms.transforms.BatchCompose paddle.vision.transforms.BatchCompose,paddle.vision.BatchCompose +paddle.vision.transforms.transforms.Resize paddle.vision.transforms.Resize,paddle.vision.Resize +paddle.vision.transforms.transforms.RandomResizedCrop paddle.vision.transforms.RandomResizedCrop,paddle.vision.RandomResizedCrop +paddle.vision.transforms.transforms.CenterCropResize paddle.vision.transforms.CenterCropResize,paddle.vision.CenterCropResize +paddle.vision.transforms.transforms.CenterCrop paddle.vision.transforms.CenterCrop,paddle.vision.CenterCrop +paddle.vision.transforms.transforms.RandomHorizontalFlip paddle.vision.transforms.RandomHorizontalFli,paddle.vision.RandomHorizontalFli +paddle.vision.transforms.transforms.RandomVerticalFlip paddle.vision.transforms.RandomVerticalFlip,paddle.vision.RandomVerticalFlip +paddle.vision.transforms.transforms.Permute paddle.vision.transforms.Permute,paddle.vision.Permute +paddle.vision.transforms.transforms.Normalize paddle.vision.transforms.Normalize,paddle.vision.Normalize +paddle.vision.transforms.transforms.GaussianNoise paddle.vision.transforms.GaussianNoise,paddle.vision.GaussianNoise +paddle.vision.transforms.transforms.BrightnessTransform paddle.vision.transforms.BrightnessTransform,paddle.vision.BrightnessTransform 
+paddle.vision.transforms.transforms.SaturationTransform paddle.vision.transforms.SaturationTransform,paddle.vision.SaturationTransform +paddle.vision.transforms.transforms.ContrastTransform paddle.vision.transforms.ContrastTransform,paddle.vision.ContrastTransform +paddle.vision.transforms.transforms.HueTransform paddle.vision.transforms.HueTransform,paddle.vision.HueTransform +paddle.vision.transforms.transforms.ColorJitter paddle.vision.transforms.ColorJitter,paddle.vision.ColorJitter +paddle.vision.transforms.transforms.RandomCrop paddle.vision.transforms.RandomCrop,paddle.vision.RandomCrop +paddle.vision.transforms.transforms.RandomErasing paddle.vision.transforms.RandomErasing,paddle.vision.RandomErasing +paddle.vision.transforms.transforms.Pad paddle.vision.transforms.Pad,paddle.vision.Pad +paddle.vision.transforms.transforms.RandomRotate paddle.vision.transforms.RandomRotate,paddle.vision.RandomRotate +paddle.vision.transforms.transforms.Grayscale paddle.vision.transforms.Grayscale,paddle.vision.Grayscale +paddle.vision.transforms.functional.flip paddle.vision.transforms.flip,paddle.vision.flip +paddle.vision.transforms.functional.resize paddle.vision.transforms.resize,paddle.vision.resize +paddle.vision.transforms.functional.pad paddle.vision.transforms.pad,paddle.vision.pad +paddle.vision.transforms.functional.rotate paddle.vision.transforms.rotate,paddle.vision.rotate +paddle.vision.transforms.functional.to_grayscale paddle.vision.transforms.to_grayscale,paddle.vision.to_grayscale +paddle.vision.datasets.folder.DatasetFolder paddle.vision.datasets.DatasetFolder,paddle.vision.DatasetFolder +paddle.vision.datasets.folder.ImageFolder paddle.vision.datasets.ImageFolder,paddle.vision.ImageFolder +paddle.vision.datasets.mnist.MNIST paddle.vision.datasets.MNIST,paddle.vision.MNIST +paddle.vision.datasets.flowers.Flowers paddle.vision.datasets.Flowers,paddle.vision.Flowers +paddle.vision.datasets.cifar.Cifar10 paddle.vision.datasets.Cifar10,paddle.vision.Cifar10 +paddle.vision.datasets.cifar.Cifar100 paddle.vision.datasets.Cifar100,paddle.vision.Cifar100 +paddle.vision.datasets.voc2012.VOC2012 paddle.vision.datasets.VOC2012,paddle.vision.VOC2012 +paddle.text.datasets.conll05.Conll05st paddle.text.datasets.Conll05st,paddle.text.Conll05st +paddle.text.datasets.imdb.Imdb paddle.text.datasets.Imdb,paddle.text.Imdb +paddle.text.datasets.imikolov.Imikolov paddle.text.datasets.Imikolov,paddle.text.Imikolov +paddle.text.datasets.movielens.Movielens paddle.text.datasets.Movielens,paddle.text.Movielens +paddle.text.datasets.movie_reviews.MovieReviews paddle.text.datasets.MovieRevie,paddle.text.MovieRevie +paddle.text.datasets.uci_housing.UCIHousing paddle.text.datasets.UCIHousing,paddle.text.UCIHousing +paddle.text.datasets.wmt14.WMT14 paddle.text.datasets.WMT14,paddle.text.WMT14 +paddle.text.datasets.wmt16.WMT16 paddle.text.datasets.WMT16,paddle.text.WMT16 +paddle.metric.metrics.Metric paddle.metric.Metric +paddle.metric.metrics.Accuracy paddle.metric.Accuracy +paddle.metric.metrics.Precision paddle.metric.Precision +paddle.metric.metrics.Recall paddle.metric.Recall +paddle.metric.metrics.Auc paddle.metric.Auc diff --git a/doc/paddle/api/api_label b/doc/paddle/api/api_label new file mode 100644 index 0000000000000000000000000000000000000000..19452f15df7acde93a8ff5134bb2d79c74eaf348 --- /dev/null +++ b/doc/paddle/api/api_label @@ -0,0 +1,991 @@ +to_tensor .. _api_paddle_to_tensor +train .. _api_paddle_dataset_wmt14_train: +roi_pool .. _api_paddle_fluid_layers_roi_pool: +expand .. 
_api_paddle_fluid_layers_expand: +ReLU6 .. _api_paddle_nn_layer_activation_ReLU6: +pixel_shuffle .. _api_paddle_fluid_layers_pixel_shuffle: +Conv3DTranspose .. _api_paddle_fluid_dygraph_Conv3DTranspose: +cos_sim .. _api_paddle_metric_cos_sim: +Tanh .. _api_paddle_nn_layer_activation_Tanh: +dynamic_lstm .. _api_paddle_fluid_layers_dynamic_lstm: +DatasetFactory .. _api_paddle_distributed_fleet_DatasetFactory: +multi_box_head .. _api_paddle_fluid_layers_multi_box_head: +LSTMCell .. _api_paddle_fluid_dygraph_LSTMCell: +test .. _api_paddle_dataset_imikolov_test: +LogSoftmax .. _api_paddle_nn_layer_activation_LogSoftmax: +basic_gru .. _api_paddle_fluid_contrib_basic_gru: +cross_entropy .. _api_paddle_fluid_layers_cross_entropy: +kldiv_loss .. _api_paddle_fluid_layers_kldiv_loss: +FSShellCmdAborted .. _api_paddle_distributed_fleet_utils_FSShellCmdAborted: +CUDAPinnedPlace .. _api_paddle_framework_CUDAPinnedPlace: +read_file .. _api_paddle_fluid_layers_read_file: +zeros_like .. _api_paddle_tensor_creation_zeros_like: +GraphExecutionOptimizer .. _api_paddle_distributed_fleet_meta_optimizers_GraphExecutionOptimizer: +log1p .. _api_paddle_tensor_math_log1p: +Mode .. _api_paddle_fluid_incubate_fleet_base_mode_Mode: +less_equal .. _api_paddle_fluid_layers_less_equal: +reverse .. _api_paddle_fluid_layers_reverse: +valid .. _api_paddle_dataset_flowers_valid: +sqrt .. _api_paddle_fluid_layers_sqrt: +bmm .. _api_paddle_tensor_linalg_bmm: +sequence_conv .. _api_paddle_fluid_layers_sequence_conv: +resize_bilinear .. _api_paddle_fluid_layers_resize_bilinear: +logical_not .. _api_paddle_fluid_layers_logical_not: +AsyncGraphExecutionOptimizer .. _api_paddle_distributed_fleet_meta_optimizers_AsyncGraphExecutionOptimizer: +SyncBatchNorm .. _api_paddle_nn_SyncBatchNorm: +extend_with_decoupled_weight_decay .. _api_paddle_fluid_contrib_extend_with_decoupled_weight_decay: +uniform_random_batch_size_like .. _api_paddle_fluid_layers_uniform_random_batch_size_like: +fsp_matrix .. _api_paddle_fluid_layers_fsp_matrix: +BilinearTensorProduct .. _api_paddle_fluid_dygraph_BilinearTensorProduct: +huber_loss .. _api_paddle_fluid_layers_huber_loss: +RoleMakerBase .. _api_paddle_distributed_fleet_base_role_maker_RoleMakerBase: +Hardtanh .. _api_paddle_nn_layer_activation_Hardtanh: +DistributedAdam .. _api_paddle_fluid_incubate_fleet_parameter_server_pslib_optimizer_factory_DistributedAdam: +batch .. _api_paddle_io_batch: +run_check .. _api_paddle_fluid_install_check_run_check: +linspace .. _api_paddle_fluid_layers_linspace: +cluster_files_reader .. _api_paddle_dataset_common_cluster_files_reader: +LogSigmoid .. _api_paddle_nn_layer_activation_LogSigmoid: +cuda_places .. _api_paddle_fluid_cuda_places: +unbind .. _api_paddle_fluid_layers_unbind: +log .. _api_paddle_fluid_layers_log: +reduce_mean .. _api_paddle_fluid_layers_reduce_mean: +guard .. _api_paddle_fluid_unique_name_guard: +Layer .. _api_paddle_fluid_dygraph_layers_Layer: +embedding .. _api_paddle_fluid_layers_embedding: +chain .. _api_paddle_io_chain: +BuildStrategy .. _api_paddle_fluid_compiler_BuildStrategy: +rand .. _api_paddle_tensor_random_rand: +enable_dygraph .. _api_paddle_fluid_dygraph_base_enable_dygraph: +Dpsgd .. _api_paddle_optimizer_Dpsgd: +not_equal .. _api_paddle_tensor_logic_not_equal: +size .. _api_paddle_fluid_layers_size: +md5file .. _api_paddle_dataset_common_md5file: +movie_categories .. _api_paddle_dataset_movielens_movie_categories: +Quant2Int8MkldnnPass .. 
_api_paddle_fluid_contrib_slim_quantization_quant2_int8_mkldnn_pass_Quant2Int8MkldnnPass: +cosine_decay .. _api_paddle_fluid_layers_cosine_decay: +KVHTTPServer .. _api_paddle_distributed_fleet_utils_KVHTTPServer: +gelu .. _api_paddle_fluid_layers_gelu: +triu .. _api_paddle_tensor_creation_triu: +KLDivLoss .. _api_paddle_nn_layer_loss_KLDivLoss: +roi_align .. _api_paddle_fluid_layers_roi_align: +search_pyramid_hash .. _api_paddle_fluid_contrib_search_pyramid_hash: +QuantizationTransformPass .. _api_paddle_fluid_contrib_slim_quantization_quantization_pass_QuantizationTransformPass: +LeakyReLU .. _api_paddle_nn_layer_activation_LeakyReLU: +Tanhshrink .. _api_paddle_nn_layer_activation_Tanhshrink: +tree_conv .. _api_paddle_fluid_contrib_tree_conv: +tanh .. _api_paddle_tensor_math_tanh: +equal_all .. _api_paddle_tensor_logic_equal_all: +FtrlOptimizer .. _api_paddle_optimizer_FtrlOptimizer: +get_default_dtype .. _api_paddle_framework_get_default_dtype: +ConvTranspose3d .. _api_paddle_nn_layer_conv_ConvTranspose3d: +dropout .. _api_paddle_fluid_layers_dropout: +unsqueeze .. _api_paddle_fluid_layers_unsqueeze: +nll_loss .. _api_paddle_nn_functional_loss_nll_loss: +PyReader .. _api_paddle_fluid_reader_PyReader: +Dropout .. _api_paddle_nn_Dropout: +FetchHandlerMonitor .. _api_paddle_fluid_trainer_factory_FetchHandlerMonitor: +meshgrid .. _api_paddle_tensor_creation_meshgrid: +sort .. _api_paddle_tensor_search_sort: +prroi_pool .. _api_paddle_fluid_layers_prroi_pool: +GradientClipByValue .. _api_paddle_fluid_clip_GradientClipByValue: +create_parameter .. _api_paddle_fluid_layers_tensor_create_parameter: +random_crop .. _api_paddle_dataset_image_random_crop: +DownpourSGDOPT .. _api_paddle_fluid_device_worker_DownpourSGDOPT: +assign .. _api_paddle_fluid_layers_assign: +test .. _api_paddle_dataset_movielens_test: +LoDTensorArray .. _api_paddle_fluid_LoDTensorArray: +Conv3D .. _api_paddle_fluid_dygraph_Conv3D: +full .. _api_paddle_tensor_creation_full: +Assert .. _api_paddle_fluid_layers_Assert: +dropout3d .. _api_paddle_nn_functional_dropout3d: +mish .. _api_paddle_fluid_layers_mish: +TrainerFactory .. _api_paddle_fluid_trainer_factory_TrainerFactory: +cosine_similarity .. _api_paddle_nn_functional_cosine_similarity: +ImperativeQuantAware .. _api_paddle_fluid_contrib_slim_quantization_imperative_ImperativeQuantAware: +NCE .. _api_paddle_fluid_dygraph_NCE: +FSFileExistsError .. _api_paddle_distributed_fleet_utils_FSFileExistsError: +AmpScaler .. _api_paddle_fluid_dygraph_AmpScaler: +ctr_metric_bundle .. _api_paddle_fluid_contrib_ctr_metric_bundle: +softmax .. _api_paddle_nn_functional_activation_softmax: +reduce_min .. _api_paddle_fluid_layers_reduce_min: +TracedLayer .. _api_paddle_fluid_dygraph_jit_TracedLayer: +CPUPlace .. _api_paddle_framework_CPUPlace: +is_compiled_with_cuda .. _api_paddle_fluid_is_compiled_with_cuda: +iou_similarity .. _api_paddle_fluid_layers_iou_similarity: +psroi_pool .. _api_paddle_fluid_layers_psroi_pool: +sequence_first_step .. _api_paddle_fluid_layers_sequence_first_step: +mean_iou .. _api_paddle_metric_mean_iou: +AutoMixedPrecisionLists .. _api_paddle_fluid_contrib_mixed_precision_AutoMixedPrecisionLists: +simple_transform .. _api_paddle_dataset_image_simple_transform: +distributed_batch_reader .. _api_paddle_fluid_contrib_distributed_batch_reader: +FS .. _api_paddle_distributed_fleet_utils_FS: +clip_by_norm .. _api_paddle_fluid_layers_clip_by_norm: +warpctc .. _api_paddle_fluid_layers_warpctc: +expand .. _api_paddle_tensor_manipulation_expand: +GradientClipByNorm .. 
_api_paddle_fluid_clip_GradientClipByNorm: +NoamDecay .. _api_paddle_fluid_dygraph_learning_rate_scheduler_NoamDecay: +EditDistance .. _api_paddle_fluid_evaluator_EditDistance: +acos .. _api_paddle_fluid_layers_acos: +resize_linear .. _api_paddle_fluid_layers_resize_linear: +crf_decoding .. _api_paddle_fluid_layers_crf_decoding: +margin_ranking_loss .. _api_paddle_nn_functional_loss_margin_ranking_loss: +add .. _api_paddle_tensor_math_add: +Executor .. _api_paddle_fluid_executor_Executor: +firstn .. _api_paddle_reader_firstn: +get_program_persistable_vars .. _api_paddle_fluid_io_get_program_persistable_vars: +allclose .. _api_paddle_tensor_logic_allclose: +elementwise_pow .. _api_paddle_fluid_layers_elementwise_pow: +SampleEmbeddingHelper .. _api_paddle_fluid_layers_SampleEmbeddingHelper: +LinearLrWarmup .. _api_paddle_fluid_dygraph_LinearLrWarmup: +load_params .. _api_paddle_fluid_io_load_params: +elementwise_floordiv .. _api_paddle_fluid_layers_elementwise_floordiv: +release_memory .. _api_paddle_fluid_release_memory: +batch_norm .. _api_paddle_fluid_layers_batch_norm: +long_type .. _api_paddle_compat_long_type: +continuous_value_model .. _api_paddle_fluid_layers_continuous_value_model: +autodoc .. _api_paddle_fluid_layers_layer_function_generator_autodoc: +BackwardStrategy .. _api_paddle_framework_BackwardStrategy: +conv_transpose1d .. _api_paddle_nn_functional_conv_transpose1d: +Adam .. _api_paddle_optimizer_Adam: +data .. _api_paddle_fluid_data: +L1Decay .. _api_paddle_fluid_regularizer_L1Decay: +dynamic_gru .. _api_paddle_fluid_layers_dynamic_gru: +min .. _api_paddle_tensor_math_min: +FakeQuantMovingAverage .. _api_paddle_fluid_contrib_slim_quantization_imperative_FakeQuantMovingAverage: +to_text .. _api_paddle_compat_to_text: +RoundRobin .. _api_paddle_fluid_transpiler_RoundRobin: +LarsOptimizer .. _api_paddle_distributed_fleet_meta_optimizers_LarsOptimizer: +binary_cross_entropy_with_logits .. _api_paddle_nn_functional_binary_cross_entropy_with_logits: +generate_layer_fn .. _api_paddle_fluid_layers_layer_function_generator_generate_layer_fn: +Distribution .. _api_paddle_distribution_Distribution: +ReplicationPad1d .. _api_paddle_nn_ReplicationPad1d: +cache .. _api_paddle_reader_cache: +QuantizeTranspiler .. _api_paddle_fluid_contrib_QuantizeTranspiler: +data .. _api_paddle_fluid_layers_data: +isfinite .. _api_paddle_tensor_math_isfinite: +Dropout3D .. _api_paddle_nn_Dropout3D: +max_user_id .. _api_paddle_dataset_movielens_max_user_id: +unbind .. _api_paddle_tensor_manipulation_unbind: +row_conv .. _api_paddle_nn_functional_extension_row_conv: +prelu .. _api_paddle_fluid_layers_prelu: +elu .. _api_paddle_fluid_layers_elu: +BasicDecoder .. _api_paddle_fluid_layers_BasicDecoder: +create_tensor .. _api_paddle_fluid_layers_create_tensor: +ModelAverage .. _api_paddle_optimizer_ModelAverage: +double_buffer .. _api_paddle_fluid_layers_double_buffer: +transpose .. _api_paddle_fluid_layers_transpose: +density_prior_box .. _api_paddle_fluid_layers_density_prior_box: +elementwise_min .. _api_paddle_fluid_layers_elementwise_min: +gather_nd .. _api_paddle_fluid_layers_gather_nd: +map_readers .. _api_paddle_io_map_readers: +load_persistables .. _api_paddle_fluid_io_load_persistables: +load_image_bytes .. _api_paddle_dataset_image_load_image_bytes: +guard .. _api_paddle_fluid_dygraph_guard: +train .. _api_paddle_dataset_voc2012_train: +equal .. _api_paddle_tensor_logic_equal: +sequence_enumerate .. _api_paddle_fluid_layers_sequence_enumerate: +ReLU .. 
_api_paddle_nn_layer_activation_ReLU: +ParamAttr .. _api_paddle_fluid_param_attr_ParamAttr: +PipelineOptimizer .. _api_paddle_distributed_fleet_meta_optimizers_PipelineOptimizer: +scatter .. _api_paddle_tensor_manipulation_scatter: +greater_than .. _api_paddle_fluid_layers_greater_than: +alpha_dropout .. _api_paddle_nn_functional_alpha_dropout: +conv3d .. _api_paddle_nn_functional_conv_conv3d: +TrainerDesc .. _api_paddle_fluid_trainer_desc_TrainerDesc: +NLLLoss .. _api_paddle_nn_layer_loss_NLLLoss: +image_resize .. _api_paddle_fluid_layers_image_resize: +CUDAPinnedPlace .. _api_paddle_fluid_CUDAPinnedPlace: +save_params .. _api_paddle_fluid_io_save_params: +brelu .. _api_paddle_fluid_layers_brelu: +data .. _api_paddle_static_data: +DecayedAdagrad .. _api_paddle_optimizer_DecayedAdagrad: +multiclass_nms .. _api_paddle_fluid_layers_multiclass_nms: +square_error_cost .. _api_paddle_fluid_layers_square_error_cost: +Accuracy .. _api_paddle_metric_Accuracy: +_pull_box_extended_sparse .. _api_paddle_fluid_contrib__pull_box_extended_sparse: +shuffle_channel .. _api_paddle_fluid_layers_shuffle_channel: +mod .. _api_paddle_tensor_math_mod: +argmax .. _api_paddle_tensor_search_argmax: +MultiStepDecay .. _api_paddle_fluid_dygraph_MultiStepDecay: +flip .. _api_paddle_tensor_manipulation_flip: +tanh_shrink .. _api_paddle_fluid_layers_tanh_shrink: +elementwise_mod .. _api_paddle_fluid_layers_elementwise_mod: +eye .. _api_paddle_tensor_creation_eye: +less_than .. _api_paddle_tensor_logic_less_than: +ReduceLROnPlateau .. _api_paddle_fluid_dygraph_ReduceLROnPlateau: +box_clip .. _api_paddle_fluid_layers_box_clip: +load_persistables_for_increment .. _api_paddle_fluid_contrib_load_persistables_for_increment: +load_dygraph .. _api_paddle_fluid_dygraph_checkpoint_load_dygraph: +LayerHelperBase .. _api_paddle_fluid_layer_helper_base_LayerHelperBase: +CompiledProgram .. _api_paddle_fluid_compiler_CompiledProgram: +thresholded_relu .. _api_paddle_fluid_layers_thresholded_relu: +PipelineOptimizer .. _api_paddle_optimizer_PipelineOptimizer: +clip .. _api_paddle_tensor_math_clip: +squeeze .. _api_paddle_fluid_layers_squeeze: +yolo_box .. _api_paddle_fluid_layers_yolo_box: +beam_search_decode .. _api_paddle_fluid_layers_beam_search_decode: +crop .. _api_paddle_fluid_layers_crop: +load_inference_model .. _api_paddle_io_load_inference_model: +bilinear_tensor_product .. _api_paddle_fluid_layers_bilinear_tensor_product: +disable_dygraph .. _api_paddle_fluid_dygraph_base_disable_dygraph: +isfinite .. _api_paddle_fluid_layers_isfinite: +maxout .. _api_paddle_fluid_layers_maxout: +Uniform .. _api_paddle_fluid_initializer_Uniform: +leaky_relu .. _api_paddle_nn_functional_leaky_relu: +MPISymetricRoleMaker .. _api_paddle_fluid_incubate_fleet_base_role_maker_MPISymetricRoleMaker: +temporal_shift .. _api_paddle_fluid_layers_temporal_shift: +ComplexVariable .. _api_paddle_fluid_ComplexVariable: +DataLoader .. _api_paddle_io_DataLoader: +Uniform .. _api_paddle_distribution_Uniform: +PaddleCloudRoleMaker .. _api_paddle_fluid_incubate_fleet_base_role_maker_PaddleCloudRoleMaker: +ParallelEnv .. _api_paddle_fluid_dygraph_parallel_ParallelEnv: +beam_search .. _api_paddle_fluid_layers_beam_search: +age_table .. _api_paddle_dataset_movielens_age_table: +ErrorClipByValue .. _api_paddle_fluid_clip_ErrorClipByValue: +ExecutionStrategy .. _api_paddle_fluid_compiler_ExecutionStrategy: +conv2d_transpose .. _api_paddle_fluid_layers_conv2d_transpose: +gaussian_random .. _api_paddle_fluid_layers_gaussian_random: +exp .. 
_api_paddle_fluid_layers_exp: +relu6 .. _api_paddle_nn_functional_relu6: +get_worker_info .. _api_paddle_io_get_worker_info: +ctc_loss .. _api_paddle_nn_functional_ctc_loss: +Conv2DTranspose .. _api_paddle_fluid_dygraph_Conv2DTranspose: +is_empty .. _api_paddle_fluid_layers_is_empty: +DistMultiTrainer .. _api_paddle_fluid_trainer_desc_DistMultiTrainer: +logical_xor .. _api_paddle_fluid_layers_logical_xor: +natural_exp_decay .. _api_paddle_fluid_layers_natural_exp_decay: +maximum .. _api_paddle_tensor_math_maximum: +DygraphToStaticAst .. _api_paddle_fluid_dygraph_dygraph_to_static_DygraphToStaticAst: +sum .. _api_paddle_tensor_math_sum: +L1Loss .. _api_paddle_nn_layer_loss_L1Loss: +shuffle_batch .. _api_paddle_fluid_contrib_shuffle_batch: +bpr_loss .. _api_paddle_fluid_layers_bpr_loss: +hsigmoid .. _api_paddle_nn_functional_activation_hsigmoid: +adaptive_avg_pool3d .. _api_paddle_nn_functional_pooling_adaptive_avg_pool3d: +lod_append .. _api_paddle_fluid_layers_lod_append: +rank_loss .. _api_paddle_fluid_layers_rank_loss: +concat .. _api_paddle_fluid_layers_concat: +LayerList .. _api_paddle_fluid_dygraph_container_LayerList: +declarative .. _api_paddle_fluid_dygraph_jit_declarative: +unsqueeze .. _api_paddle_tensor_manipulation_unsqueeze: +Momentum .. _api_paddle_optimizer_Momentum: +dropout2d .. _api_paddle_nn_functional_dropout2d: +kl_div .. _api_paddle_nn_functional_kl_div: +ConvTranspose2d .. _api_paddle_nn_layer_conv_ConvTranspose2d: +get_word_dict .. _api_paddle_dataset_sentiment_get_word_dict: +binary_cross_entropy .. _api_paddle_nn_functional_binary_cross_entropy: +dropout .. _api_paddle_nn_functional_dropout: +hard_shrink .. _api_paddle_fluid_layers_hard_shrink: +compose .. _api_paddle_io_compose: +generate_proposal_labels .. _api_paddle_fluid_layers_generate_proposal_labels: +LarsMomentumOptimizer .. _api_paddle_optimizer_LarsMomentumOptimizer: +scaled_dot_product_attention .. _api_paddle_fluid_nets_scaled_dot_product_attention: +DecayedAdagradOptimizer .. _api_paddle_optimizer_DecayedAdagradOptimizer: +InMemoryDataset .. _api_paddle_fluid_dataset_InMemoryDataset: +is_compiled_with_xpu .. _api_paddle_fluid_is_compiled_with_xpu: +Conv3d .. _api_paddle_nn_layer_conv_Conv3d: +sampling_id .. _api_paddle_fluid_layers_sampling_id: +tile .. _api_paddle_tensor_manipulation_tile: +argsort .. _api_paddle_fluid_layers_argsort: +save_persistables .. _api_paddle_fluid_io_save_persistables: +MetaOptimizerBase .. _api_paddle_distributed_fleet_meta_optimizers_meta_optimizer_base_MetaOptimizerBase: +full_like .. _api_paddle_tensor_creation_full_like: +tdm_sampler .. _api_paddle_fluid_contrib_tdm_sampler: +distribute_fpn_proposals .. _api_paddle_fluid_layers_distribute_fpn_proposals: +isinf .. _api_paddle_tensor_math_isinf: +arange .. _api_paddle_tensor_creation_arange: +stack .. _api_paddle_fluid_layers_stack: +l1_loss .. _api_paddle_nn_functional_loss_l1_loss: +memory_usage .. _api_paddle_fluid_contrib_memory_usage: +StateCell .. _api_paddle_fluid_contrib_StateCell: +create_array .. _api_paddle_fluid_layers_create_array: +GreedyEmbeddingHelper .. _api_paddle_fluid_layers_GreedyEmbeddingHelper: +ProbabilityEntry .. _api_paddle_fluid_entry_attr_ProbabilityEntry: +LambdaDecay .. _api_paddle_fluid_dygraph_LambdaDecay: +stop_profiler .. _api_paddle_fluid_profiler_stop_profiler: +convert_call .. _api_paddle_fluid_dygraph_dygraph_to_static_convert_call: +PyReader .. _api_paddle_fluid_io_PyReader: +device_guard .. _api_paddle_fluid_device_guard: +Precision .. 
_api_paddle_metric_Precision: +set_program_state .. _api_paddle_io_set_program_state: +RMSProp .. _api_paddle_optimizer_RMSProp: +ones .. _api_paddle_tensor_creation_ones: +GRUCell .. _api_paddle_fluid_dygraph_GRUCell: +WeightQuantization .. _api_paddle_fluid_contrib_slim_quantization_post_training_quantization_WeightQuantization: +rank .. _api_paddle_fluid_layers_rank: +TrainingHelper .. _api_paddle_fluid_layers_TrainingHelper: +yolov3_loss .. _api_paddle_fluid_layers_yolov3_loss: +generate .. _api_paddle_fluid_unique_name_generate: +Auc .. _api_paddle_metric_Auc: +Normal .. _api_paddle_distribution_Normal: +var .. _api_paddle_tensor_stat_var: +cuda_pinned_places .. _api_paddle_fluid_cuda_pinned_places: +label_smooth .. _api_paddle_fluid_layers_label_smooth: +sequence_topk_avg_pooling .. _api_paddle_fluid_contrib_sequence_topk_avg_pooling: +AdaptiveAvgPool3d .. _api_paddle_nn_layer_pooling_AdaptiveAvgPool3d: +DataParallel .. _api_paddle_fluid_dygraph_parallel_DataParallel: +StaticAnalysisVisitor .. _api_paddle_fluid_dygraph_dygraph_to_static_StaticAnalysisVisitor: +batch_fc .. _api_paddle_fluid_contrib_batch_fc: +one_hot .. _api_paddle_nn_functional_one_hot: +IfElse .. _api_paddle_fluid_layers_IfElse: +MarginRankingLoss .. _api_paddle_nn_layer_loss_MarginRankingLoss: +train .. _api_paddle_dataset_imdb_train: +movie_info .. _api_paddle_dataset_movielens_movie_info: +image_resize_short .. _api_paddle_fluid_layers_image_resize_short: +normalize .. _api_paddle_nn_functional_norm_normalize: +pad .. _api_paddle_nn_functional_pad: +elementwise_div .. _api_paddle_fluid_layers_elementwise_div: +CosineDecay .. _api_paddle_fluid_dygraph_learning_rate_scheduler_CosineDecay: +GradientMergeOptimizer .. _api_paddle_distributed_fleet_meta_optimizers_GradientMergeOptimizer: +global_scope .. _api_paddle_fluid_executor_global_scope: +Program .. _api_paddle_fluid_framework_Program: +Adam .. _api_paddle_fluid_optimizer_Adam: +has_nan .. _api_paddle_fluid_layers_has_nan: +FLEET_GLOBAL_DICT .. _api_paddle_fluid_incubate_fleet_parameter_server_pslib_optimizer_factory_FLEET_GLOBAL_DICT: +PaddleCloudRoleMaker .. _api_paddle_distributed_fleet_PaddleCloudRoleMaker: +amp_guard .. _api_paddle_fluid_dygraph_amp_guard: +elementwise_mul .. _api_paddle_fluid_layers_elementwise_mul: +hard_swish .. _api_paddle_fluid_layers_hard_swish: +deformable_roi_pooling .. _api_paddle_fluid_layers_deformable_roi_pooling: +center_loss .. _api_paddle_fluid_layers_center_loss: +unique_with_counts .. _api_paddle_fluid_layers_unique_with_counts: +pow .. _api_paddle_fluid_layers_pow: +get_movie_title_dict .. _api_paddle_dataset_movielens_get_movie_title_dict: +conv_transpose2d .. _api_paddle_nn_functional_conv_transpose2d: +StepDecay .. _api_paddle_fluid_dygraph_StepDecay: +pow .. _api_paddle_tensor_math_pow: +LoopTransformer .. _api_paddle_fluid_dygraph_dygraph_to_static_LoopTransformer: +UtilBase .. _api_paddle_distributed_fleet_UtilBase: +softsign .. _api_paddle_nn_functional_softsign: +L1DecayRegularizer .. _api_paddle_fluid_regularizer_L1DecayRegularizer: +Adamax .. _api_paddle_optimizer_Adamax: +Optimizer .. _api_paddle_optimizer_Optimizer: +noam_decay .. _api_paddle_fluid_layers_noam_decay: +im2sequence .. _api_paddle_fluid_layers_im2sequence: +hardshrink .. _api_paddle_nn_functional_activation_hardshrink: +zeros_like .. _api_paddle_fluid_layers_zeros_like: +QuantizationFreezePass .. _api_paddle_fluid_contrib_slim_quantization_quantization_pass_QuantizationFreezePass: +AsyncMetaOptimizer .. 
_api_paddle_distributed_fleet_meta_optimizers_AsyncMetaOptimizer: +atan .. _api_paddle_fluid_layers_atan: +tdm_child .. _api_paddle_fluid_contrib_tdm_child: +dygraph_to_static_func .. _api_paddle_fluid_dygraph_dygraph_to_static_func: +get_lib .. _api_paddle_sysconfig_get_lib: +Flatten .. _api_paddle_fluid_dygraph_Flatten: +resize_trilinear .. _api_paddle_fluid_layers_resize_trilinear: +DataFeedDesc .. _api_paddle_fluid_DataFeedDesc: +NodeVarType .. _api_paddle_fluid_dygraph_dygraph_to_static_NodeVarType: +Softshrink .. _api_paddle_nn_layer_activation_Softshrink: +PostTrainingQuantization .. _api_paddle_fluid_contrib_slim_quantization_post_training_quantization_PostTrainingQuantization: +test10 .. _api_paddle_dataset_cifar_test10: +SGD .. _api_paddle_optimizer_SGD: +retinanet_target_assign .. _api_paddle_fluid_layers_retinanet_target_assign: +less_than .. _api_paddle_fluid_layers_less_than: +gather .. _api_paddle_tensor_manipulation_gather: +DecodeHelper .. _api_paddle_fluid_layers_DecodeHelper: +greater_equal .. _api_paddle_tensor_logic_greater_equal: +firstn .. _api_paddle_io_firstn: +weight_norm .. _api_paddle_nn_utils_weight_norm_hook_weight_norm: +train .. _api_paddle_dataset_flowers_train: +cosh .. _api_paddle_fluid_layers_cosh: +XPUPlace .. _api_paddle_fluid_XPUPlace: +merge_selected_rows .. _api_paddle_fluid_layers_merge_selected_rows: +smooth_l1_loss .. _api_paddle_nn_functional_smooth_l1_loss: +t .. _api_paddle_tensor_linalg_t: +HashName .. _api_paddle_fluid_transpiler_HashName: +CPUPlace .. _api_paddle_fluid_CPUPlace: +Decoder .. _api_paddle_fluid_layers_Decoder: +array_read .. _api_paddle_fluid_layers_array_read: +floor_divide .. _api_paddle_tensor_math_floor_divide: +floor_mod .. _api_paddle_tensor_math_floor_mod: +log_sigmoid .. _api_paddle_nn_functional_log_sigmoid: +generate_mask_labels .. _api_paddle_fluid_layers_generate_mask_labels: +square .. _api_paddle_fluid_layers_square: +reset_profiler .. _api_paddle_fluid_profiler_reset_profiler: +MSRA .. _api_paddle_fluid_initializer_MSRA: +Print .. _api_paddle_fluid_layers_control_flow_Print: +unique .. _api_paddle_fluid_layers_unique: +max_job_id .. _api_paddle_dataset_movielens_max_job_id: +grad .. _api_paddle_fluid_dygraph_base_grad: +KVHandler .. _api_paddle_distributed_fleet_utils_KVHandler: +Switch .. _api_paddle_fluid_layers_Switch: +selu .. _api_paddle_nn_functional_selu: +gradients .. _api_paddle_fluid_backward_gradients: +MSELoss .. _api_paddle_nn_layer_loss_MSELoss: +Pad2D .. _api_paddle_nn_layer_common_Pad2D: +sequence_concat .. _api_paddle_fluid_layers_sequence_concat: +sequence_expand_as .. _api_paddle_fluid_layers_sequence_expand_as: +DistributeTranspilerConfig .. _api_paddle_fluid_DistributeTranspilerConfig: +hsigmoid .. _api_paddle_fluid_layers_hsigmoid: +LambOptimizer .. _api_paddle_distributed_fleet_meta_optimizers_LambOptimizer: +sequence_conv_pool .. _api_paddle_fluid_nets_sequence_conv_pool: +test100 .. _api_paddle_dataset_cifar_test100: +Softsign .. _api_paddle_nn_layer_activation_Softsign: +max .. _api_paddle_tensor_math_max: +ctc_greedy_decoder .. _api_paddle_fluid_layers_ctc_greedy_decoder: +multiprocess_reader .. _api_paddle_reader_multiprocess_reader: +StaticRNN .. _api_paddle_fluid_layers_StaticRNN: +multiply .. _api_paddle_tensor_math_multiply: +IterableDataset .. _api_paddle_io_IterableDataset: +elementwise_sub .. _api_paddle_fluid_layers_elementwise_sub: +gather .. _api_paddle_fluid_layers_gather: +RandomSampler .. _api_paddle_io_RandomSampler: +name_scope .. 
_api_paddle_fluid_framework_name_scope: +UserDefinedRoleMaker .. _api_paddle_fluid_incubate_fleet_base_role_maker_UserDefinedRoleMaker: +L2DecayRegularizer .. _api_paddle_fluid_regularizer_L2DecayRegularizer: +AdamaxOptimizer .. _api_paddle_fluid_optimizer_AdamaxOptimizer: +SELU .. _api_paddle_nn_layer_activation_SELU: +gather_tree .. _api_paddle_fluid_layers_gather_tree: +Uniform .. _api_paddle_fluid_layers_Uniform: +remove_weight_norm .. _api_paddle_nn_utils_weight_norm_hook_remove_weight_norm: +DownpourSGD .. _api_paddle_fluid_device_worker_DownpourSGD: +sequence_softmax .. _api_paddle_fluid_layers_sequence_softmax: +HDFSClient .. _api_paddle_fluid_contrib_HDFSClient: +abs .. _api_paddle_fluid_layers_abs: +NameVisitor .. _api_paddle_fluid_dygraph_dygraph_to_static_NameVisitor: +chunk_eval .. _api_paddle_metric_chunk_eval: +affine_grid .. _api_paddle_fluid_layers_affine_grid: +fused_elemwise_activation .. _api_paddle_fluid_contrib_fused_elemwise_activation: +norm .. _api_paddle_tensor_linalg_norm: +Normal .. _api_paddle_fluid_layers_Normal: +ConstantPad2d .. _api_paddle_nn_ConstantPad2d: +load_program_state .. _api_paddle_io_load_program_state: +resize_nearest .. _api_paddle_fluid_layers_resize_nearest: +mse_loss .. _api_paddle_nn_functional_mse_loss: +conv_transpose3d .. _api_paddle_nn_functional_conv_transpose3d: +set_device .. _api_paddle_device_set_device: +where .. _api_paddle_fluid_layers_where: +PipelineTrainer .. _api_paddle_fluid_trainer_desc_PipelineTrainer: +anchor_generator .. _api_paddle_fluid_layers_anchor_generator: +train .. _api_paddle_dataset_wmt16_train: +concat .. _api_paddle_tensor_manipulation_concat: +to_variable .. _api_paddle_fluid_dygraph_base_to_variable: +sequence_reshape .. _api_paddle_fluid_layers_sequence_reshape: +hash .. _api_paddle_fluid_layers_hash: +lod_reset .. _api_paddle_fluid_layers_lod_reset: +simple_img_conv_pool .. _api_paddle_fluid_nets_simple_img_conv_pool: +get_device .. _api_paddle_device_get_device: +rsqrt .. _api_paddle_fluid_layers_rsqrt: +to_static_variable_gast_node .. _api_paddle_fluid_dygraph_dygraph_to_static_to_static_variable_gast_node: +matrix_nms .. _api_paddle_fluid_layers_matrix_nms: +user_info .. _api_paddle_dataset_movielens_user_info: +gaussian_random_batch_size_like .. _api_paddle_fluid_layers_gaussian_random_batch_size_like: +MomentumOptimizer .. _api_paddle_optimizer_MomentumOptimizer: +DistributedOptimizer .. _api_paddle_fluid_incubate_fleet_base_fleet_base_DistributedOptimizer: +Fleet .. _api_paddle_distributed_fleet_base_fleet_base_Fleet: +ProgramTranslator .. _api_paddle_fluid_dygraph_ProgramTranslator: +set_default_dtype .. _api_paddle_framework_set_default_dtype: +lstm .. _api_paddle_fluid_layers_lstm: +var_conv_2d .. _api_paddle_fluid_contrib_var_conv_2d: +Variable .. _api_paddle_fluid_framework_Variable: +ChunkEvaluator .. _api_paddle_metric_ChunkEvaluator: +cumsum .. _api_paddle_tensor_math_cumsum: +TruncatedNormal .. _api_paddle_fluid_initializer_TruncatedNormal: +sigmoid_focal_loss .. _api_paddle_fluid_layers_sigmoid_focal_loss: +LayerNorm .. _api_paddle_fluid_dygraph_LayerNorm: +MultiTrainer .. _api_paddle_fluid_trainer_desc_MultiTrainer: +xmap_readers .. _api_paddle_io_xmap_readers: +greater_than .. _api_paddle_tensor_logic_greater_than: +InitState .. _api_paddle_fluid_contrib_InitState: +sequence_pad .. _api_paddle_fluid_layers_sequence_pad: +linear_lr_warmup .. _api_paddle_fluid_layers_linear_lr_warmup: +NumpyArrayInitializer .. _api_paddle_fluid_initializer_NumpyArrayInitializer: +npair_loss .. 
_api_paddle_fluid_layers_npair_loss: +ZeroPad2d .. _api_paddle_nn_ZeroPad2d: +AdadeltaOptimizer .. _api_paddle_optimizer_AdadeltaOptimizer: +chain .. _api_paddle_reader_chain: +partial_sum .. _api_paddle_fluid_contrib_partial_sum: +logsumexp .. _api_paddle_tensor_math_logsumexp: +scale .. _api_paddle_fluid_layers_scale: +tanh .. _api_paddle_nn_functional_tanh: +bipartite_match .. _api_paddle_fluid_layers_bipartite_match: +InstanceNorm .. _api_paddle_nn_layer_norm_InstanceNorm: +ExponentialMovingAverage .. _api_paddle_optimizer_ExponentialMovingAverage: +AdagradOptimizer .. _api_paddle_optimizer_AdagradOptimizer: +expand_as .. _api_paddle_fluid_layers_expand_as: +masked_select .. _api_paddle_tensor_search_masked_select: +RETURN_NO_VALUE_VAR_NAME .. _api_paddle_fluid_dygraph_dygraph_to_static_return_transformer_RETURN_NO_VALUE_VAR_NAME: +data_layer_not_check .. _api_paddle_fluid_dygraph_dygraph_to_static_data_layer_not_check: +not_equal .. _api_paddle_fluid_layers_not_equal: +BatchNorm .. _api_paddle_fluid_dygraph_BatchNorm: +Hogwild .. _api_paddle_fluid_device_worker_Hogwild: +reshape .. _api_paddle_tensor_manipulation_reshape: +convert_to_static .. _api_paddle_fluid_dygraph_dygraph_to_static_convert_to_static: +UpSample .. _api_paddle_nn_layer_common_UpSample: +load .. _api_paddle_fluid_io_load: +DATA_HOME .. _api_paddle_dataset_common_DATA_HOME: +QuantizedConv2D .. _api_paddle_fluid_contrib_slim_quantization_imperative_QuantizedConv2D: +affine_channel .. _api_paddle_fluid_layers_affine_channel: +uniform .. _api_paddle_tensor_random_uniform: +KVServer .. _api_paddle_distributed_fleet_utils_KVServer: +sigmoid .. _api_paddle_nn_functional_activation_sigmoid: +wrap_decorator .. _api_paddle_fluid_wrapped_decorator_wrap_decorator: +default_collate_fn .. _api_paddle_fluid_reader_default_collate_fn: +filter_by_instag .. _api_paddle_fluid_layers_filter_by_instag: +std .. _api_paddle_tensor_stat_std: +TrainingDecoder .. _api_paddle_fluid_contrib_TrainingDecoder: +AdamOptimizer .. _api_paddle_fluid_optimizer_AdamOptimizer: +save_vars .. _api_paddle_fluid_io_save_vars: +op_freq_statistic .. _api_paddle_fluid_contrib_op_freq_statistic: +split .. _api_paddle_fluid_layers_split: +SGDOptimizer .. _api_paddle_optimizer_SGDOptimizer: +Conv1d .. _api_paddle_nn_layer_conv_Conv1d: +prelu .. _api_paddle_nn_functional_prelu: +unfold .. _api_paddle_fluid_layers_unfold: +smooth_l1 .. _api_paddle_fluid_layers_smooth_l1: +logsigmoid .. _api_paddle_fluid_layers_logsigmoid: +glu .. _api_paddle_fluid_nets_glu: +bilateral_slice .. _api_paddle_fluid_contrib_bilateral_slice: +AdamW .. _api_paddle_optimizer_AdamW: +img_conv_group .. _api_paddle_fluid_nets_img_conv_group: +load_image .. _api_paddle_dataset_image_load_image: +AddQuantDequantPass .. _api_paddle_fluid_contrib_slim_quantization_quantization_pass_AddQuantDequantPass: +DeviceWorker .. _api_paddle_fluid_device_worker_DeviceWorker: +Pool2D .. _api_paddle_fluid_dygraph_Pool2D: +case .. _api_paddle_fluid_layers_case: +RETURN_NO_VALUE_MAGIC_NUM .. _api_paddle_fluid_dygraph_dygraph_to_static_return_transformer_RETURN_NO_VALUE_MAGIC_NUM: +tanh .. _api_paddle_fluid_layers_tanh: +reduce_prod .. _api_paddle_fluid_layers_reduce_prod: +DataFeeder .. _api_paddle_fluid_DataFeeder: +convert_dist_to_sparse_program .. _api_paddle_fluid_contrib_convert_dist_to_sparse_program: +cast .. _api_paddle_fluid_layers_cast: +cpu_places .. _api_paddle_fluid_cpu_places: +Mode .. _api_paddle_fluid_incubate_fleet_base_fleet_base_Mode: +accuracy .. _api_paddle_metric_accuracy: +topk .. 
_api_paddle_fluid_layers_topk: +TranslatedLayer .. _api_paddle_fluid_dygraph_io_TranslatedLayer: +Fleet .. _api_paddle_fluid_incubate_fleet_base_fleet_base_Fleet: +greater_equal .. _api_paddle_fluid_layers_greater_equal: +sign .. _api_paddle_tensor_math_sign: +EditDistance .. _api_paddle_metric_EditDistance: +piecewise_decay .. _api_paddle_fluid_layers_piecewise_decay: +set_global_initializer .. _api_paddle_fluid_initializer_set_global_initializer: +LoDTensor .. _api_paddle_fluid_LoDTensor: +templatedoc .. _api_paddle_fluid_layers_layer_function_generator_templatedoc: +create_py_reader_by_data .. _api_paddle_fluid_layers_create_py_reader_by_data: +buffered .. _api_paddle_io_buffered: +box_coder .. _api_paddle_fluid_layers_box_coder: +sin .. _api_paddle_fluid_layers_sin: +similarity_focus .. _api_paddle_fluid_layers_similarity_focus: +Scope .. _api_paddle_fluid_Scope: +add_position_encoding .. _api_paddle_fluid_layers_add_position_encoding: +CTCLoss .. _api_paddle_nn_layer_loss_CTCLoss: +cuda_profiler .. _api_paddle_fluid_profiler_cuda_profiler: +Conv2d .. _api_paddle_nn_layer_conv_Conv2d: +uniform_random .. _api_paddle_fluid_layers_uniform_random: +ChunkEvaluator .. _api_paddle_fluid_evaluator_ChunkEvaluator: +test .. _api_paddle_dataset_wmt14_test: +round .. _api_paddle_compat_round: +cond .. _api_paddle_fluid_layers_cond: +lstm_unit .. _api_paddle_fluid_layers_lstm_unit: +to_bytes .. _api_paddle_compat_to_bytes: +roll .. _api_paddle_tensor_manipulation_roll: +divide .. _api_paddle_tensor_math_divide: +DynamicRNN .. _api_paddle_fluid_layers_DynamicRNN: +get_exception_message .. _api_paddle_compat_get_exception_message: +map_readers .. _api_paddle_reader_map_readers: +ConstantPad1d .. _api_paddle_nn_ConstantPad1d: +in_dygraph_mode .. _api_paddle_fluid_framework_in_dygraph_mode: +Generator .. _api_paddle_fluid_generator_Generator: +tanhshrink .. _api_paddle_nn_functional_tanhshrink: +Adamax .. _api_paddle_fluid_optimizer_Adamax: +slice .. _api_paddle_fluid_layers_slice: +multi_download .. _api_paddle_fluid_contrib_multi_download: +center_crop .. _api_paddle_dataset_image_center_crop: +train .. _api_paddle_dataset_mnist_train: +l2_normalize .. _api_paddle_fluid_layers_l2_normalize: +enabled .. _api_paddle_fluid_dygraph_enabled: +get_include .. _api_paddle_sysconfig_get_include: +MultivariateNormalDiag .. _api_paddle_fluid_layers_MultivariateNormalDiag: +inverse .. _api_paddle_tensor_math_inverse: +increment .. _api_paddle_fluid_layers_increment: +test .. _api_paddle_dataset_voc2012_test: +test .. _api_paddle_dataset_mnist_test: +Dataset .. _api_paddle_io_Dataset: +array_write .. _api_paddle_fluid_layers_array_write: +test .. _api_paddle_dataset_sentiment_test: +dynamic_lstmp .. _api_paddle_fluid_layers_dynamic_lstmp: +ssd_loss .. _api_paddle_fluid_layers_ssd_loss: +polygon_box_transform .. _api_paddle_fluid_layers_polygon_box_transform: +Tensor .. _api_paddle_fluid_Tensor: +py_reader .. _api_paddle_fluid_layers_py_reader: +QuantizedLinear .. _api_paddle_fluid_contrib_slim_quantization_imperative_QuantizedLinear: +train .. _api_paddle_dataset_uci_housing_train: +autoincreased_step_counter .. _api_paddle_fluid_layers_autoincreased_step_counter: +BCELoss .. _api_paddle_nn_layer_loss_BCELoss: +relu .. _api_paddle_fluid_layers_relu: +numel .. _api_paddle_tensor_stat_numel: +softplus .. _api_paddle_fluid_layers_softplus: +RoleMakerBase .. _api_paddle_fluid_incubate_fleet_base_role_maker_RoleMakerBase: +ParallelExecutor .. _api_paddle_fluid_parallel_executor_ParallelExecutor: +randperm .. 
_api_paddle_tensor_random_randperm: +relu6 .. _api_paddle_fluid_layers_relu6: +fill_constant_batch_size_like .. _api_paddle_fluid_layers_fill_constant_batch_size_like: +argmin .. _api_paddle_tensor_search_argmin: +erf .. _api_paddle_fluid_layers_erf: +elementwise_sum .. _api_paddle_tensor_math_elementwise_sum: +RecomputeOptimizer .. _api_paddle_optimizer_RecomputeOptimizer: +memory_optimize .. _api_paddle_fluid_memory_optimize: +diag_embed .. _api_paddle_nn_functional_extension_diag_embed: +flatten .. _api_paddle_fluid_layers_flatten: +floor .. _api_paddle_fluid_layers_floor: +DetectionMAP .. _api_paddle_metric_DetectionMAP: +AlphaDropout .. _api_paddle_nn_AlphaDropout: +strided_slice .. _api_paddle_fluid_layers_strided_slice: +get_embedding .. _api_paddle_dataset_conll05_get_embedding: +log_loss .. _api_paddle_fluid_layers_log_loss: +DistributeTranspiler .. _api_paddle_fluid_DistributeTranspiler: +LocalSGD .. _api_paddle_fluid_transpiler_collective_LocalSGD: +batch_images_from_tar .. _api_paddle_dataset_image_batch_images_from_tar: +sequence_expand .. _api_paddle_fluid_layers_sequence_expand: +save_inference_model .. _api_paddle_io_save_inference_model: +addcmul .. _api_paddle_tensor_math_addcmul: +load .. _api_paddle_jit_load: +append_backward .. _api_paddle_fluid_backward_append_backward: +inplace_abn .. _api_paddle_fluid_layers_inplace_abn: +profiler .. _api_paddle_fluid_profiler_profiler: +train10 .. _api_paddle_dataset_cifar_train10: +TreeConv .. _api_paddle_fluid_dygraph_TreeConv: +HeterXpuTrainer .. _api_paddle_fluid_trainer_desc_HeterXpuTrainer: +val .. _api_paddle_dataset_voc2012_val: +histogram .. _api_paddle_tensor_linalg_histogram: +mm .. _api_paddle_tensor_math_mm: +PReLU .. _api_paddle_nn_layer_activation_PReLU: +reduce_any .. _api_paddle_fluid_layers_reduce_any: +selu .. _api_paddle_fluid_layers_selu: +argmax .. _api_paddle_fluid_layers_argmax: +ExecuteError .. _api_paddle_distributed_fleet_utils_ExecuteError: +LSTMCell .. _api_paddle_fluid_layers_LSTMCell: +compose .. _api_paddle_reader_compose: +multiplex .. _api_paddle_fluid_layers_multiplex: +multi_upload .. _api_paddle_fluid_contrib_multi_upload: +QueueDataset .. _api_paddle_distributed_fleet_QueueDataset: +OutScaleForTrainingPass .. _api_paddle_fluid_contrib_slim_quantization_quantization_pass_OutScaleForTrainingPass: +eye .. _api_paddle_fluid_layers_eye: +equal .. _api_paddle_fluid_layers_equal: +GroupNorm .. _api_paddle_fluid_dygraph_GroupNorm: +sequence_scatter .. _api_paddle_fluid_layers_sequence_scatter: +create_random_int_lodtensor .. _api_paddle_fluid_create_random_int_lodtensor: +leaky_relu .. _api_paddle_fluid_layers_leaky_relu: +exponential_decay .. _api_paddle_fluid_layers_exponential_decay: +NaturalExpDecay .. _api_paddle_fluid_dygraph_learning_rate_scheduler_NaturalExpDecay: +softsign .. _api_paddle_fluid_layers_softsign: +GradAllReduce .. _api_paddle_fluid_transpiler_collective_GradAllReduce: +linear_chain_crf .. _api_paddle_fluid_layers_linear_chain_crf: +LookaheadOptimizer .. _api_paddle_optimizer_LookaheadOptimizer: +L2Decay .. _api_paddle_fluid_regularizer_L2Decay: +left_right_flip .. _api_paddle_dataset_image_left_right_flip: +Section .. _api_paddle_fluid_device_worker_Section: +save_dygraph .. _api_paddle_fluid_dygraph_checkpoint_save_dygraph: +sequence_reverse .. _api_paddle_fluid_layers_sequence_reverse: +isnan .. _api_paddle_tensor_math_isnan: +conv3d .. _api_paddle_fluid_layers_conv3d: +DGCOptimizer .. _api_paddle_distributed_fleet_meta_optimizers_DGCOptimizer: +data_norm .. 
_api_paddle_fluid_layers_data_norm: +conv3d_transpose .. _api_paddle_fluid_layers_conv3d_transpose: +sum .. _api_paddle_fluid_layers_sum: +asin .. _api_paddle_fluid_layers_asin: +auc .. _api_paddle_metric_auc: +zeros .. _api_paddle_tensor_creation_zeros: +TransformForMobilePass .. _api_paddle_fluid_contrib_slim_quantization_quantization_pass_TransformForMobilePass: +get_dict .. _api_paddle_dataset_wmt14_get_dict: +DetectionMAP .. _api_paddle_fluid_evaluator_DetectionMAP: +minimum .. _api_paddle_tensor_math_minimum: +prod .. _api_paddle_tensor_math_prod: +locality_aware_nms .. _api_paddle_fluid_layers_locality_aware_nms: +RNNCell .. _api_paddle_fluid_layers_RNNCell: +BCEWithLogitsLoss .. _api_paddle_nn_layer_loss_BCEWithLogitsLoss: +ConvertToInt8Pass .. _api_paddle_fluid_contrib_slim_quantization_quantization_pass_ConvertToInt8Pass: +gelu .. _api_paddle_nn_functional_gelu: +adaptive_pool2d .. _api_paddle_fluid_layers_adaptive_pool2d: +decorate .. _api_paddle_fluid_contrib_mixed_precision_decorate: +remainder .. _api_paddle_tensor_math_remainder: +clip .. _api_paddle_fluid_layers_clip: +AMPOptimizer .. _api_paddle_distributed_fleet_meta_optimizers_AMPOptimizer: +Xavier .. _api_paddle_fluid_initializer_Xavier: +sequence_unpad .. _api_paddle_fluid_layers_sequence_unpad: +Embedding .. _api_paddle_fluid_dygraph_Embedding: +UserDefinedRoleMaker .. _api_paddle_distributed_fleet_UserDefinedRoleMaker: +one_hot .. _api_paddle_fluid_layers_one_hot: +MultiSlotDataGenerator .. _api_paddle_fluid_incubate_data_generator_MultiSlotDataGenerator: +addmm .. _api_paddle_tensor_math_addmm: +GRUCell .. _api_paddle_fluid_layers_GRUCell: +get_program_parameter .. _api_paddle_fluid_io_get_program_parameter: +Recall .. _api_paddle_metric_Recall: +Hardshrink .. _api_paddle_nn_layer_activation_Hardshrink: +test .. _api_paddle_dataset_wmt16_test: +create_lod_tensor .. _api_paddle_fluid_create_lod_tensor: +generate_activation_fn .. _api_paddle_fluid_layers_layer_function_generator_generate_activation_fn: +space_to_depth .. _api_paddle_fluid_layers_space_to_depth: +swish .. _api_paddle_fluid_layers_swish: +QueueDataset .. _api_paddle_fluid_dataset_QueueDataset: +train .. _api_paddle_dataset_movielens_train: +reciprocal .. _api_paddle_fluid_layers_reciprocal: +SaveLoadConfig .. _api_paddle_jit_SaveLoadConfig: +MetricBase .. _api_paddle_fluid_metrics_MetricBase: +adaptive_avg_pool2d .. _api_paddle_nn_functional_pooling_adaptive_avg_pool2d: +polynomial_decay .. _api_paddle_fluid_layers_polynomial_decay: +py_func .. _api_paddle_fluid_layers_nn_py_func: +reshape .. _api_paddle_fluid_layers_reshape: +Bilinear .. _api_paddle_fluid_initializer_Bilinear: +get_flags .. _api_paddle_fluid_get_flags: +softshrink .. _api_paddle_nn_functional_softshrink: +OutScaleForInferencePass .. _api_paddle_fluid_contrib_slim_quantization_quantization_pass_OutScaleForInferencePass: +nonzero .. _api_paddle_tensor_search_nonzero: +LocalFS .. _api_paddle_distributed_fleet_utils_LocalFS: +ReturnTransformer .. _api_paddle_fluid_dygraph_dygraph_to_static_return_transformer_ReturnTransformer: +get_tensor_from_selected_rows .. _api_paddle_fluid_layers_get_tensor_from_selected_rows: +dist .. _api_paddle_tensor_linalg_dist: +matmul .. _api_paddle_tensor_linalg_matmul: +DpsgdOptimizer .. _api_paddle_optimizer_DpsgdOptimizer: +soft_relu .. _api_paddle_fluid_layers_soft_relu: +no_grad .. _api_paddle_fluid_dygraph_base_no_grad: +dynamic_decode .. _api_paddle_fluid_layers_dynamic_decode: +bernoulli .. _api_paddle_tensor_random_bernoulli: +ComposeNotAligned .. 
_api_paddle_reader_ComposeNotAligned: +GRUUnit .. _api_paddle_fluid_dygraph_GRUUnit: +ReplicationPad2d .. _api_paddle_nn_ReplicationPad2d: +detection_output .. _api_paddle_fluid_layers_detection_output: +HSigmoid .. _api_paddle_nn_layer_activation_HSigmoid: +elementwise_add .. _api_paddle_fluid_layers_elementwise_add: +CosineSimilarity .. _api_paddle_nn_CosineSimilarity: +argsort .. _api_paddle_tensor_search_argsort: +prepare_context .. _api_paddle_fluid_dygraph_parallel_prepare_context: +test .. _api_paddle_dataset_flowers_test: +sinh .. _api_paddle_fluid_layers_sinh: +default_collate_fn .. _api_paddle_fluid_io_default_collate_fn: +ones_like .. _api_paddle_fluid_layers_ones_like: +partial_concat .. _api_paddle_fluid_contrib_partial_concat: +Adagrad .. _api_paddle_optimizer_Adagrad: +RMSPropOptimizer .. _api_paddle_fluid_optimizer_RMSPropOptimizer: +edit_distance .. _api_paddle_fluid_layers_edit_distance: +sums .. _api_paddle_fluid_layers_sums: +softmax_with_cross_entropy .. _api_paddle_fluid_layers_softmax_with_cross_entropy: +tril .. _api_paddle_tensor_creation_tril: +shuffle .. _api_paddle_fluid_io_shuffle: +layer_norm .. _api_paddle_fluid_layers_layer_norm: +softplus .. _api_paddle_nn_functional_softplus: +roi_perspective_transform .. _api_paddle_fluid_layers_roi_perspective_transform: +ReflectionPad1d .. _api_paddle_nn_ReflectionPad1d: +adaptive_pool3d .. _api_paddle_fluid_layers_adaptive_pool3d: +grid_sampler .. _api_paddle_fluid_layers_grid_sampler: +tensor_array_to_tensor .. _api_paddle_fluid_layers_tensor_array_to_tensor: +load_op_library .. _api_paddle_fluid_load_op_library: +max_movie_id .. _api_paddle_dataset_movielens_max_movie_id: +ExponentialDecay .. _api_paddle_fluid_dygraph_learning_rate_scheduler_ExponentialDecay: +log_softmax .. _api_paddle_nn_functional_activation_log_softmax: +require_version .. _api_paddle_fluid_require_version: +SequenceSampler .. _api_paddle_io_SequenceSampler: +Dropout2D .. _api_paddle_nn_Dropout2D: +train .. _api_paddle_dataset_imikolov_train: +elementwise_max .. _api_paddle_fluid_layers_elementwise_max: +array_length .. _api_paddle_fluid_layers_array_length: +sampled_softmax_with_cross_entropy .. _api_paddle_fluid_layers_sampled_softmax_with_cross_entropy: +generate_proposals .. _api_paddle_fluid_layers_generate_proposals: +train .. _api_paddle_dataset_sentiment_train: +build_dict .. _api_paddle_dataset_imikolov_build_dict: +collect_fpn_proposals .. _api_paddle_fluid_layers_collect_fpn_proposals: +PolynomialDecay .. _api_paddle_fluid_dygraph_learning_rate_scheduler_PolynomialDecay: +pool2d .. _api_paddle_fluid_layers_pool2d: +CountFilterEntry .. _api_paddle_fluid_entry_attr_CountFilterEntry: +InputSpec .. _api_paddle_static_InputSpec: +default_startup_program .. _api_paddle_fluid_framework_default_startup_program: +index_select .. _api_paddle_tensor_search_index_select: +margin_rank_loss .. _api_paddle_fluid_layers_margin_rank_loss: +randint .. _api_paddle_tensor_random_randint: +less_equal .. _api_paddle_tensor_logic_less_equal: +sign .. _api_paddle_fluid_layers_sign: +inverse_time_decay .. _api_paddle_fluid_layers_inverse_time_decay: +MetaOptimizerFactory .. _api_paddle_distributed_fleet_base_meta_optimizer_factory_MetaOptimizerFactory: +conv1d .. _api_paddle_nn_functional_conv1d: +has_inf .. _api_paddle_fluid_layers_has_inf: +fused_embedding_seq_pool .. _api_paddle_fluid_contrib_fused_embedding_seq_pool: +logical_and .. _api_paddle_fluid_layers_logical_and: +BeamSearchDecoder .. _api_paddle_fluid_contrib_BeamSearchDecoder: +CUDAPlace .. 
_api_paddle_framework_CUDAPlace: +BreakContinueTransformer .. _api_paddle_fluid_dygraph_dygraph_to_static_break_continue_transformer_BreakContinueTransformer: +LocalSGDOptimizer .. _api_paddle_distributed_fleet_meta_optimizers_LocalSGDOptimizer: +CrossEntropyLoss .. _api_paddle_nn_layer_loss_CrossEntropyLoss: +PairwiseDistance .. _api_paddle_nn_layer_distance_PairwiseDistance: +AstNodeWrapper .. _api_paddle_fluid_dygraph_dygraph_to_static_AstNodeWrapper: +Role .. _api_paddle_fluid_incubate_fleet_base_role_maker_Role: +cross .. _api_paddle_tensor_linalg_cross: +fetch .. _api_paddle_dataset_wmt16_fetch: +buffered .. _api_paddle_reader_buffered: +manual_seed .. _api_paddle_framework_random_manual_seed: +QuantInt8MkldnnPass .. _api_paddle_fluid_contrib_slim_quantization_quant_int8_mkldnn_pass_QuantInt8MkldnnPass: +load_and_transform .. _api_paddle_dataset_image_load_and_transform: +CUDAPlace .. _api_paddle_fluid_CUDAPlace: +one_hot .. _api_paddle_fluid_one_hot: +broadcast_to .. _api_paddle_tensor_manipulation_broadcast_to: +get_cudnn_version .. _api_paddle_device_get_cudnn_version: +get_dict .. _api_paddle_dataset_wmt16_get_dict: +ones_like .. _api_paddle_tensor_creation_ones_like: +cross_entropy .. _api_paddle_nn_functional_cross_entropy: +random_crop .. _api_paddle_fluid_layers_random_crop: +cholesky .. _api_paddle_tensor_linalg_cholesky: +diag .. _api_paddle_fluid_layers_diag: +match_matrix_tensor .. _api_paddle_fluid_contrib_match_matrix_tensor: +GELU .. _api_paddle_nn_layer_activation_GELU: +load_persistables_for_inference .. _api_paddle_fluid_contrib_load_persistables_for_inference: +save .. _api_paddle_jit_save: +reorder_lod_tensor_by_rank .. _api_paddle_fluid_layers_reorder_lod_tensor_by_rank: +FakeQuantAbsMax .. _api_paddle_fluid_contrib_slim_quantization_imperative_FakeQuantAbsMax: +DatasetBase .. _api_paddle_distributed_fleet_DatasetBase: +floor_division .. _api_paddle_compat_floor_division: +while_loop .. _api_paddle_fluid_layers_while_loop: +DistributedStrategy .. _api_paddle_distributed_fleet_DistributedStrategy: +gru_unit .. _api_paddle_fluid_layers_gru_unit: +reduce_all .. _api_paddle_fluid_layers_reduce_all: +GradientClipByGlobalNorm .. _api_paddle_fluid_clip_GradientClipByGlobalNorm: +pad2d .. _api_paddle_fluid_layers_pad2d: +switch_case .. _api_paddle_fluid_layers_switch_case: +unstack .. _api_paddle_fluid_layers_unstack: +spectral_norm .. _api_paddle_fluid_layers_spectral_norm: +switch .. _api_paddle_fluid_unique_name_switch: +BeamSearchDecoder .. _api_paddle_fluid_layers_BeamSearchDecoder: +Softmax .. _api_paddle_nn_layer_activation_Softmax: +hardtanh .. _api_paddle_nn_functional_hardtanh: +DGCMomentumOptimizer .. _api_paddle_optimizer_DGCMomentumOptimizer: +split .. _api_paddle_dataset_common_split: +train100 .. _api_paddle_dataset_cifar_train100: +box_decoder_and_assign .. _api_paddle_fluid_layers_box_decoder_and_assign: +gather_nd .. _api_paddle_tensor_manipulation_gather_nd: +MultiSlotStringDataGenerator .. _api_paddle_fluid_incubate_data_generator_MultiSlotStringDataGenerator: +range .. _api_paddle_fluid_layers_range: +to_tensor .. _api_paddle_tensor_creation_to_tensor: +rnn .. _api_paddle_fluid_layers_rnn: +start_profiler .. _api_paddle_fluid_profiler_start_profiler: +BatchSampler .. _api_paddle_io_BatchSampler: +SmoothL1Loss .. _api_paddle_nn_layer_loss_SmoothL1Loss: +DatasetFactory .. _api_paddle_fluid_dataset_DatasetFactory: +AdaptiveAvgPool2d .. _api_paddle_nn_layer_pooling_AdaptiveAvgPool2d: +sequence_slice .. 
_api_paddle_fluid_layers_sequence_slice: +SpectralNorm .. _api_paddle_fluid_dygraph_SpectralNorm: +WeightNormParamAttr .. _api_paddle_fluid_param_attr_WeightNormParamAttr: +test .. _api_paddle_dataset_imdb_test: +multiclass_nms2 .. _api_paddle_fluid_contrib_multiclass_nms2: +ones .. _api_paddle_fluid_layers_ones: +Sequential .. _api_paddle_fluid_dygraph_container_Sequential: +reduce_max .. _api_paddle_fluid_layers_reduce_max: +ReflectionPad2d .. _api_paddle_nn_ReflectionPad2d: +xmap_readers .. _api_paddle_reader_xmap_readers: +prior_box .. _api_paddle_fluid_layers_prior_box: +PiecewiseDecay .. _api_paddle_fluid_dygraph_learning_rate_scheduler_PiecewiseDecay: +validation .. _api_paddle_dataset_wmt16_validation: +conv2d .. _api_paddle_nn_functional_conv_conv2d: +InMemoryDataset .. _api_paddle_distributed_fleet_InMemoryDataset: +index_sample .. _api_paddle_tensor_search_index_sample: +cumsum .. _api_paddle_fluid_layers_cumsum: +nce .. _api_paddle_fluid_layers_nce: +stack .. _api_paddle_tensor_manipulation_stack: +get_logger .. _api_paddle_fluid_log_helper_get_logger: +basic_lstm .. _api_paddle_fluid_contrib_basic_lstm: +create_global_var .. _api_paddle_fluid_layers_tensor_create_global_var: +argmin .. _api_paddle_fluid_layers_argmin: +load_vars .. _api_paddle_fluid_io_load_vars: +dot .. _api_paddle_tensor_linalg_dot: +build_dict .. _api_paddle_dataset_imdb_build_dict: +matmul .. _api_paddle_fluid_layers_matmul: +sparse_embedding .. _api_paddle_fluid_contrib_sparse_embedding: +elu .. _api_paddle_nn_functional_elu: +expand_as .. _api_paddle_tensor_manipulation_expand_as: +instance_norm .. _api_paddle_fluid_layers_instance_norm: +cos .. _api_paddle_fluid_layers_cos: +hard_sigmoid .. _api_paddle_fluid_layers_hard_sigmoid: +rank_attention .. _api_paddle_fluid_contrib_rank_attention: +While .. _api_paddle_fluid_layers_While: +sequence_last_step .. _api_paddle_fluid_layers_sequence_last_step: +softshrink .. _api_paddle_fluid_layers_softshrink: +BasicLSTMUnit .. _api_paddle_fluid_contrib_BasicLSTMUnit: +target_assign .. _api_paddle_fluid_layers_target_assign: +Constant .. _api_paddle_fluid_initializer_Constant: +create_static_variable_gast_node .. _api_paddle_fluid_dygraph_dygraph_to_static_create_static_variable_gast_node: +BasicGRUUnit .. _api_paddle_fluid_contrib_BasicGRUUnit: +sequence_pool .. _api_paddle_fluid_layers_sequence_pool: +RowConv .. _api_paddle_nn_layer_extension_RowConv: +scatter_nd_add .. _api_paddle_fluid_layers_scatter_nd_add: +ceil .. _api_paddle_fluid_layers_ceil: +save .. _api_paddle_fluid_save: +teacher_student_sigmoid_loss .. _api_paddle_fluid_layers_teacher_student_sigmoid_loss: +deformable_conv .. _api_paddle_fluid_layers_deformable_conv: +pad .. _api_paddle_fluid_layers_pad: +dice_loss .. _api_paddle_fluid_layers_dice_loss: +Linear .. _api_paddle_fluid_dygraph_Linear: +fill_constant .. _api_paddle_fluid_layers_fill_constant: +kron .. _api_paddle_tensor_math_kron: +embedding .. _api_paddle_fluid_input_embedding: +CompositeMetric .. _api_paddle_metric_CompositeMetric: +pad_constant_like .. _api_paddle_fluid_layers_pad_constant_like: +UserDefinedCollectiveRoleMaker .. _api_paddle_fluid_incubate_fleet_base_role_maker_UserDefinedCollectiveRoleMaker: +HDFSClient .. _api_paddle_distributed_fleet_utils_HDFSClient: +flatten .. _api_paddle_tensor_manipulation_flatten: +signature_safe_contextmanager .. _api_paddle_fluid_wrapped_decorator_signature_safe_contextmanager: +shape .. _api_paddle_fluid_layers_shape: +default_main_program .. 
_api_paddle_fluid_framework_default_main_program: +row_conv .. _api_paddle_fluid_layers_row_conv: +sigmoid_cross_entropy_with_logits .. _api_paddle_fluid_layers_sigmoid_cross_entropy_with_logits: +relu .. _api_paddle_nn_functional_activation_relu: +stanh .. _api_paddle_fluid_layers_stanh: +normal .. _api_paddle_tensor_random_normal: +set_flags .. _api_paddle_fluid_set_flags: +RecomputeOptimizer .. _api_paddle_distributed_fleet_meta_optimizers_RecomputeOptimizer: +LambOptimizer .. _api_paddle_optimizer_LambOptimizer: +conv2d .. _api_paddle_fluid_layers_conv2d: +ELU .. _api_paddle_nn_layer_activation_ELU: +scatter_nd .. _api_paddle_fluid_layers_scatter_nd: +to_chw .. _api_paddle_dataset_image_to_chw: +cache .. _api_paddle_io_cache: +rpn_target_assign .. _api_paddle_fluid_layers_rpn_target_assign: +logical_or .. _api_paddle_fluid_layers_logical_or: +mse_loss .. _api_paddle_fluid_layers_mse_loss: +sequence_mask .. _api_paddle_fluid_layers_sequence_mask: +mul .. _api_paddle_fluid_layers_mul: +group_norm .. _api_paddle_fluid_layers_group_norm: +test .. _api_paddle_dataset_uci_housing_test: +split .. _api_paddle_tensor_manipulation_split: +Conv2D .. _api_paddle_fluid_dygraph_Conv2D: +round .. _api_paddle_fluid_layers_round: +WeightedAverage .. _api_paddle_fluid_average_WeightedAverage: +mean .. _api_paddle_tensor_stat_mean: +diag .. _api_paddle_tensor_creation_diag: +mean .. _api_paddle_fluid_layers_mean: +GeneralRoleMaker .. _api_paddle_fluid_incubate_fleet_base_role_maker_GeneralRoleMaker: +Ftrl .. _api_paddle_optimizer_Ftrl: +softmax .. _api_paddle_fluid_layers_softmax: +pool3d .. _api_paddle_fluid_layers_pool3d: +FSFileNotExistsError .. _api_paddle_distributed_fleet_utils_FSFileNotExistsError: +Adadelta .. _api_paddle_optimizer_Adadelta: +Sigmoid .. _api_paddle_nn_layer_activation_Sigmoid: +interpolate .. _api_paddle_nn_functional_common_interpolate: +Categorical .. _api_paddle_fluid_layers_Categorical: +fc .. _api_paddle_fluid_layers_fc: +ReplicationPad3d .. _api_paddle_nn_ReplicationPad3d: +reduce_sum .. _api_paddle_fluid_layers_reduce_sum: +InverseTimeDecay .. _api_paddle_fluid_dygraph_learning_rate_scheduler_InverseTimeDecay: +randn .. _api_paddle_tensor_random_randn: +Softplus .. _api_paddle_nn_layer_activation_Softplus: +ConvTranspose1d .. _api_paddle_nn_layer_conv_ConvTranspose1d: +ParameterList .. _api_paddle_fluid_dygraph_container_ParameterList: +Dropout .. _api_paddle_fluid_dygraph_Dropout: +zeros .. _api_paddle_fluid_layers_zeros: +where .. _api_paddle_tensor_search_where: +LarsMomentum .. _api_paddle_optimizer_LarsMomentum: +trace .. _api_paddle_tensor_math_trace: +set_gradient_clip .. _api_paddle_fluid_clip_set_gradient_clip: +FSTimeOut .. _api_paddle_distributed_fleet_utils_FSTimeOut: +crop_tensor .. _api_paddle_fluid_layers_crop_tensor: +load .. _api_paddle_fluid_layers_load: +program_guard .. _api_paddle_fluid_framework_program_guard: +retinanet_detection_output .. _api_paddle_fluid_layers_retinanet_detection_output: +Sampler .. _api_paddle_io_Sampler: +scope_guard .. _api_paddle_fluid_executor_scope_guard: +shard_index .. _api_paddle_fluid_layers_shard_index: +Normal .. _api_paddle_fluid_initializer_Normal: +download .. _api_paddle_dataset_common_download: +PRelu .. _api_paddle_fluid_dygraph_PRelu: +create_parameter .. _api_paddle_fluid_layers_create_parameter: +resize_short .. _api_paddle_dataset_image_resize_short: +ConstantPad3d .. _api_paddle_nn_ConstantPad3d: +lrn .. _api_paddle_fluid_layers_lrn: +scatter .. 
_api_paddle_fluid_layers_scatter: diff --git a/doc/paddle/api/display_doc_list b/doc/paddle/api/display_doc_list new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/paddle/api/gen_alias_mapping.sh b/doc/paddle/api/gen_alias_mapping.sh new file mode 100755 index 0000000000000000000000000000000000000000..2f1404b8baa696eb887df82b8f93b81efa5476d5 --- /dev/null +++ b/doc/paddle/api/gen_alias_mapping.sh @@ -0,0 +1,74 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +#!/usr/bin/env bash + +if [[ $# -ne 1 ]]; then + echo -e "Usage:\n\t./gen_alias_mapping.sh " + exit 1 +else + PADDLE_ROOT=$1 +fi + +find ${PADDLE_ROOT}/python/ -name '*.py' \ + | xargs grep -v '^#' \ + | grep 'DEFINE_ALIAS' \ + | perl -ne ' + if (/^.*\/python\/(.*):from (\.*)(\w.*) import (.*?)\s+#DEFINE_ALIAS\s+$/) { + my @arr = split(", ", $4); + foreach $i (@arr) { + printf "%s|%s|%s|%d\n", $3, $i, substr($1, 0, -3), length($2); + } + }' \ + | awk -F '[|/]' ' + { + key = ""; + val = ""; + if ($2 ~ /.* as .*/) { + split($2, arr, " as "); + old = arr[1]; + new = arr[2]; + } else { + old = $2; + new = $2; + } + for (i = 3; i <= (NF - 1 - $NF); ++i) { + val = val""$i"."; + } + val = val""$1"."old + for (i = 3; i <= (NF - 1); ++i) { + if ($i != "__init__") { + key = key""$i"."; + } + } + key = key""new; + n2o[key] = val; + } + END { + for (new in n2o) { + old = n2o[new] in n2o ? n2o[n2o[new]] : n2o[new]; + print old, length(new), new; + } + }' \ + | sort -k 1,1 -k 2n,2 \ + | awk ' + { + o2n[$1] = o2n[$1] ? o2n[$1]","$3 : $3; + } + END { + for (i in o2n) { + print i"\t"o2n[i]; + } + }' + diff --git a/doc/paddle/api/gen_doc.py b/doc/paddle/api/gen_doc.py new file mode 100644 index 0000000000000000000000000000000000000000..93926d56b5c44548ee03b80413d976c2adb244e5 --- /dev/null +++ b/doc/paddle/api/gen_doc.py @@ -0,0 +1,270 @@ +import paddle +import os +import shutil +import time +import pkgutil +import types +import contextlib +import argparse + +en_suffix = "_en.rst" +cn_suffix = "_cn.rst" +file_path_dict = {} +same_api_map = {} +alias_api_map = {} +not_display_doc_map = {} +display_doc_map = {} +api_set = set() + + +def get_all_api(root_path='paddle'): + for filefiner, name, ispkg in pkgutil.walk_packages( + path=paddle.__path__, prefix=paddle.__name__ + '.'): + try: + m = eval(name) + except AttributeError: + pass + else: + if hasattr(eval(name), "__all__"): + #may have duplication of api + for api in list(set(eval(name).__all__)): + api_all = name + "." 
+ api + if "," in api: + continue + + try: + fc_id = id(eval(api_all)) + except AttributeError: + pass + else: + api_set.add(api_all) + + +def get_all_same_api(): + for api in api_set: + fc_id = id(eval(api)) + if fc_id in same_api_map: + same_api_map[fc_id].append(api) + else: + same_api_map[fc_id] = [api] + + +def get_not_display_doc_list(file="./not_display_doc_list"): + with open(file, 'r') as f: + for line in f.readlines(): + line = line.strip() + not_display_doc_map[line] = 1 + + +def get_display_doc_map(file="./display_doc_list"): + with open(file, 'r') as f: + for line in f.readlines(): + line = line.strip() + display_doc_map[line] = 1 + + +def get_alias_mapping(file="./alias_api_mapping"): + with open(file, 'r') as f: + for line in f.readlines(): + t = line.strip().split('\t') + real_api = t[0].strip() + alias_apis = t[1].strip().split(',') + alias_api_map[real_api] = alias_apis + + +def is_filter_api(api): + #if api in display_list, just return False + if api in display_doc_map: + return False + + #check api in not_display_list + for key in not_display_doc_map: + #find the api + if key == api: + return True + #find the module + if api.startswith(key): + k_segs = key.split(".") + a_segs = api.split(".") + if k_segs[len(k_segs) - 1] == a_segs[len(k_segs) - 1]: + return True + + #check api in alias map + if api in alias_api_map: + return False + + same_apis = same_api_map[id(eval(api))] + + #api not in alias map + #if the api in alias_map key, others api is alias api + for x in same_apis: + if x in alias_api_map: + return True + + if len(same_apis) > 1: + # find shortest path of api as the real api + # others api as the alias api + shortest = len(same_apis[0].split(".")) + for x in same_apis: + if len(x.split(".")) < shortest: + shortest = len(x.split(".")) + + if len(api.split(".")) == shortest: + return False + else: + return True + return False + + +def gen_en_files(root_path='paddle', api_label_file="api_label"): + backup_path = root_path + "_" + str(int(time.time())) + api_f = open(api_label_file, 'w') + + for api in api_set: + if is_filter_api(api): + continue + module_name = ".".join(api.split(".")[0:-1]) + doc_file = api.split(".")[-1] + + if isinstance(eval(module_name + "." + doc_file), types.ModuleType): + continue + + path = "/".join(api.split(".")[0:-1]) + if not os.path.exists(path): + os.makedirs(path) + f = api.replace(".", "/") + if os.path.exists(f + en_suffix): + continue + os.mknod(f + en_suffix) + gen = EnDocGenerator() + with gen.guard(f + en_suffix): + gen.module_name = module_name + gen.api = doc_file + gen.print_header_reminder() + gen.print_item() + api_f.write(doc_file + "\t" + ".. 
_api_{0}_{1}:\n".format("_".join( + gen.module_name.split(".")), gen.api)) + api_f.close() + + +def clean_en_files(path="./paddle"): + for root, dirs, files in os.walk(path): + for file in files: + if file.endswith(en_suffix): + os.remove(os.path.join(root, file)) + + +def check_cn_en_match(path="./paddle", diff_file="en_cn_files_diff"): + fo = open(diff_file, 'w') + fo.write("exist\tnot_exits\n") + for root, dirs, files in os.walk(path): + for file in files: + if file.endswith(en_suffix): + cf = file.replace(en_suffix, cn_suffix) + if not os.path.exists(root + "/" + cf): + fo.write( + os.path.join(root, file) + "\t" + os.path.join( + root, cf) + "\n") + + elif file.endswith(cn_suffix): + ef = file.replace(cn_suffix, en_suffix) + if not os.path.exists(root + "/" + ef): + fo.write( + os.path.join(root, file) + "\t" + os.path.join( + root, ef) + "\n") + fo.close() + + +class EnDocGenerator(object): + def __init__(self, name=None, api=None): + self.module_name = name + self.api = api + self.stream = None + + @contextlib.contextmanager + def guard(self, filename): + assert self.stream is None, "stream must be None" + self.stream = open(filename, 'w') + yield + self.stream.close() + self.stream = None + + def print_item(self): + try: + m = eval(self.module_name + "." + self.api) + except AttributeError: + #print("attribute error: module_name=" + self.module_name + ", api=" + self.api) + pass + else: + if isinstance(eval(self.module_name + "." + self.api), type): + self.print_class() + elif isinstance( + eval(self.module_name + "." + self.api), + types.FunctionType): + self.print_function() + + def print_header_reminder(self): + self.stream.write('''.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +''') + + def _print_ref_(self): + self.stream.write(".. _api_{0}_{1}:\n\n".format("_".join( + self.module_name.split(".")), self.api)) + + def _print_header_(self, name, dot, is_title): + dot_line = dot * len(name) + if is_title: + self.stream.write(dot_line) + self.stream.write('\n') + self.stream.write(name) + self.stream.write('\n') + self.stream.write(dot_line) + self.stream.write('\n') + self.stream.write('\n') + + def print_class(self): + self._print_ref_() + self._print_header_(self.api, dot='-', is_title=False) + if "fluid.dygraph" in self.module_name: + self.stream.write('''.. autoclass:: {0}.{1} + :members: + :noindex: + +'''.format(self.module_name, self.api)) + elif "fluid.optimizer" in self.module_name: + self.stream.write('''.. autoclass:: {0}.{1} + :members: + :inherited-members: + :exclude-members: apply_gradients, apply_optimize, backward, load + :noindex: + +'''.format(self.module_name, self.api)) + else: + self.stream.write('''.. autoclass:: {0}.{1} + :members: + :inherited-members: + :noindex: + +'''.format(self.module_name, self.api)) + + def print_function(self): + self._print_ref_() + self._print_header_(self.api, dot='-', is_title=False) + self.stream.write('''.. 
autofunction:: {0}.{1} + :noindex: + +'''.format(self.module_name, self.api)) + + +if __name__ == "__main__": + get_all_api() + get_not_display_doc_list() + get_display_doc_map() + get_all_same_api() + get_alias_mapping() + + clean_en_files() + gen_en_files() + check_cn_en_match() diff --git a/doc/paddle/api/gen_doc.sh b/doc/paddle/api/gen_doc.sh new file mode 100644 index 0000000000000000000000000000000000000000..ad147e5fc8714e6170adcf8548549906f8ee5be3 --- /dev/null +++ b/doc/paddle/api/gen_doc.sh @@ -0,0 +1,2 @@ +#!/bin/bash +python3 gen_doc.py diff --git a/doc/paddle/api/index_cn.rst b/doc/paddle/api/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..49b4e022aa0399d3a836da96fbf1087caf16daad --- /dev/null +++ b/doc/paddle/api/index_cn.rst @@ -0,0 +1,60 @@ +================== +API 文档 +================== + +PaddlePaddle (PArallel Distributed Deep LEarning)是一个易用、高效、灵活、可扩展的深度学习框架。 +本页列出了PaddlePaddle 2.0-beta所支持的API,您可以在此查看该API的相关信息。 + +此外,您可参考PaddlePaddle的 `GitHub `_ 了解详情,也可阅读 `版本说明 <../release_note_cn.html>`_ 了解新版本的特性。 + +**飞桨框架2.0的API目录结构如下:** + ++-------------------------------+-------------------------------------------------------+ +| 目录 | 功能和包含的API | ++===============================+=======================================================+ +| paddle.\* | paddle | +| | 根目录下保留了常用API的别名,当前包括:paddle.tensor, | +| | paddle.framework目录下的所有API | ++-------------------------------+-------------------------------------------------------+ +| paddle.tensor | 跟tensor操作相关的API,比如:创建zeros, | +| | 矩阵运算matmul, 变换concat, 计算add, 查找argmax等 | ++-------------------------------+-------------------------------------------------------+ +| paddle.nn | 跟组网相关的API,比如:Linear, | +| | Conv2d,损失函数,卷积,LSTM等,激活函数等 | ++-------------------------------+-------------------------------------------------------+ +| paddle.static.nn | 静态图下组网专用A | +| | PI,比如:输入占位符data/Input,控制流while_loop/cond | ++-------------------------------+-------------------------------------------------------+ +| paddle.static | 静态图下基础框架相关API,比如:Variable, Program, | +| | Executor等 | ++-------------------------------+-------------------------------------------------------+ +| paddle.framework | 框架通用API和imprerative模式的API,比如:to_tensor, | +| | prepare_context等 | ++-------------------------------+-------------------------------------------------------+ +| paddle.optimizer | 优化算法相关API,比如:SGD,Adagrad, Adam等 | +| | | ++-------------------------------+-------------------------------------------------------+ +| paddle.optimizer.lr_scheduler | 学习率衰减相关API | +| | | ++-------------------------------+-------------------------------------------------------+ +| paddle.metric | 评估指标计算相关的API,比如:accuracy, auc等 | +| | | ++-------------------------------+-------------------------------------------------------+ +| paddle.io | 数据输入输出相关API,比如:save, load, Dataset, | +| | DataLoader等 | ++-------------------------------+-------------------------------------------------------+ +| paddle.device | 设备管理相关API,比如:CPUPlace, CUDAPlace等 | +| | | ++-------------------------------+-------------------------------------------------------+ +| paddle.distributed | 分布式相关基础API | +| | | ++-------------------------------+-------------------------------------------------------+ +| paddle.distributed.fleet | 分布式相关高层API | +| | | ++-------------------------------+-------------------------------------------------------+ +| paddle.vision | 视觉领域API, | +| | 比如,数据集,数据处理,常用基础网络结构,比如resnet | ++-------------------------------+-------------------------------------------------------+ 
+| paddle.text | NLP领域API, | +| | 比如,数据集,数据处理,常用网络结构,比如transformer | ++-------------------------------+-------------------------------------------------------+ diff --git a/doc/paddle/api/index_en.rst b/doc/paddle/api/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..e4aadca39569bae5ffaf077a932ad6f89399612f --- /dev/null +++ b/doc/paddle/api/index_en.rst @@ -0,0 +1,65 @@ +================== +API Reference +================== + +PaddlePaddle (PArallel Distributed Deep LEarning) is an efficient, flexible, and extensible deep learning framework. +This page lists the APIs supported by PaddlePaddle 2.0-beta. You can view the information of the APIs here. + +In addition, you can refer to PaddlePaddle's `GitHub `_ for details, or read `Release Notes <../release_note_en.html>`_ to learn about the features of the new version. + +**The API directory structure of PaddlePaddle 2.0-beta is as follows:** + ++-------------------------------+-------------------------------------------------------+ +| Directory | Functions and Included APIs | ++===============================+=======================================================+ +| paddle.* | The aliases of commonly used APIs are reserved in the | +| | paddle root directory, which currently include all | +| | the APIs in the paddle.tensor and paddle.framework | +| | directories | ++-------------------------------+-------------------------------------------------------+ +| paddle.tensor | APIs related to tensor operations such as creating | +| | zeros, matrix operation matmul, transforming concat, | +| | computing add, and finding argmax | ++-------------------------------+-------------------------------------------------------+ +| paddle.nn | Networking-related APIs such as Linear, Conv2d, loss | +| | function, convolution, LSTM,and activation function | ++-------------------------------+-------------------------------------------------------+ +| paddle.static.nn | Special APIs for networking under a static graph such | +| | as input placeholder data/Input and control flow | +| | while_loop/cond | ++-------------------------------+-------------------------------------------------------+ +| paddle.static | APIs related to the basic framework under a static | +| | graph such as Variable, Program, and Executor | ++-------------------------------+-------------------------------------------------------+ +| paddle.framework | Universal APIs and imprerative mode APIs such as | +| | to_variable and prepare_context | ++-------------------------------+-------------------------------------------------------+ +| paddld.optimizer | APIs related to optimization algorithms such as SGD, | +| | Adagrad, and Adam | ++-------------------------------+-------------------------------------------------------+ +| paddle.optimizer.lr_scheduler | APIs related to learning rate attenuation | +| | | ++-------------------------------+-------------------------------------------------------+ +| paddle.metric | APIs related to evaluation index computation such as | +| | accuracy and auc | ++-------------------------------+-------------------------------------------------------+ +| paddle.io | APIs related to data input and output such as save, | +| | load, Dataset, and DataLoader | ++-------------------------------+-------------------------------------------------------+ +| paddle.device | APIs related to device management such as CPUPlace | +| | and CUDAPlace | ++-------------------------------+-------------------------------------------------------+ +| 
paddle.distributed | Distributed related basic APIs | +| | | ++-------------------------------+-------------------------------------------------------+ +| paddle.distributed.fleet | Distributed related high-level APIs | +| | | ++-------------------------------+-------------------------------------------------------+ +| paddle.vision | Vision domain APIs such as datasets, data processing, | +| | and commonly used basic network structures like | +| | resnet | ++-------------------------------+-------------------------------------------------------+ +| paddle.text | NLP domain APIs such as datasets, data processing, | +| | and commonly used basic network structures like | +| | transformer | ++-------------------------------+-------------------------------------------------------+ diff --git a/doc/paddle/api/not_display_doc_list b/doc/paddle/api/not_display_doc_list new file mode 100644 index 0000000000000000000000000000000000000000..d29ca804fbbd1cd29dbac5b94ac447fcfc99fcec --- /dev/null +++ b/doc/paddle/api/not_display_doc_list @@ -0,0 +1,3 @@ +paddle.utils +paddle.incubate +paddle.hapi.progressbar.ProgressBar diff --git a/doc/paddle/api/paddle/compat/round_cn.rst b/doc/paddle/api/paddle/compat/round_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0e5eed214ae6a74521e030803a36525c21e2820b --- /dev/null +++ b/doc/paddle/api/paddle/compat/round_cn.rst @@ -0,0 +1,31 @@ +.. _cn_api_tensor_cn_round: + +round +------------------------------- + +.. py:function:: paddle.round(x, name=None) + + + +该OP将输入中的数值四舍五入到最接近的整数数值。 + +参数: + - **x** (Tensor) - 输入的 `Tensor` ,数据类型为: float16, float32, float64。 + - **name** (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +返回: + - Tensor,对输入x四舍五入后的Tensor,形状、数据类型与输入x一致。 + + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle + paddle.disable_static() + x_data = np.array([-0.5, -0.2, 0.6, 1.5]) + x = paddle.to_tensor(x_data) + out = paddle.round(x) + print(out.numpy()) + # [-1. -0. 1. 2.] diff --git a/doc/paddle/api/paddle/dataset/Conll05_cn.rst b/doc/paddle/api/paddle/dataset/Conll05_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4aed0c84ead74ff9b52f83982509d4c6aa4462a7 --- /dev/null +++ b/doc/paddle/api/paddle/dataset/Conll05_cn.rst @@ -0,0 +1,31 @@ +.. _cn_api_paddle_dataset_Conll05: + +Conll05 +------------------------------- + +Conll05数据集。Paddle深度学习基础中的语义角色标注文档使用这个数据集为例。因为Conll05数据集不是免费公开的,所以默认下载的url是Conll05的测试集(它是公开的)。用户可以将url和md5更改为其Conll数据集。并采用基于维基百科语料库的预训练词向量模型对SRL模型进行初始化。 + + +.. py:function:: paddle.dataset.conll05.get_dict() + +获取维基百科语料库的单词、动词和标签字典。 + + +.. py:function:: paddle.dataset.conll05.get_embedding() + +获取基于维基百科语料库的训练词向量。 + + + +.. py:function:: paddle.dataset.conll05.test() + +Conll05测试数据集的creator。 + +因为训练数据集不是免费公开的,所以用测试数据集进行训练。它返回一个reader creator,reader中的每个样本都有九个特征,包括句子序列、谓词、谓词上下文、谓词上下文标记和标记序列。 + +返回: 训练数据集的reader creator + +返回类型:callable + + + diff --git a/doc/paddle/api/paddle/dataset/cifar_cn.rst b/doc/paddle/api/paddle/dataset/cifar_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b8ece575e4509e3bad6ad130dd2b28a0c2d4cbea --- /dev/null +++ b/doc/paddle/api/paddle/dataset/cifar_cn.rst @@ -0,0 +1,71 @@ +.. 
_cn_api_paddle_dataset_cifar: + +cifar +------------------------------- + +CIFAR数据集。 + +此模块将从 https://www.cs.toronto.edu/~kriz/cifar.html 下载数据集,并将训练集和测试集解析为paddle reader creator。 + +cifar-10数据集由10个类别的60000张32x32彩色图像组成,每个类别6000张图像。共有5万张训练图像,1万张测试图像。 + +cifar-100数据集与cifar-10类似,只是它有100个类,每个类包含600张图像。每个类有500张训练图像和100张测试图像。 + + + +.. py:function:: paddle.dataset.cifar.train100() + +CIFAR-100训练数据集的creator。 + +它返回一个reader creator, reader中的每个样本的图像像素范围是[0,1],标签范围是[0,99]。 + +返回: 训练数据集的reader creator。 + +返回类型:callable + + +.. py:function:: paddle.dataset.cifar.test100() + +CIFAR-100测试数据集的creator。 + +它返回一个reader creator, reader中的每个样本的图像像素范围是[0,1],标签范围是[0,99]。 + +返回: 测试数据集的reader creator + +返回类型:callable + + +.. py:function:: paddle.dataset.cifar.train10(cycle=False) + +CIFAR-10训练数据集的creator。 + +它返回一个reader creator, reader中的每个样本的图像像素范围是[0,1],标签范围是[0,9]。 + +参数: + - **cycle** (bool) – 是否循环使用数据集 + +返回: 训练数据集的reader creator + +返回类型:callable + + +.. py:function:: paddle.dataset.cifar.test10(cycle=False) + +CIFAR-10测试数据集的creator。 + +它返回一个reader creator, reader中的每个样本的图像像素范围是[0,1],标签范围是[0,9]。 + +参数: + - **cycle** (bool) – 是否循环使用数据集 + +返回: 测试数据集的reader creator + +返回类型:callable + + +.. py:function:: paddle.dataset.cifar.convert(path) + +将数据集转换为recordio格式。 + + + diff --git a/doc/paddle/api/paddle/dataset/common/split_cn.rst b/doc/paddle/api/paddle/dataset/common/split_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d7421732749cd2cb0892d98371d2ca6d0da50540 --- /dev/null +++ b/doc/paddle/api/paddle/dataset/common/split_cn.rst @@ -0,0 +1,52 @@ +.. _cn_api_paddle_tensor_split: +split +------------------------------- + +.. py:function:: paddle.tensor.split(x, num_or_sections, axis=0, name=None) + + + +该OP将输入Tensor分割成多个子Tensor。 + +**参数**: + - **x** (Tensor) - 输入变量,数据类型为bool, float16, float32,float64,int32,int64的多维Tensor。 + - **num_or_sections** (int|list|tuple) - 如果 ``num_or_sections`` 是一个整数,则表示Tensor平均划分为相同大小子Tensor的数量。如果 ``num_or_sections`` 是一个list或tuple,那么它的长度代表子Tensor的数量,它的元素可以是整数或者形状为[1]的Tensor,依次代表子Tensor需要分割成的维度的大小。list或tuple的长度不能超过输入Tensor待分割的维度的大小。在list或tuple中,至多有一个元素值为-1,表示该值是由 ``x`` 的维度和其他 ``num_or_sections`` 中元素推断出来的。例如对一个维度为[4,6,6]Tensor的第三维进行分割时,指定 ``num_or_sections=[2,-1,1]`` ,输出的三个Tensor维度分别为:[4,6,2],[4,6,3],[4,6,1]。 + - **axis** (int|Tensor,可选) - 整数或者形状为[1]的Tensor,数据类型为int32或int64。表示需要分割的维度。如果 ``axis < 0`` ,则划分的维度为 ``rank(x) + axis`` 。默认值为0。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:分割后的Tensor列表。 + + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle + + paddle.disable_static() + # x is a Tensor which shape is [3, 9, 5] + x_np = np.random.random([3, 9, 5]).astype("int32") + x = paddle.to_tensor(x_np) + + out0, out1, out2 = paddle.split(x, num_or_sections=3, axis=1) + # out0.shape [3, 3, 5] + # out1.shape [3, 3, 5] + # out2.shape [3, 3, 5] + + out0, out1, out2 = paddle.split(x, num_or_sections=[2, 3, 4], axis=1) + # out0.shape [3, 2, 5] + # out1.shape [3, 3, 5] + # out2.shape [3, 4, 5] + + out0, out1, out2 = paddle.split(x, num_or_sections=[2, 3, -1], axis=1) + # out0.shape [3, 2, 5] + # out1.shape [3, 3, 5] + # out2.shape [3, 4, 5] + + # axis is negative, the real axis is (rank(x) + axis) which real + # value is 1.
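+ # For this x of shape [3, 9, 5], rank(x) is 3, so the resolved axis is 3 - 2 = 1, + # and the result matches the first split above: three sub-Tensors of shape [3, 3, 5].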
+ out0, out1, out2 = paddle.split(x, num_or_sections=3, axis=-2) + # out0.shape [3, 3, 5] + # out1.shape [3, 3, 5] + # out2.shape [3, 3, 5] diff --git a/doc/paddle/api/paddle/dataset/image/random_crop_cn.rst b/doc/paddle/api/paddle/dataset/image/random_crop_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f00e9f9285b31dc6ab899c2081e9bdd10faad39b --- /dev/null +++ b/doc/paddle/api/paddle/dataset/image/random_crop_cn.rst @@ -0,0 +1,38 @@ +.. _cn_api_fluid_layers_random_crop: + +random_crop +------------------------------- + +.. py:function:: paddle.fluid.layers.random_crop(x, shape, seed=None) + + + + +该操作对batch中每个实例进行随机裁剪,即每个实例的裁剪位置不同,裁剪位置由均匀分布随机数生成器决定。所有裁剪后的实例都具有相同的维度,由 ``shape`` 参数决定。 + +参数: + - **x(Variable)** - 多维Tensor。 + - **shape(list(int))** - 裁剪后最后几维的形状,注意, ``shape`` 的长度需小于 ``x`` 的秩。 + - **seed(int|Variable,可选)** - 设置随机数种子,默认情况下,种子是[-65536, 65536)中的一个随机数,如果类型是Variable,要求数据类型是int64,默认值:None。 + +返回: 裁剪后的Tensor。 + +返回类型:Variable + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + img = fluid.data("img", [None, 3, 256, 256]) + # cropped_img的shape: [-1, 3, 224, 224] + cropped_img = fluid.layers.random_crop(img, shape=[3, 224, 224]) + + # cropped_img2的shape: [-1, 2, 224, 224] + # cropped_img2 = fluid.layers.random_crop(img, shape=[2,224, 224]) + + # cropped_img3的shape: [-1, 3, 128, 224] + # cropped_img3 = fluid.layers.random_crop(img, shape=[128, 224]) + + + diff --git a/doc/paddle/api/paddle/dataset/imdb_cn.rst b/doc/paddle/api/paddle/dataset/imdb_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..02101b30d434849752ba230525ad42c9c08fad99 --- /dev/null +++ b/doc/paddle/api/paddle/dataset/imdb_cn.rst @@ -0,0 +1,50 @@ +.. _cn_api_paddle_dataset_imdb: + +imdb +------------------------------- + +IMDB数据集。 + +此模块将从 http://ai.stanford.edu/%7Eamaas/data/sentiment/aclImdb_v1.tar.gz 下载数据集。这个数据集包含了25000条训练用电影评论数据,25000条测试用评论数据,且这些评论带有明显情感倾向。此外,该模块还提供了用于构建词典的API。 + + +.. py:function:: paddle.dataset.imdb.build_dict(pattern, cutoff) + +从语料库构建一个单词字典,词典的键是word,值是这些单词从0开始的ID。 + + +.. py:function:: paddle.dataset.imdb.train(word_idx) + +IMDB训练数据集的creator。 + + +它返回一个reader creator, reader中的每个样本是一个从0开始的ID序列,标签范围是[0,1]。 + + +参数: + - **word_idx** (dict) – 词典 + +返回: 训练数据集的reader creator + +返回类型:callable + + +.. py:function:: paddle.dataset.imdb.test(word_idx) + +IMDB测试数据集的creator。 + +它返回一个reader creator, reader中的每个样本是一个从0开始的ID序列,标签范围是[0,1]。 + +参数: + - **word_idx** (dict) – 词典 + +返回: 测试数据集的reader creator + +返回类型:callable + + +.. py:function:: paddle.dataset.imdb.convert(path) + +将数据集转换为recordio格式。 + + diff --git a/doc/paddle/api/paddle/dataset/imikolov_cn.rst b/doc/paddle/api/paddle/dataset/imikolov_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3f6eab3f782420ce77e9f5414242516854194b36 --- /dev/null +++ b/doc/paddle/api/paddle/dataset/imikolov_cn.rst @@ -0,0 +1,50 @@ +.. _cn_api_paddle_dataset_imikolov: + +imikolov +------------------------------- + +imikolov的简化版数据集。 + +此模块将从 http://www.fit.vutbr.cz/~imikolov/rnnlm/ 下载数据集,并将训练集和测试集解析为paddle reader creator。 + +.. py:function:: paddle.dataset.imikolov.build_dict(min_word_freq=50) + +从语料库构建一个单词字典,字典的键是word,值是这些单词从0开始的ID。 + +.. 
py:function:: paddle.dataset.imikolov.train(word_idx, n, data_type=1) + +imikolov训练数据集的creator。 + +它返回一个reader creator, reader中的每个样本的是一个单词ID元组。 + +参数: + - **word_idx** (dict) – 词典 + - **n** (int) – 如果类型是ngram,表示滑窗大小;否则表示序列最大长度 + - **data_type** (数据类型的成员变量(NGRAM 或 SEQ)) – 数据类型 (ngram 或 sequence) + +返回: 训练数据集的reader creator + +返回类型:callable + +.. py:function::paddle.dataset.imikolov.test(word_idx, n, data_type=1) + +imikolov测试数据集的creator。 + +它返回一个reader creator, reader中的每个样本的是一个单词ID元组。 + +参数: + - **word_idx** (dict) – 词典 + - **n** (int) – 如果类型是ngram,表示滑窗大小;否则表示序列最大长度 + - **data_type** (数据类型的成员变量(NGRAM 或 SEQ)) – 数据类型 (ngram 或 sequence) + +返回: 测试数据集的reader creator + +返回类型:callable + + +.. py:function:: paddle.dataset.imikolov.convert(path) + +将数据集转换为recordio格式。 + + + diff --git a/doc/paddle/api/paddle/dataset/mnist_cn.rst b/doc/paddle/api/paddle/dataset/mnist_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3e00675dc198972b6b8a2f4c112b1eafa8186dcb --- /dev/null +++ b/doc/paddle/api/paddle/dataset/mnist_cn.rst @@ -0,0 +1,41 @@ +.. _cn_api_paddle_dataset_mnist: + +mnist +------------------------------- + +MNIST数据集。 + +此模块将从 http://yann.lecun.com/exdb/mnist/ 下载数据集,并将训练集和测试集解析为paddle reader creator。 + + + +.. py:function:: paddle.dataset.mnist.train() + +MNIST训练数据集的creator。 + +它返回一个reader creator, reader中的每个样本的图像像素范围是[-1,1],标签范围是[0,9]。 + +返回: 训练数据的reader creator + +返回类型:callable + + + +.. py:function:: paddle.dataset.mnist.test() + +MNIST测试数据集的creator。 + +它返回一个reader creator, reader中的每个样本的图像像素范围是[-1,1],标签范围是[0,9]。 + +返回: 测试数据集的reader creator + +返回类型:callable + + + +.. py:function:: paddle.dataset.mnist.convert(path) + +将数据集转换为recordio格式。 + + + diff --git a/doc/paddle/api/paddle/dataset/movielens_cn.rst b/doc/paddle/api/paddle/dataset/movielens_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d2cbd77ac2787ec5f70ecfed92323ca15c0f9d7f --- /dev/null +++ b/doc/paddle/api/paddle/dataset/movielens_cn.rst @@ -0,0 +1,57 @@ +.. _cn_api_paddle_dataset_movielens: + +movielens +------------------------------- + + +Movielens 1-M数据集。 + +Movielens 1-M数据集是由GroupLens Research采集的6000个用户对4000个电影的的100万个评级。 该模块将从 http://files.grouplens.org/datasets/movielens/ml-1m.zip 下载Movielens 1-M数据集,并将训练集和测试集解析为paddle reader creator。 + + +.. py:function:: paddle.dataset.movielens.get_movie_title_dict() + +获取电影标题词典。 + +.. py:function:: paddle.dataset.movielens.max_movie_id() + +获取电影ID的最大值。 + + +.. py:function:: paddle.dataset.movielens.max_user_id() + +获取用户ID的最大值。 + + +.. py:function:: paddle.dataset.movielens.max_job_id() + +获取职业ID的最大值。 + + +.. py:function:: paddle.dataset.movielens.movie_categories() + +获取电影类别词典。 + +.. py:function:: paddle.dataset.movielens.user_info() + +获取用户信息词典。 + +.. py:function:: paddle.dataset.movielens.movie_info() + +获取电影信息词典。 + +.. py:function:: paddle.dataset.movielens.convert(path) + +将数据集转换为recordio格式。 + +.. py:class:: paddle.dataset.movielens.MovieInfo(index, categories, title) + +电影ID,标题和类别信息存储在MovieInfo中。 + + +.. py:class:: paddle.dataset.movielens.UserInfo(index, gender, age, job_id) + +用户ID,性别,年龄和工作信息存储在UserInfo中。 + + + diff --git a/doc/paddle/api/paddle/dataset/sentiment_cn.rst b/doc/paddle/api/paddle/dataset/sentiment_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d5826830f07689805114028f6b387731e4770be2 --- /dev/null +++ b/doc/paddle/api/paddle/dataset/sentiment_cn.rst @@ -0,0 +1,28 @@ +.. _cn_api_paddle_dataset_sentiment: + +sentiment +------------------------------- + +脚本获取并预处理由NLTK提供的movie_reviews数据集。 + + +.. 
py:function:: paddle.dataset.sentiment.get_word_dict() + +按照样本中出现的单词的频率对单词进行排序。 + +返回: words_freq_sorted + +.. py:function:: paddle.dataset.sentiment.train() + +默认的训练集reader creator。 + +.. py:function:: paddle.dataset.sentiment.test() + +默认的测试集reader creator。 + +.. py:function:: paddle.dataset.sentiment.convert(path) + +将数据集转换为recordio格式。 + + + diff --git a/doc/paddle/api/paddle/dataset/uci_housing_cn.rst b/doc/paddle/api/paddle/dataset/uci_housing_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3014c3c5465b843fdd7cf79e059f588f7f32a563 --- /dev/null +++ b/doc/paddle/api/paddle/dataset/uci_housing_cn.rst @@ -0,0 +1,42 @@ +.. _cn_api_paddle_dataset_uci_housing: + +uci_housing +------------------------------- + + + +UCI Housing数据集。 + +该模块将从 https://archive.ics.uci.edu/ml/machine-learning-databases/housing/下载数据集,并将训练集和测试集解析为paddle reader creator。 + + + +.. py:function:: paddle.dataset.uci_housing.train() + +UCI_HOUSING训练集creator。 + +它返回一个reader creator,reader中的每个样本都是正则化和价格编号后的特征。 + +返回:训练集reader creator + +返回类型:callable + + + +.. py:function:: paddle.dataset.uci_housing.test() + + +UCI_HOUSING测试集creator。 + +它返回一个reader creator,reader中的每个样本都是正则化和价格编号后的特征。 + + +返回:测试集reader creator + +返回类型:callable + + + + + + diff --git a/doc/paddle/api/paddle/dataset/wmt14_cn.rst b/doc/paddle/api/paddle/dataset/wmt14_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..163e0d8c6ecaa0fa0a6da7169e23a3c412c1f49d --- /dev/null +++ b/doc/paddle/api/paddle/dataset/wmt14_cn.rst @@ -0,0 +1,43 @@ +.. _cn_api_paddle_dataset_wmt14: + +wmt14 +------------------------------- + +WMT14数据集。 原始WMT14数据集太大,所以提供了一组小数据集。 该模块将从 http://paddlepaddle.cdn.bcebos.com/demo/wmt_shrinked_data/wmt14.tgz 下载数据集,并将训练集和测试集解析为paddle reader creator。 + + +.. py:function:: paddle.dataset.wmt14.train(dict_size) + +WMT14训练集creator。 + +它返回一个reader creator,reader中的每个样本都是源语言单词ID序列,目标语言单词ID序列和下一个单词ID序列。 + +返回:训练集reader creator + +返回类型:callable + + + +.. py:function:: paddle.dataset.wmt14.test(dict_size) + + +WMT14测试集creator。 + +它返回一个reader creator,reader中的每个样本都是源语言单词ID序列,目标语言单词ID序列和下一个单词ID序列。 + +返回:测试集reader creator + +返回类型:callable + + + + +.. py:function:: paddle.dataset.wmt14.convert(path) + +将数据集转换为recordio格式。 + + + + + + diff --git a/doc/paddle/api/paddle/dataset/wmt16_cn.rst b/doc/paddle/api/paddle/dataset/wmt16_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..aaca1ee5aefba65b78da1d63e3907922a2d767f2 --- /dev/null +++ b/doc/paddle/api/paddle/dataset/wmt16_cn.rst @@ -0,0 +1,114 @@ +.. _cn_api_paddle_dataset_wmt16: + +wmt16 +------------------------------- + +ACL2016多模式机器翻译。 有关更多详细信息,请访问此网站:http://www.statmt.org/wmt16/multimodal-task.html#task1 + +如果您任务中使用该数据集,请引用以下文章:Multi30K:多语言英语 - 德语图像描述。 + +@article{elliott-EtAl:2016:VL16, author = {{Elliott}, D. and {Frank}, S. and {Sima”an}, K. and {Specia}, L.}, title = {Multi30K: Multilingual English-German Image Descriptions}, booktitle = {Proceedings of the 6th Workshop on Vision and Language}, year = {2016}, pages = {70–74}, year = 2016 +} + +.. 
py:function:: paddle.dataset.wmt16.train(src_dict_size, trg_dict_size, src_lang='en') + +WMT16训练集reader(读取器)。 + +此功能返回可读取训练数据的reader。 reader返回的每个样本由三个字段组成:源语言单词索引序列,目标语言单词索引序列和下一单词索引序列。 + +注意:训练数据的原始内容如下: http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/training.tar.gz + +paddle.dataset.wmt16使用moses的tokenization脚本提供原始数据集的tokenized版本: https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/tokenizer.perl + +参数: + - **src_dict_size** (int) – 源语言词典的大小。三个特殊标记将被添加到所述词典:为起始标记,为结束标记,为未知单词。 + - **trg_dict_size** (int) – 目标语言字典的大小。三个特殊标记将被添加到所述词典:为起始标记,为结束标记,为未知单词。 + - **src_lang** (string) – 一个字符串,指示哪种语言是源语言。 可用选项包括:英语为“en”,德国为“de”。 + +返回: 读训练集数据的reader + +返回类型: callable + + + +.. py:function:: paddle.dataset.wmt16.test(src_dict_size, trg_dict_size, src_lang='en') + + +WMT16测试(test)集reader。 + +此功能返回可读取测试数据的reader。reader返回的每个样本由三个字段组成:源语言单词索引序列,目标语言单词索引序列和下一单词索引序列。 + +注意:原始测试数据如下: http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/mmt16_task1_test.tar.gz + +paddle.dataset.wmt16使用moses的tokenization脚本提供原始数据集的tokenized版本: https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/tokenizer.perl + + +参数: + - **src_dict_size** (int) – 源语言词典的大小。三个特殊token将被添加到所述词典:为起始标记,为结束标记,为未知单词。 + - **trg_dict_size** (int) – 目标语言字典的大小。三个特殊token将被添加到所述词典:为起始标记,为结束标记,为未知单词。 + - **src_lang** (string) – 一个字符串,指示哪种语言是源语言。 可用选项包括:英语为“en”,德国为“de”。 + + +返回: 读测试集数据的reader + +返回类型: callable + + +.. py:function:: paddle.dataset.wmt16.validation(src_dict_size, trg_dict_size, src_lang='en') + +WMT16验证(validation)集reader。 + +此功能返回可读取验证数据的reader 。reader返回的每个样本由三个字段组成:源语言单词索引序列,目标语言单词索引序列和下一单词索引序列。 + +注意:验证数据的原始内容如下:http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/validation.tar.gz + +paddle.dataset.wmt16使用moses的tokenization脚本提供原始数据集的tokenized版本:https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/tokenizer.perl + + + +参数: + - **src_dict_size** (int) – 源语言词典的大小。三个特殊token将被添加到所述词典:为起始标记,为结束标记,为未知单词。 + - **trg_dict_size** (int) – 目标语言字典的大小。三个特殊token将被添加到所述词典:为起始标记,为结束标记,为未知单词。 + - **src_lang** (string) – 一个字符串,指示哪种语言是源语言。 可用选项包括:英语为“en”,德国为“de”。 + + +返回: 读集数据的reader + +返回类型: callable + + + + + + + +.. py:function:: paddle.dataset.wmt16.get_dict(lang, dict_size, reverse=False) + + +返回指定语言的词典(word dictionary)。 + + +参数: + - **lang** (string) - 表示哪种语言是源语言的字符串。 可用选项包括:英语为“en”,德国为“de”。 + - **dict_size** (int) - 指定语言字典的大小。 + - **reverse** (bool) - 如果reverse设置为False,则返回的python字典将使用word作为键并使用index作为值。 如果reverse设置为True,则返回的python字典将使用index作为键,将word作为值。 + +返回:特定语言的单词词典。 + +返回类型: dict + + + + +.. py:function:: paddle.dataset.wmt16.fetch() + +下载完整的数据集。 + + +.. py:function:: paddle.dataset.wmt16.convert(path, src_dict_size, trg_dict_size, src_lang) + + +将数据集转换为recordio格式。 + + + diff --git a/doc/paddle/api/paddle/distributed/ParallelEnv_cn.rst b/doc/paddle/api/paddle/distributed/ParallelEnv_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..846001c6e98239282f6a971d82a174d3e32068c5 --- /dev/null +++ b/doc/paddle/api/paddle/distributed/ParallelEnv_cn.rst @@ -0,0 +1,145 @@ +.. _cn_api_fluid_dygraph_ParallelEnv: + +ParallelEnv +------------------------------- + +.. py:class:: paddle.fluid.dygraph.ParallelEnv() + +**注意:** + **这个类的曾用名为 Env, 这个旧的名字会被废弃,请使用新的类名 ParallelEnv。** + +这个类用于获取动态图模型并行执行所需的环境变量值。 + +动态图并行模式现在需要使用 `paddle.distributed.launch` 模块启动,所需的环境变量默认由 `paddle.distributed.launch` 模块自动配置。 + +ParallelEnv通常需要和 `fluid.dygraph.DataParallel` 一起使用,用于配置动态图并行执行。 + +**示例代码:** + .. 
code-block:: python + + # 这个示例需要由paddle.distributed.launch启动, 用法为: + # python -m paddle.distributed.launch --selected_gpus=0,1 example.py + # 脚本example.py中的代码是下面这个示例. + + import numpy as np + import paddle.fluid as fluid + import paddle.fluid.dygraph as dygraph + from paddle.fluid.optimizer import AdamOptimizer + from paddle.fluid.dygraph.nn import Linear + from paddle.fluid.dygraph.base import to_variable + + place = fluid.CUDAPlace(fluid.dygraph.ParallelEnv().dev_id) + with fluid.dygraph.guard(place=place): + + # 准备数据并行的环境 + strategy=dygraph.prepare_context() + + linear = Linear(1, 10, act="softmax") + adam = fluid.optimizer.AdamOptimizer() + + # 配置模型为并行模型 + linear = dygraph.DataParallel(linear, strategy) + + x_data = np.random.random(size=[10, 1]).astype(np.float32) + data = to_variable(x_data) + + hidden = linear(data) + avg_loss = fluid.layers.mean(hidden) + + # 根据参与训练GPU卡的数量对loss值进行缩放 + avg_loss = linear.scale_loss(avg_loss) + + avg_loss.backward() + + # 收集各个GPU卡上的梯度值 + linear.apply_collective_grads() + + adam.minimize(avg_loss) + linear.clear_gradients() + +属性 +:::::::::::: + +.. py:attribute:: nranks + +参与训练进程的数量,一般也是训练所使用GPU卡的数量。 + +此属性的值等于环境变量 `PADDLE_TRAINERS_NUM` 的值。默认值为1。 + +**示例代码** + .. code-block:: python + + # 在Linux环境,提前执行此命令: export PADDLE_TRAINERS_NUM=4 + import paddle.fluid as fluid + + env = fluid.dygraph.ParallelEnv() + print("The nranks is %d" % env.nranks) + # The nranks is 4 + + +.. py:attribute:: local_rank + +当前训练进程的编号。 + +此属性的值等于环境变量 `PADDLE_TRAINER_ID` 的值。默认值是0。 + +**示例代码** + .. code-block:: python + + # 在Linux环境,提前执行此命令: export PADDLE_TRAINER_ID=0 + import paddle.fluid as fluid + + env = fluid.dygraph.ParallelEnv() + print("The local rank is %d" % env.local_rank) + # The local rank is 0 + + +.. py:attribute:: dev_id + +当前用于并行训练的GPU的编号。 + +此属性的值等于环境变量 `FLAGS_selected_gpus` 的值。默认值是0。 + +**示例代码** + .. code-block:: python + + # 在Linux环境,提前执行此命令: export FLAGS_selected_gpus=1 + import paddle.fluid as fluid + + env = fluid.dygraph.ParallelEnv() + print("The device id are %d" % env.dev_id) + # The device id are 1 + + +.. py:attribute:: current_endpoint + +当前训练进程的终端节点IP与相应端口,形式为(机器节点IP:端口号)。例如:127.0.0.1:6170。 + +此属性的值等于环境变量 `PADDLE_CURRENT_ENDPOINT` 的值。默认值为空字符串""。 + +**示例代码** + .. code-block:: python + + # 在Linux环境,提前执行此命令: export PADDLE_CURRENT_ENDPOINT=127.0.0.1:6170 + import paddle.fluid as fluid + + env = fluid.dygraph.ParallelEnv() + print("The current endpoint are %s" % env.current_endpoint) + # The current endpoint are 127.0.0.1:6170 + + +.. py:attribute:: trainer_endpoints + +当前任务所有参与训练进程的终端节点IP与相应端口,用于在NCCL2初始化的时候建立通信,广播NCCL ID。 + +此属性的值等于环境变量 `PADDLE_TRAINER_ENDPOINTS` 的值。默认值为空字符串""。 + +**示例代码** + .. code-block:: python + + # 在Linux环境,提前执行此命令: export PADDLE_TRAINER_ENDPOINTS=127.0.0.1:6170,127.0.0.1:6171 + import paddle.fluid as fluid + + env = fluid.dygraph.ParallelEnv() + print("The trainer endpoints are %s" % env.trainer_endpoints) + # The trainer endpoints are ['127.0.0.1:6170', '127.0.0.1:6171'] \ No newline at end of file diff --git a/doc/paddle/api/paddle/distributed/all_gather_cn.rst b/doc/paddle/api/paddle/distributed/all_gather_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6cbe19db63cad604c75657c9da83b18951475fdc --- /dev/null +++ b/doc/paddle/api/paddle/distributed/all_gather_cn.rst @@ -0,0 +1,44 @@ +.. _cn_api_distributed_all_gather: + +all_gather +------------------------------- + + +.. 
py:function:: paddle.distributed.all_gather(tensor_list, tensor, group=0) + +进程组内所有进程的指定tensor进行聚合操作,并返回给所有进程聚合的结果。 + +参数 +::::::::: + - tensor_list (list) - 操作的输出Tensor列表。列表中的每个元素均为Tensor,每个Tensor的数据类型为:float16、float32、float64、int32、int64。 + - tensor (Tensor) - 操作的输入Tensor。Tensor的数据类型为:float16、float32、float64、int32、int64。 + - group (int,可选) - 工作的进程组编号,默认为0。 + +返回 +::::::::: +无 + +代码示例 +::::::::: +.. code-block:: python + + import numpy as np + import paddle + from paddle.distributed import init_parallel_env + + paddle.disable_static() + paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id) + init_parallel_env() + tensor_list = [] + if paddle.distributed.ParallelEnv().local_rank == 0: + np_data1 = np.array([[4, 5, 6], [4, 5, 6]]) + np_data2 = np.array([[4, 5, 6], [4, 5, 6]]) + data1 = paddle.to_tensor(np_data1) + data2 = paddle.to_tensor(np_data2) + paddle.distributed.all_gather(tensor_list, data1) + else: + np_data1 = np.array([[1, 2, 3], [1, 2, 3]]) + np_data2 = np.array([[1, 2, 3], [1, 2, 3]]) + data1 = paddle.to_tensor(np_data1) + data2 = paddle.to_tensor(np_data2) + paddle.distributed.all_gather(tensor_list, data2) diff --git a/doc/paddle/api/paddle/distributed/all_reduce_cn.rst b/doc/paddle/api/paddle/distributed/all_reduce_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..fc183c32190520eafa94e715dfc1a4822f80e54a --- /dev/null +++ b/doc/paddle/api/paddle/distributed/all_reduce_cn.rst @@ -0,0 +1,40 @@ +.. _cn_api_distributed_all_reduce: + +all_reduce +------------------------------- + + +.. py:function:: paddle.distributed.all_reduce(tensor, op=ReduceOp.SUM, group=0) + +进程组内所有进程的指定tensor进行归约操作,并返回给所有进程归约的结果。 + +参数 +::::::::: + - tensor (Tensor) - 操作的输入Tensor,同时也会将归约结果返回至此Tensor中。Tensor的数据类型为:float16、float32、float64、int32、int64。 + - op (ReduceOp.SUM|ReduceOp.MAX|ReduceOp.Min|ReduceOp.PROD,可选) - 归约的具体操作,比如求和,取最大值,取最小值和求乘积,默认为求和归约。 + - group (int,可选) - 工作的进程组编号,默认为0。 + +返回 +::::::::: +无 + +代码示例 +::::::::: +.. code-block:: python + + import numpy as np + import paddle + from paddle.distributed import ReduceOp + from paddle.distributed import init_parallel_env + + paddle.disable_static() + paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id) + init_parallel_env() + if paddle.distributed.ParallelEnv().local_rank == 0: + np_data = np.array([[4, 5, 6], [4, 5, 6]]) + else: + np_data = np.array([[1, 2, 3], [1, 2, 3]]) + data = paddle.to_tensor(np_data) + paddle.distributed.all_reduce(data) + out = data.numpy() + # [[5, 7, 9], [5, 7, 9]] diff --git a/doc/paddle/api/paddle/distributed/barrier_cn.rst b/doc/paddle/api/paddle/distributed/barrier_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..fb63526cfd0163cec91396144b2e0c3c0f9beace --- /dev/null +++ b/doc/paddle/api/paddle/distributed/barrier_cn.rst @@ -0,0 +1,29 @@ +.. _cn_api_distributed_barrier: + +barrier +------------------------------- + + +.. py:function:: paddle.distributed.barrier(group=0) + +同步进程组内的所有进程。 + +参数 +::::::::: + - group (int,可选) - 工作的进程组编号,默认为0。 + +返回 +::::::::: +无 + +代码示例 +::::::::: +.. 
code-block:: python + + import paddle + from paddle.distributed import init_parallel_env + + paddle.disable_static() + paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id) + init_parallel_env() + paddle.distributed.barrier() diff --git a/doc/paddle/api/paddle/distributed/broadcast_cn.rst b/doc/paddle/api/paddle/distributed/broadcast_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..33e653b56648ff6f97b64b19fffcf64bca4a989c --- /dev/null +++ b/doc/paddle/api/paddle/distributed/broadcast_cn.rst @@ -0,0 +1,39 @@ +.. _cn_api_distributed_broadcast: + +broadcast +------------------------------- + + +.. py:function:: paddle.distributed.broadcast(tensor, src, group=0) + +广播一个Tensor给其他所有进程 + +参数 +::::::::: + - tensor (Tensor) - 如果当前进程编号是源,那么这个Tensor变量将被发送给其他进程,否则这个Tensor将接收源发送过来的数据。Tensor的数据类型为:float16、float32、float64、int32、int64。 + - src (int) - 发送源的进程编号。 + - group (int,可选) - 工作的进程组编号,默认为0。 + +返回 +::::::::: +无 + +代码示例 +::::::::: +.. code-block:: python + + import numpy as np + import paddle + from paddle.distributed import init_parallel_env + + paddle.disable_static() + paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id) + init_parallel_env() + if paddle.distributed.ParallelEnv().local_rank == 0: + np_data = np.array([[4, 5, 6], [4, 5, 6]]) + else: + np_data = np.array([[1, 2, 3], [1, 2, 3]]) + data = paddle.to_tensor(np_data) + paddle.distributed.broadcast(data, 1) + out = data.numpy() + # [[1, 2, 3], [1, 2, 3]] diff --git a/doc/paddle/api/paddle/distributed/fleet/DatasetFactory_cn.rst b/doc/paddle/api/paddle/distributed/fleet/DatasetFactory_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..901d32c2069c8905031d8f3d9b6abdc89730876a --- /dev/null +++ b/doc/paddle/api/paddle/distributed/fleet/DatasetFactory_cn.rst @@ -0,0 +1,37 @@ +.. _cn_api_fluid_dataset_DatasetFactory: + +DatasetFactory +------------------------------- + +.. py:class:: paddle.fluid.dataset.DatasetFactory + + + + +DatasetFactory是一个按数据集名称创建数据集的 "工厂",可以创建“QueueDataset”,“InMemoryDataset”或“FileInstantDataset”,默认为“QueueDataset”。 + + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + +.. py:method:: create_dataset(datafeed_class='QueueDataset') + +创建“QueueDataset”,“InMemoryDataset” 或 “FileInstantDataset”,默认为“QueueDataset”。 + + +参数: + - **datafeed_class** (str) – datafeed类名,为QueueDataset或InMemoryDataset。默认为QueueDataset。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset() + + + diff --git a/doc/paddle/api/paddle/distributed/fleet/DistributedStrategy_cn.rst b/doc/paddle/api/paddle/distributed/fleet/DistributedStrategy_cn.rst new file mode 100755 index 0000000000000000000000000000000000000000..94c1ef0284eefbe3671e0fba49186e4359abaf20 --- /dev/null +++ b/doc/paddle/api/paddle/distributed/fleet/DistributedStrategy_cn.rst @@ -0,0 +1,190 @@ +.. _cn_api_distributed_fleet_DistributedStrategy: + +DistributedStrategy +------------------------------- + +.. py:class:: paddle.distributed.fleet.DistributedStrategy + + +属性 +:::::::::::: + +.. py:attribute:: recompute + +是否启用Recompute来优化内存空间,默认值:False + +**示例代码** + +.. code-block:: python + + import paddle.distributed.fleet as fleet + strategy = fleet.DistributedStrategy() + strategy.recompute = True + # suppose x and y are names of checkpoint tensors for recomputation + strategy.recompute_configs = {"checkpoints": ["x", "y"]} + + +.. 
py:attribute:: recompute_configs + +设置Recompute策略的配置。目前来讲,用户使用Recompute策略时,必须配置 checkpoints 参数。 + +**checkpoints(int):** Recompute策略的检查点,默认为空列表,也即不启用Recompute。 + +.. py:attribute:: pipeline + +是否启用Pipeline并行。目前,主要实现单机多GPU间的Pipeline并行和多机间的数据并行。Pipeline信息由用户定义程序中的device_guard确定。 + +**示例代码** + +.. code-block:: python + + import paddle.distributed.fleet as fleet + strategy = fleet.DistributedStrategy() + strategy.pipeline = True + + +.. py:attribute:: pipeline_configs + +设置Pipeline策略的配置。Pipeline策略下,神经网络的不同层在不同的GPU设备。相邻的GPU设备间有用于同步隐层Tensor的队列。Pipeline并行包含多种生产者-消费者形式的硬件对,如GPU-CPU、CPU-GPU、GPU-XPU。加速PIpeline并行的最佳方式是减少Tensor队列中的Tensor大小,这样生产者可以更快的为下游消费者提供数据。 + +**micro_batch (int):** 每个用户定义的mini-batch中包含的更小的micro-batch的数量。 + +**示例代码** + +.. code-block:: python + + import paddle.distributed.fleet as fleet + strategy = fleet.DistributedStrategy() + strategy.pipeline = True + strategy.pipeline_configs = {"micro_batch": 12} + + +.. py:attribute:: gradient_merge + +梯度累加,是一种大Batch训练的策略。添加这一策略后,模型的参数每过 **k_steps** 步更新一次, +**k_steps** 是用户定义的步数。在不更新参数的步数里,Paddle只进行前向、反向网络的计算; +在更新参数的步数里,Paddle执行优化网络,通过特定的优化器(比如SGD、Adam), +将累加的梯度应用到模型参数上。 + +**示例代码** + +.. code-block:: python + + import paddle.distributed.fleet as fleet + strategy = fleet.DistributedStrategy() + strategy.gradient_merge = True + strategy.gradient_merge_configs = {"k_steps": 4, "avg": True} + +.. py:attribute:: gradient_merge_configs + +设置 **distribute_strategy** 策略的配置。 + +**k_steps(int):** 参数更新的周期,默认为1 + +**avg(bool):** 梯度的融合方式,有两种选择: + +- **sum**: 梯度求和 +- **avg**: 梯度求平均 + +.. py:attribute:: lars +是否使用LARS optimizer,默认值:False + +**示例代码** + +.. code-block:: python + import paddle.distributed.fleet as fleet + strategy = fleet.DistributedStrategy() + strategy.lars = True + strategy.lars_configs = { + "lars_coeff": 0.001, + "lars_weight_decay": 0.0005, + "epsilon": 0, + "exclude_from_weight_decay": ["batch_norm", ".b"], + } +.. py:attribute:: lars_configs +设置LARS优化器的参数。用户可以配置 lars_coeff,lars_weight_decay,epsilon,exclude_from_weight_decay 参数。 + +**lars_coeff(float):** lars 系数,[原论文](https://arxiv.org/abs/1708.03888) 中的 trust coefficient。 默认值是 0.001. + +**lars_weight_decay(float):** lars 公式中 weight decay 系数。 默认值是 0.0005. + +**exclude_from_weight_decay(list[str]):** 不应用 weight decay 的 layers 的名字列表,某一layer 的name 如果在列表中,这一layer 的 lars_weight_decay将被置为 0. 默认值是 None. + +**epsilon(float):** 一个小的浮点值,目的是维持数值稳定性,避免 lars 公式中的分母为零。 默认值是 0. + +.. py:attribute:: lamb +是否使用LAMB optimizer,默认值:False + +**示例代码** + +.. code-block:: python + import paddle.distributed.fleet as fleet + strategy = fleet.DistributedStrategy() + strategy.lamb = True + strategy.lamb_configs = { + 'lamb_weight_decay': 0.01, + 'exclude_from_weight_decay': [], + } +.. py:attribute:: lamb_configs +设置LAMB优化器的参数。用户可以配置 lamb_weight_decay,exclude_from_weight_decay 参数。 + +**lamb_weight_decay(float):** lars 公式中 weight decay 系数。 默认值是 0.01. + +**exclude_from_weight_decay(list[str]):** 不应用 weight decay 的 layers 的名字列表,某一layer 的name 如果在列表中,这一layer 的 lamb_weight_decay将被置为 0. 默认值是 None. + +.. py:attribute:: localsgd +是否使用LocalSGD optimizer,默认值:False。更多的细节请参考[Don't Use Large Mini-Batches, Use Local SGD](https://arxiv.org/pdf/1808.07217.pdf) + +**示例代码** + +.. code-block:: python + + import paddle.distributed.fleet as fleet + strategy = fleet.DistributedStrategy() + strategy.localsgd = True # by default this is false + + +.. py:attribute:: localsgd_configs +设置LocalSGD优化器的参数。用户可以配置k_steps和begin_step参数。 + +**示例代码** + +.. 
code-block:: python + + import paddle.distributed.fleet as fleet + strategy = fleet.DistributedStrategy() + strategy.localsgd = True + strategy.localsgd_configs = {"k_steps": 4, + "begin_step": 30} + +**k_steps(int):** 训练过程中的全局参数更新间隔,默认值1。 + +**begin_step(int):** 指定从第几个step之后进行local SGD算法,默认值1。 + +.. py:attribute:: adaptive_localsgd +是否使用AdaptiveLocalSGD optimizer,默认值:False。更多的细节请参考[Adaptive Communication Strategies to Achieve the Best Error-Runtime Trade-off in Local-Update SGD](https://arxiv.org/pdf/1810.08313.pdf) + +**示例代码** + +.. code-block:: python + + import paddle.distributed.fleet as fleet + strategy = fleet.DistributedStrategy() + strategy.adaptive_localsgd = True # by default this is false + +.. py:attribute:: adaptive_localsgd_configs +设置AdaptiveLocalSGD优化器的参数。用户可以配置init_k_steps和begin_step参数。 + +**示例代码** + +.. code-block:: python + + import paddle.distributed.fleet as fleet + strategy = fleet.DistributedStrategy() + strategy.adaptive_localsgd = True + strategy.adaptive_localsgd_configs = {"init_k_steps": 1, + "begin_step": 30} + +**init_k_steps(int):** 自适应localsgd的初始训练步长。训练后,自适应localsgd方法将自动调整步长。 默认值1。 + +**begin_step(int):** 指定从第几个step之后进行Adaptive LocalSGD算法,默认值1。 diff --git a/doc/paddle/api/paddle/distributed/fleet/Fleet_cn.rst b/doc/paddle/api/paddle/distributed/fleet/Fleet_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..384ba3c245db9362a03f012b25cb6e19226f23d6 --- /dev/null +++ b/doc/paddle/api/paddle/distributed/fleet/Fleet_cn.rst @@ -0,0 +1,435 @@ +.. _cn_api_distributed_fleet_Fleet: + +Fleet +------------------------------- + + +.. py:class:: paddle.distributed.fleet.Fleet + + + + +.. py:method:: init(role_maker=None, is_collective=False) + + +.. py:method:: is_first_worker() + + +.. py:method:: worker_index() + + +.. py:method:: worker_num() + + +.. py:method:: is_worker() + + +.. py:method:: worker_endpoints(to_string=False) + + +.. py:method:: server_num() + + +.. py:method:: server_index() + + +.. py:method:: server_endpoints(to_string=False) + + +.. py:method:: is_server() + + +.. py:method:: barrier_worker() + + +.. py:method:: init_worker() + + +.. py:method:: init_server(*args, **kwargs) + + +.. py:method:: run_server() + + +.. py:method:: stop_worker() + + +.. py:method:: save_inference_model(executor, dirname, feeded_var_names, target_vars, main_program=None, export_for_deployment=True) + + +.. py:method:: save_persistables(executor, dirname, main_program=None) + + +.. py:method:: distributed_optimizer(optimizer, strategy=None) + + +.. py:method:: distributed_model(model) + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +返回分布式数据并行模型。 + +参数: + model (Layer) - 用户定义的模型,此处模型是指继承动态图Layer的网络。 + +返回:分布式数据并行模型,该模型同样继承动态图Layer。 + + +**代码示例** + +.. code-block:: python + + + # 这个示例需要由fleetrun启动, 用法为: + # fleetrun --gpus=0,1 example.py + # 脚本example.py中的代码是下面这个示例. + + import paddle + import paddle.nn as nn + from paddle.distributed import fleet + + class LinearNet(nn.Layer): + def __init__(self): + super(LinearNet, self).__init__() + self._linear1 = nn.Linear(10, 10) + self._linear2 = nn.Linear(10, 1) + + def forward(self, x): + return self._linear2(self._linear1(x)) + + # 1. enable dynamic mode + paddle.disable_static() + + # 2. initialize fleet environment + fleet.init(is_collective=True) + + # 3. create layer & optimizer + layer = LinearNet() + loss_fn = nn.MSELoss() + adam = paddle.optimizer.Adam( + learning_rate=0.001, parameters=layer.parameters()) + + # 4. 
get data_parallel model using fleet + adam = fleet.distributed_optimizer(adam) + dp_layer = fleet.distributed_model(layer) + + # 5. run layer + inputs = paddle.randn([10, 10], 'float32') + outputs = dp_layer(inputs) + labels = paddle.randn([10, 1], 'float32') + loss = loss_fn(outputs, labels) + + print("loss:", loss.numpy()) + + loss = dp_layer.scale_loss(loss) + loss.backward() + dp_layer.apply_collective_grads() + + adam.step() + adam.clear_grad() + +.. py:method:: state_dict() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +以 ``dict`` 返回当前 ``optimizer`` 使用的所有Tensor 。比如对于Adam优化器,将返回 beta1, beta2, momentum 等Tensor。 + +返回:dict, 当前 ``optimizer`` 使用的所有Tensor。 + + +**代码示例** + +.. code-block:: python + + # 这个示例需要由fleetrun启动, 用法为: + # fleetrun --gpus=0,1 example.py + # 脚本example.py中的代码是下面这个示例. + + import numpy as np + import paddle + from paddle.distributed import fleet + + paddle.disable_static() + fleet.init(is_collective=True) + + value = np.arange(26).reshape(2, 13).astype("float32") + a = paddle.fluid.dygraph.to_variable(value) + + layer = paddle.nn.Linear(13, 5) + adam = paddle.optimizer.Adam(learning_rate=0.01, parameters=layer.parameters()) + + adam = fleet.distributed_optimizer(adam) + dp_layer = fleet.distributed_model(layer) + state_dict = adam.state_dict() + + +.. py:method:: set_state_dict(state_dict) + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +加载 ``optimizer`` 的Tensor字典给当前 ``optimizer`` 。 + +返回:None + + +**代码示例** + +.. code-block:: python + + # 这个示例需要由fleetrun启动, 用法为: + # fleetrun --gpus=0,1 example.py + # 脚本example.py中的代码是下面这个示例. + + import numpy as np + import paddle + from paddle.distributed import fleet + + paddle.disable_static() + fleet.init(is_collective=True) + + value = np.arange(26).reshape(2, 13).astype("float32") + a = paddle.fluid.dygraph.to_variable(value) + + layer = paddle.nn.Linear(13, 5) + adam = paddle.optimizer.Adam(learning_rate=0.01, parameters=layer.parameters()) + + adam = fleet.distributed_optimizer(adam) + dp_layer = fleet.distributed_model(layer) + state_dict = adam.state_dict() + paddle.framework.save(state_dict, "paddle_dy") + para_state_dict, opti_state_dict = paddle.framework.load( "paddle_dy") + adam.set_state_dict(opti_state_dict) + + +.. py:method:: set_lr(value) + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +手动设置当前 ``optimizer`` 的学习率。 + +参数: + value (float) - 需要设置的学习率的值。 + +返回:None + + +**代码示例** + +.. code-block:: python + + # 这个示例需要由fleetrun启动, 用法为: + # fleetrun --gpus=0,1 example.py + # 脚本example.py中的代码是下面这个示例. + + import numpy as np + import paddle + from paddle.distributed import fleet + + paddle.disable_static() + fleet.init(is_collective=True) + + value = np.arange(26).reshape(2, 13).astype("float32") + a = paddle.fluid.dygraph.to_variable(value) + + layer = paddle.nn.Linear(13, 5) + adam = paddle.optimizer.Adam(learning_rate=0.01, parameters=layer.parameters()) + + adam = fleet.distributed_optimizer(adam) + dp_layer = fleet.distributed_model(layer) + + lr_list = [0.2, 0.3, 0.4, 0.5, 0.6] + for i in range(5): + adam.set_lr(lr_list[i]) + lr = adam.get_lr() + print("current lr is {}".format(lr)) + # Print: + # current lr is 0.2 + # current lr is 0.3 + # current lr is 0.4 + # current lr is 0.5 + # current lr is 0.6 + + +.. py:method:: get_lr() + +**注意:** + + **1. 
该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +获取当前步骤的学习率。 + +返回:float,当前步骤的学习率。 + + + +**代码示例** + +.. code-block:: python + + # 这个示例需要由fleetrun启动, 用法为: + # fleetrun --gpus=0,1 example.py + # 脚本example.py中的代码是下面这个示例. + + import numpy as np + import paddle + from paddle.distributed import fleet + + paddle.disable_static() + fleet.init(is_collective=True) + + value = np.arange(26).reshape(2, 13).astype("float32") + a = paddle.fluid.dygraph.to_variable(value) + + layer = paddle.nn.Linear(13, 5) + adam = paddle.optimizer.Adam(learning_rate=0.01, parameters=layer.parameters()) + + adam = fleet.distributed_optimizer(adam) + dp_layer = fleet.distributed_model(layer) + + lr = adam.get_lr() + print(lr) # 0.01 + + +.. py:method:: step() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +执行一次优化器并进行参数更新。 + +返回:None。 + + +**代码示例** + +.. code-block:: python + + # 这个示例需要由fleetrun启动, 用法为: + # fleetrun --gpus=0,1 example.py + # 脚本example.py中的代码是下面这个示例. + + import paddle + import paddle.nn as nn + from paddle.distributed import fleet + + class LinearNet(nn.Layer): + def __init__(self): + super(LinearNet, self).__init__() + self._linear1 = nn.Linear(10, 10) + self._linear2 = nn.Linear(10, 1) + + def forward(self, x): + return self._linear2(self._linear1(x)) + + # 1. enable dynamic mode + paddle.disable_static() + + # 2. initialize fleet environment + fleet.init(is_collective=True) + + # 3. create layer & optimizer + layer = LinearNet() + loss_fn = nn.MSELoss() + adam = paddle.optimizer.Adam( + learning_rate=0.001, parameters=layer.parameters()) + + # 4. get data_parallel model using fleet + adam = fleet.distributed_optimizer(adam) + dp_layer = fleet.distributed_model(layer) + + # 5. run layer + inputs = paddle.randn([10, 10], 'float32') + outputs = dp_layer(inputs) + labels = paddle.randn([10, 1], 'float32') + loss = loss_fn(outputs, labels) + + print("loss:", loss.numpy()) + + loss = dp_layer.scale_loss(loss) + loss.backward() + dp_layer.apply_collective_grads() + + adam.step() + adam.clear_grad() + + +.. py:method:: clear_grad() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + + +清除需要优化的参数的梯度。 + +返回:None。 + + +**代码示例** + +.. code-block:: python + + # 这个示例需要由fleetrun启动, 用法为: + # fleetrun --gpus=0,1 example.py + # 脚本example.py中的代码是下面这个示例. + + import paddle + import paddle.nn as nn + from paddle.distributed import fleet + + class LinearNet(nn.Layer): + def __init__(self): + super(LinearNet, self).__init__() + self._linear1 = nn.Linear(10, 10) + self._linear2 = nn.Linear(10, 1) + + def forward(self, x): + return self._linear2(self._linear1(x)) + + # 1. enable dynamic mode + paddle.disable_static() + + # 2. initialize fleet environment + fleet.init(is_collective=True) + + # 3. create layer & optimizer + layer = LinearNet() + loss_fn = nn.MSELoss() + adam = paddle.optimizer.Adam( + learning_rate=0.001, parameters=layer.parameters()) + + # 4. get data_parallel model using fleet + adam = fleet.distributed_optimizer(adam) + dp_layer = fleet.distributed_model(layer) + + # 5. run layer + inputs = paddle.randn([10, 10], 'float32') + outputs = dp_layer(inputs) + labels = paddle.randn([10, 1], 'float32') + loss = loss_fn(outputs, labels) + + print("loss:", loss.numpy()) + + loss = dp_layer.scale_loss(loss) + loss.backward() + dp_layer.apply_collective_grads() + + adam.step() + adam.clear_grad() + + +.. 
py:method:: minimize(loss, startup_program=None, parameter_list=None, no_grad_set=None) + + +.. py:attribute:: util + + diff --git a/doc/paddle/api/paddle/distributed/fleet/PaddleCloudRoleMaker_cn.rst b/doc/paddle/api/paddle/distributed/fleet/PaddleCloudRoleMaker_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b2c01b050b419377771e1fd3f4a6e0526745311d --- /dev/null +++ b/doc/paddle/api/paddle/distributed/fleet/PaddleCloudRoleMaker_cn.rst @@ -0,0 +1,10 @@ +.. _cn_api_distributed_fleet_PaddleCloudRoleMaker: + +PaddleCloudRoleMaker +------------------------------- + +.. py:class:: paddle.distributed.fleet.PaddleCloudRoleMaker + + + + diff --git a/doc/paddle/api/paddle/distributed/fleet/UserDefinedRoleMaker_cn.rst b/doc/paddle/api/paddle/distributed/fleet/UserDefinedRoleMaker_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..aee570bc51c9a41cf2b3ff6d47b6cd582913871d --- /dev/null +++ b/doc/paddle/api/paddle/distributed/fleet/UserDefinedRoleMaker_cn.rst @@ -0,0 +1,10 @@ +.. _cn_api_distributed_fleet_UserDefinedRoleMaker: + +UserDefinedRoleMaker +------------------------------- + +.. py:class:: paddle.distributed.fleet.UserDefinedRoleMaker + + + + diff --git a/doc/paddle/api/paddle/distributed/fleet/UtilBase_cn.rst b/doc/paddle/api/paddle/distributed/fleet/UtilBase_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..bb873da34f2b91f34472ac31fca3440d8951bbb1 --- /dev/null +++ b/doc/paddle/api/paddle/distributed/fleet/UtilBase_cn.rst @@ -0,0 +1,10 @@ +.. _cn_api_distributed_fleet_UtilBase: + +UtilBase +------------------------------- + +.. py:class:: paddle.distributed.fleet.UtilBase + + + + diff --git a/doc/paddle/api/paddle/distributed/fleet/utils/fs/ExecuteError_cn.rst b/doc/paddle/api/paddle/distributed/fleet/utils/fs/ExecuteError_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..98359d848c42f7855cc2d086187bdfaf5921a23f --- /dev/null +++ b/doc/paddle/api/paddle/distributed/fleet/utils/fs/ExecuteError_cn.rst @@ -0,0 +1,10 @@ +.. _cn_api_distributed_fleet_utils_fs_ExecuteError: + +ExecuteError +------------------------------- + +.. py:class:: paddle.distributed.fleet.utils.fs.ExecuteError + + + + diff --git a/doc/paddle/api/paddle/distributed/fleet/utils/fs/FSFileExistsError_cn.rst b/doc/paddle/api/paddle/distributed/fleet/utils/fs/FSFileExistsError_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5081d1ca7d10c319b555db46792455fb65443f0a --- /dev/null +++ b/doc/paddle/api/paddle/distributed/fleet/utils/fs/FSFileExistsError_cn.rst @@ -0,0 +1,10 @@ +.. _cn_api_distributed_fleet_utils_fs_FSFileExistsError: + +FSFileExistsError +------------------------------- + +.. py:class:: paddle.distributed.fleet.utils.fs.FSFileExistsError + + + + diff --git a/doc/paddle/api/paddle/distributed/fleet/utils/fs/FSFileNotExistsError_cn.rst b/doc/paddle/api/paddle/distributed/fleet/utils/fs/FSFileNotExistsError_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1bebd7f4487c6e9900b8e90e4c835d166b236724 --- /dev/null +++ b/doc/paddle/api/paddle/distributed/fleet/utils/fs/FSFileNotExistsError_cn.rst @@ -0,0 +1,10 @@ +.. _cn_api_distributed_fleet_utils_fs_FSFileNotExistsError: + +FSFileNotExistsError +------------------------------- + +.. 
py:class:: paddle.distributed.fleet.utils.fs.FSFileNotExistsError + + + + diff --git a/doc/paddle/api/paddle/distributed/fleet/utils/fs/FSShellCmdAborted_cn.rst b/doc/paddle/api/paddle/distributed/fleet/utils/fs/FSShellCmdAborted_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..bf48d0627f84eb83627f7539bac303ccdc3a0eb3 --- /dev/null +++ b/doc/paddle/api/paddle/distributed/fleet/utils/fs/FSShellCmdAborted_cn.rst @@ -0,0 +1,10 @@ +.. _cn_api_distributed_fleet_utils_fs_FSShellCmdAborted: + +FSShellCmdAborted +------------------------------- + +.. py:class:: paddle.distributed.fleet.utils.fs.FSShellCmdAborted + + + + diff --git a/doc/paddle/api/paddle/distributed/fleet/utils/fs/FSTimeOut_cn.rst b/doc/paddle/api/paddle/distributed/fleet/utils/fs/FSTimeOut_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5de353c06458546b0806df262020b124c8f921d1 --- /dev/null +++ b/doc/paddle/api/paddle/distributed/fleet/utils/fs/FSTimeOut_cn.rst @@ -0,0 +1,10 @@ +.. _cn_api_distributed_fleet_utils_fs_FSTimeOut: + +FSTimeOut +------------------------------- + +.. py:class:: paddle.distributed.fleet.utils.fs.FSTimeOut + + + + diff --git a/doc/paddle/api/paddle/distributed/fleet/utils/fs/FS_cn.rst b/doc/paddle/api/paddle/distributed/fleet/utils/fs/FS_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c290580cef57e9e6d364abd3f4b038d61259d482 --- /dev/null +++ b/doc/paddle/api/paddle/distributed/fleet/utils/fs/FS_cn.rst @@ -0,0 +1,10 @@ +.. _cn_api_distributed_fleet_utils_fs_FS: + +FS +------------------------------- + +.. py:class:: paddle.distributed.fleet.utils.fs.FS + + + + diff --git a/doc/paddle/api/paddle/distributed/fleet/utils/fs/HDFSClient_cn.rst b/doc/paddle/api/paddle/distributed/fleet/utils/fs/HDFSClient_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..21fae916cd1360e89ebe94ffc9d1bb53bc79c31b --- /dev/null +++ b/doc/paddle/api/paddle/distributed/fleet/utils/fs/HDFSClient_cn.rst @@ -0,0 +1,10 @@ +.. _cn_api_distributed_fleet_utils_fs_HDFSClient: + +HDFSClient +------------------------------- + +.. py:class:: paddle.distributed.fleet.utils.fs.HDFSClient + + + + diff --git a/doc/paddle/api/paddle/distributed/fleet/utils/fs/LocalFS_cn.rst b/doc/paddle/api/paddle/distributed/fleet/utils/fs/LocalFS_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5169d52c9263a1841c75de1f300d0507b102385c --- /dev/null +++ b/doc/paddle/api/paddle/distributed/fleet/utils/fs/LocalFS_cn.rst @@ -0,0 +1,10 @@ +.. _cn_api_distributed_fleet_utils_fs_LocalFS: + +LocalFS +------------------------------- + +.. py:class:: paddle.distributed.fleet.utils.fs.LocalFS + + + + diff --git a/doc/paddle/api/paddle/distributed/get_rank_cn.rst b/doc/paddle/api/paddle/distributed/get_rank_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..75ad8fc79baa6a560fe956799c1a00bc9d67376d --- /dev/null +++ b/doc/paddle/api/paddle/distributed/get_rank_cn.rst @@ -0,0 +1,25 @@ +.. _cn_api_distributed_get_rank: + +get_rank +---------- + +.. py:function:: paddle.distributed.get_rank() + +返回当前进程的rank。 + +当前进程rank的值等于环境变量 ``PADDLE_TRAINER_ID`` 的值,默认值为0。 + +返回 +::::::::: +(int) 当前进程的rank。 + +代码示例 +::::::::: +.. 
code-block:: python + + import paddle + import paddle.distributed as dist + + # execute this command in terminal: export PADDLE_TRAINER_ID=0 + print("The rank is %d" % dist.get_rank()) + # The rank is 0 diff --git a/doc/paddle/api/paddle/distributed/get_world_size_cn.rst b/doc/paddle/api/paddle/distributed/get_world_size_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..08342de3c1f44b96762eefb1d9ae96918112d9dd --- /dev/null +++ b/doc/paddle/api/paddle/distributed/get_world_size_cn.rst @@ -0,0 +1,25 @@ +.. _cn_api_distributed_get_world_size: + +get_world_size +---------------- + +.. py:function:: paddle.distributed.get_world_size() + +返回参与当前任务的进程数。 + +当前进程数等于环境变量 ``PADDLE_TRAINERS_NUM`` 的值,默认值为1。 + +返回 +::::::::: +(int) 参与任务的进程数。 + +代码示例 +::::::::: +.. code-block:: python + + import paddle + import paddle.distributed as dist + + # execute this command in terminal: export PADDLE_TRAINERS_NUM=4 + print("The world_size is %d" % dist.get_world_size()) + # The world_size is 4 diff --git a/doc/paddle/api/paddle/distributed/init_parallel_env_cn.rst b/doc/paddle/api/paddle/distributed/init_parallel_env_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..eafe9f10f0548931828798b28896ef59f211eefe --- /dev/null +++ b/doc/paddle/api/paddle/distributed/init_parallel_env_cn.rst @@ -0,0 +1,64 @@ +.. _cn_api_distributed_init_parallel_env: + +init_parallel_env +----------------- + +.. py:function:: paddle.distributed.init_parallel_env() + +初始化动态图模式下的并行训练环境。 + +.. note:: + 目前仅支持初始化GPU训练环境,使用NCCL进行通信。 + +返回 +::::::::: +无 + +代码示例 +::::::::: +.. code-block:: python + + import paddle + import paddle.nn as nn + import paddle.optimizer as opt + import paddle.distributed as dist + + class LinearNet(nn.Layer): + def __init__(self): + super(LinearNet, self).__init__() + self._linear1 = nn.Linear(10, 10) + self._linear2 = nn.Linear(10, 1) + + def forward(self, x): + return self._linear2(self._linear1(x)) + + def train(): + # 1. enable dynamic mode + paddle.disable_static() + + # 2. initialize parallel environment + dist.init_parallel_env() + + # 3. create data parallel layer & optimizer + layer = LinearNet() + dp_layer = paddle.DataParallel(layer) + + loss_fn = nn.MSELoss() + adam = opt.Adam( + learning_rate=0.001, parameters=dp_layer.parameters()) + + # 4. run layer + inputs = paddle.randn([10, 10], 'float32') + outputs = dp_layer(inputs) + labels = paddle.randn([10, 1], 'float32') + loss = loss_fn(outputs, labels) + + loss = dp_layer.scale_loss(loss) + loss.backward() + dp_layer.apply_collective_grads() + + adam.step() + adam.clear_grad() + + if __name__ == '__main__': + dist.spawn(train) diff --git a/doc/paddle/api/paddle/distributed/prepare_context_cn.rst b/doc/paddle/api/paddle/distributed/prepare_context_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..53be1b628b001d52e853a83f4aa8c795ebba8696 --- /dev/null +++ b/doc/paddle/api/paddle/distributed/prepare_context_cn.rst @@ -0,0 +1,36 @@ +.. _api_fluid_dygraph_prepare_context: + +prepare_context +--------------- + +.. 
py:class:: paddle.fluid.dygraph.prepare_context(strategy=None) + + + + +该API是进行多进程多卡训练的环境配置接口,接受一个ParallelStrategy结构体变量作为输入。当strategy属性中的nums_trainer小于2时,API会直接返回;当nums_trainer大于1且当前设备为CUDAPlace时,由于目前动态图模式仅支持GPU多卡训练,仅能配置NCCL多卡训练的环境,所以此时会对NCCL环境进行配置,具体内容包括:生成NCCL ID,并广播至参与训练的各进程,用于各进程间的同步操作,创建并配置NCCL通信器等。 + +参数: + - **strategy** (ParallelStrategy, 可选) – 该参数是配置储存多进程多卡训练配置信息的结构体变量,其具体成员包括:trainer节点的个数,当前trainer节点的ID,所有trainer节点的endpoint,当前节点的endpoint。当输入为None时,会调用ParallelStrategy构造函数初始化strategy,此时,strategy的属性值为ParallelStrategy结构体的默认值,接着strategy的属性会被环境变量中的对应值覆盖。默认值为None。 + +返回:一个属性配置后的ParallelStrategy结构体变量。 + +返回类型:实例(ParallelStrategy) + +**代码示例**: + +.. code-block:: python + + import paddle.fluid.dygraph as dygraph + import paddle.fluid as fluid + with fluid.dygraph.guard(): + strategy=dygraph.parallel.prepare_context() + emb = fluid.dygraph.Embedding([10, 10]) + emb = dygraph.parallel.DataParallel(emb, strategy) + + state_dict = emb.state_dict() + fluid.save_dygraph( state_dict, "paddle_dy") + + para_state_dict, _ = fluid.load_dygraph( "paddle_dy") + + emb.set_dict( para_state_dict ) diff --git a/doc/paddle/api/paddle/distributed/reduce_cn.rst b/doc/paddle/api/paddle/distributed/reduce_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6f471a67a8a0a17617f18208ca9b596ce8182f5e --- /dev/null +++ b/doc/paddle/api/paddle/distributed/reduce_cn.rst @@ -0,0 +1,40 @@ +.. _cn_api_distributed_reduce: + +reduce +------------------------------- + + +.. py:function:: paddle.distributed.reduce(tensor, dst, op=ReduceOp.SUM, group=0) + +对进程组内所有进程的指定tensor进行归约操作,并将归约结果返回给目标进程 ``dst`` 。 + +参数 +::::::::: + - tensor (Tensor) - 操作的输入Tensor,结果返回至目标进程号的Tensor中。Tensor的数据类型为:float16、float32、float64、int32、int64。 + - dst (int) - 返回操作结果的目标进程编号。 + - op (ReduceOp.SUM|ReduceOp.MAX|ReduceOp.MIN|ReduceOp.PROD,可选) - 归约的具体操作,比如求和,取最大值,取最小值和求乘积,默认为求和归约。 + - group (int,可选) - 工作的进程组编号,默认为0。 + +返回 +::::::::: +无 + +代码示例 +::::::::: +.. code-block:: python + + import numpy as np + import paddle + from paddle.distributed import init_parallel_env + + paddle.disable_static() + paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id) + init_parallel_env() + if paddle.distributed.ParallelEnv().local_rank == 0: + np_data = np.array([[4, 5, 6], [4, 5, 6]]) + else: + np_data = np.array([[1, 2, 3], [1, 2, 3]]) + data = paddle.to_tensor(np_data) + paddle.distributed.reduce(data, 0) + out = data.numpy() + # [[5, 7, 9], [5, 7, 9]] diff --git a/doc/paddle/api/paddle/distributed/scatter_cn.rst b/doc/paddle/api/paddle/distributed/scatter_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..43a9aad6d7d9f4d06a6cd3f544ed13af6428c1bd --- /dev/null +++ b/doc/paddle/api/paddle/distributed/scatter_cn.rst @@ -0,0 +1,78 @@ +.. _cn_api_paddle_cn_scatter: + +scatter +------------------------------- +.. py:function:: paddle.scatter(x, index, updates, overwrite=True, name=None) + + +通过基于 ``updates`` 来更新选定索引 ``index`` 上的输入来获得输出。具体行为如下: + + .. 
code-block:: python + + import numpy as np + #input: + x = np.array([[1, 1], [2, 2], [3, 3]]) + index = np.array([2, 1, 0, 1]) + # shape of updates should be the same as x + # shape of updates with dim > 1 should be the same as input + updates = np.array([[1, 1], [2, 2], [3, 3], [4, 4]]) + overwrite = False + # calculation: + if not overwrite: + for i in range(len(index)): + x[index[i]] = np.zeros((2)) + for i in range(len(index)): + if (overwrite): + x[index[i]] = updates[i] + else: + x[index[i]] += updates[i] + # output: + out = np.array([[3, 3], [6, 6], [1, 1]]) + out.shape # [3, 2] + +**Notice:** +因为 ``updates`` 的应用顺序是不确定的,因此,如果索引 ``index`` 包含重复项,则输出将具有不确定性。 + + +参数: + - **x** (Tensor) - ndim> = 1的输入N-D张量。 数据类型可以是float32,float64。 + - **index** (Tensor)- 一维Tensor。 数据类型可以是int32,int64。 ``index`` 的长度不能超过 ``updates`` 的长度,并且 ``index`` 中的值不能超过输入的长度。 + - **updates** (Tensor)- 根据 ``index`` 使用 ``update`` 参数更新输入 ``x`` 。 形状应与输入 ``x`` 相同,并且dim>1的dim值应与输入 ``x`` 相同。 + - **overwrite** (bool,可选)- 指定索引 ``index`` 相同时,更新输出的方式。如果为True,则使用覆盖模式更新相同索引的输出,如果为False,则使用累加模式更新相同索引的输出。默认值为True。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:Tensor,与x有相同形状和数据类型。 + + +**代码示例:** + .. code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + x_data = np.array([[1, 1], [2, 2], [3, 3]]).astype(np.float32) + index_data = np.array([2, 1, 0, 1]).astype(np.int64) + updates_data = np.array([[1, 1], [2, 2], [3, 3], [4, 4]]).astype(np.float32) + + x = paddle.to_tensor(x_data) + index = paddle.to_tensor(index_data) + updates = paddle.to_tensor(updates_data) + + output1 = paddle.scatter(x, index, updates, overwrite=False) + # [[3., 3.], + # [6., 6.], + # [1., 1.]] + output2 = paddle.scatter(x, index, updates, overwrite=True) + # CPU device: + # [[3., 3.], + # [4., 4.], + # [1., 1.]] + # GPU device maybe have two results because of the repeated numbers in index + # result 1: + # [[3., 3.], + # [4., 4.], + # [1., 1.]] + # result 2: + # [[3., 3.], + # [2., 2.], + # [1., 1.]] diff --git a/doc/paddle/api/paddle/distributed/spawn_cn.rst b/doc/paddle/api/paddle/distributed/spawn_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..21f8f762f5052474aac91c31ada8f76b664594b2 --- /dev/null +++ b/doc/paddle/api/paddle/distributed/spawn_cn.rst @@ -0,0 +1,105 @@ +.. _cn_api_distributed_spawn: + +spawn +----- + +.. py:function:: paddle.distributed.spawn(func, args=(), nprocs=-1, join=True, daemon=False, **options) + +使用 ``spawn`` 方法启动多进程任务。 + +参数 +::::::::: + - func (function) - 由 ``spawn`` 方法启动的进程所调用的目标函数。该目标函数需要能够被 ``pickled`` (序列化),所以目标函数必须定义为模块的一级函数,不能是内部子函数或者类方法。 + - args (tuple, 可选) - 传入目标函数 ``func`` 的参数。 + - nprocs (int, 可选) - 启动进程的数目。默认值为-1。当 ``nproc`` 为-1时,模型执行时将会从环境变量中获取当前可用的所有设备进行使用:如果使用GPU执行任务,将会从环境变量 ``CUDA_VISIBLE_DEVICES`` 中获取当前所有可用的设备ID;如果使用CPU执行任务,将会从环境变量 ``CPU_NUM`` 中获取当前可用的CPU设备数,例如,可以通过指令 ``export CPU_NUM=4`` 配置默认可用CPU设备数,如果此环境变量没有设置,将会默认设置该环境变量的值为1。 + - join (bool, 可选) - 对所有启动的进程执行阻塞的 ``join`` ,等待进程执行结束。默认为True。 + - daemon (bool, 可选) - 配置启动进程的 ``daemon`` 属性。默认为False。 + - **options (dict, 可选) - 其他初始化并行执行环境的配置选项。目前支持以下选项: (1) start_method (string) - 启动子进程的方法。进程的启动方法可以是 ``spawn`` , ``fork`` , ``forkserver`` 。 因为CUDA运行时环境不支持 ``fork`` 方法,当在子进程中使用CUDA时,需要使用 ``spawn`` 或者 ``forkserver`` 方法启动进程。默认方法为 ``spawn`` ; (2) cluster_node_ips (string) - 运行集群的节点(机器)IP,例如 "192.168.0.16,192.168.0.17" ,默认值为 "127.0.0.1" ; (3) node_ip (string) - 当前节点(机器)的IP。例如 "192.168.0.16" , 默认值为 "127.0.0.1" ; (4) started_port (int) - 一个训练节点(机器)上各训练进程的起始端口。例如 6170. 
默认值为None ; (5) selected_gpus (string) - 指定训练使用的GPU ID, 例如 "0,1,2,3" , 默认值为None ; (6) print_config (bool) - 打印当前并行训练的配置, 默认值为False ; (7) use_paddlecloud (bool) - 配置是否使用PaddleCloud启动多进程任务,默认值为False。 + +返回 +::::::::: + ``MultiprocessContext`` 对象,持有创建的多个进程。 + +代码示例 +::::::::: +.. code-block:: python + + from __future__ import print_function + + import paddle + import paddle.nn as nn + import paddle.optimizer as opt + import paddle.distributed as dist + + class LinearNet(nn.Layer): + def __init__(self): + super(LinearNet, self).__init__() + self._linear1 = nn.Linear(10, 10) + self._linear2 = nn.Linear(10, 1) + + def forward(self, x): + return self._linear2(self._linear1(x)) + + def train(print_result=False): + # 1. enable dynamic mode + paddle.disable_static() + + # 2. initialize parallel environment + dist.init_parallel_env() + + # 3. create data parallel layer & optimizer + layer = LinearNet() + dp_layer = paddle.DataParallel(layer) + + loss_fn = nn.MSELoss() + adam = opt.Adam( + learning_rate=0.001, parameters=dp_layer.parameters()) + + # 4. run layer + inputs = paddle.randn([10, 10], 'float32') + outputs = dp_layer(inputs) + labels = paddle.randn([10, 1], 'float32') + loss = loss_fn(outputs, labels) + + if print_result is True: + print("loss:", loss.numpy()) + + loss = dp_layer.scale_loss(loss) + loss.backward() + dp_layer.apply_collective_grads() + + adam.step() + adam.clear_grad() + + # Usage 1: only pass function. + # If your training method does not need any argument, and + # use all visible devices for parallel training. + if __name__ == '__main__': + dist.spawn(train) + + # Usage 2: pass function and arguments. + # If your training method needs some arguments, and + # use all visible devices for parallel training. + if __name__ == '__main__': + dist.spawn(train, args=(True,)) + + # Usage 3: pass function, arguments and nprocs. + # If your training method needs some arguments, and + # only use part of visible devices for parallel training. + # If your machine holds 8 cards {0,1,2,3,4,5,6,7}, + # this case will use cards {0,1}; If you set + # CUDA_VISIBLE_DEVICES=4,5,6,7, this case will use + # cards {4,5} + if __name__ == '__main__': + dist.spawn(train, args=(True,), nprocs=2) + + # Usage 4: pass function, arguments, nprocs and selected_gpus. + # If your training method needs some arguments, and + # only use part of visible devices for parallel training, + # but you can't set your machine's environment variable + # CUDA_VISIBLE_DEVICES, such as it is None or all cards + # {0,1,2,3,4,5,6,7}, you can pass `selected_gpus` to + # select the GPU cards you want to use. For example, + # this case will use cards {4,5} if your machine holds 8 cards. + if __name__ == '__main__': + dist.spawn(train, args=(True,), nprocs=2, selected_gpus='4,5') \ No newline at end of file diff --git a/doc/paddle/api/paddle/distribution/Distribution_cn.rst b/doc/paddle/api/paddle/distribution/Distribution_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c513c281df548f83a62a5183a6ef2b9f19e04ca9 --- /dev/null +++ b/doc/paddle/api/paddle/distribution/Distribution_cn.rst @@ -0,0 +1,49 @@ +.. _cn_api_distribution_Distribution: + +Distribution +------------------------------- + +.. py:class:: paddle.distribution.Distribution() + + + + +概率分布的抽象基类,在具体的分布中实现具体功能。 + + +.. py:function:: sample() + +从分布中采样 + +.. py:function:: entropy() + +分布的信息熵 + +.. py:function:: log_prob(value) + +对数概率密度函数 + +参数: + - **value** (Tensor) - 输入张量。 + +.. 
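note::
+    ``Distribution`` 是抽象基类,一般通过其子类(如 ``Normal`` 、 ``Uniform`` )使用上述接口。下面是一个假设性的最小示意,借助 ``Normal`` 演示 ``sample`` 、 ``entropy`` 、 ``log_prob`` 的调用方式:
+
+    .. code-block:: python
+
+        import paddle
+        from paddle.distribution import Normal
+
+        paddle.disable_static()
+        dist = Normal(loc=0., scale=1.)              # Normal 是 Distribution 的子类
+        s = dist.sample([2])                         # 从分布中采样
+        e = dist.entropy()                           # 分布的信息熵
+        lp = dist.log_prob(paddle.to_tensor([0.5]))  # 对数概率密度
+
+.. 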
py:function:: probs(value) + +概率密度函数 + +参数: + - **value** (Tensor) - 输入张量。 + +.. py:function:: kl_divergence(other) + +两个分布之间的KL散度。 + +参数: + - **other** (Distribution) - Distribution的实例。 + + + + + + + + diff --git a/doc/paddle/api/paddle/distribution/Normal_cn.rst b/doc/paddle/api/paddle/distribution/Normal_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b11a660613cc93bf826c210acb5ac84907161fa9 --- /dev/null +++ b/doc/paddle/api/paddle/distribution/Normal_cn.rst @@ -0,0 +1,156 @@ +.. _cn_api_distribution_Normal: + +Normal +------------------------------- + +.. py:class:: paddle.distribution.Normal(loc, scale, name=None) + + + + +正态分布 + +数学公式: + +.. math:: + + pdf(x; \mu, \sigma) = \frac{1}{Z}e^{\frac {-0.5 (x - \mu)^2} {\sigma^2} } + + Z = (2 \pi \sigma^2)^{0.5} + +上面的数学公式中: + +:math:`loc = \mu` : 平均值。 +:math:`scale = \sigma` : 标准差。 +:math:`Z`: 正态分布常量。 + +参数: + - **loc** (int|float|list|numpy.ndarray|Tensor) - 正态分布平均值。数据类型为int、float、list、numpy.ndarray或Tensor。 + - **scale** (int|float|list|numpy.ndarray|Tensor) - 正态分布标准差。数据类型为int、float、list、numpy.ndarray或Tensor。 + - **name** (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle + from paddle.distribution import Normal + + paddle.disable_static() + # Define a single scalar Normal distribution. + dist = Normal(loc=0., scale=3.) + # Define a batch of two scalar valued Normals. + # The first has mean 1 and standard deviation 11, the second 2 and 22. + dist = Normal(loc=[1., 2.], scale=[11., 22.]) + # Get 3 samples, returning a 3 x 2 tensor. + dist.sample([3]) + + # Define a batch of two scalar valued Normals. + # Both have mean 1, but different standard deviations. + dist = Normal(loc=1., scale=[11., 22.]) + + # Complete example + value_npdata = np.array([0.8], dtype="float32") + value_tensor = paddle.to_tensor(value_npdata) + + normal_a = Normal([0.], [1.]) + normal_b = Normal([0.5], [2.]) + sample = normal_a.sample([2]) + # a random tensor created by normal distribution with shape: [2, 1] + entropy = normal_a.entropy() + # [1.4189385] with shape: [1] + lp = normal_a.log_prob(value_tensor) + # [-1.2389386] with shape: [1] + p = normal_a.probs(value_tensor) + # [0.28969154] with shape: [1] + kl = normal_a.kl_divergence(normal_b) + # [0.34939718] with shape: [1] + + +.. py:function:: sample(shape, seed=0) + +生成指定维度的样本 + +参数: + - **shape** (list) - 1维列表,指定生成样本的维度。数据类型为int32。 + - **seed** (int) - 长整型数。 + +返回:预先设计好维度的张量, 数据类型为float32 + +返回类型:Tensor + +.. py:function:: entropy() + +信息熵 + +数学公式: + +.. math:: + + entropy(\sigma) = 0.5 \log (2 \pi e \sigma^2) + +上面的数学公式中: + +:math:`scale = \sigma` : 标准差。 + +返回:正态分布的信息熵, 数据类型为float32 + +返回类型:Tensor + +.. py:function:: log_prob(value) + +对数概率密度函数 + +参数: + - **value** (Tensor) - 输入张量。数据类型为float32或float64。 + +返回:对数概率, 数据类型与value相同 + +返回类型:Tensor + +.. py:function:: probs(value) + +概率密度函数 + +参数: + - **value** (Tensor) - 输入张量。数据类型为float32或float64。 + +返回:概率, 数据类型与value相同 + +返回类型:Tensor + +.. py:function:: kl_divergence(other) + +两个正态分布之间的KL散度。 + +数学公式: + +.. 
math:: + + KL\_divergence(\mu_0, \sigma_0; \mu_1, \sigma_1) = 0.5 (ratio^2 + (\frac{diff}{\sigma_1})^2 - 1 - 2 \ln {ratio}) + + ratio = \frac{\sigma_0}{\sigma_1} + + diff = \mu_1 - \mu_0 + +上面的数学公式中: + +:math:`loc = \mu_0`: 当前正态分布的平均值。 +:math:`scale = \sigma_0`: 当前正态分布的标准差。 +:math:`loc = \mu_1`: 另一个正态分布的平均值。 +:math:`scale = \sigma_1`: 另一个正态分布的标准差。 +:math:`ratio`: 两个标准差之间的比例。 +:math:`diff`: 两个平均值之间的差值。 + +参数: + - **other** (Normal) - Normal的实例。 + +返回:两个正态分布之间的KL散度, 数据类型为float32 + +返回类型:Tensor + + + + + diff --git a/doc/paddle/api/paddle/distribution/Uniform_cn.rst b/doc/paddle/api/paddle/distribution/Uniform_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9f9dddb7fabf2773a5e3568b5b0ab47de2ac895f --- /dev/null +++ b/doc/paddle/api/paddle/distribution/Uniform_cn.rst @@ -0,0 +1,119 @@ +.. _cn_api_distribution_Uniform: + +Uniform +------------------------------- + +.. py:class:: paddle.distribution.Uniform(low, high, name=None) + + + + +均匀分布 + +概率密度函数(pdf)为: + +.. math:: + + pdf(x; a, b) = \frac{1}{Z}, a <=x < b + + Z = b - a + +上面的数学公式中: + +:math:`low = a` 。 +:math:`high = b` 。 +:math:`Z`: 正态分布常量。 + +参数low和high的维度必须能够支持广播。 + +参数: + - **low** (int|float|list|numpy.ndarray|Tensor) - 均匀分布的下边界。数据类型为int、float、list、numpy.ndarray或Tensor。 + - **high** (int|float|list|numpy.ndarray|Tensor) - 均匀分布的上边界。数据类型为int、float、list、numpy.ndarray或Tensor。 + - **name** (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle + from paddle.distribution import Uniform + + paddle.disable_static() + # Without broadcasting, a single uniform distribution [3, 4]: + u1 = Uniform(low=3.0, high=4.0) + # 2 distributions [1, 3], [2, 4] + u2 = Uniform(low=[1.0, 2.0], high=[3.0, 4.0]) + # 4 distributions + u3 = Uniform(low=[[1.0, 2.0], [3.0, 4.0]], + high=[[1.5, 2.5], [3.5, 4.5]]) + + # With broadcasting: + u4 = Uniform(low=3.0, high=[5.0, 6.0, 7.0]) + + # Complete example + value_npdata = np.array([0.8], dtype="float32") + value_tensor = paddle.to_tensor(value_npdata) + + uniform = Uniform([0.], [2.]) + + sample = uniform.sample([2]) + # a random tensor created by uniform distribution with shape: [2, 1] + entropy = uniform.entropy() + # [0.6931472] with shape: [1] + lp = uniform.log_prob(value_tensor) + # [-0.6931472] with shape: [1] + p = uniform.probs(value_tensor) + # [0.5] with shape: [1] + + +.. py:function:: sample(shape, seed=0) + +生成指定维度的样本 + +参数: + - **shape** (list) - 1维列表,指定生成样本的维度。数据类型为int32。 + - **seed** (int) - 长整型数。 + +返回:预先设计好维度的张量, 数据类型为float32 + +返回类型:Tensor + +.. py:function:: entropy() + +信息熵 + +.. math:: + + entropy(low, high) = \log (high - low) + +返回:均匀分布的信息熵, 数据类型为float32 + +返回类型:Tensor + +.. py:function:: log_prob(value) + +对数概率密度函数 + +参数: + - **value** (Tensor) - 输入张量。数据类型为float32或float64。 + +返回:对数概率, 数据类型与value相同 + +返回类型:Tensor + +.. py:function:: probs(value) + +概率密度函数 + +参数: + - **value** (Tensor) - 输入张量。数据类型为float32或float64。 + +返回:概率, 数据类型与value相同 + +返回类型:Tensor + + + + + diff --git a/doc/paddle/api/paddle/fluid/CPUPlace_cn.rst b/doc/paddle/api/paddle/fluid/CPUPlace_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e091352c9018b355e234f8407625199d51c48555 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/CPUPlace_cn.rst @@ -0,0 +1,20 @@ +.. _cn_api_fluid_CPUPlace: + +CPUPlace +------------------------------- + +.. py:class:: paddle.fluid.CPUPlace + + + + +``CPUPlace`` 是一个设备描述符,表示一个分配或将要分配 ``Tensor`` 或 ``LoDTensor`` 的 ``CPU`` 设备。 + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + cpu_place = fluid.CPUPlace() + + diff --git a/doc/paddle/api/paddle/fluid/CUDAPinnedPlace_cn.rst b/doc/paddle/api/paddle/fluid/CUDAPinnedPlace_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a3e669344b2bac46b8cb57d24bbc633bb3549be3 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/CUDAPinnedPlace_cn.rst @@ -0,0 +1,20 @@ +.. _cn_api_fluid_CUDAPinnedPlace: + +CUDAPinnedPlace +------------------------------- + +.. py:class:: paddle.fluid.CUDAPinnedPlace + + + + +``CUDAPinnedPlace`` 是一个设备描述符,它所指代的页锁定内存由 CUDA 函数 ``cudaHostAlloc()`` 在主机内存上分配,主机的操作系统将不会对这块内存进行分页和交换操作,可以通过直接内存访问技术访问,加速主机和 GPU 之间的数据拷贝。 +有关 CUDA 的数据转移和 ``pinned memory``,参见 `官方文档 `_ 。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + place = fluid.CUDAPinnedPlace() + diff --git a/doc/paddle/api/paddle/fluid/CUDAPlace_cn.rst b/doc/paddle/api/paddle/fluid/CUDAPlace_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ba7cf62280b52b17dc310c8d9c1a5a4ca2cc6feb --- /dev/null +++ b/doc/paddle/api/paddle/fluid/CUDAPlace_cn.rst @@ -0,0 +1,33 @@ +.. _cn_api_fluid_CUDAPlace: + +CUDAPlace +------------------------------- + +.. py:class:: paddle.fluid.CUDAPlace + + + + +.. note:: + 多卡任务请先使用 FLAGS_selected_gpus 环境变量设置可见的GPU设备,下个版本将会修正 CUDA_VISIBLE_DEVICES 环境变量无效的问题。 + +``CUDAPlace`` 是一个设备描述符,表示一个分配或将要分配 ``Tensor`` 或 ``LoDTensor`` 的 GPU 设备。 +每个 ``CUDAPlace`` 有一个 ``dev_id`` (设备id)来表明当前的 ``CUDAPlace`` 所代表的显卡编号,编号从 0 开始。 +``dev_id`` 不同的 ``CUDAPlace`` 所对应的内存不可相互访问。 +这里编号指的是可见显卡的逻辑编号,而不是显卡实际的编号。 +可以通过 ``CUDA_VISIBLE_DEVICES`` 环境变量限制程序能够使用的 GPU 设备,程序启动时会遍历当前的可见设备,并从 0 开始为这些设备编号。 +如果没有设置 ``CUDA_VISIBLE_DEVICES``,则默认所有的设备都是可见的,此时逻辑编号与实际编号是相同的。 + +参数: + - **id** (int,可选) - GPU的设备ID。如果为 ``None``,则默认会使用 id 为 0 的设备。默认值为 ``None``。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + gpu_place = fluid.CUDAPlace(0) + + + + diff --git a/doc/paddle/api/paddle/fluid/DataFeedDesc_cn.rst b/doc/paddle/api/paddle/fluid/DataFeedDesc_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..572ca0ee582247a6cc1ee84cd3d58ad9970edbb3 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/DataFeedDesc_cn.rst @@ -0,0 +1,206 @@ +.. _cn_api_fluid_DataFeedDesc: + +DataFeedDesc +------------------------------- + + +.. py:class:: paddle.fluid.DataFeedDesc(proto_file) + + + + +描述训练数据的格式。输入是一个文件路径名,其内容是protobuf message。 + +可以参考 :code:`paddle/fluid/framework/data_feed.proto` 查看我们如何定义message + +一段典型的message可能是这样的: + +.. code-block:: python + + import paddle.fluid as fluid + f = open("data.proto", "w") + print >> f, 'name: "MultiSlotDataFeed"' + print >> f, 'batch_size: 2' + print >> f, 'multi_slot_desc {' + print >> f, ' slots {' + print >> f, ' name: "words"' + print >> f, ' type: "uint64"' + print >> f, ' is_dense: false' + print >> f, ' is_used: true' + print >> f, ' }' + print >> f, ' slots {' + print >> f, ' name: "label"' + print >> f, ' type: "uint64"' + print >> f, ' is_dense: false' + print >> f, ' is_used: true' + print >> f, ' }' + print >> f, '}' + f.close() + data_feed = fluid.DataFeedDesc('data.proto') + +用户需要了解DataFeedDesc中每个字段的含义,以便自定义字段的值。例如: + +.. 
code-block:: python + + import paddle.fluid as fluid + data_feed = fluid.DataFeedDesc('data.proto') + data_feed.set_batch_size(128) + data_feed.set_dense_slots('words') # 名为'words'的slot将被设置为密集的 + data_feed.set_use_slots('words') # 名为'words'的slot将被用于训练 + + # 最后,可以打印变量详细信息便于排查错误 + print(data_feed.desc()) + + +参数: + - **proto_file** (string) : 包含数据描述的protobuf message的磁盘文件 + + +.. py:method:: set_batch_size(batch_size) + +该接口用于设置DataFeedDesc中的 :code:`batch_size` 。可以在训练期间调用修改 :code:`batch_size` 。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + f = open("data.proto", "w") + print >> f, 'name: "MultiSlotDataFeed"' + print >> f, 'batch_size: 2' + print >> f, 'multi_slot_desc {' + print >> f, ' slots {' + print >> f, ' name: "words"' + print >> f, ' type: "uint64"' + print >> f, ' is_dense: false' + print >> f, ' is_used: true' + print >> f, ' }' + print >> f, ' slots {' + print >> f, ' name: "label"' + print >> f, ' type: "uint64"' + print >> f, ' is_dense: false' + print >> f, ' is_used: true' + print >> f, ' }' + print >> f, '}' + f.close() + data_feed = fluid.DataFeedDesc('data.proto') + data_feed.set_batch_size(128) + +参数: + - **batch_size** (int) - 新的批尺寸。 + +返回:无 + +.. py:method:: set_dense_slots(dense_slots_name) + +将 :code:`dense_slots_name` 指定的slots设置为密集的slot。**注意:默认情况下,所有slots都是稀疏的。** + +密集slot的特征将被输入一个Tensor,而稀疏slot的特征将被输入一个LoDTensor。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + f = open("data.proto", "w") + print >> f, 'name: "MultiSlotDataFeed"' + print >> f, 'batch_size: 2' + print >> f, 'multi_slot_desc {' + print >> f, ' slots {' + print >> f, ' name: "words"' + print >> f, ' type: "uint64"' + print >> f, ' is_dense: false' + print >> f, ' is_used: true' + print >> f, ' }' + print >> f, ' slots {' + print >> f, ' name: "label"' + print >> f, ' type: "uint64"' + print >> f, ' is_dense: false' + print >> f, ' is_used: true' + print >> f, ' }' + print >> f, '}' + f.close() + data_feed = fluid.DataFeedDesc('data.proto') + data_feed.set_dense_slots(['words']) + +参数: + - **dense_slots_name** (list(str)) - slot名称的列表,这些slot将被设置为密集的。 + +返回:无 + +.. py:method:: set_use_slots(use_slots_name) + + +设置一个特定的slot是否用于训练。一个数据集包含了很多特征,通过这个函数可以选择哪些特征将用于指定的模型。 + +参数: + - **use_slots_name** (list) : 将在训练中使用的slot名列表,类型为list,其中每个元素为一个字符串 + +**代码示例:** + +.. code-block:: python + + import paddle.fluid as fluid + f = open("data.proto", "w") + print >> f, 'name: "MultiSlotDataFeed"' + print >> f, 'batch_size: 2' + print >> f, 'multi_slot_desc {' + print >> f, ' slots {' + print >> f, ' name: "words"' + print >> f, ' type: "uint64"' + print >> f, ' is_dense: false' + print >> f, ' is_used: true' + print >> f, ' }' + print >> f, ' slots {' + print >> f, ' name: "label"' + print >> f, ' type: "uint64"' + print >> f, ' is_dense: false' + print >> f, ' is_used: true' + print >> f, ' }' + print >> f, '}' + f.close() + data_feed = fluid.DataFeedDesc('data.proto') + data_feed.set_use_slots(['words']) + +.. note:: + + 默认值是不使用所有slot + + +.. py:method:: desc() + +返回此DataFeedDesc的protobuf message + +返回:一个protobuf message字符串 + +**代码示例:** + +.. 
code-block:: python + + import paddle.fluid as fluid + f = open("data.proto", "w") + print >> f, 'name: "MultiSlotDataFeed"' + print >> f, 'batch_size: 2' + print >> f, 'multi_slot_desc {' + print >> f, ' slots {' + print >> f, ' name: "words"' + print >> f, ' type: "uint64"' + print >> f, ' is_dense: false' + print >> f, ' is_used: true' + print >> f, ' }' + print >> f, ' slots {' + print >> f, ' name: "label"' + print >> f, ' type: "uint64"' + print >> f, ' is_dense: false' + print >> f, ' is_used: true' + print >> f, ' }' + print >> f, '}' + f.close() + data_feed = fluid.DataFeedDesc('data.proto') + print(data_feed.desc()) + + + + + + diff --git a/doc/paddle/api/paddle/fluid/DataFeeder_cn.rst b/doc/paddle/api/paddle/fluid/DataFeeder_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3960dcb1970972383ba3b80e0e21d4ed4e1cb1a8 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/DataFeeder_cn.rst @@ -0,0 +1,210 @@ +.. _cn_api_fluid_DataFeeder: + +DataFeeder +------------------------------- + + +.. py:class:: paddle.fluid.DataFeeder(feed_list, place, program=None) + + + + + + +``DataFeeder`` 负责将reader(读取器)返回的数据转成一种特殊的数据结构,使它们可以输入到 ``Executor`` 和 ``ParallelExecutor`` 中。 +reader通常返回一个minibatch条目列表。在列表中每一条目都是一个样本(sample),它是由具有一至多个特征的列表或元组组成的。 + + +以下是简单用法: + +.. code-block:: python + + import paddle.fluid as fluid + place = fluid.CPUPlace() + img = fluid.layers.data(name='image', shape=[1, 28, 28]) + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + feeder = fluid.DataFeeder([img, label], fluid.CPUPlace()) + result = feeder.feed([([0] * 784, [9]), ([1] * 784, [1])]) + +在多GPU模型训练时,如果需要提前分别向各GPU输入数据,可以使用 ``decorate_reader`` 函数。 + +.. code-block:: python + + import paddle + import paddle.fluid as fluid + + place=fluid.CUDAPlace(0) + data = fluid.layers.data(name='data', shape=[3, 224, 224], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + + feeder = fluid.DataFeeder(place=place, feed_list=[data, label]) + reader = feeder.decorate_reader( + paddle.batch(paddle.dataset.flowers.train(), batch_size=16), multi_devices=False) + + + +参数: + - **feed_list** (list) – 向模型输入的变量表或者变量表名 + - **place** (Place) – place表明是向GPU还是CPU中输入数据。如果想向GPU中输入数据, 请使用 ``fluid.CUDAPlace(i)`` (i 代表 the GPU id);如果向CPU中输入数据, 请使用 ``fluid.CPUPlace()`` + - **program** (Program) – 需要向其中输入数据的Program。如果为None, 会默认使用 ``default_main_program()``。 缺省值为None + + +抛出异常: + - ``ValueError`` – 如果一些变量不在此 Program 中 + + +**代码示例** + +.. code-block:: python + + import numpy as np + import paddle + import paddle.fluid as fluid + + place = fluid.CPUPlace() + + def reader(): + yield [np.random.random([4]).astype('float32'), np.random.random([3]).astype('float32')], + + main_program = fluid.Program() + startup_program = fluid.Program() + + with fluid.program_guard(main_program, startup_program): + data_1 = fluid.layers.data(name='data_1', shape=[1, 2, 2]) + data_2 = fluid.layers.data(name='data_2', shape=[1, 1, 3]) + out = fluid.layers.fc(input=[data_1, data_2], size=2) + # ... + + feeder = fluid.DataFeeder([data_1, data_2], place) + + exe = fluid.Executor(place) + exe.run(startup_program) + for data in reader(): + outs = exe.run(program=main_program, + feed=feeder.feed(data), + fetch_list=[out]) + + +.. py:method:: feed(iterable) + + +根据feed_list(数据输入表)和iterable(可遍历的数据)提供的信息,将输入数据转成一种特殊的数据结构,使它们可以输入到 ``Executor`` 和 ``ParallelExecutor`` 中。 + +参数: + - **iterable** (list|tuple) – 要输入的数据 + +返回: 转换结果 + +返回类型: dict + +**代码示例** + +.. 
code-block:: python + + import numpy.random as random + import paddle.fluid as fluid + + def reader(limit=5): + for i in range(limit): + yield random.random([784]).astype('float32'), random.random([1]).astype('int64'), random.random([256]).astype('float32') + + data_1 = fluid.layers.data(name='data_1', shape=[1, 28, 28]) + data_2 = fluid.layers.data(name='data_2', shape=[1], dtype='int64') + data_3 = fluid.layers.data(name='data_3', shape=[16, 16], dtype='float32') + feeder = fluid.DataFeeder(['data_1','data_2', 'data_3'], fluid.CPUPlace()) + + result = feeder.feed(reader()) + + +.. py:method:: feed_parallel(iterable, num_places=None) + + +该方法获取的多个minibatch,并把每个minibatch提前输入进各个设备中。 + +参数: + - **iterable** (list|tuple) – 要输入的数据 + - **num_places** (int) – 设备数目。默认为None。 + +返回: 转换结果 + +返回类型: dict + +.. note:: + 设备(CPU或GPU)的数目必须等于minibatch的数目 + +**代码示例** + +.. code-block:: python + + import numpy.random as random + import paddle.fluid as fluid + + def reader(limit=10): + for i in range(limit): + yield [random.random([784]).astype('float32'), random.random([1]).astype('float32')], + + x = fluid.layers.data(name='x', shape=[1, 28, 28]) + y = fluid.layers.data(name='y', shape=[1], dtype='float32') + + fluid.layers.elementwise_add(x, y) + + feeder = fluid.DataFeeder(['x','y'], fluid.CPUPlace()) + place_num = 2 + places = [fluid.CPUPlace() for x in range(place_num)] + data = [] + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(fluid.default_startup_program()) + program = fluid.CompiledProgram(fluid.default_main_program()).with_data_parallel(places=places) + for item in reader(): + data.append(item) + if place_num == len(data): + exe.run(program=program, feed=list(feeder.feed_parallel(data, place_num)), fetch_list=[]) + data = [] + +.. py:method:: decorate_reader(reader, multi_devices, num_places=None, drop_last=True) + + + +将reader返回的输入数据batch转换为多个mini-batch,之后每个mini-batch都会被输入进各个设备(CPU或GPU)中。 + +参数: + - **reader** (fun) – 该参数是一个可以生成数据的函数 + - **multi_devices** (bool) – bool型,指明是否使用多个设备 + - **num_places** (int) – 如果 ``multi_devices`` 为 ``True`` , 可以使用此参数来设置GPU数目。如果 ``multi_devices`` 为 ``None`` ,该函数默认使用当前训练机所有GPU设备。默认为None。 + - **drop_last** (bool) – 如果最后一个batch的大小比 ``batch_size`` 要小,则可使用该参数来指明是否选择丢弃最后一个batch数据。 默认为 ``True`` + +返回:转换结果 + +返回类型: dict + +抛出异常: ``ValueError`` – 如果 ``drop_last`` 值为False并且data batch与设备不匹配时,产生此异常 + +**代码示例** + +.. code-block:: python + + import numpy.random as random + import paddle + import paddle.fluid as fluid + + def reader(limit=5): + for i in range(limit): + yield (random.random([784]).astype('float32'), random.random([1]).astype('int64')), + + place=fluid.CPUPlace() + data = fluid.layers.data(name='data', shape=[1, 28, 28], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + + feeder = fluid.DataFeeder(place=place, feed_list=[data, label]) + reader = feeder.decorate_reader(reader, multi_devices=False) + + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + for data in reader(): + exe.run(feed=data) + + + + + + diff --git a/doc/paddle/api/paddle/fluid/DistributeTranspilerConfig_cn.rst b/doc/paddle/api/paddle/fluid/DistributeTranspilerConfig_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c9a7af6880f0c3f38182d673797da5f776526496 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/DistributeTranspilerConfig_cn.rst @@ -0,0 +1,46 @@ +.. _cn_api_fluid_transpiler_DistributeTranspilerConfig: + +DistributeTranspilerConfig +------------------------------- + + +.. 
py:class:: paddle.fluid.transpiler.DistributeTranspilerConfig + + + + +单机任务切换为分布式任务的配置类,用户可根据需求进行配置,如指定同步/异步训练,指定节点个数及模型切分逻辑。 + +返回:None + +.. py:attribute:: slice_var_up (bool) + +是否为Pserver将张量切片, 默认为True, bool类型属性, 默认为True。该参数将指定是否将参数/梯度切分后均匀分布于多个PServer上。slice_var_up为True的情况下,会将参数均匀切分后分布于多个PServer端,使每个PServer的负载相对均衡。 + + +.. py:attribute:: split_method (PSDispatcher) + +参数分发的方式,当前支持的方法包括 :ref:`cn_api_fluid_transpiler_RoundRobin` 和 :ref:`cn_api_fluid_transpiler_HashName` 两种, 默认为RoundRobin。 + +注意: 尝试选择最佳方法来达到负载均衡。 + +.. py:attribute:: min_block_size (int) + +参数切片时,最小数据块的大小,默认为8192。 + +注意: 根据:https://github.com/PaddlePaddle/Paddle/issues/8638#issuecomment-369912156 , 当数据块大小超过2MB时,我们可以有效地使用带宽。如果你想更改它,请详细查看slice_variable函数。 + +**代码示例** + +.. code-block:: python + + from paddle.fluid.transpiler.ps_dispatcher import RoundRobin + import paddle.fluid as fluid + + config = fluid.DistributeTranspilerConfig() + config.slice_var_up = True + config.split_method = RoundRobin + config.min_block_size = 81920 + + + diff --git a/doc/paddle/api/paddle/fluid/DistributeTranspiler_cn.rst b/doc/paddle/api/paddle/fluid/DistributeTranspiler_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2bfb8528eb4afe008ca9137e496d9641ea2defb2 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/DistributeTranspiler_cn.rst @@ -0,0 +1,225 @@ +.. _cn_api_fluid_transpiler_DistributeTranspiler: + +DistributeTranspiler +------------------------------- + + +.. py:class:: paddle.fluid.transpiler.DistributeTranspiler (config=None) + + + + + +该类可以把fluid program转变为分布式数据并行计算的program, 有PServer和NCCL2两种模式。 +在Pserver(全称:parameter server)模式下, 通过 ``transpile`` 将用于单机训练的 ``program`` 转译为可用于parameter server的分布式架构(即PServer,参数服务器)来进行训练的program。 +在NCCL2模式下, 通过 ``transpile`` 将用于单机训练的 ``program`` 转译为可用于NCCL2的分布式架构来进行训练的program。在NCCL2模式下,transpiler会在 ``startup_program`` 中附加一个 ``NCCL_ID`` 广播算子(broadcasting operators)来实现在该集群中所有工作结点共享``NCCL_ID`` 。 调用 ``transpile_nccl2`` 后, 你 **必须** 将 ``trainer_id`` , ``num_trainers`` 参数提供给 ``Executor`` 来启动NCCL2分布式模式。 + + +参数: + - **config** (DistributeTranspilerConfig) DistributeTranspiler属性配置实例,定义了program转变所需要的属性, 请参考:`DistributeTranspilerConfig` 相关文档。 + +返回:初始化后的DistributeTranspiler实例 + +返回类型:实例(DistributeTranspiler) + + +**代码示例** + +.. 
code-block:: python + + x = fluid.layers.data(name='x', shape=[13], dtype='float32') + y = fluid.layers.data(name='y', shape=[1], dtype='float32') + y_predict = fluid.layers.fc(input=x, size=1, act=None) + + cost = fluid.layers.square_error_cost(input=y_predict, label=y) + avg_loss = fluid.layers.mean(cost) + + sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) + sgd_optimizer.minimize(avg_loss) + + # pserver 模式下 + pserver_endpoints = "192.168.0.1:6174,192.168.0.2:6174" + trainer_endpoints = "192.168.0.1:6174,192.168.0.2:6174" + current_endpoint = "192.168.0.1:6174" + trainer_id = 0 + trainers = 4 + role = "PSERVER" + t = fluid.DistributeTranspiler() + t.transpile( + trainer_id, pservers=pserver_endpoints, trainers=trainers) + if role == "PSERVER": + pserver_program = t.get_pserver_program(current_endpoint) + pserver_startup_program = t.get_startup_program(current_endpoint, + pserver_program) + elif role == "TRAINER": + trainer_program = t.get_trainer_program() + + # nccl2 模式下 + trainer_num = 2 + trainer_id = 0 + config = fluid.DistributeTranspilerConfig() + config.mode = "nccl2" + trainer_endpoints = "192.168.0.1:6174,192.168.0.2:6174" + t = fluid.DistributeTranspiler(config=config) + t.transpile(trainer_id=trainer_id, trainers=trainer_endpoints, current_endpoint="192.168.0.1:6174") + exe = fluid.ParallelExecutor( + use_cuda=True, + loss_name=avg_loss.name, + num_trainers=trainer_num, + trainer_id=trainer_id + ) + + + +.. py:method:: transpile(trainer_id, program=None, pservers='127.0.0.1:6174', trainers=1, sync_mode=True, startup_program=None, current_endpoint='127.0.0.1:6174') + +通过此方法,可根据用户配置将单机的program转换为当前节点可用的数据并行的分布式program。 + +参数: + - **trainer_id** (int) – 当前Trainer worker的id, 如果有n个Trainer worker, id 取值范围为0 ~ n-1 + - **program** (Program|None) – 待transpile(转译)的main program, 默认为 ``fluid.default_main_program()`` + - **pservers** (str) – 内容为Pserver列表的字符串,格式为:按逗号区分不同的Pserver,每个Pserver的格式为 *ip地址:端口号* + - **trainers** (int|str) – 在Pserver模式下,该参数指Trainer机的个数;在nccl2模式下,它是一个内容为Trainer终端列表的字符串 + - **sync_mode** (bool) – 是否做同步训练(synchronous training), 默认为True + - **startup_program** (Program|None) – 待transpile(转译)的startup program,默认为 ``fluid.default_startup_program()`` + - **current_endpoint** (str) – 当需要把program转译(transpile)至NCCL2模式时,需要将当前endpoint(终端)传入该参数。PServer模型下,当用户需要使用增量训练时,必须要指定该参数。 + +返回:None + + +**代码示例** + +.. code-block:: python + + transpiler = fluid.DistributeTranspiler() + t.transpile( + trainer_id=0, + pservers="127.0.0.1:7000,127.0.0.1:7001", + trainers=2, + sync_mode=False, + current_endpoint="127.0.0.1:7000") + + +.. py:method:: get_trainer_program(wait_port=True) + + +该方法可以得到Trainer侧的program。Trainer侧的program相较于原始的单机执行的program,主要有以下不同: + + - 删除了参数更新optimizer相关op,参数的更新由Pserver(参数服务器)执行 + - 在每个参数的反向梯度计算op后,添加了 ``Send_op`` 与 ``Recv_op`` ,用于发送参数的梯度与接受更新后的参数 + +参数: + - **wait_port** (bool,默认值True) - 是否等待参数服务器准备就绪后再返回program + +返回: Trainer侧的program + +返回类型: Program + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + # 这是一个示例,请根据你的情况更改endpoint + pserver_endpoints = "192.168.0.1:6174,192.168.0.2:6174" + trainer_id = 0 + trainers = 4 + t = fluid.DistributeTranspiler() + t.transpile(trainer_id, trainers=trainers, pservers=pserver_endpoints) + trainer_program = t.get_trainer_program() + + +.. 
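note::
+    ``wait_port`` 默认为True,即等待参数服务器准备就绪后再返回program。若希望在参数服务器尚未启动时(例如本地调试)先构建出trainer侧的program,可以参考下面的假设性示意,将其置为False:
+
+    .. code-block:: python
+
+        # 假设 t 已按上文示例完成 transpile
+        trainer_program = t.get_trainer_program(wait_port=False)
+
+.. 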
py:method:: get_pserver_program(endpoint) + + +该方法可以得到Pserver(参数服务器)侧的program。Pserver侧的program相较于原始的单机执行的program,主要有以下不同: + + - 仅包含参数更新optimizer相关op,与分布式通信相关op + - 0号block仅包含变量的定义及 ``listen_and_serv_op`` + - Pserver为每个需要进行更新的参数新建了一个独立的block + +参数: + - **endpoint** (str) – 当前Pserver终端 + +返回: 当前Pserver需要执行的program + +返回类型: Program + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + # 这是一个示例,请根据你的情况更改endpoint + pserver_endpoints = "192.168.0.1:6174,192.168.0.2:6174" + current_endpoint = "192.168.0.1:6174" + trainer_id = 0 + trainers = 4 + t = fluid.DistributeTranspiler() + t.transpile( + trainer_id, pservers=pserver_endpoints, trainers=trainers) + pserver_program = t.get_pserver_program(current_endpoint) + + +.. py:method:: get_pserver_programs(endpoint) + + +该方法可以得到Pserver侧用于分布式训练的 ``main_program`` 和 ``startup_program`` 。该函数返回的 ``main_program`` 与函数 ``get_pserver_program`` 的返回值一致。 + +参数: + - **endpoint** (str) – 当前Pserver终端 + +返回: (main_program, startup_program), “Program”类型的元组 + +返回类型: tuple + + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + # 这是一个示例,请根据你的情况更改endpoint + pserver_endpoints = "192.168.0.1:6174,192.168.0.2:6174" + current_endpoint = "192.168.0.1:6174" + trainer_id = 0 + trainers = 4 + t = fluid.DistributeTranspiler() + t.transpile( + trainer_id, pservers=pserver_endpoints, trainers=trainers) + pserver_program, pserver_startup_program = t.get_pserver_programs(current_endpoint) + + +.. py:method:: get_startup_program(endpoint, pserver_program=None, startup_program=None) + + +**该函数已停止使用** +获取当前Pserver的startup_program,如果有多个被分散到不同blocks的变量,则修改operator的输入变量。 + +参数: + - **endpoint** (str) – 当前Pserver终端 + - **pserver_program** (Program) – 已停止使用。 先调用get_pserver_program + - **startup_program** (Program) – 已停止使用。应在初始化时传入startup_program + +返回: Pserver侧的startup_program + +返回类型: Program + +**代码示例** + +.. code-block:: python + + pserver_endpoints = "192.168.0.1:6174,192.168.0.2:6174" + trainer_endpoints = "192.168.0.1:6174,192.168.0.2:6174" + current_endpoint = "192.168.0.1:6174" + trainer_id = 0 + trainers = 4 + + t = fluid.DistributeTranspiler() + t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers) + pserver_program = t.get_pserver_program(current_endpoint) + pserver_startup_program = t.get_startup_program(current_endpoint, + pserver_program) + + + diff --git a/doc/paddle/api/paddle/fluid/LoDTensorArray_cn.rst b/doc/paddle/api/paddle/fluid/LoDTensorArray_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4fa9be57d50dbec6f1137f7cf04fdc83f349af79 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/LoDTensorArray_cn.rst @@ -0,0 +1,47 @@ +.. _cn_api_fluid_LoDTensorArray: + +LoDTensorArray +------------------------------- + +.. py:class:: paddle.fluid.LoDTensorArray + + + + +LoDTensorArray是由LoDTensor组成的数组,支持"[]"运算符、len()函数和for迭代等。 + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + + arr = fluid.LoDTensorArray() + + + +.. py:method:: append(self: paddle.fluid.core_avx.LoDTensorArray, tensor: paddle.fluid.core.LoDTensor) → None + +该接口将LoDTensor追加到LoDTensorArray后。 + +参数: + - **tensor** (LoDTensor) - 追加的LoDTensor。 + +返回:无。 + +**示例代码** + +.. 
code-block:: python + + import paddle.fluid as fluid + import numpy as np + + arr = fluid.LoDTensorArray() + t = fluid.LoDTensor() + t.set(np.ndarray([5, 30]), fluid.CPUPlace()) + arr.append(t) + + + + + diff --git a/doc/paddle/api/paddle/fluid/LoDTensor_cn.rst b/doc/paddle/api/paddle/fluid/LoDTensor_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2eaf09eecf00e779296f282482324e4b2a8385cc --- /dev/null +++ b/doc/paddle/api/paddle/fluid/LoDTensor_cn.rst @@ -0,0 +1,267 @@ +.. _cn_api_fluid_LoDTensor: + +LoDTensor +------------------------------- + +.. py:class:: paddle.fluid.LoDTensor + + + + + +LoDTensor是一个具有LoD(Level of Details)信息的张量(Tensor),可用于表示变长序列,详见 :ref:`cn_user_guide_lod_tensor` 。 + +LoDTensor可以通过 ``np.array(lod_tensor)`` 方法转换为numpy.ndarray。 + +如果您不需要了解LoDTensor的细节,可以跳过以下的注解。 + +下面以两个例子说明如何用LoDTensor表示变长序列。 + +示例1: + +假设x为一个表示变长序列的LoDTensor,它包含2个逻辑子序列,第一个序列长度是2(样本数量为2),第二个序列长度是3,总序列长度为5。 +第一个序列的数据为[1, 2], [3, 4],第二个序列的数据为[5, 6], [7, 8], [9, 10],每个样本数据的维度均是2,该LoDTensor最终的shape为[5, 2],其中5为总序列长度,2为每个样本数据的维度。 + +在逻辑上,我们可以用两种方式表示该变长序列,一种是递归序列长度的形式,即x.recursive_sequence_length = [[2, 3]];另一种是偏移量的形式,即x.lod = [[0, 2, 2+3]]。 +这两种表示方式是等价的,您可以通过LoDTensor的相应接口来设置和获取recursive_sequence_length或LoD。 + +在实现上,为了获得更快的序列访问速度,Paddle采用了偏移量的形式来存储不同的序列长度。因此,对recursive_sequence_length的操作最终将转换为对LoD的操作。 + +:: + + x.data = [[1, 2], [3, 4], + [5, 6], [7, 8], [9, 10]] + + x.shape = [5, 2] + + x.recursive_sequence_length = [[2, 3]] + + x.lod = [[0, 2, 5]] + +示例2: + +LoD可以有多个level(例如,一个段落可以有多个句子,一个句子可以有多个单词)。假设y为LoDTensor ,lod_level为2。从level=0来看有2个逻辑序列,序列长度分别为2和1,表示第一个逻辑序列包含2个子序列,第二个逻辑序列包含1个子序列。从level=1来看,第一个逻辑序列包含的2个子序列长度分别为2和2,第二个逻辑序列包含的1个子序列长度为3。 + +因此,该LoDTensor以递归序列长度形式表示为 y.recursive_sequence_length = [[2, 1], [2, 2, 3]];相应地,以偏移量形式表示为 y.lod = [[0, 2, 3], [0, 2, 4, 7]]。 + +:: + + y.data = [[1, 2], [3, 4], + [5, 6], [7, 8], + [9, 10], [11, 12], [13, 14]] + + y.shape = [2+2+3, 2] + + y.recursive_sequence_length = [[2, 1], [2, 2, 3]] + + y.lod = [[0, 2, 3], [0, 2, 4, 7]] + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + + t = fluid.LoDTensor() + + +.. py:method:: has_valid_recursive_sequence_lengths(self: paddle.fluid.core_avx.LoDTensor) → bool + +该接口检查LoDTensor的LoD的正确性。 + +返回: 是否带有正确的LoD。 + +返回类型: bool。 + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + t = fluid.LoDTensor() + t.set(np.ndarray([5, 30]), fluid.CPUPlace()) + t.set_recursive_sequence_lengths([[2, 3]]) + print(t.has_valid_recursive_sequence_lengths()) # True + +.. py:method:: lod(self: paddle.fluid.core_avx.LoDTensor) → List[List[int]] + +该接口返回LoDTensor的LoD。 + +返回:LoDTensor的LoD。 + +返回类型:List [List [int]]。 + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + t = fluid.LoDTensor() + t.set(np.ndarray([5, 30]), fluid.CPUPlace()) + t.set_lod([[0, 2, 5]]) + print(t.lod()) # [[0, 2, 5]] + +.. py:method:: recursive_sequence_lengths(self: paddle.fluid.core_avx.LoDTensor) → List[List[int]] + +该接口返回与LoDTensor的LoD对应的递归序列长度。 + +返回:LoDTensor的LoD对应的递归序列长度。 + +返回类型:List [List [int]]。 + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + t = fluid.LoDTensor() + t.set(np.ndarray([5, 30]), fluid.CPUPlace()) + t.set_recursive_sequence_lengths([[2, 3]]) + print(t.recursive_sequence_lengths()) # [[2, 3]] + + +.. py:method:: set(*args, **kwargs) + +该接口根据输入的numpy array和设备place,设置LoDTensor的数据。 + +重载函数: + +1. 
set(self: paddle.fluid.core_avx.Tensor, array: numpy.ndarray[float32], place: paddle::platform::CPUPlace) -> None + +2. set(self: paddle.fluid.core_avx.Tensor, array: numpy.ndarray[int32], place: paddle::platform::CPUPlace) -> None + +3. set(self: paddle.fluid.core_avx.Tensor, array: numpy.ndarray[float64], place: paddle::platform::CPUPlace) -> None + +4. set(self: paddle.fluid.core_avx.Tensor, array: numpy.ndarray[int64], place: paddle::platform::CPUPlace) -> None + +5. set(self: paddle.fluid.core_avx.Tensor, array: numpy.ndarray[bool], place: paddle::platform::CPUPlace) -> None + +6. set(self: paddle.fluid.core_avx.Tensor, array: numpy.ndarray[uint16], place: paddle::platform::CPUPlace) -> None + +7. set(self: paddle.fluid.core_avx.Tensor, array: numpy.ndarray[uint8], place: paddle::platform::CPUPlace) -> None + +8. set(self: paddle.fluid.core_avx.Tensor, array: numpy.ndarray[int8], place: paddle::platform::CPUPlace) -> None + +9. set(self: paddle.fluid.core_avx.Tensor, array: numpy.ndarray[float32], place: paddle::platform::CUDAPlace) -> None + +10. set(self: paddle.fluid.core_avx.Tensor, array: numpy.ndarray[int32], place: paddle::platform::CUDAPlace) -> None + +11. set(self: paddle.fluid.core_avx.Tensor, array: numpy.ndarray[float64], place: paddle::platform::CUDAPlace) -> None + +12. set(self: paddle.fluid.core_avx.Tensor, array: numpy.ndarray[int64], place: paddle::platform::CUDAPlace) -> None + +13. set(self: paddle.fluid.core_avx.Tensor, array: numpy.ndarray[bool], place: paddle::platform::CUDAPlace) -> None + +14. set(self: paddle.fluid.core_avx.Tensor, array: numpy.ndarray[uint16], place: paddle::platform::CUDAPlace) -> None + +15. set(self: paddle.fluid.core_avx.Tensor, array: numpy.ndarray[uint8], place: paddle::platform::CUDAPlace) -> None + +16. set(self: paddle.fluid.core_avx.Tensor, array: numpy.ndarray[int8], place: paddle::platform::CUDAPlace) -> None + +17. set(self: paddle.fluid.core_avx.Tensor, array: numpy.ndarray[float32], place: paddle::platform::CUDAPinnedPlace) -> None + +18. set(self: paddle.fluid.core_avx.Tensor, array: numpy.ndarray[int32], place: paddle::platform::CUDAPinnedPlace) -> None + +19. set(self: paddle.fluid.core_avx.Tensor, array: numpy.ndarray[float64], place: paddle::platform::CUDAPinnedPlace) -> None + +20. set(self: paddle.fluid.core_avx.Tensor, array: numpy.ndarray[int64], place: paddle::platform::CUDAPinnedPlace) -> None + +21. set(self: paddle.fluid.core_avx.Tensor, array: numpy.ndarray[bool], place: paddle::platform::CUDAPinnedPlace) -> None + +22. set(self: paddle.fluid.core_avx.Tensor, array: numpy.ndarray[uint16], place: paddle::platform::CUDAPinnedPlace) -> None + +23. set(self: paddle.fluid.core_avx.Tensor, array: numpy.ndarray[uint8], place: paddle::platform::CUDAPinnedPlace) -> None + +24. set(self: paddle.fluid.core_avx.Tensor, array: numpy.ndarray[int8], place: paddle::platform::CUDAPinnedPlace) -> None + +参数: + - **array** (numpy.ndarray) - 要设置的numpy array,支持的数据类型为bool, float32, float64, int8, int32, int64, uint8, uint16。 + - **place** (CPUPlace|CUDAPlace|CUDAPinnedPlace) - 要设置的LoDTensor所在的设备。 + +返回:无。 + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + t = fluid.LoDTensor() + t.set(np.ndarray([5, 30]), fluid.CPUPlace()) + + +.. py:method:: set_lod(self: paddle.fluid.core_avx.LoDTensor, lod: List[List[int]]) → None + +该接口设置LoDTensor的LoD。 + +参数: + - **lod** (List [List [int]]) - 要设置的LoD。 + +返回:无。 + +**示例代码** + +.. 
code-block:: python + + import paddle.fluid as fluid + import numpy as np + + t = fluid.LoDTensor() + t.set(np.ndarray([5, 30]), fluid.CPUPlace()) + t.set_lod([[0, 2, 5]]) + print(t.lod()) # [[0, 2, 5]] + + + +.. py:method:: set_recursive_sequence_lengths(self: paddle.fluid.core_avx.LoDTensor, recursive_sequence_lengths: List[List[int]]) → None + +该接口根据递归序列长度 ``recursive_sequence_lengths`` 设置LoDTensor的LoD。 + +例如,如果 ``recursive_sequence_lengths = [[2, 3]]``,意味着有两个长度分别为2和3的序列,相应的LoD是[[0, 2, 2 + 3]],即[[0, 2, 5]]。 + +参数: + - **recursive_sequence_lengths** (List [List [int]]) - 递归序列长度。 + +返回:无。 + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + t = fluid.LoDTensor() + t.set(np.ndarray([5, 30]), fluid.CPUPlace()) + t.set_recursive_sequence_lengths([[2, 3]]) + print(t.recursive_sequence_length()) # [[2, 3]] + print(t.lod()) # [[0, 2, 5]] + +.. py:method:: shape(self: paddle.fluid.core_avx.Tensor) → List[int] + +该接口返回LoDTensor的shape。 + +返回:LoDTensor的shape。 + +返回类型:List[int] 。 + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + t = fluid.LoDTensor() + t.set(np.ndarray([5, 30]), fluid.CPUPlace()) + print(t.shape()) # [5, 30] + + + diff --git a/doc/paddle/api/paddle/fluid/Tensor_cn.rst b/doc/paddle/api/paddle/fluid/Tensor_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0cae5aac94cd27f7e3432a19b295f2b03ef606b8 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/Tensor_cn.rst @@ -0,0 +1,59 @@ +.. _cn_api_fluid_Tensor: + +Tensor +------------------------------- + +.. py:function:: paddle.fluid.Tensor + + + + +Tensor用于表示多维张量,可以通过 ``np.array(tensor)`` 方法转换为numpy.ndarray。 + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + + t = fluid.Tensor() + +.. py:method:: set(array, place, zero_copy=False) + +该接口根据输入的numpy array和设备place,设置Tensor的数据。 + +参数: + - **array** (numpy.ndarray) - 要设置的numpy array,支持的数据类型为bool, float32, float64, int8, int32, int64, uint8, uint16。 + - **place** (CPUPlace|CUDAPlace|CUDAPinnedPlace) - 要设置的Tensor所在的设备。 + - **zero_copy** (bool,可选) - 是否与输入的numpy数组共享内存。此参数仅适用于CPUPlace。默认值为False。 + +返回:无。 + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + t = fluid.Tensor() + t.set(np.ndarray([5, 30]), fluid.CPUPlace()) + +.. py:method:: shape(self: paddle.fluid.core_avx.Tensor) → List[int] + +该接口返回Tensor的shape。 + +返回:Tensor的shape。 + +返回类型:List[int] 。 + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + t = fluid.Tensor() + t.set(np.ndarray([5, 30]), fluid.CPUPlace()) + print(t.shape()) # [5, 30] \ No newline at end of file diff --git a/doc/paddle/api/paddle/fluid/backward/append_backward_cn.rst b/doc/paddle/api/paddle/fluid/backward/append_backward_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..268349a2f3342c7e3c796834927284d73742e29c --- /dev/null +++ b/doc/paddle/api/paddle/fluid/backward/append_backward_cn.rst @@ -0,0 +1,73 @@ +.. _cn_api_fluid_backward_append_backward: + +append_backward +------------------------------- + + +.. 
py:function:: paddle.fluid.backward.append_backward(loss, parameter_list=None, no_grad_set=None, callbacks=None) + + + + +该接口将向主程序(``main_program``)追加反向部分 。 + +完整的神经网络训练由前向和反向传播组成。但是当我们配置网络时,我们只需要指定其前向部分。 +该接口使用链式法则,能够根据前向部分自动生成反向部分。 + +在大多数情况下,用户无需手动调用此接口,它将由优化器(``Optimizer``)的 ``minimize`` 函数自动调用。 + +参数: + - **loss** ( :ref:`api_guide_Variable` ) - 网络的损失变量。 + - **parameter_list** (list [Variable|str],可选)- 指定优化器需要更新的参数或参数名称列表。如果为 ``None`` ,则将更新所有参数。默认值为 ``None``。 + - **no_grad_set** (set [Variable|str],可选)- 在 `block0` ( :ref:`api_guide_Block` ) 中要忽略梯度的 :ref:`api_guide_Variable` 的名字的集合。所有的 :ref:`api_guide_Block` 中带有 ``stop_gradient = True`` 的所有 :ref:`api_guide_Variable` 的名字都会被自动添加到此集合中。如果该参数不为 ``None``,则会将该参数集合的内容添加到默认的集合中。默认值为 ``None``。 + - **callbacks** (list [callable object],可选)- 回调函数列表。用于在反向传播构建中执行一些自定义作业。每次将新的梯度OP添加到程序中时,将调用其中的所有可调用对象。可调用对象必须有两个输入参数: :ref:`api_guide_Block` 和 ``context`` 。 :ref:`api_guide_Block` 是将被添加到新梯度算子的块。 ``context`` 是一个映射,其键是梯度 :ref:`api_guide_Variable` 名,值是对应的原始 :ref:`api_guide_Variable` 。除此之外, ``context`` 还有另一个特殊的键值对:键是字符串 ``__ current_op_desc__`` ,值是刚刚触发可调用对象的梯度OP的 ``op_desc`` 。默认值为 ``None``。 + +返回: 参数及其梯度 :ref:`api_guide_Variable` 的元组的列表。元组的第一个值为参数,第二个值为该参数的梯度 :ref:`api_guide_Variable` 。 + +返回类型: list[( :ref:`api_guide_Variable` , :ref:`api_guide_Variable` )] + +抛出: + - ``AssertionError`` - 如果 loss 不是 :ref:`api_guide_Variable` 的实例。 + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + + x = fluid.data(name='x', shape=[None, 13], dtype='int64') + y = fluid.data(name='y', shape=[None, 1], dtype='float32') + x_emb = fluid.embedding(x, size=[100, 256]) + y_predict = fluid.layers.fc(input=x_emb, size=1, act=None, name='my_fc') + loss = fluid.layers.square_error_cost(input=y_predict, label=y) + avg_loss = fluid.layers.mean(loss) + + # 获取main_program中所有weight参数, 不包括bias. 
+ all_weights = [param for param in fluid.default_main_program().block(0).all_parameters() if 'w_' in param.name] + all_weights_name = [w.name for w in all_weights] + + # 若parameter_list为默认值(None), 则返回包含所有param_grad的list + p_g_list1 = fluid.backward.append_backward(loss=avg_loss) + # output: [(embedding_0.w_0, embedding_0.w_0@GRAD), (my_fc.w_0, my_fc.w_0@GRAD), (my_fc.b_0, my_fc.b_0@GRAD)] + + # 返回与传入parameter_list对应的param_grad的list, 传入的parameter_list可以是 param(Variable类型)的list + p_g_list2 = fluid.backward.append_backward(loss=avg_loss, parameter_list=all_weights) + # output: [(embedding_0.w_0, embedding_0.w_0@GRAD), (my_fc.w_0, my_fc.w_0@GRAD)] + + # 传入的parameter_list也可以是值为param.name(str类型)的list + p_g_list3 = fluid.backward.append_backward(loss=avg_loss, parameter_list=all_weights_name) + # output: [(embedding_0.w_0, embedding_0.w_0@GRAD), (my_fc.w_0, my_fc.w_0@GRAD)] + + # no_grad_set可以是set[Variables]类型,表示梯度将在这些Variables处截断 + p_g_list4 = fluid.backward.append_backward(loss=avg_loss, no_grad_set=set([x_emb])) + # output: [(my_fc.w_0, my_fc.w_0@GRAD), (my_fc.b_0, my_fc.b_0@GRAD)] + + # no_grad_set也可以是set[Variable.names]类型。当参数Variable是在layers内部创建,且不方便显式地指定时,可以使用set[Variable.names] + p_g_list5 = fluid.backward.append_backward(loss=avg_loss, no_grad_set=set(['my_fc.b_0'])) + # output: [(embedding_0.w_0, embedding_0.w_0@GRAD), (my_fc.w_0, my_fc.w_0@GRAD)] + + # 返回为[], 因为所有的param_grad均被传入的no_grad_set过滤掉了 + p_g_list6 = fluid.backward.append_backward(loss=avg_loss, parameter_list=all_weights, no_grad_set=set(all_weights)) + + + diff --git a/doc/paddle/api/paddle/fluid/backward/gradients_cn.rst b/doc/paddle/api/paddle/fluid/backward/gradients_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4d1ca3786618ab07386839872a255899091af89c --- /dev/null +++ b/doc/paddle/api/paddle/fluid/backward/gradients_cn.rst @@ -0,0 +1,38 @@ +.. _cn_api_fluid_backward_gradients: + +gradients +------------------------------- + + +.. py:function:: paddle.fluid.backward.gradients(targets, inputs, target_gradients=None, no_grad_set=None) + + + + +将目标梯度反向传播到输入。 + +参数: + - **targets** (Variable|list[Variable]) – 目标变量 + - **inputs** (Variable|list[Variable]) – 输入变量 + - **target_gradients** (Variable|list[Variable],可选) – 目标的梯度变量,应与目标变量形状相同;如果设置为None,则以1初始化所有梯度变量 + - **no_grad_set** (set[Variable|str],可选) – 在 `block0` ( :ref:`api_guide_Block` ) 中要忽略梯度的 :ref:`api_guide_Variable` 的名字的集合。所有的 :ref:`api_guide_Block` 中带有 ``stop_gradient = True`` 的所有 :ref:`api_guide_Variable` 的名字都会被自动添加到此集合中。如果该参数不为 ``None``,则会将该参数集合的内容添加到默认的集合中。默认值为 ``None``。 + + +返回:数组,包含与输入对应的梯度。如果一个输入不影响目标函数,则对应的梯度变量为None + +返回类型:(list[Variable]) + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + + x = fluid.data(name='x', shape=[None,2,8,8], dtype='float32') + x.stop_gradient=False + y = fluid.layers.conv2d(x, 4, 1, bias_attr=False) + y = fluid.layers.relu(y) + y = fluid.layers.conv2d(y, 4, 1, bias_attr=False) + y = fluid.layers.relu(y) + z = fluid.gradients([y], x) + print(z) \ No newline at end of file diff --git a/doc/paddle/api/paddle/fluid/clip/ErrorClipByValue_cn.rst b/doc/paddle/api/paddle/fluid/clip/ErrorClipByValue_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c62a90742856f5bd867e7548d432d818105a488e --- /dev/null +++ b/doc/paddle/api/paddle/fluid/clip/ErrorClipByValue_cn.rst @@ -0,0 +1,50 @@ +.. _cn_api_fluid_clip_ErrorClipByValue: + +ErrorClipByValue +------------------------------- + +.. 
py:class:: paddle.fluid.clip.ErrorClipByValue(max, min=None) + + + + +给定一个 Tensor ``t`` (该 Tensor 传入方式见代码示例),对 Tensor 中的元素超出给定最大 ``max`` 和最小界 ``min`` 内区间范围 [min, max] 的元素,重设为所超出界的界值。 + + +- 任何小于min(最小值)的值都被设置为 ``min`` + +- 任何大于max(最大值)的值都被设置为 ``max`` + + +参数: + - **max** (foat) - 要修剪的最大值。 + - **min** (float) - 要修剪的最小值。如果用户没有设置,将被框架默认设置为 ``-max`` 。 + + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + BATCH_SIZE = 128 + CLIP_MAX = 2e-6 + CLIP_MIN = -1e-6 + prog = fluid.framework.Program() + + with fluid.program_guard(main_program=prog): + image = fluid.layers.data(name='x', shape=[784], dtype='float32') + hidden1 = fluid.layers.fc(input=image, size=128, act='relu') + hidden2 = fluid.layers.fc(input=hidden1, size=64, act='relu') + predict = fluid.layers.fc(input=hidden2, size=10, act='softmax') + label = fluid.layers.data(name='y', shape=[1], dtype='int64') + cost = fluid.layers.cross_entropy(input=predict, label=label) + avg_cost = fluid.layers.mean(cost) + prog_clip = prog.clone() + prog_clip.block(0).var(hidden1.name)._set_error_clip( + fluid.clip.ErrorClipByValue(max=CLIP_MAX, min=CLIP_MIN)) + + + + + diff --git a/doc/paddle/api/paddle/fluid/clip/GradientClipByGlobalNorm_cn.rst b/doc/paddle/api/paddle/fluid/clip/GradientClipByGlobalNorm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6a9c4ff2e6b4e442ebc0e159de1bf944b7fceab9 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/clip/GradientClipByGlobalNorm_cn.rst @@ -0,0 +1,104 @@ +.. _cn_api_fluid_clip_GradientClipByGlobalNorm: + +GradientClipByGlobalNorm +------------------------------- + +.. py:class:: paddle.fluid.clip.GradientClipByGlobalNorm(clip_norm, group_name='default_group', need_clip=None) + + + + +将一个 Tensor列表 :math:`t\_list` 中所有Tensor的L2范数之和,限定在 ``clip_norm`` 范围内。 + +- 如果范数之和大于 ``clip_norm`` ,则所有 Tensor 会乘以一个系数进行压缩 + +- 如果范数之和小于或等于 ``clip_norm`` ,则不会进行任何操作。 + +输入的 Tensor列表 不是从该类里传入, 而是默认会选择 ``Program`` 中全部的梯度,如果 ``need_clip`` 不为None,则可以只选择部分参数进行梯度裁剪。 + +该类需要在初始化 ``optimizer`` 时进行设置后才能生效,可参看 ``optimizer`` 文档(例如: :ref:`cn_api_fluid_optimizer_SGDOptimizer` )。 + +裁剪公式如下: + +.. math:: + \\t\_list[i]=t\_list[i]∗\frac{clip\_norm}{max(global\_norm,clip\_norm)}\\ + +其中: + +.. math:: + \\global\_norm=\sqrt{\sum_{i=0}^{n-1}(l2norm(t\_list[i]))^2}\\ + + +参数: + - **clip_norm** (float) - 所允许的范数最大值 + - **group_name** (str, optional) - 剪切的组名 + - **need_clip** (function, optional) - 类型: 函数。用于指定需要梯度裁剪的参数,该函数接收一个 ``Parameter`` ,返回一个 ``bool`` (True表示需要裁剪,False不需要裁剪)。默认为None,此时会裁剪网络中全部参数。 + +**代码示例1:静态图** + +.. 
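note::
+    本节的静态图/动态图示例展示如何在 optimizer 中使用该裁剪策略;在此之前,先用 numpy 给出上述裁剪公式的一个数值示意(数组取值均为假设值,不涉及 Paddle API,仅用于帮助理解公式)。
+
+.. code-block:: python
+
+    import numpy as np
+
+    clip_norm = 1.0
+    # 假设网络中有两个梯度 tensor
+    t_list = [np.array([0.6, 0.8]), np.array([1.2, 1.6])]
+    # global_norm = sqrt(sum(l2norm(t)^2)),此处约为 2.236
+    global_norm = np.sqrt(sum(np.sum(np.square(t)) for t in t_list))
+    scale = clip_norm / max(global_norm, clip_norm)
+    t_list = [t * scale for t in t_list]
+    # 裁剪后,所有梯度的整体 L2 范数(global_norm)恰好等于 clip_norm
+
+.. 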
code-block:: python + + import paddle + import paddle.fluid as fluid + import numpy as np + + main_prog = fluid.Program() + startup_prog = fluid.Program() + with fluid.program_guard( + main_program=main_prog, startup_program=startup_prog): + image = fluid.data( + name='x', shape=[-1, 2], dtype='float32') + predict = fluid.layers.fc(input=image, size=3, act='relu') #Trainable parameters: fc_0.w.0, fc_0.b.0 + loss = fluid.layers.mean(predict) + + # 裁剪网络中全部参数: + clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0) + + # 仅裁剪参数fc_0.w_0时: + # 为need_clip参数传入一个函数fileter_func,fileter_func接收参数的类型为Parameter,返回类型为bool + # def fileter_func(Parameter): + # # 可以较为方便的通过Parameter.name判断(name可以在fluid.ParamAttr中设置,默认为fc_0.w_0、fc_0.b_0) + # return Parameter.name=="fc_0.w_0" + # clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0, need_clip=fileter_func) + + sgd_optimizer = fluid.optimizer.SGDOptimizer(learning_rate=0.1, grad_clip=clip) + sgd_optimizer.minimize(loss) + + place = fluid.CPUPlace() + exe = fluid.Executor(place) + x = np.random.uniform(-100, 100, (10, 2)).astype('float32') + exe.run(startup_prog) + out = exe.run(main_prog, feed={'x': x}, fetch_list=loss) + + +**代码示例2:动态图** + +.. code-block:: python + + import paddle + import paddle.fluid as fluid + + with fluid.dygraph.guard(): + linear = fluid.dygraph.Linear(10, 10) #可训练参数: linear_0.w.0, linear_0.b.0 + inputs = fluid.layers.uniform_random([32, 10]).astype('float32') + out = linear(fluid.dygraph.to_variable(inputs)) + loss = fluid.layers.reduce_mean(out) + loss.backward() + + # 裁剪网络中全部参数: + clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0) + + # 仅裁剪参数linear_0.w_0时: + # 为need_clip参数传入一个函数fileter_func,fileter_func接收参数的类型为ParamBase,返回类型为bool + # def fileter_func(ParamBase): + # # 可以通过ParamBase.name判断(name可以在fluid.ParamAttr中设置,默认为linear_0.w_0、linear_0.b_0) + # return ParamBase.name == "linear_0.w_0" + # # 注:linear.weight、linear.bias能分别返回dygraph.Linear层的权重与偏差,也可以此来判断 + # return ParamBase.name == linear.weight.name + # clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0, need_clip=fileter_func) + + sgd_optimizer = fluid.optimizer.SGD( + learning_rate=0.1, + parameter_list=linear.parameters(), + grad_clip=clip) + sgd_optimizer.minimize(loss) diff --git a/doc/paddle/api/paddle/fluid/clip/GradientClipByNorm_cn.rst b/doc/paddle/api/paddle/fluid/clip/GradientClipByNorm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..541663965eb95609d2417ff2f4219f2317de8f71 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/clip/GradientClipByNorm_cn.rst @@ -0,0 +1,108 @@ +.. _cn_api_fluid_clip_GradientClipByNorm: + +GradientClipByNorm +------------------------------- + +.. py:class:: paddle.fluid.clip.GradientClipByNorm(clip_norm, need_clip=None) + + + + +将输入的多维Tensor :math:`X` 的L2范数限制在 ``clip_norm`` 范围之内。 + +- 如果L2范数大于 ``clip_norm`` ,则该 Tensor 会乘以一个系数进行压缩 + +- 如果L2范数小于或等于 ``clip_norm`` ,则不会进行任何操作。 + +输入的 Tensor 不是从该类里传入, 而是默认会选择 ``Program`` 中全部的梯度,如果 ``need_clip`` 不为None,则可以只选择部分参数进行梯度裁剪。 + +该类需要在初始化 ``optimizer`` 时进行设置后才能生效,可参看 ``optimizer`` 文档(例如: :ref:`cn_api_fluid_optimizer_SGDOptimizer` )。 + +裁剪公式如下: + +.. math:: + + Out= + \left\{ + \begin{aligned} + & X & & if (norm(X) \leq clip\_norm)\\ + & \frac{clip\_norm∗X}{norm(X)} & & if (norm(X) > clip\_norm) \\ + \end{aligned} + \right. + + +其中 :math:`norm(X)` 代表 :math:`X` 的L2范数 + +.. 
math:: + \\norm(X) = (\sum_{i=1}^{n}|x_i|^2)^{\frac{1}{2}}\\ + +参数: + - **clip_norm** (float) - 所允许的二范数最大值。 + - **need_clip** (function, optional) - 类型: 函数。用于指定需要梯度裁剪的参数,该函数接收一个 ``Parameter`` ,返回一个 ``bool`` (True表示需要裁剪,False不需要裁剪)。默认为None,此时会裁剪网络中全部参数。 + +**代码示例1:静态图** + +.. code-block:: python + + import paddle + import paddle.fluid as fluid + import numpy as np + + main_prog = fluid.Program() + startup_prog = fluid.Program() + with fluid.program_guard( + main_program=main_prog, startup_program=startup_prog): + image = fluid.data( + name='x', shape=[-1, 2], dtype='float32') + predict = fluid.layers.fc(input=image, size=3, act='relu') #可训练参数: fc_0.w.0, fc_0.b.0 + loss = fluid.layers.mean(predict) + + # 裁剪网络中全部参数: + clip = fluid.clip.GradientClipByNorm(clip_norm=1.0) + + # 仅裁剪参数fc_0.w_0时: + # 为need_clip参数传入一个函数fileter_func,fileter_func接收参数的类型为Parameter,返回类型为bool + # def fileter_func(Parameter): + # # 可以较为方便的通过Parameter.name判断(name可以在fluid.ParamAttr中设置,默认为fc_0.w_0、fc_0.b_0) + # return Parameter.name=="fc_0.w_0" + # clip = fluid.clip.GradientClipByNorm(clip_norm=1.0, need_clip=fileter_func) + + sgd_optimizer = fluid.optimizer.SGDOptimizer(learning_rate=0.1, grad_clip=clip) + sgd_optimizer.minimize(loss) + + place = fluid.CPUPlace() + exe = fluid.Executor(place) + x = np.random.uniform(-100, 100, (10, 2)).astype('float32') + exe.run(startup_prog) + out = exe.run(main_prog, feed={'x': x}, fetch_list=loss) + + +**代码示例2:动态图** + +.. code-block:: python + + import paddle + import paddle.fluid as fluid + + with fluid.dygraph.guard(): + linear = fluid.dygraph.Linear(10, 10) #可训练参数: linear_0.w.0, linear_0.b.0 + inputs = fluid.layers.uniform_random([32, 10]).astype('float32') + out = linear(fluid.dygraph.to_variable(inputs)) + loss = fluid.layers.reduce_mean(out) + loss.backward() + + # 裁剪网络中全部参数: + clip = fluid.clip.GradientClipByNorm(clip_norm=1.0) + + # 仅裁剪参数linear_0.w_0时: + # 为need_clip参数传入一个函数fileter_func,fileter_func接收参数的类型为ParamBase,返回类型为bool + # def fileter_func(ParamBase): + # # 可以通过ParamBase.name判断(name可以在fluid.ParamAttr中设置,默认为linear_0.w_0、linear_0.b_0) + # return ParamBase.name == "linear_0.w_0" + # # 注:linear.weight、linear.bias能分别返回dygraph.Linear层的权重与偏差,也可以此来判断 + # return ParamBase.name == linear.weight.name + # clip = fluid.clip.GradientClipByNorm(clip_norm=1.0, need_clip=fileter_func) + + sgd_optimizer = fluid.optimizer.SGD( + learning_rate=0.1, parameter_list=linear.parameters(), grad_clip=clip) + sgd_optimizer.minimize(loss) \ No newline at end of file diff --git a/doc/paddle/api/paddle/fluid/clip/GradientClipByValue_cn.rst b/doc/paddle/api/paddle/fluid/clip/GradientClipByValue_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d2d1482d9af9d59591887768c3715a3fe090eecc --- /dev/null +++ b/doc/paddle/api/paddle/fluid/clip/GradientClipByValue_cn.rst @@ -0,0 +1,97 @@ +.. _cn_api_fluid_clip_GradientClipByValue: + +GradientClipByValue +------------------------------- + +.. 
py:class:: paddle.fluid.clip.GradientClipByValue(max, min=None, need_clip=None) + + + + + +将输入的多维Tensor :math:`X` 的值限制在 [min, max] 范围。 + +输入的 Tensor 不是从该类里传入, 而是默认会选择 ``Program`` 中全部的梯度,如果 ``need_clip`` 不为None,则可以只选择部分参数进行梯度裁剪。 + +该类需要在初始化 ``optimizer`` 时进行设置后才能生效,可参看 ``optimizer`` 文档(例如: :ref:`cn_api_fluid_optimizer_SGDOptimizer` )。 + +给定一个 Tensor ``t`` ,该操作将它的值压缩到 ``min`` 和 ``max`` 之间 + +- 任何小于 ``min`` 的值都被设置为 ``min`` + +- 任何大于 ``max`` 的值都被设置为 ``max`` + +参数: + - **max** (foat) - 要修剪的最大值。 + - **min** (float,optional) - 要修剪的最小值。如果用户没有设置,将被自动设置为 ``-max`` (此时 ``max`` 必须大于0)。 + - **need_clip** (function, optional) - 类型: 函数。用于指定需要梯度裁剪的参数,该函数接收一个 ``Parameter`` ,返回一个 ``bool`` (True表示需要裁剪,False不需要裁剪)。默认为None,此时会裁剪网络中全部参数。 + +**代码示例1:静态图** + +.. code-block:: python + + import paddle + import paddle.fluid as fluid + import numpy as np + + main_prog = fluid.Program() + startup_prog = fluid.Program() + with fluid.program_guard( + main_program=main_prog, startup_program=startup_prog): + image = fluid.data( + name='x', shape=[-1, 2], dtype='float32') + predict = fluid.layers.fc(input=image, size=3, act='relu') #可训练参数: fc_0.w.0, fc_0.b.0 + loss = fluid.layers.mean(predict) + + # 裁剪网络中全部参数: + clip = fluid.clip.GradientClipByValue(min=-1, max=1) + + # 仅裁剪参数fc_0.w_0时: + # 为need_clip参数传入一个函数fileter_func,fileter_func接收参数的类型为Parameter,返回类型为bool + # def fileter_func(Parameter): + # # 可以较为方便的通过Parameter.name判断(name可以在fluid.ParamAttr中设置,默认为fc_0.w_0、fc_0.b_0) + # return Parameter.name=="fc_0.w_0" + # clip = fluid.clip.GradientClipByValue(min=-1, max=1, need_clip=fileter_func) + + sgd_optimizer = fluid.optimizer.SGDOptimizer(learning_rate=0.1, grad_clip=clip) + sgd_optimizer.minimize(loss) + + place = fluid.CPUPlace() + exe = fluid.Executor(place) + x = np.random.uniform(-100, 100, (10, 2)).astype('float32') + exe.run(startup_prog) + out = exe.run(main_prog, feed={'x': x}, fetch_list=loss) + + +**代码示例2:动态图** + +.. code-block:: python + + import paddle + import paddle.fluid as fluid + + with fluid.dygraph.guard(): + linear = fluid.dygraph.Linear(10, 10) #可训练参数: linear_0.w.0, linear_0.b.0 + inputs = fluid.layers.uniform_random([32, 10]).astype('float32') + out = linear(fluid.dygraph.to_variable(inputs)) + loss = fluid.layers.reduce_mean(out) + loss.backward() + + # 裁剪网络中全部参数: + clip = fluid.clip.GradientClipByValue(min=-1, max=1) + + # 仅裁剪参数linear_0.w_0时: + # 为need_clip参数传入一个函数fileter_func,fileter_func接收参数的类型为ParamBase,返回类型为bool + # def fileter_func(ParamBase): + # # 可以通过ParamBase.name判断(name可以在fluid.ParamAttr中设置,默认为linear_0.w_0、linear_0.b_0) + # return ParamBase.name == "linear_0.w_0" + # # 注:linear.weight、linear.bias能分别返回dygraph.Linear层的权重与偏差,可以此来判断 + # return ParamBase.name == linear.weight.name + # clip = fluid.clip.GradientClipByValue(min=-1, max=1, need_clip=fileter_func) + + sgd_optimizer = fluid.optimizer.SGD( + learning_rate=0.1, parameter_list=linear.parameters(), grad_clip=clip) + sgd_optimizer.minimize(loss) + + + diff --git a/doc/paddle/api/paddle/fluid/clip/set_gradient_clip_cn.rst b/doc/paddle/api/paddle/fluid/clip/set_gradient_clip_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c677a992c46f6686c66af051e6a7abe00fe9e758 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/clip/set_gradient_clip_cn.rst @@ -0,0 +1,83 @@ +.. _cn_api_fluid_clip_set_gradient_clip: + +set_gradient_clip +------------------------------- + + +.. py:function:: paddle.fluid.clip.set_gradient_clip(clip, param_list=None, program=None) + + + + +.. 
warning:: + 此API对位置使用的要求较高,其必须位于组建网络之后, ``minimize`` 之前,因此在未来版本中可能被删除,故不推荐使用。推荐在 ``optimizer`` 初始化时设置梯度裁剪。 + 有三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。 + 如果在 ``optimizer`` 中设置过梯度裁剪,又使用了 ``set_gradient_clip`` ,``set_gradient_clip`` 将不会生效。 + +给指定参数做梯度裁剪。 + +参数: + - **clip** (GradientClipBase) - 梯度裁剪的策略,如 :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 等,用于描述具体的裁剪方法和属性。 + - **param_list** (list(Variable),可选) - 需要裁剪的参数列表,可以是参数或参数名称列表。默认值为None,表示裁剪 ``program`` 中的所有参数。 + - **program** (Program,可选) - 参数所在的Program。默认值为None,表示使用 :ref:`cn_api_fluid_default_main_program` 。 + +返回: 无。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + def network(): + image = fluid.layers.data(name='image', shape=[28], dtype='float32') + param_attr1 = fluid.ParamAttr("fc1_param") + fc1 = fluid.layers.fc(image, size=10, param_attr=param_attr1) + param_attr2 = fluid.ParamAttr("fc2_param") + fc2 = fluid.layers.fc(fc1, size=10, param_attr=param_attr2) + loss = fluid.layers.reduce_mean(fc2) + return loss + + + # network 1: clip all parameter gradient + with fluid.program_guard(fluid.Program(), fluid.Program()): + loss = network() + fluid.clip.set_gradient_clip( + fluid.clip.GradientClipByGlobalNorm(clip_norm=2.0)) + sgd = fluid.optimizer.SGD(learning_rate=1e-3) + sgd.minimize(loss) + + # network 2: clip parameter gradient by name + with fluid.program_guard(fluid.Program(), fluid.Program()): + loss = network() + fluid.clip.set_gradient_clip( + fluid.clip.GradientClipByValue(min=-1.0, max=1.0), + param_list=["fc1_param", "fc2_param"]) + sgd = fluid.optimizer.SGD(learning_rate=1e-3) + sgd.minimize(loss) + + # network 3: clip parameter gradient by var + with fluid.program_guard(fluid.Program(), fluid.Program()): + loss = network() + param_var1 = fluid.default_main_program().global_block().var("fc1_param") + param_var2 = fluid.default_main_program().global_block().var("fc2_param") + fluid.clip.set_gradient_clip( + fluid.clip.GradientClipByValue(min=-1.0, max=1.0), + param_list=[param_var1, param_var2]) + sgd = fluid.optimizer.SGD(learning_rate=1e-3) + sgd.minimize(loss) + + # network 4: use set_gradient_clip and minimize(grad_clip=clip) together + with fluid.program_guard(fluid.Program(), fluid.Program()): + loss = network() + param_var1 = fluid.default_main_program().global_block().var("fc1_param") + param_var2 = fluid.default_main_program().global_block().var("fc2_param") + clip1 = fluid.clip.GradientClipByValue(min=-1.0, max=1.0) + clip2 = fluid.clip.GradientClipByNorm(clip_norm=1.0) + # 设置梯度裁剪策略:clip1 + fluid.clip.set_gradient_clip(clip1) + + # 设置梯度裁剪策略:clip2 + sgd = fluid.optimizer.SGD(learning_rate=1e-3, grad_clip=clip2) + sgd.minimize(loss) + # 有设置冲突时,set_gradient_clip将不会生效,将以clip2的策略进行梯度裁剪 diff --git a/doc/paddle/api/paddle/fluid/compiler/BuildStrategy_cn.rst b/doc/paddle/api/paddle/fluid/compiler/BuildStrategy_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..569baf4050b2af2f8d9f41c65108cdfbf38a4a70 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/compiler/BuildStrategy_cn.rst @@ -0,0 +1,197 @@ +.. _cn_api_fluid_BuildStrategy: + +BuildStrategy +------------------------------- + + +.. py:class:: paddle.fluid.BuildStrategy + + + + +``BuildStrategy`` 使用户更方便地控制 :ref:`cn_api_fluid_ParallelExecutor` 中计算图的建造方法,可通过设置 ``ParallelExecutor`` 中的 ``BuildStrategy`` 成员来实现此功能。 + +**代码示例** + +.. 
code-block:: python + + import os + import numpy as np + import paddle.fluid as fluid + + os.environ["CPU_NUM"] = '2' + places = fluid.cpu_places() + + data = fluid.layers.data(name="x", shape=[1], dtype="float32") + hidden = fluid.layers.fc(input=data, size=10) + loss = fluid.layers.mean(hidden) + fluid.optimizer.SGD(learning_rate=0.01).minimize(loss) + + build_strategy = fluid.BuildStrategy() + build_strategy.enable_inplace = True + build_strategy.memory_optimize = True + build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce + program = fluid.compiler.CompiledProgram(fluid.default_main_program()) + program = program.with_data_parallel(loss_name=loss.name, + build_strategy=build_strategy, + places=places) + + +.. py:attribute:: debug_graphviz_path + +str类型。表示以graphviz格式向文件中写入计算图的路径,有利于调试。默认值为空字符串。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + build_strategy = fluid.BuildStrategy() + build_strategy.debug_graphviz_path = "./graph" + + +.. py:attribute:: enable_sequential_execution + +bool类型。如果设置为True,则算子的执行顺序将与算子定义的执行顺序相同。默认为False。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + build_strategy = fluid.BuildStrategy() + build_strategy.enable_sequential_execution = True + + +.. py:attribute:: fuse_broadcast_ops + +bool类型。表明是否融合(fuse) broadcast ops。该选项指在Reduce模式下有效,使程序运行更快。默认为False。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + build_strategy = fluid.BuildStrategy() + build_strategy.fuse_broadcast_ops = True + + +.. py:attribute:: fuse_elewise_add_act_ops + +bool类型。表明是否融合(fuse) elementwise_add_op和activation_op。这会使整体执行过程更快。默认为False。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + build_strategy = fluid.BuildStrategy() + build_strategy.fuse_elewise_add_act_ops = True + + +.. py:attribute:: fuse_relu_depthwise_conv + +bool类型。表明是否融合(fuse) relu和depthwise_conv2d,节省GPU内存并可能加速执行过程。此选项仅适用于GPU设备。默认为False。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + build_strategy = fluid.BuildStrategy() + build_strategy.fuse_relu_depthwise_conv = True + +.. py:attribute:: gradient_scale_strategy + +``fluid.BuildStrategy.GradientScaleStrategy`` 类型。在 ``ParallelExecutor`` 中,存在三种定义loss对应梯度( *loss@grad* )的方式,分别为 ``CoeffNumDevice``, ``One`` 与 ``Customized``。默认情况下, ``ParallelExecutor`` 根据设备数目来设置 *loss@grad* 。如果用户需要自定义 *loss@grad* ,可以选择 ``Customized`` 方法。默认为 ``CoeffNumDevice`` 。 + +**代码示例** + +.. 
code-block:: python + + import os + import numpy as np + import paddle.fluid as fluid + import paddle.fluid.compiler as compiler + + use_cuda = True + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + exe = fluid.Executor(place) + + # NOTE: 如果你使用CPU计算,需要指定CPU_NUM, 否则,fluid + # 将使用所有的核的数目作为CPU_NUM, + # 这种情况下,输入的batch size应该大于CPU_NUM, 否则, + # 进程将会因为异常而失败。 + if not use_cuda: + os.environ['CPU_NUM'] = str(2) + places = fluid.cpu_places() + else: + places = places = fluid.cuda_places() + + data = fluid.layers.data(name='X', shape=[1], dtype='float32') + hidden = fluid.layers.fc(input=data, size=10) + loss = fluid.layers.mean(hidden) + fluid.optimizer.SGD(learning_rate=0.01).minimize(loss) + + fluid.default_startup_program().random_seed=1 + exe.run(fluid.default_startup_program()) + + build_strategy = fluid.BuildStrategy() + build_strategy.gradient_scale_strategy = \ + fluid.BuildStrategy.GradientScaleStrategy.Customized + compiled_prog = compiler.CompiledProgram( + fluid.default_main_program()).with_data_parallel( + loss_name=loss.name, build_strategy=build_strategy, + places = places) + + dev_count = len(places) + x = np.random.random(size=(10, 1)).astype('float32') + loss_grad = np.ones((dev_count)).astype("float32") * 0.01 + loss_grad_name = loss.name+"@GRAD" + loss_data = exe.run(compiled_prog, + feed={"X": x, loss_grad_name : loss_grad}, + fetch_list=[loss.name, loss_grad_name]) + +.. py:attribute:: memory_optimize + +bool类型或None。设为True时可用于减少总内存消耗,False表示不使用,None表示框架会自动选择使用或者不使用优化策略。当前,None意味着当GC不能使用时,优化策略将被使用。默认为None。 + +.. py:attribute:: reduce_strategy + +``fluid.BuildStrategy.ReduceStrategy`` 类型。在 ``ParallelExecutor`` 中,存在两种参数梯度聚合策略,即 ``AllReduce`` 和 ``Reduce`` 。如果用户需要在所有执行设备上独立地进行参数更新,可以使用 ``AllReduce`` 。如果使用 ``Reduce`` 策略,所有参数的优化将均匀地分配给不同的执行设备,随之将优化后的参数广播给其他执行设备。 +默认值为 ``AllReduce`` 。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + build_strategy = fluid.BuildStrategy() + build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce + +.. py:attribute:: remove_unnecessary_lock + +bool类型。设置True会去除GPU操作中的一些锁操作, ``ParallelExecutor`` 将运行得更快,默认为True。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + build_strategy = fluid.BuildStrategy() + build_strategy.remove_unnecessary_lock = True + + +.. py:attribute:: sync_batch_norm + +bool类型。表示是否使用同步的批正则化,即在训练阶段通过多个设备同步均值和方差。当前的实现不支持FP16训练和CPU。并且目前**仅支持**仅在一台机器上进行同步式批正则。默认为 False。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + build_strategy = fluid.BuildStrategy() + build_strategy.sync_batch_norm = True + + diff --git a/doc/paddle/api/paddle/fluid/compiler/CompiledProgram_cn.rst b/doc/paddle/api/paddle/fluid/compiler/CompiledProgram_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5e1fa7e7150e0046700570f66d400ca240260cb4 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/compiler/CompiledProgram_cn.rst @@ -0,0 +1,113 @@ +.. _cn_api_fluid_CompiledProgram: + +CompiledProgram +------------------------------- + + +.. 
py:class:: paddle.fluid.CompiledProgram(program_or_graph, build_strategy=None) + + + + +CompiledProgram根据 `build_strategy` 的配置将输入的Program或Graph进行转换和优化,例如:计算图中算子融合、计算图执行过程中开启内存/显存优化等,关于build_strategy更多信息。请参阅 ``fluid.BuildStrategy`` 。 + +参数: + - **program_or_graph** (Graph|Program): 该参数为被执行的Program或Graph。 + - **build_strategy** (BuildStrategy): 通过配置build_strategy,对计算图进行转换和优化,例如:计算图中算子融合、计算图执行过程中开启内存/显存优化等。关于build_strategy更多信息,请参阅 ``fluid.BuildStrategy`` 。 默认为None。 + +返回:初始化后的 ``CompiledProgram`` 对象 + +返回类型:CompiledProgram + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy + + place = fluid.CUDAPlace(0) # fluid.CPUPlace() + exe = fluid.Executor(place) + + data = fluid.data(name='X', shape=[None, 1], dtype='float32') + hidden = fluid.layers.fc(input=data, size=10) + loss = fluid.layers.mean(hidden) + fluid.optimizer.SGD(learning_rate=0.01).minimize(loss) + + exe.run(fluid.default_startup_program()) + compiled_prog = fluid.CompiledProgram( + fluid.default_main_program()) + + x = numpy.random.random(size=(10, 1)).astype('float32') + loss_data, = exe.run(compiled_prog, + feed={"X": x}, + fetch_list=[loss.name]) + + +.. py:method:: with_data_parallel(loss_name=None, build_strategy=None, exec_strategy=None, share_vars_from=None, places=None) + +该接口用于将输入的Program或Graph进行转换,以便通过数据并行模式运行该模型。用户可以通过 `build_strategy` 和 `exec_strategy` 设置计算图构建和计算图执行过程中可以进行的一些优化,例如:将梯度聚合的AllReduce操作进行融合、指定计算图运行过程中使用的线程池大小等。**注意:如果在构建CompiledProgram和调用with_data_parallel时都指定了build_strategy,在CompiledProgram中的build_strategy会被复写,因此,如果是数据并行训练,建议在调用with_data_parallel接口时设置build_strategy**。 + +参数: + - **loss_name** (str) - 该参数为模型最后得到的损失变量的名字,**注意:如果是模型训练,必须设置loss_name,否则计算结果可能会有问题。** 默认为:None。 + - **build_strategy** (BuildStrategy): 通过配置build_strategy,对计算图进行转换和优化,例如:计算图中算子融合、计算图执行过程中开启内存/显存优化等。关于build_strategy更多的信息,请参阅 ``fluid.BuildStrategy`` 。 默认为:None。 + - **exec_strategy** (ExecutionStrategy) - 通过exec_strategy指定执行计算图过程可以调整的选项,例如线程池大小等。 关于exec_strategy更多信息,请参阅 ``fluid.ExecutionStrategy`` 。 默认为:None。 + - **share_vars_from** (CompiledProgram) - 如果设置了share_vars_from,当前的CompiledProgram将与share_vars_from指定的CompiledProgram共享参数值。需要设置该参数的情况:模型训练过程中需要进行模型测试,并且训练和测试都是采用数据并行模式,那么测试对应的CompiledProgram在调用with_data_parallel时,需要将share_vars_from设置为训练对应的CompiledProgram。由于CompiledProgram只有在第一次执行时才会将变量分发到其他设备上,因此share_vars_from指定的CompiledProgram必须在当前CompiledProgram之前运行。默认为:None。 + - **places** (list(CUDAPlace)|list(CPUPlace)) - 该参数指定模型运行所在的设备。如果希望在GPU0和GPU1上运行,places为[fluid.CUDAPlace(0), fluid.CUDAPlace(1)];如果希望使用2个CPU运行,places为[fluid.CPUPlace()] * 2。 如果没有设置该参数,即该参数为None,模型执行时,将从环境变量中获取可用的设备:如果使用GPU,模型执行时,从环境变量FLAGS_selected_gpus或CUDA_VISIBLE_DEVICES中获取当前可用的设备ID;如果使用CPU,模型执行时,从环境变量CPU_NUM中获取当前可利用的CPU个数。例如:export CPU_NUM=4,如果没有设置该环境变量,执行器会在环境变量中添加该变量,并将其值设为1。默认为:None。 + +返回:配置之后的 ``CompiledProgram`` 对象 + +返回类型:CompiledProgram + +.. note:: + 1. 如果只是进行多卡测试,不需要设置loss_name以及share_vars_from。 + 2. 如果程序中既有模型训练又有模型测试,则构建模型测试所对应的CompiledProgram时必须设置share_vars_from,否则模型测试和模型训练所使用的参数是不一致。 + + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + import numpy + import os + + use_cuda = True + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + parallel_places = [fluid.CUDAPlace(0), fluid.CUDAPlace(1)] if use_cuda else [fluid.CPUPlace()] * 2 + + # 注意:如果你使用CPU运行程序,需要具体设置CPU_NUM, + # 否则fluid会把逻辑核的所有数目设为CPU_NUM, + # 在这种情况下,输入的batch size应大于CPU_NUM, + # 否则程序会异常中断。 + if not use_cuda: + os.environ['CPU_NUM'] = str(2) + + exe = fluid.Executor(place) + + data = fluid.data(name='X', shape=[None, 1], dtype='float32') + hidden = fluid.layers.fc(input=data, size=10) + loss = fluid.layers.mean(hidden) + + test_program = fluid.default_main_program().clone(for_test=True) + fluid.optimizer.SGD(learning_rate=0.01).minimize(loss) + + exe.run(fluid.default_startup_program()) + compiled_train_prog = fluid.CompiledProgram( + fluid.default_main_program()).with_data_parallel( + loss_name=loss.name, places=parallel_places) + # 注意:如果此处不设置share_vars_from=compiled_train_prog, + # 测试过程中用的参数与训练使用的参数是不一致 + compiled_test_prog = fluid.CompiledProgram( + test_program).with_data_parallel( + share_vars_from=compiled_train_prog, + places=parallel_places) + + train_data = numpy.random.random(size=(10, 1)).astype('float32') + loss_data, = exe.run(compiled_train_prog, + feed={"X": train_data}, + fetch_list=[loss.name]) + test_data = numpy.random.random(size=(10, 1)).astype('float32') + loss_data, = exe.run(compiled_test_prog, + feed={"X": test_data}, + fetch_list=[loss.name]) \ No newline at end of file diff --git a/doc/paddle/api/paddle/fluid/compiler/ExecutionStrategy_cn.rst b/doc/paddle/api/paddle/fluid/compiler/ExecutionStrategy_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8101ad7100aae53aabc229470ec3dca5cda3ccec --- /dev/null +++ b/doc/paddle/api/paddle/fluid/compiler/ExecutionStrategy_cn.rst @@ -0,0 +1,64 @@ +.. _cn_api_fluid_ExecutionStrategy: + +ExecutionStrategy +------------------------------- + + +.. py:class:: paddle.fluid.ExecutionStrategy + + + + +通过设置 ``ExecutionStrategy`` 中的选项,用户可以对执行器的执行配置进行调整,比如设置执行器中线程池的大小等。 + +返回:初始化后的ExecutionStrategy的实例 + +返回类型:ExecutionStrategy + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + x = fluid.layers.data(name='x', shape=[13], dtype='float32') + y = fluid.layers.data(name='y', shape=[1], dtype='float32') + y_predict = fluid.layers.fc(input=x, size=1, act=None) + + cost = fluid.layers.square_error_cost(input=y_predict, label=y) + avg_loss = fluid.layers.mean(cost) + + sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) + sgd_optimizer.minimize(avg_loss) + + exec_strategy = fluid.ExecutionStrategy() + exec_strategy.num_threads = 4 + + train_exe = fluid.ParallelExecutor(use_cuda=False, + loss_name=avg_loss.name, + exec_strategy=exec_strategy) + + +.. py:attribute:: num_iteration_per_drop_scope + +int型成员。该选项表示间隔多少次迭代之后清理一次临时变量。模型运行过程中,生成的中间临时变量将被放到local execution scope中,为了避免对临时变量频繁的申请与释放,通常将其设为较大的值(比如10或者100)。默认值为100。 + + +.. py:attribute:: num_iteration_per_run + +int型成员。它配置了当用户在python脚本中调用pe.run()时执行器会执行的迭代次数。Executor每次调用,会进行num_iteration_per_run次训练,它会使整体执行过程更快。 + +.. 
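note::
+    上面两个属性(num_iteration_per_drop_scope 与 num_iteration_per_run)可直接按如下方式设置;下述数值仅为示例值,可根据任务情况调整。
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+
+    exec_strategy = fluid.ExecutionStrategy()
+    # 每间隔 100 次迭代清理一次 local execution scope 中的临时变量
+    exec_strategy.num_iteration_per_drop_scope = 100
+    # 每次调用执行器时连续执行 10 个迭代
+    exec_strategy.num_iteration_per_run = 10
+
+.. 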
py:attribute:: num_threads + +int型成员。该选项表示当前 ``Executor`` 的线程池(thread pool)的大小, 此线程池可用来并发执行program中的operator(算子,运算)。如果 :math:`num\_threads=1` ,则所有的operator将一个接一个地执行,但在不同的program重复周期(iterations)中执行顺序可能不同。如果该选项没有被设置,则在 ``Executor`` 中,它会依据设备类型(device type)、设备数目(device count)而设置为相应值。对GPU,:math:`num\_threads=device\_count∗4` ;对CPU, :math:`num\_threads=CPU\_NUM∗4` 。在 ``Executor`` 中有关于 :math:`CPU\_NUM` 的详细解释。如果没有设置 :math:`CPU\_NUM` ,则设置默认值为1, 并提示用户进行 :math:`CPU\_NUM` 的设置。 + + + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/contrib/BeamSearchDecoder_cn.rst b/doc/paddle/api/paddle/fluid/contrib/BeamSearchDecoder_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0aeb28a6ffe61eca841bd6b8ed6a13d6f2bba537 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/contrib/BeamSearchDecoder_cn.rst @@ -0,0 +1,168 @@ +.. _cn_api_fluid_layers_BeamSearchDecoder: + +BeamSearchDecoder +------------------------------- + + + +.. py:class:: paddle.fluid.layers.BeamSearchDecoder(cell, start_token, end_token, beam_size, embedding_fn=None, output_fn=None) + + + + +带beam search解码策略的解码器。该接口包装一个cell来计算概率,然后执行一个beam search步骤计算得分,并为每个解码步骤选择候选输出。更多详细信息请参阅 `Beam search `_ + +**注意** 在使用beam search解码时,cell的输入和状态将被扩展到 :math:`beam\_size` ,得到 :math:`[batch\_size * beam\_size, ...]` 一样的形状,这个操作在BeamSearchDecoder中自动完成,因此,其他任何在 :code:`cell.call` 中使用的tensor,如果形状为 :math:`[batch\_size, ...]` ,都必须先手动使用 :code:`BeamSearchDecoder.tile_beam_merge_with_batch` 接口扩展。最常见的情况是带注意机制的编码器输出。 + +参数: + - **cell** (RNNCell) - RNNCell的实例或者具有相同接口定义的对象。 + - **start_token** (int) - 起始标记id。 + - **end_token** (int) - 结束标记id。 + - **beam_size** (int) - 在beam search中使用的beam宽度。 + - **embedding_fn** (可选) - 处理选中的候选id的接口。它通常是一个将词id转换为词嵌入的嵌入层,其返回值将作为 :code:`cell.call` 接口的 :code:`input` 参数。**注意** ,这里要使用 :ref:`cn_api_fluid_embedding` 而非 :ref:`cn_api_fluid_layers_embedding`,因为选中的id的形状是 :math:`[batch\_size, beam\_size]` ,如果使用后者则还需要在这里提供unsqueeze。如果 :code:`embedding_fn` 未提供,则必须在 :code:`cell.call` 中实现词嵌入转换。默认值None。 + - **output_fn** (可选) - 处理cell输出的接口,在计算得分和选择候选标记id之前使用。默认值None。 + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + from paddle.fluid.layers import GRUCell, BeamSearchDecoder + trg_embeder = lambda x: fluid.embedding( + x, size=[10000, 128], param_attr=fluid.ParamAttr(name="trg_embedding")) + output_layer = lambda x: layers.fc(x, + size=10000, + num_flatten_dims=len(x.shape) - 1, + param_attr=fluid.ParamAttr(name= + "output_w"), + bias_attr=False) + decoder_cell = GRUCell(hidden_size=128) + decoder = BeamSearchDecoder(decoder_cell, + start_token=0, + end_token=1, + beam_size=4, + embedding_fn=trg_embeder, + output_fn=output_layer) + + +.. py:method:: tile_beam_merge_with_batch(x, beam_size) + +扩展tensor的batch维度。此函数的输入是形状为 :math:`[batch\_size, s_0, s_1, ...]` 的tensor t,由minibatch中的样本 :math:`t[0], ..., t[batch\_size - 1]` 组成。将其扩展为形状是 :math:`[batch\_size * beam\_size, s_0, s_1, ...]` 的tensor,由 :math:`t[0], t[0], ..., t[1], t[1], ...` 组成, 每个minibatch中的样本重复 :math:`beam\_size` 次。 + +参数: + - **x** (Variable) - 形状为 :math:`[batch\_size, ...]` 的tenosr。数据类型应为float32,float64,int32,int64或bool。 + - **beam_size** (int) - 在beam search中使用的beam宽度。 + +返回:形状为 :math:`[batch\_size * beam\_size, ...]` 的tensor,其数据类型与 :code:`x` 相同。 + +返回类型:Variable + +.. 
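note::
+    下面给出 tile_beam_merge_with_batch 的一个用法示意(其中 encoder_output 的名称与形状均为假设值,实际使用时应替换为真实的编码器输出;此处通过类名调用,也可通过已构建的 decoder 实例调用)。
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+    from paddle.fluid.layers import BeamSearchDecoder
+
+    # 假设编码器输出形状为 [batch_size, seq_len, hidden_size]
+    encoder_output = fluid.data(
+        name="encoder_output", shape=[-1, 32, 128], dtype="float32")
+    # 扩展 batch 维,得到形状为 [batch_size * beam_size, seq_len, hidden_size] 的 tensor,
+    # 之后可作为 cell.call 的输入使用(例如注意力机制中的编码器输出)
+    expanded_output = BeamSearchDecoder.tile_beam_merge_with_batch(
+        encoder_output, beam_size=4)
+
+.. 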
py:method:: _split_batch_beams(x) + +将形状为 :math:`[batch\_size * beam\_size, ...]` 的tensor变换为形状为 :math:`[batch\_size, beam\_size, ...]` 的新tensor。 + +参数: + - **x** (Variable) - 形状为 :math:`[batch\_size * beam\_size, ...]` 的tenosr。数据类型应为float32,float64,int32,int64或bool。 + +返回:形状为 :math:`[batch\_size, beam\_size, ...]` 的tensor,其数据类型与 :code:`x` 相同。 + +返回类型:Variable + +.. py:method:: _merge_batch_beams(x) + +将形状为 :math:`[batch\_size, beam\_size, ...]` 的tensor变换为形状为 :math:`[batch\_size * beam\_size,...]` 的新tensor。 + +参数: + - **x** (Variable) - 形状为 :math:`[batch\_size, beam_size,...]` 的tenosr。数据类型应为float32,float64,int32,int64或bool。 + +返回:形状为 :math:`[batch\_size * beam\_size, ...]` 的tensor,其数据类型与 :code:`x` 相同。 + +返回类型:Variable + +.. py:method:: _expand_to_beam_size(x) + +此函数输入形状为 :math:`[batch\_size,s_0,s_1,...]` 的tensor t,由minibatch中的样本 :math:`t[0],...,t[batch\_size-1]` 组成。将其扩展为形状 :math:`[ batch\_size,beam\_size,s_0,s_1,...]` 的tensor,由 :math:`t[0],t[0],...,t[1],t[1],...` 组成,其中每个minibatch中的样本重复 :math:`beam\_size` 次。 + +参数: + - **x** (Variable) - 形状为 :math:`[batch\_size, ...]` 的tenosr。数据类型应为float32,float64,int32,int64或bool。 + +返回:具有与 :code:`x` 相同的形状和数据类型的tensor,其中未完成的beam保持不变,而已完成的beam被替换成特殊的tensor(tensor中所有概率质量被分配给EOS标记)。 + +返回类型:Variable + +.. py:method:: _mask_probs(probs, finished) + +屏蔽对数概率。该函数使已完成的beam将所有概率质量分配给EOS标记,而未完成的beam保持不变。 + +参数: + - **probs** (Variable) - 形状为 :math:`[batch\_size,beam\_size,vocab\_size]` 的tensor,表示对数概率。其数据类型应为float32。 + - **finish** (Variable) - 形状为 :math:`[batch\_size,beam\_size]` 的tensor,表示所有beam的完成状态。其数据类型应为bool。 + +返回:具有与 :code:`x` 相同的形状和数据类型的tensor,其中未完成的beam保持不变,而已完成的beam被替换成特殊的tensor(tensor中所有概率质量被分配给EOS标记)。 + +返回类型:Variable + +.. py:method:: _gather(x, indices, batch_size) + +对tensor :code:`x` 根据索引 :code:`indices` 收集。 + +参数: + - **x** (Variable) - 形状为 :math:`[batch\_size, beam\_size,...]` 的tensor。 + - **index** (Variable) - 一个形状为 :math:`[batch\_size, beam\_size]` 的int64 tensor,表示我们用来收集的索引。 + - **batch_size** (Variable) - 形状为 :math:`[1]` 的tensor。其数据类型应为int32或int64。 + +返回:具有与 :code:``x` 相同的形状和数据类型的tensor,表示收集后的tensor。 + +返回类型:Variable + +.. py:method:: initialize(initial_cell_states) + +初始化BeamSearchDecoder。 + +参数: + - **initial_cell_states** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构。调用者提供的参数。 + +返回:一个元组 :code:`(initial_inputs, initial_states, finished)`。:code:`initial_inputs` 是一个tensor,当 :code:`embedding_fn` 为None时,该tensor t的形状为 :math:`[batch\_size,beam\_size]` ,值为 :code:`start_token` ;否则使用 :code:`embedding_fn(t)` 返回的值。:code:`initial_states` 是tensor变量的嵌套结构(命名元组,字段包括 :code:`cell_states,log_probs,finished,lengths`),其中 :code:`log_probs,finished,lengths` 都含有一个tensor,形状为 :math:`[batch\_size, beam\_size]`,数据类型为float32,bool,int64。:code:`cell_states` 具有与输入参数 :code:`initial_cell_states` 相同结构的值,但形状扩展为 :math:`[batch\_size,beam\_size,...]`。 :code:`finished` 是一个布尔型tensor,由False填充,形状为 :math:`[batch\_size,beam\_size]`。 + +返回类型:tuple + +.. 
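note::
+    initialize、step、finalize 等接口通常不需要用户手动调用,而是在解码循环中由 :code:`fluid.layers.dynamic_decode` 自动驱动。下面给出一个整体用法的示意(其中词表大小、hidden_size、encoder_output 的形状与 max_step_num 等均为假设的示例取值)。
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+    from paddle.fluid.layers import GRUCell, BeamSearchDecoder, dynamic_decode
+
+    # 构建 decoder,与本页开头类示例的构建方式基本一致
+    trg_embeder = lambda x: fluid.embedding(
+        x, size=[10000, 128], param_attr=fluid.ParamAttr(name="trg_embedding"))
+    output_layer = lambda x: fluid.layers.fc(x,
+                                             size=10000,
+                                             num_flatten_dims=len(x.shape) - 1,
+                                             param_attr=fluid.ParamAttr(name="output_w"),
+                                             bias_attr=False)
+    decoder_cell = GRUCell(hidden_size=128)
+    decoder = BeamSearchDecoder(decoder_cell,
+                                start_token=0,
+                                end_token=1,
+                                beam_size=4,
+                                embedding_fn=trg_embeder,
+                                output_fn=output_layer)
+
+    # 以假设的编码器输出构造初始状态,并由 dynamic_decode 驱动整个 beam search 解码过程
+    encoder_output = fluid.data(
+        name="encoder_output", shape=[-1, 32, 128], dtype="float32")
+    outputs = dynamic_decode(
+        decoder=decoder,
+        inits=decoder_cell.get_initial_states(encoder_output),
+        max_step_num=10)
+
+.. 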
py:method:: _beam_search_step(time, logits, next_cell_states, beam_state) + +计算得分并选择候选id。 + +参数: + - **time** (Variable) - 调用者提供的形状为[1]的tensor,表示当前解码的时间步长。其数据类型为int64。 + - **logits** (Variable) - 形状为 :math:`[batch\_size,beam\_size,vocab\_size]` 的tensor,表示当前时间步的logits。其数据类型为float32。 + - **next_cell_states** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构。它的结构,形状和数据类型与 :code:`initialize()` 的返回值 :code:`initial_states` 中的 :code:`cell_states` 相同。它代表该cell的下一个状态。 + - **beam_state** (Variable) - tensor变量的结构。在第一个解码步骤与 :code:`initialize()` 返回的 :code:`initial_states` 同,其他步骤与 :code:`step()` 返回的 :code:`beam_search_state` 相同。 + +返回:一个元组 :code:`(beam_search_output, beam_search_state)`。:code:`beam_search_output` 是tensor变量的命名元组,字段为 :code:`scores,predicted_ids parent_ids`。其中 :code:`scores,predicted_ids,parent_ids` 都含有一个tensor,形状为 :math:`[batch\_size,beam\_size]`,数据类型为float32 ,int64,int64。:code:`beam_search_state` 具有与输入参数 :code:`beam_state` 相同的结构,形状和数据类型。 + +返回类型:tuple + +.. py:method:: step(time, inputs, states, **kwargs) + +执行beam search解码步骤,该步骤使用 :code:`cell` 来计算概率,然后执行beam search步骤以计算得分并选择候选标记ID。 + +参数: + - **time** (Variable) - 调用者提供的形状为[1]的tensor,表示当前解码的时间步长。其数据类型为int64。。 + - **inputs** (Variable) - tensor变量。在第一个解码时间步时与由 :code:`initialize()` 返回的 :code:`initial_inputs` 相同,其他时间步与由 :code:`step()` 返回的 :code:`next_inputs` 相同。 + - **states** (Variable) - tensor变量的结构。在第一个解码时间步时与 :code:`initialize()` 返回的 :code:`initial_states` 相同,其他时间步与由 :code:`step()` 返回的 :code:`beam_search_state` 相同。 + - **kwargs** - 附加的关键字参数,由调用者提供。 + +返回:一个元组 :code:`(beam_search_output,beam_search_state,next_inputs,finish)` 。:code:`beam_search_state` 和参数 :code:`states` 具有相同的结构,形状和数据类型。 :code:`next_inputs` 与输入参数 :code:`inputs` 具有相同的结构,形状和数据类型。 :code:`beam_search_output` 是tensor变量的命名元组(字段包括 :code:`scores,predicted_ids,parent_ids` ),其中 :code:`scores,predicted_ids,parent_ids` 都含有一个tensor,形状为 :math:`[batch\_size,beam\_size]`,数据类型为float32 ,int64,int64。:code:`finished` 是一个bool类型的tensor,形状为 :math:`[batch\_size,beam\_size]`。 + +返回类型:tuple + +.. py:method:: finalize(outputs, final_states, sequence_lengths) + +使用 :code:`gather_tree` 沿beam search树回溯并构建完整的预测序列。 + +参数: + - **outputs** (Variable) - tensor变量组成的结构(命名元组),该结构和数据类型与 :code:`output_dtype` 相同。tensor将所有时间步的输出堆叠,因此具有形状 :math:`[time\_step,batch\_size,...]`。 + - **final_states** (Variable) - tensor变量组成的结构(命名元组)。它是 :code:`decoder.step` 在最后一个解码步骤返回的 :code:`next_states`,因此具有与任何时间步的 :code:`state` 相同的结构、形状和数据类型。 + - **sequence_lengths** (Variable) - tensor,形状为 :math:`[batch\_size,beam\_size]`,数据类型为int64。它包含解码期间确定的每个beam的序列长度。 + +返回:一个元组 :code:`(predicted_ids, final_states)`。:code:`predicted_ids` 是一个tensor,形状为 :math:`[time\_step,batch\_size,beam\_size]`,数据类型为int64。:code:`final_states` 与输入参数 :code:`final_states` 相同。 + +返回类型:tuple diff --git a/doc/paddle/api/paddle/fluid/cpu_places_cn.rst b/doc/paddle/api/paddle/fluid/cpu_places_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..124973fc786ad84108f2478809a735e6ce45a081 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/cpu_places_cn.rst @@ -0,0 +1,29 @@ +.. _cn_api_fluid_cpu_places: + +cpu_places +------------------------------- + +.. 
py:function:: paddle.fluid.cpu_places(device_count=None) + + + + +该接口创建 ``device_count`` 个 ``fluid.CPUPlace`` 对象,并返回所创建的对象列表。 + +如果 ``device_count`` 为 ``None``,则设备数目将由环境变量 ``CPU_NUM`` 确定。如果未设置 ``CPU_NUM`` 环境变量,则设备数目会默认设为1,也就是说, ``CPU_NUM=1``。 +``CPU_NUM`` 表示在当前任务中使用的设备数目。如果 ``CPU_NUM`` 与物理核心数相同,可以加速程序的运行。 + +参数: + - **device_count** (int,可选) - 设备数目。默认值为 ``None``。 + +返回: ``CPUPlace`` 的列表。 + +返回类型:list[fluid.CPUPlace] + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + cpu_places = fluid.cpu_places() + diff --git a/doc/paddle/api/paddle/fluid/create_lod_tensor_cn.rst b/doc/paddle/api/paddle/fluid/create_lod_tensor_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..386b0632f1a0256e8cad62c2143a05c7684ded1d --- /dev/null +++ b/doc/paddle/api/paddle/fluid/create_lod_tensor_cn.rst @@ -0,0 +1,49 @@ +.. _cn_api_fluid_create_lod_tensor: + + +create_lod_tensor +------------------------------- + +.. py:function:: paddle.fluid.create_lod_tensor(data, recursive_seq_lens, place) + + + + +从一个numpy数组、list或LoDTensor创建一个新的LoDTensor。 + +具体实现方法如下: + +1. 检查基于序列长度的LoD(length-based LoD),即参数中的 :code:`recursive_seq_lens` 是否正确。 + +2. 将 :code:`recursive_seq_lens` 转换为基于偏移量的LoD(offset-based LoD)。 + +3. 根据place参数,把所提供的 :code:`data` (numpy数组、list或LoDTensor)的数据复制到CPU或GPU上。 + +4. 将基于偏移量的LoD设置到输出的LoDTensor中。 + +假设我们想创建一个LoDTensor表示词的序列,其中每个词用一个整数id表示。若待创建的LoDTensor表示2个句子,其中一个句子包含2个单词,另一个句子包含3个单词。 + +那么, :code:`data` 为一个维度为(5, 1)的numpy整数数组; :code:`recursive_seq_lens` 为[[2, 3]],表示每个句子含的单词个数。在该接口内部,基于序列长度的 +:code:`recursive_seq_lens` [[2, 3]]会转换为为基于偏移量的LoD [[0, 2, 5]]。 + +请查阅 :ref:`cn_user_guide_lod_tensor` 了解更多关于LoD的介绍。 + +参数: + - **data** (numpy.ndarray|list|LoDTensor) - 表示LoDTensor数据的numpy数组、list或LoDTensor。 + - **recursive_seq_lens** (list[list[int]]) - 基于序列长度的LoD信息。 + - **place** (CPUPlace|CUDAPlace) - 表示返回的LoDTensor存储在CPU或GPU place中。 + +返回: 包含数据信息和序列长度信息的LoDTensor。 + +返回类型: LoDTensor + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + t = fluid.create_lod_tensor(np.ndarray([5, 30]), [[2, 3]], fluid.CPUPlace()) + + diff --git a/doc/paddle/api/paddle/fluid/create_random_int_lodtensor_cn.rst b/doc/paddle/api/paddle/fluid/create_random_int_lodtensor_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4f17c85af152087e2a1a74fbed1c5d45fe19e489 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/create_random_int_lodtensor_cn.rst @@ -0,0 +1,43 @@ +.. _cn_api_fluid_create_random_int_lodtensor: + + +create_random_int_lodtensor +------------------------------- + + +.. py:function:: paddle.fluid.create_random_int_lodtensor(recursive_seq_lens, base_shape, place, low, high) + + + + +创建一个包含随机整数的LoDTensor。 + +具体实现方法如下: + +1. 基于序列长度 :code:`recursive_seq_lens` 和 :code:`base_shape` 产生返回值的维度。返回值的第一维等于序列总长度,其余维度为 :code:`base_shape` 。 + +2. 创建一个包含随机整数的numpy数组,并作为 :code:`data` 参数传入 :ref:`cn_api_fluid_create_lod_tensor` 接口中创建LoDTensor返回。 + +假设我们想创建一个LoDTensor表示序列信息,共包含2个序列,维度分别为[2, 30]和[3, 30],那么序列长度 :code:`recursive_seq_lens` 传入[[2, 3]],:code:`base_shape` 传入[30](即除了序列长度以外的维度)。 +最后返回的LoDTensor的维度为[5, 30],其中第一维5为序列总长度,其余维度为 :code:`base_shape` 。 + +参数: + - **recursive_seq_lens** (list[list[int]]) - 基于序列长度的LoD信息。 + - **base_shape** (list[int]) - 除第一维以外输出结果的维度信息。 + - **place** (CPUPlace|CUDAPlace) - 表示返回的LoDTensor存储在CPU或GPU place中。 + - **low** (int) - 随机整数的下限值。 + - **high** (int) - 随机整数的上限值,必须大于或等于low。 + +返回: 包含随机整数数据信息和序列长度信息的LoDTensor,数值范围在[low, high]之间。 + +返回类型: LoDTensor + +**代码示例** + +.. 
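note::
+    下面的第一段代码为补充示意,演示如何查看所创建 LoDTensor 的 LoD 信息(与上文 [[2, 3]] 转换为 [[0, 2, 5]] 的说明对应);第二段为基础用法。
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+    import numpy as np
+
+    t = fluid.create_lod_tensor(np.ndarray([5, 30]), [[2, 3]], fluid.CPUPlace())
+    print(t.recursive_sequence_lengths())  # [[2, 3]]
+    print(t.lod())                         # 基于偏移量的 LoD: [[0, 2, 5]]
+    print(t.shape())                       # [5, 30]
+
+.. 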
code-block:: python + + import paddle.fluid as fluid + + t = fluid.create_random_int_lodtensor(recursive_seq_lens=[[2, 3]],base_shape=[30], place=fluid.CPUPlace(), low=0, high=10) + print(t.shape()) # [5, 30] + diff --git a/doc/paddle/api/paddle/fluid/cuda_pinned_places_cn.rst b/doc/paddle/api/paddle/fluid/cuda_pinned_places_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9c3955b528ce692e7c0d1ba3f6da0431080a7272 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/cuda_pinned_places_cn.rst @@ -0,0 +1,39 @@ +.. _cn_api_fluid_cuda_pinned_places: + +cuda_pinned_places +------------------------------- + + +.. py:function:: paddle.fluid.cuda_pinned_places(device_count=None) + + + + + + +该接口创建 ``device_count`` 个 ``fluid.CUDAPinnedPlace`` ( fluid. :ref:`cn_api_fluid_CUDAPinnedPlace` ) 对象,并返回所创建的对象列表。 + +如果 ``device_count`` 为 ``None``,实际设备数目将由当前任务中使用的GPU设备数决定。用户可通过以下2种方式设置任务可用的GPU设备: + +- 设置环境变量 ``FLAGS_selected_gpus`` ,例如 ``export FLAGS_selected_gpus='0,1'``。 +- 设置环境变量 ``CUDA_VISIBLE_DEVICES`` ,例如 ``export CUDA_VISIBLE_DEVICES='0,1'``。 + +关于如何设置任务中使用的GPU设备,具体请查看 fluid. :ref:`cn_api_fluid_cuda_places` 。 + +参数: + - **device_count** (int,可选) - 设备数目。默认值为 ``None``。 + +返回: ``fluid.CUDAPinnedPlace`` 对象列表。 + +返回类型:list[fluid.CUDAPinnedPlace] + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + # 1)不设置任何环境变量,默认使用所有的GPU,8卡的机器则将创建8个CUDAPinnedPlace + # 2)export FLAGS_selected_gpus='0,1',则创建2个CUDAPinnedPlace + cuda_pinned_places = fluid.cuda_pinned_places() + # 3)创建1个CUDAPinnedPlace + cuda_pinned_places = fluid.cuda_pinned_places(1) diff --git a/doc/paddle/api/paddle/fluid/cuda_places_cn.rst b/doc/paddle/api/paddle/fluid/cuda_places_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b0294e9cb5b2df17d525a053c68005d0355bbe2e --- /dev/null +++ b/doc/paddle/api/paddle/fluid/cuda_places_cn.rst @@ -0,0 +1,36 @@ +.. _cn_api_fluid_cuda_places: + +cuda_places +------------------------------- + +.. py:function:: paddle.fluid.cuda_places(device_ids=None) + + + + +.. note:: + 多卡任务请先使用 FLAGS_selected_gpus 环境变量设置可见的GPU设备,下个版本将会修正 CUDA_VISIBLE_DEVICES 环境变量无效的问题。 + +该接口根据 ``device_ids`` 创建一个或多个 ``fluid.CUDAPlace`` 对象,并返回所创建的对象列表。 + +如果 ``device_ids`` 为 ``None``,则首先检查 ``FLAGS_selected_gpus`` 标志。 +例如: ``FLAGS_selected_gpus=0,1,2`` ,则返回的列表将为 ``[fluid.CUDAPlace(0), fluid.CUDAPlace(1), fluid.CUDAPlace(2)]``。 +如果未设置标志 ``FLAGS_selected_gpus`` ,则根据 ``CUDA_VISIBLE_DEVICES`` 环境变量,返回所有可见的 GPU places。 + +如果 ``device_ids`` 不是 ``None``,它应该是使用的GPU设备ID的列表或元组。 +例如: ``device_id=[0,1,2]`` ,返回的列表将是 ``[fluid.CUDAPlace(0), fluid.CUDAPlace(1), fluid.CUDAPlace(2)]``。 + +参数: + - **device_ids** (list(int)|tuple(int),可选) - GPU的设备ID列表或元组。默认值为 ``None``。 + +返回: 创建的 ``fluid.CUDAPlace`` 列表。 + +返回类型:list[fluid.CUDAPlace] + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + cuda_places = fluid.cuda_places() + diff --git a/doc/paddle/api/paddle/fluid/data_cn.rst b/doc/paddle/api/paddle/fluid/data_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..14a6ab6ea1d94dcdc3586417ef9c85db98783c74 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/data_cn.rst @@ -0,0 +1,67 @@ +.. _cn_api_fluid_data: + +data +------------------------------- + + +.. 
py:function:: paddle.fluid.data(name, shape, dtype='float32', lod_level=0) + + + + +该OP会在全局block中创建变量(Variable),该全局变量可被计算图中的算子(operator)访问。该变量可作为占位符用于数据输入。例如用执行器(Executor)feed数据进该变量 + +注意: + + 不推荐使用 ``paddle.fluid.layers.data`` ,其在之后的版本中会被删除。请使用这个 ``paddle.fluid.data`` 。 + + ``paddle.fluid.layers.data`` 在组网期间会设置创建的变量维度(shape)和数据类型(dtype),但不会检查输入数据的维度和数据类型是否符合要求。 ``paddle.fluid.data`` 会在运行过程中由Executor/ParallelExecutor检查输入数据的维度和数据类型。 + + 如果想输入变长输入,可以使用 ``paddle.fluid.data`` 时将变长维度设为-1,或者直接输入 ``paddle.fluid.layers.data`` 且PaddlePaddle会按具体输入的形状运行。 + + 本API创建的变量默认 ``stop_gradient`` 属性为true,这意味这反向梯度不会被传递过这个数据变量。如果用户想传递反向梯度,可以设置 ``var.stop_gradient = False`` 。 + +参数: + - **name** (str)- 被创建的变量的名字,具体用法请参见 :ref:`api_guide_Name` 。 + - **shape** (list|tuple)- 声明维度信息的list或tuple。 + - **dtype** (np.dtype|VarType|str,可选)- 数据类型,支持bool,float16,float32,float64,int8,int16,int32,int64,uint8。默认值为float32。 + - **lod_level** (int,可选)- LoDTensor变量的LoD level数,LoD level是PaddlePaddle的高级特性,一般任务中不会需要更改此默认值,关于LoD level的详细适用场景和用法请见 :ref:`cn_user_guide_lod_tensor` 。默认值为0。 + +返回:全局变量,可进行数据访问 + +返回类型:Variable + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + # Creates a variable with fixed size [3, 2, 1] + # User can only feed data of the same shape to x + x = fluid.data(name='x', shape=[3, 2, 1], dtype='float32') + + # Creates a variable with changable batch size -1. + # Users can feed data of any batch size into y, + # but size of each data sample has to be [2, 1] + y = fluid.data(name='y', shape=[-1, 2, 1], dtype='float32') + + z = x + y + + # In this example, we will feed x and y with np-ndarry "1" + # and fetch z, like implementing "1 + 1 = 2" in PaddlePaddle + feed_data = np.ones(shape=[3, 2, 1], dtype=np.float32) + + exe = fluid.Executor(fluid.CPUPlace()) + out = exe.run(fluid.default_main_program(), + feed={ + 'x': feed_data, + 'y': feed_data + }, + fetch_list=[z.name]) + + # np-ndarray of shape=[3, 2, 1], dtype=float32, whose elements are 2 + print(out) + + diff --git a/doc/paddle/api/paddle/fluid/dataset/DatasetFactory_cn.rst b/doc/paddle/api/paddle/fluid/dataset/DatasetFactory_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..901d32c2069c8905031d8f3d9b6abdc89730876a --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dataset/DatasetFactory_cn.rst @@ -0,0 +1,37 @@ +.. _cn_api_fluid_dataset_DatasetFactory: + +DatasetFactory +------------------------------- + +.. py:class:: paddle.fluid.dataset.DatasetFactory + + + + +DatasetFactory是一个按数据集名称创建数据集的 "工厂",可以创建“QueueDataset”,“InMemoryDataset”或“FileInstantDataset”,默认为“QueueDataset”。 + + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + +.. py:method:: create_dataset(datafeed_class='QueueDataset') + +创建“QueueDataset”,“InMemoryDataset” 或 “FileInstantDataset”,默认为“QueueDataset”。 + + +参数: + - **datafeed_class** (str) – datafeed类名,为QueueDataset或InMemoryDataset。默认为QueueDataset。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset() + + + diff --git a/doc/paddle/api/paddle/fluid/dataset/InMemoryDataset_cn.rst b/doc/paddle/api/paddle/fluid/dataset/InMemoryDataset_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7699284c681fd87008d3b8ad41db89a93fd8d788 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dataset/InMemoryDataset_cn.rst @@ -0,0 +1,357 @@ +.. 
_cn_api_fluid_dataset_InMemoryDataset: + +InMemoryDataset +------------------------------- + +.. py:class:: paddle.fluid.dataset.InMemoryDataset + + + + +InMemoryDataset会向内存中加载数据并在训练前缓冲数据。此类由DatasetFactory创建。 + +**代码示例**: + +.. code-block:: python + + dataset = paddle.fluid.DatasetFactory().create_dataset(“InMemoryDataset”) + +.. py:method:: set_queue_num(queue_num) + +设置 ``Dataset`` 输出队列数量,训练进程会从队列中获取数据。 + +参数: + - **queue_num** (int) - dataset输出队列数量 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + dataset.set_queue_num(12) + +.. py:method:: set_fleet_send_batch_size(fleet_send_batch_size) + +设置发送batch的大小 + +参数: + - **fleet_send_batch_size** (int) - 设置发送batch的大小。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + dataset.set_fleet_send_batch_size(800) + +.. py:method:: set_merge_by_lineid(var_list, erase_duplicate_feas=True, min_merge_size=2, keep_unmerged-ins=True) + +通过样本id来设置合并,一些线id的实例将会在shuffle之后进行合并,你应该在一个data生成器里面解析样本id。 + +参数: + - **var_list** (list) - 可以被合并的特征列表,其中的每一个元素都是一个 ``Variable`` 。一些类特征我们通常不把它们合并为同样的样本id,所以用户应当指定哪个类特征可以被合并。 + - **erase_duplicate_feas** (bool) - 合并的时候是否删除重复的特征值。默认为True。 + - **min_merge_size** (int) - 合并的最小数量。默认为2。 + - **keep_unmerged_ins** (bool) - 是否保留没有合并的样本,比如有着独特id的样本,或者重复id的数量小于 ``min_merge_size`` 的样本。 + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + dataset.set_merge_by_lineid() + +.. py:method:: load_into_memory() + +向内存中加载数据。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + filelist = ["a.txt", "b.txt"] + dataset.set_filelist(filelist) + dataset.load_into_memory() + +.. py:method:: preload_into_memory() + +向内存中以异步模式加载数据。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + filelist = ["a.txt", "b.txt"] + dataset.set_filelist(filelist) + dataset.preload_into_memory() + dataset.wait_preload_done() + +.. py:method:: wait_preload_done() + +等待 ``preload_into_memory`` 完成。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + filelist = ["a.txt", "b.txt"] + dataset.set_filelist(filelist) + dataset.preload_into_memory() + dataset.wait_preload_done() + +.. py:method:: local_shuffle() + +局域shuffle。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + filelist = ["a.txt", "b.txt"] + dataset.set_filelist(filelist) + dataset.load_into_memory() + dataset.local_shuffle() + + +.. py:method:: global_shuffle(fleet=None) + +全局shuffle。 + +只能用在分布式模式(单机多进程或多机多进程)中。您如果在分布式模式中运行,应当传递fleet而非None。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet + dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + filelist = ["a.txt", "b.txt"] + dataset.set_filelist(filelist) + dataset.load_into_memory() + dataset.global_shuffle(fleet) + +参数: + - **fleet** (Fleet) – fleet单例。默认为None。 + + +.. py:method:: release_memory() + +当数据不再使用时,释放InMemoryDataset内存数据。 + +**代码示例**: + +.. 
code-block:: python + + import paddle.fluid as fluid + from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet + dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + filelist = ["a.txt", "b.txt"] + dataset.set_filelist(filelist) + dataset.load_into_memory() + dataset.global_shuffle(fleet) + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(fluid.default_startup_program()) + exe.train_from_dataset(fluid.default_main_program(), dataset) + dataset.release_memory() + +.. py:method:: get_memory_data_size(fleet=None) + +用户可以调用此函数以了解加载进内存后所有workers中的样本数量。 + +.. note:: + 该函数可能会导致性能不佳,因为它具有barrier。 + +参数: + - **fleet** (Fleet) – fleet对象。 + +返回:内存数据的大小。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet + dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + filelist = ["a.txt", "b.txt"] + dataset.set_filelist(filelist) + dataset.load_into_memory() + print dataset.get_memory_data_size(fleet) + + +.. py:method:: get_shuffle_data_size(fleet=None) + +获取shuffle数据大小,用户可以调用此函数以了解局域/全局shuffle后所有workers中的样本数量。 + +.. note:: + 该函数可能会导致局域shuffle性能不佳,因为它具有barrier。但其不影响局域shuffle。 + +参数: + - **fleet** (Fleet) – fleet对象。 + +返回:shuffle数据的大小。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet + dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + filelist = ["a.txt", "b.txt"] + dataset.set_filelist(filelist) + dataset.load_into_memory() + dataset.global_shuffle(fleet) + print dataset.get_shuffle_data_size(fleet) + + +.. py:method:: set_batch_size(batch_size) + +设置batch size。在训练期间生效。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset() + dataset.set_batch_size(128) + +参数: + - **batch_size** (int) - batch size + +.. py:method:: set_fea_eval(record_candidate_size, fea_eval=True) + +设置特征打乱特征验证模式,来修正特征level的重要性, 特征打乱需要 ``fea_eval`` 被设置为True。 + +参数: + - **record_candidate_size** (int) - 打乱一个特征的候选实例大小 + - **fea_eval** (bool) - 是否设置特征验证模式来打乱特征,默认为True。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset(“InMemoryDataset”) + dataset.set_fea_eval(1000000, True) + +.. py:method:: desc() + +为 ``DataFeedDesc`` 返回一个缓存信息。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset() + print(dataset.desc()) + +返回:一个字符串信息 + +.. py:method:: set_filelist(filelist) + +在当前的worker中设置文件列表。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset() + dataset.set_filelist(["a.txt", "b.txt"]) + +参数: + - **filelist** (list) - 文件列表 + +.. py:method:: set_hdfs_config(fs_name, fs_ugi) + +设置hdfs配置:fs名称与ugi。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset() + dataset.set_hdfs_config("my_fs_name", "my_fs_ugi") + +参数: + - **fs_name** (str) - fs名称 + - **fs_ugi** (str) - fs ugi + +.. py:method:: set_pipe_command(pipe_coommand) + +在当前的 ``dataset`` 中设置pipe命令。pipe命令只能使用UNIX的pipe命令 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset() + dataset.set_pipe_command("python my_script.py") + +参数: + - **pipe_command** (str) - pipe命令 + +.. py:method:: set_thread(thread_num) + +设置进程数量,等于readers的数量。 + +**代码示例**: + +.. 
code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset() + dataset.set_thread(12) + +参数: + - **thread_num** (int) - 进程数量 + +.. py:method:: set_use_var(var_list) + +设置将要使用的 ``Variable`` 。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset() + dataset.set_use_var([data, label]) + +参数: + - **var_list** (list) - variable 列表 + +.. py:method:: slots_shuffle(slots) + +该方法是在特征层次上的一个打乱方法,经常被用在有着较大缩放率实例的稀疏矩阵上,为了比较metric,比如auc,在一个或者多个有着baseline的特征上做特征打乱来验证特征level的重要性。 + +参数: + - **slots** (list[string]) - 要打乱特征的集合 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset(“InMemoryDataset”) + dataset.set_merge_by_lineid() + #支持slot 0 + dataset.slots_shuffle([‘0’]) + + + diff --git a/doc/paddle/api/paddle/fluid/dataset/QueueDataset_cn.rst b/doc/paddle/api/paddle/fluid/dataset/QueueDataset_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..298bf2bb4ecc8b356327af82570e7bb07e2e2907 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dataset/QueueDataset_cn.rst @@ -0,0 +1,188 @@ +.. _cn_api_fluid_dataset_QueueDataset: + +QueueDataset +------------------------------- + +.. py:class:: paddle.fluid.dataset.QueueDataset + + + + +流式处理数据。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset("QueueDataset") + + + +.. py:method:: local_shuffle() + +局域shuffle数据 + +QueueDataset中不支持局域shuffle,可能抛出NotImplementedError + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset("QueueDataset") + dataset.local_shuffle() + + + +.. py:method:: global_shuffle(fleet=None) + +全局shuffle数据 + +QueueDataset中不支持全局shuffle,可能抛出NotImplementedError + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet + dataset = fluid.DatasetFactory().create_dataset("QueueDataset") + dataset.global_shuffle(fleet) + +.. py:method:: desc() + +为 ``DataFeedDesc`` 返回一个缓存信息。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset() + print(dataset.desc()) + +返回:一个字符串信息 + +.. py:method:: set_batch_size(batch_size) + +设置batch size。在训练期间生效。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset() + dataset.set_batch_size(128) + +参数: + - **batch_size** (int) - batch size + +.. py:method:: set_fea_eval(record_candidate_size,fea_eval) + +参数: + - **record_candidate_size** (int) - 打乱一个特征的候选实例大小 + - **fea_eval** (bool) - 是否设置特征验证模式来打乱特征,默认为True。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset(“InMemoryDataset”) + dataset.set_fea_eval(1000000, True) + +.. py:method:: set_filelist(filelist) + +在当前的worker中设置文件列表。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset() + dataset.set_filelist(["a.txt", "b.txt"]) + +参数: + - **filelist** (list) - 文件列表 + +.. py:method:: set_hdfs_config(fs_name, fs_ugi) + +设置hdfs配置:fs名称与ugi。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset() + dataset.set_hdfs_config("my_fs_name", "my_fs_ugi") + +参数: + - **fs_name** (str) - fs名称 + - **fs_ugi** (str) - fs ugi + +.. 
py:method:: set_pipe_command(pipe_coommand) + +在当前的 ``dataset`` 中设置pipe命令。pipe命令只能使用UNIX的pipe命令 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset() + dataset.set_pipe_command("python my_script.py") + +参数: + - **pipe_command** (str) - pipe命令 + +.. py:method:: set_thread(thread_num) + +设置进程数量,等于readers的数量。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset() + dataset.set_thread(12) + +参数: + - **thread_num** (int) - 进程数量 + +.. py:method:: set_use_var(var_list) + +设置将要使用的 ``Variable`` 。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset() + dataset.set_use_var([data, label]) + +参数: + - **var_list** (list) - variable 列表 + +.. py:method:: slots_shuffle(slots) + +该方法是在特征层次上的一个打乱方法,经常被用在有着较大缩放率实例的稀疏矩阵上,为了比较metric,比如auc,在一个或者多个有着baseline的特征上做特征打乱来验证特征level的重要性。 + +参数: + - **slots** (list[string]) - 要打乱特征的集合 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dataset = fluid.DatasetFactory().create_dataset(“InMemoryDataset”) + dataset.set_merge_by_lineid() + #支持slot 0 + dataset.slots_shuffle([‘0’]) + diff --git a/doc/paddle/api/paddle/fluid/device_guard_cn.rst b/doc/paddle/api/paddle/fluid/device_guard_cn.rst new file mode 100755 index 0000000000000000000000000000000000000000..7d369cc9740652a4d6d4c5a23ff723fdfa0dbdc4 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/device_guard_cn.rst @@ -0,0 +1,36 @@ +.. _cn_api_fluid_device_guard: + +device_guard +------------------------------- + +**注意:该API仅支持【静态图】模式** + +.. py:function:: paddle.fluid.device_guard(device=None) + +一个用于指定OP运行设备的上下文管理器。 + +参数: + - **device** (str|None) – 指定上下文中使用的设备。它可以是'cpu'或者'gpu‘,当它被设置为'cpu'或者'gpu'时,创建在该上下文中的OP将被运行在CPUPlace或者CUDAPlace上。若设置为'gpu',同时程序运行在单卡模式下,设备的索引将与执行器的设备索引保持一致。默认值:None,在该上下文中的OP将被自动地分配设备。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + support_gpu = fluid.is_compiled_with_cuda() + place = fluid.CPUPlace() + if support_gpu: + place = fluid.CUDAPlace(0) + # if GPU is supported, the three OPs below will be automatically assigned to CUDAPlace(0) + data1 = fluid.layers.fill_constant(shape=[1, 3, 8, 8], value=0.5, dtype='float32') + data2 = fluid.layers.fill_constant(shape=[1, 3, 5, 5], value=0.5, dtype='float32') + shape = fluid.layers.shape(data2) + with fluid.device_guard("cpu"): + # Ops created here will be placed on CPUPlace + shape = fluid.layers.slice(shape, axes=[0], starts=[0], ends=[4]) + with fluid.device_guard('gpu'): + # if GPU is supported, OPs created here will be placed on CUDAPlace(0), otherwise on CPUPlace + out = fluid.layers.crop_tensor(data1, shape=shape) + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + result = exe.run(fetch_list=[out]) diff --git a/doc/paddle/api/paddle/fluid/dygraph/BatchNorm_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/BatchNorm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d0be6c8bcc7ca6b9bc99c4cc3a0f52db4cbf3db0 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/BatchNorm_cn.rst @@ -0,0 +1,73 @@ +.. _cn_api_fluid_dygraph_BatchNorm: + +BatchNorm +------------------------------- + +.. 
py:class:: paddle.fluid.dygraph.BatchNorm(num_channels, act=None, is_test=False, momentum=0.9, epsilon=1e-05, param_attr=None, bias_attr=None, dtype='float32', data_layout='NCHW', in_place=False, moving_mean_name=None, moving_variance_name=None, do_model_average_for_mean_and_var=False, use_global_stats=False, trainable_statistics=False) + + + + +该接口用于构建 ``BatchNorm`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。其中实现了批归一化层(Batch Normalization Layer)的功能,可用作卷积和全连接操作的批归一化函数,根据当前批次数据按通道计算的均值和方差进行归一化。更多详情请参考 : `Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift `_ + +当use_global_stats = False时,:math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 是minibatch的统计数据。计算公式如下: + +.. math:: + \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \quad &// mini-batch-mean \\ + \sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i - \mu_{\beta})^2 \quad &// mini-batch-variance \\ + +- :math:`x` : 批输入数据 +- :math:`m` : 当前批次数据的大小 + +当use_global_stats = True时,:math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 是全局(或运行)统计数据(moving_mean和moving_variance),通常来自预先训练好的模型。计算公式如下: + +.. math:: + + moving\_mean = moving\_mean * momentum + \mu_{\beta} * (1. - momentum) \quad &// global mean \\ + moving\_variance = moving\_variance * momentum + \sigma_{\beta}^{2} * (1. - momentum) \quad &// global variance \\ + +归一化函数公式如下: + +.. math:: + + \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\sigma_{\beta}^{2} + \epsilon}} \quad &// normalize \\ + y_i &\gets \gamma \hat{x_i} + \beta \quad &// scale-and-shift \\ + +- :math:`\epsilon` : 添加较小的值到方差中以防止除零 +- :math:`\gamma` : 可训练的比例参数 +- :math:`\beta` : 可训练的偏差参数 + +参数: + - **num_channels** (int) - 指明输入 ``Tensor`` 的通道数量。 + - **act** (str, 可选) - 应用于输出上的激活函数,如tanh、softmax、sigmoid,relu等,支持列表请参考 :ref:`api_guide_activations` ,默认值为None。 + - **is_test** (bool, 可选) - 指示是否在测试阶段,非训练阶段使用训练过程中统计到的全局均值和全局方差。默认值:False。 + - **momentum** (float, 可选) - 此值用于计算 ``moving_mean`` 和 ``moving_var`` 。默认值:0.9。更新公式如上所示。 + - **epsilon** (float, 可选) - 为了数值稳定加在分母上的值。默认值:1e-05。 + - **param_attr** (ParamAttr, 可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr, 可选) - 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **dtype** (str, 可选) - 指明输入 ``Tensor`` 的数据类型,可以为float32或float64。默认值:float32。 + - **data_layout** (string, 可选) - 指定输入数据格式,数据格式可以为“NCHW”或者“NHWC”。默认值:“NCHW”。 + - **in_place** (bool, 可选) - 指示 ``batch_norm`` 的输出是否可以复用输入内存。默认值:False。 + - **moving_mean_name** (str, 可选) - ``moving_mean`` 的名称,存储全局均值。如果将其设置为None, ``batch_norm`` 将随机命名全局均值;否则, ``batch_norm`` 将命名全局均值为 ``moving_mean_name`` 。默认值:None。 + - **moving_variance_name** (string, 可选) - ``moving_var`` 的名称,存储全局方差。如果将其设置为None, ``batch_norm`` 将随机命名全局方差;否则, ``batch_norm`` 将命名全局方差为 ``moving_variance_name`` 。默认值:None。 + - **do_model_average_for_mean_and_var** (bool, 可选) - 指示是否为mean和variance做模型均值。默认值:False。 + - **use_global_stats** (bool, 可选) – 指示是否使用全局均值和方差。在预测或测试模式下,将 ``use_global_stats`` 设置为true或将 ``is_test`` 设置为true,这两种行为是等效的。在训练模式中,当设置 ``use_global_stats`` 为True时,在训练期间也将使用全局均值和方差。默认值:False。 + - **trainable_statistics** (bool, 可选) - eval模式下是否计算mean均值和var方差。eval模式下,trainable_statistics为True时,由该批数据计算均值和方差。默认值:False。 + +返回:无 + +**代码示例** + +.. 
code-block:: python
+
+    import paddle.fluid as fluid
+    from paddle.fluid.dygraph.base import to_variable
+    import numpy as np
+
+    x = np.random.random(size=(3, 10, 3, 7)).astype('float32')
+    with fluid.dygraph.guard():
+        x = to_variable(x)
+        batch_norm = fluid.BatchNorm(10)
+        hidden1 = batch_norm(x)
+
+
diff --git a/doc/paddle/api/paddle/fluid/dygraph/BilinearTensorProduct_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/BilinearTensorProduct_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2d07f3acd52bf530e65f18c5e4a6a25a26b9c921
--- /dev/null
+++ b/doc/paddle/api/paddle/fluid/dygraph/BilinearTensorProduct_cn.rst
@@ -0,0 +1,65 @@
+.. _cn_api_fluid_dygraph_BilinearTensorProduct:
+
+BilinearTensorProduct
+-------------------------------
+
+.. py:class:: paddle.fluid.dygraph.BilinearTensorProduct(input1_dim, input2_dim, output_dim, name=None, act=None, param_attr=None, bias_attr=None, dtype="float32")
+
+
+
+
+该接口用于构建 ``BilinearTensorProduct`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。双线性乘积的计算式子如下。
+
+.. math::
+
+    out_{i} = x * W_{i} * {y^\mathrm{T}}, i=0,1,...,output\_dim-1
+
+式中,
+
+- :math:`x` : 第一个输入,每个样本包含M个元素,维度为 :math:`[batch\_size, M]`
+- :math:`y` :第二个输入,每个样本包含N个元素,维度为 :math:`[batch\_size, N]`
+- :math:`W_i` :第i个学习到的权重,维度为 :math:`[M,N]`
+- :math:`out_i` :输出的第i个元素
+- :math:`y^T` : :math:`y` 的转置
+
+
+参数:
+    - **input1_dim** (int) – 第一个输入的维度大小。
+    - **input2_dim** (int) – 第二个输入的维度大小。
+    - **output_dim** (int) – 输出的维度。
+    - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。
+    - **act** (str,可选) – 对输出应用的激活函数。默认值为None。
+    - **param_attr** (ParamAttr) – 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。
+    - **bias_attr** (ParamAttr) – 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr`。
+    - **dtype** (str, 可选) - 数据类型,可以为"float32"或"float64"。默认值为"float32"。
+
+返回:维度为[batch_size, output_dim]的2D Tensor,数据类型与输入数据类型相同。
+
+返回类型: Variable
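+为帮助理解上面的公式,下面用纯numpy对 :math:`out_{i} = x * W_{i} * y^\mathrm{T}` 做一个数值示意(仅为公式演示:其中的 W 为随机构造的权重,与该层实际学习到的参数无关,也未包含偏置项):
+
+.. code-block:: python
+
+    import numpy as np
+
+    batch_size, M, N, output_dim = 2, 5, 4, 3
+    x = np.random.rand(batch_size, M).astype('float32')
+    y = np.random.rand(batch_size, N).astype('float32')
+    # 每个输出维度 i 对应一个 [M, N] 的权重 W_i
+    W = np.random.rand(output_dim, M, N).astype('float32')
+
+    out = np.empty((batch_size, output_dim), dtype='float32')
+    for b in range(batch_size):
+        for i in range(output_dim):
+            # 对应公式 out_i = x * W_i * y^T(按单个样本计算)
+            out[b, i] = x[b] @ W[i] @ y[b]
+
+**代码示例**
+
+.. 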
py:class:: paddle.fluid.dygraph.Conv2DTranspose(num_channels, num_filters, filter_size, output_size=None, padding=0, stride=1, dilation=1, groups=None, param_attr=None, bias_attr=None, use_cudnn=True, act=None, dtype="float32") + + + + +该接口用于构建 ``Conv2DTranspose`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。其将在神经网络中构建一个二维卷积转置层(Convlution2D Transpose Layer),其根据输入(input)、滤波器参数(num_filters、filter_size)、步长(stride)、填充(padding)、膨胀系数(dilation)、组数(groups)来计算得到输出特征图。输入和输出是 ``NCHW`` 格式,N是批数据大小,C是特征图个数,H是特征图高度,W是特征图宽度。滤波器的维度是 [M, C, H, W] ,M是输入特征图个数,C是输出特征图个数,H是滤波器高度,W是滤波器宽度。如果组数大于1,C等于输入特征图个数除以组数的结果。如果提供了偏移属性和激活函数类型,卷积的结果会和偏移相加,激活函数会作用在最终结果上。转置卷积的计算过程相当于卷积的反向计算,转置卷积又被称为反卷积(但其实并不是真正的反卷积)。详情请参考: `Conv2DTranspose `_ 。 + +输入 ``X`` 和输出 ``Out`` 的函数关系如下: + +.. math:: + Out=\sigma (W*X+b)\\ + +其中: + - :math:`X` :输入特征图, ``NCHW`` 格式的 ``Tensor`` + - :math:`W` :滤波器,维度为 [M, C, H, W] 的 ``Tensor`` + - :math:`*` :卷积操作 + - :math:`b` :偏移值,2-D ``Tensor`` ,维度为 ``[M,1]`` + - :math:`\sigma` :激活函数 + - :math:`Out` :输出值, ``Out`` 和 ``X`` 的维度可能不同 + +**输出维度计算示例** + +- 输入: + + 输入维度: :math:`(N,C_{in},H_{in},W_{in})` + + 滤波器维度: :math:`(C_{in},C_{out},H_{f},W_{f})` + +- 输出: + + 输出维度: :math:`(N,C_{out},H_{out},W_{out})` + +- 其中 + +.. math:: + + & H'_{out} = (H_{in}-1)*strides[0]-2*paddings[0]+dilations[0]*(H_f-1)+1 + + & W'_{out} = (W_{in}-1)*strides[1]-2*paddings[1]+dilations[1]*(W_f-1)+1 + + & H_{out}\in[H'_{out},H'_{out} + strides[0]) + + & W_{out}\in[W'_{out},W'_{out} + strides[1]) + +参数: + - **num_channels** (int) - 输入图像的通道数。 + - **num_filters** (int) - 滤波器的个数,和输出特征图个数相同。 + - **filter_size** (int|tuple) - 滤波器大小。如果 ``filter_size`` 是一个元组,则必须包含两个整型数,分别表示滤波器高度和宽度。否则,表示滤波器高度和宽度均为 ``filter_size`` 。 + - **output_size** (int|tuple, 可选) - 输出特征图的大小。如果 ``output_size`` 是一个元组,则必须包含两个整型数,分别表示特征图高度和宽度。如果 ``output_size`` 是整型,表示特征图高度和宽度均为 ``output_size`` 。如果 ``output_size`` 为None,则会根据 ``filter_size`` 、 ``padding`` 和 ``stride`` 来计算 ``output_size`` 。如果 ``output_size`` 和 ``filter_size`` 同时指定,那么它们应满足上面的公式。默认值:None。 + - **padding** (int|tuple, 可选) - 填充大小。如果 ``padding`` 为元组,则必须包含两个整型数,分别表示竖直和水平边界填充大小。否则,表示竖直和水平边界填充大小均为 ``padding`` 。默认值:0。 + - **stride** (int|tuple, 可选) - 步长大小。如果 ``stride`` 为元组,则必须包含两个整型数,分别表示垂直和水平滑动步长。否则,表示垂直和水平滑动步长均为 ``stride`` 。默认值:1。 + - **dilation** (int|tuple, 可选) - 膨胀系数大小。如果 ``dialation`` 为元组,则必须包含两个整型数,分别表示垂直和水平膨胀系数。否则,表示垂直和水平膨胀系数均为 ``dialation`` 。默认值:1。 + - **groups** (int, 可选) - 二维卷积层的组数。根据Alex Krizhevsky的深度卷积神经网络(CNN)论文中的分组卷积:当group=2,滤波器的前一半仅和输入特征图的前一半连接。滤波器的后一半仅和输入特征图的后一半连接。默认值:1。 + - **param_attr** (ParamAttr, 可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr|bool, 可选) - 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **use_cudnn** (bool, 可选) - 是否使用cudnn内核,只有已安装cudnn库时才有效。默认值:True。 + - **act** (str, 可选) - 应用于输出上的激活函数,如tanh、softmax、sigmoid,relu等,支持列表请参考 :ref:`api_guide_activations` ,默认值:None。 + - **dtype** (str, 可选) - 数据类型,可以为"float32"或"float64"。默认值:"float32"。 + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + with fluid.dygraph.guard(): + data = np.random.random((3, 32, 32, 5)).astype('float32') + conv2DTranspose = fluid.dygraph.nn.Conv2DTranspose( + num_channels=32, num_filters=2, filter_size=3) + ret = conv2DTranspose(fluid.dygraph.base.to_variable(data)) + +属性 +:::::::::::: +.. py:attribute:: weight + +本层的可学习参数,类型为 ``Parameter`` + +.. 
py:attribute:: bias + +本层的可学习偏置,类型为 ``Parameter`` + diff --git a/doc/paddle/api/paddle/fluid/dygraph/Conv2D_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/Conv2D_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3e81c4a31738d78234527178a1408c7cc03519ef --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/Conv2D_cn.rst @@ -0,0 +1,90 @@ +.. _cn_api_fluid_dygraph_Conv2D: + +Conv2D +------------------------------- + +.. py:class:: paddle.fluid.dygraph.Conv2D(num_channels, num_filters, filter_size, stride=1, padding=0, dilation=1, groups=None, param_attr=None, bias_attr=None, use_cudnn=True, act=None, dtype='float32') + + + + +该接口用于构建 ``Conv2D`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。其将在神经网络中构建一个二维卷积层(Convolution2D Layer),其根据输入、滤波器参数(num_filters、filter_size)、步长(stride)、填充(padding)、膨胀系数(dilation)、组数(groups)参数来计算得到输出特征图。输入和输出是 ``NCHW`` 格式,N是批数据大小,C是特征图个数,H是特征图高度,W是特征图宽度。滤波器的维度是 [M, C, H, W] ,M是输出特征图个数,C是输入特征图个数,H是滤波器高度,W是滤波器宽度。如果组数大于1,C等于输入特征图个数除以组数的结果。如果提供了偏移属性和激活函数类型,卷积的结果会和偏移相加,激活函数会作用在最终结果上。详情请参考: `卷积 `_ 。 + +对每个输入 ``X`` ,有等式: + +.. math:: + + Out = \sigma \left ( W * X + b \right ) + +其中: + - :math:`X` :输入特征图, ``NCHW`` 格式的 ``Tensor`` + - :math:`W` :滤波器,维度为 [M, C, H, W] 的 ``Tensor`` + - :math:`*` :卷积操作 + - :math:`b` :偏移值,2-D ``Tensor`` ,维度为 ``[M,1]`` + - :math:`\sigma` :激活函数 + - :math:`Out` :输出值, ``Out`` 和 ``X`` 的维度可能不同 + +**输出维度计算示例** + +- 输入: + + 输入维度: :math:`(N,C_{in},H_{in},W_{in})` + + 滤波器维度: :math:`(C_{out},C_{in},H_{f},W_{f})` + +- 输出: + + 输出维度: :math:`(N,C_{out},H_{out},W_{out})` + +- 其中 + +.. math:: + + H_{out} = \frac{\left ( H_{in}+2*paddings[0]-\left ( dilations[0]*\left ( H_{f}-1 \right )+1 \right ) \right )}{strides[0]}+1 + + W_{out} = \frac{\left ( W_{in}+2*paddings[1]-\left ( dilations[1]*\left ( W_{f}-1 \right )+1 \right ) \right )}{strides[1]}+1 + +参数: + - **num_channels** (int) - 输入图像的通道数。 + - **num_filters** (int) - 滤波器的个数,和输出特征图个数相同。 + - **filter_size** (int|tuple) - 滤波器大小。如果 ``filter_size`` 是一个元组,则必须包含两个整型数,分别表示滤波器高度和宽度。否则,表示滤波器高度和宽度均为 ``filter_size`` 。 + - **stride** (int|tuple, 可选) - 步长大小。如果 ``stride`` 为元组,则必须包含两个整型数,分别表示垂直和水平滑动步长。否则,表示垂直和水平滑动步长均为 ``stride`` 。默认值:1。 + - **padding** (int|tuple, 可选) - 填充大小。如果 ``padding`` 为元组,则必须包含两个整型数,分别表示竖直和水平边界填充大小。否则,表示竖直和水平边界填充大小均为 ``padding`` 。默认值:0。 + - **dilation** (int|tuple, 可选) - 膨胀系数大小。如果 ``dialation`` 为元组,则必须包含两个整型数,分别表示垂直和水平膨胀系数。否则,表示垂直和水平膨胀系数均为 ``dialation`` 。默认值:1。 + - **groups** (int, 可选) - 二维卷积层的组数。根据Alex Krizhevsky的深度卷积神经网络(CNN)论文中的分组卷积:当group=2,滤波器的前一半仅和输入特征图的前一半连接。滤波器的后一半仅和输入特征图的后一半连接。默认值:1。 + - **param_attr** (ParamAttr, 可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr|bool, 可选) - 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **use_cudnn** (bool, 可选) - 是否用cudnn核,只有已安装cudnn库时才有效。默认值:True。 + - **act** (str, 可选) - 应用于输出上的激活函数,如tanh、softmax、sigmoid,relu等,支持列表请参考 :ref:`api_guide_activations` ,默认值:None。 + - **dtype** (str, 可选) - 数据类型,可以为"float32"或"float64"。默认值:"float32"。 + +返回:无 + +抛出异常: + - ``ValueError`` - 如果 ``use_cudnn`` 不是bool值 + +**代码示例** + +.. code-block:: python + + from paddle.fluid.dygraph.base import to_variable + import paddle.fluid as fluid + from paddle.fluid.dygraph import Conv2D + import numpy as np + + data = np.random.uniform(-1, 1, [10, 3, 32, 32]).astype('float32') + with fluid.dygraph.guard(): + conv2d = Conv2D(3, 2, 3) + data = to_variable(data) + conv = conv2d(data) + +属性 +:::::::::::: +.. py:attribute:: weight + +本层的可学习参数,类型为 ``Parameter`` + +.. 
py:attribute:: bias + +本层的可学习偏置,类型为 ``Parameter`` + diff --git a/doc/paddle/api/paddle/fluid/dygraph/Conv3DTranspose_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/Conv3DTranspose_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a3e4134d1e6a31dd8e206bcf5546d511a79e9d7a --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/Conv3DTranspose_cn.rst @@ -0,0 +1,118 @@ +.. _cn_api_fluid_dygraph_Conv3DTranspose: + +Conv3DTranspose +------------------------------- + +.. py:class:: paddle.fluid.dygraph.Conv3DTranspose(num_channels, num_filters, filter_size, output_size=None, padding=0, stride=1, dilation=1, groups=None, param_attr=None, bias_attr=None, use_cudnn=True, act=None, name=None, dtype="float32") + + + + + +该接口用于构建 ``Conv3DTranspose`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。3D卷积转置层(Convlution3D transpose layer)根据输入(input)、滤波器(filter)和卷积核膨胀(dilations)、步长(stride)、填充来计算输出特征层大小或者通过output_size指定输出特征层大小。输入(Input)和输出(Output)为NCDHW格式。其中 ``N`` 为batch大小, ``C`` 为通道数(channel), ``D`` 为特征深度, ``H`` 为特征高度, ``W`` 为特征宽度。转置卷积的计算过程相当于卷积的反向计算。转置卷积又被称为反卷积(但其实并不是真正的反卷积)。欲了解卷积转置层细节,请参考下面的说明和 参考文献_ 。如果参数bias_attr不为False, 转置卷积计算会添加偏置项。如果act不为None,则转置卷积计算之后添加相应的激活函数。 + + +.. _参考文献: https://arxiv.org/abs/1603.07285 + +输入 :math:`X` 和输出 :math:`Out` 函数关系如下: + +.. math:: + \\Out=\sigma (W*X+b)\\ + +其中: + - :math:`X` : 输入图像,具有NCDHW格式的张量(Tensor) + + - :math:`W` : 滤波器,具有NCDHW格式的张量(Tensor) + + - :math:`*` : 卷积操作(注意:转置卷积本质上的计算还是卷积) + + - :math:`b` : 偏置(bias),维度为 :math:`[M,1]` 的2D Tensor + + - :math:`σ` : 激活函数 + + - :math:`Out` : 输出值, ``Out`` 和 ``X`` 的 shape可能不一样 + + +**样例** + +输入: + + 输入Tensor的维度::math:`[N,C_{in}, D_{in}, H_{in}, W_{in}]` + + 滤波器Tensor的维度::math:`[C_{in}, C_{out}, D_f, H_f, W_f]` + + + +输出: + + 输出Tensor的维度::math:`[N,C_{out}, D_{out}, H_{out}, W_{out}]` + + +其中: + +.. math:: + D'_{out}=(D_{in}-1)*strides[0]-2*paddings[0]+dilations[0]*(D_f-1)+1 \\ + H'_{out}=(H_{in}-1)*strides[1]-2*paddings[1]+dilations[1]*(H_f-1)+1 \\ + W'_{out}=(W_{in}-1)*strides[2]-2*paddings[2]+dilations[2]*(W_f-1)+1 \\ +.. 
math:: + D_{out}\in[D'_{out},D'_{out} + strides[0]) \\ + H_{out}\in[H'_{out},H'_{out} + strides[1]) \\ + W_{out}\in[W'_{out},W'_{out} + strides[2]) + + +**注意** : + 如果output_size为None,则 :math:`D_{out}` = :math:`D^\prime_{out}` , :math:`H_{out}` = :math:`H^\prime_{out}` , :math:`W_{out}` = :math:`W^\prime_{out}` ;否则,指定的output_size_depth(输出特征层的深度) :math:`D_{out}` 应当介于 :math:`D^\prime_{out}` 和 :math:`D^\prime_{out} + strides[0]` 之间(不包含 :math:`D^\prime_{out} + strides[0]` ),指定的output_size_height(输出特征层的高) :math:`H_{out}` 应当介于 :math:`H^\prime_{out}` 和 :math:`H^\prime_{out} + strides[1]` 之间(不包含 :math:`H^\prime_{out} + strides[1]` ), 并且指定的output_size_width(输出特征层的宽) :math:`W_{out}` 应当介于 :math:`W^\prime_{out}` 和 :math:`W^\prime_{out} + strides[2]` 之间(不包含 :math:`W^\prime_{out} + strides[2]` )。 + + 由于转置卷积可以当成是卷积的反向计算,而根据卷积的输入输出计算公式来说,不同大小的输入特征层可能对应着相同大小的输出特征层,所以对应到转置卷积来说,固定大小的输入特征层对应的输出特征层大小并不唯一。 + + 如果指定了output_size, 其可以自动计算滤波器的大小。 + + +参数: + - **num_channels** (int) - 输入图像的通道数。 + - **num_filters** (int) - 滤波器(卷积核)的个数,与输出的图片的通道数相同。 + - **filter_size** (int|tuple) - 滤波器大小。如果filter_size是一个元组,则必须包含三个整型数,(filter_size_depth,filter_size_height, filter_size_width)。否则,filter_size_depth = filter_size_height = filter_size_width = filter_size。如果filter_size=None,则必须指定output_size, 其会根据output_size、padding和stride计算出滤波器大小。 + - **output_size** (int|tuple,可选) - 输出图片的大小。如果 ``output_size`` 是一个元组(tuple),则该元形式为(image_H,image_W),这两个值必须为整型。如果未设置,则内部会使用filter_size、padding和stride来计算output_size。如果 ``output_size`` 和 ``filter_size`` 是同时指定的,那么它们应满足上面的公式。默认值为None。output_size和filter_size不能同时为None。 + - **padding** (int|tuple,可选) - 填充padding大小。padding参数在输入特征层每边添加 ``dilation * (kernel_size - 1) - padding`` 个0。如果padding是一个元组,它必须包含三个整数(padding_depth,padding_height,padding_width)。否则,padding_depth = padding_height = padding_width = padding。默认值为0。 + - **stride** (int|tuple,可选) - 步长stride大小。滤波器和输入进行卷积计算时滑动的步长。如果stride是一个元组,那么元组的形式为(stride_depth,stride_height,stride_width)。否则,stride_depth = stride_height = stride_width = stride。默认值为1。 + - **dilation** (int|tuple,可选) - 膨胀比例dilation大小。空洞卷积时会指该参数,滤波器对输入进行卷积时,感受野里每相邻两个特征点之间的空洞信息,根据 `可视化效果图 `_ 较好理解。如果膨胀比例dilation是一个元组,那么元组的形式为(dilation_depth,dilation_height, dilation_width)。否则,dilation_depth = dilation_height = dilation_width = dilation。默认值为1。 + - **groups** (int,可选) - 三维转置卷积层的组数。从Alex Krizhevsky的CNN Deep论文中的群卷积中受到启发,当group=2时,输入和滤波器分别根据通道数量平均分为两组,第一组滤波器和第一组输入进行卷积计算,第二组滤波器和第二组输入进行卷积计算。默认值为1。 + - **param_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr,可选) - 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **use_cudnn** (bool,可选) - 是否使用cudnn内核,只有安装Paddle GPU版时才有效。默认值为True。 + - **act** (str,可选) - 激活函数类型,如果设置为None,则不使用激活函数。默认值为None。 + - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + - **dtype** (str, 可选) - 数据类型,可以为"float32"或"float64"。默认值为"float32"。 + + +返回: 无 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy + + with fluid.dygraph.guard(): + data = numpy.random.random((5, 3, 12, 32, 32)).astype('float32') + + conv3dTranspose = fluid.dygraph.nn.Conv3DTranspose( + 'Conv3DTranspose', + num_filters=12, + filter_size=12, + use_cudnn=False) + ret = conv3dTranspose(fluid.dygraph.base.to_variable(data)) + +属性 +:::::::::::: +.. py:attribute:: weight + +本层的可学习参数,类型为 ``Parameter`` + +.. 
py:attribute:: bias + +本层的可学习偏置,类型为 ``Parameter`` + + + diff --git a/doc/paddle/api/paddle/fluid/dygraph/Conv3D_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/Conv3D_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9e0291edba17ab22bc5288e21b4e16ee5a0305f0 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/Conv3D_cn.rst @@ -0,0 +1,88 @@ +.. _cn_api_fluid_dygraph_Conv3D: + +Conv3D +------------------------------- + +.. py:class:: paddle.fluid.dygraph.Conv3D(num_channels, num_filters, filter_size, stride=1, padding=0, dilation=1, groups=None, param_attr=None, bias_attr=None, use_cudnn=True, act=None, dtype="float32") + + + + + +该接口用于构建 ``Conv3D`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。3D卷积层(convolution3D layer)根据输入、滤波器(filter)、步长(stride)、填充(padding)、膨胀(dilations)、组数参数计算得到输出。输入和输出是[N, C, D, H, W]的多维tensor,其中N是批尺寸,C是通道数,D是特征深度,H是特征高度,W是特征宽度。卷积三维(Convlution3D)和卷积二维(Convlution2D)相似,但多了一维深度(depth)。如果提供了bias属性和激活函数类型,bias会添加到卷积(convolution)的结果中相应的激活函数会作用在最终结果上。 + +对每个输入X,有等式: + +.. math:: + + + Out = \sigma \left ( W * X + b \right ) + +其中: + - :math:`X` :输入值,NCDHW格式的张量(Tensor) + - :math:`W` :滤波器值,MCDHW格式的张量(Tensor) + - :math:`*` : 卷积操作 + - :math:`b` :Bias值,二维张量(Tensor),维度为 ``[M,1]`` + - :math:`\sigma` :激活函数 + - :math:`Out` :输出值, ``Out`` 和 ``X`` 的维度可能不同 + +**示例** + +- 输入: + 输入Tensor的维度: :math:`[N, C_{in}, D_{in}, H_{in}, W_{in}]` + + 滤波器Tensor的维度: :math:`[C_{out}, C_{in}, D_f, H_f, W_f]` +- 输出: + 输出Tensor的维度: :math:`[N, C_{out}, D_{out}, H_{out}, W_{out}]` + +其中 + +.. math:: + + + D_{out}&= \frac{(D_{in} + 2 * paddings[0] - (dilations[0] * (D_f - 1) + 1))}{strides[0]} + 1 \\ + H_{out}&= \frac{(H_{in} + 2 * paddings[1] - (dilations[1] * (H_f - 1) + 1))}{strides[1]} + 1 \\ + W_{out}&= \frac{(W_{in} + 2 * paddings[2] - (dilations[2] * (W_f - 1) + 1))}{strides[2]} + 1 + +参数: + - **num_channels** (int) - 输入图像的通道数。 + - **num_fliters** (int) - 滤波器(卷积核)的个数。和输出图像通道相同。 + - **filter_size** (int|tuple) - 滤波器大小。如果filter_size是一个元组,则必须包含三个整型数,(filter_size_D, filter_size_H, filter_size_W)。如果filter_size是一个int型,则filter_size_depth = filter_size_height = filter_size_width = filter_size。 + - **stride** (int|tuple,可选) - 步长(stride)大小。滤波器和输入进行卷积计算时滑动的步长。如果步长(stride)为元组,则必须包含三个整型数, (stride_D, stride_H, stride_W)。否则,stride_D = stride_H = stride_W = stride。默认值为1。 + - **padding** (int|tuple,可选) - 填充(padding)大小。padding参数在输入特征层每边添加padding个0。如果填充(padding)为元组,则必须包含三个整型数,(padding_depth, padding_height, padding_width)。否则, padding_depth = padding_height = padding_width = padding。默认值为0。 + - **dilation** (int|tuple,可选) - 膨胀(dilation)大小。空洞卷积时会指该参数,滤波器对输入进行卷积时,感受野里每相邻两个特征点之间的空洞信息,根据 `可视化效果图 `_ 较好理解。如果膨胀(dialation)为元组,则必须包含两个整型数, (dilation_D, dilation_H, dilation_W)。否则,dilation_D = dilation_H = dilation_W = dilation。默认值为1。 + - **groups** (int,可选) - 三维卷积层(Conv3D Layer)的组数。根据Alex Krizhevsky的深度卷积神经网络(CNN)论文中的成组卷积:当group=2,输入和滤波器分别根据通道数量平均分为两组,第一组滤波器和第一组输入进行卷积计算,第二组滤波器和第二组输入进行卷积计算。默认值为1。 + - **param_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr,可选) - 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr`。 + - **use_cudnn** (bool,可选) - 是否用cudnn内核,只有安装Paddle GPU版时才有效。默认值为True。 + - **act** (str,可选) - 激活函数类型,如果设为None,则未添加激活函数。默认值为None。 + - **dtype** (str, 可选) - 数据类型,可以为"float32"或"float64"。默认值:"float32"。 + + +返回:无 + +**代码示例**: + +.. 
code-block:: python + + import paddle.fluid as fluid + import numpy + + with fluid.dygraph.guard(): + data = numpy.random.random((5, 3, 12, 32, 32)).astype('float32') + conv3d = fluid.dygraph.nn.Conv3D( + 'Conv3D', num_filters=2, filter_size=3, act="relu") + ret = conv3d(fluid.dygraph.base.to_variable(data)) + + +属性 +:::::::::::: +.. py:attribute:: weight + +本层的可学习参数,类型为 ``Parameter`` + +.. py:attribute:: bias + +本层的可学习偏置,类型为 ``Parameter`` + + diff --git a/doc/paddle/api/paddle/fluid/dygraph/Dropout_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/Dropout_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ce72d582a0abaad2c4db1f2634049388203f8b51 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/Dropout_cn.rst @@ -0,0 +1,53 @@ +.. _cn_api_fluid_dygraph_Dropout: + +Dropout +------------------------------- + +.. py:class:: paddle.fluid.dygraph.Dropout(p=0.5, seed=None, dropout_implementation='downgrade_in_infer', is_test=False) + +丢弃或者保持输入的每个元素独立。Dropout是一种正则化手段,通过在训练过程中阻止神经元节点间的相关性来减少过拟合。根据给定的丢弃概率,dropout操作符按丢弃概率随机将一些神经元输出设置为0,其他的仍保持不变。 + +Dropout层可以删除,提高执行效率。 + +参数: + - **p** (float32,可选) - 输入单元的丢弃概率,即输入单元设置为0的概率。默认值:0.5 + - **seed** (int,可选) - 整型数据,用于创建随机种子。如果该参数设为None,则使用随机种子。注:如果给定一个整型种子,始终丢弃相同的输出单元。训练过程中勿用固定不变的种子。默认值:None。 + - **dropout_implementation** (str,可选) - 丢弃单元的方式,有两种'downgrade_in_infer'和'upscale_in_train'两种选择,默认:'downgrade_in_infer'。具体作用可以参考一下描述。 + + 1. downgrade_in_infer(default), 在预测时减小输出结果 + + - train: out = input * mask + + - inference: out = input * (1.0 - p) + + (mask是一个张量,维度和输入维度相同,值为0或1,值为0的比例即为 ``p`` ) + + 2. upscale_in_train, 增加训练时的结果 + + - train: out = input * mask / ( 1.0 - p ) + + - inference: out = input + + (mask是一个张量,维度和输入维度相同,值为0或1,值为0的比例即为 ``p`` ) + + - **is_test** (bool,可选) - 标记是否是测试阶段。此标志仅对静态图模式有效。对于动态图模式,请使用 ``eval()`` 接口。默认:False。 + +返回:无 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + from paddle.fluid.dygraph.base import to_variable + import numpy as np + + x = np.random.random(size=(3, 10, 3, 7)).astype('float32') + with fluid.dygraph.guard(): + x = to_variable(x) + m = fluid.dygraph.Dropout(p=0.5) + droped_train = m(x) + # 切换到 eval 模式 + m.eval() + droped_eval = m(x) + diff --git a/doc/paddle/api/paddle/fluid/dygraph/Embedding_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/Embedding_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..35e3ce8e593e37de1d6d2cd8714ab2c749c71ed0 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/Embedding_cn.rst @@ -0,0 +1,102 @@ +.. _cn_api_fluid_dygraph_Embedding: + +Embedding +------------------------------- + +.. 
py:class:: paddle.fluid.dygraph.Embedding(size, is_sparse=False, is_distributed=False, padding_idx=None, param_attr=None, dtype='float32') + + + + +嵌入层(Embedding Layer) + +该接口用于构建 ``Embedding`` 的一个可调用对象,具体用法参照 ``代码示例`` 。其根据input中的id信息从embedding矩阵中查询对应embedding信息,并会根据输入的size (vocab_size, emb_size)和dtype自动构造一个二维embedding矩阵。 + +输出的Tensor的shape是在输入Tensor shape的最后一维后面添加了emb_size的维度。 + +注:input中的id必须满足 ``0 =< id < size[0]``,否则程序会抛异常退出。 + + +:: + + Case 1: + + input是Tensor, 且padding_idx = -1 + input.data = [[1, 3], [2, 4], [4, 127]] + input.shape = [3, 2] + 若size = [128, 16] + 输出为Tensor: + out.shape = [3, 2, 16] + out.data = [[[0.129435295, 0.244512452, ..., 0.436322452], + [0.345421456, 0.524563927, ..., 0.144534654]], + + [[0.345249859, 0.124939536, ..., 0.194353745], + [0.945345345, 0.435394634, ..., 0.435345365]], + + [[0.945345345, 0.435394634, ..., 0.435345365], + [0.0, 0.0, ..., 0.0 ]]] # padding data + 输入的padding_idx小于0,则自动转换为padding_idx = -1 + 128 = 127, 对于输入id为127的词,进行padding处理。 + + Case 2: + + input是lod level 为1的LoDTensor, 且padding_idx = 0 + input.lod = [[2, 3]] + input.data = [[1], [3], [2], [4], [0]] + input.shape = [5, 1] + 若size = [128, 16] + 输出为LoDTensor: + out.lod = [[2, 3]] + out.shape = [5, 1, 16] + out.data = [[[0.129435295, 0.244512452, ..., 0.436322452]], + [[0.345421456, 0.524563927, ..., 0.144534654]], + [[0.345249859, 0.124939536, ..., 0.194353745]], + [[0.945345345, 0.435394634, ..., 0.435345365]], + [[0.0, 0.0, ..., 0.0 ]]] # padding data + 输入的padding_idx = 0,则对于输入id为0的词,进行padding处理。 + +参数: + - **size** (tuple|list) - embedding矩阵的维度。必须包含两个元素,第一个元素为vocab_size(词表大小), 第二个为emb_size(embedding层维度)。 + - **is_sparse** (bool) - 是否使用稀疏的更新方式,这个参数只会影响反向的梯度更新的性能,sparse更新速度更快,推荐使用稀疏更新的方式。但某些optimizer不支持sparse更新,比如 :ref:`cn_api_fluid_optimizer_AdadeltaOptimizer` 、 :ref:`cn_api_fluid_optimizer_AdamaxOptimizer` 、 :ref:`cn_api_fluid_optimizer_DecayedAdagradOptimizer` 、 :ref:`cn_api_fluid_optimizer_FtrlOptimizer` 、 :ref:`cn_api_fluid_optimizer_LambOptimizer` 、:ref:`cn_api_fluid_optimizer_LarsMomentumOptimizer` ,此时is_sparse必须为False。默认为False。 + - **is_distributed** (bool) - 是否使用分布式的方式存储embedding矩阵,仅在多机分布式cpu训练中使用。默认为False。 + - **padding_idx** (int|long|None) - padding_idx需在区间[-vocab_size, vocab_size),否则不生效,padding_idx<0时,padding_idx会被改成vocab_size + padding_idx,input中等于padding_index的id对应的embedding信息会被设置为0,且这部分填充数据在训练时将不会被更新。如果为None,不作处理,默认为None。 + - **param_attr** (ParamAttr) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。此外,可以通过 ``param_attr`` 参数加载用户自定义或预训练的词向量。只需将本地词向量转为numpy数据格式,且保证本地词向量的shape和embedding的 ``size`` 参数一致,然后使用 :ref:`cn_api_fluid_initializer_NumpyArrayInitializer` 进行初始化,即可实现加载自定义或预训练的词向量。详细使用方法见代码示例2。 + - **dtype** (str|core.VarDesc.VarType) - 输出Tensor或LoDTensor的数据类型,数据类型必须为:float32或float64,默认为float32。 + +返回:input映射后得到的Embedding Tensor或LoDTensor,数据类型和dtype定义的类型一致。 + +返回类型:Variable + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.dygraph.base as base + import numpy as np + + # 示例 1 + inp_word = np.array([[2, 3, 5], [4, 2, 1]]).astype('int64') + inp_word.shape # [2, 3] + dict_size = 20 + with fluid.dygraph.guard(): + emb = fluid.dygraph.Embedding( + size=[dict_size, 32], + param_attr='emb.w', + is_sparse=False) + static_rlt3 = emb(base.to_variable(inp_word)) + static_rlt3.shape # [2, 3, 32] + + # 示例 2: 加载用户自定义或预训练的词向量 + weight_data = np.random.random(size=(128, 100)) # numpy格式的词向量数据 + w_param_attrs = fluid.ParamAttr( + name="emb_weight", + learning_rate=0.5, + initializer=fluid.initializer.NumpyArrayInitializer(weight_data), + trainable=True) + with fluid.dygraph.guard(): + emb = fluid.dygraph.Embedding( + size=[128, 100], + param_attr= w_param_attrs, + is_sparse=False) + static_rlt3 = emb(base.to_variable(inp_word)) + diff --git a/doc/paddle/api/paddle/fluid/dygraph/GRUCell_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/GRUCell_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..953c666ca9c83a38275893507de8156229438f11 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/GRUCell_cn.rst @@ -0,0 +1,65 @@ +.. _cn_api_fluid_layers_GRUCell: + +GRUCell +------------------------------- + + +.. py:class:: paddle.fluid.layers.GRUCell(hidden_size, param_attr=None, bias_attr=None, gate_activation=None, activation=None, dtype="float32", name="GRUCell") + + + + +门控循环单元(Gated Recurrent Unit)。通过对 :code:`fluid.contrib.layers.rnn_impl.BasicGRUUnit` 包装,来让它可以应用于RNNCell。 + +公式如下: + +.. math:: + u_t & = act_g(W_{ux}x_{t} + W_{uh}h_{t-1} + b_u)\\ + r_t & = act_g(W_{rx}x_{t} + W_{rh}h_{t-1} + b_r)\\ + \tilde{h_t} & = act_c(W_{cx}x_{t} + W_{ch}(r_t \odot h_{t-1}) + b_c)\\ + h_t & = u_t \odot h_{t-1} + (1-u_t) \odot \tilde{h_t} + +更多细节可以参考 `Learning Phrase Representations using RNN Encoder Decoder for Statistical Machine Translation `_ + +参数: + - **hidden_size** (int) - GRUCell中的隐藏层大小。 + - **param_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr`。 + - **bias_attr** (ParamAttr,可选) - 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **gate_activation** (function,可选) - :math:`act_g` 的激活函数。 默认值为 :code:`fluid.layers.sigmoid`。 + - **activation** (function,可选) - :math:`act_c` 的激活函数。 默认值为 :code:`fluid.layers.tanh` + - **dtype** (string,可选) - 此cell中使用的数据类型。 默认为"float32"。 + - **name** (string,可选) - 用于标识参数和偏差的名称域。 + +返回:GRUCell类的实例对象。 + +**示例代码** + +.. code-block:: python + + import paddle.fluid.layers as layers + cell = layers.GRUCell(hidden_size=256) + + +.. py:method:: call(inputs, states) + +执行GRU的计算。 + +参数: + - **input** (Variable) - 输入,形状为 :math:`[batch\_size,input\_size]` 的tensor,对应于公式中的 :math:`x_t` 。数据类型应为float32。 + - **states** (Variable) - 状态,形状为 :math:`[batch\_size,hidden\_size]` 的tensor。 对应于公式中的 :math:`h_{t-1}` 。数据类型应为float32。 + +返回:一个元组 :code:`(outputs, new_states)` ,其中 :code:`outputs` 和 :code:`new_states` 是同一个tensor,其形状为 :math:`[batch\_size,hidden\_size]`,数据类型和 :code:`state` 的数据类型相同,对应于公式中的 :math:`h_t`。 + +返回类型:tuple + +.. 
py:method:: state_shape() + +GRUCell的 :code:`state_shape` 是形状 :math:`[hidden\_size]` (batch大小为-1,自动插入到形状中),对应于 :math:`h_{t-1}` 的形状。 + +参数:无。 + +返回:GRUCell的 :code:`state_shape`。 + +返回类型:Variable + + diff --git a/doc/paddle/api/paddle/fluid/dygraph/GRUUnit_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/GRUUnit_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c2f986da1df15d700808d8f57596f15e0c7c6c6c --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/GRUUnit_cn.rst @@ -0,0 +1,82 @@ +.. _cn_api_fluid_dygraph_GRUUnit: + +GRUUnit +------------------------------- + +.. py:class:: paddle.fluid.dygraph.GRUUnit(name_scope, size, param_attr=None, bias_attr=None, activation='tanh', gate_activation='sigmoid', origin_mode=False, dtype='float32') + + + + +该接口用于构建 ``GRU(Gated Recurrent Unit)`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。其用于完成单个时间步内GRU的计算,支持以下两种计算方式: + +如果origin_mode为True,则使用的运算公式来自论文 +`Learning Phrase Representations using RNN Encoder Decoder for Statistical Machine Translation `_ 。 + +.. math:: + u_t & = act_g(W_{ux}x_{t} + W_{uh}h_{t-1} + b_u)\\ + r_t & = act_g(W_{rx}x_{t} + W_{rh}h_{t-1} + b_r)\\ + \tilde{h_t} & = act_c(W_{cx}x_{t} + W_{ch}(r_t \odot h_{t-1}) + b_c)\\ + h_t & = u_t \odot h_{t-1} + (1-u_t) \odot \tilde{h_t} + + +如果origin_mode为False,则使用的运算公式来自论文 +`Empirical Evaluation of Gated Recurrent Neural Networks on Sequence Modeling `_ 。 + +公式如下: + +.. math:: + u_t & = act_g(W_{ux}x_{t} + W_{uh}h_{t-1} + b_u)\\ + r_t & = act_g(W_{rx}x_{t} + W_{rh}h_{t-1} + b_r)\\ + \tilde{h_t} & = act_c(W_{cx}x_{t} + W_{ch}(r_t \odot h_{t-1}) + b_c)\\ + h_t & = (1-u_t) \odot h_{t-1} + u_t \odot \tilde{h_t} + + +其中, :math:`x_t` 为当前时间步的输入,:math:`h_{t-1}` 为前一时间步的隐状态 ``hidden``; :math:`u_t` 、 :math:`r_t` 、 :math:`\tilde{h_t}` 和 :math:`h_t` 分别代表了GRU单元中update gate(更新门)、reset gate(重置门)、candidate hidden(候选隐状态)和隐状态输出; :math:`\odot` 为逐个元素相乘; +:math:`W_{uh}, b_u` 、 :math:`W_{rh}, b_r` 和 :math:`W_{ch}, b_c` 分别代表更新门、重置门和候选隐状态在计算时使用的权重矩阵和偏置。在实现上,三个权重矩阵合并为一个维度为 :math:`[D, D \times 3]` 的Tensor存放。 + +参数: + - **size** (int) – 输入数据的维度大小。 + - **param_attr** (ParamAttr,可选) – 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + **注意** + - 权重参数维度为 :math:`[T, 3×D]` , :math:`D` 是隐藏状态的规模(hidden size), 其值与输入size相关,计算方式为size除以3取整 。 + - 权重参数矩阵所有元素由两部分组成, 一是update gate和reset gate的权重,维度为 :math:`[D, 2×D]` 的2D Tensor,数据类型可以为float32或float64;二是候选隐藏状态(candidate hidden state)的权重矩阵,维度为 :math:`[D, D]` 的2D Tensor,数据类型可以为float32或float64。 + - **bias_attr** (ParamAttr,可选) - 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **activation** (str,可选) – 公式中 :math:`act_c` 激活函数的类型。可以为'identity'、'sigmoid'、'tanh'、'relu'四种激活函数设置值。默认值为'tanh'。 + - **gate_activation** (str,可选) – 公式中 :math:`act_g` 激活函数的类型。可以为'identity'、'sigmoid'、'tanh'、'relu'四种激活函数设置值。默认值为'sigmoid'。 + - **origin_mode** (bool) – 指明要使用的GRU计算方式,两种计算方式具体差异见公式描述。默认值为False。 + - **dtype** (str,可选) – 该层的数据类型,可以为'float32', 'float64'。默认值为'float32'。 + +返回: + None. + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.dygraph.base as base + import numpy + + lod = [[2, 4, 3]] + D = 5 + T = sum(lod[0]) + + input = numpy.random.rand(T, 3 * D).astype('float32') + hidden_input = numpy.random.rand(T, D).astype('float32') + with fluid.dygraph.guard(): + x = numpy.random.random((3, 32, 32)).astype('float32') + gru = fluid.dygraph.GRUUnit(size=D * 3) + dy_ret = gru( + base.to_variable(input), base.to_variable(hidden_input)) + + +属性 +:::::::::::: +.. 
py:attribute:: weight + +本层的可学习参数,类型为 ``Parameter`` + +.. py:attribute:: bias + +本层的可学习偏置,类型为 ``Parameter`` diff --git a/doc/paddle/api/paddle/fluid/dygraph/GroupNorm_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/GroupNorm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9b03d8f04ce37b3409e76a4185e8e96f0e592912 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/GroupNorm_cn.rst @@ -0,0 +1,42 @@ +.. _cn_api_fluid_dygraph_GroupNorm: + +GroupNorm +------------------------------- + +.. py:class:: paddle.fluid.dygraph.GroupNorm(channels, groups, epsilon=1e-05, param_attr=None, bias_attr=None, act=None, data_layout='NCHW', dtype="float32") + + + + +**Group Normalization层** + +该接口用于构建 ``GroupNorm`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。其中实现了组归一化层的功能。更多详情请参考: `Group Normalization `_ 。 + +参数: + - **channels** (int) - 输入的通道数。 + - **groups** (int) - 从通道中分离出来的 ``group`` 的数目。 + - **epsilon** (float, 可选) - 为防止方差除零,增加一个很小的值。默认值:1e-05。 + - **param_attr** (ParamAttr, 可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr, 可选) - 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **act** (str, 可选) - 应用于输出上的激活函数,如tanh、softmax、sigmoid,relu等,支持列表请参考 :ref:`api_guide_activations` ,默认值为None。 + - **data_layout** (str, 可选) - 只支持“NCHW”(num_batches,channels,height,width)格式。默认值:“NCHW”。 + - **dtype** (str, 可选) - 数据类型,可以为"float32"或"float64"。默认值为"float32"。 + +返回:无 + +抛出异常: + - ValueError - 如果 ``data_layout`` 不是“NCHW”格式。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy s np + + with fluid.dygraph.guard(): + x = np.random.random((8, 32, 32)).astype('float32') + groupNorm = fluid.dygraph.nn.GroupNorm(channels=32, groups=4) + ret = groupNorm(fluid.dygraph.base.to_variable(x)) + + diff --git a/doc/paddle/api/paddle/fluid/dygraph/LSTMCell_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/LSTMCell_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..cebebb05235fe4cb9338ef681d4bd51d17dd1f1d --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/LSTMCell_cn.rst @@ -0,0 +1,66 @@ +.. _cn_api_fluid_layers_LSTMCell: + +LSTMCell +------------------------------- + + + +.. py:class:: paddle.fluid.layers.LSTMCell(hidden_size, param_attr=None, bias_attr=None, gate_activation=None, activation=None, forget_bias=1.0, dtype="float32", name="LSTMCell") + + + + +长短期记忆单元(Long-Short Term Memory)。通过对 :code:`fluid.contrib.layers.rnn_impl.BasicLSTMUnit` 包装,来让它可以应用于RNNCell。 + +公式如下: + +.. 
math::
+    i_{t} &= act_g \left ( W_{x_{i}}x_{t}+W_{h_{i}}h_{t-1}+b_{i} \right ) \\
+    f_{t} &= act_g \left ( W_{x_{f}}x_{t}+W_{h_{f}}h_{t-1}+b_{f}+forget\_bias \right ) \\
+    c_{t} &= f_{t}c_{t-1}+i_{t}act_h\left ( W_{x_{c}}x_{t} +W_{h_{c}}h_{t-1}+b_{c}\right ) \\
+    o_{t} &= act_g\left ( W_{x_{o}}x_{t}+W_{h_{o}}h_{t-1}+b_{o} \right ) \\
+    h_{t} &= o_{t}act_h \left ( c_{t} \right )
+
+更多细节可以参考 `RECURRENT NEURAL NETWORK REGULARIZATION `_
+
+参数:
+    - **hidden_size** (int) - LSTMCell中的隐藏层大小。
+    - **param_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr`。
+    - **bias_attr** (ParamAttr,可选) - 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。
+    - **gate_activation** (function,可选) - :math:`act_g` 的激活函数。默认值为 :code:`fluid.layers.sigmoid`。
+    - **activation** (function,可选) - :math:`act_h` 的激活函数。默认值为 :code:`fluid.layers.tanh`。
+    - **forget_bias** (float,可选) - 计算遗忘门时使用的遗忘偏置。默认值为 1.0。
+    - **dtype** (string,可选) - 此Cell中使用的数据类型。默认值为 `float32`。
+    - **name** (string,可选) - 用于标识参数和偏差的名称域。
+
+返回:LSTMCell类的实例对象。
+
+**示例代码**
+
+.. code-block:: python
+
+    import paddle.fluid.layers as layers
+    cell = layers.LSTMCell(hidden_size=256)
+
+
+.. py:method:: call(inputs, states)
+
+执行LSTM的计算。
+
+参数:
+    - **input** (Variable) - 输入,形状为 :math:`[batch\_size,input\_size]` 的tensor,对应于公式中的 :math:`x_t`。数据类型应为float32。
+    - **states** (Variable) - 状态,包含两个tensor的列表,每个tensor形状为 :math:`[batch\_size,hidden\_size]`。对应于公式中的 :math:`h_{t-1}, c_{t-1}`。数据类型应为float32。
+
+返回:一个元组 :code:`(outputs, new_states)`,其中 :code:`outputs` 是形状为 :math:`[batch\_size,hidden\_size]` 的tensor,对应于公式中的 :math:`h_{t}`;:code:`new_states` 是一个列表,包含形状为 :math:`[batch\_size,hidden\_size]` 的两个tensor变量,它们对应于公式中的 :math:`h_{t}, c_{t}`。这些tensor的数据类型都与 :code:`states` 的数据类型相同。
+
+返回类型:tuple
+
+.. py:method:: state_shape()
+
+LSTMCell的 :code:`state_shape` 是一个包含两个形状的列表::math:`[[hidden\_size], [hidden\_size]]` (batch大小为-1,会自动插入到形状中)。这两个形状分别对应于公式中的 :math:`h_{t-1}` 和 :math:`c_{t-1}`。
+
+参数:无。
+
+返回:LSTMCell的 :code:`state_shape`
+
+返回类型:list
diff --git a/doc/paddle/api/paddle/fluid/dygraph/LambdaDecay_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/LambdaDecay_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..33e30850a1e2e5ddcb230d0ac222434e3257ad56
--- /dev/null
+++ b/doc/paddle/api/paddle/fluid/dygraph/LambdaDecay_cn.rst
@@ -0,0 +1,64 @@
+.. _cn_api_fluid_dygraph_LambdaDecay:
+
+LambdaDecay
+-------------------------------
+
+
+.. py:class:: paddle.fluid.dygraph.LambdaDecay(learning_rate, lr_lambda)
+
+
+
+该API提供 lambda 函数设置学习率的功能。``lr_lambda`` 为一个 lambda 函数,其以 ``epoch`` 为输入,计算出一个因子,该因子会乘以初始学习率。
+
+算法可以描述为:
+
+.. code-block:: text
+
+    learning_rate = 0.5 # init learning_rate
+    lr_lambda = lambda epoch: 0.95 ** epoch
+
+    learning_rate = 0.5 # epoch 0
+    learning_rate = 0.475 # epoch 1
+    learning_rate = 0.45125 # epoch 2
+
+参数:
+    - **learning_rate** (float|int) - 初始化的学习率。可以是Python的float或int。
+    - **lr_lambda** (function) - ``lr_lambda`` 为一个lambda函数,其通过 ``epoch`` 计算出一个因子,该因子会乘以初始学习率。
+
+返回: 无
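+也就是说,第 ``epoch`` 轮使用的学习率等于 ``learning_rate * lr_lambda(epoch)`` 。下面用几行普通的Python代码演示这一计算过程(仅为算法示意,不涉及任何训练逻辑):
+
+.. code-block:: python
+
+    learning_rate = 0.5
+    lr_lambda = lambda epoch: 0.95 ** epoch
+
+    for epoch in range(3):
+        # 输出与上文算法描述中的结果一致:0.5、0.475、0.45125
+        print(epoch, learning_rate * lr_lambda(epoch))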
+**代码示例**:
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+    import numpy as np
+    with fluid.dygraph.guard():
+        x = np.random.uniform(-1, 1, [10, 10]).astype("float32")
+        linear = fluid.dygraph.Linear(10, 10)
+        input = fluid.dygraph.to_variable(x)
+        scheduler = fluid.dygraph.LambdaDecay(0.5, lr_lambda=lambda x: 0.95**x)
+        adam = fluid.optimizer.Adam(learning_rate=scheduler, parameter_list=linear.parameters())
+        for epoch in range(6):
+            for batch_id in range(5):
+                out = linear(input)
+                loss = fluid.layers.reduce_mean(out)
+                adam.minimize(loss)
+            scheduler.epoch()
+            print("epoch:{}, current lr is {}".format(epoch, adam.current_step_lr()))
+            # epoch:0, current lr is 0.5
+            # epoch:1, current lr is 0.475
+            # epoch:2, current lr is 0.45125
+
+.. py:method:: epoch(epoch=None)
+
+通过当前的 epoch 调整学习率,调整后的学习率将会在下一次调用 ``optimizer.minimize`` 时生效。
+
+参数:
+    - **epoch** (int|float,可选) - 指定当前的epoch数。默认:无,此时将会自动累计epoch数。
+
+返回:
+    无
+
+**代码示例**:
+
+    参照上述示例代码。
diff --git a/doc/paddle/api/paddle/fluid/dygraph/LayerNorm_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/LayerNorm_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9e0bb049667f42de584166b4340d9c69e0921dde
--- /dev/null
+++ b/doc/paddle/api/paddle/fluid/dygraph/LayerNorm_cn.rst
@@ -0,0 +1,56 @@
+.. _cn_api_fluid_dygraph_LayerNorm:
+
+LayerNorm
+-------------------------------
+
+.. py:class:: paddle.fluid.dygraph.LayerNorm(normalized_shape, scale=True, shift=True, begin_norm_axis=1, epsilon=1e-05, param_attr=None, bias_attr=None, act=None, dtype="float32")
+
+
+
+
+该接口用于构建 ``LayerNorm`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。其中实现了层归一化层(Layer Normalization Layer)的功能,其可以应用于小批量输入数据。更多详情请参考:`Layer Normalization `_
+
+计算公式如下
+
+.. math::
+
+    \mu &= \frac{1}{H}\sum_{i=1}^{H}x_i \\
+    \sigma &= \sqrt{\frac{1}{H}\sum_{i=1}^{H}{(x_i-\mu)^2} + \epsilon} \\
+    y &= f(\frac{g}{\sigma}(x-\mu) + b)
+
+- :math:`x` : 该层神经元的向量表示
+- :math:`H` : 层中隐藏神经元个数
+- :math:`\epsilon` : 添加到方差上的较小值,以防止除零
+- :math:`g` : 可训练的比例参数
+- :math:`b` : 可训练的偏差参数
+
+
+参数:
+    - **normalized_shape** (int 或 list 或 tuple) – 需规范化的shape,期望的输入shape为 ``[*, normalized_shape[0], normalized_shape[1], ..., normalized_shape[-1]]``。如果是单个整数,则此模块将在最后一个维度上规范化(此时最后一维的维度需与该参数相同)。
+    - **scale** (bool, 可选) - 指明是否在归一化后学习自适应增益 ``g`` 。默认值:True。
+    - **shift** (bool, 可选) - 指明是否在归一化后学习自适应偏差 ``b`` 。默认值:True。
+    - **epsilon** (float, 可选) - 为了数值稳定,加到方差上的较小值。默认值:1e-05。
+    - **param_attr** (ParamAttr, 可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。
+    - **bias_attr** (ParamAttr, 可选) - 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。
+    - **act** (str, 可选) - 应用于输出上的激活函数,如tanh、softmax、sigmoid,relu等,支持列表请参考 :ref:`api_guide_activations` ,默认值为None。
+    - **dtype** (str,可选) - 输出Tensor或LoDTensor的数据类型,数据类型必须为:float32或float64,默认为float32。
+
+
+返回:无
+
+**代码示例**
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+    from paddle.fluid.dygraph.base import to_variable
+    import numpy
+
+    x = numpy.random.random((3, 32, 32)).astype('float32')
+    with fluid.dygraph.guard():
+        x = to_variable(x)
+        layerNorm = fluid.LayerNorm([32, 32])
+        ret = layerNorm(x)
+
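+上面的归一化在 ``normalized_shape`` 所覆盖的维度上计算均值和方差。下面用numpy对这一计算做一个等价示意(仅为公式演示:忽略可训练参数 g、b 以及激活函数):
+
+.. code-block:: python
+
+    import numpy as np
+
+    x = np.random.random((3, 32, 32)).astype('float32')
+    # normalized_shape = [32, 32],即在最后两个维度上做归一化
+    axes = (1, 2)
+    mu = x.mean(axis=axes, keepdims=True)
+    var = x.var(axis=axes, keepdims=True)
+    y = (x - mu) / np.sqrt(var + 1e-05)
+
diff --git a/doc/paddle/api/paddle/fluid/dygraph/LinearLrWarmup_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/LinearLrWarmup_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a998567058693d62903618246658883b2f9310d9
--- /dev/null
+++ b/doc/paddle/api/paddle/fluid/dygraph/LinearLrWarmup_cn.rst
@@ -0,0 +1,105 @@
+.. 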
_cn_api_paddle_optimizer_LinearLrWarmup: + +LinearLrWarmup +----------------------------------- + +.. py:class:: paddle.optimizer.lr_scheduler.LinearLrWarmup(learing_rate, warmup_steps, start_lr, end_lr, last_epoch=-1, verbose=False) + +该接口提供一种学习率优化策略-线性学习率热身(warm up)对学习率进行初步调整。在正常调整学习率之前,先逐步增大学习率。 + +当训练步数小于热身步数(warmup_steps)时,学习率lr按如下方式更新: + +.. code-block:: text + + linear_step = end_lr - start_lr + lr = start_lr + linear_step * (epoch / warmup_steps) + +当训练步数大于等于热身步数(warmup_steps)时,学习率lr为: + +.. code-block:: text + + lr = learning_rate + +其中learning_rate为热身之后的学习率。 + +参数 +::::::::: + - **learning rate** (float|_LRScheduler):热启训练之后的学习率,可以是Python的float或_LRScheduler子类。 + - **warmup_steps** (int):进行warm up过程的步数。 + - **start_lr** (float):warm up的起始学习率。 + - **end_lr** (float):warm up的最终学习率。 + - **last_epoch** (int,可选): 上一轮的轮数,重启训练时设置为上一轮的epoch数。默认值为 -1,则为初始学习率 。 + - **verbose** (bool,可选):如果是 `True` ,则在每一轮更新时在标准输出 `stdout` 输出一条信息。默认值为 ``False`` 。 + + +返回 +::::::::: +返回计算LinearLrWarmup的可调用对象。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + # train on default dygraph mode + paddle.disable_static() + x = np.random.uniform(-1, 1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + scheduler = paddle.optimizer.LinearLrWarmup( + learning_rate=0.5, warmup_steps=20, start_lr=0, end_lr=0.5, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameter_list=linear.parameters()) + for epoch in range(20): + for batch_id in range(2): + x = paddle.to_tensor(x) + out = linear(x) + loss = paddle.reduce_mean(out) + loss.backward() + sgd.minimize(loss) + linear.clear_gradients() + scheduler.step() + + # train on static mode + paddle.enable_static() + main_prog = paddle.static.Program() + start_prog = paddle.static.Program() + with paddle.static.program_guard(main_prog, start_prog): + x = paddle.static.data(name='x', shape=[None, 4, 5]) + y = paddle.static.data(name='y', shape=[None, 4, 5]) + z = paddle.static.nn.fc(x, 100) + loss = paddle.mean(z) + scheduler = paddle.optimizer.lr_scheduler.LinearLrWarmup( + learning_rate=0.5, warmup_steps=20, start_lr=0, end_lr=0.5, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler) + sgd.minimize(loss) + + exe = paddle.static.Executor() + exe.run(start_prog) + for epoch in range(20): + for batch_id in range(2): + out = exe.run( + main_prog, + feed={ + 'x': np.random.randn(3, 4, 5).astype('float32'), + 'y': np.random.randn(3, 4, 5).astype('float32') + }, + fetch_list=loss.name) + scheduler.step() + +.. py:method:: step(epoch=None) + +step函数需要在优化器的 `step()` 函数之后调用,调用之后将会根据epoch数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 + +参数: + - **epoch** (int,可选)- 指定具体的epoch数。默认值None,此时将会从-1自动累加 ``epoch`` 数。 + +返回: + 无。 + +**代码示例** : + + 参照上述示例代码。 + + diff --git a/doc/paddle/api/paddle/fluid/dygraph/Linear_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/Linear_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..48ca2392ef39f2b1fd4dbf05f5828885a1abe298 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/Linear_cn.rst @@ -0,0 +1,60 @@ +.. _cn_api_fluid_dygraph_Linear: + +Linear +------------------------------- + +.. py:class:: paddle.fluid.dygraph.Linear(input_dim, output_dim, param_attr=None, bias_attr=None, act=None, dtype='float32') + + + + + +**线性变换层:** + +.. 
math:: + + \\Out = Act({XW + b})\\ + +其中,:math:`X` 为输入的 Tensor, :math:`W` 和 :math:`b` 分别为权重和偏置。 + +Linear 层只接受一个 Tensor 的输入。 +Linear 层将输入 Tensor 与权重矩阵 :math:`W` 相乘,然后生成形状为 :math:`[N,*,output_dim]` 的输出张量, +其中 :math:`N` 是批量大小,:math:`*` 表示任意数量的附加尺寸。 +如果 bias_attr 不是 None,则将创建一个 bias 变量并将其添加到输出中。 +最后,如果激活 act 不是 None,则相应激活函数也将应用于输出上。 + +参数: + - **input_dim** (int) – 线性变换层输入单元的数目。 + - **output_dim** (int) – 线性变换层输出单元的数目。 + - **param_attr** (ParamAttr, 可选) – 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr, 可选) – 指定偏置参数属性的对象,若 `bias_attr` 为bool类型,如果设置为False,表示不会为该层添加偏置;如果设置为True,表示使用默认的偏置参数属性。默认值为None,表示使用默认的偏置参数属性。默认的偏置参数属性将偏置参数的初始值设为0。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **act** (str, 可选) – 应用于输出上的激活函数,如tanh、softmax、sigmoid,relu等,支持列表请参考 :ref:`api_guide_activations` ,默认值为None。 + - **dtype** (str, 可选) – 权重的数据类型,可以为float32或float64。默认为float32。 + +返回:无 + +**代码示例** + +.. code-block:: python + + from paddle.fluid.dygraph.base import to_variable + import paddle.fluid as fluid + from paddle.fluid.dygraph import Linear + import numpy as np + + data = np.random.uniform( -1, 1, [30, 10, 32] ).astype('float32') + with fluid.dygraph.guard(): + linear = Linear(32, 64) + data = to_variable(data) + res = linear(data) # [30, 10, 64] + +属性 +:::::::::::: +.. py:attribute:: weight + +本层的可学习参数,类型为 ``Parameter`` + +.. py:attribute:: bias + +本层的可学习偏置,类型为 ``Parameter`` + diff --git a/doc/paddle/api/paddle/fluid/dygraph/MultiStepDecay_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/MultiStepDecay_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7333f9a553c8588b4f5cff607d8fb7bb524f2624 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/MultiStepDecay_cn.rst @@ -0,0 +1,71 @@ +.. _cn_api_fluid_dygraph_MultiStepDecay: + +MultiStepDecay +------------------------------- + + +.. py:class:: paddle.fluid.dygraph.MultiStepDecay(learning_rate, milestones, decay_rate=0.1) + + + +该接口提供 ``MultiStep`` 衰减学习率的功能。 + +算法可以描述为: + +.. code-block:: text + + learning_rate = 0.5 + milestones = [30, 50] + decay_rate = 0.1 + if epoch < 30: + learning_rate = 0.5 + elif epoch < 50: + learning_rate = 0.05 + else: + learning_rate = 0.005 + +参数: + - **learning_rate** (float|int) - 初始化的学习率。可以是Python的float或int。 + - **milestones** (tuple|list) - 列表或元组。必须是递增的。 + - **decay_rate** (float, optional) - 学习率的衰减率。 ``new_lr = origin_lr * decay_rate`` 。其值应该小于1.0。默认:0.1。 + +返回: 无 + +**代码示例**: + + .. code-block:: python + + import paddle.fluid as fluid + import numpy as np + with fluid.dygraph.guard(): + x = np.random.uniform(-1, 1, [10, 10]).astype("float32") + linear = fluid.dygraph.Linear(10, 10) + input = fluid.dygraph.to_variable(x) + scheduler = fluid.dygraph.MultiStepDecay(0.5, milestones=[3, 5]) + adam = fluid.optimizer.Adam(learning_rate = scheduler, parameter_list = linear.parameters()) + for epoch in range(6): + for batch_id in range(5): + out = linear(input) + loss = fluid.layers.reduce_mean(out) + adam.minimize(loss) + scheduler.epoch() + print("epoch:{}, current lr is {}" .format(epoch, adam.current_step_lr())) + # epoch:0, current lr is 0.5 + # epoch:1, current lr is 0.5 + # epoch:2, current lr is 0.5 + # epoch:3, current lr is 0.05 + # epoch:4, current lr is 0.05 + # epoch:5, current lr is 0.005 + +.. 
py:method:: epoch(epoch=None) +通过当前的 epoch 调整学习率,调整后的学习率将会在下一次调用 ``optimizer.minimize`` 时生效。 + +参数: + - **epoch** (int|float,可选) - 类型:int或float。指定当前的epoch数。默认:无,此时将会自动累计epoch数。 + +返回: + 无 + +**代码示例**: + + 参照上述示例代码。 diff --git a/doc/paddle/api/paddle/fluid/dygraph/NCE_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/NCE_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..45302572ed5bfb377763073775d19cbed4310079 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/NCE_cn.rst @@ -0,0 +1,82 @@ +.. _cn_api_fluid_dygraph_NCE: + +NCE +------------------------------- + +.. py:class:: paddle.fluid.dygraph.NCE(num_total_classes, dim, param_attr=None, bias_attr=None, num_neg_samples=None, sampler='uniform', custom_dist=None, seed=0, is_sparse=False, dtype="float32") + + + + +该接口用于构建 ``NCE`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。其中实现了 ``NCE`` 损失函数的功能,其默认使用均匀分布进行抽样,计算并返回噪音对比估计( noise-contrastive estimation training loss)。更多详情请参考:`Noise-contrastive estimation: A new estimation principle for unnormalized statistical models `_ + +参数: + - **num_total_classes** (int) - 所有样本中的类别的总数。 + - **dim** (int) - 输入的维度(一般为词嵌入的维度)。 + - **sample_weight** (Variable, 可选) - 维度为\[batch_size, 1\],存储每个样本的权重。每个样本的默认权重为1.0。默认值:None。 + - **param_attr** (ParamAttr, 可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr, 可选) - 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **num_neg_samples** (int, 可选) - 负样本的数量。默认值:10。 + - **sampler** (str, 可选) – 指明采样器的类型,用于从负类别中进行采样。可以是 ``uniform`` 、 ``log_uniform`` 或 ``custom_dist`` 。 默认值: ``uniform`` 。 + - **custom_dist** (float[], 可选) – float[] 类型的数据,并且它的长度为 ``num_total_classes`` 。如果采样器类别为 ``custom_dist`` ,则使用此参数。custom_dist\[i\]是第i个类别被取样的概率。默认值:None + - **seed** (int, 可选) – 采样器使用的随机种子。默认值:0。 + - **is_sparse** (bool, 可选) – 指明是否使用稀疏更新,如果为True, :math:`weight@GRAD` 和 :math:`bias@GRAD` 会变为 SelectedRows。默认值:False。 + - **dtype** (str, 可选) - 数据类型,可以为"float32"或"float64"。默认值:"float32"。 + +返回:无 + +**代码示例** + +.. code-block:: python + + + import numpy as np + import paddle.fluid as fluid + + window_size = 5 + dict_size = 20 + label_word = int(window_size // 2) + 1 + inp_word = np.array([[1], [2], [3], [4], [5]]).astype('int64') + nid_freq_arr = np.random.dirichlet(np.ones(20) * 1000).astype('float32') + + with fluid.dygraph.guard(): + words = [] + for i in range(window_size): + words.append(fluid.dygraph.base.to_variable(inp_word[i])) + + emb = fluid.Embedding( + size=[dict_size, 32], + param_attr='emb.w', + is_sparse=False) + + embs3 = [] + for i in range(window_size): + if i == label_word: + continue + + emb_rlt = emb(words[i]) + embs3.append(emb_rlt) + + embs3 = fluid.layers.concat(input=embs3, axis=1) + nce = fluid.NCE( + num_total_classes=dict_size, + dim=embs3.shape[1], + num_neg_samples=2, + sampler="custom_dist", + custom_dist=nid_freq_arr.tolist(), + seed=1, + param_attr='nce.w', + bias_attr='nce.b') + + wl = fluid.layers.unsqueeze(words[label_word], axes=[0]) + nce_loss3 = nce(embs3, wl) + +属性 +:::::::::::: +.. py:attribute:: weight + +本层的可学习参数,类型为 ``Parameter`` + +.. py:attribute:: bias + +本层的可学习偏置,类型为 ``Parameter`` diff --git a/doc/paddle/api/paddle/fluid/dygraph/PRelu_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/PRelu_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8252bee477151d00696254cbe2ae4ea8dda01261 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/PRelu_cn.rst @@ -0,0 +1,66 @@ +.. 
_cn_api_fluid_dygraph_PRelu: + +PRelu +------------------------------- + +.. py:class:: paddle.fluid.dygraph.PRelu(mode, input_shape=None, param_attr=None, dtype="float32") + + + + +该接口用于构建 ``PRelu`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。其中实现了 ``PRelu`` 激活函数的三种激活方式。 + +计算公式如下: + +.. math:: + y = max(0, x) + \alpha min(0, x) + + +参数: + - **mode** (str) - 权重共享模式。共提供三种激活方式: + + .. code-block:: text + + all:所有元素使用同一个alpha值 + channel:在同一个通道中的元素使用同一个alpha值 + element:每一个元素有一个独立的alpha值 + + - **channel** (int,可选) - 通道数。该参数在mode参数为"channel"时是必须的。默认为None。 + - **input_shape** (int 或 list 或 tuple,可选) - 输入的维度。该参数在mode参数为"element"时是必须的。默认为None。 + - **param_attr** (ParamAttr, 可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **dtype** (str, 可选) - 数据类型,可以为"float32"或"float64"。默认值:"float32"。 + +返回:无 + +**代码示例:** + +.. code-block:: python + + import paddle.fluid as fluid + from paddle.fluid.dygraph.base import to_variable + import numpy as np + + inp_np = np.ones([5, 200, 100, 100]).astype('float32') + with fluid.dygraph.guard(): + inp_np = to_variable(inp_np) + prelu0 = fluid.PRelu( + mode='all', + param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(1.0))) + dy_rlt0 = prelu0(inp_np) + prelu1 = fluid.PRelu( + mode='channel', + channel=200, + param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(1.0))) + dy_rlt1 = prelu1(inp_np) + prelu2 = fluid.PRelu( + mode='element', + input_shape=inp_np.shape, + param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(1.0))) + dy_rlt2 = prelu2(inp_np) + +属性 +:::::::::::: +.. py:attribute:: weight + +本层的可学习参数,类型为 ``Parameter`` + diff --git a/doc/paddle/api/paddle/fluid/dygraph/Pool2D_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/Pool2D_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4647cfdf671cce1d93124df5532126bd0fb32c55 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/Pool2D_cn.rst @@ -0,0 +1,92 @@ +.. _cn_api_fluid_dygraph_Pool2D: + +Pool2D +------------------------------- + +.. py:class:: paddle.fluid.dygraph.Pool2D(pool_size=-1, pool_type='max', pool_stride=1, pool_padding=0, global_pooling=False, use_cudnn=True, ceil_mode=False, exclusive=True, data_format="NCHW") + + + + +该接口用于构建 ``Pool2D`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。其将在神经网络中构建一个二维池化层,并使用上述输入参数的池化配置,为二维空间池化操作,根据 ``input`` , 池化类型 ``pool_type`` , 池化核大小 ``pool_size`` , 步长 ``pool_stride`` ,填充 ``pool_padding`` 这些参数得到输出。 + +输入X和输出Out默认是NCHW格式,N为批大小,C是通道数,H是特征高度,W是特征宽度。参数( ``ksize``, ``strides``, ``paddings`` )含有两个整型元素。分别表示高度和宽度上的参数。输入X的大小和输出Out的大小可能不一致。 + +例如: + +输入: + X shape::math:`\left ( N,C,H_{in},W_{in} \right )` + +输出: + Out shape::math:`\left ( N,C,H_{out},W_{out} \right )` + +如果 ``ceil_mode`` = false: + +.. math:: + H_{out} = \frac{(H_{in} - ksize[0] + 2 * paddings[0])}{strides[0]} + 1 + +.. math:: + W_{out} = \frac{(W_{in} - ksize[1] + 2 * paddings[1])}{strides[1]} + 1 + +如果 ``ceil_mode`` = true: + +.. math:: + H_{out} = \frac{(H_{in} - ksize[0] + 2 * paddings[0] + strides[0] - 1)}{strides[0]} + 1 + +.. math:: + W_{out} = \frac{(W_{in} - ksize[1] + 2 * paddings[1] + strides[1] - 1)}{strides[1]} + 1 + +如果 ``exclusive`` = false: + +.. math:: + hstart &= i * strides[0] - paddings[0] \\ + hend &= hstart + ksize[0] \\ + wstart &= j * strides[1] - paddings[1] \\ + wend &= wstart + ksize[1] \\ + Output(i ,j) &= \frac{sum(Input[hstart:hend, wstart:wend])}{ksize[0] * ksize[1]} + +如果 ``exclusive`` = true: + +.. 
math:: + hstart &= max(0, i * strides[0] - paddings[0])\\ + hend &= min(H, hstart + ksize[0]) \\ + wstart &= max(0, j * strides[1] - paddings[1]) \\ + wend & = min(W, wstart + ksize[1]) \\ + Output(i ,j) & = \frac{sum(Input[hstart:hend, wstart:wend])}{(hend - hstart) * (wend - wstart)} + +参数: + - **pool_size** (int|list|tuple, 可选) - 池化核的大小。如果它是一个元组或列表,它必须包含两个整数值, (pool_size_Height, pool_size_Width)。若为一个整数,则它的平方值将作为池化核大小,比如若pool_size=2, 则池化核大小为2x2。默认值:-1。 + - **pool_type** (str, 可选) - 池化类型,可以是”max“对应max-pooling,“avg”对应average-pooling。默认为”max“。 + - **pool_stride** (int|list|tuple, 可选) - 池化层的步长。如果它是一个元组或列表,它将包含两个整数,(pool_stride_Height, pool_stride_Width)。若为一个整数,则表示H和W维度上stride均为该值。默认值为1。 + - **pool_padding** (int|list|tuple, 可选) - 填充大小。如果它是一个元组或列表,它必须包含两个整数值,(pool_padding_on_Height, pool_padding_on_Width)。若为一个整数,则表示H和W维度上padding均为该值。默认值为1。 + - **global_pooling** (bool, 可选)- 是否用全局池化。如果global_pooling = True, ``pool_size`` 和 ``pool_padding`` 将被忽略,默认False。 + - **use_cudnn** (bool, 可选)- 是否用cudnn核,只有已安装cudnn库时才有效。默认True。 + - **ceil_mode** (bool, 可选)- 是否用ceil函数计算输出高度和宽度。如果设为False,则使用floor函数。默认为False。 + - **exclusive** (bool, 可选) - 是否在平均池化模式忽略填充值。默认为True。 + - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 + +返回:无 + +抛出异常: + - ``ValueError`` - 如果 ``pool_type`` 既不是“max”也不是“avg”。 + - ``ValueError`` - 如果 ``global_pooling`` 为False并且 ``pool_size`` 为-1。 + - ``ValueError`` - 如果 ``use_cudnn`` 不是bool值。 + - ``ValueError`` - 如果 ``data_format`` 既不是"NCHW"也不是"NHWC"。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + from paddle.fluid.dygraph.base import to_variable + import numpy as np + + with fluid.dygraph.guard(): + data = np.random.random((3, 32, 32, 5)).astype('float32') + pool2d = fluid.dygraph.Pool2D(pool_size=2, + pool_type='max', + pool_stride=1, + global_pooling=False) + pool2d_res = pool2d(to_variable(data)) + + diff --git a/doc/paddle/api/paddle/fluid/dygraph/ProgramTranslator_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/ProgramTranslator_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..75cd816fbdeb794fbfb8efd8ff471c16d6812875 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/ProgramTranslator_cn.rst @@ -0,0 +1,263 @@ +.. _cn_api_fluid_dygraph_ProgramTranslator + +ProgramTranslator +------------------------------- + +.. py:class:: paddle.fluid.dygraph.dygraph_to_static.ProgramTranslator() + +将动态图函数转为静态图函数的类。该类是个单例(singleton)。 + +参数: + 无。 + +返回:ProgramTranslator 单例对象。 + +返回类型:ProgramTranslator。 + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + + # 以下两种调用方法得到同一个对象,因为ProgramTranslator是个单例 + fluid.dygraph.ProgramTranslator() + fluid.dygraph.ProgramTranslator.get_instance() + +.. py:method:: enable(enable_declarative) + +全局开启或关闭动态图转化为静态图。 + +参数: + - **enable_declarative** (bool) - 设置True或者False来打开或关闭declarative 。 + +返回:None。 + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + @fluid.dygraph.jit.declarative + def func(x): + x = fluid.dygraph.to_variable(x) + if fluid.layers.mean(x) > 0: + x_v = x - 1 + else: + x_v = x + 1 + return x_v + + prog_trans = fluid.dygraph.ProgramTranslator() + prog_trans.enable(False) + + x = np.ones([1, 2]) + # The declarative is disabled so the func is run in dygraph + with fluid.dygraph.guard(): + print(func(x).numpy()) # [[2. 2.]] + +.. 
py:method:: get_output(dygraph_func, *args, **kwargs) + +返回动态图函数输出的VarBase,但是该动态图函数的数值计算过程会被转化为静态图模式运行。 + +参数: + - **dygraph_func** (callable) - 动态图函数。 + - **args, kwargs** - 动态图函数的输入。 + +返回:包含数值结果的VarBase或者VarBase的元组,是输入动态图函数的返回值。 + +返回类型:VarBase或者VarBase的元组。 + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + def func(x): + x = fluid.dygraph.to_variable(x) + if fluid.layers.mean(x) > 0: + x_v = x - 1 + else: + x_v = x + 1 + return x_v + + prog_trans = fluid.dygraph.ProgramTranslator() + + with fluid.dygraph.guard(): + x = np.ones([1, 2]) + x_v = prog_trans.get_output(func, x) + print(x_v.numpy()) # [[0. 0.]] + +.. py:method:: get_func(dygraph_func) + +返回一个可调用函数,该函数将输入动态图函数接口转化为静态图组网接口。组网接口不像动态图接口,其并不直接返回数据结果。用户需要自行处理对应的Program和Eexecutor。 + +参数: + - **dygraph_func** (callable) - 动态图函数。 + +返回:将动态图接口转为静态图组网接口的可调用函数。 + +返回类型:可调用函数。 + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + def func(x): + x = fluid.dygraph.to_variable(x) + if fluid.layers.mean(x) > 0: + x_v = x - 1 + else: + x_v = x + 1 + return x_v + + prog_trans = fluid.dygraph.ProgramTranslator() + + static_func = prog_trans.get_func(func) + print(callable(static_func)) # True + +.. py:method:: get_program(dygraph_func, *args, **kwargs) + +返回动态图函数转化后的静态图Program和输入输出Varaible。用户可以使用Executor来执行该Program。 + +参数: + - **dygraph_func** (callable) - 动态图函数。 + - **args, kwargs** - 动态图函数的输入。 + +返回:元组(main_program, startup_program, inputs, outputs) + main_program: 转化后的main program。 + startup_program: 转化后的startup program。 + inputs: 输入Variable的列表,这些Variable可以在执行去feed。 + outputs: 输出Variable的列表,这些Variable可以在运行时被fetch。 + +返回类型:类型为(Program, Program, list(Variable), list(Variable)) 的元组。 + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + def func(x): + x = fluid.dygraph.to_variable(x) + if fluid.layers.mean(x) > 0: + x_v = x - 1 + else: + x_v = x + 1 + return x_v + + prog_trans = fluid.dygraph.ProgramTranslator() + + x = np.ones([1, 2]) + main_prog, start_prog, inputs, outputs = prog_trans.get_program(func, x) + print([i.name for i in inputs]) + # ['feed_0'] 需要被feed的输入Variable名字,对应x + print([o.name for o in outputs]) + # ['_generated_var_4'] 需要被fetch的输出Variable名字,对应x_v + +.. py:method:: get_code(dygraph_func) + +返回动态图函数转化后的静态图代码字符串。 + +参数: + - **dygraph_func** (callable) - 动态图函数。 + +返回:转化后的静态图代码字符串。 + +返回类型:str。 + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + def func(x): + x = fluid.dygraph.to_variable(x) + if fluid.layers.mean(x) > 0: + x_v = x - 1 + else: + x_v = x + 1 + return x_v + + prog_trans = fluid.dygraph.ProgramTranslator() + + code = prog_trans.get_code(func) + print(type(code)) # + +.. py:method:: save_inference_model(dirname, feed=None, fetch=None) + +将现有模型保存为预测模型。保存过程会裁剪main program,只保存和预测输入输出有关的部分,构建成新的Program,并将此Program和相关参数保存到指定dirname路径下,被保存的模型可以被 :ref:`cn_api_fluid_io_load_inference_model` 或者C++预测接口使用。 + +参数: + - **dirname** (str) - 存储预测模型的目录。 + - **feed (list[int], 可选)** - 预测模型要保存的输入Variable的序号。如果为None,则动态图函数的所有输入变量将被保存。默认值为None。 + - **fetch (list[int], 可选)** - 预测模型要保存的输出Variable的序号。如果为None,则动态图函数的所有输出变量将被保存。默认值为None。 + +返回:None。 + +**示例代码** + +.. 
code-block:: python + + import numpy as np + import paddle.fluid as fluid + from paddle.fluid.dygraph import Linear + from paddle.fluid.dygraph import declarative + from paddle.fluid.dygraph import ProgramTranslator + + class SimpleNet(fluid.dygraph.Layer): + def __init__(self, in_size, out_size): + super(SimpleNet, self).__init__() + self._linear = Linear(in_size, out_size) + + @declarative + def forward(self, x): + y = self._linear(x) + z = self._linear(y) + loss = fluid.layers.mean(z) + return z, loss + + with fluid.dygraph.guard(fluid.CPUPlace()): + net = SimpleNet(8, 8) + adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters()) + x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32')) + for i in range(10): + loss, out = net(x) + loss.backward() + adam.minimize(loss) + net.clear_gradients() + # 保存模型 + # 注意fetch=[0]意味着我们将序号为0的动态图return输出'z'作为预测输出 + prog_trans = ProgramTranslator() + prog_trans.save_inference_model("./dy2stat_infer_model", fetch=[0]) + + # 在这个例子中,预测模型会根据输出'z'进行裁剪。被裁剪后的Program 会被保 + # 存在"./dy2stat_infer_model" 文件夹,并且参数也会保存为同一个文件夹下 + # 不同文件。 + +.. py:method:: get_program_cache() + +返回ProgramCache单例。这个方法是PaddlePaddle开发者用来管理ProgramTranslator中的Program缓存,普通用户不需要使用这个方法。 + +返回:ProgramTranslator中的ProgramCache。 + +返回类型:ProgramCache。 + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + + prog_trans = fluid.dygraph.ProgramTranslator() + prog_cache = prog_trans.get_program_cache() + diff --git a/doc/paddle/api/paddle/fluid/dygraph/ReduceLROnPlateau_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/ReduceLROnPlateau_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b0a0b75f7b31244421f02cab719a342461a9f7c1 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/ReduceLROnPlateau_cn.rst @@ -0,0 +1,89 @@ +.. _cn_api_fluid_dygraph_ReduceLROnPlateau: + +ReduceLROnPlateau +------------------------------- + +**注意:该API仅支持【动态图】模式** + +.. 
py:class:: paddle.fluid.dygraph.ReduceLROnPlateau(learning_rate, mode='min', decay_rate=0.1, patience=10, verbose=False, threshold=1e-4, threshold_mode='rel', cooldown=0, min_lr=0, eps=1e-8, dtype='float32') + +该API为 ``loss`` 自适应的学习率衰减策略。默认情况下,当 ``loss`` 停止下降时,降低学习率(如果将 ``mode`` 设置为 `'max'` ,此时判断逻辑相反, ``loss`` 停止上升时降低学习率)。其思想是:一旦模型表现不再提升,将学习率降低2-10倍对模型的训练往往有益。 + +``loss`` 是传入到该类方法 ``step`` 中的参数,其必须是shape为[1]的1-D Tensor。 如果 ``loss`` 停止下降(``mode`` 为 `min` 时)超过 ``patience`` 个epoch,学习率将会减小为 +`learning_rate * decay_rate` 。 + +此外,每降低一次学习率后,将会进入一个时长为 ``cooldown`` 个epoch的冷静期,在冷静期内,将不会监控 ``loss`` 的变化情况,也不会衰减。 +在冷静期之后,会继续监控 ``loss`` 的上升或下降。 + +参数: + - **learning_rate** (Variable|float|int) - 初始学习率。其类型可以是Python的float类型,如果输入int类型则会被转为float类型。其也可以是shape为[1]的 + 1-D Tensor,且相应数据类型必须为 "float32" 或 "float64" 。 + - **mode** (str,可选) - `'min'` 和 `'max'` 之一。通常情况下,为 `'min'` ,此时当 ``loss`` 停止下降时学习率将减小。默认:`'min'` 。 + (注意:仅在特殊用法时,可以将其设置为 `'max'` ,此时判断逻辑相反, ``loss`` 停止上升学习率才减小) + - **decay_rate** (float,可选) - 学习率衰减的比例。`new_lr = origin_lr * decay_rate` ,它是值小于1.0的float型数字,默认: 0.1。 + - **patience** (int,可选) - 当 ``loss`` 连续 ``patience`` 个epoch没有下降(mode: 'min')或上升(mode: 'max')时,学习率才会减小。默认:10。 + - **verbose** (bool,可选) - 如果为 ``True`` , 会在每次更新optimizer中的learning_rate时,打印信息。默认:``False`` 。 + - **threshold** (float,可选) - ``threshold`` 和 ``threshold_mode`` 两个参数将会决定 ``loss`` 最小变化的阈值。小于该阈值的变化 + 将会被忽视。默认:1e-4。 + - **threshold_mode** (str,可选) - `'rel'` 和 `'abs'` 之一。在 `'rel'` 模式下, ``loss`` 最小变化的阈值是 `last_loss * threshold` , + 其中 ``last_loss`` 是 ``loss`` 在上个epoch的值。在 `'abs'` 模式下,``loss`` 最小变化的阈值是 `threshold` 。 默认:`'rel'`。 + - **cooldown** (int,可选) - 在学习速率每次减小之后,会进入时长为 ``cooldown`` 个epoch的冷静期。默认:0。 + - **min_lr** (float,可选) - 最小的学习率。减小后的学习率最低下界限。默认:0。 + - **eps** (float,可选) - 如果新旧学习率间的差异小于 ``eps`` ,则不会更新。默认值:1e-8。 + - **dtype** (str,可选) – 学习率值的数据类型,可以为"float32", "float64"。默认:"float32"。 + +返回: ``loss`` 自适应的学习率 + +返回类型:Variable + +**代码示例**: + + .. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + with fluid.dygraph.guard(): + x = np.random.uniform(-1, 1, [10, 10]).astype("float32") + linear = fluid.dygraph.Linear(10, 10) + input = fluid.dygraph.to_variable(x) + + adam = fluid.optimizer.Adam( + learning_rate = fluid.dygraph.ReduceLROnPlateau( + learning_rate = 1.0, + decay_rate = 0.5, + patience = 5, + verbose = True, + cooldown = 3), + parameter_list = linear.parameters()) + + for epoch in range(10): + total_loss = 0 + for bath_id in range(5): + out = linear(input) + loss = fluid.layers.reduce_mean(out) + total_loss += loss + adam.minimize(loss) + + avg_loss = total_loss/5 + + # 根据传入total_loss,调整学习率 + reduce_lr.step(avg_loss) + lr = adam.current_step_lr() + print("current avg_loss is %s, current lr is %s" % (avg_loss.numpy()[0], lr)) + + + +.. py:method:: step(loss) +需要在每个epoch调用该方法,其根据传入的 ``loss`` 调整optimizer中的学习率,调整后的学习率将会在下一次调用 ``optimizer.minimize`` 时生效。 + +参数: + - **loss** (Variable) - 类型:Variable,shape为[1]的1-D Tensor。将被用来判断是否需要降低学习率。如果 ``loss`` 连续 ``patience`` 个epochs没有下降, + 将会降低学习率。 + +返回: + 无 + +**代码示例**: + + 参照其类中的说明。 diff --git a/doc/paddle/api/paddle/fluid/dygraph/SpectralNorm_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/SpectralNorm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..bc0b26cd7d12756b637235d55f8e23ea05a26c5c --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/SpectralNorm_cn.rst @@ -0,0 +1,51 @@ +.. _cn_api_fluid_dygraph_SpectralNorm: + +SpectralNorm +------------------------------- + +.. 
py:class:: paddle.fluid.dygraph.SpectralNorm(weight_shape, dim=0, power_iters=1, eps=1e-12, name=None, dtype="float32") + + + + +该接口用于构建 ``SpectralNorm`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。其中实现了谱归一化层的功能,用于计算fc、conv1d、conv2d、conv3d层的权重参数的谱正则值,输入权重参数应分别为2-D, 3-D, 4-D, 5-D张量,输出张量与输入张量维度相同。谱特征值计算方式如下: + +步骤1:生成形状为[H]的向量U,以及形状为[W]的向量V,其中H是输入权重张量的第 ``dim`` 个维度,W是剩余维度的乘积。 + +步骤2: ``power_iters`` 应该是一个正整数,用U和V迭代计算 ``power_iters`` 轮,迭代步骤如下。 + +.. math:: + + \mathbf{v} &:= \frac{\mathbf{W}^{T} \mathbf{u}}{\|\mathbf{W}^{T} \mathbf{u}\|_2}\\ + \mathbf{u} &:= \frac{\mathbf{W}^{T} \mathbf{v}}{\|\mathbf{W}^{T} \mathbf{v}\|_2} + +步骤3:计算 :math:`\sigma(\mathbf{W})` 并特征值值归一化。 + +.. math:: + \sigma(\mathbf{W}) &= \mathbf{u}^{T} \mathbf{W} \mathbf{v}\\ + \mathbf{W} &= \frac{\mathbf{W}}{\sigma(\mathbf{W})} + +可参考: `Spectral Normalization `_ + +参数: + - **weight_shape** (list 或 tuple) - 权重参数的shape。 + - **dim** (int, 可选) - 将输入(weight)重塑为矩阵之前应排列到第一个的维度索引,如果input(weight)是fc层的权重,则应设置为0;如果input(weight)是conv层的权重,则应设置为1。默认值:0。 + - **power_iters** (int, 可选) - 将用于计算的 ``SpectralNorm`` 功率迭代次数,默认值:1。 + - **eps** (float, 可选) - ``eps`` 用于保证计算规范中的数值稳定性,分母会加上 ``eps`` 防止除零。默认值:1e-12。 + - **name** (str, 可选) - 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + - **dtype** (str, 可选) - 数据类型,可以为"float32"或"float64"。默认值为"float32"。 + +返回:无 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + with fluid.dygraph.guard(): + weight = np.random.random((2, 8, 32, 32)).astype('float32') + spectralNorm = fluid.dygraph.nn.SpectralNorm(weight.shape, dim=1, power_iters=2) + ret = spectralNorm(fluid.dygraph.base.to_variable(weight)) + diff --git a/doc/paddle/api/paddle/fluid/dygraph/StepDecay_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/StepDecay_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..fe2131f17b89670023d4ae41e175e27e5d9caeba --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/StepDecay_cn.rst @@ -0,0 +1,72 @@ +.. _cn_api_fluid_dygraph_StepDecay: + +StepDecay +------------------------------- + + +.. py:class:: paddle.fluid.dygraph.StepDecay(learning_rate, step_size, decay_rate=0.1) + + + +该接口提供 ``step_size`` 衰减学习率的功能,每经过 ``step_size`` 个 ``epoch`` 时会通过 ``decay_rate`` 衰减一次学习率。 + +算法可以描述为: + +.. code-block:: text + + learning_rate = 0.5 + step_size = 30 + decay_rate = 0.1 + learning_rate = 0.5 if epoch < 30 + learning_rate = 0.05 if 30 <= epoch < 60 + learning_rate = 0.005 if 60 <= epoch < 90 + ... + +参数: + - **learning_rate** (float|int) - 初始化的学习率。可以是Python的float或int。 + - **step_size** (int) - 学习率每衰减一次的间隔。 + - **decay_rate** (float, optional) - 学习率的衰减率。 ``new_lr = origin_lr * decay_rate`` 。其值应该小于1.0。默认:0.1。 + +返回: 无 + +**代码示例**: + + .. 
code-block:: python + + import paddle.fluid as fluid + import numpy as np + with fluid.dygraph.guard(): + x = np.random.uniform(-1, 1, [10, 10]).astype("float32") + linear = fluid.dygraph.Linear(10, 10) + input = fluid.dygraph.to_variable(x) + scheduler = fluid.dygraph.StepDecay(0.5, step_size=3) + adam = fluid.optimizer.Adam(learning_rate = scheduler, parameter_list = linear.parameters()) + for epoch in range(9): + for batch_id in range(5): + out = linear(input) + loss = fluid.layers.reduce_mean(out) + adam.minimize(loss) + scheduler.epoch() + print("epoch:{}, current lr is {}" .format(epoch, adam.current_step_lr())) + # epoch:0, current lr is 0.5 + # epoch:1, current lr is 0.5 + # epoch:2, current lr is 0.5 + # epoch:3, current lr is 0.05 + # epoch:4, current lr is 0.05 + # epoch:5, current lr is 0.05 + # epoch:6, current lr is 0.005 + # epoch:7, current lr is 0.005 + # epoch:8, current lr is 0.005 + +.. py:method:: epoch(epoch=None) +通过当前的 epoch 调整学习率,调整后的学习率将会在下一次调用 ``optimizer.minimize`` 时生效。 + +参数: + - **epoch** (int|float,可选) - 类型:int或float。指定当前的epoch数。默认:无,此时将会自动累计epoch数。 + +返回: + 无 + +**代码示例**: + + 参照上述示例代码。 diff --git a/doc/paddle/api/paddle/fluid/dygraph/TreeConv_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/TreeConv_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..699a3f71ec5feb18f3da4d86f0c8df7566cb5c82 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/TreeConv_cn.rst @@ -0,0 +1,50 @@ +.. _cn_api_fluid_dygraph_TreeConv: + +TreeConv +------------------------------- + +.. py:class:: paddle.fluid.dygraph.TreeConv(feature_size, output_size, num_filters=1, max_depth=2, act='tanh', param_attr=None, bias_attr=None, name=None, dtype="float32") + + + + +该接口用于构建 ``TreeConv`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。其将在神经网络中构建一个基于树结构的卷积(Tree-Based Convolution)运算。基于树的卷积是基于树的卷积神经网络(TBCNN,Tree-Based Convolution Neural Network)的一部分,它用于对树结构进行分类,例如抽象语法树。 Tree-Based Convolution提出了一种称为连续二叉树的数据结构,它将多路(multiway)树视为二叉树。详情请参考: `基于树的卷积论文 `_ 。 + + +参数: + - **feature_size** (int) – nodes_vector的shape的最后一维的维度。 + - **output_size** (int) – 输出特征宽度。 + - **num_filters** (int, 可选) – 滤波器的数量,默认值为1。 + - **max_depth** (int, 可选) – 滤波器的最大深度,默认值为2。 + - **act** (str, 可选) – 应用于输出上的激活函数,如tanh、softmax、sigmoid,relu等,支持列表请参考 :ref:`api_guide_activations` ,默认值为None。 + - **param_attr** (ParamAttr, 可选) – 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr, 可选) – 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **name** (str, 可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + - **dtype** (str, 可选) - 数据类型,可以为"float32"或"float64"。默认值为"float32"。 + +返回:无 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import numpy + + with fluid.dygraph.guard(): + nodes_vector = numpy.random.random((1, 10, 5)).astype('float32') + edge_set = numpy.random.random((1, 9, 2)).astype('int32') + treeConv = fluid.dygraph.nn.TreeConv( + feature_size=5, output_size=6, num_filters=1, max_depth=2) + ret = treeConv(fluid.dygraph.base.to_variable(nodes_vector), fluid.dygraph.base.to_variable(edge_set)) + +属性 +:::::::::::: +.. py:attribute:: weight + +本层的可学习参数,类型为 ``Parameter`` + +.. 
py:attribute:: bias + +本层的可学习偏置,类型为 ``Parameter`` + diff --git a/doc/paddle/api/paddle/fluid/dygraph/base/disable_dygraph_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/base/disable_dygraph_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..59dc22b7b491cf59f13fe9586f6d98bbaa86f00b --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/base/disable_dygraph_cn.rst @@ -0,0 +1,22 @@ +.. _cn_api_fluid_disable_dygraph: + +disable_dygraph +------------------------------- + +.. py:function:: paddle.fluid.disable_dygraph() + +该接口关闭动态图模式。 + +返回:无 + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + fluid.enable_dygraph() # Now we are in dygraph mode + print(fluid.in_dygraph_mode()) # True + fluid.disable_dygraph() + print(fluid.in_dygraph_mode()) # False + diff --git a/doc/paddle/api/paddle/fluid/dygraph/base/enable_dygraph_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/base/enable_dygraph_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0df485bd856a4c70f3638db7c2f6b7470c143fe1 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/base/enable_dygraph_cn.rst @@ -0,0 +1,25 @@ +.. _cn_api_fluid_enable_dygraph: + +enable_dygraph +------------------------------- + +.. py:function:: paddle.fluid.enable_dygraph(place=None) + +该接口打开动态图模式。 + +参数: + - **place** (fluid.CPUPlace 或 fluid.CUDAPlace,可选) - 执行动态图的设备数目。若为None,则设备根据paddle的编译方式决定。默认值为 ``None``。 + +返回:无 + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + fluid.enable_dygraph() # Now we are in dygraph mode + print(fluid.in_dygraph_mode()) # True + fluid.disable_dygraph() + print(fluid.in_dygraph_mode()) # False + diff --git a/doc/paddle/api/paddle/fluid/dygraph/base/grad_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/base/grad_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1b164d104b6a6432b11b641431165186700b9381 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/base/grad_cn.rst @@ -0,0 +1,102 @@ +.. _cn_api_paddle_grad: + +grad +------------------------------- + +**注意:该API仅支持【动态图】模式** + +.. 
py:method:: paddle.grad(outputs, inputs, grad_outputs=None, retain_graph=None, create_graph=False, only_inputs=True, allow_unused=False, no_grad_vars=None) + +对于每个 `inputs` ,计算所有 `outputs` 相对于其的梯度和。 + +参数: + - **outputs** (Tensor|list(Tensor)|tuple(Tensor)) – 用于计算梯度的图的输出变量,或多个输出变量构成的list/tuple。 + - **inputs** (Tensor|list(Tensor)|tuple(Tensor)) - 用于计算梯度的图的输入变量,或多个输入变量构成的list/tuple。该API的每个返回值对应每个 `inputs` 的梯度。 + - **grad_outputs** (Tensor|list(Tensor|None)|tuple(Tensor|None), 可选) - `outputs` 变量梯度的初始值。若 `grad_outputs` 为None,则 `outputs` 梯度的初始值均为全1的Tensor。若 `grad_outputs` 不为None,它必须与 `outputs` 的长度相等,此时,若 `grad_outputs` 的第i个元素为None,则第i个 `outputs` 的梯度初始值为全1的Tensor;若 `grad_outputs` 的第i个元素为Tensor,则第i个 `outputs` 的梯度初始值为 `grad_outputs` 的第i个元素。默认值为None。 + - **retain_graph** (bool, 可选) - 是否保留计算梯度的前向图。若值为True,则前向图会保留,用户可对同一张图求两次反向。若值为False,则前向图会释放。默认值为None,表示值与 `create_graph` 相等。 + - **create_graph** (bool, 可选) - 是否创建计算过程中的反向图。若值为True,则可支持计算高阶导数。若值为False,则计算过程中的反向图会释放。默认值为False。 + - **only_inputs** (bool, 可选) - 是否只计算 `inputs` 的梯度。若值为False,则图中所有叶节点变量的梯度均会计算,并进行累加。若值为True,则只会计算 `inputs` 的梯度。默认值为True。only_inputs=False功能正在开发中,目前尚不支持。 + - **allow_unused** (bool, 可选) - 决定当某些 `inputs` 变量不在计算图中时抛出错误还是返回None。若某些 `inputs` 变量不在计算图中(即它们的梯度为None),则当allowed_unused=False时会抛出错误,当allow_unused=True时会返回None作为这些变量的梯度。默认值为False。 + - **no_grad_vars** (Tensor|list(Tensor)|tuple(Tensor)|set(Tensor), 可选) - 指明不需要计算梯度的变量。默认值为None。 + +返回: tuple(Tensor),其长度等于 `inputs` 中的变量个数,且第i个返回的变量是所有 `outputs` 相对于第i个 `inputs` 的梯度之和。 + +**示例代码 1** + .. code-block:: python + + import paddle + paddle.disable_static() + + def test_dygraph_grad(create_graph): + x = paddle.ones(shape=[1], dtype='float32') + x.stop_gradient = False + y = x * x + + # Since y = x * x, dx = 2 * x + dx = paddle.grad( + outputs=[y], + inputs=[x], + create_graph=create_graph, + retain_graph=True)[0] + + z = y + dx + + # If create_graph = False, the gradient of dx + # would not be backpropagated. Therefore, + # z = x * x + dx, and x.gradient() = 2 * x = 2.0 + + # If create_graph = True, the gradient of dx + # would be backpropagated. Therefore, + # z = x * x + dx = x * x + 2 * x, and + # x.gradient() = 2 * x + 2 = 4.0 + + z.backward() + return x.gradient() + + print(test_dygraph_grad(create_graph=False)) # [2.] + print(test_dygraph_grad(create_graph=True)) # [4.] + +**示例代码 2** + .. code-block:: python + + import paddle + paddle.disable_static() + + def test_dygraph_grad(grad_outputs=None): + x = paddle.fill_constant(shape=[1], value=2.0, dtype='float32') + x.stop_gradient = False + + y1 = x * x + y2 = x * 3 + + # If grad_outputs=None, dy1 = [1], dy2 = [1]. + # If grad_outputs=[g1, g2], then: + # - dy1 = [1] if g1 is None else g1 + # - dy2 = [1] if g2 is None else g2 + + # Since y1 = x * x, dx = 2 * x * dy1. + # Since y2 = x * 3, dx = 3 * dy2. + # Therefore, the final result would be: + # dx = 2 * x * dy1 + 3 * dy2 = 4 * dy1 + 3 * dy2. + + dx = paddle.grad( + outputs=[y1, y2], + inputs=[x], + grad_outputs=grad_outputs)[0] + + return dx.numpy() + + grad_value = paddle.fill_constant(shape=[1], value=4.0, dtype='float32') + + # dy1 = [1], dy2 = [1] + print(test_dygraph_grad(None)) # [7.] + + # dy1 = [1], dy2 = [4] + print(test_dygraph_grad([None, grad_value])) # [16.] + + # dy1 = [4], dy2 = [1] + print(test_dygraph_grad([grad_value, None])) # [19.] + + # dy1 = [3], dy2 = [4] + grad_y1 = paddle.fill_constant(shape=[1], value=3.0, dtype='float32') + print(test_dygraph_grad([grad_y1, grad_value])) # [24.] 
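        # --- Extra sketch appended to the example (not part of the original docs): the `allow_unused` flag ---
        # As stated in the parameter list above, an input that does not appear in the
        # graph of `outputs` raises an error unless allow_unused=True, in which case
        # its gradient is returned as None.
        def test_dygraph_grad_allow_unused():
            x = paddle.fill_constant(shape=[1], value=2.0, dtype='float32')
            w = paddle.fill_constant(shape=[1], value=3.0, dtype='float32')
            x.stop_gradient = False
            w.stop_gradient = False

            y = x * x  # y depends only on x; w is never used

            dx, dw = paddle.grad(outputs=[y], inputs=[x, w], allow_unused=True)
            return dx.numpy(), dw

        dx, dw = test_dygraph_grad_allow_unused()
        print(dx)  # [4.]  since dy/dx = 2 * x and x = 2
        print(dw)  # None  because w is unused and allow_unused=True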
\ No newline at end of file diff --git a/doc/paddle/api/paddle/fluid/dygraph/base/no_grad_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/base/no_grad_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..90fcd17c40d03ffa4b6049a9985f36c659a73499 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/base/no_grad_cn.rst @@ -0,0 +1,50 @@ +.. _cn_api_fluid_dygraph_no_grad: + +no_grad +------------------------------- + + +.. py:class:: paddle.fluid.dygraph.no_grad + + + +创建一个上下文来禁用动态图梯度计算。在此模式下,每次计算的结果都将具有stop_gradient=True。 + +也可以用作一个装饰器(需要创建实例对象作为装饰器)。 + +**代码示例** + +.. code-block:: python + + import numpy as np + import paddle.fluid as fluid + + paddle.enable_imperative() + + # 用作生成器 + + data = np.array([[2, 3], [4, 5]]).astype('float32') + l0 = fluid.Linear(2, 2) # l0.weight.gradient() is None + l1 = fluid.Linear(2, 2) + with fluid.no_grad(): + # l1.weight.stop_gradient is False + tmp = l1.weight * 2 # tmp.stop_gradient is True + x = fluid.dygraph.to_variable(data) + y = l0(x) + tmp + o = l1(y) + o.backward() + print(tmp.gradient() is None) # True + print(l0.weight.gradient() is None) # False + + # 用作装饰器 + + @fluid.no_grad() + def test_layer(): + inp = np.ones([3, 1024], dtype='float32') + t = fluid.dygraph.base.to_variable(inp) + linear1 = fluid.Linear(1024, 4, bias_attr=False) + linear2 = fluid.Linear(4, 4) + ret = linear1(t) + dy_ret = linear2(ret) + + test_layer() diff --git a/doc/paddle/api/paddle/fluid/dygraph/base/to_variable_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/base/to_variable_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5472faa7762eaae176d53834746c256e83b4f6bf --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/base/to_variable_cn.rst @@ -0,0 +1,55 @@ +.. _cn_api_fluid_dygraph_to_variable: + +to_variable +------------------------------- + + +.. py:function:: paddle.fluid.dygraph.to_variable(value, name=None, zero_copy=None) + + + + + +该函数实现从tuple、list、numpy\.ndarray、Variable、ComplexVariable 对象创建一个 ``Variable`` 类型的对象。 + + +参数: + - **value** (tuple|list|ndarray|Variable|Tensor|ComplexVariable) – 初始化的数据。可以是tuple、list、numpy\.ndarray、Variable、ComplexVariable。 + 维度可以为多维,数据类型为numpy\.{float16, float32, float64, int16, int32, int64, uint8, uint16}中的一种。 + - **name** (str, 可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + - **zero_copy** (bool, 可选) – 是否与输入的numpy数组共享内存。此参数仅适用于CPUPlace,当它为None时将设置为True。默认值为None。 + - **dtype** (str, 可选) - 返回的 ``Variable`` 所需的数据类型。可以是 'bool','float16','float32','float64','int8','int16','int32','int64','uint8'。默认值: None。 + + +返回:如果 ``value`` 是tuple/list/numpy\.ndarray对象,返回对应numpy\.ndarray对象创建的 ``Tensor`` ;如果 ``value`` 是Variable对象,直接返回 ``value`` 。 + +返回类型:Variable + +**代码示例**: + +.. 
code-block:: python + + import numpy as np + import paddle.fluid as fluid + with fluid.dygraph.guard(fluid.CPUPlace()): + + x = np.ones([2, 2], np.float32) + y = fluid.dygraph.to_variable(x, zero_copy=False) + x[0][0] = -1 + y[0][0].numpy() # array([1.], dtype=float32) + + y = fluid.dygraph.to_variable(x) + x[0][0] = 0 + y[0][0].numpy() # array([0.], dtype=float32) + + c = np.array([2+1j, 2]) + z = fluid.dygraph.to_variable(c) + z.numpy() # array([2.+1.j, 2.+0.j]) + z.dtype # 'complex128' + + y = fluid.dygraph.to_variable([[0.1, 1.2], [2.2, 3.1], [4.9, 5.2]]) + y.shape # [3L, 2L] + y = fluid.dygraph.to_variable(((0.1, 1.2), (2.2, 3.1), (4.9, 5.2)), dtype='int32') + y.shape # [3L, 2L] + y.dtype # core.VarDesc.VarType.INT32 + diff --git a/doc/paddle/api/paddle/fluid/dygraph/checkpoint/load_dygraph_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/checkpoint/load_dygraph_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a49c06ac7420bedec3fb567ab25e7165951b9f5a --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/checkpoint/load_dygraph_cn.rst @@ -0,0 +1,51 @@ +.. _cn_api_fluid_dygraph_load_dygraph: + +load +---- + + +.. py:function:: paddle.load(model_path, config=None) + + +该接口用于从磁盘中加载Layer和Optimizer的 ``state_dict`` ,该接口会同时加载 ``model_path + ".pdparams"`` 和 ``model_path + ".pdopt"`` 中的内容。 + +.. note:: + 由于一些历史原因,如果从 ``paddle.io.save_inference_model`` 的存储结果中载入 ``state_dict`` ,动态图模式下参数的结构性变量名将无法被恢复。并且在将载入的 ``state_dict`` 配置到当前Layer中时,需要配置 ``Layer.set_state_dict`` 的参数 ``use_structured_name=False`` 。 + +参数: + - **model_path** (str) – 保存state_dict的文件前缀。该路径不应该包括后缀 ``.pdparams`` 或 ``.pdopt``。 + - **config** (SaveLoadConfig, 可选) - 用于指定额外配置选项的 :ref:`cn_api_fluid_dygraph_jit_SaveLoadConfig` 对象,这些选项主要是用于兼容 ``paddle.io.save_inference_model`` 存储模型的格式。默认为 ``None``。 + + +返回: 两个 ``dict`` ,即从文件中恢复的模型参数 ``dict`` 和优化器参数 ``dict``,如果只找到其中一个的存储文件,另一个返回None + +- param_dict: 从文件中恢复的模型参数 ``dict`` +- opt_dict: 从文件中恢复的优化器参数 ``dict`` + +返回类型: tuple(dict, dict) + +**代码示例** + +.. code-block:: python + + import paddle + + paddle.disable_static() + + emb = paddle.nn.Embedding([10, 10]) + + state_dict = emb.state_dict() + paddle.save(state_dict, "paddle_dy") + + scheduler = paddle.optimizer.lr_scheduler.NoamLR( + d_model=0.01, warmup_steps=100, verbose=True) + adam = paddle.optimizer.Adam( + learning_rate=scheduler, + parameters=emb.parameters()) + state_dict = adam.state_dict() + paddle.save(state_dict, "paddle_dy") + + para_state_dict, opti_state_dict = paddle.load("paddle_dy") + + + diff --git a/doc/paddle/api/paddle/fluid/dygraph/checkpoint/save_dygraph_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/checkpoint/save_dygraph_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..274ad2977d177697246b4a36d436103e59be9f60 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/checkpoint/save_dygraph_cn.rst @@ -0,0 +1,43 @@ +.. _cn_api_fluid_dygraph_save_dygraph: + +save_dygraph +------------------------------- + + +.. py:function:: paddle.fluid.dygraph.save_dygraph(state_dict, model_path) + + + + +该接口将传入的参数或优化器的 ``dict`` 保存到磁盘上。 + +``state_dict`` 是通过 :ref:`cn_api_fluid_dygraph_Layer` 的 ``state_dict()`` 方法得到的。 + +注: ``model_path`` 不可以是一个目录。 + +该接口会根据 ``state_dict`` 的内容,自动给 ``model_path`` 添加 ``.pdparams`` 或者 ``.pdopt`` 后缀, +生成 ``model_path + ".pdparams"`` 或者 ``model_path + ".pdopt"`` 文件。 + +参数: + - **state_dict** (dict of Parameters) – 要保存的模型参数的 ``dict`` 。 + - **model_path** (str) – 保存state_dict的文件前缀。格式为 ``目录名称/文件前缀``。如果文件前缀为空字符串,会引发异常。 + +返回: 无 + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + + with fluid.dygraph.guard(): + emb = fluid.dygraph.Embedding([10, 10]) + + state_dict = emb.state_dict() + fluid.save_dygraph( state_dict, "paddle_dy") # 会保存为 paddle_dy.pdparams + + adam = fluid.optimizer.Adam( learning_rate = fluid.layers.noam_decay( 100, 10000), + parameter_list = emb.parameters() ) + + state_dict = adam.state_dict() + fluid.save_dygraph( state_dict, "paddle_dy") # 会保存为 paddle_dy.pdopt \ No newline at end of file diff --git a/doc/paddle/api/paddle/fluid/dygraph/container/LayerList_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/container/LayerList_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0dc2468dff0ca366300ee50558cab2c56df68aad --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/container/LayerList_cn.rst @@ -0,0 +1,36 @@ +.. _cn_api_fluid_dygraph_LayerList: + +LayerList +------------------------------- + +.. py:class:: paddle.fluid.dygraph.LayerList(sublayers=None) + + + + +LayerList用于保存子层列表,它包含的子层将被正确地注册和添加。列表中的子层可以像常规python列表一样被索引。 + +参数: + - **sublayers** (iterable,可选) - 要保存的子层。 + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + class MyLayer(fluid.Layer): + def __init__(self): + super(MyLayer, self).__init__() + self.linears = fluid.dygraph.LayerList( + [fluid.dygraph.Linear(10, 10) for i in range(10)]) + def forward(self, x): + # LayerList可以像iterable一样迭代,也可以使用int索引 + for i, l in enumerate(self.linears): + x = self.linears[i // 2](x) + l(x) + return x + + diff --git a/doc/paddle/api/paddle/fluid/dygraph/container/ParameterList_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/container/ParameterList_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..82ca04fef6bde4a149153642c29b0e449931da9e --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/container/ParameterList_cn.rst @@ -0,0 +1,62 @@ +.. _cn_api_fluid_dygraph_ParameterList: + +ParameterList +------------------------------- + +.. py:class:: paddle.fluid.dygraph.ParameterList(parameters=None) + + + + +参数列表容器。此容器的行为类似于Python列表,但它包含的参数将被正确地注册和添加。 + +参数: + - **parameters** (iterable,可选) - 可迭代的Parameters。 + +返回:无 + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + import numpy as np + + class MyLayer(fluid.Layer): + def __init__(self, num_stacked_param): + super(MyLayer, self).__init__() + # 使用可迭代的 Parameters 创建 ParameterList + self.params = fluid.dygraph.ParameterList( + [fluid.layers.create_parameter( + shape=[2, 2], dtype='float32')] * num_stacked_param) + def forward(self, x): + for i, p in enumerate(self.params): + tmp = self._helper.create_variable_for_type_inference('float32') + self._helper.append_op( + type="mul", + inputs={"X": x, + "Y": p}, + outputs={"Out": tmp}, + attrs={"x_num_col_dims": 1, + "y_num_col_dims": 1}) + x = tmp + return x + + data_np = np.random.uniform(-1, 1, [5, 2]).astype('float32') + with fluid.dygraph.guard(): + x = fluid.dygraph.to_variable(data_np) + num_stacked_param = 4 + model = MyLayer(num_stacked_param) + print(len(model.params)) # 4 + res = model(x) + print(res.shape) # [5, 2] + replaced_param = fluid.layers.create_parameter(shape=[2, 3], dtype='float32') + model.params[num_stacked_param - 1] = replaced_param # 替换最后一个参数 + res = model(x) + print(res.shape) # [5, 3] + model.params.append(fluid.layers.create_parameter(shape=[3, 4], dtype='float32')) # 添加参数 + print(len(model.params)) # 5 + res = model(x) + print(res.shape) # [5, 4] + + diff --git a/doc/paddle/api/paddle/fluid/dygraph/container/Sequential_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/container/Sequential_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b39b4a556aae0612cff2ea4d33a7fbc0af10107a --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/container/Sequential_cn.rst @@ -0,0 +1,42 @@ +.. _cn_api_fluid_dygraph_Sequential: + +Sequential +------------------------------- + +.. py:class:: paddle.fluid.dygraph.Sequential(*layers) + + + + +顺序容器。子Layer将按构造函数参数的顺序添加到此容器中。传递给构造函数的参数可以Layers或可迭代的name Layer元组。 + +参数: + - **layers** (tuple) - Layers或可迭代的name Layer对。 + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + data = np.random.uniform(-1, 1, [30, 10]).astype('float32') + with fluid.dygraph.guard(): + data = fluid.dygraph.to_variable(data) + # 使用 iterable Layers 创建 Sequential 容器 + model1 = fluid.dygraph.Sequential( + fluid.Linear(10, 1), fluid.Linear(1, 2) + ) + model1[0] # 访问第一个子层 + res1 = model1(data) # 顺序执行 + # 使用 iterable name Layer 对创建 Sequential 容器 + model2 = fluid.dygraph.Sequential( + ('l1', fluid.Linear(10, 2)), + ('l2', fluid.Linear(2, 3)) + ) + model2['l1'] # 访问 l1 子层 + model2.add_sublayer('l3', fluid.Linear(3, 3)) # 添加子层 + res2 = model2(data) # 顺序执行 + + diff --git a/doc/paddle/api/paddle/fluid/dygraph/enabled_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/enabled_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e5716e76456a99ba5724369d4c2aaba7bfa129f8 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/enabled_cn.rst @@ -0,0 +1,25 @@ +.. _cn_api_fluid_dygraph_enabled: + +enabled +------------------------------- + +.. py:method:: paddle.fluid.dygraph.enabled() + +这个函数用于检查程序是否运行在动态图模式。你可以使用 :ref:`cn_api_fluid_dygraph_guard` api进入动态图模式。或者使用 :ref:`cn_api_fluid_enable_dygraph` 和 :ref:`cn_api_fluid_disable_dygraph` api打开、关闭动态图模式。 + +注意: `fluid.dygraph.enabled` 实际上调用了 :ref:`cn_api_fluid_in_dygraph_mode` api,所以推荐使用 :ref:`cn_api_fluid_in_dygraph_mode` api。 + +返回: 程序是否运行在动态图模式。 + +返回类型: bool + +**示例代码** + +.. 
code-block:: python + + import paddle.fluid as fluid + + fluid.enable_dygraph() # Now we are in dygragh mode + print(fluid.dygraph.enabled()) # True + fluid.disable_dygraph() + print(fluid.dygraph.enabled()) # False diff --git a/doc/paddle/api/paddle/fluid/dygraph/guard_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/guard_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7c457eb15118baf6823be9e404fc2fa053b6a2f6 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/guard_cn.rst @@ -0,0 +1,35 @@ +.. _cn_api_fluid_unique_name_guard: + +guard +------------------------------- + +.. py:function:: paddle.fluid.unique_name.guard(new_generator=None) + + + + +该接口用于更改命名空间,与with语句一起使用。使用后,在with语句的上下文中使用新的命名空间,调用generate接口时相同前缀的名称将从0开始重新编号。 + +参数: + - **new_generator** (str|bytes, 可选) - 新命名空间的名称。请注意,Python2中的str在Python3中被区分为str和bytes两种,因此这里有两种类型。 缺省值为None,若不为None,new_generator将作为前缀添加到generate接口产生的唯一名称中。 + +返回: 无。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + with fluid.unique_name.guard(): + name_1 = fluid.unique_name.generate('fc') + with fluid.unique_name.guard(): + name_2 = fluid.unique_name.generate('fc') + print(name_1, name_2) # fc_0, fc_0 + + with fluid.unique_name.guard('A'): + name_1 = fluid.unique_name.generate('fc') + with fluid.unique_name.guard('B'): + name_2 = fluid.unique_name.generate('fc') + print(name_1, name_2) # Afc_0, Bfc_0 + + diff --git a/doc/paddle/api/paddle/fluid/dygraph/io/TranslatedLayer_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/io/TranslatedLayer_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2f46906603077a254033c088587959f89f04d120 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/io/TranslatedLayer_cn.rst @@ -0,0 +1,194 @@ +.. _cn_api_fluid_dygraph_TranslatedLayer: + +TranslatedLayer +------------------------------- + +.. py:class:: paddle.fluid.dygraph.TranslatedLayer(programs, persistable_vars) + +``TranslatedLayer`` 是一个命令式编程模式 :ref:`cn_api_fluid_dygraph_Layer` 的继承类, +通过 :ref:`cn_api_fluid_dygraph_jit_load` 载入构建。能够像一般 ``Layer`` 一样在train或者eval模式下使用。 + +.. note:: + ``TranslatedLayer`` 对象不能够通过构造函数创建,仅能够通过 :ref:`cn_api_fluid_dygraph_jit_load` 接口载入构建。 + +**示例代码:** + .. code-block:: python + + import numpy as np + import paddle + import paddle.nn as nn + import paddle.optimizer as opt + + BATCH_SIZE = 16 + BATCH_NUM = 4 + EPOCH_NUM = 4 + + IMAGE_SIZE = 784 + CLASS_NUM = 10 + + # define a random dataset + class RandomDataset(paddle.io.Dataset): + def __init__(self, num_samples): + self.num_samples = num_samples + + def __getitem__(self, idx): + image = np.random.random([IMAGE_SIZE]).astype('float32') + label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64') + return image, label + + def __len__(self): + return self.num_samples + + class LinearNet(nn.Layer): + def __init__(self): + super(LinearNet, self).__init__() + self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM) + + @paddle.jit.to_static + def forward(self, x): + return self._linear(x) + + def train(layer, loader, loss_fn, opt): + for epoch_id in range(EPOCH_NUM): + for batch_id, (image, label) in enumerate(loader()): + out = layer(image) + loss = loss_fn(out, label) + loss.backward() + opt.step() + opt.clear_grad() + print("Epoch {} batch {}: loss = {}".format( + epoch_id, batch_id, np.mean(loss.numpy()))) + + # enable dygraph mode + place = paddle.CPUPlace() + paddle.disable_static(place) + + # 1. train & save model. 
+ + # create network + layer = LinearNet() + loss_fn = nn.CrossEntropyLoss() + adam = opt.Adam(learning_rate=0.001, parameters=layer.parameters()) + + # create data loader + dataset = RandomDataset(BATCH_NUM * BATCH_SIZE) + loader = paddle.io.DataLoader(dataset, + places=place, + batch_size=BATCH_SIZE, + shuffle=True, + drop_last=True, + num_workers=2) + + # train + train(layer, loader, loss_fn, adam) + + # save + model_path = "linear.example.model" + paddle.jit.save(layer, model_path) + + # 2. load model as TranslatedLayer + + # load + translated_layer = paddle.jit.load(model_path) + + # inference + translated_layer.eval() + x = paddle.randn([1, IMAGE_SIZE], 'float32') + pred = translated_layer(x) + + # fine-tune + translated_layer.train() + adam = opt.Adam(learning_rate=0.001, parameters=translated_layer.parameters()) + train(translated_layer, loader, loss_fn, adam) + + +.. py:method:: program(method_name='forward'): + +获取TranslatedLayer中指定方法对应的Program。 + +参数: + - **method_name** (string) - 要获取的Porgram对应的方法名。默认值为"forward"。 + +返回:Program + +返回类型:Program + +**示例代码:** + .. code-block:: python + + import numpy as np + import paddle + import paddle.nn as nn + import paddle.optimizer as opt + + BATCH_SIZE = 16 + BATCH_NUM = 4 + EPOCH_NUM = 4 + + IMAGE_SIZE = 784 + CLASS_NUM = 10 + + # define a random dataset + class RandomDataset(paddle.io.Dataset): + def __init__(self, num_samples): + self.num_samples = num_samples + + def __getitem__(self, idx): + image = np.random.random([IMAGE_SIZE]).astype('float32') + label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64') + return image, label + + def __len__(self): + return self.num_samples + + class LinearNet(nn.Layer): + def __init__(self): + super(LinearNet, self).__init__() + self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM) + + @paddle.jit.to_static + def forward(self, x): + return self._linear(x) + + def train(layer, loader, loss_fn, opt): + for epoch_id in range(EPOCH_NUM): + for batch_id, (image, label) in enumerate(loader()): + out = layer(image) + loss = loss_fn(out, label) + loss.backward() + opt.step() + opt.clear_grad() + print("Epoch {} batch {}: loss = {}".format( + epoch_id, batch_id, np.mean(loss.numpy()))) + + # enable dygraph mode + place = paddle.CPUPlace() + paddle.disable_static(place) + + # create network + layer = LinearNet() + loss_fn = nn.CrossEntropyLoss() + adam = opt.Adam(learning_rate=0.001, parameters=layer.parameters()) + + # create data loader + dataset = RandomDataset(BATCH_NUM * BATCH_SIZE) + loader = paddle.io.DataLoader(dataset, + places=place, + batch_size=BATCH_SIZE, + shuffle=True, + drop_last=True, + num_workers=2) + + # train + train(layer, loader, loss_fn, adam) + + # save + model_path = "linear.example.model" + paddle.jit.save(layer, model_path) + + # load + translated_layer = paddle.jit.load(model_path) + + # get program + program = translated_layer.program() + print(program) diff --git a/doc/paddle/api/paddle/fluid/dygraph/jit/SaveLoadConfig_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/jit/SaveLoadConfig_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9cf2165136564c2f81001fa63d2ebfe7b5c4703c --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/jit/SaveLoadConfig_cn.rst @@ -0,0 +1,325 @@ +.. _cn_api_fluid_dygraph_jit_SaveLoadConfig: + +SaveLoadConfig +-------------- + +.. py:class:: paddle.SaveLoadConfig() + +用于配置接口 :ref:`cn_api_fluid_dygraph_jit_save` 和 :ref:`cn_api_fluid_dygraph_jit_load` 存储载入 :ref:`cn_api_fluid_dygraph_TranslatedLayer` 时的附加选项。 + +**示例代码:** + + 1. 
在存储模型时使用 ``SaveLoadConfig`` + + .. code-block:: python + + import paddle + import paddle.nn as nn + import paddle.optimizer as opt + + class SimpleNet(nn.Layer): + def __init__(self, in_size, out_size): + super(SimpleNet, self).__init__() + self._linear = nn.Linear(in_size, out_size) + + @paddle.jit.to_static + def forward(self, x): + y = self._linear(x) + z = self._linear(y) + return z + + # enable dygraph mode + paddle.disable_static() + + # train model + net = SimpleNet(8, 8) + adam = opt.Adam(learning_rate=0.1, parameters=net.parameters()) + x = paddle.randn([4, 8], 'float32') + for i in range(10): + out = net(x) + loss = paddle.tensor.mean(out) + loss.backward() + adam.step() + adam.clear_grad() + + # use SaveLoadconfig when saving model + model_path = "simplenet.example.model" + config = paddle.SaveLoadConfig() + config.model_filename = "__simplenet__" + paddle.jit.save( + layer=net, + model_path=model_path, + config=config) + + 2. 在载入模型时使用 ``SaveLoadConfig`` + + .. code-block:: python + + import paddle + + # enable dygraph mode + paddle.disable_static() + + # use SaveLoadconfig when loading model + model_path = "simplenet.example.model" + config = paddle.SaveLoadConfig() + config.model_filename = "__simplenet__" + infer_net = paddle.jit.load(model_path, config=config) + # inference + x = paddle.randn([4, 8], 'float32') + pred = infer_net(x) + +属性 +:::::::::::: + +.. py:attribute:: output_spec + +选择保存模型( :ref:`cn_api_fluid_dygraph_TranslatedLayer` )的输出变量,通过指定的这些变量能够使模型仅计算特定的结果。 +默认情况下,原始 :ref:`cn_api_fluid_dygraph_Layer` 的forward方法的所有返回变量都将配置为存储后模型 :ref:`cn_api_fluid_dygraph_TranslatedLayer` 的输出变量。 + +``output_spec`` 属性类型需要是 ``list[Variable]``。如果输入的 ``output_spec`` 列表不是原始 :ref:`cn_api_fluid_dygraph_Layer` 的forward方法的所有返回变量, +将会依据输入的 ``output_spec`` 列表对存储的模型进行裁剪。 + +.. note:: + ``output_spec`` 属性仅在存储模型时使用。 + +**示例代码:** + .. code-block:: python + + import paddle + import paddle.nn as nn + import paddle.optimizer as opt + + class SimpleNet(nn.Layer): + def __init__(self, in_size, out_size): + super(SimpleNet, self).__init__() + self._linear = nn.Linear(in_size, out_size) + + @paddle.jit.to_static + def forward(self, x): + y = self._linear(x) + z = self._linear(y) + loss = paddle.tensor.mean(z) + return z, loss + + # enable dygraph mode + paddle.disable_static() + + # train model + net = SimpleNet(8, 8) + adam = opt.Adam(learning_rate=0.1, parameters=net.parameters()) + x = paddle.randn([4, 8], 'float32') + for i in range(10): + out, loss = net(x) + loss.backward() + adam.step() + adam.clear_grad() + + # use SaveLoadconfig.output_spec + model_path = "simplenet.example.model.output_spec" + config = paddle.SaveLoadConfig() + config.output_spec = [out] + paddle.jit.save( + layer=net, + model_path=model_path, + config=config) + + infer_net = paddle.jit.load(model_path) + x = paddle.randn([4, 8], 'float32') + pred = infer_net(x) + + + +.. py:attribute:: model_filename + +存储转写 :ref:`cn_api_fluid_dygraph_Layer` 模型结构 ``Program`` 的文件名称。默认文件名为 ``__model__``。 + +**示例代码** + .. 
code-block:: python + + import paddle + import paddle.nn as nn + import paddle.optimizer as opt + + class SimpleNet(nn.Layer): + def __init__(self, in_size, out_size): + super(SimpleNet, self).__init__() + self._linear = nn.Linear(in_size, out_size) + + @paddle.jit.to_static + def forward(self, x): + y = self._linear(x) + z = self._linear(y) + return z + + # enable dygraph mode + paddle.disable_static() + + # train model + net = SimpleNet(8, 8) + adam = opt.Adam(learning_rate=0.1, parameters=net.parameters()) + x = paddle.randn([4, 8], 'float32') + for i in range(10): + out = net(x) + loss = paddle.tensor.mean(out) + loss.backward() + adam.step() + adam.clear_grad() + + # saving with configs.model_filename + model_path = "simplenet.example.model.model_filename" + config = paddle.SaveLoadConfig() + config.model_filename = "__simplenet__" + paddle.jit.save( + layer=net, + model_path=model_path, + config=config) + + # loading with configs.model_filename + infer_net = paddle.jit.load(model_path, config=config) + x = paddle.randn([4, 8], 'float32') + pred = infer_net(x) + + +.. py:attribute:: params_filename + +存储转写 :ref:`cn_api_fluid_dygraph_Layer` 所有持久参数(包括 ``Parameters`` 和持久的 ``Buffers``)的文件名称。默认文件名称为 ``__variable__``。 + +**示例代码** + .. code-block:: python + + import paddle + import paddle.nn as nn + import paddle.optimizer as opt + + class SimpleNet(nn.Layer): + def __init__(self, in_size, out_size): + super(SimpleNet, self).__init__() + self._linear = nn.Linear(in_size, out_size) + + @paddle.jit.to_static + def forward(self, x): + y = self._linear(x) + z = self._linear(y) + return z + + # enable dygraph mode + paddle.disable_static() + + # train model + net = SimpleNet(8, 8) + adam = opt.Adam(learning_rate=0.1, parameters=net.parameters()) + x = paddle.randn([4, 8], 'float32') + for i in range(10): + out = net(x) + loss = paddle.tensor.mean(out) + loss.backward() + adam.step() + adam.clear_grad() + + model_path = "simplenet.example.model.params_filename" + config = paddle.SaveLoadConfig() + config.params_filename = "__params__" + + # saving with configs.params_filename + paddle.jit.save( + layer=net, + model_path=model_path, + config=config) + + # loading with configs.params_filename + infer_net = paddle.jit.load(model_path, config=config) + x = paddle.randn([4, 8], 'float32') + pred = infer_net(x) + + +.. py:attribute:: separate_params + +配置是否将 :ref:`cn_api_fluid_dygraph_Layer` 的参数存储为分散的文件。 +(这是为了兼容接口 :ref:`cn_api_fluid_io_save_inference_model` 的行为) + +如果设置为 ``True`` ,每个参数将会被存储为一个文件,文件名为参数名,同时``SaveLoadConfig.params_filename`` 指定的文件名将不会生效。默认为 ``False``。 + +**示例代码** + .. 
code-block:: python + + import paddle + import paddle.nn as nn + import paddle.optimizer as opt + + class SimpleNet(nn.Layer): + def __init__(self, in_size, out_size): + super(SimpleNet, self).__init__() + self._linear = nn.Linear(in_size, out_size) + + @paddle.jit.to_static + def forward(self, x): + y = self._linear(x) + z = self._linear(y) + return z + + # enable dygraph mode + paddle.disable_static() + + # train model + net = SimpleNet(8, 8) + adam = opt.Adam(learning_rate=0.1, parameters=net.parameters()) + x = paddle.randn([4, 8], 'float32') + for i in range(10): + out = net(x) + loss = paddle.tensor.mean(out) + loss.backward() + adam.step() + adam.clear_grad() + + model_path = "simplenet.example.model.separate_params" + config = paddle.jit.SaveLoadConfig() + config.separate_params = True + + # saving with configs.separate_params + paddle.jit.save( + layer=net, + model_path=model_path, + config=config) + # [result] the saved model directory contains: + # linear_0.b_0 linear_0.w_0 __model__ __variables.info__ + + # loading with configs.params_filename + infer_net = paddle.jit.load(model_path, config=config) + x = paddle.randn([4, 8], 'float32') + pred = infer_net(x) + + +.. py:attribute:: keep_name_table + +配置是否保留 ``paddle.load`` 载入结果中 ``structured_name`` 到真实的参数变量名的映射表。这个映射表是调用 ``paddle.save`` 时存储的,一般仅用于调试,移除此映射表不影响真实的训练和预测。默认情况下不会保留在 ``paddle.load`` 的结果中。默认值为False。 + +.. note:: + 该配置仅用于 ``paddle.load`` 方法。 + +**示例代码** + .. code-block:: python + + import paddle + + paddle.disable_static() + + linear = paddle.nn.Linear(5, 1) + + state_dict = linear.state_dict() + paddle.save(state_dict, "paddle_dy") + + configs = paddle.SaveLoadConfig() + configs.keep_name_table = True + para_state_dict, _ = paddle.load("paddle_dy", configs) + + print(para_state_dict) + # the name_table is 'StructuredToParameterName@@' + # {'bias': array([0.], dtype=float32), + # 'StructuredToParameterName@@': + # {'bias': u'linear_0.b_0', 'weight': u'linear_0.w_0'}, + # 'weight': array([[ 0.04230034], + # [-0.1222527 ], + # [ 0.7392676 ], + # [-0.8136974 ], + # [ 0.01211023]], dtype=float32)} diff --git a/doc/paddle/api/paddle/fluid/dygraph/jit/TracedLayer_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/jit/TracedLayer_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..38ce2900c3dba2a5c04a00b47b10f7545a7b31fd --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/jit/TracedLayer_cn.rst @@ -0,0 +1,146 @@ +.. _cn_api_fluid_dygraph_TracedLayer: + +TracedLayer +------------------------------- + + +.. py:class:: paddle.fluid.dygraph.TracedLayer(program, parameters, feed_names, fetch_names) + + + + +TracedLayer用于将前向动态图模型转换为静态图模型,主要用于将动态图保存后做在线C++预测。除此以外,用户也可使用转换后的静态图模型在Python端做预测,通常比原先的动态图性能更好。 + +TracedLayer使用 ``Executor`` 和 ``CompiledProgram`` 运行静态图模型。转换后的静态图模型与原动态图模型共享参数。 + +所有的TracedLayer对象均不应通过构造函数创建,而应通过调用静态方法 ``TracedLayer.trace(layer, inputs)`` 创建。 + +TracedLayer只能用于将data independent的动态图模型转换为静态图模型,即待转换的动态图模型不应随tensor数据或维度的变化而变化。 + +.. py:staticmethod:: trace(layer, inputs) + +创建TracedLayer对象的唯一接口,该接口会调用 ``layer(*inputs)`` 方法运行动态图模型并将其转换为静态图模型。 + +参数: + - **layer** (dygraph.Layer) - 待追踪的动态图layer对象。 + - **inputs** (list(Variable)) - 动态图layer对象的输入变量列表。 + +返回: 包含2个元素的tuple,其中第一个元素是 ``layer(*inputs)`` 的输出结果,第二个元素是转换后得到的TracedLayer对象。 + +返回类型: tuple + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + from paddle.fluid.dygraph import Linear, to_variable, TracedLayer + import numpy as np + + class ExampleLayer(fluid.dygraph.Layer): + def __init__(self): + super(ExampleLayer, self).__init__() + self._fc = Linear(3, 10) + + def forward(self, input): + return self._fc(input) + + with fluid.dygraph.guard(): + layer = ExampleLayer() + in_np = np.random.random([2, 3]).astype('float32') + in_var = to_variable(in_np) + out_dygraph, static_layer = TracedLayer.trace(layer, inputs=[in_var]) + + # 内部使用Executor运行静态图模型 + out_static_graph = static_layer([in_var]) + print(len(out_static_graph)) # 1 + print(out_static_graph[0].shape) # (2, 10) + + # 将静态图模型保存为预测模型 + static_layer.save_inference_model(dirname='./saved_infer_model') + +.. py:method:: set_strategy(build_strategy=None, exec_strategy=None) + +设置构建和执行静态图模型的相关策略。 + +参数: + - **build_strategy** (BuildStrategy, 可选) - TracedLayer内部 ``CompiledProgram`` 的构建策略。 + - **exec_strategy** (ExecutionStrategy, 可选) - TracedLayer内部 ``CompiledProgram`` 的执行策略。 + +返回: 无 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + from paddle.fluid.dygraph import Linear, to_variable, TracedLayer + import numpy as np + + class ExampleLayer(fluid.dygraph.Layer): + def __init__(self): + super(ExampleLayer, self).__init__() + self._fc = Linear(3, 10) + + def forward(self, input): + return self._fc(input) + + with fluid.dygraph.guard(): + layer = ExampleLayer() + in_np = np.random.random([2, 3]).astype('float32') + in_var = to_variable(in_np) + + out_dygraph, static_layer = TracedLayer.trace(layer, inputs=[in_var]) + + build_strategy = fluid.BuildStrategy() + build_strategy.enable_inplace = True + + exec_strategy = fluid.ExecutionStrategy() + exec_strategy.num_threads = 2 + + static_layer.set_strategy(build_strategy=build_strategy, exec_strategy=exec_strategy) + out_static_graph = static_layer([in_var]) + +.. py:method:: save_inference_model(dirname, feed=None, fetch=None) + +将TracedLayer保存为用于预测部署的模型。保存的预测模型可被C++预测接口加载。 + +参数: + - **dirname** (str) - 预测模型的保存目录。 + - **feed** (list(int), 可选) - 预测模型输入变量的索引。若为None,则TracedLayer的所有输入变量均会作为预测模型的输入。默认值为None。 + - **fetch** (list(int), 可选) - 预测模型输出变量的索引。若为None,则TracedLayer的所有输出变量均会作为预测模型的输出。默认值为None。 + +返回: 无 + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + from paddle.fluid.dygraph import Linear, to_variable, TracedLayer + import numpy as np + + class ExampleLayer(fluid.dygraph.Layer): + def __init__(self): + super(ExampleLayer, self).__init__() + self._fc = Linear(3, 10) + + def forward(self, input): + return self._fc(input) + + save_dirname = './saved_infer_model' + in_np = np.random.random([2, 3]).astype('float32') + + with fluid.dygraph.guard(): + layer = ExampleLayer() + in_var = to_variable(in_np) + out_dygraph, static_layer = TracedLayer.trace(layer, inputs=[in_var]) + static_layer.save_inference_model(save_dirname, feed=[0], fetch=[0]) + + place = fluid.CPUPlace() + exe = fluid.Executor(place) + program, feed_vars, fetch_vars = fluid.io.load_inference_model(save_dirname, + exe) + + fetch, = exe.run(program, feed={feed_vars[0]: in_np}, fetch_list=fetch_vars) + print(fetch.shape) # (2, 10) \ No newline at end of file diff --git a/doc/paddle/api/paddle/fluid/dygraph/jit/declarative_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/jit/declarative_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9920b5b7af2d6913189ac6d0255cea41995e524d --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/jit/declarative_cn.rst @@ -0,0 +1,32 @@ +.. _cn_api_fluid_dygraph_declarative: + +declarative +------------------------------- + +.. py:decorator:: paddle.fluid.dygraph.jit.declarative + +本装饰器将函数内的动态图API转化为静态图API。此装饰器自动处理静态图模式下的Program和Executor,并将结果作为动态图Tensor返回。输出的动态图Tensor可以继续进行动态图训练、预测或其他运算。如果被装饰的函数里面调用其他动态图函数,被调用的函数也会被转化为静态图函数。 + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + from paddle.fluid.dygraph.jit import declarative + + fluid.enable_dygraph() + + @declarative + def func(x): + x = fluid.dygraph.to_variable(x) + if fluid.layers.mean(x) < 0: + x_v = x - 1 + else: + x_v = x + 1 + return x_v + + x = np.ones([1, 2]) + x_v = func(x) + print(x_v.numpy()) # [[2. 2.]] + diff --git a/doc/paddle/api/paddle/fluid/dygraph/jit/load_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/jit/load_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3711962e79b21948bf500b7d8432f630a73ae9fd --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/jit/load_cn.rst @@ -0,0 +1,216 @@ +.. _cn_api_fluid_dygraph_jit_load: + +load +----------------- + +.. py:function:: paddle.jit.load(model_path, config=None) + + +将接口 :ref:`cn_api_fluid_dygraph_jit_save` 或者 :ref:`cn_api_fluid_io_save_inference_model` 存储的模型载入为 :ref:`cn_api_fluid_dygraph_TranslatedLayer` ,用于预测推理或者fine-tune训练。 + +.. note:: + 由于一些历史原因,如果载入的模型是通过 :ref:`cn_api_fluid_io_save_inference_model` 存储的, + 在使用它进行fine-tune训练时会存在一些局限: + 1. 命令式编程模式不支持 ``LoDTensor`` ,所有原先输入变量或者参数依赖于LoD信息的模型暂时无法使用; + 2. 所有存储模型的feed变量都需要被传入 ``Translatedlayer`` 的forward方法; + 3. 原模型变量的 ``stop_gradient`` 信息已丢失且无法准确恢复; + 4. 原模型参数的 ``trainable`` 信息已丢失且无法准确恢复。 + +参数: + - **model_path** (str) - 存储模型的目录。 + - **config** (SaveLoadConfig, 可选) - 用于指定额外配置选项的 :ref:`cn_api_fluid_dygraph_jit_SaveLoadConfig` 对象。默认为 ``None``。 + +返回:TranslatedLayer - 一个能够执行存储模型的 ``Layer`` 对象。 + +**示例代码** + +1. 载入由接口 :ref:`cn_api_fluid_dygraph_jit_save` 存储的模型进行预测推理及fine-tune训练。 + + .. 
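    在阅读下面的完整示例之前,可以先看一个最小化的载入与预测示意(假设目录 ``linear.example.model`` 已按下文完整示例由 ``paddle.jit.save`` 保存,模型输入为形状 ``[batch, 784]`` 的 float32 Tensor;仅作示意):

    .. code-block:: python

        import paddle

        # 进入动态图模式(与本页其他示例保持一致)
        paddle.disable_static()

        # 载入已保存的模型,得到可直接调用的 TranslatedLayer
        loaded_layer = paddle.jit.load("linear.example.model")

        # 切换到预测模式并执行一次前向计算
        loaded_layer.eval()
        x = paddle.randn([1, 784], 'float32')
        pred = loaded_layer(x)

    完整的训练、存储、载入及fine-tune流程如下: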
code-block:: python + + import numpy as np + import paddle + import paddle.nn as nn + import paddle.optimizer as opt + + BATCH_SIZE = 16 + BATCH_NUM = 4 + EPOCH_NUM = 4 + + IMAGE_SIZE = 784 + CLASS_NUM = 10 + + # define a random dataset + class RandomDataset(paddle.io.Dataset): + def __init__(self, num_samples): + self.num_samples = num_samples + + def __getitem__(self, idx): + image = np.random.random([IMAGE_SIZE]).astype('float32') + label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64') + return image, label + + def __len__(self): + return self.num_samples + + class LinearNet(nn.Layer): + def __init__(self): + super(LinearNet, self).__init__() + self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM) + + @paddle.jit.to_static + def forward(self, x): + return self._linear(x) + + def train(layer, loader, loss_fn, opt): + for epoch_id in range(EPOCH_NUM): + for batch_id, (image, label) in enumerate(loader()): + out = layer(image) + loss = loss_fn(out, label) + loss.backward() + opt.step() + opt.clear_grad() + print("Epoch {} batch {}: loss = {}".format( + epoch_id, batch_id, np.mean(loss.numpy()))) + + # enable dygraph mode + place = paddle.CPUPlace() + paddle.disable_static(place) + + # 1. train & save model. + + # create network + layer = LinearNet() + loss_fn = nn.CrossEntropyLoss() + adam = opt.Adam(learning_rate=0.001, parameters=layer.parameters()) + + # create data loader + dataset = RandomDataset(BATCH_NUM * BATCH_SIZE) + loader = paddle.io.DataLoader(dataset, + places=place, + batch_size=BATCH_SIZE, + shuffle=True, + drop_last=True, + num_workers=2) + + # train + train(layer, loader, loss_fn, adam) + + # save + model_path = "linear.example.model" + paddle.jit.save(layer, model_path) + + # 2. load model + + # load + loaded_layer = paddle.jit.load(model_path) + + # inference + loaded_layer.eval() + x = paddle.randn([1, IMAGE_SIZE], 'float32') + pred = loaded_layer(x) + + # fine-tune + loaded_layer.train() + adam = opt.Adam(learning_rate=0.001, parameters=loaded_layer.parameters()) + train(loaded_layer, loader, loss_fn, adam) + + + +2. 载入由接口 :ref:`cn_api_fluid_io_save_inference_model` 存储的模型进行预测推理及fine-tune训练。 + + .. code-block:: python + + import numpy as np + import paddle + import paddle.fluid as fluid + import paddle.nn as nn + import paddle.optimizer as opt + + BATCH_SIZE = 16 + BATCH_NUM = 4 + EPOCH_NUM = 4 + + IMAGE_SIZE = 784 + CLASS_NUM = 10 + + # define a random dataset + class RandomDataset(paddle.io.Dataset): + def __init__(self, num_samples): + self.num_samples = num_samples + + def __getitem__(self, idx): + image = np.random.random([IMAGE_SIZE]).astype('float32') + label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64') + return image, label + + def __len__(self): + return self.num_samples + + image = fluid.data(name='image', shape=[None, 784], dtype='float32') + label = fluid.data(name='label', shape=[None, 1], dtype='int64') + pred = fluid.layers.fc(input=image, size=10, act='softmax') + loss = fluid.layers.cross_entropy(input=pred, label=label) + avg_loss = fluid.layers.mean(loss) + + optimizer = fluid.optimizer.SGD(learning_rate=0.001) + optimizer.minimize(avg_loss) + + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + + # create data loader + dataset = RandomDataset(BATCH_NUM * BATCH_SIZE) + loader = paddle.io.DataLoader(dataset, + feed_list=[image, label], + places=place, + batch_size=BATCH_SIZE, + shuffle=True, + drop_last=True, + num_workers=2) + + # 1. 
train and save inference model + for data in loader(): + exe.run( + fluid.default_main_program(), + feed=data, + fetch_list=[avg_loss]) + + model_path = "fc.example.model" + fluid.io.save_inference_model( + model_path, ["image"], [pred], exe) + + # 2. load model + + # enable dygraph mode + paddle.disable_static(place) + + # load + fc = paddle.jit.load(model_path) + + # inference + fc.eval() + x = paddle.randn([1, IMAGE_SIZE], 'float32') + pred = fc(x) + + # fine-tune + fc.train() + loss_fn = nn.CrossEntropyLoss() + adam = opt.Adam(learning_rate=0.001, parameters=fc.parameters()) + loader = paddle.io.DataLoader(dataset, + places=place, + batch_size=BATCH_SIZE, + shuffle=True, + drop_last=True, + num_workers=2) + for epoch_id in range(EPOCH_NUM): + for batch_id, (image, label) in enumerate(loader()): + out = fc(image) + loss = loss_fn(out, label) + loss.backward() + adam.step() + adam.clear_grad() + print("Epoch {} batch {}: loss = {}".format( + epoch_id, batch_id, np.mean(loss.numpy()))) + diff --git a/doc/paddle/api/paddle/fluid/dygraph/jit/save_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/jit/save_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..568aaebc1d4330620962693263c523d799d2131c --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/jit/save_cn.rst @@ -0,0 +1,104 @@ +.. _cn_api_fluid_dygraph_jit_save: + +save +----------------- + +.. py:function:: paddle.jit.save(layer, model_path, input_spec=None, config=None) + +将输入的经过 ``@declarative`` 装饰的 :ref:`cn_api_fluid_dygraph_Layer` 存储为 :ref:`cn_api_fluid_dygraph_TranslatedLayer` 格式的模型, +载入后可用于预测推理或者fine-tune训练。 + +该接口将会将输入 :ref:`cn_api_fluid_dygraph_Layer` 转写后的模型结构 ``Program`` 和所有必要的持久参数变量存储至输入路径 ``model_path`` 中。 + +默认存储的 ``Program`` 文件名为 ``__model__``, 默认存储持久参数变量的文件名为 ``__variables__``, +同时会将变量的一些描述信息存储至文件 ``__variables.info__``,这些额外的信息将在fine-tune训练中使用。 + +存储的模型能够被以下API载入使用: + - :ref:`cn_api_fluid_dygraph_jit_load` + - :ref:`cn_api_fluid_io_load_inference_model` (需要配置参数 ``params_filename='__variables__'`` ) + - 其他预测库API + +参数: + - **layer** (Layer) - 需要存储的 :ref:`cn_api_fluid_dygraph_Layer` 对象。输入的 ``Layer`` 需要经过 ``@declarative`` 装饰。 + - **model_path** (str) - 存储模型的目录。 + - **input_spec** (list[Variable], 可选) - 描述存储模型的输入。此参数是传入当前存储的 ``TranslatedLayer`` forward方法的一个示例输入。如果为 ``None`` ,所有原 ``Layer`` forward方法的输入变量将都会被配置为存储模型的输入变量。默认为 ``None``。 + - **config** (SaveLoadConfig, 可选) - 用于指定额外配置选项的 :ref:`cn_api_fluid_dygraph_jit_SaveLoadConfig` 对象。默认为 ``None``。 + +返回:无 + +**示例代码** + +.. 
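下面先给出一个省略训练过程的最小存储示意(``LinearNet`` 与目录名 ``linear.minimal.model`` 均为假设的示例名称;这里通过先执行一次前向来完成动转静的转写,也可以改用上文的 ``input_spec`` 参数指定输入;仅作示意):

.. code-block:: python

    import paddle
    import paddle.nn as nn

    class LinearNet(nn.Layer):
        def __init__(self):
            super(LinearNet, self).__init__()
            self._linear = nn.Linear(784, 10)

        @paddle.jit.to_static
        def forward(self, x):
            return self._linear(x)

    # enable dygraph mode
    paddle.disable_static()

    net = LinearNet()

    # 先执行一次前向,使被装饰的 forward 完成转写(此处省略训练)
    x = paddle.randn([1, 784], 'float32')
    net(x)

    # 存储模型,目录名仅为示例
    paddle.jit.save(layer=net, model_path="linear.minimal.model")

包含训练过程的完整示例如下: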
code-block:: python + + import numpy as np + import paddle + import paddle.nn as nn + import paddle.optimizer as opt + + BATCH_SIZE = 16 + BATCH_NUM = 4 + EPOCH_NUM = 4 + + IMAGE_SIZE = 784 + CLASS_NUM = 10 + + # define a random dataset + class RandomDataset(paddle.io.Dataset): + def __init__(self, num_samples): + self.num_samples = num_samples + + def __getitem__(self, idx): + image = np.random.random([IMAGE_SIZE]).astype('float32') + label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64') + return image, label + + def __len__(self): + return self.num_samples + + class LinearNet(nn.Layer): + def __init__(self): + super(LinearNet, self).__init__() + self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM) + + @paddle.jit.to_static + def forward(self, x): + return self._linear(x) + + def train(layer, loader, loss_fn, opt): + for epoch_id in range(EPOCH_NUM): + for batch_id, (image, label) in enumerate(loader()): + out = layer(image) + loss = loss_fn(out, label) + loss.backward() + opt.step() + opt.clear_grad() + print("Epoch {} batch {}: loss = {}".format( + epoch_id, batch_id, np.mean(loss.numpy()))) + + # enable dygraph mode + place = paddle.CPUPlace() + paddle.disable_static(place) + + # 1. train & save model. + + # create network + layer = LinearNet() + loss_fn = nn.CrossEntropyLoss() + adam = opt.Adam(learning_rate=0.001, parameters=layer.parameters()) + + # create data loader + dataset = RandomDataset(BATCH_NUM * BATCH_SIZE) + loader = paddle.io.DataLoader(dataset, + places=place, + batch_size=BATCH_SIZE, + shuffle=True, + drop_last=True, + num_workers=2) + + # train + train(layer, loader, loss_fn, adam) + + # save + model_path = "linear.example.model" + paddle.jit.save(layer, model_path) + diff --git a/doc/paddle/api/paddle/fluid/dygraph/jit/set_code_level_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/jit/set_code_level_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..dfce37a70cae6f713cc6fffdbbc0dfae43e98f3a --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/jit/set_code_level_cn.rst @@ -0,0 +1,33 @@ +.. _cn_api_fluid_dygraph_jit_set_code_level: + +set_code_level +----------------- + +.. py:function:: paddle.jit.set_code_level(level=100, also_to_stdout=False) +设置代码级别,打印该级别 AST Transformer 转化后的代码。 + +有两种方法设置代码级别: + +1. 调用函数 ``set_code_level`` +2. 设置环境变量 ``TRANSLATOR_CODE_LEVEL`` + +.. note:: + 函数 ``set_code_level`` 的优先级高于环境变量 ``TRANSLATOR_CODE_LEVEL``。 + + +参数: + - **level** (int) - 打印的代码级别。默认值为100,这意味着打印的是所有 AST Transformer 转化后的代码。 + - **also_to_stdout** (bool) - 表示是否也将代码输出到 ``sys.stdout``。默认值 False,表示仅输出到 ``sys.stderr``。 + + +**示例代码** + +.. code-block:: python + + import paddle + import os + paddle.jit.set_code_level(2) + # It will print the transformed code at level 2, which means to print the code after second transformer, + # as the date of August 28, 2020, it is CastTransformer. + os.environ['TRANSLATOR_CODE_LEVEL'] = '3' + # The code level is now 3, but it has no effect because it has a lower priority than `set_code_level` diff --git a/doc/paddle/api/paddle/fluid/dygraph/jit/set_verbosity_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/jit/set_verbosity_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b5e35dd2a113640368af516ed08f656cf6350b87 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/jit/set_verbosity_cn.rst @@ -0,0 +1,31 @@ +.. _cn_api_fluid_dygraph_jit_set_verbosity: + +set_verbosity +----------------- + +.. 
py:function:: paddle.jit.set_verbosity(level=0, also_to_stdout=False) +设置动态图转静态图的日志详细级别。 + +有两种方法设置日志详细级别: + +1. 调用函数 ``set_verbosity`` +2. 设置环境变量 ``TRANSLATOR_VERBOSITY`` + +.. note:: + 函数 ``set_verbosity`` 的优先级高于环境变量 ``TRANSLATOR_VERBOSITY``。 + + +参数: + - **level** (int) - 日志详细级别。值越大,表示越详细。默认值为0,表示不显示日志。 + - **also_to_stdout** (bool) - 表示是否也将日志信息输出到 ``sys.stdout``。默认值 False,表示仅输出到 ``sys.stderr``。 + +**示例代码** + +.. code-block:: python + + import os + import paddle + paddle.jit.set_verbosity(1) + # The verbosity level is now 1 + os.environ['TRANSLATOR_VERBOSITY'] = '3' + # The verbosity level is now 3, but it has no effect because it has a lower priority than `set_verbosity` diff --git a/doc/paddle/api/paddle/fluid/dygraph/layers/Layer_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/layers/Layer_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ffb30530b8d7a51170deeadb48e843c80ea1c719 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/layers/Layer_cn.rst @@ -0,0 +1,421 @@ +.. _cn_api_fluid_dygraph_Layer: + +Layer +------------------------------- + +.. py:class:: paddle.fluid.dygraph.Layer(name_scope=None, dtype=core.VarDesc.VarType.FP32) + + + + +基于OOD实现的动态图Layer,包含该Layer的参数、前序运行的结构等信息。 + +参数: + - **name_scope** (str,可选) - 为Layer内部参数命名而采用的名称前缀。如果前缀为“mylayer”,在一个类名为MyLayer的Layer中,参数名为“mylayer_0.w_n”,其中w是参数的名称,n为自动生成的具有唯一性的后缀。如果为None,前缀名将为小写的类名。默认值为None。 + - **dtype** (str|core.VarDesc.VarType, 可选) - Layer中参数数据类型。如果设置为str,则可以是“bool”,“float16”,“float32”,“float64”,“int8”,“int16”,“int32”,“int64”,“uint8”或“uint16”。默认值为 ``core.VarDesc.VarType.FP32`` 。 + +返回:无 + +.. py:method:: train() + +将此层及其所有子层设置为训练模式。这只会影响某些模块,如Dropout和BatchNorm。 + +返回:无 + +.. py:method:: eval() + +将此层及其所有子层设置为预测模式。这只会影响某些模块,如Dropout和BatchNorm。 + +返回:无 + +.. py:method:: full_name() + +Layer的全名。组成方式为: ``name_scope`` + “/” + MyLayer.__class__.__name__ 。 + +返回:Layer的全名 + +返回类型:str + +.. py:method:: register_forward_pre_hook(hook) + +为Layer注册一个 ``forward pre-hook`` 函数,该 ``hook`` 函数将会在 ``forward`` 函数调用之前被调用。 + +``hook`` 函数具有以下形式:它的 ``input`` 是 ``Layer`` 的 ``input`` ,并且可以返回一个元组或者单个修改值;如果返回单个修改值,则将值包装到一个元组中。用户可以使用该函数来查看或修改 ``Layer`` ``forward`` 函数的输入。 + +hook(Layer, input) -> None or modified input + +参数: + - **hook** (function) - 被注册为 ``forward pre-hook`` 的函数 + +返回:一个 ``HookRemoveHelper`` 类对象,可通过调用 ``hook_remove_helper.remove()`` 来删除注册的hook函数。 + +返回类型: ``HookRemoveHelper`` 类对象 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + # forward_pre_hook函数修改了layer的输入:input = input * 2 + def forward_pre_hook(layer, input): + # 改变输入值 + input_return = (input[0] * 2) + return input_return + + with fluid.dygraph.guard(): + linear = fluid.Linear(13, 5, dtype="float32") + + # 注册hook + forward_pre_hook_handle = linear.register_forward_pre_hook(forward_pre_hook) + + value0 = np.arange(26).reshape(2, 13).astype("float32") + in0 = fluid.dygraph.to_variable(value0) + out0 = linear(in0) + + # 移除hook + forward_pre_hook_handle.remove() + + value1 = value0 * 2 + in1 = fluid.dygraph.to_variable(value1) + out1 = linear(in1) + + # hook改变了layer的输入(input = input * 2),所以out0等于out1 + assert (out0.numpy() == out1.numpy()).any() + +.. 
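作为补充,``hook`` 函数也可以只查看输入而不做修改:当其返回 ``None`` 时,``forward`` 的输入保持不变。下面是一个只打印输入形状的小示意(函数名 ``forward_pre_hook_print`` 为假设的示例名称):

.. code-block:: python

    import paddle.fluid as fluid
    import numpy as np

    # 该 hook 仅打印信息并返回 None,不会改变 forward 的输入
    def forward_pre_hook_print(layer, input):
        print("layer: {}, input shape: {}".format(
            layer.full_name(), input[0].shape))
        return None

    with fluid.dygraph.guard():
        linear = fluid.Linear(13, 5, dtype="float32")
        handle = linear.register_forward_pre_hook(forward_pre_hook_print)

        value = np.arange(26).reshape(2, 13).astype("float32")
        out = linear(fluid.dygraph.to_variable(value))

        # 使用完毕后移除 hook
        handle.remove()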
py:method:: register_forward_post_hook(hook) + +为Layer注册一个 ``forward post-hook`` 函数,该 ``hook`` 函数将会在 ``forward`` 函数调用之后被调用。 + +``hook`` 函数具有以下形式,它的 ``input`` 和 ``output`` 是 ``Layer`` 的 ``input`` 和 ``output`` 。用户可以用该函数来查看和修改 ``Layer`` ``forward`` 函数的输出。 + +hook(Layer, input, output) -> None or modified output + +参数: + - **hook** (function) - 被注册为 ``forward post-hook`` 的函数 + +返回:一个 ``HookRemoveHelper`` 类对象,可通过调用 ``hook_remove_helper.remove()`` 来删除注册的hook函数。 + +返回类型: ``HookRemoveHelper`` 类对象 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + # forward_post_hook函数改变了layer的输出:output = output * 2 + def forward_post_hook(layer, input, output): + # 改变输出值 + return output * 2 + + with fluid.dygraph.guard(): + linear = fluid.Linear(13, 5, dtype="float32") + + # 注册hook + forward_post_hook_handle = linear.register_forward_post_hook(forward_post_hook) + + value1 = np.arange(26).reshape(2, 13).astype("float32") + in1 = fluid.dygraph.to_variable(value1) + + out0 = linear(in1) + + # remove the hook + forward_post_hook_handle.remove() + + out1 = linear(in1) + + # hook改变了layer的输出(output = output * 2),所以out0等于out1 * 2 + assert (out0.numpy() == (out1.numpy()) * 2).any() + +.. py:method:: create_parameter(shape, attr=None, dtype="float32", is_bias=False, default_initializer=None) + +为Layer创建参数。 + +参数: + - **shape** (list) - 参数的形状。列表中的数据类型必须为int。 + - **attr** (ParamAttr,可选) - 指定权重参数属性的对象,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。默认值为None。 + - **dtype** (str|core.VarDesc.VarType, 可选) - Layer中参数数据类型。如果设置为str,则可以是“bool”,“float16”,“float32”,“float64”,“int8”,“int16”,“int32”,“int64”,“uint8”或“uint16”。默认值为“float32”。 + - **is_bias** (bool, 可选) - 是否是偏置参数。默认值:False。 + - **default_initializer** (Initializer, 可选) - 默认的参数初始化方法。如果设置为None,则设置非bias参数的初始化方式为 :ref:`cn_api_fluid_initializer_XavierInitializer` ,设置bias参数的初始化方式为 :ref:`cn_api_fluid_initializer_ConstantInitializer` 。默认值:None。 + +返回:创建的参数变量 + +返回类型: :ref:`cn_api_fluid_Variable` + +.. py:method:: create_variable(name=None, persistable=None, dtype=None, type=VarType.LOD_TENSOR) + +为Layer创建变量。 + +参数: + - **name** (str, 可选) - 变量名。默认值:None。 + - **persistable** (bool, 可选) - 是否为持久性变量,后续会被移出。默认值:None。 + - **dtype** (str|core.VarDesc.VarType, 可选) - Layer中参数数据类型。如果设置为str,则可以是“bool”,“float16”,“float32”,“float64”,“int8”,“int16”,“int32”,“int64”,“uint8”或“uint16”。默认值为 ``core.VarDesc.VarType.FP32`` 。 + - **type** (core.VarDesc.VarType, 可选) - 变量类型,该参数不需要用户设置。默认值:core.VarDesc.VarType.LOD_TENSOR。 + +返回:创建的 ``Tensor`` + +返回类型: :ref:`cn_api_fluid_Variable` + +.. py:method:: parameters(include_sublayers=True) + +返回一个由当前层及其子层的所有参数组成的列表。 + +参数: + - **include_sublayers** (bool, 可选) - 是否返回子层的参数。如果为True,返回的列表中包含子层的参数。默认值:True。 + +返回:一个由当前层及其子层的所有参数组成的列表,列表中的元素类型为Parameter(Variable)。 + +返回类型:list + +.. py:method:: sublayers(include_sublayers=True) + +返回一个由所有子层组成的列表。 + +参数: + - **include_sublayers** (bool, 可选) - 是否返回子层中各个子层。如果为True,则包括子层中的各个子层。默认值:True。 + +返回: 一个由所有子层组成的列表,列表中的元素类型为Layer。 + +返回类型:list + +.. py:method:: clear_gradients() + +清除该层所有参数的梯度。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + with fluid.dygraph.guard(): + value = np.arange(26).reshape(2, 13).astype("float32") + a = fluid.dygraph.to_variable(value) + linear = fluid.Linear(13, 5, dtype="float32") + adam = fluid.optimizer.Adam(learning_rate=0.01, + parameter_list=linear.parameters()) + out = linear(a) + out.backward() + adam.minimize(out) + linear.clear_gradients() + + +.. 
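上文的 ``parameters()`` 与 ``sublayers()`` 方法没有单独的示例,这里补充一个简单的用法示意:

.. code-block:: python

    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        fc1 = fluid.Linear(10, 3)
        fc2 = fluid.Linear(3, 10, bias_attr=False)
        model = fluid.dygraph.Sequential(fc1, fc2)

        # parameters() 返回当前层及其子层的全部参数:
        # fc1 含 weight 与 bias,fc2 仅含 weight,共 3 个参数
        print(len(model.parameters()))  # 3

        # sublayers() 返回全部子层
        print(len(model.sublayers()))   # 2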
py:method:: named_parameters(prefix='', include_sublayers=True) + +返回层中所有参数的迭代器,生成名称和参数的元组。 + +参数: + - **prefix** (str, 可选) - 在所有参数名称前加的前缀。默认值:''。 + - **include_sublayers** (bool, 可选) - 是否返回子层的参数。如果为True,返回的列表中包含子层的参数。默认值:True。 + +返回:产出名称和参数的元组的迭代器。 + +返回类型:iterator + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + with fluid.dygraph.guard(): + fc1 = fluid.Linear(10, 3) + fc2 = fluid.Linear(3, 10, bias_attr=False) + model = fluid.dygraph.Sequential(fc1, fc2) + for name, param in model.named_parameters(): + print(name, param) + +.. py:method:: named_sublayers(prefix='', include_sublayers=True, include_self=False, layers_set=None) + +返回层中所有子层上的迭代器,生成名称和子层的元组。重复的子层只产生一次。 + +参数: + - **prefix** (str, 可选) - 在所有参数名称前加的前缀。默认值:''。 + - **include_sublayers** (bool, 可选) - 是否返回子层中各个子层。如果为True,则包括子层中的各个子层。默认值:True。 + - **include_self** (bool, 可选) - 是否包含该层自身。默认值:False。 + - **layers_set** (set, 可选): 记录重复子层的集合。默认值:None。 + +返回:产出名称和子层的元组的迭代器。 + +返回类型:iterator + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + with fluid.dygraph.guard(): + fc1 = fluid.Linear(10, 3) + fc2 = fluid.Linear(3, 10, bias_attr=False) + model = fluid.dygraph.Sequential(fc1, fc2) + for prefix, layer in model.named_sublayers(): + print(prefix, layer) + +.. py:method:: register_buffer(name, variable, persistable=True) + +将一个Variable注册为buffer。 + +buffer是一个非参数类型的变量,不会被优化器更新,但在评估或预测阶段可能是必要的状态变量。比如 ``BatchNorm`` 中的均值和方差。 + +注册的buffer默认是可持久性的,会被保存到 ``state_dict`` 中。如果指定 ``persistable`` 参数为False,则会注册一个非持久性的buffer,即不会同步和保存到 ``state_dict`` 中。 + +参数: + - **name** (str) - 注册buffer的名字。可以通过此名字来访问已注册的buffer。 + - **variable** (Variable) - 将被注册为buffer的变量。 + - **persistable** (bool, 可选) - 注册的buffer是否需要可持久性地保存到 ``state_dict`` 中。 + +返回:None + +返回类型:None + +**代码示例** + +.. code-block:: python + + import numpy as np + import paddle.fluid as fluid + + with fluid.dygraph.guard(): + linear = fluid.Linear(10, 3) + value = np.array([0]).astype("float32") + buffer = fluid.dygraph.to_variable(value) + linear.register_buffer("buf_name", buffer, persistable=True) + + # get the buffer by attribute. + print(linear.buf_name) + +.. py:method:: buffers(include_sublayers=True) + +返回一个由当前层及其子层的所有buffers组成的列表。 + +参数: + - **include_sublayers** (bool, 可选) - 是否返回子层的buffers。如果为True,返回的列表中包含子层的buffers。默认值:True。 + +返回:一个由当前层及其子层的所有buffers组成的列表,列表中的元素类型为Variable。 + +返回类型:list + +.. py:method:: named_buffers(prefix='', include_sublayers=True) + +返回层中所有buffers的迭代器,生成名称和buffer的元组。 + +参数: + - **prefix** (str, 可选) - 在所有buffer名称前加的前缀。默认值:''。 + - **include_sublayers** (bool, 可选) - 是否返回子层的buffers。如果为True,返回的列表中包含子层的buffers。默认值:True。 + +返回:产出名称和buffer的元组的迭代器。 + +返回类型:iterator + +**代码示例** + +.. code-block:: python + + import numpy as np + import paddle.fluid as fluid + + with fluid.dygraph.guard(): + fc1 = fluid.Linear(10, 3) + buffer1 = fluid.dygraph.to_variable(np.array([0]).astype("float32")) + # register a variable as buffer by specific `persistable` + fc1.register_buffer("buf_name_1", buffer1, persistable=True) + + fc2 = fluid.Linear(3, 10) + buffer2 = fluid.dygraph.to_variable(np.array([1]).astype("float32")) + # register a buffer by assigning an attribute with Variable. + # The `persistable` can only be False by this way. + fc2.buf_name_2 = buffer2 + + model = fluid.dygraph.Sequential(fc1, fc2) + + # get all named buffers + for name, buffer in model.named_buffers(): + print(name, buffer) + +.. 
py:method:: forward(*inputs, **kwargs) + +定义每次调用时执行的计算。应该被所有子类覆盖。 + +参数: + - **\*inputs** (tuple) - 解包后的tuple参数。 + - **\*\*kwargs** (dict) - 解包后的dict参数。 + +.. py:method:: add_sublayer(name, sublayer) + +添加子层实例。可以通过self.name访问该sublayer。 + +参数: + - **name** (str) - 子层名。 + - **sublayer** (Layer) - Layer实例。 + +返回:添加的子层 + +返回类型:Layer + +.. py:method:: add_parameter(name, parameter) + +添加参数实例。可以通过self.name访问该parameter。 + +参数: + - **name** (str) - 参数名。 + - **parameter** (Parameter) - Parameter实例。 + +返回:传入的参数实例 + +返回类型:Parameter( :ref:`cn_api_fluid_Variable` ) + +.. py:method:: state_dict(destination=None, include_sublayers=True) + +获取当前层及其子层的所有参数和可持久性buffers。并将所有参数和buffers存放在dict结构中。 + +参数: + - **destination** (dict, 可选) - 如果提供 ``destination`` ,则所有参数和可持久性buffers都将存放在 ``destination`` 中。 默认值:None。 + - **include_sublayers** (bool, 可选) - 如果设置为True,则包括子层的参数和buffers。默认值:True。 + +返回:包含所有参数和可持久行buffers的dict + +返回类型:dict + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + with fluid.dygraph.guard(): + emb = fluid.dygraph.Embedding([10, 10]) + state_dict = emb.state_dict() + fluid.save_dygraph(state_dict, "paddle_dy") + +.. py:method:: set_state_dict(state_dict, include_sublayers=True, use_structured_name=True) + +根据传入的 ``state_dict`` 设置参数和可持久性buffers。 所有参数和buffers将由 ``state_dict`` 中的 ``Tensor`` 设置。 + +参数: + - **state_dict** (dict) - 包含所有参数和可持久性buffers的dict。 + - **include_sublayers** (bool, 可选) - 如果设置为True,则还包括子层的参数和buffers。 默认值:True。 + - **use_structured_name** (bool, 可选) - 如果设置为True,将使用Layer的结构性变量名作为dict的key,否则将使用Parameter或者Buffer的变量名作为key。默认值:True。 + +返回:None + +**代码示例** + +.. code-block:: python + + import paddle + + paddle.disable_static() + + emb = paddle.nn.Embedding([10, 10]) + + state_dict = emb.state_dict() + paddle.save(state_dict, "paddle_dy") + + para_state_dict, _ = paddle.load("paddle_dy") + + emb.set_state_dict(para_state_dict) + diff --git a/doc/paddle/api/paddle/fluid/dygraph/learning_rate_scheduler/CosineDecay_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/learning_rate_scheduler/CosineDecay_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..75f0259add2cc7fc2ebb475e3949b162875b548d --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/learning_rate_scheduler/CosineDecay_cn.rst @@ -0,0 +1,46 @@ +.. _cn_api_fluid_dygraph_CosineDecay: + +CosineDecay +------------------------------- + + +.. py:class:: paddle.fluid.dygraph.CosineDecay(learning_rate, step_each_epoch, epochs, begin=0, step=1, dtype='float32') + + + + +该接口提供按余弦函数衰减学习率的功能。 + +余弦衰减的计算方式如下。 + +.. math:: + + decayed\_learning\_rate = learning\_rate * 0.5 * (math.cos(global\_step * \frac{math.pi}{step\_each\_epoch} ) + 1) + +式中, + +- :math:`decayed\_learning\_rate` : 衰减后的学习率。 +式子中各参数详细介绍请看参数说明。 + +参数: + - **learning_rate** (Variable | float) - 初始学习率。如果类型为Variable,则为shape为[1]的Tensor,数据类型为float32或float64;也可以是python的float类型。 + - **step_each_epoch** (int) - 遍历一遍训练数据所需的步数。 + - **begin** (int,可选) - 起始步,即以上公式中global_step的初始化值。默认值为0。 + - **step** (int,可选) - 步大小,即以上公式中global_step的每次的增量值。默认值为1。 + - **dtype** (str,可选) - 初始化学习率变量的数据类型,可以为"float32", "float64"。默认值为"float32"。 + + +**代码示例** + +.. 
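在查看框架用法之前,可以先用纯 Python 按上述公式做一个数值示意(与框架实现无关,仅帮助理解;这里假设 ``learning_rate=0.1``、``step_each_epoch=100``):

.. code-block:: python

    import math

    learning_rate = 0.1
    step_each_epoch = 100

    # 按上文公式手动计算若干 global_step 处的学习率
    for global_step in [0, 25, 50, 100]:
        decayed_lr = learning_rate * 0.5 * (
            math.cos(global_step * math.pi / step_each_epoch) + 1)
        print(global_step, round(decayed_lr, 6))
    # 0 0.1        -> 初始学习率
    # 25 0.085355
    # 50 0.05      -> 半个 epoch 处约为初始值的一半
    # 100 0.0      -> 一个 epoch 结束时衰减到 0

框架中的用法示例如下: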
code-block:: python + + import paddle.fluid as fluid + + base_lr = 0.1 + with fluid.dygraph.guard(): + gru = fluid.dygraph.GRUUnit(5 * 3) + optimizer = fluid.optimizer.SGD( + learning_rate=fluid.dygraph.CosineDecay( + base_lr, 10000, 120), parameter_list=gru.parameters()) + + diff --git a/doc/paddle/api/paddle/fluid/dygraph/learning_rate_scheduler/ExponentialDecay_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/learning_rate_scheduler/ExponentialDecay_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7870294264337d33374bac6fab14aa43883e6024 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/learning_rate_scheduler/ExponentialDecay_cn.rst @@ -0,0 +1,69 @@ +.. _cn_api_fluid_dygraph_ExponentialDecay: + +ExponentialDecay +------------------------------- + + +.. py:class:: paddle.fluid.dygraph.ExponentialDecay(learning_rate, decay_steps, decay_rate, staircase=False, begin=0, step=1, dtype=’float32‘) + + + + +该接口提供一种学习率按指数函数衰减的功能。 + +指数衰减的计算方式如下。 + +.. math:: + + decayed\_learning\_rate = learning\_rate * decay\_rate ^ y + + +当staircase为False时,y对应的计算公式为: + +.. math:: + + y = \frac{global\_step}{decay\_steps} + +当staircase为True时,y对应的计算公式为: + +.. math:: + + y = math.floor(\frac{global\_step}{decay\_steps}) + +式中, + +- :math:`decayed\_learning\_rate` : 衰减后的学习率。 +式子中各参数详细介绍请看参数说明。 + +参数: + - **learning_rate** (Variable|float) - 初始学习率。如果类型为Variable,则为shape为[1]的Tensor,数据类型为float32或float64;也可以是python的float类型。 + - **decay_steps** (int) - 衰减步数。必须是正整数,该参数确定衰减周期。 + - **decay_rate** (float)- 衰减率。 + - **staircase** (bool) - 若为True,则以不连续的间隔衰减学习速率即阶梯型衰减。若为False,则以标准指数型衰减。默认值为False。 + - **begin** (int) - 起始步,即以上运算式子中global_step的初始化值。默认值为0。 + - **step** (int) - 步大小,即以上运算式子中global_step的每次的增量值,使得global_step随着训练的次数递增。默认值为1。 + - **dtype** (str) - 初始化学习率变量的数据类型,可以为"float32", "float64"。 默认值为"float32"。 + +返回: 无 + + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + base_lr = 0.1 + with fluid.dygraph.guard(): + sgd_optimizer = fluid.optimizer.SGD( + learning_rate=fluid.dygraph.ExponentialDecay( + learning_rate=base_lr, + decay_steps=10000, + decay_rate=0.5, + staircase=True)) + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/dygraph/learning_rate_scheduler/InverseTimeDecay_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/learning_rate_scheduler/InverseTimeDecay_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..66906a274320d1f24d4123c0665dd70a6a64f6fd --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/learning_rate_scheduler/InverseTimeDecay_cn.rst @@ -0,0 +1,59 @@ +.. _cn_api_fluid_dygraph_InverseTimeDecay: + +InverseTimeDecay +------------------------------- + + +.. py:class:: paddle.fluid.dygraph.InverseTimeDecay(learning_rate, decay_steps, decay_rate, staircase=False, begin=0, step=1, dtype='float32') + + + + +该接口提供反时限学习率衰减的功能。 + +反时限学习率衰减计算方式如下。 + +当staircase为False时,计算公式为: + +.. math:: + + decayed\_learning\_rate = \frac{learning\_rate}{1 + decay\_rate * \frac{global\_step}{decay\_step}} + +当staircase为True时,计算公式为: + +.. 
math:: + + decayed\_learning\_rate = \frac{learning\_rate}{1 + decay\_rate * math.floor(\frac{global\_step}{decay\_step})} + +式中, + +- :math:`decayed\_learning\_rate` : 衰减后的学习率。 +式子中各参数详细介绍请看参数说明。 + +参数: + - **learning_rate** (Variable|float) - 初始学习率值。如果类型为Variable,则为shape为[1]的Tensor,数据类型为float32或float64;也可以是python的float类型。 + - **decay_steps** (int) - 衰减步数,见以上衰减运算式子。 + - **decay_rate** (float)- 衰减率。见以上衰减运算。 + - **staircase** (bool,可选) - 指定是否按阶梯状衰减。若为True, 学习率变化曲线呈阶梯状。若为False,学习率变化值曲线为平滑的曲线。默认值为False。 + - **begin** (int,可选) - 起始步,即以上运算式子中global_step的初始化值。默认值为0。 + - **step** (int,可选) - 步大小,即以上运算式子中global_step的每次的增量值,使得global_step随着训练的次数递增。默认值为1。 + - **dtype** (str,可选) - 初始化学习率变量的数据类型,可以为"float32", "float64"。默认值为"float32"。 + +返回: 无 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + base_lr = 0.1 + with fluid.dygraph.guard(): + sgd_optimizer = fluid.optimizer.SGD( + learning_rate=fluid.dygraph.InverseTimeDecay( + learning_rate=base_lr, + decay_steps=10000, + decay_rate=0.5, + staircase=True)) + + + diff --git a/doc/paddle/api/paddle/fluid/dygraph/learning_rate_scheduler/NaturalExpDecay_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/learning_rate_scheduler/NaturalExpDecay_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e9d55c0f613ac34978d1895f2ed109cf662a8737 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/learning_rate_scheduler/NaturalExpDecay_cn.rst @@ -0,0 +1,67 @@ +.. _cn_api_fluid_dygraph_NaturalExpDecay: + +NaturalExpDecay +------------------------------- + + +.. py:class:: paddle.fluid.dygraph.NaturalExpDecay(learning_rate, decay_steps, decay_rate, staircase=False, begin=0, step=1, dtype='float32') + + + + +该接口提供按自然指数衰减学习率的功能。 + +自然指数衰减的计算方式如下。 + +.. math:: + + decayed\_learning\_rate = learning\_rate * e^{y} + +当staircase为False时,y对应的计算公式为: + +.. math:: + + y = - decay\_rate * \frac{global\_step}{decay\_steps} + +当staircase为True时,y对应的计算公式为: + +.. math:: + + y = - decay\_rate * math.floor(\frac{global\_step}{decay\_steps}) + +式中, + +- :math:`decayed\_learning\_rate` : 衰减后的学习率。 +式子中各参数详细介绍请看参数说明。 + +参数: + - **learning_rate** (Variable|float) - 初始学习率值。如果类型为Variable,则为shape为[1]的Tensor,数据类型为float32或float64;也可以是python的float类型。 + - **decay_steps** (int) – 指定衰减的步长。该参数确定衰减的周期。 + - **decay_rate** (float) – 指定衰减率。 + - **staircase** (bool,可选) - 若为True, 学习率变化曲线呈阶梯状,若为False,学习率变化值曲线为平滑的曲线。默认值为False。 + - **begin** (int,可选) – 起始步,即以上运算式子中global_step的初始化值。默认值为0。 + - **step** (int,可选) – 步大小,即以上运算式子中global_step的每次的增量值。默认值为1。 + - **dtype** (str,可选) – 学习率值的数据类型,可以为"float32", "float64"。默认值为"float32"。 + +返回: 无 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + base_lr = 0.1 + with fluid.dygraph.guard(): + emb = fluid.dygraph.Embedding([10, 10]) + sgd_optimizer = fluid.optimizer.SGD( + learning_rate=fluid.dygraph.NaturalExpDecay( + learning_rate=base_lr, + decay_steps=10000, + decay_rate=0.5, + staircase=True), + parameter_list=emb.parameters()) + + + + + diff --git a/doc/paddle/api/paddle/fluid/dygraph/learning_rate_scheduler/NoamDecay_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/learning_rate_scheduler/NoamDecay_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..bd502bf2e156b9d8a64205b14e95bb75c0075931 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/learning_rate_scheduler/NoamDecay_cn.rst @@ -0,0 +1,50 @@ +.. _cn_api_fluid_dygraph_NoamDecay: + +NoamDecay +------------------------------- + + +.. 
py:class:: paddle.fluid.dygraph.NoamDecay(d_model, warmup_steps, begin=1, step=1, dtype='float32', learning_rate=1.0) + + + + +该接口提供Noam衰减学习率的功能。 + +Noam衰减的计算方式如下。 + +.. math:: + + decayed\_learning\_rate = learning\_rate * d_{model}^{-0.5} * min(global\_steps^{-0.5}, global\_steps * warmup\_steps^{-1.5}) + +关于Noam衰减的更多细节请参考 `attention is all you need `_ + +式中, + +- :math:`decayed\_learning\_rate` : 衰减后的学习率。 +式子中各参数详细介绍请看参数说明。 + +参数: + - **d$_{model}$** (Variable|int) - 模型的输入、输出向量特征维度,为超参数。如果设置为Variable类型值,则数据类型可以为int32或int64的标量Tensor,也可以设置为Python int。 + - **warmup_steps** (Variable|int) - 预热步数,为超参数。如果设置为Variable类型,则数据类型为int32或int64的标量Tensor,也可以设置为为Python int。 + - **begin** (int,可选) – 起始步。即以上运算式子中global_steps的初始值。默认值为0。 + - **step** (int,可选) – 步大小。即以上运算式子中global_steps的递增值。默认值为1。 + - **dtype** (str,可选) – 学习率值的数据类型,可以为"float32", "float64"。默认值为"float32"。 + - **learning_rate** (Variable|float|int,可选) - 初始学习率。如果类型为Variable,则为shape为[1]的Tensor,数据类型为float32或float64;也可以是python的int类型。默认值为1.0。 + +返回: 无 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + warmup_steps = 100 + learning_rate = 0.01 + with fluid.dygraph.guard(): + emb = fluid.dygraph.Embedding([10, 10]) + optimizer = fluid.optimizer.SGD( + learning_rate = fluid.dygraph.NoamDecay( + 1/(warmup_steps *(learning_rate ** 2)), + warmup_steps), + parameter_list = emb.parameters()) diff --git a/doc/paddle/api/paddle/fluid/dygraph/learning_rate_scheduler/PiecewiseDecay_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/learning_rate_scheduler/PiecewiseDecay_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ada3ce242161a405bce53907208f4c4ad23fee29 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/learning_rate_scheduler/PiecewiseDecay_cn.rst @@ -0,0 +1,48 @@ +.. _cn_api_fluid_dygraph_PiecewiseDecay: + +PiecewiseDecay +------------------------------- + + +.. py:class:: paddle.fluid.dygraph.PiecewiseDecay(boundaries, values, begin, step=1, dtype='float32') + + + + +该接口提供对初始学习率进行分段(piecewise)常数衰减的功能。 + +分段常数衰减的过程举例描述如下。 + +.. code-block:: text + + 例如,设定的boundaries列表为[10000, 20000],候选学习率常量列表values为[1.0, 0.5, 0.1],则: + 1、在当前训练步数global_step小于10000步,学习率值为1.0。 + 2、在当前训练步数global_step大于或等于10000步,并且小于20000步时,学习率值为0.5。 + 3、在当前训练步数global_step大于或等于20000步时,学习率值为0.1。 + +参数: + - **boundaries** (list) - 指定衰减的步数边界。列表的数据元素为Python int类型。 + - **values** (list) - 备选学习率列表。数据元素类型为Python float的列表。与边界值列表有对应的关系。 + - **begin** (int) – 起始步,即以上举例描述中global_step的初始化值。 + - **step** (int,可选) – 步大小,即以上举例描述中global_step每步的递增值。默认值为1。 + - **dtype** (str,可选) – 初始化学习率变量的数据类型,可以为"float32", "float64"。默认值为"float32"。 + +返回: 无 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + boundaries = [10000, 20000] + values = [1.0, 0.5, 0.1] + with fluid.dygraph.guard(): + emb = fluid.dygraph.Embedding( [10, 10] ) + optimizer = fluid.optimizer.SGD( + learning_rate=fluid.dygraph.PiecewiseDecay(boundaries, values, 0), + parameter_list = emb.parameters() ) + + + + + diff --git a/doc/paddle/api/paddle/fluid/dygraph/learning_rate_scheduler/PolynomialDecay_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/learning_rate_scheduler/PolynomialDecay_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c48d34887b8c924a08d0ada219f74e0fddb87e9d --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/learning_rate_scheduler/PolynomialDecay_cn.rst @@ -0,0 +1,61 @@ +.. _cn_api_fluid_dygraph_PolynomialDecay: + +PolynomialDecay +------------------------------- + + +.. 
py:class:: paddle.fluid.dygraph.PolynomialDecay(learning_rate, decay_steps, end_learning_rate=0.0001, power=1.0, cycle=False, begin=0, step=1, dtype='float32') + + + + +该接口提供学习率按多项式衰减的功能。通过多项式衰减函数,使得学习率值逐步从初始的 ``learning_rate``,衰减到 ``end_learning_rate`` 。 + +计算方式如下。 + +若cycle为True,则计算公式为: + +.. math:: + + decay\_steps &= decay\_steps * math.ceil(\frac{global\_step}{decay\_steps}) \\ + decayed\_learning\_rate &= (learning\_rate-end\_learning\_rate)*(1-\frac{global\_step}{decay\_steps})^{power}+end\_learning\_rate + +若cycle为False,则计算公式为: + +.. math:: + + global\_step &= min(global\_step, decay\_steps) \\ + decayed\_learning\_rate &= (learning\_rate-end\_learning\_rate)*(1-\frac{global\_step}{decay\_steps})^{power}+end\_learning\_rate + +式中, + +- :math:`decayed\_learning\_rate` : 衰减后的学习率。 +式子中各参数详细介绍请看参数说明。 + +参数: + - **learning_rate** (Variable|float32) - 初始学习率。如果类型为Variable,则为shape为[1]的Tensor,数据类型为float32或float64;也可以是python的float类型。 + - **decay_steps** (int) - 衰减步数。必须是正整数,该参数确定衰减周期。 + - **end_learning_rate** (float,可选) - 最小的最终学习率。默认值为0.0001。 + - **power** (float,可选) - 多项式的幂。默认值为1.0。 + - **cycle** (bool,可选) - 学习率下降后是否重新上升。若为True,则学习率衰减到最低学习率值时,会出现上升。若为False,则学习率曲线则单调递减。默认值为False。 + - **begin** (int,可选) – 起始步,即以上运算式子中global_step的初始化值。默认值为0。 + - **step** (int,可选) – 步大小,即以上运算式子中global_step的递增值。默认值为1。 + - **dtype** (str,可选)– 初始化学习率变量的数据类型,可以为"float32", "float64"。默认值为"float32"。 + +返回: 无 + + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + start_lr = 0.01 + total_step = 5000 + end_lr = 0 + with fluid.dygraph.guard(): + emb = fluid.dygraph.Embedding( [10, 10]) + optimizer = fluid.optimizer.SGD( + learning_rate = fluid.dygraph.PolynomialDecay( + start_lr, total_step, end_lr, power=1.0), + parameter_list = emb.parameters()) diff --git a/doc/paddle/api/paddle/fluid/dygraph/parallel/DataParallel_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/parallel/DataParallel_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ff0d6821ec85dd25b95d6bc80a4811f0756fcd98 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/dygraph/parallel/DataParallel_cn.rst @@ -0,0 +1,207 @@ +.. _cn_api_fluid_dygraph_DataParallel: + +DataParallel +------------ + +.. py:class:: paddle.fluid.dygraph.DataParallel(layers, strategy) + + +通过数据并行模式执行动态图模型。 + +目前,``DataParallel`` 仅支持以多进程的方式执行动态图模型。 + +支持两种使用方式: + +1. 使用 ``paddle.distributed.spawn`` 方法启动,例如: + + ``python demo.py`` (spawn need to be called in ``__main__`` method) + +2. 使用 ``paddle.distributed.launch`` 方法启动,例如: + +``python -m paddle.distributed.launch –selected_gpus=0,1 demo.py`` + +其中 ``demo.py`` 脚本的代码可以是下面的示例代码。 + +参数: + - **Layer** (Layer) - 需要通过数据并行方式执行的模型。 + - **strategy** (ParallelStrategy,可选) - (deprecated) 数据并行的策略,包括并行执行的环境配置。默认为None。 + +返回:支持数据并行的 ``Layer`` + +返回类型:Layer实例 + +**代码示例**: + +.. code-block:: python + + import paddle + import paddle.nn as nn + import paddle.optimizer as opt + import paddle.distributed as dist + + class LinearNet(nn.Layer): + def __init__(self): + super(LinearNet, self).__init__() + self._linear1 = nn.Linear(10, 10) + self._linear2 = nn.Linear(10, 1) + + def forward(self, x): + return self._linear2(self._linear1(x)) + + def train(): + # 1. enable dynamic mode + paddle.disable_static() + + # 2. initialize parallel environment + dist.init_parallel_env() + + # 3. create data parallel layer & optimizer + layer = LinearNet() + dp_layer = paddle.DataParallel(layer) + + loss_fn = nn.MSELoss() + adam = opt.Adam( + learning_rate=0.001, parameters=dp_layer.parameters()) + + # 4. 
run layer + inputs = paddle.randn([10, 10], 'float32') + outputs = dp_layer(inputs) + labels = paddle.randn([10, 1], 'float32') + loss = loss_fn(outputs, labels) + + loss = dp_layer.scale_loss(loss) + loss.backward() + dp_layer.apply_collective_grads() + + adam.step() + adam.clear_grad() + + if __name__ == '__main__': + # 1. start by ``paddle.distributed.spawn`` (default) + dist.spawn(train, nprocs=2) + # 2. start by ``paddle.distributed.launch`` + # train() + +.. py:method:: scale_loss(loss) + +缩放模型损失值 ``loss`` 。在数据并行模式中,损失值 ``loss`` 需要根据并行训练进程的数目进行缩放。 + +如果不在数据并行模式下,会直接返回原 ``loss`` 。 + +参数: + - **loss** (Variable) - 当前模型的损失值。 + +返回:缩放后的损失值 ``loss`` + +返回类型:Variable + +**代码示例** + +.. code-block:: python + + import paddle + import paddle.nn as nn + import paddle.optimizer as opt + import paddle.distributed as dist + + class LinearNet(nn.Layer): + def __init__(self): + super(LinearNet, self).__init__() + self._linear1 = nn.Linear(10, 10) + self._linear2 = nn.Linear(10, 1) + + def forward(self, x): + return self._linear2(self._linear1(x)) + + def train(): + # 1. enable dynamic mode + paddle.disable_static() + + # 2. initialize parallel environment + dist.init_parallel_env() + + # 3. create data parallel layer & optimizer + layer = LinearNet() + dp_layer = paddle.DataParallel(layer) + + loss_fn = nn.MSELoss() + adam = opt.Adam( + learning_rate=0.001, parameters=dp_layer.parameters()) + + # 4. run layer + inputs = paddle.randn([10, 10], 'float32') + outputs = dp_layer(inputs) + labels = paddle.randn([10, 1], 'float32') + loss = loss_fn(outputs, labels) + + loss = dp_layer.scale_loss(loss) + loss.backward() + dp_layer.apply_collective_grads() + + adam.step() + adam.clear_grad() + + if __name__ == '__main__': + # 1. start by ``paddle.distributed.spawn`` (default) + dist.spawn(train, nprocs=2) + # 2. start by ``paddle.distributed.launch`` + # train() + + +.. py:method:: apply_collective_grads() + +AllReduce(规约)参数的梯度值。 + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle + import paddle.nn as nn + import paddle.optimizer as opt + import paddle.distributed as dist + + class LinearNet(nn.Layer): + def __init__(self): + super(LinearNet, self).__init__() + self._linear1 = nn.Linear(10, 10) + self._linear2 = nn.Linear(10, 1) + + def forward(self, x): + return self._linear2(self._linear1(x)) + + def train(): + # 1. enable dynamic mode + paddle.disable_static() + + # 2. initialize parallel environment + dist.init_parallel_env() + + # 3. create data parallel layer & optimizer + layer = LinearNet() + dp_layer = paddle.DataParallel(layer) + + loss_fn = nn.MSELoss() + adam = opt.Adam( + learning_rate=0.001, parameters=dp_layer.parameters()) + + # 4. run layer + inputs = paddle.randn([10, 10], 'float32') + outputs = dp_layer(inputs) + labels = paddle.randn([10, 1], 'float32') + loss = loss_fn(outputs, labels) + + loss = dp_layer.scale_loss(loss) + loss.backward() + dp_layer.apply_collective_grads() + + adam.step() + adam.clear_grad() + + if __name__ == '__main__': + # 1. start by ``paddle.distributed.spawn`` (default) + dist.spawn(train, nprocs=2) + # 2. start by ``paddle.distributed.launch`` + # train() diff --git a/doc/paddle/api/paddle/fluid/evaluator/ChunkEvaluator_cn.rst b/doc/paddle/api/paddle/fluid/evaluator/ChunkEvaluator_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c289db61aed83e2e4a33f8603e70c280a63b2fb2 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/evaluator/ChunkEvaluator_cn.rst @@ -0,0 +1,69 @@ +.. 
_cn_api_fluid_metrics_ChunkEvaluator: + +ChunkEvaluator +------------------------------- + +.. py:class:: paddle.fluid.metrics.ChunkEvaluator(name=None) + + + +该接口使用mini-batch的chunk_eval累计的counter numbers,来计算准确率、召回率和F1值。ChunkEvaluator有三个状态num_infer_chunks,num_label_chunks和num_correct_chunks,分别对应语块数目、标签中的语块数目、正确识别的语块数目。对于chunking的基础知识,请参考 https://www.aclweb.org/anthology/N01-1025 。ChunkEvalEvaluator计算块检测(chunk detection)的准确率,召回率和F1值,支持IOB, IOE, IOBES和IO标注方案。 + +参数: + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:初始化后的 ``ChunkEvaluator`` 对象 + +返回类型:ChunkEvaluator + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + + # 初始化chunck-level的评价管理。 + metric = fluid.metrics.ChunkEvaluator() + + # 假设模型预测10个chuncks,其中8个为正确,且真值有9个chuncks。 + num_infer_chunks = 10 + num_label_chunks = 9 + num_correct_chunks = 8 + + metric.update(num_infer_chunks, num_label_chunks, num_correct_chunks) + numpy_precision, numpy_recall, numpy_f1 = metric.eval() + + print("precision: %.2f, recall: %.2f, f1: %.2f" % (numpy_precision, numpy_recall, numpy_f1)) + + # 下一个batch,完美地预测了3个正确的chuncks。 + num_infer_chunks = 3 + num_label_chunks = 3 + num_correct_chunks = 3 + + metric.update(num_infer_chunks, num_label_chunks, num_correct_chunks) + numpy_precision, numpy_recall, numpy_f1 = metric.eval() + + print("precision: %.2f, recall: %.2f, f1: %.2f" % (numpy_precision, numpy_recall, numpy_f1)) + +.. py:method:: update(num_infer_chunks, num_label_chunks, num_correct_chunks) + +该函数使用输入的(num_infer_chunks, num_label_chunks, num_correct_chunks)来累计更新ChunkEvaluator对象的对应状态,更新方式如下: + + .. math:: + \\ \begin{array}{l}{\text { self. num_infer_chunks }+=\text { num_infer_chunks }} \\ {\text { self. num_Label_chunks }+=\text { num_label_chunks }} \\ {\text { self. num_correct_chunks }+=\text { num_correct_chunks }}\end{array} \\ + +参数: + - **num_infer_chunks** (int|numpy.array) – 给定mini-batch的语块数目。 + - **num_label_chunks** (int|numpy.array) - 给定mini-batch的标签中的语块数目。 + - **num_correct_chunks** (int|numpy.array)— 给定mini-batch的正确识别的语块数目。 + +返回:无 + +.. py:method:: eval() + +该函数计算并返回准确率,召回率和F1值。 + +返回:准确率,召回率和F1值 + +返回类型:float + diff --git a/doc/paddle/api/paddle/fluid/evaluator/DetectionMAP_cn.rst b/doc/paddle/api/paddle/fluid/evaluator/DetectionMAP_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..44d3700e5535c485a78361ba82c97c7c5b81ca87 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/evaluator/DetectionMAP_cn.rst @@ -0,0 +1,86 @@ +.. _cn_api_fluid_metrics_DetectionMAP: + +DetectionMAP +------------------------------- + +.. py:class:: paddle.fluid.metrics.DetectionMAP(input, gt_label, gt_box, gt_difficult=None, class_num=None, background_label=0, overlap_threshold=0.5, evaluate_difficult=True, ap_version='integral') + + + + +该OP用于计算检测网络的平均精度(mAP)。 mAP是衡量object detectors精度的指标,比如 Faster R-CNN,SSD等。它不同于召回率,它是最大精度的平均值。 + +通常步骤如下: + +1. 根据检测器中的输入和label,计算True Positive(TP)真正例 和 False Positive(FP)假正例 +2. 
计算map,支持 ``11 point`` 和 ``integral`` 模式 + +请从以下文章中获取更多信息: + - https://sanchom.wordpress.com/tag/average-precision/ + - https://arxiv.org/abs/1512.0232 + +参数: + - **input** (Variable) – detection的输出结果,一个 shape=[M, 6] 的 LoDtensor。布局为[label, confidence, xmin, ymin, xmax, ymax],label为类别标签,confidence为置信度,xmin,ymin为检测框左上点坐标,xmax,ymax为检测框右下点坐标,数据类型为float32或float64。 + - **gt_label** (Variable) – ground truth label 的索引,它是一个形状为[N, 1]的LoDtensor,数据类型为float32或float64。 + - **gt_box** (Variable) – ground truth bounds box (bbox),是一个具有形状的LoD张量[N, 4]。布局是[xmin, ymin, xmax, ymax],数据类型为float32或float64。 + - **gt_difficult** (Variable|None, 可选) – 指定这个ground truth是否是一个difficult bounding bbox,它可以是一个 shape=[N, 1]的LoDTensor,也可以不被指定。默认设置为None,表示所有的ground truth标签都不是difficult bbox,数据类型为float32或float64。 + - **class_num** (int) – 检测类别的数目。 + - **background_label** (int) – 背景标签的索引,背景标签将被忽略。如果设置为-1,则所有类别将被考虑,默认为0。 + - **overlap_threshold** (float) – 判断真假阳性的阈值,默认为0.5。 + - **evaluate_difficult** (bool) – 是否考虑 difficult ground truth 进行评价,默认为 True。当 gt_difficult 为 None 时,这个参数不起作用。 + - **ap_version** (str) – 平均精度的计算方法,必须是 "integral" 或 "11point"。详情请查看 https://sanchom.wordpress.com/tag/average-precision/。 其中,11point为:11-point 插值平均精度。积分: precision-recall曲线的自然积分。 + +返回:变量(Variable) 计算mAP的结果,其中数据类型为float32或float64。 + +返回类型:变量(Variable) + + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + batch_size = -1 # 可以为任意大小 + image_boxs_num = 10 + bounding_bboxes_num = 21 + + pb = fluid.data(name='prior_box', shape=[image_boxs_num, 4], + dtype='float32') + + pbv = fluid.data(name='prior_box_var', shape=[image_boxs_num, 4], + dtype='float32') + + loc = fluid.data(name='target_box', shape=[batch_size, bounding_bboxes_num, 4], + dtype='float32') + + scores = fluid.data(name='scores', shape=[batch_size, bounding_bboxes_num, image_boxs_num], + dtype='float32') + + nmsed_outs = fluid.layers.detection_output(scores=scores, + loc=loc, prior_box=pb, prior_box_var=pbv) + + gt_box = fluid.data(name="gt_box", shape=[batch_size, 4], dtype="float32") + gt_label = fluid.data(name="gt_label", shape=[batch_size, 1], dtype="float32") + difficult = fluid.data(name="difficult", shape=[batch_size, 1], dtype="float32") + + exe = fluid.Executor(fluid.CUDAPlace(0)) + map_evaluator = fluid.metrics.DetectionMAP(nmsed_outs, gt_label, gt_box, difficult, class_num = 3) + cur_map, accum_map = map_evaluator.get_map_var() + + + +.. py:method:: get_map_var() + +返回:当前 mini-batch 的 mAP 变量和不同 mini-batch 的 mAP 累加和 + +.. py:method:: reset(executor, reset_program=None) + +在指定的 batch 结束或者用户指定的开始时重置度量状态。 + +参数: + - **executor** (Executor) – 执行reset_program的执行程序 + - **reset_program** (Program|None, 可选) – 单个program 的 reset 过程。如果设置为 None,将创建一个 program + + + diff --git a/doc/paddle/api/paddle/fluid/evaluator/EditDistance_cn.rst b/doc/paddle/api/paddle/fluid/evaluator/EditDistance_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9ffd2a0165d7e718d1cf08d15be5e90f64cf916b --- /dev/null +++ b/doc/paddle/api/paddle/fluid/evaluator/EditDistance_cn.rst @@ -0,0 +1,68 @@ +.. _cn_api_fluid_metrics_EditDistance: + +EditDistance +------------------------------- + +.. py:class:: paddle.fluid.metrics.EditDistance(name) + + + + +用于管理字符串的编辑距离。编辑距离是通过计算将一个字符串转换为另一个字符串所需的最小编辑操作数(添加、删除或替换)来量化两个字符串(例如单词)彼此不相似的程度一种方法。 参考 https://en.wikipedia.org/wiki/Edit_distance。 + +**代码示例** + +.. 
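为帮助理解“编辑距离”这一概念本身,下面给出一个与本 API 无关的纯 Python 小例子(经典的动态规划解法,仅作概念示意):

.. code-block:: python

    def edit_distance(a, b):
        # dp[i][j] 表示把 a 的前 i 个字符变为 b 的前 j 个字符所需的最少编辑次数
        dp = [[0] * (len(b) + 1) for _ in range(len(a) + 1)]
        for i in range(len(a) + 1):
            dp[i][0] = i
        for j in range(len(b) + 1):
            dp[0][j] = j
        for i in range(1, len(a) + 1):
            for j in range(1, len(b) + 1):
                cost = 0 if a[i - 1] == b[j - 1] else 1
                dp[i][j] = min(dp[i - 1][j] + 1,         # 删除
                               dp[i][j - 1] + 1,         # 添加
                               dp[i - 1][j - 1] + cost)  # 替换或保留
        return dp[len(a)][len(b)]

    print(edit_distance("kitten", "sitting"))  # 3

``EditDistance`` 本身并不计算距离,而是对外部得到的距离结果进行累计和统计,用法如下: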
code-block:: python + + import paddle.fluid as fluid + import numpy as np + + # 假设batch_size为128 + batch_size = 128 + + # 初始化编辑距离管理器 + distance_evaluator = fluid.metrics.EditDistance("EditDistance") + # 生成128个序列对间的编辑距离,此处的最大距离是10 + edit_distances_batch0 = np.random.randint(low = 0, high = 10, size = (batch_size, 1)) + seq_num_batch0 = batch_size + + distance_evaluator.update(edit_distances_batch0, seq_num_batch0) + avg_distance, wrong_instance_ratio = distance_evaluator.eval() + print("the average edit distance for batch0 is %.2f and the wrong instance ratio is %.2f " % (avg_distance, wrong_instance_ratio)) + edit_distances_batch1 = np.random.randint(low = 0, high = 10, size = (batch_size, 1)) + seq_num_batch1 = batch_size + + distance_evaluator.update(edit_distances_batch1, seq_num_batch1) + avg_distance, wrong_instance_ratio = distance_evaluator.eval() + print("the average edit distance for batch0 and batch1 is %.2f and the wrong instance ratio is %.2f " % (avg_distance, wrong_instance_ratio)) + + +.. py:method:: reset() + +清空存储结果。 + +参数:无 + +返回:无 + + +.. py:method:: update(distances, seq_num) + +更新存储结果 + +参数: + - **distances** – 一个形状为(batch_size, 1)的numpy.array,每个元素代表两个序列间的距离。 + - **seq_num** – 一个整型/浮点型值,代表序列对的数量。 + +返回:无 + +.. py:method:: eval() + +返回两个浮点数: +avg_distance:使用更新函数更新的所有序列对的平均距离。 +avg_instance_error:编辑距离不为零的序列对的比例。 + + + + + diff --git a/doc/paddle/api/paddle/fluid/executor/Executor_cn.rst b/doc/paddle/api/paddle/fluid/executor/Executor_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2ff94bcd5c7d1309dbbff21ab0ed65afb91f1a6e --- /dev/null +++ b/doc/paddle/api/paddle/fluid/executor/Executor_cn.rst @@ -0,0 +1,271 @@ +.. _cn_api_fluid_executor: + +Executor +------------------------------- + + + +.. py:class:: paddle.fluid.Executor (place=None) + + + + +Executor支持单GPU、多GPU以及CPU运行。 + +参数: + - **place** (fluid.CPUPlace()|fluid.CUDAPlace(N)|None) – 该参数表示Executor执行所在的设备,这里的N为GPU对应的ID。当该参数为 `None` 时,PaddlePaddle会根据其安装版本设置默认的运行设备。当安装的Paddle为CPU版时,默认运行设置会设置成 `CPUPlace()` ,而当Paddle为GPU版时,默认运行设备会设置成 `CUDAPlace(0)` 。默认值为None。 + +返回:初始化后的 ``Executor`` 对象 + +返回类型:Executor + +**示例代码** + +.. 
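下面先给出一个只包含核心步骤(创建执行器、运行初始化程序、运行主程序)的最小示意,更完整的示例(含 CompiledProgram 用法)见下文:

.. code-block:: python

    import paddle.fluid as fluid
    import numpy

    # 创建执行器(此处显式使用CPU,GPU可改为 fluid.CUDAPlace(0))
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)

    data = fluid.layers.data(name='X', shape=[1], dtype='float32')
    hidden = fluid.layers.fc(input=data, size=10)
    loss = fluid.layers.mean(hidden)

    # 先运行一次 startup program 完成参数初始化
    exe.run(fluid.default_startup_program())

    # 再运行 main program,feed 输入并 fetch 结果
    x = numpy.random.random(size=(10, 1)).astype('float32')
    loss_data, = exe.run(fluid.default_main_program(),
                         feed={"X": x},
                         fetch_list=[loss.name])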
code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.compiler as compiler + import numpy + import os + + # 显式设置运行设备 + # use_cuda = True + # place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + # exe = fluid.Executor(place) + + # 如果不显示设置运行设备,PaddlePaddle会设置默认运行设备 + exe = fluid.Executor() + + train_program = fluid.Program() + startup_program = fluid.Program() + with fluid.program_guard(train_program, startup_program): + data = fluid.layers.data(name='X', shape=[1], dtype='float32') + hidden = fluid.layers.fc(input=data, size=10) + loss = fluid.layers.mean(hidden) + fluid.optimizer.SGD(learning_rate=0.01).minimize(loss) + + # 仅运行一次startup program + # 不需要优化/编译这个startup program + startup_program.random_seed=1 + exe.run(startup_program) + + # 无需编译,直接运行main program + x = numpy.random.random(size=(10, 1)).astype('float32') + loss_data, = exe.run(train_program, + feed={"X": x}, + fetch_list=[loss.name]) + + # 另一种方法是,编译这个main program然后运行。 + # 参考CompiledProgram以获取更多信息。 + # 注意:如果你使用CPU运行程序,需要具体设置CPU_NUM, + # 否则fluid会把逻辑核的所有数目设为CPU_NUM, + # 在这种情况下,输入的batch size应大于CPU_NUM, + # 否则程序会异常中断。 + + # 显式设置运行设备 + # if not use_cuda: + # os.environ['CPU_NUM'] = str(2) + + # 未显示设置运行设备且安装的Paddle为CPU版本 + os.environ['CPU_NUM'] = str(2) + + compiled_prog = compiler.CompiledProgram( + train_program).with_data_parallel( + loss_name=loss.name) + loss_data, = exe.run(compiled_prog, + feed={"X": x}, + fetch_list=[loss.name]) + +.. py:method:: close() + + +关闭执行器。该接口主要用于对于分布式训练,调用该接口后不可以再使用该执行器。该接口会释放在PServers上和目前Trainer有关联的资源。 + +返回:无 + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + + cpu = fluid.CPUPlace() + exe = fluid.Executor(cpu) + # 执行训练或测试过程 + exe.close() + + +.. py:method:: run(program=None, feed=None, fetch_list=None, feed_var_name='feed', fetch_var_name='fetch', scope=None, return_numpy=True, use_program_cache=False, return_merged=True) + +执行指定的Program或者CompiledProgram。需要注意的是,执行器会执行Program或CompiledProgram中的所有算子,而不会根据fetch_list对Program或CompiledProgram中的算子进行裁剪。同时,需要传入运行该模型用到的scope,如果没有指定scope,执行器将使用全局scope,即fluid.global_scope()。 + +参数: + - **program** (Program|CompiledProgram) – 该参数为被执行的Program或CompiledProgram,如果未提供该参数,即该参数为None,在该接口内,main_program将被设置为fluid.default_main_program()。默认为:None。 + - **feed** (list|dict) – 该参数表示模型的输入变量。如果是单卡训练,``feed`` 为 ``dict`` 类型,如果是多卡训练,参数 ``feed`` 可以是 ``dict`` 或者 ``list`` 类型变量,如果该参数类型为 ``dict`` ,feed中的数据将会被分割(split)并分送给多个设备(CPU/GPU),即输入数据被均匀分配到不同设备上;如果该参数类型为 ``list`` ,则列表中的各个元素都会直接分别被拷贝到各设备中。默认为:None。 + - **fetch_list** (list) – 该参数表示模型运行之后需要返回的变量。默认为:None。 + - **feed_var_name** (str) – 该参数表示数据输入算子(feed operator)的输入变量名称。默认为:"feed"。 + - **fetch_var_name** (str) – 该参数表示结果获取算子(fetch operator)的输出变量名称。默认为:"fetch"。 + - **scope** (Scope) – 该参数表示执行当前program所使用的作用域,用户可以为不同的program指定不同的作用域。默认值:fluid.global_scope()。 + - **return_numpy** (bool) – 该参数表示是否将返回的计算结果(fetch list中指定的变量)转化为numpy;如果为False,则每个变量返回的类型为LoDTensor,否则返回变量的类型为numpy.ndarray。默认为:True。 + - **use_program_cache** (bool) – 该参数表示是否对输入的Program进行缓存。如果该参数为True,在以下情况时,模型运行速度可能会更快:输入的program为 ``fluid.Program`` ,并且模型运行过程中,调用该接口的参数(program、 feed变量名和fetch_list变量)名始终不变。默认为:False。 + - **return_merged** (bool) – 该参数表示是否按照执行设备维度将返回的计算结果(fetch list中指定的变量)进行合并。如果 ``return_merged`` 设为False,返回值类型是一个Tensor的二维列表( ``return_numpy`` 设为Fasle时)或者一个numpy.ndarray的二维列表( ``return_numpy`` 设为True时)。如果 ``return_merged`` 设为True,返回值类型是一个Tensor的一维列表( ``return_numpy`` 设为Fasle时)或者一个numpy.ndarray的一维列表( ``return_numpy`` 设为True时)。更多细节请参考示例代码2。如果返回的计算结果是变长的,请设置 ``return_merged`` 
为False,即不按照执行设备维度合并返回的计算结果。该参数的默认值为True,但这仅是为了兼容性考虑,在未来的版本中默认值可能会更改为False。 + +返回:返回fetch_list中指定的变量值 + +返回类型:List + +.. note:: + 1. 如果是多卡训练,并且feed参数为dict类型,输入数据将被均匀分配到不同的卡上,例如:使用2块GPU训练,输入样本数为3,即[0, 1, 2],经过拆分之后,GPU0上的样本数为1,即[0],GPU1上的样本数为2,即[1, 2]。如果样本数少于设备数,程序会报错,因此运行模型时,应额外注意数据集的最后一个batch的样本数是否少于当前可用的CPU核数或GPU卡数,如果是少于,建议丢弃该batch。 + 2. 如果可用的CPU核数或GPU卡数大于1,则fetch出来的结果为不同设备上的相同变量值(fetch_list中的变量)在第0维拼接在一起。 + + +**示例代码1** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy + + #首先创建执行引擎 + place = fluid.CPUPlace() # fluid.CUDAPlace(0) + exe = fluid.Executor(place) + + data = fluid.layers.data(name='X', shape=[1], dtype='float32') + hidden = fluid.layers.fc(input=data, size=10) + loss = fluid.layers.mean(hidden) + adam = fluid.optimizer.Adam() + adam.minimize(loss) + + #仅运行startup程序一次 + exe.run(fluid.default_startup_program()) + + x = numpy.random.random(size=(10, 1)).astype('float32') + outs = exe.run(feed={'X': x}, + fetch_list=[loss.name]) + + +**示例代码2** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + # 创建Executor对象 + place = fluid.CUDAPlace(0) + exe = fluid.Executor(place) + data = fluid.data(name='X', shape=[None, 1], dtype='float32') + class_dim = 2 + prediction = fluid.layers.fc(input=data, size=class_dim) + loss = fluid.layers.mean(prediction) + adam = fluid.optimizer.Adam() + adam.minimize(loss) + # 运行且仅运行一次startup program + exe.run(fluid.default_startup_program()) + build_strategy = fluid.BuildStrategy() + binary = fluid.CompiledProgram(fluid.default_main_program()).with_data_parallel( + loss_name=loss.name, build_strategy=build_strategy) + batch_size = 6 + x = np.random.random(size=(batch_size, 1)).astype('float32') + # 1) 设置 return_merged 参数为False以获取不合并的计算结果: + unmerged_prediction, = exe.run(binary, feed={'X': x}, + fetch_list=[prediction.name], + return_merged=False) + # 如果用户使用两个GPU卡来运行此python代码示例,输出结果将为(2, 3, class_dim)。 + # 输出结果中第一个维度值代表所使用的GPU卡数,而第二个维度值代表batch_size和所使用 + # 的GPU卡数之商。 + print("The unmerged prediction shape: {}".format(np.array(unmerged_prediction).shape)) + print(unmerged_prediction) + # 2) 设置 return_merged 参数为True以获取合并的计算结果: + merged_prediction, = exe.run(binary, feed={'X': x}, + fetch_list=[prediction.name], + return_merged=True) + # 如果用户使用两个GPU卡来运行此python代码示例,输出结果将为(6, class_dim)。输出结果 + # 中第一个维度值代表batch_size值。 + print("The merged prediction shape: {}".format(np.array(merged_prediction).shape)) + print(merged_prediction) + # 输出: + # The unmerged prediction shape: (2, 3, 2) + # [array([[-0.37620035, -0.19752218], + # [-0.3561043 , -0.18697084], + # [-0.24129935, -0.12669306]], dtype=float32), array([[-0.24489994, -0.12858354], + # [-0.49041364, -0.25748932], + # [-0.44331917, -0.23276259]], dtype=float32)] + # The merged prediction shape: (6, 2) + # [[-0.37789783 -0.19921964] + # [-0.3577645 -0.18863106] + # [-0.24274671 -0.12814042] + # [-0.24635398 -0.13003758] + # [-0.49232286 -0.25939852] + # [-0.44514108 -0.2345845 ]] + + +.. 
py:method:: infer_from_dataset(program=None, dataset=None, scope=None, thread=0, debug=False, fetch_list=None, fetch_info=None, print_period=100) + +infer_from_dataset的文档与train_from_dataset几乎完全相同,区别在于分布式训练中,infer_from_dataset会禁用梯度推送(push gradients)。infer_from_dataset()可以非常方便地用于多线程评估。 + +参数: + - **program** (Program|CompiledProgram) – 需要执行的program,如果没有给定那么默认使用default_main_program (未编译的) + - **dataset** (paddle.fluid.Dataset) – 在此函数外创建的数据集,用户应当在调用函数前提供完整定义的数据集。必要时请查阅Dataset的相关文档。默认为None + - **scope** (Scope) – 执行这个program所使用的作用域,用户可以指定不同的作用域。默认为全局作用域 + - **thread** (int) – 用户想要在这个函数中运行的线程数量。当thread > 0时,实际使用的线程数为min(Dataset.thread_num, thread);默认为0 + - **debug** (bool) – 是否开启debug模式,默认为False + - **fetch_list** (Variable List) – 返回变量列表,每个变量都会在训练过程中被打印出来,默认为None + - **fetch_info** (String List) – 每个变量的打印信息,默认为None + - **print_period** (int) – 每两次打印之间间隔的mini-batches的数量,默认为100 + +返回:None + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + place = fluid.CPUPlace() # 使用GPU时可设置place = fluid.CUDAPlace(0) + exe = fluid.Executor(place) + x = fluid.layers.data(name="x", shape=[10, 10], dtype="int64") + y = fluid.layers.data(name="y", shape=[1], dtype="int64", lod_level=1) + dataset = fluid.DatasetFactory().create_dataset() + dataset.set_use_var([x, y]) + dataset.set_thread(1) + filelist = [] # 您可以设置您自己的filelist,如filelist = ["dataA.txt"] + dataset.set_filelist(filelist) + exe.run(fluid.default_startup_program()) + exe.infer_from_dataset(program=fluid.default_main_program(), dataset=dataset) + + +.. py:method:: train_from_dataset(program=None, dataset=None, scope=None, thread=0, debug=False, fetch_list=None, fetch_info=None, print_period=100) + +从预定义的数据集中进行训练。数据集在paddle.fluid.dataset中定义。给定Program(或CompiledProgram),train_from_dataset将会使用数据集中的所有样本。作用域(scope)可由用户指定,默认使用global_scope()。实际使用的线程数为Dataset.thread_num与本接口参数thread二者中的较小值。可以开启debug模式,使执行器打印所有算子的运行时间和当前训练任务的吞吐量。 + +注意:train_from_dataset会在每次运行结束时销毁本次运行在executor中创建的所有资源。 + +参数: + - **program** (Program|CompiledProgram) – 需要执行的program,如果没有给定那么默认使用default_main_program (未编译的) + - **dataset** (paddle.fluid.Dataset) – 在此函数外创建的数据集,用户应当在调用函数前提供完整定义的数据集。必要时请查阅Dataset的相关文档。默认为None + - **scope** (Scope) – 执行这个program所使用的作用域,用户可以指定不同的作用域。默认为全局作用域 + - **thread** (int) – 用户想要在这个函数中运行的线程数量。当thread > 0时,实际使用的线程数为min(Dataset.thread_num, thread);默认为0 + - **debug** (bool) – 是否开启debug模式,默认为False + - **fetch_list** (Variable List) – 返回变量列表,每个变量都会在训练过程中被打印出来,默认为None + - **fetch_info** (String List) – 每个变量的打印信息,默认为None + - **print_period** (int) – 每两次打印之间间隔的mini-batches的数量,默认为100 + +返回:None + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + + place = fluid.CPUPlace() # 通过设置place = fluid.CUDAPlace(0)使用GPU + exe = fluid.Executor(place) + x = fluid.layers.data(name="x", shape=[10, 10], dtype="int64") + y = fluid.layers.data(name="y", shape=[1], dtype="int64", lod_level=1) + dataset = fluid.DatasetFactory().create_dataset() + dataset.set_use_var([x, y]) + dataset.set_thread(1) + filelist = [] # 您可以设置您自己的filelist,如filelist = ["dataA.txt"] + dataset.set_filelist(filelist) + exe.run(fluid.default_startup_program()) + exe.train_from_dataset(program=fluid.default_main_program(), + dataset=dataset) diff --git a/doc/paddle/api/paddle/fluid/executor/global_scope_cn.rst b/doc/paddle/api/paddle/fluid/executor/global_scope_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8991f88933cd52bdeb164e1170376e9534110e17 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/executor/global_scope_cn.rst @@ -0,0 +1,27 @@ +.. 
_cn_api_fluid_executor_global_scope: + +global_scope +------------------------------- + + +.. py:function:: paddle.fluid.global_scope() + + + + +获取全局/默认作用域实例。很多API使用默认 ``global_scope`` ,例如 ``Executor.run`` 等。 + +返回:全局/默认作用域实例 + +返回类型:Scope + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy + + fluid.global_scope().var("data").get_tensor().set(numpy.ones((1, 2)), fluid.CPUPlace()) + data = numpy.array(fluid.global_scope().find_var("data").get_tensor()) + print(data) # [[1. 1.]] diff --git a/doc/paddle/api/paddle/fluid/executor/scope_guard_cn.rst b/doc/paddle/api/paddle/fluid/executor/scope_guard_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9bb0538f7c3d29be7750383ffe969c64df80f126 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/executor/scope_guard_cn.rst @@ -0,0 +1,35 @@ +.. _cn_api_fluid_executor_scope_guard: + +scope_guard +------------------------------- + + +.. py:function:: paddle.fluid.executor.scope_guard (scope) + + + + + +该接口通过 python 的 ``with`` 语句切换作用域(scope)。 +作用域记录了变量名和变量 ( :ref:`api_guide_Variable` ) 之间的映射关系,类似于编程语言中的大括号。 +如果未调用此接口,所有的变量和变量名都会被记录在默认的全局作用域中。 +当用户需要创建同名的变量时,如果不希望同名的变量映射关系被覆盖,则需要通过该接口切换作用域。 +通过 ``with`` 语句切换后,``with`` 语句块中所有创建的变量都将分配给新的作用域。 + +参数: + - **scope** (Scope) - 新的作用域。 + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy + + new_scope = fluid.Scope() + with fluid.scope_guard(new_scope): + fluid.global_scope().var("data").get_tensor().set(numpy.ones((1, 2)), fluid.CPUPlace()) + data = numpy.array(new_scope.find_var("data").get_tensor()) + print(data) # [[1. 1.]] diff --git a/doc/paddle/api/paddle/fluid/framework/Program_cn.rst b/doc/paddle/api/paddle/fluid/framework/Program_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2a611c7b0e913f8ed6ceb13ad88566c101dc8ef6 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/framework/Program_cn.rst @@ -0,0 +1,450 @@ +.. _cn_api_fluid_Program: + +Program +------------------------------- + +.. py:class:: paddle.fluid.Program + + + + +**注意:默认情况下,Paddle Fluid内部默认含有** :ref:`cn_api_fluid_default_startup_program` **和** :ref:`cn_api_fluid_default_main_program` **,它们共享参数。** :ref:`cn_api_fluid_default_startup_program` **只运行一次来初始化参数,** :ref:`cn_api_fluid_default_main_program` **在每个mini batch中运行并更新权重。** + +Program是Paddle Fluid对于计算图的一种静态描述,使用Program的构造函数可以创建一个Program。Program中包括至少一个 :ref:`api_guide_Block` ,当 :ref:`api_guide_Block` 中存在条件选择的控制流OP(例如 :ref:`cn_api_fluid_layers_While` 等)时,该Program将会含有嵌套着的 :ref:`api_guide_Block` 即控制流外部的 :ref:`api_guide_Block` 将包含着控制流内部的 :ref:`api_guide_Block` ,而嵌套的 :ref:`api_guide_Block` 的元素访问控制将由具体的控制流OP来决定。关于Program具体的结构和包含的类型请参阅 `framework.proto `_ +。 + +一个Program的集合通常包含初始化程序(startup_program)与主程序(main_program),初始化程序是一个包含一些初始化工作的Program,主程序将会包含用来训练的网络结构和变量,在使用同一个 :ref:`api_guide_executor` 执行时他们会共享初始化工作的结果,例如初始化的参数。一个Program的集合可以被用来测试或者训练,被用来训练时, ``Paddle Fluid`` 将会利用所有用户使用的OP和变量来搭建一个训练网络,被用来测试时, 可以通过调用Program相关的接口例如:`clone` 剪去一些与测试无关的OP和变量,比如反向传播的OP和变量。 + + +返回:创建的空的Program + +返回值类型:Program + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + + main_program = fluid.Program() + startup_program = fluid.Program() + with fluid.program_guard(main_program=main_program, startup_program=startup_program): + x = fluid.layers.data(name="x", shape=[-1, 784], dtype='float32') + y = fluid.layers.data(name="y", shape=[-1, 1], dtype='int32') + z = fluid.layers.fc(name="fc", input=x, size=10, act="relu") + + # start_up program here will share fc's weight with main program + print("main program is: {}".format(main_program)) + + print("start up program is: {}".format(startup_program)) + + +.. py:method:: to_string(throw_on_error, with_details=False) + +将Program转换为字符串 + +参数: + - **throw_on_error** (bool) - 是否在没有设置必需字段时抛出异常。 + - **with_details** (bool) - 值为true时,打印更多关于变量和参数的信息,如trainable, optimize_attr等 + +返回: 将Program转换为字符串 + +返回类型: str + +抛出异常: ``ValueError`` - 当 ``throw_on_error == true`` ,当没有设置任何必需的字段时,抛出 ``ValueError`` 。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + prog = fluid.default_main_program() + x = fluid.layers.data(name="X", shape=[2,3], dtype="float32", append_batch_size=False) + pred = fluid.layers.fc(x, size=3) + prog_string = prog.to_string(throw_on_error=True, with_details=False) + prog_string_with_details = prog.to_string(throw_on_error=False, with_details=True) + print("program string without detail: {}".format(prog_string)) + print("program string with detail: {}".format(prog_string_with_details)) + +.. py:method:: clone(for_test=False) + +**注意:** + **1.** ``Program.clone()`` **方法不会克隆例如** :ref:`cn_api_fluid_io_DataLoader` **这样的数据读取相关的部分,这可能会造成的数据读取部分在克隆后丢失** + + **2. 此API当** ``for_test=True`` **时将会裁剪部分OP和变量。为防止错误的裁剪,推荐在** :ref:`cn_api_fluid_backward_append_backward` **和执行优化器之前使用** ``clone(for_test=True)`` 。 + + +当 ``for_test=True`` 时创建一个新的、仅包含当前Program前向内容的Program。否则创建一个新的,和当前Program完全相同的Program + +有些OP,在训练和测试之间的行为是不同的,比如 :ref:`cn_api_fluid_layers_batch_norm` 。它们有一个属性 ``is_test`` 来控制行为。当 ``for_test=True`` 时,此方法将把它们的 ``is_test`` 属性更改为True。 + +- 克隆Program用于训练时,将 ``for_test`` 设置为False。 +- 克隆Program用于测试时,将 ``for_test`` 设置为True。虽然在这种情况下,如果在使用了优化器之后调用 ``clone`` 我们依旧会对Program当中反向执行以及优化器相关的内容进行自动裁剪,但是,我们强烈建议在使用优化器之前使用 ``clone`` 例如如果使用的是 :ref:`cn_api_fluid_optimizer_Momentum` 可以这样去使用: + +**代码示例** + + :: + + import paddle.fluid as fluid + img = fluid.layers.data(name='image', shape=[784]) + pred = fluid.layers.fc(input=img, size=10, act='relu') + loss = fluid.layers.mean(pred) + ## 我们推荐在使用 Optimizer前使用clone()接口 + test_program = fluid.default_main_program().clone(for_test=True) + optimizer = fluid.optimizer.Momentum(learning_rate=0.01, momentum=0.9) + optimizer.minimize(loss) + +参数: + - **for_test** (bool) – 取值为True时,clone方法内部会把operator的属性 ``is_test`` 设置为 True, 并裁剪反向OP和参数优化OP,默认值为False + +返回:当 ``for_test=True`` 时返回一个新的、仅包含当前Program前向内容的Program。否则返回一个新的,和当前Program完全相同的Program + +返回类型: Program + +**代码示例** + +注意,Program在clone后的顺序可能不同,这不会影响的训练或测试进程。在下面的示例中,我们提供了一个简单的方法print_prog(Program)来打印程序描述,以确保clone后仍能得到同样的打印结果: + +.. code-block:: python + + import paddle.fluid as fluid + import six + + + def print_prog(prog): + for name, value in sorted(six.iteritems(prog.block(0).vars)): + print(value) + for op in prog.block(0).ops: + print("op type is {}".format(op.type)) + print("op inputs are {}".format(op.input_arg_names)) + print("op outputs are {}".format(op.output_arg_names)) + for key, value in sorted(six.iteritems(op.all_attrs())): + if key not in ['op_callstack', 'op_role_var']: + print(" [ attrs: {}: {} ]".format(key, value)) + +1.克隆一个Program,示例代码如下。 + +.. 
code-block:: python + + import paddle.fluid as fluid + import six + + def print_prog(prog): + for name, value in sorted(six.iteritems(prog.block(0).vars)): + print(value) + for op in prog.block(0).ops: + print("op type is {}".format(op.type)) + print("op inputs are {}".format(op.input_arg_names)) + print("op outputs are {}".format(op.output_arg_names)) + for key, value in sorted(six.iteritems(op.all_attrs())): + if key not in ['op_callstack', 'op_role_var']: + print(" [ attrs: {}: {} ]".format(key, value)) + + train_program = fluid.Program() + startup_program = fluid.Program() + + # ``startup_program`` 被用来执行一些参数初始化工作 + # ``main_program`` 被用来容纳网络 + with fluid.program_guard(train_program, startup_program): + with fluid.unique_name.guard(): + img = fluid.layers.data(name='image', shape=[784]) + hidden = fluid.layers.fc(input=img, size=200, act='relu') + hidden = fluid.layers.dropout(hidden, dropout_prob=0.5) + loss = fluid.layers.cross_entropy( + input=fluid.layers.fc(hidden, size=10, act='softmax'), + label=fluid.layers.data(name='label', shape=[1], dtype='int64')) + avg_loss = fluid.layers.mean(loss) + test_program = train_program.clone(for_test=True) + print_prog(test_program) + + # 由于需要使训练和测试参数共享,我们需要使用训练的 ``startup_program`` + # 来代替测试用的 ``startup_program``, 尽管测试的 ``startup_program`` 里面什么也没有。 + + # 在Paddle Fluid中我们会通过同样的变量名来共享权重. + # 训练和测试程序的所有参数将会拥有同样的名字,这将会使训练和测试程序实现参数的共享, + # 所以我们使用训练程序的 ``startup_program`` .并且由于测试的 ``startup_program`` 什么也没有, + # 因此它是一个新的程序. + with fluid.program_guard(train_program, startup_program): + with fluid.unique_name.guard(): + sgd = fluid.optimizer.SGD(learning_rate=1e-3) + sgd.minimize(avg_loss) + +2.如果分别运行 train Program 和 test Program,则可以不使用clone。 + +.. code-block:: python + + import paddle.fluid as fluid + import six + + def print_prog(prog): + for name, value in sorted(six.iteritems(prog.block(0).vars)): + print(value) + for op in prog.block(0).ops: + print("op type is {}".format(op.type)) + print("op inputs are {}".format(op.input_arg_names)) + print("op outputs are {}".format(op.output_arg_names)) + for key, value in sorted(six.iteritems(op.all_attrs())): + if key not in ['op_callstack', 'op_role_var']: + print(" [ attrs: {}: {} ]".format(key, value)) + + def network(): + img = fluid.layers.data(name='image', shape=[784]) + hidden = fluid.layers.fc(input=img, size=200, act='relu') + hidden = fluid.layers.dropout(hidden, dropout_prob=0.5) + loss = fluid.layers.cross_entropy( + input=fluid.layers.fc(hidden, size=10, act='softmax'), + label=fluid.layers.data(name='label', shape=[1], dtype='int64')) + avg_loss = fluid.layers.mean(loss) + return avg_loss + + train_program_2 = fluid.Program() + startup_program_2 = fluid.Program() + test_program_2 = fluid.Program() + with fluid.program_guard(train_program_2, startup_program_2): + with fluid.unique_name.guard(): + avg_loss = network() + sgd = fluid.optimizer.SGD(learning_rate=1e-3) + sgd.minimize(avg_loss) + # 不使用测试阶段的启动程序 + with fluid.program_guard(test_program_2, startup_program_2): + with fluid.unique_name.guard(): + avg_loss = network() + print_prog(test_program_2) + +上边两个代码片段生成和打印的Program是一样的。 + +.. py:staticmethod:: parse_from_string(binary_str) + +通过对 `protobuf `_ 的反序列化,转换成Program + + +参数: + - **binary_str_type** (str) – `protobuf `_ 二进制字符串 + +返回:反序列化后的 Program + +返回类型:Program + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + + startup_prog = fluid.Program() + main_prog = fluid.Program() + with fluid.program_guard(startup_prog, main_prog): + x = fluid.layers.data( + name='X', shape=[1000, 784], dtype='float32', append_batch_size=False) + + y = fluid.layers.data( + name='Y', shape=[784, 100], dtype='float32', append_batch_size=False) + + z = fluid.layers.mul(x=x, y=y) + + binary_str = fluid.default_main_program().desc.serialize_to_string() + prog_restored = fluid.default_main_program().parse_from_string(binary_str) + + print(fluid.default_main_program()) + print(prog_restored) + + # 这里打印出的两个Program应该是一模一样的 + +.. py:attribute:: num_blocks + +该Program中的 :ref:`api_guide_Block` 的个数 + +返回: 该Program中的 :ref:`api_guide_Block` 的个数 + +返回类型:int + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + prog = fluid.default_main_program() + num_blocks = prog.num_blocks + print(num_blocks) + + ## 1 + ## 当前Program中只有一个Block,即全局的Block + +.. py:attribute:: random_seed + +**注意:必须在相关OP被添加之前设置。** + +程序中随机运算符的默认随机种子。0意味着随机生成随机种子。 + +返回:该Program中当前正在使用的random seed + +返回类型:int64 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + prog = fluid.default_main_program() + random_seed = prog.random_seed + x_var = fluid.layers.data(name="X", shape=[3,3], dtype="float32", append_batch_size=False) + print(random_seed) + ## 0 + ## 默认的random seed是 0 + + # 这里我们必须要在fluid.layers.dropout之前设置random_seed + prog.random_seed = 1 + z_var = fluid.layers.dropout(x_var, 0.7) + + print(prog.random_seed) + ## 1 + ## 修改后random seed变成了 1 + +.. py:method:: global_block() + +获取该Program的第一个 :ref:`api_guide_Block` 。 + +返回:该Program的第一个 :ref:`api_guide_Block` + +返回类型::ref:`api_guide_Block` + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + prog = fluid.default_main_program() + gb_block = prog.global_block() + print(gb_block) + ## + ## idx: 0 + ## parent_idx: -1 + ## 打印出了当前全局Block的描述 + +.. py:method:: block(index) + +返回该Program中 , ``index`` 指定的 :ref:`api_guide_Block` 。 ``index`` 类型为int + +参数: + - **index** (int) - 需要获取的 :ref:`api_guide_Block` 的index + +返回: 该Program中index对应的那个 :ref:`api_guide_Block` + +返回类型: :ref:`api_guide_Block` + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + prog = fluid.default_main_program() + block_0 = prog.block(0) + print(block_0) + ## + ## idx: 0 + ## parent_idx: -1 + ## 打印出了0号Block的描述 + +.. py:method:: current_block() + +获取当前 :ref:`api_guide_Block` 。当前 :ref:`api_guide_Block` 是用来添加OP的。 + +返回: 该Program中用户当前所在的 :ref:`api_guide_Block` + +返回类型: :ref:`api_guide_Block` + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + prog = fluid.default_main_program() + current_blk = prog.current_block() + print(current_blk) + ## + ## idx: 0 + ## parent_idx: -1 + ## 打印出了当前Block的描述 + +.. py:method:: list_vars() + +获取当前Program中所有变量。返回值是一个可迭代对象(iterable object)。 + +返回: Generator 会yield每个Program中的变量 + +返回类型: iterable 的 :ref:`api_guide_Variable` + + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + prog = fluid.default_main_program() + img = fluid.layers.data(name='img', shape=[1,28,28], dtype='float32') + label = fluid.layers.data(name='label', shape=[128,1], dtype='int64') + for var in prog.list_vars(): + print(var) + + # 这里将会打印出当前Program中所有的Variable + +.. py:method:: all_parameters() + +获取当前Program中所有的 :ref:`api_guide_parameter` 。返回值是一个列表。 + +返回: 一个包含当前Program中所有参数的列表。 + +返回类型: list[ :ref:`api_guide_parameter` ] + + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + + program = fluid.default_main_program() + data = fluid.data(name='x', shape=[None, 13], dtype='float32') + hidden = fluid.layers.fc(input=data, size=10) + loss = fluid.layers.mean(hidden) + fluid.optimizer.SGD(learning_rate=0.01).minimize(loss) + + for param in program.all_parameters(): + print(param) + + # 这里将会打印出当前Program中所有的Parameters,在本例中,输出结果是: + # + # name: "fc_0.w_0" + # type { + # type: LOD_TENSOR + # lod_tensor { + # tensor { + # data_type: FP32 + # dims: 13 + # dims: 10 + # } + # } + # } + # + # persistable: true + # name: "fc_0.b_0" + # type { + # type: LOD_TENSOR + # lod_tensor { + # tensor { + # data_type: FP32 + # dims: 10 + # } + # } + # } + # persistable: true + # + # 这里print(param)将会打印出一个参数所有的属性,包括name,type和persistable, + # 你可以访问一个参数的指定属性,例如param.name,param.type \ No newline at end of file diff --git a/doc/paddle/api/paddle/fluid/framework/Variable_cn.rst b/doc/paddle/api/paddle/fluid/framework/Variable_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..83af840cce8702bc2af188a1cab30f3561049d98 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/framework/Variable_cn.rst @@ -0,0 +1,410 @@ +.. _cn_api_fluid_Variable: + +Variable +------------------------------- + +.. py:class:: paddle.fluid.Variable + + + + +**注意:** + **1. 请不要直接调用** `Variable` **的构造函数,因为这会造成严重的错误发生!** + + **2. 在静态图形模式下:请使用** `Block.create_var` **创建一个静态的** `Variable` **,该静态的** `Variable` **在使用** :ref:`cn_api_fluid_executor` **执行前是没有实际数据的。** + + **3. 在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下:请使用** :ref:`cn_api_fluid_dygraph_to_variable` 创建一个拥有实际数据的 :ref:`api_guide_Variable` + +在Fluid中,OP的每个输入和输出都是 :ref:`api_guide_Variable` 。多数情况下, :ref:`api_guide_Variable` 用于保存不同种类的数据或训练标签。 + +:ref:`api_guide_Variable` 总是属于某一个 :ref:`api_guide_Block` 。所有 :ref:`api_guide_Variable` 都有其自己的 ``name`` ,不同 :ref:`api_guide_Block` 中的两个 :ref:`api_guide_Variable` 可以具有相同的名称。如果使用的 **不是** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式,那么同一个 :ref:`api_guide_Block` 中的两个或更多 :ref:`api_guide_Variable` 拥有相同 ``name`` 将意味着他们会共享相同的内容。通常我们使用这种方式来实现 **参数共享** + +:ref:`api_guide_Variable` 有很多种。它们每种都有自己的属性和用法。请参考 `framework.proto `_ 以获得详细信息。 :ref:`api_guide_Variable` 的大多数成员变量可以设置为 ``None``。它的意思是它不可用或稍后指定。 + +如果您希望创建一个 :ref:`api_guide_Variable` 那么可以参考如下示例: + +**示例代码:** + +在静态图形模式下: + .. code-block:: python + + import paddle.fluid as fluid + cur_program = fluid.Program() + cur_block = cur_program.current_block() + new_variable = cur_block.create_var(name="X", + shape=[-1, 23, 48], + dtype='float32') +在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下: + .. code-block:: python + + import paddle.fluid as fluid + import numpy as np + with fluid.dygraph.guard(): + new_variable = fluid.dygraph.to_variable(np.arange(10)) + + +.. py:method:: detach() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + + **2.** ``detach`` **后的** :ref:`api_guide_Variable` **将会成为临时变量** + +产生一个新的,和当前计算图分离的,但是拥有当前 :ref:`api_guide_Variable` 其内容的临时变量 + +返回:一个新的,和当前计算图分离的,但是拥有当前 :ref:`api_guide_Variable` 其内容的临时 :ref:`api_guide_Variable` + +返回类型:(:ref:`api_guide_Variable` | 和输入的 ``Dtype`` 一致) + +**示例代码** + .. 
code-block:: python + + import paddle.fluid as fluid + from paddle.fluid.dygraph.base import to_variable + from paddle.fluid.dygraph import Linear + import numpy as np + + data = np.random.uniform(-1, 1, [30, 10, 32]).astype('float32') + with fluid.dygraph.guard(): + linear = Linear(32, 64) + data = to_variable(data) + x = linear(data) + y = x.detach() + +.. py:method:: numpy() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + + +返回一个 ``ndarray`` 来表示当前 :ref:`api_guide_Variable` 的值 + +返回:``numpy`` 的数组,表示当前 :ref:`api_guide_Variable` 的实际值 + +返回类型:ndarray,``dtype`` 和输入的 ``dtype`` 一致 + +**示例代码** + .. code-block:: python + + import paddle.fluid as fluid + from paddle.fluid.dygraph.base import to_variable + from paddle.fluid.dygraph import Linear + import numpy as np + + data = np.random.uniform(-1, 1, [30, 10, 32]).astype('float32') + with fluid.dygraph.guard(): + linear = Linear(32, 64) + data = to_variable(data) + x = linear(data) + print(x.numpy()) + +.. py:method:: set_value() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +为此 :ref:`api_guide_Variable` 设置一个新的值。 + +**参数:** + + - **value**: ( :ref:`api_guide_Variable` 或 ``ndarray`` ) 要赋值给此 :ref:`api_guide_Variable` 的新的值。 + +返回:无 + +抛出异常: ``ValueError`` - 当要赋于的新值的 ``shape`` 和此 :ref:`api_guide_Variable` 原有的 ``shape`` 不同时,抛出 ``ValueError`` 。 + +**示例代码** + .. code-block:: python + + import paddle.fluid as fluid + from paddle.fluid.dygraph.base import to_variable + from paddle.fluid.dygraph import Linear + import numpy as np + + data = np.ones([3, 1024], dtype='float32') + with fluid.dygraph.guard(): + linear = fluid.dygraph.Linear(1024, 4) + t = to_variable(data) + linear(t) # 使用默认参数值调用前向 + custom_weight = np.random.randn(1024, 4).astype("float32") + linear.weight.set_value(custom_weight) # 将参数修改为自定义的值 + out = linear(t) # 使用新的参数值调用前向 + +.. py:method:: backward() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + + **2. 由于如果该** :ref:`api_guide_Variable` **以上没有任何地方需要梯度,那么仅仅设置该** :ref:`api_guide_Variable` **的梯度为** ``1`` **是没有意义的。因此,这种情况下,为了节省一些计算,我们不去产生该** :ref:`api_guide_Variable` **的梯度** + +从该节点开始执行反向 + +**参数:** + + - **retain_graph** (bool,可选) – 该参数用于确定反向梯度更新完成后反向梯度计算图是否需要保留(retain_graph为True则保留反向梯度计算图)。若用户打算在执行完该方法( :code:`backward` )后,继续向之前已构建的计算图中添加更多的Op,则需要设置 :code:`retain_graph` 值为True(这样才会保留之前计算得到的梯度)。可以看出,将 :code:`retain_graph` 设置为False可降低内存的占用。默认值为False。 + +返回:无 + + +**示例代码** + .. code-block:: python + + import numpy as np + import paddle + paddle.disable_static() + x = np.ones([2, 2], np.float32) + inputs = [] + for _ in range(10): + tmp = paddle.to_tensor(x) + # 如果这里我们不为输入tmp设置stop_gradient=False,那么后面loss也将因为这个链路都不需要梯度 + # 而不产生梯度 + tmp.stop_gradient=False + inputs.append(tmp) + ret = paddle.sums(inputs) + loss = paddle.reduce_sum(ret) + loss.backward() + +.. py:method:: gradient() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + + **2. 
由于如果该** :ref:`api_guide_Variable` **以上没有任何地方需要梯度,那么仅仅设置该** :ref:`api_guide_Variable` **的梯度为** ``1`` **是没有意义的。因此,这种情况下,为了节省一些计算,我们不去产生该** :ref:`api_guide_Variable` **的梯度** + +获取该 :ref:`api_guide_Variable` 的梯度值 + +返回:如果 :ref:`api_guide_Variable` 的类型是LoDTensor(参见 :ref:`cn_user_guide_lod_tensor` ),返回该 :ref:`api_guide_Variable` 类型为 ``ndarray`` 的梯度值;如果 :ref:`api_guide_Variable` 的类型是SelectedRows,返回该 :ref:`api_guide_Variable` 类型为 ``ndarray`` 的梯度值和类型为 ``ndarray`` 的词id组成的tuple。 + +返回类型:``ndarray`` 或者 ``tuple of ndarray`` , 返回类型 ``tuple of ndarray`` 仅在 :ref:`cn_api_fluid_dygraph_Embedding` 层稀疏更新时产生。 + + +**示例代码** + .. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + # example1: 返回ndarray + x = np.ones([2, 2], np.float32) + with fluid.dygraph.guard(): + inputs2 = [] + for _ in range(10): + tmp = fluid.dygraph.base.to_variable(x) + tmp.stop_gradient=False + inputs2.append(tmp) + ret2 = fluid.layers.sums(inputs2) + loss2 = fluid.layers.reduce_sum(ret2) + loss2.backward() + print(loss2.gradient()) + + # example2: 返回tuple of ndarray + with fluid.dygraph.guard(): + embedding = fluid.dygraph.Embedding( + size=[20, 32], + param_attr='emb.w', + is_sparse=True) + x_data = np.arange(12).reshape(4, 3).astype('int64') + x_data = x_data.reshape((-1, 3, 1)) + x = fluid.dygraph.base.to_variable(x_data) + out = embedding(x) + out.backward() + print(embedding.weight.gradient()) + +.. py:method:: clear_gradient() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + + **2. 只有当该** :ref:`api_guide_Variable` **有梯度时才可调用,通常我们都会为参数调用这个方法,因为临时变量的梯度将会在其离开作用域时被** ``python`` **自动清除** + +设置该 :ref:`api_guide_Variable` 的梯度为零 + +返回:无 + + +**示例代码** + .. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + x = np.ones([2, 2], np.float32) + with fluid.dygraph.guard(): + inputs2 = [] + for _ in range(10): + tmp = fluid.dygraph.base.to_variable(x) + tmp.stop_gradient=False + inputs2.append(tmp) + ret2 = fluid.layers.sums(inputs2) + loss2 = fluid.layers.reduce_sum(ret2) + loss2.backward() + print(loss2.gradient()) + loss2.clear_gradient() + print("After clear {}".format(loss2.gradient())) + + +.. py:method:: to_string() + +**注意:** + + **1. 该API只在非** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +获取该 :ref:`api_guide_Variable` 的静态描述字符串 + +**参数:(仅在非** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效)** + - **throw_on_error** (bool) - 是否在没有设置必需字段时抛出异常。 + - **with_details** (bool) - 值为true时,打印更多关于 :ref:`api_guide_Variable` 的信息,如 ``error_clip`` , ``stop_gradient`` 等 + + +返回:用于静态描述该 :ref:`api_guide_Variable` 的字符串 + + +返回: 将Program转换为字符串 + +返回类型: str + +抛出异常: ``ValueError`` - 当 ``throw_on_error == true`` ,当没有设置任何必需的字段时,抛出 ``ValueError`` 。 + + +**示例代码** + .. code-block:: python + + import paddle.fluid as fluid + + cur_program = fluid.Program() + cur_block = cur_program.current_block() + new_variable = cur_block.create_var(name="X", + shape=[-1, 23, 48], + dtype='float32') + print(new_variable.to_string(True)) + print("\n=============with detail===============\n") + print(new_variable.to_string(True, True)) + + +.. py:method:: astype(self, dtype) + +将该 :ref:`api_guide_Variable` 中的数据转换成目标 ``Dtype`` + +**参数:** + - **self** ( :ref:`api_guide_Variable` ) - 当前 :ref:`api_guide_Variable` , 用户不需要传入。 + - **dtype** (int | float | float64) - 希望转换成的 ``Dtype`` + + +返回:一个全新的转换了 ``Dtype`` 的 :ref:`api_guide_Variable` + +返回类型: :ref:`api_guide_Variable` + + +**示例代码** + +在静态图模式下: + .. 
code-block:: python + + import paddle.fluid as fluid + + startup_prog = fluid.Program() + main_prog = fluid.Program() + with fluid.program_guard(startup_prog, main_prog): + original_variable = fluid.data(name = "new_variable", shape=[2,2], dtype='float32') + new_variable = original_variable.astype('int64') + print("new var's dtype is: {}".format(new_variable.dtype)) + + +在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下: + .. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + x = np.ones([2, 2], np.float32) + with fluid.dygraph.guard(): + original_variable = fluid.dygraph.to_variable(x) + print("original var's dtype is: {}, numpy dtype is {}".format(original_variable.dtype, original_variable.numpy().dtype)) + new_variable = original_variable.astype('int64') + print("new var's dtype is: {}, numpy dtype is {}".format(new_variable.dtype, new_variable.numpy().dtype)) + + + +属性 +:::::::::::: + +.. py:attribute:: stop_gradient + +**注意:该属性在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下除参数以外默认值为** ``True`` **,而参数的该属性默认值为** ``False`` **。在静态图下所有的** :ref:`api_guide_Variable` **该属性默认值都为** ``False`` + +是否从此 :ref:`api_guide_Variable` 开始,之前的相关部分都停止梯度计算 + +**示例代码** + .. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + with fluid.dygraph.guard(): + value0 = np.arange(26).reshape(2, 13).astype("float32") + value1 = np.arange(6).reshape(2, 3).astype("float32") + value2 = np.arange(10).reshape(2, 5).astype("float32") + linear = fluid.Linear(13, 5, dtype="float32") + linear2 = fluid.Linear(3, 3, dtype="float32") + a = fluid.dygraph.to_variable(value0) + b = fluid.dygraph.to_variable(value1) + c = fluid.dygraph.to_variable(value2) + out1 = linear(a) + out2 = linear2(b) + out1.stop_gradient = True + out = fluid.layers.concat(input=[out1, out2, c], axis=1) + out.backward() + # 可以发现这里linear的参数梯度变成了None + assert linear.weight.gradient() is None + assert out1.gradient() is None + +.. py:attribute:: persistable + +**注意:该属性我们即将废弃,此介绍仅为了帮助用户理解概念, 1.6版本后用户可以不再关心该属性** + + **1. 该属性除参数以外默认值为** ``False`` **,而参数的该属性默认值为** ``True`` 。 + + **2. 该属性在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下一经初始化即不能修改,这是由于在动态执行时,** :ref:`api_guide_Variable` **的生命周期将由** ``Python`` **自行控制不再需要通过该属性来修改** + +此 :ref:`api_guide_Variable` 是否是长期存活的 :ref:`api_guide_Variable` + +.. py:attribute:: name + +**注意:在非** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下,那么同一个** :ref:`api_guide_Block` **中的两个或更多** :ref:`api_guide_Variable` **拥有相同** ``name`` **将意味着他们会共享相同的内容。通常我们使用这种方式来实现参数共享** + +此 :ref:`api_guide_Variable` 的名字(str) + + +.. py:attribute:: shape + +**注意:该属性是只读属性** + +此 :ref:`api_guide_Variable` 的维度 + +.. py:attribute:: dtype + +**注意:该属性是只读属性** + +此 :ref:`api_guide_Variable` 的实际数据类型 + +.. py:attribute:: lod_level + +**注意:** + + **1. 该属性是只读属性** + + **2.** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下,不支持该属性,该值为零** + +此 :ref:`api_guide_Variable` 的 ``LoD`` 信息,关于 ``LoD`` 可以参考 :ref:`api_fluid_LoDTensor` 相关内容 + +.. py:attribute:: type + +**注意:该属性是只读属性** + +此 :ref:`api_guide_Variable` 的内存模型,例如是::ref:`api_fluid_LoDTensor`, 或者SelectedRows diff --git a/doc/paddle/api/paddle/fluid/framework/default_main_program_cn.rst b/doc/paddle/api/paddle/fluid/framework/default_main_program_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4759fafea72a09002fc9e497baeb99983b2c6218 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/framework/default_main_program_cn.rst @@ -0,0 +1,59 @@ +.. 
_cn_api_fluid_default_main_program: + +default_main_program +------------------------------- + +.. py:function:: paddle.fluid.default_main_program() + + + + + +此接口可以获取当前用于存储op和variable描述信息的 ``default main program`` + +``fluid.layers`` 接口中添加的op和variable会存储在 ``default main program`` 中 + +``default main program`` 是fluid的许多编程接口中Program参数的默认值。例如对于 ``Executor.run()`` 如果用户没有传入Program参数,会默认使用 ``default main program`` + +可以使用 :ref:`cn_api_fluid_program_guard` 来替换 ``default main program`` + +参数: + - 无 + +返回: 当前默认用于存储op和variable描述的Program + +返回类型: :ref:`cn_api_fluid_Program` + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + #示例网络: + data = fluid.data(name='image', shape=[None, 3, 224, 224], dtype='float32') + label = fluid.data(name='label', shape=[None, 1], dtype='int64') + + conv1 = fluid.layers.conv2d(data, 4, 5, 1, act=None) + bn1 = fluid.layers.batch_norm(conv1, act='relu') + pool1 = fluid.layers.pool2d(bn1, 2, 'max', 2) + conv2 = fluid.layers.conv2d(pool1, 16, 5, 1, act=None) + bn2 = fluid.layers.batch_norm(conv2, act='relu') + pool2 = fluid.layers.pool2d(bn2, 2, 'max', 2) + + fc1 = fluid.layers.fc(pool2, size=50, act='relu') + fc2 = fluid.layers.fc(fc1, size=102, act='softmax') + + loss = fluid.layers.cross_entropy(input=fc2, label=label) + loss = fluid.layers.mean(loss) + opt = fluid.optimizer.Momentum( + learning_rate=0.1, + momentum=0.9, + regularization=fluid.regularizer.L2Decay(1e-4)) + opt.minimize(loss) + + print(fluid.default_main_program().num_blocks) + print(fluid.default_main_program().blocks[0].var('image')) + + + diff --git a/doc/paddle/api/paddle/fluid/framework/default_startup_program_cn.rst b/doc/paddle/api/paddle/fluid/framework/default_startup_program_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..bfc247c29b6a952d59e8ac524a558f32843cb536 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/framework/default_startup_program_cn.rst @@ -0,0 +1,45 @@ +.. _cn_api_fluid_default_startup_program: + + + + +default_startup_program +------------------------------- + +.. py:function:: paddle.fluid.default_startup_program() + + + + + + +该函数可以获取默认/全局 startup :ref:`cn_api_fluid_Program` (初始化启动程序)。 + + :ref:`_cn_api_fluid_layers` 中的函数会新建参数或 :ref:`cn_api_paddle_data_reader_reader` (读取器) 或 `NCCL `_ 句柄作为全局变量。 + +startup_program会使用内在的OP(算子)去初始化他们,并由 :ref:`_cn_api_fluid_layers` 中的函数将这些OP追加到startup :ref:`cn_api_fluid_Program` 中。 + +该函数将返回默认的或当前的startup_program。用户可以使用 :ref:`cn_api_fluid_program_guard` 去切换 :ref:`cn_api_fluid_default_startup_program` 。 + +返回: 当前的默认/全局 初始化 :ref:`cn_api_fluid_Program` + +返回类型: :ref:`cn_api_fluid_Program` + +**代码示例:** + +.. code-block:: python + + import paddle.fluid as fluid + + main_program = fluid.Program() + startup_program = fluid.Program() + with fluid.program_guard(main_program=main_program, startup_program=startup_program): + x = fluid.layers.data(name="x", shape=[-1, 784], dtype='float32') + y = fluid.layers.data(name="y", shape=[-1, 1], dtype='int32') + z = fluid.layers.fc(name="fc", input=x, size=10, act="relu") + + print("main program is: {}".format(fluid.default_main_program())) + print("start up program is: {}".format(fluid.default_startup_program())) + + + diff --git a/doc/paddle/api/paddle/fluid/framework/in_dygraph_mode_cn.rst b/doc/paddle/api/paddle/fluid/framework/in_dygraph_mode_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..06c960ce5c3debdc422d8098744b5e7ecaa73bb5 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/framework/in_dygraph_mode_cn.rst @@ -0,0 +1,29 @@ +.. 
_cn_api_fluid_in_dygraph_mode: + +in_dygraph_mode +------------------------------- + +.. py:function:: paddle.fluid.in_dygraph_mode() + + + + +该接口检查程序是否在动态图模式中运行。 +可以通过 ``fluid.dygraph.guard`` 接口开启动态图模式。 + +返回:如果程序是在动态图模式下运行的,则返回 ``True``。 + +返回类型:bool + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + + fluid.enable_dygraph() # 现在进入 dygraph 模式 + print(fluid.in_dygraph_mode()) # True + fluid.disable_dygraph() + print(fluid.in_dygraph_mode()) # False + + diff --git a/doc/paddle/api/paddle/fluid/framework/name_scope_cn.rst b/doc/paddle/api/paddle/fluid/framework/name_scope_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7def349fb8e97c0541e92d7aaa7cb7ff727e876a --- /dev/null +++ b/doc/paddle/api/paddle/fluid/framework/name_scope_cn.rst @@ -0,0 +1,52 @@ +.. _cn_api_fluid_name_scope: + +name_scope +------------------------------- + + +.. py:function:: paddle.fluid.name_scope(prefix=None) + + + + + +该函数为operators生成不同的命名空间。该函数只用于调试和可视化,不建议用在其它方面。 + + +参数: + - **prefix** (str,可选) - 名称前缀。默认值为None。 + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + with fluid.name_scope("s1"): + a = fluid.data(name='data', shape=[None, 1], dtype='int32') + b = a + 1 + with fluid.name_scope("s2"): + c = b * 1 + with fluid.name_scope("s3"): + d = c / 1 + with fluid.name_scope("s1"): + f = fluid.layers.pow(d, 2.0) + with fluid.name_scope("s4"): + g = f - 1 + + # 没有指定的话默认OP在default main program中。 + for op in fluid.default_main_program().block(0).ops: + # elementwise_add在/s1/中创建 + if op.type == 'elementwise_add': + assert op.desc.attr("op_namescope") == '/s1/' + # elementwise_mul在/s1/s2中创建 + elif op.type == 'elementwise_mul': + assert op.desc.attr("op_namescope") == '/s1/s2/' + # elementwise_div在/s1/s3中创建 + elif op.type == 'elementwise_div': + assert op.desc.attr("op_namescope") == '/s1/s3/' + # elementwise_sub在/s4/中创建 + elif op.type == 'elementwise_sub': + assert op.desc.attr("op_namescope") == '/s4/' + # pow在/s1_1/中创建 + elif op.type == 'pow': + assert op.desc.attr("op_namescope") == '/s1_1/' diff --git a/doc/paddle/api/paddle/fluid/framework/program_guard_cn.rst b/doc/paddle/api/paddle/fluid/framework/program_guard_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..51d47cefc28e224f23c47b0b4304cc7ec3f58265 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/framework/program_guard_cn.rst @@ -0,0 +1,42 @@ +.. _cn_api_fluid_program_guard: + +program_guard +------------------------------- + + +.. py:function:: paddle.fluid.program_guard(main_program, startup_program=None) + + + + +该接口应配合使用python的 ``with`` 语句来将 ``with`` block 里的算子和变量添加进指定的全局主程序(main program)和启动程序(startup program)。 + +``with`` 语句块中的fluid.layers下各接口将在新的main program(主程序)中添加operators(算子)和variables(变量)。 + +参数: + - **main_program** (Program) – “with”语句中将使用的新的main program。 + - **startup_program** (Program,可选) – “with”语句中将使用的新的startup program。若传入 ``None`` 则不改变当前的启动程序,即仍使用default_startup_program。默认值为None。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + main_program = fluid.Program() + startup_program = fluid.Program() + with fluid.program_guard(main_program, startup_program): + data = fluid.data(name='image', shape=[None, 784, 784], dtype='float32') + hidden = fluid.layers.fc(input=data, size=10, act='relu') + +例如,当组网过程中不需要startup_program初始化各变量时,可以传入一个临时的program。 + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + main_program = fluid.Program() + # 如果您不需要关心startup program,传入一个临时值即可 + with fluid.program_guard(main_program, fluid.Program()): + data = fluid.data(name='image', shape=[None, 784, 784], dtype='float32') + diff --git a/doc/paddle/api/paddle/fluid/get_flags_cn.rst b/doc/paddle/api/paddle/fluid/get_flags_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e0323cf1f3e52c6c927ad7944a7e26a0a051442f --- /dev/null +++ b/doc/paddle/api/paddle/fluid/get_flags_cn.rst @@ -0,0 +1,21 @@ +.. _cn_api_fluid_get_flags: + +get_flags +------------------------------- + +.. py:function:: paddle.fluid.get_flags(flags) +用于获取Paddle框架中环境变量FLAGS的当前值。 + +参数: + - **flags** (list|tuple|str) - 需要获取的环境变量FLAGS的名称。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + flags = ['FLAGS_eager_delete_tensor_gb', 'FLAGS_check_nan_inf'] + res = fluid.get_flags(flags) + print(res) + # {'FLAGS_eager_delete_tensor_gb': 0.0, 'FLAGS_check_nan_inf': False} diff --git a/doc/paddle/api/paddle/fluid/initializer/Bilinear_cn.rst b/doc/paddle/api/paddle/fluid/initializer/Bilinear_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..bdbeafef6534a2e752c2fadca54218662bff7df0 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/initializer/Bilinear_cn.rst @@ -0,0 +1,13 @@ +.. _cn_api_fluid_initializer_Bilinear: + +Bilinear +------------------------------- + +.. py:attribute:: paddle.fluid.initializer.Bilinear + + + + +``BilinearInitializer`` 的别名 + + diff --git a/doc/paddle/api/paddle/fluid/initializer/Constant_cn.rst b/doc/paddle/api/paddle/fluid/initializer/Constant_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8e70a2c33c5811367c25682dd945a0cf7875a2c5 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/initializer/Constant_cn.rst @@ -0,0 +1,13 @@ +.. _cn_api_fluid_initializer_Constant: + +Constant +------------------------------- + +.. py:attribute:: paddle.fluid.initializer.Constant + + + + +``ConstantInitializer`` 的别名 + + diff --git a/doc/paddle/api/paddle/fluid/initializer/MSRA_cn.rst b/doc/paddle/api/paddle/fluid/initializer/MSRA_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b7a78c7c06b54dc50c51416d7745d1071a4ec404 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/initializer/MSRA_cn.rst @@ -0,0 +1,12 @@ +.. _cn_api_fluid_initializer_MSRA: + +MSRA +------------------------------- + +.. py:attribute:: paddle.fluid.initializer.MSRA + + + + +``MSRAInitializer`` 的别名 + diff --git a/doc/paddle/api/paddle/fluid/initializer/Normal_cn.rst b/doc/paddle/api/paddle/fluid/initializer/Normal_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ce50e67bd12563ee7c24b6ab4141acf0ccf0c303 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/initializer/Normal_cn.rst @@ -0,0 +1,115 @@ +.. _cn_api_fluid_layers_Normal: + +Normal +------------------------------- + +.. py:class:: paddle.fluid.layers.Normal(loc, scale) + + + + +正态分布 + +数学公式: + +.. math:: + + pdf(x; \mu, \sigma) = \frac{1}{Z}e^{\frac {-0.5 (x - \mu)^2} {\sigma^2} } + + Z = (2 \pi \sigma^2)^{0.5} + +上面的数学公式中: + +:math:`loc = \mu` : 平均值。 +:math:`scale = \sigma` : 标准差。 +:math:`Z`: 正态分布常量。 + +参数: + - **loc** (float|list|numpy.ndarray|Variable) - 正态分布平均值。数据类型为float32。 + - **scale** (float|list|numpy.ndarray|Variable) - 正态分布标准差。数据类型为float32。 + +**代码示例**: + +.. 
code-block:: python + + import numpy as np + from paddle.fluid import layers + from paddle.fluid.layers import Normal + + # 定义参数为float的正态分布。 + dist = Normal(loc=0., scale=3.) + # 定义一组有两个数的正态分布。 + # 第一组为均值1,标准差11,第二组为均值2,标准差22。 + dist = Normal(loc=[1., 2.], scale=[11., 22.]) + # 得到3个样本, 返回一个 3 x 2 张量。 + dist.sample([3]) + + # 通过广播的方式,定义一个两个参数的正态分布。 + # 均值都是1,标准差不同。 + dist = Normal(loc=1., scale=[11., 22.]) + + # 一个完整的例子 + value_npdata = np.array([0.8], dtype="float32") + value_tensor = layers.create_tensor(dtype="float32") + layers.assign(value_npdata, value_tensor) + + normal_a = Normal([0.], [1.]) + normal_b = Normal([0.5], [2.]) + + sample = normal_a.sample([2]) + # 一个由定义好的正太分布随机生成的张量,维度为: [2, 1] + entropy = normal_a.entropy() + # [1.4189385] with shape: [1] + lp = normal_a.log_prob(value_tensor) + # [-1.2389386] with shape: [1] + kl = normal_a.kl_divergence(normal_b) + # [0.34939718] with shape: [1] + + +.. py:function:: sample(shape, seed=0) + +生成指定维度的样本 + +参数: + - **shape** (list) - 1维列表,指定生成样本的维度。数据类型为int32。 + - **seed** (int) - 长整型数。 + +返回:预先设计好维度的张量, 数据类型为float32 + +返回类型:Variable + +.. py:function:: entropy() + +信息熵 + +返回:正态分布的信息熵, 数据类型为float32 + +返回类型:Variable + +.. py:function:: log_prob(value) + +对数概率密度函数 + +参数: + - **value** (Variable) - 输入张量。数据类型为float32或float64。 + +返回:对数概率, 数据类型与value相同 + +返回类型:Variable + +.. py:function:: kl_divergence(other) + +两个正态分布之间的KL散度。 + +参数: + - **other** (Normal) - Normal的实例。 + +返回:两个正态分布之间的KL散度, 数据类型为float32 + +返回类型:Variable + + + + + + diff --git a/doc/paddle/api/paddle/fluid/initializer/NumpyArrayInitializer_cn.rst b/doc/paddle/api/paddle/fluid/initializer/NumpyArrayInitializer_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7c0365ccba17c21f28907047bb902e25fb0df43b --- /dev/null +++ b/doc/paddle/api/paddle/fluid/initializer/NumpyArrayInitializer_cn.rst @@ -0,0 +1,30 @@ +.. _cn_api_fluid_initializer_NumpyArrayInitializer: + +NumpyArrayInitializer +------------------------------- + +.. py:class:: paddle.fluid.initializer.NumpyArrayInitializer(value) + + + + +该OP使用Numpy型数组来初始化参数变量。 + +参数: + - **value** (numpy) - 用于初始化变量的一个Numpy型数组。 + +返回:张量(Tensor) + +返回类型:变量(Variable) + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy + x1 = fluid.data(name="x1", shape=[2, 1], dtype='float32') + fc = fluid.layers.fc(input=x1, size=10, + param_attr=fluid.initializer.NumpyArrayInitializer(numpy.array([1,2]))) + + diff --git a/doc/paddle/api/paddle/fluid/initializer/TruncatedNormal_cn.rst b/doc/paddle/api/paddle/fluid/initializer/TruncatedNormal_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8a0c9b829a1915e4170a963290737ab0372f1073 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/initializer/TruncatedNormal_cn.rst @@ -0,0 +1,13 @@ +.. _cn_api_fluid_initializer_TruncatedNormal: + +TruncatedNormal +------------------------------- + +.. py:attribute:: paddle.fluid.initializer.TruncatedNormal + + + + +``TruncatedNormalInitializer`` 的别名 + + diff --git a/doc/paddle/api/paddle/fluid/initializer/Uniform_cn.rst b/doc/paddle/api/paddle/fluid/initializer/Uniform_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..59e1544b3751afacf4002cfa859a6827df1de187 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/initializer/Uniform_cn.rst @@ -0,0 +1,106 @@ +.. _cn_api_fluid_layers_Uniform: + +Uniform +------------------------------- + +.. py:class:: paddle.fluid.layers.Uniform(low, high) + + + + +均匀分布 + +概率密度函数(pdf)为: + +.. 
math:: + + pdf(x; a, b) = \frac{1}{Z}, a <=x < b + + Z = b - a + +上面的数学公式中: + +:math:`low = a` 。 +:math:`high = b` 。 +:math:`Z`: 正态分布常量。 + +参数low和high的维度必须能够支持广播。 + +参数: + - **low** (float|list|numpy.ndarray|Variable) - 均匀分布的下边界。数据类型为float32。 + - **high** (float|list|numpy.ndarray|Variable) - 均匀分布的上边界。数据类型为float32。 + +**代码示例**: + +.. code-block:: python + + import numpy as np + from paddle.fluid import layers + from paddle.fluid.layers import Uniform + + # 定义参数为float的均匀分布 + u1 = Uniform(low=3.0, high=4.0) + # 定义参数为list的均匀分布 + u2 = Uniform(low=[1.0, 2.0], + high=[3.0, 4.0]) + # 通过广播的方式,定义一个均匀分布 + u3 = Uniform(low=[[1.0, 2.0], + [3.0, 4.0]], + high=[[1.5, 2.5], + [3.5, 4.5]]) + + # 通过广播的方式,定义一个均匀分布 + u4 = Uniform(low=3.0, high=[5.0, 6.0, 7.0]) + + # 一个完整的例子 + value_npdata = np.array([0.8], dtype="float32") + value_tensor = layers.create_tensor(dtype="float32") + layers.assign(value_npdata, value_tensor) + + uniform = Uniform([0.], [2.]) + + sample = uniform.sample([2]) + # 一个由定义好的均匀分布随机生成的张量,维度为: [2, 1] + entropy = uniform.entropy() + # [0.6931472] with shape: [1] + lp = uniform.log_prob(value_tensor) + # [-0.6931472] with shape: [1] + + +.. py:function:: sample(shape, seed=0) + +生成指定维度的样本 + +参数: + - **shape** (list) - 1维列表,指定生成样本的维度。数据类型为int32。 + - **seed** (int) - 长整型数。 + +返回:预先设计好维度的张量, 数据类型为float32 + +返回类型:Variable + +.. py:function:: entropy() + +信息熵 + +返回:均匀分布的信息熵, 数据类型为float32 + +返回类型:Variable + +.. py:function:: log_prob(value) + +对数概率密度函数 + +参数: + - **value** (Variable) - 输入张量。数据类型为float32或float64。 + +返回:对数概率, 数据类型与value相同 + +返回类型:Variable + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/initializer/Xavier_cn.rst b/doc/paddle/api/paddle/fluid/initializer/Xavier_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..cd77ab871251ee4f13619010a473b88ddb478c18 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/initializer/Xavier_cn.rst @@ -0,0 +1,17 @@ +.. _cn_api_fluid_initializer_Xavier: + +Xavier +------------------------------- + +.. py:attribute:: paddle.fluid.initializer.Xavier + + + + +``XavierInitializer`` 的别名 + + + + + + diff --git a/doc/paddle/api/paddle/fluid/initializer/set_global_initializer_cn.rst b/doc/paddle/api/paddle/fluid/initializer/set_global_initializer_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..023629cf753daf5ecfb29a1b1984fbd184604bc4 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/initializer/set_global_initializer_cn.rst @@ -0,0 +1,42 @@ +.. _cn_api_fluid_set_global_initializer: + +set_global_initializer +------------------------------- + +.. py:function:: paddle.fluid.set_global_initializer(weight_init, bias_init=None) + +该API用于设置Paddle框架中全局的参数初始化方法。该API只对位于其后的代码生效。 + +模型参数为模型中的weight和bias统称,在fluid中对应fluid.Parameter类,继承自fluid.Variable,是一种可持久化的variable。 +该API的设置仅对模型参数生效,对通过 :ref:`cn_api_fluid_layers_create_global_var` 、 :ref:`cn_api_fluid_layers_create_tensor` 等API创建的变量不会生效。 + +如果创建网络层时还通过 ``param_attr`` 、 ``bias_attr`` 设置了初始化方式,这里的全局设置将不会生效,因为其优先级更低。 + +参数: + - **weight_init** (Initializer) - 设置框架的全局的weight参数初始化方法。 + - **bias_init** (Initializer,可选) - 设置框架的全局的bias参数初始化方法。默认:None。 + +返回:无 + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + + fluid.set_global_initializer(fluid.initializer.Uniform(), fluid.initializer.Constant()) + x = fluid.data(name="x", shape=[1, 3, 32, 32]) + + # conv1的weight参数是通过Uniform来初始化 + # conv1的bias参数是通过Constant来初始化 + conv1 = fluid.layers.conv2d(x, 5, 3) + + # 如果同时设置了param_attr/bias_attr, 全局初始化将不会生效 + # conv2的weight参数是通过Xavier来初始化 + # conv2的bias参数是通过Normal来初始化 + conv2 = fluid.layers.conv2d(conv1, 5, 3, + param_attr=fluid.initializer.Xavier(), + bias_attr=fluid.initializer.Normal()) + + # 取消全局参数初始化的设置 + fluid.set_global_initializer(None) \ No newline at end of file diff --git a/doc/paddle/api/paddle/fluid/input/embedding_cn.rst b/doc/paddle/api/paddle/fluid/input/embedding_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a377e832f4e96481ba81e1bd684d28fef3164203 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/input/embedding_cn.rst @@ -0,0 +1,102 @@ +.. _cn_api_fluid_layers_embedding: + +embedding +------------------------------- + + +.. py:function:: paddle.fluid.layers.embedding(input, size, is_sparse=False, is_distributed=False, padding_idx=None, param_attr=None, dtype='float32') + + + + +嵌入层(Embedding Layer) + +**注意:此OP将在未来的版本中被移除!该OP要求输入Tensor shape的最后一维必须为1。推荐使用fluid.** :ref:`cn_api_fluid_embedding` 。 + +该OP根据input中的id信息从embedding矩阵中查询对应embedding信息,并会根据输入的size (vocab_size, emb_size)和dtype自动构造一个二维embedding矩阵。 + +要求input的最后一维必须等于1,输出的Tensor的shape是将输入Tensor shape的最后一维的1替换为emb_size。 + +注:input中的id必须满足 ``0 =< id < size[0]``,否则程序会抛异常退出。 + + +:: + + Case 1: + + input是Tensor, 且padding_idx = -1 + input.data = [[[1], [3]], [[2], [4]], [[4], [127]]] + input.shape = [3, 2, 1] + 若size = [128, 16] + 输出为Tensor: + out.shape = [3, 2, 16] + out.data = [[[0.129435295, 0.244512452, ..., 0.436322452], + [0.345421456, 0.524563927, ..., 0.144534654]], + + [[0.345249859, 0.124939536, ..., 0.194353745], + [0.945345345, 0.435394634, ..., 0.435345365]], + + [[0.945345345, 0.435394634, ..., 0.435345365], + [0.0, 0.0, ..., 0.0 ]]] # padding data + 输入的padding_idx小于0,则自动转换为padding_idx = -1 + 128 = 127, 对于输入id为127的词,进行padding处理。 + + Case 2: + + input是lod level 为1的LoDTensor, 且padding_idx = 0 + input.lod = [[2, 3]] + input.data = [[1], [3], [2], [4], [0]] + input.shape = [5, 1] + 若size = [128, 16] + 输出为LoDTensor: + out.lod = [[2, 3]] + out.shape = [5, 16] + out.data = [[0.129435295, 0.244512452, ..., 0.436322452], + [0.345421456, 0.524563927, ..., 0.144534654], + [0.345249859, 0.124939536, ..., 0.194353745], + [0.945345345, 0.435394634, ..., 0.435345365], + [0.0, 0.0, ..., 0.0 ]] # padding data + 输入的padding_idx = 0,则对于输入id为0的词,进行padding处理。 + + +参数: + - **input** (Variable) - 存储id信息的Tensor或LoDTensor,数据类型必须为:int64,输入的shape最后一维须为1。input中的id必须满足 ``0 =< id < size[0]`` 。 + - **size** (tuple|list) - embedding矩阵的维度。必须包含两个元素,第一个元素为vocab_size(词表大小), 第二个为emb_size(embedding层维度)。 + - **is_sparse** (bool) - 是否使用稀疏的更新方式,这个参数只会影响反向的梯度更新的性能,sparse更新速度更快,推荐使用稀疏更新的方式。但某些optimizer不支持sparse更新,比如 :ref:`cn_api_fluid_optimizer_AdadeltaOptimizer` 、 :ref:`cn_api_fluid_optimizer_AdamaxOptimizer` 、 :ref:`cn_api_fluid_optimizer_DecayedAdagradOptimizer` 、 :ref:`cn_api_fluid_optimizer_FtrlOptimizer` 、 :ref:`cn_api_fluid_optimizer_LambOptimizer` 、:ref:`cn_api_fluid_optimizer_LarsMomentumOptimizer` ,此时is_sparse必须为False。默认为False。 + - **is_distributed** (bool) - 是否使用分布式的方式存储embedding矩阵,仅在多机分布式cpu训练中使用。默认为False。 + - **padding_idx** (int|long|None) - padding_idx需在区间[-vocab_size, vocab_size),否则不生效,padding_idx<0时,padding_idx会被改成vocab_size + 
padding_idx,input中等于padding_index的id对应的embedding信息会被设置为0,且这部分填充数据在训练时将不会被更新。如果为None,不作处理,默认为None。 + - **param_attr** (ParamAttr) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。此外,可以通过 ``param_attr`` 参数加载用户自定义或预训练的词向量。只需将本地词向量转为numpy数据格式,且保证本地词向量的shape和embedding的 ``size`` 参数一致,然后使用 :ref:`cn_api_fluid_initializer_NumpyArrayInitializer` 进行初始化,即可实现加载自定义或预训练的词向量。详细使用方法见代码示例2。 + - **dtype** (str|core.VarDesc.VarType) - 输出Tensor或LoDTensor的数据类型,数据类型必须为:float32或float64,默认为float32。 + +返回:input映射后得到的Embedding Tensor或LoDTensor,数据类型和dtype定义的类型一致。 + +返回类型:Variable + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + data = fluid.layers.data(name='sequence', shape=[1], dtype='int64', lod_level=1) + + # 示例 1 + emb_1 = fluid.layers.embedding(input=data, size=[128, 64]) + + # 示例 2: 加载用户自定义或预训练的词向量 + weight_data = np.random.random(size=(128, 100)) # numpy格式的词向量数据 + w_param_attrs = fluid.ParamAttr( + name="emb_weight", + learning_rate=0.5, + initializer=fluid.initializer.NumpyArrayInitializer(weight_data), + trainable=True) + emb_2 = fluid.layers.embedding(input=data, size=(128, 100), param_attr=w_param_attrs, dtype='float32') + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/io/PyReader_cn.rst b/doc/paddle/api/paddle/fluid/io/PyReader_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..10920cb264f87170a5394d4a4be70a74596c6c02 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/io/PyReader_cn.rst @@ -0,0 +1,389 @@ +.. _cn_api_fluid_io_PyReader: + +PyReader +------------------------------- + +.. py:class:: paddle.fluid.io.PyReader(feed_list=None, capacity=None, use_double_buffer=True, iterable=True, return_list=False) + + + + + +在python中为数据输入创建一个reader对象。将使用python线程预取数据,并将其异步插入队列。当调用Executor.run(…)时,将自动提取队列中的数据。 + +参数: + - **feed_list** (list(Variable)|tuple(Variable)) - feed变量列表,由 ``fluid.layers.data()`` 创建。 + - **capacity** (int) - PyReader对象内部维护队列的容量大小。单位是batch数量。若reader读取速度较快,建议设置较大的capacity值。 + - **use_double_buffer** (bool) - 是否使用 ``double_buffer_reader`` 。若use_double_buffer=True,PyReader会异步地预读取下一个batch的数据,可加速数据读取过程,但同时会占用少量的CPU/GPU存储,即一个batch输入数据的存储空间。 + - **iterable** (bool) - 所创建的DataLoader对象是否可迭代。 + - **return_list** (bool) - 每个设备上的数据是否以list形式返回。仅在iterable = True模式下有效。若return_list = False,每个设备上的返回数据均是str -> LoDTensor的映射表,其中映射表的key是每个输入变量的名称。若return_list = True,则每个设备上的返回数据均是list(LoDTensor)。推荐在静态图模式下使用return_list = False,在动态图模式下使用return_list = True。 + + +返回: 被创建的reader对象 + +返回类型: reader (Reader) + + +**代码示例** + +1.如果iterable=False,则创建的PyReader对象几乎与 ``fluid.layers.py_reader()`` 相同。算子将被插入program中。用户应该在每个epoch之前调用 ``start()`` ,并在epoch结束时捕获 ``Executor.run()`` 抛出的 ``fluid.core.EOFException`` 。一旦捕获到异常,用户应该调用 ``reset()`` 手动重置reader。 + +.. 
code-block:: python + + import paddle + import paddle.fluid as fluid + import numpy as np + + EPOCH_NUM = 3 + ITER_NUM = 5 + BATCH_SIZE = 3 + + def network(image, label): + # 用户定义网络,此处以softmax回归为例 + predict = fluid.layers.fc(input=image, size=10, act='softmax') + return fluid.layers.cross_entropy(input=predict, label=label) + + def reader_creator_random_image_and_label(height, width): + def reader(): + for i in range(ITER_NUM): + fake_image = np.random.uniform(low=0, + high=255, + size=[height, width]) + fake_label = np.ones([1]) + yield fake_image, fake_label + return reader + + image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + + reader = fluid.io.PyReader(feed_list=[image, label], + capacity=4, + iterable=False) + + user_defined_reader = reader_creator_random_image_and_label(784, 784) + reader.decorate_sample_list_generator( + paddle.batch(user_defined_reader, batch_size=BATCH_SIZE)) + + loss = network(image, label) + executor = fluid.Executor(fluid.CPUPlace()) + executor.run(fluid.default_startup_program()) + for i in range(EPOCH_NUM): + reader.start() + while True: + try: + executor.run(feed=None) + except fluid.core.EOFException: + reader.reset() + break + + +2.如果iterable=True,则创建的PyReader对象与程序分离。程序中不会插入任何算子。在本例中,创建的reader是一个python生成器,它是可迭代的。用户应将从PyReader对象生成的数据输入 ``Executor.run(feed=...)`` 。 + +.. code-block:: python + + import paddle + import paddle.fluid as fluid + import numpy as np + + EPOCH_NUM = 3 + ITER_NUM = 5 + BATCH_SIZE = 10 + + def network(image, label): + # 用户定义网络,此处以softmax回归为例 + predict = fluid.layers.fc(input=image, size=10, act='softmax') + return fluid.layers.cross_entropy(input=predict, label=label) + + def reader_creator_random_image(height, width): + def reader(): + for i in range(ITER_NUM): + fake_image = np.random.uniform(low=0, high=255, size=[height, width]), + fake_label = np.ones([1]) + yield fake_image, fake_label + return reader + + image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True, return_list=False) + + user_defined_reader = reader_creator_random_image(784, 784) + reader.decorate_sample_list_generator( + paddle.batch(user_defined_reader, batch_size=BATCH_SIZE), + fluid.core.CPUPlace()) + loss = network(image, label) + executor = fluid.Executor(fluid.CPUPlace()) + executor.run(fluid.default_startup_program()) + + for _ in range(EPOCH_NUM): + for data in reader(): + executor.run(feed=data, fetch_list=[loss]) + +3. return_list=True,返回值将用list表示而非dict,通常用于动态图模式中。 + +.. code-block:: python + + import paddle + import paddle.fluid as fluid + import numpy as np + + EPOCH_NUM = 3 + ITER_NUM = 5 + BATCH_SIZE = 10 + + def reader_creator_random_image(height, width): + def reader(): + for i in range(ITER_NUM): + yield np.random.uniform(low=0, high=255, size=[height, width]), \ + np.random.random_integers(low=0, high=9, size=[1]) + return reader + + place = fluid.CPUPlace() + with fluid.dygraph.guard(place): + py_reader = fluid.io.PyReader(capacity=2, return_list=True) + user_defined_reader = reader_creator_random_image(784, 784) + py_reader.decorate_sample_list_generator( + paddle.batch(user_defined_reader, batch_size=BATCH_SIZE), + place) + for image, label in py_reader(): + relu = fluid.layers.relu(image) + +.. py:method:: start() + +启动数据输入线程。只能在reader对象不可迭代时调用。 + +**代码示例** + +.. 
code-block:: python + + import paddle + import paddle.fluid as fluid + import numpy as np + + BATCH_SIZE = 10 + + def generator(): + for i in range(5): + yield np.random.uniform(low=0, high=255, size=[784, 784]), + + image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32') + reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False) + reader.decorate_sample_list_generator( + paddle.batch(generator, batch_size=BATCH_SIZE)) + + executor = fluid.Executor(fluid.CPUPlace()) + executor.run(fluid.default_startup_program()) + for i in range(3): + reader.start() + while True: + try: + executor.run(feed=None) + except fluid.core.EOFException: + reader.reset() + break + +.. py:method:: reset() + +当 ``fluid.core.EOFException`` 抛出时重置reader对象。只能在reader对象不可迭代时调用。 + +**代码示例** + +.. code-block:: python + + import paddle + import paddle.fluid as fluid + import numpy as np + + BATCH_SIZE = 10 + + def generator(): + for i in range(5): + yield np.random.uniform(low=0, high=255, size=[784, 784]), + + image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32') + reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False) + reader.decorate_sample_list_generator( + paddle.batch(generator, batch_size=BATCH_SIZE)) + + executor = fluid.Executor(fluid.CPUPlace()) + executor.run(fluid.default_startup_program()) + for i in range(3): + reader.start() + while True: + try: + executor.run(feed=None) + except fluid.core.EOFException: + reader.reset() + break + +.. py:method:: decorate_sample_generator(sample_generator, batch_size, drop_last=True, places=None) + +设置PyReader对象的数据源。 + +提供的 ``sample_generator`` 应该是一个python生成器,它生成的数据类型应为list(numpy.ndarray)。 + +当PyReader对象可迭代时,必须设置 ``places`` 。 + +如果所有的输入都没有LOD,这个方法比 ``decorate_sample_list_generator(paddle.batch(sample_generator, ...))`` 更快。 + +参数: + - **sample_generator** (generator) – Python生成器,yield 类型为list(numpy.ndarray) + - **batch_size** (int) – batch size,必须大于0 + - **drop_last** (bool) – 当样本数小于batch数量时,是否删除最后一个batch + - **places** (None|list(CUDAPlace)|list(CPUPlace)) – 位置列表。当PyReader可迭代时必须被提供 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + EPOCH_NUM = 3 + ITER_NUM = 15 + BATCH_SIZE = 3 + + def network(image, label): + # 用户定义网络,此处以softmax回归为例 + predict = fluid.layers.fc(input=image, size=10, act='softmax') + return fluid.layers.cross_entropy(input=predict, label=label) + + def random_image_and_label_generator(height, width): + def generator(): + for i in range(ITER_NUM): + fake_image = np.random.uniform(low=0, + high=255, + size=[height, width]) + fake_label = np.array([1]) + yield fake_image, fake_label + return generator + + image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True) + + user_defined_generator = random_image_and_label_generator(784, 784) + reader.decorate_sample_generator(user_defined_generator, + batch_size=BATCH_SIZE, + places=[fluid.CPUPlace()]) + loss = network(image, label) + executor = fluid.Executor(fluid.CPUPlace()) + executor.run(fluid.default_startup_program()) + + for _ in range(EPOCH_NUM): + for data in reader(): + executor.run(feed=data, fetch_list=[loss]) + +.. 
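上文提到,当所有输入均不含LoD信息时,本方法比 ``decorate_sample_list_generator(paddle.batch(sample_generator, ...))`` 更快。作为对照,下面给出与上例等价的写法(沿用上例中的 ``reader`` 、 ``user_defined_generator`` 与 ``BATCH_SIZE`` ,仅作示意):

.. code-block:: python

    import paddle

    # 等价写法:先用 paddle.batch 将样本级生成器组成batch,
    # 再通过 decorate_sample_list_generator 设置数据源(接口说明见下文)
    reader.decorate_sample_list_generator(
        paddle.batch(user_defined_generator, batch_size=BATCH_SIZE),
        places=[fluid.CPUPlace()])
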
py:method:: decorate_sample_list_generator(reader, places=None) + +设置PyReader对象的数据源。 + +提供的 ``reader`` 应该是一个python生成器,它生成列表(numpy.ndarray)类型的批处理数据。 + +当PyReader对象不可迭代时,必须设置 ``places`` 。 + +参数: + - **reader** (generator) – 返回列表(numpy.ndarray)类型的批处理数据的Python生成器 + - **places** (None|list(CUDAPlace)|list(CPUPlace)) – 位置列表。当PyReader可迭代时必须被提供 + +**代码示例** + +.. code-block:: python + + import paddle + import paddle.fluid as fluid + import numpy as np + + EPOCH_NUM = 3 + ITER_NUM = 15 + BATCH_SIZE = 3 + + def network(image, label): + # 用户定义网络,此处以softmax回归为例 + predict = fluid.layers.fc(input=image, size=10, act='softmax') + return fluid.layers.cross_entropy(input=predict, label=label) + + def random_image_and_label_generator(height, width): + def generator(): + for i in range(ITER_NUM): + fake_image = np.random.uniform(low=0, + high=255, + size=[height, width]) + fake_label = np.ones([1]) + yield fake_image, fake_label + return generator + + image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True) + + user_defined_generator = random_image_and_label_generator(784, 784) + reader.decorate_sample_list_generator( + paddle.batch(user_defined_generator, batch_size=BATCH_SIZE), + fluid.core.CPUPlace()) + loss = network(image, label) + executor = fluid.Executor(fluid.core.CPUPlace()) + executor.run(fluid.default_startup_program()) + + for _ in range(EPOCH_NUM): + for data in reader(): + executor.run(feed=data, fetch_list=[loss]) + +.. py:method:: decorate_batch_generator(reader, places=None) + +设置PyReader对象的数据源。 + +提供的 ``reader`` 应该是一个python生成器,它生成列表(numpy.ndarray)类型或LoDTensor类型的批处理数据。 + +当PyReader对象不可迭代时,必须设置 ``places`` 。 + +参数: + - **reader** (generator) – 返回LoDTensor类型的批处理数据的Python生成器 + - **places** (None|list(CUDAPlace)|list(CPUPlace)) – 位置列表。当PyReader可迭代时必须被提供 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + EPOCH_NUM = 3 + ITER_NUM = 15 + BATCH_SIZE = 3 + + def network(image, label): + # 用户定义网络,此处以softmax回归为例 + predict = fluid.layers.fc(input=image, size=10, act='softmax') + return fluid.layers.cross_entropy(input=predict, label=label) + + def random_image_and_label_generator(height, width): + def generator(): + for i in range(ITER_NUM): + batch_image = np.random.uniform(low=0, + high=255, + size=[BATCH_SIZE, height, width]) + batch_label = np.ones([BATCH_SIZE, 1]) + batch_image = batch_image.astype('float32') + batch_label = batch_label.astype('int64') + yield batch_image, batch_label + return generator + + image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True) + + user_defined_generator = random_image_and_label_generator(784, 784) + reader.decorate_batch_generator(user_defined_generator, fluid.CPUPlace()) + + loss = network(image, label) + executor = fluid.Executor(fluid.CPUPlace()) + executor.run(fluid.default_startup_program()) + + for _ in range(EPOCH_NUM): + for data in reader(): + executor.run(feed=data, fetch_list=[loss]) + + +.. 
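上述接口除numpy.ndarray外,也接受LoDTensor形式的批数据。下面给出一个生成LoDTensor批数据的最小示意(网络与数据均为假设,仅演示数据源的组织方式):

.. code-block:: python

    import paddle.fluid as fluid
    import numpy as np

    BATCH_SIZE = 4
    ITER_NUM = 5

    def lod_tensor_batch_generator():
        # 每次yield一个batch,其中每个feed变量对应一个LoDTensor(也可以直接使用numpy.ndarray)
        place = fluid.CPUPlace()
        for _ in range(ITER_NUM):
            batch_data = np.random.uniform(low=0, high=255,
                                           size=[BATCH_SIZE, 784]).astype('float32')
            t = fluid.LoDTensor()
            t.set(batch_data, place)
            yield (t, )

    image = fluid.data(name='image', shape=[None, 784], dtype='float32')
    mean = fluid.layers.reduce_mean(image)
    reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=True)
    reader.decorate_batch_generator(lod_tensor_batch_generator, fluid.CPUPlace())

    executor = fluid.Executor(fluid.CPUPlace())
    executor.run(fluid.default_startup_program())
    for data in reader():
        executor.run(feed=data, fetch_list=[mean])
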
py:method:: next() + +获取下一个数据。用户不应直接调用此方法。此方法用于PaddlePaddle框架内部实现Python 2.x的迭代器协议。 diff --git a/doc/paddle/api/paddle/fluid/io/get_program_parameter_cn.rst b/doc/paddle/api/paddle/fluid/io/get_program_parameter_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0909c333928dbaafe06ea3ca16c73eaa73eb7d1d --- /dev/null +++ b/doc/paddle/api/paddle/fluid/io/get_program_parameter_cn.rst @@ -0,0 +1,29 @@ +.. _cn_api_fluid_io_get_program_parameter: + +get_program_parameter +------------------------------- + +.. py:function:: paddle.fluid.io.get_program_parameter(program) + + + + +该接口从Program中获取所有参数。 + +参数: + - **program** ( :ref:`cn_api_fluid_Program` ) – 从该Program中获取参数。 + +返回: 包含此Program中所有参数的list + +返回类型: list + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + data = fluid.data(name="img", shape=[64, 784]) + w = fluid.layers.create_parameter(shape=[784, 200], dtype='float32', name='fc_w') + b = fluid.layers.create_parameter(shape=[200], dtype='float32', name='fc_b') + list_para = fluid.io.get_program_parameter( fluid.default_main_program() ) + diff --git a/doc/paddle/api/paddle/fluid/io/get_program_persistable_vars_cn.rst b/doc/paddle/api/paddle/fluid/io/get_program_persistable_vars_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..547839dd486dac415d69d55065e1f2fc43a27515 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/io/get_program_persistable_vars_cn.rst @@ -0,0 +1,29 @@ +.. _cn_api_fluid_io_get_program_persistable_vars: + +get_program_persistable_vars +------------------------------- + +.. py:function:: paddle.fluid.io.get_program_persistable_vars(program) + + + + +该接口从Program中获取所有persistable的变量。 + +参数: + - **program** ( :ref:`cn_api_fluid_Program` ) – 从该Program中获取persistable的变量。 + +返回: 包含此Program中所有persistable的变量 + +返回类型: list + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + data = fluid.data(name="img", shape=[64, 784]) + w = fluid.layers.create_parameter(shape=[784, 200], dtype='float32', name='fc_w') + b = fluid.layers.create_parameter(shape=[200], dtype='float32', name='fc_b') + list_para = fluid.io.get_program_persistable_vars( fluid.default_main_program() ) + diff --git a/doc/paddle/api/paddle/fluid/io/load_cn.rst b/doc/paddle/api/paddle/fluid/io/load_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..36f03340e923d4d2c0689beabb7ccf9215d26377 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/io/load_cn.rst @@ -0,0 +1,167 @@ +.. _cn_api_fluid_dygraph_jit_load: + +load +----------------- + +.. py:function:: paddle.fluid.dygraph.jit.load(model_path, configs=None) + + +将接口 :ref:`cn_api_fluid_dygraph_jit_save` 或者 :ref:`cn_api_fluid_io_save_inference_model` 存储的模型载入为 :ref:`cn_api_fluid_dygraph_TranslatedLayer` ,用于预测推理或者fine-tune训练。 + +.. note:: + 由于一些历史原因,如果载入的模型是通过 :ref:`cn_api_fluid_io_save_inference_model` 存储的, + 在使用它进行fine-tune训练时会存在一些局限: + 1. 命令式编程模式不支持 ``LoDTensor`` ,所有原先输入变量或者参数依赖于LoD信息的模型暂时无法使用; + 2. 所有存储模型的feed变量都需要被传入 ``Translatedlayer`` 的forward方法; + 3. 原模型变量的 ``stop_gradient`` 信息已丢失且无法准确恢复; + 4. 原模型参数的 ``trainable`` 信息已丢失且无法准确恢复。 + +参数: + - **model_path** (str) - 存储模型的目录。 + - **configs** (SaveLoadConfig, 可选) - 用于指定额外配置选项的 :ref:`cn_api_fluid_dygraph_jit_SaveLoadConfig` 对象。默认为 ``None``。 + +返回:TranslatedLayer - 一个能够执行存储模型的 ``Layer`` 对象。 + +**示例代码** + +1. 载入由接口 :ref:`cn_api_fluid_dygraph_jit_save` 存储的模型进行预测推理及fine-tune训练。 + + .. 
code-block:: python + + import numpy as np + import paddle.fluid as fluid + from paddle.fluid.dygraph import Linear + from paddle.fluid.dygraph import declarative + BATCH_SIZE = 32 + BATCH_NUM = 20 + def random_batch_reader(): + def _get_random_images_and_labels(image_shape, label_shape): + image = np.random.random(size=image_shape).astype('float32') + label = np.random.random(size=label_shape).astype('int64') + return image, label + def __reader__(): + for _ in range(BATCH_NUM): + batch_image, batch_label = _get_random_images_and_labels( + [BATCH_SIZE, 784], [BATCH_SIZE, 1]) + yield batch_image, batch_label + return __reader__ + class LinearNet(fluid.dygraph.Layer): + def __init__(self, in_size, out_size): + super(LinearNet, self).__init__() + self._linear = Linear(in_size, out_size) + @declarative + def forward(self, x): + return self._linear(x) + # 开启命令式编程模式 + fluid.enable_dygraph() + # 1. 训练存储模型. + # 创建网络 + net = LinearNet(784, 1) + adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters()) + # 创建DataLoader + train_loader = fluid.io.DataLoader.from_generator(capacity=5) + train_loader.set_batch_generator(random_batch_reader()) + # 训练 + for data in train_loader(): + img, label = data + label.stop_gradient = True + cost = net(img) + loss = fluid.layers.cross_entropy(cost, label) + avg_loss = fluid.layers.mean(loss) + avg_loss.backward() + adam.minimize(avg_loss) + net.clear_gradients() + model_path = "linear.example.model" + fluid.dygraph.jit.save( + layer=net, + model_path=model_path, + input_spec=[img]) + # 2. 载入模型 & 预测 + # 载入模型 + infer_net = fluid.dygraph.jit.load(model_path) + # 预测 + x = fluid.dygraph.to_variable(np.random.random((1, 784)).astype('float32')) + pred = infer_net(x) + # 3. 载入模型 & fine-tune训练 + # 载入模型 + train_net = fluid.dygraph.jit.load(model_path) + train_net.train() + adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=train_net.parameters()) + # 创建DataLoader + train_loader = fluid.io.DataLoader.from_generator(capacity=5) + train_loader.set_batch_generator(random_batch_reader()) + # fine-tune训练 + for data in train_loader(): + img, label = data + label.stop_gradient = True + cost = train_net(img) + loss = fluid.layers.cross_entropy(cost, label) + avg_loss = fluid.layers.mean(loss) + avg_loss.backward() + adam.minimize(avg_loss) + train_net.clear_gradients() + + +2. 载入由接口 :ref:`cn_api_fluid_io_save_inference_model` 存储的模型进行预测推理及fine-tune训练。 + + .. 
code-block:: python + + import numpy as np + import paddle.fluid as fluid + BATCH_SIZE = 32 + BATCH_NUM = 20 + def random_batch_reader(): + def _get_random_images_and_labels(image_shape, label_shape): + image = np.random.random(size=image_shape).astype('float32') + label = np.random.random(size=label_shape).astype('int64') + return image, label + def __reader__(): + for _ in range(BATCH_NUM): + batch_image, batch_label = _get_random_images_and_labels( + [BATCH_SIZE, 784], [BATCH_SIZE, 1]) + yield batch_image, batch_label + return __reader__ + img = fluid.data(name='img', shape=[None, 784], dtype='float32') + label = fluid.data(name='label', shape=[None, 1], dtype='int64') + pred = fluid.layers.fc(input=img, size=10, act='softmax') + loss = fluid.layers.cross_entropy(input=pred, label=label) + avg_loss = fluid.layers.mean(loss) + optimizer = fluid.optimizer.SGD(learning_rate=0.001) + optimizer.minimize(avg_loss) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + loader = fluid.io.DataLoader.from_generator( + feed_list=[img, label], capacity=5, iterable=True) + loader.set_batch_generator(random_batch_reader(), places=place) + # 1. 训练 & 存储预测模型 + for data in loader(): + exe.run( + fluid.default_main_program(), + feed=data, + fetch_list=[avg_loss]) + model_path = "fc.example.model" + fluid.io.save_inference_model( + model_path, ["img"], [pred], exe) + # 开启命令式编程模式 + fluid.enable_dygraph() + # 2. 载入模型 & 预测 + fc = fluid.dygraph.jit.load(model_path) + x = fluid.dygraph.to_variable(np.random.random((1, 784)).astype('float32')) + pred = fc(x) + # 3. 载入模型 & fine-tune训练 + fc = fluid.dygraph.jit.load(model_path) + fc.train() + sgd = fluid.optimizer.SGD(learning_rate=0.001, + parameter_list=fc.parameters()) + train_loader = fluid.io.DataLoader.from_generator(capacity=5) + train_loader.set_batch_generator( + random_batch_reader(), places=place) + for data in train_loader(): + img, label = data + label.stop_gradient = True + cost = fc(img) + loss = fluid.layers.cross_entropy(cost, label) + avg_loss = fluid.layers.mean(loss) + avg_loss.backward() + sgd.minimize(avg_loss) diff --git a/doc/paddle/api/paddle/fluid/io/load_params_cn.rst b/doc/paddle/api/paddle/fluid/io/load_params_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a0cd54fde2fd00183ce79d1e98628c0026f6dd5a --- /dev/null +++ b/doc/paddle/api/paddle/fluid/io/load_params_cn.rst @@ -0,0 +1,45 @@ +.. _cn_api_fluid_io_load_params: + +load_params +------------------------------- + + +.. 
py:function:: paddle.fluid.io.load_params(executor, dirname, main_program=None, filename=None) + + + + +该接口从指定的 ``main_program`` 中筛选出所有模型参数变量,并根据目录 ``dirname`` 或 ``filename`` 提供的参数文件对这些模型参数进行赋值。 + +使用 ``dirname`` 指定模型参数的存储路径。若模型参数变量以分离文件的形式存储在 ``dirname`` 指定的目录下,则设置 ``filename`` 值为None;若所有模型参数存储在一个单独的二进制文件中,则使用 ``filename`` 来指明这个二进制文件。 + +注意: + - 有些变量不是参数,如学习率、全局训练步数(global step)等,但它们之于训练却是必要的。因此,调用 :ref:`cn_api_fluid_io_save_params` 和 :ref:`cn_api_fluid_io_load_params` 来保存和加载参数对于断点训练是不够的,这种情况下可以使用 :ref:`cn_api_fluid_io_save_persistables` 和 :ref:`cn_api_fluid_io_load_persistables` 来保存和加载训练过程的检查点(checkpoint)。 + - 若希望同时加载预训练后的模型结构和模型参数以用于预测过程,则可使用 :ref:`cn_api_fluid_io_load_inference_model` 接口。更多细节请参考 :ref:`api_guide_model_save_reader` 。 + +参数: + - **executor** (Executor) – 加载模型参数的 ``executor`` (详见 :ref:`api_guide_executor` ) 。 + - **dirname** (str) – 模型参数的存储路径。 + - **main_program** (Program,可选) – 筛选模型参数变量所依据的 ``Program`` (详见 :ref:`api_guide_Program` )。若为None, 则使用全局默认的 ``default_main_program`` 。默认值为None。 + - **filename** (str,可选) – 若模型参数是以若干文件形式存储在 ``dirname`` 指定的目录下,则设置 ``filename`` 值为None。反之,需要通过 ``filename`` 来指明单一模型参数存储文件的名称。 默认值为None。 + +**返回:** 无 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + exe = fluid.Executor(fluid.CPUPlace()) + param_path = "./my_paddle_model" + prog = fluid.default_main_program() + fluid.io.load_params(executor=exe, dirname=param_path, + main_program=None) + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/io/load_persistables_cn.rst b/doc/paddle/api/paddle/fluid/io/load_persistables_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..871b5c64ada0a3c3dadb92969f658aae2511fa48 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/io/load_persistables_cn.rst @@ -0,0 +1,41 @@ +.. _cn_api_fluid_io_load_persistables: + +load_persistables +------------------------------- + + +.. py:function:: paddle.fluid.io.load_persistables(executor, dirname, main_program=None, filename=None) + + + + +该接口从给定的 ``main_program`` 中取出所有 ``persistable==True`` 的变量(即持久性变量,详见 :ref:`api_guide_model_save_reader` ),并根据目录 ``dirname`` 或 ``filename`` 提供的参数文件对这些持久性变量进行赋值。 + +使用 ``dirname`` 指定持久性变量的存储路径。若持久性变量以分离文件的形式保存在 ``dirname`` 指定的目录下,则设置 ``filename`` 值为None;若所有持久性变量保存在一个单独的二进制文件中,则使用 ``filename`` 来指明这个二进制文件。 + +参数: + - **executor** (Executor) – 加载持久性变量的 ``executor`` (详见 :ref:`api_guide_executor` ) 。 + - **dirname** (str) – 持久性变量的存储路径。 + - **main_program** (Program,可选) – 筛选模型中持久性变量所依据的 ``Program`` (详见 :ref:`api_guide_Program` )。若为None, 则使用全局默认的 ``default_main_program`` 。默认值为None。 + - **filename** (str,可选) – 若模型中的持久性变量是以若干文件形式存储在 ``dirname`` 指定的目录下,则设置 ``filename`` 值为None。反之,需要通过 ``filename`` 来指明单一模型持久性变量存储文件的名称。 默认值为None。 + +**返回:** 无 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + exe = fluid.Executor(fluid.CPUPlace()) + param_path = "./my_paddle_model" + prog = fluid.default_main_program() + fluid.io.load_persistables(executor=exe, dirname=param_path, + main_program=None) + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/io/load_vars_cn.rst b/doc/paddle/api/paddle/fluid/io/load_vars_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a483d5d025dd2e620c1ef7764537141dea9c0f63 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/io/load_vars_cn.rst @@ -0,0 +1,68 @@ +.. _cn_api_fluid_io_load_vars: + +load_vars +------------------------------- + +.. 
py:function:: paddle.fluid.io.load_vars(executor, dirname, main_program=None, vars=None, predicate=None, filename=None) + + + + +该接口从文件中加载 ``Program`` 的变量。 + +通过 ``vars`` 指定需要加载的变量,或者通过 ``predicate`` 筛选需要加载的变量, ``vars`` 和 ``predicate`` 不能同时为None。 + +参数: + - **executor** (Executor) – 运行的执行器,执行器的介绍请参考 :ref:`api_guide_model_save_reader` 。 + - **dirname** (str) – 加载变量所在的目录路径。 + - **main_program** (Program,可选) – 需要加载变量的 ``Program`` , ``Program`` 的介绍请参考 :ref:`api_guide_Program` 。如果 ``main_program`` 为None,则使用默认的主程序。默认值为None。 + - **vars** (list[Variable],可选) – 通过该列表指定需要加载的变量。默认值为None。 + - **predicate** (function,可选) – 通过该函数筛选 :math:`predicate(variable)== True` 的变量进行加载。如果通过 ``vars`` 指定了需要加载的变量,则该参数无效。默认值为None。 + - **filename** (str,可选) – 加载所有变量的文件。如果所有待加载变量是保存在一个文件中,则设置 ``filename`` 为该文件名;如果所有待加载变量是按照变量名称单独保存成文件,则设置 ``filename`` 为None。默认值为None。 + +返回: 无 + +抛出异常: + - ``TypeError`` - 如果main_program不是Program的实例,也不是None。 + + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + main_prog = fluid.Program() + startup_prog = fluid.Program() + with fluid.program_guard(main_prog, startup_prog): + data = fluid.layers.data(name="img", shape=[64, 784], append_batch_size=False) + w = fluid.layers.create_parameter(shape=[784, 200], dtype='float32', name='fc_w') + b = fluid.layers.create_parameter(shape=[200], dtype='float32', name='fc_b') + hidden_w = fluid.layers.matmul(x=data, y=w) + hidden_b = fluid.layers.elementwise_add(hidden_w, b) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(startup_prog) + + # 示例一:用vars来指定加载变量。 + path = "./my_paddle_vars" + var_list = [w, b] + fluid.io.save_vars(executor=exe, dirname=path, vars=var_list, + filename="vars_file") + fluid.io.load_vars(executor=exe, dirname=path, vars=var_list, + filename="vars_file") + # 加载w和b。它们被保存在'var_file'的文件中,所在路径为 "./my_paddle_model" 。 + + # 示例二:通过predicate来筛选加载变量。 + def name_has_fc(var): + res = "fc" in var.name + return res + + param_path = "./my_paddle_model" + fluid.io.save_vars(executor=exe, dirname=param_path, main_program=main_prog, vars=None, predicate=name_has_fc) + fluid.io.load_vars(executor=exe, dirname=param_path, main_program=main_prog, vars=None, predicate=name_has_fc) + #加载 `main_program` 中变量名包含 ‘fc’ 的所有变量 + #此前所有变量应该保存在不同文件中 + + + + diff --git a/doc/paddle/api/paddle/fluid/io/save_params_cn.rst b/doc/paddle/api/paddle/fluid/io/save_params_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..49ace8a81a09e4d65b236147303d8b2aa3d17b24 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/io/save_params_cn.rst @@ -0,0 +1,47 @@ +.. _cn_api_fluid_io_save_params: + +save_params +------------------------------- + + +.. 
py:function:: paddle.fluid.io.save_params(executor, dirname, main_program=None, filename=None) + + + + +该OP从 ``main_program`` 中取出所有参数,然后将它们保存到 ``dirname`` 目录下或名为 ``filename`` 的文件中。 + +``dirname`` 用于指定保存参数的目标路径。若想将参数保存到多个独立文件中,设置 ``filename=None`` ; 若想将所有参数保存在单个文件中,请设置 ``filename`` 来指定该文件的名称。 + +注意: + - 有些变量不是参数,如学习率,全局训练步数(global step)等,但它们对于训练却是必要的。因此,调用 :ref:`cn_api_fluid_io_save_params` 和 :ref:`cn_api_fluid_io_load_params` 来保存和加载参数对于断点训练是不够的,这种情况下可以使用 :ref:`cn_api_fluid_io_save_persistables` 和 :ref:`cn_api_fluid_io_load_persistables` 来保存和加载训练过程中的检查点(checkpoint)。 + - 如果您想要储存您的模型用于预测,请使用 :ref:`cn_api_fluid_io_save_inference_model` 。更多细节请参考 :ref:`api_guide_model_save_reader` + +参数: + - **executor** (Executor) – 用于保存参数的 ``executor`` ,详见 :ref:`api_guide_executor` 。 + - **dirname** (str) – 指定保存参数的文件目录。 + - **main_program** (Program,可选) – 需要保存参数的Program( ``Program`` 含义详见 :ref:`api_guide_Program` )。如果为None,则使用default_main_Program 。默认值为None。 + - **filename** (str,可选) – 保存参数的文件名称。若需要将参数保存到多个独立的文件中,请设置 ``filename=None`` 。默认值为None。 + +返回: 无 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + params_path = "./my_paddle_model" + image = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + feeder = fluid.DataFeeder(feed_list=[image, label], place=fluid.CPUPlace()) + predict = fluid.layers.fc(input=image, size=10, act='softmax') + + loss = fluid.layers.cross_entropy(input=predict, label=label) + avg_loss = fluid.layers.mean(loss) + + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(fluid.default_startup_program()) + fluid.io.save_params(executor=exe, dirname=params_path) + # 网络中fc层的参数weight和bias将会分别存储在"./my_paddle_model"路径下。 + diff --git a/doc/paddle/api/paddle/fluid/io/save_persistables_cn.rst b/doc/paddle/api/paddle/fluid/io/save_persistables_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..17afebc88d257c447154f89ccc0809c472469d9f --- /dev/null +++ b/doc/paddle/api/paddle/fluid/io/save_persistables_cn.rst @@ -0,0 +1,50 @@ +.. _cn_api_fluid_io_save_persistables: + +save_persistables +------------------------------- + + +.. py:function:: paddle.fluid.io.save_persistables(executor, dirname, main_program=None, filename=None) + + + + +该OP从给定 ``main_program`` 中取出所有持久性变量(详见 :ref:`api_guide_model_save_reader` ),然后将它们保存到目录 ``dirname`` 中或 ``filename`` 指定的文件中。 + +``dirname`` 用于指定保存持久性变量的目录。如果想将持久性变量保存到指定目录的若干文件中,请设置 ``filename=None`` ; 若想将所有持久性变量保存在同一个文件中,请设置 ``filename`` 来指定文件的名称。 + +参数: + - **executor** (Executor) – 用于保存持久性变量的 ``executor`` ,详见 :ref:`api_guide_executor` 。 + - **dirname** (str) – 用于储存持久性变量的文件目录。 + - **main_program** (Program,可选) – 需要保存持久性变量的Program( ``Program`` 含义详见 :ref:`api_guide_Program` )。如果为None,则使用default_main_Program 。默认值为None。 + - **filename** (str,可选) – 保存持久性变量的文件名称。若想分开保存变量,设置 ``filename=None`` 。 默认值为None。 + +返回: 无 + +**代码示例** + +.. 
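结合 :ref:`cn_api_fluid_io_load_persistables` ,断点训练时“保存-恢复”检查点的典型流程可参考下面的最小示意(目录名与网络均为假设);完整的保存示例见后:

.. code-block:: python

    import paddle.fluid as fluid

    image = fluid.data(name='image', shape=[None, 784], dtype='float32')
    fc = fluid.layers.fc(input=image, size=10)

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())

    ckpt_dir = "./my_checkpoint"   # 假设的检查点目录
    # 训练若干轮后保存检查点
    fluid.io.save_persistables(executor=exe, dirname=ckpt_dir)
    # 断点恢复:在网络结构一致的程序中加载检查点,继续训练
    fluid.io.load_persistables(executor=exe, dirname=ckpt_dir)
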
code-block:: python + + import paddle.fluid as fluid + + dir_path = "./my_paddle_model" + file_name = "persistables" + image = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + feeder = fluid.DataFeeder(feed_list=[image, label], place=fluid.CPUPlace()) + predict = fluid.layers.fc(input=image, size=10, act='softmax') + + loss = fluid.layers.cross_entropy(input=predict, label=label) + avg_loss = fluid.layers.mean(loss) + + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(fluid.default_startup_program()) + fluid.io.save_persistables(executor=exe, dirname=dir_path, filename=file_name) + # 网络中fc层中的持久性变量weight和bia将会保存在路径“./my_paddle_model”下名为"persistables"的文件中。 + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/io/save_vars_cn.rst b/doc/paddle/api/paddle/fluid/io/save_vars_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d56681462193064f6e9287ab30d7e61608ecd729 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/io/save_vars_cn.rst @@ -0,0 +1,66 @@ +.. _cn_api_fluid_io_save_vars: + +save_vars +------------------------------- + + +.. py:function:: paddle.fluid.io.save_vars(executor, dirname, main_program=None, vars=None, predicate=None, filename=None) + + + + +该接口将 ``Program`` 的变量保存到文件中。 + +通过 ``vars`` 指定需要保存的变量,或者通过 ``predicate`` 筛选需要保存的变量, ``vars`` 和 ``predicate`` 不能同时为None。 + +参数: + - **executor** (Executor)- 运行的执行器,执行器的介绍请参考 :ref:`api_guide_model_save_reader` 。 + - **dirname** (str)- 保存变量的目录路径。 + - **main_program** (Program,可选)- 需要保存变量的 ``Program`` , ``Program`` 的介绍请参考 :ref:`api_guide_Program` 。如果 ``main_program`` 为None,则使用默认的主程序。默认值为None。 + - **vars** (list [Variable],可选)- 通过该列表指定需要保存的变量。默认值为None。 + - **predicate** (function,可选)- 通过该函数筛选 :math:`predicate(variable)== True` 的变量进行保存。如果通过 ``vars`` 指定了需要保存的变量,则该参数无效。默认值为None。 + - **filename** (str,可选)- 保存所有变量的文件。如果设置为None,所有变量会按照变量名称单独保存成文件;如果设置为非None,所有变量会保存成一个文件名为该设置值的文件。默认值为None。 + +返回:无 + +抛出异常: + - ``TypeError`` - 如果main_program不是Program的实例,也不是None。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + main_prog = fluid.Program() + startup_prog = fluid.Program() + with fluid.program_guard(main_prog, startup_prog): + data = fluid.layers.data(name="img", shape=[64, 784], append_batch_size=False) + w = fluid.layers.create_parameter(shape=[784, 200], dtype='float32', name='fc_w') + b = fluid.layers.create_parameter(shape=[200], dtype='float32', name='fc_b') + hidden_w = fluid.layers.matmul(x=data, y=w) + hidden_b = fluid.layers.elementwise_add(hidden_w, b) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(startup_prog) + + # 示例一:用vars来指定变量。 + var_list = [w, b] + path = "./my_paddle_vars" + fluid.io.save_vars(executor=exe, dirname=path, vars=var_list, + filename="vars_file") + # w, b 将被保存,使用同一文件名“var_file”,保存在路径“./my_paddle_vars”下。 + + # 示例二:通过predicate筛选变量。 + def name_has_fc(var): + res = "fc" in var.name + return res + + param_path = "./my_paddle_model" + fluid.io.save_vars(executor=exe, dirname=param_path, main_program=main_prog, vars=None, predicate = name_has_fc) + # 将main_program中名中包含“fc”的的所有变量保存。 + # 变量将分开保存。 + + + + + diff --git a/doc/paddle/api/paddle/fluid/io/shuffle_cn.rst b/doc/paddle/api/paddle/fluid/io/shuffle_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c5a09fa88737a1b98ea84f841f9cea808dc323af --- /dev/null +++ b/doc/paddle/api/paddle/fluid/io/shuffle_cn.rst @@ -0,0 +1,32 @@ +.. _cn_api_fluid_io_shuffle: + +shuffle +------------------------------- + +.. 
py:function:: paddle.fluid.io.shuffle(reader, buffer_size) + + + + +该接口创建一个数据读取器,其功能是将原始数据读取器的数据打乱,然后返回无序的数据。 + +从原始数据读取器取出buf_size个数据到缓冲区,将缓冲区数据打乱,然后将无序的数据依次返回。当缓冲区数据全部输出后,再次执行上述步骤。 + +参数: + - **reader** (callable) – 原始数据读取器。 + - **buf_size** (int) – 缓冲区保存数据的个数。 + +返回: 返回无序数据的数据读取器 + +返回类型: callable + +.. code-block:: python + + import paddle.fluid as fluid + def reader(): + for i in range(5): + yield i + shuffled_reader = fluid.io.shuffle(reader, 3) + for e in shuffled_reader(): + print(e) + # 输出结果是0~4的无序排列 diff --git a/doc/paddle/api/paddle/fluid/is_compiled_with_cuda_cn.rst b/doc/paddle/api/paddle/fluid/is_compiled_with_cuda_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5f2741e78783c432eb22fd82509e2f3ebf7c808e --- /dev/null +++ b/doc/paddle/api/paddle/fluid/is_compiled_with_cuda_cn.rst @@ -0,0 +1,24 @@ +.. _cn_api_fluid_is_compiled_with_cuda: + +is_compiled_with_cuda +------------------------------- + +.. py:function:: paddle.fluid.is_compiled_with_cuda() + + + + +检查 ``whl`` 包是否可以被用来在GPU上运行模型 + +返回:支持gpu则为True,否则为False。 + +返回类型:out(boolean) + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + support_gpu = fluid.is_compiled_with_cuda() + + diff --git a/doc/paddle/api/paddle/fluid/layers/BasicDecoder_cn.rst b/doc/paddle/api/paddle/fluid/layers/BasicDecoder_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..dd3820d852961be4903b9e7b6f0f10ca1eac35b8 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/BasicDecoder_cn.rst @@ -0,0 +1,80 @@ +.. _cn_api_fluid_layers_BasicDecoder: + +BasicDecoder +------------------------------- + + +.. py:class:: paddle.fluid.layers.BasicDecoder(cell, helper, output_fn=None) + +BasicDecoder是 :ref:`cn_api_fluid_layers_Decoder` 的子类,它组装了 :ref:`cn_api_fluid_layers_RNNCell` 和 :ref:`cn_api_fluid_layers_DecodeHelper` 的实例作为成员,其中DecodeHelper用来实现不同的解码策略。它依次执行以下步骤来完成单步解码: + +1. 执行 :code:`cell_outputs, cell_states = cell.call(inputs, states)` 以获取输出和新的状态。 + +2. 执行 :code:`sample_ids = helper.sample(time, cell_outputs, cell_states)` 以采样id并将其作为当前步的解码结果。 + +3. 执行 :code:`finished, next_inputs, next_states = helper.next_inputs(time, cell_outputs, cell_states, sample_ids)` 以产生下一解码步的结束标识、输入和状态。 + +参数: + - **cell** (RNNCell) - RNNCell的实例或者具有相同接口定义的对象。 + - **helper** (DecodeHelper) - DecodeHelper的实例。 + - **output_fn** (可选) - 处理cell输出的接口,在采样之前使用。默认值None。 + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.layers as layers + + start_tokens = fluid.data(name="start_tokens", + shape=[None], + dtype="int64") + + trg_embeder = lambda x: fluid.embedding( + x, size=[10000, 128], param_attr=fluid.ParamAttr(name="trg_embedding")) + output_layer = lambda x: layers.fc(x, + size=10000, + num_flatten_dims=len(x.shape) - 1, + param_attr=fluid.ParamAttr(name= + "output_w"), + bias_attr=False) + helper = layers.SampleEmbeddingHelper(trg_embeder, start_tokens=start_tokens, end_token=1) + decoder_cell = layers.GRUCell(hidden_size=128) + decoder = layers.BasicDecoder(decoder_cell, helper, output_fn=output_layer) + outputs = layers.dynamic_decode( + decoder=decoder, inits=decoder_cell.get_initial_states(start_tokens)) + +.. 
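为便于理解上述单步解码三个步骤中各量的形状与含义,下面用numpy给出一个与框架无关的最小示意(batch大小、词表大小等维度均为假设值,采样以贪心策略为例):

.. code-block:: python

    import numpy as np

    batch_size, vocab_size, emb_size = 4, 10000, 128
    end_token = 1  # 假设的结束标记id

    # 步骤1:cell.call 的输出经 output_fn 映射到词表维度,形状为[batch_size, vocab_size]
    cell_outputs = np.random.random([batch_size, vocab_size]).astype('float32')
    # 步骤2:helper.sample 产生采样id,贪心策略即在词表维度取argmax,形状为[batch_size]
    sample_ids = cell_outputs.argmax(axis=-1)
    # 步骤3:helper.next_inputs 将采样id经embedding映射为下一解码步的输入,
    #        并依据是否采样到end_token给出结束标识
    embedding_table = np.random.random([vocab_size, emb_size]).astype('float32')
    next_inputs = embedding_table[sample_ids]      # [batch_size, emb_size]
    finished = (sample_ids == end_token)           # [batch_size]
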
py:method:: initialize(initial_cell_states) + +初始化,包括helper的初始化和cell的初始化,cell初始化直接使用 :code:`initial_cell_states` 作为结果。 + +参数: + - **initial_cell_states** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构。这是由调用者 :ref:`cn_api_fluid_layers_dynamic_decode` 提供的参数。 + +返回::code:`(initial_inputs, initial_states, finished)` 的三元组。 :code:`initial_inputs, initial_states` 均是单个tensor变量或tensor变量组成的嵌套结构, :code:`finished` 是bool类型的tensor。 :code:`initial_inputs, finished` 与 :code:`helper.initialize()` 返回的内容相同; :code:`initial_states` 与输入参数中的 :code:`initial_cell_states` 的相同。 + +返回类型:tuple + +.. py:class:: OutputWrapper(cell_outputs, sample_ids) + + :code:`step()` 的返回值中 :code:`outputs` 使用的数据结构,是一个由 :code:`cell_outputs` 和 :code:`sample_ids` 这两个字段构成的命名元组。 + +.. py:method:: step(time, inputs, states, **kwargs) + +按照以下步骤执行单步解码: + +1. 执行 :code:`cell_outputs, cell_states = cell.call(inputs, states)` 以获取输出和新的状态。 + +2. 执行 :code:`sample_ids = helper.sample(time, cell_outputs, cell_states)` 以采样id并将其作为当前步的解码结果。 + +3. 执行 :code:`finished, next_inputs, next_states = helper.next_inputs(time, cell_outputs, cell_states, sample_ids)` 以产生下一解码步的结束标识、输入和状态。 + +参数: + - **time** (Variable) - 调用者提供的形状为[1]的tensor,表示当前解码的时间步长。其数据类型为int64。 + - **inputs** (Variable) - tensor变量。在第一个解码时间步时与由 :code:`initialize()` 返回的 :code:`initial_inputs` 相同,其他时间步与由 :code:`step()` 返回的 :code:`next_inputs` 相同。 + - **states** (Variable) - tensor变量的结构。在第一个解码时间步时与 :code:`initialize()` 返回的 :code:`initial_states` 相同,其他时间步与由 :code:`step()` 返回的 :code:`next_states` 相同。 + - **kwargs** - 附加的关键字参数,由调用者 :ref:`cn_api_fluid_layers_dynamic_decode` 提供。 + +返回: :code:`(outputs, next_states, next_inputs, finished)` 的四元组。 :code:`outputs` 是包含 :code:`cell_outputs` 和 :code:`sample_ids` 两个字段的命名元组,其中 :code:`cell_outputs` 是 :code:`cell.call()` 的结果, :code:`sample_ids` 是 :code:`helper.sample()` 的结果; :code:`next_states, next_inputs` 分别和输入参数中的 :code:`states, inputs` 有相同的的结构、形状和数据类型; :code:`finished` 是一个bool类型的tensor,形状是 :math:`[batch\_size]` 。 + +返回类型:tuple diff --git a/doc/paddle/api/paddle/fluid/layers/Categorical_cn.rst b/doc/paddle/api/paddle/fluid/layers/Categorical_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9265a666d8460b9b37679ae73f1e5a653e3576f3 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/Categorical_cn.rst @@ -0,0 +1,79 @@ +.. _cn_api_fluid_layers_Categorical: + +Categorical +------------------------------- + +.. py:class:: paddle.fluid.layers.Categorical(logits) + + + + +类别分布是一种离散概率分布,其随机变量可以取K个相互独立类别的其中一个。 + +概率质量函数(pmf)为: + +.. math:: + + pmf(k; p_i) =\prod_{i=1}^{k} p_i^{[x=i]} + +上面公式中: + - :math:`[x = i]` 表示:如果 :math:`x==i` ,则表达式取值为1,否则取值为0。 + + +参数: + - **logits** (list|numpy.ndarray|Variable) - 类别分布对应的logits。数据类型为float32。 + +**代码示例**: + +.. code-block:: python + + import numpy as np + from paddle.fluid import layers + from paddle.fluid.layers import Categorical + + a_logits_npdata = np.array([-0.602,-0.602], dtype="float32") + a_logits_tensor = layers.create_tensor(dtype="float32") + layers.assign(a_logits_npdata, a_logits_tensor) + + b_logits_npdata = np.array([-0.102,-0.112], dtype="float32") + b_logits_tensor = layers.create_tensor(dtype="float32") + layers.assign(b_logits_npdata, b_logits_tensor) + + a = Categorical(a_logits_tensor) + b = Categorical(b_logits_tensor) + + a.entropy() + # [0.6931472] with shape: [1] + + b.entropy() + # [0.6931347] with shape: [1] + + a.kl_divergence(b) + # [1.2516975e-05] with shape: [1] + + +.. 
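上例中熵的数值可以由定义直接验证:将logits经softmax归一化得到各类别概率后,按 :math:`-\sum_i p_i \log p_i` 计算即可。下面是一个仅依赖numpy的验证示意:

.. code-block:: python

    import numpy as np

    logits = np.array([-0.602, -0.602], dtype="float32")
    prob = np.exp(logits) / np.exp(logits).sum()   # 两个类别的概率均为0.5
    entropy = -(prob * np.log(prob)).sum()         # 约为0.6931472,与上例中 a.entropy() 的输出一致
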
py:function:: kl_divergence(other) + +相对于另一个类别分布的KL散度 + +参数: + - **other** (Categorical) - 输入的另一个类别分布。数据类型为float32。 + +返回:相对于另一个类别分布的KL散度, 数据类型为float32 + +返回类型:Variable + +.. py:function:: entropy() + +信息熵 + +返回:类别分布的信息熵, 数据类型为float32 + +返回类型:Variable + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/DecodeHelper_cn.rst b/doc/paddle/api/paddle/fluid/layers/DecodeHelper_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..14ad49c37982245c138bb04b7377d9b40edc6fa1 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/DecodeHelper_cn.rst @@ -0,0 +1,44 @@ +.. _cn_api_fluid_layers_DecodeHelper: + +DecodeHelper +------------------------------- + + +.. py:class:: paddle.fluid.layers.DecodeHelper() + +DecodeHelper是一个基类,其子类的实例将在 :ref:`cn_api_fluid_layers_BasicDecoder` 中使用。它提供了在动态解码时采样和产生下一解码步的输入的接口。 + +.. py:method:: initialize() + +初始化以产生第一个解码步的输入和每个序列是否结束的初始标识。这是 :ref:`cn_api_fluid_layers_BasicDecoder` 初始化的一部分。 + +返回::code:`(initial_inputs, initial_finished)` 的二元组, :code:`initial_inputs` 是单个tensor变量或tensor变量组成的嵌套结构,tensor的形状是 :math:`[batch\_size, ...]` 。 :code:`initial_finished` 是一个bool类型且形状为 :math:`[batch\_size]` 的tensor。 + +返回类型:tuple + +.. py:method:: sample(time, outputs, states) + +根据 :code:`outputs` 以特定的方式进行采样,该方法是 :code:`BasicDecoder.step` 中的一部分。 + +参数: + - **time** (Variable) - 调用者提供的形状为[1]的tensor,表示当前解码的时间步长。其数据类型为int64。 + - **outputs** (Variable) - tensor变量,通常其数据类型为float32或float64,形状为 :math:`[batch\_size, vocabulary\_size]` ,表示当前解码步预测产生的logit(未归一化的概率),和由 :code:`BasicDecoder.output_fn(BasicDecoder.cell.call())` 返回的 :code:`outputs` 是同一内容。 + - **states** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构,和由 :code:`BasicDecoder.cell.call()` 返回的 :code:`new_states` 是同一内容。 + +返回:数据类型为int64形状为 :math:`[batch\_size]` 的tensor,表示采样得到的id。 + +返回类型:Variable + +.. py:method:: next_inputs(time, outputs, states, sample_ids) + +产生下一解码步的输入、状态,以及每个序列是否结束的标识。该方法是 :code:`BasicDecoder.step` 中的一部分。 + +参数: + - **time** (Variable) - 调用者提供的形状为[1]的tensor,表示当前解码的时间步长。其数据类型为int64。 + - **outputs** (Variable) - tensor变量,通常其数据类型为float32或float64,形状为 :math:`[batch\_size, vocabulary\_size]` ,表示当前解码步预测产生的logit(未归一化的概率),和由 :code:`BasicDecoder.output_fn(BasicDecoder.cell.call())` 返回的 :code:`outputs` 是同一内容。 + - **states** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构,和由 :code:`BasicDecoder.cell.call()` 返回的 :code:`new_states` 是同一内容。 + - **sample_ids** (Variable) - 数据类型为int64形状为 :math:`[batch\_size]` 的tensor,和由 :code:`sample()` 返回的 :code:`sample_ids` 是同一内容。 + +返回: :code:`(finished, next_inputs, next_states)` 的三元组。 :code:`next_inputs, next_states` 均是单个tensor变量或tensor变量组成的嵌套结构, :code:`next_states` 和输入参数中的 :code:`states` 具有相同的结构、形状和数据类型; :code:`finished` 是一个bool类型且形状为 :math:`[batch\_size]` 的tensor。 + +返回类型:tuple diff --git a/doc/paddle/api/paddle/fluid/layers/Decoder_cn.rst b/doc/paddle/api/paddle/fluid/layers/Decoder_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..865a690d98e76b90557a99bdb8bc6c624c6d7828 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/Decoder_cn.rst @@ -0,0 +1,66 @@ +.. _cn_api_fluid_layers_Decoder: + +Decoder +------------------------------- + + + +.. py:class:: paddle.fluid.layers.Decoder() + + + + + +Decoder是dynamic_decode中使用的任何decoder实例的基类。它提供了为每一个时间步生成输出的接口,可用于生成序列。 + +Decoder提供的主要抽象为: + +1. :code:`(initial_input, initial_state, finished) = initialize(inits)`, +为第一个解码步生成输入和状态,并给出指示batch中的每个序列是否结束的初始标识。 + +2. :code:`(output, next_state, next_input, finished) = step(time, input, state)`, +将输入和状态转换为输出和新的状态,为下一个解码步生成输入,并给出指示batch中的每个序列是否结束的标识。 + +3. 
:code:`(final_outputs, final_state) = finalize(outputs, final_state, sequence_lengths)`, +修改输出(所有时间步输出的堆叠)和最后的状态以做特殊用途。若无需修改堆叠得到的输出和来自最后一个时间步的状态,则无需实现。 + +与RNNCell相比,Decoder更为通用,因为返回的 :code:`next_input` 和 :code:`finished` 使它可以自行决定输入以及结束时机。 + + +.. py:method:: initialize(inits) + +在解码迭代之前调用一次。 + +参数: + - **inits** - 调用方提供的参数。 + +返回:一个元组 :code:`(initial_inputs, initial_states, finished)` 。:code:`initial_inputs` 和 :code:`initial_states` 都是单个tensor变量或tensor变量组成的嵌套结构, :code:`finished` 是具有bool数据类型的tensor。 + +返回类型:tuple + +.. py:method:: step(time, inputs, states, **kwargs) + +在解码的每个时间步中被调用的接口 + +参数: + - **time** (Variable) - 调用者提供的形状为[1]的tensor,表示当前解码的时间步长。其数据类型为int64。。 + - **inputs** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构。在第一个解码时间步时与由 :code:`initialize()` 返回的 :code:`initial_inputs` 相同,其他时间步与由 :code:`step()` 返回的 :code:`next_inputs` 相同。 + - **states** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构。在第一个解码时间步时与 :code:`initialize()` 返回的 :code:`initial_states` 相同,其他时间步与由 :code:`step()` 返回的 :code:`beam_search_state` 相同。 + - **kwargs** - 附加的关键字参数,由调用者提供。 + +返回:一个元组 :code:`(outputs, next_states, next_inputs, finished)` 。:code:`next_states` 和 :code:`next_inputs` 都是单个tensor变量或tensor变量组成的嵌套结构,且结构、形状和数据类型均分别与输入参数中的 :code:`states` 和 :code:`inputs` 相同。 :code:`outputs` 是单个tensor变量或tensor变量组成的嵌套结构。 :code:`finished` 是一个bool类型的tensor变量。 + +返回类型:tuple + +.. py:method:: finalize(self, outputs, final_states, sequence_lengths) + +如果提供了实现,将在整个解码迭代结束后被执行一次。 + +参数: + - **outputs** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构。 其中每个tensor的形状均为 :math:`[time\_step,batch\_size,...]` ,是将所有解码步中与其对应的的输出进行堆叠的结果,这个过程由其调用者完成。 + - **final_states** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构。 它是 :code:`decoder.step` 在最后一个解码步返回的 :code:`next_states`, 因此具有与任何时间步的状态相同的结构,形状和数据类型。 + - **kwargs** - 命名关键字参数,由提供调用者。 + +返回:一个元组 :code:`(final_outputs, final_states)` 。:code:`final_outputs` 和 :code:`final_states` 都是单个tensor变量或tensor变量组成的嵌套结构。 + +返回类型:tuple \ No newline at end of file diff --git a/doc/paddle/api/paddle/fluid/layers/DynamicRNN_cn.rst b/doc/paddle/api/paddle/fluid/layers/DynamicRNN_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..654e2651850d50c2edc74cb281f9bd2ae02271b5 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/DynamicRNN_cn.rst @@ -0,0 +1,430 @@ +.. _cn_api_fluid_layers_DynamicRNN: + +DynamicRNN +=================== + + +.. py:class:: paddle.fluid.layers.DynamicRNN(name=None) + + + + +**注意:该类型的输入仅支持LoDTensor,如果您需要处理的输入数据是Tensor类型, +请使用StaticRNN( fluid.layers.** :ref:`cn_api_fluid_layers_StaticRNN` **)。** + +DynamicRNN可以处理一批序列数据,其中每个样本序列的长度可以不同,每个序列的长度信息记录在LoD里面。 +DynamicRNN会按照时间步 (time step) 将输入序列展开,用户可以在 :code:`block` 中定义每个时间步要进行的运算。 +由于每个输入样本的序列长度不相同,RNN执行的step数由最长的序列决定。 +DynamicRNN的实现采用非padding的方式,每个时间步都会对输入数据进行收缩处理,移除已经处理完的序列的信息。 +因此,随着时间步的增加,每个时间步处理的样本数(batch size)会逐渐减少。 + +.. warning:: + 目前不支持在DynamicRNN的 :code:`block` 中任何层上配置 :code:`is_sparse = True` 。 + +参数: + - **name** (str,可选) - 具体用法参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +成员函数列表: + - :ref:`cn_api_fluid_layers_DynamicRNN_step_input` ,设置输入变量 + - :ref:`cn_api_fluid_layers_DynamicRNN_static_input` ,设置静态输入变量 + - :ref:`cn_api_fluid_layers_DynamicRNN_block` ,定义每个时间步执行的运算 + - :ref:`cn_api_fluid_layers_DynamicRNN_memory` ,创建用于在时间步之间传递信息的变量 + - :ref:`cn_api_fluid_layers_DynamicRNN_update_memory` ,更新需要传递的时间步信息 + - :ref:`cn_api_fluid_layers_DynamicRNN_output` ,设置时间步的输出变量 + - :ref:`cn_api_fluid_layers_DynamicRNN_call` ,获取RNN的输出序列 + + +.. _cn_api_fluid_layers_DynamicRNN_step_input: + +成员函数 step_input +--------------------------------- + +.. 
py:method:: step_input(x, level=0) + +将序列x设置为DynamicRNN输入。输入序列中最长的序列长度,将决定了RNN运算的长度。 +必须至少为DynamicRNN设置一个输入,也可以设置多个输入。 +如果多个输入x的 :code:`x.lod_level` 都为1,则要求多个输入LoDTensor携带完全相同的LoD信息。 +当输入x的 :code:`x.lod_level >= 2` 时,输入序列将按指定level进行展开,每个时间步携带 :code:`x.lod_level - level - 1` 层LoD信息, +此时要求多个输入序列的LoD在指定level上的信息完全一样。 + +- 示例1 + +.. code-block:: text + + # 输入,其中Si代表维度为[1, N]的数据 + level = 0 + x.lod = [[2, 1, 3]] + x.shape = [6, N] + x.data = [[S0], + [S0], + [S1], + [S2], + [S2], + [S2]] + + # 输出 + # step 0,持有3个序列的time step数据 + out.lod = [[]] + out.shape = [3, N] + out.data = [[S2], + [S0], + [S1]] + + # step 1,持有2个序列的time step数据 + out.lod = [[]] + out.shape = [2, N] + out.data = [[S2], + [S0]] + + # step 2,持有1个序列的time step数据 + out.lod = [[]] + out.shape = [1, N] + out.data = [[S2]] + + +参数: + - **x** (Variable) - 输入序列LoDTensor,代表由长度不同的多个序列组成的minibatch,要求 :code:`x.lod_level >= 1`。输入x第一个维度的值等于minibatch内所有序列的长度之和。RNN有多个输入序列时,多个输入LoDTensor的第一个维度必须相同,其它维度可以不同。支持的数据类型有:bool,float16,float32,float64,int8,int16,int32,int64,uint8。 + - **level** (int,可选) - 用于拆分输入序列的LoD层级,取值范围是 :math:`[0, x.lod\_level)`,默认值是0。 + +返回: 输入序列每个时间步的数据。执行第 :code:`step_idx` 个时间步时,若输入 :code:`x` 中有 :code:`num_sequences` 个长度不小于 :code:`step_idx` 的序列,则这个时间步返回值中只包含了这 :code:`num_sequences` 个序列第 :code:`step_idx` 时间步的数据。数据类型和输入一致。如果 :code:`x.lod_level == 1` ,返回值的维度是 :math:`\{num\_sequences, x.shape[1], ...\}`。否则,返回值也是一个变长的LoDTensor。 + +返回类型:Variable + +抛出异常: + - :code:`ValueError` :当 :code:`step_input()` 接口在RNN :code:`block()` 接口外面被调用时。 + - :code:`TypeError`:当输入x类型不是Variable时。 + + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + sentence = fluid.data(name='sentence', shape=[None, 1], dtype='int64', lod_level=1) + embedding = fluid.layers.embedding(input=sentence, size=[65536, 32], is_sparse=True) + + drnn = fluid.layers.DynamicRNN() + with drnn.block(): + # 将embedding标记为RNN的输入,每个时间步取句子中的一个字进行处理 + word = drnn.step_input(embedding) + # 将memory初始化为一个值为0的常量Tensor,shape=[batch_size, 200],其中batch_size由输入embedding决定 + memory = drnn.memory(shape=[200]) + hidden = fluid.layers.fc(input=[word, memory], size=200, act='relu') + # 用hidden更新memory + drnn.update_memory(ex_mem=memory, new_mem=hidden) + # 将hidden标记为RNN的输出 + drnn.output(hidden) + + # 获得RNN的计算结果 + rnn_output = drnn() + + +.. _cn_api_fluid_layers_DynamicRNN_static_input: + +成员函数 static_input +--------------------------------- + +.. py:method:: static_input(x) + +将变量设置为RNN的静态输入。 + +- 示例1,静态输入携带LoD信息 + +.. code-block:: text + + # RNN的输入见step_input中的示例 + # 静态输入,其中Si代表维度为[1, M]的数据 + x.lod = [[3, 1, 2]] + x.shape = [6, M] + x.data = [[S0], + [S0], + [S0], + [S1], + [S2], + [S2]] + + # step 0,持有3个序列对应的数据 + out.lod = [[2, 3, 1]] + out.shape = [6, M] + out.data = [[S2], + [S2], + [S0], + [S0], + [S0], + [S1]] + + # step 1,持有2个序列对应的数据 + out.lod = [[2, 3]] + out.shape = [5, M] + out.data = [[S2], + [S2], + [S0], + [S0], + [S0]] + + # step 2,持有1个序列对应的数据 + out.lod = [[2]] + out.shape = [2, M] + out.data = [[S2], + [S2]] + + +- 示例2,静态输入不携带LoD信息 + +.. 
code-block:: text + + # RNN的输入见step_input中的示例 + # 静态输入,其中Si代表维度为[1, M]的数据 + x.lod = [[]] + x.shape = [3, M] + x.data = [[S0], + [S1], + [S2]] + + # step 0,持有3个序列对应的数据 + out.lod = [[]] + out.shape = [3, M] + out.data = [[S2], + [S0], + [S1]] + + # step 1,持有2个序列对应的数据 + out.lod = [[]] + out.shape = [2, M] + out.data = [[S2], + [S0]] + + # step 2,持有1个序列对应的数据 + out.lod = [[]] + out.shape = [1, M] + out.data = [[S2]] + + +参数: + - **x** (Variable) - 静态输入序列LoDTensor,要求持有与输入LoDTensor(通过 :code:`step_input` 设置的输入)相同的序列个数。如果输入x的LoD信息为空,则会被当成由 :code:`x.shape[0]` 个长度为1序列组成。支持的数据类型有:bool,float16,float32,float64,int8,int16,int32,int64,uint8。 + +返回: 经过按照RNN输入LoD信息重排序、且收缩处理后的静态输入LoDTensor。执行第 :code:`step_idx` 个时间步时,如果输入序列中只有 :code:`num_sequences` 长度不小于 :code:`step_idx` 的序列,静态输入也会进行收缩处理,只返回对应的 :code:`num_sequences` 个序列对应的数据。数据类型和输入一致。如果 :code:`x.lod == None` ,返回值的维度是 :math:`\{num\_sequences, x.shape[1], ...\}` 。否则,返回值是一个变长的LoDTensor。 + +返回类型:Variable + +抛出异常: + - :code:`ValueError`:当 :code:`static_input()` 接口在RNN :code:`block()` 接口外面被调用时。 + - :code:`TypeError`:当输入x类型不是Variable类型时。 + - :code:`RuntimeError`:当 :code:`static_input()` 接口在 :code:`step_input()` 接口之前被调用时。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + sentence = fluid.data(name='sentence', shape=[None, 32], dtype='float32', lod_level=1) + encoder_proj = fluid.data(name='encoder_proj', shape=[None, 32], dtype='float32', lod_level=1) + decoder_boot = fluid.data(name='boot', shape=[None, 10], dtype='float32') + + drnn = fluid.layers.DynamicRNN() + with drnn.block(): + # 将sentence标记为RNN的输入,每个时间步取句子中的一个字进行处理 + current_word = drnn.step_input(sentence) + # 将encode_proj标记为RNN的静态输入 + encoder_word = drnn.static_input(encoder_proj) + # 使用boot_memory初始化memory,并且需要依据输入序列进行重排序 + memory = drnn.memory(init=decoder_boot, need_reorder=True) + fc_1 = fluid.layers.fc(input=encoder_word, size=30) + fc_2 = fluid.layers.fc(input=current_word, size=30) + decoder_inputs = fc_1 + fc_2 + hidden, _, _ = fluid.layers.gru_unit(input=decoder_inputs, hidden=memory, size=30) + # 用hidden更新memory + drnn.update_memory(ex_mem=memory, new_mem=hidden) + out = fluid.layers.fc(input=hidden, size=10, bias_attr=True, act='softmax') + # 将out标记为RNN的输出 + drnn.output(out) + + # 获得RNN的计算结果 + rnn_output = drnn() + + +.. _cn_api_fluid_layers_DynamicRNN_block: + +成员函数 block +--------------------------------- + +.. py:method:: block() + +定义每个时间步执行的操作。 :code:`block` 语句里面定义的算子序列,将会被执行 :code:`max_sequence_len` 次( :code:`max_sequence_len` 是输入序列中大的序列长度)。 + +抛出异常: + - :code:`ValueError`:当RNN :code:`block()` 接口被多次调用时。 + + +.. _cn_api_fluid_layers_DynamicRNN_memory: + +成员函数 memory +--------------------------------- + +.. 
py:method:: memory(init=None, shape=None, value=0.0, need_reorder=False, dtype='float32') + +为RNN创建一个memory变量,用于在时间步之间传递信息。 +它可以用一个已有的Tensor来初始化,也可以初始化为一个特定维度的常量Tensor。 + +参数: + - **init** (Variable,可选) – 设置memory初始值的LoDTensor。如果init不是None,将使用init来初始化memory,要求持有与输入LoDTensor(通过 :code:`step_input` 设置的输入)相同的序列个数。如果输入init的LoD信息为空,则会被当成由 :code:`init.shape[0]` 个长度为1序列组成。默认值是None。 + - **shape** (list|tuple,可选) – 当init是None时,用来设置memory的维度。注意:shape中不包含batch_size。若设置 :math:`shape=\{D_1, D_2, ...\}`,memory Tensor的实际维度为 :math:`\{batch\_size, D_1, D_2, ...\}`,其中batch_size由输入序列决定。默认值是None。 + - **value** (float,可选) – 当init是None时,用来设置memory的初始值。默认值是0.0。 + - **need_reorder** (bool,可选) – 当init不是None时,用来决定init是否需要重新排序。动态RNN在计算时,会按照输入LoDTensor中序列的长度对输入进行排序,因此当init中的信息与输入序列样本紧密关联时,需要设置 :code:`need_reorder=True`。默认值是False。 + - **dtype** (str|numpy.dtype,可选) – 当init是None是,初始化memory的数据类型。默认值是"float32"。可设置的字符串值有:"float32","float64","int32","int64"。 + +返回:经过收缩处理后的memory LoDTensor。执行第 :code:`step_idx` 个时间步时,如果输入序列中只有 :code:`num_sequences` 长度不小于 :code:`step_idx` 的序列,memory也会进行收缩处理,只返回对应的 :code:`num_sequences` 个序列对应的数据。 + +返回类型:Variable + +抛出异常: + - :code:`ValueError`:当 :code:`memory()` 接口在RNN :code:`block()` 接口外面被调用时。 + - :code:`TypeError`:当init被设置了,但是不是Variable类型时。 + - :code:`ValueError`:当 :code:`memory()` 接口在 :code:`step_input()` 接口之前被调用时。 + +**代码示例一** + +.. code-block:: python + + import paddle.fluid as fluid + + sentence = fluid.data(name='sentence', shape=[None, 32], dtype='float32', lod_level=1) + boot_memory = fluid.data(name='boot', shape=[None, 10], dtype='float32') + + drnn = fluid.layers.DynamicRNN() + with drnn.block(): + # 将sentence标记为RNN的输入,每个时间步取句子中的一个字进行处理 + word = drnn.step_input(sentence) + # 使用boot_memory初始化memory,并且需要依据输入序列进行重排序 + memory = drnn.memory(init=boot_memory, need_reorder=True) + hidden = fluid.layers.fc(input=[word, memory], size=10, act='tanh') + # 用hidden更新memory + drnn.update_memory(ex_mem=memory, new_mem=hidden) + # 将hidden标记为RNN的输出 + drnn.output(hidden) + + # 获得RNN的计算结果 + rnn_output = drnn() + + +**代码示例二** + +.. code-block:: python + + import paddle.fluid as fluid + + sentence = fluid.data(name='sentence', shape=[None, 32], dtype='float32', lod_level=1) + + drnn = fluid.layers.DynamicRNN() + with drnn.block(): + # 将sentence标记为RNN的输入,每个时间步取句子中的一个字进行处理 + word = drnn.step_input(sentence) + # 将memory初始化为一个值为0的常量Tensor,shape=[batch_size, 10],其中batch_size由输入sentence决定 + memory = drnn.memory(shape=[10], dtype='float32', value=0) + hidden = fluid.layers.fc(input=[word, memory], size=10, act='tanh') + # 用hidden更新memory + drnn.update_memory(ex_mem=memory, new_mem=hidden) + # 将hidden标记为RNN的输出 + drnn.output(hidden) + + # 获得RNN的计算结果 + rnn_output = drnn() + + +.. _cn_api_fluid_layers_DynamicRNN_update_memory: + +成员函数 update_memory +--------------------------------- + +.. py:method:: update_memory(ex_mem, new_mem) + +将需要在时间步之间传递的信息更新。 + +参数: + - **ex_mem** (Variable) - 上一个时间步的信息。 + - **new_mem** (Variable) - 新的时间步信息。:code:`new_mem` 的维度和数据类型必须与 :code:`ex_mem` 一致。 + +返回:无 + +抛出异常: + - :code:`ValueError`:当 :code:`update_memory()` 接口在RNN :code:`block()` 接口外面被调用时。 + - :code:`TypeError`:当 :code:`ex_mem` 或 :code:`new_mem` 不是Variable类型时。 + - :code:`ValueError`:当 :code:`ex_mem` 不是使用 :code:`memory()` 接口定义的memory时。 + - :code:`ValueError`:当 :code:`update_memory()` 接口在 :code:`step_input()` 接口之前被调用时。 + + +.. _cn_api_fluid_layers_DynamicRNN_output: + +成员函数 output +--------------------------------- + +.. py:method:: output(*outputs) + +设置outputs为RNN每个时间步的输出变量。 + +参数: + - **\*outputs** (Variable ...) 
- 输出Tensor,可同时将多个Variable标记为输出。 + +返回:无 + +抛出异常: + - :code:`ValueError`:当 :code:`output()` 接口在RNN :code:`block()` 接口外面被调用时。 + + +.. _cn_api_fluid_layers_DynamicRNN_call: + +成员函数 __call__ +--------------------------------- + +.. py:method:: __call__() + +获取RNN计算的输出序列。 + +若定义了 :code:`drnn = DynamicRNN()`,则可以调用 :code:`drnn()` 获得输出序列,该输出序列是通过将每一个时间步的output数据合并得到的一个LoDTensor。 +当RNN的输入x(通过 :code:`step_input()` 接口设置)的 :code:`x.lod_level` 为1时,该输出LoDTensor将会和输入x持有完全相同的LoD信息。 +通过 :code:`drnn()` 获取的RNN输出LoDTensor中包含了所有时间步的计算结果,可调用 :ref:`cn_api_fluid_layers_sequence_last_step` 获取最后一个时间步的计算结果。 + +参数: + 无 + +返回:RNN的输出序列。 + +返回类型:Variable或Variable list + +抛出异常: + - :code:`ValueError` :当 :code:`__call__()` 接口在RNN :code:`block()` 定义之前被调用时。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + sentence = fluid.data(name='sentence', shape=[None, 32], dtype='float32', lod_level=1) + encoder_proj = fluid.data(name='encoder_proj', shape=[None, 32], dtype='float32', lod_level=1) + decoder_boot = fluid.data(name='boot', shape=[None, 10], dtype='float32') + + drnn = fluid.layers.DynamicRNN() + with drnn.block(): + # 将sentence标记为RNN的输入,每个时间步取句子中的一个字进行处理 + current_word = drnn.step_input(sentence) + # 将encode_proj标记为RNN的静态输入 + encoder_word = drnn.static_input(encoder_proj) + # 使用boot_memory初始化memory,并且需要依据输入序列进行重排序 + memory = drnn.memory(init=decoder_boot, need_reorder=True) + fc_1 = fluid.layers.fc(input=encoder_word, size=30) + fc_2 = fluid.layers.fc(input=current_word, size=30) + decoder_inputs = fc_1 + fc_2 + hidden, _, _ = fluid.layers.gru_unit(input=decoder_inputs, hidden=memory, size=30) + # 用hidden更新memory + drnn.update_memory(ex_mem=memory, new_mem=hidden) + out = fluid.layers.fc(input=hidden, size=10, bias_attr=True, act='softmax') + # 将hidden和out标记为RNN的输出 + drnn.output(hidden, out) + + # 获得RNN的计算结果 + hidden, out = drnn() + # 提取RNN最后一个时间步的计算结果 + last = fluid.layers.sequence_last_step(out) diff --git a/doc/paddle/api/paddle/fluid/layers/GRUCell_cn.rst b/doc/paddle/api/paddle/fluid/layers/GRUCell_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..953c666ca9c83a38275893507de8156229438f11 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/GRUCell_cn.rst @@ -0,0 +1,65 @@ +.. _cn_api_fluid_layers_GRUCell: + +GRUCell +------------------------------- + + +.. py:class:: paddle.fluid.layers.GRUCell(hidden_size, param_attr=None, bias_attr=None, gate_activation=None, activation=None, dtype="float32", name="GRUCell") + + + + +门控循环单元(Gated Recurrent Unit)。通过对 :code:`fluid.contrib.layers.rnn_impl.BasicGRUUnit` 包装,来让它可以应用于RNNCell。 + +公式如下: + +.. math:: + u_t & = act_g(W_{ux}x_{t} + W_{uh}h_{t-1} + b_u)\\ + r_t & = act_g(W_{rx}x_{t} + W_{rh}h_{t-1} + b_r)\\ + \tilde{h_t} & = act_c(W_{cx}x_{t} + W_{ch}(r_t \odot h_{t-1}) + b_c)\\ + h_t & = u_t \odot h_{t-1} + (1-u_t) \odot \tilde{h_t} + +更多细节可以参考 `Learning Phrase Representations using RNN Encoder Decoder for Statistical Machine Translation `_ + +参数: + - **hidden_size** (int) - GRUCell中的隐藏层大小。 + - **param_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr`。 + - **bias_attr** (ParamAttr,可选) - 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **gate_activation** (function,可选) - :math:`act_g` 的激活函数。 默认值为 :code:`fluid.layers.sigmoid`。 + - **activation** (function,可选) - :math:`act_c` 的激活函数。 默认值为 :code:`fluid.layers.tanh` + - **dtype** (string,可选) - 此cell中使用的数据类型。 默认为"float32"。 + - **name** (string,可选) - 用于标识参数和偏差的名称域。 + +返回:GRUCell类的实例对象。 + +**示例代码** + +.. 
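以下先给出一个单步调用的最小示意(输入维度、隐藏层大小均为假设值),与下文 call 方法的说明相对应;随后是最简单的构造示例:

.. code-block:: python

    import paddle.fluid as fluid
    import paddle.fluid.layers as layers

    # 单步输入与上一时刻的隐状态(形状中的128、256均为假设值)
    step_input = fluid.data(name='step_input', shape=[None, 128], dtype='float32')
    pre_hidden = fluid.data(name='pre_hidden', shape=[None, 256], dtype='float32')

    cell = layers.GRUCell(hidden_size=256)
    # 返回 (outputs, new_states),对GRUCell而言两者为同一个tensor,形状为[batch_size, 256]
    output, new_hidden = cell(step_input, pre_hidden)
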
code-block:: python + + import paddle.fluid.layers as layers + cell = layers.GRUCell(hidden_size=256) + + +.. py:method:: call(inputs, states) + +执行GRU的计算。 + +参数: + - **input** (Variable) - 输入,形状为 :math:`[batch\_size,input\_size]` 的tensor,对应于公式中的 :math:`x_t` 。数据类型应为float32。 + - **states** (Variable) - 状态,形状为 :math:`[batch\_size,hidden\_size]` 的tensor。 对应于公式中的 :math:`h_{t-1}` 。数据类型应为float32。 + +返回:一个元组 :code:`(outputs, new_states)` ,其中 :code:`outputs` 和 :code:`new_states` 是同一个tensor,其形状为 :math:`[batch\_size,hidden\_size]`,数据类型和 :code:`state` 的数据类型相同,对应于公式中的 :math:`h_t`。 + +返回类型:tuple + +.. py:method:: state_shape() + +GRUCell的 :code:`state_shape` 是形状 :math:`[hidden\_size]` (batch大小为-1,自动插入到形状中),对应于 :math:`h_{t-1}` 的形状。 + +参数:无。 + +返回:GRUCell的 :code:`state_shape`。 + +返回类型:Variable + + diff --git a/doc/paddle/api/paddle/fluid/layers/GreedyEmbeddingHelper_cn.rst b/doc/paddle/api/paddle/fluid/layers/GreedyEmbeddingHelper_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a01e1ab3e575b54855d774e29057ffc2b7d04a8f --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/GreedyEmbeddingHelper_cn.rst @@ -0,0 +1,74 @@ +.. _cn_api_fluid_layers_GreedyEmbeddingHelper: + +GreedyEmbeddingHelper +------------------------------- + + +.. py:class:: paddle.fluid.layers.GreedyEmbeddingHelper(embedding_fn, start_tokens, end_token) + +GreedyEmbeddingHelper是 :ref:`cn_api_fluid_layers_DecodeHelper` 的子类。作为解码helper,它使用 :code:`argmax` 进行采样,并将采样结果送入embedding层,以此作为下一解码步的输入。 + +参数: + - **embedding_fn** (callable) - 作用于 :code:`argmax` 结果的函数,通常是一个将词id转换为词嵌入的embedding层,**注意** ,这里要使用 :ref:`cn_api_fluid_embedding` 而非 :ref:`cn_api_fluid_layers_embedding`,因为选中的id的形状是 :math:`[batch\_size]` ,如果使用后者则还需要在这里提供unsqueeze。 + - **start_tokens** (Variable) - 形状为 :math:`[batch\_size]` 、数据类型为int64、 值为起始标记id的tensor。 + - **end_token** (int) - 结束标记id。 + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.layers as layers + + start_tokens = fluid.data(name="start_tokens", + shape=[None], + dtype="int64") + + trg_embeder = lambda x: fluid.embedding( + x, size=[10000, 128], param_attr=fluid.ParamAttr(name="trg_embedding")) + output_layer = lambda x: layers.fc(x, + size=10000, + num_flatten_dims=len(x.shape) - 1, + param_attr=fluid.ParamAttr(name= + "output_w"), + bias_attr=False) + helper = layers.GreedyEmbeddingHelper(trg_embeder, start_tokens=start_tokens, end_token=1) + decoder_cell = layers.GRUCell(hidden_size=128) + decoder = layers.BasicDecoder(decoder_cell, helper, output_fn=output_layer) + outputs = layers.dynamic_decode( + decoder=decoder, inits=decoder_cell.get_initial_states(start_tokens)) + +.. py:method:: initialize() + +GreedyEmbeddingHelper初始化,其使用构造函数中的 :code:`start_tokens` 作为第一个解码步的输入,并给出每个序列是否结束的初始标识。这是 :ref:`cn_api_fluid_layers_BasicDecoder` 初始化的一部分。 + +返回::code:`(initial_inputs, initial_finished)` 的二元组, :code:`initial_inputs` 同构造函数中的 :code:`start_tokens` ; :code:`initial_finished` 是一个bool类型、值为False的tensor,其形状和 :code:`start_tokens` 相同。 + +返回类型:tuple + +.. 
py:method:: sample(time, outputs, states) + +使用 :code:`argmax` 根据 `outputs` 进行采样。 + +参数: + - **time** (Variable) - 调用者提供的形状为[1]的tensor,表示当前解码的时间步长。其数据类型为int64。 + - **outputs** (Variable) - tensor变量,通常其数据类型为float32或float64,形状为 :math:`[batch\_size, vocabulary\_size]` ,表示当前解码步预测产生的logit(未归一化的概率),和由 :code:`BasicDecoder.output_fn(BasicDecoder.cell.call())` 返回的 :code:`outputs` 是同一内容。 + - **states** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构,和由 :code:`BasicDecoder.cell.call()` 返回的 :code:`new_states` 是同一内容。 + +返回:数据类型为int64形状为 :math:`[batch\_size]` 的tensor,表示采样得到的id。 + +返回类型:Variable + +.. py:method:: next_inputs(time, outputs, states, sample_ids) + +对 :code:`sample_ids` 使用 :code:`embedding_fn` ,以此作为下一解码步的输入;同时直接使用输入参数中的 :code:`states` 作为下一解码步的状态;并通过判别 :code:`sample_ids` 是否得到 :code:`end_token`,依此产生每个序列是否结束的标识。 + +参数: + - **time** (Variable) - 调用者提供的形状为[1]的tensor,表示当前解码的时间步长。其数据类型为int64。 + - **outputs** (Variable) - tensor变量,通常其数据类型为float32或float64,形状为 :math:`[batch\_size, vocabulary\_size]` ,表示当前解码步预测产生的logit(未归一化的概率),和由 :code:`BasicDecoder.output_fn(BasicDecoder.cell.call())` 返回的 :code:`outputs` 是同一内容。 + - **states** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构,和由 :code:`BasicDecoder.cell.call()` 返回的 :code:`new_states` 是同一内容。 + - **sample_ids** (Variable) - 数据类型为int64形状为 :math:`[batch\_size]` 的tensor,和由 :code:`sample()` 返回的 :code:`sample_ids` 是同一内容。 + +返回: :code:`(finished, next_inputs, next_states)` 的三元组。 :code:`next_inputs, next_states` 均是单个tensor变量或tensor变量组成的嵌套结构,tensor的形状是 :math:`[batch\_size, ...]` , :code:`next_states` 和输入参数中的 :code:`states` 相同; :code:`finished` 是一个bool类型且形状为 :math:`[batch\_size]` 的tensor。 + +返回类型:tuple diff --git a/doc/paddle/api/paddle/fluid/layers/IfElse_cn.rst b/doc/paddle/api/paddle/fluid/layers/IfElse_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5801ac0d46f39a56ba60ade799be817850077454 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/IfElse_cn.rst @@ -0,0 +1,84 @@ +.. _cn_api_fluid_layers_IfElse: + +IfElse +------------------------------- + + +.. py:class:: paddle.fluid.layers.IfElse(cond, name=None) + + + + +该类用于实现IfElse分支控制功能, IfElse包含两个Block,true_block,false_block,IfElse会将满足True或False条件的数据分别放入不同的block运行。 + +cond是一个shape为[N, 1]、数据类型为bool的2-D tensor,表示输入数据对应部分的执行条件。 + +.. note:: + 如果参数 ``cond`` 的形状为[1],强烈建议您使用新的OP :ref:`cn_api_fluid_layers_cond` 而不是 ``IfElse``。 + OP :ref:`cn_api_fluid_layers_cond` 的使用方式更简单,并且调用该OP所用的代码更少且功能与 ``IfElse`` 一样。 + +IfElse OP同其他的OP在使用上有一定的区别,可能会对一些用户造成一定的困惑,以下展示了一个 +简单的样例对该OP进行说明。 + +.. 
code-block:: python + + # 以下代码完成的功能:对x中大于0的数据减去10,对x中小于0的数据加上10,并将所有的数据求和 + import numpy as np + import paddle.fluid as fluid + + x = fluid.layers.data(name='x', shape=[4, 1], dtype='float32', append_batch_size=False) + y = fluid.layers.data(name='y', shape=[4, 1], dtype='float32', append_batch_size=False) + + x_d = np.array([[3], [1], [-2], [-3]]).astype(np.float32) + y_d = np.zeros((4, 1)).astype(np.float32) + + # 比较x, y对元素的大小,输出cond, cond是shape为[4, 1],数据类型为bool的2-D tensor。 + # 根据输入数据x_d, y_d,可以推断出cond中的数据为[[true], [true], [false], [false]] + cond = fluid.layers.greater_than(x, y) + # 同其他常见OP不同的是,该OP返回的ie是一个IfElse OP的对象 + ie = fluid.layers.IfElse(cond) + + with ie.true_block(): + # 在这个block中,根据cond条件,获取x中对应条件为true维度的数据,并减去10 + out_1 = ie.input(x) + out_1 = out_1 - 10 + ie.output(out_1) + with ie.false_block(): + # 在这个block中,根据cond条件,获取x中对应条件为false维度的数据,并加上10 + out_1 = ie.input(x) + out_1 = out_1 + 10 + ie.output(out_1) + + # 根据cond条件将两个block中处理后的数据进行合并,此处的output为输出,类型为List,List中的元素类型为Variable。 + output = ie() # [array([[-7.], [-9.], [ 8.], [ 7.]], dtype=float32)] + + # 将输出List中的第一个Variable获取出来,并计算所有元素和 + out = fluid.layers.reduce_sum(output[0]) + + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(fluid.default_startup_program()) + + res = exe.run(fluid.default_main_program(), feed={"x":x_d, "y":y_d}, fetch_list=[out]) + print(res) + # [array([-1.], dtype=float32)] + +参数: + - **cond** (Variable)- cond是一个shape为[N, 1]、数据类型为bool的2-D tensor,表示N个输入数据的对应的执行条件。数据类型为bool。 + - **Name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +**返回:** + +同其他常见OP不同的是,该OP调用返回一个IfElse OP对象(如例子中的 ie),通过调用对象内部函数 ``true_block()`` , ``false_block()`` , ``input()`` , ``output()`` 对输入数据进行分支处理, +通过调用内部的 ``__call__()`` 函数,将不同分支处理的数据进行整合,作为整体的输出,输出类型为列表,列表中每个元素的类型为Variable。 + +**内部函数:** + +- 通过调用对象中的 ``with ie.true_block()`` 函数构建block,将条件为true下的计算逻辑放入此block中。如果没有构建相应的block,则对应条件维度下的输入数据不做改变。 + +- 通过调用对象中的 ``with ie.false_block()`` 函数构建block,将条件为false下的计算逻辑放入此block中。如果没有构建相应的block,则对应条件维度下的输入数据不做改变。 + +- ``out = ie.input(x)`` 会将x中对应条件维度的数据获取出来放入到out中,支持block内部处理多个输入。 + +- ``ie.output(out)`` 会将结果写入对应条件的输出中。 + +- 对象内部有 ``__call__()`` 函数,即通过对 ``output = ie()`` 的调用,将条件分别为True,False的block内部所有的输出进行融合作为整体的输出,输出的类型为列表,列表中每个元素的类型为Variable。 diff --git a/doc/paddle/api/paddle/fluid/layers/LSTMCell_cn.rst b/doc/paddle/api/paddle/fluid/layers/LSTMCell_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..cebebb05235fe4cb9338ef681d4bd51d17dd1f1d --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/LSTMCell_cn.rst @@ -0,0 +1,66 @@ +.. _cn_api_fluid_layers_LSTMCell: + +LSTMCell +------------------------------- + + + +.. py:class:: paddle.fluid.layers.LSTMCell(hidden_size, param_attr=None, bias_attr=None, gate_activation=None, activation=None, forget_bias=1.0, dtype="float32", name="LSTMCell") + + + + +长短期记忆单元(Long-Short Term Memory)。通过对 :code:`fluid.contrib.layers.rnn_impl.BasicLSTMUnit` 包装,来让它可以应用于RNNCell。 + +公式如下: + +.. 
math:: + i_{t} &= act_g \left ( W_{x_{i}}x_{t}+W_{h_{i}}h_{t-1}+b_{i} \right ) \\ + f_{t} &= act_g \left ( W_{x_{f}}x_{t}+W_{h_{f}}h_{t-1}+b_{f}+forget\_bias \right ) \\ + c_{t} &= f_{t}c_{t-1}+i_{t}act_h\left ( W_{x_{c}}x_{t} +W_{h_{c}}h_{t-1}+b_{c}\right ) \\ + o_{t} &= act_g\left ( W_{x_{o}}x_{t}+W_{h_{o}}h_{t-1}+b_{o} \right ) \\ + h_{t} &= o_{t}act_h \left ( c_{t} \right ) + +更多细节可以参考 `RECURRENT NEURAL NETWORK REGULARIZATION `_ + +参数: + - **hidden_size** (int) - LSTMCell中的隐藏层大小。 + - **param_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr`。 + - **bias_attr** (ParamAttr,可选) - 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **gate_activation** (function,可选) - :math:`act_g` 的激活函数。 默认值为 :code:`fluid.layers.sigmoid`。 + - **activation** (function,可选) - :math:`act_c` 的激活函数。 默认值为 :code:`fluid.layers.tanh`。 + - **forget_bias** (float,可选) - 计算遗忘们时使用的遗忘偏置。默认值为 1.0。 + - **dtype** (string,可选) - 此Cell中使用的数据类型。 默认值为 `float32`。 + - **name** (string,可选) - 用于标识参数和偏差的名称域。 + +返回:LSTMCell类的实例对象。 + +**示例代码** + +.. code-block:: python + + import paddle.fluid.layers as layers + cell = layers.LSTMCell(hidden_size=256) + + +.. py:method:: call(inputs, states) + +执行GRU的计算。 + +参数: + - **input** (Variable) - 输入,形状为 :math:`[batch\_size,input\_size]` 的tensor,对应于公式中的 :math:`x_t`。数据类型应为float32。 + - **states** (Variable) - 状态,包含两个tensor的列表,每个tensor形状为 :math:`[batch\_size,hidden\_size]`。 对应于公式中的 :math:`h_{t-1}, c_{t-1}`。数据类型应为float32。 + +返回:一个元组 :code:`(outputs, new_states)`,其中 :code:`outputs` 是形状为 :math:`[batch\_size,hidden\_size]` 的tensor,对应于公式中的 :math:`h_{t}`;:code:`new_states` 是一个列表,包含形状为 :math:`[batch_size,hidden_size]` 的两个tensor变量,它们对应于公式中的 :math:`h_{t}, c_{t}`。这些tensor的数据类型都与 :code:`state` 的数据类型相同。 + +返回类型:tuple + +.. py:method:: state_shape() + +LSTMCell的 :code:`state_shape` 是一个具有两个形状的列表::math:`[[hidden\_size], [hidden\_size]]` (batch大小为-1,自动插入到形状中)。 这两个形状分别对应于公式中的 :math:`h_{t-1}` and :math:`c_{t-1}`。 + +参数:无。 + +返回:LSTMCell的 :code:`state_shape` + +返回类型:list diff --git a/doc/paddle/api/paddle/fluid/layers/MultivariateNormalDiag_cn.rst b/doc/paddle/api/paddle/fluid/layers/MultivariateNormalDiag_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..63e1d1bb2492d6f56c6ccbd7d7ada4505087ec25 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/MultivariateNormalDiag_cn.rst @@ -0,0 +1,94 @@ +.. _cn_api_fluid_layers_MultivariateNormalDiag: + +MultivariateNormalDiag +------------------------------- + +.. py:class:: paddle.fluid.layers.MultivariateNormalDiag(loc, scale) + + + + +多元高斯分布 + +概率密度函数(pdf)为: + +.. math:: + + pdf(x; loc, scale) = \frac{e^{-\frac{||y||^2}{2}}}{Z} + + y = inv(scale) @ (x - loc) + + Z = (2\pi )^{0.5k} |det(scale)| + +上面公式中: + - :math:`inv` 表示: 对矩阵求逆 + - :math:`@` 表示:矩阵相乘 + - :math:`det` 表示:求行列式的值 + + +参数: + - **loc** (list|numpy.ndarray|Variable) - 形状为 :math:`[k]` 的多元高斯分布的均值列表。数据类型为float32。 + - **scale** (list|numpy.ndarray|Variable) - 形状为 :math:`[k, k]` 的多元高斯分布的对角协方差矩阵,且除对角元素外,其他元素取值均为0。数据类型为float32。 + +**代码示例**: + +.. 
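code-block:: python

    # 注:以下为补充示意(仅作参考)。按上文参数说明,loc 与 scale 也可以直接传入
    # numpy 数组,数值均为假设值;更完整的用法见下方示例。
    import numpy as np
    from paddle.fluid.layers import MultivariateNormalDiag

    loc = np.array([0.3, 0.5], dtype="float32")
    scale = np.array([[0.4, 0.0], [0.0, 0.5]], dtype="float32")
    dist = MultivariateNormalDiag(loc, scale)
    entropy = dist.entropy()   # 形状为 [1] 的 Tensor

.. 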
code-block:: python + + import numpy as np + from paddle.fluid import layers + from paddle.fluid.layers import MultivariateNormalDiag + + a_loc_npdata = np.array([0.3,0.5],dtype="float32") + a_loc_tensor = layers.create_tensor(dtype="float32") + layers.assign(a_loc_npdata, a_loc_tensor) + + + a_scale_npdata = np.array([[0.4,0],[0,0.5]],dtype="float32") + a_scale_tensor = layers.create_tensor(dtype="float32") + layers.assign(a_scale_npdata, a_scale_tensor) + + b_loc_npdata = np.array([0.2,0.4],dtype="float32") + b_loc_tensor = layers.create_tensor(dtype="float32") + layers.assign(b_loc_npdata, b_loc_tensor) + + b_scale_npdata = np.array([[0.3,0],[0,0.4]],dtype="float32") + b_scale_tensor = layers.create_tensor(dtype="float32") + layers.assign(b_scale_npdata, b_scale_tensor) + + a = MultivariateNormalDiag(a_loc_tensor, a_scale_tensor) + b = MultivariateNormalDiag(b_loc_tensor, b_scale_tensor) + + a.entropy() + # [2.033158] with shape: [1] + b.entropy() + # [1.7777451] with shaoe: [1] + + a.kl_divergence(b) + # [0.06542051] with shape: [1] + + +.. py:function:: kl_divergence(other) + +计算相对于另一个多元高斯分布的KL散度 + +参数: + - **other** (MultivariateNormalDiag) - 输入的另一个多元高斯分布。数据类型为float32。 + +返回:相对于另一个多元高斯分布的KL散度,数据类型为float32 + +返回类型:Variable + +.. py:function:: entropy() + +信息熵 + +返回:多元高斯分布的信息熵,数据类型为float32 + +返回类型:Variable + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/Normal_cn.rst b/doc/paddle/api/paddle/fluid/layers/Normal_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ce50e67bd12563ee7c24b6ab4141acf0ccf0c303 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/Normal_cn.rst @@ -0,0 +1,115 @@ +.. _cn_api_fluid_layers_Normal: + +Normal +------------------------------- + +.. py:class:: paddle.fluid.layers.Normal(loc, scale) + + + + +正态分布 + +数学公式: + +.. math:: + + pdf(x; \mu, \sigma) = \frac{1}{Z}e^{\frac {-0.5 (x - \mu)^2} {\sigma^2} } + + Z = (2 \pi \sigma^2)^{0.5} + +上面的数学公式中: + +:math:`loc = \mu` : 平均值。 +:math:`scale = \sigma` : 标准差。 +:math:`Z`: 正态分布常量。 + +参数: + - **loc** (float|list|numpy.ndarray|Variable) - 正态分布平均值。数据类型为float32。 + - **scale** (float|list|numpy.ndarray|Variable) - 正态分布标准差。数据类型为float32。 + +**代码示例**: + +.. code-block:: python + + import numpy as np + from paddle.fluid import layers + from paddle.fluid.layers import Normal + + # 定义参数为float的正态分布。 + dist = Normal(loc=0., scale=3.) + # 定义一组有两个数的正态分布。 + # 第一组为均值1,标准差11,第二组为均值2,标准差22。 + dist = Normal(loc=[1., 2.], scale=[11., 22.]) + # 得到3个样本, 返回一个 3 x 2 张量。 + dist.sample([3]) + + # 通过广播的方式,定义一个两个参数的正态分布。 + # 均值都是1,标准差不同。 + dist = Normal(loc=1., scale=[11., 22.]) + + # 一个完整的例子 + value_npdata = np.array([0.8], dtype="float32") + value_tensor = layers.create_tensor(dtype="float32") + layers.assign(value_npdata, value_tensor) + + normal_a = Normal([0.], [1.]) + normal_b = Normal([0.5], [2.]) + + sample = normal_a.sample([2]) + # 一个由定义好的正太分布随机生成的张量,维度为: [2, 1] + entropy = normal_a.entropy() + # [1.4189385] with shape: [1] + lp = normal_a.log_prob(value_tensor) + # [-1.2389386] with shape: [1] + kl = normal_a.kl_divergence(normal_b) + # [0.34939718] with shape: [1] + + +.. py:function:: sample(shape, seed=0) + +生成指定维度的样本 + +参数: + - **shape** (list) - 1维列表,指定生成样本的维度。数据类型为int32。 + - **seed** (int) - 长整型数。 + +返回:预先设计好维度的张量, 数据类型为float32 + +返回类型:Variable + +.. py:function:: entropy() + +信息熵 + +返回:正态分布的信息熵, 数据类型为float32 + +返回类型:Variable + +.. py:function:: log_prob(value) + +对数概率密度函数 + +参数: + - **value** (Variable) - 输入张量。数据类型为float32或float64。 + +返回:对数概率, 数据类型与value相同 + +返回类型:Variable + +.. 
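code-block:: python

    # 注:以下为补充示意(仅作参考),演示如何通过执行器取出 log_prob 的计算结果,
    # 输入数值均为假设值。
    import numpy as np
    import paddle.fluid as fluid
    from paddle.fluid.layers import Normal

    value = fluid.data(name="value", shape=[1], dtype="float32")
    dist = Normal(loc=[0.], scale=[1.])
    lp = dist.log_prob(value)

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())
    res = exe.run(fluid.default_main_program(),
                  feed={"value": np.array([0.8], dtype="float32")},
                  fetch_list=[lp])
    print(res)  # 约为 [-1.2389],与上文类示例中的结果一致

.. 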
py:function:: kl_divergence(other) + +两个正态分布之间的KL散度。 + +参数: + - **other** (Normal) - Normal的实例。 + +返回:两个正态分布之间的KL散度, 数据类型为float32 + +返回类型:Variable + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/RNNCell_cn.rst b/doc/paddle/api/paddle/fluid/layers/RNNCell_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..78fad49a5a7eaf318f8bc9c23423a55fe1b3e0b8 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/RNNCell_cn.rst @@ -0,0 +1,49 @@ +.. _cn_api_fluid_layers_RNNCell: + +RNNCell +------------------------------- + + + +.. py:class:: paddle.fluid.layers.RNNCell(name=None) + + + +RNNCell是抽象的基类,代表将输入和状态映射到输出和新状态的计算,主要用于RNN。 + +.. py:method:: call(inputs, states, **kwargs) + +每个cell都必须实现此接口,将(输入和状态)映射到(输出和新状态)。为了更灵活,输入和状态都可以是单个tensor变量或嵌套结构的tensor变量(列表 | 元组 | 命名元组 | 字典)。 + +参数: + - **inputs** - 输入,为单个tensor变量或tensor变量组成的嵌套结构。 + - **states** - 状态,单个tensor变量或tensor变量组成的嵌套结构。 + - **kwargs** - 附加的关键字参数,由调用者提供。 +         +返回:包含输出和新状态的二元组 :code:`(outputs,new_states)` 。输出和新状态都可以是嵌套的tensor变量。新状态必须具有与状态相同的结构。 + +返回类型:tuple + +.. py:method:: get_initial_states(batch_ref, shape=None, dtype=None, init_value=0, batch_dim_idx=0) + +该接口根据提供的形状,数据类型和初始值来初始化状态。 + +参数: + - **batch_ref** - 单个tensor变量或tensor组成的嵌套结构。 tensor的第一维将用作初始化状态的batch大小。 + - **shape** - 单个形状或形状组成的嵌套结构,单个形状是整数的列表或元组。 如果形状的第一维不是batch大小,则自动插入-1作为batch大小。 如果该项为None,将使用属性 :code:`state_shape`。默认值为None。 + - **dtype** - 单个数据类型或由数据类型组成的嵌套结构。该结构必须与shape的结构相同,例外是当状态中的所有tensor都具有相同的数据类型,这时可以使用单个数据类型。 如果是None并且属性 :code:`cell.state_shape` 不可用,则float32将用作数据类型。 默认值为None。 + - **init_value** - 用于初始化状态的浮点值。 + - **batch_dim_idx** - 用于指示 :code:`batch_ref` 中batch所在维度的int值,默认值为0。 + +返回:和shape具有相同结构的tensor变量,代表初始状态。 + +返回类型:Variable + +.. py:method:: state_shape() + +抽象方法(属性),该接口用于初始化cell的状态。 单个形状或由形状组成的嵌套结构,单个形状可以是整数的列表或元组(如果形状的第一维不是batch大小,则自动插入-1作为batch大小)。 当没有使用 :code:`get_initial_states` 初始化状态或 :code:`get_initial_states` 没有提供 :code:`shape` 参数的时候,不用实现该方法。 + + +.. py:method:: state_dtype() + +抽象方法(属性),该接口用于初始化cell的状态。 单个数据类型或由数据类型组成的嵌套结构,该结构必须与 :code:`shape` 的结构相同,例外是当状态中的所有tensor都具有相同的数据类型,这时可以使用单个数据类型。 当没有使用 :code:`get_initial_states` 初始化状态或 :code:`get_initial_states` 没有提供 :code:`dtype` 参数的时候,不用实现该方法。 diff --git a/doc/paddle/api/paddle/fluid/layers/SampleEmbeddingHelper_cn.rst b/doc/paddle/api/paddle/fluid/layers/SampleEmbeddingHelper_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c38b80052fe9040d84d3ed3ba353e6e02cfe5a9c --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/SampleEmbeddingHelper_cn.rst @@ -0,0 +1,54 @@ +.. _cn_api_fluid_layers_SampleEmbeddingHelper: + +SampleEmbeddingHelper +------------------------------- + + +.. py:class:: paddle.fluid.layers.SampleEmbeddingHelper(embedding_fn, start_tokens, end_token, softmax_temperature=None, seed=None) + +SampleEmbeddingHelper是 :ref:`cn_api_fluid_layers_GreedyEmbeddingHelper` 的子类。作为解码helper,它通过采样而非使用 :code:`argmax` 并将采样结果送入embedding层,以此作为下一解码步的输入。 + +参数: + - **embedding_fn** (callable) - 作用于 :code:`argmax` 结果的函数,通常是一个将词id转换为词嵌入的embedding层,**注意** ,这里要使用 :ref:`cn_api_fluid_embedding` 而非 :ref:`cn_api_fluid_layers_embedding`,因为选中的id的形状是 :math:`[batch\_size]` ,如果使用后者则还需要在这里提供unsqueeze。 + - **start_tokens** (Variable) - 形状为 :math:`[batch\_size]` 、数据类型为int64、 值为起始标记id的tensor。 + - **end_token** (int) - 结束标记id。 + - **softmax_temperature** (float,可选) - 该值用于在softmax计算前除以logits。温度越高(大于1.0)随机性越大,温度越低则越趋向于argmax。该值必须大于0,默认值None等同于1.0。 + - **seed** (int,可选) - 采样使用的随机种子。默认为None,表示不使用固定的随机种子。 + +**示例代码** + +.. 
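code-block:: python

    # 注:以下为补充示意(仅作参考),演示可选参数 softmax_temperature 与 seed 的设置方式,
    # 取值仅为示例;完整的解码流程见下方示例。
    import paddle.fluid as fluid
    import paddle.fluid.layers as layers

    start_tokens = fluid.data(name="start_tokens", shape=[None], dtype="int64")
    embed_fn = lambda x: fluid.embedding(x, size=[10000, 128])
    # 按上文参数说明,温度大于 1.0 时采样更随机,小于 1.0 时更接近 argmax;seed 用于固定采样结果
    helper = layers.SampleEmbeddingHelper(
        embed_fn, start_tokens=start_tokens, end_token=1,
        softmax_temperature=0.8, seed=2020)

.. 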
code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.layers as layers + + start_tokens = fluid.data(name="start_tokens", + shape=[None], + dtype="int64") + + trg_embeder = lambda x: fluid.embedding( + x, size=[10000, 128], param_attr=fluid.ParamAttr(name="trg_embedding")) + output_layer = lambda x: layers.fc(x, + size=10000, + num_flatten_dims=len(x.shape) - 1, + param_attr=fluid.ParamAttr(name= + "output_w"), + bias_attr=False) + helper = layers.SampleEmbeddingHelper(trg_embeder, start_tokens=start_tokens, end_token=1) + decoder_cell = layers.GRUCell(hidden_size=128) + decoder = layers.BasicDecoder(decoder_cell, helper, output_fn=output_layer) + outputs = layers.dynamic_decode( + decoder=decoder, inits=decoder_cell.get_initial_states(start_tokens)) + +.. py:method:: sample(time, outputs, states) + +根据一个多项分布进行采样,此分布由 :code:`softmax(outputs/softmax_temperature)` 计算得到。 + +参数: + - **time** (Variable) - 调用者提供的形状为[1]的tensor,表示当前解码的时间步长。其数据类型为int64。 + - **outputs** (Variable) - tensor变量,通常其数据类型为float32或float64,形状为 :math:`[batch\_size, vocabulary\_size]` ,表示当前解码步预测产生的logit(未归一化的概率),和由 :code:`BasicDecoder.output_fn(BasicDecoder.cell.call())` 返回的 :code:`outputs` 是同一内容。 + - **states** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构,和由 :code:`BasicDecoder.cell.call()` 返回的 :code:`new_states` 是同一内容。 + +返回:数据类型为int64形状为 :math:`[batch\_size]` 的tensor,表示采样得到的id。 + +返回类型:Variable diff --git a/doc/paddle/api/paddle/fluid/layers/StaticRNN_cn.rst b/doc/paddle/api/paddle/fluid/layers/StaticRNN_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..92817b42e222eacbdb40f0d568dc0db0ae8064da --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/StaticRNN_cn.rst @@ -0,0 +1,272 @@ +.. _cn_api_fluid_layers_StaticRNN: + +StaticRNN +------------------------------- + + +.. py:class:: paddle.fluid.layers.StaticRNN(name=None) + + + + +该OP用来处理一批序列数据,其中每个样本序列的长度必须相等。StaticRNN将序列按照时间步长展开,用户需要定义每个时间步中的处理逻辑。 + +参数: + - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.layers as layers + + vocab_size, hidden_size=10000, 200 + x = layers.data(name="x", shape=[-1, 1, 1], dtype='int64') + + # 创建处理用的word sequence + x_emb = layers.embedding( + input=x, + size=[vocab_size, hidden_size], + dtype='float32', + is_sparse=False) + # 把batch size变换到第1维。 + x_emb = layers.transpose(x_emb, perm=[1, 0, 2]) + + rnn = fluid.layers.StaticRNN() + with rnn.step(): + # 将刚才创建的word sequence标记为输入,每个时间步取一个word处理。 + word = rnn.step_input(x_emb) + # 创建memory变量作为prev,batch size来自于word变量。 + prev = rnn.memory(shape=[-1, hidden_size], batch_ref = word) + hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu') + # 用处理完的hidden变量更新prev变量。 + rnn.update_memory(prev, hidden) + # 把每一步处理后的hidden标记为输出序列。 + rnn.step_output(hidden) + # 获取最终的输出结果 + result = rnn() + +.. py:method:: step() + +定义在每个时间步执行的操作。step用在with语句中,with语句中定义的OP会被执行sequence_len次(sequence_len是输入序列的长度)。 + + +.. 
py:method:: memory(init=None, shape=None, batch_ref=None, init_value=0.0, init_batch_dim_idx=0, ref_batch_dim_idx=1) + +为静态RNN创建一个内存变量。 +如果init不为None,则用init将初始化memory。 如果init为None,则必须设置shape和batch_ref,函数会使用shape和batch_ref创建新的Variable来初始化init。 + +参数: + - **init** (Variable,可选) - 用来初始化memory的Tensor。如果没有设置,则必须提供shape和batch_ref参数。默认值None。 + - **shape** (list|tuple) - 当init为None时用来设置memory的维度,注意不包括batch_size。默认值None。 + - **batch_ref** (Variable,可选) - 当init为None时,memory变量的batch size会设置为该batch_ref变量的ref_batch_dim_idx轴。默认值None。 + - **init_value** (float,可选) - 当init为None时用来设置memory的初始值,默认值0.0。 + - **init_batch_dim_idx** (int,可选) - init变量的batch_size轴,默认值0。 + - **ref_batch_dim_idx** (int,可选) - batch_ref变量的batch_size轴,默认值1。 + +返回:返回创建的memory变量。 + +返回类型;Variable + + +**代码示例一** + +.. code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.layers as layers + + vocab_size, hidden_size=10000, 200 + x = layers.data(name="x", shape=[-1, 1, 1], dtype='int64') + + # 创建处理用的word sequence + x_emb = layers.embedding( + input=x, + size=[vocab_size, hidden_size], + dtype='float32', + is_sparse=False) + # 把batch size变换到第1维。 + x_emb = layers.transpose(x_emb, perm=[1, 0, 2]) + + rnn = fluid.layers.StaticRNN() + with rnn.step(): + # 将刚才创建的word sequence标记为输入,每个时间步取一个word处理。 + word = rnn.step_input(x_emb) + # 创建memory变量作为prev,batch size来自于word变量。 + prev = rnn.memory(shape=[-1, hidden_size], batch_ref = word) + hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu') + # 用处理完的hidden变量更新prev变量。 + rnn.update_memory(prev, hidden) + +**代码示例二** + +.. code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.layers as layers + + vocab_size, hidden_size=10000, 200 + x = layers.data(name="x", shape=[-1, 1, 1], dtype='int64') + + # 创建处理用的word sequence + x_emb = layers.embedding( + input=x, + size=[vocab_size, hidden_size], + dtype='float32', + is_sparse=False) + # 把batch size变换到第1维。 + x_emb = layers.transpose(x_emb, perm=[1, 0, 2]) + boot_memory = fluid.layers.data(name='boot', shape=[hidden_size], dtype='float32', lod_level=1) + + rnn = fluid.layers.StaticRNN() + with rnn.step(): + # 将刚才创建的word sequence标记为输入,每个时间步取一个word处理。 + word = rnn.step_input(x_emb) + # 用init初始化memory。 + prev = rnn.memory(init=boot_memory) + hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu') + # 用处理完的hidden变量更新prev变量。 + rnn.update_memory(prev, hidden) + +.. py:method:: step_input(x) + +标记StaticRNN的输入序列。 + +参数: + - **x** (Variable) – 输入序列,x的形状应为[seq_len, ...]。 + +返回:输入序列中当前时间步的数据。 + +返回类型:Variable + + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.layers as layers + + vocab_size, hidden_size=10000, 200 + x = layers.data(name="x", shape=[-1, 1, 1], dtype='int64') + + # 创建处理用的word sequence + x_emb = layers.embedding( + input=x, + size=[vocab_size, hidden_size], + dtype='float32', + is_sparse=False) + # 把batch size变换到第1维。 + x_emb = layers.transpose(x_emb, perm=[1, 0, 2]) + + rnn = fluid.layers.StaticRNN() + with rnn.step(): + # 将刚才创建的word sequence标记为输入,每个时间步取一个word处理。 + word = rnn.step_input(x_emb) + # 创建memory变量作为prev,batch size来自于word变量。 + prev = rnn.memory(shape=[-1, hidden_size], batch_ref = word) + hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu') + # 用处理完的hidden变量更新prev变量。 + rnn.update_memory(prev, hidden) + +.. py:method:: step_output(o) + +标记StaticRNN输出的序列。 + +参数: + -**o** (Variable) – 输出序列 + +返回:无 + + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.layers as layers + + vocab_size, hidden_size=10000, 200 + x = layers.data(name="x", shape=[-1, 1, 1], dtype='int64') + + # 创建处理用的word sequence + x_emb = layers.embedding( + input=x, + size=[vocab_size, hidden_size], + dtype='float32', + is_sparse=False) + # 把batch size变换到第1维。 + x_emb = layers.transpose(x_emb, perm=[1, 0, 2]) + + rnn = fluid.layers.StaticRNN() + with rnn.step(): + # 将刚才创建的word sequence标记为输入,每个时间步取一个word处理。 + word = rnn.step_input(x_emb) + # 创建memory变量作为prev,batch size来自于word变量。 + prev = rnn.memory(shape=[-1, hidden_size], batch_ref = word) + hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu') + # 用处理完的hidden变量更新prev变量。 + rnn.update_memory(prev, hidden) + # 把每一步处理后的hidden标记为输出序列。 + rnn.step_output(hidden) + + result = rnn() + +.. py:method:: output(*outputs) + +标记StaticRNN输出变量。 + +参数: + -**outputs** – 输出Tensor,可同时将多个Variable标记为输出。 + +返回:无 + + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.layers as layers + + vocab_size, hidden_size=10000, 200 + x = layers.data(name="x", shape=[-1, 1, 1], dtype='int64') + + # 创建处理用的word sequence + x_emb = layers.embedding( + input=x, + size=[vocab_size, hidden_size], + dtype='float32', + is_sparse=False) + # 把batch size变换到第1维。 + x_emb = layers.transpose(x_emb, perm=[1, 0, 2]) + + rnn = fluid.layers.StaticRNN() + with rnn.step(): + # 将刚才创建的word sequence标记为输入,每个时间步取一个word处理。 + word = rnn.step_input(x_emb) + # 创建memory变量作为prev,batch size来自于word变量。 + prev = rnn.memory(shape=[-1, hidden_size], batch_ref = word) + hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu') + # 用处理完的hidden变量更新prev变量。 + rnn.update_memory(prev, hidden) + # 把每一步的hidden和word标记为输出。 + rnn.output(hidden, word) + + result = rnn() + + +.. py:method:: update_memory(mem, var) + + +将memory从mem更新为var。 + +参数: + - **mem** (Variable) – memory接口定义的变量。 + - **var** (Variable) – RNN块中的变量,用来更新memory。var的维度和数据类型必须与mem一致。 + +返回:无 + +代码示例参考前述示例。 + diff --git a/doc/paddle/api/paddle/fluid/layers/Switch_cn.rst b/doc/paddle/api/paddle/fluid/layers/Switch_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0ef9883266646ee04d44a5ffc341b1d3cb848952 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/Switch_cn.rst @@ -0,0 +1,71 @@ +.. _cn_api_fluid_layers_Switch: + +Switch +------------------------------- + + +.. py:class:: paddle.fluid.layers.Switch (name=None) + + + + +该类用于实现Switch分支控制功能。Switch分支包含多个case分支和一个default分支,Switch控制流会依次检查各case分支条件是否满足,并仅执行第一个满足条件的case分支后面的语句。若不存在满足条件的case分支,则仅执行default分支后面的语句。 + +.. note:: + 如果参数 ``cond`` 的形状为[1],强烈建议您使用新的OP :ref:`cn_api_fluid_layers_case` 而不是 ``Switch``。 + OP :ref:`cn_api_fluid_layers_case` 的使用方式更简单,并且调用该OP所用的代码更少且功能与 ``Switch`` 一样。 + +成员函数: + - **case(cond)** - Switch的case分支,其参数cond为bool型的标量Variable。只有当前case分支的cond为True,且之前的case分支的cond均为False,该case分支后的语句才会执行,且不再执行之后的case后的语句。 + - **default()** - Switch的default分支。当所有case分支的cond均为False时,执行default分支后的语句。 + +注意:case和default函数只能用于Switch的scope内部,示例如下: + +.. code-block:: python + + with fluid.layers.Switch() as switch: + with switch.case(cond1): + i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=1) + with switch.case(cond2): + i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=2) + with switch.default(): + i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0) + +参数: + - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +**代码示例** + +.. 
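code-block:: python

    # 注:以下为补充示意(仅作参考),演示包含两个 case 分支的写法,数值均为假设值
    import paddle.fluid as fluid

    i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=1)
    zero = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
    one = fluid.layers.fill_constant(shape=[1], dtype='int64', value=1)
    out = fluid.layers.create_global_var(
        shape=[1], value=-1.0, dtype='float32', persistable=True, name="switch_out")
    v0 = fluid.layers.fill_constant(shape=[1], dtype='float32', value=0.0)
    v1 = fluid.layers.fill_constant(shape=[1], dtype='float32', value=1.0)
    v2 = fluid.layers.fill_constant(shape=[1], dtype='float32', value=2.0)

    with fluid.layers.control_flow.Switch() as switch:
        with switch.case(i == zero):       # 第一个 case:i 等于 0 时生效
            fluid.layers.assign(input=v0, output=out)
        with switch.case(i == one):        # 第二个 case:i 等于 1 时生效
            fluid.layers.assign(input=v1, output=out)
        with switch.default():             # 所有 case 均不满足时生效
            fluid.layers.assign(input=v2, output=out)

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())
    res = exe.run(fluid.default_main_program(), fetch_list=[out])
    print(res)  # [array([1.], dtype=float32)],因为 i 等于 1,命中第二个 case

.. 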
code-block:: python + + import paddle.fluid as fluid + + lr = fluid.layers.create_global_var( + shape=[1], + value=0.0, + dtype='float32', + persistable=True, + name="learning_rate") + zero_var = fluid.layers.fill_constant( + shape=[1], dtype='float32', value=0.0) + one_var = fluid.layers.fill_constant( + shape=[1], dtype='float32', value=1.0) + two_var = fluid.layers.fill_constant( + shape=[1], dtype='float32', value=2.0) + + # 将参数中的begin设为非0值,则进入Switch的default分支,输出数组中的数字将为2 + global_step = fluid.layers.autoincreased_step_counter(counter_name='@LR_DECAY_COUNTER@', begin=0, step=1) + + with fluid.layers.control_flow.Switch() as switch: + with switch.case(global_step == zero_var): + fluid.layers.assign(input=one_var, output=lr) + with switch.default(): + fluid.layers.assign(input=two_var, output=lr) + + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(fluid.default_startup_program()) + + res = exe.run(fluid.default_main_program(), feed={}, fetch_list=[lr]) + print(res) # [array([1.], dtype=float32)] + + diff --git a/doc/paddle/api/paddle/fluid/layers/TrainingHelper_cn.rst b/doc/paddle/api/paddle/fluid/layers/TrainingHelper_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5d140dbf8ac61370b3a0c7a33a50f56c378e4929 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/TrainingHelper_cn.rst @@ -0,0 +1,70 @@ +.. _cn_api_fluid_layers_TrainingHelper: + +TrainingHelper +------------------------------- + + +.. py:class:: paddle.fluid.layers.TrainingHelper(inputs, sequence_length, time_major=False) + +TrainingHelper是 :ref:`cn_api_fluid_layers_DecodeHelper` 的子类。作为解码helper,它在每个解码时间步通过在完整序列输入 :code:`inputs` 的相应位置切片作为各步的输入,并且使用 :code:`argmax` 根据 :code:`cell.call()` 的输出进行采样。 +由于要求有完整的序列输入 :code:`inputs` ,TrainingHelper主要用于以teach-forcing的方式进行最大似然训练,采样得到的内容通常不会使用。 + +参数: + - **inputs** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构。当 :code:`time_major == False` 时,tensor的形状应为 :math:`[batch\_size, sequence\_length, ...]`;当 :code:`time_major == True` 时,tensor的形状应为 :math:`[sequence\_length, batch\_size, ...]`。在解码的每一步都要从中切片取出相应的数据。 + - **sequence_length** (Variable) - 形状为 :math:`[batch\_size]` 的tensor。它存储了 :code:`inputs` 中每个样本的实际长度,可以据此来标识每个解码步中每个样本是否结束。 + - **time_major** (bool,可选) - 指示输入tensor和输出tensor中包含的tensor的数据组织。如果为False,则数据组织为batch为主,形状为 :math:`[batch\_size,sequence\_length,...]`。如果为True,则数据组织为time为主,形状为 :math:`[sequence\_length,batch\_size,...]`。默认值:False。 + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.layers as layers + trg_emb = fluid.data(name="trg_emb", + shape=[None, None, 128], + dtype="float32") + trg_seq_length = fluid.data(name="trg_seq_length", + shape=[None], + dtype="int64") + helper = layers.TrainingHelper(trg_emb, trg_seq_length) + decoder_cell = layers.GRUCell(hidden_size=128) + decoder = layers.BasicDecoder(decoder_cell, helper) + outputs = layers.dynamic_decode( + decoder, + inits=decoder_cell.get_initial_states(trg_emb), + is_test=False) + +.. py:method:: initialize() + +TrainingHelper初始化,其通过在完整序列输入 :code:`inputs` 中首个时间步的位置上切片,以此作为第一个解码步的输入,并给出每个序列是否结束的初始标识。这是 :ref:`cn_api_fluid_layers_BasicDecoder` 初始化的一部分。 + +返回::code:`(initial_inputs, initial_finished)` 的二元组, :code:`initial_inputs` 是单个tensor变量或tensor变量组成的嵌套结构,tensor的形状是 :math:`[batch\_size, ...]` 。 :code:`initial_finished` 是一个bool类型且形状为 :math:`[batch\_size]` 的tensor。 + +返回类型:tuple + +.. 
py:method:: sample(time, outputs, states) + +使用 :code:`argmax` 根据 `outputs` 进行采样。由于使用完整序列中的切片作为下一解码步的输入,采样得到的内容通常不会使用。 + +参数: + - **time** (Variable) - 调用者提供的形状为[1]的tensor,表示当前解码的时间步长。其数据类型为int64。 + - **outputs** (Variable) - tensor变量,通常其数据类型为float32或float64,形状为 :math:`[batch\_size, vocabulary\_size]` ,表示当前解码步预测产生的logit(未归一化的概率),和由 :code:`BasicDecoder.output_fn(BasicDecoder.cell.call())` 返回的 :code:`outputs` 是同一内容。 + - **states** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构,和由 :code:`BasicDecoder.cell.call()` 返回的 :code:`new_states` 是同一内容。 + +返回:数据类型为int64形状为 :math:`[batch\_size]` 的tensor,表示采样得到的id。 + +返回类型:Variable + +.. py:method:: next_inputs(time, outputs, states, sample_ids) + +从完整序列输入中当前时间步的位置上切片,以此作为产生下一解码步的输入;同时直接使用输入参数中的 :code:`states` 作为下一解码步的状态;并比较当前时间与每个序列的大小,依此产生每个序列是否结束的标识。 + +参数: + - **time** (Variable) - 调用者提供的形状为[1]的tensor,表示当前解码的时间步长。其数据类型为int64。 + - **outputs** (Variable) - tensor变量,通常其数据类型为float32或float64,形状为 :math:`[batch\_size, vocabulary\_size]` ,表示当前解码步预测产生的logit(未归一化的概率),和由 :code:`BasicDecoder.output_fn(BasicDecoder.cell.call())` 返回的 :code:`outputs` 是同一内容。 + - **states** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构,和由 :code:`BasicDecoder.cell.call()` 返回的 :code:`new_states` 是同一内容。 + - **sample_ids** (Variable) - 数据类型为int64形状为 :math:`[batch\_size]` 的tensor,和由 :code:`sample()` 返回的 :code:`sample_ids` 是同一内容。 + +返回: :code:`(finished, next_inputs, next_states)` 的三元组。 :code:`next_inputs, next_states` 均是单个tensor变量或tensor变量组成的嵌套结构,tensor的形状是 :math:`[batch\_size, ...]` , :code:`next_states` 和输入参数中的 :code:`states` 相同; :code:`finished` 是一个bool类型且形状为 :math:`[batch\_size]` 的tensor。 + +返回类型:tuple diff --git a/doc/paddle/api/paddle/fluid/layers/Uniform_cn.rst b/doc/paddle/api/paddle/fluid/layers/Uniform_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..59e1544b3751afacf4002cfa859a6827df1de187 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/Uniform_cn.rst @@ -0,0 +1,106 @@ +.. _cn_api_fluid_layers_Uniform: + +Uniform +------------------------------- + +.. py:class:: paddle.fluid.layers.Uniform(low, high) + + + + +均匀分布 + +概率密度函数(pdf)为: + +.. math:: + + pdf(x; a, b) = \frac{1}{Z}, a <=x < b + + Z = b - a + +上面的数学公式中: + +:math:`low = a` 。 +:math:`high = b` 。 +:math:`Z`: 正态分布常量。 + +参数low和high的维度必须能够支持广播。 + +参数: + - **low** (float|list|numpy.ndarray|Variable) - 均匀分布的下边界。数据类型为float32。 + - **high** (float|list|numpy.ndarray|Variable) - 均匀分布的上边界。数据类型为float32。 + +**代码示例**: + +.. code-block:: python + + import numpy as np + from paddle.fluid import layers + from paddle.fluid.layers import Uniform + + # 定义参数为float的均匀分布 + u1 = Uniform(low=3.0, high=4.0) + # 定义参数为list的均匀分布 + u2 = Uniform(low=[1.0, 2.0], + high=[3.0, 4.0]) + # 通过广播的方式,定义一个均匀分布 + u3 = Uniform(low=[[1.0, 2.0], + [3.0, 4.0]], + high=[[1.5, 2.5], + [3.5, 4.5]]) + + # 通过广播的方式,定义一个均匀分布 + u4 = Uniform(low=3.0, high=[5.0, 6.0, 7.0]) + + # 一个完整的例子 + value_npdata = np.array([0.8], dtype="float32") + value_tensor = layers.create_tensor(dtype="float32") + layers.assign(value_npdata, value_tensor) + + uniform = Uniform([0.], [2.]) + + sample = uniform.sample([2]) + # 一个由定义好的均匀分布随机生成的张量,维度为: [2, 1] + entropy = uniform.entropy() + # [0.6931472] with shape: [1] + lp = uniform.log_prob(value_tensor) + # [-0.6931472] with shape: [1] + + +.. py:function:: sample(shape, seed=0) + +生成指定维度的样本 + +参数: + - **shape** (list) - 1维列表,指定生成样本的维度。数据类型为int32。 + - **seed** (int) - 长整型数。 + +返回:预先设计好维度的张量, 数据类型为float32 + +返回类型:Variable + +.. py:function:: entropy() + +信息熵 + +返回:均匀分布的信息熵, 数据类型为float32 + +返回类型:Variable + +.. 
py:function:: log_prob(value) + +对数概率密度函数 + +参数: + - **value** (Variable) - 输入张量。数据类型为float32或float64。 + +返回:对数概率, 数据类型与value相同 + +返回类型:Variable + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/While_cn.rst b/doc/paddle/api/paddle/fluid/layers/While_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ae41fbaad3dca1442764e323bf9e3deeeb238c13 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/While_cn.rst @@ -0,0 +1,88 @@ +.. _cn_api_fluid_layers_While: + +While +------------------------------- + + +.. py:class:: paddle.fluid.layers.While (cond, is_test=False, name=None) + + + + + +该类用于实现while循环控制功能,只要循环条件cond为True,就循环执行while循环体中的语句,直到cond为False为止。 + +.. note:: + 如果参数 ``cond`` 的形状为[1],强烈建议您使用新的OP :ref:`cn_api_fluid_layers_while_loop` 而不是 ``While``。 + OP :ref:`cn_api_fluid_layers_while_loop` 的使用方式更简单,并且调用该OP所用的代码更少且功能与 ``While`` 一样。 + +**注意:** + 在 ``While`` 中创建的局部变量类似于C++中的while,无法被外部引用,因此无法通过 ``Executor`` 中的 ``fetch_list`` 来获取。 + 若想实现该功能,PaddlePaddle提供了 ``assign`` 接口将局部变量赋值到外部,请参考示例代码2 或参考 `issue#22724 `_ 。 + +参数: + - **cond** (Variable) – 用于判断循环继续进行的条件,为数据类型bool型的Tensor,其shape必须为[1]。 + - **is_test** (bool,可选) – 用于表明是否在测试阶段执行,默认值为False。 + - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +**代码示例 1** + +.. code-block:: python + + # 该示例代码展示整数循环+1,循环10次,输出计数结果 + import paddle.fluid as fluid + import numpy as np + + i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0) # 循环计数器 + + loop_len = fluid.layers.fill_constant(shape=[1],dtype='int64', value=10) # 循环次数 + + cond = fluid.layers.less_than(x=i, y=loop_len) # 循环条件 + while_op = fluid.layers.While(cond=cond) + with while_op.block(): # 循环体 + i = fluid.layers.increment(x=i, value=1, in_place=True) + fluid.layers.less_than(x=i, y=loop_len, cond=cond) # 更新循环条件 + + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(fluid.default_startup_program()) + + res = exe.run(fluid.default_main_program(), feed={}, fetch_list=[i]) + print(res) # [array([10])] + + +**代码示例 2** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0) + loop_len = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10) + one = fluid.layers.fill_constant(shape=[1], dtype='float32', value=1) + data = fluid.data(name='data', shape=[1], dtype='float32') + sums = fluid.layers.fill_constant(shape=[1], dtype='float32', value=0) # 在 While 外先定义要获取的变量,需和要获取的 While 内部的变量名称不同 + + cond = fluid.layers.less_than(x=i, y=loop_len) + while_op = fluid.layers.While(cond=cond) + with while_op.block(): + sums_tensor = fluid.layers.elementwise_add(x=data, y=data) + fluid.layers.assign(input=sums_tensor, output=sums) # 将 While 内定义的变量 sums_tenosr 通过 layers.assign 更新至 While 外的变量 sums 中 + i = fluid.layers.increment(x=i, value=1, in_place=True) + data = fluid.layers.elementwise_add(x=data, y=one) + fluid.layers.less_than(x=i, y=loop_len, cond=cond) + + feed_data = np.ones([1]).astype('float32') + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(fluid.default_startup_program()) + res = exe.run(fluid.default_main_program(), feed={'data': feed_data}, fetch_list=sums) + print(res[0]) # [2.] # 因 While 内的 data 没有将值更新到 While 外,故循环过后此处 sums 的值为 [2.] 
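
**代码示例 3**

下面是一个补充示例(仅作参考),将示例 1 中的计数循环改写为上文注解中推荐的 :ref:`cn_api_fluid_layers_while_loop` 写法,功能与示例 1 相同:

.. code-block:: python

    import paddle.fluid as fluid

    def cond(i, loop_len):          # 循环条件
        return fluid.layers.less_than(x=i, y=loop_len)

    def body(i, loop_len):          # 循环体:计数器加一
        i = fluid.layers.increment(x=i, value=1, in_place=True)
        return [i, loop_len]

    i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
    loop_len = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
    i, loop_len = fluid.layers.while_loop(cond, body, [i, loop_len])

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())
    res = exe.run(fluid.default_main_program(), fetch_list=[i])
    print(res)  # [array([10])]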
+ + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/abs_cn.rst b/doc/paddle/api/paddle/fluid/layers/abs_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b294f3de09c7f329163e873c68975c90d294f3de --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/abs_cn.rst @@ -0,0 +1,33 @@ +.. _cn_api_fluid_layers_abs: + +abs +------------------------------- + +.. py:function:: paddle.fluid.layers.abs(x, name=None) + + + + +绝对值函数。 + +.. math:: + out = |x| + +参数: + - x (Tensor) - 输入的Tensor,数据类型为:float32、float64。 + - name (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回:输出Tensor,与 ``x`` 维度相同、数据类型相同。 + +返回类型:Tensor + +**代码示例**: + +.. code-block:: python + + import paddle + paddle.disable_static() + x = paddle.to_tensor([-1, -2, -3, -4], dtype='float32') + res = paddle.abs(x) + print(res.numpy()) + # [1, 2, 3, 4] diff --git a/doc/paddle/api/paddle/fluid/layers/acos_cn.rst b/doc/paddle/api/paddle/fluid/layers/acos_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..99798c8653d79ebc48dac88b153056eadf058c75 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/acos_cn.rst @@ -0,0 +1,34 @@ +.. _cn_api_fluid_layers_acos: + +acos +------------------------------- + +.. py:function:: paddle.fluid.layers.acos(x, name=None) + + + + +arccosine函数。 + +.. math:: + out = cos^{-1}(x) + +参数: + - x (Tensor) - 输入的Tensor,数据类型为:float32、float64。 + - name (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回:输出Tensor,与 ``x`` 维度相同、数据类型相同。 + +返回类型: Tensor + + +**代码示例**: + +.. code-block:: python + + import paddle + paddle.disable_static() + x = paddle.to_tensor([-0.8183, 0.4912, -0.6444, 0.0371]) + res = paddle.acos(x) + print(res.numpy()) + # [2.5293, 1.0573, 2.2711, 1.5336] diff --git a/doc/paddle/api/paddle/fluid/layers/adaptive_pool2d_cn.rst b/doc/paddle/api/paddle/fluid/layers/adaptive_pool2d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7a97c39422f3a1ec3c1733e6d6e2058625294532 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/adaptive_pool2d_cn.rst @@ -0,0 +1,95 @@ +.. _cn_api_fluid_layers_adaptive_pool2d: + +adaptive_pool2d +------------------------------- + +.. py:function:: paddle.fluid.layers.adaptive_pool2d(input, pool_size, pool_type='max', require_index=False, name=None) + + + + +该OP使用上述输入参数的池化配置,为二维空间自适应池化操作,根据 ``input`` , 池化类型 ``pool_type`` , 池化核大小 ``pool_size`` 这些参数得到输出。 + +输入X和输出Out是NCHW格式,N为批大小,C是通道数,H是特征高度,W是特征宽度。参数 ``pool_size`` 含有两个整型元素, 分别代表高度和宽度上的参数。输出Out的H和W维由 ``pool_size`` 决定,即输出shape为 :math:`\left ( N,C,pool_size[0],pool_size[1] \right )` + + +对于平均adaptive pool2d: + +.. 
math:: + + hstart &= floor(i * H_{in} / H_{out}) + + hend &= ceil((i + 1) * H_{in} / H_{out}) + + wstart &= floor(j * W_{in} / W_{out}) + + wend &= ceil((j + 1) * W_{in} / W_{out}) + + Output(i ,j) &= \frac{sum(Input[hstart:hend, wstart:wend])}{(hend - hstart) * (wend - wstart)} + +参数: + - **input** (Variable) - 池化操作的输入张量,维度为 :math:`[N, C, H, W]` 的4-D Tensor。 输入张量的格式为NCHW,其中N是batch大小,C是通道数,H是特征的高度,W是特征的宽度,数据类型为float32或float64。 + - **pool_size** (int|list|tuple) - 池化核大小。 如果池化核大小是元组或列表,则它必须包含两个整数(pool_size_Height,pool_size_Width。若为一个整数,则表示H和W维度上均为该值。 + - **pool_type** (string)- 池化类型,可输入“max”代表max-pooling,或者“avg”代表average-pooling。 + - **require_index** (bool) - 如果为False,则输出中带有最大池化点所在的索引。 如果pool_type为avg,该项不可被设置为True, 默认False。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置。默认值:None。 + + +返回: Variable(Tensor) 自适应池化结果张量 + +返回类型:变量(Variable),数据类型与 ``input`` 一致 + + +抛出异常: + + - ``ValueError`` – ``pool_type`` 不是 ‘max’ 或 ‘avg’ + - ``ValueError`` – 当 ``pool_type`` 是 ‘avg’ 时,错误地设置 ‘require_index’ 为true . + - ``ValueError`` – ``pool_size`` 应为一个长度为2的列表或元组 + +.. code-block:: python + + # average adaptive pool2d + # 假设输入形为[N, C, H, W], `pool_size` 为 [m, n], + # 输出形为 [N, C, m, n], adaptive pool 将输入的 H 和 W 维度 + # 平均分割为 m * n 个栅格(grid) ,然后为每个栅格进行池化得到输出 + # adaptive average pool 进行如下操作 + # + # for i in range(m): + # for j in range(n): + # hstart = floor(i * H / m) + # hend = ceil((i + 1) * H / m) + # wstart = floor(i * W / n) + # wend = ceil((i + 1) * W / n) + # output[:, :, i, j] = avg(input[:, :, hstart: hend, wstart: wend]) + # + import paddle.fluid as fluid + data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32') + pool_out = fluid.layers.adaptive_pool2d( + input=data, + pool_size=[3, 3], + pool_type='avg') + + # max adaptive pool2d + # 假设输入形为[N, C, H, W], `pool_size` 为 [m, n], + # 输出形为 [N, C, m, n], adaptive pool 将输入的 H 和 W 维度 + # 平均分割为 m * n 个栅格(grid) ,然后为每个栅格进行池化得到输出 + # adaptive average pool 进行如下操作 + # + # for i in range(m): + # for j in range(n): + # hstart = floor(i * H / m) + # hend = ceil((i + 1) * H / m) + # wstart = floor(i * W / n) + # wend = ceil((i + 1) * W / n) + # output[:, :, i, j] = max(input[:, :, hstart: hend, wstart: wend]) + # + import paddle.fluid as fluid + data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32') + pool_out = fluid.layers.adaptive_pool2d( + input=data, + pool_size=[3, 3], + pool_type='max') + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/adaptive_pool3d_cn.rst b/doc/paddle/api/paddle/fluid/layers/adaptive_pool3d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..35127de7acaeea4c0bda85c3e9b3faf84a078eca --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/adaptive_pool3d_cn.rst @@ -0,0 +1,113 @@ +.. _cn_api_fluid_layers_adaptive_pool3d: + +adaptive_pool3d +------------------------------- + +.. py:function:: paddle.fluid.layers.adaptive_pool3d(input, pool_size, pool_type='max', require_index=False, name=None) + + + + +该OP使用上述输入参数的池化配置,为二维空间自适应池化操作,根据 ``input`` , 池化类型 ``pool_type`` , 池化核大小 ``pool_size`` 这些参数得到输出。 + +输入X和输出Out是NCDHW格式,N为批大小,D是特征深度,C是通道数,H是特征高度,W是特征宽度。参数 ``pool_size`` 含有两个整型元素, 分别代表深度,高度和宽度上的参数。输出Out的D, H和W维由 ``pool_size`` 决定,即输出shape为 :math:`\left ( N,C,pool_size[0],pool_size[1],pool_size[2] \right )` + + +对于平均adaptive pool3d: + +.. 
math:: + + dstart &= floor(i * D_{in} / D_{out}) + + dend &= ceil((i + 1) * D_{in} / D_{out}) + + hstart &= floor(j * H_{in} / H_{out}) + + hend &= ceil((j + 1) * H_{in} / H_{out}) + + wstart &= floor(k * W_{in} / W_{out}) + + wend &= ceil((k + 1) * W_{in} / W_{out}) + + Output(i ,j, k) &= \frac{sum(Input[dstart:dend, hstart:hend, wstart:wend])}{(dend - dstart) * (hend - hstart) * (wend - wstart)} + + + +参数: + - **input** (Variable) - 池化操作的输入张量,维度为 :math:`[N, C, D, H, W]` 的5-D Tensor。 输入张量的格式为NCDHW,其中N是batch大小,C是通道数,D为特征的深度,H是特征的高度,W是特征的宽度,数据类型为float32或float64。 + - **pool_size** (int|list|tuple) - 池化核大小。 如果池化核大小是元组或列表,则它必须包含三个整数(Depth, Height, Width)。若为一个整数,则表示D, H和W维度上均为该值。 + - **pool_type** (string)- 池化类型,可输入“max”代表max-pooling,或者“avg”代表average-pooling。 + - **require_index** (bool) - 如果为True,则输出中带有最大池化点所在的索引。 如果pool_type为avg,该项不可被设置为True, 默认False。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置。默认值:None。 + + +返回: Variable(Tensor) 自适应池化结果张量 + +返回类型:变量(Variable),数据类型与 ``input`` 一致 + + +抛出异常: + + - ``ValueError`` – ``pool_type`` 不是 ‘max’ 或 ‘avg’ + - ``ValueError`` – 当 ``pool_type`` 是 ‘avg’ 时,错误地设置 ‘require_index’ 为true . + - ``ValueError`` – ``pool_size`` 应为一个长度为3的列表或元组 + +.. code-block:: python + + # average adaptive pool2d + # 假设输入形为[N, C, D, H, W], `pool_size` 为 [l, m, n], + # 输出形为 [N, C, l, m, n], adaptive pool 将输入的D, H 和 W 维度 + # 平均分割为 l * m * n 个栅格(grid) ,然后为每个栅格进行池化得到输出 + # adaptive average pool 进行如下操作 + # + # for i in range(l): + # for j in range(m): + # for k in range(n): + # dstart = floor(i * D / l) + # dend = ceil((i + 1) * D / l) + # hstart = floor(j * H / m) + # hend = ceil((j + 1) * H / m) + # wstart = floor(k * W / n) + # wend = ceil((k + 1) * W / n) + # output[:, :, i, j, k] = + # avg(input[:, :, dstart:dend, hstart: hend, wstart: wend]) + # + + import paddle.fluid as fluid + + data = fluid.data(name='data', shape=[None, 3, 32, 32, 32], dtype='float32') + pool_out = fluid.layers.adaptive_pool3d( + input=data, + pool_size=[3, 3, 3], + pool_type='avg') + + # max adaptive pool2d + # 假设输入形为[N, C, D, H, W], `pool_size` 为 [l, m, n], + # 输出形为 [N, C, l, m, n], adaptive pool 将输入的D, H 和 W 维度 + # 平均分割为 l * m * n 个栅格(grid) ,然后为每个栅格进行池化得到输出 + # adaptive average pool 进行如下操作 + # + # for i in range(l): + # for j in range(m): + # for k in range(n): + # dstart = floor(i * D / l) + # dend = ceil((i + 1) * D / l) + # hstart = floor(j * H / m) + # hend = ceil((j + 1) * H / m) + # wstart = floor(k * W / n) + # wend = ceil((k + 1) * W / n) + # output[:, :, i, j, k] = + # avg(input[:, :, dstart:dend, hstart: hend, wstart: wend]) + # + + import paddle.fluid as fluid + + data = fluid.data(name='data', shape=[None, 3, 32, 32, 32], dtype='float32') + pool_out = fluid.layers.adaptive_pool3d( + input=data, + pool_size=[3, 3, 3], + pool_type='max') + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/add_position_encoding_cn.rst b/doc/paddle/api/paddle/fluid/layers/add_position_encoding_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a3f7c711e7a0a57503c3213425e2433c63c18c61 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/add_position_encoding_cn.rst @@ -0,0 +1,57 @@ +.. _cn_api_fluid_layers_add_position_encoding: + +add_position_encoding +------------------------------- + +.. py:function:: paddle.fluid.layers.add_position_encoding(input, alpha, beta, name=None) + + + + +该OP将输入inpu中每个位置(序列中的位置)的特征与对应的位置编码加权求和,位置编码可参考论文: `Attention Is All You Need `_ + +输出的计算公式如下: + +.. 
math:: + + PE(pos, 2i) &= \sin{(pos / 10000^{2i / P})}\\ + PE(pos, 2i + 1) &= \cos{(pos / 10000^{2i / P})}\\ + Out(:, pos, i) &= \alpha * input(:, pos, i) + \beta * PE(pos, i) + +其中: + - PE(pos, 2i): pos位置对应的编码中偶数特征位上的值 + - PE(pos, 2i + 1): pos位置对应的编码中奇数特征位上的值 + +参数: + - **input** (Variable) – Tensor或LoD level为1的LoDTensor。Tensor时,其形状为 :math:`[N, M, P]` ,其中 :math:`N` 表示batch size, :math:`M` 表示序列长度, :math:`P` 为特征维度大小;LoDTensor时,其形状为 :math:`[N, P]` ,其中 :math:`N` 表示所有序列长度之和, :math:`P` 为特征维度大小。数据类型为float32或float64。 + - **alpha** (float) – 加权求和时输入input的权重系数 + - **beta** (float) – 加权求和时位置编码的权重系数 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + + +返回: 加上位置编码后的Tensor或LoDTensor,和输入(input)具有相同数据类型和形状及LoD信息。 + +返回类型: Variable + +**代码示例:** + +.. code-block:: python + + import paddle.fluid as fluid + + tensor = fluid.data( + name='tensor', + shape=[None, 64, 512], + dtype='float32') + position_tensor = fluid.layers.add_position_encoding( + input=tensor, alpha=1.0, beta=1.0) + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/affine_channel_cn.rst b/doc/paddle/api/paddle/fluid/layers/affine_channel_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..996d7c0e837b1f9835760f028c4165e426fd6ac4 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/affine_channel_cn.rst @@ -0,0 +1,44 @@ +.. _cn_api_fluid_layers_affine_channel: + +affine_channel +------------------------------- + +.. py:function:: paddle.fluid.layers.affine_channel(x, scale=None, bias=None, data_layout='NCHW', name=None,act=None) + + + + +对输入的每个 channel 应用单独的仿射变换。用于将空间批量归一化替换为其等价的固定变换。 + +输入也可以是二维张量,并在第二维应用仿射变换。 + +参数: + - **x** (Variable): 特征图输入可以是一个具有NCHW格式或NHWC格式的的4-D张量。它也可以是二维张量,此时该算法应用于第二维度的仿射变换。数据类型为float32或float64。 + - **scale** (Variable): 维度为(C)的一维输入,第C个元素为输入的第C通道仿射变换的尺度因子。数据类型为float32或float64。 + - **bias** (Variable): 维度为(C)的一维输入,第C个元素是输入的第C个通道的仿射变换的偏置。数据类型为float32或float64。 + - **data_layout** (str,可选): 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。如果输入是一个2D张量,可以忽略该参数,默认值为"NCHW"。 + - **name** (str,可选): 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + - **act** (str,可选): 应用于该层输出的激活函数,默认值为None。 + +返回:与x具有相同维度和数据布局的张量, 数据类型与x相同 + +返回类型:Variable + +**代码示例:** + +.. code-block:: python + + import paddle.fluid as fluid + data = fluid.layers.data(name='data', shape=[3, 32, 32], + dtype='float32') + input_scale = fluid.layers.create_parameter(shape=[3], + dtype="float32") + input_bias = fluid.layers.create_parameter(shape=[3], + dtype="float32") + out = fluid.layers.affine_channel(data,scale=input_scale, + bias=input_bias) + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/affine_grid_cn.rst b/doc/paddle/api/paddle/fluid/layers/affine_grid_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6fb027a1d996c787f999961775aa280508617004 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/affine_grid_cn.rst @@ -0,0 +1,31 @@ +.. _cn_api_fluid_layers_affine_grid: + +affine_grid +------------------------------- + +.. 
py:function:: paddle.fluid.layers.affine_grid(theta, out_shape, name=None) + + + + +该OP用于生成仿射变换前后的feature maps的坐标映射关系。在视觉应用中,根据该OP得到的映射关系,将输入feature map的像素点变换到对应的坐标,就得到了经过仿射变换的feature map。 + +参数: + - **theta** (Variable) - Shape为 ``[batch_size, 2, 3]`` 的Tensor,表示batch_size个 ``2X3`` 的变换矩阵。数据类型支持float32,float64。 + - **out_shape** (Variable | list | tuple) - 类型可以是1-D Tensor、list或tuple。用于表示在仿射变换中的输出的shape,其格式 ``[N, C, H, W]`` ,分别为输出feature map的batch size、channel数量、高和宽。数据类型支持int32。 + - **name** (None|str) – 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:api_guide_Name ,默认值为None。 + +返回: Variable。Shape为 ``[N, H, W, 2]`` 的4-D Tensor,表示仿射变换前后的坐标的映射关系。其中,N、H、W分别为仿射变换中输出feature map的batch size、高和宽。 数据类型与 ``theta`` 一致。 + +返回类型:Variable + +**代码示例:** + +.. code-block:: python + + import paddle.fluid as fluid + theta = fluid.layers.data(name="x", shape=[2, 3], dtype="float32") + out_shape = fluid.layers.data(name="y", shape=[-1], dtype="float32") + data = fluid.layers.affine_grid(theta, out_shape) + # or + data = fluid.layers.affine_grid(theta, [5, 3, 28, 28]) diff --git a/doc/paddle/api/paddle/fluid/layers/anchor_generator_cn.rst b/doc/paddle/api/paddle/fluid/layers/anchor_generator_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c30fa66826ae8ad634542a37990d9045cd249cf6 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/anchor_generator_cn.rst @@ -0,0 +1,46 @@ +.. _cn_api_fluid_layers_anchor_generator: + +anchor_generator +------------------------------- + +.. py:function:: paddle.fluid.layers.anchor_generator(input, anchor_sizes=None, aspect_ratios=None, variance=[0.1, 0.1, 0.2, 0.2], stride=None, offset=0.5, name=None) + + + + +**Anchor generator operator** + +为RCNN算法生成anchor,输入的每一位产生N个anchor,N=size(anchor_sizes)*size(aspect_ratios)。生成anchor的顺序首先是aspect_ratios循环,然后是anchor_sizes循环。 + +参数: + - **input** (Variable) - 维度为[N,C,H,W]的4-D Tensor。数据类型为float32或float64。 + - **anchor_sizes** (float32|list|tuple,可选) - 生成anchor的anchor大小,以绝对像素的形式表示,例如:[64.,128.,256.,512.]。若anchor的大小为64,则意味着这个anchor的面积等于64**2。默认值为None。 + - **aspect_ratios** (float32|list|tuple,可选) - 生成anchor的高宽比,例如[0.5,1.0,2.0]。默认值为None。 + - **variance** (list|tuple,可选) - 变量,在框回归delta中使用,数据类型为float32。默认值为[0.1,0.1,0.2,0.2]。 + - **stride** (list|tuple,可选) - anchor在宽度和高度方向上的步长,比如[16.0,16.0],数据类型为float32。默认值为None。 + - **offset** (float32,可选) - 先验框的中心位移。默认值为0.5 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回: + + - 表示输出anchor的Tensor,数据类型为float32或float64。维度为[H,W,num_anchors,4]。 ``H`` 是输入的高度, ``W`` 是输入的宽度, ``num_anchors`` 是输入每位的框数,每个anchor格式(未归一化)为(xmin,ymin,xmax,ymax) + + - 表示输出variance的Tensor,数据类型为float32或float64。维度为[H,W,num_anchors,4]。 ``H`` 是输入的高度, ``W`` 是输入的宽度, ``num_anchors`` 是输入每个位置的框数,每个变量的格式为(xcenter,ycenter,w,h)。 + + +返回类型:Variable + + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + conv1 = fluid.data(name='conv1', shape=[None, 48, 16, 16], dtype='float32') + anchor, var = fluid.layers.anchor_generator( + input=conv1, + anchor_sizes=[64, 128, 256, 512], + aspect_ratios=[0.5, 1.0, 2.0], + variance=[0.1, 0.1, 0.2, 0.2], + stride=[16.0, 16.0], + offset=0.5) diff --git a/doc/paddle/api/paddle/fluid/layers/argmax_cn.rst b/doc/paddle/api/paddle/fluid/layers/argmax_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d165ce8d6997f060e53c2754239be83b6298ef2c --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/argmax_cn.rst @@ -0,0 +1,54 @@ +.. _cn_api_fluid_layers_argmax: + +argmax +------------------------------- + +.. 
py:function:: paddle.fluid.layers.argmax(x, axis=0) + + + + +**argmax** + +该OP沿 ``axis`` 计算输入 ``x`` 的最大元素的索引。 + +参数: + - **x** (Variable) - 输入的多维 ``Tensor`` ,支持的数据类型:float32、float64、int8、int16、int32、int64。 + - **axis** (int,可选) - 指定对输入Tensor进行运算的轴, ``axis`` 的有效范围是[-R, R),R是输入 ``x`` 的Rank, ``axis`` 为负时与 ``axis`` +R 等价。默认值为0。 + +返回: ``Tensor`` ,数据类型int64 + +返回类型:Variable + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + in1 = np.array([[[5,8,9,5], + [0,0,1,7], + [6,9,2,4]], + [[5,2,4,2], + [4,7,7,9], + [1,7,0,6]]]) + with fluid.dygraph.guard(): + x = fluid.dygraph.to_variable(in1) + out1 = fluid.layers.argmax(x=x, axis=-1) + out2 = fluid.layers.argmax(x=x, axis=0) + out3 = fluid.layers.argmax(x=x, axis=1) + out4 = fluid.layers.argmax(x=x, axis=2) + print(out1.numpy()) + # [[2 3 1] + # [0 3 1]] + print(out2.numpy()) + # [[0 0 0 0] + # [1 1 1 1] + # [0 0 0 1]] + print(out3.numpy()) + # [[2 2 0 1] + # [0 1 1 1]] + print(out4.numpy()) + # [[2 3 1] + # [0 3 1]] diff --git a/doc/paddle/api/paddle/fluid/layers/argmin_cn.rst b/doc/paddle/api/paddle/fluid/layers/argmin_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7f3681b4024ede427f83edc7f69a7ef5e25f345f --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/argmin_cn.rst @@ -0,0 +1,54 @@ +.. _cn_api_fluid_layers_argmin: + +argmin +------------------------------- + +.. py:function:: paddle.fluid.layers.argmin(x, axis=0) + + + + +**argmin** + +该OP沿 ``axis`` 计算输入 ``x`` 的最小元素的索引。 + +参数: + - **x** (Variable) - 输入的多维 ``Tensor`` ,支持的数据类型:float32、float64、int8、int16、int32、int64。 + - **axis** (int,可选) - 指定对输入Tensor进行运算的轴, ``axis`` 的有效范围是[-R, R),R是输入 ``x`` 的Rank, ``axis`` 为负时与 ``axis`` +R 等价。默认值为0。 + +返回: ``Tensor`` ,数据类型int64 + +返回类型:Variable + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + in1 = np.array([[[5,8,9,5], + [0,0,1,7], + [6,9,2,4]], + [[5,2,4,2], + [4,7,7,9], + [1,7,0,6]]]) + with fluid.dygraph.guard(): + x = fluid.dygraph.to_variable(in1) + out1 = fluid.layers.argmin(x=x, axis=-1) + out2 = fluid.layers.argmin(x=x, axis=0) + out3 = fluid.layers.argmin(x=x, axis=1) + out4 = fluid.layers.argmin(x=x, axis=2) + print(out1.numpy()) + # [[0 0 2] + # [1 0 2]] + print(out2.numpy()) + # [[0 1 1 1] + # [0 0 0 0] + # [1 1 1 0]] + print(out3.numpy()) + # [[1 1 1 2] + # [2 0 2 0]] + print(out4.numpy()) + # [[0 0 2] + # [1 0 2]] diff --git a/doc/paddle/api/paddle/fluid/layers/argsort_cn.rst b/doc/paddle/api/paddle/fluid/layers/argsort_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..de767e633872b78852f7b06eb4a634d093b3a640 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/argsort_cn.rst @@ -0,0 +1,69 @@ +.. _cn_api_fluid_layers_argsort: + +argsort +------------------------------- + +.. py:function:: paddle.fluid.layers.argsort(input,axis=-1,descending=False,name=None) + + + + +对输入变量沿给定轴进行排序,输出排序好的数据和相应的索引,其维度和输入相同。**默认升序排列,如果需要降序排列设置** ``descending=True`` 。 + + +参数: + - **input** (Variable) - 输入的多维 ``Tensor`` ,支持的数据类型:float32、float64、int16、int32、int64、uint8。 + - **axis** (int,可选) - 指定对输入Tensor进行运算的轴, ``axis`` 的有效范围是[-R, R),R是输入 ``x`` 的Rank, ``axis`` 为负时与 ``axis`` +R 等价。默认值为0。 + - **descending** (bool,可选) - 指定算法排序的方向。如果设置为True,算法按照降序排序。如果设置为False或者不设置,按照升序排序。默认值为False。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:一组已排序的输出(与 ``input`` 维度相同、数据类型相同)和索引(数据类型为int64)。 + +返回类型:tuple[Variable] + +**代码示例**: + +.. 
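code-block:: python

    # 注:以下为补充示意(仅作参考),演示 descending=True 时的降序排列,输入数值为假设值
    import paddle.fluid as fluid
    import numpy as np

    in2 = np.array([3, 1, 2]).astype(np.float32)
    with fluid.dygraph.guard():
        x = fluid.dygraph.to_variable(in2)
        out, indices = fluid.layers.argsort(input=x, axis=-1, descending=True)
        print(out.numpy())      # [3. 2. 1.]
        print(indices.numpy())  # [0 2 1]

.. 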
code-block:: python + + import paddle.fluid as fluid + import numpy as np + + in1 = np.array([[[5,8,9,5], + [0,0,1,7], + [6,9,2,4]], + [[5,2,4,2], + [4,7,7,9], + [1,7,0,6]]]).astype(np.float32) + with fluid.dygraph.guard(): + x = fluid.dygraph.to_variable(in1) + out1 = fluid.layers.argsort(input=x, axis=-1) # same as axis==2 + out2 = fluid.layers.argsort(input=x, axis=0) + out3 = fluid.layers.argsort(input=x, axis=1) + print(out1[0].numpy()) + # [[[5. 5. 8. 9.] + # [0. 0. 1. 7.] + # [2. 4. 6. 9.]] + # [[2. 2. 4. 5.] + # [4. 7. 7. 9.] + # [0. 1. 6. 7.]]] + print(out1[1].numpy()) + # [[[0 3 1 2] + # [0 1 2 3] + # [2 3 0 1]] + # [[1 3 2 0] + # [0 1 2 3] + # [2 0 3 1]]] + print(out2[0].numpy()) + # [[[5. 2. 4. 2.] + # [0. 0. 1. 7.] + # [1. 7. 0. 4.]] + # [[5. 8. 9. 5.] + # [4. 7. 7. 9.] + # [6. 9. 2. 6.]]] + print(out3[0].numpy()) + # [[[0. 0. 1. 4.] + # [5. 8. 2. 5.] + # [6. 9. 9. 7.]] + # [[1. 2. 0. 2.] + # [4. 7. 4. 6.] + # [5. 7. 7. 9.]]] diff --git a/doc/paddle/api/paddle/fluid/layers/array_length_cn.rst b/doc/paddle/api/paddle/fluid/layers/array_length_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..27fcaf003b8dab728cded431ae24e525d9e4cb47 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/array_length_cn.rst @@ -0,0 +1,51 @@ +.. _cn_api_fluid_layers_array_length: + +array_length +------------------------------- + +.. py:function:: paddle.fluid.layers.array_length(array) + + + + +该OP用于获取输入数组 :ref:`cn_api_fluid_LoDTensorArray` 的长度。可以与 :ref:`cn_api_fluid_layers_array_read` 、 :ref:`cn_api_fluid_layers_array_write` 、 :ref:`cn_api_fluid_layers_While` OP结合使用,实现LoDTensorArray的遍历与读写。 + +参数: + - **array** (LoDTensorArray) - 输入的数组LoDTensorArray + +返回:shape为[1]的1-D Tensor, 表示数组LoDTensorArray的长度,数据类型为int64 + +返回类型:Variable + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + tmp = fluid.layers.zeros(shape=[10], dtype='int32') + i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10) + #tmp是shape为[10]的Tensor,将tmp写入到数组arr下标为10的位置,arr的长度因此为11 + arr = fluid.layers.array_write(tmp, i=i) + #查看arr的长度 + arr_len = fluid.layers.array_length(arr) + + #可以通过executor打印出LoDTensorArray的长度 + input = fluid.layers.Print(arr_len, message="The length of LoDTensorArray:") + main_program = fluid.default_main_program() + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(main_program) + +**运行输出** + +.. code-block:: python + + 1569576542 The length of LoDTensorArray: The place is:CPUPlace + Tensor[array_length_0.tmp_0] + shape: [1,] + dtype: l + data: 11, + + #输出shape为[1]的Tensor,值为11,表示LoDTensorArray长度为11 + #dtype为对应C++数据类型,在不同环境下可能显示值不同,但本质一致 + #例如:如果Tensor中数据类型是int64,则对应的C++数据类型为int64_t,所以dtype值为typeid(int64_t).name(), + # 其在MacOS下为'x',linux下为'l',Windows下为'__int64',都表示64位整型变量 diff --git a/doc/paddle/api/paddle/fluid/layers/array_read_cn.rst b/doc/paddle/api/paddle/fluid/layers/array_read_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6e8cd4fb70ec2f31bbb93822883c0cfc491cb691 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/array_read_cn.rst @@ -0,0 +1,66 @@ +.. _cn_api_fluid_layers_array_read: + +array_read +------------------------------- + +.. 
py:function:: paddle.fluid.layers.array_read(array,i) + + + + +该OP用于读取输入数组 :ref:`cn_api_fluid_LoDTensorArray` 中指定位置的数据, ``array`` 为输入的数组, ``i`` 为指定的读取位置。常与 :ref:`cn_api_fluid_layers_array_write` OP配合使用进行LoDTensorArray的读写。 + +例1: +:: + 输入: + 包含4个Tensor的LoDTensorArray,前3个shape为[1],最后一个shape为[1,2]: + input = ([0.6], [0.1], [0.3], [0.4, 0.2]) + 并且: + i = [3] + + 输出: + output = [0.4, 0.2] + +参数: + - **array** (Variable) - 输入的数组LoDTensorArray + - **i** (Variable) - shape为[1]的1-D Tensor,表示从 ``array`` 中读取数据的位置,数据类型为int64 + + +返回:从 ``array`` 中指定位置读取的LoDTensor或Tensor + +返回类型:Variable + +**代码示例** + +.. code-block:: python + + #先创建一个LoDTensorArray,再在指定位置写入Tensor,然后从该位置读取Tensor + import paddle.fluid as fluid + arr = fluid.layers.create_array(dtype='float32') + tmp = fluid.layers.fill_constant(shape=[3, 2], dtype='int64', value=5) + i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10) + #tmp是shape为[3,2]的Tensor,将其写入空数组arr的下标10的位置,则arr的长度变为11 + arr = fluid.layers.array_write(tmp, i, array=arr) + #读取arr的下标10的位置的数据 + item = fluid.layers.array_read(arr, i) + + #可以通过executor打印出该数据 + input = fluid.layers.Print(item, message="The LoDTensor of the i-th position:") + main_program = fluid.default_main_program() + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(main_program) + +**输出结果** + +.. code-block:: python + + 1569588169 The LoDTensor of the i-th position: The place is:CPUPlace + Tensor[array_read_0.tmp_0] + shape: [3,2,] + dtype: l + data: 5,5,5,5,5,5, + + #输出了shape为[3,2]的Tensor + #dtype为对应C++数据类型,在不同环境下可能显示值不同,但本质一致 + #例如:如果Tensor中数据类型是int64,则对应的C++数据类型为int64_t,所以dtype值为typeid(int64_t).name(), + # 其在MacOS下为'x',linux下为'l',Windows下为'__int64',都表示64位整型变量 diff --git a/doc/paddle/api/paddle/fluid/layers/array_write_cn.rst b/doc/paddle/api/paddle/fluid/layers/array_write_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4f59d605093261366a4fea7d2e48dd9b4011158c --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/array_write_cn.rst @@ -0,0 +1,51 @@ +.. _cn_api_fluid_layers_array_write: + +array_write +------------------------------- + +.. py:function:: paddle.fluid.layers.array_write(x, i, array=None) + + + + +该OP将输入的变量 ``x`` 写入到数组 :ref:`cn_api_fluid_LoDTensorArray` ``array`` 的第i个位置,并返回修改后的LoDTensorArray,如果 ``array`` 为None,则创建一个新的LoDTensorArray。常与 :ref:`cn_api_fluid_layers_array_read` OP联合使用对LoDTensorArray进行读写。 + +参数: + - **x** (Variable) – 待写入的数据,多维Tensor或LoDTensor,数据类型支持float32,float64,int32,int64 + - **i** (Variable) – shape为[1]的1-D Tensor,表示写入到输出数组LoDTensorArray的位置,数据类型为int64 + - **array** (Variable,可选) – 指定写入 ``x`` 的数组LoDTensorArray。默认值为None, 此时将创建新的LoDTensorArray并作为结果返回 + +返回: 写入输入 ``x`` 之后的LoDTensorArray + +返回类型: Variable + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + tmp = fluid.layers.fill_constant(shape=[3, 2], dtype='int64', value=5) + i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10) + #将tmp写入数组arr下标为10的位置,并返回arr + arr = fluid.layers.array_write(tmp, i=i) + + #此时arr是长度为11的LoDTensorArray,可以通过array_read来查看下标为10的LoDTensor,并将之打印出来 + item = fluid.layers.array_read(arr, i=i) + input = fluid.layers.Print(item, message="The content of i-th LoDTensor:") + main_program = fluid.default_main_program() + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(main_program) + +**输出结果** + +.. 
code-block:: python + + 1570533133 The content of i-th LoDTensor: The place is:CPUPlace + Tensor[array_read_0.tmp_0] + shape: [3,2,] + dtype: l + data: 5,5,5,5,5,5, + #返回了shape为[3,2]的Tensor,即为上面写入的tmp + #dtype为对应C++数据类型,在不同环境下可能显示值不同,但本质一致 + #例如:如果Tensor中数据类型是int64,则对应的C++数据类型为int64_t,所以dtype值为typeid(int64_t).name(), + # 其在MacOS下为'x',linux下为'l',Windows下为'__int64',都表示64位整型变量 diff --git a/doc/paddle/api/paddle/fluid/layers/asin_cn.rst b/doc/paddle/api/paddle/fluid/layers/asin_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2b39afaa2a83424c3b435dbc53d8213195c72c00 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/asin_cn.rst @@ -0,0 +1,33 @@ +.. _cn_api_fluid_layers_asin: + +asin +------------------------------- + +.. py:function:: paddle.fluid.layers.asin(x, name=None) + + + + +arcsine函数。 + +.. math:: + out = sin^{-1}(x) + +参数: + - x (Tensor) - 输入的Tensor,数据类型为:float32、float64、float16。 + - name (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回:输出Tensor,与 ``x`` 维度相同、数据类型相同。 + +返回类型: Tensor + +**代码示例**: + +.. code-block:: python + + import paddle + paddle.disable_static() + x = paddle.to_tensor([-0.8183, 0.4912, -0.6444, 0.0371]) + res = paddle.asin(x) + print(res.numpy()) + # [-0.9585, 0.5135, -0.7003, 0.0372] diff --git a/doc/paddle/api/paddle/fluid/layers/assign_cn.rst b/doc/paddle/api/paddle/fluid/layers/assign_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..53e2efdf0f690f991dbc51f4ed7ba44eec0cf4a4 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/assign_cn.rst @@ -0,0 +1,31 @@ +.. _cn_api_fluid_layers_assign: + +assign +------------------------------- + +.. py:function:: paddle.fluid.layers.assign(input,output=None) + + + + +该OP将输入Tensor或numpy数组拷贝至输出Tensor。 + +参数: + - **input** (Variable|np.ndarray) - 输入Tensor或numpy数组,支持数据类型为float32, float64, int32, int64和bool。 + - **output** (Variable,可选) - 输出Tensor。如果为None,则创建一个新的Tensor作为输出Tensor,默认值为None。 + +返回:输出Tensor,形状、数据类型、数据值和 ``input`` 一致。 + +返回类型:Variable + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + data = fluid.layers.fill_constant(shape=[3, 2], value=2.5, dtype='float64') # [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]] + result1 = fluid.layers.create_tensor(dtype='float64') + fluid.layers.assign(data, result1) # result1 = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]] + result2 = fluid.layers.assign(data) # result2 = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]] + result3 = fluid.layers.assign(np.array([[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]], dtype='float32')) # result3 = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]] diff --git a/doc/paddle/api/paddle/fluid/layers/atan_cn.rst b/doc/paddle/api/paddle/fluid/layers/atan_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ce1e632e48961f668b4c5c5fa53a5ec70cea0730 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/atan_cn.rst @@ -0,0 +1,33 @@ +.. _cn_api_fluid_layers_atan: + +atan +------------------------------- + +.. py:function:: paddle.fluid.layers.atan(x, name=None) + + + + +arctangent函数。 + +.. math:: + out = tan^{-1}(x) + +参数: + - x (Tensor) - 输入的Tensor,数据类型为:float32、float64、float16。 + - name (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回:输出Tensor,与 ``x`` 维度相同、数据类型相同。 + +返回类型: Tensor + +**代码示例**: + +.. 
code-block:: python + + import paddle + paddle.disable_static() + x = paddle.to_tensor([-0.8183, 0.4912, -0.6444, 0.0371]) + res = paddle.atan(x) + print(res.numpy()) + # [-0.6858, 0.4566, -0.5724, 0.0371] diff --git a/doc/paddle/api/paddle/fluid/layers/autoincreased_step_counter_cn.rst b/doc/paddle/api/paddle/fluid/layers/autoincreased_step_counter_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..92d17ee1f0ce610ead80d882a26cc039d9d0c04b --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/autoincreased_step_counter_cn.rst @@ -0,0 +1,38 @@ +.. _cn_api_fluid_layers_autoincreased_step_counter: + +autoincreased_step_counter +------------------------------- + + +.. py:function:: paddle.fluid.layers.autoincreased_step_counter(counter_name=None, begin=1, step=1) + + + + +创建一个自增变量,每个迭代累加一次,默认首次返回值为 1,默认累加步长为 1。 + +参数: + - **counter_name** (str, 可选) - 该计数器的名称,默认为 ``@STEP_COUNTER@`` 。 + - **begin** (int) - 该计数器返回的第一个值。 + - **step** (int) - 累加步长。 + +返回:累加结果,数据类型为 int64 + +返回类型:Variable + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + global_step = fluid.layers.autoincreased_step_counter( + counter_name='@LR_DECAY_COUNTER@', begin=0, step=1) + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/batch_norm_cn.rst b/doc/paddle/api/paddle/fluid/layers/batch_norm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..08d3edab707ef638bbb41ea1d6ef6c508e359fa8 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/batch_norm_cn.rst @@ -0,0 +1,110 @@ +.. _cn_api_fluid_layers_batch_norm: + +batch_norm +------------------------------- + + +.. py:function:: paddle.fluid.layers.batch_norm(input, act=None, is_test=False, momentum=0.9, epsilon=1e-05, param_attr=None, bias_attr=None, data_layout='NCHW', in_place=False, name=None, moving_mean_name=None, moving_variance_name=None, do_model_average_for_mean_and_var=False, use_global_stats=False) + + + + +批正则化层(Batch Normalization Layer) + +可用作卷积和全连接操作的批正则化函数,根据当前批次数据按通道计算的均值和方差进行正则化。该层需要的数据格式如下: + +1.NHWC[batch,in_height,in_width,in_channels] +2.NCHW[batch,in_channels,in_height,in_width] + +更多详情请参考 : `Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift `_ + +``input`` 是mini-batch的输入。 + +.. math:: + \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \qquad &//\ + \ mini-batch\ mean \\ + \sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i - \mu_{\beta})^2 \qquad &//\ + \ mini-batch\ variance \\ + \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\sigma_{\beta}^{2} + \epsilon}} \qquad &//\ normalize \\ + y_i &\gets \gamma \hat{x_i} + \beta \qquad &//\ scale\ and\ shift + + moving\_mean = moving\_mean * momentum + mini\_batch\_mean * (1. - momentum) \\ + moving\_variance = moving\_variance * momentum + mini\_batch\_var * (1. - momentum) + +moving_mean和moving_var是训练过程中统计得到的全局均值和方差,在预测或者评估中使用。 +`is_test` 参数只能用于测试或者评估阶段,如果想在训练阶段使用预训练模型的全局均值和方差的话,可以设置 `use_global_stats=True`. + +当use_global_stats = True时, :math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 不是一个minibatch的统计数据。 它们是全局(或运行)统计数据(moving_mean和moving_variance),通常来自预先训练好的模型。训练和测试(或预测)具有相同的行为: + +.. 
math:: + + \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\ + \sigma_{\beta}^{2} + \epsilon}} \\ + y_i &\gets \gamma \hat{x_i} + \beta + + + +参数: + - **input** (Variable) - batch_norm算子的输入特征,是一个Variable类型,输入维度可以是 2, 3, 4, 5。数据类型:flaot16, float32, float64。 + - **act** (string)- 激活函数类型,可以是leaky_realu、relu、prelu等。默认:None。 + - **is_test** (bool) - 指示它是否在测试阶段,非训练阶段使用训练过程中统计到的全局均值和全局方差。默认:False。 + - **momentum** (float|Variable)- 此值用于计算 moving_mean 和 moving_var,是一个float类型或者一个shape为[1],数据类型为float32的Variable类型。更新公式为: :math:`moving\_mean = moving\_mean * momentum + new\_mean * (1. - momentum)` , :math:`moving\_var = moving\_var * momentum + new\_var * (1. - momentum)` , 默认:0.9。 + - **epsilon** (float)- 加在分母上为了数值稳定的值。默认:1e-5。 + - **param_attr** (ParamAttr|None) :指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。batch_norm算子默认的权重初始化是1.0。 + - **bias_attr** (ParamAttr|None)- 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。batch_norm算子默认的偏置初始化是0.0。 + - **data_layout** (string) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 + - **in_place** (bool)- batch_norm的输出复用输入的tensor,可以节省显存。默认:False。 + - **name** (str|None) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值为None。 + - **moving_mean_name** (string)- moving_mean的名称,存储全局均值。如果将其设置为None, ``batch_norm`` 将随机命名全局均值;否则, ``batch_norm`` 将命名全局均值为 ``moving_mean_name`` 。默认:None。 + - **moving_variance_name** (string)- moving_variance的名称,存储全局变量。如果将其设置为None, ``batch_norm`` 将随机命名全局方差;否则, ``batch_norm`` 将命名全局方差为 ``moving_variance_name`` 。默认:None。 + - **do_model_average_for_mean_and_var** (bool,默认False)- 是否为mean和variance做模型均值。 + - **use_global_stats** (bool) – 是否使用全局均值和方差。 在预测或测试模式下,将use_global_stats设置为true或将is_test设置为true,并且行为是等效的。 在训练模式中,当设置use_global_stats为True时,在训练期间也使用全局均值和方差。默认:False。 + +返回: 维度和输入相同的Tensor,在输入中运用批正则后的结果。 + +返回类型:Variable + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + x = fluid.layers.data(name='x', shape=[3, 7, 3, 7], dtype='float32', append_batch_size=False) + hidden1 = fluid.layers.fc(input=x, size=200) + param_attr = fluid.ParamAttr(name='batch_norm_w', initializer=fluid.initializer.Constant(value=1.0)) + bias_attr = fluid.ParamAttr(name='batch_norm_b', initializer=fluid.initializer.Constant(value=0.0)) + hidden2 = fluid.layers.batch_norm(input=hidden1, param_attr = param_attr, bias_attr = bias_attr) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + np_x = np.random.random(size=(3, 7, 3, 7)).astype('float32') + output = exe.run(feed={"x": np_x}, fetch_list = [hidden2]) + print(output) + +.. 
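code-block:: python + + # 补充示例(仅供说明,非原文示例):预测/评估阶段设置 is_test=True、use_global_stats=True, + # 使用训练期间统计得到的全局均值和方差(此处假设相应参数已训练完毕并加载) + import paddle.fluid as fluid + x = fluid.data(name='x', shape=[3, 7, 3, 7], dtype='float32') + hidden1 = fluid.layers.fc(input=x, size=200) + hidden2 = fluid.layers.batch_norm(input=hidden1, is_test=True, use_global_stats=True) + +.. 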
code-block:: python + + # batch_norm with momentum as Variable + import paddle.fluid as fluid + import paddle.fluid.layers.learning_rate_scheduler as lr_scheduler + + def get_decay_momentum(momentum_init, decay_steps, decay_rate): + global_step = lr_scheduler._decay_step_counter() + momentum = fluid.layers.create_global_var( + shape=[1], + value=float(momentum_init), + dtype='float32', + # set persistable for save checkpoints and resume + persistable=True, + name="momentum") + div_res = global_step / decay_steps + decayed_momentum = momentum_init * (decay_rate**div_res) + fluid.layers.assign(decayed_momentum, momentum) + + return momentum + + x = fluid.data(name='x', shape=[3, 7, 3, 7], dtype='float32') + hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w') + momentum = get_decay_momentum(0.9, 1e5, 0.9) + hidden2 = fluid.layers.batch_norm(input=hidden1, momentum=momentum) + diff --git a/doc/paddle/api/paddle/fluid/layers/beam_search_cn.rst b/doc/paddle/api/paddle/fluid/layers/beam_search_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d1d43f6b3fb5b22b92d9e27c3e572e4c236aed08 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/beam_search_cn.rst @@ -0,0 +1,81 @@ +.. _cn_api_fluid_layers_beam_search: + +beam_search +------------------------------- + +.. py:function:: paddle.fluid.layers.beam_search(pre_ids, pre_scores, ids, scores, beam_size, end_id, level=0, is_accumulated=True, name=None, return_parent_idx=False) + + + + +束搜索(Beam search)是在机器翻译等生成任务中选择候选词的一种经典算法 + +更多细节参考 `Beam Search `_ + +**该OP仅支持LoDTensor**,在计算产生得分之后使用,完成单个时间步内的束搜索。具体而言,在计算部分产生 ``ids`` 和 ``scores`` 后,对于每个源句(样本)该OP从 ``ids`` 中根据其对应的 ``scores`` 选择当前时间步 top-K (``K`` 是 ``beam_size``)的候选词id。而 ``pre_id`` 和 ``pre_scores`` 是上一时间步 ``beam_search`` 的输出,加入输入用于特殊处理到达结束的翻译候选。 + +注意,如果 ``is_accumulated`` 为 True,传入的 ``scores`` 应该是累积分数。反之,``scores`` 是单步得分,会在该OP内被转化为log值并累积到 ``pre_scores`` 作为最终得分。如需使用长度惩罚,应在计算累积分数前使用其他OP完成。 + +束搜索的完整用法请参阅以下示例: + + fluid/tests/book/test_machine_translation.py + + + +参数: + - **pre_ids** (Variable) - LoD level为2的LodTensor,表示前一时间步选择的候选id,是前一时间步 ``beam_search`` 的输出。第一步时,其形状应为为 :math:`[batch\_size,1]` , lod应为 :math:`[[0,1,...,batch\_size],[0,1,...,batch\_size]]` 。数据类型为int64。 + - **pre_scores** (Variable) - 维度和LoD均与 ``pre_ids`` 相同的LodTensor,表示前一时间步所选id对应的累积得分,是前一时间步 ``beam_search`` 的输出。数据类型为float32。 + - **ids** (None|Variable) - 包含候选id的LodTensor。LoD应与 ``pre_ids`` 相同,形状为 :math:`[batch\_size \times beam\_size,K]` ,其中第一维大小与 ``pre_ids`` 相同且``batch_size`` 会随样本到达结束而自动减小, ``K`` 应该大于 ``beam_size`` 。数据类型为int64。可为空,为空时使用 ``scores`` 上的索引作为id。 + - **scores** (Variable) - 表示 ``ids`` 对应的累积分数的LodTensor变量, 维度和LoD均与 ``ids`` 相同。 + - **beam_size** (int) - 指明束搜索中的束宽度。 + - **end_id** (int) - 指明标识序列结束的id。 + - **level** (int,可选) - **可忽略,当前不能更改** 。知道LoD level为2即可,两层lod的意义如下: 第一级表示每个源句(样本)包含的beam大小,若满足结束条件(达到 ``beam_size`` 个结束)则变为0;第二级是表示每个beam被选择的次数。 + - **is_accumulated** (bool,可选) - 指明输入分数 ``scores`` 是否为累积分数,默认为True。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + - **return_parent_idx** (bool,可选) - 指明是否返回一个额外的Tensor,该Tensor保存了选择的id的父节点(beam)在 ``pre_id`` 中索引,可用于通过gather OP更新其他Tensor的内容。默认为False。 + + +返回:Variable的二元组或三元组。二元组中包含了当前时间步选择的id和对应的累积得分两个LodTensor,形状相同且均为 :math:`[batch\_size×beam\_size,1]` ,LoD相同且level均为2,数据类型分别为int64和float32;若 ``return_parent_idx`` 为True时为三元组,多返回一个保存了父节点在 ``pre_id`` 中索引的Tensor,形状为 :math:`[batch\_size \times beam\_size]` ,数据类型为int64。 + +返回类型:tuple + +**代码示例** + +.. 
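code-block:: python + + # 补充示例(仅供说明,非原文示例):当 scores 传入的是单步概率而非累积得分时, + # 设置 is_accumulated=False,该OP内部会取 log 并累加到 pre_scores 上 + import paddle.fluid as fluid + + beam_size = 4 + end_id = 1 + pre_ids = fluid.layers.data( + name='pre_id2', shape=[1], lod_level=2, dtype='int64') + pre_scores = fluid.layers.data( + name='pre_scores2', shape=[1], lod_level=2, dtype='float32') + probs = fluid.layers.data( + name='probs2', shape=[10000], dtype='float32') + topk_scores, topk_indices = fluid.layers.topk(probs, k=beam_size) + selected_ids, selected_scores = fluid.layers.beam_search( + pre_ids=pre_ids, + pre_scores=pre_scores, + ids=topk_indices, + scores=topk_scores, + beam_size=beam_size, + end_id=end_id, + is_accumulated=False) + +.. 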
code-block:: python + + import paddle.fluid as fluid + + # 假设 `probs` 包含计算神经元所得的预测结果 + # `pre_ids` 和 `pre_scores` 为beam_search之前时间步的输出 + beam_size = 4 + end_id = 1 + pre_ids = fluid.layers.data( + name='pre_id', shape=[1], lod_level=2, dtype='int64') + pre_scores = fluid.layers.data( + name='pre_scores', shape=[1], lod_level=2, dtype='float32') + probs = fluid.layers.data( + name='probs', shape=[10000], dtype='float32') + topk_scores, topk_indices = fluid.layers.topk(probs, k=beam_size) + accu_scores = fluid.layers.elementwise_add( + x=fluid.layers.log(x=topk_scores), + y=fluid.layers.reshape( + pre_scores, shape=[-1]), + axis=0) + selected_ids, selected_scores = fluid.layers.beam_search( + pre_ids=pre_ids, + pre_scores=pre_scores, + ids=topk_indices, + scores=accu_scores, + beam_size=beam_size, + end_id=end_id) + + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/beam_search_decode_cn.rst b/doc/paddle/api/paddle/fluid/layers/beam_search_decode_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..21ee97b0f36c75ff933b7ae7f039d606c8e9e0a1 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/beam_search_decode_cn.rst @@ -0,0 +1,59 @@ +.. _cn_api_fluid_layers_beam_search_decode: + +beam_search_decode +------------------------------- + +.. py:function:: paddle.fluid.layers.beam_search_decode(ids, scores, beam_size, end_id, name=None) + + + + +该OP用在整个束搜索(Beam search)结束后,通过沿 ``ids`` 中保存的搜索路径回溯,为每个源句(样本)构造完整的beam search结果序列并保存在LoDTensor中。LoDTensor的格式和解析方式如下: + +:: + + + 若 lod = [[0, 3, 6], [0, 12, 24, 40, 54, 67, 82]] + 从第一层LoD的内容可以得出:包含两个样本,每个样本均对应了3个(等于束的宽度)生成序列 + 从第二层LoD的内容可以得出:第一个样本对应的三个序列的长度分别为12, 12, 16,第一个样本对应的三个序列的长度分别为14, 13, 15。 + + +完整用法请参阅下面的使用示例: + + :: + + fluid/tests/book/test_machine_translation.py + +参数: + - **id** (Variable) - 保存了每个时间步选择的id(beam_search OP的输出)的LoDTensorArray。其中每个LoDTensor的数据类型为int64,LoD level为2,LoD中保存了搜索路径信息。 + - **score** (Variable) - 保存了每个时间步选择的id所对应累积得分(beam_search OP的输出)的LoDTensorArray,和 ``id`` 具有相同大小。其中每个LoDTensor要和 ``id`` 中相应LoDTensor具有相同的形状和LoD,表示其对应的累积得分。数据类型为float32。 + - **beam_size** (int) - 指示束搜索中波束的宽度。 + - **end_id** (int) - 指明标识序列结束的id。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回: Variable的二元组, 包含了完整id序列和对应的累积得分两个LodTensor,数据类型分别为int64和float32,形状相同且均展开为1维,LoD相同且level均为2。根据两层LoD可分别得到每个源句(样本)有多少个生成序列和每个序列有多少个id。 + +返回类型: tuple + + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + # 假设 `ids` 和 `scores` 为LoDTensorArray类型的Variable,它们保留了 + # 所有时间步选择出的id和score + ids = fluid.layers.create_array(dtype='int64') + scores = fluid.layers.create_array(dtype='float32') + finished_ids, finished_scores = fluid.layers.beam_search_decode( + ids, scores, beam_size=5, end_id=0) + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/bilinear_tensor_product_cn.rst b/doc/paddle/api/paddle/fluid/layers/bilinear_tensor_product_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b8a01b3b0b3487c4407f1e27fe818f48e0d62452 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/bilinear_tensor_product_cn.rst @@ -0,0 +1,50 @@ +.. _cn_api_fluid_layers_bilinear_tensor_product: + +bilinear_tensor_product +------------------------------- + + +.. py:function:: paddle.fluid.layers.bilinear_tensor_product(x, y, size, act=None, name=None, param_attr=None, bias_attr=None) + + + + +该层对两个输入执行双线性张量积。 + +例如: + +.. 
math:: + out_{i} = x * W_{i} * {y^\mathrm{T}}, i=0,1,...,size-1 + +在这个公式中: + - :math:`x`: 第一个输入,包含 :math:`M` 个元素,形状为 [batch_size, M]。 + - :math:`y`: 第二个输入,包含 :math:`N` 个元素,形状为 [batch_size, N]。 + - :math:`W_{i}`: 第 :math:`i` 个被学习的权重,形状是 [M, N]。 + - :math:`out_{i}`: 输出的第 :math:`i` 个元素,形状是 [batch_size, size]。 + - :math:`y^\mathrm{T}`: :math:`y` 的转置。 + +参数: + - **x** (Variable): 2-D 输入张量,形状为 [batch_size, M],数据类型为 float32 或 float64。 + - **y** (Variable): 2-D 输入张量,形状为 [batch_size, N],数据类型与 **x** 一致。 + - **size** (int): 此层的维度。 + - **act** (str, 可选): 应用到该层输出的激活函数。 + - **name** (str,可选) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值为 None。 + - **param_attr** (ParamAttr,可选) :指定权重参数属性的对象。默认值为 None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr,可选) : 指定偏置参数属性的对象。默认值为 None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + +返回: 一个形为 [batch_size, size] 的 2-D 张量。 + +返回类型:Variable + +**代码示例:** + +.. code-block:: python + + import paddle.fluid as fluid + layer1 = fluid.layers.data("t1", shape=[-1, 5], dtype="float32") + layer2 = fluid.layers.data("t2", shape=[-1, 4], dtype="float32") + tensor = fluid.layers.bilinear_tensor_product(x=layer1, y=layer2, size=1000) + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/bipartite_match_cn.rst b/doc/paddle/api/paddle/fluid/layers/bipartite_match_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6d12015c5ab213a101cc499901c0dafd0b6f329d --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/bipartite_match_cn.rst @@ -0,0 +1,43 @@ +.. _cn_api_fluid_layers_bipartite_match: + +bipartite_match +------------------------------- + +.. py:function:: paddle.fluid.layers.bipartite_match(dist_matrix, match_type=None, dist_threshold=None, name=None) + + + + +该OP实现了贪心二分匹配算法,该算法根据输入的距离矩阵得到距离最大的匹配。对于输入的二维矩阵,二分匹配算法可以找到每一行的匹配列(匹配意味着距离最大),也可以找到每列的匹配行。此算子仅计算列到行的匹配索引。对于每个实例,匹配索引的数量等于输入距离矩阵的列数。**该OP仅支持CPU** + +它有两个输出,匹配的索引和距离。简单的描述是该算法将最佳(最大距离)行实体与列实体匹配,并且匹配的索引在ColToRowMatchIndices的每一行中不重复。如果列实体与任何行实体不匹配,则ColToRowMatchIndices设置为-1。 + +注意:输入距离矩阵可以是LoDTensor(带有LoD)或Tensor。如果LoDTensor带有LoD,则ColToRowMatchIndices的高度是批量大小。如果是Tensor,则ColToRowMatchIndices的高度为1。 + +注意:此API是一个非常低级别的API。它由 ``ssd_loss`` 层使用。请考虑使用 ``ssd_loss`` 。 + +参数: + - **dist_matrix** (Variable)- 维度为[K,M]的2-D LoDTensor,数据类型为float32或float64。它是由每行和每列来表示实体之间的成对距离矩阵。例如,假设一个实体是具有形状[K]的A,另一个实体是具有形状[M]的B,dist_matrix[i][j]是A[i]和B[j]之间的距离。距离越大,匹配越好。注意:此张量可以包含LoD信息以表示一批输入。该批次的一个实例可以包含不同数量的实体。 + - **match_type** (str,可选)- 匹配方法的类型,应为'bipartite'或'per_prediction'。默认值为None,即'bipartite'。 + - **dist_threshold** (float32,可选)- 如果match_type为'per_prediction',则此阈值用于根据最大距离确定额外匹配的bbox,默认值为None,即0.5。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回: + + - matched_indices(Variable)- 维度为[N,M]的2-D Tensor,数据类型为int32。N是批量大小。如果match_indices[i][j]为-1,则表示B[j]与第i个实例中的任何实体都不匹配。否则,表示在第i个实例中B[j]与行match_indices[i][j]匹配,第i个实例的行号保存在match_indices[i][j]中。 + - matched_distance(Variable)- 维度为[N,M]的2-D Tensor,数据类型为float32。N是批量大小。如果match_indices[i][j]为-1,则match_distance[i][j]也为-1.0。否则,假设match_distance[i][j]=d,并且每个实例的行偏移称为LoD,则match_distance[i][j]=dist_matrix[d+LoD[i]][j]。 + + +返回类型:Tuple + + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + x = fluid.data(name='x', shape=[None, 4], dtype='float32') + y = fluid.data(name='y', shape=[None, 4], dtype='float32') + iou = fluid.layers.iou_similarity(x=x, y=y) + matched_indices, matched_dist = fluid.layers.bipartite_match(iou) diff --git a/doc/paddle/api/paddle/fluid/layers/box_clip_cn.rst b/doc/paddle/api/paddle/fluid/layers/box_clip_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..de7c50d07b7efd4ba41c96f28ed982ed8e498f41 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/box_clip_cn.rst @@ -0,0 +1,47 @@ +.. _cn_api_fluid_layers_box_clip: + +box_clip +------------------------------- + +.. py:function:: paddle.fluid.layers.box_clip(input, im_info, name=None) + + + + +将检测框框剪切为 ``im_info`` 给出的大小。对于每个输入框,公式如下: + +:: + + xmin = max(min(xmin, im_w - 1), 0) + ymin = max(min(ymin, im_h - 1), 0) + xmax = max(min(xmax, im_w - 1), 0) + ymax = max(min(ymax, im_h - 1), 0) + +其中im_w和im_h是通过im_info计算的: + +:: + + im_h = round(height / scale) + im_w = round(weight / scale) + + +参数: + - **input** (Variable) – 维度为[N_1, N_2, ..., N_k, 4]的多维Tensor,其中最后一维为box坐标维度。数据类型为float32或float64。 + - **im_info** (Variable) – 维度为[N, 3]的2-D Tensor,N为输入图片个数。具有(高度height,宽度width,比例scale)图像的信息,其中高度和宽度是输入大小,比例是输入大小和原始大小的比率。数据类型为float32或float64。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回: 表示剪切后的检测框的Tensor或LoDTensor,数据类型为float32或float64,形状与输入检测框相同 + +返回类型:Variable + + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + boxes = fluid.data( + name='boxes', shape=[None, 8, 4], dtype='float32', lod_level=1) + im_info = fluid.data(name='im_info', shape=[None, 3]) + out = fluid.layers.box_clip( + input=boxes, im_info=im_info) diff --git a/doc/paddle/api/paddle/fluid/layers/box_coder_cn.rst b/doc/paddle/api/paddle/fluid/layers/box_coder_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1bf1b4eeab83dd15187c37caea39c93f98ce00f8 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/box_coder_cn.rst @@ -0,0 +1,93 @@ +.. _cn_api_fluid_layers_box_coder: + +box_coder +------------------------------- + +.. py:function:: paddle.fluid.layers.box_coder(prior_box, prior_box_var, target_box, code_type='encode_center_size', box_normalized=True, name=None, axis=0) + + + + +Bounding Box Coder + +编码/解码带有先验框信息的目标边界框 + +编码规则描述如下: + +.. math:: + + ox &= (tx - px)/pw/pxv + + oy &= (ty - py)/ph/pyv + + ow &= log(abs(tw/pw))/pwv + + oh &= log(abs(th/ph))/phv + +解码规则描述如下: + +.. 
math:: + + ox &= (pw * pxv * tx + px) - tw/2 + + oy &= (ph * pyv * ty + py) - th/2 + + ow &= exp(pwv * tw) * pw + tw/2 + + oh &= exp(phv * th) * ph + th/2 + +其中tx,ty,tw,th分别表示目标框的中心坐标、宽度和高度。同样地,px,py,pw,ph表示先验框的中心坐标、宽度和高度。pxv,pyv,pwv,phv表示先验框变量,ox,oy,ow,oh表示编码/解码坐标、宽度和高度。 + + +在Box Decoding期间,支持两种broadcast模式。假设目标框具有形状[N,M,4],并且prior框的形状可以是[N,4]或[M,4],则prior框将沿指定的轴broadcast到目标框。 + + +参数: + - **prior_box** (Variable) - 维度为[M,4]的2-D Tensor,M表示存储M个框,数据类型为float32或float64。先验框,每个框代表[xmin,ymin,xmax,ymax],[xmin,ymin]是先验框的左顶点坐标,如果输入是图像特征图,则接近坐标原点。[xmax,ymax]是先验框的右底点坐标。 + - **prior_box_var** (list|Variable|None) - 支持三种输入类型,一是维度为 :math:`[M,4]` 的2-D Tensor,存储M个先验框的variance,数据类型为float32或float64。另外是一个长度为4的列表,所有先验框共用这个列表中的variance,数据类型为float32或float64。为None时不参与计算。 + - **target_box** (Variable) - 数据类型为float或double的Tensor或者LoDTensor,当code_type为‘encode_center_size’,输入是二维LoDTensor,维度为[N,4],N为目标框的个数,目标框的格式与先验框相同。当code_type为‘decode_center_size’,输入为3-D Tensor,维度为[N,M,4]。通常N表示产生检测框的个数,M表示类别数。此时目标框为偏移量。 + - **code_type** (str) - 对目标框使用的编码/解码类型,可以是encode_center_size或decode_center_size,默认值为 ``encode_center_size`` 。 + - **box_normalized** (bool) - 先验框坐标是否已归一化,即是否在[0, 1]区间内。默认值为True。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + - **axis** (int32) – 在PriorBox中为axis指定的轴broadcast以进行框解码,例如,如果axis为0,TargetBox具有形状[N,M,4]且PriorBox具有形状[M,4],则PriorBox将broadcast到[N,M,4]用于解码。仅在code_type为decode_center_size时有效。默认值为0。 + + +返回: + - 表示解码或编码结果的Tensor或LoDTensor。数据类型为float32或float64。 + - ``code_type`` 为 ``‘encode_center_size’`` 时,形状为[N,M,4]的编码结果,N为目标框的个数,M为先验框的个数。 + - ``code_type`` 为 ``‘decode_center_size’`` 时,形状为[N,M,4]的解码结果,形状与输入目标框相同。 + + +返回类型:Variable + + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + # For encode + prior_box_encode = fluid.data(name='prior_box_encode', + shape=[512, 4], + dtype='float32') + target_box_encode = fluid.data(name='target_box_encode', + shape=[81,4], + dtype='float32') + output_encode = fluid.layers.box_coder(prior_box=prior_box_encode, + prior_box_var=[0.1,0.1,0.2,0.2], + target_box=target_box_encode, + code_type="encode_center_size") + # For decode + prior_box_decode = fluid.data(name='prior_box_decode', + shape=[512, 4], + dtype='float32') + target_box_decode = fluid.data(name='target_box_decode', + shape=[512,81,4], + dtype='float32') + output_decode = fluid.layers.box_coder(prior_box=prior_box_decode, + prior_box_var=[0.1,0.1,0.2,0.2], + target_box=target_box_decode, + code_type="decode_center_size", + box_normalized=False, + axis=1) diff --git a/doc/paddle/api/paddle/fluid/layers/box_decoder_and_assign_cn.rst b/doc/paddle/api/paddle/fluid/layers/box_decoder_and_assign_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..87de42ed4c17e66a4987a6e0df837581b6b9820a --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/box_decoder_and_assign_cn.rst @@ -0,0 +1,65 @@ +.. _cn_api_fluid_layers_box_decoder_and_assign: + +box_decoder_and_assign +------------------------------- + +.. py:function:: paddle.fluid.layers.box_decoder_and_assign(prior_box, prior_box_var, target_box, box_score, box_clip, name=None) + + + + +边界框解码器。 + +根据先验框来解码目标边界框。 + +解码方案为: + +.. 
math:: + + ox &= (pw \times pxv \times tx + px) - \frac{tw}{2}\\ + oy &= (ph \times pyv \times ty + py) - \frac{th}{2}\\ + ow &= \exp (pwv \times tw) \times pw + \frac{tw}{2}\\ + oh &= \exp (phv \times th) \times ph + \frac{th}{2} + +其中tx,ty,tw,th分别表示目标框的中心坐标,宽度和高度。 类似地,px,py,pw,ph表示prior_box(anchor)的中心坐标,宽度和高度。 pxv,pyv,pwv,phv表示prior_box的variance,ox,oy,ow,oh表示decode_box中的解码坐标,宽度和高度。 + +box decode过程得出decode_box,然后分配方案如下所述: + +对于每个prior_box,使用最佳non-background(非背景)类的解码值来更新prior_box位置并获取output_assign_box。 因此,output_assign_box的形状与PriorBox相同。 + + + +参数: + - **prior_box** (Variable) - 维度为[N,4]的2-D Tensor,包含N个框,数据类型为float32或float64。每个框表示为[xmin,ymin,xmax,ymax], [xmin,ymin]是anchor框的左上坐标,如果输入是图像特征图,则它们接近坐标系的原点。 [xmax,ymax]是anchor框的右下坐标 + - **prior_box_var** (Variable) - 维度为[N,4]的2-D Tensor,包含N组variance。数据类型为float32或float64。 prior_box_var默认将所有元素设置为1 + - **target_box** (Variable) - 维度为[N,classnum * 4]的2-D Tensor或LoDTensor,拥有N个目标框,数据类型为float32或float64。 + - **box_score** (Variable) - 维度为[N,classnum]的2-D Tensor或LoDTensor,拥有N个目标框,数据类型为float32或float64。表示每个框属于各分类概率值。 + - **box_clip** (float32) - 裁剪框以防止溢出,默认值为4.135(即np.log(1000. / 16.)) + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + + +返回: + + - 表示解压检测框的Tensor或LoDTensor,数据类型为float32,float64。维度为[N,classnum * 4],N个prior_box解码得到的N个目标框的结果。 + - 表示输出最佳检测框的Tensor或LoDTensor,数据类型为float32,float64。维度为[N,4],N个prior_box解码后得到目标框,再选择最佳非背景类的目标框结果。 + + +返回类型:Tuple + + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + pb = fluid.data( + name='prior_box', shape=[None, 4], dtype='float32') + pbv = fluid.data( + name='prior_box_var', shape=[4], dtype='float32') + loc = fluid.data( + name='target_box', shape=[None, 4*81], dtype='float32') + scores = fluid.data( + name='scores', shape=[None, 81], dtype='float32') + decoded_box, output_assign_box = fluid.layers.box_decoder_and_assign( + pb, pbv, loc, scores, 4.135) + diff --git a/doc/paddle/api/paddle/fluid/layers/bpr_loss_cn.rst b/doc/paddle/api/paddle/fluid/layers/bpr_loss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ac3a365034e1f85aa045d855d72023e1cadd174b --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/bpr_loss_cn.rst @@ -0,0 +1,54 @@ +.. _cn_api_fluid_layers_bpr_loss: + +bpr_loss +------------------------------- + +.. py:function:: paddle.fluid.layers.bpr_loss(input, label, name=None) + + + + + +贝叶斯个性化排序损失函数(Bayesian Personalized Ranking Loss Operator ) + +该OP属于pairwise类型的损失函数。损失值由下式计算而得: + +.. math:: + + Y[i] = 1/(N[i] - 1) * \sum_j{\log(\sigma(X[i, Label[i]]-X[i, j]))} + +其中: + - :math:`X` :输入值,一个形为[T x D]的2-D Tensor, 此处为logit值。 + - :math:`N[i]` : 在时间步i的正例和负例的总和。 + - :math:`Label[i]` :在时间步i的正例下标。 + - :math:`\sigma` :激活函数。 + - :math:`Y` :输出值,一个形为[T x 1]的2-D Tensor。 + + +更多细节请参考 `Session Based Recommendations with Recurrent Neural Networks` + +参数: + - **input** (Variable) - 形为[T x D] , Tensor类型时T为batch大小,LoDTensor类型时T为mini-batch的总时间步。D 为正例加负例的个数。该输入为logits而非概率。数据类型是float32或float64。 + - **label** (Variable) - 形为[T x 1],表示input中正例的下标,数据类型为int64。。 + - **name** (None|str) – 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None。 + +返回: 形为[T x 1]的2D张量,数据类型同input相同,表示bpr损失值。 + +返回类型:Variable + +**代码示例:** + +.. 
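code-block:: python + + # 补充示意(仅帮助理解上面的公式,并非 bpr_loss 的实现): + # 取与下方示例相同的数值 predict=[0.1, 0.2, 0.3, 0.4]、label=[0] + import numpy as np + + x = np.array([0.1, 0.2, 0.3, 0.4], dtype=np.float32) # 一条样本的 logits + pos = x[0] # 正例得分 + neg = np.delete(x, 0) # 其余 3 个为负例 + sigmoid = lambda z: 1. / (1. + np.exp(-z)) + y = np.mean(np.log(sigmoid(pos - neg))) # 对应公式中 1/(N-1) * sum(log(sigmoid(...))) + print(y) # 注:实际作为损失使用时一般还会取负号 + +.. 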
code-block:: python + + import paddle.fluid as fluid + + neg_size = 3 + # label=[0] + label = fluid.layers.data( + name="label", shape=[1], dtype="int64") + # predict = [0.1, 0.2, 0.3, 0.4] + predict = fluid.layers.data( + name="predict", shape=[neg_size + 1], dtype="float32") + # bpr_Loss : label [0] 表示predict中下标0表示正例,即为0.1, 负例有3个为0.2,0.3,0.4 + cost = fluid.layers.bpr_loss(input=predict, label=label) + diff --git a/doc/paddle/api/paddle/fluid/layers/brelu_cn.rst b/doc/paddle/api/paddle/fluid/layers/brelu_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..545a942dafd5fd44f269742fb79ba7606c2d09e5 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/brelu_cn.rst @@ -0,0 +1,40 @@ +.. _cn_api_fluid_layers_brelu: + +brelu +------------------------------- + +.. py:function:: paddle.fluid.layers.brelu(x, t_min=0.0, t_max=24.0, name=None) + + + + + +BReLU 激活函数 + +.. math:: out=min(max(x,t\_min),t\_max) + +参数: + - **x** (Variable) - 该OP的输入为多维Tensor。数据类型为float32,float64。 + - **t_min** (float, 可选) - BRelu的最小值,默认值为0.0。 + - **t_max** (float, 可选) - BRelu的最大值,默认值为24.0。 + - **name** (str, 可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为None。 + +返回: 输出为Tensor,与 ``x`` 维度相同、数据类型相同。 + +返回类型: Variable + + +**代码示例:** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + input_brelu = np.array([[-1,6],[1,15.6]]) + with fluid.dygraph.guard(): + x = fluid.dygraph.to_variable(input_brelu) + y = fluid.layers.brelu(x, t_min=1.0, t_max=10.0) + print(y.numpy()) + #[[ 1. 6.] + #[ 1. 10.]] diff --git a/doc/paddle/api/paddle/fluid/layers/case_cn.rst b/doc/paddle/api/paddle/fluid/layers/case_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e86e4253d71fcc3f2a9a0b266c9f1c824c602ad3 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/case_cn.rst @@ -0,0 +1,73 @@ +.. _cn_api_fluid_layers_case: + +case +------------------------------- + + +.. py:function:: paddle.fluid.layers.case(pred_fn_pairs, default=None, name=None) + + + + +该OP的运行方式类似于python的if-elif-elif-else。 + +参数: + - **pred_fn_pairs** (list|tuple) - 一个list或者tuple,元素是二元组(pred, fn)。其中 ``pred`` 是形状为[1]的布尔型 Tensor,``fn`` 是一个可调用对象。所有的可调用对象都返回相同结构的Tensor。 + - **default** (callable,可选) - 可调用对象,返回一个或多个张量。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值:None。 + +返回:如果 ``pred_fn_pairs`` 中存在pred是True的元组(pred, fn),则返回第一个为True的pred的元组中fn的返回结果;如果 ``pred_fn_pairs`` 中不存在pred为True的元组(pred, fn) 且 ``default`` 不是None,则返回调用 ``default`` 的返回结果; +如果 ``pred_fn_pairs`` 中不存在pred为True的元组(pred, fn) 且 ``default`` 是None,则返回 ``pred_fn_pairs`` 中最后一个pred的返回结果。 + +返回类型:Variable|list(Variable) + +抛出异常: + - ``TypeError`` - 如果 ``pred_fn_pairs`` 的类型不是list或tuple。 + - ``TypeError`` - 如果 ``pred_fn_pairs`` 的元素的类型不是tuple。 + - ``TypeError`` - 如果 ``pred_fn_pairs`` 的tuple类型的元素大小不是2。 + - ``TypeError`` - 如果 ``pred_fn_pairs`` 中的2-tuple的第一个元素的类型不是Variable。 + - ``TypeError`` - 如果 ``pred_fn_pairs`` 中的2-tuple的第二个元素不是可调用对象。 + - ``TypeError`` - 当 ``default`` 不是None又不是可调用对象时。 + +**代码示例**: + +.. 
code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.layers as layers + + def fn_1(): + return layers.fill_constant(shape=[1, 2], dtype='float32', value=1) + + def fn_2(): + return layers.fill_constant(shape=[2, 2], dtype='int32', value=2) + + def fn_3(): + return layers.fill_constant(shape=[3], dtype='int32', value=3) + + main_program = fluid.default_startup_program() + startup_program = fluid.default_main_program() + with fluid.program_guard(main_program, startup_program): + x = layers.fill_constant(shape=[1], dtype='float32', value=0.3) + y = layers.fill_constant(shape=[1], dtype='float32', value=0.1) + z = layers.fill_constant(shape=[1], dtype='float32', value=0.2) + + pred_1 = layers.less_than(z, x) # true: 0.2 < 0.3 + pred_2 = layers.less_than(x, y) # false: 0.3 < 0.1 + pred_3 = layers.equal(x, y) # false: 0.3 == 0.1 + + # Call fn_1 because pred_1 is True + out_1 = layers.case( + pred_fn_pairs=[(pred_1, fn_1), (pred_2, fn_2)], default=fn_3) + + # Argument default is None and no pred in pred_fn_pairs is True. fn_3 will be called. + # because fn_3 is the last callable in pred_fn_pairs. + out_2 = layers.case(pred_fn_pairs=[(pred_2, fn_2), (pred_3, fn_3)]) + + exe = fluid.Executor(fluid.CPUPlace()) + res_1, res_2 = exe.run(main_program, fetch_list=[out_1, out_2]) + print(res_1) # [[1. 1.]] + print(res_2) # [3 3 3] + + + diff --git a/doc/paddle/api/paddle/fluid/layers/cast_cn.rst b/doc/paddle/api/paddle/fluid/layers/cast_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..08e0fbd7450665f3c5362b15a6940a85041f9627 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/cast_cn.rst @@ -0,0 +1,49 @@ +.. _cn_api_fluid_layers_cast: + +cast +------------------------------- + +.. py:function:: paddle.fluid.layers.cast(x,dtype) + + + + +该OP将 ``x`` 的数据类型转换为 ``dtype`` 并输出。支持输出和输入的数据类型相同。 + +参数: + - **x** (Variable) - 输入的多维Tensor或LoDTensor,支持的数据类型为:bool、float16、float32、float64、uint8、int32、int64。 + - **dtype** (str|np.dtype|core.VarDesc.VarType) - 输出Tensor的数据类型。支持的数据类型为:bool、float16、float32、float64、int8、int32、int64、uint8。 + +返回:Tensor或LoDTensor,维度与 ``x`` 相同,数据类型为 ``dtype`` + +返回类型:Variable + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + place = fluid.core.CPUPlace() + + # 构建网络 + x_lod = fluid.layers.data(name="x", shape=[1], lod_level=1) + cast_res1 = fluid.layers.cast(x=x_lod, dtype="uint8") + cast_res2 = fluid.layers.cast(x=x_lod, dtype=np.int32) + + # 创建CPU执行器 + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + + x_i_lod = fluid.core.LoDTensor() + x_i_lod.set(np.array([[1.3,-2.4],[0,4]]).astype("float32"), place) + x_i_lod.set_recursive_sequence_lengths([[0,2]]) + res1 = exe.run(fluid.default_main_program(), feed={'x':x_i_lod}, fetch_list=[cast_res1], return_numpy=False) + res2 = exe.run(fluid.default_main_program(), feed={'x':x_i_lod}, fetch_list=[cast_res2], return_numpy=False) + print(np.array(res1[0]), np.array(res1[0]).dtype) + # [[ 1 254] + # [ 0 4]] uint8 + print(np.array(res2[0]), np.array(res2[0]).dtype) + # [[ 1 -2] + # [ 0 4]] int32 diff --git a/doc/paddle/api/paddle/fluid/layers/ceil_cn.rst b/doc/paddle/api/paddle/fluid/layers/ceil_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a3cdb47c9c1d4ec4a101d22075a7af67b032d9a7 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/ceil_cn.rst @@ -0,0 +1,36 @@ +.. _cn_api_fluid_layers_ceil: + +ceil +------------------------------- + +.. 
py:function:: paddle.fluid.layers.ceil(x, name=None) + + + + +向上取整运算函数。 + +.. math:: + out = \left \lceil x \right \rceil + + + +参数: + - x (Tensor) - 输入的Tensor,数据类型为:float32、float64 、float16。 + - name (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回:输出Tensor,与 ``x`` 维度相同、数据类型相同。 + +返回类型: Tensor + +**代码示例**: + +.. code-block:: python + + import paddle + paddle.disable_static() + x = paddle.to_tensor([[-1.5,6], [1,15.6]]) + res = paddle.ceil(x) + print(res.numpy()) + # [[-1. 6.] + # [ 1. 16.]] diff --git a/doc/paddle/api/paddle/fluid/layers/center_loss_cn.rst b/doc/paddle/api/paddle/fluid/layers/center_loss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..fb13738745b329e9dcd3323fa404d29026eec4d0 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/center_loss_cn.rst @@ -0,0 +1,60 @@ +.. _cn_api_fluid_layers_center_loss: + +center_loss +------------------------------- + + +.. py:function:: paddle.fluid.layers.center_loss(input, label, num_classes, alpha, param_attr, update_center=True) + + + + +该OP接收一个来自于最后一个隐藏层的输出和目标标签作为输入,返回损失值。为每一个类别提供一个类别中心,计算mini-batch中每个样本与对应类别中心的距离的平均值作为center loss。 + +对于输入,\(X\)和标签\(Y\),计算公式为: + + .. math:: + + out = \frac{1}{2}(X - Y)^2 + + + +参数: + + - **input** (Variable) - 输入形状为[N x M]的2维张量,数据类型为float32,float64。 + - **label** (Variable) - 输入的标签,一个形状为为[N x 1]的2维张量,N表示batch size,数据类型为int32。 + - **num_class** (int32) - 输入类别的数量。 + - **alpha** (float32|float64|Variable) - 学习率。数据类型为float32或者float64。 + - **param_attr** (ParamAttr) - 指定权重参数属性的对象。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **update_center** (bool) - 是否更新类别中心的参数。 + +返回:形状为[N x 1]的2维Tensor|LoDTensor。 + +返回类型:Variable + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + + input = fluid.layers.data(name='x',shape=[20,30],dtype='float32') + label = fluid.layers.data(name='y',shape=[20,1],dtype='int64') + num_classes = 1000 + alpha = 0.01 + param_attr = fluid.initializer.Xavier(uniform=False) + center_loss=fluid.layers.center_loss(input=input, + label=label, + num_classes=1000, + alpha=alpha, + param_attr=fluid.initializer.Xavier(uniform=False), + update_center=True) + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/clip_by_norm_cn.rst b/doc/paddle/api/paddle/fluid/layers/clip_by_norm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ec57f0e0d319752123a3432cdced5a7115bd3b9d --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/clip_by_norm_cn.rst @@ -0,0 +1,44 @@ +.. _cn_api_fluid_layers_clip_by_norm: + +clip_by_norm +------------------------------- + +.. py:function:: paddle.fluid.layers.clip_by_norm(x, max_norm, name=None) + + + + +ClipByNorm算子 + +此算子将输入 ``X`` 的L2范数限制在 ``max_norm`` 内。如果 ``X`` 的L2范数小于或等于 ``max_norm`` ,则输出(Out)将与 ``X`` 相同。如果X的L2范数大于 ``max_norm`` ,则 ``X`` 将被线性缩放,使得输出(Out)的L2范数等于 ``max_norm`` ,如下面的公式所示: + +.. math:: + Out = \frac{max\_norm * X}{norm(X)} + +其中, :math:`norm(X)` 代表 ``x`` 的L2范数。 + + +参数: + - **x** (Variable)- 多维Tensor或LoDTensor,数据类型为float32。clip_by_norm运算的输入,维数必须在[1,9]之间。 + - **max_norm** (float32)- 最大范数值。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 +返回: 表示为输出Tensor或LoDTensor,数据类型为float32。和输入(X)具有相同的形状. + + +返回类型:Variable + +**代码示例:** + +.. 
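code-block:: python + + # 补充示意(仅帮助理解上面的公式,并非 OP 实现): + import numpy as np + + x = np.array([3.0, 4.0], dtype=np.float32) # L2 范数为 5 + max_norm = 1.0 + norm = np.sqrt(np.sum(x * x)) + out = x if norm <= max_norm else max_norm * x / norm + print(out) # [0.6 0.8],缩放后的 L2 范数等于 max_norm + +.. 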
code-block:: python + + import paddle.fluid as fluid + input = fluid.data( + name='data', shape=[None, 1], dtype='float32') + reward = fluid.layers.clip_by_norm(x=input, max_norm=1.0) + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/clip_cn.rst b/doc/paddle/api/paddle/fluid/layers/clip_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8d56ff6c70de341035d6fdbf57af97005d24b16a --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/clip_cn.rst @@ -0,0 +1,38 @@ +.. _cn_api_fluid_layers_clip: + +clip +------------------------------- + +.. py:function:: paddle.fluid.layers.clip(x, min, max, name=None) + + + + +该OP对输入Tensor每个元素的数值进行裁剪,使得输出Tensor元素的数值被限制在区间[min, max]内。具体的计算公式为如下。 + +.. math:: + + Out = MIN(MAX(x,min),max) + + + +参数: + - **x** (Variable)- 多维Tensor,数据类型为float32 + - **min** (float)- 最小值,输入Tensor中小于该值的元素由min代替。 + - **max** (float)- 最大值,输入Tensor中大于该值的元素由max替换。 + - **name** (None|str) – 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None。 + +返回: 对元素的数值进行裁剪之后的Tesnor,与输入x具有相同的shape和数据类型 + +返回类型:Variable + +**代码示例:** + +.. code-block:: python + + import paddle.fluid as fluid + input = fluid.layers.data( + name='data', shape=[1], dtype='float32') + reward = fluid.layers.clip(x=input, min=-1.0, max=1.0) + + diff --git a/doc/paddle/api/paddle/fluid/layers/collect_fpn_proposals_cn.rst b/doc/paddle/api/paddle/fluid/layers/collect_fpn_proposals_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b20a73a6bac6408712f9c99e2c37278d6a9ecf7b --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/collect_fpn_proposals_cn.rst @@ -0,0 +1,52 @@ +.. _cn_api_fluid_layers_collect_fpn_proposals: + +collect_fpn_proposals +------------------------------- + +.. py:function:: paddle.fluid.layers.collect_fpn_proposals(multi_rois, multi_scores, min_level, max_level, post_nms_top_n, name=None) + + + + +**该op仅支持LoDTensor输入**。连接多级RoIs(感兴趣区域)并依据multi_scores选择N个RoIs。此操作执行以下步骤: +1、选择num_level个RoIs和scores作为输入:num_level = max_level - min_level +2、连接num_level个RoIs和scores。 +3、对scores排序并选择post_nms_top_n个scores。 +4、通过scores中的选定位置收集RoIs。 +5、通过对应的batch_id重新对RoIs排序。 + + +参数: + - **multi_rois** (list) – 要收集的RoIs列表,列表中的元素为[N, 4]的2-D LoDTensor, 数据类型为float32或float64,其中N为RoI的个数。 + - **multi_scores** (list) - 要收集的RoIs对应分数的列表,列表中的元素为[N, 1]的2-D LoDTensor, 数据类型为float32或float64,其中N为RoI的个数。 + - **min_level** (int) - 要收集的FPN层的最低级 + - **max_level** (int) – 要收集的FPN层的最高级 + - **post_nms_top_n** (int) – 所选RoIs的数目 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:表示选定具有高分数的RoIs的LoDTensor,数据类型为float32或float64,同时具有LoD信息,维度为[M, 4],其中M为post_nms_top_n。 + + +返回类型:Variable + + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + multi_rois = [] + multi_scores = [] + for i in range(4): + multi_rois.append(fluid.data( + name='roi_'+str(i), shape=[None, 4], dtype='float32', lod_level=1)) + for i in range(4): + multi_scores.append(fluid.data( + name='score_'+str(i), shape=[None, 1], dtype='float32', lod_level=1)) + + fpn_rois = fluid.layers.collect_fpn_proposals( + multi_rois=multi_rois, + multi_scores=multi_scores, + min_level=2, + max_level=5, + post_nms_top_n=2000) diff --git a/doc/paddle/api/paddle/fluid/layers/concat_cn.rst b/doc/paddle/api/paddle/fluid/layers/concat_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a0c2ade9178f1842e355cfd3bdb0e667db38cd2d --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/concat_cn.rst @@ -0,0 +1,44 @@ +.. 
_cn_api_fluid_layers_concat: + +concat +------------------------------- + +.. py:function:: paddle.fluid.layers.concat(input, axis=0, name=None) + + +该OP对输入沿 ``axis`` 轴进行联结,返回一个新的Tensor。 + +参数: + - **input** (list|tuple|Tensor) - 待联结的Tensor list,Tensor tuple或者Tensor,支持的数据类型为:bool、float16、 float32、float64、int32、int64。 ``input`` 中所有Tensor的数据类型必须一致。 + - **axis** (int|Tensor,可选) - 指定对输入Tensor进行运算的轴,可以是整数或者形状为[1]的Tensor,数据类型为int32或者int64。 ``axis`` 的有效范围是[-R, R),R是输入 ``input`` 中Tensor 的维度, ``axis`` 为负值时与 :math:`axis + R` 等价。默认值为0。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:联结后的 ``Tensor`` ,数据类型和 ``input`` 中的Tensor相同。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + in1 = np.array([[1, 2, 3], + [4, 5, 6]]) + in2 = np.array([[11, 12, 13], + [14, 15, 16]]) + in3 = np.array([[21, 22], + [23, 24]]) + with fluid.dygraph.guard(): + x1 = fluid.dygraph.to_variable(in1) + x2 = fluid.dygraph.to_variable(in2) + x3 = fluid.dygraph.to_variable(in3) + out1 = fluid.layers.concat(input=[x1, x2, x3], axis=-1) + out2 = fluid.layers.concat(input=[x1, x2], axis=0) + print(out1.numpy()) + # [[ 1 2 3 11 12 13 21 22] + # [ 4 5 6 14 15 16 23 24]] + print(out2.numpy()) + # [[ 1 2 3] + # [ 4 5 6] + # [11 12 13] + # [14 15 16]] diff --git a/doc/paddle/api/paddle/fluid/layers/cond_cn.rst b/doc/paddle/api/paddle/fluid/layers/cond_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4f3c3d59b9d4acd7d0a2e8bf8bf285d658bf6eed --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/cond_cn.rst @@ -0,0 +1,90 @@ +.. _cn_api_fluid_layers_cond: + +cond +------------------------------- + + +.. py:function:: paddle.fluid.layers.cond(pred, true_fn=None, false_fn=None, name=None) + + + + +如果 ``pred`` 是 ``True`` ,该API返回 ``true_fn()`` ,否则返回 ``false_fn()`` 。 +用户如果不想在 ``callable`` 中做任何事,可以把 ``true_fn`` 或 ``false_fn`` 设为 ``None`` ,此时本API会把该 ``callable`` 视为简单返回 ``None`` 。 + +``true_fn`` 和 ``false_fn`` 需要返回同样嵌套结构(nest structure)的Tensor,如果不想返回任何值也可都返回 ``None`` 。 +PaddlePaddle里Tensor的嵌套结构是指一个Tensor,或者Tensor的元组(tuple),或者Tensor的列表(list)。 + +.. note:: + 1. 因为PaddlePaddle的静态图数据流, ``true_fn`` 和 ``false_fn`` 返回的元组必须形状相同,但是里面的Tensor形状可以不同。 + 2. 不论运行哪个分支,在 ``true_fn`` 和 ``false_fn`` 外创建的Tensor和Op都会被运行,即PaddlePaddle并不是惰性语法(lazy semantics)。例如 + + .. code-block:: python + + import paddle.fluid as fluid + a = fluid.data(name='a', shape=[-1, 1], dtype='float32') + b = fluid.data(name='b', shape=[-1, 1], dtype='float32') + c = a * b + out = fluid.layers.cond(a < b, lambda: a + c, lambda: b * b) + + 不管 ``a < b`` 是否成立, ``c = a * b`` 都会被运行。 + +参数: + - **pred** (Variable) - 一个形状为[1]的布尔型(boolean)的Tensor,该布尔值决定要返回 ``true_fn`` 还是 ``false_fn`` 的运行结果。 + - **true_fn** (callable) - 一个当 ``pred`` 是 ``True`` 时被调用的callable,默认值: ``None`` 。 + - **false_fn** (callable) - 一个当 ``pred`` 是 ``False`` 时被调用的callable,默认值: ``None`` 。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值: ``None`` 。 + +返回: + 如果 ``pred`` 是 ``True`` ,该API返回 ``true_fn()`` ,否则返回 ``false_fn()`` 。 + +返回类型:Variable|list(Variable)|tuple(Variable) + +抛出异常: + - ``TypeError`` - 如果 ``true_fn`` 或 ``false_fn`` 不是callable。 + - ``ValueError`` - 如果 ``true_fn`` 和 ``false_fn`` 没有返回同样的嵌套结构(nest structure),对嵌套结构的解释见上文。 + +**代码示例**: + +.. 
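code-block:: python + + # 补充示例(仅供说明,非原文示例):true_fn/false_fn 也可以用 lambda 直接给出 + import paddle.fluid as fluid + import paddle.fluid.layers as layers + + a = layers.fill_constant(shape=[1], dtype='float32', value=0.1) + b = layers.fill_constant(shape=[1], dtype='float32', value=0.23) + out = layers.cond(layers.less_than(a, b), lambda: a + a, lambda: a - b) + # a < b 成立,因此 out 的取值为 a + a,即 0.2 + +.. 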
code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.layers as layers + from paddle.fluid.executor import Executor + from paddle.fluid.framework import Program, program_guard + + # + # pseudocode: + # if 0.1 < 0.23: + # return 1, True + # else: + # return 3, 2 + # + + def true_func(): + return layers.fill_constant( + shape=[1, 2], dtype='int32', value=1), layers.fill_constant( + shape=[2, 3], dtype='bool', value=True) + + def false_func(): + return layers.fill_constant( + shape=[3, 4], dtype='float32', value=3), layers.fill_constant( + shape=[4, 5], dtype='int64', value=2) + + main_program = Program() + startup_program = Program() + with program_guard(main_program, startup_program): + x = layers.fill_constant(shape=[1], dtype='float32', value=0.1) + y = layers.fill_constant(shape=[1], dtype='float32', value=0.23) + pred = layers.less_than(x, y) + out = layers.cond(pred, true_func, false_func) + # out is a tuple containing 2 tensors + + place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda( + ) else fluid.CPUPlace() + exe = fluid.Executor(place) + ret = exe.run(main_program, fetch_list=out) + # ret[0] = [[1 1]] + # ret[1] = [[ True True True] + # [ True True True]] + diff --git a/doc/paddle/api/paddle/fluid/layers/continuous_value_model_cn.rst b/doc/paddle/api/paddle/fluid/layers/continuous_value_model_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..122fa181ab24056f00c0410684f18ab1ac3657a4 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/continuous_value_model_cn.rst @@ -0,0 +1,48 @@ +.. _cn_api_fluid_layers_continuous_value_model: + +continuous_value_model +------------------------------- + +.. py:function:: paddle.fluid.layers.continuous_value_model(input, cvm, use_cvm=True) + + + + +**注意:该OP仅支持在CPU运行。** + +该OP在CTR项目中,用于去除或处理 ``input`` 中的展示和点击值。 + +输入 ``input`` 是一个含展示(show)和点击(click)的词向量,其形状为 :math:`[N, D]` (N为batch大小,D为 `2 + 嵌入维度` ),show和click占据词向量D的前两维。如果 ``use_cvm=True`` ,它会计算 :math:`log(show)` 和 :math:`log(click)` ,输出的形状为 :math:`[N, D]` 。如果 ``use_cvm=False`` ,它会从输入 ``input`` 中移除show和click,输出的形状为 :math:`[N, D - 2]` 。 ``cvm`` 为show和click信息,维度为 :math:`[N, 2]` 。 + +参数: + - **input** (Variable) - cvm操作的输入张量。维度为 :math:`[N, D]` 的2-D LoDTensor。 N为batch大小, D为 `2 + 嵌入维度` , `lod level = 1` 。 + - **cvm** (Variable) - cvm操作的展示和点击张量。维度为 :math:`[N, 2]` 的2-D Tensor。 N为batch大小,2为展示和点击值。 + - **use_cvm** (bool) - 是否使用展示和点击信息。如果使用,输出维度和输入相等,对 ``input`` 中的展示和点击值取log;如果不使用,输出维度为输入减2(移除展示和点击值)。 + +返回:Variable(LoDTensor)变量, :math:`[N, M]` 的2-D LoDTensor。如果 ``use_cvm=True`` ,M等于输入的维度D,否则M等于 `D - 2` 。 + +返回类型:变量(Variable),数据类型与 ``input`` 一致。 + +**代码示例**: + +.. 
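code-block:: python + + # 补充示意(按上文描述演示 use_cvm 对输出形状的影响,仅供说明,并非 OP 实现): + import numpy as np + + emb = np.array([[2.0, 4.0, 0.3, 0.5]], dtype=np.float32) # [N=1, D=4],前两维为 show、click + out_true = emb.copy() + out_true[:, 0:2] = np.log(out_true[:, 0:2]) # use_cvm=True:形状仍为 [N, D],前两维取 log + out_false = emb[:, 2:] # use_cvm=False:移除前两维,形状为 [N, D-2] + +.. 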
code-block:: python + + import paddle.fluid as fluid + input = fluid.layers.data(name="input", shape=[-1, 1], lod_level=1, append_batch_size=False, dtype="int64") + label = fluid.layers.data(name="label", shape=[-1, 1], append_batch_size=False, dtype="int64") + embed = fluid.layers.embedding( + input=input, + size=[100, 11], + dtype='float32') + label_shape = fluid.layers.shape(label) + ones = fluid.layers.fill_constant(shape=[label_shape[0], 1], dtype="int64", value=1) + show_clk = fluid.layers.cast(fluid.layers.concat([ones, label], axis=1), dtype='float32') + show_clk.stop_gradient = True + input_with_cvm = fluid.layers.continuous_value_model(embed, show_clk, True) + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/control_flow/Print_cn.rst b/doc/paddle/api/paddle/fluid/layers/control_flow/Print_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..534fa314e54ce9fd0204eea13bc15085961e0812 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/control_flow/Print_cn.rst @@ -0,0 +1,71 @@ +.. _cn_api_fluid_layers_Print: + +Print +------------------------------- + + +.. py:function:: paddle.fluid.layers.Print(input, first_n=-1, message=None, summarize=20, print_tensor_name=True, print_tensor_type=True, print_tensor_shape=True, print_tensor_lod=True, print_phase='both') + + + + +**Print操作命令** + +该OP创建一个打印操作,打印正在访问的Tensor内容。 + +封装传入的Tensor,以便无论何时访问Tensor,都会打印信息message和Tensor的当前值。 + +参数: + - **input** (Variable)-将要打印的Tensor + - **summarize** (int)-打印Tensor中的元素数目,如果值为-1则打印所有元素。默认值为20 + - **message** (str)-打印Tensor信息前自定义的字符串类型消息,作为前缀打印 + - **first_n** (int)-打印Tensor的次数 + - **print_tensor_name** (bool)-可选,指明是否打印Tensor名称,默认为True + - **print_tensor_type** (bool)-可选,指明是否打印Tensor类型,默认为True + - **print_tensor_shape** (bool)-可选,指明是否打印Tensor维度信息,默认为True + - **print_tensor_lod** (bool)-可选,指明是否打印Tensor的LoD信息,默认为True + - **print_phase** (str)-可选,指明打印的阶段,包括 ``forward`` , ``backward`` 和 ``both`` ,默认为 ``both`` 。设置为 ``forward`` 时,只打印Tensor的前向信息;设置为 ``backward`` 时,只打印Tensor的梯度信息;设置为 ``both`` 时,则同时打印Tensor的前向信息以及梯度信息。 + +返回:输出Tensor + +返回类型:Variable + +.. note:: + 输入和输出是两个不同的Variable,在接下来的过程中,应该使用输出Variable而非输入Variable,否则打印层将失去backward的信息。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import paddle + import numpy as np + + x = fluid.layers.data(name='x', shape=[1], dtype='float32', lod_level=1) + x = fluid.layers.Print(x, message="The content of input layer:") + + y = fluid.layers.data(name='y', shape=[1], dtype='float32', lod_level=2) + out = fluid.layers.sequence_expand(x=x, y=y, ref_level=0) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + x_d = fluid.create_lod_tensor(np.array([[1.1], [2.2],[3.3],[4.4]]).astype('float32'), [[1,3]], place) + y_d = fluid.create_lod_tensor(np.array([[1.1],[1.1],[1.1],[1.1],[1.1],[1.1]]).astype('float32'), [[1,3], [1,2,1,2]], place) + results = exe.run(fluid.default_main_program(), + feed={'x':x_d, 'y': y_d }, + fetch_list=[out],return_numpy=False) +**运行输出**: + +.. code-block:: bash + + The content of input layer: The place is:CPUPlace + Tensor[x] + shape: [4,1,] + dtype: f + LoD: [[ 0,1,4, ]] + data: 1.1,2.2,3.3,4.4, + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/conv2d_cn.rst b/doc/paddle/api/paddle/fluid/layers/conv2d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c4837ce93284621429c32e061994f97c2a03d13f --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/conv2d_cn.rst @@ -0,0 +1,113 @@ +.. 
_cn_api_fluid_layers_conv2d: + +conv2d +------------------------------- + + +.. py:function:: paddle.fluid.layers.conv2d(input, num_filters, filter_size, stride=1, padding=0, dilation=1, groups=None, param_attr=None, bias_attr=None, use_cudnn=True, act=None, name=None, data_format="NCHW") + + + + +该OP是二维卷积层(convolution2D layer),根据输入、滤波器、步长(stride)、填充(padding)、膨胀比例(dilations)一组参数计算输出特征层大小。输入和输出是NCHW或NHWC格式,其中N是批尺寸,C是通道数,H是特征高度,W是特征宽度。滤波器是MCHW格式,M是输出图像通道数,C是输入图像通道数,H是滤波器高度,W是滤波器宽度。如果组数(groups)大于1,C等于输入图像通道数除以组数的结果。详情请参考UFLDL's : `卷积 `_ 。如果bias_attr不为False,卷积计算会添加偏置项。如果指定了激活函数类型,相应的激活函数会作用在最终结果上。 + +对每个输入X,有等式: + +.. math:: + + Out = \sigma \left ( W * X + b \right ) + +其中: + - :math:`X` :输入值,NCHW或NHWC格式的4-D Tensor + - :math:`W` :滤波器值,MCHW格式的4-D Tensor + - :math:`*` :卷积操作 + - :math:`b` :偏置值,2-D Tensor,形状为 ``[M,1]`` + - :math:`\sigma` :激活函数 + - :math:`Out` :输出值,NCHW或NHWC格式的4-D Tensor, 和 ``X`` 的形状可能不同 + +**示例** + +- 输入: + + 输入形状::math:`(N,C_{in},H_{in},W_{in})` + + 滤波器形状: :math:`(C_{out},C_{in},H_{f},W_{f})` + +- 输出: + + 输出形状: :math:`(N,C_{out},H_{out},W_{out})` + +其中 + +.. math:: + + H_{out} &= \frac{\left ( H_{in} + padding\_height\_top + padding\_height\_bottom-\left ( dilation[0]*\left ( H_{f}-1 \right )+1 \right ) \right )}{stride[0]}+1 + + W_{out} &= \frac{\left ( W_{in} + padding\_width\_left + padding\_width\_right -\left ( dilation[1]*\left ( W_{f}-1 \right )+1 \right ) \right )}{stride[1]}+1 + +如果 ``padding`` = "SAME": + +.. math:: + H_{out} = \frac{(H_{in} + stride[0] - 1)}{stride[0]} + +.. math:: + W_{out} = \frac{(W_{in} + stride[1] - 1)}{stride[1]} + +如果 ``padding`` = "VALID": + +.. math:: + H_{out} = \frac{\left ( H_{in} -\left ( dilation[0]*\left ( H_{f}-1 \right )+1 \right ) \right )}{stride[0]}+1 + + W_{out} = \frac{\left ( W_{in} -\left ( dilation[1]*\left ( W_{f}-1 \right )+1 \right ) \right )}{stride[1]}+1 + +参数: + - **input** (Variable) - 形状为 :math:`[N, C, H, W]` 或 :math:`[N, H, W, C]` 的4-D Tensor,N是批尺寸,C是通道数,H是特征高度,W是特征宽度,数据类型为float16, float32或float64。 + - **num_filters** (int) - 滤波器(卷积核)的个数。和输出图像通道相同。 + - **filter_size** (int|list|tuple) - 滤波器大小。如果它是一个列表或元组,则必须包含两个整数值:(filter_size_height,filter_size_width)。若为一个整数,filter_size_height = filter_size_width = filter_size。 + - **stride** (int|list|tuple,可选) - 步长大小。滤波器和输入进行卷积计算时滑动的步长。如果它是一个列表或元组,则必须包含两个整型数:(stride_height,stride_width)。若为一个整数,stride_height = stride_width = stride。默认值:1。 + - **padding** (int|list|tuple|str,可选) - 填充大小。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有3种格式:(1)包含4个二元组:当 ``data_format`` 为"NCHW"时为 [[0,0], [0,0], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right]],当 ``data_format`` 为"NHWC"时为[[0,0], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right], [0,0]];(2)包含4个整数值:[padding_height_top, padding_height_bottom, padding_width_left, padding_width_right];(3)包含2个整数值:[padding_height, padding_width],此时padding_height_top = padding_height_bottom = padding_height, padding_width_left = padding_width_right = padding_width。若为一个整数,padding_height = padding_width = padding。默认值:0。 + - **dilation** (int|list|tuple,可选) - 膨胀比例大小。空洞卷积时会使用该参数,滤波器对输入进行卷积时,感受野里每相邻两个特征点之间的空洞信息。如果膨胀比例为列表或元组,则必须包含两个整型数:(dilation_height,dilation_width)。若为一个整数,dilation_height = dilation_width = dilation。默认值:1。 + - **groups** (int,可选) - 二维卷积层的组数。根据Alex Krizhevsky的深度卷积神经网络(CNN)论文中的成组卷积:当group=n,输入和滤波器分别根据通道数量平均分为n组,第一组滤波器和第一组输入进行卷积计算,第二组滤波器和第二组输入进行卷积计算,……,第n组滤波器和第n组输入进行卷积计算。默认值:1。 + - **param_attr** (ParamAttr,可选) - 
指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr|bool,可选)- 指定偏置参数属性的对象。若 ``bias_attr`` 为bool类型,只支持为False,表示没有偏置参数。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **use_cudnn** (bool,可选)- 是否使用cudnn内核。只有已安装cudnn库时才有效。默认值:True。 + - **act** (str,可选) - 激活函数类型, 如tanh、softmax、sigmoid,relu等,支持列表请参考 :ref:`api_guide_activations` 。如果设为None,则未添加激活函数。默认值:None。 + - **name** (str,可选) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值:None。 + - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 + +返回:4-D Tensor,数据类型与 ``input`` 一致。如果未指定激活层,则返回卷积计算的结果,如果指定激活层,则返回卷积和激活计算之后的最终结果。 + +返回类型:Variable。 + +抛出异常: + - ``ValueError`` - 如果 ``use_cudnn`` 不是bool值。 + - ``ValueError`` - 如果 ``data_format`` 既不是"NCHW"也不是"NHWC"。 + - ``ValueError`` - 如果 ``input`` 的通道数未被明确定义。 + - ``ValueError`` - 如果 ``padding`` 是字符串,既不是"SAME"也不是"VALID"。 + - ``ValueError`` - 如果 ``padding`` 含有4个二元组,与批尺寸对应维度的值不为0或者与通道对应维度的值不为0。 + - ``ShapeError`` - 如果输入不是4-D Tensor。 + - ``ShapeError`` - 如果输入和滤波器的维度大小不相同。 + - ``ShapeError`` - 如果输入的维度大小与 ``stride`` 之差不是2。 + - ``ShapeError`` - 如果输出的通道数不能被 ``groups`` 整除。 + + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + data = fluid.layers.data(name='data', shape=[3, 32, 32], dtype='float32') + param_attr = fluid.ParamAttr(name='conv2d.weight', initializer=fluid.initializer.Xavier(uniform=False), learning_rate=0.001) + res = fluid.layers.conv2d(input=data, num_filters=2, filter_size=3, act="relu", param_attr=param_attr) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + x = np.random.rand(1, 3, 32, 32).astype("float32") + output = exe.run(feed={"data": x}, fetch_list=[res]) + print(output) + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/conv2d_transpose_cn.rst b/doc/paddle/api/paddle/fluid/layers/conv2d_transpose_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..21d1b64a890973e2c24b4f6fe402e404318042da --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/conv2d_transpose_cn.rst @@ -0,0 +1,119 @@ +.. _cn_api_fluid_layers_conv2d_transpose: + +conv2d_transpose +------------------------------- + + +.. py:function:: paddle.fluid.layers.conv2d_transpose(input, num_filters, output_size=None, filter_size=None, padding=0, stride=1, dilation=1, groups=None, param_attr=None, bias_attr=None, use_cudnn=True, act=None, name=None, data_format='NCHW') + + + + +二维转置卷积层(Convlution2D transpose layer) + +该层根据输入(input)、滤波器(filter)和卷积核膨胀比例(dilations)、步长(stride)、填充(padding)来计算输出特征层大小或者通过output_size指定输出特征层大小。输入(Input)和输出(Output)为NCHW或NHWC格式,其中N为批尺寸,C为通道数(channel),H为特征层高度,W为特征层宽度。滤波器是MCHW格式,M是输出图像通道数,C是输入图像通道数,H是滤波器高度,W是滤波器宽度。如果组数大于1,C等于输入图像通道数除以组数的结果。转置卷积的计算过程相当于卷积的反向计算。转置卷积又被称为反卷积(但其实并不是真正的反卷积)。欲了解转置卷积层细节,请参考下面的说明和 参考文献_ 。如果参数bias_attr不为False, 转置卷积计算会添加偏置项。如果act不为None,则转置卷积计算之后添加相应的激活函数。 + +.. _参考文献: https://arxiv.org/pdf/1603.07285.pdf + + +输入 :math:`X` 和输出 :math:`Out` 函数关系如下: + +.. math:: + Out=\sigma (W*X+b)\\ + +其中: + - :math:`X` : 输入,具有NCHW或NHWC格式的4-D Tensor + - :math:`W` : 滤波器,具有NCHW格式的4-D Tensor + - :math:`*` : 卷积计算(注意:转置卷积本质上的计算还是卷积) + - :math:`b` : 偏置(bias),2-D Tensor,形状为 ``[M,1]`` + - :math:`σ` : 激活函数 + - :math:`Out` : 输出值,NCHW或NHWC格式的4-D Tensor, 和 ``X`` 的形状可能不同 + +**示例** + +- 输入: + + 输入Tensor的形状: :math:`(N,C_{in}, H_{in}, W_{in})` + + 滤波器的形状 : :math:`(C_{in}, C_{out}, H_f, W_f)` + +- 输出: + + 输出Tensor的形状 : :math:`(N,C_{out}, H_{out}, W_{out})` + +其中 + +.. 
math:: + + & H'_{out} = (H_{in}-1)*strides[0] - pad\_height\_top - pad\_height\_bottom + dilations[0]*(H_f-1)+1\\ + & W'_{out} = (W_{in}-1)*strides[1]- pad\_width\_left - pad\_width\_right + dilations[1]*(W_f-1)+1 \\ + & H_{out}\in[H'_{out},H'_{out} + strides[0])\\ + & W_{out}\in[W'_{out},W'_{out} + strides[1])\\ + +如果 ``padding`` = "SAME": + +.. math:: + & H'_{out} = \frac{(H_{in} + stride[0] - 1)}{stride[0]}\\ + & W'_{out} = \frac{(W_{in} + stride[1] - 1)}{stride[1]}\\ + +如果 ``padding`` = "VALID": + +.. math:: + & H'_{out} = (H_{in}-1)*strides[0] + dilations[0]*(H_f-1)+1\\ + & W'_{out} = (W_{in}-1)*strides[1] + dilations[1]*(W_f-1)+1 \\ + +注意: + +如果output_size为None,则 :math:`H_{out}` = :math:`H^\prime_{out}` , :math:`W_{out}` = :math:`W^\prime_{out}` ;否则,指定的output_size_height(输出特征层的高) :math:`H_{out}` 应当介于 :math:`H^\prime_{out}` 和 :math:`H^\prime_{out} + strides[0]` 之间(不包含 :math:`H^\prime_{out} + strides[0]` ), 并且指定的output_size_width(输出特征层的宽) :math:`W_{out}` 应当介于 :math:`W^\prime_{out}` 和 :math:`W^\prime_{out} + strides[1]` 之间(不包含 :math:`W^\prime_{out} + strides[1]` )。 + +由于转置卷积可以当成是卷积的反向计算,而根据卷积的输入输出计算公式来说,不同大小的输入特征层可能对应着相同大小的输出特征层,所以对应到转置卷积来说,固定大小的输入特征层对应的输出特征层大小并不唯一。 + +如果指定了output_size, ``conv2d_transpose`` 可以自动计算滤波器的大小。 + +参数: + - **input** (Variable)- 形状为 :math:`[N, C, H, W]` 或 :math:`[N, H, W, C]` 的4-D Tensor,N是批尺寸,C是通道数,H是特征高度,W是特征宽度。数据类型:float32或float64。 + - **num_filters** (int) - 滤波器(卷积核)的个数,与输出图片的通道数相同。 + - **output_size** (int|tuple,可选) - 输出图片的大小。如果output_size是一个元组,则必须包含两个整型数,(output_size_height,output_size_width)。如果output_size=None,则内部会使用filter_size、padding和stride来计算output_size。如果output_size和filter_size是同时指定的,那么它们应满足上面的公式。默认:None。output_size和filter_size不能同时为None。 + - **filter_size** (int|tuple,可选) - 滤波器大小。如果filter_size是一个元组,则必须包含两个整型数,(filter_size_height, filter_size_width)。否则,filter_size_height = filter_size_width = filter_size。如果filter_size=None,则必须指定output_size, ``conv2d_transpose`` 内部会根据output_size、padding和stride计算出滤波器大小。默认:None。output_size和filter_size不能同时为None。 + - **padding** (int|list|tuple|str,可选) - 填充padding大小。padding参数在输入特征层每边添加 ``dilation * (kernel_size - 1) - padding`` 个0。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有3种格式:(1)包含4个二元组:当 ``data_format`` 为"NCHW"时为 [[0,0], [0,0], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right]],当 ``data_format`` 为"NHWC"时为[[0,0], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right], [0,0]];(2)包含4个整数值:[padding_height_top, padding_height_bottom, padding_width_left, padding_width_right];(3)包含2个整数值:[padding_height, padding_width],此时padding_height_top = padding_height_bottom = padding_height, padding_width_left = padding_width_right = padding_width。若为一个整数,padding_height = padding_width = padding。默认值:0。 + - **stride** (int|tuple,可选) - 步长stride大小。滤波器和输入进行卷积计算时滑动的步长。如果stride是一个元组,则必须包含两个整型数,形式为(stride_height,stride_width)。否则,stride_height = stride_width = stride。默认:stride = 1。 + - **dilation** (int|tuple,可选) - 膨胀比例(dilation)大小。空洞卷积时会指该参数,滤波器对输入进行卷积时,感受野里每相邻两个特征点之间的空洞信息,根据 `可视化效果图 `_ 较好理解。如果膨胀比例dilation是一个元组,那么元组必须包含两个整型数,形式为(dilation_height, dilation_width)。否则,dilation_height = dilation_width = dilation。默认:dilation= 1。 + - **groups** (int,可选) - 二维转置卷积层的组数。从Alex Krizhevsky的CNN Deep论文中的群卷积中受到启发,当group=2时,输入和滤波器分别根据通道数量平均分为两组,第一组滤波器和第一组输入进行卷积计算,第二组滤波器和第二组输入进行卷积计算。默认:group = 1。 + - **param_attr** (ParamAttr,可选) :指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 
。conv2d_transpose算子默认的权重初始化是Xavier。 + - **bias_attr** (ParamAttr|False,可选)- 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。conv2d_transpose算子默认的偏置初始化是0.0。 + - **use_cudnn** (bool,可选) - 是否使用cudnn内核,只有已安装cudnn库时才有效。默认:True。 + - **act** (str,可选) - 激活函数类型,如果设置为None,则不使用激活函数。默认:None。 + - **name** (str,可选) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值为None。 + - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 + +返回:4-D Tensor,数据类型与 ``input`` 一致。如果未指定激活层,则返回转置卷积计算的结果,如果指定激活层,则返回转置卷积和激活计算之后的最终结果。 + +返回类型:Variable + +抛出异常: + - ``ValueError`` : 如果输入的shape、filter_size、stride、padding和groups不匹配,抛出ValueError + - ``ValueError`` - 如果 ``data_format`` 既不是"NCHW"也不是"NHWC"。 + - ``ValueError`` - 如果 ``padding`` 是字符串,既不是"SAME"也不是"VALID"。 + - ``ValueError`` - 如果 ``padding`` 含有4个二元组,与批尺寸对应维度的值不为0或者与通道对应维度的值不为0。 + - ``ValueError`` - 如果 ``output_size`` 和 ``filter_size`` 同时为None。 + - ``ShapeError`` - 如果输入不是4-D Tensor。 + - ``ShapeError`` - 如果输入和滤波器的维度大小不相同。 + - ``ShapeError`` - 如果输入的维度大小与 ``stride`` 之差不是2。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + data = fluid.layers.data(name='data', shape=[3, 32, 32], dtype='float32') + param_attr = fluid.ParamAttr(name='conv2d.weight', initializer=fluid.initializer.Xavier(uniform=False), learning_rate=0.001) + res = fluid.layers.conv2d_transpose(input=data, num_filters=2, filter_size=3, act="relu", param_attr=param_attr) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + x = np.random.rand(1, 3, 32, 32).astype("float32") + output = exe.run(feed={"data": x}, fetch_list=[res]) + print(output) + + diff --git a/doc/paddle/api/paddle/fluid/layers/conv3d_cn.rst b/doc/paddle/api/paddle/fluid/layers/conv3d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0b5852a8487a658634868c5ade361a3a472ceaee --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/conv3d_cn.rst @@ -0,0 +1,115 @@ +.. _cn_api_fluid_layers_conv3d: + +conv3d +------------------------------- + + +.. py:function:: paddle.fluid.layers.conv3d(input, num_filters, filter_size, stride=1, padding=0, dilation=1, groups=None, param_attr=None, bias_attr=None, use_cudnn=True, act=None, name=None, data_format="NCDHW") + + + + +该OP是三维卷积层(convolution3D layer),根据输入、滤波器、步长(stride)、填充(padding)、膨胀比例(dilations)一组参数计算得到输出特征层大小。输入和输出是NCDHW或NDWHC格式,其中N是批尺寸,C是通道数,D是特征层深度,H是特征层高度,W是特征层宽度。三维卷积(Convlution3D)和二维卷积(Convlution2D)相似,但多了一维深度信息(depth)。如果bias_attr不为False,卷积计算会添加偏置项。如果指定了激活函数类型,相应的激活函数会作用在最终结果上。 + +对每个输入X,有等式: + +.. math:: + + Out = \sigma \left ( W * X + b \right ) + +其中: + - :math:`X` :输入值,NCDHW或NDHWC格式的5-D Tensor + - :math:`W` :滤波器值,MCDHW格式的5-D Tensor + - :math:`*` :卷积操作 + - :math:`b` :偏置值,2-D Tensor,形为 ``[M,1]`` + - :math:`\sigma` :激活函数 + - :math:`Out` :输出值, NCDHW或NDHWC格式的5-D Tensor,和 ``X`` 的形状可能不同 + +**示例** + +- 输入: + + 输入形状: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})` + + 滤波器形状: :math:`(C_{out}, C_{in}, D_f, H_f, W_f)` + +- 输出: + + 输出形状: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})` + +其中 + +.. 
math:: + + D_{out} &= \frac{\left ( D_{in} + padding\_depth\_front + padding\_depth\_back-\left ( dilation[0]*\left ( D_{f}-1 \right )+1 \right ) \right )}{stride[0]}+1 + + H_{out} &= \frac{\left ( H_{in} + padding\_height\_top + padding\_height\_bottom-\left ( dilation[1]*\left ( H_{f}-1 \right )+1 \right ) \right )}{stride[1]}+1 + + W_{out} &= \frac{\left ( W_{in} + padding\_width\_left + padding\_width\_right -\left ( dilation[2]*\left ( W_{f}-1 \right )+1 \right ) \right )}{stride[2]}+1 + +如果 ``padding`` = "SAME": + +.. math:: + D_{out} = \frac{(D_{in} + stride[0] - 1)}{stride[0]} + + H_{out} = \frac{(H_{in} + stride[1] - 1)}{stride[1]} + + W_{out} = \frac{(W_{in} + stride[2] - 1)}{stride[2]} + +如果 ``padding`` = "VALID": + +.. math:: + D_{out} = \frac{\left ( D_{in} -\left ( dilation[0]*\left ( D_{f}-1 \right )+1 \right ) \right )}{stride[0]}+1 + + H_{out} = \frac{\left ( H_{in} -\left ( dilation[1]*\left ( H_{f}-1 \right )+1 \right ) \right )}{stride[1]}+1 + + W_{out} = \frac{\left ( W_{in} -\left ( dilation[2]*\left ( W_{f}-1 \right )+1 \right ) \right )}{stride[2]}+1 + +参数: + - **input** (Variable) - 形状为 :math:`[N, C, D, H, W]` 或 :math:`[N, D, H, W, C]` 的5-D Tensor,N是批尺寸,C是通道数,D是特征深度,H是特征高度,W是特征宽度,数据类型为float16, float32或float64。 + - **num_fliters** (int) - 滤波器(卷积核)的个数。和输出图像通道相同。 + - **filter_size** (int|list|tuple) - 滤波器大小。如果它是一个列表或元组,则必须包含三个整数值:(filter_size_depth, filter_size_height,filter_size_width)。若为一个整数,则filter_size_depth = filter_size_height = filter_size_width = filter_size。 + - **stride** (int|list|tuple,可选) - 步长大小。滤波器和输入进行卷积计算时滑动的步长。如果它是一个列表或元组,则必须包含三个整型数:(stride_depth, stride_height, stride_width)。若为一个整数,stride_depth = stride_height = stride_width = stride。默认值:1。 + - **padding** (int|list|tuple|str,可选) - 填充大小。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有3种格式:(1)包含5个二元组:当 ``data_format`` 为"NCDHW"时为 [[0,0], [0,0], [padding_depth_front, padding_depth_back], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right]],当 ``data_format`` 为"NDHWC"时为[[0,0], [padding_depth_front, padding_depth_back], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right], [0,0]];(2)包含6个整数值:[padding_depth_front, padding_depth_back, padding_height_top, padding_height_bottom, padding_width_left, padding_width_right];(3)包含3个整数值:[padding_depth, padding_height, padding_width],此时 padding_depth_front = padding_depth_back = padding_depth, padding_height_top = padding_height_bottom = padding_height, padding_width_left = padding_width_right = padding_width。若为一个整数,padding_depth = padding_height = padding_width = padding。默认值:0。 + - **dilation** (int|list|tuple,可选) - 膨胀比例大小。空洞卷积时会使用该参数,滤波器对输入进行卷积时,感受野里每相邻两个特征点之间的空洞信息。如果膨胀比例为列表或元组,则必须包含三个整型数:(dilation_depth, dilation_height,dilation_width)。若为一个整数,dilation_depth = dilation_height = dilation_width = dilation。默认值:1。 + - **groups** (int,可选) - 三维卷积层的组数。根据Alex Krizhevsky的深度卷积神经网络(CNN)论文中的成组卷积:当group=n,输入和滤波器分别根据通道数量平均分为n组,第一组滤波器和第一组输入进行卷积计算,第二组滤波器和第二组输入进行卷积计算,……,第n组滤波器和第n组输入进行卷积计算。默认值:1。 + - **param_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr|bool,可选)- 指定偏置参数属性的对象。若 ``bias_attr`` 为bool类型,只支持为False,表示没有偏置参数。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **use_cudnn** (bool,可选)- 是否使用cudnn内核。只有已安装cudnn库时才有效。默认值:True。 + - **act** (str,可选) - 激活函数类型, 如tanh、softmax、sigmoid,relu等,支持列表请参考 :ref:`api_guide_activations` 。如果设为None,则未添加激活函数。默认值:None。 + - 
**name** (str,可选) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值:None。 + - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCDHW"和"NDHWC"。N是批尺寸,C是通道数,D是特征深度,H是特征高度,W是特征宽度。默认值:"NCDHW"。 + +返回:5-D Tensor,数据类型与 ``input`` 一致。如果未指定激活层,则返回卷积计算的结果,如果指定激活层,则返回卷积和激活计算之后的最终结果。 + +返回类型:Variable。 + +抛出异常: + - ``ValueError`` - 如果 ``use_cudnn`` 不是bool值。 + - ``ValueError`` - 如果 ``data_format`` 既不是"NCDHW"也不是"NDHWC"。 + - ``ValueError`` - 如果 ``input`` 的通道数未被明确定义。 + - ``ValueError`` - 如果 ``padding`` 是字符串,既不是"SAME"也不是"VALID"。 + - ``ValueError`` - 如果 ``padding`` 含有5个二元组,与批尺寸对应维度的值不为0或者与通道对应维度的值不为0。 + - ``ShapeError`` - 如果输入不是5-D Tensor。 + - ``ShapeError`` - 如果输入和滤波器的维度大小不相同。 + - ``ShapeError`` - 如果输入的维度大小与 ``stride`` 之差不是2。 + - ``ShapeError`` - 如果输出的通道数不能被 ``groups`` 整除。 + + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + data = fluid.layers.data(name='data', shape=[3, 12, 32, 32], dtype='float32') + param_attr = fluid.ParamAttr(name='conv3d.weight', initializer=fluid.initializer.Xavier(uniform=False), learning_rate=0.001) + res = fluid.layers.conv3d(input=data, num_filters=2, filter_size=3, act="relu", param_attr=param_attr) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + x = np.random.rand(1, 3, 12, 32, 32).astype("float32") + output = exe.run(feed={"data": x}, fetch_list=[res]) + print(output) + + diff --git a/doc/paddle/api/paddle/fluid/layers/conv3d_transpose_cn.rst b/doc/paddle/api/paddle/fluid/layers/conv3d_transpose_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..55f09e9d85e5e02a5e16dd5f764e14592f0c760d --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/conv3d_transpose_cn.rst @@ -0,0 +1,123 @@ +.. _cn_api_fluid_layers_conv3d_transpose: + +conv3d_transpose +------------------------------- + + +.. py:function:: paddle.fluid.layers.conv3d_transpose(input, num_filters, output_size=None, filter_size=None, padding=0, stride=1, dilation=1, groups=None, param_attr=None, bias_attr=None, use_cudnn=True, act=None, name=None, data_format='NCDHW') + + + + +三维转置卷积层(Convlution3D transpose layer) + +该层根据输入(input)、滤波器(filter)和卷积核膨胀比例(dilations)、步长(stride)、填充(padding)来计算输出特征层大小或者通过output_size指定输出特征层大小。输入(Input)和输出(Output)为NCDHW或者NDHWC格式。其中N为批尺寸,C为通道数(channel),D为特征深度,H为特征层高度,W为特征层宽度。转置卷积的计算过程相当于卷积的反向计算。转置卷积又被称为反卷积(但其实并不是真正的反卷积)。欲了解卷积转置层细节,请参考下面的说明和 参考文献_ 。如果参数bias_attr不为False, 转置卷积计算会添加偏置项。如果act不为None,则转置卷积计算之后添加相应的激活函数。 + +.. _参考文献: http://www.matthewzeiler.com/wp-content/uploads/2017/07/cvpr2010.pdf + +输入 :math:`X` 和输出 :math:`Out` 函数关系如下: + +.. math:: + \\Out=\sigma (W*X+b)\\ + +其中: + - :math:`X` : 输入,具有NCDHW或NDHWC格式的5-D Tensor + - :math:`W` : 滤波器,具有NCDHW格式的5-D Tensor + - :math:`*` : 卷积操作(注意:转置卷积本质上的计算还是卷积) + - :math:`b` : 偏置(bias),2-D Tensor,形状为 ``[M,1]`` + - :math:`σ` : 激活函数 + - :math:`Out` : 输出值,NCDHW或NDHWC格式的5-D Tensor,和 ``X`` 的形状可能不同 + +**示例** + +输入: + + 输入的shape::math:`(N,C_{in}, D_{in}, H_{in}, W_{in})` + + 滤波器的shape::math:`(C_{in}, C_{out}, D_f, H_f, W_f)` + + + +输出: + + 输出的shape::math:`(N,C_{out}, D_{out}, H_{out}, W_{out})` + + +其中: + +.. 
math:: + + & D'_{out}=(D_{in}-1)*strides[0] - pad\_depth\_front - pad\_depth\_back + dilations[0]*(D_f-1)+1\\ + & H'_{out}=(H_{in}-1)*strides[1] - pad\_height\_top - pad\_height\_bottom + dilations[1]*(H_f-1)+1\\ + & W'_{out}=(W_{in}-1)*strides[2] - pad\_width\_left - pad\_width\_right + dilations[2]*(W_f-1)+1\\ + & D_{out}\in[D'_{out},D'_{out} + strides[0])\\ + & H_{out}\in[H'_{out},H'_{out} + strides[1])\\ + & W_{out}\in[W'_{out},W'_{out} + strides[2])\\ + +如果 ``padding`` = "SAME": + +.. math:: + D'_{out} = \frac{(D_{in} + stride[0] - 1)}{stride[0]}\\ + H'_{out} = \frac{(H_{in} + stride[1] - 1)}{stride[1]}\\ + W'_{out} = \frac{(W_{in} + stride[2] - 1)}{stride[2]}\\ + +如果 ``padding`` = "VALID": + +.. math:: + D'_{out}=(D_{in}-1)*strides[0] + dilations[0]*(D_f-1)+1\\ + H'_{out}=(H_{in}-1)*strides[1] + dilations[1]*(H_f-1)+1\\ + W'_{out}=(W_{in}-1)*strides[2] + dilations[2]*(W_f-1)+1\\ + +注意: + +如果output_size为None,则 :math:`D_{out}` = :math:`D^\prime_{out}` , :math:`H_{out}` = :math:`H^\prime_{out}` , :math:`W_{out}` = :math:`W^\prime_{out}` ;否则,指定的output_size_depth(输出特征层的深度) :math:`D_{out}` 应当介于 :math:`D^\prime_{out}` 和 :math:`D^\prime_{out} + strides[0]` 之间(不包含 :math:`D^\prime_{out} + strides[0]` ),指定的output_size_height(输出特征层的高) :math:`H_{out}` 应当介于 :math:`H^\prime_{out}` 和 :math:`H^\prime_{out} + strides[1]` 之间(不包含 :math:`H^\prime_{out} + strides[1]` ), 并且指定的output_size_width(输出特征层的宽) :math:`W_{out}` 应当介于 :math:`W^\prime_{out}` 和 :math:`W^\prime_{out} + strides[2]` 之间(不包含 :math:`W^\prime_{out} + strides[2]` )。 + +由于转置卷积可以当成是卷积的反向计算,而根据卷积的输入输出计算公式来说,不同大小的输入特征层可能对应着相同大小的输出特征层,所以对应到转置卷积来说,固定大小的输入特征层对应的输出特征层大小并不唯一。 + +如果指定了output_size, ``conv3d_transpose`` 可以自动计算滤波器的大小。 + +参数: + - **input** (Variable)- 形状为 :math:`[N, C, D, H, W]` 或 :math:`[N, D, H, W, C]` 的5-D Tensor,N是批尺寸,C是通道数,D是特征深度,H是特征高度,W是特征宽度,数据类型:float32或float64。 + - **num_filters** (int) - 滤波器(卷积核)的个数,与输出的图片的通道数相同。 + - **output_size** (int|tuple,可选) - 输出图片的大小。如果output_size是一个元组,则必须包含三个整型数,(output_size_depth,output_size_height,output_size_width)。如果output_size=None,则内部会使用filter_size、padding和stride来计算output_size。如果output_size和filter_size是同时指定的,那么它们应满足上面的公式。默认:None。output_size和filter_size不能同时为None。 + - **filter_size** (int|tuple,可选) - 滤波器大小。如果filter_size是一个元组,则必须包含三个整型数,(filter_size_depth,filter_size_height, filter_size_width)。否则,filter_size_depth = filter_size_height = filter_size_width = filter_size。如果filter_size=None,则必须指定output_size, ``conv2d_transpose`` 内部会根据output_size、padding和stride计算出滤波器大小。默认:None。output_size和filter_size不能同时为None。 + - **padding** (int|list|tuple|str,可选) - 填充padding大小。padding参数在输入特征层每边添加 ``dilation * (kernel_size - 1) - padding`` 个0。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有3种格式:(1)包含5个二元组:当 ``data_format`` 为"NCDHW"时为 [[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]],当 ``data_format`` 为"NDHWC"时为[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]];(2)包含6个整数值:[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right];(3)包含3个整数值:[pad_depth, pad_height, pad_width],此时 pad_depth_front = pad_depth_back = pad_depth, pad_height_top = pad_height_bottom = pad_height, pad_width_left = pad_width_right = pad_width。若为一个整数,pad_depth = pad_height = pad_width = padding。默认值:0。 + - **stride** (int|tuple,可选) - 
步长stride大小。滤波器和输入进行卷积计算时滑动的步长。如果stride是一个元组,那么元组的形式为(stride_depth,stride_height,stride_width)。否则,stride_depth = stride_height = stride_width = stride。默认:stride = 1。 + - **dilation** (int|tuple,可选) - 膨胀比例dilation大小。空洞卷积时会指该参数,滤波器对输入进行卷积时,感受野里每相邻两个特征点之间的空洞信息,根据 `可视化效果图 `_ 较好理解。如果膨胀比例dilation是一个元组,那么元组的形式为(dilation_depth,dilation_height, dilation_width)。否则,dilation_depth = dilation_height = dilation_width = dilation。默认:dilation= 1。 + - **groups** (int,可选) - 三维转置卷积层的组数。从Alex Krizhevsky的CNN Deep论文中的群卷积中受到启发,当group=2时,输入和滤波器分别根据通道数量平均分为两组,第一组滤波器和第一组输入进行卷积计算,第二组滤波器和第二组输入进行卷积计算。默认:group = 1。 + - **param_attr** (ParamAttr,可选) :指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。conv3d_transpose算子默认的权重初始化是Xavier。 + - **bias_attr** (ParamAttr|False,可选)- 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。conv3d_transpose算子默认的偏置初始化是0.0。 + - **use_cudnn** (bool,可选) - 是否使用cudnn内核,只有已安装cudnn库时才有效。默认:True。 + - **act** (str,可选) - 激活函数类型,如果设置为None,则不使用激活函数。默认:None。 + - **name** (str,可选) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值为None。 + - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCDHW"和"NDHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCDHW"。 + +返回:5-D Tensor,数据类型与 ``input`` 一致。如果未指定激活层,则返回转置卷积计算的结果,如果指定激活层,则返回转置卷积和激活计算之后的最终结果。 + +返回类型:Variable + +抛出异常: + - ``ValueError`` - 如果输入的shape、filter_size、stride、padding和groups不匹配。 + - ``ValueError`` - 如果 ``data_format`` 既不是"NCDHW"也不是"NDHWC"。 + - ``ValueError`` - 如果 ``padding`` 是字符串,既不是"SAME"也不是"VALID"。 + - ``ValueError`` - 如果 ``padding`` 含有5个二元组,与批尺寸对应维度的值不为0或者与通道对应维度的值不为0。 + - ``ValueError`` - 如果 ``output_size`` 和 ``filter_size`` 同时为None。 + - ``ShapeError`` - 如果输入不是5-D Tensor。 + - ``ShapeError`` - 如果输入和滤波器的维度大小不相同。 + - ``ShapeError`` - 如果输入的维度大小与 ``stride`` 之差不是2。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + data = fluid.layers.data(name='data', shape=[3, 12, 32, 32], dtype='float32') + param_attr = fluid.ParamAttr(name='conv3d.weight', initializer=fluid.initializer.Xavier(uniform=False), learning_rate=0.001) + res = fluid.layers.conv3d_transpose(input=data, num_filters=2, filter_size=3, act="relu", param_attr=param_attr) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + x = np.random.rand(1, 3, 12, 32, 32).astype("float32") + output = exe.run(feed={"data": x}, fetch_list=[res]) + print(output) diff --git a/doc/paddle/api/paddle/fluid/layers/cos_cn.rst b/doc/paddle/api/paddle/fluid/layers/cos_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..35c6061757e614ac91c4e7ff3f8bdbd016b93e9b --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/cos_cn.rst @@ -0,0 +1,38 @@ +.. _cn_api_fluid_layers_cos: + +cos +------------------------------- + +.. py:function:: paddle.fluid.layers.cos(x, name=None) + + + + +余弦函数。 + +输入范围是 `(-inf, inf)` , 输出范围是 `[-1,1]`。 + +.. math:: + + out = cos(x) + +参数: + - x (Tensor) - 输入的Tensor,数据类型为:float32、float64 、float16。 + - name (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回:输出Tensor,与 ``x`` 维度相同、数据类型相同。 + +返回类型:Tensor + +**代码示例**: + +.. code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + x = paddle.to_tensor([[-1, np.pi], [1, 15.6]], dtype='float32') + res = paddle.cos(x) + print(res.numpy()) + # [[ 0.54030231 -1. 
] + # [ 0.54030231 -0.99417763]] diff --git a/doc/paddle/api/paddle/fluid/layers/cosine_decay_cn.rst b/doc/paddle/api/paddle/fluid/layers/cosine_decay_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7c5e1df7aace1748cc245547b2883a434663d475 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/cosine_decay_cn.rst @@ -0,0 +1,36 @@ +.. _cn_api_fluid_layers_cosine_decay: + +cosine_decay +------------------------------- + +.. py:function:: paddle.fluid.layers.cosine_decay(learning_rate, step_each_epoch, epochs) + + + + +使用 cosine decay 的衰减方式进行学习率调整。 + +在训练模型时,建议一边进行训练一边降低学习率。 通过使用此方法,学习速率将通过如下cosine衰减策略进行衰减: + +.. math:: + decayed\_lr = learning\_rate * 0.5 * (cos(epoch * math.pi / epochs) + 1) + + +参数: + - **learning_rate** (Variable | float) - 初始学习率。 + - **step_each_epoch** (int) - 一次迭代中的步数。 + - **epochs** - 总迭代次数。 + + + + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + base_lr = 0.1 + lr = fluid.layers.cosine_decay( learning_rate = base_lr, step_each_epoch=10000, epochs=120) + + + diff --git a/doc/paddle/api/paddle/fluid/layers/create_array_cn.rst b/doc/paddle/api/paddle/fluid/layers/create_array_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1426688b11bafd00a8a115aaf3f3eefcd907979b --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/create_array_cn.rst @@ -0,0 +1,39 @@ +.. _cn_api_fluid_layers_create_array: + +create_array +------------------------------- + +.. py:function:: paddle.fluid.layers.create_array(dtype) + + + + + +此OP创建一个LoDTensorArray,它可以用作 :ref:`cn_api_fluid_layers_array\_write` , :ref:`cn_api_fluid_layers_array\_read` OP的输入,以及和 :ref:`cn_api_fluid_layers_While` OP +一起创建RNN网络。 + +参数: + - **dtype** (str) — 指定Tensor中元素的数据类型,支持的数据类型值:float32,float64,int32,int64。 + +返回: 返回创建的空LoDTensorArray,Tensor中的元素数据类型为指定的dtype。 + +返回类型: Variable。 + + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + data = fluid.layers.create_array(dtype='float32') # 创建一个数据类型为float32的LoDTensorArray。 + + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/create_parameter_cn.rst b/doc/paddle/api/paddle/fluid/layers/create_parameter_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3206e5292d3bf4f0150fb8c9ce96243e4d779c6e --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/create_parameter_cn.rst @@ -0,0 +1,43 @@ +.. _cn_api_fluid_layers_create_parameter: + +create_parameter +------------------------------- + + +.. py:function:: paddle.fluid.layers.create_parameter(shape,dtype,name=None,attr=None,is_bias=False,default_initializer=None) + + + + +该OP创建一个参数。该参数是一个可学习的变量, 拥有梯度并且可优化。 + +**注意:这是一个低级别的API。如果您希望自己创建新的op,这个API将非常有用,无需使用layers。** + +参数: + - **shape** (list[int]) - 指定输出Tensor的形状,它可以是一个整数列表。 + - **dtype** (str|numpy.dtype) – 初始化数据类型。可设置的字符串值有:"float16","float32","float64"。 + - **name** (str,可选) - 参数的名称。具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + - **attr** (ParamAttr,可选) - 指定参数的属性对象。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。默认值为None,表示将采用 ParamAttr 的默认方式初始化。 + - **is_bias** (bool,可选) - 当default_initializer为空,该值会对选择哪个默认初始化程序产生影响。如果is_bias为真,则使用initializer.Constant(0.0),否则使用Xavier(),默认值False。 + - **default_initializer** (Initializer,可选) - 参数的初始化程序,默认值为空。 + +返回:创建的Tensor变量。 + +返回类型:Variable。 + +**代码示例**: + +.. 
code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.layers as layers + W = layers.create_parameter(shape=[784, 200], dtype='float32') + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/create_py_reader_by_data_cn.rst b/doc/paddle/api/paddle/fluid/layers/create_py_reader_by_data_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..62aa2edf605132ef78f5a886ea2f10b6a3f3e483 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/create_py_reader_by_data_cn.rst @@ -0,0 +1,69 @@ +.. _cn_api_fluid_layers_create_py_reader_by_data: + +create_py_reader_by_data +------------------------------- + + +.. py:function:: paddle.fluid.layers.create_py_reader_by_data(capacity,feed_list,name=None,use_double_buffer=True) + + + + +创建一个Python端提供数据的reader。该OP与 :ref:`cn_api_fluid_layers_py_reader` 类似,不同点在于它能够从feed变量列表读取数据。 + +参数: + - **capacity** (int) - ``py_reader`` 维护的队列缓冲区的容量大小。单位是batch数量。若reader读取速度较快,建议设置较大的 ``capacity`` 值。 + - **feed_list** (list(Variable)) - feed变量列表,这些变量一般由 :code:`fluid.data()` 创建。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + - **use_double_buffer** (bool,可选) - 是否使用双缓冲区,双缓冲区是为了预读下一个batch的数据、异步CPU -> GPU拷贝。默认值为True。 + +返回:能够从feed变量列表读取数据的reader,数据类型和feed变量列表中变量的数据类型相同。 + +返回类型:reader + +**代码示例:** + +.. code-block:: python + + import paddle + import paddle.fluid as fluid + import paddle.dataset.mnist as mnist + + def network(img, label): + # 用户构建自定义网络,此处以一个简单的线性回归为例。 + predict = fluid.layers.fc(input=img, size=10, act='softmax') + loss = fluid.layers.cross_entropy(input=predict, label=label) + return fluid.layers.mean(loss) + + MEMORY_OPT = False + USE_CUDA = False + + image = fluid.data(name='image', shape=[None, 1, 28, 28], dtype='float32') + label = fluid.data(name='label', shape=[None, 1], dtype='int64') + reader = fluid.layers.create_py_reader_by_data(capacity=64, + feed_list=[image, label]) + reader.decorate_paddle_reader( + paddle.reader.shuffle(paddle.batch(mnist.train(), batch_size=5), buf_size=500)) + img, label = fluid.layers.read_file(reader) + loss = network(img, label) # 用户构建自定义网络并返回损失函数 + + place = fluid.CUDAPlace(0) if USE_CUDA else fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + + build_strategy = fluid.BuildStrategy() + build_strategy.memory_optimize = True if MEMORY_OPT else False + exec_strategy = fluid.ExecutionStrategy() + compiled_prog = fluid.compiler.CompiledProgram( + fluid.default_main_program()).with_data_parallel( + loss_name=loss.name, + build_strategy=build_strategy, + exec_strategy=exec_strategy) + + for epoch_id in range(2): + reader.start() + try: + while True: + exe.run(compiled_prog, fetch_list=[loss.name]) + except fluid.core.EOFException: + reader.reset() diff --git a/doc/paddle/api/paddle/fluid/layers/create_tensor_cn.rst b/doc/paddle/api/paddle/fluid/layers/create_tensor_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7c781a6cda1051e3ac7e53da321f82f3df3a94f0 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/create_tensor_cn.rst @@ -0,0 +1,39 @@ +.. _cn_api_fluid_layers_create_tensor: + +create_tensor +------------------------------- + +.. 
py:function:: paddle.fluid.layers.create_tensor(dtype,name=None,persistable=False) + + + + +创建数据类型为dtype的Tensor。 + +参数: + - **dtype** (str|numpy.dtype) - 创建的Tensor的数据类型,支持数据类型为bool, float16, float32, float64, int8, int16, int32, int64。 + - **name** (str, 可选) - 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + - **persistable** (bool,可选) - 用于设置创建的Tensor的persistable属性,若不设置则默认设置为False。 + +返回: 创建的Tensor,数据类型为dtype。 + +返回类型:Variable + +**代码示例**: + +.. code-block:: python + + """ + 使用print(tensor)打印结果如下: + type { + type: LOD_TENSOR + lod_tensor { + tensor { + data_type: FP32 + } + } + } + persistable: false + """ + import paddle.fluid as fluid + tensor = fluid.layers.create_tensor(dtype='float32') diff --git a/doc/paddle/api/paddle/fluid/layers/crop_cn.rst b/doc/paddle/api/paddle/fluid/layers/crop_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..18d375f06d5e4277e93995db4de71a3006cbf0fb --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/crop_cn.rst @@ -0,0 +1,86 @@ +.. _cn_api_fluid_layers_crop: + +crop +------------------------------- + +.. py:function:: paddle.fluid.layers.crop(x, shape=None, offsets=None, name=None) + + + + +该OP根据偏移量(offsets)和形状(shape),裁剪输入张量。 + +**注意:** 此OP已被弃用,它将在以后的版本中被删除,请使用 :ref:`cn_api_fluid_layers_crop_tensor` 替代 + +**样例**: + +:: + + * Case 1: + Given + X = [[0, 1, 2, 0, 0] + [0, 3, 4, 0, 0] + [0, 0, 0, 0, 0]], + and + shape = [2, 2], + offsets = [0, 1], + output is: + Out = [[1, 2], + [3, 4]]. + * Case 2: + Given + X = [[0, 1, 2, 5, 0] + [0, 3, 4, 6, 0] + [0, 0, 0, 0, 0]], + and shape is tensor + shape = [[0, 0, 0] + [0, 0, 0]] + and + offsets = [0, 1], + + output is: + Out = [[1, 2, 5], + [3, 4, 6]]. + + +参数: + - **x** (Variable): 多维Tensor,数据类型为float32 + - **shape** (Variable|list/tuple of integers) - 指定输出Tensor的形状,它可以是一个Tensor/整数列表/整数元组。如果是Tensor,它的秩必须与x相同,它的形状指定了输出Tensor的形状,它的元素的数值在这里不起作用,该方式适用于每次迭代时候需要改变输出形状的情况。如果是整数列表/元组,则其长度必须与x的秩相同 + - **offsets** (Variable|list/tuple of integers|None,可选) - 指定每个维度上的裁剪的偏移量,它可以是一个Tensor,或者一个整数列表/整数元组。如果是一个Tensor,它的秩必须与x相同,这种方法适用于每次迭代的偏移量(offset)都可能改变的情况。如果是一个整数列表/元组,则长度必须与x的秩相同,如果offsets=None,则每个维度的偏移量为0。默认值为None + - **name** (str|None,可选) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None。 + +返回: 经过形状裁剪之后的Tensor,与输入x具有相同的数据类型 + +返回类型: Variable + +抛出异常: 如果形状不是列表、元组或Variable,抛出ValueError + + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + # case 1 + # 输入x的形状为[-1, 3, 5], + # 参数shape = y是个Variable,形状是[-1, 2, 2],输出Tensor将具有和y一样的形状 + # y的具体数值不起作用,起作用的只有它的形状 + # 经过下面的crop操作之后输出张量的形状是: [-1, 2, 2] + x = fluid.layers.data(name="x", shape=[3, 5], dtype="float32") + y = fluid.layers.data(name="y", shape=[2, 2], dtype="float32") + crop = fluid.layers.crop(x, shape=y) + ## 或者 case 2 + # 输入z的形状为: [-1, 3, 5], shape为整数列表[-1, 2, 3] + # 则经过下面的crop操作之后输出张量的形状为:[-1, 2, 3] + z = fluid.layers.data(name="z", shape=[3, 5], dtype="float32") + crop = fluid.layers.crop(z, shape=[-1, 2, 3]) + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/crop_tensor_cn.rst b/doc/paddle/api/paddle/fluid/layers/crop_tensor_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4919b9866790919e0976adfffb3aafaed6f58ec1 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/crop_tensor_cn.rst @@ -0,0 +1,107 @@ +.. _cn_api_fluid_layers_crop_tensor: + +crop_tensor +------------------------------- + +.. 
py:function:: paddle.fluid.layers.crop_tensor(x, shape=None, offsets=None, name=None) + + + + +根据偏移量(offsets)和形状(shape),裁剪输入(x)Tensor。 + +**示例**: + +:: + + * 示例1(输入为2-D Tensor): + + 输入: + X.shape = [3, 5] + X.data = [[0, 1, 2, 0, 0], + [0, 3, 4, 0, 0], + [0, 0, 0, 0, 0]] + + 参数: + shape = [2, 2] + offsets = [0, 1] + + 输出: + Out.shape = [2, 2] + Out.data = [[1, 2], + [3, 4]] + + * 示例2(输入为3-D Tensor): + + 输入: + + X.shape = [2, 3, 4] + X.data = [[[0, 1, 2, 3], + [0, 5, 6, 7], + [0, 0, 0, 0]], + [[0, 3, 4, 5], + [0, 6, 7, 8], + [0, 0, 0, 0]]] + + 参数: + shape = [2, 2, -1] + offsets = [0, 0, 1] + + 输出: + Out.shape = [2, 2, 3] + Out.data = [[[1, 2, 3], + [5, 6, 7]], + [[3, 4, 5], + [6, 7, 8]]] + +参数: + - **x** (Variable): 1-D到6-D Tensor,数据类型为float32、float64、int32或者int64。 + - **shape** (list|tuple|Variable) - 输出Tensor的形状,数据类型为int32。如果是列表或元组,则其长度必须与x的维度大小相同,如果是Variable,则其应该是1-D Tensor。当它是列表时,每一个元素可以是整数或者形状为[1]的Tensor。含有Variable的方式适用于每次迭代时需要改变输出形状的情况。 + - **offsets** (list|tuple|Variable,可选) - 每个维度上裁剪的偏移量,数据类型为int32。如果是列表或元组,则其长度必须与x的维度大小相同,如果是Variable,则其应是1-D Tensor。当它是列表时,每一个元素可以是整数或者形状为[1]的Variable。含有Variable的方式适用于每次迭代的偏移量(offset)都可能改变的情况。默认值:None,每个维度的偏移量为0。 + - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回: 裁剪后的Tensor,数据类型与输入(x)相同。 + +返回类型: Variable + +抛出异常: + - :code:`TypeError` - x 的数据类型应该是float32、float64、int32或者int64。 + - :code:`TypeError` - shape 应该是列表、元组或Variable。 + - :code:`TypeError` - shape 的数据类型应该是int32。 + - :code:`TypeError` - offsets 应该是列表、元组、Variable或None。 + - :code:`TypeError` - offsets 的数据类型应该是int32。 + - :code:`TypeError` - offsets 的元素应该大于等于0。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + x = fluid.data(name="x", shape=[None, 3, 5], dtype="float32") + # x.shape = [-1, 3, 5], where -1 indicates batch size, and it will get the exact value in runtime. + + # shape is a 1-D Tensor + crop_shape = fluid.data(name="crop_shape", shape=[3], dtype="int32") + crop0 = fluid.layers.crop_tensor(x, shape=crop_shape) + # crop0.shape = [-1, -1, -1], it means crop0.shape[0] = x.shape[0] in runtime. + + # or shape is a list in which each element is a constant + crop1 = fluid.layers.crop_tensor(x, shape=[-1, -1, 3], offsets=[0, 1, 0]) + # crop1.shape = [-1, 2, 3] + + # or shape is a list in which each element is a constant or Tensor + y = fluid.data(name="y", shape=[3, 8, 8], dtype="float32") + dim1 = fluid.layers.data(name="dim1", shape=[1], dtype="int32") + crop2 = fluid.layers.crop_tensor(y, shape=[3, dim1, 4]) + # crop2.shape = [3, -1, 4] + + # offsets is a 1-D Tensor + crop_offsets = fluid.data(name="crop_offsets", shape=[3], dtype="int32") + crop3 = fluid.layers.crop_tensor(x, shape=[-1, 2, 3], offsets=crop_offsets) + # crop3.shape = [-1, 2, 3] + + # offsets is a list in which each element is a constant or Tensor + offsets_var = fluid.data(name="offset", shape=[1], dtype="int32") + crop4 = fluid.layers.crop_tensor(x, shape=[-1, 2, 3], offsets=[0, 1, offsets_var]) + # crop4.shape = [-1, 2, 3] + diff --git a/doc/paddle/api/paddle/fluid/layers/cross_entropy_cn.rst b/doc/paddle/api/paddle/fluid/layers/cross_entropy_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..05b080474c36f623ed1f81eef07c352b041166b5 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/cross_entropy_cn.rst @@ -0,0 +1,42 @@ +.. _cn_api_fluid_layers_cross_entropy: + +cross_entropy +------------------------------- + +.. 
py:function:: paddle.fluid.layers.cross_entropy(input, label, soft_label=False, ignore_index=-100) + + + + +该OP计算输入input和标签label间的交叉熵,可用于计算硬标签或软标签的交叉熵。 + +1. 硬标签交叉熵算法:若soft_label = False, :math:`label[i_1, i_2, ..., i_k]` 表示每个样本的硬标签值: + + .. math:: + \\output[i_1, i_2, ..., i_k]=-log(input[i_1, i_2, ..., i_k, j]), label[i_1, i_2, ..., i_k] = j, j != ignore\_index\\ + +2. 软标签交叉熵算法:若soft_label = True, :math:`label[i_1, i_2, ..., i_k, j]` 表明每个样本对应类别j的软标签值: + + .. math:: + \\output[i_1, i_2, ..., i_k]= -\sum_{j}label[i_1,i_2,...,i_k,j]*log(input[i_1, i_2, ..., i_k,j])\\ + +参数: + - **input** (Variable) – 维度为 :math:`[N_1, N_2, ..., N_k, D]` 的多维Tensor,其中最后一维D是类别数目。数据类型为float32或float64。 + - **label** (Variable) – 输入input对应的标签值。若soft_label=False,要求label维度为 :math:`[N_1, N_2, ..., N_k]` 或 :math:`[N_1, N_2, ..., N_k, 1]` ,数据类型为int64,且值必须大于等于0且小于D;若soft_label=True,要求label的维度、数据类型与input相同,且每个样本各软标签的总和为1。 + - **soft_label** (bool) – 指明label是否为软标签。默认为False,表示label为硬标签;若soft_label=True则表示软标签。 + - **ignore_index** (int) – 指定一个忽略的标签值,此标签值不参与计算,负值表示无需忽略任何标签值。仅在soft_label=False时有效。 默认值为-100。 + +返回: 表示交叉熵结果的Tensor,数据类型与input相同。若soft_label=False,则返回值维度与label维度相同;若soft_label=True,则返回值维度为 :math:`[N_1, N_2, ..., N_k, 1]` 。 + +返回类型:Variable + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + class_num = 7 + x = fluid.layers.data(name='x', shape=[3, 10], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + predict = fluid.layers.fc(input=x, size=class_num, act='softmax') + cost = fluid.layers.cross_entropy(input=predict, label=label) diff --git a/doc/paddle/api/paddle/fluid/layers/ctc_greedy_decoder_cn.rst b/doc/paddle/api/paddle/fluid/layers/ctc_greedy_decoder_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c2d8f67ca85f49c121c2a6530b4ad7bcb1632328 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/ctc_greedy_decoder_cn.rst @@ -0,0 +1,105 @@ +.. _cn_api_fluid_layers_ctc_greedy_decoder: + +ctc_greedy_decoder +------------------------------- + +.. py:function:: paddle.fluid.layers.ctc_greedy_decoder(input, blank, name=None) + + + + + +该OP用于贪婪策略解码序列,步骤如下: + 1. 获取输入中的每一行的最大值索引,也就是numpy.argmax(input, axis=0)。 + 2. 对于step1结果中的每个序列,合并两个空格之间的重复部分并删除所有空格。 + +该API支持两种输入,LoDTensor和Tensor输入,不同输入的代码样例如下: + +**样例**: + +:: + + # for lod tensor input + 已知: + + input.data = [[0.6, 0.1, 0.3, 0.1], + [0.3, 0.2, 0.4, 0.1], + [0.1, 0.5, 0.1, 0.3], + [0.5, 0.1, 0.3, 0.1], + + [0.5, 0.1, 0.3, 0.1], + [0.2, 0.2, 0.2, 0.4], + [0.2, 0.2, 0.1, 0.5], + [0.5, 0.1, 0.3, 0.1]] + + input.lod = [[4, 4]] + + 计算过程: + + 1. 将argmax的运算结果应用于输入的第一个序列,即 input.data[0:4] 。 + 则得出的结果为[[0], [2], [1], [0]] + 2. 合并重复的索引值部分,删除空格,即为0的值。 + 则第一个输入序列对应的输出为:[[2], [1]] + + 最后 + + output.data = [[2], + [1], + [3]] + + output.lod = [[2, 1]] + + # for tensor input + input.data = [[[0.6, 0.1, 0.3, 0.1], + [0.3, 0.2, 0.4, 0.1], + [0.1, 0.5, 0.1, 0.3], + [0.5, 0.1, 0.3, 0.1]], + + [[0.5, 0.1, 0.3, 0.1], + [0.2, 0.2, 0.2, 0.4], + [0.2, 0.2, 0.1, 0.5], + [0.5, 0.1, 0.3, 0.1]]] + + input_length.data = [[4], [4]] + input.shape = [2, 4, 4] + + step1: Apply argmax to first input sequence which is input.data[0:4]. 
Then we get: + [[0], [2], [1], [0]], for input.data[4:8] is [[0], [3], [3], [0]], shape is [2,4,1] + step2: Change the argmax result to use padding mode, then argmax result is + [[0, 2, 1, 0], [0, 3, 3, 0]], shape is [2, 4], lod is [], input_length is [[4], [4]] + step3: Apply ctc_align to padding argmax result, padding_value is 0 + + Finally: + output.data = [[2, 1, 0, 0], + [3, 0, 0, 0]] + output_length.data = [[2], [1]] + + +参数: + - **input** (Variable) — 变长序列的概率, 在输入为LoDTensor情况下,它是具有LoD信息的二维LoDTensor。 形状为[Lp,num_classes +1],其中Lp是所有输入序列的长度之和,num_classes是真实的类数。 在输入为Tensor情况下,它是带有填充的3-D张量,其形状为[batch_size,N,num_classes +1]。 (不包括空白标签)。 数据类型可以是float32或float64。 + - **blank** (int) — Connectionist Temporal Classification (CTC) loss空白标签索引, 其数值属于半开区间[0,num_classes + 1) + - **name** (str|None,可选) – 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None。 + +返回:对于输入为LoDTensor的情况,返回CTC贪婪解码器的结果,即2-D LoDTensor,形状为[Lp,1],数据类型为int64。 “Lp”是所有输出序列长度的总和。 如果结果中的所有序列均为空,则结果LoDTensor将为[-1],其中LoD为[[]]。对于输入为Tensor的情况,返回一个元组,(output, output_length), 其中,output是一个形状为 [batch_size, N],类型为int64的Tensor。output_length是一个形状为[batch_size, 1],类型为int64的Tensor,表示Tensor输入下,每个输出序列的长度。 + +返回类型: Variable + + +**代码示例** + +.. code-block:: python + + # for lod mode + import paddle.fluid as fluid + x = fluid.data(name='x', shape=[None, 8], dtype='float32', lod_level=1) + cost = fluid.layers.ctc_greedy_decoder(input=x, blank=0) + # for padding mode + x_pad = fluid.data(name='x_pad', shape=[10, 4, 8], dtype='float32') + x_pad_len = fluid.data(name='x_pad_len', shape=[10, 1], dtype='int64') + out, out_len = fluid.layers.ctc_greedy_decoder(input=x_pad, blank=0, + input_length=x_pad_len) + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/cumsum_cn.rst b/doc/paddle/api/paddle/fluid/layers/cumsum_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8e6f238b87381651e08b0a0dac4fa441b7605683 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/cumsum_cn.rst @@ -0,0 +1,36 @@ +.. _cn_api_fluid_layers_cumsum: + +cumsum + ------------------------------- + +.. py:function:: paddle.fluid.layers.cumsum(x,axis=None,exclusive=None,reverse=None) + + +沿给定轴(axis)的元素的累加和。默认结果的第一个元素和输入的第一个元素一致。如果exclusive为True,结果的第一个元素则为0。 + +参数: + - **x** (Variable) - 累加的输入,需要进行累加操作的变量Tensor/LoDTensor。 + - **axis** (int,可选) - 指明需要累加的维。-1代表最后一维。默认为:-1。 + - **exclusive** (bool,可选) - 是否执行exclusive累加。默认为:False。 + - **reverse** (bool,可选) - 若为True,则以相反顺序执行累加。默认为:False。 + +返回:Variable(Tensor)。是累加的结果,即累加器的输出。 + +返回类型:变量(Variable)。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + data = fluid.layers.data(name="input", shape=[32, 784]) + result = fluid.layers.cumsum(data, axis=0) + + + + + + + + +
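+**补充示例**: + +下面是一个补充的用法示意,用于说明 ``exclusive`` 与 ``reverse`` 两个参数对结果的影响;注释中的数值为按上述定义推算的期望结果,仅供参考: + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + data = fluid.layers.assign(np.array([1, 2, 3, 4], dtype='float32')) + result1 = fluid.layers.cumsum(data) # 期望结果: [1., 3., 6., 10.] + result2 = fluid.layers.cumsum(data, exclusive=True) # 期望结果: [0., 1., 3., 6.] + result3 = fluid.layers.cumsum(data, reverse=True) # 期望结果: [10., 9., 7., 4.] + + diff --git a/doc/paddle/api/paddle/fluid/layers/data_cn.rst b/doc/paddle/api/paddle/fluid/layers/data_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..14a6ab6ea1d94dcdc3586417ef9c85db98783c74 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/data_cn.rst @@ -0,0 +1,67 @@ +.. _cn_api_fluid_data: + +data + ------------------------------- + + +.. 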
py:function:: paddle.fluid.data(name, shape, dtype='float32', lod_level=0) + + + + +该OP会在全局block中创建变量(Variable),该全局变量可被计算图中的算子(operator)访问。该变量可作为占位符用于数据输入。例如用执行器(Executor)feed数据进该变量 + +注意: + + 不推荐使用 ``paddle.fluid.layers.data`` ,其在之后的版本中会被删除。请使用这个 ``paddle.fluid.data`` 。 + + ``paddle.fluid.layers.data`` 在组网期间会设置创建的变量维度(shape)和数据类型(dtype),但不会检查输入数据的维度和数据类型是否符合要求。 ``paddle.fluid.data`` 会在运行过程中由Executor/ParallelExecutor检查输入数据的维度和数据类型。 + + 如果想输入变长输入,可以使用 ``paddle.fluid.data`` 时将变长维度设为-1,或者直接输入 ``paddle.fluid.layers.data`` 且PaddlePaddle会按具体输入的形状运行。 + + 本API创建的变量默认 ``stop_gradient`` 属性为true,这意味这反向梯度不会被传递过这个数据变量。如果用户想传递反向梯度,可以设置 ``var.stop_gradient = False`` 。 + +参数: + - **name** (str)- 被创建的变量的名字,具体用法请参见 :ref:`api_guide_Name` 。 + - **shape** (list|tuple)- 声明维度信息的list或tuple。 + - **dtype** (np.dtype|VarType|str,可选)- 数据类型,支持bool,float16,float32,float64,int8,int16,int32,int64,uint8。默认值为float32。 + - **lod_level** (int,可选)- LoDTensor变量的LoD level数,LoD level是PaddlePaddle的高级特性,一般任务中不会需要更改此默认值,关于LoD level的详细适用场景和用法请见 :ref:`cn_user_guide_lod_tensor` 。默认值为0。 + +返回:全局变量,可进行数据访问 + +返回类型:Variable + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + # Creates a variable with fixed size [3, 2, 1] + # User can only feed data of the same shape to x + x = fluid.data(name='x', shape=[3, 2, 1], dtype='float32') + + # Creates a variable with changable batch size -1. + # Users can feed data of any batch size into y, + # but size of each data sample has to be [2, 1] + y = fluid.data(name='y', shape=[-1, 2, 1], dtype='float32') + + z = x + y + + # In this example, we will feed x and y with np-ndarry "1" + # and fetch z, like implementing "1 + 1 = 2" in PaddlePaddle + feed_data = np.ones(shape=[3, 2, 1], dtype=np.float32) + + exe = fluid.Executor(fluid.CPUPlace()) + out = exe.run(fluid.default_main_program(), + feed={ + 'x': feed_data, + 'y': feed_data + }, + fetch_list=[z.name]) + + # np-ndarray of shape=[3, 2, 1], dtype=float32, whose elements are 2 + print(out) + + diff --git a/doc/paddle/api/paddle/fluid/layers/data_norm_cn.rst b/doc/paddle/api/paddle/fluid/layers/data_norm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c03e0b70f94c54031762574426c57e0bfe681bcd --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/data_norm_cn.rst @@ -0,0 +1,64 @@ +.. _cn_api_fluid_layers_data_norm: + +data_norm +------------------------------- + + +.. py:function:: paddle.fluid.layers.data_norm(input, act=None, epsilon=1e-05, param_attr=None, data_layout='NCHW', in_place=False, name=None, moving_mean_name=None, moving_variance_name=None, do_model_average_for_mean_and_var=False) + + + + +**数据正则化层** + +可用作conv2d和fully_connected操作的正则化函数。 此层所需的数据格式为以下之一: + +1. NHWC [batch, in_height, in_width, in_channels] +2. NCHW [batch, in_channels, in_height, in_width] + +:math:`input` 为一个mini-batch上的特征: + +.. math:: + \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \qquad &//\ + \ mini-batch\ mean \\ + \sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i - \ + \mu_{\beta})^2 \qquad &//\ mini-batch\ variance \\ + \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\ + \sigma_{\beta}^{2} + \epsilon}} \qquad &//\ normalize \\ + y_i &\gets \gamma \hat{x_i} + \beta \qquad &//\ scale\ and\ shift + +参数: + - **input** (Variable) - 输入变量,它是一个LoDTensor。 + - **act** (string,默认None) - 激活函数类型,线性| relu | prelu | ... 
+ - **epsilon** (float,默认1e-05) - + - **param_attr** (ParamAttr) - 参数比例的参数属性。 + - **data_layout** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 + - **in_place** (bool,默认值False) - 使data_norm的输入和输出复用同一块内存。 + - **name** (string,默认None) - 此层的名称(可选)。 如果设置为None,则将自动命名该层。 + - **moving_mean_name** (string,Default None) - 存储全局Mean的moving_mean的名称。 + - **moving_variance_name** (string,默认None) - 存储全局Variance的moving_variance的名称。 + - **do_model_average_for_mean_and_var** (bool,默认值为false) - 是否为mean和variance进行模型平均。 + - **slot_dim** (int, 默认值为-1) - 一个slot的embedding维度,slot用来表征一类特征的集合,在pslib模式下,通常我们通过slot区分特征id,并从参数服务器(pslib)中提取它们的embedding。embedding的第一维是历史上这个embedding展示的次数。如果本op的输入是由这样的embedding连接而来,那么当这个特征id是新的或空的,则正则化结果可能不实际。为了避免这种情况,我们添加了slot_dim来定位并判断这一维是否为零。如果是的话,我们选择跳过正则化。 + - **summary_decay_rate** (float, 默认值为0.9999999) - 更新summary信息时的衰减率。 + - **sync_stats** (bool, 默认值False) - 在多GPU卡的场景下可以使用,用来同步多卡间的summary信息。 + - **enable_scale_and_shift** (bool, 默认值False) - 在分布式全局正则化后是否做像batchnorm一样做scale&shift的操作。 + +返回: 张量变量,是对输入数据进行正则化后的结果。 + +返回类型: Variable + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + hidden1 = fluid.layers.data(name="hidden1", shape=[200]) + hidden2 = fluid.layers.data_norm(name="hidden2", input=hidden1) + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/deformable_conv_cn.rst b/doc/paddle/api/paddle/fluid/layers/deformable_conv_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7ba3b80ae16346b1bd3a8c90c659dd2e4d4d96a2 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/deformable_conv_cn.rst @@ -0,0 +1,101 @@ +.. _cn_api_fluid_layers_deformable_conv: + +deformable_conv +------------------------------- + + +.. py:function:: paddle.fluid.layers.deformable_conv(input, offset, mask, num_filters, filter_size, stride=1, padding=0, dilation=1, groups=None, deformable_groups=None, im2col_step=None, param_attr=None, bias_attr=None, modulated=True, name=None) + + + + +**可变形卷积算子** + +deformable_conv op对输入4-D Tensor计算2-D可变形卷积。给定输入Tensor x,输出Tensor y,可变形卷积运算如下所示: + +可形变卷积v2: + + :math:`y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k) * \Delta m_k}` + +可形变卷积v1: + + :math:`y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k)}` + +其中 :math:`\Delta p_k` 和 :math:`\Delta m_k` 分别为第k个位置的可学习偏移和调制标量。在deformable_conv_v1中 :math:`\Delta m_k` 为1. + +具体细节可以参考论文:`<> `_ 和 `<> `_ 。 + +**示例** + +输入: + input 形状: :math:`(N, C_{in}, H_{in}, W_{in})` + + 卷积核形状: :math:`(C_{out}, C_{in}, H_f, W_f)` + + offset 形状: :math:`(N, 2 * deformable\_groups * H_f * H_w, H_{in}, W_{in})` + + mask 形状: :math:`(N, deformable\_groups * H_f * H_w, H_{in}, W_{in})` + +输出: + 输出形状: :math:`(N, C_{out}, H_{out}, W_{out})` + +其中 + +.. 
math:: + + H_{out}&= \frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]} + 1 + + W_{out}&= \frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1 + + +参数: + - **input** (Variable) - 形状为 :math:`[N, C, H, W]` 的输入Tensor,数据类型为float32或float64。 + - **offset** (Variable) – 可变形卷积层的输入坐标偏移,数据类型为float32或float64。 + - **mask** (Variable, 可选) – 可变形卷积层的输入掩码,当使用可变形卷积算子v1时,请将mask设置为None, 数据类型为float32或float64。 + - **num_filters** (int) – 卷积核数,与输出Tensor通道数相同。 + - **filter_size** (int|tuple) – 卷积核大小。如果filter_size为元组,则必须包含两个整数(filter_size_H, filter_size_W)。若数据类型为int,卷积核形状为(filter_size, filter_size)。 + - **stride** (int|tuple) – 步长大小。如果stride为元组,则必须包含两个整数(stride_H, stride_W)。否则stride_H = stride_W = stride。默认值为1。 + - **padding** (int|tuple) – padding大小。如果padding为元组,则必须包含两个整数(padding_H, padding_W)。否则padding_H = padding_W = padding。默认值为0。 + - **dilation** (int|tuple) – dilation大小。如果dilation为元组,则必须包含两个整数(dilation_H, dilation_W)。否则dilation_H = dilation_W = dilation。默认值为1。 + - **groups** (int) – 卷积组数。依据Alex Krizhevsky的Deep CNN论文中的分组卷积,有:当group=2时,前一半卷积核只和前一半输入通道有关,而后一半卷积核只和后一半输入通道有关。缺省值为1。 + - **deformable_groups** (int) – 可变形卷积组数。默认值为1。 + - **im2col_step** (int) – 每个im2col计算的最大图像数。总batch大小应可以被该值整除或小于该值。如果您面临内存问题,可以尝试在此处使用一个较小的值。默认值为64。 + - **param_attr** (ParamAttr,可选) – 可变形卷积的可学习权重的属性。如果将其设置为None或某种ParamAttr,可变形卷积将创建ParamAttr作为param_attr。如果没有设置此param_attr的Initializer,该参数将被Normal(0.0, std)初始化,且其中的std为 :math:`(\frac{2.0 }{filter\_elem\_num})^{0.5}` 。默认值为None。 + - **bias_attr** (ParamAttr|bool,可选) – 可变形卷积层的偏置的参数属性。如果设为False,则输出单元不会加偏置。如果设为None或者某种ParamAttr,conv2d会创建ParamAttr作为bias_attr。如果不设置bias_attr的Initializer,偏置会被初始化为0。默认值为None。 + - **modulated** (bool)- 确定使用v1和v2中的哪个版本,如果为True,则选择使用v2。默认值为True。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:可变形卷积输出的4-D Tensor,数据类型为float32或float64。 + +返回类型:Variable + +抛出异常:ValueError – 如果input, filter_size, stride, padding和groups的大小不匹配。 + +**代码示例** + +.. code-block:: python + + #deformable conv v2: + + import paddle.fluid as fluid + C_in, H_in, W_in = 3, 32, 32 + filter_size, deformable_groups = 3, 1 + data = fluid.layers.data(name='data', shape=[C_in, H_in, W_in], dtype='float32') + offset = fluid.layers.data(name='offset', shape=[2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32') + mask = fluid.layers.data(name='mask', shape=[deformable_groups*filter_size**2, H_in, W_in], dtype='float32') + out = fluid.layers.deformable_conv(input=data, offset=offset, mask=mask, + num_filters=2, filter_size=filter_size, padding=1, modulated=True) + + #deformable conv v1: + + import paddle.fluid as fluid + C_in, H_in, W_in = 3, 32, 32 + filter_size, deformable_groups = 3, 1 + data = fluid.layers.data(name='data', shape=[C_in, H_in, W_in], dtype='float32') + offset = fluid.layers.data(name='offset', shape=[2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32') + out = fluid.layers.deformable_conv(input=data, offset=offset, mask=None, + num_filters=2, filter_size=filter_size, padding=1, modulated=False) + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/deformable_roi_pooling_cn.rst b/doc/paddle/api/paddle/fluid/layers/deformable_roi_pooling_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a1f4aba7203d3d88747f1fd822c4248065484cfa --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/deformable_roi_pooling_cn.rst @@ -0,0 +1,99 @@ +.. _cn_api_fluid_layers_deformable_roi_pooling: + +deformable_roi_pooling +------------------------------- + +.. 
py:function:: paddle.fluid.layers.deformable_roi_pooling(input, rois, trans, no_trans=False, spatial_scale=1.0, group_size=[1, 1], pooled_height=1, pooled_width=1, part_size=None, sample_per_part=1, trans_std=0.1, position_sensitive=False, name=None) + + + + +可变形感兴趣区域(ROI)池化层 + +该OP对输入进行了可形变的感兴趣区域(ROI)池化操作。如同 `可形变卷积网络 `_ 描述的一样,它将为每个bin中的像素获取一个偏移量,以便于在合适的位置进行池化。在完成可变形感兴趣区域(ROI)池化操作之后,批量数将变为候选框的数量。 + +可变形感兴趣区域(ROI)池化包含三个步骤: + +1、将获取的候选区域按照设定的池化宽度和池化高度划分成相同大小的区域。 + +2、将得到的位置偏移量添加到候选区域的像素来得到新的位置,并且通过双线性插值去获取那些偏移之后位置不为整数的像素的值。 + +3、在每一个bin中去均匀采样一些像素点,获取其中的均值去作为我们的输出。 + + +参数: + - **input** (Variable) - 可变形感兴趣区域(ROI)池化层的输入,输入为数据类型为float32的Tensor。输入张量的形状为[N,C,H,W]。其中N是批量大小,C是输入通道的数量,H是特征的高度,W是特征的宽度。 + - **rois** (Variable)- 将池化的ROIs(感兴趣区域),应为一个形状为(num_rois,4)的2-D LoDTensor,且lod level为1。其中值为[[x1,y1,x2,y2],...],(x1,y1)为左上角坐标,(x2, y2)为右下角坐标。 + - **trans** (Variable)- 池化时ROIs上的特征偏移,输入为数据类型为float32的Tensor。格式为[N,C,H,W],其中N是ROIs的数量,C是通道的数量,指示x和y方向上的偏移距离,H是池化的高度,W是池化的宽度。 + - **no_trans** (bool)- 确定roi池化阶段是否加入偏移以获取新的输出。其中值为bool变量,取True或False。如果为True,则表示不加入偏移。默认为False。 + - **spatial_scale** (float) - 输入特征图的高度(或宽度)与原始图像高度(或宽度)的比率,其中数值的类型为float32,并且等于卷积图层中总步长的倒数,默认为1.0。 + - **group_size** (list|tuple)- 输入通道划分成的组数,输入为list 或者 tuple,其中数值类型为int32(例如,输入通道的数量是k1 * k2 * (C + 1),其中k1和k2是组宽度和高度,C + 1是输出通道的数量。如(4,6)中4是组的高度,6是组的宽度)。默认为[1,1]。 + - **pooled_height** (int)- 池化后输出的高度, 值的类型为int32,默认值:1。 + - **pooled_width** (int)- 池化后输出的宽度, 值的类型为int32, 默认值:1。 + - **part_size** (list|tuple)- 偏移的高度和宽度,如(4,6)代表高度为4、宽度为6,常规是高度和宽度等于pooled_height和pooled_width。默认为None,此时默认值为[pooled_height,pooled_width]。 + - **sample_per_part** (int)- 每个bin中的样本数量,设置值越大,采样结果越精细,但是更加消耗性能。默认为1。 + - **trans_std** (float)- 偏移系数,控制偏移量的大小,默认为0.1。 + - **position_sensitive** (bool)- 是否选择可变形位置敏感型感兴趣区域(PSROI)池化模式,数值类型为bool型。如果为False,输入维度和输出维度相等。如果为True,输入维度等于输出维度乘以pooled_width和pooled_height。默认为False。 + - **name** (str)- 此层的名称,默认为None。 + +返回: 可变形感兴趣区域(ROI)池化的输出,如果position_sensitive为False,输出维度和输出维度相等。如果position_sensitive为True,输出维度等于输入维度除以pooled_width和pooled_height。 + + +返回类型: Variable, 数据类型为float32. + +**代码示例** + +.. 
code-block:: python + + #position_sensitive=False + + import paddle.fluid as fluid + input = fluid.data(name="input", + shape=[2, 192, 64, 64], + dtype='float32') + rois = fluid.data(name="rois", + shape=[-1, 4], + dtype='float32', + lod_level=1) + trans = fluid.data(name="trans", + shape=[2, 384, 64, 64], + dtype='float32') + x = fluid.layers.deformable_roi_pooling(input=input, + rois=rois, + trans=trans, + no_trans=False, + spatial_scale=1.0, + group_size=(1, 1), + pooled_height=8, + pooled_width=8, + part_size=(8, 8), + sample_per_part=4, + trans_std=0.1, + position_sensitive=False) + + #position_sensitive=True + + import paddle.fluid as fluid + input = fluid.data(name="input", + shape=[2, 192, 64, 64], + dtype='float32') + rois = fluid.data(name="rois", + shape=[-1, 4], + dtype='float32', + lod_level=1) + trans = fluid.data(name="trans", + shape=[2, 384, 64, 64], + dtype='float32') + x = fluid.layers.deformable_roi_pooling(input=input, + rois=rois, + trans=trans, + no_trans=False, + spatial_scale=1.0, + group_size=(1, 1), + pooled_height=8, + pooled_width=8, + part_size=(8, 8), + sample_per_part=4, + trans_std=0.1, + position_sensitive=True) + diff --git a/doc/paddle/api/paddle/fluid/layers/density_prior_box_cn.rst b/doc/paddle/api/paddle/fluid/layers/density_prior_box_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e6ca219728c3cc23cdb69f552a36737a7a078039 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/density_prior_box_cn.rst @@ -0,0 +1,62 @@ +.. _cn_api_fluid_layers_density_prior_box: + +density_prior_box +------------------------------- + +.. py:function:: paddle.fluid.layers.density_prior_box(input, image, densities=None, fixed_sizes=None, fixed_ratios=None, variance=[0.1, 0.1, 0.2, 0.2], clip=False, steps=[0.0, 0.0], offset=0.5, flatten_to_2d=False, name=None) + + + + + +该OP为SSD算法(Single Shot MultiBox Detector)生成density prior box,在每个 ``input`` 的位置产生N个候选框,其中,N由 ``densities`` , ``fixed_sizes`` 和 ``fixed_ratios`` 来计算。生成的每个输入位置附近的候选框中心(网格点)由 ``densities`` 和 ``density prior box`` 的数量计算,其中 ``density prior box`` 的数量由 ``fixed_sizes`` 和 ``fixed_ratios`` 决定。``fixed_sizes`` 和 ``densities`` 的大小一致。 + +.. math:: + + N\_density\_prior\_box =sum(N\_fixed\_ratios * {densities\_i}^2) + + +参数: + - **input** (Variable) - 形状为NCHW的4-D Tensor,数据类型为float32或float64。 + - **image** (Variable) - 输入图像,形状为NCHW的4-D Tensor,数据类型为float32或float64。 + - **densities** (list|tuple|None) - 生成的density prior boxes的densities,此属性应该是一个整数列表或数组。默认值为None。 + - **fixed_sizes** (list|tuple|None) - 生成的density prior boxes的大小,此属性应该为和 :attr:`densities` 有同样长度的列表或数组。默认值为None。 + - **fixed_ratios** (list|tuple|None) - 生成的density prior boxes的比值,如果该属性未被设置,同时 :attr:`densities` 和 :attr:`fix_sizes` 被设置,则 :attr:`aspect_ratios` 被用于生成 density prior boxes + - **variance** (list|tuple) - 将被用于density prior boxes编码的方差,默认值为:[0.1, 0.1, 0.2, 0.2] + - **clip** (bool) - 是否裁剪超出范围的box。默认值:False + - **step** (list|tuple) - Prior boxes在宽度和高度的步长,如果step[0]等于0.0或step[1]等于0.0, input的the density prior boxes的高度/宽度的步长将被自动计算。默认值:Default: [0., 0.] 
+ - **offset** (float) - Prior boxes中心偏移值,默认为:0.5 + - **flatten_to_2d** (bool) - 是否将output prior boxes和方差 ``flatten`` 至2-D,其中第二个dim为4。默认值:False + - **name** (str|None) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。默认值为None。 + + +返回:含有两个变量的元组,包括: + 候选框: + + 当flatten_to_2d为False时,形状为[H, W, num_priors, 4]的4-D Tensor。 + 当flatten_to_2d为True时,形式为[H * W * num_priors, 4]的 2-D Tensor。 + 其中,H是输入的高度,W是输入的宽度,num_priors是输入中每个位置的候选框数。 + + 候选框的方差: + + 当flatten_to_2d为False时,形状为[H, W, num_priors, 4]的4-D Tensor。 + 当flatten_to_2d为True时,形式为[H * W * num_priors, 4]的2-D Tensor。 + 其中,H是输入的高度,W是输入的宽度,num_priors是输入中每个位置的候选框数。 + +返回类型:元组 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + input = fluid.layers.data(name="input", shape=[3,6,9]) + images = fluid.layers.data(name="images", shape=[3,9,12]) + box, var = fluid.layers.density_prior_box( + input=input, + image=images, + densities=[4, 2, 1], + fixed_sizes=[32.0, 64.0, 128.0], + fixed_ratios=[1.], + clip=True, + flatten_to_2d=True) diff --git a/doc/paddle/api/paddle/fluid/layers/detection_output_cn.rst b/doc/paddle/api/paddle/fluid/layers/detection_output_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..52f4657f727cc3d5e7c8a84a9071f3fa7f2596d4 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/detection_output_cn.rst @@ -0,0 +1,54 @@ +.. _cn_api_fluid_layers_detection_output: + +detection_output +------------------------------- + +.. py:function:: paddle.fluid.layers.detection_output(loc, scores, prior_box, prior_box_var, background_label=0, nms_threshold=0.3, nms_top_k=400, keep_top_k=200, score_threshold=0.01, nms_eta=1.0) + + + + +给定回归位置偏移、置信度以及先验框信息计算检测的输出,执行步骤如下: + + 1.根据先验框(``prior_box``)信息和回归位置偏移解码出预测框坐标。 + + 2.通过多类非极大值抑制(NMS)获得最终检测输出。 + +请注意,该操作符没有将最终输出边界框clip至图像大小。 + +参数: + - **loc** (Variable) - 3-D Tensor,数据类型为float32或float64,表示回归位置偏移。维度为[N,M,4],M是输入的预测bounding box的个数,N是batch size,每个bounding box有四个坐标值,格式为[xmin,ymin,xmax,ymax],[xmin,ymin]是左上角坐标,[xmax,ymax]是右下角坐标。 + - **scores** (Variable) - 3-D Tensor,数据类型为float32或float64,表示未归一化的置信度。维度为[N,M,C],N和M的含义同上,C是类别数。 + - **prior_box** (Variable) - 2-D Tensor,表示先验框。维度为[M,4],M是提取的先验框个数,格式为[xmin,ymin,xmax,ymax]。 + - **prior_box_var** (Variable) - 2-D Tensor,表示先验框的方差,和 ``prior_box`` 维度相同。 + - **background_label** (int) - 背景标签类别值,背景标签类别上不做NMS。若设为-1,将考虑所有类别。默认值是0。 + - **nms_threshold** (float) - 用于NMS的阈值(threshold),默认值是0.3。 + - **nms_top_k** (int) - 基于score_threshold过滤预测框后,NMS操作前,要挑选出的置信度高的预测框的个数。默认值是400。 + - **keep_top_k** (int) - NMS操作后,要挑选的bounding box总数。默认值是200。 + - **score_threshold** (float) - 置信度得分阈值(Threshold),在NMS之前用来过滤低置信数的边界框(bounding box)。若未提供,则考虑所有框。默认值是0.001。 + - **nms_eta** (float) - 一种adaptive NMS的参数,仅当该值小于1.0时才起作用。默认值是1.0。 + +返回: + 输出是2-D LoDTensor,形状为[No,6]。每行有6个值:[label,confidence,xmin,ymin,xmax,ymax]。No是该mini-batch总的检测框数。LoD的层级数为1,如果采用偏移的LoD表示,则第i个图像有 ``LoD[i+1] - LoD[i]`` 个检测结果,如果等于0,则表示无检测结果。 + +返回类型:Variable + +**代码示例**: + +.. 
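code-block:: python
+
+    # 补充示意(非官方示例,各阈值取值为假设):显式设置 NMS 相关参数
+    import paddle.fluid as fluid
+    pb = fluid.data(name='prior_box_2', shape=[10, 4], dtype='float32')
+    pbv = fluid.data(name='prior_box_var_2', shape=[10, 4], dtype='float32')
+    loc = fluid.data(name='target_box_2', shape=[2, 21, 4], dtype='float32')
+    scores = fluid.data(name='scores_2', shape=[2, 21, 10], dtype='float32')
+    nmsed_outs = fluid.layers.detection_output(scores=scores,
+                                               loc=loc,
+                                               prior_box=pb,
+                                               prior_box_var=pbv,
+                                               nms_threshold=0.45,
+                                               keep_top_k=100,
+                                               score_threshold=0.01)
+
+.. 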
code-block:: python + + import paddle.fluid as fluid + pb = fluid.data(name='prior_box', shape=[10, 4], dtype='float32') + pbv = fluid.data(name='prior_box_var', shape=[10, 4], dtype='float32') + loc = fluid.data(name='target_box', shape=[2, 21, 4], dtype='float32') + scores = fluid.data(name='scores', shape=[2, 21, 10], dtype='float32') + nmsed_outs = fluid.layers.detection_output(scores=scores, + loc=loc, + prior_box=pb, + prior_box_var=pbv) + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/diag_cn.rst b/doc/paddle/api/paddle/fluid/layers/diag_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..72b7b68d507e49bd0ac9c123a50998a01f30e298 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/diag_cn.rst @@ -0,0 +1,36 @@ +.. _cn_api_fluid_layers_diag: + +diag +------------------------------- + +.. py:function:: paddle.fluid.layers.diag(diagonal) + + + + +该OP创建一个方阵,使用输入diagonal来指定方阵的对角线元素的值。 + +参数: + - **diagonal** (Variable|numpy.ndarray) — 数据shape为 :math:`[N]` 一维Tensor,会把该Tensor的元素赋在方阵的对角线上。数据类型可以是 float32,float64,int32,int64。 + +返回:存储着方阵的Tensor,对角线值是输入Tensor diagonal的值, 数据shape为 :math:`[N, N]` 二维Tensor。 + +返回类型:Variable,数据类型和输入数据类型一致。 + +**代码示例**: + +.. code-block:: python + + # [3, 0, 0] + # [0, 4, 0] + # [0, 0, 5] + + import paddle.fluid as fluid + import numpy as np + diagonal = np.arange(3, 6, dtype='int32') + data = fluid.layers.diag(diagonal) + # diagonal.shape=(3,) data.shape=(3, 3) + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/dice_loss_cn.rst b/doc/paddle/api/paddle/fluid/layers/dice_loss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..abb88a15db700b911d6a4d1cdac9e00deca44f9a --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/dice_loss_cn.rst @@ -0,0 +1,46 @@ +.. _cn_api_fluid_layers_dice_loss: + +dice_loss +------------------------------- + +.. py:function:: paddle.fluid.layers.dice_loss(input, label, epsilon=1e-05) + + + + +该OP用来比较预测结果跟标签之间的相似度,通常用于二值图像分割,即标签为二值,也可以做多标签的分割。 + +dice_loss定义为: + +.. math:: + dice\_loss &= 1- \frac{2 * intersection\_area}{total\_rea}\\ + &= \frac{(total\_area−intersection\_area)−intersection\_area}{total\_area}\\ + &= \frac{union\_area−intersection\_area}{total\_area} + +参数: + - **input** (Variable) - 分类的预测概率,秩大于等于2的多维Tensor,维度为 :math:`[N_1, N_2, ..., N_k, D]` 。第一个维度的大小是batch_size,最后一维的大小D是类别数目。数据类型是float32或者float64 + - **label** (Variable)- 正确的标注数据(groud truth),与输入 ``input`` 的秩相同的Tensor,维度为 :math:`[N_1, N_2, ..., N_k, 1]` 。第一个维度的大小是batch_size,最后一个维度的大小是1。数据类型为int32或者int64 + - **epsilon** (float,可选) - 将会加到分子和分母上的数,浮点型的数值。如果输入和标签都为空,则确保dice为1。默认值:0.00001 + +返回: 按上述公式计算出来的损失函数的结果所表示的Tensor,shape为[batch_size, 1],数据类型与 ``input`` 相同 + +返回类型: Variable + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + x = fluid.layers.data(name='data', shape = [3, 224, 224, 2], dtype='float32') + label = fluid.layers.data(name='label', shape=[3, 224, 224, 1], dtype='float32') + predictions = fluid.layers.softmax(x) + loss = fluid.layers.dice_loss(input=predictions, label=label) + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/distribute_fpn_proposals_cn.rst b/doc/paddle/api/paddle/fluid/layers/distribute_fpn_proposals_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..fbe393d8d1a4d100e8ae5982e063536353570118 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/distribute_fpn_proposals_cn.rst @@ -0,0 +1,49 @@ +.. _cn_api_fluid_layers_distribute_fpn_proposals: + +distribute_fpn_proposals +------------------------------- + +.. 
py:function:: paddle.fluid.layers.distribute_fpn_proposals(fpn_rois, min_level, max_level, refer_level, refer_scale, name=None) + + + + +**该op仅支持LoDTensor输入**。在 Feature Pyramid Networks(FPN)模型中,需要依据proposal的尺度和参考尺度与级别将所有proposal分配到不同的FPN级别中。 此外,为了恢复proposals的顺序,我们返回一个数组,该数组表示当前proposals中的原始RoIs索引。 要计算每个RoI的FPN级别,公式如下: + +.. math:: + roi\_scale &= \sqrt{BBoxArea(fpn\_roi)}\\ + level = floor(&\log(\frac{roi\_scale}{refer\_scale}) + refer\_level) + +其中BBoxArea方法用来计算每个RoI的区域。 + + +参数: + - **fpn_rois** (Variable) - 维度为[N,4]的2-D LoDTensor,其中N为检测框的个数,数据类型为float32或float64。 + - **min_level** (int32) - 产生proposal最低级别FPN层。 + - **max_level** (int32) - 产生proposal最高级别FPN层。 + - **refer_level** (int32) - 具有指定比例的FPN层的引用级别。 + - **refer_scale** (int32) - 具有指定级别的FPN层的引用比例。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回: + + - multi_rois(List)- 长度为(max_level-min_level+1)的列表,其中元素为Variable,维度为[M, 4]的2-D LoDTensor,M为每个级别proposal的个数,数据类型为float32或float64。表示每个FPN级别包含的proposals。 + - restore_ind(Variable)- 维度为[N,1]的Tensor,N是总rois的数量。数据类型为int32。 它用于恢复fpn_rois的顺序。 + + +返回类型:Tuple + + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + fpn_rois = fluid.data( + name='data', shape=[None, 4], dtype='float32', lod_level=1) + multi_rois, restore_ind = fluid.layers.distribute_fpn_proposals( + fpn_rois=fpn_rois, + min_level=2, + max_level=5, + refer_level=4, + refer_scale=224) diff --git a/doc/paddle/api/paddle/fluid/layers/double_buffer_cn.rst b/doc/paddle/api/paddle/fluid/layers/double_buffer_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f9fe3b110ab2db024599bf4be1687a15f4c1006c --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/double_buffer_cn.rst @@ -0,0 +1,48 @@ +.. _cn_api_fluid_layers_double_buffer: + +double_buffer +------------------------------- + +.. py:function:: paddle.fluid.layers.double_buffer(reader, place=None, name=None) + + + + + +生成一个双缓冲队列Reader。Reader类有DecoratedReader和FileReader,其中DecoratedReader又可以细分成CustomReader和BufferedReader。这里是基于BufferedReader,数据将复制到具有双缓冲队列的位置(由place指定),如果 ``place=None`` 则将使用executor执行的位置。 + +参数: + - **reader** (Variable) – 需要wrap的reader变量Reader。 + - **place** (Place,可选) – 目标数据的位置,比如CPU,GPU,GPU需要指明是哪张卡。默认是executor执行样本的位置。 + - **name** (str,可选) – 变量的名字。该参数供开发人员打印调试信息时使用,具体用法参见 :ref:`api_guide_Name`,默认值为None。 + + + +返回:Variable(Reader)。双缓冲队列的reader。 + +返回类型:变量(Variable)。 + + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + reader = fluid.layers.py_reader(capacity=64, + shapes=[(-1, 1, 28, 28), (-1, 1)], + dtypes=['float32', 'int64'], + use_double_buffer=False) + reader = fluid.layers.double_buffer(reader) + image, label = fluid.layers.read_file(reader) + + + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/dropout_cn.rst b/doc/paddle/api/paddle/fluid/layers/dropout_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..bdf27eab6204372123acf0dda001b35fac79cb91 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/dropout_cn.rst @@ -0,0 +1,61 @@ +.. _cn_api_fluid_layers_dropout: + +dropout +------------------------------- + +.. 
py:function:: paddle.fluid.layers.dropout(x, dropout_prob, is_test=False, seed=None, name=None, dropout_implementation='downgrade_in_infer')
+
+
+
+
+dropout操作
+
+独立地随机丢弃或保留 x 中的每个元素。Dropout是一种正则化手段,通过在训练过程中阻止神经元节点间的相关性来减少过拟合:根据给定的丢弃概率,dropout操作符随机将一些神经元的输出设置为0,其余输出保持不变。
+
+dropout op可以从Program中删除,以提高执行效率。
+
+参数:
+    - **x** (Variable) - 输入,多维Tensor。数据类型:float32和float64。
+    - **dropout_prob** (float32) - 输入单元的丢弃概率,即输入单元设置为0的概率。
+    - **is_test** (bool) - 标记是否是测试阶段。默认:False。
+    - **seed** (int) - 整型数据,用于创建随机种子。如果该参数设为None,则使用随机种子。注:如果给定一个整型种子,则始终丢弃相同的输出单元。训练过程中勿用固定不变的种子。
+    - **name** (str|None) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值为None。
+    - **dropout_implementation** (str) - 丢弃单元的方式,有 'downgrade_in_infer' 和 'upscale_in_train' 两种选择,默认:'downgrade_in_infer'。具体作用可以参考以下描述。
+
+      1. downgrade_in_infer(默认),在预测时减小输出结果
+
+         - train: out = input * mask
+
+         - inference: out = input * (1.0 - dropout_prob)
+
+         (mask是一个张量,维度和输入维度相同,值为0或1,值为0的比例即为 ``dropout_prob`` )
+
+      2. upscale_in_train,在训练时放大输出结果
+
+         - train: out = input * mask / ( 1.0 - dropout_prob )
+
+         - inference: out = input
+
+         (mask是一个张量,维度和输入维度相同,值为0或1,值为0的比例即为 ``dropout_prob`` )
+
+返回:经过丢弃部分数据之后的结果,是与输入 ``x`` 形状相同的Tensor。
+
+返回类型:Variable
+
+**代码示例**:
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+    import numpy as np
+    x = fluid.layers.data(name="x", shape=[32, 32], dtype="float32")
+    droped = fluid.layers.dropout(x, dropout_prob=0.5)
+    place = fluid.CPUPlace()
+    exe = fluid.Executor(place)
+    exe.run(fluid.default_startup_program())
+    np_x = np.random.random(size=(32, 32)).astype('float32')
+    output = exe.run(feed={"x": np_x}, fetch_list=[droped])
+    print(output)
+
diff --git a/doc/paddle/api/paddle/fluid/layers/dynamic_decode_cn.rst b/doc/paddle/api/paddle/fluid/layers/dynamic_decode_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a629ec588c9ce77d33a1a59ddfe7263175c5680d
--- /dev/null
+++ b/doc/paddle/api/paddle/fluid/layers/dynamic_decode_cn.rst
@@ -0,0 +1,57 @@
+.. _cn_api_fluid_layers_dynamic_decode:
+
+dynamic_decode
+-------------------------------
+
+
+
+.. 
py:method:: dynamic_decode(decoder, inits=None, max_step_num=None, output_time_major=False, impute_finished=False, is_test=False, return_length=False, **kwargs): + + + + +该接口重复执行 :code:`decoder.step()` 直到 其返回的表示完成状态的Tensor中的值全部为True或解码步骤达到 :code:`max_step_num`。 + +:code:`decode.initialize()` 会在解码循环之前被调用一次。如果 :code:`decoder` 实现了 :code:`finalize` 方法,则 :code:`decoder.finalize()` 在解码循环后将被调用一次。 + +参数: + - **decoder** (Decoder) - 解码器的实例。 + - **inits** (object,可选) - 传递给 :code:`decoder.initialize` 的参数。默认为None。 + - **max_step_num** (int,可选) - 最大步数。如果未提供,解码直到解码过程完成( :code:`decode.step()` 返回的表示完成状态的Tensor中的值全部为True)。默认为None。 + - **output_time_major** (bool,可选) - 指明最终输出(此方法的第一个返回值)中包含的Tensor的数据布局。如果为False,其将使用batch优先的数据布局, 此时的形状为 :math:`[batch\_size,seq\_len,...]`。如果为True,其将使用time优先的数据布局,此时的形状为 :math:`[seq\_len,batch\_size,...]`。默认值为False。 + - **impute_finished** (bool,可选) - 若为True,对于当前批次中完成状态为结束的样本,将会拷贝其上一步的状态,而非像未结束的实例那样使用 :code:`decode.step()` 返回的 :code:`next_states` 作为新的状态,这保证了返回的最终状态 :code:`final_states` 是正确的;否则,不会区分是否结束,也没有这个拷贝操作。若 :code:`final_states` 会被使用,则这里应该设置为True,这会一定程度上影响速度。默认为False。 + - **is_test** (bool,可选) - 标识是否是预测模式,预测模式下内存占用会更少。默认为False。 + - **return_length** (bool,可选) - 标识是否在返回的元组中额外包含一个存放了所有解码序列实际长度的Tensor。默认为False。 + - **kwargs** - 其他命名关键字参数。这些参数将传递给 :code:`decoder.step`。 + +返回:若 :code:`return_length` 为True,则返回三元组 :code:`(final_outputs, final_states, sequence_lengths)` ,否则返回二元组 :code:`(final_outputs, final_states)` 。 :code:`final_outputs, final_states` 包含了最终的输出和状态,这两者都是Tensor或Tensor的嵌套结构。:code:`final_outputs` 具有与 :code:`decoder.step()` 返回的 :code:`outputs` 相同的结构和数据类型, 且其中的每个tensor都是将所有解码步中与其对应的的输出进行堆叠的结果;如果 :code:`decoder` 实现了 :code:`finalize` 方法,这些tensor也可能会通过 :code:`decoder.finalize()` 进行修改。:code:`final_states` 是最后时间步的状态,和 :code:`decoder.initialize()` 返回的初始状态具有相同的结构,形状和数据类型。:code:`sequence_lengths` 是int64类型的tensor,和 :code:`decoder.initialize()` 返回的 :code:`finished` 具有相同的形状,其保存了所有解码序列实际长度。 + +返回类型:tuple + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.layers as layers + from paddle.fluid.layers import GRUCell, BeamSearchDecoder, dynamic_decode + encoder_output = fluid.data(name="encoder_output", + shape=[-1, 32, 128], + dtype="float32") + trg_embeder = lambda x: fluid.embedding( + x, size=[10000, 128], param_attr=fluid.ParamAttr(name="trg_embedding")) + output_layer = lambda x: layers.fc(x, + size=10000, + num_flatten_dims=len(x.shape) - 1, + param_attr=fluid.ParamAttr(name= + "output_w"), + bias_attr=False) + decoder_cell = GRUCell(hidden_size=128) + decoder = BeamSearchDecoder(decoder_cell, + start_token=0, + end_token=1, + beam_size=4, + embedding_fn=trg_embeder, + output_fn=output_layer) + outputs = dynamic_decode( + decoder=decoder, inits=decoder_cell.get_initial_states(encoder_output)) diff --git a/doc/paddle/api/paddle/fluid/layers/dynamic_gru_cn.rst b/doc/paddle/api/paddle/fluid/layers/dynamic_gru_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ac3565e6dff163451ea5eb85bdcb28fd23da157e --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/dynamic_gru_cn.rst @@ -0,0 +1,90 @@ +.. _cn_api_fluid_layers_dynamic_gru: + +dynamic_gru +------------------------------- + + +.. 
py:function:: paddle.fluid.layers.dynamic_gru(input, size, param_attr=None, bias_attr=None, is_reverse=False, gate_activation='sigmoid', candidate_activation='tanh', h_0=None, origin_mode=False) + + + + + +**注意:该OP的输入只能是LoDTensor,如果您需要处理的输入是Tensor类型,请使用StaticRNN(fluid.layers.** :ref:`cn_api_fluid_layers_StaticRNN` **)。** + +该OP用于在完整序列上逐个时间步的进行单层Gated Recurrent Unit(GRU)的计算,单个时间步内GRU的计算支持以下两种计算方式: + +如果origin_mode为True,则使用的运算公式来自论文 +`Learning Phrase Representations using RNN Encoder Decoder for Statistical Machine Translation `_ 。 + +.. math:: + u_t & = act_g(W_{ux}x_{t} + W_{uh}h_{t-1} + b_u)\\ + r_t & = act_g(W_{rx}x_{t} + W_{rh}h_{t-1} + b_r)\\ + \tilde{h_t} & = act_c(W_{cx}x_{t} + W_{ch}(r_t \odot h_{t-1}) + b_c)\\ + h_t & = u_t \odot h_{t-1} + (1-u_t) \odot \tilde{h_t} + + +如果origin_mode为False,则使用的运算公式来自论文 +`Empirical Evaluation of Gated Recurrent Neural Networks on Sequence Modeling `_ 。 + +公式如下: + +.. math:: + u_t & = act_g(W_{ux}x_{t} + W_{uh}h_{t-1} + b_u)\\ + r_t & = act_g(W_{rx}x_{t} + W_{rh}h_{t-1} + b_r)\\ + \tilde{h_t} & = act_c(W_{cx}x_{t} + W_{ch}(r_t \odot h_{t-1}) + b_c)\\ + h_t & = (1-u_t) \odot h_{t-1} + u_t \odot \tilde{h_t} + + +其中, :math:`x_t` 为当前时间步的输入,这个输入并非 ``input``,该OP不包含 :math:`W_{ux}x_{t}, W_{rx}x_{t}, W_{cx}x_{t}` 的计算, **注意** 要在该OP前使用大小为 ``size`` 的3倍的全连接层并将其输出作为 ``input``; +:math:`h_{t-1}` 为前一时间步的隐状态 ``hidden``; :math:`u_t` 、 :math:`r_t` 、 :math:`\tilde{h_t}` 和 :math:`h_t` 分别代表了GRU单元中update gate(更新门)、reset gate(重置门)、candidate hidden(候选隐状态)和隐状态输出; :math:`\odot` 为逐个元素相乘; +:math:`W_{uh}, b_u` 、 :math:`W_{rh}, b_r` 和 :math:`W_{ch}, b_c` 分别代表更新门、重置门和候选隐状态在计算时使用的权重矩阵和偏置。在实现上,三个权重矩阵合并为一个 :math:`[D, D \times 3]` 形状的Tensor存放,三个偏置拼接为一个 :math:`[1, D \times 3]` 形状的Tensor存放,其中 :math:`D` 为隐单元的数目;权重Tensor存放布局为: :math:`W_{uh}` 和 :math:`W_{rh}` 拼接为 :math:`[D, D \times 2]` 形状位于前半部分,:math:`W_{ch}` 以 :math:`[D, D]` 形状位于后半部分。 + + +参数: + - **input** (Variable) – LoD level为1的LoDTensor,表示经线性变换后的序列输入,形状为 :math:`[T, D \times 3]` ,其中 :math:`T` 表示mini-batch中所有序列长度之和, :math:`D` 为隐状态特征维度的大小。数据类型为float32或float64。 + - **size** (int) – 隐状态特征维度的大小 + - **param_attr** (ParamAttr,可选) – 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr,可选) - 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **is_reverse** (bool,可选) – 指明是否按照和输入相反的序列顺序计算,默认为False。 + - **gate_activation** (str,可选) – 公式中 :math:`act_g` 激活函数的类型。支持identity、sigmoid、tanh、relu四种激活函数类型,默认为sigmoid。 + - **candidate_activation** (str,可选) – 公式中 :math:`act_c` 激活函数的类型。支持identity、sigmoid、tanh、relu四种激活函数类型,默认为tanh。 + - **h_0** (Variable,可选) – 表示初始隐状态的Tensor,若未提供,则默认为0。其形状为 :math:`[N, D]` , 其中 :math:`N` 为输入mini-batch中序列的数目, :math:`D` 为隐状态特征维度的大小。数据类型与 ``input`` 相同。默认值为None。 + - **origin_mode** (bool,可选) – 指明要使用的GRU计算方式,两种计算方式具体差异见公式描述,默认值为False。 + +返回: 形状为 :math:`[T, D]` 、LoD level为1的LoDTensor,其中 :math:`T` 表示mini-batch中所有序列长度之和, :math:`D` 为隐状态特征维度的大小。表示经过GRU变换的输出特征序列,和 ``input`` 具有相同的LoD(序列长度)和数据类型。 + +返回类型: Variable + + +**代码示例** + +.. 
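code-block:: python
+
+    # 补充示意(非官方示例,维度取值为假设):利用 is_reverse=True 组合出双向 GRU
+    import paddle.fluid as fluid
+
+    dict_dim, emb_dim, hidden_dim = 128, 64, 256
+    data = fluid.data(name='sequence_bi', shape=[None], dtype='int64', lod_level=1)
+    emb = fluid.embedding(input=data, size=[dict_dim, emb_dim])
+    # 按上文说明,输入需先经过大小为 3 * hidden_dim 的全连接层做线性变换
+    fwd_proj = fluid.layers.fc(input=emb, size=hidden_dim * 3)
+    bwd_proj = fluid.layers.fc(input=emb, size=hidden_dim * 3)
+    gru_fwd = fluid.layers.dynamic_gru(input=fwd_proj, size=hidden_dim)
+    gru_bwd = fluid.layers.dynamic_gru(input=bwd_proj, size=hidden_dim, is_reverse=True)
+    bi_gru = fluid.layers.concat(input=[gru_fwd, gru_bwd], axis=1)
+
+.. 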
code-block:: python + + import paddle.fluid as fluid + + dict_dim, emb_dim = 128, 64 + data = fluid.data(name='sequence', + shape=[None], + dtype='int64', + lod_level=1) + emb = fluid.embedding(input=data, size=[dict_dim, emb_dim]) + hidden_dim = 512 + x = fluid.layers.fc(input=emb, size=hidden_dim * 3) + hidden = fluid.layers.dynamic_gru(input=x, size=hidden_dim) + + + + + + + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/dynamic_lstm_cn.rst b/doc/paddle/api/paddle/fluid/layers/dynamic_lstm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..711083ebf6396b06e66db8e3216621b13974d2dd --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/dynamic_lstm_cn.rst @@ -0,0 +1,99 @@ +.. _cn_api_fluid_layers_dynamic_lstm: + +dynamic_lstm +------------------------------- + + +.. py:function:: paddle.fluid.layers.dynamic_lstm(input, size, h_0=None, c_0=None, param_attr=None, bias_attr=None, use_peepholes=True, is_reverse=False, gate_activation='sigmoid', cell_activation='tanh', candidate_activation='tanh', dtype='float32', name=None) + + + + +该OP实现了 LSTM,即 Long-Short Term Memory(长短期记忆)运算 - `Hochreiter, S., & Schmidhuber, J. (1997) `_。 + +.. note:: + - 该OP仅支持 LoDTensor 作为输入,如果您需要处理的是Tensor,请使用 :ref:`cn_api_fluid_layers_lstm` 。 + - 在实现的时候为了提升效率,用户必须将LSTM的输入先进行线性映射,将维度为 [T, hidden_size] 的输入映射为 [T, 4 × hidden_size] 输入,然后再传给该OP。 + +该OP的默认实现方式为 diagonal/peephole 连接,参见 `Gers, F. A., & Schmidhuber, J. (2000) `_。 +如果需要禁用 peephole 连接方法,将 use_peepholes 设为 False 即可。 + +该OP对于序列中每一个时间步的计算公式如下: + +.. math:: + i_t=\sigma (W_{ix}x_{t}+W_{ih}h_{t-1}+W_{ic}c_{t-1}+b_i) +.. math:: + f_t=\sigma (W_{fx}x_{t}+W_{fh}h_{t-1}+W_{fc}c_{t-1}+b_f) +.. math:: + o_t=\sigma (W_{ox}x_{t}+W_{oh}h_{t-1}+W_{oc}c_{t-1}+b_o) +.. math:: + \widetilde{c_t}=act_g(W_{ct}x_{t}+W_{ch}h_{t-1}+b_{c}) +.. math:: + c_t=f_t\odot c_{t-1}+i_t\odot \widetilde{c_t} +.. 
math:: + h_t=o_t\odot act_h(c_t) + +公式中的概念信息如下: + - :math:`x_{t}` 表示时间步 :math:`t` 的输入 + - :math:`h_{t}` 表示时间步 :math:`t` 的 hidden 状态 + - :math:`h_{t-1}, c_{t-1}` 分别表示前一个时间步的 hidden 和 cell 状态 + - :math:`\widetilde{c_t}` 表示候选的 cell 状态 + - :math:`i_t` ,:math:`f_t` 和 :math:`o_t` 分别为 input gate,forget gate,output gate + - :math:`W` 表示 weight (例如, :math:`W_{ix}` 是在计算 input gate :math:`i_t` 时,对输入 :math:`x_{t}` 做线性变换的 weight) + - :math:`b` 表示 bias (例如, :math:`b_{i}` 是 input gate 的 bias) + - :math:`\sigma` 表示 gate 的非线性激活函数,默认为 sigmoid + - :math:`act_g, act_h` 分别表示 cell 输入和 cell 输出的非线性激活函数,默认为 tanh + - :math:`\odot` 表示矩阵的 Hadamard product,即对两个维度相同的矩阵,将相同位置的元素相乘,得到另一个维度相同的矩阵 + +参数: + - **input** ( :ref:`api_guide_Variable` ) 维度为 :math:`[T, 4*hidden\_size]` 的多维 LoDTensor(必须在传入该OP前对维度为 :math:`[T, hidden\_size]` 的输入经过线性变换得到),其中 T 为 batch 中所有样本的长度之和,hidden_size 为隐层大小,数据类型为 float32 或者 float64。 + - **size** (int) – 必须为 4*hidden_size。 + - **h_0** ( :ref:`api_guide_Variable` ,可选) 维度为 :math:`[batch\_size, hidden\_size]` 的多维 Tensor,其中 hidden_size 为隐层大小,数据类型为 float32 或者 float64。如果为 None,该OP会自动设置为全0的向量。默认值为None。 + - **c_0** ( :ref:`api_guide_Variable` ,可选) 维度为 :math:`[batch\_size, hidden\_size]` 的多维 Tensor,其中 hidden_size 为隐层大小,数据类型为 float32 或者 float64。如果为 None,该OP会自动设置为全0的向量;:math:`h_0, c_0` 如果要设置为None,必须同时为None。默认值为None。 + - **param_attr** (ParamAttr,可选) – 指定权重参数属性的对象。如果为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。如果用户需要设置此属性,维度必须等于 :math:`[hidden\_size, 4*hidden\_size]`。默认值为None。 + - **bias_attr** (ParamAttr,可选) – 指定偏置参数属性的对象。如果为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。如果用户需要设置此属性,如果 use_peepholes=true,维度需为 :math:`[1, 4*hidden\_size]`, use_peepholes=true,维度需为 :math:`[1, 7*hidden\_size]`。默认值为None。 + - **use_peepholes** (bool,可选) – 是否使用 peephole 连接。默认值为True。 + - **is_reverse** (bool,可选) – 是否将输入的数据根据根据样本长度进行逆序,同时会将输出进行逆序,用户拿到结果之后,不需要再逆序。默认值为False。 + - **gate_activation** (str,可选) – 应用于input gate,forget gate, output gate 的激活函数。默认值为sigmoid。 + - **cell_activation** (str,可选) – 用于cell输入的激活函数。默认值为tanh。 + - **candidate_activation** (str,可选) – 用于cell输出的激活函数。默认值为tanh。 + - **dtype** (str,可选) – 数据类型为 float32 或者 float64。默认值为 float32。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,默认值为None。 + +返回:经过lstm运算输出的 hidden 和 cell 的状态的tuple,包括 + +- hidden:LSTM hidden的输出结果,维度为 :math:`[T, hidden\_size]` 的LoDTensor,且LoD保持与输入一致,数据类型与input一致。 +- cell:LSTM cell的输出结果,维度为 :math:`[T, hidden\_size]` 的LoDTensor,且LoD保持与输入一致,数据类型与input一致。 + +返回类型: tuple( :ref:`api_guide_Variable` , :ref:`api_guide_Variable` ) + + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + emb_dim = 256 + vocab_size = 10000 + hidden_dim = 512 + + data = fluid.layers.data(name='x', shape=[1], dtype='int32', lod_level=1) + emb = fluid.layers.embedding(input=data, size=[vocab_size, emb_dim], is_sparse=True) + + forward_proj = fluid.layers.fc(input=emb, size=hidden_dim * 4, bias_attr=False) + forward, cell = fluid.layers.dynamic_lstm(input=forward_proj, size=hidden_dim * 4, use_peepholes=False) + forward.shape # (-1, 512) + cell.shape # (-1, 512) + + + + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/dynamic_lstmp_cn.rst b/doc/paddle/api/paddle/fluid/layers/dynamic_lstmp_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a490ebefca6d2e01ced765c304a29be5f563fff0 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/dynamic_lstmp_cn.rst @@ -0,0 +1,127 @@ +.. _cn_api_fluid_layers_dynamic_lstmp: + +dynamic_lstmp +------------------------------- + +.. 
py:function:: paddle.fluid.layers.dynamic_lstmp(input, size, proj_size, param_attr=None, bias_attr=None, use_peepholes=True, is_reverse=False, gate_activation='sigmoid', cell_activation='tanh', candidate_activation='tanh', proj_activation='tanh', dtype='float32', name=None, h_0=None, c_0=None, cell_clip=None, proj_clip=None) + + + + +.. note:: + 在实现的时候为了提升效率,用户必须将输入先进行线性映射,将维度为 [T, hidden_size] 的输入映射为 [T, 4×hidden_size] 输入,然后再传给该OP。 + +该OP实现了LSTMP(LSTM Projected)层。LSTMP层在LSTM层之后有一个单独的的线性映射层。 -- `Sak, H., Senior, A., & Beaufays, F. (2014) `_。 + +与标准的LSTM层相比,LSTMP多出来的线性映射层,用于从原始隐藏状态 :math:`h_t` 映射到较低维的状态 :math:`r_t`, +从而减少参数总数和计算复杂度,特别是输出单元相对较大的情况下。 + +该OP的默认实现方式为 diagonal/peephole 连接,参见 `Gers, F. A., & Schmidhuber, J. (2000) `_。 +如果需要禁用 peephole 连接方法,将 use_peepholes 设为 False 即可。 + +该OP对于序列中每一个时间步的计算公式如下: + +.. math:: + i_t = \sigma(W_{ix}x_{t} + W_{ir}r_{t-1} + W_{ic}c_{t-1} + b_i) +.. math:: + f_t = \sigma(W_{fx}x_{t} + W_{fr}r_{t-1} + W_{fc}c_{t-1} + b_f) +.. math:: + o_t = \sigma(W_{ox}x_{t} + W_{or}r_{t-1} + W_{oc}c_{t-1} + b_o) +.. math:: + \widetilde{c_t} = act_g(W_{cx}x_t + W_{cr}r_{t-1} + b_c) +.. math:: + c_t = f_t \odot c_{t-1} + i_t \odot \widetilde{c_t} +.. math:: + h_t = o_t \odot act_h(c_t) +.. math:: + r_t = \overline{act_h}(W_{rh}h_t) + + +公式中的概念信息如下: + - :math:`x_{t}` 表示时间步 :math:`t` 的输入 + - :math:`h_{t}` 表示时间步 :math:`t` 的 hidden 状态 + - :math:`r_{t}` : 隐藏状态循环的映射输出的状态 + - :math:`h_{t-1}, c_{t-1}, r_{t-1}` 分别表示前一个时间步的 hidden 状态,cell 状态和循环映射输出状态 + - :math:`\widetilde{c_t}` 表示候选的 cell 状态 + - :math:`i_t` ,:math:`f_t` 和 :math:`o_t` 分别为 input gate,forget gate,output gate + - :math:`W` 表示 weight (例如, :math:`W_{ix}` 是在计算 input gate :math:`i_t` 时,对输入 :math:`x_{t}` 做线性变换的 weight) + - :math:`b` 表示 bias (例如, :math:`b_{i}` 是 input gate 的 bias) + - :math:`\sigma` 表示 gate 的非线性激活函数,默认为 sigmoid + - :math:`act_g, act_h, \overline{act_h}` 分别表示 cell 输入 cell 输出和映射输出的非线性激活函数,默认为 tanh + - :math:`\odot` 表示矩阵的 Hadamard product,即对两个维度相同的矩阵,将相同位置的元素相乘,得到另一个维度相同的矩阵 + +参数: + - **input** ( :ref:`api_guide_Variable` ) 维度为 :math:`[T, 4*hidden\_size]` 的多维 LoDTensor(必须在传入该OP前对维度为 :math:`[T, hidden\_size]` 的输入经过线性变换得到),其中 T 为 batch 中所有样本的长度之和,hidden_size 为隐层大小,数据类型为 float32 或者 float64。 + - **size** (int) – 必须为 4 * hidden_size。 + - **proj_size** (int) - 投影映射输出的大小。 + - **param_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + + 说明: + 1. 隐藏状态到隐藏状态(Hidden-hidden)权重 = :math:`\{ W_{cr},W_{ir},W_{fr},W_{or} \}`,维度为 [P, 4*hidden_size] ,P是投影大小 + + 2. 投影(Projection)权重 = :math:`\{ W_{rh} \}`,维度为 [D, P] + + - **bias_attr** (ParamAttr,可选) - 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + + 说明: + 1. use_peepholes = False + - Biases = { :math:`b_{c},b_{i},b_{f},b_{o}`} + - 维度为 [1, 4*hidden_size] + + 2. 
use_peepholes = True + - Biases = { :math:`b_{c},b_{i},b_{f},b_{o},W_{ic},W_{fc},W_{oc}`} + - 维度为 [1, 7*hidden_size] + + - **use_peepholes** (bool,可选) - 是否使用 peephole 连接。默认值为True。 + - **is_reverse** (bool,可选) - 是否计算反向LSTM,默认值为False。 + - **gate_activation** (str,可选) - 应用于input gate,forget gate, output gate 的激活函数。可选值包括 sigmoid,tanh,relu,identity。默认值为 sigmoid。 + - **cell_activation** (str,可选) - cell输出的激活函数。可选值包括 sigmoid,tanh,relu,identity。默认值为 tanh。 + - **candidate_activation** (str,可选) - 候选隐藏状态(candidate hidden state)的激活状态。可选值包括 sigmoid,tanh,relu,identity。默认值为 tanh。 + - **proj_activation** (str,可选) - 投影输出的激活函数。可选值包括 sigmoid,tanh,relu,identity。默认值为 tanh。 + - **dtype** (str,可选) - 数据类型。可选值包括 float32,float64。默认值为 float32。 + - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + - **h_0** ( :ref:`api_guide_Variable` ,可选) 维度为 :math:`[batch\_size, hidden\_size]` 的多维 Tensor。如果为 None,该OP会自动设置为全0的向量。默认值为None。 + - **c_0** ( :ref:`api_guide_Variable` ,可选) 维度为 :math:`[batch\_size, hidden\_size]` 的多维 Tensor。如果为 None,该OP会自动设置为全0的向量;:math:`h_0, c_0` 如果要设置为None,必须同时为None。默认值为None。 + - **cell_clip** (float,可选) - 如果该参数不为None,则在单元输出激活之前,单元状态将被此值剪裁。默认值为None。 + - **proj_clip** (float,可选) - 如果 num_proj > 0 并且 proj_clip 不为None,那么将投影值沿元素方向剪切到[-proj_clip,proj_clip]内。默认值为None。 + +返回:经过lstmp运算输出的 hidden 的映射和 cell 状态的tuple,包括 + +- hidden:LSTM hidden的输出结果,维度为 :math:`[T, P]` 的LoDTensor,且LoD保持与输入一致,数据类型与input一致。 +- cell:LSTM cell的输出结果,维度为 :math:`[T, hidden\_size]` 的LoDTensor,且LoD保持与输入一致,数据类型与input一致。 + +返回类型: tuple( :ref:`api_guide_Variable` , :ref:`api_guide_Variable` ) + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + dict_dim, emb_dim = 128, 64 + data = fluid.layers.data(name='sequence', shape=[1], + dtype='int32', lod_level=1) + emb = fluid.layers.embedding(input=data, size=[dict_dim, emb_dim]) + hidden_dim, proj_dim = 512, 256 + fc_out = fluid.layers.fc(input=emb, size=hidden_dim * 4, + act=None, bias_attr=None) + proj_out, cell = fluid.layers.dynamic_lstmp(input=fc_out, + size=hidden_dim * 4, + proj_size=proj_dim, + use_peepholes=False, + is_reverse=True, + cell_activation="tanh", + proj_activation="tanh") + proj_out.shape # (-1, 256) + cell.shape # (-1, 512) + + + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/edit_distance_cn.rst b/doc/paddle/api/paddle/fluid/layers/edit_distance_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b9e16eb517b3a576d63264ed052cc436edbb1019 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/edit_distance_cn.rst @@ -0,0 +1,52 @@ +.. _cn_api_fluid_layers_edit_distance: + + +edit_distance +------------------------------- + +.. 
py:function:: paddle.fluid.layers.edit_distance(input,label,normalized=True,ignored_tokens=None, input_length=None, label_length=None) + + + + +该OP计算一批给定字符串及其参照字符串间的编辑距离。编辑距离也称Levenshtein距离,通过计算从一个字符串变成另一个字符串所需的最少操作步骤来衡量两个字符串的相异度。这里的操作包括插入、删除和替换。 + +比如给定假设字符串A=“kitten”和参照字符串B=“sitting”,从A变换成B编辑距离为3,至少需要两次替换和一次插入: + +“kitten”->“sitten”->“sittn”->“sitting” + +输入为LoDTensor/Tensor,包含假设字符串(带有表示批尺寸的总数)和分离信息(具体为LoD信息或者 ``input_length`` )。并且批尺寸大小的参照字符串和输入LoDTensor的顺序保持一致。 + +输出包含批尺寸大小的结果,代表一对字符串中每个字符串的编辑距离。如果Attr(normalized)为真,编辑距离则处以参照字符串的长度。 + +参数: + - **input** (Variable) - 假设字符串的索引,rank为2的Tensor或LoDTensor,数据类型为int64。 + - **label** (Variable) - 参照字符串的索引,rank为2的Tensor或LoDTensor,数据类型为int64。 + - **normalized** (bool)-表示是否用参照字符串的长度进行归一化,默认值为True。 + - **ignored_tokens** (list)-计算编辑距离前需要移除的token,默认值为None。 + - **name** (None|str) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_name` ,默认值为None。 + +返回:包含有形为[batch_size,1]的编辑距离和形为[ ]的序列数元组。 + +返回类型:元组 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + # 使用 LoDTensor + x_lod = fluid.layers.data(name='x_lod', shape=[1], dtype='int64', lod_level=1) + y_lod = fluid.layers.data(name='y_lod', shape=[1], dtype='int64', lod_level=1) + distance_lod, seq_num_lod = fluid.layers.edit_distance(input=x_lod, label=y_lod) + + # 使用 Tensor + x_seq_len = 5 + y_seq_len = 6 + x_pad = fluid.layers.data(name='x_pad', shape=[x_seq_len], dtype='int64') + y_pad = fluid.layers.data(name='y_pad', shape=[y_seq_len], dtype='int64') + x_len = fluid.layers.data(name='x_len', shape=[], dtype='int64') + y_len = fluid.layers.data(name='y_len', shape=[], dtype='int64') + distance_pad, seq_num_pad = fluid.layers.edit_distance( + input=x_pad, label=y_pad, input_length=x_len, label_length=y_len) diff --git a/doc/paddle/api/paddle/fluid/layers/elementwise_add_cn.rst b/doc/paddle/api/paddle/fluid/layers/elementwise_add_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..89aa33a66fe1558a5fd35af19bee15b34a1090c7 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/elementwise_add_cn.rst @@ -0,0 +1,118 @@ +.. _cn_api_fluid_layers_elementwise_add: + +elementwise_add +------------------------------- + +.. py:function:: paddle.fluid.layers.elementwise_add(x, y, axis=-1, act=None, name=None) + + + + +该OP是逐元素相加算子,输入 ``x`` 与输入 ``y`` 逐元素相加,并将各个位置的输出元素保存到返回结果中。 + +等式为: + +.. math:: + Out = X + Y + +- :math:`X` :多维Tensor。 +- :math:`Y` :维度必须小于等于X维度的Tensor。 + +对于这个运算算子有2种情况: + 1. :math:`Y` 的 ``shape`` 与 :math:`X` 相同。 + 2. :math:`Y` 的 ``shape`` 是 :math:`X` 的连续子序列。 + +对于情况2: + 1. 用 :math:`Y` 匹配 :math:`X` 的形状(shape),其中 ``axis`` 是 :math:`Y` 在 :math:`X` 上的起始维度的位置。 + 2. 如果 ``axis`` 为-1(默认值),则 :math:`axis= rank(X)-rank(Y)` 。 + 3. 考虑到子序列, :math:`Y` 的大小为1的尾部维度将被忽略,例如shape(Y)=(2,1)=>(2)。 + +例如: + +.. 
code-block:: text + + shape(X) = (2, 3, 4, 5), shape(Y) = (,) + shape(X) = (2, 3, 4, 5), shape(Y) = (5,) + shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5), with axis=-1(default) or axis=2 + shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1 + shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0 + shape(X) = (2, 3, 4, 5), shape(Y) = (2, 1), with axis=0 + +参数: + - **x** (Variable)- 多维 ``Tensor`` 或 ``LoDTensor`` 。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64``。 + - **y** (Variable)- 多维 ``Tensor`` 或 ``LoDTensor`` 。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64``。 + - **axis** (int32,可选)- ``y`` 的维度对应到 ``x`` 维度上时的索引。默认值为 -1。 + - **act** (str,可选)- 激活函数名称,作用于输出上。默认值为None。详细请参考 :ref:`api_guide_activations` , 常见的激活函数有: ``relu`` ``tanh`` ``sigmoid`` 等。 + - **name** (str,可选)- 输出的名字。默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 + + +返回: 维度与 ``x`` 相同的 ``Tensor`` 或 ``LoDTensor`` ,数据类型与 ``x`` 相同。 + +返回类型: Variable。 + +**代码示例 1** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + def gen_data(): + return { + "x": np.array([2, 3, 4]), + "y": np.array([1, 5, 2]) + } + x = fluid.layers.data(name="x", shape=[3], dtype='float32') + y = fluid.layers.data(name="y", shape=[3], dtype='float32') + z = fluid.layers.elementwise_add(x, y) + # z = x + y + place = fluid.CPUPlace() + exe = fluid.Executor(place) + z_value = exe.run(feed=gen_data(), + fetch_list=[z.name]) + print(z_value) # [3., 8., 6.] + +**代码示例 2** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + def gen_data(): + return { + "x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'), + "y": np.random.randint(1, 5, size=[3, 4]).astype('float32') + } + x = fluid.layers.data(name="x", shape=[2,3,4,5], dtype='float32') + y = fluid.layers.data(name="y", shape=[3,4], dtype='float32') + z = fluid.layers.elementwise_add(x, y, axis=1) + # z = x + y + place = fluid.CPUPlace() + exe = fluid.Executor(place) + z_value = exe.run(feed=gen_data(), + fetch_list=[z.name]) + print(z_value) # z.shape=[2,3,4,5] + +**代码示例 3** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + def gen_data(): + return { + "x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'), + "y": np.random.randint(1, 5, size=[5]).astype('float32') + } + x = fluid.layers.data(name="x", shape=[2,3,4,5], dtype='float32') + y = fluid.layers.data(name="y", shape=[5], dtype='float32') + # z = x + y + z = fluid.layers.elementwise_add(x, y, axis=3) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + z_value = exe.run(feed=gen_data(), + fetch_list=[z.name]) + print(z_value) # z.shape=[2,3,4,5] + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/elementwise_div_cn.rst b/doc/paddle/api/paddle/fluid/layers/elementwise_div_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6e5889c810d3e516b2b380b1659385162a4abb4f --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/elementwise_div_cn.rst @@ -0,0 +1,118 @@ +.. _cn_api_fluid_layers_elementwise_div: + +elementwise_div +------------------------------- + +.. py:function:: paddle.fluid.layers.elementwise_div(x, y, axis=-1, act=None, name=None) + + + + +该OP是逐元素相除算子,输入 ``x`` 与输入 ``y`` 逐元素相除,并将各个位置的输出元素保存到返回结果中。 + +等式是: + +.. math:: + Out = X / Y + +- :math:`X` :多维Tensor。 +- :math:`Y` :维度必须小于等于X维度的Tensor。 + +对于这个运算算子有2种情况: + 1. :math:`Y` 的 ``shape`` 与 :math:`X` 相同。 + 2. :math:`Y` 的 ``shape`` 是 :math:`X` 的连续子序列。 + +对于情况2: + 1. 
用 :math:`Y` 匹配 :math:`X` 的形状(shape),其中 ``axis`` 是 :math:`Y` 在 :math:`X` 上的起始维度的位置。 + 2. 如果 ``axis`` 为-1(默认值),则 :math:`axis= rank(X)-rank(Y)` 。 + 3. 考虑到子序列, :math:`Y` 的大小为1的尾部维度将被忽略,例如shape(Y)=(2,1)=>(2)。 + +例如: + +.. code-block:: text + + shape(X) = (2, 3, 4, 5), shape(Y) = (,) + shape(X) = (2, 3, 4, 5), shape(Y) = (5,) + shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5), with axis=-1(default) or axis=2 + shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1 + shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0 + shape(X) = (2, 3, 4, 5), shape(Y) = (2, 1), with axis=0 + +参数: + - **x** (Variable)- 多维 ``Tensor`` 或 ``LoDTensor`` 。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64``。 + - **y** (Variable)- 多维 ``Tensor`` 或 ``LoDTensor`` 。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64``。 + - **axis** (int32,可选)- ``y`` 的维度对应到 ``x`` 维度上时的索引。默认值为 -1。 + - **act** (str,可选)- 激活函数名称,作用于输出上。默认值为None。详细请参考 :ref:`api_guide_activations` , 常见的激活函数有: ``relu`` ``tanh`` ``sigmoid`` 等。 + - **name** (str,可选)- 输出的名字。默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 + + +返回: 维度与 ``x`` 相同的 ``Tensor`` 或 ``LoDTensor`` ,数据类型与 ``x`` 相同。 + +返回类型: Variable。 + +**代码示例 1** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + def gen_data(): + return { + "x": np.array([2, 3, 4]), + "y": np.array([1, 5, 2]) + } + x = fluid.layers.data(name="x", shape=[3], dtype='float32') + y = fluid.layers.data(name="y", shape=[3], dtype='float32') + z = fluid.layers.elementwise_div(x, y) + # z = x / y + place = fluid.CPUPlace() + exe = fluid.Executor(place) + z_value = exe.run(feed=gen_data(), + fetch_list=[z.name]) + print(z_value) # [2., 0.6, 2.] + +**代码示例 2** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + def gen_data(): + return { + "x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'), + "y": np.random.randint(1, 5, size=[3, 4]).astype('float32') + } + x = fluid.layers.data(name="x", shape=[2,3,4,5], dtype='float32') + y = fluid.layers.data(name="y", shape=[3,4], dtype='float32') + z = fluid.layers.elementwise_div(x, y, axis=1) + # z = x / y + place = fluid.CPUPlace() + exe = fluid.Executor(place) + z_value = exe.run(feed=gen_data(), + fetch_list=[z.name]) + print(z_value) # z.shape=[2,3,4,5] + +**代码示例 3** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + def gen_data(): + return { + "x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'), + "y": np.random.randint(1, 5, size=[5]).astype('float32') + } + x = fluid.layers.data(name="x", shape=[2,3,4,5], dtype='float32') + y = fluid.layers.data(name="y", shape=[5], dtype='float32') + z = fluid.layers.elementwise_div(x, y, axis=3) + # z = x / y + place = fluid.CPUPlace() + exe = fluid.Executor(place) + z_value = exe.run(feed=gen_data(), + fetch_list=[z.name]) + print(z_value) # z.shape=[2,3,4,5] + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/elementwise_floordiv_cn.rst b/doc/paddle/api/paddle/fluid/layers/elementwise_floordiv_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..233f562ee15d6cc8459f244cb659c842f3352abb --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/elementwise_floordiv_cn.rst @@ -0,0 +1,113 @@ +.. _cn_api_fluid_layers_elementwise_floordiv: + +elementwise_floordiv +------------------------------- + +.. py:function:: paddle.fluid.layers.elementwise_floordiv(x, y, axis=-1, act=None, name=None) + + + + +该OP是逐元素整除算子,输入 ``x`` 与输入 ``y`` 逐元素整除,并将各个位置的输出元素保存到返回结果中。 + +等式为: + +.. 
math:: + Out = X // Y + +- :math:`X` :多维Tensor。 +- :math:`Y` :维度必须小于等于X维度的Tensor。 + +对于这个运算算子有2种情况: + 1. :math:`Y` 的 ``shape`` 与 :math:`X` 相同。 + 2. :math:`Y` 的 ``shape`` 是 :math:`X` 的连续子序列。 + +对于情况2: + 1. 用 :math:`Y` 匹配 :math:`X` 的形状(shape),其中 ``axis`` 是 :math:`Y` 在 :math:`X` 上的起始维度的位置。 + 2. 如果 ``axis`` 为-1(默认值),则 :math:`axis= rank(X)-rank(Y)` 。 + 3. 考虑到子序列, :math:`Y` 的大小为1的尾部维度将被忽略,例如shape(Y)=(2,1)=>(2)。 + +例如: + +.. code-block:: text + + shape(X) = (2, 3, 4, 5), shape(Y) = (,) + shape(X) = (2, 3, 4, 5), shape(Y) = (5,) + shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5), with axis=-1(default) or axis=2 + shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1 + shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0 + shape(X) = (2, 3, 4, 5), shape(Y) = (2, 1), with axis=0 + +参数: + - **x** (Variable)- 多维 ``Tensor`` 或 ``LoDTensor`` 。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64``。 + - **y** (Variable)- 多维 ``Tensor`` 或 ``LoDTensor`` 。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64``。 + - **axis** (int32,可选)- ``y`` 的维度对应到 ``x`` 维度上时的索引。默认值为 -1。 + - **act** (str,可选)- 激活函数名称,作用于输出上。详细请参考 :ref:`api_guide_activations` , 常见的激活函数有: ``relu`` ``tanh`` ``sigmoid`` 等。如果为None则不添加激活函数。默认值为None。 + - **name** (str,可选)- 输出的名字。默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 + + +返回: 维度与 ``x`` 相同的 ``Tensor`` 或 ``LoDTensor`` ,数据类型与 ``x`` 相同。 + +返回类型: Variable。 + +**代码示例 1** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + def gen_data(): + return { + "x": np.array([2, 3, 4]), + "y": np.array([1, 5, 2]) + } + + x = fluid.data(name="x", shape=[3], dtype='int64') + y = fluid.data(name="y", shape=[3], dtype='int64') + z = fluid.layers.elementwise_floordiv(x, y) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + z_value = exe.run(feed=gen_data(), fetch_list=[z.name]) + print(z_value) #[2,0,2] + +**代码示例 2** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + def gen_data(): + return { + "x": np.random.randint(1, 5, size=[2, 3, 4, 5]), + "y": np.random.randint(1, 5, size=[3, 4]) + } + + x = fluid.data(name="x", shape=[2,3,4,5], dtype='int64') + y = fluid.data(name="y", shape=[3,4], dtype='int64') + z = fluid.layers.elementwise_floordiv(x, y, axis=1) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + z_value = exe.run(feed=gen_data(), + fetch_list=[z.name]) + print(z_value) # z.shape=[2,3,4,5] + +**代码示例 3** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + def gen_data(): + return { + "x": np.random.randint(1, 5, size=[2, 3, 4, 5]), + "y": np.random.randint(1, 5, size=[5]) + } + + x = fluid.data(name="x", shape=[2,3,4,5], dtype='int64') + y = fluid.data(name="y", shape=[5], dtype='int64') + z = fluid.layers.elementwise_floordiv(x, y, axis=3) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + z_value = exe.run(feed=gen_data(), + fetch_list=[z.name]) + print(z_value) # z.shape=[2,3,4,5] diff --git a/doc/paddle/api/paddle/fluid/layers/elementwise_max_cn.rst b/doc/paddle/api/paddle/fluid/layers/elementwise_max_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d56d5543235dbe8e6c5be7606551c5e013b901dc --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/elementwise_max_cn.rst @@ -0,0 +1,106 @@ +.. _cn_api_fluid_layers_elementwise_max: + +elementwise_max +------------------------------- + +.. py:function:: paddle.fluid.layers.elementwise_max(x, y, axis=-1, act=None, name=None) + + + +该OP逐元素对比输入的两个多维Tensor,并且把各个位置更大的元素保存到返回结果中。 + +等式是: + +.. 
math:: + Out = max(X, Y) + +- :math:`X` :多维Tensor。 +- :math:`Y` :多维Tensor。 + +此运算算子有两种情况: + 1. :math:`Y` 的 ``shape`` 与 :math:`X` 相同。 + 2. :math:`Y` 的 ``shape`` 是 :math:`X` 的连续子序列。 + +对于情况2: + 1. 用 :math:`Y` 的 ``shape`` 匹配 :math:`X` 的 ``shape``,其中 ``axis`` 是 :math:`Y` 在 :math:`X` 上的起始维度的位置。 + 2. 如果 ``axis`` 为-1(默认值),则 :math:`axis = rank(X)-rank(Y)` 。 + 3. 考虑到子序列, :math:`Y` 的大小为1的尾部维度将被忽略,例如shape(Y)=(2,1)=>(2)。 + +例如: + +.. code-block:: text + + shape(X) = (2, 3, 4, 5), shape(Y) = (,) + shape(X) = (2, 3, 4, 5), shape(Y) = (5,) + shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5), with axis=-1(default) or axis=2 + shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1 + shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0 + shape(X) = (2, 3, 4, 5), shape(Y) = (2, 1), with axis=0 + +参数: + - **x** (Variable)- 多维Tensor。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64`` 。 + - **y** (Variable)- 多维Tensor。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64`` 。 + - **axis** (int32, 可选)- Y的维度对应到X维度上时的索引。默认值为 -1。 + - **act** (string, 可选)- 激活函数名称,作用于输出上。默认值为None。详细请参考 :ref:`api_guide_activations` , 常见的激活函数有: ``relu`` ``tanh`` ``sigmoid`` 等。 + - **name** (string, 可选)- 输出的名字。默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 + +返回: 维度和数据类型与 ``x`` 相同的多维Tensor。 + +返回类型: 多维Tensor。 + +**代码示例 1** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + def gen_data(): + return { + "x": np.array([2, 3, 4]), + "y": np.array([1, 5, 2]) + } + + x = fluid.layers.data(name="x", shape=[3], dtype='float32') + y = fluid.layers.data(name="y", shape=[3], dtype='float32') + z = fluid.layers.elementwise_max(x, y) + + place = fluid.CPUPlace() + exe = fluid.Executor(place) + z_value = exe.run(feed=gen_data(), + fetch_list=[z.name]) + + print(z_value) #[2, 5, 4] + +**代码示例 2** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + def gen_data(): + return { + "x": np.ones((2, 3, 4, 5)).astype('float32'), + "y": np.zeros((3, 4)).astype('float32') + } + + x = fluid.layers.data(name="x", shape=[2,3,4,5], dtype='float32') + y = fluid.layers.data(name="y", shape=[3,4], dtype='float32') + z = fluid.layers.elementwise_max(x, y, axis=1) + + place = fluid.CPUPlace() + exe = fluid.Executor(place) + + z_value = exe.run(feed=gen_data(), + fetch_list=[z.name]) + + print(z_value)#[[[[1., 1., 1., 1., 1.] .... [1., 1., 1., 1., 1.]]]] + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/elementwise_min_cn.rst b/doc/paddle/api/paddle/fluid/layers/elementwise_min_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..833b4499b8b24112f99507110c748d7a6cedd8f8 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/elementwise_min_cn.rst @@ -0,0 +1,103 @@ +.. _cn_api_fluid_layers_elementwise_min: + +elementwise_min +------------------------------- + +.. py:function:: paddle.fluid.layers.elementwise_min(x, y, axis=-1, act=None, name=None) + + + +该OP逐元素对比输入的两个多维Tensor,并且把各个位置更小的元素保存到返回结果中。 + +等式是: + +.. math:: + Out = min(X, Y) + +- :math:`X` :多维Tensor。 +- :math:`Y` :多维Tensor。 + +此运算算子有两种情况: + 1. :math:`Y` 的 ``shape`` 与 :math:`X` 相同。 + 2. :math:`Y` 的 ``shape`` 是 :math:`X` 的连续子序列。 + +对于情况2: + 1. 用 :math:`Y` 的 ``shape`` 匹配 :math:`X` 的 ``shape``,其中 ``axis`` 是 :math:`Y` 在 :math:`X` 上的起始维度的位置。 + 2. 如果 ``axis`` 为-1(默认值),则 :math:`axis = rank(X)-rank(Y)` 。 + 3. 考虑到子序列, :math:`Y` 的大小为1的尾部维度将被忽略,例如shape(Y)=(2,1)=>(2)。 + +例如: + +.. 
code-block:: text + + shape(X) = (2, 3, 4, 5), shape(Y) = (,) + shape(X) = (2, 3, 4, 5), shape(Y) = (5,) + shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5), with axis=-1(default) or axis=2 + shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1 + shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0 + shape(X) = (2, 3, 4, 5), shape(Y) = (2, 1), with axis=0 + +参数: + - **x** (Variable)- 多维Tensor。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64`` 。 + - **y** (Variable)- 多维Tensor。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64`` 。 + - **axis** (int32, 可选)- Y的维度对应到X维度上时的索引。默认值为 -1。 + - **act** (string, 可选)- 激活函数名称,作用于输出上。默认值为None。详细请参考 :ref:`api_guide_activations` , 常见的激活函数有: ``relu`` ``tanh`` ``sigmoid`` 等。 + - **name** (string, 可选)- 输出的名字。默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 + +返回: 维度和数据类型与 ``x`` 相同的多维Tensor。 + +返回类型: 多维Tensor。 + +**代码示例 1** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + def gen_data(): + return { + "x": np.array([2, 3, 4]), + "y": np.array([1, 5, 2]) + } + + x = fluid.layers.data(name="x", shape=[3], dtype='float32') + y = fluid.layers.data(name="y", shape=[3], dtype='float32') + z = fluid.layers.elementwise_min(x, y) + + place = fluid.CPUPlace() + exe = fluid.Executor(place) + z_value = exe.run(feed=gen_data(), + fetch_list=[z.name]) + + print(z_value) #[1, 3, 2] + +**代码示例 2** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + def gen_data(): + return { + "x": np.ones((2, 3, 4, 5)).astype('float32'), + "y": np.zeros((3, 4)).astype('float32') + } + + x = fluid.layers.data(name="x", shape=[2,3,4,5], dtype='float32') + y = fluid.layers.data(name="y", shape=[3,4], dtype='float32') + z = fluid.layers.elementwise_min(x, y, axis=1) + + place = fluid.CPUPlace() + exe = fluid.Executor(place) + + z_value = exe.run(feed=gen_data(), + fetch_list=[z.name]) + + print(z_value)#[[[[0., 0., 0., 0., 0.] .... [0., 0., 0., 0., 0.]]]] + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/elementwise_mod_cn.rst b/doc/paddle/api/paddle/fluid/layers/elementwise_mod_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a644697772489469514c01cfabcbe807e36eb368 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/elementwise_mod_cn.rst @@ -0,0 +1,114 @@ +.. _cn_api_fluid_layers_elementwise_mod: + +elementwise_mod +------------------------------- + +.. py:function:: paddle.fluid.layers.elementwise_mod(x, y, axis=-1, act=None, name=None) + + + + +该OP是逐元素取模算子,输入 ``x`` 与输入 ``y`` 逐元素取模,并将各个位置的输出元素保存到返回结果中。 + +等式为: + +.. math:: + Out = X \% Y + +- :math:`X` :多维Tensor。 +- :math:`Y` :维度必须小于等于X维度的Tensor。 + +对于这个运算算子有2种情况: + 1. :math:`Y` 的 ``shape`` 与 :math:`X` 相同。 + 2. :math:`Y` 的 ``shape`` 是 :math:`X` 的连续子序列。 + +对于情况2: + 1. 用 :math:`Y` 匹配 :math:`X` 的形状(shape),其中 ``axis`` 是 :math:`Y` 在 :math:`X` 上的起始维度的位置。 + 2. 如果 ``axis`` 为-1(默认值),则 :math:`axis= rank(X)-rank(Y)` 。 + 3. 考虑到子序列, :math:`Y` 的大小为1的尾部维度将被忽略,例如shape(Y)=(2,1)=>(2)。 + +例如: + +.. 
code-block:: text + + shape(X) = (2, 3, 4, 5), shape(Y) = (,) + shape(X) = (2, 3, 4, 5), shape(Y) = (5,) + shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5), with axis=-1(default) or axis=2 + shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1 + shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0 + shape(X) = (2, 3, 4, 5), shape(Y) = (2, 1), with axis=0 + +参数: + - **x** (Variable)- 多维 ``Tensor`` 或 ``LoDTensor`` 。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64``。 + - **y** (Variable)- 多维 ``Tensor`` 或 ``LoDTensor`` 。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64``。 + - **axis** (int32,可选)- ``y`` 的维度对应到 ``x`` 维度上时的索引。默认值为 -1。 + - **act** (str,可选)- 激活函数名称,作用于输出上。详细请参考 :ref:`api_guide_activations` , 常见的激活函数有: ``relu`` ``tanh`` ``sigmoid`` 等。如果为None则不添加激活函数。默认值为None。 + - **name** (str,可选)- 输出的名字。默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 + + +返回: 维度与 ``x`` 相同的 ``Tensor`` 或 ``LoDTensor`` ,数据类型与 ``x`` 相同。 + +返回类型: Variable。 + + +**代码示例 1** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + def gen_data(): + return { + "x": np.array([2, 3, 4]), + "y": np.array([1, 5, 2]) + } + + x = fluid.data(name="x", shape=[3], dtype='int64') + y = fluid.data(name="y", shape=[3], dtype='int64') + z = fluid.layers.elementwise_mod(x, y) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + z_value = exe.run(feed=gen_data(), fetch_list=[z.name]) + print(z_value) #[0,3,0] + +**代码示例 2** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + def gen_data(): + return { + "x": np.random.randint(1, 5, size=[2, 3, 4, 5]), + "y": np.random.randint(1, 5, size=[3, 4]) + } + + x = fluid.data(name="x", shape=[2,3,4,5], dtype='int64') + y = fluid.data(name="y", shape=[3,4], dtype='int64') + z = fluid.layers.elementwise_mod(x, y, axis=1) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + z_value = exe.run(feed=gen_data(), + fetch_list=[z.name]) + print(z_value) # z.shape=[2,3,4,5] + +**代码示例 3** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + def gen_data(): + return { + "x": np.random.randint(1, 5, size=[2, 3, 4, 5]), + "y": np.random.randint(1, 5, size=[5]) + } + + x = fluid.data(name="x", shape=[2,3,4,5], dtype='int64') + y = fluid.data(name="y", shape=[5], dtype='int64') + z = fluid.layers.elementwise_mod(x, y, axis=3) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + z_value = exe.run(feed=gen_data(), + fetch_list=[z.name]) + print(z_value) # z.shape=[2,3,4,5] diff --git a/doc/paddle/api/paddle/fluid/layers/elementwise_pow_cn.rst b/doc/paddle/api/paddle/fluid/layers/elementwise_pow_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..61d5e5376f9f384295622e4b2e44ed0604879857 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/elementwise_pow_cn.rst @@ -0,0 +1,81 @@ +.. _cn_api_fluid_layers_elementwise_pow: + +elementwise_pow +------------------------------- + +.. py:function:: paddle.fluid.layers.elementwise_pow(x, y, axis=-1, act=None, name=None) + + + +该OP逐元素对输入Tensor进行幂操作。 + +等式是: + +.. math:: + Out = X ^ Y + +- :math:`X` :多维Tensor。 +- :math:`Y` :多维Tensor。 + +此运算算子有两种情况: + 1. :math:`Y` 的 ``shape`` 与 :math:`X` 相同。 + 2. :math:`Y` 的 ``shape`` 是 :math:`X` 的连续子序列。 + +对于情况2: + 1. 用 :math:`Y` 的 ``shape`` 匹配 :math:`X` 的 ``shape``,其中 ``axis`` 是 :math:`Y` 在 :math:`X` 上的起始维度的位置。 + 2. 如果 ``axis`` 为-1(默认值),则 :math:`axis = rank(X)-rank(Y)` 。 + 3. 考虑到子序列, :math:`Y` 的大小为1的尾部维度将被忽略,例如shape(Y)=(2,1)=>(2)。 + +例如: + +.. 
code-block:: text + + shape(X) = (2, 3, 4, 5), shape(Y) = (,) + shape(X) = (2, 3, 4, 5), shape(Y) = (5,) + shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5), with axis=-1(default) or axis=2 + shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1 + shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0 + shape(X) = (2, 3, 4, 5), shape(Y) = (2, 1), with axis=0 + +参数: + - **x** (Variable)- 多维Tensor。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64`` 。 + - **y** (Variable)- 多维Tensor。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64`` 。 + - **axis** (int32, 可选)- Y的维度对应到X维度上时的索引。默认值为 -1。 + - **act** (string, 可选)- 激活函数名称,作用于输出上。默认值为None。详细请参考 :ref:`api_guide_activations` , 常见的激活函数有: ``relu`` ``tanh`` ``sigmoid`` 等。 + - **name** (string, 可选)- 输出的名字。默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 + +返回: 维度和数据类型与 ``x`` 相同的多维Tensor。 + +返回类型: 多维Tensor。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + def gen_data(): + return { + "x": np.array([2, 3, 4]), + "y": np.array([1, 5, 2]) + } + + x = fluid.layers.data(name="x", shape=[3], dtype='float32') + y = fluid.layers.data(name="y", shape=[3], dtype='float32') + z = fluid.layers.elementwise_pow(x, y) + + place = fluid.CPUPlace() + exe = fluid.Executor(place) + z_value = exe.run(feed=gen_data(), + fetch_list=[z.name]) + + print(z_value) #[2, 243, 16] + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/elementwise_sub_cn.rst b/doc/paddle/api/paddle/fluid/layers/elementwise_sub_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..021b896b67a7dea2b20ca1bd8c6d6116b0425bbf --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/elementwise_sub_cn.rst @@ -0,0 +1,119 @@ +.. _cn_api_fluid_layers_elementwise_sub: + +elementwise_sub +------------------------------- + +.. py:function:: paddle.fluid.layers.elementwise_sub(x, y, axis=-1, act=None, name=None) + + + + +该OP是逐元素相减算子,输入 ``x`` 与输入 ``y`` 逐元素相减,并将各个位置的输出元素保存到返回结果中。 + +等式是: + +.. math:: + Out = X - Y + +- :math:`X` :多维Tensor。 +- :math:`Y` :维度必须小于等于X维度的Tensor。 + +对于这个运算算子有2种情况: + 1. :math:`Y` 的 ``shape`` 与 :math:`X` 相同。 + 2. :math:`Y` 的 ``shape`` 是 :math:`X` 的连续子序列。 + +对于情况2: + 1. 用 :math:`Y` 匹配 :math:`X` 的形状(shape),其中 ``axis`` 是 :math:`Y` 在 :math:`X` 上的起始维度的位置。 + 2. 如果 ``axis`` 为-1(默认值),则 :math:`axis= rank(X)-rank(Y)` 。 + 3. 考虑到子序列, :math:`Y` 的大小为1的尾部维度将被忽略,例如shape(Y)=(2,1)=>(2)。 + +例如: + +.. code-block:: text + + shape(X) = (2, 3, 4, 5), shape(Y) = (,) + shape(X) = (2, 3, 4, 5), shape(Y) = (5,) + shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5), with axis=-1(default) or axis=2 + shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1 + shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0 + shape(X) = (2, 3, 4, 5), shape(Y) = (2, 1), with axis=0 + +参数: + - **x** (Variable)- 多维 ``Tensor`` 或 ``LoDTensor`` 。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64``。 + - **y** (Variable)- 多维 ``Tensor`` 或 ``LoDTensor`` 。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64``。 + - **axis** (int32,可选)- ``y`` 的维度对应到 ``x`` 维度上时的索引。默认值为 -1。 + - **act** (str,可选)- 激活函数名称,作用于输出上。默认值为None。详细请参考 :ref:`api_guide_activations` , 常见的激活函数有: ``relu`` ``tanh`` ``sigmoid`` 等。 + - **name** (str,可选)- 输出的名字。默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 + + +返回: 维度与 ``x`` 相同的 ``Tensor`` 或 ``LoDTensor`` ,数据类型与 ``x`` 相同。 + +返回类型: Variable。 + +**代码示例 1** + +.. 
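code-block:: python
+
+    # 补充示意(非官方示例):动态图(dygraph)模式下的等价用法
+    import paddle.fluid as fluid
+    import numpy as np
+
+    with fluid.dygraph.guard():
+        x = fluid.dygraph.to_variable(np.array([2, 3, 4]).astype('float32'))
+        y = fluid.dygraph.to_variable(np.array([1, 5, 2]).astype('float32'))
+        z = fluid.layers.elementwise_sub(x, y)
+        print(z.numpy())  # [ 1. -2.  2.]
+
+.. 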
code-block:: python + + import paddle.fluid as fluid + import numpy as np + def gen_data(): + return { + "x": np.array([2, 3, 4]), + "y": np.array([1, 5, 2]) + } + x = fluid.layers.data(name="x", shape=[3], dtype='float32') + y = fluid.layers.data(name="y", shape=[3], dtype='float32') + z = fluid.layers.elementwise_sub(x, y) + # z = x - y + place = fluid.CPUPlace() + exe = fluid.Executor(place) + z_value = exe.run(feed=gen_data(), + fetch_list=[z.name]) + print(z_value) # [1., -2., 2.] + +**代码示例 2** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + def gen_data(): + return { + "x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'), + "y": np.random.randint(1, 5, size=[3, 4]).astype('float32') + } + x = fluid.layers.data(name="x", shape=[2,3,4,5], dtype='float32') + y = fluid.layers.data(name="y", shape=[3,4], dtype='float32') + z = fluid.layers.elementwise_sub(x, y, axis=1) + # z = x - y + place = fluid.CPUPlace() + exe = fluid.Executor(place) + z_value = exe.run(feed=gen_data(), + fetch_list=[z.name]) + print(z_value) # z.shape=[2,3,4,5] + +**代码示例 3** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + def gen_data(): + return { + "x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'), + "y": np.random.randint(1, 5, size=[5]).astype('float32') + } + x = fluid.layers.data(name="x", shape=[2,3,4,5], dtype='float32') + y = fluid.layers.data(name="y", shape=[5], dtype='float32') + z = fluid.layers.elementwise_sub(x, y, axis=3) + # z = x - y + place = fluid.CPUPlace() + exe = fluid.Executor(place) + z_value = exe.run(feed=gen_data(), + fetch_list=[z.name]) + print(z_value) # z.shape=[2,3,4,5] + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/elu_cn.rst b/doc/paddle/api/paddle/fluid/layers/elu_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..dc35f133ab2a8e3f2e2b79c9ef72ab1b25d9358b --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/elu_cn.rst @@ -0,0 +1,40 @@ +.. _cn_api_fluid_layers_elu: + +elu +------------------------------- + +.. py:function:: paddle.fluid.layers.elu(x, alpha=1.0, name=None) + + + + +ELU激活层(ELU Activation Operator) + +根据 https://arxiv.org/abs/1511.07289 对输入Tensor中每个元素应用以下计算。 + +.. math:: + \\out=max(0,x)+min(0,α∗(e^{x}−1))\\ + +参数: + - **x** (Variable) - 该OP的输入为多维Tensor。数据类型为float32或float64。 + - **alpha** (float, 可选) - ELU的alpha值,默认值为1.0。 + - **name** (str, 可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为None。 + +返回: 输出为Tensor,与 ``x`` 维度相同、数据类型相同。 + +返回类型: Variable + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + input_elu = np.array([[-1,6],[1,15.6]]) + with fluid.dygraph.guard(): + x = fluid.dygraph.to_variable(input_elu) + y = fluid.layers.elu(x, alpha=0.2) + print(y.numpy()) + # [[-0.12642411 6. ] + # [ 1. 15.6 ]] diff --git a/doc/paddle/api/paddle/fluid/layers/embedding_cn.rst b/doc/paddle/api/paddle/fluid/layers/embedding_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a377e832f4e96481ba81e1bd684d28fef3164203 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/embedding_cn.rst @@ -0,0 +1,102 @@ +.. _cn_api_fluid_layers_embedding: + +embedding +------------------------------- + + +.. 
py:function:: paddle.fluid.layers.embedding(input, size, is_sparse=False, is_distributed=False, padding_idx=None, param_attr=None, dtype='float32') + + + + +嵌入层(Embedding Layer) + +**注意:此OP将在未来的版本中被移除!该OP要求输入Tensor shape的最后一维必须为1。推荐使用fluid.** :ref:`cn_api_fluid_embedding` 。 + +该OP根据input中的id信息从embedding矩阵中查询对应embedding信息,并会根据输入的size (vocab_size, emb_size)和dtype自动构造一个二维embedding矩阵。 + +要求input的最后一维必须等于1,输出的Tensor的shape是将输入Tensor shape的最后一维的1替换为emb_size。 + +注:input中的id必须满足 ``0 =< id < size[0]``,否则程序会抛异常退出。 + + +:: + + Case 1: + + input是Tensor, 且padding_idx = -1 + input.data = [[[1], [3]], [[2], [4]], [[4], [127]]] + input.shape = [3, 2, 1] + 若size = [128, 16] + 输出为Tensor: + out.shape = [3, 2, 16] + out.data = [[[0.129435295, 0.244512452, ..., 0.436322452], + [0.345421456, 0.524563927, ..., 0.144534654]], + + [[0.345249859, 0.124939536, ..., 0.194353745], + [0.945345345, 0.435394634, ..., 0.435345365]], + + [[0.945345345, 0.435394634, ..., 0.435345365], + [0.0, 0.0, ..., 0.0 ]]] # padding data + 输入的padding_idx小于0,则自动转换为padding_idx = -1 + 128 = 127, 对于输入id为127的词,进行padding处理。 + + Case 2: + + input是lod level 为1的LoDTensor, 且padding_idx = 0 + input.lod = [[2, 3]] + input.data = [[1], [3], [2], [4], [0]] + input.shape = [5, 1] + 若size = [128, 16] + 输出为LoDTensor: + out.lod = [[2, 3]] + out.shape = [5, 16] + out.data = [[0.129435295, 0.244512452, ..., 0.436322452], + [0.345421456, 0.524563927, ..., 0.144534654], + [0.345249859, 0.124939536, ..., 0.194353745], + [0.945345345, 0.435394634, ..., 0.435345365], + [0.0, 0.0, ..., 0.0 ]] # padding data + 输入的padding_idx = 0,则对于输入id为0的词,进行padding处理。 + + +参数: + - **input** (Variable) - 存储id信息的Tensor或LoDTensor,数据类型必须为:int64,输入的shape最后一维须为1。input中的id必须满足 ``0 =< id < size[0]`` 。 + - **size** (tuple|list) - embedding矩阵的维度。必须包含两个元素,第一个元素为vocab_size(词表大小), 第二个为emb_size(embedding层维度)。 + - **is_sparse** (bool) - 是否使用稀疏的更新方式,这个参数只会影响反向的梯度更新的性能,sparse更新速度更快,推荐使用稀疏更新的方式。但某些optimizer不支持sparse更新,比如 :ref:`cn_api_fluid_optimizer_AdadeltaOptimizer` 、 :ref:`cn_api_fluid_optimizer_AdamaxOptimizer` 、 :ref:`cn_api_fluid_optimizer_DecayedAdagradOptimizer` 、 :ref:`cn_api_fluid_optimizer_FtrlOptimizer` 、 :ref:`cn_api_fluid_optimizer_LambOptimizer` 、:ref:`cn_api_fluid_optimizer_LarsMomentumOptimizer` ,此时is_sparse必须为False。默认为False。 + - **is_distributed** (bool) - 是否使用分布式的方式存储embedding矩阵,仅在多机分布式cpu训练中使用。默认为False。 + - **padding_idx** (int|long|None) - padding_idx需在区间[-vocab_size, vocab_size),否则不生效,padding_idx<0时,padding_idx会被改成vocab_size + padding_idx,input中等于padding_index的id对应的embedding信息会被设置为0,且这部分填充数据在训练时将不会被更新。如果为None,不作处理,默认为None。 + - **param_attr** (ParamAttr) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。此外,可以通过 ``param_attr`` 参数加载用户自定义或预训练的词向量。只需将本地词向量转为numpy数据格式,且保证本地词向量的shape和embedding的 ``size`` 参数一致,然后使用 :ref:`cn_api_fluid_initializer_NumpyArrayInitializer` 进行初始化,即可实现加载自定义或预训练的词向量。详细使用方法见代码示例2。 + - **dtype** (str|core.VarDesc.VarType) - 输出Tensor或LoDTensor的数据类型,数据类型必须为:float32或float64,默认为float32。 + +返回:input映射后得到的Embedding Tensor或LoDTensor,数据类型和dtype定义的类型一致。 + +返回类型:Variable + +**代码示例**: + +.. 
code-block:: python + + import paddle.fluid as fluid + import numpy as np + + data = fluid.layers.data(name='sequence', shape=[1], dtype='int64', lod_level=1) + + # 示例 1 + emb_1 = fluid.layers.embedding(input=data, size=[128, 64]) + + # 示例 2: 加载用户自定义或预训练的词向量 + weight_data = np.random.random(size=(128, 100)) # numpy格式的词向量数据 + w_param_attrs = fluid.ParamAttr( + name="emb_weight", + learning_rate=0.5, + initializer=fluid.initializer.NumpyArrayInitializer(weight_data), + trainable=True) + emb_2 = fluid.layers.embedding(input=data, size=(128, 100), param_attr=w_param_attrs, dtype='float32') + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/equal_cn.rst b/doc/paddle/api/paddle/fluid/layers/equal_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9a66e76cedc7d3997fe8e6cbfefca91232f5734b --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/equal_cn.rst @@ -0,0 +1,39 @@ +.. _cn_api_fluid_layers_equal: + +equal +------------------------------- + +.. py:function:: paddle.fluid.layers.equal(x, y, cond=None, name=None) + + +该OP返回 :math:`x==y` 逐元素比较x和y是否相等,x和y的维度应该相同。 + +参数: + - **x** (Variable) - 输入Tensor,支持的数据类型包括 float32, float64,int32, int64。 + - **y** (Variable) - 输入Tensor,支持的数据类型包括 float32, float64, int32, int64。 + - **cond** (Variable,可选) – 如果为None,则创建一个Tensor来作为进行比较的输出结果,该Tensor的shape和数据类型和输入x一致;如果不为None,则将Tensor作为该OP的输出,数据类型和数据shape需要和输入x一致。默认值为None。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:输出结果的Tensor,输出Tensor的shape和输入一致,Tensor数据类型为bool。 + +返回类型:变量(Variable) + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + out_cond =fluid.data(name="input1", shape=[2], dtype='bool') + label = fluid.layers.assign(np.array([3, 3], dtype="int32")) + limit = fluid.layers.assign(np.array([3, 2], dtype="int32")) + label_cond = fluid.layers.assign(np.array([1, 2], dtype="int32")) + + out1 = fluid.layers.equal(x=label,y=limit) #out1=[True, False] + out2 = fluid.layers.equal(x=label_cond,y=limit, cond=out_cond) #out2=[False, True] out_cond=[False, True] + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/erf_cn.rst b/doc/paddle/api/paddle/fluid/layers/erf_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b434dddfd4ed4ee50d505f1b84d570bc6d86171c --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/erf_cn.rst @@ -0,0 +1,72 @@ +.. _cn_api_fluid_layers_erf: + +erf +------------------------------- + +.. py:function:: paddle.fluid.layers.erf(x) + + + + +逐元素计算 Erf 激活函数。更多细节请参考 `Error function `_ 。 + + +.. math:: + out = \frac{2}{\sqrt{\pi}} \int_{0}^{x}e^{- \eta^{2}}d\eta + +参数: + - **x** (Variable) - Erf Op 的输入,多维 Tensor 或 LoDTensor,数据类型为 float16, float32 或 float64。 + +返回: + - 多维 Tensor 或 LoDTensor, 数据类型为 float16, float32 或 float64, 和输入 x 的数据类型相同,形状和输入 x 相同。 + +返回类型: + - Variable + +**代码示例**: + +.. 
code-block:: python + + # declarative mode + import numpy as np + from paddle import fluid + + x = fluid.data(name="x", shape=(-1, 3), dtype="float32") + y = fluid.layers.erf(x) + + place = fluid.CPUPlace() + exe = fluid.Executor(place) + start = fluid.default_startup_program() + main = fluid.default_main_program() + + data = np.random.randn(2, 3).astype("float32") + exe.run(start) + + y_np, = exe.run(main, feed={"x": data}, fetch_list=[y]) + + data + # array([[ 0.4643714 , -1.1509596 , 1.2538221 ], + # [ 0.34369683, 0.27478245, 1.1805398 ]], dtype=float32) + y_np + # array([[ 0.48863927, -0.8964121 , 0.9237998 ], + # [ 0.37307587, 0.30242872, 0.9049887 ]], dtype=float32) + +.. code-block:: python + + # imperative mode + import numpy as np + from paddle import fluid + import paddle.fluid.dygraph as dg + + data = np.random.randn(2, 3).astype("float32") + place = fluid.CPUPlace() + with dg.guard(place) as g: + x = dg.to_variable(data) + y = fluid.layers.erf(x) + y_np = y.numpy() + data + # array([[ 0.4643714 , -1.1509596 , 1.2538221 ], + # [ 0.34369683, 0.27478245, 1.1805398 ]], dtype=float32) + y_np + # array([[ 0.48863927, -0.8964121 , 0.9237998 ], + # [ 0.37307587, 0.30242872, 0.9049887 ]], dtype=float32) diff --git a/doc/paddle/api/paddle/fluid/layers/exp_cn.rst b/doc/paddle/api/paddle/fluid/layers/exp_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..243b41fd5494ac96ed6d75c83b63b06a469b83a4 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/exp_cn.rst @@ -0,0 +1,37 @@ +.. _cn_api_fluid_layers_exp: + +exp +------------------------------- + +.. py:function:: paddle.fluid.layers.exp(x, name=None) + + + + +对输入,逐元素进行以自然数e为底指数运算。 + +.. math:: + out = e^x + +参数: + - **x** (Variable) - 该OP的输入为多维Tensor。数据类型为float32,float64。 + - **name** (str, 可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为None。 + +返回:输出为Tensor,与 ``x`` 维度相同、数据类型相同。 + +返回类型: Variable + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + input_exp = np.array([[-1.5,6],[1,15.6]]) + with fluid.dygraph.guard(): + x = fluid.dygraph.to_variable(input_exp) + y = fluid.layers.exp(x) + print(y.numpy()) + # [[2.23130160e-01 4.03428793e+02] + # [2.71828183e+00 5.95653801e+06]] diff --git a/doc/paddle/api/paddle/fluid/layers/expand_as_cn.rst b/doc/paddle/api/paddle/fluid/layers/expand_as_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b24e79655ce599e8d13ed47bbf910b23956ca2f9 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/expand_as_cn.rst @@ -0,0 +1,65 @@ +.. _cn_api_fluid_layers_expand_as: + +expand_as +------------------------------- + +.. 
py:function:: paddle.fluid.layers.expand_as(x, target_tensor, name=None) + + + + +该OP会根据输入的variable ``target_tensor`` 对输入 ``x`` 的各维度进行广播。通过 ``target_tensor``的维度来为 ``x`` 的每个维度设置广播的次数,使得x 的维度与target_tensor的维度相同。 ``x`` 的秩应小于等于6。注意, ``target_tensor`` 的秩必须与 ``x`` 的秩相同。 +注意:``target_tensor`` 对应的每一维必须能整除输入x中对应的维度,否则会报错。比如,target_tensor的维度为[2,6,2],x为[2,3,1],则整除后为[1,2,2],x广播后维度为[2,6,2]。如果target_tensor的维度为[2,5,2],第二维5不能整除x的第二维3,则会报错。 + +以下是一个示例: + +:: + + 输入(x) 是一个形状为[2, 3, 1]的 3-D Tensor : + + [ + [[1], [2], [3]], + [[4], [5], [6]] + ] + + target_tensor的维度 : [2, 6, 2] + + 输出(out) 是一个形状为[2, 6, 2]的 3-D Tensor: + + [ + [[1, 1], [2, 2], [3, 3], [1, 1], [2, 2], [3, 3]], + [[4, 4], [5, 5], [6, 6], [4, 4], [5, 5], [6, 6]] + ] + + + +参数: + - **x** (Variable)- 维度最高为6的多维 ``Tensor`` 或 ``LoDTensor``,数据类型为 ``float32``,``float64``,``int32`` 或 ``bool``。 + - **target_tensor** (list|tuple|Variable)- 数据类型为 ``float32``,``float64``,``int32`` 或 ``bool`` 。可为Tensor或者LODTensor。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置。默认值: ``None``。 + +返回:维度与输入 ``x`` 相同的 ``Tensor`` 或 ``LoDTensor``,数据类型与 ``x`` 相同。返回值的每个维度的大小等于``target_tensor`` 对应的维度的大小。 + +返回类型:``Variable`` 。 + +抛出异常: + - :code:`ValueError`:``target_tensor`` 对应的每一维必须能整除输入x中对应的维度,否则会报错。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + data = fluid.data(name="data", shape=[-1,10], dtype='float64') + target_tensor = fluid.data(name="target_tensor", shape=[-1,20], dtype='float64') + result = fluid.layers.expand_as(x=data, target_tensor=target_tensor) + use_cuda = False + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + x = np.random.rand(3,10) + y = np.random.rand(3,20) + output= exe.run(feed={"data":x,"target_tensor":y},fetch_list=[result.name]) + print(output[0].shape) + #(3,20) \ No newline at end of file diff --git a/doc/paddle/api/paddle/fluid/layers/expand_cn.rst b/doc/paddle/api/paddle/fluid/layers/expand_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d1926073a1301536c039a9011dc6620a6cfe9b53 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/expand_cn.rst @@ -0,0 +1,68 @@ +.. _cn_api_fluid_layers_expand: + +expand +------------------------------- + +.. py:function:: paddle.fluid.layers.expand(x, expand_times, name=None) + + + + +该OP会根据参数 ``expand_times`` 对输入 ``x`` 的各维度进行复制。通过参数 ``expand_times`` 来为 ``x`` 的每个维度设置复制次数。 ``x`` 的秩应小于等于6。注意, ``expand_times`` 的大小必须与 ``x`` 的秩相同。以下是一个用例: + +:: + + 输入(x) 是一个形状为[2, 3, 1]的 3-D Tensor : + + [ + [[1], [2], [3]], + [[4], [5], [6]] + ] + + 属性(expand_times): [1, 2, 2] + + 输出(out) 是一个形状为[2, 6, 2]的 3-D Tensor: + + [ + [[1, 1], [2, 2], [3, 3], [1, 1], [2, 2], [3, 3]], + [[4, 4], [5, 5], [6, 6], [4, 4], [5, 5], [6, 6]] + ] + +参数: + - **x** (Variable)- 维度最高为6的多维 ``Tensor`` 或 ``LoDTensor``,数据类型为 ``float32``,``float64``,``int32`` 或 ``bool``。 + - **expand_times** (list|tuple|Variable)- 数据类型是 ``int32`` 。如果 ``expand_times`` 的类型是 list 或 tuple,它的元素可以是整数或者形状为[1]的 ``Tensor`` 或 ``LoDTensor``。如果 ``expand_times`` 的类型是 ``Variable``,则是1-D ``Tensor`` 或 ``LoDTensor``。表示 ``x`` 每一个维度被复制的次数。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置。默认值: ``None``。 + +返回:维度与输入 ``x`` 相同的 ``Tensor`` 或 ``LoDTensor``,数据类型与 ``x`` 相同。返回值的每个维度的大小等于 ``x`` 的相应维度的大小乘以 ``expand_times`` 给出的相应值。 + +返回类型:``Variable`` 。 + +抛出异常: + - :code:`TypeError`:``expand_times`` 的类型应该是 list、tuple 或 Variable。 + - :code:`ValueError`:``expand_times`` 中的元素不能是负值。 + + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + + # example 1: + data_1 = fluid.layers.fill_constant(shape=[2, 3, 1], dtype='int32', value=0) + expanded_1 = fluid.layers.expand(data_1, expand_times=[1, 2, 2]) + # the shape of expanded_1 is [2, 6, 2]. + + # example 2: + data_2 = fluid.layers.fill_constant(shape=[12, 14], dtype="int32", value=3) + expand_times = fluid.layers.fill_constant(shape=[2], dtype="int32", value=4) + expanded_2 = fluid.layers.expand(data_2, expand_times=expand_times) + # the shape of expanded_2 is [48, 56]. + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/exponential_decay_cn.rst b/doc/paddle/api/paddle/fluid/layers/exponential_decay_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..69c9aeab225a2e266a1efc33d789a39ecc1817a8 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/exponential_decay_cn.rst @@ -0,0 +1,55 @@ +.. _cn_api_fluid_layers_exponential_decay: + +exponential_decay +------------------------------- + +.. py:function:: paddle.fluid.layers.exponential_decay(learning_rate,decay_steps,decay_rate,staircase=False) + + + + +在学习率上运用指数衰减。 + +训练模型时,在训练过程中降低学习率。每 ``decay_steps`` 步骤中以 ``decay_rate`` 衰减学习率。 + +学习率衰减计算方式如下。 + +.. code-block:: text + + if staircase == True: + decayed_learning_rate = learning_rate * decay_rate ^ floor(global_step / decay_steps) + else: + decayed_learning_rate = learning_rate * decay_rate ^ (global_step / decay_steps) + +参数: + - **learning_rate** (Variable|float) - 初始学习率,类型可以为学习率变量(Variable)或float型常量。 + - **decay_steps** (int) - 学习率衰减步长,见以上衰减运算。 + - **decay_rate** (float) - 学习率衰减率。见以上衰减运算。 + - **staircase** (bool) - 若为True,按离散区间衰减学习率,即每 ``decay_steps`` 步学习率衰减 ``decay_rate`` 。若为False,则按以上衰减运算持续衰减。默认False。 + +返回:Variable(Tensor) 随step衰减的学习率变量,维度为 :math:`[1]` 的1-D Tensor。 + +返回类型:变量(Variable) + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + base_lr = 0.1 + sgd_optimizer = fluid.optimizer.SGD( + learning_rate=fluid.layers.exponential_decay( + learning_rate=base_lr, + decay_steps=10000, + decay_rate=0.5, + staircase=True)) + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/eye_cn.rst b/doc/paddle/api/paddle/fluid/layers/eye_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6044ab6e322580b82e1aa1e830b3c23554506c98 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/eye_cn.rst @@ -0,0 +1,43 @@ +.. _cn_api_fluid_layers_eye: + +eye +------------------------------- + +.. py:function:: paddle.fluid.layers.eye(num_rows, num_columns=None, batch_shape=None, dtype='float32', name=None) + + +该OP用来构建二维Tensor,或一个批次的二维Tensor。 + +参数: + - **num_rows** (int) - 该批次二维Tensor的行数,数据类型为非负int32。 + - **num_columns** (int, 可选) - 该批次二维Tensor的列数,数据类型为非负int32。若为None,则默认等于num_rows。 + - **batch_shape** (list(int), 可选) - 如若提供,则返回Tensor的主批次维度将为batch_shape。 + - **dtype** (np.dtype|core.VarDesc.VarType|str,可选) - 返回Tensor的数据类型,可为int32,int64,float16,float32,float64,默认数据类型为float32。 + - **name** (str) – 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None。 + +返回: ``shape`` 为batch_shape + [num_rows, num_columns]的Tensor。 + + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + data = fluid.layers.eye(3, dtype='int32') + # [[1, 0, 0] + # [0, 1, 0] + # [0, 0, 1]] + + data = fluid.layers.eye(2, 3, dtype='int32') + # [[1, 0, 0] + # [0, 1, 0]] + + data = fluid.layers.eye(2, batch_shape=[3]) + # Construct a batch of 3 identity tensors, each 2 x 2. + # data[i, :, :] is a 2 x 2 identity tensor, i = 0, 1, 2. 
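补充示意:以下是一个假设性的最小运行示例(非官方示例),演示如何在静态图模式下用 ``Executor`` 取回上述 ``batch_shape=[3]`` 的结果并检查其形状;其中 ``CPUPlace`` 等执行配置仅为演示假设。

.. code-block:: python

    import paddle.fluid as fluid

    # 构造一个批次的单位矩阵:batch_shape=[3],每个单位矩阵为 2 x 2
    data = fluid.layers.eye(2, batch_shape=[3])

    place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())

    out, = exe.run(fetch_list=[data])
    print(out.shape)  # 预期输出 (3, 2, 2)
    # out[i] 为 2 x 2 的单位矩阵,i = 0, 1, 2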
+ + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/fc_cn.rst b/doc/paddle/api/paddle/fluid/layers/fc_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..46c93d93b17899eb4bff136d6fb78a404c0f63e6 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/fc_cn.rst @@ -0,0 +1,113 @@ +.. _cn_api_fluid_layers_fc: + +fc +------------------------------- + + +.. py:function:: paddle.fluid.layers.fc(input, size, num_flatten_dims=1, param_attr=None, bias_attr=None, act=None, name=None) + + + + + +**全连接层** + +该OP将在神经网络中构建一个全连接层。其输入可以是一个Tensor(或LoDTensor)或多个Tensor(或LoDTensor)组成的list(详见参数说明),该OP会为每个输入的Tensor创建一个权重(weights)变量,即一个从每个输入单元到每个输出单元的全连接权重矩阵。FC层将每个输入Tensor和其对应的权重(weights)相乘得到shape为 :math:`[M, size]` 输出Tensor,其中 ``M`` 为batch_size大小。如果有多个输入Tensor,则多个shape为 :math:`[M, size]` 的Tensor计算结果会被累加起来,作为最终输出。如果 ``bias_attr`` 非空,则会创建一个偏置变量(bias variable),并把它累加到输出结果中。如果 ``act`` 非空,将会在输出结果上应用相应的激活函数。 + +当输入为单个Tensor(或LoDTensor): + +.. math:: + + \\Out = Act({XW + b})\\ + + + +当输入为多个Tensor(或LoDTensor)组成的list时: + +.. math:: + + \\Out=Act(\sum^{N-1}_{i=0}X_iW_i+b) \\ + + +上述等式中: + - :math:`N` :输入的数目,如果输入是Tensor列表,N等于len(input) + - :math:`X_i` :第i个输入的Tensor + - :math:`W_i` :对应第i个输入张量的第i个权重矩阵 + - :math:`b` :该层创建的bias参数 + - :math:`Act` :activation function(激活函数) + - :math:`Out` :输出Tensor + +:: + + Case 1: + 给定单个输入Tensor data_1, 且num_flatten_dims = 2: + data_1.data = [[[0.1, 0.2], + [0.3, 0.4]]] + data_1.shape = (1, 2, 2) # 1是batch_size + + out = fluid.layers.fc(input=data_1, size=1, num_flatten_dims=2) + + 则输出为: + out.data = [[0.83234344], [0.34936576]] + out.shape = (1, 2, 1) + + + Case 2: + 给定多个Tensor组成的list: + data_1.data = [[[0.1, 0.2], + [0.3, 0.4]]] + data_1.shape = (1, 2, 2) # 1 是 batch_size + + data_2 = [[[0.1, 0.2, 0.3]]] + data_2.shape = (1, 1, 3) + + out = fluid.layers.fc(input=[data_1, data_2], size=2) + + 则输出为: + out.data = [[0.18669507, 0.1893476]] + out.shape = (1, 2) + + +参数: + - **input** (Variable|list of Variable) – 维度为 :math:`[N_1, N_2, ..., N_k]` 的多维Tensor(或LoDTensor)或由多个Tensor(或LoDTensor)组成的list,输入Tensor的shape至少是2。数据类型为float32或float64。 + - **size** (int) – 全连接层输出单元的数目,即输出Tensor(或LoDTensor)特征维度。 + - **num_flatten_dims** (int) – 输入可以接受维度大于2的Tensor。在计算时,输入首先会被扁平化(flatten)为一个二维矩阵,之后再与权重(weights)相乘。参数 ``num_flatten_dims`` 决定了输入Tensor的flatten方式: 前 ``num_flatten_dims`` (包含边界,从1开始数) 个维度会被扁平化为二维矩阵的第一维 (即为矩阵的高), 剩下的 :math:`rank(X) - num\_flatten\_dims` 维被扁平化为二维矩阵的第二维 (即矩阵的宽)。 例如, 假设X是一个五维的Tensor,其shape为(2, 3, 4, 5, 6), 若 :math:`num\_flatten\_dims = 3` ,则扁平化的矩阵shape为: :math:`(2 x 3 x 4, 5 x 6) = (24, 30)` ,最终输出Tensor的shape为 :math:`(2, 3, 4, size)` 。默认为1。 + - **param_attr** (ParamAttr) – 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr) – 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **act** (str) – 应用于输出上的激活函数,如tanh、softmax、sigmoid,relu等,支持列表请参考 :ref:`api_guide_activations` ,默认值为None。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + + +返回:经过全连接层计算后的Tensor或LoDTensor,数据类型与input类型一致。 + +返回类型: Variable + +弹出异常:``ValueError`` - 如果输入Tensor(或LoDTensor)的维度小于2 + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + # 当输入为单个张量时 + + data = fluid.layers.data(name="data", shape=[32, 32], dtype="float32") + fc = fluid.layers.fc(input=data, size=1000, act="tanh") + + # 当输入为多个张量时 + data_1 = fluid.layers.data(name="data_1", shape=[32, 32], dtype="float32") + data_2 = fluid.layers.data(name="data_2", shape=[24, 36], dtype="float32") + fc = fluid.layers.fc(input=[data_1, data_2], size=1000, act="tanh") + + + + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/fill_constant_cn.rst b/doc/paddle/api/paddle/fluid/layers/fill_constant_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..19ce58f3e1f264aac80b079d8a754a0e0780168a --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/fill_constant_cn.rst @@ -0,0 +1,42 @@ +.. _cn_api_fluid_layers_fill_constant: + +fill_constant +------------------------------- + +.. py:function:: paddle.fluid.layers.fill_constant(shape,dtype,value,force_cpu=False,out=None) + + + + +该OP创建一个形状为shape并且数据类型为dtype的Tensor,同时用 ``value`` 中提供的常量初始化该Tensor。 + +创建的Tensor的stop_gradient属性默认为True。 + +参数: + - **shape** (tuple|list|Variable)- 要创建的LoDTensor或者SelectedRows的形状。 数据类型为int32或int64。 如果shape是一个列表或元组,则其元素应该是形状为[1]的整数或Tensor。 如果shape是Variable,则它应该是一维Tensor。 + - **dtype** (np.dtype|core.VarDesc.VarType|str)- 创建LoDTensor或者SelectedRows的数据类型,支持数据类型为float16, float32, float64, int32, int64。 + - **value** (float|int)- 用于初始化输出LoDTensor或者SelectedRows的常量数据的值。 + - **force_cpu** (bool)- 用于标志LoDTensor或者SelectedRows是否创建在CPU上,默认值为False,若设为true,则数据必须在CPU上。 + - **out** (Variable,可选)- 用于存储创建的LoDTensor或者SelectedRows,可以是程序中已经创建的任何Variable。默认值为None,此时将创建新的Variable来保存输出结果。 + + +返回: 根据shape和dtype创建的Tensor。 + +返回类型:变量(Variable) + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + data1 = fluid.layers.fill_constant(shape=[2,1], value=0, dtype='int64') #data1=[[0],[0]] + data2 = fluid.layers.fill_constant(shape=[2,1], value=5, dtype='int64', out=data1) + #data1=[[5],[5]] data2=[[5],[5]] + + # attr shape is a list which contains Variable Tensor. + positive_2 = fluid.layers.fill_constant([1], "int32", 2) + data3 = fluid.layers.fill_constant(shape=[1, positive_2], dtype='float32', value=1.5) # data3=[1.5, 1.5] + + # attr shape is a Variable Tensor. + shape = fluid.layers.fill_constant([1,2], "int32", 2) # shape=[2,2] + data4 = fluid.layers.fill_constant(shape=shape, dtype='bool', value=True) # data4=[[True,True],[True,True]] diff --git a/doc/paddle/api/paddle/fluid/layers/filter_by_instag_cn.rst b/doc/paddle/api/paddle/fluid/layers/filter_by_instag_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4ff343a1ef5530ca5ebc5656ec0c1b37b29a6c66 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/filter_by_instag_cn.rst @@ -0,0 +1,52 @@ +.. _cn_api_fluid_layers_filter_by_instag: + +filter_by_instag +------------------------------- + +.. 
py:function:: paddle.fluid.layers.filter_by_instag(ins, ins_tag, filter_tag, is_lod) + + + + +此函数通过instag来过滤ins batch,大量属于同样的tags的样本,我们可以指定我们想要的一些tags,属于这些tags的样本将会被保留在输出中,其余的将会移除。比如,一个batch有4个样本,每个样本都有自己的tag表。 + +Ins | Ins_Tag | + +|:—–:|:——:| + +| 0 | 0, 1 | + +| 1 | 1, 3 | + +| 2 | 0, 3 | + +| 3 | 2, 6 | + +Lod为[1,1,1,1],filter tags为[1],从上面的定义中,带有标签[1]的样本将会通过filter,所以,样本0和1将会通过并且出现在输出中。准确来说,如果 ``is_lod`` 为false,它是一个等于值全为1的lod_tensor的普通的tensor,和上面的例子很相似。 + +参数: + - **ins** (Variable) - 输入变量(LoDTensor),通常为2D向量,第一个维度可以有lod info,也可以没有。 + - **ins_tag** (Variable) - 输入变量(LoDTensor),通常为1维列表,通过lod info来分割。 + - **filter_tag** (Variable) - 输入变量(1D Tensor/List),通常为持有tags的列表。 + - **is_lod** (Bool) – 指定样本是否为lod tensor的布尔值。 + - **out_val_if_empty** (Int64) - 如果batch内样本被全部过滤,输出会被指定成这个值。 + +返回:过滤之后的样本(LoDTensor)和 损失权重(Tensor)。 + +返回类型:变量(Variable) + +**代码示例**: + +.. code-block:: python + + import paddle.fluid.layers as layers + ins = layers.data(name='Ins', shape=[-1,32], lod_level=0, dtype='float64') + ins_tag = layers.data(name='Ins_tag', shape=[-1,16], lod_level=0, dtype='int64') + filter_tag = layers.data(name='Filter_tag', shape=[-1,16], dtype='int64') + out, loss_weight = layers.filter_by_instag(ins, ins_tag, filter_tag, True) + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/flatten_cn.rst b/doc/paddle/api/paddle/fluid/layers/flatten_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ef54e3c6f84734f40570d9b8f3d1d60dd431cf82 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/flatten_cn.rst @@ -0,0 +1,59 @@ +.. _cn_api_fluid_layers_flatten: + +flatten +------------------------------- + +.. py:function:: paddle.fluid.layers.flatten(x, axis=1, name=None) + + + + +flatten op将输入的多维Tensor展平成2-D Tensor矩阵 + +例如: + +.. code-block:: text + + Case 1: + + 给定 + X.shape = (3, 100, 100, 4) + 且 + axis = 2 + 得到: + Out.shape = (3 * 100, 4 * 100) + + Case 2: + + 给定 + X.shape = (3, 100, 100, 4) + 且 + axis = 0 + 得到: + Out.shape = (1, 3 * 100 * 100 * 4) + +参数: + - **x** (Variable) - 一个维度数>=axis 的多维Tensor, 数据类型可以为float32,float64,int8,int32或int64。 + - **axis** (int) - flatten展开的分割轴,[0, axis) 轴数据被flatten到输出矩阵的0轴,[axis, R)数据被flatten到输出矩阵的1轴,其中R是输入张量的总维度数。axis的值必须在[0,R]范围内。当 axis=0 时,若输入Tensor的维度为 :math:`[d_0, d_1,… d_n]` ,则输出张量的Tensor维度为 :math:`[1,d_0 * d_1 *… d_n]` ,默认值为1。 + - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回: 一个 2-D Tensor,它包含输入Tensor的数据,但维度发生变化。输入的[0, axis)维将沿axis展平到输出Tensor的0维度,剩余的输入维数展平到输出的1维度。数据类型与输入x相同。 + +返回类型: Variable + +抛出异常: + - ValueError: 如果 x 不是一个Variable + - ValueError: 如果axis的范围不在 [0, rank(x)] 范围内 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + x = fluid.layers.data(name="x", shape=[4, 4, 3], append_batch_size=False, dtype="float32") + # x shape is [4, 4, 3] + out = fluid.layers.flatten(x=x, axis=2) + # out shape is [16, 3] + + + diff --git a/doc/paddle/api/paddle/fluid/layers/floor_cn.rst b/doc/paddle/api/paddle/fluid/layers/floor_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8a2f6cb928fde626dd92ee1e739603f7003b2d91 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/floor_cn.rst @@ -0,0 +1,32 @@ +.. _cn_api_fluid_layers_floor: + +floor +------------------------------- + +.. py:function:: paddle.fluid.layers.floor(x, name=None) + + + + +向下取整函数。 + +.. 
math:: + out = \left \lfloor x \right \rfloor + +参数: + - **x** - 该OP的输入为多维Tensor。数据类型必须为float32或float64。 + - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:输出为Tensor,与 ``x`` 维度相同、数据类型相同。 + +返回类型:Variable + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + data1 = fluid.layers.fill_constant(shape=[3, 2], value=2.5, dtype='float32') # [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]] + data2 = fluid.layers.fill_constant(shape=[2, 3], value=-2.5, dtype='float64') # [[-2.5, -2.5, -2.5], [-2.5, -2.5, -2.5]] + result1 = fluid.layers.floor(data1) # [[2., 2.], [2., 2.], [2., 2.]] + result2 = fluid.layers.floor(data2) # [[-3., -3., -3.], [-3., -3., -3.]] diff --git a/doc/paddle/api/paddle/fluid/layers/fsp_matrix_cn.rst b/doc/paddle/api/paddle/fluid/layers/fsp_matrix_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1fafa4800c82ea7644ff3f3953f1e02f21197b45 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/fsp_matrix_cn.rst @@ -0,0 +1,40 @@ +.. _cn_api_fluid_layers_fsp_matrix: + +fsp_matrix +------------------------------- + +.. py:function:: paddle.fluid.layers.fsp_matrix(x, y) + + + + +**FSP matrix op** + +fsp_matrix op用于计算两个4-D Tensor特征图的求解过程(FSP)矩阵。假设特征图x的形状为 :math:`[x\_channel,h,w]` ,特征图y的形状为 :math:`[y\_channel,h,w]` ,fsp_matrix op分两步得到x和y的fsp矩阵: + +1.将x reshape到形状为 :math:`[x\_channel,h*w]` 的矩阵,将y reshape到形状为 :math:`[h*w,y\_channel]` 的矩阵。 + +2.对x和y做矩阵乘法得到形状为 :math:`[x\_channel,y\_channel]` 的fsp矩阵。 + +输出是一个batch的fsp矩阵。 + +参数: + - **x** (Variable): 一个形状为 :math:`[batch\_size, x\_channel, height, width]` 的 4-D 特征图Tensor, 数据类型为float32或float64。 + - **y** (Variable):一个形状为 :math:`[batch\_size, y\_channel, height, width]` 的 4-D 特征图Tensor, 数据类型为float32或float64。y_channel可以与输入(x)的x_channel不同,而其他维度必须与输入(x)相同。 + +返回:一个形状为 :math:`[batch\_size, x\_channel, y\_channel]` 的fsp矩阵, 是一个 3-D Tensor,数据类型与输入数据类型一致。其中,x_channel是输入x的通道数,y_channel是输入y的通道数。数据类型为float32或float64。 + +返回类型:Variable + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + data = fluid.layers.data(name='data', shape=[3, 32, 32]) + feature_map_0 = fluid.layers.conv2d(data, num_filters=2, + filter_size=3) + feature_map_1 = fluid.layers.conv2d(feature_map_0, num_filters=2, + filter_size=1) + loss = fluid.layers.fsp_matrix(feature_map_0, feature_map_1) + diff --git a/doc/paddle/api/paddle/fluid/layers/gather_cn.rst b/doc/paddle/api/paddle/fluid/layers/gather_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9d0caee7b263b87387fcffe9fc24861b8e3950f3 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/gather_cn.rst @@ -0,0 +1,55 @@ +.. _cn_api_fluid_layers_gather: + +gather +------------------------------- + +.. py:function:: paddle.fluid.layers.gather(input, index, overwrite=True) + + + + +根据索引 ``index`` 获取输入 ``input`` 的最外层维度的条目,并将它们拼接在一起。 + +.. math:: + + Out=X[Index] + +.. code-block:: text + + X = [[1, 2], + [3, 4], + [5, 6]] + + Index = [1, 2] + + Then: + + Out = [[3, 4], + [5, 6]] + + +参数: + - **input** (Tensor) - 输入, 秩 ``rank >= 1`` , 支持的数据类型包括 int32、int64、float32、float64 和 uint8 (CPU)、float16(GPU) 。 + - **index** (Tensor) - 索引,秩 ``rank = 1``, 数据类型为 int32 或 int64。 + - **overwrite** (bool) - 具有相同索引时在反向更新梯度的模式。如果为 ``True`` ,则使用覆盖模式更新相同索引的梯度;如果为 ``False`` ,则使用累积模式更新相同索引的梯度。默认值为 ``True`` 。 + +返回:和输入的秩相同的输出张量。 + + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + x = fluid.layers.data(name='x', shape=[-1, 5], dtype='float32') + index = fluid.layers.data(name='index', shape=[-1, 1], dtype='int32') + output = fluid.layers.gather(x, index) + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/gather_nd_cn.rst b/doc/paddle/api/paddle/fluid/layers/gather_nd_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8a570ff2e2840215d6d84712e244c192a2344d22 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/gather_nd_cn.rst @@ -0,0 +1,74 @@ +.. _cn_api_fluid_layers_gather_nd: + +gather_nd +------------------------------- + +.. py:function:: paddle.fluid.layers.gather_nd(input, index, name=None) + + +该OP是 :code:`gather` 的高维推广,并且支持多轴同时索引。 :code:`index` 是一个K维度的张量,它可以认为是从 :code:`input` 中取K-1维张量,每一个元素是一个切片: + +.. math:: + output[(i_0, ..., i_{K-2})] = input[index[(i_0, ..., i_{K-2})]] + +显然, :code:`index.shape[-1] <= input.rank` 并且输出张量的维度是 :code:`index.shape[:-1] + input.shape[index.shape[-1]:]` 。 + +示例: + +:: + + 给定: + input = [[[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]], + [[12, 13, 14, 15], + [16, 17, 18, 19], + [20, 21, 22, 23]]] + input.shape = (2, 3, 4) + + - 案例 1: + index = [[1]] + + gather_nd(input, index) + = [input[1, :, :]] + = [[12, 13, 14, 15], + [16, 17, 18, 19], + [20, 21, 22, 23]] + + - 案例 2: + + index = [[0,2]] + gather_nd(input, index) + = [input[0, 2, :]] + = [8, 9, 10, 11] + + - 案例 3: + + index = [[1, 2, 3]] + gather_nd(input, index) + = [input[1, 2, 3]] + = [23] + + +参数: + - **input** (Tensor) - 输入Tensor,数据类型可以是int32,int64,float32,float64, bool。 + - **index** (Tensor) - 输入的索引Tensor,其数据类型为int32或者int64。它的维度 :code:`index.rank` 必须大于1,并且 :code:`index.shape[-1] <= input.rank` 。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:shape为index.shape[:-1] + input.shape[index.shape[-1]:]的Tensor|LoDTensor,数据类型与 :code:`input` 一致。 + + +**代码示例**: + +.. code-block:: python + + import paddle + import paddle.fluid as fluid + x = fluid.layers.data(name='x', shape=[3, 4, 5], dtype='float32') + index = fluid.layers.data(name='index', shape=[2, 2], dtype='int32') + output = fluid.layers.gather_nd(x, index) + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/gather_tree_cn.rst b/doc/paddle/api/paddle/fluid/layers/gather_tree_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1d1d4629b3b567c09b9b61a4794451cd8180fa74 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/gather_tree_cn.rst @@ -0,0 +1,68 @@ +.. _cn_api_fluid_layers_gather_tree: + +gather_tree +------------------------------- + +.. py:function:: paddle.fluid.layers.gather_tree(ids, parents) + + + + +该OP在整个束搜索(Beam Search)结束后使用。在搜索结束后,可以获得每个时间步选择的的候选词id及其对应的在搜索树中的parent节点, ``ids`` 和 ``parents`` 的形状布局均为 :math:`[max\_time, batch\_size, beam\_size]` ,该OP从最后一个时间步回溯产生完整的id序列。 + + +示例: + +:: + + 给定: + ids = [[[2 2] + [6 1]] + [[3 9] + [6 1]] + [[0 1] + [9 0]]] + parents = [[[0 0] + [1 1]] + [[1 0] + [1 0]] + [[0 0] + [0 1]]] + + 结果: + gather_tree(ids, parents) + = [[[2 2] + [1 6]] + [[3 3] + [6 1]] + [[0 1] + [9 0]]] + + + +参数: + - **ids** (Variable) - 形状为 :math:`[length, batch\_size, beam\_size]` 的三维Tensor,数据类型是int32或int64。包含了所有时间步选择的id。 + - **parents** (Variable) - 形状和数据类型均与 ``ids`` 相同的Tensor。包含了束搜索中每一时间步所选id对应的parent。 + +返回:和 ``ids`` 具有相同形状和数据类型的Tensor。包含了根据parent回溯而收集产生的完整id序列。 + +返回类型:Variable + +**代码示例**: + +.. 
code-block:: python + + import paddle.fluid as fluid + + ids = fluid.data(name='ids', + shape=[5, 2, 2], + dtype='int64') + parents = fluid.data(name='parents', + shape=[5, 2, 2], + dtype='int64') + final_sequences = fluid.layers.gather_tree(ids, parents) + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/gaussian_random_cn.rst b/doc/paddle/api/paddle/fluid/layers/gaussian_random_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..059f19be02e3982a43bcec9a3ccbcd25e9bda5fd --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/gaussian_random_cn.rst @@ -0,0 +1,70 @@ +.. _cn_api_fluid_layers_gaussian_random: + +gaussian_random +------------------------------- + +.. py:function:: paddle.fluid.layers.gaussian_random(shape, mean=0.0, std=1.0, seed=0, dtype='float32', name=None) + + + + +该OP返回数值符合高斯随机分布的Tensor,形状为 ``shape``,数据类型为 ``dtype``。 + +参数: + - **shape** (list|tuple|Tensor) - 生成的随机Tensor的形状。如果 ``shape`` 是list、tuple,则其中的元素可以是int,或者是形状为[1]且数据类型为int32、int64的Tensor。如果 ``shape`` 是Tensor,则是数据类型为int32、int64的1-D Tensor。 + - **mean** (float|int, 可选) - 输出Tensor的均值,支持的数据类型:float、int。默认值为0.0。 + - **std** (float|int, 可选) - 输出Tensor的标准差,支持的数据类型:float、int。默认值为1.0。 + - **seed** (int, 可选) - 随机数种子,默认值为 0。注:seed 设置为 0 表示使用系统的随机数种子。注意如果 seed 不为 0,则此算子每次将始终生成相同的随机数。 + - **dtype** (str|np.dtype|core.VarDesc.VarType, 可选) - 输出Tensor的数据类型,支持float32、float64。默认值为float32。 + - **name** (str, 可选) - 输出的名字。一般无需设置,默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 + +返回: + Tensor:符合高斯随机分布的Tensor,形状为 ``shape``,数据类型为 ``dtype``。 + +抛出异常: + - ``TypeError`` - 如果 ``shape`` 的类型不是list、tuple、Tensor。 + - ``TypeError`` - 如果 ``dtype`` 不是float32、float64。 + +**代码示例**: + +.. code-block:: python + + # 静态图使用 + import numpy as np + from paddle import fluid + + x = fluid.layers.gaussian_random((2, 3), std=2., seed=10) + + place = fluid.CPUPlace() + exe = fluid.Executor(place) + start = fluid.default_startup_program() + main = fluid.default_main_program() + + exe.run(start) + x_np, = exe.run(main, feed={}, fetch_list=[x]) + + x_np + # array([[2.3060477, 2.676496 , 3.9911983], + # [0.9990833, 2.8675377, 2.2279181]], dtype=float32) + + +.. code-block:: python + + # 动态图使用 + import numpy as np + from paddle import fluid + import paddle.fluid.dygraph as dg + + place = fluid.CPUPlace() + with dg.guard(place) as g: + x = fluid.layers.gaussian_random((2, 4), mean=2., dtype="float32", seed=10) + x_np = x.numpy() + x_np + # array([[2.3060477 , 2.676496 , 3.9911983 , 0.9990833 ], + # [2.8675377 , 2.2279181 , 0.79029655, 2.8447366 ]], dtype=float32) + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/gelu_cn.rst b/doc/paddle/api/paddle/fluid/layers/gelu_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9541348c8dbf6fa5ce71f79dad2c052ddb0da37b --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/gelu_cn.rst @@ -0,0 +1,79 @@ +.. _cn_api_fluid_layers_gelu: + +gelu +------------------------------- + +.. py:function:: paddle.fluid.layers.gelu(x) + + + + +逐元素计算 Gelu激活函数。更多细节请参考 `Gaussian Error Linear Units `_ 。 + +如果使用近似计算: + +.. math:: + out = 0.5 * x * (1 + tanh(\sqrt{\frac{2}{\pi}} * (x + 0.044715x^{3}))) + +如果不使用近似计算: + +.. math:: + out = 0.5 * x * (1 + erf(\frac{x}{\sqrt{2}})) + +参数: + - **x** (Variable) - Gelu Op 的输入,多维 Tensor 或 LoDTensor,数据类型为 float32 或 float64。 + - **approximate** (bool, 可选) - 是否使用近似计算,默认值为 False。 + +返回: + - 多维 Tensor 或 LoDTensor, 数据类型为 float32 或 float64, 和输入 x 的数据类型相同,形状和输入 x 相同。 + +返回类型: + - Variable + +**代码示例**: + +.. 
code-block:: python + + # declarative mode + import numpy as np + from paddle import fluid + + x = fluid.data(name="x", shape=(-1, 3), dtype="float32") + y = fluid.layers.gelu(x) + + place = fluid.CPUPlace() + exe = fluid.Executor(place) + start = fluid.default_startup_program() + main = fluid.default_main_program() + + data = np.random.randn(2, 3).astype("float32") + exe.run(start) + + y_np, = exe.run(main, feed={"x": data}, fetch_list=[y]) + + data + # array([[ 0.87165993, -1.0541513 , -0.37214822], + # [ 0.15647964, 0.32496083, 0.33045998]], dtype=float32) + y_np + # array([[ 0.70456535, -0.15380788, -0.13207214], + # [ 0.08796856, 0.20387867, 0.2080159 ]], dtype=float32) + +.. code-block:: python + + # imperative mode + import numpy as np + from paddle import fluid + import paddle.fluid.dygraph as dg + + data = np.random.randn(2, 3).astype("float32") + place = fluid.CPUPlace() + with dg.guard(place) as g: + x = dg.to_variable(data) + y = fluid.layers.gelu(x) + y_np = y.numpy() + data + # array([[ 0.87165993, -1.0541513 , -0.37214822], + # [ 0.15647964, 0.32496083, 0.33045998]], dtype=float32) + y_np + # array([[ 0.70456535, -0.15380788, -0.13207214], + # [ 0.08796856, 0.20387867, 0.2080159 ]], dtype=float32) diff --git a/doc/paddle/api/paddle/fluid/layers/generate_mask_labels_cn.rst b/doc/paddle/api/paddle/fluid/layers/generate_mask_labels_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..cba56f7c897f92a623980b8b5c06b0b95df1c25d --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/generate_mask_labels_cn.rst @@ -0,0 +1,90 @@ +.. _cn_api_fluid_layers_generate_mask_labels: + +generate_mask_labels +------------------------------- + +.. py:function:: paddle.fluid.layers.generate_mask_labels(im_info, gt_classes, is_crowd, gt_segms, rois, labels_int32, num_classes, resolution) + + + + +**为Mask-RCNN生成mask标签** + +对于给定的 RoI (Regions of Interest) 和 输入ground truth的分类标签和分割的坐标标签,采样出前景RoI,并返回其在输入 ``rois`` 中索引位置,并对每个RoI生成 :math:`K*M^{2}` 的二值mask标签。K为类别个数,M是RoI特征图大小。这些输出目标一般用于计算mask分支的损失。 + +请注意分割groud-truth(真实标签,下简称GT)数据格式,这里要求分割标签为坐标信息,假如,第一个实例有两个GT对象。 第二个实例有一个GT对象,该GT分割标签是两段(例如物体中间被隔开),输入标签格式组织如下: + + +:: + + #[ + # [[[229.14, 370.9, 229.14, 370.9, ...]], + # [[343.7, 139.85, 349.01, 138.46, ...]]], # 第0个实例对象 + # [[[500.0, 390.62, ...],[115.48, 187.86, ...]]] # 第1个实例对象 + #] + + batch_masks = [] + for semgs in batch_semgs: + gt_masks = [] + for semg in semgs: + gt_segm = [] + for polys in semg: + gt_segm.append(np.array(polys).reshape(-1, 2)) + gt_masks.append(gt_segm) + batch_masks.append(gt_masks) + + + place = fluid.CPUPlace() + feeder = fluid.DataFeeder(place=place, feed_list=feeds) + feeder.feed(batch_masks) + + +参数: + - **im_info** (Variable) – 维度为[N,3]的2-D Tensor,数据类型为float32。 N是batch size,其每个元素是图像的高度、宽度、比例,比例是图片预处理时resize之后的大小和原始大小的比例 :math:`\frac{target\_size}{original\_size}` 。 + - **gt_classes** (Variable) – 维度为[M,1]的2-D LoDTensor,数据类型为int32,LoD层数为1。 M是的groud-truth总数,其每个元素是类别索引。 + - **is_crowd** (Variable) – 维度和 ``gt_classes`` 相同的 LoDTensor,数据类型为int32,每个元素指示一个ground-truth是否为crowd(crowd表示一组对象的集合)。 + - **gt_segms** (Variable) – 维度为[S,2]的2D LoDTensor,它的LoD层数为3,数据类型为float32。通常用户不需要理解LoD,但用户应该在Reader中返回正确的数据格式。LoD[0]表示每个实例中GT对象的数目。 LoD[1]表示每个GT对象的标签分段数。LoD[2]表示每个分段标签多边形(polygon)坐标点的个数。S为所有示例的标签的多边形坐标点的总数。每个元素是(x,y)坐标点。 + - **rois** (Variable) – 维度维度[R,4]的2-D LoDTensor,LoD层数为1,数据类型为float32。 R是RoI的总数,其中每个元素是在原始图像范围内具有(xmin,ymin,xmax,ymax)格式的bounding box。 + - **labels_int32** (Variable) – 形为[R,1]且类型为int32的2-D LoDTensor,数据类型为int32。 R与 ``rois`` 
中的R含义相同。每个元素表示RoI框的一个类别标签索引。 + - **num_classes** (int) – 类别数目。 + - **resolution** (int) – 特征图分辨率大小。 + +返回: + - mask_rois (Variable): 维度为[P,4]的2-D LoDTensor,数据类型为float32。P是采样出的RoI总数,每个元素都是在原始图像大小范围内具有[xmin,ymin,xmax,ymax]格式的bounding box。 + - mask_rois_has_mask_int32(Variable):维度为[P,1]的2-D LoDTensor,数据类型为int32。每个元素表示采样出的RoI在输入 ``rois`` 内的位置索引。 + - mask_int32(Variable):维度为[P,K * M * M]的2-D LoDTensor,数据类型为int32。K为种类数,M为特征图的分辨率大小,每个元素都是二值mask标签。 + +返回类型:tuple(Variable) + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + + im_info = fluid.data(name="im_info", shape=[None, 3], dtype="float32") + gt_classes = fluid.data(name="gt_classes", shape=[None, 1], + dtype="float32", lod_level=1) + is_crowd = fluid.data(name="is_crowd", shape=[None, 1], + dtype="float32", lod_level=1) + gt_masks = fluid.data(name="gt_masks", shape=[None, 2], + dtype="float32", lod_level=3) + # rois, roi_labels 可以是fluid.layers.generate_proposal_labels的输出 + rois = fluid.data(name="rois", shape=[None, 4], + dtype="float32", lod_level=1) + roi_labels = fluid.data(name="roi_labels", shape=[None, 1], + dtype="int32", lod_level=1) + mask_rois, mask_index, mask_int32 = fluid.layers.generate_mask_labels( + im_info=im_info, + gt_classes=gt_classes, + is_crowd=is_crowd, + gt_segms=gt_masks, + rois=rois, + labels_int32=roi_labels, + num_classes=81, + resolution=14) + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/generate_proposal_labels_cn.rst b/doc/paddle/api/paddle/fluid/layers/generate_proposal_labels_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..588f4df95a4670e089063f7a080ecd9a7a156b06 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/generate_proposal_labels_cn.rst @@ -0,0 +1,73 @@ +.. _cn_api_fluid_layers_generate_proposal_labels: + +generate_proposal_labels +------------------------------- + +.. 
py:function:: paddle.fluid.layers.generate_proposal_labels(rpn_rois, gt_classes, is_crowd, gt_boxes, im_info, batch_size_per_im=256, fg_fraction=0.25, fg_thresh=0.25, bg_thresh_hi=0.5, bg_thresh_lo=0.0, bbox_reg_weights=[0.1, 0.1, 0.2, 0.2], class_nums=None, use_random=True, is_cls_agnostic=False, is_cascade_rcnn=False) + + + + +**注意:该OP无对应的反向OP** + +该OP根据RPN预测产出的bounding boxes和groundtruth,抽取出用来计算loss的foreground boxes and background boxes。 + +RPN的输出经过 ``generate_proposals OP`` 的处理,产出 ``RPN RoIs`` ,即该OP的输入。然后,在该OP中按以下规则对 ``RPN RoIs`` 进行分类: + +- 与某个groundtruth的重叠面积大于 ``fg_thresh`` ,则该box被标记为foreground box。 +- 与某个groundtruth的重叠面积大于 ``bg_thresh_lo`` 且小于 ``bg_thresh_hi`` ,则该box被标记为background box。 + +按上述规则筛选出一批boxes后,在对这些boxes做随机采样,以保证foreground boxes的数量不高于batch_size_per_im * fg_fraction。 + +对最终得到的boxes, 我们给它们分配类别标签和回归目标(box label),并产出 ``bboxInsideWeights`` 和 ``BboxOutsideWeights`` 。 + +参数: + - **rpn_rois** (Variable) – Shape为 ``[N, 4]`` 的2-D LoDTensor。 其中,N为RoIs的个数。每个RoI以 :math:`[x_{min}, y_{min}, x_{max}, y_{max}]` 的格式表示,其中,:math:`x_{min}` 和 :math:`y_{min}` 为RoI的左上角坐标,:math:`x_{max}` 和 :math:`y_{max}` 为RoI的右下角坐标。数据类型支持float32和float64。 + - **gt_classes** (Variable) – Shape为 ``[M, 1]`` 的2-D LoDTensor,M为groundtruth boxes的数量。用于表示groundtruth boxes的类别ID。数据类型支持int32。 + - **is_crowd** (Variable) –Shape为 ``[M, 1]`` 的2-D LoDTensor,M为groundtruth boxes的数量。用于标记boxes是否是crowd。数据类型支持int32。 + - **gt_boxes** (Variable) – Shape为 ``[M, 4]`` 的2-D LoDTensor,M为groundtruth boxes的数量。每个box以 :math:`[x_{min}, y_{min}, x_{max}, y_{max}]` 的格式表示。 + - **im_info** (Variable) - Shape为 ``[N,3]`` 的2-D张量,表示原始图像的大小信息。信息包含原始图像宽、高和 ``feature map`` 相对于原始图像缩放的比例。 + - **batch_size_per_im** (int,可选) – 整型数字。每张图片抽取出的的RoIs的数目。数据类型支持int32。缺省值为256。 + - **fg_fraction** (float,可选) – 浮点数值。在单张图片中,foreground boxes占所有boxes的比例。数据类型支持float32。缺省值为0.25。 + - **fg_thresh** (float,可选) – 浮点数值。foreground重叠阀值,用于筛选foreground boxes。数据类型支持float32。缺省值为0.25。 + - **bg_thresh_hi** (float,可选) – 浮点数值。background重叠阀值的上界,用于筛选background boxes。数据类型支持float32。缺省值为0.5。 + - **bg_thresh_lo** (float,可选) – 浮点数值。background重叠阀值的下界,用于筛选background boxes。数据类型支持float32。缺省值为0.0。 + - **bbox_reg_weights** (list|tuple,可选) – 列表或元组。Box 回归权重。数据类型支持float32。缺省值为[0.1,0.1,0.2,0.2]。 + - **class_nums** (int,可选) – 整型数字。类别数目。数据类型支持int32。缺省值为None。 + - **use_random** (bool,可选) – 布尔类型。是否使用随机采样来选择foreground boxes和background boxes。缺省值为True。 + - **is_cls_agnostic** (bool,可选)- 布尔类型。是否忽略类别,只做位置回归。缺省值为False。 + - **is_cascade_rcnn** (bool,可选)- 布尔类型。是否为 cascade RCNN 模型,为True时采样策略发生变化。缺省值为False。 + + +返回:元组,格式为 ``(rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights)`` ,其中,各个元素解释如下: + +- **rois** - Shape为 ``[P, 4]`` 的2-D LoDTensor,P一般是 ``batch_size_per_im * batch_size`` 。每个RoIs以 ``[xmin, ymin, xmax, ymax]`` 的格式表示。数据类型与 ``rpn_rois`` 一致。 +- **labels_int32** - Shape为 ``[P, 1]`` 的2-D LoDTensor,P一般是 ``batch_size_per_im * batch_size`` 。表示每个RoI的类别ID。数据类型为int32。 +- **bbox_targets** - Shape为 ``[P, 4 * class_num]`` 的2-D LoDTensor,表示所有RoIs的回归目标(box label)。数据类型与 ``rpn_rois`` 一致。 +- **bbox_inside_weights** - Shape为 ``[P, 4 * class_num]`` 的2-D LoDTensor。foreground boxes回归loss的权重。数据类型与 ``rpn_rois`` 一致。 +- **bbox_outside_weights** - Shape为 ``[P, 4 * class_num]`` 的2-D LoDTensor。回归loss的权重。数据类型与 ``rpn_rois`` 一致。 + +返回类型:元组 + + + +**代码示例**: + +.. 
code-block:: python + + import paddle.fluid as fluid + rpn_rois = fluid.layers.data(name='rpn_rois', shape=[2, 4], + append_batch_size=False, dtype='float32') + gt_classes = fluid.layers.data(name='gt_classes', shape=[8, 1], + append_batch_size=False, dtype='float32') + is_crowd = fluid.layers.data(name='is_crowd', shape=[8, 1], + append_batch_size=False, dtype='float32') + gt_boxes = fluid.layers.data(name='gt_boxes', shape=[8, 4], + append_batch_size=False, dtype='float32') + im_info = fluid.layers.data(name='im_info', shape=[10, 3], + append_batch_size=False, dtype='float32') + rois, labels, bbox, inside_weights, + outside_weights = fluid.layers.generate_proposal_labels( + rpn_rois, gt_classes, is_crowd, gt_boxes, im_info, + class_nums=10) + diff --git a/doc/paddle/api/paddle/fluid/layers/generate_proposals_cn.rst b/doc/paddle/api/paddle/fluid/layers/generate_proposals_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d1e40bf64f406de680a062da145218e4a8fcbea9 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/generate_proposals_cn.rst @@ -0,0 +1,64 @@ +.. _cn_api_fluid_layers_generate_proposals: + +generate_proposals +------------------------------- + +.. py:function:: paddle.fluid.layers.generate_proposals(scores, bbox_deltas, im_info, anchors, variances, pre_nms_top_n=6000, post_nms_top_n=1000, nms_thresh=0.5, min_size=0.1, eta=1.0, name=None) + + + + + +该OP根据每个检测框为foreground对象的概率,推选生成用于后续检测网络的RoIs。 +其中的检测框根据 ``anchors`` 和 ``bbox_deltas`` 计算得到。 + + +该OP通过以下步骤生成 ``RoIs`` : + + 1、通过转置操作将 ``scores`` 和 ``bbox_deltas`` 的大小分别调整为 ``(H * W * A,1)`` 和 ``(H * W * A,4)`` 。 + + 2、根据 ``anchors`` 和 ``bbox_deltas`` 计算出候选框的位置。 + + 3、Clip boxes to image。 + + 4、删除面积较小的候选框。 + + 5、通过NMS选出满足条件的候选框作为结果。 + +参数: + - **scores** (Variable) - Shape为 ``[N,A,H,W]`` 的4-D Tensor,表示每个框包含object的概率。N是批量大小,A是anchor数,H和W是feature map的高度和宽度。数据类型支持float32。 + - **bbox_deltas** (Variable)- Shape为 ``[N,4 * A,H,W]`` 的4-D Tensor,表示预测出的候选框的位置和anchor的位置之间的距离。数据类型支持float32。 + - **im_info** (Variable) - Shape为 ``[N,3]`` 的2-D张量,表示原始图像的大小信息。信息包含原始图像宽、高和feature map相对于原始图像缩放的比例。 + - **anchors** (Variable) - Shape为 ``[H,W,A,4]`` 的4-D Tensor。H和W是feature map的高度和宽度。A是每个位置的框的数量。每个anchor以 ``(xmin,ymin,xmax,ymax)`` 的格式表示,其中, ``xmin`` 和 ``ymin`` 为左上角的坐标, ``xmax`` 和 ``ymax`` 为右下角的坐标。 + - **variances** (Variable) - Shape为 ``[H,W,A,4]`` 的4-D Tensor,表示 ``anchors`` 的方差。每个anchor的方差都是 ``(xcenter,ycenter,w,h)`` 的格式表示。 + - **pre_nms_top_n** (int,可选) - 整型数字。每张图在NMS操作之前要保留的总框数。数据类型仅支持int32。缺省值为6000。 + - **post_nms_top_n** (int,可选) - 整型数字。每个图在NMS后要保留的总框数。数据类型仅支持int32。缺省值为1000。 + - **nms_thresh** (float,可选) - 浮点型数字。NMS中的阈值。数据类型仅支持float32。缺省值为0.5。 + - **min_size** (float,可选) - 浮点型数字。根据宽和高过滤候选框的阈值,宽或高小于该阈值的候选框将被过滤掉。数据类型仅支持float32。缺省值为0.1。 + - **eta** (float,可选) - 浮点型数字。自适应阈值的衰减系数。仅在自适应NMS中且自适应阈值大于0.5时生效,在每次迭代中 ``adaptive_threshold = adaptive_treshold * eta`` 。缺省值为1.0。 + + +返回: 元组,格式为 ``(rpn_rois, rpn_roi_probs)`` + +- **rpn_rois** (Variable) - 表示产出的RoIs, shape为 ``[N, 4]`` 的2D LoDTensor, N为RoIs的数量。数据类型与 ``scores`` 一致。 +- **rpn_roi_probs** (Variable) - 表示RoIs的得分,shape为 ``[N, 1]`` ,N为RoIs的数量。数据类型与 ``scores`` 一致。 + +返回类型:元组 + +**代码示例**: + +.. 
code-block:: python + + import paddle.fluid as fluid + scores = fluid.layers.data(name='scores', shape=[2, 4, 5, 5], + append_batch_size=False, dtype='float32') + bbox_deltas = fluid.layers.data(name='bbox_deltas', shape=[2, 16, 5, 5], + append_batch_size=False, dtype='float32') + im_info = fluid.layers.data(name='im_info', shape=[2, 3], + append_batch_size=False, dtype='float32') + anchors = fluid.layers.data(name='anchors', shape=[5, 5, 4, 4], + append_batch_size=False, dtype='float32') + variances = fluid.layers.data(name='variances', shape=[5, 5, 10, 4], + append_batch_size=False, dtype='float32') + rois, roi_probs = fluid.layers.generate_proposals(scores, bbox_deltas, + im_info, anchors, variances) diff --git a/doc/paddle/api/paddle/fluid/layers/get_tensor_from_selected_rows_cn.rst b/doc/paddle/api/paddle/fluid/layers/get_tensor_from_selected_rows_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3a9b9268aa345959d50b69667659b77203777a7c --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/get_tensor_from_selected_rows_cn.rst @@ -0,0 +1,56 @@ +.. _cn_api_fluid_layers_get_tensor_from_selected_rows: + +get_tensor_from_selected_rows +------------------------------- + +.. py:function:: paddle.fluid.layers.get_tensor_from_selected_rows(x, name=None) + + + + +该OP从SelectedRows类型的输入中获取向量数据,以LoDTensor的形式输出。 + + +:: + + 例如: + + 输入为SelectedRows类型: + x.rows = [0, 5, 5, 4, 19] + x.height = 20 + x.value = [[1, 1] [2, 2] [2, 2] [3, 3] [6, 6]] + + 输出为LoDTensor: + out.shape = [5, 2] + out.data = [[1, 1], + [2, 2], + [2, 2], + [3, 3], + [6, 6]] + + +参数: + - **x** (SelectedRows) - SelectedRows类型的输入,数据类型为float32,float64,int32或int64。 + - **name** (str) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None。 + +返回: 从SelectedRows中转化而来的LoDTensor,数据类型和输入一致。 + +返回类型: Variable + +**代码示例:** + +.. code-block:: python + + import paddle.fluid as fluid + b = fluid.default_main_program().global_block() + input = b.create_var(name="X", dtype="float32", persistable=True, type=fluid.core.VarDesc.VarType.SELECTED_ROWS) + out = fluid.layers.get_tensor_from_selected_rows(input) + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/greater_equal_cn.rst b/doc/paddle/api/paddle/fluid/layers/greater_equal_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..46b70775cdb7f478f694f79795d0f6eb46bcb062 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/greater_equal_cn.rst @@ -0,0 +1,38 @@ +.. _cn_api_fluid_layers_greater_equal: + +greater_equal +------------------------------- + +.. py:function:: paddle.fluid.layers.greater_equal(x, y, cond=None, name=None) + + + + +该OP逐元素地返回 :math:`x >= y` 的逻辑值,使用重载算子 `>=` 可以有相同的计算函数效果。 + + +参数: + - **x** (Variable) – 进行比较的第一个输入,是一个多维的Tensor,数据类型可以是float32,float64,int32,int64。 + - **y** (Variable) – 进行比较的第二个输入,是一个多维的Tensor,数据类型可以是float32,float64,int32,int64。 + - **cond** (Variable,可选) – 如果为None,则创建一个Tensor来作为进行比较的输出结果,该Tensor的shape,数据类型和输入x一致;如果不为None,则将Tensor作为该OP的输出,数据shape和数据类型需要和输入x一致。默认值为None。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:输出结果的Tensor,数据的shape和输入x一致。 + +返回类型:Variable,数据类型为bool类型。 + +**代码示例**: + +.. 
code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.layers as layers + import numpy as np + label = layers.assign(np.array([2, 2], dtype='int32')) + limit = layers.assign(np.array([2, 3], dtype='int32')) + out = fluid.layers.greater_equal(x=label, y=limit) #out=[True, False] + out_1 = label >= limit #out1=[True, False] + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/greater_than_cn.rst b/doc/paddle/api/paddle/fluid/layers/greater_than_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2cdda692360290f4d491bb267db78a96dbb4a7dd --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/greater_than_cn.rst @@ -0,0 +1,37 @@ +.. _cn_api_fluid_layers_greater_than: + +greater_than +------------------------------- + +.. py:function:: paddle.fluid.layers.greater_than(x, y, cond=None, name=None) + + + + +该OP逐元素地返回 :math:`x > y` 的逻辑值,使用重载算子 `>` 可以有相同的计算函数效果。 + +参数: + - **x** (Variable) – 进行比较的第一个输入,是一个多维的Tensor,数据类型可以是float32,float64,int32,int64。 + - **y** (Variable) – 进行比较的第二个输入,是一个多维的Tensor,数据类型可以是float32,float64,int32,int64。 + - **cond** (Variable,可选) – 如果为None,则创建一个Tensor来作为进行比较的输出结果,该Tensor的shape和数据类型和输入x一致;如果不为None,则将Tensor作为该OP的输出,数据类型和数据shape需要和输入x一致。默认值为None。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:输出结果的Tensor,数据的shape和输入x一致。 + +返回类型:Variable,数据类型为bool类型。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.layers as layers + import numpy as np + label = layers.assign(np.array([2, 3], dtype='int32')) + limit = layers.assign(np.array([3, 2], dtype='int32')) + out = fluid.layers.greater_than(x=label, y=limit) #out=[False, True] + out1 = label > limit #out1=[False, True] + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/grid_sampler_cn.rst b/doc/paddle/api/paddle/fluid/layers/grid_sampler_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..56299e01f7e6d77a37ae9dee3921fcc5821c59a9 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/grid_sampler_cn.rst @@ -0,0 +1,84 @@ +.. _cn_api_fluid_layers_grid_sampler: + +grid_sampler +------------------------------- + +.. py:function:: paddle.fluid.layers.grid_sampler(x, grid, name=None) + + + + +该OP基于flow field网格的对输入X进行双线性插值采样。网格通常由affine_grid生成, shape为[N, H, W, 2],是shape为[N, H, W]的采样点张量的(x, y)坐标。 +其中,x坐标是对输入数据X的第四个维度(宽度维度)的索引,y坐标是第三维度(高维度)的索引,最终输出采样值为采样点的4个最接近的角点的双线性插值结果,输出张量的shape为[N, C, H, W]。 + +step 1: + + 得到(x, y)网格坐标,缩放到[0,h -1/W-1] + +.. code-block:: text + + grid_x = 0.5 * (grid[:, :, :, 0] + 1) * (W - 1) grid_y = 0.5 * (grid[:, :, :, 1] + 1) * (H - 1) + +step 2: + + 在每个[H, W]区域用网格(X, y)作为输入数据X的索引,并将双线性插值点值由4个最近的点表示。 + +.. 
code-block:: text + + wn ------- y_n ------- en + | | | + | d_n | + | | | + x_w --d_w-- grid--d_e-- x_e + | | | + | d_s | + | | | + ws ------- y_s ------- wn + + x_w = floor(x) // west side x coord + x_e = x_w + 1 // east side x coord + y_n = floor(y) // north side y coord + y_s = y_s + 1 // south side y coord + d_w = grid_x - x_w // distance to west side + d_e = x_e - grid_x // distance to east side + d_n = grid_y - y_n // distance to north side + d_s = y_s - grid_y // distance to south side + wn = X[:, :, y_n, x_w] // north-west point value + en = X[:, :, y_n, x_e] // north-east point value + ws = X[:, :, y_s, x_w] // south-east point value + es = X[:, :, y_s, x_w] // north-east point value + + + output = wn * d_e * d_s + en * d_w * d_s + + ws * d_e * d_n + es * d_w * d_n + +参数: + - **x** (Variable): 输入张量,维度为 :math:`[N, C, H, W]` 的4-D Tensor,N为批尺寸,C是通道数,H是特征高度,W是特征宽度, 数据类型为float32或float64。 + - **grid** (Variable): 输入网格数据张量,维度为 :math:`[N, H, W, 2]` 的4-D Tensor,N为批尺寸,C是通道数,H是特征高度,W是特征宽度, 数据类型为float32或float64。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置。默认值:None。 + +返回: Variable(Tensor): 输入X基于输入网格的双线性插值计算结果,维度为 :math:`[N, C, H, W]` 的4-D Tensor + +返回类型:变量(Variable),数据类型与 ``x`` 一致 + +**代码示例:** + +.. code-block:: python + + import paddle.fluid as fluid + + # 一般与 affine_grid 组合使用 + x = fluid.data(name='x', shape=[None, 10, 32, 32], dtype='float32') + theta = fluid.layers.data(name='theta', shape=[2, 3], dtype='float32') + grid = fluid.layers.affine_grid(theta=theta, out_shape=[3, 10, 32, 32]) + out = fluid.layers.grid_sampler(x=x, grid=grid) + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/group_norm_cn.rst b/doc/paddle/api/paddle/fluid/layers/group_norm_cn.rst new file mode 100755 index 0000000000000000000000000000000000000000..2bd4d58fd6eb81c1e3f7aa5ec697fca2f47125de --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/group_norm_cn.rst @@ -0,0 +1,50 @@ +.. _cn_api_fluid_layers_group_norm: + +group_norm +------------------------------- + + +.. py:function:: paddle.fluid.layers.group_norm(input, groups, epsilon=1e-05, param_attr=None, bias_attr=None, act=None, data_layout='NCHW', name=None) + + + + +参考论文: `Group Normalization `_ + +参数: + - **input** (Variable):输入为4-D Tensor,数据类型为float32或float64。 + - **groups** (int):从 channel 中分离出来的 group 的数目,数据类型为int32。 + - **epsilon** (float,可选):为防止方差除以零,增加一个很小的值。数据类型为float32。默认值:1e-05。 + - **param_attr** (ParamAttr|bool,可选) :指定权重参数属性的对象。若 ``param_attr`` 为bool类型,只支持为False,表示没有权重参数。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr|bool,可选) : 指定偏置参数属性的对象。若 ``bias_attr`` 为bool类型,只支持为False,表示没有偏置参数。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **act** (str,可选):将激活应用于输出的 group normalizaiton。 + - **data_layout** (str,可选):指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 + - **name** (str,可选):具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:4-D Tensor,数据类型和格式与 `input` 一致。 + +返回类型:Variable + +抛出异常: + - ``ValueError`` - 如果 ``data_layout`` 既不是"NCHW"也不是"NHWC"。 + - ``ValueError`` - 如果 ``groups`` 小于1,或者 ``groups`` 大于输入的通道数。 + - ``ShapeError`` - 如果 ``param_attr`` (Scale) 或者 ``bias_attr`` (Bias) 不是 1-D Tensor。 + - ``ShapeError`` - 如果 ``param_attr`` (Scale) 或者 ``bias_attr`` (Bias) 的大小与输入的通道数不相等。 + +**代码示例:** + +.. 
code-block:: python + + import paddle.fluid as fluid + data = fluid.data(name='data', shape=[None, 8, 32, 32], dtype='float32') + x = fluid.layers.group_norm(input=data, groups=4) + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/gru_unit_cn.rst b/doc/paddle/api/paddle/fluid/layers/gru_unit_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..610d51cbb3bc55bef186f1ca26f1d6bc76b7247f --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/gru_unit_cn.rst @@ -0,0 +1,85 @@ +.. _cn_api_fluid_layers_gru_unit: + +gru_unit +------------------------------- + + +.. py:function:: paddle.fluid.layers.gru_unit(input, hidden, size, param_attr=None, bias_attr=None, activation='tanh', gate_activation='sigmoid', origin_mode=False) + + + + +Gated Recurrent Unit(GRU)循环神经网络计算单元。该OP用于完成单个时间步内GRU的计算,支持以下两种计算方式: + +如果origin_mode为True,则使用的运算公式来自论文 +`Learning Phrase Representations using RNN Encoder Decoder for Statistical Machine Translation `_ 。 + +.. math:: + u_t & = act_g(W_{ux}x_{t} + W_{uh}h_{t-1} + b_u)\\ + r_t & = act_g(W_{rx}x_{t} + W_{rh}h_{t-1} + b_r)\\ + \tilde{h_t} & = act_c(W_{cx}x_{t} + W_{ch}(r_t \odot h_{t-1}) + b_c)\\ + h_t & = u_t \odot h_{t-1} + (1-u_t) \odot \tilde{h_t} + + +如果origin_mode为False,则使用的运算公式来自论文 +`Empirical Evaluation of Gated Recurrent Neural Networks on Sequence Modeling `_ 。 + +公式如下: + +.. math:: + u_t & = act_g(W_{ux}x_{t} + W_{uh}h_{t-1} + b_u)\\ + r_t & = act_g(W_{rx}x_{t} + W_{rh}h_{t-1} + b_r)\\ + \tilde{h_t} & = act_c(W_{cx}x_{t} + W_{ch}(r_t \odot h_{t-1}) + b_c)\\ + h_t & = (1-u_t) \odot h_{t-1} + u_t \odot \tilde{h_t} + + +其中, :math:`x_t` 为当前时间步的输入,这个输入并非 ``input``,该OP不包含 :math:`W_{ux}x_{t}, W_{rx}x_{t}, W_{cx}x_{t}` 的计算,**注意** 要在该OP前使用大小为GRU隐单元数目的3倍的全连接层并将其输出作为 ``input``; +:math:`h_{t-1}` 为前一时间步的隐状态 ``hidden``; :math:`u_t` 、 :math:`r_t` 、 :math:`\tilde{h_t}` 和 :math:`h_t` 分别代表了GRU单元中update gate(更新门)、reset gate(重置门)、candidate hidden(候选隐状态)和隐状态输出; :math:`\odot` 为逐个元素相乘; +:math:`W_{uh}, b_u` 、 :math:`W_{rh}, b_r` 和 :math:`W_{ch}, b_c` 分别代表更新门、重置门和候选隐状态在计算时使用的权重矩阵和偏置。在实现上,三个权重矩阵合并为一个 :math:`[D, D \times 3]` 形状的Tensor存放,三个偏置拼接为一个 :math:`[1, D \times 3]` 形状的Tensor存放,其中 :math:`D` 为隐单元的数目;权重Tensor存放布局为: :math:`W_{uh}` 和 :math:`W_{rh}` 拼接为 :math:`[D, D \times 2]` 形状位于前半部分,:math:`W_{ch}` 以 :math:`[D, D]` 形状位于后半部分。 + + +参数: + - **input** (Variable) – 表示经线性变换后当前时间步的输入,是形状为 :math:`[N, D \times 3]` 的二维Tensor,其中 :math:`N` 为batch_size, :math:`D` 为隐单元的数目。数据类型为float32或float64。 + - **hidden** (Variable) – 表示上一时间步产生的隐状态,是形状为 :math:`[N, D]` 的二维Tensor,其中 :math:`N` 为batch_size, :math:`D` 为隐单元的数目。数据类型与 ``input`` 相同。 + - **size** (integer) – 输入数据 ``input`` 特征维度的大小,需要是隐单元数目的3倍。 + - **param_attr** (ParamAttr,可选) – 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr,可选) - 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **activation** (string) – 公式中 :math:`act_c` 激活函数的类型。支持identity、sigmoid、tanh、relu四种激活函数类型,默认为tanh。 + - **gate_activation** (string) – 公式中 :math:`act_g` 激活函数的类型。支持identity、sigmoid、tanh、relu四种激活函数类型,默认为sigmoid。 + - **origin_mode** (bool) – 指明要使用的GRU计算方式,两种计算方式具体差异见公式描述,默认值为False。 + + +返回:Variable的三元组,包含三个与 ``input`` 相同数据类型的Tensor,分别表示下一时间步的隐状态( :math:`h_t` )、重置的前一时间步的隐状态( :math:`r_t \odot h_{t-1}` )和 :math:`h_t, r_t, \tilde{h_t}` 的拼接,形状分别为 :math:`[N, D]` 、 :math:`[N, D]` 和 :math:`[N, D \times 3]` 。通常只有下一时间步的隐状态( :math:`h_t` )作为GRU的输出和隐状态使用,其他内容只是中间计算结果。 + +返回类型: tuple + + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + + dict_dim, emb_dim = 128, 64 + data = fluid.data(name='step_data', shape=[None], dtype='int64') + emb = fluid.embedding(input=data, size=[dict_dim, emb_dim]) + hidden_dim = 512 + x = fluid.layers.fc(input=emb, size=hidden_dim * 3) + pre_hidden = fluid.data( + name='pre_hidden', shape=[None, hidden_dim], dtype='float32') + hidden = fluid.layers.gru_unit( + input=x, hidden=pre_hidden, size=hidden_dim * 3) + + + + + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/hard_shrink_cn.rst b/doc/paddle/api/paddle/fluid/layers/hard_shrink_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..75286ecd9ca5609c415e27e24b6f8608a681cda3 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/hard_shrink_cn.rst @@ -0,0 +1,43 @@ +.. _cn_api_fluid_layers_hard_shrink: + +hard_shrink +------------------------------- + +.. py:function:: paddle.fluid.layers.hard_shrink(x,threshold=None) + + + + +HardShrink激活函数(HardShrink activation operator) + + +.. math:: + + out = \begin{cases} + x, \text{if } x > \lambda \\ + x, \text{if } x < -\lambda \\ + 0, \text{otherwise} + \end{cases} + +参数: + - **x** - HardShrink激活函数的输入 + - **threshold** (FLOAT)-HardShrink激活函数的threshold值。[默认:0.5] + +返回:HardShrink激活函数的输出 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + data = fluid.layers.data(name="input", shape=[784]) + result = fluid.layers.hard_shrink(x=data, threshold=0.3) + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/hard_sigmoid_cn.rst b/doc/paddle/api/paddle/fluid/layers/hard_sigmoid_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5d6e7937499a400bdbe1c2115994fb55f75d52e2 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/hard_sigmoid_cn.rst @@ -0,0 +1,33 @@ +.. _cn_api_fluid_layers_hard_sigmoid: + +hard_sigmoid +------------------------------- + +.. py:function:: paddle.fluid.layers.hard_sigmoid(x, slope=0.2, offset=0.5, name=None) + + + + +sigmoid的分段线性逼近激活函数,速度比sigmoid快,详细解释参见 https://arxiv.org/abs/1603.00391。 + +.. math:: + + \\out=\max(0,\min(1,slope∗x+offset))\\ + +参数: + - **x** (Variable) - 该OP的输入为多维Tensor。数据类型必须为float32或float64。 + - **slope** (float,可选) - 斜率。值必须为正数,默认值为0.2。 + - **offset** (float,可选) - 偏移量。默认值为0.5。 + - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:激活后的Tensor,形状、数据类型和 ``x`` 一致。 + +返回类型:Variable + +**代码示例:** + +.. code-block:: python + + import paddle.fluid as fluid + data = fluid.layers.fill_constant(shape=[3, 2], value=0.5, dtype='float32') # [[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]] + result = fluid.layers.hard_sigmoid(data) # [[0.6, 0.6], [0.6, 0.6], [0.6, 0.6]] diff --git a/doc/paddle/api/paddle/fluid/layers/hard_swish_cn.rst b/doc/paddle/api/paddle/fluid/layers/hard_swish_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..406bec654315ad5ec64f25bacb2c5862298fd17b --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/hard_swish_cn.rst @@ -0,0 +1,53 @@ +.. _cn_api_fluid_layers_hard_swish: + +hard_swish +------------------------------- + +.. 
py:function:: paddle.fluid.layers.hard_swish(x, threshold=6.0, scale=6.0, offset=3.0, name=None) + + + + +该OP实现了hard_swish激活函数。hard_swish激活函数在MobileNetV3架构中被提出,相较于swish函数,具有数值稳定性好,计算速度快等优点,具体原理请参考: https://arxiv.org/pdf/1905.02244.pdf + + :math:`out = \frac{x * (min(max(0, x+offset), threshold))}{scale}` + + 阈值 ``threshold`` 和缩放因子 ``scale`` 为正数,位移 ``offset`` 正负均可,建议使用默认参数。 + +参数: + - **x** (Variable) - 输入特征,多维Tensor。数据类型为float32或float64。 + - **threshold** (float,可选) - 激活操作中Relu函数的阈值,默认值为6.0。 + - **scale** (float,可选) - 激活操作的缩放因子,默认值为6.0。 + - **offset** (float,可选) - 激活操作的位移,默认值为3.0。 + - **name** (None|str) – 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None。 + +返回:经过hard_swish计算后的结果,数据类型及维度和x相同。 + +返回类型:Variable + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + DATATYPE='float32' + shape = [1,4] + + x_data = np.array([i for i in range(1,5)]).reshape(shape).astype(DATATYPE) + + x = fluid.layers.data(name="x", shape=shape, dtype=DATATYPE) + y = fluid.layers.hard_swish(x) + + place = fluid.CUDAPlace(0) + exe = fluid.Executor(place) + out, = exe.run(feed={'x':x_data}, fetch_list=[y.name]) + print(out) # [[0.66666667, 1.66666667,3., 4.]] + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/has_inf_cn.rst b/doc/paddle/api/paddle/fluid/layers/has_inf_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..79e1f77ba97adeb76a325317ef08a9efae8db43f --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/has_inf_cn.rst @@ -0,0 +1,35 @@ +.. _cn_api_fluid_layers_has_inf: + +has_inf +------------------------------- + +.. py:function:: paddle.fluid.layers.has_inf(x) + + + + +检查输入的变量(x)中是否包含无穷数(inf)。 + +参数: + - **x** (Variable) - 被检查的变量Tensor/LoDTensor。 + +返回:Variable(Tensor)变量存储输出值,包含一个bool型数值,指明输入中是否包含无穷数(inf)。 + +返回类型:变量(Variable) + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + data = fluid.layers.data(name="input", shape=[4, 32, 32], dtype="float32") + res = fluid.layers.has_inf(data) + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/has_nan_cn.rst b/doc/paddle/api/paddle/fluid/layers/has_nan_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..301a11c6921d69d43e7533595d39b81283cc0e45 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/has_nan_cn.rst @@ -0,0 +1,30 @@ +.. _cn_api_fluid_layers_has_nan: + +has_nan +------------------------------- + +.. py:function:: paddle.fluid.layers.has_nan(x) + + + + +检查输入的变量(x)中是否包含NAN。 + +参数: + - **x** (Variable) - 被检查的变量Tensor/LoDTensor。 + +返回:Variable(Tensor)变量存储输出值,包含一个bool型数值,指明输入中是否包含NAN。 + +返回类型:变量(Variable) + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + data = fluid.layers.data(name="input", shape=[4, 32, 32], dtype="float32") + res = fluid.layers.has_nan(data) + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/hash_cn.rst b/doc/paddle/api/paddle/fluid/layers/hash_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..707fed7d788b4a09542f404c7cc14a5e361bdaed --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/hash_cn.rst @@ -0,0 +1,55 @@ +.. _cn_api_fluid_layers_hash: + +hash +------------------------------- + +.. 
py:function:: paddle.fluid.layers.hash(input, hash_size, num_hash=1, name=None) + + + + +该OP将输入 hash 成为一个整数,该数的值小于给定的 ``hash_size`` 。**仅支持输入为LoDTensor**。 + +该OP使用的哈希算法是:xxHash - `Extremely fast hash algorithm `_ + + +参数: + - **input** (Variable) - 输入是一个 **二维** ``LoDTensor`` 。**输入维数必须为2**。数据类型为:int32、int64。**仅支持LoDTensor**。 + - **hash_size** (int) - 哈希算法的空间大小。输出值将保持在 :math:`[0, hash\_size)` 范围内。 + - **num_hash** (int) - 哈希次数。默认值为1。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:``LoDTensor`` + +返回类型:Variable + +**代码示例:** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + place = fluid.core.CPUPlace() + + # 构建网络 + x = fluid.data(name="x", shape=[2, 2], dtype="int32", lod_level=1) + res = fluid.layers.hash(name="res", input=x, hash_size=1000, num_hash=4) + + # 创建CPU执行器 + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + + in1 = np.array([[1,2],[3,4]]).astype("int32") + print(in1) + x_i = fluid.create_lod_tensor(in1, [[0, 2]], place) + res = exe.run(fluid.default_main_program(), feed={'x':x_i}, fetch_list=[res], return_numpy=False) + print(np.array(res[0])) + # [[[722] + # [407] + # [337] + # [395]] + # [[603] + # [590] + # [386] + # [901]]] diff --git a/doc/paddle/api/paddle/fluid/layers/hsigmoid_cn.rst b/doc/paddle/api/paddle/fluid/layers/hsigmoid_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..55e12069a80688f10c473e1af89ff77e5b410581 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/hsigmoid_cn.rst @@ -0,0 +1,59 @@ +.. _cn_api_fluid_layers_hsigmoid: + +hsigmoid +------------------------------- + + +.. py:function:: paddle.fluid.layers.hsigmoid(input, label, num_classes, param_attr=None, bias_attr=None, name=None, path_table=None, path_code=None, is_custom=False, is_sparse=False) + + + + +层次sigmoid(hierarchical sigmoid),该OP通过构建一个分类二叉树来降低计算复杂度,主要用于加速语言模型的训练过程。 + +该OP建立的二叉树中每个叶节点表示一个类别(单词),每个非叶子节点代表一个二类别分类器(sigmoid)。对于每个类别(单词),都有一个从根节点到它的唯一路径,hsigmoid累加这条路径上每个非叶子节点的损失得到总损失。 + +相较于传统softmax的计算复杂度 :math:`O(N)` ,hsigmoid可以将计算复杂度降至 :math:`O(logN)` ,其中 :math:`N` 表示类别总数(字典大小)。 + +若使用默认树结构,请参考 `Hierarchical Probabilistic Neural Network Language Model `_ 。 + +若使用自定义树结构,请将参数 ``is_custom`` 设置为True,并完成以下步骤(以语言模型为例): + +1. 使用自定义词典来建立二叉树,每个叶结点都应该是词典中的单词; + +2. 建立一个dict类型数据结构,用于存储 **单词id -> 该单词叶结点至根节点路径** 的映射,即路径表 ``path_table`` 参数; + +3. 建立一个dict类型数据结构,用于存储 **单词id -> 该单词叶结点至根节点路径的编码** 的映射,即路径编码 ``path_code`` 参数。 编码是指每次二分类的标签,1为真,0为假; + +4. 
每个单词都已经有自己的路径和路径编码,当对于同一批输入进行操作时,可以同时传入一批路径和路径编码进行运算。 + +参数: + - **input** (Variable) - 输入Tensor。数据类型为float32或float64,形状为 ``[N, D]`` ,其中 ``N`` 为minibatch的大小,``D`` 为特征大小。 + - **label** (Variable) - 训练数据的标签。数据类型为int64,形状为 ``[N, 1]`` 。 + - **num_classes** (int) - 类别总数(字典大小)必须大于等于2。若使用默认树结构,即当 ``is_custom=False`` 时 ,必须设置该参数。若使用自定义树结构,即当 ``is_custom=True`` 时,它取值应为自定义树结构的非叶节点的个数,用于指定二分类的类别总数。 + - **param_attr** (ParamAttr,可选) - 该OP可学习参数的属性。可以设置为None或者一个ParamAttr的类(ParamAttr中可以指定参数的各种属性)。 该OP将利用 ``param_attr`` 属性来创建ParamAttr实例。如果没有设置 ``param_attr`` 的初始化函数,那么参数将采用Xavier初始化。默认值为None。 + - **bias_attr** (ParamAttr, 可选) - 该OP的偏置参数的属性。可以设置为None或者一个ParamAttr的类(ParamAttr中可以指定参数的各种属性)。 该OP将利用 ``bias_attr`` 属性来创建ParamAttr实例。如果没有设置 ``bias_attr`` 的初始化函数,参数初始化为0.0。默认值为None。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + - **path_table** (Variable,可选) – 存储每一批样本从类别(单词)到根节点的路径,按照从叶至根方向存储。 数据类型为int64,形状为 ``[N, L]`` ,其中L为路径长度。``path_table`` 和 ``path_code`` 应具有相同的形状, 对于每个样本i,path_table[i]为一个类似np.ndarray的结构,该数组内的每个元素都是其双亲结点权重矩阵的索引。默认值为None。 + - **path_code** (Variable,可选) – 存储每一批样本从类别(单词)到根节点的路径编码,按从叶至根方向存储。数据类型为int64,形状为 ``[N, L]``。默认值为None。 + - **is_custom** (bool,可选) – 是否使用用户自定义二叉树取代默认二叉树结构。如果设置为True,请务必设置 ``path_table`` , ``path_code`` , ``num_classes`` ,否则必须设置num_classes。默认值为False。 + - **is_sparse** (bool,可选) – 是否使用稀疏更新方式。如果设置为True,W的梯度和输入梯度将会变得稀疏。默认值为False。 + +返回: 层次sigmoid计算后的Tensor,形状为[N, 1],数据类型和 ``input`` 一致。 + +返回类型: Variable + + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + x = fluid.layers.fill_constant(shape=[4, 3], value=0.9, dtype='float32') + # x = [[0.9, 0.9, 0.9], [0.9, 0.9, 0.9], [0.9, 0.9, 0.9], [0.9, 0.9, 0.9]] + y = fluid.layers.fill_constant( + shape=[4, 1], value=1, dtype='int64') + # y = [[1], [1], [1], [1]] + out = fluid.layers.hsigmoid(input=x, label=y, num_classes=2, param_attr=fluid.initializer.Constant( + value=0.05), bias_attr=fluid.initializer.Constant(value=.0)) + # out = [[0.62792355], [0.62792355], [0.62792355], [0.62792355]] diff --git a/doc/paddle/api/paddle/fluid/layers/huber_loss_cn.rst b/doc/paddle/api/paddle/fluid/layers/huber_loss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ffa2060431800f648c84d7742bd094c4dca3c3e8 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/huber_loss_cn.rst @@ -0,0 +1,55 @@ +.. _cn_api_fluid_layers_huber_loss: + +huber_loss +------------------------------- + +.. py:function:: paddle.fluid.layers.huber_loss(input, label, delta) + + + + + +该OP计算输入(input)与标签(label)之间的Huber损失。Huber损失是常用的回归损失之一,相较于平方误差损失,Huber损失减小了对异常点的敏感度,更具鲁棒性。 + +当输入与标签之差的绝对值大于delta时,计算线性误差: + +.. math:: + huber\_loss = delta * (label - input) - 0.5 * delta * delta + +当输入与标签之差的绝对值小于delta时,计算平方误差: + +.. math:: + huber\_loss = 0.5 * (label - input) * (label - input) + + +参数: + - **input** (Variable) - 输入的预测数据,维度为[batch_size, 1] 或[batch_size]的Tensor。数据类型为float32或float64。 + - **label** (Variable) - 输入的真实标签,维度为[batch_size, 1] 或[batch_size]的Tensor。数据类型为float32或float64。 + - **delta** (float) - Huber损失的阈值参数,用于控制Huber损失对线性误差或平方误差的侧重。数据类型为float32。 + +返回: 计算出的Huber损失,数据维度和数据类型与label相同的Tensor。 + +返回类型: Variable + + + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + import numpy as np + + DATATYPE='float32' + input_data = np.array([[1.],[2.],[3.],[4.]]).astype(DATATYPE) + label_data = np.array([[3.],[3.],[4.],[4.]]).astype(DATATYPE) + + x = fluid.layers.data(name='input', shape=[1], dtype=DATATYPE) + y = fluid.layers.data(name='label', shape=[1], dtype=DATATYPE) + loss = fluid.layers.huber_loss(input=x, label=y, delta=1.0) + + place = fluid.CPUPlace() + #place = fluid.CUDAPlace(0) + exe = fluid.Executor(place) + HuberLoss, = exe.run(feed={'input':input_data ,'label':label_data}, fetch_list=[loss.name]) + print(HuberLoss) #[[1.5], [0.5], [0.5], [0. ]], dtype=float32 diff --git a/doc/paddle/api/paddle/fluid/layers/im2sequence_cn.rst b/doc/paddle/api/paddle/fluid/layers/im2sequence_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a9d44f12b481233f565da6485739f70e5bb31e6b --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/im2sequence_cn.rst @@ -0,0 +1,96 @@ +.. _cn_api_fluid_layers_im2sequence: + +im2sequence +------------------------------- + + +.. py:function:: paddle.fluid.layers.im2sequence(input, filter_size=1, stride=1, padding=0, input_image_size=None, out_stride=1, name=None) + + + + +该OP使用 `filter` 扫描输入的Tensor并将输入Tensor转换成序列,返回值的 `shape={input.batch_size * output_height * output_width, filter_size_height* filter_size_width * input.channels}` 。返回值的timestep的个数为 `output_height * output_width` , 每个timestep的维度是 `filter_size_height* filter_size_width * input.channels` 。其中 `output_height` 和 `output_width` 由以下式计算: + + +.. math:: + output\_height = 1 + \frac{padding\_up + padding\_down + input\_height - filter\_size\_height + stride\_height-1}{stride\_height} \\ + output\_width = 1 + \frac{padding\_left + padding\_right + input\_width - filter\_size\_width + stride\_width-1}{stride\_width} + +其中符号的意义如下所示。 + +参数: + - **input** (Variable)- 类型为float32的4-D Tensor,格式为 `[N, C, H, W]` 。公式中 `input_height` 和 `input_width` 分别代表输入的高和宽。 + - **filter_size** (int32 | List[int32]) - 滤波器大小。如果 `filter_size` 是一个List,它必须包含两个整数 `[filter_size_height, filter_size_width]` 。如果 `filter_size` 是一个int32, 则滤波器大小是 `[filter_size, filter_size]` , 默认值为1。 + - **stride** (int32 | List[int32]) - 步长大小。如果stride是一个List,它必须包含两个整数 `[stride_height,stride_width]` 。如果stride是一个int32, 则步长大小是 `[stride, stride]` , 默认值为1。 + - **padding** (int32 | List[int32]) - 填充大小。如果padding是一个List,它可以包含四个整数 `[padding_up, padding_left, padding_down, padding_right]` ,当包含两个整数 `[padding_height, padding_width]` 时,可展开为 `[padding_height, padding_width, padding_height, padding_width]` 。如果padding是一个int, 可展开为 `[padding, padding, padding, padding]` 。默认值为0。 + - **input_image_size** (Variable, 可选) - 2-D Tensor, 输入图像的实际大小, 它的维度为 `[batchsize,2]` 。当该参数不为None时,可用于batch inference。默认值为None. + - **out_stride** (int32 | List[int32]) - 输出步长。只有当input_image_size不为None时才有效。如果out_stride是List,它必须包含 `[out_stride_height, out_stride_width]` ,如果out_stride是int32, 则可展开为 `[out_stride, out_stride]` ,默认值为1。 + - **name** (str, 可选) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,缺省值为None。 + +返回: 数据类型为float32, `shape` 为 `{batch_size * output_height * output_width, filter_size_height * filter_size_width * input.channels}` 的 2-D LodTensor。 + +返回类型: Variable + +:: + + Given: + + x = [[[[ 6. 2. 1.] + [ 8. 3. 5.] + [ 0. 2. 6.]] + + [[ 2. 4. 4.] + [ 6. 3. 0.] + [ 6. 4. 7.]]] + + [[[ 6. 7. 1.] + [ 5. 7. 9.] + [ 2. 4. 8.]] + + [[ 1. 2. 1.] + [ 1. 3. 5.] + [ 9. 0. 
8.]]]] + + x.dims = {2, 2, 3, 3} + + And: + + filter = [2, 2] + stride = [1, 1] + padding = [0, 0] + + Then: + + output.data = [[ 6. 2. 8. 3. 2. 4. 6. 3.] + [ 2. 1. 3. 5. 4. 4. 3. 0.] + [ 8. 3. 0. 2. 6. 3. 6. 4.] + [ 3. 5. 2. 6. 3. 0. 4. 7.] + [ 6. 7. 5. 7. 1. 2. 1. 3.] + [ 7. 1. 7. 9. 2. 1. 3. 5.] + [ 5. 7. 2. 4. 1. 3. 9. 0.] + [ 7. 9. 4. 8. 3. 5. 0. 8.]] + + output.dims = {8, 8} + + output.lod = [[4, 4]] + + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + data = fluid.layers.data(name='data', shape=[3, 32, 32], + dtype='float32') + output = fluid.layers.im2sequence( + input=data, stride=[1, 1], filter_size=[2, 2]) + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/image_resize_cn.rst b/doc/paddle/api/paddle/fluid/layers/image_resize_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ccbcec362c76685ac0afc5d587e86184020f89c8 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/image_resize_cn.rst @@ -0,0 +1,189 @@ +.. _cn_api_fluid_layers_image_resize: + +image_resize +------------------------------- + +.. py:function:: paddle.fluid.layers.image_resize(input, out_shape=None, scale=None, name=None, resample='BILINEAR', actual_shape=None, align_corners=True, align_mode=1, data_format='NCHW') + + + + +**注意:** 参数 ``actual_shape`` 将被弃用,请使用 ``out_shape`` 替代。 + +该OP用于调整一个batch中图片的大小。 + +输入为4-D Tensor时形状为(num_batches, channels, in_h, in_w)或者(num_batches, in_h, in_w, channels),输入为5-D Tensor时形状为(num_batches, channels, in_d, in_h, in_w)或者(num_batches, in_d, in_h, in_w, channels),并且调整大小只适用于深度,高度和宽度对应的维度。 + +支持的插值方法: + + BILINEAR:双线性插值 + + TRALINEAR:三线插值 + + NEAREST:最近邻插值 + + +最近邻插值是在输入张量的高度和宽度上进行最近邻插值。 + +双线性插值是线性插值的扩展,用于在直线2D网格上插值两个变量(例如,该操作中的H方向和W方向)的函数。 关键思想是首先在一个方向上执行线性插值,然后在另一个方向上再次执行线性插值。 + +三线插值是线性插值的一种扩展,是3参数的插值方程(比如op里的D,H,W方向),在三个方向上进行线性插值。 + +Align_corners和align_mode是可选参数,插值的计算方法可以由它们选择。 + +示例: + +:: + + For scale: + + if align_corners = True && out_size > 1 : + + scale_factor = (in_size-1.0)/(out_size-1.0) + + else: + + scale_factor = float(in_size/out_size) + + + Nearest neighbor interpolation: + + if: + align_corners = False + + input : (N,C,H_in,W_in) + output: (N,C,H_out,W_out) where: + + H_out = \left \lfloor {H_{in} * scale_{}factor}} \right \rfloor + W_out = \left \lfloor {W_{in} * scale_{}factor}} \right \rfloor + + else: + align_corners = True + + input : (N,C,H_in,W_in) + output: (N,C,H_out,W_out) where: + + H_out = round(H_{in} * scale_{factor}) + W_out = round(W_{in} * scale_{factor}) + + Bilinear interpolation: + + if: + align_corners = False , align_mode = 0 + + input : (N,C,H_in,W_in) + output: (N,C,H_out,W_out) where: + + H_out = (H_{in}+0.5) * scale_{factor} - 0.5 + W_out = (W_{in}+0.5) * scale_{factor} - 0.5 + + + else: + + input : (N,C,H_in,W_in) + output: (N,C,H_out,W_out) where: + + H_out = H_{in} * scale_{factor} + W_out = W_{in} * scale_{factor} + + Trilinear interpolation: + + if: + align_corners = False , align_mode = 0 + + input : (N,C,D_in,H_in,W_in) + output: (N,C,D_out,H_out,W_out) where: + + D_out = (D_{in}+0.5) * scale_{factor} - 0.5 + H_out = (H_{in}+0.5) * scale_{factor} - 0.5 + W_out = (W_{in}+0.5) * scale_{factor} - 0.5 + + + else: + + input : (N,C,D_in,H_in,W_in) + output: (N,C,D_out,H_out,W_out) where: + + D_out = D_{in} * scale_{factor} + H_out = H_{in} * scale_{factor} + W_out = W_{in} * scale_{factor} + + +有关最近邻插值的详细信息,请参阅维基百科: +https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation + +有关双线性插值的详细信息,请参阅维基百科: +https://en.wikipedia.org/wiki/Bilinear_interpolation + 
+有关三线插值的详细信息,请参阅维基百科: +https://en.wikipedia.org/wiki/Trilinear_interpolation + +参数: + - **input** (Variable) - 4-D或5-D Tensor,数据类型为float32、float64或uint8,其数据格式由参数 ``data_format`` 指定。 + - **out_shape** (list|tuple|Variable|None) - 输出Tensor,输入为4D张量时,形状为为(out_h, out_w)的2-D Tensor。输入为5-D Tensor时,形状为(out_d, out_h, out_w)的3-D Tensor。如果 :code:`out_shape` 是列表,每一个元素可以是整数或者形状为[1]的变量。如果 :code:`out_shape` 是变量,则其维度大小为1。默认值为None。 + - **scale** (float|Variable|None)-输入的高度或宽度的乘数因子 。 out_shape和scale至少要设置一个。out_shape的优先级高于scale。默认值为None。 + - **name** (str|None) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。默认值为None。 + - **resample** (str) - 插值方法。支持“双线性”,“三线性”,“临近插值”。默认值为双线性插值。 + - **actual_shape** (Variable) - 可选输入,用于动态指定输出形状。如果指定actual_shape,图像将根据给定的形状调整大小,而不是根据指定形状的 :code:`out_shape` 和 :code:`scale` 进行调整。也就是说, :code:`actual_shape` 具有最高的优先级。如果希望动态指定输出形状,建议使用 :code:`out_shape` ,因为 :code:`actual_shape` 未来将被弃用。在使用actual_shape指定输出形状时,还需要设置out_shape和scale之一,否则在图形构建阶段会出现错误。默认值:None + - **align_corners** (bool)- 一个可选的bool型参数,如果为True,则将输入和输出张量的4个角落像素的中心对齐,并保留角点像素的值。 默认值为True + - **align_mode** (int)- 双线性插值的可选项。 可以是 '0' 代表src_idx = scale *(dst_indx + 0.5)-0.5;如果为'1' ,代表src_idx = scale * dst_index。 + - **data_format** (str,可选)- 指定输入的数据格式,输出的数据格式将与输入保持一致。对于4-D Tensor,支持 NCHW(num_batches, channels, height, width) 或者 NHWC(num_batches, height, width, channels),对于5-D Tensor,支持 NCDHW(num_batches, channels, depth, height, width)或者 NDHWC(num_batches, depth, height, width, channels),默认值:'NCHW'。 + +返回:4-D Tensor,形状为 (num_batches, channels, out_h, out_w) 或 (num_batches, out_h, out_w, channels);或者5-D Tensor,形状为 (num_batches, channels, out_d, out_h, out_w) 或 (num_batches, out_d, out_h, out_w, channels)。 + +返回类型: 变量(variable) + +抛出异常: + - :code:`TypeError` - out_shape应该是一个列表、元组或变量。 + - :code:`TypeError` - actual_shape应该是变量或None。 + - :code:`ValueError` - image_resize的"resample"只能是"BILINEAR"或"TRILINEAR"或"NEAREST"。 + - :code:`ValueError` - out_shape 和 scale 不可同时为 None。 + - :code:`ValueError` - out_shape 的长度必须为2如果输入是4D张量。 + - :code:`ValueError` - out_shape 的长度必须为3如果输入是5D张量。 + - :code:`ValueError` - scale应大于0。 + - :code:`TypeError` - align_corners 应为bool型。 + - :code:`ValueError` - align_mode 只能取 ‘0’ 或 ‘1’。 + - :code:`ValueError` - data_format 只能取 ‘NCHW’、‘NHWC’、‘NCDHW’ 或者 ‘NDHWC’。 + + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + input = fluid.layers.data(name="input", shape=[3,6,9], dtype="float32") + # input.shape = [-1, 3, 6, 9], where -1 indicates batch size, and it will get the exact value in runtime. + + out = fluid.layers.image_resize(input, out_shape=[12, 12], resample="NEAREST") + out0 = fluid.layers.image_resize(input, out_shape=[12, 12], resample="NEAREST") + # out0.shape = [-1, 3, 12, 12], it means out0.shape[0] = input.shape[0] in runtime. 
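 + + # 补充示意(参数取值为假设,仅供参考):双线性插值并显式指定 align_corners 与 align_mode + out_bilinear = fluid.layers.image_resize(input, out_shape=[12, 12], resample="BILINEAR", align_corners=False, align_mode=1) + # out_bilinear.shape = [-1, 3, 12, 12]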
+ + # out_shape is a list in which each element is a integer or a tensor Variable + dim1 = fluid.layers.data(name="dim1", shape=[1], dtype="int32", append_batch_size=False) + out1 = fluid.layers.image_resize(input, out_shape=[12, dim1], resample="NEAREST") + # out1.shape = [-1, 3, 12, -1] + + # out_shape is a 1-D tensor Variable + shape_tensor = fluid.layers.data(name="shape_tensor", shape=[2], dtype="int32", append_batch_size=False) + out2 = fluid.layers.image_resize(input, out_shape=shape_tensor, resample="NEAREST") + # out2.shape = [-1, 3, -1, -1] + + # when use actual_shape + actual_shape_tensor = fluid.layers.data(name="actual_shape_tensor", shape=[2], dtype="int32", append_batch_size=False) + out3 = fluid.layers.image_resize(input, out_shape=[4, 4], resample="NEAREST", actual_shape=actual_shape_tensor) + # out3.shape = [-1, 3, 4, 4] + + # scale is a Variable + scale_tensor = fluid.layers.data(name="scale", shape=[1], dtype="float32", append_batch_size=False) + out4 = fluid.layers.image_resize(input, scale=scale_tensor) + # out4.shape = [-1, 3, -1, -1] + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/image_resize_short_cn.rst b/doc/paddle/api/paddle/fluid/layers/image_resize_short_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4218c4408fc41a36b9b7c3e2476d711aa245f3ac --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/image_resize_short_cn.rst @@ -0,0 +1,29 @@ +.. _cn_api_fluid_layers_image_resize_short: + +image_resize_short +------------------------------- + +.. py:function:: paddle.fluid.layers.image_resize_short(input, out_short_len, resample='BILINEAR') + + + + +该OP用于调整一批图片的大小。输入图像的短边将被调整为给定的out_short_len 。输入图像的长边按比例调整大小,最终图像的长宽比保持不变。 + +参数: + - **input** (Variable) - 图像调整图层的输入张量,这是一个维度为[num_batch, channels, in_h, in_w]的4-D Tensor。 + - **out_short_len** (int) - 输出图像的短边长度。 + - **resample** (str) - resample方法,默认为双线性插值。 + +返回: 4维张量,shape为(num_batches, channels, out_h, out_w) + +返回类型: 变量(variable) + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + input = fluid.layers.data(name="input", shape=[3,6,9], dtype="float32") + out = fluid.layers.image_resize_short(input, out_short_len=3) + diff --git a/doc/paddle/api/paddle/fluid/layers/increment_cn.rst b/doc/paddle/api/paddle/fluid/layers/increment_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a6635feade9ed1394ce6366e1990f655e39eb1d4 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/increment_cn.rst @@ -0,0 +1,28 @@ +.. _cn_api_fluid_layers_increment: + +increment +------------------------------- + +.. py:function:: paddle.fluid.layers.increment(x, value=1.0, in_place=True) + + + + +使输入Tensor ``x`` 的数据累加 ``value`` , 该OP通常用于循环次数的计数。 + +参数: + - **x** (Variable) – 元素个数为1的Tensor,数据类型必须为float32,float64,int32,int64。 + - **value** (float,可选) – 需要增加的值,默认为1.0。 + - **in_place** (bool,可选) – 输出Tensor是否和输入Tensor ``x`` 复用同一块内存,默认为True。 + +返回:累加计算后的Tensor,形状、数据类型和 ``x`` 一致。 + +返回类型:Variable + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + counter = fluid.layers.zeros(shape=[1], dtype='float32') # [0.] + fluid.layers.increment(counter) # [1.] diff --git a/doc/paddle/api/paddle/fluid/layers/inplace_abn_cn.rst b/doc/paddle/api/paddle/fluid/layers/inplace_abn_cn.rst new file mode 100755 index 0000000000000000000000000000000000000000..11077c5b78fe34fb0387a9d2dcb7bfd3f73c20b0 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/inplace_abn_cn.rst @@ -0,0 +1,43 @@ +.. 
_cn_api_fluid_layers_inplace_abn: + +inplace_abn +------------------------------- + +**注意:该API仅支持【静态图】模式** + +.. py:function:: paddle.fluid.layers.inplace_abn(input, act=None, is_test=False, momentum=0.9, epsilon=1e-05, param_attr=None, bias_attr=None, data_layout='NCHW', name=None, moving_mean_name=None, moving_variance_name=None, do_model_average_for_mean_and_var=False, use_global_stats=False, act_alpha=1.0) + +就地批正则化化激活层(Inplace Activation Batch Normalization Layer) + +此层使用就地内存计算批处理正则化和激活来实现节省内存,有关批量正则化计算,请参见 ``fluid.layers.batch_norm`` ,有关就地激活批正则化化的计算,请参考 `In-Place Activated BatchNorm for Memory-Optimized Training of DNNs `_。 + +参数: + - **input** (Variable) - inplace_abn算子的输入特征,是一个Variable类型,输入维度可以是 2, 3, 4, 5。数据类型:flaot16, float32, float64。 + - **act** (string)- 激活函数类型,可以是leaky_realu、relu、prelu等。默认:None。 + - **is_test** (bool) - 指示它是否在测试阶段,非训练阶段使用训练过程中统计到的全局均值和全局方差。默认:False。 + - **momentum** (float|Variable)- 此值用于计算 moving_mean 和 moving_var,是一个float类型或者一个shape为[1],数据类型为float32的Variable类型。更新公式为: :math:`moving\_mean = moving\_mean * momentum + new\_mean * (1. - momentum)` , :math:`moving\_var = moving\_var * momentum + new\_var * (1. - momentum)` , 默认:0.9。 + - **epsilon** (float)- 加在分母上为了数值稳定的值。默认:1e-5。 + - **param_attr** (ParamAttr|None) :指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。inplace_abn算子默认的权重初始化是1.0。 + - **bias_attr** (ParamAttr|None)- 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。inplace_abn算子默认的偏置初始化是0.0。 + - **data_layout** (string) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 + - **name** (str|None) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值为None。 + - **moving_mean_name** (string)- moving_mean的名称,存储全局均值。如果将其设置为None, ``inplace_abn`` 将随机命名全局均值;否则, ``inplace_abn`` 将命名全局均值为 ``moving_mean_name`` 。默认:None。 + - **moving_variance_name** (string)- moving_variance的名称,存储全局变量。如果将其设置为None, ``inplace_abn`` 将随机命名全局方差;否则, ``inplace_abn`` 将命名全局方差为 ``moving_variance_name`` 。默认:None。 + - **do_model_average_for_mean_and_var** (bool,默认False)- 是否为mean和variance做模型均值。 + - **use_global_stats** (bool) – 是否使用全局均值和方差。 在预测或测试模式下,将use_global_stats设置为true或将is_test设置为true,并且行为是等效的。 在训练模式中,当设置use_global_stats为True时,在训练期间也使用全局均值和方差。默认:False。 + - **act_alpha** (float) – 当 ``act`` 参数为None、leaky-relu、elu时,会使用就地批正则化激活算法,可通过此参数给定leaky-relu、elu的 ``alpha`` 值。默认:1.0。 + + +返回: 维度和输入相同的Tensor,在输入中运用批正则后的结果。 + +返回类型:Variable + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + x = fluid.data(name='x', shape=[3, 7, 3, 7], dtype='float32') + hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w') + hidden2 = fluid.layers.inplace_abn(input=hidden1) + hidden3 = fluid.layers.inplace_abn(input=hidden2, act='leaky_relu', act_alpha=0.2) diff --git a/doc/paddle/api/paddle/fluid/layers/instance_norm_cn.rst b/doc/paddle/api/paddle/fluid/layers/instance_norm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7c7f975ae8850c0015ce9f83ac282aa81a99bf8e --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/instance_norm_cn.rst @@ -0,0 +1,56 @@ +.. _cn_api_fluid_layers_instance_norm: + +instance_norm +------------------------------- + + +.. py:function:: paddle.fluid.layers.instance_norm(input, epsilon=1e-05, param_attr=None, bias_attr=None, name=None) + + + + + +可用作卷积和全连接操作的实例正则化函数,根据每个样本的每个通道的均值和方差信息进行正则化。该层需要的数据格式如下: + +NCHW[batch,in_channels,in_height,in_width] + +更多详情请参考 : `Instance Normalization: The Missing Ingredient for Fast Stylization `_ + +``input`` 是mini-batch的输入。 + +.. 
math:: + \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \quad &// mean of each channel in each sample in a batch \\ + \sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i - \mu_{\beta})^2 \quad &// variance of each channel in each sample a batch \\ + \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\sigma_{\beta}^{2} + \epsilon}} \quad &// normalize \\ + y_i &\gets \gamma \hat{x_i} + \beta \quad &// scale-and-shift + + +参数: + - **input** (Variable) - instance_norm算子的输入特征,是一个Variable类型,输入的维度可以为 2, 3, 4, 5。数据类型:float32和float64。 + - **epsilon** (float,默认1e-05)-为了当前输入做标准化时得到稳定的结果而加在的分母上的扰动值。默认值为1e-5。 + - **param_attr** (ParamAttr|None) - instance_norm 权重参数的属性,可以设置为None或者一个ParamAttr的类(ParamAttr中可以指定参数的各种属性)。 如果设为None,则默认的参数初始化为1.0。如果在ParamAttr指定了属性时, instance_norm创建相应属性的param_attr(权重)参数。默认:None。 + - **bias_attr** (ParamAttr|None) - instance_norm 偏置参数的属性,可以设置为None或者一个ParamAttr的类(ParamAttr中可以指定参数的各种属性)。如果设为None,默认的参数初始化为0.0。如果在ParamAttr指定了参数的属性时, instance_norm创建相应属性的bias_attr(偏置)参数。默认:None。 + - **name** (string,默认None)- 该层名称(可选)。若设为None,则自动为该层命名。 + +返回: 张量,在输入中运用instance normalization后的结果 + +返回类型:变量(Variable) + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + x = fluid.layers.data(name='x', shape=[3, 7, 3, 7], dtype='float32', append_batch_size=False) + hidden1 = fluid.layers.fc(input=x, size=200) + param_attr = fluid.ParamAttr(name='instance_norm_w', initializer=fluid.initializer.Constant(value=1.0)) + bias_attr = fluid.ParamAttr(name='instance_norm_b', initializer=fluid.initializer.Constant(value=0.0)) + hidden2 = fluid.layers.instance_norm(input=hidden1, param_attr = param_attr, bias_attr = bias_attr) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + np_x = np.random.random(size=(3, 7, 3, 7)).astype('float32') + output = exe.run(feed={"x": np_x}, fetch_list = [hidden2]) + print(output) + diff --git a/doc/paddle/api/paddle/fluid/layers/inverse_time_decay_cn.rst b/doc/paddle/api/paddle/fluid/layers/inverse_time_decay_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..cd46afe366fc8b036fcdf0398e0c57f25f257093 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/inverse_time_decay_cn.rst @@ -0,0 +1,50 @@ +.. _cn_api_fluid_layers_inverse_time_decay: + +inverse_time_decay +------------------------------- + +.. py:function:: paddle.fluid.layers.inverse_time_decay(learning_rate, decay_steps, decay_rate, staircase=False) + + + + +在初始学习率上运用逆时衰减。 + +训练模型时,最好在训练过程中降低学习率。通过执行该函数,将对初始学习率运用逆时衰减函数。 + +逆时衰减计算方式如下。 + +.. code-block:: python + + if staircase == True: + decayed_learning_rate = learning_rate / (1 + decay_rate * floor(global_step / decay_step)) + else: + decayed_learning_rate = learning_rate / (1 + decay_rate * global_step / decay_step) + +参数: + - **learning_rate** (Variable|float) - 初始学习率,类型可以为学习率变量(Variable)或float型常量。 + - **decay_steps** (int) - 学习率衰减步长,见以上衰减运算。 + - **decay_rate** (float) - 学习率衰减率。见以上衰减运算。 + - **staircase** (bool) - 若为True,按离散区间衰减学习率,即每 ``decay_steps`` 步多衰减 ``decay_rate`` 倍。若为False,则按以上衰减运算持续衰减。默认False。 + +返回:Variable(Tensor) 随step衰减的学习率变量,维度为 :math:`[1]` 的1-D Tensor。 + +返回类型:变量(Variable) + +**示例代码:** + +.. 
code-block:: python + + import paddle.fluid as fluid + base_lr = 0.1 + sgd_optimizer = fluid.optimizer.SGD( + learning_rate=fluid.layers.inverse_time_decay( + learning_rate=base_lr, + decay_steps=10000, + decay_rate=0.5, + staircase=True)) + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/iou_similarity_cn.rst b/doc/paddle/api/paddle/fluid/layers/iou_similarity_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d870183e611f14dd5835792d97dbd845b5cc99bb --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/iou_similarity_cn.rst @@ -0,0 +1,41 @@ +.. _cn_api_fluid_layers_iou_similarity: + +iou_similarity +------------------------------- + +.. py:function:: paddle.fluid.layers.iou_similarity(x, y, box_normalized=True, name=None) + + + + +**IOU Similarity Operator** + +计算两个框列表的intersection-over-union(IOU)。框列表 :math:`X` 应为LoDTensor, :math:`Y` 是普通张量, :math:`X` 成批输入的所有实例共享 :math:`Y` 中的框。给定框A和框B,IOU的运算如下: + +.. math:: + IOU(A, B) = \frac{area(A\cap B)}{area(A)+area(B)-area(A\cap B)} + +参数: + - **x** (Variable) - 框列表 :math:`X` 是二维LoDTensor,维度为 :math:`[N,4]` ,存有 :math:`N` 个框,每个框表示为 :math:`[xmin, ymin, xmax, ymax]` ,:math:`X` 的维度为 :math:`[N,4]` 。如果输入是图像特征图,:math:`[xmin, ymin]` 表示框的左上角坐标,接近坐标轴的原点。:math:`[xmax, ymax]` 表示框的右下角坐标。该张量包含批次输入的LoD信息。该批次输入的一个实例能容纳不同的项数。数据类型为float32或float64。 + - **y** (Variable) - 框列表 :math:`Y` 是二维张量,存有 :math:`M` 个框,每个框表示为 :math:`[xmin, ymin, xmax, ymax]` ,:math:`Y` 的维度为 :math:`[M,4]`。如果输入是图像特征图,:math:`[xmin, ymin]` 表示框的左上角坐标,接近坐标轴的原点。:math:`[xmax, ymax]` 表示框的右下角坐标。数据类型为float32或float64。 + - **box_normalized** (bool) - 先验框坐标是否归一化,即是否在[0, 1]区间内。默认值为True。 + +返回:维度为 :math:`[N,M]` 的LoDTensor,代表每一对IOU分数,数据类型与 :math:`X` 相同 + +返回类型:Variable + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + x = fluid.layers.data(name='x', shape=[4], dtype='float32') + y = fluid.layers.data(name='y', shape=[4], dtype='float32') + iou = fluid.layers.iou_similarity(x=x, y=y) + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/is_empty_cn.rst b/doc/paddle/api/paddle/fluid/layers/is_empty_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d0e21720f1568d2777cc5eff030beb47c8cddb56 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/is_empty_cn.rst @@ -0,0 +1,35 @@ +.. _cn_api_fluid_layers_is_empty: + +is_empty +------------------------------- + +.. py:function:: paddle.fluid.layers.is_empty(x, cond=None) + + + + +测试变量是否为空。 + +参数: + - **x** (Variable)-测试的变量 + - **cond** (Variable|None)-可选输出参数,默认为空(None)。若传入了该参数,则该参数中存储返回给定x的测试结果 + +返回:布尔类型的标量。如果变量x为空则值为真 + +返回类型:Variable + +抛出异常:``TypeError``-如果input类型不是Variable或cond存储的返回结果的类型不是bool + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + input = fluid.layers.data(name="input", shape=[4, 32, 32], dtype="float32") + res = fluid.layers.is_empty(x=input) + # or: + # fluid.layers.is_empty(x=input, cond=res) + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/isfinite_cn.rst b/doc/paddle/api/paddle/fluid/layers/isfinite_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..493a17cfc716cb565f2791e358b4ace9dc95d78d --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/isfinite_cn.rst @@ -0,0 +1,46 @@ +.. _cn_api_fluid_layers_isfinite: + +isfinite +------------------------------- + +.. 
py:function:: paddle.fluid.layers.isfinite(x) + + + + +**注意:** 此算子的输入 Tensor / LoDTensor 数据类型必须为 int32 / float / double 之一。 + +测试 x 中是否包含非有穷值(即 nan 或 inf)。若所有元素均为有穷数,返回真;否则返回假。 + +参数: + - **x** (Variable):包含被测试数值的 Tensor / LoDTensor。 + +返回: + - Variable (Tensor / LoDTensor),此 Tensor 变量包含一个 bool 型结果。 + +返回类型: + - Variable (Tensor / LoDTensor),一个包含 Tensor 的变量。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import numpy + + # Graph Organizing + var = fluid.layers.data(name="data", shape=(4, 6), dtype="float32") + output = fluid.layers.isfinite(var) + + # Create an executor using CPU as an example + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(fluid.default_startup_program()) + + # Execute + img = numpy.array((4, 6)).astype(numpy.float32) + res, = exe.run(fluid.default_main_program(), feed={'data':img}, fetch_list=[output]) + print(res) # Output Value: [ True] + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/kldiv_loss_cn.rst b/doc/paddle/api/paddle/fluid/layers/kldiv_loss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..992496047373d6165246aa037a4ebf05073779f4 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/kldiv_loss_cn.rst @@ -0,0 +1,71 @@ +.. _cn_api_fluid_layers_kldiv_loss: + +kldiv_loss +------------------------------- + +.. py:function:: paddle.fluid.layers.kldiv_loss(x, target, reduction='mean', name=None) + + + + +该OP计算输入(X)和输入(Target)之间的Kullback-Leibler散度损失。注意其中输入(X)应为对数概率值,输入(Target)应为概率值。 + +KL散度损失的计算方式如下: + +.. math:: + + l(x, y) = y * (log(y) - x) + +:math:`x` 为输入(X),:math:`y` 为输入(Target)。 + +当 ``reduction`` 为 ``none`` 时,输出损失与输入(x)形状相同,各点的损失单独计算,不会对结果做reduction 。 + +当 ``reduction`` 为 ``mean`` 时,输出损失为[1]的形状,输出为所有损失的平均值。 + +当 ``reduction`` 为 ``sum`` 时,输出损失为[1]的形状,输出为所有损失的总和。 + +当 ``reduction`` 为 ``batchmean`` 时,输出损失为[N]的形状,N为批大小,输出为所有损失的总和除以批量大小。 + +参数: + - **x** (Variable) - KL散度损失算子的输入张量。维度为[N, \*]的多维Tensor,其中N是批大小,\*表示任何数量的附加维度,数据类型为float32或float64。 + - **target** (Variable) - KL散度损失算子的张量。与输入 ``x`` 的维度和数据类型一致的多维Tensor。 + - **reduction** (str,可选) - 要应用于输出的reduction类型,可用类型为‘none’ | ‘batchmean’ | ‘mean’ | ‘sum’,‘none’表示无reduction,‘batchmean’ 表示输出的总和除以批大小,‘mean’ 表示所有输出的平均值,‘sum’表示输出的总和。默认值为‘mean’。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置。默认值:None。 + +返回:Variable(Tensor) KL散度损失。 + +返回类型:变量(Variable),数据类型与输入 ``x`` 一致。 + +**代码示例:** + +.. 
code-block:: python + + import paddle.fluid as fluid + + # 'batchmean' reduction, loss shape 为[N] + x = fluid.data(name='x', shape=[None,4,2,2], dtype='float32') # shape=[-1, 4, 2, 2] + target = fluid.layers.data(name='target', shape=[4,2,2], dtype='float32') + loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='batchmean') # shape=[-1] + + # 'mean' reduction, loss shape 为[1] + x = fluid.data(name='x', shape=[None,4,2,2], dtype='float32') # shape=[-1, 4, 2, 2] + target = fluid.layers.data(name='target', shape=[4,2,2], dtype='float32') + loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='mean') # shape=[1] + + # 'sum' reduction, loss shape 为[1] + x = fluid.data(name='x', shape=[None,4,2,2], dtype='float32') # shape=[-1, 4, 2, 2] + target = fluid.layers.data(name='target', shape=[4,2,2], dtype='float32') + loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='sum') # shape=[1] + + # 'none' reduction, loss shape 与X相同 + x = fluid.data(name='x', shape=[None,4,2,2], dtype='float32') # shape=[-1, 4, 2, 2] + target = fluid.layers.data(name='target', shape=[4,2,2], dtype='float32') + loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='none') # shape=[-1, 4, 2, 2] + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/l2_normalize_cn.rst b/doc/paddle/api/paddle/fluid/layers/l2_normalize_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..acb9ae715d8da19f9d524a288bb6f549d31c200a --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/l2_normalize_cn.rst @@ -0,0 +1,47 @@ +.. _cn_api_fluid_layers_l2_normalize: + +l2_normalize +------------------------------- + +.. py:function:: paddle.fluid.layers.l2_normalize(x,axis,epsilon=1e-12,name=None) + + + + +该OP计算欧几里得距离之和对x进行归一化。对于1-D张量(系数矩阵的维度固定为0) +计算公式如下: + +.. math:: + + y=\frac{x}{\sqrt{\sum x^{2}+epsilon}} + +对于输入为多维Tensor的情况,该OP分别对维度轴上的每个1-D切片单独归一化 + +参数: + - **x** (Variable) - 维度为 :math:`[N_1, N_2, ..., N_k, D]` 的多维Tensor,其中最后一维D是类别数目。数据类型为float32或float64。 + - **axis** (int) - 归一化的轴。如果轴小于0,归一化的维是rank(X)+axis。其中,-1用来表示最后一维。 + - **epsilon** (float) - epsilon,用于避免除0,默认值为1e-12。 + - **name** (str|None) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。默认值为None。 + + 返回:与输入x的维度一致的Tensor + + 返回类型:Variable + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + data = fluid.layers.data(name="data", + shape=(3, 17, 13), + dtype="float32") + normed = fluid.layers.l2_normalize(x=data, axis=1) + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/label_smooth_cn.rst b/doc/paddle/api/paddle/fluid/layers/label_smooth_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ba40e2fe2f4fde601fbd6b4a4bc2f22fbaf9fbb6 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/label_smooth_cn.rst @@ -0,0 +1,54 @@ +.. _cn_api_fluid_layers_label_smooth: + +label_smooth +------------------------------- + +.. py:function:: paddle.fluid.layers.label_smooth(label, prior_dist=None, epsilon=0.1, dtype='float32', name=None) + + + + +该OP实现了标签平滑的功能。标签平滑是一种对分类器层进行正则化的机制,称为标签平滑正则化(LSR)。由于直接优化正确标签的对数似然可能会导致过拟合,降低模型的适应能力,因此提出了标签平滑的方法来降低模型置信度。 + +标签平滑使用标签 :math:`y` 和一些固定模式随机分布变量 :math:`\mu` 。对 :math:`k` 标签,标签平滑的计算方式如下。 + +.. 
math:: + + \tilde{y_k} = (1 - \epsilon) * y_k + \epsilon * \mu_k, + +其中 :math:`1-\epsilon` 和 :math:`\epsilon` 分别是权重, :math:`\tilde{y_k}` 是平滑后的标签,通常 :math:`\mu` 使用均匀分布。 + + +关于更多标签平滑的细节, `查看论文 `_ 。 + + +参数: + - **label** (Variable) - 包含标签数据的输入变量。 标签数据应使用 one-hot 表示,是维度为 :math:`[N_1, ..., Depth]` 的多维Tensor,其中Depth为字典大小。 + - **prior_dist** (Variable,可选) - 用于平滑标签的先验分布,是维度为 :math:`[1,class\_num]` 的2D Tensor。 如果未设置,则使用均匀分布。默认值为None。 + - **epsilon** (float,可选) - 用于混合原始真实分布和固定分布的权重。默认值为0.1。 + - **dtype** (np.dtype|core.VarDesc.VarType|str,可选) - 输入 ``Tensor`` 的数据类型,,数据类型可以为”float32“或”float64“。默认值为”float32“。 + - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:为平滑后标签的 ``Tensor`` ,数据类型为dtype设置的数据类型,维度也与输入的label参数维度相同。 + +返回类型: Variable + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.layers as layers + + label = layers.data(name="label", shape=[1], dtype="float32") + one_hot_label = layers.one_hot(input=label, depth=10) + smooth_label = layers.label_smooth(label=one_hot_label, epsilon=0.1, dtype="float32") + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/layer_norm_cn.rst b/doc/paddle/api/paddle/fluid/layers/layer_norm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a03a81265a09272686c6dfd5ae32e32ffc9e1c44 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/layer_norm_cn.rst @@ -0,0 +1,58 @@ +.. _cn_api_fluid_layers_layer_norm: + +layer_norm +------------------------------- + + +.. py:function:: paddle.fluid.layers.layer_norm(input, scale=True, shift=True, begin_norm_axis=1, epsilon=1e-05, param_attr=None, bias_attr=None, act=None, name=None) + + + + +该OP实现了层归一化层(Layer Normalization Layer),其可以应用于小批量输入数据。更多详情请参考:`Layer Normalization `_ + +计算公式如下 + +.. math:: + \\\mu=\frac{1}{H}\sum_{i=1}^{H}x_i\\ + + \\\sigma=\sqrt{\frac{1}{H}\sum_i^H{(x_i-\mu)^2} + \epsilon}\\ + + \\y=f(\frac{g}{\sigma}(x-\mu) + b)\\ + +- :math:`x` : 该层神经元的向量表示 +- :math:`H` : 层中隐藏神经元个数 +- :math:`\epsilon` : 添加较小的值到方差中以防止除零 +- :math:`g` : 可训练的比例参数 +- :math:`b` : 可训练的偏差参数 + +参数: + - **input** (Variable) - 维度为任意维度的多维 ``Tensor`` ,数据类型为float32或float64。 + - **scale** (bool, 可选) - 指明是否在归一化后学习自适应增益 ``g`` 。默认值:True。 + - **shift** (bool, 可选) - 指明是否在归一化后学习自适应偏差 ``b`` 。默认值:True。 + - **begin_norm_axis** (int, 可选) - 指明归一化将沿着 ``begin_norm_axis`` 到 ``rank(input)`` 的维度执行。默认值:1。 + - **epsilon** (float, 可选) - 指明在计算过程中是否添加较小的值到方差中以防止除零。默认值:1e-05。 + - **param_attr** (ParamAttr, 可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr, 可选) - 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **act** (str, 可选) - 应用于输出上的激活函数,如tanh、softmax、sigmoid,relu等,支持列表请参考 :ref:`api_guide_activations` ,默认值为None。 + - **name** (str, 可选) - 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。。 + +返回:表示归一化结果的 ``Tensor`` ,数据类型和 ``input`` 一致,返回维度和 ``input`` 一致。 + +返回类型:Variable + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + import numpy as np + x = fluid.layers.data(name='x', shape=[3, 32, 32], dtype='float32') + hidden1 = fluid.layers.layer_norm(input=x, begin_norm_axis=1) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + np_x = np.random.random(size=(8, 3, 32, 32)).astype('float32') + output = exe.run(feed={"x": np_x}, fetch_list = [hidden1]) + print(output) + diff --git a/doc/paddle/api/paddle/fluid/layers/leaky_relu_cn.rst b/doc/paddle/api/paddle/fluid/layers/leaky_relu_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..113cfe0113285e190a895ca9a9938e6ae4935738 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/leaky_relu_cn.rst @@ -0,0 +1,44 @@ +.. _cn_api_fluid_layers_leaky_relu: + +leaky_relu +------------------------------- + +.. py:function:: paddle.fluid.layers.leaky_relu(x, alpha=0.02, name=None) + + + + +LeakyRelu激活函数 + +.. math:: out=max(x,α∗x) + +参数: + - **x** (Variable) - 输入的多维LoDTensor/Tensor,数据类型为:float32,float64。 + - **alpha** (float) - 负斜率,缺省值为0.02。 + - **name** (str,可选) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None。 + +返回: 与 ``x`` 维度相同,数据类型相同的LodTensor/Tensor。 + +返回类型: Variable + +**代码示例:** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + # Graph Organizing + x = fluid.layers.data(name="x", shape=[2], dtype="float32") + res = fluid.layers.leaky_relu(x, alpha=0.1) + + # Create an executor using CPU as an example + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(fluid.default_startup_program()) + + # Execute + x_i = np.array([[-1, 2], [3, -4]]).astype(np.float32) + res_val, = exe.run(fluid.default_main_program(), feed={'x':x_i}, fetch_list=[res]) + print(res_val) # [[-0.1, 2], [3, -0.4]] + + diff --git a/doc/paddle/api/paddle/fluid/layers/less_equal_cn.rst b/doc/paddle/api/paddle/fluid/layers/less_equal_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..13f3509885f7a9188a9e9f054c1c92e4ef8a847e --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/less_equal_cn.rst @@ -0,0 +1,37 @@ +.. _cn_api_fluid_layers_less_equal: + +less_equal +------------------------------- + +.. py:function:: paddle.fluid.layers.less_equal(x, y, cond=None, name=None) + + + + +该OP逐元素地返回 :math:`x <= y` 的逻辑值,使用重载算子 `<=` 可以有相同的计算函数效果。 + +参数: + - **x** (Variable) – 进行比较的第一个输入,是一个多维的Tensor,数据类型可以是float32,float64,int32,int64。 + - **y** (Variable) – 进行比较的第二个输入,是一个多维的Tensor,数据类型可以是float32,float64,int32,int64。 + - **cond** (Variable,可选) – 如果为None,则创建一个Tensor来作为进行比较的输出结果,该Tensor的shape和数据类型和输入x一致;如果不为None,则将Tensor作为该OP的输出,数据类型和数据shape需要和输入x一致。默认值为None。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:输出结果的Tensor,数据的shape和输入x一致。 + +返回类型:Variable,数据类型为bool类型。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.layers as layers + import numpy as np + label = layers.assign(np.array([1, 3], dtype='int32')) + limit = layers.assign(np.array([1, 2], dtype='int32')) + out = fluid.layers.less_equal(x=label, y=limit) #out=[True, False] + out1 = label<= limit #out1=[True, False] + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/less_than_cn.rst b/doc/paddle/api/paddle/fluid/layers/less_than_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..76bc1daf4c1c6deab2e8cf642b8855ae3a8f84cd --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/less_than_cn.rst @@ -0,0 +1,51 @@ +.. 
_cn_api_fluid_layers_less_than: + +less_than +------------------------------- + +.. py:function:: paddle.fluid.layers.less_than(x, y, force_cpu=None, cond=None, name=None) + + + + + +该OP逐元素地返回 :math:`x < y` 的逻辑值,使用重载算子 `<` 可以有相同的计算函数效果 + + +参数: + - **x** (Variable) - 进行比较的第一个输入,是一个多维的LoDTensor/Tensor,数据类型可以是float32,float64,int32,int64。 + - **y** (Variable) - 进行比较的第二个输入,是一个多维的LoDTensor/Tensor,数据类型可以是float32,float64,int32,int64。 + - **force_cpu** (bool) – 如果为True则强制将输出变量写入CPU内存中,否则将其写入目前所在的运算设备上。默认值为False。注意:该属性已弃用,其值始终是False。 + - **cond** (Variable,可选) – 指定算子输出结果的LoDTensor/Tensor,可以是程序中已经创建的任何Variable。默认值为None,此时将创建新的Variable来保存输出结果。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + + +返回:输出结果的LoDTensor/Tensor,数据的shape和输入x一致。 + +返回类型: Variable,数据类型为bool。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + # Graph Organizing + x = fluid.layers.data(name='x', shape=[2], dtype='float64') + y = fluid.layers.data(name='y', shape=[2], dtype='float64') + result = fluid.layers.less_than(x=x, y=y) + # The comment lists another available method. + # result = fluid.layers.fill_constant(shape=[2], dtype='float64', value=0) + # fluid.layers.less_than(x=x, y=y, cond=result) + + # Create an executor using CPU as example + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(fluid.default_startup_program()) + + # Execute + x_i = np.array([[1, 2], [3, 4]]).astype(np.float64) + y_i = np.array([[2, 2], [1, 3]]).astype(np.float64) + result_value, = exe.run(fluid.default_main_program(), feed={'x':x_i, 'y':y_i}, fetch_list=[result]) + print(result_value) # [[True, False], [False, False]] + diff --git a/doc/paddle/api/paddle/fluid/layers/linear_chain_crf_cn.rst b/doc/paddle/api/paddle/fluid/layers/linear_chain_crf_cn.rst new file mode 100755 index 0000000000000000000000000000000000000000..81968c2cbc9c55c54bc75ea9fd2263682adadce8 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/linear_chain_crf_cn.rst @@ -0,0 +1,140 @@ +.. _cn_api_fluid_layers_linear_chain_crf: + +linear_chain_crf +------------------------------- + + +.. py:function:: paddle.fluid.layers.linear_chain_crf(input, label, param_attr=None, length=None) + + + + +线性链条件随机场(Linear Chain CRF) + +条件随机场定义间接概率图,节点代表随机变量,边代表两个变量之间的依赖。CRF学习条件概率 :math:`P\left ( Y|X \right )` , :math:`X = \left ( x_{1},x_{2},...,x_{n} \right )` 是结构性输入,:math:`Y = \left ( y_{1},y_{2},...,y_{n} \right )` 为输入标签。 + +线性链条件随机场(Linear Chain CRF)是特殊的条件随机场(CRF),有利于序列标注任务。序列标注任务不为输入设定许多条件依赖。唯一的限制是输入和输出必须是线性序列。因此类似CRF的图是一个简单的链或者线,也就是线性链随机场(linear chain CRF)。 + +该操作符实现了线性链条件随机场(linear chain CRF)的前向——反向算法。详情请参照 http://www.cs.columbia.edu/~mcollins/fb.pdf 和 http://cseweb.ucsd.edu/~elkan/250Bwinter2012/loglinearCRFs.pdf。 + + +长度为L的序列s的概率定义如下: + +.. 
math:: + + P(s) = (1/Z) exp(a_{s_1} + b_{s_L} + sum_{l=1}^L x_{s_l} + sum_{l=2}^L w_{s_{l-1},s_l}) + + +其中Z是归一化值,所有可能序列的P(s)之和为1,x是线性链条件随机场(linear chain CRF)的发射(emission)特征权重。 + +线性链条件随机场最终输出每个batch训练样本的条件概率的对数 + + + 1.这里 :math:`x` 代表Emission + + 2.Transition的第一维度值,代表起始权重,这里用 :math:`a` 表示 + + 3.Transition的下一维值,代表末尾权重,这里用 :math:`b` 表示 + + 4.Transition剩下的值,代表转移权重,这里用 :math:`w` 表示 + + 5.Label用 :math:`s` 表示 + + + + +**注意:** + + 1.条件随机场(CRF)的特征函数由发射特征(emission feature)和转移特征(transition feature)组成。发射特征(emission feature)权重在调用函数前计算,而不在函数里计算。 + + 2.由于该函数对所有可能序列的进行全局正则化,发射特征(emission feature)权重应是未缩放的。因此如果该函数带有发射特征(emission feature),并且发射特征是任意非线性激活函数的输出,则请勿调用该函数。 + + 3.Emission的第二维度必须和标记数字(tag number)相同。 + +参数: + - **input** (LoDTensor|Tensor) - 数据类型为float32, float64的Tensor或者LoDTensor。线性链条件随机场的发射矩阵emission。输入为LoDTensor时,是一个shape为[N*D]的2-D LoDTensor,N是每一个batch中batch对应的长度数想加的总数,D是维度。当输入为Tensor时,应该是一个shape为[N x S x D]的Tensor,N是batch_size,S为序列的最大长度,D是维度。 + - **label** (Tensor|LoDTensor) - 数据类型为int64类型Tensor或者LoDTensor。该值为标签值。输入为LoDTensor时[N x 1],N是mini-batch的总数;输入为Tensor时,[N x S],N为batch数量,S为序列的最大长度。 + - **Length** (Tensor) - 数据类型为int64类型的Tensor。 shape为[M x 1]的Tensor,M为mini_batch中序列的数量。 + - **param_attr** (ParamAttr) - 可学习参数的属性,为transition矩阵。详见代码示例。 + +返回: + Emission的指数形式。shape与Emission相同。这是前向计算中的中间计算结果,在反向计算中还会复用。 + + Transition的指数形式。shape为[(D+2)*D]的二维张量。这是前向计算中的中间计算结果,在反向计算中还会复用。 + + 条件概率的对数形式。每个batch训练样本的条件概率的对数。这是一个shape为[S*1]的二维张量,S是mini-batch的序列数。注:S等于mini-batch的序列数。输出不再是LoDTensor。 + +返回类型: + Emission的指数形式。Variable(Tensor|LoDTensor):数据类型为float32, float64的Tensor或者LoDTensor。 + + Transition的指数形式。Variable(Tensor|LoDTensor):数据类型为float32, float64的Tensor或者LoDTensor。 + + 条件概率的对数形式。Variable(Tensor):数据类型为float32, float64的Tensor。 + + +**代码示例:** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + train_program = fluid.Program() + startup_program = fluid.Program() + with fluid.program_guard(train_program, startup_program): + input_data = fluid.layers.data(name='input_data', shape=[10], dtype='float32', lod_level=1) + label = fluid.layers.data(name='label', shape=[1], dtype='int', lod_level=1) + emission= fluid.layers.fc(input=input_data, size=10, act="tanh") + crf_cost = fluid.layers.linear_chain_crf( + input=emission, + label=label, + param_attr=fluid.ParamAttr( + name='crfw', + learning_rate=0.01)) + use_cuda = False + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(startup_program) + #using LoDTensor, define network + a = fluid.create_lod_tensor(np.random.rand(12,10).astype('float32'), [[3,3,4,2]], place) + b = fluid.create_lod_tensor(np.array([[1],[1],[2],[3],[1],[1],[1],[3],[1],[1],[1],[1]]),[[3,3,4,2]] , place) + feed1 = {'input_data':a,'label':b} + loss= exe.run(train_program,feed=feed1, fetch_list=[crf_cost]) + print(loss) + + #using padding, define network + train_program = fluid.Program() + startup_program = fluid.Program() + with fluid.program_guard(train_program, startup_program): + input_data2 = fluid.layers.data(name='input_data2', shape=[10,10], dtype='float32') + label2 = fluid.layers.data(name='label2', shape=[10,1], dtype='int') + label_length = fluid.layers.data(name='length', shape=[1], dtype='int') + emission2= fluid.layers.fc(input=input_data2, size=10, act="tanh", num_flatten_dims=2) + crf_cost2 = fluid.layers.linear_chain_crf( + input=emission2, + label=label2, + length=label_length, + param_attr=fluid.ParamAttr( + name='crfw', + learning_rate=0.01)) + + use_cuda = False + place = fluid.CUDAPlace(0) if 
use_cuda else fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(startup_program) + + #define input data + cc=np.random.rand(4,10,10).astype('float32') + dd=np.random.rand(4,10,1).astype('int64') + ll=np.array([[3,3,4,2]]) + feed2 = {'input_data2':cc,'label2':dd,'length':ll} + + loss2= exe.run(train_program,feed=feed2, fetch_list=[crf_cost2]) + print(loss2) + """ + output: + [array([[ 7.8902354], + [ 7.3602567], + [ 10.004011], + [ 5.86721 ]], dtype=float32)] + """ \ No newline at end of file diff --git a/doc/paddle/api/paddle/fluid/layers/linear_lr_warmup_cn.rst b/doc/paddle/api/paddle/fluid/layers/linear_lr_warmup_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ff40eab243139f4e2309e77166dbd94ac37e6ff0 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/linear_lr_warmup_cn.rst @@ -0,0 +1,64 @@ +.. _cn_api_fluid_layers_linear_lr_warmup: + +linear_lr_warmup +------------------------------- + +.. py:function:: paddle.fluid.layers.linear_lr_warmup(learning_rate, warmup_steps, start_lr, end_lr) + + + + + +该OP使用学习率优化策略-线性学习率热身(warm up)对学习率进行初步调整。在正常调整学习率之前,先逐步增大学习率,具体原理可参考: `Bag of Tricks for Image Classification with Convolutional Neural Networks `_ + +当训练步数(global_step)小于热身步数(warmup_steps)时,学习率lr按如下方式更新: + +.. code-block:: text + + linear_step = end_lr - start_lr + lr = start_lr + linear_step * (global_step / warmup_steps) + +其中start_lr为warm up起始学习率,end_lr为最终学习率; + +当训练步数(global_step)大于等于热身步数(warmup_steps)时,学习率lr为: + +.. code-block:: text + + lr = learning_rate + +其中learning_rate为热身之后的学习率。 + +参数: + - **learning_rate** (Variable|float) - 热身之后的学习率,它可以是数据类型为float32的1D-Tensor或单个浮点数。 + - **warmup_steps** (int) - 进行warm up过程的步数。 + - **start_lr** (float) - warm up的起始学习率。 + - **end_lr** (float) - warm up的最终学习率。 + +返回:进行热身衰减后的学习率,数据类型与learning_rate相同。 + +返回类型:Variable + + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + + boundaries = [100, 200] + lr_steps = [0.1, 0.01, 0.001] + learning_rate = fluid.layers.piecewise_decay(boundaries, lr_steps) #case1, Tensor + #learning_rate = 0.1 #case2, float32 + warmup_steps = 50 + start_lr = 1. / 3. + end_lr = 0.1 + decayed_lr = fluid.layers.linear_lr_warmup(learning_rate, + warmup_steps, start_lr, end_lr) + + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + out, = exe.run(fetch_list=[decayed_lr.name]) + print(out) + # case1: [0.33333334] + # case2: [0.33333334] diff --git a/doc/paddle/api/paddle/fluid/layers/linspace_cn.rst b/doc/paddle/api/paddle/fluid/layers/linspace_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c301233b746178e17db08dacbf927d0b0b5a89f7 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/linspace_cn.rst @@ -0,0 +1,32 @@ +.. _cn_api_fluid_layers_linspace: + +linspace +------------------------------- + +.. 
py:function:: paddle.fluid.layers.linspace(start, stop, num, dtype=None, name=None) + +该OP返回一个Tensor,Tensor的值为在区间start和stop上均匀间隔的num个值,输出Tensor的长度为num。 +**注意:该OP不进行梯度计算** + +参数: + - **start** (int|float|Tensor) – ``start`` 是区间开始的变量,可以是一个浮点标量,或是一个shape为[1]的Tensor,该Tensor的数据类型可以是float32,float64,int32 或者int64。 + - **stop** (int|float|Tensor) – ``stop`` 是区间结束的变量,可以是一个浮点标量,或是一个shape为[1]的Tensor,该Tensor的数据类型可以是float32,float64,int32或者int64。 + - **num** (int|Tensor) – ``num`` 是给定区间内需要划分的区间数,可以是一个整型标量,或是一个shape为[1]的Tensor,该Tensor的数据类型需为int32。 + - **dtype** (np.dtype|str, 可选) – 输出Tensor的数据类型,可以是float32,float64, int32或者int64。如果dtype的数据类型为None,输出Tensor数据类型为float32。 + - **name** (str, 可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:表示等间隔划分结果的1-D Tensor,该Tensor的shape大小为 :math:`[num]` ,在mum为1的情况下,仅返回包含start元素值的Tensor。 + + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + data = fluid.layers.linspace(0, 10, 5, 'float32') # [0.0, 2.5, 5.0, 7.5, 10.0] + data = fluid.layers.linspace(0, 10, 1, 'float32') # [0.0] + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/load_cn.rst b/doc/paddle/api/paddle/fluid/layers/load_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..36f03340e923d4d2c0689beabb7ccf9215d26377 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/load_cn.rst @@ -0,0 +1,167 @@ +.. _cn_api_fluid_dygraph_jit_load: + +load +----------------- + +.. py:function:: paddle.fluid.dygraph.jit.load(model_path, configs=None) + + +将接口 :ref:`cn_api_fluid_dygraph_jit_save` 或者 :ref:`cn_api_fluid_io_save_inference_model` 存储的模型载入为 :ref:`cn_api_fluid_dygraph_TranslatedLayer` ,用于预测推理或者fine-tune训练。 + +.. note:: + 由于一些历史原因,如果载入的模型是通过 :ref:`cn_api_fluid_io_save_inference_model` 存储的, + 在使用它进行fine-tune训练时会存在一些局限: + 1. 命令式编程模式不支持 ``LoDTensor`` ,所有原先输入变量或者参数依赖于LoD信息的模型暂时无法使用; + 2. 所有存储模型的feed变量都需要被传入 ``Translatedlayer`` 的forward方法; + 3. 原模型变量的 ``stop_gradient`` 信息已丢失且无法准确恢复; + 4. 原模型参数的 ``trainable`` 信息已丢失且无法准确恢复。 + +参数: + - **model_path** (str) - 存储模型的目录。 + - **configs** (SaveLoadConfig, 可选) - 用于指定额外配置选项的 :ref:`cn_api_fluid_dygraph_jit_SaveLoadConfig` 对象。默认为 ``None``。 + +返回:TranslatedLayer - 一个能够执行存储模型的 ``Layer`` 对象。 + +**示例代码** + +1. 载入由接口 :ref:`cn_api_fluid_dygraph_jit_save` 存储的模型进行预测推理及fine-tune训练。 + + .. code-block:: python + + import numpy as np + import paddle.fluid as fluid + from paddle.fluid.dygraph import Linear + from paddle.fluid.dygraph import declarative + BATCH_SIZE = 32 + BATCH_NUM = 20 + def random_batch_reader(): + def _get_random_images_and_labels(image_shape, label_shape): + image = np.random.random(size=image_shape).astype('float32') + label = np.random.random(size=label_shape).astype('int64') + return image, label + def __reader__(): + for _ in range(BATCH_NUM): + batch_image, batch_label = _get_random_images_and_labels( + [BATCH_SIZE, 784], [BATCH_SIZE, 1]) + yield batch_image, batch_label + return __reader__ + class LinearNet(fluid.dygraph.Layer): + def __init__(self, in_size, out_size): + super(LinearNet, self).__init__() + self._linear = Linear(in_size, out_size) + @declarative + def forward(self, x): + return self._linear(x) + # 开启命令式编程模式 + fluid.enable_dygraph() + # 1. 训练存储模型. 
+ # 创建网络 + net = LinearNet(784, 1) + adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters()) + # 创建DataLoader + train_loader = fluid.io.DataLoader.from_generator(capacity=5) + train_loader.set_batch_generator(random_batch_reader()) + # 训练 + for data in train_loader(): + img, label = data + label.stop_gradient = True + cost = net(img) + loss = fluid.layers.cross_entropy(cost, label) + avg_loss = fluid.layers.mean(loss) + avg_loss.backward() + adam.minimize(avg_loss) + net.clear_gradients() + model_path = "linear.example.model" + fluid.dygraph.jit.save( + layer=net, + model_path=model_path, + input_spec=[img]) + # 2. 载入模型 & 预测 + # 载入模型 + infer_net = fluid.dygraph.jit.load(model_path) + # 预测 + x = fluid.dygraph.to_variable(np.random.random((1, 784)).astype('float32')) + pred = infer_net(x) + # 3. 载入模型 & fine-tune训练 + # 载入模型 + train_net = fluid.dygraph.jit.load(model_path) + train_net.train() + adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=train_net.parameters()) + # 创建DataLoader + train_loader = fluid.io.DataLoader.from_generator(capacity=5) + train_loader.set_batch_generator(random_batch_reader()) + # fine-tune训练 + for data in train_loader(): + img, label = data + label.stop_gradient = True + cost = train_net(img) + loss = fluid.layers.cross_entropy(cost, label) + avg_loss = fluid.layers.mean(loss) + avg_loss.backward() + adam.minimize(avg_loss) + train_net.clear_gradients() + + +2. 载入由接口 :ref:`cn_api_fluid_io_save_inference_model` 存储的模型进行预测推理及fine-tune训练。 + + .. code-block:: python + + import numpy as np + import paddle.fluid as fluid + BATCH_SIZE = 32 + BATCH_NUM = 20 + def random_batch_reader(): + def _get_random_images_and_labels(image_shape, label_shape): + image = np.random.random(size=image_shape).astype('float32') + label = np.random.random(size=label_shape).astype('int64') + return image, label + def __reader__(): + for _ in range(BATCH_NUM): + batch_image, batch_label = _get_random_images_and_labels( + [BATCH_SIZE, 784], [BATCH_SIZE, 1]) + yield batch_image, batch_label + return __reader__ + img = fluid.data(name='img', shape=[None, 784], dtype='float32') + label = fluid.data(name='label', shape=[None, 1], dtype='int64') + pred = fluid.layers.fc(input=img, size=10, act='softmax') + loss = fluid.layers.cross_entropy(input=pred, label=label) + avg_loss = fluid.layers.mean(loss) + optimizer = fluid.optimizer.SGD(learning_rate=0.001) + optimizer.minimize(avg_loss) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + loader = fluid.io.DataLoader.from_generator( + feed_list=[img, label], capacity=5, iterable=True) + loader.set_batch_generator(random_batch_reader(), places=place) + # 1. 训练 & 存储预测模型 + for data in loader(): + exe.run( + fluid.default_main_program(), + feed=data, + fetch_list=[avg_loss]) + model_path = "fc.example.model" + fluid.io.save_inference_model( + model_path, ["img"], [pred], exe) + # 开启命令式编程模式 + fluid.enable_dygraph() + # 2. 载入模型 & 预测 + fc = fluid.dygraph.jit.load(model_path) + x = fluid.dygraph.to_variable(np.random.random((1, 784)).astype('float32')) + pred = fc(x) + # 3. 
载入模型 & fine-tune训练 + fc = fluid.dygraph.jit.load(model_path) + fc.train() + sgd = fluid.optimizer.SGD(learning_rate=0.001, + parameter_list=fc.parameters()) + train_loader = fluid.io.DataLoader.from_generator(capacity=5) + train_loader.set_batch_generator( + random_batch_reader(), places=place) + for data in train_loader(): + img, label = data + label.stop_gradient = True + cost = fc(img) + loss = fluid.layers.cross_entropy(cost, label) + avg_loss = fluid.layers.mean(loss) + avg_loss.backward() + sgd.minimize(avg_loss) diff --git a/doc/paddle/api/paddle/fluid/layers/locality_aware_nms_cn.rst b/doc/paddle/api/paddle/fluid/layers/locality_aware_nms_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f85aead2e4ef836512851fac9e34df172dea3550 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/locality_aware_nms_cn.rst @@ -0,0 +1,63 @@ +.. _cn_api_fluid_layers_locality_aware_nms: + +locality_aware_nms +------------------------------- + +.. py:function:: paddle.fluid.layers.locality_aware_nms(bboxes, scores, score_threshold, nms_top_k, keep_top_k, nms_threshold=0.3, normalized=True, nms_eta=1.0, background_label=-1, name=None) + + + + +**局部感知NMS** + +`局部感知NMS `_ 用于对边界框(bounding box)和评分(scores)执行局部感知非极大值抑制(LANMS)。 + +首先,根据边界框之间的IOU(交并比),对边界框和评分进行融合。 + +在NMS中,如果提供 ``score_threshold`` 阈值,则此OP贪心地选择所有得分(scores)高于 ``score_threshold`` 的检测边界框(bounding box)的子集,如果nms_top_k大于-1,则选择最大的nms_top_k置信度分数。 接着,该OP依据 adaptive nms(基于 ``nms_threshold`` 和 ``nms_eta``),删除与已选择的框IOU(交并比)高于nms_threshold 的重叠框。 + +在NMS步骤后,如果keep_top_k大于-1,则每个图像最多保留keep_top_k个框(bounding box)。 + + + +参数: + - **bboxes** (Variable) – 支持两种类型的边界框(bounding box): + + 1. (Tensor)形为[N,M,4 或 8、16、24、32]的3-D张量,表示将预测M个边界框的预测位置, N是批大小(batch size)。当边界框(bounding box)大小等于4时,每个边界框有四个坐标值,布局为 :math:`[xmin, ymin, xmax, ymax]` 。数据类型为float32或float64。 + + - **scores** (Variable) – 支持两种类型的分数: + + 1. (Tensor)具有形状 :math:`[N, C, M]` 的3-D张量表示预测的置信度。 N是批量大小 batch size,C是种类数目,M是边界框bounding box的数量。目前仅支持单个类别,所以输入维度应为 :math:`[N, 1, M]` 。请注意,M等于bboxes的第二维。数据类型为float32或float64。 + + - **background_label** (int) – 背景标签(类别)的索引,如果设置为 0 ,则忽略背景标签(类别)。如果设置为 -1 ,则考虑所有类别。默认值:-1 + - **score_threshold** (float) – 过滤掉低置信度分数的边界框的阈值。如果没有提供,请考虑所有边界框。 + - **nms_top_k** (int) – 基于 score_threshold 的过滤检测后,根据置信度保留的最大检测次数。 + - **nms_threshold** (float) – 在LANMS中用于融合检测框和剔除检测框IOU的阈值,默认值:0.3 。 + - **nms_eta** (float) – 在NMS中用于调整 nms_threshold 的参数,设为1时表示nms_threshold不变。默认值:1.0 。 + - **keep_top_k** (int) – NMS步骤后每个图像要保留的总bbox数。 -1表示在NMS步骤之后保留所有bbox。 + - **normalized** (bool) – 检测是否已经经过正则化。默认值:True 。 + - **name** (str|None) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:形为[No,6]的2-D LoDTensor,表示检测(detections)结果。每行有6个值:[标签label,置信度confidence,xmin,ymin,xmax,ymax]。或形为[No,10]的2-D LoDTensor,用来表示检测结果。 每行有10个值:[标签label,置信度confidence,x1,y1,x2,y2,x3,y3,x4,y4]。 No是检测的总数。 如果对所有图像都没有检测到的box,则lod将设置为{1},而Out仅包含一个值-1。 (1.3版本之后,当未检测到box时,lod从{0}更改为{1}) + +返回类型:Variable,数据类型与输入一致。 + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + boxes = fluid.data(name='bboxes', shape=[None, 81, 8], + dtype='float32') + scores = fluid.data(name='scores', shape=[None, 1, 81], + dtype='float32') + out = fluid.layers.locality_aware_nms(bboxes=boxes, + scores=scores, + score_threshold=0.5, + nms_top_k=400, + nms_threshold=0.3, + keep_top_k=200, + normalized=False) + + + diff --git a/doc/paddle/api/paddle/fluid/layers/lod_append_cn.rst b/doc/paddle/api/paddle/fluid/layers/lod_append_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2c7f754233a198fdcdc39d4fcb453ad56c1a81d6 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/lod_append_cn.rst @@ -0,0 +1,56 @@ +.. _cn_api_fluid_layers_lod_append: + +lod_append +------------------------------- + +.. py:function:: paddle.fluid.layers.lod_append(x, level) + + + + +给 ``x`` 的LoD添加 ``level`` 。 + +简单示例: + +.. code-block:: python + + give a 1-level LodTensor x: + x.lod = [[2, 3, 1]] + x.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]] + x.dims = [6, 1] + + level:[1, 1, 1, 1, 1, 1] + + Then we get a 2-level LodTensor: + x.lod = [[2, 3, 1], [1, 1, 1, 1, 1, 1] + x.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]] + x.dims = [6, 1] + +参数: + - **x** (Variable)-输入变量,可以是LoDTensor或tensor。 + - **level** (list|tuple|Variable)-预添加到x的LoD里的LoD level。 + +返回:一个有着新的LoD level的输出变量 + +返回类型:Variable + +Raise: ``ValueError`` - 如果y为None或者level不可迭代。 + +**代码示例:** + +.. code-block:: python + + import paddle.fluid as fluid + x = fluid.layers.data(name='x', shape=[6, 10], lod_level=1) + out = fluid.layers.lod_append(x, [1,1,1,1,1,1]) + + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/lod_reset_cn.rst b/doc/paddle/api/paddle/fluid/layers/lod_reset_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..fbb090b26751d1a372911a7d35cf4425a2048e76 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/lod_reset_cn.rst @@ -0,0 +1,116 @@ +.. _cn_api_fluid_layers_lod_reset: + +lod_reset +------------------------------- + +.. 
py:function:: paddle.fluid.layers.lod_reset(x, y=None, target_lod=None) + + + + +根据给定的参数 ``y`` 或 ``target_lod`` ,重设输入 ``x`` (LoDTensor) 的 LoD 信息。 + +参数: + - **x** (Variable) : 输入变量,类型为 Tensor 或者 LoDTensor。 + - **y** (Variable|None) : 当 ``y`` 非空时,输出 LoDTensor 的 LoD 信息将与 ``y`` 的 LoD 一致。 + - **target_lod** (list|tuple|None) : 一级 LoD,当 ``y`` 为空时,输出 LoDTensor 的 LoD 信息将与 ``target_lod`` 一致。 + +返回: + - Variable (LoDTensor),重设了 LoD 信息的 LoDTensor。 + +返回类型: + - Variable (LoDTensor)。 + +抛出异常: + - ``TypeError`` : 当 ``y`` 和 ``target_lod`` 二者均为空时抛出此异常。 + +:: + + * 例 1: + + x: 包含一级 LoD 信息的 LoDTensor + x.lod = [[ 2, 3, 1 ]] + x.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]] + x.dims = [6, 1] + + y: None + + target_lod: [4, 2] + + Output: 包含一级 LoD 信息的 LoDTensor + out.lod = [[4, 2]] + out.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]] + out.dims = [6, 1] + + * 例 2: + + x: 包含一级 LoD 信息的 LoDTensor + x.lod = [[2, 3, 1]] + x.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]] + x.dims = [6, 1] + + y: 普通 Tensor,不含 LoD 信息 + y.data = [[2, 4]] + y.dims = [1, 3] + + target_lod: 当 y 不为空时,此参数不起作用 + + Output: 包含一级 LoD 信息的 LoDTensor + out.lod = [[2, 4]] + out.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]] + out.dims = [6, 1] + + * 例 3: + + x: 包含一级 LoD 信息的 LoDTensor + x.lod = [[2, 3, 1]] + x.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]] + x.dims = [6, 1] + + y: 包含二级 LoD 信息的 LoDTensor + y.lod = [[2, 2], [2, 2, 1, 1]] + y.data = [[1.1], [2.1], [3.1], [4.1], [5.1], [6.1]] + y.dims = [6, 1] + + target_lod: 当 y 不为空时,此参数不起作用 + + Output: 包含二级 LoD 信息的 LoDTensor + out.lod = [[2, 2], [2, 2, 1, 1]] + out.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]] + out.dims = [6, 1] + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import numpy + + # Graph Organizing + x = fluid.layers.data(name='x', shape=[6]) + y = fluid.layers.data(name='y', shape=[6], lod_level=2) + output = fluid.layers.lod_reset(x=x, y=y) + + # Create an executor using CPU as an example + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + + # Execute + x_tensor = fluid.core.LoDTensor() + x_tensor.set(numpy.ones([6]).astype(numpy.float32), place) + y_ndarray = numpy.ones([6]).astype(numpy.float32) + y_lod = [[2, 2], [2, 2, 1, 1]] + y_tensor = fluid.create_lod_tensor(y_ndarray, y_lod, place) + + res, = exe.run(fluid.default_main_program(), + feed={'x':x_tensor, 'y':y_tensor}, + fetch_list=[output], + return_numpy=False) + print(res) + # Output Value: + # lod: [[0, 2, 4], [0, 2, 4, 5, 6]] + # dim: 6 + # layout: NCHW + # dtype: float + # data: [1 1 1 1 1 1] diff --git a/doc/paddle/api/paddle/fluid/layers/log_cn.rst b/doc/paddle/api/paddle/fluid/layers/log_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..870a8901cdcbc834369eb92d3447b1addb61ca42 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/log_cn.rst @@ -0,0 +1,46 @@ +.. _cn_api_fluid_layers_log: + +log +------------------------------- + +.. py:function:: paddle.fluid.layers.log(x, name=None) + + + + + +Log激活函数(计算自然对数) + +.. math:: + \\Out=ln(x)\\ + + +参数: + - **x** (Variable) – 该OP的输入为LodTensor/Tensor。数据类型为float32,float64。 + - **name** (str,可选) – 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None。 + +返回:Log算子自然对数输出 + +返回类型: Variable - 该OP的输出为LodTensor/Tensor,数据类型为输入一致。 + + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + import numpy as np + + # Graph Organizing + x = fluid.layers.data(name="x", shape=[1], dtype="float32") + res = fluid.layers.log(x) + + # Create an executor using CPU as an example + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(fluid.default_startup_program()) + + # Execute + x_i = np.array([[1], [2]]).astype(np.float32) + res_val, = exe.run(fluid.default_main_program(), feed={'x':x_i}, fetch_list=[res]) + print(res_val) # [[0.], [0.6931472]] + diff --git a/doc/paddle/api/paddle/fluid/layers/log_loss_cn.rst b/doc/paddle/api/paddle/fluid/layers/log_loss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e47491c0dd65ca96d7bf7889f40ee4793838e6c9 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/log_loss_cn.rst @@ -0,0 +1,49 @@ +.. _cn_api_fluid_layers_log_loss: + +log_loss +------------------------------- + +.. py:function:: paddle.fluid.layers.log_loss(input, label, epsilon=0.0001, name=None) + + + + +**负log loss层** + +该 OP 对输入的预测结果和目标标签进行计算,返回负对数损失值。 + +.. math:: + + Out = -label * \log{(input + \epsilon)} - (1 - label) * \log{(1 - input + \epsilon)} + + +参数: + - **input** (Variable) – 形为 [N x 1] 的二维张量, 其中 N 为 batch 大小。该输入是由前驱算子计算得来的概率, 数据类型是 float32。 + - **label** (Variable) – 形为 [N x 1] 的二维张量,真值标签, 其中 N 为 batch 大小,数据类型是 float32。 + - **epsilon** (float) – epsilon + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回: 形为[N x 1]的二维张量,计算出的负log_loss值,数据类型为 float32 + +返回类型: Variable + + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + label = fluid.data(name='label', shape=[None, 1], dtype='float32') + prob = fluid.data(name='prob', shape=[None, 1], dtype='float32') + cost = fluid.layers.log_loss(input=prob, label=label) + + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/logical_and_cn.rst b/doc/paddle/api/paddle/fluid/layers/logical_and_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..09e3efbcbcac121b94fc557a2ccee27357bfac80 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/logical_and_cn.rst @@ -0,0 +1,37 @@ +.. _cn_api_fluid_layers_logical_and: + +logical_and +------------------------------- + +.. py:function:: paddle.logical_and(x, y, out=None, name=None) + +该OP逐元素的对 ``x`` 和 ``y`` 进行逻辑与运算。 + +.. math:: + Out = X \&\& Y + +.. note:: + ``paddle.logical_and`` 遵守broadcasting,如您想了解更多,请参见 :ref:`cn_user_guide_broadcasting` 。 + +参数: + - **x** (Tensor)- 输入的 `Tensor` ,数据类型为:bool。 + - **y** (Tensor)- 输入的 `Tensor` ,数据类型为:bool。 + - **out** (Tensor,可选)- 指定算子输出结果的 `Tensor` ,可以是程序中已经创建的任何Tensor。默认值为None,此时将创建新的Tensor来保存输出结果。 + - **name** (str,可选)- 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +返回: ``Tensor`` , 维度``x`` 维度相同,存储运算后的结果。 + +**代码示例:** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + x_data = np.array([True], dtype=np.bool) + y_data = np.array([True, False, True, False], dtype=np.bool) + x = paddle.to_tensor(x_data) + y = paddle.to_tensor(y_data) + res = paddle.logical_and(x, y) + print(res.numpy()) # [True False True False] diff --git a/doc/paddle/api/paddle/fluid/layers/logical_not_cn.rst b/doc/paddle/api/paddle/fluid/layers/logical_not_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..34e332bce89193bfbf05aacd04d2989633b857e2 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/logical_not_cn.rst @@ -0,0 +1,36 @@ +.. _cn_api_fluid_layers_logical_not: + +logical_not +------------------------------- + +.. 
py:function:: paddle.logical_not(x, out=None, name=None) + + + + +该OP逐元素的对 ``X`` Variable进行逻辑非运算 + +.. math:: + Out = !X + +参数: + - **x** (Variable)- 逻辑非运算的输入,是一个 Variable,数据类型只能是bool。 + - **out** (Variable,可选)- 指定算子输出结果的 Variable,可以是程序中已经创建的任何 Variable。默认值为None,此时将创建新的Variable来保存输出结果。 + - **name** (str,可选)- 该参数供开发人员打印调试信息时使用,具体用法参见 :ref:`api_guide_Name` ,默认值为None。 + +返回:与 ``x`` 维度相同,数据类型相同的 Variable。 + +返回类型:Variable + +**代码示例:** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.enable_imperative() + x_data = np.array([True, False, True, False], dtype=np.bool) + x = paddle.imperative.to_variable(x_data) + res = paddle.logical_not(x) + print(res.numpy()) # [False True False True] diff --git a/doc/paddle/api/paddle/fluid/layers/logical_or_cn.rst b/doc/paddle/api/paddle/fluid/layers/logical_or_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9cb3420ea323948e47209be35dada5a85d2d51ea --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/logical_or_cn.rst @@ -0,0 +1,37 @@ +.. _cn_api_fluid_layers_logical_or: + +logical_or +------------------------------- + +.. py:function:: paddle.logical_or(x, y, out=None, name=None) + +该OP逐元素的对 ``X`` 和 ``Y`` 进行逻辑或运算。 + +.. math:: + Out = X || Y + +.. note:: + ``paddle.logical_or`` 遵守broadcasting,如您想了解更多,请参见 :ref:`cn_user_guide_broadcasting` 。 + +参数: + - **x** (Tensor)- 输入的 `Tensor` ,数据类型为:bool。 + - **y** (Tensor)- 输入的 `Tensor` ,数据类型为:bool。 + - **out** (Tensor,可选)- 指定算子输出结果的 `Tensor` ,可以是程序中已经创建的任何Tensor。默认值为None,此时将创建新的Tensor来保存输出结果。 + - **name** (str,可选)- 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +返回: ``Tensor`` , 维度``x`` 维度相同,存储运算后的结果。 + +**代码示例:** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + x_data = np.array([True, False], dtype=np.bool).reshape(2, 1) + y_data = np.array([True, False, True, False], dtype=np.bool).reshape(2, 2) + x = paddle.to_tensor(x_data) + y = paddle.to_tensor(y_data) + res = paddle.logical_or(x, y) + print(res.numpy()) # [[ True True] [ True False]] diff --git a/doc/paddle/api/paddle/fluid/layers/logical_xor_cn.rst b/doc/paddle/api/paddle/fluid/layers/logical_xor_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..502a5a60e55ea3384cc5a2b579118085bccb9e1f --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/logical_xor_cn.rst @@ -0,0 +1,37 @@ +.. _cn_api_fluid_layers_logical_xor: + +logical_xor +------------------------------- + +.. py:function:: paddle.logical_xor(x, y, out=None, name=None) + +该OP逐元素的对 ``X`` 和 ``Y`` 进行逻辑异或运算。 + +.. math:: + Out = (X || Y) \&\& !(X \&\& Y) + +.. note:: + ``paddle.logical_xor`` 遵守broadcasting,如您想了解更多,请参见 :ref:`cn_user_guide_broadcasting` 。 + +参数: + - **x** (Tensor)- 输入的 `Tensor` ,数据类型为:bool。 + - **y** (Tensor)- 输入的 `Tensor` ,数据类型为:bool。 + - **out** (Tensor,可选)- 指定算子输出结果的 `Tensor` ,可以是程序中已经创建的任何Tensor。默认值为None,此时将创建新的Tensor来保存输出结果。 + - **name** (str,可选)- 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +返回: ``Tensor`` , 维度``x`` 维度相同,存储运算后的结果。 + +**代码示例:** + +.. 
code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + x_data = np.array([True, False], dtype=np.bool).reshape([2, 1]) + y_data = np.array([True, False, True, False], dtype=np.bool).reshape([2, 2]) + x = paddle.to_tensor(x_data) + y = paddle.to_tensor(y_data) + res = paddle.logical_xor(x, y) + print(res.numpy()) # [[False, True], [ True, False]] diff --git a/doc/paddle/api/paddle/fluid/layers/logsigmoid_cn.rst b/doc/paddle/api/paddle/fluid/layers/logsigmoid_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..43cadf0c64e70458bf5c44b708cc7afc327debf1 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/logsigmoid_cn.rst @@ -0,0 +1,42 @@ +.. _cn_api_fluid_layers_logsigmoid: + +logsigmoid +------------------------------- + +.. py:function:: paddle.fluid.layers.logsigmoid(x, name=None) + + + + +Logsigmoid激活函数 + + +.. math:: + + out = \log \frac{1}{1 + e^{-x}} + + +参数: + - **x** (Variable)- 张量(Tensor) + - **name** (str|None) - 该层名称(可选),若设为None,则自动为该层命名。 + +返回: 张量(Tensor) + +返回类型: 变量(Variable) + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + data = fluid.layers.data(name="input", shape=[32, 784]) + result = fluid.layers.logsigmoid(data) + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/lrn_cn.rst b/doc/paddle/api/paddle/fluid/layers/lrn_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e7a077a93fabb3999908189abba095819f965945 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/lrn_cn.rst @@ -0,0 +1,54 @@ +.. _cn_api_fluid_layers_lrn: + +lrn +------------------------------- + +.. py:function:: paddle.fluid.layers.lrn(input, n=5, k=1.0, alpha=0.0001, beta=0.75, name=None, data_format='NCHW') + + + + + +该OP实现了局部响应正则化层(Local Response Normalization Layer),用于对局部输入区域正则化,执行一种侧向抑制(lateral inhibition)。更多详情参考: `ImageNet Classification with Deep Convolutional Neural Networks `_ + +其中 ``input`` 是mini-batch的输入特征。计算过程如下: + +.. math:: + + Output(i,x,y) = Input(i,x,y)/\left ( k+\alpha \sum_{j=max(0,i-n/2)}^{min(C-1,i+n/2)}(Input(j,x,y))^2 \right )^\beta + +在以上公式中: + - :math:`n` :累加的通道数 + - :math:`k` :位移 + - :math:`\alpha` : 缩放参数 + - :math:`\beta` : 指数参数 + + +参数: + - **input** (Variable)- 输入特征,形状为[N,C,H,W]或者[N,H,W,C]的4D-Tensor,其中N为batch大小,C为输入通道数,H为特征高度,W为特征宽度。必须包含4个维度,否则会抛出 ``ValueError`` 的异常。数据类型为float32。 + - **n** (int,可选) - 累加的通道数,默认值为5。 + - **k** (float,可选)- 位移,正数。默认值为1.0。 + - **alpha** (float,可选)- 缩放参数,正数。默认值为1e-4。 + - **beta** (float,可选)- 指数,正数。默认值为0.75。 + - **name** (None|str) – 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None。 + - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 + + +返回:局部响应正则化得到的输出特征,数据类型及维度和input相同。 + +返回类型:Variable + +抛出异常: + - ``ValueError`` : 如果输入不是4-D Tensor。 + - ``ValueError`` - 如果 ``data_format`` 不是"NCHW"或者"NHWC"。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + data = fluid.layers.data( + name="data", shape=[3, 112, 112], dtype="float32") + lrn = fluid.layers.lrn(input=data) + print(lrn.shape) # [-1, 3, 112, 112] + print(lrn.dtype) # float32 diff --git a/doc/paddle/api/paddle/fluid/layers/lstm_cn.rst b/doc/paddle/api/paddle/fluid/layers/lstm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9d0be734ac6b337b3200089a99bb86a53b8539d1 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/lstm_cn.rst @@ -0,0 +1,103 @@ +.. _cn_api_fluid_layers_lstm: + +lstm +------------------------------- + + +.. 
py:function:: paddle.fluid.layers.lstm(input, init_h, init_c, max_len, hidden_size, num_layers, dropout_prob=0.0, is_bidirec=False, is_test=False, name=None, default_initializer=None, seed=-1) + + + + +.. note:: + 该OP仅支持 GPU 设备运行 + +该OP实现了 LSTM,即 Long-Short Term Memory(长短期记忆)运算 - `Hochreiter, S., & Schmidhuber, J. (1997) `_。 + +该OP的实现不包括 diagonal/peephole 连接,参见 `Gers, F. A., & Schmidhuber, J. (2000) `_。 +如果需要使用 peephole 连接方法,请使用 :ref:`cn_api_fluid_layers_dynamic_lstm` 。 + +该OP对于序列中每一个时间步的计算公式如下: + +.. math:: + i_t = \sigma(W_{ix}x_{t} + W_{ih}h_{t-1} + b_{x_i} + b_{h_i}) +.. math:: + f_t = \sigma(W_{fx}x_{t} + W_{fh}h_{t-1} + b_{x_f} + b_{h_f}) +.. math:: + o_t = \sigma(W_{ox}x_{t} + W_{oh}h_{t-1} + b_{x_o} + b_{h_o}) +.. math:: + \widetilde{c_t} = tanh(W_{cx}x_t + W_{ch}h_{t-1} + b{x_c} + b_{h_c}) +.. math:: + c_t = f_t \odot c_{t-1} + i_t \odot \widetilde{c_t} +.. math:: + h_t = o_t \odot tanh(c_t) + +公式中的概念信息如下: + - :math:`x_{t}` 表示时间步 :math:`t` 的输入 + - :math:`h_{t}` 表示时间步 :math:`t` 的 hidden 状态 + - :math:`h_{t-1}, c_{t-1}` 分别表示前一个时间步的 hidden 和 cell 状态 + - :math:`\widetilde{c_t}` 表示候选的 cell 状态 + - :math:`i_t` ,:math:`f_t` 和 :math:`o_t` 分别为 input gate,forget gate,output gate + - :math:`W` 表示 weight (例如, :math:`W_{ix}` 是在计算 input gate :math:`i_t` 时,对输入 :math:`x_{t}` 做线性变换的 weight) + - :math:`b` 表示 bias (例如, :math:`b_{i}` 是 input gate 的 bias) + - :math:`\sigma` 表示 gate 的非线性激活函数,默认为 sigmoid + - :math:`\odot` 表示矩阵的 Hadamard product,即对两个维度相同的矩阵,将相同位置的元素相乘,得到另一个维度相同的矩阵 + +参数: + - **input** ( :ref:`api_guide_Variable` ) - LSTM的输入张量,维度为 :math:`[batch\_size, seq\_len, input\_dim]` 的 3-D Tensor,其中 seq_len 为序列的长度, input_dim 为序列词嵌入的维度。数据类型为 float32 或者 float64。 + - **init_h** ( :ref:`api_guide_Variable` ) – LSTM的初始 hidden 状态,维度为 :math:`[num\_layers, batch\_size, hidden\_size]` 的 3-D Tensor,其中 num_layers 是LSTM的总层数,hidden_size 是隐层维度。 如果is_bidirec = True, 维度应该为 :math:`[num\_layers*2, batch\_size, hidden\_size]` 。数据类型为 float32 或者 float64。 + - **init_c** ( :ref:`api_guide_Variable` ) - LSTM的初始 cell 状态。维度为 :math:`[num\_layers, batch\_size, hidden\_size]` 的 3-D Tensor,其中 num_layers 是LSTM的总层数,hidden_size 是隐层维度。 如果is_bidirec = True, 维度应该为 :math:`[num\_layers*2, batch\_size, hidden\_size]` 。数据类型为 float32 或者 float64。 + - **max_len** (int) – LSTM的最大长度。输入张量的第一个 input_dim 不能大于 max_len。 + - **hidden_size** (int) - LSTM hidden 状态的维度。 + - **num_layers** (int) – LSTM的总层数。例如,该参数设置为2,则会堆叠两个LSTM,其第一个LSTM的输出会作为第二个LSTM的输入。 + - **dropout_prob** (float,可选) – dropout比例,dropout 只在 rnn 层之间工作,而不是在时间步骤之间。dropout 不作用于最后的 rnn 层的 rnn 输出中。默认值为 0.0。 + - **is_bidirec** (bool,可选) – 是否是双向的LSTM。默认值为 False。 + - **is_test** (bool,可选) – 是否在测试阶段。默认值为 False。 + - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + - **default_initializer** (Initializer,可选) – 用于初始化权重的初始化器,如果为None,将进行默认初始化。默认值为 None。 + - **seed** (int,可选) – LSTM中dropout的seed,如果是-1,dropout将使用随机seed。默认值为 1。 + +返回: 经过lstm运算输出的三个Tensor的tuple,包括 + +- rnn_out:LSTM hidden的输出结果的Tensor,数据类型与input一致,维度为 :math:`[batch\_size, seq\_len, hidden\_size]` 。如果 ``is_bidirec`` 设置为True,则维度为 :math:`[batch\_size, seq\_len, hidden\_size*2]` +- last_h:LSTM最后一步的hidden状态的Tensor,数据类型与input一致,维度为 :math:`[num\_layers, batch\_size, hidden\_size]` 。如果 ``is_bidirec`` 设置为True,则维度为 :math:`[num\_layers*2, batch\_size, hidden\_size]` +- last_c:LSTM最后一步的cell状态的Tensor,数据类型与input一致,维度为 :math:`[num\_layers, batch\_size, hidden\_size]` 。如果 ``is_bidirec`` 设置为True,则维度为 :math:`[num\_layers*2, batch\_size, hidden\_size]` + +返回类型: tuple( :ref:`api_guide_Variable` , :ref:`api_guide_Variable` , 
:ref:`api_guide_Variable` ) + +**代码示例:** + +.. code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.layers as layers + + emb_dim = 256 + vocab_size = 10000 + data = fluid.layers.data(name='x', shape=[-1, 100, 1], + dtype='int64') + emb = fluid.layers.embedding(input=data, size=[vocab_size, emb_dim], is_sparse=True) + batch_size = 20 + max_len = 100 + dropout_prob = 0.2 + hidden_size = 150 + num_layers = 1 + init_h = layers.fill_constant( [num_layers, batch_size, hidden_size], 'float32', 0.0 ) + init_c = layers.fill_constant( [num_layers, batch_size, hidden_size], 'float32', 0.0 ) + + rnn_out, last_h, last_c = layers.lstm(emb, init_h, init_c, max_len, hidden_size, num_layers, dropout_prob=dropout_prob) + rnn_out.shape # (-1, 100, 150) + last_h.shape # (1, 20, 150) + last_c.shape # (1, 20, 150) + + + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/lstm_unit_cn.rst b/doc/paddle/api/paddle/fluid/layers/lstm_unit_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..afe363c99e5ec3f1af14ccd14eb8492be706cf4f --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/lstm_unit_cn.rst @@ -0,0 +1,67 @@ +.. _cn_api_fluid_layers_lstm_unit: + +lstm_unit +------------------------------- + + +.. py:function:: paddle.fluid.layers.lstm_unit(x_t, hidden_t_prev, cell_t_prev, forget_bias=0.0, param_attr=None, bias_attr=None, name=None) + + + + + +Long-Short Term Memory(LSTM)循环神经网络计算单元。该OP用于完成单个时间步内LSTM的计算,基于论文 `RECURRENT NEURAL NETWORK REGULARIZATION `_ 中的描述实现, + +并在forget gate(遗忘门)中增加了 ``forget_bias`` 来控制遗忘力度,公式如下: + +.. math:: + + i_{t} &= \sigma \left ( W_{x_{i}}x_{t}+W_{h_{i}}h_{t-1}+b_{i} \right ) \\ + f_{t} &= \sigma \left ( W_{x_{f}}x_{t}+W_{h_{f}}h_{t-1}+b_{f}+forget\_bias \right ) \\ + c_{t} &= f_{t}c_{t-1}+i_{t}tanh\left ( W_{x_{c}}x_{t} +W_{h_{c}}h_{t-1}+b_{c}\right ) \\ + o_{t} &= \sigma \left ( W_{x_{o}}x_{t}+W_{h_{o}}h_{t-1}+b_{o} \right ) \\ + h_{t} &= o_{t}tanh \left ( c_{t} \right ) + +其中, :math:`x_{t}` 对应 ``x_t``, 表示当前时间步的输入; :math:`h_{t-1}` 和 :math:`c_{t-1}` 对应 ``hidden_t_prev`` 和 ``cell_t_prev``,表示上一时间步的hidden和cell输出; +:math:`i_{t}, f_{t}, c_{t}, o_{t}, h_{t}` 分别为input gate(输入门)、forget gate(遗忘门)、cell、output gate(输出门)和hidden的计算。 + + +参数: + - **x_t** (Variable) - 表示当前时间步的输入的Tensor,形状为 :math:`[N, M]` ,其中 :math:`N` 为batch_size, :math:`M` 为输入的特征维度大小。数据类型为float32或float64。 + - **hidden_t_prev** (Variable) - 表示前一时间步hidden输出的Tensor,形状为 :math:`[N, D]`,其中 :math:`N` 为batch_size, :math:`D` 为LSTM中隐单元的数目。数据类型与 ``x_t`` 相同。 + - **cell_t_prev** (Variable) - 表示前一时间步cell输出的Tensor,和 ``hidden_t_prev`` 具有相同形状和数据类型。 + - **forget_bias** (float,可选) - 额外添加在遗忘门中的偏置项(参见公式)。默认值为0。 + - **param_attr** (ParamAttr,可选) – 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr,可选) - 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:Variable的二元组,包含了两个形状和数据类型均与 ``hidden_t_prev`` 相同的Tensor,分别表示hiddel和cell输出,即公式中的 :math:`h_{t}` 和 :math:`c_{t}` 。 + +返回类型:tuple + +抛出异常: + - :code:`ValueError`: ``x_t`` 的阶不为2 + - :code:`ValueError`: ``hidden_t_prev`` 的阶不为2 + - :code:`ValueError`: ``cell_t_prev`` 的阶不为2 + - :code:`ValueError`: ``x_t`` 、``hidden_t_prev`` 和 ``cell_t_prev`` 的第一维大小必须相同 + - :code:`ValueError`: ``hidden_t_prev`` 和 ``cell_t_prev`` 的第二维大小必须相同 + + +**代码示例**: + +.. 
code-block:: python + + import paddle.fluid as fluid + + dict_dim, emb_dim, hidden_dim = 128, 64, 512 + data = fluid.data(name='step_data', shape=[None], dtype='int64') + x = fluid.embedding(input=data, size=[dict_dim, emb_dim]) + pre_hidden = fluid.data( + name='pre_hidden', shape=[None, hidden_dim], dtype='float32') + pre_cell = fluid.data( + name='pre_cell', shape=[None, hidden_dim], dtype='float32') + hidden = fluid.layers.lstm_unit( + x_t=x, + hidden_t_prev=pre_hidden, + cell_t_prev=pre_cell) + diff --git a/doc/paddle/api/paddle/fluid/layers/margin_rank_loss_cn.rst b/doc/paddle/api/paddle/fluid/layers/margin_rank_loss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..fbbf847994a0c97be6bbf17d82e0dc349ca79685 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/margin_rank_loss_cn.rst @@ -0,0 +1,52 @@ +.. _cn_api_fluid_layers_margin_rank_loss: + +margin_rank_loss +------------------------------- + +.. py:function:: paddle.fluid.layers.margin_rank_loss(label, left, right, margin=0.1, name=None) + + + + +margin rank loss(间隔排序损失)层。在排序问题中,它可以比较来自排序网络的输入 ``left`` 和输入 ``right`` 的得分。 + +可用如下等式定义: + +.. math:: + rank\_loss = max(0, -label * (left - right) + margin) + + +参数: + - **label** (Variable) – 表示输入 ``left`` 的真实排序是否高于输入 ``right`` , 数据类型为 float32。 + - **left** (Variable) – 输入 ``left`` 的排序得分, 数据类型为 float32 。 + - **right** (Variable) – 输入 ``right`` 的排序得分, 数据类型为 float32。 + - **margin** (float) – 指定的间隔。 + - **name** (str,可选) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值为None。 + +返回: 排序损失 + +返回类型: Variable + +抛出异常: + - ``ValueError`` - ``label`` , ``left`` , ``right`` 有一者不为Variable类型时,抛出此异常 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + label = fluid.layers.data(name="label", shape=[-1, 1], dtype="float32") + left = fluid.layers.data(name="left", shape=[-1, 1], dtype="float32") + right = fluid.layers.data(name="right", shape=[-1, 1], dtype="float32") + out = fluid.layers.margin_rank_loss(label, left, right) + + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/matmul_cn.rst b/doc/paddle/api/paddle/fluid/layers/matmul_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8514a410c290fe9de6004751329d2439772bcd99 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/matmul_cn.rst @@ -0,0 +1,104 @@ +.. _cn_api_fluid_layers_matmul: + +matmul +------------------------------- + +.. 
py:function:: paddle.fluid.layers.matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None) + + + + +输入 ``x`` 和输入 ``y`` 矩阵相乘。 + +两个输入的形状可为任意维度,但当任一输入维度大于3时,两个输入的维度必须相等。 +实际的操作取决于 ``x`` 、 ``y`` 的维度和 ``transpose_x`` 、 ``transpose_y`` 的布尔值。具体如下: + +- 如果 ``transpose`` 为真,则对应 Tensor 的后两维会转置。假定 ``x`` 是一个 shape=[D] 的一维 Tensor,则 ``x`` 非转置形状为 [1, D],转置形状为 [D, 1]。转置之后的输入形状需满足矩阵乘法要求,即 `x_width` 与 `y_height` 相等。 + +- 转置后,输入的两个 Tensor 维度将为 2-D 或 n-D,将根据下列规则矩阵相乘: + - 如果两个矩阵都是 2-D,则同普通矩阵一样进行矩阵相乘。 + - 如果任意一个矩阵是 n-D,则将其视为带 batch 的二维矩阵乘法。 + +- 如果原始 Tensor x 或 y 的秩为 1 且未转置,则矩阵相乘后的前置或附加维度 1 将移除。 + +参数: + - **x** (Variable) : 输入变量,类型为 Tensor 或 LoDTensor。 + - **y** (Variable) : 输入变量,类型为 Tensor 或 LoDTensor。 + - **transpose_x** (bool) : 相乘前是否转置 x。 + - **transpose_y** (bool) : 相乘前是否转置 y。 + - **alpha** (float) : 输出比例,默认为 1.0。 + - **name** (str|None) : 该层名称(可选),如果设置为空,则自动为该层命名。 + +返回: + - Variable (Tensor / LoDTensor),矩阵相乘后的结果。 + +返回类型: + - Variable(变量)。 + +:: + + * 例 1: + + x: [B, ..., M, K], y: [B, ..., K, N] + out: [B, ..., M, N] + + * 例 2: + + x: [B, M, K], y: [B, K, N] + out: [B, M, N] + + * 例 3: + + x: [B, M, K], y: [K, N] + out: [B, M, N] + + * 例 4: + + x: [M, K], y: [K, N] + out: [M, N] + + * 例 5: + + x: [B, M, K], y: [K] + out: [B, M] + + * 例 6: + + x: [K], y: [K] + out: [1] + + * 例 7: + + x: [M], y: [N] + out: [M, N] + + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import numpy + + # Graph Organizing + x = fluid.layers.data(name='x', shape=[2, 3], dtype='float32') + y = fluid.layers.data(name='y', shape=[3, 2], dtype='float32') + output = fluid.layers.matmul(x, y, True, True) + + # Create an executor using CPU as an example + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(fluid.default_startup_program()) + + # Execute + input_x = numpy.ones([2, 3]).astype(numpy.float32) + input_y = numpy.ones([3, 2]).astype(numpy.float32) + res, = exe.run(fluid.default_main_program(), + feed={'x':input_x, 'y':input_y}, + fetch_list=[output]) + print(res) + ''' + Output Value: + [[2. 2. 2.] + [2. 2. 2.] + [2. 2. 2.]] + ''' diff --git a/doc/paddle/api/paddle/fluid/layers/matrix_nms_cn.rst b/doc/paddle/api/paddle/fluid/layers/matrix_nms_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9cb2e54f2a877d29656227c9d3c772dc927d04ca --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/matrix_nms_cn.rst @@ -0,0 +1,56 @@ +.. _cn_api_fluid_layers_matrix_nms: + +matrix_nms +------------------------------- + + +.. 
py:function:: paddle.fluid.layers.matrix_nms(bboxes, scores, score_threshold, post_threshold, nms_top_k, keep_top_k, use_gaussian=False, gaussian_sigma=2., background_label=0, normalized=True, return_index=False, name=None) + + + + +**Matrix NMS** + +该OP使用Matrix NMS算法对边界框(bounding box)和评分(scores)执行多类非极大值抑制(NMS)。 + +如果提供 ``score_threshold`` 阈值且 ``nms_top_k`` 大于-1,则选择置信度分数最大的k个框。 然后按照Matrix NMS算法对分数进行衰减。经过抑制后,如果 ``keep_top_k`` 大于-1, 则每张图片最终保留 ``keep_top_k`` 个检测框。 + +在NMS步骤后,如果keep_top_k大于-1,则每个图像最多保留keep_top_k个框(bounding box)。 + + +参数: + - **bboxes** (Variable) - 形为[N,M,4]的3-D张量,表示将预测M个边界框的预测位置, N是批大小(batch size)。当边界框(bounding box)大小等于4时,每个边界框有四个坐标值,布局为[xmin,ymin,xmax,ymax]。数据类型为float32或float64。 + - **scores** (Variable) – 形为[N,C,M]的3-D张量,表示预测的置信度。 N是批大小(batch size),C是种类数目,M是边界框bounding box的数量。对于每个类别,存在对应于M个边界框的总M个分数。请注意,M等于bboxes的第二维。数据类型为float32或float64。 + - **score_threshold** (float) – 过滤掉低置信度分数的边界框的阈值。 + - **post_threshold** (float) – 经过NMS衰减后,过滤掉低置信度分数的边界框的阈值。 + - **nms_top_k** (int) – 基于 score_threshold 的过滤检测后,根据置信度保留的最大检测次数。 + - **keep_top_k** (int) – 经过NMS抑制后, 最终保留的最大检测次数。如果设置为 -1 ,则则保留全部。 + - **use_gaussian** (bool) – 是否使用高斯函数衰减。默认值:False 。 + - **gaussian_sigma** (float) – 高斯函数的Sigma值,默认值:2.0 。 + - **background_label** (int) – 背景标签(类别)的索引,如果设置为 0 ,则忽略背景标签(类别)。如果设置为 -1 ,则考虑所有类别。默认值:0 + - **normalized** (bool) – 检测是否已经经过正则化。默认值:True 。 + - **return_index** (bool) – 是否同时返回保留检测框的序号。默认值:False 。 + - **name** (str|None) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值为None。 + +返回: + - **Out** (Variable) - 形为[No,6]的2-D LoDTensor,表示检测结果。每行有6个值:[标签label,置信度confidence,xmin,ymin,xmax,ymax]。或形为[No,10]的2-D LoDTensor,用来表示检测结果。 每行有10个值:[标签label,置信度confidence,x1,y1,x2,y2,x3,y3,x4,y4]。 No是检测的总数。 如果对所有图像都没有检测到的box,则lod将设置为{1},而Out仅包含一个值-1。 (1.3版本之后,当未检测到box时,lod从{0}更改为{1}) + - **Index** (Variable) - 形为[No,1]的2-D LoDTensor,表示检测结果在整个批次中的序号。 + + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + boxes = fluid.data(name='bboxes', shape=[None,81, 4], + dtype='float32', lod_level=1) + scores = fluid.data(name='scores', shape=[None,81], + dtype='float32', lod_level=1) + out = fluid.layers.matrix_nms(bboxes=boxes, + scores=scores, + background_label=0, + score_threshold=0.5, + post_threshold=0.1, + nms_top_k=400, + keep_top_k=200, + normalized=False) diff --git a/doc/paddle/api/paddle/fluid/layers/maxout_cn.rst b/doc/paddle/api/paddle/fluid/layers/maxout_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..494ea1edc0d2728c662ec3de69aa1e0b6e631220 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/maxout_cn.rst @@ -0,0 +1,49 @@ +.. _cn_api_fluid_layers_maxout: + +maxout +------------------------------- + +.. py:function:: paddle.fluid.layers.maxout(x, groups, name=None, axis=1) + + + + +假设输入形状为(N, Ci, H, W),输出形状为(N, Co, H, W),则 :math:`Co=Ci/groups` 运算公式如下: + +.. 
math:: + + y_{si+j} &= \max_k x_{gsi + sk + j} \\ + g &= groups \\ + s &= \frac{input.size}{num\_channels} \\ + 0 \le &i < \frac{num\_channels}{groups} \\ + 0 \le &j < s \\ + 0 \le &k < groups + + +请参阅论文: + - Maxout Networks: http://www.jmlr.org/proceedings/papers/v28/goodfellow13.pdf + - Multi-digit Number Recognition from Street View Imagery using Deep Convolutional Neural Networks: https://arxiv.org/pdf/1312.6082v4.pdf + +参数: + - **x** (Variable) - 4-D Tensor,maxout算子的输入张量,其数据类型为float32,数据格式为NCHW或NHWC,其中N为 batch size ,C为通道数,H和W为特征图的高和宽。 + - **groups** (int) - 指定将输入张量的channel通道维度进行分组的数目。输出的通道数量为通道数除以组数。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + - **axis** (int) - 指定通道所在维度的索引。当数据格式为NCHW时,axis应该被设置为1,当数据格式为NHWC时,axis应该被设置为-1或者3。默认值:1。 + +返回:4-D Tensor,数据类型和格式与 `x` 一致。 + +返回类型:Variable + +抛出异常: + - ``ValueError`` - 如果 ``axis`` 既不是1,也不是-1或3。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + input = fluid.data( + name='data', + shape=[None, 256, 32, 32], + dtype='float32') + out = fluid.layers.maxout(input, groups=2) diff --git a/doc/paddle/api/paddle/fluid/layers/mean_cn.rst b/doc/paddle/api/paddle/fluid/layers/mean_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ec99d732e366db0d92b86b6bc4da0a3daedce0a7 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/mean_cn.rst @@ -0,0 +1,49 @@ +.. _cn_api_fluid_layers_mean: + +mean +------------------------------- + +.. py:function:: paddle.fluid.layers.mean(x, name=None) + + + + +计算 ``x`` 所有元素的平均值。 + +参数: + - **x** (Variable) : Tensor 或 LoDTensor。均值运算的输入。 + - **name** (basestring | None) : 输出变量的名称。 + +返回: + - Variable: 包含输出均值的 Tensor / LoDTensor。 + +返回类型: + - Variable(变量)。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import numpy + + # Graph Organizing + input = fluid.layers.data( + name='data', shape=[2, 3], dtype='float32') + output = fluid.layers.mean(input) + + # Create an executor using CPU as an example + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + + # Execute + x_ndarray = numpy.ones([2, 3]).astype(numpy.float32) + res, = exe.run(fluid.default_main_program(), + feed={'data':x_ndarray}, + fetch_list=[output]) + print(res) + ''' + Output Value: + [1.] + ''' diff --git a/doc/paddle/api/paddle/fluid/layers/merge_selected_rows_cn.rst b/doc/paddle/api/paddle/fluid/layers/merge_selected_rows_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d44783ca46c84322e25a4d16a767663ccad95d25 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/merge_selected_rows_cn.rst @@ -0,0 +1,68 @@ +.. _cn_api_fluid_layers_merge_selected_rows: + +merge_selected_rows +------------------------------- + +.. py:function:: paddle.fluid.layers.merge_selected_rows(x, name=None) + + + + +累加合并 `SelectedRows `_ ( ``x`` ) 中的重复行,并对行值由小到大重新排序。 + +参数: + - x (Variable) : 类型为 SelectedRows,选中行允许重复。 + - name (basestring|None) : 输出变量名称。 + +返回: + - 含有 SelectedRows 的 Variable,选中行不重复。 + +返回类型: + - Variable(变量)。 + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + import numpy + + place = fluid.CPUPlace() + block = fluid.default_main_program().global_block() + + var = block.create_var(name="X2", + dtype="float32", + persistable=True, + type=fluid.core.VarDesc.VarType.SELECTED_ROWS) + y = fluid.layers.merge_selected_rows(var) + z = fluid.layers.get_tensor_from_selected_rows(y) + + x_rows = [0, 2, 2, 4, 19] + row_numel = 2 + np_array = numpy.ones((len(x_rows), row_numel)).astype("float32") + + x = fluid.global_scope().var("X2").get_selected_rows() + x.set_rows(x_rows) + x.set_height(20) + x_tensor = x.get_tensor() + x_tensor.set(np_array, place) + + exe = fluid.Executor(place=place) + result = exe.run(fluid.default_main_program(), fetch_list=[z]) + + print("x_rows: ", x_rows) + print("np_array: ", np_array) + print("result: ", result) + ''' + Output Values: + ('x_rows: ', [0, 2, 2, 4, 19]) + ('np_array: ', array([[1., 1.], + [1., 1.], + [1., 1.], + [1., 1.], + [1., 1.]], dtype=float32)) + ('result: ', [array([[1., 1.], + [2., 2.], + [1., 1.], + [1., 1.]], dtype=float32)]) + ''' diff --git a/doc/paddle/api/paddle/fluid/layers/mse_loss_cn.rst b/doc/paddle/api/paddle/fluid/layers/mse_loss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5c3b5e0dafe5d6e7159784929633292ca1003136 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/mse_loss_cn.rst @@ -0,0 +1,37 @@ +.. _cn_api_fluid_layers_mse_loss: + +mse_loss +------------------------------- + +.. py:function:: paddle.fluid.layers.mse_loss(input,label) + + + + +该OP用于计算预测值和目标值的均方差误差。 + +对于预测值input和目标值label,公式为: + +.. math:: + + Out = MEAN((input-label)^{2}) + +参数: + - **input** (Variable) - 预测值,维度为 :math:`[N_1, N_2, ..., N_k, D]` 的多维Tensor,其中最后一维D是类别数目。数据类型为float32或float64。 + - **label** (Variable) - 目标值,维度为 :math:`[N_1, N_2, ..., N_k, D]` 的多维Tensor,其中最后一维D是类别数目。数据类型为float32或float64。 + +返回:预测值和目标值的均方差 + +返回类型:变量(Variable) + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + y = fluid.data(name='y', shape=[1], dtype='float32') + y_predict = fluid.data(name='y_predict', shape=[1], dtype='float32') + cost = fluid.layers.mse_loss(input=y_predict, label=y) + + + diff --git a/doc/paddle/api/paddle/fluid/layers/mul_cn.rst b/doc/paddle/api/paddle/fluid/layers/mul_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4ce54f1a02584347177f65889ec546d5352cff50 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/mul_cn.rst @@ -0,0 +1,46 @@ +.. _cn_api_fluid_layers_mul: + +mul +------------------------------- + +.. py:function:: paddle.fluid.layers.mul(x, y, x_num_col_dims=1, y_num_col_dims=1, name=None) + + + + +mul算子 +此运算是用于对输入x和y执行矩阵乘法。 +公式是: + +.. 
math:: + Out = x * y + +输入x和y都可以携带LoD(详细程度)信息。但输出仅与输入x共享LoD信息。 + +参数: + - **x** (Variable) - 乘法运算的第一个输入张量Tensor/LoDTensor。 + - **y** (Variable) - 乘法运算的第二个输入张量Tensor/LoDTensor。 + - **x_num_col_dims** (int,可选) - 默认值1, 可以将具有两个以上维度的张量作为输入。如果输入x是具有多于两个维度的张量,则输入x将先展平为二维矩阵。展平规则是:前 ``num_col_dims`` 将被展平成最终矩阵的第一个维度(矩阵的高度),其余的 rank(x) - num_col_dims 维度被展平成最终矩阵的第二个维度(矩阵的宽度)。结果是展平矩阵的高度等于x的前 ``x_num_col_dims`` 维数的乘积,展平矩阵的宽度等于x的最后一个 rank(x)- ``num_col_dims`` 个剩余维度的维数的乘积。例如,假设x是一个5-D张量,形状为(2,3,4,5,6),并且 ``x_num_col_dims`` 的值为3。 则扁平化后的张量具有的形即为(2x3x4,5x6)=(24,30)。 + - **y_num_col_dims** (int,可选) - 默认值1, 可以将具有两个以上维度的张量作为输入。如果输入y是具有多于两个维度的张量,则y将首先展平为二维矩阵。 ``y_num_col_dims`` 属性确定y的展平方式。有关更多详细信息,请参阅 ``x_num_col_dims`` 的注释。 + - **name** (str,可选) - 输出的名称。该参数供开发人员打印调试信息时使用,具体用法参见 :ref:`api_guide_name`,默认为:None。 + +返回:Variable(Tensor)乘法运算输出张量。 + +返回类型:变量(Variable)。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + dataX = fluid.layers.data(name="dataX", append_batch_size = False, shape=[2, 5], dtype="float32") + dataY = fluid.layers.data(name="dataY", append_batch_size = False, shape=[5, 3], dtype="float32") + output = fluid.layers.mul(dataX, dataY, + x_num_col_dims = 1, + y_num_col_dims = 1) + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/multi_box_head_cn.rst b/doc/paddle/api/paddle/fluid/layers/multi_box_head_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..fb65e307f425b8112c39ccc1d39b5b214366ccb6 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/multi_box_head_cn.rst @@ -0,0 +1,111 @@ +.. _cn_api_fluid_layers_multi_box_head: + +multi_box_head +------------------------------- + + +.. py:function:: paddle.fluid.layers.multi_box_head(inputs, image, base_size, num_classes, aspect_ratios, min_ratio=None, max_ratio=None, min_sizes=None, max_sizes=None, steps=None, step_w=None, step_h=None, offset=0.5, variance=[0.1, 0.1, 0.2, 0.2], flip=True, clip=False, kernel_size=1, pad=0, stride=1, name=None, min_max_aspect_ratios_order=False) + + + + +基于SSD(Single Shot MultiBox Detector)算法,在不同层输入特征上提取先验框、计算回归的坐标位置和分类的置信度,并合并到一起作为输出,具体参数解释和输出格式参考下面说明。更详细信息,请参阅SSD论文 `SSD:Single Shot MultiBox Detector `_ 的2.2节。 + +参数: + - **inputs** (list(Variable) | tuple(Variable)) - 输入特征的列表,仅支持格式为NCHW的4-D Tensor。 + - **image** (Variable) - 一般是网络输入的图像数据,仅支持NCHW格式。 + - **base_size** (int) - 输入图片的大小,当输入个数len(inputs) > 2,并且 ``min_size`` 和 ``max_size`` 为None时,通过 ``baze_size``, ``min_ratio`` 和 ``max_ratio`` 来计算出 ``min_size`` 和 ``max_size`` 。计算公式如下: + + .. code-block:: python + + min_sizes = [] + max_sizes = [] + step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2))) + for ratio in six.moves.range(min_ratio, max_ratio + 1, step): + min_sizes.append(base_size * ratio / 100.) + max_sizes.append(base_size * (ratio + step) / 100.) 
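+ # The two lines below prepend one extra, smaller prior-box scale for the
+ # first (highest-resolution) feature map: 10% of base_size as its min size
+ # and 20% as its max size, so the earliest detection layer targets smaller
+ # objects than the ratio schedule computed above.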
+ min_sizes = [base_size * .10] + min_sizes + max_sizes = [base_size * .20] + max_sizes + + - **num_classes** (int) - 类别数。 + - **aspect_ratios** (list(float) | tuple(float) | list(list(float)) | tuple(tuple(float)) - 候选框的宽高比, ``aspect_ratios`` 和 ``input`` 的个数必须相等。如果每个特征层提取先验框的 ``aspect_ratio`` 多余一个,写成嵌套的list,例如[[2., 3.]]。 + - **min_ratio** (int)- 先验框的长度和 ``base_size`` 的最小比率,注意,这里是百分比,假如比率为0.2,这里应该给20.0。默认值: None。 + - **max_ratio** (int)- 先验框的长度和 ``base_size`` 的最大比率,注意事项同 ``min_ratio`` 。默认值: None。 + - **min_sizes** (list(float) | tuple(float) | None)- 每层提取的先验框的最小长度,如果输入个数len(inputs)<= 2,则必须设置 ``min_sizes`` ,并且 ``min_sizes`` 的个数应等于len(inputs)。默认值:None。 + - **max_sizes** (list | tuple | None)- 每层提取的先验框的最大长度,如果len(inputs)<= 2,则必须设置 ``max_sizes`` ,并且 ``min_sizes`` 的长度应等于len(inputs)。默认值:None。 + - **steps** (list(float) | tuple(float)) - 相邻先验框的中心点步长 ,如果在水平和垂直方向上步长相同,则设置steps即可,否则分别通过step_w和step_h设置不同方向的步长。如果 ``steps``, ``ste_w`` 和 ``step_h`` 均为None,步长为输入图片的大小 ``base_size`` 和特征图大小的比例。默认值:None。 + - **step_w** (list(float)| tuple(float)) - 水平方向上先验框中心点步长。默认值:None。 + - **step_h** (list | tuple) - 垂直方向上先验框中心点步长。默认值:None。 + - **offset** (float) - 左上角先验框中心在水平和垂直方向上的偏移。默认值:0.5 + - **variance** (list | tuple) - 先验框的方差。默认值:[0.1,0.1,0.2,0.2]。 + - **flip** (bool) - 是否翻转宽高比。默认值:False。 + - **clip** (bool) - 是否剪切超出边界的框。默认值:False。 + - **kernel_size** (int) - 计算回归位置和分类置信度的卷积核的大小。默认值:1。 + - **pad** (int | list(int) | tuple(int)) - 计算回归位置和分类置信度的卷积核的填充。默认值:0。 + - **stride** (int | list | tuple) - 计算回归位置和分类置信度的卷积核的步长。默认值:1。 + - **name** (str) - 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + - **min_max_aspect_ratios_order** (bool) - 如果设置为True,则输出先验框的顺序为[min,max,aspect_ratios],这与Caffe一致。请注意,此顺序会影响卷积层后面的权重顺序,但不会影响最终检测结果。默认值:False。 + +返回: + - **mbox_loc(Variable)** - 预测框的回归位置。格式为[N,num_priors,4],其中 ``N`` 是batch size, ``num_priors`` 是总共提取的先验框的个数。 + - **mbox_conf(Variable)** - 预测框的分类信度。格式为[N,num_priors,C],其中 ``num_priors`` 同上,C是类别数。 + - **boxes(Variable)** - 提取的先验框。布局是[num_priors,4], ``num_priors`` 同上,常量4是坐标个数。 + - **variances(Variable)** - 提取的先验框方差。布局是[num_priors,4], ``num_priors`` 同上。 + +返回类型:list(Variable) | tuple(Variable) + +**代码示例1: 设置min_ratio和max_ratio** + +.. code-block:: python + + import paddle.fluid as fluid + + images = fluid.data(name='data', shape=[None, 3, 300, 300], dtype='float32') + conv1 = fluid.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32') + conv2 = fluid.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32') + conv3 = fluid.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32') + conv4 = fluid.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32') + conv5 = fluid.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32') + conv6 = fluid.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32') + + mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head( + inputs=[conv1, conv2, conv3, conv4, conv5, conv6], + image=images, + num_classes=21, + min_ratio=20, + max_ratio=90, + aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]], + base_size=300, + offset=0.5, + flip=True, + clip=True) + + +**代码示例2: 设置min_sizes和max_sizes** + +.. 
code-block:: python + + import paddle.fluid as fluid + + images = fluid.data(name='data', shape=[None, 3, 300, 300], dtype='float32') + conv1 = fluid.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32') + conv2 = fluid.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32') + conv3 = fluid.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32') + conv4 = fluid.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32') + conv5 = fluid.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32') + conv6 = fluid.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32') + + mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head( + inputs=[conv1, conv2, conv3, conv4, conv5, conv6], + image=images, + num_classes=21, + min_sizes=[60.0, 105.0, 150.0, 195.0, 240.0, 285.0], + max_sizes=[[], 150.0, 195.0, 240.0, 285.0, 300.0], + aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]], + base_size=300, + offset=0.5, + flip=True, + clip=True) + + + diff --git a/doc/paddle/api/paddle/fluid/layers/multiclass_nms_cn.rst b/doc/paddle/api/paddle/fluid/layers/multiclass_nms_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..57ac4ab0175890fd6345085b8fe85c32dd080946 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/multiclass_nms_cn.rst @@ -0,0 +1,82 @@ +.. _cn_api_fluid_layers_multiclass_nms: + +multiclass_nms +------------------------------- + +.. py:function:: paddle.fluid.layers.multiclass_nms(bboxes, scores, score_threshold, nms_top_k, keep_top_k, nms_threshold=0.3, normalized=True, nms_eta=1.0, background_label=0, name=None) + + + + +**多分类NMS** + +该OP用于对边界框(bounding box)和评分(scores)执行多类非极大值抑制(NMS)。 + +在NMS中,如果提供 ``score_threshold`` 阈值,则此OP贪心地选择所有得分(scores)高于 ``score_threshold`` 的检测边界框(bounding box)的子集,如果nms_top_k大于-1,则选择最大的nms_top_k置信度分数。 接着,该OP依据 adaptive nms(基于 ``nms_threshold`` 和 ``nms_eta``),删除与已选择的框IOU(交并比)高于nms_threshold 的重叠框。 + +在NMS步骤后,如果keep_top_k大于-1,则每个图像最多保留keep_top_k个框(bounding box)。 + +计算示例: + .. code-block:: text + + 如果: + box1.data = (2.0, 3.0, 7.0, 5.0) 数据格式为 (xmin, ymin, xmax, ymax) + box1.scores = (0.7, 0.2, 0.4) 其中 (label0.score=0.7, label1.score=0.2, label2.cores=0.4) + box2.data = (3.0, 4.0, 8.0, 5.0) + box2.score = (0.3, 0.3, 0.1) + nms_threshold = 0.3 + background_label = 0 + score_threshold = 0 + 则: + iou = 4/11 > 0.3 + out.data = [[1, 0.3, 3.0, 4.0, 8.0, 5.0], + [2, 0.4, 2.0, 3.0, 7.0, 5.0]] + + 输出数据格式为 (label, confidence, xmin, ymin, xmax, ymax) + + + +参数: + - **bboxes** (Variable) – 支持两种类型的边界框(bounding box): + + 1. (Tensor)形为[N,M,4 或 8、16、24、32]的3-D张量,表示将预测M个边界框的预测位置, N是批大小(batch size)。当边界框(bounding box)大小等于4时,每个边界框有四个坐标值,布局为[xmin,ymin,xmax,ymax]。数据类型为float32或float64。 + 2. (LoDTensor)形状为[M,C,4] M是边界框的个数,C是类别个数。数据类型为float32或float64 + + - **scores** (Variable) – 支持两种类型的分数: + + 1. (Tensor)具有形状[N,C,M]的3-D张量表示预测的置信度。 N是批量大小 batch size,C是种类数目,M是边界框bounding box的数量。对于每个类别,存在对应于M个边界框的总M个分数。请注意,M等于bboxes的第二维。数据类型为float32或float64。 + 2. 
(LoDTensor)具有形状[M,C]的2-D LoDTensor。 M是bbox的数量,C是种类数目。在这种情况下,输入bboxes应该是形为[M,C,4]的第二种情况。数据类型为float32或float64。 + + - **background_label** (int) – 背景标签(类别)的索引,如果设置为 0 ,则忽略背景标签(类别)。如果设置为 -1 ,则考虑所有类别。默认值:0 + - **score_threshold** (float) – 过滤掉低置信度分数的边界框的阈值。如果没有提供,请考虑所有边界框。 + - **nms_top_k** (int) – 基于 score_threshold 的过滤检测后,根据置信度保留的最大检测次数。 + - **nms_threshold** (float) – 在NMS中用于剔除检测框IOU的阈值,默认值:0.3 。 + - **nms_eta** (float) – 在NMS中用于调整 nms_threshold 的参数,设为1时表示nms_threshold不变。默认值:1.0 。 + - **keep_top_k** (int) – NMS步骤后每个图像要保留的总bbox数。 -1表示在NMS步骤之后保留所有bbox。 + - **normalized** (bool) – 检测是否已经经过正则化。默认值:True 。 + - **name** (str|None) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值为None。 + +返回:形为[No,6]的2-D LoDTensor,表示检测(detections)结果。每行有6个值:[标签label,置信度confidence,xmin,ymin,xmax,ymax]。或形为[No,10]的2-D LoDTensor,用来表示检测结果。 每行有10个值:[标签label,置信度confidence,x1,y1,x2,y2,x3,y3,x4,y4]。 No是检测的总数。 如果对所有图像都没有检测到的box,则lod将设置为{1},而Out仅包含一个值-1。 (1.3版本之后,当未检测到box时,lod从{0}更改为{1}) + +返回类型:Variable,数据类型与输入一致。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + boxes = fluid.layers.data(name='bboxes', shape=[81, 4], + dtype='float32', lod_level=1) + scores = fluid.layers.data(name='scores', shape=[81], + dtype='float32', lod_level=1) + out = fluid.layers.multiclass_nms(bboxes=boxes, + scores=scores, + background_label=0, + score_threshold=0.5, + nms_top_k=400, + nms_threshold=0.3, + keep_top_k=200, + normalized=False) + + + diff --git a/doc/paddle/api/paddle/fluid/layers/multiplex_cn.rst b/doc/paddle/api/paddle/fluid/layers/multiplex_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..fcc44a78105b6ea8270311729add4120fa4776fc --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/multiplex_cn.rst @@ -0,0 +1,73 @@ +.. _cn_api_fluid_layers_multiplex: + +multiplex +------------------------------- + +.. py:function:: paddle.fluid.layers.multiplex(inputs, index) + + + + +根据给定的index参数,该OP从每个输入Tensor中选择特定行构造输出Tensor。 + +设该OP输入包含 :math:`m` 个Tensor,其中 :math:`I_{i}` 代表第i个输入Tensor,:math:`i` 处于区间 :math:`[0,m)`。 + +设该OP输出为 :math:`O` ,其中 :math:`O[i]` 为输出的第i行,则输出满足: :math:`O[i] = I_{index[i]}[i]` + +示例: + +.. code-block:: text + + # 输入为4个shape为[4,4]的Tensor + inputs = [[[0,0,3,4], [0,1,3,4], [0,2,4,4], [0,3,3,4]], + [[1,0,3,4], [1,1,7,8], [1,2,4,2], [1,3,3,4]], + [[2,0,3,4], [2,1,7,8], [2,2,4,2], [2,3,3,4]], + [[3,0,3,4], [3,1,7,8], [3,2,4,2], [3,3,3,4]]] + + # index为shape为[4,1]的Tensor + index = [[3],[0],[1],[2]] + + # 输出shape为[4,4] + out = [[3,0,3,4] // out[0] = inputs[index[0]][0] = inputs[3][0] = [3,0,3,4] + [0,1,3,4] // out[1] = inputs[index[1]][1] = inputs[0][1] = [0,1,3,4] + [1,2,4,2] // out[2] = inputs[index[2]][2] = inputs[1][2] = [1,2,4,2] + [2,3,3,4]] // out[3] = inputs[index[3]][3] = inputs[2][3] = [2,3,3,4] + +参数: + - **inputs** (list) - 为输入Tensor列表,列表元素为数据类型为float32,float64,int32,int64的多维Tensor。所有输入Tensor的shape应相同,秩必须至少为2。 + - **index** (Variable)- 用来选择输入Tensor中的某些行构建输出Tensor的索引,为数据类型为int32或int64、shape为[M, 1]的2-D Tensor,其中M为输入Tensor个数。 + +返回:进行Multiplex运算后的输出Tensor。 + +返回类型:Variable(Tensor)。 + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + import numpy as np + + x1 = fluid.layers.data(name='x1', shape=[4], dtype='float32') + x2 = fluid.layers.data(name='x2', shape=[4], dtype='float32') + index = fluid.layers.data(name='index', shape=[1], dtype='int32') + out = fluid.layers.multiplex(inputs=[x1, x2], index=index) + + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(fluid.default_startup_program()) + + img1 = np.array([[1, 2], [3, 4]]).astype(np.float32) + img2 = np.array([[5, 6], [7, 8]]).astype(np.float32) + index = np.array([[1], [0]]).astype(np.int32) + + res = exe.run(fluid.default_main_program(), feed={'x1':img1, 'x2':img2, 'index':index}, fetch_list=[out]) + print(res) # [array([[5., 6.], [3., 4.]], dtype=float32)] + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/natural_exp_decay_cn.rst b/doc/paddle/api/paddle/fluid/layers/natural_exp_decay_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c3ecb177a1b65cce3c62b568422d45cdbfd7c710 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/natural_exp_decay_cn.rst @@ -0,0 +1,51 @@ +.. _cn_api_fluid_layers_natural_exp_decay: + +natural_exp_decay +------------------------------- + +.. py:function:: paddle.fluid.layers.natural_exp_decay(learning_rate, decay_steps, decay_rate, staircase=False) + + + + +将自然指数衰减运用到初始学习率上。 + +训练模型时,在训练过程中降低学习率。 自然指数衰减使用自然指数来计算衰减倍率,每 ``decay_steps`` 步衰减倍率的自然指数幂次项上增加 ``decay_rate`` 。 + +自然指数学习率衰减计算方式如下。 + +.. code-block:: python + + if not staircase: + decayed_learning_rate = learning_rate * exp(- decay_rate * (global_step / decay_steps)) + else: + decayed_learning_rate = learning_rate * exp(- decay_rate * floor(global_step / decay_steps)) + +参数: + - **learning_rate** (Variable|float) - 初始学习率,类型可以为学习率变量(Variable)或float型常量。 + - **decay_steps** (int) - 学习率衰减步长,见以上衰减运算。 + - **decay_rate** (float) - 学习率衰减率。见以上衰减运算。 + - **staircase** (bool) - 若为True,按离散区间衰减学习率,即每 ``decay_steps`` 步衰减倍率的自然指数幂次项上增加 ``decay_rate`` 。若为False,则按以上衰减运算持续衰减。默认False。 + +返回:Variable(Tensor) 随step衰减的学习率变量,维度为 :math:`[1]` 的1-D Tensor。 + +返回类型:变量(Variable) + +**示例代码:** + +.. code-block:: python + + import paddle.fluid as fluid + base_lr = 0.1 + sgd_optimizer = fluid.optimizer.SGD( + learning_rate=fluid.layers.natural_exp_decay( + learning_rate=base_lr, + decay_steps=10000, + decay_rate=0.5, + staircase=True)) + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/nce_cn.rst b/doc/paddle/api/paddle/fluid/layers/nce_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..abbafb324758f762a4201a4bb17ff935cc1f185f --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/nce_cn.rst @@ -0,0 +1,77 @@ +.. _cn_api_fluid_layers_nce: + +nce +------------------------------- + + +.. 
py:function:: paddle.fluid.layers.nce(input, label, num_total_classes, sample_weight=None, param_attr=None, bias_attr=None, num_neg_samples=None, name=None, sampler='uniform', custom_dist=None, seed=0, is_sparse=False) + + + + +计算并返回噪音对比估计损失值( noise-contrastive estimation training loss)。 +请参考 `Noise-contrastive estimation: A new estimation principle for unnormalized statistical models +`_ +该层默认使用均匀分布进行抽样。 + +参数: + - **input** (Variable) - 输入变量, 2-D 张量,形状为 [batch_size, dim],数据类型为 float32 或者 float64。 + - **label** (Variable) - 标签,2-D 张量,形状为 [batch_size, num_true_class],数据类型为 int64。 + - **num_total_classes** (int) - 所有样本中的类别的总数。 + - **sample_weight** (Variable,可选) - 存储每个样本权重,shape 为 [batch_size, 1] 存储每个样本的权重。每个样本的默认权重为1.0。 + - **param_attr** (ParamAttr,可选) :指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr,可选) : 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **num_neg_samples** (int) - 负样例的数量,默认值是10。 + - **name** (str,可选) - 该layer的名称,具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + - **sampler** (str,可选) – 采样器,用于从负类别中进行取样。可以是 ``uniform``, ``log_uniform`` 或 ``custom_dist`` , 默认 ``uniform`` 。 + - **custom_dist** (nd.array, 可选) – 第0维的长度为 ``num_total_classes`` 。 如果采样器类别为 ``custom_dist`` ,则使用此参数。custom_dist[i] 是第i个类别被取样的概率。默认为 None + - **seed** (int,可选) – 采样器使用的seed。默认为0 + - **is_sparse** (bool,可选) – 标志位,指明是否使用稀疏更新, 为 ``True`` 时 :math:`weight@GRAD` 和 :math:`bias@GRAD` 的类型会变为 SelectedRows。默认为 ``False`` 。 + +返回: nce loss,数据类型与 **input** 相同 + +返回类型: Variable + + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + window_size = 5 + words = [] + for i in range(window_size): + words.append(fluid.data( + name='word_{0}'.format(i), shape=[-1, 1], dtype='int64')) + + dict_size = 10000 + label_word = int(window_size / 2) + 1 + + embs = [] + for i in range(window_size): + if i == label_word: + continue + + emb = fluid.layers.embedding(input=words[i], size=[dict_size, 32], + param_attr='embed', is_sparse=True) + embs.append(emb) + + embs = fluid.layers.concat(input=embs, axis=1) + loss = fluid.layers.nce(input=embs, label=words[label_word], + num_total_classes=dict_size, param_attr='nce.w_0', + bias_attr='nce.b_0') + + #or use custom distribution + dist = np.array([0.05,0.5,0.1,0.3,0.05]) + loss = fluid.layers.nce(input=embs, label=words[label_word], + num_total_classes=5, param_attr='nce.w_1', + bias_attr='nce.b_1', + num_neg_samples=3, + sampler="custom_dist", + custom_dist=dist) + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/nn/py_func_cn.rst b/doc/paddle/api/paddle/fluid/layers/nn/py_func_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b6444f1a5735d9489932739806a67092ce34c73b --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/nn/py_func_cn.rst @@ -0,0 +1,132 @@ +.. _cn_api_fluid_layers_py_func: + +py_func +------------------------------- + + +.. 
py:function:: paddle.fluid.layers.py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None) + + + + +PaddlePaddle Fluid通过py_func在Python端注册OP。py_func的设计原理在于Paddle中的LodTensor与numpy数组可以方便的互相转换,从而可使用Python中的numpy API来自定义一个Python OP。 + +该自定义的Python OP的前向函数是 ``func``, 反向函数是 ``backward_func`` 。 Paddle将在前向部分调用 ``func`` ,并在反向部分调用 ``backward_func`` (如果 ``backward_func`` 不是None)。 ``x`` 为 ``func`` 的输入,必须为LoDTensor类型; ``out`` 为 ``func`` 的输出, 既可以是LoDTensor类型, 也可以是numpy数组。 + +反向函数 ``backward_func`` 的输入依次为:前向输入 ``x`` 、前向输出 ``out`` 、 ``out`` 的梯度。 如果 ``out`` 的某些变量没有梯度,则 ``backward_func`` 的相关输入变量为None。如果 ``x`` 的某些变量没有梯度,则用户应在 ``backward_func`` 中主动返回None。 + +在调用该接口之前,还应正确设置 ``out`` 的数据类型和形状,而 ``out`` 和 ``x`` 对应梯度的数据类型和形状将自动推断而出。 + +此功能还可用于调试正在运行的网络,可以通过添加没有输出的 ``py_func`` 运算,并在 ``func`` 中打印输入 ``x`` 。 + +参数: + - **func** (callable) - 所注册的Python OP的前向函数,运行网络时,将根据该函数与前向输入 ``x`` ,计算前向输出 ``out`` 。 在 ``func`` 建议先主动将LoDTensor转换为numpy数组,方便灵活的使用numpy相关的操作,如果未转换成numpy,则可能某些操作无法兼容。 + - **x** (Variable|tuple(Variable)|list[Variale]) - 前向函数 ``func`` 的输入,多个LoDTensor以tuple(Variable)或list[Variale]的形式传入,其中Variable为LoDTensor或Tenosr。 + - **out** (Variable|tuple(Variable)|list[Variale]) - 前向函数 ``func`` 的输出,可以为Variable|tuple(Variable)|list[Variale],其中Variable既可以为LoDTensor或Tensor,也可以为numpy数组。由于Paddle无法自动推断 ``out`` 的形状和数据类型,必须应事先创建 ``out`` 。 + - **backward_func** (callable,可选) - 所注册的Python OP的反向函数。默认值为None,意味着没有反向计算。若不为None,则会在运行网络反向时调用 ``backward_func`` 计算 ``x`` 的梯度。 + - **skip_vars_in_backward_input** (Variable,可选) - ``backward_func`` 的输入中不需要的变量,可以是Variable|tuple(Variable)|list[Variale]。 这些变量必须是 ``x`` 和 ``out`` 中的一个。默认值为None,意味着没有变量需要从 ``x`` 和 ``out`` 中去除。若不为None,则这些变量将不是 ``backward_func`` 的输入。该参数仅在 ``backward_func`` 不为None时有用。 + +返回: 前向函数的输出 ``out`` + +返回类型: Variable|tuple(Variable)|list[Variable] + +**示例代码1**: + +.. code-block:: python + + import paddle.fluid as fluid + import six + + # 自定义的前向函数,可直接输入LoDTenosor + def tanh(x): + return np.tanh(x) + + # 在反向函数中跳过前向输入x,返回x的梯度。 + # 必须使用np.array主动将LodTensor转换为numpy,否则"+/-"等操作无法使用 + def tanh_grad(y, dy): + return np.array(dy) * (1 - np.square(np.array(y))) + + # 自定义的前向函数,可用于调试正在运行的网络(打印值) + def debug_func(x): + print(x) + + def create_tmp_var(name, dtype, shape): + return fluid.default_main_program().current_block().create_var( + name=name, dtype=dtype, shape=shape) + + def simple_net(img, label): + hidden = img + for idx in six.moves.range(4): + hidden = fluid.layers.fc(hidden, size=200) + new_hidden = create_tmp_var(name='hidden_{}'.format(idx), + dtype=hidden.dtype, shape=hidden.shape) + + # 用户自定义的前向反向计算 + hidden = fluid.layers.py_func(func=tanh, x=hidden, + out=new_hidden, backward_func=tanh_grad, + skip_vars_in_backward_input=hidden) + + # 用户自定义的调试函数,打印出输入的LodTensor + fluid.layers.py_func(func=debug_func, x=hidden, out=None) + + prediction = fluid.layers.fc(hidden, size=10, act='softmax') + loss = fluid.layers.cross_entropy(input=prediction, label=label) + return fluid.layers.mean(loss) + +**示例代码2**: + +.. 
code-block:: python + + # 该示例展示了如何将LoDTensor转化为numpy数组,并利用numpy API来自定义一个OP + import paddle.fluid as fluid + import numpy as np + + def element_wise_add(x, y): + # 必须先手动将LodTensor转换为numpy数组,否则无法支持numpy的shape操作 + x = np.array(x) + y = np.array(y) + + if x.shape != y.shape: + raise AssertionError("the shape of inputs must be the same!") + + result = np.zeros(x.shape, dtype='int32') + for i in range(len(x)): + for j in range(len(x[0])): + result[i][j] = x[i][j] + y[i][j] + + return result + + def create_tmp_var(name, dtype, shape): + return fluid.default_main_program().current_block().create_var( + name=name, dtype=dtype, shape=shape) + + def py_func_demo(): + start_program = fluid.default_startup_program() + main_program = fluid.default_main_program() + + # 创建前向函数的输入变量 + x = fluid.data(name='x', shape=[2,3], dtype='int32') + y = fluid.data(name='y', shape=[2,3], dtype='int32') + + # 创建前向函数的输出变量,必须指明变量名称name/数据类型dtype/维度shape + output = create_tmp_var('output','int32', [3,1]) + + # 输入多个LodTensor以list[Variable]或tuple(Variable)形式 + fluid.layers.py_func(func=element_wise_add, x=[x,y], out=output) + + exe=fluid.Executor(fluid.CPUPlace()) + exe.run(start_program) + + # 给program喂入numpy数组 + input1 = np.random.randint(1, 10, size=[2,3], dtype='int32') + input2 = np.random.randint(1, 10, size=[2,3], dtype='int32') + out = exe.run(main_program, + feed={'x':input1, 'y':input2}, + fetch_list=[output.name]) + print("{0} + {1} = {2}".format(input1, input2, out)) + + py_func_demo() + + # 参考输出: + # [[5, 9, 9] + [[7, 8, 4] = [array([[12, 17, 13] + # [7, 5, 2]] [1, 3, 3]] [8, 8, 5]], dtype=int32)] diff --git a/doc/paddle/api/paddle/fluid/layers/noam_decay_cn.rst b/doc/paddle/api/paddle/fluid/layers/noam_decay_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d769630deea09ac0c5ef920d9c5760d7f7367f03 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/noam_decay_cn.rst @@ -0,0 +1,56 @@ +.. _cn_api_fluid_layers_noam_decay: + +noam_decay +------------------------------- + +.. py:function:: paddle.fluid.layers.noam_decay(d_model, warmup_steps) + + + + +Noam衰减方法 + +noam衰减的numpy实现如下: + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + # 设置超参数 + base_lr = 0.01 + d_model = 2 + current_steps = 20 + warmup_steps = 200 + # 计算 + lr_value = base_lr * np.power(d_model, -0.5) * np.min([ + np.power(current_steps, -0.5), + np.power(warmup_steps, -1.5) * current_steps]) + +请参照 `attention is all you need `_ + +参数: + - **d_model** (Variable|int) - 模型的输入、输出向量特征维度。类型可设置为标量Tensor,或int值。 + - **warmup_steps** (Variable|int) - 预热步数,类型可设置为标量Tensor,或int值。 + - **learning_rate** (Variable|float|int,可选) - 初始学习率。如果类型为Variable,则为shape为[1]的Tensor,数据类型为float32或float64;也可以是python的int类型。默认值为1.0。 + +返回:衰减的学习率 + +返回类型: Variable + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + warmup_steps = 100 + learning_rate = 0.01 + lr = fluid.layers.learning_rate_scheduler.noam_decay( + 1/(warmup_steps *(learning_rate ** 2)), + warmup_steps, + learning_rate) + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/not_equal_cn.rst b/doc/paddle/api/paddle/fluid/layers/not_equal_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7118715a21622b58ff310c38ce60f5ae80fa35b7 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/not_equal_cn.rst @@ -0,0 +1,38 @@ +.. _cn_api_fluid_layers_not_equal: + +not_equal +------------------------------- + +.. 
py:function:: paddle.fluid.layers.not_equal(x, y, cond=None, name=None) + + + + +该OP逐元素地返回 :math:`x != y` 的逻辑值,使用重载算子 `!=` 可以有相同的计算函数效果。 + +参数: + - **x** (Variable) – 进行比较的第一个输入,是一个多维的Tensor,数据类型可以是float32,float64,int32,int64。 + - **y** (Variable) – 进行比较的第二个输入,是一个多维的Tensor,数据类型可以是float32,float64,int32,int64。 + - **cond** (Variable,可选) – 如果为None,则创建一个Tensor来作为进行比较的输出结果,该Tensor的shape和数据类型和输入x一致;如果不为None,则将Tensor作为该OP的输出,数据类型和数据shape需要和输入x一致。默认值为None。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 +返回:输出结果的Tensor,数据的shape和输入x一致。 + +返回类型:变量(Variable),数据类型为bool类型。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.layers as layers + import numpy as np + label = layers.assign(np.array([2, 3], dtype='int32')) + limit = layers.assign(np.array([3, 2], dtype='int32')) + out = fluid.layers.not_equal(x=label, y=limit) #out=[True, True] + out1 = label != limit #out1=[True, True] + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/npair_loss_cn.rst b/doc/paddle/api/paddle/fluid/layers/npair_loss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..feaeff4a6d11def1f43339e37a9962a6451726f1 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/npair_loss_cn.rst @@ -0,0 +1,54 @@ +.. _cn_api_fluid_layers_npair_loss: + +npair_loss +------------------------------- + +.. py:function:: paddle.fluid.layers.npair_loss(anchor, positive, labels, l2_reg=0.002) + + + + +**Npair Loss Layer** + +参考阅读 `Improved Deep Metric Learning with Multi class N pair Loss Objective `_ + +NPair损失需要成对的数据。NPair损失分为两部分:第一部分是对嵌入向量进行L2正则化;第二部分是每一对数据的相似性矩阵的每一行和映射到ont-hot之后的标签的交叉熵损失的和。 + +参数: + - **anchor** (Variable) - 锚点图像的嵌入Tensor,形状为[batch_size, embedding_dims]的2-D Tensor。数据类型:float32和float64。 + - **positive** (Variable) - 正例图像的嵌入Tensor,形状为[batch_size, embedding_dims]的2-D Tensor。数据类型:float32和float64。 + - **labels** (Variable) - 标签向量,形状为[batch_size]的1-DTensor。数据类型:float32、float64和int64。 + - **l2_reg** (float) - 嵌入向量的L2正则化系数,默认:0.002。 + +返回: Tensor。经过npair loss计算之后的结果,是一个值。 + +返回类型:Variable + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + anchor = fluid.layers.data( + name = 'anchor', shape = [18, 6], dtype = 'float32', append_batch_size=False) + positive = fluid.layers.data( + name = 'positive', shape = [18, 6], dtype = 'float32', append_batch_size=False) + labels = fluid.layers.data( + name = 'labels', shape = [18], dtype = 'float32', append_batch_size=False) + + res = fluid.layers.npair_loss(anchor, positive, labels, l2_reg = 0.002) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + a = np.random.rand(18, 6).astype("float32") + p = np.random.rand(18, 6).astype("float32") + l = np.random.rand(18).astype("float32") + output = exe.run(feed={"anchor": a, "positive": p, "labels": l}, fetch_list=[res]) + print(output) + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/one_hot_cn.rst b/doc/paddle/api/paddle/fluid/layers/one_hot_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2aaaf117e6fc9dc5ab26032db91bef286f14e567 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/one_hot_cn.rst @@ -0,0 +1,80 @@ +.. _cn_api_fluid_layers_one_hot: + +one_hot +------------------------------- + +.. 
py:function:: paddle.fluid.layers.one_hot(input, depth, allow_out_of_range=False) + + + + +**注意:此OP要求输入Tensor shape的最后一维必须为1。此OP将在未来的版本中被移除!推荐使用fluid.** :ref:`cn_api_fluid_one_hot` 。 + +该OP将输入(input)中的每个id转换为一个one-hot向量,其长度为 ``depth`` ,该id对应的向量维度上的值为1,其余维度的值为0。 + +输出的Tensor(或LoDTensor)的shape是将输入shape的最后一维替换为depth的维度。 + +- 示例1(allow_out_of_range=False): + +.. code-block:: python + + 输入: + X.shape = [4, 1] + X.data = [[1], [1], [3], [0]] + depth = 4 + + 输出: + Out.shape = [4, 4] + Out.data = [[0., 1., 0., 0.], + [0., 1., 0., 0.], + [0., 0., 0., 1.], + [1., 0., 0., 0.]] + +- 示例2 (allow_out_of_range=True): + +.. code-block:: python + + 输入: + X.shape = [4, 1] + X.data = [[1], [1], [5], [0]] + depth = 4 + allow_out_of_range=True + + 输出: + Out.shape = [4, 4] + Out.data = [[0., 1., 0., 0.], + [0., 1., 0., 0.], + [0., 0., 0., 0.], ## 这一维的值是5,超过了depth,因此填成0 + [1., 0., 0., 0.]] + +- 示例3 (allow_out_of_range=False): + +.. code-block:: python + + 输入: + X.shape = [4, 1] + X.data = [[1], [1], [5], [0]] + depth = 4 + allow_out_of_range=False + + 输出:抛出 Illegal value 的异常 + X中第2维的值是5,超过了depth,而allow_out_of_range=False表示不允许超过,因此抛异常。 + + +参数: + - **input** (Variable) - 维度为 :math:`[N_1, ..., N_n, 1]` 的多维Tensor或LoDTensor,维度至少两维,且最后一维必须是1。数据类型为int32或int64。 + - **depth** (int) - 用于定义一个one-hot向量的长度。若输入为词id,则 ``depth`` 通常取值为词典大小。 + - **allow_out_of_range** (bool) - 指明input中所包含的id值是否可以大于depth值。当超过depth时,如果 `allow_out_of_range` 为False,则会抛出 `Illegal value` 的异常;如果设置为True,该id对应的向量为0向量。默认值为False。 + +返回:转换后的one_hot Tensor或LoDTensor,数据类型为float32。 + +返回类型:Variable + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + # 该代码对应上述第一个示例,其中输入label的shape是[4, 1],输出one_hot_label的shape是[4, 4] + label = fluid.layers.data(name="label", shape=[4, 1], append_batch_size=False, dtype="int64") + one_hot_label = fluid.layers.one_hot(input=label, depth=4) diff --git a/doc/paddle/api/paddle/fluid/layers/ones_cn.rst b/doc/paddle/api/paddle/fluid/layers/ones_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..842ca7ff66e298b01c9a7aec60c290dd4bfbb133 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/ones_cn.rst @@ -0,0 +1,22 @@ +.. _cn_api_fluid_layers_ones: + +ones +------------------------------- + +.. py:function:: paddle.fluid.layers.ones(shape,dtype,force_cpu=False) + +该OP创建形状为 ``shape`` 、数据类型为 ``dtype`` 且值全为1的Tensor。 + +参数: + - **shape** (tuple|list|Tensor) - 输出Tensor的形状, ``shape`` 的数据类型为int32或者int64。 + - **dtype** (np.dtype|str) - 输出Tensor的数据类型,数据类型必须为bool、 float16、float32、float64、int32或int64。 + - **force_cpu** (bool, 可选) – 是否强制将输出Tensor写入CPU内存。如果 ``force_cpu`` 为False,则将输出Tensor写入当前所在运算设备的内存,默认为False。 + +返回:值全为1的Tensor,数据类型和 ``dtype`` 定义的类型一致。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + data = fluid.layers.ones(shape=[2, 4], dtype='float32') # [[1., 1., 1., 1.], [1., 1., 1., 1.]] diff --git a/doc/paddle/api/paddle/fluid/layers/ones_like_cn.rst b/doc/paddle/api/paddle/fluid/layers/ones_like_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5d1e6a89788690d771f0d1cb986e4bcf425e5968 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/ones_like_cn.rst @@ -0,0 +1,34 @@ +.. _cn_api_fluid_layers_ones_like: + +ones_like +------------------------------- + +.. py:function:: paddle.fluid.layers.ones_like(x, out=None) + + + + +ones_like + +该功能创建一个形状与类型与x相似的张量,初始值为1。 + + +参数: + - **x** (Variable) - 指定形状与数据类型的输入张量 + - **out** (Variable)-输出张量 + +返回:输出张量 + +返回类型:变量(Variable) + +**代码示例**: + +.. 
code-block:: python + + import paddle.fluid as fluid + + x = fluid.layers.data(name='x', dtype='float32', shape=[3], append_batch_size=False) + data = fluid.layers.ones_like(x) # [1.0, 1.0, 1.0] + + + diff --git a/doc/paddle/api/paddle/fluid/layers/pad2d_cn.rst b/doc/paddle/api/paddle/fluid/layers/pad2d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8f5780f7147e7a7f4fd8c9a3dbfd532d781c95bf --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/pad2d_cn.rst @@ -0,0 +1,62 @@ +.. _cn_api_fluid_layers_pad2d: + +pad2d +------------------------------- + +.. py:function:: paddle.fluid.layers.pad2d(input, paddings=[0, 0, 0, 0], mode='constant', pad_value=0.0, data_format='NCHW', name=None) + + + + +该OP依照 paddings 和 mode 属性对input进行2维 ``pad`` 。 + +参数: + - **input** (Variable) - 类型为float32的4-D Tensor, format为 `[N, C, H, W]` 或 `[N, H, W, C]` 。 + - **paddings** (Variable | List[int32]) - 填充大小。如果paddings是一个List,它必须包含四个整数 `[padding_top, padding_bottom, padding_left, padding_right]` 。 + 如果paddings是Variable, 则是类型为int32 的1-D Tensor,shape是 `[4]` 。默认值为 `[0,0,0,0]` 。 + - **mode** (str) - padding的三种模式,分别为 `'constant'` (默认)、 `'reflect'` 、 `'edge'` 。 `'constant'` 为填充常数 `pad_value` , `'reflect'` 为填充以input边界值为轴的映射, `'edge'` 为填充input边界值。具体结果可见以下示例。默认值为 `'constant'` 。 + - **pad_value** (float32) - 以 `'constant'` 模式填充区域时填充的值。默认值为0.0。 + - **data_format** (str) - 指定input的format,可为 `'NCHW'` 和 `'NHWC'` ,默认值为 `'NCHW'` 。 + - **name** (str, 可选) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,缺省值为None。 +返回: 对input进行2维 ``pad`` 的结果,数据类型和input一样的4-D Tensor。 + +返回类型:Variable + +**示例**: + +.. code-block:: text + + Input = [[[[1., 2., 3.], + [4., 5., 6.]]]] + + Case 0: + paddings = [0, 1, 2, 3], + mode = 'constant' + pad_value = 0 + Out = [[[[0., 0., 1., 2., 3., 0., 0., 0.], + [0., 0., 4., 5., 6., 0., 0., 0.], + [0., 0., 0., 0., 0., 0., 0., 0.]]]] + + Case 1: + paddings = [0, 1, 2, 1], + mode = 'reflect' + Out = [[[[3., 2., 1., 2., 3., 2.], + [6., 5., 4., 5., 6., 5.], + [3., 2., 1., 2., 3., 2.]]]] + + Case 2: + paddings = [0, 1, 2, 1], + mode = 'edge' + Out = [[[[1., 1., 1., 2., 3., 3.], + [4., 4., 4., 5., 6., 6.], + [4., 4., 4., 5., 6., 6.]]]] + + + +**代码示例:** + +.. code-block:: python + + import paddle.fluid as fluid + data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32') + result = fluid.layers.pad2d(input=data, paddings=[0, 1, 2, 3], mode='reflect') diff --git a/doc/paddle/api/paddle/fluid/layers/pad_cn.rst b/doc/paddle/api/paddle/fluid/layers/pad_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e80815f41e96b3fb8b35878cfffca83b7603e677 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/pad_cn.rst @@ -0,0 +1,51 @@ +.. _cn_api_fluid_layers_pad: + +pad +------------------------------- + +.. py:function:: paddle.fluid.layers.pad(x, paddings, pad_value=0.0, name=None) + + + + +该OP在Tensor上填充一个由 ``pad_value`` 给出的常数值,填充宽度由 ``paddings`` 指定。 +其中,维度 ``i`` 中 ``x`` 内容前填充的值个数用 ``paddings[2*i]`` 表示,维度 ``i`` 中 ``x`` 内容后填充的值个数用 ``paddings[2*i+1]`` 表示。 + +**示例**: + +.. code-block:: text + + Given: + x = [[1, 2], [3, 4]] + + paddings = [0, 1, 1, 2] + + pad_value = 0 + + Return: + out = [[0, 1, 2, 0, 0] + [0, 3, 4, 0, 0] + [0, 0, 0, 0, 0]] + + +参数: + - **x** (Variable) — 多维Tensor,数据类型为float32 + - **paddings** (list of integers) — 整数列表,指定每个维度填充值的个数。维度 ``i`` 中 ``x`` 内容前填充的值个数用 ``paddings[2*i]`` 表示,维度 ``i`` 中 ``x`` 内容后填充的值个数用 ``paddings[2*i+1]`` 表示。 ``paddings`` 长度必须是 ``rank(x)×2`` + - **pad_value** (float32, 可选) — 用来填充的常量值,数据类型为float。默认值为0. 
+ - **name** (str|None) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None。 + +返回: 填充后的Tensor,数据类型与输入 ``x`` 相同 + +返回类型: Variable + + +**代码示例** + +.. code-block:: python + + # x 为一个秩为2的张量 + import paddle.fluid as fluid + x = fluid.data(name='data', shape=[300, 300], dtype='float32') + out = fluid.layers.pad(x=x, paddings=[0, 1, 1, 2], pad_value=0.) + + diff --git a/doc/paddle/api/paddle/fluid/layers/pad_constant_like_cn.rst b/doc/paddle/api/paddle/fluid/layers/pad_constant_like_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5abd44811907a8c5ca85cd1c5292ac1468974848 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/pad_constant_like_cn.rst @@ -0,0 +1,83 @@ +.. _cn_api_fluid_layers_pad_constant_like: + +pad_constant_like +------------------------------- + +.. py:function:: paddle.fluid.layers.pad_constant_like(x, y, pad_value=0.0, name=None) + + + + +该OP使用 ``pad_value`` 填充 ``y`` ,填充到每个维度值的数量由x和y的形状而指定,((0,x.shape[0] - y.shape[0]), ..., (0, x.shape[i] - y.shape[i]), ..., (0, x.shape[n] - y.shape[n]))是每个维度填充的宽度,对于维度i,填充宽度 ``(0, x.shape[i] - y.shape[i])`` ,表示在y的第i维开头不填充,而在末尾填充 ``x.shape[i] - y.shape[i]`` 个位置。该OP要求y与x具有相同的秩,并且对每个维度i, ``y.shape[i] <= x.shape[i]`` 。 + +**示例**: + +.. code-block:: text + + Given: + X = [[[[ 0, 1, 2], + [ 3, 4, 5]], + [[ 6, 7, 8], + [ 9, 10, 11]], + [[12, 13, 14], + [15, 16, 17]]], + [[[18, 19, 20], + [21, 22, 23]], + [[24, 25, 26], + [27, 28, 29]], + [[30, 31, 32], + [33, 34, 35]]]] + + X.shape = (2, 3, 2, 3) + + Y = [[[[35, 36, 37]], + [[38, 39, 40]], + [[41, 42, 43]]]] + + Y.shape = (1, 3, 1, 3) + + And + pad_value = 0. + + Return: + Out = [[[[35, 36, 37], + [ 0, 0, 0]], + [[38, 39, 40], + [ 0, 0, 0]], + [[41, 42, 43], + [ 0, 0, 0]]], + [[[ 0, 0, 0], + [ 0, 0, 0]], + [[ 0, 0, 0], + [ 0, 0, 0]], + [[ 0, 0, 0], + [ 0, 0, 0]]]] + + Out.shape = [2, 3, 2, 3] + + +参数: + - **x** (Variable)- 多维Tensor + - **y** (Variable)- 多维Tensor,与x具有相同的秩,而且对任意维度 ``i`` ,要求满足 ``y.shape[i] <= x.shape[i]`` 。数据类型为float32或float64 + - **pad_value** (float,可选) - 用于填充的常量值。默认值为0. + - **name** (str | None) - (str|None) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None。 + +返回:经过维度填充后的Tensor,与x具有相同的shape,与y具有相同的数据类型 + +返回类型: Variable + +**示例代码** + +.. code-block:: python + + # x是秩为4的tensor, x.shape = (2, 3, 2, 3) + # y是秩为4的tensor, y.shape = (1, 3, 1, 3) + import paddle.fluid as fluid + x = fluid.data(name='x', shape=[2,3,2,3], dtype='float32') + y = fluid.data(name='y', shape=[1,3,1,3], dtype='float32') + out = fluid.layers.pad_constant_like(x=x, y=y, pad_value=0.) + # out是秩为4的tensor, out.shape = [2, 3 ,2 , 3] + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/piecewise_decay_cn.rst b/doc/paddle/api/paddle/fluid/layers/piecewise_decay_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..592fa3f04e31d54a76192cfacb4912dfc48cd8af --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/piecewise_decay_cn.rst @@ -0,0 +1,48 @@ +.. _cn_api_fluid_layers_piecewise_decay: + +piecewise_decay +------------------------------- + +.. py:function:: paddle.fluid.layers.piecewise_decay(boundaries,values) + + + + +对初始学习率进行分段衰减。 + +该算法可用如下代码描述。 + +.. code-block:: text + + boundaries = [10000, 20000] + values = [1.0, 0.5, 0.1] + if step < 10000: + learning_rate = 1.0 + elif 10000 <= step < 20000: + learning_rate = 0.5 + else: + learning_rate = 0.1 + +参数: + - **boundaries(list)** - 代表步数的数字 + - **values(list)** - 学习率的值,不同的步边界中的学习率值 + +返回:衰减的学习率 + +**代码示例**: + +.. 
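note:: + 在下面的官方示例之前,先给出一段纯 Python 的数值示意(非官方示例,函数名 piecewise_lr 为自拟),用于核对上文的分段规则,并假设 len(values) == len(boundaries) + 1: + +.. code-block:: python + + def piecewise_lr(step, boundaries, values): + # boundaries 把训练步数划分成 len(values) 个区间,逐区间查找当前学习率 + for i, b in enumerate(boundaries): + if step < b: + return values[i] + return values[-1] + + print(piecewise_lr(5000, [10000, 20000], [1.0, 0.5, 0.1])) # 1.0 + print(piecewise_lr(15000, [10000, 20000], [1.0, 0.5, 0.1])) # 0.5 + print(piecewise_lr(25000, [10000, 20000], [1.0, 0.5, 0.1])) # 0.1 + +.. 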
code-block:: python + + import paddle.fluid as fluid + boundaries = [10000, 20000] + values = [1.0, 0.5, 0.1] + optimizer = fluid.optimizer.Momentum( + momentum=0.9, + learning_rate=fluid.layers.piecewise_decay(boundaries=boundaries, values=values), + regularization=fluid.regularizer.L2Decay(1e-4)) + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/pixel_shuffle_cn.rst b/doc/paddle/api/paddle/fluid/layers/pixel_shuffle_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3324269a4936bca2993bf9b124f2d11760df2cb9 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/pixel_shuffle_cn.rst @@ -0,0 +1,42 @@ +.. _cn_api_fluid_layers_pixel_shuffle: + +pixel_shuffle +------------------------------- + +.. py:function:: paddle.fluid.layers.pixel_shuffle(x, upscale_factor) + + + + +该OP将一个形为[N, C, H, W]的Tensor重新排列成形为 [N, C/r**2, H*r, W*r] 的Tensor。这样做有利于实现步长(stride)为1/r的高效sub-pixel(亚像素)卷积。详见Shi等人在2016年发表的论文 `Real Time Single Image and Video Super Resolution Using an Efficient Sub Pixel Convolutional Neural Network `_ 。 + +.. code-block:: text + + 给定一个形为 x.shape = [1, 9, 4, 4] 的4-D张量 + 设定:upscale_factor=3 + 那么输出张量的形为:[1, 1, 12, 12] + +参数: + - **x** (Variable)- 维度为 :math:`[N_1, N_2, ..., N_k, D]` 的多维Tensor,其中最后一维D是类别数目。数据类型为float32或float64。 + - **upscale_factor** (int)- 增大空间分辨率的增大因子 + + +返回:根据新的维度信息进行重组的张量 + +返回类型: Variable + +抛出异常: ``ValueError`` - 如果upscale_factor的平方不能整除输入的通道维度(C)的大小。 + + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + input = fluid.layers.data(name="input", shape=[9,4,4]) + output = fluid.layers.pixel_shuffle(x=input, upscale_factor=3) + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/polygon_box_transform_cn.rst b/doc/paddle/api/paddle/fluid/layers/polygon_box_transform_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a86cf9e85a000c4024ab6f935825dedf765eadc4 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/polygon_box_transform_cn.rst @@ -0,0 +1,39 @@ +.. _cn_api_fluid_layers_polygon_box_transform: + +polygon_box_transform +------------------------------- + +.. py:function:: paddle.fluid.layers.polygon_box_transform(input, name=None) + + + + +**PolygonBoxTransform 算子** + +该op用于将偏移坐标改变为真实的坐标。 + +输入4-D Tensor是检测网络最终的几何输出。我们使用 2*n 个数来表示从 polygon_box 中的 n 个顶点(vertice)到像素位置的偏移。由于每个距离偏移包含两个数 :math:`(x_i, y_i)` ,所以几何输出通道数为 2*n。 + +参数: + - **input** (Variable) - 形状为 :math:`[batch\_size,geometry\_channels,height,width]` 的4-D Tensor,数据类型为float32或float64。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:polygon_box_transform输出的真实坐标,是一个 4-D Tensor。数据类型为float32或float64。 + +返回类型:Variable + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + input = fluid.layers.data(name='input', shape=[4, 10, 5, 5], + append_batch_size=False, dtype='float32') + out = fluid.layers.polygon_box_transform(input) + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/polynomial_decay_cn.rst b/doc/paddle/api/paddle/fluid/layers/polynomial_decay_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..55bb9d00bfa315ebcc189f96caab3b525eb3cb0f --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/polynomial_decay_cn.rst @@ -0,0 +1,50 @@ +.. _cn_api_fluid_layers_polynomial_decay: + +polynomial_decay +------------------------------- + +.. py:function:: paddle.fluid.layers.polynomial_decay(learning_rate,decay_steps,end_learning_rate=0.0001,power=1.0,cycle=False) + + + + +对初始学习率使用多项式衰减 + +.. 
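note:: + 衰减规则见下方伪代码;作为补充,这里先给出 power=1、cycle=False 情形下的一个纯 Python 数值示意(非官方示例,函数名 poly_lr 为自拟): + +.. code-block:: python + + def poly_lr(step, lr, decay_steps, end_lr=0.0001, power=1.0): + # 对应下方伪代码中 cycle=False 的分支 + step = min(step, decay_steps) + return (lr - end_lr) * (1 - step / decay_steps) ** power + end_lr + + print(poly_lr(0, 0.01, 5000, end_lr=0.0)) # 0.01 + print(poly_lr(2500, 0.01, 5000, end_lr=0.0)) # 0.005 + print(poly_lr(5000, 0.01, 5000, end_lr=0.0)) # 0.0 + +.. 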
code-block:: text + + if cycle: + decay_steps = decay_steps * ceil(global_step / decay_steps) + else: + global_step = min(global_step, decay_steps) + decayed_learning_rate = (learning_rate - end_learning_rate) * + (1 - global_step / decay_steps) ^ power + end_learning_rate + +参数: + - **learning_rate** (Variable|float) - 训练过程中的初始学习率,数据类型为float的常数或变量。 + - **decay_steps** (int) - 衰减步数 + - **end_learning_rate** (float) - 训练过程的最终学习率 + - **power** (float) - 多项式衰减系数 + - **cycle** (bool) - step 超出 decay_steps 后是否继续循环,默认为False + +返回:衰减的学习率 + +返回类型:变量(Variable) + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + start_lr = 0.01 + total_step = 5000 + end_lr = 0 + lr = fluid.layers.polynomial_decay( + start_lr, total_step, end_lr, power=1) + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/pool2d_cn.rst b/doc/paddle/api/paddle/fluid/layers/pool2d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c30d4f30ea8c9ad62754aa2d1ad22ce502f26a38 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/pool2d_cn.rst @@ -0,0 +1,212 @@ +.. _cn_api_fluid_layers_pool2d: + +pool2d +------------------------------- + +.. py:function:: paddle.fluid.layers.pool2d(input, pool_size=-1, pool_type='max', pool_stride=1, pool_padding=0, global_pooling=False, use_cudnn=True, ceil_mode=False, name=None, exclusive=True, data_format="NCHW") + + + + +该OP使用上述输入参数的池化配置,为二维空间池化操作,根据 ``input`` ,池化核大小 ``pool_size`` ,池化类型 ``pool_type`` ,步长 ``pool_stride`` ,填充 ``pool_padding`` 等参数得到输出。 + +输入 ``input`` 和输出(out)采用NCHW或NHWC格式,N为批大小,C是通道数,H是特征高度,W是特征宽度。 + +参数 ``pool_size`` 和 ``pool_stride`` 含有两个整型元素,分别表示高度和宽度维度上的参数。 + +输入 ``input`` 和输出(out)的形状可能不同。 + + +例如: + +输入: + ``input`` 的形状::math:`\left ( N,C,H_{in},W_{in} \right )` + +输出: + ``out`` 的形状::math:`\left ( N,C,H_{out},W_{out} \right )` + +如果 ``ceil_mode`` = false: + +.. math:: + H_{out} = \frac{(H_{in} - pool\_size[0] + pad\_height\_top + pad\_height\_bottom)}{pool\_stride[0]} + 1 + +.. math:: + W_{out} = \frac{(W_{in} - pool\_size[1] + pad\_width\_left + pad\_width\_right)}{pool\_stride[1]} + 1 + +如果 ``ceil_mode`` = true: + +.. math:: + H_{out} = \frac{(H_{in} - pool\_size[0] + pad\_height\_top + pad\_height\_bottom + pool\_stride[0] - 1)}{pool\_stride[0]} + 1 + +.. math:: + W_{out} = \frac{(W_{in} - pool\_size[1] + pad\_width\_left + pad\_width\_right + pool\_stride[1] - 1)}{pool\_stride[1]} + 1 + +如果 ``exclusive`` = false: + +.. math:: + hstart &= i * pool\_stride[0] - pad\_height\_top \\ + hend &= hstart + pool\_size[0] \\ + wstart &= j * pool\_stride[1] - pad\_width\_left \\ + wend &= wstart + pool\_size[1] \\ + Output(i ,j) &= \frac{sum(Input[hstart:hend, wstart:wend])}{pool\_size[0] * pool\_size[1]} + +如果 ``exclusive`` = true: + +.. math:: + hstart &= max(0, i * pool\_stride[0] - pad\_height\_top) \\ + hend &= min(H, hstart + pool\_size[0]) \\ + wstart &= max(0, j * pool\_stride[1] - pad\_width\_left) \\ + wend & = min(W, wstart + pool\_size[1]) \\ + Output(i ,j) & = \frac{sum(Input[hstart:hend, wstart:wend])}{(hend - hstart) * (wend - wstart)} + +如果 ``pool_padding`` = "SAME": + +.. math:: + H_{out} = \frac{(H_{in} + pool\_stride[0] - 1)}{pool\_stride[0]} + +.. math:: + W_{out} = \frac{(W_{in} + pool\_stride[1] - 1)}{pool\_stride[1]} + +如果 ``pool_padding`` = "VALID": + +.. math:: + H_{out} = \frac{(H_{in} - pool\_size[0])}{pool\_stride[0]} + 1 + +.. 
math:: + W_{out} = \frac{(W_{in} - pool\_size[1])}{pool\_stride[1]} + 1 + +参数: + - **input** (Variable) - 形状为 :math:`[N, C, H, W]` 或 :math:`[N, H, W, C]` 的4-D Tensor,N是批尺寸,C是通道数,H是特征高度,W是特征宽度,数据类型为float32或float64。 + - **pool_size** (int|list|tuple) - 池化核的大小。如果它是一个元组或列表,那么它包含两个整数值:(pool_size_Height, pool_size_Width)。若为一个整数,则表示H和W维度上均为该值,比如若pool_size=2, 则池化核大小为[2,2]。 + - **pool_type** (str) - 池化类型,可以为"max"或"avg","max"对应max-pooling,"avg"对应average-pooling。默认值:"max"。 + - **pool_stride** (int|list|tuple) - 池化层的步长。如果它是一个元组或列表,它将包含两个整数:(pool_stride_Height, pool_stride_Width)。若为一个整数,则表示H和W维度上均为该值,比如若pool_stride=3, 则池化层步长为[3,3]。默认值:1。 + - **pool_padding** (int|list|tuple|str) - 池化填充。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``pool_padding`` = "SAME"或 ``pool_padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有3种格式:(1)包含2个整数值:[pad_height, pad_width];(2)包含4个整数值:[pad_height_top, pad_height_bottom, pad_width_left, pad_width_right];(3)包含4个二元组:当 ``data_format`` 为"NCHW"时为 [[0,0], [0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]],当 ``data_format`` 为"NHWC"时为[[0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]。若为一个整数,则表示H和W维度上均为该值。默认值:0。 + - **global_pooling** (bool)- 是否用全局池化。如果global_pooling = True, 已设置的 ``pool_size`` 和 ``pool_padding`` 会被忽略, ``pool_size`` 将被设置为 :math:`[H_{in}, W_{in}]` , ``pool_padding`` 将被设置为0。默认值:False。 + - **use_cudnn** (bool)- 是否使用cudnn内核。只有已安装cudnn库时才有效。默认值:True。 + - **ceil_mode** (bool)- 是否用ceil函数计算输出高度和宽度。计算细节可参考上述 ``ceil_mode`` = true或 ``ceil_mode`` = false 时的计算公式。默认值:False。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置。默认值:None。 + - **exclusive** (bool) - 是否在平均池化模式忽略填充值。计算细节可参考上述 ``exclusive`` = true或 ``exclusive`` = false 时的计算公式。默认值:True。 + - **data_format** (str) - 输入和输出的数据格式,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 + +返回: 4-D Tensor,数据类型与 ``input`` 一致。 + +返回类型:Variable。 + +抛出异常: + - ``ValueError`` - 如果 ``pool_type`` 既不是"max"也不是"avg"。 + - ``ValueError`` - 如果 ``global_pooling`` 为False并且 ``pool_size`` 为-1。 + - ``TypeError`` - 如果 ``use_cudnn`` 不是bool值。 + - ``ValueError`` - 如果 ``data_format`` 既不是"NCHW"也不是"NHWC"。 + - ``ValueError`` - 如果 ``pool_padding`` 是字符串,既不是"SAME"也不是"VALID"。 + - ``ValueError`` - 如果 ``pool_padding`` 是"VALID",但是 ``ceil_mode`` 是True。 + - ``ValueError`` - 如果 ``pool_padding`` 含有4个二元组,与批尺寸对应维度的值不为0或者与通道对应维度的值不为0。 + - ``ShapeError`` - 如果 ``input`` 既不是4-D Tensor 也不是5-D Tensor。 + - ``ShapeError`` - 如果 ``input`` 的维度减去 ``pool_stride`` 的尺寸大小不是2。 + - ``ShapeError`` - 如果 ``pool_size`` 和 ``pool_stride`` 的尺寸大小不相等。 + - ``ShapeError`` - 如果计算出的输出形状的元素值不大于0。 + + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + data_NCHW = fluid.data(name='data', shape=[None, 3, 8, 8], dtype='float32') + + data_NHWC = fluid.data(name='data', shape=[None, 8, 8, 3], dtype='float32') + + # example 1: + # ceil_mode = False + out_1 = fluid.layers.pool2d( + input=data_NCHW, # shape: [2, 3, 8, 8] + pool_size=[3,3], + pool_type='avg', + pool_stride=[3,3], + pool_padding=[2,1], # it is same as pool_padding = [2,2,1,1] + global_pooling=False, + ceil_mode=False, + exclusive=True, + data_format="NCHW") + # shape of out_1: [2, 3, 4, 3] + + # example 2: + # ceil_mode = True (different from example 1) + out_2 = fluid.layers.pool2d( + input=data_NCHW, + pool_size=[3,3], + pool_type='avg', + pool_stride=[3,3], + pool_padding=[[0,0], [0,0], [2,2], [1,1]], # it is same as pool_padding = [2,2,1,1] + global_pooling=False, + ceil_mode=True, + exclusive=True, + data_format="NCHW") + # shape of out_2: [2, 3, 4, 4] which is different from out_1 + + # example 3: + # pool_padding = "SAME" (different from example 1) + out_3 = fluid.layers.pool2d( + input=data_NCHW, + pool_size=[3,3], + pool_type='avg', + pool_stride=[3,3], + pool_padding="SAME", + global_pooling=False, + ceil_mode=False, + exclusive=True, + data_format="NCHW") + # shape of out_3: [2, 3, 3, 3] which is different from out_1 + + # example 4: + # pool_padding = "VALID" (different from example 1) + out_4 = fluid.layers.pool2d( + input=data_NCHW, + pool_size=[3,3], + pool_type='avg', + pool_stride=[3,3], + pool_padding="VALID", + global_pooling=False, + ceil_mode=False, + exclusive=True, + data_format="NCHW") + # shape of out_4: [2, 3, 2, 2] which is different from out_1 + + # example 5: + # global_pooling = True (different from example 1) + # It will be set pool_size = [8,8] and pool_padding = [0,0] actually. + out_5 = fluid.layers.pool2d( + input=data_NCHW, + pool_size=[3,3], + pool_type='avg', + pool_stride=[3,3], + pool_padding=[2,1], + global_pooling=True, + ceil_mode=False, + exclusive=True, + data_format="NCHW") + # shape of out_5: [2, 3, 1, 1] which is different from out_1 + + # example 6: + # data_format = "NHWC" (different from example 1) + out_6 = fluid.layers.pool2d( + input=data_NHWC, # shape: [2, 8, 8, 3] + pool_size=[3,3], + pool_type='avg', + pool_stride=[3,3], + pool_padding=[2,1], + global_pooling=False, + ceil_mode=False, + exclusive=True, + data_format="NHWC") + # shape of out_6: [2, 4, 3, 3] which is different from out_1 + + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/pool3d_cn.rst b/doc/paddle/api/paddle/fluid/layers/pool3d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..486aacef3e4496f5bda99867308750c84f41c901 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/pool3d_cn.rst @@ -0,0 +1,223 @@ +.. _cn_api_fluid_layers_pool3d: + +pool3d +------------------------------- + +.. 
py:function:: paddle.fluid.layers.pool3d(input, pool_size=-1, pool_type='max', pool_stride=1, pool_padding=0, global_pooling=False, use_cudnn=True, ceil_mode=False, name=None, exclusive=True, data_format="NCDHW") + + + + +该OP使用上述输入参数的池化配置,为三维空间池化操作,根据 ``input`` ,池化核大小 ``pool_size`` ,池化类型 ``pool_type`` ,步长 ``pool_stride`` 和填充 ``pool_padding`` 等参数计算输出。 + +输入 ``input`` 和输出(Out)采用NCDHW或NDHWC格式,其中N是批大小,C是通道数,D,H和W分别是特征的深度,高度和宽度。 + +参数 ``pool_size`` 和 ``pool_stride`` 含有三个整型元素。 分别代表深度,高度和宽度维度上的参数。 + +输入 ``input`` 和输出(Out)的形状可能不同。 + + +例如: + +输入: + ``X`` 的形状: :math:`(N, C, D_{in}, H_{in}, W_{in})` + +输出: + ``out`` 的形状: :math:`(N, C, D_{out}, H_{out}, W_{out})` + +当 ``ceil_mode`` = false时, + +.. math:: + + D_{out} &= \frac{(D_{in} - pool\_size[0] + pad\_depth\_front + pad\_depth\_back)}{pool\_stride[0]} + 1\\ + H_{out} &= \frac{(H_{in} - pool\_size[1] + pad\_height\_top + pad\_height\_bottom)}{pool\_stride[1]} + 1\\ + W_{out} &= \frac{(W_{in} - pool\_size[2] + pad\_width\_left + pad\_width\_right)}{pool\_stride[2]} + 1 + +当 ``ceil_mode`` = true时, + +.. math:: + + D_{out} &= \frac{(D_{in} - pool\_size[0] + pad\_depth\_front + pad\_depth\_back + pool\_stride[0] -1)}{pool\_stride[0]} + 1\\ + H_{out} &= \frac{(H_{in} - pool\_size[1] + pad\_height\_top + pad\_height\_bottom + pool\_stride[1] -1)}{pool\_stride[1]} + 1\\ + W_{out} &= \frac{(W_{in} - pool\_size[2] + pad\_width\_left + pad\_width\_right + pool\_stride[2] -1)}{pool\_stride[2]} + 1 + +当 ``exclusive`` = false时, + +.. math:: + dstart &= i * pool\_stride[0] - pad\_depth\_front \\ + dend &= dstart + pool\_size[0] \\ + hstart &= j * pool\_stride[1] - pad\_height\_top \\ + hend &= hstart + pool\_size[1] \\ + wstart &= k * pool\_stride[2] - pad\_width\_left \\ + wend &= wstart + pool\_size[2] \\ + Output(i ,j, k) &= \frac{sum(Input[dstart:dend, hstart:hend, wstart:wend])}{pool\_size[0] * pool\_size[1] * pool\_size[2]} + +如果 ``exclusive`` = true: + +.. math:: + dstart &= max(0, i * pool\_stride[0] - pad\_depth\_front) \\ + dend &= min(D, dstart + pool\_size[0]) \\ + hstart &= max(0, j * pool\_stride[1] - pad\_height\_top) \\ + hend &= min(H, hstart + pool\_size[1]) \\ + wstart &= max(0, k * pool\_stride[2] - pad\_width\_left) \\ + wend & = min(W, wstart + pool\_size[2]) \\ + Output(i ,j, k) & = \frac{sum(Input[dstart:dend, hstart:hend, wstart:wend])}{(dend - dstart) * (hend - hstart) * (wend - wstart)} + +如果 ``pool_padding`` = "SAME": + +.. math:: + D_{out} = \frac{(D_{in} + pool\_stride[0] - 1)}{pool\_stride[0]} + +.. math:: + H_{out} = \frac{(H_{in} + pool\_stride[1] - 1)}{pool\_stride[1]} + +.. math:: + W_{out} = \frac{(W_{in} + pool\_stride[2] - 1)}{pool\_stride[2]} + +如果 ``pool_padding`` = "VALID": + +.. math:: + D_{out} = \frac{(D_{in} - pool\_size[0])}{pool\_stride[0]} + 1 + +.. math:: + H_{out} = \frac{(H_{in} - pool\_size[1])}{pool\_stride[1]} + 1 + +.. 
math:: + W_{out} = \frac{(W_{in} - pool\_size[2])}{pool\_stride[2]} + 1 + + +参数: + - **input** (Vairable) - 形状为 :math:`[N, C, D, H, W]` 或 :math:`[N, D, H, W, C]` 的5-D Tensor,N是批尺寸,C是通道数,D是特征深度,H是特征高度,W是特征宽度,数据类型为float32或float64。 + - **pool_size** (int|list|tuple) - 池化核的大小。如果它是一个元组或列表,那么它包含三个整数值,(pool_size_Depth, pool_size_Height, pool_size_Width)。若为一个整数,则表示D,H和W维度上均为该值,比如若pool_size=2, 则池化核大小为[2,2,2]。 + - **pool_type** (str) - 池化类型,可以为"max"或"avg","max" 对应max-pooling, "avg" 对应average-pooling。默认值:"max"。 + - **pool_stride** (int|list|tuple) - 池化层的步长。如果它是一个元组或列表,那么它包含三个整数值,(pool_stride_Depth, pool_stride_Height, pool_stride_Width)。若为一个整数,则表示D,H和W维度上均为该值,比如若pool_stride=3, 则池化层步长为[3,3,3]。默认值:1。 + - **pool_padding** (int|list|tuple|str) - 池化填充。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``pool_padding`` = "SAME"或 ``pool_padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有3种格式:(1)包含3个整数值:[pad_depth, pad_height, pad_width];(2)包含6个整数值:[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right];(3)包含5个二元组:当 ``data_format`` 为"NCDHW"时为[[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]],当 ``data_format`` 为"NDHWC"时为[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]。若为一个整数,则表示D、H和W维度上均为该值。默认值:0。 + - **global_pooling** (bool)- 是否用全局池化。如果global_pooling = True,已设置的 ``pool_size`` 和 ``pool_padding`` 会被忽略, ``pool_size`` 将被设置为 :math:`[D_{in}, H_{in}, W_{in}]` , ``pool_padding`` 将被设置为0。默认值:False。 + - **use_cudnn** (bool)- 是否使用cudnn内核。只有已安装cudnn库时才有效。默认值:True。 + - **ceil_mode** (bool)- 是否用ceil函数计算输出的深度、高度和宽度。计算细节可参考上述 ``ceil_mode`` = true或 ``ceil_mode`` = false 时的计算公式。默认值:False。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置。默认值:None。 + - **exclusive** (bool) - 是否在平均池化模式忽略填充值。计算细节可参考上述 ``exclusive`` = true或 ``exclusive`` = false 时的计算公式。默认值:True。 + - **data_format** (str) - 输入和输出的数据格式,可以是"NCDHW"和"NDHWC"。N是批尺寸,C是通道数,D是特征深度,H是特征高度,W是特征宽度。默认值:"NDCHW"。 + +返回: 5-D Tensor,数据类型与 ``input`` 一致。 + +返回类型:Variable。 + +抛出异常: + - ``ValueError`` - 如果 ``pool_type`` 既不是"max"也不是"avg"。 + - ``ValueError`` - 如果 ``global_pooling`` 为False并且 ``pool_size`` 为-1。 + - ``TypeError`` - 如果 ``use_cudnn`` 不是bool值。 + - ``ValueError`` - 如果 ``data_format`` 既不是"NCHW"也不是"NHWC"。 + - ``ValueError`` - 如果 ``pool_padding`` 是字符串,既不是"SAME"也不是"VALID"。 + - ``ValueError`` - 如果 ``pool_padding`` 是"VALID",但是 ``ceil_mode`` 是True。 + - ``ValueError`` - 如果 ``pool_padding`` 含有5个二元组,与批尺寸对应维度的值不为0或者与通道对应维度的值不为0。 + - ``ShapeError`` - 如果 ``input`` 既不是4-D Tensor 也不是5-D Tensor。 + - ``ShapeError`` - 如果 ``input`` 的维度减去 ``pool_stride`` 的尺寸大小不是2。 + - ``ShapeError`` - 如果 ``pool_size`` 和 ``pool_stride`` 的尺寸大小不相等。 + - ``ShapeError`` - 如果计算出的输出形状的元素值不大于0。 + + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + data_NCDHW = fluid.data(name='data', shape=[None, 3, 8, 8, 8], dtype='float32') + + data_NDHWC = fluid.data(name='data', shape=[None, 8, 8, 8, 3], dtype='float32') + + # example 1: + # ceil_mode = False + out_1 = fluid.layers.pool3d( + input=data_NCDHW, # shape: [2, 3, 8, 8, 8] + pool_size=[3,3,3], + pool_type='avg', + pool_stride=[3,3,3], + pool_padding=[2,2,1], # it is same as pool_padding = [2,2,2,2,1,1] + global_pooling=False, + ceil_mode=False, + exclusive=True, + data_format="NCDHW") + # shape of out_1: [2, 3, 4, 4, 3] + + # example 2: + # ceil_mode = True (different from example 1) + out_2 = fluid.layers.pool3d( + input=data_NCDHW, + pool_size=[3,3,3], + pool_type='avg', + pool_stride=[3,3,3], + pool_padding=[[0,0], [0,0], [2,2], [2,2], [1,1]], # it is same as pool_padding = [2,2,2,2,1,1] + global_pooling=False, + ceil_mode=True, + exclusive=True, + data_format="NCDHW") + # shape of out_2: [2, 3, 4, 4, 4] which is different from out_1 + + # example 3: + # pool_padding = "SAME" (different from example 1) + out_3 = fluid.layers.pool3d( + input=data_NCDHW, + pool_size=[3,3,3], + pool_type='avg', + pool_stride=[3,3,3], + pool_padding="SAME", + global_pooling=False, + ceil_mode=False, + exclusive=True, + data_format="NCDHW") + # shape of out_3: [2, 3, 3, 3, 3] which is different from out_1 + + # example 4: + # pool_padding = "VALID" (different from example 1) + out_4 = fluid.layers.pool3d( + input=data_NCDHW, + pool_size=[3,3,3], + pool_type='avg', + pool_stride=[3,3,3], + pool_padding="VALID", + global_pooling=False, + ceil_mode=False, + exclusive=True, + data_format="NCDHW") + # shape of out_4: [2, 3, 2, 2, 2] which is different from out_1 + + # example 5: + # global_pooling = True (different from example 1) + # It will be set pool_size = [8,8,8] and pool_padding = [0,0,0] actually. + out_5 = fluid.layers.pool3d( + input=data_NCDHW, + pool_size=[3,3,3], + pool_type='avg', + pool_stride=[3,3,3], + pool_padding=[2,2,1], + global_pooling=True, + ceil_mode=False, + exclusive=True, + data_format="NCDHW") + # shape of out_5: [2, 3, 1, 1, 1] which is different from out_1 + + # example 6: + # data_format = "NDHWC" (different from example 1) + out_6 = fluid.layers.pool3d( + input=data_NHWC, # shape: [2, 8, 8, 8, 3] + pool_size=[3,3,3], + pool_type='avg', + pool_stride=[3,3,3], + pool_padding=[2,2,1], + global_pooling=False, + ceil_mode=False, + exclusive=True, + data_format="NDHWC") + # shape of out_6: [2, 4, 4, 3, 3] which is different from out_1 + + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/pow_cn.rst b/doc/paddle/api/paddle/fluid/layers/pow_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..40eaf542138527856d25a002f16a4cf29c891f47 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/pow_cn.rst @@ -0,0 +1,55 @@ +.. _cn_api_fluid_layers_pow: + +pow +------------------------------- + +.. py:function:: paddle.pow(x, exponent, name=None) + + + + +该OP是指数激活算子: + +.. math:: + + out = x^{exponent} + +**注意:如果需要对输入进行 elementwise_pow 操作,请查使用** :ref:`cn_api_fluid_layers_elementwise_pow` 。 + +参数: + - **x** (Variable)- 多维 ``Variable``,数据类型为 ``float32`` 或 ``float64`` 。 + - **exponent** (float32|Variable)- ``float32`` 或形状为[1]的 ``Variable``,数据类型为 ``float32``。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置。默认值: ``None``。 + +返回:维度与输入 `x` 相同的 ``Variable``,数据类型与 ``x`` 相同。 + +返回类型:Variable。 + + +**代码示例:** + +.. 
code-block:: python + + import paddle + import numpy as np + paddle.enable_imperative() + + # example 1: exponent is a float + x_data = np.array([1, 2, 3]) + exponent = 2 + x = paddle.imperative.to_variable(x_data) + res = paddle.pow(x, exponent) + print(res.numpy()) # [1 4 9] + + # example 2: exponent is a Variable + exponent = paddle.fill_constant(shape=[1], value=2, dtype='float32') + res = paddle.pow(x, exponent) + print(res.numpy()) # [1 4 9] + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/prelu_cn.rst b/doc/paddle/api/paddle/fluid/layers/prelu_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..edaf6dfe79316dc2a675ac45d8fefd42478b275f --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/prelu_cn.rst @@ -0,0 +1,50 @@ +.. _cn_api_fluid_layers_prelu: + +prelu +------------------------------- + + +.. py:function:: paddle.fluid.layers.prelu(x, mode, param_attr=None, name=None) + + + + +等式: + +.. math:: + y = max(0, x) + \alpha min(0, x) + +共提供三种激活方式: + +.. code-block:: text + + all: 所有元素使用同一个alpha值 + channel: 在同一个通道中的元素使用同一个alpha值 + element: 每一个元素有一个独立的alpha值 + + +参数: + - **x** (Variable)- 多维Tensor或LoDTensor,数据类型为float32。 + - **mode** (str) - 权重共享模式,可选 ``all`` 、 ``channel`` 或 ``element`` ,含义见上文。 + - **param_attr** (ParamAttr,可选) - 可学习权重 :math:`[\alpha]` 的参数属性,可由ParamAttr创建。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + + +返回: 表示激活输出Tensor或LoDTensor,数据类型为float32。与输入形状相同。 + + +返回类型:Variable + + +**代码示例:** + +.. code-block:: python + + import paddle.fluid as fluid + from paddle.fluid.param_attr import ParamAttr + x = fluid.data(name="x", shape=[None,5,10,10], dtype="float32") + mode = 'channel' + output = fluid.layers.prelu( + x, mode, param_attr=ParamAttr(name='alpha')) + + diff --git a/doc/paddle/api/paddle/fluid/layers/prior_box_cn.rst b/doc/paddle/api/paddle/fluid/layers/prior_box_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..929d56ced5f2628a6d55314ace3b45b2ab096a66 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/prior_box_cn.rst @@ -0,0 +1,44 @@ +.. _cn_api_fluid_layers_prior_box: + +prior_box +------------------------------- +.. py:function:: paddle.fluid.layers.prior_box(input,image,min_sizes=None,max_sizes=None,aspect_ratios=[1.0],variance=[0.1,0.1,0.2,0.2],flip=False,clip=False,steps=[0.0,0.0],offset=0.5,name=None,min_max_aspect_ratios_order=False) + + + + +该OP为SSD(Single Shot MultiBox Detector)算法生成候选框。输入的每个位置产生N个候选框,N由min_sizes,max_sizes和aspect_ratios的数目决定,候选框的尺寸在(min_size,max_size)之间,该尺寸根据aspect_ratios在序列中生成。 + +参数: + - **input** (Variable) - 形状为NCHW的4-D Tensor,数据类型为float32或float64。 + - **image** (Variable) - PriorBoxOp的输入图像数据,形状为NCHW的4-D Tensor,数据类型为float32或float64。 + - **min_sizes** (list|tuple|float) - 生成的候选框的最小尺寸。 + - **max_sizes** (list|tuple|None) - 生成的候选框的最大尺寸。默认值为None + - **aspect_ratios** (list|tuple|float) - 生成的候选框的长宽比。默认值为[1.]。 + - **variance** (list|tuple) - 在候选框中解码的方差。默认值为[0.1,0.1,0.2,0.2]。 + - **flip** (bool) - 是否翻转。默认值为False。 + - **clip** (bool) - 是否裁剪。默认值为False。 + - **steps** (list|tuple) - 候选框在width和height上的步长。如果steps[0]等于0.0或者steps[1]等于0.0,则自动计算候选框在宽度和高度上的步长。默认:[0.,0.] 
+ - **offset** (float) - 候选框中心位移。默认:0.5 + - **name** (str|None) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。默认值为None。 + - **min_max_aspect_ratios_order** (bool) - 若设为True,候选框的输出以[min, max, aspect_ratios]的顺序输出,和Caffe保持一致。请注意,该顺序会影响后面卷基层的权重顺序,但不影响最后的检测结果。默认:False。 + +返回:含有两个变量的元组,包括: + boxes: 候选框。形状为[H,W,num_priors,4]的4-D Tensor。其中,H是输入的高度,W是输入的宽度,num_priors是输入每位的总框数。 + variances: 候选框的方差,形状为[H,W,num_priors,4]的4-D Tensor。其中,H是输入的高度,W是输入的宽度,num_priors是输入每位的总框数。 + +返回类型:元组 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + input = fluid.layers.data(name="input", shape=[3,6,9]) + images = fluid.layers.data(name="images", shape=[3,9,12]) + box, var = fluid.layers.prior_box( + input=input, + image=images, + min_sizes=[100.], + flip=True, + clip=True) diff --git a/doc/paddle/api/paddle/fluid/layers/prroi_pool_cn.rst b/doc/paddle/api/paddle/fluid/layers/prroi_pool_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..58665c7e622ef575bc09b9d36e60d84dfbf51bbc --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/prroi_pool_cn.rst @@ -0,0 +1,50 @@ +.. _cn_api_fluid_layers_prroi_pool: + +prroi_pool +------------------------------- + +.. py:function:: paddle.fluid.layers.prroi_pool(input, rois, output_channels, spatial_scale, pooled_height, pooled_width, name=None) + + + + +PRROIPool运算 + +精确区域池化方法(Precise region of interest pooling,也称为PRROIPooling)是对输入的 "感兴趣区域"(RoI)执行插值处理,将离散的特征图数据映射到一个连续空间,使用二重积分再求均值的方式实现Pooling。 + +通过积分方式计算ROI特征,反向传播时基于连续输入值计算梯度,使得反向传播连续可导的PRROIPooling。 有关更多详细信息,请参阅 https://arxiv.org/abs/1807.11590。 + +参数: + - **input** (Variable) - (Tensor),PRROIPoolOp的输入。 输入张量的格式是NCHW。 其中N是批大小batch_size,C是输入通道的数量,H是输入特征图的高度,W是特征图宽度 + - **rois** (Variable) - 要进行池化的RoI(感兴趣区域)。应为一个形状为(num_rois, 4)的二维LoDTensor,其lod level为1。给出[[x1, y1, x2, y2], ...],(x1, y1)为左上角坐标,(x2, y2)为右下角坐标。 + - **output_channels** (integer) - (int),输出特征图的通道数。 对于共C个种类的对象分类任务,output_channels应该是(C + 1),该情况仅适用于分类任务。 + - **spatial_scale** (float) - (float,default 1.0),乘法空间比例因子,用于将ROI坐标从其输入比例转换为池化使用的比例。默认值:1.0 + - **pooled_height** (integer) - (int,默认值1),池化输出的高度。默认值:1 + - **pooled_width** (integer) - (int,默认值1),池化输出的宽度。默认值:1 + - **name** (str,default None) - 此层的名称。 + +返回: (Tensor),PRROIPoolOp的输出是形为 (num_rois,output_channels,pooled_h,pooled_w) 的4-D Tensor。 + +返回类型: 变量(Variable) + +**代码示例:** + +.. code-block:: python + + ## prroi_pool without batch_roi_num + import paddle.fluid as fluid + x = fluid.data(name='x', shape=[None, 490, 28, 28], dtype='float32') + rois = fluid.data(name='rois', shape=[None, 4], lod_level=1, dtype='float32') + pool_out = fluid.layers.prroi_pool(x, rois, 1.0, 7, 7) + + ## prroi_pool with batch_roi_num + batchsize=4 + x2 = fluid.data(name='x2', shape=[batchsize, 490, 28, 28], dtype='float32') + rois2 = fluid.data(name='rois2', shape=[batchsize, 4], dtype='float32') + batch_rois_num = fluid.data(name='rois_nums', shape=[batchsize], dtype='int64') + pool_out2 = fluid.layers.prroi_pool(x2, rois2, 1.0, 7, 7, batch_roi_nums=batch_rois_num) + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/psroi_pool_cn.rst b/doc/paddle/api/paddle/fluid/layers/psroi_pool_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8f30851307f43cdc26b5f3fdfe69b9234e7b05b4 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/psroi_pool_cn.rst @@ -0,0 +1,56 @@ +.. _cn_api_fluid_layers_psroi_pool: + +psroi_pool +------------------------------- + +.. 
py:function:: paddle.fluid.layers.psroi_pool(input, rois, output_channels, spatial_scale, pooled_height, pooled_width, name=None) + + + + +**注意 rois必须为2维LoDTensor,lod_level为1** + +该OP执行PSROIPooling运算,是位置敏感的感兴趣区域池化方法(Position sensitive region of interest pooling,也称为PSROIPooling)。输入input是位置敏感的评分图,输入rois是感兴趣区域的位置坐标。PSROIPooling不同于普通ROIPooling的地方在于,输入input特征图的不同通道会跟输出特征图上的位置区域相关联,该方法是在R-FCN模型中首次提出来的,更多详细信息请参阅 https://arxiv.org/abs/1605.06409。 + + +**样例**: + +:: + + Given: + input.shape = [2, 490, 28, 28] + rois.shape = [5, 4], rois.lod = [[3, 2]] + output_channels = 10 + pooled_height = 7 + pooled_width = 7 + + Return: + out.shape = [5, 10, 7, 7], out.lod = [[3, 2]] + + +参数: + - **input** (Variable) - 输入特征图,4-D Tensor,格式是NCHW。 其中N是batch_size,C是输入通道的数量,H是输入特征图的高度,W是特征图宽度。数据类型是float32或者float64 + - **rois** (Variable) - 感兴趣区域,2-D LoDTensor,形状为(num_rois, 4),lod_level为1。形式如[x1, y1, x2, y2], ...],其中(x1, y1)为左上角坐标,(x2, y2)为右下角坐标。数据类型与input相同 + - **output_channels** (int) - 输出特征图的通道数。对于共C个种类的图像分类任务,output_channels应该是 ``(C + 1)`` ,其中1代表背景 + - **spatial_scale** (float) - 空间跨度因子,用于将 ``rois`` 中的坐标从其输入尺寸按比例映射到 ``input`` 特征图的尺寸 + - **pooled_height** (int) - 池化输出的高度 + - **pooled_width** (int) - 池化输出的宽度 + - **name** (str|None) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None。 + +返回: 经过PSROIPooling之后的结果,形状为(num_rois,output_channels,pooled_height,pooled_width) 的4维LoDTensor,lod_level为1,数据类型与input相同,与rois具有相同的lod信息。 + +返回类型: Variable + +**代码示例:** + +.. code-block:: python + + import paddle.fluid as fluid + x = fluid.layers.data(name='x', shape=[490, 28, 28], dtype='float32') + rois = fluid.layers.data(name='rois', shape=[4], lod_level=1, dtype='float32') + pool_out = fluid.layers.psroi_pool(x, rois, 10, 1.0, 7, 7) + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/py_reader_cn.rst b/doc/paddle/api/paddle/fluid/layers/py_reader_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f9ac0f083bedb8355b008b42bc840656c09b2462 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/py_reader_cn.rst @@ -0,0 +1,152 @@ +.. _cn_api_fluid_layers_py_reader: + +py_reader +------------------------------- + + +.. py:function:: paddle.fluid.layers.py_reader(capacity, shapes, dtypes, lod_levels=None, name=None, use_double_buffer=True) + + + + + +创建一个在Python端提供数据的reader + +该OP返回一个Reader Variable。该Reader提供了 ``decorate_paddle_reader()`` 和 ``decorate_tensor_provider()`` 来设置Python generator作为数据源,将数据源中的数据feed到Reader Variable。在c++端调用 ``Executor::Run()`` 时,来自generator的数据将被自动读取。与 ``DataFeeder.feed()`` 不同,数据读取进程和 ``Executor::Run()`` 进程可以使用 ``py_reader`` 并行运行。在每次数据传递开始时调用reader的 ``start()`` ,在传递结束和抛出 ``fluid.core.EOFException`` 异常后执行 ``reset()`` 。 + +注意: ``Program.clone()`` (含义详见 :ref:`cn_api_fluid_Program` )不能克隆 ``py_reader`` ,且 ``read_file`` ( ``read_file`` 含义详见 :ref:`cn_api_fluid_layers_read_file` )调用需在声明 ``py_reader`` 的program block内。 + +参数: + - **capacity** (int) – ``py_reader`` 维护的缓冲区的容量数据个数。 + - **shapes** (list|tuple) – 一个列表或元组,shapes[i]是代表第i个数据shape,因此shape[i]也是元组或列表。 + - **dtypes** (list|tuple) – 一个string的列表或元组。为 ``shapes`` 对应元素的数据类型,支持bool,float16,float32,float64,int8,int16,int32,int64,uint8。 + - **lod_levels** (list|tuple) – lod_level的整型列表或元组 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + - **use_double_buffer** (bool) – 是否使用双缓冲区,双缓冲区是为了预读下一个batch的数据、异步CPU -> GPU拷贝。默认值为True。 + +返回:reader,从reader中可以获取feed的数据,其dtype和feed的数据dtype相同。 + +返回类型:Variable + + + +**代码示例** + +1.py_reader 基本用法如下 + +.. 
code-block:: python + + import paddle + import paddle.fluid as fluid + import paddle.dataset.mnist as mnist + + def network(image, label): + # 用户自定义网络,此处以softmax回归为例 + predict = fluid.layers.fc(input=image, size=10, act='softmax') + return fluid.layers.cross_entropy(input=predict, label=label) + + reader = fluid.layers.py_reader(capacity=64, + shapes=[(-1,1, 28, 28), (-1,1)], + dtypes=['float32', 'int64']) + reader.decorate_paddle_reader( + paddle.reader.shuffle(paddle.batch(mnist.train(), batch_size=5), + buf_size=1000)) + + img, label = fluid.layers.read_file(reader) + loss = network(img, label) # 一些网络定义 + + fluid.Executor(fluid.CUDAPlace(0)).run(fluid.default_startup_program()) + exe = fluid.ParallelExecutor(use_cuda=True, loss_name=loss.name) + for epoch_id in range(10): + reader.start() + try: + while True: + exe.run(fetch_list=[loss.name]) + except fluid.core.EOFException: + reader.reset() + + fluid.io.save_inference_model(dirname='./model', + feeded_var_names=[img.name, label.name], + target_vars=[loss], + executor=fluid.Executor(fluid.CUDAPlace(0))) + + +2.训练和测试应使用不同的名称创建两个不同的py_reader,例如: + +.. code-block:: python + + import paddle + import paddle.fluid as fluid + import paddle.dataset.mnist as mnist + + def network(reader): + img, label = fluid.layers.read_file(reader) + # 用户自定义网络,此处以softmax回归为例 + predict = fluid.layers.fc(input=img, size=10, act='softmax') + loss = fluid.layers.cross_entropy(input=predict, label=label) + return fluid.layers.mean(loss) + + # 新建 train_main_prog 和 train_startup_prog + train_main_prog = fluid.Program() + train_startup_prog = fluid.Program() + with fluid.program_guard(train_main_prog, train_startup_prog): + # 使用 fluid.unique_name.guard() 实现与test program的参数共享 + with fluid.unique_name.guard(): + train_reader = fluid.layers.py_reader(capacity=64, + shapes=[(-1, 1, 28, 28), (-1, 1)], + dtypes=['float32', 'int64'], + name='train_reader') + train_reader.decorate_paddle_reader( + paddle.reader.shuffle(paddle.batch(mnist.train(), + batch_size=5), + buf_size=500)) + train_loss = network(train_reader) # 一些网络定义 + adam = fluid.optimizer.Adam(learning_rate=0.01) + adam.minimize(train_loss) + + # Create test_main_prog and test_startup_prog + test_main_prog = fluid.Program() + test_startup_prog = fluid.Program() + with fluid.program_guard(test_main_prog, test_startup_prog): + # 使用 fluid.unique_name.guard() 实现与train program的参数共享 + with fluid.unique_name.guard(): + test_reader = fluid.layers.py_reader(capacity=32, + shapes=[(-1, 1, 28, 28), (-1, 1)], + dtypes=['float32', 'int64'], + name='test_reader') + test_reader.decorate_paddle_reader(paddle.batch(mnist.test(), 512)) + test_loss = network(test_reader) + + fluid.Executor(fluid.CUDAPlace(0)).run(train_startup_prog) + fluid.Executor(fluid.CUDAPlace(0)).run(test_startup_prog) + + train_exe = fluid.ParallelExecutor(use_cuda=True, + loss_name=train_loss.name, main_program=train_main_prog) + test_exe = fluid.ParallelExecutor(use_cuda=True, + loss_name=test_loss.name, main_program=test_main_prog) + for epoch_id in range(10): + train_reader.start() + try: + while True: + train_exe.run(fetch_list=[train_loss.name]) + except fluid.core.EOFException: + train_reader.reset() + + test_reader.start() + try: + while True: + test_exe.run(fetch_list=[test_loss.name]) + except fluid.core.EOFException: + test_reader.reset() + + + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/random_crop_cn.rst b/doc/paddle/api/paddle/fluid/layers/random_crop_cn.rst new file mode 100644 index 
0000000000000000000000000000000000000000..f00e9f9285b31dc6ab899c2081e9bdd10faad39b --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/random_crop_cn.rst @@ -0,0 +1,38 @@ +.. _cn_api_fluid_layers_random_crop: + +random_crop +------------------------------- + +.. py:function:: paddle.fluid.layers.random_crop(x, shape, seed=None) + + + + +该操作对batch中每个实例进行随机裁剪,即每个实例的裁剪位置不同,裁剪位置由均匀分布随机数生成器决定。所有裁剪后的实例都具有相同的维度,由 ``shape`` 参数决定。 + +参数: + - **x(Variable)** - 多维Tensor。 + - **shape(list(int))** - 裁剪后最后几维的形状,注意, ``shape`` 的个数小于 ``x`` 的秩。 + - **seed(int|Variable,可选)** - 设置随机数种子,默认情况下,种子是[-65536,-65536)中一个随机数,如果类型是Variable,要求数据类型是int64,默认值:None。 + +返回: 裁剪后的Tensor。 + +返回类型:Variable + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + img = fluid.data("img", [None, 3, 256, 256]) + # cropped_img的shape: [-1, 3, 224, 224] + cropped_img = fluid.layers.random_crop(img, shape=[3, 224, 224]) + + # cropped_img2的shape: [-1, 2, 224, 224] + # cropped_img2 = fluid.layers.random_crop(img, shape=[2,224, 224]) + + # cropped_img3的shape: [-1, 3, 128, 224] + # cropped_img3 = fluid.layers.random_crop(img, shape=[128, 224]) + + + diff --git a/doc/paddle/api/paddle/fluid/layers/range_cn.rst b/doc/paddle/api/paddle/fluid/layers/range_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..519f5e76f72b649cb924adc6c00342b7b5c54929 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/range_cn.rst @@ -0,0 +1,34 @@ +.. _cn_api_fluid_layers_range: + +range +------------------------------- + +.. py:function:: paddle.fluid.layers.range(start, end, step, dtype, name=None) + + +注意:推荐使用 paddle.arange + +该OP返回以步长 ``step`` 均匀分隔给定数值区间[``start``, ``end``)的1-D Tensor,数据类型为 ``dtype``。 + +当 ``dtype`` 表示浮点类型时,为了避免浮点计算误差,建议给 ``end`` 加上一个极小值epsilon,使边界可以更加明确。 + +参数: + - **start** (float|int|Tensor) - 区间起点(且区间包括此值)。当 ``start`` 类型是Tensor时,是形状为[1]且数据类型为int32、int64、float32、float64的Tensor。 + - **end** (float|int|Tensor) - 区间终点(且通常区间不包括此值)。当 ``end`` 类型是Tensor时,是形状为[1]且数据类型为int32、int64、float32、float64的Tensor。 + - **step** (float|int|Tensor) - 均匀分割的步长。当 ``step`` 类型是Tensor时,是形状为[1]且数据类型为int32、int64、float32、float64的Tensor。 + - **dtype** (str|np.dtype|core.VarDesc.VarType) - 输出Tensor的数据类型,支持int32、int64、float32、float64。 + - **name** (str, 可选) - 输出的名字。一般无需设置,默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 + +返回: + Tensor: 以步长 ``step`` 均匀分割给定数值区间[``start``, ``end``)后得到的1-D Tensor, 数据类型为 ``dtype`` 。 + +抛出异常: + - ``TypeError`` - 如果 ``dtype`` 不是int32、int64、float32、float64。 + +代码示例: + +.. code-block:: python + + import paddle.fluid as fluid + data = fluid.layers.range(0, 10, 2, 'int32') + # [0, 2, 4, 6, 8] diff --git a/doc/paddle/api/paddle/fluid/layers/rank_cn.rst b/doc/paddle/api/paddle/fluid/layers/rank_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..424f9c98eba17be6f0fd8828fcbc497394d7452d --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/rank_cn.rst @@ -0,0 +1,29 @@ +.. _cn_api_fluid_layers_rank: + +rank +------------------------------- + +.. py:function:: paddle.fluid.layers.rank(input) + + + + +该OP用于计算输入Tensor的维度(秩)。 + +参数: + - **input** (Variable) — 输入input是shape为 :math:`[N_1, N_2, ..., N_k]` 的多维Tensor,数据类型可以任意类型。 + +返回:输出Tensor的秩,是一个0-D Tensor。 + +返回类型:Variable,数据类型为int32。 + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + input = fluid.data( + name="input", shape=[3, 100, 100], dtype="float32") + rank = fluid.layers.rank(input) # rank=(4,) + + diff --git a/doc/paddle/api/paddle/fluid/layers/rank_loss_cn.rst b/doc/paddle/api/paddle/fluid/layers/rank_loss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d34f3be20f217ee2600523391de4db677ea6be47 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/rank_loss_cn.rst @@ -0,0 +1,45 @@ +.. _cn_api_fluid_layers_rank_loss: + +rank_loss +------------------------------- + +.. py:function:: paddle.fluid.layers.rank_loss(label, left, right, name=None) + + + + +该OP实现了RankNet模型中的排序损失层。RankNet是一种文档对(pairwise)排序模型,训练样本由一对文档(假设用A、B来表示)组成。标签(假设用P来表示)表示A的排名是否高于B。更多详情请参考:`RankNet `_ + +排序损失层有三个输入: :math:`o_i` 、 :math:`o_j` 和 :math:`\tilde{P_{ij}}` ,输入分别表示RankNet模型对文档A、B的输出得分和标签P的值;排序损失层的输入是批输入数据(批大小大于等于1);标签P的取值可以为: {0, 1} 或 {0, 0.5, 1} ,其中,0.5表示输入文档对排序相同。输入数据的排序损失 :math:`C_{i,j}` 计算过程如下: + +.. math:: + + C_{i,j} &= -\tilde{P_{ij}} * o_{i,j} + \log(1 + e^{o_{i,j}}) + + o_{i,j} &= o_i - o_j + + \tilde{P_{i,j}} &= \left \{0, 0.5, 1 \right \} \ or \ \left \{0, 1 \right \} + +参数: + - **label** (Variable):维度为 :math:`[batch,1]` 的2-D ``Tensor`` ,数据类型为float32。其中batch表示批数据的大小。表示A的排名是否高于B。 + - **left** (Variable):维度为 :math:`[batch,1]` 的2-D ``Tensor`` ,数据类型为float32。其中batch表示批数据的大小。表示RankNet对文档A的输出得分。 + - **right** (Variable):维度为 :math:`[batch,1]` 的2-D ``Tensor`` ,数据类型为float32。其中batch表示批数据的大小。表示RankNet对文档B的输出得分。 + - **name** (str, 可选):具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:表示排序损失层输出值的 ``Tensor`` ,数据类型为float32,返回值维度为 :math:`[batch,1]` 。 + +返回类型:Variable + +抛出异常: + - ``ValueError`` - 输入 ``label`` , ``left`` ,和 ``right`` 至少有一个不是 ``Variable`` 类型。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + label = fluid.layers.data(name="label", shape=[-1, 1], dtype="float32") + left = fluid.layers.data(name="left", shape=[-1, 1], dtype="float32") + right = fluid.layers.data(name="right", shape=[-1, 1], dtype="float32") + out = fluid.layers.rank_loss(label, left, right) + diff --git a/doc/paddle/api/paddle/fluid/layers/read_file_cn.rst b/doc/paddle/api/paddle/fluid/layers/read_file_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4996050c73986bb439c27ce0668fef92f8007487 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/read_file_cn.rst @@ -0,0 +1,43 @@ +.. _cn_api_fluid_layers_read_file: + +read_file +------------------------------- + + +.. py:function:: paddle.fluid.layers.read_file(reader) + + + + +从给定的reader中读取数据 + +reader是一个Variable,它可以是由函数fluid.layers.py_reader()生成的reader,或者是由函数fluid.layers.double_buffer()生成的装饰Variable。 + +参数: + - **reader** (Variable)-待处理的reader + +返回:从reader中读取的数据元组,元组数据类型为Variable + +返回类型: tuple(元组) + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + reader = fluid.layers.py_reader(capacity=64, + shapes=[(-1, 1, 28, 28), (-1, 1)], + dtypes=['float32', 'int64']) + image, label = fluid.layers.read_file(reader) + data_file = fluid.layers.double_buffer( + fluid.layers.batch(data_file, batch_size=64)) + input, label = fluid.layers.read_file(data_file) + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/reciprocal_cn.rst b/doc/paddle/api/paddle/fluid/layers/reciprocal_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..07ca2cab71a695b17e450fea13459f53d516e1ff --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/reciprocal_cn.rst @@ -0,0 +1,40 @@ +.. 
_cn_api_fluid_layers_reciprocal:
+
+reciprocal
+-------------------------------
+
+.. py:function:: paddle.fluid.layers.reciprocal(x, name=None)
+
+
+
+
+reciprocal 对输入Tensor逐元素取倒数。
+
+
+.. math::
+    out = \frac{1}{x}
+
+参数:
+
+    - **x** - 输入的多维Tensor,支持的数据类型为float32,float64。
+    - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。
+
+
+返回: 对输入取倒数得到的Tensor,输出Tensor数据类型和维度与输入相同。
+
+**代码示例**:
+
+.. code-block:: python
+
+    import paddle
+    import numpy as np
+
+    paddle.enable_imperative()
+    x_data = np.array([1, 2, 3, 4]).astype(np.float32)
+    x = paddle.imperative.to_variable(x_data)
+    res = paddle.reciprocal(x)
+    print(res.numpy())  # [1.  0.5  0.33333334  0.25]
+
+
+
diff --git a/doc/paddle/api/paddle/fluid/layers/reduce_all_cn.rst b/doc/paddle/api/paddle/fluid/layers/reduce_all_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3c45fc7a7871c5348f9490a9c4049e674d4c98af
--- /dev/null
+++ b/doc/paddle/api/paddle/fluid/layers/reduce_all_cn.rst
@@ -0,0 +1,45 @@
+.. _cn_api_fluid_layers_reduce_all:
+
+reduce_all
+-------------------------------
+
+.. py:function:: paddle.fluid.layers.reduce_all(input, dim=None, keep_dim=False, name=None)
+
+
+
+
+该OP是对指定维度上的Tensor元素进行与逻辑(&)计算,并输出相应的计算结果。
+
+参数:
+    - **input** (Variable)— 输入变量为多维Tensor或LoDTensor,数据类型需要为bool类型。
+    - **dim** (list | int,可选)— 与逻辑运算的维度。如果为None,则计算所有元素的与逻辑并返回包含单个元素的Tensor变量,否则必须在 :math:`[−rank(input),rank(input))` 范围内。如果 :math:`dim[i] < 0` ,则维度将变为 :math:`rank+dim[i]` 。默认值为None。
+    - **keep_dim** (bool)— 是否在输出Tensor中保留减小的维度。如果 keep_dim 为True,则保留减小的维度;否则输出Tensor的维度将比输入Tensor小。默认值为False。
+    - **name** (str, 可选)— 这一层的名称。如果设置为None,则将自动命名这一层。默认值为None。
+
+返回:在指定dim上进行与逻辑计算的Tensor,数据类型为bool类型。
+
+返回类型:Variable,数据类型为bool类型。
+
+**代码示例**
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+    import paddle.fluid.layers as layers
+    import numpy as np
+
+    # x是一个布尔型Tensor,元素如下:
+    # [[True, False]
+    #  [True, True]]
+    x = layers.assign(np.array([[1, 0], [1, 1]], dtype='int32'))
+    x = layers.cast(x, 'bool')
+
+    out = layers.reduce_all(x)          # False
+    out = layers.reduce_all(x, dim=0)   # [True, False]
+    out = layers.reduce_all(x, dim=-1)  # [False, True]
+    # keep_dim=False, x.shape=(2,2), out.shape=(2,)
+
+    out = layers.reduce_all(x, dim=1, keep_dim=True)  # [[False], [True]]
+    # keep_dim=True, x.shape=(2,2), out.shape=(2,1)
+
diff --git a/doc/paddle/api/paddle/fluid/layers/reduce_any_cn.rst b/doc/paddle/api/paddle/fluid/layers/reduce_any_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..7150bd43ecfc7a0610f41ab25e196954292ae26d
--- /dev/null
+++ b/doc/paddle/api/paddle/fluid/layers/reduce_any_cn.rst
@@ -0,0 +1,51 @@
+.. _cn_api_fluid_layers_reduce_any:
+
+reduce_any
+-------------------------------
+
+.. py:function:: paddle.fluid.layers.reduce_any(input, dim=None, keep_dim=False, name=None)
+
+
+
+
+该OP是对指定维度上的Tensor元素进行或逻辑(|)计算,并输出相应的计算结果。
+
+参数:
+    - **input** (Variable)— 输入变量为多维Tensor或LoDTensor,数据类型需要为bool类型。
+    - **dim** (list | int,可选)— 或逻辑运算的维度。如果为None,则计算所有元素的或逻辑并返回包含单个元素的Tensor变量,否则必须在 :math:`[−rank(input),rank(input))` 范围内。如果 :math:`dim[i] < 0` ,则维度将变为 :math:`rank+dim[i]` 。默认值为None。
+    - **keep_dim** (bool)— 是否在输出Tensor中保留减小的维度。如果 keep_dim 为True,则保留减小的维度;否则输出Tensor的维度将比输入Tensor小。默认值为False。
+    - **name** (str,可选)— 这一层的名称(可选)。如果设置为None,则将自动命名这一层。默认值为None。
+
+返回:在指定dim上进行或逻辑计算的Tensor,数据类型为bool类型。
+
+返回类型:Variable,数据类型为bool类型。
+
+**代码示例**
+
+.. 
code-block:: python + + + import paddle.fluid as fluid + import paddle.fluid.layers as layers + import numpy as np + + # x是一个布尔型Tensor,元素如下: + # [[True, False] + # [False, False]] + x = layers.assign(np.array([[1, 0], [0, 0]], dtype='int32')) + x = layers.cast(x, 'bool') + + out = layers.reduce_any(x) # True + out = layers.reduce_any(x, dim=0) # [True, False] + out = layers.reduce_any(x, dim=-1) # [True, False] + # keep_dim=False, x.shape=(2,2), out.shape=(2,) + + out = layers.reduce_any(x, dim=1, + keep_dim=True) # [[True], [False]] + # keep_dim=True, x.shape=(2,2), out.shape=(2,1) + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/reduce_max_cn.rst b/doc/paddle/api/paddle/fluid/layers/reduce_max_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..dad1f713026893ef75107710ad1b1f7f172f86cf --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/reduce_max_cn.rst @@ -0,0 +1,54 @@ +.. _cn_api_fluid_layers_reduce_max: + +reduce_max +------------------------------- + +.. py:function:: paddle.fluid.layers.reduce_max(input, dim=None, keep_dim=False, name=None) + + + + +该OP是对指定维度上的Tensor元素求最大值运算,并输出相应的计算结果。 + +参数: + - **input** (Variable)- 输入变量为多维Tensor或LoDTensor,支持数据类型为float32,float64,int32,int64。 + - **dim** (list | int ,可选)- 求最大值运算的维度。如果为None,则计算所有元素的最大值并返回包含单个元素的Tensor变量,否则必须在 :math:`[−rank(input),rank(input)]` 范围内。如果 :math:`dim [i] <0` ,则维度将变为 :math:`rank+dim[i]` ,默认值为None。 + - **keep_dim** (bool)- 是否在输出Tensor中保留减小的维度。如 keep_dim 为true,否则结果张量的维度将比输入张量小,默认值为False。 + - **name** (str , 可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回: 在指定dim上进行求最大值运算的Tensor,数据类型和输入数据类型一致。 + +返回类型: 变量(Variable) + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + # x是一个Tensor,元素如下: + # [[0.2, 0.3, 0.5, 0.9] + # [0.1, 0.2, 0.6, 0.7]] + # 接下来的示例中,我们在每处函数调用后面都标注出了它的结果张量。 + x = fluid.data(name='x', shape=[2, 4], dtype='float32') + fluid.layers.reduce_max(x) # [0.9] + fluid.layers.reduce_max(x, dim=0) # [0.2, 0.3, 0.6, 0.9] + fluid.layers.reduce_max(x, dim=-1) # [0.9, 0.7] + fluid.layers.reduce_max(x, dim=1, keep_dim=True) # [[0.9], [0.7]] + + # y是一个shape为[2, 2, 2]的Tensor,元素如下: + # [[[1.0, 2.0], [3.0, 4.0]], + # [[5.0, 6.0], [7.0, 8.0]]] + # 接下来的示例中,我们在每处函数调用后面都标注出了它的结果张量。 + y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32') + fluid.layers.reduce_max(y, dim=[1, 2]) # [4.0, 8.0] + fluid.layers.reduce_max(y, dim=[0, 1]) # [7.0, 8.0] + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/reduce_mean_cn.rst b/doc/paddle/api/paddle/fluid/layers/reduce_mean_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7de43cd7dd5050d78390b15d7e1fe4af27f7ad54 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/reduce_mean_cn.rst @@ -0,0 +1,54 @@ +.. _cn_api_fluid_layers_reduce_mean: + +reduce_mean +------------------------------- + +.. py:function:: paddle.fluid.layers.reduce_mean(input, dim=None, keep_dim=False, name=None) + + + + +该OP是对指定维度上的Tensor元素进行平均值算,并输出相应的计算结果。 + +参数: + - **input** (Variable)- 输入变量为多维Tensor或LoDTensor,支持数据类型为float32,float64,int32,int64。 + - **dim** (list | int ,可选)— 求平均值运算的维度。如果为None,则计算所有元素的平均值并返回包含单个元素的Tensor变量,否则必须在 :math:`[−rank(input),rank(input)]` 范围内。如果 :math:`dim [i] <0` ,则维度将变为 :math:`rank+dim[i]` ,默认值为None。 + - **keep_dim** (bool)- 是否在输出Tensor中保留减小的维度。如 keep_dim 为true,否则结果张量的维度将比输入张量小,默认值为False。 + - **name** (str , 可选)— 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回: 在指定dim上进行平均值运算的Tensor,数据类型和输入数据类型一致。 + +返回类型: 变量(Variable) + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + # x是一个Tensor,元素如下: + # [[0.2, 0.3, 0.5, 0.9] + # [0.1, 0.2, 0.6, 0.7]] + # 接下来的示例中,我们在每处函数调用后面都标注出了它的结果张量。 + x = fluid.data(name='x', shape=[2, 4], dtype='float32') + fluid.layers.reduce_mean(x) # [0.4375] + fluid.layers.reduce_mean(x, dim=0) # [0.15, 0.25, 0.55, 0.8] + fluid.layers.reduce_mean(x, dim=-1) # [0.475, 0.4] + fluid.layers.reduce_mean(x, dim=1, keep_dim=True) # [[0.475], [0.4]] + + # y是一个shape为[2, 2, 2]的Tensor元素如下: + # [[[1.0, 2.0], [3.0, 4.0]], + # [[5.0, 6.0], [7.0, 8.0]]] + # 接下来的示例中,我们在每处函数调用后面都标注出了它的结果张量。。 + y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32') + fluid.layers.reduce_mean(y, dim=[1, 2]) # [2.5, 6.5] + fluid.layers.reduce_mean(y, dim=[0, 1]) # [4.0, 5.0] + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/reduce_min_cn.rst b/doc/paddle/api/paddle/fluid/layers/reduce_min_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..627cb0213e64eebd6093ac025f655d9ee0be1602 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/reduce_min_cn.rst @@ -0,0 +1,54 @@ +.. _cn_api_fluid_layers_reduce_min: + +reduce_min +------------------------------- + +.. py:function:: paddle.fluid.layers.reduce_min(input, dim=None, keep_dim=False, name=None) + + + + +该OP是对指定维度上的Tensor元素求最小值运算,并输出相应的计算结果。 + +参数: + - **input** (Variable)- 输入变量为多维Tensor或LoDTensor,支持数据类型为float32,float64,int32,int64。 + - **dim** (list | int ,可选)- 求最小值运算的维度。如果为None,则计算所有元素的最小值并返回包含单个元素的Tensor变量,否则必须在 :math:`[−rank(input),rank(input)]` 范围内。如果 :math:`dim [i] <0` ,则维度将变为 :math:`rank+dim[i]` ,默认值为None。 + - **keep_dim** (bool)- 是否在输出Tensor中保留减小的维度。如 keep_dim 为true,否则结果张量的维度将比输入张量小,默认值为False。 + - **name** (str , 可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回: 在指定dim上进行求最小值运算的Tensor,数据类型和输入数据类型一致。 + +返回类型: 变量(Variable) + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + # x是一个Tensor,元素如下: + # [[0.2, 0.3, 0.5, 0.9] + # [0.1, 0.2, 0.6, 0.7]] + # 接下来的示例中,我们在每处函数调用后面都标注出了它的结果张量。 + x = fluid.data(name='x', shape=[2, 4], dtype='float32') + fluid.layers.reduce_min(x) # [0.1] + fluid.layers.reduce_min(x, dim=0) # [0.1, 0.2, 0.5, 0.7] + fluid.layers.reduce_min(x, dim=-1) # [0.2, 0.1] + fluid.layers.reduce_min(x, dim=1, keep_dim=True) # [[0.2], [0.1]] + + # y是一个shape为[2, 2, 2]的Tensor元素如下: + # [[[1.0, 2.0], [3.0, 4.0]], + # [[5.0, 6.0], [7.0, 8.0]]] + # 接下来的示例中,我们在每处函数调用后面都标注出了它的结果张量。 + y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32') + fluid.layers.reduce_min(y, dim=[1, 2]) # [1.0, 5.0] + fluid.layers.reduce_min(y, dim=[0, 1]) # [1.0, 2.0] + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/reduce_prod_cn.rst b/doc/paddle/api/paddle/fluid/layers/reduce_prod_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6ec6b0edec0d409e776bc3b3a0e5f07e65d32b9c --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/reduce_prod_cn.rst @@ -0,0 +1,55 @@ +.. _cn_api_fluid_layers_reduce_prod: + +reduce_prod +------------------------------- + +.. 
py:function:: paddle.fluid.layers.reduce_prod(input, dim=None, keep_dim=False, name=None) + + + + +该OP是对指定维度上的Tensor元素进行求乘积运算,并输出相应的计算结果。 + +参数: + - **input** (Variable)- 输入变量为多维Tensor或LoDTensor,支持数据类型为float32,float64,int32,int64。 + - **dim** (int|list|tuple ,可选)- 求乘积运算的维度。如果为None,则计算所有元素的乘积并返回包含单个元素的Tensor变量,否则必须在 :math:`[−rank(input),rank(input)]` 范围内。如果 :math:`dim [i] <0` ,则维度将变为 :math:`rank+dim[i]` ,默认值为None。 + - **keep_dim** (bool)- 是否在输出Tensor中保留减小的维度。如 keep_dim 为true,否则结果张量的维度将比输入张量小,默认值为False。 + - **name** (str , 可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回: 在指定dim上进行求乘积运算的Tensor,数据类型和输入数据类型一致。 + +返回类型: 变量(Variable) + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + # x是一个Tensor,元素如下: + # [[0.2, 0.3, 0.5, 0.9] + # [0.1, 0.2, 0.6, 0.7]] + # 接下来的示例中,我们在每处函数调用后面都标注出了它的结果张量。 + x = fluid.data(name='x', shape=[2, 4], dtype='float32') + fluid.layers.reduce_prod(x) # [0.0002268] + fluid.layers.reduce_prod(x, dim=0) # [0.02, 0.06, 0.3, 0.63] + fluid.layers.reduce_prod(x, dim=-1) # [0.027, 0.0084] + fluid.layers.reduce_prod(x, dim=1, + keep_dim=True) # [[0.027], [0.0084]] + + # y 是一个shape为[2, 2, 2]的Tensor元素如下: + # [[[1.0, 2.0], [3.0, 4.0]], + # [[5.0, 6.0], [7.0, 8.0]]] + # 接下来的示例中,我们在每处函数调用后面都标注出了它的结果张量。 + y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32') + fluid.layers.reduce_prod(y, dim=[1, 2]) # [24.0, 1680.0] + fluid.layers.reduce_prod(y, dim=[0, 1]) # [105.0, 384.0] + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/reduce_sum_cn.rst b/doc/paddle/api/paddle/fluid/layers/reduce_sum_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..316b0cc7daab6c4ec173483e1faa59832092e956 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/reduce_sum_cn.rst @@ -0,0 +1,54 @@ +.. _cn_api_fluid_layers_reduce_sum: + +reduce_sum +------------------------------- + +.. py:function:: paddle.fluid.layers.reduce_sum(input, dim=None, keep_dim=False, name=None) + + + + +该OP是对指定维度上的Tensor元素进行求和运算,并输出相应的计算结果。 + +参数: + - **input** (Variable)- 输入变量为多维Tensor或LoDTensor,支持数据类型为float32,float64,int32,int64。 + - **dim** (list | int ,可选)- 求和运算的维度。如果为None,则计算所有元素的和并返回包含单个元素的Tensor变量,否则必须在 :math:`[−rank(input),rank(input)]` 范围内。如果 :math:`dim [i] <0` ,则维度将变为 :math:`rank+dim[i]` ,默认值为None。 + - **keep_dim** (bool)- 是否在输出Tensor中保留减小的维度。如 keep_dim 为true,否则结果张量的维度将比输入张量小,默认值为False。 + - **name** (str , 可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回: 在指定dim上进行求和运算的Tensor,数据类型和输入数据类型一致。 + +返回类型: 变量(Variable) + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + # x是一个Tensor,元素如下: + # [[0.2, 0.3, 0.5, 0.9] + # [0.1, 0.2, 0.6, 0.7]] + # 接下来的示例中,我们在每处函数调用后面都标注出了它的结果张量。 + x = fluid.data(name='x', shape=[2, 4], dtype='float32') + fluid.layers.reduce_sum(x) # [3.5] + fluid.layers.reduce_sum(x, dim=0) # [0.3, 0.5, 1.1, 1.6] + fluid.layers.reduce_sum(x, dim=-1) # [1.9, 1.6] + fluid.layers.reduce_sum(x, dim=1, keep_dim=True) # [[1.9], [1.6]] + + # y 是一个shape为[2, 2, 2]的Tensor元素如下: + # [[[1, 2], [3, 4]], + # [[5, 6], [7, 8]]] + # 接下来的示例中,我们在每处函数调用后面都标注出了它的结果张量。 + y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32') + fluid.layers.reduce_sum(y, dim=[1, 2]) # [10, 26] + fluid.layers.reduce_sum(y, dim=[0, 1]) # [16, 20] + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/relu6_cn.rst b/doc/paddle/api/paddle/fluid/layers/relu6_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..511d6e6efe39a2baced442ef8bf53cd9b88fd9eb --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/relu6_cn.rst @@ -0,0 +1,39 @@ +.. _cn_api_fluid_layers_relu6: + +relu6 +------------------------------- + +.. py:function:: paddle.fluid.layers.relu6(x, threshold=6.0, name=None) + + + + +relu6激活函数 + +.. math:: out=min(max(0, x), threshold) + + +参数: + - **x** (Variable) - 输入的多维 ``Tensor`` ,数据类型为:float32、float64。 + - **threshold** (float) - relu6的阈值。默认值为6.0 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回: 与 ``x`` 维度相同、数据类型相同的 ``Tensor``。 + +返回类型: Variable + + +**代码示例:** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + in1 = np.array([[-1,0],[2.5,7.8]]) + with fluid.dygraph.guard(): + x1 = fluid.dygraph.to_variable(in1) + out1 = fluid.layers.relu6(x=x1, threshold=6.0) + print(out1.numpy()) + # [[0. 0. ] + # [2.5 6. ]] diff --git a/doc/paddle/api/paddle/fluid/layers/relu_cn.rst b/doc/paddle/api/paddle/fluid/layers/relu_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6df2e05bea36cf96f6990269d6bb77952f15549e --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/relu_cn.rst @@ -0,0 +1,37 @@ +.. _cn_api_fluid_layers_relu: + +relu +------------------------------- + +.. py:function:: paddle.fluid.layers.relu(x, name=None) + + + + +ReLU(Rectified Linear Unit)激活函数 + +.. math:: Out=max(0,x) + + +参数: + - **x** (Variable) - 输入的多维 ``Tensor`` ,数据类型为:float32、float64。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回: 与 ``x`` 维度相同、数据类型相同的 ``Tensor`` 。 + +返回类型: Variable + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + in1 = np.array([[-1,0],[1,2.6]]) + with fluid.dygraph.guard(): + x1 = fluid.dygraph.to_variable(in1) + out1 = fluid.layers.relu(x1) + print(out1.numpy()) + # [[0. 0. ] + # [1. 2.6]] diff --git a/doc/paddle/api/paddle/fluid/layers/reorder_lod_tensor_by_rank_cn.rst b/doc/paddle/api/paddle/fluid/layers/reorder_lod_tensor_by_rank_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7e67ee31003a74f34f64e9403336732388ca0ed2 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/reorder_lod_tensor_by_rank_cn.rst @@ -0,0 +1,67 @@ +.. _cn_api_fluid_layers_reorder_lod_tensor_by_rank: + +reorder_lod_tensor_by_rank +------------------------------- + +.. 
py:function:: paddle.fluid.layers.reorder_lod_tensor_by_rank(x, rank_table) + + + + + +该OP根据 ``rank_table`` 中提供的 ``LoDRankTable`` 类型的顺序信息来实现对 ``X`` 的重新排列。 +接口参数 ``X`` 是由多个序列(Sequence)组成的的一个批序列(Batch of Sequences), ``rank_table`` 存储着对batch中序列重新排列的 ``LoDRankTable`` 类型的顺序信息。 + +例如: + +假设在 ``rank_table`` 中存储的序列索引为 :math:`[3,0,2,1]` , ``X`` 将会被这样被重新排列: +``X`` 中的第四个序列(即索引为3的序列,后面以此类推)会变成排列后的batch中的第一个,紧接着就是原来batch中的第一个元素,第三个元素,和第二个元素。 +简言之,若有原batch::math:`X = [Seq0, Seq1, Seq2, Seq3]` 且 RankTable 中的索引为 :math:`[3,0,2,1]` ,那么输出即为 :math:`Out = [Seq3, Seq0, Seq2, Seq1]` ,它携带着新的LoD信息。 +如果 ``X`` 的LoD信息是空的,这表明 ``X`` 不是序列型数据。这和由多个定长为1的序列组成的batch是相同的情况。此时,该函数将对 ``X`` 中数据 在第一轴(axis)上按 ``rank_table`` 里的规则加以排列。 +例如,现有 :math:`X = [Slice0, Slice1, Slice2, Slice3]` ,并且它LoD信息为空,在 ``rank_table`` 索引为 :math:`[3, 0, 2, 1]` 。则 :math:`Out = [Slice3, Slice0, Slice2, Slice1]` ,并且不在其中追加LoD信息。 + +注意:该OP对 ``X`` 进行的排序所依据的 ``LoDRankTable`` 不一定是在 ``X`` 的基础上得出来的。它可以由其他不同的序列得出,并由该OP依据这个 ``LoDRankTable`` 来对 ``X`` 排序。 + +参数: + - **x** (Variable) - 待根据提供的 ``rank_table`` 进行排序的LoDTensor. + - **rank_table** (Variable) - 提供对 ``x`` 重新排列的 ``LoDRankTable`` 类型的顺序信息. + + +返回: 重新排列后的LoDTensor + +返回类型: Variable + +**代码示例**: + +.. code-block:: python + + + import numpy as np + import paddle.fluid as fluid + + rank_data = fluid.layers.data(name='rank_data', shape=[5], dtype='float32', lod_level=2) + table = fluid.layers.control_flow.lod_rank_table(rank_data, level=1) + + data = fluid.layers.data(name='data', shape=[9], lod_level=2) + new_data = fluid.layers.reorder_lod_tensor_by_rank( + x=data, rank_table=table) + + + place=fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + + rank_tensor = fluid.create_lod_tensor(np.random.random([14,5]).astype("float32"), [[4,1], [3, 2, 2, 3, 4]], place) + + data_ndarray = np.random.random([27, 9]).astype("float32") + data_lod = [[1, 2, 2, 4, 4], [2, 2, 4, 2, 2, 2, 1, 1, 2, 2, 4, 2, 1]] + data_tensor = fluid.create_lod_tensor(data_ndarray, data_lod, place) + + out = exe.run(fluid.default_main_program(),feed={'data':data_tensor, 'rank_data':rank_tensor}, fetch_list=[new_data], return_numpy=False) + print(out[0]) + # lod: {{0, 4, 5, 9, 11, 13}{0, 2, 6, 8, 9, 11, 13, 14, 15, 17, 19, 23, 25, 27}} + #shape: [27, 9] + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/reshape_cn.rst b/doc/paddle/api/paddle/fluid/layers/reshape_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e2a892314e8361cc76f568014bc32cb0fbb8124c --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/reshape_cn.rst @@ -0,0 +1,78 @@ +.. _cn_api_fluid_layers_reshape: + +reshape +------------------------------- + +.. py:function:: paddle.fluid.layers.reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None) + + +该OP在保持输入 ``x`` 数据不变的情况下,改变 ``x`` 的形状。 + +目标形状可由 ``shape`` 或 ``actual_shape`` 给出。当两个属性同时被指定时,``actual_shape`` 的优先级高于 ``shape`` ,但此时 ``shape`` 只能是整数列表或元组,且在编译时仍然应该正确地设置 ``shape`` 以保证形状推断。 + +在指定目标shape时存在一些技巧: + +.. code-block:: text + + 1. -1 表示这个维度的值是从x的元素总数和剩余维度推断出来的。因此,有且只有一个维度可以被设置为-1。 + 2. 0 表示实际的维数是从x的对应维数中复制出来的,因此shape中0的索引值不能超过x的维度。 + + +这里有一些例子来解释它们: + +.. code-block:: text + + 1. 给定一个形状为[2,4,6]的三维张量x,目标形状为[6,8],则将x变换为形状为[6,8]的2-D张量,且x的数据保持不变。 + 2. 给定一个形状为[2,4,6]的三维张量x,目标形状为[2,3,-1,2],则将x变换为形状为[2,3,4,2]的4-D张量,且x的数据保持不变。在这种情况下,目标形状的一个维度被设置为-1,这个维度的值是从x的元素总数和剩余维度推断出来的。 + 3. 给定一个形状为[2,4,6]的三维张量x,目标形状为[-1,0,3,2],则将x变换为形状为[2,4,3,2]的4-D张量,且x的数据保持不变。在这种情况下,0对应位置的维度值将从x的对应维数中复制,-1对应位置的维度值由x的元素总数和剩余维度推断出来。 + +.. 
warning:: +参数 ``actual_shape`` 之后将被舍弃,只用参数 ``shape`` 来表示目标形状。 + +参数: + - **x** (Tensor)- N-D ``Tensor``,数据类型为 ``float32``,``float64``,``int32``,或 ``int64``。 + - **shape** (list|tuple|Tensor)- 数据类型是 ``int32`` 。定义目标形状。目标形状最多只能有一个维度为-1。如果 ``shape`` 的类型是 list 或 tuple, 它的元素可以是整数或者形状为[1]的 ``Tensor``。如果 ``shape`` 的类型是 ``Tensor``,则是1-D的 ``Tensor``。 + - **actual_shape** (Tensor,可选)- 1-D ``Tensor``,默认值:`None`。如果 ``actual_shape`` 被提供,``actual_shape`` 具有比 ``shape`` 更高的优先级,此时 ``shape`` 只能是整数列表或元组。更新提示:``actual_shape`` 在未来的版本中将被舍弃,并用 ``shape`` 代替。 + - **act** (str,可选)- 对形状改变后的输入变量做非线性激活操作,激活函数类型可以参考 :ref:`api_guide_activations` 。默认值: ``None``。 + - **inplace** (bool,可选)- 如果 ``inplace`` 为 ``True``,则 ``layers.reshape`` 的输入和输出是同一个变量,否则 ``layers.reshape`` 的输入和输出是不同的变量。默认值:``False``。请注意,如果 ``x`` 是多个OP的输入,则 ``inplace`` 必须为False。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置。默认值: ``None``。 + +返回: +::::::::: +``Tensor``,改变形状后的 ``Tensor``,数据类型与 ``x`` 相同。如果 ``inplace`` 为 ``False``,则返回一个新的变量,否则将改变输入变量 ``x`` 自身。如果 ``act`` 为 ``None``,则直接返回形状改变后的变量,否则返回经过激活函数后的变量。 + + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + # example 1: + # attr shape is a list which doesn't contain Tensors. + data_1 = fluid.data( + name='data_1', shape=[2, 4, 6], dtype='float32') + reshaped_1 = fluid.layers.reshape( + x=data_1, shape=[-1, 0, 3, 2], inplace=True) + # the shape of reshaped_1 is [2,4,3,2]. + + # example 2: + # attr shape is a list which contains Tensors. + data_2 = fluid.layers.fill_constant([2,25], "int32", 3) + dim = fluid.layers.fill_constant([1], "int32", 5) + reshaped_2 = fluid.layers.reshape(data_2, shape=[dim, 10]) + # the shape of reshaped_2 is [5,10]. + + # example 3: + data_3 = fluid.data( + name="data_3", shape=[2,4,6], dtype='float32') + reshaped_3 = fluid.layers.reshape(x=data_3, shape=[6,8]) + # the shape of reshaped_3 is [6,8]. + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/resize_bilinear_cn.rst b/doc/paddle/api/paddle/fluid/layers/resize_bilinear_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9b0eb12851a4abebad7fa149bccb402ce6c86806 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/resize_bilinear_cn.rst @@ -0,0 +1,98 @@ +.. _cn_api_fluid_layers_resize_bilinear: + +resize_bilinear +------------------------------- + +.. 
py:function:: paddle.fluid.layers.resize_bilinear(input, out_shape=None, scale=None, name=None, actual_shape=None, align_corners=True, align_mode=1, data_format='NCHW') + + + + +**注意:** 参数 ``actual_shape`` 将被弃用,请使用 ``out_shape`` 替代。 + +该OP应用双向性插值法调整输入图片的大小,输出形状按优先级由actual_shape、out_shape和scale指定。 + +双线性插值是对线性插值的扩展,即二维变量方向上(如h方向和w方向)插值。关键思想是先在一个方向上执行线性插值,然后再在另一个方向上执行线性插值。 + +详情请参阅 `维基百科 `_ 。 + +align_corners和align_mode是可选参数,插值的计算方法可以由它们选择。 + + +:: + + Example: + + For scale: + + if align_corners = True && out_size > 1 : + + scale_factor = (in_size-1.0)/(out_size-1.0) + + else: + + scale_factor = float(in_size/out_size) + + Bilinear interpolation: + + if align_corners = False , align_mode = 0 + + input : (N,C,H_in,W_in) + output: (N,C,H_out,W_out) where: + + H_out = (H_{in}+0.5) * scale_{factor} - 0.5 + W_out = (W_{in}+0.5) * scale_{factor} - 0.5 + + + else: + + input : (N,C,H_in,W_in) + output: (N,C,H_out,W_out) where: + + H_out = H_{in} * scale_{factor} + W_out = W_{in} * scale_{factor} + + + +参数: + - **input** (Variable) - 4-D Tensor,数据类型为float32、float64或uint8,其数据格式由参数 ``data_format`` 指定。 + - **out_shape** (list|tuple|Variable|None) - 双线性层的输出形状,维度为[out_h, out_w]的二维Tensor。如果 :code:`out_shape` 是列表,每一个元素可以是整数或者维度为[1]的变量。如果 :code:`out_shape` 是变量,则其维度大小为1。默认值为None。 + - **scale** (float|Variable|None) - 用于输入高度或宽度的乘数因子。out_shape和scale至少要设置一个。out_shape的优先级高于scale。默认值为None。 + - **name** (str|None) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。默认值为None。 + - **actual_shape** (Variable) - 可选输入,用于动态指定输出形状。如果指定actual_shape,图像将根据给定的形状调整大小,而不是根据指定形状的 :code:`out_shape` 和 :code:`scale` 进行调整。也就是说, :code:`actual_shape` 具有最高的优先级。注意:如果希望动态指定输出形状,建议使用 :code:`out_shape` , 因为 :code:`actual_shape` 未来将被弃用。在使用actual_shape指定输出形状时,仍然需要设置out_shape和scale之一,否则在图形构建阶段会出现错误。默认值为None。 + - **align_corners** (bool)- 一个可选的bool型参数,如果为True,则将输入和输出张量的4个角落像素的中心对齐,并保留角点像素的值。 默认值为True + - **align_mode** (int)- 双线性插值的可选项。 可以是'0'代表src_idx = scale *(dst_indx + 0.5)-0.5;如果为'1' ,代表src_idx = scale * dst_index。 + - **data_format** (str,可选)- 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 + +返回:4-D Tensor,形状为 (num_batches, channels, out_h, out_w) 或 (num_batches, out_h, out_w, channels)。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + input = fluid.layers.data(name="input", shape=[3,6,9], dtype="float32") + # input.shape = [-1, 3, 6, 9], where -1 indicates batch size, and it will get the exact value in runtime. + + out0 = fluid.layers.resize_bilinear(input, out_shape=[12, 12]) + # out0.shape = [-1, 3, 12, 12], it means out0.shape[0] = input.shape[0] in runtime. 
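+
+    # 补充示意(非原始示例,仅作说明):scale 也可以直接传入 Python float,
+    # 此时运行时的输出空间尺寸为 in_size * scale,即 6*2.0=12、9*2.0=18。
+    # 变量名 out_scale 为本示意新增。
+    out_scale = fluid.layers.resize_bilinear(input, scale=2.0)
+    # 运行时 out_scale 的空间尺寸为 12 x 18,batch 维与通道维保持不变。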
+ + # out_shape is a list in which each element is a integer or a tensor Variable + dim1 = fluid.layers.data(name="dim1", shape=[1], dtype="int32", append_batch_size=False) + out1 = fluid.layers.resize_bilinear(input, out_shape=[12, dim1]) + # out1.shape = [-1, 3, 12, -1] + + # out_shape is a 1-D tensor Variable + shape_tensor = fluid.layers.data(name="shape_tensor", shape=[2], dtype="int32", append_batch_size=False) + out2 = fluid.layers.resize_bilinear(input, out_shape=shape_tensor) + # out2.shape = [-1, 3, -1, -1] + + # when use actual_shape + actual_shape_tensor = fluid.layers.data(name="actual_shape_tensor", shape=[2], dtype="int32", append_batch_size=False) + out3 = fluid.layers.resize_bilinear(input, out_shape=[4, 4], actual_shape=actual_shape_tensor) + # out3.shape = [-1, 3, 4, 4] + + # scale is a Variable + scale_tensor = fluid.layers.data(name="scale", shape=[1], dtype="float32", append_batch_size=False) + out4 = fluid.layers.resize_bilinear(input, scale=scale_tensor) + # out4.shape = [-1, 3, -1, -1] diff --git a/doc/paddle/api/paddle/fluid/layers/resize_nearest_cn.rst b/doc/paddle/api/paddle/fluid/layers/resize_nearest_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3b5c6cbfb3790d81b781ccbc8aca760e3d2edd7d --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/resize_nearest_cn.rst @@ -0,0 +1,108 @@ +.. _cn_api_fluid_layers_resize_nearest: + +resize_nearest +------------------------------- + +.. py:function:: paddle.fluid.layers.resize_nearest(input, out_shape=None, scale=None, name=None, actual_shape=None, align_corners=True, data_format='NCHW') + + + + +该OP对输入图片进行大小调整,在高度方向宽度方向进行最邻近插值(nearest neighbor interpolation)操作。 + +输出形状按优先级顺序依据 ``actual_shape`` , ``out_shape`` 和 ``scale`` 而定。 + +**注意:** 参数 ``actual_shape`` 将被弃用,请使用 ``out_shape`` 替代。 + +:: + + Example: + + For scale: + + if align_corners = True && out_size > 1 : + + scale_factor = (in_size-1.0)/(out_size-1.0) + + else: + + scale_factor = float(in_size/out_size) + + + Nearest neighbor interpolation: + + if align_corners = False + + input : (N,C,H_in,W_in) + output: (N,C,H_out,W_out) where: + + H_out = \left \lfloor {H_{in} * scale_{}factor}} \right \rfloor + W_out = \left \lfloor {W_{in} * scale_{}factor}} \right \rfloor + + else: + align_corners = True + + input : (N,C,H_in,W_in) + output: (N,C,H_out,W_out) where: + + H_out = round(H_{in} * scale_{factor}) + W_out = round(W_{in} * scale_{factor}) + +最邻近插值的详细介绍请参照: `Wiki Nearest-neighbor interpolation `_ + + +参数: + - **input** (Variable) - 4-D Tensor,数据类型为float32、float64或uint8,其数据格式由参数 ``data_format`` 指定。 + - **out_shape** (list|tuple|Variable|None) - 双线性插值法调整后的输出,维度为[out_h, out_w]的2-D Tensor。如果 :code:`out_shape` 是列表,每一个元素可以是整数或者shape为[1]的变量。如果 :code:`out_shape` 是变量,则其维度大小为1。默认值为None。 + - **scale** (float|Variable|None) – 输入高宽的乘数因子。 ``out_shape`` 和 ``scale`` 二者至少设置其一。 ``out_shape`` 具有比 ``scale`` 更高的优先级。 默认值为None。 + - **name** (str|None) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。默认值为None。 + - **actual_shape** (Variable) - 可选输入,用于动态指定输出形状。如果指定actual_shape,图像将根据给定的形状调整大小,而不是根据指定形状的 :code:`out_shape` 和 :code:`scale` 进行调整。也就是说, :code:`actual_shape` 具有最高的优先级。注意:如果希望动态指定输出形状,建议使用 :code:`out_shape` , 因为 :code:`actual_shape` 未来将被弃用。在使用actual_shape指定输出形状时,仍然需要设置out_shape和scale之一,否则在图形构建阶段会出现错误。默认值为None。 + - **align_corners** (bool)- 一个可选的bool型参数,如果为True,则将输入和输出张量的4个角落像素的中心对齐,并保留角点像素的值。 默认值为True。 + - **data_format** (str,可选)- 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 + +返回:4-D Tensor,形状为 (num_batches, 
channels, out_h, out_w) 或 (num_batches, out_h, out_w, channels)。 + +返回类型:Variable + + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + input = fluid.layers.data(name="input", shape=[3,6,9], dtype="float32") + # input.shape = [-1, 3, 6, 9], where -1 indicates batch size, and it will get the exact value in runtime. + + out0 = fluid.layers.resize_nearest(input, out_shape=[12, 12]) + # out0.shape = [-1, 3, 12, 12], it means out0.shape[0] = input.shape[0] in runtime. + + # out_shape is a list in which each element is a integer or a tensor Variable + dim1 = fluid.layers.data(name="dim1", shape=[1], dtype="int32", append_batch_size=False) + out1 = fluid.layers.resize_nearest(input, out_shape=[12, dim1]) + # out1.shape = [-1, 3, 12, -1] + + # out_shape is a 1-D tensor Variable + shape_tensor = fluid.layers.data(name="resize_shape", shape=[2], dtype="int32", append_batch_size=False) + out2 = fluid.layers.resize_nearest(input, out_shape=shape_tensor) + # out2.shape = [-1, 3, -1, -1] + + # when use actual_shape + actual_shape_tensor = fluid.layers.data(name="actual_shape_tensor", shape=[2], dtype="int32", append_batch_size=False) + out3 = fluid.layers.resize_nearest(input, out_shape=[4, 4], actual_shape=actual_shape_tensor) + # out3.shape = [-1, 3, 4, 4] + + # scale is a Variable + scale_tensor = fluid.layers.data(name="scale", shape=[1], dtype="float32", append_batch_size=False) + out4 = fluid.layers.resize_nearest(input, scale=scale_tensor) + # out4.shape = [-1, 3, -1, -1] + + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/resize_trilinear_cn.rst b/doc/paddle/api/paddle/fluid/layers/resize_trilinear_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..785a2b7478773f234264a733f448d82026960b18 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/resize_trilinear_cn.rst @@ -0,0 +1,93 @@ +.. _cn_api_fluid_layers_resize_trilinear: + +resize_trilinear +------------------------------- + +.. 
py:function:: paddle.fluid.layers.resize_trilinear(input, out_shape=None, scale=None, name=None, actual_shape=None, align_corners=True, align_mode=1, data_format='NCDHW') + + + + +**注意:** 参数 ``actual_shape`` 将被弃用,请使用 ``out_shape`` 替代。 + +该层对输入进行放缩,基于给定的由 ``actual_shape`` , ``out_shape`` , ``scale`` 确定的输出shape,进行三线插值。三线插值是包含三个参数的线性插值方程(D方向,H方向, W方向),在一个3D格子上进行三个方向的线性插值。更多细节,请参考维基百科:https://en.wikipedia.org/wiki/Trilinear_interpolation +Align_corners和align_mode都是可选参数,可以用来设置插值的计算方法,如下: + +:: + + Example: + + For scale: + + if align_corners = True && out_size > 1 : + + scale_factor = (in_size-1.0)/(out_size-1.0) + + else: + + scale_factor = float(in_size/out_size) + + Bilinear interpolation: + + if: + align_corners = False , align_mode = 0 + + input : (N,C,D_in,H_in,W_in) + output: (N,C,D_out,H_out,W_out) where: + + D_out = (D_{in}+0.5) * scale_{factor} - 0.5 + H_out = (H_{in}+0.5) * scale_{factor} - 0.5 + W_out = (W_{in}+0.5) * scale_{factor} - 0.5 + + + else: + + input : (N,C,D_in,H_in,W_in) + output: (N,C,D_out,H_out,W_out) where: + + D_out = D_{in} * scale_{factor} + H_out = H_{in} * scale_{factor} + W_out = W_{in} * scale_{factor} + +参数: + - **input** (Variable) – 5-D Tensor,数据类型为float32、float64或uint8,其数据格式由参数 ``data_format`` 指定。 + - **out_shape** (list|tuple|Variable|None) – 调整最近邻层的输出形状,形式为(out_h, out_w)。默认值:None。如果 :code:`out_shape` 是列表,每一个元素可以是整数或者shape为[1]的变量。如果 :code:`out_shape` 是变量,则其维度大小为1。 + - **scale** (float|None) – 输入高、宽的乘法器。 ``out_shape`` 和 ``scale`` 二者至少设置其一。 ``out_shape`` 具有比 ``scale`` 更高的优先级。 默认: None + - **name** (str|None) – 输出变量的命名 + - **actual_shape** (Variable) – 可选输入, 动态设置输出张量的形状。 如果提供该值, 图片放缩会依据此形状进行, 而非依据 ``out_shape`` 和 ``scale`` 。 即为, ``actual_shape`` 具有最高的优先级。 如果想动态指明输出形状,推荐使用 ``out_shape`` ,因为 ``actual_shape`` 未来将被弃用。 当使用 ``actual_shape`` 来指明输出形状, ``out_shape`` 和 ``scale`` 也应该进行设置, 否则在图形生成阶段将会报错。默认: None + - **align_corners** (bool)- 一个可选的bool型参数,如果为True,则将输入和输出张量的4个角落像素的中心对齐,并保留角点像素的值。 默认值:True + - **align_mode** (bool) - (int,默认为'1'),双线性插值选项,src_idx = scale*(dst_index+0.5)-0.5时取'0',src_idx = scale*dst_index时取'1'。 + - **data_format** (str,可选)- 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCDHW"和"NDHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCDHW"。 + +返回:5-D Tensor,形状为 (num_batches, channels, out_d, out_h, out_w) 或 (num_batches, out_d, out_h, out_w, channels)。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + input = fluid.layers.data(name="input", shape=[3,6,9,11], dtype="float32") + # input.shape = [-1, 3, 6, 9, 11], where -1 indicates batch size, and it will get the exact value in runtime. + + out0 = fluid.layers.resize_trilinear(input, out_shape=[12, 12, 12]) + # out0.shape = [-1, 3, 12, 12, 12], it means out0.shape[0] = input.shape[0] in runtime. 
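+
+    # 补充示意(非原始示例,仅作说明):scale 同样可以直接传入 Python float,
+    # 此时运行时的输出空间尺寸为 in_size * scale,即 D=6*2.0=12、H=9*2.0=18、W=11*2.0=22。
+    # 变量名 out_scale 为本示意新增。
+    out_scale = fluid.layers.resize_trilinear(input, scale=2.0)
+    # 运行时 out_scale 的空间尺寸为 12 x 18 x 22,batch 维与通道维保持不变。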
+ + # out_shape is a list in which each element is a integer or a tensor Variable + dim1 = fluid.layers.data(name="dim1", shape=[1], dtype="int32", append_batch_size=False) + out1 = fluid.layers.resize_trilinear(input, out_shape=[12, dim1, 4]) + # out1.shape = [-1, 3, 12, -1, 4] + + # out_shape is a 1-D tensor Variable + shape_tensor = fluid.layers.data(name="shape_tensor", shape=[3], dtype="int32", append_batch_size=False) + out2 = fluid.layers.resize_trilinear(input, out_shape=shape_tensor) + # out2.shape = [-1, 3, -1, -1, -1] + + # when use actual_shape + actual_shape_tensor = fluid.layers.data(name="actual_shape_tensor", shape=[3], dtype="int32", append_batch_size=False) + out3 = fluid.layers.resize_trilinear(input, out_shape=[4, 4, 8], actual_shape=actual_shape_tensor) + # out3.shape = [-1, 3, 4, 4, 8] + + # scale is a Variable + scale_tensor = fluid.layers.data(name="scale", shape=[1], dtype="float32", append_batch_size=False) + out4 = fluid.layers.resize_trilinear(input, scale=scale_tensor) + # out4.shape = [-1, 3, -1, -1, -1] diff --git a/doc/paddle/api/paddle/fluid/layers/retinanet_detection_output_cn.rst b/doc/paddle/api/paddle/fluid/layers/retinanet_detection_output_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8c6d59a5546e019ff8f3560f648080f224d1afc6 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/retinanet_detection_output_cn.rst @@ -0,0 +1,63 @@ +.. _cn_api_fluid_layers_retinanet_detection_output: + +retinanet_detection_output +------------------------------- + +.. py:function:: paddle.fluid.layers.retinanet_detection_output(bboxes, scores, anchors, im_info, score_threshold=0.05, nms_top_k=1000, keep_top_k=100, nms_threshold=0.3, nms_eta=1.0) + + + + +在 `RetinaNet `_ 中,有多个 `FPN `_ 层会输出用于分类的预测值和位置回归的预测值,该OP通过执行以下步骤将这些预测值转换成最终的检测结果: + +1. 在每个FPN层上,先剔除分类预测值小于score_threshold的anchor,然后按分类预测值从大到小排序,选出排名前nms_top_k的anchor,并将这些anchor与其位置回归的预测值做解码操作得到检测框。 +2. 
合并全部FPN层上的检测框,对这些检测框进行非极大值抑制操作(NMS)以获得最终的检测结果。 + + +参数: + - **bboxes** (List) – 由来自不同FPN层的Tensor组成的列表,表示全部anchor的位置回归预测值。列表中每个元素是一个维度为 :math:`[N, Mi, 4]` 的3-D Tensor,其中,第一维N表示批量训练时批量内的图片数量,第二维Mi表示每张图片第i个FPN层上的anchor数量,第三维4表示每个anchor有四个坐标值。数据类型为float32或float64。 + - **scores** (List) – 由来自不同FPN层的Tensor组成的列表,表示全部anchor的分类预测值。列表中每个元素是一个维度为 :math:`[N, Mi, C]` 的3-D Tensor,其中第一维N表示批量训练时批量内的图片数量,第二维Mi表示每张图片第i个FPN层上的anchor数量,第三维C表示类别数量( **不包括背景类** )。数据类型为float32或float64。 + - **anchors** (List) – 由来自不同FPN层的Tensor组成的列表,表示全部anchor的坐标值。列表中每个元素是一个维度为 :math:`[Mi, 4]` 的2-D Tensor,其中第一维Mi表示第i个FPN层上的anchor数量,第二维4表示每个anchor有四个坐标值([xmin, ymin, xmax, ymax])。数据类型为float32或float64。 + - **im_info** (Variable) – 维度为 :math:`[N, 3]` 的2-D Tensor,表示输入图片的尺寸信息。 其中,第一维N表示批量训练时各批量内的图片数量,第二维3表示各图片的尺寸信息,分别是网络输入尺寸的高和宽,以及原图缩放至网络输入大小时的缩放比例。数据类型为float32或float64。 + - **score_threshold** (float32) – 在NMS步骤之前,用于滤除每个FPN层的检测框的阈值,默认值为0.05。 + - **nms_top_k** (int32) – 在NMS步骤之前,保留每个FPN层的检测框的数量,默认值为1000。 + - **keep_top_k** (int32) – 在NMS步骤之后,每张图像要保留的检测框数量,默认值为100,若设为-1,则表示保留NMS步骤后剩下的全部检测框。 + - **nms_threshold** (float32) – NMS步骤中用于剔除检测框的Intersection-over-Union(IoU)阈值,默认为0.3。 + - **nms_eta** (float32) – NMS步骤中用于调整nms_threshold的参数。默认值为1.,表示nms_threshold的取值在NMS步骤中一直保持不变,即其设定值。若nms_eta小于1.,则表示当nms_threshold的取值大于0.5时,每保留一个检测框就调整一次nms_threshold的取值,即nms_threshold = nms_threshold * nms_eta,直到nms_threshold的取值小于等于0.5后结束调整。 +**注意:在模型输入尺寸特别小的情况,此时若用score_threshold滤除anchor,可能会导致没有任何检测框剩余。为避免这种情况出现,该OP不会对最高FPN层上的anchor做滤除。因此,要求bboxes、scores、anchors中最后一个元素是来自最高FPN层的Tensor** 。 + +返回:维度是 :math:`[No, 6]` 的2-D LoDTensor,表示批量内的检测结果。第一维No表示批量内的检测框的总数,第二维6表示每行有六个值:[label, score,xmin,ymin,xmax,ymax]。该LoDTensor的LoD中存放了每张图片的检测框数量,第i张图片的检测框数量为 :math:`LoD[i + 1] - LoD[i]` 。如果 :math:`LoD[i + 1] - LoD[i]` 为0,则第i个图像没有检测结果。 如果批量内的全部图像都没有检测结果,则LoD中所有元素被设置为0,LoDTensor被赋为空(None)。 + + +返回类型:变量(Variable),数据类型为float32或float64。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + bboxes_low = fluid.data( + name='bboxes_low', shape=[1, 44, 4], dtype='float32') + bboxes_high = fluid.data( + name='bboxes_high', shape=[1, 11, 4], dtype='float32') + scores_low = fluid.data( + name='scores_low', shape=[1, 44, 10], dtype='float32') + scores_high = fluid.data( + name='scores_high', shape=[1, 11, 10], dtype='float32') + anchors_low = fluid.data( + name='anchors_low', shape=[44, 4], dtype='float32') + anchors_high = fluid.data( + name='anchors_high', shape=[11, 4], dtype='float32') + im_info = fluid.data( + name="im_info", shape=[1, 3], dtype='float32') + nmsed_outs = fluid.layers.retinanet_detection_output( + bboxes=[bboxes_low, bboxes_high], + scores=[scores_low, scores_high], + anchors=[anchors_low, anchors_high], + im_info=im_info, + score_threshold=0.05, + nms_top_k=1000, + keep_top_k=100, + nms_threshold=0.45, + nms_eta=1.0) diff --git a/doc/paddle/api/paddle/fluid/layers/retinanet_target_assign_cn.rst b/doc/paddle/api/paddle/fluid/layers/retinanet_target_assign_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e6ce7bfa4237f7191cb4bb1c41719c9ee067e70f --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/retinanet_target_assign_cn.rst @@ -0,0 +1,75 @@ +.. _cn_api_fluid_layers_retinanet_target_assign: + +retinanet_target_assign +------------------------------- + +.. 
py:function:: paddle.fluid.layers.retinanet_target_assign(bbox_pred, cls_logits, anchor_box, anchor_var, gt_boxes, gt_labels, is_crowd, im_info, num_classes=1, positive_overlap=0.5, negative_overlap=0.4) + + + + +该OP是从输入anchor中找出训练检测模型 `RetinaNet `_ 所需的正负样本,并为每个正负样本分配用于分类的目标值和位置回归的目标值,同时从全部anchor的类别预测值cls_logits、位置预测值bbox_pred中取出属于各正负样本的部分。 + +正负样本的查找准则如下: + - 若anchor与某个真值框之间的Intersection-over-Union(IoU)大于其他anchor与该真值框的IoU,则该anchor是正样本,且被分配给该真值框; + - 若anchor与某个真值框之间的IoU大于等于positive_overlap,则该anchor是正样本,且被分配给该真值框; + - 若anchor与某个真值框之间的IoU介于[0, negative_overlap),则该anchor是负样本; + - 不满足以上准则的anchor不参与模型训练。 + +在RetinaNet中,对于每个anchor,模型都会预测一个C维的向量用于分类,和一个4维的向量用于位置回归,因此各正负样本的分类目标值也是一个C维向量,各正样本的位置回归目标值也是一个4维向量。对于正样本而言,若其被分配的真值框的类别是i,则其分类目标值的第i-1维为1,其余维度为0;其位置回归的目标值由anchor和真值框之间位置差值计算得到。对于负样本而言,其分类目标值的所有维度都为0,因负样本不参与位置回归的训练,故负样本无位置回归的目标值。 + +分配结束后,从全部anchor的类别预测值cls_logits中取出属于各正负样本的部分,从针对全部anchor的位置预测值bbox_pred中取出属于各正样本的部分。 + + +参数: + - **bbox_pred** (Variable) – 维度为 :math:`[N, M, 4]` 的3-D Tensor,表示全部anchor的位置回归预测值。其中,第一维N表示批量训练时批量内的图片数量,第二维M表示每张图片的全部anchor的数量,第三维4表示每个anchor有四个坐标值。数据类型为float32或float64。 + - **cls_logits** (Variable) – 维度为 :math:`[N, M, C]` 的3-D Tensor,表示全部anchor的分类预测值。 其中,第一维N表示批量训练时批量内的图片数量,第二维M表示每张图片的全部anchor的数量,第三维C表示每个anchor需预测的类别数量( **注意:不包括背景** )。数据类型为float32或float64。 + + - **anchor_box** (Variable) – 维度为 :math:`[M, 4]` 的2-D Tensor,表示全部anchor的坐标值。其中,第一维M表示每张图片的全部anchor的数量,第二维4表示每个anchor有四个坐标值 :math:`[xmin, ymin, xmax, ymax]` ,:math:`[xmin, ymin]` 是anchor的左上顶部坐标,:math:`[xmax, ymax]` 是anchor的右下坐标。数据类型为float32或float64。anchor_box的生成请参考OP :ref:`cn_api_fluid_layers_anchor_generator`。 + - **anchor_var** (Variable) – 维度为 :math:`[M, 4]` 的2-D Tensor,表示在后续计算损失函数时anchor坐标值的缩放比例。其中,第一维M表示每张图片的全部anchor的数量,第二维4表示每个anchor有四个坐标缩放因子。数据类型为float32或float64。anchor_var的生成请参考OP :ref:`cn_api_fluid_layers_anchor_generator`。 + - **gt_boxes** (Variable) – 维度为 :math:`[G, 4]` 且LoD level必须为1的2-D LoDTensor,表示批量训练时批量内的真值框位置。其中,第一维G表示批量内真值框的总数,第二维表示每个真值框有四个坐标值。数据类型为float32或float64。 + - **gt_labels** (variable) – 维度为 :math:`[G, 1]` 且LoD level必须为1的2-D LoDTensor,表示批量训练时批量内的真值框类别,数值范围为 :math:`[1, C]` 。其中,第一维G表示批量内真值框的总数,第二维表示每个真值框只有1个类别。数据类型为int32。 + - **is_crowd** (Variable) – 维度为 :math:`[G]` 且LoD level必须为1的1-D LoDTensor,表示各真值框是否位于重叠区域,值为1表示重叠,则不参与训练。第一维G表示批量内真值框的总数。数据类型为int32。 + - **im_info** (Variable) – 维度为 :math:`[N, 3]` 的2-D Tensor,表示输入图片的尺寸信息。其中,第一维N表示批量训练时批量内的图片数量,第二维3表示各图片的尺寸信息,分别是网络输入尺寸的高和宽,以及原图缩放至网络输入尺寸的缩放比例。数据类型为float32或float64。 + - **num_classes** (int32) – 分类的类别数量,默认值为1。 + - **positive_overlap** (float32) – 判定anchor是一个正样本时anchor和真值框之间的最小IoU,默认值为0.5。 + - **negative_overlap** (float32) – 判定anchor是一个负样本时anchor和真值框之间的最大IoU,默认值为0.4。该参数的设定值应小于等于positive_overlap的设定值,若大于,则positive_overlap的取值为negative_overlap的设定值。 + + +返回: + - **predict_scores** (Variable) – 维度为 :math:`[F + B, C]` 的2-D Tensor,表示正负样本的分类预测值。其中,第一维F为批量内正样本的数量,B为批量内负样本的数量,第二维C为分类的类别数量。数据类型为float32或float64。 + - **predict_location** (Variable) — 维度为 :math:`[F, 4]` 的2-D Tensor,表示正样本的位置回归预测值。其中,第一维F为批量内正样本的数量,第二维4表示每个样本有4个坐标值。数据类型为float32或float64。 + - **target_label** (Variable) — 维度为 :math:`[F + B, 1]` 的2-D Tensor,表示正负样本的分类目标值。其中,第一维F为正样本的数量,B为负样本的数量,第二维1表示每个样本的真值类别只有1类。数据类型为int32。 + - **target_bbox** (Variable) — 维度为 :math:`[F, 4]` 的2-D Tensor,表示正样本的位置回归目标值。其中,第一维F为正样本的数量,第二维4表示每个样本有4个坐标值。数据类型为float32或float64。 + - **bbox_inside_weight** (Variable) — 维度为 :math:`[F, 4]` 的2-D Tensor,表示位置回归预测值中是否属于假正样本,若某个正样本为假,则bbox_inside_weight中对应维度的值为0,否则为1。第一维F为正样本的数量,第二维4表示每个样本有4个坐标值。数据类型为float32或float64。 + - **fg_num** (Variable) — 维度为 :math:`[N, 1]` 的2-D 
Tensor,表示正样本的数量。其中,第一维N表示批量内的图片数量。 **注意:由于正样本数量会用作后续损失函数的分母,为避免出现除以0的情况,该OP已将每张图片的正样本数量做加1操作** 。数据类型为int32。 + + +返回类型:元组(tuple),元组中的元素predict_scores,predict_location,target_label,target_bbox,bbox_inside_weight,fg_num都是Variable。 + + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + bbox_pred = fluid.data(name='bbox_pred', shape=[1, 100, 4], + dtype='float32') + cls_logits = fluid.data(name='cls_logits', shape=[1, 100, 10], + dtype='float32') + anchor_box = fluid.data(name='anchor_box', shape=[100, 4], + dtype='float32') + anchor_var = fluid.data(name='anchor_var', shape=[100, 4], + dtype='float32') + gt_boxes = fluid.data(name='gt_boxes', shape=[10, 4], + dtype='float32') + gt_labels = fluid.data(name='gt_labels', shape=[10, 1], + dtype='int32') + is_crowd = fluid.data(name='is_crowd', shape=[1], + dtype='int32') + im_info = fluid.data(name='im_info', shape=[1, 3], + dtype='float32') + score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \ + fluid.layers.retinanet_target_assign(bbox_pred, cls_logits, anchor_box, + anchor_var, gt_boxes, gt_labels, is_crowd, im_info, 10) diff --git a/doc/paddle/api/paddle/fluid/layers/reverse_cn.rst b/doc/paddle/api/paddle/fluid/layers/reverse_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5568cfb36f44a17a5f8f9bc15e5993ca5ee8a921 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/reverse_cn.rst @@ -0,0 +1,62 @@ +.. _cn_api_fluid_layers_reverse: + +reverse +------------------------------- + +.. py:function:: paddle.fluid.layers.reverse(x,axis) + + + + +**reverse** + +该OP对输入Tensor ``x`` 在指定轴 ``axis`` 上进行数据的逆序操作。 + +:: + + 示例1: + 输入是 LoDTensor 类型: + x = [[0, 1, 2], [3, 4, 5], [6, 7, 8]] + axis = [0, 1] + + 输出: + output = [[8, 7, 6], [5, 4, 3], [2, 1, 0]] + + 示例2: + 输入是 LoDTensorArray 类型: + x = {[[0, 1], [2, 3]], + [[4, 5, 6]], + [[7], [8], [9]]} + axis = 0 + + 输出: + output = {[[7], [8], [9]], + [[4, 5, 6]], + [[0, 1], [2, 3]]} + +参数: + - **x** (Variable) - 输入为Tensor或LoDTensorArray,数据类型支持bool,int8,int32,int64,float32和float64。若输入是LoDTensorArray类型,则返回一个逆序的LoDTensorArray,其内部Tensor元素的次序保持不变。 + - **axis** (int|tuple|list) - 指定逆序运算的轴,取值范围是[-R, R),R是输入 ``x`` 的Rank, ``axis`` 为负时与 ``axis`` +R 等价。如果 ``axis`` 是一个元组或列表,则在 ``axis`` 每个元素值所指定的轴上进行逆序运算。如果输入是LoDTensorArray类型,axis须是值为0的int,或shape为[1]的list ``[0]`` 、元组 ``(0,)`` 。 +返回:逆序后的Tensor,形状、数据类型和 ``x`` 一致。 + +返回类型:Variable + +**代码示例**: + +.. 
code-block:: python + + import paddle.fluid as fluid + import numpy as np + data = fluid.layers.assign(np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]], dtype='float32')) # [[0., 1., 2.], [3., 4., 5.], [6., 7., 8.]] + result1 = fluid.layers.reverse(data, 0) # [[6., 7., 8.], [3., 4., 5.], [0., 1., 2.]] + result2 = fluid.layers.reverse(data, [0, 1]) # [[8., 7., 6.], [5., 4., 3.], [2., 1., 0.]] + + # 输入为LoDTensorArray时 + data1 = fluid.layers.assign(np.array([[0, 1, 2]], dtype='float32')) + data2 = fluid.layers.assign(np.array([[3, 4, 5]], dtype='float32')) + tensor_array = fluid.layers.create_array(dtype='float32') + i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0) + fluid.layers.array_write(data1, i, tensor_array) + fluid.layers.array_write(data2, i+1, tensor_array) + + reversed_tensor_array = fluid.layers.reverse(tensor_array, 0) # {[[3, 4, 5]], [[0, 1, 2]]} diff --git a/doc/paddle/api/paddle/fluid/layers/rnn_cn.rst b/doc/paddle/api/paddle/fluid/layers/rnn_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6f3cf8f2f193f4acb8a91e21d08cab4beb6ad863 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/rnn_cn.rst @@ -0,0 +1,39 @@ +.. _cn_api_fluid_layers_rnn: + +rnn +------------------------------- + + + +.. py:method:: paddle.fluid.layers.rnn(cell, inputs, initial_states=None, sequence_length=None, time_major=False, is_reverse=False, **kwargs) + + + + + +rnn创建一个由RNNCell :code:`cell` 指定的递归神经网络,该神经网络重复执行 :code:`cell.call()` 直至达到 :code:`inputs` 的最大长度。 + +参数: + - **cell** (RNNCell) - RNNCell的实例。 + - **inputs** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构。当 :code:`time_major == False` 时,tensor的形状应为 :math:`[batch\_size, sequence\_length, ...]`;当 :code:`time_major == True` 时,tensor的形状应为 :math:`[sequence\_length, batch\_size, ...]`。它表示要在RNN中展开的输入。 + - **initial_states** (Variable,可选) - 初始状态,单个tensor变量或tensor变量组成的嵌套结构,表示RNN的初始状态。如果未提供,将使用 :code:`cell.get_initial_states` 产生初始状态。默认值None。 + - **sequence_length** (Variable,可选) - 序列长度,形状为 :math:`[batch\_size]` 的tensor。它存储每个实例的实际长度,从而使用户能够在批处理的时候,提取最后一个有效状态,以确保正确性。如果未提供,则不区分填充和非填充输入。默认值None。 + - **time_major** (bool,可选) - 指示输入tensor和输出tensor中包含的tensor的数据组织。如果为False,则数据组织为batch为主,形状为 :math:`[batch\_size,sequence\_length,...]`。如果为True,则数据组织为time为主,形状为 :math:`[sequence\_length,batch\_size,...]`。默认值:False。 + - **is_reverse** (bool,可选) - 指示是否以输入序列的相反顺序进行计算。默认值:False。 + - **kwargs** - 其他关键字参数。参数传递给 :code:`cell.call`。 + +返回:一个元组 :code:`(final_outputs, final_states)` ,包括 :code:`final_outputs` 和 :code:`final_states`,均为单个tensor变量或tensor变量的嵌套结构。:code:`final_outputs` 具有与 :code:`cell.call` 返回的 :code:`outputs` 相同的结构和数据类型,并且 :code:`final_outputs` 中的每个tensor是将所有时间步的 :code:`outputs` 中对应内容堆叠产生,因此其形状为 :math:`[batch\_size,sequence\_length,...]` (:code:`time_major == False` 时)或 :math:`[sequence\_length,batch\_size,...]` (:code:`time_major == True` 时)。:code:`final_states` 是最后一步的状态,因此具有和 :code:`initial_states` 相同的结构,形状和数据类型。 + +返回类型:tuple + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + inputs = fluid.data(name="inputs", + shape=[-1, 32, 128], + dtype="float32") + cell = fluid.layers.GRUCell(hidden_size=128) + outputs = fluid.layers.rnn(cell=cell, inputs=inputs) + diff --git a/doc/paddle/api/paddle/fluid/layers/roi_align_cn.rst b/doc/paddle/api/paddle/fluid/layers/roi_align_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d00f6637db27b9152e6fabf60d0d7f0a4777adb7 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/roi_align_cn.rst @@ -0,0 +1,51 @@ +.. 
_cn_api_fluid_layers_roi_align: + +roi_align +------------------------------- + +.. py:function:: paddle.fluid.layers.roi_align(input, rois, pooled_height=1, pooled_width=1, spatial_scale=1.0, sampling_ratio=-1, name=None) + + + + +**实现RoIAlign操作。** + +Region of Interests align(直译:有意义、有价值选区对齐) 用于实现双线性插值,它可以将不均匀大小的输入 +变为固定大小的特征图(feature map)。 + +该运算通过 ``pooled_width`` 和 ``pooled_height`` 将每个推荐区域划分为等大小分块。位置保持不变。 + +在每个RoI分块中,分别取sampling_ratio个点(若为-1则取框内所有点),每个点通过双线性插值直接计算得到坐标。再对分块内取的点取平均值作为小框的坐标值。坐标对齐有误的问题。 + +参数: + - **input** (Variable) – 维度为[N,C,H,W]的4-D Tensor,N 为batch大小, C 为输入通道的个数, H 特征高度, W 特征宽度。数据类型为float32或float64。 + - **rois** (Variable) – 维度为[num_rois,4]2-D LoDTensor,数据类型为float32或float64。待池化的ROIs (Regions of Interest),lod level 为1。给定比如[[x1,y1,x2,y2], ...],(x1,y1)为左上点坐标,(x2,y2)为右下点坐标。 + - **pooled_height** (int32,可选) – 池化后的输出高度,默认值为1。 + - **pooled_width** (int32,可选) – 池化后的输出宽度,默认值为1。 + - **spatial_scale** (float32,可选) – 乘法性质空间标尺因子,池化时,将RoI坐标变换至运算采用的标度,默认值为1.0。 + - **sampling_ratio** (int32) – 插值格中采样点的数目。 如果它 <=0, 它们将自适应 ``roi_width`` 和 ``pooled_w`` , 在高度上也是同样的道理。默认值为-1 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + + +返回:表示RoI align输出的LoDTensor,数据类型为float32或float64,维度为 (num_rois, channels, pooled_h, pooled_w) + + +返回类型:Variable + + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + x = fluid.data( + name='data', shape=[None, 256, 32, 32], dtype='float32') + rois = fluid.data( + name='rois', shape=[None, 4], dtype='float32') + align_out = fluid.layers.roi_align(input=x, + rois=rois, + pooled_height=7, + pooled_width=7, + spatial_scale=0.5, + sampling_ratio=-1) + diff --git a/doc/paddle/api/paddle/fluid/layers/roi_perspective_transform_cn.rst b/doc/paddle/api/paddle/fluid/layers/roi_perspective_transform_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5f03d7b7d112d433f2c6e0f7df9ecbfef1979715 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/roi_perspective_transform_cn.rst @@ -0,0 +1,42 @@ +.. _cn_api_fluid_layers_roi_perspective_transform: + +roi_perspective_transform +------------------------------- + +.. py:function:: paddle.fluid.layers.roi_perspective_transform(input, rois, transformed_height, transformed_width, spatial_scale=1.0) + + + + +该OP对RoI区域做透视变换,将不规则的RoI区域变成固定大小的矩形区域,透视变换是线性代数里面的一种基础变换。 + +参数: + - **input** (Variable) - 输入特征图,4-D Tensor,格式为NCHW。N是batch_size,C是输入通道数,H是特征图高度,W是特征图宽度。数据类型是float32 + - **rois** (Variable) - 感兴趣区域,2D-LoDTensor,形状是(num_rois,8),lod_level为1。其数据形式是[[x1,y1,x2,y2,x3,y3,x4,y4], ...],其中(x1,y1)是左上角坐标,(x2,y2)是右上角坐标,(x3,y3)是右下角坐标,(x4,y4)是左下角坐标。数据类型与 ``input`` 相同 + - **transformed_height** (int) - 输出的高度 + - **transformed_width** (int) – 输出的宽度 + - **spatial_scale** (float,可选) - 空间尺度因子,用于缩放ROI坐标,浮点数。默认值1.0 + +返回: 由三个变量构成的元组 (out, mask, transform_matrix) + - ``out`` : ``ROIPerspectiveTransformOp`` 的输出,4D-LoDTensor,形状是(num_rois,channels,transformed_height,transformed_width),lod_level为1 + - ``mask`` : ``ROIPerspectiveTransformOp`` 的掩码,4D-LoDTensor,形状是(num_rois,1,transformed_height,transformed_width),lod_level为1 + - ``transform_matrix`` : ``ROIPerspectiveTransformOp`` 的转换矩阵,2D-LoDTensor,形状是(num_rois,9),lod_level为1 + +返回类型: 元组 + +**代码示例**: + +.. 
code-block:: python + + import paddle.fluid as fluid + + x = fluid.layers.data(name='x', shape=[256, 28, 28], dtype='float32') + rois = fluid.layers.data(name='rois', shape=[8], lod_level=1, dtype='float32') + out, mask, transform_matrix = fluid.layers.roi_perspective_transform(x, rois, 7, 7, 1.0) + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/roi_pool_cn.rst b/doc/paddle/api/paddle/fluid/layers/roi_pool_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..797ac888ad59b730145e5a3ed03f3cf7e0aff749 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/roi_pool_cn.rst @@ -0,0 +1,69 @@ +.. _cn_api_fluid_layers_roi_pool: + +roi_pool +------------------------------- + +.. py:function:: paddle.fluid.layers.roi_pool(input, rois, pooled_height=1, pooled_width=1, spatial_scale=1.0) + + + + + +该OP实现了roi池化操作,对非均匀大小的输入执行最大池化,以获得固定大小的特征映射(例如7*7)。 + +该OP的操作分三个步骤: + + 1. 用pooled_width和pooled_height将每个proposal区域划分为大小相等的部分; + 2. 在每个部分中找到最大的值; + 3. 将这些最大值复制到输出缓冲区。 + +Faster-RCNN使用了roi池化。roi池化的具体原理请参考 https://stackoverflow.com/questions/43430056/what-is-roi-layer-in-fast-rcnn + +参数: + - **input** (Variable) - 输入特征,维度为[N,C,H,W]的4D-Tensor,其中N为batch大小,C为输入通道数,H为特征高度,W为特征宽度。数据类型为float32或float64. + - **rois** (Variable) – 待池化的ROIs (Regions of Interest),维度为[num_rois,4]的2D-LoDTensor,lod level 为1。给定如[[x1,y1,x2,y2], ...],其中(x1,y1)为左上点坐标,(x2,y2)为右下点坐标。lod信息记录了每个roi所属的batch_id。 + - **pooled_height** (int,可选) - 数据类型为int32,池化输出的高度。默认值为1。 + - **pooled_width** (int,可选) - 数据类型为int32,池化输出的宽度。默认值为1。 + - **spatial_scale** (float,可选) - 数据类型为float32,用于将ROI coords从输入比例转换为池化时使用的比例。默认值为1.0。 + +返回: 池化后的特征,维度为[num_rois, C, pooled_height, pooled_width]的4D-Tensor。 + +返回类型: Variable + + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + DATATYPE='float32' + + place = fluid.CPUPlace() + #place = fluid.CUDAPlace(0) + + input_data = np.array([i for i in range(1,17)]).reshape(1,1,4,4).astype(DATATYPE) + roi_data =fluid.create_lod_tensor(np.array([[1., 1., 2., 2.], [1.5, 1.5, 3., 3.]]).astype(DATATYPE),[[2]], place) + + x = fluid.layers.data(name='input', shape=[1, 4, 4], dtype=DATATYPE) + rois = fluid.layers.data(name='roi', shape=[4], lod_level=1, dtype=DATATYPE) + + pool_out = fluid.layers.roi_pool( + input=x, + rois=rois, + pooled_height=1, + pooled_width=1, + spatial_scale=1.0) + + exe = fluid.Executor(place) + out, = exe.run(feed={'input':input_data ,'roi':roi_data}, fetch_list=[pool_out.name]) + print(out) #array([[[[11.]]], [[[16.]]]], dtype=float32) + print(np.array(out).shape) # (2, 1, 1, 1) + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/round_cn.rst b/doc/paddle/api/paddle/fluid/layers/round_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d0e3fe474424e03cc0e9d8671979e0e985c35e24 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/round_cn.rst @@ -0,0 +1,51 @@ +.. _cn_api_fluid_layers_round: + +round +------------------------------- + +.. py:function:: paddle.fluid.layers.round(x, name=None) + + + + + +该OP将输入中的数值四舍五入到最接近的整数数值。 + +.. code-block:: python + + 输入: + x.shape = [4] + x.data = [1.2, -0.9, 3.4, 0.9] + + 输出: + out.shape = [4] + Out.data = [1., -1., 3., 1.] + +参数: + + - **x** (Variable) - 支持任意维度的Tensor。数据类型为float32,float64或float16。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:返回类型为Variable(Tensor|LoDTensor), 数据类型同输入一致。 + +**代码示例**: + +.. 
code-block:: python + + import numpy as np + import paddle.fluid as fluid + + inputs = fluid.layers.data(name="x", shape = [3], dtype='float32') + output = fluid.layers.round(inputs) + + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(fluid.default_startup_program()) + + img = np.array([1.2, -0.9, 3.4, 0.9]).astype(np.float32) + + res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output]) + print(res) + # [array([ 1., -1., 3., 1.], dtype=float32)] + + + diff --git a/doc/paddle/api/paddle/fluid/layers/row_conv_cn.rst b/doc/paddle/api/paddle/fluid/layers/row_conv_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e89170ff266cc440da8bb850aa251e3da4876b64 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/row_conv_cn.rst @@ -0,0 +1,58 @@ +.. _cn_api_fluid_layers_row_conv: + +row_conv +------------------------------- + + +.. py:function:: paddle.fluid.layers.row_conv(input, future_context_size, param_attr=None, act=None) + + + + +该接口为行卷积(Row-convolution operator)或称之为超前卷积(lookahead convolution),最早介绍于DeepSpeech2论文中,论文链接: + + ``_ + +双向的RNN在深度语音模型中很有用,它通过对整个序列执行正向和反向传递来学习序列的表示。然而,与单向RNNs不同的是,在线部署和低延迟设置中,双向RNNs具有难度。超前卷积将来自未来子序列的信息以一种高效的方式进行计算,以改进单向递归神经网络。 row convolution operator 与一维序列卷积不同,计算方法如下: + +给定输入序列长度为 :math:`t` 的输入序列 :math:`X` 和输入维度 :math:`D` ,以及一个大小为 :math:`context * D` 的滤波器 :math:`W` ,输出序列卷积为: + +.. math:: + out_i = \sum_{j=i}^{i+context-1} X_{j} · W_{j-i} + +公式中: + - :math:`out_i` : 第i行输出变量形为[1, D]. + - :math:`context` : 下文(future context)大小 + - :math:`X_j` : 第j行输出变量,形为[1,D] + - :math:`W_{j-i}` : 第(j-i)行参数,其形状为[1,D]。 + +详细请参考 `设计文档 `_ 。 + +参数: + - **input** (Variable) -- 支持输入为LodTensor和Tensor,输入类型可以是[float32, float64],它支持可变时间长度的输入序列。当输入input为LodTensor时,其内部张量是一个具有形状(T x N)的矩阵,其中T是这个mini batch中的总的timestep,N是输入数据维数。当输入input为Tensor时,其形状为(B x T x N)的三维矩阵,B为mini batch大小,T为每个batch输入中的最大timestep,N是输入数据维数。当输入input为LoDTensor,形状为[9, N],LoD信息为[2, 3, 4],等价于输入input为形状是[3, 4, N]的Tensor。 + - **future_context_size** (int) -- 下文大小。请注意,卷积核的shape是[future_context_size + 1, N],N和输入input的数据维度N保持一致。 + - **param_attr** (ParamAttr) -- 参数的属性,包括名称、初始化器等。 + - **act** (str) -- 非线性激活函数。 + +返回:表示row_conv计算结果的Variable,数据类型、维度和输入input相同。 + + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + # LoDTensor input + x = fluid.layers.data(name='x', shape=[9, 16], + dtype='float32', lod_level=3, + append_batch_size=False) + out = fluid.layers.row_conv(input=x, future_context_size=2) + + # Tensor input + x = fluid.layers.data(name='x', shape=[9, 4, 16], + dtype='float32', + append_batch_size=False) + out = fluid.layers.row_conv(input=x, future_context_size=2) + + + diff --git a/doc/paddle/api/paddle/fluid/layers/rpn_target_assign_cn.rst b/doc/paddle/api/paddle/fluid/layers/rpn_target_assign_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b7eb29fefb25c427d708d79444978c288a335cd6 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/rpn_target_assign_cn.rst @@ -0,0 +1,75 @@ +.. _cn_api_fluid_layers_rpn_target_assign: + +rpn_target_assign +------------------------------- + +.. 
py:function:: paddle.fluid.layers.rpn_target_assign(bbox_pred, cls_logits, anchor_box, anchor_var, gt_boxes, is_crowd, im_info, rpn_batch_size_per_im=256, rpn_straddle_thresh=0.0, rpn_fg_fraction=0.5, rpn_positive_overlap=0.7, rpn_negative_overlap=0.3, use_random=True) + + + + +该OP用于为anchors分配分类标签和回归标签,以便用这些标签对RPN进行训练。 + +该OP将anchors分为两种类别,正和负。根据Faster-RCNN的paper,正类别anchor包括以下两种anchor: + +- 在与一个ground-truth boxes相交的所有anchor中,IoU最高的anchor +- 和任意一个ground-truth box的IoU超出了阈值 ``rpn_positive_overlap`` + +负类别anchor是和任何ground-truth boxes的IoU都低于阈值 ``rpn_negative_overlap`` 的anchor. + +正负anchors之外的anchors不会被选出来参与训练。 + +回归标签是ground-truth boxes和正类别anchor的偏移值。 + +参数: + - **bbox_pred** (Variable) - Shape为 ``[batch_size,M,4]`` 的3-D Tensor,表示M个边界框的预测位置。每个边界框有四个坐标值,即 ``[xmin,ymin,xmax,ymax]`` 。数据类型支持float32和float64。 + - **cls_logits** (Variable)- Shape为 ``[batch_size,M,1]`` 的3-D Tensor,表示预测的置信度。1是frontground和background的sigmoid,M是边界框的数量。数据类型支持float32和float64。 + - **anchor_box** (Variable) - Shape为 ``[M,4]`` 的2-D Tensor,它拥有M个框,每个框可表示为 ``[xmin,ymin,xmax,ymax]`` , ``[xmin,ymin]`` 是anchor框的左上部坐标,如果输入是图像特征图,则它们接近坐标系的原点。 ``[xmax,ymax]`` 是anchor框的右下部坐标。数据类型支持float32和float64。 + - **anchor_var** (Variable) - Shape为 ``[M,4]`` 的2-D Tensor,它拥有anchor的expand方差。数据类型支持float32和float64。 + - **gt_boxes** (Variable) - Shape为 ``[Ng,4]`` 的2-D LoDTensor, ``Ng`` 是一个batch内输入groundtruth boxes的总数。数据类型支持float32和float64。 + - **is_crowd** (Variable) –Shape为 ``[M, 1]`` 的2-D LoDTensor,M为groundtruth boxes的数量。用于标记boxes是否是crowd。数据类型支持int32。 + - **im_info** (Variable) - Shape为[N,3]的2-D张量,表示原始图像的大小信息。信息包含原始图像宽、高和feature map相对于原始图像缩放的比例。数据类型支持int32。 + - **rpn_batch_size_per_im** (int,可选) - 整型数字。每个图像中RPN示例总数。数据类型支持int32。缺省值为256。 + - **rpn_straddle_thresh** (float,可选) - 浮点数字。超出图像外部 ``straddle_thresh`` 个像素的RPN anchors会被删除。数据类型支持float32。缺省值为0.0。 + - **rpn_fg_fraction** (float,可选) - 浮点数字。标记为foreground boxes的数量占batch内总体boxes的比例。 数据类型支持float32。缺省值为0.5。 + - **rpn_positive_overlap** (float,可选) - 浮点数字。和任意一个groundtruth box的 ``IoU`` 超出了阈值 ``rpn_positive_overlap`` 的box被判定为正类别。 数据类型支持float32。缺省值为0.7。 + - **rpn_negative_overlap** (float,可选) - 浮点数字。负类别anchor是和任何ground-truth boxes的IoU都低于阈值 ``rpn_negative_overlap`` 的anchor。 数据类型支持float32。缺省值为0.3。 + - **use_random** (bool,可选) – 布尔类型。是否使用随机采样来选择foreground boxes和background boxes。缺省值为True。 + +返回: 元组。格式为 ``(predicted_scores, predicted_location, target_label, target_bbox, bbox_inside_weight)`` + - **predicted_scores** (Varible) - RPN预测的类别结果。Shape为 ``[F + B,1]`` 的2D Tensor。 ``F`` 为foreground anchor的数量,B为background anchor的数量。数据类型与 ``bbox_pred`` 一致。 + - **predicted_location** (Variable) - RPN预测的位置结果。Shape为 ``[F, 4]`` 的2D Tensor。数据类型与 ``bbox_pred`` 一致。 + - **target_label** (Variable) - Shape为 ``[F + B,1]`` 的2D Tensor。数据类型为int32。 + - **target_bbox** (Variable) - Shape为 ``[F, 4]`` 的2D Tensor。数据类型与 ``bbox_pred`` 一致。 + - **Bbox_inside_weight** (Variable) - Shape为 ``[F, 4]`` 的2D Tensor。数据类型与 ``bbox_pred`` 一致。 + +返回类型:元组 + + +**代码示例** + +.. 
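+为便于理解上文中正、负 anchor 的划分规则,下面用 numpy 给出一个仅依据 IoU 阈值打标签的简化示意(阈值取默认的 0.7 与 0.3;为保持简短,未包含"与某个 ground-truth IoU 最高的 anchor 记为正样本"等规则,坐标数值均为随意构造,并非该 OP 的实现):
+
+.. code-block:: python
+
+    import numpy as np
+
+    def iou(box_a, box_b):
+        # 计算两个 [xmin, ymin, xmax, ymax] 框的交并比
+        xa, ya = max(box_a[0], box_b[0]), max(box_a[1], box_b[1])
+        xb, yb = min(box_a[2], box_b[2]), min(box_a[3], box_b[3])
+        inter = max(0., xb - xa) * max(0., yb - ya)
+        area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
+        area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
+        return inter / (area_a + area_b - inter)
+
+    anchors = np.array([[0., 0., 10., 10.], [5., 5., 15., 15.], [30., 30., 40., 40.]])
+    gt_box = np.array([4., 4., 14., 14.])
+
+    for anchor in anchors:
+        overlap = iou(anchor, gt_box)
+        if overlap >= 0.7:
+            label = 1    # 正样本
+        elif overlap < 0.3:
+            label = 0    # 负样本
+        else:
+            label = -1   # 不参与训练
+        print(round(overlap, 3), label)
+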
code-block:: python
+
+    import paddle.fluid as fluid
+    bbox_pred = fluid.layers.data(name='bbox_pred', shape=[100, 4],
+        append_batch_size=False, dtype='float32')
+    cls_logits = fluid.layers.data(name='cls_logits', shape=[100, 1],
+        append_batch_size=False, dtype='float32')
+    anchor_box = fluid.layers.data(name='anchor_box', shape=[20, 4],
+        append_batch_size=False, dtype='float32')
+    anchor_var = fluid.layers.data(name='anchor_var', shape=[20, 4],
+        append_batch_size=False, dtype='float32')
+    gt_boxes = fluid.layers.data(name='gt_boxes', shape=[10, 4],
+        append_batch_size=False, dtype='float32')
+    is_crowd = fluid.layers.data(name='is_crowd', shape=[1],
+        append_batch_size=False, dtype='float32')
+    im_info = fluid.layers.data(name='im_info', shape=[1, 3],
+        append_batch_size=False, dtype='float32')
+    score_pred, loc_pred, score_target, loc_target, bbox_inside_weight = \
+        fluid.layers.rpn_target_assign(bbox_pred, cls_logits,
+            anchor_box, anchor_var, gt_boxes, is_crowd, im_info)
+
+
+
+
diff --git a/doc/paddle/api/paddle/fluid/layers/rsqrt_cn.rst b/doc/paddle/api/paddle/fluid/layers/rsqrt_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..058fe91a2db99838bcd77aa056ca110e2e71be1f
--- /dev/null
+++ b/doc/paddle/api/paddle/fluid/layers/rsqrt_cn.rst
@@ -0,0 +1,36 @@
+.. _cn_api_fluid_layers_rsqrt:
+
+rsqrt
+-------------------------------
+
+.. py:function:: paddle.fluid.layers.rsqrt(x, name=None)
+
+
+
+
+该OP为rsqrt激活函数。
+
+注:输入x应确保为非 **0** 值,否则程序会抛异常退出。
+
+其运算公式如下:
+
+.. math::
+    out = \frac{1}{\sqrt{x}}
+
+
+参数:
+    - **x** (Variable) – 输入是多维Tensor或LoDTensor,数据类型可以是float32和float64。
+    - **name** (str,可选)— 这一层的名称(可选)。如果设置为None,则将自动命名这一层。默认值为None。
+
+返回:对输入x进行rsqrt激活函数计算后的Tensor或LoDTensor,数据shape和输入x的shape一致。
+
+返回类型:Variable,数据类型和输入数据类型一致。
+
+**代码示例**:
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+    data = fluid.data(name="input", shape=[32, 784])
+    result = fluid.layers.rsqrt(data)
+
diff --git a/doc/paddle/api/paddle/fluid/layers/sampled_softmax_with_cross_entropy_cn.rst b/doc/paddle/api/paddle/fluid/layers/sampled_softmax_with_cross_entropy_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..624f41a6a0c10237d86da10c4fbaaddebab16146
--- /dev/null
+++ b/doc/paddle/api/paddle/fluid/layers/sampled_softmax_with_cross_entropy_cn.rst
@@ -0,0 +1,53 @@
+.. _cn_api_fluid_layers_sampled_softmax_with_cross_entropy:
+
+sampled_softmax_with_cross_entropy
+----------------------------------------------
+
+.. 
py:function:: paddle.fluid.layers.sampled_softmax_with_cross_entropy(logits, label, num_samples, num_true=1, remove_accidental_hits=True, use_customized_samples=False, customized_samples=None, customized_probabilities=None, seed=0) + + + + +**Sampled Softmax With Cross Entropy Operator** + +对于较大的输出类,采样的交叉熵损失Softmax被广泛地用作输出层。该运算符为所有示例采样若干个样本,并计算每行采样张量的SoftMax标准化值,然后计算交叉熵损失。 + +由于此运算符在内部对逻辑执行SoftMax,因此它需要未分级的逻辑。此运算符不应与SoftMax运算符的输出一起使用,因为这样会产生不正确的结果。 + +对于T真标签(T>=1)的示例,我们假设每个真标签的概率为1/T。对于每个样本,使用对数均匀分布生成S个样本。真正的标签与这些样本连接起来,形成每个示例的T+S样本。因此,假设逻辑的形状是[N x K],样本的形状是[N x(T+S)]。对于每个取样标签,计算出一个概率,对应于[Jean et al., 2014]( `http://arxiv.org/abs/1412.2007 `_ )中的Q(y|x)。 + +根据采样标签对逻辑进行采样。如果remove_accidental_hits为“真”,如果sample[i, j] 意外匹配“真”标签,则相应的sampled_logits[i, j]减去1e20,使其SoftMax结果接近零。然后用logQ(y|x)减去采样的逻辑,这些采样的逻辑和重新索引的标签被用来计算具有交叉熵的SoftMax。 + +参数: + - **logits** (Variable)- 非比例对数概率,是一个二维张量,形状为[N x K]。N是批大小,K是类别号。 + - **label** (Variable)- 基本事实,是一个二维张量。label是一个张量,其形状为[N x T],其中T是每个示例的真实标签数。 + - **num_samples** (int)- 每个示例的数目num_samples应该小于类的数目。 + - **num_true** (int)- 每个训练实例的目标类别总数。 + - **remove_accidental_hits** (bool)- 指示采样时是否删除意外命中的标签。如果为真,如果一个sample[i,j]意外地碰到了真标签,那么相应的sampled_logits[i,j]将被减去1e20,使其SoftMax结果接近零。默认值为True。 + - **use_customized_samples** (bool)- 是否使用自定义样本和可能性对logits进行抽样。 + - **customized_samples** (Variable)- 用户定义的示例,它是一个具有形状[N, T + S]的二维张量。S是num_samples,T是每个示例的真标签数。 + - **customized_probabilities** (Variable)- 用户定义的样本概率,与customized_samples形状相同的二维张量。 + - **seed** (int)- 用于生成随机数的随机种子,在采样过程中使用。默认值为0。 + +返回:交叉熵损失,是一个二维张量,形状为[N x 1]。 + +返回类型:Variable + +**代码示例:** + +.. code-block:: python + + import paddle.fluid as fluid + + input = fluid.layers.data(name='data', shape=[256], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + fc = fluid.layers.fc(input=input, size=100) + out = fluid.layers.sampled_softmax_with_cross_entropy( + logits=fc, label=label, num_samples=25) + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/sampling_id_cn.rst b/doc/paddle/api/paddle/fluid/layers/sampling_id_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..098bba3e2e75a62a28ea4bd9c040a1960846f858 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/sampling_id_cn.rst @@ -0,0 +1,44 @@ +.. _cn_api_fluid_layers_sampling_id: + +sampling_id +------------------------------- + +.. py:function:: paddle.fluid.layers.sampling_id(x, min=0.0, max=1.0, seed=0, dtype='float32') + + + + +该OP从输入的多项分布中进行采样。 + +参数: + - **x** (Variable)- 输入Tensor。一个形如[batch_size,input_feature_dimensions]的2-D Tensor。 + - **min** (Float)- 随机的最小值。默认值为为0.0。 + - **max** (Float)- 随机的最大值。默认值为1.0。 + - **seed** (int)- 随机种子。0表示使用系统生成的种子, 默认值为0。请注意,如果seed不为0,则此算子每次调用将生成相同的随机数。 + - **dtype** (np.dtype | core.VarDesc.VarType | str)- 指定输出数据的类型。 + +返回:采样的数据张量(Tensor) + +返回类型:变量(Variable) + + +**代码示例:** + +.. code-block:: python + + import paddle.fluid as fluid + x = fluid.layers.data( + name="X", + shape=[13, 11], + dtype='float32', + append_batch_size=False) + + out = fluid.layers.sampling_id(x) + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/scale_cn.rst b/doc/paddle/api/paddle/fluid/layers/scale_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..06ae0be071857a1cc083e4d42794b107b89efae2 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/scale_cn.rst @@ -0,0 +1,74 @@ +.. _cn_api_fluid_layers_scale: + +scale +------------------------------- + +.. 
py:function:: paddle.fluid.layers.scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None) + + + + +缩放算子。 + +对输入Tensor进行缩放和偏置,其公式如下: + +``bias_after_scale`` 为True: + +.. math:: + Out=scale*X+bias + +``bias_after_scale`` 为False: + +.. math:: + Out=scale*(X+bias) + +参数: + - **x** (Variable) - 要进行缩放的多维Tensor,数据类型可以为float32,float64,int8,int16,int32,int64,uint8。 + - **scale** (float|Variable) - 缩放的比例,是一个float类型或者一个shape为[1],数据类型为float32的Variable类型。 + - **bias** (float) - 缩放的偏置。 + - **bias_after_scale** (bool) - 判断在缩放之前或之后添加偏置。为True时,先缩放再偏置;为False时,先偏置再缩放。该参数在某些情况下,对数值稳定性很有用。 + - **act** (str,可选) - 应用于输出的激活函数,如tanh、softmax、sigmoid、relu等。 + - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回: 缩放后的输出Tensor。 + +返回类型: Variable(Tensor|LoDTensor)。 + +**代码示例:** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + inputs = fluid.layers.data(name="x", shape=[2, 3], dtype='float32') + output = fluid.layers.scale(inputs, scale = 2.0, bias = 1.0) + + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(fluid.default_startup_program()) + + img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32) + + res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output]) + print(res) # [array([[ 3., 5., 7.], [ 9., 11., 13.]], dtype=float32)] + +.. code-block:: python + + # scale with parameter scale as Variable + import paddle.fluid as fluid + import numpy as np + + inputs = fluid.layers.data(name="x", shape=[2, 3], dtype='float32') + scale = fluid.layers.data(name="scale", shape=[1], dtype='float32', + append_batch_size=False) + output = fluid.layers.scale(inputs, scale = scale, bias = 1.0) + + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(fluid.default_startup_program()) + + img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32) + scale_np = np.array([2.]).astype(np.float32) + + res = exe.run(fluid.default_main_program(), feed={'x':img, 'scale':scale_np}, fetch_list=[output]) + print(res) # [array([[ 3., 5., 7.], [ 9., 11., 13.]], dtype=float32)] + diff --git a/doc/paddle/api/paddle/fluid/layers/scatter_cn.rst b/doc/paddle/api/paddle/fluid/layers/scatter_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..75dfb0d10a66f03f39d397740e9940dd31e9e1b5 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/scatter_cn.rst @@ -0,0 +1,76 @@ +.. _cn_api_fluid_layers_scatter: + +scatter +------------------------------- + +.. py:function:: paddle.fluid.layers.scatter(input, index, updates, name=None, overwrite=True) + + + + +该OP根据index中的索引值将updates数据更新到input中。 + +.. 
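+在查看下方的伪代码计算过程之前,可先通过下面这段可直接运行的 numpy 代码理解 ``overwrite=False`` 时的累加更新语义(仅为语义示意,并非该 OP 的实现;所用数值与下文的计算过程示例一致):
+
+.. code-block:: python
+
+    import numpy as np
+
+    x = np.array([[1., 1.], [2., 2.], [3., 3.]])
+    index = np.array([2, 1, 0, 1])
+    updates = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])
+
+    out = x.copy()
+    out[np.unique(index)] = 0.           # 先将被索引到的行清零
+    for i, idx in enumerate(index):      # 再按 index 逐条累加 updates
+        out[idx] += updates[i]
+    print(out)
+    # [[3. 3.]
+    #  [6. 6.]
+    #  [1. 1.]]
+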
code-block:: python
+
+    输入:
+    input = np.array([[1, 1], [2, 2], [3, 3]])
+    index = np.array([2, 1, 0, 1])
+    # updates 的维度数需要和 input 一致
+    # updates 除第一维外,其余各维的大小需要和 input 相同
+    updates = np.array([[1, 1], [2, 2], [3, 3], [4, 4]])
+    overwrite = False
+
+    计算过程:
+    if not overwrite:
+        for i in range(len(index)):
+            input[index[i]] = np.zeros((2))
+
+    # 根据index中的索引值取updates中的数据更新到input中去
+    for i in range(len(index)):
+        if overwrite:
+            input[index[i]] = updates[i]
+        else:
+            input[index[i]] += updates[i]
+
+    输出:
+    out          # np.array([[3, 3], [6, 6], [1, 1]])
+    out.shape    # [3, 2]
+
+参数:
+    - **input** (Variable) - 支持任意维度的Tensor。支持的数据类型为float32。
+    - **index** (Variable) - 表示索引,仅支持1-D Tensor。支持的数据类型为int32,int64。
+    - **updates** (Variable) - 根据索引的值将updates Tensor中的对应值更新到input Tensor中,updates Tensor的维度需要和input Tensor保持一致,且除了第一维外的其他的维度的大小需要和input Tensor保持相同。支持的数据类型为float32。
+    - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。
+    - **overwrite** (bool,可选) - 如果index中的索引值有重复且overwrite为True,旧更新值将被新的更新值覆盖;如果为False,新的更新值将同旧的更新值相加。默认值为True。
+
+返回:返回类型为Variable(Tensor|LoDTensor),数据类型以及shape大小同输入一致。
+
+**代码示例**
+
+.. code-block:: python
+
+    import numpy as np
+    import paddle.fluid as fluid
+
+    input = fluid.layers.data(name='data', shape=[3, 2], dtype='float32', append_batch_size=False)
+    index = fluid.layers.data(name='index', shape=[4], dtype='int64', append_batch_size=False)
+    updates = fluid.layers.data(name='update', shape=[4, 2], dtype='float32', append_batch_size=False)
+
+    output = fluid.layers.scatter(input, index, updates, overwrite=False)
+
+    exe = fluid.Executor(fluid.CPUPlace())
+    exe.run(fluid.default_startup_program())
+
+    in_data = np.array([[1, 1], [2, 2], [3, 3]]).astype(np.float32)
+    index_data = np.array([2, 1, 0, 1]).astype(np.int64)
+    update_data = np.array([[1, 1], [2, 2], [3, 3], [4, 4]]).astype(np.float32)
+
+    res = exe.run(fluid.default_main_program(), feed={'data':in_data, "index":index_data, "update":update_data}, fetch_list=[output])
+    print(res)
+    # [array([[3., 3.],
+    #         [6., 6.],
+    #         [1., 1.]], dtype=float32)]
+
+
+
diff --git a/doc/paddle/api/paddle/fluid/layers/scatter_nd_add_cn.rst b/doc/paddle/api/paddle/fluid/layers/scatter_nd_add_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..245a1b6c9416b78f9e775133aa62fc7854aeee66
--- /dev/null
+++ b/doc/paddle/api/paddle/fluid/layers/scatter_nd_add_cn.rst
@@ -0,0 +1,66 @@
+.. _cn_api_fluid_layers_scatter_nd_add:
+
+scatter_nd_add
+-------------------------------
+
+.. 
py:function:: paddle.fluid.layers.scatter_nd_add(ref, index, updates, name=None) + + + + +该OP通过对Variable中的单个值或切片应用稀疏加法,从而得到输出的Variable。 + +:code:`ref` 是维度为 :code:`R` 的张量。 :code:`index` 是维度为 :code:`K` 的张量。因此, :code:`index` 的形状是 :math:`[i_0, i_1, ..., i_{K-2}, Q]` ,其中 :math:`Q \leq R` 。:code:`updates` 是一个维度为 :math:`K - 1 + R - Q` 的张量,它的形状是 :math:`index.shape[:-1] + ref.shape[index.shape[-1]:]` 。 + +根据 :code:`index` 的 :math:`[i_0, i_1, ..., i_{K-2}]` 得到相应的 :code:`updates` 切片,将其加到根据 :code:`index` 的最后一维得到 :code:`ref` 切片上,从而得到最终的输出张量。 + + +示例: + +:: + + - 案例 1: + ref = [0, 1, 2, 3, 4, 5] + index = [[1], [2], [3], [1]] + updates = [9, 10, 11, 12] + + 得到: + + output = [0, 22, 12, 14, 4, 5] + + - 案例 2: + ref = [[65, 17], [-14, -25]] + index = [[], []] + updates = [[[-1, -2], [1, 2]], + [[3, 4], [-3, -4]]] + ref.shape = (2, 2) + index.shape = (2, 0) + updates.shape = (2, 2, 2) + + 得到: + + output = [[67, 19], [-16, -27]] + + +参数: + - **ref** (Variable) - 输入张量,数据类型可以是float32,float64。 + - **index** (Variable) - 输入的索引张量,数据类型为非负int32或非负int64。它的维度 :code:`index.rank` 必须大于1,并且 :code:`index.shape[-1] <= ref.rank` + - **updates** (Variable) - 输入的更新张量,它必须和 :code:`ref` 有相同的数据类型。形状必须是 :code:`index.shape[:-1] + ref.shape[index.shape[-1]:]` 。 + - **name** (string) - 该层的名字,默认值为None,表示会自动命名。 + +返回:数据类型和形状都与 :code:`ref` 相同的Tensor|LoDTensor。 + +返回类型:Variable + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + ref = fluid.layers.data(name='ref', shape=[3, 5, 9, 10], dtype='float32', append_batch_size=False) + index = fluid.layers.data(name='index', shape=[3, 2], dtype='int32', append_batch_size=False) + updates = fluid.layers.data(name='update', shape=[3, 9, 10], dtype='float32', append_batch_size=False) + output = fluid.layers.scatter_nd_add(ref, index, updates) + + + diff --git a/doc/paddle/api/paddle/fluid/layers/scatter_nd_cn.rst b/doc/paddle/api/paddle/fluid/layers/scatter_nd_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b9e5a06ec80dbf489120f77095498a6031b2d71c --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/scatter_nd_cn.rst @@ -0,0 +1,33 @@ +.. _cn_api_fluid_layers_scatter_nd: + +scatter_nd +------------------------------- + +.. py:function:: paddle.fluid.layers.scatter_nd(index, updates, shape, name=None) + + + + +该OP根据 :code:`index` ,将 :code:`updates` 添加到一个新的张量中,从而得到输出的Variable。这个操作与 :code:`scatter_nd_add` 类似,除了形状为 :code:`shape` 的张量是通过零初始化的。相应地, :code:`scatter_nd(index, updates, shape)` 等价于 :code:`scatter_nd_add(fluid.layers.zeros(shape, updates.dtype), index, updates)` 。如果 :code:`index` 有重复元素,则将累积相应的更新,因此,由于数值近似问题,索引中重复元素的顺序不同可能会导致不同的输出结果。具体的计算方法可以参见 :code:`scatter_nd_add` 。该OP是 :code:`gather_nd` 的反函数。 + +参数: + - **index** (Variable) - 输入的索引张量,数据类型为非负int32或非负int64。它的维度 :code:`index.rank` 必须大于1,并且 :code:`index.shape[-1] <= len(shape)` + - **updates** (Variable) - 输入的更新张量。形状必须是 :code:`index.shape[:-1] + shape[index.shape[-1]:]` 。数据类型可以是float32,float64。 + - **shape** (tuple|list) - 要求输出张量的形状。类型是tuple或者list。 + - **name** (string) - 该层的名字,默认值为None,表示会自动命名。 + +返回:数据类型与 :code:`updates` 相同,形状是 :code:`shape` 的Tensor|LoDTensor。 + +返回类型:Variable + +**代码示例**: + +.. 
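+作为补充,下面用 numpy 的 ``np.add.at`` 验证上文"案例 1"中稀疏加法的结果(仅为语义示意,并非该 OP 的实现):
+
+.. code-block:: python
+
+    import numpy as np
+
+    ref = np.array([0., 1., 2., 3., 4., 5.])
+    index = np.array([[1], [2], [3], [1]])
+    updates = np.array([9., 10., 11., 12.])
+
+    out = ref.copy()
+    np.add.at(out, index[:, 0], updates)   # 重复索引会被累加
+    print(out)  # [ 0. 22. 12. 14.  4.  5.]
+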
code-block:: python + + import paddle.fluid as fluid + index = fluid.layers.data(name='index', shape=[3, 2], dtype='int64', append_batch_size=False) + updates = fluid.layers.data(name='update', shape=[3, 9, 10], dtype='float32', append_batch_size=False) + shape = [3, 5, 9, 10] + output = fluid.layers.scatter_nd(index, updates, shape) + + diff --git a/doc/paddle/api/paddle/fluid/layers/selu_cn.rst b/doc/paddle/api/paddle/fluid/layers/selu_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..56e838344bd6243374b6772cd97c0370b32a2bce --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/selu_cn.rst @@ -0,0 +1,61 @@ +.. _cn_api_fluid_layers_selu: + +selu +------------------------------- + +.. py:function:: paddle.fluid.layers.selu(x, scale=None, alpha=None, name=None) + + + + +SeLU激活函数,其公式如下: + +.. math:: + selu= \lambda* + \begin{cases} + x &\quad \text{ if } x>0 \\ + \alpha * e^x - \alpha &\quad \text{ if } x<=0 + \end{cases} + +输入 ``x`` 可以选择性携带LoD信息。输出和它共享此LoD信息(如果有)。 + +参数: + - **x** (Variable) - 输入变量,为数据类型为float32,float64的多维Tensor或者LoDTensor。 + - **scale** (float,可选) – 可选,表示SeLU激活函数中的λ的值,其默认值为 1.0507009873554804934193349852946。 详情请见: `Self-Normalizing Neural Networks `_。 + - **alpha** (float,可选) – 可选,表示SeLU激活函数中的α的值,其默认值为 1.6732632423543772848170429916717。 详情请见: `Self-Normalizing Neural Networks `_。 + - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:一个Tensor,shape和输入Tensor相同。 + +返回类型:Variable(Tensor|LoDTensor),LoD信息与输入Tensor一致。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + inputs = fluid.layers.data(name="x", shape=[2, 2], dtype="float32") + output = fluid.layers.selu(inputs) + + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(fluid.default_startup_program()) + + img = np.array([[0, 1],[2, 3]]).astype(np.float32) + + res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output]) + print(res) # [array([[0. , 1.050701],[2.101402, 3.152103]], dtype=float32)] + + + + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/sequence_concat_cn.rst b/doc/paddle/api/paddle/fluid/layers/sequence_concat_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..659c5d92489ef883c5ffbcf84ca3ede77c90060f --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/sequence_concat_cn.rst @@ -0,0 +1,62 @@ +.. _cn_api_fluid_layers_sequence_concat: + +sequence_concat +------------------------------- + + +.. py:function:: paddle.fluid.layers.sequence_concat(input, name=None) + + + + +**注意:该OP的输入只能是LoDTensor,如果您需要处理的输入是Tensor类型,请使用concat函数(fluid.layers.** :ref:`cn_api_fluid_layers_concat` **)。** + +**该OP仅支持LoDTensor** ,通过LoDTensor的LoD信息将输入的多个LoDTensor进行连接(concat),输出连接后的LoDTensor。 + +:: + + input是由多个LoDTensor组成的list: + input = [x1, x2] + 其中: + x1.lod = [[0, 3, 5]] + x1.data = [[1], [2], [3], [4], [5]] + x1.shape = [5, 1] + + x2.lod = [[0, 2, 4]] + x2.data = [[6], [7], [8], [9]] + x2.shape = [4, 1] + 且必须满足:len(x1.lod[0]) == len(x2.lod[0]) + + 输出为LoDTensor: + out.lod = [[0, 3+2, 5+4]] + out.data = [[1], [2], [3], [6], [7], [4], [5], [8], [9]] + out.shape = [9, 1] + + +参数: + - **input** (list of Variable) – 多个LoDTensor组成的list,要求每个输入LoDTensor的LoD长度必须一致。数据类型为float32,float64或int64。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回: 输出连接后的LoDTensor,数据类型和输入一致。 + +返回类型: Variable + + +**代码示例** + +.. 
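+作为补充,下面用 numpy 模拟上文中按 lod 信息逐条序列拼接的过程(仅为语义示意,数值与上文说明一致,并非该 OP 的实现):
+
+.. code-block:: python
+
+    import numpy as np
+
+    x1 = np.array([[1], [2], [3], [4], [5]])
+    x2 = np.array([[6], [7], [8], [9]])
+    lod1 = [0, 3, 5]
+    lod2 = [0, 2, 4]
+
+    pieces, new_lod = [], [0]
+    for i in range(len(lod1) - 1):
+        # 第 i 条输出序列 = x1 的第 i 条序列接上 x2 的第 i 条序列
+        seq = np.concatenate([x1[lod1[i]:lod1[i + 1]], x2[lod2[i]:lod2[i + 1]]])
+        pieces.append(seq)
+        new_lod.append(new_lod[-1] + len(seq))
+    out = np.concatenate(pieces)
+    print(out.ravel())   # [1 2 3 6 7 4 5 8 9]
+    print(new_lod)       # [0, 5, 9]
+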
code-block:: python + + import paddle.fluid as fluid + x = fluid.layers.data(name='x', shape=[10], dtype='float32') + y = fluid.layers.data(name='y', shape=[10], dtype='float32') + out = fluid.layers.sequence_concat(input=[x, y]) + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/sequence_conv_cn.rst b/doc/paddle/api/paddle/fluid/layers/sequence_conv_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..213ce1bf7dd5c0a738031fe748cd31cf1921ac42 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/sequence_conv_cn.rst @@ -0,0 +1,84 @@ +.. _cn_api_fluid_layers_sequence_conv: + +sequence_conv +------------------------------- + + +.. py:function:: paddle.fluid.layers.sequence_conv(input, num_filters, filter_size=3, filter_stride=1, padding=True, padding_start=None, bias_attr=None, param_attr=None, act=None, name=None) + + + + +**注意:该OP的输入只能是LoDTensor,如果您需要处理的输入是Tensor类型,请使用conv2d函数(fluid.layers.** :ref:`cn_api_fluid_layers_conv2d` **)。** + +该OP在给定的卷积参数下(如卷积核数目、卷积核大小等),对输入的变长序列(sequence)LoDTensor进行卷积操作。默认情况下,该OP会自适应地在每个输入序列的两端等长地填充全0数据,以确保卷积后的序列输出长度和输入长度一致。支持通过配置 ``padding_start`` 参数来指定序列填充的行为。 + +**提示:** 参数 ``padding`` 为无用参数,将在未来的版本中被移除。 + +:: + + 这里详细介绍数据填充操作的细节: + 对于一个min-batch为2的变长序列输入,分别包含3个、1个时间步(time_step), + 假设输入input是一个[4, N]的float类型LoDTensor,为了方便,这里假设N = 2 + input.data = [[1, 1], + [2, 2], + [3, 3], + [4, 4]] + input.lod = [[0, 3, 4]] + + 即输入input总共有4个词,每个词被表示为一个2维向量。 + + Case1: + + 若 padding_start = -1,filter_size = 3, + 则两端填充数据的长度分别为: + up_pad_len = max(0, -padding_start) = 1 + down_pad_len = max(0, filter_size + padding_start - 1) = 1 + + 则以此填充后的输入数据为: + data_aftet_padding = [[0, 0, 1, 1, 2, 2], + [1, 1, 2, 2, 3, 3], + [2, 2, 3, 3, 0, 0], + [0, 0, 4, 4, 0, 0]] + + 它将和卷积核矩阵相乘得到最终的输出,假设num_filters = 3: + output.data = [[ 0.3234, -0.2334, 0.7433], + [ 0.5646, 0.9464, -0.1223], + [-0.1343, 0.5653, 0.4555], + [ 0.9954, -0.1234, -0.1234]] + output.shape = [4, 3] # 3 = num_filters + output.lod = [[0, 3, 4]] # 保持不变 + + + +参数: + - **input** (Variable) - 维度为 :math:`(M, K)` 的二维LoDTensor,仅支持lod_level为1。其中M是mini-batch的总时间步数,K是输入的 ``hidden_size`` 特征维度。数据类型为float32或float64。 + - **num_filters** (int) - 滤波器的数量。 + - **filter_size** (int) - 滤波器的高度(H);不支持指定滤波器宽度(W),宽度固定取值为输入的 ``hidden_size`` 。默认值为3。 + - **filter_stride** (int) - 滤波器每次移动的步长。目前只支持取值为1,默认为1。 + - **padding** (bool) - **此参数不起任何作用,将在未来的版本中被移除。** 无论 ``padding`` 取值为False或者True,默认地,该函数会自适应地在每个输入序列的两端等长地填充全0数据,以确保卷积后的输出序列长度和输入长度一致。默认填充是考虑到输入的序列长度可能会小于卷积核大小,这会导致无正确计算卷积输出。填充为0的数据在训练过程中不会被更新。默认为True。 + - **padding_start** (int) - 表示对输入序列填充时的起始位置,可以为负值。负值表示在每个序列的首端填充 ``|padding_start|`` 个时间步(time_step)的全0数据;正值表示对每个序列跳过前 ``padding_start`` 个时间步的数据。同时在末端填充 :math:`filter\_size + padding\_start - 1` 个时间步的全0数据,以保证卷积输出序列长度和输入长度一致。如果 ``padding_start`` 为None,则在每个序列的两端填充 :math:`\frac{filter\_size}{2}` 个时间步的全0数据;如果 ``padding_start`` 设置为0,则只在序列的末端填充 :math:`filter\_size - 1` 个时间步的全0数据。默认为None。 + - **bias_attr** (ParamAttr) - 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **param_attr** (ParamAttr) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **act** (str) – 应用于输出上的激活函数,如tanh、softmax、sigmoid,relu等,支持列表请参考 :ref:`api_guide_activations` ,默认值为None。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + + +返回:和输入序列等长的LoDTensor,数据类型和输入一致,为float32或float64。 + +返回类型:Variable + +**代码示例** + +.. 
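+作为补充,下面用 numpy 演示上文 Case1 中填充数据的构造方式(padding_start=-1、filter_size=3;仅为示意,并非该 OP 的实现):
+
+.. code-block:: python
+
+    import numpy as np
+
+    x = np.array([[1, 1], [2, 2], [3, 3], [4, 4]], dtype=np.float32)
+    lod = [0, 3, 4]                                       # 两条序列,长度分别为 3 和 1
+    filter_size, padding_start = 3, -1
+    up_pad = max(0, -padding_start)                       # 首端填充 1 个时间步
+    down_pad = max(0, filter_size + padding_start - 1)    # 末端填充 1 个时间步
+
+    rows = []
+    for s in range(len(lod) - 1):
+        seq = x[lod[s]:lod[s + 1]]
+        padded = np.concatenate([np.zeros((up_pad, 2)), seq, np.zeros((down_pad, 2))])
+        for t in range(len(seq)):
+            # 以每个时间步为起点取 filter_size 个时间步并展平,得到与卷积核相乘的一行
+            rows.append(padded[t:t + filter_size].reshape(-1))
+    print(np.array(rows))
+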
code-block:: python + + import paddle.fluid as fluid + x = fluid.layers.data(name='x', shape=[10,10], append_batch_size=False, dtype='float32') + x_conved = fluid.layers.sequence_conv(x,2) + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/sequence_enumerate_cn.rst b/doc/paddle/api/paddle/fluid/layers/sequence_enumerate_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b494b5e9bf768f99575a855271da6cad9ac0b723 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/sequence_enumerate_cn.rst @@ -0,0 +1,56 @@ +.. _cn_api_fluid_layers_sequence_enumerate: + +sequence_enumerate +------------------------------- + + +.. py:function:: paddle.fluid.layers.sequence_enumerate(input, win_size, pad_value=0, name=None) + + + + +枚举形状为 ``[d_1, 1]`` 的输入序列所有长度为 ``win_size`` 的子序列,生成一个形状为 ``[d_1, win_size]`` 的新序列,需要时以 ``pad_value`` 填充。 + +注意,该OP的输入 ``input`` 只能是LodTensor。 + +范例如下: + +:: + + 给定输入 x: + x.lod = [[0, 3, 5]] + x.data = [[1], [2], [3], [4], [5]] + x.dims = [5, 1] + 设置属性 win_size = 2 pad_value = 0 + + 得到输出 out: + out.lod = [[0, 3, 5]] + out.data = [[1, 2], [2, 3], [3, 0], [4, 5], [5, 0]] + out.dims = [5, 2] + +参数: + - **input** (Variable)- 输入序列,形状为 ``[d_1, 1]`` ,lod level为1的LodTensor。数据类型支持int32,int64,float32或float64。 + - **win_size** (int)- 子序列窗口大小。 + - **pad_value** (int,可选)- 填充值,默认为0。 + - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回: 枚举序列,形状为 ``[d_1, win_size]`` ,lod_level为1的LoDTensor。数据类型与输入 ``input`` 一致。 + +返回类型: Variable + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + x = fluid.layers.data(name='x',shape=[-1, 1], dtype='int32', lod_level=1) + out = fluid.layers.sequence_enumerate(input=x, win_size=3, pad_value=0) + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/sequence_expand_as_cn.rst b/doc/paddle/api/paddle/fluid/layers/sequence_expand_as_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f87ca92fd8db81d0341cd7fad508a8ce73ede197 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/sequence_expand_as_cn.rst @@ -0,0 +1,113 @@ +.. _cn_api_fluid_layers_sequence_expand_as: + +sequence_expand_as +------------------------------- + + +.. 
py:function:: paddle.fluid.layers.sequence_expand_as(x, y, name=None) + + + + +Sequence Expand As Layer,该OP根据输入 ``y`` 的第0级lod对输入 ``x`` 进行扩展。当前实现要求 ``y`` 的lod层数(level)必须为1,且 ``x`` 的第一维必须和 ``y`` 的第0层lod大小相同,所以扩展后的LodTensor具有和 ``y`` 相同的lod。扩展结果与输入 ``x`` 的lod无关,所以无需考虑 ``x`` 的lod。 + +注意,该OP的输入 ``x`` 可以是Tensor或LoDTensor, ``y`` 只能是LodTensor。 + +范例解释如下: + +:: + + 例1: + 假设,有4个长度维1的序列[a]、[b]、[c]和[d],现在要将其扩展为长度是3、3、1、1的序列[a][a][a]、[b][b][b]、[c]和[d]。 + 显然,扩展后的序列lod为[0, 3, 6, 7, 8],则: + 给定输入一维LoDTensor x + x.data = [[a], [b], [c], [d]] + x.dims = [4, 1] + 和输入 y + y.lod = [[3, 3, 1, 1]] #为了便于理解这里用基于长度lod表示 + + 经过sequence_expand_as运算,得到输出1级LoDTensor out + out.lod = [[0, 3, 6, 7, 8]] #基于偏移的lod,等价于基于长度的[[3, 3, 1, 1]] + out.data = [[a], [a], [a], [b], [b], [b], [c], [d]] + out.dims = [8, 1] + + 可见,输出out将x扩展至和y具有相同的lod。 + +:: + + 例2: + 设定与例1类似,给定输入一维LoDTensor x: + x.data = [[a, b], [c, d], [e, f]] + x.dims = [3, 2] + 和输入 y: + y.lod = [[2, 1, 3]] #为了便于理解这里用基于长度lod表示 + + 输出为1级LoDTensor: + out.lod = [[0, 2, 3, 6]] #基于偏移的lod,等价于基于长度的[[2, 1, 3]] + out.data = [[a, b], [a, b] [c, d], [e, f], [e, f], [e, f]] + out.dims = [6, 2] + + 可见,输出out将x扩展至和y具有相同的lod。 + + +参数: + - **x** (Variable) - 输入变量,维度为 :math:`[M, K]` 的二维Tensor或LoDTensor,第一维必须与输入 ``y`` 的第0层lod大小相同,且仅支持lod_level为1。数据类型支持int32,int64,float32或float64。 + - **y** (Variable) - 输入变量,LoDTensor,lod level必须为1。 + - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:扩展变量,维度为 :math:`[N, K]` 的二维LoDTensor,N由输入 ``y`` 的lod决定,且仅支持lod_level为1。数据类型与输入 ``x`` 一致。 + +返回类型:Variable + + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.layers as layers + import numpy as np + + x = fluid.data(name='x', shape=[1], dtype='float32') + y = fluid.data(name='y', shape=[1], dtype='float32', lod_level=1) + out = layers.sequence_expand_as(x=x, y=y) + + exe = fluid.Executor(fluid.CPUPlace()) + place = fluid.CPUPlace() + + np_data = np.array([[1], [2], [3], [4]]).astype('float32') + x_lod_tensor = fluid.create_lod_tensor(np_data, [[2, 2]], place) + print(x_lod_tensor) + #lod: [[0, 2, 4]] + # dim: 4, 1 + # layout: NCHW + # dtype: float + # data: [1 2 3 4] + + y_lod_tensor = fluid.create_random_int_lodtensor([[3,3,1,1]], [1], + place, low=0, high=1) + print(y_lod_tensor) + #lod: [[0, 3, 6, 7, 8]] + # dim: 8, 1 + # layout: NCHW + # dtype: int64_t + # data: [0 0 1 0 1 1 1 0] + + out_main = exe.run(fluid.default_main_program(), + feed={'x': x_lod_tensor, 'y': y_lod_tensor}, + fetch_list=[out], return_numpy=False) + print(out_main[0]) + #lod: [[0, 3, 6, 7, 8]] + # dim: 8, 1 + # layout: NCHW + # dtype: float + # data: [1 1 1 2 2 2 3 4] + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/sequence_expand_cn.rst b/doc/paddle/api/paddle/fluid/layers/sequence_expand_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..27b137378d9952cfe441d9ef4b788bb50b81fb5c --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/sequence_expand_cn.rst @@ -0,0 +1,113 @@ +.. _cn_api_fluid_layers_sequence_expand: + +sequence_expand +------------------------------- + + +.. 
py:function:: paddle.fluid.layers.sequence_expand(x, y, ref_level=-1, name=None) + + + + +序列扩张层(Sequence Expand Layer),根据输入 ``y`` 的第 ``ref_level`` 层lod对输入 ``x`` 进行扩展。 ``x`` 的lod level最多为1,若 ``x`` 的lod level为1,则 ``x`` 的lod大小必须与 ``y`` 的第 ``ref_level`` 层lod大小相等;若 ``x`` 的lod level为0,则 ``x`` 的第一维大小必须与 ``y`` 第 ``ref_level`` 层大小相等。 ``x`` 的秩最少为2,当 ``x`` 的秩大于2时,将被当作是一个二维张量处理。 + +注意,该OP的输入 ``x`` 可以是Tensor或LodTensor, ``y`` 只能是LodTensor。 + +范例解释如下: + +:: + + 例1: + 假设两个长度为2的序列[a][b]和[c][d],欲将其扩展为4个长度为2的序列[a][b]、[a][b]、[c][d]、[c][d]。 + 序列[a][b]扩展2次,[c][d]扩展2次,扩展所需依据的lod为[2, 2],则: + 给定输入一维LoDTensor x + x.lod = [[2, 2]] #表示两个序列的长度为2,为了便于理解这里用基于长度lod表示 + x.data = [[a], [b], [c], [d]] + x.dims = [4, 1] + 和输入 y + y.lod = [[2, 2], #第0层lod,指定按该层扩展,表示分别扩展2次,为了便于理解这里用基于长度lod表示 + [3, 3, 1, 1]] #第1层lod,注意,因为指定ref_level为0,所以这一层与运算无关 + 指定 ref_level = 0,依据y的第0层lod进行扩展, + + 经过sequence_expand,输出为1级LoDTensor out + out.lod = [[0, 2, 4, 6, 8]] #基于偏移的lod,等价于基于长度的[[2, 2, 2, 2]] + out.data = [[a], [b], [a], [b], [c], [d], [c], [d]] + out.dims = [8, 1] + +:: + + 例2: + 假设有3个长度维1的序列[a]、[b]、[c],现在要将其扩展为长度是2、0、3的序列[a][a]、[c][c][c]。 + 显然,扩展后的序列lod为[2, 0, 3],则: + 给定输入一维LoDTensor x + x.data = [[a], [b], [c]] + x.dims = [3, 1] + 和输入 y + y.lod = [[2, 0, 3]] + 默认 ref_level = -1 + + 经过sequence_expand,输出为1级LoDTensor out + out.data = [[a], [a], [c], [c], [c]] + out.dims = [5, 1] + +参数: + - **x** (Variable) - 输入变量,维度为 :math:`[M, K]` ,lod level至多1的二维Tensor或LoDTensor。数据类型支持int32,int64,float32或float64。 + - **y** (Variable) - 输入变量,lod level至少为1的LoDTensor。数据类型不限。 + - **ref_level** (int,可选) - 扩展 ``x`` 所依据的 ``y`` 的lod层。默认值-1,表示lod的最后一层。 + - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:扩展变量,维度为 :math:`[N, K]` 的LoDTensor,N由输入 ``x`` 和 ``y`` 的lod共同决定。数据类型与输入 ``x`` 一致。 + +返回类型:Variable + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.layers as layers + import numpy as np + + x = fluid.data(name='x', shape=[1], dtype='float32') + y = fluid.data(name='y', shape=[1], + dtype='float32', lod_level=1) + out = layers.sequence_expand(x=x, y=y, ref_level=0) + + exe = fluid.Executor(fluid.CPUPlace()) + place = fluid.CPUPlace() + + np_data = np.array([[1], [2], [3], [4]]).astype('float32') + x_lod_tensor = fluid.create_lod_tensor(np_data, [[2, 2]], place) + print(x_lod_tensor) + #lod: [[0, 2, 4]] + # dim: 4, 1 + # layout: NCHW + # dtype: float + # data: [1 2 3 4] + + y_lod_tensor = fluid.create_random_int_lodtensor([[2, 2], [3,3,1,1]], [1], + place, low=0, high=1) + print(y_lod_tensor) + #lod: [[0, 2, 4][0, 3, 6, 7, 8]] + # dim: 8, 1 + # layout: NCHW + # dtype: int64_t + # data: [0 0 1 1 1 1 1 0] + + out_main = exe.run(fluid.default_main_program(), + feed={'x': x_lod_tensor, 'y': y_lod_tensor}, + fetch_list=[out], return_numpy=False) + print(out_main[0]) + #lod: [[0, 2, 4, 6, 8]] + # dim: 8, 1 + # layout: NCHW + # dtype: float + # data: [1 2 1 2 3 4 3 4] + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/sequence_first_step_cn.rst b/doc/paddle/api/paddle/fluid/layers/sequence_first_step_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..025ba504580044bc434dfa7ec6979743759534da --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/sequence_first_step_cn.rst @@ -0,0 +1,67 @@ +.. _cn_api_fluid_layers_sequence_first_step: + +sequence_first_step +------------------------------- + + +.. 
py:function:: paddle.fluid.layers.sequence_first_step(input) + + + + +该OP **仅支持LoDTensor类型的输入** ,将对输入的LoDTensor,在最后一层lod_level上,选取其每个序列(sequence)的第一个时间步(time_step)的特征向量作为池化后的输出向量。 + +:: + + Case 1: + + input是1-level LoDTensor: + input.lod = [[0, 2, 5, 7]] + input.data = [[1.], [3.], [2.], [4.], [6.], [5.], [1.]] + input.shape = [7, 1] + + 输出为LoDTensor: + out.shape = [3, 1] + 且 out.shape[0] == len(x.lod[-1]) == 3 + out.data = [[1.], [2.], [5.]], where 1.=first(1., 3.), 2.=first(2., 4., 6.), 5.=first(5., 1.) + + Case 2: + + input是2-level的LoDTensor, 包含3个长度分别为[2, 0, 3]的序列,其中中间的0表示序列为空。 + 第一个长度为2的序列包含2个长度分别为[1, 2]的子序列; + 最后一个长度为3的序列包含3个长度分别为[1, 0, 3]的子序列。 + input.lod = [[0, 2, 2, 5], [0, 1, 3, 4, 4, 7]] + input.data = [[1.], [3.], [2.], [4.], [6.], [5.], [1.]] + input.shape = [7, 1] + + 将根据最后一层的lod信息[0, 1, 3, 4, 4, 7]进行池化操作,且pad_value = 0.0 + 输出为LoDTensor: + out.shape= [5, 1] + out.lod = [[0, 2, 2, 5]] + 其中 out.shape[0] == len(x.lod[-1]) == 5 + out.data = [[1.], [3.], [4.], [0.0], [6.]] + where 1.=first(1.), 3.=first(3., 2.), 4.=first(4.), 0.0 = pad_value, 6.=first(6., 5., 1.) + +参数:**input** (Variable)- 类型为LoDTensor的输入序列,仅支持lod_level不超过2的LoDTensor,数据类型为float32。 + +返回:每个输入序列中的第一个step的特征向量组成的LoDTensor,数据类型为float32。 + +返回类型:Variable + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + x = fluid.layers.data(name='x', shape=[7, 1], append_batch_size=False, + dtype='float32', lod_level=1) + x_first_step = fluid.layers.sequence_first_step(input=x) + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/sequence_last_step_cn.rst b/doc/paddle/api/paddle/fluid/layers/sequence_last_step_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2cae6fb9a0bce9d8a5f2c6f1b43a275262c14622 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/sequence_last_step_cn.rst @@ -0,0 +1,68 @@ +.. _cn_api_fluid_layers_sequence_last_step: + +sequence_last_step +------------------------------- + + +.. py:function:: paddle.fluid.layers.sequence_last_step(input) + + + + +该OP **仅支持LoDTensor类型的输入** ,将对输入的LoDTensor,在最后一层lod_level上,选取其每个序列(sequence)的最后一个时间步(time-step)的特征向量作为池化后的输出向量。 + +:: + + Case 1: + + input是1-level的LoDTensor: + input.lod = [[0, 2, 5, 7]] + input.data = [[1.], [3.], [2.], [4.], [6.], [5.], [1.]] + input.shape = [7, 1] + + 输出为LoDTensor: + out.shape = [3, 1] + 且 out.shape[0] == len(x.lod[-1]) == 3 + + out.data = [[3.], [6.], [1.]], where 3.=last(1., 3.), 6.=last(2., 4., 6.), 1.=last(5., 1.) + + Case 2: + + input是2-level的LoDTensor, 包含3个长度分别为[2, 0, 3]的序列,其中中间的0表示序列为空。 + 第一个长度为2的序列包含2个长度分别为[1, 2]的子序列; + 最后一个长度为3的序列包含3个长度分别为[1, 0, 3]的子序列。 + input.lod = [[0, 2, 2, 5], [0, 1, 3, 4, 4, 7]] + input.data = [[1.], [3.], [2.], [4.], [6.], [5.], [1.]] + input.shape = [7, 1] + + 将根据最后一层的lod信息[0, 1, 3, 4, 4, 7]进行池化操作,且pad_value = 0.0 + 输出为LoDTensor: + out.shape= [5, 1] + out.lod = [[0, 2, 2, 5]] + 其中 out.shape[0] == len(x.lod[-1]) == 5 + out.data = [[1.], [2.], [4.], [0.0], [1.]] + where 1.=last(1.), 2.=last(3., 2.), 4.=last(4.), 0.0 = pad_value, 1=last(6., 5., 1.) + +参数:**input** (Variable)- 类型为LoDTensor的输入序列,仅支持lod_level不超过2的LoDTensor,数据类型为float32。 + +返回:每个输入序列中的最后一步特征向量组成的LoDTensor,数据类型为float32。 + +返回类型:Variable + +**代码示例**: + +.. 
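+作为补充,下面用 numpy 演示按最后一层 lod 取每个序列最后一个时间步的过程(对应上文 Case 1;空序列以 pad_value 填充的情形此处未涉及;仅为语义示意,并非该 OP 的实现):
+
+.. code-block:: python
+
+    import numpy as np
+
+    x = np.array([1., 3., 2., 4., 6., 5., 1.])
+    lod = [0, 2, 5, 7]
+
+    # 每个区间的最后一个元素即为该序列的 last step
+    last = [x[lod[i + 1] - 1] for i in range(len(lod) - 1)]
+    print(last)   # [3.0, 6.0, 1.0]
+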
code-block:: python + + import paddle.fluid as fluid + x = fluid.layers.data(name='x', shape=[7, 1], append_batch_size=False, + dtype='float32', lod_level=1) + x_last_step = fluid.layers.sequence_last_step(input=x) + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/sequence_mask_cn.rst b/doc/paddle/api/paddle/fluid/layers/sequence_mask_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ac7fafb2faa1728201dc7452da644c8449a6f728 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/sequence_mask_cn.rst @@ -0,0 +1,64 @@ +.. _cn_api_fluid_layers_sequence_mask: + +sequence_mask +------------------------------- + +.. py:function:: paddle.fluid.layers.sequence_mask(x, maxlen=None, dtype='int64', name=None) + + + + +该层根据输入 ``x`` 和 ``maxlen`` 输出一个掩码,数据类型为 ``dtype`` 。 + +假设 x 是一个形状为 ``[d_1, d_2,…, d_n]`` 的张量, 则输出 y 是一个形状为 ``[d_1, d_2,… ,d_n, maxlen]`` 的掩码,其中: + +.. math:: + + y(i_1, i_2,..., i_n, j) = (j < x(i_1, i_2,..., i_n)) + +范例如下: + +:: + + 给定输入: + x = [3, 1, 1, 0] maxlen = 4 + + 得到输出张量: + mask = [[1, 1, 1, 0], + [1, 0, 0, 0], + [1, 0, 0, 0], + [0, 0, 0, 0]] + + + + + +参数: + - **x** (Variable) - 输入张量,其元素是小于等于 ``maxlen`` 的整数,形状为 ``[d_1, d_2,…, d_n]`` 的Tensor或LoDTensor。 + - **maxlen** (int,可选) - 序列的最大长度。默认为空,此时 ``maxlen`` 取 ``x`` 中所有元素的最大值。 + - **dtype** (np.dtype|core.VarDesc.VarType|str,可选) - 输出的数据类型,默认为 ``int64`` 。 + - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回: mask张量,Tensor或LoDTensor,形状为 ``[d_1, d_2,… ,d_n, maxlen]`` ,数据类型由 ``dtype`` 指定,支持float32、float64、int32和int64,默认为int64。 + +返回类型: Variable + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.layers as layers + + x = fluid.data(name='x', shape=[10], dtype='float32', lod_level=1) + mask = layers.sequence_mask(x=x) + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/sequence_pad_cn.rst b/doc/paddle/api/paddle/fluid/layers/sequence_pad_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..cae168930ddd5ea6336c1ee1a3e61792bbe9ab71 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/sequence_pad_cn.rst @@ -0,0 +1,90 @@ +.. _cn_api_fluid_layers_sequence_pad: + +sequence_pad +------------------------------- + + +.. 
py:function:: paddle.fluid.layers.sequence_pad(x,pad_value,maxlen=None,name=None) + + + + +序列填充操作符(Sequence Pad Operator),该OP将同一batch中的序列填充到一个一致的长度(由 ``maxlen`` 指定)。填充的新元素的值具体由输入 ``pad_value`` 指定,并会添加到每一个序列的末尾,使得他们最终的长度保持一致。最后返回一个Python tuple ``(Out, Length)`` ,其中LodTensor ``Out`` 为填充后的序列,LodTensor ``Length`` 为填充前的原序列长度信息。 + +注意,该OP的输入 ``x`` 只能是LodTensor。 + +范例如下: + +:: + + 例1: + 给定输入1-level LoDTensor x: + x.lod = [[0, 2, 5]] #输入的两个序列长度是2和3 + x.data = [[a],[b],[c],[d],[e]] + 和输入 pad_value: + pad_value.data = [0] + 设置 maxlen = 4 + + 得到得到tuple (Out, Length): + Out.data = [[[a],[b],[0],[0]],[[c],[d],[e],[0]]] + Length.data = [2, 3] #原序列长度是2和3 + +:: + + 例2: + 给定输入1-level LoDTensor x: + x.lod = [[0, 2, 5]] + x.data = [[a1,a2],[b1,b2],[c1,c2],[d1,d2],[e1,e2]] + 和输入 pad_value: + pad_value.data = [0] + 默认 maxlen = None, (根据x的形状,此例中实际长度为3) + + 得到得到tuple (Out, Length): + Out.data = [[[a1,a2],[b1,b2],[0,0]],[[c1,c2],[d1,d2],[e1,e2]]] + Length.data = [2, 3] + +:: + + 例3: + 给定输入1-level LoDTensor x: + x.lod = [[0, 2, 5]] + x.data = [[a1,a2],[b1,b2],[c1,c2],[d1,d2],[e1,e2]] + 和输入 pad_value: + pad_value.data = [p1,p2] + 默认 maxlen = None, (根据x的形状,此例中实际长度为3) + + 得到tuple (Out, Length): + Out.data = [[[a1,a2],[b1,b2],[p1,p2]],[[c1,c2],[d1,d2],[e1,e2]]] + Length.data = [2, 3] + + +参数: + - **x** (Vairable) - 输入,维度为 ``[M, K]`` 的LoDTensor,仅支持lod_level为1。lod所描述的序列数量,作为要填充的batch_size。数据类型为int32,int64,float32或float64。 + - **pad_value** (Variable) - 填充值,可以是标量或长度为 ``K`` 的一维Tensor。如果是标量,则自动广播为Tensor。数据类型需与 ``x`` 相同。 + - **maxlen** (int,可选) - 填充序列的长度。默认为None,此时以序列中最长序列的长度为准,其他所有序列填充至该长度。当是某个特定的正整数,最大长度必须大于最长初始序列的长度。 + - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:元素为两个LoDTensor的Python tuple。第一个元素为填充后的变量 ``Out`` ,形状为 ``[batch_size, maxlen, K]`` ,lod level为0的LoDTensor,数据类型与输入 ``x`` 相同。第二个元素为填充前的原序列长度信息 ``Length`` ,lod level为0的一维LoDTensor,长度等于batch_size,数据类型为int64。 + +返回类型:tuple + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import numpy + + x = fluid.layers.data(name='y', shape=[10, 5], + dtype='float32', lod_level=1) + pad_value = fluid.layers.assign( + input=numpy.array([0.0], dtype=numpy.float32)) + out = fluid.layers.sequence_pad(x=x, pad_value=pad_value) + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/sequence_pool_cn.rst b/doc/paddle/api/paddle/fluid/layers/sequence_pool_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..988ffd7b57030bfac2acb29f01a87138bf26ae69 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/sequence_pool_cn.rst @@ -0,0 +1,98 @@ +.. _cn_api_fluid_layers_sequence_pool: + +sequence_pool +------------------------------- + + +.. 
py:function:: paddle.fluid.layers.sequence_pool(input, pool_type, is_test=False, pad_value=0.0) + + + + +**注意:该OP的输入只能是LoDTensor,如果您需要处理的输入是Tensor类型,请使用pool2d函数(fluid.layers.** :ref:`cn_api_fluid_layers_pool2d` **)。** + +该OP **仅支持LoDTensor类型的输入** ,将对输入的LoDTensor进行指定方式的池化(pooling)操作。通过指定pool_type参数,将输入的每个序列(sequence)在最后一层lod_level上或时间步(time-step)上对特征进行诸如sum、average、sqrt等池化操作。 + +支持六种pool_type: + +- **average**: :math:`Out[i] = \frac{\sum_{i}X_{i}}{N}` +- **sum**: :math:`Out[i] = \sum _{j}X_{ij}` +- **sqrt**: :math:`Out[i] = \frac{ \sum _{j}X_{ij}}{\sqrt{len(\sqrt{X_{i}})}}` +- **max**: :math:`Out[i] = max(X_{i})` +- **last**: :math:`Out[i] = X_{N\_i}` +- **first**: :math:`Out[i] = X_{0}` + +其中 ``N_i`` 为待池化第i个输入序列的长度。 + +:: + + Case 1: + + input是1-level的LoDTensor, 且pad_value = 0.0: + input.lod = [[0, 2, 5, 7, 7]] + input.data = [[1.], [3.], [2.], [4.], [6.], [5.], [1.]] + input.shape = [7, 1] + 输出为LoDTensor: + out.shape = [4, 1] + 其中 out.shape[0] == len(x.lod[-1]) == 4 + 对于不同的pool_type: + average: out.data = [[2.], [4.], [3.], [0.0]], where 2.=(1. + 3.)/2, 4.=(2. + 4. + 6.)/3, 3.=(5. + 1.)/2 + sum : out.data = [[4.], [12.], [6.], [0.0]], where 4.=1. + 3., 12.=2. + 4. + 6., 6.=5. + 1. + sqrt : out.data = [[2.82], [6.93], [4.24], [0.0]], where 2.82=(1. + 3.)/sqrt(2), 6.93=(2. + 4. + 6.)/sqrt(3), 4.24=(5. + 1.)/sqrt(2) + max : out.data = [[3.], [6.], [5.], [0.0]], where 3.=max(1., 3.), 6.=max(2., 4., 6.), 5.=max(5., 1.) + last : out.data = [[3.], [6.], [1.], [0.0]], where 3.=last(1., 3.), 6.=last(2., 4., 6.), 1.=last(5., 1.) + first : out.data = [[1.], [2.], [5.], [0.0]], where 1.=first(1., 3.), 2.=first(2., 4., 6.), 5.=first(5., 1.) + + 上述out.data中的最后一个[0.0]均为填充的数据。 + + Case 2: + + input是2-level的LoDTensor, 包含3个长度分别为[2, 0, 3]的序列,其中中间的0表示序列为空。 + 第一个长度为2的序列包含2个长度分别为[1, 2]的子序列; + 最后一个长度为3的序列包含3个长度分别为[1, 0, 3]的子序列。 + input.lod = [[0, 2, 2, 5], [0, 1, 3, 4, 4, 7]] + input.data = [[1.], [3.], [2.], [4.], [6.], [5.], [1.]] + input.shape = [7, 1] + + 以pool_type取值为sum为例,将根据最后一层的lod信息[0, 1, 3, 4, 4, 7]进行池化操作,且pad_value = 0.0 + 输出为LoDTensor: + out.shape= [5, 1] + out.lod = [[0, 2, 2, 5]] + 其中 out.shape[0] == len(x.lod[-1]) == 5 + sum: out.data = [[1.], [5.], [4.], [0.0], [12.]] + where 1.=1., 5.=3. + 2., 4.=4., 0.0=pad_value, 12.=6. + 5. + 1. + + +参数: + - **input** (Variable) - 类型为LoDTensor的输入序列,仅支持lod_level不超过2的LoDTensor,数据类型为float32。 + - **pool_type** (str) - 池化类型,支持average,sum,sqrt,max,last和first池化操作。 + - **is_test** (bool) - 仅在pool_type取值为max时生效。当is_test为False时,则在池化操作过程中会创建maxIndex临时Tenosr,以记录最大特征值对应的索引信息,用于训练阶段的反向梯度计算。默认为False。 + - **pad_value** (float) - 用于填充输入序列为空时的池化结果,默认为0.0。 + +返回:经过指定类型池化后的LoDTensor,数据类型为float32。 + +返回类型:Variable + +**代码示例**: + +.. 
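+作为补充,下面用 numpy 按上文 Case 1 的 lod 信息计算各种 pool_type 的结果(空序列用 pad_value 填充;仅为语义示意,并非该 OP 的实现):
+
+.. code-block:: python
+
+    import numpy as np
+
+    x = np.array([1., 3., 2., 4., 6., 5., 1.])
+    lod = [0, 2, 5, 7, 7]
+    pad_value = 0.0
+
+    results = {'average': [], 'sum': [], 'sqrt': [], 'max': [], 'last': [], 'first': []}
+    for i in range(len(lod) - 1):
+        seq = x[lod[i]:lod[i + 1]]
+        if len(seq) == 0:                       # 空序列的结果用 pad_value 填充
+            for key in results:
+                results[key].append(pad_value)
+            continue
+        results['average'].append(seq.mean())
+        results['sum'].append(seq.sum())
+        results['sqrt'].append(seq.sum() / np.sqrt(len(seq)))
+        results['max'].append(seq.max())
+        results['last'].append(seq[-1])
+        results['first'].append(seq[0])
+    print(results)
+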
code-block:: python + + import paddle.fluid as fluid + + x = fluid.layers.data(name='x', shape=[7, 1], append_batch_size=False, + dtype='float32', lod_level=1) + avg_x = fluid.layers.sequence_pool(input=x, pool_type='average') + sum_x = fluid.layers.sequence_pool(input=x, pool_type='sum') + sqrt_x = fluid.layers.sequence_pool(input=x, pool_type='sqrt') + max_x = fluid.layers.sequence_pool(input=x, pool_type='max') + last_x = fluid.layers.sequence_pool(input=x, pool_type='last') + first_x = fluid.layers.sequence_pool(input=x, pool_type='first') + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/sequence_reshape_cn.rst b/doc/paddle/api/paddle/fluid/layers/sequence_reshape_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9bba87bdbea9ca16898fdf1addf24b3c18fc0ea5 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/sequence_reshape_cn.rst @@ -0,0 +1,58 @@ +.. _cn_api_fluid_layers_sequence_reshape: + +sequence_reshape +------------------------------- + + +.. py:function:: paddle.fluid.layers.sequence_reshape(input, new_dim) + + + + +**注意:该OP的输入只能是LoDTensor,如果您需要处理的输入是Tensor类型,请使用reshape函数(fluid.layers.** :ref:`cn_api_fluid_layers_reshape` **)。** + +**该OP仅支持LoDTensor** ,在指定 ``new_dim`` 参数下,通过序列原始长度、和原始shape计算出新的shape,以输出包含新维度(new_dim)下的LoDTensor。目前仅支持1-level LoDTensor,请确保(原长度*原维数)可以除以新的维数,且每个序列没有余数。 + +:: + + input是一个LoDTensor: + input.lod = [[0, 2, 6]] + input.data = [[1, 2], [3, 4], + [5, 6], [7, 8], + [9, 10], [11, 12]] + input.shape = [6, 2] + 设置 new_dim = 4 + 输出为LoDTensor: + out.lod = [[0, 1, 3]] + + out.data = [[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12]] + out.shape = [3, 4] + + + +参数: + - **input** (Variable) - 维度为 :math:`[M, K]` 的二维LoDTensor,且仅支持lod_level为1。数据类型为int32,int64,float32或float64。 + - **new_dim** (int)- 指定reshape后的新维度,即对输入LoDTensor重新reshape后的新维度。 + +返回:根据新维度重新reshape后的LoDTensor,数据类型和输入一致。 + +返回类型:Variable + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + x = fluid.layers.data(name='x', shape=[2, 6], append_batch_size=False, dtype='float32', lod_level=1) + x_reshaped = fluid.layers.sequence_reshape(input=x, new_dim=4) + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/sequence_reverse_cn.rst b/doc/paddle/api/paddle/fluid/layers/sequence_reverse_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2bad60302dc081653322dee0bbc23a6c20cc84e9 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/sequence_reverse_cn.rst @@ -0,0 +1,57 @@ +.. _cn_api_fluid_layers_sequence_reverse: + +sequence_reverse +------------------------------- + +.. 
py:function:: paddle.fluid.layers.sequence_reverse(x, name=None) + + + + +**注意:该OP的输入只能是LoDTensor,如果您需要处理的输入是Tensor类型,请使用reverse函数(fluid.layers.** :ref:`cn_api_fluid_layers_reverse` **)。** + +**该OP仅支持LoDTensor** ,对于输入的LoDTensor,在每个序列(sequence)上进行反转。目前仅支持对LoD层次(LoD level)为1的LoDTensor进行反转。该OP在构建反向 :ref:`cn_api_fluid_layers_DynamicRNN` 网络时十分有用。 + +:: + + 输入x是一个LoDTensor: + x.lod = [[0, 2, 5]] + x.data = [[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13,14, 15, 16], + [17,18, 19, 20]] + x.shape = [5, 4] + + 输出out与x具有同样的shape和LoD信息: + out.lod = [[0, 2, 5]] + out.data = [[5, 6, 7, 8], + [1, 2, 3, 4], + [17,18, 19, 20], + [13,14, 15, 16], + [9, 10, 11, 12]] + out.shape = [5, 4] + + +参数: + - **x** (Variable) – 输入是LoD level为1的LoDTensor。目前仅支持对LoD层次(LoD level)为1的LoDTensor进行反转。数据类型为float32,float64,int8,int32或int64。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:输出在每个序列上反转后的LoDTensor,数据类型和输入类型一致。 + +返回类型:Variable + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + x = fluid.layers.data(name='x', shape=[2, 6], dtype='float32') + x_reversed = fluid.layers.sequence_reverse(x) + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/sequence_scatter_cn.rst b/doc/paddle/api/paddle/fluid/layers/sequence_scatter_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9cfdb0d6364de573c7fdd33b9a50c893954b40ec --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/sequence_scatter_cn.rst @@ -0,0 +1,78 @@ +.. _cn_api_fluid_layers_sequence_scatter: + +sequence_scatter +------------------------------- + + +.. py:function:: paddle.fluid.layers.sequence_scatter(input, index, updates, name=None) + + + + +.. note:: + 该OP的输入index,updates必须是LoDTensor。 + +该OP根据index提供的位置将updates中的信息更新到输出中。 + +该OP先使用input初始化output,然后通过output[instance_index][index[pos]] += updates[pos]方式,将updates的信息更新到output中,其中instance_idx是pos对应的在batch中第k个样本。 + +output[i][j]的值取决于能否在index中第i+1个区间中找到对应的数据j,若能找到out[i][j] = input[i][j] + update[m][n],否则 out[i][j] = input[i][j]。 + +例如,在下面样例中,index的lod信息分为了3个区间。其中,out[0][0]能在index中第1个区间中找到对应数据0,所以,使用updates对应位置的值进行更新,out[0][0] = input[0][0]+updates[0][0]。out[2][1]不能在index中第3个区间找到对应数据1,所以,它等于输入对应位置的值,out[2][1] = input[2][1]。 + +**样例**: + +:: + + 输入: + + input.data = [[1.0, 1.0, 1.0, 1.0, 1.0, 1.0], + [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], + [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]] + input.dims = [3, 6] + + index.data = [[0], [1], [2], [5], [4], [3], [2], [1], [3], [2], [5], [4]] + index.lod = [[0, 3, 8, 12]] + + updates.data = [[0.3], [0.3], [0.4], [0.1], [0.2], [0.3], [0.4], [0.0], [0.2], [0.3], [0.1], [0.4]] + updates.lod = [[ 0, 3, 8, 12]] + + 输出: + + out.data = [[1.3, 1.3, 1.4, 1.0, 1.0, 1.0], + [1.0, 1.0, 1.4, 1.3, 1.2, 1.1], + [1.0, 1.0, 1.3, 1.2, 1.4, 1.1]] + out.dims = X.dims = [3, 6] + + +参数: + - **input** (Variable) - 维度为 :math:`[N, k_1 ... k_n]` 的Tensor, 支持的数据类型:float32,float64,int32,int64。 + - **index** (Variable) - 包含index信息的LoDTensor,lod level必须等于1,支持的数据类型:int32,int64。 + - **updates** (Variable) - 包含updates信息的LoDTensor,lod level和index一致,数据类型与input的数据类型一致。支持的数据类型:float32,float64,int32,int64。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:在input的基础上使用updates进行更新后得到的Tensor,它与input有相同的维度和数据类型。 + +返回类型:Variable + + +**代码示例**: + +.. 
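+作为补充,下面用 numpy 模拟上文样例中按 index 的 lod 区间逐行更新的过程(仅为语义示意,并非该 OP 的实现):
+
+.. code-block:: python
+
+    import numpy as np
+
+    out = np.ones((3, 6), dtype=np.float32)
+    index = [0, 1, 2, 5, 4, 3, 2, 1, 3, 2, 5, 4]
+    updates = [0.3, 0.3, 0.4, 0.1, 0.2, 0.3, 0.4, 0.0, 0.2, 0.3, 0.1, 0.4]
+    lod = [0, 3, 8, 12]    # index/updates 的 lod,第 i 个区间对应 batch 中第 i 个样本
+
+    for row in range(len(lod) - 1):
+        for pos in range(lod[row], lod[row + 1]):
+            out[row, index[pos]] += updates[pos]
+    print(out)
+    # [[1.3 1.3 1.4 1.  1.  1. ]
+    #  [1.  1.  1.4 1.3 1.2 1.1]
+    #  [1.  1.  1.3 1.2 1.4 1.1]]
+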
code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.layers as layers + + input = fluid.data( name="x", shape=[3, 6], dtype='float32' ) + index = fluid.data( name='index', shape=[12, 1], dtype='int64', lod_level=1) + updates = fluid.data( name='updates', shape=[12, 1], dtype='float32', lod_level=1) + output = fluid.layers.sequence_scatter(input, index, updates) + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/sequence_slice_cn.rst b/doc/paddle/api/paddle/fluid/layers/sequence_slice_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1815f6750061bf45e2ad540e6eea107cf796963a --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/sequence_slice_cn.rst @@ -0,0 +1,71 @@ +.. _cn_api_fluid_layers_sequence_slice: + +sequence_slice +------------------------------- + + +.. py:function:: paddle.fluid.layers.sequence_slice(input, offset, length, name=None) + + + + +**实现Sequence Slice(序列切片)运算** + +**该OP输入只能是LoDTensor, 如果您需要处理的是Tensor类型,请使用 :ref:`cn_api_fluid_layers_slice` 。** +该层从给定序列中截取子序列。截取依据为所给的开始 ``offset`` (偏移量) 和子序列长 ``length`` 。 + +:: + 输入变量: + (1) input (LoDTensor): + input.data = [[a1, a2], [b1, b2], [c1, c2], [d1, d2], [e1, e2]], + input.lod = [[3, 2]], + input.dims = (5, 2), + + (2) offset (Variable): + offset.data = [[0], [1]] + (3) length (Variable): + length.data = [[2], [1]] + (4) name (str|None) + + 输出变量为LoDTensor: + + out.data = [[a1, a2], [b1, b2], [e1, e2]], + out.lod = [[2, 1]], + out.dims = (3, 2). + +.. 注意:: + ``input`` , ``offset`` , ``length`` 的第一维大小应相同。 + ``offset`` 从0开始。 + +参数: + - **input** (Variable) – 输入变量,类型为LoDTensor,承载着完整的序列。数据类型为float32,float64,int32或int64。 + - **offset** (Variable) – 指定每个序列切片的起始索引,数据类型为int32或int64。 + - **length** (Variable) – 指定每个子序列的长度,数据类型为int32或int64。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:Variable(LoDTensor) 序列切片运算结果 + +返回类型:变量(Variable), 数据类型与 ``input`` 一致 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + seqs = fluid.layers.data(name='x', shape=[10, 5], + dtype='float32', lod_level=1) + offset = fluid.layers.assign(input=np.array([[0, 1]]).astype("int32")) + length = fluid.layers.assign(input=np.array([[2, 1]]).astype("int32")) + subseqs = fluid.layers.sequence_slice(input=seqs, offset=offset, + length=length) + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/sequence_softmax_cn.rst b/doc/paddle/api/paddle/fluid/layers/sequence_softmax_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..866cca004450e11a212eb8bbc4244bbc3e75a6a4 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/sequence_softmax_cn.rst @@ -0,0 +1,76 @@ +.. _cn_api_fluid_layers_sequence_softmax: + +sequence_softmax +------------------------------- + + +.. py:function:: paddle.fluid.layers.sequence_softmax(input, use_cudnn=False, name=None) + + + + +.. note:: + 该OP的输入只能是LoDTensor,如果要处理的输入是Tensor类型,请使用 :ref:`cn_api_fluid_layers_softmax` + +该OP根据LoD信息将输入的第0维度进行划分,在划分的每一个区间内部进行运算。 + +对第i个区间内的元素的计算公式如下: + +.. 
math::
+
+    Out(X[lod[i]:lod[i+1], :]) = \frac{\exp(X[lod[i]:lod[i+1], :])}{\sum \exp(X[lod[i]:lod[i+1], :])}
+
+输入Tensor的维度可为 :math:`[N,1]` 或者 :math:`[N]` ,推荐使用 :math:`[N]` 。
+
+例如,对有6个样本的batch,每个样本的长度为3,2,4,1,2,3,其lod信息为[[0, 3, 5, 9, 10, 12, 15]],根据lod信息将第0维度划分为6份,在 :math:`X[0:3,:],X[3:5,:],X[5:9,:],X[9:10,:],X[10:12,:],X[12:15,:]` 中进行softmax运算。
+
+::
+
+    示例:
+
+    给定:
+        input.data = [0.7, 1, 0.6,
+                      1.5, 1.1,
+                      1.2, 0.2, 0.6, 1.9,
+                      3.1,
+                      2.5, 0.8,
+                      0.1, 2.4, 1.3]
+        input.lod = [[0, 3, 5, 9, 10, 12, 15]]
+    则:
+        output.data = [0.30724832, 0.41474187, 0.2780098,
+                       0.59868765, 0.40131235,
+                       0.2544242, 0.09359743, 0.13963096, 0.5123474,
+                       1.,
+                       0.84553474, 0.15446526,
+                       0.06995796, 0.69777346, 0.23226859]
+        output.lod = [[0, 3, 5, 9, 10, 12, 15]]
+
+
+参数:
+    - **input** (Variable) - 维度为 :math:`[N, 1]` 或者 :math:`[N]` 的LoDTensor,推荐使用 :math:`[N]` 。支持的数据类型:float32,float64。
+    - **use_cudnn** (bool,可选) - 是否用cudnn核,仅当安装cudnn版本的paddle库且使用gpu训练或推理的时候生效。支持的数据类型:bool型。默认值为False。
+    - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。
+
+返回:根据区间计算softmax之后的LoDTensor,其维度与input的维度一致,数据类型与input的数据类型一致。
+
+返回类型:Variable
+
+**代码示例**:
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+    x = fluid.data(name='x', shape=[7, 1],
+        dtype='float32', lod_level=1)
+    x_sequence_softmax = fluid.layers.sequence_softmax(input=x)
+
+    y = fluid.data(name='y', shape=[7],
+        dtype='float32', lod_level=1)
+    y_sequence_softmax = fluid.layers.sequence_softmax(input=y)
+
+
+
diff --git a/doc/paddle/api/paddle/fluid/layers/sequence_unpad_cn.rst b/doc/paddle/api/paddle/fluid/layers/sequence_unpad_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..20a437aef48d2d620a9b8f382756837355d9e853
--- /dev/null
+++ b/doc/paddle/api/paddle/fluid/layers/sequence_unpad_cn.rst
@@ -0,0 +1,75 @@
+.. _cn_api_fluid_layers_sequence_unpad:
+
+sequence_unpad
+-------------------------------
+
+
+.. py:function:: paddle.fluid.layers.sequence_unpad(x, length, name=None)
+
+
+
+
+.. note::
+    该OP的输入为Tensor,输出为LoDTensor。该OP用于移除填充元素,与之对应,还存在进行数据填充的OP sequence_pad,详情见: :ref:`cn_api_fluid_layers_sequence_pad`
+
+该OP根据length的信息,将input中padding(填充)元素移除,并且返回一个LoDTensor。
+
+::
+
+    示例:
+
+    给定输入变量 ``x`` :
+        x.data = [[ 1.0,  2.0,  3.0,  4.0,  5.0],
+                  [ 6.0,  7.0,  8.0,  9.0, 10.0],
+                  [11.0, 12.0, 13.0, 14.0, 15.0]],
+
+    其中包含 3 个被填充到长度为5的序列,实际长度由输入变量 ``length`` 指明,其中,x的维度为[3, 5],length的维度为[3],length的第0维与x的第0维一致:
+
+        length.data = [2, 3, 4],
+
+    则去填充(unpad)后的输出变量为:
+
+        out.data = [[1.0, 2.0, 6.0, 7.0, 8.0, 11.0, 12.0, 13.0, 14.0]]
+        out.lod = [[0, 2, 5, 9]]
+
+
+
+参数:
+    - **x** (Variable) – 包含填充元素的Tensor,其维度大小不能小于2,支持的数据类型:float32, float64,int32, int64。
+    - **length** (Variable) – 存储每个样本实际长度信息的1D Tensor,该Tensor维度的第0维必须与x维度的第0维一致。支持的数据类型:int64。
+    - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。
+
+返回:将输入的填充元素移除,并返回一个LoDTensor,其递归序列长度与length参数的信息一致,其数据类型和输入一致。
+
+返回类型:Variable
+
+**代码示例**
+
+.. 
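+作为补充,下面用 numpy 演示按 length 去除填充并得到 lod 信息的过程(数值与上文示例一致;仅为语义示意,并非该 OP 的实现):
+
+.. code-block:: python
+
+    import numpy as np
+
+    x = np.array([[ 1.,  2.,  3.,  4.,  5.],
+                  [ 6.,  7.,  8.,  9., 10.],
+                  [11., 12., 13., 14., 15.]])
+    length = np.array([2, 3, 4])
+
+    out = np.concatenate([x[i, :length[i]] for i in range(len(length))])
+    lod = np.concatenate([[0], np.cumsum(length)])
+    print(out)   # [ 1.  2.  6.  7.  8. 11. 12. 13. 14.]
+    print(lod)   # [0 2 5 9]
+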
code-block:: python + + import paddle.fluid as fluid + import numpy + + # example 1: + x = fluid.data(name='x', shape=[10, 5], dtype='float32') + len = fluid.data(name='length', shape=[10], dtype='int64') + out = fluid.layers.sequence_unpad(x=x, length=len) + + # example 2: + # 使用sequence_pad填充数据 + input = fluid.data(name='input', shape=[10, 5], dtype='float32', lod_level=1) + pad_value = fluid.layers.assign(input=numpy.array([0.0], dtype=numpy.float32)) + pad_data, len = fluid.layers.sequence_pad(x=input, pad_value=pad_value) + + #使用sequence_unpad移除填充数据 + unpad_data = fluid.layers.sequence_unpad(x=pad_data, length=len) + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/shape_cn.rst b/doc/paddle/api/paddle/fluid/layers/shape_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3314982cd5be473d7144b96a6af2043e1d0bca9d --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/shape_cn.rst @@ -0,0 +1,56 @@ +.. _cn_api_fluid_layers_shape: + +shape +------------------------------- + +.. py:function:: paddle.fluid.layers.shape(input) + + + + +shape层。 + +获得输入Tensor或SelectedRows的shape。 + +:: + + 示例1: + 输入是 N-D Tensor类型: + input = [ [1, 2, 3, 4], [5, 6, 7, 8] ] + + 输出shape: + input.shape = [2, 4] + + 示例2: + 输入是 SelectedRows类型: + input.rows = [0, 4, 19] + input.height = 20 + input.value = [ [1, 2], [3, 4], [5, 6] ] # inner tensor + 输出shape: + input.shape = [3, 2] + +参数: + - **input** (Variable)- 输入的多维Tensor或SelectedRows,数据类型为float16,float32,float64,int32,int64。如果输入是SelectedRows类型,则返回其内部持有Tensor的shape。 + + +返回: 一个Tensor,表示输入Tensor或SelectedRows的shape。 + +返回类型: Variable(Tensor)。 + +**代码示例:** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + inputs = fluid.data(name="x", shape=[3, 100, 100], dtype="float32") + output = fluid.layers.shape(inputs) + + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(fluid.default_startup_program()) + + img = np.ones((3, 100, 100)).astype(np.float32) + + res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output]) + print(res) # [array([ 3, 100, 100], dtype=int32)] diff --git a/doc/paddle/api/paddle/fluid/layers/shard_index_cn.rst b/doc/paddle/api/paddle/fluid/layers/shard_index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3a4f2c92da059e1fa39f5ed0b5c36c5e291cb49f --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/shard_index_cn.rst @@ -0,0 +1,62 @@ +.. _cn_api_fluid_layers_shard_index: + +shard_index +------------------------------- + +.. py:function:: paddle.fluid.layers.shard_index(input, index_num, nshards, shard_id, ignore_value=-1) + + + + +该函数对输入的索引根据分片(shard)的偏移量重新计算。 +索引长度被均分为N个分片,如果输入索引所在的分片跟分片ID对应,则该索引以分片的偏移量为界重新计算,否则更新为默认值(ignore_value)。具体计算为: +:: + + 每个分片的长度为 + shard_size = (index_num + nshards - 1) // nshards + + 如果 shard_id == input // shard_size + 则 output = input % shard_size + 否则 output = ignore_value + +注意:若索引长度不能被分片数整除,则最后一个分片长度不足shard_size。 + +示例: +:: + + 输入: + input.shape = [4, 1] + input.data = [[1], [6], [12], [19]] + index_num = 20 + nshards = 2 + ignore_value=-1 + + 如果 shard_id == 0, 输出: + output.shape = [4, 1] + output.data = [[1], [6], [-1], [-1]] + + 如果 shard_id == 1, 输出: + output.shape = [4, 1] + output.data = [[-1], [-1], [2], [9]] + +参数: + - **input** (Variable)- 输入的索引 + - **index_num** (scalar) - 索引长度 + - **nshards** (scalar) - 分片数量 + - **shard_id** (scalar) - 当前分片ID + - **ignore_value** (scalar) - 超出分片索引范围的默认值 + +返回:更新后的索引值 + +返回类型:Variable + +**代码示例:** + +.. 
code-block:: python + + import paddle.fluid as fluid + label = fluid.layers.data(name="label", shape=[1], dtype="int64") + shard_label = fluid.layers.shard_index(input=label, + index_num=20, + nshards=2, + shard_id=0) diff --git a/doc/paddle/api/paddle/fluid/layers/shuffle_channel_cn.rst b/doc/paddle/api/paddle/fluid/layers/shuffle_channel_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7d4b0637a6aeb1dbfa1303c55792646491aec4d9 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/shuffle_channel_cn.rst @@ -0,0 +1,72 @@ +.. _cn_api_fluid_layers_shuffle_channel: + +shuffle_channel +------------------------------- + +.. py:function:: paddle.fluid.layers.shuffle_channel(x, group, name=None) + + + + +该OP将输入 ``x`` 的通道混洗重排。 它将每个组中的输入通道分成 ``group`` 个子组,并通过逐一从每个子组中选择元素来获得新的顺序。 + +请参阅 https://arxiv.org/pdf/1707.01083.pdf + +:: + + 输入一个形为 (N, C, H, W) 的4-D tensor: + + input.shape = (1, 4, 2, 2) + input.data =[[[[0.1, 0.2], + [0.2, 0.3]], + + [[0.3, 0.4], + [0.4, 0.5]], + + [[0.5, 0.6], + [0.6, 0.7]], + + [[0.7, 0.8], + [0.8, 0.9]]]] + + 指定组数 group: 2 + 可得到与输入同形的输出 4-D tensor: + + out.shape = (1, 4, 2, 2) + out.data = [[[[0.1, 0.2], + [0.2, 0.3]], + + [[0.5, 0.6], + [0.6, 0.7]], + + [[0.3, 0.4], + [0.4, 0.5]], + + [[0.7, 0.8], + [0.8, 0.9]]]] + +参数: + - **x** (Variable) – 输入Tensor。 维度为[N,C,H,W]的4-D Tensor。 + - **group** (int) – 表示子组的数目,它应该整除通道数。 + +返回:一个形状和类型与输入相同的Tensor。 + +返回类型:Variable + + +**代码示例:** + +.. code-block:: python + + import paddle.fluid as fluid + input = fluid.layers.data(name='input', shape=[4,2,2], dtype='float32') + out = fluid.layers.shuffle_channel(x=input, group=2) + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/sigmoid_cn.rst b/doc/paddle/api/paddle/fluid/layers/sigmoid_cn.rst new file mode 100755 index 0000000000000000000000000000000000000000..fb5ccd21da695a2f3c840663797b66a5ccaff54c --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/sigmoid_cn.rst @@ -0,0 +1,55 @@ +.. _cn_api_fluid_layers_sigmoid: + +sigmoid +------------------------------- + +.. py:function:: paddle.fluid.layers.sigmoid(x, name=None) + + + + +sigmoid激活函数 + +.. math:: + out = \frac{1}{1 + e^{-x}} + + +参数: + + - **x** (Tensor|LoDTensor)- 数据类型为float32,float64。激活函数的输入值。 + - **name** (str|None) - 该层名称(可选)。若为空,则自动为该层命名。默认:None + +返回:激活函数的输出值 + +返回类型:Variable(Tensor),数据类型为float32的Tensor。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + data = fluid.layers.data(name="input", shape=[-1, 3]) + result = fluid.layers.sigmoid(data) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + x = np.random.rand(3, 3) + output= exe.run(feed={"input": x}, + fetch_list=[result[0]]) + print(output) + """ + output: + [array([0.50797188, 0.71353652, 0.5452265 ])] + """ + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/sigmoid_cross_entropy_with_logits_cn.rst b/doc/paddle/api/paddle/fluid/layers/sigmoid_cross_entropy_with_logits_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..28458eb0c401762eeb4a71b694761f648cafc380 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/sigmoid_cross_entropy_with_logits_cn.rst @@ -0,0 +1,71 @@ +.. _cn_api_fluid_layers_sigmoid_cross_entropy_with_logits: + +sigmoid_cross_entropy_with_logits +------------------------------- + +.. 
py:function:: paddle.fluid.layers.sigmoid_cross_entropy_with_logits(x, label, ignore_index=-100, name=None, normalize=False) + + + + +在每个类别独立的分类任务中,该OP可以计算按元素的概率误差。可以将其视为预测数据点的标签,其中标签不是互斥的。例如,一篇新闻文章可以同时关于政治,科技,体育或者同时不包含这些内容。 + +logistic loss可通过下式计算: + +.. math:: + loss = -Labels * log(sigma(X)) - (1 - Labels) * log(1 - sigma(X)) + +已知: + +.. math:: + sigma(X) = \frac{1}{1 + exp(-X)} + +代入上方计算logistic loss公式中: + +.. math:: + loss = X - X * Labels + log(1 + exp(-X)) + +为了计算稳定性,防止 :math:`exp(-X)` 溢出,当 :math:`X<0` 时,loss将采用以下公式计算: + +.. math:: + loss = max(X, 0) - X * Labels + log(1 + exp(-|X|)) + +输入 ``X`` 和 ``label`` 都可以携带LoD信息。然而输出仅采用输入 ``X`` 的LoD。 + + + +参数: + - **x** (Variable) - (Tensor, 默认 Tensor),形为 N x D 的二维张量,N为batch大小,D为类别数目。该输入是一个由先前运算得出的logit组成的张量。logit是未标准化(unscaled)的log概率, 公式为 :math:`log(\frac{p}{1-p})`, 数据类型为float32或float64。 + - **label** (Variable) - (Tensor, 默认 Tensor) 具有和 ``X`` 相同数据类型,相同形状的二维张量。该输入张量代表了每个logit的可能标签。 + - **ignore_index** (int) - (int,默认kIgnoreIndex)指定被忽略的目标值,它不会影响输入梯度。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + - **normalize** (bool) - 如果为true,则将输出除以除去ignore_index对应目标外的目标数,默认为False。 + +返回: Variable(Tensor, 默认Tensor), 形为 N x D 的二维张量,其值代表了按元素的logistic loss,数据类型为float32或float64。 + +返回类型:变量(Variable) + + + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + input = fluid.layers.data( + name='data', shape=[10], dtype='float32') + label = fluid.layers.data( + name='data', shape=[10], dtype='float32') + loss = fluid.layers.sigmoid_cross_entropy_with_logits( + x=input, + label=label, + ignore_index=-1, + normalize=True) # or False + # loss = fluid.layers.reduce_sum(loss) # loss之和 + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/sigmoid_focal_loss_cn.rst b/doc/paddle/api/paddle/fluid/layers/sigmoid_focal_loss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c62a42b2f76a1fea8593e695b90d7084457a4a34 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/sigmoid_focal_loss_cn.rst @@ -0,0 +1,113 @@ +.. _cn_api_fluid_layers_sigmoid_focal_loss: + +sigmoid_focal_loss +------------------------------- + +.. py:function:: paddle.fluid.layers.sigmoid_focal_loss(x, label, fg_num, gamma=2.0, alpha=0.25) + + + + +`Focal Loss `_ 被提出用于解决计算机视觉任务中前景-背景不平衡的问题。该OP先计算输入x中每个元素的sigmoid值,然后计算sigmoid值与类别目标值label之间的Focal Loss。 + +Focal Loss的计算过程如下: + +.. math:: + + \mathop{loss_{i,\,j}}\limits_{i\in\mathbb{[0,\,N-1]},\,j\in\mathbb{[0,\,C-1]}}=\left\{ + \begin{array}{rcl} + - \frac{1}{fg\_num} * \alpha * {(1 - \sigma(x_{i,\,j}))}^{\gamma} * \log(\sigma(x_{i,\,j})) & & {(j +1) = label_{i,\,0}}\\ + - \frac{1}{fg\_num} * (1 - \alpha) * {\sigma(x_{i,\,j})}^{ \gamma} * \log(1 - \sigma(x_{i,\,j})) & & {(j +1)!= label_{i,\,0}} + \end{array} \right. + +其中,已知: + +.. 
math:: + + \sigma(x_{i,\,j}) = \frac{1}{1 + \exp(-x_{i,\,j})} + + +参数: + - **x** (Variable) – 维度为 :math:`[N, C]` 的2-D Tensor,表示全部样本的分类预测值。其中,第一维N是批量内参与训练的样本数量,例如在目标检测中,样本为框级别,N为批量内所有图像的正负样本的数量总和;在图像分类中,样本为图像级别,N为批量内的图像数量总和。第二维:math:`C` 是类别数量( **不包括背景类** )。数据类型为float32或float64。 + - **label** (Variable) – 维度为 :math:`[N, 1]` 的2-D Tensor,表示全部样本的分类目标值。其中,第一维N是批量内参与训练的样本数量,第二维1表示每个样本只有一个类别目标值。正样本的目标类别值的取值范围是 :math:`[1, C]` , 负样本的目标类别值是0。数据类型为int32。 + - **fg_num** (Variable) – 维度为 :math:`[1]` 的1-D Tensor,表示批量内正样本的数量,需在进入此OP前获取正样本的数量。数据类型为int32。 + - **gamma** (int|float) – 用于平衡易分样本和难分样本的超参数, 默认值设置为2.0。 + - **alpha** (int|float) – 用于平衡正样本和负样本的超参数,默认值设置为0.25。 + + +返回: 输入x中每个元素的Focal loss,即维度为 :math:`[N, C]` 的2-D Tensor。 + +返回类型: 变量(Variable),数据类型为float32或float64。 + +**代码示例** + +.. code-block:: python + + + import numpy as np + import paddle.fluid as fluid + + num_classes = 10 # exclude background + image_width = 16 + image_height = 16 + batch_size = 32 + max_iter = 20 + + + def gen_train_data(): + x_data = np.random.uniform(0, 255, (batch_size, 3, image_height, + image_width)).astype('float64') + label_data = np.random.randint(0, num_classes, + (batch_size, 1)).astype('int32') + return {"x": x_data, "label": label_data} + + + def get_focal_loss(pred, label, fg_num, num_classes): + pred = fluid.layers.reshape(pred, [-1, num_classes]) + label = fluid.layers.reshape(label, [-1, 1]) + label.stop_gradient = True + loss = fluid.layers.sigmoid_focal_loss( + pred, label, fg_num, gamma=2.0, alpha=0.25) + loss = fluid.layers.reduce_sum(loss) + return loss + + + def build_model(mode='train'): + x = fluid.data(name="x", shape=[-1, 3, -1, -1], dtype='float64') + output = fluid.layers.pool2d(input=x, pool_type='avg', global_pooling=True) + output = fluid.layers.fc( + input=output, + size=num_classes, + # Notice: size is set to be the number of target classes (excluding backgorund) + # because sigmoid activation will be done in the sigmoid_focal_loss op. + act=None) + if mode == 'train': + label = fluid.data(name="label", shape=[-1, 1], dtype='int32') + # Obtain the fg_num needed by the sigmoid_focal_loss op: + # 0 in label represents background, >=1 in label represents foreground, + # find the elements in label which are greater or equal than 1, then + # computed the numbers of these elements. + data = fluid.layers.fill_constant(shape=[1], value=1, dtype='int32') + fg_label = fluid.layers.greater_equal(label, data) + fg_label = fluid.layers.cast(fg_label, dtype='int32') + fg_num = fluid.layers.reduce_sum(fg_label) + fg_num.stop_gradient = True + avg_loss = get_focal_loss(output, label, fg_num, num_classes) + return avg_loss + else: + # During evaluating or testing phase, + # output of the final fc layer should be connected to a sigmoid layer. + pred = fluid.layers.sigmoid(output) + return pred + + + loss = build_model('train') + moment_optimizer = fluid.optimizer.MomentumOptimizer( + learning_rate=0.001, momentum=0.9) + moment_optimizer.minimize(loss) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + for i in range(max_iter): + outs = exe.run(feed=gen_train_data(), fetch_list=[loss.name]) + print(outs) diff --git a/doc/paddle/api/paddle/fluid/layers/sign_cn.rst b/doc/paddle/api/paddle/fluid/layers/sign_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7b5e84aa970e7c8dd9c736990c7f32f5d8e7fc17 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/sign_cn.rst @@ -0,0 +1,26 @@ +.. 
_cn_api_fluid_layers_sign: + +sign +------------------------------- + +.. py:function:: paddle.fluid.layers.sign(x) + +此OP对输入x中每个元素进行正负判断,并且输出正负判断值:1代表正,-1代表负,0代表零。 + +参数: + - **x** (Variable|numpy.ndarray) – 进行正负值判断的多维Tensor或者是多维的numpy数组,数据类型为 float32,float64。 + +返回:输出正负号Tensor,数据的shape大小和输入x的数据shape一致。 + +返回类型:Variable,数据类型和输入数据类型一致。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + data = fluid.layers.sign(np.array([3.0, 0.0, -2.0], dtype='float32')) + # data=[1.0, 0.0, -1.0] + diff --git a/doc/paddle/api/paddle/fluid/layers/similarity_focus_cn.rst b/doc/paddle/api/paddle/fluid/layers/similarity_focus_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..212c2105b51e46d2740b640e537165589161067b --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/similarity_focus_cn.rst @@ -0,0 +1,103 @@ +.. _cn_api_fluid_layers_similarity_focus: + +similarity_focus +------------------------------- + +.. py:function:: paddle.fluid.layers.similarity_focus(input, axis, indexes, name=None) + + + + +**实现SimilarityFocus(相似度聚焦)运算** + +通过以下三个步骤,该层生成一个和输入 ``input`` 同形的 similarity focus mask(相似度聚焦掩码): + +1. 根据 ``axis`` 和 ``indexes`` 提取一个三维张量,第一维为batch大小。 + 例如,如果 ``axis=1, indexes=[a]`` , 将得到矩阵 T=X[:, a, :, :] 。 + 该例中,如果输入X的形为 (BatchSize, A, B, C) ,则输出张量T的形为 (BatchSize, B, C) 。 +2. 对于每一个索引,在输出T中找到最大值。所以同一行、同一列最多只有一个数字,这意味着如果在第i行,第j列中找到最大值,那么在相应行、列中的其他数值都将被忽略。然后再在剩余的数值中找到下一个最大值。显然,将会产生 min(B,C)个数字,并把三维相似聚焦掩码张量相应位置的元素置为1,其余则置为0。对每个索引按元素进行or运算。 +3. 将这个三维相似度聚焦掩码张量 broadcast 成输入 ``input`` 的形状 + +请参考 `Similarity Focus Layer `_ 。 + +:: + + 例如 : + + 给定四维张量 x 形为 (BatchSize, C, A, B), 其中C 为通道Channel数目, + 特征图(feature map)的形为(A,B): + + x.shape = (2, 3, 2, 2) + x.data = [[[[0.8, 0.1], + [0.4, 0.5]], + + [[0.9, 0.7], + [0.9, 0.9]], + + [[0.8, 0.9], + [0.1, 0.2]]], + + + [[[0.2, 0.5], + [0.3, 0.4]], + + [[0.9, 0.7], + [0.8, 0.4]], + + [[0.0, 0.2], + [0.4, 0.7]]]] + + 给定轴: 1 (即channel轴) + 给定索引: [0] + + 于是我们得到一个与输入同形的四维输出张量: + out.shape = (2, 3, 2, 2) + out.data = [[[[1.0, 0.0], + [0.0, 1.0]], + + [[1.0, 0.0], + [0.0, 1.0]], + + [[1.0, 0.0], + [0.0, 1.0]]], + + [[[0.0, 1.0], + [1.0, 0.0]], + + [[0.0, 1.0], + [1.0, 0.0]], + + [[0.0, 1.0], + [1.0, 0.0]]]] + + + +参数: + - **input** (Variable) – 输入张量,应为一个四维张量,形为[BatchSize, A, B, C],数据类型为 float32 或者 float64。 + - **axis** (int) – 指明要选择的轴。 可能取值为 1, 2 或 3。 + - **indexes** (list) – 指明选择维度的索引列表。 + +返回:一个和输入 Variable 同形状、同数据类型的 Variable + +返回类型:Variable + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + data = fluid.layers.data( + name='data', shape=[-1, 3, 2, 2], dtype='float32') + fluid.layers.similarity_focus(input=data, axis=1, indexes=[0]) + + + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/sin_cn.rst b/doc/paddle/api/paddle/fluid/layers/sin_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5dd8bcd2206d95f485e796a3f0958d8fcade98a6 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/sin_cn.rst @@ -0,0 +1,49 @@ +.. _cn_api_fluid_layers_sin: + +sin +------------------------------- + +.. py:function:: paddle.fluid.layers.sin(x, name=None) + + + + +计算输入的正弦值。 + +参数: + - **x** (Variable) - 支持任意维度的Tensor。数据类型为float32,float64或float16。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:返回类型为Variable(Tensor|LoDTensor), 数据类型同输入一致。 + +**代码示例**: + +.. 
code-block:: python + + import numpy as np + import paddle.fluid as fluid + + inputs = fluid.layers.data(name="x", shape = [3], dtype='float32') + output = fluid.layers.sin(inputs) + + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(fluid.default_startup_program()) + + img = np.array([0, 45, 90]).astype(np.float32) + + res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output]) + print(res) + # [array([0. , 0.8509035 , 0.89399666], dtype=float32)] + + + + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/size_cn.rst b/doc/paddle/api/paddle/fluid/layers/size_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6082053be65007c49e2b1e4e41e8a7cf7164035c --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/size_cn.rst @@ -0,0 +1,39 @@ +.. _cn_api_fluid_layers_size: + +size +------------------------------- + +.. py:function:: paddle.fluid.layers.size(input) + + + + +返回张量的单元数量,是一个shape为[1]的int64的张量。 + +参数: + - **input** (Variable)- 输入变量 + +返回:(Variable)。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid.layers as layers + + input = layers.data( + name="input", shape=[3, 100], dtype="float32", append_batch_size=False) + rank = layers.size(input) # 300 + + + + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/slice_cn.rst b/doc/paddle/api/paddle/fluid/layers/slice_cn.rst new file mode 100755 index 0000000000000000000000000000000000000000..c73d21d901b83fdb28ac345f77e3a335b00c9bb9 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/slice_cn.rst @@ -0,0 +1,73 @@ +.. _cn_api_fluid_layers_slice: + +slice +------------------------------- + +.. py:function:: paddle.fluid.layers.slice(input, axes, starts, ends) + + + + +该OP沿多个轴生成 ``input`` 的切片。与numpy类似: https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html 该OP使用 ``axes`` 、 ``starts`` 和 ``ends`` 属性来指定轴列表中每个轴的起点和终点位置,并使用此信息来对 ``input`` 切片。如果向 ``starts`` 或 ``ends`` 传递负值如 :math:`-i`,则表示该轴的反向第 :math:`i-1` 个位置(这里以0为初始位置)。如果传递给 ``starts`` 或 ``end`` 的值大于n(维度中的元素数目),则表示n。当切片一个未知数量的维度时,建议传入 ``INT_MAX``。 ``axes`` 、 ``starts`` 和 ``ends`` 三个参数的元素数目必须相等。以下示例将解释切片如何工作: + +:: + + 示例1: + 给定: + data=[[1,2,3,4],[5,6,7,8],] + axes=[0,1] + starts=[1,0] + ends=[2,3] + 则: + result=[[5,6,7],] + + 示例2: + 给定: + data=[[1,2,3,4],[5,6,7,8],] + starts=[0,1] + ends=[-1,1000] # 此处-1表示第0维的反向第0个位置,索引值是1。 + 则: + result=[[2,3,4],] # 即 data[0:1, 1:4] + +参数: + - **input** (Variable)- 多维 ``Tensor`` 或 ``LoDTensor``,数据类型为 ``float16``, ``float32``,``float64``,``int32``,或 ``int64``。 + - **axes** (list|tuple)- 数据类型是 ``int32``。表示进行切片的轴。 + - **starts** (list|tuple|Variable)- 数据类型是 ``int32``。如果 ``starts`` 的类型是 list 或 tuple,它的元素可以是整数或者形状为[1]的 ``Tensor`` 或 ``LoDTensor``。如果 ``starts`` 的类型是 ``Variable``,则是1-D ``Tensor`` 或 ``LoDTensor``。表示在各个轴上切片的起始索引值。 + - **ends** (list|tuple|Variable)- 数据类型是 ``int32``。如果 ``ends`` 的类型是 list 或 tuple,它的元素可以是整数或者形状为[1]的 ``Tensor`` 或 ``LoDTensor``。如果 ``ends`` 的类型是 ``Variable``,则是1-D ``Tensor`` 或 ``LoDTensor``。表示在各个轴上切片的结束索引值。 + +返回:多维 ``Tensor`` 或 ``LoDTensor``,数据类型与 ``input`` 相同。 + +返回类型:Variable。 + +抛出异常: + - :code:`TypeError`:``starts`` 的类型应该是 list、tuple 或 Variable。 + - :code:`TypeError`:``ends`` 的类型应该是 list、tuple 或 Variable。 +**代码示例:** + +.. code-block:: python + + import paddle.fluid as fluid + + input = fluid.layers.data( + name="input", shape=[3, 4, 5, 6], dtype='float32') + + # example 1: + # attr starts is a list which doesn't contain tensor Variable. 
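+    # As explained above, a negative value in starts counts back from the end
+    # of that axis (a start of -3 on an axis of size 3 resolves to index 0),
+    # and an ends value larger than the axis size is clipped to the axis size.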
+ axes = [0, 1, 2] + starts = [-3, 0, 2] + ends = [3, 2, 4] + sliced_1 = fluid.layers.slice(input, axes=axes, starts=starts, ends=ends) + # sliced_1 is input[:, 0:3, 0:2, 2:4]. + + + # example 2: + # attr starts is a list which contain tensor Variable. + minus_3 = fluid.layers.fill_constant([1], "int32", -3) + sliced_2 = fluid.layers.slice(input, axes=axes, starts=[minus_3, 0, 2], ends=ends) + # sliced_2 is input[:, 0:3, 0:2, 2:4]. + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/smooth_l1_cn.rst b/doc/paddle/api/paddle/fluid/layers/smooth_l1_cn.rst new file mode 100755 index 0000000000000000000000000000000000000000..f64236c76ac5ede5fae21048f7b39fae6c9acc78 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/smooth_l1_cn.rst @@ -0,0 +1,57 @@ +.. _cn_api_fluid_layers_smooth_l1: + +smooth_l1 +------------------------------- + +.. py:function:: paddle.fluid.layers.smooth_l1(x, y, inside_weight=None, outside_weight=None, sigma=None) + + + + +该layer计算变量 ``x`` 和 ``y`` 的smooth L1 loss,它以 ``x`` 和 ``y`` 的第一维大小作为批处理大小。对于每个实例,按元素计算smooth L1 loss,然后计算所有loss。输出变量的形状是[batch_size, 1] + + +参数: + - **x** (Tensor|LoDTensor) - 数据类型为float32,rank至少为2的张量。smooth L1损失函数的输入,shape为[batch_size, dim1,…,dimN]。 + - **y** (Tensor|LoDTensor) - 数据类型为float32,rank至少为2的张量。与 ``x`` shape相同的目标值。 + - **inside_weight** (Tensor|None) - 数据类型为float32,rank至少为2的张量。这个输入是可选的,与x的shape应该相同。如果给定, ``(x - y)`` 的结果将乘以这个张量元素。 + - **outside_weight** (Tensor|None) - 数据类型为float32,一个rank至少为2的张量。这个输入是可选的,它的shape应该与 ``x`` 相同。 smooth L1 loss的输出会乘以这个张量。 + - **sigma** (float|NoneType) - smooth L1 loss layer的超参数。标量,默认值为1.0。 + +返回: smooth L1损失的输出值, shape为 [batch_size, 1] + +返回类型:Variable(Tensor),数据类型为float32的Tensor。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + data = fluid.layers.data(name="x", shape=[-1, 3], dtype="float32") + label = fluid.layers.data(name="y", shape=[-1, 3], dtype="float32") + result = fluid.layers.smooth_l1(data,label) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + x = np.random.rand(3,3).astype("float32") + y = np.random.rand(3,3).astype("float32") + output= exe.run(feed={"x":x, "y":y}, + fetch_list=[result]) + print(output) + """ + output: + [array([[0.08220536], + [0.36652038], + [0.20541131]], dtype=float32)] + """ + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/soft_relu_cn.rst b/doc/paddle/api/paddle/fluid/layers/soft_relu_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9aca26e8ebddd1a494040fc0bb3e461c23ad1984 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/soft_relu_cn.rst @@ -0,0 +1,48 @@ +.. _cn_api_fluid_layers_soft_relu: + +soft_relu +------------------------------- + +.. py:function:: paddle.fluid.layers.soft_relu(x, threshold=40.0, name=None) + + + + +SoftReLU 激活函数. + +.. math:: out=ln(1+exp(max(min(x,threshold),-threshold))) + +参数: + - **x** (Variable) - SoftReLU激活函数的输入,为数据类型为float32,float64的多维Tensor或者LoDTensor。 + - **threshold** (float) - SoftRelu的阈值,默认为40.0。 + - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:一个Tensor,shape和输入Tensor相同。 + +返回类型:Variable(Tensor|LoDTensor),LoD信息与输入Tensor一致。 + +**代码示例:** + +.. 
code-block:: python + + import paddle.fluid as fluid + import numpy as np + + inputs = fluid.layers.data(name="x", shape=[2, 2], dtype="float32") + output = fluid.layers.soft_relu(inputs, threshold=20.0) + + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(fluid.default_startup_program()) + + img = np.array([[0, 1],[2, 3]]).astype(np.float32) + + res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output]) + print(res) # [array([[0.6931472, 1.3132616], [2.126928 , 3.0485873]], dtype=float32)] + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/softmax_cn.rst b/doc/paddle/api/paddle/fluid/layers/softmax_cn.rst new file mode 100755 index 0000000000000000000000000000000000000000..21052f227cd42db24536fd67b97c77cb6bdc5057 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/softmax_cn.rst @@ -0,0 +1,112 @@ +.. _cn_api_fluid_layers_softmax: + +softmax +------------------------------- + +.. py:function:: paddle.fluid.layers.softmax(input, use_cudnn=False, name=None, axis=-1) + +该OP实现了softmax层。OP的计算过程如下: + +步骤1:输入 ``input`` 的 ``axis`` 维会被置换到最后一维; + +步骤2:将输入 ``Tensor`` 在逻辑上变换为二维矩阵。二维矩阵第一维(列长度)是输入除最后一维之外的其他维度值的乘积,第二维(行长度)和输入 ``axis`` 维的长度相同;对于矩阵的每一行,softmax操作对其进行重新缩放,使得该行的每个元素在 \[0,1\] 范围内,并且总和为1; + +步骤3:softmax操作执行完成后,执行步骤1和步骤2的逆运算,将二维矩阵恢复至和输入 ``input`` 相同的维度。 + +上述步骤2中softmax操作计算过程如下: + + - 对于二维矩阵的每一行,计算K维向量(K是输入第 ``axis`` 维的长度)中指定位置的指数值和全部位置指数值的和。 + + - 指定位置指数值与全部位置指数值之和的比值就是softmax操作的输出。 + +对于二维矩阵中的第i行和第j列有: + +.. math:: + + + Out[i,j] = \frac{exp(X[i,j])}{\sum_j exp(X[i,j])} + +- 示例1(矩阵一共有三维。axis = -1,表示沿着最后一维(即第三维)做softmax操作) + +.. code-block:: python + + 输入 + + X.shape = [2, 3, 4] + + X.data = [[[2.0, 3.0, 4.0, 5.0], + [3.0, 4.0, 5.0, 6.0], + [7.0, 8.0, 8.0, 9.0]], + [[1.0, 2.0, 3.0, 4.0], + [5.0, 6.0, 7.0, 8.0], + [6.0, 7.0, 8.0, 9.0]]] + + axis = -1 + + 输出 + + Out.shape = [2, 3, 4] + + Out.data = [[[0.0320586 , 0.08714432, 0.23688282, 0.64391426], + [0.0320586 , 0.08714432, 0.23688282, 0.64391426], + [0.07232949, 0.19661193, 0.19661193, 0.53444665]], + [[0.0320586 , 0.08714432, 0.23688282, 0.64391426], + [0.0320586 , 0.08714432, 0.23688282, 0.64391426], + [0.0320586 , 0.08714432, 0.23688282, 0.64391426]]] + +- 示例2(矩阵一共有三维。axis = 1,表示沿着第二维做softmax操作) + +.. code-block:: python + + 输入 + + X.shape = [2, 3, 4] + + X.data = [[[2.0, 3.0, 4.0, 5.0], + [3.0, 4.0, 5.0, 6.0], + [7.0, 8.0, 8.0, 9.0]], + [[1.0, 2.0, 3.0, 4.0], + [5.0, 6.0, 7.0, 8.0], + [6.0, 7.0, 8.0, 9.0]]] + + axis = 1 + + 输出 + + Out.shape = [2, 3, 4] + + Out.data = [[[0.00657326, 0.00657326, 0.01714783, 0.01714783], + [0.01786798, 0.01786798, 0.04661262, 0.04661262], + [0.97555875, 0.97555875, 0.93623955, 0.93623955]], + [[0.00490169, 0.00490169, 0.00490169, 0.00490169], + [0.26762315, 0.26762315, 0.26762315, 0.26762315], + [0.72747516, 0.72747516, 0.72747516, 0.72747516]]] + + +参数: + - **input** (Variable) - 任意维度的多维 ``Tensor`` ,数据类型为float32或float64。 + - **use_cudnn** (bool, 可选) - 指示是否用cudnn库。当 ``use_cudnn`` 为True时,在安装GPU版本Paddle并且本机安装cudnn库的前提下,使用GPU训练或推理时才有效。默认值:False。 + - **name** (str, 可选) - 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + - **axis** (int, 可选) - 指示进行softmax计算的维度索引,其范围应为 :math:`[-1,rank-1]` ,其中rank是输入变量的秩。默认值:-1(表示对最后一维做softmax操作)。 + +返回:表示softmax操作结果的 ``Tensor`` ,数据类型和 ``input`` 一致,返回维度和 ``input`` 一致。 + +返回类型:Variable + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + import numpy as np + + data = fluid.layers.data(name="input", shape=[-1, 3],dtype="float32") + result = fluid.layers.softmax(data,axis=1) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + x = np.random.rand(3, 3).astype("float32") + output= exe.run(feed={"input": x}, + fetch_list=[result[0]]) + print(output) + diff --git a/doc/paddle/api/paddle/fluid/layers/softmax_with_cross_entropy_cn.rst b/doc/paddle/api/paddle/fluid/layers/softmax_with_cross_entropy_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3babcf9e9a951d36580a930837149bb0694ea083 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/softmax_with_cross_entropy_cn.rst @@ -0,0 +1,61 @@ +.. _cn_api_fluid_layers_softmax_with_cross_entropy: + +softmax_with_cross_entropy +------------------------------- + +.. py:function:: paddle.fluid.layers.softmax_with_cross_entropy(logits, label, soft_label=False, ignore_index=-100, numeric_stable_mode=True, return_softmax=False, axis=-1) + + + + +该OP实现了softmax交叉熵损失函数。该函数会将softmax操作、交叉熵损失函数的计算过程进行合并,从而提供了数值上更稳定的梯度值。 + +因为该运算对 ``logits`` 的 ``axis`` 维执行softmax运算,所以它需要未缩放的 ``logits`` 。该运算不应该对softmax运算的输出进行操作,否则会产生错误的结果。 + +当 ``soft_label`` 为 ``False`` 时, ``label`` 除了 ``axis`` 维度上的形状为1,其余维度和 ``logits`` 一致,表示一批数据中的每一个样本仅可分类到一个类别。 + +涉及到的等式如下: + +1. 硬标签(每个样本仅可分到一个类别) + +.. math:: + loss_j = -\text{logits}_{label_j} +\log\left(\sum_{i=0}^{K}\exp(\text{logits}_i)\right), j = 1,..., K + +2. 软标签(每个样本以一定的概率被分配至多个类别中,概率和为1) + +.. math:: + loss_j = -\sum_{i=0}^{K}\text{label}_i\left(\text{logits}_i - \log\left(\sum_{i=0}^{K}\exp(\text{logits}_i)\right)\right), j = 1,...,K + +3. 如果 ``numeric_stable_mode`` 为 ``True`` ,softmax结果首先经由下式计算得出,然后使用softmax结果和 ``label`` 计算交叉熵损失。 + +.. math:: + max_j &= \max_{i=0}^{K}{\text{logits}_i} \\ + log\_max\_sum_j &= \log\sum_{i=0}^{K}\exp(logits_i - max_j)\\ + softmax_j &= \exp(logits_j - max_j - {log\_max\_sum}_j) + +参数: + - **logits** (Variable) - 维度为任意维的多维 ``Tensor`` ,数据类型为float32或float64。表示未缩放的输入。 + - **label** (Variable) - 如果 ``soft_label`` 为True, ``label`` 是一个和 ``logits`` 维度相同的的 ``Tensor`` 。如果 ``soft_label`` 为False, ``label`` 是一个在axis维度上大小为1,其它维度上与 ``logits`` 维度相同的 ``Tensor`` 。 + - **soft_label** (bool, 可选) - 指明是否将输入标签当作软标签。默认值:False。 + - **ignore_index** (int, 可选) - 指明要无视的目标值,使其不对输入梯度有贡献。仅在 ``soft_label`` 为False时有效,默认值:kIgnoreIndex(-100)。 + - **numeric_stable_mode** (bool, 可选) – 指明是否使用一个具有更佳数学稳定性的算法。仅在 ``soft_label`` 为 False的GPU模式下生效。若 ``soft_label`` 为 True或者执行设备为CPU,算法一直具有数学稳定性。注意使用稳定算法时速度可能会变慢。默认值:True。 + - **return_softmax** (bool, 可选) – 指明是否在返回交叉熵计算结果的同时返回softmax结果。默认值:False。 + - **axis** (int, 可选) – 执行softmax计算的维度索引。其范围为 :math:`[-1,rank-1]` ,其中 ``rank`` 是输入 ``logits`` 的秩。默认值:-1。 + +返回: + - 如果 ``return_softmax`` 为 False,则返回交叉熵损失结果的 ``Tensor`` ,数据类型和 ``logits`` 一致,除了 ``axis`` 维度上的形状为1,其余维度和 ``logits`` 一致。 + - 如果 ``return_softmax`` 为 True,则返回交叉熵损失结果的 ``Tensor`` 和softmax结果的 ``Tensor`` 组成的元组。其中交叉熵损失结果的数据类型和 ``logits`` 一致,除了 ``axis`` 维度上的形状为1,其余维度上交叉熵损失结果和 ``logits`` 一致;softmax结果的数据类型和 ``logits`` 一致,维度和 ``logits`` 一致。 + +返回类型:变量或者两个变量组成的元组 + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + data = fluid.layers.data(name='data', shape=[128], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + fc = fluid.layers.fc(input=data, size=100) + out = fluid.layers.softmax_with_cross_entropy(logits=fc, label=label) + + diff --git a/doc/paddle/api/paddle/fluid/layers/softplus_cn.rst b/doc/paddle/api/paddle/fluid/layers/softplus_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d6d2676aef893849376f9021ff838bc8a317061f --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/softplus_cn.rst @@ -0,0 +1,41 @@ +.. _cn_api_fluid_layers_softplus: + +softplus +------------------------------- + +.. py:function:: paddle.fluid.layers.softplus(x,name=None) + + + + +softplus激活函数 + +.. math:: + out = \ln(1 + e^{x}) + +参数: + - **x** (Variable) - 张量(Tensor) + - **name** (str|None) - 该层名称(可选)。若设为None,则自动为该层命名。 + +返回: 张量(Tensor) + +返回类型: 变量(Variable) + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + data = fluid.layers.data(name="input", shape=[32, 784]) + result = fluid.layers.softplus(data) + + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/softshrink_cn.rst b/doc/paddle/api/paddle/fluid/layers/softshrink_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..77cfc805f380055215851961736a117085797ebb --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/softshrink_cn.rst @@ -0,0 +1,46 @@ +.. _cn_api_fluid_layers_softshrink: + +softshrink +------------------------------- + +.. py:function:: paddle.fluid.layers.softshrink(x, alpha=None) + + + + +Softshrink激活函数 + +.. math:: + out = \begin{cases} + x - \alpha, \text{if } x > \alpha \\ + x + \alpha, \text{if } x < -\alpha \\ + 0, \text{otherwise} + \end{cases} + +参数: + - **x** (Variable0 - 张量(Tensor) + - **alpha** (float) - 上面公式中alpha的值 + +返回: 张量(Tensor) + +返回类型: 变量(Variable) + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + data = fluid.layers.data(name="input", shape=[32, 784]) + result = fluid.layers.softshrink(data) + + + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/softsign_cn.rst b/doc/paddle/api/paddle/fluid/layers/softsign_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f200375f8d7cb7e7c9a0e5fea9cfd31265abc95f --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/softsign_cn.rst @@ -0,0 +1,42 @@ +.. _cn_api_fluid_layers_softsign: + +softsign +------------------------------- + +.. py:function:: paddle.fluid.layers.softsign(x,name=None) + + + + + +softsign激活函数 + +.. math:: + out = \frac{x}{1 + |x|} + +参数: + - **x** (Variable) - 张量(Tensor) + - **name** (str|None) - 该层名称(可选)。若设为None,则自动为该层命名。 + +返回: 张量(Tensor) + +返回类型: 变量(Variable) + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + data = fluid.layers.data(name="input", shape=[32, 784]) + result = fluid.layers.softsign(data) + + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/space_to_depth_cn.rst b/doc/paddle/api/paddle/fluid/layers/space_to_depth_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..174c657adf993b252b281073d8096deb7cc45333 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/space_to_depth_cn.rst @@ -0,0 +1,94 @@ +.. _cn_api_fluid_layers_space_to_depth: + +space_to_depth +------------------------------- + +.. 
py:function:: paddle.fluid.layers.space_to_depth(x, blocksize, name=None) + + + + +该OP对成块的空间数据进行重组,输出一个输入张量的拷贝,其高度和宽度维度上的值移至通道维度。 + +重组时,依据 ``blocksize`` 指明的数据块大小, 对形为 ``[batch, channel, height, width]`` 的输入张量进行space_to_depth(广度至深度)运算,生成形为 ``[batch, channel * blocksize * blocksize, height/blocksize, width/blocksize]`` 的输出: + + - 在各位置上,不重叠的,大小为 ``blocksize * blocksize`` 的块重组入深度depth + - 输入各个块中的Y, X坐标变为输出张量通道索引的高序部位 + - 输入 ``channel`` 需可以被 ``blocksize`` 的平方整除 + - 输入的高度和宽度需可以被 ``blocksize`` 整除 + +该OP适用于在卷积间重放缩激活函数,并保持所有的数据。 + +范例如下: + +:: + + 给定形状为[1, 1, 4, 4]的输入 x: + x.data = [[[[1, 2, 5, 6], + [3, 4, 7, 8], + [9, 10, 13, 14], + [11, 12, 15, 16]]]] + 设置 blocksize = 2 + + 得到形状为[1, 4, 2, 2]的输出 out: + out.data = [[[[1, 2], [3, 4]], + [[5, 6], [7, 8]], + [[9, 10], [11, 12]], + [[13, 14], [15, 16]]]] + + + + +参数: + - **x** (Variable) – 输入,形状为 ``[batch, channel, height, width]`` 的4维Tensor或LoD Tensor。数据类型支持int32,int64,float32或float64。 + - **blocksize** (int) – 在每个特征图上选择元素时采用的块大小,应该 >= 2 + - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:输出,形状为 ``[batch, channel * blocksize * blocksize, height/blocksize, width/blocksize]`` 的4维Tensor或LoD Tensor。数据类型与输入 ``x`` 一致。 + +返回类型:Variable + +抛出异常: + - ``TypeError`` - ``blocksize`` 必须是int64类型 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + data = fluid.data( + name='data', shape=[1, 4, 2, 2], dtype='float32') + space_to_depthed = fluid.layers.space_to_depth( + x=data, blocksize=2) + + exe = fluid.Executor(fluid.CPUPlace()) + data_np = np.arange(0,16).reshape((1,4,2,2)).astype('float32') + + print(data_np) + #array([[[[ 0., 1.], [ 2., 3.]], + # [[ 4., 5.], [ 6., 7.]], + # [[ 8., 9.], [10., 11.]], + # [[12., 13.], [14., 15.]]]], dtype=float32) + + out_main = exe.run(fluid.default_main_program(), + feed={'data': data_np}, + fetch_list=[space_to_depthed]) + + print(out_main) + #[array([[[[ 0.]], [[ 4.]], [[ 1.]], [[ 5.]], + # [[ 8.]], [[12.]], [[ 9.]], [[13.]], + # [[ 2.]], [[ 6.]], [[ 3.]], [[ 7.]], + # [[10.]], [[14.]], [[11.]], [[15.]]]], dtype=float32)] + + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/spectral_norm_cn.rst b/doc/paddle/api/paddle/fluid/layers/spectral_norm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e5d85d632500aaed7f124426450b5ccf2430863f --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/spectral_norm_cn.rst @@ -0,0 +1,56 @@ +.. _cn_api_fluid_layers_spectral_norm: + +spectral_norm +------------------------------- + + +.. py:function:: paddle.fluid.layers.spectral_norm(weight, dim=0, power_iters=1, eps=1e-12, name=None) + + + + +**Spectral Normalization Layer** + +该OP用于计算了fc、conv1d、conv2d、conv3d层的权重参数的谱正则值,输入权重参数应分别为2-D, 3-D, 4-D, 5-D张量,输出张量与输入张量shape相同。谱特征值计算方式如下。 + +步骤1:生成形状为[H]的向量U,以及形状为[W]的向量V,其中H是输入权重张量的第 ``dim`` 个维度,W是剩余维度的乘积。 + +步骤2: ``power_iters`` 应该是一个正整数,用U和V迭代计算 ``power_iters`` 轮,迭代步骤如下。 + +.. math:: + + \mathbf{v} &:= \frac{\mathbf{W}^{T} \mathbf{u}}{\|\mathbf{W}^{T} \mathbf{u}\|_2}\\ + \mathbf{u} &:= \frac{\mathbf{W}^{T} \mathbf{v}}{\|\mathbf{W}^{T} \mathbf{v}\|_2} + +步骤3:计算 :math:`\sigma(\mathbf{W})` 并特征值值归一化。 + +.. 
math:: + \sigma(\mathbf{W}) &= \mathbf{u}^{T} \mathbf{W} \mathbf{v}\\ + \mathbf{W} &= \frac{\mathbf{W}}{\sigma(\mathbf{W})} + +可参考: `Spectral Normalization `_ + +参数: + - **weight** (Variable) - spectral_norm算子的输入权重张量,可以是2-D, 3-D, 4-D, 5-D Tensor,它是fc、conv1d、conv2d、conv3d层的权重,数据类型为float32或float64。 + - **dim** (int) - 将输入(weight)重塑为矩阵之前应排列到第一个的维度索引,如果input(weight)是fc层的权重,则应设置为0;如果input(weight)是conv层的权重,则应设置为1,默认为0。 + - **power_iters** (int) - 将用于计算spectral norm的功率迭代次数,默认值1 + - **eps** (float) - epsilon用于保证计算规范中的数值稳定性,分母会加上 ``eps`` 防止除零,默认1e-12 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置。默认值:None。 + +返回:Variable(Tensor) 谱正则化后权重张量,维度与输入 ``weight`` 一致。 + +返回类型:变量(Variable),数据类型与输入 ``weight`` 一致。 + +**代码示例:** + +.. code-block:: python + + import paddle.fluid as fluid + + weight = fluid.data(name='weight', shape=[2, 8, 32, 32], dtype='float32') + x = fluid.layers.spectral_norm(weight=weight, dim=1, power_iters=2) + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/split_cn.rst b/doc/paddle/api/paddle/fluid/layers/split_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..260756ee823d1b3c341d7f1da5ecf631f692dff4 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/split_cn.rst @@ -0,0 +1,60 @@ +.. _cn_api_fluid_layers_split: + +split +------------------------------- + +.. py:function:: paddle.fluid.layers.split(input, num_or_sections, dim=-1, name=None) + + + + +该OP将输入Tensor分割成多个子Tensor。 + +参数: + - **input** (Tensor) - 输入变量,数据类型为bool, float16,float32,float64,int32,int64的多维Tensor。 + - **num_or_sections** (int|list|tuple) - 如果 ``num_or_sections`` 是一个整数,则表示Tensor平均划分为相同大小子Tensor的数量。如果 ``num_or_sections`` 是一个list或tuple,那么它的长度代表子Tensor的数量,它的元素可以是整数或者形状为[1]的Tensor,依次代表子Tensor需要分割成的维度的大小。list或tuple的长度不能超过输入Tensor待分割的维度的大小。至多有一个元素值为-1,-1表示该值是由 ``input`` 待分割的维度值和 ``num_or_sections`` 的剩余元素推断出来的。 + - **dim** (int|Tenspr,可选) - 整数或者形状为[1]的Tensor,数据类型为int32或int64。表示需要分割的维度。如果 ``dim < 0`` ,则划分的维度为 ``rank(input) + dim`` 。默认值为-1。 + - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:分割后的Tensor列表。 + + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + + # input is a Tensor which shape is [3, 9, 5] + input = fluid.data( + name="input", shape=[3, 9, 5], dtype="float32") + + out0, out1, out2 = fluid.layers.split(input, num_or_sections=3, dim=1) + # out0.shape [3, 3, 5] + # out1.shape [3, 3, 5] + # out2.shape [3, 3, 5] + + out0, out1, out2 = fluid.layers.split(input, num_or_sections=[2, 3, 4], dim=1) + # out0.shape [3, 2, 5] + # out1.shape [3, 3, 5] + # out2.shape [3, 4, 5] + + out0, out1, out2 = fluid.layers.split(input, num_or_sections=[2, 3, -1], dim=1) + # out0.shape [3, 2, 5] + # out1.shape [3, 3, 5] + # out2.shape [3, 4, 5] + + # dim is negative, the real dim is (rank(input) + axis) which real + # value is 1. + out0, out1, out2 = fluid.layers.split(input, num_or_sections=3, dim=-2) + # out0.shape [3, 3, 5] + # out1.shape [3, 3, 5] + # out2.shape [3, 3, 5] + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/sqrt_cn.rst b/doc/paddle/api/paddle/fluid/layers/sqrt_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..779ab45fbf975d0cc9538cd7f2d68f0194e8bc64 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/sqrt_cn.rst @@ -0,0 +1,55 @@ +.. _cn_api_fluid_layers_sqrt: + +sqrt +------------------------------- + +.. py:function:: paddle.fluid.layers.sqrt(x, name=None) + + + + +计算输入的算数平方根。 + +.. math:: out=\sqrt x=x^{1/2} + +.. 
note:: + 请确保输入中的数值是非负数。 + +参数: + + - **x** (Variable) - 支持任意维度的Tensor。数据类型为float32,float64或float16。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:返回类型为Variable(Tensor|LoDTensor), 数据类型同输入一致。 + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle.fluid as fluid + + inputs = fluid.layers.data(name="x", shape = [3], dtype='float32') + output = fluid.layers.sqrt(inputs) + + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(fluid.default_startup_program()) + + img = np.array([0, 9, 36]).astype(np.float32) + + res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output]) + print(res) + # [array([0., 3., 6.], dtype=float32)] + + + + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/square_cn.rst b/doc/paddle/api/paddle/fluid/layers/square_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c3687b4c7a4dd0766e7b73e2464eb0f0de125a6d --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/square_cn.rst @@ -0,0 +1,41 @@ +.. _cn_api_fluid_layers_square: + +square +------------------------------- + +.. py:function:: paddle.fluid.layers.square(x,name=None) + + + + +该OP执行逐元素取平方运算。 + +.. math:: + out = x^2 + +参数: + - **x** (Variable) - 任意维度的Tensor,支持的数据类型: float32,float64。 + - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:返回取平方后的Tensor,维度和数据类型同输入一致。 + +返回类型:Variable + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + data = fluid.layers.data(name="input", shape=[32, 784], dtype='float32') + result = fluid.layers.square(data) #result.shape=[32, 784], type=float32 + + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/square_error_cost_cn.rst b/doc/paddle/api/paddle/fluid/layers/square_error_cost_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..be416cf465fd34f2335de4039a6e2f075c42d754 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/square_error_cost_cn.rst @@ -0,0 +1,43 @@ +.. _cn_api_fluid_layers_square_error_cost: + +square_error_cost +------------------------------- + +.. py:function:: paddle.fluid.layers.square_error_cost(input,label) + + + + +该OP用于计算预测值和目标值的方差估计。 + +对于预测值input和目标值label,公式为: + +.. math:: + + Out = (input-label)^{2} + +参数: + - **input** (Variable) - 预测值,维度为 :math:`[N_1, N_2, ..., N_k, D]` 的多维Tensor,其中最后一维D是类别数目。数据类型为float32或float64。 + - **label** (Variable) - 目标值,维度为 :math:`[N_1, N_2, ..., N_k, D]` 的多维Tensor,其中最后一维D是类别数目。数据类型为float32或float64。 + +返回:预测值和目标值的方差 + +返回类型:变量(Variable) + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + y = fluid.layers.data(name='y', shape=[1], dtype='float32') + y_predict = fluid.layers.data(name='y_predict', shape=[1], dtype='float32') + cost = fluid.layers.square_error_cost(input=y_predict, label=y) + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/squeeze_cn.rst b/doc/paddle/api/paddle/fluid/layers/squeeze_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..026f38455ff2e877cb13952e0c446427196ee83a --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/squeeze_cn.rst @@ -0,0 +1,66 @@ +.. _cn_api_fluid_layers_squeeze: + +squeeze +------------------------------- + +.. py:function:: paddle.fluid.layers.squeeze(input, axes, name=None) + + + + +该OP会根据axes压缩输入Tensor的维度。如果指定了axes,则会删除axes中指定的维度,axes指定的维度要等于1。如果没有指定axes,那么所有等于1的维度都会被删除。 + +- 例1: + +.. code-block:: python + + 输入: + X.shape = [1,3,1,5] + axes = [0] + 输出; + Out.shape = [3,1,5] +- 例2: + +.. 
code-block:: python + + 输入: + X.shape = [1,3,1,5] + axes = [] + 输出: + Out.shape = [3,5] +- 例3: + +.. code-block:: python + + 输入: + X.shape = [1,3,1,5] + axes = [-2] + 输出: + Out.shape = [1,3,5] + +参数: + - **input** (Variable) - 输入任意维度的Tensor。 支持的数据类型:float32,float64,int8,int32,int64。 + - **axes** (list) - 输入一个或一列整数,代表要压缩的轴。axes的范围: :math:`[-rank(input), rank(input))` 。 axes为负数时, :math:`axes=axes+rank(input)` 。 + - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回: 返回对维度进行压缩后的Tensor。数据类型与输入Tensor一致。 + +返回类型:Variable + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.layers as layers + x = layers.data(name='x', shape=[5, 1, 10]) + y = layers.squeeze(input=x, axes=[1]) #y.shape=[5, 10] + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/ssd_loss_cn.rst b/doc/paddle/api/paddle/fluid/layers/ssd_loss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1fff5eb8cfae0c4edfa01516114cc44e40081073 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/ssd_loss_cn.rst @@ -0,0 +1,98 @@ +.. _cn_api_fluid_layers_ssd_loss: + +ssd_loss +------------------------------- + +.. py:function:: paddle.fluid.layers.ssd_loss(location, confidence, gt_box, gt_label, prior_box, prior_box_var=None, background_label=0, overlap_threshold=0.5, neg_pos_ratio=3.0, neg_overlap=0.5, loc_loss_weight=1.0, conf_loss_weight=1.0, match_type='per_prediction', mining_type='max_negative', normalize=True, sample_size=None) + + + + +该OP用于SSD物体检测算法的多窗口损失层 + +该层用于计算SSD的损失,给定位置偏移预测,置信度预测,候选框和真实框标签,以及难样本挖掘的类型。通过执行以下步骤,返回的损失是本地化损失(或回归损失)和置信度损失(或分类损失)的加权和: + +1、通过二分匹配算法查找匹配的边界框。 + + 1.1、计算真实框与先验框之间的IOU相似度。 + + 1.2、通过二分匹配算法计算匹配的边界框。 + +2、计算难分样本的置信度 + + 2.1、根据匹配的索引获取目标标签。 + + 2.2、计算置信度损失。 + +3、应用难样本挖掘算法来获取负样本索引并更新匹配的索引。 + +4、分配分类和回归目标 + + 4.1、根据生成的候选框bbox进行编码。 + + 4.2、分配回归目标。 + + 4.3、分配分类目标。 + +5、计算总体的物体损失。 + + 5.1计算置信度(confidence)损失。 + + 5.1计算回归(location)损失。 + + 5.3计算总体加权损失。 + +参数: + - **location** (Variable)- 位置预测,具有形状[N,Np,4]的3D-Tensor,N是batch大小,Np是每个实例的预测总数。 4是坐标的维数,布局是[xmin,ymin,xmax,ymax],xmin,ymin代表box左上坐标,xmax,ymax代表box右下坐标,数据类型为float32或float64。 + - **confidence** (Variable) - 置信度(分类)预测,具有形状[N,Np,C]的3D-Tensor,N是batch大小,Np是每个实例的预测总数,C是类别数量,数据类型为float32或float64。 + - **gt_box** (Variable)- 真实框(bbox),具有形状[Ng,4]的2D LoDTensor,Ng是mini-batch输入的真实框(bbox)的总数,4是坐标的维数,布局是[xmin,ymin,xmax,ymax],xmin,ymin代表box左上坐标,xmax,ymax代表box右下坐标,数据类型为float32或float64。 + - **gt_label** (Variable)- ground-truth标签, 具有形状[Ng,1]的2D LoDTensor,Ng是mini-batch输入的真实框(bbox)的总数,1表示类别号,数据类型为float32或float64。 + - **prior_box** (Variable)- 检测网络生成的候选框, 具有形状[Np,4]的2D-Tensor,Np是生成的候选框总数,4是坐标的维数,布局是[xmin,ymin,xmax,ymax],xmin,ymin代表box左上坐标,xmax,ymax代表box右下坐标,数据类型为float32或float64。。 + - **prior_box_var** (Variable)- 候选框的方差, 具有形状[Np,4]的2D张量,形状及数据类型同 ``prior_box`` 。 + - **background_label** (int)- background标签的索引,默认为0。 + - **overlap_threshold** (float)- 额外匹配的bbox阈值,当找到匹配的框,如果 ``match_type`` 为'per_prediction',使用 ``overlap_threshold`` 确定额外匹配的bbox。默认为0.5。 + - **neg_pos_ratio** (float)- 负框与正框的比率,仅在 ``mining_type`` 为'max_negative'时使用,默认为3.0。 + - **neg_overlap** (float)- 不匹配预测的负重叠上限。仅当 ``mining_type`` 为'max_negative'时使用,默认为0.5。 + - **loc_loss_weight** (float)- 回归损失的权重,默认为1.0。 + - **conf_loss_weight** (float)- 置信度损失的权重,默认为1.0。 + - **match_type** (str)- 训练期间匹配方法的类型应为'bipartite'或'per_prediction',默认为'per_prediction'。 + - **mining_type** (str)- 难样本挖掘类型,分为'hard_example'或'max_negative',目前只支持'max_negative'。 + - **normalize** (bool)- 是否通过输出位置的总数将SSD损失标准化,默认为True。 + - **sample_size** (int)- 负样本框的最大样本大小,仅在 
``mining_type`` 为'hard_example'时使用。 + +返回: Variable(Tensor) 定位损失和置信度损失的加权和, 具有形状[N * Np,1], N是batch大小,Np是每个实例的预测总数,数据类型为float32或float64。 + +抛出异常: ``ValueError`` - 如果 ``mining_type`` 是'hard_example',目前只支持 ``max_negative`` 的挖掘类型。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + pb = fluid.layers.data( + name='prior_box', + shape=[10, 4], + append_batch_size=False, + dtype='float32') + pbv = fluid.layers.data( + name='prior_box_var', + shape=[10, 4], + append_batch_size=False, + dtype='float32') + loc = fluid.layers.data(name='target_box', shape=[10, 4], dtype='float32') + scores = fluid.layers.data(name='scores', shape=[10, 21], dtype='float32') + gt_box = fluid.layers.data( + name='gt_box', shape=[4], lod_level=1, dtype='float32') + gt_label = fluid.layers.data( + name='gt_label', shape=[1], lod_level=1, dtype='float32') + loss = fluid.layers.ssd_loss(loc, scores, gt_box, gt_label, pb, pbv) + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/stack_cn.rst b/doc/paddle/api/paddle/fluid/layers/stack_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b0c338f0515ab31308db52becf071d6fd2d46839 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/stack_cn.rst @@ -0,0 +1,77 @@ +.. _cn_api_fluid_layers_stack: + +stack +------------------------------- + +.. py:function:: paddle.fluid.layers.stack(x, axis=0) + + + + + +该OP沿 ``axis`` 轴对输入 ``x`` 进行堆叠操作。 + +- 例1: + +.. code-block:: text + + 输入: + x[0].shape = [1, 2] + x[0].data = [ [1.0 , 2.0 ] ] + x[1].shape = [1, 2] + x[1].data = [ [3.0 , 4.0 ] ] + x[2].shape = [1, 2] + x[2].data = [ [5.0 , 6.0 ] ] + + 参数: + axis = 0 #沿着第0维对输入x进行堆叠操作。 + + 输出: + Out.shape = [3, 1, 2] + Out.data = [ [ [1.0, 2.0] ], + [ [3.0, 4.0] ], + [ [5.0, 6.0] ] ] + + +- 例2: + +.. code-block:: text + + 输入: + x[0].shape = [1, 2] + x[0].data = [ [1.0 , 2.0 ] ] + x[1].shape = [1, 2] + x[1].data = [ [3.0 , 4.0 ] ] + x[2].shape = [1, 2] + x[2].data = [ [5.0 , 6.0 ] ] + + 参数: + axis = 1 or axis = -2 #沿着第1维对输入进行堆叠操作。 + + 输出: + Out.shape = [1, 3, 2] + Out.data = [ [ [1.0, 2.0] + [3.0, 4.0] + [5.0, 6.0] ] ] + +参数: + - **x** (list(Variable)|tuple(Variable)) – 输入 x 可以是单个Tensor,或是多个Tensor组成的列表。如果 x 是一个列表,那么这些Tensor的维度必须相同。 假设输入是N维Tensor :math:`[d_0,d_1,...,d_{n−1}]`,则输出变量的维度为N+1维 :math:`[d_0,d_1,...d_{axis-1},len(x),d_{axis}...,d_{n−1}]` 。支持的数据类型: float32,float64,int32,int64。 + - **axis** (int, 可选) – 指定对输入Tensor进行堆叠运算的轴,有效 ``axis`` 的范围是: :math:`[-(R+1), R+1)`,R是输入中第一个Tensor的rank。如果 ``axis`` < 0,则 :math:`axis=axis+rank(x[0])+1` 。axis默认值为0。 + +返回: 堆叠运算后的Tensor,数据类型与输入Tensor相同。输出维度等于 :math:`rank(x[0])+1` 维。 + +返回类型: Variable + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.layers as layers + # set batch size=None + x1 = fluid.data(name='x1', shape=[None, 1, 2], dtype='int32') + x2 = fluid.data(name='x2', shape=[None, 1, 2], dtype='int32') + # stack Tensor list + data = layers.stack([x1,x2]) # stack according to axis 0, data.shape=[2, None, 1, 2] + + data = layers.stack([x1,x2], axis=1) # stack according to axis 1, data.shape=[None, 2, 1, 2] diff --git a/doc/paddle/api/paddle/fluid/layers/stanh_cn.rst b/doc/paddle/api/paddle/fluid/layers/stanh_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e7d1b211cdcd7d623a90584131875cd44ebe7ef6 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/stanh_cn.rst @@ -0,0 +1,48 @@ +.. _cn_api_fluid_layers_stanh: + +stanh +------------------------------- + +.. 
py:function:: paddle.fluid.layers.stanh(x, scale_a=0.67, scale_b=1.7159, name=None) + + + + +STanh 激活算子(STanh Activation Operator.) + +.. math:: + \\out=b*\frac{e^{a*x}-e^{-a*x}}{e^{a*x}+e^{-a*x}}\\ + +参数: + - **x** (Tensor|LoDTensor) - 数据类型为float32,float64。STanh operator的输入 + - **scale_a** (float) - 输入的a的缩放参数 + - **scale_b** (float) - b的缩放参数 + - **name** (str|None) - 这个层的名称(可选)。如果设置为None,该层将被自动命名 + +返回: 与输入shape相同的张量 + +返回类型: Variable(Tensor),数据类型为float32的Tensor。 + +**代码示例:** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + data = fluid.layers.data(name="input", shape=[-1, 3]) + result = fluid.layers.stanh(data,scale_a=0.67, scale_b=1.72) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + x = np.random.random(size=(3, 3)).astype('float32') + output= exe.run(feed={"input": x}, + fetch_list=[result]) + print(output) + """ + output: + [array([[0.626466 , 0.89842904, 0.7501062 ], + [0.25147712, 0.7484996 , 0.22902708], + [0.62705994, 0.23110689, 0.56902856]], dtype=float32)] + """ + + diff --git a/doc/paddle/api/paddle/fluid/layers/strided_slice_cn.rst b/doc/paddle/api/paddle/fluid/layers/strided_slice_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..bc608cc55ce0f118348532277a20e2267f7d0a10 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/strided_slice_cn.rst @@ -0,0 +1,86 @@ +.. _cn_api_fluid_layers_strided_slice: + +strided_slice +------------------------------- +.. py:function:: paddle.fluid.layers.strided_slice(input, axes, starts, ends, strides) + + + +strided_slice算子。 + +该OP沿多个轴生成 ``input`` 的切片,与numpy类似: https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html。该OP使用 ``axes`` 、 ``starts`` 和 ``ends`` 属性来指定轴列表中每个轴的起点和终点位置,并使用此信息来对 ``input`` 切片。如果向 ``starts`` 或 ``ends`` 传递负值如 :math:`-i`,则表示该轴的反向第 :math:`i-1` 个位置(这里以0为初始位置), ``strides`` 表示切片的步长, ``strides`` 如果为负数,则按照反方向进行切片。如果传递给 ``starts`` 或 ``ends`` 的值大于n(维度中的元素数目),则表示n。当切片一个未知数量的维度时,建议传入 ``INT_MAX``。 ``axes`` 、 ``starts`` 和 ``ends`` 以及 ``strides`` 四个参数的元素数目必须相等。以下示例将解释切片如何工作: + +:: + + + 示例1: + 给定: + data=[[1,2,3,4],[5,6,7,8],] + axes=[0,1] + starts=[1,0] + ends=[2,3] + strides=[1,1] + + 则: + result=[[5,6,7],] + 示例2: + 给定: + data=[[1,2,3,4],[5,6,7,8],] + axes=[0,1] + starts=[1,3] + ends=[2,0] + strides=[1,-1] + + 则: + result=[[8,7,6],] + 示例3: + 给定: + data=[[1,2,3,4],[5,6,7,8],] + axes=[0,1] + starts=[0,1] + ends=[-1,1000] # 此处-1表示第0维的反向第0个位置,索引值是1。 + strides =[1,3] + 则: + result=[[2],] + + +参数: + + - **input** (Variable)- 多维 ``Tensor`` 或 ``LoDTensor``,数据类型为 ``float32``,``float64``,``int32``,或 ``int64``。 + - **axes** (list|tuple)- 数据类型是 ``int32``。表示进行切片的轴。 + - **starts** (list|tuple|Variable)- 数据类型是 ``int32``。如果 ``starts`` 的类型是 list 或 tuple,它的元素可以是整数或者形状为[1]的 ``Tensor`` 或 ``LoDTensor``。如果 ``starts`` 的类型是 ``Variable``,则是1-D ``Tensor`` 或 ``LoDTensor``。表示在各个轴上切片的起始索引值。 + - **ends** (list|tuple|Variable)- 数据类型是 ``int32``。如果 ``ends`` 的类型是 list 或 tuple,它的元素可以是整数或者形状为[1]的 ``Tensor`` 或 ``LoDTensor``。如果 ``ends`` 的类型是 ``Variable``,则是1-D ``Tensor`` 或 ``LoDTensor``。表示在各个轴上切片的结束索引值。 + - **strides** (list|tuple|Variable)- 数据类型是 ``int32``。如果 ``strides`` 的类型是 list 或 tuple,它的元素可以是整数或者形状为[1]的 ``Tensor`` 或 ``LoDTensor``。如果 ``strides`` 的类型是 ``Variable``,则是1-D ``Tensor`` 或 ``LoDTensor``。表示在各个轴上切片的步长。 + + +返回:多维 ``Tensor`` 或 ``LoDTensor``,数据类型与 ``input`` 相同。 + + +返回类型:Variable。 + +抛出异常: + - :code:`TypeError`:``starts`` 的类型应该是 list、tuple 或 Variable。 + - :code:`TypeError`:``ends`` 的类型应该是 list、tuple 或 Variable。 + - 
:code:`TypeError`:``strides`` 的类型应该是 list、tuple 或 Variable。 + +**代码示例:** + +.. code-block:: python + + import paddle.fluid as fluid + input = fluid.layers.data( + name="input", shape=[3, 4, 5, 6], dtype='float32', append_batch_size=False) + # example 1: + # attr starts is a list which doesn't contain tensor Variable. + axes = [1, 2, 3] + starts = [-3, 0, 2] + ends = [3, 2, 4] + strides_1 = [1, 1, 1] + strides_2 = [1, 1, 2] + sliced_1 = fluid.layers.strided_slice(input, axes=axes, starts=starts, ends=ends, strides=strides_1) + # sliced_1 is input[:, 0:3:1, 0:2:1, 2:4:1]. + # example 2: + # attr starts is a list which contain tensor Variable. + minus_3 = fluid.layers.fill_constant([1], "int32", -3) + sliced_2 = fluid.layers.strided_slice(input, axes=axes, starts=[minus_3, 0, 2], ends=ends, strides=strides_2) + # sliced_2 is input[:, 0:3:1, 0:2:1, 2:4:2]. diff --git a/doc/paddle/api/paddle/fluid/layers/sum_cn.rst b/doc/paddle/api/paddle/fluid/layers/sum_cn.rst new file mode 100755 index 0000000000000000000000000000000000000000..0dc93e01cf6a6f44293843654f20a6214009e559 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/sum_cn.rst @@ -0,0 +1,81 @@ +.. _cn_api_fluid_layers_sum: + +sum +------------------------------- + +.. py:function:: paddle.fluid.layers.sum(x) + + + + +该OP用于对输入的一至多个Tensor或LoDTensor求和。如果输入的是LoDTensor,输出仅与第一个输入共享LoD信息(序列信息)。 + +例1: +:: + 输入: + input.shape = [2, 3] + input = [[1, 2, 3], + [4, 5, 6]] + + 输出: + output.shape = [2, 3] + output = [[1, 2, 3], + [4, 5, 6]] + +例2: +:: + 输入: + 第一个输入: + input1.shape = [2, 3] + input1 = [[1, 2, 3], + [4, 5, 6]] + + 第二个输入: + input2.shape = [2, 3] + input2 = [[7, 8, 9], + [10, 11, 12]] + + 输出: + output.shape = [2, 3] + output = [[8, 10, 12], + [14, 16, 18]] + +参数: + **x** (Variable|list(Variable)) - 输入的一至多个Variable。如果输入了多个Variable,则不同Variable间的shape和数据类型应保持一致。Variable为多维Tensor或LoDTensor,数据类型支持:float32,float64,int32,int64 + +返回:对输入 ``x`` 中的Variable求和后的结果,shape和数据类型与 ``x`` 一致 + +返回类型:Variable + + +**代码示例:** + +.. code-block:: python + + import paddle.fluid as fluid + input0 = fluid.layers.fill_constant(shape=[2, 3], dtype='int64', value=5) + input1 = fluid.layers.fill_constant(shape=[2, 3], dtype='int64', value=3) + sum = fluid.layers.sum([input0, input1]) + + #用户可以通过executor打印出求和的结果 + out = fluid.layers.Print(sum, message="the sum of input0 and input1: ") + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(fluid.default_main_program()) + + #打印出的数据为: + 1570701754 the sum of input0 and input1: The place is:CPUPlace + Tensor[sum_0.tmp_0] + shape: [2,3,] + dtype: l + data: 8,8,8,8,8,8, + + #输出了shape为[2,3]的Tensor,与输入的shape一致 + #dtype为对应C++数据类型,在不同环境下可能显示值不同,但本质相同 + #例如:如果Tensor中数据类型是int64,则对应的C++数据类型为int64_t,所以dtype值为typeid(int64_t).name(), + # 其在MacOS下为'x',linux下为'l',Windows下为'__int64',都表示64位整型变量 + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/sums_cn.rst b/doc/paddle/api/paddle/fluid/layers/sums_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7385626c3dcaf0e05d46a37438132bf1b5c4a26f --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/sums_cn.rst @@ -0,0 +1,57 @@ +.. _cn_api_fluid_layers_sums: + +sums +------------------------------- + +.. py:function:: paddle.fluid.layers.sums(input,out=None) + + + + +该OP计算多个输入Tensor逐个元素相加的和。 + +- 示例:3个Tensor求和 + +.. 
code-block:: python + + 输入: + x0.shape = [2, 3] + x0.data = [[1., 2., 3.], + [4., 5., 6.]] + x1.shape = [2, 3] + x1.data = [[10., 20., 30.], + [40., 50., 60.]] + x2.shape = [2, 3] + x2.data = [[100., 200., 300.], + [400., 500., 600.]] + + 输出: + out.shape = [2, 3] + out.data = [[111., 222., 333.], + [444., 555., 666.]] + + +参数: + - **input** (list) - 多个维度相同的Tensor组成的元组。支持的数据类型:float32,float64,int32,int64。 + - **out** (Variable,可选) - 指定求和的结果Tensor,可以是程序中已经创建的任何Variable。默认值为None,此时将创建新的Variable来保存输出结果。 + +返回:输入的和,数据类型和维度与输入Tensor相同。若 ``out`` 为 ``None`` ,返回值是一个新的Variable;否则,返回值就是 ``out`` 。 + +返回类型:Variable + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + + x0 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=1) + x1 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=2) + x2 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=3) + x3 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=0) + + # 多个Tensor求和,结果保存在一个新建的Variable sum0,即sum0=x0+x1+x2,值为[[6, ..., 6], ..., [6, ..., 6]] + sum0 = fluid.layers.sums(input=[x0, x1, x2]) + + # 多个Tensor求和,sum1和x3是同一个Variable,相当于x3=x0+x1+x2,值为[[6, ..., 6], ..., [6, ..., 6]] + sum1 = fluid.layers.sums(input=[x0, x1, x2], out=x3) diff --git a/doc/paddle/api/paddle/fluid/layers/swish_cn.rst b/doc/paddle/api/paddle/fluid/layers/swish_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..392858c851f6985e037867db9cdfaea65d0d7afb --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/swish_cn.rst @@ -0,0 +1,75 @@ +.. _cn_api_fluid_layers_swish: + +swish +------------------------------- + +.. py:function:: paddle.fluid.layers.swish(x, beta=1.0, name=None) + + + + +逐元素计算 Swish 激活函数,参考 `Searching for Activation Functions `_ 。 + +.. math:: + out = \frac{x}{1 + e^{- beta * x}} + +参数: + - **x** (Variable) - 多维 Tensor 或 LoDTensor,数据类型为 float32,float64。 + - **beta** (float) - Swish operator 的常量 beta,默认值为 1.0。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回: + - Swish op 的结果,多维 Tensor 或 LoDTensor。数据类型为 float32 或 float64,数据类型以及形状和输入 x 一致。 + +返回类型: + - Variable + + +**代码示例:** + +.. code-block:: python + + # 静态图使用 + import numpy as np + from paddle import fluid + + x = fluid.data(name="x", shape=(-1, 3), dtype="float32") + y = fluid.layers.swish(x, beta=2.0) + + place = fluid.CPUPlace() + exe = fluid.Executor(place) + start = fluid.default_startup_program() + main = fluid.default_main_program() + + data = np.random.randn(2, 3).astype("float32") + exe.run(start) + y_np, = exe.run(main, feed={"x": data}, fetch_list=[y]) + + data + # array([[-1.1239197 , 1.3391294 , 0.03921051], + # [ 1.1970421 , 0.02440812, 1.2055548 ]], dtype=float32) + y_np + # array([[-0.2756806 , 1.0610548 , 0.01998957], + # [ 0.9193261 , 0.01235299, 0.9276883 ]], dtype=float32) + +.. 
code-block:: python + + # 动态图使用 + import numpy as np + from paddle import fluid + import paddle.fluid.dygraph as dg + + data = np.random.randn(2, 3).astype("float32") + place = fluid.CPUPlace() + with dg.guard(place) as g: + x = dg.to_variable(data) + y = fluid.layers.swish(x) + y_np = y.numpy() + data + # array([[-0.0816701 , 1.1603649 , -0.88325626], + # [ 0.7522361 , 1.0978601 , 0.12987892]], dtype=float32) + y_np + # array([[-0.03916847, 0.8835007 , -0.25835553], + # [ 0.51126915, 0.82324016, 0.06915068]], dtype=float32) + + diff --git a/doc/paddle/api/paddle/fluid/layers/switch_case_cn.rst b/doc/paddle/api/paddle/fluid/layers/switch_case_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c6999157f428298be2ab570976fc16d32aebcd9e --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/switch_case_cn.rst @@ -0,0 +1,79 @@ +.. _cn_api_fluid_layers_switch_case: + +switch_case +------------------------------- + + +.. py:function:: paddle.fluid.layers.switch_case(branch_index, branch_fns, default=None, name=None) + + + + +该OP的运行方式类似于c++的switch/case。 + +参数: + - **branch_index** (Variable)- 形状为[1]的Tensor,指定将要执行的分支。数据类型是 ``int32``, ``int64`` 或 ``uint8``。 + - **branch_fns** (dict|list|tuple) - 如果 ``branch_fns`` 是一个list或tuple,它的元素可以是 (int, callable) 二元组,即由整数和可调用对象构成的二元组,整数表示对应的可调用对象的键;也可以仅仅是可调用对象,它在list或者tuple中的实际索引值将作为该可调用对象的键。如果 ``branch_fns`` 是一个字典,那么它的键是整数,它的值是可调用对象。所有的可调用对象都返回相同结构的Tensor。 + - **default** (callable,可选) - 可调用对象,返回一个或多个张量。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值:None。 + +返回:如果 ``branch_fns`` 中存在与 ``branch_index`` 匹配的可调用对象,则返回该可调用对象的返回结果;如果 ``branch_fns`` 中不存在与 ``branch_index`` 匹配的可调用对象且 ``default`` 不是None,则返回调用 ``default`` 的返回结果; +如果 ``branch_fns`` 中不存在与 ``branch_index`` 匹配的可调用对象且 ``default`` 是None,则返回 ``branch_fns`` 中键值最大的可调用对象的返回结果。 + +返回类型:Variable|list(Variable) + +抛出异常: + - ``TypeError`` - 如果 ``branch_index`` 的类型不是list或tuple。 + - ``TypeError`` - 如果 ``branch_index`` 的数据类型不是 ``int32``, ``int64`` 或 ``uint8``。 + - ``TypeError`` - 如果 ``branch_fns`` 的类型不是dict,list或tuple。 + - ``TypeError`` - 如果 ``branch_fns`` 的元素不是2-tuple。 + - ``TypeError`` - 如果 ``branch_fns`` 中的2-tuple的第一个元素的类型不是整数。 + - ``ValueError`` - 如果 ``branch_fns`` 中的2-tuple的第一个元素值不唯一。 + - ``TypeError`` - 如果 ``branch_fns`` 中的2-tuple的第二个元素不是可调用对象。 + - ``TypeError`` - 当 ``default`` 不是None又不是可调用对象时。 + + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.layers as layers + + def fn_1(): + return layers.fill_constant(shape=[1, 2], dtype='float32', value=1) + + def fn_2(): + return layers.fill_constant(shape=[2, 2], dtype='int32', value=2) + + def fn_3(): + return layers.fill_constant(shape=[3], dtype='int32', value=3) + + main_program = fluid.default_startup_program() + startup_program = fluid.default_main_program() + with fluid.program_guard(main_program, startup_program): + index_1 = layers.fill_constant(shape=[1], dtype='int32', value=1) + index_2 = layers.fill_constant(shape=[1], dtype='int32', value=2) + + out_1 = layers.switch_case( + branch_index=index_1, + branch_fns={1: fn_1, 2: fn_2}, + default=fn_3) + + out_2 = layers.switch_case( + branch_index=index_2, + branch_fns=[(1, fn_1), (2, fn_2)], + default=fn_3) + + # Argument default is None and no index matches. fn_3 will be called because of the max index 7. 
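+            # index_2 holds the value 2, which matches none of the keys {0, 4, 7} in branch_fns;
+            # since default is None, the callable with the largest key (fn_3) is executed, so res_3 is [3 3 3].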
+ out_3 = layers.switch_case( + branch_index=index_2, + branch_fns=[(0, fn_1), (4, fn_2), (7, fn_3)]) + + exe = fluid.Executor(fluid.CPUPlace()) + res_1, res_2, res_3 = exe.run(main_program, fetch_list=[out_1, out_2, out_3]) + print(res_1) # [[1. 1.]] + print(res_2) # [[2 2] [2 2]] + print(res_3) # [3 3 3] + + diff --git a/doc/paddle/api/paddle/fluid/layers/tanh_cn.rst b/doc/paddle/api/paddle/fluid/layers/tanh_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c5fbd21242439402ebda6b4fcd5e6aedec699aa7 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/tanh_cn.rst @@ -0,0 +1,46 @@ +.. _cn_api_fluid_layers_tanh: + +tanh +------------------------------- + +.. py:function:: paddle.fluid.layers.tanh(x, name=None) + + + + + +tanh 激活函数 + +.. math:: + out = \frac{e^{x} - e^{-x}}{e^{x} + e^{-x}} + + +参数: + + - **x** - Tanh算子的输入 + - **name** (str|None) - 该层名称(可选)。若设为None,则自动为该层命名。 + +返回: 张量(Tensor) + +返回类型: 变量(Variable) + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + data = fluid.layers.data(name="input", shape=[32, 784]) + result = fluid.layers.tanh(data) + + + + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/tanh_shrink_cn.rst b/doc/paddle/api/paddle/fluid/layers/tanh_shrink_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4bdd4120aa88595f1a8344b77fb5ced845e706e8 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/tanh_shrink_cn.rst @@ -0,0 +1,40 @@ +.. _cn_api_fluid_layers_tanh_shrink: + +tanh_shrink +------------------------------- + +.. py:function:: paddle.fluid.layers.tanh_shrink(x, name=None) + + + + +tanh_shrink激活函数 + +.. math:: + out = x - \frac{e^{x} - e^{-x}}{e^{x} + e^{-x}} + +参数: + + - **x** - TanhShrink算子的输入 + - **name** (str|None) - 该层名称(可选)。若设为None,则自动为该层命名。 + +返回: 张量(Tensor) + +返回类型: 变量(Variable) + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + data = fluid.layers.data(name="input", shape=[32, 784]) + result = fluid.layers.tanh_shrink(data) + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/target_assign_cn.rst b/doc/paddle/api/paddle/fluid/layers/target_assign_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7a8b47b33bc5aac3905c0426f49c17391c6925f9 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/target_assign_cn.rst @@ -0,0 +1,71 @@ +.. _cn_api_fluid_layers_target_assign: + +target_assign +------------------------------- + +.. py:function:: paddle.fluid.layers.target_assign(input, matched_indices, negative_indices=None, mismatch_value=None, name=None) + + + + +对于每个实例,根据 ``match_indices`` 和 ``negative_indices`` 位置索引, 给输入 ``out`` 和 ``out_weight`` 赋值。输入 ``input`` 和 ``negative_indices`` 均为2-D LoDTensor。假如 ``input`` 中每个实例的行偏移称作lod,该操作计算步骤如下: + +1.根据match_indices赋值: + +.. code-block:: text + + If id = match_indices[i][j] > 0, + + out[i][j][0 : K] = X[lod[i] + id][j % P][0 : K] + out_weight[i][j] = 1. + + Otherwise, + + out[j][j][0 : K] = {mismatch_value, mismatch_value, ...} + out_weight[i][j] = 0. + +2.如果提供neg_indices,则再次依据该输入赋值: + +neg_indices中的第i个实例的索引称作neg_indice,则对于第i个实例: + +.. 
code-block:: text + + for id in neg_indice: + out[i][id][0 : K] = {mismatch_value, mismatch_value, ...} + out_weight[i][id] = 1.0 + +参数: + - **input** (Variable) - 输入为3-D LoDTensor,为了方便在上述文档中解释,假如维度是[M,P,K]。 + - **matched_indices** (Variable) - 输入为2-D Tensor,数据类型为int32,表示在输入中匹配位置,具体计算如上,同样,为了方便解释,假如维度大小为[N,P],如果 ``matched_indices[i][j]`` 为-1,表示在第 ``i`` 个实例中第j列项没有任何匹配项,输出会设置成 ``mismatch_value`` 。 + - **negative_indices** (Variable,可选) - 维度为2-D LoDTensor,数据类型为int32。可以不设置,如果设置,会依据该位置索引再次给输出赋值,具体参考上述文档。 + - **mismatch_value** (float32,可选) - 未匹配的位置填充值。 + - **name** (str) - 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:返回一个元组(out,out_weight)。out是三维张量,维度为[N,P,K],N和P与 ``matched_indices`` 中的N和P一致,K和输入X中的K一致。 ``out_weight`` 的维度为[N,P,1]。 + +返回类型:tuple(Variable) + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + x = fluid.data( + name='x', + shape=[4, 20, 4], + dtype='float', + lod_level=1) + matched_id = fluid.data( + name='indices', + shape=[8, 20], + dtype='int32') + trg, trg_weight = fluid.layers.target_assign( + x, + matched_id, + mismatch_value=0) + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/teacher_student_sigmoid_loss_cn.rst b/doc/paddle/api/paddle/fluid/layers/teacher_student_sigmoid_loss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..562c1ee511077dc98985d198c824db9176c78754 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/teacher_student_sigmoid_loss_cn.rst @@ -0,0 +1,51 @@ +.. _cn_api_fluid_layers_teacher_student_sigmoid_loss: + +teacher_student_sigmoid_loss +----------------------------------- + +.. py:function:: paddle.fluid.layers.teacher_student_sigmoid_loss(input, label, soft_max_up_bound=15.0, soft_max_lower_bound=-15.0) + + + + +**Teacher Student Log Loss Layer(教师--学生对数损失层)** + +定制化需求,用于student萃取teacher的值。此图层接受输入预测和目标标签,并返回teacher_student损失。 +z表示是否点击,z'表示teacher q值。label取值范围{-2,-1,[0, 2]} +teacher q值不存在时,点击时label为-1,否则为-2。 +teacher q值存在时,点击时label为z',否则为1 + z'。 + +.. math:: + + loss = max(x, 0) - x * z + log(1 + exp(-abs(x))) + max(x, 0) - x * z' + log(1 + exp(-abs(x))) + +其中: + - :math:`x` :预测输入值。 + - :math:`z` :是否点击。 + - :math:`z'` :teacher q值。 + + +参数: + - **input** (Variable) – 形状为[N x 1]的2-d Tensor,其中N是批大小batch size。 该输入是由前一个运算计算而得的概率,数据类型为float32或者float64。 + - **label** (Variable) – 具有形状[N x 1]的2-d Tensor的真实值,其中N是批大小batch_size,数据类型为float32或者float64。 + - **soft_max_up_bound** (float) – 若input > soft_max_up_bound, 输入会被向下限制。默认为15.0 。 + - **soft_max_lower_bound** (float) – 若input < soft_max_lower_bound, 输入将会被向上限制。默认为-15.0 。 + +返回:具有形状[N x 1]的2-D Tensor,teacher_student_sigmoid_loss。 + +返回类型:Variable + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + + batch_size = 64 + label = fluid.layers.data( + name="label", shape=[batch_size, 1], dtype="int64", append_batch_size=False) + similarity = fluid.layers.data( + name="similarity", shape=[batch_size, 1], dtype="float32", append_batch_size=False) + cost = fluid.layers.teacher_student_sigmoid_loss(input=similarity, label=label) + + diff --git a/doc/paddle/api/paddle/fluid/layers/temporal_shift_cn.rst b/doc/paddle/api/paddle/fluid/layers/temporal_shift_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6f1b285069390ed96c616a2ecd5a2e8b31d5328f --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/temporal_shift_cn.rst @@ -0,0 +1,56 @@ +.. _cn_api_fluid_layers_temporal_shift: + +temporal_shift +------------------------------- +.. 
py:function:: paddle.fluid.layers.temporal_shift(x, seg_num, shift_ratio=0.25, name=None) + + + + +该OP用于对输入X做时序通道T上的位移操作,为TSM(Temporal Shift Module)中使用的操作。 + +输入(X)的形状应为[N*T, C, H, W],N是批大小,T是 ``seg_num`` 指定的时间段号,C是通道号,H和W是特征的高度和宽度。 + +时间偏移计算如下: + +步骤1:将输入(X)reshape为[N, T, C, H, W]。 + +步骤2:填充0到第二个(T)尺寸的变形结果,填充宽度每边为1,填充结果的形状为[N,T+2,C,H,W]。 + +步骤3:假设 ``shift_ratio`` 为1/4,切片填充结果如下: + +.. math:: + + slice1 &= x[:, :T, :C/4, :, :] + + slice2 &= x[:, 2:T+2, C/4:C/2, :, :] + + slice3 &= x[:, 1:T+1, C/2:, :, :] + +步骤4:沿第3(C)维连接三个切片,并将结果重塑为[N*T, C, H, W]。 + +有关时序移动的详细信息,请参阅文件: `Temporal Shift Module `_ + +参数: + - **x** (Variable) – 时移算符的输入张量。维度为 :math:`[N*T,C,H,W]` 的4-D Tensor。N为批量大小,T为时间段数,C为信道数,H为特征高度,W为特征宽度,数据类型为float32或float64。 + - **seg_num** (int) – 时间段编号,这应该是一个正整数。 + - **shift_ratio** (float) – 通道的移位比、通道的第一个 ``shift_ratio`` 部分沿时间维度移动-1,通道的第二个 ``shift_ratio`` 部分沿时间维度移动1,范围须在[0, 0.5]内。默认值0.25 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置。默认值:None。 + +返回:Variable(Tensor) 时序位移后的输出张量,维度与输入 ``x`` 一致。 + +返回类型:变量(Variable),数据类型与输入 ``x`` 一致。 + +抛出异常: ``TypeError`` – seg_num 必须是int类型 + + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + input = fluid.data(name='input', shape=[None,4,2,2], dtype='float32') + out = fluid.layers.temporal_shift(x=input, seg_num=2, shift_ratio=0.2) + + + diff --git a/doc/paddle/api/paddle/fluid/layers/tensor/create_global_var_cn.rst b/doc/paddle/api/paddle/fluid/layers/tensor/create_global_var_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..97034b6dc1c6bc60e8be576cb2332deb1f1d77f7 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/tensor/create_global_var_cn.rst @@ -0,0 +1,41 @@ +.. _cn_api_fluid_layers_create_global_var: + +create_global_var +------------------------------- + +.. py:function:: paddle.fluid.layers.create_global_var(shape,value,dtype,persistable=False,force_cpu=False,name=None) + + + + +该OP在全局块中创建一个新的Tensor,Tensor的值为 ``value`` 。 + +参数: + - **shape** (list[int])- 指定输出Tensor的形状,它可以是一个整数列表。 + - **value** (float)- 变量的值,填充新创建的变量。 + - **dtype** (str|numpy.dtype,可选)– 初始化数据类型。 + - **persistable** (bool,可选)- 是否为永久变量,默认:False。 + - **force_cpu** (bool,可选)- 是否将该变量压入CPU,默认值为 False。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:创建的Tensor变量 + +返回类型:Variable + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.layers as layers + var = layers.create_global_var(shape=[2,3], value=1.0, dtype='float32', + persistable=True, force_cpu=True, name='new_var') + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/tensor/create_parameter_cn.rst b/doc/paddle/api/paddle/fluid/layers/tensor/create_parameter_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3206e5292d3bf4f0150fb8c9ce96243e4d779c6e --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/tensor/create_parameter_cn.rst @@ -0,0 +1,43 @@ +.. _cn_api_fluid_layers_create_parameter: + +create_parameter +------------------------------- + + +.. 
py:function:: paddle.fluid.layers.create_parameter(shape,dtype,name=None,attr=None,is_bias=False,default_initializer=None) + + + + +该OP创建一个参数。该参数是一个可学习的变量, 拥有梯度并且可优化。 + +**注意:这是一个低级别的API。如果您希望自己创建新的op,这个API将非常有用,无需使用layers。** + +参数: + - **shape** (list[int]) - 指定输出Tensor的形状,它可以是一个整数列表。 + - **dtype** (str|numpy.dtype) – 初始化数据类型。可设置的字符串值有:"float16","float32","float64"。 + - **name** (str,可选) - 参数的名称。具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + - **attr** (ParamAttr,可选) - 指定参数的属性对象。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。默认值为None,表示将采用 ParamAttr 的默认方式初始化。 + - **is_bias** (bool,可选) - 当default_initializer为空,该值会对选择哪个默认初始化程序产生影响。如果is_bias为真,则使用initializer.Constant(0.0),否则使用Xavier(),默认值False。 + - **default_initializer** (Initializer,可选) - 参数的初始化程序,默认值为空。 + +返回:创建的Tensor变量。 + +返回类型:Variable。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.layers as layers + W = layers.create_parameter(shape=[784, 200], dtype='float32') + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/tensor_array_to_tensor_cn.rst b/doc/paddle/api/paddle/fluid/layers/tensor_array_to_tensor_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..777b9a572d2177368b3d2b0f9b091ac426a4d9b6 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/tensor_array_to_tensor_cn.rst @@ -0,0 +1,83 @@ +.. _cn_api_fluid_layers_tensor_array_to_tensor: + +tensor_array_to_tensor +------------------------------- + +.. py:function:: paddle.fluid.layers.tensor_array_to_tensor(input, axis=1, name=None, use_stack=False) + + + + +该OP将 ``input`` 这个LoDTensorArray中的所有Tensor沿 ``axis`` 指定的轴进行拼接(concat)或堆叠(stack)。 + +示例: + +:: + + - 案例 1: + + 给定: + + input.data = {[[0.6, 0.1, 0.3], + [0.5, 0.3, 0.2]], + [[1.3], + [1.8]], + [[2.3, 2.1], + [2.5, 2.4]]} + + axis = 1, use_stack = False + + 结果: + + output.data = [[0.6, 0.1, 0.3, 1.3, 2.3, 2.1], + [0.5, 0.3, 0.2, 1.8, 2.5, 2.4]] + + output_index.data = [3, 1, 2] + + - 案例 2: + + 给定: + + input.data = {[[0.6, 0.1], + [0.5, 0.3]], + [[0.3, 1.3], + [0.2, 1.8]], + [[2.3, 2.1], + [2.5, 2.4]]} + + axis = 1, use_stack = False + + 结果: + + output.data = [[[0.6, 0.1] + [0.3, 1.3] + [2.3, 2.1], + [[0.5, 0.3] + [0.2, 1.8] + [2.5, 2.4]]] + + output_index.data = [2, 2, 2] + +参数: + - **input** (Variable) - 输入的LoDTensorArray。支持的数据类型为:float32、float64、int32、int64。 + - **axis** (int,可选) - 指定对输入Tensor进行运算的轴, ``axis`` 的有效范围是[-R, R),R是输入 ``input`` 中Tensor的Rank,``axis`` 为负时与 ``axis`` +R 等价。默认值为1。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + - **use_stack** (bool,可选) – 指明使用stack或concat进行运算,若为stack模式,要求LoDTensorArray中的所有Tensor具有相同的形状。默认值为False。 + +返回:Variable的二元组, 包含了两个Tensor。第一个Tensor表示对数组内的元素进行stack或concat的输出结果,数据类型与数组中的Tensor相同;第二个Tensor包含了数组中各Tensor在 `axis` 维度的大小,数据类型为int32。 + +返回类型: tuple + +**代码示例:** + +.. 
code-block:: python + + import paddle.fluid as fluid + import numpy as np + x0 = fluid.layers.assign(np.random.rand(2, 2).astype("float32")) + x1 = fluid.layers.assign(np.random.rand(2, 2).astype("float32")) + i = fluid.layers.fill_constant(shape=[1], dtype="int64", value=0) + array = fluid.layers.create_array(dtype='float32') + fluid.layers.array_write(x0, i, array) + fluid.layers.array_write(x1, i + 1, array) + output, output_index = fluid.layers.tensor_array_to_tensor(input=array) diff --git a/doc/paddle/api/paddle/fluid/layers/thresholded_relu_cn.rst b/doc/paddle/api/paddle/fluid/layers/thresholded_relu_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..79ad0a40814e6cc1140355405f364996641a2c95 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/thresholded_relu_cn.rst @@ -0,0 +1,76 @@ +.. _cn_api_fluid_layers_thresholded_relu: + +thresholded_relu +------------------------------- + +.. py:function:: paddle.fluid.layers.thresholded_relu(x,threshold=None) + + + + +逐元素计算 ThresholdedRelu激活函数。 + +.. math:: + + out = \left\{\begin{matrix} + x, &if x > threshold\\ + 0, &otherwise + \end{matrix}\right. + +参数: + - **x** (Variable) -ThresholdedRelu Op 的输入,多维 Tensor 或 LoDTensor,数据类型为 float32,float64。 + - **threshold** (float,可选)-激活函数的 threshold 值,如 threshold 值为 None,则其值为 1.0。 + +返回: + - 多维 Tensor 或 LoDTensor, 数据类型为 float32 或 float64, 和输入 x 的数据类型相同,形状和输入 x 相同。 + +返回类型: + - Variable + +**代码示例**: + +.. code-block:: python + + # 静态图使用 + import numpy as np + from paddle import fluid + + x = fluid.data(name="x", shape=(-1, 3), dtype="float32") + y = fluid.layers.thresholded_relu(x, threshold=0.1) + + place = fluid.CPUPlace() + exe = fluid.Executor(place) + + start = fluid.default_startup_program() + main = fluid.default_main_program() + data = np.random.randn(2, 3).astype("float32") + exe.run(start) + y_np, = exe.run(main, feed={"x": data}, fetch_list=[y]) + + data + # array([[ 0.21134382, -1.1805999 , 0.32876605], + # [-1.2210793 , -0.7365624 , 1.0013918 ]], dtype=float32) + y_np + # array([[ 0.21134382, -0. , 0.32876605], + # [-0. , -0. , 1.0013918 ]], dtype=float32) + + +.. code-block:: python + + # 动态图使用 + import numpy as np + from paddle import fluid + import paddle.fluid.dygraph as dg + + data = np.random.randn(2, 3).astype("float32") + place = fluid.CPUPlace() + with dg.guard(place) as g: + x = dg.to_variable(data) + y = fluid.layers.thresholded_relu(x, threshold=0.1) + y_np = y.numpy() + data + # array([[ 0.21134382, -1.1805999 , 0.32876605], + # [-1.2210793 , -0.7365624 , 1.0013918 ]], dtype=float32) + y_np + # array([[ 0.21134382, -0. , 0.32876605], + # [-0. , -0. , 1.0013918 ]], dtype=float32) diff --git a/doc/paddle/api/paddle/fluid/layers/topk_cn.rst b/doc/paddle/api/paddle/fluid/layers/topk_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0411b2cba8f833863991c21e8d7cf15d4bb49fb1 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/topk_cn.rst @@ -0,0 +1,75 @@ +.. _cn_api_fluid_layers_topk: + +topk +------------------------------- +.. py:function:: paddle.fluid.layers.topk(input, k, name=None) + + + + +此OP用于查找输入Tensor的最后一维的前k个最大项,返回它们的值和索引。 +如果输入是1-D Tensor,则找到Tensor的前k个最大项,并输出前k个最大项的值和索引。如果输入是更高阶的Tensor,则该OP会基于最后一维计算前k项。 + +- 例1: + +.. 
code-block:: python + + 输入: + input.shape = [3, 4] + input.data = [[5, 4, 2, 3], + [9, 7, 10, 25], + [6, 2, 10, 1]] + k = 2 + + 输出: + 第一个输出: + values.shape = [3, 2] + values.data = [[5, 4], + [10, 25], + [6, 10]] + + 第二个输出: + indices.shape = [3, 2] + indices.data = [[0, 1], + [2, 3], + [0, 2]] + + +参数: + - **input** (Variable) - 输入的Tensor,支持的数据类型: float32,float64。 + - **k** (int|Variable) - 指定在输入Tensor最后一维中寻找最大前多少项。 + - **name** (str, 可选) - 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回: + - ``values``: 输入Tensor最后维切片的最大前k项。数据类型同输入Tensor一致。Tensor维度等于 :math:`input.shape[:-1]+ [k]` 。 + + - ``indices``: 输入Tensor最后维切片最大前k项值的索引,数据类型为int64,维度同values的维度。 + +抛出异常: + - ``ValueError`` : 如果k<1或者k大于输入的最后维。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.layers as layers + input = layers.data(name="input", shape=[13, 11], dtype='float32') + top5_values, top5_indices = layers.topk(input, k=5) #top5_values.shape=[13, 5],top5_indices.shape=[13, 5] + + # 1D Tensor + input1 = layers.data(name="input1", shape=[13], dtype='float32') + top5_values, top5_indices = layers.topk(input1, k=5) #top5_values.shape=[5],top5_indices.shape=[5] + + # k=Variable + input2 = layers.data(name="input2", shape=[13, 11], dtype='float32') + vk = layers.data(name="vk", shape=[1], dtype='int32') # 把k值保存在vk.data[0]中 + vk_values, vk_indices = layers.topk(input2, k=vk) #vk_values.shape=[13, k],vk_indices.shape=[13, k] + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/transpose_cn.rst b/doc/paddle/api/paddle/fluid/layers/transpose_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..78062e142cc00fcfd83e28c1f376602837c46571 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/transpose_cn.rst @@ -0,0 +1,60 @@ +.. _cn_api_fluid_layers_transpose: + +transpose +------------------------------- + +.. py:function:: paddle.fluid.layers.transpose(x,perm,name=None) + + + + +该OP根据perm对输入的多维Tensor进行数据重排。返回多维Tensor的第i维对应输入Tensor的perm[i]维。 + +参数: + - **x** (Variable) - 输入:x:[N_1, N_2, ..., N_k, D]多维Tensor,可选的数据类型为float16, float32, float64, int32, int64。 + - **perm** (list) - perm长度必须和X的维度相同,并依照perm中数据进行重排。 + - **name** (str) - 该层名称(可选)。 + +返回: 多维Tensor + +返回类型:Variable + +**示例**: + +.. code-block:: python + + x = [[[ 1 2 3 4] [ 5 6 7 8] [ 9 10 11 12]] + [[13 14 15 16] [17 18 19 20] [21 22 23 24]]] + shape(x) = [2,3,4] + + # 例0 + perm0 = [1,0,2] + y_perm0 = [[[ 1 2 3 4] [13 14 15 16]] + [[ 5 6 7 8] [17 18 19 20]] + [[ 9 10 11 12] [21 22 23 24]]] + shape(y_perm0) = [3,2,4] + + # 例1 + perm1 = [2,1,0] + y_perm1 = [[[ 1 13] [ 5 17] [ 9 21]] + [[ 2 14] [ 6 18] [10 22]] + [[ 3 15] [ 7 19] [11 23]] + [[ 4 16] [ 8 20] [12 24]]] + shape(y_perm1) = [4,3,2] + + +**代码示例**: + +.. code-block:: python + + # 请使用 append_batch_size=False 来避免 + # 在数据张量中添加多余的batch大小维度 + import paddle.fluid as fluid + x = fluid.layers.data(name='x', shape=[2, 3, 4], + dtype='float32', append_batch_size=False) + x_transposed = fluid.layers.transpose(x, perm=[1, 0, 2]) + print(x_transposed.shape) + #(3L, 2L, 4L) + + + diff --git a/doc/paddle/api/paddle/fluid/layers/unbind_cn.rst b/doc/paddle/api/paddle/fluid/layers/unbind_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f05aa5f0660d2247ff944884df7fe0d00d673f8f --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/unbind_cn.rst @@ -0,0 +1,36 @@ +.. _cn_api_paddle_tensor_unbind +unbind +------------------------------- + +.. 
py:function:: paddle.tensor.unbind(input, axis=0) + + + + +该OP将输入Tensor按照指定的维度分割成多个子Tensor。 + +**参数**: + - **input** (Variable) - 输入变量,数据类型为float32,float64,int32,int64的多维Tensor。 + - **axis** (int32|int64,可选) - 数据类型为int32或int64,表示需要分割的维度。如果axis < 0,则划分的维度为rank(input) + axis。默认值为0。 + +**返回**:分割后的Tensor列表。 + +**返回类型**:列表(Variable),数据类型为int32,int64,float32,float64。 + +**代码示例**: + +.. code-block:: python + + import paddle + # input is a variable which shape is [3, 4, 5] + input = paddle.fluid.data( + name="input", shape=[3, 4, 5], dtype="float32") + [x0, x1, x2] = paddle.tensor.unbind(input, axis=0) + # x0.shape [4, 5] + # x1.shape [4, 5] + # x2.shape [4, 5] + [x0, x1, x2, x3] = paddle.tensor.unbind(input, axis=1) + # x0.shape [3, 5] + # x1.shape [3, 5] + # x2.shape [3, 5] + # x3.shape [3, 5] diff --git a/doc/paddle/api/paddle/fluid/layers/unfold_cn.rst b/doc/paddle/api/paddle/fluid/layers/unfold_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f2c53703cadd08e37b0160d92aa2f0473996b0f1 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/unfold_cn.rst @@ -0,0 +1,67 @@ +.. _cn_api_fluid_layers_unfold: + +unfold +------------------------------- + +.. py:function:: paddle.fluid.layers.unfold(x, kernel_size, strides=1, paddings=0, dilation=1, name=None) + + + + +该OP实现的功能与卷积中用到的im2col函数一样,通常也被称作为im2col过程。对于每一个卷积核覆盖下的区域,元素会被重新排成一列。当卷积核在整个图片上滑动时,将会形成一系列的列向量。对于每一个输入形状为[N, C, H, W]的 ``x`` ,都将会按照下面公式计算出一个形状为[N, Cout, Lout]的输出。 + + +.. math:: + + dkernel[0] &= dilations[0] * (kernel\_sizes[0] - 1) + 1 + + dkernel[1] &= dilations[1] * (kernel\_sizes[1] - 1) + 1 + + hout &= \frac{H + paddings[0] + paddings[2] - dkernel[0]}{strides[0]} + 1 + + wout &= \frac{W + paddings[1] + paddings[3] - dkernel[1]}{strides[1]} + 1 + + Cout &= C * kernel\_sizes[0] * kernel\_sizes[1] + + Lout &= hout * wout + +**样例**: + +:: + + Given: + x.shape = [5, 10, 25, 25] + kernel_size = [3, 3] + strides = 1 + paddings = 1 + + Return: + out.shape = [5, 90, 625] + + +参数: + - **x** (Variable) – 输入4-D Tensor,形状为[N, C, H, W],数据类型为float32或者float64 + - **kernel_size** (int|list of int) – 卷积核的尺寸,整数或者整型列表。如果为整型列表,应包含两个元素 ``[k_h, k_w]`` ,卷积核大小为 ``k_h * k_w`` ;如果为整数k,会被当作整型列表 ``[k, k]`` 处理 + - **strides** (int|list of int,可选) – 卷积步长,整数或者整型列表。如果为整型列表,应该包含两个元素 ``[stride_h, stride_w]`` 。如果为整数,则 ``stride_h = stride_w = strides`` 。默认值为1 + - **paddings** (int|list of int,可选) – 每个维度的扩展, 整数或者整型列表。如果为整型列表,长度应该为4或者2;长度为4 对应的padding参数是:[padding_top, padding_left,padding_bottom, padding_right],长度为2对应的padding参数是[padding_h, padding_w],会被当作[padding_h, padding_w, padding_h, padding_w]处理。如果为整数padding,则会被当作[padding, padding, padding, padding]处理。默认值为0 + - **dilations** (int|list of int,可选) – 卷积膨胀,整型列表或者整数。如果为整型列表,应该包含两个元素[dilation_h, dilation_w]。如果是整数dilation,会被当作整型列表[dilation, dilation]处理。默认值为1 + - **name** (str|None,可选) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None。 + + +返回: unfold操作之后的结果,形状如上面所描述的[N, Cout, Lout],Cout每一个滑动block里面覆盖的元素个数,Lout是滑动block的个数,数据类型与 ``x`` 相同 + +返回类型: Variable + +**代码示例**: + +.. 
code-block:: python + + import paddle.fluid as fluid + x = fluid.layers.data(name = 'data', shape = [3, 224, 224], dtype = 'float32') + y = fluid.layers.unfold(x, [3, 3], 1, 1, 1) + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/uniform_random_cn.rst b/doc/paddle/api/paddle/fluid/layers/uniform_random_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..14e921926463fb4f02aa1b1bf133e2bc2f8c9bd7 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/uniform_random_cn.rst @@ -0,0 +1,75 @@ +.. _cn_api_fluid_layers_uniform_random: + +uniform_random +------------------------------- + +.. py:function:: paddle.fluid.layers.uniform_random(shape, dtype='float32', min=-1.0, max=1.0, seed=0, name=None) + + + + +该OP返回数值服从范围[``min``, ``max``)内均匀分布的随机Tensor,形状为 ``shape``,数据类型为 ``dtype``。 + +:: + + 示例1: + 给定: + shape=[1,2] + 则输出为: + result=[[0.8505902, 0.8397286]] + +参数: + - **shape** (list|tuple|Tensor) - 生成的随机Tensor的形状。如果 ``shape`` 是list、tuple,则其中的元素可以是int,或者是形状为[1]且数据类型为int32、int64的Tensor。如果 ``shape`` 是Tensor,则是数据类型为int32、int64的1-D Tensor。 + - **dtype** (str|np.dtype|core.VarDesc.VarType, 可选) - 输出Tensor的数据类型,支持float32、float64。默认值为float32。 + - **min** (float|int,可选) - 要生成的随机值范围的下限,min包含在范围中。支持的数据类型:float、int。默认值为-1.0。 + - **max** (float|int,可选) - 要生成的随机值范围的上限,max不包含在范围中。支持的数据类型:float、int。默认值为1.0。 + - **seed** (int,可选) - 随机种子,用于生成样本。0表示使用系统生成的种子。注意如果种子不为0,该操作符每次都生成同样的随机数。支持的数据类型:int。默认为 0。 + - **name** (str, 可选) - 输出的名字。一般无需设置,默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 + +返回: + Tensor:数值服从范围[``min``, ``max``)内均匀分布的随机Tensor,形状为 ``shape``,数据类型为 ``dtype``。 + +抛出异常: + - ``TypeError`` - 如果 ``shape`` 的类型不是list、tuple、Tensor。 + - ``TypeError`` - 如果 ``dtype`` 不是float32、float64。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + startup_program = fluid.Program() + train_program = fluid.Program() + with fluid.program_guard(train_program, startup_program): + # example 1: + # attr shape is a list which doesn't contain Tensor. + result_1 = fluid.layers.uniform_random(shape=[3, 4]) + + # example 2: + # attr shape is a list which contains Tensor. + dim_1 = fluid.layers.fill_constant([1],"int64",3) + dim_2 = fluid.layers.fill_constant([1],"int32",5) + result_2 = fluid.layers.uniform_random(shape=[dim_1, dim_2]) + + # example 3: + # attr shape is a Tensor, the data type must be int32 or int64 + var_shape = fluid.data(name='var_shape', shape=[2], dtype="int64") + result_3 = fluid.layers.uniform_random(var_shape) + var_shape_int32 = fluid.data(name='var_shape_int32', shape=[2], dtype="int32") + result_4 = fluid.layers.uniform_random(var_shape_int32) + shape_1 = np.array([3,4]).astype("int64") + shape_2 = np.array([3,4]).astype("int32") + + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(startup_program) + outs = exe.run(train_program, feed = {'var_shape':shape_1, 'var_shape_int32':shape_2}, + fetch_list=[result_1, result_2, result_3, result_4]) + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/unique_cn.rst b/doc/paddle/api/paddle/fluid/layers/unique_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..362d511d8b2253fcfd9241f9da9b060420cbd8bf --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/unique_cn.rst @@ -0,0 +1,35 @@ +.. _cn_api_fluid_layers_unique: + +unique +------------------------------- + +.. 
py:function:: paddle.fluid.layers.unique(x, dtype='int32') + +unique为 ``x`` 返回一个unique张量和一个指向该unique张量的索引。 + +参数: + - **x** (Tensor) - 一个1维输入张量 + - **dtype** (np.dtype|str, 可选) – 索引张量的类型,应该为int32或者int64。默认:int32. + +返回:元组(out, index)。 ``out`` 为 ``x`` 的指定dtype的unique张量, ``index`` 是一个指向 ``out`` 的索引张量, 用户可以通过该函数来转换原始的 ``x`` 张量的索引。 + +返回类型:元组(tuple) + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle.fluid as fluid + x = fluid.layers.assign(np.array([2, 3, 3, 1, 5, 3], dtype='int32')) + out, index = fluid.layers.unique(x) # out is [2, 3, 1, 5]; index is [0, 1, 1, 2, 3, 1] + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/unique_with_counts_cn.rst b/doc/paddle/api/paddle/fluid/layers/unique_with_counts_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..755fc858a41385fe1892f00eafa28e7d9a171317 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/unique_with_counts_cn.rst @@ -0,0 +1,36 @@ +.. _cn_api_fluid_layers_unique_with_counts: + +unique_with_counts +------------------------------- + +.. py:function:: paddle.fluid.layers.unique_with_counts(x, dtype='int32') + + + +该OP对输入Tensor元素进行去重,获取去重后结果Tensor,同时获取去重后结果在原始输入中的计数Tensor以及在原始输入中的索引Tensor。 + +注:该OP仅支持 **CPU** ,同时仅支持 **Tensor** + +参数: + - **x** (Variable) – 数据shape为 :math:`[N]` 的一维Tensor,数据类型为 float32,float64,int32,int64。 + - **dtype** (np.dtype|core.VarDesc.VarType|str) – 索引和计数Tensor的类型,默认为 int32,数据类型需要为 int32或int64。 + +返回: + - **out** 表示对输入进行去重后结果一维Tensor,数据shape为 :math:`[K]` ,K和输入x的shape中的N可能不一致。 + - **index** 表示原始输入在去重后结果中的索引Tensor :math:`[N]` ,shape和输入x的shape一致。 + - **count** 表示去重后元素的计数结果Tensor,数据shape为 :math:`[K]` ,数据shape和out的shape一致。 + +返回类型:tuple,tuple中元素类型为Variable(Tensor),输出中的out和输入x的数据类型一致,输出中index以及count的数据类型为 int32,int64。 + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle.fluid as fluid + x = fluid.layers.assign(np.array([2, 3, 3, 1, 5, 3], dtype='int32')) + out, index, count = fluid.layers.unique_with_counts(x) # out is [2, 3, 1, 5]; + # index is [0, 1, 1, 2, 3, 1]; + # count is [1, 3, 1, 1] + # x.shape=(6,) out.shape=(4,), index.shape=(6,), count.shape=(4,) + diff --git a/doc/paddle/api/paddle/fluid/layers/unsqueeze_cn.rst b/doc/paddle/api/paddle/fluid/layers/unsqueeze_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3290bebea17c0e989232238b6f5c6c79639467fd --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/unsqueeze_cn.rst @@ -0,0 +1,43 @@ +.. _cn_api_fluid_layers_unsqueeze: + +unsqueeze +------------------------------- + +.. py:function:: paddle.fluid.layers.unsqueeze(input, axes, name=None) + + + + +该OP向输入(input)的shape中一个或多个位置(axes)插入维度。 + +- 示例: + +.. code-block:: python + + 输入: + X.shape = [2, 3] + X.data = [[1, 2, 3], + [4,5,6]] + axes = [0, 2] + 输出(在X的第0维和第2维插入新维度): + Out.shape = [1, 2, 1, 3] + Out.data = [[[[1, 2, 3]], + [[4, 5, 6]]]] + +参数: + - **input** (Variable)- 多维 ``Tensor``,数据类型为 ``float32``, ``float64``, ``int8``, ``int32``,或 ``int64``。 + - **axes** (int|list|tuple|Variable) - 表示要插入维度的位置。数据类型是 ``int32`` 。如果 ``axes`` 的类型是 list 或 tuple,它的元素可以是整数或者形状为[1]的 ``Tensor`` 。如果 ``axes`` 的类型是 ``Variable``,则是1-D ``Tensor``。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置。默认值: ``None``。 + +返回:扩展维度后的多维Tensor + +返回类型:Variable + +**代码示例**: + +.. 
code-block:: python + + import paddle.fluid as fluid + x = fluid.data(name='x', shape=[5, 10]) + y = fluid.layers.unsqueeze(input=x, axes=[1]) + # y.shape is [5, 1, 10] diff --git a/doc/paddle/api/paddle/fluid/layers/unstack_cn.rst b/doc/paddle/api/paddle/fluid/layers/unstack_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ea6f89069786395dff73614638c0466aa2ce2b67 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/unstack_cn.rst @@ -0,0 +1,42 @@ +.. _cn_api_fluid_layers_unstack: + +unstack +------------------------------- + +.. py:function:: paddle.fluid.layers.unstack(x, axis=0, num=None) + + + + +该OP将单个dim为 ``D`` 的Tensor沿 ``axis`` 轴unpack为 ``num`` 个dim为 ``(D-1)`` 的Tensor + +参数: + - **x** (Tensor) – 输入x为 ``dim > 0`` 的Tensor, + 支持的数据类型: float32,float64,int32,int64。 + + - **axis** (int | 可选) – 输入Tensor进行unpack运算所在的轴,axis的范围为:``[-D, D)`` , + 如果 ``axis < 0`` ,则 :math:`axis = axis + dim(x)`,axis的默认值为0。 + + - **num** (int | 可选) - axis轴的长度,一般无需设置,默认值为 ``None`` 。 + +返回: 长度为num的Tensor列表, 数据类型与输入Tensor相同,dim为 ``(D-1)``。 + +返回类型: list(Tensor) + +抛出异常: + - :code:`ValueError`:``x.shape[axis]`` <= 0 或 ``axis`` 不在[-D, D)范围内 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + x = fluid.data(name='x', shape=[2, 3, 5], dtype='float32') #创建一个shape=[2, 3, 5]的Tensor + y = fluid.layers.unstack(x, axis=1) #沿着第1轴进行unpack, unpack后为3个shape=[2,5]的Tensor + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/warpctc_cn.rst b/doc/paddle/api/paddle/fluid/layers/warpctc_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d4df2ff0c8463717786707f177b453d1dbd5b94c --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/warpctc_cn.rst @@ -0,0 +1,95 @@ +.. _cn_api_fluid_layers_warpctc: + +warpctc +------------------------------- + +.. py:function:: paddle.fluid.layers.warpctc(input, label, blank=0, norm_by_times=False, input_length=None, label_length=None) + + + + +该OP用于计算 `CTC loss `_ 。该OP的底层调用了第三方 `baidu-research::warp-ctc `_ 的实现。 + +参数: + - **input** (Variable) - 可以是3-D Tensor或2-D LoDTensor。当输入类型是3-D Tensor时,则表示输入是经过padding的定长序列,其 shape 必须是 ``[seq_length, batch_size, num_classes + 1]`` 。当输入类型是2-D LoDTensor时,则表示输入为变长序列,其shape必须为 ``[Lp,num_classes+1]`` , ``Lp`` 是所有输入序列长度之和。以上 shape 中的 ``num_classes`` 是实际类别数,不包括空白标签。该输入不需要经过 softmax 操作,因为该OP的内部对 ``input`` 做了 softmax 操作。数据类型仅支持float32。 + - **label** (Variable) - 可以是3-D Tensor或2-D LoDTensor,需要跟 ``input`` 保持一致。当输入类型为3-D Tensor时,表示输入是经过 padding 的定长序列,其 shape 为 ``[batch_size, label_length]`` ,其中, ``label_length`` 是最长的 label 序列的长度。当输入类型是2-D LoDTensor时,则表示输入为变长序列,其shape必须为 ``[Lp, 1]`` , 其中 ``Lp`` 是所有 label 序列的长度和。 ``label`` 中的数值为字符ID。数据类型支持int32。 + - **blank** (int,可选) - 空格标记的ID,其取值范围为 ``[0,num_classes+1)`` 。数据类型支持int32。缺省值为0。 + - **norm_by_times** (bool,可选) - 是否根据序列长度对梯度进行正则化。数据类型支持 bool 。缺省值为False。 + - **input_length** (Variable) - 必须是1-D Tensor。仅在输入为定长序列时使用,表示输入数据中每个序列的长度,shape为 ``[batch_size]`` 。数据类型支持int64。默认为None。 + - **label_length** (Variable) - 必须是1-D Tensor。仅在label为定长序列时使用,表示 label 中每个序列的长度,shape为 ``[batch_size]`` 。数据类型支持int64。默认为None。 + +返回:Shape为[batch_size,1]的2-D Tensor,表示每一个序列的CTC loss。数据类型与 ``input`` 一致。 + +返回类型:Variable + +**代码示例** + +.. 
code-block:: python + + # using LoDTensor + import paddle.fluid as fluid + import numpy as np + + # lengths of logit sequences + seq_lens = [2,6] + # lengths of label sequences + label_lens = [2,3] + # class num + class_num = 5 + + logits = fluid.data(name='logits',shape=[None, class_num+1], + dtype='float32',lod_level=1) + label = fluid.data(name='label', shape=[None, 1], + dtype='int32', lod_level=1) + cost = fluid.layers.warpctc(input=logits, label=label) + place = fluid.CPUPlace() + x = fluid.create_lod_tensor( + np.random.rand(np.sum(seq_lens), class_num+1).astype("float32"), + [seq_lens], place) + y = fluid.create_lod_tensor( + np.random.randint(0, class_num, [np.sum(label_lens), 1]).astype("int32"), + [label_lens], place) + exe = fluid.Executor(place) + output= exe.run(fluid.default_main_program(), + feed={"logits": x,"label": y}, + fetch_list=[cost.name]) + print(output) + +.. code-block:: python + + # using Tensor + import paddle.fluid as fluid + import numpy as np + + # length of the longest logit sequence + max_seq_length = 5 + # length of the longest label sequence + max_label_length = 3 + # number of logit sequences + batch_size = 16 + # class num + class_num = 5 + logits = fluid.data(name='logits', + shape=[max_seq_length, batch_size, class_num+1], + dtype='float32') + logits_length = fluid.data(name='logits_length', shape=[None], + dtype='int64') + label = fluid.data(name='label', shape=[batch_size, max_label_length], + dtype='int32') + label_length = fluid.data(name='labels_length', shape=[None], + dtype='int64') + cost = fluid.layers.warpctc(input=logits, label=label, + input_length=logits_length, + label_length=label_length) + place = fluid.CPUPlace() + x = np.random.rand(max_seq_length, batch_size, class_num+1).astype("float32") + y = np.random.randint(0, class_num, [batch_size, max_label_length]).astype("int32") + exe = fluid.Executor(place) + output= exe.run(fluid.default_main_program(), + feed={"logits": x, + "label": y, + "logits_length": np.array([max_seq_length]*batch_size).astype("int64"), + "labels_length": np.array([max_label_length]*batch_size).astype("int64")}, + fetch_list=[cost.name]) + print(output) + diff --git a/doc/paddle/api/paddle/fluid/layers/where_cn.rst b/doc/paddle/api/paddle/fluid/layers/where_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d483b8fa905522125f7abecdf922ba9fd647fcbd --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/where_cn.rst @@ -0,0 +1,42 @@ +.. _cn_api_fluid_layers_where: + +where +------------------------------- + +.. py:function:: paddle.fluid.layers.where(condition) + + + + +该OP计算输入元素中为True的元素在输入中的坐标(index)。 + +参数: + - **condition** (Variable)– 输入秩至少为1的多维Tensor,数据类型是bool类型。 + +返回:输出condition元素为True的坐标(index),将所有的坐标(index)组成一个2-D的Tensor。 + +返回类型:Variable,数据类型是int64。 + +**代码示例**: + +.. 
code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.layers as layers + import numpy as np + # tensor 为 [True, False, True] + condition = layers.assign(np.array([1, 0, 1], dtype='int32')) + condition = layers.cast(condition, 'bool') + out = layers.where(condition) # [[0], [2]] + + # tensor 为 [[True, False], [False, True]] + condition = layers.assign(np.array([[1, 0], [0, 1]], dtype='int32')) + condition = layers.cast(condition, 'bool') + out = layers.where(condition) # [[0, 0], [1, 1]] + + # tensor 为 [False, False, False] + condition = layers.assign(np.array([0, 0, 0], dtype='int32')) + condition = layers.cast(condition, 'bool') + out = layers.where(condition) # [[]] + + diff --git a/doc/paddle/api/paddle/fluid/layers/while_loop_cn.rst b/doc/paddle/api/paddle/fluid/layers/while_loop_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f9d44614afd334e61cdc1f925b5c32ad3ad0dc35 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/while_loop_cn.rst @@ -0,0 +1,63 @@ +.. _cn_api_fluid_layers_while_loop: + +while_loop +____________________________________ + + + +.. py:function:: paddle.fluid.layers.while_loop(cond, body, loop_vars, is_test=False, name=None) + + + + +该API用于实现类似while的循环控制功能,只要循环条件 ``cond`` 的返回值为True,``while_loop`` 则会循环执行循环体 ``body`` ,直到 ``cond`` 的返回值为False。 + +**注意:** + ``body`` 中定义的局部变量无法使用 ``Executor`` 的 ``fetch_list`` 来获取的,变量需在 ``body`` 外定义并将其置于 ``loop_vars`` 中进行循环更新后才可通过 ``fetch_list`` 获取。 + +参数: + - **cond** (callable) - 返回boolean类型张量的可调用函数,用以判断循环是否继续执行。 ``cond`` 的参数和 ``loop_vars`` 相对应。 + - **body** (callable) - 循环执行的结构体。其返回一个包含tensor或LoDTensorArray的列表或元组,且这些tensor或LoDTensorArray的长度,结构,类型和 ``loop_vars`` 中的相同。 且``body`` 的参数与 ``loop_vars`` 相对应。 + - **loop_vars** (list|tuple) - 包含tensor或LoDTensorArray的列表或是元组,将其传入至 ``cond`` 和 ``body`` 中,得到循环条件和输出值。 + - **is_test** (bool,可选) - 用于表明是否在测试阶段执行,默认值为False。 + - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`。一般无需设置,默认值为None。 + +返回:循环迭代之后 ``body`` 的返回值,和 ``loop_vars`` 具有相同的结构。 + +返回类型:list|tuple + +异常抛出: + - ``TypeError``:若 ``cond`` 不是可调用的。 + - ``TypeError``:若 ``body`` 不是可调用的。 + - ``TypeError``:若 ``loop_vars`` 不是列表或是元组。 + - ``TypeError``:若 ``cond`` 返回值不是变量。 + - ``TypeError``:若 ``cond`` 返回值不是boolean类型的variable。 + - ``TypeError``:若 ``cond`` 返回值的shape不等于1。 + - ``ValueError``:若 ``loop_vars`` 为空。 + - ``ValueError``:若 ``cond`` 返回值的长度和类型和 ``loop_vars`` 不同。 + +**示例代码** + +.. code-block:: python + + # 该示例代码展示整数循环+1,循环10次,输出计数结果 + import paddle.fluid as fluid + import paddle.fluid.layers as layers + + def cond(i, ten): # 参数和loop_vars相对应 + return i < ten + + def body(i, ten): # 参数和loop_vars相对应 + i = i + 1 + return [i, ten] + + main_program = fluid.default_main_program() + startup_program = fluid.default_startup_program() + with fluid.program_guard(main_program, startup_program): + i = layers.fill_constant(shape=[1], dtype='int64', value=0) # 循环计数器 + ten = layers.fill_constant(shape=[1], dtype='int64', value=10) # 循环次数 + i, ten = layers.while_loop(cond, body, [i, ten]) + + exe = fluid.Executor(fluid.CPUPlace()) + res = exe.run(main_program, feed={}, fetch_list=[i]) + print(res) #[array([10])] diff --git a/doc/paddle/api/paddle/fluid/layers/yolo_box_cn.rst b/doc/paddle/api/paddle/fluid/layers/yolo_box_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..37c10f10eb2948e4cd87c5205e9403094868c73f --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/yolo_box_cn.rst @@ -0,0 +1,71 @@ +.. _cn_api_fluid_layers_yolo_box: + +yolo_box +------------------------------- + +.. 
py:function:: paddle.fluid.layers.yolo_box(x, img_size, anchors, class_num, conf_thresh, downsample_ratio, clip_bbox=True,name=None) + + + + + +该运算符基于YOLOv3网络的输出结果,生成YOLO检测框。 + +连接 yolo_box 网络的输出形状应为[N,C,H,W],其中 H 和 W 相同,用来指定网格大小。对每个网格点预测给定的数目的框,这个数目记为 S ,由 anchor 的数量指定。 在第二维(通道维度)中,C应该等于S *(5 + class_num),class_num是源数据集中对象类别数目(例如coco数据集中的80),此外第二个(通道)维度中还有4个框位置坐标x,y,w,h,以及anchor box的one-hot key的置信度得分。 + +假设4个位置坐标是 :math:`t_x` ,:math:`t_y` ,:math:`t_w` , :math:`t_h` +,则框的预测算法为: + +.. math:: + + b_x &= \sigma(t_x) + c_x\\ + b_y &= \sigma(t_y) + c_y\\ + b_w &= p_w e^{t_w}\\ + b_h &= p_h e^{t_h}\\ + +在上面的等式中, :math:`c_x` , :math:`c_x` 是当前网格的左上角顶点坐标。 :math:`p_w` , :math:`p_h` 由anchors指定。 + +每个anchor预测框的第五通道的逻辑回归值表示每个预测框的置信度得分,并且每个anchor预测框的最后class_num通道的逻辑回归值表示分类得分。 应忽略置信度低于conf_thresh的框。另外,框最终得分是置信度得分和分类得分的乘积。 + + +.. math:: + + score_{pred} = score_{conf} * score_{class} + + +参数: + - **x** (Variable) - YoloBox的输入张量是一个4-D张量,形状为[N,C,H,W]。第二维(C)存储每个anchor box位置坐标,每个anchor box的置信度分数和one hot key。通常,X应该是YOLOv3网络的输出。数据类型为float32或float64 + - **img_size** (Variable) - YoloBox的图像大小张量,这是一个形状为[N,2]的二维张量。该张量保持每个输入图像的高度和宽度,用于对输出图像按输入图像比例调整输出框的大小。数据类型为int32。 + - **anchors** (list | tuple) - anchor的宽度和高度,它将逐对解析 + - **class_num** (int) - 要预测的类数 + - **conf_thresh** (float) - 检测框的置信度得分阈值。置信度得分低于阈值的框应该被忽略 + - **downsample_ratio** (int) - 从网络输入到YoloBox操作输入的下采样率,因此应依次为第一个,第二个和第三个YoloBox运算设置该值为32,16,8 + - **clip_bbox** (bool) - 是否将输出的bbox裁剪到 :attr:`img_size` 范围内,默认为True。 + - **name** (str|None) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值为None。 + +返回: + 1. 框的坐标,形为[N,M,4]的三维张量 + 2. 框的分类得分, 形为 [N,M,class_num]的三维张量 + +返回类型: 变量(Variable) + +抛出异常: + - TypeError - yolov_box的输入x必须是Variable + - TypeError - yolo框的anchors参数必须是list或tuple + - TypeError - yolo box的class_num参数必须是整数 + - TypeError - yolo框的conf_thresh参数必须是一个浮点数 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + x = fluid.layers.data(name='x', shape=[255, 13, 13], dtype='float32') + img_size = fluid.layers.data(name='img_size',shape=[2],dtype='int64') + anchors = [10, 13, 16, 30, 33, 23] + boxes, scores = fluid.layers.yolo_box(x=x, img_size=img_size, class_num=80, anchors=anchors, + conf_thresh=0.01, downsample_ratio=32) + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/yolov3_loss_cn.rst b/doc/paddle/api/paddle/fluid/layers/yolov3_loss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e4cc4e1bd1b05d080b543c81899158d3a722b93c --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/yolov3_loss_cn.rst @@ -0,0 +1,108 @@ +.. _cn_api_fluid_layers_yolov3_loss: + +yolov3_loss +------------------------------- + +.. 
py:function:: paddle.fluid.layers.yolov3_loss(x, gt_box, gt_label, anchors, anchor_mask, class_num, ignore_thresh, downsample_ratio, gt_score=None, use_label_smooth=True, name=None) + + + + +该运算通过给定的预测结果和真实框计算yolov3损失。 + +yolov3 loss前的网络输出形状为[N,C,H,W],H和W应该相同,用来指定网格(grid)大小。每个网格点预测S个边界框(bounding boxes),S由每个尺度中 ``anchors`` 簇的个数指定。在第二维(表示通道的维度)中,C的值应为S *(class_num + 5),class_num是源数据集的对象种类数(如coco中为80),另外,除了存储4个边界框位置坐标x,y,w,h,还包括边界框以及每个anchor框的one-hot关键字的置信度得分。 + +假设有四个表征位置的坐标为 :math:`t_x, t_y, t_w, t_h` ,那么边界框的预测将会如下定义: + + $$ + b_x = \\sigma(t_x) + c_x + $$ + $$ + b_y = \\sigma(t_y) + c_y + $$ + $$ + b_w = p_w e^{t_w} + $$ + $$ + b_h = p_h e^{t_h} + $$ + +在上面的等式中, :math:`c_x, c_y` 是当前网格的左上角, :math:`p_w, p_h` 由anchors指定。 +置信度得分是anchor框和真实框之间的IoU的逻辑回归值,anchor框的得分最高为1,此时该anchor框对应着最大IoU。 +如果anchor框之间的IoU大于忽略阀值ignore_thresh,则该anchor框的置信度评分损失将会被忽略。 + +因此,yolov3损失包括三个主要部分,框位置损失,目标性损失,分类损失。L1损失用于 +框坐标(w,h),同时,sigmoid交叉熵损失用于框坐标(x,y),目标性损失和分类损失。 + +每个真实框将在所有anchor中找到最匹配的anchor,对该anchor的预测将会计算全部(三种)损失,但是没有匹配GT box(ground truth box真实框)的anchor的预测只会产生目标性损失。 + +为了权衡大框(box)和小(box)之间的框坐标损失,框坐标损失将与比例权重相乘而得。即: + + $$ + weight_{box} = 2.0 - t_w * t_h + $$ + +最后的loss值将如下计算: + + $$ + loss = (loss_{xy} + loss_{wh}) * weight_{box} + loss_{conf} + loss_{class} + $$ + + +当 ``use_label_smooth`` 为 ``True`` 时,在计算分类损失时将平滑分类目标,将正样本的目标平滑到1.0-1.0 / class_num,并将负样本的目标平滑到1.0 / class_num。 + +``GTScore`` (如果存在)表示真实框的mixup得分,那么真实框所产生的所有损失需要乘上GTScore。 + + + +参数: + - **x** (Variable) – YOLOv3损失运算的输入张量,这是一个形状为[N,C,H,W]的四维Tensor。H和W应该相同,第二维(C)存储框的位置信息,以及每个anchor box的置信度得分和one-hot分类。数据类型为float32或float64。 + - **gt_box** (Variable) – 真实框,应该是[N,B,4]的形状。第三维用来承载x、y、w、h,其中 x, y是真实框的中心坐标,w, h是框的宽度和高度,且x、y、w、h将除以输入图片的尺寸,缩放到[0,1]区间内。 N是batch size,B是图像中所含有的的最多的box数目。数据类型为float32或float64。 + - **gt_label** (Variable) – 真实框的类id,应该形为[N,B]。数据类型为int32。 + - **anchors** (list|tuple) – 指定anchor框的宽度和高度,它们将逐对进行解析 + - **anchor_mask** (list|tuple) – 当前YOLOv3损失计算中使用anchor的mask索引 + - **class_num** (int) – 要预测的类别数 + - **ignore_thresh** (float) – 一定条件下忽略某框置信度损失的忽略阈值 + - **downsample_ratio** (int) – 网络输入到YOLOv3 loss输入的下采样率,因此第一,第二和第三个 loss 的下采样率应分别为32,16,8 + - **gt_score** (Variable) - 真实框的混合得分,形为[N,B]。 默认None。数据类型为float32或float64。 + - **use_label_smooth** (bool) - 是否使用平滑标签。 默认为True + - **name** (str|None) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值为None。 + + +返回: yolov3损失的值,具有形状[N]的1-D张量。 + +返回类型: 变量(Variable),数据类型与input一致。 + +抛出异常: + - ``TypeError`` – yolov3_loss的输入x必须是Variable + - ``TypeError`` – 输入yolov3_loss的gtbox必须是Variable + - ``TypeError`` – 输入yolov3_loss的gtlabel必须是None或Variable + - ``TypeError`` – 输入yolov3_loss的gtscore必须是Variable + - ``TypeError`` – 输入yolov3_loss的anchors必须是list或tuple + - ``TypeError`` – 输入yolov3_loss的class_num必须是整数integer类型 + - ``TypeError`` – 输入yolov3_loss的ignore_thresh必须是一个浮点数float类型 + - ``TypeError`` – 输入yolov3_loss的use_label_smooth必须是bool型 + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + x = fluid.layers.data(name='x', shape=[255, 13, 13], dtype='float32') + gt_box = fluid.layers.data(name='gt_box', shape=[6, 4], dtype='float32') + gt_label = fluid.layers.data(name='gt_label', shape=[6], dtype='int32') + gt_score = fluid.layers.data(name='gt_score', shape=[6], dtype='float32') + anchors = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326] + anchor_mask = [0, 1, 2] + loss = fluid.layers.yolov3_loss(x=x, gt_box=gt_box, gt_label=gt_label, + gt_score=gt_score, anchors=anchors, + anchor_mask=anchor_mask, class_num=80, + ignore_thresh=0.7, downsample_ratio=32) + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/zeros_cn.rst b/doc/paddle/api/paddle/fluid/layers/zeros_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d7a77f6688a0e0fb691a49558c3f1ba78a6a3144 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/zeros_cn.rst @@ -0,0 +1,22 @@ +.. _cn_api_fluid_layers_zeros: + +zeros +------------------------------- + +.. py:function:: paddle.fluid.layers.zeros(shape,dtype,force_cpu=False) + +该OP创建形状为 ``shape`` 、数据类型为 ``dtype`` 且值全为0的Tensor。 + +参数: + - **shape** (tuple|list|Tensor) - 输出Tensor的形状, ``shape`` 的数据类型为int32或者int64。 + - **dtype** (np.dtype|str) - 输出Tensor的数据类型,数据类型必须为bool、 float16、float32、float64、int32或int64。 + - **force_cpu** (bool, 可选) - 是否强制将输出Tensor写入CPU内存。如果 ``force_cpu`` 为False,则将输出Tensor写入当前所在运算设备的内存,默认为False。 + +返回:值全为0的Tensor,数据类型和 ``dtype`` 定义的类型一致。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + data = fluid.layers.zeros(shape=[3, 2], dtype='float32') # [[0., 0.], [0., 0.], [0., 0.]] diff --git a/doc/paddle/api/paddle/fluid/layers/zeros_like_cn.rst b/doc/paddle/api/paddle/fluid/layers/zeros_like_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d75dc7cd1029a8bacaa2e69b28304305c68b5602 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/layers/zeros_like_cn.rst @@ -0,0 +1,29 @@ +.. _cn_api_fluid_layers_zeros_like: + +zeros_like +------------------------------- + +.. py:function:: paddle.fluid.layers.zeros_like(x, out=None) + + + + + +该OP创建一个和x具有相同的形状和数据类型的全零Tensor。 + +参数: + - **x** (Variable) – 指定输入为一个多维的Tensor,数据类型可以是bool,float32,float64,int32,int64。 + - **out** (Variable|可选) – 如果为None,则创建一个Variable作为输出,创建后的Variable的数据类型,shape大小和输入变量x一致。如果是输入的一个Tensor,数据类型和数据shape大小需要和输入变量x一致。默认值为None。 + +返回:返回一个多维的Tensor,具体的元素值和输入的数据类型相关,如果是bool类型的,则全False,其它均为0。数据shape大小和输入x一致。 + +返回类型:Variable + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + x = fluid.data(name='x', dtype='float32', shape=[3]) + data = fluid.layers.zeros_like(x) # [0.0, 0.0, 0.0] + diff --git a/doc/paddle/api/paddle/fluid/load_op_library_cn.rst b/doc/paddle/api/paddle/fluid/load_op_library_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d460cfc5c2e09215af692c5ddb0bf5af9cf7b0f5 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/load_op_library_cn.rst @@ -0,0 +1,25 @@ +.. _cn_api_fluid_load_op_library: + +load_op_library +------------------------------- + +.. py:class:: paddle.fluid.load_op_library + + + + +``load_op_library`` 用于自定义C++算子中,用来加载算子动态共享库。加载库后,注册好的算子及其Kernel实现将在PaddlePaddle主进程中可以被调用。 请注意,自定义算子的类型不能与框架中的现有算子类型相同。 + +参数: + - **lib_filename** (str) – 动态共享库的名字。 + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + #fluid.load_op_library('custom_op.so') + + + + diff --git a/doc/paddle/api/paddle/fluid/memory_optimize_cn.rst b/doc/paddle/api/paddle/fluid/memory_optimize_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..33fd6d95d79e8f66e966b2852a8f6fe0541e6b72 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/memory_optimize_cn.rst @@ -0,0 +1,12 @@ +.. _cn_api_fluid_transpiler_memory_optimize: + +memory_optimize +------------------------------- + + +.. py:function:: paddle.fluid.transpiler.memory_optimize(input_program, skip_opt_set=None, print_log=False, level=0, skip_grads=True) + + + + +**从1.6版本开始此接口不再推荐使用,请不要在新写的代码中使用它,1.6+版本已默认开启更优的存储优化策略** \ No newline at end of file diff --git a/doc/paddle/api/paddle/fluid/metrics/Accuracy_cn.rst b/doc/paddle/api/paddle/fluid/metrics/Accuracy_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0cb6ea087033b21afd2cea5838f6d1366868b92f --- /dev/null +++ b/doc/paddle/api/paddle/fluid/metrics/Accuracy_cn.rst @@ -0,0 +1,63 @@ +.. _cn_api_fluid_metrics_Accuracy: + +Accuracy +------------------------------- +.. py:class:: paddle.fluid.metrics.Accuracy(name=None) + + + + +该接口用来计算多个mini-batch的平均准确率。Accuracy对象有两个状态value和weight。Accuracy的定义参照 https://en.wikipedia.org/wiki/Accuracy_and_precision 。 + +参数: + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:初始化后的 ``Accuracy`` 对象 + +返回类型:Accuracy + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + # 假设有batch_size = 128 + batch_size=128 + accuracy_manager = fluid.metrics.Accuracy() + # 假设第一个batch的准确率为0.9 + batch1_acc = 0.9 + accuracy_manager.update(value = batch1_acc, weight = batch_size) + print("expect accuracy: %.2f, get accuracy: %.2f" % (batch1_acc, accuracy_manager.eval())) + # 假设第二个batch的准确率为0.8 + batch2_acc = 0.8 + accuracy_manager.update(value = batch2_acc, weight = batch_size) + #batch1和batch2的联合准确率为(batch1_acc * batch_size + batch2_acc * batch_size) / batch_size / 2 + print("expect accuracy: %.2f, get accuracy: %.2f" % ((batch1_acc * batch_size + batch2_acc * batch_size) / batch_size / 2, accuracy_manager.eval())) + #重置accuracy_manager + accuracy_manager.reset() + #假设第三个batch的准确率为0.8 + batch3_acc = 0.8 + accuracy_manager.update(value = batch3_acc, weight = batch_size) + print("expect accuracy: %.2f, get accuracy: %.2f" % (batch3_acc, accuracy_manager.eval())) + +.. py:method:: update(value, weight) + +该函数使用输入的(value, weight)来累计更新Accuracy对象的对应状态,更新方式如下: + + .. math:: + \\ \begin{array}{l}{\text { self. value }+=\text { value } * \text { weight }} \\ {\text { self. weight }+=\text { weight }}\end{array} \\ + +参数: + - **value** (float|numpy.array) – mini-batch的正确率 + - **weight** (int|float) – mini-batch的大小 + +返回:无 + +.. py:method:: eval() + +该函数计算并返回累计的mini-batches的平均准确率。 + +返回:累计的mini-batches的平均准确率 + +返回类型:float或numpy.array + diff --git a/doc/paddle/api/paddle/fluid/metrics/Auc_cn.rst b/doc/paddle/api/paddle/fluid/metrics/Auc_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8e6b7bfea5ec381b7af051ba39fc080291b4fcba --- /dev/null +++ b/doc/paddle/api/paddle/fluid/metrics/Auc_cn.rst @@ -0,0 +1,67 @@ +.. _cn_api_fluid_metrics_Auc: + +Auc +------------------------------- +.. 
py:class:: paddle.fluid.metrics.Auc(name, curve='ROC', num_thresholds=4095) + + + + +**注意**:目前只用Python实现Auc,可能速度略慢 + +该接口计算Auc,在二分类(binary classification)中广泛使用。相关定义参考 https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve 。 + +该接口创建四个局部变量true_positives, true_negatives, false_positives和false_negatives,用于计算Auc。为了离散化AUC曲线,使用临界值的线性间隔来计算召回率和准确率的值。用false positive的召回值高度计算ROC曲线面积,用recall的准确值高度计算PR曲线面积。 + +参数: + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + - **curve** (str) - 将要计算的曲线名的详情,曲线包括ROC(默认)或者PR(Precision-Recall-curve)。 + +返回:初始化后的 ``Auc`` 对象 + +返回类型:Auc + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + # 初始化auc度量 + auc_metric = fluid.metrics.Auc("ROC") + + # 假设batch_size为128 + batch_num = 100 + batch_size = 128 + + for batch_id in range(batch_num): + + class0_preds = np.random.random(size = (batch_size, 1)) + class1_preds = 1 - class0_preds + + preds = np.concatenate((class0_preds, class1_preds), axis=1) + + labels = np.random.randint(2, size = (batch_size, 1)) + auc_metric.update(preds = preds, labels = labels) + + # 应为一个接近0.5的值,因为preds是随机指定的 + print("auc for iteration %d is %.2f" % (batch_id, auc_metric.eval())) + +.. py:method:: update(preds, labels) + +用给定的预测值和标签更新Auc曲线。 + +参数: + - **preds** (numpy.array) - 维度为[batch_size, 2],preds[i][j]表示将实例i划分为类别j的概率。 + - **labels** (numpy.array) - 维度为[batch_size, 1],labels[i]为0或1,代表实例i的标签。 + +返回:无 + +.. py:method:: eval() + +该函数计算并返回Auc值。 + +返回:Auc值 + +返回类型:float + diff --git a/doc/paddle/api/paddle/fluid/metrics/ChunkEvaluator_cn.rst b/doc/paddle/api/paddle/fluid/metrics/ChunkEvaluator_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c289db61aed83e2e4a33f8603e70c280a63b2fb2 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/metrics/ChunkEvaluator_cn.rst @@ -0,0 +1,69 @@ +.. _cn_api_fluid_metrics_ChunkEvaluator: + +ChunkEvaluator +------------------------------- + +.. py:class:: paddle.fluid.metrics.ChunkEvaluator(name=None) + + + +该接口使用mini-batch的chunk_eval累计的counter numbers,来计算准确率、召回率和F1值。ChunkEvaluator有三个状态num_infer_chunks,num_label_chunks和num_correct_chunks,分别对应语块数目、标签中的语块数目、正确识别的语块数目。对于chunking的基础知识,请参考 https://www.aclweb.org/anthology/N01-1025 。ChunkEvalEvaluator计算块检测(chunk detection)的准确率,召回率和F1值,支持IOB, IOE, IOBES和IO标注方案。 + +参数: + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:初始化后的 ``ChunkEvaluator`` 对象 + +返回类型:ChunkEvaluator + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + + # 初始化chunck-level的评价管理。 + metric = fluid.metrics.ChunkEvaluator() + + # 假设模型预测10个chuncks,其中8个为正确,且真值有9个chuncks。 + num_infer_chunks = 10 + num_label_chunks = 9 + num_correct_chunks = 8 + + metric.update(num_infer_chunks, num_label_chunks, num_correct_chunks) + numpy_precision, numpy_recall, numpy_f1 = metric.eval() + + print("precision: %.2f, recall: %.2f, f1: %.2f" % (numpy_precision, numpy_recall, numpy_f1)) + + # 下一个batch,完美地预测了3个正确的chuncks。 + num_infer_chunks = 3 + num_label_chunks = 3 + num_correct_chunks = 3 + + metric.update(num_infer_chunks, num_label_chunks, num_correct_chunks) + numpy_precision, numpy_recall, numpy_f1 = metric.eval() + + print("precision: %.2f, recall: %.2f, f1: %.2f" % (numpy_precision, numpy_recall, numpy_f1)) + +.. py:method:: update(num_infer_chunks, num_label_chunks, num_correct_chunks) + +该函数使用输入的(num_infer_chunks, num_label_chunks, num_correct_chunks)来累计更新ChunkEvaluator对象的对应状态,更新方式如下: + + .. math:: + \\ \begin{array}{l}{\text { self. 
+
+参数:
+    - **num_infer_chunks** (int|numpy.array) – 给定mini-batch中预测出的语块数目。
+    - **num_label_chunks** (int|numpy.array) - 给定mini-batch的标签中的语块数目。
+    - **num_correct_chunks** (int|numpy.array) - 给定mini-batch中正确识别的语块数目。
+
+返回:无
+
+.. py:method:: eval()
+
+该函数计算并返回累计的准确率、召回率和F1值。
+
+返回:准确率、召回率和F1值
+
+返回类型:tuple,包含三个float
+
diff --git a/doc/paddle/api/paddle/fluid/metrics/CompositeMetric_cn.rst b/doc/paddle/api/paddle/fluid/metrics/CompositeMetric_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2b9382f7870ca4a104a6e63fb0e0ddb5e67b64d0
--- /dev/null
+++ b/doc/paddle/api/paddle/fluid/metrics/CompositeMetric_cn.rst
@@ -0,0 +1,79 @@
+.. _cn_api_fluid_metrics_CompositeMetric:
+
+CompositeMetric
+-------------------------------
+
+.. py:class:: paddle.fluid.metrics.CompositeMetric(name=None)
+
+
+
+
+创建一个可以容纳若干个评价指标(如F1, accuracy, recall等)的容器,评价指标添加完成后,通过调用eval()方法可自动计算该容器内的所有评价指标。
+
+**注意,只有输入参数列表完全相同的评价指标才可被加入到同一个CompositeMetric实例内。**
+
+继承自:MetricBase
+
+**代码示例**
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+    import numpy as np
+    preds = [[0.1], [0.7], [0.8], [0.9], [0.2],
+             [0.2], [0.3], [0.5], [0.8], [0.6]]
+    labels = [[0], [1], [1], [1], [1],
+              [0], [0], [0], [0], [0]]
+    preds = np.array(preds)
+    labels = np.array(labels)
+
+    comp = fluid.metrics.CompositeMetric()
+    precision = fluid.metrics.Precision()
+    recall = fluid.metrics.Recall()
+    comp.add_metric(precision)
+    comp.add_metric(recall)
+
+    comp.update(preds=preds, labels=labels)
+    numpy_precision, numpy_recall = comp.eval()
+    print("expect precision: %.2f, got %.2f" % (3. / 5, numpy_precision))
+    print("expect recall: %.2f, got %.2f" % (3. / 4, numpy_recall))
+
+
+.. py:method:: add_metric(metric)
+
+向容器内添加一个新的评价指标。注意新添加的评价指标的输入参数列表必须与容器里已有的其他指标保持一致。
+
+参数:
+    - **metric** (MetricBase) – 评价指标对象,一个MetricBase的实例。
+
+返回:无
+
+
+.. py:method:: update(preds, labels)
+
+更新容器中的每个评价指标。
+
+参数:
+    - **preds** (numpy.array) - 当前mini-batch的预测结果,输入的shape和dtype应与该容器内添加的评价指标的要求保持一致。
+    - **labels** (numpy.array) - 当前mini-batch的真实标签,输入的shape和dtype应与该容器内添加的评价指标的要求保持一致。
+
+返回:无
+
+.. py:method:: eval()
+
+按照添加顺序计算出各个评价指标。
+
+参数: 无
+
+返回: 列表形式存储的各个评价指标的计算结果,每个计算结果的数据类型和shape取决于被添加的评价指标的定义。
+
+返回类型: list
+
+
+
diff --git a/doc/paddle/api/paddle/fluid/metrics/DetectionMAP_cn.rst b/doc/paddle/api/paddle/fluid/metrics/DetectionMAP_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..44d3700e5535c485a78361ba82c97c7c5b81ca87
--- /dev/null
+++ b/doc/paddle/api/paddle/fluid/metrics/DetectionMAP_cn.rst
@@ -0,0 +1,86 @@
+.. _cn_api_fluid_metrics_DetectionMAP:
+
+DetectionMAP
+-------------------------------
+
+.. py:class:: paddle.fluid.metrics.DetectionMAP(input, gt_label, gt_box, gt_difficult=None, class_num=None, background_label=0, overlap_threshold=0.5, evaluate_difficult=True, ap_version='integral')
+
+
+
+
+该OP用于计算检测网络的平均精度(mAP)。mAP是衡量object detector(如Faster R-CNN、SSD等)精度的常用指标,它不是召回率,而是不同召回率水平下最大精度的平均值。
+
+通常步骤如下:
+
+1. 根据检测结果(input)和label,计算True Positive(TP,真正例)和False Positive(FP,假正例)
+2. 
计算map,支持 ``11 point`` 和 ``integral`` 模式 + +请从以下文章中获取更多信息: + - https://sanchom.wordpress.com/tag/average-precision/ + - https://arxiv.org/abs/1512.0232 + +参数: + - **input** (Variable) – detection的输出结果,一个 shape=[M, 6] 的 LoDtensor。布局为[label, confidence, xmin, ymin, xmax, ymax],label为类别标签,confidence为置信度,xmin,ymin为检测框左上点坐标,xmax,ymax为检测框右下点坐标,数据类型为float32或float64。 + - **gt_label** (Variable) – ground truth label 的索引,它是一个形状为[N, 1]的LoDtensor,数据类型为float32或float64。 + - **gt_box** (Variable) – ground truth bounds box (bbox),是一个具有形状的LoD张量[N, 4]。布局是[xmin, ymin, xmax, ymax],数据类型为float32或float64。 + - **gt_difficult** (Variable|None, 可选) – 指定这个ground truth是否是一个difficult bounding bbox,它可以是一个 shape=[N, 1]的LoDTensor,也可以不被指定。默认设置为None,表示所有的ground truth标签都不是difficult bbox,数据类型为float32或float64。 + - **class_num** (int) – 检测类别的数目。 + - **background_label** (int) – 背景标签的索引,背景标签将被忽略。如果设置为-1,则所有类别将被考虑,默认为0。 + - **overlap_threshold** (float) – 判断真假阳性的阈值,默认为0.5。 + - **evaluate_difficult** (bool) – 是否考虑 difficult ground truth 进行评价,默认为 True。当 gt_difficult 为 None 时,这个参数不起作用。 + - **ap_version** (str) – 平均精度的计算方法,必须是 "integral" 或 "11point"。详情请查看 https://sanchom.wordpress.com/tag/average-precision/。 其中,11point为:11-point 插值平均精度。积分: precision-recall曲线的自然积分。 + +返回:变量(Variable) 计算mAP的结果,其中数据类型为float32或float64。 + +返回类型:变量(Variable) + + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + batch_size = -1 # 可以为任意大小 + image_boxs_num = 10 + bounding_bboxes_num = 21 + + pb = fluid.data(name='prior_box', shape=[image_boxs_num, 4], + dtype='float32') + + pbv = fluid.data(name='prior_box_var', shape=[image_boxs_num, 4], + dtype='float32') + + loc = fluid.data(name='target_box', shape=[batch_size, bounding_bboxes_num, 4], + dtype='float32') + + scores = fluid.data(name='scores', shape=[batch_size, bounding_bboxes_num, image_boxs_num], + dtype='float32') + + nmsed_outs = fluid.layers.detection_output(scores=scores, + loc=loc, prior_box=pb, prior_box_var=pbv) + + gt_box = fluid.data(name="gt_box", shape=[batch_size, 4], dtype="float32") + gt_label = fluid.data(name="gt_label", shape=[batch_size, 1], dtype="float32") + difficult = fluid.data(name="difficult", shape=[batch_size, 1], dtype="float32") + + exe = fluid.Executor(fluid.CUDAPlace(0)) + map_evaluator = fluid.metrics.DetectionMAP(nmsed_outs, gt_label, gt_box, difficult, class_num = 3) + cur_map, accum_map = map_evaluator.get_map_var() + + + +.. py:method:: get_map_var() + +返回:当前 mini-batch 的 mAP 变量和不同 mini-batch 的 mAP 累加和 + +.. py:method:: reset(executor, reset_program=None) + +在指定的 batch 结束或者用户指定的开始时重置度量状态。 + +参数: + - **executor** (Executor) – 执行reset_program的执行程序 + - **reset_program** (Program|None, 可选) – 单个program 的 reset 过程。如果设置为 None,将创建一个 program + + + diff --git a/doc/paddle/api/paddle/fluid/metrics/EditDistance_cn.rst b/doc/paddle/api/paddle/fluid/metrics/EditDistance_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9ffd2a0165d7e718d1cf08d15be5e90f64cf916b --- /dev/null +++ b/doc/paddle/api/paddle/fluid/metrics/EditDistance_cn.rst @@ -0,0 +1,68 @@ +.. _cn_api_fluid_metrics_EditDistance: + +EditDistance +------------------------------- + +.. py:class:: paddle.fluid.metrics.EditDistance(name) + + + + +用于管理字符串的编辑距离。编辑距离是通过计算将一个字符串转换为另一个字符串所需的最小编辑操作数(添加、删除或替换)来量化两个字符串(例如单词)彼此不相似的程度一种方法。 参考 https://en.wikipedia.org/wiki/Edit_distance。 + +**代码示例** + +.. 
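note::
+    为便于理解 ``update`` / ``eval`` 的语义,下面先给出一段纯Python的示意代码(假设性示例,并非该接口的实现):平均编辑距离等于累计的距离之和除以累计的序列对数,错误实例比例等于编辑距离不为零的序列对数除以累计的序列对数。
+
+.. code-block:: python
+
+    # 示意代码:手工累计平均编辑距离与错误实例比例(距离数值为假设值)
+    import numpy as np
+
+    total_distance, seq_num, instance_error = 0.0, 0, 0
+
+    distances = np.array([[0], [2], [5], [0]])           # 假设某个batch得到的编辑距离
+    total_distance += distances.sum()
+    seq_num += len(distances)
+    instance_error += int((distances != 0).sum())
+
+    avg_distance = total_distance / seq_num              # 对应eval()返回的平均编辑距离
+    wrong_instance_ratio = instance_error / float(seq_num)  # 对应eval()返回的错误实例比例
+    print(avg_distance, wrong_instance_ratio)
+
+下面是使用 ``fluid.metrics.EditDistance`` 的完整示例:
+
+.. 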
code-block:: python + + import paddle.fluid as fluid + import numpy as np + + # 假设batch_size为128 + batch_size = 128 + + # 初始化编辑距离管理器 + distance_evaluator = fluid.metrics.EditDistance("EditDistance") + # 生成128个序列对间的编辑距离,此处的最大距离是10 + edit_distances_batch0 = np.random.randint(low = 0, high = 10, size = (batch_size, 1)) + seq_num_batch0 = batch_size + + distance_evaluator.update(edit_distances_batch0, seq_num_batch0) + avg_distance, wrong_instance_ratio = distance_evaluator.eval() + print("the average edit distance for batch0 is %.2f and the wrong instance ratio is %.2f " % (avg_distance, wrong_instance_ratio)) + edit_distances_batch1 = np.random.randint(low = 0, high = 10, size = (batch_size, 1)) + seq_num_batch1 = batch_size + + distance_evaluator.update(edit_distances_batch1, seq_num_batch1) + avg_distance, wrong_instance_ratio = distance_evaluator.eval() + print("the average edit distance for batch0 and batch1 is %.2f and the wrong instance ratio is %.2f " % (avg_distance, wrong_instance_ratio)) + + +.. py:method:: reset() + +清空存储结果。 + +参数:无 + +返回:无 + + +.. py:method:: update(distances, seq_num) + +更新存储结果 + +参数: + - **distances** – 一个形状为(batch_size, 1)的numpy.array,每个元素代表两个序列间的距离。 + - **seq_num** – 一个整型/浮点型值,代表序列对的数量。 + +返回:无 + +.. py:method:: eval() + +返回两个浮点数: +avg_distance:使用更新函数更新的所有序列对的平均距离。 +avg_instance_error:编辑距离不为零的序列对的比例。 + + + + + diff --git a/doc/paddle/api/paddle/fluid/metrics/MetricBase_cn.rst b/doc/paddle/api/paddle/fluid/metrics/MetricBase_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..91cfa45e4f86514a1873576088dc10bb30af03d9 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/metrics/MetricBase_cn.rst @@ -0,0 +1,67 @@ +.. _cn_api_fluid_metrics_MetricBase: + +MetricBase +------------------------------- + +.. py:class:: paddle.fluid.metrics.MetricBase(name) + + + + +在评估神经网络效果的时候,由于我们常常需要把测试数据切分成mini-batch,并逐次将每个mini-batch送入神经网络进行预测和评估,因此我们每次只能获得当前batch下的评估结果,而并不能一次性获得整个测试集的评估结果。paddle.fluid.metrics正是为了解决这些问题而设计的,大部分paddle.fluid.metrics下的类都具有如下功能: + +1. 接受模型对一个batch的预测结果(numpy.array)和这个batch的原始标签(numpy.array)作为输入,并进行特定的计算(如计算准确率,召回率等)。 + +2. 将当前batch评估结果和历史评估结果累计起来,以获取目前处理过的所有batch的整体评估结果。 + +MetricBase是所有paddle.fluid.metrics下定义的所有python类的基类,它定义了一组接口,并需要所有继承他的类实现具体的计算逻辑,包括: + +1. update(preds, labels):给定当前计算当前batch的预测结果(preds)和标签(labels),计算这个batch的评估结果。 + +2. eval():合并当前累积的每个batch的评估结果,并返回整体评估结果。 + +3. reset():清空累积的每个batch的评估结果。 + +.. py:method:: __init__(name) + +构造函数,参数name表示当前创建的评估器的名字。 + +参数: + - **name** (str) - 当前创建的评估器的名字,用于区分不同的评估器,例如准确率(accuracy)或者其他自定义名字(如,my_evaluator)。 + +返回:一个python对象,表示一个具体的评估器。 + +返回类型:python对象 + +.. py:method:: reset() + +空累积的每个batch的评估结果。 + +返回:无 + +.. py:method:: update(preds,labels) + +给定当前计算当前batch的预测结果(preds)和标签(labels),计算这个batch的评估结果,并将这个评估结果在评估器内部记录下来,注意update函数并不会返回评估结果。 + +参数: + - **preds** (numpy.array) - 当前minibatch的预测结果。 + - **labels** (numpy.array) - 当前minibatch的标签。 + +返回:无 + +.. py:method:: eval() + +合并当前累积的每个batch的评估结果,并返回整体评估结果。 + +返回:当前累积batch的整体评估结果。 + +返回类型:float|list(float)|numpy.array + +.. py:method:: get_config() + +获取当前评估器的状态,特指评估器内部没有 ``_`` 前缀的所有成员变量。 + +返回:一个python字典,包含了当前评估器内部的状态。 + +返回类型:python字典(dict) + diff --git a/doc/paddle/api/paddle/fluid/metrics/Precision_cn.rst b/doc/paddle/api/paddle/fluid/metrics/Precision_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..29a1cb89a9ebc7185c8ddb596fe8e1494ece800d --- /dev/null +++ b/doc/paddle/api/paddle/fluid/metrics/Precision_cn.rst @@ -0,0 +1,62 @@ +.. 
_cn_api_fluid_metrics_Precision: + +Precision +------------------------------- + +.. py:class:: paddle.fluid.metrics.Precision(name=None) + + + + +精确率Precision(也称为 positive predictive value,正预测值)是被预测为正样例中实际为正的比例。 https://en.wikipedia.org/wiki/Evaluation_of_binary_classifiers 该类管理二分类任务的precision分数。 + + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + metric = fluid.metrics.Precision() + + # 生成预测值和标签 + + preds = [[0.1], [0.7], [0.8], [0.9], [0.2], + [0.2], [0.3], [0.5], [0.8], [0.6]] + + labels = [[0], [1], [1], [1], [1], + [0], [0], [0], [0], [0]] + + preds = np.array(preds) + labels = np.array(labels) + + metric.update(preds=preds, labels=labels) + precision = metric.eval() + + print("expected precision: %.2f and got %.2f" % ( 3.0 / 5.0, precision)) + + + +.. py:method:: update(preds, labels) + +使用当前mini-batch的预测结果更新精确率的计算。 + +参数: + - **preds** (numpy.array) - 当前mini-batch的预测结果,二分类sigmoid函数的输出,shape为[batch_size, 1],数据类型为'float64'或'float32'。 + - **labels** (numpy.array) - 当前mini-batch的真实标签,输入的shape应与preds保持一致,shape为[batch_size, 1],数据类型为'int32'或'int64' + +返回:无 + + + +.. py:method:: eval() + +计算出最终的精确率。 + +参数:无 + +返回: 精确率的计算结果。标量输出,float类型 +返回类型:float + + diff --git a/doc/paddle/api/paddle/fluid/metrics/Recall_cn.rst b/doc/paddle/api/paddle/fluid/metrics/Recall_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3d4975fc55e42f251e983fd551c760511407f003 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/metrics/Recall_cn.rst @@ -0,0 +1,71 @@ +.. _cn_api_fluid_metrics_Recall: + +Recall +------------------------------- + +.. py:class:: paddle.fluid.metrics.Recall(name=None) + + + + +召回率Recall(也称为敏感度)是指得到的相关实例数占相关实例总数的比例。https://en.wikipedia.org/wiki/Precision_and_recall 该类管理二分类任务的召回率。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + metric = fluid.metrics.Recall() + # 生成预测值和标签 + preds = [[0.1], [0.7], [0.8], [0.9], [0.2], + [0.2], [0.3], [0.5], [0.8], [0.6]] + labels = [[0], [1], [1], [1], [1], + [0], [0], [0], [0], [0]] + + preds = np.array(preds) + labels = np.array(labels) + + metric.update(preds=preds, labels=labels) + recall = metric.eval() + + print("expected recall: %.2f and got %.2f" % ( 3.0 / 4.0, recall)) + + + +.. py:method:: update(preds, labels) + +使用当前mini-batch的预测结果更新召回率的计算。 + +参数: + - **preds** (numpy.array) - 当前mini-batch的预测结果,二分类sigmoid函数的输出,shape为[batch_size, 1],数据类型为'float64'或'float32'。 + - **labels** (numpy.array) - 当前mini-batch的真实标签,输入的shape应与preds保持一致,shape为[batch_size, 1],数据类型为'int32'或'int64' + +返回:无 + + + +.. py:method:: eval() + +计算出最终的召回率。 + +参数:无 + +返回:召回率的计算结果。标量输出,float类型 +返回类型:float + + + + + + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/nets/glu_cn.rst b/doc/paddle/api/paddle/fluid/nets/glu_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..cdb4966715220dd4fd80d6f364a16c30468c9c52 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/nets/glu_cn.rst @@ -0,0 +1,44 @@ +.. _cn_api_fluid_nets_glu: + +glu +------------------------------- + +.. py:function:: paddle.fluid.nets.glu(input, dim=-1) + + + + +门控线性单元 Gated Linear Units (GLU) 由 :ref:`cn_api_fluid_layers_split` ,:ref:`cn_api_fluid_layers_sigmoid` 和 :ref:`cn_api_fluid_layers_elementwise_mul` 组成。特定的,沿着给定维度将输入拆分成两个大小相同的部分,:math:`a` 和 :math:`b` ,按如下方式计算: + +.. 
math:: + GLU(a,b) = a \bigotimes \sigma (b) + + +参考论文: `Language Modeling with Gated Convolutional Networks `_ + +参数: + - **input** (Variable) - 输入变量,多维 Tensor 或 LoDTensor, 支持的数据类型为float32、float64 和 float16(GPU)。 + - **dim** (int) - 拆分的维度。如果 :math:`dim<0` ,拆分的维为 :math:`rank(input) + dim` 。默认为 -1,即最后一维。 + +返回: 计算结果,尺寸为输入大小的一半,数据类型与输入的数据类型相同 + +返回类型:变量(Variable) + +**代码示例:** + +.. code-block:: python + + import paddle.fluid as fluid + data = fluid.layers.data( + name="words", shape=[-1, 6, 3, 9], dtype="float32") + # 输出的形状为[-1, 3, 3, 9] + output = fluid.nets.glu(input=data, dim=1) + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/nets/img_conv_group_cn.rst b/doc/paddle/api/paddle/fluid/nets/img_conv_group_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..27cb510c8c2739b5a1a73f5f29ce455c22d67401 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/nets/img_conv_group_cn.rst @@ -0,0 +1,58 @@ +.. _cn_api_fluid_nets_img_conv_group: + +img_conv_group +------------------------------- + + +.. py:function:: paddle.fluid.nets.img_conv_group(input, conv_num_filter, pool_size, conv_padding=1, conv_filter_size=3, conv_act=None, param_attr=None, conv_with_batchnorm=False, conv_batchnorm_drop_rate=0.0, pool_stride=1, pool_type='max', use_cudnn=True) + + + + +Image Convolution Group由Convolution2d,BatchNorm,DropOut和Pool2d组成。根据输入参数,img_conv_group将使用Convolution2d,BatchNorm,DropOut对Input进行连续计算,得到最后结果。 + +参数: + - **input** (Variable) - 输入,格式为[N,C,H,W]的4-D Tensor。数据类型:float32和float64。 + - **conv_num_filter** (list | tuple) - 卷积中使用的滤波器数。 + - **pool_size** (int | list | tuple) - 池化层中池化核的大小。如果pool_size是列表或元组,则它必须包含两个整数(pool_size_height,pool_size_width)。否则,pool_size_height = pool_size_width = pool_size。 + - **conv_padding** (int | list | tuple) - 卷积层中的填充 ``padding`` 的大小。如果 ``padding`` 是列表或元组,则其长度必须等于 ``conv_num_filter`` 的长度。否则,所有卷积的 ``conv_padding`` 都是相同的。默认:1。 + - **conv_filter_size** (int | list | tuple) - 卷积层中滤波器大小。如果filter_size是列表或元组,则其长度必须等于 ``conv_num_filter`` 的长度。否则,所有卷积的 ``conv_filter_size`` 都是相同的。默认:3。 + - **conv_act** (str) - 卷积层之后接的的激活层类型, ``BatchNorm`` 后面没有。默认:None。 + - **param_attr** (ParamAttr|None) :指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。conv2d算子默认的权重初始化是Xavier。 + - **conv_with_batchnorm** (bool | list) - 表示在卷积层之后是否使用 ``BatchNorm`` 。如果 ``conv_with_batchnorm`` 是一个列表,则其长度必须等于 ``conv_num_filter`` 的长度。否则, ``conv_with_batchnorm`` 指示是否所有卷积层后都使用 ``BatchNorm`` 。默认:False。 + - **conv_batchnorm_drop_rate** (float | list) - 表示 ``BatchNorm`` 之后的 ``Dropout Layer`` 的 ``drop_rate`` 。如果 ``conv_batchnorm_drop_rate`` 是一个列表,则其长度必须等于 ``conv_num_filter`` 的长度。否则,所有 ``Dropout Layers`` 的 ``drop_rate`` 都是 ``conv_batchnorm_drop_rate`` 。默认:0.0。 + - **pool_stride** (int | list | tuple) - 池化层的池化步长。如果 ``pool_stride`` 是列表或元组,则它必须包含两个整数(pooling_stride_height,pooling_stride_width)。否则,pooling_stride_height = pooling_stride_width = pool_stride。默认:1。 + - **pool_type** (str) - 池化类型可以是最大池化的 ``max`` 和平均池化的 ``avg`` 。默认:max。 + - **use_cudnn** (bool) - 是否使用cudnn内核,仅在安装cudnn库时才有效。默认值:True + +返回: Tensor。使用Convolution2d,BatchNorm,DropOut和Pool2d进行串行计算后的最终结果。 + +返回类型: Variable + +**代码示例**: + +.. 
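note::
+    下面先给出一个演示列表型参数的示意示例(假设性配置,并非唯一用法):当 ``conv_num_filter`` 含有多个元素时,``conv_with_batchnorm``、``conv_batchnorm_drop_rate`` 等参数可以写成与其等长的列表,按层分别生效。
+
+.. code-block:: python
+
+    # 示意代码:两个卷积层,仅第一层后接BatchNorm与dropout(参数取值为假设值)
+    import paddle.fluid as fluid
+
+    img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32')
+    conv_pool = fluid.nets.img_conv_group(input=img,
+                                          conv_num_filter=[32, 64],
+                                          conv_filter_size=3,
+                                          conv_act="relu",
+                                          conv_with_batchnorm=[True, False],
+                                          conv_batchnorm_drop_rate=[0.2, 0.0],
+                                          pool_size=2,
+                                          pool_stride=2)
+
+下面是完整的可运行示例:
+
+.. 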
code-block:: python + + import paddle.fluid as fluid + import numpy as np + img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32') + conv_pool = fluid.nets.img_conv_group(input=img, + conv_padding=1, + conv_num_filter=[3, 3], + conv_filter_size=3, + conv_act="relu", + pool_size=2, + pool_stride=2) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + np_x = np.random.random(size=(1, 1, 28, 28)).astype('float32') + output = exe.run(feed={"img": np_x}, fetch_list = [conv_pool]) + print(output) + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/nets/scaled_dot_product_attention_cn.rst b/doc/paddle/api/paddle/fluid/nets/scaled_dot_product_attention_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6290de8c59ef77f7fb09c22468bbae9cbd7da5dd --- /dev/null +++ b/doc/paddle/api/paddle/fluid/nets/scaled_dot_product_attention_cn.rst @@ -0,0 +1,56 @@ +.. _cn_api_fluid_nets_scaled_dot_product_attention: + +scaled_dot_product_attention +------------------------------- + + +.. py:function:: paddle.fluid.nets.scaled_dot_product_attention(queries, keys, values, num_heads=1, dropout_rate=0.0) + + + + +该接口实现了的基于点积(并进行了缩放)的多头注意力(Multi-Head Attention)机制。attention可以表述为将一个查询(query)和一组键值对(key-value pair)映射为一个输出;Multi-Head Attention则是使用多路进行attention,而且对attention的输入进行了线性变换。公式如下: + + +.. math:: + + MultiHead(Q, K, V ) & = Concat(head_1, ..., head_h)\\ + where \ head_i & = Attention(QW_i^Q , KW_i^K , VW_i^V )\\ + Attention(Q, K, V) & = softmax(\frac{QK^\mathrm{T}}{\sqrt{d_k}})V\\ + +其中, :math:`Q, K, V` 分别对应 ``queries``、 ``keys`` 和 ``values`` ,详细内容请参阅 `Attention Is All You Need `_ + +要注意该接口实现支持的是batch形式, :math:`Attention(Q, K, V)` 中使用的矩阵乘是batch形式的矩阵乘法,参考 fluid.layers. :ref:`cn_api_fluid_layers_matmul` 。 + +参数: + - **queries** (Variable) - 形状为 :math:`[N, L_q, d_k \times h]` 的三维Tensor,其中 :math:`N` 为batch_size, :math:`L_q` 为查询序列长度, :math:`d_k \times h` 为查询的特征维度大小,:math:`h` 为head数。数据类型为float32或float64。 + - **keys** (Variable) - 形状为 :math:`[N, L_k, d_k \times h]` 的三维Tensor,其中 :math:`N` 为batch_size, :math:`L_k` 为键值序列长度, :math:`d_k \times h` 为键的特征维度大小,:math:`h` 为head数。数据类型与 ``queries`` 相同。 + - **values** (Variable) - 形状为 :math:`[N, L_k, d_v \times h]` 的三维Tensor,其中 :math:`N` 为batch_size, :math:`L_k` 为键值序列长度, :math:`d_v \times h` 为值的特征维度大小,:math:`h` 为head数。数据类型与 ``queries`` 相同。 + - **num_heads** (int) - 指明所使用的head数。head数为1时不对输入进行线性变换。默认值为1。 + - **dropout_rate** (float) - 以指定的概率对要attention到的内容进行dropout。默认值为0,即不使用dropout。 + +返回: 形状为 :math:`[N, L_q, d_v * h]` 的三维Tensor,其中 :math:`N` 为batch_size, :math:`L_q` 为查询序列长度, :math:`d_v * h` 为值的特征维度大小。与输入具有相同的数据类型。表示Multi-Head Attention的输出。 + +返回类型: Variable + +抛出异常: + - :code:`ValueError`: ``queries`` 、 ``keys`` 和 ``values`` 必须都是三维。 + - :code:`ValueError`: ``queries`` 和 ``keys`` 的最后一维(特征维度)大小必须相同。 + - :code:`ValueError`: ``keys`` 和 ``values`` 的第二维(长度维度)大小必须相同。 + - :code:`ValueError`: ``keys`` 的最后一维(特征维度)大小必须是 ``num_heads`` 的整数倍。 + - :code:`ValueError`: ``values`` 的最后一维(特征维度)大小必须是 ``num_heads`` 的整数倍。 + + +**代码示例** + +.. 
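note::
+    为帮助理解上述公式,下面先给出单头(num_heads=1)情形下用NumPy计算 :math:`softmax(\frac{QK^T}{\sqrt{d_k}})V` 的示意代码(假设性示例,并非该接口的实现,也不包含多头的线性变换):
+
+.. code-block:: python
+
+    # 示意代码:batch形式的缩放点积注意力,shape约定与上文参数说明一致
+    import numpy as np
+
+    N, L_q, L_k, d_k, d_v = 3, 5, 6, 9, 10
+    Q = np.random.rand(N, L_q, d_k).astype("float32")
+    K = np.random.rand(N, L_k, d_k).astype("float32")
+    V = np.random.rand(N, L_k, d_v).astype("float32")
+
+    scores = np.matmul(Q, K.transpose(0, 2, 1)) / np.sqrt(d_k)   # [N, L_q, L_k]
+    scores = scores - scores.max(axis=-1, keepdims=True)         # 数值稳定的softmax
+    weights = np.exp(scores) / np.exp(scores).sum(axis=-1, keepdims=True)
+    context = np.matmul(weights, V)                              # [N, L_q, d_v]
+    print(context.shape)                                         # (3, 5, 10)
+
+下面是调用该接口的示例:
+
+.. 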
code-block:: python + + import paddle.fluid as fluid + + queries = fluid.data(name="queries", shape=[3, 5, 9], dtype="float32") + keys = fluid.data(name="keys", shape=[3, 6, 9], dtype="float32") + values = fluid.data(name="values", shape=[3, 6, 10], dtype="float32") + contexts = fluid.nets.scaled_dot_product_attention(queries, keys, values) + contexts.shape # [3, 5, 10] + + diff --git a/doc/paddle/api/paddle/fluid/nets/sequence_conv_pool_cn.rst b/doc/paddle/api/paddle/fluid/nets/sequence_conv_pool_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..588fe5375f41bc40ddc94e81f9e7623ab7423347 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/nets/sequence_conv_pool_cn.rst @@ -0,0 +1,53 @@ +.. _cn_api_fluid_nets_sequence_conv_pool: + +sequence_conv_pool +------------------------------- + + +.. py:function:: paddle.fluid.nets.sequence_conv_pool(input, num_filters, filter_size, param_attr=None, act='sigmoid', pool_type='max', bias_attr=None) + + + + +**注意:该OP的输入** ``input`` **必须是2维LoDTensor, lod_level为1,如果输入是Tensor,建议使用** :ref:`cn_api_fluid_nets_simple_img_conv_pool` **代替** + +该接口由序列卷积( :ref:`cn_api_fluid_layers_sequence_conv` )和池化( :ref:`cn_api_fluid_layers_sequence_pool` )组成 + +参数: + - **input** (Variable) - sequence_conv的输入,LoDTensor, lod_level为1,支持时间长度可变的输入序列。当前输入为shape为(T,N)的矩阵,T是mini-batch中的总时间步数,N是input_hidden_size。数据类型为float32或者float64 + - **num_filters** (int)- 卷积核的数目,整数 + - **filter_size** (int)- 卷积核的大小,整数 + - **param_attr** (ParamAttr,可选) - sequence_conv层的参数属性,类型是ParamAttr或者None。默认值为None + - **act** (str|None,可选) - sequence_conv层的激活函数类型,字符串,可以是'relu', 'softmax', 'sigmoid'等激活函数的类型。如果设置为None,则不使用激活。默认值为'sigmoid' + - **pool_type** (str,可选) - 池化类型,字符串。可以是'max', 'average', 'sum'或者'sqrt'。默认值为'max' + - **bias_attr** (ParamAttr|bool,可选) – sequence_conv偏置的参数属性,类型可以是bool,ParamAttr或者None。如果设置为False,则不会向输出单元添加偏置。如果将参数设置为ParamAttr的None或one属性,sequence_conv将创建ParamAttr作为bias_attr。如果未设置bias_attr的初始化器,则初始化偏差为零。默认值为None + - **name** (str|None,可选) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None + +返回:经过sequence_conv和sequence_pool两个操作之后的结果所表示的Tensor,数据类型与 ``input`` 相同 + + +返回类型:Variable + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + input_dim = 100 #len(word_dict) + emb_dim = 128 + hid_dim = 512 + data = fluid.layers.data( name="words", shape=[1], dtype="int64", lod_level=1) + emb = fluid.layers.embedding(input=data, size=[input_dim, emb_dim], is_sparse=True) + seq_conv = fluid.nets.sequence_conv_pool(input=emb, + num_filters=hid_dim, + filter_size=3, + act="tanh", + pool_type="sqrt") + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/nets/simple_img_conv_pool_cn.rst b/doc/paddle/api/paddle/fluid/nets/simple_img_conv_pool_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f5c7e4a4a79d4be93b4a79c936901908859f33b3 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/nets/simple_img_conv_pool_cn.rst @@ -0,0 +1,61 @@ +.. _cn_api_fluid_nets_simple_img_conv_pool: + +simple_img_conv_pool +------------------------------- + + +.. 
py:function:: paddle.fluid.nets.simple_img_conv_pool(input, num_filters, filter_size, pool_size, pool_stride, pool_padding=0, pool_type='max', global_pooling=False, conv_stride=1, conv_padding=0, conv_dilation=1, conv_groups=1, param_attr=None, bias_attr=None, act=None, use_cudnn=True) + + + + + ``simple_img_conv_pool`` 由一个conv2d( :ref:`cn_api_fluid_layers_conv2d` )和一个pool2d( :ref:`cn_api_fluid_layers_pool2d` ) OP组成。 + +参数: + - **input** (Variable) - 输入图像, 4-D Tensor, 格式为[N,C,H,W]。数据类型是float32或者float64 + - **num_filters** (int) - 卷积核的数目,整数。 + - **filter_size** (int | list | tuple) - conv2d卷积核大小,整数或者整型列表或者整型元组。如果 ``filter_size`` 是列表或元组,则它必须包含两个整数(filter_size_H,filter_size_W)。如果是整数,则filter_size_H = filter_size_W = filter_size。 + - **pool_size** (int | list | tuple) - pool2d池化层大小,整数或者整型列表或者整型元组。如果pool_size是列表或元组,则它必须包含两个整数(pool_size_H,pool_size_W)。如果是整数,则pool_size_H = pool_size_W = pool_size。 + - **pool_stride** (int | list | tuple) - pool2d池化层步长,整数或者整型列表或者整型元组。如果pool_stride是列表或元组,则它必须包含两个整数(pooling_stride_H,pooling_stride_W)。如果是整数,pooling_stride_H = pooling_stride_W = pool_stride。 + - **pool_padding** (int | list | tuple,可选) - pool2d池化层的padding,整数或者整型列表或者整型元组。如果pool_padding是列表或元组,则它必须包含两个整数(pool_padding_H,pool_padding_W)。如果是整数,pool_padding_H = pool_padding_W = pool_padding。默认值为0。 + - **pool_type** (str,可选) - 池化类型,字符串,可以是 ``max`` 或者 ``avg`` ,分别对应最大池化和平均池化。默认 ``max`` 。 + - **global_pooling** (bool,可选)- 是否使用全局池化。如果global_pooling = true,则忽略pool_size和pool_padding。默认为False + - **conv_stride** (int | list | tuple,可选) - conv2d Layer的卷积步长,整数或者整型列表或者整型元组。如果conv_stride是列表或元组,则它必须包含两个整数,(conv_stride_H,conv_stride_W)。如果是整数,conv_stride_H = conv_stride_W = conv_stride。默认值:conv_stride = 1。 + - **conv_padding** (int | list | tuple,可选) - conv2d Layer的padding大小,整数或者整型列表或者整型元组。如果conv_padding是列表或元组,则它必须包含两个整数(conv_padding_H,conv_padding_W)。如果是整数,conv_padding_H = conv_padding_W = conv_padding。默认值:conv_padding = 0。 + - **conv_dilation** (int | list | tuple,可选) - conv2d Layer的dilation大小,整数或者整型列表或者整型元。如果conv_dilation是列表或元组,则它必须包含两个整数(conv_dilation_H,conv_dilation_W)。如果是整数,conv_dilation_H = conv_dilation_W = conv_dilation。默认值:conv_dilation = 1。 + - **conv_groups** (int,可选) - conv2d Layer的组数,整数。根据Alex Krizhevsky的Deep CNN论文中的分组卷积:当group = 2时,前半部分滤波器仅连接到输入通道的前半部分,而后半部分滤波器仅连接到后半部分输入通道。默认值:conv_groups = 1。 + - **param_attr** (ParamAttr,可选) - conv2d的weights参数属性。如果将其设置为None或ParamAttr的一个属性,则conv2d将创建ParamAttr作为param_attr。如果未设置param_attr的初始化,则使用 :math:`Normal(0.0,std)` 初始化参数,并且 ``std`` 为 :math:`(\frac{2.0 }{filter\_elem\_num})^{0.5}` 。默认值:None + - **bias_attr** (ParamAttr | bool | None,可选) - conv2d的bias参数属性。如果设置为False,则不会向输出单元添加bias。如果将其设置为None或ParamAttr的一个属性,则conv2d将创建ParamAttr作为bias_attr。如果设置bias_attr为None,则将其初始化为零。默认值:None + - **act** (str,可选) - conv2d的激活类型,字符串,可以是'relu', 'softmax', 'sigmoid'等激活函数的类型。如果设置为None,则不附加激活。默认值:None。 + - **use_cudnn** (bool,可选) - 是否使用cudnn内核,仅在安装cudnn库时才有效。默认值:True。 + - **name** (str|None,可选) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None + +返回: 输入input经过conv2d和pool2d之后输入的结果,数据类型与input相同 + +返回类型: Variable + +**示例代码** + +.. 
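note::
+    输出形状可以按如下方式推算(以下面示例中的参数为例,且使用默认的 ``conv_padding=0``、``conv_stride=1``):卷积后空间尺寸为 :math:`28 - 5 + 1 = 24`,再经 ``pool_size=2``、``pool_stride=2`` 的池化得到 :math:`24 / 2 = 12`,通道数等于 ``num_filters=20``,因此输出形状为 [-1, 20, 12, 12],与示例中的注释一致。
+
+.. 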
code-block:: python + + import paddle.fluid as fluid + # 输入 img.shape = [-1, 1, 28, 28] + # 使用该接口带如下参数的操作之后,输出conv_pool.shape = [-1, 20, 12, 12] + img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32') + conv_pool = fluid.nets.simple_img_conv_pool(input=img, + filter_size=5, + num_filters=20, + pool_size=2, + pool_stride=2, + act="relu") + + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/one_hot_cn.rst b/doc/paddle/api/paddle/fluid/one_hot_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2aaaf117e6fc9dc5ab26032db91bef286f14e567 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/one_hot_cn.rst @@ -0,0 +1,80 @@ +.. _cn_api_fluid_layers_one_hot: + +one_hot +------------------------------- + +.. py:function:: paddle.fluid.layers.one_hot(input, depth, allow_out_of_range=False) + + + + +**注意:此OP要求输入Tensor shape的最后一维必须为1。此OP将在未来的版本中被移除!推荐使用fluid.** :ref:`cn_api_fluid_one_hot` 。 + +该OP将输入(input)中的每个id转换为一个one-hot向量,其长度为 ``depth`` ,该id对应的向量维度上的值为1,其余维度的值为0。 + +输出的Tensor(或LoDTensor)的shape是将输入shape的最后一维替换为depth的维度。 + +- 示例1(allow_out_of_range=False): + +.. code-block:: python + + 输入: + X.shape = [4, 1] + X.data = [[1], [1], [3], [0]] + depth = 4 + + 输出: + Out.shape = [4, 4] + Out.data = [[0., 1., 0., 0.], + [0., 1., 0., 0.], + [0., 0., 0., 1.], + [1., 0., 0., 0.]] + +- 示例2 (allow_out_of_range=True): + +.. code-block:: python + + 输入: + X.shape = [4, 1] + X.data = [[1], [1], [5], [0]] + depth = 4 + allow_out_of_range=True + + 输出: + Out.shape = [4, 4] + Out.data = [[0., 1., 0., 0.], + [0., 1., 0., 0.], + [0., 0., 0., 0.], ## 这一维的值是5,超过了depth,因此填成0 + [1., 0., 0., 0.]] + +- 示例3 (allow_out_of_range=False): + +.. code-block:: python + + 输入: + X.shape = [4, 1] + X.data = [[1], [1], [5], [0]] + depth = 4 + allow_out_of_range=False + + 输出:抛出 Illegal value 的异常 + X中第2维的值是5,超过了depth,而allow_out_of_range=False表示不允许超过,因此抛异常。 + + +参数: + - **input** (Variable) - 维度为 :math:`[N_1, ..., N_n, 1]` 的多维Tensor或LoDTensor,维度至少两维,且最后一维必须是1。数据类型为int32或int64。 + - **depth** (int) - 用于定义一个one-hot向量的长度。若输入为词id,则 ``depth`` 通常取值为词典大小。 + - **allow_out_of_range** (bool) - 指明input中所包含的id值是否可以大于depth值。当超过depth时,如果 `allow_out_of_range` 为False,则会抛出 `Illegal value` 的异常;如果设置为True,该id对应的向量为0向量。默认值为False。 + +返回:转换后的one_hot Tensor或LoDTensor,数据类型为float32。 + +返回类型:Variable + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + # 该代码对应上述第一个示例,其中输入label的shape是[4, 1],输出one_hot_label的shape是[4, 4] + label = fluid.layers.data(name="label", shape=[4, 1], append_batch_size=False, dtype="int64") + one_hot_label = fluid.layers.one_hot(input=label, depth=4) diff --git a/doc/paddle/api/paddle/fluid/optimizer/Adam_cn.rst b/doc/paddle/api/paddle/fluid/optimizer/Adam_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6c57911eddad02aa0b43c62bb6230fd6a188112e --- /dev/null +++ b/doc/paddle/api/paddle/fluid/optimizer/Adam_cn.rst @@ -0,0 +1,262 @@ +.. _cn_api_paddle_optimizer_Adam: + +Adam +------------------------------- + +.. py:class:: paddle.optimizer.Adam(learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08, parameters=None, weight_decay=None, grad_clip=None, name=None, lazy_mode=False) + + + + +Adam优化器出自 `Adam论文 `_ 的第二节,能够利用梯度的一阶矩估计和二阶矩估计动态调整每个参数的学习率。 + +其参数更新的计算公式如下: + +.. math:: + \\t = t + 1 +.. math:: + moment\_1\_out=\beta_1∗moment\_1+(1−\beta_1)∗grad +.. math:: + moment\_2\_out=\beta_2∗moment\_2+(1−\beta_2)∗grad*grad +.. math:: + learning\_rate=learning\_rate*\frac{\sqrt{1-\beta_2^t}}{1-\beta_1^t} +.. 
math:: + param\_out=param-learning\_rate*\frac{moment\_1}{\sqrt{moment\_2}+\epsilon}\\ + +相关论文:`Adam: A Method for Stochastic Optimization `_ + +参数: + - **learning_rate** (float|_LRScheduler) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个_LRScheduler类,默认值为0.001 + - **beta1** (float|Tensor, 可选) - 一阶矩估计的指数衰减率,是一个float类型或者一个shape为[1],数据类型为float32的Tensor类型。默认值为0.9 + - **beta2** (float|Tensor, 可选) - 二阶矩估计的指数衰减率,是一个float类型或者一个shape为[1],数据类型为float32的Tensor类型。默认值为0.999 + - **epsilon** (float, 可选) - 保持数值稳定性的短浮点类型值,默认值为1e-08 + - **parameters** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 + - **weight_decay** (float|WeightDecayRegularizer,可选) - 正则化方法。可以是float类型的L2正则化系数或者正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 + :ref:`cn_api_fluid_regularizer_L2Decay` 。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; + 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 + - **grad_clip** (GradientClipBase, 可选) – 梯度裁剪的策略,支持三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。 + 默认值为None,此时将不进行梯度裁剪。 + - **name** (str, 可选)- 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None + - **lazy_mode** (bool, 可选) - 设为True时,仅更新当前具有梯度的元素。官方Adam算法有两个移动平均累加器(moving-average accumulators)。累加器在每一步都会更新。在密集模式和稀疏模式下,两条移动平均线的每个元素都会更新。如果参数非常大,那么更新可能很慢。 lazy mode仅更新当前具有梯度的元素,所以它会更快。但是这种模式与原始的算法有不同的描述,可能会导致不同的结果,默认为False + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.mean(out) + adam = paddle.optimizer.Adam(learning_rate=0.1, + parameters=linear.parameters()) + out.backward() + adam.step() + adam.clear_grad() + +.. code-block:: python + + # Adam with beta1/beta2 as Tensor and weight_decay as float + import paddle + import numpy as np + + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.mean(out) + + beta1 = paddle.to_tensor([0.9], dtype="float32") + beta2 = paddle.to_tensor([0.99], dtype="float32") + + adam = paddle.optimizer.Adam(learning_rate=0.1, + parameters=linear.parameters(), + beta1=beta1, + beta2=beta2, + weight_decay=0.01) + out.backward() + adam.step() + adam.clear_grad() + +.. py:method:: step() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +执行一次优化器并进行参数更新。 + +返回:None。 + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + value = np.arange(26).reshape(2, 13).astype("float32") + a = paddle.to_tensor(value) + linear = paddle.nn.Linear(13, 5) + adam = paddle.optimizer.Adam(learning_rate = 0.01, + parameters = linear.parameters()) + out = linear(a) + out.backward() + adam.step() + adam.clear_grad() + +.. 
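note::
+    ``step()`` 执行的就是本页开头给出的Adam更新公式。下面用NumPy演示一步等价的更新(假设性示意代码,并非该优化器的实现):
+
+.. code-block:: python
+
+    # 示意代码:按上文公式手工执行一步Adam更新(参数与梯度取值为假设值)
+    import numpy as np
+
+    beta1, beta2, epsilon, lr = 0.9, 0.999, 1e-8, 0.001
+    param = np.array([1.0, 2.0])
+    grad = np.array([0.1, -0.2])
+    moment_1 = np.zeros_like(param)
+    moment_2 = np.zeros_like(param)
+    t = 1
+
+    moment_1 = beta1 * moment_1 + (1 - beta1) * grad
+    moment_2 = beta2 * moment_2 + (1 - beta2) * grad * grad
+    lr_t = lr * np.sqrt(1 - beta2 ** t) / (1 - beta1 ** t)
+    param = param - lr_t * moment_1 / (np.sqrt(moment_2) + epsilon)
+    print(param)
+
+.. 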
py:method:: minimize(loss, startup_program=None, parameters=None, no_grad_set=None) + +为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameters中的Parameters,最小化网络损失值loss。 + +参数: + - **loss** (Tensor) – 需要最小化的损失值变量 + - **startup_program** (Program, 可选) – 用于初始化parameters中参数的 :ref:`cn_api_fluid_Program` , 默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program` + - **parameters** (list, 可选) – 待更新的Parameter或者Parameter.name组成的列表, 默认值为None,此时将更新所有的Parameter + - **no_grad_set** (set, 可选) – 不需要更新的Parameter或者Parameter.name组成的集合,默认值为None + +返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.mean(out) + + beta1 = paddle.to_tensor([0.9], dtype="float32") + beta2 = paddle.to_tensor([0.99], dtype="float32") + + adam = paddle.optimizer.Adam(learning_rate=0.1, + parameters=linear.parameters(), + weight_decay=0.01) + out.backward() + adam.minimize(loss) + adam.clear_grad() + +.. py:method:: clear_grad() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + + +清除需要优化的参数的梯度。 + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + value = np.arange(26).reshape(2, 13).astype("float32") + a = paddle.to_tensor(value) + linear = paddle.nn.Linear(13, 5) + optimizer = paddle.optimizer.Adam(learning_rate=0.02, + parameters=linear.parameters()) + out = linear(a) + out.backward() + optimizer.step() + optimizer.clear_grad() + +.. py:method:: set_lr(value) + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +手动设置当前 ``optimizer`` 的学习率。当使用_LRScheduler时,无法使用该API手动设置学习率,因为这将导致冲突。 + +参数: + value (float) - 需要设置的学习率的值。 + +返回:None + +**代码示例** + +.. code-block:: python + + import paddle + paddle.disable_static() + linear = paddle.nn.Linear(10, 10) + + adam = paddle.optimizer.Adam(0.1, parameters=linear.parameters()) + + # set learning rate manually by python float value + lr_list = [0.2, 0.3, 0.4, 0.5, 0.6] + for i in range(5): + adam.set_lr(lr_list[i]) + lr = adam.get_lr() + print("current lr is {}".format(lr)) + # Print: + # current lr is 0.2 + # current lr is 0.3 + # current lr is 0.4 + # current lr is 0.5 + # current lr is 0.6 + +.. py:method:: get_lr() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +获取当前步骤的学习率。当不使用_LRScheduler时,每次调用的返回值都相同,否则返回当前步骤的学习率。 + +返回:float,当前步骤的学习率。 + + +**代码示例** + +.. 
code-block:: python + + import numpy as np + import paddle + # example1: _LRScheduler is not used, return value is all the same + paddle.disable_static() + emb = paddle.nn.Embedding(10, 10, sparse=False) + adam = paddle.optimizer.Adam(0.001, parameters = emb.parameters()) + lr = adam.get_lr() + print(lr) # 0.001 + + # example2: PiecewiseLR is used, return the step learning rate + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.reduce_mean(out) + + bd = [2, 4, 6, 8] + value = [0.2, 0.4, 0.6, 0.8, 1.0] + scheduler = paddle.optimizer.PiecewiseLR(bd, value, 0) + adam = paddle.optimizer.Adam(scheduler, + parameters=linear.parameters()) + + # first step: learning rate is 0.2 + np.allclose(adam.get_lr(), 0.2, rtol=1e-06, atol=0.0) # True + + # learning rate for different steps + ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0] + for i in range(12): + adam.step() + lr = adam.get_lr() + scheduler.step() + np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True diff --git a/doc/paddle/api/paddle/fluid/optimizer/Adamax_cn.rst b/doc/paddle/api/paddle/fluid/optimizer/Adamax_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b38c446571272905ab42e25ce18a463846c300cf --- /dev/null +++ b/doc/paddle/api/paddle/fluid/optimizer/Adamax_cn.rst @@ -0,0 +1,242 @@ +.. _cn_api_paddle_optimizer_Adamax: + +Adamax +------------------------------- + +.. py:class:: paddle.optimizer.Adamax(learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08, parameters=None, weight_decay=None, grad_clip=None, name=None) + + + + +Adamax优化器是参考 `Adam论文 `_ 第7节Adamax优化相关内容所实现的。Adamax算法是基于无穷大范数的 `Adam `_ 算法的一个变种,使学习率更新的算法更加稳定和简单。 + +其参数更新的计算公式如下: + +.. math:: + \\t = t + 1 +.. math:: + moment\_out=\beta_1∗moment+(1−\beta_1)∗grad +.. math:: + inf\_norm\_out=\max{(\beta_2∗inf\_norm+\epsilon, \left|grad\right|)} +.. math:: + learning\_rate=\frac{learning\_rate}{1-\beta_1^t} +.. math:: + param\_out=param−learning\_rate*\frac{moment\_out}{inf\_norm\_out}\\ + +相关论文:`Adam: A Method for Stochastic Optimization `_ + +论文中没有 ``epsilon`` 参数。但是,为了保持数值稳定性, 避免除0错误, 此处增加了这个参数。 + +参数: + - **learning_rate** (float|_LRScheduler) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个_LRScheduler类,默认值为0.001 + - **beta1** (float, 可选) - 一阶矩估计的指数衰减率,默认值为0.9 + - **beta2** (float, 可选) - 二阶矩估计的指数衰减率,默认值为0.999 + - **epsilon** (float, 可选) - 保持数值稳定性的短浮点类型值,默认值为1e-08 + - **parameters** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 + - **weight_decay** (float|WeightDecayRegularizer,可选) - 正则化方法。可以是float类型的L2正则化系数或者正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 + :ref:`cn_api_fluid_regularizer_L2Decay` 。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; + 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 + - **grad_clip** (GradientClipBase, 可选) – 梯度裁剪的策略,支持三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。 + 默认值为None,此时将不进行梯度裁剪。 + - **name** (str, 可选)- 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None + +.. note:: + 目前 ``Adamax`` 不支持 Sparse Parameter Optimization(稀疏参数优化)。 + +**代码示例** + +.. 
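note::
+    下面先用NumPy演示一步Adamax参数更新,对应上文的公式(假设性示意代码,并非该优化器的实现):
+
+.. code-block:: python
+
+    # 示意代码:按上文公式手工执行一步Adamax更新(参数与梯度取值为假设值)
+    import numpy as np
+
+    beta1, beta2, epsilon, lr = 0.9, 0.999, 1e-8, 0.001
+    param = np.array([1.0, 2.0])
+    grad = np.array([0.1, -0.2])
+    moment = np.zeros_like(param)      # 一阶矩
+    inf_norm = np.zeros_like(param)    # 无穷范数累计量
+    t = 1
+
+    moment = beta1 * moment + (1 - beta1) * grad
+    inf_norm = np.maximum(beta2 * inf_norm + epsilon, np.abs(grad))
+    lr_t = lr / (1 - beta1 ** t)
+    param = param - lr_t * moment / inf_norm
+    print(param)
+
+下面是使用 ``paddle.optimizer.Adamax`` 的示例:
+
+.. 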
code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.mean(out) + adam = paddle.optimizer.Adamax(learning_rate=0.1, + parameters=linear.parameters()) + out.backward() + adam.step() + adam.clear_grad() + + +.. py:method:: step() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +执行一次优化器并进行参数更新。 + +返回:None。 + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + value = np.arange(26).reshape(2, 13).astype("float32") + a = paddle.to_tensor(value) + linear = paddle.nn.Linear(13, 5) + adam = paddle.optimizer.Adam(learning_rate = 0.01, + parameters = linear.parameters()) + out = linear(a) + out.backward() + adam.step() + adam.clear_grad() + +.. py:method:: minimize(loss, startup_program=None, parameters=None, no_grad_set=None) + +为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameters中的Parameters,最小化网络损失值loss。 + +参数: + - **loss** (Tensor) – 需要最小化的损失值变量 + - **startup_program** (Program, 可选) – 用于初始化parameters中参数的 :ref:`cn_api_fluid_Program` , 默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program` + - **parameters** (list, 可选) – 待更新的Parameter或者Parameter.name组成的列表, 默认值为None,此时将更新所有的Parameter + - **no_grad_set** (set, 可选) – 不需要更新的Parameter或者Parameter.name组成集合,默认值为None + +返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.mean(out) + + beta1 = paddle.to_tensor([0.9], dtype="float32") + beta2 = paddle.to_tensor([0.99], dtype="float32") + + adam = paddle.optimizer.Adamax(learning_rate=0.1, + parameters=linear.parameters(), + weight_decay=0.01) + out.backward() + adam.minimize(loss) + adam.clear_grad() + + +.. py:method:: clear_grad() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + + +清除需要优化的参数的梯度。 + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + value = np.arange(26).reshape(2, 13).astype("float32") + a = paddle.to_tensor(value) + linear = paddle.nn.Linear(13, 5) + optimizer = paddle.optimizer.Adamax(learning_rate=0.02, + parameters=linear.parameters()) + out = linear(a) + out.backward() + optimizer.step() + optimizer.clear_grad() + +.. py:method:: set_lr(value) + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +手动设置当前 ``optimizer`` 的学习率。当使用_LRScheduler时,无法使用该API手动设置学习率,因为这将导致冲突。 + +参数: + value (float) - 需要设置的学习率的值。 + +返回:None + +**代码示例** + +.. 
code-block:: python + + import paddle + paddle.disable_static() + linear = paddle.nn.Linear(10, 10) + + adam = paddle.optimizer.Adamax(0.1, parameters=linear.parameters()) + + # set learning rate manually by python float value + lr_list = [0.2, 0.3, 0.4, 0.5, 0.6] + for i in range(5): + adam.set_lr(lr_list[i]) + lr = adam.get_lr() + print("current lr is {}".format(lr)) + # Print: + # current lr is 0.2 + # current lr is 0.3 + # current lr is 0.4 + # current lr is 0.5 + # current lr is 0.6 + +.. py:method:: get_lr() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +获取当前步骤的学习率。当不使用_LRScheduler时,每次调用的返回值都相同,否则返回当前步骤的学习率。 + +返回:float,当前步骤的学习率。 + + +**代码示例** + +.. code-block:: python + + + import numpy as np + import paddle + # example1: _LRScheduler is not used, return value is all the same + paddle.disable_static() + emb = paddle.nn.Embedding(10, 10, sparse=False) + adam = paddle.optimizer.Adamax(0.001, parameters = emb.parameters()) + lr = adam.get_lr() + print(lr) # 0.001 + + # example2: PiecewiseLR is used, return the step learning rate + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.reduce_mean(out) + + bd = [2, 4, 6, 8] + value = [0.2, 0.4, 0.6, 0.8, 1.0] + scheduler = paddle.optimizer.PiecewiseLR(bd, value, 0) + adam = paddle.optimizer.Adamax(scheduler, + parameters=linear.parameters()) + + # first step: learning rate is 0.2 + np.allclose(adam.get_lr(), 0.2, rtol=1e-06, atol=0.0) # True + + # learning rate for different steps + ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0] + for i in range(12): + adam.step() + lr = adam.get_lr() + scheduler.step() + np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True diff --git a/doc/paddle/api/paddle/fluid/optimizer/MomentumOptimizer_cn.rst b/doc/paddle/api/paddle/fluid/optimizer/MomentumOptimizer_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c2c39c5a8fad49c25a80ba2668eb0a332698dda7 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/optimizer/MomentumOptimizer_cn.rst @@ -0,0 +1,229 @@ +.. _cn_api_fluid_optimizer_MomentumOptimizer: + +MomentumOptimizer +------------------------------- + +.. py:class:: paddle.fluid.optimizer.MomentumOptimizer(learning_rate, momentum, parameter_list=None, use_nesterov=False, regularization=None, grad_clip=None, name=None) + + + + +该接口实现含有速度状态的Simple Momentum 优化器 + +该优化器含有牛顿动量标志,公式更新如下: + +.. 
math:: + & velocity = mu * velocity + gradient\\ + & if (use\_nesterov):\\ + &\quad param = param - (gradient + mu * velocity) * learning\_rate\\ + & else:\\&\quad param = param - learning\_rate * velocity + +参数: + - **learning_rate** (float|Variable) - 学习率,用于参数更新。作为数据参数,可以是浮点型值或含有一个浮点型值的变量。 + - **momentum** (float) - 动量因子。 + - **parameter_list** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 + - **use_nesterov** (bool,可选) - 赋能牛顿动量,默认值False。 + - **regularization** (WeightDecayRegularizer,可选) - 正则化方法。支持两种正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 + :ref:`cn_api_fluid_regularizer_L2Decay` 。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; + 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 + - **grad_clip** (GradientClipBase, 可选) – 梯度裁剪的策略,支持三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。 + 默认值为None,此时将不进行梯度裁剪。 + - **name** (str, 可选) - 可选的名称前缀,一般无需设置,默认值为None。 + +**代码示例**: + +.. code-block:: python + + import paddle + import paddle.fluid as fluid + import numpy as np + + place = fluid.CPUPlace() + main = fluid.Program() + with fluid.program_guard(main): + x = fluid.layers.data(name='x', shape=[13], dtype='float32') + y = fluid.layers.data(name='y', shape=[1], dtype='float32') + y_predict = fluid.layers.fc(input=x, size=1, act=None) + cost = fluid.layers.square_error_cost(input=y_predict, label=y) + avg_cost = fluid.layers.mean(cost) + + moment_optimizer = fluid.optimizer.MomentumOptimizer(learning_rate=0.001, momentum=0.9) + moment_optimizer.minimize(avg_cost) + + fetch_list = [avg_cost] + train_reader = paddle.batch( + paddle.dataset.uci_housing.train(), batch_size=1) + feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + for data in train_reader(): + exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list) + + + +.. py:method:: minimize(loss, startup_program=None, parameter_list=None, no_grad_set=None) + +为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameter_list中的Parameters,最小化网络损失值loss。 + +参数: + - **loss** (Variable) – 需要最小化的损失值变量 + - **startup_program** (Program, 可选) – 用于初始化parameter_list中参数的 :ref:`cn_api_fluid_Program` , 默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program` + - **parameter_list** (list, 可选) – 待更新的Parameter或者Parameter.name组成的列表, 默认值为None,此时将更新所有的Parameter + - **no_grad_set** (set, 可选) – 不需要更新的Parameter或者Parameter.name组成的集合,默认值为None + +返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 + +返回类型: tuple + +**代码示例**: + +.. 
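note::
+    ``minimize`` 执行的参数更新即上文的动量公式。下面用NumPy给出一步更新的示意代码(假设性示例,并非该优化器的实现):
+
+.. code-block:: python
+
+    # 示意代码:含/不含Nesterov动量的一步更新(参数与梯度取值为假设值)
+    import numpy as np
+
+    mu, lr = 0.9, 0.001
+    param = np.array([1.0, 2.0])
+    grad = np.array([0.1, -0.2])
+    velocity = np.zeros_like(param)
+    use_nesterov = False
+
+    velocity = mu * velocity + grad
+    if use_nesterov:
+        param = param - (grad + mu * velocity) * lr
+    else:
+        param = param - lr * velocity
+    print(param)
+
+下面是调用 ``minimize`` 的完整示例:
+
+.. 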
code-block:: python + + import paddle + import paddle.fluid as fluid + import numpy as np + + place = fluid.CPUPlace() + main = fluid.Program() + with fluid.program_guard(main): + x = fluid.layers.data(name='x', shape=[13], dtype='float32') + y = fluid.layers.data(name='y', shape=[1], dtype='float32') + y_predict = fluid.layers.fc(input=x, size=1, act=None) + cost = fluid.layers.square_error_cost(input=y_predict, label=y) + avg_cost = fluid.layers.mean(cost) + + moment_optimizer = fluid.optimizer.MomentumOptimizer(learning_rate=0.001, momentum=0.9) + moment_optimizer.minimize(avg_cost) + + fetch_list = [avg_cost] + train_reader = paddle.batch( + paddle.dataset.uci_housing.train(), batch_size=1) + feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + for data in train_reader(): + exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list) + + + +.. py:method:: clear_gradients() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + + +清除需要优化的参数的梯度。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + with fluid.dygraph.guard(): + value = np.arange(26).reshape(2, 13).astype("float32") + a = fluid.dygraph.to_variable(value) + linear = fluid.Linear(13, 5, dtype="float32") + optimizer = fluid.optimizer.MomentumOptimizer(learning_rate=0.001, momentum=0.9, + parameter_list=linear.parameters()) + out = linear(a) + out.backward() + optimizer.minimize(out) + optimizer.clear_gradients() + + +.. py:method:: set_lr() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +手动设置当前 ``optimizer`` 的学习率。当使用LearningRateDecay时,无法使用该API手动设置学习率,因为这将导致冲突。 + +参数: + value (float|Variable) - 需要设置的学习率的值。 + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + with fluid.dygraph.guard(): + linear = fluid.dygraph.nn.Linear(10, 10) + adam = fluid.optimizer.Adam(0.1, parameter_list=linear.parameters()) + # 通过Python float数值手动设置学习率 + lr_list = [0.2, 0.3, 0.4, 0.5, 0.6] + for i in range(5): + adam.set_lr(lr_list[i]) + print("current lr is {}".format(adam.current_step_lr())) + # 打印结果: + # current lr is 0.2 + # current lr is 0.3 + # current lr is 0.4 + # current lr is 0.5 + # current lr is 0.6 + + + # 通过 框架的Variable 设置学习率 + lr_var = fluid.layers.create_global_var(shape=[1], value=0.7, dtype='float32') + adam.set_lr(lr_var) + print("current lr is {}".format(adam.current_step_lr())) + # 打印结果: + # current lr is 0.7 + + + +.. py:method:: current_step_lr() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +获取当前步骤的学习率。当不使用LearningRateDecay时,每次调用的返回值都相同,否则返回当前步骤的学习率。 + +返回:当前步骤的学习率。 + +返回类型:float + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + import numpy as np + + # example1: LearningRateDecay is not used, return value is all the same + with fluid.dygraph.guard(): + emb = fluid.dygraph.Embedding([10, 10]) + adam = fluid.optimizer.Adam(0.001, parameter_list = emb.parameters()) + lr = adam.current_step_lr() + print(lr) # 0.001 + + # example2: PiecewiseDecay is used, return the step learning rate + with fluid.dygraph.guard(): + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = fluid.dygraph.nn.Linear(10, 10) + inp = fluid.dygraph.to_variable(inp) + out = linear(inp) + loss = fluid.layers.reduce_mean(out) + + bd = [2, 4, 6, 8] + value = [0.2, 0.4, 0.6, 0.8, 1.0] + adam = fluid.optimizer.Adam(fluid.dygraph.PiecewiseDecay(bd, value, 0), + parameter_list=linear.parameters()) + + # first step: learning rate is 0.2 + np.allclose(adam.current_step_lr(), 0.2, rtol=1e-06, atol=0.0) # True + + # learning rate for different steps + ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0] + for i in range(12): + adam.minimize(loss) + lr = adam.current_step_lr() + np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True + diff --git a/doc/paddle/api/paddle/fluid/optimizer/Momentum_cn.rst b/doc/paddle/api/paddle/fluid/optimizer/Momentum_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..80e940e19f2fbc1a92e60f20fe32a3200c8ba94b --- /dev/null +++ b/doc/paddle/api/paddle/fluid/optimizer/Momentum_cn.rst @@ -0,0 +1,14 @@ +.. _cn_api_fluid_optimizer_Momentum: + +Momentum +------------------------------- + +.. py:attribute:: paddle.fluid.optimizer.Momentum + + + + +``MomentumOptimizer`` 的别名 + + + diff --git a/doc/paddle/api/paddle/fluid/optimizer/SGDOptimizer_cn.rst b/doc/paddle/api/paddle/fluid/optimizer/SGDOptimizer_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..582c590bb04393acd13add208c5b75b2032d0167 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/optimizer/SGDOptimizer_cn.rst @@ -0,0 +1,221 @@ +.. _cn_api_fluid_optimizer_SGDOptimizer: + +SGDOptimizer +------------------------------- + +.. py:class:: paddle.fluid.optimizer.SGDOptimizer(learning_rate, parameter_list=None, regularization=None, grad_clip=None, name=None) + + + + +该接口实现随机梯度下降算法的优化器 + +.. math:: + \\param\_out=param-learning\_rate*grad\\ + + +参数: + - **learning_rate** (float|Variable) - 用于更新参数的学习率。可以是浮点值,也可以是具有一个浮点值作为数据元素的变量。 + - **parameter_list** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 + - **regularization** (WeightDecayRegularizer,可选) - 正则化方法。支持两种正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 + :ref:`cn_api_fluid_regularizer_L2Decay` 。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; + 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 + - **grad_clip** (GradientClipBase, 可选) – 梯度裁剪的策略,支持三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。 + 默认值为None,此时将不进行梯度裁剪。 + - **name** (str, 可选) - 可选的名称前缀,一般无需设置,默认值为None。 + + +**代码示例** + +.. 
code-block:: python + + import paddle + import paddle.fluid as fluid + import numpy as np + + place = fluid.CPUPlace() + main = fluid.Program() + with fluid.program_guard(main): + x = fluid.layers.data(name='x', shape=[13], dtype='float32') + y = fluid.layers.data(name='y', shape=[1], dtype='float32') + y_predict = fluid.layers.fc(input=x, size=1, act=None) + cost = fluid.layers.square_error_cost(input=y_predict, label=y) + avg_cost = fluid.layers.mean(cost) + + sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) + sgd_optimizer.minimize(avg_cost) + + fetch_list = [avg_cost] + train_reader = paddle.batch( + paddle.dataset.uci_housing.train(), batch_size=1) + feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + for data in train_reader(): + exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list) + + + +.. py:method:: minimize(loss, startup_program=None, parameter_list=None, no_grad_set=None) + +为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameter_list中的Parameters,最小化网络损失值loss。 + +参数: + - **loss** (Variable) – 需要最小化的损失值变量 + - **startup_program** (Program, 可选) – 用于初始化parameter_list中参数的 :ref:`cn_api_fluid_Program` , 默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program` + - **parameter_list** (list, 可选) – 待更新的Parameter或者Parameter.name组成的列表, 默认值为None,此时将更新所有的Parameter + - **no_grad_set** (set, 可选) – 不需要更新的Parameter或者Parameter.name组成的集合,默认值为None + +返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 +返回类型: tuple + +**代码示例** + +.. code-block:: python + + import paddle + import paddle.fluid as fluid + import numpy as np + + place = fluid.CPUPlace() + main = fluid.Program() + with fluid.program_guard(main): + x = fluid.layers.data(name='x', shape=[13], dtype='float32') + y = fluid.layers.data(name='y', shape=[1], dtype='float32') + y_predict = fluid.layers.fc(input=x, size=1, act=None) + cost = fluid.layers.square_error_cost(input=y_predict, label=y) + avg_cost = fluid.layers.mean(cost) + + sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) + sgd_optimizer.minimize(avg_cost) + + fetch_list = [avg_cost] + train_reader = paddle.batch( + paddle.dataset.uci_housing.train(), batch_size=1) + feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + for data in train_reader(): + exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list) + + + +.. py:method:: clear_gradients() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + + +清除需要优化的参数的梯度。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + with fluid.dygraph.guard(): + value = np.arange(26).reshape(2, 13).astype("float32") + a = fluid.dygraph.to_variable(value) + linear = fluid.Linear(13, 5, dtype="float32") + optimizer = fluid.optimizer.SGDOptimizer(learning_rate=0.01, + parameter_list=linear.parameters()) + out = linear(a) + out.backward() + optimizer.minimize(out) + optimizer.clear_gradients() + +.. py:method:: set_lr() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +手动设置当前 ``optimizer`` 的学习率。当使用LearningRateDecay时,无法使用该API手动设置学习率,因为这将导致冲突。 + +参数: + value (float|Variable) - 需要设置的学习率的值。 + +返回:无 + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + + with fluid.dygraph.guard(): + linear = fluid.dygraph.nn.Linear(10, 10) + adam = fluid.optimizer.Adam(0.1, parameter_list=linear.parameters()) + # 通过Python float数值手动设置学习率 + lr_list = [0.2, 0.3, 0.4, 0.5, 0.6] + for i in range(5): + adam.set_lr(lr_list[i]) + print("current lr is {}".format(adam.current_step_lr())) + # 打印结果: + # current lr is 0.2 + # current lr is 0.3 + # current lr is 0.4 + # current lr is 0.5 + # current lr is 0.6 + + + # 通过 框架的Variable 设置学习率 + lr_var = fluid.layers.create_global_var(shape=[1], value=0.7, dtype='float32') + adam.set_lr(lr_var) + print("current lr is {}".format(adam.current_step_lr())) + # 打印结果: + # current lr is 0.7 + + +.. py:method:: current_step_lr() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +获取当前步骤的学习率。当不使用LearningRateDecay时,每次调用的返回值都相同,否则返回当前步骤的学习率。 + +返回:当前步骤的学习率。 + +返回类型:float + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + # example1: LearningRateDecay is not used, return value is all the same + with fluid.dygraph.guard(): + emb = fluid.dygraph.Embedding([10, 10]) + adam = fluid.optimizer.Adam(0.001, parameter_list = emb.parameters()) + lr = adam.current_step_lr() + print(lr) # 0.001 + + # example2: PiecewiseDecay is used, return the step learning rate + with fluid.dygraph.guard(): + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = fluid.dygraph.nn.Linear(10, 10) + inp = fluid.dygraph.to_variable(inp) + out = linear(inp) + loss = fluid.layers.reduce_mean(out) + + bd = [2, 4, 6, 8] + value = [0.2, 0.4, 0.6, 0.8, 1.0] + adam = fluid.optimizer.Adam(fluid.dygraph.PiecewiseDecay(bd, value, 0), + parameter_list=linear.parameters()) + + # first step: learning rate is 0.2 + np.allclose(adam.current_step_lr(), 0.2, rtol=1e-06, atol=0.0) # True + + # learning rate for different steps + ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0] + for i in range(12): + adam.minimize(loss) + lr = adam.current_step_lr() + np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True + diff --git a/doc/paddle/api/paddle/fluid/optimizer/SGD_cn.rst b/doc/paddle/api/paddle/fluid/optimizer/SGD_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8810f79d267ab312ae682332240e047ad10771e6 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/optimizer/SGD_cn.rst @@ -0,0 +1,17 @@ +.. _cn_api_fluid_optimizer_SGD: + +SGD +------------------------------- + +.. py:attribute:: paddle.fluid.optimizer.SGD + + + + +``SGDOptimizer`` 的别名 + + + + + + diff --git a/doc/paddle/api/paddle/fluid/parallel_executor/ParallelExecutor_cn.rst b/doc/paddle/api/paddle/fluid/parallel_executor/ParallelExecutor_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..577423467876e6e325240d98b3ad47df4430df54 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/parallel_executor/ParallelExecutor_cn.rst @@ -0,0 +1,195 @@ +.. _cn_api_fluid_ParallelExecutor: + +ParallelExecutor +------------------------------- + + +.. 
py:class:: paddle.fluid.ParallelExecutor(use_cuda, loss_name=None, main_program=None, share_vars_from=None, exec_strategy=None, build_strategy=None, num_trainers=1, trainer_id=0, scope=None) + + + + +``ParallelExecutor`` 是 ``Executor`` 的一个升级版本,可以支持基于数据并行的多节点模型训练和测试。如果采用数据并行模式, ``ParallelExecutor`` 在构造时会将参数分发到不同的节点上,并将输入的 ``Program`` 拷贝到不同的节点,在执行过程中,各个节点独立运行模型,将模型反向计算得到的参数梯度在多个节点之间进行聚合,之后各个节点独立的进行参数的更新。如果使用GPU运行模型,即 ``use_cuda=True`` ,节点指代GPU, ``ParallelExecutor`` 将自动获取在当前机器上可用的GPU资源,用户也可以通过在环境变量设置可用的GPU资源,例如:希望使用GPU0、GPU1计算,export CUDA_VISIBLEDEVICES=0,1;如果在CPU上进行操作,即 ``use_cuda=False`` ,节点指代CPU,**注意:此时需要用户在环境变量中手动添加 CPU_NUM ,并将该值设置为CPU设备的个数,例如:export CPU_NUM=4,如果没有设置该环境变量,执行器会在环境变量中添加该变量,并将其值设为1**。 + +参数: + - **use_cuda** (bool) – 该参数表示是否使用GPU执行。 + - **loss_name** (str) - 该参数为模型最后得到的损失变量的名字。**注意:如果是数据并行模型训练,必须设置loss_name,否则计算结果可能会有问题。** 默认为:None。 + - **main_program** (Program) – 需要被执行的Program 。如果未提供该参数,即该参数为None,在该接口内,main_program将被设置为fluid.default_main_program()。 默认为:None。 + - **share_vars_from** (ParallelExecutor) - 如果设置了share_vars_from,当前的ParallelExecutor将与share_vars_from指定的ParallelExecutor共享参数值。需要设置该参数的情况:模型训练过程中需要进行模型测试,并且训练和测试都是采用数据并行模式,那么测试对应的ParallelExecutor在调用with_data_parallel时,需要将share_vars_from设置为训练所对应的ParallelExecutor。由于ParallelExecutor只有在第一次执行时才会将参数变量分发到其他设备上,因此share_vars_from指定的ParallelExecutor必须在当前ParallelExecutor之前运行。默认为:None。 + - **exec_strategy** (ExecutionStrategy) - 通过exec_strategy指定执行计算图过程可以调整的选项,例如线程池大小等。 关于exec_strategy更多信息,请参阅 ``fluid.ExecutionStrategy`` 。 默认为:None。 + - **build_strategy** (BuildStrategy): 通过配置build_strategy,对计算图进行转换和优化,例如:计算图中算子融合、计算图执行过程中开启内存/显存优化等。关于build_strategy更多的信息,请参阅 ``fluid.BuildStrategy`` 。 默认为:None。 + - **num_trainers** (int) – 进行GPU分布式训练时需要设置该参数。如果该参数值大于1,NCCL将会通过多层级节点的方式来初始化。每个节点应有相同的GPU数目。默认为:1。 + - **trainer_id** (int) – 进行GPU分布式训练时需要设置该参数。该参数必须与num_trainers参数同时使用。trainer_id指明是当前所在节点的 “rank”(层级)。trainer_id从0开始计数。默认为:0。 + - **scope** (Scope) – 指定执行Program所在的作用域。默认为:fluid.global_scope()。 + +返回:初始化后的 ``ParallelExecutor`` 对象 + +返回类型:ParallelExecutor + +抛出异常:``TypeError`` + - 如果提供的参数 ``share_vars_from`` 不是 ``ParallelExecutor`` 类型的,将会抛出此异常。 + +.. note:: + 1. 如果只是进行多卡测试,不需要设置loss_name以及share_vars_from。 + 2. 如果程序中既有模型训练又有模型测试,则构建模型测试所对应的ParallelExecutor时必须设置share_vars_from,否则模型测试和模型训练所使用的参数是不一致。 + +**示例代码** + +.. 
code-block:: python + + import paddle.fluid as fluid + import numpy + import os + + use_cuda = True + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + + # 注意:如果你使用CPU运行程序,需要具体设置CPU_NUM, + # 否则fluid会把逻辑核的所有数目设为CPU_NUM, + # 在这种情况下,输入的batch size应大于CPU_NUM, + # 否则程序会异常中断。 + if not use_cuda: + os.environ['CPU_NUM'] = str(2) + + exe = fluid.Executor(place) + + train_program = fluid.Program() + startup_program = fluid.Program() + with fluid.program_guard(train_program, startup_program): + data = fluid.layers.data(name='X', shape=[1], dtype='float32') + hidden = fluid.layers.fc(input=data, size=10) + loss = fluid.layers.mean(hidden) + test_program = fluid.default_main_program().clone(for_test=True) + fluid.optimizer.SGD(learning_rate=0.01).minimize(loss) + + exe.run(startup_program) + + train_exe = fluid.ParallelExecutor(use_cuda=use_cuda, + main_program=train_program, + loss_name=loss.name) + # 注意:如果此处不设置share_vars_from=train_exe,测试过程中用的参数与训练使用的参数是不一致 + test_exe = fluid.ParallelExecutor(use_cuda=use_cuda, + main_program=test_program, + share_vars_from=train_exe) + + train_data = numpy.random.random(size=(10, 1)).astype('float32') + loss_data, = train_exe.run(feed={"X": train_data}, + fetch_list=[loss.name]) + + test_data = numpy.random.random(size=(10, 1)).astype('float32') + loss_data, = test_exe.run(feed={"X": test_data}, + fetch_list=[loss.name]) + +.. py:method:: run(fetch_list, feed=None, feed_dict=None, return_numpy=True) + +该接口用于运行当前模型,需要注意的是,执行器会执行Program中的所有算子,而不会根据fetch_list对Program中的算子进行裁剪。 + +参数: + - **fetch_list** (list) – 该变量表示模型运行之后需要返回的变量。 + - **feed** (list|dict) – 该变量表示模型的输入变量。如果该参数类型为 ``dict`` ,feed中的数据将会被分割(split)并分送给多个设备(CPU/GPU);如果该参数类型为 ``list`` ,则列表中的各个元素都会直接分别被拷贝到各设备中。默认为:None。 + - **feed_dict** – 该参数已经停止使用。默认为:None。 + - **return_numpy** (bool) – 该变量表示是否将fetched tensor转换为numpy。默认为:True。 + +返回:返回fetch_list中指定的变量值 + +返回类型:List + +抛出异常: + - ``ValueError`` - 如果feed参数是list类型,但是它的长度不等于可用设备(执行场所)的数目,再或者给定的feed不是dict类型,抛出此异常 + - ``TypeError`` - 如果feed参数是list类型,但是它里面的元素不是dict类型时,抛出此异常 + +.. note:: + 1. 如果feed参数为dict类型,输入数据将被均匀分配到不同的卡上,例如:使用2块GPU训练,输入样本数为3,即[0, 1, 2],经过拆分之后,GPU0上的样本数为1,即[0],GPU1上的样本数为2,即[1, 2]。如果样本数少于设备数,程序会报错,因此运行模型时,应额外注意数据集的最后一个batch的样本数是否少于当前可用的CPU核数或GPU卡数,如果是少于,建议丢弃该batch。 + 2. 如果可用的CPU核数或GPU卡数大于1,则fetch出来的结果为不同设备上的相同变量值(fetch_list中的变量)在第0维拼接在一起。 + +**示例代码** + +.. 
code-block:: python + + import paddle.fluid as fluid + import numpy + import os + + use_cuda = True + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + + # 注意:如果你使用CPU运行程序,需要具体设置CPU_NUM, + # 否则fluid会把逻辑核的所有数目设为CPU_NUM, + # 在这种情况下,输入的batch size应大于CPU_NUM, + # 否则程序会异常中断。 + if not use_cuda: + os.environ['CPU_NUM'] = str(2) + exe = fluid.Executor(place) + + train_program = fluid.Program() + startup_program = fluid.Program() + with fluid.program_guard(train_program, startup_program): + data = fluid.layers.data(name='X', shape=[1], dtype='float32') + hidden = fluid.layers.fc(input=data, size=10) + loss = fluid.layers.mean(hidden) + fluid.optimizer.SGD(learning_rate=0.01).minimize(loss) + + exe.run(startup_program) + + train_exe = fluid.ParallelExecutor(use_cuda=use_cuda, + main_program=train_program, + loss_name=loss.name) + # 如果feed参数是dict类型: + # 图像会被split到设备中。假设有两个设备,那么每个设备将会处理形为 (5, 1)的图像 + x = numpy.random.random(size=(10, 1)).astype('float32') + loss_data, = train_exe.run(feed={"X": x}, + fetch_list=[loss.name]) + + # 如果feed参数是list类型: + # 各设备挨个处理列表中的每个元素 + # 第一个设备处理形为 (10, 1) 的图像 + # 第二个设备处理形为 (9, 1) 的图像 + # + # 使用 exe.device_count 得到设备数目 + x1 = numpy.random.random(size=(10, 1)).astype('float32') + x2 = numpy.random.random(size=(9, 1)).astype('float32') + loss_data, = train_exe.run(feed=[{"X": x1}, {"X": x2}], + fetch_list=[loss.name]) + +.. py:method:: drop_local_exe_scopes() + +立即清除scope中的临时变量。模型运行过程中,生成的中间临时变量将被放到local execution scope中,为了避免对临时变量频繁的申请与释放,ParallelExecutor中采取的策略是间隔若干次迭代之后清理一次临时变量。ParallelExecutor在ExecutionStrategy中提供了num_iteration_per_drop_scope选项,该选项表示间隔多少次迭代之后清理一次临时变量。如果num_iteration_per_drop_scope值为100,但是希望在迭代50次之后清理一次临时变量,可以通过手动调用该接口。 + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy + import os + + use_cuda = True + # 注意:如果你使用CPU运行程序,需要具体设置CPU_NUM, + # 否则fluid会把逻辑核的所有数目设为CPU_NUM, + # 在这种情况下,输入的batch size应大于CPU_NUM, + # 否则程序会异常中断。 + if not use_cuda: + os.environ['CPU_NUM'] = str(2) + + train_program = fluid.Program() + startup_program = fluid.Program() + with fluid.program_guard(train_program, startup_program): + data = fluid.layers.data(name='X', shape=[1], dtype='float32') + hidden = fluid.layers.fc(input=data, size=10) + loss = fluid.layers.mean(hidden) + + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(startup_program) + + parallel_exe = fluid.ParallelExecutor(use_cuda=use_cuda, + main_program=train_program, + loss_name=loss.name) + + x = numpy.random.random(size=(10, 1)).astype('float32') + loss_data, = parallel_exe.run(feed={"X": x}, + fetch_list=[loss.name]) + + parallel_exe.drop_local_exe_scopes() diff --git a/doc/paddle/api/paddle/fluid/param_attr/ParamAttr_cn.rst b/doc/paddle/api/paddle/fluid/param_attr/ParamAttr_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..641ed94ab2d0b52e9ef2de4cdd783f61e5df672f --- /dev/null +++ b/doc/paddle/api/paddle/fluid/param_attr/ParamAttr_cn.rst @@ -0,0 +1,47 @@ +.. _cn_api_fluid_ParamAttr: + + +ParamAttr +------------------------------- + + +.. py:class:: paddle.fluid.ParamAttr(name=None, initializer=None, learning_rate=1.0, regularizer=None, trainable=True, do_model_average=False) + + + + +.. 
note:: + 该类中的 ``gradient_clip`` 属性在2.0版本会废弃,推荐在初始化 ``optimizer`` 时设置梯度裁剪。共有三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 + :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。 + +创建一个参数属性对象,用户可设置参数的名称、初始化方式、学习率、正则化规则、是否需要训练、梯度裁剪方式、是否做模型平均等属性。 + +参数: + - **name** (str,可选) - 参数的名称。默认值为None,表示框架自动创建参数的名称。 + - **initializer** (Initializer,可选) - 参数的初始化方式。默认值为None,表示权重参数采用Xavier初始化方式,偏置参数采用全0初始化方式。 + - **learning_rate** (float) - 参数的学习率。实际参数的学习率等于全局学习率乘以参数的学习率,再乘以learning rate schedule的系数。 + - **regularizer** (WeightDecayRegularizer,可选) - 正则化方法。支持两种正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 + :ref:`cn_api_fluid_regularizer_L2Decay` ,如果在 ``optimizer`` (例如 :ref:`cn_api_fluid_optimizer_SGDOptimizer` ) 中也 + 设置了正则化,``optimizer`` 中的正则化将被忽略。默认值为None,表示没有正则化。 + - **trainable** (bool) - 参数是否需要训练。默认值为True,表示需要训练。 + - **do_model_average** (bool) - 是否做模型平均。默认值为False,表示不做模型平均。 + +返回: 表示参数属性的对象。 + +返回类型: ParamAttr + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + w_param_attrs = fluid.ParamAttr(name="fc_weight", + learning_rate=0.5, + regularizer=fluid.regularizer.L2Decay(1.0), + trainable=True) + print(w_param_attrs.name) # "fc_weight" + x = fluid.layers.data(name='X', shape=[1], dtype='float32') + y_predict = fluid.layers.fc(input=x, size=10, param_attr=w_param_attrs) + + diff --git a/doc/paddle/api/paddle/fluid/param_attr/WeightNormParamAttr_cn.rst b/doc/paddle/api/paddle/fluid/param_attr/WeightNormParamAttr_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9b50263ff208c7cd516ba88759f6ebc245e8987f --- /dev/null +++ b/doc/paddle/api/paddle/fluid/param_attr/WeightNormParamAttr_cn.rst @@ -0,0 +1,48 @@ +.. _cn_api_fluid_WeightNormParamAttr: + +WeightNormParamAttr +------------------------------- + + +.. py:class:: paddle.fluid.WeightNormParamAttr(dim=None, name=None, initializer=None, learning_rate=1.0, regularizer=None, trainable=True, do_model_average=False) + + + + +.. note:: + 该类中的 ``gradient_clip`` 属性在2.0版本会废弃,推荐在初始化 ``optimizer`` 时设置梯度裁剪。共有三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 + :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。 + +该类定义了权重归一化(Weight Normalization)的参数。权重归一化可以将神经网络中权重向量的长度与其方向解耦,详细的定义与实现可以参考论文:`Weight Normalization: A Simple Reparameterization to Accelerate Training of Deep Neural Networks `_ + +参数: + - **dim** (int) - 进行归一化操作(norm)的切片所在维度,是小于权重Tensor rank的非负数。比如卷积的权重shape是 :math:`[cout, cin, kh, kw]` , rank是4,则dim可以选0,1,2,3;fc的权重shape是 :math:`[cout, cin]` ,rank是2,dim可以选0,1。 dim 默认为None,如果为None就对所有元素做归一化(norm)。 + - **name** (None|str) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认为None。 + - **initializer** (Initializer) - 初始化参数方法,例如 ``initializer = fluid.initializer.ConstantInitializer(1.0)`` 。默认为None,如果为None则使用默认初始化函数 `Xavier()` 。 + - **learning_rate** (float32) - 学习率,优化过程 :math:`global\_lr∗parameter\_lr∗scheduler\_factor` 的学习速率,默认为1.0。 + - **regularizer** (WeightDecayRegularizer,可选) - 正则化方法。支持两种正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 + :ref:`cn_api_fluid_regularizer_L2Decay` ,如果在 ``optimizer`` (例如 :ref:`cn_api_fluid_optimizer_SGDOptimizer` ) 中也 + 设置了正则化,``optimizer`` 中的正则化将被忽略。默认值为None,表示没有正则化。 + - **trainable** (bool) - 可选,指明参数是否可训练,默认为True。 + - **do_model_average** (bool) - 可选,指明参数是否需要模型平均化操作(Model Average),默认为False。 + + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + data = fluid.layers.data(name="data", shape=[3, 32, 32], dtype="float32") + fc = fluid.layers.fc(input=data, + size=1000, + param_attr=fluid.WeightNormParamAttr( + dim=None, + name='weight_norm_param', + initializer=fluid.initializer.ConstantInitializer(1.0), + learning_rate=1.0, + regularizer=fluid.regularizer.L2DecayRegularizer(regularization_coeff=0.1), + trainable=True, + do_model_average=False)) + + + diff --git a/doc/paddle/api/paddle/fluid/profiler/cuda_profiler_cn.rst b/doc/paddle/api/paddle/fluid/profiler/cuda_profiler_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ff292887d1b39cf7d11cd06569a22f18ae8d1505 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/profiler/cuda_profiler_cn.rst @@ -0,0 +1,49 @@ +.. _cn_api_fluid_profiler_cuda_profiler: + +cuda_profiler +------------------------------- + +.. py:function:: paddle.fluid.profiler.cuda_profiler(output_file, output_mode=None, config=None) + + + + + +CUDA性能分析器。该分析器通过调用CUDA运行时编程接口,对CUDA程序进行性能分析,并将分析结果写入输出文件output_file。输出格式由output_mode参数控制,性能分析配置选项由config参数控制。得到输出文件后,用户可使用 `NVIDIA Visual Profiler `_ 工具来加载这个输出文件以获得可视化结果。 + + +参数: + - **output_file** (str) – 输出文件名称, 输出结果将会写入该文件。 + - **output_mode** (str,可选) – 输出格式,有两种可以选择,分别是 key-value 键值对格式'kvp' 和 逗号分割的格式'csv'(默认格式)。 + - **config** (list, 可选) – NVIDIA性能分析配置列表,默认值为None时会选择以下配置:['gpustarttimestamp', 'gpuendtimestamp', 'gridsize3d', 'threadblocksize', 'streamid', 'enableonstart 0', 'conckerneltrace']。上述每个配置的含义和更多配置选项,请参考 `Compute Command Line Profiler User Guide `_ 。 + +抛出异常: + - ``ValueError`` - 如果输出格式output_mode不是'kvp'、'csv'两者之一,会抛出异常。 + +返回: 无 + +**代码示例** + + +.. code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.profiler as profiler + import numpy as np + + epoc = 8 + dshape = [4, 3, 28, 28] + data = fluid.data(name='data', shape=[None, 3, 28, 28], dtype='float32') + conv = fluid.layers.conv2d(data, 20, 3, stride=[1, 1], padding=[1, 1]) + + place = fluid.CUDAPlace(0) + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + + output_file = 'cuda_profiler.txt' + with profiler.cuda_profiler(output_file, 'csv') as nvprof: + for i in range(epoc): + input = np.random.random(dshape).astype('float32') + exe.run(fluid.default_main_program(), feed={'data': input}) + + # 之后可以使用 NVIDIA Visual Profile 可视化结果 diff --git a/doc/paddle/api/paddle/fluid/profiler/profiler_cn.rst b/doc/paddle/api/paddle/fluid/profiler/profiler_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c3ae718c596611f11731ad4cef6b993f66bfaf62 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/profiler/profiler_cn.rst @@ -0,0 +1,77 @@ +.. _cn_api_fluid_profiler_profiler: + +profiler +------------------------------- + +.. 
py:function:: paddle.fluid.profiler.profiler(state, sorted_key=None, profile_path='/tmp/profile', tracer_option='Default') + + + + +通用性能分析器 。与 :ref:`cn_api_fluid_profiler_cuda_profiler` 不同,此分析器可用于分析CPU和GPU程序。 + +参数: + - **state** (str) – 性能分析状态, 取值为 'CPU' 或 'GPU' 或 'All'。'CPU'表示只分析CPU上的性能;'GPU'表示同时分析CPU和GPU上的性能;'All'表示除了同时分析CPU和GPU上的性能外,还将生成 `性能分析的时间轴信息 <../../advanced_usage/development/profiling/timeline_cn.html>`_ 。 + - **sorted_key** (str,可选) – 性能分析结果的打印顺序,取值为None、'call'、'total'、'max'、'min'、'ave'之一。默认值为None,表示按照第一次结束时间顺序打印;'call'表示按调用的数量进行排序;'total'表示按总执行时间排序;'max'表示按最大执行时间排序;'min'表示按最小执行时间排序;'ave'表示按平均执行时间排序。 + - **profile_path** (str,可选) – 如果性能分析状态为'All', 将生成的时间轴信息写入profile_path,默认输出文件为 ``/tmp/profile`` 。 + - **tracer_option** (str,可选) – 性能分析选项取值为 'Default' 或 'OpDetail' 或 'AllOpDetail', 此选项用于设置性能分析层次并打印不同层次的性能分析结果, `Default` 选项打印不同Op类型的性能分析结果, `OpDetail` 则会打印不同OP类型更详细的性能分析结果,比如compute和data transform。 `AllOpDetail` 和 `OpDetail` 类似,但是打印的是不同Op名字的性能分析结果。 + +抛出异常: + - ``ValueError`` – 如果state取值不在 ['CPU', 'GPU', 'All']中,或sorted_key取值不在 [None, 'calls', 'total', 'max', 'min', 'ave']中,则抛出异常。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.profiler as profiler + import numpy as np + + epoc = 8 + dshape = [4, 3, 28, 28] + data = fluid.layers.data(name='data', shape=[3, 28, 28], dtype='float32') + conv = fluid.layers.conv2d(data, 20, 3, stride=[1, 1], padding=[1, 1]) + + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + + with profiler.profiler('CPU', 'total', '/tmp/profile') as prof: + for i in range(epoc): + input = np.random.random(dshape).astype('float32') + exe.run(fluid.default_main_program(), feed={'data': input}) + +**结果示例** + +.. code-block:: text + + #### sorted_key = 'total', 'calls', 'max', 'min', 'ave' 结果 #### + # 示例结果中,除了Sorted by number of xxx in descending order in the same thread 这句随着sorted_key变化而不同,其余均相同。 + # 原因是,示例结果中,上述5列都已经按从大到小排列了。 + -------------------------> Profiling Report <------------------------- + + Place: CPU + Time unit: ms + Sorted by total time in descending order in the same thread + #Sorted by number of calls in descending order in the same thread + #Sorted by number of max in descending order in the same thread + #Sorted by number of min in descending order in the same thread + #Sorted by number of avg in descending order in the same thread + + Event Calls Total Min. Max. Ave. Ratio. + thread0::conv2d 8 129.406 0.304303 127.076 16.1758 0.983319 + thread0::elementwise_add 8 2.11865 0.193486 0.525592 0.264832 0.016099 + thread0::feed 8 0.076649 0.006834 0.024616 0.00958112 0.000582432 + + #### sorted_key = None 结果 #### + # 示例结果中,是按照Op结束时间顺序打印,因此打印顺序为feed->conv2d->elementwise_add + -------------------------> Profiling Report <------------------------- + + Place: CPU + Time unit: ms + Sorted by event first end time in descending order in the same thread + + Event Calls Total Min. Max. Ave. Ratio. + thread0::feed 8 0.077419 0.006608 0.023349 0.00967738 0.00775934 + thread0::conv2d 8 7.93456 0.291385 5.63342 0.99182 0.795243 + thread0::elementwise_add 8 1.96555 0.191884 0.518004 0.245693 0.196998 diff --git a/doc/paddle/api/paddle/fluid/profiler/reset_profiler_cn.rst b/doc/paddle/api/paddle/fluid/profiler/reset_profiler_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..575f408a74a153885468fb0ef527972273f1d049 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/profiler/reset_profiler_cn.rst @@ -0,0 +1,23 @@ +.. 
_cn_api_fluid_profiler_reset_profiler: + +reset_profiler +------------------------------- + +.. py:function:: paddle.fluid.profiler.reset_profiler() + + + + +清除之前的性能分析记录。此接口不能和 :ref:`cn_api_fluid_profiler_cuda_profiler` 一起使用 ,但它可以和 :ref:`cn_api_fluid_profiler_start_profiler` 、:ref:`cn_api_fluid_profiler_stop_profiler` 和 :ref:`cn_api_fluid_profiler_profiler` 一起使用。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.profiler as profiler + with profiler.profiler('CPU', 'total', '/tmp/profile'): + for iter in range(10): + if iter == 2: + profiler.reset_profiler() + # ... diff --git a/doc/paddle/api/paddle/fluid/profiler/start_profiler_cn.rst b/doc/paddle/api/paddle/fluid/profiler/start_profiler_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0eaca4a57366e23575b8d4bf0cc9752f4e10ce4d --- /dev/null +++ b/doc/paddle/api/paddle/fluid/profiler/start_profiler_cn.rst @@ -0,0 +1,32 @@ +.. _cn_api_fluid_profiler_start_profiler: + +start_profiler +------------------------------- + +.. py:function:: paddle.fluid.profiler.start_profiler(state, tracer_option='Default') + + + + +激活使用性能分析器。除了 :ref:`cn_api_fluid_profiler_profiler` 外,用户还可以使用 :ref:`cn_api_fluid_profiler_start_profiler` 和 :ref:`cn_api_fluid_profiler_stop_profiler` 来激活和停止使用性能分析器。 + +参数: + - **state** (str) – 性能分析状态, 取值为 'CPU' 或 'GPU' 或 'All'。'CPU'表示只分析CPU上的性能;'GPU'表示同时分析CPU和GPU上的性能;'All'表示除了同时分析CPU和GPU上的性能外,还将生成性能分析的时间轴信息 :ref:`fluid_timeline` 。 + - **tracer_option** (str,可选) – 性能分析选项取值为 'Default' 或 'OpDetail' 或 'AllOpDetail', 此选项用于设置性能分析层次并打印不同层次的性能分析结果, `Default` 选项打印不同Op类型的性能分析结果, `OpDetail` 则会打印不同OP类型更详细的性能分析结果,比如compute和data transform。 `AllOpDetail` 和 `OpDetail` 类似,但是打印的是不同Op名字的性能分析结果。 + +抛出异常: + - ``ValueError`` – 如果state取值不在 ['CPU', 'GPU', 'All']中或者tracer_option取值不在['Default', 'OpDetail', 'AllOpDetail']中,则抛出异常 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.profiler as profiler + + profiler.start_profiler('GPU') + for iter in range(10): + if iter == 2: + profiler.reset_profiler() + # except each iteration + profiler.stop_profiler('total', '/tmp/profile') diff --git a/doc/paddle/api/paddle/fluid/profiler/stop_profiler_cn.rst b/doc/paddle/api/paddle/fluid/profiler/stop_profiler_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..85f2cbb116cc88d9adfcfb830d6b9f0d203292c5 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/profiler/stop_profiler_cn.rst @@ -0,0 +1,33 @@ +.. _cn_api_fluid_profiler_stop_profiler: + +stop_profiler +------------------------------- + +.. py:function:: paddle.fluid.profiler.stop_profiler(sorted_key=None, profile_path='/tmp/profile') + + + + +停止使用性能分析器。除了 :ref:`cn_api_fluid_profiler_profiler` 外,用户还可以使用 :ref:`cn_api_fluid_profiler_start_profiler` 和 :ref:`cn_api_fluid_profiler_stop_profiler` 来激活和停止使用性能分析器。 + +参数: + - **sorted_key** (str,可选) – 性能分析结果的打印顺序,取值为None、'call'、'total'、'max'、'min'、'ave'之一。默认值为None,表示按照第一次结束时间顺序打印;'call'表示按调用的数量进行排序;'total'表示按总执行时间排序;'max'表示按最大执行时间排序;'min'表示按最小执行时间排序;'ave'表示按平均执行时间排序。 + - **profile_path** (str,可选) – 如果性能分析状态为'All', 将生成的时间轴信息写入profile_path,默认输出文件为 ``/tmp/profile`` 。 + + +抛出异常: + - ``ValueError`` – 如果sorted_key取值不在 [None, 'calls', 'total', 'max', 'min', 'ave']中,则抛出异常。 + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.profiler as profiler + + profiler.start_profiler('GPU') + for iter in range(10): + if iter == 2: + profiler.reset_profiler() + # except each iteration + profiler.stop_profiler('total', '/tmp/profile') diff --git a/doc/paddle/api/paddle/fluid/reader/PyReader_cn.rst b/doc/paddle/api/paddle/fluid/reader/PyReader_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..10920cb264f87170a5394d4a4be70a74596c6c02 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/reader/PyReader_cn.rst @@ -0,0 +1,389 @@ +.. _cn_api_fluid_io_PyReader: + +PyReader +------------------------------- + +.. py:class:: paddle.fluid.io.PyReader(feed_list=None, capacity=None, use_double_buffer=True, iterable=True, return_list=False) + + + + + +在python中为数据输入创建一个reader对象。将使用python线程预取数据,并将其异步插入队列。当调用Executor.run(…)时,将自动提取队列中的数据。 + +参数: + - **feed_list** (list(Variable)|tuple(Variable)) - feed变量列表,由 ``fluid.layers.data()`` 创建。 + - **capacity** (int) - PyReader对象内部维护队列的容量大小。单位是batch数量。若reader读取速度较快,建议设置较大的capacity值。 + - **use_double_buffer** (bool) - 是否使用 ``double_buffer_reader`` 。若use_double_buffer=True,PyReader会异步地预读取下一个batch的数据,可加速数据读取过程,但同时会占用少量的CPU/GPU存储,即一个batch输入数据的存储空间。 + - **iterable** (bool) - 所创建的DataLoader对象是否可迭代。 + - **return_list** (bool) - 每个设备上的数据是否以list形式返回。仅在iterable = True模式下有效。若return_list = False,每个设备上的返回数据均是str -> LoDTensor的映射表,其中映射表的key是每个输入变量的名称。若return_list = True,则每个设备上的返回数据均是list(LoDTensor)。推荐在静态图模式下使用return_list = False,在动态图模式下使用return_list = True。 + + +返回: 被创建的reader对象 + +返回类型: reader (Reader) + + +**代码示例** + +1.如果iterable=False,则创建的PyReader对象几乎与 ``fluid.layers.py_reader()`` 相同。算子将被插入program中。用户应该在每个epoch之前调用 ``start()`` ,并在epoch结束时捕获 ``Executor.run()`` 抛出的 ``fluid.core.EOFException`` 。一旦捕获到异常,用户应该调用 ``reset()`` 手动重置reader。 + +.. code-block:: python + + import paddle + import paddle.fluid as fluid + import numpy as np + + EPOCH_NUM = 3 + ITER_NUM = 5 + BATCH_SIZE = 3 + + def network(image, label): + # 用户定义网络,此处以softmax回归为例 + predict = fluid.layers.fc(input=image, size=10, act='softmax') + return fluid.layers.cross_entropy(input=predict, label=label) + + def reader_creator_random_image_and_label(height, width): + def reader(): + for i in range(ITER_NUM): + fake_image = np.random.uniform(low=0, + high=255, + size=[height, width]) + fake_label = np.ones([1]) + yield fake_image, fake_label + return reader + + image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + + reader = fluid.io.PyReader(feed_list=[image, label], + capacity=4, + iterable=False) + + user_defined_reader = reader_creator_random_image_and_label(784, 784) + reader.decorate_sample_list_generator( + paddle.batch(user_defined_reader, batch_size=BATCH_SIZE)) + + loss = network(image, label) + executor = fluid.Executor(fluid.CPUPlace()) + executor.run(fluid.default_startup_program()) + for i in range(EPOCH_NUM): + reader.start() + while True: + try: + executor.run(feed=None) + except fluid.core.EOFException: + reader.reset() + break + + +2.如果iterable=True,则创建的PyReader对象与程序分离。程序中不会插入任何算子。在本例中,创建的reader是一个python生成器,它是可迭代的。用户应将从PyReader对象生成的数据输入 ``Executor.run(feed=...)`` 。 + +.. 
code-block:: python + + import paddle + import paddle.fluid as fluid + import numpy as np + + EPOCH_NUM = 3 + ITER_NUM = 5 + BATCH_SIZE = 10 + + def network(image, label): + # 用户定义网络,此处以softmax回归为例 + predict = fluid.layers.fc(input=image, size=10, act='softmax') + return fluid.layers.cross_entropy(input=predict, label=label) + + def reader_creator_random_image(height, width): + def reader(): + for i in range(ITER_NUM): + fake_image = np.random.uniform(low=0, high=255, size=[height, width]), + fake_label = np.ones([1]) + yield fake_image, fake_label + return reader + + image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True, return_list=False) + + user_defined_reader = reader_creator_random_image(784, 784) + reader.decorate_sample_list_generator( + paddle.batch(user_defined_reader, batch_size=BATCH_SIZE), + fluid.core.CPUPlace()) + loss = network(image, label) + executor = fluid.Executor(fluid.CPUPlace()) + executor.run(fluid.default_startup_program()) + + for _ in range(EPOCH_NUM): + for data in reader(): + executor.run(feed=data, fetch_list=[loss]) + +3. return_list=True,返回值将用list表示而非dict,通常用于动态图模式中。 + +.. code-block:: python + + import paddle + import paddle.fluid as fluid + import numpy as np + + EPOCH_NUM = 3 + ITER_NUM = 5 + BATCH_SIZE = 10 + + def reader_creator_random_image(height, width): + def reader(): + for i in range(ITER_NUM): + yield np.random.uniform(low=0, high=255, size=[height, width]), \ + np.random.random_integers(low=0, high=9, size=[1]) + return reader + + place = fluid.CPUPlace() + with fluid.dygraph.guard(place): + py_reader = fluid.io.PyReader(capacity=2, return_list=True) + user_defined_reader = reader_creator_random_image(784, 784) + py_reader.decorate_sample_list_generator( + paddle.batch(user_defined_reader, batch_size=BATCH_SIZE), + place) + for image, label in py_reader(): + relu = fluid.layers.relu(image) + +.. py:method:: start() + +启动数据输入线程。只能在reader对象不可迭代时调用。 + +**代码示例** + +.. code-block:: python + + import paddle + import paddle.fluid as fluid + import numpy as np + + BATCH_SIZE = 10 + + def generator(): + for i in range(5): + yield np.random.uniform(low=0, high=255, size=[784, 784]), + + image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32') + reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False) + reader.decorate_sample_list_generator( + paddle.batch(generator, batch_size=BATCH_SIZE)) + + executor = fluid.Executor(fluid.CPUPlace()) + executor.run(fluid.default_startup_program()) + for i in range(3): + reader.start() + while True: + try: + executor.run(feed=None) + except fluid.core.EOFException: + reader.reset() + break + +.. py:method:: reset() + +当 ``fluid.core.EOFException`` 抛出时重置reader对象。只能在reader对象不可迭代时调用。 + +**代码示例** + +.. 
code-block:: python + + import paddle + import paddle.fluid as fluid + import numpy as np + + BATCH_SIZE = 10 + + def generator(): + for i in range(5): + yield np.random.uniform(low=0, high=255, size=[784, 784]), + + image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32') + reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False) + reader.decorate_sample_list_generator( + paddle.batch(generator, batch_size=BATCH_SIZE)) + + executor = fluid.Executor(fluid.CPUPlace()) + executor.run(fluid.default_startup_program()) + for i in range(3): + reader.start() + while True: + try: + executor.run(feed=None) + except fluid.core.EOFException: + reader.reset() + break + +.. py:method:: decorate_sample_generator(sample_generator, batch_size, drop_last=True, places=None) + +设置PyReader对象的数据源。 + +提供的 ``sample_generator`` 应该是一个python生成器,它生成的数据类型应为list(numpy.ndarray)。 + +当PyReader对象可迭代时,必须设置 ``places`` 。 + +如果所有的输入都没有LOD,这个方法比 ``decorate_sample_list_generator(paddle.batch(sample_generator, ...))`` 更快。 + +参数: + - **sample_generator** (generator) – Python生成器,yield 类型为list(numpy.ndarray) + - **batch_size** (int) – batch size,必须大于0 + - **drop_last** (bool) – 当样本数小于batch数量时,是否删除最后一个batch + - **places** (None|list(CUDAPlace)|list(CPUPlace)) – 位置列表。当PyReader可迭代时必须被提供 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + EPOCH_NUM = 3 + ITER_NUM = 15 + BATCH_SIZE = 3 + + def network(image, label): + # 用户定义网络,此处以softmax回归为例 + predict = fluid.layers.fc(input=image, size=10, act='softmax') + return fluid.layers.cross_entropy(input=predict, label=label) + + def random_image_and_label_generator(height, width): + def generator(): + for i in range(ITER_NUM): + fake_image = np.random.uniform(low=0, + high=255, + size=[height, width]) + fake_label = np.array([1]) + yield fake_image, fake_label + return generator + + image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True) + + user_defined_generator = random_image_and_label_generator(784, 784) + reader.decorate_sample_generator(user_defined_generator, + batch_size=BATCH_SIZE, + places=[fluid.CPUPlace()]) + loss = network(image, label) + executor = fluid.Executor(fluid.CPUPlace()) + executor.run(fluid.default_startup_program()) + + for _ in range(EPOCH_NUM): + for data in reader(): + executor.run(feed=data, fetch_list=[loss]) + +.. py:method:: decorate_sample_list_generator(reader, places=None) + +设置PyReader对象的数据源。 + +提供的 ``reader`` 应该是一个python生成器,它生成列表(numpy.ndarray)类型的批处理数据。 + +当PyReader对象不可迭代时,必须设置 ``places`` 。 + +参数: + - **reader** (generator) – 返回列表(numpy.ndarray)类型的批处理数据的Python生成器 + - **places** (None|list(CUDAPlace)|list(CPUPlace)) – 位置列表。当PyReader可迭代时必须被提供 + +**代码示例** + +.. 
code-block:: python + + import paddle + import paddle.fluid as fluid + import numpy as np + + EPOCH_NUM = 3 + ITER_NUM = 15 + BATCH_SIZE = 3 + + def network(image, label): + # 用户定义网络,此处以softmax回归为例 + predict = fluid.layers.fc(input=image, size=10, act='softmax') + return fluid.layers.cross_entropy(input=predict, label=label) + + def random_image_and_label_generator(height, width): + def generator(): + for i in range(ITER_NUM): + fake_image = np.random.uniform(low=0, + high=255, + size=[height, width]) + fake_label = np.ones([1]) + yield fake_image, fake_label + return generator + + image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True) + + user_defined_generator = random_image_and_label_generator(784, 784) + reader.decorate_sample_list_generator( + paddle.batch(user_defined_generator, batch_size=BATCH_SIZE), + fluid.core.CPUPlace()) + loss = network(image, label) + executor = fluid.Executor(fluid.core.CPUPlace()) + executor.run(fluid.default_startup_program()) + + for _ in range(EPOCH_NUM): + for data in reader(): + executor.run(feed=data, fetch_list=[loss]) + +.. py:method:: decorate_batch_generator(reader, places=None) + +设置PyReader对象的数据源。 + +提供的 ``reader`` 应该是一个python生成器,它生成列表(numpy.ndarray)类型或LoDTensor类型的批处理数据。 + +当PyReader对象不可迭代时,必须设置 ``places`` 。 + +参数: + - **reader** (generator) – 返回LoDTensor类型的批处理数据的Python生成器 + - **places** (None|list(CUDAPlace)|list(CPUPlace)) – 位置列表。当PyReader可迭代时必须被提供 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + EPOCH_NUM = 3 + ITER_NUM = 15 + BATCH_SIZE = 3 + + def network(image, label): + # 用户定义网络,此处以softmax回归为例 + predict = fluid.layers.fc(input=image, size=10, act='softmax') + return fluid.layers.cross_entropy(input=predict, label=label) + + def random_image_and_label_generator(height, width): + def generator(): + for i in range(ITER_NUM): + batch_image = np.random.uniform(low=0, + high=255, + size=[BATCH_SIZE, height, width]) + batch_label = np.ones([BATCH_SIZE, 1]) + batch_image = batch_image.astype('float32') + batch_label = batch_label.astype('int64') + yield batch_image, batch_label + return generator + + image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True) + + user_defined_generator = random_image_and_label_generator(784, 784) + reader.decorate_batch_generator(user_defined_generator, fluid.CPUPlace()) + + loss = network(image, label) + executor = fluid.Executor(fluid.CPUPlace()) + executor.run(fluid.default_startup_program()) + + for _ in range(EPOCH_NUM): + for data in reader(): + executor.run(feed=data, fetch_list=[loss]) + + +.. py:method:: next() + +获取下一个数据。用户不应直接调用此方法。此方法用于PaddlePaddle框架内部实现Python 2.x的迭代器协议。 diff --git a/doc/paddle/api/paddle/fluid/regularizer/L1DecayRegularizer_cn.rst b/doc/paddle/api/paddle/fluid/regularizer/L1DecayRegularizer_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..90ebd61ec981aaaf1889ed3f7b3a66949591f1b7 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/regularizer/L1DecayRegularizer_cn.rst @@ -0,0 +1,14 @@ +.. _cn_api_fluid_regularizer_L1DecayRegularizer: + +L1DecayRegularizer +------------------------------- + +.. 
py:class:: paddle.fluid.regularizer.L1DecayRegularizer(regularization_coeff=0.0) + + + + +**注意:paddle.fluid.regularizer.L1DecayRegularizer是paddle.fluid.regularizer.L1Decay的别名,推荐使用paddle.fluid.regularizer.L1Decay。** + +详见 :ref:`cn_api_fluid_regularizer_L1Decay` 接口的使用文档。 + diff --git a/doc/paddle/api/paddle/fluid/regularizer/L1Decay_cn.rst b/doc/paddle/api/paddle/fluid/regularizer/L1Decay_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d5b0dc6f548ce0708004df7adcf06522886fdc17 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/regularizer/L1Decay_cn.rst @@ -0,0 +1,74 @@ + +.. _cn_api_fluid_regularizer_L1Decay: + +L1Decay +------------------------------- + +.. py:attribute:: paddle.fluid.regularizer.L1Decay(regularization_coeff=0.0) + + + + +L1Decay实现L1权重衰减正则化,用于模型训练,使得权重矩阵稀疏。 + +该类生成的实例对象,需要设置在 :ref:`cn_api_fluid_ParamAttr` 或者 ``optimizer`` +(例如 :ref:`cn_api_fluid_optimizer_SGDOptimizer` )中,在 ``ParamAttr`` 中设置时, +只对该网络层中的参数生效;在 ``optimizer`` 中设置时,会对所有的参数生效;如果同时设置, +在 ``ParamAttr`` 中设置的优先级会高于在 ``optimizer`` 中设置。 + +具体实现中,L1权重衰减正则化的计算公式如下: + +.. math:: + \\L1WeightDecay=reg\_coeff∗sign(parameter)\\ + +参数: + - **regularization_coeff** (float) – L1正则化系数,默认值为0.0。 + +**代码示例1** + +.. code-block:: python + + import paddle.fluid as fluid + + main_prog = fluid.Program() + startup_prog = fluid.Program() + with fluid.program_guard(main_prog, startup_prog): + data = fluid.layers.data(name='image', shape=[3, 28, 28], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + hidden = fluid.layers.fc(input=data, size=128, act='relu') + prediction = fluid.layers.fc(input=hidden, size=10, act='softmax') + loss = fluid.layers.cross_entropy(input=prediction, label=label) + avg_loss = fluid.layers.mean(loss) + optimizer = fluid.optimizer.Adagrad( + learning_rate=1e-4, + regularization=fluid.regularizer.L1Decay( + regularization_coeff=0.1)) + optimizer.minimize(avg_loss) + + +**代码示例2** + +.. code-block:: python + + # 在 ParamAttr 和 optimizer 中同时设置正则化 + import paddle.fluid as fluid + l1 = fluid.regularizer.L1Decay(regularization_coeff=0.1) + l2 = fluid.regularizer.L2Decay(regularization_coeff=0.1) + x = fluid.layers.uniform_random([3,4]) + + # 在ParamAttr中设置L1正则化 + w_param = fluid.ParamAttr(regularizer=l1) + hidden1 = fluid.layers.fc(x, 8, param_attr=w_param) # fc_0.w_0(L1), fc_0.b_0 + hidden2 = fluid.layers.fc(hidden1, 16, param_attr=w_param) # fc_1.w_0(L1), fc_1.b_0 + predict = fluid.layers.fc(hidden2, 32) # fc_3.w_0, fc_3.b_0 + avg_loss = fluid.layers.mean(predict) + + # 在optimizer中设置L2正则化 + optimizer = fluid.optimizer.SGD(learning_rate=1e-4, regularization=l2) + optimizer.minimize(avg_loss) + + # 将会打印出提示信息: + # Regularization of [fc_0.w_0, fc_1.w_0] have been set by ParamAttr or WeightNormParamAttr already. + # So, the Regularization of Optimizer will not take effect for these parameters! + + diff --git a/doc/paddle/api/paddle/fluid/regularizer/L2DecayRegularizer_cn.rst b/doc/paddle/api/paddle/fluid/regularizer/L2DecayRegularizer_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6e72e53138d365fee1af9b07cb8bce36147999fe --- /dev/null +++ b/doc/paddle/api/paddle/fluid/regularizer/L2DecayRegularizer_cn.rst @@ -0,0 +1,15 @@ +.. _cn_api_fluid_regularizer_L2DecayRegularizer: + +L2DecayRegularizer +------------------------------- + +.. 
py:class:: paddle.fluid.regularizer.L2DecayRegularizer(regularization_coeff=0.0) + + + + +**注意:paddle.fluid.regularizer.L2DecayRegularizer是paddle.fluid.regularizer.L2Decay的别名,推荐使用paddle.fluid.regularizer.L2Decay。** + +详见 :ref:`cn_api_fluid_regularizer_L2Decay` 接口的使用文档。 + + diff --git a/doc/paddle/api/paddle/fluid/regularizer/L2Decay_cn.rst b/doc/paddle/api/paddle/fluid/regularizer/L2Decay_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d9c55c53f8c838965b7e5e588d9de4a2a6cce5d4 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/regularizer/L2Decay_cn.rst @@ -0,0 +1,72 @@ +.. _cn_api_fluid_regularizer_L2Decay: + +L2Decay +------------------------------- + +.. py:attribute:: paddle.fluid.regularizer.L2Decay + + + + +L2Decay实现L2权重衰减正则化,用于模型训练,有助于防止模型对训练数据过拟合。 + +该类生成的实例对象,需要设置在 :ref:`cn_api_fluid_ParamAttr` 或者 ``optimizer`` +(例如 :ref:`cn_api_fluid_optimizer_SGDOptimizer` )中,在 ``ParamAttr`` 中设置时, +只对该网络层中的参数生效;在 ``optimizer`` 中设置时,会对所有的参数生效;如果同时设置, +在 ``ParamAttr`` 中设置的优先级会高于在 ``optimizer`` 中设置。 + +具体实现中,L2权重衰减正则化的计算公式如下: + +.. math:: + \\L2WeightDecay=reg\_coeff*parameter\\ + +参数: + - **regularization_coeff** (float) – 正则化系数,默认值为0.0。 + +**代码示例1** + +.. code-block:: python + + import paddle.fluid as fluid + + main_prog = fluid.Program() + startup_prog = fluid.Program() + with fluid.program_guard(main_prog, startup_prog): + data = fluid.layers.data(name='image', shape=[3, 28, 28], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + hidden = fluid.layers.fc(input=data, size=128, act='relu') + prediction = fluid.layers.fc(input=hidden, size=10, act='softmax') + loss = fluid.layers.cross_entropy(input=prediction, label=label) + avg_loss = fluid.layers.mean(loss) + optimizer = fluid.optimizer.Adagrad( + learning_rate=1e-4, + regularization=fluid.regularizer.L2Decay( + regularization_coeff=0.1)) + optimizer.minimize(avg_loss) + + +**代码示例2** + +.. code-block:: python + + # 在 ParamAttr 和 optimizer 中同时设置正则化 + import paddle.fluid as fluid + l1 = fluid.regularizer.L1Decay(regularization_coeff=0.1) + l2 = fluid.regularizer.L2Decay(regularization_coeff=0.1) + x = fluid.layers.uniform_random([3,4]) + + # 在ParamAttr中设置L1正则化 + w_param = fluid.ParamAttr(regularizer=l1) + hidden1 = fluid.layers.fc(x, 8, param_attr=w_param) # fc_0.w_0(L1), fc_0.b_0 + hidden2 = fluid.layers.fc(hidden1, 16, param_attr=w_param) # fc_1.w_0(L1), fc_1.b_0 + predict = fluid.layers.fc(hidden2, 32) # fc_3.w_0, fc_3.b_0 + avg_loss = fluid.layers.mean(predict) + + # 在optimizer中设置L2正则化 + optimizer = fluid.optimizer.SGD(learning_rate=1e-4, regularization=l2) + optimizer.minimize(avg_loss) + + # 将会打印出提示信息: + # Regularization of [fc_0.w_0, fc_1.w_0] have been set by ParamAttr or WeightNormParamAttr already. + # So, the Regularization of Optimizer will not take effect for these parameters! + diff --git a/doc/paddle/api/paddle/fluid/release_memory_cn.rst b/doc/paddle/api/paddle/fluid/release_memory_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c2e518b97e12170065772a1a7549031ec625db87 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/release_memory_cn.rst @@ -0,0 +1,12 @@ +.. _cn_api_fluid_transpiler_release_memory: + +release_memory +------------------------------- + + +.. 
py:function:: paddle.fluid.transpiler.release_memory(input_program, skip_opt_set=None) + + + + +**从1.6版本开始此接口不再推荐使用,请不要在新写的代码中使用它,1.6+版本已默认开启更优的存储优化策略** \ No newline at end of file diff --git a/doc/paddle/api/paddle/fluid/require_version_cn.rst b/doc/paddle/api/paddle/fluid/require_version_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..19f14cb37fe9adc927141d647e61ddfa843bc3d2 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/require_version_cn.rst @@ -0,0 +1,38 @@ +.. _cn_api_fluid_require_version: + +require_version +------------------------------- + +.. py:function:: paddle.fluid.require_version(min_version, max_version=None) + + + +该接口用于检查已安装的飞桨版本是否介于[``min_version``, ``max_version``]之间(包含 ``min_version`` 和 ``max_version`` ),如果已安装的版本低于 ``min_version`` 或者高于 ``max_version`` ,将会抛出异常。该接口无返回值。 + +参数: + - **min_version** (str) - 指定所需要的最低版本(如‘1.4.0’) + - **max_version** (str, optional) – 指定可接受的最高版本(如‘1.7.0’),默认值None,表示任意大于等于 ``min_version`` 的版本都可以接受。 + +返回:无 + +抛出异常: + + - ``TypeError`` – ``min_version`` 的类型不是str。 + - ``TypeError`` – ``max_version`` 的类型不是str或type(None)。 + - ``ValueError`` – ``min_version`` 的值不是正常的版本号格式。 + - ``ValueError`` – ``max_version`` 的值不是正常的版本号格式或None。 + - ``Exception`` – 已安装的版本低于 ``min_version`` 或者高于 ``max_version`` 。 + + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + + # 任何大于等于0.1.0的版本都可以接受 + fluid.require_version('0.1.0') + + # 只接受介于0.1.0和10.0.0之间的版本(包含0.1.0和10.0.0) + fluid.require_version(min_version='0.1.0', max_version='10.0.0') + diff --git a/doc/paddle/api/paddle/fluid/save_cn.rst b/doc/paddle/api/paddle/fluid/save_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f0276316bacd0d7b7cb7ef6df12b1f9ac08b759f --- /dev/null +++ b/doc/paddle/api/paddle/fluid/save_cn.rst @@ -0,0 +1,80 @@ +.. _cn_api_fluid_dygraph_jit_save: + +save +----------------- + +.. py:function:: paddle.fluid.dygraph.jit.save(layer, model_path, input_spec=None, configs=None) + +将输入的经过 ``@declarative`` 装饰的 :ref:`cn_api_fluid_dygraph_Layer` 存储为 :ref:`cn_api_fluid_dygraph_TranslatedLayer` 格式的模型, +载入后可用于预测推理或者fine-tune训练。 + +该接口将会将输入 :ref:`cn_api_fluid_dygraph_Layer` 转写后的模型结构 ``Program`` 和所有必要的持久参数变量存储至输入路径 ``model_path`` 中。 + +默认存储的 ``Program`` 文件名为 ``__model__``, 默认存储持久参数变量的文件名为 ``__variables__``, +同时会将变量的一些描述信息存储至文件 ``__variables.info__``,这些额外的信息将在fine-tune训练中使用。 + +存储的模型能够被以下API载入使用: + - :ref:`cn_api_fluid_dygraph_jit_load` + - :ref:`cn_api_fluid_io_load_inference_model` (需要配置参数 ``params_filename='__variables__'`` ) + - 其他预测库API + +参数: + - **layer** (Layer) - 需要存储的 :ref:`cn_api_fluid_dygraph_Layer` 对象。输入的 ``Layer`` 需要经过 ``@declarative`` 装饰。 + - **model_path** (str) - 存储模型的目录。 + - **input_spec** (list[Variable], 可选) - 描述存储模型的输入。此参数是传入当前存储的 ``TranslatedLayer`` forward方法的一个示例输入。如果为 ``None`` ,所有原 ``Layer`` forward方法的输入变量将都会被配置为存储模型的输入变量。默认为 ``None``。 + - **configs** (SaveLoadConfig, 可选) - 用于指定额外配置选项的 :ref:`cn_api_fluid_dygraph_jit_SaveLoadConfig` 对象。默认为 ``None``。 + +返回:无 + +**示例代码** + +.. 
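下面的完整示例会把训练好的模型存储到 ``linear.example.model`` 目录;存储完成后,可以按如下方式通过 :ref:`cn_api_fluid_dygraph_jit_load` 载入并用于预测(补充示意,非本接口原有示例;假设模型已按下方示例存储,且输入为 784 维特征):

.. code-block:: python

    import numpy as np
    import paddle.fluid as fluid

    # 开启命令式编程模式,载入之前通过 jit.save 存储的模型(示意)
    fluid.enable_dygraph()
    infer_net = fluid.dygraph.jit.load("linear.example.model")
    # 构造一条 784 维的随机输入并执行预测
    x = fluid.dygraph.to_variable(np.random.random((1, 784)).astype('float32'))
    pred = infer_net(x)

完整的存储示例如下:

..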
code-block:: python + + import numpy as np + import paddle.fluid as fluid + from paddle.fluid.dygraph import Linear + from paddle.fluid.dygraph import declarative + BATCH_SIZE = 32 + BATCH_NUM = 20 + def random_batch_reader(): + def _get_random_images_and_labels(image_shape, label_shape): + image = np.random.random(size=image_shape).astype('float32') + label = np.random.random(size=label_shape).astype('int64') + return image, label + def __reader__(): + for _ in range(BATCH_NUM): + batch_image, batch_label = _get_random_images_and_labels( + [BATCH_SIZE, 784], [BATCH_SIZE, 1]) + yield batch_image, batch_label + return __reader__ + class LinearNet(fluid.dygraph.Layer): + def __init__(self, in_size, out_size): + super(LinearNet, self).__init__() + self._linear = Linear(in_size, out_size) + @declarative + def forward(self, x): + return self._linear(x) + # 开启命令式编程模式 + fluid.enable_dygraph() + # 创建网络 + net = LinearNet(784, 1) + adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters()) + # 创建DataLoader + train_loader = fluid.io.DataLoader.from_generator(capacity=5) + train_loader.set_batch_generator(random_batch_reader()) + # 训练 + for data in train_loader(): + img, label = data + label.stop_gradient = True + cost = net(img) + loss = fluid.layers.cross_entropy(cost, label) + avg_loss = fluid.layers.mean(loss) + avg_loss.backward() + adam.minimize(avg_loss) + net.clear_gradients() + # 存储模型 + model_path = "linear.example.model" + fluid.dygraph.jit.save( + layer=net, + model_path=model_path, + input_spec=[img]) diff --git a/doc/paddle/api/paddle/fluid/set_flags_cn.rst b/doc/paddle/api/paddle/fluid/set_flags_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a4f6fe1cd02bd2a691059b6732a04e757ab0304e --- /dev/null +++ b/doc/paddle/api/paddle/fluid/set_flags_cn.rst @@ -0,0 +1,18 @@ +.. _cn_api_fluid_set_flags: + +set_flags +------------------------------- + +.. py:function:: paddle.fluid.set_flags(flags) + +用于设置Paddle框架中环境变量FLAGS的值。 + +参数: + - **flags** (dict) - 包含想要设置的环境变量FLAGS的名称和值的字典。 + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + + fluid.set_flags({'FLAGS_eager_delete_tensor_gb': 1.0}) diff --git a/doc/paddle/api/paddle/fluid/transpiler/HashName_cn.rst b/doc/paddle/api/paddle/fluid/transpiler/HashName_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ad7fcc97bba0be7080215232f78c5ce864718580 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/transpiler/HashName_cn.rst @@ -0,0 +1,49 @@ +.. _cn_api_fluid_transpiler_HashName: + +HashName +------------------------------- + + +.. py:class:: paddle.fluid.transpiler.HashName(pserver_endpoints) + + + + +该类使用 python 内置的 ``hash()`` 函数将变量名散列到多个parameter server节点。 + +参数: + - **pserver_endpoints** (list) - endpoint (ip:port)的 list + +返回:实例化后的HashName的对象 + +返回类型:HashName + +**代码示例** + +.. code-block:: python + + from paddle.fluid.transpiler import HashName + + pserver_endpoints = ["127.0.0.1:6007", "127.0.0.1:6008"] + vars = ["var1", "var2", "var3", "var4", "var5"] + rr = HashName(pserver_endpoints) + rr.dispatch(vars) + + +.. py:method:: reset() + +该方法将HashName内置的计数重置为0。 + +返回:无。 + +**代码示例** + +.. 
code-block:: python + + from paddle.fluid.transpiler import HashName + + pserver_endpoints = ["127.0.0.1:6007", "127.0.0.1:6008"] + vars = ["var1", "var2", "var3", "var4", "var5"] + rr = HashName(pserver_endpoints) + rr.reset() + diff --git a/doc/paddle/api/paddle/fluid/unique_name/generate_cn.rst b/doc/paddle/api/paddle/fluid/unique_name/generate_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4eb8a68c09bf029f5363f04ff9461b5e5aa9ecff --- /dev/null +++ b/doc/paddle/api/paddle/fluid/unique_name/generate_cn.rst @@ -0,0 +1,29 @@ +.. _cn_api_fluid_unique_name_generate: + +generate +------------------------------- + +.. py:function:: paddle.fluid.unique_name.generate(key) + + + + +该接口产生以前缀key开头的唯一名称。目前,Paddle通过从0开始的编号对相同前缀key的名称进行区分。例如,使用key=fc连续调用该接口会产生fc_0, fc_1, fc_2等不同名称。 + +参数: + - **key** (str) - 产生的唯一名称的前缀。 + +返回:含前缀key的唯一名称。 + +返回类型:str。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + name1 = fluid.unique_name.generate('fc') + name2 = fluid.unique_name.generate('fc') + print(name1, name2) # fc_0, fc_1 + + diff --git a/doc/paddle/api/paddle/fluid/unique_name/guard_cn.rst b/doc/paddle/api/paddle/fluid/unique_name/guard_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7c457eb15118baf6823be9e404fc2fa053b6a2f6 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/unique_name/guard_cn.rst @@ -0,0 +1,35 @@ +.. _cn_api_fluid_unique_name_guard: + +guard +------------------------------- + +.. py:function:: paddle.fluid.unique_name.guard(new_generator=None) + + + + +该接口用于更改命名空间,与with语句一起使用。使用后,在with语句的上下文中使用新的命名空间,调用generate接口时相同前缀的名称将从0开始重新编号。 + +参数: + - **new_generator** (str|bytes, 可选) - 新命名空间的名称。请注意,Python2中的str在Python3中被区分为str和bytes两种,因此这里有两种类型。 缺省值为None,若不为None,new_generator将作为前缀添加到generate接口产生的唯一名称中。 + +返回: 无。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + with fluid.unique_name.guard(): + name_1 = fluid.unique_name.generate('fc') + with fluid.unique_name.guard(): + name_2 = fluid.unique_name.generate('fc') + print(name_1, name_2) # fc_0, fc_0 + + with fluid.unique_name.guard('A'): + name_1 = fluid.unique_name.generate('fc') + with fluid.unique_name.guard('B'): + name_2 = fluid.unique_name.generate('fc') + print(name_1, name_2) # Afc_0, Bfc_0 + + diff --git a/doc/paddle/api/paddle/fluid/unique_name/switch_cn.rst b/doc/paddle/api/paddle/fluid/unique_name/switch_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..93da7cc1dd97162fbc09b6ae811c2a8f0f11a958 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/unique_name/switch_cn.rst @@ -0,0 +1,35 @@ +.. _cn_api_fluid_unique_name_switch: + +switch +------------------------------- + +.. py:function:: paddle.fluid.unique_name.switch(new_generator=None) + + + + +该接口将当前上下文的命名空间切换到新的命名空间。该接口与guard接口都可用于更改命名空间,推荐使用guard接口,配合with语句管理命名空间上下文。 + +参数: + - **new_generator** (UniqueNameGenerator, 可选) - 要切换到的新命名空间,一般无需设置。缺省值为None,表示切换到一个匿名的新命名空间。 + +返回:先前的命名空间,一般无需操作该返回值。 + +返回类型:UniqueNameGenerator。 + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + name1 = fluid.unique_name.generate('fc') + name2 = fluid.unique_name.generate('fc') + print(name1, name2) # fc_0, fc_1 + + pre_generator = fluid.unique_name.switch() # 切换到新命名空间 + name2 = fluid.unique_name.generate('fc') + print(name2) # fc_0 + + fluid.unique_name.switch(pre_generator) # 切换回原命名空间 + name3 = fluid.unique_name.generate('fc') + print(name3) # fc_2, 因为原命名空间已生成fc_0, fc_1 diff --git a/doc/paddle/api/paddle/framework/CPUPlace_cn.rst b/doc/paddle/api/paddle/framework/CPUPlace_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e091352c9018b355e234f8407625199d51c48555 --- /dev/null +++ b/doc/paddle/api/paddle/framework/CPUPlace_cn.rst @@ -0,0 +1,20 @@ +.. _cn_api_fluid_CPUPlace: + +CPUPlace +------------------------------- + +.. py:class:: paddle.fluid.CPUPlace + + + + +``CPUPlace`` 是一个设备描述符,表示一个分配或将要分配 ``Tensor`` 或 ``LoDTensor`` 的 ``CPU`` 设备。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + cpu_place = fluid.CPUPlace() + + diff --git a/doc/paddle/api/paddle/framework/CUDAPinnedPlace_cn.rst b/doc/paddle/api/paddle/framework/CUDAPinnedPlace_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a3e669344b2bac46b8cb57d24bbc633bb3549be3 --- /dev/null +++ b/doc/paddle/api/paddle/framework/CUDAPinnedPlace_cn.rst @@ -0,0 +1,20 @@ +.. _cn_api_fluid_CUDAPinnedPlace: + +CUDAPinnedPlace +------------------------------- + +.. py:class:: paddle.fluid.CUDAPinnedPlace + + + + +``CUDAPinnedPlace`` 是一个设备描述符,它所指代的页锁定内存由 CUDA 函数 ``cudaHostAlloc()`` 在主机内存上分配,主机的操作系统将不会对这块内存进行分页和交换操作,可以通过直接内存访问技术访问,加速主机和 GPU 之间的数据拷贝。 +有关 CUDA 的数据转移和 ``pinned memory``,参见 `官方文档 `_ 。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + place = fluid.CUDAPinnedPlace() + diff --git a/doc/paddle/api/paddle/framework/CUDAPlace_cn.rst b/doc/paddle/api/paddle/framework/CUDAPlace_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ba7cf62280b52b17dc310c8d9c1a5a4ca2cc6feb --- /dev/null +++ b/doc/paddle/api/paddle/framework/CUDAPlace_cn.rst @@ -0,0 +1,33 @@ +.. _cn_api_fluid_CUDAPlace: + +CUDAPlace +------------------------------- + +.. py:class:: paddle.fluid.CUDAPlace + + + + +.. note:: + 多卡任务请先使用 FLAGS_selected_gpus 环境变量设置可见的GPU设备,下个版本将会修正 CUDA_VISIBLE_DEVICES 环境变量无效的问题。 + +``CUDAPlace`` 是一个设备描述符,表示一个分配或将要分配 ``Tensor`` 或 ``LoDTensor`` 的 GPU 设备。 +每个 ``CUDAPlace`` 有一个 ``dev_id`` (设备id)来表明当前的 ``CUDAPlace`` 所代表的显卡编号,编号从 0 开始。 +``dev_id`` 不同的 ``CUDAPlace`` 所对应的内存不可相互访问。 +这里编号指的是可见显卡的逻辑编号,而不是显卡实际的编号。 +可以通过 ``CUDA_VISIBLE_DEVICES`` 环境变量限制程序能够使用的 GPU 设备,程序启动时会遍历当前的可见设备,并从 0 开始为这些设备编号。 +如果没有设置 ``CUDA_VISIBLE_DEVICES``,则默认所有的设备都是可见的,此时逻辑编号与实际编号是相同的。 + +参数: + - **id** (int,可选) - GPU的设备ID。如果为 ``None``,则默认会使用 id 为 0 的设备。默认值为 ``None``。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + gpu_place = fluid.CUDAPlace(0) + + + + diff --git a/doc/paddle/api/paddle/framework/SaveLoadConfig_cn.rst b/doc/paddle/api/paddle/framework/SaveLoadConfig_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..cbee1bab234be6f53f83061c52139093513d321b --- /dev/null +++ b/doc/paddle/api/paddle/framework/SaveLoadConfig_cn.rst @@ -0,0 +1,273 @@ +.. _cn_api_fluid_dygraph_jit_SaveLoadConfig: + +SaveLoadConfig +------------------------------- + +.. py:class:: paddle.fluid.dygraph.jit.SaveLoadConfig() + +用于配置接口 :ref:`cn_api_fluid_dygraph_jit_save` 和 :ref:`cn_api_fluid_dygraph_jit_load` 存储载入 :ref:`cn_api_fluid_dygraph_TranslatedLayer` 时的附加选项。 + +**示例代码:** + + 1. 
在存储模型时使用 ``SaveLoadConfig`` + + .. code-block:: python + + import numpy as np + import paddle.fluid as fluid + from paddle.fluid.dygraph import Linear + from paddle.fluid.dygraph import declarative + class SimpleNet(fluid.dygraph.Layer): + def __init__(self, in_size, out_size): + super(SimpleNet, self).__init__() + self._linear = Linear(in_size, out_size) + @declarative + def forward(self, x): + y = self._linear(x) + z = self._linear(y) + return z + # 开启命令式编程模式 + fluid.enable_dygraph() + # 训练模型 + net = SimpleNet(8, 8) + adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters()) + x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32')) + for i in range(10): + out = net(x) + loss = fluid.layers.mean(out) + loss.backward() + adam.minimize(loss) + net.clear_gradients() + # 在存储模型时使用SaveLoadConfig + model_path = "simplenet.example.model" + configs = fluid.dygraph.jit.SaveLoadConfig() + configs.model_filename = "__simplenet__" + fluid.dygraph.jit.save( + layer=net, + model_path=model_path, + input_spec=[x], + configs=configs) + + 2. 在载入模型时使用 ``SaveLoadConfig`` + + .. code-block:: python + + import numpy as np + import paddle.fluid as fluid + # 开启命令式编程模式 + fluid.enable_dygraph() + # 在载入模型时使用SaveLoadconfig + model_path = "simplenet.example.model" + configs = fluid.dygraph.jit.SaveLoadConfig() + configs.model_filename = "__simplenet__" + infer_net = fluid.dygraph.jit.load(model_path, configs=configs) + # 预测 + x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32')) + pred = infer_net(x) + +属性 +:::::::::::: + +.. py:attribute:: output_spec + +选择保存模型( :ref:`cn_api_fluid_dygraph_TranslatedLayer` )的输出变量,通过指定的这些变量能够使模型仅计算特定的结果。 +默认情况下,原始 :ref:`cn_api_fluid_dygraph_Layer` 的forward方法的所有返回变量都将配置为存储后模型 :ref:`cn_api_fluid_dygraph_TranslatedLayer` 的输出变量。 + +``output_spec`` 属性类型需要是 ``list[Variable]``。如果输入的 ``output_spec`` 列表不是原始 :ref:`cn_api_fluid_dygraph_Layer` 的forward方法的所有返回变量, +将会依据输入的 ``output_spec`` 列表对存储的模型进行裁剪。 + +.. note:: + ``output_spec`` 属性仅在存储模型时使用。 + +**示例代码:** + .. code-block:: python + + import numpy as np + import paddle.fluid as fluid + from paddle.fluid.dygraph import Linear + from paddle.fluid.dygraph import declarative + class SimpleNet(fluid.dygraph.Layer): + def __init__(self, in_size, out_size): + super(SimpleNet, self).__init__() + self._linear = Linear(in_size, out_size) + @declarative + def forward(self, x): + y = self._linear(x) + z = self._linear(y) + loss = fluid.layers.mean(z) + return z, loss + # 开启命令式编程模式 + fluid.enable_dygraph() + # 训练模型 + net = SimpleNet(8, 8) + adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters()) + x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32')) + for i in range(10): + out, loss = net(x) + loss.backward() + adam.minimize(loss) + net.clear_gradients() + # 使用SaveLoadconfig.output_spec + model_path = "simplenet.example.model.output_spec" + configs = fluid.dygraph.jit.SaveLoadConfig() + # 仅在存储模型中保留预测结果,丢弃loss + configs.output_spec = [out] + fluid.dygraph.jit.save( + layer=net, + model_path=model_path, + input_spec=[x], + configs=configs) + infer_net = fluid.dygraph.jit.load(model_path, configs=configs) + x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32')) + # 仅有预测结果输出 + pred = infer_net(x) + + +.. py:attribute:: model_filename + +存储转写 :ref:`cn_api_fluid_dygraph_Layer` 模型结构 ``Program`` 的文件名称。默认文件名为 ``__model__``。 + +**示例代码** + .. 
code-block:: python + + import numpy as np + import paddle.fluid as fluid + from paddle.fluid.dygraph import Linear + from paddle.fluid.dygraph import declarative + class SimpleNet(fluid.dygraph.Layer): + def __init__(self, in_size, out_size): + super(SimpleNet, self).__init__() + self._linear = Linear(in_size, out_size) + @declarative + def forward(self, x): + y = self._linear(x) + z = self._linear(y) + return z + # 开启命令式编程模式 + fluid.enable_dygraph() + # 训练模型 + net = SimpleNet(8, 8) + adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters()) + x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32')) + for i in range(10): + out = net(x) + loss = fluid.layers.mean(out) + loss.backward() + adam.minimize(loss) + net.clear_gradients() + model_path = "simplenet.example.model.model_filename" + configs = fluid.dygraph.jit.SaveLoadConfig() + configs.model_filename = "__simplenet__" + # 配置configs.model_filename存储模型 + fluid.dygraph.jit.save( + layer=net, + model_path=model_path, + input_spec=[x], + configs=configs) + # [结果] 存储模型目录文件包括: + # __simplenet__ __variables__ __variables.info__ + # 配置configs.model_filename载入模型 + infer_net = fluid.dygraph.jit.load(model_path, configs=configs) + x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32')) + pred = infer_net(x) + + +.. py:attribute:: params_filename + +存储转写 :ref:`cn_api_fluid_dygraph_Layer` 所有持久参数(包括 ``Parameters`` 和持久的 ``Buffers``)的文件名称。默认文件名称为 ``__variable__``。 + +**示例代码** + .. code-block:: python + + import numpy as np + import paddle.fluid as fluid + from paddle.fluid.dygraph import Linear + from paddle.fluid.dygraph import declarative + class SimpleNet(fluid.dygraph.Layer): + def __init__(self, in_size, out_size): + super(SimpleNet, self).__init__() + self._linear = Linear(in_size, out_size) + @declarative + def forward(self, x): + y = self._linear(x) + z = self._linear(y) + return z + # 开启命令式编程模式 + fluid.enable_dygraph() + # 训练模型 + net = SimpleNet(8, 8) + adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters()) + x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32')) + for i in range(10): + out = net(x) + loss = fluid.layers.mean(out) + loss.backward() + adam.minimize(loss) + net.clear_gradients() + model_path = "simplenet.example.model.params_filename" + configs = fluid.dygraph.jit.SaveLoadConfig() + configs.params_filename = "__params__" + # 配置configs.params_filename存储模型 + fluid.dygraph.jit.save( + layer=net, + model_path=model_path, + input_spec=[x], + configs=configs) + # [结果] 存储模型目录文件包括: + # __model__ __params__ __variables.info__ + # 配置configs.params_filename载入模型 + infer_net = fluid.dygraph.jit.load(model_path, configs=configs) + x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32')) + pred = infer_net(x) + + +.. py:attribute:: separate_params + +配置是否将 :ref:`cn_api_fluid_dygraph_Layer` 的参数存储为分散的文件。 +(这是为了兼容接口 :ref:`cn_api_fluid_io_save_inference_model` 的行为) + +如果设置为 ``True`` ,每个参数将会被存储为一个文件,文件名为参数名,同时``SaveLoadConfig.params_filename`` 指定的文件名将不会生效。默认为 ``False``。 + +**示例代码** + .. 
code-block:: python + + import numpy as np + import paddle.fluid as fluid + from paddle.fluid.dygraph import Linear + from paddle.fluid.dygraph import declarative + class SimpleNet(fluid.dygraph.Layer): + def __init__(self, in_size, out_size): + super(SimpleNet, self).__init__() + self._linear = Linear(in_size, out_size) + @declarative + def forward(self, x): + y = self._linear(x) + z = self._linear(y) + return z + # 开启命令式编程模式 + fluid.enable_dygraph() + # 训练模型 + net = SimpleNet(8, 8) + adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters()) + x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32')) + for i in range(10): + out = net(x) + loss = fluid.layers.mean(out) + loss.backward() + adam.minimize(loss) + net.clear_gradients() + model_path = "simplenet.example.model.separate_params" + configs = fluid.dygraph.jit.SaveLoadConfig() + configs.separate_params = True + # 配置configs.separate_params存储模型 + fluid.dygraph.jit.save( + layer=net, + model_path=model_path, + input_spec=[x], + configs=configs) + # [结果] 存储模型目录文件包括: + # linear_0.b_0 linear_0.w_0 __model__ __variables.info__ + # 配置configs.params_filename载入模型 + infer_net = fluid.dygraph.jit.load(model_path, configs=configs) + x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32')) + pred = infer_net(x) diff --git a/doc/paddle/api/paddle/framework/get_default_dtype_cn.rst b/doc/paddle/api/paddle/framework/get_default_dtype_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..cedfe95fb87a30c2038e33484298affc4439c047 --- /dev/null +++ b/doc/paddle/api/paddle/framework/get_default_dtype_cn.rst @@ -0,0 +1,23 @@ +.. _cn_api_paddle_framework_get_default_dtype: + +get_default_dtype +------------------------------- + +.. py:function:: paddle.get_default_dtype() + + +得到当前全局的dtype。 该值初始是float32。 + + +参数: + + 无 + +返回: string,这个全局dtype仅支持float16、float32、float64 + +**代码示例**: + +.. code-block:: python + + import paddle + paddle.get_default_dtype() diff --git a/doc/paddle/api/paddle/framework/random/manual_seed_cn.rst b/doc/paddle/api/paddle/framework/random/manual_seed_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7ddf88f632df1a1c4fd6aea961b9cf30d75c682c --- /dev/null +++ b/doc/paddle/api/paddle/framework/random/manual_seed_cn.rst @@ -0,0 +1,24 @@ +.. _cn_api_paddle_framework_manual_seed: + +manual_seed +------------------------------- + +.. py:function:: paddle.framework.manual_seed(seed) + + +设置全局默认generator的随机种子。 + + +参数: + + - **seed** (int) - 要设置的的随机种子,推荐使用较大的整数。 + +返回: + Generator:全局默认generator对象。 + +**代码示例**: + +.. code-block:: python + + import paddle + paddle.manual_seed(102) diff --git a/doc/paddle/api/paddle/framework/set_default_dtype_cn.rst b/doc/paddle/api/paddle/framework/set_default_dtype_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9c6c6fe948acb39b526261c9ef92b8eaf4b59858 --- /dev/null +++ b/doc/paddle/api/paddle/framework/set_default_dtype_cn.rst @@ -0,0 +1,23 @@ +.. _cn_api_paddle_framework_set_default_dtype: + +set_default_dtype +------------------------------- + +.. py:function:: paddle.set_default_dtype(d) + + +设置默认的全局dtype。 默认的全局dtype最初是float32。 + + +参数: + + - **d** (string|np.dtype) - 设为默认值的dtype。 它仅支持float16,float32和float64。 + +返回: 无 + +**代码示例**: + +.. 
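note:: + 下面先给出一个简单的补充示例,展示 ``set_default_dtype`` 与 ``get_default_dtype`` 的配合使用;注释中的打印结果为示意值。 + + .. code-block:: python + + import paddle + # 将全局默认dtype设置为float64,随后查询当前值 + paddle.set_default_dtype("float64") + print(paddle.get_default_dtype()) # 示意输出:'float64' + # 恢复为默认的float32 + paddle.set_default_dtype("float32") + +.. 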
code-block:: python + + import paddle + paddle.set_default_dtype("float32") diff --git a/doc/paddle/api/paddle/hapi/callbacks/Callback_cn.rst b/doc/paddle/api/paddle/hapi/callbacks/Callback_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..45eadc4c66b0ac17559ab61b0607bcb8a7e690ca --- /dev/null +++ b/doc/paddle/api/paddle/hapi/callbacks/Callback_cn.rst @@ -0,0 +1,26 @@ +.. _cn_api_paddle_callbacks_Callback: + +Callback +------------------------------- + +.. py:class:: paddle.callbacks.Callback() + + ``Callback`` 是一个基类,用于实现用户自定义的callback。 + +**代码示例**: + +.. code-block:: python + + import paddle + + # build a simple model checkpoint callback + class ModelCheckpoint(paddle.callbacks.Callback): + def __init__(self, save_freq=1, save_dir=None): + self.save_freq = save_freq + self.save_dir = save_dir + + def on_epoch_end(self, epoch, logs=None): + if self.model is not None and epoch % self.save_freq == 0: + path = '{}/{}'.format(self.save_dir, epoch) + print('save checkpoint at {}'.format(path)) + self.model.save(path) \ No newline at end of file diff --git a/doc/paddle/api/paddle/hapi/callbacks/ModelCheckpoint_cn.rst b/doc/paddle/api/paddle/hapi/callbacks/ModelCheckpoint_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a8b304a6d7d329710bc1b8837c83961c057815f0 --- /dev/null +++ b/doc/paddle/api/paddle/hapi/callbacks/ModelCheckpoint_cn.rst @@ -0,0 +1,36 @@ +.. _cn_api_paddle_callbacks_ModelCheckpoint: + +ModelCheckpoint +------------------------------- + +.. py:class:: paddle.callbacks.ModelCheckpoint(save_freq=1, save_dir=None) + + ``ModelCheckpoint`` 是一个保存模型的回调类,在训练过程中按 ``save_freq`` 指定的间隔将模型保存到 ``save_dir`` 目录。 + +参数: + - **save_freq** (int,可选) - 间隔多少个epoch保存模型。默认值:1。 + - **save_dir** (str,可选) - 保存模型的文件夹。如果不设定,将不会保存模型。默认值:None。 + + +**代码示例**: + +.. code-block:: python + + import paddle + from paddle.static import InputSpec + + inputs = [InputSpec([-1, 1, 28, 28], 'float32', 'image')] + labels = [InputSpec([None, 1], 'int64', 'label')] + + train_dataset = paddle.vision.datasets.MNIST(mode='train') + + model = paddle.Model(paddle.vision.LeNet(classifier_activation=None), + inputs, labels) + + optim = paddle.optimizer.Adam(0.001) + model.prepare(optimizer=optim, + loss=paddle.nn.CrossEntropyLoss(), + metrics=paddle.metric.Accuracy()) + + callback = paddle.callbacks.ModelCheckpoint(save_dir='./temp') + model.fit(train_dataset, batch_size=64, callbacks=callback) \ No newline at end of file diff --git a/doc/paddle/api/paddle/hapi/callbacks/ProgBarLogger_cn.rst b/doc/paddle/api/paddle/hapi/callbacks/ProgBarLogger_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e90cc940fbce4b8fa2572e1b6bcdc96f7e4a0e7f --- /dev/null +++ b/doc/paddle/api/paddle/hapi/callbacks/ProgBarLogger_cn.rst @@ -0,0 +1,36 @@ +.. _cn_api_paddle_callbacks_ProgBarLogger: + +ProgBarLogger +------------------------------- + +.. py:class:: paddle.callbacks.ProgBarLogger(log_freq=1, verbose=2) + + ``ProgBarLogger`` 是一个日志回调类。 + +参数: + - **log_freq** (int,可选) - 损失值和指标打印的频率。默认值:1。 + - **verbose** (int,可选) - 打印信息的模式。设置为0时,不打印信息;设置为1时,使用进度条的形式打印信息;设置为2时,使用行的形式打印信息。默认值:2。 + + +**代码示例**: + +.. 
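note:: + ``ProgBarLogger`` 也可以与 ``ModelCheckpoint`` 等回调组合使用,按 ``fit`` 的 ``callbacks`` 参数以列表形式一并传入。下面是一个沿用上文MNIST配置的补充示例,仅作示意。 + + .. code-block:: python + + import paddle + from paddle.static import InputSpec + + inputs = [InputSpec([-1, 1, 28, 28], 'float32', 'image')] + labels = [InputSpec([None, 1], 'int64', 'label')] + + train_dataset = paddle.vision.datasets.MNIST(mode='train') + + model = paddle.Model(paddle.vision.LeNet(classifier_activation=None), inputs, labels) + optim = paddle.optimizer.Adam(0.001) + model.prepare(optimizer=optim, loss=paddle.nn.CrossEntropyLoss(), metrics=paddle.metric.Accuracy()) + + # 同时使用进度日志与模型保存两个回调 + callbacks = [paddle.callbacks.ProgBarLogger(log_freq=10), paddle.callbacks.ModelCheckpoint(save_dir='./temp')] + model.fit(train_dataset, batch_size=64, callbacks=callbacks) + +.. 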
code-block:: python + + import paddle + from paddle.static import InputSpec + + inputs = [InputSpec([-1, 1, 28, 28], 'float32', 'image')] + labels = [InputSpec([None, 1], 'int64', 'label')] + + train_dataset = paddle.vision.datasets.MNIST(mode='train') + + model = paddle.Model(paddle.vision.LeNet(classifier_activation=None), + inputs, labels) + + optim = paddle.optimizer.Adam(0.001) + model.prepare(optimizer=optim, + loss=paddle.nn.CrossEntropyLoss(), + metrics=paddle.metric.Accuracy()) + + callback = paddle.callbacks.ProgBarLogger(log_freq=10) + model.fit(train_dataset, batch_size=64, callbacks=callback) \ No newline at end of file diff --git a/doc/paddle/api/paddle/hapi/model/Model_cn.rst b/doc/paddle/api/paddle/hapi/model/Model_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b87a54bde8b63b163959f6b20680946da63e3736 --- /dev/null +++ b/doc/paddle/api/paddle/hapi/model/Model_cn.rst @@ -0,0 +1,491 @@ +.. _cn_api_paddle_Model: + +Model +------------------------------- + +.. py:class:: paddle.Model() + + ``Model`` 对象是一个具备训练、测试、推理的神经网络。该对象同时支持静态图和动态图模式,通过 ``paddle.disable_static()`` 来切换。需要注意的是,该开关需要在实例化 ``Model`` 对象之前使用。输入需要使用 ``paddle.static.InputSpec`` 来定义。 + +**代码示例**: + +.. code-block:: python + + import paddle + import paddle.nn as nn + from paddle.static import InputSpec + + device = paddle.set_device('cpu') # or 'gpu' + # if use static graph, do not set + paddle.disable_static(device) + + net = nn.Sequential( + nn.Linear(784, 200), + nn.Tanh(), + nn.Linear(200, 10)) + + input = InputSpec([None, 784], 'float32', 'x') + label = InputSpec([None, 1], 'int64', 'label') + + model = paddle.Model(net, input, label) + optim = paddle.optimizer.SGD(learning_rate=1e-3, + parameters=model.parameters()) + model.prepare(optim, + paddle.nn.CrossEntropyLoss(), + paddle.metric.Accuracy()) + + data = paddle.vision.datasets.MNIST(mode='train', chw_format=False) + model.fit(data, epochs=2, batch_size=32, verbose=1) + + +.. py:function:: train_batch(inputs, labels=None) + +在一个批次的数据上进行训练。 + +参数: + - **inputs** (list) - 1维列表,每个元素都是一批次的输入数据,数据类型为 ``numpy.ndarray``。 + - **labels** (list) - 1维列表,每个元素都是一批次的输入标签,数据类型为 ``numpy.ndarray`` 。默认值:None。 + +返回:如果没有定义评估函数,则返回包含了训练损失函数的值的列表;如果定义了评估函数,则返回一个元组(损失函数的列表,评估指标的列表)。 + + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle + import paddle.nn as nn + from paddle.static import InputSpec + + device = paddle.set_device('cpu') # or 'gpu' + paddle.disable_static(device) + + net = nn.Sequential( + nn.Linear(784, 200), + nn.Tanh(), + nn.Linear(200, 10)) + + input = InputSpec([None, 784], 'float32', 'x') + label = InputSpec([None, 1], 'int64', 'label') + model = paddle.Model(net, input, label) + optim = paddle.optimizer.SGD(learning_rate=1e-3, + parameters=model.parameters()) + model.prepare(optim, paddle.nn.CrossEntropyLoss()) + data = np.random.random(size=(4,784)).astype(np.float32) + label = np.random.randint(0, 10, size=(4, 1)).astype(np.int64) + loss = model.train_batch([data], [label]) + print(loss) + +.. py:function:: eval_batch(inputs, labels=None) + +在一个批次的数据上进行评估。 + +参数: + - **inputs** (list) - 1维列表,每个元素都是一批次的输入数据,数据类型为 ``numpy.ndarray`` 。 + - **labels** (list) - 1维列表,每个元素都是一批次的输入标签,数据类型为 ``numpy.ndarray`` 。默认值:None。 + +返回:如果没有定义评估函数,则返回包含了预测损失函数的值的列表;如果定义了评估函数,则返回一个元组(损失函数的列表,评估指标的列表)。 + +返回类型:list + +**代码示例**: + +.. 
code-block:: python + + import numpy as np + import paddle + import paddle.nn as nn + from paddle.static import InputSpec + + device = paddle.set_device('cpu') # or 'gpu' + paddle.disable_static(device) + + net = nn.Sequential( + nn.Linear(784, 200), + nn.Tanh(), + nn.Linear(200, 10)) + + input = InputSpec([None, 784], 'float32', 'x') + label = InputSpec([None, 1], 'int64', 'label') + model = paddle.Model(net, input, label) + optim = paddle.optimizer.SGD(learning_rate=1e-3, + parameters=model.parameters()) + model.prepare(optim, + paddle.nn.CrossEntropyLoss()) + data = np.random.random(size=(4,784)).astype(np.float32) + label = np.random.randint(0, 10, size=(4, 1)).astype(np.int64) + loss = model.eval_batch([data], [label]) + print(loss) + +.. py:function:: test_batch(inputs) + +在一个批次的数据上进行测试。 + +参数: + - **inputs** (list) - 1维列表,每个元素都是一批次的输入数据,数据类型为 ``numpy.ndarray`` 。 + +返回:一个列表,包含了模型的输出。 + +返回类型:list + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle + import paddle.nn as nn + from paddle.static import InputSpec + + device = paddle.set_device('cpu') # or 'gpu' + paddle.disable_static(device) + + net = nn.Sequential( + nn.Linear(784, 200), + nn.Tanh(), + nn.Linear(200, 10), + nn.Softmax()) + + input = InputSpec([None, 784], 'float32', 'x') + label = InputSpec([None, 1], 'int64', 'label') + model = paddle.Model(net, input, label) + model.prepare() + data = np.random.random(size=(4,784)).astype(np.float32) + out = model.test_batch([data]) + print(out) + +.. py:function:: save(path, training=True): + +将模型的参数和训练过程中优化器的信息保存到指定的路径,以及推理所需的参数与文件。如果training=True,所有的模型参数都会保存到一个后缀为 ``.pdparams`` 的文件中。 +所有的优化器信息和相关参数,比如 ``Adam`` 优化器中的 ``beta1`` , ``beta2`` ,``momentum`` 等,都会被保存到后缀为 ``.pdopt``。如果优化器比如SGD没有参数,则该不会产生该文件。如果training=False,则不会保存上述说的文件。只会保存推理需要的参数文件和模型文件。 + +参数: + - **path** (str) - 保存的文件名前缀。格式如 ``dirname/file_prefix`` 或者 ``file_prefix`` 。 + - **training** (bool,可选) - 是否保存训练的状态,包括模型参数和优化器参数等。如果为False,则只保存推理所需的参数与文件。默认值:True。 + +返回:None + +**代码示例**: + +.. code-block:: python + + import paddle + import paddle.nn as nn + from paddle.static import InputSpec + + class Mnist(nn.Layer): + def __init__(self): + super(Mnist, self).__init__() + self.net = nn.Sequential( + nn.Linear(784, 200), + nn.Tanh(), + nn.Linear(200, 10), + nn.Softmax()) + + def forward(self, x): + return self.net(x) + + dynamic = True # False + device = paddle.set_device('cpu') + # if use static graph, do not set + paddle.disable_static(device) if dynamic else None + + input = InputSpec([None, 784], 'float32', 'x') + label = InputSpec([None, 1], 'int64', 'label') + model = paddle.Model(Mnist(), input, label) + optim = paddle.optimizer.SGD(learning_rate=1e-3, + parameters=model.parameters()) + model.prepare(optim, paddle.nn.CrossEntropyLoss()) + data = paddle.vision.datasets.MNIST(mode='train', chw_format=False) + model.fit(data, epochs=1, batch_size=32, verbose=0) + model.save('checkpoint/test') # save for training + model.save('inference_model', False) # save for inference + +.. py:function:: load(path, skip_mismatch=False, reset_optimizer=False): + +从指定的文件中载入模型参数和优化器参数,如果不想恢复优化器参数信息,优化器信息文件可以不存在。需要注意的是:参数名称的检索是根据保存模型时结构化的名字,当想要载入参数进行迁移学习时要保证预训练模型和当前的模型的参数有一样结构化的名字。 + +参数: + - **path** (str) - 保存参数或优化器信息的文件前缀。格式如 ``path.pdparams`` 或者 ``path.pdopt`` ,后者是非必要的,如果不想恢复优化器信息。 + - **skip_mismatch** (bool) - 是否需要跳过保存的模型文件中形状或名称不匹配的参数,设置为 ``False`` 时,当遇到不匹配的参数会抛出一个错误。默认值:False。 + - **reset_optimizer** (bool) - 设置为 ``True`` 时,会忽略提供的优化器信息文件。否则会载入提供的优化器信息。默认值:False。 + +返回:None + +**代码示例**: + +.. 
code-block:: python + + import paddle + import paddle.nn as nn + from paddle.static import InputSpec + + device = paddle.set_device('cpu') + paddle.disable_static(device) + + input = InputSpec([None, 784], 'float32', 'x') + label = InputSpec([None, 1], 'int64', 'label') + model = paddle.Model(nn.Sequential( + nn.Linear(784, 200), + nn.Tanh(), + nn.Linear(200, 10), + nn.Softmax()), + input, + label) + model.save('checkpoint/test') + model.load('checkpoint/test') + +.. py:function:: parameters(*args, **kwargs): + +返回一个包含模型所有参数的列表。 + +返回:在静态图中返回一个包含 ``Parameter`` 的列表,在动态图中返回一个包含 ``ParamBase`` 的列表。 + +**代码示例**: + +.. code-block:: python + import paddle + import paddle.nn as nn + from paddle.static import InputSpec + + paddle.disable_static() + + input = InputSpec([None, 784], 'float32', 'x') + label = InputSpec([None, 1], 'int64', 'label') + model = paddle.Model(nn.Sequential( + nn.Linear(784, 200), + nn.Tanh(), + nn.Linear(200, 10)), + input, + label) + params = model.parameters() + + +.. py:function:: prepare(optimizer=None, loss_function=None, metrics=None): + +配置模型所需的部件,比如优化器、损失函数和评价指标。 + +参数: + - **optimizer** (Optimizer) - 当训练模型的,该参数必须被设定。当评估或测试的时候,该参数可以不设定。默认值:None。 + - **loss_function** (Loss) - 当训练模型的,该参数必须被设定。默认值:None。 + - **metrics** (Metric|list[Metric]) - 当该参数被设定时,所有给定的评估方法会在训练和测试时被运行,并返回对应的指标。默认值:None。 + + +.. py:function:: fit(train_data=None, eval_data=None, batch_size=1, epochs=1, eval_freq=1, log_freq=10, save_dir=None, save_freq=1, verbose=2, drop_last=False, shuffle=True, num_workers=0, callbacks=None): + +训练模型。当 ``eval_data`` 给定时,会在 ``eval_freq`` 个 ``epoch`` 后进行一次评估。 + +参数: + - **train_data** (Dataset|DataLoader) - 一个可迭代的数据源,推荐给定一个 ``paddle paddle.io.Dataset`` 或 ``paddle.io.Dataloader`` 的实例。默认值:None。 + - **eval_data** (Dataset|DataLoader) - 一个可迭代的数据源,推荐给定一个 ``paddle paddle.io.Dataset`` 或 ``paddle.io.Dataloader`` 的实例。当给定时,会在每个 ``epoch`` 后都会进行评估。默认值:None。 + - **batch_size** (int) - 训练数据或评估数据的批大小,当 ``train_data`` 或 ``eval_data`` 为 ``DataLoader`` 的实例时,该参数会被忽略。默认值:1。 + - **epochs** (int) - 训练的轮数。默认值:1。 + - **eval_freq** (int) - 评估的频率,多少个 ``epoch`` 评估一次。默认值:1。 + - **log_freq** (int) - 日志打印的频率,多少个 ``step`` 打印一次日志。默认值:1。 + - **save_dir** (str|None) - 保存模型的文件夹,如果不设定,将不保存模型。默认值:None。 + - **save_freq** (int) - 保存模型的频率,多少个 ``epoch`` 保存一次模型。默认值:1。 + - **verbose** (int) - 可视化的模型,必须为0,1,2。当设定为0时,不打印日志,设定为1时,使用进度条的方式打印日志,设定为2时,一行一行地打印日志。默认值:2。 + - **drop_last** (bool) - 是否丢弃训练数据中最后几个不足设定的批次大小的数据。默认值:False。 + - **shuffle** (bool) - 是否对训练数据进行洗牌。当 ``train_data`` 为 ``DataLoader`` 的实例时,该参数会被忽略。默认值:True。 + - **num_workers** (int) - 启动子进程用于读取数据的数量。当 ``train_data`` 和 ``eval_data`` 都为 ``DataLoader`` 的实例时,该参数会被忽略。默认值:True。 + - **callbacks** (Callback|list[Callback]|None) - ``Callback`` 的一个实例或实例列表。该参数不给定时,默认会插入 ``ProgBarLogger`` 和 ``ModelCheckpoint`` 这两个实例。默认值:None。 + +返回:None + +**代码示例**: + +.. code-block:: python + + # 1. 
使用Dataset训练,并设置batch_size的例子。 + import paddle + from paddle.static import InputSpec + + dynamic = True + device = paddle.set_device('cpu') # or 'gpu' + paddle.disable_static(device) if dynamic else None + + train_dataset = paddle.vision.datasets.MNIST(mode='train') + val_dataset = paddle.vision.datasets.MNIST(mode='test') + + input = InputSpec([None, 1, 28, 28], 'float32', 'image') + label = InputSpec([None, 1], 'int64', 'label') + + model = paddle.Model( + paddle.vision.models.LeNet(classifier_activation=None), + input, label) + optim = paddle.optimizer.Adam( + learning_rate=0.001, parameters=model.parameters()) + model.prepare( + optim, + paddle.nn.CrossEntropyLoss(), + paddle.metric.Accuracy(topk=(1, 2))) + model.fit(train_dataset, + val_dataset, + epochs=2, + batch_size=64, + save_dir='mnist_checkpoint') + + # 2. 使用Dataloader训练的例子. + + import paddle + from paddle.static import InputSpec + + dynamic = True + device = paddle.set_device('cpu') # or 'gpu' + paddle.disable_static(device) if dynamic else None + + train_dataset = paddle.vision.datasets.MNIST(mode='train') + train_loader = paddle.io.DataLoader(train_dataset, + places=device, batch_size=64) + val_dataset = paddle.vision.datasets.MNIST(mode='test') + val_loader = paddle.io.DataLoader(val_dataset, + places=device, batch_size=64) + + input = InputSpec([None, 1, 28, 28], 'float32', 'image') + label = InputSpec([None, 1], 'int64', 'label') + + model = paddle.Model( + paddle.vision.models.LeNet(classifier_activation=None), input, label) + optim = paddle.optimizer.Adam( + learning_rate=0.001, parameters=model.parameters()) + model.prepare( + optim, + paddle.nn.CrossEntropyLoss(), + paddle.metric.Accuracy(topk=(1, 2))) + model.fit(train_loader, + val_loader, + epochs=2, + save_dir='mnist_checkpoint') + + +.. py:function:: evaluate(eval_data, batch_size=1, log_freq=10, verbose=2, num_workers=0, callbacks=None): + +在输入数据上,评估模型的损失函数值和评估指标。 + +参数: + - **eval_data** (Dataset|DataLoader) - 一个可迭代的数据源,推荐给定一个 ``paddle paddle.io.Dataset`` 或 ``paddle.io.Dataloader`` 的实例。默认值:None。 + - **batch_size** (int) - 训练数据或评估数据的批大小,当 ``eval_data`` 为 ``DataLoader`` 的实例时,该参数会被忽略。默认值:1。 + - **log_freq** (int) - 日志打印的频率,多少个 ``step`` 打印一次日志。默认值:1。 + - **verbose** (int) - 可视化的模型,必须为0,1,2。当设定为0时,不打印日志,设定为1时,使用进度条的方式打印日志,设定为2时,一行一行地打印日志。默认值:2。 + - **num_workers** (int) - 启动子进程用于读取数据的数量。当 ``eval_data`` 为 ``DataLoader`` 的实例时,该参数会被忽略。默认值:True。 + - **callbacks** (Callback|list[Callback]|None) - ``Callback`` 的一个实例或实例列表。该参数不给定时,默认会插入 ``ProgBarLogger`` 和 ``ModelCheckpoint`` 这两个实例。默认值:None。 + +返回:None + +**代码示例**: + +.. code-block:: python + + # declarative mode + import paddle + from paddle.static import InputSpec + + # declarative mode + val_dataset = paddle.vision.datasets.MNIST(mode='test') + + input = InputSpec([-1, 1, 28, 28], 'float32', 'image') + label = InputSpec([None, 1], 'int64', 'label') + model = paddle.Model(paddle.vision.models.LeNet(), input, label) + model.prepare(metrics=paddle.metric.Accuracy()) + result = model.evaluate(val_dataset, batch_size=64) + print(result) + + # imperative mode + paddle.disable_static() + model = paddle.Model(paddle.vision.models.LeNet(), input, label) + model.prepare(metrics=paddle.metric.Accuracy()) + result = model.evaluate(val_dataset, batch_size=64) + print(result) + + +.. 
py:function:: predict(test_data, batch_size=1, num_workers=0, stack_outputs=False, callbacks=None): + +在输入数据上,预测模型的输出。 + +参数: + - **test_data** (Dataset|DataLoader) - 一个可迭代的数据源,推荐给定一个 ``paddle.io.Dataset`` 或 ``paddle.io.Dataloader`` 的实例。 + - **batch_size** (int) - 预测数据的批大小,当 ``test_data`` 为 ``DataLoader`` 的实例时,该参数会被忽略。默认值:1。 + - **num_workers** (int) - 启动子进程用于读取数据的数量。当 ``test_data`` 为 ``DataLoader`` 的实例时,该参数会被忽略。默认值:0。 + - **stack_outputs** (bool) - 是否将输出进行堆叠。默认值:False。 + - **callbacks** (Callback|list[Callback]|None) - ``Callback`` 的一个实例或实例列表。默认值:None。 + +返回:list,模型在输入数据上的输出结果。 + +**代码示例**: + +.. code-block:: python + + # declarative mode + import numpy as np + import paddle + from paddle.static import InputSpec + + class MnistDataset(paddle.vision.datasets.MNIST): + def __init__(self, mode, return_label=True): + super(MnistDataset, self).__init__(mode=mode) + self.return_label = return_label + + def __getitem__(self, idx): + img = np.reshape(self.images[idx], [1, 28, 28]) + if self.return_label: + return img, np.array(self.labels[idx]).astype('int64') + return img, + + def __len__(self): + return len(self.images) + + test_dataset = MnistDataset(mode='test', return_label=False) + + # declarative mode + input = InputSpec([-1, 1, 28, 28], 'float32', 'image') + model = paddle.Model(paddle.vision.models.LeNet(), input) + model.prepare() + + result = model.predict(test_dataset, batch_size=64) + print(len(result[0]), result[0][0].shape) + + # imperative mode + device = paddle.set_device('cpu') + paddle.disable_static(device) + model = paddle.Model(paddle.vision.models.LeNet(), input) + model.prepare() + result = model.predict(test_dataset, batch_size=64) + print(len(result[0]), result[0][0].shape) + + +.. py:function:: summary(input_size=None, batch_size=None, dtype=None): + +打印网络的基础结构和参数信息。 + +参数: + - **input_size** (tuple|InputSpec|list[tuple|InputSpec],可选) - 输入张量的大小。如果网络只有一个输入,那么该值需要设定为tuple或InputSpec。如果模型有多个输入,那么该值需要设定为list[tuple|InputSpec],包含每个输入的shape。如果该值没有设置,会将 ``self._inputs`` 作为输入。默认值:None。 + - **batch_size** (int,可选) - 输入张量的批大小。默认值:None。 + - **dtype** (str,可选) - 输入张量的数据类型,如果没有给定,默认使用 ``float32`` 类型。默认值:None。 + +返回:字典,包含网络全部参数的大小和全部可训练参数的大小。 + +**代码示例**: + +.. code-block:: python + + import paddle + from paddle.static import InputSpec + + dynamic = True + device = paddle.set_device('cpu') + paddle.disable_static(device) if dynamic else None + + input = InputSpec([None, 1, 28, 28], 'float32', 'image') + label = InputSpec([None, 1], 'int64', 'label') + + model = paddle.Model(paddle.vision.LeNet(classifier_activation=None), + input, label) + optim = paddle.optimizer.Adam( + learning_rate=0.001, parameters=model.parameters()) + model.prepare( + optim, + paddle.nn.CrossEntropyLoss()) + + params_info = model.summary() + print(params_info) diff --git a/doc/paddle/api/paddle/hapi/model_summary/summary_cn.rst b/doc/paddle/api/paddle/hapi/model_summary/summary_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1505fba3c26997bb54014681d205318c76106429 --- /dev/null +++ b/doc/paddle/api/paddle/hapi/model_summary/summary_cn.rst @@ -0,0 +1,58 @@ +.. _cn_api_paddle_summary: + +summary +------------------------------- + +.. 
py:function:: paddle.summary(net, input_size, batch_size=None, dtypes=None) + + ``summary`` 函数能够打印网络的基础结构和参数信息。 + +参数: + - **net** (Layer) - 网络实例,必须是 ``Layer`` 的子类。 + - **input_size** (tuple|InputSpec|list[tuple|InputSpec]) - 输入张量的大小。如果网络只有一个输入,那么该值需要设定为tuple或InputSpec。如果模型有多个输入,那么该值需要设定为list[tuple|InputSpec],包含每个输入的shape。 + - **batch_size** (int,可选) - 输入张量的批大小。默认值:None。 + - **dtypes** (str,可选) - 输入张量的数据类型,如果没有给定,默认使用 ``float32`` 类型。默认值:None。 + +返回:字典,包含了总的参数量和总的可训练的参数量。 + +**代码示例**: + +.. code-block:: python + + import paddle + import paddle.nn as nn + + class LeNet(nn.Layer): + def __init__(self, num_classes=10): + super(LeNet, self).__init__() + self.num_classes = num_classes + self.features = nn.Sequential( + nn.Conv2d( + 1, 6, 3, stride=1, padding=1), + nn.ReLU(), + nn.MaxPool2d(2, 2), + nn.Conv2d( + 6, 16, 5, stride=1, padding=0), + nn.ReLU(), + nn.MaxPool2d(2, 2)) + + if num_classes > 0: + self.fc = nn.Sequential( + nn.Linear(400, 120), + nn.Linear(120, 84), + nn.Linear( + 84, 10)) + + def forward(self, inputs): + x = self.features(inputs) + + if self.num_classes > 0: + x = paddle.flatten(x, 1) + x = self.fc(x) + return x + + lenet = LeNet() + + params_info = paddle.summary(lenet, (1, 28, 28)) + print(params_info) + diff --git a/doc/paddle/api/paddle/io/BatchSampler_cn.rst b/doc/paddle/api/paddle/io/BatchSampler_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..deac13212764fbfa577da18fada6a7db1eb290eb --- /dev/null +++ b/doc/paddle/api/paddle/io/BatchSampler_cn.rst @@ -0,0 +1,65 @@ +.. _cn_api_io_cn_BatchSampler: + +BatchSampler +------------------------------- + +.. py:class:: paddle.io.BatchSampler(dataset=None, sampler=None, shuffle=False, batch_size=1, drop_last=False) + +批采样器的基础实现,用于 ``paddle.io.DataLoader`` 中迭代式获取mini-batch的样本下标数组,数组长度与 ``batch_size`` 一致。 + +所有用于 ``paddle.io.DataLoader`` 中的批采样器都必须是 ``paddle.io.BatchSampler`` 的子类并实现以下方法: + +``__iter__``: 迭代式返回批样本下标数组。 + +``__len__``: 每epoch中mini-batch数。 + +参数: + - **dataset** (Dataset) - 此参数必须是 ``paddle.io.Dataset`` 或 ``paddle.io.IterableDataset`` 的一个子类实例或实现了 ``__len__`` 的Python对象,用于生成样本下标。默认值为None。 + - **sampler** (Sampler) - 此参数必须是 ``paddle.io.Sampler`` 的子类实例,用于迭代式获取样本下标。``dataset`` 和 ``sampler`` 参数只能设置一个。默认值为None。 + - **shuffle** (bool) - 是否需要在生成样本下标时打乱顺序。默认值为False。 + - **batch_size** (int) - 每mini-batch中包含的样本数。默认值为1。 + - **drop_last** (bool) - 是否需要丢弃最后无法凑整一个mini-batch的样本。默认值为False。 + +见 ``paddle.io.DataLoader`` 。 + +返回:返回样本下标数组的迭代器。 + +返回类型: BatchSampler + +**代码示例** + +.. 
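note:: + 自定义批采样器需要继承 ``BatchSampler`` 并实现 ``__iter__`` 与 ``__len__``。下面是一个按顺序切分下标的简化示意;为突出接口约定,示例未调用父类构造函数,仅用于直接迭代。 + + .. code-block:: python + + import math + from paddle.io import BatchSampler + + class SequentialBatchSampler(BatchSampler): + def __init__(self, num_samples, batch_size): + self.num_samples = num_samples + self.batch_size = batch_size + + def __iter__(self): + batch = [] + for idx in range(self.num_samples): + batch.append(idx) + if len(batch) == self.batch_size: + yield batch + batch = [] + if batch: + yield batch + + def __len__(self): + return int(math.ceil(self.num_samples / float(self.batch_size))) + + for batch_indices in SequentialBatchSampler(10, 4): + print(batch_indices) + # [0, 1, 2, 3] + # [4, 5, 6, 7] + # [8, 9] + +.. 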
code-block:: python + + from paddle.io import RandomSampler, BatchSampler, Dataset + + # init with dataset + class RandomDataset(Dataset): + def __init__(self, num_samples): + self.num_samples = num_samples + + def __getitem__(self, idx): + image = np.random.random([784]).astype('float32') + label = np.random.randint(0, 9, (1, )).astype('int64') + return image, label + + def __len__(self): + return self.num_samples + + bs = BatchSampler(dataset=RandomDataset(100), + shuffle=False, + batch_size=16, + drop_last=False) + + for batch_indices in bs: + print(batch_indices) + + # init with sampler + sampler = RandomSampler(RandomDataset(100)) + bs = BatchSampler(sampler=sampler, + shuffle=True, + batch_size=8, + drop_last=True) + + for batch_indices in bs: + print(batch_indices) + diff --git a/doc/paddle/api/paddle/io/DataLoader_cn.rst b/doc/paddle/api/paddle/io/DataLoader_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2012f53dffef8e6271f748d557cd559d957ded4c --- /dev/null +++ b/doc/paddle/api/paddle/io/DataLoader_cn.rst @@ -0,0 +1,378 @@ +.. _cn_api_fluid_io_DataLoader: + +DataLoader +------------------------------- + +.. py:class:: paddle.fluid.io.DataLoader(dataset, feed_list=None, places=None, return_list=False, batch_sampler=None, batch_size=1, shuffle=False, drop_last=False, collate_fn=None, num_workers=0, use_buffer_reader=True, use_shared_memory=False, timeout=0, worker_init_fn=None) + +DataLoader返回一个迭代器,该迭代器根据 ``batch_sampler`` 给定的顺序迭代一次给定的 ``dataset`` + +DataLoader支持单进程和多进程的数据加载方式,当 ``num_workers`` 大于0时,将使用多进程方式异步加载数据。 + +DataLoader当前仅支持 ``map-style`` 的数据集(可通过下标索引样本), ``map-style`` 的数据集请参考 ``paddle.io.Dataset`` 。 + +``batch_sampler`` 请参考 ``fluid.io.BatchSampler`` + +参数: + - **dataset** (Dataset) - DataLoader从此参数给定数据集中加载数据,此参数必须是 ``paddle.io.Dataset`` 或 ``paddle.io.IterableDataset`` 的一个子类实例。 + - **feed_list** (list(Tensor)|tuple(Tensor)) - feed变量列表,由 ``fluid.layers.data()`` 创建。当 ``return_list`` 为False时,此参数必须设置。默认值为None。 + - **places** (list(Place)|tuple(Place)) - 数据需要放置到的Place列表。在静态图和动态图模式中,此参数均必须设置。在动态图模式中,此参数列表长度必须是1。默认值为None。 + - **return_list** (bool) - 每个设备上的数据是否以list形式返回。若return_list = False,每个设备上的返回数据均是str -> Tensor的映射表,其中映射表的key是每个输入变量的名称。若return_list = True,则每个设备上的返回数据均是list(Tensor)。在动态图模式下,此参数必须为True。默认值为False。 + - **batch_sampler** (BatchSampler) - ``fluid.io.BatchSampler`` 或其子类的实例,DataLoader通过 ``batch_sampler`` 产生的mini-batch索引列表来 ``dataset`` 中索引样本并组成mini-batch。默认值为None。 + - **batch_size** (int) - 每mini-batch中样本个数,为 ``batch_sampler`` 的替代参数,若 ``batch_sampler`` 未设置,会根据 ``batch_size`` ``shuffle`` ``drop_last`` 创建一个 ``fluid.io.BatchSampler`` 。默认值为1。 + - **shuffle** (bool) - 生成mini-batch索引列表时是否对索引打乱顺序,为 ``batch_sampler`` 的替代参数,若 ``batch_sampler`` 未设置,会根据 ``batch_size`` ``shuffle`` ``drop_last`` 创建一个 ``fluid.io.BatchSampler`` 。默认值为False。 + - **drop_last** (bool) - 是否丢弃因数据集样本数不能被 ``batch_size`` 整除而产生的最后一个不完整的mini-batch,为 ``batch_sampler`` 的替代参数,若 ``batch_sampler`` 未设置,会根据 ``batch_size`` ``shuffle`` ``drop_last`` 创建一个 ``fluid.io.BatchSampler`` 。默认值为False。 + - **collate_fn** (callable) - 通过此参数指定如果将样本列表组合为mini-batch数据,当 ``collate_fn`` 为None时,默认为将样本个字段在第0维上堆叠(同 ``np.stack(..., axis=0)`` )为mini-batch的数据。默认值为None。 + - **num_workers** (int) - 用于加载数据的子进程个数,若为0即为不开启子进程,在主进程中进行数据加载。默认值为0。 + - **use_buffer_reader** (bool) - 是否使用缓存读取器 。若 ``use_buffer_reader`` 为True,DataLoader会异步地预读取下一个mini-batch的数据,可加速数据读取过程,但同时会占用少量的CPU/GPU存储,即一个batch输入数据的存储空间。默认值为True。 + - **use_shared_memory** (bool) - 是否使用共享内存来提升子进程将数据放入进程间队列的速度,该参数尽在多进程模式下有效(即 ``num_workers > 0`` 
),请确认机器上有足够的共享内存空间(如Linux系统下 ``/dev/shm/`` 目录空间大小)再设置此参数。默认为False。 + - **timeout** (int) - 从子进程输出队列获取mini-batch数据的超时时间。默认值为0。 + - **worker_init_fn** (callable) - 子进程初始化函数,此函数会被子进程初始化时被调用,并传递 ``worker id`` 作为参数。默认值为None。 + +返回:迭代 ``dataset`` 数据的迭代器 + +返回类型: DataLoader + +**代码示例** + +.. code-block:: python + + import numpy as np + import paddle.fluid as fluid + from paddle.io import Dataset, BatchSampler, DataLoader + + BATCH_NUM = 20 + BATCH_SIZE = 16 + EPOCH_NUM = 4 + + IMAGE_SIZE = 784 + CLASS_NUM = 10 + + USE_GPU = True # whether use GPU to run model + + # define a random dataset + class RandomDataset(Dataset): + def __init__(self, num_samples): + self.num_samples = num_samples + + def __getitem__(self, idx): + image = np.random.random([IMAGE_SIZE]).astype('float32') + label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64') + return image, label + + def __len__(self): + return self.num_samples + + # get places + places = fluid.cuda_places() if USE_GPU else fluid.cpu_places() + + # -------------------- static graph --------------------- + + def simple_net(image, label): + fc_tmp = fluid.layers.fc(image, size=CLASS_NUM, act='softmax') + cross_entropy = fluid.layers.softmax_with_cross_entropy(image, label) + loss = fluid.layers.reduce_mean(cross_entropy) + sgd = fluid.optimizer.SGD(learning_rate=1e-3) + sgd.minimize(loss) + return loss + + image = fluid.data(name='image', shape=[None, IMAGE_SIZE], dtype='float32') + label = fluid.data(name='label', shape=[None, 1], dtype='int64') + + loss = simple_net(image, label) + + exe = fluid.Executor(places[0]) + exe.run(fluid.default_startup_program()) + + prog = fluid.CompiledProgram(fluid.default_main_program()).with_data_parallel(loss_name=loss.name) + + dataset = RandomDataset(BATCH_NUM * BATCH_SIZE) + + loader = DataLoader(dataset, + feed_list=[image, label], + places=places, + batch_size=BATCH_SIZE, + shuffle=True, + drop_last=True, + num_workers=2) + + for e in range(EPOCH_NUM): + for i, data in enumerate(loader()): + l = exe.run(prog, feed=data, fetch_list=[loss], return_numpy=True) + print("Epoch {} batch {}: loss = {}".format(e, i, l[0][0])) + + # ------------------------------------------------------- + + # -------------------- dynamic graph -------------------- + + class SimpleNet(fluid.dygraph.Layer): + def __init__(self): + super(SimpleNet, self).__init__() + self.fc = fluid.dygraph.nn.Linear(IMAGE_SIZE, CLASS_NUM, act='softmax') + + def forward(self, image, label=None): + return self.fc(image) + + with fluid.dygraph.guard(places[0]): + simple_net = SimpleNet() + opt = fluid.optimizer.SGD(learning_rate=1e-3, + parameter_list=simple_net.parameters()) + + loader = DataLoader(dataset, + places=places[0], + batch_size=BATCH_SIZE, + shuffle=True, + drop_last=True, + num_workers=2) + + for e in range(EPOCH_NUM): + for i, (image, label) in enumerate(loader()): + out = simple_net(image) + loss = fluid.layers.cross_entropy(out, label) + avg_loss = fluid.layers.reduce_mean(loss) + avg_loss.backward() + opt.minimize(avg_loss) + simple_net.clear_gradients() + print("Epoch {} batch {}: loss = {}".format(e, i, np.mean(loss.numpy()))) + + # ------------------------------------------------------- + + +.. py:method:: from_generator(feed_list=None, capacity=None, use_double_buffer=True, iterable=True, return_list=False, use_multiprocess=False, drop_last=True) + +.. 
note:: + 框架保证DataLoader的数据加载顺序与用户提供的数据源读取顺序一致。 + +创建一个DataLoader对象用于加载Python生成器产生的数据。数据会由Python线程预先读取,并异步送入一个队列中。 + +本方法创建的DataLoader对象提供了3个方法设置数据源,分别是 :code:`set_sample_generator` , :code:`set_sample_list_generator` 和 +:code:`set_batch_generator` 。请查阅下述示例代码了解它们的使用方法。 + +如果iterable = True,本方法创建的DataLoader对象时一个Python生成器,可以for-range的方法循环迭代。 + +如果iterable = False,本方法创建的DataLoader对象提供 :code:`start()` 和 :code:`reset()` 方法控制数据读取过程。此模式用于兼容 +``fluid.layers.py_reader`` 的使用方式。用户可使用iterable = False模式,方便地将 ``fluid.layers.py_reader`` 的代码迁移至 +``fluid.io.DataLoader`` 。 + +参数: + - **feed_list** (list(Variable)|tuple(Variable)) - feed变量列表,由 ``fluid.layers.data()`` 创建。 + - **capacity** (int) - DataLoader对象内部维护队列的容量大小。单位是batch数量。若reader读取速度较快,建议设置较大的capacity值。 + - **use_double_buffer** (bool) - 是否使用 ``double_buffer_reader`` 。若use_double_buffer=True,DataLoader会异步地预读取下一个batch的数据,可加速数据读取过程,但同时会占用少量的CPU/GPU存储,即一个batch输入数据的存储空间。 + - **iterable** (bool) - 所创建的DataLoader对象是否可迭代。 + - **return_list** (bool) - 每个设备上的数据是否以list形式返回。仅在iterable = True模式下有效。若return_list = False,每个设备上的返回数据均是str -> LoDTensor的映射表,其中映射表的key是每个输入变量的名称。若return_list = True,则每个设备上的返回数据均是list(LoDTensor)。推荐在静态图模式下使用return_list = False,在动态图模式下使用return_list = True。 + - **use_multiprocess** (bool) - 设置是否是用多进程加速动态图的数据载入过程。注意:该参数的设置仅在动态图模式下有效, 在静态图模式下,该参数设置与否均无任何影响。默认值为False。 + - **drop_last** (bool): 是否丢弃最后的不足CPU/GPU设备数的批次。默认值为True。在网络训练时,用户不能设置drop_last=False,此时所有CPU/GPU设备均应从DataLoader中读取到数据。在网络预测时,用户可以设置drop_last=False,此时最后不足CPU/GPU设备数的批次可以进行预测。 + +返回: 被创建的DataLoader对象 + +返回类型: loader (DataLoader) + +**代码示例 1** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + BATCH_NUM = 10 + BATCH_SIZE = 16 + EPOCH_NUM = 4 + + CLASS_NUM = 10 + + ITERABLE = True # whether the created DataLoader object is iterable + USE_GPU = False # whether to use GPU + + DATA_FORMAT = 'batch_generator' # data format of data source user provides + + def simple_net(image, label): + fc_tmp = fluid.layers.fc(image, size=CLASS_NUM) + cross_entropy = fluid.layers.softmax_with_cross_entropy(image, label) + loss = fluid.layers.reduce_mean(cross_entropy) + sgd = fluid.optimizer.SGD(learning_rate=1e-3) + sgd.minimize(loss) + return loss + + def get_random_images_and_labels(image_shape, label_shape): + image = np.random.random(size=image_shape).astype('float32') + label = np.random.random(size=label_shape).astype('int64') + return image, label + + # If the data generator yields one sample each time, + # use DataLoader.set_sample_generator to set the data source. + def sample_generator_creator(): + def __reader__(): + for _ in range(BATCH_NUM * BATCH_SIZE): + image, label = get_random_images_and_labels([784], [1]) + yield image, label + + return __reader__ + + # If the data generator yield list of samples each time, + # use DataLoader.set_sample_list_generator to set the data source. + def sample_list_generator_creator(): + def __reader__(): + for _ in range(BATCH_NUM): + sample_list = [] + for _ in range(BATCH_SIZE): + image, label = get_random_images_and_labels([784], [1]) + sample_list.append([image, label]) + + yield sample_list + + return __reader__ + + # If the data generator yields a batch each time, + # use DataLoader.set_batch_generator to set the data source. 
+ def batch_generator_creator(): + def __reader__(): + for _ in range(BATCH_NUM): + batch_image, batch_label = get_random_images_and_labels([BATCH_SIZE, 784], [BATCH_SIZE, 1]) + yield batch_image, batch_label + + return __reader__ + + # If DataLoader is iterable, use for loop to train the network + def train_iterable(exe, prog, loss, loader): + for _ in range(EPOCH_NUM): + for data in loader(): + exe.run(prog, feed=data, fetch_list=[loss]) + + # If DataLoader is not iterable, use start() and reset() method to control the process + def train_non_iterable(exe, prog, loss, loader): + for _ in range(EPOCH_NUM): + loader.start() # call DataLoader.start() before each epoch starts + try: + while True: + exe.run(prog, fetch_list=[loss]) + except fluid.core.EOFException: + loader.reset() # call DataLoader.reset() after catching EOFException + + def set_data_source(loader, places): + if DATA_FORMAT == 'sample_generator': + loader.set_sample_generator(sample_generator_creator(), batch_size=BATCH_SIZE, drop_last=True, places=places) + elif DATA_FORMAT == 'sample_list_generator': + loader.set_sample_list_generator(sample_list_generator_creator(), places=places) + elif DATA_FORMAT == 'batch_generator': + loader.set_batch_generator(batch_generator_creator(), places=places) + else: + raise ValueError('Unsupported data format') + + image = fluid.layers.data(name='image', shape=[784], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + + # Define DataLoader + loader = fluid.io.DataLoader.from_generator(feed_list=[image, label], capacity=16, iterable=ITERABLE) + + # Define network + loss = simple_net(image, label) + + # Set data source of DataLoader + # + # If DataLoader is iterable, places must be given and the number of places must be the same with device number. + # - If you are using GPU, call `fluid.cuda_places()` to get all GPU places. + # - If you are using CPU, call `fluid.cpu_places()` to get all CPU places. + # + # If DataLoader is not iterable, places can be None. + places = fluid.cuda_places() if USE_GPU else fluid.cpu_places() + set_data_source(loader, places) + + exe = fluid.Executor(places[0]) + exe.run(fluid.default_startup_program()) + + prog = fluid.CompiledProgram(fluid.default_main_program()).with_data_parallel(loss_name=loss.name) + + if loader.iterable: + train_iterable(exe, prog, loss, loader) + else: + train_non_iterable(exe, prog, loss, loader) + + + ''' + Users can use return_list = True in dygraph mode. + ''' + with fluid.dygraph.guard(places[0]): + loader = fluid.io.DataLoader.from_generator(capacity=2, return_list=True) + set_data_source(loader, places[0]) + for image, label in loader(): + relu = fluid.layers.relu(image) + assert image.shape == [BATCH_SIZE, 784] + assert label.shape == [BATCH_SIZE, 1] + assert relu.shape == [BATCH_SIZE, 784] + + +**代码示例 2** + +.. 
code-block:: python + + import paddle.fluid as fluid + import numpy as np + import os + + # We use 2 CPU cores to run inference network + os.environ['CPU_NUM'] = '2' + + # The data source has only 3 batches, which can not be + # divided evenly to each CPU core + def batch_generator(): + for i in range(3): + yield np.array([i+1]).astype('float32'), + + x = fluid.data(name='x', shape=[None], dtype='float32') + y = x * x + + def run_inference(drop_last): + loader = fluid.io.DataLoader.from_generator(feed_list=[x], + capacity=8, drop_last=drop_last) + loader.set_batch_generator(batch_generator, fluid.cpu_places()) + + exe = fluid.Executor(fluid.CPUPlace()) + prog = fluid.CompiledProgram(fluid.default_main_program()) + prog = prog.with_data_parallel() + + result = [] + for data in loader(): + each_ret, = exe.run(prog, feed=data, fetch_list=[y]) + result.extend(each_ret) + return result + + # Set drop_last to True, so that the last batch whose + # number is less than CPU core number would be discarded. + print(run_inference(drop_last=True)) # [1.0, 4.0] + + # Set drop_last to False, so that the last batch whose + # number is less than CPU core number can be tested. + print(run_inference(drop_last=False)) # [1.0, 4.0, 9.0] + + +.. py:method:: from_dataset(dataset, places, drop_last=True) + +创建一个DataLoader对象用于加载Dataset产生的数据。目前,Dataset仅支持Linux系统下使用。 + +参数: + - **dataset** (InMemoryDataset|QueueDataset) - Dataset对象。 + - **places** (list(CUDAPlace)|list(CPUPlace)) - DataLoader对象返回数据所在的place。 + - **drop_last** (bool) - 是否丢弃最后样本数量不足batch size的batch。若drop_last = True则丢弃,若drop_last = False则不丢弃。 + +返回: 被创建的DataLoader对象,可以for-range的方式循环迭代 + +返回类型: loader (DataLoader) + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + image = fluid.layers.data(name='image', shape=[784], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + + dataset = fluid.DatasetFactory().create_dataset("QueueDataset") + dataset.set_batch_size(32) + dataset.set_filelist(['a.txt', 'b.txt', 'c.txt']) + dataset.set_use_var([image, label]) + dataset.set_pipe_command('cat') + + loader = fluid.io.DataLoader.from_dataset(dataset, fluid.cpu_places()) + + diff --git a/doc/paddle/api/paddle/io/Dataset_cn.rst b/doc/paddle/api/paddle/io/Dataset_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1e793ac5984a654d1c4918836ae3b0b79b3b7981 --- /dev/null +++ b/doc/paddle/api/paddle/io/Dataset_cn.rst @@ -0,0 +1,41 @@ +.. _cn_api_io_cn_Dataset: + +Dataset +------------------------------- + +.. py:class:: paddle.io.Dataset + +概述Dataset的方法和行为的抽象类。 + +映射式(map-style)数据集需要继承这个基类,映射式数据集为可以通过一个键值索引并获取指定样本的数据集,所有映射式数据集须实现以下方法: + +``__getitem__``: 根据给定索引获取数据集中指定样本,在 ``paddle.io.DataLoader`` 中需要使用此函数通过下标获取样本。 + +``__len__``: 返回数据集样本个数, ``paddle.io.BatchSampler`` 中需要样本个数生成下标序列。 + +见 ``paddle.io.DataLoader`` 。 + +**代码示例** + +.. 
code-block:: python + + import numpy as np + from paddle.io import Dataset + + # define a random dataset + class RandomDataset(Dataset): + def __init__(self, num_samples): + self.num_samples = num_samples + + def __getitem__(self, idx): + image = np.random.random([784]).astype('float32') + label = np.random.randint(0, 9, (1, )).astype('int64') + return image, label + + def __len__(self): + return self.num_samples + + dataset = RandomDataset(10) + for i in range(len(dataset)): + print(dataset[i]) + diff --git a/doc/paddle/api/paddle/io/IterableDataset_cn.rst b/doc/paddle/api/paddle/io/IterableDataset_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..55f38c1e424b78a7c592c2d8ab20acd06d84d61c --- /dev/null +++ b/doc/paddle/api/paddle/io/IterableDataset_cn.rst @@ -0,0 +1,136 @@ +.. _cn_api_io_cn_IterableDataset: + +IterableDataset +------------------------------- + +.. py:class:: paddle.io.IterableDataset + +概述迭代式数据集的方法和行为的抽象类。 + +迭代式(iterable style)数据集需要继承这个基类,迭代式数据集为只能依次迭代式获取样本的数据集,类似Python中的迭代器,所有迭代式数据集须实现以下方法: + +``__iter__``: 依次返回数据样本。 + +.. note:: + 迭代式数据集不需要实现 ``__getitem__`` 和 ``__len__``,也不可以调用迭代式数据集的这两个方法。 + +见 ``paddle.io.DataLoader`` 。 + +**代码示例** + +.. code-block:: python + + import numpy as np + from paddle.io import IterableDataset + + # define a random iterable dataset + class RandomDataset(IterableDataset): + def __init__(self, num_samples): + self.num_samples = num_samples + + def __iter__(self): + for i in range(self.num_samples): + image = np.random.random([784]).astype('float32') + label = np.random.randint(0, 9, (1, )).astype('int64') + yield image, label + + dataset = RandomDataset(10) + for img, lbl in dataset: + print(img, lbl) + +当 ``paddle.io.DataLoader`` 中 ``num_workers > 0`` 时,每个子进程都会遍历全量的数据集返回全量样本,所以数据集会重复 ``num_workers`` +次,如果需要数据集样本不会重复返回,可通过如下两种方法避免样本重复,两种方法中都需要通过 ``paddle.io.get_worker_info`` 获取各子进程的信息。 + +1. 通过 ``__iter__`` 函数划分各子进程的数据 + +**代码示例1** + +.. code-block:: python + + import math + import numpy as np + import paddle.fluid as fluid + from paddle.io import IterableDataset, DataLoader, get_worker_info + + class SplitedIterableDataset(IterableDataset): + def __init__(self, start, end): + self.start = start + self.end = end + + def __iter__(self): + worker_info = get_worker_info() + if worker_info is None: + iter_start = self.start + iter_end = self.end + else: + per_worker = int( + math.ceil((self.end - self.start) / float( + worker_info.num_workers))) + worker_id = worker_info.id + iter_start = self.start + worker_id * per_worker + iter_end = min(iter_start + per_worker, self.end) + + for i in range(iter_start, iter_end): + yield np.array([i]) + + place = fluid.CPUPlace() + with fluid.dygraph.guard(place): + dataset = SplitedIterableDataset(start=2, end=9) + dataloader = DataLoader( + dataset, + places=place, + num_workers=2, + batch_size=1, + drop_last=True) + + print(list(dataloader)) + # outputs: [2, 5, 3, 6, 4, 7] + +2. 通过各子进程初始化函数 ``worker_init_fn`` 划分子进程数据 + +**代码示例2** + +.. 
code-block:: python + + import math + import numpy as np + import paddle.fluid as fluid + from paddle.io import IterableDataset, DataLoader, get_worker_info + + class RangeIterableDataset(IterableDataset): + def __init__(self, start, end): + self.start = start + self.end = end + + def __iter__(self): + for i in range(self.start, self.end): + yield np.array([i]) + + place = fluid.CPUPlace() + with fluid.dygraph.guard(place): + dataset = RangeIterableDataset(start=2, end=9) + + def worker_init_fn(worker_id): + worker_info = get_worker_info() + + dataset = worker_info.dataset + start = dataset.start + end = dataset.end + num_per_worker = int( + math.ceil((end - start) / float(worker_info.num_workers))) + + worker_id = worker_info.id + dataset.start = start + worker_id * num_per_worker + dataset.end = min(dataset.start + num_per_worker, end) + + dataloader = DataLoader( + dataset, + places=place, + num_workers=2, + batch_size=1, + drop_last=True, + worker_init_fn=worker_init_fn) + + print(list(dataloader)) + # outputs: [2, 5, 3, 6, 4, 7] + diff --git a/doc/paddle/api/paddle/io/RandomSampler_cn.rst b/doc/paddle/api/paddle/io/RandomSampler_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9116c58c44eed7dfd70cbf46e183d113f38f8875 --- /dev/null +++ b/doc/paddle/api/paddle/io/RandomSampler_cn.rst @@ -0,0 +1,41 @@ +.. _cn_api_io_cn_RandomSampler: + +RandomSampler +------------------------------- + +.. py:class:: paddle.io.RandomSampler(data_source=None, replacement=False, num_samples=None, generator=None) + +随机迭代 ``data_source`` 返回样本下标:当 ``replacement`` 为 ``False`` 时,随机返回 ``0, 1, 2, ..., len(data_source) - 1`` 的一个排列;当 ``replacement`` 为 ``True`` 时,按 ``num_samples`` 指定的数量随机采集样本下标。 + +参数: + - **data_source** (Dataset) - 此参数必须是 ``paddle.io.Dataset`` 或 ``paddle.io.IterableDataset`` 的一个子类实例或实现了 ``__len__`` 的Python对象,用于生成样本下标。默认值为None。 + - **replacement** (bool) - 如果为 ``False`` 则会采样整个数据集,如果为 ``True`` 则会按 ``num_samples`` 指定的样本数采集。默认值为 ``False`` 。 + - **num_samples** (int) - 如果 ``replacement`` 设置为 ``True`` 则按此参数采集对应的样本数。默认值为None。 + - **generator** (Generator) - 指定采样 ``data_source`` 时使用的生成器。默认值为None。 + +返回: 返回随机采样下标的采样器 + +返回类型: RandomSampler + +**代码示例** + +.. code-block:: python + + import numpy as np + from paddle.io import Dataset, RandomSampler + + class RandomDataset(Dataset): + def __init__(self, num_samples): + self.num_samples = num_samples + + def __getitem__(self, idx): + image = np.random.random([784]).astype('float32') + label = np.random.randint(0, 9, (1, )).astype('int64') + return image, label + + def __len__(self): + return self.num_samples + + sampler = RandomSampler(data_source=RandomDataset(100)) + + for index in sampler: + print(index) diff --git a/doc/paddle/api/paddle/io/Sampler_cn.rst b/doc/paddle/api/paddle/io/Sampler_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..838eb6d50b26f80e225ccfc7c31679ed8d49bd5a --- /dev/null +++ b/doc/paddle/api/paddle/io/Sampler_cn.rst @@ -0,0 +1,57 @@ +.. _cn_api_io_cn_Sampler: + +Sampler +------------------------------- + +.. py:class:: paddle.io.Sampler(data_source=None) + +概括数据集采样器行为和方法的基类。 + +所有数据集采样器必须继承这个基类,并实现以下方法: + +``__iter__``: 迭代返回数据样本下标 + +``__len__``: ``data_source`` 中的样本数 + +参数: + - **data_source** (Dataset) - 此参数必须是 ``paddle.io.Dataset`` 或 ``paddle.io.IterableDataset`` 的一个子类实例或实现了 ``__len__`` 的Python对象,用于生成样本下标。默认值为None。 + +可见 ``paddle.io.BatchSampler`` 和 ``paddle.io.DataLoader`` + +返回:返回样本下标的迭代器。 + +返回类型: Sampler + +**代码示例** + +.. 
code-block:: python + + from paddle.io import Dataset, Sampler + + class RandomDataset(Dataset): + def __init__(self, num_samples): + self.num_samples = num_samples + + def __getitem__(self, idx): + image = np.random.random([784]).astype('float32') + label = np.random.randint(0, 9, (1, )).astype('int64') + return image, label + + def __len__(self): + return self.num_samples + + class MySampler(Sampler): + def __init__(self, data_source): + self.data_source = data_source + + def __iter__(self): + return iter(range(len(self.data_source))) + + def __len__(self): + return len(self.data_source) + + sampler = MySampler(data_source=RandomDataset(100)) + + for index in sampler: + print(index) + diff --git a/doc/paddle/api/paddle/io/SequenceSampler_cn.rst b/doc/paddle/api/paddle/io/SequenceSampler_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1af7c682b9f01f5bb641939ce8212ce0f7ec5b32 --- /dev/null +++ b/doc/paddle/api/paddle/io/SequenceSampler_cn.rst @@ -0,0 +1,39 @@ +.. _cn_api_io_cn_SequenceSampler: + +SequenceSampler +------------------------------- + +.. py:class:: paddle.io.SequenceSampler(data_source=None) + +顺序迭代 ``data_source`` 返回样本下标,即一次返回 ``0, 1, 2, ..., len(data_source) - 1`` + +参数: + - **data_source** (Dataset) - 此参数必须是 ``paddle.io.Dataset`` 或 ``paddle.io.IterableDataset`` 的一个子类实例或实现了 ``__len__`` 的Python对象,用于生成样本下标。默认值为None。 + +返回:返回样本下标的迭代器。 + +返回类型: SequenceSampler + +**代码示例** + +.. code-block:: python + + from paddle.io import Dataset, SequenceSampler + + class RandomDataset(Dataset): + def __init__(self, num_samples): + self.num_samples = num_samples + + def __getitem__(self, idx): + image = np.random.random([784]).astype('float32') + label = np.random.randint(0, 9, (1, )).astype('int64') + return image, label + + def __len__(self): + return self.num_samples + + sampler = SequenceSampler(data_source=RandomDataset(100)) + + for index in sampler: + print(index) + diff --git a/doc/paddle/api/paddle/io/batch_cn.rst b/doc/paddle/api/paddle/io/batch_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3e3804abfb573be5a561fa7fb48603a15bf5f87c --- /dev/null +++ b/doc/paddle/api/paddle/io/batch_cn.rst @@ -0,0 +1,50 @@ +.. _cn_api_fluid_io_batch: + +batch +------------------------------- + +.. py:function:: paddle.fluid.io.batch(reader, batch_size, drop_last=False) + + + + +该接口是一个reader的装饰器。返回的reader将输入reader的数据打包成指定的batch_size大小的批处理数据(batched data)。 + +参数: + - **reader** (generator)- 读取数据的数据reader。 + - **batch_size** (int)- 批尺寸。 + - **drop_last** (bool) - 若设置为True,则当最后一个batch不等于batch_size时,丢弃最后一个batch;若设置为False,则不会。默认值为False。 + +返回:batched reader + +返回类型:generator + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + def reader(): + for i in range(10): + yield i + batch_reader = fluid.io.batch(reader, batch_size=2) + + for data in batch_reader(): + print(data) + + # 输出为: + # [0, 1] + # [2, 3] + # [4, 5] + # [6, 7] + # [8, 9] + + + + + + + + + diff --git a/doc/paddle/api/paddle/io/buffered_cn.rst b/doc/paddle/api/paddle/io/buffered_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..027c6346f735874d8c9accc38024c09b5940c429 --- /dev/null +++ b/doc/paddle/api/paddle/io/buffered_cn.rst @@ -0,0 +1,17 @@ +.. _cn_api_fluid_io_buffered: + +buffered +------------------------------- + +.. 
py:function:: paddle.fluid.io.buffered(reader, size) + + + + +创建一个缓存数据读取器,它读取数据并且存储进缓存区,从缓存区读取数据将会加速,只要缓存不是空的。 + +参数: + - **reader** (callable) – 读取数据的reader + - **size** (int) – 最大buffer的大小 + +返回:缓存的reader(读取器) \ No newline at end of file diff --git a/doc/paddle/api/paddle/io/cache_cn.rst b/doc/paddle/api/paddle/io/cache_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e93e4c85d134c0feb2ff813a6127c89816baed76 --- /dev/null +++ b/doc/paddle/api/paddle/io/cache_cn.rst @@ -0,0 +1,18 @@ +.. _cn_api_fluid_io_cache: + +cache +------------------------------- + +.. py:function:: paddle.fluid.io.cache(reader) + + + + +缓存reader数据到内存中,小心此方法可能会花长时间来处理数据,并且会占用大量内存。 ``reader()`` 只能被调用一次。 + +参数: + - **reader** (callable) – 读取数据的reader,每次都会yields数据。 + +返回:每次都会从内存中yields数据的一个装饰reader。 + +返回类型:生成器 \ No newline at end of file diff --git a/doc/paddle/api/paddle/io/chain_cn.rst b/doc/paddle/api/paddle/io/chain_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4a4872d268cc2dfbddad0a6d4720be54e5eb41c9 --- /dev/null +++ b/doc/paddle/api/paddle/io/chain_cn.rst @@ -0,0 +1,47 @@ +.. _cn_api_fluid_io_chain: + +chain +------------------------------- + +.. py:function:: paddle.fluid.io.chain(*readers) + + + + +该接口将多个数据读取器组成一个数据读取器,它依次返回多个数据读取器的输出数据,同时不改变输出数据原先的格式。 + +举例来说,如果有3个输入读取器且输出分别为[0,0,0]、[10,10,10]和[20,20,20],那么调用该接口产生的新数据读取器的输出为[0,0,0], [10,10,10], [20,20,20]。 + +参数: + - **readers(list)** – 输入的数据读取器。 + +返回: 新的数据读取器。 + +返回类型:callable + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + def reader_creator_3(start): + def reader(): + for i in range(start, start + 3): + yield [i, i, i] + return reader + + c = fluid.io.chain(reader_creator_3(0), reader_creator_3(10), reader_creator_3(20)) + for e in c(): + print(e) + # 输出结果如下: + # [0, 0, 0] + # [1, 1, 1] + # [2, 2, 2] + # [10, 10, 10] + # [11, 11, 11] + # [12, 12, 12] + # [20, 20, 20] + # [21, 21, 21] + # [22, 22, 22] + diff --git a/doc/paddle/api/paddle/io/compose_cn.rst b/doc/paddle/api/paddle/io/compose_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b4393bab5db375413712c27b60fb0699e4d85370 --- /dev/null +++ b/doc/paddle/api/paddle/io/compose_cn.rst @@ -0,0 +1,34 @@ +.. _cn_api_fluid_io_compose: + +compose +------------------------------- + +.. py:function:: paddle.fluid.io.compose(*readers, **kwargs) + + + + +该接口将多个数据读取器组合为一个数据读取器,返回读取器的输出包含所有输入读取器的输出。 + +例如:如果输入为三个reader,三个reader的输出分别为:(1,2)、3、(4,5),则组合reader的输出为:(1,2,3,4,5)。 + +参数: + - **readers** - 将被组合的多个数据读取器(Reader),数据读取器的定义参见 :ref:`cn_api_paddle_data_reader_reader` 。 + - **check_alignment** (bool) - 可选,指明是否对输入reader进行对齐检查,默认值为True。如果为True,将检查输入reader是否正确对齐。如果为False,将不检查对齐并自动丢弃无法对齐的末尾数据。 + +返回:数据读取器(Reader)。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + def reader_creator_10(dur): + def reader(): + for i in range(10): + yield i + return reader + + reader = fluid.io.compose(reader_creator_10(0), reader_creator_10(0)) + +注意: 运行过程可能会抛出异常 ``ComposeNotAligned`` ,原因是输入的readers数据未对齐。 当check_alignment设置为False时,不会检查并触发该异常。 diff --git a/doc/paddle/api/paddle/io/firstn_cn.rst b/doc/paddle/api/paddle/io/firstn_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ba9c1d427ab3ec1946dab0e78f1a2021a712fe94 --- /dev/null +++ b/doc/paddle/api/paddle/io/firstn_cn.rst @@ -0,0 +1,30 @@ +.. _cn_api_fluid_io_firstn: + +firstn +------------------------------- + +.. 
py:function:: paddle.fluid.io.firstn(reader, n) + + + + +该接口创建一个数据读取器,它可以返回的最大样本数为n。 + +参数: + - **reader** (callable) – 输入的数据读取器。 + - **n** (int) – 可以返回的最大样本数。 + +返回: 新的的数据读取器。 + +返回类型: callable + +.. code-block:: python + + import paddle.fluid as fluid + def reader(): + for i in range(100): + yield i + firstn_reader = fluid.io.firstn(reader, 5) + for e in firstn_reader(): + print(e) + # 输出结果为:0 1 2 3 4 diff --git a/doc/paddle/api/paddle/io/get_worker_info.rst b/doc/paddle/api/paddle/io/get_worker_info.rst new file mode 100644 index 0000000000000000000000000000000000000000..f7fcddc00d30a315d14299836971491289db4e83 --- /dev/null +++ b/doc/paddle/api/paddle/io/get_worker_info.rst @@ -0,0 +1,16 @@ +.. _cn_api_io_cn_get_worker_info: + +get_worker_info +------------------------------- + +.. py:class:: paddle.io.get_worker_info + +获取 ``paddle.io.DataLoader`` 子进程信息的函数,用于 ``paddle.io.IterableDataset`` 中划分子进程数据。子进程信息包含以下字段: + +``num_workers``: 子进程数。 + +``id``: 子进程逻辑序号,从0到 ``num_workers - 1`` + +``dataset``: 各子进程中数据集实例。 + +示例代码见 ``paddle.io.IterableDataset`` diff --git a/doc/paddle/api/paddle/io/load_inference_model_cn.rst b/doc/paddle/api/paddle/io/load_inference_model_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..17210d8b2024380bc06223bb99a5d53f9e9c5afe --- /dev/null +++ b/doc/paddle/api/paddle/io/load_inference_model_cn.rst @@ -0,0 +1,79 @@ +.. _cn_api_fluid_io_load_inference_model: + +load_inference_model +------------------------------- + + +.. py:function:: paddle.fluid.io.load_inference_model(dirname, executor, model_filename=None, params_filename=None, pserver_endpoints=None) + + + + +从指定文件路径中加载预测模型(Inference Model),即调用该接口可获得模型结构(Inference Program)和模型参数。若只想加载预训练后的模型参数,请使用 :ref:`cn_api_fluid_io_load_params` 接口。更多细节请参考 :ref:`api_guide_model_save_reader` 。 + +参数: + - **dirname** (str) – 待加载模型的存储路径。 + - **executor** (Executor) – 运行 Inference Model 的 ``executor`` ,详见 :ref:`api_guide_executor` 。 + - **model_filename** (str,可选) – 存储Inference Program结构的文件名称。如果设置为None,则使用 ``__model__`` 作为默认的文件名。默认值为None。 + - **params_filename** (str,可选) – 存储所有模型参数的文件名称。当且仅当所有模型参数被保存在一个单独的二进制文件中,它才需要被指定。如果模型参数是存储在各自分离的文件中,设置它的值为None。默认值为None。 + - **pserver_endpoints** (list,可选) – 只有在分布式预测时才需要用到。当训练过程中使用分布式查找表(distributed lookup table)时, 预测时需要指定pserver_endpoints的值。它是 pserver endpoints 的列表,默认值为None。 + +返回:该接口返回一个包含三个元素的列表(program,feed_target_names, fetch_targets)。它们的含义描述如下: + - **program** (Program)– ``Program`` (详见 :ref:`api_guide_Program` )类的实例。此处它被用于预测,因此可被称为Inference Program。 + - **feed_target_names** (list)– 字符串列表,包含着Inference Program预测时所需提供数据的所有变量名称(即所有输入变量的名称)。 + - **fetch_targets** (list)– ``Variable`` (详见 :ref:`api_guide_Program` )类型列表,包含着模型的所有输出变量。通过这些输出变量即可得到模型的预测结果。 + +**返回类型:** 列表(list) + +抛出异常: + - ``ValueError`` – 如果接口参数 ``dirname`` 指向一个不存在的文件路径,则抛出异常。 + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + import numpy as np + + # 构建模型 + main_prog = fluid.Program() + startup_prog = fluid.Program() + with fluid.program_guard(main_prog, startup_prog): + data = fluid.layers.data(name="img", shape=[64, 784], append_batch_size=False) + w = fluid.layers.create_parameter(shape=[784, 200], dtype='float32') + b = fluid.layers.create_parameter(shape=[200], dtype='float32') + hidden_w = fluid.layers.matmul(x=data, y=w) + hidden_b = fluid.layers.elementwise_add(hidden_w, b) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(startup_prog) + + # 保存预测模型 + path = "./infer_model" + fluid.io.save_inference_model(dirname=path, feeded_var_names=['img'],target_vars=[hidden_b], executor=exe, main_program=main_prog) + + # 示例一: 不需要指定分布式查找表的模型加载示例,即训练时未用到distributed lookup table。 + [inference_program, feed_target_names, fetch_targets] = (fluid.io.load_inference_model(dirname=path, executor=exe)) + tensor_img = np.array(np.random.random((1, 64, 784)), dtype=np.float32) + results = exe.run(inference_program, + feed={feed_target_names[0]: tensor_img}, + fetch_list=fetch_targets) + + # 示例二: 若训练时使用了distributed lookup table,则模型加载时需要通过endpoints参数指定pserver服务器结点列表。 + # pserver服务器结点列表主要用于分布式查找表进行ID查找时使用。下面的["127.0.0.1:2023","127.0.0.1:2024"]仅为一个样例。 + endpoints = ["127.0.0.1:2023","127.0.0.1:2024"] + [dist_inference_program, dist_feed_target_names, dist_fetch_targets] = ( + fluid.io.load_inference_model(dirname=path, + executor=exe, + pserver_endpoints=endpoints)) + + # 在上述示例中,inference program 被保存在“ ./infer_model/__model__”文件内, + # 参数保存在“./infer_mode ”单独的若干文件内。 + # 加载 inference program 后, executor可使用 fetch_targets 和 feed_target_names 执行Program,并得到预测结果。 + + + + + + + diff --git a/doc/paddle/api/paddle/io/load_program_state_cn.rst b/doc/paddle/api/paddle/io/load_program_state_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9875b5406c3465e2221b992600d0132b4ed5ee0b --- /dev/null +++ b/doc/paddle/api/paddle/io/load_program_state_cn.rst @@ -0,0 +1,38 @@ +.. _cn_api_fluid_io_load_program_state: + +load_program_state +------------------------------- + +.. py:function:: paddle.fluid.io.load_program_state(model_path, var_list=None) + + + + +该接口从本地加载 ``Program`` 的参数和优化器的变量信息到内存中。 + +参数: + - **model_path** (str) - 存储 ``Program`` 的参数和优化器的变量信息的目录名称+文件前缀,格式为 ``目录名称/文件前缀`` 。 + - **var_list** (list, 可选) - 指定加载的变量列表,该参数只在加载旧接口[save_params,save_persistables,save_vars]保存的模型文件时使用。当加载的是多个小文件时,变量列表可以是所有加载文件中变量的子集;当加载的单个大文件时,变量列表必须和加载文件中的变量保持一致。 + +返回:存储参数和优化器信息的dict + +返回类型:dict + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + x = fluid.data( name="x", shape=[10, 10], dtype='float32') + y = fluid.layers.fc( x, 10) + z = fluid.layers.fc( y, 10) + + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run( fluid.default_startup_program() ) + prog = fluid.default_main_program() + + fluid.save( prog, "./temp") + program_state = fluid.load_program_state( "./temp") + diff --git a/doc/paddle/api/paddle/io/map_readers_cn.rst b/doc/paddle/api/paddle/io/map_readers_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..cb50e62839cd523184fef37d4ff9dc625c52fc6b --- /dev/null +++ b/doc/paddle/api/paddle/io/map_readers_cn.rst @@ -0,0 +1,40 @@ +.. _cn_api_fluid_io_map_readers: + +map_readers +------------------------------- + +.. 
py:function:: paddle.fluid.io.map_readers(func, *readers) + + + + +该接口将创建一个数据读取器(Reader),其中 `func` 函数的输出将直接作为新数据读取器的输出, `readers` 的输出将作为函数 `func` 的输入参数。 + +例如:如果输入的 `readers` 为两个输出分别为:2、3 的 `reader` ,输入的 `func` 为乘法函数 `mul(x, y)` ,则得到的新建 `reader` 的输出为:6。 + +参数: + - **func** - 读取数据并返回数据项的函数, `func` 的输出将直接作为新创建的数据读取器的输出。 + + - **readers** - 输入的一个或多个数据读取器(Reader),这些数据读取器的输出数据将作为函数 `func` 的输入参数。数据读取器的定义参见 :ref:`cn_api_paddle_data_reader_reader` 。 + +返回: 新创建的数据读取器(Reader) + +**代码示例**: + +.. code-block:: python + + + import paddle.fluid as fluid + d = {"h": 0, "i": 1} + def func(x): + return d[x] + + def reader(): + yield "h" + yield "i" + + map_reader_result = fluid.io.map_readers(func, reader) + + + + diff --git a/doc/paddle/api/paddle/io/save_inference_model_cn.rst b/doc/paddle/api/paddle/io/save_inference_model_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9a4b7a3d2d1eeec3fd5d6464e69a9e54b5df22ad --- /dev/null +++ b/doc/paddle/api/paddle/io/save_inference_model_cn.rst @@ -0,0 +1,71 @@ +.. _cn_api_fluid_io_save_inference_model: + +save_inference_model +------------------------------- + + +.. py:function:: paddle.fluid.io.save_inference_model(dirname, feeded_var_names, target_vars, executor, main_program=None, model_filename=None, params_filename=None, export_for_deployment=True, program_only=False) + + + + +修剪指定的 ``main_program`` 以构建一个专门用于预测的 ``Inference Program`` ( ``Program`` 含义详见 :ref:`api_guide_Program` )。 所得到的 ``Inference Program`` 及其对应的所有相关参数均被保存到 ``dirname`` 指定的目录中。若只想保存训练后的模型参数,请使用 :ref:`cn_api_fluid_io_save_params` 接口。更多细节请参考 :ref:`api_guide_model_save_reader` 。 + +**注意:dirname用于指定保存预测模型结构和参数的目录。若需要将模型参数保存在指定目录的若干文件中,请设置params_filename的值为None; 若需要将所有模型参数保存在一个单独的二进制文件中,请使用params_filename来指定该二进制文件的名称。** + +参数: + - **dirname** (str) – 指定保存预测模型结构和参数的文件目录。 + - **feeded_var_names** (list[str]) – 字符串列表,包含着Inference Program预测时所需提供数据的所有变量名称(即所有输入变量的名称)。 + - **target_vars** (list[Variable]) – ``Variable`` (详见 :ref:`api_guide_Program` )类型列表,包含着模型的所有输出变量。通过这些输出变量即可得到模型的预测结果。 + - **executor** (Executor) – 用于保存预测模型的 ``executor`` ,详见 :ref:`api_guide_executor` 。 + - **main_program** (Program,可选) – 通过该参数指定的 ``main_program`` 可构建一个专门用于预测的 ``Inference Program`` 。 若为None, 则使用全局默认的 ``_main_program_`` 。默认值为None。 + - **model_filename** (str,可选) – 保存预测模型结构 ``Inference Program`` 的文件名称。若设置为None,则使用 ``__model__`` 作为默认的文件名。 + - **params_filename** (str,可选) – 保存预测模型所有相关参数的文件名称。若设置为None,则模型参数被保存在单独的文件中。 + - **export_for_deployment** (bool,可选) – 若为True,则 ``main_program`` 指定的Program将被修改为只支持直接预测部署的Program。否则,将存储更多的信息,方便优化和再训练。目前只支持设置为True,且默认值为True。 + - **program_only** (bool,可选) – 若为True,则只保存预测模型的网络结构,而不保存预测模型的网络参数。默认值为False。 + + +**返回:** 用于获取模型预测结果的所有输出变量的名称列表。 + +**返回类型:** 列表(list) + +抛出异常: + - ``ValueError`` – 若 ``feed_var_names`` 不是字符串列表,则抛出异常。 + - ``ValueError`` – 若 ``target_vars`` 不是 ``Variable`` 类型列表,则抛出异常。 + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + + path = "./infer_model" + + # 用户定义网络,此处以softmax回归为例 + image = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + feeder = fluid.DataFeeder(feed_list=[image, label], place=fluid.CPUPlace()) + predict = fluid.layers.fc(input=image, size=10, act='softmax') + + loss = fluid.layers.cross_entropy(input=predict, label=label) + avg_loss = fluid.layers.mean(loss) + + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(fluid.default_startup_program()) + + # 数据输入及训练过程 + + # 保存预测模型。注意,用于预测的模型网络结构不需要保存标签和损失。 + fluid.io.save_inference_model(dirname=path, feeded_var_names=['img'], target_vars=[predict], executor=exe) + + # 在这个示例中,save_inference_mode接口将根据网络的输入结点(img)和输出结点(predict)修剪默认的主程序(_main_program_)。 + # 修剪得到的Inference Program将被保存在 “./infer_model/__model__”文件中, + # 模型参数将被保存在“./infer_model/”文件夹下以各自名称命名的单独文件中。 + + + + + + + + diff --git a/doc/paddle/api/paddle/io/set_program_state_cn.rst b/doc/paddle/api/paddle/io/set_program_state_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a2510b9d8e8a778d1ba10f0e5aee8f9c233df9a5 --- /dev/null +++ b/doc/paddle/api/paddle/io/set_program_state_cn.rst @@ -0,0 +1,41 @@ +.. _cn_api_fluid_io_set_program_state: + +set_program_state +------------------------------- + +.. py:function:: paddle.fluid.io.set_program_state(program, state_dict) + + + + +利用 ``state_dict`` 设置 ``Program`` 的参数和优化器信息。 + +如果参数的 shape 或 dtype 不匹配,则会引发异常。 + +**注意:必须在运行 start_up_program 之后调用此函数。** + +参数: + - **program** (Program) - 需要被设置的 ``Program`` 。 + - **state_dict** (dict) - 存储参数和优化器信息的dict;dict中key的类型为变量的名称,value为np.ndarray类型的数据。 + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + x = fluid.data( name="x", shape=[10, 10], dtype='float32') + y = fluid.layers.fc( x, 10) + z = fluid.layers.fc( y, 10) + + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run( fluid.default_startup_program() ) + prog = fluid.default_main_program() + + fluid.save( prog, "./temp") + program_state = fluid.load_program_state( "./temp") + fluid.set_program_state( prog, program_state) + diff --git a/doc/paddle/api/paddle/io/xmap_readers_cn.rst b/doc/paddle/api/paddle/io/xmap_readers_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5f434ecb9525c4f8dd746319d2b4baace88c89ae --- /dev/null +++ b/doc/paddle/api/paddle/io/xmap_readers_cn.rst @@ -0,0 +1,57 @@ +.. _cn_api_fluid_io_xmap_readers: + +xmap_readers +------------------------------- + +.. py:function:: paddle.fluid.io.xmap_readers(mapper, reader, process_num, buffer_size, order=False) + + + + +多线程下,使用自定义映射器 reader 返回样本到输出队列。 + +参数: + - **mapper** (callable): 映射 reader 数据的函数。 + - **reader** (callable): 产生数据的 reader。 + - **process_num** (int): 处理样本的线程数。 + - **buffer_size** (int): 数据缓冲队列大小。 + - **order** (bool): 是否保持原始 reader 数据顺序,默认为 False。 + +返回:一个用户定义的 reader `装饰器 `_ 。 + +返回类型:callable,可调用对象。 + +**代码示例**: + +.. 
code-block:: python + + import paddle.fluid as fluid + import time + + def reader_creator_10(dur): + def reader(): + for i in range(10): + time.sleep(dur) + yield i + return reader + + def mapper(x): + return (x + 1) + + orders = (True, False) + thread_num = (1, 2, 4, 8, 16) + buffer_size = (1, 2, 4, 8, 16) + for order in orders: + for t_num in thread_num: + for size in buffer_size: + user_reader = fluid.io.xmap_readers(mapper, + reader_creator_10(0), + t_num, size, order) + for n in range(3): + result = list() + for i in user_reader(): + result.append(i) + if not order: + result.sort() + for idx, e in enumerate(result): + assert e == mapper(idx) \ No newline at end of file diff --git a/doc/paddle/api/paddle/metric/accuracy_cn.rst b/doc/paddle/api/paddle/metric/accuracy_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0ef4deb37b97307a03e3141edf28bdcfb875f7f5 --- /dev/null +++ b/doc/paddle/api/paddle/metric/accuracy_cn.rst @@ -0,0 +1,53 @@ +.. _cn_api_paddle_metric_accuracy: + +accuracy +------------------------------- + +.. py:function:: paddle.metric.accuracy(input, label, k=1, correct=None, total=None) + +accuracy layer。 参考 https://en.wikipedia.org/wiki/Precision_and_recall + +使用输入和标签计算准确率。 如果正确的标签在topk个预测值里,则计算结果加1。注意:输出正确率的类型由input类型决定,input和lable的类型可以不一样。 + +参数 +::::::::: + + - **input** (Tensor|LoDTensor)-数据类型为float32,float64。输入为网络的预测值。shape为 ``[sample_number, class_dim]`` 。 + - **label** (Tensor|LoDTensor)-数据类型为int64,int32。输入为数据集的标签。shape为 ``[sample_number, 1]`` 。 + - **k** (int64|int32) - 取每个类别中k个预测值用于计算。 + - **correct** (int64|int32)-正确预测值的个数。 + - **total** (int64|int32)-总共的预测值。 + +返回 +::::::::: + + ``Tensor``,计算出来的正确率,数据类型为float32的Tensor。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + data = fluid.layers.data(name="input", shape=[-1, 32, 32], dtype="float32") + label = fluid.layers.data(name="label", shape=[-1,1], dtype="int") + fc_out = fluid.layers.fc(input=data, size=10) + predict = fluid.layers.softmax(input=fc_out) + result = fluid.layers.accuracy(input=predict, label=label, k=5) + + place = fluid.CPUPlace() + exe = fluid.Executor(place) + + exe.run(fluid.default_startup_program()) + x = np.random.rand(3, 32, 32).astype("float32") + y = np.array([[1],[0],[1]]) + output= exe.run(feed={"input": x,"label": y}, + fetch_list=[result[0]]) + print(output) + + """ + Output: + [array([0.6666667], dtype=float32)] + """ diff --git a/doc/paddle/api/paddle/metric/auc_cn.rst b/doc/paddle/api/paddle/metric/auc_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..092380aeb4ea21121cb3ad9563e9b5fb26f9a80b --- /dev/null +++ b/doc/paddle/api/paddle/metric/auc_cn.rst @@ -0,0 +1,71 @@ +.. _cn_api_paddle_metric_auc: + +auc +------------------------------- + +.. py:function:: paddle.metric.auc(input, label, curve='ROC', num_thresholds=200, topk=1, slide_steps=1) + +**Area Under the Curve(AUC) Layer** + +该层根据前向输出和标签计算AUC,在二分类(binary classification)估计中广泛使用。 + +注:如果输入标注包含一种值,只有0或1两种情况,数据类型则强制转换成布尔值。相关定义可以在这里: https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve 找到 + +有两种可能的曲线: + +1. ROC:受试者工作特征曲线 + +2. 
PR:准确率召回率曲线 + +参数: +::::::::: + + - **input** (Tensor|LoDTensor) - 数据类型为float32,float64。浮点二维变量,值的范围为[0,1]。每一行降序排列。该输入为网络预测值的输入。 + - **label** (Tensor|LoDTensor) - 数据类型为int32,int64。二维整型变量,为训练数据的标签。 + - **curve** (str) - 曲线类型,可以为 ``ROC`` 或 ``PR``,默认 ``ROC``。 + - **num_thresholds** (int) - 将roc曲线离散化时使用的临界值数。默认200。 + - **topk** (int) - 取topk的输出值用于计算。 + - **slide_steps** (int) - 当计算batch auc时,不仅用当前步也用于先前步。slide_steps=1,表示用当前步;slide_steps = 3表示用当前步和前两步;slide_steps = 0,则用所有步。 + +返回: +::::::::: + + ``Tensor``, 代表当前AUC的一个元组, 数据类型为float32或float64的Tensor。 + 返回的元组为auc_out, batch_auc_out, [batch_stat_pos, batch_stat_neg, stat_pos, stat_neg]。 + auc_out为准确率的结果。 + batch_auc_out为batch准确率的结果。 + batch_stat_pos为batch计算时label=1的统计值 + batch_stat_neg为batch计算时label=0的统计值 + stat_pos计算时label=1的统计值 + stat_neg为计算时label=0的统计值 + + +代码示例: +::::::::: + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + data = fluid.layers.data(name="input", shape=[-1, 32,32], dtype="float32") + label = fluid.layers.data(name="label", shape=[1], dtype="int") + fc_out = fluid.layers.fc(input=data, size=2) + predict = fluid.layers.softmax(input=fc_out) + result=fluid.layers.auc(input=predict, label=label) + + place = fluid.CPUPlace() + exe = fluid.Executor(place) + + exe.run(fluid.default_startup_program()) + x = np.random.rand(3,32,32).astype("float32") + y = np.array([1,0,1]) + output= exe.run(feed={"input": x,"label": y}, + fetch_list=[result[0]]) + print(output) + """ + output: + [array([0.5])] + """ + + diff --git a/doc/paddle/api/paddle/metric/chunk_eval_cn.rst b/doc/paddle/api/paddle/metric/chunk_eval_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c264a071ab03ca4a53279da3906db671e499b1d2 --- /dev/null +++ b/doc/paddle/api/paddle/metric/chunk_eval_cn.rst @@ -0,0 +1,109 @@ +.. _cn_api_fluid_layers_chunk_eval: + +chunk_eval +------------------------------- + +.. py:function:: paddle.fluid.layers.chunk_eval(input, label, chunk_scheme, num_chunk_types, excluded_chunk_types=None, sqe_length=None) + + + + +该OP计算语块识别(chunk detection)的准确率、召回率和F1值,常用于命名实体识别(NER,语块识别的一种)等序列标注任务中。 + +语块识别的基础请参考 `Chunking with Support Vector Machines `_ + +该OP支持IOB,IOE,IOBES和IO(plain)的标注方式。以下是这些标注方式在命名实体识别示例中的使用: + +:: + + + ====== ====== ====== ===== == ============ ===== ===== ===== == ========= + Li Ming works at Agricultural Bank of China in Beijing. 
+ ====== ====== ====== ===== == ============ ===== ===== ===== == ========= + IO I-PER I-PER O O I-ORG I-ORG I-ORG I-ORG O I-LOC + IOB B-PER I-PER O O B-ORG I-ORG I-ORG I-ORG O B-LOC + IOE I-PER E-PER O O I-ORG I-ORG I-ORG E-ORG O E-LOC + IOBES B-PER E-PER O O I-ORG I-ORG I-ORG E-ORG O S-LOC + ====== ====== ====== ===== == ============ ===== ===== ===== == ========= + +例中有PER(人名),ORG(机构名)和LOC(地名)三种语块类型(命名实体类型)。可以看到,一个完整的标签包括标注类型(tag type)和语块类型(chunk type),形式为 ``标注类型-语块类型(tag type-chunk type)`` 。 + +由于该OP在计算实现上使用的是标签id而非标签字符串,为使其能正确运行,标签id要能够转换为相应的标注类型(tag type)和语块类型(chunk type)。该OP使用了下面的方式完成映射转换: + +:: + + + tag_type = label % num_tag_type + chunk_type = label / num_tag_type + +其中num_tag_type是标注方式中的标签类型(tag type)数,各标注方式的tag type取值如下: + +:: + + + Scheme Begin Inside End Single + plain 0 - - - + IOB 0 1 - - + IOE - 0 1 - + IOBES 0 1 2 3 + +据此,在上面的NER例子中,若标注方式是IOB,语块类型包括ORG、PER和LOC三种,则所有标签及其对应id如下: + +:: + + + B-ORG 0 + I-ORG 1 + B-PER 2 + I-PER 3 + B-LOC 4 + I-LOC 5 + O 6 + +从标签id可以正确的得到其对应的标注类型(tag type)和语块类型(chunk type)。 + +参数: + - **input** (Variable) - 表示网络预测的标签,为Tensor或LoD level为1的LoDTensor。Tensor时,其形状为 :math:`[N, M, 1]` ,其中 :math:`N` 表示batch size, :math:`M` 表示序列长度;LoDTensor时,其形状为 :math:`[N, 1]` 或 :math:`[N]` ,其中 :math:`N` 表示所有序列长度之和。数据类型为int64。 + - **label** (Variable) - 表示真实标签(ground-truth)的Tensor或LoDTensor,和 ``input`` 具有相同形状、LoD和数据类型。 + - **chunk_scheme** (str) - 标注方式,必须是IOB,IOE,IOBES或者plain中的一种。 + - **num_chunk_types** (int) - 表示标签中的语块类型数。 + - **excluded_chunk_types** (list,可选) - 表示不计入统计的语块类型,需要为语块类型(int表示)的列表。默认值为空的list。 + - **seq_length** (Variable,可选) - 当输入 ``input`` 和 ``label`` 是Tensor而非LoDTensor时,用来指示输入中每个序列长度的1-D Tensor。数据类型为int64。可以为空,默认为None。 + +返回:Variable的元组。元组中包含准确率、召回率、F1值,以及识别出的语块数目、标签中的语块数目、正确识别的语块数目。每个均是单个元素的Tensor,准确率、召回率、F1值的数据类型为float32,其他的数据类型为int64。 + +返回类型:tuple + +**代码示例**: + +.. code-block:: python: + + import paddle.fluid as fluid + + dict_size = 10000 + label_dict_len = 7 + sequence = fluid.data( + name='id', shape=[None, 1], lod_level=1, dtype='int64') + embedding = fluid.embedding( + input=sequence, size=[dict_size, 512]) + hidden = fluid.layers.fc(input=embedding, size=512) + label = fluid.data( + name='label', shape=[None, 1], lod_level=1, dtype='int64') + crf = fluid.layers.linear_chain_crf( + input=hidden, label=label, param_attr=fluid.ParamAttr(name="crfw")) + crf_decode = fluid.layers.crf_decoding( + input=hidden, param_attr=fluid.ParamAttr(name="crfw")) + fluid.layers.chunk_eval( + input=crf_decode, + label=label, + chunk_scheme="IOB", + num_chunk_types=int((label_dict_len - 1) / 2)) + + + + + + + + + diff --git a/doc/paddle/api/paddle/metric/cos_sim_cn.rst b/doc/paddle/api/paddle/metric/cos_sim_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..bee83df05f929eb2808de57958d66198e8531d05 --- /dev/null +++ b/doc/paddle/api/paddle/metric/cos_sim_cn.rst @@ -0,0 +1,47 @@ +.. _cn_api_fluid_layers_cos_sim: + +cos_sim +------------------------------- + +.. py:function:: paddle.fluid.layers.cos_sim(X, Y) + + + + +余弦相似度算子(Cosine Similarity Operator) + +.. 
math:: + + Out = \frac{X^{T}*Y}{\sqrt{X^{T}*X}*\sqrt{Y^{T}*Y}} + +输入X和Y必须具有相同的shape。但是有一个例外:如果输入Y的第一维为1(不同于输入X的第一维度),在计算它们的余弦相似度之前,Y的第一维度会自动进行广播(broadcast),以便于匹配输入X的shape。 + +输入X和Y可以都携带或者都不携带LoD(Level of Detail)信息。但输出和输入X的LoD信息保持一致。 + +参数: + - **X** (Variable) - cos_sim操作函数的第一个输入,维度为 ``[N_1, N_2, ..., N_k]`` 的多维LoDTensor, 维度不能小于2。数据类型:float32。 + - **Y** (Variable) - cos_sim操作函数的第二个输入,维度为 ``[N_1 或者 1, N_2, ..., N_k]`` 的多维Tensor。数据类型:float32。 + +返回:LoDTensor。输出两个输入的余弦相似度。 + +返回类型:Variable + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + x = fluid.layers.data(name='x', shape=[3, 7], dtype='float32', append_batch_size=False) + y = fluid.layers.data(name='y', shape=[1, 7], dtype='float32', append_batch_size=False) + out = fluid.layers.cos_sim(x, y) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + np_x = np.random.random(size=(3, 7)).astype('float32') + np_y = np.random.random(size=(1, 7)).astype('float32') + output = exe.run(feed={"x": np_x, "y": np_y}, fetch_list = [out]) + print(output) + + + diff --git a/doc/paddle/api/paddle/metric/mean_iou_cn.rst b/doc/paddle/api/paddle/metric/mean_iou_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..254c9d9e5a4200741675d7c926085526bda5862f --- /dev/null +++ b/doc/paddle/api/paddle/metric/mean_iou_cn.rst @@ -0,0 +1,41 @@ +.. _cn_api_fluid_layers_mean_iou: + +mean_iou +------------------------------- + +.. py:function:: paddle.fluid.layers.mean_iou(input, label, num_classes) + + + + +该OP计算均值IOU, 均值IOU(Mean Intersection-Over-Union)是语义图像分割中的常用的评价指标之一,它首先计算每个类的IOU,然后计算类之间的平均值。IOU定义如下: + +.. math:: + + IOU = \frac{true\_positive}{true\_positive+false\_positive+false\_negative} + +先得到类别的预测结果,然后从中计算均值-IOU。 + +参数: + - **input** (Variable) - 分割类别预测结果,类型为int32或int64的多维Tensor。 + - **label** (Variable) - 真实label,类型为int32或int64的多维Tensor,它的shape与input相同。 + - **num_classes** (int32) - 类别数目。 + +返回: + - **mean_iou** (Variable) - 类型为float32的1-D Tensor,shape为[1], 均值IOU的计算结果。 + - **out_wrong** (Variable) - 类型为int32的1-D Tensor,shape为[num_classes],代表每个类别中错误的个数。 + - **out_correct** (Variable) - 类型为int32的1-D Tensor,shape为[num_classes],代表每个类别中正确的个数。 + + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + iou_shape = [32, 32] + num_classes = 5 + predict = fluid.layers.data(name='predict', shape=iou_shape, dtype='int64') + label = fluid.layers.data(name='label', shape=iou_shape, dtype='int64') + mean_iou, out_wrong, out_correct = fluid.layers.mean_iou(predict, label, num_classes) + + diff --git a/doc/paddle/api/paddle/metric/metrics/Accuracy_cn.rst b/doc/paddle/api/paddle/metric/metrics/Accuracy_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2b04be2fecb8812acaf7f40d17da0d2d06d4f7c7 --- /dev/null +++ b/doc/paddle/api/paddle/metric/metrics/Accuracy_cn.rst @@ -0,0 +1,106 @@ +.. _cn_api_metric_Accuracy: + +Accuracy +------------------------------- + +.. py:class:: paddle.metric.Accuracy() + +计算准确率(accuracy)。 + +参数: +::::::::: + - **topk** (int|tuple(int)) - 计算准确率的top个数,默认是1。 + - **name** (str, optional) - metric实例的名字,默认是'acc'。 + +**代码示例**: + + # 独立使用示例: + + .. 
code-block:: python + + import numpy as np + import paddle + + paddle.disable_static() + x = paddle.to_tensor(np.array([ + [0.1, 0.2, 0.3, 0.4], + [0.1, 0.4, 0.3, 0.2], + [0.1, 0.2, 0.4, 0.3], + [0.1, 0.2, 0.3, 0.4]])) + y = paddle.to_tensor(np.array([[0], [1], [2], [3]])) + + m = paddle.metric.Accuracy() + correct = m.compute(x, y) + m.update(correct) + res = m.accumulate() + print(res) # 0.75 + + + # 在Model API中的示例: + + .. code-block:: python + + import paddle + + paddle.disable_static() + train_dataset = paddle.vision.datasets.MNIST(mode='train') + + model = paddle.Model(paddle.vision.LeNet(classifier_activation=None)) + optim = paddle.optimizer.Adam( + learning_rate=0.001, parameters=model.parameters()) + model.prepare( + optim, + loss=paddle.nn.CrossEntropyLoss(), + metrics=paddle.metric.Accuracy()) + + model.fit(train_dataset, batch_size=64) + + +.. py:function:: compute(pred, label, *args) + +计算top-k(topk中的最大值)的索引。 + +参数: +::::::::: + - **pred** (Tensor) - 预测结果为是float64或float32类型的Tensor。 + - **label** (Tensor) - 真实的标签值是一个2D的Tensor,shape为[batch_size, 1], 数据类型为int64。 + +返回: 一个Tensor,shape是[batch_size, topk], 值为0或1,1表示预测正确. + + +.. py:function:: update(pred, label, *args) + +更新metric的状态(正确预测的个数和总个数),以便计算累积的准确率。返回当前step的准确率。 + +参数: +::::::::: + - **correct** (numpy.array | Tensor): 一个值为0或1的Tensor,shape是[batch_size, topk]。 + +返回: 当前step的准确率。 + + +.. py:function:: reset() + +清空状态和计算结果。 + +返回 +::::::::: + 无 + + +.. py:function:: accumulate() + +累积的统计指标,计算和返回准确率。 + +返回 +::::::::: + 准确率,一般是个标量 或 多个标量,和topk的个数一致。 + + +.. py:function:: name() + +返回Metric实例的名字, 参考上述name,默认是'acc'。 + +返回 +::::::::: + 评估的名字,string类型。 diff --git a/doc/paddle/api/paddle/metric/metrics/Auc_cn.rst b/doc/paddle/api/paddle/metric/metrics/Auc_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d14fce51f0a8d0075784aa21bc50ea96a5fb5678 --- /dev/null +++ b/doc/paddle/api/paddle/metric/metrics/Auc_cn.rst @@ -0,0 +1,112 @@ +.. _cn_api_metric_Auc: + +Auc +------------------------------- + +.. py:class:: paddle.metric.Auc() + +**注意**:目前只用Python实现Auc,可能速度略慢 + +该接口计算Auc,在二分类(binary classification)中广泛使用。相关定义参考 https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve 。 + +该接口创建四个局部变量true_positives, true_negatives, false_positives和false_negatives,用于计算Auc。为了离散化AUC曲线,使用临界值的线性间隔来计算召回率和准确率的值。用false positive的召回值高度计算ROC曲线面积,用recall的准确值高度计算PR曲线面积。 + + +参数: +::::::::: + - **curve** (str) - 将要计算的曲线名的模式,包括'ROC'(默认)或者'PR'(Precision-Recall-curve)。 + - **num_thresholds** (int) - 离散化AUC曲线的整数阈值数,默认是4095。 + - **name** (str,可选) – metric实例的名字,默认是'auc'。 + +**代码示例**: + + # 独立使用示例: + + .. code-block:: python + + import numpy as np + import paddle + + m = paddle.metric.Auc() + + n = 8 + class0_preds = np.random.random(size = (n, 1)) + class1_preds = 1 - class0_preds + + preds = np.concatenate((class0_preds, class1_preds), axis=1) + labels = np.random.randint(2, size = (n, 1)) + + m.update(preds=preds, labels=labels) + res = m.accumulate() + + + # 在Model API中的示例: + + .. 
code-block:: python + + import numpy as np + import paddle + import paddle.nn as nn + + class Data(paddle.io.Dataset): + def __init__(self): + super(Data, self).__init__() + self.n = 1024 + self.x = np.random.randn(self.n, 10).astype('float32') + self.y = np.random.randint(2, size=(self.n, 1)).astype('int64') + + def __getitem__(self, idx): + return self.x[idx], self.y[idx] + + def __len__(self): + return self.n + + paddle.disable_static() + model = paddle.Model(nn.Sequential( + nn.Linear(10, 2), nn.Softmax()) + ) + optim = paddle.optimizer.Adam( + learning_rate=0.001, parameters=model.parameters()) + + def loss(x, y): + return nn.functional.nll_loss(paddle.log(x), y) + + model.prepare( + optim, + loss=loss, + metrics=paddle.metric.Auc()) + data = Data() + model.fit(data, batch_size=16) + + +.. py:function:: update(pred, label, *args) + +更新AUC计算的状态。 + +参数: +::::::::: + - **preds** (numpy.array | Tensor): 一个shape为[batch_size, 2]的Numpy数组或Tensor,preds[i][j]表示第i个样本类别为j的概率。 + - **labels** (numpy.array | Tensor): 一个shape为[batch_size, 1]的Numpy数组或Tensor,labels[i]是0或1,表示第i个样本的类别。 + +返回: 无。 + + +.. py:function:: reset() + +清空状态和计算结果。 + +返回:无 + + +.. py:function:: accumulate() + +累积的统计指标,计算和返回AUC值。 + +返回:AUC值,一个标量。 + + +.. py:function:: name() + +返回Metric实例的名字, 参考上述的name,默认是'auc'。 + +返回: 评估的名字,string类型。 diff --git a/doc/paddle/api/paddle/metric/metrics/Metric_cn.rst b/doc/paddle/api/paddle/metric/metrics/Metric_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a15735ed0a316e44fdf18643b5f6e2a6769f3d9b --- /dev/null +++ b/doc/paddle/api/paddle/metric/metrics/Metric_cn.rst @@ -0,0 +1,119 @@ +.. _cn_api_metric_Metric: + +Metric +------------------------------- + +.. py:class:: paddle.metric.Metric() + + +评估器metric的基类。 + +用法: + + .. code-block:: text + + m = SomeMetric() + for prediction, label in ...: + m.update(prediction, label) + m.accumulate() + +:code:`compute` 接口的进阶用法: + +在 :code:`compute` 中可以使用PaddlePaddle内置的算子进行评估器的状态,而不是通过 +Python/NumPy, 这样可以加速计算。:code:`update` 接口将 :code:`compute` 的输出作为 +输入,内部采用Python/NumPy计算。 + +:code: `Metric` 计算流程如下 (在{}中的表示模型和评估器的计算): + + .. code-block:: text + + inputs & labels || ------------------ + | || + {model} || + | || + outputs & labels || + | || tensor data + {Metric.compute} || + | || + metric states(tensor) || + | || + {fetch as numpy} || ------------------ + | || + metric states(numpy) || numpy data + | || + {Metric.update} \/ ------------------ + +**代码示例** + +以 计算正确率的 :code: `Accuracy` 为例,该评估器的输入为 :code: `pred` 和 +:code: `label`, 可以在 :code:`compute` 中通过 :code: `pred` 和 :code: `label` +先计算正确预测的矩阵。 例如,预测结果包含10类,:code: `pred` 的shape是[N, 10], +:code: `label` 的shape是[N, 1], N是batch size,我们需要计算top-1和top-5的准 +确率,可以在:code: `compute` 中计算每个样本的top-5得分,正确预测的矩阵的shape +是[N, 5]. + + + .. code-block:: python + def compute(pred, label): + # sort prediction and slice the top-5 scores + pred = paddle.argsort(pred, descending=True)[:, :5] + # calculate whether the predictions are correct + correct = pred == label + return paddle.cast(correct, dtype='float32') + +在:code:`compute` 中的计算,使用内置的算子(可以跑在GPU上,是的速度更快)。 +作为:code:`update` 的输入,该接口计算如下: + + .. code-block:: python + def update(self, correct): + accs = [] + for i, k in enumerate(self.topk): + num_corrects = correct[:, :k].sum() + num_samples = len(correct) + accs.append(float(num_corrects) / num_samples) + self.total[i] += num_corrects + self.count[i] += num_samples + return accs + +.. py:function:: reset() + +清空状态和计算结果。 + +返回:无 + + +.. 
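+在继续说明各方法之前,下面补充一个自定义评估器的最小完整草图(仅为示意,类名 MAE 及其实现均为假设,并假设 update 直接接收 numpy 数组),用来串联 update、reset、accumulate 与 name 的用法:
+
+.. code-block:: python
+
+    import numpy as np
+    import paddle
+
+    # 仅为示意的自定义评估器:累计平均绝对误差(MAE)
+    class MAE(paddle.metric.Metric):
+        def __init__(self, name='mae'):
+            super(MAE, self).__init__()
+            self._name = name
+            self.total = 0.0
+            self.count = 0
+
+        def update(self, preds, labels):
+            # 累加绝对误差之和与样本个数(假设输入为 numpy 数组)
+            self.total += float(np.abs(np.array(preds) - np.array(labels)).sum())
+            self.count += int(np.array(labels).size)
+
+        def reset(self):
+            self.total = 0.0
+            self.count = 0
+
+        def accumulate(self):
+            # 返回累计的平均绝对误差
+            return self.total / max(self.count, 1)
+
+        def name(self):
+            return self._name
+
+    m = MAE()
+    m.update(np.array([1.0, 3.0]), np.array([0.0, 1.0]))
+    print(m.accumulate())  # 1.5
+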
py:function:: update(*args) + + +更新状态。如果定义了:code:`compute` ,:code:`update` 的输入是:code:`compute` 的输出。 +如果没有定义,则输入是网络的输出**output**和标签**label**, +如: :code:`update(output1, output2, ..., label1, label2,...)` . + +也可以参考 :code:`update` 。 + + +.. py:function:: accumulate() + +累积的统计指标,计算和返回评估结果。 + +返回:评估结果,一般是个标量 或 多个标量。 + + +.. py:function:: name() + +返回Metric的名字, 一般通过__init__构造函数传入。 + +返回: 评估的名字,string类型。 + + +.. py:function:: compute() + +此接口可以通过PaddlePaddle内置的算子计算metric的状态,可以加速metric的计算, +为可选的高阶接口。 + +如果这个接口定义了,输入是网络的输出 **outputs** 和 标签 **labels** , 定义如: +:code:`compute(output1, output2, ..., label1, label2,...)` 。 +如果这个接口没有定义, 默认的行为是直接将输入参数返回给 :code: `update` ,则其 +定义如: :code:`update(output1, output2, ..., label1, label2,...)` 。 + +也可以参考 :code:`compute` 。 diff --git a/doc/paddle/api/paddle/metric/metrics/Precision_cn.rst b/doc/paddle/api/paddle/metric/metrics/Precision_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..528087b89718d0a157b9640d972f570b26cf967e --- /dev/null +++ b/doc/paddle/api/paddle/metric/metrics/Precision_cn.rst @@ -0,0 +1,104 @@ +.. _cn_api_metric_Precision: + +Precision +------------------------------- + +.. py:class:: paddle.metric.Precision() + + +精确率Precision(也称为 positive predictive value,正预测值)是被预测为正样例中实际为正的比例。 https://en.wikipedia.org/wiki/Evaluation_of_binary_classifiers 该类管理二分类任务的precision分数。 + +**注意**:这个metric只能用来评估二分类。 + + +参数: +::::::::: + - **name** (str,可选) – metric实例的名字,默认是'precision'。 + + +**代码示例** + + # 独立使用示例: + + .. code-block:: python + + import numpy as np + import paddle + + x = np.array([0.1, 0.5, 0.6, 0.7]) + y = np.array([0, 1, 1, 1]) + + m = paddle.metric.Precision() + m.update(x, y) + res = m.accumulate() + print(res) # 1.0 + + # 在Model API中的示例: + + .. code-block:: python + + import numpy as np + + import paddle + import paddle.nn as nn + + class Data(paddle.io.Dataset): + def __init__(self): + super(Data, self).__init__() + self.n = 1024 + self.x = np.random.randn(self.n, 10).astype('float32') + self.y = np.random.randint(2, size=(self.n, 1)).astype('float32') + + def __getitem__(self, idx): + return self.x[idx], self.y[idx] + + def __len__(self): + return self.n + + paddle.disable_static() + model = paddle.Model(nn.Sequential( + nn.Linear(10, 1), + nn.Sigmoid() + )) + optim = paddle.optimizer.Adam( + learning_rate=0.001, parameters=model.parameters()) + model.prepare( + optim, + loss=nn.BCELoss(), + metrics=paddle.metric.Precision()) + + data = Data() + model.fit(data, batch_size=16) + + +.. py:function:: update(preds, labels, *args) + +更新Precision的状态。 + +参数: +::::::::: + - **preds** (numpy.array | Tensor): 预测输出结果通常是sigmoid函数的输出,是一个数据类型为float64或float32的向量。 + - **labels** (numpy.array | Tensor): 真实标签的shape和:code: `preds` 相同,数据类型为int32或int64。 + +返回: 无。 + + +.. py:function:: reset() + +清空状态和计算结果。 + +返回:无 + + +.. py:function:: accumulate() + +累积的统计指标,计算和返回precision值。 + +返回:precision值,一个标量。 + + +.. py:function:: name() + +返回Metric实例的名字, 参考上述的name,默认是'precision'。 + +返回: 评估的名字,string类型。 diff --git a/doc/paddle/api/paddle/metric/metrics/Recall_cn.rst b/doc/paddle/api/paddle/metric/metrics/Recall_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..fc2aac5241b4173751245615d53f4dbd45fb16a9 --- /dev/null +++ b/doc/paddle/api/paddle/metric/metrics/Recall_cn.rst @@ -0,0 +1,104 @@ +.. _cn_api_metric_Recall: + +Recall +------------------------------- + +.. 
py:class:: paddle.metric.Recall() + + +召回率Recall(也称为敏感度)是指得到的相关实例数占相关实例总数的比例。https://en.wikipedia.org/wiki/Precision_and_recall 该类管理二分类任务的召回率。 + +**注意**:这个metric只能用来评估二分类。 + + +参数 +::::::::: + - **name** (str,可选) – metric实例的名字,默认是'recall'。 + + +**代码示例** + + # 独立使用示例: + + .. code-block:: python + + import numpy as np + import paddle + + x = np.array([0.1, 0.5, 0.6, 0.7]) + y = np.array([1, 0, 1, 1]) + + m = paddle.metric.Recall() + m.update(x, y) + res = m.accumulate() + print(res) # 2.0 / 3.0 + + # 在Model API中的示例: + + .. code-block:: python + + import numpy as np + + import paddle + import paddle.nn as nn + + class Data(paddle.io.Dataset): + def __init__(self): + super(Data, self).__init__() + self.n = 1024 + self.x = np.random.randn(self.n, 10).astype('float32') + self.y = np.random.randint(2, size=(self.n, 1)).astype('float32') + + def __getitem__(self, idx): + return self.x[idx], self.y[idx] + + def __len__(self): + return self.n + + paddle.disable_static() + model = paddle.Model(nn.Sequential( + nn.Linear(10, 1), + nn.Sigmoid() + )) + optim = paddle.optimizer.Adam( + learning_rate=0.001, parameters=model.parameters()) + model.prepare( + optim, + loss=nn.BCELoss(), + metrics=[paddle.metric.Precision(), paddle.metric.Recall()]) + + data = Data() + model.fit(data, batch_size=16) + + +.. py:function:: update(preds, labels, *args) + +更新Recall的状态。 + +参数: +::::::::: + - **preds** (numpy.array | Tensor): 预测输出结果通常是sigmoid函数的输出,是一个数据类型为float64或float32的向量。 + - **labels** (numpy.array | Tensor): 真实标签的shape和:code: `preds` 相同,数据类型为int32或int64。 + +返回: 无。 + + +.. py:function:: reset() + +清空状态和计算结果。 + +返回:无 + + +.. py:function:: accumulate() + +累积的统计指标,计算和返回recall值。 + +返回:precision值,一个标量。 + + +.. py:function:: name() + +返回Metric实例的名字, 参考上述的name,默认是'recall'。 + +返回: 评估的名字,string类型。 diff --git a/doc/paddle/api/paddle/nn/GRUCell_cn.rst b/doc/paddle/api/paddle/nn/GRUCell_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..953c666ca9c83a38275893507de8156229438f11 --- /dev/null +++ b/doc/paddle/api/paddle/nn/GRUCell_cn.rst @@ -0,0 +1,65 @@ +.. _cn_api_fluid_layers_GRUCell: + +GRUCell +------------------------------- + + +.. py:class:: paddle.fluid.layers.GRUCell(hidden_size, param_attr=None, bias_attr=None, gate_activation=None, activation=None, dtype="float32", name="GRUCell") + + + + +门控循环单元(Gated Recurrent Unit)。通过对 :code:`fluid.contrib.layers.rnn_impl.BasicGRUUnit` 包装,来让它可以应用于RNNCell。 + +公式如下: + +.. math:: + u_t & = act_g(W_{ux}x_{t} + W_{uh}h_{t-1} + b_u)\\ + r_t & = act_g(W_{rx}x_{t} + W_{rh}h_{t-1} + b_r)\\ + \tilde{h_t} & = act_c(W_{cx}x_{t} + W_{ch}(r_t \odot h_{t-1}) + b_c)\\ + h_t & = u_t \odot h_{t-1} + (1-u_t) \odot \tilde{h_t} + +更多细节可以参考 `Learning Phrase Representations using RNN Encoder Decoder for Statistical Machine Translation `_ + +参数: + - **hidden_size** (int) - GRUCell中的隐藏层大小。 + - **param_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr`。 + - **bias_attr** (ParamAttr,可选) - 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **gate_activation** (function,可选) - :math:`act_g` 的激活函数。 默认值为 :code:`fluid.layers.sigmoid`。 + - **activation** (function,可选) - :math:`act_c` 的激活函数。 默认值为 :code:`fluid.layers.tanh` + - **dtype** (string,可选) - 此cell中使用的数据类型。 默认为"float32"。 + - **name** (string,可选) - 用于标识参数和偏差的名称域。 + +返回:GRUCell类的实例对象。 + +**示例代码** + +.. code-block:: python + + import paddle.fluid.layers as layers + cell = layers.GRUCell(hidden_size=256) + + +.. 
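+在上面示例的基础上,下面补充一个单步调用的示意草图(仅为演示,其中 step_input、pre_hidden 等变量名与形状均为假设),对应下文的 call 方法:
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+    import paddle.fluid.layers as layers
+
+    cell = layers.GRUCell(hidden_size=256)
+    # 单步输入与上一时刻的隐状态(形状仅为示意)
+    step_input = fluid.data(name="step_input", shape=[None, 128], dtype="float32")
+    pre_hidden = fluid.data(name="pre_hidden", shape=[None, 256], dtype="float32")
+    output, new_hidden = cell(step_input, pre_hidden)
+    # output 与 new_hidden 为同一个 tensor,形状为 [batch_size, 256]
+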
py:method:: call(inputs, states) + +执行GRU的计算。 + +参数: + - **input** (Variable) - 输入,形状为 :math:`[batch\_size,input\_size]` 的tensor,对应于公式中的 :math:`x_t` 。数据类型应为float32。 + - **states** (Variable) - 状态,形状为 :math:`[batch\_size,hidden\_size]` 的tensor。 对应于公式中的 :math:`h_{t-1}` 。数据类型应为float32。 + +返回:一个元组 :code:`(outputs, new_states)` ,其中 :code:`outputs` 和 :code:`new_states` 是同一个tensor,其形状为 :math:`[batch\_size,hidden\_size]`,数据类型和 :code:`states` 的数据类型相同,对应于公式中的 :math:`h_t`。 + +返回类型:tuple + +.. py:method:: state_shape() + +GRUCell的 :code:`state_shape` 是形状 :math:`[hidden\_size]` (batch大小为-1,自动插入到形状中),对应于 :math:`h_{t-1}` 的形状。 + +参数:无。 + +返回:GRUCell的 :code:`state_shape`。 + +返回类型:Variable + + diff --git a/doc/paddle/api/paddle/nn/LSTMCell_cn.rst b/doc/paddle/api/paddle/nn/LSTMCell_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..cebebb05235fe4cb9338ef681d4bd51d17dd1f1d --- /dev/null +++ b/doc/paddle/api/paddle/nn/LSTMCell_cn.rst @@ -0,0 +1,66 @@ +.. _cn_api_fluid_layers_LSTMCell: + +LSTMCell +------------------------------- + + + +.. py:class:: paddle.fluid.layers.LSTMCell(hidden_size, param_attr=None, bias_attr=None, gate_activation=None, activation=None, forget_bias=1.0, dtype="float32", name="LSTMCell") + + + + +长短期记忆单元(Long Short-Term Memory)。通过对 :code:`fluid.contrib.layers.rnn_impl.BasicLSTMUnit` 包装,来让它可以应用于RNNCell。 + +公式如下: + +.. math:: + i_{t} &= act_g \left ( W_{x_{i}}x_{t}+W_{h_{i}}h_{t-1}+b_{i} \right ) \\ + f_{t} &= act_g \left ( W_{x_{f}}x_{t}+W_{h_{f}}h_{t-1}+b_{f}+forget\_bias \right ) \\ + c_{t} &= f_{t}c_{t-1}+i_{t}act_h\left ( W_{x_{c}}x_{t} +W_{h_{c}}h_{t-1}+b_{c}\right ) \\ + o_{t} &= act_g\left ( W_{x_{o}}x_{t}+W_{h_{o}}h_{t-1}+b_{o} \right ) \\ + h_{t} &= o_{t}act_h \left ( c_{t} \right ) + +更多细节可以参考 `RECURRENT NEURAL NETWORK REGULARIZATION `_ + +参数: + - **hidden_size** (int) - LSTMCell中的隐藏层大小。 + - **param_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr`。 + - **bias_attr** (ParamAttr,可选) - 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **gate_activation** (function,可选) - :math:`act_g` 的激活函数。 默认值为 :code:`fluid.layers.sigmoid`。 + - **activation** (function,可选) - :math:`act_c` 的激活函数。 默认值为 :code:`fluid.layers.tanh`。 + - **forget_bias** (float,可选) - 计算遗忘门时使用的遗忘偏置。默认值为 1.0。 + - **dtype** (string,可选) - 此Cell中使用的数据类型。 默认值为 `float32`。 + - **name** (string,可选) - 用于标识参数和偏差的名称域。 + +返回:LSTMCell类的实例对象。 + +**示例代码** + +.. code-block:: python + + import paddle.fluid.layers as layers + cell = layers.LSTMCell(hidden_size=256) + + +.. py:method:: call(inputs, states) + +执行LSTM的计算。 + +参数: + - **input** (Variable) - 输入,形状为 :math:`[batch\_size,input\_size]` 的tensor,对应于公式中的 :math:`x_t`。数据类型应为float32。 + - **states** (Variable) - 状态,包含两个tensor的列表,每个tensor形状为 :math:`[batch\_size,hidden\_size]`。 对应于公式中的 :math:`h_{t-1}, c_{t-1}`。数据类型应为float32。 + +返回:一个元组 :code:`(outputs, new_states)`,其中 :code:`outputs` 是形状为 :math:`[batch\_size,hidden\_size]` 的tensor,对应于公式中的 :math:`h_{t}`;:code:`new_states` 是一个列表,包含形状为 :math:`[batch\_size,hidden\_size]` 的两个tensor变量,它们对应于公式中的 :math:`h_{t}, c_{t}`。这些tensor的数据类型都与 :code:`states` 的数据类型相同。 + +返回类型:tuple + +.. 
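+下面再补充一个将 LSTMCell 交给 :code:`fluid.layers.rnn` 按时间步展开计算的示意草图(仅为演示,输入形状等均为假设):
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+    import paddle.fluid.layers as layers
+
+    cell = layers.LSTMCell(hidden_size=256)
+    # inputs 形状为 [batch_size, seq_len, input_size](均为示意)
+    inputs = fluid.data(name="inputs", shape=[None, None, 128], dtype="float32")
+    outputs, final_states = layers.rnn(cell=cell, inputs=inputs)
+    # outputs 形状为 [batch_size, seq_len, 256];final_states 为 [h, c] 两个 tensor 的列表
+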
py:method:: state_shape() + +LSTMCell的 :code:`state_shape` 是一个具有两个形状的列表::math:`[[hidden\_size], [hidden\_size]]` (batch大小为-1,自动插入到形状中)。 这两个形状分别对应于公式中的 :math:`h_{t-1}` and :math:`c_{t-1}`。 + +参数:无。 + +返回:LSTMCell的 :code:`state_shape` + +返回类型:list diff --git a/doc/paddle/api/paddle/nn/Linear_cn.rst b/doc/paddle/api/paddle/nn/Linear_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..47aef1f7cf3f86d5e02eda4da802a000df8e1483 --- /dev/null +++ b/doc/paddle/api/paddle/nn/Linear_cn.rst @@ -0,0 +1,57 @@ +.. _cn_api_fluid_dygraph_Linear: + +Linear +------------------------------- + +.. py:class:: paddle.nn.Linear(input_dim, output_dim, param_attr=None, bias_attr=None, act=None, dtype='float32') + + +**线性变换层:** + +.. math:: + + \\Out = Act({XW + b})\\ + +其中,:math:`X` 为输入的 Tensor, :math:`W` 和 :math:`b` 分别为权重和偏置。 + +Linear 层只接受一个 Tensor 的输入。 +Linear 层将输入 Tensor 与权重矩阵 :math:`W` 相乘,然后生成形状为 :math:`[N,*,output_dim]` 的输出张量, +其中 :math:`N` 是批量大小,:math:`*` 表示任意数量的附加尺寸。 +如果 bias_attr 不是 None,则将创建一个 bias 变量并将其添加到输出中。 +最后,如果激活 act 不是 None,则相应激活函数也将应用于输出上。 + +参数: + - **input_dim** (int) – 线性变换层输入单元的数目。 + - **output_dim** (int) – 线性变换层输出单元的数目。 + - **param_attr** (ParamAttr, 可选) – 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr, 可选) – 指定偏置参数属性的对象,若 `bias_attr` 为bool类型,如果设置为False,表示不会为该层添加偏置;如果设置为True,表示使用默认的偏置参数属性。默认值为None,表示使用默认的偏置参数属性。默认的偏置参数属性将偏置参数的初始值设为0。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **act** (str, 可选) – 应用于输出上的激活函数,如tanh、softmax、sigmoid,relu等,支持列表请参考 :ref:`api_guide_activations` ,默认值为None。 + - **dtype** (str, 可选) – 权重的数据类型,可以为float32或float64。默认为float32。 + +返回:无 + +**代码示例** + +.. code-block:: python + + from paddle.fluid.dygraph.base import to_variable + import paddle + import paddle.fluid as fluid + import numpy as np + + data = np.random.uniform( -1, 1, [30, 10, 32] ).astype('float32') + with fluid.dygraph.guard(): + linear = paddle.nn.Linear(32, 64) + data = to_variable(data) + res = linear(data) # [30, 10, 64] + +属性 +:::::::::::: +.. py:attribute:: weight + +本层的可学习参数,类型为 ``Parameter`` + +.. py:attribute:: bias + +本层的可学习偏置,类型为 ``Parameter`` + diff --git a/doc/paddle/api/paddle/nn/functional/activation/elu_cn.rst b/doc/paddle/api/paddle/nn/functional/activation/elu_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c0cf22a5123d75696dc8bdbd7fb78e2d33314c0c --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/activation/elu_cn.rst @@ -0,0 +1,44 @@ +.. _cn_api_nn_cn_elu: + +elu +------------------------------- + +.. py:function:: paddle.nn.functional.elu(x, alpha=1.0, name=None) + +elu激活层(ELU Activation Operator) + +根据 `Exponential Linear Units `_ 对输入Tensor中每个元素应用以下计算。 + +.. math:: + + elu(x) = max(0, x) + min(0, \alpha * (e^{x} − 1)) + +其中,:math:`x` 为输入的 Tensor + +参数: +:::::::::: + - x (Tensor) - 输入的 ``Tensor`` ,数据类型为:float32、float64。 + - alpha (float, 可选) - elu的alpha值,默认值为1.0。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +:::::::::: + ``Tensor`` ,数据类型和形状同 ``x`` 一致。 + +代码示例 +:::::::::: + +.. code-block:: python + + import paddle + import paddle.nn.functional as F + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([[-1,6],[1,15.6]])) + out = F.elu(x, alpha=0.2) + # [[-0.12642411 6. ] + # [ 1. 
15.6 ]] + + diff --git a/doc/paddle/api/paddle/nn/functional/activation/gelu_cn.rst b/doc/paddle/api/paddle/nn/functional/activation/gelu_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..586cd3677d7fddeeddccb47df01b46125dddba08 --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/activation/gelu_cn.rst @@ -0,0 +1,48 @@ +.. _cn_api_nn_cn_gelu: + +gelu +------------------------------- + +.. py:function:: paddle.nn.functional.gelu(x, approximate=False, name=None) + +gelu激活层(GELU Activation Operator) + +逐元素计算 gelu激活函数。更多细节请参考 `Gaussian Error Linear Units `_ 。 + +如果使用近似计算: + +.. math:: + gelu(x) = 0.5 * x * (1 + tanh(\sqrt{\frac{2}{\pi}} * (x + 0.044715x^{3}))) + +如果不使用近似计算: + +.. math:: + gelu(x) = 0.5 * x * (1 + erf(\frac{x}{\sqrt{2}})) + +其中,:math:`x` 为输入的 Tensor + +参数: +:::::::::: + - x (Tensor) - 输入的 ``Tensor`` ,数据类型为:float32、float64。 + - approximate (bool, 可选) - 是否使用近似计算,默认值为 False,表示不使用近似计算。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +:::::::::: + ``Tensor`` ,数据类型和形状同 ``x`` 一致。 + +代码示例 +:::::::::: + +.. code-block:: python + + import paddle + import paddle.nn.functional as F + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([[-1, 0.5],[1, 1.5]])) + out1 = F.gelu(x) # [-0.158655 0.345731 0.841345 1.39979] + out2 = F.gelu(x, True) # [-0.158808 0.345714 0.841192 1.39957] + diff --git a/doc/paddle/api/paddle/nn/functional/activation/hardshrink_cn.rst b/doc/paddle/api/paddle/nn/functional/activation/hardshrink_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e63411ffcfb57ded805757e3230ba0372a6ea638 --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/activation/hardshrink_cn.rst @@ -0,0 +1,42 @@ +.. _cn_api_nn_cn_hard_shrink: + +hardshrink +------------------------------- +.. py:function:: paddle.nn.functional.hardshrink(x, threshold=0.5, name=None) + +hardshrink激活层。计算公式如下: + +.. math:: + + hardshrink(x)= + \left\{ + \begin{aligned} + &x, & & if \ x > threshold \\ + &x, & & if \ x < -threshold \\ + &0, & & if \ others + \end{aligned} + \right. + +其中,:math:`x` 为输入的 Tensor + +参数 +:::::::::: + - x (Tensor) - 输入的 ``Tensor`` ,数据类型为:float32、float64。 + - threshold (float, 可选) - hardshrink激活计算公式中的threshold值。默认值为0.5。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +:::::::::: + ``Tensor`` ,数据类型和形状同 ``x`` 一致。 + +代码示例 +:::::::::: + +.. code-block:: python + + import paddle + import paddle.nn.functional as F + paddle.disable_static() + + x = paddle.to_tensor([-1, 0.3, 2.5]) + out = F.hardshrink(x) # [-1., 0., 2.5] diff --git a/doc/paddle/api/paddle/nn/functional/activation/hardtanh_cn.rst b/doc/paddle/api/paddle/nn/functional/activation/hardtanh_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..fa18b323674f9f5f9fac51b41f32c804b2c1852b --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/activation/hardtanh_cn.rst @@ -0,0 +1,45 @@ +.. _cn_api_nn_cn_hardtanh: + +hardtanh +------------------------------- +.. py:function:: paddle.nn.functional.hardtanh(x, min=-1.0, max=1.0, name=None) + +hardtanh激活层(Hardtanh Activation Operator)。计算公式如下: + +.. math:: + + hardtanh(x)= + \left\{ + \begin{aligned} + &max, & & if \ x > max \\ + &min, & & if \ x < min \\ + &x, & & if \ others + \end{aligned} + \right. 
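+上式等价于把输入逐元素裁剪到 [min, max] 区间。下面给出一个仅作对照的 NumPy 示意实现(函数名 hardtanh_ref 为假设):
+
+.. code-block:: python
+
+    import numpy as np
+
+    # 与上式的三段式定义等价:逐元素裁剪到 [t_min, t_max]
+    def hardtanh_ref(x, t_min=-1.0, t_max=1.0):
+        return np.clip(x, t_min, t_max)
+
+    print(hardtanh_ref(np.array([-1.5, 0.3, 2.5])))  # [-1.   0.3  1. ]
+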
+ +其中,:math:`x` 为输入的 Tensor + +参数 +:::::::::: + - x (Tensor) - 输入的 ``Tensor`` ,数据类型为:float32、float64。 + - min (float, 可选) - hardtanh激活计算公式中的min值。默认值为-1。 + - max (float, 可选) - hardtanh激活计算公式中的max值。默认值为1。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +:::::::::: + ``Tensor`` ,数据类型和形状同 ``x`` 一致。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import paddle.nn.functional as F + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([-1.5, 0.3, 2.5])) + out = F.hardtanh(x) # [-1., 0.3, 1.] diff --git a/doc/paddle/api/paddle/nn/functional/activation/hsigmoid_cn.rst b/doc/paddle/api/paddle/nn/functional/activation/hsigmoid_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..55e12069a80688f10c473e1af89ff77e5b410581 --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/activation/hsigmoid_cn.rst @@ -0,0 +1,59 @@ +.. _cn_api_fluid_layers_hsigmoid: + +hsigmoid +------------------------------- + + +.. py:function:: paddle.fluid.layers.hsigmoid(input, label, num_classes, param_attr=None, bias_attr=None, name=None, path_table=None, path_code=None, is_custom=False, is_sparse=False) + + + + +层次sigmoid(hierarchical sigmoid),该OP通过构建一个分类二叉树来降低计算复杂度,主要用于加速语言模型的训练过程。 + +该OP建立的二叉树中每个叶节点表示一个类别(单词),每个非叶子节点代表一个二类别分类器(sigmoid)。对于每个类别(单词),都有一个从根节点到它的唯一路径,hsigmoid累加这条路径上每个非叶子节点的损失得到总损失。 + +相较于传统softmax的计算复杂度 :math:`O(N)` ,hsigmoid可以将计算复杂度降至 :math:`O(logN)` ,其中 :math:`N` 表示类别总数(字典大小)。 + +若使用默认树结构,请参考 `Hierarchical Probabilistic Neural Network Language Model `_ 。 + +若使用自定义树结构,请将参数 ``is_custom`` 设置为True,并完成以下步骤(以语言模型为例): + +1. 使用自定义词典来建立二叉树,每个叶结点都应该是词典中的单词; + +2. 建立一个dict类型数据结构,用于存储 **单词id -> 该单词叶结点至根节点路径** 的映射,即路径表 ``path_table`` 参数; + +3. 建立一个dict类型数据结构,用于存储 **单词id -> 该单词叶结点至根节点路径的编码** 的映射,即路径编码 ``path_code`` 参数。 编码是指每次二分类的标签,1为真,0为假; + +4. 每个单词都已经有自己的路径和路径编码,当对于同一批输入进行操作时,可以同时传入一批路径和路径编码进行运算。 + +参数: + - **input** (Variable) - 输入Tensor。数据类型为float32或float64,形状为 ``[N, D]`` ,其中 ``N`` 为minibatch的大小,``D`` 为特征大小。 + - **label** (Variable) - 训练数据的标签。数据类型为int64,形状为 ``[N, 1]`` 。 + - **num_classes** (int) - 类别总数(字典大小)必须大于等于2。若使用默认树结构,即当 ``is_custom=False`` 时 ,必须设置该参数。若使用自定义树结构,即当 ``is_custom=True`` 时,它取值应为自定义树结构的非叶节点的个数,用于指定二分类的类别总数。 + - **param_attr** (ParamAttr,可选) - 该OP可学习参数的属性。可以设置为None或者一个ParamAttr的类(ParamAttr中可以指定参数的各种属性)。 该OP将利用 ``param_attr`` 属性来创建ParamAttr实例。如果没有设置 ``param_attr`` 的初始化函数,那么参数将采用Xavier初始化。默认值为None。 + - **bias_attr** (ParamAttr, 可选) - 该OP的偏置参数的属性。可以设置为None或者一个ParamAttr的类(ParamAttr中可以指定参数的各种属性)。 该OP将利用 ``bias_attr`` 属性来创建ParamAttr实例。如果没有设置 ``bias_attr`` 的初始化函数,参数初始化为0.0。默认值为None。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + - **path_table** (Variable,可选) – 存储每一批样本从类别(单词)到根节点的路径,按照从叶至根方向存储。 数据类型为int64,形状为 ``[N, L]`` ,其中L为路径长度。``path_table`` 和 ``path_code`` 应具有相同的形状, 对于每个样本i,path_table[i]为一个类似np.ndarray的结构,该数组内的每个元素都是其双亲结点权重矩阵的索引。默认值为None。 + - **path_code** (Variable,可选) – 存储每一批样本从类别(单词)到根节点的路径编码,按从叶至根方向存储。数据类型为int64,形状为 ``[N, L]``。默认值为None。 + - **is_custom** (bool,可选) – 是否使用用户自定义二叉树取代默认二叉树结构。如果设置为True,请务必设置 ``path_table`` , ``path_code`` , ``num_classes`` ,否则必须设置num_classes。默认值为False。 + - **is_sparse** (bool,可选) – 是否使用稀疏更新方式。如果设置为True,W的梯度和输入梯度将会变得稀疏。默认值为False。 + +返回: 层次sigmoid计算后的Tensor,形状为[N, 1],数据类型和 ``input`` 一致。 + +返回类型: Variable + + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + x = fluid.layers.fill_constant(shape=[4, 3], value=0.9, dtype='float32') + # x = [[0.9, 0.9, 0.9], [0.9, 0.9, 0.9], [0.9, 0.9, 0.9], [0.9, 0.9, 0.9]] + y = fluid.layers.fill_constant( + shape=[4, 1], value=1, dtype='int64') + # y = [[1], [1], [1], [1]] + out = fluid.layers.hsigmoid(input=x, label=y, num_classes=2, param_attr=fluid.initializer.Constant( + value=0.05), bias_attr=fluid.initializer.Constant(value=.0)) + # out = [[0.62792355], [0.62792355], [0.62792355], [0.62792355]] diff --git a/doc/paddle/api/paddle/nn/functional/activation/leaky_relu_cn.rst b/doc/paddle/api/paddle/nn/functional/activation/leaky_relu_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a0bb19d6ec7f383294f7943389fdd962a6d94bbb --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/activation/leaky_relu_cn.rst @@ -0,0 +1,43 @@ +.. _cn_api_nn_cn_leaky_relu: + +leaky_relu +------------------------------- +.. py:function:: paddle.nn.functional.leaky_relu(x, negative_slope=0.01, name=None) + +leaky_relu激活层。计算公式如下: + +.. math:: + + LeakyReLU(x)= + \left\{ + \begin{aligned} + &x, & & if \ x >= 0 \\ + &negative\_slope * x, & & otherwise \\ + \end{aligned} + \right. \\ + +其中,:math:`x` 为输入的 Tensor + +参数 +:::::::::: + - x (Tensor) - 输入的Tensor,数据类型为:float32、float64。 + - negative_slope (float,可选) - :math:`x < 0` 时的斜率。默认值为0.01。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +:::::::::: + ``Tensor`` ,数据类型和形状同 ``x`` 一致。 + +代码示例 +:::::::::: + +.. code-block:: python + + import paddle + import paddle.nn.functional as F + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([-2, 0, 1], 'float32')) + out = F.leaky_relu(x) # [-0.02, 0., 1.] diff --git a/doc/paddle/api/paddle/nn/functional/activation/log_sigmoid_cn.rst b/doc/paddle/api/paddle/nn/functional/activation/log_sigmoid_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..bf9b685eae444306229bd8968d413042a6008fe8 --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/activation/log_sigmoid_cn.rst @@ -0,0 +1,36 @@ +.. _cn_api_nn_cn_log_sigmoid: + +log_sigmoid +------------------------------- + +.. py:function:: paddle.nn.functional.log_sigmoid(x, name=None) + +log_sigmoid激活层。计算公式如下: + +.. math:: + + log\_sigmoid(x) = \log \frac{1}{1 + e^{-x}} + +其中,:math:`x` 为输入的 Tensor + +参数 +:::::::::: + - x (Tensor) - 输入的 ``Tensor`` ,数据类型为:float32、float64。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +:::::::::: + ``Tensor`` ,数据类型和形状同 ``x`` 一致。 + +代码示例 +:::::::::: + +.. code-block:: python + + import paddle + import paddle.nn.functional as F + + paddle.disable_static() + + x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0]) + out = F.log_sigmoid(x) # [-0.313262 -0.126928 -0.0485874 -0.0181499] diff --git a/doc/paddle/api/paddle/nn/functional/activation/log_softmax_cn.rst b/doc/paddle/api/paddle/nn/functional/activation/log_softmax_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5509a6a4f18b928ffa4426b7bedfda88926f5017 --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/activation/log_softmax_cn.rst @@ -0,0 +1,51 @@ +.. _cn_api_nn_cn_log_softmax: + +log_softmax +------------------------------- +.. py:function:: paddle.nn.functional.log_softmax(x, axis=-1, dtype=None, name=None) + +该OP实现了log_softmax层。OP的计算公式如下: + +.. 
math:: + + Out[i, j] = log(softmax(x)) = log(\frac{\exp(X[i, j])}{\sum_j(exp(X[i, j])}) + +参数 +:::::::::: + - x (Tensor) - 输入的 ``Tensor`` ,数据类型为:float32、float64。 + - axis (int, 可选) - 指定对输入 ``x`` 进行运算的轴。``axis`` 的有效范围是[-D, D),D是输入 ``x`` 的维度, ``axis`` 为负值时与 :math:`axis + D` 等价。默认值为-1。 + - dtype (str|np.dtype|core.VarDesc.VarType, 可选) - 输入Tensor的数据类型。如果指定了 ``dtype`` ,则输入Tensor的数据类型会在计算前转换到 ``dtype`` 。``dtype``可以用来避免数据溢出。如果 ``dtype`` 为None,则输出Tensor的数据类型和 ``x`` 相同。默认值为None。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +:::::::::: + ``Tensor`` ,形状和 ``x`` 相同,数据类型为 ``dtype`` 或者和 ``x`` 相同。 + +代码示例 +:::::::::: + +.. code-block:: python + + import paddle + import paddle.nn.functional as F + import numpy as np + + paddle.disable_static() + + x = np.array([[[-2.0, 3.0, -4.0, 5.0], + [3.0, -4.0, 5.0, -6.0], + [-7.0, -8.0, 8.0, 9.0]], + [[1.0, -2.0, -3.0, 4.0], + [-5.0, 6.0, 7.0, -8.0], + [6.0, 7.0, 8.0, 9.0]]]).astype('float32') + x = paddle.to_tensor(x) + out1 = F.log_softmax(x) + out2 = F.log_softmax(x, dtype='float64') + # out1's data type is float32; out2's data type is float64 + # out1 and out2's value is as follows: + # [[[ -7.1278396 -2.1278396 -9.127839 -0.12783948] + # [ -2.1270514 -9.127051 -0.12705144 -11.127051 ] + # [-16.313261 -17.313261 -1.3132617 -0.31326184]] + # [[ -3.0518122 -6.051812 -7.051812 -0.051812 ] + # [-12.313267 -1.3132664 -0.3132665 -15.313267 ] + # [ -3.4401896 -2.4401896 -1.4401896 -0.44018966]]] diff --git a/doc/paddle/api/paddle/nn/functional/activation/prelu_cn.rst b/doc/paddle/api/paddle/nn/functional/activation/prelu_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..caa1681a91964f84679793a7a795a812bc839a2a --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/activation/prelu_cn.rst @@ -0,0 +1,50 @@ +.. _cn_api_nn_cn_prelu: + +prelu +------------------------------- +.. py:function:: paddle.nn.functional.prelu(x, weight, name=None): + +prelu激活层(PRelu Activation Operator)。计算公式如下: + +.. math:: + + prelu(x) = max(0, x) + weight * min(0, x) + +其中,:math:`x` 和 `weight` 为输入的 Tensor + +参数 +:::::::::: + - x (Tensor) - 输入的 ``Tensor`` ,数据类型为:float32、float64。 + - weight (Tensor) - 可训练参数,数据类型同``x`` 一致,形状支持2种:[1] 或者 [in],其中`in`为输入的通道数。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +:::::::::: + ``Tensor`` ,数据类型和形状同 ``x`` 一致。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import paddle.nn.functional as F + import numpy as np + + paddle.disable_static() + + data = np.array([[[[-2.0, 3.0, -4.0, 5.0], + [ 3.0, -4.0, 5.0, -6.0], + [-7.0, -8.0, 8.0, 9.0]], + [[ 1.0, -2.0, -3.0, 4.0], + [-5.0, 6.0, 7.0, -8.0], + [ 6.0, 7.0, 8.0, 9.0]]]], 'float32') + x = paddle.to_tensor(data) + w = paddle.to_tensor(np.array([0.25]).astype('float32')) + out = F.prelu(x, w) + # [[[[-0.5 , 3. , -1. , 5. ], + # [ 3. , -1. , 5. , -1.5 ], + # [-1.75, -2. , 8. , 9. ]], + # [[ 1. , -0.5 , -0.75, 4. ], + # [-1.25, 6. , 7. , -2. ], + # [ 6. , 7. , 8. , 9. ]]]] \ No newline at end of file diff --git a/doc/paddle/api/paddle/nn/functional/activation/relu6_cn.rst b/doc/paddle/api/paddle/nn/functional/activation/relu6_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b4c98f8fce5e63674f35bda7951018e9eb2d1fa7 --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/activation/relu6_cn.rst @@ -0,0 +1,37 @@ +.. _cn_api_nn_cn_relu6: + +relu6 +------------------------------- + +.. py:function:: paddle.nn.functional.relu6(x, name=None) + +relu6激活层 + +.. 
math:: + + relu6(x) = min(max(0,x), 6) + +其中,:math:`x` 为输入的 Tensor + +参数: +:::::::::: + - x (Tensor) - 输入的 ``Tensor`` ,数据类型为:float32、float64。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +:::::::::: + ``Tensor`` ,数据类型和形状同 ``x`` 一致。 + +代码示例 +:::::::::: + +.. code-block:: python + + import paddle + import paddle.nn.functional as F + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([-1, 0.3, 6.5])) + out = F.relu6(x) # [0, 0.3, 6] diff --git a/doc/paddle/api/paddle/nn/functional/activation/relu_cn.rst b/doc/paddle/api/paddle/nn/functional/activation/relu_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..447d0bb01514d42aa74f6265d52cd8ed42c40880 --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/activation/relu_cn.rst @@ -0,0 +1,38 @@ +.. _cn_api_nn_cn_relu: + +relu +------------------------------- + +.. py:function:: paddle.nn.functional.relu(x, name=None) + +relu激活层(Rectified Linear Unit)。计算公式如下: + +.. math:: + + relu(x) = max(0, x) + +其中,:math:`x` 为输入的 Tensor + + +参数 +:::::::::: + - x (Tensor) - 输入的 ``Tensor`` ,数据类型为:float32、float64。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +:::::::::: + ``Tensor`` ,数据类型和形状同 ``x`` 一致。 + +代码示例 +:::::::::: + +.. code-block:: python + + import paddle + import paddle.nn.functional as F + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([-2, 0, 1]).astype('float32')) + out = F.relu(x) # [0., 0., 1.] diff --git a/doc/paddle/api/paddle/nn/functional/activation/selu_cn.rst b/doc/paddle/api/paddle/nn/functional/activation/selu_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a52d54bad21e955dbbd74dcd853d54aae32e1a56 --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/activation/selu_cn.rst @@ -0,0 +1,43 @@ +.. _cn_api_nn_cn_selu: + +selu +------------------------------- + +.. py:function:: paddle.nn.functional.selu(x, scale=1.0507009873554804934193349852946, alpha=1.6732632423543772848170429916717, name=None) + +selu激活层 + +.. math:: + + selu(x)= scale * + \begin{cases} + x, \text{if } x > 0 \\ + alpha * e^{x} - alpha, \text{if } x <= 0 + \end{cases} + +其中,:math:`x` 为输入的 Tensor + +参数: +:::::::::: + - x (Tensor) - 输入的 ``Tensor`` ,数据类型为:float32、float64。 + - scale (float, 可选) - selu激活计算公式中的scale值,必须大于1.0。默认值为1.0507009873554804934193349852946。 + - alpha (float, 可选) - selu激活计算公式中的alpha值,必须大于等于零。默认值为1.6732632423543772848170429916717。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +:::::::::: + ``Tensor`` ,数据类型和形状同 ``x`` 一致。 + +代码示例 +:::::::::: + +.. code-block:: python + + import paddle + import paddle.nn.functional as F + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([[0.0, 1.0],[2.0, 3.0]])) + out = F.selu(x) # [[0, 1.050701],[2.101402, 3.152103]] diff --git a/doc/paddle/api/paddle/nn/functional/activation/softmax_cn.rst b/doc/paddle/api/paddle/nn/functional/activation/softmax_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..74b1605f2b433c57bfae76b4629341450d451677 --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/activation/softmax_cn.rst @@ -0,0 +1,123 @@ +.. _cn_api_nn_cn_softmax: + +softmax +------------------------------- +.. 
py:function:: paddle.nn.functional.softmax(x, axis=-1, dtype=None, name=None) + + +该OP实现了softmax层。OP的计算过程如下: + +步骤1:输入 ``x`` 的 ``axis`` 维会被置换到最后一维; + +步骤2:将输入 ``x`` 在逻辑上变换为二维矩阵。二维矩阵第一维(列长度)是输入除最后一维之外的其他维度值的乘积,第二维(行长度)和输入 ``axis`` 维的长度相同;对于矩阵的每一行,softmax操作对其进行重新缩放,使得该行的每个元素在 \[0,1\] 范围内,并且总和为1; + +步骤3:softmax操作执行完成后,执行步骤1和步骤2的逆运算,将二维矩阵恢复至和输入 ``x`` 相同的维度。 + +上述步骤2中softmax操作计算过程如下: + + - 对于二维矩阵的每一行,计算K维向量(K是输入第 ``axis`` 维的长度)中指定位置的指数值和全部位置指数值的和。 + + - 指定位置指数值与全部位置指数值之和的比值就是softmax操作的输出。 + +对于二维矩阵中的第i行和第j列有: + +.. math:: + + softmax[i, j] = \frac{\exp(x[i, j])}{\sum_j(exp(x[i, j])} + +- 示例1(矩阵一共有三维。axis = -1,表示沿着最后一维(即第三维)做softmax操作) + +.. code-block:: text + + # input + + x.shape = [2, 3, 4] + + x.data = [[[2.0, 3.0, 4.0, 5.0], + [3.0, 4.0, 5.0, 6.0], + [7.0, 8.0, 8.0, 9.0]], + [[1.0, 2.0, 3.0, 4.0], + [5.0, 6.0, 7.0, 8.0], + [6.0, 7.0, 8.0, 9.0]]] + + axis = -1 + + # output + + out.shape = [2, 3, 4] + + out.data = [[[0.0320586 , 0.08714432, 0.23688282, 0.64391426], + [0.0320586 , 0.08714432, 0.23688282, 0.64391426], + [0.07232949, 0.19661193, 0.19661193, 0.53444665]], + [[0.0320586 , 0.08714432, 0.23688282, 0.64391426], + [0.0320586 , 0.08714432, 0.23688282, 0.64391426], + [0.0320586 , 0.08714432, 0.23688282, 0.64391426]]] + +- 示例2(矩阵一共有三维。axis = 1,表示沿着第二维做softmax操作) + +.. code-block:: text + + # input + + x.shape = [2, 3, 4] + + x.data = [[[2.0, 3.0, 4.0, 5.0], + [3.0, 4.0, 5.0, 6.0], + [7.0, 8.0, 8.0, 9.0]], + [[1.0, 2.0, 3.0, 4.0], + [5.0, 6.0, 7.0, 8.0], + [6.0, 7.0, 8.0, 9.0]]] + + axis = 1 + + # output + + out.shape = [2, 3, 4] + + out.data = [[[0.00657326, 0.00657326, 0.01714783, 0.01714783], + [0.01786798, 0.01786798, 0.04661262, 0.04661262], + [0.97555875, 0.97555875, 0.93623955, 0.93623955]], + [[0.00490169, 0.00490169, 0.00490169, 0.00490169], + [0.26762315, 0.26762315, 0.26762315, 0.26762315], + [0.72747516, 0.72747516, 0.72747516, 0.72747516]]] + + +参数 +:::::::::: + - x (Tensor) - 输入的 ``Tensor`` ,数据类型为:float32、float64。 + - axis (int, 可选) - 指定对输入 ``x`` 进行运算的轴。``axis`` 的有效范围是[-D, D),D是输入 ``x`` 的维度, ``axis`` 为负值时与 :math:`axis + D` 等价。默认值为-1。 + - dtype (str|np.dtype|core.VarDesc.VarType, 可选) - 输入Tensor的数据类型。如果指定了 ``dtype`` ,则输入Tensor的数据类型会在计算前转换到 ``dtype`` 。``dtype``可以用来避免数据溢出。如果 ``dtype`` 为None,则输出Tensor的数据类型和 ``x`` 相同。默认值为None。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +:::::::::: + ``Tensor`` ,形状和 ``x`` 相同,数据类型为 ``dtype`` 或者和 ``x`` 相同。 + +代码示例 +:::::::::: + +.. 
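+
+下面先给出一段仅作示意的简短代码(输入数值为随意构造,并非官方示例),用于说明 ``axis`` 参数决定在哪一维上进行softmax归一化;完整示例见其后:
+
+.. code-block:: python
+
+    # 示意代码:axis 决定在哪一维上归一化(该维上元素之和为 1)
+    import paddle
+    import paddle.nn.functional as F
+
+    paddle.disable_static()
+    x = paddle.to_tensor([[1.0, 2.0, 3.0],
+                          [4.0, 5.0, 6.0]])
+    out_last = F.softmax(x)           # 默认 axis=-1,每一行之和为 1
+    out_axis0 = F.softmax(x, axis=0)  # 沿第 0 维归一化,每一列之和为 1
+
+.. 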
code-block:: python + + import paddle + import paddle.nn.functional as F + import numpy as np + + paddle.disable_static() + + x = np.array([[[2.0, 3.0, 4.0, 5.0], + [3.0, 4.0, 5.0, 6.0], + [7.0, 8.0, 8.0, 9.0]], + [[1.0, 2.0, 3.0, 4.0], + [5.0, 6.0, 7.0, 8.0], + [6.0, 7.0, 8.0, 9.0]]], 'float32') + x = paddle.to_tensor(x) + out1 = F.softmax(x) + out2 = F.softmax(x, dtype='float64') + # out1's data type is float32; out2's data type is float64 + # out1 and out2's value is as follows: + # [[[0.0320586 , 0.08714432, 0.23688282, 0.64391426], + # [0.0320586 , 0.08714432, 0.23688282, 0.64391426], + # [0.07232949, 0.19661193, 0.19661193, 0.53444665]], + # [[0.0320586 , 0.08714432, 0.23688282, 0.64391426], + # [0.0320586 , 0.08714432, 0.23688282, 0.64391426], + # [0.0320586 , 0.08714432, 0.23688282, 0.64391426]]] diff --git a/doc/paddle/api/paddle/nn/functional/activation/softplus_cn.rst b/doc/paddle/api/paddle/nn/functional/activation/softplus_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..dabf852e2ade608c739fa9e0900935132c7b7b2d --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/activation/softplus_cn.rst @@ -0,0 +1,40 @@ +.. _cn_api_nn_cn_softplus: + +softplus +------------------------------- + +.. py:function:: paddle.nn.functional.softplus(x, beta=1, threshold=20, name=None) + +softplus激活层 + +.. math:: + + softplus(x) = \frac{1}{beta} * \log(1 + e^{beta * x}) \\ + \text{为了保证数值稳定性, 当}\,beta * x > threshold\,\text{时,函数转变为线性函数x}. + +其中,:math:`x` 为输入的 Tensor + +参数: +:::::::::: + - x (Tensor) - 输入的 ``Tensor`` ,数据类型为:float32、float64。 + - beta (float, 可选) - Softplus激活计算公式中的beta值。默认值为1。 + - threshold (float, 可选) - Softplus激活计算公式中的threshold值。默认值为20。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +:::::::::: + ``Tensor`` ,数据类型和形状同 ``x`` 一致。 + +代码示例 +:::::::::: + +.. code-block:: python + + import paddle + import paddle.nn.functional as F + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3])) + out = F.softplus(x) # [0.513015, 0.598139, 0.744397, 0.854355] diff --git a/doc/paddle/api/paddle/nn/functional/activation/softshrink_cn.rst b/doc/paddle/api/paddle/nn/functional/activation/softshrink_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d630a80d553919f7814247ac5b6049b1f3146f84 --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/activation/softshrink_cn.rst @@ -0,0 +1,42 @@ +.. _cn_api_nn_cn_softshrink: + +softshrink +------------------------------- + +.. py:function:: paddle.nn.functional.softshrink(x, threshold=0.5, name=None) + +softshrink激活层 + +.. math:: + + softshrink(x)= \begin{cases} + x - threshold, \text{if } x > threshold \\ + x + threshold, \text{if } x < -threshold \\ + 0, \text{otherwise} + \end{cases} + +其中,:math:`x` 为输入的 Tensor + +参数: +:::::::::: + - x (Tensor) - 输入的 ``Tensor`` ,数据类型为:float32、float64。 + - threshold (float, 可选) - softshrink激活计算公式中的threshold值,必须大于等于零。默认值为0.5。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +:::::::::: + ``Tensor`` ,数据类型和形状同 ``x`` 一致。 + +代码示例 +:::::::::: + +.. 
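+
+下面先给出一段仅作示意的简短代码(阈值 0.3 为随意选取,并非官方示例),用于说明 ``threshold`` 参数的作用:绝对值不超过 threshold 的元素被置 0,其余元素向 0 方向收缩 threshold;完整示例见其后:
+
+.. code-block:: python
+
+    # 示意代码:改变 threshold 后,被置 0 的区间随之变化
+    import paddle
+    import paddle.nn.functional as F
+
+    paddle.disable_static()
+    x = paddle.to_tensor([-0.9, -0.2, 0.1, 0.8])
+    out = F.softshrink(x, threshold=0.3)  # 期望结果约为 [-0.6, 0., 0., 0.5]
+
+.. 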
code-block:: python + + import paddle + import paddle.nn.functional as F + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([-0.9, -0.2, 0.1, 0.8])) + out = F.softshrink(x) # [-0.4, 0, 0, 0.3] diff --git a/doc/paddle/api/paddle/nn/functional/activation/softsign_cn.rst b/doc/paddle/api/paddle/nn/functional/activation/softsign_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..24c34bdbd2e89e28cc09e3ce421763afbafda529 --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/activation/softsign_cn.rst @@ -0,0 +1,37 @@ +.. _cn_api_nn_cn_softsign: + +softsign +------------------------------- + +.. py:function:: paddle.nn.functional.softsign(x, name=None) + +softsign激活层 + +.. math:: + + softsign(x) = \frac{x}{1 + |x|} + +其中,:math:`x` 为输入的 Tensor + +参数: +:::::::::: + - x (Tensor) - 输入的 ``Tensor`` ,数据类型为:float32、float64。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +:::::::::: + ``Tensor`` ,数据类型和形状同 ``x`` 一致。 + +代码示例 +:::::::::: + +.. code-block:: python + + import paddle + import paddle.nn.functional as F + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3])) + out = F.softsign(x) # [-0.285714, -0.166667, 0.0909091, 0.230769] diff --git a/doc/paddle/api/paddle/nn/functional/activation/tanhshrink_cn.rst b/doc/paddle/api/paddle/nn/functional/activation/tanhshrink_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2e49b2b5156ab0d27298fbeefdc0802061cb09ca --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/activation/tanhshrink_cn.rst @@ -0,0 +1,37 @@ +.. _cn_api_nn_cn_tanhshrink: + +tanhshrink +------------------------------- + +.. py:function:: paddle.nn.functional.tanhshrink(x, name=None) + +tanhshrink激活层 + +.. math:: + + tanhshrink(x) = x - tanh(x) + +其中,:math:`x` 为输入的 Tensor + +参数: +:::::::::: + - x (Tensor) - 输入的 ``Tensor`` ,数据类型为:float32、float64。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +:::::::::: + ``Tensor`` ,数据类型和形状同 ``x`` 一致。 + +代码示例 +:::::::::: + +.. code-block:: python + + import paddle + import paddle.nn.functional as F + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3])) + out = F.tanhshrink(x) # [-0.020051, -0.00262468, 0.000332005, 0.00868739] diff --git a/doc/paddle/api/paddle/nn/functional/common/alpha_dropout_cn.rst b/doc/paddle/api/paddle/nn/functional/common/alpha_dropout_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..cad8c3bb7d12fb6a9f9db481103d8e949ca0c688 --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/common/alpha_dropout_cn.rst @@ -0,0 +1,37 @@ +.. _cn_api_nn_functional_alpha_dropout: + +alpha_dropout +------------------------------- + +.. py:function:: paddle.nn.functional.alpha_dropout(x, p=0.5, training=True, name=None) + +alpha_dropout是一种具有自归一化性质的dropout。均值为0,方差为1的输入,经过alpha_dropout计算之后,输出的均值和方差与输入保持一致。alpha_dropout通常与SELU激活函数组合使用。 + +参数 +::::::::: + - **x** (Tensor): 输入的多维 `Tensor` ,数据类型为:float32、float64。 + - **p** (float): 将输入节点置0的概率,即丢弃概率。默认: 0.5。 + - **training** (bool): 标记是否为训练阶段。 默认: True。 + - **name** (str,可选): 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +返回 +::::::::: +经过alpha_dropout之后的结果,与输入x形状相同的 `Tensor` 。 + +代码示例 +::::::::: + +.. 
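+
+下面先给出一段仅作示意的简短代码(输入为随机构造的标准正态数据,并非官方示例),用于验证上文所述性质:对均值为0、方差为1的输入,alpha_dropout 输出的均值和方差大致保持不变;完整示例见其后:
+
+.. code-block:: python
+
+    # 示意代码:比较 alpha_dropout 前后的均值与标准差,二者应大致接近
+    import numpy as np
+    import paddle
+    import paddle.nn.functional as F
+
+    paddle.disable_static()
+    x = paddle.to_tensor(np.random.randn(10000).astype('float32'))
+    y = F.alpha_dropout(x, p=0.5)
+    print(x.numpy().mean(), x.numpy().std())
+    print(y.numpy().mean(), y.numpy().std())  # 与上一行数值大致接近
+
+.. 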
code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + x = np.array([[-1, 1], [-1, 1]]).astype('float32') + x = paddle.to_tensor(x) + y_train = paddle.nn.functional.alpha_dropout(x, 0.5) + y_test = paddle.nn.functional.alpha_dropout(x, 0.5, training=False) + print(x.numpy()) + print(y_train.numpy()) + # [[-0.10721093, 1.6655989 ], [-0.7791938, -0.7791938]] (randomly) + print(y_test.numpy()) diff --git a/doc/paddle/api/paddle/nn/functional/common/bilinear_cn.rst b/doc/paddle/api/paddle/nn/functional/common/bilinear_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6872bd957cd65779d834d2bda44774f9a04e2c36 --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/common/bilinear_cn.rst @@ -0,0 +1,41 @@ +.. _cn_api_nn_functional_bilinear: + +bilinear +------------------------------- + + +.. py:function:: paddle.nn.functional.bilinear(x1, x2, weight, bias=None, name=None) + +该层对两个输入执行双线性张量积。 +详细的计算和返回值维度请参见 :ref:`cn_api_nn_Bilinear` + +参数 +::::::::: + - **x1** (int): 第一个输入的 `Tensor` ,数据类型为:float32、float64。 + - **x2** (int): 第二个输入的 `Tensor` ,数据类型为:float32、float64。 + - **weight** (Parameter) :本层的可学习参数。形状是 [out_features, in1_features, in2_features]。 + - **bias** (Parameter, 可选) : 本层的可学习偏置。形状是 [1, out_features]。默认值为None,如果被设置成None,则不会有bias加到output结果上。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为 None。 + +返回 +::::::::: +``Tensor``,一个形为 [batch_size, out_features] 的 2-D 张量。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy + import paddle.nn.functional as F + + paddle.disable_static() + x1 = numpy.random.random((5, 5)).astype('float32') + x2 = numpy.random.random((5, 4)).astype('float32') + w = numpy.random.random((1000, 5, 4)).astype('float32') + b = numpy.random.random((1, 1000)).astype('float32') + + result = F.bilinear(paddle.to_tensor(x1), paddle.to_tensor(x2), paddle.to_tensor(w), paddle.to_tensor(b)) # result shape [5, 1000] + + diff --git a/doc/paddle/api/paddle/nn/functional/common/cosine_similarity_cn.rst b/doc/paddle/api/paddle/nn/functional/common/cosine_similarity_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c2f4109534b5485e5154cc140a1661d94b3945f8 --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/common/cosine_similarity_cn.rst @@ -0,0 +1,42 @@ +.. _cn_api_paddle_nn_cosine_similarity: + +cosine_similarity +------------------------------- + +.. py:function:: paddle.nn.functional.cosine_similarity(x1, x2, axis=1, eps=1e-8) + +该OP用于计算x1与x2沿axis维度的余弦相似度。 + +参数: + - **x1** (Tensor) - Tensor,数据类型支持float32, float64。 + - **x2** (Tensor) - Tensor,数据类型支持float32, float64。 + - **axis** (int) - 指定计算的维度,会在该维度上计算余弦相似度,默认值为1。 + - **eps** (float) - 很小的值,防止计算时分母为0,默认值为1e-8。 + + +返回: 余弦相似度的计算结果,数据类型与x1, x2相同。 + +返回类型:Tensor + + + +**代码示例:** + +.. code-block:: python + + import paddle + import paddle.nn as nn + import numpy as np + paddle.disable_static() + + np.random.seed(0) + x1 = np.random.rand(2,3) + x2 = np.random.rand(2,3) + x1 = paddle.to_tensor(x1) + x2 = paddle.to_tensor(x2) + result = paddle.nn.functional.cosine_similarity(x1, x2, axis=0) + print(result.numpy()) + # [0.99806249 0.9817672 0.94987036] + + + diff --git a/doc/paddle/api/paddle/nn/functional/common/dropout2d_cn.rst b/doc/paddle/api/paddle/nn/functional/common/dropout2d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..35c61c137a4842bc17f7c43eea4eb14d3a595e81 --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/common/dropout2d_cn.rst @@ -0,0 +1,41 @@ +.. 
_cn_api_nn_functional_dropout2d: + +dropout2d +------------------------------- + +.. py:function:: paddle.nn.functional.dropout2d(x, p=0.5, training=True, name=None) + +该算子根据丢弃概率 `p` ,在训练过程中随机将某些通道特征图置0(对一个形状为 `NCHW` 的4维张量,通道特征图指的是其中的形状为 `HW` 的2维特征图)。 + +.. note:: + 该op基于 ``paddle.nn.functional.dropout`` 实现,如您想了解更多,请参见 :ref:`cn_api_nn_functional_dropout` 。 + +参数 +::::::::: + - **x** (Tensor): 形状为[N, C, H, W]或[N, H, W, C]的4D `Tensor` ,数据类型为float32或float64。 + - **p** (float): 将输入通道置0的概率,即丢弃概率。默认: 0.5。 + - **training** (bool): 标记是否为训练阶段。 默认: True。 + - **name** (str,可选): 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +返回 +::::::::: +经过dropout2d之后的结果,与输入x形状相同的 `Tensor` 。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + x = np.random.random(size=(2, 3, 4, 5)).astype('float32') + x = paddle.to_tensor(x) + y_train = paddle.nn.functional.dropout2d(x) #train + y_test = paddle.nn.functional.dropout2d(x, training=False) + for i in range(2): + for j in range(3): + print(x.numpy()[i,j,:,:]) + print(y_train.numpy()[i,j,:,:]) + print(y_test.numpy()[i,j,:,:]) diff --git a/doc/paddle/api/paddle/nn/functional/common/dropout3d_cn.rst b/doc/paddle/api/paddle/nn/functional/common/dropout3d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0fc0ab0ee6a5cf37b81319673b9db8c1c131081c --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/common/dropout3d_cn.rst @@ -0,0 +1,39 @@ +.. _cn_api_nn_functional_dropout3d: + +dropout3d +------------------------------- + +.. py:function:: paddle.nn.functional.dropout3d(x, p=0.5, training=True, name=None) + +该算子根据丢弃概率 `p` ,在训练过程中随机将某些通道特征图置0(对一个形状为 `NCDHW` 的5维张量,通道指的是其中的形状为 `DHW` 的3维特征图)。 + +.. note:: + 该op基于 ``paddle.nn.functional.dropout`` 实现,如您想了解更多,请参见 :ref:`cn_api_nn_functional_dropout` 。 + +参数 +::::::::: + - **x** (Tensor): 形状为[N, C, D, H, W]或[N, D, H, W, C]的5D `Tensor` ,数据类型为float32或float64。 + - **p** (float): 将输入通道置0的概率,即丢弃概率。默认: 0.5。 + - **training** (bool): 标记是否为训练阶段。 默认: True。 + - **name** (str,可选): 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +返回 +::::::::: +经过dropout3d之后的结果,与输入x形状相同的 `Tensor` 。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + x = np.random.random(size=(2, 3, 4, 5, 6)).astype('float32') + x = paddle.to_tensor(x) + y_train = paddle.nn.functional.dropout3d(x) #train + y_test = paddle.nn.functional.dropout3d(x, training=False) + print(x.numpy()[0,0,:,:,:]) + print(y_train.numpy()[0,0,:,:,:]) + print(y_test.numpy()[0,0,:,:,:]) diff --git a/doc/paddle/api/paddle/nn/functional/common/dropout_cn.rst b/doc/paddle/api/paddle/nn/functional/common/dropout_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a349e66aeb5471ffe5810e2c5fe480d8f736515e --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/common/dropout_cn.rst @@ -0,0 +1,134 @@ +.. _cn_api_nn_functional_dropout: + +dropout +------------------------------- + +.. py:function:: paddle.nn.functional.dropout(x, p=0.5, axis=None, training=True, mode="upscale_in_train”, name=None) + +Dropout是一种正则化手段,该算子根据给定的丢弃概率 `p` ,在训练过程中随机将一些神经元输出设置为0,通过阻止神经元节点间的相关性来减少过拟合。 + +参数 +::::::::: + - **x** (Tensor): 输入的多维 `Tensor` ,数据类型为:float32、float64。 + - **p** (float): 将输入节点置0的概率,即丢弃概率。默认: 0.5。 + - **axis** (int|list): 指定对输入 `Tensor` 进行dropout操作的轴。默认: None。 + - **training** (bool): 标记是否为训练阶段。 默认: True。 + - **mode** (str): 丢弃单元的方式,有两种'upscale_in_train'和'downscale_in_infer',默认: 'upscale_in_train'。计算方法如下: + + 1. 
upscale_in_train, 在训练时增大输出结果。 + + - train: out = input * mask / ( 1.0 - p ) + - inference: out = input + + 2. downscale_in_infer, 在预测时减小输出结果 + + - train: out = input * mask + - inference: out = input * (1.0 - p) + + - **name** (str,可选): 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +返回 +::::::::: +经过dropout之后的结果,与输入x形状相同的 `Tensor` 。 + +使用示例1 +::::::::: +axis参数的默认值为None。当 ``axis=None`` 时,dropout的功能为: 对输入张量x中的任意元素,以丢弃概率p随机将一些元素输出置0。这是我们最常见的dropout用法。 + + - 下面以一个示例来解释它的实现逻辑,同时展示其它参数的含义。 + +.. code-block:: text + + 假定x是形状为2*3的2维张量: + [[1 2 3] + [4 5 6]] + 在对x做dropout时,程序会先生成一个和x相同形状的mask张量,mask中每个元素的值为0或1。 + 每个元素的具体值,则是依据丢弃概率从伯努利分布中随机采样得到。 + 比如,我们可能得到下面这样一个2*3的mask: + [[0 1 0] + [1 0 1]] + 将输入x和生成的mask点积,就得到了随机丢弃部分元素之后的结果: + [[0 2 0] + [4 0 6]] + 假定dropout的概率使用默认值,即 ``p=0.5`` ,若mode参数使用默认值,即 ``mode='upscale_in_train'`` , + 则在训练阶段,最终增大后的结果为: + [[0 4 0 ] + [8 0 12]] + 在测试阶段,输出跟输入一致: + [[1 2 3] + [4 5 6]] + 若参数mode设置为'downscale_in_infer',则训练阶段的输出为: + [[0 2 0] + [4 0 6]] + 在测试阶段,缩小后的输出为: + [[0.5 1. 1.5] + [2. 2.5 3. ]] + +使用示例2 +::::::::: +若参数axis不为None,dropout的功能为:以一定的概率从图像特征或语音序列中丢弃掉整个通道。 + + - axis应设置为: ``[0,1,...,ndim(x)-1]`` 的子集(ndim(x)为输入x的维度),例如: + + - 若x的维度为2,参数axis可能的取值有4种: ``None``, ``[0]``, ``[1]``, ``[0,1]`` + - 若x的维度为3,参数axis可能的取值有8种: ``None``, ``[0]``, ``[1]``, ``[2]``, ``[0,1]``, ``[0,2]``, ``[1,2]``, ``[0,1,2]`` + + - 下面以维度为2的输入张量展示axis参数的用法: + +.. code-block:: text + + 假定x是形状为2*3的2维Tensor: + [[1 2 3] + [4 5 6]] + (1) 若 ``axis=[0]`` , 则表示只在第0个维度做dropout。这时生成mask的形状为2*1。 + 例如,我们可能会得到这样的mask: + [[1] + [0]] + 这个2*1的mask在和x做点积的时候,会首先广播成一个2*3的矩阵: + [[1 1 1] + [0 0 0]] + 点积所得的结果为: + [[1 2 3] + [0 0 0]] + 之后依据其它参数的设置,得到最终的输出结果。 + + (2) 若 ``axis=[1]`` ,则表示只在第1个维度做dropout。这时生成的mask形状为1*3。 + 例如,我们可能会得到这样的mask: + [[1 0 1]] + 这个1*3的mask在和x做点积的时候,会首先广播成一个2*3的矩阵: + [[1 0 1] + [1 0 1]] + 点积所得结果为: + [[1 0 3] + [4 0 6]] + (3) 若 ``axis=[0, 1]`` ,则表示在第0维和第1维上做dropout。此时与默认设置 ``axis=None`` 的作用一致。 + +若输入x为4维张量,形状为 `NCHW` , 当设置 ``axis=[0,1]`` 时,则只会在通道 `N` 和 `C` 上做dropout,通道 `H` 和 `W` 的元素是绑定在一起的,即: ``paddle.nn.functional.dropout(x, p, axis=[0,1])`` , 此时对4维张量中的某个2维特征图(形状 `HW` ),或者全部置0,或者全部保留,这便是dropout2d的实现。详情参考 :ref:`cn_api_nn_functional_dropout2d` 。 + +类似的,若输入x为5维张量,形状为 `NCDHW` , 当设置 ``axis=[0,1]`` 时,便可实现dropout3d。详情参考 :ref:`cn_api_nn_functional_dropout3d` 。 + +.. note:: + 关于广播(broadcasting)机制,如您想了解更多,请参见 :ref:`cn_user_guide_broadcasting` 。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + x = np.array([[1,2,3], [4,5,6]]).astype('float32') + x = paddle.to_tensor(x) + y_train = paddle.nn.functional.dropout(x, 0.5) + y_test = paddle.nn.functional.dropout(x, 0.5, training=False) #test + y_0 = paddle.nn.functional.dropout(x, axis=0) + y_1 = paddle.nn.functional.dropout(x, axis=1) + y_01 = paddle.nn.functional.dropout(x, axis=[0,1]) + print(x.numpy()) + print(y_train.numpy()) + print(y_test.numpy()) + print(y_0.numpy()) + print(y_1.numpy()) + print(y_01.numpy()) diff --git a/doc/paddle/api/paddle/nn/functional/common/interpolate_cn.rst b/doc/paddle/api/paddle/nn/functional/common/interpolate_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5e43cf41995dc715f1b2c67dd0cb7f9937faba94 --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/common/interpolate_cn.rst @@ -0,0 +1,206 @@ +.. _cn_api_paddle_nn_functioanl_interpolate: + +Inerpolate +------------------------------- + +.. 
py:function:: paddle.nn.functional.interpolate(input, out_shape=None, scale=None, name=None, resample='BILINEAR', actual_shape=None, align_corners=True, align_mode=1, data_format='NCHW')
+
+
+**注意:** 参数 ``actual_shape`` 将被弃用,请使用 ``out_shape`` 替代。
+
+该OP用于调整一个batch中图片的大小。
+
+输入为4-D Tensor时形状为(num_batches, channels, in_h, in_w)或者(num_batches, in_h, in_w, channels),输入为5-D Tensor时形状为(num_batches, channels, in_d, in_h, in_w)或者(num_batches, in_d, in_h, in_w, channels),并且调整大小只适用于深度、高度和宽度对应的维度。
+
+支持的插值方法:
+
+    NEAREST:最近邻插值
+
+    BILINEAR:双线性插值
+
+    TRILINEAR:三线性插值
+
+    BICUBIC:双三次插值
+
+最近邻插值是在输入张量的高度和宽度上进行最近邻插值。
+
+双线性插值是线性插值的扩展,用于在二维网格上对两个变量(例如,该操作中的H方向和W方向)的函数进行插值。关键思想是先在一个方向上执行线性插值,然后在另一个方向上再次执行线性插值。
+
+三线性插值是线性插值的一种扩展,是3参数的插值方程(比如op里的D、H、W方向),在三个方向上进行线性插值。
+
+双三次插值是在二维网格上对数据点进行插值的三次插值的扩展,它能创造出比双线性插值和最近邻插值更为光滑的图像边缘。
+
+align_corners和align_mode是可选参数,插值的计算方法可以由它们选择。
+
+示例:
+
+::
+
+    scale 计算方法:
+
+        if align_corners = True && out_size > 1 :
+
+          scale_factor = (in_size-1.0)/(out_size-1.0)
+
+        else:
+
+          scale_factor = float(in_size/out_size)
+
+    不同插值方式的输出维度计算规则:
+
+    Nearest neighbor interpolation:
+
+      if:
+          align_corners = False
+
+          input : (N,C,H_in,W_in)
+          output: (N,C,H_out,W_out) where:
+
+          H_out = floor(H_{in} * scale_{factor})
+          W_out = floor(W_{in} * scale_{factor})
+
+      else:
+          align_corners = True
+
+          input : (N,C,H_in,W_in)
+          output: (N,C,H_out,W_out) where:
+
+          H_out = round(H_{in} * scale_{factor})
+          W_out = round(W_{in} * scale_{factor})
+
+    Bilinear interpolation:
+
+      if:
+          align_corners = False , align_mode = 0
+
+          input : (N,C,H_in,W_in)
+          output: (N,C,H_out,W_out) where:
+
+          H_out = (H_{in}+0.5) * scale_{factor} - 0.5
+          W_out = (W_{in}+0.5) * scale_{factor} - 0.5
+
+      else:
+
+          input : (N,C,H_in,W_in)
+          output: (N,C,H_out,W_out) where:
+
+          H_out = H_{in} * scale_{factor}
+          W_out = W_{in} * scale_{factor}
+
+    Bicubic interpolation:
+
+      if:
+          align_corners = False
+
+          input : (N,C,H_in,W_in)
+          output: (N,C,H_out,W_out) where:
+
+          H_out = (H_{in}+0.5) * scale_{factor} - 0.5
+          W_out = (W_{in}+0.5) * scale_{factor} - 0.5
+
+      else:
+
+          input : (N,C,H_in,W_in)
+          output: (N,C,H_out,W_out) where:
+
+          H_out = H_{in} * scale_{factor}
+          W_out = W_{in} * scale_{factor}
+
+    Trilinear interpolation:
+
+      if:
+          align_corners = False , align_mode = 0
+
+          input : (N,C,D_in,H_in,W_in)
+          output: (N,C,D_out,H_out,W_out) where:
+
+          D_out = (D_{in}+0.5) * scale_{factor} - 0.5
+          H_out = (H_{in}+0.5) * scale_{factor} - 0.5
+          W_out = (W_{in}+0.5) * scale_{factor} - 0.5
+
+      else:
+
+          input : (N,C,D_in,H_in,W_in)
+          output: (N,C,D_out,H_out,W_out) where:
+
+          D_out = D_{in} * scale_{factor}
+          H_out = H_{in} * scale_{factor}
+          W_out = W_{in} * scale_{factor}
+
+有关最近邻插值的详细信息,请参阅维基百科:
+https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation
+
+有关双线性插值的详细信息,请参阅维基百科:
+https://en.wikipedia.org/wiki/Bilinear_interpolation
+
+有关三线性插值的详细信息,请参阅维基百科:
+https://en.wikipedia.org/wiki/Trilinear_interpolation
+
+有关双三次插值的详细信息,请参阅维基百科:
+https://en.wikipedia.org/wiki/Bicubic_interpolation
+
+参数:
+    - **input** (Variable) - 4-D或5-D Tensor,数据类型为float32、float64或uint8,其数据格式由参数 ``data_format`` 指定。
+    - **out_shape** (list|tuple|Variable|None) - 输出Tensor的形状。输入为4-D Tensor时,形状为(out_h, out_w)的2-D Tensor;输入为5-D Tensor时,形状为(out_d, out_h, out_w)的3-D Tensor。如果 :code:`out_shape` 是列表,每一个元素可以是整数或者形状为[1]的变量。如果 :code:`out_shape` 是变量,则其维度大小为1。默认值为None。
+    - **scale** (float|Variable|None) - 输入的高度或宽度的乘数因子。out_shape和scale至少要设置一个,out_shape的优先级高于scale。默认值为None。
+    - **name** (str|None) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。默认值为None。
+    - **resample** (str) - 插值方法。可取值为 'BILINEAR'(双线性插值)、'TRILINEAR'(三线性插值)、'NEAREST'(最近邻插值)或 'BICUBIC'(双三次插值)。默认值为 'BILINEAR'。
+    - **actual_shape** (Variable) - 可选输入,用于动态指定输出形状。如果指定actual_shape,图像将根据给定的形状调整大小,而不是根据指定形状的 :code:`out_shape` 和 :code:`scale` 进行调整。也就是说, :code:`actual_shape` 具有最高的优先级。如果希望动态指定输出形状,建议使用 :code:`out_shape` ,因为 :code:`actual_shape` 未来将被弃用。在使用actual_shape指定输出形状时,还需要设置out_shape和scale之一,否则在图形构建阶段会出现错误。默认值:None。
+    - **align_corners** (bool) - 一个可选的bool型参数,如果为True,则将输入和输出张量的4个角落像素的中心对齐,并保留角点像素的值。默认值为True。
+    - **align_mode** (int) - 双线性插值的可选项。可以是 '0',代表 src_idx = scale * (dst_index + 0.5) - 0.5;也可以是 '1',代表 src_idx = scale * dst_index。
+    - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致。对于4-D Tensor,支持 NCHW(num_batches, channels, height, width) 或者 NHWC(num_batches, height, width, channels);对于5-D Tensor,支持 NCDHW(num_batches, channels, depth, height, width)或者 NDHWC(num_batches, depth, height, width, channels)。默认值:'NCHW'。
+
+返回:4-D Tensor,形状为 (num_batches, channels, out_h, out_w) 或 (num_batches, out_h, out_w, channels);或者5-D Tensor,形状为 (num_batches, channels, out_d, out_h, out_w) 或 (num_batches, out_d, out_h, out_w, channels)。
+
+返回类型: 变量(Variable)
+
+抛出异常:
+    - :code:`TypeError` - out_shape应该是一个列表、元组或变量。
+    - :code:`TypeError` - actual_shape应该是变量或None。
+    - :code:`ValueError` - "resample" 只能是 "BILINEAR"、"TRILINEAR"、"NEAREST" 或 "BICUBIC"。
+    - :code:`ValueError` - out_shape 和 scale 不可同时为 None。
+    - :code:`ValueError` - 如果输入是4-D Tensor,out_shape 的长度必须为2。
+    - :code:`ValueError` - 如果输入是5-D Tensor,out_shape 的长度必须为3。
+    - :code:`ValueError` - scale应大于0。
+    - :code:`TypeError` - align_corners 应为bool型。
+    - :code:`ValueError` - align_mode 只能取 '0' 或 '1'。
+    - :code:`ValueError` - data_format 只能取 'NCHW'、'NHWC'、'NCDHW' 或者 'NDHWC'。
+
+**代码示例**
+
+.. code-block:: python
+
+    import paddle
+    import paddle.fluid as fluid   # 本示例使用了 fluid.data、fluid.Executor,需引入 fluid
+    import numpy as np
+
+    input = fluid.data(name="input", shape=[None, 3, 6, 10])
+    output = paddle.nn.functional.interpolate(input=input, out_shape=[12, 12])
+    place = fluid.CPUPlace()
+    exe = fluid.Executor(place)
+    exe.run(fluid.default_startup_program())
+    input_data = np.random.rand(2, 3, 6, 10).astype("float32")
+    output_data = exe.run(fluid.default_main_program(),
+                          feed={"input": input_data},
+                          fetch_list=[output],
+                          return_numpy=True)
+    print(output_data[0].shape)
+    # (2, 3, 12, 12)
+
+    # imperative mode
+    import paddle.fluid.dygraph as dg
+    with dg.guard(place) as g:
+        input = dg.to_variable(input_data)
+        output = paddle.nn.functional.interpolate(input=input, out_shape=[12, 12])
+        print(output.shape)
+        # [2L, 3L, 12L, 12L]
+
diff --git a/doc/paddle/api/paddle/nn/functional/common/pad_cn.rst b/doc/paddle/api/paddle/nn/functional/common/pad_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d4d7b998b550be9cf85b9c02ab2e05b6414a68fe
--- /dev/null
+++ b/doc/paddle/api/paddle/nn/functional/common/pad_cn.rst
@@ -0,0 +1,97 @@
+.. _cn_api_nn_cn_pad:
+
+pad
+-------------------------------
+
+.. 
py:function:: paddle.nn.functional.pad(x, pad, mode="constant", value=0.0, data_format="NCHW", name=None) + +该OP依照 pad 和 mode 属性对input进行 ``pad`` 。 + +参数: + - **x** (Tensor) - Tensor,format可以为 `'NCL'`, `'NLC'`, `'NCHW'`, `'NHWC'`, `'NCDHW'` + 或 `'NDHWC'`,默认值为`'NCHW'`,数据类型支持float16, float32, float64, int32, int64。 + - **pad** (Tensor | List[int32]) - 填充大小。当输入维度为3时,pad的格式为[pad_left, pad_right]; + 当输入维度为4时,pad的格式为[pad_left, pad_right, pad_top, pad_bottom]; + 当输入维度为5时,pad的格式为[pad_left, pad_right, pad_top, pad_bottom, pad_front, pad_back]。 + - **mode** (str) - padding的四种模式,分别为 `'constant'`, `'reflect'`, `'replicate'` 和`'circular'`。 + `'constant'` 表示填充常数 `value`;`'reflect'` 表示填充以input边界值为轴的映射;`'replicate'` 表示 + 填充input边界值;`'circular'`为循环填充input。具体结果可见以下示例。默认值为 `'constant'` 。 + - **value** (float32) - 以 `'constant'` 模式填充区域时填充的值。默认值为0.0。 + - **data_format** (str) - 指定input的format,可为 `'NCL'`, `'NLC'`, `'NCHW'`, `'NHWC'`, `'NCDHW'` + 或 `'NDHWC'`,默认值为`'NCHW'`。 + - **name** (str, 可选) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,缺省值为None。 +返回: 对input进行``pad`` 的结果,数据类型和input相同。 + +返回类型:Tensor + +**示例**: + +.. code-block:: text + + x = [[[[[1., 2., 3.], + [4., 5., 6.]]]]] + + Case 0: + pad = [2, 2, 1, 1, 0, 0], + mode = 'constant' + pad_value = 0 + Out = [[[[[0. 0. 0. 0. 0. 0. 0.] + [0. 0. 1. 2. 3. 0. 0.] + [0. 0. 4. 5. 6. 0. 0.] + [0. 0. 0. 0. 0. 0. 0.]]]]] + + Case 1: + pad = [2, 2, 1, 1, 0, 0], + mode = 'reflect' + Out = [[[[[6. 5. 4. 5. 6. 5. 4.] + [3. 2. 1. 2. 3. 2. 1.] + [6. 5. 4. 5. 6. 5. 4.] + [3. 2. 1. 2. 3. 2. 1.]]]]] + + Case 2: + pad = [2, 2, 1, 1, 0, 0], + mode = 'replicate' + Out = [[[[[1. 1. 1. 2. 3. 3. 3.] + [1. 1. 1. 2. 3. 3. 3.] + [4. 4. 4. 5. 6. 6. 6.] + [4. 4. 4. 5. 6. 6. 6.]]]]] + + Case 3: + pad = [2, 2, 1, 1, 0, 0], + mode = 'circular' + Out = [[[[[5. 6. 4. 5. 6. 4. 5.] + [2. 3. 1. 2. 3. 1. 2.] + [5. 6. 4. 5. 6. 4. 5.] + [2. 3. 1. 2. 3. 1. 2.]]]]] + +**代码示例:** + +.. code-block:: python + + import numpy as np + import paddle + import paddle.nn.functional as F + + paddle.disable_static() + + # example 1 + x_shape = (1, 1, 3) + x = np.arange(np.prod(x_shape), dtype=np.float32).reshape(x_shape) + 1 + tensor_x = paddle.to_tensor(x) + y = F.pad(tensor_x, [2, 3], value=1, mode='constant') + print(y.numpy()) + # [[[1. 1. 1. 2. 3. 1. 1. 1.]]] + + # example 2 + x_shape = (1, 1, 2, 3) + x = np.arange(np.prod(x_shape), dtype=np.float32).reshape(x_shape) + 1 + tensor_x = paddle.to_tensor(x) + y = F.pad(tensor_x, [1, 2, 1, 1], value=1, mode='circular') + print(y.numpy()) + # [[[[6. 4. 5. 6. 4. 5.] + # [3. 1. 2. 3. 1. 2.] + # [6. 4. 5. 6. 4. 5.] + # [3. 1. 2. 3. 1. 2.]]]] + + + diff --git a/doc/paddle/api/paddle/nn/functional/conv/conv2d_cn.rst b/doc/paddle/api/paddle/nn/functional/conv/conv2d_cn.rst new file mode 100755 index 0000000000000000000000000000000000000000..aa03a61971f36611643c570e689bc254cb86e126 --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/conv/conv2d_cn.rst @@ -0,0 +1,106 @@ +conv2d +------------------------------- + +.. py:function:: paddle.nn.functional.conv2d(x, weight, bias=None, stride=1, padding=0, dilation=1, groups=1, data_format="NCHW", name=None) + +该OP是二维卷积层(convolution2d layer),根据输入、卷积核、步长(stride)、填充(padding)、空洞大小(dilations)一组参数计算输出特征层大小。输入和输出是NCHW或NHWC格式,其中N是批尺寸,C是通道数,H是特征高度,W是特征宽度。卷积核是MCHW格式,M是输出图像通道数,C是输入图像通道数,H是卷积核高度,W是卷积核宽度。如果组数(groups)大于1,C等于输入图像通道数除以组数的结果。详情请参考UFLDL's : `卷积 `_ 。如果bias_attr不为False,卷积计算会添加偏置项。如果指定了激活函数类型,相应的激活函数会作用在最终结果上。 + +对每个输入X,有等式: + +.. 
math:: + + Out = \sigma \left ( W * X + b \right ) + +其中: + - :math:`X` :输入值,NCHW或NHWC格式的4-D Tensor + - :math:`W` :卷积核值,MCHW格式的4-D Tensor + - :math:`*` :卷积操作 + - :math:`b` :偏置值,2-D Tensor,形状为 ``[M,1]`` + - :math:`\sigma` :激活函数 + - :math:`Out` :输出值,NCHW或NHWC格式的4-D Tensor, 和 ``X`` 的形状可能不同 + +**示例** + +- 输入: + + 输入形状::math:`(N,C_{in},H_{in},W_{in})` + + 卷积核形状: :math:`(C_{out},C_{in},H_{f},W_{f})` + +- 输出: + + 输出形状: :math:`(N,C_{out},H_{out},W_{out})` + +其中 + +.. math:: + + H_{out} &= \frac{\left ( H_{in} + padding\_height\_top + padding\_height\_bottom-\left ( dilation[0]*\left ( H_{f}-1 \right )+1 \right ) \right )}{stride[0]}+1 + + W_{out} &= \frac{\left ( W_{in} + padding\_width\_left + padding\_width\_right -\left ( dilation[1]*\left ( W_{f}-1 \right )+1 \right ) \right )}{stride[1]}+1 + +如果 ``padding`` = "SAME": + +.. math:: + H_{out} = \frac{(H_{in} + stride[0] - 1)}{stride[0]} + +.. math:: + W_{out} = \frac{(W_{in} + stride[1] - 1)}{stride[1]} + +如果 ``padding`` = "VALID": + +.. math:: + H_{out} = \frac{\left ( H_{in} -\left ( dilation[0]*\left ( H_{f}-1 \right )+1 \right ) \right )}{stride[0]}+1 + + W_{out} = \frac{\left ( W_{in} -\left ( dilation[1]*\left ( W_{f}-1 \right )+1 \right ) \right )}{stride[1]}+1 + + +参数: + - **x** (Tensor) - 输入是形状为 :math:`[N, C, H, W]` 或 :math:`[N, H, W, C]` 的4-D Tensor,N是批尺寸,C是通道数,H是特征高度,W是特征宽度,数据类型为float16, float32或float64。 + - **weight** (Tensor)) - 形状为 :math:`[M, C/g, kH, kW]` 的卷积核。 M是输出通道数, g是分组的个数,kH是卷积核的高度,kW是卷积核的宽度。 + - **bias** (int|list|tuple) - 偏置项,形状为: :math:`[M,]` 。 + - **stride** (int|list|tuple,可选) - 步长大小。卷积核和输入进行卷积计算时滑动的步长。如果它是一个列表或元组,则必须包含两个整型数:(stride_height,stride_width)。若为一个整数,stride_height = stride_width = stride。默认值:1。 + - **padding** (int|list|tuple|str,可选) - 填充大小。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有3种格式:(1)包含4个二元组:当 ``data_format`` 为"NCHW"时为 [[0,0], [0,0], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right]],当 ``data_format`` 为"NHWC"时为[[0,0], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right], [0,0]];(2)包含4个整数值:[padding_height_top, padding_height_bottom, padding_width_left, padding_width_right];(3)包含2个整数值:[padding_height, padding_width],此时padding_height_top = padding_height_bottom = padding_height, padding_width_left = padding_width_right = padding_width。若为一个整数,padding_height = padding_width = padding。默认值:0。 + - **dilation** (int|list|tuple,可选) - 空洞大小。空洞卷积时会使用该参数,卷积核对输入进行卷积时,感受野里每相邻两个特征点之间的空洞信息。如果空洞大小为列表或元组,则必须包含两个整型数:(dilation_height,dilation_width)。若为一个整数,dilation_height = dilation_width = dilation。默认值:1。 + - **groups** (int,可选) - 二维卷积层的组数。根据Alex Krizhevsky的深度卷积神经网络(CNN)论文中的成组卷积:当group=n,输入和卷积核分别根据通道数量平均分为n组,第一组卷积核和第一组输入进行卷积计算,第二组卷积核和第二组输入进行卷积计算,……,第n组卷积核和第n组输入进行卷积计算。默认值:1。 + - **weight_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr|bool,可选)- 指定偏置参数属性的对象。若 ``bias_attr`` 为bool类型,只支持为False,表示没有偏置参数。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 + - **name** (str,可选) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值:None。 + +返回:4-D Tensor,数据类型与 ``x`` 一致。返回卷积的结果。 + +返回类型:Tensor。 + +抛出异常: + - ``ValueError`` - 如果 ``data_format`` 既不是"NCHW"也不是"NHWC"。 + - ``ValueError`` - 如果 ``input`` 的通道数未被明确定义。 + - ``ValueError`` - 如果 ``padding`` 是字符串,既不是"SAME"也不是"VALID"。 + - ``ValueError`` - 如果 ``padding`` 
含有4个二元组,与批尺寸对应维度的值不为0或者与通道对应维度的值不为0。 + - ``ShapeError`` - 如果输入不是4-D Tensor。 + - ``ShapeError`` - 如果输入和卷积核的维度大小不相同。 + - ``ShapeError`` - 如果输入的维度大小与 ``stride`` 之差不是2。 + - ``ShapeError`` - 如果输出的通道数不能被 ``groups`` 整除。 + + +**代码示例**: + +.. code-block:: python + + import paddle + import paddle.nn.functional as F + import numpy as np + + x = np.random.randn(2, 3, 8, 8).astype(np.float32) + w = np.random.randn(6, 3, 3, 3).astype(np.float32) + + paddle.disable_static() + + x_var = paddle.to_tensor(x) + w_var = paddle.to_tensor(w) + y_var = F.conv2d(x_var, w_var) + y_np = y_var.numpy() + + print(y_np.shape) + + # (2, 6, 6, 6) \ No newline at end of file diff --git a/doc/paddle/api/paddle/nn/functional/conv/conv3d_cn.rst b/doc/paddle/api/paddle/nn/functional/conv/conv3d_cn.rst new file mode 100755 index 0000000000000000000000000000000000000000..a96871b1ca9f0540cdadae0e1ba1ce90b5178aac --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/conv/conv3d_cn.rst @@ -0,0 +1,83 @@ +conv3d +------------------------------- + +.. py:function:: paddle.nn.functional.conv3d(x, weight, bias=None, stride=1, padding=0, dilation=1, groups=1, data_format="NCHW", name=None) + +该OP是三维卷积层(convolution3D layer),根据输入、卷积核、步长(stride)、填充(padding)、空洞大小(dilations)一组参数计算得到输出特征层大小。输入和输出是NCDHW或NDWHC格式,其中N是批尺寸,C是通道数,D是特征层深度,H是特征层高度,W是特征层宽度。三维卷积(Convlution3D)和二维卷积(Convlution2D)相似,但多了一维深度信息(depth)。如果bias_attr不为False,卷积计算会添加偏置项。 + +对每个输入X,有等式: + +.. math:: + + Out = \sigma \left ( W * X + b \right ) + +其中: + - :math:`X` :输入值,NCDHW或NDHWC格式的5-D Tensor + - :math:`W` :卷积核值,MCDHW格式的5-D Tensor + - :math:`*` :卷积操作 + - :math:`b` :偏置值,2-D Tensor,形为 ``[M,1]`` + - :math:`\sigma` :激活函数 + - :math:`Out` :输出值, NCDHW或NDHWC格式的5-D Tensor,和 ``X`` 的形状可能不同 + +**示例** + +- 输入: + + 输入形状: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})` + + 卷积核形状: :math:`(C_{out}, C_{in}, D_f, H_f, W_f)` + +- 输出: + + 输出形状: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})` + +参数: + - **x** (Tensor) - 输入是形状为 :math:`[N, C, H, W]` 或 :math:`[N, H, W, C]` 的4-D Tensor,N是批尺寸,C是通道数,H是特征高度,W是特征宽度,数据类型为float16, float32或float64。 + - **weight** (Tensor) - 形状为 :math:`[M, C/g, kH, kW]` 的卷积核(卷积核)。 M是输出通道数, g是分组的个数,kH是卷积核的高度,kW是卷积核的宽度。 + - **bias** (int|list|tuple) - 偏置项,形状为: :math:`[M,]` 。 + - **stride** (int|list|tuple,可选) - 步长大小。卷积核和输入进行卷积计算时滑动的步长。如果它是一个列表或元组,则必须包含两个整型数:(stride_height,stride_width)。若为一个整数,stride_height = stride_width = stride。默认值:1。 + - **padding** (int|list|tuple|str,可选) - 填充大小。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有3种格式:(1)包含5个二元组:当 ``data_format`` 为"NCDHW"时为 [[0,0], [0,0], [padding_depth_front, padding_depth_back], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right]],当 ``data_format`` 为"NDHWC"时为[[0,0], [padding_depth_front, padding_depth_back], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right], [0,0]];(2)包含6个整数值:[padding_depth_front, padding_depth_back, padding_height_top, padding_height_bottom, padding_width_left, padding_width_right];(3)包含3个整数值:[padding_depth, padding_height, padding_width],此时 padding_depth_front = padding_depth_back = padding_depth, padding_height_top = padding_height_bottom = padding_height, padding_width_left = padding_width_right = padding_width。若为一个整数,padding_depth = padding_height = padding_width = padding。默认值:0。 + - **dilation** (int|list|tuple,可选) - 空洞大小。空洞卷积时会使用该参数,卷积核对输入进行卷积时,感受野里每相邻两个特征点之间的空洞信息。如果空洞大小为列表或元组,则必须包含两个整型数:(dilation_height,dilation_width)。若为一个整数,dilation_height = dilation_width = 
dilation。默认值:1。 + - **groups** (int,可选) - 二维卷积层的组数。根据Alex Krizhevsky的深度卷积神经网络(CNN)论文中的成组卷积:当group=n,输入和卷积核分别根据通道数量平均分为n组,第一组卷积核和第一组输入进行卷积计算,第二组卷积核和第二组输入进行卷积计算,……,第n组卷积核和第n组输入进行卷积计算。默认值:1。 + - **weight_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr|bool,可选)- 指定偏置参数属性的对象。若 ``bias_attr`` 为bool类型,只支持为False,表示没有偏置参数。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 + - **name** (str,可选) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值:None。 + +返回:5-D Tensor,数据类型与 ``input`` 一致。返回卷积计算的结果。 + +返回类型:Tensor。 + +抛出异常: + - ``ValueError`` - 如果 ``use_cudnn`` 不是bool值。 + - ``ValueError`` - 如果 ``data_format`` 既不是"NCDHW"也不是"NDHWC"。 + - ``ValueError`` - 如果 ``input`` 的通道数未被明确定义。 + - ``ValueError`` - 如果 ``padding`` 是字符串,既不是"SAME"也不是"VALID"。 + - ``ValueError`` - 如果 ``padding`` 含有5个二元组,与批尺寸对应维度的值不为0或者与通道对应维度的值不为0。 + - ``ShapeError`` - 如果输入不是5-D Tensor。 + - ``ShapeError`` - 如果输入和卷积核的维度大小不相同。 + - ``ShapeError`` - 如果输入的维度大小与 ``stride`` 之差不是2。 + - ``ShapeError`` - 如果输出的通道数不能被 ``groups`` 整除。 + + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle + import paddle.nn.functional as F + + x = np.random.randn(2, 3, 8, 8, 8).astype(np.float32) + w = np.random.randn(6, 3, 3, 3, 3).astype(np.float32) + + paddle.disable_static() + + x_var = paddle.to_tensor(x) + w_var = paddle.to_tensor(w) + y_var = F.conv3d(x_var, w_var) + + y_np = y_var.numpy() + print(y_np.shape) + + # (2, 6, 6, 6, 6) \ No newline at end of file diff --git a/doc/paddle/api/paddle/nn/functional/conv/conv_transpose2d_cn.rst b/doc/paddle/api/paddle/nn/functional/conv/conv_transpose2d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b2b5b68eb5b2fd410d051b05af1fe5b124aec74b --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/conv/conv_transpose2d_cn.rst @@ -0,0 +1,120 @@ + +conv_transpose2d +------------------------------- + + +.. py:function:: paddle.nn.functional.conv_transpose2d(x, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1, data_format='NCHW', output_size=None, name=None) + + + +二维转置卷积层(Convlution2D transpose layer) + +该层根据输入(input)、卷积核(kernel)和空洞大小(dilations)、步长(stride)、填充(padding)来计算输出特征层大小或者通过output_size指定输出特征层大小。输入(Input)和输出(Output)为NCHW或NHWC格式,其中N为批尺寸,C为通道数(channel),H为特征层高度,W为特征层宽度。卷积核是MCHW格式,M是输出图像通道数,C是输入图像通道数,H是卷积核高度,W是卷积核宽度。如果组数大于1,C等于输入图像通道数除以组数的结果。转置卷积的计算过程相当于卷积的反向计算。转置卷积又被称为反卷积(但其实并不是真正的反卷积)。欲了解转置卷积层细节,请参考下面的说明和 参考文献_ 。如果参数bias_attr不为False, 转置卷积计算会添加偏置项。如果act不为None,则转置卷积计算之后添加相应的激活函数。 + +.. _参考文献: https://arxiv.org/pdf/1603.07285.pdf + + +输入 :math:`X` 和输出 :math:`Out` 函数关系如下: + +.. math:: + Out=\sigma (W*X+b)\\ + +其中: + - :math:`X` : 输入,具有NCHW或NHWC格式的4-D Tensor + - :math:`W` : 卷积核,具有NCHW格式的4-D Tensor + - :math:`*` : 卷积计算(注意:转置卷积本质上的计算还是卷积) + - :math:`b` : 偏置(bias),2-D Tensor,形状为 ``[M,1]`` + - :math:`σ` : 激活函数 + - :math:`Out` : 输出值,NCHW或NHWC格式的4-D Tensor, 和 ``X`` 的形状可能不同 + +**示例** + +- 输入: + + 输入Tensor的形状: :math:`(N,C_{in}, H_{in}, W_{in})` + + 卷积核的形状 : :math:`(C_{in}, C_{out}, H_f, W_f)` + +- 输出: + + 输出Tensor的形状 : :math:`(N,C_{out}, H_{out}, W_{out})` + +其中 + +.. 
math:: + + & H'_{out} = (H_{in}-1)*strides[0] - pad\_height\_top - pad\_height\_bottom + dilations[0]*(H_f-1)+1\\ + & W'_{out} = (W_{in}-1)*strides[1]- pad\_width\_left - pad\_width\_right + dilations[1]*(W_f-1)+1 \\ + & H_{out}\in[H'_{out},H'_{out} + strides[0])\\ + & W_{out}\in[W'_{out},W'_{out} + strides[1])\\ + +如果 ``padding`` = "SAME": + +.. math:: + & H'_{out} = \frac{(H_{in} + stride[0] - 1)}{stride[0]}\\ + & W'_{out} = \frac{(W_{in} + stride[1] - 1)}{stride[1]}\\ + +如果 ``padding`` = "VALID": + +.. math:: + & H'_{out} = (H_{in}-1)*strides[0] + dilations[0]*(H_f-1)+1\\ + & W'_{out} = (W_{in}-1)*strides[1] + dilations[1]*(W_f-1)+1 \\ + +注意: + +如果output_size为None,则 :math:`H_{out}` = :math:`H^\prime_{out}` , :math:`W_{out}` = :math:`W^\prime_{out}` ;否则,指定的output_size_height(输出特征层的高) :math:`H_{out}` 应当介于 :math:`H^\prime_{out}` 和 :math:`H^\prime_{out} + strides[0]` 之间(不包含 :math:`H^\prime_{out} + strides[0]` ), 并且指定的output_size_width(输出特征层的宽) :math:`W_{out}` 应当介于 :math:`W^\prime_{out}` 和 :math:`W^\prime_{out} + strides[1]` 之间(不包含 :math:`W^\prime_{out} + strides[1]` )。 + +由于转置卷积可以当成是卷积的反向计算,而根据卷积的输入输出计算公式来说,不同大小的输入特征层可能对应着相同大小的输出特征层,所以对应到转置卷积来说,固定大小的输入特征层对应的输出特征层大小并不唯一。 + +如果指定了output_size, ``conv2d_transpose`` 可以自动计算卷积核的大小。 + +参数: + - **x** (Tensor) - 输入是形状为 :math:`[N, C, H, W]` 或 :math:`[N, H, W, C]` 的4-D Tensor,N是批尺寸,C是通道数,H是特征高度,W是特征宽度,数据类型为float16, float32或float64。 + - **weight** (Tensor) - 形状为 :math:`[C, M/g, kH, kW]` 的卷积核(卷积核)。 M是输出通道数, g是分组的个数,kH是卷积核的高度,kW是卷积核的宽度。 + - **bias** (int|list|tuple) - 偏置项,形状为: :math:`[M,]` 。 + - **stride** (int|list|tuple,可选) - 步长大小。如果 ``stride`` 为元组,则必须包含两个整型数,分别表示垂直和水平滑动步长。否则,表示垂直和水平滑动步长均为 ``stride`` 。默认值:1。 + - **padding** (int|list|tuple|str,可选) - 填充大小。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有3种格式:(1)包含4个二元组:当 ``data_format`` 为"NCHW"时为 [[0,0], [0,0], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right]],当 ``data_format`` 为"NHWC"时为[[0,0], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right], [0,0]];(2)包含4个整数值:[padding_height_top, padding_height_bottom, padding_width_left, padding_width_right];(3)包含2个整数值:[padding_height, padding_width],此时padding_height_top = padding_height_bottom = padding_height, padding_width_left = padding_width_right = padding_width。若为一个整数,padding_height = padding_width = padding。默认值:0。 + - **output_padding** (int|list|tuple, optional): 输出形状上一侧额外添加的大小. 默认值: 0. 
+ - **dilation** (int|list|tuple,可选) - 空洞大小。空洞卷积时会使用该参数,卷积核对输入进行卷积时,感受野里每相邻两个特征点之间的空洞信息。如果空洞大小为列表或元组,则必须包含两个整型数:(dilation_height,dilation_width)。若为一个整数,dilation_height = dilation_width = dilation。默认值:1。 + - **groups** (int,可选) - 二维卷积层的组数。根据Alex Krizhevsky的深度卷积神经网络(CNN)论文中的成组卷积:当group=n,输入和卷积核分别根据通道数量平均分为n组,第一组卷积核和第一组输入进行卷积计算,第二组卷积核和第二组输入进行卷积计算,……,第n组卷积核和第n组输入进行卷积计算。默认值:1。 + - **weight_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr|bool,可选)- 指定偏置参数属性的对象。若 ``bias_attr`` 为bool类型,只支持为False,表示没有偏置参数。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 + - **name** (str,可选) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值:None。 + + +返回:4-D Tensor,数据类型与 ``input`` 一致。如果未指定激活层,则返回转置卷积计算的结果,如果指定激活层,则返回转置卷积和激活计算之后的最终结果。 + +返回类型:Variable + +抛出异常: + - ``ValueError`` : 如果输入的shape、kernel_size、stride、padding和groups不匹配,抛出ValueError + - ``ValueError`` - 如果 ``data_format`` 既不是"NCHW"也不是"NHWC"。 + - ``ValueError`` - 如果 ``padding`` 是字符串,既不是"SAME"也不是"VALID"。 + - ``ValueError`` - 如果 ``padding`` 含有4个二元组,与批尺寸对应维度的值不为0或者与通道对应维度的值不为0。 + - ``ValueError`` - 如果 ``output_size`` 和 ``filter_size`` 同时为None。 + - ``ShapeError`` - 如果输入不是4-D Tensor。 + - ``ShapeError`` - 如果输入和卷积核的维度大小不相同。 + - ``ShapeError`` - 如果输入的维度大小与 ``stride`` 之差不是2。 + +**代码示例** + +.. code-block:: python + + import numpy as np + import paddle + import paddle.nn.functional as F + + x = np.random.randn(2, 3, 8, 8).astype(np.float32) + w = np.random.randn(3, 6, 3, 3).astype(np.float32) + + paddle.disable_static() + x_var = paddle.to_tensor(x) + w_var = paddle.to_tensor(w) + y_var = F.conv_transpose2d(x_var, w_var) + y_np = y_var.numpy() + print(y_np.shape) + + # (2, 6, 10, 10) + + diff --git a/doc/paddle/api/paddle/nn/functional/conv/conv_transpose3d_cn.rst b/doc/paddle/api/paddle/nn/functional/conv/conv_transpose3d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4c78cc746ea70c3e2cfebb2c30da4c4c19b84899 --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/conv/conv_transpose3d_cn.rst @@ -0,0 +1,124 @@ +conv_transpose3d +------------------------------- + + +.. py:function:: paddle.nn.functional.conv_transpose3d(x, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1, data_format='NCHW', output_size=None, name=None) + + + + +三维转置卷积层(Convlution3d transpose layer) + +该层根据输入(input)、卷积核(kernel)和卷积核空洞大小(dilations)、步长(stride)、填充(padding)来计算输出特征层大小或者通过output_size指定输出特征层大小。输入(Input)和输出(Output)为NCDHW或者NDHWC格式。其中N为批尺寸,C为通道数(channel),D为特征深度,H为特征层高度,W为特征层宽度。转置卷积的计算过程相当于卷积的反向计算。转置卷积又被称为反卷积(但其实并不是真正的反卷积)。欲了解卷积转置层细节,请参考下面的说明和 参考文献_ 。如果参数bias_attr不为False, 转置卷积计算会添加偏置项。 + +.. _参考文献: http://www.matthewzeiler.com/wp-content/uploads/2017/07/cvpr2010.pdf + +输入 :math:`X` 和输出 :math:`Out` 函数关系如下: + +.. math:: + \\Out=\sigma (W*X+b)\\ + +其中: + - :math:`X` : 输入,具有NCDHW或NDHWC格式的5-D Tensor + - :math:`W` : 卷积核,具有NCDHW格式的5-D Tensor + - :math:`*` : 卷积操作(注意:转置卷积本质上的计算还是卷积) + - :math:`b` : 偏置(bias),2-D Tensor,形状为 ``[M,1]`` + - :math:`σ` : 激活函数 + - :math:`Out` : 输出值,NCDHW或NDHWC格式的5-D Tensor,和 ``X`` 的形状可能不同 + +**示例** + +输入: + + 输入的shape::math:`(N,C_{in}, D_{in}, H_{in}, W_{in})` + + 卷积核的shape::math:`(C_{in}, C_{out}, D_f, H_f, W_f)` + +输出: + + 输出的shape::math:`(N,C_{out}, D_{out}, H_{out}, W_{out})` + + +其中: + +.. 
math:: + + & D'_{out}=(D_{in}-1)*strides[0] - pad\_depth\_front - pad\_depth\_back + dilations[0]*(D_f-1)+1\\ + & H'_{out}=(H_{in}-1)*strides[1] - pad\_height\_top - pad\_height\_bottom + dilations[1]*(H_f-1)+1\\ + & W'_{out}=(W_{in}-1)*strides[2] - pad\_width\_left - pad\_width\_right + dilations[2]*(W_f-1)+1\\ + & D_{out}\in[D'_{out},D'_{out} + strides[0])\\ + & H_{out}\in[H'_{out},H'_{out} + strides[1])\\ + & W_{out}\in[W'_{out},W'_{out} + strides[2])\\ + +如果 ``padding`` = "SAME": + +.. math:: + D'_{out} = \frac{(D_{in} + stride[0] - 1)}{stride[0]}\\ + H'_{out} = \frac{(H_{in} + stride[1] - 1)}{stride[1]}\\ + W'_{out} = \frac{(W_{in} + stride[2] - 1)}{stride[2]}\\ + +如果 ``padding`` = "VALID": + +.. math:: + D'_{out}=(D_{in}-1)*strides[0] + dilations[0]*(D_f-1)+1\\ + H'_{out}=(H_{in}-1)*strides[1] + dilations[1]*(H_f-1)+1\\ + W'_{out}=(W_{in}-1)*strides[2] + dilations[2]*(W_f-1)+1\\ + +注意: + +如果output_size为None,则 :math:`D_{out}` = :math:`D^\prime_{out}` , :math:`H_{out}` = :math:`H^\prime_{out}` , :math:`W_{out}` = :math:`W^\prime_{out}` ;否则,指定的output_size_depth(输出特征层的深度) :math:`D_{out}` 应当介于 :math:`D^\prime_{out}` 和 :math:`D^\prime_{out} + strides[0]` 之间(不包含 :math:`D^\prime_{out} + strides[0]` ),指定的output_size_height(输出特征层的高) :math:`H_{out}` 应当介于 :math:`H^\prime_{out}` 和 :math:`H^\prime_{out} + strides[1]` 之间(不包含 :math:`H^\prime_{out} + strides[1]` ), 并且指定的output_size_width(输出特征层的宽) :math:`W_{out}` 应当介于 :math:`W^\prime_{out}` 和 :math:`W^\prime_{out} + strides[2]` 之间(不包含 :math:`W^\prime_{out} + strides[2]` )。 + +由于转置卷积可以当成是卷积的反向计算,而根据卷积的输入输出计算公式来说,不同大小的输入特征层可能对应着相同大小的输出特征层,所以对应到转置卷积来说,固定大小的输入特征层对应的输出特征层大小并不唯一。 + +如果指定了output_size, ``conv_transpose3d`` 可以自动计算卷积核的大小。 + +参数: + - **x** (Tensor) - 形状为 :math:`[N, C, D, H, W]` 或 :math:`[N, D, H, W, C]` 的5-D Tensor,N是批尺寸,C是通道数,D是特征深度,H是特征高度,W是特征宽度,数据类型:float32或float64。 + - **weight** (Tensor) - 形状为 :math:`[C, M/g, kD, kH, kW]` 的卷积核。 M是输出通道数, g是分组的个数,kD是卷积核的深度,kH是卷积核的高度,kW是卷积核的宽度。 + - **bias** (int|list|tuple) - 偏置项,形状为: :math:`[M,]` 。 + - **stride** (int|list|tuple,可选) - 步长大小。如果 ``stride`` 为元组或列表,则必须包含三个整型数,分别表示深度,垂直和水平滑动步长。否则,表示深度,垂直和水平滑动步长均为 ``stride`` 。默认值:1。 + - **padding** (int|list|tuple|str,可选) - 填充padding大小。padding参数在输入特征层每边添加 ``dilation * (kernel_size - 1) - padding`` 个0。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有3种格式:(1)包含5个二元组:当 ``data_format`` 为"NCDHW"时为 [[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]],当 ``data_format`` 为"NDHWC"时为[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]];(2)包含6个整数值:[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right];(3)包含3个整数值:[pad_depth, pad_height, pad_width],此时 pad_depth_front = pad_depth_back = pad_depth, pad_height_top = pad_height_bottom = pad_height, pad_width_left = pad_width_right = pad_width。若为一个整数,pad_depth = pad_height = pad_width = padding。默认值:0。 + - **output_padding** (int|list|tuple, optional): 输出形状上一侧额外添加的大小. 默认值: 0. 
+ - **dilation** (int|list|tuple,可选) - 空洞大小。空洞卷积时会使用该参数,卷积核对输入进行卷积时,感受野里每相邻两个特征点之间的空洞信息。如果空洞大小为列表或元组,则必须包含两个整型数:(dilation_height,dilation_width)。若为一个整数,dilation_height = dilation_width = dilation。默认值:1。 + - **groups** (int,可选) - 三维转置卷积层的组数。从Alex Krizhevsky的CNN Deep论文中的群卷积中受到启发,当group=2时,输入和卷积核分别根据通道数量平均分为两组,第一组卷积核和第一组输入进行卷积计算,第二组卷积核和第二组输入进行卷积计算。默认:group = 1。 + - **weight_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr|bool,可选)- 指定偏置参数属性的对象。若 ``bias_attr`` 为bool类型,只支持为False,表示没有偏置参数。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 + - **name** (str,可选) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值:None。 + + +返回:5-D Tensor,数据类型与 ``input`` 一致。如果未指定激活层,则返回转置卷积计算的结果,如果指定激活层,则返回转置卷积和激活计算之后的最终结果。 + +返回类型:Tensor + +抛出异常: + - ``ValueError`` - 如果输入的shape、kernel_size、stride、padding和groups不匹配。 + - ``ValueError`` - 如果 ``data_format`` 既不是"NCDHW"也不是"NDHWC"。 + - ``ValueError`` - 如果 ``padding`` 是字符串,既不是"SAME"也不是"VALID"。 + - ``ValueError`` - 如果 ``padding`` 含有5个二元组,与批尺寸对应维度的值不为0或者与通道对应维度的值不为0。 + - ``ValueError`` - 如果 ``output_size`` 和 ``filter_size`` 同时为None。 + - ``ShapeError`` - 如果输入不是5-D Tensor。 + - ``ShapeError`` - 如果输入和卷积核的维度大小不相同。 + - ``ShapeError`` - 如果输入的维度大小与 ``stride`` 之差不是2。 + +**代码示例** + +.. code-block:: python + + import numpy as np + + import paddle + import paddle.nn.functional as F + + x = np.random.randn(2, 3, 8, 8, 8).astype(np.float32) + w = np.random.randn(3, 6, 3, 3, 3).astype(np.float32) + + paddle.disable_static() + + x_var = paddle.to_tensor(x) + w_var = paddle.to_tensor(w) + y_var = F.conv_transpose3d(x_var, w_var) + y_np = y_var.numpy() + print(y_np.shape) + + # (2, 6, 10, 10, 10) diff --git a/doc/paddle/api/paddle/nn/functional/extension/diag_embed_cn.rst b/doc/paddle/api/paddle/nn/functional/extension/diag_embed_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c84be759a21b99a0f54f04b551bf90ae01e403c1 --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/extension/diag_embed_cn.rst @@ -0,0 +1,76 @@ +.. _cn_api_functional_diag_embed: + +diag_embed +------------------------------- + +.. py:function:: paddle.functional.diag_embed(input, offset=0, dim1=-2, dim2=-1): + + + + + 该 OP 创建一个 Tensor,其在指定的 2D 平面(由 ``dim1`` 和 ``dim2`` 指定)上的对角线由输入 ``input`` 填充。 + 默认的,指定的 2D 平面由返回 Tensor 的最后两维组成。 + + 参数 ``offset`` 确定在指定的二维平面中填充对角线的位置: + + - 如果 offset = 0,则填充主对角线。 + - 如果 offset > 0,则填充主对角线右上的对角线。 + - 如果 offset < 0,则填充主对角线左下的对角线。 + +参数: + - **input** (Variable|numpy.ndarray)- 输入变量,至少为 1D 数组,支持数据类型为 float32,float64,int32,int64。 + - **offset** (int ,可选)- 从指定的二维平面中获取对角线的位置,默认值为 0,既主对角线。 + - **dim1** (int , 可选)- 填充对角线的二维平面的第一维,默认值为 -2。 + - **dim2** (int , 可选)- 填充对角线的二维平面的第二维,默认值为 -1。 + +返回: 指定二维平面填充了对角线的 Tensor。数据类型和输入数据类型一致。 + +返回类型: 变量(Variable) + +**代码示例** + +.. code-block:: python + + import paddle.nn.functional as F + import paddle.fluid.dygraph as dg + import numpy as np + + diag_embed = np.random.randn(2, 3).astype('float32') + # [[ 0.7545889 , -0.25074545, 0.5929117 ], + # [-0.6097662 , -0.01753256, 0.619769 ]] + with dg.guard(): + data1 = F.diag_embed(diag_embed) + data1.numpy() + # [[[ 0.7545889 , 0. , 0. ], + # [ 0. , -0.25074545, 0. ], + # [ 0. , 0. , 0.5929117 ]], + + # [[-0.6097662 , 0. , 0. ], + # [ 0. , -0.01753256, 0. ], + # [ 0. , 0. , 0.619769 ]]] + + data2 = F.diag_embed(diag_embed, offset=-1, dim1=0, dim2=2) + data2.numpy() + # [[[ 0. , 0. , 0. , 0. 
], + # [ 0.7545889 , 0. , 0. , 0. ], + # [ 0. , -0.25074545, 0. , 0. ], + # [ 0. , 0. , 0.5929117 , 0. ]], + # + # [[ 0. , 0. , 0. , 0. ], + # [-0.6097662 , 0. , 0. , 0. ], + # [ 0. , -0.01753256, 0. , 0. ], + # [ 0. , 0. , 0.619769 , 0. ]]] + + data3 = F.diag_embed(diag_embed, offset=1, dim1=0, dim2=2) + data3.numpy() + # [[[ 0. , 0.7545889 , 0. , 0. ], + # [ 0. , -0.6097662 , 0. , 0. ]], + # + # [[ 0. , 0. , -0.25074545, 0. ], + # [ 0. , 0. , -0.01753256, 0. ]], + # + # [[ 0. , 0. , 0. , 0.5929117 ], + # [ 0. , 0. , 0. , 0.619769 ]], + # + # [[ 0. , 0. , 0. , 0. ], + # [ 0. , 0. , 0. , 0. ]]] diff --git a/doc/paddle/api/paddle/nn/functional/extension/row_conv_cn.rst b/doc/paddle/api/paddle/nn/functional/extension/row_conv_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e89170ff266cc440da8bb850aa251e3da4876b64 --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/extension/row_conv_cn.rst @@ -0,0 +1,58 @@ +.. _cn_api_fluid_layers_row_conv: + +row_conv +------------------------------- + + +.. py:function:: paddle.fluid.layers.row_conv(input, future_context_size, param_attr=None, act=None) + + + + +该接口为行卷积(Row-convolution operator)或称之为超前卷积(lookahead convolution),最早介绍于DeepSpeech2论文中,论文链接: + + ``_ + +双向的RNN在深度语音模型中很有用,它通过对整个序列执行正向和反向传递来学习序列的表示。然而,与单向RNNs不同的是,在线部署和低延迟设置中,双向RNNs具有难度。超前卷积将来自未来子序列的信息以一种高效的方式进行计算,以改进单向递归神经网络。 row convolution operator 与一维序列卷积不同,计算方法如下: + +给定输入序列长度为 :math:`t` 的输入序列 :math:`X` 和输入维度 :math:`D` ,以及一个大小为 :math:`context * D` 的滤波器 :math:`W` ,输出序列卷积为: + +.. math:: + out_i = \sum_{j=i}^{i+context-1} X_{j} · W_{j-i} + +公式中: + - :math:`out_i` : 第i行输出变量形为[1, D]. + - :math:`context` : 下文(future context)大小 + - :math:`X_j` : 第j行输出变量,形为[1,D] + - :math:`W_{j-i}` : 第(j-i)行参数,其形状为[1,D]。 + +详细请参考 `设计文档 `_ 。 + +参数: + - **input** (Variable) -- 支持输入为LodTensor和Tensor,输入类型可以是[float32, float64],它支持可变时间长度的输入序列。当输入input为LodTensor时,其内部张量是一个具有形状(T x N)的矩阵,其中T是这个mini batch中的总的timestep,N是输入数据维数。当输入input为Tensor时,其形状为(B x T x N)的三维矩阵,B为mini batch大小,T为每个batch输入中的最大timestep,N是输入数据维数。当输入input为LoDTensor,形状为[9, N],LoD信息为[2, 3, 4],等价于输入input为形状是[3, 4, N]的Tensor。 + - **future_context_size** (int) -- 下文大小。请注意,卷积核的shape是[future_context_size + 1, N],N和输入input的数据维度N保持一致。 + - **param_attr** (ParamAttr) -- 参数的属性,包括名称、初始化器等。 + - **act** (str) -- 非线性激活函数。 + +返回:表示row_conv计算结果的Variable,数据类型、维度和输入input相同。 + + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + # LoDTensor input + x = fluid.layers.data(name='x', shape=[9, 16], + dtype='float32', lod_level=3, + append_batch_size=False) + out = fluid.layers.row_conv(input=x, future_context_size=2) + + # Tensor input + x = fluid.layers.data(name='x', shape=[9, 4, 16], + dtype='float32', + append_batch_size=False) + out = fluid.layers.row_conv(input=x, future_context_size=2) + + + diff --git a/doc/paddle/api/paddle/nn/functional/input/embedding_cn.rst b/doc/paddle/api/paddle/nn/functional/input/embedding_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a377e832f4e96481ba81e1bd684d28fef3164203 --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/input/embedding_cn.rst @@ -0,0 +1,102 @@ +.. _cn_api_fluid_layers_embedding: + +embedding +------------------------------- + + +.. 
py:function:: paddle.fluid.layers.embedding(input, size, is_sparse=False, is_distributed=False, padding_idx=None, param_attr=None, dtype='float32') + + + + +嵌入层(Embedding Layer) + +**注意:此OP将在未来的版本中被移除!该OP要求输入Tensor shape的最后一维必须为1。推荐使用fluid.** :ref:`cn_api_fluid_embedding` 。 + +该OP根据input中的id信息从embedding矩阵中查询对应embedding信息,并会根据输入的size (vocab_size, emb_size)和dtype自动构造一个二维embedding矩阵。 + +要求input的最后一维必须等于1,输出的Tensor的shape是将输入Tensor shape的最后一维的1替换为emb_size。 + +注:input中的id必须满足 ``0 =< id < size[0]``,否则程序会抛异常退出。 + + +:: + + Case 1: + + input是Tensor, 且padding_idx = -1 + input.data = [[[1], [3]], [[2], [4]], [[4], [127]]] + input.shape = [3, 2, 1] + 若size = [128, 16] + 输出为Tensor: + out.shape = [3, 2, 16] + out.data = [[[0.129435295, 0.244512452, ..., 0.436322452], + [0.345421456, 0.524563927, ..., 0.144534654]], + + [[0.345249859, 0.124939536, ..., 0.194353745], + [0.945345345, 0.435394634, ..., 0.435345365]], + + [[0.945345345, 0.435394634, ..., 0.435345365], + [0.0, 0.0, ..., 0.0 ]]] # padding data + 输入的padding_idx小于0,则自动转换为padding_idx = -1 + 128 = 127, 对于输入id为127的词,进行padding处理。 + + Case 2: + + input是lod level 为1的LoDTensor, 且padding_idx = 0 + input.lod = [[2, 3]] + input.data = [[1], [3], [2], [4], [0]] + input.shape = [5, 1] + 若size = [128, 16] + 输出为LoDTensor: + out.lod = [[2, 3]] + out.shape = [5, 16] + out.data = [[0.129435295, 0.244512452, ..., 0.436322452], + [0.345421456, 0.524563927, ..., 0.144534654], + [0.345249859, 0.124939536, ..., 0.194353745], + [0.945345345, 0.435394634, ..., 0.435345365], + [0.0, 0.0, ..., 0.0 ]] # padding data + 输入的padding_idx = 0,则对于输入id为0的词,进行padding处理。 + + +参数: + - **input** (Variable) - 存储id信息的Tensor或LoDTensor,数据类型必须为:int64,输入的shape最后一维须为1。input中的id必须满足 ``0 =< id < size[0]`` 。 + - **size** (tuple|list) - embedding矩阵的维度。必须包含两个元素,第一个元素为vocab_size(词表大小), 第二个为emb_size(embedding层维度)。 + - **is_sparse** (bool) - 是否使用稀疏的更新方式,这个参数只会影响反向的梯度更新的性能,sparse更新速度更快,推荐使用稀疏更新的方式。但某些optimizer不支持sparse更新,比如 :ref:`cn_api_fluid_optimizer_AdadeltaOptimizer` 、 :ref:`cn_api_fluid_optimizer_AdamaxOptimizer` 、 :ref:`cn_api_fluid_optimizer_DecayedAdagradOptimizer` 、 :ref:`cn_api_fluid_optimizer_FtrlOptimizer` 、 :ref:`cn_api_fluid_optimizer_LambOptimizer` 、:ref:`cn_api_fluid_optimizer_LarsMomentumOptimizer` ,此时is_sparse必须为False。默认为False。 + - **is_distributed** (bool) - 是否使用分布式的方式存储embedding矩阵,仅在多机分布式cpu训练中使用。默认为False。 + - **padding_idx** (int|long|None) - padding_idx需在区间[-vocab_size, vocab_size),否则不生效,padding_idx<0时,padding_idx会被改成vocab_size + padding_idx,input中等于padding_index的id对应的embedding信息会被设置为0,且这部分填充数据在训练时将不会被更新。如果为None,不作处理,默认为None。 + - **param_attr** (ParamAttr) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。此外,可以通过 ``param_attr`` 参数加载用户自定义或预训练的词向量。只需将本地词向量转为numpy数据格式,且保证本地词向量的shape和embedding的 ``size`` 参数一致,然后使用 :ref:`cn_api_fluid_initializer_NumpyArrayInitializer` 进行初始化,即可实现加载自定义或预训练的词向量。详细使用方法见代码示例2。 + - **dtype** (str|core.VarDesc.VarType) - 输出Tensor或LoDTensor的数据类型,数据类型必须为:float32或float64,默认为float32。 + +返回:input映射后得到的Embedding Tensor或LoDTensor,数据类型和dtype定义的类型一致。 + +返回类型:Variable + +**代码示例**: + +.. 
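+
+下面先给出一段仅作示意的简短代码(词表大小128、embedding维度64为随意选取,并非官方示例),用于说明 ``padding_idx`` 的用法:id 等于 ``padding_idx`` 的位置,其对应的embedding输出为全0,且在训练时不会被更新;完整示例见其后:
+
+.. code-block:: python
+
+    # 示意代码:padding_idx=0 时,id 为 0 的词对应的向量恒为 0
+    import paddle.fluid as fluid
+
+    data = fluid.layers.data(name='ids', shape=[1], dtype='int64', lod_level=1)
+    emb = fluid.layers.embedding(input=data, size=[128, 64], padding_idx=0)
+
+.. 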
code-block:: python
+
+ import paddle.fluid as fluid
+ import numpy as np
+
+ data = fluid.layers.data(name='sequence', shape=[1], dtype='int64', lod_level=1)
+
+ # 示例 1
+ emb_1 = fluid.layers.embedding(input=data, size=[128, 64])
+
+ # 示例 2: 加载用户自定义或预训练的词向量
+ weight_data = np.random.random(size=(128, 100)) # numpy格式的词向量数据
+ w_param_attrs = fluid.ParamAttr(
+ name="emb_weight",
+ learning_rate=0.5,
+ initializer=fluid.initializer.NumpyArrayInitializer(weight_data),
+ trainable=True)
+ emb_2 = fluid.layers.embedding(input=data, size=(128, 100), param_attr=w_param_attrs, dtype='float32')
+ + + + + + + + +
diff --git a/doc/paddle/api/paddle/nn/functional/input/one_hot_cn.rst b/doc/paddle/api/paddle/nn/functional/input/one_hot_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9df99a99421004e31491c28c6223d3b0bfc3f752 --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/input/one_hot_cn.rst @@ -0,0 +1,60 @@
+.. _cn_api_nn_functional_one_hot:
+
+one_hot
+-------------------------------
+
+.. py:function:: paddle.nn.functional.one_hot(x, num_classes, name=None)
+
+该OP将输入 ``x`` 中的每个id转换为一个one-hot向量,其长度为 ``num_classes`` ,该id对应的向量维度上的值为1,其余维度的值为0。
+
+输出的Tensor的shape是在输入shape的最后一维后面添加了num_classes的维度。
+
+- 示例1:
+
+.. code-block:: text
+
+ 输入:
+ X.shape = [4]
+ X.data = [1, 1, 3, 0]
+ num_classes = 4
+
+ 输出:
+ Out.shape = [4, 4]
+ Out.data = [[0., 1., 0., 0.],
+ [0., 1., 0., 0.],
+ [0., 0., 0., 1.],
+ [1., 0., 0., 0.]]
+
+- 示例2:
+
+.. code-block:: text
+
+ 输入:
+ X.shape = [4]
+ X.data = [1, 1, 5, 0]
+ num_classes = 4
+
+ 输出:抛出 Illegal value 的异常
+ X中下标为2的元素值是5,超出了num_classes的范围,因此抛异常。
+
+
+参数:
+ - **x** (Tensor) - 维度为 :math:`[N_1, ..., N_n]` 的多维Tensor,维度至少1维。数据类型为int32或int64。
+ - **num_classes** (int) - 用于定义一个one-hot向量的长度。若输入为词id,则 ``num_classes`` 通常取值为词典大小。
+ - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。
+
+返回:Tensor,转换后的one_hot Tensor,数据类型为float32。
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle
+ label = paddle.data(name="label", shape=[4], dtype="int64")
+ # label.shape = [4]
+ # label.data = [1, 1, 3, 0]
+ one_hot_label = paddle.nn.functional.one_hot(x=label, num_classes=4)
+ # one_hot_label.shape = [4, 4]
+ # one_hot_label.data = [[0., 1., 0., 0.],
+ # [0., 1., 0., 0.],
+ # [0., 0., 0., 1.],
+ # [1., 0., 0., 0.]] \ No newline at end of file diff --git a/doc/paddle/api/paddle/nn/functional/loss/binary_cross_entropy_cn.rst b/doc/paddle/api/paddle/nn/functional/loss/binary_cross_entropy_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..11f77916759816ce1ddb451263e3bb42106ee49a --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/loss/binary_cross_entropy_cn.rst @@ -0,0 +1,61 @@
+.. _cn_api_nn_functional_binary_cross_entropy:
+
+binary_cross_entropy
+-------------------------------
+
+.. py:function:: paddle.nn.functional.binary_cross_entropy(input, label, weight=None, reduction='mean', name=None)
+
+该函数用于计算输入 ``input`` 和标签 ``label`` 之间的二值交叉熵损失值。二值交叉熵损失函数公式如下:
+
+当 `weight` 不为空时,公式为:
+
+.. math::
+ Out = -1 * weight * (label * log(input) + (1 - label) * log(1 - input))
+
+当 `weight` 为空时,公式为:
+
+.. math::
+ Out = -1 * (label * log(input) + (1 - label) * log(1 - input))
+
+当 `reduction` 为 `none` 时,直接返回最原始的 `Out` 结果。
+
+当 `reduction` 为 `mean` 时,最终的输出结果为:
+
+.. math::
+ Out = MEAN(Out)
+
+当 `reduction` 为 `sum` 时,最终的输出结果为:
+
+.. math::
+ Out = SUM(Out)
+
+
+.. 
note:: + 输入数据 ``input`` 一般是 ``sigmoid`` 的输出。因为是二分类,所以标签值 ``label`` 应该是0或者1。 + +参数 +::::::::: + - **input** (Tensor) - :math:`[N, *]` , 其中N是batch_size, `*` 是任意其他维度。输入数据 ``input`` 一般是 ``sigmoid`` 的输出。数据类型是float32、float64。 + - **label** (Tensor) - :math:`[N, *]` ,标签 ``label`` 的维度、数据类型与输入 ``input`` 相同。 + - **weight** (Tensor,可选) - 手动指定每个batch二值交叉熵的权重,如果指定的话,维度必须是一个batch的数据的维度。数据类型是float32, float64。默认值是:None。 + - **reduction** (str,可选) - 指定应用于输出结果的计算方式,可选值有: ``'none'``, ``'mean'``, ``'sum'`` 。默认为 ``'mean'``,计算 `BCELoss` 的均值;设置为 ``'sum'`` 时,计算 `BCELoss` 的总和;设置为 ``'none'`` 时,则返回bce_loss。 + - **name** (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +返回 +::::::::: + - 输出的结果Tensor。如果 :attr:`reduction` 是 ``'none'``, 则输出的维度为 :math:`[N, *]` , 与输入 ``input`` 的形状相同。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``, 则输出的维度为 :math:`[1]` 。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import paddle.nn.functional as F + + paddle.disable_static() + input = paddle.to_tensor([0.5, 0.6, 0.7], dtype='float32') + label = paddle.to_tensor([1.0, 0.0, 1.0], dtype='float32') + output = F.binary_cross_entropy(input, label) + print(output.numpy()) # [0.65537095] + diff --git a/doc/paddle/api/paddle/nn/functional/loss/binary_cross_entropy_with_logits_cn.rst b/doc/paddle/api/paddle/nn/functional/loss/binary_cross_entropy_with_logits_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..18279319b1c0c6f9568e0af6e93bf52bb4cf0b22 --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/loss/binary_cross_entropy_with_logits_cn.rst @@ -0,0 +1,59 @@ +.. _cn_api_paddle_nn_functional_binary_cross_entropy_with_logits: + +binary_cross_entropy_with_logits +------------------------------- + +.. py:function:: paddle.nn.functional.binary_cross_entropy_with_logits(logit, label, weight=None, reduction='mean', pos_weight=None, name=None) + +该OP用于计算输入 `logit` 和标签 `label` 间的 `binary cross entropy with logits loss` 损失。 + +该OP结合了 `sigmoid` 操作和 :ref:`api_nn_loss_BCELoss` 操作。同时,我们也可以认为该OP是 ``sigmoid_cross_entrop_with_logits`` 和一些 `reduce` 操作的组合。 + +在每个类别独立的分类任务中,该OP可以计算按元素的概率误差。可以将其视为预测数据点的标签,其中标签不是互斥的。例如,一篇新闻文章可以同时关于政治,科技,体育或者同时不包含这些内容。 + +首先,该OP可通过下式计算损失函数: + +.. math:: + Out = -Labels * \log(\sigma(Logit)) - (1 - Labels) * \log(1 - \sigma(Logit)) + +其中 :math:`\sigma(Logit) = \frac{1}{1 + e^{-Logit}}` , 代入上方计算公式中: + +.. math:: + Out = Logit - Logit * Labels + \log(1 + e^{-Logit}) + +为了计算稳定性,防止当 :math:`Logit<0` 时, :math:`e^{-Logit}` 溢出,loss将采用以下公式计算: + +.. 
math:: + Out = \max(Logit, 0) - Logit * Labels + \log(1 + e^{-\|Logit\|}) + +然后,当 ``weight`` or ``pos_weight`` 不为None的时候,该算子会在输出Out上乘以相应的权重。张量 ``weight`` 给Batch中的每一条数据赋予不同权重,张量 ``pos_weight`` 给每一类的正例添加相应的权重。 + +最后,该算子会添加 `reduce` 操作到前面的输出Out上。当 `reduction` 为 `none` 时,直接返回最原始的 `Out` 结果。当 `reduction` 为 `mean` 时,返回输出的均值 :math:`Out = MEAN(Out)` 。当 `reduction` 为 `sum` 时,返回输出的求和 :math:`Out = SUM(Out)` 。 + +**注意: 因为是二分类任务,所以标签值应该是0或者1。 + +参数 +::::::::: + - **logit** (Tensor) - :math:`[N, *]` , 其中N是batch_size, `*` 是任意其他维度。输入数据 ``logit`` 一般是线性层的输出,不需要经过 ``sigmoid`` 层。数据类型是float32、float64。 + - **label** (Tensor) - :math:`[N, *]` ,标签 ``label`` 的维度、数据类型与输入 ``logit`` 相同。 + - **weight** (Tensor,可选) - 手动指定每个batch二值交叉熵的权重,如果指定的话,维度必须是一个batch的数据的维度。数据类型是float32, float64。默认值是:None。 + - **reduction** (str,可选) - 指定应用于输出结果的计算方式,可选值有: ``'none'``, ``'mean'``, ``'sum'`` 。默认为 ``'mean'``,计算 `BCELoss` 的均值;设置为 ``'sum'`` 时,计算 `BCELoss` 的总和;设置为 ``'none'`` 时,则返回原始loss。 + - **pos_weight** (Tensor,可选) - 手动指定正类的权重,必须是与类别数相等长度的向量。数据类型是float32, float64。默认值是:None。 + - **name** (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +返回 +::::::::: + - Tensor,输出的Tensor。如果 :attr:`reduction` 是 ``'none'``, 则输出的维度为 :math:`[N, *]` , 与输入 ``input`` 的形状相同。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``, 则输出的维度为 :math:`[1]` 。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + paddle.disable_static() + logit = paddle.to_tensor([5.0, 1.0, 3.0], dtype="float32") + label = paddle.to_tensor([1.0, 0.0, 1.0], dtype="float32") + output = paddle.nn.functional.binary_cross_entropy_with_logits(logit, label) + print(output.numpy()) # [0.45618808] + diff --git a/doc/paddle/api/paddle/nn/functional/loss/cross_entropy_cn.rst b/doc/paddle/api/paddle/nn/functional/loss/cross_entropy_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..05b080474c36f623ed1f81eef07c352b041166b5 --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/loss/cross_entropy_cn.rst @@ -0,0 +1,42 @@ +.. _cn_api_fluid_layers_cross_entropy: + +cross_entropy +------------------------------- + +.. py:function:: paddle.fluid.layers.cross_entropy(input, label, soft_label=False, ignore_index=-100) + + + + +该OP计算输入input和标签label间的交叉熵,可用于计算硬标签或软标签的交叉熵。 + +1. 硬标签交叉熵算法:若soft_label = False, :math:`label[i_1, i_2, ..., i_k]` 表示每个样本的硬标签值: + + .. math:: + \\output[i_1, i_2, ..., i_k]=-log(input[i_1, i_2, ..., i_k, j]), label[i_1, i_2, ..., i_k] = j, j != ignore\_index\\ + +2. 软标签交叉熵算法:若soft_label = True, :math:`label[i_1, i_2, ..., i_k, j]` 表明每个样本对应类别j的软标签值: + + .. math:: + \\output[i_1, i_2, ..., i_k]= -\sum_{j}label[i_1,i_2,...,i_k,j]*log(input[i_1, i_2, ..., i_k,j])\\ + +参数: + - **input** (Variable) – 维度为 :math:`[N_1, N_2, ..., N_k, D]` 的多维Tensor,其中最后一维D是类别数目。数据类型为float32或float64。 + - **label** (Variable) – 输入input对应的标签值。若soft_label=False,要求label维度为 :math:`[N_1, N_2, ..., N_k]` 或 :math:`[N_1, N_2, ..., N_k, 1]` ,数据类型为int64,且值必须大于等于0且小于D;若soft_label=True,要求label的维度、数据类型与input相同,且每个样本各软标签的总和为1。 + - **soft_label** (bool) – 指明label是否为软标签。默认为False,表示label为硬标签;若soft_label=True则表示软标签。 + - **ignore_index** (int) – 指定一个忽略的标签值,此标签值不参与计算,负值表示无需忽略任何标签值。仅在soft_label=False时有效。 默认值为-100。 + +返回: 表示交叉熵结果的Tensor,数据类型与input相同。若soft_label=False,则返回值维度与label维度相同;若soft_label=True,则返回值维度为 :math:`[N_1, N_2, ..., N_k, 1]` 。 + +返回类型:Variable + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + class_num = 7 + x = fluid.layers.data(name='x', shape=[3, 10], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + predict = fluid.layers.fc(input=x, size=class_num, act='softmax') + cost = fluid.layers.cross_entropy(input=predict, label=label) diff --git a/doc/paddle/api/paddle/nn/functional/loss/ctc_loss_cn.rst b/doc/paddle/api/paddle/nn/functional/loss/ctc_loss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2722525a61feefc4ea1bb3263e7eec0f88462796 --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/loss/ctc_loss_cn.rst @@ -0,0 +1,80 @@ +ctc_loss +------------------------------- + +.. py:function:: paddle.nn.functional.ctc_loss(log_probs, labels, input_lengths, label_lengths, blank=0, reduction='mean') + +该接口用于计算 CTC loss。该接口的底层调用了第三方 baidu-research::warp-ctc 的实现。 +也可以叫做 softmax with CTC,因为 Warp-CTC 库中插入了 softmax 激活函数来对输入的值进行归一化。 + +参数 +::::::::: + - **log_probs** (Tensor): - 经过 padding 的概率序列,其 shape 必须是 [max_logit_length, batch_size, num_classes + 1]。其中 max_logit_length 是最长输入序列的长度。该输入不需要经过 softmax 操作,因为该 OP 的内部对 input 做了 softmax 操作。数据类型仅支持float32。 + - **labels** (Tensor): - 经过 padding 的标签序列,其 shape 为 [batch_size, max_label_length],其中 max_label_length 是最长的 label 序列的长度。数据类型支持int32。 + - **input_lengths** (Tensor): - 表示输入 ``log_probs`` 数据中每个序列的长度,shape为 [batch_size] 。数据类型支持int64。 + - **label_lengths** (Tensor): - 表示 label 中每个序列的长度,shape为 [batch_size] 。数据类型支持int64。 + - **blank** (int,可选): - 空格标记的 ID 值,其取值范围为 [0,num_classes+1) 。数据类型支持int32。默认值为0。 + - **reduction** (string,可选): - 指定应用于输出结果的计算方式,可选值有: ``'none'``, ``'mean'``, ``'sum'``。设置为 ``'mean'`` 时,对 loss 值除以 label_lengths,并返回所得商的均值;设置为 ``'sum'`` 时,返回 loss 值的总和;设置为 ``'none'`` 时,则直接返回输出的 loss 值。默认值为 ``'mean'``。 + +返回 +::::::::: +``Tensor``,输入 ``log_probs`` 和标签 ``labels`` 间的 `ctc loss`。如果 :attr:`reduction` 是 ``'none'``, 则输出 loss 的维度为 [batch_size]。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``,则输出Loss的维度为 [1]。数据类型与输入 ``log_probs`` 一致。 + + +代码示例 +::::::::: + +.. 
code-block:: python + + # declarative mode + import paddle.nn.functional as F + import numpy as np + import paddle + + # length of the longest logit sequence + max_seq_length = 4 + #length of the longest label sequence + max_label_length = 3 + # number of logit sequences + batch_size = 2 + # class num + class_num = 3 + + np.random.seed(1) + log_probs = np.array([[[4.17021990e-01, 7.20324516e-01, 1.14374816e-04], + [3.02332580e-01, 1.46755889e-01, 9.23385918e-02]], + + [[1.86260208e-01, 3.45560730e-01, 3.96767467e-01], + [5.38816750e-01, 4.19194520e-01, 6.85219526e-01]], + + [[2.04452246e-01, 8.78117442e-01, 2.73875929e-02], + [6.70467496e-01, 4.17304814e-01, 5.58689833e-01]], + + [[1.40386939e-01, 1.98101491e-01, 8.00744593e-01], + [9.68261600e-01, 3.13424170e-01, 6.92322612e-01]], + + [[8.76389146e-01, 8.94606650e-01, 8.50442126e-02], + [3.90547849e-02, 1.69830427e-01, 8.78142476e-01]]]).astype("float32") + labels = np.array([[1, 2, 2], + [1, 2, 2]]).astype("int32") + input_lengths = np.array([5, 5]).astype("int64") + label_lengths = np.array([3, 3]).astype("int64") + + paddle.disable_static() + log_probs = paddle.to_tensor(log_probs) + labels = paddle.to_tensor(labels) + input_lengths = paddle.to_tensor(input_lengths) + label_lengths = paddle.to_tensor(label_lengths) + + loss = F.ctc_loss(log_probs, labels, + input_lengths, + label_lengths, + blank=0, + reduction='none') + print(loss.numpy()) #[3.9179852 2.9076521] + + loss = F.ctc_loss(log_probs, labels, + input_lengths, + label_lengths, + blank=0, + reduction='mean') + print(loss.numpy()) #[1.1376063] \ No newline at end of file diff --git a/doc/paddle/api/paddle/nn/functional/loss/kl_div_cn.rst b/doc/paddle/api/paddle/nn/functional/loss/kl_div_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..87aeca4641aa449058405e393e9518ce1e98c9c8 --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/loss/kl_div_cn.rst @@ -0,0 +1,65 @@ +kl_div +------------------------------- + +.. py:function:: paddle.nn.functional.kl_div(input, label, reduction='mean', name=None) + +该算子计算输入(Input)和输入(Label)之间的Kullback-Leibler散度损失。注意其中输入(Input)应为对数概率值,输入(Label)应为概率值。 + +kL发散损失计算如下: + +.. math:: + + l(input, label) = label * (log(label) - input) + + +当 ``reduction`` 为 ``none`` 时,输出损失与输入(x)形状相同,各点的损失单独计算,不会对结果做reduction 。 + +当 ``reduction`` 为 ``mean`` 时,输出损失为[1]的形状,输出为所有损失的平均值。 + +当 ``reduction`` 为 ``sum`` 时,输出损失为[1]的形状,输出为所有损失的总和。 + +当 ``reduction`` 为 ``batchmean`` 时,输出损失为[N]的形状,N为批大小,输出为所有损失的总和除以批量大小。 + +参数: + - **input** (Tensor) - KL散度损失算子的输入张量。维度为[N, \*]的多维Tensor,其中N是批大小,\*表示任何数量的附加维度,数据类型为float32或float64。 + - **label** (Tensor) - KL散度损失算子的张量。与输入 ``input`` 的维度和数据类型一致的多维Tensor。 + - **reduction** (str,可选) - 要应用于输出的reduction类型,可用类型为‘none’ | ‘batchmean’ | ‘mean’ | ‘sum’,‘none’表示无reduction,‘batchmean’ 表示输出的总和除以批大小,‘mean’ 表示所有输出的平均值,‘sum’表示输出的总和。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置。默认值:None。 + +返回:Tensor KL散度损失。 + + +**代码示例:** + +.. 
code-block:: python + + import paddle + import numpy as np + import paddle.nn.functional as F + + paddle.disable_static() + + shape = (5, 20) + input = np.random.uniform(-10, 10, shape).astype('float32') + target = np.random.uniform(-10, 10, shape).astype('float32') + + # 'batchmean' reduction, loss shape will be [N] + pred_loss = F.kl_div(paddle.to_tensor(input), + paddle.to_tensor(target), reduction='batchmean') + # shape=[5] + + # 'mean' reduction, loss shape will be [1] + pred_loss = F.kl_div(paddle.to_tensor(input), + paddle.to_tensor(target), reduction='mean') + # shape=[1] + + # 'sum' reduction, loss shape will be [1] + pred_loss = F.kl_div(paddle.to_tensor(input), + paddle.to_tensor(target), reduction='sum') + # shape=[1] + + # 'none' reduction, loss shape is same with input shape + pred_loss = F.kl_div(paddle.to_tensor(input), + paddle.to_tensor(target), reduction='none') + # shape=[5, 20] + diff --git a/doc/paddle/api/paddle/nn/functional/loss/l1_loss_cn.rst b/doc/paddle/api/paddle/nn/functional/loss/l1_loss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ff4fc9bf4c99a86a56c7ee3ff1dbb5da4042bbfe --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/loss/l1_loss_cn.rst @@ -0,0 +1,59 @@ +l1_loss +------------------------------- + +.. py:function:: paddle.nn.functional.l1_loss(input, label, reduction='mean', name=None) + +该接口计算输入 ``input`` 和标签 ``label`` 间的 `L1 loss` 损失。 + +该损失函数的数学计算公式如下: + +当 `reduction` 设置为 ``'none'`` 时, + + .. math:: + Out = \lvert input - label\rvert + +当 `reduction` 设置为 ``'mean'`` 时, + + .. math:: + Out = MEAN(\lvert input - label\rvert) + +当 `reduction` 设置为 ``'sum'`` 时, + + .. math:: + Out = SUM(\lvert input - label\rvert) + + +参数 +::::::::: + - **input** (Tensor): - 输入的Tensor,维度是[N, *], 其中N是batch size, `*` 是任意数量的额外维度。数据类型为:float32、float64、int32、int64。 + - **label** (Tensor): - 标签,维度是[N, *], 与 ``input`` 相同。数据类型为:float32、float64、int32、int64。 + - **reduction** (str, 可选): - 指定应用于输出结果的计算方式,可选值有: ``'none'``, ``'mean'``, ``'sum'`` 。默认为 ``'mean'``,计算 `L1Loss` 的均值;设置为 ``'sum'`` 时,计算 `L1Loss` 的总和;设置为 ``'none'`` 时,则返回 `L1Loss`。 + - **name** (str,可选): - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +::::::::: +``Tensor``, 输入 ``input`` 和标签 ``label`` 间的 `L1 loss` 损失。如果 `reduction` 是 ``'none'``, 则输出Loss的维度为 [N, *], 与输入 ``input`` 相同。如果 `reduction` 是 ``'mean'`` 或 ``'sum'``, 则输出Loss的维度为 [1]。 + + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + paddle.disable_static() + input = paddle.to_tensor([[1.5, 0.8], [0.2, 1.3]]) + label = paddle.to_tensor([[1.7, 1], [0.4, 0.5]]) + + l1_loss = paddle.nn.functional.l1_loss(input, label) + print(l1_loss.numpy()) + # [0.35] + + l1_loss = paddle.nn.functional.l1_loss(input, label, reduction='none') + print(l1_loss.numpy()) + # [[0.20000005 0.19999999] + # [0.2 0.79999995]] + + l1_loss = paddle.nn.functional.l1_loss(input, label, reduction='sum') + print(l1_loss.numpy()) + # [1.4] diff --git a/doc/paddle/api/paddle/nn/functional/loss/margin_ranking_loss_cn.rst b/doc/paddle/api/paddle/nn/functional/loss/margin_ranking_loss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7cb375833858922cc173322895b2a22120918ba2 --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/loss/margin_ranking_loss_cn.rst @@ -0,0 +1,50 @@ +.. _cn_api_nn_cn_margin_ranking_loss: + +margin_ranking_loss +------------------------------- + +.. 
py:function:: paddle.nn.functional.margin_ranking_loss(input, other, label, margin=0.0, reduction='mean', name=None)
+
+该算子计算输入input,other 和 标签label间的 `margin rank loss` 损失。该损失函数的数学计算公式如下:
+
+ .. math::
+ margin\_rank\_loss = max(0, -label * (input - other) + margin)
+
+当 `reduction` 设置为 ``'mean'`` 时,
+
+ .. math::
+ Out = MEAN(margin\_rank\_loss)
+
+当 `reduction` 设置为 ``'sum'`` 时,
+
+ .. math::
+ Out = SUM(margin\_rank\_loss)
+
+当 `reduction` 设置为 ``'none'`` 时,直接返回最原始的 `margin_rank_loss` 。
+
+参数
+::::::::
+ - **input** (Tensor):第一个输入的 `Tensor` ,数据类型为:float32、float64。
+ - **other** (Tensor):第二个输入的 `Tensor` ,数据类型为:float32、float64。
+ - **label** (Tensor):训练数据的标签,数据类型为:float32, float64。
+ - **margin** (float,可选): - 用于加和的margin值,默认值为0。
+ - **reduction** (string,可选): - 指定应用于输出结果的计算方式,可选值有: ``'none'`` 、 ``'mean'`` 、 ``'sum'`` 。如果设置为 ``'none'`` ,则直接返回最原始的 ``margin_rank_loss`` 。如果设置为 ``'sum'`` ,则返回 ``margin_rank_loss`` 的总和。如果设置为 ``'mean'`` ,则返回 ``margin_rank_loss`` 的平均值。默认值为 ``'mean'`` 。
+ - **name** (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。
+
+返回
+::::::::
+Tensor, 如果 :attr:`reduction` 为 ``'sum'`` 或者是 ``'mean'`` ,则形状为 :math:`[1]` ,否则shape和输入 `input` 保持一致。数据类型与 ``input``、 ``other`` 相同。
+
+代码示例
+::::::::
+
+.. code-block:: python
+
+ import paddle
+ paddle.disable_static()
+
+ input = paddle.to_tensor([[1, 2], [3, 4]], dtype='float32')
+ other = paddle.to_tensor([[2, 1], [2, 4]], dtype='float32')
+ label = paddle.to_tensor([[1, -1], [-1, -1]], dtype='float32')
+ loss = paddle.nn.functional.margin_ranking_loss(input, other, label)
+ print(loss.numpy()) # [0.75]
diff --git a/doc/paddle/api/paddle/nn/functional/loss/mse_loss_cn.rst b/doc/paddle/api/paddle/nn/functional/loss/mse_loss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b5213836569abff9e4f78e04f603f1b727183467 --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/loss/mse_loss_cn.rst @@ -0,0 +1,66 @@
+mse_loss
+-------------------------------
+
+.. py:function:: paddle.nn.functional.mse_loss(input, label, reduction='mean', name=None)
+
+该OP用于计算预测值和目标值的均方差误差。
+
+对于预测值input和目标值label,公式为:
+
+当 `reduction` 设置为 ``'none'`` 时,
+
+ .. math::
+ Out = (input - label)^2
+
+当 `reduction` 设置为 ``'mean'`` 时,
+
+ .. math::
+ Out = \operatorname{mean}((input - label)^2)
+
+当 `reduction` 设置为 ``'sum'`` 时,
+
+ .. math::
+ Out = \operatorname{sum}((input - label)^2)
+
+
+参数
+:::::::::
+ - **input** (Tensor) - 预测值,维度为 :math:`[N_1, N_2, ..., N_k]` 的多维Tensor。数据类型为float32或float64。
+ - **label** (Tensor) - 目标值,维度为 :math:`[N_1, N_2, ..., N_k]` 的多维Tensor。数据类型为float32或float64。
+ - **reduction** (str,可选) - 指定应用于输出结果的计算方式,可选值有: ``'none'`` 、 ``'mean'`` 、 ``'sum'`` 。默认为 ``'mean'``,计算 `mse loss` 的均值;设置为 ``'sum'`` 时,计算 `mse loss` 的总和;设置为 ``'none'`` 时,则返回逐元素的 `mse loss` 。
+ - **name** (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。
+
+返回
+:::::::::
+``Tensor``, 输入 ``input`` 和标签 ``label`` 间的 `mse loss` 损失。
+
+**代码示例**:
+
+.. 
code-block:: python + + import numpy as np + import paddle + # static graph mode + paddle.enable_static() + mse_loss = paddle.nn.loss.MSELoss() + input = paddle.data(name="input", shape=[1]) + label = paddle.data(name="label", shape=[1]) + place = paddle.CPUPlace() + input_data = np.array([1.5]).astype("float32") + label_data = np.array([1.7]).astype("float32") + output = mse_loss(input,label) + exe = paddle.static.Executor(place) + exe.run(paddle.static.default_startup_program()) + output_data = exe.run( + paddle.static.default_main_program(), + feed={"input":input_data, "label":label_data}, + fetch_list=[output], + return_numpy=True) + print(output_data) + # [array([0.04000002], dtype=float32)] + # dynamic graph mode + paddle.disable_static() + input = paddle.to_tensor(input_data) + label = paddle.to_tensor(label_data) + output = mse_loss(input, label) + print(output.numpy()) + # [0.04000002] + diff --git a/doc/paddle/api/paddle/nn/functional/loss/nll_loss_cn.rst b/doc/paddle/api/paddle/nn/functional/loss/nll_loss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4840c999342c5e17715105ea5963dfff7b89112d --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/loss/nll_loss_cn.rst @@ -0,0 +1,45 @@ +.. _cn_api_nn_functional_nll_loss: + +nll_loss +------------------------------- +.. py:function:: paddle.nn.functional.nll_loss(input, label, weight=None, ignore_index=-100, reduction='mean', name=None) + +该接口返回 `negative log likelihood` 。可在 :ref:`cn_api_nn_loss_NLLLoss` 查看详情。 + +参数 +::::::::: + - **input** (Tensor): - 输入 `Tensor`, 其形状为 :math:`[N, C]` , 其中 `C` 为类别数。但是对于多维度的情形下,它的形状为 :math:`[N, C, d_1, d_2, ..., d_K]` 。数据类型为float32或float64。 + - **label** (Tensor): - 输入x对应的标签值。其形状为 :math:`[N,]` 或者 :math:`[N, d_1, d_2, ..., d_K]`, 数据类型为int64。 + - **weight** (Tensor, 可选): - 手动指定每个类别的权重。其默认为 `None` 。如果提供该参数的话,长度必须为 `num_classes` 。数据类型为float32或float64。 + - **ignore_index** (int64, 可选): - 指定一个忽略的标签值,此标签值不参与计算。默认值为-100。数据类型为int64。 + - **reduction** (str, 可选): - 指定应用于输出结果的计算方式,可选值有: `none`, `mean`, `sum` 。默认为 `mean` ,计算 `mini-batch` loss均值。设置为 `sum` 时,计算 `mini-batch` loss的总和。设置为 `none` 时,则返回loss Tensor。数据类型为string。 + - **name** (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +返回 +::::::::: +`Tensor` ,返回存储表示 `negative log likelihood loss` 的损失值。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + from paddle.nn.functional import nll_loss + log_softmax = paddle.nn.LogSoftmax(axis=1) + + input_np = np.array([[0.88103855, 0.9908683 , 0.6226845 ], + [0.53331435, 0.07999352, 0.8549948 ], + [0.25879037, 0.39530203, 0.698465 ], + [0.73427284, 0.63575995, 0.18827209], + [0.05689114, 0.0862954 , 0.6325046 ]]).astype(np.float32) + label_np = np.array([0, 2, 1, 1, 0]).astype(np.int64) + + place = paddle.CPUPlace() + paddle.disable_static(place) + input = paddle.to_tensor(input_np) + log_out = log_softmax(input) + label = paddle.to_tensor(label_np) + result = nll_loss(log_out, label) + print(result.numpy()) # [1.0720209] diff --git a/doc/paddle/api/paddle/nn/functional/loss/smooth_l1_loss_cn.rst b/doc/paddle/api/paddle/nn/functional/loss/smooth_l1_loss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..50e419b69b7e9d55ff26b5668b3de02e7b098128 --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/loss/smooth_l1_loss_cn.rst @@ -0,0 +1,48 @@ +smooth_l1_loss +------------------------------- + +.. 
py:function:: paddle.nn.functional.smooth_l1_loss(input, label, reduction='mean', delta=1.0, name=None)
+
+该OP计算输入input和标签label间的SmoothL1损失。如果逐个元素的绝对误差低于 delta(默认为1.0),则使用平方项计算损失
+,否则使用L1损失项。在某些情况下,它可以防止梯度爆炸,也称为Huber损失,该损失函数的数学计算公式如下:
+
+ .. math::
+ loss(x,y)=\\frac{1}{n}\\sum_{i}z_i
+
+`z_i`的计算公式如下:
+
+ .. math::
+
+ \\mathop{z_i}=\\left\\{\\begin{array}{rcl}
+ 0.5(x_i - y_i)^2 & & {if |x_i - y_i| < delta} \\\\
+ delta * |x_i - y_i| - 0.5 * delta^2 & & {otherwise}
+ \\end{array} \\right.
+
+参数
+::::::::::
+ - **input** (Tensor): 输入 `Tensor`, 数据类型为float32。其形状为 :math:`[N, C]` , 其中 `C` 为类别数。对于多维度的情形下,它的形状为 :math:`[N, C, d_1, d_2, ..., d_k]`,k >= 1。
+ - **label** (Tensor): 输入input对应的标签值,维度和数据类型与input相同。
+ - **reduction** (string, 可选): - 指定应用于输出结果的计算方式,数据类型为string,可选值有: `none`, `mean`, `sum` 。默认为 `mean` ,计算 `mini-batch` loss均值。设置为 `sum` 时,计算 `mini-batch` loss的总和。设置为 `none` 时,则返回loss Tensor。
+ - **delta** (float, 可选): SmoothL1Loss损失的阈值参数,用于控制Huber损失对线性误差或平方误差的侧重。数据类型为float32。 默认值= 1.0。
+ - **name** (string,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。
+
+
+
+返回:返回计算 `smooth_l1_loss` 后的损失值。
+
+返回类型:Tensor
+
+**代码示例**
+
+.. code-block:: python
+
+ import paddle
+ import numpy as np
+
+ paddle.disable_static()
+ input_data = np.random.rand(3,3).astype("float32")
+ label_data = np.random.rand(3,3).astype("float32")
+ input = paddle.to_tensor(input_data)
+ label = paddle.to_tensor(label_data)
+ output = paddle.nn.functional.smooth_l1_loss(input, label)
+ print(output.numpy())
diff --git a/doc/paddle/api/paddle/nn/functional/norm/batch_norm_cn.rst b/doc/paddle/api/paddle/nn/functional/norm/batch_norm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e9fee412f3a8fa00a8197f8be3502d0a9206eafc --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/norm/batch_norm_cn.rst @@ -0,0 +1,47 @@
+.. _cn_api_nn_functional_batch_norm:
+
+batch_norm
+-------------------------------
+
+.. py:function:: paddle.nn.functional.batch_norm(x, running_mean, running_var, weight, bias, training=False, momentum=0.9, epsilon=1e-05, data_format='NCHW', name=None)
+
+推荐使用 nn.BatchNorm1d、nn.BatchNorm2d、nn.BatchNorm3d,其内部会调用此方法。
+
+详情见 :ref:`cn_api_nn_BatchNorm1d` 。
+
+参数:
+ - **x** (Tensor) - 输入的Tensor,数据类型为float32, float64。
+ - **running_mean** (Tensor) - 均值的Tensor。
+ - **running_var** (Tensor) - 方差的Tensor。
+ - **weight** (Tensor) - 权重的Tensor。
+ - **bias** (Tensor) - 偏置的Tensor。
+ - **training** (bool, 可选) - 指示是否为训练模式。默认值:False。
+ - **momentum** (float, 可选) - 此值用于计算 ``moving_mean`` 和 ``moving_var`` 。默认值:0.9。更新公式详见 :ref:`cn_api_nn_BatchNorm1d` 。
+ - **epsilon** (float, 可选) - 为了数值稳定加在分母上的值。默认值:1e-05。
+ - **data_format** (string, 可选) - 指定输入数据格式,数据格式可以为"NC", "NCL", "NCHW" 或者"NCDHW"。默认值:"NCHW"。
+ - **name** (string, 可选) – BatchNorm的名称, 默认值为None。更多信息请参见 :ref:`api_guide_Name` 。
+
+
+返回:Tensor,批归一化后的输出,形状和数据类型与输入 ``x`` 一致。
+
+
+**代码示例**
+
+.. 
code-block:: python
+
+ import paddle
+ import numpy as np
+
+ paddle.disable_static()
+ np.random.seed(123)
+ x = np.random.random(size=(2, 1, 2, 3)).astype('float32')
+ running_mean = np.random.random(size=1).astype('float32')
+ running_variance = np.random.random(size=1).astype('float32')
+ weight_data = np.random.random(size=1).astype('float32')
+ bias_data = np.random.random(size=1).astype('float32')
+ x = paddle.to_tensor(x)
+ rm = paddle.to_tensor(running_mean)
+ rv = paddle.to_tensor(running_variance)
+ w = paddle.to_tensor(weight_data)
+ b = paddle.to_tensor(bias_data)
+ batch_norm_out = paddle.nn.functional.batch_norm(x, rm, rv, w, b)
+ print(batch_norm_out.numpy())
diff --git a/doc/paddle/api/paddle/nn/functional/norm/instance_norm_cn.rst b/doc/paddle/api/paddle/nn/functional/norm/instance_norm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..97a49237f9605b595464027b8e137408fd645483 --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/norm/instance_norm_cn.rst @@ -0,0 +1,47 @@
+.. _cn_api_nn_functional_instance_norm:
+
+instance_norm
+-------------------------------
+
+.. py:function:: paddle.nn.functional.instance_norm(x, running_mean, running_var, weight, bias, training=False, epsilon=1e-05, momentum=0.9, use_input_stats=True, data_format='NCHW', name=None)
+
+推荐使用 nn.InstanceNorm1d、nn.InstanceNorm2d、nn.InstanceNorm3d,其内部会调用此方法。
+
+详情见 :ref:`cn_api_nn_InstanceNorm1d` 。
+
+参数:
+ - **x** (Tensor) - 输入的Tensor,数据类型为float32, float64。
+ - **running_mean** (Tensor) - 均值的Tensor。
+ - **running_var** (Tensor) - 方差的Tensor。
+ - **weight** (Tensor) - 权重的Tensor。
+ - **bias** (Tensor) - 偏置的Tensor。
+ - **epsilon** (float, 可选) - 为了数值稳定加在分母上的值。默认值:1e-05。
+ - **momentum** (float, 可选) - 此值用于计算 ``moving_mean`` 和 ``moving_var`` 。默认值:0.9。更新公式详见 :ref:`cn_api_nn_InstanceNorm1d` 。
+ - **use_input_stats** (bool, 可选) - 默认值:True。
+ - **data_format** (string, 可选) - 指定输入数据格式,数据格式可以为"NC", "NCL", "NCHW" 或者"NCDHW"。默认值:"NCHW"。
+ - **name** (string, 可选) – InstanceNorm的名称, 默认值为None。更多信息请参见 :ref:`api_guide_Name` 。
+
+返回:Tensor,实例归一化后的输出,形状和数据类型与输入 ``x`` 一致。
+
+
+**代码示例**
+
+.. code-block:: python
+
+ import paddle
+ import numpy as np
+
+ paddle.disable_static()
+ np.random.seed(123)
+ x = np.random.random(size=(2, 1, 2, 3)).astype('float32')
+ running_mean = np.random.random(size=1).astype('float32')
+ running_variance = np.random.random(size=1).astype('float32')
+ weight_data = np.random.random(size=1).astype('float32')
+ bias_data = np.random.random(size=1).astype('float32')
+ x = paddle.to_tensor(x)
+ rm = paddle.to_tensor(running_mean)
+ rv = paddle.to_tensor(running_variance)
+ w = paddle.to_tensor(weight_data)
+ b = paddle.to_tensor(bias_data)
+ instance_norm_out = paddle.nn.functional.instance_norm(x, rm, rv, w, b)
+ print(instance_norm_out.numpy())
diff --git a/doc/paddle/api/paddle/nn/functional/norm/layer_norm_cn.rst b/doc/paddle/api/paddle/nn/functional/norm/layer_norm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e91822acf6299380ab132fe7d945ac3fc3875487 --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/norm/layer_norm_cn.rst @@ -0,0 +1,37 @@
+.. _cn_api_nn_functional_layer_norm:
+
+layer_norm
+-------------------------------
+
+.. py:function:: paddle.nn.functional.layer_norm(x, normalized_shape, weight=None, bias=None, epsilon=1e-05, name=None)
+
+推荐使用nn.LayerNorm。
+
+详情见 :ref:`cn_api_nn_LayerNorm` 。
+ +参数: + - **x** (int) - 输入,数据类型为float32, float64。 + - **normalized_shape** (int|list|tuple) - 期望的输入是 :math:`[*, normalized_shape[0], normalized_shape[1], ..., normalized_shape[-1]]` ,如果是一个整数,会作用在最后一个维度。 + - **weight** (Tensor) - 权重的Tensor, 默认为None。 + - **bias** (Tensor) - 偏置的Tensor, 默认为None。 + - **epsilon** (float, 可选) - 为了数值稳定加在分母上的值。默认值:1e-05。 + - **name** (string, 可选) – LayerNorm的名称, 默认值为None。更多信息请参见 :ref:`api_guide_Name` 。 + + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + np.random.seed(123) + x_data = np.random.random(size=(2, 2, 2, 3)).astype('float32') + x = paddle.to_tensor(x_data) + layer_norm_out = paddle.nn.functional.layer_norm(x, x.shape[1:]) + + print(layer_norm_out.numpy()) + diff --git a/doc/paddle/api/paddle/nn/functional/norm/normalize_cn.rst b/doc/paddle/api/paddle/nn/functional/norm/normalize_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..703a22ddef7b99fe18a9e8d52fb34747ed6cc106 --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/norm/normalize_cn.rst @@ -0,0 +1,59 @@ +normalize +------------------------------- + +.. py:function:: paddle.nn.functional.normalize(x, p=2, axis=1, epsilon=1e-12, name=None) + +该接口使用 :math:`L_p` 范数沿维度 ``axis`` 对 ``x`` 进行归一化。计算公式如下: + +.. math:: + + y = \frac{x}{ \max\left( \lvert \lvert x \rvert \rvert_p, epsilon\right) } + +.. math:: + \lvert \lvert x \rvert \rvert_p = \left(\sum_i {\lvert x_i\rvert^p} \right)^{1/p} + +其中 :math:`\sum_i{\lvert x_i\rvert^p}` 沿维度 ``axis`` 进行计算。 + + +参数 +::::::::: + - **x** (Tensor) - 输入可以是N-D Tensor。数据类型为:float32、float64。 + - **p** (float|int, 可选) - 范数公式中的指数值。默认值:2 + - **axis** (int, 可选)- 要进行归一化的轴。如果 ``x`` 是1-D Tensor,轴固定为0。如果 `axis < 0`,轴为 `x.ndim + axis`。-1表示最后一维。 + - **epsilon** (float,可选) - 添加到分母上的值以防止分母除0。默认值为1e-12。 + - **name** (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +::::::::: +``Tensor``, 输出的形状和数据类型和 ``x`` 相同。 + +抛出异常: +::::::::: + - ``TypeError`` - 当参数 ``p`` 或者 ``axis`` 的类型不符合要求时。或者当参数 ``x`` 的类型或数据类型不符合要求时。 + +代码示例 +::::::::: + +.. code-block:: python + + import numpy as np + import paddle + import paddle.nn.functional as F + + paddle.disable_static() + x = np.arange(6, dtype=np.float32).reshape(2,3) + x = paddle.to_tensor(x) + y = F.normalize(x) + print(y.numpy()) + # [[0. 0.4472136 0.8944272 ] + # [0.42426404 0.5656854 0.7071067 ]] + + y = F.normalize(x, p=1.5) + print(y.numpy()) + # [[0. 0.40862012 0.81724024] + # [0.35684016 0.4757869 0.5947336 ]] + + y = F.normalize(x, axis=0) + print(y.numpy()) + # [[0. 0.24253564 0.37139067] + # [1. 0.97014254 0.9284767 ]] diff --git a/doc/paddle/api/paddle/nn/functional/pooling/adaptive_avg_pool1d_cn.rst b/doc/paddle/api/paddle/nn/functional/pooling/adaptive_avg_pool1d_cn.rst new file mode 100755 index 0000000000000000000000000000000000000000..ae3683263fc3f3ea659259873028b7116d483c02 --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/pooling/adaptive_avg_pool1d_cn.rst @@ -0,0 +1,54 @@ +.. _cn_api_nn_functional_adaptive_avg_pool1d: + + +adaptive_avg_pool1d +------------------------------- + +.. py:function:: paddle.nn.functional.adaptive_avg_pool1d(x, output_size, name=None) + +该算子根据输入 `x` , `output_size` 等参数对一个输入Tensor计算1D的自适应平均池化。输入和输出都是3-D Tensor, +默认是以 `NCL` 格式表示的,其中 `N` 是 batch size, `C` 是通道数, `L` 是输入特征的长度. + +.. 
note::
+ 详细用法请参考对应的 `Class` : :ref:`cn_api_nn_AdaptiveAvgPool1d` 。
+
+
+参数
+:::::::::
+ - **x** (Tensor): 当前算子的输入, 其是一个形状为 `[N, C, L]` 的3-D Tensor。其中 `N` 是batch size, `C` 是通道数, `L` 是输入特征的长度。 其数据类型为float32或者float64。
+ - **output_size** (int): 算子输出特征图的长度,其数据类型为int。
+ - **name** (str,可选): 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。
+
+返回
+:::::::::
+``Tensor``, 输入 `x` 经过自适应池化计算得到的目标3-D Tensor,其数据类型与输入相同。
+
+抛出异常
+:::::::::
+ - ``ValueError`` - ``output_size`` 应是一个整数。
+
+代码示例
+:::::::::
+
+.. code-block:: python
+
+ # average adaptive pool1d
+ # suppose input data in shape of [N, C, L], `output_size` is m,
+ # output shape is [N, C, m], adaptive pool divide L dimension
+ # of input data into m grids averagely and performs poolings in each
+ # grid to get output.
+ # adaptive avg pool performs calculations as follow:
+ #
+ # for i in range(m):
+ # lstart = floor(i * L / m)
+ # lend = ceil((i + 1) * L / m)
+ # output[:, :, i] = sum(input[:, :, lstart: lend])/(lend - lstart)
+ #
+ import paddle
+ import paddle.nn.functional as F
+ import numpy as np
+ paddle.disable_static()
+
+ data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
+ pool_out = F.adaptive_avg_pool1d(data, output_size=16)
+ # pool_out shape: [1, 3, 16]
diff --git a/doc/paddle/api/paddle/nn/functional/pooling/adaptive_avg_pool2d_cn.rst b/doc/paddle/api/paddle/nn/functional/pooling/adaptive_avg_pool2d_cn.rst new file mode 100755 index 0000000000000000000000000000000000000000..cd5e3a087a4e187c9015c2a19a96112295012c15 --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/pooling/adaptive_avg_pool2d_cn.rst @@ -0,0 +1,68 @@
+adaptive_avg_pool2d
+-------------------------------
+
+.. py:function:: paddle.nn.functional.adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None)
+
+该算子根据输入 `x` , `output_size` 等参数对一个输入Tensor计算2D的自适应平均池化。输入和输出都是4-D Tensor,
+默认是以 `NCHW` 格式表示的,其中 `N` 是 batch size, `C` 是通道数, `H` 是输入特征的高度, `W` 是输入特征的宽度。
+
+计算公式如下:
+
+.. math::
+
+ hstart &= floor(i * H_{in} / H_{out})
+
+ hend &= ceil((i + 1) * H_{in} / H_{out})
+
+ wstart &= floor(j * W_{in} / W_{out})
+
+ wend &= ceil((j + 1) * W_{in} / W_{out})
+
+ Output(i ,j) &= \frac{sum(Input[hstart:hend, wstart:wend])}{(hend - hstart) * (wend - wstart)}
+
+
+参数
+:::::::::
+ - **x** (Tensor): 默认形状为(批大小,通道数,高度,宽度),即NCHW格式的4-D Tensor。 其数据类型为float16, float32, float64, int32或int64。
+ - **output_size** (int|list|tuple): 算子输出特征图的尺寸,如果其是list或tuple类型的数值,必须包含两个元素,H和W。H和W既可以是int类型值也可以是None,None表示与输入特征尺寸相同。
+ - **data_format** (str): 输入和输出的数据格式,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。
+ - **name** (str,可选): 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。
+
+返回
+:::::::::
+``Tensor``, 默认形状为(批大小,通道数,输出特征高度,输出特征宽度),即NCHW格式的4-D Tensor,其数据类型与输入相同。
+
+抛出异常
+:::::::::
+ - ``ValueError`` - 如果 ``data_format`` 既不是"NCHW"也不是"NHWC"。
+
+代码示例
+:::::::::
+
+.. code-block:: python
+
+ # adaptive avg pool2d
+ # suppose input data in shape of [N, C, H, W], `output_size` is [m, n],
+ # output shape is [N, C, m, n], adaptive pool divide H and W dimensions
+ # of input data into m * n grids averagely and performs poolings in each
+ # grid to get output.
+ # adaptive avg pool performs calculations as follow: + # + # for i in range(m): + # for j in range(n): + # hstart = floor(i * H / m) + # hend = ceil((i + 1) * H / m) + # wstart = floor(i * W / n) + # wend = ceil((i + 1) * W / n) + # output[:, :, i, j] = avg(input[:, :, hstart: hend, wstart: wend]) + # + import paddle + import numpy as np + paddle.disable_static() + input_data = np.random.rand(2, 3, 32, 32) + x = paddle.to_tensor(input_data) + # x.shape is [2, 3, 32, 32] + pool_out = paddle.nn.functional.adaptive_avg_pool2d( + x = x, + output_size=[3, 3]) + # pool_out.shape is [2, 3, 3, 3] \ No newline at end of file diff --git a/doc/paddle/api/paddle/nn/functional/pooling/adaptive_avg_pool3d_cn.rst b/doc/paddle/api/paddle/nn/functional/pooling/adaptive_avg_pool3d_cn.rst new file mode 100755 index 0000000000000000000000000000000000000000..756f9d01ef220e7c42253d2a53572dba5f619f43 --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/pooling/adaptive_avg_pool3d_cn.rst @@ -0,0 +1,76 @@ +adaptive_avg_pool3d +------------------------------- + +.. py:function:: paddle.nn.functional.adaptive_avg_pool3d(x, output_size, data_format='NCDHW', name=None) + +该算子根据输入 `x` , `output_size` 等参数对一个输入Tensor计算3D的自适应平均池化。输入和输出都是5-D Tensor, +默认是以 `NCDHW` 格式表示的,其中 `N` 是 batch size, `C` 是通道数, `D` 是特征图长度, `H` 是输入特征的高度, `H` 是输入特征的宽度。 + +计算公式如下: + +.. math:: + + dstart &= floor(i * D_{in} / D_{out}) + + dend &= ceil((i + 1) * D_{in} / D_{out}) + + hstart &= floor(j * H_{in} / H_{out}) + + hend &= ceil((j + 1) * H_{in} / H_{out}) + + wstart &= floor(k * W_{in} / W_{out}) + + wend &= ceil((k + 1) * W_{in} / W_{out}) + + Output(i ,j, k) &= \frac{sum(Input[dstart:dend, hstart:hend, wstart:wend])}{(dend - dstart) * (hend - hstart) * (wend - wstart)} + + + +参数 +::::::::: + - **x** (Tensor): 默认形状为(批大小,通道数,长度,高度,宽度),即NCDHW格式的5-D Tensor。 其数据类型为float16, float32, float64, int32或int64. + - **output_size** (int|list|turple): 算子输出特征图的尺寸,如果其是list或turple类型的数值,必须包含三个元素,D,H和W。D,H和W既可以是int类型值也可以是None,None表示与输入特征尺寸相同。 + - **data_format** (str): 输入和输出的数据格式,可以是"NCDHW"和"NDHWC"。N是批尺寸,C是通道数,D是特征长度,H是特征高度,W是特征宽度。默认值:"NCDHW"。 + - **name** (str,可选): 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +::::::::: +``Tensor``, 默认形状为(批大小,通道数,输出特征长度,输出特征高度,输出特征宽度),即NCDHW格式的5-D Tensor,其数据类型与输入相同。 + +抛出异常 +::::::::: + - ``ValueError`` - 如果 ``data_format`` 既不是"NCDHW"也不是"NDHWC"。 + +代码示例 +::::::::: + +.. code-block:: python + + # adaptive avg pool3d + # suppose input data in shape of [N, C, D, H, W], `output_size` is [l, m, n], + # output shape is [N, C, l, m, n], adaptive pool divide D, H and W dimensions + # of input data into l * m * n grids averagely and performs poolings in each + # grid to get output. 
+ # adaptive avg pool performs calculations as follow: + # + # for i in range(l): + # for j in range(m): + # for k in range(n): + # dstart = floor(i * D / l) + # dend = ceil((i + 1) * D / l) + # hstart = floor(j * H / m) + # hend = ceil((j + 1) * H / m) + # wstart = floor(k * W / n) + # wend = ceil((k + 1) * W / n) + # output[:, :, i, j, k] = + # avg(input[:, :, dstart:dend, hstart: hend, wstart: wend]) + import paddle + import numpy as np + paddle.disable_static() + input_data = np.random.rand(2, 3, 8, 32, 32) + x = paddle.to_tensor(input_data) + # x.shape is [2, 3, 8, 32, 32] + pool_out = paddle.nn.functional.adaptive_avg_pool3d( + x = x, + output_size=[3, 3, 3]) + # pool_out.shape is [2, 3, 3, 3, 3] \ No newline at end of file diff --git a/doc/paddle/api/paddle/nn/functional/pooling/adaptive_max_pool1d_cn.rst b/doc/paddle/api/paddle/nn/functional/pooling/adaptive_max_pool1d_cn.rst new file mode 100755 index 0000000000000000000000000000000000000000..f88c16aa13961842246fcb6d87b5b8b7e675b929 --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/pooling/adaptive_max_pool1d_cn.rst @@ -0,0 +1,55 @@ +.. _cn_api_nn_functional_adaptive_max_pool1d: + + +adaptive_max_pool1d +------------------------------- + +.. py:function:: paddle.nn.functional.adaptive_max_pool1d(x, output_size, return_indices=False, name=None) + +该算子根据输入 `x` , `output_size` 等参数对一个输入Tensor计算1D的自适应最大值池化。输入和输出都是3-D Tensor, +默认是以 `NCL` 格式表示的,其中 `N` 是 batch size, `C` 是通道数, `L` 是输入特征的长度. + +.. note:: + 详细请参考对应的 `Class` 请参考: :ref:`cn_api_nn_AdaptiveMaxPool1d` 。 + + +参数 +::::::::: + - **x** (Tensor): 当前算子的输入, 其是一个形状为 `[N, C, L]` 的3-D Tensor。其中 `N` 是batch size, `C` 是通道数, `L` 是输入特征的长度。 其数据类型为float32或者float64。 + - **output_size** (int|list|tuple): 算子输出特征图的长度,其数据类型为int或list,tuple。 + - **return_indices** (bool): 如果设置为True,则会与输出一起返回最大值的索引,默认为False。 + - **name** (str,可选): 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +::::::::: +``Tensor``, 输入 `x` 经过自适应池化计算得到的目标3-D Tensor,其数据类型与输入相同。 + +抛出异常 +::::::::: + - ``ValueError`` - 如果 ``output_size`` 不是int类型值。 + +代码示例 +::::::::: + +.. code-block:: python + + # max adaptive pool1d + # suppose input data in shape of [N, C, L], `output_size` is m, + # output shape is [N, C, m], adaptive pool divide L dimension + # of input data into m grids averagely and performs poolings in each + # grid to get output. + # adaptive max pool performs calculations as follow: + # + # for i in range(m): + # lstart = floor(i * L / m) + # lend = ceil((i + 1) * L / m) + # output[:, :, i] = max(input[:, :, lstart: lend]) + # + import paddle + import paddle.nn.functional as F + import numpy as np + paddle.disable_static() + + data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32)) + pool_out = F.adaptive_max_pool1d(data, output_size=16) + # pool_out shape: [1, 3, 16]) diff --git a/doc/paddle/api/paddle/nn/functional/pooling/avg_pool1d_cn.rst b/doc/paddle/api/paddle/nn/functional/pooling/avg_pool1d_cn.rst new file mode 100755 index 0000000000000000000000000000000000000000..0db33aac3c91669b90656ec32a05e1ba875ee843 --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/pooling/avg_pool1d_cn.rst @@ -0,0 +1,53 @@ +.. _cn_api_nn_functional_avg_pool1d: + + +avg_pool1d +------------------------------- + +.. py:function:: paddle.nn.functional.avg_pool1d(x, kernel_size, stride=None, padding=0, count_include_pad=True, ceil_mode=False, name=None) + +该算子根据输入 `x` , `kernel_size` 等参数对一个输入Tensor计算1D的平均池化。输入和输出都是3-D Tensor, +默认是以 `NCL` 格式表示的,其中 `N` 是 batch size, `C` 是通道数, `L` 是输入特征的长度。 + +.. 
note:: + 详细请参考对应的 `Class` 请参考: :ref:`cn_api_nn_AvgPool1d` 。 + + +参数 +::::::::: + - **x** (Tensor): 当前算子的输入, 其是一个形状为 `[N, C, L]` 的3-D Tensor。其中 `N` 是batch size, `C` 是通道数, `L` 是输入特征的长度。 其数据类型为float32或者float64。 + - **kernel_size** (int|list|tuple): 池化核的尺寸大小. 如果kernel_size为list或tuple类型, 其必须包含一个整数. + - **stride** (int|list|tuple): 池化操作步长. 如果stride为list或tuple类型, 其必须包含一个整数. + - **padding** (string|int|list|tuple): 池化补零的方式. 如果padding是一个字符串,则必须为 `SAME` 或者 `VALID` 。 如果是turple或者list类型, 则应是 `[pad_left, pad_right]` 形式。如果padding是一个非0值,那么表示会在输入的两端都padding上同样长度的0。 + - **count_include_pad** (bool): 是否用额外padding的值计算平均池化结果,默认为True。 + - **ceil_mode** (bool): 是否用ceil函数计算输出的height和width,如果设置为False, 则使用floor函数来计算,默认为False。 + - **name** (str,可选): 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + + +返回 +::::::::: +``Tensor``, 输入 `x` 经过平均池化计算得到的目标3-D Tensor,其数据类型与输入相同。 + +抛出异常 +::::::::: + - ``ValueError`` - 如果 ``padding`` 是字符串但不是 "SAME" 和 "VALID" 。 + - ``ValueError`` - 如果 ``padding`` 是 "VALID" 但 `ceil_mode` 被设置为True。 + - ``ValueError`` - 如果 ``padding`` 是一个长度大于1的list或turple。 + - ``ShapeError`` - 如果输入x不是一个3-D Tensor。 + - ``ShapeError`` - 如果计算得到的输出形状小于等于0。 + + + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import paddle.nn.functional as F + import numpy as np + paddle.disable_static() + + data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32)) + pool_out = F.avg_pool1d(data, kernel_size=2, stride=2, padding=0) + # pool_out shape: [1, 3, 16] diff --git a/doc/paddle/api/paddle/nn/functional/pooling/max_pool1d_cn.rst b/doc/paddle/api/paddle/nn/functional/pooling/max_pool1d_cn.rst new file mode 100755 index 0000000000000000000000000000000000000000..3ddefaa2d5f1a0c5b010cb3e3894fd3c1154656d --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/pooling/max_pool1d_cn.rst @@ -0,0 +1,56 @@ +.. _cn_api_nn_functional_max_pool1d: + + +max_pool1d +------------------------------- + +.. py:function:: paddle.nn.functional.max_pool1d(x, kernel_size, stride=None, padding=0, return_indices=False, ceil_mode=False, name=None) + +该算子根据输入 `x` , `kernel_size` 等参数对一个输入Tensor计算1D的最大值池化。输入和输出都是3-D Tensor, +默认是以 `NCL` 格式表示的,其中 `N` 是 batch size, `C` 是通道数, `L` 是输入特征的长度。 + +.. note:: + 详细请参考对应的 `Class` 请参考: :ref:`cn_api_nn_MaxPool1d` 。 + + +参数 +::::::::: + - **x** (Tensor): 当前算子的输入, 其是一个形状为 `[N, C, L]` 的3-D Tensor。其中 `N` 是batch size, `C` 是通道数, `L` 是输入特征的长度。 其数据类型为float32或者float64。 + - **kernel_size** (int|list|tuple): 池化核的尺寸大小. 如果kernel_size为list或tuple类型, 其必须包含一个整数. + - **stride** (int|list|tuple): 池化操作步长. 如果stride为list或tuple类型, 其必须包含一个整数. + - **padding** (string|int|list|tuple): 池化补零的方式. 如果padding是一个字符串,则必须为 `SAME` 或者 `VALID` 。如果是turple或者list类型, 则应是 `[pad_left, pad_right]` 形式。如果padding是一个非0值,那么表示会在输入的两端都padding上同样长度的0。 + - **return_indices** (bool): 是否返回最大值的索引,默认为False。 + - **ceil_mode** (bool): 是否用ceil函数计算输出的height和width,如果设置为False, 则使用floor函数来计算,默认为False。 + - **name** (str,可选): 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + + +返回 +::::::::: +``Tensor``, 输入 `x` 经过最大值池化计算得到的目标3-D Tensor,其数据类型与输入相同。 + + +抛出异常 +::::::::: + - ``ValueError`` - 如果 ``padding`` 是字符串但不是 "SAME" 和 "VALID" 。 + - ``ValueError`` - 如果 ``padding`` 是 "VALID" 但 `ceil_mode` 被设置为True。 + - ``ValueError`` - 如果 ``padding`` 是一个长度大于1的list或turple。 + - ``ShapeError`` - 如果输入x不是一个3-D Tensor。 + - ``ShapeError`` - 如果计算得到的输出形状小于等于0。 + + +代码示例 +::::::::: + +.. 
code-block:: python + + import paddle + import paddle.nn.functional as F + import numpy as np + paddle.disable_static() + + data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32)) + pool_out = F.max_pool1d(data, kernel_size=2, stride=2, padding=0) + # pool_out shape: [1, 3, 16] + + pool_out, indices = F.max_pool1d(data, kernel_size=2, stride=2, padding=0, return_indices=True) + # pool_out shape: [1, 3, 16], indices shape: [1, 3, 16] diff --git a/doc/paddle/api/paddle/nn/functional/vision/affine_grid_cn.rst b/doc/paddle/api/paddle/nn/functional/vision/affine_grid_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6fb027a1d996c787f999961775aa280508617004 --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/vision/affine_grid_cn.rst @@ -0,0 +1,31 @@ +.. _cn_api_fluid_layers_affine_grid: + +affine_grid +------------------------------- + +.. py:function:: paddle.fluid.layers.affine_grid(theta, out_shape, name=None) + + + + +该OP用于生成仿射变换前后的feature maps的坐标映射关系。在视觉应用中,根据该OP得到的映射关系,将输入feature map的像素点变换到对应的坐标,就得到了经过仿射变换的feature map。 + +参数: + - **theta** (Variable) - Shape为 ``[batch_size, 2, 3]`` 的Tensor,表示batch_size个 ``2X3`` 的变换矩阵。数据类型支持float32,float64。 + - **out_shape** (Variable | list | tuple) - 类型可以是1-D Tensor、list或tuple。用于表示在仿射变换中的输出的shape,其格式 ``[N, C, H, W]`` ,分别为输出feature map的batch size、channel数量、高和宽。数据类型支持int32。 + - **name** (None|str) – 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:api_guide_Name ,默认值为None。 + +返回: Variable。Shape为 ``[N, H, W, 2]`` 的4-D Tensor,表示仿射变换前后的坐标的映射关系。其中,N、H、W分别为仿射变换中输出feature map的batch size、高和宽。 数据类型与 ``theta`` 一致。 + +返回类型:Variable + +**代码示例:** + +.. code-block:: python + + import paddle.fluid as fluid + theta = fluid.layers.data(name="x", shape=[2, 3], dtype="float32") + out_shape = fluid.layers.data(name="y", shape=[-1], dtype="float32") + data = fluid.layers.affine_grid(theta, out_shape) + # or + data = fluid.layers.affine_grid(theta, [5, 3, 28, 28]) diff --git a/doc/paddle/api/paddle/nn/functional/vision/pixel_shuffle_cn.rst b/doc/paddle/api/paddle/nn/functional/vision/pixel_shuffle_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3324269a4936bca2993bf9b124f2d11760df2cb9 --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/vision/pixel_shuffle_cn.rst @@ -0,0 +1,42 @@ +.. _cn_api_fluid_layers_pixel_shuffle: + +pixel_shuffle +------------------------------- + +.. py:function:: paddle.fluid.layers.pixel_shuffle(x, upscale_factor) + + + + +该OP将一个形为[N, C, H, W]的Tensor重新排列成形为 [N, C/r**2, H*r, W*r] 的Tensor。这样做有利于实现步长(stride)为1/r的高效sub-pixel(亚像素)卷积。详见Shi等人在2016年发表的论文 `Real Time Single Image and Video Super Resolution Using an Efficient Sub Pixel Convolutional Neural Network `_ 。 + +.. code-block:: text + + 给定一个形为 x.shape = [1, 9, 4, 4] 的4-D张量 + 设定:upscale_factor=3 + 那么输出张量的形为:[1, 1, 12, 12] + +参数: + - **x** (Variable)- 维度为 :math:`[N_1, N_2, ..., N_k, D]` 的多维Tensor,其中最后一维D是类别数目。数据类型为float32或float64。 + - **upscale_factor** (int)- 增大空间分辨率的增大因子 + + +返回:根据新的维度信息进行重组的张量 + +返回类型: Variable + +抛出异常: ``ValueError`` - 如果upscale_factor的平方不能整除输入的通道维度(C)的大小。 + + +**示例代码** + +.. 
code-block:: python + + import paddle.fluid as fluid + input = fluid.layers.data(name="input", shape=[9,4,4]) + output = fluid.layers.pixel_shuffle(x=input, upscale_factor=3) + + + + + diff --git a/doc/paddle/api/paddle/nn/layer/activation/ELU_cn.rst b/doc/paddle/api/paddle/nn/layer/activation/ELU_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f2cb3cfb242282ccca4ffa6b7355adb2a9e9e1ca --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/activation/ELU_cn.rst @@ -0,0 +1,41 @@ +.. _cn_api_nn_ELU: + +ELU +------------------------------- +.. py:class:: paddle.nn.ELU(alpha=1.0, name=None) + +ELU激活层(ELU Activation Operator) + +根据 `Exponential Linear Units `_ 对输入Tensor中每个元素应用以下计算。 + +.. math:: + + ELU(x) = max(0, x) + min(0, \alpha * (e^{x} − 1)) + +其中,:math:`x` 为输入的 Tensor + +参数 +:::::::::: + - alpha (float, 可选) - ELU的alpha值,默认值为1.0。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状: +:::::::::: + - input: 任意形状的Tensor。 + - output: 和input具有相同形状的Tensor。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([[-1, 6],[1, 15.6]])) + m = paddle.nn.ELU(0.2) + out = m(x) + # [[-0.12642411 6. ] + # [ 1. 15.6 ]] diff --git a/doc/paddle/api/paddle/nn/layer/activation/GELU_cn.rst b/doc/paddle/api/paddle/nn/layer/activation/GELU_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3493bf7f6ea266f1c392908fc933c25a664af549 --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/activation/GELU_cn.rst @@ -0,0 +1,50 @@ +.. _cn_api_nn_GELU: + +GELU +------------------------------- +.. py:class:: paddle.nn.GELU(approximate=False, name=None) + +GELU激活层(GELU Activation Operator) + +逐元素计算 GELU激活函数。更多细节请参考 `Gaussian Error Linear Units `_ 。 + +如果使用近似计算: + +.. math:: + GELU(x) = 0.5 * x * (1 + tanh(\sqrt{\frac{2}{\pi}} * (x + 0.044715x^{3}))) + +如果不使用近似计算: + +.. math:: + GELU(x) = 0.5 * x * (1 + erf(\frac{x}{\sqrt{2}})) + + +其中,:math:`x` 为输入的 Tensor + +参数 +:::::::::: + - approximate (bool, 可选) - 是否使用近似计算,默认值为 False,即不使用近似计算。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状: +:::::::::: + - input: 任意形状的Tensor。 + - output: 和input具有相同形状的Tensor。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([[-1, 0.5],[1, 1.5]])) + + m = paddle.nn.GELU() + out = m(x) # [-0.158655 0.345731 0.841345 1.39979] + + m = paddle.nn.GELU(True) + out = m(x) # [-0.158808 0.345714 0.841192 1.39957] diff --git a/doc/paddle/api/paddle/nn/layer/activation/Hardshrink_cn.rst b/doc/paddle/api/paddle/nn/layer/activation/Hardshrink_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..124b6f3ea93642ecab7a935e5eb16adc05ba9a13 --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/activation/Hardshrink_cn.rst @@ -0,0 +1,40 @@ +.. _cn_api_nn_Hardshrink: + +Hardshrink +------------------------------- +.. py:class:: paddle.nn.Hardshrink(threshold=0.5, name=None) + +Hardshrink激活层 + +.. math:: + + Hardshrink(x)= + \left\{ + \begin{aligned} + &x, & & if \ x > threshold \\ + &x, & & if \ x < -threshold \\ + &0, & & if \ others + \end{aligned} + \right. + +其中,:math:`x` 为输入的 Tensor + +参数 +:::::::::: + - threshold (float, 可选) - Hardshrink激活计算公式中的threshold值。默认值为0.5。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状: + - input: 任意形状的Tensor。 + - output: 和input具有相同形状的Tensor。 + +代码示例 +::::::::: + +.. 
code-block:: python + + import paddle + paddle.disable_static() + x = paddle.to_tensor([-1, 0.3, 2.5]) + m = paddle.nn.Hardshrink() + out = m(x) # [-1., 0., 2.5] diff --git a/doc/paddle/api/paddle/nn/layer/activation/Hardtanh_cn.rst b/doc/paddle/api/paddle/nn/layer/activation/Hardtanh_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d3955f41d3d8a9177ec1d0cb42a8103fa39a83ea --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/activation/Hardtanh_cn.rst @@ -0,0 +1,45 @@ +.. _cn_api_nn_Hardtanh: + +Hardtanh +------------------------------- +.. py:class:: paddle.nn.Hardtanh(min=-1.0, max=1.0, name=None) + +Hardtanh激活层(Hardtanh Activation Operator)。计算公式如下: + +.. math:: + + Hardtanh(x)= + \left\{ + \begin{aligned} + &max, & & if \ x > max \\ + &min, & & if \ x < min \\ + &x, & & if \ others + \end{aligned} + \right. + +其中,:math:`x` 为输入的 Tensor + +参数 +:::::::::: + - min (float, 可选) - Hardtanh激活计算公式中的min值。默认值为-1。 + - max (float, 可选) - Hardtanh激活计算公式中的max值。默认值为1。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状: +:::::::::: + - input: 任意形状的Tensor。 + - output: 和input具有相同形状的Tensor。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([-1.5, 0.3, 2.5])) + m = paddle.nn.Hardtanh() + out = m(x) # # [-1., 0.3, 1.] diff --git a/doc/paddle/api/paddle/nn/layer/activation/LeakyReLU_cn.rst b/doc/paddle/api/paddle/nn/layer/activation/LeakyReLU_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..62d710903d488957b9360f88a448c4c267212023 --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/activation/LeakyReLU_cn.rst @@ -0,0 +1,42 @@ +.. _cn_api_nn_LeakyReLU: + +LeakyReLU +------------------------------- +.. py:class:: paddle.nn.LeakyReLU(negative_slope=0.01, name=None) + +LeakyReLU 激活层 + +.. math:: + + LeakyReLU(x)= + \left\{ + \begin{aligned} + &x, & & if \ x >= 0 \\ + &negative\_slope * x, & & otherwise \\ + \end{aligned} + \right. \\ + +其中,:math:`x` 为输入的 Tensor + +参数 +:::::::::: + - negative_slope (float,可选) - :math:`x < 0` 时的斜率。默认值为0.01。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状: + - input: 任意形状的Tensor。 + - output: 和input具有相同形状的Tensor。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + m = paddle.nn.LeakyReLU() + x = paddle.to_tensor(np.array([-2, 0, 1], 'float32')) + out = m(x) # [-0.02, 0., 1.] diff --git a/doc/paddle/api/paddle/nn/layer/activation/LogSigmoid_cn.rst b/doc/paddle/api/paddle/nn/layer/activation/LogSigmoid_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..754f59fce5b6ab07dd2c0c699d7f86bff9bd93bb --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/activation/LogSigmoid_cn.rst @@ -0,0 +1,36 @@ +.. _cn_api_nn_LogSigmoid: + +LogSigmoid +------------------------------- +.. py:class:: paddle.nn.LogSigmoid(name=None) + +LogSigmoid激活层。计算公式如下: + +.. math:: + + LogSigmoid(x) = \log \frac{1}{1 + e^{-x}} + +其中,:math:`x` 为输入的 Tensor + +参数 +:::::::::: + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状: +:::::::::: + - input: 任意形状的Tensor。 + - output: 和input具有相同形状的Tensor。 + +代码示例 +::::::::: + +.. 
code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([1.0, 2.0, 3.0, 4.0])) + m = paddle.nn.LogSigmoid() + out = m(x) # [-0.313262 -0.126928 -0.0485874 -0.0181499] diff --git a/doc/paddle/api/paddle/nn/layer/activation/LogSoftmax_cn.rst b/doc/paddle/api/paddle/nn/layer/activation/LogSoftmax_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..96bbc3a886f9535bf02cb1954645d5182e669120 --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/activation/LogSoftmax_cn.rst @@ -0,0 +1,47 @@ +.. _cn_api_nn_LogSoftmax: + +LogSoftmax +------------------------------- +.. py:class:: paddle.nn.LogSoftmax(axis=-1, name=None) + +LogSoftmax激活层,计算公式如下: + +.. math:: + + Out[i, j] = log(softmax(x)) + = log(\\frac{\exp(X[i, j])}{\sum_j(exp(X[i, j])}) + +参数 +:::::::::: + - axis (int, 可选) - 指定对输入Tensor进行运算的轴。``axis`` 的有效范围是[-D, D),D是输入Tensor的维度, ``axis`` 为负值时与 :math:`axis + D` 等价。默认值为-1。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状: + - input: 任意形状的Tensor。 + - output: 和input具有相同形状的Tensor。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + x = np.array([[[-2.0, 3.0, -4.0, 5.0], + [3.0, -4.0, 5.0, -6.0], + [-7.0, -8.0, 8.0, 9.0]], + [[1.0, -2.0, -3.0, 4.0], + [-5.0, 6.0, 7.0, -8.0], + [6.0, 7.0, 8.0, 9.0]]], 'float32') + m = paddle.nn.LogSoftmax() + x = paddle.to_tensor(x) + out = m(x) + # [[[ -7.1278396 -2.1278396 -9.127839 -0.12783948] + # [ -2.1270514 -9.127051 -0.12705144 -11.127051 ] + # [-16.313261 -17.313261 -1.3132617 -0.31326184]] + # [[ -3.0518122 -6.051812 -7.051812 -0.051812 ] + # [-12.313267 -1.3132664 -0.3132665 -15.313267 ] + # [ -3.4401896 -2.4401896 -1.4401896 -0.44018966]]] diff --git a/doc/paddle/api/paddle/nn/layer/activation/PReLU_cn.rst b/doc/paddle/api/paddle/nn/layer/activation/PReLU_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d36cee798c13583d8abbe05aacc8d3e44c791bbc --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/activation/PReLU_cn.rst @@ -0,0 +1,54 @@ +.. _cn_api_nn_PReLU: + +PReLU +------------------------------- +.. py:class:: paddle.nn.PReLU(num_parameters=1, init=0.25, weight_attr=None, name=None) + +PReLU激活层(PReLU Activation Operator)。计算公式如下: + +如果使用近似计算: + +.. math:: + + PReLU(x) = max(0, x) + weight * min(0, x) + +其中,:math:`x` 为输入的 Tensor + +参数 +:::::::::: + - num_parameters (int, 可选) - 可训练`weight`数量,支持2种输入:1 - 输入中的所有元素使用同一个`weight`值; 输入的通道数 - 在同一个通道中的元素使用同一个`weight`值。默认为1。 + - init (float, 可选) - `weight`的初始值。默认为0.25。 + - weight_attr (ParamAttr, 可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状: +:::::::::: + - input: 任意形状的Tensor,默认数据类型为float32。 + - output: 和input具有相同形状的Tensor。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + paddle.set_default_dtype("float64") + + data = np.array([[[[-2.0, 3.0, -4.0, 5.0], + [ 3.0, -4.0, 5.0, -6.0], + [-7.0, -8.0, 8.0, 9.0]], + [[ 1.0, -2.0, -3.0, 4.0], + [-5.0, 6.0, 7.0, -8.0], + [ 6.0, 7.0, 8.0, 9.0]]]], 'float64') + x = paddle.to_tensor(data) + m = paddle.nn.PReLU(1, 0.25) + out = m(x) + # [[[[-0.5 , 3. , -1. , 5. ], + # [ 3. , -1. , 5. , -1.5 ], + # [-1.75, -2. , 8. , 9. ]], + # [[ 1. , -0.5 , -0.75, 4. ], + # [-1.25, 6. , 7. , -2. ], + # [ 6. , 7. , 8. , 9. 
]]]] \ No newline at end of file diff --git a/doc/paddle/api/paddle/nn/layer/activation/ReLU6_cn.rst b/doc/paddle/api/paddle/nn/layer/activation/ReLU6_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0ba5f2af0ec631c33f97e3091044309d20906868 --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/activation/ReLU6_cn.rst @@ -0,0 +1,36 @@ +.. _cn_api_nn_ReLU6: + +ReLU6 +------------------------------- +.. py:class:: paddle.nn.ReLU6(name=None) + +ReLU6激活层 + +.. math:: + + ReLU6(x) = min(max(0,x), 6) + +其中,:math:`x` 为输入的 Tensor + +参数 +:::::::::: + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状: +:::::::::: + - input: 任意形状的Tensor。 + - output: 和input具有相同形状的Tensor。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([-1, 0.3, 6.5])) + m = paddle.nn.ReLU6() + out = m(x) # [0, 0.3, 6] diff --git a/doc/paddle/api/paddle/nn/layer/activation/ReLU_cn.rst b/doc/paddle/api/paddle/nn/layer/activation/ReLU_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ac39225bf61db1653fc7d62a7339a824f992aa0e --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/activation/ReLU_cn.rst @@ -0,0 +1,36 @@ +.. _cn_api_nn_ReLU: + +ReLU +------------------------------- +.. py:class:: paddle.nn.ReLU(name=None) + +ReLU激活层(Rectified Linear Unit)。计算公式如下: + +.. math:: + + ReLU(x) = max(0, x) + +其中,:math:`x` 为输入的 Tensor + +参数 +:::::::::: + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状: +:::::::::: + - input: 任意形状的Tensor。 + - output: 和input具有相同形状的Tensor。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([-2, 0, 1]).astype('float32')) + m = paddle.nn.ReLU() + out = m(x) # [0., 0., 1.] diff --git a/doc/paddle/api/paddle/nn/layer/activation/SELU_cn.rst b/doc/paddle/api/paddle/nn/layer/activation/SELU_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2bd2ce11b2bf60c8e1aafd5f1fe2d44c51ee1f30 --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/activation/SELU_cn.rst @@ -0,0 +1,42 @@ +.. _cn_api_nn_SELU: + +SELU +------------------------------- +.. py:class:: paddle.nn.SELU(scale=1.0507009873554804934193349852946, alpha=1.6732632423543772848170429916717, name=None) + +SELU激活层 + +.. math:: + + SELU(x)= scale * + \begin{cases} + x, \text{if } x > 0 \\ + alpha * e^{x} - alpha, \text{if } x <= 0 + \end{cases} + +其中,:math:`x` 为输入的 Tensor + +参数 +:::::::::: + - scale (float, 可选) - SELU激活计算公式中的scale值,必须大于1.0。默认值为1.0507009873554804934193349852946。 + - alpha (float, 可选) - SELU激活计算公式中的alpha值,必须大于等于零。默认值为1.6732632423543772848170429916717。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状: +:::::::::: + - input: 任意形状的Tensor。 + - output: 和input具有相同形状的Tensor。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([[0.0, 1.0],[2.0, 3.0]])) + m = paddle.nn.SELU() + out = m(x) # [[0, 1.050701],[2.101402, 3.152103]] diff --git a/doc/paddle/api/paddle/nn/layer/activation/Sigmoid_cn.rst b/doc/paddle/api/paddle/nn/layer/activation/Sigmoid_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..10698265ff6a4f4a149ec37f0bb198d51e2d402a --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/activation/Sigmoid_cn.rst @@ -0,0 +1,37 @@ +.. _cn_api_nn_layer_Sigmoid: + +Sigmoid +------------------------------- + +.. 
py:class:: paddle.nn.layer.Sigmoid(name=None) + +该接口用于创建一个 ``Sigmoid`` 的可调用类。 这个类可以计算输入 `x` 经过激活函数 `sigmoid` 之后的值。 + + .. math:: + + output = \frac{1}{1 + e^{-x}} + +参数 +:::::::: + - **name** (str,可选)- 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +形状 +:::::::: + - **x** (Tensor)- N-D tensor, 可以支持的数据类型是float16,float32,float64。 + +返回 +:::::::: + 返回计算 ``Sigmoid`` 的可调用对象。 + + +代码示例 +:::::::: + +.. code-block:: python + + import paddle + paddle.disable_static() + m = paddle.nn.Sigmoid() + x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0]) + output = m(x) + print(output.numpy()) # [0.7310586, 0.880797, 0.95257413, 0.98201376 diff --git a/doc/paddle/api/paddle/nn/layer/activation/Softmax_cn.rst b/doc/paddle/api/paddle/nn/layer/activation/Softmax_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ba6f60076ea114a210ac1d0a4b919a0b99a02a0f --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/activation/Softmax_cn.rst @@ -0,0 +1,117 @@ +.. _cn_api_nn_Softmax: + +Softmax +------------------------------- +.. py:class:: paddle.nn.Softmax(axis=-1, name=None) + +Softmax激活层,OP的计算过程如下: + +步骤1:输入 ``x`` 的 ``axis`` 维会被置换到最后一维; + +步骤2:将输入 ``x`` 在逻辑上变换为二维矩阵。二维矩阵第一维(列长度)是输入除最后一维之外的其他维度值的乘积,第二维(行长度)和输入 ``axis`` 维的长度相同;对于矩阵的每一行,softmax操作对其进行重新缩放,使得该行的每个元素在 \[0,1\] 范围内,并且总和为1; + +步骤3:softmax操作执行完成后,执行步骤1和步骤2的逆运算,将二维矩阵恢复至和输入 ``x`` 相同的维度。 + +上述步骤2中softmax操作计算过程如下: + + - 对于二维矩阵的每一行,计算K维向量(K是输入第 ``axis`` 维的长度)中指定位置的指数值和全部位置指数值的和。 + + - 指定位置指数值与全部位置指数值之和的比值就是softmax操作的输出。 + +对于二维矩阵中的第i行和第j列有: + +.. math:: + + Softmax[i, j] = \frac{\exp(x[i, j])}{\sum_j(exp(x[i, j])} + +- 示例1(矩阵一共有三维。axis = -1,表示沿着最后一维(即第三维)做softmax操作) + +.. code-block:: text + + # input + + x.shape = [2, 3, 4] + + x.data = [[[2.0, 3.0, 4.0, 5.0], + [3.0, 4.0, 5.0, 6.0], + [7.0, 8.0, 8.0, 9.0]], + [[1.0, 2.0, 3.0, 4.0], + [5.0, 6.0, 7.0, 8.0], + [6.0, 7.0, 8.0, 9.0]]] + + axis = -1 + + # output + + out.shape = [2, 3, 4] + + out.data = [[[0.0320586 , 0.08714432, 0.23688282, 0.64391426], + [0.0320586 , 0.08714432, 0.23688282, 0.64391426], + [0.07232949, 0.19661193, 0.19661193, 0.53444665]], + [[0.0320586 , 0.08714432, 0.23688282, 0.64391426], + [0.0320586 , 0.08714432, 0.23688282, 0.64391426], + [0.0320586 , 0.08714432, 0.23688282, 0.64391426]]] + +- 示例2(矩阵一共有三维。axis = 1,表示沿着第二维做softmax操作) + +.. code-block:: text + + # input + + x.shape = [2, 3, 4] + + x.data = [[[2.0, 3.0, 4.0, 5.0], + [3.0, 4.0, 5.0, 6.0], + [7.0, 8.0, 8.0, 9.0]], + [[1.0, 2.0, 3.0, 4.0], + [5.0, 6.0, 7.0, 8.0], + [6.0, 7.0, 8.0, 9.0]]] + + axis = 1 + + # output + + out.shape = [2, 3, 4] + + out.data = [[[0.00657326, 0.00657326, 0.01714783, 0.01714783], + [0.01786798, 0.01786798, 0.04661262, 0.04661262], + [0.97555875, 0.97555875, 0.93623955, 0.93623955]], + [[0.00490169, 0.00490169, 0.00490169, 0.00490169], + [0.26762315, 0.26762315, 0.26762315, 0.26762315], + [0.72747516, 0.72747516, 0.72747516, 0.72747516]]] + +参数 +:::::::::: + - axis (int, 可选) - 指定对输入Tensor进行运算的轴。``axis`` 的有效范围是[-D, D),D是输入Tensor的维度, ``axis`` 为负值时与 :math:`axis + D` 等价。默认值为-1。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状: +:::::::::: + - input: 任意形状的Tensor。 + - output: 和input具有相同形状的Tensor。 + +代码示例 +:::::::::: + +.. 
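作为补充,下面给出一个简单的校验示例(仅作示意):Softmax 的输出沿 ``axis`` 维求和应当(近似)恒等于 1。

.. code-block:: python

    import paddle
    import numpy as np

    paddle.disable_static()

    x = paddle.to_tensor(np.random.rand(2, 3, 4).astype('float32'))
    m = paddle.nn.Softmax()          # 默认 axis=-1
    out = m(x)
    # 沿最后一维求和,各元素应全部(近似)等于 1
    print(paddle.sum(out, axis=-1).numpy())

.. 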
code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + x = np.array([[[-2.0, 3.0, -4.0, 5.0], + [3.0, -4.0, 5.0, -6.0], + [-7.0, -8.0, 8.0, 9.0]], + [[1.0, -2.0, -3.0, 4.0], + [-5.0, 6.0, 7.0, -8.0], + [6.0, 7.0, 8.0, 9.0]]], 'float32') + x = paddle.to_tensor(x) + m = paddle.nn.Softmax() + out = m(x) + # [[[0.0320586 , 0.08714432, 0.23688282, 0.64391426], + # [0.0320586 , 0.08714432, 0.23688282, 0.64391426], + # [0.07232949, 0.19661193, 0.19661193, 0.53444665]], + # [[0.0320586 , 0.08714432, 0.23688282, 0.64391426], + # [0.0320586 , 0.08714432, 0.23688282, 0.64391426], + # [0.0320586 , 0.08714432, 0.23688282, 0.64391426]]] diff --git a/doc/paddle/api/paddle/nn/layer/activation/Softplus_cn.rst b/doc/paddle/api/paddle/nn/layer/activation/Softplus_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..cce23fcb7046fbdc66eb61a099517077a6378750 --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/activation/Softplus_cn.rst @@ -0,0 +1,39 @@ +.. _cn_api_nn_Softplus: + +Softplus +------------------------------- +.. py:class:: paddle.nn.Softplus(beta=1, threshold=20, name=None) + +Softplus激活层 + +.. math:: + + Softplus(x) = \frac{1}{beta} * \log(1 + e^{beta * x}) \\ + \text{为了保证数值稳定性, 当}\,beta * x > threshold\,\text{时,函数转变为线性函数x}. + +其中,:math:`x` 为输入的 Tensor + +参数 +:::::::::: + - beta (float, 可选) - Softplus激活计算公式中的beta值。默认值为1。 + - threshold (float, 可选) - Softplus激活计算公式中的threshold值。默认值为20。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状: +:::::::::: + - input: 任意形状的Tensor。 + - output: 和input具有相同形状的Tensor。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3])) + m = paddle.nn.Softplus() + out = m(x) # [0.513015, 0.598139, 0.744397, 0.854355] diff --git a/doc/paddle/api/paddle/nn/layer/activation/Softshrink_cn.rst b/doc/paddle/api/paddle/nn/layer/activation/Softshrink_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d04cf74e560a9176824a324d7ecd340e227b59dc --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/activation/Softshrink_cn.rst @@ -0,0 +1,41 @@ +.. _cn_api_nn_Softshrink: + +Softshrink +------------------------------- +.. py:class:: paddle.nn.Softshrink(threshold=0.5, name=None) + +Softshrink激活层 + +.. math:: + + Softshrink(x)= \begin{cases} + x - threshold, \text{if } x > threshold \\ + x + threshold, \text{if } x < -threshold \\ + 0, \text{otherwise} + \end{cases} + +其中,:math:`x` 为输入的 Tensor + +参数 +:::::::::: + - threshold (float, 可选) - Softshrink激活计算公式中的threshold值,必须大于等于零。默认值为0.5。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状: +:::::::::: + - input: 任意形状的Tensor。 + - output: 和input具有相同形状的Tensor。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([-0.9, -0.2, 0.1, 0.8])) + m = paddle.nn.Softshrink() + out = m(x) # [-0.4, 0, 0, 0.3] diff --git a/doc/paddle/api/paddle/nn/layer/activation/Softsign_cn.rst b/doc/paddle/api/paddle/nn/layer/activation/Softsign_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8029d97d80dc9bcf506bb127fc2499f7564029de --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/activation/Softsign_cn.rst @@ -0,0 +1,36 @@ +.. _cn_api_nn_Softsign: + +Softsign +------------------------------- +.. py:class:: paddle.nn.Softsign(name=None) + +Softsign激活层 + +.. 
math:: + + Softsign(x) = \frac{x}{1 + |x|} + +其中,:math:`x` 为输入的 Tensor + +参数 +:::::::::: + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状: +:::::::::: + - input: 任意形状的Tensor。 + - output: 和input具有相同形状的Tensor。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3])) + m = paddle.nn.Softsign() + out = m(x) # [-0.285714, -0.166667, 0.0909091, 0.230769] diff --git a/doc/paddle/api/paddle/nn/layer/activation/Tanh_cn.rst b/doc/paddle/api/paddle/nn/layer/activation/Tanh_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4f97cdb57f053dd24bca94e945afae7ffa6fd04a --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/activation/Tanh_cn.rst @@ -0,0 +1,35 @@ +.. _cn_api_nn_Tanh: + +Tanh +------------------------------- +.. py:class:: paddle.nn.Tanh(name=None) + +Tanh激活层 + +.. math:: + Tanh(x) = \frac{e^{x} - e^{-x}}{e^{x} + e^{-x}} + + +参数 +:::::::::: + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状: + - input: 任意形状的Tensor。 + - output: 和input具有相同形状的Tensor。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3])) + m = paddle.nn.Tanh() + out = m(x) + print(out.numpy()) + # [-0.37994896 -0.19737532 0.09966799 0.29131261] \ No newline at end of file diff --git a/doc/paddle/api/paddle/nn/layer/activation/Tanhshrink_cn.rst b/doc/paddle/api/paddle/nn/layer/activation/Tanhshrink_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6c657fd800a7d4a475bb0277df42bf9dfea7eeb2 --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/activation/Tanhshrink_cn.rst @@ -0,0 +1,36 @@ +.. _cn_api_nn_Tanhshrink: + +Tanhshrink +------------------------------- +.. py:class:: paddle.nn.Tanhshrink(name=None) + +Tanhshrink激活层 + +.. math:: + + Tanhshrink(x) = x - tanh(x) + +其中,:math:`x` 为输入的 Tensor + +参数 +:::::::::: + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状: +:::::::::: + - input: 任意形状的Tensor。 + - output: 和input具有相同形状的Tensor。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3])) + m = paddle.nn.Tanhshrink() + out = m(x) # [-0.020051, -0.00262468, 0.000332005, 0.00868739] diff --git a/doc/paddle/api/paddle/nn/layer/common/AlphaDropout_cn.rst b/doc/paddle/api/paddle/nn/layer/common/AlphaDropout_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8dfdac7935a55010b9891b9835b8b2446629e75c --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/common/AlphaDropout_cn.rst @@ -0,0 +1,42 @@ +.. _cn_api_nn_AlphaDropout: + +AlphaDropout +------------------------------- + +.. py:function:: paddle.nn.AlphaDropout(p=0.5, name=None) + +AlphaDropout是一种具有自归一化性质的dropout。均值为0,方差为1的输入,经过AlphaDropout计算之后,输出的均值和方差与输入保持一致。AlphaDropout通常与SELU激活函数组合使用。论文请参考: `Self-Normalizing Neural Networks `_ + +在动态图模式下,请使用模型的 `eval()` 方法切换至测试阶段。 + +.. note:: + 对应的 `functional方法` 请参考: :ref:`cn_api_nn_functional_alpha_dropout` 。 + +参数 +::::::::: + - **p** (float): 将输入节点置0的概率,即丢弃概率。默认: 0.5。 + - **name** (str,可选): 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +返回 +::::::::: +经过AlphaDropout之后的结果,与输入x形状相同的 `Tensor` 。 + +代码示例 +::::::::: + +.. 
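作为补充,下面的示例(仅作示意,结果带有随机性)粗略验证 AlphaDropout 的自归一化性质:对均值约为 0、方差约为 1 的输入,输出的均值和方差大体保持不变。

.. code-block:: python

    import paddle
    import numpy as np

    paddle.disable_static()

    # 构造近似服从标准正态分布的输入
    x = paddle.to_tensor(np.random.randn(10000).astype('float32'))
    m = paddle.nn.AlphaDropout(p=0.5)
    y = m(x)
    # 输出的均值和标准差应与输入大致相同(存在随机波动)
    print(x.numpy().mean(), x.numpy().std())
    print(y.numpy().mean(), y.numpy().std())

.. 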
code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + x = np.array([[-1, 1], [-1, 1]]).astype('float32') + x = paddle.to_tensor(x) + m = paddle.nn.AlphaDropout(p=0.5) + y_train = m(x) + m.eval() # switch the model to test phase + y_test = m(x) + print(x.numpy()) + print(y_train.numpy()) + # [[-0.10721093, 1.6655989 ], [-0.7791938, -0.7791938]] (randomly) + print(y_test.numpy()) diff --git a/doc/paddle/api/paddle/nn/layer/common/Bilinear_cn.rst b/doc/paddle/api/paddle/nn/layer/common/Bilinear_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..55ccb27c78fdfea567ed8195aa6fd0828ee64cc6 --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/common/Bilinear_cn.rst @@ -0,0 +1,59 @@ +.. _cn_api_nn_Bilinear: + +Bilinear +------------------------------- + +.. py:function:: paddle.nn.Bilinear(in1_features, in2_features, out_features, weight_attr=None, bias_attr=None, name=None) + +该层对两个输入执行双线性张量积。 + +例如: + +.. math:: + + out_{i} = x1 * W_{i} * {x2^\mathrm{T}}, i=0,1,...,size-1 + + out = out + b + +在这个公式中: + - :math:`x1`: 第一个输入,包含 :in1_features个元素,形状为 [batch_size, in1_features]。 + - :math:`x2`: 第二个输入,包含 :in2_features个元素,形状为 [batch_size, in2_features]。 + - :math:`W_{i}`: 第 :i个被学习的权重,形状是 [in1_features, in2_features]。 + - :math:`out_{i}`: 输出的第 :i个元素,形状是 [batch_size, out_features]。 + - :math:`b`: 被学习的偏置参数,形状是 [1, out_features]。 + - :math:`x2^\mathrm{T}`: :math:`x2` 的转置。 + +参数 +::::::::: + - **in1_features** (int): 每个 **x1** 元素的维度。 + - **in2_features** (int): 每个 **x2** 元素的维度。 + - **out_features** (int): 输出张量的维度。 + - **weight_attr** (ParamAttr,可选) :指定权重参数属性的对象。默认值为 None,表示使用默认的权重参数属性。 + - **bias_attr** (ParamAttr,可选) : 指定偏置参数属性的对象。默认值为 None,表示使用默认的偏置参数属性,此时bias的元素会被初始化成0。如果设置成False,则不会有bias加到output结果上。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为 None。 + +属性 +::::::::: + - **weight** 本层的可学习参数,类型为 Parameter + - **bias** 本层的可学习偏置,类型为 Parameter + +返回 +::::::::: +``Tensor``,一个形为 [batch_size, out_features] 的 2-D 张量。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy + + paddle.disable_static() + layer1 = numpy.random.random((5, 5)).astype('float32') + layer2 = numpy.random.random((5, 4)).astype('float32') + bilinear = paddle.nn.Bilinear( + in1_features=5, in2_features=4, out_features=1000) + result = bilinear(paddle.to_tensor(layer1), + paddle.to_tensor(layer2)) # result shape [5, 1000] + diff --git a/doc/paddle/api/paddle/nn/layer/common/ConstantPad1d_cn.rst b/doc/paddle/api/paddle/nn/layer/common/ConstantPad1d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d7025d80e873242b892d0e1deda4bcac895b2e37 --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/common/ConstantPad1d_cn.rst @@ -0,0 +1,36 @@ +.. _cn_api_nn_ConstantPad1d: + +ConstantPad1d +------------------------------- +.. py:class:: paddle.nn.ConstantPad1d(padding, value=0.0, data_format="NCL", name=None) + +**ConstantPad1d** + +按照 padding 对输入 以constant模式进行 ``pad``,即填充固定值。 + +参数: + - **padding** (Tensor | List[int32]) - 填充大小。pad的格式为[pad_left, pad_right]。 + - **value** (float32) - 待填充的值,默认值为0.0。 + - **data_format** (str) - 指定input的format,可为 `'NCL'` 或者 `'NLC'`,默认值为`'NCL'`。 + - **name** (str, 可选) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,缺省值为None。 + +返回:无 + +**代码示例** + +.. 
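作为补充,下面的示例(仅作示意)展示通过 ``value`` 参数填充 0 以外的固定值,这里以 -1 为例。

.. code-block:: python

    import paddle
    import paddle.nn as nn
    import numpy as np
    paddle.disable_static()

    input_shape = (1, 2, 3)
    pad = [1, 2]
    data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1
    my_pad = nn.ConstantPad1d(padding=pad, value=-1.0)
    data = paddle.to_tensor(data)
    result = my_pad(data)
    print(result.numpy())
    # [[[-1.  1.  2.  3. -1. -1.]
    #   [-1.  4.  5.  6. -1. -1.]]]

.. 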
code-block:: python + + import paddle + import paddle.nn as nn + import numpy as np + paddle.disable_static() + + input_shape = (1, 2, 3) + pad = [1, 2] + data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1 + my_pad = nn.ConstantPad1d(padding=pad) + data = paddle.to_tensor(data) + result = my_pad(data) + print(result.numpy()) + # [[[0. 1. 2. 3. 0. 0.] + # [0. 4. 5. 6. 0. 0.]]] diff --git a/doc/paddle/api/paddle/nn/layer/common/ConstantPad2d_cn.rst b/doc/paddle/api/paddle/nn/layer/common/ConstantPad2d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3e8f3e14a80fa3b1e8bd78656fb66b2eb38e9f6d --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/common/ConstantPad2d_cn.rst @@ -0,0 +1,39 @@ +.. _cn_api_nn_ConstantPad2d: + +ConstantPad2d +------------------------------- +.. py:class:: paddle.nn.ConstantPad2d(padding, value=0.0, data_format="NCHW", name=None) + +**ConstantPad2d** + +按照 padding 对输入 以constant模式进行 ``pad``,即填充固定值。 + +参数: + - **padding** (Tensor | List[int32]) - 填充大小。pad的格式为[pad_left, pad_right, pad_top, pad_bottom]。 + - **value** (float32) - 待填充的值,默认值为0.0。 + - **data_format** (str) - 指定input的format,可为 `'NCHW'` 或者 `'NHWC'`,默认值为`'NCHW'`。 + - **name** (str, 可选) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,缺省值为None。 + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle + import paddle.nn as nn + import numpy as np + paddle.disable_static() + + input_shape = (1, 1, 2, 3) + pad = [1, 0, 1, 2] + data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1 + my_pad = nn.ConstantPad2d(padding=pad) + data = paddle.to_tensor(data) + result = my_pad(data) + print(result.numpy()) + # [[[[0. 0. 0. 0.] + # [0. 1. 2. 3.] + # [0. 4. 5. 6.] + # [0. 0. 0. 0.] + # [0. 0. 0. 0.]]]] diff --git a/doc/paddle/api/paddle/nn/layer/common/ConstantPad3d_cn.rst b/doc/paddle/api/paddle/nn/layer/common/ConstantPad3d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..35ad8aa795d3d0704f7dbac79ba06b6cd5223b7c --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/common/ConstantPad3d_cn.rst @@ -0,0 +1,39 @@ +.. _cn_api_nn_ConstantPad3d: + +ConstantPad3d +------------------------------- +.. py:class:: paddle.nn.ConstantPad3d(padding, value=0.0, data_format="NCDHW", name=None) + +**ConstantPad3d** + +按照 padding 对输入 以constant模式进行 ``pad``,即填充固定值。 + +参数: + - **padding** (Tensor | List[int32]) - 填充大小。pad的格式为[pad_left, pad_right, pad_top, pad_bottom, pad_front, pad_back]。 + - **value** (float32) - 待填充的值,默认值为0.0。 + - **data_format** (str) - 指定input的format,可为 `'NCDHW'` 或者 `'NDHWC'`,默认值为`'NCDHW'`。 + - **name** (str, 可选) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,缺省值为None。 + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle + import paddle.nn as nn + import numpy as np + paddle.disable_static() + + input_shape = (1, 1, 1, 2, 3) + pad = [1, 0, 1, 2, 0, 0] + data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1 + my_pad = nn.ConstantPad3d(padding=pad) + data = paddle.to_tensor(data) + result = my_pad(data) + print(result.numpy()) + # [[[[[0. 0. 0. 0.] + # [0. 1. 2. 3.] + # [0. 4. 5. 6.] + # [0. 0. 0. 0.] + # [0. 0. 0. 0.]]]]] diff --git a/doc/paddle/api/paddle/nn/layer/common/CosineSimilarity_cn.rst b/doc/paddle/api/paddle/nn/layer/common/CosineSimilarity_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a11bb606ad14afd7c91871c10b618e5255209bde --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/common/CosineSimilarity_cn.rst @@ -0,0 +1,35 @@ +.. 
_cn_api_nn_CosineSimilarity: + +CosineSimilarity +------------------------------- +.. py:class:: paddle.nn.CosineSimilarity(axis=1, eps=1e-8) + +**CosineSimilarity** + +计算x1与x2沿axis维度的余弦相似度。 + +参数: + - **axis** (int) - 指定计算的维度,会在该维度上计算余弦相似度,默认值为1。 + - **eps** (float) - 很小的值,防止计算时分母为0,默认值为1e-8。 + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle + import paddle.nn as nn + import numpy as np + paddle.disable_static() + + np.random.seed(0) + x1 = np.random.rand(2,3) + x2 = np.random.rand(2,3) + x1 = paddle.to_tensor(x1) + x2 = paddle.to_tensor(x2) + + cos_sim_func = nn.CosineSimilarity(axis=0) + result = cos_sim_func(x1, x2) + print(result.numpy()) + # [0.99806249 0.9817672 0.94987036] diff --git a/doc/paddle/api/paddle/nn/layer/common/Dropout_cn.rst b/doc/paddle/api/paddle/nn/layer/common/Dropout_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..345de11e7060b6d43b369d6f078cd7c038eb6a46 --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/common/Dropout_cn.rst @@ -0,0 +1,55 @@ +.. _cn_api_nn_Dropout: + +Dropout +------------------------------- + +.. py:function:: paddle.nn.Dropout(p=0.5, axis=None, mode="upscale_in_train”, name=None) + +Dropout是一种正则化手段,该算子根据给定的丢弃概率 `p` ,在训练过程中随机将一些神经元输出设置为0,通过阻止神经元节点间的相关性来减少过拟合。论文请参考: `Improving neural networks by preventing co-adaptation of feature detectors `_ + +在动态图模式下,请使用模型的 `eval()` 方法切换至测试阶段。 + +.. note:: + 对应的 `functional方法` 请参考: :ref:`cn_api_nn_functional_dropout` 。 + +参数 +::::::::: + - **p** (float): 将输入节点置为0的概率, 即丢弃概率。默认: 0.5。 + - **axis** (int|list): 指定对输入 `Tensor` 进行Dropout操作的轴。默认: None。 + - **mode** (str): 丢弃单元的方式,有两种'upscale_in_train'和'downscale_in_infer',默认: 'upscale_in_train'。计算方法如下: + + 1. upscale_in_train, 在训练时增大输出结果。 + + - train: out = input * mask / ( 1.0 - p ) + - inference: out = input + + 2. downscale_in_infer, 在预测时减小输出结果 + + - train: out = input * mask + - inference: out = input * (1.0 - p) + + - **name** (str,可选): 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +形状 +::::::::: + - **输入** : N-D `Tensor` 。 + - **输出** : N-D `Tensor` ,形状与输入相同。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + x = np.array([[1,2,3], [4,5,6]]).astype('float32') + x = paddle.to_tensor(x) + m = paddle.nn.Dropout(p=0.5) + y_train = m(x) + m.eval() # switch the model to test phase + y_test = m(x) + print(x.numpy()) + print(y_train.numpy()) + print(y_test.numpy()) diff --git a/doc/paddle/api/paddle/nn/layer/common/Embedding_cn.rst b/doc/paddle/api/paddle/nn/layer/common/Embedding_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..35e3ce8e593e37de1d6d2cd8714ab2c749c71ed0 --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/common/Embedding_cn.rst @@ -0,0 +1,102 @@ +.. _cn_api_fluid_dygraph_Embedding: + +Embedding +------------------------------- + +.. 
py:class:: paddle.fluid.dygraph.Embedding(size, is_sparse=False, is_distributed=False, padding_idx=None, param_attr=None, dtype='float32') + + + + +嵌入层(Embedding Layer) + +该接口用于构建 ``Embedding`` 的一个可调用对象,具体用法参照 ``代码示例`` 。其根据input中的id信息从embedding矩阵中查询对应embedding信息,并会根据输入的size (vocab_size, emb_size)和dtype自动构造一个二维embedding矩阵。 + +输出的Tensor的shape是在输入Tensor shape的最后一维后面添加了emb_size的维度。 + +注:input中的id必须满足 ``0 =< id < size[0]``,否则程序会抛异常退出。 + + +:: + + Case 1: + + input是Tensor, 且padding_idx = -1 + input.data = [[1, 3], [2, 4], [4, 127]] + input.shape = [3, 2] + 若size = [128, 16] + 输出为Tensor: + out.shape = [3, 2, 16] + out.data = [[[0.129435295, 0.244512452, ..., 0.436322452], + [0.345421456, 0.524563927, ..., 0.144534654]], + + [[0.345249859, 0.124939536, ..., 0.194353745], + [0.945345345, 0.435394634, ..., 0.435345365]], + + [[0.945345345, 0.435394634, ..., 0.435345365], + [0.0, 0.0, ..., 0.0 ]]] # padding data + 输入的padding_idx小于0,则自动转换为padding_idx = -1 + 128 = 127, 对于输入id为127的词,进行padding处理。 + + Case 2: + + input是lod level 为1的LoDTensor, 且padding_idx = 0 + input.lod = [[2, 3]] + input.data = [[1], [3], [2], [4], [0]] + input.shape = [5, 1] + 若size = [128, 16] + 输出为LoDTensor: + out.lod = [[2, 3]] + out.shape = [5, 1, 16] + out.data = [[[0.129435295, 0.244512452, ..., 0.436322452]], + [[0.345421456, 0.524563927, ..., 0.144534654]], + [[0.345249859, 0.124939536, ..., 0.194353745]], + [[0.945345345, 0.435394634, ..., 0.435345365]], + [[0.0, 0.0, ..., 0.0 ]]] # padding data + 输入的padding_idx = 0,则对于输入id为0的词,进行padding处理。 + +参数: + - **size** (tuple|list) - embedding矩阵的维度。必须包含两个元素,第一个元素为vocab_size(词表大小), 第二个为emb_size(embedding层维度)。 + - **is_sparse** (bool) - 是否使用稀疏的更新方式,这个参数只会影响反向的梯度更新的性能,sparse更新速度更快,推荐使用稀疏更新的方式。但某些optimizer不支持sparse更新,比如 :ref:`cn_api_fluid_optimizer_AdadeltaOptimizer` 、 :ref:`cn_api_fluid_optimizer_AdamaxOptimizer` 、 :ref:`cn_api_fluid_optimizer_DecayedAdagradOptimizer` 、 :ref:`cn_api_fluid_optimizer_FtrlOptimizer` 、 :ref:`cn_api_fluid_optimizer_LambOptimizer` 、:ref:`cn_api_fluid_optimizer_LarsMomentumOptimizer` ,此时is_sparse必须为False。默认为False。 + - **is_distributed** (bool) - 是否使用分布式的方式存储embedding矩阵,仅在多机分布式cpu训练中使用。默认为False。 + - **padding_idx** (int|long|None) - padding_idx需在区间[-vocab_size, vocab_size),否则不生效,padding_idx<0时,padding_idx会被改成vocab_size + padding_idx,input中等于padding_index的id对应的embedding信息会被设置为0,且这部分填充数据在训练时将不会被更新。如果为None,不作处理,默认为None。 + - **param_attr** (ParamAttr) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。此外,可以通过 ``param_attr`` 参数加载用户自定义或预训练的词向量。只需将本地词向量转为numpy数据格式,且保证本地词向量的shape和embedding的 ``size`` 参数一致,然后使用 :ref:`cn_api_fluid_initializer_NumpyArrayInitializer` 进行初始化,即可实现加载自定义或预训练的词向量。详细使用方法见代码示例2。 + - **dtype** (str|core.VarDesc.VarType) - 输出Tensor或LoDTensor的数据类型,数据类型必须为:float32或float64,默认为float32。 + +返回:input映射后得到的Embedding Tensor或LoDTensor,数据类型和dtype定义的类型一致。 + +返回类型:Variable + +**代码示例** + +.. 
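作为补充,下面的示例(仅作示意,词表大小与维度为演示假设)展示 ``padding_idx`` 的效果:等于 ``padding_idx`` 的 id 对应的输出向量全为 0,且训练时不会被更新。

.. code-block:: python

    import paddle.fluid as fluid
    import paddle.fluid.dygraph.base as base
    import numpy as np

    inp_word = np.array([[0, 2, 3]]).astype('int64')
    with fluid.dygraph.guard():
        emb = fluid.dygraph.Embedding(
            size=[20, 8],
            padding_idx=0,    # id 为 0 的词做 padding 处理
            is_sparse=False)
        rlt = emb(base.to_variable(inp_word))
        # rlt[0][0] 对应 id=0,输出为全 0 向量
        print(rlt.numpy()[0][0])

.. 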
code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.dygraph.base as base + import numpy as np + + # 示例 1 + inp_word = np.array([[2, 3, 5], [4, 2, 1]]).astype('int64') + inp_word.shape # [2, 3] + dict_size = 20 + with fluid.dygraph.guard(): + emb = fluid.dygraph.Embedding( + size=[dict_size, 32], + param_attr='emb.w', + is_sparse=False) + static_rlt3 = emb(base.to_variable(inp_word)) + static_rlt3.shape # [2, 3, 32] + + # 示例 2: 加载用户自定义或预训练的词向量 + weight_data = np.random.random(size=(128, 100)) # numpy格式的词向量数据 + w_param_attrs = fluid.ParamAttr( + name="emb_weight", + learning_rate=0.5, + initializer=fluid.initializer.NumpyArrayInitializer(weight_data), + trainable=True) + with fluid.dygraph.guard(): + emb = fluid.dygraph.Embedding( + size=[128, 100], + param_attr= w_param_attrs, + is_sparse=False) + static_rlt3 = emb(base.to_variable(inp_word)) + diff --git a/doc/paddle/api/paddle/nn/layer/common/ReflectionPad1d_cn.rst b/doc/paddle/api/paddle/nn/layer/common/ReflectionPad1d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..bba4b39464fe5e8bdb4ee0626d912ad8f9e2e858 --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/common/ReflectionPad1d_cn.rst @@ -0,0 +1,35 @@ +.. _cn_api_nn_ReflectionPad1d: + +ReflectionPad1d +------------------------------- +.. py:class:: paddle.nn.ReflectionPad1d(padding, data_format="NCL", name=None) + +**ReflectionPad1d** + +按照 padding 对输入 以reflection模式进行 ``pad``,即填充以输入边界值为轴的映射 。 + +参数: + - **padding** (Tensor | List[int32]) - 填充大小。pad的格式为[pad_left, pad_right]。 + - **data_format** (str) - 指定input的format,可为 `'NCL'` 或者 `'NLC'`,默认值为`'NCL'`。 + - **name** (str, 可选) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,缺省值为None。 + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle + import paddle.nn as nn + import numpy as np + paddle.disable_static() + + input_shape = (1, 2, 3) + pad = [1, 2] + data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1 + my_pad = nn.ReflectionPad1d(padding=pad) + data = paddle.to_tensor(data) + result = my_pad(data) + print(result.numpy()) + # [[[2. 1. 2. 3. 2. 1.] + # [5. 4. 5. 6. 5. 4.]]] diff --git a/doc/paddle/api/paddle/nn/layer/common/ReflectionPad2d_cn.rst b/doc/paddle/api/paddle/nn/layer/common/ReflectionPad2d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..35fdb4bc0f2af5d55629ba5cb9f101dd8f3f2150 --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/common/ReflectionPad2d_cn.rst @@ -0,0 +1,40 @@ +.. _cn_api_nn_ReflectionPad2d: + +ReflectionPad2d +------------------------------- +.. py:class:: paddle.nn.ReflectionPad2d(padding, data_format="NCHW", name=None) + +**ReflectionPad2d** + +按照 padding 对输入 以reflection模式进行 ``pad``,即填充以输入边界值为轴的映射 。 + +参数: + - **padding** (Tensor | List[int32]) - 填充大小。pad的格式为[pad_left, pad_right, pad_top, pad_bottom]。。 + - **data_format** (str) - 指定input的format,可为 `'NCHW'` 或者 `'NHWC'`,默认值为`'NCHW'`。 + - **name** (str, 可选) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,缺省值为None。 + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle + import paddle.nn as nn + import numpy as np + paddle.disable_static() + + input_shape = (1, 1, 4, 3) + pad = [1, 0, 1, 2] + data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1 + my_pad = nn.ReflectionPad2d(padding=pad) + data = paddle.to_tensor(data) + result = my_pad(data) + print(result.numpy()) + # [[[[ 5. 4. 5. 6.] + # [ 2. 1. 2. 3.] + # [ 5. 4. 5. 6.] + # [ 8. 7. 8. 9.] + # [11. 10. 11. 12.] + # [ 8. 7. 8. 9.] + # [ 5. 4. 5. 
6.]]]] \ No newline at end of file diff --git a/doc/paddle/api/paddle/nn/layer/common/ReplicationPad1d_cn.rst b/doc/paddle/api/paddle/nn/layer/common/ReplicationPad1d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..aade886104421dcec986096a6288846f54828576 --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/common/ReplicationPad1d_cn.rst @@ -0,0 +1,35 @@ +.. _cn_api_nn_ReplicationPad1d: + +ReplicationPad1d +------------------------------- +.. py:class:: paddle.nn.ReplicationPad1d(padding, data_format="NCL", name=None) + +**ReplicationPad1d** + +按照 padding 对输入 以replicate模式进行 ``pad``,即填充输入的边界值。 + +参数: + - **padding** (Tensor | List[int32]) - 填充大小。pad的格式为[pad_left, pad_right]。 + - **data_format** (str) - 指定input的format,可为 `'NCL'` 或者 `'NLC'`,默认值为`'NCL'`。 + - **name** (str, 可选) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,缺省值为None。 + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle + import paddle.nn as nn + import numpy as np + paddle.disable_static() + + input_shape = (1, 2, 3) + pad = [1, 2] + data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1 + my_pad = nn.ReplicationPad1d(padding=pad) + data = paddle.to_tensor(data) + result = my_pad(data) + print(result.numpy()) + # [[[1. 1. 2. 3. 3. 3.] + # [4. 4. 5. 6. 6. 6.]]] diff --git a/doc/paddle/api/paddle/nn/layer/common/ReplicationPad2d_cn.rst b/doc/paddle/api/paddle/nn/layer/common/ReplicationPad2d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..97b41a220b9ccfe3813cdcfd26029126d4b4be22 --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/common/ReplicationPad2d_cn.rst @@ -0,0 +1,38 @@ +.. _cn_api_nn_ReplicationPad2d: + +ReplicationPad2d +------------------------------- +.. py:class:: paddle.nn.ReplicationPad2d(padding, data_format="NCHW", name=None) + +**ReplicationPad2d** + +按照 padding 对输入 以replicate模式进行 ``pad``,即填充输入的边界值。 + +参数: + - **padding** (Tensor | List[int32]) - 填充大小。pad的格式为[pad_left, pad_right, pad_top, pad_bottom]。 + - **data_format** (str) - 指定input的format,可为 `'NCHW'` 或者 `'NHWC'`,默认值为`'NCHW'`。 + - **name** (str, 可选) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,缺省值为None。 + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle + import paddle.nn as nn + import numpy as np + paddle.disable_static() + + input_shape = (1, 1, 2, 3) + pad = [1, 0, 1, 2] + data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1 + my_pad = nn.ReplicationPad2d(padding=pad) + data = paddle.to_tensor(data) + result = my_pad(data) + print(result.numpy()) + # [[[[1. 1. 2. 3.] + # [1. 1. 2. 3.] + # [4. 4. 5. 6.] + # [4. 4. 5. 6.] + # [4. 4. 5. 6.]]]] diff --git a/doc/paddle/api/paddle/nn/layer/common/ReplicationPad3d_cn.rst b/doc/paddle/api/paddle/nn/layer/common/ReplicationPad3d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4d379c21297d0602c97cfeba3f81e06585cc1026 --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/common/ReplicationPad3d_cn.rst @@ -0,0 +1,38 @@ +.. _cn_api_nn_ReplicationPad3d: + +ReplicationPad3d +------------------------------- +.. 
py:class:: paddle.nn.ReplicationPad3d(padding, data_format="NCDHW", name=None) + +**ReplicationPad3d** + +按照 padding 对输入 以replicate模式进行 ``pad``,即填充输入的边界值。 + +参数: + - **padding** (Tensor | List[int32]) - 填充大小。pad的格式为[pad_left, pad_right, pad_top, pad_bottom, pad_front, pad_back]。 + - **data_format** (str) - 指定input的format,可为 `'NCDHW'` 或者 `'NDHWC'`,默认值为`'NCDHW'`。 + - **name** (str, 可选) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,缺省值为None。 + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle + import paddle.nn as nn + import numpy as np + paddle.disable_static() + + input_shape = (1, 1, 1, 2, 3) + pad = [1, 0, 1, 2, 0, 0] + data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1 + my_pad = nn.ReplicationPad3d(padding=pad) + data = paddle.to_tensor(data) + result = my_pad(data) + print(result.numpy()) + # [[[[[1. 1. 2. 3.] + # [1. 1. 2. 3.] + # [4. 4. 5. 6.] + # [4. 4. 5. 6.] + # [4. 4. 5. 6.]]]]] diff --git a/doc/paddle/api/paddle/nn/layer/common/ZeroPad2d_cn.rst b/doc/paddle/api/paddle/nn/layer/common/ZeroPad2d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ed6a69b86fdb93f4172a14d638abd08022910e1b --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/common/ZeroPad2d_cn.rst @@ -0,0 +1,38 @@ +.. _cn_api_nn_ZeroPad2d: + +ZeroPad2d +------------------------------- +.. py:class:: paddle.nn.ZeroPad2d(padding, data_format="NCHW", name=None) + +**ZeroPad2d** + +按照 padding 对输入填充固定值0。 + +参数: + - **padding** (Tensor | List[int32]) - 填充大小。pad的格式为[pad_left, pad_right, pad_top, pad_bottom]。 + - **data_format** (str) - 指定input的format,可为 `'NCHW'` 或者 `'NHWC'`,默认值为`'NCHW'`。 + - **name** (str, 可选) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,缺省值为None。 + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle + import paddle.nn as nn + import numpy as np + paddle.disable_static() + + input_shape = (1, 1, 2, 3) + pad = [1, 0, 1, 2] + data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1 + my_pad = nn.ZeroPad2d(padding=pad) + data = paddle.to_tensor(data) + result = my_pad(data) + print(result.numpy()) + # [[[[0. 0. 0. 0.] + # [0. 1. 2. 3.] + # [0. 4. 5. 6.] + # [0. 0. 0. 0.] + # [0. 0. 0. 0.]]]] diff --git a/doc/paddle/api/paddle/nn/layer/conv/Conv2d_cn.rst b/doc/paddle/api/paddle/nn/layer/conv/Conv2d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ffc23cb83e0769ed83a67ddd6ab3d41a8b44c4e1 --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/conv/Conv2d_cn.rst @@ -0,0 +1,103 @@ +Conv2d +------------------------------- + +.. py:class:: paddle.nn.Conv2d(in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, padding_mode='zeros', weight_attr=None, bias_attr=None, data_format="NCHW") + + + +**二维卷积层** + +该OP是二维卷积层(convolution2d layer),根据输入、卷积核、步长(stride)、填充(padding)、空洞大小(dilations)一组参数计算输出特征层大小。输入和输出是NCHW或NHWC格式,其中N是批尺寸,C是通道数,H是特征高度,W是特征宽度。卷积核是MCHW格式,M是输出图像通道数,C是输入图像通道数,H是卷积核高度,W是卷积核宽度。如果组数(groups)大于1,C等于输入图像通道数除以组数的结果。详情请参考UFLDL's : `卷积 `_ 。如果bias_attr不为False,卷积计算会添加偏置项。 + +对每个输入X,有等式: + +.. 
math:: + + Out = \sigma \left ( W * X + b \right ) + +其中: + - :math:`X` :输入值,NCHW或NHWC格式的4-D Tensor + - :math:`W` :卷积核值,MCHW格式的4-D Tensor + - :math:`*` :卷积操作 + - :math:`b` :偏置值,2-D Tensor,形状为 ``[M,1]`` + - :math:`\sigma` :激活函数 + - :math:`Out` :输出值,NCHW或NHWC格式的4-D Tensor, 和 ``X`` 的形状可能不同 + + +参数: + - **in_channels** (int) - 输入图像的通道数。 + - **out_channels** (int) - 由卷积操作产生的输出的通道数。 + - **kernel_size** (int|list|tuple) - 卷积核大小。可以为单个整数或包含两个整数的元组或列表,分别表示卷积核的高和宽。如果为单个整数,表示卷积核的高和宽都等于该整数。 + - **stride** (int|list|tuple,可选) - 步长大小。可以为单个整数或包含两个整数的元组或列表,分别表示卷积沿着高和宽的步长。如果为单个整数,表示沿着高和宽的步长都等于该整数。默认值:1。 + - **padding** (int|list|tuple|str,可选) - 填充大小。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有3种格式:(1)包含4个二元组:当 ``data_format`` 为"NCHW"时为 [[0,0], [0,0], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right]],当 ``data_format`` 为"NHWC"时为[[0,0], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right], [0,0]];(2)包含4个整数值:[padding_height_top, padding_height_bottom, padding_width_left, padding_width_right];(3)包含2个整数值:[padding_height, padding_width],此时padding_height_top = padding_height_bottom = padding_height, padding_width_left = padding_width_right = padding_width。若为一个整数,padding_height = padding_width = padding。默认值:0。 + - **dilation** (int|list|tuple,可选) - 空洞大小。可以为单个整数或包含两个整数的元组或列表,分别表示卷积核中的元素沿着高和宽的空洞。如果为单个整数,表示高和宽的空洞都等于该整数。默认值:1。 + - **groups** (int,可选) - 二维卷积层的组数。根据Alex Krizhevsky的深度卷积神经网络(CNN)论文中的成组卷积:当group=n,输入和卷积核分别根据通道数量平均分为n组,第一组卷积核和第一组输入进行卷积计算,第二组卷积核和第二组输入进行卷积计算,……,第n组卷积核和第n组输入进行卷积计算。默认值:1。 + - **padding_mode** (str, 可选): 填充模式。 包括 ``'zeros'``, ``'reflect'``, ``'replicate'`` 或者 ``'circular'``. 默认值: ``'zeros'`` . + - **weight_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr|bool,可选)- 指定偏置参数属性的对象。若 ``bias_attr`` 为bool类型,只支持为False,表示没有偏置参数。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 + + +属性 +:::::::::::: +.. py:attribute:: weight +本层的可学习参数,类型为 ``Parameter`` + +.. py:attribute:: bias +本层的可学习偏置,类型为 ``Parameter`` + +形状: + - 输入: :math:`(N, C_{in}, H_{in}, W_{in})` + - 输出: :math:`(N, C_{out}, H_{out}, W_{out})` + + 其中: + + .. math:: + H_{out} = \frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (kernel\_size[0] - 1) + 1))}{strides[0]} + 1 + + W_{out} = \frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (kernel\_size[1] - 1) + 1))}{strides[1]} + 1 + + 如果 ``padding`` = "SAME": + + .. math:: + H_{out} = \frac{(H_{in} + stride[0] - 1)}{stride[0]} + + .. math:: + W_{out} = \frac{(W_{in} + stride[1] - 1)}{stride[1]} + + 如果 ``padding`` = "VALID": + + .. math:: + H_{out} = \frac{\left ( H_{in} -\left ( dilation[0]*\left ( kernel\_size[0]-1 \right )+1 \right ) \right )}{stride[0]}+1 + + W_{out} = \frac{\left ( W_{in} -\left ( dilation[1]*\left ( kernel\_size[1]-1 \right )+1 \right ) \right )}{stride[1]}+1 + + +抛出异常: + - ``ValueError`` - 如果 ``data_format`` 既不是"NCHW"也不是"NHWC"。 + - ``ValueError`` - 如果 ``input`` 的通道数未被明确定义。 + - ``ValueError`` - 如果 ``padding`` 是字符串,既不是"SAME"也不是"VALID"。 + - ``ValueError`` - 如果 ``padding`` 含有4个二元组,与批尺寸对应维度的值不为0或者与通道对应维度的值不为0。 + - ``ShapeError`` - 如果输入不是4-D Tensor。 + - ``ShapeError`` - 如果输入和卷积核的维度大小不相同。 + - ``ShapeError`` - 如果输入的维度大小与 ``stride`` 之差不是2。 + - ``ShapeError`` - 如果输出的通道数不能被 ``groups`` 整除。 + + +**代码示例**: + +.. 
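作为补充,下面的示例(仅作示意)结合上面的输出尺寸公式:当 ``kernel_size=3``、``stride=2``、``padding=1`` 时,8x8 的输入对应的输出边长为 (8 + 2*1 - 3) // 2 + 1 = 4。

.. code-block:: python

    import numpy as np
    import paddle
    import paddle.nn as nn

    paddle.disable_static()

    x = np.random.uniform(-1, 1, (2, 4, 8, 8)).astype('float32')
    x_var = paddle.to_tensor(x)
    conv = nn.Conv2d(4, 6, (3, 3), stride=2, padding=1)
    y_var = conv(x_var)
    print(y_var.numpy().shape)

    # (2, 6, 4, 4)

.. 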
code-block:: python + + import numpy as np + import paddle + import paddle.nn as nn + x = np.random.uniform(-1, 1, (2, 4, 8, 8)).astype('float32') + + paddle.disable_static() + x_var = paddle.to_tensor(x) + conv = nn.Conv2d(4, 6, (3, 3)) + y_var = conv(x_var) + y_np = y_var.numpy() + print(y_np.shape) + + # (2, 6, 6, 6) diff --git a/doc/paddle/api/paddle/nn/layer/conv/Conv3d_cn.rst b/doc/paddle/api/paddle/nn/layer/conv/Conv3d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1f61e1d4e9f808a89ce2bddb74603744d74fdd47 --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/conv/Conv3d_cn.rst @@ -0,0 +1,110 @@ +Conv3d +------------------------------- + +.. py:class:: paddle.nn.Conv3d(in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, padding_mode='zeros', weight_attr=None, bias_attr=None, data_format="NCDHW") + + + +**三维卷积层** + +该OP是三维卷积层(convolution3D layer),根据输入、卷积核、步长(stride)、填充(padding)、空洞大小(dilations)一组参数计算得到输出特征层大小。输入和输出是NCDHW或NDWHC格式,其中N是批尺寸,C是通道数,D是特征层深度,H是特征层高度,W是特征层宽度。三维卷积(Convlution3D)和二维卷积(Convlution2D)相似,但多了一维深度信息(depth)。如果bias_attr不为False,卷积计算会添加偏置项。 + +对每个输入X,有等式: + +.. math:: + + Out = \sigma \left ( W * X + b \right ) + +其中: + - :math:`X` :输入值,NCDHW或NDHWC格式的5-D Tensor + - :math:`W` :卷积核值,MCDHW格式的5-D Tensor + - :math:`*` :卷积操作 + - :math:`b` :偏置值,2-D Tensor,形为 ``[M,1]`` + - :math:`\sigma` :激活函数 + - :math:`Out` :输出值, NCDHW或NDHWC格式的5-D Tensor,和 ``X`` 的形状可能不同 + +参数: + - **in_channels** (int) - 输入图像的通道数。 + - **out_channels** (int) - 由卷积操作产生的输出的通道数。 + - **kernel_size** (int|list|tuple) - 卷积核大小。可以为单个整数或包含三个整数的元组或列表,分别表示卷积核的深度,高和宽。如果为单个整数,表示卷积核的深度,高和宽都等于该整数。 + - **stride** (int|list|tuple,可选) - 步长大小。可以为单个整数或包含三个整数的元组或列表,分别表示卷积沿着深度,高和宽的步长。如果为单个整数,表示沿着高和宽的步长都等于该整数。默认值:1。 + - **padding** (int|list|tuple|str,可选) - 填充大小。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有3种格式:(1)包含5个二元组:当 ``data_format`` 为"NCDHW"时为 [[0,0], [0,0], [padding_depth_front, padding_depth_back], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right]],当 ``data_format`` 为"NDHWC"时为[[0,0], [padding_depth_front, padding_depth_back], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right], [0,0]];(2)包含6个整数值:[padding_depth_front, padding_depth_back, padding_height_top, padding_height_bottom, padding_width_left, padding_width_right];(3)包含3个整数值:[padding_depth, padding_height, padding_width],此时 padding_depth_front = padding_depth_back = padding_depth, padding_height_top = padding_height_bottom = padding_height, padding_width_left = padding_width_right = padding_width。若为一个整数,padding_depth = padding_height = padding_width = padding。默认值:0。 + - **dilation** (int|list|tuple,可选) - 空洞大小。可以为单个整数或包含三个整数的元组或列表,分别表示卷积核中的元素沿着深度,高和宽的空洞。如果为单个整数,表示深度,高和宽的空洞都等于该整数。默认值:1。 + - **groups** (int,可选) - 三维卷积层的组数。根据Alex Krizhevsky的深度卷积神经网络(CNN)论文中的成组卷积:当group=n,输入和卷积核分别根据通道数量平均分为n组,第一组卷积核和第一组输入进行卷积计算,第二组卷积核和第二组输入进行卷积计算,……,第n组卷积核和第n组输入进行卷积计算。默认值:1。 + - **padding_mode** (str, 可选): 填充模式。 包括 ``'zeros'``, ``'reflect'``, ``'replicate'`` 或者 ``'circular'``. 默认值: ``'zeros'`` . 
+ - **weight_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr|bool,可选)- 指定偏置参数属性的对象。若 ``bias_attr`` 为bool类型,只支持为False,表示没有偏置参数。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCDHW"和"NDHWC"。N是批尺寸,C是通道数,D是特征深度,H是特征高度,W是特征宽度。默认值:"NCDHW"。 + + +属性 +:::::::::::: +.. py:attribute:: weight +本层的可学习参数,类型为 ``Parameter`` + +.. py:attribute:: bias +本层的可学习偏置,类型为 ``Parameter`` + +形状: + + - 输入::math:`(N, C_{in}, D_{in}, H_{in}, W_{in})` + - 输出::math:`(N, C_{out}, D_{out}, H_{out}, W_{out})` + + 其中 + + .. math:: + + D_{out} &= \frac{\left ( D_{in} + padding\_depth\_front + padding\_depth\_back-\left ( dilation[0]*\left ( kernel\_size[0]-1 \right )+1 \right ) \right )}{stride[0]}+1 + + H_{out} &= \frac{\left ( H_{in} + padding\_height\_top + padding\_height\_bottom-\left ( dilation[1]*\left ( kernel\_size[1]-1 \right )+1 \right ) \right )}{stride[1]}+1 + + W_{out} &= \frac{\left ( W_{in} + padding\_width\_left + padding\_width\_right -\left ( dilation[2]*\left ( kernel\_size[2]-1 \right )+1 \right ) \right )}{stride[2]}+1 + + 如果 ``padding`` = "SAME": + + .. math:: + D_{out} = \frac{(D_{in} + stride[0] - 1)}{stride[0]} + + H_{out} = \frac{(H_{in} + stride[1] - 1)}{stride[1]} + + W_{out} = \frac{(W_{in} + stride[2] - 1)}{stride[2]} + + 如果 ``padding`` = "VALID": + + .. math:: + D_{out} = \frac{\left ( D_{in} -\left ( dilation[0]*\left ( kernel\_size[0]-1 \right )+1 \right ) \right )}{stride[0]}+1 + + H_{out} = \frac{\left ( H_{in} -\left ( dilation[1]*\left ( kernel\_size[1]-1 \right )+1 \right ) \right )}{stride[1]}+1 + + W_{out} = \frac{\left ( W_{in} -\left ( dilation[2]*\left ( kernel\_size[2]-1 \right )+1 \right ) \right )}{stride[2]}+1 + +抛出异常: + - ``ValueError`` - 如果 ``data_format`` 既不是"NCDHW"也不是"NDHWC"。 + - ``ValueError`` - 如果 ``input`` 的通道数未被明确定义。 + - ``ValueError`` - 如果 ``padding`` 是字符串,既不是"SAME"也不是"VALID"。 + - ``ValueError`` - 如果 ``padding`` 含有5个二元组,与批尺寸对应维度的值不为0或者与通道对应维度的值不为0。 + - ``ShapeError`` - 如果输入不是5-D Tensor。 + - ``ShapeError`` - 如果输入和卷积核的维度大小不相同。 + - ``ShapeError`` - 如果输入的维度大小与 ``stride`` 之差不是2。 + - ``ShapeError`` - 如果输出的通道数不能被 ``groups`` 整除。 + + +**代码示例**: + +.. code-block:: python + + import numpy as np + + import paddle + import paddle.nn as nn + x = np.random.uniform(-1, 1, (2, 4, 8, 8, 8)).astype('float32') + + paddle.disable_static() + + x_var = paddle.to_tensor(x) + conv = nn.Conv3d(4, 6, (3, 3, 3)) + y_var = conv(x_var) + y_np = y_var.numpy() + print(y_np.shape) + + # (2, 6, 6, 6, 6) diff --git a/doc/paddle/api/paddle/nn/layer/conv/ConvTranspose2d_cn.rst b/doc/paddle/api/paddle/nn/layer/conv/ConvTranspose2d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7cf28d4fbeb6e4bc25f1080636f57696357c9985 --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/conv/ConvTranspose2d_cn.rst @@ -0,0 +1,106 @@ +ConvTranspose2d +------------------------------- + +.. 
py:class:: paddle.nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride=1, padding=0, output_padding=0, groups=1, dilation=1, weight_attr=None, bias_attr=None, data_format="NCHW") + + +二维转置卷积层(Convlution2d transpose layer) + +该层根据输入(input)、卷积核(kernel)和空洞大小(dilations)、步长(stride)、填充(padding)来计算输出特征层大小或者通过output_size指定输出特征层大小。输入(Input)和输出(Output)为NCHW或NHWC格式,其中N为批尺寸,C为通道数(channel),H为特征层高度,W为特征层宽度。卷积核是MCHW格式,M是输出图像通道数,C是输入图像通道数,H是卷积核高度,W是卷积核宽度。如果组数大于1,C等于输入图像通道数除以组数的结果。转置卷积的计算过程相当于卷积的反向计算。转置卷积又被称为反卷积(但其实并不是真正的反卷积)。欲了解转置卷积层细节,请参考下面的说明和 参考文献_ 。如果参数bias_attr不为False, 转置卷积计算会添加偏置项。 + +.. _参考文献: https://arxiv.org/pdf/1603.07285.pdf + + +输入 :math:`X` 和输出 :math:`Out` 函数关系如下: + +.. math:: + Out=\sigma (W*X+b)\\ + +其中: + - :math:`X` : 输入,具有NCHW或NHWC格式的4-D Tensor + - :math:`W` : 卷积核,具有NCHW格式的4-D Tensor + - :math:`*` : 卷积计算(注意:转置卷积本质上的计算还是卷积) + - :math:`b` : 偏置(bias),2-D Tensor,形状为 ``[M,1]`` + - :math:`σ` : 激活函数 + - :math:`Out` : 输出值,NCHW或NHWC格式的4-D Tensor, 和 ``X`` 的形状可能不同 + + +注意: + +如果output_size为None,则 :math:`H_{out}` = :math:`H^\prime_{out}` , :math:`W_{out}` = :math:`W^\prime_{out}` ;否则,指定的output_size_height(输出特征层的高) :math:`H_{out}` 应当介于 :math:`H^\prime_{out}` 和 :math:`H^\prime_{out} + strides[0]` 之间(不包含 :math:`H^\prime_{out} + strides[0]` ), 并且指定的output_size_width(输出特征层的宽) :math:`W_{out}` 应当介于 :math:`W^\prime_{out}` 和 :math:`W^\prime_{out} + strides[1]` 之间(不包含 :math:`W^\prime_{out} + strides[1]` )。 + +由于转置卷积可以当成是卷积的反向计算,而根据卷积的输入输出计算公式来说,不同大小的输入特征层可能对应着相同大小的输出特征层,所以对应到转置卷积来说,固定大小的输入特征层对应的输出特征层大小并不唯一。 + +如果指定了output_size, ``conv2d_transpose`` 可以自动计算卷积核的大小。 + +参数: + - **in_channels** (int) - 输入图像的通道数。 + - **out_channels** (int) - 卷积核的个数,和输出特征图通道数相同。 + - **kernel_size** (int|list|tuple) - 卷积核大小。可以为单个整数或包含两个整数的元组或列表,分别表示卷积核的高和宽。如果为单个整数,表示卷积核的高和宽都等于该整数。 + - **stride** (int|tuple, 可选) - 步长大小。如果 ``stride`` 为元组或列表,则必须包含两个整型数,分别表示垂直和水平滑动步长。否则,表示垂直和水平滑动步长均为 ``stride`` 。默认值:1。 + - **padding** (int|tuple, 可选) - 填充大小。如果 ``padding`` 为元组或列表,则必须包含两个整型数,分别表示竖直和水平边界填充大小。否则,表示竖直和水平边界填充大小均为 ``padding`` 。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考下方形状 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。默认值:0。 + - **output_padding** (int|list|tuple, optional): 输出形状上一侧额外添加的大小. 默认值: 0. + - **groups** (int, 可选) - 二维卷积层的组数。根据Alex Krizhevsky的深度卷积神经网络(CNN)论文中的分组卷积:当group=2,卷积核的前一半仅和输入特征图的前一半连接。卷积核的后一半仅和输入特征图的后一半连接。默认值:1。 + - **dilation** (int|tuple, 可选) - 空洞大小。可以为单个整数或包含两个整数的元组或列表,分别表示卷积核中的元素沿着高和宽的空洞。如果为单个整数,表示高和宽的空洞都等于该整数。默认值:1。 + - **weight_attr** (ParamAttr, 可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr|bool, 可选) - 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 + + +形状: + + - 输入::math:`(N,C_{in}, H_{in}, W_{in})` + + + - 输出::math:`(N,C_{out}, H_{out}, W_{out})` + + 其中 + + .. math:: + + & H'_{out} = (H_{in}-1)*strides[0] - pad\_height\_top - pad\_height\_bottom + dilations[0]*(kernel\_size[0]-1)+1\\ + & W'_{out} = (W_{in}-1)*strides[1]- pad\_width\_left - pad\_width\_right + dilations[1]*(kernel\_size[1]-1)+1 \\ + & H_{out}\in[H'_{out},H'_{out} + strides[0])\\ + & W_{out}\in[W'_{out},W'_{out} + strides[1])\\ + + 如果 ``padding`` = "SAME": + + .. math:: + & H'_{out} = \frac{(H_{in} + stride[0] - 1)}{stride[0]}\\ + & W'_{out} = \frac{(W_{in} + stride[1] - 1)}{stride[1]}\\ + + 如果 ``padding`` = "VALID": + + .. 
math:: + & H'_{out} = (H_{in}-1)*strides[0] + dilations[0]*(kernel\_size[0]-1)+1\\ + & W'_{out} = (W_{in}-1)*strides[1] + dilations[1]*(kernel\_size[1]-1)+1 \\ + +抛出异常: + - ``ValueError`` : 如果输入的shape、filter_size、stride、padding和groups不匹配,抛出ValueError + - ``ValueError`` - 如果 ``data_format`` 既不是"NCHW"也不是"NHWC"。 + - ``ValueError`` - 如果 ``padding`` 是字符串,既不是"SAME"也不是"VALID"。 + - ``ValueError`` - 如果 ``padding`` 含有4个二元组,与批尺寸对应维度的值不为0或者与通道对应维度的值不为0。 + - ``ValueError`` - 如果 ``output_size`` 和 ``filter_size`` 同时为None。 + - ``ShapeError`` - 如果输入不是4-D Tensor。 + - ``ShapeError`` - 如果输入和卷积核的维度大小不相同。 + - ``ShapeError`` - 如果输入的维度大小与 ``stride`` 之差不是2。 + +**代码示例** + +.. code-block:: python + + import numpy as np + import paddle + import paddle.nn as nn + + x = np.random.uniform(-1, 1, (2, 4, 8, 8)).astype('float32') + + paddle.disable_static() + + x_var = paddle.to_tensor(x) + conv = nn.ConvTranspose2d(4, 6, (3, 3)) + y_var = conv(x_var) + y_np = y_var.numpy() + print(y_np.shape) + + # (2, 6, 10, 10) diff --git a/doc/paddle/api/paddle/nn/layer/conv/ConvTranspose3d_cn.rst b/doc/paddle/api/paddle/nn/layer/conv/ConvTranspose3d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..67c8fb483903219f88b96d84b5032f61fd1df4f0 --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/conv/ConvTranspose3d_cn.rst @@ -0,0 +1,106 @@ +ConvTranspose3d +------------------------------- + +.. py:class:: paddle.nn.ConvTranspose3d(in_channels, out_channels, kernel_size, stride=1, padding=0, output_padding=0, groups=1, dilation=1, weight_attr=None, bias_attr=None, data_format="NCDHW") + + +三维转置卷积层(Convlution3d transpose layer) + +该层根据输入(input)、卷积核(kernel)和卷积核空洞大小(dilations)、步长(stride)、填充(padding)来计算输出特征层大小或者通过output_size指定输出特征层大小。输入(Input)和输出(Output)为NCDHW或者NDHWC格式。其中N为批尺寸,C为通道数(channel),D为特征深度,H为特征层高度,W为特征层宽度。转置卷积的计算过程相当于卷积的反向计算。转置卷积又被称为反卷积(但其实并不是真正的反卷积)。欲了解卷积转置层细节,请参考下面的说明和 参考文献_ 。如果参数bias_attr不为False, 转置卷积计算会添加偏置项。 + +.. _参考文献: http://www.matthewzeiler.com/wp-content/uploads/2017/07/cvpr2010.pdf + +输入 :math:`X` 和输出 :math:`Out` 函数关系如下: + +.. 
math:: + \\Out=\sigma (W*X+b)\\ + +其中: + - :math:`X` : 输入,具有NCDHW或NDHWC格式的5-D Tensor + - :math:`W` : 卷积核,具有NCDHW格式的5-D Tensor + - :math:`*` : 卷积操作(注意:转置卷积本质上的计算还是卷积) + - :math:`b` : 偏置(bias),2-D Tensor,形状为 ``[M,1]`` + - :math:`σ` : 激活函数 + - :math:`Out` : 输出值,NCDHW或NDHWC格式的5-D Tensor,和 ``X`` 的形状可能不同 + + +注意: + +如果output_size为None,则 :math:`H_{out}` = :math:`H^\prime_{out}` , :math:`W_{out}` = :math:`W^\prime_{out}` ;否则,指定的output_size_height(输出特征层的高) :math:`H_{out}` 应当介于 :math:`H^\prime_{out}` 和 :math:`H^\prime_{out} + strides[0]` 之间(不包含 :math:`H^\prime_{out} + strides[0]` ), 并且指定的output_size_width(输出特征层的宽) :math:`W_{out}` 应当介于 :math:`W^\prime_{out}` 和 :math:`W^\prime_{out} + strides[1]` 之间(不包含 :math:`W^\prime_{out} + strides[1]` )。 + +由于转置卷积可以当成是卷积的反向计算,而根据卷积的输入输出计算公式来说,不同大小的输入特征层可能对应着相同大小的输出特征层,所以对应到转置卷积来说,固定大小的输入特征层对应的输出特征层大小并不唯一。 + +如果指定了output_size, 该算子可以自动计算卷积核的大小。 + +参数: + - **in_channels** (int) - 输入图像的通道数。 + - **out_channels** (int) - 卷积核的个数,和输出特征图个数相同。 + - **kernel_size** (int|list|tuple) - 卷积核大小。可以为单个整数或包含三个整数的元组或列表,分别表示卷积核的深度,高和宽。如果为单个整数,表示卷积核的深度,高和宽都等于该整数。默认:None。output_size和kernel_size不能同时为None。 + - **stride** (int|tuple, 可选) - 步长大小。如果 ``stride`` 为元组或列表,则必须包含三个整型数,分别表示深度,垂直和水平滑动步长。否则,表示深度,垂直和水平滑动步长均为 ``stride`` 。默认值:1。 + - **padding** (int|tuple, 可选) - 填充大小。如果 ``padding`` 为元组或列表,则必须包含三个整型数,分别表示深度,竖直和水平边界填充大小。否则,表示深度,竖直和水平边界填充大小均为 ``padding`` 。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考下方形状 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。默认值:0。 + - **output_padding** (int|list|tuple, optional): 输出形状上一侧额外添加的大小. 默认值: 0. + - **groups** (int, 可选) - 二维卷积层的组数。根据Alex Krizhevsky的深度卷积神经网络(CNN)论文中的分组卷积:当group=2,卷积核的前一半仅和输入特征图的前一半连接。卷积核的后一半仅和输入特征图的后一半连接。默认值:1。 + - **dilation** (int|tuple, 可选) - 空洞大小。可以为单个整数或包含三个整数的元组或列表,分别表示卷积核中的元素沿着深度,高和宽的空洞。如果为单个整数,表示深度,高和宽的空洞都等于该整数。默认值:1。 + - **weight_attr** (ParamAttr, 可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr|bool, 可选) - 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCDHW"。 + +形状: + + - 输入::math:`(N,C_{in}, H_{in}, W_{in})` + + + - 输出::math:`(N,C_{out}, H_{out}, W_{out})` + + 其中 + + .. math:: + + & D'_{out}=(D_{in}-1)*strides[0] - pad\_depth\_front - pad\_depth\_back + dilations[0]*(kernel\_size[0]-1)+1\\ + & H'_{out} = (H_{in}-1)*strides[1] - pad\_height\_top - pad\_height\_bottom + dilations[1]*(kernel\_size[1]-1)+1\\ + & W'_{out} = (W_{in}-1)*strides[2]- pad\_width\_left - pad\_width\_right + dilations[2]*(kernel\_size[2]-1)+1 \\ + & D_{out}\in[D'_{out},D'_{out} + strides[0])\\ + & H_{out}\in[H'_{out},H'_{out} + strides[1])\\ + & W_{out}\in[W'_{out},W'_{out} + strides[2])\\ + + 如果 ``padding`` = "SAME": + + .. math:: + & D'_{out} = \frac{(D_{in} + stride[0] - 1)}{stride[0]}\\ + & H'_{out} = \frac{(H_{in} + stride[1] - 1)}{stride[1]}\\ + & W'_{out} = \frac{(W_{in} + stride[2] - 1)}{stride[2]}\\ + + 如果 ``padding`` = "VALID": + + .. 
math:: + & D'_{out} = (D_{in}-1)*strides[0] + dilations[0]*(kernel\_size[0]-1)+1\\ + & H'_{out} = (H_{in}-1)*strides[1] + dilations[1]*(kernel\_size[1]-1)+1\\ + & W'_{out} = (W_{in}-1)*strides[2] + dilations[2]*(kernel\_size[2]-1)+1 \\ + +抛出异常: + - ``ValueError`` : 如果输入的shape、kernel_size、stride、padding和groups不匹配,抛出ValueError + - ``ValueError`` - 如果 ``data_format`` 既不是"NCHW"也不是"NHWC"。 + - ``ValueError`` - 如果 ``padding`` 是字符串,既不是"SAME"也不是"VALID"。 + - ``ValueError`` - 如果 ``padding`` 含有4个二元组,与批尺寸对应维度的值不为0或者与通道对应维度的值不为0。 + - ``ValueError`` - 如果 ``output_size`` 和 ``filter_size`` 同时为None。 + - ``ShapeError`` - 如果输入不是4-D Tensor。 + - ``ShapeError`` - 如果输入和卷积核的维度大小不相同。 + - ``ShapeError`` - 如果输入的维度大小与 ``stride`` 之差不是2。 + +**代码示例** + +.. code-block:: python + + import numpy as np + import paddle + import paddle.nn as nn + x = np.random.uniform(-1, 1, (2, 4, 8, 8, 8)).astype('float32') + + paddle.disable_static() + x_var = paddle.to_tensor(x) + conv = nn.ConvTranspose3d(4, 6, (3, 3, 3)) + y_var = conv(x_var) + y_np = y_var.numpy() + print(y_np.shape) + + # (2, 6, 10, 10, 10) diff --git a/doc/paddle/api/paddle/nn/layer/distance/PairwiseDistance_cn.rst b/doc/paddle/api/paddle/nn/layer/distance/PairwiseDistance_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8f9e38dfe6b9b550189d78d53be8fb85dd7a5f03 --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/distance/PairwiseDistance_cn.rst @@ -0,0 +1,39 @@ +.. _cn_api_nn_PairwiseDistance: + +PairwiseDistance +------------------------------- + +.. py:class:: paddle.nn.PairwiseDistance(p=2., epsilon=1e-6, keepdim=False, name=None) + +该OP计算两个向量(输入 ``x``、``y`` )之间pairwise的距离。该距离通过p范数计算: + + .. math:: + + \Vert x \Vert _p = \left( \sum_{i=1}^n \vert x_i \vert ^ p \right ) ^ {1/p}. + +参数 +:::::::: + - **p** (float,可选)- 指定p阶的范数。默认值为2。 + - **epsilon** (float,可选)- 添加到分母的一个很小值,避免发生除零错误。默认值为1e-6。 + - **keepdim** (bool,可选)- 是否保留输出张量减少的维度。输出结果相对于 ``|x-y|`` 的结果减少一维,除非 :attr:`keepdim` 为True,默认值为False。 + - **name** (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +形状 +:::::::: + - **x** (Tensor) - :math:`(N, D)` ,其中D是向量的维度,数据类型为float32或float64。 + - **y** (Tensor) - :math:`(N, D)` ,与 ``x`` 的形状、数据类型相同。 + - **output** (Tensor) - :math:`(N)` ,如果 :attr:`keepdim` 为True,则形状为 :math:`(N, 1)` 。数据类型与 ``x``、 ``y`` 相同。 + +代码示例 +:::::::: + +.. code-block:: python + + import paddle + paddle.disable_static() + x = paddle.to_tensor([[1., 3.], [3., 5.]], dtype='float64') + y = paddle.to_tensor([[5., 6.], [7., 8.]], dtype='float64') + dist = paddle.nn.PairwiseDistance() + distance = dist(x, y) + print(distance.numpy()) # [5. 5.] + diff --git a/doc/paddle/api/paddle/nn/layer/loss/BCELoss_cn.rst b/doc/paddle/api/paddle/nn/layer/loss/BCELoss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0231b6eb5a19db9448376be8f7106dfe083604b7 --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/loss/BCELoss_cn.rst @@ -0,0 +1,65 @@ +.. _cn_api_paddle_nn_BCELoss: + +BCELoss +------------------------------- + +.. py:class:: paddle.nn.BCELoss(weight=None, reduction='mean', name=None) + +该接口用于创建一个BCELoss的可调用类,用于计算输入 ``input`` 和标签 ``label`` 之间的二值交叉熵损失值。二值交叉熵损失函数公式如下: + +当 `weight` 不为空时,公式为: + +.. math:: + Out = -1 * weight * (label * log(input) + (1 - label) * log(1 - input)) + +当 `weight` 为空时,公式为: + +.. math:: + Out = -1 * (label * log(input) + (1 - label) * log(1 - input)) + +当 `reduction` 为 `none` 时,直接返回最原始的 `Out` 结果。 + +当 `reduction` 为 `mean` 时,最终的输出结果为: + +.. math:: + Out = MEAN(Out) + +当 `reduction` 为 `sum` 时,最终的输出结果为: + +.. 
math:: + Out = SUM(Out) + + +.. note:: + 输入数据 ``input`` 一般是 ``sigmoid`` 的输出。因为是二分类,所以标签值 ``label`` 应该是0或者1。 + +参数 +::::::::: + - **weight** (Tensor,可选) - 手动指定每个batch二值交叉熵的权重,如果指定的话,维度必须是一个batch的数据的维度。数据类型是float32, float64。默认值是:None。 + - **reduction** (str,可选) - 指定应用于输出结果的计算方式,可选值有: ``'none'``, ``'mean'``, ``'sum'`` 。默认为 ``'mean'``,计算 `BCELoss` 的均值;设置为 ``'sum'`` 时,计算 `BCELoss` 的总和;设置为 ``'none'`` 时,则返回bce_loss。 + - **name** (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +形状 +::::::::: + - **input** (Tensor) - :math:`(N, *)` , 其中N是batch_size, `*` 是任意其他维度。输入数据 ``input`` 一般是 ``sigmoid`` 的输出。数据类型是float32、float64。 + - **label** (Tensor) - :math:`(N, *)` ,标签 ``label`` 的维度、数据类型与输入 ``input`` 相同。 + - **output** (Tensor) - 输出的Tensor。如果 :attr:`reduction` 是 ``'none'``, 则输出的维度为 :math:`(N, *)` , 与输入 ``input`` 的形状相同。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``, 则输出的维度为 :math:`[1]` 。 + +返回 +::::::::: + 返回计算BCELoss的可调用对象。 + +代码示例 +:::::::::: + +.. code-block:: python + + import paddle + + paddle.disable_static() + input = paddle.to_tensor([0.5, 0.6, 0.7], dtype='float32') + label = paddle.to_tensor([1.0, 0.0, 1.0], dtype='float32') + bce_loss = paddle.nn.loss.BCELoss() + output = bce_loss(input, label) + print(output.numpy()) # [0.65537095] + diff --git a/doc/paddle/api/paddle/nn/layer/loss/BCEWithLogitsLoss_cn.rst b/doc/paddle/api/paddle/nn/layer/loss/BCEWithLogitsLoss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..aef2aae152051cadf3ffca5f19568d8e68f73c3b --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/loss/BCEWithLogitsLoss_cn.rst @@ -0,0 +1,64 @@ +.. _cn_api_paddle_nn_BCEWithLogitsLoss: + +BCEWithLogitsLoss +------------------------------- + +.. py:class:: paddle.nn.BCEWithLogitsLoss(weight=None, reduction='mean', pos_weight=None, name=None) + +该OP可创建一个BCEWithLogitsLoss的可调用类,计算输入 `logit` 和标签 `label` 间的 `binary cross entropy with logits loss` 损失。 + +该OP结合了 `sigmoid` 操作和 :ref:`api_nn_loss_BCELoss` 操作。同时,我们也可以认为该OP是 ``sigmoid_cross_entrop_with_logits`` 和一些 `reduce` 操作的组合。 + +在每个类别独立的分类任务中,该OP可以计算按元素的概率误差。可以将其视为预测数据点的标签,其中标签不是互斥的。例如,一篇新闻文章可以同时关于政治,科技,体育或者同时不包含这些内容。 + +首先,该OP可通过下式计算损失函数: + +.. math:: + Out = -Labels * \log(\sigma(Logit)) - (1 - Labels) * \log(1 - \sigma(Logit)) + +其中 :math:`\sigma(Logit) = \frac{1}{1 + e^{-Logit}}` , 代入上方计算公式中: + +.. math:: + Out = Logit - Logit * Labels + \log(1 + e^{-Logit}) + +为了计算稳定性,防止当 :math:`Logit<0` 时, :math:`e^{-Logit}` 溢出,loss将采用以下公式计算: + +.. 
math:: + Out = \max(Logit, 0) - Logit * Labels + \log(1 + e^{-\|Logit\|}) + +然后,当 ``weight`` or ``pos_weight`` 不为None的时候,该算子会在输出Out上乘以相应的权重。张量 ``weight`` 给Batch中的每一条数据赋予不同权重,张量 ``pos_weight`` 给每一类的正例添加相应的权重。 + +最后,该算子会添加 `reduce` 操作到前面的输出Out上。当 `reduction` 为 `none` 时,直接返回最原始的 `Out` 结果。当 `reduction` 为 `mean` 时,返回输出的均值 :math:`Out = MEAN(Out)` 。当 `reduction` 为 `sum` 时,返回输出的求和 :math:`Out = SUM(Out)` 。 + +**注意: 因为是二分类任务,所以标签值应该是0或者1。 + +参数 +::::::::: + - **weight** (Tensor,可选) - 手动指定每个batch二值交叉熵的权重,如果指定的话,维度必须是一个batch的数据的维度。数据类型是float32, float64。默认值是:None。 + - **reduction** (str,可选) - 指定应用于输出结果的计算方式,可选值有: ``'none'``, ``'mean'``, ``'sum'`` 。默认为 ``'mean'``,计算 `BCELoss` 的均值;设置为 ``'sum'`` 时,计算 `BCELoss` 的总和;设置为 ``'none'`` 时,则返回原始loss。 + - **pos_weight** (Tensor,可选) - 手动指定正类的权重,必须是与类别数相等长度的向量。数据类型是float32, float64。默认值是:None。 + - **name** (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +形状 +::::::::: + - **logit** (Tensor) - :math:`[N, *]` , 其中N是batch_size, `*` 是任意其他维度。输入数据 ``logit`` 一般是线性层的输出,不需要经过 ``sigmoid`` 层。数据类型是float32、float64。 + - **label** (Tensor) - :math:`[N, *]` ,标签 ``label`` 的维度、数据类型与输入 ``logit`` 相同。 + - **output** (Tensor) - 输出的Tensor。如果 :attr:`reduction` 是 ``'none'``, 则输出的维度为 :math:`[N, *]` , 与输入 ``input`` 的形状相同。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``, 则输出的维度为 :math:`[1]` 。 + +返回 +::::::::: + 返回计算BCEWithLogitsLoss的可调用对象。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + paddle.disable_static() + logit = paddle.to_tensor([5.0, 1.0, 3.0], dtype="float32") + label = paddle.to_tensor([1.0, 0.0, 1.0], dtype="float32") + bce_logit_loss = paddle.nn.BCEWithLogitsLoss() + output = bce_logit_loss(logit, label) + print(output.numpy()) # [0.45618808] + diff --git a/doc/paddle/api/paddle/nn/layer/loss/CTCLoss_cn.rst b/doc/paddle/api/paddle/nn/layer/loss/CTCLoss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a2ad0c28a4058d8cbdfc447b9f890e0ce9d24185 --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/loss/CTCLoss_cn.rst @@ -0,0 +1,78 @@ +CTCLoss +------------------------------- + +.. py:class:: paddle.nn.loss.CTCLoss(blank=0, reduction='mean') + +该接口用于计算 CTC loss。该接口的底层调用了第三方 baidu-research::warp-ctc 的实现。 +也可以叫做 softmax with CTC,因为 Warp-CTC 库中插入了 softmax 激活函数来对输入的值进行归一化。 + +参数 +::::::::: + - **blank** (int,可选): - 空格标记的 ID 值,其取值范围为 [0,num_classes+1) 。数据类型支持int32。默认值为0。 + - **reduction** (string,可选): - 指定应用于输出结果的计算方式,可选值有: ``'none'``, ``'mean'``, ``'sum'`` 。设置为 ``'mean'`` 时,对 loss 值除以 label_lengths,并返回所得商的均值;设置为 ``'sum'`` 时,返回 loss 值的总和;设置为 ``'none'`` 时,则直接返回输出的 loss 值。默认值为 ``'mean'``。 + +形状 +::::::::: + - **log_probs** (Tensor): - 经过 padding 的概率序列,其 shape 必须是 [max_logit_length, batch_size, num_classes + 1]。其中 max_logit_length 是最长输入序列的长度。该输入不需要经过 softmax 操作,因为该 OP 的内部对 input 做了 softmax 操作。数据类型仅支持float32。 + - **labels** (Tensor): - 经过 padding 的标签序列,其 shape 为 [batch_size, max_label_length],其中 max_label_length 是最长的 label 序列的长度。数据类型支持int32。 + - **input_lengths** (Tensor): - 表示输入 ``log_probs`` 数据中每个序列的长度,shape为 [batch_size] 。数据类型支持int64。 + - **label_lengths** (Tensor): - 表示 label 中每个序列的长度,shape为 [batch_size] 。数据类型支持int64。 + +返回 +::::::::: +``Tensor``,输入 ``log_probs`` 和标签 ``labels`` 间的 `ctc loss`。如果 :attr:`reduction` 是 ``'none'``,则输出 loss 的维度为 [batch_size]。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``, 则输出Loss的维度为 [1]。数据类型与输入 ``log_probs`` 一致。 + +代码示例 +::::::::: + +.. 
code-block:: python + + # declarative mode + import numpy as np + import paddle + + # length of the longest logit sequence + max_seq_length = 4 + #length of the longest label sequence + max_label_length = 3 + # number of logit sequences + batch_size = 2 + # class num + class_num = 3 + + np.random.seed(1) + log_probs = np.array([[[4.17021990e-01, 7.20324516e-01, 1.14374816e-04], + [3.02332580e-01, 1.46755889e-01, 9.23385918e-02]], + + [[1.86260208e-01, 3.45560730e-01, 3.96767467e-01], + [5.38816750e-01, 4.19194520e-01, 6.85219526e-01]], + + [[2.04452246e-01, 8.78117442e-01, 2.73875929e-02], + [6.70467496e-01, 4.17304814e-01, 5.58689833e-01]], + + [[1.40386939e-01, 1.98101491e-01, 8.00744593e-01], + [9.68261600e-01, 3.13424170e-01, 6.92322612e-01]], + + [[8.76389146e-01, 8.94606650e-01, 8.50442126e-02], + [3.90547849e-02, 1.69830427e-01, 8.78142476e-01]]]).astype("float32") + labels = np.array([[1, 2, 2], + [1, 2, 2]]).astype("int32") + input_lengths = np.array([5, 5]).astype("int64") + label_lengths = np.array([3, 3]).astype("int64") + + paddle.disable_static() + log_probs = paddle.to_tensor(log_probs) + labels = paddle.to_tensor(labels) + input_lengths = paddle.to_tensor(input_lengths) + label_lengths = paddle.to_tensor(label_lengths) + + loss = paddle.nn.CTCLoss(blank=0, reduction='none')(log_probs, labels, + input_lengths, + label_lengths) + print(loss.numpy()) #[3.9179852 2.9076521] + + loss = paddle.nn.CTCLoss(blank=0, reduction='mean')(log_probs, labels, + input_lengths, + label_lengths) + print(loss.numpy()) #[1.1376063] + diff --git a/doc/paddle/api/paddle/nn/layer/loss/CrossEntropyLoss_cn.rst b/doc/paddle/api/paddle/nn/layer/loss/CrossEntropyLoss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0535e8690f6902f24fb31e39fa06d61bf4c88117 --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/loss/CrossEntropyLoss_cn.rst @@ -0,0 +1,54 @@ +.. _cn_api_nn_loss_CrossEntropyLoss: + +CrossEntropyLoss +------------------------------- + +.. py:function:: paddle.nn.loss.CrossEntropyLoss(weight=None, ignore_index=-100, reduction='mean') + +该OP计算输入input和标签label间的交叉熵损失 ,它结合了 `LogSoftmax` 和 `NLLLoss` 的OP计算,可用于训练一个 `n` 类分类器。 + +如果提供 `weight` 参数的话,它是一个 `1-D` 的tensor, 每个值对应每个类别的权重。 +该损失函数的数学计算公式如下: + + .. math:: + loss_j = -\text{input[class]} + + \log\left(\sum_{i=0}^{K}\exp(\text{input}_i)\right), j = 1,..., K + +当 `weight` 不为 `none` 时,损失函数的数学计算公式为: + + .. math:: + loss_j = \text{weight[class]}(-\text{input[class]} + + \log\left(\sum_{i=0}^{K}\exp(\text{input}_i)\right)), j = 1,..., K + + +参数 +::::::::: + - **weight** (Tensor, 可选): - 指定每个类别的权重。其默认为 `None` 。如果提供该参数的话,维度必须为 `C` (类别数)。数据类型为float32或float64。 + - **ignore_index** (int64, 可选): - 指定一个忽略的标签值,此标签值不参与计算。默认值为-100。数据类型为int64。 + - **reduction** (str, 可选): - 指定应用于输出结果的计算方式,数据类型为string,可选值有: `none`, `mean`, `sum` 。默认为 `mean` ,计算 `mini-batch` loss均值。设置为 `sum` 时,计算 `mini-batch` loss的总和。设置为 `none` 时,则返回loss Tensor。 + +形状 +::::::::: + - **input** (Tensor): - 输入 `Tensor` ,数据类型为float32或float64。其形状为 :math:`[N, C]` , 其中 `C` 为类别数。对于多维度的情形下,它的形状为 :math:`[N, C, d_1, d_2, ..., d_k]` ,k >= 1。 + - **label** (Tensor): - 输入input对应的标签值,数据类型为int64。其形状为 :math:`[N]` ,每个元素符合条件:0 <= label[i] <= C-1。对于多维度的情形下,它的形状为 :math:`[N, d_1, d_2, ..., d_k]` ,k >= 1。 + - **output** (Tensor): - 计算 `CrossEntropyLoss` 交叉熵后的损失值。 + + +代码示例 +::::::::: + +.. 
code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + input_data = np.random.random([5, 100]).astype("float64") + label_data = np.random.randint(0, 100, size=(5)).astype(np.int64) + weight_data = np.random.random([100]).astype("float64") + input = paddle.to_tensor(input_data) + label = paddle.to_tensor(label_data) + weight = paddle.to_tensor(weight_data) + ce_loss = paddle.nn.loss.CrossEntropyLoss(weight=weight, reduction='mean') + output = ce_loss(input, label) + print(output.numpy()) + diff --git a/doc/paddle/api/paddle/nn/layer/loss/KLDivLoss_cn.rst b/doc/paddle/api/paddle/nn/layer/loss/KLDivLoss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..49267b8a1afc0b755a06881067949cdd79ebdae2 --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/loss/KLDivLoss_cn.rst @@ -0,0 +1,68 @@ +KLDivLoss +------------------------------- + +.. py:class:: paddle.nn.loss.KLDivLoss(reduction='mean') + +该算子计算输入(Input)和输入(Label)之间的Kullback-Leibler散度损失。注意其中输入(Input)应为对数概率值,输入(Label)应为概率值。 + +kL发散损失计算如下: + +.. math:: + + l(input, label) = label * (log(label) - input) + + +当 ``reduction`` 为 ``none`` 时,输出损失与输入(input)形状相同,各点的损失单独计算,不会对结果做reduction 。 + +当 ``reduction`` 为 ``mean`` 时,输出损失为[1]的形状,输出为所有损失的平均值。 + +当 ``reduction`` 为 ``sum`` 时,输出损失为[1]的形状,输出为所有损失的总和。 + +当 ``reduction`` 为 ``batchmean`` 时,输出损失为[N]的形状,N为批大小,输出为所有损失的总和除以批量大小。 + +参数: + - **reduction** (str,可选) - 要应用于输出的reduction类型,可用类型为‘none’ | ‘batchmean’ | ‘mean’ | ‘sum’,‘none’表示无reduction,‘batchmean’ 表示输出的总和除以批大小,‘mean’ 表示所有输出的平均值,‘sum’表示输出的总和。 + +形状: + - **input** (Tensor): - 输入的Tensor,维度是[N, *], 其中N是batch size, `*` 是任意数量的额外维度。数据类型为:float32、float64。 + - **label** (Tensor): - 标签,维度是[N, *], 与 ``input`` 相同。数据类型为:float32、float64。 + - **output** (Tensor): - 输入 ``input`` 和标签 ``label`` 间的kl散度。如果 `reduction` 是 ``'none'``, 则输出Loss的维度为 [N, *], 与输入 ``input`` 相同。如果 `reduction` 是 ``'batchmean'`` 、 ``'mean'`` 或 ``'sum'``, 则输出Loss的维度为 [1]。 + +**代码示例:** + +.. code-block:: python + + import paddle + import numpy as np + import paddle.nn as nn + + paddle.disable_static() + + shape = (5, 20) + x = np.random.uniform(-10, 10, shape).astype('float32') + target = np.random.uniform(-10, 10, shape).astype('float32') + + # 'batchmean' reduction, loss shape will be [N] + kldiv_criterion = nn.KLDivLoss(reduction='batchmean') + pred_loss = kldiv_criterion(paddle.to_tensor(x), + paddle.to_tensor(target)) + # shape=[5] + + # 'mean' reduction, loss shape will be [1] + kldiv_criterion = nn.KLDivLoss(reduction='mean') + pred_loss = kldiv_criterion(paddle.to_tensor(x), + paddle.to_tensor(target)) + # shape=[1] + + # 'sum' reduction, loss shape will be [1] + kldiv_criterion = nn.KLDivLoss(reduction='sum') + pred_loss = kldiv_criterion(paddle.to_tensor(x), + paddle.to_tensor(target)) + # shape=[1] + + # 'none' reduction, loss shape is same with X shape + kldiv_criterion = nn.KLDivLoss(reduction='none') + pred_loss = kldiv_criterion(paddle.to_tensor(x), + paddle.to_tensor(target)) + # shape=[5, 20] + diff --git a/doc/paddle/api/paddle/nn/layer/loss/L1Loss_cn.rst b/doc/paddle/api/paddle/nn/layer/loss/L1Loss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..050a1c0b19073d294bb519a447b0ec2f284bae57 --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/loss/L1Loss_cn.rst @@ -0,0 +1,63 @@ +L1Loss +------------------------------- + +.. py:class:: paddle.nn.loss.L1Loss(reduction='mean', name=None) + +该接口用于创建一个L1Loss的可调用类,L1Loss计算输入input和标签label间的 `L1 loss` 损失。 + +该损失函数的数学计算公式如下: + +当 `reduction` 设置为 ``'none'`` 时, + + .. 
math:: + Out = \lvert input - label\rvert + +当 `reduction` 设置为 ``'mean'`` 时, + + .. math:: + Out = MEAN(\lvert input - label\rvert) + +当 `reduction` 设置为 ``'sum'`` 时, + + .. math:: + Out = SUM(\lvert input - label\rvert) + + +参数 +::::::::: + - **reduction** (str, 可选): - 指定应用于输出结果的计算方式,可选值有: ``'none'``, ``'mean'``, ``'sum'`` 。默认为 ``'mean'``,计算 `L1Loss` 的均值;设置为 ``'sum'`` 时,计算 `L1Loss` 的总和;设置为 ``'none'`` 时,则返回 `L1Loss`。 + - **name** (str,可选): - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状 +::::::::: + - **input** (Tensor): - 输入的Tensor,维度是[N, *], 其中N是batch size, `*` 是任意数量的额外维度。数据类型为:float32、float64、int32、int64。 + - **label** (Tensor): - 标签,维度是[N, *], 与 ``input`` 相同。数据类型为:float32、float64、int32、int64。 + - **output** (Tensor): - 输入 ``input`` 和标签 ``label`` 间的 `L1 loss` 损失。如果 `reduction` 是 ``'none'``, 则输出Loss的维度为 [N, *], 与输入 ``input`` 相同。如果 `reduction` 是 ``'mean'`` 或 ``'sum'``, 则输出Loss的维度为 [1]。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + + paddle.disable_static() + input = paddle.to_tensor([[1.5, 0.8], [0.2, 1.3]]) + label = paddle.to_tensor([[1.7, 1], [0.4, 0.5]]) + + l1_loss = paddle.nn.loss.L1Loss() + output = l1_loss(input, label) + print(output.numpy()) + # [0.35] + + l1_loss = paddle.nn.loss.L1Loss(reduction='sum') + output = l1_loss(input, label) + print(output.numpy()) + # [1.4] + + l1_loss = paddle.nn.loss.L1Loss(reduction='none') + output = l1_loss(input, label) + print(output.numpy()) + # [[0.20000005 0.19999999] + # [0.2 0.79999995]] + diff --git a/doc/paddle/api/paddle/nn/layer/loss/MSELoss_cn.rst b/doc/paddle/api/paddle/nn/layer/loss/MSELoss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d147671a0d703dbbc4e57bed5d335c5b13ebd43c --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/loss/MSELoss_cn.rst @@ -0,0 +1,70 @@ +MSELoss +------------------------------- + +.. py:function:: paddle.nn.loss.MSELoss(reduction='mean') + +该OP用于计算预测值和目标值的均方差误差。 + +对于预测值input和目标值label: + +当reduction为'none'时: + +.. math:: + Out = (input - label)^2 + +当`reduction`为`'mean'`时: + +.. math:: + Out = \operatorname{mean}((input - label)^2) + +当`reduction`为`'sum'`时: + +.. math:: + Out = \operatorname{sum}((input - label)^2) + +参数: + - **reduction** (str, 可选) - 约简方式,可以是 'none' | 'mean' | 'sum'。设为'none'时不使用约简,设为'mean'时返回loss的均值,设为'sum'时返回loss的和。 + +形状: + - **input** (Tensor) - 预测值,维度为 :math:`[N_1, N_2, ..., N_k]` 的多维Tensor。数据类型为float32或float64。 + - **label** (Tensor) - 目标值,维度为 :math:`[N_1, N_2, ..., N_k]` 的多维Tensor。数据类型为float32或float64。 + + +返回:变量(Tensor), 预测值和目标值的均方差, 数值类型与输入相同 + + +**代码示例**: + +.. 
code-block:: python + + import numpy as np + import paddle + + + # static graph mode + paddle.enable_static() + mse_loss = paddle.nn.loss.MSELoss() + input = paddle.data(name="input", shape=[1]) + label = paddle.data(name="label", shape=[1]) + place = paddle.CPUPlace() + input_data = np.array([1.5]).astype("float32") + label_data = np.array([1.7]).astype("float32") + + output = mse_loss(input,label) + exe = paddle.static.Executor(place) + exe.run(paddle.static.default_startup_program()) + output_data = exe.run( + paddle.static.default_main_program(), + feed={"input":input_data, "label":label_data}, + fetch_list=[output], + return_numpy=True) + print(output_data) + # [array([0.04000002], dtype=float32)] + + # dynamic graph mode + paddle.disable_static() + input = paddle.to_tensor(input_data) + label = paddle.to_tensor(label_data) + output = mse_loss(input, label) + print(output.numpy()) + # [0.04000002] diff --git a/doc/paddle/api/paddle/nn/layer/loss/MarginRankingLoss_cn.rst b/doc/paddle/api/paddle/nn/layer/loss/MarginRankingLoss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4d840b1751cd3146415ee075ac7a3838f47fbe87 --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/loss/MarginRankingLoss_cn.rst @@ -0,0 +1,56 @@ +.. _cn_api_nn_loss_MarginRankingLoss: + +MarginRankingLoss +------------------------------- + +.. py:class:: paddle.nn.loss.MarginRankingLoss(margin=0.0, reduction='mean', name=None) + +该接口用于创建一个 ``MarginRankingLoss`` 的可调用类,计算输入input,other 和 标签label间的 `margin rank loss` 损失。 + +该损失函数的数学计算公式如下: + + .. math:: + margin\_rank\_loss = max(0, -label * (input - other) + margin) + +当 `reduction` 设置为 ``'mean'`` 时, + + .. math:: + Out = MEAN(margin\_rank\_loss) + +当 `reduction` 设置为 ``'sum'`` 时, + + .. math:: + Out = SUM(margin\_rank\_loss) + +当 `reduction` 设置为 ``'none'`` 时,直接返回最原始的 `margin_rank_loss` 。 + +参数 +:::::::: + - **margin** (float,可选): - 用于加和的margin值,默认值为0。 + - **reduction** (string,可选): - 指定应用于输出结果的计算方式,可选值有: ``'none'`` 、 ``'mean'`` 、 ``'sum'`` 。如果设置为 ``'none'`` ,则直接返回 最原始的 ``margin_rank_loss`` 。如果设置为 ``'sum'`` ,则返回 ``margin_rank_loss`` 的总和。如果设置为 ``'mean'`` ,则返回 ``margin_rank_loss`` 的平均值。默认值为 ``'none'`` 。 + - **name** (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状 +:::::::: + - **input** - N-D Tensor, 维度是[N,*] 其中N 是batch size,`*` 是任意数量的额外维度,数据类型为float32或float64。 + - **other** - 与 ``input`` 的形状、数据类型相同。 + - **label** - 与 ``input`` 的形状、数据类型相同。 + - **output** - 如果 :attr:`reduction` 为 ``'sum'`` 或者是 ``'mean'`` ,则形状为 :math:`[1]` ,否则shape和输入 `input` 保持一致 。数据类型与 ``input``、 ``other`` 相同。 + +返回 +:::::::: +返回计算MarginRankingLoss的可调用对象。 + +代码示例 +:::::::: + +.. code-block:: python + + import paddle + paddle.disable_static() + input = paddle.to_tensor([[1, 2], [3, 4]], dtype='float32') + other = paddle.to_tensor([[2, 1], [2, 4]], dtype='float32') + label = paddle.to_tensor([[1, -1], [-1, -1]], dtype='float32') + margin_rank_loss = paddle.nn.MarginRankingLoss() + loss = margin_rank_loss(input, other, label) + print(loss.numpy()) # [0.75] diff --git a/doc/paddle/api/paddle/nn/layer/loss/NLLLoss_cn.rst b/doc/paddle/api/paddle/nn/layer/loss/NLLLoss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..30d19fedfc3983d05b96fb9deabe5e444448d71a --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/loss/NLLLoss_cn.rst @@ -0,0 +1,70 @@ +.. _cn_api_nn_loss_NLLLoss: + +NLLLoss +------------------------------- + +.. 
py:class:: paddle.nn.loss.NLLLoss(weight=None, ignore_index=-100, reduction='mean', name=None) + +该接口可创建一个NLLLoss可调用类,计算输入x和标签label间的 `negative log likelihood loss` 损失 ,可用于训练一个 `n` 类分类器。 + +如果提供 `weight` 参数的话,它是一个 `1-D` 的tensor, 里面的值对应类别的权重。当你的训练集样本 +不均衡的话,使用这个参数是非常有用的。 + +该损失函数的数学计算公式如下: + +当 `reduction` 设置为 `none` 时,损失函数的数学计算公式为: + + .. math:: + \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad + l_n = - w_{y_n} x_{n,y_n}, \quad + w_{c} = \text{weight}[c] \cdot \mathbb{1}\{c \not= \text{ignore_index}\}, + +其中 `N` 表示 `batch_size` 。如果 `reduction` 的值不是 `none` (默认为 `mean`),那么此时损失函数 +的数学计算公式为: + + .. math:: + \ell(x, y) = \begin{cases} + \sum_{n=1}^N \frac{1}{\sum_{n=1}^N w_{y_n}} l_n, & + \text{if reduction} = \text{'mean';}\\ + \sum_{n=1}^N l_n, & + \text{if reduction} = \text{'sum'.} + \end{cases} + +参数 +::::::::: + - **weight** (Tensor, 可选): - 手动指定每个类别的权重。其默认为 `None` 。如果提供该参数的话,长度必须为 `num_classes` 。数据类型为float32或float64。 + - **ignore_index** (int64, 可选): - 指定一个忽略的标签值,此标签值不参与计算。默认值为-100。数据类型为int64。 + - **reduction** (str, 可选): - 指定应用于输出结果的计算方式,可选值有: `none`, `mean`, `sum` 。默认为 `mean` ,计算 `mini-batch` loss均值。设置为 `sum` 时,计算 `mini-batch` loss的总和。设置为 `none` 时,则返回loss Tensor。数据类型为string。 + - **name** (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +形状 +::::::::: + - **input** (Tensor): - 输入 `Tensor`, 其形状为 :math:`[N, C]` , 其中 `C` 为类别数。但是对于多维度的情形下,它的形状为 :math:`[N, C, d_1, d_2, ..., d_K]` 。数据类型为float32或float64。 + - **label** (Tensor): - 输入 `input` 对应的标签值。其形状为 :math:`[N,]` 或者 :math:`[N, d_1, d_2, ..., d_K]`, 数据类型为int64。 + - **output** (Tensor): - 输入 `input` 和 `label` 间的 `negative log likelihood loss` 损失。如果 `reduction` 为 `'none'` ,则输出Loss形状为 `[N, *]` 。 如果 `reduction` 为 `'sum'` 或者 `'mean'` ,则输出Loss形状为 `'[1]'` 。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + nll_loss = paddle.nn.layer.NLLLoss() + log_softmax = paddle.nn.LogSoftmax(axis=1) + + input_np = np.array([[0.88103855, 0.9908683 , 0.6226845 ], + [0.53331435, 0.07999352, 0.8549948 ], + [0.25879037, 0.39530203, 0.698465 ], + [0.73427284, 0.63575995, 0.18827209], + [0.05689114, 0.0862954 , 0.6325046 ]]).astype(np.float32) + label_np = np.array([0, 2, 1, 1, 0]).astype(np.int64) + + place = paddle.CPUPlace() + paddle.disable_static(place) + input = paddle.to_tensor(input_np) + log_out = log_softmax(input) + label = paddle.to_tensor(label_np) + result = nll_loss(log_out, label) + print(result.numpy()) # [1.0720209] diff --git a/doc/paddle/api/paddle/nn/layer/loss/SmoothL1Loss_cn.rst b/doc/paddle/api/paddle/nn/layer/loss/SmoothL1Loss_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8a5fee1e07cb45a055fc2eb65b45233d1ca6cbdd --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/loss/SmoothL1Loss_cn.rst @@ -0,0 +1,52 @@ +SmoothL1Loss +------------------------------- + +.. py:class:: paddle.nn.loss.SmoothL1Loss(reduction='mean', delta=1.0, name=None) + +该OP计算输入input和标签label间的SmoothL1损失,如果逐个元素的绝对误差低于1,则创建使用平方项的条件 +,否则为L1损失。在某些情况下,它可以防止爆炸梯度, 也称为Huber损失,该损失函数的数学计算公式如下: + + .. math:: + loss(x,y)=\\frac{1}{n}\\sum_{i}z_i + +`z_i`的计算公式如下: + + .. math:: + + \\mathop{z_i}=\\left\\{\\begin{array}{rcl} + 0.5(x_i - y_i)^2 & & {if |x_i - y_i| < delta} \\\\ + delta * |x_i - y_i| - 0.5 * delta^2 & & {otherwise} + \\end{array} \\right. 
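下面给出一个仅作示意的 NumPy 片段(并非 Paddle 的实现,数组取值为假设的示例数据),按上述分段公式手工计算 Smooth L1(Huber)损失,便于理解 delta 阈值在平方项与线性项之间的切换:

.. code-block:: python

    # 仅作示意:按上述分段公式用 NumPy 手工计算 Smooth L1(Huber)损失
    import numpy as np

    def smooth_l1(x, y, delta=1.0, reduction='mean'):
        diff = np.abs(x - y)
        # |x - y| < delta 时取平方项,否则取线性项
        z = np.where(diff < delta,
                     0.5 * diff ** 2,
                     delta * diff - 0.5 * delta ** 2)
        if reduction == 'mean':
            return z.mean()
        if reduction == 'sum':
            return z.sum()
        return z

    x = np.array([0.5, 2.0], dtype='float32')  # 假设的预测值
    y = np.array([0.0, 0.0], dtype='float32')  # 假设的标签值
    print(smooth_l1(x, y))  # (0.125 + 1.5) / 2 = 0.8125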
+ +参数 +:::::::::: + - **reduction** (string, 可选): - 指定应用于输出结果的计算方式,数据类型为string,可选值有: `none`, `mean`, `sum` 。默认为 `mean` ,计算 `mini-batch` loss均值。设置为 `sum` 时,计算 `mini-batch` loss的总和。设置为 `none` 时,则返回loss Tensor。 + - **delta** (float, 可选): SmoothL1Loss损失的阈值参数,用于控制Huber损失对线性误差或平方误差的侧重。数据类型为float32。默认值为1.0。 + - **name** (string, 可选): - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +调用参数 +:::::::::: + - **input** (Tensor): 输入 `Tensor`, 数据类型为float32。其形状为 :math:`[N, C]` , 其中 `C` 为类别数。对于多维度的情形下,它的形状为 :math:`[N, C, d_1, d_2, ..., d_k]`,k >= 1。 + - **label** (Tensor): 输入input对应的标签值,其形状和数据类型与input相同。 + + + +返回:返回计算 `SmoothL1Loss` 后的损失值。 + +返回类型:Tensor + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + input_data = np.random.rand(3,3).astype("float32") + label_data = np.random.rand(3,3).astype("float32") + input = paddle.to_tensor(input_data) + label = paddle.to_tensor(label_data) + loss = paddle.nn.SmoothL1Loss() + output = loss(input, label) + print(output.numpy()) diff --git a/doc/paddle/api/paddle/nn/layer/norm/BatchNorm1d_cn.rst b/doc/paddle/api/paddle/nn/layer/norm/BatchNorm1d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c25ec52a26365319fea221ed14c0afd79eea200c --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/norm/BatchNorm1d_cn.rst @@ -0,0 +1,74 @@ +.. _cn_api_nn_BatchNorm1d: + +BatchNorm1d +------------------------------- + +.. py:class:: paddle.nn.BatchNorm1d(num_features, momentum=0.9, epsilon=1e-05, weight_attr=None, bias_attr=None, data_format='NCL', track_running_stats=True, name=None): + + +该接口用于构建 ``BatchNorm1d`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。可以处理2D或者3D的Tensor, 实现了批归一化层(Batch Normalization Layer)的功能,可用作卷积和全连接操作的批归一化函数,根据当前批次数据按通道计算的均值和方差进行归一化。更多详情请参考 : `Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift `_ + +当训练时 :math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 是minibatch的统计数据。计算公式如下: + +.. math:: + \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \quad &// mini-batch-mean \\ + \sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i - \mu_{\beta})^2 \quad &// mini-batch-variance \\ + +- :math:`x` : 批输入数据 +- :math:`m` : 当前批次数据的大小 + +当预测时(track_running_stats = True),:math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 是全局(或运行)统计数据(moving_mean和moving_variance),通常来自预先训练好的模型。计算公式如下: + +.. math:: + + moving\_mean = moving\_mean * momentum + \mu_{\beta} * (1. - momentum) \quad &// global mean \\ + moving\_variance = moving\_variance * momentum + \sigma_{\beta}^{2} * (1. - momentum) \quad &// global variance \\ + +归一化函数公式如下: + +.. 
math:: + + \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\sigma_{\beta}^{2} + \epsilon}} \quad &// normalize \\ + y_i &\gets \gamma \hat{x_i} + \beta \quad &// scale-and-shift \\ + +- :math:`\epsilon` : 添加较小的值到方差中以防止除零 +- :math:`\gamma` : 可训练的比例参数 +- :math:`\beta` : 可训练的偏差参数 + +参数: + - **num_features** (int) - 指明输入 ``Tensor`` 的通道数量。 + - **epsilon** (float, 可选) - 为了数值稳定加在分母上的值。默认值:1e-05。 + - **momentum** (float, 可选) - 此值用于计算 ``moving_mean`` 和 ``moving_var`` 。默认值:0.9。更新公式如上所示。 + - **weight_attr** (ParamAttr|bool, 可选) - 指定权重参数属性的对象。如果为False, 则表示每个通道的伸缩固定为1,不可改变。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 + - **bias_attr** (ParamAttr, 可选) - 指定偏置参数属性的对象。如果为False, 则表示每一个通道的偏移固定为0,不可改变。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 + - **data_format** (string, 可选) - 指定输入数据格式,数据格式可以为“NC"或者"NCL"。默认值:“NCL”。 + - **track_running_stats** (bool, 可选) – 指示是否使用全局均值和方差。在训练时,设置为True表示在训练期间将保存全局均值和方差用于推理。推理时此属性只能设置为True。默认值:True。 + - **name** (string, 可选) – BatchNorm的名称, 默认值为None。更多信息请参见 :ref:`api_guide_Name` 。 + + +返回:无 + +形状: + - input: 形状为(批大小,通道数)的2-D Tensor 或(批大小, 通道数,长度)的3-D Tensor。 + - output: 和输入形状一样。 + +.. note:: +目前训练时设置track_running_stats为False是无效的,实际还是会按照True的方案保存全局均值和方差。之后的版本会修复此问题。 + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + np.random.seed(123) + x_data = np.random.random(size=(2, 1, 3)).astype('float32') + x = paddle.to_tensor(x_data) + batch_norm = paddle.nn.BatchNorm1d(1) + batch_norm_out = batch_norm(x) + + print(batch_norm_out.numpy()) + diff --git a/doc/paddle/api/paddle/nn/layer/norm/BatchNorm2d_cn.rst b/doc/paddle/api/paddle/nn/layer/norm/BatchNorm2d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..af590ce252fa8c65fe46f01a9138bb4adac2554d --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/norm/BatchNorm2d_cn.rst @@ -0,0 +1,74 @@ +.. _cn_api_nn_BatchNorm2d: + +BatchNorm2d +------------------------------- + +.. py:class:: paddle.nn.BatchNorm2d(num_features, momentum=0.9, epsilon=1e-05, weight_attr=None, bias_attr=None, data_format='NCHW', track_running_stats=True, name=None): + + +该接口用于构建 ``BatchNorm2d`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。可以处理4D的Tensor, 实现了批归一化层(Batch Normalization Layer)的功能,可用作卷积和全连接操作的批归一化函数,根据当前批次数据按通道计算的均值和方差进行归一化。更多详情请参考 : `Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift `_ + +当训练时 :math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 是minibatch的统计数据。计算公式如下: + +.. math:: + \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \quad &// mini-batch-mean \\ + \sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i - \mu_{\beta})^2 \quad &// mini-batch-variance \\ + +- :math:`x` : 批输入数据 +- :math:`m` : 当前批次数据的大小 + +当预测时,track_running_stats = True :math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 是全局(或运行)统计数据(moving_mean和moving_variance),通常来自预先训练好的模型。计算公式如下: + +.. math:: + + moving\_mean = moving\_mean * momentum + \mu_{\beta} * (1. - momentum) \quad &// global mean \\ + moving\_variance = moving\_variance * momentum + \sigma_{\beta}^{2} * (1. - momentum) \quad &// global variance \\ + +归一化函数公式如下: + +.. 
math:: + + \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\sigma_{\beta}^{2} + \epsilon}} \quad &// normalize \\ + y_i &\gets \gamma \hat{x_i} + \beta \quad &// scale-and-shift \\ + +- :math:`\epsilon` : 添加较小的值到方差中以防止除零 +- :math:`\gamma` : 可训练的比例参数 +- :math:`\beta` : 可训练的偏差参数 + +参数: + - **num_features** (int) - 指明输入 ``Tensor`` 的通道数量。 + - **epsilon** (float, 可选) - 为了数值稳定加在分母上的值。默认值:1e-05。 + - **momentum** (float, 可选) - 此值用于计算 ``moving_mean`` 和 ``moving_var`` 。默认值:0.9。更新公式如上所示。 + - **weight_attr** (ParamAttr|bool, 可选) - 指定权重参数属性的对象。如果为False, 则表示每个通道的伸缩固定为1,不可改变。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 + - **bias_attr** (ParamAttr, 可选) - 指定偏置参数属性的对象。如果为False, 则表示每一个通道的偏移固定为0,不可改变。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 + - **data_format** (string, 可选) - 指定输入数据格式,数据格式可以为"NCHW"。默认值:“NCHW”。 + - **track_running_stats** (bool, 可选) – 指示是否使用全局均值和方差。在训练时,设置为True表示在训练期间将保存全局均值和方差用于推理。推理时此属性只能设置为True。默认值:True。 + - **name** (string, 可选) – BatchNorm的名称, 默认值为None。更多信息请参见 :ref:`api_guide_Name` 。 + + +返回:无 + +形状: + - input: 形状为(批大小,通道数, 高度,宽度)的4-D Tensor 或(批大小, 通道数,宽度,高度)的4-D Tensor。 + - output: 和输入形状一样。 + +.. note:: +目前训练时设置track_running_stats为False是无效的,实际还是会按照True的方案保存全局均值和方差。之后的版本会修复此问题。 + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + np.random.seed(123) + x_data = np.random.random(size=(2, 1, 2, 3)).astype('float32') + x = paddle.to_tensor(x_data) + batch_norm = paddle.nn.BatchNorm2d(1) + batch_norm_out = batch_norm(x) + + print(batch_norm_out.numpy()) + diff --git a/doc/paddle/api/paddle/nn/layer/norm/BatchNorm3d_cn.rst b/doc/paddle/api/paddle/nn/layer/norm/BatchNorm3d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..98152e9f539fee04642390274c00e48a33c33696 --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/norm/BatchNorm3d_cn.rst @@ -0,0 +1,74 @@ +.. _cn_api_nn_BatchNorm3d: + +BatchNorm3d +------------------------------- + +.. py:class:: paddle.nn.BatchNorm3d(num_features, momentum=0.9, epsilon=1e-05, weight_attr=None, bias_attr=None, data_format='NCDHW', track_running_stats=True, name=None): + + +该接口用于构建 ``BatchNorm3d`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。可以处理4D的Tensor, 实现了批归一化层(Batch Normalization Layer)的功能,可用作卷积和全连接操作的批归一化函数,根据当前批次数据按通道计算的均值和方差进行归一化。更多详情请参考 : `Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift `_ + +当训练时 :math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 是minibatch的统计数据。计算公式如下: + +.. math:: + \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \quad &// mini-batch-mean \\ + \sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i - \mu_{\beta})^2 \quad &// mini-batch-variance \\ + +- :math:`x` : 批输入数据 +- :math:`m` : 当前批次数据的大小 + +当预测时,track_running_stats = True :math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 是全局(或运行)统计数据(moving_mean和moving_variance),通常来自预先训练好的模型。计算公式如下: + +.. math:: + + moving\_mean = moving\_mean * momentum + \mu_{\beta} * (1. - momentum) \quad &// global mean \\ + moving\_variance = moving\_variance * momentum + \sigma_{\beta}^{2} * (1. - momentum) \quad &// global variance \\ + +归一化函数公式如下: + +.. 
math:: + + \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\sigma_{\beta}^{2} + \epsilon}} \quad &// normalize \\ + y_i &\gets \gamma \hat{x_i} + \beta \quad &// scale-and-shift \\ + +- :math:`\epsilon` : 添加较小的值到方差中以防止除零 +- :math:`\gamma` : 可训练的比例参数 +- :math:`\beta` : 可训练的偏差参数 + +参数: + - **num_features** (int) - 指明输入 ``Tensor`` 的通道数量。 + - **epsilon** (float, 可选) - 为了数值稳定加在分母上的值。默认值:1e-05。 + - **momentum** (float, 可选) - 此值用于计算 ``moving_mean`` 和 ``moving_var`` 。默认值:0.9。更新公式如上所示。 + - **weight_attr** (ParamAttr|bool, 可选) - 指定权重参数属性的对象。如果为False, 则表示每个通道的伸缩固定为1,不可改变。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 + - **bias_attr** (ParamAttr, 可选) - 指定偏置参数属性的对象。如果为False, 则表示每一个通道的偏移固定为0,不可改变。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 + - **data_format** (string, 可选) - 指定输入数据格式,数据格式可以为“NCDHW"。默认值:“NCDHW”。 + - **track_running_stats** (bool, 可选) – 指示是否使用全局均值和方差。在训练时,设置为True表示在训练期间将保存全局均值和方差用于推理。推理时此属性只能设置为True。默认值:True。 + - **name** (string, 可选) – BatchNorm的名称, 默认值为None。更多信息请参见 :ref:`api_guide_Name` 。 + + +返回:无 + +形状: + - input: 形状为(批大小,通道数, 维度,高度,宽度)的5-D Tensor。 + - output: 和输入形状一样。 + +.. note:: +目前训练时设置track_running_stats为False是无效的,实际还是会按照True的方案保存全局均值和方差。之后的版本会修复此问题。 + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + np.random.seed(123) + x_data = np.random.random(size=(2, 1, 2, 2, 3)).astype('float32') + x = paddle.to_tensor(x_data) + batch_norm = paddle.nn.BatchNorm3d(1) + batch_norm_out = batch_norm(x) + + print(batch_norm_out.numpy()) + diff --git a/doc/paddle/api/paddle/nn/layer/norm/GroupNorm_cn.rst b/doc/paddle/api/paddle/nn/layer/norm/GroupNorm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4628093cc07d714743d24e26a1ef34832f4bca67 --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/norm/GroupNorm_cn.rst @@ -0,0 +1,41 @@ +.. _cn_api_nn_GroupNorm: + +GroupNorm +------------------------------- + +.. py:class:: paddle.nn.GroupNorm(num_groups, num_channels, epsilon=1e-05, weight_attr=None, bias_attr=None, data_layout='NCHW, 'name=None) + +**Group Normalization层** + +该接口用于构建 ``GroupNorm`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。其中实现了组归一化层的功能。更多详情请参考: `Group Normalization `_ 。 + +参数: + - **num_groups** (int) - 从通道中分离出来的 ``group`` 的数目。 + - **num_channels** (int) - 输入的通道数。 + - **epsilon** (float, 可选) - 为防止方差除零,增加一个很小的值。默认值:1e-05。 + - **weight_attr** (ParamAttr|bool, 可选) - 指定权重参数属性的对象。如果为False, 表示参数不学习。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr|bool, 可选) - 指定偏置参数属性的对象。如果为False, 表示参数不学习。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **data_format** (string, 可选) - 只支持“NCHW”(num_batches,channels,height,width)格式。默认值:“NCHW”。 + - **name** (string, 可选) – GroupNorm的名称, 默认值为None。更多信息请参见 :ref:`api_guide_Name` 。 + +返回:无 + +形状: + - input: 形状为(批大小,通道数, 高度,宽度)的4-D Tensor。 + - output: 和输入形状一样。 + +**代码示例** + +.. 
code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + np.random.seed(123) + x_data = np.random.random(size=(2, 6, 2, 2)).astype('float32') + x = paddle.to_tensor(x_data) + group_norm = paddle.nn.GroupNorm(num_channels=6, num_groups=6) + group_norm_out = group_norm(x) + + print(group_norm_out.numpy()) diff --git a/doc/paddle/api/paddle/nn/layer/norm/InstanceNorm1d_cn.rst b/doc/paddle/api/paddle/nn/layer/norm/InstanceNorm1d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c77698a5506d9483d091474b60430e18b8d4f45b --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/norm/InstanceNorm1d_cn.rst @@ -0,0 +1,60 @@ +.. _cn_api_nn_InstanceNorm1d: + +InstanceNorm1d +------------------------------- + +.. py:class:: paddle.nn.InstanceNorm1d(num_features, epsilon=1e-05, momentum=0.9, weight_attr=None, bias_attr=None, track_running_stats=True, data_format="NCL", name=None): + + +该接口用于构建 ``InstanceNorm1d`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。可以处理2D或者3D的Tensor, 实现了实例归一化层(Instance Normalization Layer)的功能。更多详情请参考 : Instance Normalization: The Missing Ingredient for Fast Stylization . + +``input`` 是mini-batch的输入。 + +.. math:: + \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \quad &// mean \\ + \sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i - \mu_{\beta})^2 \quad &// variance \\ + \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\sigma_{\beta}^{2} + \epsilon}} \quad &// normalize \\ + y_i &\gets \gamma \hat{x_i} + \beta \quad &// scale-and-shift + + +Note: + `H` 是高度, `W` 是宽度. + + +参数: + - **num_features** (int) - 指明输入 ``Tensor`` 的通道数量。 + - **epsilon** (float, 可选) - 为了数值稳定加在分母上的值。默认值:1e-05。 + - **momentum** (float, 可选) - 此值用于计算 ``moving_mean`` 和 ``moving_var`` 。默认值:0.9。更新公式如上所示。 + - **weight_attr** (ParamAttr|bool, 可选) - 指定权重参数属性的对象。如果为False, 则表示每个通道的伸缩固定为1,不可改变。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 + - **bias_attr** (ParamAttr, 可选) - 指定偏置参数属性的对象。如果为False, 则表示每一个通道的偏移固定为0,不可改变。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 + - **track_running_stats** (bool, 可选) – 指示是否使用全局均值和方差。在训练时,设置为True表示在训练期间将保存全局均值和方差用于推理。推理时此属性只能设置为True。默认值:True。 + - **data_format** (string, 可选) - 指定输入数据格式,数据格式可以为“NC"或者"NCL"。默认值:“NCL”。 + - **name** (string, 可选) – InstanceNorm的名称, 默认值为None。更多信息请参见 :ref:`api_guide_Name` 。 + + +返回:无 + +形状: + - input: 形状为(批大小,通道数)的2-D Tensor 或(批大小, 通道数,长度)的3-D Tensor。 + - output: 和输入形状一样。 + +.. note:: +目前设置track_running_stats和momentum是无效的。之后的版本会修复此问题。 + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + np.random.seed(123) + x_data = np.random.random(size=(2, 2, 3)).astype('float32') + x = paddle.to_tensor(x_data) + instance_norm = paddle.nn.InstanceNorm1d(2) + instance_norm_out = instance_norm(x) + + print(instance_norm_out.numpy()) + diff --git a/doc/paddle/api/paddle/nn/layer/norm/InstanceNorm2d_cn.rst b/doc/paddle/api/paddle/nn/layer/norm/InstanceNorm2d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..cfb2571820a236e2991529841a984e8ae7a09ba3 --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/norm/InstanceNorm2d_cn.rst @@ -0,0 +1,60 @@ +.. _cn_api_nn_cn_InstanceNorm2d: + +InstanceNorm2d +------------------------------- + +.. 
py:class:: paddle.nn.InstanceNorm2d(num_features, epsilon=1e-05, momentum=0.9, weight_attr=None, bias_attr=None, track_running_stats=True, data_format="NCHW", name=None): + + +该接口用于构建 ``InstanceNorm2d`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。可以处理2D或者3D的Tensor, 实现了实例归一化层(Instance Normalization Layer)的功能。更多详情请参考 : Instance Normalization: The Missing Ingredient for Fast Stylization . + +``input`` 是mini-batch的输入。 + +.. math:: + \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \quad &// mean \\ + \sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i - \mu_{\beta})^2 \quad &// variance \\ + \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\sigma_{\beta}^{2} + \epsilon}} \quad &// normalize \\ + y_i &\gets \gamma \hat{x_i} + \beta \quad &// scale-and-shift + +Note: + `H` 是高度, `W` 是宽度. + + +参数: + - **num_features** (int) - 指明输入 ``Tensor`` 的通道数量。 + - **epsilon** (float, 可选) - 为了数值稳定加在分母上的值。默认值:1e-05。 + - **momentum** (float, 可选) - 此值用于计算 ``moving_mean`` 和 ``moving_var`` 。默认值:0.9。更新公式如上所示。 + - **weight_attr** (ParamAttr|bool, 可选) - 指定权重参数属性的对象。如果为False, 则表示每个通道的伸缩固定为1,不可改变。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 + - **bias_attr** (ParamAttr, 可选) - 指定偏置参数属性的对象。如果为False, 则表示每一个通道的偏移固定为0,不可改变。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 + - **track_running_stats** (bool, 可选) – 指示是否使用全局均值和方差。在训练时,设置为True表示在训练期间将保存全局均值和方差用于推理。推理时此属性只能设置为True。默认值:True。 + - **data_format** (string, 可选) - 指定输入数据格式,数据格式可以为“NCHW"。默认值:“NCHW”。 + - **name** (string, 可选) – InstanceNorm的名称, 默认值为None。更多信息请参见 :ref:`api_guide_Name` 。 + + +返回:无 + +形状: + - input: 形状为(批大小,通道数,高度,宽度)的4-D Tensor。 + - output: 和输入形状一样。 + +.. note:: +目前设置track_running_stats和momentum是无效的。之后的版本会修复此问题。 + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + np.random.seed(123) + x_data = np.random.random(size=(2, 2, 2, 3)).astype('float32') + x = paddle.to_tensor(x_data) + instance_norm = paddle.nn.InstanceNorm2d(2) + instance_norm_out = instance_norm(x) + + print(instance_norm_out.numpy()) + + diff --git a/doc/paddle/api/paddle/nn/layer/norm/InstanceNorm3d_cn.rst b/doc/paddle/api/paddle/nn/layer/norm/InstanceNorm3d_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e1c4ab4a9e547f99e7f8451fa6c2747887c9c10d --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/norm/InstanceNorm3d_cn.rst @@ -0,0 +1,58 @@ +.. _cn_api_nn_cn_InstanceNorm3d: + +InstanceNorm3d +------------------------------- + +.. py:class:: paddle.nn.InstanceNorm3d(num_features, epsilon=1e-05, momentum=0.9, weight_attr=None, bias_attr=None, track_running_stats=True, data_format="NCDHW", name=None): + +该接口用于构建 ``InstanceNorm3d`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。可以处理5D的Tensor, 实现了实例归一化层(Instance Normalization Layer)的功能。更多详情请参考 : Instance Normalization: The Missing Ingredient for Fast Stylization . + +``input`` 是mini-batch的输入。 + +.. math:: + \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \quad &// mean \\ + \sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i - \mu_{\beta})^2 \quad &// variance \\ + \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\sigma_{\beta}^{2} + \epsilon}} \quad &// normalize \\ + y_i &\gets \gamma \hat{x_i} + \beta \quad &// scale-and-shift + +Note: + `H` 是高度, `W` 是宽度. 
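下面给出一个仅作示意的 NumPy 片段(并非 Paddle 的实现,输入为随机生成的假设数据),按上述公式对 NCDHW 格式的 5-D 输入做实例归一化,即在每个样本的每个通道上沿 D、H、W 维度单独计算均值和方差:

.. code-block:: python

    # 仅作示意:用 NumPy 按上述公式对 NCDHW 格式的 5-D 输入做实例归一化
    import numpy as np

    np.random.seed(123)
    x = np.random.random((2, 2, 2, 2, 3)).astype('float32')  # 假设的输入数据
    gamma, beta, eps = 1.0, 0.0, 1e-5

    # 均值和方差在每个样本的每个通道上(即 D、H、W 维度)单独计算
    mu = x.mean(axis=(2, 3, 4), keepdims=True)
    var = x.var(axis=(2, 3, 4), keepdims=True)
    y = gamma * (x - mu) / np.sqrt(var + eps) + beta
    print(y.shape)  # (2, 2, 2, 2, 3),输出形状与输入一致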
+ + +参数: + - **num_features** (int) - 指明输入 ``Tensor`` 的通道数量。 + - **epsilon** (float, 可选) - 为了数值稳定加在分母上的值。默认值:1e-05。 + - **momentum** (float, 可选) - 此值用于计算 ``moving_mean`` 和 ``moving_var`` 。默认值:0.9。更新公式如上所示。 + - **weight_attr** (ParamAttr|bool, 可选) - 指定权重参数属性的对象。如果为False, 则表示每个通道的伸缩固定为1,不可改变。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 + - **bias_attr** (ParamAttr, 可选) - 指定偏置参数属性的对象。如果为False, 则表示每一个通道的偏移固定为0,不可改变。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 + - **track_running_stats** (bool, 可选) – 指示是否使用全局均值和方差。在训练时,设置为True表示在训练期间将保存全局均值和方差用于推理。推理时此属性只能设置为True。默认值:True。 + - **data_format** (string, 可选) - 指定输入数据格式,数据格式可以为"NCDHW"。默认值:“NCDHW”。 + - **name** (string, 可选) – InstanceNorm的名称, 默认值为None。更多信息请参见 :ref:`api_guide_Name` 。 + + +返回:无 + +形状: + - input: 形状为5-D Tensor。 + - output: 和输入形状一样。 + +.. note:: +目前设置track_running_stats和momentum是无效的。之后的版本会修复此问题。 + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + np.random.seed(123) + x_data = np.random.random(size=(2, 2, 2, 2, 3)).astype('float32') + x = paddle.to_tensor(x_data) + instance_norm = paddle.nn.InstanceNorm3d(2) + instance_norm_out = instance_norm(x) + + print(instance_norm_out.numpy()) + diff --git a/doc/paddle/api/paddle/nn/layer/norm/InstanceNorm_cn.rst b/doc/paddle/api/paddle/nn/layer/norm/InstanceNorm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..334fa5109f3ba0f52b1039357e08032671cd1849 --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/norm/InstanceNorm_cn.rst @@ -0,0 +1,51 @@ +.. _cn_api_fluid_dygraph_InstanceNorm: + +InstanceNorm +------------------------------- + +.. py:class:: paddle.fluid.dygraph.InstanceNorm(num_channels, epsilon=1e-05, param_attr=None, bias_attr=None, dtype='float32') + +该接口用于构建 ``InstanceNorm`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。 + +可用作卷积和全连接操作的实例正则化函数,根据每个样本的每个通道的均值和方差信息进行正则化。该层需要的数据格式如下: + +NCHW[batch,in_channels,in_height,in_width] + +更多详情请参考 : `Instance Normalization: The Missing Ingredient for Fast Stylization `_ + +``input`` 是mini-batch的输入。 + +.. math:: + \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \quad &// mean of each channel in each sample in a batch \\ + \sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i - \mu_{\beta})^2 \quad &// variance of each channel in each sample a batch \\ + \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\sigma_{\beta}^{2} + \epsilon}} \quad &// normalize \\ + y_i &\gets \gamma \hat{x_i} + \beta \quad &// scale-and-shift + + +参数: + - **num_channels** (int)- 指明输入 ``Tensor`` 的通道数量。 + - **epsilon** (float,默认1e-05)- 为了当前输入做标准化时得到稳定的结果而加在的分母上的扰动值。默认值为1e-5。 + - **param_attr** (ParamAttr|None) - instance_norm 权重参数的属性,可以设置为None或者一个ParamAttr的类(ParamAttr中可以指定参数的各种属性)。 如果设为None,则默认的参数初始化为1.0。如果在ParamAttr指定了属性时, instance_norm创建相应属性的param_attr(权重)参数。默认:None。 + - **bias_attr** (ParamAttr|None) - instance_norm 偏置参数的属性,可以设置为None或者一个ParamAttr的类(ParamAttr中可以指定参数的各种属性)。如果设为None,默认的参数初始化为0.0。如果在ParamAttr指定了参数的属性时, instance_norm创建相应属性的bias_attr(偏置)参数。默认:None。 + - **dtype** (string,默认float32)- 指明输入 ``Tensor`` 的数据类型,可以为float32或float64。默认:float32。 + +返回:无 + +**代码示例**: + +.. 
code-block:: python + + import paddle.fluid as fluid + from paddle.fluid.dygraph.base import to_variable + import numpy as np + import paddle + + # x's shape is [1, 3, 1, 2] + x = np.array([[[[1.0, 8.0]], [[10.0, 5.0]], [[4.0, 6.0]]]]).astype('float32') + with fluid.dygraph.guard(): + x = to_variable(x) + instanceNorm = paddle.nn.InstanceNorm(3) + ret = instanceNorm(x) + # ret's shape is [1, 3, 1, 2]; value is [-1 1 0.999999 -0.999999 -0.999995 0.999995] + print(ret) + diff --git a/doc/paddle/api/paddle/nn/layer/norm/LayerNorm_cn.rst b/doc/paddle/api/paddle/nn/layer/norm/LayerNorm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6329fc658a85c632010a57ccbb5cfe671fbdb379 --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/norm/LayerNorm_cn.rst @@ -0,0 +1,54 @@ +.. _cn_api_nn_LayerNorm: + +LayerNorm +------------------------------- + +.. py:class:: paddle.nn.LayerNorm(normalized_shape, epsilon=1e-05, weight_attr=None, bias_attr=None, name=None) + +该接口用于构建 ``LayerNorm`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。其中实现了层归一化层(Layer Normalization Layer)的功能,其可以应用于小批量输入数据。更多详情请参考:`Layer Normalization `_ + +计算公式如下 + +.. math:: + \\\mu=\frac{1}{H}\sum_{i=1}^{H}x_i\\ + + \\\sigma=\sqrt{\frac{1}{H}\sum_i^H{(x_i-\mu)^2} + \epsilon}\\ + + \\y=f(\frac{g}{\sigma}(x-\mu) + b)\\ + +- :math:`x` : 该层神经元的向量表示 +- :math:`H` : 层中隐藏神经元个数 +- :math:`\epsilon` : 添加较小的值到方差中以防止除零 +- :math:`g` : 可训练的比例参数 +- :math:`b` : 可训练的偏差参数 + + +参数: + - **normalized_shape** (int 或 list 或 tuple) – 需规范化的shape,期望的输入shape为 ``[*, normalized_shape[0], normalized_shape[1], ..., normalized_shape[-1]]`` 。如果是单个整数,则此模块将在最后一个维度上规范化(此时最后一维的维度需与该参数相同)。 + - **epsilon** (float, 可选) - 指明在计算过程中是否添加较小的值到方差中以防止除零。默认值:1e-05。 + - **weight_attr** (ParamAttr|bool, 可选) - 指定权重参数属性的对象。如果为False固定为1,不进行学习。默认值为None, 表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr, 可选) - 指定偏置参数属性的对象。如果为False固定为0,不进行学习。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **name** (string, 可选) – LayerNorm的名称, 默认值为None。更多信息请参见 :ref:`api_guide_Name` 。 + +返回:无 + +形状: + - input: 2-D, 3-D, 4-D或5D 的Tensor。 + - output: 和输入形状一样。 + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + np.random.seed(123) + x_data = np.random.random(size=(2, 2, 2, 3)).astype('float32') + x = paddle.to_tensor(x_data) + layer_norm = paddle.nn.LayerNorm(x_data.shape[1:]) + layer_norm_out = layer_norm(x) + + print(layer_norm_out.numpy()) + diff --git a/doc/paddle/api/paddle/nn/layer/norm/SyncBatchNorm_cn.rst b/doc/paddle/api/paddle/nn/layer/norm/SyncBatchNorm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..84618185a20913de4a73c76c7f9da6d2c32260ec --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/norm/SyncBatchNorm_cn.rst @@ -0,0 +1,87 @@ +.. _cn_api_nn_SyncBatchNorm: + +SyncBatchNorm +------------------------------- + +.. py:class:: paddle.nn.SyncBatchNorm(num_features, epsilon=1e-5, momentum=0.9, track_running_stats=True, weight_attr=None, bias_attr=None, data_format='NCHW', name=None) + +该接口用于构建 ``SyncBatchNorm`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。实现了跨卡GPU同步的批归一化(Cross-GPU Synchronized Batch Normalization Layer)的功能,可用在其他层(类似卷积层和全连接层)之后进行归一化操作。根据所有GPU同一批次的数据按照通道计算的均值和方差进行归一化。更多详情请参考 : `Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift `_ + +当模型处于训练模式时,:math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 是所有GPU上同一minibatch的统计数据。计算公式如下: + +.. 
math:: + \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \quad &// mini-batch-mean \\ + \sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i - \mu_{\beta})^2 \quad &// mini-batch-variance \\ + +- :math:`x` : 所有GPU上同一批输入数据 +- :math:`m` : 所有GPU上同一批次数据的大小 + +当模型处于评估模式时,:math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 是全局(或运行)统计数据(moving_mean和moving_variance, 这两个统计量通常来自预先训练好的模型)。计算公式如下: + +.. math:: + + moving\_mean = moving\_mean * momentum + \mu_{\beta} * (1. - momentum) \quad &// global mean \\ + moving\_variance = moving\_variance * momentum + \sigma_{\beta}^{2} * (1. - momentum) \quad &// global variance \\ + +归一化函数公式如下: + +.. math:: + + \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\sigma_{\beta}^{2} + \epsilon}} \quad &// normalize \\ + y_i &\gets \gamma \hat{x_i} + \beta \quad &// scale-and-shift \\ + +- :math:`\epsilon` : 添加较小的值到方差中以防止除零 +- :math:`\gamma` : 可训练的比例参数 +- :math:`\beta` : 可训练的偏差参数 + +参数: + - **num_features** (int) - 指明输入 ``Tensor`` 的通道数量。 + - **epsilon** (float, 可选) - 为了数值稳定加在分母上的值。默认值:1e-05。 + - **momentum** (float, 可选) - 此值用于计算 ``moving_mean`` 和 ``moving_var`` 。默认值:0.9。更新公式如上所示。 + - **weight_attr** (ParamAttr|bool, 可选) - 指定权重参数属性的对象。如果设置为 ``False`` ,则表示本层没有可训练的权重参数。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr|bool, 可选) - 指定偏置参数属性的对象。如果设置为 ``False`` ,则表示本层没有可训练的偏置参数。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **track_running_stats** (bool, 可选)- 是否计算全局均值和方差。默认: ``True`` ,表示计算全局均值和方差。 + +形状: + - input: 一个二维到五维的 ``Tensor`` 。 + - output: 和input 相同形状的 ``Tensor`` 。 + +**代码示例** + +.. code-block:: python + + import paddle + import paddle.nn as nn + import numpy as np + x = np.array([[[[0.3, 0.4], [0.3, 0.07]], [[0.83, 0.37], [0.18, 0.93]]]]).astype('float32') + paddle.disable_static() + x = paddle.to_tensor(x) + if paddle.fluid.is_compiled_with_cuda(): + sync_batch_norm = nn.SyncBatchNorm(2) + hidden1 = sync_batch_norm(x) + print(hidden1.numpy()) + # [[[[0.26824948, 1.0936325],[0.26824948, -1.6301316]],[[ 0.8095662, -0.665287],[-1.2744656, 1.1301866 ]]]] + +方法 +::::::::: +convert_sync_batchnorm(layer) +''''''''''''''''''''''''''''' + +该接口用于把 ``BatchNorm*d`` 层转换为 ``SyncBatchNorm`` 层。 + +参数: + - **layer** (paddle.nn.Layer) - 包含一个或多个 ``BatchNorm*d`` 层的模型。 + +返回: + 如果原始模型中有 ``BatchNorm*d`` 层, 则把 ``BatchNorm*d`` 层转换为 ``SyncBatchNorm`` 层的原始模型。 + +**代码示例** + +.. code-block:: python + + import paddle + import paddle.nn as nn + paddle.disable_static() + model = nn.Sequential(nn.Conv2d(3, 5, 3), nn.BatchNorm2d(5)) + sync_model = nn.SyncBatchNorm.convert_sync_batchnorm(model) + diff --git a/doc/paddle/api/paddle/nn/layer/pooling/AdaptiveAvgPool1d_cn.rst b/doc/paddle/api/paddle/nn/layer/pooling/AdaptiveAvgPool1d_cn.rst new file mode 100755 index 0000000000000000000000000000000000000000..b7ea3869b9b9cfad8c3c9a77a72aa19b1f80e0a8 --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/pooling/AdaptiveAvgPool1d_cn.rst @@ -0,0 +1,66 @@ +.. _cn_api_nn_AdaptiveAvgPool1d: + + +AdaptiveAvgPool1d +------------------------------- + +.. py:function:: paddle.nn.AdaptiveAvgPool1d(output_size, name=None) + +该算子根据输入 `x` , `output_size` 等参数对一个输入Tensor计算1D的自适应平均池化。输入和输出都是3-D Tensor, +默认是以 `NCL` 格式表示的,其中 `N` 是 batch size, `C` 是通道数, `L` 是输入特征的长度. + +计算公式如下: + +.. 
math:: + + lstart &= floor(i * L_{in} / L_{out}) + + lend &= ceil((i + 1) * L_{in} / L_{out}) + + Output(i) &= \frac{sum(Input[lstart:lend])}{(lstart - lend)} + + +参数 +::::::::: + - **output_size** (int): 算子输出特征图的长度,其数据类型为int。 + - **name** (str,可选): 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状 +::::::::: + - **x** (Tensor): 默认形状为(批大小,通道数,输出特征长度),即NCL格式的3-D Tensor。 其数据类型为float32或者float64。 + - **output** (Tensor): 默认形状为(批大小,通道数,输出特征长度),即NCL格式的3-D Tensor。 其数据类型与输入x相同。 + +返回 +::::::::: +计算AdaptiveAvgPool1d的可调用对象 + +抛出异常 +::::::::: + - ``ValueError`` - ``output_size`` 应是一个整数。 + +代码示例 +::::::::: + +.. code-block:: python + + # average adaptive pool1d + # suppose input data in shape of [N, C, L], `output_size` is m or [m], + # output shape is [N, C, m], adaptive pool divide L dimension + # of input data into m grids averagely and performs poolings in each + # grid to get output. + # adaptive avg pool performs calculations as follow: + # + # for i in range(m): + # lstart = floor(i * L / m) + # lend = ceil((i + 1) * L / m) + # output[:, :, i] = sum(input[:, :, lstart: lend])/(lstart - lend) + # + import paddle + import paddle.nn as nn + import numpy as np + paddle.disable_static() + + data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32)) + AdaptiveAvgPool1d = nn.layer.AdaptiveAvgPool1d(output_size=16) + pool_out = AdaptiveAvgPool1d(data) + # pool_out shape: [1, 3, 16] diff --git a/doc/paddle/api/paddle/nn/layer/pooling/AdaptiveAvgPool2d_cn.rst b/doc/paddle/api/paddle/nn/layer/pooling/AdaptiveAvgPool2d_cn.rst new file mode 100755 index 0000000000000000000000000000000000000000..6d6eaa5044f9b2781c46f449240e8bf158c3fb50 --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/pooling/AdaptiveAvgPool2d_cn.rst @@ -0,0 +1,72 @@ +AdaptiveAvgPool2d +------------------------------- + +.. py:function:: paddle.nn.AdaptiveAvgPool2d(output_size, data_format="NCHW", name=None) + +该算子根据输入 `x` , `output_size` 等参数对一个输入Tensor计算2D的自适应平均池化。输入和输出都是4-D Tensor, +默认是以 `NCHW` 格式表示的,其中 `N` 是 batch size, `C` 是通道数, `H` 是输入特征的高度, `H` 是输入特征的宽度。 + +计算公式如下: + +.. math:: + + hstart &= floor(i * H_{in} / H_{out}) + + hend &= ceil((i + 1) * H_{in} / H_{out}) + + wstart &= floor(j * W_{in} / W_{out}) + + wend &= ceil((j + 1) * W_{in} / W_{out}) + + Output(i ,j) &= \frac{sum(Input[hstart:hend, wstart:wend])}{(hend - hstart) * (wend - wstart)} + + +参数 +::::::::: + - **output_size** (int|list|turple): 算子输出特征图的尺寸,如果其是list或turple类型的数值,必须包含两个元素,H和W。H和W既可以是int类型值也可以是None,None表示与输入特征尺寸相同。 + - **data_format** (str): 输入和输出的数据格式,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 + - **name** (str,可选): 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状 +::::::::: + - **x** (Tensor): 默认形状为(批大小,通道数,高度,宽度),即NCHW格式的4-D Tensor。 其数据类型为float16, float32, float64, int32或int64。 + - **output** (Tensor): 默认形状为(批大小,通道数,输出特征高度,输出特征宽度),即NCHW格式的4-D Tensor。 其数据类型与输入相同。 + + +返回 +::::::::: +计算AdaptiveAvgPool2d的可调用对象 + +抛出异常 +::::::::: + - ``ValueError`` - 如果 ``data_format`` 既不是"NCHW"也不是"NHWC"。 + +代码示例 +::::::::: + +.. code-block:: python + + # adaptive avg pool2d + # suppose input data in shape of [N, C, H, W], `output_size` is [m, n], + # output shape is [N, C, m, n], adaptive pool divide H and W dimensions + # of input data into m * n grids averagely and performs poolings in each + # grid to get output. 
+ # adaptive avg pool performs calculations as follow: + # + # for i in range(m): + # for j in range(n): + # hstart = floor(i * H / m) + # hend = ceil((i + 1) * H / m) + # wstart = floor(i * W / n) + # wend = ceil((i + 1) * W / n) + # output[:, :, i, j] = avg(input[:, :, hstart: hend, wstart: wend]) + # + import paddle + import numpy as np + paddle.disable_static() + input_data = np.random.rand(2, 3, 32, 32) + x = paddle.to_tensor(input_data) + # x.shape is [2, 3, 32, 32] + adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2d(output_size=3) + pool_out = adaptive_avg_pool(x = x) + # pool_out.shape is [2, 3, 3, 3] \ No newline at end of file diff --git a/doc/paddle/api/paddle/nn/layer/pooling/AdaptiveAvgPool3d_cn.rst b/doc/paddle/api/paddle/nn/layer/pooling/AdaptiveAvgPool3d_cn.rst new file mode 100755 index 0000000000000000000000000000000000000000..4315f960cdf53e6f6f25c1b06d600d84d3b03dd2 --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/pooling/AdaptiveAvgPool3d_cn.rst @@ -0,0 +1,78 @@ +AdaptiveAvgPool3d +------------------------------- + +.. py:function:: paddle.nn.AdaptiveAvgPool3d(output_size, data_format="NCDHW", name=None) + +该算子根据输入 `x` , `output_size` 等参数对一个输入Tensor计算3D的自适应平均池化。输入和输出都是5-D Tensor, +默认是以 `NCDHW` 格式表示的,其中 `N` 是 batch size, `C` 是通道数, `D` 是特征图长度, `H` 是输入特征的高度, `H` 是输入特征的宽度。 + +计算公式如下: + +.. math:: + + dstart &= floor(i * D_{in} / D_{out}) + + dend &= ceil((i + 1) * D_{in} / D_{out}) + + hstart &= floor(j * H_{in} / H_{out}) + + hend &= ceil((j + 1) * H_{in} / H_{out}) + + wstart &= floor(k * W_{in} / W_{out}) + + wend &= ceil((k + 1) * W_{in} / W_{out}) + + Output(i ,j, k) &= \frac{sum(Input[dstart:dend, hstart:hend, wstart:wend])}{(dend - dstart) * (hend - hstart) * (wend - wstart)} + +参数 +::::::::: + - **output_size** (int|list|turple): 算子输出特征图的尺寸,如果其是list或turple类型的数值,必须包含三个元素,D,H和W。D,H和W既可以是int类型值也可以是None,None表示与输入特征尺寸相同。 + - **data_format** (str): 输入和输出的数据格式,可以是"NCDHW"和"NDHWC"。N是批尺寸,C是通道数,D是特征长度,H是特征高度,W是特征宽度。默认值:"NCDHW"。 + - **name** (str,可选): 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状 +::::::::: + - **x** (Tensor): 默认形状为(批大小,通道数,长度,高度,宽度),即NCDHW格式的5-D Tensor。 其数据类型为float16, float32, float64, int32或int64. + - **output** (Tensor): 默认形状为(批大小,通道数,输出特征长度,输出特征高度,输出特征宽度),即NCDHW格式的5-D Tensor。 其数据类型与输入相同。 + + +返回 +::::::::: +计算AdaptiveAvgPool3d的可调用对象 + +抛出异常 +::::::::: + - ``ValueError`` - 如果 ``data_format`` 既不是"NCDHW"也不是"NDHWC"。 + +代码示例 +::::::::: + +.. code-block:: python + + # adaptive avg pool3d + # suppose input data in shape of [N, C, D, H, W], `output_size` is [l, m, n], + # output shape is [N, C, l, m, n], adaptive pool divide D, H and W dimensions + # of input data into l * m * n grids averagely and performs poolings in each + # grid to get output. 
+ # adaptive avg pool performs calculations as follow: + # + # for i in range(l): + # for j in range(m): + # for k in range(n): + # dstart = floor(i * D / l) + # dend = ceil((i + 1) * D / l) + # hstart = floor(j * H / m) + # hend = ceil((j + 1) * H / m) + # wstart = floor(k * W / n) + # wend = ceil((k + 1) * W / n) + # output[:, :, i, j, k] = + # avg(input[:, :, dstart:dend, hstart: hend, wstart: wend]) + import paddle + import numpy as np + paddle.disable_static() + input_data = np.random.rand(2, 3, 8, 32, 32) + x = paddle.to_tensor(input_data) + # x.shape is [2, 3, 8, 32, 32] + adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3d(output_size=3) + pool_out = adaptive_avg_pool(x = x) + # pool_out = [2, 3, 3, 3, 3] diff --git a/doc/paddle/api/paddle/nn/layer/pooling/AdaptiveMaxPool1d_cn.rst b/doc/paddle/api/paddle/nn/layer/pooling/AdaptiveMaxPool1d_cn.rst new file mode 100755 index 0000000000000000000000000000000000000000..3d11df5b51d3f6bbede8b92e36091614fd868bc3 --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/pooling/AdaptiveMaxPool1d_cn.rst @@ -0,0 +1,72 @@ +.. _cn_api_nn_AdaptiveMaxPool1d: + + +AdaptiveMaxPool1d +------------------------------- + +.. py:function:: paddle.nn.AdaptiveMaxPool1d(output_size, return_indices=False, name=None) + +该算子根据输入 `x` , `output_size` 等参数对一个输入Tensor计算1D的自适应平均池化。输入和输出都是3-D Tensor, +默认是以 `NCL` 格式表示的,其中 `N` 是 batch size, `C` 是通道数, `L` 是输入特征的长度. + +计算公式如下: + +.. math:: + + lstart &= floor(i * L_{in} / L_{out}) + + lend &= ceil((i + 1) * L_{in} / L_{out}) + + Output(i) &= max(Input[lstart:lend]) + + +参数 +::::::::: + - **output_size** (int|list|tuple): 算子输出特征图的长度,其数据类型为int,list或tuple。 + - **return_indices** (bool): 如果设置为True,则会与输出一起返回最大值的索引,默认为False。 + - **name** (str,可选): 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +形状 +::::::::: + - **x** (Tensor): 默认形状为(批大小,通道数,输出特征长度),即NCL格式的3-D Tensor。 其数据类型为float32或者float64。 + - **output** (Tensor): 默认形状为(批大小,通道数,输出特征长度),即NCL格式的3-D Tensor。 其数据类型与输入x相同。 + +返回 +::::::::: +计算AdaptiveMaxPool1d的可调用对象 + +抛出异常 +::::::::: + - ``ValueError`` - ``output_size`` 应是一个整数或长度为1的list,tuple + +代码示例 +::::::::: + +.. code-block:: python + + # max adaptive pool1d + # suppose input data in shape of [N, C, L], `output_size` is m or [m], + # output shape is [N, C, m], adaptive pool divide L dimension + # of input data into m grids averagely and performs poolings in each + # grid to get output. + # adaptive max pool performs calculations as follow: + # + # for i in range(m): + # lstart = floor(i * L / m) + # lend = ceil((i + 1) * L / m) + # output[:, :, i] = max(input[:, :, lstart: lend]) + # + import paddle + import paddle.nn as nn + import numpy as np + paddle.disable_static() + + data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32)) + AdaptiveMaxPool1d = nn.layer.AdaptiveMaxPool1d(output_size=16) + pool_out = AdaptiveMaxPool1d(data) + # pool_out shape: [1, 3, 16] + + # for return_indices = true + AdaptiveMaxPool1d = nn.layer.AdaptiveMaxPool1d(output_size=16, return_indices=True) + pool_out, indices = AdaptiveMaxPool1d(data) + # pool_out shape: [1, 3, 16], indices shape: [1, 3, 16] diff --git a/doc/paddle/api/paddle/nn/layer/pooling/AvgPool1d_cn.rst b/doc/paddle/api/paddle/nn/layer/pooling/AvgPool1d_cn.rst new file mode 100755 index 0000000000000000000000000000000000000000..c24c89ec51ce367586385684a750ef4700aa6b08 --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/pooling/AvgPool1d_cn.rst @@ -0,0 +1,59 @@ +.. _cn_api_nn_AvgPool1d: + + +AvgPool1d +------------------------------- + +.. 
py:function:: paddle.nn.AvgPool1d(kernel_size, stride=None, padding=0, count_include_pad=True, ceil_mode=False, name=None) + +该算子根据输入 `x` , `kernel_size` 等参数对一个输入Tensor计算1D的平均池化。输入和输出都是3-D Tensor, +默认是以 `NCL` 格式表示的,其中 `N` 是 batch size, `C` 是通道数, `L` 是输入特征的长度。 + +假设输入形状是(N, C, L),输出形状是 (N, C, L_{out}),卷积核尺寸是k, 1d平均池化计算公式如下: + +.. math:: + + Output(N_i, C_i, l) = mean(Input[N_i, C_i, stride \times l:stride \times l+k]) + +参数 +::::::::: + - **kernel_size** (int|list|tuple): 池化核的尺寸大小. 如果kernel_size为list或tuple类型, 其必须包含一个整数, 最终池化核的大小为该数值。 + - **stride** (int|list|tuple): 池化操作步长. 如果stride为list或tuple类型, 其必须包含一个整数,最终池化操作的步长为该数值。 + - **padding** (string|int|list|tuple): 池化补零的方式. 如果padding是一个字符串,则必须为 `SAME` 或者 `VALID` 。如果是turple或者list类型, 则应是 `[pad_left, pad_right]` 形式。如果padding是一个非0值,那么表示会在输入的两端都padding上同样长度的0。 + - **count_include_pad** (bool): 是否用额外padding的值计算平均池化结果,默认为True。 + - **ceil_mode** (bool): 是否用ceil函数计算输出的height和width,如果设置为False, 则使用floor函数来计算,默认为False。 + - **name** (str,可选): 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + + +形状 +::::::::: + - **x** (Tensor): 默认形状为(批大小,通道数,长度),即NCL格式的3-D Tensor。 其数据类型为float32或float64. + - **output** (Tensor): 默认形状为(批大小,通道数,输出特征长度),即NCL格式的3-D Tensor。 其数据类型与输入x相同。 + +返回 +::::::::: +计算AvgPool1d的可调用对象 + + +抛出异常 +::::::::: + - ``ValueError`` - 如果 ``padding`` 是字符串但不是 "SAME" 和 "VALID" 。 + - ``ValueError`` - 如果 ``padding`` 是 "VALID" 但 `ceil_mode` 被设置为True。 + - ``ValueError`` - 如果 ``padding`` 是一个长度大于1的list或turple。 + - ``ShapeError`` - 如果输入x不是一个3-D Tensor。 + - ``ShapeError`` - 如果计算得到的输出形状小于等于0。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import paddle.nn as nn + import numpy as np + paddle.disable_static() + + data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32)) + AvgPool1d = nn.layer.AvgPool1d(kernel_size=2, stride=2, padding=0) + pool_out = AvgPool1d(data) + # pool_out shape: [1, 3, 16] diff --git a/doc/paddle/api/paddle/nn/layer/pooling/MaxPool1d_cn.rst b/doc/paddle/api/paddle/nn/layer/pooling/MaxPool1d_cn.rst new file mode 100755 index 0000000000000000000000000000000000000000..e6cddc468a33abc836c54038ed9f8b1c78dc7b3a --- /dev/null +++ b/doc/paddle/api/paddle/nn/layer/pooling/MaxPool1d_cn.rst @@ -0,0 +1,62 @@ +.. _cn_api_nn_MaxPool1d: + + +MaxPool1d +------------------------------- + +.. py:function:: paddle.nn.MaxPool1d(kernel_size, stride=None, padding=0, return_indices=False, ceil_mode=False, name=None) + +该算子根据输入 `x` , `kernel_size` 等参数对一个输入Tensor计算1D的最大值池化。输入和输出都是3-D Tensor, +默认是以 `NCL` 格式表示的,其中 `N` 是 batch size, `C` 是通道数, `L` 是输入特征的长度。 + +假设输入形状是(N, C, L),输出形状是 (N, C, L_{out}),卷积核尺寸是k, 1d最大值池化计算公式如下: + +.. math:: + + Output(N_i, C_i, l) = max(Input[N_i, C_i, stride \times l:stride \times l+k]) + +参数 +::::::::: + - **kernel_size** (int|list|tuple): 池化核的尺寸大小. 如果kernel_size为list或tuple类型, 其必须包含一个整数, 最终池化核的大小为该数值。 + - **stride** (int|list|tuple): 池化操作步长. 如果stride为list或tuple类型, 其必须包含一个整数,最终池化操作的步长为该数值。 + - **padding** (string|int|list|tuple): 池化补零的方式. 如果padding是一个字符串,则必须为 `SAME` 或者 `VALID` 。 如果是turple或者list类型, 则应是 `[pad_left, pad_right]` 形式。如果padding是一个非0值,那么表示会在输入的两端都padding上同样长度的0。 + - **return_indices** (bool): 是否返回最大值的索引,默认为False。 + - **ceil_mode** (bool): 是否用ceil函数计算输出的height和width,如果设置为False, 则使用floor函数来计算,默认为False。 + - **name** (str,可选): 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + + +形状 +::::::::: + - **x** (Tensor): 默认形状为(批大小,通道数,长度),即NCL格式的3-D Tensor。 其数据类型为float32或float64. 
+    - **output** (Tensor): 默认形状为(批大小,通道数,输出特征长度),即NCL格式的3-D Tensor。其数据类型与输入x相同。
+
+返回
+:::::::::
+计算MaxPool1d的可调用对象
+
+抛出异常
+:::::::::
+    - ``ValueError`` - 如果 ``padding`` 是字符串,但既不是 "SAME" 也不是 "VALID" 。
+    - ``ValueError`` - 如果 ``padding`` 是 "VALID" 但 `ceil_mode` 被设置为True。
+    - ``ValueError`` - 如果 ``padding`` 是一个长度大于1的list或tuple。
+    - ``ShapeError`` - 如果输入x不是一个3-D Tensor。
+    - ``ShapeError`` - 如果计算得到的输出形状小于等于0。
+
+代码示例
+:::::::::
+
+.. code-block:: python
+
+    import paddle
+    import paddle.nn as nn
+    import numpy as np
+    paddle.disable_static()
+
+    data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
+    MaxPool1d = nn.layer.MaxPool1d(kernel_size=2, stride=2, padding=0)
+    pool_out = MaxPool1d(data)
+    # pool_out shape: [1, 3, 16]
+
+    MaxPool1d = nn.layer.MaxPool1d(kernel_size=2, stride=2, padding=0, return_indices=True)
+    pool_out, indices = MaxPool1d(data)
+    # pool_out shape: [1, 3, 16], indices shape: [1, 3, 16]
diff --git a/doc/paddle/api/paddle/nn/utils/weight_norm_hook/remove_weight_norm_cn.rst b/doc/paddle/api/paddle/nn/utils/weight_norm_hook/remove_weight_norm_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3c14b5b625d9cdff1024ae9fcafb0a5057cdd6a0
--- /dev/null
+++ b/doc/paddle/api/paddle/nn/utils/weight_norm_hook/remove_weight_norm_cn.rst
@@ -0,0 +1,29 @@
+.. _cn_api_nn_cn_remove_weight_norm:
+
+remove_weight_norm
+-------------------------------
+
+.. py:function:: paddle.nn.utils.remove_weight_norm(layer, name='weight')
+
+移除传入 ``layer`` 中的权重归一化。
+
+参数:
+    - **layer** (paddle.nn.Layer) - 要移除权重归一化的层。
+    - **name** (str, 可选) - 权重参数的名字。默认:'weight'。
+
+返回:
+    ``Layer`` , 移除权重归一化hook之后的层
+
+**代码示例**
+
+.. code-block:: python
+
+    import paddle
+    from paddle.nn import Conv2d
+    from paddle.nn.utils import weight_norm, remove_weight_norm
+    paddle.disable_static()
+    conv = Conv2d(3, 5, 3)
+    wn = weight_norm(conv)
+    remove_weight_norm(conv)
+    # print(conv.weight_g)
+    # AttributeError: 'Conv2D' object has no attribute 'weight_g'
diff --git a/doc/paddle/api/paddle/nn/utils/weight_norm_hook/weight_norm_cn.rst b/doc/paddle/api/paddle/nn/utils/weight_norm_hook/weight_norm_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c427c956be98b56fcffb88fbf5b1fcb12da9dc65
--- /dev/null
+++ b/doc/paddle/api/paddle/nn/utils/weight_norm_hook/weight_norm_cn.rst
@@ -0,0 +1,38 @@
+.. _cn_api_nn_cn_weight_norm:
+
+weight_norm
+-------------------------------
+
+.. py:function:: paddle.nn.utils.weight_norm(layer, name='weight', dim=0)
+
+该接口根据以下公式对传入的 ``layer`` 中的权重参数进行归一化:
+
+.. math::
+    \mathbf{w} = g \dfrac{v}{\|v\|}
+
+权重归一化可以将神经网络中权重向量的长度与其方向解耦,即用两个变量(例如:代表长度的变量 `weight_g` 和代表方向的变量 `weight_v`)来代替由名字(例如: `weight`)指定的变量。详细可以参考论文: `A Simple Reparameterization to Accelerate Training of Deep Neural Networks `_
+
+参数:
+    - **layer** (paddle.nn.Layer) - 要添加权重归一化的层。
+    - **name** (str, 可选) - 权重参数的名字。默认:'weight'。
+    - **dim** (int|None, 可选) - 进行归一化操作的切片所在维度,是小于权重Tensor rank的非负数。比如卷积的权重shape是 [cout,cin,kh,kw] , rank是4,则dim可以选0,1,2,3;fc的权重shape是 [cout,cin] ,rank是2,dim可以选0,1。如果为None就对所有维度上的元素做归一化。默认:0。
+
+返回:
+    ``Layer`` , 添加了权重归一化hook的层
+
+**代码示例**
+
+.. 
code-block:: python + + import numpy as np + import paddle + from paddle.nn import Conv2d + from paddle.nn.utils import weight_norm + x = np.array([[[[0.3, 0.4], [0.3, 0.07]], [[0.83, 0.37], [0.18, 0.93]]]]).astype('float32') + paddle.disable_static() + conv = Conv2d(3, 5, 3) + wn = weight_norm(conv) + print(conv.weight_g.shape) + # [5] + print(conv.weight_v.shape) + # [5, 3, 3, 3] diff --git a/doc/paddle/api/paddle/optimizer/AdadeltaOptimizer_cn.rst b/doc/paddle/api/paddle/optimizer/AdadeltaOptimizer_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e1d5168744d910a7b930eb1f2ff1dc532707dd49 --- /dev/null +++ b/doc/paddle/api/paddle/optimizer/AdadeltaOptimizer_cn.rst @@ -0,0 +1,196 @@ +.. _cn_api_fluid_optimizer_AdadeltaOptimizer: + +AdadeltaOptimizer +------------------------------- + +.. py:class:: paddle.fluid.optimizer.AdadeltaOptimizer(learning_rate, epsilon=1.0e-6, rho=0.95, parameter_list=None, regularization=None, grad_clip=None, name=None) + + + + +**注意:此接口不支持稀疏参数更新。** + +Adadelta优化器,具体细节可参考论文 `ADADELTA: AN ADAPTIVE LEARNING RATE METHOD `_ 。 + +更新公式如下: + +.. math:: + + E(g_t^2) &= \rho * E(g_{t-1}^2) + (1-\rho) * g^2\\ + learning\_rate &= \sqrt{ ( E(dx_{t-1}^2) + \epsilon ) / ( E(g_t^2) + \epsilon ) }\\ + E(dx_t^2) &= \rho * E(dx_{t-1}^2) + (1-\rho) * (-g*learning\_rate)^2 + + +参数: + - **learning_rate** (float|Variable) - 全局学习率。 + - **epsilon** (float) - 维持数值稳定性的浮点型值,默认值为1.0e-6。 + - **rho** (float) - 算法中的衰减率,默认值为0.95。 + - **parameter_list** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 + - **regularization** (WeightDecayRegularizer,可选) - 正则化方法。支持两种正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 + :ref:`cn_api_fluid_regularizer_L2Decay` 。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; + 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 + - **grad_clip** (GradientClipBase, 可选) – 梯度裁剪的策略,支持三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。 + 默认值为None,此时将不进行梯度裁剪。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + image = fluid.layers.data(name='image', shape=[28], dtype='float32') + fc = fluid.layers.fc(image, size=10) + cost = fluid.layers.reduce_mean(fc) + optimizer = fluid.optimizer.AdadeltaOptimizer( + learning_rate=0.0003, epsilon=1.0e-6, rho=0.95) + optimizer_ops, params_grads = optimizer.minimize(cost) + + +.. py:method:: minimize(loss, startup_program=None, parameter_list=None, no_grad_set=None) + +为训练网络添加反向和参数优化部分,进而使损失最小化。 + +参数: + - **loss** (Variable) – 优化器的损失变量。 + - **startup_program** (Program,可选) – 参数所在的startup program。默认值为None,表示 :ref:`cn_api_fluid_default_startup_program` 。 + - **parameter_list** (list,可选) – 待更新的Parameter或者Parameter.name组成的列表。默认值为None,表示所有参数均需要更新。 + - **no_grad_set** (set,可选) – 不需要更新的Parameter或者Parameter.name组成的集合。默认值为None。 + +返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 + +返回类型: tuple + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + + image = fluid.layers.data(name='image', shape=[28], dtype='float32') + fc = fluid.layers.fc(image, size=10) + cost = fluid.layers.reduce_mean(fc) + optimizer = fluid.optimizer.AdadeltaOptimizer( + learning_rate=0.0003, epsilon=1.0e-6, rho=0.95) + optimizer_ops, params_grads = optimizer.minimize(cost) + + +.. py:method:: clear_gradients() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +清除需要优化的参数的梯度。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + with fluid.dygraph.guard(): + value = np.arange(26).reshape(2, 13).astype("float32") + a = fluid.dygraph.to_variable(value) + linear = fluid.Linear(13, 5, dtype="float32") + optimizer = fluid.optimizer.AdadeltaOptimizer(learning_rate=0.0003, epsilon=1.0e-6, rho=0.95, + parameter_list=linear.parameters()) + out = linear(a) + out.backward() + optimizer.minimize(out) + optimizer.clear_gradients() + +.. py:method:: set_lr() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +手动设置当前 ``optimizer`` 的学习率。当使用LearningRateDecay时,无法使用该API手动设置学习率,因为这将导致冲突。 + +参数: + value (float|Variable) - 需要设置的学习率的值。 + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + with fluid.dygraph.guard(): + linear = fluid.dygraph.nn.Linear(10, 10) + adam = fluid.optimizer.Adam(0.1, parameter_list=linear.parameters()) + # 通过Python float数值手动设置学习率 + lr_list = [0.2, 0.3, 0.4, 0.5, 0.6] + for i in range(5): + adam.set_lr(lr_list[i]) + print("current lr is {}".format(adam.current_step_lr())) + # 打印结果: + # current lr is 0.2 + # current lr is 0.3 + # current lr is 0.4 + # current lr is 0.5 + # current lr is 0.6 + + + # 通过 框架的Variable 设置学习率 + lr_var = fluid.layers.create_global_var(shape=[1], value=0.7, dtype='float32') + adam.set_lr(lr_var) + print("current lr is {}".format(adam.current_step_lr())) + # 打印结果: + # current lr is 0.7 + + + +.. py:method:: current_step_lr() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +获取当前步骤的学习率。当不使用LearningRateDecay时,每次调用的返回值都相同,否则返回当前步骤的学习率。 + +返回:当前步骤的学习率。 + +返回类型:float + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + import numpy as np + + # example1: LearningRateDecay is not used, return value is all the same + with fluid.dygraph.guard(): + emb = fluid.dygraph.Embedding([10, 10]) + adam = fluid.optimizer.Adam(0.001, parameter_list = emb.parameters()) + lr = adam.current_step_lr() + print(lr) # 0.001 + + # example2: PiecewiseDecay is used, return the step learning rate + with fluid.dygraph.guard(): + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = fluid.dygraph.nn.Linear(10, 10) + inp = fluid.dygraph.to_variable(inp) + out = linear(inp) + loss = fluid.layers.reduce_mean(out) + + bd = [2, 4, 6, 8] + value = [0.2, 0.4, 0.6, 0.8, 1.0] + adam = fluid.optimizer.Adam(fluid.dygraph.PiecewiseDecay(bd, value, 0), + parameter_list=linear.parameters()) + + # first step: learning rate is 0.2 + np.allclose(adam.current_step_lr(), 0.2, rtol=1e-06, atol=0.0) # True + + # learning rate for different steps + ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0] + for i in range(12): + adam.minimize(loss) + lr = adam.current_step_lr() + np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True + diff --git a/doc/paddle/api/paddle/optimizer/Adadelta_cn.rst b/doc/paddle/api/paddle/optimizer/Adadelta_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..fabe7b81fa54401bea6e21de5cfa5cce4bc5a701 --- /dev/null +++ b/doc/paddle/api/paddle/optimizer/Adadelta_cn.rst @@ -0,0 +1,17 @@ +.. _cn_api_fluid_optimizer_Adadelta: + +Adadelta +------------------------------- + +.. py:attribute:: paddle.fluid.optimizer.Adadelta + + + + +``AdadeltaOptimizer`` 的别名 + + + + + + diff --git a/doc/paddle/api/paddle/optimizer/AdagradOptimizer_cn.rst b/doc/paddle/api/paddle/optimizer/AdagradOptimizer_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0837c86c5a6298dde6b75e9c937699fa7e2c91c8 --- /dev/null +++ b/doc/paddle/api/paddle/optimizer/AdagradOptimizer_cn.rst @@ -0,0 +1,215 @@ +.. _cn_api_fluid_optimizer_AdagradOptimizer: + +AdagradOptimizer +------------------------------- + +.. py:class:: paddle.fluid.optimizer.AdagradOptimizer(learning_rate, epsilon=1e-06, parameter_list=None, regularization=None, grad_clip=None, name=None, initial_accumulator_value=0.0) + + + + +Adaptive Gradient 优化器(自适应梯度优化器,简称Adagrad)可以针对不同参数样本数不平均的问题,自适应地为各个参数分配不同的学习率。 + +其参数更新的计算过程如下: + +.. 
math:: + + moment\_out &= moment + grad * grad\\param\_out + &= param - \frac{learning\_rate * grad}{\sqrt{moment\_out} + \epsilon} + + +相关论文:`Adaptive Subgradient Methods for Online Learning and Stochastic Optimization `_。 + +原始论文的算法中没有引入上述公式中的 ``epsilon`` 属性,此处引入该属性用于维持数值稳定性,避免除0错误发生。 + +引入epsilon参数依据:`Per-parameter adaptive learning rate methods `_。 + +参数: + - **learning_rate** (float|Variable) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个值为浮点型的Variable + - **epsilon** (float, 可选) - 维持数值稳定性的浮点型值,默认值为1e-06 + - **parameter_list** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 + - **regularization** (WeightDecayRegularizer,可选) - 正则化方法。支持两种正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 + :ref:`cn_api_fluid_regularizer_L2Decay` 。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; + 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 + - **grad_clip** (GradientClipBase, 可选) – 梯度裁剪的策略,支持三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。 + 默认值为None,此时将不进行梯度裁剪。 + - **name** (str, 可选) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None + - **initial_accumulator_value** (float, 可选) - moment累加器的初始值,默认值为0.0 + +**代码示例** + +.. code-block:: python + + import numpy as np + import paddle.fluid as fluid + + np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32) + inp = fluid.layers.data( + name="inp", shape=[2, 2], append_batch_size=False) + out = fluid.layers.fc(inp, size=3) + out = fluid.layers.reduce_sum(out) + optimizer = fluid.optimizer.AdagradOptimizer(learning_rate=0.2) + optimizer.minimize(out) + + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(fluid.default_startup_program()) + exe.run( + feed={"inp": np_inp}, + fetch_list=[out.name]) + +.. py:method:: minimize(loss, startup_program=None, parameter_list=None, no_grad_set=None) + +为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameter_list中的Parameters,最小化网络损失值loss。 + +参数: + - **loss** (Variable) – 需要最小化的损失值变量 + - **startup_program** (Program, 可选) – 用于初始化parameter_list中参数的 :ref:`cn_api_fluid_Program` , 默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program` + - **parameter_list** (list, 可选) – 待更新的Parameter或者Parameter.name组成的列表, 默认值为None,此时将更新所有的Parameter + - **no_grad_set** (set, 可选) – 不需要更新的Parameter或者Parameter.name组成的集合。默认值为None + +返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 + +返回类型: tuple + +**代码示例** + +.. code-block:: python + + import numpy as np + import paddle.fluid as fluid + + inp = fluid.layers.data( + name="inp", shape=[2, 2], append_batch_size=False) + out = fluid.layers.fc(inp, size=3) + out = fluid.layers.reduce_sum(out) + optimizer = fluid.optimizer.AdagradOptimizer(learning_rate=0.2) + optimizer.minimize(out) + + np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32) + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(fluid.default_startup_program()) + exe.run( + feed={"inp": np_inp}, + fetch_list=[out.name]) + + +.. py:method:: clear_gradients() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + + +清除需要优化的参数的梯度。 + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + import numpy as np + + with fluid.dygraph.guard(): + value = np.arange(26).reshape(2, 13).astype("float32") + a = fluid.dygraph.to_variable(value) + linear = fluid.Linear(13, 5, dtype="float32") + optimizer = fluid.optimizer.AdagradOptimizer(learning_rate=0.2, + parameter_list=linear.parameters()) + out = linear(a) + out.backward() + optimizer.minimize(out) + optimizer.clear_gradients() + +.. py:method:: set_lr() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +手动设置当前 ``optimizer`` 的学习率。当使用LearningRateDecay时,无法使用该API手动设置学习率,因为这将导致冲突。 + +参数: + value (float|Variable) - 需要设置的学习率的值。 + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + with fluid.dygraph.guard(): + linear = fluid.dygraph.nn.Linear(10, 10) + adam = fluid.optimizer.Adam(0.1, parameter_list=linear.parameters()) + # 通过Python float数值手动设置学习率 + lr_list = [0.2, 0.3, 0.4, 0.5, 0.6] + for i in range(5): + adam.set_lr(lr_list[i]) + print("current lr is {}".format(adam.current_step_lr())) + # 打印结果: + # current lr is 0.2 + # current lr is 0.3 + # current lr is 0.4 + # current lr is 0.5 + # current lr is 0.6 + + + # 通过 框架的Variable 设置学习率 + lr_var = fluid.layers.create_global_var(shape=[1], value=0.7, dtype='float32') + adam.set_lr(lr_var) + print("current lr is {}".format(adam.current_step_lr())) + # 打印结果: + # current lr is 0.7 + + + +.. py:method:: current_step_lr() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +获取当前步骤的学习率。当不使用LearningRateDecay时,每次调用的返回值都相同,否则返回当前步骤的学习率。 + +返回:当前步骤的学习率。 + +返回类型:float + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + # example1: LearningRateDecay is not used, return value is all the same + with fluid.dygraph.guard(): + emb = fluid.dygraph.Embedding([10, 10]) + adam = fluid.optimizer.Adam(0.001, parameter_list = emb.parameters()) + lr = adam.current_step_lr() + print(lr) # 0.001 + + # example2: PiecewiseDecay is used, return the step learning rate + with fluid.dygraph.guard(): + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = fluid.dygraph.nn.Linear(10, 10) + inp = fluid.dygraph.to_variable(inp) + out = linear(inp) + loss = fluid.layers.reduce_mean(out) + + bd = [2, 4, 6, 8] + value = [0.2, 0.4, 0.6, 0.8, 1.0] + adam = fluid.optimizer.Adam(fluid.dygraph.PiecewiseDecay(bd, value, 0), + parameter_list=linear.parameters()) + + # first step: learning rate is 0.2 + np.allclose(adam.current_step_lr(), 0.2, rtol=1e-06, atol=0.0) # True + + # learning rate for different steps + ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0] + for i in range(12): + adam.minimize(loss) + lr = adam.current_step_lr() + np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True + diff --git a/doc/paddle/api/paddle/optimizer/Adagrad_cn.rst b/doc/paddle/api/paddle/optimizer/Adagrad_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f4304ba8b4d89f0d1b446cf31e487e6c9ebb4c34 --- /dev/null +++ b/doc/paddle/api/paddle/optimizer/Adagrad_cn.rst @@ -0,0 +1,15 @@ +.. _cn_api_fluid_optimizer_Adagrad: + +Adagrad +------------------------------- + +.. 
py:attribute:: paddle.fluid.optimizer.Adagrad + + + + +``AdagradOptimizer`` 的别名 + + + + diff --git a/doc/paddle/api/paddle/optimizer/AdamW_cn.rst b/doc/paddle/api/paddle/optimizer/AdamW_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..352cfcf892d1b450585a74651b78e5017d98aef3 --- /dev/null +++ b/doc/paddle/api/paddle/optimizer/AdamW_cn.rst @@ -0,0 +1,241 @@ +.. _cn_api_paddle_optimizer_AdamW: + +AdamW +------------------------------- + +.. py:class:: paddle.optimizer.AdamW(learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08, parameters=None, weight_decay=0.01, apply_decay_param_fun=None, grad_clip=None, name=None, lazy_mode=False) + + + + +AdamW优化器出自 `DECOUPLED WEIGHT DECAY REGULARIZATION 论文 `,用来解决Adam优化器中L2正则化失效的问题。 + +其参数更新的计算公式如下: + +.. math:: + \\t = t + 1 +.. math:: + moment\_1\_out=\beta_1∗moment\_1+(1−\beta_1)∗grad +.. math:: + moment\_2\_out=\beta_2∗moment\_2+(1−\beta_2)∗grad*grad +.. math:: + learning\_rate=learning\_rate*\frac{\sqrt{1-\beta_2^t}}{1-\beta_1^t} +.. math:: + param\_out=param-learning\_rate*(\frac{moment\_1}{\sqrt{moment\_2}+\epsilon} + \lambda * param) + +相关论文:`Adam: A Method for Stochastic Optimization `_ + +参数: + - **learning_rate** (float|_LRScheduler) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个_LRScheduler类,默认值为0.001 + - **beta1** (float|Tensor, 可选) - 一阶矩估计的指数衰减率,是一个float类型或者一个shape为[1],数据类型为float32的Tensor类型。默认值为0.9 + - **beta2** (float|Tensor, 可选) - 二阶矩估计的指数衰减率,是一个float类型或者一个shape为[1],数据类型为float32的Tensor类型。默认值为0.999 + - **epsilon** (float, 可选) - 保持数值稳定性的短浮点类型值,默认值为1e-08 + - **parameters** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 + - **weight_decay** (float|Tensor, 可选) - 权重衰减系数,是一个float类型或者shape为[1] ,数据类型为float32的Tensor类型。默认值为0.01 + - **apply_decay_param_fun** (function|None, 可选): 传入函数时,只有可以使 apply_decay_param_fun(Tensor)==True的Tensor会更新参数。只有在想要指定要更新的参数时使用。默认值为None + - **grad_clip** (GradientClipBase, 可选) – 梯度裁剪的策略,支持三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。 + 默认值为None,此时将不进行梯度裁剪。 + - **name** (str, 可选)- 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None + - **lazy_mode** (bool, 可选) - 设为True时,仅更新当前具有梯度的元素。官方Adam算法有两个移动平均累加器(moving-average accumulators)。累加器在每一步都会更新。在密集模式和稀疏模式下,两条移动平均线的每个元素都会更新。如果参数非常大,那么更新可能很慢。 lazy mode仅更新当前具有梯度的元素,所以它会更快。但是这种模式与原始的算法有不同的描述,可能会导致不同的结果,默认为False + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.mean(out) + adam = paddle.optimizer.AdamW(weight_decay=0.01, learning_rate=0.1, + parameters=linear.parameters()) + out.backward() + adam.step() + adam.clear_grad() + + +.. py:method:: step() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +执行一次优化器并进行参数更新。 + +返回:None。 + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + value = np.arange(26).reshape(2, 13).astype("float32") + a = paddle.to_tensor(value) + linear = paddle.nn.Linear(13, 5) + adam = paddle.optimizer.AdamW(learning_rate = 0.01, + weight_decay = 0.01, + parameters = linear.parameters()) + out = linear(a) + out.backward() + adam.step() + adam.clear_grad() + +.. 
py:method:: minimize(loss, startup_program=None, parameters=None, no_grad_set=None) + +为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameters中的Parameters,最小化网络损失值loss。 + +参数: + - **loss** (Tensor) – 需要最小化的损失值变量 + - **startup_program** (Program, 可选) – 用于初始化parameters中参数的 :ref:`cn_api_fluid_Program` , 默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program` + - **parameters** (list, 可选) – 待更新的Parameter或者Parameter.name组成的列表, 默认值为None,此时将更新所有的Parameter + - **no_grad_set** (set, 可选) – 不需要更新的Parameter或者Parameter.name组成的集合,默认值为None + +返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.mean(out) + + beta1 = paddle.to_tensor([0.9], dtype="float32") + beta2 = paddle.to_tensor([0.99], dtype="float32") + + adam = paddle.optimizer.Adam(learning_rate=0.1, + parameters=linear.parameters(), + weight_decay=0.01) + out.backward() + adam.minimize(loss) + adam.clear_grad() + +.. py:method:: clear_grad() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + + +清除需要优化的参数的梯度。 + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + value = np.arange(26).reshape(2, 13).astype("float32") + a = paddle.to_tensor(value) + linear = paddle.nn.Linear(13, 5) + optimizer = paddle.optimizer.AdamW(weight_decay=0.01, + learning_rate=0.02, + parameters=linear.parameters()) + out = linear(a) + out.backward() + optimizer.step() + optimizer.clear_grad() + +.. py:method:: set_lr(value) + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +手动设置当前 ``optimizer`` 的学习率。当使用_LRScheduler时,无法使用该API手动设置学习率,因为这将导致冲突。 + +参数: + value (float) - 需要设置的学习率的值。 + +返回:None + +**代码示例** + +.. code-block:: python + + import paddle + paddle.disable_static() + linear = paddle.nn.Linear(10, 10) + + adam = paddle.optimizer.AdamW(weight_decay=0.01, + learning_rate=0.1, parameters=linear.parameters()) + + # set learning rate manually by python float value + lr_list = [0.2, 0.3, 0.4, 0.5, 0.6] + for i in range(5): + adam.set_lr(lr_list[i]) + lr = adam.get_lr() + print("current lr is {}".format(lr)) + # Print: + # current lr is 0.2 + # current lr is 0.3 + # current lr is 0.4 + # current lr is 0.5 + # current lr is 0.6 + +.. py:method:: get_lr() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +获取当前步骤的学习率。当不使用_LRScheduler时,每次调用的返回值都相同,否则返回当前步骤的学习率。 + +返回:float,当前步骤的学习率。 + + +**代码示例** + +.. 
code-block:: python + + import numpy as np + import paddle + # example1: _LRScheduler is not used, return value is all the same + paddle.disable_static() + emb = paddle.nn.Embedding(10, 10, sparse=False) + adam = paddle.optimizer.AdamW(learning_rate=0.001, parameters = emb.parameters(),weight_decay=0.01) + lr = adam.get_lr() + print(lr) # 0.001 + + # example2: PiecewiseLR is used, return the step learning rate + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.reduce_mean(out) + + bd = [2, 4, 6, 8] + value = [0.2, 0.4, 0.6, 0.8, 1.0] + scheduler = paddle.optimizer.PiecewiseLR(bd, value, 0) + adam = paddle.optimizer.AdamW(scheduler, + parameters=linear.parameters(), + weight_decay=0.01) + + # first step: learning rate is 0.2 + np.allclose(adam.get_lr(), 0.2, rtol=1e-06, atol=0.0) # True + + # learning rate for different steps + ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0] + for i in range(12): + adam.step() + lr = adam.get_lr() + scheduler.step() + np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True diff --git a/doc/paddle/api/paddle/optimizer/Adam_cn.rst b/doc/paddle/api/paddle/optimizer/Adam_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6c57911eddad02aa0b43c62bb6230fd6a188112e --- /dev/null +++ b/doc/paddle/api/paddle/optimizer/Adam_cn.rst @@ -0,0 +1,262 @@ +.. _cn_api_paddle_optimizer_Adam: + +Adam +------------------------------- + +.. py:class:: paddle.optimizer.Adam(learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08, parameters=None, weight_decay=None, grad_clip=None, name=None, lazy_mode=False) + + + + +Adam优化器出自 `Adam论文 `_ 的第二节,能够利用梯度的一阶矩估计和二阶矩估计动态调整每个参数的学习率。 + +其参数更新的计算公式如下: + +.. math:: + \\t = t + 1 +.. math:: + moment\_1\_out=\beta_1∗moment\_1+(1−\beta_1)∗grad +.. math:: + moment\_2\_out=\beta_2∗moment\_2+(1−\beta_2)∗grad*grad +.. math:: + learning\_rate=learning\_rate*\frac{\sqrt{1-\beta_2^t}}{1-\beta_1^t} +.. math:: + param\_out=param-learning\_rate*\frac{moment\_1}{\sqrt{moment\_2}+\epsilon}\\ + +相关论文:`Adam: A Method for Stochastic Optimization `_ + +参数: + - **learning_rate** (float|_LRScheduler) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个_LRScheduler类,默认值为0.001 + - **beta1** (float|Tensor, 可选) - 一阶矩估计的指数衰减率,是一个float类型或者一个shape为[1],数据类型为float32的Tensor类型。默认值为0.9 + - **beta2** (float|Tensor, 可选) - 二阶矩估计的指数衰减率,是一个float类型或者一个shape为[1],数据类型为float32的Tensor类型。默认值为0.999 + - **epsilon** (float, 可选) - 保持数值稳定性的短浮点类型值,默认值为1e-08 + - **parameters** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 + - **weight_decay** (float|WeightDecayRegularizer,可选) - 正则化方法。可以是float类型的L2正则化系数或者正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 + :ref:`cn_api_fluid_regularizer_L2Decay` 。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; + 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 + - **grad_clip** (GradientClipBase, 可选) – 梯度裁剪的策略,支持三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。 + 默认值为None,此时将不进行梯度裁剪。 + - **name** (str, 可选)- 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None + - **lazy_mode** (bool, 可选) - 设为True时,仅更新当前具有梯度的元素。官方Adam算法有两个移动平均累加器(moving-average accumulators)。累加器在每一步都会更新。在密集模式和稀疏模式下,两条移动平均线的每个元素都会更新。如果参数非常大,那么更新可能很慢。 lazy mode仅更新当前具有梯度的元素,所以它会更快。但是这种模式与原始的算法有不同的描述,可能会导致不同的结果,默认为False + + +**代码示例** + +.. 
code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.mean(out) + adam = paddle.optimizer.Adam(learning_rate=0.1, + parameters=linear.parameters()) + out.backward() + adam.step() + adam.clear_grad() + +.. code-block:: python + + # Adam with beta1/beta2 as Tensor and weight_decay as float + import paddle + import numpy as np + + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.mean(out) + + beta1 = paddle.to_tensor([0.9], dtype="float32") + beta2 = paddle.to_tensor([0.99], dtype="float32") + + adam = paddle.optimizer.Adam(learning_rate=0.1, + parameters=linear.parameters(), + beta1=beta1, + beta2=beta2, + weight_decay=0.01) + out.backward() + adam.step() + adam.clear_grad() + +.. py:method:: step() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +执行一次优化器并进行参数更新。 + +返回:None。 + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + value = np.arange(26).reshape(2, 13).astype("float32") + a = paddle.to_tensor(value) + linear = paddle.nn.Linear(13, 5) + adam = paddle.optimizer.Adam(learning_rate = 0.01, + parameters = linear.parameters()) + out = linear(a) + out.backward() + adam.step() + adam.clear_grad() + +.. py:method:: minimize(loss, startup_program=None, parameters=None, no_grad_set=None) + +为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameters中的Parameters,最小化网络损失值loss。 + +参数: + - **loss** (Tensor) – 需要最小化的损失值变量 + - **startup_program** (Program, 可选) – 用于初始化parameters中参数的 :ref:`cn_api_fluid_Program` , 默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program` + - **parameters** (list, 可选) – 待更新的Parameter或者Parameter.name组成的列表, 默认值为None,此时将更新所有的Parameter + - **no_grad_set** (set, 可选) – 不需要更新的Parameter或者Parameter.name组成的集合,默认值为None + +返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.mean(out) + + beta1 = paddle.to_tensor([0.9], dtype="float32") + beta2 = paddle.to_tensor([0.99], dtype="float32") + + adam = paddle.optimizer.Adam(learning_rate=0.1, + parameters=linear.parameters(), + weight_decay=0.01) + out.backward() + adam.minimize(loss) + adam.clear_grad() + +.. py:method:: clear_grad() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + + +清除需要优化的参数的梯度。 + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + value = np.arange(26).reshape(2, 13).astype("float32") + a = paddle.to_tensor(value) + linear = paddle.nn.Linear(13, 5) + optimizer = paddle.optimizer.Adam(learning_rate=0.02, + parameters=linear.parameters()) + out = linear(a) + out.backward() + optimizer.step() + optimizer.clear_grad() + +.. py:method:: set_lr(value) + +**注意:** + + **1. 
该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +手动设置当前 ``optimizer`` 的学习率。当使用_LRScheduler时,无法使用该API手动设置学习率,因为这将导致冲突。 + +参数: + value (float) - 需要设置的学习率的值。 + +返回:None + +**代码示例** + +.. code-block:: python + + import paddle + paddle.disable_static() + linear = paddle.nn.Linear(10, 10) + + adam = paddle.optimizer.Adam(0.1, parameters=linear.parameters()) + + # set learning rate manually by python float value + lr_list = [0.2, 0.3, 0.4, 0.5, 0.6] + for i in range(5): + adam.set_lr(lr_list[i]) + lr = adam.get_lr() + print("current lr is {}".format(lr)) + # Print: + # current lr is 0.2 + # current lr is 0.3 + # current lr is 0.4 + # current lr is 0.5 + # current lr is 0.6 + +.. py:method:: get_lr() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +获取当前步骤的学习率。当不使用_LRScheduler时,每次调用的返回值都相同,否则返回当前步骤的学习率。 + +返回:float,当前步骤的学习率。 + + +**代码示例** + +.. code-block:: python + + import numpy as np + import paddle + # example1: _LRScheduler is not used, return value is all the same + paddle.disable_static() + emb = paddle.nn.Embedding(10, 10, sparse=False) + adam = paddle.optimizer.Adam(0.001, parameters = emb.parameters()) + lr = adam.get_lr() + print(lr) # 0.001 + + # example2: PiecewiseLR is used, return the step learning rate + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.reduce_mean(out) + + bd = [2, 4, 6, 8] + value = [0.2, 0.4, 0.6, 0.8, 1.0] + scheduler = paddle.optimizer.PiecewiseLR(bd, value, 0) + adam = paddle.optimizer.Adam(scheduler, + parameters=linear.parameters()) + + # first step: learning rate is 0.2 + np.allclose(adam.get_lr(), 0.2, rtol=1e-06, atol=0.0) # True + + # learning rate for different steps + ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0] + for i in range(12): + adam.step() + lr = adam.get_lr() + scheduler.step() + np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True diff --git a/doc/paddle/api/paddle/optimizer/Adamax_cn.rst b/doc/paddle/api/paddle/optimizer/Adamax_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b38c446571272905ab42e25ce18a463846c300cf --- /dev/null +++ b/doc/paddle/api/paddle/optimizer/Adamax_cn.rst @@ -0,0 +1,242 @@ +.. _cn_api_paddle_optimizer_Adamax: + +Adamax +------------------------------- + +.. py:class:: paddle.optimizer.Adamax(learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08, parameters=None, weight_decay=None, grad_clip=None, name=None) + + + + +Adamax优化器是参考 `Adam论文 `_ 第7节Adamax优化相关内容所实现的。Adamax算法是基于无穷大范数的 `Adam `_ 算法的一个变种,使学习率更新的算法更加稳定和简单。 + +其参数更新的计算公式如下: + +.. math:: + \\t = t + 1 +.. math:: + moment\_out=\beta_1∗moment+(1−\beta_1)∗grad +.. math:: + inf\_norm\_out=\max{(\beta_2∗inf\_norm+\epsilon, \left|grad\right|)} +.. math:: + learning\_rate=\frac{learning\_rate}{1-\beta_1^t} +.. 
math:: + param\_out=param−learning\_rate*\frac{moment\_out}{inf\_norm\_out}\\ + +相关论文:`Adam: A Method for Stochastic Optimization `_ + +论文中没有 ``epsilon`` 参数。但是,为了保持数值稳定性, 避免除0错误, 此处增加了这个参数。 + +参数: + - **learning_rate** (float|_LRScheduler) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个_LRScheduler类,默认值为0.001 + - **beta1** (float, 可选) - 一阶矩估计的指数衰减率,默认值为0.9 + - **beta2** (float, 可选) - 二阶矩估计的指数衰减率,默认值为0.999 + - **epsilon** (float, 可选) - 保持数值稳定性的短浮点类型值,默认值为1e-08 + - **parameters** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 + - **weight_decay** (float|WeightDecayRegularizer,可选) - 正则化方法。可以是float类型的L2正则化系数或者正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 + :ref:`cn_api_fluid_regularizer_L2Decay` 。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; + 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 + - **grad_clip** (GradientClipBase, 可选) – 梯度裁剪的策略,支持三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。 + 默认值为None,此时将不进行梯度裁剪。 + - **name** (str, 可选)- 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None + +.. note:: + 目前 ``Adamax`` 不支持 Sparse Parameter Optimization(稀疏参数优化)。 + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.mean(out) + adam = paddle.optimizer.Adamax(learning_rate=0.1, + parameters=linear.parameters()) + out.backward() + adam.step() + adam.clear_grad() + + +.. py:method:: step() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +执行一次优化器并进行参数更新。 + +返回:None。 + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + value = np.arange(26).reshape(2, 13).astype("float32") + a = paddle.to_tensor(value) + linear = paddle.nn.Linear(13, 5) + adam = paddle.optimizer.Adam(learning_rate = 0.01, + parameters = linear.parameters()) + out = linear(a) + out.backward() + adam.step() + adam.clear_grad() + +.. py:method:: minimize(loss, startup_program=None, parameters=None, no_grad_set=None) + +为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameters中的Parameters,最小化网络损失值loss。 + +参数: + - **loss** (Tensor) – 需要最小化的损失值变量 + - **startup_program** (Program, 可选) – 用于初始化parameters中参数的 :ref:`cn_api_fluid_Program` , 默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program` + - **parameters** (list, 可选) – 待更新的Parameter或者Parameter.name组成的列表, 默认值为None,此时将更新所有的Parameter + - **no_grad_set** (set, 可选) – 不需要更新的Parameter或者Parameter.name组成集合,默认值为None + +返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.mean(out) + + beta1 = paddle.to_tensor([0.9], dtype="float32") + beta2 = paddle.to_tensor([0.99], dtype="float32") + + adam = paddle.optimizer.Adamax(learning_rate=0.1, + parameters=linear.parameters(), + weight_decay=0.01) + out.backward() + adam.minimize(loss) + adam.clear_grad() + + +.. 
py:method:: clear_grad() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + + +清除需要优化的参数的梯度。 + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + value = np.arange(26).reshape(2, 13).astype("float32") + a = paddle.to_tensor(value) + linear = paddle.nn.Linear(13, 5) + optimizer = paddle.optimizer.Adamax(learning_rate=0.02, + parameters=linear.parameters()) + out = linear(a) + out.backward() + optimizer.step() + optimizer.clear_grad() + +.. py:method:: set_lr(value) + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +手动设置当前 ``optimizer`` 的学习率。当使用_LRScheduler时,无法使用该API手动设置学习率,因为这将导致冲突。 + +参数: + value (float) - 需要设置的学习率的值。 + +返回:None + +**代码示例** + +.. code-block:: python + + import paddle + paddle.disable_static() + linear = paddle.nn.Linear(10, 10) + + adam = paddle.optimizer.Adamax(0.1, parameters=linear.parameters()) + + # set learning rate manually by python float value + lr_list = [0.2, 0.3, 0.4, 0.5, 0.6] + for i in range(5): + adam.set_lr(lr_list[i]) + lr = adam.get_lr() + print("current lr is {}".format(lr)) + # Print: + # current lr is 0.2 + # current lr is 0.3 + # current lr is 0.4 + # current lr is 0.5 + # current lr is 0.6 + +.. py:method:: get_lr() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +获取当前步骤的学习率。当不使用_LRScheduler时,每次调用的返回值都相同,否则返回当前步骤的学习率。 + +返回:float,当前步骤的学习率。 + + +**代码示例** + +.. code-block:: python + + + import numpy as np + import paddle + # example1: _LRScheduler is not used, return value is all the same + paddle.disable_static() + emb = paddle.nn.Embedding(10, 10, sparse=False) + adam = paddle.optimizer.Adamax(0.001, parameters = emb.parameters()) + lr = adam.get_lr() + print(lr) # 0.001 + + # example2: PiecewiseLR is used, return the step learning rate + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.reduce_mean(out) + + bd = [2, 4, 6, 8] + value = [0.2, 0.4, 0.6, 0.8, 1.0] + scheduler = paddle.optimizer.PiecewiseLR(bd, value, 0) + adam = paddle.optimizer.Adamax(scheduler, + parameters=linear.parameters()) + + # first step: learning rate is 0.2 + np.allclose(adam.get_lr(), 0.2, rtol=1e-06, atol=0.0) # True + + # learning rate for different steps + ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0] + for i in range(12): + adam.step() + lr = adam.get_lr() + scheduler.step() + np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True diff --git a/doc/paddle/api/paddle/optimizer/CosineAnnealingLR_cn.rst b/doc/paddle/api/paddle/optimizer/CosineAnnealingLR_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..177ee846f284d232209f27cfd35a51938a527a5c --- /dev/null +++ b/doc/paddle/api/paddle/optimizer/CosineAnnealingLR_cn.rst @@ -0,0 +1,100 @@ +.. _cn_api_paddle_optimizer_CosineAnnealingLR: + +CosineAnnealingLR +----------------------------------- + +.. py:class:: paddle.optimizer.lr_scheduler.CosineAnnealingLR(learning_rate, T_max, eta_min=0, last_epoch=-1, verbose=False) + +该接口使用 ``cosine annealing`` 方式来动态调整学习率。 + +.. 
math:: + \begin{aligned} + \eta_t & = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})\left(1 + + \cos\left(\frac{T_{cur}}{T_{max}}\pi\right)\right), + & T_{cur} \neq (2k+1)T_{max}; \\ + \eta_{t+1} & = \eta_{t} + \frac{1}{2}(\eta_{max} - \eta_{min}) + \left(1 - \cos\left(\frac{1}{T_{max}}\pi\right)\right), + & T_{cur} = (2k+1)T_{max}. + \end{aligned} + + +:math:`\eta_{max}` 的初始值为 ``learning_rate``, :math:`T_{cur}` 是SGDR(重启训练SGD)训练过程中的当前训练轮数。SGDR的训练方法可以参考文档 `SGDR: Stochastic Gradient Descent with Warm Restarts `_. +这里只是实现了 ``cosine annealing`` 动态学习率,热启训练部分没有实现。 + + +参数 +::::::::: + - **learning_rate** (float):初始学习率,可以是Python的float。 + - **T_max** (float|int):训练的上限轮数,是学习率衰减周期的一半。 + - **eta_min** (float|int, 可选):学习率的下限,即公式中的 :math:`\eta_{min}` 。默认值为0。 + - **last_epoch** (int,可选): 上一轮的轮数,重启训练时设置为上一轮的epoch数。默认值为 -1,则为初始学习率。 + - **verbose** (bool,可选):如果是 `True` ,则在每一轮更新时在标准输出 `stdout` 输出一条信息。默认值为 ``False`` 。 + +返回 +::::::::: + 返回计算CosineAnnealingLR的可调用对象。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + # train on default imperative mode + paddle.disable_static() + x = np.random.uniform(-1, 1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + scheduler = paddle.optimizer.lr_scheduler.CosineAnnealingLR(learning_rate=0.5, T_max=10, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameter_list=linear.parameters()) + for epoch in range(20): + for batch_id in range(2): + x = paddle.to_tensor(x) + out = linear(x) + loss = paddle.reduce_mean(out) + loss.backward() + sgd.minimize(loss) + linear.clear_gradients() + scheduler.step() + + # train on static mode + paddle.enable_static() + main_prog = paddle.static.Program() + start_prog = paddle.static.Program() + with paddle.static.program_guard(main_prog, start_prog): + x = paddle.static.data(name='x', shape=[None, 4, 5]) + y = paddle.static.data(name='y', shape=[None, 4, 5]) + z = paddle.static.nn.fc(x, 100) + loss = paddle.mean(z) + scheduler = paddle.optimizer.lr_scheduler.CosineAnnealingLR(learning_rate=0.5, T_max=10, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler) + sgd.minimize(loss) + + exe = paddle.static.Executor() + exe.run(start_prog) + for epoch in range(20): + for batch_id in range(2): + out = exe.run( + main_prog, + feed={ + 'x': np.random.randn(3, 4, 5).astype('float32'), + 'y': np.random.randn(3, 4, 5).astype('float32') + }, + fetch_list=loss.name) + scheduler.step() + +.. py:method:: step(epoch=None) + +step函数需要在优化器的 `step()` 函数之后调用,调用之后将会根据epoch数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 + +参数: + - **epoch** (int,可选)- 指定具体的epoch数。默认值None,此时将会从-1自动累加 ``epoch`` 数。 + +返回: + 无。 + +**代码示例** : + + 参照上述示例代码。 + diff --git a/doc/paddle/api/paddle/optimizer/DGCMomentumOptimizer_cn.rst b/doc/paddle/api/paddle/optimizer/DGCMomentumOptimizer_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e4ca06112a4b4a9563ee567b0c48072485b43574 --- /dev/null +++ b/doc/paddle/api/paddle/optimizer/DGCMomentumOptimizer_cn.rst @@ -0,0 +1,154 @@ +.. _cn_api_fluid_optimizer_DGCMomentumOptimizer: + +DGCMomentumOptimizer +------------------------------- + + +.. 
py:class:: paddle.fluid.optimizer.DGCMomentumOptimizer(learning_rate, momentum, rampup_begin_step, rampup_step=1, sparsity=[0.999], use_nesterov=False, local_grad_clip_norm=None, num_trainers=None, regularization=None, grad_clip=None, name=None) + + + + +DGC(深度梯度压缩)Momentum 优化器。原始论文: https://arxiv.org/abs/1712.01887 + +DGC通过只传送重要梯度(稀疏更新)的方式,即只发送大于给定阈值的梯度,来减少通信带宽使用。 + +DGC会在本地累加剩余梯度以避免信息的丢失。最终这些梯度会大到足以传输。 + +因此,DGC只会立即发送大梯度,但随时间流逝所有梯度终将发送出去。 + +为确保精度不会损失,DGC在梯度稀疏化之上采用动量修正和局部梯度修剪(clip)来维持模型性能。 + +DGC还使用动量因子掩藏(momentum factor masking)和预训练(warm-up)来克服由于规约(reduced)通信而导致的数据陈旧性(staleness)问题。 + +这个优化器会执行如下操作: + +1. 从张量中获取的前TopK个重要梯度进行压缩,并将其用于allreduce通信以减少网络带宽使用。 +2. 调用momentum来优化代价函数。 + +参数: + - **learning_rate** (float | Variable) - 用于更新参数的学习率。可以是浮点值或由一个浮点型数据组成的Variable。 + - **momentum** (float) - 动量因子。 + - **rampup_begin_step** (int) - 进行梯度压缩的起步点。 + - **rampup_step** (int) - 使用稀疏预热的时间步长。默认值为1。例如:如果稀疏度为[0.75,0.9375,0.984375,0.996,0.999],并且rampup_step为100,则在0~19步时使用0.75,在20~39步时使用0.9375,依此类推。当到达sparsity数组末尾时,此后将会使用0.999。 + - **sparsity** (list [float]) - 从梯度张量中获取top个重要元素,比率为(1-当前稀疏度)。默认值为[0.999]。例如:如果sparsity为[0.99, 0.999],则将传输top [1%, 0.1%]的重要元素。 + - **use_nesterov** (bool) - 启用Nesterov momentum。 True意味着使用Nesterov。默认值False。 + - **local_grad_clip_norm** (float,可选) - 局部梯度裁减标准值。可选,默认为None,表示不需要裁减。 + - **num_trainers** (int,可选) - 训练节点的数量。可选,默认为None。 + - **regularization** (WeightDecayRegularizer,可选) - 正则化方法。支持两种正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 + :ref:`cn_api_fluid_regularizer_L2Decay` 。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; + 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 + - **grad_clip** (GradientClipByNorm, 可选) – 梯度裁剪的策略,``DGCMomentumOptimizer`` 仅支持 :ref:`cn_api_fluid_clip_GradientClipByNorm` 裁剪策略,如果不为该类型,将会抛出类型异常。默认值为None,此时将不进行梯度裁剪。 + - **name** (str,可选) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + optimizer = fluid.optimizer.DGCMomentumOptimizer( + learning_rate=0.0001, + momentum=0.9, + rampup_step=1000, + rampup_begin_step=1252, + sparsity=[0.999, 0.999]) + + + + +.. py:method:: apply_gradients(params_grads) + +为给定的params_grads对附加优化算子,为minimize过程的第二步 + +参数: + - **params_grads** (list)- 用于优化的(param, grad)对组成的列表 + +返回: 附加在当前Program的算子组成的列表 + +返回类型: list + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + def network(): + x = fluid.layers.data(name='x', shape=[1], dtype='int64', lod_level=0) + y = fluid.layers.data(name='y', shape=[1], dtype='int64', lod_level=0) + emb_x = fluid.layers.embedding( + input=x, + size=[10, 2], + is_sparse=False) + emb_y = fluid.layers.embedding( + input=y, + size=[10, 2], + is_sparse=False) + + concat = fluid.layers.concat([emb_x, emb_y], axis=1) + + fc = fluid.layers.fc(input=concat, + name="fc", + size=1, + num_flatten_dims=1, + bias_attr=False) + loss = fluid.layers.reduce_mean(fc) + return loss + + loss = network() + optimizer = fluid.optimizer.SGD(learning_rate=0.1) + params_grads = optimizer.backward(loss) + # you may append operations for params_grads here + # ... + optimizer.apply_gradients(params_grads) + + +.. py:method:: apply_optimize(loss, startup_program, params_grads) + +为给定的params_grads对附加优化算子,为minimize过程的第二步。 + +参数: + - **loss** (Variable) – 用于优化过程的损失值变量 + - **startup_program** (Program) – 用于初始化在parameter_list中参数的startup_program + - **params_grads** (list)- 用于优化的(param, grad)对组成的列表 + +返回: 附加在当前Program的算子组成的列表 + +返回类型: list + +.. 
py:method:: backward(loss, startup_program=None, parameter_list=None, no_grad_set=None, callbacks=None) + +自动做diff来向当前program附加反向算子,为minimize过程的第一步。 + +参数: + - **loss** (Variable) – 需要最小化的损失值变量 + - **startup_program** (Program, 可选) – 用于初始化parameter_list中参数的 :ref:`cn_api_fluid_Program` , 默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program` + - **parameter_list** (list, 可选) – 待更新的Parameter或者Parameter.name组成的列表, 默认值为None,此时将更新所有的Parameter + - **no_grad_set** (set, 可选) – 不需要更新的Parameter或者Parameter.name组成的集合,默认值为None + - **callbacks** (list, 可选) – 当为某参数附加反向算子时所要运行的callables组成的列表 + +返回: 附加在当前Program的算子组成的列表 + +返回类型: list + +**代码示例** + +详见apply_gradients的示例 + +.. py:method:: minimize(loss, startup_program=None, parameter_list=None, no_grad_set=None) + + +通过更新parameter_list来添加操作,进而使损失最小化。 + +该算子相当于backward()和apply_gradients()功能的合体。 + +参数: + - **loss** (Variable) – 用于优化过程的损失值变量 + - **startup_program** (Program) – 用于初始化在parameter_list中参数的startup_program + - **parameter_list** (list) – 待更新的Variables组成的列表 + - **no_grad_set** (set|None) – 应该被无视的Variables集合 + +返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 + +返回类型: tuple + diff --git a/doc/paddle/api/paddle/optimizer/DecayedAdagradOptimizer_cn.rst b/doc/paddle/api/paddle/optimizer/DecayedAdagradOptimizer_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7777b674d3aa0c0cebd4f3a39c3a53f00634e239 --- /dev/null +++ b/doc/paddle/api/paddle/optimizer/DecayedAdagradOptimizer_cn.rst @@ -0,0 +1,209 @@ +.. _cn_api_fluid_optimizer_DecayedAdagradOptimizer: + +DecayedAdagradOptimizer +------------------------------- + +.. py:class:: paddle.fluid.optimizer.DecayedAdagradOptimizer(learning_rate, decay=0.95, epsilon=1e-06, parameter_list=None, regularization=None, grad_clip=None, name=None) + + + + +Decayed Adagrad优化器,可以看做是引入了衰减率的 `Adagrad `_ 算法,用于解决使用 :ref:`cn_api_fluid_optimizer_AdagradOptimizer` 优化器时,在模型训练中后期学习率急剧下降的问题。 + +其参数更新的计算公式如下: + +.. math:: + moment\_out = decay*moment+(1-decay)*grad*grad +.. math:: + param\_out = param-\frac{learning\_rate*grad}{\sqrt{moment\_out}+\epsilon } + +在原论文中没有 ``epsilon`` 参数。但是,为了保持数值稳定性, 防止除0错误, 此处增加了这个参数。 + +相关论文:`Adaptive Subgradient Methods for Online Learning and Stochastic Optimization `_ + + +参数: + - **learning_rate** (float|Variable) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个值为浮点型的Variable + - **parameter_list** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 + - **decay** (float,可选) – 衰减率,默认值为0.95 + - **regularization** (WeightDecayRegularizer,可选) - 正则化方法。支持两种正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 + :ref:`cn_api_fluid_regularizer_L2Decay` 。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; + 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 + - **grad_clip** (GradientClipBase, 可选) – 梯度裁剪的策略,支持三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。 + 默认值为None,此时将不进行梯度裁剪。 + - **epsilon** (float,可选) - 保持数值稳定性的短浮点类型值,默认值为1e-06 + - **name** (str, 可选)- 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None + +.. note:: + 当前, ``DecayedAdagradOptimizer`` 不支持Sparse Parameter Optimization(稀疏参数优化) + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.layers as layers + from paddle.fluid.optimizer import DecayedAdagrad + + x = layers.data( name='x', shape=[-1, 10], dtype='float32' ) + trans = layers.fc( x, 100 ) + cost = layers.reduce_mean( trans ) + optimizer = fluid.optimizer.DecayedAdagradOptimizer(learning_rate=0.2) + optimizer.minimize(cost) + +.. py:method:: minimize(loss, startup_program=None, parameter_list=None, no_grad_set=None) + +为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameter_list中的Parameters,最小化网络损失值loss。 + +参数: + - **loss** (Variable) – 需要最小化的损失值变量 + - **startup_program** (Program, 可选) – 用于初始化parameter_list中参数的 :ref:`cn_api_fluid_Program` , 默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program` + - **parameter_list** (list, 可选) – 待更新的Parameter或者Parameter.name组成的列表, 默认值为None,此时将更新所有的Parameter + - **no_grad_set** (set, 可选) – 不需要更新的Parameter或者Parameter.name组成的集合,默认值为None + +返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 + +返回类型: tuple + +**代码示例** + +.. code-block:: python + + import numpy as np + import paddle.fluid as fluid + + inp = fluid.layers.data( + name="inp", shape=[2, 2], append_batch_size=False) + out = fluid.layers.fc(inp, size=3) + out = fluid.layers.reduce_sum(out) + optimizer = fluid.optimizer.DecayedAdagrad(learning_rate=0.2) + optimizer.minimize(out) + + np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32) + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(fluid.default_startup_program()) + exe.run( + feed={"inp": np_inp}, + fetch_list=[out.name]) + + +.. py:method:: clear_gradients() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + + +清除需要优化的参数的梯度。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + with fluid.dygraph.guard(): + value = np.arange(26).reshape(2, 13).astype("float32") + a = fluid.dygraph.to_variable(value) + linear = fluid.Linear(13, 5, dtype="float32") + optimizer = fluid.optimizer.DecayedAdagradOptimizer(learning_rate=0.02, + parameter_list=linear.parameters()) + out = linear(a) + out.backward() + optimizer.minimize(out) + optimizer.clear_gradients() + +.. py:method:: set_lr() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +手动设置当前 ``optimizer`` 的学习率。当使用LearningRateDecay时,无法使用该API手动设置学习率,因为这将导致冲突。 + +参数: + value (float|Variable) - 需要设置的学习率的值。 + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + with fluid.dygraph.guard(): + linear = fluid.dygraph.nn.Linear(10, 10) + adam = fluid.optimizer.Adam(0.1, parameter_list=linear.parameters()) + # 通过Python float数值手动设置学习率 + lr_list = [0.2, 0.3, 0.4, 0.5, 0.6] + for i in range(5): + adam.set_lr(lr_list[i]) + print("current lr is {}".format(adam.current_step_lr())) + # 打印结果: + # current lr is 0.2 + # current lr is 0.3 + # current lr is 0.4 + # current lr is 0.5 + # current lr is 0.6 + + + # 通过 框架的Variable 设置学习率 + lr_var = fluid.layers.create_global_var(shape=[1], value=0.7, dtype='float32') + adam.set_lr(lr_var) + print("current lr is {}".format(adam.current_step_lr())) + # 打印结果: + # current lr is 0.7 + + + +.. py:method:: current_step_lr() + +**注意:** + + **1. 
该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +获取当前步骤的学习率。当不使用LearningRateDecay时,每次调用的返回值都相同,否则返回当前步骤的学习率。 + +返回:当前步骤的学习率。 + +返回类型:float + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + # example1: LearningRateDecay is not used, return value is all the same + with fluid.dygraph.guard(): + emb = fluid.dygraph.Embedding([10, 10]) + adam = fluid.optimizer.Adam(0.001, parameter_list = emb.parameters()) + lr = adam.current_step_lr() + print(lr) # 0.001 + + # example2: PiecewiseDecay is used, return the step learning rate + with fluid.dygraph.guard(): + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = fluid.dygraph.nn.Linear(10, 10) + inp = fluid.dygraph.to_variable(inp) + out = linear(inp) + loss = fluid.layers.reduce_mean(out) + + bd = [2, 4, 6, 8] + value = [0.2, 0.4, 0.6, 0.8, 1.0] + adam = fluid.optimizer.Adam(fluid.dygraph.PiecewiseDecay(bd, value, 0), + parameter_list=linear.parameters()) + + # first step: learning rate is 0.2 + np.allclose(adam.current_step_lr(), 0.2, rtol=1e-06, atol=0.0) # True + + # learning rate for different steps + ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0] + for i in range(12): + adam.minimize(loss) + lr = adam.current_step_lr() + np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True + diff --git a/doc/paddle/api/paddle/optimizer/DecayedAdagrad_cn.rst b/doc/paddle/api/paddle/optimizer/DecayedAdagrad_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..215758e1f13843b4f60d299c229bac6b7721c5c4 --- /dev/null +++ b/doc/paddle/api/paddle/optimizer/DecayedAdagrad_cn.rst @@ -0,0 +1,16 @@ +.. _cn_api_fluid_optimizer_DecayedAdagrad: + +DecayedAdagrad +------------------------------- + +.. py:attribute:: paddle.fluid.optimizer.DecayedAdagrad + + + + +``DecayedAdagradOptimizer`` 的别名 + + + + + diff --git a/doc/paddle/api/paddle/optimizer/DpsgdOptimizer_cn.rst b/doc/paddle/api/paddle/optimizer/DpsgdOptimizer_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6bfeeec2248ee46f5f118aa73905996fd2982bac --- /dev/null +++ b/doc/paddle/api/paddle/optimizer/DpsgdOptimizer_cn.rst @@ -0,0 +1,100 @@ +.. _cn_api_fluid_optimizer_DpsgdOptimizer: + +DpsgdOptimizer +------------------------------- + +.. py:class:: paddle.fluid.optimizer.DpsgdOptimizer(learning_rate=0.001, clip=0.9, batch_size=0.999, sigma=1e-8) + + + + +Dpsgd优化器是参考CCS16论文 `《Deep Learning with Differential Privacy》 `_ 相关内容实现的。 + +其参数更新的计算公式如下: + +.. math:: + g\_clip_t = \frac{g_t}{\max{(1, \frac{||g_t||^2}{clip})}}\\ +.. math:: + g\_noise_t = g\_clip_t + \frac{gaussian\_noise(\sigma)}{batch\_size}\\ +.. math:: + param\_out=param−learning\_rate*g\_noise_t + + +参数: + - **learning_rate** (float|Variable,可选) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个值为浮点型的Variable,默认值为0.001 + - **clip** (float, 可选) - 裁剪梯度的L2正则项值的阈值下界,若梯度L2正则项值小于clip,则取clip作为梯度L2正则项值,默认值为0.9 + - **batch_size** (float, 可选) - 每个batch训练的样本数,默认值为0.999 + - **sigma** (float, 可选) - 参数更新时,会在梯度后添加一个满足高斯分布的噪声。此为高斯噪声的方差,默认值为1e-08 + +.. note:: + 目前 ``DpsgdOptimizer`` 不支持 Sparse Parameter Optimization(稀疏参数优化)。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import numpy + + # First create the Executor. 
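+    # NOTE: the Dpsgd hyper-parameters used below are illustrative values only.
+    # They correspond to the update formula above:
+    #   clip       -> threshold used when clipping the L2 norm of the gradient
+    #   batch_size -> number of samples per batch, scales the added Gaussian noise
+    #   sigma      -> variance of the Gaussian noise added to the clipped gradient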
+ place = fluid.CPUPlace() # fluid.CUDAPlace(0) + exe = fluid.Executor(place) + + train_program = fluid.Program() + startup_program = fluid.Program() + with fluid.program_guard(train_program, startup_program): + data = fluid.layers.data(name='X', shape=[1], dtype='float32') + hidden = fluid.layers.fc(input=data, size=10) + loss = fluid.layers.mean(hidden) + optimizer = fluid.optimizer.Dpsgd(learning_rate=0.01, clip=10.0, batch_size=16.0, sigma=1.0) + optimizer.minimize(loss) + + # Run the startup program once and only once. + exe.run(startup_program) + + x = numpy.random.random(size=(10, 1)).astype('float32') + outs = exe.run(program=train_program, + feed={'X': x}, + fetch_list=[loss.name]) + +.. py:method:: minimize(loss, startup_program=None, parameter_list=None, no_grad_set=None) + +为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameter_list中的Parameters,最小化网络损失值loss。 + +参数: + - **loss** (Variable) – 需要最小化的损失值变量 + - **startup_program** (Program, 可选) – 用于初始化parameter_list中参数的 :ref:`cn_api_fluid_Program` , 默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program` + - **parameter_list** (list, 可选) – 待更新的Parameter或者Parameter.name组成的列表, 默认值为None,此时将更新所有的Parameter + - **no_grad_set** (set, 可选) – 不需要更新的Parameter或者Parameter.name组成集合,默认值为None + +返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 + +**代码示例**: + +.. code-block:: python + + import numpy + import paddle.fluid as fluid + + data = fluid.layers.data(name='X', shape=[1], dtype='float32') + hidden = fluid.layers.fc(input=data, size=10) + loss = fluid.layers.mean(hidden) + adam = fluid.optimizer.Dpsgd(learning_rate=0.2) + adam.minimize(loss) + + place = fluid.CPUPlace() # fluid.CUDAPlace(0) + exe = fluid.Executor(place) + + x = numpy.random.random(size=(10, 1)).astype('float32') + exe.run(fluid.default_startup_program()) + outs = exe.run(program=fluid.default_main_program(), + feed={'X': x}, + fetch_list=[loss.name]) + + + + + + + + diff --git a/doc/paddle/api/paddle/optimizer/Dpsgd_cn.rst b/doc/paddle/api/paddle/optimizer/Dpsgd_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a6fde680ba5c3c0e82913e85b1bac83aa8d7623c --- /dev/null +++ b/doc/paddle/api/paddle/optimizer/Dpsgd_cn.rst @@ -0,0 +1,17 @@ +.. _cn_api_fluid_optimizer_Dpsgd: + +Dpsgd +------------------------------- + +.. py:attribute:: paddle.fluid.optimizer.Dpsgd + + + + +``DpsgdOptimizer`` 的别名 + + + + + + diff --git a/doc/paddle/api/paddle/optimizer/ExponentialLR_cn.rst b/doc/paddle/api/paddle/optimizer/ExponentialLR_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7360bd3f3c758f7f5a4c71f73b1e6ef12cfcae14 --- /dev/null +++ b/doc/paddle/api/paddle/optimizer/ExponentialLR_cn.rst @@ -0,0 +1,92 @@ +.. _cn_api_paddle_optimizer_ExponentialLR: + +ExponentialLR +----------------------------------- + +.. py:class:: paddle.optimizer.lr_scheduler.ExponentialLR(learning_rate, gamma, last_epoch=-1, verbose=False) + +该接口提供一种学习率按指数函数衰减的功能。 + +衰减函数可以用以下公式表示: + +.. 
math:: + + new\_learning\_rate = last\_learning\_rate * gamma + +参数 +::::::::: + - **learning_rate** (float) - 初始学习率,数据类型为Python float。 + - **gamma** (float):衰减率,new_lr = origin_lr * gamma。 + - **last_epoch** (int,可选): 上一轮的轮数,重启训练时设置为上一轮的epoch数。默认值为 -1,则为初始学习率 。 + - **verbose** (bool,可选):如果是 `True` ,则在每一轮更新时在标准输出 `stdout` 输出一条信息。默认值为 ``False`` 。 + +返回 +::::::::: +返回计算ExponentialLR的可调用对象。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + # train on default dygraph mode + paddle.disable_static() + x = np.random.uniform(-1, 1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + scheduler = paddle.optimizer.lr_scheduler.ExponentialLR(learning_rate=0.5, gamma=0.9, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameter_list=linear.parameters()) + for epoch in range(20): + for batch_id in range(2): + x = paddle.to_tensor(x) + out = linear(x) + loss = paddle.reduce_mean(out) + loss.backward() + sgd.minimize(loss) + linear.clear_gradients() + scheduler.step() + + # train on static mode + paddle.enable_static() + main_prog = paddle.static.Program() + start_prog = paddle.static.Program() + with paddle.static.program_guard(main_prog, start_prog): + x = paddle.static.data(name='x', shape=[None, 4, 5]) + y = paddle.static.data(name='y', shape=[None, 4, 5]) + z = paddle.static.nn.fc(x, 100) + loss = paddle.mean(z) + scheduler = paddle.optimizer.lr_scheduler.ExponentialLR(learning_rate=0.5, gamma=0.9, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler) + sgd.minimize(loss) + + exe = paddle.static.Executor() + exe.run(start_prog) + for epoch in range(20): + for batch_id in range(2): + out = exe.run( + main_prog, + feed={ + 'x': np.random.randn(3, 4, 5).astype('float32'), + 'y': np.random.randn(3, 4, 5).astype('float32') + }, + fetch_list=loss.name) + scheduler.step() + + +.. py:method:: step(epoch=None) + +step函数需要在优化器的 `step()` 函数之后调用,调用之后将会根据epoch数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 + +参数: + - **epoch** (int,可选)- 指定具体的epoch数。默认值None,此时将会从-1自动累加 ``epoch`` 数。 + +返回: + 无。 + +**代码示例** : + + 参照上述示例代码。 + + diff --git a/doc/paddle/api/paddle/optimizer/ExponentialMovingAverage_cn.rst b/doc/paddle/api/paddle/optimizer/ExponentialMovingAverage_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..88b6bf672a90457340e474378b1c9b11c0070c91 --- /dev/null +++ b/doc/paddle/api/paddle/optimizer/ExponentialMovingAverage_cn.rst @@ -0,0 +1,109 @@ +.. _cn_api_fluid_optimizer_ExponentialMovingAverage: + +ExponentialMovingAverage +------------------------------- + + +.. py:class:: paddle.fluid.optimizer.ExponentialMovingAverage(decay=0.999, thres_steps=None, name=None) + + + + +用指数衰减计算参数的滑动平均值。给定参数 :math:`\theta` ,它的指数滑动平均值 (exponential moving average, EMA) 为 + +.. math:: + \begin{align}\begin{aligned}\text{EMA}_0 & = 0\\\text{EMA}_t & = \text{decay} * \text{EMA}_{t-1} + (1 - \text{decay}) * \theta_t\end{aligned}\end{align} + + +用 ``update()`` 方法计算出的平均结果将保存在由实例化对象创建和维护的临时变量中,并且可以通过调用 ``apply()`` 方法把结果应用于当前模型的参数。同时,可用 ``restore()`` 方法恢复原始参数。 + +**偏置校正** 所有的滑动平均均初始化为 :math:`0` ,因此它们相对于零是有偏的,可以通过除以因子 :math:`(1 - \text{decay}^t)` 来校正,因此在调用 ``apply()`` 方法时,作用于参数的真实滑动平均值将为: + +.. math:: + \widehat{\text{EMA}}_t = \frac{\text{EMA}_t}{1 - \text{decay}^t} + +**衰减率调节** 一个非常接近于1的很大的衰减率将会导致平均值滑动得很慢。更优的策略是,开始时设置一个相对较小的衰减率。参数 ``thres_steps`` 允许用户传递一个变量以设置衰减率,在这种情况下, +真实的衰减率变为 : + +.. 
math:: + \min(\text{decay}, \frac{1 + \text{thres_steps}}{10 + \text{thres_steps}}) + +通常 ``thres_steps`` 可以是全局的训练迭代步数。 + + +参数: + - **decay** (float) – 指数衰减率,通常接近1,如0.999,0.9999,…… + - **thres_steps** (Variable, 可选) – 调节衰减率的阈值步数,默认值为 None。 + - **name** (str,可选) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值为None。 + +**代码示例** + +.. code-block:: python + + import numpy + import paddle + import paddle.fluid as fluid + + data = fluid.layers.data(name='x', shape=[5], dtype='float32') + hidden = fluid.layers.fc(input=data, size=10) + cost = fluid.layers.mean(hidden) + + test_program = fluid.default_main_program().clone(for_test=True) + + optimizer = fluid.optimizer.Adam(learning_rate=0.001) + optimizer.minimize(cost) + + global_steps = fluid.layers.learning_rate_scheduler._decay_step_counter() + ema = fluid.optimizer.ExponentialMovingAverage(0.999, thres_steps=global_steps) + ema.update() + + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + + for pass_id in range(3): + for batch_id in range(6): + data = numpy.random.random(size=(10, 5)).astype('float32') + exe.run(program=fluid.default_main_program(), + feed={'x': data}, + fetch_list=[cost.name]) + + # usage 1 + with ema.apply(exe): + data = numpy.random.random(size=(10, 5)).astype('float32') + exe.run(program=test_program, + feed={'x': data}, + fetch_list=[hidden.name]) + + + # usage 2 + with ema.apply(exe, need_restore=False): + data = numpy.random.random(size=(10, 5)).astype('float32') + exe.run(program=test_program, + feed={'x': data}, + fetch_list=[hidden.name]) + ema.restore(exe) + + +.. py:method:: update() + +更新指数滑动平均,在训练过程中需调用此方法。 + +.. py:method:: apply(executor, need_restore=True) + +模型评测时,将滑动平均的结果作用在参数上。 + +参数: + - **executor** (Executor) – 将滑动平均值作用在参数上的执行器。 + - **need_restore** (bool) –是否在结束后恢复原始参数,默认值为 ``True`` 。 + +.. py:method:: restore(executor) + +恢复参数。 + +参数: + - **executor** (Executor) – 执行恢复动作的执行器。 + + + + diff --git a/doc/paddle/api/paddle/optimizer/FtrlOptimizer_cn.rst b/doc/paddle/api/paddle/optimizer/FtrlOptimizer_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..149c890ea37a12d5983a34d3a6ee0a677d7ad65a --- /dev/null +++ b/doc/paddle/api/paddle/optimizer/FtrlOptimizer_cn.rst @@ -0,0 +1,219 @@ +.. _cn_api_fluid_optimizer_FtrlOptimizer: + +FtrlOptimizer +------------------------------- + +.. py:class:: paddle.fluid.optimizer.FtrlOptimizer(learning_rate, l1=0.0, l2=0.0, lr_power=-0.5, parameter_list=None, regularization=None, grad_clip=None, name=None) + + + + +该接口实现FTRL (Follow The Regularized Leader) Optimizer. + +FTRL 原始论文: ( `https://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf `_) + + +.. 
math:: + &\qquad new\_accum=squared\_accum+grad^2\\\\ + &\qquad if(lr\_power==−0.5):\\ + &\qquad \qquad linear\_accum+=grad-\frac{\sqrt{new\_accum}-\sqrt{squared\_accum}}{learning\_rate*param}\\ + &\qquad else:\\ + &\qquad \qquad linear\_accum+=grad-\frac{new\_accum^{-lr\_power}-accum^{-lr\_power}}{learning\_rate*param}\\\\ + &\qquad x=l1*sign(linear\_accum)−linear\_accum\\\\ + &\qquad if(lr\_power==−0.5):\\ + &\qquad \qquad y=\frac{\sqrt{new\_accum}}{learning\_rate}+(2*l2)\\ + &\qquad \qquad pre\_shrink=\frac{x}{y}\\ + &\qquad \qquad param=(abs(linear\_accum)>l1).select(pre\_shrink,0.0)\\ + &\qquad else:\\ + &\qquad \qquad y=\frac{new\_accum^{-lr\_power}}{learning\_rate}+(2*l2)\\ + &\qquad \qquad pre\_shrink=\frac{x}{y}\\ + &\qquad \qquad param=(abs(linear\_accum)>l1).select(pre\_shrink,0.0)\\\\ + &\qquad squared\_accum+=grad^2 + + +参数: + - **learning_rate** (float|Variable)- 全局学习率。 + - **parameter_list** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 + - **l1** (float,可选) - L1 regularization strength,默认值0.0。 + - **l2** (float,可选) - L2 regularization strength,默认值0.0。 + - **lr_power** (float,可选) - 学习率降低指数,默认值-0.5。 + - **regularization** (WeightDecayRegularizer,可选) - 正则化方法。支持两种正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 + :ref:`cn_api_fluid_regularizer_L2Decay` 。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; + 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 + - **grad_clip** (GradientClipBase, 可选) – 梯度裁剪的策略,支持三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。 + 默认值为None,此时将不进行梯度裁剪。 + - **name** (str, 可选) - 可选的名称前缀,一般无需设置,默认值为None。 + +抛出异常: + - ``ValueError`` - 如果 ``learning_rate`` , ``rho`` , ``epsilon`` , ``momentum`` 为 None. + +**代码示例** + +.. code-block:: python + + import paddle + import paddle.fluid as fluid + import numpy as np + + place = fluid.CPUPlace() + main = fluid.Program() + with fluid.program_guard(main): + x = fluid.layers.data(name='x', shape=[13], dtype='float32') + y = fluid.layers.data(name='y', shape=[1], dtype='float32') + y_predict = fluid.layers.fc(input=x, size=1, act=None) + cost = fluid.layers.square_error_cost(input=y_predict, label=y) + avg_cost = fluid.layers.mean(cost) + + ftrl_optimizer = fluid.optimizer.Ftrl(learning_rate=0.1) + ftrl_optimizer.minimize(avg_cost) + + fetch_list = [avg_cost] + train_reader = paddle.batch( + paddle.dataset.uci_housing.train(), batch_size=1) + feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + for data in train_reader(): + exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list) + + +**注意:目前, FtrlOptimizer 不支持 sparse parameter optimization。** + + +.. 
py:method:: minimize(loss, startup_program=None, parameter_list=None, no_grad_set=None) + + +通过更新parameter_list来添加操作,进而使损失最小化。 + +该算子相当于backward()和apply_gradients()功能的合体。 + +参数: + - **loss** (Variable) – 需要最小化的损失值变量 + - **startup_program** (Program, 可选) – 用于初始化parameter_list中参数的 :ref:`cn_api_fluid_Program` , 默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program` + - **parameter_list** (list, 可选) – 待更新的Parameter或者Parameter.name组成的列表, 默认值为None,此时将更新所有的Parameter + - **no_grad_set** (set, 可选) – 不需要更新的Parameter或者Parameter.name组成的集合,默认值为None + +返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 + +返回类型: tuple + + +.. py:method:: clear_gradients() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + + +清除需要优化的参数的梯度。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + with fluid.dygraph.guard(): + value = np.arange(26).reshape(2, 13).astype("float32") + a = fluid.dygraph.to_variable(value) + linear = fluid.Linear(13, 5, dtype="float32") + optimizer = fluid.optimizer.FtrlOptimizer(learning_rate=0.02, + parameter_list=linear.parameters()) + out = linear(a) + out.backward() + optimizer.minimize(out) + optimizer.clear_gradients() + +.. py:method:: set_lr() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +手动设置当前 ``optimizer`` 的学习率。当使用LearningRateDecay时,无法使用该API手动设置学习率,因为这将导致冲突。 + +参数: + value (float|Variable) - 需要设置的学习率的值。 + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + with fluid.dygraph.guard(): + linear = fluid.dygraph.nn.Linear(10, 10) + adam = fluid.optimizer.Adam(0.1, parameter_list=linear.parameters()) + # 通过Python float数值手动设置学习率 + lr_list = [0.2, 0.3, 0.4, 0.5, 0.6] + for i in range(5): + adam.set_lr(lr_list[i]) + print("current lr is {}".format(adam.current_step_lr())) + # 打印结果: + # current lr is 0.2 + # current lr is 0.3 + # current lr is 0.4 + # current lr is 0.5 + # current lr is 0.6 + + + # 通过 框架的Variable 设置学习率 + lr_var = fluid.layers.create_global_var(shape=[1], value=0.7, dtype='float32') + adam.set_lr(lr_var) + print("current lr is {}".format(adam.current_step_lr())) + # 打印结果: + # current lr is 0.7 + + +.. py:method:: current_step_lr() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +获取当前步骤的学习率。当不使用LearningRateDecay时,每次调用的返回值都相同,否则返回当前步骤的学习率。 + +返回:当前步骤的学习率。 + +返回类型:float + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + import numpy as np + + # example1: LearningRateDecay is not used, return value is all the same + with fluid.dygraph.guard(): + emb = fluid.dygraph.Embedding([10, 10]) + adam = fluid.optimizer.Adam(0.001, parameter_list = emb.parameters()) + lr = adam.current_step_lr() + print(lr) # 0.001 + + # example2: PiecewiseDecay is used, return the step learning rate + with fluid.dygraph.guard(): + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = fluid.dygraph.nn.Linear(10, 10) + inp = fluid.dygraph.to_variable(inp) + out = linear(inp) + loss = fluid.layers.reduce_mean(out) + + bd = [2, 4, 6, 8] + value = [0.2, 0.4, 0.6, 0.8, 1.0] + adam = fluid.optimizer.Adam(fluid.dygraph.PiecewiseDecay(bd, value, 0), + parameter_list=linear.parameters()) + + # first step: learning rate is 0.2 + np.allclose(adam.current_step_lr(), 0.2, rtol=1e-06, atol=0.0) # True + + # learning rate for different steps + ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0] + for i in range(12): + adam.minimize(loss) + lr = adam.current_step_lr() + np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True + diff --git a/doc/paddle/api/paddle/optimizer/Ftrl_cn.rst b/doc/paddle/api/paddle/optimizer/Ftrl_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..51856bf25d8165751d74df062408e21a517af929 --- /dev/null +++ b/doc/paddle/api/paddle/optimizer/Ftrl_cn.rst @@ -0,0 +1,15 @@ +.. _cn_api_fluid_optimizer_Ftrl: + +Ftrl +------------------------------- + +.. py:attribute:: paddle.fluid.optimizer.Ftrl + + + + +``FtrlOptimizer`` 的别名 + + + + diff --git a/doc/paddle/api/paddle/optimizer/InverseTimeLR_cn.rst b/doc/paddle/api/paddle/optimizer/InverseTimeLR_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7738cf5c504c5533ae9a69ebf262b9c18195c27b --- /dev/null +++ b/doc/paddle/api/paddle/optimizer/InverseTimeLR_cn.rst @@ -0,0 +1,94 @@ +.. _cn_api_paddle_optimizer_InverseTimeLR: + +InverseTimeLR +------------------------------- + + +.. py:class:: paddle.optimizer.lr_scheduler.InverseTimeLR(learning_rate, gamma, last_epoch=-1, verbose=False) + + +该接口提供反时限学习率衰减的功能。 + +反时限学习率衰减计算方式如下。 + +当staircase为False时,计算公式为: + +.. math:: + + new\_learning\_rate = \\frac{learning\_rate}{1 + gamma * epoch} + + +参数 +::::::::: + - **learning_rate** (float) - 初始学习率,数据类型为Python float。 + - **gamma** (float):衰减率,new_lr = origin_lr * gamma。 + - **last_epoch** (int,可选): 上一轮的轮数,重启训练时设置为上一轮的epoch数。默认值为 -1,则为初始学习率。 + - **verbose** (bool,可选):如果是 `True` ,则在每一轮更新时在标准输出 `stdout` 输出一条信息。默认值为 ``False`` 。 + +返回 +::::::::: +返回计算InverseTimeLR的可调用对象。 + +代码示例 +::::::::: + +.. 
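code-block:: text
+
+    # 衰减过程的数值示意(假设 learning_rate=0.5, gamma=0.1,结果为近似值):
+    epoch 0: lr = 0.5 / (1 + 0.1 * 0) = 0.5
+    epoch 1: lr = 0.5 / (1 + 0.1 * 1) ≈ 0.455
+    epoch 2: lr = 0.5 / (1 + 0.1 * 2) ≈ 0.417
+
+完整训练示例:
+
+.. 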
code-block:: python + + import paddle + import numpy as np + + # train on default dygraph mode + paddle.disable_static() + x = np.random.uniform(-1, 1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + scheduler = paddle.optimizer.lr_scheduler.InverseTimeLR(learning_rate=0.5, gamma=0.1, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameter_list=linear.parameters()) + for epoch in range(20): + for batch_id in range(2): + x = paddle.to_tensor(x) + out = linear(x) + loss = paddle.reduce_mean(out) + loss.backward() + sgd.minimize(loss) + linear.clear_gradients() + scheduler.step() + + # train on static mode + paddle.enable_static() + main_prog = paddle.static.Program() + start_prog = paddle.static.Program() + with paddle.static.program_guard(main_prog, start_prog): + x = paddle.static.data(name='x', shape=[None, 4, 5]) + y = paddle.static.data(name='y', shape=[None, 4, 5]) + z = paddle.static.nn.fc(x, 100) + loss = paddle.mean(z) + scheduler = paddle.optimizer.lr_scheduler.InverseTimeLR(learning_rate=0.5, gamma=0.1, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler) + sgd.minimize(loss) + + exe = paddle.static.Executor() + exe.run(start_prog) + for epoch in range(20): + for batch_id in range(2): + out = exe.run( + main_prog, + feed={ + 'x': np.random.randn(3, 4, 5).astype('float32'), + 'y': np.random.randn(3, 4, 5).astype('float32') + }, + fetch_list=loss.name) + scheduler.step() + +.. py:method:: step(epoch=None) + +step函数需要在优化器的 `step()` 函数之后调用,调用之后将会根据epoch数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 + +参数: + - **epoch** (int,可选)- 指定具体的epoch数。默认值None,此时将会从-1自动累加 ``epoch`` 数。 + +返回: + 无。 + +**代码示例** : + + 参照上述示例代码。 diff --git a/doc/paddle/api/paddle/optimizer/LambOptimizer_cn.rst b/doc/paddle/api/paddle/optimizer/LambOptimizer_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0be07027ecd6260fc691b11ee1cb38fbf72ba143 --- /dev/null +++ b/doc/paddle/api/paddle/optimizer/LambOptimizer_cn.rst @@ -0,0 +1,226 @@ +.. _cn_api_fluid_optimizer_LambOptimizer: + +LambOptimizer +------------------------------- + +.. py:class:: paddle.fluid.optimizer.LambOptimizer(learning_rate=0.001, lamb_weight_decay=0.01, beta1=0.9, beta2=0.999, epsilon=1e-06, parameter_list=None, regularization=None, grad_clip=None, exclude_from_weight_decay_fn=None, name=None) + + + + +LAMB(Layer-wise Adaptive Moments optimizer for Batching training)优化器 +LAMB的优化器旨在不降低精度的前提下增大训练的批量大小,其支持自适应的逐元素更新和精确的分层校正。 更多信息请参考 `Large Batch Optimization for +Deep Learning: Training BERT in 76 minutes `_ 。 +参数更新如下: + +.. 
math:: + + \begin{align} + \begin{aligned} + m_t &= \beta_1 m_{t - 1}+ (1 - \beta_1)g_t \\ + v_t &= \beta_2 v_{t - 1} + (1 - \beta_2)g_t^2 \\ + r_t &= \frac{m_t}{\sqrt{v_t}+\epsilon} \\ + w_t &= w_{t-1} -\eta_t \frac{\left \| w_{t-1}\right \|}{\left \| r_t + \lambda w_{t-1}\right \|} (r_t + \lambda w_{t-1}) + \end{aligned} + \end{align} + +其中 :math:`m` 为第一个动量,:math:`v` 为第二个动量,:math:`\eta` 为学习率,:math:`\lambda` 为 LAMB 权重衰减率。 + +参数: + - **learning_rate** (float|Variable) – 用于更新参数的学习率。可以是浮点数,或数据类型为浮点数的 Variable。 + - **lamb_weight_decay** (float) – LAMB权重衰减率。 + - **beta1** (float) – 第一个动量估计的指数衰减率。 + - **beta2** (float) – 第二个动量估计的指数衰减率。 + - **epsilon** (float) – 一个小的浮点值,目的是维持数值稳定性。 + - **parameter_list** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 + - **regularization** (WeightDecayRegularizer,可选) - 正则化方法。支持两种正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 + :ref:`cn_api_fluid_regularizer_L2Decay` 。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; + 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 + - **grad_clip** (GradientClipBase, 可选) – 梯度裁剪的策略,支持三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。 + 默认值为None,此时将不进行梯度裁剪。 + - **exclude_from_weight_decay_fn** (function) – 当某个参数作为输入该函数返回值为 ``True`` 时,为该参数跳过权重衰减。 + - **name** (str,可选) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值为None。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + data = fluid.layers.data(name='x', shape=[5], dtype='float32') + hidden = fluid.layers.fc(input=data, size=10) + cost = fluid.layers.mean(hidden) + + def exclude_fn(param): + return param.name.endswith('.b_0') + + optimizer = fluid.optimizer.Lamb(learning_rate=0.002, + exclude_from_weight_decay_fn=exclude_fn) + optimizer.minimize(cost) + + +.. py:method:: minimize(loss, startup_program=None, parameter_list=None, no_grad_set=None) + +为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameter_list中的Parameters,最小化网络损失值loss。 + +参数: + - **loss** (Variable) – 需要最小化的损失值变量。 + - **startup_program** (Program, 可选) – 用于初始化parameter_list中参数的 :ref:`cn_api_fluid_Program` , 默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program` + - **parameter_list** (list, 可选) – 待更新的Parameter或者Parameter.name组成的列表, 默认值为None,此时将更新所有的Parameter + - **no_grad_set** (set, 可选) – 不需要更新的Parameter或者Parameter.name组成的的集合,默认值为None + +返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 + +返回类型: tuple + +**代码示例**: + +.. code-block:: python + + import numpy + import paddle.fluid as fluid + + x = fluid.layers.data(name='X', shape=[13], dtype='float32') + y = fluid.layers.data(name='Y', shape=[1], dtype='float32') + y_predict = fluid.layers.fc(input=x, size=1, act=None) + cost = fluid.layers.square_error_cost(input=y_predict, label=y) + loss = fluid.layers.mean(cost) + adam = fluid.optimizer.LambOptimizer(learning_rate=0.2) + adam.minimize(loss) + + place = fluid.CPUPlace() + exe = fluid.Executor(place) + + x = numpy.random.random(size=(10, 13)).astype('float32') + y = numpy.random.random(size=(10, 1)).astype('float32') + exe.run(fluid.default_startup_program()) + outs = exe.run(program=fluid.default_main_program(), + feed={'X': x, 'Y': y}, + fetch_list=[loss.name]) + + + +.. py:method:: clear_gradients() + +**注意:** + + **1. 
该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + + +清除需要优化的参数的梯度。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + def exclude_fn(param): + return param.name.endswith('.b_0') + + with fluid.dygraph.guard(): + value = np.arange(26).reshape(2, 13).astype("float32") + a = fluid.dygraph.to_variable(value) + linear = fluid.Linear(13, 5, dtype="float32") + optimizer = fluid.optimizer.LambOptimizer(learning_rate=0.02, + exclude_from_weight_decay_fn=exclude_fn, + parameter_list=linear.parameters()) + out = linear(a) + out.backward() + optimizer.minimize(out) + optimizer.clear_gradients() + +.. py:method:: set_lr() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +手动设置当前 ``optimizer`` 的学习率。当使用LearningRateDecay时,无法使用该API手动设置学习率,因为这将导致冲突。 + +参数: + value (float|Variable) - 需要设置的学习率的值。 + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + with fluid.dygraph.guard(): + linear = fluid.dygraph.nn.Linear(10, 10) + adam = fluid.optimizer.Adam(0.1, parameter_list=linear.parameters()) + # 通过Python float数值手动设置学习率 + lr_list = [0.2, 0.3, 0.4, 0.5, 0.6] + for i in range(5): + adam.set_lr(lr_list[i]) + print("current lr is {}".format(adam.current_step_lr())) + # 打印结果: + # current lr is 0.2 + # current lr is 0.3 + # current lr is 0.4 + # current lr is 0.5 + # current lr is 0.6 + + + # 通过 框架的Variable 设置学习率 + lr_var = fluid.layers.create_global_var(shape=[1], value=0.7, dtype='float32') + adam.set_lr(lr_var) + print("current lr is {}".format(adam.current_step_lr())) + # 打印结果: + # current lr is 0.7 + + + +.. py:method:: current_step_lr() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +获取当前步骤的学习率。当不使用LearningRateDecay时,每次调用的返回值都相同,否则返回当前步骤的学习率。 + +返回:当前步骤的学习率。 + +返回类型:float + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + # example1: LearningRateDecay is not used, return value is all the same + with fluid.dygraph.guard(): + emb = fluid.dygraph.Embedding([10, 10]) + adam = fluid.optimizer.Adam(0.001, parameter_list = emb.parameters()) + lr = adam.current_step_lr() + print(lr) # 0.001 + + # example2: PiecewiseDecay is used, return the step learning rate + with fluid.dygraph.guard(): + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = fluid.dygraph.nn.Linear(10, 10) + inp = fluid.dygraph.to_variable(inp) + out = linear(inp) + loss = fluid.layers.reduce_mean(out) + + bd = [2, 4, 6, 8] + value = [0.2, 0.4, 0.6, 0.8, 1.0] + adam = fluid.optimizer.Adam(fluid.dygraph.PiecewiseDecay(bd, value, 0), + parameter_list=linear.parameters()) + + # first step: learning rate is 0.2 + np.allclose(adam.current_step_lr(), 0.2, rtol=1e-06, atol=0.0) # True + + # learning rate for different steps + ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0] + for i in range(12): + adam.minimize(loss) + lr = adam.current_step_lr() + np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True + diff --git a/doc/paddle/api/paddle/optimizer/LambdaLR_cn.rst b/doc/paddle/api/paddle/optimizer/LambdaLR_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5ae95b411a617ea8b6b41e5042a96ad88ea872be --- /dev/null +++ b/doc/paddle/api/paddle/optimizer/LambdaLR_cn.rst @@ -0,0 +1,95 @@ +.. _cn_api_paddle_optimizer_LambdaLR: + +LambdaLR +----------------------------------- + +.. 
py:class:: paddle.optimizer.lr_scheduler.LambdaLR(learning_rate, lr_lambda, last_epoch=-1, verbose=False) + +该接口提供 ``lambda`` 函数设置学习率的功能。 ``lr_lambda`` 为一个 ``lambda`` 函数,其通过 ``epoch`` 计算出一个因子,该因子会乘以初始学习率。。 + +衰减过程可以参考以下代码: + +.. code-block:: python + + learning_rate = 0.5 # init learning_rate + lr_lambda = lambda epoch: 0.95 ** epoch + learning_rate = 0.5 # epoch 0 + learning_rate = 0.475 # epoch 1 + learning_rate = 0.45125 # epoch 2 + + +参数 +::::::::: + - **learning_rate** (float) - 初始学习率,数据类型为Python float。 + - **lr_lambda** (function):lr_lambda 为一个lambda函数,其通过 epoch 计算出一个因子,该因子会乘以初始学习率。 + - **last_epoch** (int,可选): 上一轮的轮数,重启训练时设置为上一轮的epoch数。默认值为 -1,则为初始学习率 。 + - **verbose** (bool,可选):如果是 `True` ,则在每一轮更新时在标准输出 `stdout` 输出一条信息。默认值为 ``False`` 。 + +返回 +::::::::: + 返回计算LambdaLR的可调用对象。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + # train on default dygraph mode + paddle.disable_static() + x = np.random.uniform(-1, 1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + scheduler = paddle.optimizer.lr_scheduler.LambdaLR(learning_rate=0.5, lr_lambda=lambda x:0.95**x, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameter_list=linear.parameters()) + for epoch in range(20): + for batch_id in range(2): + x = paddle.to_tensor(x) + out = linear(x) + loss = paddle.reduce_mean(out) + loss.backward() + sgd.minimize(loss) + linear.clear_gradients() + scheduler.step() + + # train on static mode + paddle.enable_static() + main_prog = paddle.static.Program() + start_prog = paddle.static.Program() + with paddle.static.program_guard(main_prog, start_prog): + x = paddle.static.data(name='x', shape=[None, 4, 5]) + y = paddle.static.data(name='y', shape=[None, 4, 5]) + z = paddle.static.nn.fc(x, 100) + loss = paddle.mean(z) + scheduler = paddle.optimizer.lr_scheduler.LambdaLR(learning_rate=0.5, lr_lambda=lambda x:0.95**x, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler) + sgd.minimize(loss) + + exe = paddle.static.Executor() + exe.run(start_prog) + for epoch in range(20): + for batch_id in range(2): + out = exe.run( + main_prog, + feed={ + 'x': np.random.randn(3, 4, 5).astype('float32'), + 'y': np.random.randn(3, 4, 5).astype('float32') + }, + fetch_list=loss.name) + scheduler.step() + +.. py:method:: step(epoch=None) + +step函数需要在优化器的 `step()` 函数之后调用,调用之后将会根据epoch数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 + +参数: + - **epoch** (int,可选)- 指定具体的epoch数。默认值None,此时将会从-1自动累加 ``epoch`` 数。 + +返回: + 无。 + +**代码示例** : + + 参照上述示例代码。 + diff --git a/doc/paddle/api/paddle/optimizer/LarsMomentumOptimizer_cn.rst b/doc/paddle/api/paddle/optimizer/LarsMomentumOptimizer_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..63f22d05a09267924e4e5081fec6d2e4b757910d --- /dev/null +++ b/doc/paddle/api/paddle/optimizer/LarsMomentumOptimizer_cn.rst @@ -0,0 +1,196 @@ +.. _cn_api_fluid_optimizer_LarsMomentumOptimizer: + +LarsMomentumOptimizer +------------------------------- + +.. py:class:: paddle.fluid.optimizer.LarsMomentumOptimizer(learning_rate, momentum, lars_coeff=0.001, lars_weight_decay=0.0005, parameter_list=None, regularization=None, grad_clip=None, name=None) + + + + +该接口实现LARS支持的Momentum优化器 + +公式作如下更新: + +.. 
math:: + + & local\_learning\_rate = learning\_rate * lars\_coeff * \ + \frac{||param||}{||gradient|| + lars\_weight\_decay * ||param||}\\ + & velocity = mu * velocity + local\_learning\_rate * (gradient + lars\_weight\_decay * param)\\ + & param = param - velocity + +参数: + - **learning_rate** (float|Variable) - 学习率,用于参数更新。作为数据参数,可以是浮点型值或含有一个浮点型值的变量。 + - **momentum** (float) - 动量因子。 + - **parameter_list** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 + - **lars_coeff** (float,可选) - 定义LARS本地学习率的权重,默认值0.001。 + - **lars_weight_decay** (float,可选) - 使用LARS进行衰减的权重衰减系数,默认值0.0005。 + - **regularization** (WeightDecayRegularizer,可选) - 正则化方法。支持两种正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 + :ref:`cn_api_fluid_regularizer_L2Decay` 。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; + 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 + - **grad_clip** (GradientClipBase, 可选) – 梯度裁剪的策略,支持三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。 + 默认值为None,此时将不进行梯度裁剪。 + - **name** (str, 可选) - 可选的名称前缀,一般无需设置,默认值为None。 + + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32) + inp = fluid.layers.data( + name="inp", shape=[2, 2], append_batch_size=False) + out = fluid.layers.fc(inp, size=3) + out = fluid.layers.reduce_sum(out) + optimizer = fluid.optimizer.LarsMomentumOptimizer(learning_rate=0.001, momentum=0.9) + optimizer.minimize(out) + + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(fluid.default_startup_program()) + exe.run( + feed={"inp": np_inp}, + fetch_list=[out.name]) + + + +.. py:method:: minimize(loss, startup_program=None, parameter_list=None, no_grad_set=None) + + +通过更新parameter_list来添加操作,进而使损失最小化。 + +该算子相当于backward()和apply_gradients()功能的合体。 + +参数: + - **loss** (Variable) – 需要最小化的损失值变量 + - **startup_program** (Program, 可选) – 用于初始化parameter_list中参数的 :ref:`cn_api_fluid_Program` , 默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program` + - **parameter_list** (list, 可选) – 待更新的Parameter或者Parameter.name组成的列表, 默认值为None,此时将更新所有的Parameter + - **no_grad_set** (set, 可选) – 不需要更新的Parameter或者Parameter.name组成的的集合,默认值为None + +返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 + +返回类型: tuple + + +.. py:method:: clear_gradients() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + + +清除需要优化的参数的梯度。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + with fluid.dygraph.guard(): + value = np.arange(26).reshape(2, 13).astype("float32") + a = fluid.dygraph.to_variable(value) + linear = fluid.Linear(13, 5, dtype="float32") + optimizer = fluid.optimizer.LarsMomentumOptimizer(learning_rate=0.001, momentum=0.9, + parameter_list=linear.parameters()) + out = linear(a) + out.backward() + optimizer.minimize(out) + optimizer.clear_gradients() + +.. py:method:: set_lr() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +手动设置当前 ``optimizer`` 的学习率。当使用LearningRateDecay时,无法使用该API手动设置学习率,因为这将导致冲突。 + +参数: + value (float|Variable) - 需要设置的学习率的值。 + +返回:无 + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + + with fluid.dygraph.guard(): + linear = fluid.dygraph.nn.Linear(10, 10) + adam = fluid.optimizer.Adam(0.1, parameter_list=linear.parameters()) + # 通过Python float数值手动设置学习率 + lr_list = [0.2, 0.3, 0.4, 0.5, 0.6] + for i in range(5): + adam.set_lr(lr_list[i]) + print("current lr is {}".format(adam.current_step_lr())) + # 打印结果: + # current lr is 0.2 + # current lr is 0.3 + # current lr is 0.4 + # current lr is 0.5 + # current lr is 0.6 + + + # 通过 框架的Variable 设置学习率 + lr_var = fluid.layers.create_global_var(shape=[1], value=0.7, dtype='float32') + adam.set_lr(lr_var) + print("current lr is {}".format(adam.current_step_lr())) + # 打印结果: + # current lr is 0.7 + + + +.. py:method:: current_step_lr() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +获取当前步骤的学习率。当不使用LearningRateDecay时,每次调用的返回值都相同,否则返回当前步骤的学习率。 + +返回:当前步骤的学习率。 + +返回类型:float + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + # example1: LearningRateDecay is not used, return value is all the same + with fluid.dygraph.guard(): + emb = fluid.dygraph.Embedding([10, 10]) + adam = fluid.optimizer.Adam(0.001, parameter_list = emb.parameters()) + lr = adam.current_step_lr() + print(lr) # 0.001 + + # example2: PiecewiseDecay is used, return the step learning rate + with fluid.dygraph.guard(): + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = fluid.dygraph.nn.Linear(10, 10) + inp = fluid.dygraph.to_variable(inp) + out = linear(inp) + loss = fluid.layers.reduce_mean(out) + + bd = [2, 4, 6, 8] + value = [0.2, 0.4, 0.6, 0.8, 1.0] + adam = fluid.optimizer.Adam(fluid.dygraph.PiecewiseDecay(bd, value, 0), + parameter_list=linear.parameters()) + + # first step: learning rate is 0.2 + np.allclose(adam.current_step_lr(), 0.2, rtol=1e-06, atol=0.0) # True + + # learning rate for different steps + ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0] + for i in range(12): + adam.minimize(loss) + lr = adam.current_step_lr() + np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True + diff --git a/doc/paddle/api/paddle/optimizer/LarsMomentum_cn.rst b/doc/paddle/api/paddle/optimizer/LarsMomentum_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d5d4db1b2d8fc6eb306b40ab6d24a58418aa69e2 --- /dev/null +++ b/doc/paddle/api/paddle/optimizer/LarsMomentum_cn.rst @@ -0,0 +1,16 @@ +.. _cn_api_fluid_optimizer_LarsMomentum: + +LarsMomentum +------------------------------- + +.. py:attribute:: paddle.fluid.optimizer.LarsMomentum + + + + +``fluid.optimizer.LarsMomentumOptimizer`` 的别名 + + + + + diff --git a/doc/paddle/api/paddle/optimizer/LinearLrWarmup_cn.rst b/doc/paddle/api/paddle/optimizer/LinearLrWarmup_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a998567058693d62903618246658883b2f9310d9 --- /dev/null +++ b/doc/paddle/api/paddle/optimizer/LinearLrWarmup_cn.rst @@ -0,0 +1,105 @@ +.. _cn_api_paddle_optimizer_LinearLrWarmup: + +LinearLrWarmup +----------------------------------- + +.. py:class:: paddle.optimizer.lr_scheduler.LinearLrWarmup(learing_rate, warmup_steps, start_lr, end_lr, last_epoch=-1, verbose=False) + +该接口提供一种学习率优化策略-线性学习率热身(warm up)对学习率进行初步调整。在正常调整学习率之前,先逐步增大学习率。 + +当训练步数小于热身步数(warmup_steps)时,学习率lr按如下方式更新: + +.. code-block:: text + + linear_step = end_lr - start_lr + lr = start_lr + linear_step * (epoch / warmup_steps) + +当训练步数大于等于热身步数(warmup_steps)时,学习率lr为: + +.. 
code-block:: text + + lr = learning_rate + +其中learning_rate为热身之后的学习率。 + +参数 +::::::::: + - **learning rate** (float|_LRScheduler):热启训练之后的学习率,可以是Python的float或_LRScheduler子类。 + - **warmup_steps** (int):进行warm up过程的步数。 + - **start_lr** (float):warm up的起始学习率。 + - **end_lr** (float):warm up的最终学习率。 + - **last_epoch** (int,可选): 上一轮的轮数,重启训练时设置为上一轮的epoch数。默认值为 -1,则为初始学习率 。 + - **verbose** (bool,可选):如果是 `True` ,则在每一轮更新时在标准输出 `stdout` 输出一条信息。默认值为 ``False`` 。 + + +返回 +::::::::: +返回计算LinearLrWarmup的可调用对象。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + # train on default dygraph mode + paddle.disable_static() + x = np.random.uniform(-1, 1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + scheduler = paddle.optimizer.LinearLrWarmup( + learning_rate=0.5, warmup_steps=20, start_lr=0, end_lr=0.5, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameter_list=linear.parameters()) + for epoch in range(20): + for batch_id in range(2): + x = paddle.to_tensor(x) + out = linear(x) + loss = paddle.reduce_mean(out) + loss.backward() + sgd.minimize(loss) + linear.clear_gradients() + scheduler.step() + + # train on static mode + paddle.enable_static() + main_prog = paddle.static.Program() + start_prog = paddle.static.Program() + with paddle.static.program_guard(main_prog, start_prog): + x = paddle.static.data(name='x', shape=[None, 4, 5]) + y = paddle.static.data(name='y', shape=[None, 4, 5]) + z = paddle.static.nn.fc(x, 100) + loss = paddle.mean(z) + scheduler = paddle.optimizer.lr_scheduler.LinearLrWarmup( + learning_rate=0.5, warmup_steps=20, start_lr=0, end_lr=0.5, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler) + sgd.minimize(loss) + + exe = paddle.static.Executor() + exe.run(start_prog) + for epoch in range(20): + for batch_id in range(2): + out = exe.run( + main_prog, + feed={ + 'x': np.random.randn(3, 4, 5).astype('float32'), + 'y': np.random.randn(3, 4, 5).astype('float32') + }, + fetch_list=loss.name) + scheduler.step() + +.. py:method:: step(epoch=None) + +step函数需要在优化器的 `step()` 函数之后调用,调用之后将会根据epoch数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 + +参数: + - **epoch** (int,可选)- 指定具体的epoch数。默认值None,此时将会从-1自动累加 ``epoch`` 数。 + +返回: + 无。 + +**代码示例** : + + 参照上述示例代码。 + + diff --git a/doc/paddle/api/paddle/optimizer/LookaheadOptimizer_cn.rst b/doc/paddle/api/paddle/optimizer/LookaheadOptimizer_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b28b358621da9060a313360105bcbe306dbfa464 --- /dev/null +++ b/doc/paddle/api/paddle/optimizer/LookaheadOptimizer_cn.rst @@ -0,0 +1,58 @@ +.. _cn_api_fluid_optimizer_LookaheadOptimizer: + +LookaheadOptimizer +------------------------------- + + +.. py:class:: paddle.fluid.optimizer.LookaheadOptimizer(inner_optimizer, alpha=0.5, k=5) + + + + +本类实现了Lookahead优化算法:https://arxiv.org/abs/1907.08610。Lookahead优化算法在内存中保存两部分参数:快参数和慢参数。每个训练步次,inner_optimizer都更新快参数;每隔k个训练步次,Lookahead更新慢参数,如下: + +.. math:: + + & slow\_param_t = slow\_param_{t-1} + \alpha * (fast\_param_{t-1} - slow\_param_{t-1}) + + & fast\_param_t = slow\_param_t + +参数: + - **inner_optimizer** (Optimizer) - 基础优化器,如SGD + - **alpha** (float) - Lookahead 的学习率 + - **k** (int) - 慢参数更新的频率:k次一更新 + +**代码示例** + +.. 
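code-block:: python
+
+    # 以下仅为上文慢/快参数更新规则的 NumPy 数值示意(alpha=0.5,数组取值仅为演示),并非优化器的内部实现:
+    import numpy as np
+
+    alpha = 0.5
+    slow_param = np.array([1.0, 1.0])
+    fast_param = np.array([0.6, 1.4])   # 假设这是 inner_optimizer 连续更新 k 步后的快参数
+
+    # 每 k 步:slow_param += alpha * (fast_param - slow_param),随后快参数被重置为慢参数
+    slow_param = slow_param + alpha * (fast_param - slow_param)
+    fast_param = slow_param.copy()
+
+在网络中使用该优化器的完整示例:
+
+.. 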
code-block:: python + + import paddle + import paddle.fluid as fluid + import numpy.random as random + + x = fluid.layers.data(name='x', shape=[2], dtype='float32') + label = fluid.layers.data(name="label", shape=[1], dtype="int64") + y = fluid.layers.fc(input=[x], size=2, act="softmax") + loss = fluid.layers.cross_entropy(input=y, label=label) + loss = fluid.layers.mean(x=loss) + sgd = fluid.optimizer.SGD(learning_rate=0.01) + optimizer = fluid.optimizer.LookaheadOptimizer(sgd, + alpha=0.5, + k=5) + optimizer.minimize(loss) + main_program = fluid.default_main_program() + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + + def train_reader(limit=5): + for i in range(limit): + yield random.random([2]).astype('float32'), random.random([1]).astype('int64') + + feeder = fluid.DataFeeder(feed_list=[x, label], place=place) + reader = paddle.batch(paddle.reader.shuffle(train_reader, buf_size=50000),batch_size=1) + + for batch_data in reader(): + exe.run(fluid.default_main_program(), + feed=feeder.feed(batch_data)) + diff --git a/doc/paddle/api/paddle/optimizer/ModelAverage_cn.rst b/doc/paddle/api/paddle/optimizer/ModelAverage_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..bb9ece7b2644c82dd28f45f90e68d119033cd2a1 --- /dev/null +++ b/doc/paddle/api/paddle/optimizer/ModelAverage_cn.rst @@ -0,0 +1,172 @@ +.. _cn_api_fluid_optimizer_ModelAverage: + +ModelAverage +------------------------------- + + +.. py:class:: paddle.fluid.optimizer.ModelAverage(average_window_rate, min_average_window=10000, max_average_window=10000, regularization=None, name=None) + + + + +ModelAverage优化器,在训练过程中累积特定连续的历史Parameters,累积的历史范围可以用传入的average_window参数来控制,在预测时使用平均后的Parameters,通常可以提高预测的精度。 + +在滑动窗口中累积Parameters的平均值,将结果将保存在临时变量中,通过调用 ``apply()`` 方法可应用于当前模型的Parameters,使用 ``restore()`` 方法恢复当前模型Parameters的值。 + +计算平均值的窗口大小由 ``average_window_rate`` , ``min_average_window`` , ``max_average_window`` 以及当前Parameters更新次数(num_updates)共同决定。 + +累积次数(num_accumulates)大于特定窗口阈值(average_window)时,将累积的Parameters临时变量置为0.0,这几个参数的作用通过以下示例代码说明: + +.. code-block:: python + + if num_accumulates >= min_average_window and num_accumulates >= min(max_average_window, num_updates * average_window_rate): + num_accumulates = 0 + +上述条件判断语句中,num_accumulates表示当前累积的次数,可以抽象理解为累积窗口的长度,窗口长度至少要达到min_average_window参数设定的长度,并且不能超过max_average_window参数或者num_updates * average_window_rate规定的长度,其中num_updates表示当前Parameters更新的次数,average_window_rate是一个计算窗口长度的系数。 + +参数: + - **average_window_rate** (float) – 相对于Parameters更新次数的窗口长度计算比率 + - **min_average_window** (int, 可选) – 平均值计算窗口长度的最小值,默认值为10000 + - **max_average_window** (int, 可选) – 平均值计算窗口长度的最大值,推荐设置为一轮训练中mini-batchs的数目,默认值为10000 + - **regularization** (WeightDecayRegularizer,可选) - 正则化方法。支持两种正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 + :ref:`cn_api_fluid_regularizer_L2Decay` 。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; + 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 + - **name** (str, 可选)– 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + import numpy + + # 首先创建执行引擎 + place = fluid.CPUPlace() # fluid.CUDAPlace(0) + exe = fluid.Executor(place) + + train_program = fluid.Program() + startup_program = fluid.Program() + with fluid.program_guard(train_program, startup_program): + # 构建net + data = fluid.layers.data(name='X', shape=[1], dtype='float32') + hidden = fluid.layers.fc(input=data, size=10) + loss = fluid.layers.mean(hidden) + optimizer = fluid.optimizer.Momentum(learning_rate=0.2, momentum=0.1) + optimizer.minimize(loss) + + # 构建ModelAverage优化器 + model_average = fluid.optimizer.ModelAverage(0.15, + min_average_window=10000, + max_average_window=12500) + exe.run(startup_program) + for i in range(12500): + x = numpy.random.random(size=(10, 1)).astype('float32') + outs = exe.run(program=train_program, + feed={'X': x}, + fetch_list=[loss.name]) + # 应用ModelAverage + with model_average.apply(exe): + x = numpy.random.random(size=(10, 1)).astype('float32') + exe.run(program=train_program, + feed={'X': x}, + fetch_list=[loss.name]) + + +.. py:method:: apply(executor, need_restore=True) + +将累积Parameters的平均值应用于当前网络的Parameters。 + +参数: + - **executor** (fluid.Executor) – 当前网络的执行器 + - **need_restore** (bool) – 恢复标志变量,设为True时,执行完成后会将网络的Parameters恢复为网络默认的值,设为False将不会恢复,默认值True + +返回:无 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy + + # 首先创建执行引擎 + place = fluid.CPUPlace() # fluid.CUDAPlace(0) + exe = fluid.Executor(place) + + train_program = fluid.Program() + startup_program = fluid.Program() + with fluid.program_guard(train_program, startup_program): + # 构建net + data = fluid.layers.data(name='X', shape=[1], dtype='float32') + hidden = fluid.layers.fc(input=data, size=10) + loss = fluid.layers.mean(hidden) + optimizer = fluid.optimizer.Momentum(learning_rate=0.2, momentum=0.1) + optimizer.minimize(loss) + + # 构建ModelAverage优化器 + model_average = fluid.optimizer.ModelAverage(0.15, + min_average_window=10000, + max_average_window=12500) + exe.run(startup_program) + for i in range(12500): + x = numpy.random.random(size=(10, 1)).astype('float32') + outs = exe.run(program=train_program, + feed={'X': x}, + fetch_list=[loss.name]) + + # 应用ModelAverage + with model_average.apply(exe): + x = numpy.random.random(size=(10, 1)).astype('float32') + exe.run(program=train_program, + feed={'X': x}, + fetch_list=[loss.name]) + +.. py:method:: restore(executor) + +恢复当前网络的Parameters值 + +参数: + - **executor** (fluid.Executor) – 当前网络的执行器 + +返回:无 + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + import numpy + + # 首先创建执行引擎 + place = fluid.CPUPlace() # fluid.CUDAPlace(0) + exe = fluid.Executor(place) + + train_program = fluid.Program() + startup_program = fluid.Program() + with fluid.program_guard(train_program, startup_program): + # 构建net + data = fluid.layers.data(name='X', shape=[1], dtype='float32') + hidden = fluid.layers.fc(input=data, size=10) + loss = fluid.layers.mean(hidden) + optimizer = fluid.optimizer.Momentum(learning_rate=0.2, momentum=0.1) + optimizer.minimize(loss) + + # 构建ModelAverage优化器 + model_average = fluid.optimizer.ModelAverage(0.15, + min_average_window=10000, + max_average_window=12500) + exe.run(startup_program) + for i in range(12500): + x = numpy.random.random(size=(10, 1)).astype('float32') + outs = exe.run(program=train_program, + feed={'X': x}, + fetch_list=[loss.name]) + + # 应用ModelAverage + with model_average.apply(exe, False): + x = numpy.random.random(size=(10, 1)).astype('float32') + exe.run(program=train_program, + feed={'X': x}, + fetch_list=[loss.name]) + # 恢复网络Parameters + model_average.restore(exe) diff --git a/doc/paddle/api/paddle/optimizer/Momentum_cn.rst b/doc/paddle/api/paddle/optimizer/Momentum_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..80e940e19f2fbc1a92e60f20fe32a3200c8ba94b --- /dev/null +++ b/doc/paddle/api/paddle/optimizer/Momentum_cn.rst @@ -0,0 +1,14 @@ +.. _cn_api_fluid_optimizer_Momentum: + +Momentum +------------------------------- + +.. py:attribute:: paddle.fluid.optimizer.Momentum + + + + +``MomentumOptimizer`` 的别名 + + + diff --git a/doc/paddle/api/paddle/optimizer/MultiStepLR_cn.rst b/doc/paddle/api/paddle/optimizer/MultiStepLR_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e424b3153122a05f63202c5559d0ecd0f3825096 --- /dev/null +++ b/doc/paddle/api/paddle/optimizer/MultiStepLR_cn.rst @@ -0,0 +1,100 @@ +.. _cn_api_paddle_optimizer_MultiStepLR: + +MultiStepLR +----------------------------------- + +.. py:class:: paddle.optimizer.lr_scheduler.MultiStepLR(learning_rate, milestones, gamma=0.1, last_epoch=-1, verbose=False) + +该接口提供一种学习率按指定轮数衰减的功能。 + +衰减过程可以参考以下代码: + +.. code-block:: text + + learning_rate = 0.5 + milestones = [30, 50] + gamma = 0.1 + if epoch < 30: + learning_rate = 0.5 + elif epoch < 50: + learning_rate = 0.05 # 0.5 * 0.1 + else: + learning_rate = 0.005 # 0.05 * 0.1 + +参数 +::::::::: + - **learning_rate** (float) - 初始学习率,数据类型为Python float。 + - **milestones** :(list):轮数下标列表。必须递增。 + - **gamma** (float,可选):衰减率,new_lr = origin_lr * gamma, 衰减率必须小于等于1.0,默认值为0.1。 + - **last_epoch** (int,可选): 上一轮的轮数,重启训练时设置为上一轮的epoch数。默认值为 -1,则为初始学习率 。 + - **verbose** (bool,可选):如果是 `True` ,则在每一轮更新时在标准输出 `stdout` 输出一条信息。默认值为 ``False`` 。 + + +返回 +::::::::: +返回计算MultiStepLR的可调用对象。 + +代码示例 +::::::::: + +.. 
code-block:: python
+
+    import paddle
+    import numpy as np
+
+    # train on default dygraph mode
+    paddle.disable_static()
+    x = np.random.uniform(-1, 1, [10, 10]).astype("float32")
+    linear = paddle.nn.Linear(10, 10)
+    scheduler = paddle.optimizer.lr_scheduler.MultiStepLR(learning_rate=0.5, milestones=[2, 4, 6], gamma=0.8, verbose=True)
+    sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameter_list=linear.parameters())
+    for epoch in range(20):
+        for batch_id in range(2):
+            x = paddle.to_tensor(x)
+            out = linear(x)
+            loss = paddle.reduce_mean(out)
+            loss.backward()
+            sgd.minimize(loss)
+            linear.clear_gradients()
+        scheduler.step()
+
+    # train on static mode
+    paddle.enable_static()
+    main_prog = paddle.static.Program()
+    start_prog = paddle.static.Program()
+    with paddle.static.program_guard(main_prog, start_prog):
+        x = paddle.static.data(name='x', shape=[None, 4, 5])
+        y = paddle.static.data(name='y', shape=[None, 4, 5])
+        z = paddle.static.nn.fc(x, 100)
+        loss = paddle.mean(z)
+        scheduler = paddle.optimizer.lr_scheduler.MultiStepLR(learning_rate=0.5, milestones=[2, 4, 6], gamma=0.8, verbose=True)
+        sgd = paddle.optimizer.SGD(learning_rate=scheduler)
+        sgd.minimize(loss)
+
+    exe = paddle.static.Executor()
+    exe.run(start_prog)
+    for epoch in range(20):
+        for batch_id in range(2):
+            out = exe.run(
+                main_prog,
+                feed={
+                    'x': np.random.randn(3, 4, 5).astype('float32'),
+                    'y': np.random.randn(3, 4, 5).astype('float32')
+                },
+                fetch_list=loss.name)
+        scheduler.step()
+
+.. py:method:: step(epoch=None)
+
+step函数需要在优化器的 `step()` 函数之后调用,调用之后将会根据epoch数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。
+
+参数:
+    - **epoch** (int,可选)- 指定具体的epoch数。默认值None,此时将会从-1自动累加 ``epoch`` 数。
+
+返回:
+    无。
+
+**代码示例** :
+
+    参照上述示例代码。
+
diff --git a/doc/paddle/api/paddle/optimizer/NaturalExpLR_cn.rst b/doc/paddle/api/paddle/optimizer/NaturalExpLR_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c89cd03a7aee7356d7315518c69c59d4c87793aa
--- /dev/null
+++ b/doc/paddle/api/paddle/optimizer/NaturalExpLR_cn.rst
@@ -0,0 +1,91 @@
+.. _cn_api_paddle_optimizer_NaturalExpLR:
+
+NaturalExpLR
+-------------------------------
+
+
+.. py:class:: paddle.optimizer.lr_scheduler.NaturalExpLR(learning_rate, gamma, last_epoch=-1, verbose=False)
+
+该接口提供按自然指数衰减学习率的功能。
+
+自然指数衰减的计算方式如下。
+
+.. math::
+
+    decayed\_learning\_rate = learning\_rate * e^{- gamma * epoch}
+
+参数
+:::::::::
+    - **learning_rate** (float) - 初始学习率,数据类型为Python float。
+    - **gamma** (float):衰减率。
+    - **last_epoch** (int,可选): 上一轮的轮数,重启训练时设置为上一轮的epoch数。默认值为 -1,则为初始学习率。
+    - **verbose** (bool,可选):如果是 `True` ,则在每一轮更新时在标准输出 `stdout` 输出一条信息。默认值为 ``False`` 。
+
+返回
+:::::::::
+返回计算NaturalExpLR的可调用对象。
+
+代码示例
+:::::::::
+
+.. 
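code-block:: text
+
+    # 衰减过程的数值示意(假设 learning_rate=0.5, gamma=0.1,结果为近似值):
+    epoch 0: lr = 0.5 * e^(-0.1 * 0) = 0.5
+    epoch 1: lr = 0.5 * e^(-0.1 * 1) ≈ 0.452
+    epoch 2: lr = 0.5 * e^(-0.1 * 2) ≈ 0.409
+
+完整训练示例:
+
+.. 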
code-block:: python + + import paddle + import numpy as np + + # train on default dygraph mode + paddle.disable_static() + x = np.random.uniform(-1, 1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + scheduler = paddle.optimizer.lr_scheduler.NaturalExpLR(learning_rate=0.5, gamma=0.1, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameter_list=linear.parameters()) + for epoch in range(20): + for batch_id in range(2): + x = paddle.to_tensor(x) + out = linear(x) + loss = paddle.reduce_mean(out) + loss.backward() + sgd.minimize(loss) + linear.clear_gradients() + scheduler.step() + + # train on static mode + paddle.enable_static() + main_prog = paddle.static.Program() + start_prog = paddle.static.Program() + with paddle.static.program_guard(main_prog, start_prog): + x = paddle.static.data(name='x', shape=[None, 4, 5]) + y = paddle.static.data(name='y', shape=[None, 4, 5]) + z = paddle.static.nn.fc(x, 100) + loss = paddle.mean(z) + scheduler = paddle.optimizer.lr_scheduler.NaturalExpLR(learning_rate=0.5, gamma=0.1, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler) + sgd.minimize(loss) + + exe = paddle.static.Executor() + exe.run(start_prog) + for epoch in range(20): + for batch_id in range(2): + out = exe.run( + main_prog, + feed={ + 'x': np.random.randn(3, 4, 5).astype('float32'), + 'y': np.random.randn(3, 4, 5).astype('float32') + }, + fetch_list=loss.name) + scheduler.step() + +.. py:method:: step(epoch=None) + +step函数需要在优化器的 `step()` 函数之后调用,调用之后将会根据epoch数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 + +参数: + - **epoch** (int,可选)- 指定具体的epoch数。默认值None,此时将会从-1自动累加 ``epoch`` 数。 + +返回: + 无。 + +**代码示例** : + + 参照上述示例代码。 + diff --git a/doc/paddle/api/paddle/optimizer/NoamLR_cn.rst b/doc/paddle/api/paddle/optimizer/NoamLR_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..eadb423134cadd459d1719b658756c1bb2cfcfb3 --- /dev/null +++ b/doc/paddle/api/paddle/optimizer/NoamLR_cn.rst @@ -0,0 +1,98 @@ +.. _cn_api_paddle_optimizer_NoamLR: + +NoamLR +------------------------------- + + +.. py:class:: paddle.optimizer.lr_scheduler.NoamLR(d_model, warmup_steps, learning_rate=1.0, last_epoch=-1, verbose=False) + + +该接口提供Noam衰减学习率的功能。 + +Noam衰减的计算方式如下。 + +.. math:: + + decayed\_learning\_rate = learning\_rate * d_{model}^{-0.5} * min(epoch^{-0.5}, epoch * warmup\_steps^{-1.5}) + +关于Noam衰减的更多细节请参考 `attention is all you need `_ + +参数 +::::::::: + - **d$_{model}$** (int) - 模型的输入、输出向量特征维度,为超参数。数据类型为Python int。 + - **warmup_steps** (int) - 预热步数,为超参数。数据类型为Python int。 + - **learning_rate** (float) - 初始学习率,数据类型为Python float。默认值为1.0。 + - **last_epoch** (int,可选): 上一轮的轮数,重启训练时设置为上一轮的epoch数。默认值为 -1,则为初始学习率。 + - **verbose** (bool,可选):如果是 `True` ,则在每一轮更新时在标准输出 `stdout` 输出一条信息。默认值为 ``False`` 。 + +返回 +::::::::: +返回计算NoamLR的可调用对象。 + +代码示例 +::::::::: + +.. 
code-block:: python + + import paddle + import numpy as np + + # train on default dygraph mode + paddle.disable_static() + x = np.random.uniform(-1, 1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + scheduler = paddle.optimizer.lr_scheduler.NoamLR(d_model=0.01, warmup_steps=100, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameter_list=linear.parameters()) + for epoch in range(20): + for batch_id in range(2): + x = paddle.to_tensor(x) + out = linear(x) + loss = paddle.reduce_mean(out) + loss.backward() + sgd.minimize(loss) + linear.clear_gradients() + scheduler.step() + + # train on static mode + paddle.enable_static() + main_prog = paddle.static.Program() + start_prog = paddle.static.Program() + with paddle.static.program_guard(main_prog, start_prog): + x = paddle.static.data(name='x', shape=[None, 4, 5]) + y = paddle.static.data(name='y', shape=[None, 4, 5]) + z = paddle.static.nn.fc(x, 100) + loss = paddle.mean(z) + scheduler = paddle.optimizer.lr_scheduler.NoamLR(d_model=0.01, warmup_steps=100, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler) + sgd.minimize(loss) + + exe = paddle.static.Executor() + exe.run(start_prog) + for epoch in range(20): + for batch_id in range(2): + out = exe.run( + main_prog, + feed={ + 'x': np.random.randn(3, 4, 5).astype('float32'), + 'y': np.random.randn(3, 4, 5).astype('float32') + }, + fetch_list=loss.name) + scheduler.step() + + + +.. py:method:: step(epoch=None) + +step函数需要在优化器的 `step()` 函数之后调用,调用之后将会根据epoch数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 + +参数: + - **epoch** (int,可选)- 指定具体的epoch数。默认值None,此时将会从-1自动累加 ``epoch`` 数。 + +返回: + 无。 + +**代码示例** : + + 参照上述示例代码。 + + diff --git a/doc/paddle/api/paddle/optimizer/Optimizer_cn.rst b/doc/paddle/api/paddle/optimizer/Optimizer_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5aa31be296502db20d199843a4613537185caa1d --- /dev/null +++ b/doc/paddle/api/paddle/optimizer/Optimizer_cn.rst @@ -0,0 +1,220 @@ +.. _cn_api_paddle_optimizer_Optimizer: + +Optimizer +------------------------------- + +.. py:class:: paddle.optimizer.Optimizer(learning_rate=0.001, epsilon=1e-08, parameters=None, weight_decay=None, grad_clip=None, name=None) + + + +优化器的基类。 + +参数: + - **learning_rate** (float|_LRSeduler) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个_LRScheduler类,默认值为0.001 + - **parameters** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 + - **weight_decay** (float|WeightDecayRegularizer,可选) - 正则化方法。可以是float类型的L2正则化系数或者正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 + :ref:`cn_api_fluid_regularizer_L2Decay` 。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; + 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 + - **grad_clip** (GradientClipBase, 可选) – 梯度裁剪的策略,支持三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。 + 默认值为None,此时将不进行梯度裁剪。 + - **name** (str, 可选)- 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None + + +**代码示例** + +.. code-block:: python + + #以子类Adam为例 + import paddle + import numpy as np + + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.mean(out) + adam = paddle.optimizer.Adam(learning_rate=0.1, + parameters=linear.parameters()) + out.backward() + adam.step() + adam.clear_grad() + +.. py:method:: step() + +**注意:** + + **1. 
该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +执行一次优化器并进行参数更新。 + +返回:None。 + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + value = np.arange(26).reshape(2, 13).astype("float32") + a = paddle.to_tensor(value) + linear = paddle.nn.Linear(13, 5) + # This can be any optimizer supported by dygraph. + adam = paddle.optimizer.Adam(learning_rate = 0.01, + parameters = linear.parameters()) + out = linear(a) + out.backward() + adam.step() + adam.clear_grad() + +.. py:method:: minimize(loss, startup_program=None, parameters=None, no_grad_set=None) + +为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameters中的Parameters,最小化网络损失值loss。 + +参数: + - **loss** (Tensor) – 需要最小化的损失值变量 + - **startup_program** (Program, 可选) – 用于初始化parameters中参数的 :ref:`cn_api_fluid_Program` , 默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program` + - **parameters** (list, 可选) – 待更新的Parameter或者Parameter.name组成的列表, 默认值为None,此时将更新所有的Parameter + - **no_grad_set** (set, 可选) – 不需要更新的Parameter或者Parameter.name组成的集合,默认值为None + +返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.mean(out) + + beta1 = paddle.to_tensor([0.9], dtype="float32") + beta2 = paddle.to_tensor([0.99], dtype="float32") + + adam = paddle.optimizer.Adam(learning_rate=0.1, + parameters=linear.parameters(), + weight_decay=0.01) + out.backward() + adam.minimize(loss) + adam.clear_grad() + +.. py:method:: clear_grad() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + + +清除需要优化的参数的梯度。 + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + value = np.arange(26).reshape(2, 13).astype("float32") + a = paddle.to_tensor(value) + linear = paddle.nn.Linear(13, 5) + optimizer = paddle.optimizer.Adam(learning_rate=0.02, + parameters=linear.parameters()) + out = linear(a) + out.backward() + optimizer.step() + optimizer.clear_grad() + +.. py:method:: set_lr(value) + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +手动设置当前 ``optimizer`` 的学习率。当使用_LRScheduler时,无法使用该API手动设置学习率,因为这将导致冲突。 + +参数: + value (float) - 需要设置的学习率的值。 + +返回:None + +**代码示例** + +.. code-block:: python + + import paddle + paddle.disable_static() + linear = paddle.nn.Linear(10, 10) + + adam = paddle.optimizer.Adam(0.1, parameters=linear.parameters()) + + # set learning rate manually by python float value + lr_list = [0.2, 0.3, 0.4, 0.5, 0.6] + for i in range(5): + adam.set_lr(lr_list[i]) + lr = adam.get_lr() + print("current lr is {}".format(lr)) + # Print: + # current lr is 0.2 + # current lr is 0.3 + # current lr is 0.4 + # current lr is 0.5 + # current lr is 0.6 + +.. py:method:: get_lr() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +获取当前步骤的学习率。当不使用_LRScheduler时,每次调用的返回值都相同,否则返回当前步骤的学习率。 + +返回:float,当前步骤的学习率。 + + +**代码示例** + +.. 
code-block:: python + + import numpy as np + import paddle + # example1: _LRScheduler is not used, return value is all the same + paddle.disable_static() + emb = paddle.nn.Embedding(10, 10, sparse=False) + adam = paddle.optimizer.Adam(0.001, parameters = emb.parameters()) + lr = adam.get_lr() + print(lr) # 0.001 + + # example2: PiecewiseLR is used, return the step learning rate + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.reduce_mean(out) + + bd = [2, 4, 6, 8] + value = [0.2, 0.4, 0.6, 0.8, 1.0] + scheduler = paddle.optimizer.PiecewiseLR(bd, value, 0) + adam = paddle.optimizer.Adam(scheduler, + parameters=linear.parameters()) + + # first step: learning rate is 0.2 + np.allclose(adam.get_lr(), 0.2, rtol=1e-06, atol=0.0) # True + + # learning rate for different steps + ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0] + for i in range(12): + adam.step() + lr = adam.get_lr() + scheduler.step() + np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True + diff --git a/doc/paddle/api/paddle/optimizer/PiecewiseLR_cn.rst b/doc/paddle/api/paddle/optimizer/PiecewiseLR_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5f22bd57cf2646c2246b9238e28ffb548ab819db --- /dev/null +++ b/doc/paddle/api/paddle/optimizer/PiecewiseLR_cn.rst @@ -0,0 +1,95 @@ +.. _cn_api_paddle_optimizer_PiecewiseLR: + +PiecewiseLR +------------------------------- + + +.. py:class:: paddle.optimizer.lr_scheduler.PiecewiseLR(boundaries, values, last_epoch=-1, verbose=False) + + +该接口提供对初始学习率进行分段(piecewise)常数衰减的功能。 + +分段常数衰减的过程举例描述如下。 + +.. code-block:: text + + 例如,设定的boundaries列表为[100, 200],候选学习率常量列表values为[1.0, 0.5, 0.1],则: + 1、在当前训练步数epoch小于100步,学习率值为1.0。 + 2、在当前训练步数epoch大于或等于100步,并且小于200步时,学习率值为0.5。 + 3、在当前训练步数epoch大于或等于200步时,学习率值为0.1。 + + +参数 +::::::::: + - **boundaries** (list):指定衰减的步数边界。列表的数据元素为Python int类型。 + - **values** (list) :备选学习率列表。数据元素类型为Python float的列表。与边界值列表有对应的关系。 + - **last_epoch** (int,可选):上一轮的轮数,重启训练时设置为上一轮的epoch数。默认值为 -1,则为初始学习率 。 + - **verbose** (bool,可选):如果是 `True` ,则在每一轮更新时在标准输出 `stdout` 输出一条信息。默认值为 ``False`` 。 + +返回 +::::::::: +返回计算PiecewiseLR的可调用对象。 + +代码示例 +::::::::: + +.. 
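补充说明:下面先用一个纯 Python 草图复现上文分段取值的逻辑,便于理解 ``boundaries`` 与 ``values`` 的对应关系;函数名与取值仅为示意,并非 ``PiecewiseLR`` 的内部实现。完整训练用法见其后的代码示例。

.. code-block:: python

    # 仅作演示:boundaries=[100, 200]、values=[1.0, 0.5, 0.1] 时各 epoch 的学习率
    boundaries, values = [100, 200], [1.0, 0.5, 0.1]

    def piecewise_lr(epoch):
        for b, v in zip(boundaries, values):
            if epoch < b:
                return v
        return values[-1]

    print(piecewise_lr(50), piecewise_lr(150), piecewise_lr(250))  # 1.0 0.5 0.1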
code-block:: python + + import paddle + import numpy as np + + # train on default dygraph mode + paddle.disable_static() + x = np.random.uniform(-1, 1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + scheduler = paddle.optimizer.lr_scheduler.PiecewiseLR(boundaries=[3, 6, 9], values=[0.1, 0.2, 0.3, 0.4], verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameter_list=linear.parameters()) + for epoch in range(20): + for batch_id in range(2): + x = paddle.to_tensor(x) + out = linear(x) + loss = paddle.reduce_mean(out) + loss.backward() + sgd.minimize(loss) + linear.clear_gradients() + scheduler.step() + + # train on static mode + paddle.enable_static() + main_prog = paddle.static.Program() + start_prog = paddle.static.Program() + with paddle.static.program_guard(main_prog, start_prog): + x = paddle.static.data(name='x', shape=[None, 4, 5]) + y = paddle.static.data(name='y', shape=[None, 4, 5]) + z = paddle.static.nn.fc(x, 100) + loss = paddle.mean(z) + scheduler = paddle.optimizer.lr_scheduler.PiecewiseLR(boundaries=[3, 6, 9], values=[0.1, 0.2, 0.3, 0.4], verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler) + sgd.minimize(loss) + + exe = paddle.static.Executor() + exe.run(start_prog) + for epoch in range(20): + for batch_id in range(2): + out = exe.run( + main_prog, + feed={ + 'x': np.random.randn(3, 4, 5).astype('float32'), + 'y': np.random.randn(3, 4, 5).astype('float32') + }, + fetch_list=loss.name) + scheduler.step() + +.. py:method:: step(epoch=None) + +step函数需要在优化器的 `step()` 函数之后调用,调用之后将会根据epoch数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 + +参数: + - **epoch** (int,可选)- 指定具体的epoch数。默认值None,此时将会从-1自动累加 ``epoch`` 数。 + +返回: + 无。 + +**代码示例** : + + 参照上述示例代码。 diff --git a/doc/paddle/api/paddle/optimizer/RMSProp_cn.rst b/doc/paddle/api/paddle/optimizer/RMSProp_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2439a95494df7f5199e877cf9f66841dba5fc8a0 --- /dev/null +++ b/doc/paddle/api/paddle/optimizer/RMSProp_cn.rst @@ -0,0 +1,251 @@ +.. _cn_api_paddle_optimizer_RMSProp: + +RMSProp +------------------------------- + +.. py:class:: paddle.optimizer.RMSProp(learning_rate, rho=0.95, epsilon=1e-06, momentum=0.0, centered=False, parameters=None, weight_decay=None, grad_clip=None, name=None) + + + + +该接口实现均方根传播(RMSProp)法,是一种未发表的,自适应学习率的方法。原演示幻灯片中提出了RMSProp:[http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf]中的第29张。等式如下所示: + +.. math:: + r(w, t) & = \rho r(w, t-1) + (1 - \rho)(\nabla Q_{i}(w))^2\\ + w & = w - \frac{\eta} {\sqrt{r(w,t) + \epsilon}} \nabla Q_{i}(w) + +第一个等式计算每个权重平方梯度的移动平均值,然后将梯度除以 :math:`sqrtv(w,t)` 。 + +.. math:: + r(w, t) & = \rho r(w, t-1) + (1 - \rho)(\nabla Q_{i}(w))^2\\ + v(w, t) & = \beta v(w, t-1) +\frac{\eta} {\sqrt{r(w,t) +\epsilon}} \nabla Q_{i}(w)\\ + w & = w - v(w, t) + +如果居中为真: + +.. 
math:: + r(w, t) & = \rho r(w, t-1) + (1 - \rho)(\nabla Q_{i}(w))^2\\ + g(w, t) & = \rho g(w, t-1) + (1 -\rho)\nabla Q_{i}(w)\\ + v(w, t) & = \beta v(w, t-1) + \frac{\eta} {\sqrt{r(w,t) - (g(w, t))^2 +\epsilon}} \nabla Q_{i}(w)\\ + w & = w - v(w, t) + +其中, :math:`ρ` 是超参数,典型值为0.9,0.95等。 :math:`beta` 是动量术语。 :math:`epsilon` 是一个平滑项,用于避免除零,通常设置在1e-4到1e-8的范围内。 + +参数: + - **learning_rate** (float) - 全局学习率。 + - **rho** (float,可选) - rho是等式中的 :math:`rho` ,默认值0.95。 + - **epsilon** (float,可选) - 等式中的epsilon是平滑项,避免被零除,默认值1e-6。 + - **momentum** (float,可选) - 方程中的β是动量项,默认值0.0。 + - **centered** (bool,可选) - 如果为True,则通过梯度的估计方差,对梯度进行归一化;如果False,则由未centered的第二个moment归一化。将此设置为True有助于模型训练,但会消耗额外计算和内存资源。默认为False。 + - **parameters** (list, 可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 + - **weight_decay** (float|WeightDecayRegularizer,可选) - 正则化方法。可以是float类型的L2正则化系数或者正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 + :ref:`cn_api_fluid_regularizer_L2Decay` 。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; + 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 + - **grad_clip** (GradientClipBase, 可选) – 梯度裁剪的策略,支持三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。 + 默认值为None,此时将不进行梯度裁剪。 + - **name** (str, 可选) - 可选的名称前缀,一般无需设置,默认值为None。 + +抛出异常: + - ``ValueError`` -如果 ``learning_rate`` , ``rho`` , ``epsilon`` , ``momentum`` 为None。 + +**示例代码** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.mean(out) + + beta1 = paddle.to_tensor([0.9], dtype="float32") + beta2 = paddle.to_tensor([0.99], dtype="float32") + + adam = paddle.optimizer.RMSProp(learning_rate=0.1, + parameters=linear.parameters(), + weight_decay=0.01) + out.backward() + adam.step() + adam.clear_grad() + +.. py:method:: step() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +执行一次优化器并进行参数更新。 + +返回:None。 + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + value = np.arange(26).reshape(2, 13).astype("float32") + a = paddle.to_tensor(value) + linear = paddle.nn.Linear(13, 5) + adam = paddle.optimizer.RMSProp(learning_rate = 0.01, + parameters = linear.parameters()) + out = linear(a) + out.backward() + adam.step() + adam.clear_grad() + +.. py:method:: minimize(loss, startup_program=None, parameters=None, no_grad_set=None) + +为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameters中的Parameters,最小化网络损失值loss。 + +参数: + - **loss** (Tensor) – 需要最小化的损失值变量 + - **startup_program** (Program, 可选) – 用于初始化parameters中参数的 :ref:`cn_api_fluid_Program` , 默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program` + - **parameters** (list, 可选) – 待更新的Parameter或者Parameter.name组成的列表, 默认值为None,此时将更新所有的Parameter + - **no_grad_set** (set, 可选) – 不需要更新的Parameter或者Parameter.name组成的集合,默认值为None + +返回: tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 + + +**示例代码** + +.. 
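补充说明:为帮助理解前文 RMSProp 的更新公式,下面用一个极简的纯 Python 草图对单个标量参数执行一次(非 centered、带动量)更新,变量 ``r``、``v`` 分别对应公式中的均方梯度滑动平均与动量项;取值仅为示意,并非 ``paddle.optimizer.RMSProp`` 的内部实现。``minimize`` 的完整用法见其后的示例代码。

.. code-block:: python

    import math

    # 仅作演示:对应上文带动量的 RMSProp 更新公式
    rho, eta, beta, eps = 0.95, 0.01, 0.9, 1e-6   # 衰减率、学习率、动量、平滑项
    w, grad, r, v = 1.0, 0.2, 0.0, 0.0

    r = rho * r + (1 - rho) * grad ** 2           # 均方梯度的滑动平均
    v = beta * v + eta / math.sqrt(r + eps) * grad
    w = w - v
    print(r, v, w)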
code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.mean(out) + + beta1 = paddle.to_tensor([0.9], dtype="float32") + beta2 = paddle.to_tensor([0.99], dtype="float32") + + adam = paddle.optimizer.RMSProp(learning_rate=0.1, + parameters=linear.parameters(), + weight_decay=0.01) + out.backward() + adam.minimize(loss) + adam.clear_grad() + +.. py:method:: clear_gradients() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + + +清除需要优化的参数的梯度。 + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + value = np.arange(26).reshape(2, 13).astype("float32") + a = paddle.to_tensor(value) + linear = paddle.nn.Linear(13, 5) + optimizer = paddle.optimizer.RMSProp(learning_rate=0.02, + parameters=linear.parameters()) + out = linear(a) + out.backward() + optimizer.step() + optimizer.clear_gradients() + +.. py:method:: set_lr(value) + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +手动设置当前 ``optimizer`` 的学习率。当使用_LRScheduler时,无法使用该API手动设置学习率,因为这将导致冲突。 + +参数: + value (float) - 需要设置的学习率的值。 + +返回:None + +**代码示例** + +.. code-block:: python + + + import paddle + paddle.disable_static() + linear = paddle.nn.Linear(10, 10) + + adam = paddle.optimizer.RMSProp(0.1, parameters=linear.parameters()) + + # set learning rate manually by python float value + lr_list = [0.2, 0.3, 0.4, 0.5, 0.6] + for i in range(5): + adam.set_lr(lr_list[i]) + lr = adam.get_lr() + print("current lr is {}".format(lr)) + # Print: + # current lr is 0.2 + # current lr is 0.3 + # current lr is 0.4 + # current lr is 0.5 + # current lr is 0.6 + +.. py:method:: get_lr() + +**注意:** + + **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + +获取当前步骤的学习率。当不使用_LRScheduler时,每次调用的返回值都相同,否则返回当前步骤的学习率。 + +返回:float,当前步骤的学习率。 + + +**代码示例** + +.. code-block:: python + + import numpy as np + import paddle + # example1: _LRScheduler is not used, return value is all the same + paddle.disable_static() + emb = paddle.nn.Embedding(10, 10, sparse=False) + adam = paddle.optimizer.RMSProp(0.001, parameters = emb.parameters()) + lr = adam.get_lr() + print(lr) # 0.001 + + # example2: PiecewiseDecay is used, return the step learning rate + paddle.disable_static() + inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + inp = paddle.to_tensor(inp) + out = linear(inp) + loss = paddle.reduce_mean(out) + + bd = [2, 4, 6, 8] + value = [0.2, 0.4, 0.6, 0.8, 1.0] + scheduler = paddle.optimizer.PiecewiseLR(bd, value, 0) + adam = paddle.optimizer.RMSProp(scheduler, + parameters=linear.parameters()) + + # first step: learning rate is 0.2 + np.allclose(adam.get_lr(), 0.2, rtol=1e-06, atol=0.0) # True + + # learning rate for different steps + ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0] + for i in range(12): + adam.step() + lr = adam.get_lr() + scheduler.step() + np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True diff --git a/doc/paddle/api/paddle/optimizer/RecomputeOptimizer_cn.rst b/doc/paddle/api/paddle/optimizer/RecomputeOptimizer_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..28161f7682953d987bd4995821d5d9cadecf1ae0 --- /dev/null +++ b/doc/paddle/api/paddle/optimizer/RecomputeOptimizer_cn.rst @@ -0,0 +1,195 @@ +.. 
_cn_api_fluid_optimizer_RecomputeOptimizer: + +RecomputeOptimizer +------------------------------- + + +.. py:class:: paddle.fluid.optimizer.RecomputeOptimizer(optimizer) + + + + +通常来讲,一个深度学习的训练流程包含了三个子步骤:首先,运行前向算子来计算Variable和loss的值;其次,运行反向算子来计算参数的梯度;最后,应用优化算法以更新参数值。 + +在前向运算过程中,反向运算会用到的Variable都会保存在内存中,当模型深度很深时,这会占用大量的内存。 + +重计算将深度学习网络切分为k个部分(segments)。在每个segment,运行反向运算时会首先运算前向计算。在重计算模式下,前向计算除了checkpoint和一些必须存储在内存中的特殊Variable,其他临时Variable都会被释放,这对节省内存非常有益。 + +把一个深度学习网络切分为k个segments的Variables被称为checkpoints。用户在使用运行RecomputeOptimizer之前需要先设置checkpoints。 + +参数: + - **optimizer** (Optimizer)-内部优化器 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + def gen_data(): + return {"x": np.random.random(size=(32, 32)).astype('float32'), + "y": np.random.randint(2, size=(32, 1)).astype('int64')} + def mlp(input_x, input_y, hid_dim=128, label_dim=2): + print(input_x) + fc_1 = fluid.layers.fc(input=input_x, size=hid_dim) + prediction = fluid.layers.fc(input=[fc_1], size=label_dim, act='softmax') + cost = fluid.layers.cross_entropy(input=prediction, label=input_y) + sum_cost = fluid.layers.reduce_mean(cost) + return sum_cost, fc_1, prediction + input_x = fluid.layers.data(name="x", shape=[32], dtype='float32') + input_y = fluid.layers.data(name="y", shape=[1], dtype='int64') + cost, fc_1, pred = mlp(input_x, input_y) + + sgd = fluid.optimizer.Adam(learning_rate=0.01) + sgd = fluid.optimizer.RecomputeOptimizer(sgd) + sgd._set_checkpoints([fc_1, pred]) + sgd.minimize(cost) + + print("Finished optimize") + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + step = 10 + + for i in range(step): + cost_val = exe.run(feed=gen_data(), + program=fluid.default_main_program(), + fetch_list=[cost.name]) + print("step=%d cost=%f" % (i, cost_val[0])) + + +.. py:method:: apply_gradients(params_grads) + +调用self.apply_gradients + +参数: + - **params_grads** (list)- 用于优化的(param, grad)对组成的列表 + +返回: 附加在当前Program的优化算子组成的列表 + +返回类型: list + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.framework as framework + + def mlp(input_x, input_y, hid_dim=128, label_dim=2): + fc_1 = fluid.layers.fc(input=input_x, size=hid_dim) + prediction = fluid.layers.fc(input=[fc_1], size=label_dim, act='softmax') + cost = fluid.layers.cross_entropy(input=prediction, label=input_y) + sum_cost = fluid.layers.reduce_mean(cost) + return sum_cost, fc_1, prediction + + + input_x = fluid.layers.data(name="x", shape=[32], dtype='float32') + input_y = fluid.layers.data(name="y", shape=[1], dtype='int64') + cost, fc_1, pred = mlp(input_x, input_y) + print("Finished FF") + + sgd = fluid.optimizer.Adam(learning_rate=0.01) + sgd = fluid.optimizer.RecomputeOptimizer(sgd) + params_grads = sgd.backward( + cost, + startup_program=None, + parameter_list=None, + no_grad_set=None) + + program = cost.block.program + with framework.program_guard(program, None): + optimize_ops = sgd.apply_gradients(params_grads) + + print("Finished apply gradients") + +.. py:method:: apply_optimize(loss, startup_program, params_grads) + +调用self._optimizer的apply_optimize函数 + +参数: + - **loss** (Variable) – 用于优化过程的损失值变量 + - **startup_program** (Program) – 用于初始化在parameter_list中参数的startup_program + - **params_grads** (list)- 用于优化的(param, grad)对组成的列表 + +返回: 附加在当前Program的算子组成的列表 + +返回类型: list + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + + def mlp(input_x, input_y, hid_dim=128, label_dim=2): + fc_1 = fluid.layers.fc(input=input_x, size=hid_dim) + prediction = fluid.layers.fc(input=[fc_1], size=label_dim, act='softmax') + cost = fluid.layers.cross_entropy(input=prediction, label=input_y) + sum_cost = fluid.layers.reduce_mean(cost) + return sum_cost, fc_1, prediction + + input_x = fluid.layers.data(name="x", shape=[32], dtype='float32') + input_y = fluid.layers.data(name="y", shape=[1], dtype='int64') + cost, fc_1, pred = mlp(input_x, input_y) + print("Finished FF") + + sgd = fluid.optimizer.Adam(learning_rate=0.01) + sgd = fluid.optimizer.RecomputeOptimizer(sgd) + params_grads = sgd.backward( + cost, + startup_program=None, + parameter_list=None, + no_grad_set=None) + + optimize_ops = sgd.apply_optimize( + cost, startup_program=None, params_grads=params_grads) + + print("Finished apply_optimize") + +.. py:method:: backward(loss, startup_program=None, parameter_list=None, no_grad_set=None, callbacks=None) + +带checkpoint的backward函数 + +参数: + - **loss** (Variable) – 需要最小化的损失值变量 + - **startup_program** (Program, 可选) – 用于初始化parameter_list中参数的 :ref:`cn_api_fluid_Program` , 默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program` + - **parameter_list** (list, 可选) – 待更新的Parameter或者Parameter.name组成的列表, 默认值为None,此时将更新所有的Parameter + - **no_grad_set** (set, 可选) – 不需要更新的Parameter或者Parameter.name组成的的集合,默认值为None + - **callbacks** (list, 可选) – 当为某参数附加反向算子时所要运行的callables组成的列表 + - **checkpoints** (list, 可选) – 一批作为checkpoints的Variables + +返回: 由(param, grad)对构成的列表,其中param是参数,grad是其对应的梯度 + +返回类型: list + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + def mlp(input_x, input_y, hid_dim=128, label_dim=2): + fc_1 = fluid.layers.fc(input=input_x, size=hid_dim) + prediction = fluid.layers.fc(input=[fc_1], size=label_dim, act='softmax') + cost = fluid.layers.cross_entropy(input=prediction, label=input_y) + sum_cost = fluid.layers.reduce_mean(cost) + return sum_cost, fc_1, prediction + + + input_x = fluid.layers.data(name="x", shape=[32], dtype='float32') + input_y = fluid.layers.data(name="y", shape=[1], dtype='int64') + cost, fc_1, pred = mlp(input_x, input_y) + print("Finished FF") + + sgd = fluid.optimizer.Adam(learning_rate=0.01) + sgd = fluid.optimizer.RecomputeOptimizer(sgd) + params_grads = sgd.backward( + cost, + startup_program=None, + parameter_list=None, + no_grad_set=None) + print("Finished backward") + + + diff --git a/doc/paddle/api/paddle/optimizer/ReduceLROnPlateau_cn.rst b/doc/paddle/api/paddle/optimizer/ReduceLROnPlateau_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b0a0b75f7b31244421f02cab719a342461a9f7c1 --- /dev/null +++ b/doc/paddle/api/paddle/optimizer/ReduceLROnPlateau_cn.rst @@ -0,0 +1,89 @@ +.. _cn_api_fluid_dygraph_ReduceLROnPlateau: + +ReduceLROnPlateau +------------------------------- + +**注意:该API仅支持【动态图】模式** + +.. 
py:class:: paddle.fluid.dygraph.ReduceLROnPlateau(learning_rate, mode='min', decay_rate=0.1, patience=10, verbose=False, threshold=1e-4, threshold_mode='rel', cooldown=0, min_lr=0, eps=1e-8, dtype='float32') + +该API为 ``loss`` 自适应的学习率衰减策略。默认情况下,当 ``loss`` 停止下降时,降低学习率(如果将 ``mode`` 设置为 `'max'` ,此时判断逻辑相反, ``loss`` 停止上升时降低学习率)。其思想是:一旦模型表现不再提升,将学习率降低2-10倍对模型的训练往往有益。 + +``loss`` 是传入到该类方法 ``step`` 中的参数,其必须是shape为[1]的1-D Tensor。 如果 ``loss`` 停止下降(``mode`` 为 `min` 时)超过 ``patience`` 个epoch,学习率将会减小为 +`learning_rate * decay_rate` 。 + +此外,每降低一次学习率后,将会进入一个时长为 ``cooldown`` 个epoch的冷静期,在冷静期内,将不会监控 ``loss`` 的变化情况,也不会衰减。 +在冷静期之后,会继续监控 ``loss`` 的上升或下降。 + +参数: + - **learning_rate** (Variable|float|int) - 初始学习率。其类型可以是Python的float类型,如果输入int类型则会被转为float类型。其也可以是shape为[1]的 + 1-D Tensor,且相应数据类型必须为 "float32" 或 "float64" 。 + - **mode** (str,可选) - `'min'` 和 `'max'` 之一。通常情况下,为 `'min'` ,此时当 ``loss`` 停止下降时学习率将减小。默认:`'min'` 。 + (注意:仅在特殊用法时,可以将其设置为 `'max'` ,此时判断逻辑相反, ``loss`` 停止上升学习率才减小) + - **decay_rate** (float,可选) - 学习率衰减的比例。`new_lr = origin_lr * decay_rate` ,它是值小于1.0的float型数字,默认: 0.1。 + - **patience** (int,可选) - 当 ``loss`` 连续 ``patience`` 个epoch没有下降(mode: 'min')或上升(mode: 'max')时,学习率才会减小。默认:10。 + - **verbose** (bool,可选) - 如果为 ``True`` , 会在每次更新optimizer中的learning_rate时,打印信息。默认:``False`` 。 + - **threshold** (float,可选) - ``threshold`` 和 ``threshold_mode`` 两个参数将会决定 ``loss`` 最小变化的阈值。小于该阈值的变化 + 将会被忽视。默认:1e-4。 + - **threshold_mode** (str,可选) - `'rel'` 和 `'abs'` 之一。在 `'rel'` 模式下, ``loss`` 最小变化的阈值是 `last_loss * threshold` , + 其中 ``last_loss`` 是 ``loss`` 在上个epoch的值。在 `'abs'` 模式下,``loss`` 最小变化的阈值是 `threshold` 。 默认:`'rel'`。 + - **cooldown** (int,可选) - 在学习速率每次减小之后,会进入时长为 ``cooldown`` 个epoch的冷静期。默认:0。 + - **min_lr** (float,可选) - 最小的学习率。减小后的学习率最低下界限。默认:0。 + - **eps** (float,可选) - 如果新旧学习率间的差异小于 ``eps`` ,则不会更新。默认值:1e-8。 + - **dtype** (str,可选) – 学习率值的数据类型,可以为"float32", "float64"。默认:"float32"。 + +返回: ``loss`` 自适应的学习率 + +返回类型:Variable + +**代码示例**: + + .. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + with fluid.dygraph.guard(): + x = np.random.uniform(-1, 1, [10, 10]).astype("float32") + linear = fluid.dygraph.Linear(10, 10) + input = fluid.dygraph.to_variable(x) + + adam = fluid.optimizer.Adam( + learning_rate = fluid.dygraph.ReduceLROnPlateau( + learning_rate = 1.0, + decay_rate = 0.5, + patience = 5, + verbose = True, + cooldown = 3), + parameter_list = linear.parameters()) + + for epoch in range(10): + total_loss = 0 + for bath_id in range(5): + out = linear(input) + loss = fluid.layers.reduce_mean(out) + total_loss += loss + adam.minimize(loss) + + avg_loss = total_loss/5 + + # 根据传入total_loss,调整学习率 + reduce_lr.step(avg_loss) + lr = adam.current_step_lr() + print("current avg_loss is %s, current lr is %s" % (avg_loss.numpy()[0], lr)) + + + +.. py:method:: step(loss) +需要在每个epoch调用该方法,其根据传入的 ``loss`` 调整optimizer中的学习率,调整后的学习率将会在下一次调用 ``optimizer.minimize`` 时生效。 + +参数: + - **loss** (Variable) - 类型:Variable,shape为[1]的1-D Tensor。将被用来判断是否需要降低学习率。如果 ``loss`` 连续 ``patience`` 个epochs没有下降, + 将会降低学习率。 + +返回: + 无 + +**代码示例**: + + 参照其类中的说明。 diff --git a/doc/paddle/api/paddle/optimizer/SGD_cn.rst b/doc/paddle/api/paddle/optimizer/SGD_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8810f79d267ab312ae682332240e047ad10771e6 --- /dev/null +++ b/doc/paddle/api/paddle/optimizer/SGD_cn.rst @@ -0,0 +1,17 @@ +.. _cn_api_fluid_optimizer_SGD: + +SGD +------------------------------- + +.. 
py:attribute:: paddle.fluid.optimizer.SGD + + + + +``SGDOptimizer`` 的别名 + + + + + + diff --git a/doc/paddle/api/paddle/optimizer/StepLR_cn.rst b/doc/paddle/api/paddle/optimizer/StepLR_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..cf4ffa74295a0ca21d5ebf9b905c4c40263edd19 --- /dev/null +++ b/doc/paddle/api/paddle/optimizer/StepLR_cn.rst @@ -0,0 +1,100 @@ +.. _cn_api_paddle_optimizer_StepLR: + +StepLR +----------------------------------- + +.. py:class:: paddle.optimizer.lr_scheduler.StepLR(learning_rate, step_size, gamma=0.1, last_epoch=-1, verbose=False) + +该接口提供一种学习率按指定 `间隔` 轮数衰减的功能。 + +衰减过程可以参考以下代码: + +.. code-block:: text + + learning_rate = 0.5 + step_size = 30 + gamma = 0.1 + if epoch < 30: + learning_rate = 0.5 + elif epoch < 60: + learning_rate = 0.05 # 0.5 * 0.1 + else: + learning_rate = 0.005 # 0.05 * 0.1 + +参数 +::::::::: + - **learning_rate** (float) - 初始学习率,数据类型为Python float。 + - **step_size** :(int):学习率衰减轮数间隔。 + - **gamma** (float, 可选):衰减率,new_lr = origin_lr * gamma, 衰减率必须小于等于1.0,默认值为0.1。 + - **last_epoch** (int,可选): 上一轮的轮数,重启训练时设置为上一轮的epoch数。默认值为 -1,则为初始学习率 。 + - **verbose** (bool,可选):如果是 `True` ,则在每一轮更新时在标准输出 `stdout` 输出一条信息。默认值为 ``False`` 。 + + +返回 +::::::::: +返回计算StepLR的可调用对象。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + # train on default dygraph mode + paddle.disable_static() + x = np.random.uniform(-1, 1, [10, 10]).astype("float32") + linear = paddle.nn.Linear(10, 10) + scheduler = paddle.optimizer.lr_scheduler.StepLR(learning_rate=0.5, step_size=5, gamma=0.8, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameter_list=linear.parameters()) + for epoch in range(20): + for batch_id in range(2): + x = paddle.to_tensor(x) + out = linear(x) + loss = paddle.reduce_mean(out) + loss.backward() + sgd.minimize(loss) + linear.clear_gradients() + scheduler.step() + + # train on static mode + paddle.enable_static() + main_prog = paddle.static.Program() + start_prog = paddle.static.Program() + with paddle.static.program_guard(main_prog, start_prog): + x = paddle.static.data(name='x', shape=[None, 4, 5]) + y = paddle.static.data(name='y', shape=[None, 4, 5]) + z = paddle.static.nn.fc(x, 100) + loss = paddle.mean(z) + scheduler = paddle.optimizer.lr_scheduler.StepLR(learning_rate=0.5, step_size=5, gamma=0.8, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler) + sgd.minimize(loss) + + exe = paddle.static.Executor() + exe.run(start_prog) + for epoch in range(20): + for batch_id in range(2): + out = exe.run( + main_prog, + feed={ + 'x': np.random.randn(3, 4, 5).astype('float32'), + 'y': np.random.randn(3, 4, 5).astype('float32') + }, + fetch_list=loss.name) + scheduler.step() + +.. py:method:: step(epoch=None) + +step函数需要在优化器的 `step()` 函数之后调用,调用之后将会根据epoch数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 + +参数: + - **epoch** (int,可选)- 指定具体的epoch数。默认值None,此时将会从-1自动累加 ``epoch`` 数。 + +返回: + 无。 + +**代码示例** : + + 参照上述示例代码。 + diff --git a/doc/paddle/api/paddle/reader/ComposeNotAligned_cn.rst b/doc/paddle/api/paddle/reader/ComposeNotAligned_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1f4a911c5cb8b7dbad64807f81a4114e411ebeab --- /dev/null +++ b/doc/paddle/api/paddle/reader/ComposeNotAligned_cn.rst @@ -0,0 +1,14 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_fluid_io_ComposeNotAligned: + +ComposeNotAligned +----------------- + +.. 
autoclass:: paddle.fluid.io.ComposeNotAligned + :members: + :inherited-members: + :noindex: + +表示一种错误状态,说明调用compose API时, reader的输出数据没有对齐。 diff --git a/doc/paddle/api/paddle/reader/buffered_cn.rst b/doc/paddle/api/paddle/reader/buffered_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..027c6346f735874d8c9accc38024c09b5940c429 --- /dev/null +++ b/doc/paddle/api/paddle/reader/buffered_cn.rst @@ -0,0 +1,17 @@ +.. _cn_api_fluid_io_buffered: + +buffered +------------------------------- + +.. py:function:: paddle.fluid.io.buffered(reader, size) + + + + +创建一个缓存数据读取器,它读取数据并且存储进缓存区,从缓存区读取数据将会加速,只要缓存不是空的。 + +参数: + - **reader** (callable) – 读取数据的reader + - **size** (int) – 最大buffer的大小 + +返回:缓存的reader(读取器) \ No newline at end of file diff --git a/doc/paddle/api/paddle/reader/cache_cn.rst b/doc/paddle/api/paddle/reader/cache_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e93e4c85d134c0feb2ff813a6127c89816baed76 --- /dev/null +++ b/doc/paddle/api/paddle/reader/cache_cn.rst @@ -0,0 +1,18 @@ +.. _cn_api_fluid_io_cache: + +cache +------------------------------- + +.. py:function:: paddle.fluid.io.cache(reader) + + + + +缓存reader数据到内存中,小心此方法可能会花长时间来处理数据,并且会占用大量内存。 ``reader()`` 只能被调用一次。 + +参数: + - **reader** (callable) – 读取数据的reader,每次都会yields数据。 + +返回:每次都会从内存中yields数据的一个装饰reader。 + +返回类型:生成器 \ No newline at end of file diff --git a/doc/paddle/api/paddle/reader/chain_cn.rst b/doc/paddle/api/paddle/reader/chain_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4a4872d268cc2dfbddad0a6d4720be54e5eb41c9 --- /dev/null +++ b/doc/paddle/api/paddle/reader/chain_cn.rst @@ -0,0 +1,47 @@ +.. _cn_api_fluid_io_chain: + +chain +------------------------------- + +.. py:function:: paddle.fluid.io.chain(*readers) + + + + +该接口将多个数据读取器组成一个数据读取器,它依次返回多个数据读取器的输出数据,同时不改变输出数据原先的格式。 + +举例来说,如果有3个输入读取器且输出分别为[0,0,0]、[10,10,10]和[20,20,20],那么调用该接口产生的新数据读取器的输出为[0,0,0], [10,10,10], [20,20,20]。 + +参数: + - **readers(list)** – 输入的数据读取器。 + +返回: 新的数据读取器。 + +返回类型:callable + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + def reader_creator_3(start): + def reader(): + for i in range(start, start + 3): + yield [i, i, i] + return reader + + c = fluid.io.chain(reader_creator_3(0), reader_creator_3(10), reader_creator_3(20)) + for e in c(): + print(e) + # 输出结果如下: + # [0, 0, 0] + # [1, 1, 1] + # [2, 2, 2] + # [10, 10, 10] + # [11, 11, 11] + # [12, 12, 12] + # [20, 20, 20] + # [21, 21, 21] + # [22, 22, 22] + diff --git a/doc/paddle/api/paddle/reader/compose_cn.rst b/doc/paddle/api/paddle/reader/compose_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b4393bab5db375413712c27b60fb0699e4d85370 --- /dev/null +++ b/doc/paddle/api/paddle/reader/compose_cn.rst @@ -0,0 +1,34 @@ +.. _cn_api_fluid_io_compose: + +compose +------------------------------- + +.. py:function:: paddle.fluid.io.compose(*readers, **kwargs) + + + + +该接口将多个数据读取器组合为一个数据读取器,返回读取器的输出包含所有输入读取器的输出。 + +例如:如果输入为三个reader,三个reader的输出分别为:(1,2)、3、(4,5),则组合reader的输出为:(1,2,3,4,5)。 + +参数: + - **readers** - 将被组合的多个数据读取器(Reader),数据读取器的定义参见 :ref:`cn_api_paddle_data_reader_reader` 。 + - **check_alignment** (bool) - 可选,指明是否对输入reader进行对齐检查,默认值为True。如果为True,将检查输入reader是否正确对齐。如果为False,将不检查对齐并自动丢弃无法对齐的末尾数据。 + +返回:数据读取器(Reader)。 + +**代码示例**: + +.. 
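补充说明:前文的 ``buffered`` 与 ``cache`` 接口未附代码示例,这里按其签名补充一个极简用法草图(reader 的定义仅为示意,并非官方示例);``compose`` 的示例见下方代码。

.. code-block:: python

    import paddle.fluid as fluid

    def reader():
        for i in range(5):
            yield i

    # buffered:使用大小为 2 的缓冲区预读数据
    buffered_reader = fluid.io.buffered(reader, 2)
    for e in buffered_reader():
        print(e)  # 依次输出 0 1 2 3 4

    # cache:首次遍历时将数据缓存到内存,之后从内存中读取
    cached_reader = fluid.io.cache(reader)
    for e in cached_reader():
        print(e)  # 依次输出 0 1 2 3 4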
code-block:: python + + import paddle.fluid as fluid + def reader_creator_10(dur): + def reader(): + for i in range(10): + yield i + return reader + + reader = fluid.io.compose(reader_creator_10(0), reader_creator_10(0)) + +注意: 运行过程可能会抛出异常 ``ComposeNotAligned`` ,原因是输入的readers数据未对齐。 当check_alignment设置为False时,不会检查并触发该异常。 diff --git a/doc/paddle/api/paddle/reader/firstn_cn.rst b/doc/paddle/api/paddle/reader/firstn_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ba9c1d427ab3ec1946dab0e78f1a2021a712fe94 --- /dev/null +++ b/doc/paddle/api/paddle/reader/firstn_cn.rst @@ -0,0 +1,30 @@ +.. _cn_api_fluid_io_firstn: + +firstn +------------------------------- + +.. py:function:: paddle.fluid.io.firstn(reader, n) + + + + +该接口创建一个数据读取器,它可以返回的最大样本数为n。 + +参数: + - **reader** (callable) – 输入的数据读取器。 + - **n** (int) – 可以返回的最大样本数。 + +返回: 新的的数据读取器。 + +返回类型: callable + +.. code-block:: python + + import paddle.fluid as fluid + def reader(): + for i in range(100): + yield i + firstn_reader = fluid.io.firstn(reader, 5) + for e in firstn_reader(): + print(e) + # 输出结果为:0 1 2 3 4 diff --git a/doc/paddle/api/paddle/reader/map_readers_cn.rst b/doc/paddle/api/paddle/reader/map_readers_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..cb50e62839cd523184fef37d4ff9dc625c52fc6b --- /dev/null +++ b/doc/paddle/api/paddle/reader/map_readers_cn.rst @@ -0,0 +1,40 @@ +.. _cn_api_fluid_io_map_readers: + +map_readers +------------------------------- + +.. py:function:: paddle.fluid.io.map_readers(func, *readers) + + + + +该接口将创建一个数据读取器(Reader),其中 `func` 函数的输出将直接作为新数据读取器的输出, `readers` 的输出将作为函数 `func` 的输入参数。 + +例如:如果输入的 `readers` 为两个输出分别为:2、3 的 `reader` ,输入的 `func` 为乘法函数 `mul(x, y)` ,则得到的新建 `reader` 的输出为:6。 + +参数: + - **func** - 读取数据并返回数据项的函数, `func` 的输出将直接作为新创建的数据读取器的输出。 + + - **readers** - 输入的一个或多个数据读取器(Reader),这些数据读取器的输出数据将作为函数 `func` 的输入参数。数据读取器的定义参见 :ref:`cn_api_paddle_data_reader_reader` 。 + +返回: 新创建的数据读取器(Reader) + +**代码示例**: + +.. code-block:: python + + + import paddle.fluid as fluid + d = {"h": 0, "i": 1} + def func(x): + return d[x] + + def reader(): + yield "h" + yield "i" + + map_reader_result = fluid.io.map_readers(func, reader) + + + + diff --git a/doc/paddle/api/paddle/reader/multiprocess_reader_cn.rst b/doc/paddle/api/paddle/reader/multiprocess_reader_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ab600239eb702ffa2c503f88d5bd9d6ef6bda443 --- /dev/null +++ b/doc/paddle/api/paddle/reader/multiprocess_reader_cn.rst @@ -0,0 +1,59 @@ +.. _cn_api_fluid_io_multiprocess_reader: + +multiprocess_reader +------------------------------- + +.. py:function:: paddle.fluid.io.multiprocess_reader(readers, use_pipe=True, queue_size=1000) + + + + +使用python多进程从 ``readers`` 中读取数据,然后使用 ``multiprocessing.Pipe`` 或 ``multiprocessing.Queue`` 合并所有数据。 ``readers`` 列表中的每个reader会被创建一个独立的进程来调用,reader之间应该相互独立,互不影响,避免出现多进程读取的冲突问题. + +multiprocess.queue需要/dev/shm的rw访问权限,某些平台不支持。 + +参数: + - **readers** (list(generator)|tuple(generator)) - python生成器list, 用来读取数据 + - **use_pipe** (bool,可选) - use_pipe控制multiprocess_reader内部用 ``pipe`` 还是 ``queue`` 来实现进程间通信,默认为 ``True`` 使用 ``pipe`` 进行通信 + - **queue_size** (int,可选) - 如果使用queue来进行进程间通信 (``use_pipe=False``), 则该参数用来设定队列大小 + +返回:使用多进程封装readers之后的reader + +返回类型:python生成器 + + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + from paddle.fluid.io import multiprocess_reader + import numpy as np + + + def fake_reader(start, end): + def __impl__(): + for i in range(start, end): + yield [np.array([1, 2, 3]) * i], + return __impl__ + + + with fluid.program_guard(fluid.Program(), fluid.Program()): + place = fluid.CPUPlace() + image = fluid.layers.data( + name='image', dtype='int64', shape=[3]) + fluid.layers.Print(image) + reader = fluid.io.PyReader( + feed_list=[image], capacity=2) + image_p_1 = image + 1 + decorated_reader = multiprocess_reader( + [fake_reader(1, 5), fake_reader(6, 10)], False) + + reader.decorate_sample_generator(decorated_reader, batch_size=2, places=[place]) + + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + + for data in reader(): + exe.run(feed=data, fetch_list=[image_p_1]) + diff --git a/doc/paddle/api/paddle/reader/xmap_readers_cn.rst b/doc/paddle/api/paddle/reader/xmap_readers_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5f434ecb9525c4f8dd746319d2b4baace88c89ae --- /dev/null +++ b/doc/paddle/api/paddle/reader/xmap_readers_cn.rst @@ -0,0 +1,57 @@ +.. _cn_api_fluid_io_xmap_readers: + +xmap_readers +------------------------------- + +.. py:function:: paddle.fluid.io.xmap_readers(mapper, reader, process_num, buffer_size, order=False) + + + + +多线程下,使用自定义映射器 reader 返回样本到输出队列。 + +参数: + - **mapper** (callable): 映射 reader 数据的函数。 + - **reader** (callable): 产生数据的 reader。 + - **process_num** (int): 处理样本的线程数。 + - **buffer_size** (int): 数据缓冲队列大小。 + - **order** (bool): 是否保持原始 reader 数据顺序,默认为 False。 + +返回:一个用户定义的 reader `装饰器 `_ 。 + +返回类型:callable,可调用对象。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import time + + def reader_creator_10(dur): + def reader(): + for i in range(10): + time.sleep(dur) + yield i + return reader + + def mapper(x): + return (x + 1) + + orders = (True, False) + thread_num = (1, 2, 4, 8, 16) + buffer_size = (1, 2, 4, 8, 16) + for order in orders: + for t_num in thread_num: + for size in buffer_size: + user_reader = fluid.io.xmap_readers(mapper, + reader_creator_10(0), + t_num, size, order) + for n in range(3): + result = list() + for i in user_reader(): + result.append(i) + if not order: + result.sort() + for idx, e in enumerate(result): + assert e == mapper(idx) \ No newline at end of file diff --git a/doc/paddle/api/paddle/static/input/InputSpec_cn.rst b/doc/paddle/api/paddle/static/input/InputSpec_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e431a1a11943ef9b9d98d2f135824f23956a016a --- /dev/null +++ b/doc/paddle/api/paddle/static/input/InputSpec_cn.rst @@ -0,0 +1,126 @@ +.. _cn_api_static_cn_InputSpec: + +InputSpec +------------------------------- + + +.. py:class:: paddle.static.InputSpec(shape=None, dtype='float32', name=None) +用于描述模型输入的签名信息,包括shape、dtype和name。 + +此接口常用于指定高层API中模型的输入张量信息,或动态图转静态图时,指定被 ``paddle.jit.to_static`` 装饰的forward函数每个输入参数的张量信息。 + +参数: + - **shape** (list|tuple)- 声明维度信息的list或tuple,默认值为None。 + - **dtype** (np.dtype|VarType|str,可选)- 数据类型,支持bool,float16,float32,float64,int8,int16,int32,int64,uint8。默认值为float32。 + - **name** (str)- 被创建对象的名字,具体用法请参见 :ref:`api_guide_Name` 。 + +返回:初始化后的 ``InputSpec`` 对象 + +返回类型:InputSpec + +**代码示例** + +.. 
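补充说明:除下方的基础示例外,这里再给出一个将 ``InputSpec`` 与 ``paddle.jit.to_static`` 的 ``input_spec`` 参数搭配使用的简单草图,用于说明动转静场景下声明输入张量信息的典型写法;网络结构与各变量名均为示意。

.. code-block:: python

    import numpy as np
    import paddle
    from paddle.static import InputSpec

    paddle.disable_static()

    class SimpleNet(paddle.nn.Layer):
        def __init__(self):
            super(SimpleNet, self).__init__()
            self.linear = paddle.nn.Linear(784, 10)

        # 通过 input_spec 声明 forward 输入的 shape/dtype/name
        @paddle.jit.to_static(input_spec=[InputSpec(shape=[None, 784], dtype='float32', name='x')])
        def forward(self, x):
            return self.linear(x)

    net = SimpleNet()
    out = net(paddle.to_tensor(np.random.randn(2, 784).astype('float32')))
    print(out.shape)  # [2, 10]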
code-block:: python + + from paddle.static import InputSpec + + input = InputSpec([None, 784], 'float32', 'x') + label = InputSpec([None, 1], 'int64', 'label') + print(input) # InputSpec(shape=(-1, 784), dtype=VarType.FP32, name=x) + print(label) # InputSpec(shape=(-1, 1), dtype=VarType.INT64, name=label) + + +.. py:method:: from_tensor(tensor, name=None) +该接口将根据输入Tensor的shape、dtype等信息构建InputSpec对象。 + +参数: + - **tensor** (Tensor) - 用于构建InputSpec的源Tensor + - **name** (str): 被创建对象的名字,具体用法请参见 :ref:`api_guide_Name` 。 默认为:None。 + + +返回:根据Tensor信息构造的 ``InputSpec`` 对象 + +返回类型:InputSpec + + +**代码示例** + +.. code-block:: python + + import numpy as np + import paddle + from paddle.static import InputSpec + + paddle.disable_static() + + x = paddle.to_tensor(np.ones([2, 2], np.float32)) + x_spec = InputSpec.from_tensor(x, name='x') + print(x_spec) # InputSpec(shape=(2, 2), dtype=VarType.FP32, name=x) + + +.. py:method:: from_numpy(ndarray, name=None) +该接口将根据输入numpy ndarray的shape、dtype等信息构建InputSpec对象。 + +参数: + - **ndarray** (Tensor) - 用于构建InputSpec的numpy ndarray + - **name** (str): 被创建对象的名字,具体用法请参见 :ref:`api_guide_Name` 。 默认为:None。 + + +返回:根据ndarray信息构造的 ``InputSpec`` 对象 + +返回类型:InputSpec + + +**代码示例** + +.. code-block:: python + + import numpy as np + from paddle.static import InputSpec + + x = np.ones([2, 2], np.float32) + x_spec = InputSpec.from_numpy(x, name='x') + print(x_spec) # InputSpec(shape=(2, 2), dtype=VarType.FP32, name=x) + + +.. py:method:: batch(batch_size) +该接口将batch_size插入到当前InputSpec对象的shape元组最前面。 + +参数: + - **batch_size** (int) - 被插入的batch size整型数值 + +返回: 更新shape信息后的 ``InputSpec`` 对象 + +返回类型:InputSpec + + +**代码示例** + +.. code-block:: python + + from paddle.static import InputSpec + + x_spec = InputSpec(shape=[64], dtype='float32', name='x') + x_spec.batch(4) + print(x_spec) # InputSpec(shape=(4, 64), dtype=VarType.FP32, name=x) + + +.. py:method:: unbatch() +该接口将当前InputSpec对象shape[0]值移除。 + + +返回: 更新shape信息后的 ``InputSpec`` 对象 + +返回类型:InputSpec + + +**代码示例** + +.. code-block:: python + + from paddle.static import InputSpec + + x_spec = InputSpec(shape=[4, 64], dtype='float32', name='x') + x_spec.unbatch() + print(x_spec) # InputSpec(shape=(64,), dtype=VarType.FP32, name=x) + diff --git a/doc/paddle/api/paddle/static/input/data_cn.rst b/doc/paddle/api/paddle/static/input/data_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..860068cbaf6259ccac54f619fdfbf3b48e3d3c87 --- /dev/null +++ b/doc/paddle/api/paddle/static/input/data_cn.rst @@ -0,0 +1,54 @@ +.. _cn_api_static_cn_data: + +data +------------------------------- + + +.. py:function:: paddle.static.data(name, shape, dtype=None, lod_level=0) + + + + +该OP会在全局block中创建变量(Variable),该全局变量可被计算图中的算子(operator)访问。该变量可作为占位符用于数据输入。例如用执行器(Executor)feed数据进该变量,当 ``dtype`` 为None时, ``dtype`` 将通过 ``padle.get_default_dtype()`` 获取全局类型。 + + +参数: + - **name** (str)- 被创建的变量的名字,具体用法请参见 :ref:`api_guide_Name` 。 + - **shape** (list|tuple)- 声明维度信息的list或tuple。可以在某个维度上设置None或-1,以指示该维度可以是任何大小。例如,将可变batchsize设置为None或-1。 + - **dtype** (np.dtype|str,可选)- 数据类型,支持bool,float16,float32,float64,int8,int16,int32,int64,uint8。默认值为None。当 ``dtype`` 为None时, ``dtype`` 将通过 ``padle.get_default_dtype()`` 获取全局类型。 + - **lod_level** (int,可选)- LoDTensor变量的LoD level数,LoD level是PaddlePaddle的高级特性,一般任务中不会需要更改此默认值,关于LoD level的详细适用场景和用法请见 :ref:`cn_user_guide_lod_tensor` 。默认值为0。 + +返回:全局变量,可进行数据访问 + +返回类型:Variable + +**代码示例**: + +.. 
code-block:: python + + import numpy as np + import paddle.fluid as fluid + import paddle + # Creates a variable with fixed size [3, 2, 1] + # User can only feed data of the same shape to x + # the dtype is not set, so it will set "float32" by + # paddle.get_default_dtype(). You can use paddle.get_default_dtype() to + # change the global dtype + x = paddle.static.data(name='x', shape=[3, 2, 1]) + # Creates a variable with changeable batch size -1. + # Users can feed data of any batch size into y, + # but size of each data sample has to be [2, 1] + y = paddle.static.data(name='y', shape=[-1, 2, 1], dtype='float32') + z = x + y + # In this example, we will feed x and y with np-ndarray "1" + # and fetch z, like implementing "1 + 1 = 2" in PaddlePaddle + feed_data = np.ones(shape=[3, 2, 1], dtype=np.float32) + exe = fluid.Executor(fluid.CPUPlace()) + out = exe.run(fluid.default_main_program(), + feed={ + 'x': feed_data, + 'y': feed_data + }, + fetch_list=[z.name]) + # np-ndarray of shape=[3, 2, 1], dtype=float32, whose elements are 2 + print(out) diff --git a/doc/paddle/api/paddle/tensor/creation/Tensor_cn.rst b/doc/paddle/api/paddle/tensor/creation/Tensor_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f6e15aee81bf14e1f299ebb22319065628f8ace7 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/creation/Tensor_cn.rst @@ -0,0 +1,1426 @@ +.. _cn_api_paddle_Tensor: + +Tensor +------------------------------- + +.. py:class:: paddle.Tensor + +``Tensor`` 是Paddle中最为基础的数据结构,有几种创建Tensor的不同方式: + +- 用预先存在的 ``data`` 数据创建1个Tensor,请参考 :ref:`cn_api_paddle_to_tensor` +- 创建一个指定 ``shape`` 的Tensor,请参考 :ref:`cn_api_tensor_ones` 、 :ref:`cn_api_tensor_zeros`、 :ref:`cn_api_tensor_full` +- 创建一个与其他Tensor具有相同 ``shape`` 与 ``dtype`` 的Tensor,请参考 :ref:`cn_api_tensor_ones_like` 、 :ref:`cn_api_tensor_zeros_like` 、 :ref:`cn_api_tensor_full_like` + +.. py:attribute:: dtype + +查看一个Tensor的数据类型,支持:'bool','float16','float32','float64','uint8','int8','int16','int32','int64' 类型。 + +**代码示例** + + .. code-block:: python + + import paddle + paddle.disable_static() + x = paddle.to_tensor([1.0, 2.0, 3.0]) + print("tensor's grad is: {}".format(x.dtype)) + +.. py:attribute:: grad + +查看一个Tensor的梯度,数据类型为numpy\.ndarray。 + +**代码示例** + + .. code-block:: python + + import paddle + paddle.disable_static() + x = paddle.to_tensor([1.0, 2.0, 3.0], stop_gradient=False) + y = paddle.to_tensor([4.0, 5.0, 6.0], stop_gradient=False) + z = x * y + z.backward() + print("tensor's grad is: {}".format(x.grad)) + +.. py:attribute:: name + +查看一个Tensor的name,Tensor的name是其唯一标识符,为python的字符串类型。 + +**代码示例** + + .. code-block:: python + + import paddle + paddle.disable_static() + print("Tensor name: ", paddle.to_tensor(1).name) + # Tensor name: generated_tensor_0 + +.. py:attribute:: ndim + +查看一个Tensor的维度,也称作rank。 + +**代码示例** + + .. code-block:: python + + import paddle + paddle.disable_static() + print("Tensor's number of dimensition: ", paddle.to_tensor([[1, 2], [3, 4]]).ndim) + # Tensor's number of dimensition: 2 + +.. py:attribute:: persistable + +查看一个Tensor的persistable属性,该属性为True时表示持久性变量,持久性变量在每次迭代之后都不会删除。模型参数、学习率等Tensor,都是 +持久性变量。 + +**代码示例** + + .. code-block:: python + + import paddle + paddle.disable_static() + print("Whether Tensor is persistable: ", paddle.to_tensor(1).persistable) + # Whether Tensor is persistable: false + + +.. py:attribute:: place + +查看一个Tensor的设备位置,Tensor可能的设备位置有三种:CPU/GPU/固定内存,其中固定内存也称为不可分页内存或锁页内存, +其与GPU之间具有更高的读写效率,并且支持异步传输,这对网络整体性能会有进一步提升,但其缺点是分配空间过多时可能会降低主机系统的性能, +因为其减少了用于存储虚拟内存数据的可分页内存。 + +**代码示例** + + .. 
code-block:: python + + import paddle + paddle.disable_static() + cpu_tensor = paddle.to_tensor(1, place=paddle.CPUPlace()) + print(cpu_tensor.place) + +.. py:attribute:: shape + +查看一个Tensor的shape,shape是Tensor的一个重要的概念,其描述了tensor在每个维度上的元素数量。 + +**代码示例** + + .. code-block:: python + + import paddle + paddle.disable_static() + print("Tensor's shape: ", paddle.to_tensor([[1, 2], [3, 4]]).shape) + # Tensor's shape: [2, 2] + +.. py:attribute:: stop_gradient + +查看一个Tensor是否计算并传播梯度,如果stop_gradient为True,则该Tensor不会计算梯度,并会阻绝Autograd的梯度传播。 +反之,则会计算梯度并传播梯度。用户自行创建的的Tensor,默认是True,模型参数的stop_gradient都为False。 + +**代码示例** + + .. code-block:: python + + import paddle + paddle.disable_static() + print("Tensor's stop_gradient: ", paddle.to_tensor([[1, 2], [3, 4]]).stop_gradient) + # Tensor's stop_gradient: True + +.. py:method:: abs(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_abs` + +.. py:method:: acos(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_acos` + +.. py:method:: add(y, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_add` + +.. py:method:: addcmul(tensor1, tensor2, value=1.0, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_addcmul` + +.. py:method:: addmm(x, y, beta=1.0, alpha=1.0, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_addmm` + +.. py:method:: allclose(y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_allclose` + +.. py:method:: argmax(axis=None, keepdim=False, dtype=int64, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_argmax` + +.. py:method:: argmin(axis=None, keepdim=False, dtype=int64, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_argmin` + +.. py:method:: argsort(axis=-1, descending=False, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_cn_argsort` + +.. py:method:: asin(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_asin` + +.. py:method:: astype(dtype) + +将Tensor的类型转换为 ``dtype`` ,并返回一个新的Tensor。 + +参数: + - **dtype** (str) - 转换后的dtype,支持'bool','float16','float32','float64','int8','int16', + 'int32','int64','uint8'。 + +返回:类型转换后的新的Tensor + +返回类型:Tensor + +**代码示例** + .. code-block:: python + + import paddle + paddle.disable_static() + x = paddle.to_tensor(1.0) + print("original tensor's dtype is: {}".format(x.dtype)) + print("new tensor's dtype is: {}".format(x.astype('float64').dtype)) + +.. py:method:: atan(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_atan` + +.. py:method:: backward(retain_graph=False) + +从当前Tensor开始计算反向的神经网络,传导并计算计算图中Tensor的梯度。 + +参数: + - **retain_graph** (bool, optional) - 如果为False,反向计算图将被释放。如果在backward()之后继续添加OP, + 需要设置为True,此时之前的反向计算图会保留。将其设置为False会更加节省内存。默认值:False。 + +返回:无 + +**代码示例** + .. code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + x = np.ones([2, 2], np.float32) + inputs = [] + for _ in range(10): + tmp = paddle.to_tensor(x) + # if we don't set tmp's stop_gradient as False then, all path to loss will has no gradient since + # there is no one need gradient on it. + tmp.stop_gradient=False + inputs.append(tmp) + ret = paddle.sums(inputs) + loss = paddle.reduce_sum(ret) + loss.backward() + +.. py:method:: bmm(y, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_paddle_tensor_bmm` + +.. 
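补充说明:上面列出的各个方法均可直接在 Tensor 对象上调用,效果与文中"请参考"所指向的对应算子一致。下面给出一个简单草图,输入数据仅为示意:

.. code-block:: python

    import paddle
    paddle.disable_static()

    x = paddle.to_tensor([[1.0, -2.0], [3.0, 4.0]])
    y = paddle.to_tensor([[0.5, 0.5], [0.5, 0.5]])

    print(x.abs())     # 逐元素绝对值,等价于 paddle.abs(x)
    print(x.add(y))    # 逐元素相加,等价于 paddle.add(x, y)
    print(x.argmax())  # 全部元素中最大值的下标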
py:method:: broadcast_to(shape, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_paddle_tensor_broadcast_to` + +.. py:method:: cast(dtype) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_cast` + +.. py:method:: ceil(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_ceil` + +.. py:method:: cholesky(upper=False, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_cholesky` + +.. py:method:: chunk(chunks, axis=0, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_cn_chunk` + + +.. py:method:: clear_gradient() + +清除当前Tensor的梯度。 + +返回:无 + +**代码示例** + .. code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + + x = np.ones([2, 2], np.float32) + inputs2 = [] + for _ in range(10): + tmp = paddle.to_tensor(x) + tmp.stop_gradient=False + inputs2.append(tmp) + ret2 = fluid.layers.sums(inputs2) + loss2 = fluid.layers.reduce_sum(ret2) + loss2.backward() + print(loss2.gradient()) + loss2.clear_gradient() + print("After clear {}".format(loss2.gradient())) + + +.. py:method:: clip(min=None, max=None, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_clip` + +.. py:method:: concat(axis=0, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_concat` + +.. py:method:: cos(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_cos` + +.. py:method:: cosh(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_cosh` + +.. py:method:: cross(y, axis=None, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_linalg_cross` + +.. py:method:: cumsum(axis=None, dtype=None, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_cn_cumsum` + +.. py:method:: detach() + +返回一个新的Tensor,从当前计算图分离。 + +返回:与当前计算图分离的Tensor。 + +**代码示例** + .. code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + + data = np.random.uniform(-1, 1, [30, 10, 32]).astype('float32') + linear = Linear(32, 64) + data = paddle.to_tensor(data) + x = linear(data) + y = x.detach() + +.. py:method:: dim() + +查看一个Tensor的维度,也称作rank。 + +**代码示例** + + .. code-block:: python + + import paddle + paddle.disable_static() + print("Tensor's number of dimensition: ", paddle.to_tensor([[1, 2], [3, 4]]).dim()) + # Tensor's number of dimensition: 2 + +.. py:method:: dist(y, p=2) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_linalg_dist` + +.. py:method:: divide(y, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_divide` + +.. py:method:: dot(y, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_paddle_tensor_linalg_dot` + +.. py:method:: elementwise_add(y, axis=-1, act=None, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_elementwise_add` + +.. py:method:: elementwise_div(y, axis=-1, act=None, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_elementwise_div` + +.. py:method:: elementwise_floordiv(y, axis=-1, act=None, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_elementwise_floordiv` + +.. py:method:: elementwise_mod(y, axis=-1, act=None, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_elementwise_mod` + +.. py:method:: elementwise_pow(y, axis=-1, act=None, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_elementwise_pow` + +.. 
py:method:: elementwise_sub(y, axis=-1, act=None, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_elementwise_sub` + +.. py:method:: elementwise_sum(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_elementwise_sum` + +.. py:method:: equal(y, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_equal` + +.. py:method:: equal_all(y, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_equal_all` + +.. py:method:: erf(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_erf` + +.. py:method:: exp(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_exp` + +.. py:method:: expand(shape, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_expand` + +.. py:method:: expand_as(y, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_expand_as` + +.. py:method:: flatten(start_axis=0, stop_axis=-1, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_flatten` + +.. py:method:: flip(axis, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_flip` + +.. py:method:: floor(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_floor` + +.. py:method:: floor_divide(y, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_floor_divide` + +.. py:method:: floor_mod(y, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_remainder` + +.. py:method:: gather(index, axis=None, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_gather` + +.. py:method:: gather_nd(index, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_cn_gather_nd` + +.. py:method:: gradient() + +与 ``Tensor.grad`` 相同,查看一个Tensor的梯度,数据类型为numpy\.ndarray。 + +返回:该Tensor的梯度 +返回类型:numpy\.ndarray + +**代码示例** + .. code-block:: python + + import paddle + paddle.disable_static() + x = paddle.to_tensor([1.0, 2.0, 3.0], stop_gradient=False) + y = paddle.to_tensor([4.0, 5.0, 6.0], stop_gradient=False) + z = x * y + z.backward() + print("tensor's grad is: {}".format(x.grad)) + +.. py:method:: greater_equal(y, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_cn_greater_equal` + +.. py:method:: greater_than(y, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_cn_greater_than` + +.. py:method:: has_inf() + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_has_inf` + +.. py:method:: has_nan() + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_has_nan` + +.. py:method:: histogram(bins=100, min=0, max=0) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_histogram` + +.. py:method:: increment(value=1.0, in_place=True) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_increment` + +.. py:method:: index_sample(index) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_search_index_sample` + +.. py:method:: index_select(index, axis=0, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_search_index_select` + +.. py:method:: inverse(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_inverse` + +.. py:method:: is_empty(cond=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_is_empty` + +.. py:method:: isfinite(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_isfinite` + +.. py:method:: isinf(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_isinf` + +.. 
py:method:: isnan(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_isnan` + +.. py:method:: kron(y, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_paddle_tensor_kron` + +.. py:method:: less_equal(y, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_cn_less_equal` + +.. py:method:: less_than(y, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_cn_less_than` + +.. py:method:: log(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_log` + +.. py:method:: log1p(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_paddle_tensor_log1p` + +.. py:method:: logical_and(y, out=None, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_logical_and` + +.. py:method:: logical_not(out=None, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_logical_not` + +.. py:method:: logical_or(y, out=None, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_logical_or` + +.. py:method:: logical_xor(y, out=None, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_logical_xor` + +.. py:method:: logsigmoid() + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_logsigmoid` + +.. py:method:: logsumexp(axis=None, keepdim=False, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_paddle_tensor_math_logsumexp` + +.. py:method:: masked_select(mask, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_masked_select` + +.. py:method:: matmul(y, transpose_x=False, transpose_y=False, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_matmul` + +.. py:method:: max(axis=None, keepdim=False, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_paddle_tensor_max` + +.. py:method:: maximum(y, axis=-1, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_paddle_tensor_maximum` + +.. py:method:: mean(axis=None, keepdim=False, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_cn_mean` + +.. py:method:: min(axis=None, keepdim=False, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_paddle_tensor_min` + +.. py:method:: minimum(y, axis=-1, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_paddle_tensor_minimum` + +.. py:method:: mm(mat2, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_mm` + +.. py:method:: mod(y, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_remainder` + +.. py:method:: multiplex(index) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_multiplex` + +.. py:method:: multiply(y, axis=-1, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_multiply` + +.. py:method:: ndimension() + +查看一个Tensor的维度,也称作rank。 + +**代码示例** + + .. code-block:: python + + import paddle + paddle.disable_static() + print("Tensor's number of dimensition: ", paddle.to_tensor([[1, 2], [3, 4]]).ndimension()) + # Tensor's number of dimensition: 2 + +.. py:method:: nonzero(as_tuple=False) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_search_nonzero` + +.. py:method:: norm(p=fro, axis=None, keepdim=False, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_norm` + +.. py:method:: not_equal(y, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_not_equal` + +.. py:method:: numel(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_numel` + +.. 
py:method:: numpy() + +将当前Tensor转化为numpy\.ndarray。 + +返回:Tensor转化成的numpy\.ndarray。 +返回类型:numpy\.ndarray + +**代码示例** + .. code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + + data = np.random.uniform(-1, 1, [30, 10, 32]).astype('float32') + linear = paddle.nn.Linear(32, 64) + data = paddle.to_tensor(data) + x = linear(data) + print(x.numpy()) + +.. py:method:: pow(y, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_pow` + +.. py:method:: prod(axis=None, keepdim=False, dtype=None, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_cn_prod` + +.. py:method:: rank() + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_rank` + +.. py:method:: reciprocal(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_reciprocal` + +.. py:method:: reduce_all(dim=None, keep_dim=False, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_reduce_all` + +.. py:method:: reduce_any(dim=None, keep_dim=False, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_reduce_any` + +.. py:method:: reduce_max(dim=None, keep_dim=False, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_reduce_max` + +.. py:method:: reduce_mean(dim=None, keep_dim=False, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_reduce_mean` + +.. py:method:: reduce_min(dim=None, keep_dim=False, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_reduce_min` + +.. py:method:: reduce_prod(dim=None, keep_dim=False, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_reduce_prod` + +.. py:method:: reduce_sum(dim=None, keep_dim=False, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_reduce_sum` + +.. py:method:: remainder(y, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_remainder` + +.. py:method:: reshape(shape, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_reshape` + +.. py:method:: reverse(axis, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_reverse` + +.. py:method:: roll(shifts, axis=None, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_manipulation_roll` + +.. py:method:: round(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_round` + +.. py:method:: rsqrt(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_rsqrt` + +.. py:method:: scale(scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_scale` + +.. py:method:: scatter(index, updates, overwrite=True, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_scatter` + +.. py:method:: scatter_nd(updates, shape, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_scatter_nd` + +.. py:method:: scatter_nd_add(index, updates, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_scatter_nd_add` + +.. py:method:: set_value(value) + +设置当前Tensor的值。 + +参数: + - **value** (Tensor|np.ndarray) - 需要被设置的值,类型为Tensor或者numpy\.array。 + +**代码示例** + .. 
code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + + data = np.ones([3, 1024], dtype='float32') + linear = paddle.nn.Linear(1024, 4) + input = paddle.to_tensor(data) + linear(input) # call with default weight + custom_weight = np.random.randn(1024, 4).astype("float32") + linear.weight.set_value(custom_weight) # change existing weight + out = linear(input) # call with different weight + +返回:计算后的Tensor + +.. py:method:: shard_index(index_num, nshards, shard_id, ignore_value=-1) + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_shard_index` + +.. py:method:: sigmoid() + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_sigmoid` + +.. py:method:: sign(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_sign` + +.. py:method:: sin(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_sin` + +.. py:method:: sinh(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_sinh` + +.. py:method:: size() + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_size` + +.. py:method:: slice(axes, starts, ends) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_slice` + +.. py:method:: softplus() + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_softplus` + +.. py:method:: softsign() + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_softsign` + +.. py:method:: sort(axis=-1, descending=False, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_sort` + +.. py:method:: split(num_or_sections, axis=0, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_split` + +.. py:method:: sqrt(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_sqrt` + +.. py:method:: square(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_square` + +.. py:method:: squeeze(axis=None, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_squeeze` + +.. py:method:: stack(axis=0, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_stack` + +.. py:method:: stanh(scale_a=0.67, scale_b=1.7159, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_stanh` + +.. py:method:: std(axis=None, unbiased=True, keepdim=False, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_cn_std` + +.. py:method:: strided_slice(axes, starts, ends, strides) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_strided_slice` + +.. py:method:: sum(axis=None, dtype=None, keepdim=False, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_sum` + +.. py:method:: sums(out=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_sums` + +.. py:method:: t(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_paddle_tensor_t` + +.. py:method:: tanh(name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_tanh` + +.. py:method:: tanh_shrink() + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_tanh_shrink` + +.. py:method:: tile(repeat_times, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_tile` + +.. py:method:: topk(k, axis=None, largest=True, sorted=True, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_topk` + +.. py:method:: trace(offset=0, axis1=0, axis2=1, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_trace` + +.. 
py:method:: transpose(perm, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_transpose` + +.. py:method:: unbind(axis=0) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_paddle_tensor_unbind` + +.. py:method:: unique(return_index=False, return_inverse=False, return_counts=False, axis=None, dtype=int64, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_unique` + +.. py:method:: unique_with_counts(dtype=int32) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_unique_with_counts` + +.. py:method:: unsqueeze(axis, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_unsqueeze` + +.. py:method:: unstack(axis=0, num=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_fluid_layers_unstack` + +.. py:method:: var(axis=None, unbiased=True, keepdim=False, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_cn_var` + +.. py:method:: where(x, y, name=None) + +返回:计算后的Tensor + +返回类型:Tensor + +请参考 :ref:`cn_api_tensor_where` \ No newline at end of file diff --git a/doc/paddle/api/paddle/tensor/creation/arange_cn.rst b/doc/paddle/api/paddle/tensor/creation/arange_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b423166d47ce6bdcda137600947d0fb11bc57e42 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/creation/arange_cn.rst @@ -0,0 +1,53 @@ +.. _cn_api_paddle_tensor_arange + +arange +------------------------------- + +.. py:function:: paddle.arange(start=0, end=None, step=1, dtype=None, name=None) + + + + +该OP返回以步长 ``step`` 均匀分隔给定数值区间[``start``, ``end``)的1-D Tensor,数据类型为 ``dtype``。 + +当 ``dtype`` 表示浮点类型时,为了避免浮点计算误差,建议给 ``end`` 加上一个极小值epsilon,使边界可以更加明确。 + +参数 +:::::::::: + - **start** (float|int|Tensor) - 区间起点(且区间包括此值)。当 ``start`` 类型是Tensor时,是形状为[1]且数据类型为int32、int64、float32、float64的Tensor。如果仅指定 ``start`` ,而 ``end`` 为None,则区间为[0, ``start``)。默认值为0。 + - **end** (float|int|Tensor, 可选) - 区间终点(且通常区间不包括此值)。当 ``end`` 类型是Tensor时,是形状为[1]且数据类型为int32、int64、float32、float64的Tensor。默认值为None。 + - **step** (float|int|Tensor, 可选) - 均匀分割的步长。当 ``step`` 类型是Tensor时,是形状为[1]且数据类型为int32、int64、float32、float64的Tensor。默认值为1。 + - **dtype** (str|np.dtype|core.VarDesc.VarType, 可选) - 输出Tensor的数据类型,支持int32、int64、float32、float64。当该参数值为None时, 输出Tensor的数据类型为int64。默认值为None. + - **name** (str, 可选) - 输出的名字。一般无需设置,默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 + +返回 +:::::::::: + Tensor: 以步长 ``step`` 均匀分割给定数值区间[``start``, ``end``)后得到的1-D Tensor, 数据类型为 ``dtype`` 。 + +抛出异常 +:::::::::: + - ``TypeError`` - 如果 ``dtype`` 不是int32、int64、float32、float64。 + +代码示例 +:::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.enable_imperative() + + out1 = paddle.arange(5) + # [0, 1, 2, 3, 4] + + out2 = paddle.arange(3, 9, 2.0) + # [3, 5, 7] + + # use 4.999 instead of 5.0 to avoid floating point rounding errors + out3 = paddle.arange(4.999, dtype='float32') + # [0., 1., 2., 3., 4.] + + start_var = paddle.imperative.to_variable(np.array([3])) + out4 = paddle.arange(start_var, 7) + # [3, 4, 5, 6] diff --git a/doc/paddle/api/paddle/tensor/creation/diag_cn.rst b/doc/paddle/api/paddle/tensor/creation/diag_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f525efb5890531e85eee24367e96b3f62c8fb0ea --- /dev/null +++ b/doc/paddle/api/paddle/tensor/creation/diag_cn.rst @@ -0,0 +1,84 @@ +.. _cn_api_paddle_cn_diag: + +diag +------------------------------- + +.. 
py:function:: paddle.diag(x, offset=0, padding_value=0, name=None) + + +如果 ``x`` 是向量(1-D张量),则返回带有 ``x`` 元素作为对角线的2-D方阵。 + +如果 ``x`` 是矩阵(2-D张量),则提取 ``x`` 的对角线元素,以1-D张量返回。 + +参数 ``offset`` 控制对角线偏移量: + +- 如果 ``offset`` = 0,则为主对角线。 +- 如果 ``offset`` > 0,则为上对角线。 +- 如果 ``offset`` < 0,则为下对角线。 + +参数 +::::::::: + - x(Tensor):输入的 `Tensor`。它的形状可以是一维或二维。其数据类型应为float32,float64,int32,int64。 + - offset(int,可选):对角线偏移量。正值表示上对角线,0表示主对角线,负值表示下对角线。 + - padding_value(int|float,可选):使用此值来填充指定对角线以外的区域。仅在输入为一维张量时生效。默认值为0。 + - name (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +::::::::: +``Tensor``,方阵或向量。输出数据类型与输入数据类型相同。 + + +代码示例 1 +::::::::: + +.. code-block:: python + + import paddle + + paddle.disable_static() + x = paddle.to_tensor([1, 2, 3]) + y = paddle.diag(x) + print(y.numpy()) + # [[1 0 0] + # [0 2 0] + # [0 0 3]] + + y = paddle.diag(x, offset=1) + print(y.numpy()) + # [[0 1 0 0] + # [0 0 2 0] + # [0 0 0 3] + # [0 0 0 0]] + + y = paddle.diag(x, padding_value=6) + print(y.numpy()) + # [[1 6 6] + # [6 2 6] + # [6 6 3]] + + +代码示例 2 +::::::::: + +.. code-block:: python + + import paddle + + paddle.disable_static() + x = paddle.to_tensor([[1, 2, 3], [4, 5, 6]]) + y = paddle.diag(x) + print(y.numpy()) + # [1 5] + + y = paddle.diag(x, offset=1) + print(y.numpy()) + # [2 6] + + y = paddle.diag(x, offset=-1) + print(y.numpy()) + # [4] + + + + + diff --git a/doc/paddle/api/paddle/tensor/creation/empty_cn.rst b/doc/paddle/api/paddle/tensor/creation/empty_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..eb843619e00bc400d203193429d4102ff4d572a6 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/creation/empty_cn.rst @@ -0,0 +1,46 @@ +.. _cn_api_tensor_empty: + +empty +------------------------------- + +.. py:function:: paddle.empty(shape, dtype=None, name=None) + + + +该OP创建形状大小为shape并且数据类型为dtype的Tensor,其中元素值是未初始化的。 + +参数: + - **shape** (list|tuple|Tensor) – 指定创建Tensor的形状(shape), 数据类型为int32 或者int64。 + - **dtype** (np.dtype|str, 可选)- 输出变量的数据类型,可以是bool, float16, float32, float64, int32, int64。若为None,则输出变量的数据类型为系统全局默认类型,默认值为None。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为None。 + +返回:返回一个根据``shape``和``dtype``创建并且尚未初始化的Tensor。 + +**代码示例**: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() # Now we are in imperative mode + paddle.set_device("cpu") # and use cpu device + + # example 1: argument ``shape`` is a list which doesn't contain Tensor. + data1 = paddle.empty(shape=[2,3], dtype='float32') + #[[4.3612203e+27 1.8176809e+31 1.3555911e-19] # uninitialized + # [1.1699684e-19 1.3563156e-19 3.6408321e-11]] # uninitialized + + # example 2: argument ``shape`` is a Tensor, the data type must be int64 or int32. + shape_data = np.array([2, 3]).astype('int32') + shape = paddle.to_tensor(shape_data) + data2 = paddle.empty(shape=shape, dtype='float32') + #[[1.7192326e-37 4.8125365e-38 1.9866003e-36] # uninitialized + # [1.3284029e-40 7.1117408e-37 2.5353012e+30]] # uninitialized + + # example 3: argument ``shape`` is a list which contains Tensor. 
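+    # the shape list below mixes a Python int (2) with a 1-D int32 Tensor (dim2);
+    # as in the examples above, the printed values are uninitialized memory and will differ between runs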
+ dim2_data = np.array([3]).astype('int32') + dim2 = paddle.to_tensor(dim2_data) + data3 = paddle.empty(shape=[2, dim2], dtype='float32') + #[[1.1024214e+24 7.0379409e+22 6.5737699e-34] # uninitialized + # [7.5563101e+31 7.7130405e+31 2.8020654e+20]] # uninitialized diff --git a/doc/paddle/api/paddle/tensor/creation/eye_cn.rst b/doc/paddle/api/paddle/tensor/creation/eye_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c105c4b06b517488c1f0100ffb4cd4ee26ac8f89 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/creation/eye_cn.rst @@ -0,0 +1,39 @@ +.. _cn_api_paddle_tensor_eye: + +eye +------------------------------- + +.. py:function:: paddle.tensor.eye(num_rows, num_columns=None, dtype=None, name=None) + +该OP用来构建二维Tensor(主对角线元素为1,其他元素为0)。 + +参数: + - **num_rows** (int) - 生成2-D Tensor的行数,数据类型为非负int32。 + - **num_columns** (int,可选) - 生成2-D Tensor的列数,数据类型为非负int32。若为None,则默认等于num_rows。 + - **dtype** (np.dtype|str, 可选) - 返回Tensor的数据类型,可为float16,float32,float64, int32, int64。若为None, 则默认等于float32。 + - **name** (str, 可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回: ``shape`` 为 [num_rows, num_columns]的Tensor。 + + +抛出异常: + - ``TypeError``: - 如果 ``dtype`` 的类型不是float16, float32, float64, int32, int64其中之一。 + - ``TypeError``: - 如果 ``num_columns`` 不是非负整数或者 ``num_rows`` 不是非负整数。 + +**代码示例**: + +.. code-block:: python + + import paddle + paddle.disable_static() # Now we are in imperative mode + data = paddle.eye(3, dtype='int32') + # [[1 0 0] + # [0 1 0] + # [0 0 1]] + data = paddle.eye(2, 3, dtype='int32') + # [[1 0 0] + # [0 1 0]] + + + + diff --git a/doc/paddle/api/paddle/tensor/creation/full_cn.rst b/doc/paddle/api/paddle/tensor/creation/full_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c1d47170014bed30360eae294d3a8ebc8767452c --- /dev/null +++ b/doc/paddle/api/paddle/tensor/creation/full_cn.rst @@ -0,0 +1,47 @@ +.. _cn_api_tensor_full: + +full +------------------------------- + +.. py:function:: paddle.full(shape, fill_value, dtype=None, name=None) + + + +该OP创建形状大小为 ``shape`` 并且数据类型为 ``dtype`` 的Tensor,其中元素值均为 ``fill_value`` 。 + +参数: + - **shape** (list|tuple|Tensor) – 指定创建Tensor的形状(shape), 数据类型为int32 或者int64。 + - **fill_value** (bool|float|int|Tensor) - 用于初始化输出Tensor的常量数据的值。注意:该参数不可超过输出变量数据类型的表示范围。 + - **dtype** (np.dtype|str, 可选)- 输出变量的数据类型。若为None,则输出变量的数据类型和输入变量相同,默认值为None。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:返回一个存储结果的Tensor,数据类型和dtype相同。 + + +**代码示例**: + +.. code-block:: python + + import paddle + + paddle.disable_static() # Now we are in imperative mode + data1 = paddle.full(shape=[2,1], fill_value=0, dtype='int64') + #[[0] + # [0]] + + # attr shape is a list which contains Tensor. + positive_2 = paddle.fill_constant([1], "int32", 2) + data3 = paddle.full(shape=[1, positive_2], dtype='float32', fill_value=1.5) + # [[1.5 1.5]] + + # attr shape is a Tensor. + shape = paddle.fill_constant([2], "int32", 2) + data4 = paddle.full(shape=shape, dtype='bool', fill_value=True) + # [[True True] + # [True True]] + + # attr fill_value is a Tensor. 
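+    # fill_value may also be a 1-element Tensor: every entry of data5 below is filled with its value (2.0)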
+ val = paddle.fill_constant([1], "float32", 2.0) + data5 = paddle.full(shape=[2,1], fill_value=val, dtype='float32') + # [[2.0] + # [2.0]] diff --git a/doc/paddle/api/paddle/tensor/creation/full_like_cn.rst b/doc/paddle/api/paddle/tensor/creation/full_like_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..48b2a38cd67faa56b87942ac7aeea70d360eb25a --- /dev/null +++ b/doc/paddle/api/paddle/tensor/creation/full_like_cn.rst @@ -0,0 +1,32 @@ +.. _cn_api_tensor_full_like: + +full_like +------------------------------- + +.. py:function:: paddle.full_like(x, fill_value, dtype=None, name=None) + + +该OP创建一个和 ``x`` 具有相同的形状并且数据类型为 ``dtype`` 的Tensor,其中元素值均为 ``fill_value`` , 当 ``dtype`` 为None的时候,Tensor数据类型和输入 ``x`` 相同。 + +参数: + - **x** (Tensor) – 输入Tensor, 输出Tensor和x具有相同的形状,x的数据类型可以是bool,float16,float32,float64,int32,int64。 + - **fill_value** (bool|float|int) - 用于初始化输出张量的常量数据的值。注意:该参数不可超过输出变量数据类型的表示范围。 + - **dtype** (np.dtype|str, 可选)- 输出变量的数据类型。若参数为None,则输出变量的数据类型和输入变量相同,默认值为None。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:返回一个存储结果的Tensor,数据类型和dtype相同。 + + + **代码示例**: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() # Now we are in imperative mode + input = paddle.full(shape=[2, 3], fill_value=0.0, dtype='float32', name='input') + output = paddle.full_like(input, 2.0) + # [[2. 2. 2.] + # [2. 2. 2.]] + diff --git a/doc/paddle/api/paddle/tensor/creation/meshgrid_cn.rst b/doc/paddle/api/paddle/tensor/creation/meshgrid_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..78c8ca682a11a2ae3d7c33806364ecbdf142b5a9 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/creation/meshgrid_cn.rst @@ -0,0 +1,41 @@ + +.. _cn_api_paddle_tensor_meshgrid: + +meshgrid +------------------------------- + +.. py:function:: paddle.tensor.meshgrid(*args, **kargs) + + + + +该OP的输入是张量或者包含张量的列表, 包含 k 个一维张量,对每个张量做扩充操作,输出 k 个 k 维张量。 + +参数: + - \* **args** (Variable|Variable数组)- 输入变量为 k 个一维张量,形状分别为(N1,), (N2,), ..., (Nk, )。支持数据类型为float32,float64,int32,int64。 + - ** **kargs** (可选)- 目前只接受name参数(str),具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回: +k 个 k 维张量,每个张量的形状均为(N1, N2, ..., Nk)。 + +返回类型: 变量(Variable) + +**代码示例** + + + +.. code-block:: python + + #动态图示例 + import paddle + import numpy as np + + paddle.enable_imperative() + + input_3 = np.random.randint(0, 100, [100, ]).astype('int32') + input_4 = np.random.randint(0, 100, [200, ]).astype('int32') + tensor_3 = paddle.imperative.to_variable(input_3) + tensor_4 = paddle.imperative.to_variable(input_4) + grid_x, grid_y = paddle.tensor.meshgrid(tensor_3, tensor_4) + #the shape of grid_x is (100, 200) + #the shape of grid_y is (100, 200) diff --git a/doc/paddle/api/paddle/tensor/creation/ones_cn.rst b/doc/paddle/api/paddle/tensor/creation/ones_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..05e9ded48eab47f1a2530e8f45f11a727fd71796 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/creation/ones_cn.rst @@ -0,0 +1,46 @@ +.. _cn_api_tensor_ones: + +ones +------------------------------- + +.. 
py:function:: paddle.ones(shape, dtype=None) + + + +该OP创建形状为 ``shape`` 、数据类型为 ``dtype`` 且值全为1的Tensor。 + +参数: + - **shape** (tuple|list|Tensor) - 输出Tensor的形状, ``shape`` 的数据类型为int32或者int64。 + - **dtype** (np.dtype|str, 可选) - 输出Tensor的数据类型,数据类型必须为bool、 float16、float32、float64、int32或int64。如果 ``dtype`` 为None,默认数据类型为float32。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:值全为1的Tensor,数据类型和 ``dtype`` 定义的类型一致。 + + +抛出异常: + - ``TypeError`` - 当 ``dtype`` 不是bool、 float16、float32、float64、int32、int64和None时。 + - ``TypeError`` - 当 ``shape`` 不是tuple、list、或者Tensor的时, 当 ``shape`` 为Tensor时,其数据类型不是int32或者int64。 + +**代码示例**: + +.. code-block:: python + + import paddle + + paddle.disable_static() + + #default dtype for ones OP + data1 = paddle.ones(shape=[3, 2]) + # [[1. 1.] + # [1. 1.] + # [1. 1.]] + data2 = paddle.ones(shape=[2, 2], dtype='int32') + # [[1 1] + # [1 1]] + + #attr shape is a Variable Tensor + shape = paddle.fill_constant(shape=[2], dtype='int32', value=2) + data3 = paddle.ones(shape=shape, dtype='int32') + # [[1 1] + # [1 1]] + diff --git a/doc/paddle/api/paddle/tensor/creation/ones_like_cn.rst b/doc/paddle/api/paddle/tensor/creation/ones_like_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..33b166ebcc8974385db52cf94def3529dfb307d5 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/creation/ones_like_cn.rst @@ -0,0 +1,37 @@ +.. _cn_api_tensor_ones_like: + +ones_like +------------------------------- + +.. py:function:: paddle.ones_like(x, dtype=None, name=None) + + +该OP返回一个和 ``x`` 具有相同形状的数值都为1的Tensor,数据类型为 ``dtype`` 或者和 ``x`` 相同。 + +参数 +:::::::::: + - **x** (Tensor) – 输入的Tensor,数据类型可以是bool,float16, float32,float64,int32,int64。输出Tensor的形状和 ``x`` 相同。如果 ``dtype`` 为None,则输出Tensor的数据类型与 ``x`` 相同。 + - **dtype** (str|np.dtype|core.VarDesc.VarType, 可选) - 输出Tensor的数据类型,支持bool,float16, float32,float64,int32,int64。当该参数值为None时, 输出Tensor的数据类型与 ``x`` 相同。默认值为None. + - **name** (str, 可选) - 输出的名字。一般无需设置,默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 + +返回 +:::::::::: + Tensor:和 ``x`` 具有相同形状的数值都为1的Tensor,数据类型为 ``dtype`` 或者和 ``x`` 相同。 + +抛出异常 +:::::::::: + - ``TypeError`` - 如果 ``dtype`` 不是bool、float16、float32、float64、int32、int64。 + +代码示例 +:::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.enable_imperative() + + x = paddle.imperative.to_variable(np.array([1,2,3], dtype='float32')) + out1 = paddle.ones_like(x) # [1., 1., 1.] + out2 = paddle.ones_like(x, dtype='int32') # [1, 1, 1] diff --git a/doc/paddle/api/paddle/tensor/creation/to_tensor_cn.rst b/doc/paddle/api/paddle/tensor/creation/to_tensor_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..24fff2ad786ff669677646d0279c4de3f60f09d9 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/creation/to_tensor_cn.rst @@ -0,0 +1,96 @@ +.. _cn_api_paddle_to_tensor: + +to_tensor +------------------------------- + + +.. 
py:function:: paddle.to_tensor(data, dtype=None, place=None, stop_gradient=True) + +该API通过已知的 ``data`` 来创建一个 tensor,tensor类型为 ``paddle.Tensor`` 或 ``paddle.ComplexTensor`` 。 +``data`` 可以是 scalar,tuple,list,numpy\.ndarray,paddle\.Tensor,paddle\.ComplexTensor。 + +如果 ``data`` 已经是一个tensor,且 ``dtype`` 、 ``place`` 没有发生变化,将不会发生 tensor 的拷贝并返回原来的 tensor。 +否则会创建一个新的tensor,且不保留原来计算图。 + +``ComplexTensor`` 是Paddle特有的数据类型。对于 ``ComplexTensor`` ``x`` , ``x.real`` 表示实部,``x.imag`` 表示虚部。 + +参数: + - **data** (scalar|tuple|list|ndarray|Tensor|ComplexTensor) - 初始化tensor的数据,可以是 + scalar,list,tuple,numpy\.ndarray,paddle\.Tensor,paddle\.ComplexTensor类型。 + - **dtype** (str, optional) - 创建tensor的数据类型,可以是 'bool' ,'float16','float32', + 'float64' ,'int8','int16','int32','int64','uint8'。如果创建的是 ``ComplexTensor`` , + 则dtype还可以是 'complex64','complex128'。默认值为None,如果 ``data`` 为python浮点类型,则从 + :ref:`cn_api_paddle_framework_get_default_dtype` 获取类型,如果 ``data`` 为其他类型, + 则会自动推导类型。 + - **place** (CPUPlace|CUDAPinnedPlace|CUDAPlace, optional) - 创建tensor的设备位置,可以是 + CPUPlace, CUDAPinnedPlace, CUDAPlace。默认值为None,使用全局的place。 + - **stop_gradient** (bool, optional) - 是否阻断Autograd的梯度传导。默认值为True,此时不进行梯度传传导。 + +返回:通过 ``data`` 创建的 tensor。其类型为 ``paddle.Tensor`` 或 ``paddle.ComplexTensor`` + +抛出异常: + - ``TypeError``: 当 ``data`` 不是 scalar,list,tuple,numpy.ndarray,paddle.Tensor或paddle.ComplexTensor类型时 + - ``ValueError``: 当 ``data`` 是包含不等长子序列的tuple或list时, 例如[[1, 2], [3, 4, 5]] + - ``TypeError``: 当 ``dtype`` 不是 bool,float16,float32,float64,int8,int16,int32,int64,uint8,complex64,complex128时 + - ``ValueError``: 当 ``place`` 不是 paddle.CPUPlace,paddle.CUDAPinnedPlace,paddle.CUDAPlace时 + +**代码示例**: + +.. code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + + type(paddle.to_tensor(1)) + # + + paddle.to_tensor(1) + # Tensor: generated_tensor_0 + # - place: CUDAPlace(0) # allocate on global default place CPU:0 + # - shape: [1] + # - layout: NCHW + # - dtype: int64_t + # - data: [1] + + x = paddle.to_tensor(1) + paddle.to_tensor(x, dtype='int32', place=paddle.CPUPlace()) # A new tensor will be constructed due to different dtype or place + # Tensor: generated_tensor_01 + # - place: CPUPlace + # - shape: [1] + # - layout: NCHW + # - dtype: int + # - data: [1] + + paddle.to_tensor((1.1, 2.2), place=paddle.CUDAPinnedPlace()) + # Tensor: generated_tensor_1 + # - place: CUDAPinnedPlace + # - shape: [2] + # - layout: NCHW + # - dtype: double + # - data: [1.1 2.2] + + paddle.to_tensor([[0.1, 0.2], [0.3, 0.4]], place=paddle.CUDAPlace(0), stop_gradient=False) + # Tensor: generated_tensor_2 + # - place: CUDAPlace(0) + # - shape: [2, 2] + # - layout: NCHW + # - dtype: double + # - data: [0.1 0.2 0.3 0.4] + + type(paddle.to_tensor([[1+1j, 2], [3+2j, 4]]), dtype='complex64') + # + + paddle.to_tensor([[1+1j, 2], [3+2j, 4]], dtype='complex64') + # ComplexTensor[real]: generated_tensor_0.real + # - place: CUDAPlace(0) + # - shape: [2, 2] + # - layout: NCHW + # - dtype: float + # - data: [1 2 3 4] + # ComplexTensor[imag]: generated_tensor_0.imag + # - place: CUDAPlace(0) + # - shape: [2, 2] + # - layout: NCHW + # - dtype: float + # - data: [1 0 2 0] \ No newline at end of file diff --git a/doc/paddle/api/paddle/tensor/creation/tril_cn.rst b/doc/paddle/api/paddle/tensor/creation/tril_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..66e69e15a81fc5e585dd4febd0c90e11b5cb3fd7 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/creation/tril_cn.rst @@ -0,0 +1,61 @@ +.. 
_cn_api_tensor_tril: + +tril +------------------------------- + +.. py:function:: paddle.tensor.tril(input, diagonal=0, name=None) + + + + +返回输入矩阵 `input` 的下三角部分,其余部分被设为0。 +矩形的下三角部分被定义为对角线上和下方的元素。 + +参数: + - **input** (Variable) : 输入Tensor input,数据类型支持 `float32`, `float64`, `int32`, `int64` 。 + - **diagonal** (int,可选) : 指定的对角线,默认值为0。如果diagonal = 0,表示主对角线; 如果diagonal是正数,表示主对角线之上的对角线; 如果diagonal是负数,表示主对角线之下的对角线。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:计算得到的Tensor。Tensor数据类型与输入 `input` 数据类型一致。 + +返回类型:Variable + + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle.tensor as tensor + import paddle.fluid as fluid + + data = np.arange(1, 13, dtype="int64").reshape(3,-1) + # array([[ 1, 2, 3, 4], + # [ 5, 6, 7, 8], + # [ 9, 10, 11, 12]]) + x = fluid.data(shape=(-1, 4), dtype='int64', name='x') + exe = fluid.Executor(fluid.CPUPlace()) + + # example 1, default diagonal + tril = tensor.tril(x) + tril_out, = exe.run(fluid.default_main_program(), feed={"x": data}, + fetch_list=[tril], return_numpy=True) + # array([[ 1, 0, 0, 0], + # [ 5, 6, 0, 0], + # [ 9, 10, 11, 0]]) + + # example 2, positive diagonal value + tril = tensor.tril(x, diagonal=2) + tril_out, = exe.run(fluid.default_main_program(), feed={"x": data}, + fetch_list=[tril], return_numpy=True) + # array([[ 1, 2, 3, 0], + # [ 5, 6, 7, 8], + # [ 9, 10, 11, 12]]) + + # example 3, negative diagonal value + tril = tensor.tril(x, diagonal=-1) + tril_out, = exe.run(fluid.default_main_program(), feed={"x": data}, + fetch_list=[tril], return_numpy=True) + # array([[ 0, 0, 0, 0], + # [ 5, 0, 0, 0], + # [ 9, 10, 0, 0]]) diff --git a/doc/paddle/api/paddle/tensor/creation/triu_cn.rst b/doc/paddle/api/paddle/tensor/creation/triu_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1ac3e9b83c35b5c9af7ab0976cb869d05bd369bb --- /dev/null +++ b/doc/paddle/api/paddle/tensor/creation/triu_cn.rst @@ -0,0 +1,62 @@ +.. _cn_api_tensor_triu: + +triu +------------------------------- + +.. py:function:: paddle.tensor.triu(input, diagonal=0, name=None) + + + + +返回输入矩阵 `input` 的上三角部分,其余部分被设为0。 +矩形的上三角部分被定义为对角线上和上方的元素。 + +参数: + - **input** (Variable) : 输入Tensor input,数据类型支持 `float32`, `float64`, `int32`, `int64` 。 + - **diagonal** (int,可选) : 指定的对角线,默认值为0。如果diagonal = 0,表示主对角线; 如果diagonal是正数,表示主对角线之上的对角线; 如果diagonal是负数,表示主对角线之下的对角线。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:计算得到的Tensor。Tensor数据类型与输入 `input` 数据类型一致。 + +返回类型:Variable + + +**代码示例**: + +.. 
code-block:: python + + import numpy as np + import paddle.fluid as fluid + import paddle.tensor as tensor + + data = np.arange(1, 13, dtype="int64").reshape(3,-1) + # array([[ 1, 2, 3, 4], + # [ 5, 6, 7, 8], + # [ 9, 10, 11, 12]]) + x = fluid.data(shape=(-1, 4), dtype='int64', name='x') + exe = fluid.Executor(fluid.CPUPlace()) + + # example 1, default diagonal + triu = tensor.triu(x) + triu_out, = exe.run(fluid.default_main_program(), feed={"x": data}, + fetch_list=[triu], return_numpy=True) + # array([[ 1, 2, 3, 4], + # [ 0, 6, 7, 8], + # [ 0, 0, 11, 12]]) + + # example 2, positive diagonal value + triu = tensor.triu(x, diagonal=2) + triu_out, = exe.run(fluid.default_main_program(), feed={"x": data}, + fetch_list=[triu], return_numpy=True) + # array([[0, 0, 3, 4], + # [0, 0, 0, 8], + # [0, 0, 0, 0]]) + + # example 3, negative diagonal value + triu = tensor.triu(x, diagonal=-1) + triu_out, = exe.run(fluid.default_main_program(), feed={"x": data}, + fetch_list=[triu], return_numpy=True) + # array([[ 1, 2, 3, 4], + # [ 5, 6, 7, 8], + # [ 0, 10, 11, 12]]) + diff --git a/doc/paddle/api/paddle/tensor/creation/zeros_cn.rst b/doc/paddle/api/paddle/tensor/creation/zeros_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..40e54e0daaeb91e9bf2d15b4f32ce65a987748e7 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/creation/zeros_cn.rst @@ -0,0 +1,43 @@ +.. _cn_api_tensor_zeros: + +zeros +------------------------------- + +.. py:function:: paddle.zeros(shape, dtype=None, name=None) + + + +该OP创建形状为 ``shape`` 、数据类型为 ``dtype`` 且值全为0的Tensor。 + +参数: + - **shape** (tuple|list|Tensor) - 输出Tensor的形状, ``shape`` 的数据类型为int32或者int64。 + - **dtype** (np.dtype|core.VarDesc.VarType|str,可选) - 输出Tensor的数据类型,数据类型必须为bool、float16、float32、float64、int32或int64。若为None,数据类型为float32, 默认为None。 + - **name** (str, 可选) - 输出的名字。一般无需设置,默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 + +返回:值全为0的Tensor,数据类型和 ``dtype`` 定义的类型一致。 + +抛出异常: + - ``TypeError`` - 当 ``dtype`` 不是bool、 float16、float32、float64、int32、int64和None时。 + - ``TypeError`` - 当 ``shape`` 不是tuple、list、或者Tensor时, 当 ``shape`` 为Tensor,其数据类型不是int32或者int64时。 + +**代码示例**: + +.. code-block:: python + + import paddle + paddle.disable_static() # Now we are in imperative mode + data = paddle.zeros(shape=[3, 2], dtype='float32') + # [[0. 0.] + # [0. 0.] + # [0. 0.]] + + data = paddle.zeros(shape=[2, 2]) + # [[0. 0.] + # [0. 0.]] + + # shape is a Tensor + shape = paddle.fill_constant(shape=[2], dtype='int32', value=2) + data3 = paddle.zeros(shape=shape, dtype='int32') + # [[0 0] + # [0 0]] + diff --git a/doc/paddle/api/paddle/tensor/creation/zeros_like_cn.rst b/doc/paddle/api/paddle/tensor/creation/zeros_like_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a1a202aca8c6f4a67bf22a44c2e88085fb6e0674 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/creation/zeros_like_cn.rst @@ -0,0 +1,37 @@ +.. _cn_api_tensor_zeros_like: + +zeros_like +------------------------------- + +.. py:function:: paddle.zeros_like(x, dtype=None, name=None) + + +该OP返回一个和 ``x`` 具有相同的形状的全零Tensor,数据类型为 ``dtype`` 或者和 ``x`` 相同。 + +参数 +:::::::::: + - **x** (Tensor) – 输入的多维Tensor,数据类型可以是bool,float16, float32,float64,int32,int64。输出Tensor的形状和 ``x`` 相同。如果 ``dtype`` 为None,则输出Tensor的数据类型与 ``x`` 相同。 + - **dtype** (str|np.dtype|core.VarDesc.VarType, 可选) - 输出Tensor的数据类型,支持bool,float16, float32,float64,int32,int64。当该参数值为None时, 输出Tensor的数据类型与 ``x`` 相同。默认值为None. 
+ - **name** (str, 可选) - 输出的名字。一般无需设置,默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 + +返回 +:::::::::: + Tensor:和 ``x`` 具有相同形状的全零Tensor,数据类型为 ``dtype`` 或者和 ``x`` 相同。 + +抛出异常 +:::::::::: + - ``TypeError`` - 如果 ``dtype`` 不是bool、float16、float32、float64、int32、int64。 + +代码示例 +:::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.enable_imperative() + + x = paddle.imperative.to_variable(np.array([1,2,3], dtype='float32')) + out1 = paddle.zeros_like(x) # [0., 0., 0.] + out2 = paddle.zeros_like(x, dtype='int32') # [0, 0, 0] diff --git a/doc/paddle/api/paddle/tensor/linalg/bmm_cn.rst b/doc/paddle/api/paddle/tensor/linalg/bmm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0b3704113f1d9f1c5d106a8d9fb6907fad0f5295 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/linalg/bmm_cn.rst @@ -0,0 +1,49 @@ +.. _cn_api_paddle_tensor_bmm: + +bmm +------------------------------- + +.. py:function:: paddle.tensor.bmm(x, y, name=None) + + + + +对输入x及输入y进行矩阵相乘。 + +两个输入的维度必须等于3,并且矩阵x和矩阵y的第一维必须相等。 + +同时矩阵x的第三维必须等于矩阵y的第二维。 + +例如:若x和y分别为(b, m, k)和 (b, k, n)的矩阵,则函数的输出为一个(b, m, n)的矩阵。 + +**参数**: + + - **x** (Variable) : 输入变量,类型为 Tensor 或 LoDTensor。 + - **y** (Variable) : 输入变量,类型为 Tensor 或 LoDTensor。 + - **name** (str|None) : 该层名称(可选),如果设置为空,则自动为该层命名。 + +**返回**: + - Variable (Tensor / LoDTensor),矩阵相乘后的结果。 + +**返回类型**: + - Variable(变量)。 + + +**示例**: + +.. code-block:: python + + import paddle + import paddle.fluid as fluid + import numpy as np + # size input1: (2, 2, 3) and input2: (2, 3, 2) + input1 = np.array([[[1.0, 1.0, 1.0],[2.0, 2.0, 2.0]],[[3.0, 3.0, 3.0],[4.0, 4.0, 4.0]]]) + input2 = np.array([[[1.0, 1.0],[2.0, 2.0],[3.0, 3.0]],[[4.0, 4.0],[5.0, 5.0],[6.0, 6.0]]]) + with fluid.dygraph.guard(): + x = fluid.dygraph.to_variable(input1) + y = fluid.dygraph.to_variable(input2) + out = paddle.bmm(x, y) + #output size: (2, 2, 2) + #output value: + #[[[6.0, 6.0],[12.0, 12.0]],[[45.0, 45.0],[60.0, 60.0]]] + out_np = out.numpy() + diff --git a/doc/paddle/api/paddle/tensor/linalg/cholesky_cn.rst b/doc/paddle/api/paddle/tensor/linalg/cholesky_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..eb42fde92181374845e04d6b128ec7772719d764 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/linalg/cholesky_cn.rst @@ -0,0 +1,40 @@ +.. _cn_api_tensor_cholesky: + +cholesky +------------------------------- + +.. py:function:: paddle.cholesky(x, upper=False, name=None) + + + + +计算一个对称正定矩阵或一批对称正定矩阵的Cholesky分解。如果 `upper` 是 `True` , +则分解形式为 :math:`A = U ^ {T} U` , 返回的矩阵U是上三角矩阵。 +否则,分解形式为 :math:`A = LL ^ {T}` ,返回的矩阵 :math:`L` 是下三角矩阵。 + +参数: + - **x** (Variable)- 输入变量为多维Tensor,它的维度应该为 `[*, M, N]` ,其中*为零或更大的批次尺寸,并且最里面的两个维度上的矩阵都应为对称的正定矩阵,支持数据类型为float32,float64。 + - **upper** (bool)- 指示是否返回上三角矩阵或下三角矩阵。默认值:False。 + - **name** (str , 可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回: 与 `x` 具有相同形状和数据类型的Tensor。它代表了Cholesky分解生成的三角矩阵。 + +返回类型: 变量(Variable) + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.enable_imperative() + a = np.random.rand(3, 3) + a_t = np.transpose(a, [1, 0]) + x_data = np.matmul(a, a_t) + 1e-03 + x = paddle.imperative.to_variable(x_data) + out = paddle.cholesky(x, upper=False) + print(out.numpy()) + # [[1.190523 0. 0. ] + # [0.9906703 0.27676893 0. 
] + # [1.25450498 0.05600871 0.06400121]] diff --git a/doc/paddle/api/paddle/tensor/linalg/cross_cn.rst b/doc/paddle/api/paddle/tensor/linalg/cross_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..13b07d94f6488957dcf4a6859b8b6ef38546a7e3 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/linalg/cross_cn.rst @@ -0,0 +1,55 @@ +.. _cn_api_tensor_linalg_cross: + +cross +------------------------------- + +.. py:function:: paddle.cross(x, y, axis=None, name=None) + + + + +计算张量 ``x`` 和 ``y`` 在 ``axis`` 维度上的向量积(叉积)。 ``x`` 和 ``y`` 必须有相同的形状, +且指定的 ``axis`` 的长度必须为3. 如果未指定 ``axis`` ,默认选取第一个长度为3的 ``axis`` . + +**参数**: + - **x** (Variable)– 第一个输入张量。 + - **y** (Variable)– 第二个输入张量。 + - **axis** (int, optional) – 沿着此维进行向量积操作。默认选取第一个长度为3的 ``axis`` . + - **name** (str,可选)- 输出的名字。默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 + +**返回**:向量积的结果。 + +**返回类型**:Variable + +**代码示例**: + +.. code-block:: python + + import paddle + from paddle.imperative import to_variable + import numpy as np + + paddle.enable_imperative() + + data_x = np.array([[1.0, 1.0, 1.0], + [2.0, 2.0, 2.0], + [3.0, 3.0, 3.0]]) + data_y = np.array([[1.0, 1.0, 1.0], + [1.0, 1.0, 1.0], + [1.0, 1.0, 1.0]]) + x = to_variable(data_x) + y = to_variable(data_y) + + z1 = paddle.cross(x, y) + print(z1.numpy()) + # [[-1. -1. -1.] + # [ 2. 2. 2.] + # [-1. -1. -1.]] + + z2 = paddle.cross(x, y, axis=1) + print(z2.numpy()) + # [[0. 0. 0.] + # [0. 0. 0.] + # [0. 0. 0.]] + + diff --git a/doc/paddle/api/paddle/tensor/linalg/dist_cn.rst b/doc/paddle/api/paddle/tensor/linalg/dist_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4202e5d0efa017e7fac928dadf7890a8896c08f6 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/linalg/dist_cn.rst @@ -0,0 +1,78 @@ +.. _cn_api_tensor_linalg_dist: + +dist +------------------------------- + +.. py:function:: paddle.tensor.linalg.dist(x, y, p=2) + + + + +该OP用于计算 `(x-y)` 的 p 范数(p-norm),需要注意这不是严格意义上的范数,仅作为距离的度量。输入 `x` 和 `y` 的形状(shape)必须是可广播的(broadcastable)。其含义如下,详情请参考 `numpy的广播概念 `_ : + +- 每个输入都至少有1维 +- 对两个输入的维度从后向前匹配,两个输入每一维的大小需要满足3个条件中的任意一个:相等、其中一个为1或者其中一个不存在。 + +定义 `z = x - y` ,`x` 和 `y` 的形状是可广播的,那么 `z` 的形状可按照下列步骤得到: + +(1) 如果 `x` 和 `y` 的维数不同,先对维数较少的这个输入的维度往前补1。 + +例如,`x` 的形状为[8, 1, 6, 1],`y` 的形状为[7, 1, 5],对 `y` 的维度补1, + +x (4-D Tensor): 8 x 1 x 6 x 1 + +y (4-D Tensor): 1 x 7 x 1 x 5 + +(2) 确定输出 `z` 每一维度的大小:从两个输入的维度中选取最大值。 + +z (4-D Tensor): 8 x 7 x 6 x 5 + +若两个输入的维数相同,则输出的大小可直接用步骤2确定。以下是 `p` 取不同值时,范数的计算公式: + +当 `p = 0` ,定义 $0^0 = 0$,则 z 的零范数是 `z` 中非零元素的个数。 + +.. math:: + ||z||_{0}=\lim_{p \rightarrow 0}\sum_{i=1}^{m}|z_i|^{p} + +当 `p = inf` ,`z` 的无穷范数是 `z` 所有元素中的最大值。 + +.. math:: + ||z||_\infty=\max_i |z_i| + +当 `p = -inf` ,`z` 的负无穷范数是 `z` 所有元素中的最小值。 + +.. math:: + ||z||_{-\infty}=\min_i |z_i| + +其他情况下,`z` 的 `p` 范数使用以下公式计算: + +.. math:: + ||z||_{p}=(\sum_{i=1}^{m}|z_i|^p)^{\frac{1}{p}} + +参数: + - **x** (Variable): 1-D 到 6-D Tensor,数据类型为float32或float64。 + - **y** (Variable): 1-D 到 6-D Tensor,数据类型为float32或float64。 + - **p** (float, optional): 用于设置需要计算的范数,数据类型为float32或float64。默认值为2. + +返回: `(x-y)` 的 `p` 范数。 + +返回类型: Variable + +**代码示例**: + +.. code-block:: python + + import paddle + import paddle.fluid as fluid + import numpy as np + with fluid.dygraph.guard(): + x = fluid.dygraph.to_variable(np.array([[3, 3],[3, 3]]).astype(np.float32)) + y = fluid.dygraph.to_variable(np.array([[3, 3],[3, 1]]).astype(np.float32)) + out = paddle.dist(x, y, 0) + print(out.numpy()) # out = [1.] + out = paddle.dist(x, y, 2) + print(out.numpy()) # out = [2.] 
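+        # x - y == [[0, 0], [0, 2]], so the 0-"norm" above counts the single non-zero entry (1.)
+        # and the 2-norm is sqrt(0**2 + 0**2 + 0**2 + 2**2) = 2.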
+ out = paddle.dist(x, y, float("inf")) + print(out.numpy()) # out = [2.] + out = paddle.dist(x, y, float("-inf")) + print(out.numpy()) # out = [0.] diff --git a/doc/paddle/api/paddle/tensor/linalg/dot_cn.rst b/doc/paddle/api/paddle/tensor/linalg/dot_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5914eb30d807572d0cc439690ea834c89a11a355 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/linalg/dot_cn.rst @@ -0,0 +1,37 @@ +.. _cn_api_paddle_tensor_linalg_dot: + +dot +------------------------------- + +.. py:function:: paddle.tensor.linalg.dot(x, y, name=None) + + +该OP计算向量的内积 + +.. note:: + + 支持1维和2维Tensor. + +参数: + - **x** (Tensor)- 1维或2维 ``Tensor`` 。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64`` 。 + - **y** (Tensor)- 1维或2维 ``Tensor`` 。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64`` 。 + - **name** (str,可选)- 输出的名字。默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 + + +返回: ``Tensor`` ,数据类型与 ``x`` 相同。 + +返回类型: Tensor。 + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32) + y_data = np.random.uniform(1, 3, [10]).astype(np.float32) + x = paddle.to_tensor(x_data) + y = paddle.to_tensor(y_data) + z = paddle.dot(x, y) + print(z.numpy()) diff --git a/doc/paddle/api/paddle/tensor/linalg/histogram_cn.rst b/doc/paddle/api/paddle/tensor/linalg/histogram_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d57084fa6f76f8640688f8d2fb0b2a84d81a8994 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/linalg/histogram_cn.rst @@ -0,0 +1,53 @@ +.. _cn_api_tensor_histogram: + +histogram +------------------------------- + +.. py:function:: paddle.histogram(input, bins=100, min=0, max=0): + +计算输入张量的直方图。以min和max为range边界,将其均分成bins个直条,然后将排序好的数据划分到各个直条(bins)中。如果min和max都为0, 则利用数据中的最大最小值作为边界。 + +参数: + - **input** (Variable) - 输入Tensor。维度为多维,数据类型为int32, int64, float32或float64。 + - **bins** (int) - 直方图 bins(直条)的个数,默认为100。 + - **min** (int) - range的下边界(包含),默认为0。 + - **max** (int) - range的上边界(包含),默认为0。 + +返回:直方图。 + +返回类型:Variable,数据为int64类型,维度为(nbins,)。 + +抛出异常: + - ``ValueError`` - 当输入 ``bin``, ``min``, ``max``不合法时。 + +**代码示例1**: + +.. code-block:: python + + import paddle + import numpy as np + startup_program = paddle.Program() + train_program = paddle.Program() + with paddle.program_guard(train_program, startup_program): + inputs = paddle.data(name='input', dtype='int32', shape=[2,3]) + output = paddle.histogram(inputs, bins=5, min=1, max=5) + place = paddle.CPUPlace() + exe = paddle.Executor(place) + exe.run(startup_program) + img = np.array([[2, 4, 2], [2, 5, 4]]).astype(np.int32) + res = exe.run(train_program, + feed={'input': img}, + fetch_list=[output]) + print(np.array(res[0])) # [0, 3, 0, 2, 1] + +**代码示例2**: + +.. code-block:: python + + import paddle + import numpy as np + with paddle.imperative.guard(paddle.CPUPlace()): + inputs_np = np.array([0.5, 1.5, 2.5]).astype(np.float) + inputs = paddle.imperative.to_variable(inputs_np) + result = paddle.histogram(inputs, bins=5, min=1, max=5) + print(result) # [1, 1, 0, 0, 0] diff --git a/doc/paddle/api/paddle/tensor/linalg/matmul_cn.rst b/doc/paddle/api/paddle/tensor/linalg/matmul_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c12ac055bb88cb94f32f91554f72684a3852da70 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/linalg/matmul_cn.rst @@ -0,0 +1,93 @@ +.. _cn_api_tensor_matmul: + +matmul +------------------------------- + +.. 
py:function:: paddle.matmul(x, y, transpose_x=False, transpose_y=False, name=None) + +该op是计算两个Tensor的乘积,遵循完整的广播规则,关于广播规则,请参考 :ref:`use_guide_broadcasting` 。 +并且其行为与 ``numpy.matmul`` 一致。目前,输入张量的维数可以是任意数量, ``matmul`` 可以用于 +实现 ``dot`` , ``matmul`` 和 ``batchmatmul`` 。实际行为取决于输入 ``x`` 、输入 ``y`` 、 ``transpose_x`` , +``transpose_y`` 。具体如下: + +- 如果 ``transpose`` 为真,则对应 Tensor 的后两维会转置。如果Tensor的一维,则转置无效。假定 ``x`` 是一个 shape=[D] 的一维 Tensor,则 ``x`` 视为 [1, D]。然而, ``y`` 是一个shape=[D]的一维Tensor,则视为[D, 1]。 + +乘法行为取决于 ``x`` 和 ``y`` 的尺寸。 具体如下: + +- 如果两个张量均为一维,则获得点积结果。 + +- 如果两个张量都是二维的,则获得矩阵与矩阵的乘积。 + +- 如果 ``x`` 是1维的,而 ``y`` 是2维的,则将1放在 ``x`` 维度之前,以进行矩阵乘法。矩阵相乘后,将删除前置尺寸。 + +- 如果 ``x`` 是2维的,而 ``y`` 是1维的,获得矩阵与向量的乘积。 + +- 如果两个输入至少为一维,且至少一个输入为N维(其中N> 2),则将获得批矩阵乘法。 如果第一个自变量是一维的,则将1放在其维度的前面,以便进行批量矩阵的乘法运算,然后将其删除。 如果第二个参数为一维,则将1附加到其维度后面,以实现成批矩阵倍数的目的,然后将其删除。 根据广播规则广播非矩阵维度(不包括最后两个维度)。 例如,如果输入 ``x`` 是(j,1,n,m)Tensor,另一个 ``y`` 是(k,m,p)Tensor,则out将是(j,k,n,p)张量。 + +参数 +::::::::: + - **x** (Tensor) : 输入变量,类型为 Tensor,数据类型为float32, float64。 + - **y** (Tensor) : 输入变量,类型为 Tensor,数据类型为float32, float64。 + - **transpose_x** (bool,可选) : 相乘前是否转置 x,默认值为False。 + - **transpose_y** (bool,可选) : 相乘前是否转置 y,默认值为False。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回: +::::::::: + + - Tensor,矩阵相乘后的结果,数据类型和输入数据类型一致。 + +代码示例 +:::::::::: + +.. code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + + # vector * vector + x_data = np.random.random([10]).astype(np.float32) + y_data = np.random.random([10]).astype(np.float32) + x = paddle.to_tensor(x_data) + y = paddle.to_tensor(y_data) + z = paddle.matmul(x, y) + print(z.numpy().shape) + # [1] + + # matrix * vector + x_data = np.random.random([10, 5]).astype(np.float32) + y_data = np.random.random([5]).astype(np.float32) + x = paddle.to_tensor(x_data) + y = paddle.to_tensor(y_data) + z = paddle.matmul(x, y) + print(z.numpy().shape) + # [10] + + # batched matrix * broadcasted vector + x_data = np.random.random([10, 5, 2]).astype(np.float32) + y_data = np.random.random([2]).astype(np.float32) + x = paddle.to_tensor(x_data) + y = paddle.to_tensor(y_data) + z = paddle.matmul(x, y) + print(z.numpy().shape) + # [10, 5] + + # batched matrix * batched matrix + x_data = np.random.random([10, 5, 2]).astype(np.float32) + y_data = np.random.random([10, 2, 5]).astype(np.float32) + x = paddle.to_tensor(x_data) + y = paddle.to_tensor(y_data) + z = paddle.matmul(x, y) + print(z.numpy().shape) + # [10, 5, 5] + + # batched matrix * broadcasted matrix + x_data = np.random.random([10, 1, 5, 2]).astype(np.float32) + y_data = np.random.random([1, 3, 2, 5]).astype(np.float32) + x = paddle.to_tensor(x_data) + y = paddle.to_tensor(y_data) + z = paddle.matmul(x, y) + print(z.numpy().shape) + # [10, 3, 5, 5] + diff --git a/doc/paddle/api/paddle/tensor/linalg/norm_cn.rst b/doc/paddle/api/paddle/tensor/linalg/norm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..bc766646d9d744b603d3bcd686dc6705905ebf57 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/linalg/norm_cn.rst @@ -0,0 +1,61 @@ +.. _cn_api_tensor_norm: + +norm +------------------------------- + +.. py:function:: paddle.norm(x, p='fro', axis=None, keepdim=False, name=None): + + + + +该OP将计算给定Tensor的矩阵范数(Frobenius 范数)和向量范数(向量1范数、2范数、或者通常的p范数). 
+ +参数: + - **x** (Tensor) - 输入Tensor。维度为多维,数据类型为float32或float64。 + - **p** (float|string, 可选) - 范数(ord)的种类。目前支持的值为 `fro`、`inf`、`-inf`、`0`、`1`、`2`,和任何正实数p对应的p范数。默认值为 `fro` 。 + - **axis** (int|list|tuple, 可选) - 使用范数计算的轴。如果 ``axis`` 为None,则忽略input的维度,将其当做向量来计算。如果 ``axis`` 为int或者只有一个元素的list|tuple,``norm`` API会计算输入Tensor的向量范数。如果axis为包含两个元素的list,API会计算输入Tensor的矩阵范数。 当 ``axis < 0`` 时,实际的计算维度为 rank(input) + axis。默认值为 `None` 。 + - **keepdim** (bool,可选) - 是否在输出的Tensor中保留和输入一样的维度,默认值为False。当 :attr:`keepdim` 为False时,输出的Tensor会比输入 :attr:`input` 的维度少一些。 + - **name** (str|None) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。默认值为None。 + +返回: + - 在指定axis上进行范数计算的Tensor,与输入input数据类型相同。 + +**代码示例**: + +.. code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + shape=[2, 3, 4] + np_input = np.arange(24).astype('float32') - 12 + np_input = np_input.reshape(shape) + x = paddle.to_tensor(np_input) + #[[[-12. -11. -10. -9.] [ -8. -7. -6. -5.] [ -4. -3. -2. -1.]] + # [[ 0. 1. 2. 3.] [ 4. 5. 6. 7.] [ 8. 9. 10. 11.]]] + + # compute frobenius norm along last two dimensions. + out_fro = paddle.norm(x, p='fro', axis=[0,1]) + # out_fro.numpy() [17.435596 16.911535 16.7332 16.911535] + + # compute 2-order vector norm along last dimension. + out_pnorm = paddle.norm(x, p=2, axis=-1) + #out_pnorm.numpy(): [[21.118711 13.190906 5.477226] + # [ 3.7416575 11.224972 19.131126]] + + # compute 2-order norm along [0,1] dimension. + out_pnorm = paddle.norm(x, p=2, axis=[0,1]) + #out_pnorm.numpy(): [17.435596 16.911535 16.7332 16.911535] + + # compute inf-order norm + out_pnorm = paddle.norm(x, p=np.inf) + #out_pnorm.numpy() = [12.] + out_pnorm = paddle.norm(x, p=np.inf, axis=0) + #out_pnorm.numpy(): [[12. 11. 10. 9.] [8. 7. 6. 7.] [8. 9. 10. 11.]] + + # compute -inf-order norm + out_pnorm = paddle.norm(x, p=-np.inf) + #out_pnorm.numpy(): [0.] + out_pnorm = paddle.norm(x, p=-np.inf, axis=0) + #out_pnorm.numpy(): [[0. 1. 2. 3.] [4. 5. 6. 5.] [4. 3. 2. 1.]] + diff --git a/doc/paddle/api/paddle/tensor/linalg/t_cn.rst b/doc/paddle/api/paddle/tensor/linalg/t_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..fe506771cb0b17d2324755a9e767843ba338a971 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/linalg/t_cn.rst @@ -0,0 +1,51 @@ +.. _cn_api_paddle_tensor_t: + +t +------------------------------- + +.. py:function:: paddle.tensor.t(input, name=None) + + + + +该OP对小于等于2维的Tensor进行数据转置。0维和1维Tensor返回本身,2维Tensor等价于perm设置为0,1的 :ref:`cn_api_fluid_layers_transpose` 函数。 + +参数: + - **input** (Variable) - 输入:N维(N<=2)Tensor,可选的数据类型为float16, float32, float64, int32, int64。 + - **name** (str, 可选)- 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None + +返回: N维Tensor + +返回类型:Variable + +**示例**: + +.. code-block:: python + + # 例1 (0-D tensor) + x = tensor([0.79]) + paddle.t(x) = tensor([0.79]) + + # 例2 (1-D tensor) + x = tensor([0.79, 0.84, 0.32]) + paddle.t(x) = tensor([0.79, 0.84, 0.32]) + + # 例3 (2-D tensor) + x = tensor([0.79, 0.84, 0.32], + [0.64, 0.14, 0.57]) + paddle.t(x) = tensor([0.79, 0.64], + [0.84, 0.14], + [0.32, 0.57]) + + +**代码示例**: + +.. 
code-block:: python + + import paddle + import paddle.fluid as fluid + x = fluid.data(name='x', shape=[2, 3], dtype='float32') + x_transposed = paddle.t(x) # paddle.t 等价于 paddle.tensor.t + print(x_transposed.shape) + #(3L, 2L) + diff --git a/doc/paddle/api/paddle/tensor/logic/allclose_cn.rst b/doc/paddle/api/paddle/tensor/logic/allclose_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c483e3a112f2513f8db0bb7095dc1f99e7a4abd3 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/logic/allclose_cn.rst @@ -0,0 +1,58 @@ +.. _cn_api_tensor_allclose: + +allclose +------------------------------- + +.. py:function:: paddle.allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None) + +逐个检查x和y的所有元素是否均满足如下条件: + +.. math:: + \left| x - y \right| \leq atol + rtol \times \left| y \right| + +该API的行为类似于 :math:`numpy.allclose` ,即当两个待比较Tensor的所有元素均在一定容忍误差范围内视为相等则该API返回True值。 + +参数: + - **x** (Tensor) - 输入的 `Tensor` ,数据类型为:float32、float64。 + - **y** (Tensor) - 输入的 `Tensor` ,数据类型为:float32、float64。 + - **rtol** (float,可选) - 相对容忍误差,默认值为1e-5。 + - **atol** (float,可选) - 绝对容忍误差,默认值为1e-8。 + - **equal_nan** (bool,可选) - 如果设置为True,则两个NaN数值将被视为相等,默认值为False。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:计算得到的布尔类型单值Tensor。 + +**代码示例**: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + np_x = np.array([10000., 1e-07]).astype("float32") + np_y = np.array([10000.1, 1e-08]).astype("float32") + x = paddle.to_tensor (np_x) + y = paddle.to_tensor (np_y) + result1 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08, + equal_nan=False, name="ignore_nan") + np_result1 = result1.numpy() + # [False] + result2 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08, + equal_nan=True, name="equal_nan") + np_result2 = result2.numpy() + # [False] + + np_x = np.array([1.0, float('nan')]).astype("float32") + np_y = np.array([1.0, float('nan')]).astype("float32") + x = paddle.to_tensor (np_x) + y = paddle.to_tensor (np_y) + result1 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08, + equal_nan=False, name="ignore_nan") + np_result1 = result1.numpy() + # [False] + result2 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08, + equal_nan=True, name="equal_nan") + np_result2 = result2.numpy() + # [True] diff --git a/doc/paddle/api/paddle/tensor/logic/equal_all_cn.rst b/doc/paddle/api/paddle/tensor/logic/equal_all_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f40403fcb8fef19d101d4d7ac550a76a95cfbbcd --- /dev/null +++ b/doc/paddle/api/paddle/tensor/logic/equal_all_cn.rst @@ -0,0 +1,38 @@ +.. _cn_api_tensor_equal_all: + +equal_all +------------------------------- + +.. py:function:: paddle.equal_all(x, y, name=None) + + +该OP返回:返回的结果只有一个元素值,如果所有相同位置的元素相同返回True,否则返回False。 + +**注:该OP输出的结果不返回梯度。** + + +参数: + - **x** (Tensor) - 输入Tensor,支持的数据类型包括 float32, float64,int32, int64。 + - **y** (Tensor) - 输入Tensor,支持的数据类型包括 float32, float64, int32, int64。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:输出结果为Tensor,Tensor数据类型为bool。 + +返回类型:变量(Tensor) + +**代码示例**: + +.. 
code-block:: python + + import numpy as np + import paddle + import paddle.imperative as imperative + paddle.enable_imperative() + paddle.enable_imperative() + x = imperative.to_variable(np.array([1, 2, 3])) + y = imperative.to_variable(np.array([1, 2, 3])) + z = imperative.to_variable(np.array([1, 4, 3])) + result1 = paddle.equal_all(x, y) + print(result1.numpy()) # result1 = [True ] + result2 = paddle.equal_all(x, z) + print(result2.numpy()) # result2 = [False ] diff --git a/doc/paddle/api/paddle/tensor/logic/equal_cn.rst b/doc/paddle/api/paddle/tensor/logic/equal_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a8368530761fdf3243f709dc2fa0affe8197d15c --- /dev/null +++ b/doc/paddle/api/paddle/tensor/logic/equal_cn.rst @@ -0,0 +1,34 @@ +.. _cn_api_tensor_equal: + +equal +------------------------------- +.. py:function:: paddle.equal(x, y, name=None) + + +该OP返回 :math:`x==y` 逐元素比较x和y是否相等,相同位置的元素相同则返回True,否则返回False。使用重载算子 `==` 可以有相同的计算函数效果 + +**注:该OP输出的结果不返回梯度。** + +参数: + - **x** (Tensor) - 输入Tensor,支持的数据类型包括 float32, float64,int32, int64。 + - **y** (Tensor) - 输入Tensor,支持的数据类型包括 float32, float64, int32, int64。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + + +返回:输出结果的Tensor,输出Tensor的shape和输入一致,Tensor数据类型为bool。 + +返回类型:变量(Tensor) + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle + import paddle.imperative as imperative + paddle.enable_imperative() + x = imperative.to_variable(np.array([1, 2, 3])) + y = imperative.to_variable(np.array([1, 3, 2])) + result1 = paddle.equal(x, y) + print(result1.numpy()) # result1 = [True False False] + diff --git a/doc/paddle/api/paddle/tensor/logic/greater_equal_cn.rst b/doc/paddle/api/paddle/tensor/logic/greater_equal_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..59358ed7900aa9c77fbc59a323e4dd494a493721 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/logic/greater_equal_cn.rst @@ -0,0 +1,35 @@ +.. _cn_api_tensor_cn_greater_equal: + +greater_equal +------------------------------- +.. py:function:: paddle.greater_equal(x, y, name=None) + + +该OP逐元素地返回 :math:`x >= y` 的逻辑值,相同位置前者输入大于等于后者输入则返回True,否则返回False。使用重载算子 `>=` 可以有相同的计算函数效果。 + +**注:该OP输出的结果不返回梯度。** + +参数: + - **x** (Tensor) - 输入Tensor,支持的数据类型包括 float32, float64,int32, int64。 + - **y** (Tensor) - 输入Tensor,支持的数据类型包括 float32, float64, int32, int64。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + + +返回:输出结果的Tensor,输出Tensor的shape和输入一致,Tensor数据类型为bool。 + +返回类型:变量(Tensor) + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle + import paddle.imperative as imperative + paddle.enable_imperative() + x = imperative.to_variable(np.array([1, 2, 3])) + y = imperative.to_variable(np.array([1, 3, 2])) + result1 = paddle.greater_equal(x, y) + print(result1.numpy()) # result1 = [True False True] + + diff --git a/doc/paddle/api/paddle/tensor/logic/greater_than_cn.rst b/doc/paddle/api/paddle/tensor/logic/greater_than_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0842cacdcd9559e0280a135d9d31a5612cc1c755 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/logic/greater_than_cn.rst @@ -0,0 +1,33 @@ +.. _cn_api_tensor_cn_greater_than: + +greater_than +------------------------------- +.. 
py:function:: paddle.greater_than(x, y, name=None) + + +该OP逐元素地返回 :math:`x > y` 的逻辑值,相同位置前者输入大于后者输入则返回True,否则返回False。使用重载算子 `>` 可以有相同的计算函数效果。 + +**注:该OP输出的结果不返回梯度。** + +参数: + - **x** (Tensor) - 输入Tensor,支持的数据类型包括 float32, float64,int32, int64。 + - **y** (Tensor) - 输入Tensor,支持的数据类型包括 float32, float64, int32, int64。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + + +返回:输出结果的Tensor,输出Tensor的shape和输入一致,Tensor数据类型为bool。 + +返回类型:变量(Tensor) + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle + import paddle.imperative as imperative + paddle.enable_imperative() + x = imperative.to_variable(np.array([1, 2, 3])) + y = imperative.to_variable(np.array([1, 3, 2])) + result1 = paddle.greater_than(x, y) + print(result1.numpy()) # result1 = [False False True] diff --git a/doc/paddle/api/paddle/tensor/logic/less_equal_cn.rst b/doc/paddle/api/paddle/tensor/logic/less_equal_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2523d344bd69cff7459a97ce944cc361fa001ddc --- /dev/null +++ b/doc/paddle/api/paddle/tensor/logic/less_equal_cn.rst @@ -0,0 +1,35 @@ +.. _cn_api_tensor_cn_less_equal: + +less_equal +------------------------------- +.. py:function:: paddle.less_equal(x, y, name=None) + + +该OP逐元素地返回 :math:`x <= y` 的逻辑值,相同位置前者输入小于等于后者输入则返回True,否则返回False。使用重载算子 `<=` 可以有相同的计算函数效果。 + +**注:该OP输出的结果不返回梯度。** + +参数: + - **x** (Tensor) - 输入Tensor,支持的数据类型包括 float32, float64,int32, int64。 + - **y** (Tensor) - 输入Tensor,支持的数据类型包括 float32, float64, int32, int64。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + + +返回:输出结果的Tensor,输出Tensor的shape和输入一致,Tensor数据类型为bool。 + +返回类型:变量(Tensor) + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle + import paddle.imperative as imperative + paddle.enable_imperative() + x = imperative.to_variable(np.array([1, 2, 3])) + y = imperative.to_variable(np.array([1, 3, 2])) + result1 = paddle.less_equal(x, y) + print(result1.numpy()) # result1 = [True True False] + + diff --git a/doc/paddle/api/paddle/tensor/logic/less_than_cn.rst b/doc/paddle/api/paddle/tensor/logic/less_than_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8e70c4c1ec99c3b2213d77e3ef195998d5b5caf5 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/logic/less_than_cn.rst @@ -0,0 +1,35 @@ +.. _cn_api_tensor_cn_less_than: + +less_than +------------------------------- +.. py:function:: paddle.less_than(x, y, name=None) + + +该OP逐元素地返回 :math:`x < y` 的逻辑值,相同位置前者输入小于后者输入则返回True,否则返回False。使用重载算子 `<` 可以有相同的计算函数效果。 + +**注:该OP输出的结果不返回梯度。** + +参数: + - **x** (Tensor) - 输入Tensor,支持的数据类型包括 float32, float64,int32, int64。 + - **y** (Tensor) - 输入Tensor,支持的数据类型包括 float32, float64, int32, int64。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + + +返回:输出结果的Tensor,输出Tensor的shape和输入一致,Tensor数据类型为bool。 + +返回类型:变量(Tensor) + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle + import paddle.imperative as imperative + paddle.enable_imperative() + x = imperative.to_variable(np.array([1, 2, 3])) + y = imperative.to_variable(np.array([1, 3, 2])) + result1 = paddle.less_than(x, y) + print(result1.numpy()) # result1 = [False True False] + + diff --git a/doc/paddle/api/paddle/tensor/logic/not_equal_cn.rst b/doc/paddle/api/paddle/tensor/logic/not_equal_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..be4a5391a8caf1c8fd740d01e16d04af6f334799 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/logic/not_equal_cn.rst @@ -0,0 +1,35 @@ +.. 
_cn_api_tensor_not_equal: + +not_equal +------------------------------- +.. py:function:: paddle.not_equal(x, y, name=None) + + +该OP返回 :math:`x!=y` 逐元素比较x和y是否相等,相同位置的元素不相同则返回True,否则返回False。使用重载算子 `!=` 可以有相同的计算函数效果 + +**注:该OP输出的结果不返回梯度。** + +参数: + - **x** (Tensor) - 输入Tensor,支持的数据类型包括 float32, float64,int32, int64。 + - **y** (Tensor) - 输入Tensor,支持的数据类型包括 float32, float64, int32, int64。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + + +返回:输出结果的Tensor,输出Tensor的shape和输入一致,Tensor数据类型为bool。 + +返回类型:变量(Tensor) + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle + import paddle.imperative as imperative + paddle.enable_imperative() + x = imperative.to_variable(np.array([1, 2, 3])) + y = imperative.to_variable(np.array([1, 3, 2])) + result1 = paddle.not_equal(x, y) + print(result1.numpy()) # result1 = [False True True] + + diff --git a/doc/paddle/api/paddle/tensor/manipulation/chunk_cn.rst b/doc/paddle/api/paddle/tensor/manipulation/chunk_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..da78f9e2951ff2e4fce59a3ab55b087ce9d87f94 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/manipulation/chunk_cn.rst @@ -0,0 +1,42 @@ +.. _cn_api_tensor_cn_chunk: + +chunk +------------------------------- + +.. py:function:: paddle.chunk(x, chunks, axis=0, name=None) + +该OP将输入Tensor分割成多个子Tensor。 + +**参数**: + - **x** (Tensor) - 输入变量,数据类型为bool, float16, float32,float64,int32,int64的多维Tensor。 + - **chunks** (int) - ``chunks`` 是一个整数,表示将输入Tensor划分成多少个相同大小的子Tensor。 + - **axis** (int|Tensor,可选) - 整数或者形状为[1]的Tensor,数据类型为int32或int64。表示需要分割的维度。如果 ``axis < 0`` ,则划分的维度为 ``rank(x) + axis`` 。默认值为0。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:分割后的Tensor列表。 + + +**代码示例** + +.. code-block:: python + + import numpy as np + import paddle + + paddle.disable_static() + # x is a Tensor which shape is [3, 9, 5] + x_np = np.random.random([3, 9, 5]).astype("int32") + x = paddle.to_tensor(x_np) + + out0, out1, out2 = paddle.chunk(x, chunks=3, axis=1) + # out0.shape [3, 3, 5] + # out1.shape [3, 3, 5] + # out2.shape [3, 3, 5] + + + # axis is negative, the real axis is (rank(x) + axis) which real + # value is 1. + out0, out1, out2 = paddle.chunk(x, chunks=3, axis=-2) + # out0.shape [3, 3, 5] + # out1.shape [3, 3, 5] + # out2.shape [3, 3, 5] diff --git a/doc/paddle/api/paddle/tensor/manipulation/concat_cn.rst b/doc/paddle/api/paddle/tensor/manipulation/concat_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..57d5811af6600c39e40c1872f53300ef2104fac2 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/manipulation/concat_cn.rst @@ -0,0 +1,49 @@ +.. _cn_api_tensor_concat: + +concat +------------------------------- + +.. py:function:: paddle.concat(x, axis=0, name=None) + + +该OP对输入沿 ``axis`` 轴进行联结,返回一个新的Tensor。 + +参数: + - **x** (list|tuple) - 待联结的Tensor list或者Tensor tuple ,支持的数据类型为:bool, float16, float32、float64、int32、int64, ``x`` 中所有Tensor的数据类型应该一致。 + - **axis** (int|Tensor,可选) - 指定对输入 ``x`` 进行运算的轴,可以是整数或者形状为[1]的Tensor,数据类型为int32或者int64。 ``axis`` 的有效范围是[-R, R),R是输入 ``x`` 中Tensor的维度, ``axis`` 为负值时与 :math:`axis + R` 等价。默认值为0。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:联结后的Tensor ,数据类型和 ``x`` 中的Tensor相同。 + + +**代码示例**: + +.. 
code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() # Now we are in imperative mode + in1 = np.array([[1, 2, 3], + [4, 5, 6]]) + in2 = np.array([[11, 12, 13], + [14, 15, 16]]) + in3 = np.array([[21, 22], + [23, 24]]) + x1 = paddle.to_tensor(in1) + x2 = paddle.to_tensor(in2) + x3 = paddle.to_tensor(in3) + zero = paddle.full(shape=[1], dtype='int32', fill_value=0) + # When the axis is negative, the real axis is (axis + Rank(x)) + # As follow, axis is -1, Rank(x) is 2, the real axis is 1 + out1 = paddle.concat(x=[x1, x2, x3], axis=-1) + out2 = paddle.concat(x=[x1, x2], axis=0) + out3 = paddle.concat(x=[x1, x2], axis=zero) + # out1 + # [[ 1 2 3 11 12 13 21 22] + # [ 4 5 6 14 15 16 23 24]] + # out2 out3 + # [[ 1 2 3] + # [ 4 5 6] + # [11 12 13] + # [14 15 16]] diff --git a/doc/paddle/api/paddle/tensor/manipulation/expand_as_cn.rst b/doc/paddle/api/paddle/tensor/manipulation/expand_as_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2f62f67233c8e52f87f77e703c3e4499cb8fc40c --- /dev/null +++ b/doc/paddle/api/paddle/tensor/manipulation/expand_as_cn.rst @@ -0,0 +1,38 @@ +.. _cn_api_tensor_expand_as: + +expand_as +------------------------------- + +.. py:function:: paddle.expand_as(x, y, name=None) + +根据 ``y`` 的形状扩展 ``x`` ,扩展后, ``x`` 的形状和 ``y`` 的形状相同。 + +``x`` 的维数和 ``y`` 的维数应小于等于6,并且 ``y`` 的维数应该大于等于 ``x`` 的维数。扩展的维度的维度值应该为1。 + +参数 +::::::::: + - x (Tensor) - 输入的Tensor,数据类型为:bool、float32、float64、int32或int64。 + - y (Tensor) - 给定输入 ``x`` 扩展后的形状。 + - name (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +返回 +::::::::: +``Tensor`` ,数据类型与 ``x`` 相同。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + np_data_x = np.array([1, 2, 3]).astype('int32') + np_data_y = np.array([[1, 2, 3], [4, 5, 6]]).astype('int32') + data_x = paddle.to_tensor(np_data_x) + data_y = paddle.to_tensor(np_data_y) + out = paddle.expand_as(data_x, data_y) + np_out = out.numpy() + # [[1, 2, 3], [1, 2, 3]] + diff --git a/doc/paddle/api/paddle/tensor/manipulation/expand_cn.rst b/doc/paddle/api/paddle/tensor/manipulation/expand_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..94af4e93fe13c2d5ff220c50b6a18f37e0491851 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/manipulation/expand_cn.rst @@ -0,0 +1,36 @@ +.. _cn_api_tensor_expand: + +expand +------------------------------- + +.. py:function:: paddle.expand(x, shape, name=None) + +根据 ``shape`` 指定的形状扩展 ``x`` ,扩展后, ``x`` 的形状和 ``shape`` 指定的形状一致。 + +``x`` 的维数和 ``shape`` 的元素数应小于等于6,并且 ``shape`` 中的元素数应该大于等于 ``x`` 的维数。扩展的维度的维度值应该为1。 + +参数 +::::::::: + - x (Tensor) - 输入的Tensor,数据类型为:bool、float32、float64、int32或int64。 + - shape (tuple|list|Tensor) - 给定输入 ``x`` 扩展后的形状,若 ``shape`` 为list或者tuple,则其中的元素值应该为整数或者1-D Tensor,若 ``shape`` 类型为Tensor,则其应该为1-D Tensor。 + - name (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +返回 +::::::::: +``Tensor`` ,数据类型与 ``x`` 相同。 + +代码示例 +::::::::: + +.. 
code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + np_data = np.array([1, 2, 3]).astype('int32') + data = paddle.to_tensor(np_data) + out = paddle.expand(data, [2, 3]) + np_out = out.numpy() + # [[1, 2, 3], [1, 2, 3]] + diff --git a/doc/paddle/api/paddle/tensor/manipulation/flatten_cn.rst b/doc/paddle/api/paddle/tensor/manipulation/flatten_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ef54e3c6f84734f40570d9b8f3d1d60dd431cf82 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/manipulation/flatten_cn.rst @@ -0,0 +1,59 @@ +.. _cn_api_fluid_layers_flatten: + +flatten +------------------------------- + +.. py:function:: paddle.fluid.layers.flatten(x, axis=1, name=None) + + + + +flatten op将输入的多维Tensor展平成2-D Tensor矩阵 + +例如: + +.. code-block:: text + + Case 1: + + 给定 + X.shape = (3, 100, 100, 4) + 且 + axis = 2 + 得到: + Out.shape = (3 * 100, 4 * 100) + + Case 2: + + 给定 + X.shape = (3, 100, 100, 4) + 且 + axis = 0 + 得到: + Out.shape = (1, 3 * 100 * 100 * 4) + +参数: + - **x** (Variable) - 一个维度数>=axis 的多维Tensor, 数据类型可以为float32,float64,int8,int32或int64。 + - **axis** (int) - flatten展开的分割轴,[0, axis) 轴数据被flatten到输出矩阵的0轴,[axis, R)数据被flatten到输出矩阵的1轴,其中R是输入张量的总维度数。axis的值必须在[0,R]范围内。当 axis=0 时,若输入Tensor的维度为 :math:`[d_0, d_1,… d_n]` ,则输出张量的Tensor维度为 :math:`[1,d_0 * d_1 *… d_n]` ,默认值为1。 + - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回: 一个 2-D Tensor,它包含输入Tensor的数据,但维度发生变化。输入的[0, axis)维将沿axis展平到输出Tensor的0维度,剩余的输入维数展平到输出的1维度。数据类型与输入x相同。 + +返回类型: Variable + +抛出异常: + - ValueError: 如果 x 不是一个Variable + - ValueError: 如果axis的范围不在 [0, rank(x)] 范围内 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + x = fluid.layers.data(name="x", shape=[4, 4, 3], append_batch_size=False, dtype="float32") + # x shape is [4, 4, 3] + out = fluid.layers.flatten(x=x, axis=2) + # out shape is [16, 3] + + + diff --git a/doc/paddle/api/paddle/tensor/manipulation/flip_cn.rst b/doc/paddle/api/paddle/tensor/manipulation/flip_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4af69dda8062f76a330cd8b269a1cb256d7ae2b2 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/manipulation/flip_cn.rst @@ -0,0 +1,41 @@ +.. _cn_api_tensor_flip: + +flip +------------------------------- + +.. py:function:: paddle.flip(x, axis, name=None): + + + + +该OP沿指定轴反转n维tensor. + +参数: + - **x** (Variable) - 输入张量。维度为多维,数据类型为bool, int32, int64, float32或float64。 + - **axis** (list) - 需要翻转的轴。当 ``axis[i] < 0`` 时,实际的计算维度为 ndim(x) + axis[i],其中i为axis的索引。 + - **name** (str|None) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。默认值为None。 + +返回:在指定axis上翻转后的张量,与输入x数据类型相同。 + +返回类型:Variable,与输入x数据类型相同。 + +抛出异常: + - ``TypeError`` - 当输出 ``out`` 和输入 ``x`` 数据类型不一致时候。 + - ``ValueError`` - 当参数 ``axis`` 不合法时。 + +**代码示例1**: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.enable_imperative() + + image_shape=(3, 2, 2) + x = np.arange(image_shape[0] * image_shape[1] * image_shape[2]).reshape(image_shape) + x = x.astype('float32') + img = paddle.imperative.to_variable(x) + out = paddle.flip(img, [0,1]) + print(out) # [[[10,11][8, 9]],[[6, 7],[4, 5]] [[2, 3],[0, 1]]] + diff --git a/doc/paddle/api/paddle/tensor/manipulation/gather_cn.rst b/doc/paddle/api/paddle/tensor/manipulation/gather_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7cda6107ba2e08103d49dc65a5059341a024a0fd --- /dev/null +++ b/doc/paddle/api/paddle/tensor/manipulation/gather_cn.rst @@ -0,0 +1,47 @@ +.. 
_cn_api_paddle_tensor_gather +gather +------------------------------- + +.. py:function:: paddle.gather(x, index, axis=None, name=None) + +根据索引 index 获取输入 ``x`` 的指定 ``aixs`` 维度的条目,并将它们拼接在一起。 + +.. code-block:: text + + X = [[1, 2], + [3, 4], + [5, 6]] + + Index = [1, 2] + + axis = 0 + + Then: + + Out = [[3, 4], + [5, 6]] + +**参数**: + - **x** (Tensor) - 输入 Tensor, 秩 ``rank >= 1`` , 支持的数据类型包括 int32、int64、float32、float64 和 uint8 (CPU)、float16(GPU) 。 + - **index** (Tensor) - 索引 Tensor,秩 ``rank = 1``, 数据类型为 int32 或 int64。 + - **axis** (Tensor) - 指定index 获取输入的维度, ``axis`` 的类型可以是int或者Tensor,当 ``axis`` 为Tensor的时候其数据类型为int32 或者int64。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +**返回**:和输入的秩相同的输出Tensor。 + + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle + + paddle.disable_static() + input_1 = np.array([[1,2],[3,4],[5,6]]) + index_1 = np.array([0,1]) + input = paddle.to_tensor(input_1) + index = paddle.to_tensor(index_1) + output = paddle.gather(input, index, axis=0) + # expected output: [[1,2],[3,4]] + diff --git a/doc/paddle/api/paddle/tensor/manipulation/gather_nd_cn.rst b/doc/paddle/api/paddle/tensor/manipulation/gather_nd_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8aa603b1e614d02f64900271827991a5a3edd6cd --- /dev/null +++ b/doc/paddle/api/paddle/tensor/manipulation/gather_nd_cn.rst @@ -0,0 +1,76 @@ +.. _cn_api_tensor_cn_gather_nd: + +gather_nd +------------------------------- +.. py:function:: paddle.gather_nd(x, index, name=None) + + +该OP是 :code:`gather` 的高维推广,并且支持多轴同时索引。 :code:`index` 是一个K维度的张量,它可以认为是从 :code:`x` 中取K-1维张量,每一个元素是一个切片: + +.. math:: + output[(i_0, ..., i_{K-2})] = x[index[(i_0, ..., i_{K-2})]] + +显然, :code:`index.shape[-1] <= x.rank` 并且输出张量的维度是 :code:`index.shape[:-1] + x.shape[index.shape[-1]:]` 。 + +示例: + +:: + + 给定: + x = [[[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]], + [[12, 13, 14, 15], + [16, 17, 18, 19], + [20, 21, 22, 23]]] + x.shape = (2, 3, 4) + + - 案例 1: + index = [[1]] + + gather_nd(x, index) + = [x[1, :, :]] + = [[12, 13, 14, 15], + [16, 17, 18, 19], + [20, 21, 22, 23]] + + - 案例 2: + + index = [[0,2]] + gather_nd(x, index) + = [x[0, 2, :]] + = [8, 9, 10, 11] + + - 案例 3: + + index = [[1, 2, 3]] + gather_nd(x, index) + = [x[1, 2, 3]] + = [23] + + +参数: + - **x** (Tensor) - 输入Tensor,数据类型可以是int32,int64,float32,float64, bool。 + - **index** (Tensor) - 输入的索引Tensor,其数据类型int32或者int64。它的维度 :code:`index.rank` 必须大于1,并且 :code:`index.shape[-1] <= x.rank` 。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:shape为index.shape[:-1] + x.shape[index.shape[-1]:]的Tensor,数据类型与 :code:`x` 一致。 + + +**代码示例**: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + np_x = np.array([[[1, 2], [3, 4], [5, 6]], + [[7, 8], [9, 10], [11, 12]]]) + np_index = [[0, 1]] + x = paddle.to_tensor(np_x) + index = paddle.to_tensor(np_index) + + output = paddle.gather_nd(x, index) #[[3, 4]] + + diff --git a/doc/paddle/api/paddle/tensor/manipulation/reshape_cn.rst b/doc/paddle/api/paddle/tensor/manipulation/reshape_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e2a892314e8361cc76f568014bc32cb0fbb8124c --- /dev/null +++ b/doc/paddle/api/paddle/tensor/manipulation/reshape_cn.rst @@ -0,0 +1,78 @@ +.. _cn_api_fluid_layers_reshape: + +reshape +------------------------------- + +.. 
py:function:: paddle.fluid.layers.reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None) + + +该OP在保持输入 ``x`` 数据不变的情况下,改变 ``x`` 的形状。 + +目标形状可由 ``shape`` 或 ``actual_shape`` 给出。当两个属性同时被指定时,``actual_shape`` 的优先级高于 ``shape`` ,但此时 ``shape`` 只能是整数列表或元组,且在编译时仍然应该正确地设置 ``shape`` 以保证形状推断。 + +在指定目标shape时存在一些技巧: + +.. code-block:: text + + 1. -1 表示这个维度的值是从x的元素总数和剩余维度推断出来的。因此,有且只有一个维度可以被设置为-1。 + 2. 0 表示实际的维数是从x的对应维数中复制出来的,因此shape中0的索引值不能超过x的维度。 + + +这里有一些例子来解释它们: + +.. code-block:: text + + 1. 给定一个形状为[2,4,6]的三维张量x,目标形状为[6,8],则将x变换为形状为[6,8]的2-D张量,且x的数据保持不变。 + 2. 给定一个形状为[2,4,6]的三维张量x,目标形状为[2,3,-1,2],则将x变换为形状为[2,3,4,2]的4-D张量,且x的数据保持不变。在这种情况下,目标形状的一个维度被设置为-1,这个维度的值是从x的元素总数和剩余维度推断出来的。 + 3. 给定一个形状为[2,4,6]的三维张量x,目标形状为[-1,0,3,2],则将x变换为形状为[2,4,3,2]的4-D张量,且x的数据保持不变。在这种情况下,0对应位置的维度值将从x的对应维数中复制,-1对应位置的维度值由x的元素总数和剩余维度推断出来。 + +.. warning:: +参数 ``actual_shape`` 之后将被舍弃,只用参数 ``shape`` 来表示目标形状。 + +参数: + - **x** (Tensor)- N-D ``Tensor``,数据类型为 ``float32``,``float64``,``int32``,或 ``int64``。 + - **shape** (list|tuple|Tensor)- 数据类型是 ``int32`` 。定义目标形状。目标形状最多只能有一个维度为-1。如果 ``shape`` 的类型是 list 或 tuple, 它的元素可以是整数或者形状为[1]的 ``Tensor``。如果 ``shape`` 的类型是 ``Tensor``,则是1-D的 ``Tensor``。 + - **actual_shape** (Tensor,可选)- 1-D ``Tensor``,默认值:`None`。如果 ``actual_shape`` 被提供,``actual_shape`` 具有比 ``shape`` 更高的优先级,此时 ``shape`` 只能是整数列表或元组。更新提示:``actual_shape`` 在未来的版本中将被舍弃,并用 ``shape`` 代替。 + - **act** (str,可选)- 对形状改变后的输入变量做非线性激活操作,激活函数类型可以参考 :ref:`api_guide_activations` 。默认值: ``None``。 + - **inplace** (bool,可选)- 如果 ``inplace`` 为 ``True``,则 ``layers.reshape`` 的输入和输出是同一个变量,否则 ``layers.reshape`` 的输入和输出是不同的变量。默认值:``False``。请注意,如果 ``x`` 是多个OP的输入,则 ``inplace`` 必须为False。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置。默认值: ``None``。 + +返回: +::::::::: +``Tensor``,改变形状后的 ``Tensor``,数据类型与 ``x`` 相同。如果 ``inplace`` 为 ``False``,则返回一个新的变量,否则将改变输入变量 ``x`` 自身。如果 ``act`` 为 ``None``,则直接返回形状改变后的变量,否则返回经过激活函数后的变量。 + + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + # example 1: + # attr shape is a list which doesn't contain Tensors. + data_1 = fluid.data( + name='data_1', shape=[2, 4, 6], dtype='float32') + reshaped_1 = fluid.layers.reshape( + x=data_1, shape=[-1, 0, 3, 2], inplace=True) + # the shape of reshaped_1 is [2,4,3,2]. + + # example 2: + # attr shape is a list which contains Tensors. + data_2 = fluid.layers.fill_constant([2,25], "int32", 3) + dim = fluid.layers.fill_constant([1], "int32", 5) + reshaped_2 = fluid.layers.reshape(data_2, shape=[dim, 10]) + # the shape of reshaped_2 is [5,10]. + + # example 3: + data_3 = fluid.data( + name="data_3", shape=[2,4,6], dtype='float32') + reshaped_3 = fluid.layers.reshape(x=data_3, shape=[6,8]) + # the shape of reshaped_3 is [6,8]. + + + + + + + + diff --git a/doc/paddle/api/paddle/tensor/manipulation/roll_cn.rst b/doc/paddle/api/paddle/tensor/manipulation/roll_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..db43450e3c8b3dc978e18bf853a3556d18566f71 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/manipulation/roll_cn.rst @@ -0,0 +1,45 @@ +.. _cn_api_tensor_manipulation_roll: + +roll +------------------------------- + +.. 
py:function:: paddle.roll(x, shifts, axis=None, name=None): + + + + +该OP沿着指定维度 ``axis`` 对输入 ``x`` 进行循环滚动,当元素移动到最后位置时,会从第一个位置重新插入。如果 ``axis`` 为 ``None`` ,则输入在被循环滚动之前,会先展平成 ``1-D Tensor`` ,滚动操作完成后恢复成原来的形状。 + +**参数**: + - **x** (Variable)– 输入张量。 + - **shifts** (int|list|tuple) - 滚动位移。如果 ``shifts`` 是一个元组或者列表,则 ``axis`` 必须是相同大小的元组或者列表,输入张量将依次沿着每个维度滚动相应的数值。 + - **axis** (int|list|tuple, optinal) – 滚动轴。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +**返回**: + - **Variable**,数据类型同输入。 + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle + + data = np.array([[1.0, 2.0, 3.0], + [4.0, 5.0, 6.0], + [7.0, 8.0, 9.0]]) + paddle.enable_imperative() + x = paddle.imperative.to_variable(data) + out_z1 = paddle.roll(x, shifts=1) + print(out_z1.numpy()) + #[[9. 1. 2.] + # [3. 4. 5.] + # [6. 7. 8.]] + out_z2 = paddle.roll(x, shifts=1, axis=0) + print(out_z2.numpy()) + #[[7. 8. 9.] + # [1. 2. 3.] + # [4. 5. 6.]] + + diff --git a/doc/paddle/api/paddle/tensor/manipulation/scatter_cn.rst b/doc/paddle/api/paddle/tensor/manipulation/scatter_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..43a9aad6d7d9f4d06a6cd3f544ed13af6428c1bd --- /dev/null +++ b/doc/paddle/api/paddle/tensor/manipulation/scatter_cn.rst @@ -0,0 +1,78 @@ +.. _cn_api_paddle_cn_scatter: + +scatter +------------------------------- +.. py:function:: paddle.scatter(x, index, updates, overwrite=True, name=None) + + +通过基于 ``updates`` 来更新选定索引 ``index`` 上的输入来获得输出。具体行为如下: + + .. code-block:: python + + import numpy as np + #input: + x = np.array([[1, 1], [2, 2], [3, 3]]) + index = np.array([2, 1, 0, 1]) + # shape of updates should be the same as x + # shape of updates with dim > 1 should be the same as input + updates = np.array([[1, 1], [2, 2], [3, 3], [4, 4]]) + overwrite = False + # calculation: + if not overwrite: + for i in range(len(index)): + x[index[i]] = np.zeros((2)) + for i in range(len(index)): + if (overwrite): + x[index[i]] = updates[i] + else: + x[index[i]] += updates[i] + # output: + out = np.array([[3, 3], [6, 6], [1, 1]]) + out.shape # [3, 2] + +**Notice:** +因为 ``updates`` 的应用顺序是不确定的,因此,如果索引 ``index`` 包含重复项,则输出将具有不确定性。 + + +参数: + - **x** (Tensor) - ndim> = 1的输入N-D张量。 数据类型可以是float32,float64。 + - **index** (Tensor)- 一维Tensor。 数据类型可以是int32,int64。 ``index`` 的长度不能超过 ``updates`` 的长度,并且 ``index`` 中的值不能超过输入的长度。 + - **updates** (Tensor)- 根据 ``index`` 使用 ``update`` 参数更新输入 ``x`` 。 形状应与输入 ``x`` 相同,并且dim>1的dim值应与输入 ``x`` 相同。 + - **overwrite** (bool,可选)- 指定索引 ``index`` 相同时,更新输出的方式。如果为True,则使用覆盖模式更新相同索引的输出,如果为False,则使用累加模式更新相同索引的输出。默认值为True。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:Tensor,与x有相同形状和数据类型。 + + +**代码示例:** + .. 
code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + x_data = np.array([[1, 1], [2, 2], [3, 3]]).astype(np.float32) + index_data = np.array([2, 1, 0, 1]).astype(np.int64) + updates_data = np.array([[1, 1], [2, 2], [3, 3], [4, 4]]).astype(np.float32) + + x = paddle.to_tensor(x_data) + index = paddle.to_tensor(index_data) + updates = paddle.to_tensor(updates_data) + + output1 = paddle.scatter(x, index, updates, overwrite=False) + # [[3., 3.], + # [6., 6.], + # [1., 1.]] + output2 = paddle.scatter(x, index, updates, overwrite=True) + # CPU device: + # [[3., 3.], + # [4., 4.], + # [1., 1.]] + # GPU device maybe have two results because of the repeated numbers in index + # result 1: + # [[3., 3.], + # [4., 4.], + # [1., 1.]] + # result 2: + # [[3., 3.], + # [2., 2.], + # [1., 1.]] diff --git a/doc/paddle/api/paddle/tensor/manipulation/split_cn.rst b/doc/paddle/api/paddle/tensor/manipulation/split_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d7421732749cd2cb0892d98371d2ca6d0da50540 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/manipulation/split_cn.rst @@ -0,0 +1,52 @@ +.. _cn_api_paddle_tensor_split +split +------------------------------- + +.. py:function:: paddle.tensor.split(x, num_or_sections, axis=0, name=None) + + + +该OP将输入Tensor分割成多个子Tensor。 + +**参数**: + - **x** (Tensor) - 输入变量,数据类型为bool, float16, float32,float64,int32,int64的多维Tensor。 + - **num_or_sections** (int|list|tuple) - 如果 ``num_or_sections`` 是一个整数,则表示Tensor平均划分为相同大小子Tensor的数量。如果 ``num_or_sections`` 是一个list或tuple,那么它的长度代表子Tensor的数量,它的元素可以是整数或者形状为[1]的Tensor,依次代表子Tensor需要分割成的维度的大小。list或tuple的长度不能超过输入Tensor待分割的维度的大小。在list或tuple中,至多有一个元素值为-1,表示该值是由 ``x`` 的维度和其他 ``num_or_sections`` 中元素推断出来的。例如对一个维度为[4,6,6]Tensor的第三维进行分割时,指定 ``num_or_sections=[2,-1,1]`` ,输出的三个Tensor维度分别为:[4,6,2],[4,6,3],[4,6,1]。 + - **axis** (int|Tensor,可选) - 整数或者形状为[1]的Tensor,数据类型为int32或int64。表示需要分割的维度。如果 ``axis < 0`` ,则划分的维度为 ``rank(x) + axis`` 。默认值为0。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:分割后的Tensor列表。 + + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle + + paddle.disable_static() + # x is a Tensor which shape is [3, 9, 5] + x_np = np.random.random([3, 9, 5]).astype("int32") + x = paddle.to_tensor(x_np) + + out0, out1, out22 = paddle.split(x, num_or_sections=3, axis=1) + # out0.shape [3, 3, 5] + # out1.shape [3, 3, 5] + # out2.shape [3, 3, 5] + + out0, out1, out2 = paddle.split(x, num_or_sections=[2, 3, 4], axis=1) + # out0.shape [3, 2, 5] + # out1.shape [3, 3, 5] + # out2.shape [3, 4, 5] + + out0, out1, out2 = paddle.split(x, num_or_sections=[2, 3, -1], axis=1) + # out0.shape [3, 2, 5] + # out1.shape [3, 3, 5] + # out2.shape [3, 4, 5] + + # axis is negative, the real axis is (rank(x) + axis) which real + # value is 1. + out0, out1, out2 = paddle.split(x, num_or_sections=3, axis=-2) + # out0.shape [3, 3, 5] + # out1.shape [3, 3, 5] + # out2.shape [3, 3, 5] diff --git a/doc/paddle/api/paddle/tensor/manipulation/stack_cn.rst b/doc/paddle/api/paddle/tensor/manipulation/stack_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..33953b9821290741b713d1a2eedcee1432522075 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/manipulation/stack_cn.rst @@ -0,0 +1,80 @@ +.. _cn_api_paddle_tensor_arange +stack +------------------------------- + +.. 
py:function:: paddle.tensor.stack(x, axis=0, name=None) + + + +该OP沿 axis 轴对输入 x 进行堆叠操作。要求所有输入Tensor有相同的Shape和数据类型。 +例如,输入 x 为 N 个 Shape 为 [A, B]的 Tensor, 如果 ``axis==0`` , 则输出 Tensor 的 Shape 为 [N, A, B]; 如果 ``axis==1`` , 则输出 Tensor 的 Shape 为 [A, N, B]; 以此类推。 + +.. code-block:: text + + Case 1: + + Input: + x[0].shape = [1, 2] + x[0].data = [ [1.0 , 2.0 ] ] + x[1].shape = [1, 2] + x[1].data = [ [3.0 , 4.0 ] ] + x[2].shape = [1, 2] + x[2].data = [ [5.0 , 6.0 ] ] + + Attrs: + axis = 0 + + Output: + Out.dims = [3, 1, 2] + Out.data =[ [ [1.0, 2.0] ], + [ [3.0, 4.0] ], + [ [5.0, 6.0] ] ] + + + Case 2: + + Input: + x[0].shape = [1, 2] + x[0].data = [ [1.0 , 2.0 ] ] + x[1].shape = [1, 2] + x[1].data = [ [3.0 , 4.0 ] ] + x[2].shape = [1, 2] + x[2].data = [ [5.0 , 6.0 ] ] + + + Attrs: + axis = 1 or axis = -2 # If axis = -2, axis = axis+ndim(x[0])+1 = -2+2+1 = 1. + + Output: + Out.shape = [1, 3, 2] + Out.data =[ [ [1.0, 2.0] + [3.0, 4.0] + [5.0, 6.0] ] ] + +**参数**: + - **x** (list[Tensor]|tuple[Tensor]) – 输入 x 是多个Tensor,且这些Tensor的维度和数据类型必须相同。支持的数据类型: float32,float64,int32,int64。 + + - **axis** (int, 可选) – 指定对输入Tensor进行堆叠运算的轴,有效 axis 的范围是: [−(R+1),R+1],R是输入中第一个Tensor的维数。如果 axis < 0,则 axis=axis+R+1 。默认值为0。 + + - **name** (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +**返回**:堆叠运算后的Tensor,数据类型与输入Tensor相同。 + +**返回类型**:Variable + +**代码示例**: + +.. code-block:: python + + import paddle + paddle.disable_static() + x1 = paddle.to_tensor([[1.0, 2.0]]) + x2 = paddle.to_tensor([[3.0, 4.0]]) + x3 = paddle.to_tensor([[5.0, 6.0]]) + + out = paddle.stack([x1, x2, x3], axis=0) + print(out.shape) # [3, 1, 2] + print(out.numpy()) + # [[[1., 2.]], + # [[3., 4.]], + # [[5., 6.]]] diff --git a/doc/paddle/api/paddle/tensor/manipulation/tile_cn.rst b/doc/paddle/api/paddle/tensor/manipulation/tile_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3d8e989d4fbf50ce7e87148fd101adc417b567e5 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/manipulation/tile_cn.rst @@ -0,0 +1,45 @@ +.. _cn_api_tensor_tile: + +tile +------------------------------- + +.. py:function:: paddle.tile(x, repeat_times, name=None) + +根据参数 ``repeat_times`` 对输入 ``x`` 的各维度进行复制。 + +``x`` 的维数和 ``repeat_times`` 中的元素数量应小于等于6,并且repeat_times中的元素数量应该小于等于6。 + +参数 +::::::::: + - x (Tensor) - 输入的Tensor,数据类型为:bool、float32、float64、int32或int64。 + - repeat_times (list|tuple|Tensor) - 指定输入 ``x`` 每个维度的复制次数。如果 ``repeat_times`` 的类型是list或tuple,它的元素可以是整数或者数据类型为int32的1-D Tensor。如果 ``repeat_times`` 的类型是Tensor,则是数据类型为int32的1-D Tensor。 + - name (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +返回 +::::::::: +``Tensor`` ,数据类型与 ``x`` 相同。返回值的第i维的大小等于 ``x[i] * repeat_times[i]`` 。 + +代码示例 +::::::::: + +.. 
code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + np_data = np.array([1, 2, 3]).astype('int32') + data = paddle.to_tensor(np_data) + out = paddle.tile(data, repeat_times=[2, 1]) + np_out = out.numpy() + # [[1, 2, 3], [1, 2, 3]] + + out = paddle.tile(data, repeat_times=[2, 2]) + np_out = out.numpy() + # [[1, 2, 3, 1, 2, 3], [1, 2, 3, 1, 2, 3]] + + np_repeat_times = np.array([2, 1]).astype("int32") + repeat_times = paddle.to_tensor(np_repeat_times) + out = paddle.tile(data, repeat_times=repeat_times) + np_out = out.numpy() + # [[1, 2, 3], [1, 2, 3]] diff --git a/doc/paddle/api/paddle/tensor/manipulation/unbind_cn.rst b/doc/paddle/api/paddle/tensor/manipulation/unbind_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f05aa5f0660d2247ff944884df7fe0d00d673f8f --- /dev/null +++ b/doc/paddle/api/paddle/tensor/manipulation/unbind_cn.rst @@ -0,0 +1,36 @@ +.. _cn_api_paddle_tensor_unbind +unbind +------------------------------- + +.. py:function:: paddle.tensor.unbind(input, axis=0) + + + + +该OP将输入Tensor按照指定的维度分割成多个子Tensor。 + +**参数**: + - **input** (Variable) - 输入变量,数据类型为float32,float64,int32,int64的多维Tensor。 + - **axis** (int32|int64,可选) - 数据类型为int32或int64,表示需要分割的维度。如果axis < 0,则划分的维度为rank(input) + axis。默认值为0。 + +**返回**:分割后的Tensor列表。 + +**返回类型**:列表(Variable),数据类型为int32,int64,float32,float64。 + +**代码示例**: + +.. code-block:: python + + import paddle + # input is a variable which shape is [3, 4, 5] + input = paddle.fluid.data( + name="input", shape=[3, 4, 5], dtype="float32") + [x0, x1, x2] = paddle.tensor.unbind(input, axis=0) + # x0.shape [4, 5] + # x1.shape [4, 5] + # x2.shape [4, 5] + [x0, x1, x2, x3] = paddle.tensor.unbind(input, axis=1) + # x0.shape [3, 5] + # x1.shape [3, 5] + # x2.shape [3, 5] + # x3.shape [3, 5] diff --git a/doc/paddle/api/paddle/tensor/manipulation/unique_cn.rst b/doc/paddle/api/paddle/tensor/manipulation/unique_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f824b73ee70c000258ed8bcd2b83e8e270ba406f --- /dev/null +++ b/doc/paddle/api/paddle/tensor/manipulation/unique_cn.rst @@ -0,0 +1,53 @@ +.. _cn_api_tensor_cn_unique: + +unique +------------------------------- + +.. py:function:: paddle.unique(x, return_index=False, return_inverse=False, return_counts=False, axis=None, dtype="int64", name=None) + +返回Tensor按升序排序后的独有元素。 + +参数: + - **x** (Tensor) - 输入的 `Tensor` ,数据类型为:float32、float64、int32、int64。 + - **return_index** (bool, 可选) - 如果为True,则还返回独有元素在输入Tensor中的索引。 + - **return_inverse** (bool, 可选) - 如果为True,则还返回输入Tensor的元素对应在独有元素中的索引,该索引可用于重构输入Tensor。 + - **return_counts** (bool, 可选) - 如果为True,则还返回每个独有元素在输入Tensor中的个数。 + - **axis** (int, 可选) - 指定选取独有元素的轴。默认值为None,将输入平铺为1-D的Tensor后再选取独有元素。 + - **dtype** (np.dtype|str, 可选) - 用于设置 `index`,`inverse` 或者 `counts` 的类型,应该为int32或者int64。默认:int64. + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回: + - **out** (Tensor) - 独有元素构成的Tensor,数据类型与输入一致。 + - **index** (Tensor, 可选) - 独有元素在输入Tensor中的索引,仅在 `return_index` 为True时返回。 + - **inverse** (Tensor, 可选) - 输入Tensor的元素对应在独有元素中的索引,仅在 `return_inverse` 为True时返回。 + - **counts** (Tensor, 可选) - 每个独有元素在输入Tensor中的个数,仅在 `return_counts` 为True时返回。 + +**代码示例**: + +.. 
code-block:: python + + import numpy as np + import paddle + + paddle.disable_static() + x_data = np.array([2, 3, 3, 1, 5, 3]) + x = paddle.to_tensor(x_data) + unique = paddle.unique(x) + np_unique = unique.numpy() # [1 2 3 5] + _, indices, inverse, counts = paddle.unique(x, return_index=True, return_inverse=True, return_counts=True) + np_indices = indices.numpy() # [3 0 1 4] + np_inverse = inverse.numpy() # [1 2 2 0 3 2] + np_counts = counts.numpy() # [1 1 3 1] + + x_data = np.array([[2, 1, 3], [3, 0, 1], [2, 1, 3]]) + x = paddle.to_tensor(x_data) + unique = paddle.unique(x) + np_unique = unique.numpy() # [0 1 2 3] + + unique = paddle.unique(x, axis=0) + np_unique = unique.numpy() + # [[2 1 3] + # [3 0 1]] + + + diff --git a/doc/paddle/api/paddle/tensor/manipulation/unsqueeze_cn.rst b/doc/paddle/api/paddle/tensor/manipulation/unsqueeze_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e8dbe24681b39af44735fa7a651bc85f09c8f9dc --- /dev/null +++ b/doc/paddle/api/paddle/tensor/manipulation/unsqueeze_cn.rst @@ -0,0 +1,36 @@ +.. _cn_api_paddle_tensor_unsqueeze +unsqueeze +------------------------------- + +.. py:function:: paddle.tensor.unsqueeze(x, axis, name=None) + +该OP向输入Tensor的Shape中一个或多个位置(axis)插入尺寸为1的维度。 + +**参数**: + - **x** (Variable)- 输入的 `Tensor` ,数据类型为:float32、float64、bool、int8、int32、int64。 + - **axis** (int|list|tuple|Tensor) - 表示要插入维度的位置。数据类型是 int32 。如果 axis 的类型是 list 或 tuple,它的元素可以是整数或者形状为[1]的 Tensor 。如果 axes 的类型是 Tensor,则是1-D Tensor。如果 axis 是负数,则 axis=axis+ndim(x)+1 。 + - **name** (str,可选)- 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +**返回**:扩展维度后的多维Tensor,数据类型与输入Tensor一致。 + +**返回类型**:Tensor + +**代码示例**: + +.. code-block:: python + + import paddle + + paddle.disable_static() + x = paddle.rand([5, 10]) + print(x.shape) # [5, 10] + + out1 = paddle.unsqueeze(x, axis=0) + print(out1.shape) # [1, 5, 10] + + out2 = paddle.unsqueeze(x, axis=[0, 2]) + print(out2.shape) # [1, 5, 1, 10] + + axis = paddle.fluid.dygraph.to_variable([0, 1, 2]) + out3 = paddle.unsqueeze(x, axis=axis) + print(out3.shape) # [1, 1, 1, 5, 10] diff --git a/doc/paddle/api/paddle/tensor/math/add_cn.rst b/doc/paddle/api/paddle/tensor/math/add_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..32281de5a9f86334f50e256fbdb4412e67af12af --- /dev/null +++ b/doc/paddle/api/paddle/tensor/math/add_cn.rst @@ -0,0 +1,46 @@ +.. _cn_api_tensor_add: + +add +------------------------------- + +.. py:function:: paddle.add(x, y, name=None) + + + +该OP是逐元素相加算子,输入 ``x`` 与输入 ``y`` 逐元素相加,并将各个位置的输出元素保存到返回结果中。 + +输入 ``x`` 与输入 ``y`` 必须可以广播为相同形状, 关于广播规则,请参考 :ref:`use_guide_broadcasting` + +等式为: + +.. math:: + Out = X + Y + +- :math:`X` :多维Tensor。 +- :math:`Y` :多维Tensor。 + +参数: + - x (Tensor) - 输入的Tensor,数据类型为:float32、float64、int32、int64。 + - y (Tensor) - 输入的Tensor,数据类型为:float32、float64、int32、int64。 + - name (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回: 多维Tensor, 数据类型与 ``x`` 相同, 维度为广播后的形状。 + +返回类型: Tensor + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.enable_imperative() + np_x = np.array([2, 3, 4]).astype('float64') + np_y = np.array([1, 5, 2]).astype('float64') + x = paddle.imperative.to_variable(np_x) + y = paddle.imperative.to_variable(np_y) + z = paddle.add(x, y) + np_z = z.numpy() + print(np_z) # [3., 8., 6. 
] diff --git a/doc/paddle/api/paddle/tensor/math/addcmul_cn.rst b/doc/paddle/api/paddle/tensor/math/addcmul_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..97cb77c233b7d1aee76aa083b691d3080898a9f5 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/math/addcmul_cn.rst @@ -0,0 +1,40 @@ +.. _cn_api_tensor_addcmul: + +addcmul +------------------------------- + +.. py:function:: paddle.addcmul(input, tensor1, tensor2, value=1.0, out=None, name=None) + + + + +计算tensor1和tensor2的逐元素乘积,然后将结果乘以标量value,再加到input上输出。其中input, tensor1, tensor2的维度必须是可广播的。 + +计算过程的公式为: +.. math:: + out = input + value * tensor1 * tensor2 + +参数: + - **input** (Variable) : 输入Tensor input,数据类型支持float32, float64, int32, int64。 + - **itensor1** (Variable) : 输入Tensor tensor1,数据类型支持float32, float64, int32, int64。 + - **itensor2** (Variable) : 输入Tensor tensor2,数据类型支持float32, float64, int32, int64。 + - **value** (int|float) : 乘以tensor1*tensor2的标量。如果输入input类型为float32或float64,value类型必须为float,如果输入input类型为int32或int64,value类型必须为int。 + - **out** (Variable, 可选) – 指定存储运算结果的Tensor。如果设置为None或者不设置,将创建新的Tensor存储运算结果,默认值为None。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:计算得到的Tensor。Tensor数据类型与输入input数据类型一致。 + +返回类型:变量(Variable) + +**代码示例**: + +.. code-block:: python + + import paddle + import paddle.fluid as fluid + + input = fluid.data(name='input', dtype='float32', shape=[3, 4]) + tensor1 = fluid.data(name='tenosr1', dtype='float32', shape=[1, 4]) + tensor2 = fluid.data(name='tensor2', dtype='float32', shape=[3, 4]) + data = paddle.addcmul(input, tensor1, tensor2, value=1.0) + diff --git a/doc/paddle/api/paddle/tensor/math/addmm_cn.rst b/doc/paddle/api/paddle/tensor/math/addmm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3bd44d5258fd3d49738f804eb500bde5fc12f875 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/math/addmm_cn.rst @@ -0,0 +1,55 @@ +.. _cn_api_tensor_addmm: + + +addmm +------------------------------- + +.. py:function:: paddle.addmm(input, x, y, alpha=1.0, beta=1.0, name=None) + + + + +计算x和y的乘积,将结果乘以标量alpha,再加上input与beta的乘积,得到输出。其中input与x、y乘积的维度必须是可广播的。 + +计算过程的公式为: + +.. math:: + out = alpha * x * y + beta * input + +参数: + - **input** (Variable):输入Tensor input,数据类型支持float32, float64。 + - **x** (Variable):输入Tensor x,数据类型支持float32, float64。 + - **y** (Variable):输入Tensor y,数据类型支持float32, float64。 + - **alpha** (float,可选):乘以x*y的标量,数据类型支持float32, float64,默认值为1.0。 + - **beta** (float,可选):乘以input的标量,数据类型支持float32, float64,默认值为1.0。 + - **name** (str,可选):具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:计算得到的Tensor。Tensor数据类型与输入input数据类型一致。 + +返回类型:变量(Variable) + + +**代码示例**: + +.. 
code-block:: python + + import numpy as np + import paddle + import paddle.fluid as fluid + + input = fluid.data(name='input', shape=[2, 2], dtype='float32') + x = fluid.data(name='x', shape=[2, 2], dtype='float32') + y = fluid.data(name='y', shape=[2, 2], dtype='float32') + out = paddle.addmm( input=input, x=x, y=y, alpha=5.0, beta=0.5 ) + + data_x = np.ones((2, 2)).astype(np.float32) + data_y = np.ones((2, 2)).astype(np.float32) + data_input = np.ones((2, 2)).astype(np.float32) + + place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + exe = fluid.Executor(place) + results = exe.run(fluid.default_main_program(), + fetch_list=[out], feed={"input": data_input, 'x': data_x, "y": data_y}) + print(np.array(results[0])) + # [[10.5 10.5] + # [10.5 10.5]] diff --git a/doc/paddle/api/paddle/tensor/math/clip_cn.rst b/doc/paddle/api/paddle/tensor/math/clip_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9617418636a1c4271922d42909342b78116df3b0 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/math/clip_cn.rst @@ -0,0 +1,44 @@ +.. _cn_api_tensor_clip: + +clip +------------------------------- + +.. py:function:: paddle.clip(x, min=None, max=None, name=None) + + + + +该OP将输入的所有元素进行剪裁,使得输出元素限制在[min, max]内,具体公式如下: + +.. math:: + + Out = MIN(MAX(x, min), max) + +参数: + - x (Tensor) - 输入的Tensor,数据类型为:float32、float64。 + - min (float32|Tensor, 可选) - 裁剪的最小值,输入中小于该值的元素将由该元素代替,若参数为空,则不对输入的最小值做限制。数据类型可以是float32或形状为[1]的Tensor,类型可以为int32,float32,float64,默认值为None。 + - max (float32|Tensor, 可选) - 裁剪的最大值,输入中大于该值的元素将由该元素代替,若参数为空,则不对输入的最大值做限制。数据类型可以是float32或形状为[1]的Tensor,类型可以为int32,float32,float64,默认值为None。 + - name (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回:输出Tensor,与 ``x`` 维度相同、数据类型相同。 + +返回类型:Tensor + +**代码示例**: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + x = np.array([[1.2,3.5], [4.5,6.4]]).astype('float32') + x1 = paddle.to_tensor(x) + out1 = paddle.clip(x1, min=3.5, max=5.0) + out2 = paddle.clip(x1, min=2.5) + print(out1.numpy()) + # [[3.5, 3.5] + # [4.5, 5.0]] + print(out2.numpy()) + # [[2.5, 3.5] + # [[4.5, 6.4] diff --git a/doc/paddle/api/paddle/tensor/math/cumsum_cn.rst b/doc/paddle/api/paddle/tensor/math/cumsum_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..71896b1bc17808d6b70873ef45a5587d1ad6cce1 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/math/cumsum_cn.rst @@ -0,0 +1,54 @@ +.. _cn_api_tensor_cn_cumsum: + +cumsum +------------------------------- + +.. py:function:: paddle.cumsum(x, axis=None, dtype=None, name=None) + + + +沿给定 ``axis`` 计算张量 ``x`` 的累加和。结果的第一个元素和输入的第一个元素相同。 + +参数: + - **x** (Tensor) - 累加的输入,需要进行累加操作的Tensor. + - **axis** (int,可选) - 指明需要累加的维度。-1代表最后一维。默认:None,将输入展开为一维变量再进行累加计算。 + - **dtype** (str,可选) - 输出Tensor的数据类型,支持int32、int64、float32、float64. 如果指定了,那么在执行操作之前,输入张量将被转换为dtype. 这对于防止数据类型溢出非常有用。默认为:None. + - **name** (str,可选)- 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +返回:累加的结果,即累加器的输出。 + +返回类型:Tensor + +**代码示例**: + +.. 
code-block:: python + + import paddle + from paddle.imperative import to_variable + import numpy as np + + paddle.enable_imperative() + data_np = np.arange(12).reshape(3, 4) + data = to_variable(data_np) + + y = paddle.cumsum(data) + print(y.numpy()) + # [ 0 1 3 6 10 15 21 28 36 45 55 66] + + y = paddle.cumsum(data, axis=0) + print(y.numpy()) + # [[ 0 1 2 3] + # [ 4 6 8 10] + # [12 15 18 21]] + + y = paddle.cumsum(data, axis=-1) + print(y.numpy()) + # [[ 0 1 3 6] + # [ 4 9 15 22] + # [ 8 17 27 38]] + + y = paddle.cumsum(data, dtype='float64') + print(y.dtype) + # VarType.FP64 + + diff --git a/doc/paddle/api/paddle/tensor/math/divide_cn.rst b/doc/paddle/api/paddle/tensor/math/divide_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d0b0113c24644f5e5e55b4a3a73722da63610df4 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/math/divide_cn.rst @@ -0,0 +1,43 @@ +.. _cn_api_tensor_divide: + +divide +------------------------------- + +.. py:function:: paddle.divide(x, y, name=None) + +该OP是逐元素相除算子,输入 ``x`` 与输入 ``y`` 逐元素相除,并将各个位置的输出元素保存到返回结果中。 +输入 ``x`` 与输入 ``y`` 必须可以广播为相同形状, 关于广播规则,请参考 :ref:`use_guide_broadcasting` + +等式为: + +.. math:: + Out = X / Y + +- :math:`X` :多维Tensor。 +- :math:`Y` :多维Tensor。 + +参数: + - x(Tensor)- 多维Tensor。数据类型为float32 、float64、int32或int64。 + - y(Tensor)- 多维Tensor。数据类型为float32 、float64、int32或int64。 + - name(str,可选)- 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + + +返回: 多维 Tensor, 数据类型与 ``x`` 相同,维度为广播后的形状。 + +返回类型: Tensor + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + np_x = np.array([2, 3, 4]).astype('float64') + np_y = np.array([1, 5, 2]).astype('float64') + x = paddle.to_tensor(np_x) + y = paddle.to_tensor(np_y) + z = paddle.divide(x, y) + print(z.numpy()) # [2., 0.6, 2.] diff --git a/doc/paddle/api/paddle/tensor/math/elementwise_sum_cn.rst b/doc/paddle/api/paddle/tensor/math/elementwise_sum_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a4ee39d936b8a974adec210cc2875c9ee52902ef --- /dev/null +++ b/doc/paddle/api/paddle/tensor/math/elementwise_sum_cn.rst @@ -0,0 +1,78 @@ +.. _cn_api_tensor_elementwise_sum: + +elementwise_sum +------------------------------- + +.. py:function:: paddle.elementwise_sum(inputs, name=None) + + + + +该OP用于对输入的一至多个Tensor或LoDTensor求和。如果输入的是LoDTensor,输出仅与第一个输入共享LoD信息(序列信息)。 + +例1: +:: + 输入: + input.shape = [2, 3] + input = [[1, 2, 3], + [4, 5, 6]] + + 输出: + output.shape = [2, 3] + output = [[1, 2, 3], + [4, 5, 6]] + +例2: +:: + 输入: + 第一个输入: + input1.shape = [2, 3] + input1 = [[1, 2, 3], + [4, 5, 6]] + + 第二个输入: + input2.shape = [2, 3] + input2 = [[7, 8, 9], + [10, 11, 12]] + + 输出: + output.shape = [2, 3] + output = [[8, 10, 12], + [14, 16, 18]] + +参数: + - **inputs** (Variable|list(Variable)) - 输入的一至多个Variable。如果输入了多个Variable,则不同Variable间的shape和数据类型应保持一致。Variable为多维Tensor或LoDTensor,数据类型支持:float32,float64,int32,int64。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:对输入 ``inputs`` 中的Variable求和后的结果,shape和数据类型与 ``inputs`` 一致。 + +返回类型:Variable + + +**代码示例:** + +.. 
code-block:: python + + import paddle + import paddle.fluid as fluid + input0 = fluid.layers.fill_constant(shape=[2, 3], dtype='int64', value=5) + input1 = fluid.layers.fill_constant(shape=[2, 3], dtype='int64', value=3) + sum = paddle.elementwise_sum([input0, input1]) + + #用户可以通过executor打印出求和的结果 + out = fluid.layers.Print(sum, message="the sum of input0 and input1: ") + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(fluid.default_main_program()) + + #打印出的数据为: + 1570701754 the sum of input0 and input1: The place is:CPUPlace + Tensor[elementwise_sum_0.tmp_0] + shape: [2,3,] + dtype: l + data: 8,8,8,8,8,8, + + #输出了shape为[2,3]的Tensor,与输入的shape一致 + #dtype为对应C++数据类型,在不同环境下可能显示值不同,但本质相同 + #例如:如果Tensor中数据类型是int64,则对应的C++数据类型为int64_t,所以dtype值为typeid(int64_t).name(), + # 其在MacOS下为'x',linux下为'l',Windows下为'__int64',都表示64位整型变量 + diff --git a/doc/paddle/api/paddle/tensor/math/floor_divide_cn.rst b/doc/paddle/api/paddle/tensor/math/floor_divide_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c63ae4fd10451f280169d67c8156fa30f92649d7 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/math/floor_divide_cn.rst @@ -0,0 +1,43 @@ +.. _cn_api_tensor_floor_divide: + +floor_divide +------------------------------- + +.. py:function:: paddle.floor_divide(x, y, name=None) + +该OP是逐元素整除算子,输入 ``x`` 与输入 ``y`` 逐元素整除,并将各个位置的输出元素保存到返回结果中。 +输入 ``x`` 与输入 ``y`` 必须可以广播为相同形状, 关于广播规则,请参考 :ref:`use_guide_broadcasting` + +等式为: + +.. math:: + Out = X // Y + +- :math:`X` :多维Tensor。 +- :math:`Y` :多维Tensor。 + +参数: + - x(Tensor)- 多维Tensor。数据类型为int32或int64。 + - y(Tensor)- 多维Tensor。数据类型为int32或int64。 + - name(str,可选)- 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + + +返回: 多维 Tensor, 数据类型与 ``x`` 相同,维度为广播后的形状。 + +返回类型: Tensor + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + np_x = np.array([2, 3, 8, 7]) + np_y = np.array([1, 5, 3, 3]) + x = paddle.to_tensor(np_x) + y = paddle.to_tensor(np_y) + z = paddle.floor_divide(x, y) + print(z.numpy()) # [2, 0, 2, 2] \ No newline at end of file diff --git a/doc/paddle/api/paddle/tensor/math/floor_mod_cn.rst b/doc/paddle/api/paddle/tensor/math/floor_mod_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e9b9773c09dfef8d15f85f62edd4ebcb09c86b4b --- /dev/null +++ b/doc/paddle/api/paddle/tensor/math/floor_mod_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_floor_mod: + +floor_mod +------------------------------- +:doc_source: paddle.tensor.remainder + + diff --git a/doc/paddle/api/paddle/tensor/math/inverse_cn.rst b/doc/paddle/api/paddle/tensor/math/inverse_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f70355dfd559c656be7b4233e35b5d5f58ab5a7c --- /dev/null +++ b/doc/paddle/api/paddle/tensor/math/inverse_cn.rst @@ -0,0 +1,36 @@ +.. _cn_api_tensor_inverse: + +inverse +------------------------------- + +.. py:function:: paddle.inverse(x, name=None) + + + + +计算方阵的逆。方阵是行数和列数相等的矩阵。输入可以是一个方阵(2-D张量),或者是批次方阵(维数大于2时)。 + +**参数**: + - **x** (Variable) – 输入张量,最后两维的大小必须相等。如果输入张量的维数大于2,则被视为2-D矩阵的批次(batch)。支持的数据类型:float32,float64。 + - **name** (str,可选) – 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None。 + +**返回**: 数据类型同输入。 + +返回类型: Variable + +抛出异常: + - :code:`TypeError` ,x不是Variable类型,或者数据类型不是float32、float64时 + - :code:`ValueError` ,x的维数小于2时 + +**代码示例**: + +.. 
code-block:: python + + import numpy as np + import paddle + + mat_np = np.array([[2, 0], [0, 2]]).astype("float32") + paddle.enable_imperative() + mat = paddle.imperative.to_variable(mat_np) + inv = paddle.inverse(mat) + print(inv) # [[0.5, 0], [0, 0.5]] diff --git a/doc/paddle/api/paddle/tensor/math/isfinite_cn.rst b/doc/paddle/api/paddle/tensor/math/isfinite_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0cc20e3e2335cc100d6275c6936d7bc347b17454 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/math/isfinite_cn.rst @@ -0,0 +1,30 @@ +.. _cn_api_tensor_isfinite: + +isfinite +----------------------------- + +.. py:function:: paddle.tensor.isfinite(x, name=None) + +返回输入tensor的每一个值是否为 `Finite` (既非 `+/-INF` 也非 `+/-NaN` )。 + +参数 +::::::::: + - **x** (Tensor): 输入的 `Tensor` ,数据类型为:float16、float32、float64、int32、int64。 + - **name** (str, 可选): 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +返回 +::::::::: +``Tensor``, 每个元素是一个bool值,表示输入 `x` 的每个元素是否为 `Finite` (既非 `+/-INF` 也非 `+/-NaN` )。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + x_np = np.array([float('-inf'), -2, 3.6, float('inf'), 0, float('-nan'), float('nan')]) + x = paddle.to_tensor(x_np) + out = paddle.tensor.isfinite(x) + print(out.numpy()) # [False True True False True False False] diff --git a/doc/paddle/api/paddle/tensor/math/isinf_cn.rst b/doc/paddle/api/paddle/tensor/math/isinf_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1fdd20485f6c504d5dfcbd18198a5475eac79872 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/math/isinf_cn.rst @@ -0,0 +1,30 @@ +.. _cn_api_tensor_isinf: + +isinf +----------------------------- + +.. py:function:: paddle.tensor.isinf(x, name=None) + +返回输入tensor的每一个值是否为 `+/-INF` 。 + +参数 +::::::::: + - **x** (Tensor): 输入的 `Tensor` ,数据类型为:float16、float32、float64、int32、int64。 + - **name** (str, 可选): 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +返回 +::::::::: +``Tensor``, 每个元素是一个bool值,表示输入 `x` 的每个元素是否为 `+/-INF` 。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + x_np = np.array([float('-inf'), -2, 3.6, float('inf'), 0, float('-nan'), float('nan')]) + x = paddle.to_tensor(x_np) + out = paddle.tensor.isinf(x) + print(out.numpy()) # [ True False False True False False False] diff --git a/doc/paddle/api/paddle/tensor/math/isnan_cn.rst b/doc/paddle/api/paddle/tensor/math/isnan_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5765ebf184254b87a72c9b5eb8a142d6cef879b1 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/math/isnan_cn.rst @@ -0,0 +1,30 @@ +.. _cn_api_tensor_isnan: + +isnan +----------------------------- + +.. py:function:: paddle.tensor.isnan(x, name=None) + +返回输入tensor的每一个值是否为 `+/-NaN` 。 + +参数 +::::::::: + - **x** (Tensor): 输入的 `Tensor` ,数据类型为:float16、float32、float64、int32、int64。 + - **name** (str, 可选): 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +返回 +::::::::: +``Tensor``, 每个元素是一个bool值,表示输入 `x` 的每个元素是否为 `+/-NaN` 。 + +代码示例 +::::::::: + +.. 
code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + x_np = np.array([float('-inf'), -2, 3.6, float('inf'), 0, float('-nan'), float('nan')]) + x = paddle.to_tensor(x_np) + out = paddle.tensor.isnan(x) + print(out.numpy()) # [False False False False False True True] diff --git a/doc/paddle/api/paddle/tensor/math/kron_cn.rst b/doc/paddle/api/paddle/tensor/math/kron_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..67141c0ce6267c1287d141680e1bf50903aca279 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/math/kron_cn.rst @@ -0,0 +1,76 @@ +.. _cn_api_paddle_tensor_kron: + +kron +------------------------------- + +.. py:function:: paddle.tensor.kron(x, y, out=None, name=None) + + + + + +Kronecker Product 算子。 + +该 OP 计算两个张量的克罗内克积,结果是一个合成的张量,由第二个张量经过第一个张量中的元素缩放 +后的组块构成。 + + +这个 OP 预设两个张量 $X$ 和 $Y$ 的秩 (rank) 相同,如有必要,将会在秩较小的张量的形状前面补 +上 1。令 $X$ 的形状是 [$r_0$, $r_1$, ..., $r_N$],$Y$ 的形状是 +[$s_0$, $s_1$, ..., $s_N$],那么输出张量的形状是 +[$r_{0}s_{0}$, $r_{1}s_{1}$, ..., $r_{N}s_{N}$]. 其中的元素是 $X$ 和 $Y$ 中的元素 +的乘积。 + +公式为 + +.. math:: + + output[k_{0}, k_{1}, ..., k_{N}] = X[i_{0}, i_{1}, ..., i_{N}] * + Y[j_{0}, j_{1}, ..., j_{N}] + + +其中 + +.. math:: + + k_{t} = i_{t} * s_{t} + j_{t}, t = 0, 1, ..., N + + +参数: + - **x** (Variable) – Kron OP 的第一个输入。多维 Tensor,数据类型为 float16, float32, float64, int32 或 int64。 + - **y** (Variable) – Kron OP 的第二个输入。多维 Tensor,数据类型为 float16, float32, float64, int32 或 int64,与 x 相同。 + - **out** (Variable, 可选) - 指定算子输出结果的 Tensor,可以是程序中已经创建的任何 Variable。默认值为 None,此时将创建新的 Variable 来保存输出结果。 + - **name** (str,可选) – 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为 None。 + +返回: + - Kron OP 的输出。多维 Tensor,数据类型为 float16, float32, float64, int32 或 int64,与 x 一致。 + +返回类型: Variable + + +**代码示例** + +.. code-block:: python + + import paddle + from paddle import fluid + import paddle.fluid.dygraph as dg + import numpy as np + + a = np.arange(1, 5).reshape(2, 2).astype(np.float32) + b = np.arange(1, 10).reshape(3, 3).astype(np.float32) + + place = fluid.CPUPlace() + with dg.guard(place): + a_var = dg.to_variable(a) + b_var = dg.to_variable(b) + c_var = paddle.kron(a_var, b_var) + c_np = c_var.numpy() + print(c_np) + + #[[ 1. 2. 3. 2. 4. 6.] + # [ 4. 5. 6. 8. 10. 12.] + # [ 7. 8. 9. 14. 16. 18.] + # [ 3. 6. 9. 4. 8. 12.] + # [12. 15. 18. 16. 20. 24.] + # [21. 24. 27. 28. 32. 36.]] diff --git a/doc/paddle/api/paddle/tensor/math/log1p_cn.rst b/doc/paddle/api/paddle/tensor/math/log1p_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..211edb789fb300ee09763f10c09d7f9b9d73b63e --- /dev/null +++ b/doc/paddle/api/paddle/tensor/math/log1p_cn.rst @@ -0,0 +1,42 @@ +.. _cn_api_paddle_tensor_log1p: + +log1p +------------------------------- + +.. py:function:: paddle.log1p(x, name=None) + + + + + +该OP计算Log1p(加一的自然对数)结果。 + +.. math:: + \\Out=ln(x+1)\\ + + +参数: + - **x** (Tensor) – 指定输入为一个多维的Tensor。数据类型为float32,float64。 + - **name** (str,可选) – 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None。 + +返回:Log1p算子自然对数输出 + +返回类型: Tensor - 该OP的输出为一个多维的Tensor,数据类型为输入一致。 + + +**代码示例** + +.. 
code-block:: python + + import paddle + import numpy as np + + paddle.enable_imperative() + x = np.array([[1, 2], [3, 4]]).astype('float32') + x1 = paddle.imperative.to_variable(x) + + out1 = paddle.log1p(x1) + print(out1.numpy()) + # [[0.6931472 1.0986123] + # [1.3862944 1.609438 ]] + diff --git a/doc/paddle/api/paddle/tensor/math/logsumexp_cn.rst b/doc/paddle/api/paddle/tensor/math/logsumexp_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c95d8d611840f53c99f2205dad88139a0633694f --- /dev/null +++ b/doc/paddle/api/paddle/tensor/math/logsumexp_cn.rst @@ -0,0 +1,37 @@ +.. _cn_api_paddle_tensor_math_logsumexp: + +logsumexp +------------------------------- + +.. py:function:: paddle.tensor.math.logsumexp(x, axis=None, keepdim=False, name=None) + +该OP沿着 ``axis`` 计算 ``x`` 的以e为底的指数的和的自然对数。计算公式如下: + +.. math:: + logsumexp(x) = \log\sum exp(x) + +参数 +:::::::::: + - x (Tensor) - 输入的Tensor,数据类型为:float32、float64 。 + - axis (int|list|tuple, 可选) - 指定对 ``x`` 进行计算的轴。``axis`` 可以是int、list(int)、tuple(int)。如果 ``axis`` 包含多个维度,则沿着 ``axis`` 中的所有轴进行计算。``axis`` 或者其中的元素值应该在范围[-D, D)内,D是 ``x`` 的维度。如果 ``axis`` 或者其中的元素值小于0,则等价于 :math:`axis + D` 。如果 ``axis`` 是None,则对 ``x`` 的全部元素计算平均值。默认值为None。 + - keepdim (bool, 可选) - 是否在输出Tensor中保留减小的维度。如果 ``keepdim`` 为True,则输出Tensor和 ``x`` 具有相同的维度(减少的维度除外,减少的维度的大小为1)。否则,输出Tensor的形状会在 ``axis`` 上进行squeeze操作。默认值为False。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +:::::::::: + ``Tensor`` ,沿着 ``axis`` 进行logsumexp计算的结果,数据类型和 ``x`` 相同。 + +代码示例 +:::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + x = np.array([[-1.5, 0., 2.], [3., 1.2, -2.4]]) + x = paddle.to_tensor(x) + out1 = paddle.logsumexp(x) # [3.4691226] + out2 = paddle.logsumexp(x, 1) # [2.15317821, 3.15684602] diff --git a/doc/paddle/api/paddle/tensor/math/max_cn.rst b/doc/paddle/api/paddle/tensor/math/max_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..62a805fd95b96081c75982bfb3cc8df44881729a --- /dev/null +++ b/doc/paddle/api/paddle/tensor/math/max_cn.rst @@ -0,0 +1,56 @@ +.. _cn_api_paddle_tensor_max: + +max +------------------------------- + +.. py:function:: paddle.tensor.max(x, axis=None, keepdim=False, name=None) + + +该OP是对指定维度上的Tensor元素求最大值运算,并输出相应的计算结果。 + +参数 +::::::::: + - **x** (Tensor)- Tensor,支持数据类型为float32,float64,int32,int64。 + - **axis** (list | int ,可选)- 求最大值运算的维度。如果为None,则计算所有元素的最大值并返回包含单个元素的Tensor变量,否则必须在 :math:`[-x.ndim, x.ndim]` 范围内。如果 :math:`axis[i] <0` ,则维度将变为 :math:`x.ndim+axis[i]` ,默认值为None。 + - **keepdim** (bool)- 是否在输出Tensor中保留减小的维度。如果keepdim 为 False,结果张量的维度将比输入张量的小,默认值为False。 + - **name** (str, 可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回 +::::::::: + Tensor, 在指定axis上进行求最大值运算的Tensor,数据类型和输入数据类型一致。 + + +代码示例 +:::::::::: + +.. code-block:: python + + import paddle + paddle.disable_static() + + # the axis is a int element + x = paddle.to_tensor([[0.2, 0.3, 0.5, 0.9], + [0.1, 0.2, 0.6, 0.7]])) + result1 = paddle.max(x) + print(result1.numpy()) + #[0.9] + result2 = paddle.max(x, axis=0) + print(result2.numpy()) + #[0.2 0.3 0.6 0.9] + result3 = paddle.max(x, axis=-1) + print(result3.numpy()) + #[0.9 0.7] + result4 = paddle.max(x, axis=1, keepdim=True) + print(result4.numpy()) + #[[0.9] + # [0.7]] + + # the axis is list + y = paddle.to_tensor([[[1.0, 2.0], [3.0, 4.0]], + [[5.0, 6.0], [7.0, 8.0]]]) + result5 = paddle.max(y, axis=[1, 2]) + print(result5.numpy()) + #[4. 8.] + result6 = paddle.max(y, axis=[0, 1]) + print(result6.numpy()) + #[7. 8.] 
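+**注:** ``paddle.max`` 是沿指定维度的归约运算,与下文逐元素比较两个Tensor的 ``paddle.maximum`` 容易混淆。下面的对比示例仅作示意(按本页与下文给出的函数签名假设编写):
+
+.. code-block:: python
+
+    import paddle
+    paddle.disable_static()
+
+    x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
+    y = paddle.to_tensor([[0.5, 5.0], [2.0, 3.0]])
+
+    # paddle.max:对单个Tensor沿axis归约,输出维度减小
+    print(paddle.max(x, axis=1).numpy())   # [2. 4.]
+
+    # paddle.maximum:对两个Tensor逐元素取较大值,输出shape与输入一致
+    print(paddle.maximum(x, y).numpy())
+    # [[1. 5.]
+    #  [3. 4.]]
+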
diff --git a/doc/paddle/api/paddle/tensor/math/maximum_cn.rst b/doc/paddle/api/paddle/tensor/math/maximum_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..14d43ff8600ce6cc5c91158f278afbe1a203fb68 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/math/maximum_cn.rst @@ -0,0 +1,85 @@ +.. _cn_api_paddle_tensor_maximum: + +maximum +------------------------------- + +.. py:function:: paddle.tensor.maximum(x, y, axis=-1, name=None) + + +该OP逐元素对比输入的两个多维Tensor,并且把各个位置更大的元素保存到返回结果中。 + +等式是: + +.. math:: + Out = max(X, Y) + +- :math:`X` :多维Tensor。 +- :math:`Y` :多维Tensor。 + +此运算算子有两种情况: + 1. :math:`Y` 的 ``shape`` 与 :math:`X` 相同。 + 2. :math:`Y` 的 ``shape`` 是 :math:`X` 的连续子序列。 + +对于情况2: + 1. 用 :math:`Y` 的 ``shape`` 匹配 :math:`X` 的 ``shape``,其中 ``axis`` 是 :math:`Y` 在 :math:`X` 上的起始维度的位置。 + 2. 如果 ``axis`` < 0(默认值为-1),则 :math:`axis = abs(X.ndim - Y.ndim) - axis - 1` 。 + 3. 考虑到子序列, :math:`Y` 的大小为1的尾部维度将被忽略,例如shape(Y)=(2,1)=>(2)。 + +例如: + +.. code-block:: text + + shape(X) = (2, 3, 4, 5), shape(Y) = (,) + shape(X) = (2, 3, 4, 5), shape(Y) = (5,) + shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5), with axis=-1(default) or axis=2 + shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1 + shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0 + shape(X) = (2, 3, 4, 5), shape(Y) = (2, 1), with axis=0 + +具体的飞桨的广播(broadcasting)机制可以参考 `<> `_ 。 + +参数 +::::::::: + - **x** (Tensor)- 多维Tensor。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64`` 。 + - **y** (Tensor)- 多维Tensor。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64`` 。 + - **axis** (int32, 可选)- Y的维度对应到X维度上时的索引。默认值为 -1。 + - **name** (string, 可选)- 输出的名字。默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 + +返回 +::::::::: + Tensor,维度和数据类型与 ``x`` 相同的多维Tensor。 + +代码示例 +:::::::::: + +.. code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + + x = paddle.to_tensor([[1, 2], [3, 4]]) + y = paddle.to_tensor([[5, 6], [7, 8]]) + res = paddle.maximum(x, y) + print(res.numpy()) + #[[5. 6.] + # [7. 8.]] + + x = paddle.to_tensor([[[1, 2, 3], [1, 2, 3]]]) + y = paddle.to_tensor([1, 2]) + res = paddle.maximum(x, y, axis=1) + print(res.numpy()) + #[[[1. 2. 3.] + # [2. 2. 3.]]] + + x = paddle.to_tensor([2, 3, 5], dtype='float32') + y = paddle.to_tensor([1, 4, np.nan], dtype='float32') + res = paddle.maximum(x, y) + print(res.numpy()) + #[ 2. 4. nan] + + x = paddle.to_tensor([5, 3, np.inf], dtype='float32') + y = paddle.to_tensor([1, 4, 5], dtype='float32') + res = paddle.maximum(x, y) + print(res.numpy()) + #[ 5. 4. inf] diff --git a/doc/paddle/api/paddle/tensor/math/min_cn.rst b/doc/paddle/api/paddle/tensor/math/min_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a242a37cff72a3738759e445f47fe2915b758ae6 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/math/min_cn.rst @@ -0,0 +1,55 @@ +.. _cn_api_paddle_tensor_min: + +min +------------------------------- + +.. py:function:: paddle.tensor.min(x, axis=None, keepdim=False, name=None) + + +该OP是对指定维度上的Tensor元素求最小值运算,并输出相应的计算结果。 + +参数 +::::::::: + - **x** (Tensor)- Tensor,支持数据类型为float32,float64,int32,int64。 + - **axis** (list | int ,可选)- 求最小值运算的维度。如果为None,则计算所有元素的最小值并返回包含单个元素的Tensor变量,否则必须在 :math:`[−x.ndim, x.ndim]` 范围内。如果 :math:`axis[i] < 0` ,则维度将变为 :math:`x.ndim+axis[i]` ,默认值为None。 + - **keepdim** (bool)- 是否在输出Tensor中保留减小的维度。如果keepdim 为False,结果张量的维度将比输入张量的小,默认值为False。 + - **name** (str, 可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回 +::::::::: + Tensor,在指定axis上进行求最小值运算的Tensor,数据类型和输入数据类型一致。 + + +代码示例 +:::::::::: +.. 
code-block:: python + + import paddle + paddle.disable_static() + + # the axis is a int element + x = paddle.to_tensor([[0.2, 0.3, 0.5, 0.9], + [0.1, 0.2, 0.6, 0.7]]) + result1 = paddle.min(x) + print(result1.numpy()) + #[0.1] + result2 = paddle.min(x, axis=0) + print(result2.numpy()) + #[0.1 0.2 0.5 0.7] + result3 = paddle.min(x, axis=-1) + print(result3.numpy()) + #[0.2 0.1] + result4 = paddle.min(x, axis=1, keepdim=True) + print(result4.numpy()) + #[[0.2] + # [0.1]] + + # the axis is list + y = paddle.to_tensor([[[1.0, 2.0], [3.0, 4.0]], + [[5.0, 6.0], [7.0, 8.0]]]) + result5 = paddle.min(y, axis=[1, 2]) + print(result5.numpy()) + #[1. 5.] + result6 = paddle.min(y, axis=[0, 1]) + print(result6.numpy()) + #[1. 2.] diff --git a/doc/paddle/api/paddle/tensor/math/minimum_cn.rst b/doc/paddle/api/paddle/tensor/math/minimum_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6fdf6dda658f1d30601d7605dfb4154b9cc7de06 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/math/minimum_cn.rst @@ -0,0 +1,85 @@ +.. _cn_api_paddle_tensor_minimum: + +minimum +------------------------------- + +.. py:function:: paddle.tensor.minimum(x, y, axis=-1, name=None) + + +该OP逐元素对比输入的两个多维Tensor,并且把各个位置更小的元素保存到返回结果中。 + +等式是: + +.. math:: + Out = min(X, Y) + +- :math:`X` :多维Tensor。 +- :math:`Y` :多维Tensor。 + +此运算算子有两种情况: + 1. :math:`Y` 的 ``shape`` 与 :math:`X` 相同。 + 2. :math:`Y` 的 ``shape`` 是 :math:`X` 的连续子序列。 + +对于情况2: + 1. 用 :math:`Y` 的 ``shape`` 匹配 :math:`X` 的 ``shape``,其中 ``axis`` 是 :math:`Y` 在 :math:`X` 上的起始维度的位置。 + 2. 如果 ``axis`` < 0(默认值为-1),则 :math:`axis = abs(X.ndim - Y.ndim) - axis - 1` 。 + 3. 考虑到子序列, :math:`Y` 的大小为1的尾部维度将被忽略,例如shape(Y)=(2,1)=>(2)。 + +例如: + +.. code-block:: text + + shape(X) = (2, 3, 4, 5), shape(Y) = (,) + shape(X) = (2, 3, 4, 5), shape(Y) = (5,) + shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5), with axis=-1(default) or axis=2 + shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1 + shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0 + shape(X) = (2, 3, 4, 5), shape(Y) = (2, 1), with axis=0 + +具体的飞桨的广播(broadcasting)机制可以参考 `<> `_ 。 + +参数 +::::::::: + - **x** (Tensor)- 多维Tensor。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64`` 。 + - **y** (Tensor)- 多维Tensor。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64`` 。 + - **axis** (int32, 可选)- Y的维度对应到X维度上时的索引。默认值为 -1。 + - **name** (string, 可选)- 输出的名字。默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 + +返回 +::::::::: + Tensor,维度和数据类型与 ``x`` 相同的多维Tensor。 + + +代码示例 +:::::::::: + +.. code-block:: python + + import paddle + paddle.disable_static() + + x = paddle.to_tensor([[1, 2], [3, 4]], dtype='float32') + y = paddle.to_tensor([[5, 6], [7, 8]], dtype='float32') + res = paddle.minimum(x, y) + print(res.numpy()) + #[[1. 2.] + # [3. 4.]] + + x = paddle.to_tensor([[[1, 2, 3], [1, 2, 3]]], dtype='float32') + y = paddle.to_tensor([1, 2], dtype='float32') + res = paddle.minimum(x, y, axis=1) + print(res.numpy()) + #[[[1. 1. 1.] + # [2. 2. 2.]]] + + x = paddle.to_tensor([2, 3, 5], dtype='float32') + y = paddle.to_tensor([1, 4, np.nan], dtype='float32') + res = paddle.minimum(x, y) + print(res.numpy()) + #[ 1. 3. nan] + + x = paddle.to_tensor([5, 3, np.inf], dtype='float32') + y = paddle.to_tensor([1, 4, 5], dtype='float32') + res = paddle.minimum(x, y) + print(res.numpy()) + #[1. 3. 5.] 
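在上述示例的基础上,下面给出一个可独立运行的最小示意代码(显式导入 numpy,其余写法与上文一致,仅作示意),演示输入包含 NaN 时 ``paddle.minimum`` 的行为:

.. code-block:: python

    import numpy as np
    import paddle
    paddle.disable_static()

    # 显式导入 numpy 后,可直接用 np.nan 构造包含 NaN 的输入
    x = paddle.to_tensor([2, 3, 5], dtype='float32')
    y = paddle.to_tensor([1, 4, np.nan], dtype='float32')
    res = paddle.minimum(x, y)
    print(res.numpy())
    # [ 1.  3. nan]

与上文示例一致,只要任一输入在某个位置为 NaN,输出在该位置即为 NaN。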
diff --git a/doc/paddle/api/paddle/tensor/math/mm_cn.rst b/doc/paddle/api/paddle/tensor/math/mm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b963d6cbe8744210a56af7aef21ba047f3b8f60c --- /dev/null +++ b/doc/paddle/api/paddle/tensor/math/mm_cn.rst @@ -0,0 +1,77 @@ +.. _cn_api_tensor_mm: + +mm +------------------------------- + +.. py:function:: paddle.mm(input, mat2, out=None, name=None) + + + + +用于两个输入矩阵的相乘。 + +两个输入的形状可为任意维度,但当任一输入维度大于3时,两个输入的维度必须相等。 + +如果原始 Tensor input 或 mat2 的秩为 1 且未转置,则矩阵相乘后的前置或附加维度 1 将移除。 + +参数: + - **input** (Variable) : 输入变量,类型为 Tensor 或 LoDTensor。 + - **mat2** (Variable) : 输入变量,类型为 Tensor 或 LoDTensor。 + - **out** (Variable, 可选) – 指定存储运算结果的Tensor。如果设置为None或者不设置,将创建新的Tensor存储运算结果,默认值为None。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回: + - Variable (Tensor / LoDTensor),矩阵相乘后的结果。 + +返回类型: + - Variable(变量)。 + +:: + + * 例 1: + + input: [B, ..., M, K], mat2: [B, ..., K, N] + out: [B, ..., M, N] + + * 例 2: + + input: [B, M, K], mat2: [B, K, N] + out: [B, M, N] + + * 例 3: + + input: [B, M, K], mat2: [K, N] + out: [B, M, N] + + * 例 4: + + input: [M, K], mat2: [K, N] + out: [M, N] + + * 例 5: + + input: [B, M, K], mat2: [K] + out: [B, M] + + * 例 6: + + input: [K], mat2: [K] + out: [1] + + * 例 7: + + input: [M], mat2: [N] + out: [M, N] + + +**代码示例**: + +.. code-block:: python + + import paddle + import paddle.fluid as fluid + + input = fluid.data(name='input', shape=[2, 3], dtype='float32') + mat2 = fluid.data(name='mat2', shape=[3, 2], dtype='float32') + out = paddle.mm(input, mat2) # out shape is [2, 2] + diff --git a/doc/paddle/api/paddle/tensor/math/mod_cn.rst b/doc/paddle/api/paddle/tensor/math/mod_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b7f34c81e5937ac5ed96e7b63a9d2cc080c5b01d --- /dev/null +++ b/doc/paddle/api/paddle/tensor/math/mod_cn.rst @@ -0,0 +1,7 @@ +.. _cn_api_tensor_cn_mod: + +mod +------------------------------- +:doc_source: paddle.tensor.remainder + + diff --git a/doc/paddle/api/paddle/tensor/math/multiply_cn.rst b/doc/paddle/api/paddle/tensor/math/multiply_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5db2bfc01ff12b16da867352136d0faecf3f364f --- /dev/null +++ b/doc/paddle/api/paddle/tensor/math/multiply_cn.rst @@ -0,0 +1,77 @@ +.. _cn_api_fluid_layers_multiply: + +multiply +------------------------------- + +.. py:function:: paddle.multiply(x, y, axis=-1, name=None) + + + + +该OP是逐元素相乘算子,输入 ``x`` 与输入 ``y`` 逐元素相乘,并将各个位置的输出元素保存到返回结果中。 + +等式是: + +.. math:: + Out = X \odot Y + +- :math:`X` :多维Tensor。 +- :math:`Y` :维度必须小于等于X维度的Tensor。 + +对于这个运算算子有2种情况: + 1. :math:`Y` 的 ``shape`` 与 :math:`X` 相同。 + 2. :math:`Y` 的 ``shape`` 是 :math:`X` 的连续子序列。 + +对于情况2: + 1. 用 :math:`Y` 匹配 :math:`X` 的形状(shape),其中 ``axis`` 是 :math:`Y` 在 :math:`X` 上的起始维度的位置。 + 2. 如果 ``axis`` 为-1(默认值),则 :math:`axis= rank(X)-rank(Y)` 。 + 3. 考虑到子序列, :math:`Y` 的大小为1的尾部维度将被忽略,例如shape(Y)=(2,1)=>(2)。 + +例如: + +.. 
code-block:: text + + shape(X) = (2, 3, 4, 5), shape(Y) = (,) + shape(X) = (2, 3, 4, 5), shape(Y) = (5,) + shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5), with axis=-1(default) or axis=2 + shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1 + shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0 + shape(X) = (2, 3, 4, 5), shape(Y) = (2, 1), with axis=0 + +参数: + - **x** (Variable)- 多维 ``Tensor`` 或 ``LoDTensor`` 。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64``。 + - **y** (Variable)- 多维 ``Tensor`` 或 ``LoDTensor`` 。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64``。 + - **axis** (int32,可选)- ``y`` 的维度对应到 ``x`` 维度上时的索引。默认值为 -1。 + - **name** (string,可选)- 输出的名字。默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 + + +返回: 维度与 ``x`` 相同的 ``Tensor`` 或 ``LoDTensor`` ,数据类型与 ``x`` 相同。 + +返回类型: Variable。 + +**代码示例 1** + +.. code-block:: python + + import paddle + import numpy as np + paddle.enable_imperative() + x_data = np.array([[1, 2], [3, 4]], dtype=np.float32) + y_data = np.array([[5, 6], [7, 8]], dtype=np.float32) + x = paddle.imperative.to_variable(x_data) + y = paddle.imperative.to_variable(y_data) + res = paddle.multiply(x, y) + print(res.numpy()) # [[5, 12], [21, 32]] + x_data = np.array([[[1, 2, 3], [1, 2, 3]]], dtype=np.float32) + y_data = np.array([1, 2], dtype=np.float32) + x = paddle.imperative.to_variable(x_data) + y = paddle.imperative.to_variable(y_data) + res = paddle.multiply(x, y, axis=1) + print(res.numpy()) # [[[1, 2, 3], [2, 4, 6]]] + + + + + + + diff --git a/doc/paddle/api/paddle/tensor/math/pow_cn.rst b/doc/paddle/api/paddle/tensor/math/pow_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..40eaf542138527856d25a002f16a4cf29c891f47 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/math/pow_cn.rst @@ -0,0 +1,55 @@ +.. _cn_api_fluid_layers_pow: + +pow +------------------------------- + +.. py:function:: paddle.pow(x, exponent, name=None) + + + + +该OP是指数激活算子: + +.. math:: + + out = x^{exponent} + +**注意:如果需要对输入进行 elementwise_pow 操作,请查使用** :ref:`cn_api_fluid_layers_elementwise_pow` 。 + +参数: + - **x** (Variable)- 多维 ``Variable``,数据类型为 ``float32`` 或 ``float64`` 。 + - **exponent** (float32|Variable)- ``float32`` 或形状为[1]的 ``Variable``,数据类型为 ``float32``。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置。默认值: ``None``。 + +返回:维度与输入 `x` 相同的 ``Variable``,数据类型与 ``x`` 相同。 + +返回类型:Variable。 + + +**代码示例:** + +.. code-block:: python + + import paddle + import numpy as np + x = fluid.data(name="x", shape=[32,32], dtype="float32") + paddle.enable_imperative() + + # example 1: exponent is a float + x_data = np.array([1, 2, 3]) + exponent = 2 + x = paddle.imperative.to_variable(x_data) + res = paddle.pow(x, exponent) + print(res.numpy()) # [1 4 9] + + # example 2: exponent is a Variable + exponent = paddle.fill_constant(shape=[1], value=2, dtype='float32') + res = paddle.pow(x, exponent) + print(res.numpy()) # [1 4 9] + + + + + + + diff --git a/doc/paddle/api/paddle/tensor/math/prod_cn.rst b/doc/paddle/api/paddle/tensor/math/prod_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..64058ada576905be353bdb8f97ad50369db56688 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/math/prod_cn.rst @@ -0,0 +1,69 @@ +.. _cn_api_tensor_cn_prod: + +prod +------------------------------- + +.. 
py:function:: paddle.prod(x, axis=None, keepdim=False, dtype=None, name=None) + + + +对指定维度上的Tensor元素进行求乘积运算,并输出相应的计算结果。 + +参数: + - **x** (Tensor) - 输入的 `Tensor` ,数据类型为:float32、float64、int32、int64。 + - **axis** (int|list|tuple,可选) - 求乘积运算的维度。如果是None,则计算所有元素的乘积并返回包含单个元素的Tensor,否则该参数必须在 :math:`[-x.ndim, x.ndim)` 范围内。如果 :math:`axis[i] < 0` ,则维度将变为 :math:`x.ndim + axis[i]` ,默认为None。 + - **keepdim** (bool,可选) - 是否在输出 `Tensor` 中保留减小的维度。如 `keepdim` 为True,否则结果张量的维度将比输入张量小,默认值为False。 + - **dtype** (str,可选) - 输出Tensor的数据类型,支持int32、int64、float32、float64。如果指定了该参数,那么在执行操作之前,输入Tensor将被转换为dtype类型. 这对于防止数据类型溢出非常有用。若参数为空,则输出变量的数据类型和输入变量相同,默认为:None。 + - **name** (str,可选)- 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + +返回:指定axis上累乘的结果的Tensor。 + + +**代码示例**: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + # the axis is a int element + data_x = np.array([[0.2, 0.3, 0.5, 0.9], + [0.1, 0.2, 0.6, 0.7]]).astype(np.float32) + x = paddle.to_tensor(data_x) + out1 = paddle.prod(x) + print(out1.numpy()) + # [0.0002268] + + out2 = paddle.prod(x, -1) + print(out2.numpy()) + # [0.027 0.0084] + + out3 = paddle.prod(x, 0) + print(out3.numpy()) + # [0.02 0.06 0.3 0.63] + print(out3.numpy().dtype) + # float32 + + out4 = paddle.prod(x, 0, keepdim=True) + print(out4.numpy()) + # [[0.02 0.06 0.3 0.63]] + + out5 = paddle.prod(x, 0, dtype='int64') + print(out5.numpy()) + # [0 0 0 0] + print(out5.numpy().dtype) + # int64 + + # the axis is list + data_y = np.array([[[1.0, 2.0], [3.0, 4.0]], + [[5.0, 6.0], [7.0, 8.0]]]) + y = paddle.to_tensor(data_y) + out6 = paddle.prod(y, [0, 1]) + print(out6.numpy()) + # [105. 384.] + + out7 = paddle.prod(y, (1, 2)) + print(out7.numpy()) + # [ 24. 1680.] diff --git a/doc/paddle/api/paddle/tensor/math/remainder_cn.rst b/doc/paddle/api/paddle/tensor/math/remainder_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ccf124eb8a344c65e216f368d8b748c20c484a0f --- /dev/null +++ b/doc/paddle/api/paddle/tensor/math/remainder_cn.rst @@ -0,0 +1,42 @@ +.. _cn_api_tensor_remainder: + +remainder +------------------------------- + +.. py:function:: paddle.remainder(x, y, name=None) + +该OP是逐元素取模算子,输入 ``x`` 与输入 ``y`` 逐元素取模,并将各个位置的输出元素保存到返回结果中。 +输入 ``x`` 与输入 ``y`` 必须可以广播为相同形状, 关于广播规则,请参考 :ref:`use_guide_broadcasting` + +等式为: + +.. math:: + Out = X \% Y + +- :math:`X` :多维Tensor。 +- :math:`Y` :多维Tensor。 + +参数: + - x(Tensor)- 多维Tensor。数据类型为float32 、float64、int32或int64。 + - y(Tensor)- 多维Tensor。数据类型为float32 、float64、int32或int64。 + - name(str,可选)- 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + + +返回: 多维 Tensor, 数据类型与 ``x`` 相同,维度为广播后的形状。 + +返回类型: Tensor + + +**代码示例** + +.. code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + np_x = np.array([2, 3, 8, 7]) + np_y = np.array([1, 5, 3, 3]) + x = paddle.to_tensor(np_x) + y = paddle.to_tensor(np_y) + z = paddle.remainder(x, y) + print(z.numpy()) # [0, 3, 2, 1] \ No newline at end of file diff --git a/doc/paddle/api/paddle/tensor/math/sign_cn.rst b/doc/paddle/api/paddle/tensor/math/sign_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e1443fc2dfa3aedf65a2997f6b10bd91593d6325 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/math/sign_cn.rst @@ -0,0 +1,30 @@ +.. _cn_api_tensor_sign: + +sign +------------------------------- + +.. 
py:function:: paddle.sign(x, name=None) + +此OP对输入x中每个元素进行正负判断,并且输出正负判断值:1代表正,-1代表负,0代表零。 + +参数: + - **x** (Tensor) – 进行正负值判断的多维Tensor,数据类型为 float16, float32或float64。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:输出正负号Tensor,数据的shape大小及数据类型和输入 ``x`` 一致。 + +返回类型:Tensor + +**代码示例** + +.. code-block:: python + + import numpy as np + import paddle + + data = np.array([3.0, 0.0, -2.0, 1.7], dtype='float32') + paddle.disable_static() + x = paddle.to_tensor(data) + out = paddle.sign(x=x) + print(out) # [1.0, 0.0, -1.0, 1.0] + diff --git a/doc/paddle/api/paddle/tensor/math/sqrt_cn.rst b/doc/paddle/api/paddle/tensor/math/sqrt_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..779ab45fbf975d0cc9538cd7f2d68f0194e8bc64 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/math/sqrt_cn.rst @@ -0,0 +1,55 @@ +.. _cn_api_fluid_layers_sqrt: + +sqrt +------------------------------- + +.. py:function:: paddle.fluid.layers.sqrt(x, name=None) + + + + +计算输入的算数平方根。 + +.. math:: out=\sqrt x=x^{1/2} + +.. note:: + 请确保输入中的数值是非负数。 + +参数: + + - **x** (Variable) - 支持任意维度的Tensor。数据类型为float32,float64或float16。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:返回类型为Variable(Tensor|LoDTensor), 数据类型同输入一致。 + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle.fluid as fluid + + inputs = fluid.layers.data(name="x", shape = [3], dtype='float32') + output = fluid.layers.sqrt(inputs) + + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(fluid.default_startup_program()) + + img = np.array([0, 9, 36]).astype(np.float32) + + res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output]) + print(res) + # [array([0., 3., 6.], dtype=float32)] + + + + + + + + + + + + + diff --git a/doc/paddle/api/paddle/tensor/math/sum_cn.rst b/doc/paddle/api/paddle/tensor/math/sum_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..47afc379c37747e1c87c2e53d0eea7276899818b --- /dev/null +++ b/doc/paddle/api/paddle/tensor/math/sum_cn.rst @@ -0,0 +1,47 @@ +.. _cn_api_tensor_sum: + +sum +------------------------------- + +.. py:function:: paddle.sum(x, axis=None, dtype=None, keepdim=False, name=None) + +该OP是对指定维度上的Tensor元素进行求和运算,并输出相应的计算结果。 + +参数: + - **x** (Tensor)- 输入变量为多维Tensor,支持数据类型为float32,float64,int32,int64。 + - **axis** (int | list | tuple ,可选)- 求和运算的维度。如果为None,则计算所有元素的和并返回包含单个元素的Tensor变量,否则必须在 :math:`[−rank(x),rank(x)]` 范围内。如果 :math:`axis [i] <0` ,则维度将变为 :math:`rank+axis[i]` ,默认值为None。 + - **dtype** (str , 可选)- 输出变量的数据类型。若参数为空,则输出变量的数据类型和输入变量相同,默认值为None。 + - **keepdim** (bool)- 是否在输出Tensor中保留减小的维度。如 keepdim 为true,否则结果张量的维度将比输入张量小,默认值为False。 + - **name** (str , 可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回: + ``Tensor``,在指定维度上进行求和运算的Tensor,数据类型和输入数据类型一致。 + + +**代码示例** + +.. code-block:: python + + import numpy as np + import paddle + paddle.disable_static() + + # x is a Tensor variable with following elements: + # [[0.2, 0.3, 0.5, 0.9] + # [0.1, 0.2, 0.6, 0.7]] + # Each example is followed by the corresponding output tensor. 
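    # 下面的 x 形状为 [2, 4]:先对全部元素求和,再分别沿不同的 axis 求和,
    # 最后用 keepdim=True 保留被归约的维度。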
+ x_data = np.array([[0.2, 0.3, 0.5, 0.9],[0.1, 0.2, 0.6, 0.7]]).astype('float32') + x = paddle.to_tensor(x_data) + out1 = paddle.sum(x) # [3.5] + out2 = paddle.sum(x, axis=0) # [0.3, 0.5, 1.1, 1.6] + out3 = paddle.sum(x, axis=-1) # [1.9, 1.6] + out4 = paddle.sum(x, axis=1, keepdim=True) # [[1.9], [1.6]] + + # y is a Tensor variable with shape [2, 2, 2] and elements as below: + # [[[1, 2], [3, 4]], + # [[5, 6], [7, 8]]] + # Each example is followed by the corresponding output tensor. + y_data = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]).astype('float32') + y = paddle.to_tensor(y_data) + out5 = paddle.sum(y, axis=[1, 2]) # [10, 26] + out6 = paddle.sum(y, axis=[0, 1]) # [16, 20] diff --git a/doc/paddle/api/paddle/tensor/math/tanh_cn.rst b/doc/paddle/api/paddle/tensor/math/tanh_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ee81756c3cf07006510b44204f56ebdc43ffa442 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/math/tanh_cn.rst @@ -0,0 +1,37 @@ +.. _cn_api_tensor_tanh: + +tanh +------------------------------- + +.. py:function:: paddle.tanh(x, name=None) + + +tanh 激活函数 + +.. math:: + out = \frac{e^{x} - e^{-x}}{e^{x} + e^{-x}} + +参数: + + - **x** (Tensor) - Tanh算子的输入, 多维Tensor,数据类型为 float16,float32或float64。 + - **name** (str, 可选) - 该层名称(可选,默认为None)。具体用法请参见 :ref:`api_guide_Name`。 + +返回: tanh的输出Tensor,和输入有着相同类型和shape。 + +返回类型: Tensor + +**代码示例**: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + x_data = np.array([-0.4, -0.2, 0.1, 0.3]) + x = paddle.to_tensor(x_data) + out = paddle.tanh(x) + print(out.numpy()) + # [-0.37994896 -0.19737532 0.09966799 0.29131261] + diff --git a/doc/paddle/api/paddle/tensor/math/trace_cn.rst b/doc/paddle/api/paddle/tensor/math/trace_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..df34ce99a8c84cc93dc180f8da0adaa1ae730cec --- /dev/null +++ b/doc/paddle/api/paddle/tensor/math/trace_cn.rst @@ -0,0 +1,51 @@ +.. _cn_api_tensor_trace: + +trace +------------------------------- + +.. py:function:: paddle.trace(x, offset=0, axis1=0, axis2=1, name=None) + + + + +该 OP 计算输入 Tensor 在指定平面上的对角线元素之和,并输出相应的计算结果。 + +如果输入是 2D Tensor,则返回对角线元素之和。 + +如果输入的维度大于 2D,则返回一个由对角线元素之和组成的数组,其中对角线从由 axis1 和 axis2 指定的二维平面中获得。默认由输入的前两维组成获得对角线的 2D 平面。 + +参数 ``offset`` 确定从指定的二维平面中获取对角线的位置: + + - 如果 offset = 0,则取主对角线。 + - 如果 offset > 0,则取主对角线右上的对角线。 + - 如果 offset < 0,则取主对角线左下的对角线。 + +参数: + - **x** (Variable)- 输入张量,至少为 2D 数组,支持数据类型为 float32,float64,int32,int64。 + - **offset** (int ,可选)- 从指定的二维平面中获取对角线的位置,默认值为 0,既主对角线。 + - **axis1** (int , 可选)- 获取对角线的二维平面的第一维,默认值为 0。 + - **axis2** (int , 可选)- 获取对角线的二维平面的第二维,默认值为 1。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回: 指定二维平面的对角线元素之和。数据类型和输入数据类型一致。 + +返回类型: 变量(Variable) + +**代码示例** + +.. 
code-block:: python + + import paddle + import numpy as np + + case1 = np.random.randn(2, 3).astype('float32') + case2 = np.random.randn(3, 10, 10).astype('float32') + case3 = np.random.randn(3, 10, 5, 10).astype('float32') + + paddle.enable_imperative() + case1 = paddle.imperative.to_variable(case1) + case2 = paddle.imperative.to_variable(case2) + case3 = paddle.imperative.to_variable(case3) + data1 = paddle.trace(case1) # data1.shape = [1] + data2 = paddle.trace(case2, offset=1, axis1=1, axis2=2) # data2.shape = [3] + data3 = paddle.trace(case3, offset=-3, axis1=1, axis2=-1) # data2.shape = [3, 5] diff --git a/doc/paddle/api/paddle/tensor/random/bernoulli_cn.rst b/doc/paddle/api/paddle/tensor/random/bernoulli_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a18096b54472967d22d7707b2198581770569a0e --- /dev/null +++ b/doc/paddle/api/paddle/tensor/random/bernoulli_cn.rst @@ -0,0 +1,49 @@ +.. _cn_api_tensor_bernoulli: + +bernoulli +------------------------------- + +.. py:function:: paddle.bernoulli(x, name=None) + + + + +该OP以输入 ``x`` 为概率,生成一个伯努利分布(0-1分布)的Tensor,输出Tensor的形状和数据类型与输入 ``x`` 相同。 + +.. math:: + out_i \sim Bernoulli(p = x_i) + +参数: + - **x** (Tensor) - 输入的概率值。数据类型为 ``float32`` 、``float64`` . + - **name** (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回: + Tensor:伯努利分布的随机Tensor,形状和数据类型为与输入 ``x`` 相同。 + + +**代码示例**: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + x = paddle.rand([2, 3]) + print(x.numpy()) + # [[0.11272584 0.3890902 0.7730957 ] + # [0.10351662 0.8510418 0.63806665]] + + out = paddle.bernoulli(x) + print(out.numpy()) + # [[0. 0. 1.] + # [0. 0. 1.]] + + + + + + + + diff --git a/doc/paddle/api/paddle/tensor/random/normal_cn.rst b/doc/paddle/api/paddle/tensor/random/normal_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ac57976ad7b741cd133014372b99b4fd801d0c13 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/random/normal_cn.rst @@ -0,0 +1,48 @@ +.. _cn_api_tensor_random_normal: + +normal +------------------------------- + +.. py:function:: paddle.normal(mean=0.0, std=1.0, shape=None, name=None) + + +该OP返回符合正态分布(均值为 ``mean`` ,标准差为 ``std`` 的正态随机分布)的随机Tensor。 + +如果 ``mean`` 是Tensor,则输出Tensor和 ``mean`` 具有相同的形状和数据类型。 +如果 ``mean`` 不是Tensor,且 ``std`` 是Tensor,则输出Tensor和 ``std`` 具有相同的形状和数据类型。 +如果 ``mean`` 和 ``std`` 都不是Tensor,则输出Tensor的形状为 ``shape``,数据类型为float32。 + +如果 ``mean`` 和 ``std`` 都是Tensor,则 ``mean`` 和 ``std`` 的元素个数应该相同。 + +参数 +:::::::::: + - mean (float|Tensor, 可选) - 输出Tensor的正态分布的平均值。如果 ``mean`` 是float,则表示输出Tensor中所有元素的正态分布的平均值。如果 ``mean`` 是Tensor(支持的数据类型为float32、float64),则表示输出Tensor中每个元素的正态分布的平均值。默认值为0.0 + - std (float|Tensor, 可选) - 输出Tensor的正态分布的标准差。如果 ``std`` 是float,则表示输出Tensor中所有元素的正态分布的标准差。如果 ``std`` 是Tensor(支持的数据类型为float32、float64),则表示输出Tensor中每个元素的正态分布的标准差。默认值为0.0 + - shape (list|tuple|Tensor, 可选) - 生成的随机Tensor的形状。如果 ``shape`` 是list、tuple,则其中的元素可以是int,或者是形状为[1]且数据类型为int32、int64的Tensor。如果 ``shape`` 是Tensor,则是数据类型为int32、int64的1-D Tensor。如果 ``mean`` 或者 ``std`` 是Tensor,输出Tensor的形状和 ``mean`` 或者 ``std`` 相同(此时 ``shape`` 无效)。默认值为None。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +:::::::::: + Tensor:符合正态分布(均值为 ``mean`` ,标准差为 ``std`` 的正态随机分布)的随机Tensor。 + +示例代码 +:::::::::: + +.. 
code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + out1 = paddle.normal(shape=[2, 3]) + # [[ 0.17501129 0.32364586 1.561118 ] # random + # [-1.7232178 1.1545963 -0.76156676]] # random + + mean_tensor = paddle.to_tensor(np.array([1.0, 2.0, 3.0])) + out2 = paddle.normal(mean=mean_tensor) + # [ 0.18644847 -1.19434458 3.93694787] # random + + std_tensor = paddle.to_tensor(np.array([1.0, 2.0, 3.0])) + out3 = paddle.normal(mean=mean_tensor, std=std_tensor) + # [1.00780561 3.78457445 5.81058198] # random diff --git a/doc/paddle/api/paddle/tensor/random/rand_cn.rst b/doc/paddle/api/paddle/tensor/random/rand_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5045d6d4f9e24b27954b84c7cf46abb6e94b835b --- /dev/null +++ b/doc/paddle/api/paddle/tensor/random/rand_cn.rst @@ -0,0 +1,49 @@ +.. _cn_api_tensor_random_rand: + +rand +---------------------- + +.. py:function:: paddle.rand(shape, dtype=None, name=None) + +该OP返回符合均匀分布的,范围在[0, 1)的Tensor,形状为 ``shape``,数据类型为 ``dtype``。 + +参数 +:::::::::: + - **shape** (list|tuple|Tensor) - 生成的随机Tensor的形状。如果 ``shape`` 是list、tuple,则其中的元素可以是int,或者是形状为[1]且数据类型为int32、int64的Tensor。如果 ``shape`` 是Tensor,则是数据类型为int32、int64的1-D Tensor。 + - **dtype** (str|np.dtype|core.VarDesc.VarType, 可选) - 输出Tensor的数据类型,支持float32、float64。当该参数值为None时, 输出Tensor的数据类型为float32。默认值为None. + - **name** (str, 可选) - 输出的名字。一般无需设置,默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 + +返回 +:::::::::: + Tensor: 符合均匀分布的范围为[0, 1)的随机Tensor,形状为 ``shape``,数据类型为 ``dtype``。 + +示例代码 +:::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + # example 1: attr shape is a list which doesn't contain Tensor. + out1 = paddle.rand(shape=[2, 3]) + # [[0.451152 , 0.55825245, 0.403311 ], # random + # [0.22550228, 0.22106001, 0.7877319 ]] # random + + # example 2: attr shape is a list which contains Tensor. + dim1 = paddle.full([1], 2, "int64") + dim2 = paddle.full([1], 3, "int32") + out2 = paddle.rand(shape=[dim1, dim2, 2]) + # [[[0.8879919 , 0.25788337], # random + # [0.28826773, 0.9712097 ], # random + # [0.26438272, 0.01796806]], # random + # [[0.33633623, 0.28654453], # random + # [0.79109055, 0.7305809 ], # random + # [0.870881 , 0.2984597 ]]] # random + + # example 3: attr shape is a Tensor, the data type must be int64 or int32. + shape_tensor = paddle.to_tensor(np.array([2, 3])) + out2 = paddle.rand(shape_tensor) + # [[0.22920267, 0.841956 , 0.05981819], # random + # [0.4836288 , 0.24573246, 0.7516129 ]] # random diff --git a/doc/paddle/api/paddle/tensor/random/randint_cn.rst b/doc/paddle/api/paddle/tensor/random/randint_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..bac8502dc0b3e0cc6546e8cb4d737b2e399f1d80 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/random/randint_cn.rst @@ -0,0 +1,60 @@ +.. _cn_api_tensor_random_randint: + +randint +------------------------------- + +.. 
py:function:: paddle.randint(low=0, high=None, shape=[1], dtype=None, name=None) + +该OP返回服从均匀分布的、范围在[``low``, ``high``)的随机Tensor,形状为 ``shape``,数据类型为 ``dtype``。当 ``high`` 为None时(默认),均匀采样的区间为[0, ``low``)。 + +参数 +:::::::::: + - **low** (int) - 要生成的随机值范围的下限,``low`` 包含在范围中。当 ``high`` 为None时,均匀采样的区间为[0, ``low``)。默认值为0。 + - **high** (int, 可选) - 要生成的随机值范围的上限,``high`` 不包含在范围中。默认值为None,此时范围是[0, ``low``)。 + - **shape** (list|tuple|Tensor) - 生成的随机Tensor的形状。如果 ``shape`` 是list、tuple,则其中的元素可以是int,或者是形状为[1]且数据类型为int32、int64的Tensor。如果 ``shape`` 是Tensor,则是数据类型为int32、int64的1-D Tensor。。默认值为[1]。 + - **dtype** (str|np.dtype|core.VarDesc.VarType, 可选) - 输出Tensor的数据类型,支持int32、int64。当该参数值为None时, 输出Tensor的数据类型为int64。默认值为None. + - **name** (str, 可选) - 输出的名字。一般无需设置,默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 + +返回 +:::::::::: + Tensor:从区间[``low``,``high``)内均匀分布采样的随机Tensor,形状为 ``shape``,数据类型为 ``dtype``。 + +代码示例 +::::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + # example 1: + # attr shape is a list which doesn't contain Tensor. + out1 = paddle.randint(low=-5, high=5, shape=[3]) + # [0, -3, 2] # random + + # example 2: + # attr shape is a list which contains Tensor. + dim1 = paddle.full([1], 2, "int64") + dim2 = paddle.full([1], 3, "int32") + out2 = paddle.randint(low=-5, high=5, shape=[dim1, dim2], dtype="int32") + # [[0, -1, -3], # random + # [4, -2, 0]] # random + + # example 3: + # attr shape is a Tensor + shape_tensor = paddle.to_tensor(np.array([3])) + out3 = paddle.randint(low=-5, high=5, shape=shape_tensor) + # [-2, 2, 3] # random + + # example 4: + # data type is int32 + out4 = paddle.randint(low=-5, high=5, shape=[3], dtype='int32') + # [-5, 4, -4] # random + + # example 5: + # Input only one parameter + # low=0, high=10, shape=[1], dtype='int64' + out5 = paddle.randint(10) + # [7] # random diff --git a/doc/paddle/api/paddle/tensor/random/randn_cn.rst b/doc/paddle/api/paddle/tensor/random/randn_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..fa9b18fb9513c241a539fdc7bdc23b74da8363ca --- /dev/null +++ b/doc/paddle/api/paddle/tensor/random/randn_cn.rst @@ -0,0 +1,50 @@ +.. _cn_api_tensor_random_randn: + +randn +------------------------------- + +.. py:function:: paddle.randn(shape, dtype=None, name=None) + +该OP返回符合标准正态分布(均值为0,标准差为1的正态随机分布)的随机Tensor,形状为 ``shape``,数据类型为 ``dtype``。 + +参数 +:::::::::: + - **shape** (list|tuple|Tensor) - 生成的随机Tensor的形状。如果 ``shape`` 是list、tuple,则其中的元素可以是int,或者是形状为[1]且数据类型为int32、int64的Tensor。如果 ``shape`` 是Tensor,则是数据类型为int32、int64的1-D Tensor。 + - **dtype** (str|np.dtype|core.VarDesc.VarType, 可选) - 输出Tensor的数据类型,支持float32、float64。当该参数值为None时, 输出Tensor的数据类型为float32。默认值为None. + - **name** (str, 可选) - 输出的名字。一般无需设置,默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 + +返回 +:::::::::: + Tensor:符合标准正态分布的随机Tensor,形状为 ``shape``,数据类型为 ``dtype``。 + +示例代码 +:::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + # example 1: attr shape is a list which doesn't contain Tensor. + out1 = paddle.randn(shape=[2, 3]) + # [[-2.923464 , 0.11934398, -0.51249987], # random + # [ 0.39632758, 0.08177969, 0.2692008 ]] # random + + # example 2: attr shape is a list which contains Tensor. 
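    # dim1、dim2 是形状为 [1] 的整型 Tensor,可以与普通的 Python int
    # 一起组成 shape 列表,对应上文参数说明中 shape 的列表用法。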
+ dim1 = paddle.full([1], 2, "int64") + dim2 = paddle.full([1], 3, "int32") + out2 = paddle.randn(shape=[dim1, dim2, 2]) + # [[[-2.8852394 , -0.25898588], # random + # [-0.47420555, 0.17683524], # random + # [-0.7989969 , 0.00754541]], # random + # [[ 0.85201347, 0.32320443], # random + # [ 1.1399018 , 0.48336947], # random + # [ 0.8086993 , 0.6868893 ]]] # random + + # example 3: attr shape is a Tensor, the data type must be int64 or int32. + shape_tensor = paddle.to_tensor(np.array([2, 3])) + out3 = paddle.randn(shape_tensor) + # [[-2.878077 , 0.17099959, 0.05111201] # random + # [-0.3761474, -1.044801 , 1.1870178 ]] # random diff --git a/doc/paddle/api/paddle/tensor/random/randperm_cn.rst b/doc/paddle/api/paddle/tensor/random/randperm_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c78212af8b6984fffab96dd2aede895d9f342d5d --- /dev/null +++ b/doc/paddle/api/paddle/tensor/random/randperm_cn.rst @@ -0,0 +1,33 @@ +.. _cn_api_tensor_random_randperm: + +randperm +------------------------------- + +.. py:function:: paddle.randperm(n, dtype="int64", name=None) + +该OP返回一个数值在0到n-1、随机排列的1-D Tensor,数据类型为 ``dtype``。 + +参数: +:::::::::: + - **n** (int) - 随机序列的上限(不包括在序列中),应该大于0。 + - **dtype** (str|np.dtype|core.VarDesc.VarType, 可选) - 输出Tensor的数据类型,支持int32、int64、float32、float64。默认值为"int64". + - **name** (str, 可选) - 输出的名字。一般无需设置,默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 + +返回 +:::::::::: + Tensor:一个数值在0到n-1、随机排列的1-D Tensor,数据类型为 ``dtype`` 。 + +代码示例 +:::::::::: + +.. code-block:: python + + import paddle + + paddle.disable_static() + + out1 = paddle.randperm(5) + # [4, 1, 2, 3, 0] # random + + out2 = paddle.randperm(7, 'int32') + # [1, 6, 2, 0, 4, 3, 5] # random diff --git a/doc/paddle/api/paddle/tensor/random/standard_normal_cn.rst b/doc/paddle/api/paddle/tensor/random/standard_normal_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d379107d66ca0e024427a3cf3eb91b161ee1425e --- /dev/null +++ b/doc/paddle/api/paddle/tensor/random/standard_normal_cn.rst @@ -0,0 +1,50 @@ +.. _cn_api_tensor_random_standard_normal: + +standard_normal +------------------------------- + +.. py:function:: paddle.standard_normal(shape, dtype=None, name=None) + +该OP返回符合标准正态分布(均值为0,标准差为1的正态随机分布)的随机Tensor,形状为 ``shape``,数据类型为 ``dtype``。 + +参数 +:::::::::: + - **shape** (list|tuple|Tensor) - 生成的随机Tensor的形状。如果 ``shape`` 是list、tuple,则其中的元素可以是int,或者是形状为[1]且数据类型为int32、int64的Tensor。如果 ``shape`` 是Tensor,则是数据类型为int32、int64的1-D Tensor。 + - **dtype** (str|np.dtype|core.VarDesc.VarType, 可选) - 输出Tensor的数据类型,支持float32、float64。当该参数值为None时, 输出Tensor的数据类型为float32。默认值为None. + - **name** (str, 可选) - 输出的名字。一般无需设置,默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 + +返回 +:::::::::: + Tensor:符合标准正态分布的随机Tensor,形状为 ``shape``,数据类型为 ``dtype``。 + +示例代码 +:::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + # example 1: attr shape is a list which doesn't contain Tensor. + out1 = paddle.standard_normal(shape=[2, 3]) + # [[-2.923464 , 0.11934398, -0.51249987], # random + # [ 0.39632758, 0.08177969, 0.2692008 ]] # random + + # example 2: attr shape is a list which contains Tensor. 
+ dim1 = paddle.full([1], 2, "int64") + dim2 = paddle.full([1], 3, "int32") + out2 = paddle.standard_normal(shape=[dim1, dim2, 2]) + # [[[-2.8852394 , -0.25898588], # random + # [-0.47420555, 0.17683524], # random + # [-0.7989969 , 0.00754541]], # random + # [[ 0.85201347, 0.32320443], # random + # [ 1.1399018 , 0.48336947], # random + # [ 0.8086993 , 0.6868893 ]]] # random + + # example 3: attr shape is a Tensor, the data type must be int64 or int32. + shape_tensor = paddle.to_tensor(np.array([2, 3])) + out3 = paddle.standard_normal(shape_tensor) + # [[-2.878077 , 0.17099959, 0.05111201] # random + # [-0.3761474, -1.044801 , 1.1870178 ]] # random diff --git a/doc/paddle/api/paddle/tensor/random/uniform_cn.rst b/doc/paddle/api/paddle/tensor/random/uniform_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f5996a72d4f88a38ad0d71fc84b002c3a69bbd2d --- /dev/null +++ b/doc/paddle/api/paddle/tensor/random/uniform_cn.rst @@ -0,0 +1,77 @@ +.. _cn_api_tensor_uniform: + +uniform +------------------------------- + +.. py:function:: paddle.uniform(shape, dtype='float32', min=-1.0, max=1.0, seed=0, name=None) + + + + +该OP返回数值服从范围[``min``, ``max``)内均匀分布的随机Tensor,形状为 ``shape``,数据类型为 ``dtype``。 + +:: + + 示例1: + 给定: + shape=[1,2] + 则输出为: + result=[[0.8505902, 0.8397286]] + +参数: + - **shape** (list|tuple|Tensor) - 生成的随机Tensor的形状。如果 ``shape`` 是list、tuple,则其中的元素可以是int,或者是形状为[1]且数据类型为int32、int64的Tensor。如果 ``shape`` 是Tensor,则是数据类型为int32、int64的1-D Tensor。 + - **dtype** (str|np.dtype, 可选) - 输出Tensor的数据类型,支持float32、float64。默认值为float32。 + - **min** (float|int,可选) - 要生成的随机值范围的下限,min包含在范围中。支持的数据类型:float、int。默认值为-1.0。 + - **max** (float|int,可选) - 要生成的随机值范围的上限,max不包含在范围中。支持的数据类型:float、int。默认值为1.0。 + - **seed** (int,可选) - 随机种子,用于生成样本。0表示使用系统生成的种子。注意如果种子不为0,该操作符每次都生成同样的随机数。支持的数据类型:int。默认为 0。 + - **name** (str, 可选) - 输出的名字。一般无需设置,默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 + +返回: + Tensor:数值服从范围[``min``, ``max``)内均匀分布的随机Tensor,形状为 ``shape``,数据类型为 ``dtype``。 + +抛出异常: + - ``TypeError`` - 如果 ``shape`` 的类型不是list、tuple、Tensor。 + - ``TypeError`` - 如果 ``dtype`` 不是float32、float64。 + +**代码示例**: + +.. code-block:: python + + import numpy as np + import paddle + + paddle.disable_static() + + # example 1: + # attr shape is a list which doesn't contain Tensor. + result_1 = paddle.uniform(shape=[3, 4]) + # [[ 0.84524226, 0.6921872, 0.56528175, 0.71690357], + # [-0.34646994, -0.45116323, -0.09902662, -0.11397249], + # [ 0.433519, 0.39483607, -0.8660099, 0.83664286]] + + # example 2: + # attr shape is a list which contains Tensor. + dim_1 = paddle.fill_constant([1], "int64", 2) + dim_2 = paddle.fill_constant([1], "int32", 3) + result_2 = paddle.uniform(shape=[dim_1, dim_2]) + # [[-0.9951253, 0.30757582, 0.9899647 ], + # [ 0.5864527, 0.6607096, -0.8886161 ]] + + # example 3: + # attr shape is a Tensor, the data type must be int64 or int32. + shape = np.array([2, 3]) + shape_tensor = paddle.to_tensor(shape) + + result_3 = paddle.uniform(shape_tensor) + # if shape_tensor's value is [2, 3] + # result_3 is: + # [[-0.8517412, -0.4006908, 0.2551912 ], + # [ 0.3364414, 0.36278176, -0.16085452]] + + + + + + + + diff --git a/doc/paddle/api/paddle/tensor/search/argmax_cn.rst b/doc/paddle/api/paddle/tensor/search/argmax_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d2fb738b4cd99c1cf6d458ed994aae2b7186f09d --- /dev/null +++ b/doc/paddle/api/paddle/tensor/search/argmax_cn.rst @@ -0,0 +1,44 @@ +.. _cn_api_tensor_argmax: + +argmax +------------------------------- + +.. 
py:function:: paddle.argmax(x, axis=None, keepdim=False, dtype='int64', name=None) + + +该OP沿 ``axis`` 计算输入 ``x`` 的最大元素的索引。 + +参数 +:::::::: + - **x** (Tensor) - 输入的多维 ``Tensor`` ,支持的数据类型:float32、float64、int16、int32、int64、uint8。 + - **axis** (int,可选) - 指定对输入Tensor进行运算的轴, ``axis`` 的有效范围是[-R, R),R是输入 ``x`` 的维度个数, ``axis`` 为负数时,进行计算的 ``axis`` 与 ``axis`` + R 一致。默认值为None, 将会对输入的 `x` 进行平铺展开,返回最大值的索引。 + - **keepdim** (bool,可选)- 是否保留进行最大值索引操作的轴,默认值为False。 + - **dtype** (np.dtype|str,可选)- 输出Tensor的数据类型,可选值为int32,int64,默认值为int64,将返回int64类型的结果。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回 +:::::::: +``Tensor``, 如果设置 :attr:`dtype` 为 `int32` 时,返回的tensor的数据类型为 `int32` ,其它情况将返回的tensor的数据类型为 `int64` 。 + + +示例代码 +:::::::: + +.. code-block:: python + + import numpy as np + import paddle + + paddle.disable_static() + data = [[5,8,9,5], + [0,0,1,7], + [6,9,2,4]] + x = paddle.to_tensor(data) + out1 = paddle.argmax(x) + print(out1.numpy()) # 2 + out2 = paddle.argmax(x, axis=1) + print(out2.numpy()) + # [2 3 1] + out3 = paddle.argmax(x, axis=-1) + print(out3.numpy()) + # [2 3 1] diff --git a/doc/paddle/api/paddle/tensor/search/argmin_cn.rst b/doc/paddle/api/paddle/tensor/search/argmin_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4fd851764cbffe8a5ba93e3605034a9c55854e47 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/search/argmin_cn.rst @@ -0,0 +1,43 @@ +.. _cn_api_tensor_argmin: + +argmin +------------------------------- + +.. py:function:: paddle.argmin(x, axis=None, keepdim=False, dtype='int64', name=None) + + +该OP沿 ``axis`` 计算输入 ``x`` 的最小元素的索引。 + +参数 +:::::::: + - **x** (Tensor) - 输入的多维 ``Tensor`` ,支持的数据类型:float32、float64、int16、int32、int64、uint8。 + - **axis** (int,可选) - 指定对输入Tensor进行运算的轴, ``axis`` 的有效范围是[-R, R),R是输入 ``x`` 的维度个数, ``axis`` 为负数时,进行计算的 ``axis`` 与 ``axis`` + R 一致。默认值为None, 将会对输入的 `x` 进行平铺展开,返回最小值的索引。 + - **keepdim** (bool,可选)- 是否保留进行最小值索引操作的轴,默认值为False。 + - **dtype** (np.dtype|str, 可选)- 输出Tensor的数据类型,可选值为int32,int64,默认值为'int64',将返回int64类型的结果。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回 +:::::::: +``Tensor``, 如果设置 :attr:`dtype` 为 `int32` 时,返回的tensor的数据类型为 `int32` ,其它情况将返回的tensor的数据类型为 `int64` 。 + +示例代码 +:::::::: + +.. code-block:: python + + import numpy as np + import paddle + + paddle.disable_static() + data = [[5,8,9,5], + [0,0,1,7], + [6,9,2,4]] + x = paddle.to_tensor(data) + out1 = paddle.argmin(x) + print(out1.numpy()) # 4 + out2 = paddle.argmin(x, axis=1) + print(out2.numpy()) + # [0 0 2] + out3 = paddle.argmin(x, axis=-1) + print(out3.numpy()) + # [0 0 2] diff --git a/doc/paddle/api/paddle/tensor/search/argsort_cn.rst b/doc/paddle/api/paddle/tensor/search/argsort_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6ba242415d16ce07e050e653d487df88331a706a --- /dev/null +++ b/doc/paddle/api/paddle/tensor/search/argsort_cn.rst @@ -0,0 +1,60 @@ +.. _cn_api_tensor_cn_argsort: + +argsort +------------------------------- + +.. 
py:function:: paddle.argsort(x, axis=-1, descending=False, name=None) + + +对输入变量沿给定轴进行排序,输出排序好的数据的相应索引,其维度和输入相同。默认升序排列,如果需要降序排列设置 ``descending=True`` 。 + + +参数: + - **x** (Tensor) - 输入的多维 ``Tensor`` ,支持的数据类型:float32、float64、int16、int32、int64、uint8。 + - **axis** (int,可选) - 指定对输入Tensor进行运算的轴, ``axis`` 的有效范围是[-R, R),R是输入 ``x`` 的Rank, ``axis`` 为负时与 ``axis`` +R 等价。默认值为0。 + - **descending** (bool,可选) - 指定算法排序的方向。如果设置为True,算法按照降序排序。如果设置为False或者不设置,按照升序排序。默认值为False。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:Tensor, 排序后索引信息(与 ``x`` 维度信息一致),数据类型为int64。 + + +**代码示例**: + +.. code-block:: python + + import paddle + import paddle.imperative as imperative + import numpy as np + + paddle.enable_imperative() + input_array = np.array([[[5,8,9,5], + [0,0,1,7], + [6,9,2,4]], + [[5,2,4,2], + [4,7,7,9], + [1,7,0,6]]]).astype(np.float32) + x = imperative.to_variable(input_array) + out1 = paddle.argsort(x=x, axis=-1) + out2 = paddle.argsort(x=x, axis=0) + out3 = paddle.argsort(x=x, axis=1) + print(out1.numpy()) + #[[[0 3 1 2] + # [0 1 2 3] + # [2 3 0 1]] + # [[1 3 2 0] + # [0 1 2 3] + # [2 0 3 1]]] + print(out2.numpy()) + #[[[0 1 1 1] + # [0 0 0 0] + # [1 1 1 0]] + # [[1 0 0 0] + # [1 1 1 1] + # [0 0 0 1]]] + print(out3.numpy()) + #[[[1 1 1 2] + # [0 0 2 0] + # [2 2 0 1]] + # [[2 0 2 0] + # [1 1 0 2] + # [0 2 1 1]]] diff --git a/doc/paddle/api/paddle/tensor/search/index_sample_cn.rst b/doc/paddle/api/paddle/tensor/search/index_sample_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..711268d685040608853392c7db66ff7a5b092440 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/search/index_sample_cn.rst @@ -0,0 +1,73 @@ +.. _cn_api_tensor_search_index_sample: + +index_sample +------------------------------- + +.. py:function:: paddle.index_sample(x, index) + + + + +该OP实现对输入 ``x`` 中的元素进行批量抽样,取 ``index`` 指定的对应下标的元素,按index中出现的先后顺序组织,填充为一个新的张量。 + +该OP中 ``x`` 与 ``index`` 都是 ``2-D`` 张量。 ``index`` 的第一维度与输入 ``x`` 的第一维度必须相同, ``index`` 的第二维度没有大小要求,可以重复索引相同下标元素。 + +**参数**: + - **x** (Variable)– 输入的二维张量,数据类型为 int32,int64,float32,float64。 + - **index** (Variable)– 包含索引下标的二维张量。数据类型为 int32,int64。 + +**返回**: + -**Variable** ,数据类型与输入 ``x`` 相同,维度与 ``index`` 相同。 + +**代码示例**: + +.. code-block:: python + + import paddle + import paddle.fluid as fluid + import numpy as np + + data = np.array([[1.0, 2.0, 3.0, 4.0], + [5.0, 6.0, 7.0, 8.0], + [9.0, 10.0, 11.0, 12.0]]).astype('float32') + + data_index = np.array([[0, 1, 2], + [1, 2, 3], + [0, 0, 0]]).astype('int32') + + target_data = np.array([[100, 200, 300, 400], + [500, 600, 700, 800], + [900, 1000, 1100, 1200]]).astype('int32') + + + with fluid.dygraph.guard(): + x = fluid.dygraph.to_variable(data) + index = fluid.dygraph.to_variable(data_index) + target = fluid.dygraph.to_variable(target_data) + + out_z1 = paddle.index_sample(x, index) + print(out_z1.numpy()) + #[[1. 2. 3.] + # [6. 7. 8.] + # [9. 9. 9.]] + + # 巧妙用法:使用topk op产出的top元素的下标 + # 在另一个tensor中索引对应位置的元素 + top_value, top_index = fluid.layers.topk(x, k=2) + out_z2 = paddle.index_sample(target, top_index) + print(top_value.numpy()) + #[[ 4. 3.] + # [ 8. 7.] + # [12. 
11.]] + + print(top_index.numpy()) + #[[3 2] + # [3 2] + # [3 2]] + + print(out_z2.numpy()) + #[[ 400 300] + # [ 800 700] + # [1200 1100]] + + diff --git a/doc/paddle/api/paddle/tensor/search/index_select_cn.rst b/doc/paddle/api/paddle/tensor/search/index_select_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..fe60cbc5aec076d5e8c0c9235330fbecd33da432 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/search/index_select_cn.rst @@ -0,0 +1,45 @@ +.. _cn_api_tensor_search_index_select: + +index_select +------------------------------- + +.. py:function:: paddle.index_select(x, index, axis=0, name=None) + + + +该OP沿着指定轴 ``axis`` 对输入 ``x`` 进行索引,取 ``index`` 中指定的相应项,创建并返回到一个新的Tensor。这里 ``index`` 是一个 ``1-D`` Tensor。除 ``axis`` 轴外,返回的Tensor其余维度大小和输入 ``x`` 相等 , ``axis`` 维度的大小等于 ``index`` 的大小。 + +**参数**: + - **x** (Tensor)– 输入Tensor。 ``x`` 的数据类型可以是float32,float64,int32,int64。 + - **index** (Tensor)– 包含索引下标的1-D Tensor。 + - **axis** (int, 可选) – 索引轴,若未指定,则默认选取第0维。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +**返回**: + -**Tensor**: 返回一个数据类型同输入的Tensor。 + + +**代码示例**: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() # Now we are in imperative mode + data = np.array([[1.0, 2.0, 3.0, 4.0], + [5.0, 6.0, 7.0, 8.0], + [9.0, 10.0, 11.0, 12.0]]) + data_index = np.array([0, 1, 1]).astype('int32') + + x = paddle.to_tensor(data) + index = paddle.to_tensor(data_index) + out_z1 = paddle.index_select(x=x, index=index) + #[[1. 2. 3. 4.] + # [5. 6. 7. 8.] + # [5. 6. 7. 8.]] + out_z2 = paddle.index_select(x=x, index=index, axis=1) + #[[ 1. 2. 2.] + # [ 5. 6. 6.] + # [ 9. 10. 10.]] + diff --git a/doc/paddle/api/paddle/tensor/search/masked_select_cn.rst b/doc/paddle/api/paddle/tensor/search/masked_select_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..cecffda2c283dde127660a0a1b7083f64dbe102d --- /dev/null +++ b/doc/paddle/api/paddle/tensor/search/masked_select_cn.rst @@ -0,0 +1,43 @@ +.. _cn_api_tensor_masked_select: + +masked_select +------------------------------- + +.. py:function:: paddle.masked_select(x, mask, name=None) + + + +该OP返回一个1-D 的Tensor, Tensor的值是根据 ``mask`` 对输入 ``x`` 进行选择的, ``mask`` 的数据类型是bool 。 + +参数: + - **x** (Tensor) - 输入Tensor, 数据类型为float32, float64,int32 或者int64。 + - **mask** (Tensor) - 用于索引的二进制掩码的Tensor,数据类型维bool。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:返回一个根据 ``mask`` 选择的的Tensor + + +抛出异常: + - ``TypeError``: - 如果 ``x`` 不是 Tensor 或者 ``x`` 是Tensor的时候的数据类型不是 float32, float64, int32, int64其中之一。 + - ``TypeError``: - 如果 ``mask`` 不是 Tensor 或者 ``mask`` 是Tensor的时候的数据类型不是 bool。 + +**代码示例**: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + data = np.array([[1.0, 2.0, 3.0, 4.0], + [5.0, 6.0, 7.0, 8.0], + [9.0, 10.0, 11.0, 12.0]]).astype('float32') + + mask_data = np.array([[True, False, False, False], + [True, True, False, False], + [True, False, False, False]]).astype('bool') + x = paddle.to_tensor(data) + mask = paddle.to_tensor(mask_data) + out = paddle.masked_select(x, mask) + #[1.0 5.0 6.0 9.0] + diff --git a/doc/paddle/api/paddle/tensor/search/nonzero_cn.rst b/doc/paddle/api/paddle/tensor/search/nonzero_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c4e43dfaa114fdfd6de5f02636c918760db8a4a9 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/search/nonzero_cn.rst @@ -0,0 +1,70 @@ +.. _cn_api_tensor_search_nonzero: + +nonzero +------------------------------- + +.. 
py:function:: paddle.nonzero(input, as_tuple=False) + + + + +该OP返回输入 ``input`` 中非零元素的坐标。如果输入 ``input`` 有 ``n`` 维,共包含 ``z`` 个非零元素,当 ``as_tuple = False`` 时, +返回结果是一个 ``shape`` 等于 ``[z x n]`` 的 ``Tensor`` , 第 ``i`` 行代表输入中第 ``i`` 个非零元素的坐标;当 ``as_tuple = True`` 时, +返回结果是由 ``n`` 个大小为 ``z`` 的 ``1-D Tensor`` 构成的元组,第 ``i`` 个 ``1-D Tensor`` 记录输入的非零元素在第 ``i`` 维的坐标。 + +**参数**: + - **input** (Variable)– 输入张量。 + - **as_tuple** (bool, optinal) - 返回格式。是否以 ``1-D Tensor`` 构成的元组格式返回。 + +**返回**: + - **Variable** (Tensor or tuple(1-D Tensor)),数据类型为 **INT64** 。 + +**代码示例**: + +.. code-block:: python + + import paddle + import paddle.fluid as fluid + import numpy as np + + data1 = np.array([[1.0, 0.0, 0.0], + [0.0, 2.0, 0.0], + [0.0, 0.0, 3.0]]) + data2 = np.array([0.0, 1.0, 0.0, 3.0]) + data3 = np.array([0.0, 0.0, 0.0]) + with fluid.dygraph.guard(): + x1 = fluid.dygraph.to_variable(data1) + x2 = fluid.dygraph.to_variable(data2) + x3 = fluid.dygraph.to_variable(data3) + out_z1 = paddle.nonzero(x1) + print(out_z1.numpy()) + #[[0 0] + # [1 1] + # [2 2]] + out_z1_tuple = paddle.nonzero(x1, as_tuple=True) + for out in out_z1_tuple: + print(out.numpy()) + #[[0] + # [1] + # [2]] + #[[0] + # [1] + # [2]] + out_z2 = paddle.nonzero(x2) + print(out_z2.numpy()) + #[[1] + # [3]] + out_z2_tuple = paddle.nonzero(x2, as_tuple=True) + for out in out_z2_tuple: + print(out.numpy()) + #[[1] + # [3]] + out_z3 = paddle.nonzero(x3) + print(out_z3.numpy()) + #[] + out_z3_tuple = paddle.nonzero(x3, as_tuple=True) + for out in out_z3_tuple: + print(out.numpy()) + #[] + + diff --git a/doc/paddle/api/paddle/tensor/search/sort_cn.rst b/doc/paddle/api/paddle/tensor/search/sort_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..57b338c8b8e72dbdab1d60c8cd82fcc41d003724 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/search/sort_cn.rst @@ -0,0 +1,61 @@ +.. _cn_api_tensor_sort: + +sort +------------------------------- + +.. py:function:: paddle.sort(x, axis=-1, descending=False, name=None) + + + +对输入变量沿给定轴进行排序,输出排序好的数据,其维度和输入相同。默认升序排列,如果需要降序排列设置 ``descending=True`` 。 + + +参数: + - **x** (Tensor) - 输入的多维 ``Tensor`` ,支持的数据类型:float32、float64、int16、int32、int64、uint8。 + - **axis** (int,可选) - 指定对输入Tensor进行运算的轴, ``axis`` 的有效范围是[-R, R),R是输入 ``x`` 的Rank, ``axis`` 为负时与 ``axis`` +R 等价。默认值为0。 + - **descending** (bool,可选) - 指定算法排序的方向。如果设置为True,算法按照降序排序。如果设置为False或者不设置,按照升序排序。默认值为False。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:Tensor, 排序后的输出(与 ``x`` 维度相同、数据类型相同)。 + + +**代码示例**: + +.. code-block:: python + + import paddle + import paddle.imperative as imperative + import numpy as np + + paddle.enable_imperative() + input_array = np.array([[[5,8,9,5], + [0,0,1,7], + [6,9,2,4]], + [[5,2,4,2], + [4,7,7,9], + [1,7,0,6]]]).astype(np.float32) + x = imperative.to_variable(input_array) + out1 = paddle.sort(x=x, axis=-1) + out2 = paddle.sort(x=x, axis=0) + out3 = paddle.sort(x=x, axis=1) + print(out1.numpy()) + #[[[5. 5. 8. 9.] + # [0. 0. 1. 7.] + # [2. 4. 6. 9.]] + # [[2. 2. 4. 5.] + # [4. 7. 7. 9.] + # [0. 1. 6. 7.]]] + print(out2.numpy()) + #[[[5. 2. 4. 2.] + # [0. 0. 1. 7.] + # [1. 7. 0. 4.]] + # [[5. 8. 9. 5.] + # [4. 7. 7. 9.] + # [6. 9. 2. 6.]]] + print(out3.numpy()) + #[[[0. 0. 1. 4.] + # [5. 8. 2. 5.] + # [6. 9. 9. 7.]] + # [[1. 2. 0. 2.] + # [4. 7. 4. 6.] + # [5. 7. 7. 
9.]]] diff --git a/doc/paddle/api/paddle/tensor/search/topk_cn.rst b/doc/paddle/api/paddle/tensor/search/topk_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..47d73f11b02224ad073f1b5fe53fd9a85cddfe8b --- /dev/null +++ b/doc/paddle/api/paddle/tensor/search/topk_cn.rst @@ -0,0 +1,63 @@ +.. _cn_api_tensor_cn_topk: + +topk +------------------------------- + +.. py:function:: paddle.topk(x, k, axis=None, largest=True, sorted=True, name=None) + +该OP沿着可选的 ``axis`` 查找topk最大或者最小的结果和结果所在的索引信息。 +如果是一维Tensor,则直接返回topk查询的结果。如果是多维Tensor,则在指定的轴上查询topk的结果。 + +参数 +::::::::: + - **x** (Tensor) - 输入的多维 ``Tensor`` ,支持的数据类型:float32、float64、int32、int64。 + - **k** (int,Tensor) - 在指定的轴上进行top寻找的数量。 + - **axis** (int,可选) - 指定对输入Tensor进行运算的轴, ``axis`` 的有效范围是[-R, R),R是输入 ``x`` 的Rank, ``axis`` 为负时与 ``axis`` + R 等价。默认值为-1。 + - **largest** (bool,可选) - 指定算法排序的方向。如果设置为True,排序算法按照降序的算法排序,否则按照升序排序。默认值为True。 + - **sorted** (bool,可选) - 控制返回的结果是否按照有序返回,默认为True。在gpu上总是返回有序的结果。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回 +::::::::: +tuple(Tensor), 返回topk的结果和结果的索引信息。结果的数据类型和输入 ``x`` 一致。索引的数据类型是int64。 + +代码示例 +::::::::: + + +.. code-block:: python + + import numpy as np + import paddle + + paddle.disable_static() + + data_1 = np.array([1, 4, 5, 7]) + tensor_1 = paddle.to_tensor(data_1) + value_1, indices_1 = paddle.topk(tensor_1, k=1) + print(value_1.numpy()) + # [7] + print(indices_1.numpy()) + # [3] + data_2 = np.array([[1, 4, 5, 7], [2, 6, 2, 5]]) + tensor_2 = paddle.to_tensor(data_2) + value_2, indices_2 = paddle.topk(tensor_2, k=1) + print(value_2.numpy()) + # [[7] + # [6]] + print(indices_2.numpy()) + # [[3] + # [1]] + value_3, indices_3 = paddle.topk(tensor_2, k=1, axis=-1) + print(value_3.numpy()) + # [[7] + # [6]] + print(indices_3.numpy()) + # [[3] + # [1]] + value_4, indices_4 = paddle.topk(tensor_2, k=1, axis=0) + print(value_4.numpy()) + # [[2 6 5 7]] + print(indices_4.numpy()) + # [[1 1 0 0]] + diff --git a/doc/paddle/api/paddle/tensor/search/where_cn.rst b/doc/paddle/api/paddle/tensor/search/where_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a5e6540075da9cf794d388e9ab61e22ec5077c1b --- /dev/null +++ b/doc/paddle/api/paddle/tensor/search/where_cn.rst @@ -0,0 +1,50 @@ +.. _cn_api_tensor_where: + +where +------------------------------- + +.. py:function:: paddle.where(condition, x, y, name=None) + + + + +该OP返回一个根据输入 ``condition``, 选择 ``x`` 或 ``y`` 的元素组成的多维 ``Tensor`` : + +.. math:: + Out_i = + \left\{ + \begin{aligned} + &X_i, & & if \ cond_i \ is \ True \\ + &Y_i, & & if \ cond_i \ is \ False \\ + \end{aligned} + \right. + +参数: + - **condition** (Variable)- 选择 ``x`` 或 ``y`` 元素的条件 。 + - **x** (Variable)- 多维 ``Tensor`` ,数据类型为 ``float32`` 或 ``float64`` 或 ``int32`` 或 ``int64`` 。 + - **y** (Variable)- 多维 ``Tensor`` ,数据类型为 ``float32`` 或 ``float64`` 或 ``int32`` 或 ``int64`` 。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:数据类型与 ``x`` 相同的 ``Tensor`` 。 + +返回类型:Variable。 + + +**代码示例:** + +.. 
code-block:: python + + import paddle + import numpy as np + import paddle.fluid as fluid + + x_i = np.array([0.9383, 0.1983, 3.2, 1.2]).astype("float32") + y_i = np.array([1.0, 1.0, 1.0, 1.0]).astype("float32") + + with fluid.dygraph.guard(): + x = fluid.dygraph.to_variable(x_i) + y = fluid.dygraph.to_variable(y_i) + out = paddle.where(x>1, x, y) + + print(out.numpy()) + #out: [1.0, 1.0, 3.2, 1.2] diff --git a/doc/paddle/api/paddle/tensor/stat/mean_cn.rst b/doc/paddle/api/paddle/tensor/stat/mean_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8ac774a6b4471daca40ba4ab7ee8308fe3539b84 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/stat/mean_cn.rst @@ -0,0 +1,53 @@ +.. _cn_api_tensor_cn_mean: + +mean +------------------------------- + +.. py:function:: paddle.mean(x, axis=None, keepdim=False, name=None) + + + +该OP沿 ``axis`` 计算 ``x`` 的平均值。 + +参数 +:::::::::: + - x (Tensor) - 输入的Tensor,数据类型为:float32、float64。 + - axis (int|list|tuple, 可选) - 指定对 ``x`` 进行计算的轴。``axis`` 可以是int、list(int)、tuple(int)。如果 ``axis`` 包含多个维度,则沿着 ``axis`` 中的所有轴进行计算。``axis`` 或者其中的元素值应该在范围[-D, D)内,D是 ``x`` 的维度。如果 ``axis`` 或者其中的元素值小于0,则等价于 :math:`axis + D` 。如果 ``axis`` 是None,则对 ``x`` 的全部元素计算平均值。默认值为None。 + - keepdim (bool, 可选) - 是否在输出Tensor中保留减小的维度。如果 ``keepdim`` 为True,则输出Tensor和 ``x`` 具有相同的维度(减少的维度除外,减少的维度的大小为1)。否则,输出Tensor的形状会在 ``axis`` 上进行squeeze操作。默认值为False。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +:::::::::: + ``Tensor`` ,沿着 ``axis`` 进行平均值计算的结果,数据类型和 ``x`` 相同。 + +代码示例 +:::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + x = np.array([[[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12]], + [[13, 14, 15, 16], + [17, 18, 19, 20], + [21, 22, 23, 24]]], 'float32') + x = paddle.to_tensor(x) + out1 = paddle.mean(x) + # [12.5] + out2 = paddle.mean(x, axis=-1) + # [[ 2.5 6.5 10.5] + # [14.5 18.5 22.5]] + out3 = paddle.mean(x, axis=-1, keepdim=True) + # [[[ 2.5] + # [ 6.5] + # [10.5]] + # [[14.5] + # [18.5] + # [22.5]]] + out4 = paddle.mean(x, axis=[0, 2]) + # [ 8.5 12.5 16.5] diff --git a/doc/paddle/api/paddle/tensor/stat/numel_cn.rst b/doc/paddle/api/paddle/tensor/stat/numel_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..360dad0e10f2359bcc971d9a6fbfb60bb2e0a1e9 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/stat/numel_cn.rst @@ -0,0 +1,26 @@ +.. _cn_api_tensor_numel: + +numel +------------------------------- + +.. py:function:: paddle.numel(x) + + +该OP返回一个长度为1并且元素值为输入 ``x`` 元素个数的Tensor。 + +参数: + - **x** (Tensor) - 输入Tensor,数据类型为int32,int64, float16, float32, float64, int32, int64 。 + +返回: 返回长度为1并且元素值为 ``x`` 元素个数的Tensor。 + + +**代码示例**: + +.. code-block:: python + + import paddle + + paddle.disable_static() + x = paddle.full(shape=[4, 5, 7], fill_value=0, dtype='int32') + numel = paddle.numel(x) # 140 + diff --git a/doc/paddle/api/paddle/tensor/stat/std_cn.rst b/doc/paddle/api/paddle/tensor/stat/std_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..eb30280cc8b88e4ced5e54e1ad62ecda4af5f5d1 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/stat/std_cn.rst @@ -0,0 +1,37 @@ +.. _cn_api_tensor_cn_std: + +std +------------------------------- + +.. 
py:function:: paddle.std(x, axis=None, unbiased=True, keepdim=False, name=None) + +沿给定的轴 ``axis`` 计算 ``x`` 中元素的标准差。 + +参数 +:::::::::: + - x (Tensor) - 输入的Tensor,数据类型为:float32、float64。 + - axis (int|list|tuple, 可选) - 指定对 ``x`` 进行计算的轴。``axis`` 可以是int、list(int)、tuple(int)。如果 ``axis`` 包含多个维度,则沿着 ``axis`` 中的所有轴进行计算。``axis`` 或者其中的元素值应该在范围[-D, D)内,D是 ``x`` 的维度。如果 ``axis`` 或者其中的元素值小于0,则等价于 :math:`axis + D` 。如果 ``axis`` 是None,则对 ``x`` 的全部元素计算标准差。默认值为None。 + - unbiased (bool, 可选) - 是否使用无偏估计来计算标准差。使用 :math:`N` 来代表在 axis 上的维度,如果 ``unbiased`` 为True, 则在计算中使用 :math:`N - 1` 作为除数。为 False 时将使用 :math:`N` 作为除数。默认值为True。 + - keepdim (bool, 可选) - 是否在输出Tensor中保留减小的维度。如果 ``keepdim`` 为True,则输出Tensor和 ``x`` 具有相同的维度(减少的维度除外,减少的维度的大小为1)。否则,输出Tensor的形状会在 ``axis`` 上进行squeeze操作。默认值为False。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +:::::::::: + ``Tensor`` ,沿着 ``axis`` 进行标准差计算的结果,数据类型和 ``x`` 相同。 + +代码示例 +:::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + x = np.array([[1.0, 2.0, 3.0], [1.0, 4.0, 5.0]]) + x = paddle.to_tensor(x) + out1 = paddle.std(x) + # [1.63299316] + out2 = paddle.std(x, axis=1) + # [1. 2.081666] diff --git a/doc/paddle/api/paddle/tensor/stat/var_cn.rst b/doc/paddle/api/paddle/tensor/stat/var_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c44af2301a4ee0d192ec66738433014fced92eb2 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/stat/var_cn.rst @@ -0,0 +1,37 @@ +.. _cn_api_tensor_cn_var: + +var +------------------------------- + +.. py:function:: paddle.var(x, axis=None, unbiased=True, keepdim=False, name=None) + +沿给定的轴 ``axis`` 计算 ``x`` 中元素的方差。 + +参数 +:::::::::: + - x (Tensor) - 输入的Tensor,数据类型为:float32、float64。 + - axis (int|list|tuple, 可选) - 指定对 ``x`` 进行计算的轴。``axis`` 可以是int、list(int)、tuple(int)。如果 ``axis`` 包含多个维度,则沿着 ``axis`` 中的所有轴进行计算。``axis`` 或者其中的元素值应该在范围[-D, D)内,D是 ``x`` 的维度。如果 ``axis`` 或者其中的元素值小于0,则等价于 :math:`axis + D` 。如果 ``axis`` 是None,则对 ``x`` 的全部元素计算方差。默认值为None。 + - unbiased (bool, 可选) - 是否使用无偏估计来计算方差。使用 :math:`N` 来代表在 axis 上的维度,如果 ``unbiased`` 为True, 则在计算中使用 :math:`N - 1` 作为除数。为 False 时将使用 :math:`N` 作为除数。默认值为True。 + - keepdim (bool, 可选) - 是否在输出Tensor中保留减小的维度。如果 ``keepdim`` 为True,则输出Tensor和 ``x`` 具有相同的维度(减少的维度除外,减少的维度的大小为1)。否则,输出Tensor的形状会在 ``axis`` 上进行squeeze操作。默认值为False。 + - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + +返回 +:::::::::: + ``Tensor`` ,沿着 ``axis`` 进行方差计算的结果,数据类型和 ``x`` 相同。 + +代码示例 +:::::::::: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + x = np.array([[1.0, 2.0, 3.0], [1.0, 4.0, 5.0]]) + x = paddle.to_tensor(x) + out1 = paddle.var(x) + # [2.66666667] + out2 = paddle.var(x, axis=1) + # [1. 4.33333333] diff --git a/doc/paddle/api/paddle/text/BeamSearchDecoder_cn.rst b/doc/paddle/api/paddle/text/BeamSearchDecoder_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0aeb28a6ffe61eca841bd6b8ed6a13d6f2bba537 --- /dev/null +++ b/doc/paddle/api/paddle/text/BeamSearchDecoder_cn.rst @@ -0,0 +1,168 @@ +.. _cn_api_fluid_layers_BeamSearchDecoder: + +BeamSearchDecoder +------------------------------- + + + +.. 
py:class:: paddle.fluid.layers.BeamSearchDecoder(cell, start_token, end_token, beam_size, embedding_fn=None, output_fn=None) + + + + +带beam search解码策略的解码器。该接口包装一个cell来计算概率,然后执行一个beam search步骤计算得分,并为每个解码步骤选择候选输出。更多详细信息请参阅 `Beam search `_ + +**注意** 在使用beam search解码时,cell的输入和状态将被扩展到 :math:`beam\_size` ,得到 :math:`[batch\_size * beam\_size, ...]` 一样的形状,这个操作在BeamSearchDecoder中自动完成,因此,其他任何在 :code:`cell.call` 中使用的tensor,如果形状为 :math:`[batch\_size, ...]` ,都必须先手动使用 :code:`BeamSearchDecoder.tile_beam_merge_with_batch` 接口扩展。最常见的情况是带注意机制的编码器输出。 + +参数: + - **cell** (RNNCell) - RNNCell的实例或者具有相同接口定义的对象。 + - **start_token** (int) - 起始标记id。 + - **end_token** (int) - 结束标记id。 + - **beam_size** (int) - 在beam search中使用的beam宽度。 + - **embedding_fn** (可选) - 处理选中的候选id的接口。它通常是一个将词id转换为词嵌入的嵌入层,其返回值将作为 :code:`cell.call` 接口的 :code:`input` 参数。**注意** ,这里要使用 :ref:`cn_api_fluid_embedding` 而非 :ref:`cn_api_fluid_layers_embedding`,因为选中的id的形状是 :math:`[batch\_size, beam\_size]` ,如果使用后者则还需要在这里提供unsqueeze。如果 :code:`embedding_fn` 未提供,则必须在 :code:`cell.call` 中实现词嵌入转换。默认值None。 + - **output_fn** (可选) - 处理cell输出的接口,在计算得分和选择候选标记id之前使用。默认值None。 + +**示例代码** + +.. code-block:: python + + import paddle.fluid as fluid + from paddle.fluid.layers import GRUCell, BeamSearchDecoder + trg_embeder = lambda x: fluid.embedding( + x, size=[10000, 128], param_attr=fluid.ParamAttr(name="trg_embedding")) + output_layer = lambda x: layers.fc(x, + size=10000, + num_flatten_dims=len(x.shape) - 1, + param_attr=fluid.ParamAttr(name= + "output_w"), + bias_attr=False) + decoder_cell = GRUCell(hidden_size=128) + decoder = BeamSearchDecoder(decoder_cell, + start_token=0, + end_token=1, + beam_size=4, + embedding_fn=trg_embeder, + output_fn=output_layer) + + +.. py:method:: tile_beam_merge_with_batch(x, beam_size) + +扩展tensor的batch维度。此函数的输入是形状为 :math:`[batch\_size, s_0, s_1, ...]` 的tensor t,由minibatch中的样本 :math:`t[0], ..., t[batch\_size - 1]` 组成。将其扩展为形状是 :math:`[batch\_size * beam\_size, s_0, s_1, ...]` 的tensor,由 :math:`t[0], t[0], ..., t[1], t[1], ...` 组成, 每个minibatch中的样本重复 :math:`beam\_size` 次。 + +参数: + - **x** (Variable) - 形状为 :math:`[batch\_size, ...]` 的tenosr。数据类型应为float32,float64,int32,int64或bool。 + - **beam_size** (int) - 在beam search中使用的beam宽度。 + +返回:形状为 :math:`[batch\_size * beam\_size, ...]` 的tensor,其数据类型与 :code:`x` 相同。 + +返回类型:Variable + +.. py:method:: _split_batch_beams(x) + +将形状为 :math:`[batch\_size * beam\_size, ...]` 的tensor变换为形状为 :math:`[batch\_size, beam\_size, ...]` 的新tensor。 + +参数: + - **x** (Variable) - 形状为 :math:`[batch\_size * beam\_size, ...]` 的tenosr。数据类型应为float32,float64,int32,int64或bool。 + +返回:形状为 :math:`[batch\_size, beam\_size, ...]` 的tensor,其数据类型与 :code:`x` 相同。 + +返回类型:Variable + +.. py:method:: _merge_batch_beams(x) + +将形状为 :math:`[batch\_size, beam\_size, ...]` 的tensor变换为形状为 :math:`[batch\_size * beam\_size,...]` 的新tensor。 + +参数: + - **x** (Variable) - 形状为 :math:`[batch\_size, beam_size,...]` 的tenosr。数据类型应为float32,float64,int32,int64或bool。 + +返回:形状为 :math:`[batch\_size * beam\_size, ...]` 的tensor,其数据类型与 :code:`x` 相同。 + +返回类型:Variable + +.. 
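note:: 下面用 ``paddle.reshape`` 演示 ``_split_batch_beams`` 与 ``_merge_batch_beams`` 所描述的形状变换关系(仅为形状层面的示意,并非这两个方法的内部实现):

.. code-block:: python

    import paddle

    paddle.disable_static()

    batch_size, beam_size, hidden_size = 4, 3, 8
    x = paddle.rand([batch_size, beam_size, hidden_size])

    # _merge_batch_beams:[batch_size, beam_size, ...] -> [batch_size * beam_size, ...]
    merged = paddle.reshape(x, [batch_size * beam_size, hidden_size])
    # _split_batch_beams:[batch_size * beam_size, ...] -> [batch_size, beam_size, ...]
    restored = paddle.reshape(merged, [batch_size, beam_size, hidden_size])

    print(merged.shape, restored.shape)
    # [12, 8] [4, 3, 8]

.. 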
py:method:: _expand_to_beam_size(x) + +此函数输入形状为 :math:`[batch\_size,s_0,s_1,...]` 的tensor t,由minibatch中的样本 :math:`t[0],...,t[batch\_size-1]` 组成。将其扩展为形状 :math:`[ batch\_size,beam\_size,s_0,s_1,...]` 的tensor,由 :math:`t[0],t[0],...,t[1],t[1],...` 组成,其中每个minibatch中的样本重复 :math:`beam\_size` 次。 + +参数: + - **x** (Variable) - 形状为 :math:`[batch\_size, ...]` 的tenosr。数据类型应为float32,float64,int32,int64或bool。 + +返回:具有与 :code:`x` 相同的形状和数据类型的tensor,其中未完成的beam保持不变,而已完成的beam被替换成特殊的tensor(tensor中所有概率质量被分配给EOS标记)。 + +返回类型:Variable + +.. py:method:: _mask_probs(probs, finished) + +屏蔽对数概率。该函数使已完成的beam将所有概率质量分配给EOS标记,而未完成的beam保持不变。 + +参数: + - **probs** (Variable) - 形状为 :math:`[batch\_size,beam\_size,vocab\_size]` 的tensor,表示对数概率。其数据类型应为float32。 + - **finish** (Variable) - 形状为 :math:`[batch\_size,beam\_size]` 的tensor,表示所有beam的完成状态。其数据类型应为bool。 + +返回:具有与 :code:`x` 相同的形状和数据类型的tensor,其中未完成的beam保持不变,而已完成的beam被替换成特殊的tensor(tensor中所有概率质量被分配给EOS标记)。 + +返回类型:Variable + +.. py:method:: _gather(x, indices, batch_size) + +对tensor :code:`x` 根据索引 :code:`indices` 收集。 + +参数: + - **x** (Variable) - 形状为 :math:`[batch\_size, beam\_size,...]` 的tensor。 + - **index** (Variable) - 一个形状为 :math:`[batch\_size, beam\_size]` 的int64 tensor,表示我们用来收集的索引。 + - **batch_size** (Variable) - 形状为 :math:`[1]` 的tensor。其数据类型应为int32或int64。 + +返回:具有与 :code:``x` 相同的形状和数据类型的tensor,表示收集后的tensor。 + +返回类型:Variable + +.. py:method:: initialize(initial_cell_states) + +初始化BeamSearchDecoder。 + +参数: + - **initial_cell_states** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构。调用者提供的参数。 + +返回:一个元组 :code:`(initial_inputs, initial_states, finished)`。:code:`initial_inputs` 是一个tensor,当 :code:`embedding_fn` 为None时,该tensor t的形状为 :math:`[batch\_size,beam\_size]` ,值为 :code:`start_token` ;否则使用 :code:`embedding_fn(t)` 返回的值。:code:`initial_states` 是tensor变量的嵌套结构(命名元组,字段包括 :code:`cell_states,log_probs,finished,lengths`),其中 :code:`log_probs,finished,lengths` 都含有一个tensor,形状为 :math:`[batch\_size, beam\_size]`,数据类型为float32,bool,int64。:code:`cell_states` 具有与输入参数 :code:`initial_cell_states` 相同结构的值,但形状扩展为 :math:`[batch\_size,beam\_size,...]`。 :code:`finished` 是一个布尔型tensor,由False填充,形状为 :math:`[batch\_size,beam\_size]`。 + +返回类型:tuple + +.. py:method:: _beam_search_step(time, logits, next_cell_states, beam_state) + +计算得分并选择候选id。 + +参数: + - **time** (Variable) - 调用者提供的形状为[1]的tensor,表示当前解码的时间步长。其数据类型为int64。 + - **logits** (Variable) - 形状为 :math:`[batch\_size,beam\_size,vocab\_size]` 的tensor,表示当前时间步的logits。其数据类型为float32。 + - **next_cell_states** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构。它的结构,形状和数据类型与 :code:`initialize()` 的返回值 :code:`initial_states` 中的 :code:`cell_states` 相同。它代表该cell的下一个状态。 + - **beam_state** (Variable) - tensor变量的结构。在第一个解码步骤与 :code:`initialize()` 返回的 :code:`initial_states` 同,其他步骤与 :code:`step()` 返回的 :code:`beam_search_state` 相同。 + +返回:一个元组 :code:`(beam_search_output, beam_search_state)`。:code:`beam_search_output` 是tensor变量的命名元组,字段为 :code:`scores,predicted_ids parent_ids`。其中 :code:`scores,predicted_ids,parent_ids` 都含有一个tensor,形状为 :math:`[batch\_size,beam\_size]`,数据类型为float32 ,int64,int64。:code:`beam_search_state` 具有与输入参数 :code:`beam_state` 相同的结构,形状和数据类型。 + +返回类型:tuple + +.. 
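note:: 实际使用中一般不需要手动调用上述各个方法,而是把 ``BeamSearchDecoder`` 交给 :code:`fluid.layers.dynamic_decode` 完成整个解码循环。下面是一个组合用法的示意(假设处于静态图模式,``encoder_output`` 为形状 [batch_size, 序列长度, hidden_size] 的编码器输出占位数据;接口细节请以实际安装版本为准):

.. code-block:: python

    import paddle
    import paddle.fluid as fluid
    import paddle.fluid.layers as layers
    from paddle.fluid.layers import GRUCell, BeamSearchDecoder

    paddle.enable_static()  # 确保处于静态图模式

    # 编码器输出,这里仅用占位数据示意
    encoder_output = fluid.data(
        name="encoder_output", shape=[None, 32, 128], dtype="float32")

    trg_embeder = lambda x: fluid.embedding(
        x, size=[10000, 128], param_attr=fluid.ParamAttr(name="trg_embedding"))
    output_layer = lambda x: layers.fc(x,
                                       size=10000,
                                       num_flatten_dims=len(x.shape) - 1,
                                       param_attr=fluid.ParamAttr(name="output_w"),
                                       bias_attr=False)

    decoder_cell = GRUCell(hidden_size=128)
    decoder = BeamSearchDecoder(decoder_cell,
                                start_token=0,
                                end_token=1,
                                beam_size=4,
                                embedding_fn=trg_embeder,
                                output_fn=output_layer)

    # dynamic_decode 内部循环调用 decoder.step,直到达到最大步数或所有 beam 结束
    outputs = layers.dynamic_decode(
        decoder=decoder,
        inits=decoder_cell.get_initial_states(encoder_output),
        max_step_num=10)

.. 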
py:method:: step(time, inputs, states, **kwargs) + +执行beam search解码步骤,该步骤使用 :code:`cell` 来计算概率,然后执行beam search步骤以计算得分并选择候选标记ID。 + +参数: + - **time** (Variable) - 调用者提供的形状为[1]的tensor,表示当前解码的时间步长。其数据类型为int64。。 + - **inputs** (Variable) - tensor变量。在第一个解码时间步时与由 :code:`initialize()` 返回的 :code:`initial_inputs` 相同,其他时间步与由 :code:`step()` 返回的 :code:`next_inputs` 相同。 + - **states** (Variable) - tensor变量的结构。在第一个解码时间步时与 :code:`initialize()` 返回的 :code:`initial_states` 相同,其他时间步与由 :code:`step()` 返回的 :code:`beam_search_state` 相同。 + - **kwargs** - 附加的关键字参数,由调用者提供。 + +返回:一个元组 :code:`(beam_search_output,beam_search_state,next_inputs,finish)` 。:code:`beam_search_state` 和参数 :code:`states` 具有相同的结构,形状和数据类型。 :code:`next_inputs` 与输入参数 :code:`inputs` 具有相同的结构,形状和数据类型。 :code:`beam_search_output` 是tensor变量的命名元组(字段包括 :code:`scores,predicted_ids,parent_ids` ),其中 :code:`scores,predicted_ids,parent_ids` 都含有一个tensor,形状为 :math:`[batch\_size,beam\_size]`,数据类型为float32 ,int64,int64。:code:`finished` 是一个bool类型的tensor,形状为 :math:`[batch\_size,beam\_size]`。 + +返回类型:tuple + +.. py:method:: finalize(outputs, final_states, sequence_lengths) + +使用 :code:`gather_tree` 沿beam search树回溯并构建完整的预测序列。 + +参数: + - **outputs** (Variable) - tensor变量组成的结构(命名元组),该结构和数据类型与 :code:`output_dtype` 相同。tensor将所有时间步的输出堆叠,因此具有形状 :math:`[time\_step,batch\_size,...]`。 + - **final_states** (Variable) - tensor变量组成的结构(命名元组)。它是 :code:`decoder.step` 在最后一个解码步骤返回的 :code:`next_states`,因此具有与任何时间步的 :code:`state` 相同的结构、形状和数据类型。 + - **sequence_lengths** (Variable) - tensor,形状为 :math:`[batch\_size,beam\_size]`,数据类型为int64。它包含解码期间确定的每个beam的序列长度。 + +返回:一个元组 :code:`(predicted_ids, final_states)`。:code:`predicted_ids` 是一个tensor,形状为 :math:`[time\_step,batch\_size,beam\_size]`,数据类型为int64。:code:`final_states` 与输入参数 :code:`final_states` 相同。 + +返回类型:tuple diff --git a/doc/paddle/api/paddle/text/RNNCell_cn.rst b/doc/paddle/api/paddle/text/RNNCell_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..78fad49a5a7eaf318f8bc9c23423a55fe1b3e0b8 --- /dev/null +++ b/doc/paddle/api/paddle/text/RNNCell_cn.rst @@ -0,0 +1,49 @@ +.. _cn_api_fluid_layers_RNNCell: + +RNNCell +------------------------------- + + + +.. py:class:: paddle.fluid.layers.RNNCell(name=None) + + + +RNNCell是抽象的基类,代表将输入和状态映射到输出和新状态的计算,主要用于RNN。 + +.. py:method:: call(inputs, states, **kwargs) + +每个cell都必须实现此接口,将(输入和状态)映射到(输出和新状态)。为了更灵活,输入和状态都可以是单个tensor变量或嵌套结构的tensor变量(列表 | 元组 | 命名元组 | 字典)。 + +参数: + - **inputs** - 输入,为单个tensor变量或tensor变量组成的嵌套结构。 + - **states** - 状态,单个tensor变量或tensor变量组成的嵌套结构。 + - **kwargs** - 附加的关键字参数,由调用者提供。 +         +返回:包含输出和新状态的二元组 :code:`(outputs,new_states)` 。输出和新状态都可以是嵌套的tensor变量。新状态必须具有与状态相同的结构。 + +返回类型:tuple + +.. py:method:: get_initial_states(batch_ref, shape=None, dtype=None, init_value=0, batch_dim_idx=0) + +该接口根据提供的形状,数据类型和初始值来初始化状态。 + +参数: + - **batch_ref** - 单个tensor变量或tensor组成的嵌套结构。 tensor的第一维将用作初始化状态的batch大小。 + - **shape** - 单个形状或形状组成的嵌套结构,单个形状是整数的列表或元组。 如果形状的第一维不是batch大小,则自动插入-1作为batch大小。 如果该项为None,将使用属性 :code:`state_shape`。默认值为None。 + - **dtype** - 单个数据类型或由数据类型组成的嵌套结构。该结构必须与shape的结构相同,例外是当状态中的所有tensor都具有相同的数据类型,这时可以使用单个数据类型。 如果是None并且属性 :code:`cell.state_shape` 不可用,则float32将用作数据类型。 默认值为None。 + - **init_value** - 用于初始化状态的浮点值。 + - **batch_dim_idx** - 用于指示 :code:`batch_ref` 中batch所在维度的int值,默认值为0。 + +返回:和shape具有相同结构的tensor变量,代表初始状态。 + +返回类型:Variable + +.. 
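note:: 下面是自定义一个最简单 cell 的示意(假设处于静态图模式;参数共享、初始化等细节从略),演示 :code:`call` 与 :code:`state_shape` 的配合方式:

.. code-block:: python

    import paddle
    import paddle.fluid as fluid
    import paddle.fluid.layers as layers
    from paddle.fluid.layers import RNNCell

    paddle.enable_static()  # 确保处于静态图模式

    class SimpleCell(RNNCell):
        """新状态 = fc(输入) + 旧状态,仅作接口示意。"""

        def __init__(self, hidden_size):
            super(SimpleCell, self).__init__()
            self.hidden_size = hidden_size

        def call(self, inputs, states):
            new_states = layers.fc(inputs, size=self.hidden_size) + states
            # 输出与新状态可以相同,也可以不同
            return new_states, new_states

        @property
        def state_shape(self):
            # 不含 batch 维,batch 维由 get_initial_states 根据 batch_ref 自动插入
            return [self.hidden_size]

    cell = SimpleCell(hidden_size=16)
    x = fluid.data(name="x", shape=[None, 16], dtype="float32")
    init_states = cell.get_initial_states(batch_ref=x, shape=[16], dtype="float32")
    out, new_states = cell.call(x, init_states)

.. 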
py:method:: state_shape() + +抽象方法(属性),该接口用于初始化cell的状态。 单个形状或由形状组成的嵌套结构,单个形状可以是整数的列表或元组(如果形状的第一维不是batch大小,则自动插入-1作为batch大小)。 当没有使用 :code:`get_initial_states` 初始化状态或 :code:`get_initial_states` 没有提供 :code:`shape` 参数的时候,不用实现该方法。 + + +.. py:method:: state_dtype() + +抽象方法(属性),该接口用于初始化cell的状态。 单个数据类型或由数据类型组成的嵌套结构,该结构必须与 :code:`shape` 的结构相同,例外是当状态中的所有tensor都具有相同的数据类型,这时可以使用单个数据类型。 当没有使用 :code:`get_initial_states` 初始化状态或 :code:`get_initial_states` 没有提供 :code:`dtype` 参数的时候,不用实现该方法。 diff --git a/doc/paddle/api/paddle/text/datasets/conll05/Conll05st_cn.rst b/doc/paddle/api/paddle/text/datasets/conll05/Conll05st_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a459259d2147756bd9834738dde21c246df030b5 --- /dev/null +++ b/doc/paddle/api/paddle/text/datasets/conll05/Conll05st_cn.rst @@ -0,0 +1,62 @@ +.. _cn_api_text_datasets_Conll05st: + +Conll05st +------------------------------- + +.. py:class:: paddle.text.datasets.Conll05st() + + +该类是对`Conll05st `_ +测试数据集的实现. + +.. note:: + 只支持自动下载公共的 Conll05st测试数据集。 + +参数 +::::::::: + - data_file(str)- 保存数据的路径,如果参数:attr:`download`设置为True, + 可设置为None。默认为None。 + - word_dict_file(str)- 保存词典的路径。如果参数:attr:`download`设置为True, + 可设置为None。默认为None。 + - verb_dict_file(str)- 保存动词词典的路径。如果参数:attr:`download`设置为True, + 可设置为None。默认为None。 + - target_dict_file(str)- 保存目标词典的路径如果参数:attr:`download`设置为True, + 可设置为None。默认为None。 + - emb_file(str)- 保存词嵌入词典的文件。只有在:code:`get_embedding`能被设置为None + 且:attr:`download` 为True时使用。 + - download(bool)- 如果:attr:`data_file` :attr:`word_dict_file` + :attr:`verb_dict_file` 和:attr:`target_dict_file` 未设置,是否下载数据集。默认为True。 + +返回值 +::::::::: +``Dataset``,conll05st数据集实例。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + from paddle.text.datasets import Conll05st + + class SimpleNet(paddle.nn.Layer): + def __init__(self): + super(SimpleNet, self).__init__() + + def forward(self, pred_idx, mark, label): + return paddle.sum(pred_idx), paddle.sum(mark), paddle.sum(label) + + paddle.disable_static() + + conll05st = Conll05st() + + for i in range(10): + pred_idx, mark, label= conll05st[i][-3:] + pred_idx = paddle.to_tensor(pred_idx) + mark = paddle.to_tensor(mark) + label = paddle.to_tensor(label) + + model = SimpleNet() + pred_idx, mark, label= model(pred_idx, mark, label) + print(pred_idx.numpy(), mark.numpy(), label.numpy()) + diff --git a/doc/paddle/api/paddle/text/datasets/imdb/Imdb_cn.rst b/doc/paddle/api/paddle/text/datasets/imdb/Imdb_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..455393089c2484d3ff168a41384e5b7cb5be6bc9 --- /dev/null +++ b/doc/paddle/api/paddle/text/datasets/imdb/Imdb_cn.rst @@ -0,0 +1,50 @@ +.. _cn_api_text_datasets_Imdb: + +Imdb +------------------------------- + +.. py:class:: paddle.text.datasets.Imdb() + + +该类是对`IMDB `_ 测试数据集的实现。 + +参数 +::::::::: + - data_file(str) - 保存压缩数据的路径,如果参数:attr:`download`设置为True, + 可设置为None。默认为None。 + - mode(str) - 'train' 或'test' 模式。默认为'train'。 + - cutoff(int) - 构建词典的截止大小。默认为Default 150。 + - download(bool) - 如果:attr:`data_file`未设置,是否自动下载数据集。默认为True。 + +返回值 +::::::::: +``Dataset``, IMDB数据集实例。 + +代码示例 +::::::::: + +.. 
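note:: 下面是一个快速查看数据集规模和单条样本的示意(假设数据可正常自动下载;每条样本为词 id 序列加情感标签):

.. code-block:: python

    from paddle.text.datasets import Imdb

    imdb = Imdb(mode='train')
    print(len(imdb))       # 训练集样本数

    doc, label = imdb[0]   # doc 为词 id 序列,label 为情感标签
    print(len(doc), label)

结合组网的完整示例如下:

.. 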
code-block:: python + + import paddle + from paddle.text.datasets import Imdb + + class SimpleNet(paddle.nn.Layer): + def __init__(self): + super(SimpleNet, self).__init__() + + def forward(self, doc, label): + return paddle.sum(doc), label + + paddle.disable_static() + + imdb = Imdb(mode='train') + + for i in range(10): + doc, label = imdb[i] + doc = paddle.to_tensor(doc) + label = paddle.to_tensor(label) + + model = SimpleNet() + image, label = model(doc, label) + print(doc.numpy().shape, label.numpy().shape) + diff --git a/doc/paddle/api/paddle/text/datasets/imikolov/Imikolov_cn.rst b/doc/paddle/api/paddle/text/datasets/imikolov/Imikolov_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..714ca66688993d14cb17c788e62d16d6b0f475ed --- /dev/null +++ b/doc/paddle/api/paddle/text/datasets/imikolov/Imikolov_cn.rst @@ -0,0 +1,52 @@ +.. _cn_api_text_datasets_Imikolov: + +Imikolov +------------------------------- + +.. py:class:: paddle.text.datasets.Imikolov() + + +该类是对imikolov测试数据集的实现。 + +参数 +::::::::: + - data_file(str)- 保存数据的路径,如果参数:attr:`download`设置为True, + 可设置为None。默认为None。 + - data_type(str)- 'NGRAM'或'SEQ'。默认为'NGRAM'。 + - window_size(int) - 'NGRAM'数据滑动窗口的大小。默认为-1。 + - mode(str)- 'train' 'test' mode. Default 'train'. + - min_word_freq(int)- 构建词典的最小词频。默认为50。 + - download(bool)- 如果:attr:`data_file`未设置,是否自动下载数据集。默认为True。 + +返回值 +::::::::: +``Dataset``,imikolov数据集实例。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + from paddle.text.datasets import Imikolov + + class SimpleNet(paddle.nn.Layer): + def __init__(self): + super(SimpleNet, self).__init__() + + def forward(self, src, trg): + return paddle.sum(src), paddle.sum(trg) + + paddle.disable_static() + + imikolov = Imikolov(mode='train', data_type='SEQ', window_size=2) + + for i in range(10): + src, trg = imikolov[i] + src = paddle.to_tensor(src) + trg = paddle.to_tensor(trg) + + model = SimpleNet() + src, trg = model(src, trg) + print(src.numpy().shape, trg.numpy().shape) + diff --git a/doc/paddle/api/paddle/text/datasets/movie_reviews/MovieReviews_cn.rst b/doc/paddle/api/paddle/text/datasets/movie_reviews/MovieReviews_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b4c99bccf3c72776f9f805445791918d08282eda --- /dev/null +++ b/doc/paddle/api/paddle/text/datasets/movie_reviews/MovieReviews_cn.rst @@ -0,0 +1,49 @@ +.. _cn_api_text_datasets_MovieReviews: + +MovieReviews +------------------------------- + +.. py:class:: paddle.text.datasets.MovieReviews() + + +该类是对`NLTK movie reviews `_ 测试数据集的实现。 + +参数 +::::::::: + - data_file(str)- 保存压缩数据的路径,如果参数:attr:`download`设置为True, + 可设置为None。默认为None。 + - mode(str)- 'train'或 'test' 模式。默认为'train'。 + - download(bool)- 如果:attr:`data_file`未设置,是否自动下载数据集。默认为True。 + +返回值 +::::::::: +``Dataset``,NLTK movie reviews数据集实例。 + +代码示例 +::::::::: + +.. 
code-block:: python + + import paddle + from paddle.text.datasets import MovieReviews + + class SimpleNet(paddle.nn.Layer): + def __init__(self): + super(SimpleNet, self).__init__() + + def forward(self, word, category): + return paddle.sum(word), category + + paddle.disable_static() + + movie_reviews = MovieReviews(mode='train') + + for i in range(10): + word_list, category = movie_reviews[i] + word_list = paddle.to_tensor(word_list) + category = paddle.to_tensor(category) + + model = SimpleNet() + word_list, category = model(word_list, category) + print(word_list.numpy().shape, category.numpy()) + diff --git a/doc/paddle/api/paddle/text/datasets/movielens/Movielens_cn.rst b/doc/paddle/api/paddle/text/datasets/movielens/Movielens_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7921ec9e0132f1cfc1e6ff1da93ed78a40e47b12 --- /dev/null +++ b/doc/paddle/api/paddle/text/datasets/movielens/Movielens_cn.rst @@ -0,0 +1,52 @@ +.. _cn_api_text_datasets_Movielens: + +Movielens +------------------------------- + +.. py:class:: paddle.text.datasets.Movielens() + + +该类是对`Movielens 1-M `_ +测试数据集的实现。 + +参数 +::::::::: + - data_file(str)- 保存压缩数据的路径,如果参数:attr:`download`设置为True, + 可设置为None。默认为None。 + - mode(str)- 'train' 或 'test' 模式。默认为'train'。 + - test_ratio(float) - 为测试集划分的比例。默认为0.1。 + - rand_seed(int)- 随机数种子。默认为0。 + - download(bool)- 如果:attr:`data_file`未设置,是否自动下载数据集。默认为True。 + +返回值 +::::::::: + ``Dataset``,Movielens 1-M数据集实例。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + from paddle.text.datasets import Movielens + + class SimpleNet(paddle.nn.Layer): + def __init__(self): + super(SimpleNet, self).__init__() + + def forward(self, category, title, rating): + return paddle.sum(category), paddle.sum(title), paddle.sum(rating) + + paddle.disable_static() + + movielens = Movielens(mode='train') + + for i in range(10): + category, title, rating = movielens[i][-3:] + category = paddle.to_tensor(category) + title = paddle.to_tensor(title) + rating = paddle.to_tensor(rating) + + model = SimpleNet() + category, title, rating = model(category, title, rating) + print(category.numpy().shape, title.numpy().shape, rating.numpy().shape) diff --git a/doc/paddle/api/paddle/text/datasets/uci_housing/UCIHousing_cn.rst b/doc/paddle/api/paddle/text/datasets/uci_housing/UCIHousing_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8f9ea0974c05fe2373fdafac8a3265632a787a34 --- /dev/null +++ b/doc/paddle/api/paddle/text/datasets/uci_housing/UCIHousing_cn.rst @@ -0,0 +1,50 @@ +.. _cn_api_text_datasets_UCIHousing: + +UCIHousing +------------------------------- + +.. py:class:: paddle.text.datasets.UCIHousing() + + +该类是对`UCI housing `_ +测试数据集的实现。 + +参数 +::::::::: + - data_file(str)- 保存数据的路径,如果参数:attr:`download`设置为True, + 可设置为None。默认为None。 + - mode(str)- 'train'或'test'模式。默认为'train'。 + - download(bool)- 如果:attr:`data_file`未设置,是否自动下载数据集。默认为True。 + +返回值 +::::::::: +``Dataset``,UCI housing数据集实例。 + +代码示例 +::::::::: + +.. 
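note:: UCI housing 每条样本的特征长度固定,因此可以直接配合 :code:`paddle.io.DataLoader` 组 batch。下面是一个示意(假设运行在动态图模式下;DataLoader 各参数含义请以其文档为准):

.. code-block:: python

    import paddle
    from paddle import set_device
    from paddle.io import DataLoader
    from paddle.text.datasets import UCIHousing

    paddle.disable_static()
    device = set_device('cpu')

    uci_housing = UCIHousing(mode='train')
    loader = DataLoader(uci_housing,
                        places=device,
                        batch_size=32,
                        shuffle=True,
                        return_list=True)

    for data in loader:
        feature, target = data
        # 形如 [32, 13] [32, 1](最后一个 batch 可能不足 32 条)
        print(feature.shape, target.shape)
        break

完整示例如下:

.. 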
code-block:: python + + import paddle + from paddle.text.datasets import UCIHousing + + class SimpleNet(paddle.nn.Layer): + def __init__(self): + super(SimpleNet, self).__init__() + + def forward(self, feature, target): + return paddle.sum(feature), target + + paddle.disable_static() + + uci_housing = UCIHousing(mode='train') + + for i in range(10): + feature, target = uci_housing[i] + feature = paddle.to_tensor(feature) + target = paddle.to_tensor(target) + + model = SimpleNet() + feature, target = model(feature, target) + print(feature.numpy().shape, target.numpy()) + diff --git a/doc/paddle/api/paddle/text/datasets/wmt14/WMT14_cn.rst b/doc/paddle/api/paddle/text/datasets/wmt14/WMT14_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..eb25b94c607380eb0fa7e8bf14f755a97499e48d --- /dev/null +++ b/doc/paddle/api/paddle/text/datasets/wmt14/WMT14_cn.rst @@ -0,0 +1,53 @@ +.. _cn_api_text_datasets_WMT14: + +WMT14 +------------------------------- + +.. py:class:: paddle.text.datasets.WMT14() + + +该类是对`WMT14 `_ 测试数据集实现。 +由于原始WMT14数据集太大,我们在这里提供了一组小数据集。该类将从 +http://paddlepaddle.bj.bcebos.com/demo/wmt_shrinked_data/wmt14.tgz +下载数据集。 + +参数 +::::::::: + - data_file(str)- 保存数据集压缩文件的路径, 如果参数:attr:`download`设置为True,可设置为None。 + 默认为None。 + - mode(str)- 'train', 'test' 或'gen'。默认为'train'。 + - dict_size(int)- 词典大小。默认为-1。 + - download(bool)- 如果:attr:`data_file`未设置,是否自动下载数据集。默认为True。 + +返回值 +::::::::: +``Dataset``,WMT14数据集实例。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + from paddle.text.datasets import WMT14 + + class SimpleNet(paddle.nn.Layer): + def __init__(self): + super(SimpleNet, self).__init__() + + def forward(self, src_ids, trg_ids, trg_ids_next): + return paddle.sum(src_ids), paddle.sum(trg_ids), paddle.sum(trg_ids_next) + + paddle.disable_static() + + wmt14 = WMT14(mode='train', dict_size=50) + + for i in range(10): + src_ids, trg_ids, trg_ids_next = wmt14[i] + src_ids = paddle.to_tensor(src_ids) + trg_ids = paddle.to_tensor(trg_ids) + trg_ids_next = paddle.to_tensor(trg_ids_next) + + model = SimpleNet() + src_ids, trg_ids, trg_ids_next = model(src_ids, trg_ids, trg_ids_next) + print(src_ids.numpy(), trg_ids.numpy(), trg_ids_next.numpy()) diff --git a/doc/paddle/api/paddle/text/datasets/wmt16/WMT16_cn.rst b/doc/paddle/api/paddle/text/datasets/wmt16/WMT16_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8166a1d902303c4f40215cf17ce3f5c762962583 --- /dev/null +++ b/doc/paddle/api/paddle/text/datasets/wmt16/WMT16_cn.rst @@ -0,0 +1,69 @@ +.. _cn_api_text_datasets_WMT16: + +WMT16 +------------------------------- + +.. py:class:: paddle.text.datasets.WMT16() + + +该类是对`WMT16 `_ 测试数据集实现。 +ACL2016多模态机器翻译。有关更多详细信息,请访问此网站: +http://www.statmt.org/wmt16/multimodal-task.html#task1 + +如果您任务中使用了该数据集,请引用如下论文: +Multi30K: Multilingual English-German Image Descriptions. + +.. code-block:: text + + @article{elliott-EtAl:2016:VL16, + author = {{Elliott}, D. and {Frank}, S. and {Sima"an}, K. 
and {Specia}, L.}, + title = {Multi30K: Multilingual English-German Image Descriptions}, + booktitle = {Proceedings of the 6th Workshop on Vision and Language}, + year = {2016}, + pages = {70--74}, + year = 2016 + } + +参数 +::::::::: + - data_file(str)- 保存数据集压缩文件的路径,如果参数:attr:`download`设置为True,可设置为None。 + 默认值为None。 + - mode(str)- 'train', 'test' 或 'val'。默认为'train'。 + - src_dict_size(int)- 源语言词典大小。默认为-1。 + - trg_dict_size(int) - 目标语言测点大小。默认为-1。 + - lang(str)- 源语言,'en' 或 'de'。默认为 'en'。 + - download(bool)- 如果:attr:`data_file`未设置,是否自动下载数据集。默认为True。 + +返回值 +::::::::: +``Dataset``,WMT16数据集实例。 + +代码示例 +::::::::: + +.. code-block:: python + + import paddle + from paddle.text.datasets import WMT16 + + class SimpleNet(paddle.nn.Layer): + def __init__(self): + super(SimpleNet, self).__init__() + + def forward(self, src_ids, trg_ids, trg_ids_next): + return paddle.sum(src_ids), paddle.sum(trg_ids), paddle.sum(trg_ids_next) + + paddle.disable_static() + + wmt16 = WMT16(mode='train', src_dict_size=50, trg_dict_size=50) + + for i in range(10): + src_ids, trg_ids, trg_ids_next = wmt16[i] + src_ids = paddle.to_tensor(src_ids) + trg_ids = paddle.to_tensor(trg_ids) + trg_ids_next = paddle.to_tensor(trg_ids_next) + + model = SimpleNet() + src_ids, trg_ids, trg_ids_next = model(src_ids, trg_ids, trg_ids_next) + print(src_ids.numpy(), trg_ids.numpy(), trg_ids_next.numpy()) + diff --git a/doc/paddle/api/paddle/utils/download/get_weights_path_from_url_cn.rst b/doc/paddle/api/paddle/utils/download/get_weights_path_from_url_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ce9fd2e0113f82208b7c192c68e96a1518e9d690 --- /dev/null +++ b/doc/paddle/api/paddle/utils/download/get_weights_path_from_url_cn.rst @@ -0,0 +1,24 @@ +.. _cn_api_paddle_utils_download_get_weights_path_from_url: + +get_weights_path_from_url +------------------------------- + +.. py:function:: paddle.utils.download.get_weights_path_from_url(url, md5sum=None) + + 从 ``WEIGHT_HOME`` 文件夹获取权重,如果不存在,就从url下载 + +参数: + - **url** (str) - 下载的链接。 + - **md5sum** (str,可选) - 下载文件的md5值。默认值:None。 + +返回:权重的本地路径。 + + +**代码示例**: + +.. code-block:: python + + from paddle.utils.download import get_weights_path_from_url + + resnet18_pretrained_weight_url = 'https://paddle-hapi.bj.bcebos.com/models/resnet18.pdparams' + local_weight_path = get_weights_path_from_url(resnet18_pretrained_weight_url) diff --git a/doc/paddle/api/paddle/vision/datasets/Cifar100_cn.rst b/doc/paddle/api/paddle/vision/datasets/Cifar100_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..eb893227fc0aa1ba2140279fb9b0f229f30879c7 --- /dev/null +++ b/doc/paddle/api/paddle/vision/datasets/Cifar100_cn.rst @@ -0,0 +1,59 @@ +.. _cn_api_vision_datasets_Cifar100: + +Cifar100 +------------------------------- + +.. py:class:: paddle.vision.datasets.Cifar100() + + + `Cifar-100 `_ 数据集的实现,数据集包含100中类别. + + 参数 +::::::::: + - data_file (str) - 数据集文件路径,如果 ``download`` 设置为 ``True`` ,此参数可以设置为None。默认值为None。 + - mode (str) - ``'train'`` 或 ``'test'`` 模式,默认为 ``'train'`` 。 + - transform (callable) - 图片数据的预处理,若未 ``None`` 即为不做预处理。默认值为None。 + - download (bool) - 是否自定下载数据集文件。默认为 ``True`` 。 + +返回 +::::::::: + + Cifar100数据集实例 + + 代码示例 +::::::::: + + .. 
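note:: 下面先用一个小片段查看训练集与测试集的规模(假设数据可正常自动下载;Cifar-100 训练集、测试集分别包含 50000、10000 张图片):

.. code-block:: python

    from paddle.vision.datasets import Cifar100

    train_set = Cifar100(mode='train')
    test_set = Cifar100(mode='test')

    print(len(train_set), len(test_set))
    # 50000 10000

结合组网的完整示例如下:

.. 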
code-block:: python + + import paddle + import paddle.nn as nn + from paddle.vision.datasets import Cifar100 + from paddle.vision.transforms import Normalize + + class SimpleNet(paddle.nn.Layer): + def __init__(self): + super(SimpleNet, self).__init__() + self.fc = nn.Sequential( + nn.Linear(3072, 10), + nn.Softmax()) + + def forward(self, image, label): + image = paddle.reshape(image, (3, -1)) + return self.fc(image), label + + paddle.disable_static() + + normalize = Normalize(mean=[0.5, 0.5, 0.5], + std=[0.5, 0.5, 0.5]) + cifar100 = Cifar100(mode='train', transform=normalize) + + for i in range(10): + image, label = cifar100[i] + image = paddle.to_tensor(image) + label = paddle.to_tensor(label) + + model = SimpleNet() + image, label = model(image, label) + print(image.numpy().shape, label.numpy().shape) + + diff --git a/doc/paddle/api/paddle/vision/datasets/Cifar10_cn.rst b/doc/paddle/api/paddle/vision/datasets/Cifar10_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2ee4e8407c22e36d1e733ee3ee4b3b7a76b62c60 --- /dev/null +++ b/doc/paddle/api/paddle/vision/datasets/Cifar10_cn.rst @@ -0,0 +1,59 @@ +.. _cn_api_vision_datasets_Cifar10: + +Cifar10 +------------------------------- + +.. py:class:: paddle.vision.datasets.Cifar10() + + + `Cifar-10 `_ 数据集的实现,数据集包含10中类别. + +参数 +::::::::: + - data_file (str) - 数据集文件路径,如果 ``download`` 设置为 ``True`` ,此参数可以设置为None。默认值为None。 + - mode (str) - ``'train'`` 或 ``'test'`` 模式,默认为 ``'train'`` 。 + - transform (callable) - 图片数据的预处理,若未 ``None`` 即为不做预处理。默认值为None。 + - download (bool) - 是否自定下载数据集文件。默认为 ``True`` 。 + +返回 +::::::::: + + Cifar10数据集实例 + +代码示例 +::::::::: + + .. code-block:: python + + import paddle + import paddle.nn as nn + from paddle.vision.datasets import Cifar10 + from paddle.vision.transforms import Normalize + + class SimpleNet(paddle.nn.Layer): + def __init__(self): + super(SimpleNet, self).__init__() + self.fc = nn.Sequential( + nn.Linear(3072, 10), + nn.Softmax()) + + def forward(self, image, label): + image = paddle.reshape(image, (3, -1)) + return self.fc(image), label + + paddle.disable_static() + + normalize = Normalize(mean=[0.5, 0.5, 0.5], + std=[0.5, 0.5, 0.5]) + cifar100 = Cifar10(mode='train', transform=normalize) + + for i in range(10): + image, label = cifar100[i] + image = paddle.to_tensor(image) + label = paddle.to_tensor(label) + + model = SimpleNet() + image, label = model(image, label) + print(image.numpy().shape, label.numpy().shape) + + diff --git a/doc/paddle/api/paddle/vision/datasets/Flowers_cn.rst b/doc/paddle/api/paddle/vision/datasets/Flowers_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f0a03968f79f96847e4bc3045499fe10b82bc5c5 --- /dev/null +++ b/doc/paddle/api/paddle/vision/datasets/Flowers_cn.rst @@ -0,0 +1,38 @@ +.. _cn_api_vision_datasets_Flowers: + +Flowers +------------------------------- + +.. py:class:: paddle.vision.datasets.Flowers() + + + `Flowers `_ 数据集 + +参数 +::::::::: + - data_file (str) - 数据文件路径,如果 ``download`` 设置为 ``True`` ,此参数可以设置为None。默认值为None。 + - label_file (str) - 标签文件路径,如果 ``download`` 设置为 ``True`` ,此参数可以设置为None。默认值为None。 + - setid_file (str) - 子数据集下标划分文件路径,如果 ``download`` 设置为 ``True`` ,此参数可以设置为None。默认值为None。 + - mode (str) - ``'train'`` 或 ``'test'`` 模式,默认为 ``'train'`` 。 + - transform (callable) - 作用于图片数据的transform,若未 ``None`` 即为无transform。 + - download (bool) - 是否自定下载数据集文件。默认为 ``True`` 。 + +返回 +::::::::: + + Flowers数据集实例 + +代码示例 +::::::::: + + .. 
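note:: 构造数据集时可以传入 ``transform`` 对图片做预处理,下面是一个搭配 ``Resize`` 的示意(假设数据可正常自动下载;尺寸 (256, 256) 仅为举例):

.. code-block:: python

    from paddle.vision.datasets import Flowers
    from paddle.vision.transforms import Compose, Resize

    # 统一图片大小,便于后续组 batch
    transform = Compose([Resize((256, 256))])
    flowers = Flowers(mode='test', transform=transform)

    sample_img, sample_label = flowers[0]
    print(sample_img.shape, sample_label)

完整示例如下:

.. 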
code-block:: python + + from paddle.vision.datasets import Flowers + + flowers = Flowers(mode='test') + + for i in range(len(flowers)): + sample = flowers[i] + print(sample[0].shape, sample[1]) + + diff --git a/doc/paddle/api/paddle/vision/datasets/MNIST_cn.rst b/doc/paddle/api/paddle/vision/datasets/MNIST_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..be21bf45fa0981326294d4589acb51765dd4e719 --- /dev/null +++ b/doc/paddle/api/paddle/vision/datasets/MNIST_cn.rst @@ -0,0 +1,37 @@ +.. _cn_api_vision_datasets_MNIST: + +MNIST +------------------------------- + +.. py:class:: paddle.vision.datasets.MNIST() + + + `MNIST `_ 数据集 + +参数 +::::::::: + - image_path (str) - 图像文件路径,如果 ``download`` 设置为 ``True`` ,此参数可以设置为None。默认值为None。 + - label_path (str) - 标签文件路径,如果 ``download`` 设置为 ``True`` ,此参数可以设置为None。默认值为None。 + - chw_format (bool) - 若为 ``True`` 输出形状为[1, 28, 28], 否则为 [1, 784]。默认值为 ``True`` 。 + - mode (str) - ``'train'`` 或 ``'test'`` 模式,默认为 ``'train'`` 。 + - download (bool) - 是否自定下载数据集文件。默认为 ``True`` 。 + +返回 +::::::::: + + MNIST数据集实例 + +代码示例 +::::::::: + + .. code-block:: python + + from paddle.vision.datasets import MNIST + + mnist = MNIST(mode='test') + + for i in range(len(mnist)): + sample = mnist[i] + print(sample[0].shape, sample[1]) + + diff --git a/doc/paddle/api/paddle/vision/datasets/VOC2012_cn.rst b/doc/paddle/api/paddle/vision/datasets/VOC2012_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2ab458a02c16ffb404d60cfd9e6feddef8870aed --- /dev/null +++ b/doc/paddle/api/paddle/vision/datasets/VOC2012_cn.rst @@ -0,0 +1,53 @@ +.. _cn_api_vision_datasets_VOC2012: + +VOC2012 +------------------------------- + +.. py:class:: paddle.vision.datasets.VOC2012() + + + `VOC2012 `_ 数据集 + +参数 +::::::::: + - data_file (str) - 数据集文件路径,如果 ``download`` 设置为 ``True`` ,此参数可以设置为None。默认值为None。 + - label_file (str) - 标签文件路径,如果 ``download`` 设置为 ``True`` ,此参数可以设置为None。默认值为None。 + - setid_file (str) - 子数据集下标划分文件路径,如果 ``download`` 设置为 ``True`` ,此参数可以设置为None。默认值为None。 + - mode (str) - ``'train'`` 或 ``'test'`` 模式,默认为 ``'train'`` 。 + - transform (callable) - 作用于图片数据的transform,若未 ``None`` 即为无transform。 + - download (bool) - 是否自定下载数据集文件。默认为 ``True`` 。 + +返回 +::::::::: + + VOC2012数据集实例 + +代码示例 +::::::::: + + .. code-block:: python + + import paddle + from paddle.vision.datasets import VOC2012 + + class SimpleNet(paddle.nn.Layer): + def __init__(self): + super(SimpleNet, self).__init__() + + def forward(self, image, label): + return paddle.sum(image), label + + paddle.disable_static() + + voc2012 = VOC2012(mode='train') + + for i in range(10): + image, label= voc2012[i] + image = paddle.cast(paddle.to_tensor(image), 'float32') + label = paddle.to_tensor(label) + + model = SimpleNet() + image, label= model(image, label) + print(image.numpy().shape, label.numpy().shape) + + diff --git a/doc/paddle/api/paddle/vision/datasets/folder/DatasetFolder_cn.rst b/doc/paddle/api/paddle/vision/datasets/folder/DatasetFolder_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..89259a14dc7df9570ca2cc45456d0e6b30a3a91b --- /dev/null +++ b/doc/paddle/api/paddle/vision/datasets/folder/DatasetFolder_cn.rst @@ -0,0 +1,54 @@ +.. _cn_api_paddle_vision_datasets_DatasetFolder: + +DatasetFolder +------------------------------- + +.. 
py:class:: paddle.vision.datasets.DatasetFolder(root, loader=None, extensions=None, transform=None, is_valid_file=None) + + 一种通用的数据加载方式,当输入以如下的格式存放时: + root/class_a/1.ext + root/class_a/2.ext + root/class_a/3.ext + + root/class_b/123.ext + root/class_b/456.ext + root/class_b/789.ext + +参数: + - **root** (str) - 根目录路径。 + - **loader** (callable,可选) - 可以加载数据路径的一个函数,如果该值没有设定,默认使用 ``cv2.imread`` 。默认值:None。 + - **extensions** (tuple[str],可选) - 允许的数据后缀列表,如果该值没有设定,默认使用 ``('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp')`` 。默认值:None。 + - **transform** (callable,可选) - 数据增强函数。默认值:None。 + - **is_valid_file** (callable,可选) - 根据每条数据的路径来判断是否合法的一个函数。默认值:None。 + + +**代码示例**: + +.. code-block:: python + + import os + import cv2 + import tempfile + import shutil + import numpy as np + from paddle.vision.datasets import DatasetFolder + + def make_fake_dir(): + data_dir = tempfile.mkdtemp() + + for i in range(2): + sub_dir = os.path.join(data_dir, 'class_' + str(i)) + if not os.path.exists(sub_dir): + os.makedirs(sub_dir) + for j in range(2): + fake_img = (np.random.random((32, 32, 3)) * 255).astype('uint8') + cv2.imwrite(os.path.join(sub_dir, str(j) + '.jpg'), fake_img) + return data_dir + + temp_dir = make_fake_dir() + data_folder = DatasetFolder(temp_dir) + + for items in data_folder: + break + + shutil.rmtree(temp_dir) diff --git a/doc/paddle/api/paddle/vision/datasets/folder/ImageFolder_cn.rst b/doc/paddle/api/paddle/vision/datasets/folder/ImageFolder_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..37571e1247108a26aa2cce8c21a3da52a11eec07 --- /dev/null +++ b/doc/paddle/api/paddle/vision/datasets/folder/ImageFolder_cn.rst @@ -0,0 +1,50 @@ +.. _cn_api_paddle_vision_datasets_ImageFolder: + +ImageFolder +------------------------------- + +.. py:class:: paddle.vision.datasets.ImageFolder(root, loader=None, extensions=None, transform=None, is_valid_file=None) + + 一种通用的数据加载方式,当输入以如下的格式存放时: + root/1.ext + root/2.ext + root/sub_dir/3.ext + +参数: + - **root** (str) - 根目录路径。 + - **loader** (callable,可选) - 可以加载数据路径的一个函数,如果该值没有设定,默认使用 ``cv2.imread`` 。默认值:None。 + - **extensions** (tuple[str],可选) - 允许的数据后缀列表,如果该值没有设定,默认使用 ``('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp')`` 。默认值:None。 + - **transform** (callable,可选) - 数据增强函数。默认值:None。 + - **is_valid_file** (callable,可选) - 根据每条数据的路径来判断是否合法的一个函数。默认值:None。 + + +**代码示例**: + +.. code-block:: python + + import os + import cv2 + import tempfile + import shutil + import numpy as np + from paddle.vision.datasets import ImageFolder + + def make_fake_dir(): + data_dir = tempfile.mkdtemp() + + for i in range(2): + sub_dir = os.path.join(data_dir, 'class_' + str(i)) + if not os.path.exists(sub_dir): + os.makedirs(sub_dir) + for j in range(2): + fake_img = (np.random.random((32, 32, 3)) * 255).astype('uint8') + cv2.imwrite(os.path.join(sub_dir, str(j) + '.jpg'), fake_img) + return data_dir + + temp_dir = make_fake_dir() + data_folder = ImageFolder(temp_dir) + + for items in data_folder: + break + + shutil.rmtree(temp_dir) diff --git a/doc/paddle/api/paddle/vision/models/lenet/LeNet_cn.rst b/doc/paddle/api/paddle/vision/models/lenet/LeNet_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2aa28af127d3271d330c62e0aa97b9c308a00ec5 --- /dev/null +++ b/doc/paddle/api/paddle/vision/models/lenet/LeNet_cn.rst @@ -0,0 +1,26 @@ +.. _cn_api_paddle_vision_models_LeNet: + +LeNet +------------------------------- + +.. 
py:class:: paddle.vision.models.LeNet(num_classes=10) + + LeNet模型,来自论文`"LeCun Y, Bottou L, Bengio Y, et al. Gradient-based learning applied to document recognition[J]. Proceedings of the IEEE, 1998, 86(11): 2278-2324.`_。 + +参数: + - **num_classes** (int,可选) - 最后一个全连接层输出的维度。默认值:10。 + + +**代码示例**: + +.. code-block:: python + + import paddle + from paddle.vision.models import LeNet + + model = LeNet() + + x = paddle.rand([1, 1, 28, 28]) + out = model(x) + + print(out.shape) \ No newline at end of file diff --git a/doc/paddle/api/paddle/vision/models/mobilenetv1/MobileNetV1_cn.rst b/doc/paddle/api/paddle/vision/models/mobilenetv1/MobileNetV1_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..67a4cae6322b0bedad1052bbace11c220b934b14 --- /dev/null +++ b/doc/paddle/api/paddle/vision/models/mobilenetv1/MobileNetV1_cn.rst @@ -0,0 +1,29 @@ +.. _cn_api_paddle_vision_models_MobileNetV1: + +MobileNetV1 +------------------------------- + +.. py:class:: paddle.vision.models.MobileNetV1(scale=1.0, num_classes=1000, with_pool=True) + + MobileNetV1模型,来自论文`"MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications" `_。 + +参数: + - **scale** (float,可选) - 模型通道数的缩放比例。默认值:1.0。 + - **num_classes** (int, 可选) - 最后一个全连接层输出的维度。如果该值小于0,则不定义最后一个全连接层。默认值:1000。 + - **with_pool** (bool,可选) - 是否定义最后一个全连接层之前的池化层。默认值:True。 + +返回:mobilenetv1模型,Layer的实例。 + +**代码示例**: + +.. code-block:: python + + import paddle + from paddle.vision.models import MobileNetV1 + + model = MobileNetV1() + + x = paddle.rand([1, 3, 224, 224]) + out = model(x) + + print(out.shape) diff --git a/doc/paddle/api/paddle/vision/models/mobilenetv1/mobilenet_v1_cn.rst b/doc/paddle/api/paddle/vision/models/mobilenetv1/mobilenet_v1_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..83a49508c9982f76cf760cdcbf8e8e247126106c --- /dev/null +++ b/doc/paddle/api/paddle/vision/models/mobilenetv1/mobilenet_v1_cn.rst @@ -0,0 +1,35 @@ +.. _cn_api_paddle_vision_models_mobilenet_v1: + +mobilenet_v1 +------------------------------- + +.. py:function:: paddle.vision.models.mobilenet_v1(pretrained=False, scale=1.0, **kwargs) + + MobileNetV1模型,来自论文`"MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications" `_。 + +参数: + - **pretrained** (bool,可选) - 是否加载在imagenet数据集上的预训练权重。默认值:False。 + - **scale** (float,可选) - 模型通道数的缩放比例。默认值:1.0。 + +返回:mobilenetv1模型,Layer的实例。 + +**代码示例**: + +.. code-block:: python + + import paddle + from paddle.vision.models import mobilenet_v1 + + # build model + model = mobilenet_v1() + + # build model and load imagenet pretrained weight + # model = mobilenet_v1(pretrained=True) + + # build mobilenet v1 with scale=0.5 + model_scale = mobilenet_v1(scale=0.5) + + x = paddle.rand([1, 3, 224, 224]) + out = model(x) + + print(out.shape) diff --git a/doc/paddle/api/paddle/vision/models/mobilenetv2/MobileNetV2_cn.rst b/doc/paddle/api/paddle/vision/models/mobilenetv2/MobileNetV2_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..56c4066fa6a47f3c6025dade59238704feaf64b5 --- /dev/null +++ b/doc/paddle/api/paddle/vision/models/mobilenetv2/MobileNetV2_cn.rst @@ -0,0 +1,29 @@ +.. _cn_api_paddle_vision_models_MobileNetV2: + +MobileNetV2 +------------------------------- + +.. 
py:class:: paddle.vision.models.MobileNetV2(scale=1.0, num_classes=1000, with_pool=True) + + MobileNetV2模型,来自论文`"MobileNetV2: Inverted Residuals and Linear Bottlenecks" `_。 + +参数: + - **scale** (float,可选) - 模型通道数的缩放比例。默认值:1.0。 + - **num_classes** (int, 可选) - 最后一个全连接层输出的维度。如果该值小于0,则不定义最后一个全连接层。默认值:1000。 + - **with_pool** (bool,可选) - 是否定义最后一个全连接层之前的池化层。默认值:True。 + +返回:mobilenetv2模型,Layer的实例。 + +**代码示例**: + +.. code-block:: python + + import paddle + from paddle.vision.models import MobileNetV2 + + model = MobileNetV2() + + x = paddle.rand([1, 3, 224, 224]) + out = model(x) + + print(out.shape) diff --git a/doc/paddle/api/paddle/vision/models/mobilenetv2/mobilenet_v2_cn.rst b/doc/paddle/api/paddle/vision/models/mobilenetv2/mobilenet_v2_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..93bb11560091c84c9d01da61e45e98d76f982fee --- /dev/null +++ b/doc/paddle/api/paddle/vision/models/mobilenetv2/mobilenet_v2_cn.rst @@ -0,0 +1,35 @@ +.. _cn_api_paddle_vision_models_mobilenet_v2: + +mobilenet_v2 +------------------------------- + +.. py:function:: paddle.vision.models.mobilenet_v2(pretrained=False, scale=1.0, **kwargs) + + MobileNetV2模型,来自论文`"MobileNetV2: Inverted Residuals and Linear Bottlenecks" `_。 + +参数: + - **pretrained** (bool,可选) - 是否加载在imagenet数据集上的预训练权重。默认值:False。 + - **scale** (float,可选) - 模型通道数的缩放比例。默认值:1.0。 + +返回:mobilenetv2模型,Layer的实例。 + +**代码示例**: + +.. code-block:: python + + import paddle + from paddle.vision.models import mobilenet_v2 + + # build model + model = mobilenet_v2() + + # build model and load imagenet pretrained weight + # model = mobilenet_v2(pretrained=True) + + # build mobilenet v2 with scale=0.5 + model = mobilenet_v2(scale=0.5) + + x = paddle.rand([1, 3, 224, 224]) + out = model(x) + + print(out.shape) diff --git a/doc/paddle/api/paddle/vision/models/resnet/ResNet_cn.rst b/doc/paddle/api/paddle/vision/models/resnet/ResNet_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..dde8845e5a6c81b1a4b94bcf83fbdb31671937bf --- /dev/null +++ b/doc/paddle/api/paddle/vision/models/resnet/ResNet_cn.rst @@ -0,0 +1,33 @@ +.. _cn_api_paddle_vision_models_ResNet: + +ResNet +------------------------------- + +.. py:class:: paddle.vision.models.ResNet(Block, depth=50, num_classes=1000, with_pool=True) + + ResNet模型,来自论文`"Deep Residual Learning for Image Recognition" `_。 + +参数: + - **Block** (BasicBlock|BottleneckBlock) - 模型的残差模块。 + - **depth** (int,可选) - resnet模型的深度。默认值:50 + - **num_classes** (int, 可选) - 最后一个全连接层输出的维度。如果该值小于0,则不定义最后一个全连接层。默认值:1000。 + - **with_pool** (bool,可选) - 是否定义最后一个全连接层之前的池化层。默认值:True。 + +返回:ResNet模型,Layer的实例。 + +**代码示例**: + +.. code-block:: python + + import paddle + from paddle.vision.models import ResNet + from paddle.vision.models.resnet import BottleneckBlock, BasicBlock + + resnet50 = ResNet(BottleneckBlock, 50) + + resnet18 = ResNet(BasicBlock, 18) + + x = paddle.rand([1, 3, 224, 224]) + out = resnet18(x) + + print(out.shape) diff --git a/doc/paddle/api/paddle/vision/models/resnet/resnet101_cn.rst b/doc/paddle/api/paddle/vision/models/resnet/resnet101_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0baf12db9e610d368187a353df643c6254e6a542 --- /dev/null +++ b/doc/paddle/api/paddle/vision/models/resnet/resnet101_cn.rst @@ -0,0 +1,31 @@ +.. _cn_api_paddle_vision_models_resnet101: + +resnet101 +------------------------------- + +.. 
py:function:: paddle.vision.models.resnet101(pretrained=False, **kwargs) + + 101层的resnet模型,来自论文`"Deep Residual Learning for Image Recognition" `_。 + +参数: + - **pretrained** (bool,可选) - 是否加载在imagenet数据集上的预训练权重。默认值:False。 + +返回:resnet101模型,Layer的实例。 + +**代码示例**: + +.. code-block:: python + + import paddle + from paddle.vision.models import resnet101 + + # build model + model = resnet101() + + # build model and load imagenet pretrained weight + # model = resnet101(pretrained=True) + + x = paddle.rand([1, 3, 224, 224]) + out = model(x) + + print(out.shape) diff --git a/doc/paddle/api/paddle/vision/models/resnet/resnet152_cn.rst b/doc/paddle/api/paddle/vision/models/resnet/resnet152_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b0de35f85b720213779adae057dc98f5f9d363da --- /dev/null +++ b/doc/paddle/api/paddle/vision/models/resnet/resnet152_cn.rst @@ -0,0 +1,31 @@ +.. _cn_api_paddle_vision_models_resnet152: + +resnet152 +------------------------------- + +.. py:function:: paddle.vision.models.resnet152(pretrained=False, **kwargs) + + 152层的resnet模型,来自论文`"Deep Residual Learning for Image Recognition" `_。 + +参数: + - **pretrained** (bool,可选) - 是否加载在imagenet数据集上的预训练权重。默认值:False。 + +返回:resnet152模型,Layer的实例。 + +**代码示例**: + +.. code-block:: python + + import paddle + from paddle.vision.models import resnet152 + + # build model + model = resnet152() + + # build model and load imagenet pretrained weight + # model = resnet152(pretrained=True) + + x = paddle.rand([1, 3, 224, 224]) + out = model(x) + + print(out.shape) diff --git a/doc/paddle/api/paddle/vision/models/resnet/resnet18_cn.rst b/doc/paddle/api/paddle/vision/models/resnet/resnet18_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..05f3a158c635fbca926a7b304d4104c11cb85a06 --- /dev/null +++ b/doc/paddle/api/paddle/vision/models/resnet/resnet18_cn.rst @@ -0,0 +1,31 @@ +.. _cn_api_paddle_vision_models_resnet18: + +resnet18 +------------------------------- + +.. py:function:: paddle.vision.models.resnet18(pretrained=False, **kwargs) + + 18层的resnet模型,来自论文`"Deep Residual Learning for Image Recognition" `_。 + +参数: + - **pretrained** (bool,可选) - 是否加载在imagenet数据集上的预训练权重。默认值:False。 + +返回:resnet18模型,Layer的实例。 + +**代码示例**: + +.. code-block:: python + + import paddle + from paddle.vision.models import resnet18 + + # build model + model = resnet18() + + # build model and load imagenet pretrained weight + # model = resnet18(pretrained=True) + + x = paddle.rand([1, 3, 224, 224]) + out = model(x) + + print(out.shape) diff --git a/doc/paddle/api/paddle/vision/models/resnet/resnet34_cn.rst b/doc/paddle/api/paddle/vision/models/resnet/resnet34_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9f3aaaff9cb50b7f9be03304240d12c8506764a6 --- /dev/null +++ b/doc/paddle/api/paddle/vision/models/resnet/resnet34_cn.rst @@ -0,0 +1,31 @@ +.. _cn_api_paddle_vision_models_resnet34: + +resnet34 +------------------------------- + +.. py:function:: paddle.vision.models.resnet34(pretrained=False, **kwargs) + + 34层的resnet模型,来自论文`"Deep Residual Learning for Image Recognition" `_。 + +参数: + - **pretrained** (bool,可选) - 是否加载在imagenet数据集上的预训练权重。默认值:False。 + +返回:resnet34模型,Layer的实例。 + +**代码示例**: + +.. 
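note:: ``resnet34`` 使用 BasicBlock 残差模块、深度为 34,与直接用 ``ResNet`` 基类构建的网络结构一致。下面是一个对照示意(假设与 ``ResNet`` 基类文档中的构建方式对应,仅作参考):

.. code-block:: python

    import paddle
    from paddle.vision.models import ResNet, resnet34
    from paddle.vision.models.resnet import BasicBlock

    model_a = resnet34()
    # 与上面等价的基类写法(示意)
    model_b = ResNet(BasicBlock, 34)

    x = paddle.rand([1, 3, 224, 224])
    # 默认 num_classes=1000,两者输出形状均为 [1, 1000]
    print(model_a(x).shape, model_b(x).shape)

完整示例如下:

.. 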
code-block:: python + + import paddle + from paddle.vision.models import resnet34 + + # build model + model = resnet34() + + # build model and load imagenet pretrained weight + # model = resnet34(pretrained=True) + + x = paddle.rand([1, 3, 224, 224]) + out = model(x) + + print(out.shape) diff --git a/doc/paddle/api/paddle/vision/models/resnet/resnet50_cn.rst b/doc/paddle/api/paddle/vision/models/resnet/resnet50_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1035193e0c320328bc2b326495bdc6ad5321d35e --- /dev/null +++ b/doc/paddle/api/paddle/vision/models/resnet/resnet50_cn.rst @@ -0,0 +1,31 @@ +.. _cn_api_paddle_vision_models_resnet50: + +resnet50 +------------------------------- + +.. py:function:: paddle.vision.models.resnet50(pretrained=False, **kwargs) + + 50层的resnet模型,来自论文`"Deep Residual Learning for Image Recognition" `_。 + +参数: + - **pretrained** (bool,可选) - 是否加载在imagenet数据集上的预训练权重。默认值:False。 + +返回:resnet50模型,Layer的实例。 + +**代码示例**: + +.. code-block:: python + + import paddle + from paddle.vision.models import resnet50 + + # build model + model = resnet50() + + # build model and load imagenet pretrained weight + # model = resnet50(pretrained=True) + + x = paddle.rand([1, 3, 224, 224]) + out = model(x) + + print(out.shape) diff --git a/doc/paddle/api/paddle/vision/models/vgg/VGG_cn.rst b/doc/paddle/api/paddle/vision/models/vgg/VGG_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..21147c244a7896e87c8469a6b830a4f2c22d3aea --- /dev/null +++ b/doc/paddle/api/paddle/vision/models/vgg/VGG_cn.rst @@ -0,0 +1,33 @@ +.. _cn_api_paddle_vision_models_VGG: + +VGG +------------------------------- + +.. py:class:: paddle.vision.models.VGG(features, num_classes=1000) + + VGG模型,来自论文`"Very Deep Convolutional Networks For Large-Scale Image Recognition" `_。 + +参数: + - **features** (Layer) - vgg模型的特征层。由函数make_layers产生。 + - **num_classes** (int, 可选) - 最后一个全连接层输出的维度。如果该值小于0,则不定义最后一个全连接层。默认值:1000。 + +返回:vgg模型,Layer的实例。 + +**代码示例**: + +.. code-block:: python + + import paddle + from paddle.vision.models import VGG + from paddle.vision.models.vgg import make_layers + + vgg11_cfg = [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'] + + features = make_layers(vgg11_cfg) + + vgg11 = VGG(features) + + x = paddle.rand([1, 3, 224, 224]) + out = vgg11(x) + + print(out.shape) diff --git a/doc/paddle/api/paddle/vision/models/vgg/vgg11_cn.rst b/doc/paddle/api/paddle/vision/models/vgg/vgg11_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..35ee24863aa0eaec4abc8fac358430f4ea87ae75 --- /dev/null +++ b/doc/paddle/api/paddle/vision/models/vgg/vgg11_cn.rst @@ -0,0 +1,32 @@ +.. _cn_api_paddle_vision_models_vgg11: + +vgg11 +------------------------------- + +.. py:function:: paddle.vision.models.vgg11(pretrained=False, batch_norm=False, **kwargs) + + vgg11模型,来自论文`"Very Deep Convolutional Networks For Large-Scale Image Recognition" `_。 + +参数: + - **pretrained** (bool,可选) - 是否加载在imagenet数据集上的预训练权重。默认值:False。 + - **batch_norm** (bool, 可选) - 是否在每个卷积层后添加批归一化层。默认值:False。 + +返回:vgg11模型,Layer的实例。 + +**代码示例**: + +.. 
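note:: ``vgg11`` 的网络结构与用 ``make_layers`` 搭配 ``VGG`` 基类手工构建的结果一致。下面是一个对照示意(``vgg11_cfg`` 即 ``VGG`` 基类文档中给出的配置,仅作参考):

.. code-block:: python

    import paddle
    from paddle.vision.models import VGG, vgg11
    from paddle.vision.models.vgg import make_layers

    vgg11_cfg = [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M']

    model_a = vgg11()
    # 与上面等价的基类写法(示意)
    model_b = VGG(make_layers(vgg11_cfg))

    x = paddle.rand([1, 3, 224, 224])
    # 默认 num_classes=1000,两者输出形状均为 [1, 1000]
    print(model_a(x).shape, model_b(x).shape)

完整示例如下:

.. 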
code-block:: python + + import paddle + from paddle.vision.models import vgg11 + + # build model + model = vgg11() + + # build vgg11 model with batch_norm + model = vgg11(batch_norm=True) + + x = paddle.rand([1, 3, 224, 224]) + out = model(x) + + print(out.shape) \ No newline at end of file diff --git a/doc/paddle/api/paddle/vision/models/vgg/vgg13_cn.rst b/doc/paddle/api/paddle/vision/models/vgg/vgg13_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..d96204947e176029fcb4e215d812213ebfd14ea2 --- /dev/null +++ b/doc/paddle/api/paddle/vision/models/vgg/vgg13_cn.rst @@ -0,0 +1,32 @@ +.. _cn_api_paddle_vision_models_vgg13: + +vgg13 +------------------------------- + +.. py:function:: paddle.vision.models.vgg13(pretrained=False, batch_norm=False, **kwargs) + + vgg13模型,来自论文`"Very Deep Convolutional Networks For Large-Scale Image Recognition" `_。 + +参数: + - **pretrained** (bool,可选) - 是否加载在imagenet数据集上的预训练权重。默认值:False。 + - **batch_norm** (bool, 可选) - 是否在每个卷积层后添加批归一化层。默认值:False。 + +返回:vgg13模型,Layer的实例。 + +**代码示例**: + +.. code-block:: python + + import paddle + from paddle.vision.models import vgg13 + + # build model + model = vgg13() + + # build vgg13 model with batch_norm + model = vgg13(batch_norm=True) + + x = paddle.rand([1, 3, 224, 224]) + out = model(x) + + print(out.shape) diff --git a/doc/paddle/api/paddle/vision/models/vgg/vgg16_cn.rst b/doc/paddle/api/paddle/vision/models/vgg/vgg16_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4fb42f17bb207904527419a69cc3a3b138638649 --- /dev/null +++ b/doc/paddle/api/paddle/vision/models/vgg/vgg16_cn.rst @@ -0,0 +1,32 @@ +.. _cn_api_paddle_vision_models_vgg16: + +vgg16 +------------------------------- + +.. py:function:: paddle.vision.models.vgg16(pretrained=False, batch_norm=False, **kwargs) + + vgg16模型,来自论文`"Very Deep Convolutional Networks For Large-Scale Image Recognition" `_。 + +参数: + - **pretrained** (bool,可选) - 是否加载在imagenet数据集上的预训练权重。默认值:False。 + - **batch_norm** (bool, 可选) - 是否在每个卷积层后添加批归一化层。默认值:False。 + +返回:vgg16模型,Layer的实例。 + +**代码示例**: + +.. code-block:: python + + import paddle + from paddle.vision.models import vgg16 + + # build model + model = vgg16() + + # build vgg16 model with batch_norm + model = vgg16(batch_norm=True) + + x = paddle.rand([1, 3, 224, 224]) + out = model(x) + + print(out.shape) diff --git a/doc/paddle/api/paddle/vision/models/vgg/vgg19_cn.rst b/doc/paddle/api/paddle/vision/models/vgg/vgg19_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3dadfa1654ad6cf6992cbc57a198c3b83ad72aad --- /dev/null +++ b/doc/paddle/api/paddle/vision/models/vgg/vgg19_cn.rst @@ -0,0 +1,32 @@ +.. _cn_api_paddle_vision_models_vgg19: + +vgg19 +------------------------------- + +.. py:function:: paddle.vision.models.vgg19(pretrained=False, batch_norm=False, **kwargs) + + vgg19模型,来自论文`"Very Deep Convolutional Networks For Large-Scale Image Recognition" `_。 + +参数: + - **pretrained** (bool,可选) - 是否加载在imagenet数据集上的预训练权重。默认值:False。 + - **batch_norm** (bool, 可选) - 是否在每个卷积层后添加批归一化层。默认值:False。 + +返回:vgg19模型,Layer的实例。 + +**代码示例**: + +.. 
code-block:: python + + import paddle + from paddle.vision.models import vgg19 + + # build model + model = vgg19() + + # build vgg19 model with batch_norm + model = vgg19(batch_norm=True) + + x = paddle.rand([1, 3, 224, 224]) + out = model(x) + + print(out.shape) diff --git a/doc/paddle/api/paddle/vision/transforms/functional/flip_cn.rst b/doc/paddle/api/paddle/vision/transforms/functional/flip_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..291ca39342acfbf86fdc9798aa18730e38d1a182 --- /dev/null +++ b/doc/paddle/api/paddle/vision/transforms/functional/flip_cn.rst @@ -0,0 +1,41 @@ +.. _cn_api_vision_transforms_flip: + +flip +------------------------------- + +.. py:function:: paddle.vision.transforms.flip(image, code) + +根据翻转的类型对输入图像进行翻转。 + +参数 +::::::::: + + - image (numpy.ndarray) - 输入的图像数据,形状为(H, W, C)。 + - code (int) - 支持的翻转类型,-1: 水平和垂直翻转,0: 垂直翻转,1: 水平翻转。 + +返回 +::::::::: + + ``numpy.ndarray``,翻转后的图像数据。 + +代码示例 +::::::::: + +.. code-block:: python + + import numpy as np + from paddle.vision.transforms import functional as F + + + np.random.seed(5) + fake_img = np.random.rand(224, 224, 3) + + # flip horizontally and vertically + flip_img_1 = F.flip(fake_img, -1) + + # flip vertically + flip_img_2 = F.flip(fake_img, 0) + + # flip horizontally + flip_img_3 = F.flip(fake_img, 1) + \ No newline at end of file diff --git a/doc/paddle/api/paddle/vision/transforms/functional/pad_cn.rst b/doc/paddle/api/paddle/vision/transforms/functional/pad_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..cba1468c478f76075fef7f3c1f3acc9f1269cde2 --- /dev/null +++ b/doc/paddle/api/paddle/vision/transforms/functional/pad_cn.rst @@ -0,0 +1,44 @@ +.. _cn_api_vision_transforms_pad: + +pad +------------------------------- + +.. py:function:: paddle.vision.transforms.pad(img, padding, fill=(0, 0, 0), padding_mode='constant') + +使用特定的填充模式和填充值来对输入图像进行填充。 + +参数 +::::::::: + + - img (np.ndarray) - 被填充的图像。 + - padding (int|tuple) - 在图像每个边框上填充。 + 如果提供单个int值,则用于填充图像所有边。 + 如果提供长度为2的元组,则分别为图像左/右和顶部/底部进行填充。 + 如果提供了长度为4的元组,则分别按照左,上,右和下的顺序为图像填充。 + - fill (int|tuple) - 用于填充的像素值,仅当padding_mode为constant时传递此参数,默认使用0来进行每个像素的填充。 + 如果参数值是一个长度为3的元组,则会分别用于填充R,G,B通道。 + - padding_mode (string) - 填充模式,支持:constant, edge, reflect 或 symmetric,默认值:constant,使用fill参数值填充。 + ``constant`` 表示使用fill参数来指定一个值进行填充。 + ``edge`` 表示在图像边缘填充最后一个值。 + ``reflect`` 表示用原图像的反向图片填充(不重复使用边缘上的值)。比如使用这个模式对 ``[1, 2, 3, 4]``的两端分别填充2个值,最后结果是 ``[3, 2, 1, 2, 3, 4, 3, 2]``。 + ``symmetric`` 表示用原图像的反向图片填充(重复使用边缘上的值)。比如使用这个模式对 ``[1, 2, 3, 4]``的两端分别填充2个值,最后结果是 ``[2, 1, 1, 2, 3, 4, 4, 3]``。 + +返回 +::::::::: + + ``numpy.ndarray``,填充后的图像数据。 + +代码示例 +::::::::: + +.. code-block:: python + + import numpy as np + from paddle.vision.transforms.functional import pad + + + fake_img = np.random.rand(500, 500, 3).astype('float32') + fake_img = pad(fake_img, 2) + + print(fake_img.shape) + # (504, 504, 3) \ No newline at end of file diff --git a/doc/paddle/api/paddle/vision/transforms/functional/resize_cn.rst b/doc/paddle/api/paddle/vision/transforms/functional/resize_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c64fc53159aa3406ac26299ed571a60ff5e87e24 --- /dev/null +++ b/doc/paddle/api/paddle/vision/transforms/functional/resize_cn.rst @@ -0,0 +1,43 @@ +.. _cn_api_vision_transforms_resize: + +resize +------------------------------- + +.. 
py:function:: paddle.vision.transforms.resize(img, size, interpolation=cv2.INTER_LINEAR) + +将输入数据调整为指定大小。 + +参数 +::::::::: + + - img (numpy.ndarray) - 输入数据,可以是(H, W, C)形状的图像或遮罩。 + - size (int|tuple) - 输出图像大小。 + 如果size是一个序列,例如(h,w),输出大小将与此匹配。 + 如果size为int,图像的较小边缘将与此数字匹配,即如果 height > width,则图像将重新缩放为(size * height / width, size)。 + - interpolation (int,可选) - 调整图片大小时使用的插值模式。默认值: cv2.INTER_LINEAR。 + +返回 +::::::::: + + ``numpy.ndarray``,调整大小后的图像数据。 + +代码示例 +::::::::: + +.. code-block:: python + + import numpy as np + from paddle.vision.transforms import functional as F + + + fake_img = np.random.rand(256, 256, 3) + fake_img_1 = F.resize(fake_img, 224) + + print(fake_img_1.shape) + # (224, 224, 3) + + fake_img_2 = F.resize(fake_img, (200, 150)) + + print(fake_img_2.shape) + # (200, 150, 3) + \ No newline at end of file diff --git a/doc/paddle/api/paddle/vision/transforms/functional/rotate_cn.rst b/doc/paddle/api/paddle/vision/transforms/functional/rotate_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ce7e66286caf5cd94c2421cc9de4775819aabbe8 --- /dev/null +++ b/doc/paddle/api/paddle/vision/transforms/functional/rotate_cn.rst @@ -0,0 +1,69 @@ +.. _cn_api_vision_transforms_rotate: + +rotate +------------------------------- + +.. py:function:: paddle.vision.transforms.rotate(img, angle, interpolation=cv2.INTER_LINEAR, expand=False, center=None) + +按角度旋转图像。 + +参数 +::::::::: + + - img (numpy.ndarray) - 输入图像。 + - angle (float|int) - 旋转角度,顺时针。 + - interpolation (int,可选) - 调整图片大小时使用的插值模式。默认值: cv2.INTER_LINEAR。 + - expand (bool,可选) - 是否要对旋转后的图片进行大小扩展,默认值: False,不进行扩展。 + 当参数值为True时,会对图像大小进行扩展,让其能够足以容纳整个旋转后的图像。 + 当参数值为False时,会按照原图像大小保留旋转后的图像。 + **这个扩展操作的前提是围绕中心旋转且没有平移。** + - center (2-tuple,可选) - 旋转的中心点坐标,原点是图片左上角,默认值是图像的中心点。 + +返回 +::::::::: + + ``numpy ndarray``,旋转后的图像。 + +代码示例 +::::::::: + +.. code-block:: python + + import numpy as np + from paddle.vision.transforms.functional import rotate + + + fake_img = np.random.rand(3, 3, 3).astype('float32') + print('before rotate:') + print(fake_img) + fake_img = rotate(fake_img, 90) + print('after rotate:') + print(fake_img) + """ + before rotate: + [[[0.9320921 0.311002 0.22388814] + [0.9551999 0.10015319 0.7481808 ] + [0.4619514 0.29591113 0.12210595]] + + [[0.77222216 0.3235876 0.5718483 ] + [0.8797754 0.35876957 0.9330844 ] + [0.65897316 0.11888863 0.31214228]] + + [[0.7627513 0.05149421 0.41464522] + [0.2620253 0.7800404 0.990831 ] + [0.7814754 0.21640824 0.4333755 ]]] + + after rotate: + [[[0. 0. 0. ] + [0.7627513 0.05149421 0.41464522] + [0.77222216 0.3235876 0.5718483 ]] + + [[0. 0. 0. ] + [0.2620253 0.7800404 0.990831 ] + [0.8797754 0.35876957 0.9330844 ]] + + [[0. 0. 0. ] + [0.7814754 0.21640824 0.4333755 ] + [0.65897316 0.11888863 0.31214228]]] + """ + \ No newline at end of file diff --git a/doc/paddle/api/paddle/vision/transforms/functional/to_grayscale_cn.rst b/doc/paddle/api/paddle/vision/transforms/functional/to_grayscale_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..24c2509747a4a1a2fc7e2564e0c5adb86f365d68 --- /dev/null +++ b/doc/paddle/api/paddle/vision/transforms/functional/to_grayscale_cn.rst @@ -0,0 +1,36 @@ +.. _cn_api_vision_transforms_to_grayscale: + +to_grayscale +------------------------------- + +.. 
py:function:: paddle.vision.transforms.to_grayscale(img, num_output_channels=1) + +将图像转换为灰度。 + +参数 +::::::::: + + - img (numpy.ndarray) - 输入图像。 + - num_output_channels (int,可选) - 输出图像的通道数,默认值为1,单通道。 + +返回 +::::::::: + + ``numpy.ndarray``,输入图像的灰度版本。 + - 如果 output_channels == 1 : 返回一个单通道图像。 + - 如果 output_channels == 3 : 返回一个RGB格式的3通道图像。 + +代码示例 +::::::::: + +.. code-block:: python + + import numpy as np + from paddle.vision.transforms.functional import to_grayscale + + fake_img = np.random.rand(500, 500, 3).astype('float32') + fake_img = to_grayscale(fake_img) + + print(fake_img.shape) + # (500, 500, 1) + \ No newline at end of file diff --git a/doc/paddle/api/paddle/vision/transforms/transforms/BatchCompose_cn.rst b/doc/paddle/api/paddle/vision/transforms/transforms/BatchCompose_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..23918dccd78ea0c2adb8bb586fd03e43b74e2b5c --- /dev/null +++ b/doc/paddle/api/paddle/vision/transforms/transforms/BatchCompose_cn.rst @@ -0,0 +1,76 @@ +.. _cn_api_vision_transforms_BatchCompose: + +BatchCompose +------------------------------- + +.. py:class:: paddle.vision.transforms.BatchCompose(transforms=[]) + +用于处理批数据的预处理接口组合。 + +参数 +::::::::: + + - transforms (list) - 用于组合的数据预处理接口实例。这些预处理接口所处理的是一批数据。 + +代码示例 +::::::::: + +.. code-block:: python + + import numpy as np + from paddle.io import DataLoader + + from paddle import set_device + from paddle.vision.datasets import Flowers + from paddle.vision.transforms import Compose, BatchCompose, Resize + + class NormalizeBatch(object): + def __init__(self, + mean=[0.485, 0.456, 0.406], + std=[0.229, 0.224, 0.225], + scale=True, + channel_first=True): + + self.mean = mean + self.std = std + self.scale = scale + self.channel_first = channel_first + if not (isinstance(self.mean, list) and isinstance(self.std, list) and + isinstance(self.scale, bool)): + raise TypeError("{}: input type is invalid.".format(self)) + from functools import reduce + if reduce(lambda x, y: x * y, self.std) == 0: + raise ValueError('{}: std is invalid!'.format(self)) + + def __call__(self, samples): + for i in range(len(samples)): + samples[i] = list(samples[i]) + im = samples[i][0] + im = im.astype(np.float32, copy=False) + mean = np.array(self.mean)[np.newaxis, np.newaxis, :] + std = np.array(self.std)[np.newaxis, np.newaxis, :] + if self.scale: + im = im / 255.0 + im -= mean + im /= std + if self.channel_first: + im = im.transpose((2, 0, 1)) + samples[i][0] = im + return samples + + transform = Compose([Resize((500, 500))]) + flowers_dataset = Flowers(mode='test', transform=transform) + + device = set_device('cpu') + + collate_fn = BatchCompose([NormalizeBatch()]) + loader = DataLoader( + flowers_dataset, + batch_size=4, + places=device, + return_list=True, + collate_fn=collate_fn) + + for data in loader: + # do something + break \ No newline at end of file diff --git a/doc/paddle/api/paddle/vision/transforms/transforms/BrightnessTransform_cn.rst b/doc/paddle/api/paddle/vision/transforms/transforms/BrightnessTransform_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..00c9e340f3adfdb472c23b6a6ceb3a5f1ea517a0 --- /dev/null +++ b/doc/paddle/api/paddle/vision/transforms/transforms/BrightnessTransform_cn.rst @@ -0,0 +1,35 @@ +.. _cn_api_vision_transforms_BrightnessTransform: + +BrightnessTransform +------------------------------- + +.. 
py:class:: paddle.vision.transforms.BrightnessTransform(value) + +调整图像的亮度。 + +参数 +::::::::: + + - value (float) - 亮度调整范围大小,会从给定参数后的均匀分布[max(0,1 - brightness), 1 + brightness]中随机选择进行实际调整,可以是任何非负数。参数等于0时输出原始图像。 + +返回 +::::::::: + + ``numpy ndarray``,调整亮度后的图像。 + +代码示例 +::::::::: + +.. code-block:: python + + import numpy as np + from paddle.vision.transforms import BrightnessTransform + + + transform = BrightnessTransform(0.4) + fake_img = np.random.rand(500, 500, 3).astype('float32') + fake_img = transform(fake_img) + + print(fake_img.shape) + # (500, 500, 3) + \ No newline at end of file diff --git a/doc/paddle/api/paddle/vision/transforms/transforms/CenterCropResize_cn.rst b/doc/paddle/api/paddle/vision/transforms/transforms/CenterCropResize_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e0a2b9a14830024fe5fdd9997e0ad3c86729a000 --- /dev/null +++ b/doc/paddle/api/paddle/vision/transforms/transforms/CenterCropResize_cn.rst @@ -0,0 +1,37 @@ +.. _cn_api_vision_transforms_CenterCropResize: + +CenterCropResize +------------------------------- + +.. py:class:: paddle.vision.transforms.CenterCropResize(size, crop_padding=32, interpolation=cv2.INTER_LINEAR) + +通过填充将图像裁剪到图像中心,然后缩放尺寸。 + +参数 +::::::::: + + - size (int|list|tuple) - 输出图像的形状大小。 + - crop_padding (int) - 中心裁剪时进行padding的大小。默认值: 32。 + - interpolation (int) - 调整图片大小时使用的插值模式。默认值: cv2.INTER_LINEAR。 + +返回 +::::::::: + + ``numpy ndarray``,裁剪并调整尺寸后的图像。 + +代码示例 +::::::::: + +.. code-block:: python + + import numpy as np + from paddle.vision.transforms import CenterCropResize + + + transform = CenterCropResize(224) + fake_img = np.random.rand(500, 500, 3).astype('float32') + fake_img = transform(fake_img) + + print(fake_img.shape) + # (224, 224, 3) + \ No newline at end of file diff --git a/doc/paddle/api/paddle/vision/transforms/transforms/CenterCrop_cn.rst b/doc/paddle/api/paddle/vision/transforms/transforms/CenterCrop_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b71e8b4259927259a27c00a70ac7cf6376b1a6b8 --- /dev/null +++ b/doc/paddle/api/paddle/vision/transforms/transforms/CenterCrop_cn.rst @@ -0,0 +1,34 @@ +.. _cn_api_vision_transforms_CenterCrop: + +CenterCrop +------------------------------- + +.. py:class:: paddle.vision.transforms.CenterCrop(output_size) + +对输入图像进行裁剪,保持图片中心点不变。 + +参数 +::::::::: + + - output_size (int|tuple) - 输出图像的形状大小。 + +返回 +::::::::: + + ``numpy ndarray``,裁剪后的图像。 + +代码示例 +::::::::: + +.. code-block:: python + + import numpy as np + from paddle.vision.transforms import CenterCrop + + + transform = CenterCrop(224) + fake_img = np.random.rand(500, 500, 3).astype('float32') + fake_img = transform(fake_img) + + print(fake_img.shape) + # (224, 224, 3) \ No newline at end of file diff --git a/doc/paddle/api/paddle/vision/transforms/transforms/ColorJitter_cn.rst b/doc/paddle/api/paddle/vision/transforms/transforms/ColorJitter_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..751d0a915bf3c712d470dafcbe1dad61a4cf23d6 --- /dev/null +++ b/doc/paddle/api/paddle/vision/transforms/transforms/ColorJitter_cn.rst @@ -0,0 +1,38 @@ +.. _cn_api_vision_transforms_ColorJitter: + +ColorJitter +------------------------------- + +.. 
py:class:: paddle.vision.transforms.ColorJitter(brightness=0, contrast=0, saturation=0, hue=0) + +随机调整图像的亮度,对比度,饱和度和色调。 + +参数 +::::::::: + + - brightness (float) - 亮度调整范围大小,会从给定参数后的均匀分布[max(0,1 - brightness), 1 + brightness]中随机选择进行实际调整,不能是负数。 + - contrast (float) - 对比度调整范围大小,会从给定参数后的均匀分布[max(0,1 - contrast), 1 + contrast]中随机选择进行实际调整,不能是负数。 + - saturation (float) - 饱和度调整范围大小,会从给定参数后的均匀分布[max(0,1 - saturation), 1 + saturation]中随机选择进行实际调整,不能是负数。 + - hue (float) - 色调调整范围大小,会从给定参数后的均匀分布[-hue, hue]中随机选择进行实际调整,参数值需要在0到0.5之间。 + +返回 +::::::::: + + ``numpy ndarray``,调整亮度、对比度、饱和度和色调后的图像。 + +代码示例 +::::::::: + +.. code-block:: python + + import numpy as np + from paddle.vision.transforms import ColorJitter + + + transform = ColorJitter(0.4) + fake_img = np.random.rand(500, 500, 3).astype('float32') + fake_img = transform(fake_img) + + print(fake_img.shape) + # (500, 500, 3) + \ No newline at end of file diff --git a/doc/paddle/api/paddle/vision/transforms/transforms/Compose_cn.rst b/doc/paddle/api/paddle/vision/transforms/transforms/Compose_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c36174238bf2fbc8a2dab66c0f69bfa8351f4afa --- /dev/null +++ b/doc/paddle/api/paddle/vision/transforms/transforms/Compose_cn.rst @@ -0,0 +1,35 @@ +.. _cn_api_vision_transforms_Compose: + +Compose +------------------------------- + +.. py:class:: paddle.vision.transforms.Compose(transforms) + +将用于数据集预处理的接口以列表的方式进行组合。 + +参数 +::::::::: + + - transforms (list) - 用于组合的数据预处理接口实例列表。 + +返回 +::::::::: + + 一个可调用的Compose对象,它将依次调用每个给定的 :attr:`transforms`。 + +代码示例 +::::::::: + +.. code-block:: python + + from paddle.vision.datasets import Flowers + from paddle.vision.transforms import Compose, ColorJitter, Resize + + transform = Compose([ColorJitter(), Resize(size=608)]) + flowers = Flowers(mode='test', transform=transform) + + for i in range(10): + sample = flowers[i] + print(sample[0].shape, sample[1]) + + \ No newline at end of file diff --git a/doc/paddle/api/paddle/vision/transforms/transforms/ContrastTransform_cn.rst b/doc/paddle/api/paddle/vision/transforms/transforms/ContrastTransform_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..31f34498e06e94aca840c131ffe9a312458487cd --- /dev/null +++ b/doc/paddle/api/paddle/vision/transforms/transforms/ContrastTransform_cn.rst @@ -0,0 +1,34 @@ +.. _cn_api_vision_transforms_ContrastTransform: + +ContrastTransform +------------------------------- + +.. py:class:: paddle.vision.transforms.ContrastTransform(value) + +调整图像对比度。 + +参数 +::::::::: + + - value (float) - 对比度调整范围大小,会从给定参数后的均匀分布[max(0,1 - contrast), 1 + contrast]中随机选择进行实际调整,不能是负数。参数值为0时返回原图像。 + +返回 +::::::::: + + ``numpy ndarray``,调整对比度后的图像。 + +代码示例 +::::::::: + +.. code-block:: python + + import numpy as np + from paddle.vision.transforms import ContrastTransform + + + transform = ContrastTransform(0.4) + fake_img = np.random.rand(500, 500, 3).astype('float32') + fake_img = transform(fake_img) + + print(fake_img.shape) + # (500, 500, 3) \ No newline at end of file diff --git a/doc/paddle/api/paddle/vision/transforms/transforms/GaussianNoise_cn.rst b/doc/paddle/api/paddle/vision/transforms/transforms/GaussianNoise_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3e713a9fd03192b1a6f75700110e365a4cea97a2 --- /dev/null +++ b/doc/paddle/api/paddle/vision/transforms/transforms/GaussianNoise_cn.rst @@ -0,0 +1,36 @@ +.. _cn_api_vision_transforms_GaussianNoise: + +GaussianNoise +------------------------------- + +.. 
py:class:: paddle.vision.transforms.GaussianNoise(mean=0.0, std=1.0) + +基于给定的均值和标准差来产生高斯噪声,并将随机高斯噪声添加到输入数据。 + +参数 +::::::::: + + - mean (float) - 用于产生噪声的高斯分布的均值。 + - std (float) - 用于产生噪声的高斯分布的标准差。 + +返回 +::::::::: + + ``numpy ndarray``,增加高斯噪声后的图像。 + +代码示例 +::::::::: + +.. code-block:: python + + import numpy as np + from paddle.vision.transforms import GaussianNoise + + + transform = GaussianNoise() + fake_img = np.random.rand(500, 500, 3).astype('float32') + fake_img = transform(fake_img) + + print(fake_img.shape) + # (500, 500, 3) + \ No newline at end of file diff --git a/doc/paddle/api/paddle/vision/transforms/transforms/Grayscale_cn.rst b/doc/paddle/api/paddle/vision/transforms/transforms/Grayscale_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a9ac84412617be742b96ea45ebd478d18a3a4567 --- /dev/null +++ b/doc/paddle/api/paddle/vision/transforms/transforms/Grayscale_cn.rst @@ -0,0 +1,37 @@ +.. _cn_api_vision_transforms_Grayscale: + +Grayscale +------------------------------- + +.. py:class:: paddle.vision.transforms.Grayscale(output_channels=1) + +将图像转换为灰度。 + +参数 +::::::::: + + - output_channels (int) - 输出图像的通道数,参数值为1或3。 + +返回 +::::::::: + + ``numpy.ndarray``,输入图像的灰度版本。 + - 如果 output_channels == 1 : 返回一个单通道图像。 + - 如果 output_channels == 3 : 返回一个RGB格式的3通道图像。 + +代码示例 +::::::::: + +.. code-block:: python + + import numpy as np + from paddle.vision.transforms import Grayscale + + + transform = Grayscale() + fake_img = np.random.rand(500, 400, 3).astype('float32') + fake_img = transform(fake_img) + + print(fake_img.shape) + # (500, 400, 1) + \ No newline at end of file diff --git a/doc/paddle/api/paddle/vision/transforms/transforms/HueTransform_cn.rst b/doc/paddle/api/paddle/vision/transforms/transforms/HueTransform_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ef057f4c949b4f1999f4e838843b2c63714dda7f --- /dev/null +++ b/doc/paddle/api/paddle/vision/transforms/transforms/HueTransform_cn.rst @@ -0,0 +1,35 @@ +.. _cn_api_vision_transforms_HueTransform: + +HueTransform +------------------------------- + +.. py:class:: paddle.vision.transforms.HueTransform(value) + +调整图像的色调。 + +参数 +::::::::: + + - value (float) - 色调调整范围大小,会从给定参数后的均匀分布[-hue, hue]中随机选择进行实际调整,参数值需要在0到0.5之间,参数值为0时返回原始图像。 + +返回 +::::::::: + + ``numpy ndarray``,调整色调后的图像。 + +代码示例 +::::::::: + +.. code-block:: python + + import numpy as np + from paddle.vision.transforms import HueTransform + + + transform = HueTransform(0.4) + fake_img = np.random.rand(500, 500, 3).astype('float32') + fake_img = transform(fake_img) + + print(fake_img.shape) + # (500, 500, 3) + \ No newline at end of file diff --git a/doc/paddle/api/paddle/vision/transforms/transforms/Normalize_cn.rst b/doc/paddle/api/paddle/vision/transforms/transforms/Normalize_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5bccb9e4d11bd587d8f1529c04e687f7f23e1a78 --- /dev/null +++ b/doc/paddle/api/paddle/vision/transforms/transforms/Normalize_cn.rst @@ -0,0 +1,41 @@ +.. _cn_api_vision_transforms_Normalize: + +Normalize +------------------------------- + +.. py:class:: paddle.vision.transforms.Normalize(mean=0.0, std=1.0) + +图像归一化处理,支持两种方式: +1. 用统一的均值和标准差值对图像的每个通道进行归一化处理; +2. 对每个通道指定不同的均值和标准差值进行归一化处理。 + +计算过程: + +``output[channel] = (input[channel] - mean[channel]) / std[channel]`` + +参数 +::::::::: + + - mean (int|float|list) - 用于每个通道归一化的均值。 + - std (int|float|list) - 用于每个通道归一化的标准差值。 + +返回 +::::::::: + + ``numpy ndarray``,归一化后的图像。 + +代码示例 +::::::::: + +.. 
code-block:: python + + import numpy as np + from paddle.vision.transforms import Normalize + + + normalize = Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) + fake_img = np.random.rand(3, 500, 500).astype('float32') + fake_img = normalize(fake_img) + + print(fake_img.shape) + # (3, 500, 500) \ No newline at end of file diff --git a/doc/paddle/api/paddle/vision/transforms/transforms/Pad_cn.rst b/doc/paddle/api/paddle/vision/transforms/transforms/Pad_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5e5a42ea5a5801491d2f5f7678e4a96a839b72f8 --- /dev/null +++ b/doc/paddle/api/paddle/vision/transforms/transforms/Pad_cn.rst @@ -0,0 +1,44 @@ +.. _cn_api_vision_transforms_Pad: + +Pad +------------------------------- + +.. py:class:: paddle.vision.transforms.Pad(padding, fill=0, padding_mode='constant') + +使用特定的填充模式和填充值来对输入图像进行填充。 + +参数 +::::::::: + + - padding (int|tuple) - 在图像每个边框上填充。 + 如果提供单个int值,则用于填充图像所有边。 + 如果提供长度为2的元组,则分别为图像左/右和顶部/底部进行填充。 + 如果提供了长度为4的元组,则分别按照左,上,右和下的顺序为图像填充。 + - fill (int|tuple) - 用于填充的像素值,仅当padding_mode为constant时传递此参数,默认使用0来进行每个像素的填充。 + 如果参数值是一个长度为3的元组,则会分别用于填充R,G,B通道。 + - padding_mode (string) - 填充模式,支持:constant, edge, reflect 或 symmetric,默认值:constant,使用fill参数值填充。 + ``constant`` 表示使用fill参数来指定一个值进行填充。 + ``edge`` 表示在图像边缘填充最后一个值。 + ``reflect`` 表示用原图像的反向图片填充(不重复使用边缘上的值)。比如使用这个模式对 ``[1, 2, 3, 4]``的两端分别填充2个值,最后结果是 ``[3, 2, 1, 2, 3, 4, 3, 2]``。 + ``symmetric`` 表示用原图像的反向图片填充(重复使用边缘上的值)。比如使用这个模式对 ``[1, 2, 3, 4]``的两端分别填充2个值,最后结果是 ``[2, 1, 1, 2, 3, 4, 4, 3]``。 + +返回 +::::::::: + + ``numpy ndarray``,填充后的图像。 + +代码示例 +::::::::: + +.. code-block:: python + + import numpy as np + from paddle.vision.transforms import Pad + + + transform = Pad(2) + fake_img = np.random.rand(500, 500, 3).astype('float32') + fake_img = transform(fake_img) + + print(fake_img.shape) + # (504, 504, 3) \ No newline at end of file diff --git a/doc/paddle/api/paddle/vision/transforms/transforms/Permute_cn.rst b/doc/paddle/api/paddle/vision/transforms/transforms/Permute_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..fd7de54a88f9715a25bdd4b1c54cf44409b67fed --- /dev/null +++ b/doc/paddle/api/paddle/vision/transforms/transforms/Permute_cn.rst @@ -0,0 +1,38 @@ +.. _cn_api_vision_transforms_Permute: + +Permute +------------------------------- + +.. py:class:: paddle.vision.transforms.Permute(mode="CHW", to_rgb=True) + +将输入的图像数据更改为目标格式。例如,大多数数据预处理是使用HWC格式的图片,而神经网络可能使用CHW模式输入张量。 + +.. note:: + 输入图像应为HWC格式的numpy.ndarray。 + +参数 +::::::::: + + - mode (str) - 输出图像的格式,默认值为CHW(图像通道-图像高度-图像宽度)。 + - to_rgb (bool) - 将BGR格式图像转换为RGB格式,默认值为True,启用此项转换。 + +返回 +::::::::: + + ``numpy ndarray``,更改格式后的图像。 + +代码示例 +::::::::: + +.. code-block:: python + + import numpy as np + from paddle.vision.transforms import Permute + + + transform = Permute() + fake_img = np.random.rand(500, 500, 3).astype('float32') + fake_img = transform(fake_img) + + print(fake_img.shape) + # (3, 500, 500) \ No newline at end of file diff --git a/doc/paddle/api/paddle/vision/transforms/transforms/RandomCrop_cn.rst b/doc/paddle/api/paddle/vision/transforms/transforms/RandomCrop_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..afbfa282ad69afe6cb713f8c6c66995d7706834a --- /dev/null +++ b/doc/paddle/api/paddle/vision/transforms/transforms/RandomCrop_cn.rst @@ -0,0 +1,36 @@ +.. _cn_api_vision_transforms_RandomCrop: + +RandomCrop +------------------------------- + +.. 
py:class:: paddle.vision.transforms.RandomCrop(size, padding=0, pad_if_needed=False) + +在随机位置裁剪输入的图像。 + +参数 +::::::::: + + - size (sequence|int) - 裁剪后的图片大小。如果size是一个int值,而不是(h, w)这样的序列,那么会做一个方形的裁剪(size, size)。 + - padding (int|sequence,可选) - 对图像四周外边进行填充,如果提供了长度为4的序列,则将其分别用于填充左边界,上边界,右边界和下边界。 默认值:0,不填充。 + - pad_if_needed (boolean,可选) - 如果裁剪后的图像小于期望的大小时,是否对裁剪后的图像进行填充,以避免引发异常,默认值:False,保持初次裁剪后的大小,不填充。 + +返回 +::::::::: + + ``numpy ndarray``,随机裁剪后的图像。 + +代码示例 +::::::::: + +.. code-block:: python + + import numpy as np + from paddle.vision.transforms import RandomCrop + + + transform = RandomCrop(224) + fake_img = np.random.rand(500, 500, 3).astype('float32') + fake_img = transform(fake_img) + + print(fake_img.shape) + # (224, 224, 3) diff --git a/doc/paddle/api/paddle/vision/transforms/transforms/RandomErasing_cn.rst b/doc/paddle/api/paddle/vision/transforms/transforms/RandomErasing_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..061575511e0ccbb5171db5141de67c3fcef9b285 --- /dev/null +++ b/doc/paddle/api/paddle/vision/transforms/transforms/RandomErasing_cn.rst @@ -0,0 +1,71 @@ +.. _cn_api_vision_transforms_RandomErasing: + +RandomErasing +------------------------------- + +.. py:class:: paddle.vision.transforms.RandomErasing(prob=0.5, scale=(0.02, 0.4), ratio=0.3, value=[0., 0., 0.]) + +随机选择图像中的一个矩形区域并将其像素删除。 +具体可参见``Random Erasing Data Augmentation``这篇论文,链接:https://arxiv.org/pdf/1708.04896.pdf。 + +参数 +::::::::: + + - prob (float) - 随机擦除操作被执行的概率。 + - scale (tuple) - 擦除区域相对于输入图像的比例范围,参数格式:(min,max)。 + - ratio (float) - 擦除区域的宽高比范围。 + - value (float|list|tuple) - 擦除操作使用的像素值。如果为单个float值,则用于擦除所有像素。 + 如果是长度为3的元组或数组,则分别用于擦除R,G,B通道。默认值:0.,使用0.来擦除矩形区域。 + +返回 +::::::::: + + ``numpy ndarray``,随机擦除后的图像。 + +代码示例 +::::::::: + +.. code-block:: python + + import numpy as np + from paddle.vision.transforms import RandomErasing + + transform = RandomErasing() + np.random.seed(5) + fake_img = np.random.rand(5, 5, 3).astype('float32') + fake_img = transform(fake_img) + + print(fake_img) + """ + 因函数会随机擦除,以下是其中一种结果示例,0.的位置就是被做了擦除。 + [[[0.22199318 0.8707323 0.20671916] + [0.91861093 0.4884112 0.61174387] + [0.7659079 0.518418 0.2968005 ] + [0.18772122 0.08074127 0.7384403 ] + [0.4413092 0.15830986 0.87993705]] + + [[0. 0. 0. ] + [0. 0. 0. ] + [0.26581913 0.28468588 0.2535882 ] + [0.32756394 0.1441643 0.16561286] + [0.96393055 0.9602267 0.18841465]] + + [[0. 0. 0. ] + [0. 0. 0. ] + [0.00164217 0.5154726 0.6397952 ] + [0.98562443 0.2590976 0.8024969 ] + [0.8704831 0.92274964 0.00221421]] + + [[0.46948838 0.98146874 0.3989448 ] + [0.8137325 0.5464565 0.7708541 ] + [0.48493108 0.02911156 0.08652569] + [0.11145381 0.2512451 0.9649153 ] + [0.6317661 0.8166602 0.566082 ]] + + [[0.6353562 0.8119024 0.9266826 ] + [0.91262674 0.82481074 0.09420273] + [0.36104843 0.03550903 0.54635835] + [0.7961427 0.0511428 0.18866773] + [0.36547777 0.24429087 0.79508746]]] + """ + \ No newline at end of file diff --git a/doc/paddle/api/paddle/vision/transforms/transforms/RandomHorizontalFlip_cn.rst b/doc/paddle/api/paddle/vision/transforms/transforms/RandomHorizontalFlip_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e20fc1f0a8bf410297f2473b235a45f4ed685806 --- /dev/null +++ b/doc/paddle/api/paddle/vision/transforms/transforms/RandomHorizontalFlip_cn.rst @@ -0,0 +1,62 @@ +.. _cn_api_vision_transforms_RandomHorizontalFlip: + +RandomHorizontalFlip +------------------------------- + +.. 
py:class:: paddle.vision.transforms.RandomHorizontalFlip(prob=0.5) + +基于概率来执行图片的水平翻转。 + +参数 +::::::::: + + - prob (float) - 图片执行水平翻转的概率,默认值为0.5。 + +返回 +::::::::: + + ``numpy ndarray``,概率执行水平翻转后的图像。 + +代码示例 +::::::::: + +.. code-block:: python + + import numpy as np + from paddle.vision.transforms import RandomHorizontalFlip + + transform = RandomHorizontalFlip(1) + np.random.seed(5) + fake_img = np.random.rand(3, 3, 3).astype('float32') + print('翻转前的图片') + print(fake_img) + fake_img = transform(fake_img) + + print('翻转后的图片') + print(fake_img) + """ + 翻转前的图片 + [[[0.22199318 0.8707323 0.20671916] + [0.91861093 0.4884112 0.61174387] + [0.7659079 0.518418 0.2968005 ]] + + [[0.18772122 0.08074127 0.7384403 ] + [0.4413092 0.15830986 0.87993705] + [0.27408648 0.41423503 0.29607993]] + + [[0.62878793 0.5798378 0.5999292 ] + [0.26581913 0.28468588 0.2535882 ] + [0.32756394 0.1441643 0.16561286]]] + 翻转后的图片 + [[[0.7659079 0.518418 0.2968005 ] + [0.91861093 0.4884112 0.61174387] + [0.22199318 0.8707323 0.20671916]] + + [[0.27408648 0.41423503 0.29607993] + [0.4413092 0.15830986 0.87993705] + [0.18772122 0.08074127 0.7384403 ]] + + [[0.32756394 0.1441643 0.16561286] + [0.26581913 0.28468588 0.2535882 ] + [0.62878793 0.5798378 0.5999292 ]]] + """ \ No newline at end of file diff --git a/doc/paddle/api/paddle/vision/transforms/transforms/RandomResizedCrop_cn.rst b/doc/paddle/api/paddle/vision/transforms/transforms/RandomResizedCrop_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0112ca44357480bfab61bd00d0c94b9eb408651d --- /dev/null +++ b/doc/paddle/api/paddle/vision/transforms/transforms/RandomResizedCrop_cn.rst @@ -0,0 +1,87 @@ +.. _cn_api_vision_transforms_RandomResizedCrop: + +RandomResizedCrop +------------------------------- + +.. py:class:: paddle.vision.transforms.RandomResizedCrop(output_size, scale=(0.08, 1.0), ratio=(3. / 4, 4. / 3), interpolation=cv2.INTER_LINEAR) + +将输入图像按照随机大小和长宽比进行裁剪。 +会根据参数生成基于原图像的随机比例(默认值:0.08至1.0)和随机宽高比(默认值:3./4至4./3)。 +经过此接口操作后,输入图像将调整为参数指定大小。 + +参数 +::::::::: + + - output_size (int|list|tuple) - 输出图像大小,当为单个int值时,生成output_size大小的方形图片,为(height,width)格式的数组或元组时按照参数大小输出。 + - scale (list|tuple) - 基于原图选定的需要进行裁剪的范围,默认值:(0.08,1.0)。 + - ratio (list|tuple) - 裁剪后的目标图像宽高比范围,默认值: (0.75, 1.33)。 + +返回 +::::::::: + + ``numpy ndarray``,随机裁剪和改变大小后的图像。 + +代码示例 +::::::::: + +.. 
code-block:: python + + import numpy as np + from paddle.vision.transforms import RandomResizedCrop + + + transform = RandomResizedCrop(3) + fake_img = np.random.rand(5, 5, 3).astype('float32') + + print('original image:') + print(fake_img) + + fake_img = transform(fake_img) + + print('output image:') + print(fake_img) + + """ + original image: + [[[0.22199318 0.8707323 0.20671916] + [0.91861093 0.4884112 0.61174387] + [0.7659079 0.518418 0.2968005 ] + [0.18772122 0.08074127 0.7384403 ] + [0.4413092 0.15830986 0.87993705]] + + [[0.27408648 0.41423503 0.29607993] + [0.62878793 0.5798378 0.5999292 ] + [0.26581913 0.28468588 0.2535882 ] + [0.32756394 0.1441643 0.16561286] + [0.96393055 0.9602267 0.18841465]] + + [[0.02430656 0.20455554 0.6998436 ] + [0.7795146 0.02293309 0.5776629 ] + [0.00164217 0.5154726 0.6397952 ] + [0.98562443 0.2590976 0.8024969 ] + [0.8704831 0.92274964 0.00221421]] + + [[0.46948838 0.98146874 0.3989448 ] + [0.8137325 0.5464565 0.7708541 ] + [0.48493108 0.02911156 0.08652569] + [0.11145381 0.2512451 0.9649153 ] + [0.6317661 0.8166602 0.566082 ]] + + [[0.6353562 0.8119024 0.9266826 ] + [0.91262674 0.82481074 0.09420273] + [0.36104843 0.03550903 0.54635835] + [0.7961427 0.0511428 0.18866773] + [0.36547777 0.24429087 0.79508746]]] + output image: + [[[0.7659079 0.518418 0.2968005 ] + [0.18772122 0.08074127 0.7384403 ] + [0.4413092 0.15830986 0.87993705]] + + [[0.26581913 0.28468588 0.2535882 ] + [0.32756394 0.1441643 0.16561286] + [0.96393055 0.9602267 0.18841465]] + + [[0.00164217 0.5154726 0.6397952 ] + [0.98562443 0.2590976 0.8024969 ] + [0.8704831 0.92274964 0.00221421]]] + """ diff --git a/doc/paddle/api/paddle/vision/transforms/transforms/RandomRotate_cn.rst b/doc/paddle/api/paddle/vision/transforms/transforms/RandomRotate_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1d4c2b7359948b130a2d13a9f6b8660a5292d5fa --- /dev/null +++ b/doc/paddle/api/paddle/vision/transforms/transforms/RandomRotate_cn.rst @@ -0,0 +1,42 @@ +.. _cn_api_vision_transforms_RandomRotate: + +RandomRotate +------------------------------- + +.. py:class:: paddle.vision.transforms.RandomRotate(degrees, interpolation=cv2.INTER_LINEAR, expand=False, center=None) + +按角度旋转图像。 + +参数 +::::::::: + + - degrees (sequence|float|int) - 旋转的角度度数范围。如果度数是数字而不是像(min,max)这样的序列,则会根据degrees参数值生成度数范围(-degrees,+degrees)。 + - interpolation (int,可选) - 调整图片大小时使用的插值模式。默认值: cv2.INTER_LINEAR。 + - expand (bool,可选): 是否要对旋转后的图片进行大小扩展,默认值: False,不进行扩展。 + 当参数值为True时,会对图像大小进行扩展,让其能够足以容纳整个旋转后的图像。 + 当参数值为False时,会按照原图像大小保留旋转后的图像。 + **这个扩展操作的前提是围绕中心旋转且没有平移。** + - center (2-tuple,可选) - 旋转的中心点坐标,原点是图片左上角,默认值是图像的中心点。 + +返回 +::::::::: + + ``numpy ndarray``,随机旋转一定角度后的图像。 + +代码示例 +::::::::: + +.. code-block:: python + + import numpy as np + from paddle.vision.transforms import RandomRotate + + + transform = RandomRotate(90) + np.random.seed(5) + fake_img = np.random.rand(500, 400, 3).astype('float32') + fake_img = transform(fake_img) + + print(fake_img.shape) + # (500, 400, 3) + \ No newline at end of file diff --git a/doc/paddle/api/paddle/vision/transforms/transforms/RandomVerticalFlip_cn.rst b/doc/paddle/api/paddle/vision/transforms/transforms/RandomVerticalFlip_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..5400dc9d7abec381929b9cd4922be8c57a5fd2dd --- /dev/null +++ b/doc/paddle/api/paddle/vision/transforms/transforms/RandomVerticalFlip_cn.rst @@ -0,0 +1,35 @@ +.. _cn_api_vision_transforms_RandomVerticalFlip: + +RandomVerticalFlip +------------------------------- + +.. 
py:class:: paddle.vision.transforms.RandomVerticalFlip(prob=0.5) + +基于概率来执行图片的垂直翻转。 + +参数 +::::::::: + + - prob (float) - 执行图片垂直翻转的概率,默认值为0.5。 + +返回 +::::::::: + + ``numpy ndarray``,概率执行垂直翻转后的图像。 + +代码示例 +::::::::: + +.. code-block:: python + + import numpy as np + from paddle.vision.transforms import RandomVerticalFlip + + + transform = RandomVerticalFlip() + np.random.seed(5) + fake_img = np.random.rand(500, 500, 3).astype('float32') + fake_img = transform(fake_img) + + print(fake_img.shape) + # (500, 500, 3) \ No newline at end of file diff --git a/doc/paddle/api/paddle/vision/transforms/transforms/Resize_cn.rst b/doc/paddle/api/paddle/vision/transforms/transforms/Resize_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..894245966d13da677ce2b9563b0c75350348207e --- /dev/null +++ b/doc/paddle/api/paddle/vision/transforms/transforms/Resize_cn.rst @@ -0,0 +1,38 @@ +.. _cn_api_vision_transforms_Resize: + +Resize +------------------------------- + +.. py:class:: paddle.vision.transforms.Resize(size, interpolation=cv2.INTER_LINEAR) + +将输入数据调整为指定大小。 + +参数 +::::::::: + + - size (int|list|tuple) - 输出图像大小。 + 如果size是一个序列,例如(h,w),输出大小将与此匹配。 + 如果size为int,图像的较小边缘将与此数字匹配,即如果 height > width,则图像将重新缩放为(size * height / width, size)。 + - interpolation (int,可选) - 调整图片大小时使用的插值模式。默认值: cv2.INTER_LINEAR。 + +返回 +::::::::: + + ``numpy ndarray``,调整大小后的图像。 + +代码示例 +::::::::: + +.. code-block:: python + + import numpy as np + from paddle.vision.transforms import Resize + + + transform = Resize(size=224) + np.random.seed(5) + fake_img = np.random.rand(500, 500, 3).astype('float32') + fake_img = transform(fake_img) + + print(fake_img.shape) + # (224, 224, 3) \ No newline at end of file diff --git a/doc/paddle/api/paddle/vision/transforms/transforms/SaturationTransform_cn.rst b/doc/paddle/api/paddle/vision/transforms/transforms/SaturationTransform_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..702ac4cce4ed883a7527dd23734527b04d626ff5 --- /dev/null +++ b/doc/paddle/api/paddle/vision/transforms/transforms/SaturationTransform_cn.rst @@ -0,0 +1,35 @@ +.. _cn_api_vision_transforms_SaturationTransform: + +SaturationTransform +------------------------------- + +.. py:class:: paddle.vision.transforms.SaturationTransform(value) + +调整图像的饱和度 + +参数 +::::::::: + + - value (float) - 饱和度的调整数值,非负数,当参数值为0时返回原始图像。 + +返回 +::::::::: + + ``numpy ndarray``,调整饱和度后的图像。 + +代码示例 +::::::::: + +.. code-block:: python + + import numpy as np + from paddle.vision.transforms import SaturationTransform + + + transform = SaturationTransform(0.4) + np.random.seed(5) + fake_img = np.random.rand(500, 500, 3).astype('float32') + fake_img = transform(fake_img) + + print(fake_img.shape) + # (500, 500, 3) \ No newline at end of file diff --git a/doc/paddle/api_guides/X2Paddle/Caffe-Fluid.rst b/doc/paddle/api_guides/X2Paddle/Caffe-Fluid.rst new file mode 100644 index 0000000000000000000000000000000000000000..c3b078d29c119213a9624c50f45c199f43c25690 --- /dev/null +++ b/doc/paddle/api_guides/X2Paddle/Caffe-Fluid.rst @@ -0,0 +1,45 @@ +.. _Caffe-Fluid: + +######################## +Caffe-Fluid常用层对应表 +######################## + +本文档梳理了Caffe常用Layer与PaddlePaddle API对应关系和差异分析。根据文档对应关系,有Caffe使用经验的用户,可根据对应关系,快速熟悉PaddlePaddle的接口使用。 + + +.. 
csv-table:: + :header: "序号", "Caffe Layer", "Fluid接口", "备注" + :widths: 1, 8, 8, 3 + + "1", "`AbsVal `_", ":ref:`cn_api_fluid_layers_abs`", "功能一致" + "2", "`Accuracy `_", ":ref:`cn_api_fluid_layers_accuracy`", "`差异对比 `_" + "3", "`ArgMax `_", ":ref:`cn_api_fluid_layers_argmax`", "`差异对比 `_" + "4", "`BatchNorm `_", ":ref:`cn_api_fluid_layers_batch_norm`", "`差异对比 `_" + "5", "`BNLL `_", ":ref:`cn_api_fluid_layers_softplus`", "功能一致" + "6", "`Concat `_", ":ref:`cn_api_fluid_layers_concat`", "功能一致" + "7", "`Convolution `_", ":ref:`cn_api_fluid_layers_conv2d`", "`差异对比 `_" + "8", "`Crop `_", ":ref:`cn_api_fluid_layers_crop`", "`差异对比 `_" + "9", "`Deconvolution `_", ":ref:`cn_api_fluid_layers_conv2d_transpose`", "`差异对比 `_" + "10", "`Dropout `_", ":ref:`cn_api_fluid_layers_dropout`", "`差异对比 `_" + "11", "`Eltwise `_", "无相应接口", "`Fluid实现 `_" + "12", "`ELU `_", ":ref:`cn_api_fluid_layers_elu`", "功能一致" + "13", "`EuclideanLoss `_", ":ref:`cn_api_fluid_layers_square_error_cost`", "`差异对比 `_" + "14", "`Exp `_", ":ref:`cn_api_fluid_layers_exp`", "`差异对比 `_" + "15", "`Flatten `_", ":ref:`cn_api_fluid_layers_reshape`", "`差异对比 `_" + "16", "`InnerProduct `_", ":ref:`cn_api_fluid_layers_fc`", "`差异对比 `_" + "17", "`Input `_", ":ref:`cn_api_fluid_layers_data`", "`差异对比 `_" + "18", "`Log `_", ":ref:`cn_api_fluid_layers_log`", "`差异对比 `_" + "19", "`LRN `_", ":ref:`cn_api_fluid_layers_lrn`", "`差异对比 `_" + "20", "`Pooling `_", ":ref:`cn_api_fluid_layers_pool2d`", "`差异对比 `_" + "21", "`Power `_", ":ref:`cn_api_fluid_layers_pow`", "`差异对比 `_" + "22", "`PReLU `_", ":ref:`cn_api_fluid_layers_prelu`", "功能一致" + "23", "`Reduction `_", "无相应接口", "`Fluid实现 `_" + "24", "`ReLU `_", ":ref:`cn_api_fluid_layers_leaky_relu`", "功能一致" + "25", "`Reshape `_", ":ref:`cn_api_fluid_layers_reshape`", "`差异对比 `_" + "26", "`SigmoidCrossEntropyLoss `_", ":ref:`cn_api_fluid_layers_sigmoid_cross_entropy_with_logits`", "`差异对比 `_" + "27", "`Sigmoid `_", ":ref:`cn_api_fluid_layers_sigmoid`", "功能一致" + "28", "`Slice `_", ":ref:`cn_api_fluid_layers_slice`", "`差异对比 `_" + "29", "`SoftmaxWithLoss `_", ":ref:`cn_api_fluid_layers_softmax_with_cross_entropy`", "`差异对比 `_" + "30", "`Softmax `_", ":ref:`cn_api_fluid_layers_softmax`", "`差异对比 `_" + "31", "`TanH `_", ":ref:`cn_api_fluid_layers_tanh`", "功能一致" + "32", "`Tile `_", ":ref:`cn_api_fluid_layers_expand`", "`差异对比 `_" diff --git a/doc/paddle/api_guides/X2Paddle/TensorFlow-Fluid.rst b/doc/paddle/api_guides/X2Paddle/TensorFlow-Fluid.rst new file mode 100644 index 0000000000000000000000000000000000000000..20ee91c8cd7c19c6521b68962647f46620b436aa --- /dev/null +++ b/doc/paddle/api_guides/X2Paddle/TensorFlow-Fluid.rst @@ -0,0 +1,154 @@ +.. _TensorFlow-Fluid: + +############################### +TensorFlow-Fluid常用接口对应表 +############################### + +本文档基于TensorFlow v1.13梳理了常用API与PaddlePaddle API对应关系和差异分析。根据文档对应关系,有TensorFlow使用经验的用户,可根据对应关系,快速熟悉PaddlePaddle的接口使用。 + +.. 
csv-table:: + :header: "序号", "TensorFlow接口", "Fluid接口", "备注" + :widths: 1, 8, 8, 3 + + "1", "`tf.abs `_", ":ref:`cn_api_fluid_layers_abs`", "功能一致" + "2", "`tf.add `_", ":ref:`cn_api_fluid_layers_elementwise_add`", "功能一致" + "3", "`tf.argmax `_", ":ref:`cn_api_fluid_layers_argmax`", "功能一致" + "4", "`tf.argmin `_", ":ref:`cn_api_fluid_layers_argmin`", "功能一致" + "5", "`tf.assign `_", ":ref:`cn_api_fluid_layers_assign`", "功能一致" + "6", "`tf.assign_add `_", ":ref:`cn_api_fluid_layers_increment`", "功能一致" + "7", "`tf.case `_", ":ref:`cn_api_fluid_layers_Switch`", "`差异对比 `_" + "8", "`tf.cast `_", ":ref:`cn_api_fluid_layers_cast`", "功能一致" + "9", "`tf.clip_by_global_norm `_", ":ref:`cn_api_fluid_clip_GradientClipByGlobalNorm`", "`差异对比 `_" + "10", "`tf.clip_by_norm `_", ":ref:`cn_api_fluid_layers_clip_by_norm`", "`差异对比 `_" + "11", "`tf.clip_by_value `_", ":ref:`cn_api_fluid_layers_clip`", "功能一致" + "12", "`tf.concat `_", ":ref:`cn_api_fluid_layers_concat`", "功能一致" + "13", "`tf.cond `_", ":ref:`cn_api_fluid_layers_ifElse`", "功能一致" + "14", "`tf.constant `_", ":ref:`cn_api_fluid_layers_fill_constant`", "功能一致" + "15", "`tf.contrib.layers.batch_norm `_", ":ref:`cn_api_fluid_layers_batch_norm`", "功能一致" + "16", "`tf.contrib.layers.flatten `_", ":ref:`cn_api_fluid_layers_flatten`", "`差异对比 `_" + "17", "`tf.contrib.layers.fully_connected `_", ":ref:`cn_api_fluid_layers_fc`", "功能一致" + "18", "`tf.contrib.layers.one_hot_encoding `_", ":ref:`cn_api_fluid_layers_one_hot`", "功能一致" + "19", "`tf.contrib.layers.softmax `_", ":ref:`cn_api_fluid_layers_softmax`", "功能一致" + "20", "`tf.contrib.layers.xavier_initializer `_", ":ref:`cn_api_fluid_initializer_Xavier`", "功能一致" + "21", "`tf.nn.rnn.GRUCell `_", ":ref:`cn_api_fluid_layers_gru_unit`", "`差异对比 `_" + "22", "`tf.nn.rnn.MultiRNNCell `_", "无相应接口", "`Fluid实现 `_" + "23", "`tf.nn.rnn.static_rnn `_", ":ref:`cn_api_fluid_layers_DynamicRNN`", "功能一致" + "24", "`tf.convert_to_tensor `_", ":ref:`cn_api_fluid_layers_assign`", "功能一致" + "25", "`tf.cos `_", ":ref:`cn_api_fluid_layers_cos`", "功能一致" + "26", "`tf.div `_", ":ref:`cn_api_fluid_layers_elementwise_div`", "功能一致" + "27", "`tf.divide `_", ":ref:`cn_api_fluid_layers_elementwise_div`", "功能一致" + "28", "`tf.dropout `_", ":ref:`cn_api_fluid_layers_dropout`", "`差异对比 `_" + "29", "`tf.equal `_", "`运算符== `_", "功能一致" + "30", "`tf.exp `_", ":ref:`cn_api_fluid_layers_exp`", "功能一致" + "31", "`tf.expand_dims `_", ":ref:`cn_api_fluid_layers_unsqueeze`", "`差异对比 `_" + "32", "`tf.fill `_", ":ref:`cn_api_fluid_layers_fill_constant`", "功能一致" + "33", "`tf.floor `_", ":ref:`cn_api_fluid_layers_floor`", "功能一致" + "34", "`tf.gather `_", ":ref:`cn_api_fluid_layers_gather`", "功能一致" + "35", "`tf.greater `_", "`运算符> `_", "功能一致" + "36", "`tf.greater_equal `_", "`运算符>= `_", "功能一致" + "37", "`tf.image.non_max_suppression `_", ":ref:`cn_api_fluid_layers_multiclass_nms`", "`差异对比 `_" + "38", "`tf.image.resize_bilinear `_", ":ref:`cn_api_fluid_layers_resize_bilinear`", "功能一致" + "39", "`tf.image.resize_images `_", ":ref:`cn_api_fluid_layers_image_resize`", "`差异对比 `_" + "40", "`tf.image.resize_nearest_neighbor `_", ":ref:`cn_api_fluid_layers_resize_nearest`", "功能一致" + "41", "`tf.is_finite `_", ":ref:`cn_api_fluid_layers_isfinite`", "`差异对比 `_" + "42", "`tf.layers.batch_normalization `_", ":ref:`cn_api_fluid_layers_batch_norm`", "功能一致" + "43", "`tf.layers.conv2d `_", ":ref:`cn_api_fluid_layers_conv2d`", "`差异对比 `_" + "44", "`tf.layers.dense `_", ":ref:`cn_api_fluid_layers_fc`", "`差异对比 `_" + "45", "`tf.layers.dropout `_", ":ref:`cn_api_fluid_layers_dropout`", "功能一致" + "46", 
"`tf.layers.Dropout `_", ":ref:`cn_api_fluid_layers_dropout`", "功能一致" + "47", "`tf.layers.flatten `_", ":ref:`cn_api_fluid_layers_flatten`", "功能一致" + "48", "`tf.less `_", "`运算符< `_", "功能一致" + "49", "`tf.less_equal `_", "`运算符<= `_", "功能一致" + "50", "`tf.log `_", ":ref:`cn_api_fluid_layers_log`", "功能一致" + "51", "`tf.logical_and `_", ":ref:`cn_api_fluid_layers_logical_and`", "功能一致" + "52", "`tf.logical_not `_", ":ref:`cn_api_fluid_layers_logical_not`", "功能一致" + "53", "`tf.logical_or `_", ":ref:`cn_api_fluid_layers_logical_or`", "功能一致" + "54", "`tf.losses.mean_squared_error `_", ":ref:`cn_api_fluid_layers_square_error_cost`", "`差异对比 `_" + "55", "`tf.losses.sigmoid_cross_entropy `_", ":ref:`cn_api_fluid_layers_sigmoid_cross_entropy_with_logits`", "`差异对比 `_" + "56", "`tf.losses.softmax_cross_entropy `_", ":ref:`cn_api_fluid_layers_softmax_with_cross_entropy`", "功能一致" + "57", "`tf.matmul `_", ":ref:`cn_api_fluid_layers_matmul`", "`差异对比 `_" + "58", "`tf.maximum `_", ":ref:`cn_api_fluid_layers_elementwise_max`", "功能一致" + "59", "`tf.metrics.accuracy `_", ":ref:`cn_api_fluid_layers_accuracy`", "功能一致" + "60", "`tf.metrics.mean `_", ":ref:`cn_api_fluid_layers_mean`", "功能一致" + "61", "`tf.minimum `_", ":ref:`cn_api_fluid_layers_elementwise_min`", "功能一致" + "62", "`tf.multiply `_", ":ref:`cn_api_fluid_layers_elementwise_mul`", "功能一致" + "63", "`tf.nn.avg_pool `_", ":ref:`cn_api_fluid_layers_pool2d`", "`差异对比 `_" + "64", "`tf.nn.batch_normalization `_", ":ref:`cn_api_fluid_layers_batch_norm`", "功能一致" + "65", "`tf.nn.bidirectional_dynamic_rnn `_", "无相应接口", "`Fluid实现 `_" + "66", "`tf.nn.conv2d `_", ":ref:`cn_api_fluid_layers_conv2d`", "`差异对比 `_" + "67", "`tf.nn.conv2d_transpose `_", ":ref:`cn_api_fluid_layers_conv2d_transpose`", "`差异对比 `_" + "68", "`tf.nn.conv3d_transpose `_", ":ref:`cn_api_fluid_layers_conv3d_transpose`", "`差异对比 `_" + "69", "`tf.nn.depthwise_conv2d `_", ":ref:`cn_api_fluid_layers_conv2d`", "`差异对比 `_" + "70", "`tf.nn.dynamic_rnn `_", ":ref:`cn_api_fluid_layers_DynamicRNN`", "`差异对比 `_" + "71", "`tf.nn.l2_normalize `_", ":ref:`cn_api_fluid_layers_l2_normalize`", "`差异对比 `_" + "72", "`tf.nn.leaky_relu `_", ":ref:`cn_api_fluid_layers_leaky_relu`", "功能一致" + "73", "`tf.nn.lrn `_", ":ref:`cn_api_fluid_layers_lrn`", "`差异对比 `_" + "74", "`tf.nn.max_pool `_", ":ref:`cn_api_fluid_layers_pool2d`", "`差异对比 `_" + "75", "`tf.nn.relu `_", ":ref:`cn_api_fluid_layers_relu`", "功能一致" + "76", "`tf.nn.relu6 `_", ":ref:`cn_api_fluid_layers_relu6`", "功能一致" + "77", "`tf.nn.rnn_cell.LSTMCell `_", ":ref:`cn_api_fluid_layers_lstm_unit`", "`差异对比 `_" + "78", "`tf.nn.separable_conv2d `_", "无相应接口", "`Fluid实现 `_" + "79", "`tf.nn.sigmoid `_", ":ref:`cn_api_fluid_layers_sigmoid`", "功能一致" + "80", "`tf.nn.sigmoid_cross_entropy_with_logits `_", ":ref:`cn_api_fluid_layers_sigmoid_cross_entropy_with_logits`", "功能一致" + "81", "`tf.nn.softmax `_", ":ref:`cn_api_fluid_layers_softmax`", "功能一致" + "82", "`tf.nn.softmax_cross_entropy_with_logits `_", ":ref:`cn_api_fluid_layers_softmax_with_cross_entropy`", "`差异对比 `_" + "83", "`tf.nn.softplus `_", ":ref:`cn_api_fluid_layers_softplus`", "功能一致" + "84", "`tf.nn.softsign `_", ":ref:`cn_api_fluid_layers_softsign`", "功能一致" + "85", "`tf.nn.tanh `_", ":ref:`cn_api_fluid_layers_tanh`", "功能一致" + "86", "`tf.one_hot `_", ":ref:`cn_api_fluid_layers_one_hot`", "`差异对比 `_" + "87", "`tf.ones `_", ":ref:`cn_api_fluid_layers_ones`", "功能一致" + "88", "`tf.intializers.ones `_", ":ref:`cn_api_fluid_initializer_Constant`", "功能一致" + "89", "`tf.pad `_", ":ref:`cn_api_fluid_layers_pad`", "`差异对比 `_" + "90", "`tf.placeholder 
`_", ":ref:`cn_api_fluid_layers_data`", "`差异对比 `_" + "91", "`tf.pow `_", ":ref:`cn_api_fluid_layers_pow`", "`差异对比 `_" + "92", "`tf.print `_", ":ref:`cn_api_fluid_layers_print`", "`差异对比 `_" + "93", "`tf.py_func `_", ":ref:`cn_api_fluid_layers_py_func`", "功能一致" + "94", "`tf.random_normal `_", ":ref:`cn_api_fluid_layers_gaussian_random`", "功能一致" + "95", "`tf.random_normal_initializer `_", ":ref:`cn_api_fluid_initializer_Normal`", "功能一致" + "96", "`tf.random_uniform `_", ":ref:`cn_api_fluid_layers_uniform_random`", "功能一致" + "97", "`tf.random_uniform_initializer `_", ":ref:`cn_api_fluid_initializer_UniformInitializer`", "功能一致" + "98", "`tf.reduce_logsumexp `_", "无相应接口", "`Fluid实现 `_" + "99", "`tf.reduce_max `_", ":ref:`cn_api_fluid_layers_reduce_max`", "功能一致" + "100", "`tf.reduce_mean `_", ":ref:`cn_api_fluid_layers_reduce_mean`", "功能一致" + "101", "`tf.reduce_min `_", ":ref:`cn_api_fluid_layers_reduce_min`", "功能一致" + "102", "`tf.reduce_sum `_", ":ref:`cn_api_fluid_layers_reduce_sum`", "功能一致" + "103", "`tf.reshape `_", ":ref:`cn_api_fluid_layers_reshape`", "`差异对比 `_" + "104", "`tf.reverse `_", ":ref:`cn_api_fluid_layers_reverse`", "功能一致" + "105", "`tf.reverse_sequence `_", ":ref:`cn_api_fluid_layers_sequence_reverse`", "`差异对比 `_" + "106", "`tf.reverse_v2 `_", ":ref:`cn_api_fluid_layers_reverse`", "功能一致" + "107", "`tf.round `_", ":ref:`cn_api_fluid_layers_round`", "功能一致" + "108", "`tf.rsqrt `_", ":ref:`cn_api_fluid_layers_rsqrt`", "功能一致" + "109", "`tf.scalar_mul `_", ":ref:`cn_api_fluid_layers_scale`", "功能一致" + "110", "`tf.scatter_update `_", ":ref:`cn_api_fluid_layers_scatter`", "`差异对比 `_" + "111", "`tf.sequence_mask `_", ":ref:`cn_api_fluid_layers_sequence_mask`", "功能一致" + "112", "`tf.shape `_", ":ref:`cn_api_fluid_layers_shape`", "功能一致" + "113", "`tf.sigmoid `_", ":ref:`cn_api_fluid_layers_sigmoid`", "功能一致" + "114", "`tf.sin `_", ":ref:`cn_api_fluid_layers_sin`", "功能一致" + "115", "`tf.slice `_", ":ref:`cn_api_fluid_layers_slice`", "`差异对比 `_" + "116", "`tf.split `_", ":ref:`cn_api_fluid_layers_split`", "`差异对比 `_" + "117", "`tf.sqrt `_", ":ref:`cn_api_fluid_layers_sqrt`", "功能一致" + "118", "`tf.square `_", ":ref:`cn_api_fluid_layers_square`", "功能一致" + "119", "`tf.squared_difference `_", "无相应接口", "`Fluid实现 `_" + "120", "`tf.squeeze `_", ":ref:`cn_api_fluid_layers_squeeze`", "功能一致" + "121", "`tf.stack `_", ":ref:`cn_api_fluid_layers_stack`", "功能一致" + "122", "`tf.stop_gradient `_", "无相应接口", "`Fluid实现 `_" + "123", "`tf.subtract `_", ":ref:`cn_api_fluid_layers_elementwise_sub`", "功能一致" + "124", "`tf.tanh `_", ":ref:`cn_api_fluid_layers_tanh`", "功能一致" + "125", "`tf.tile `_", ":ref:`cn_api_fluid_layers_expand`", "功能一致" + "126", "`tf.top_k `_", ":ref:`cn_api_fluid_layers_topk`", "`差异对比 `_" + "127", "`tf.train.AdagradOptimizer `_", ":ref:`cn_api_fluid_optimizer_AdagradOptimizer`", "功能一致" + "128", "`tf.train.AdamOptimizer `_", ":ref:`cn_api_fluid_optimizer_Adam`", "功能一致" + "129", "`tf.train.exponential_decay `_", ":ref:`cn_api_fluid_layers_exponential_decay`", "功能一致" + "130", "`tf.train.GradientDescentOptimizer `_", ":ref:`cn_api_fluid_optimizer_SGDOptimizer`", "功能一致" + "131", "`tf.train.MomentumOptimizer `_", ":ref:`cn_api_fluid_optimizer_MomentumOptimizer`", "功能一致" + "132", "`tf.train.polynomial_decay `_", ":ref:`cn_api_fluid_layers_polynomial_decay`", "功能一致" + "133", "`tf.train.RMSPropOptimizer `_", ":ref:`cn_api_fluid_optimizer_RMSPropOptimizer`", "功能一致" + "134", "`tf.transpose `_", ":ref:`cn_api_fluid_layers_transpose`", "功能一致" + "135", "`tf.truediv `_", ":ref:`cn_api_fluid_layers_elementwise_div`", 
"功能一致" + "136", "`tf.truncated_normal `_", ":ref:`cn_api_fluid_initializer_TruncatedNormal`", "功能一致" + "137", "`tf.truncated_normal_initializer `_", ":ref:`cn_api_fluid_initializer_TruncatedNormal`", "功能一致" + "138", "`tf.unstack `_", ":ref:`cn_api_fluid_layers_unstack`", "功能一致" + "139", "`tf.Variable `_", ":ref:`cn_api_fluid_layers_create_parameter`", "功能一致" + "140", "`tf.while_loop `_", ":ref:`cn_api_fluid_layers_While`", "`差异对比 `_" + "141", "`tf.zeros `_", ":ref:`cn_api_fluid_layers_zeros`", "功能一致" + "142", "`tf.zeros_initializer `_", ":ref:`cn_api_fluid_initializer_Constant`", "功能一致" diff --git a/doc/paddle/api_guides/index_cn.rst b/doc/paddle/api_guides/index_cn.rst new file mode 100755 index 0000000000000000000000000000000000000000..01086f5c049f99bd8566ebe144aad7f2a167da49 --- /dev/null +++ b/doc/paddle/api_guides/index_cn.rst @@ -0,0 +1,25 @@ +=========== +API功能分类 +=========== + +本模块分功能向您介绍PaddlePaddle Fluid的API体系和用法,提高您的查找效率,帮助您快速了解PaddlePaddle Fluid API的全貌,包括以下几个模块: + +.. toctree:: + :maxdepth: 1 + + low_level/program.rst + low_level/layers/index.rst + low_level/nets.rst + low_level/optimizer.rst + low_level/backward.rst + low_level/metrics.rst + low_level/model_save_reader.rst + low_level/inference.rst + low_level/memory_optimize.rst + low_level/executor.rst + low_level/parallel_executor.rst + low_level/compiled_program.rst + low_level/parameter.rst + low_level/distributed/index.rst + X2Paddle/TensorFlow-Fluid.rst + X2Paddle/Caffe-Fluid.rst diff --git a/doc/paddle/api_guides/index_en.rst b/doc/paddle/api_guides/index_en.rst new file mode 100755 index 0000000000000000000000000000000000000000..012a3a401c79937169de355d1a15e27027321a5f --- /dev/null +++ b/doc/paddle/api_guides/index_en.rst @@ -0,0 +1,23 @@ +================= +API Quick Search +================= + +This section introduces the Fluid API structure and usage, to help you quickly get the full picture of the PaddlePaddle Fluid API. This section is divided into the following modules: + +.. toctree:: + :maxdepth: 1 + + low_level/program_en.rst + low_level/layers/index_en.rst + low_level/nets_en.rst + low_level/optimizer_en.rst + low_level/backward_en.rst + low_level/metrics_en.rst + low_level/model_save_reader_en.rst + low_level/inference_en.rst + low_level/memory_optimize_en.rst + low_level/executor_en.rst + low_level/parallel_executor_en.rst + low_level/compiled_program_en.rst + low_level/parameter_en.rst + low_level/distributed/index_en.rst diff --git a/doc/paddle/api_guides/low_level/backward.rst b/doc/paddle/api_guides/low_level/backward.rst new file mode 100644 index 0000000000000000000000000000000000000000..1b5fd9728948be6b40414698050e2ae8df41ddb8 --- /dev/null +++ b/doc/paddle/api_guides/low_level/backward.rst @@ -0,0 +1,23 @@ +.. 
_api_guide_backward: + + +######## +反向传播 +######## + +神经网络对模型的表达能力依赖于优化算法,优化是一个不断计算梯度并调整可学习参数的过程,Fluid中的优化算法可参考 :ref:`api_guide_optimizer` 。 + +在网络的训练过程中,梯度计算分为两个步骤:前向计算与 `反向传播 `_ 。 + +- 前向计算会根据您搭建的网络结构,将输入单元的状态传递到输出单元。 + +- 反向传播借助 `链式法则 `_ ,计算两个或两个以上复合函数的导数,将输出单元的梯度反向传播回输入单元,根据计算出的梯度,调整网络的可学习参数。 + +详细实现过程可以参考阅读 `反向传导算法 `_ 。 + +在Fluid中,我们并不推荐直接调用 :code:`fluid` 中反向传播相关API,因为这是一个极底层的API,请考虑使用 :ref:`api_guide_optimizer` 中的相关API替代。当您使用优化相关API时,Fluid会自动为您计算复杂的反向传播过程。 + +如想自己实现,您也可以使用 :ref:`cn_api_fluid_backward_append_backward` 中的 :code:`callback` 自定义Operator的梯度计算形式。更多用法,请参考: + +* :ref:`cn_api_fluid_backward_append_backward` diff --git a/doc/paddle/api_guides/low_level/backward_en.rst b/doc/paddle/api_guides/low_level/backward_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..022b4900f07e608771c3d76e8b53c4b01056d3b7 --- /dev/null +++ b/doc/paddle/api_guides/low_level/backward_en.rst @@ -0,0 +1,23 @@ +.. _api_guide_backward_en: + + +################ +Back Propagation +################ + +A neural network's ability to express a model depends on the optimization algorithm. Optimization is a process of continuously computing gradients and adjusting the learnable parameters. You can refer to :ref:`api_guide_optimizer_en` to learn more about optimization algorithms in Fluid. + +In the training process of a network, gradient calculation is divided into two steps: forward computation and `back propagation `_ . + +Forward computation transfers the state of the input units to the output units according to the network structure you build. + +Back propagation calculates the derivatives of composite functions by means of the `chain rule `_ . The gradient of the output units is propagated back to the input units, and the learnable parameters of the network are adjusted according to the calculated gradients. + + +You can refer to the `back propagation algorithm `_ for the detailed implementation process. + +We do not recommend directly calling backpropagation-related APIs in :code:`fluid` , as these are very low-level APIs. Consider using the relevant APIs in :ref:`api_guide_optimizer_en` instead. When you use optimizer APIs, Fluid automatically calculates the complex back-propagation for you. + +If you want to implement it yourself, you can also use the :code:`callback` in :ref:`api_fluid_backward_append_backward` to define a customized gradient form for an Operator. +For more information, please refer to: :ref:`api_fluid_backward_append_backward` + diff --git a/doc/paddle/api_guides/low_level/compiled_program.rst b/doc/paddle/api_guides/low_level/compiled_program.rst new file mode 100644 index 0000000000000000000000000000000000000000..4e1bd9b5698b5c8a22e80252cc166ddaa5c376ef --- /dev/null +++ b/doc/paddle/api_guides/low_level/compiled_program.rst @@ -0,0 +1,45 @@ +.. _api_guide_compiled_program: + +################ +CompiledProgram +################ + +:ref:`cn_api_fluid_CompiledProgram` 用于把程序转化为不同的优化组合。例如,你可以使用with_data_parallel将程序转化为数据并行程序,使其能够运行在多个设备上。 + + +.. 
code-block:: python + + # 注释: + # - 如果你想在ParallelExecutor中指定用于运行的GPU卡,需要在环境中定义 + # CUDA_VISIBLE_DEVICES + # - 如果你想在ParallelExecutor中使用多CPU来运行程序,需要在环境中定义 + # CPU_NUM + + # 首先创建Executor。 + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + exe = fluid.Executor(place) + # 运行启动程序仅一次。 + exe.run(fluid.default_startup_program()) + + # 直接运行主程序,无需编译。 + loss = exe.run(fluid.default_main_program(), + feed=feed_dict, + fetch_list=[loss.name]) + + # 或者编译程序后用数据并行方式运行模型。 + exec_strategy = fluid.ExecutionStrategy() + exec_strategy.num_threads = dev_count * 4 # the size of thread pool. + build_strategy = fluid.BuildStrategy() + build_strategy.memory_optimize = True if memory_opt else False + compiled_prog = compiler.CompiledProgram( + fluid.default_main_program()).with_data_parallel( + loss_name=loss.name, + build_strategy=build_strategy, + exec_strategy=exec_strategy) + loss, = exe.run(compiled_prog, + feed=feed_dict, + fetch_list=[loss.name]) + +- 相关API : + + - :ref:`cn_api_fluid_CompiledProgram` diff --git a/doc/paddle/api_guides/low_level/compiled_program_en.rst b/doc/paddle/api_guides/low_level/compiled_program_en.rst new file mode 100755 index 0000000000000000000000000000000000000000..77ea883d6cc9a3c9bf74202e64412cbb5a928fdf --- /dev/null +++ b/doc/paddle/api_guides/low_level/compiled_program_en.rst @@ -0,0 +1,51 @@ +.. _api_guide_compiled_program_en: + +################ +CompiledProgram +################ + +The :ref:`api_fluid_CompiledProgram` is used to transform a program for various optimizations. For example, you can use :code:`with_data_parallel` to transform the program into a data parallel program so that it can be run on multiple devices. + + +.. code-block:: python + + # Note: + # - If you want to specify the GPU cards which are used to run + # in ParallelExecutor, you should define the CUDA_VISIBLE_DEVICES + # in environment. + # - If you want to use multiple CPUs to run the program in ParallelExecutor, + # you should define the CPU_NUM in the environment. + + # First create the Executor. + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + exe = fluid.Executor(place) + + # Run the startup program once and only once. + exe.run(fluid.default_startup_program()) + + # Run the main program directly without compile. + loss = exe.run(fluid.default_main_program(), + feed=feed_dict, + fetch_list=[loss.name]) + + # Or, compile the program, and then run the model with data parallelism. + exec_strategy = fluid.ExecutionStrategy() + exec_strategy.num_threads = dev_count * 4 # the size of thread pool. + build_strategy = fluid.BuildStrategy() + build_strategy.memory_optimize = True if memory_opt else False + + compiled_prog = compiler.CompiledProgram( + fluid.default_main_program()).with_data_parallel( + loss_name=loss.name, + build_strategy=build_strategy, + exec_strategy=exec_strategy) + + loss, = exe.run(compiled_prog, + feed=feed_dict, + fetch_list=[loss.name]) + +**Note**: :code:`fluid.Program` and :code:`compiler.CompiledProgram` are completely different :code:`Programs`. :code:`fluid.Program` is composed of a series of operators. :code:`compiler.CompiledProgram` compiles the :code:`fluid.Program` and converts it into a computational graph. :code:`compiler.CompiledProgram` cannot be saved at present. 
+ + +- Related API : + - :ref:`api_fluid_CompiledProgram` diff --git a/doc/paddle/api_guides/low_level/distributed/async_training.rst b/doc/paddle/api_guides/low_level/distributed/async_training.rst new file mode 100644 index 0000000000000000000000000000000000000000..61f2ee0f79b271dc4d339d1659adbd3264475d80 --- /dev/null +++ b/doc/paddle/api_guides/low_level/distributed/async_training.rst @@ -0,0 +1,33 @@ +.. _api_guide_async_training: + +############ +分布式异步训练 +############ + +Fluid支持数据并行的分布式异步训练,API使用 :code:`DistributeTranspiler` 将单机网络配置转换成可以多机执行的 +:code:`pserver` 端程序和 :code:`trainer` 端程序。用户在不同的节点执行相同的一段代码,根据环境变量或启动参数, +可以执行对应的 :code:`pserver` 或 :code:`trainer` 角色。Fluid异步训练只支持pserver模式,异步训练和 `同步训练 <../distributed/sync_training.html>`_ 的主要差异在于:异步训练每个trainer的梯度是单独更新到参数上的, +而同步训练是所有trainer的梯度合并之后统一更新到参数上,因此,同步训练和异步训练的超参数需要分别调节。 + +pserver模式分布式异步训练 +====================== + +API详细使用方法参考 :ref:`cn_api_fluid_DistributeTranspiler` ,简单示例用法: + +.. code-block:: python + + config = fluid.DistributeTranspilerConfig() + # 配置策略config + config.slice_var_up = False + t = fluid.DistributeTranspiler(config=config) + t.transpile(trainer_id, + program=main_program, + pservers="192.168.0.1:6174,192.168.0.2:6174", + trainers=1, + sync_mode=False) + +以上参数说明请参考 `同步训练 <../distributed/sync_training.html>`_ + +需要注意的是:进行异步训练时,请修改 :code:`sync_mode` 的值 + +- :code:`sync_mode` : 是否是同步训练模式,默认为True,不传此参数也默认是同步训练模式,设置为False则为异步训练 diff --git a/doc/paddle/api_guides/low_level/distributed/async_training_en.rst b/doc/paddle/api_guides/low_level/distributed/async_training_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..d2646559163567759f0769b28729766fa34df551 --- /dev/null +++ b/doc/paddle/api_guides/low_level/distributed/async_training_en.rst @@ -0,0 +1,32 @@ +.. _api_guide_async_training_en: + +#################################### +Asynchronous Distributed Training +#################################### + +Fluid supports parallelism asynchronous distributed training. :code:`DistributeTranspiler` converts a single node network configuration into a :code:`pserver` side program and the :code:`trainer` side program that can be executed on multiple machines. The user executes the same piece of code on different nodes. Depending on the environment variables or startup parameters, the corresponding :code:`pserver` or :code:`trainer` role can be executed. + +**Asynchronous distributed training in Fluid only supports the pserver mode** . The main difference between asynchronous training and `synchronous training <../distributed/sync_training_en.html>`_ is that the gradients of each trainer are asynchronously applied on the parameters, but in synchronous training, the gradients of all trainers must be combined first and then they are used to update the parameters. Therefore, the hyperparameters of synchronous training and asynchronous training need to be adjusted separately. + +Asynchronous distributed training in Pserver mode +================================================== + +For detailed API, please refer to :ref:`api_fluid_transpiler_DistributeTranspiler` . A simple example: + +.. code-block:: python + + config = fluid.DistributeTranspilerConfig() + #Configuring config policy + config.slice_var_up = False + t = fluid.DistributeTranspiler(config=config) + t.transpile(trainer_id, + program=main_program, + pservers="192.168.0.1:6174,192.168.0.2:6174", + trainers=1, + sync_mode=False) + +For the description of parameters above, please refer to `Sync Training <../distributed/sync_training_en.html>`_ . 
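+ +After :code:`transpile` , the same script typically branches into the :code:`pserver` or :code:`trainer` role. The following is only a minimal sketch; it assumes the role and the current endpoint are provided by your launcher through environment variables (the names :code:`TRAINING_ROLE` and :code:`PADDLE_CURRENT_ENDPOINT` used below are assumptions, not fixed by this guide): + +.. code-block:: python + + import os + + # Assumed environment variables; your launcher may use different names. + role = os.getenv("TRAINING_ROLE", "TRAINER") + current_endpoint = os.getenv("PADDLE_CURRENT_ENDPOINT", "192.168.0.1:6174") + + exe = fluid.Executor(fluid.CPUPlace()) + if role == "PSERVER": + # Build and run the parameter server side program. + pserver_prog = t.get_pserver_program(current_endpoint) + pserver_startup = t.get_startup_program(current_endpoint, pserver_prog) + exe.run(pserver_startup) + exe.run(pserver_prog) + else: + # Build the trainer side program and train as usual. + trainer_prog = t.get_trainer_program() + exe.run(fluid.default_startup_program()) + # feed data and call exe.run(trainer_prog, ...) in the training loop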
+ +Note that when performing asynchronous training, please modify the value of :code:`sync_mode` . + +- :code:`sync_mode` : Whether it is synchronous training mode, the default is True. If you do not pass this parameter, the default is synchronous training mode. If it is set to False, it is asynchronous training. diff --git a/doc/paddle/api_guides/low_level/distributed/cluster_train_data_cn.rst b/doc/paddle/api_guides/low_level/distributed/cluster_train_data_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c4c7e22958bd337b54658e632c37f421b0b66536 --- /dev/null +++ b/doc/paddle/api_guides/low_level/distributed/cluster_train_data_cn.rst @@ -0,0 +1,64 @@ +.. _api_guide_cluster_train_data: + +#################### +分布式训练reader准备 +#################### + +一个数据并行的分布式训练任务通常会含有多个训练进程,每个训练进程处理整个数据集中的一部分,根据当前进程的唯一序号(trainer_id)以及训练进程总数(trainers)可以决定当前训练进程应该读取哪一部分数据。 + +实现 cluster_reader 来读取分布式训练数据集 +---------------------------------------- + +比较通用的方法,可以实现一个 cluster_reader, 根据训练进程数量以及进程序号决定读取哪些 example: + + .. code-block:: python + + def cluster_reader(reader, trainers, trainer_id): + def reader_creator(): + for idx, data in enumerate(reader()): + if idx % trainers == trainer_id: + yield data + return reader + + trainers = int(os.getenv("PADDLE_TRAINERS", "1")) + trainer_id = int(os.getenv("PADDLE_TRAINER_ID", "0")) + train_reader = cluster_reader(paddle.dataset.mnist.train(), trainers, trainer_id) + +上述代码中,`trainers` 和 `trainer_id` 分别是训练进程总数和当前训练进程的序号,可以通过环境变量或者参数的方式传递给 Python 程序。 + +预先切分训练文件 +----------------- + +由于使用 `cluster_reader` 依然会读取全量数据,对于训练进程比较多的任务,会造成IO资源的浪费、影响训练性能。另一种方法是可以将训练数据切分成多个小文件,每个进程处理其中的一部分文件, +例如在 Linux 系统中可以使用 `split `_ 命令将训练数据切分成多个小文件: + + .. code-block:: bash + $ split -d -a 4 -d -l 100 housing.data cluster/housing.data. + $ find ./cluster + cluster/ + cluster/housing.data.0002 + cluster/housing.data.0003 + cluster/housing.data.0004 + cluster/housing.data.0000 + cluster/housing.data.0001 + cluster/housing.data.0005 + +数据切分好以后, 可以实现一个 file_dispatcher 函数,根据训练进程数量以及序号决定需要读取哪些文件: + + .. code-block:: python + + def file_dispatcher(files_pattern, trainers, trainer_id): + file_list = glob.glob(files_pattern) + ret_list = [] + for idx, f in enumerate(file_list): + if (idx + trainers) % trainers == trainer_id: + ret_list.append(f) + return ret_list + + trainers = int(os.getenv("PADDLE_TRAINERS", "1")) + trainer_id = int(os.getenv("PADDLE_TRAINER_ID", "0")) + files_pattern = "cluster/housing.data.*" + + my_files = file_dispatcher(files_pattern, triners, trainer_id) + +在上述例子中,`files_pattern` 是训练文件的 `glob 表达式 `_,一般可以用通配符来表示。 diff --git a/doc/paddle/api_guides/low_level/distributed/cluster_train_data_en.rst b/doc/paddle/api_guides/low_level/distributed/cluster_train_data_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..3c810417c5eb8085a8deefdaaabdffe94518c55a --- /dev/null +++ b/doc/paddle/api_guides/low_level/distributed/cluster_train_data_en.rst @@ -0,0 +1,64 @@ +.. _api_guide_cluster_train_data_en: + +############################################### +Preparing Data Reader for Distributed Training +############################################### + +A parallelism distributed training task usually contains multiple training processes. Each training process processes a part of the entire data set. The unique serial number (trainer_id) of the current process and the total number of training processes (trainers) determines which part of the data can be read by the current training process. 
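+
+For instance, with :code:`trainers=4`, the process whose :code:`trainer_id` is 1 keeps samples 1, 5, 9, and so on. A minimal sketch of this modulo split is shown below (the helper function is hypothetical; the environment variable names mirror the examples in the following sections):
+
+.. code-block:: python
+
+    import os
+
+    trainers = int(os.getenv("PADDLE_TRAINERS", "1"))      # total number of training processes
+    trainer_id = int(os.getenv("PADDLE_TRAINER_ID", "0"))  # index of the current process
+
+    def owned_by_me(sample_idx):
+        # Each trainer keeps every `trainers`-th sample, offset by its own id.
+        return sample_idx % trainers == trainer_id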
+ +Read datasets in distributed training by defining a cluster_reader +----------------------------------------------------------------- + +Generally, you can implement a cluster_reader, regarding the number of training processes and the process serial number(i.e. trainer_id) to decide which data to read: + + .. code-block:: python + + def cluster_reader(reader, trainers, trainer_id): + def reader_creator(): + for idx, data in enumerate(reader()): + if idx % trainers == trainer_id: + yield data + return reader + + trainers = int(os.getenv("PADDLE_TRAINERS", "1")) + trainer_id = int(os.getenv("PADDLE_TRAINER_ID", "0")) + train_reader = cluster_reader(paddle.dataset.mnist.train(), trainers, trainer_id) + +In the code above, `trainers` and `trainer_id` are respectively the total number of training processes and the serial number of the current training process, which can be passed to the Python program through environment variables or parameters. + +Split training files in advance +---------------------------------- + +Since `cluster_reader` is still used to read the full set of data, for tasks with more training processes, it will cause waste of IO resources and affect training performance. Another method is to divide the training data into multiple small files, and each process processes a part of the files. +For example, in a Linux system, the training data can be split into multiple small files using the `split `_ command: + + .. code-block:: bash + $ split -d -a 4 -d -l 100 housing.data cluster/housing.data. + $ find ./cluster + cluster/ + cluster/housing.data.0002 + cluster/housing.data.0003 + cluster/housing.data.0004 + cluster/housing.data.0000 + cluster/housing.data.0001 + cluster/housing.data.0005 + +After the data is split, you can define a file_dispatcher function that determines which files need to be read based on the number of training processes and the serial number: + + .. code-block:: python + + def file_dispatcher(files_pattern, trainers, trainer_id): + file_list = glob.glob(files_pattern) + ret_list = [] + for idx, f in enumerate(file_list): + if (idx + trainers) % trainers == trainer_id: + ret_list.append(f) + return ret_list + + trainers = int(os.getenv("PADDLE_TRAINERS", "1")) + trainer_id = int(os.getenv("PADDLE_TRAINER_ID", "0")) + files_pattern = "cluster/housing.data.*" + + my_files = file_dispatcher(files_pattern, triners, trainer_id) + +In the example above, `files_pattern` is a `glob expression `_ of the training file and can generally be represented by a wildcard. diff --git a/doc/paddle/api_guides/low_level/distributed/index.rst b/doc/paddle/api_guides/low_level/distributed/index.rst new file mode 100644 index 0000000000000000000000000000000000000000..40026707b7cd8434d8e884ea0603303f215452f7 --- /dev/null +++ b/doc/paddle/api_guides/low_level/distributed/index.rst @@ -0,0 +1,11 @@ +============= +分布式训练 +============= + +.. toctree:: + :maxdepth: 1 + + sync_training.rst + async_training.rst + large_scale_sparse_feature_training.rst + cluster_train_data_cn.rst diff --git a/doc/paddle/api_guides/low_level/distributed/index_en.rst b/doc/paddle/api_guides/low_level/distributed/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..a77fb47375aeba84bf09283548bbbab8da179529 --- /dev/null +++ b/doc/paddle/api_guides/low_level/distributed/index_en.rst @@ -0,0 +1,13 @@ +==================== +Distributed Training +==================== + +.. 
toctree:: + :maxdepth: 1 + + sync_training_en.rst + async_training_en.rst + large_scale_sparse_feature_training_en.rst + cluster_train_data_en.rst + + diff --git a/doc/paddle/api_guides/low_level/distributed/large_scale_sparse_feature_training.rst b/doc/paddle/api_guides/low_level/distributed/large_scale_sparse_feature_training.rst new file mode 100644 index 0000000000000000000000000000000000000000..6ba7a716b2c05addba5b2960761b52bc4931dbdb --- /dev/null +++ b/doc/paddle/api_guides/low_level/distributed/large_scale_sparse_feature_training.rst @@ -0,0 +1,44 @@ +.. _api_guide_large_scale_sparse_feature_training: + +################### +大规模稀疏特征模型训练 +################### + + +模型配置和训练 +============= + +embedding被广泛应用在各种网络结构中,尤其是文本处理相关的模型。在某些场景,例如推荐系统或者搜索引擎中, +embedding的feature id可能会非常多,当feature id达到一定数量时,embedding参数会变得很大, +会带来两个问题: + +1. 单机内存由于无法存放如此巨大的embedding参数,导致无法训练; +2. 普通的训练模式每一轮迭代都需要同步完整的参数,参数太大会让通信变得非常慢,进而影响训练速度。 + +Fluid支持千亿量级超大规模稀疏特征embedding的训练,embedding参数只会保存在parameter server上,通过 +参数prefetch和梯度稀疏更新的方法,大大减少通信量,提高通信速度。 + +该功能只对分布式训练有效,单机无法使用。 +需要配合 `稀疏更新 <../layers/sparse_update.html>`_ 一起使用。 + +使用方法:在配置embedding的时候,加上参数 :code:`is_distributed=True` 以及 :code:`is_sparse=True` 即可。 +参数 :code:`dict_size` 定义数据中总的id的数量,id可以是int64范围内的任意值,只要总id个数小于等于dict_size就可以支持。 +所以配置之前需要预估一下数据中总的feature id的数量。 + +.. code-block:: python + + emb = fluid.layers.embedding( + is_distributed=True, + input=input, + size=[dict_size, embedding_width], + is_sparse=True, + is_distributed=True) + + +模型存储和预测 +============= + +当特征数量达到千亿的时候,参数量很大,单机已经无法存下,所以模型的存储和加载都和普通模式不同: + +1. 普通模式下,参数是在trainer端保存和加载的; +2. 分布式模式下,参数的保存和加载,都是在pserver端进行,每个pserver只保存和加载该pserver自身对应部分的参数 diff --git a/doc/paddle/api_guides/low_level/distributed/large_scale_sparse_feature_training_en.rst b/doc/paddle/api_guides/low_level/distributed/large_scale_sparse_feature_training_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..932697f8a8b3cf558fd54eb67e2747d533a66eab --- /dev/null +++ b/doc/paddle/api_guides/low_level/distributed/large_scale_sparse_feature_training_en.rst @@ -0,0 +1,39 @@ +.. _api_guide_large_scale_sparse_feature_training_en: + +################################################### +Training of Models with Large Scale Sparse Features +################################################### + + +Model configuration and training +================================== + +Embedding is widely used in various network structures, especially in the text processing related models. In some scenarios, such as recommendation systems or search engines, the number of feature ids of embedding may be very large. When the feature id reaches a certain amount, the embedding parameter will become very large, which will bring two problems: + +1. The single-machine memory cannot be trained because it cannot store such huge embedding parameters. +2. The normal training mode needs to synchronize the complete parameters for each iteration. If the parameters are too large, the communication will become very slow, which will affect the training speed. + +Fluid supports the training of hundreds of millions of large-scale sparse features embedding. The embedding parameter is only saved on the parameter server. Through the parameter prefetch and gradient sparse update method, the communication volume is greatly reduced and the communication speed is improved. + +This function is only valid for distributed training and cannot be used in single node training. It need to work with `sparse update <../layers/sparse_update_en.html>`_. 
+ +How to use: When configuring embedding, add the parameters :code:`is_distributed=True` and :code:`is_sparse=True`. +Parameter :code:`dict_size` defines the total number of ids in the data. The id can be any value in the int64 range. As long as the total number of ids is less than or equal to dict_size, it can be supported. So before you configure, you need to estimate the total number of feature ids in the data. + +.. code-block:: python + + emb = fluid.layers.embedding( + input=input, + size=[dict_size, embedding_width], + is_sparse=True, + is_distributed=True) + + +Model storage and inference +=============================== + +When the number of features reaches 100 billion, the parameters are very large, and a single machine cannot hold such a huge number of parameters. As a result, the model is stored and loaded differently from the normal mode: + +1. In normal mode, parameters are saved and loaded on the trainer side; +2. In distributed mode, all the parameters are saved and loaded on the pserver side. Each pserver only saves and loads the parameters corresponding to the pserver itself. diff --git a/doc/paddle/api_guides/low_level/distributed/sync_training.rst b/doc/paddle/api_guides/low_level/distributed/sync_training.rst new file mode 100644 index 0000000000000000000000000000000000000000..1ea58c39706f8903d5a7b39304fd870baa95fbee --- /dev/null +++ b/doc/paddle/api_guides/low_level/distributed/sync_training.rst @@ -0,0 +1,83 @@ +.. _api_guide_sync_training: + +############ +分布式同步训练 +############ + +Fluid支持数据并行的分布式同步训练,API使用 :code:`DistributeTranspiler` 将单机网络配置转换成可以多机执行的 +:code:`pserver` 端程序和 :code:`trainer` 端程序。用户在不同的节点执行相同的一段代码,根据环境变量或启动参数, +可以执行对应的 :code:`pserver` 或 :code:`trainer` 角色。Fluid分布式同步训练同时支持pserver模式和NCCL2模式, +在API使用上有差别,需要注意。 + +pserver模式分布式训练 +=================== + +API详细使用方法参考 :ref:`DistributeTranspiler` ,简单实例用法: + +..
code-block:: python + + config = fluid.DistributeTranspilerConfig() + # 配置策略config + config.slice_var_up = False + t = fluid.DistributeTranspiler(config=config) + t.transpile(trainer_id, + program=main_program, + pservers="192.168.0.1:6174,192.168.0.2:6174", + trainers=1, + sync_mode=True) + +以上参数中: + +- :code:`trainer_id` : trainer节点的id,从0到n-1,n为当前训练任务中trainer节点的个数 +- :code:`program` : 被转换的 :code:`program` 默认使用 :code:`fluid.default_main_program()` +- :code:`pservers` : 当前训练任务中pserver节点的IP端口列表 +- :code:`trainers` : int类型,当前训练任务中trainer节点的个数。注意: + * pserver模式下,trainer节点个数可以和pserver节点个数不一致,比如使用20个pserver和50个trainer。在实际训练任务中,您可以通过调整pserver节点和trainer节点个数找到最佳性能 + * NCCL2模式中,此项参数是字符串,指定trainer节点的IP端口列表 +- :code:`sync_mode` : 是否是同步训练模式,默认为True,不传此参数也默认是同步训练模式 + + +其中,支持的config包括: + +- :code:`slice_var_up` : 配置是否切分一个参数到多个pserver上进行优化,默认开启。此选项适用于模型参数个数少,但需要使用大量节点的场景,有利于提升pserver端计算并行度 +- :code:`split_method` : 配置transpiler分配参数(或参数的切片)到多个pserver的方式,默认为"RoundRobin",也可以使用"HashName" +- :code:`min_block_size` : 如果配置了参数切分,指定最小Tensor的切分大小,防止RPC请求包过小,默认为8192,一般情况不需要调整此项参数 +- :code:`enable_dc_asgd` : 是否开启 :code:`DC-ASGD` 此选项在异步训练中生效,启用异步训练补偿算法 +- :code:`mode` : 可以选择"pserver"或"nccl2",指定使用pserver模式或NCCL2模式分布式训练 +- :code:`print_log` : 是否开启transpiler debug日志,此项为开发调试使用 + +通用环境变量配置: + +- :code:`FLAGS_rpc_send_thread_num` :int,指定RPC通信发送时线程的个数 +- :code:`FLAGS_rpc_get_thread_num` : int,指定RPC通信接受时线程的个数 +- :code:`FLAGS_rpc_prefetch_thread_num` : int,分布式lookup table执行RPC通信时,prefetch线程的个数 +- :code:`FLAGS_rpc_deadline` : int,RPC通信最长等待时间,单位为毫秒,默认180000 + + +NCCL2模式分布式训练 +================= + +基于NCCL2 (Collective Communication) 的多机同步训练模式,仅支持在GPU集群下进行。 +此部分详细API说明可以参考 :ref:`DistributeTranspiler` 。 + +注意:NCCL2模式下,集群不需要启动pserver,只需要启动多个trainer节点即可。 + +使用以下代码,将当前 :code:`Program` 转化成适用于NCCL2分布式计算的Fluid :code:`Program` : + +.. code-block:: python + + config = fluid.DistributeTranspilerConfig() + config.mode = "nccl2" + t = fluid.DistributeTranspiler(config=config) + t.transpile(trainer_id, + program=main_program, + startup_program=startup_program, + trainers="192.168.0.1:6174,192.168.0.2:6174", + current_endpoint="192.168.0.1:6174") + +其中: + +- :code:`trainer_id` : trainer节点的id,从0到n-1,n为当前训练任务中trainer节点的个数 +- :code:`program` 和 :code:`startup_program` : 分别为Fluid 模型的主配置program和初始化startup_program +- :code:`trainers` : 字符串类型,指定当前任务所有trainer的IP和端口号,仅用于NCCL2初始化(pserver模式中,此参数为int,指定trainer节点的个数) +- :code:`current_endpoint` : 当前任务的当前节点的IP和端口号 diff --git a/doc/paddle/api_guides/low_level/distributed/sync_training_en.rst b/doc/paddle/api_guides/low_level/distributed/sync_training_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..6b8cd734316f539a4fce642dc6d80d284bc085c2 --- /dev/null +++ b/doc/paddle/api_guides/low_level/distributed/sync_training_en.rst @@ -0,0 +1,80 @@ +.. _api_guide_sync_training_en: + +#################################### +Synchronous Distributed Training +#################################### + +Fluid supports parallelism distributed synchronous training, the API uses the :code:`DistributeTranspiler` to convert a single node network configuration into a :code:`pserver` side and :code:`trainer` side program that can be executed on multiple machines. The user executes the same piece of code on different nodes. Depending on the environment variables or startup parameters, you can execute the corresponding :code:`pserver` or :code:`trainer` role. Fluid distributed synchronous training supports both pserver mode and NCCL2 mode. 
There are differences in the use of the API, to which you need to pay attention. + +Distributed training in pserver mode +====================================== + +For API Reference, please refer to :ref:`DistributeTranspiler`. A simple example : + +.. code-block:: python + + config = fluid.DistributeTranspilerConfig() + #Configuring policy config + config.slice_var_up = False + t = fluid.DistributeTranspiler(config=config) + t.transpile(trainer_id, + program=main_program, + pservers="192.168.0.1:6174,192.168.0.2:6174", + trainers=1, + sync_mode=True) + +Among parameters above: + +- :code:`trainer_id` : The id of the trainer node, from 0 to n-1, where n is the number of trainer nodes in the current training task. +- :code:`program` : The :code:`program` to transpile, where :code:`fluid.default_main_program()` is used by default. +- :code:`pservers` : list of IP ports of the pserver nodes in the current training task. +- :code:`trainers` : int type, the number of trainer nodes in the current training task. Please note: + * In pserver mode, the number of trainer nodes can be different from the number of pserver nodes, such as 20 pservers and 50 trainers. In practical training tasks, you can get the best performance by adjusting the number of pserver nodes and trainer nodes. + * In NCCL2 mode, this parameter is a string specifying the IP port list of the trainer nodes. +- :code:`sync_mode` : Whether it is in synchronous training mode, the default is True. Even though this parameter is not set, it is the synchronous training mode by default. + + +Moreover, the supported config includes: + +- :code:`slice_var_up` : Configure whether to split a parameter to multiple pservers for optimization, which is enabled by default. This option is applicable to scenarios where the number of model parameters is small, but a large number of nodes are needed, which is beneficial to improve the computational parallelism of the pserver side. +- :code:`split_method` : Configure the method by which transpiler allocates parameters (or slices of parameters) to multiple pservers. The default is "RoundRobin". Alternatively, you can use "HashName". +- :code:`min_block_size` : If parameter splitting is configured, this option specifies the minimum size of the Tensor to prevent the RPC request packet from being too small. The default size is 8192. Generally, you do not need to adjust this parameter. +- :code:`enable_dc_asgd` : Whether to enable :code:`DC-ASGD`. This option is effective in asynchronous training, using asynchronous training with DC-ASGD enabled. +- :code:`mode` : You can choose "pserver" or "nccl2" to specify distributed training using pserver mode or NCCL2 mode. +- :code:`print_log` : Whether to enable the transpiler debug log. This item is used for development and debugging + +Configuration for general environment variables: + +- :code:`FLAGS_rpc_send_thread_num` : int, specifies the number of threads when RPC communication is sent +- :code:`FLAGS_rpc_get_thread_num` : int, specifies the number of threads when RPC communication is received +- :code:`FLAGS_rpc_prefetch_thread_num` : int, the number of prefetch threads when the distributed lookup table executes RPC communication +- :code:`FLAGS_rpc_deadline` : int, the longest waiting time for RPC communication, in milliseconds, default 180000 + + +Distributed training in NCCL2 mode +==================================== + +The multi-node synchronous training mode based on NCCL2 (Collective Communication) is only supported in the GPU cluster. 
+Detailed API descriptions in this section can be found in :ref:`api_fluid_transpiler_DistributeTranspiler` . + +Note: In NCCL2 mode, the cluster does not need to start pserver, but only need to start multiple trainer nodes. + +Use the following code to convert the current :code:`Program` to a Fluid :code:`Program` for NCCL2 distributed computing: + +.. code-block:: python + + Config = fluid.DistributeTranspilerConfig() + Config.mode = "nccl2" + t = fluid.DistributeTranspiler(config=config) + t.transpile(trainer_id, + program=main_program, + startup_program=startup_program, + trainers="192.168.0.1:6174,192.168.0.2:6174", + current_endpoint="192.168.0.1:6174") + +Among them: + +- :code:`trainer_id` : The id of the trainer node, from 0 to n-1, where n is the number of trainer nodes in the current training task. +- :code:`program` and :code:`startup_program` : respectively for the main configuration program of the Fluid model and the initializing startup_program +- :code:`trainers` : String type, specifies the IP and port numbers of all trainers of the current task, only used for NCCL2 initialization (in pserver mode, this parameter is of int type which specifies the number of trainer nodes) +- :code:`current_endpoint` : the IP and port numbers of the current task's node. diff --git a/doc/paddle/api_guides/low_level/executor.rst b/doc/paddle/api_guides/low_level/executor.rst new file mode 100644 index 0000000000000000000000000000000000000000..c4368d513182e4b4ea3b88448039e250b751ff3a --- /dev/null +++ b/doc/paddle/api_guides/low_level/executor.rst @@ -0,0 +1,27 @@ +.. _api_guide_executor: + +########## +执行引擎 +########## + +:code:`Executor` 实现了一个简易的执行器,所有的操作在其中顺序执行。你可以在Python脚本中运行 :code:`Executor` 。PaddlePaddle Fluid中有两种执行器。一种是 :code:`Executor` 默认的单线程执行器,另一种是并行计算执行器,在 :ref:`api_guide_parallel_executor` 中进行了解释。``Executor`` 和 :ref:`api_guide_parallel_executor` 的配置不同,这可能会给部分用户带来困惑。为使执行器更加灵活,我们引入了 :ref:`api_guide_compiled_program` , :ref:`api_guide_compiled_program` 用于把一个程序转换为不同的优化组合,可以通过 :code:`Executor` 运行。 + + :code:`Executor` 的逻辑非常简单。建议在调试阶段用 :code:`Executor` 在一台计算机上完整地运行模型,然后转向多设备或多台计算机计算。 + + :code:`Executor` 在构造时接受一个 :code:`Place` ,它既可能是 :ref:`api_fluid_CPUPlace` 也可能是 :ref:`api_fluid_CUDAPlace` 。 + +.. code-block:: python + # 首先创建Executor。 + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + exe = fluid.Executor(place) + # 运行启动程序仅一次。 + exe.run(fluid.default_startup_program()) + + # 直接运行主程序。 + loss, = exe.run(fluid.default_main_program(), + feed=feed_dict, + fetch_list=[loss.name]) +简单样例请参照 `basics_fit_a_line <../../beginners_guide/basics/fit_a_line/README.cn.html>`_ + +- 相关API : + - :ref:`cn_api_fluid_Executor` diff --git a/doc/paddle/api_guides/low_level/executor_en.rst b/doc/paddle/api_guides/low_level/executor_en.rst new file mode 100755 index 0000000000000000000000000000000000000000..eb92d2dd66510e982b8e8ca3b827ae66a695f2ce --- /dev/null +++ b/doc/paddle/api_guides/low_level/executor_en.rst @@ -0,0 +1,34 @@ +.. _api_guide_executor_en: + +################ +Executor +################ + +:code:`Executor` realizes a simple executor in which all operators will be executed in order. You can run :code:`Executor` in a Python script. There are two kinds of executors in PaddlePaddle Fluid. One is single-thread executor which is the default option for :code:`Executor` and the other is the parallel executor which is illustrated in :ref:`api_guide_parallel_executor_en` . 
The config of `Executor` and :ref:`api_guide_parallel_executor_en` is different, it may be a bit confusing for some users. To make the executor more facility, we introduce :ref:`api_guide_compiled_program_en` , :ref:`api_guide_compiled_program_en` is used to transform a program for various optimizations, and it can be run by :code:`Executor`. + +The logic of :code:`Executor` is very simple. It is suggested to thoroughly run the model with :code:`Executor` in debugging phase on one computer and then switch to mode of multiple devices or multiple computers to compute. + +:code:`Executor` receives a :code:`Place` at construction, which can either be :ref:`api_fluid_CPUPlace` or :ref:`api_fluid_CUDAPlace`. + +.. code-block:: python + + # First create the Executor. + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + exe = fluid.Executor(place) + + # Run the startup program once and only once. + exe.run(fluid.default_startup_program()) + + # Run the main program directly. + loss, = exe.run(fluid.default_main_program(), + feed=feed_dict, + fetch_list=[loss.name]) + + +For simple example please refer to `basics_fit_a_line <../../beginners_guide/basics/fit_a_line/README.html>`_ + +- Related API : + - :ref:`api_fluid_Executor` + + + diff --git a/doc/paddle/api_guides/low_level/inference.rst b/doc/paddle/api_guides/low_level/inference.rst new file mode 100644 index 0000000000000000000000000000000000000000..84dfbacfce27cf1cd0ea056c0452abff65dc0833 --- /dev/null +++ b/doc/paddle/api_guides/low_level/inference.rst @@ -0,0 +1,58 @@ +.. _api_guide_inference: + +######### +预测引擎 +######### + +预测引擎提供了存储预测模型 :ref:`cn_api_fluid_io_save_inference_model` 和加载预测模型 :ref:`cn_api_fluid_io_load_inference_model` 两个接口。 + +预测模型的存储格式 +================= + +预测模型的存储格式有两种,由上述两个接口中的 :code:`model_filename` 和 :code:`params_filename` 变量控制: + +- 参数保存到各个独立的文件,如设置 :code:`model_filename` 为 :code:`None` 、:code:`params_filename` 为 :code:`None` + + .. code-block:: bash + + ls recognize_digits_conv.inference.model/* + __model__ conv2d_1.w_0 conv2d_2.w_0 fc_1.w_0 conv2d_1.b_0 conv2d_2.b_0 fc_1.b_0 + +- 参数保存到同一个文件,如设置 :code:`model_filename` 为 :code:`None` 、:code:`params_filename` 为 :code:`__params__` + + .. code-block:: bash + + ls recognize_digits_conv.inference.model/* + __model__ __params__ + +存储预测模型 +=========== + +存储预测模型时,一般通过 :code:`fluid.io.save_inference_model` 接口对默认的 :code:`fluid.Program` 进行裁剪,只保留预测 :code:`predict_var` 所需部分。 +裁剪后的 program 会保存在指定路径 ./infer_model/__model__ 下,参数会保存到 ./infer_model 下的各个独立文件。 + +示例代码如下: + +.. code-block:: python + + exe = fluid.Executor(fluid.CPUPlace()) + path = "./infer_model" + fluid.io.save_inference_model(dirname=path, feeded_var_names=['img'], + target_vars=[predict_var], executor=exe) + + +加载预测模型 +=========== + +.. 
code-block:: python + + exe = fluid.Executor(fluid.CPUPlace()) + path = "./infer_model" + [inference_program, feed_target_names, fetch_targets] = + fluid.io.load_inference_model(dirname=path, executor=exe) + results = exe.run(inference_program, + feed={feed_target_names[0]: tensor_img}, + fetch_list=fetch_targets) + +在这个示例中,首先调用 :code:`fluid.io.load_inference_model` 接口,获得预测的 :code:`inference_program` 、输入数据的名称 :code:`feed_target_names` 和输出结果的 :code:`fetch_targets` ; +然后调用 :code:`executor` 执行预测的 :code:`inference_program` 获得预测结果。 diff --git a/doc/paddle/api_guides/low_level/inference_en.rst b/doc/paddle/api_guides/low_level/inference_en.rst new file mode 100755 index 0000000000000000000000000000000000000000..33bd5d12ad3c5fd27a6cd90f70bee126b10e1c1b --- /dev/null +++ b/doc/paddle/api_guides/low_level/inference_en.rst @@ -0,0 +1,58 @@ +.. _api_guide_inference_en: + +################# +Inference Engine +################# + +Inference engine provides interfaces to save inference model :ref:`api_fluid_io_save_inference_model` and load inference model :ref:`api_fluid_io_load_inference_model` . + +Format of Saved Inference Model +===================================== + +There are two formats of saved inference model, which are controlled by :code:`model_filename` and :code:`params_filename` parameters in the two interfaces above. + +- Parameters are saved into independent separate files, such as :code:`model_filename` set as :code:`None` and :code:`params_filename` set as :code:`None` + + .. code-block:: bash + + ls recognize_digits_conv.inference.model/* + __model__ conv2d_1.w_0 conv2d_2.w_0 fc_1.w_0 conv2d_1.b_0 conv2d_2.b_0 fc_1.b_0 + +- Parameters are saved into the same file, such as :code:`model_filename` set as :code:`None` and :code:`params_filename` set as :code:`__params__` + + .. code-block:: bash + + ls recognize_digits_conv.inference.model/* + __model__ __params__ + +Save Inference model +=============================== + +To save an inference model, we normally use :code:`fluid.io.save_inference_model` to tailor the default :code:`fluid.Program` and only keep the parts useful for predicting :code:`predict_var`. +After being tailored, :code:`program` will be saved under :code:`./infer_model/__model__` while the parameters will be saved into independent files under :code:`./infer_model` . + +Sample Code: + +.. code-block:: python + + exe = fluid.Executor(fluid.CPUPlace()) + path = "./infer_model" + fluid.io.save_inference_model(dirname=path, feeded_var_names=['img'], + target_vars=[predict_var], executor=exe) + + +Load Inference Model +===================== + +.. code-block:: python + + exe = fluid.Executor(fluid.CPUPlace()) + path = "./infer_model" + [inference_program, feed_target_names, fetch_targets] = + fluid.io.load_inference_model(dirname=path, executor=exe) + results = exe.run(inference_program, + feed={feed_target_names[0]: tensor_img}, + fetch_list=fetch_targets) + +In this example, at first we call :code:`fluid.io.load_inference_model` to get inference :code:`inference_program` , :code:`feed_target_names`-name of input data and :code:`fetch_targets` of output; +then call :code:`executor` to run inference :code:`inference_program` to get inferred result. 
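+
+If you prefer the single-file parameter format described at the beginning of this guide, the same pair of calls can be used with :code:`model_filename` and :code:`params_filename` set explicitly. A short sketch follows; the file names are just the conventional ones from the example above, not a requirement.
+
+.. code-block:: python
+
+    exe = fluid.Executor(fluid.CPUPlace())
+    path = "./infer_model"
+
+    # Save all parameters into a single "__params__" file.
+    fluid.io.save_inference_model(dirname=path, feeded_var_names=['img'],
+                                  target_vars=[predict_var], executor=exe,
+                                  model_filename='__model__',
+                                  params_filename='__params__')
+
+    # Load them back with the same file names.
+    [inference_program, feed_target_names, fetch_targets] = \
+        fluid.io.load_inference_model(dirname=path, executor=exe,
+                                      model_filename='__model__',
+                                      params_filename='__params__')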
diff --git a/doc/paddle/api_guides/low_level/layers/activations.rst b/doc/paddle/api_guides/low_level/layers/activations.rst new file mode 100644 index 0000000000000000000000000000000000000000..4f193cb6f7e4e6263f4333466c9944774ac15c6d --- /dev/null +++ b/doc/paddle/api_guides/low_level/layers/activations.rst @@ -0,0 +1,28 @@ +.. _api_guide_activations: + +#### +激活函数 +#### + +激活函数将非线性的特性引入到神经网络当中。 + +PaddlePaddle Fluid 对大部分的激活函数进行了支持,其中有: + +:ref:`cn_api_fluid_layers_relu`, :ref:`cn_api_fluid_layers_tanh`, :ref:`cn_api_fluid_layers_sigmoid`, :ref:`cn_api_fluid_layers_elu`, :ref:`cn_api_fluid_layers_relu6`, :ref:`cn_api_fluid_layers_pow`, :ref:`cn_api_fluid_layers_stanh`, :ref:`cn_api_fluid_layers_hard_sigmoid`, :ref:`cn_api_fluid_layers_swish`, :ref:`cn_api_fluid_layers_prelu`, :ref:`cn_api_fluid_layers_brelu`, :ref:`cn_api_fluid_layers_leaky_relu`, :ref:`cn_api_fluid_layers_soft_relu`, :ref:`cn_api_fluid_layers_thresholded_relu`, :ref:`cn_api_fluid_layers_maxout`, :ref:`cn_api_fluid_layers_logsigmoid`, :ref:`cn_api_fluid_layers_hard_shrink`, :ref:`cn_api_fluid_layers_softsign`, :ref:`cn_api_fluid_layers_softplus`, :ref:`cn_api_fluid_layers_tanh_shrink`, :ref:`cn_api_fluid_layers_softshrink`, :ref:`cn_api_fluid_layers_exp`。 + + +**Fluid提供了两种使用激活函数的方式:** + +- 如果一个层的接口提供了 :code:`act` 变量(默认值为None),我们可以通过该变量指定该层的激活函数类型。该方式支持常见的激活函数: :code:`relu`, :code:`tanh`, :code:`sigmoid`, :code:`identity`。 + +.. code-block:: python + + conv2d = fluid.layers.conv2d(input=data, num_filters=2, filter_size=3, act="relu") + + +- Fluid为每个Activation提供了接口,我们可以显式的对它们进行调用。 + +.. code-block:: python + + conv2d = fluid.layers.conv2d(input=data, num_filters=2, filter_size=3) + relu1 = fluid.layers.relu(conv2d) diff --git a/doc/paddle/api_guides/low_level/layers/activations_en.rst b/doc/paddle/api_guides/low_level/layers/activations_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..0273d83e617ae1353ff1ddc2b850b18028e39831 --- /dev/null +++ b/doc/paddle/api_guides/low_level/layers/activations_en.rst @@ -0,0 +1,49 @@ +.. _api_guide_activations_en: + +################### +Activation Function +################### + +The activation function incorporates non-linearity properties into the neural network. + +PaddlePaddle Fluid supports most of the activation functions, including: + +:ref:`api_fluid_layers_relu`, +:ref:`api_fluid_layers_tanh`, +:ref:`api_fluid_layers_sigmoid`, +:ref:`api_fluid_layers_elu`, +:ref:`api_fluid_layers_relu6`, +:ref:`api_fluid_layers_pow`, +:ref:`api_fluid_layers_stanh`, +:ref:`api_fluid_layers_hard_sigmoid`, +:ref:`api_fluid_layers_swish`, +:ref:`api_fluid_layers_prelu`, +:ref:`api_fluid_layers_brelu`, +:ref:`api_fluid_layers_leaky_relu`, +:ref:`api_fluid_layers_soft_relu`, +:ref:`api_fluid_layers_thresholded_relu`, +:ref:`api_fluid_layers_maxout`, +:ref:`api_fluid_layers_logsigmoid`, +:ref:`api_fluid_layers_hard_shrink`, +:ref:`api_fluid_layers_softsign`, +:ref:`api_fluid_layers_softplus`, +:ref:`api_fluid_layers_tanh_shrink`, +:ref:`api_fluid_layers_softshrink`, +:ref:`api_fluid_layers_exp`. + + +**Fluid provides two ways to use the activation function:** + +- If a layer interface provides :code:`act` variables (default None), we can specify the type of layer activation function through this parameter. This mode supports common activation functions :code:`relu`, :code:`tanh`, :code:`sigmoid`, :code:`identity`. + +.. 
code-block:: python + + conv2d = fluid.layers.conv2d(input=data, num_filters=2, filter_size=3, act="relu") + + +- Fluid provides an interface for each Activation, and we can explicitly call it. + +.. code-block:: python + + conv2d = fluid.layers.conv2d(input=data, num_filters=2, filter_size=3) + relu1 = fluid.layers.relu(conv2d) diff --git a/doc/paddle/api_guides/low_level/layers/control_flow.rst b/doc/paddle/api_guides/low_level/layers/control_flow.rst new file mode 100644 index 0000000000000000000000000000000000000000..a213a6e2e22a617f505668aecb6eef3d7567f314 --- /dev/null +++ b/doc/paddle/api_guides/low_level/layers/control_flow.rst @@ -0,0 +1,58 @@ +.. _api_guide_control_flow: + +###### +控制流 +###### + +在程序语言中,控制流(control flow)决定了语句的执行顺序,常见的控制流包括顺序执行、分支和循环等。PaddlePaddle Fluid继承了这一概念,提供了多种控制流API, 以控制深度学习模型在训练或者预测过程中的执行逻辑。 + +IfElse +====== + +条件分支,允许对同一个batch的输入,根据给定的条件,分别选择 :code:`true_block` 或 :code:`false_block` 中的逻辑进行执行,执行完成之后再将两个分支的输出合并为同一个输出。通常,条件表达式可由 :ref:`cn_api_fluid_layers_less_than`, :ref:`cn_api_fluid_layers_equal` 等逻辑比较 API 产生。 + +请参考 :ref:`cn_api_fluid_layers_IfElse` + +**注意:** 强烈建议您使用新的OP :ref:`cn_api_fluid_layers_cond` 而不是 ``IfElse``。:ref:`cn_api_fluid_layers_cond` 的使用方式更简单,并且调用该OP所用的代码更少且功能与 ``IfElse`` 一样。 + +Switch +====== + +多分支选择结构,如同程序语言中常见的 :code:`switch-case` 声明, 其根据输入表达式的取值不同,选择不同的分支执行。具体来说,Fluid 所定义的 :code:`Switch` 控制流有如下特性: + +* case的条件是个bool类型的值,即在Program中是一个张量类型的Variable; +* 依次检查逐个case,选择第一个满足条件的case执行,完成执行后即退出所属的block; +* 如果所有case均不满足条件,会选择默认的case进行执行。 + +请参考 :ref:`cn_api_fluid_layers_Switch` + +**注意:** 强烈建议您使用新的OP :ref:`cn_api_fluid_layers_case` 而不是 ``Switch``。 :ref:`cn_api_fluid_layers_case` 的使用方式更简单,并且调用该OP所用的代码更少且功能与 ``Switch`` 一样。 + +While +===== + +While 循环,当条件判断为真时,循环执行 :code:`While` 控制流所属 :code:`block` 内的逻辑,条件判断为假时退出循环。与之相关的API有 + +* :ref:`cn_api_fluid_layers_increment` :累加API,通常用于对循环次数进行计数; +* :ref:`cn_api_fluid_layers_array_read` :从 :code:`LOD_TENSOR_ARRAY` 中指定的位置读入Variable,进行计算; +* :ref:`cn_api_fluid_layers_array_write` :将 Variable 写回到 :code:`LOD_TENSOR_ARRAY` 指定的位置,存储计算结果。 + +请参考 :ref:`cn_api_fluid_layers_While` + +**注意:** 强烈建议您使用新的OP :ref:`cn_api_fluid_layers_while_loop` 而不是 ``While``。 :ref:`cn_api_fluid_layers_while_loop` 的使用方式更简单,并且调用该OP所用的代码更少且功能与 ``While`` 一样。 + +DynamicRNN +========== + +即动态RNN,可处理一个batch不等长的序列数据,其接受 :code:`lod_level=1` 的 Variable 作为输入,在 :code:`DynamicRNN` 的 :code:`block` 内,用户需自定义RNN的单步计算逻辑。在每一个时间步,用户可将需记忆的状态写入到 :code:`DynamicRNN` 的 :code:`memory` 中,并将需要的输出写出到其 :code:`output` 中。 + +:ref:`cn_api_fluid_layers_sequence_last_step` 可获取 :code:`DynamicRNN` 最后一个时间步的输出。 + +请参考 :ref:`cn_api_fluid_layers_DynamicRNN` + +StaticRNN +========= + +即静态RNN,只能处理固定长度的序列数据,接受 :code:`lod_level=0` 的 Variable 作为输入。与 :code:`DynamicRNN` 类似,在RNN的每单个时间步,用户需自定义计算逻辑,并可将状态和输出写出。 + +请参考 :ref:`cn_api_fluid_layers_StaticRNN` diff --git a/doc/paddle/api_guides/low_level/layers/control_flow_en.rst b/doc/paddle/api_guides/low_level/layers/control_flow_en.rst new file mode 100755 index 0000000000000000000000000000000000000000..ecc42e30f79b9df196eacc45f5ee6a39ddc913ea --- /dev/null +++ b/doc/paddle/api_guides/low_level/layers/control_flow_en.rst @@ -0,0 +1,59 @@ +.. api_guide_control_flow_en: + +############# +Control Flow +############# + +In programming languages, the control flow determines the order in which statements are executed. Common control flows contain sequential execution, branching, and looping. 
PaddlePaddle Fluid inherits this concept and provides a variety of control flow APIs to control the execution logic of the deep learning model during training or prediction. + +IfElse +====== + +Conditional branch, for the input of a batch, according to the given conditions, select the process in :code:`true_block` or :code:`false_block` to execute respectively, and then merge the outputs of the two branches into one after the execution. In general, conditional expressions can be generated by a logical comparison API such as :ref:`api_fluid_layers_less_than`, :ref:`api_fluid_layers_equal`. + +Please refer to :ref:`api_fluid_layers_IfElse` + +**Note:** A new OP :ref:`api_fluid_layers_cond` is highly recommended instead of ``IfElse`` . OP :ref:`api_fluid_layers_cond` is easier to use and is called with less code but does the same thing as ``IfElse`` . + +Switch +====== + +Switch, like the :code:`switch-case` declaration commonly found in programming languages, selects different branch to execute depending on the value of the input expression. Specifically, the :code:`Switch` control flow defined by Fluid has the following characteristics: + +* The condition of the case is a bool type value, which is a tensor type Variable in the Program; +* It checks each case one by one, selects the first case that satisfies the condition, and exits the block after completion of the execution; +* If all cases do not meet the conditions, the default case will be selected for execution. + +Please refer to :ref:`api_fluid_layers_Switch` + +**Note:** A new OP :ref:`api_fluid_layers_case` is highly recommended instead of ``Switch`` . OP :ref:`api_fluid_layers_case` is easier to use and is called with less code but does the same thing as ``Switch`` . + +While +===== + +When the condition is true, repeatedly execute logic in the :code:`block` which :code:`While` flow belongs to until the condition is judged to be false and the loop will be ended. The related APIs are as follows: + +* :ref:`api_fluid_layers_increment` : It is usually used to count the number of loops; +* :ref:`api_fluid_layers_array_read` : Reads Variable from the specified location in :code:`LOD_TENSOR_ARRAY` to perform calculations; +* :ref:`api_fluid_layers_array_write` : Writes the Variable back to the specified location in :code:`LOD_TENSOR_ARRAY` and stores the result of the calculation. + +Please refer to :ref:`api_fluid_layers_While` + +**Note**: A new OP :ref:`api_fluid_layers_while_loop` is highly recommended instead of ``While`` . OP :ref:`api_fluid_layers_while_loop` is easier to use and is called with less code but does the same thing as ``While`` . + + +DynamicRNN +========== + +Dynamic RNN can process a batch of unequal(variable)-length sequence data, which accepts the variable with :code:`lod_level=1` as input. In the :code:`block` of :code:`DynamicRNN`, the user needs to customize RNN's single-step calculation logic. At each time step, the user can write the state to be remembered to the :code:`memory` of :code:`DynamicRNN` and write the required output to its :code:`output`. + +:ref:`api_fluid_layers_sequence_last_step` gets the output of the last time step of :code:`DynamicRNN`. + +Please refer to :ref:`api_fluid_layers_DynamicRNN` + +StaticRNN +========= + +Static RNN can only process fixed-length sequence data, and accept Variable with :code:`lod_level=0` as input. Similar to :code:`DynamicRNN`, at each single time step of the RNN, the user needs to customize the calculation logic and export the status and output. 
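+
+A minimal sketch of the :code:`StaticRNN` step-definition pattern is shown below (closely following the official API example; the vocabulary size, hidden size and the :code:`fc` step logic are illustrative):
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+
+    vocab_size, hidden_size = 10000, 200
+    x = fluid.data(name="x", shape=[None, 1, 1], dtype="int64")
+    x_emb = fluid.layers.embedding(input=x, size=[vocab_size, hidden_size], dtype="float32")
+    # StaticRNN steps over the first dimension, so move the time axis to dim 0.
+    x_emb = fluid.layers.transpose(x_emb, perm=[1, 0, 2])
+
+    rnn = fluid.layers.StaticRNN()
+    with rnn.step():
+        word = rnn.step_input(x_emb)                 # input of the current time step
+        prev = rnn.memory(shape=[-1, hidden_size], batch_ref=word)
+        hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act="relu")
+        rnn.update_memory(prev, hidden)              # state carried to the next step
+        rnn.step_output(hidden)                      # per-step output
+
+    result = rnn()                                   # stacked outputs of all time steps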
+ +Please refer to :ref:`api_fluid_layers_StaticRNN` \ No newline at end of file diff --git a/doc/paddle/api_guides/low_level/layers/conv.rst b/doc/paddle/api_guides/low_level/layers/conv.rst new file mode 100644 index 0000000000000000000000000000000000000000..5a15e40349dad7d916baaa03755e7e7cd6bf0a27 --- /dev/null +++ b/doc/paddle/api_guides/low_level/layers/conv.rst @@ -0,0 +1,65 @@ +.. _api_guide_conv: + +##### +卷积 +##### + +卷积有两组输入:特征图和卷积核,依据输入特征和卷积核的形状、Layout不同、计算方式的不同,在Fluid里,有针对变长序列特征的一维卷积,有针对定长图像特征的二维(2D Conv)、三维卷积(3D Conv),同时也有卷积计算的逆向过程,下面先介绍Fluid里的2D/3D卷积,再来介绍序列卷积。 + + +2D/3D卷积 +============== + +1. 卷积输入参数: +--------------------- + +卷积需要依据滑动步长(stride)、填充长度(padding)、卷积核窗口大小(filter size)、分组数(groups)、扩张系数(dilation rate)来决定如何计算。groups最早在 `AlexNet `_ 中引入, 可以理解为将原始的卷积分为独立若干组卷积计算。 + + **注意**: 同cuDNN的方式,Fluid目前只支持在特征图上下填充相同的长度,左右也是。 + +- 输入输出Layout: + + 2D卷积输入特征的Layout为[N, C, H, W]或[N, H, W, C], N即batch size,C是通道数,H、W是特征的高度和宽度,输出特征和输入特征的Layout一致。(相应的3D卷积输入特征的Layout为[N, C, D, H, W]或[N, D, H, W, C],但 **注意**,Fluid的卷积当前只支持[N, C, H, W],[N, C, D, H, W]。) + +- 卷积核的Layout: + + Fluid中2D卷积的卷积核(也称权重)的Layout为[C_o, C_in / groups, f_h, f_w],C_o、C_in表示输出、输入通道数,f_h、f_w表示卷积核窗口的高度和宽度,按行序存储。(相应的3D卷积的卷积核Layout为[C_o, C_in / groups, f_d, f_h, d_w],同样按行序存储。) + +- 深度可分离卷积(depthwise separable convolution): + + 在深度可分离卷积中包括depthwise convolution和pointwise convolution两组,这两个卷积的接口和上述普通卷积接口相同。前者可以通过给普通卷积设置groups来做,后者通过设置卷积核filters的大小为1x1,深度可分离卷积减少参数的同时减少了计算量。 + + 对于depthwise convolution,可以设置groups等于输入通道数,此时,2D卷积的卷积核形状为[C_o, 1, f_h, f_w]。 + 对于pointwise convolution,卷积核的形状为[C_o, C_in, 1, 1]。 + + **注意**:Fluid针对depthwise convolution的GPU计算做了高度优化,您可以通过在 + :code:`fluid.layers.conv2d` 接口设置 :code:`use_cudnn=False` 来使用Fluid自身优化的CUDA程序。 + +- 空洞卷积(dilated convolution): + + 空洞卷积相比普通卷积而言,卷积核在特征图上取值时不在连续,而是间隔的,这个间隔数称作dilation,等于1时,即为普通卷积,空洞卷积相比普通卷积的感受野更大。 + +- API汇总: + - :ref:`cn_api_fluid_layers_conv2d` + - :ref:`cn_api_fluid_layers_conv3d` + - :ref:`cn_api_fluid_layers_conv2d_transpose` + - :ref:`cn_api_fluid_layers_conv3d_transpose` + + +1D序列卷积 +============== + +Fluid可以表示变长的序列结构,这里的变长是指不同样本的时间步(step)数不一样,通常是一个2D的Tensor和一个能够区分的样本长度的辅助结构来表示。假定,2D的Tensor的形状是shape,shape[0]是所有样本的总时间步数,shape[1]是序列特征的大小。 + +基于此数据结构的卷积在Fluid里称作序列卷积,也表示一维卷积。同图像卷积,序列卷积的输入参数有卷积核大小、填充大小、滑动步长,但与2D卷积不同的是,这些参数个数都为1。**注意**,目前仅支持stride为1的情况,输出序列的时间步数和输入序列相同。 + +假如:输入序列形状为(T, N), T即该序列的时间步数,N是序列特征大小;卷积核的上下文步长为K,输出序列长度为M,则卷积核权重形状为(K * N, M),输出序列形状为(T, M)。 + +另外,参考DeepSpeech,Fluid实现了行卷积row convolution, 或称 +`look ahead convolution `_ , +该卷积相比上述普通序列卷积可以减少参数。 + + +- API汇总: + - :ref:`cn_api_fluid_layers_sequence_conv` + - :ref:`cn_api_fluid_layers_row_conv` diff --git a/doc/paddle/api_guides/low_level/layers/conv_en.rst b/doc/paddle/api_guides/low_level/layers/conv_en.rst new file mode 100755 index 0000000000000000000000000000000000000000..fd02bde43d83376e4bf4153b5f4930ae540ecabe --- /dev/null +++ b/doc/paddle/api_guides/low_level/layers/conv_en.rst @@ -0,0 +1,58 @@ +.. _api_guide_conv_en: + +############# +Convolution +############# + +Convolution has two sets of inputs: feature maps and convolution kernels. Depending on the input features, the shape of the convolution kernel, the layout and the calculation method, in Fluid, there is a one-dimensional convolution for variable-length sequence features, two-dimensional (2D Conv) and three-dimensional convolution (3D Conv) for fixed-length image features. At the same time, there is also a reverse(backward) process of convolution calculation. 
The subsequent content describes the 2D/3D convolution in Fluid, and then introduces the sequence convolution. + + +2D/3D Convolution +================== + +1. Input parameters of convolution: +-------------------------------------- +How the convolution is computed is determined by the stride, padding, filter size, groups, and dilation rate. Groups were first introduced in `AlexNet `_ . Using groups can be thought of as splitting the original convolution into several independent group convolutions. + +**Note**: In the same way as cuDNN, Fluid currently only supports padding the top and bottom of a feature map with equal length, and likewise for the left and right. + +- The layout (shape) of input and output: + + The layout of the input feature of 2D convolution is [N, C, H, W] or [N, H, W, C], where N is the batch size, C is the number of channels, and H, W are the height and width of the feature. The layout of the output feature is the same as that of the input feature. (The layout of the input feature of 3D convolution is [N, C, D, H, W] or [N, D, H, W, C]. But **note**, Fluid convolution currently only supports [N, C, H, W] and [N, C, D, H, W].) + +- The layout of the convolution kernel: + + The layout of the 2D convolution kernel (also called weight) in Fluid is [C_o, C_in / groups, f_h, f_w], where C_o, C_in represent the number of output and input channels, and f_h, f_w represent the height and width of the filter, which are stored in row order. (The corresponding 3D convolution kernel layout is [C_o, C_in / groups, f_d, f_h, f_w], which is also stored in row order.) + +- Depthwise Separable Convolution: + + Depthwise Separable Convolution consists of a depthwise convolution and a pointwise convolution. The interfaces of these two convolutions are the same as the above normal convolution interfaces. The former can be performed by setting groups for ordinary convolutions. The latter can be realised by setting the size of the convolution kernel filters to 1x1. Depthwise Separable Convolution reduces the parameters as well as the volume of computation. + + For depthwise convolution, you can set groups equal to the number of input channels. At this time, the convolution kernel shape of the 2D convolution is [C_o, 1, f_h, f_w]. For pointwise convolution, the shape of the convolution kernel is [C_o, C_in, 1, 1]. + + **Note**: Fluid has highly optimized GPU computation for depthwise convolution. You can use Fluid's self-optimized CUDA program by setting :code:`use_cudnn=False` in the :code:`fluid.layers.conv2d` interface. + +- Dilated Convolution: + + Compared with ordinary convolution, in dilated convolution the convolution kernel does not read values from the feature map continuously, but at intervals. This interval is called dilation. When it is equal to 1, it becomes ordinary convolution. The receptive field of dilated convolution is larger than that of ordinary convolution. + + +- related API: + - :ref:`api_fluid_layers_conv2d` + - :ref:`api_fluid_layers_conv3d` + - :ref:`api_fluid_layers_conv2d_transpose` + - :ref:`api_fluid_layers_conv3d_transpose` + + +1D sequence convolution +========================= + +Fluid can represent a variable-length sequence structure. The variable length here means that the number of time steps of different samples is different. It is usually represented by a 2D Tensor and an auxiliary structure that can distinguish the sample lengths. Assume that the shape of the 2D Tensor is shape; shape[0] is the total number of time steps for all samples, and shape[1] is the size of the sequence feature.
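+
+As a concrete illustration of this representation (a sketch only; the feature size and sequence lengths are made up), two sequences of 3 and 2 time steps can be packed into a single LoDTensor as follows:
+
+.. code-block:: python
+
+    import numpy as np
+    import paddle.fluid as fluid
+
+    feature_size = 4
+    # 3 + 2 = 5 time steps in total, stacked along dimension 0.
+    data = np.random.rand(5, feature_size).astype("float32")
+    # The auxiliary structure records that the first sequence owns 3 steps
+    # and the second owns 2.
+    lod_tensor = fluid.create_lod_tensor(data, [[3, 2]], fluid.CPUPlace())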
+ +Convolution based on this data structure is called sequence convolution in Fluid and also represents one-dimensional convolution. Similar to image convolution, the input parameters of the sequence convolution contain the filter size, the padding size, and the size of sliding stride. But unlike the 2D convolution, the number of each parameter is 1. **Note**, it currently only supports stride = 1. The output sequence has the same number of time steps as the input sequence. + +Suppose the input sequence shape is (T, N), while T is the number of time steps of the sequence, and N is the sequence feature size; The convolution kernel has a context stride of K. The length of output sequence is M, the shape of convolution kernel weight is (K * N, M), and the shape of output sequence is (T, M). + +- related API: + - :ref:`api_fluid_layers_sequence_conv` + - :ref:`api_fluid_layers_row_conv` \ No newline at end of file diff --git a/doc/paddle/api_guides/low_level/layers/data_feeder.rst b/doc/paddle/api_guides/low_level/layers/data_feeder.rst new file mode 100644 index 0000000000000000000000000000000000000000..495869bdafd9b644b1f66cb2adae99dbcebf976f --- /dev/null +++ b/doc/paddle/api_guides/low_level/layers/data_feeder.rst @@ -0,0 +1,44 @@ +.. _api_guide_data_feeder: + +使用DataFeeder传入训练/预测数据 +################################### + +Fluid提供 :code:`DataFeeder` 类,将numpy array等数据转换为 :code:`LoDTensor` 类型传入训练/预测网络。 + +用户创建 :code:`DataFeeder` 对象的方式为: + +.. code-block:: python + + import paddle.fluid as fluid + + image = fluid.layers.data(name='image', shape=[-1, 3, 224, 224], dtype='float32') + label = fluid.layers.data(name='label', shape=[-1, 1], dtype='int64') + place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + feeder = fluid.DataFeeder(feed_list=[image, label], place=place) + +其中,:code:`feed_list` 参数为变量列表,这些变量由 :code:`fluid.layers.data()` 创建, +:code:`place` 参数表示应将Python端传入的numpy array等数据转换为GPU端或是CPU端的 :code:`LoDTensor` 。 +创建 :code:`DataFeeder` 对象后,用户可调用其 :code:`feed(iterable)` 方法将用户传入的 +:code:`iterable` 数据转换为 :code:`LoDTensor`。 + +:code:`iterable` 应为Python List或Tuple类型对象,且 :code:`iterable` 的每个元素均为长度为N的 +Python List或Tuple类型对象,其中N为创建 :code:`DataFeeder` 对象时传入的 :code:`feed_list` 变量个数。 + +:code:`iterable` 的具体格式为: + +.. code-block:: python + + iterable = [ + (image_1, label_1), + (image_2, label_2), + ... + (image_n, label_n) + ] + +其中,:code:`image_i` 与 :code:`label_i` 均为numpy array类型数据。若传入数据的维度为[1],如 :code:`label_i`, +则可传入Python int、float等类型数据。 :code:`image_i` 与 :code:`label_i` 的数据类型和维度不必 +与 :code:`fluid.layers.data()` 创建时指定的 :code:`dtype` 和 :code:`shape` 完全一致,:code:`DataFeeder` 内部 +会完成数据类型和维度的转换。若 :code:`feed_list` 中的变量的 :code:`lod_level` 不为零,则Fluid会将经过维度转换后的 +:code:`iterable` 中每行数据的第0维作为返回结果的 :code:`LoD`。 + +具体使用方法请参见 :ref:`cn_api_fluid_DataFeeder` 。 \ No newline at end of file diff --git a/doc/paddle/api_guides/low_level/layers/data_feeder_en.rst b/doc/paddle/api_guides/low_level/layers/data_feeder_en.rst new file mode 100755 index 0000000000000000000000000000000000000000..053aefd4a30a64353b14db6e5ac2baa83d83ab51 --- /dev/null +++ b/doc/paddle/api_guides/low_level/layers/data_feeder_en.rst @@ -0,0 +1,41 @@ +.. _api_guide_data_feeder_en: + +Feed training/inference data with DataFeeder +######################################################## + +Fluid provides the :code:`DataFeeder` class, which converts data types such as numpy array into a :code:`LoDTensor` type to feed the training/inference network. + +To create a :code:`DataFeeder` object: + +.. 
code-block:: python + + import paddle.fluid as fluid + + image = fluid.layers.data(name='image', shape=[-1, 3, 224, 224], dtype='float32') + label = fluid.layers.data(name='label', shape=[-1, 1], dtype='int64') + place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + feeder = fluid.DataFeeder(feed_list=[image, label], place=place) + +The :code:`feed_list` parameter is a list of variables created by :code:`fluid.layers.data()` . +The :code:`place` parameter indicates that data such as numpy array passed in from the Python side should be converted to GPU or CPU :code:`LoDTensor`. +After creating the :code:`DataFeeder` object, the user can call the :code:`feed(iterable)` method to convert :code:`iterable` data given by user into :code:`LoDTensor` . + +:code:`iterable` should be a object of Python List or a Tuple type, and each element in :code:`iterable` is a Python List of length N or Tuple type object, where N is the number of :code:`feed_list` variables passed in when the :code:`DataFeeder` object is created. + +The concrete format of :code:`iterable` is: + +.. code-block:: python + + iterable = [ + (image_1, label_1), + (image_2, label_2), + ... + (image_n, label_n) + ] + +:code:`image_i` and :code:`label_i` are both numpy array data. If the dimension of the input data is [1], such as :code:`label_i`, +you can feed Python int, float, and other types of data. The data types and dimensions of :code:`image_i` and :code:`label_i` are not necessarily +the same as :code:`dtype` and :code:`shape` specified at :code:`fluid.layers.data()`. :code:`DataFeeder` internally +performs the conversion of data types and dimensions. If the :code:`lod_level` of the variable in :code:`feed_list` is not zero, in Fluid, the 0th dimension of each row in the dimensionally converted :code:`iterable` will be returned as :code:`LoD` . + +Read :ref:`api_fluid_DataFeeder` for specific usage. \ No newline at end of file diff --git a/doc/paddle/api_guides/low_level/layers/data_in_out.rst b/doc/paddle/api_guides/low_level/layers/data_in_out.rst new file mode 100644 index 0000000000000000000000000000000000000000..4517c17fbe4384391d8bad0fead0dd8ad6071246 --- /dev/null +++ b/doc/paddle/api_guides/low_level/layers/data_in_out.rst @@ -0,0 +1,33 @@ +.. _api_guide_data_in_out: + +数据输入输出 +############### + + +数据输入 +------------- + +Fluid支持两种数据输入方式,包括: + +1. Python Reader: 纯Python的Reader。用户在Python端定义 :code:`fluid.layers.data` 层构建网络,并通过 +:code:`executor.run(feed=...)` 的方式读入数据。数据读取和模型训练/预测的过程是同步进行的。 + +2. 
PyReader: 高效灵活的C++ Reader接口。PyReader内部维护容量为 :code:`capacity` 的队列(队列容量由 +:code:`fluid.layers.py_reader` 接口中的 :code:`capacity` 参数设置),Python端调用队列的 :code:`push` +方法送入训练/预测数据,C++端的训练/预测程序调用队列的 :code:`pop` 方法取出Python端送入的数据。PyReader可与 +:code:`double_buffer` 配合使用,实现数据读取和训练/预测的异步执行。 + +具体使用方法请参考 :ref:`cn_api_fluid_layers_py_reader`。 + + +数据输出 +------------ + +Fluid支持在训练/预测阶段获取当前batch的数据。 + +用户可通过 :code:`executor.run(fetch_list=[...], return_numpy=...)` 的方式 +fetch期望的输出变量,通过设置 :code:`return_numpy` 参数设置是否将输出数据转为numpy array。 +若 :code:`return_numpy` 为 :code:`False` ,则返回 :code:`LoDTensor` 类型数据。 + +具体使用方式请参考相关API文档 :ref:`cn_api_fluid_executor_Executor` 和 +:ref:`cn_api_fluid_ParallelExecutor`。 \ No newline at end of file diff --git a/doc/paddle/api_guides/low_level/layers/data_in_out_en.rst b/doc/paddle/api_guides/low_level/layers/data_in_out_en.rst new file mode 100755 index 0000000000000000000000000000000000000000..a5c8d16a16f64a2cf4e219c354e96f50be5a0f53 --- /dev/null +++ b/doc/paddle/api_guides/low_level/layers/data_in_out_en.rst @@ -0,0 +1,30 @@ +.. _api_guide_data_in_out_en: + +Data input and output +###################### + + +Data input +------------- + +Fluid supports two methods for data input, including: + +1. Python Reader: A pure Python Reader. The user defines the :code:`fluid.layers.data` layer on the Python side and builds the network. +Then, read the data by calling :code:`executor.run(feed=...)` . The process of data reading and model training/inference is performed simultaneously. + +2. PyReader: An Efficient and flexible C++ Reader interface. PyReader internally maintains a queue with size of :code:`capacity` (queue capacity is determined by +:code:`capacity` parameter in the :code:`fluid.layers.py_reader` interface ). Python side call queue :code:`push` to feed the training/inference data, and the C++ side training/inference program calls the :code:`pop` method to retrieve the data sent by the Python side. PyReader can work in conjunction with :code:`double_buffer` to realize asynchronous execution of data reading and model training/inference. + +For details, please refer to :ref:`api_fluid_layers_py_reader`. + + +Data output +------------ + +Fluid supports obtaining data for the current batch in the training/inference phase. + +The user can fetch expected variables from :code:`executor.run(fetch_list=[...], return_numpy=...)` . User can determine whether to convert the output data to numpy array by setting the :code:`return_numpy` parameter. +If :code:`return_numpy` is :code:`False` , data of type :code:`LoDTensor` will be returned. + +For specific usage, please refer to the relevant API documentation :ref:`api_fluid_executor_Executor` and +:ref:`api_fluid_ParallelExecutor`. \ No newline at end of file diff --git a/doc/paddle/api_guides/low_level/layers/detection.rst b/doc/paddle/api_guides/low_level/layers/detection.rst new file mode 100644 index 0000000000000000000000000000000000000000..a904accd2d5951491422b3089afab75eeafb5944 --- /dev/null +++ b/doc/paddle/api_guides/low_level/layers/detection.rst @@ -0,0 +1,103 @@ +.. 
_api_guide_detection: + + +图像检测 +######### + +PaddlePaddle Fluid在图像检测任务中实现了多个特有的操作。以下分模型介绍各个api: + +通用操作 +------------- + +图像检测中的一些通用操作,是对检测框的一系列操作,其中包括: + +* 对检测框的编码,解码(box_coder):实现两种框之间编码和解码的转换。例如训练阶段对先验框和真实框进行编码得到训练目标值。API Reference 请参考 :ref:`cn_api_fluid_layers_box_coder` + +* 比较两个检测框并进行匹配: + + * iou_similarity:计算两组框的IOU值。API Reference 请参考 :ref:`cn_api_fluid_layers_iou_similarity` + + * bipartite_match:通过贪心二分匹配算法得到每一列中距离最大的一行。API Reference 请参考 :ref:`cn_api_fluid_layers_bipartite_match` + +* 根据检测框和标签得到分类和回归目标值(target_assign):通过匹配索引和非匹配索引得到目标值和对应权重。API Reference 请参考 :ref:`cn_api_fluid_layers_target_assign` + +* 对检测框进行后处理: + + * box_clip: 将检测框剪切到指定大小。API Reference 请参考 :ref:`cn_api_fluid_layers_box_clip` + + * multiclass_nms: 对边界框和评分进行多类非极大值抑制。API Reference 请参考 :ref:`cn_api_fluid_layers_multiclass_nms` + + +RCNN +------------- + +RCNN系列模型是两阶段目标检测器,其中包含`Faster RCNN `_,`Mask RCNN `_,相较于传统提取区域的方法,RCNN中RPN网络通过共享卷积层参数大幅提高提取区域的效率,并提出高质量的候选区域。RPN网络需要对输入anchor和真实值进行比较生成初选候选框,并对初选候选框分配分类和回归值,需要如下五个特有api: + +* rpn_target_assign:通过anchor和真实框为anchor分配RPN网络的分类和回归目标值。API Reference 请参考 :ref:`cn_api_fluid_layers_rpn_target_assign` + +* anchor_generator:为每个位置生成一系列anchor。API Reference 请参考 :ref:`cn_api_fluid_layers_anchor_generator` + +* generate_proposal_labels: 通过generate_proposals得到的候选框和真实框得到RCNN部分的分类和回归的目标值。API Reference 请参考 :ref:`cn_api_fluid_layers_generate_proposal_labels` + +* generate_proposals: 对RPN网络输出box解码并筛选得到新的候选框。API Reference 请参考 :ref:`cn_api_fluid_layers_generate_proposals` + +* generate_mask_labels: 通过generate_proposal_labels得到的RoI,和真实框对比后进一步筛选RoI并得到Mask分支的目标值。API Reference 请参考 :ref:`cn_api_fluid_layers_generate_mask_labels` + +FPN +------------- + +`FPN `_ 全称Feature Pyramid Networks, 采用特征金字塔做目标检测。 顶层特征通过上采样和低层特征做融合,并将FPN放在RPN网络中用于生成候选框,有效的提高检测精度,需要如下两种特有api: + +* collect_fpn_proposals: 拼接多层RoI,同时选择分数较高的RoI。API Reference 请参考 :ref:`cn_api_fluid_layers_collect_fpn_proposals` + +* distribute_fpn_proposals: 将多个RoI依据面积分配到FPN的多个层级中。API Reference 请参考 :ref:`cn_api_fluid_layers_distribute_fpn_proposals` + +SSD +---------------- + +`SSD `_ 全称Single Shot MultiBox Detector,是目标检测领域较新且效果较好的检测算法之一,具有检测速度快且检测精度高的特点。与两阶段的检测方法不同,单阶段目标检测并不进行区域推荐,而是直接从特征图回归出目标的边界框和分类概率。SSD网络对六个尺度特>征图计算损失,进行预测,需要如下五种特有api: + +* 根据不同参数为每个输入位置生成一系列候选框。 + + * prior box: API Reference 请参考 :ref:`cn_api_fluid_layers_prior_box` + + * density_prior box: API Reference 请参考 :ref:`cn_api_fluid_layers_density_prior_box` + +* multi_box_head :得到不同prior box的位置和置信度。API Reference 请参考 :ref:`cn_api_fluid_layers_multi_box_head` + +* detection_output:对prioir box解码,通过多分类NMS得到检测结果。API Reference 请参考 :ref:`cn_api_fluid_layers_detection_output` + +* ssd_loss:通过位置偏移预测值,置信度,检测框位置和真实框位置和标签计算损失。API Reference 请参考 :ref:`cn_api_fluid_layers_ssd_loss` + +* detection_map: 利用mAP评估SSD网络模型。API Reference 请参考 :ref:`cn_api_fluid_layers_detection_map` + +YOLO V3 +--------------- + +`YOLO V3 `_ 是单阶段目标检测器,同时具备了精度高,速度快的特点。对特征图划分多个区块,每个区块得到坐标位置和置信度。采用了多尺度融合的方式预测以得到更高的训练精度,需要如下两种特有api: + +* yolo_box: 从YOLOv3网络的输出生成YOLO检测框。API Reference 请参考 :ref:`cn_api_fluid_layers_yolo_box` + +* yolov3_loss:通过给定的预测结果和真实框生成yolov3损失。API Reference 请参考 :ref:`cn_api_fluid_layers_yolov3_loss` + +RetinaNet +--------------- + +`RetinaNet `_ 是单阶段目标检测器,引入Focal Loss和FPN后,能以更快的速率实现与双阶段目标检测网络近似或更优的效果,需要如下三种特有api: + +* sigmoid_focal_loss: 用于处理单阶段检测器中类别不平均问题的损失。API Reference 请参考 :ref:`cn_api_fluid_layers_sigmoid_focal_loss` + +* retinanet_target_assign: 对给定anchor和真实框,为每个anchor分配分类和回归的目标值,用于训练RetinaNet。API Reference 请参考 :ref:`cn_api_fluid_layers_retinanet_target_assign` + +* retinanet_detection_output: 
对检测框进行解码,并做非极大值抑制后得到检测输出。API Reference 请参考 :ref:`cn_api_fluid_layers_retinanet_detection_output` + +OCR +--------- + +场景文字识别是在图像背景复杂、分辨率低下、字体多样、分布随意等情况下,将图像信息转化为文字序列的过程,可认为是一种特别的翻译过程:将图像输入翻译为自然语言输出。OCR任务中需要对检测框进行不规则变换,其中需要如下两个api: + +* roi_perspective_transform:对输入roi做透视变换。API Reference 请参考 :ref:`cn_api_fluid_layers_roi_perspective_transform` + +* polygon_box_transform:对不规则检测框进行坐标变换。API Reference 请参考 :ref:`cn_api_fluid_layers_polygon_box_transform` + + diff --git a/doc/paddle/api_guides/low_level/layers/detection_en.rst b/doc/paddle/api_guides/low_level/layers/detection_en.rst new file mode 100755 index 0000000000000000000000000000000000000000..f7ffed049b2e3d3768287a18d19e73278f2b3acc --- /dev/null +++ b/doc/paddle/api_guides/low_level/layers/detection_en.rst @@ -0,0 +1,62 @@ + +.. _api_guide_detection_en: + + +Image Detection +################# + +PaddlePaddle Fluid implements several unique operators for image detection tasks. This article introduces related APIs grouped by diverse model types. + +General operations +-------------------- + +Some common operations in image detection are a series of operations on the bounding boxes, including: + +* Encoding and decoding of the bounding box : Conversion between encoding and decoding between the two kinds of boxes. For example, the training phase encodes the prior box and the ground-truth box to obtain the training target value. For API Reference, please refer to :ref:`api_fluid_layers_box_coder` + +* Compare the two bounding boxes and match them: + + * iou_similarity: Calculate the IOU value of the two sets of boxes. For API Reference, please refer to :ref:`api_fluid_layers_iou_similarity` + + * bipartite_match: Get the row with the largest distance in each column by the greedy binary matching algorithm. For API Reference, please refer to :ref:`api_fluid_layers_bipartite_match` + +* Get classification and regression target values ​​(target_assign) based on the bounding boxes and labels: Get the target values and corresponding weights by matched indices and negative indices. For API Reference, please refer to :ref:`api_fluid_layers_target_assign` + + +Faster RCNN +------------- + +`Faster RCNN `_ is a typical dual-stage target detector. Compared with the traditional extraction method, the RPN network in Faster RCNN greatly improves the extraction efficiency by sharing convolution layer parameters, and proposes high-quality region proposals. The RPN network needs to compare the input anchor with the ground-truth value to generate a primary candidate region, and assigns a classification and regression value to the primary candidate box. The following four unique apis are required: + +* rpn_target_assign: Assign the classification and regression target values ​​of the RPN network to the anchor through the anchor and the ground-truth box. For API Reference, please refer to :ref:`api_fluid_layers_rpn_target_assign` + +* anchor_generator: Generate a series of anchors for each location. For API Reference, please refer to :ref:`api_fluid_layers_anchor_generator` + +* generate_proposal_labels: Get the classification and regression target values ​​of the RCNN part through the candidate box and the ground-truth box obtained by generate_proposals. For API Reference, please refer to :ref:`api_fluid_layers_generate_proposal_labels` + +* generate_proposals: Decode the RPN network output box and selects a new region proposal. 
For API Reference, please refer to :ref:`api_fluid_layers_generate_proposals` + + +SSD +---------------- + +`SSD `_ , the acronym for Single Shot MultiBox Detector, is one of the latest and better detection algorithms in the field of target detection. It has the characteristics of fast detection speed and high detection accuracy. Unlike the dual-stage detection method, the single-stage target detection does not perform regional proposals, but directly returns the target's bounding box and classification probability from the feature map. The SSD network calculates the loss through six metrics of features maps and performs prediction. SSD requires the following five unique apis: + +* Prior Box: Generate a series of candidate boxes for each input position based on different parameters. For API Reference, please refer to :ref:`api_fluid_layers_prior_box` + +* multi_box_head : Get the position and confidence of different prior boxes. For API Reference, please refer to :ref:`api_fluid_layers_multi_box_head` + +* detection_output: Decode the prior box and obtains the detection result by multi-class NMS. For API Reference, please refer to :ref:`api_fluid_layers_detection_output` + +* ssd_loss: Calculate the loss by prediction value of position offset, confidence, bounding box position and ground-truth box position and label. For API Reference, please refer to :ref:`api_fluid_layers_ssd_loss` + +* detection map: Evaluate the SSD network model using mAP. For API Reference, please refer to :ref:`api_fluid_layers_detection_map` + +OCR +--------- + +Scene text recognition is a process of converting image information into a sequence of characters in the case of complex image background, low resolution, diverse fonts, random distribution and so on. It can be considered as a special translation process: translation of image input into natural language output. The OCR task needs to perform irregular transformation on the bounding box, which requires the following two APIs: + +* roi_perspective_transform: Make a perspective transformation on the input RoI. For API Reference, please refer to :ref:`api_fluid_layers_roi_perspective_transform` + +* polygon_box_transform: Coordinate transformation of the irregular bounding box. For API Reference, please refer to :ref:`api_fluid_layers_polygon_box_transform` \ No newline at end of file diff --git a/doc/paddle/api_guides/low_level/layers/index.rst b/doc/paddle/api_guides/low_level/layers/index.rst new file mode 100644 index 0000000000000000000000000000000000000000..78bf1fa1f2f0aab986f40245b776f8a18a7695ac --- /dev/null +++ b/doc/paddle/api_guides/low_level/layers/index.rst @@ -0,0 +1,21 @@ +============= +神经网络层 +============= + +.. toctree:: + :maxdepth: 1 + + conv.rst + pooling.rst + detection.rst + sequence.rst + math.rst + activations.rst + loss_function.rst + data_in_out.rst + control_flow.rst + sparse_update.rst + data_feeder.rst + learning_rate_scheduler.rst + tensor.rst + diff --git a/doc/paddle/api_guides/low_level/layers/index_en.rst b/doc/paddle/api_guides/low_level/layers/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..c4d1193fe2af74f1c3f4ebcef395dcedafc71340 --- /dev/null +++ b/doc/paddle/api_guides/low_level/layers/index_en.rst @@ -0,0 +1,21 @@ +===================== +Neural Network Layer +===================== + +.. 
toctree:: + :maxdepth: 1 + + conv_en.rst + pooling_en.rst + detection_en.rst + sequence_en.rst + math_en.rst + activations_en.rst + loss_function_en.rst + data_in_out_en.rst + control_flow_en.rst + sparse_update_en.rst + data_feeder_en.rst + learning_rate_scheduler_en.rst + tensor_en.rst + diff --git a/doc/paddle/api_guides/low_level/layers/learning_rate_scheduler.rst b/doc/paddle/api_guides/low_level/layers/learning_rate_scheduler.rst new file mode 100644 index 0000000000000000000000000000000000000000..43f468db553311cff6f767efda6fe267fc659130 --- /dev/null +++ b/doc/paddle/api_guides/low_level/layers/learning_rate_scheduler.rst @@ -0,0 +1,46 @@ +.. _api_guide_learning_rate_scheduler: + +############ +学习率调度器 +############ + +当我们使用诸如梯度下降法等方式来训练模型时,一般会兼顾训练速度和损失(loss)来选择相对合适的学习率。但若在训练过程中一直使用一个学习率,训练集的损失下降到一定程度后便不再继续下降,而是在一定范围内震荡。其震荡原理如下图所示,即当损失函数收敛到局部极小值附近时,会由于学习率过大导致更新步幅过大,每步参数更新会反复越过极小值而出现震荡。 + +.. image:: ../../../images/learning_rate_scheduler.png + :scale: 80 % + :align: center + + +学习率调度器定义了常用的学习率衰减策略来动态生成学习率,学习率衰减函数以epoch或step为参数,返回一个随训练逐渐减小的学习率,从而兼顾降低训练时间和在局部极小值能更好寻优两个方面。 + +下面介绍学习率调度器中相关的Api: + +====== + +* :code:`noam_decay`: 诺姆衰减,相关算法请参考 `《Attention Is All You Need》 `_ 。 + 相关API Reference请参考 :ref:`cn_api_fluid_layers_noam_decay` + +* :code:`exponential_decay`: 指数衰减,即每次将当前学习率乘以给定的衰减率得到下一个学习率。 + 相关API Reference请参考 :ref:`cn_api_fluid_layers_exponential_decay` + +* :code:`natural_exp_decay`: 自然指数衰减,即每次将当前学习率乘以给定的衰减率的自然指数得到下一个学习率。 + 相关API Reference请参考 :ref:`cn_api_fluid_layers_natural_exp_decay` + +* :code:`inverse_time_decay`: 逆时间衰减,即得到的学习率与当前衰减次数成反比。 + 相关API Reference请参考 :ref:`cn_api_fluid_layers_inverse_time_decay` + +* :code:`polynomial_decay`: 多项式衰减,即得到的学习率为初始学习率和给定最终学习之间由多项式计算权重定比分点的插值。 + 相关API Reference请参考 :ref:`cn_api_fluid_layers_polynomial_decay` + +* :code:`piecewise_decay`: 分段衰减,即由给定step数分段呈阶梯状衰减,每段内学习率相同。 + 相关API Reference请参考 :ref:`cn_api_fluid_layers_piecewise_decay` + +* :code:`append_LARS`: 通过Layer-wise Adaptive Rate Scaling算法获得学习率,相关算法请参考 `《Train Feedfoward Neural Network with Layer-wise Adaptive Rate via Approximating Back-matching Propagation》 `_ 。 + 相关API Reference请参考 :ref:`cn_api_fluid_layers_append_LARS` + +* :code:`cosine_decay`: 余弦衰减,即学习率随step数变化呈余弦函数。 + 相关API Reference请参考 :ref:`cn_api_fluid_layers_cosine_decay` + +* :code:`linear_lr_warmup`: 学习率随step数线性增加到指定学习率。 + 相关API Reference请参考 :ref:`cn_api_fluid_layers_linear_lr_warmup` + diff --git a/doc/paddle/api_guides/low_level/layers/learning_rate_scheduler_en.rst b/doc/paddle/api_guides/low_level/layers/learning_rate_scheduler_en.rst new file mode 100755 index 0000000000000000000000000000000000000000..61e49f3a659502efac4dc55151f59f4360c72cbf --- /dev/null +++ b/doc/paddle/api_guides/low_level/layers/learning_rate_scheduler_en.rst @@ -0,0 +1,36 @@ +.. _api_guide_learning_rate_scheduler_en: + +######################## +Learning rate scheduler +######################## + +When we use a method such as the gradient descent method to train the model, the training speed and loss are generally taken into consideration to select a relatively appropriate learning rate. However, if a fixed learning rate is used throughout the training process, the loss of the training set will not continue to decline after falling to a certain extent, but will 'jump' within a certain range. The jumping principle is shown in the figure below. When the loss function converges to the local minimum value, the update step will be too large due to the excessive learning rate. 
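+(As a usage note: every scheduler described later in this guide follows the same pattern — the decay function is called once at network-building time and returns a learning-rate Variable that is handed to an optimizer. The sketch below is an illustration added to this guide, not part of the original text; the network, the decay boundaries/values and the SGD optimizer are assumptions chosen only for demonstration.)
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+
+    # Hypothetical network producing a scalar loss; any network would do.
+    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
+    y = fluid.layers.data(name='y', shape=[1], dtype='float32')
+    pred = fluid.layers.fc(input=x, size=1)
+    avg_loss = fluid.layers.mean(fluid.layers.square_error_cost(input=pred, label=y))
+
+    # piecewise_decay returns a learning-rate Variable that changes with the
+    # global step: 0.1 before step 1000, 0.01 before step 2000, 0.001 afterwards.
+    lr = fluid.layers.piecewise_decay(boundaries=[1000, 2000],
+                                      values=[0.1, 0.01, 0.001])
+    fluid.optimizer.SGD(learning_rate=lr).minimize(avg_loss)
+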
The parameter update will repeatedly *jump over* the local minimum value and an oscillation-like phenomenon will occur. + +.. image:: ../../../images/learning_rate_scheduler.png + :scale: 80 % + :align: center + + +The learning rate scheduler defines a commonly used learning rate decay strategy to dynamically generate the learning rate. The learning rate decay function takes epoch or step as the parameter and returns a learning rate that gradually decreases with training. Thereby it reduces the training time and finds the local minimum value at the same time. + +The following content describes the APIs related to the learning rate scheduler: + +====== + +* :code:`noam_decay`: Noam decay. Please refer to `Attention Is All You Need `_ for related algorithms. For related API Reference please refer to :ref:`api_fluid_layers_noam_decay` + +* :code:`exponential_decay`: Exponential decay. That is, each time the current learning rate is multiplied by the given decay rate to get the next learning rate. For related API Reference please refer to :ref:`api_fluid_layers_exponential_decay` + +* :code:`natural_exp_decay`: Natural exponential decay. That is, each time the current learning rate is multiplied by the natural exponent of the given decay rate to get the next learning rate. For related API Reference please refer to :ref:`api_fluid_layers_natural_exp_decay` + +* :code:`inverse_time_decay`: Inverse time decay. The decayed learning rate is inversely proportional to the current number of decays. For related API Reference please refer to :ref:`api_fluid_layers_inverse_time_decay` + +* :code:`polynomial_decay`: Polynomial decay, i.e. the decayed learning rate is calculated in a polynomial format with the initial learning rate and the end learning rate. For related API Reference please refer to :ref:`api_fluid_layers_polynomial_decay` + +* :code:`piecewise_decay`: Piecewise decay. That is, the stair-like decay for a given number of steps, the learning rate stays the same within each step. For related API Reference please refer to :ref:`api_fluid_layers_piecewise_decay` + +* :code:`append_LARS`: The learning rate is obtained by the Layer-wise Adaptive Rate Scaling algorithm. For related algorithms, please refer to `Train Feed forward Neural Network with Layerwise Adaptive Rate via Approximating Back-matching Propagation `_ . For related API Reference please refer to :ref:`api_fluid_layers_append_LARS` + +* :code:`cosine_decay`: Cosine attenuation. It means the learning rate changes with the number of steps in the form of a cosine function. For related API Reference please refer to :ref:`api_fluid_layers_cosine_decay` + +* :code:`linear_lr_warmup`: The learning rate increases linearly to an appointed rate with the number of steps. For related API Reference please refer to :ref:`api_fluid_layers_linear_lr_warmup` diff --git a/doc/paddle/api_guides/low_level/layers/loss_function.rst b/doc/paddle/api_guides/low_level/layers/loss_function.rst new file mode 100644 index 0000000000000000000000000000000000000000..6b4d22e1bf4055d803b1cbdb59c3b654de48d496 --- /dev/null +++ b/doc/paddle/api_guides/low_level/layers/loss_function.rst @@ -0,0 +1,60 @@ +.. 
_api_guide_loss_function: + +####### +损失函数 +####### + +损失函数定义了拟合结果和真实结果之间的差异,作为优化的目标直接关系模型训练的好坏,很多研究工作的内容也集中在损失函数的设计优化上。 +Paddle Fluid 中提供了面向多种任务的多种类型的损失函数,以下列出了一些 Paddle Fluid 中包含的较为常用的损失函数。 + +回归 +==== + +平方误差损失(squared error loss)使用预测值和真实值之间误差的平方作为样本损失,是回归问题中最为基本的损失函数。 +API Reference 请参考 :ref:`cn_api_fluid_layers_square_error_cost`。 + +平滑 L1 损失(smooth_l1 loss)是一种分段的损失函数,较平方误差损失其对异常点相对不敏感,因而更为鲁棒。 +API Reference 请参考 :ref:`cn_api_fluid_layers_smooth_l1`。 + + +分类 +==== + +`交叉熵(cross entropy) `_ 是分类问题中使用最为广泛的损失函数,Paddle Fluid 中提供了接受归一化概率值和非归一化分值输入的两种交叉熵损失函数的接口,并支持 soft label 和 hard label 两种样本类别标签。 +API Reference 请参考 :ref:`cn_api_fluid_layers_cross_entropy` 和 :ref:`cn_api_fluid_layers_softmax_with_cross_entropy`。 + +多标签分类 +--------- +对于多标签分类问题,如一篇文章同属于政治、科技等多个类别的情况,需要将各类别作为独立的二分类问题计算损失,Paddle Fluid 中为此提供了 sigmoid_cross_entropy_with_logits 损失函数, +API Reference 请参考 :ref:`cn_api_fluid_layers_sigmoid_cross_entropy_with_logits`。 + +大规模分类 +--------- +对于大规模分类问题,通常需要特殊的方法及相应的损失函数以加速训练,常用的方法有 `噪声对比估计(Noise-contrastive estimation,NCE) `_ 和 `层级 sigmoid `_ 。 + +* 噪声对比估计通过将多分类问题转化为学习分类器来判别数据来自真实分布和噪声分布的二分类问题,基于二分类来进行极大似然估计,避免在全类别空间计算归一化因子从而降低了计算复杂度。 +* 层级 sigmoid 通过二叉树进行层级的二分类来实现多分类,每个样本的损失对应了编码路径上各节点二分类交叉熵的和,避免了归一化因子的计算从而降低了计算复杂度。 +这两种方法对应的损失函数在 Paddle Fluid 中均有提供,API Reference 请参考 :ref:`cn_api_fluid_layers_nce` 和 :ref:`cn_api_fluid_layers_hsigmoid`。 + +序列分类 +------- +序列分类可以分为以下三种: + +* 序列分类(Sequence Classification)问题,整个序列对应一个预测标签,如文本分类。这种即是普通的分类问题,可以使用 cross entropy 作为损失函数。 +* 序列片段分类(Segment Classification)问题,序列中的各个片段对应有自己的类别标签,如命名实体识别。对于这种序列标注问题,`(线性链)条件随机场(Conditional Random Field,CRF) `_ 是一种常用的模型方法,其使用句子级别的似然概率,序列中不同位置的标签不再是条件独立,能够有效解决标记偏置问题。Paddle Fluid 中提供了 CRF 对应损失函数的支持,API Reference 请参考 :ref:`cn_api_fluid_layers_linear_chain_crf`。 +* 时序分类(Temporal Classification)问题,需要对未分割的序列进行标注,如语音识别。对于这种时序分类问题,`CTC(Connectionist Temporal Classification) `_ 损失函数不需要对齐输入数据及标签,可以进行端到端的训练,Paddle Fluid 提供了 warpctc 的接口来计算相应的损失,API Reference 请参考 :ref:`cn_api_fluid_layers_warpctc`。 + +排序 +==== + +`排序问题 `_ 可以使用 Pointwise、Pairwise 和 Listwise 的学习方法,不同的方法需要使用不同的损失函数: + +* Pointwise 的方法通过近似为回归问题解决排序问题,可以使用回归问题的损失函数。 +* Pairwise 的方法需要特殊设计的损失函数,其通过近似为分类问题解决排序问题,使用两篇文档与 query 的相关性得分以偏序作为二分类标签来计算损失。Paddle Fluid 中提供了两种常用的 Pairwise 方法的损失函数,API Reference 请参考 :ref:`cn_api_fluid_layers_rank_loss` 和 :ref:`cn_api_fluid_layers_margin_rank_loss`。 + +更多 +==== + +对于一些较为复杂的损失函数,可以尝试使用其他损失函数组合实现;Paddle Fluid 中提供的用于图像分割任务的 :ref:`cn_api_fluid_layers_dice_loss` 即是使用其他 OP 组合(计算各像素位置似然概率的均值)而成;多目标损失函数也可看作这样的情况,如 Faster RCNN 就使用 cross entropy 和 smooth_l1 loss 的加权和作为损失函数。 + +**注意**,在定义损失函数之后为能够使用 :ref:`api_guide_optimizer` 进行优化,通常需要使用 :ref:`cn_api_fluid_layers_mean` 或其他操作将损失函数返回的高维 Tensor 转换为 Scalar 值。 \ No newline at end of file diff --git a/doc/paddle/api_guides/low_level/layers/loss_function_en.rst b/doc/paddle/api_guides/low_level/layers/loss_function_en.rst new file mode 100755 index 0000000000000000000000000000000000000000..9e7c6a3a6677bb191078cf5f3595202b32a923a9 --- /dev/null +++ b/doc/paddle/api_guides/low_level/layers/loss_function_en.rst @@ -0,0 +1,61 @@ +.. _api_guide_loss_function_en: + +############## +Loss function +############## + +The loss function defines the difference between the inference result and the ground-truth result. As the optimization target, it directly determines whether the model training is good or not, and many researches also focus on the optimization of the loss function design. +Paddle Fluid offers diverse types of loss functions for a variety of tasks. 
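+Before going through them one by one, the following minimal sketch (an illustration added here; the 10-class :code:`fc` classifier and the variable names are assumptions made only for demonstration) shows the pattern shared by most of them: a loss layer produces a per-sample loss, which is reduced to a scalar and handed to an optimizer.
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+
+    # Hypothetical 10-class classifier on a 784-dimensional input.
+    image = fluid.layers.data(name='image', shape=[784], dtype='float32')
+    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
+    prob = fluid.layers.fc(input=image, size=10, act='softmax')
+
+    # cross_entropy returns one loss value per sample; reduce it with mean
+    # so that the optimizer receives a scalar objective.
+    per_sample_loss = fluid.layers.cross_entropy(input=prob, label=label)
+    avg_loss = fluid.layers.mean(per_sample_loss)
+    fluid.optimizer.SGD(learning_rate=0.01).minimize(avg_loss)
+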
Let's take a look at the commonly-used loss functions included in Paddle Fluid. + +Regression +=========== + +The squared error loss uses the square of the error between the predicted value and the ground-truth value as the sample loss, which is the most basic loss function in the regression problems. +For API Reference, please refer to :ref:`api_fluid_layers_square_error_cost`. + +Smooth L1 loss (smooth_l1 loss) is a piecewise loss function that is relatively insensitive to outliers and therefore more robust. +For API Reference, please refer to :ref:`api_fluid_layers_smooth_l1`. + + +Classification +================ + +`cross entropy `_ is the most widely used loss function in classification problems. The interfaces in Paddle Fluid for the cross entropy loss functions are divided into the one accepting fractional input of normalized probability values ​​and another for non-normalized input. And Fluid supports two types labels, namely soft label and hard label. +For API Reference, please refer to :ref:`api_fluid_layers_cross_entropy` and :ref:`api_fluid_layers_softmax_with_cross_entropy`. + +Multi-label classification +---------------------------- +For the multi-label classification, such as the occasion that an article belongs to multiple categories like politics, technology, it is necessary to calculate the loss by treating each category as an independent binary-classification problem. We provide the sigmoid_cross_entropy_with_logits loss function for this purpose. +For API Reference, please refer to :ref:`api_fluid_layers_sigmoid_cross_entropy_with_logits`. + +Large-scale classification +----------------------------- +For large-scale classification problems, special methods and corresponding loss functions are usually needed to speed up the training. The commonly used methods are +`Noise contrastive estimation (NCE) `_ and `Hierarchical sigmoid `_ . + +* NCE solves the binary-classification problem of discriminating the true distribution and the noise distribution by converting the multi-classification problem into a classifier. The maximum likelihood estimation is performed based on the binary-classification to avoid calculating the normalization factor in the full-class space to reduce computational complexity. +* Hierarchical sigmoid realizes multi-classification by hierarchical classification of binary trees. The loss of each sample corresponds to the sum of the cross-entropy of the binary-classification for each node on the coding path, which avoids the calculation of the normalization factor and reduces the computational complexity. +The loss functions for both methods are available in Paddle Fluid. For API Reference please refer to :ref:`api_fluid_layers_nce` and :ref:`api_fluid_layers_hsigmoid`. + +Sequence classification +------------------------- +Sequence classification can be divided into the following three types: + +* Sequence Classification problem is that the entire sequence corresponds to a prediction label, such as text classification. This is a common classification problem, you can use cross entropy as the loss function. +* Segment Classification problem is that each segment in the sequence corresponds to its own category tag, such as named entity recognition. For this sequence labeling problem, `the (Linear Chain) Conditional Random Field (CRF) `_ is a commonly used model. 
The method uses the likelihood probability on the sentence level, and the labels for different positions in the sequence are no longer conditionally independent, which can effectively solve the label offset problem. Support for CRF loss functions is available in Paddle Fluid. For API Reference please refer to :ref:`api_fluid_layers_linear_chain_crf` . +* Temporal Classification problem needs to label unsegmented sequences, such as speech recognition. For this time-based classification problem, `CTC(Connectionist Temporal Classification) `_ loss function does not need to align input data and labels, and is able to perform end-to-end training. Paddle Fluid provides a warpctc interface to calculate the corresponding loss. For API Reference, please refer to :ref:`api_fluid_layers_warpctc` . + +Rank +========= + +`Rank problems `_ can use learning methods of Pointwise, Pairwise, and Listwise. Different methods require different loss functions: + +* The Pointwise method solves the ranking problem by approximating the regression problem. Therefore the loss function of the regression problem can be used. +* Pairwise's method requires a special loss function. Pairwise solves the sorting problem by approximating the classification problem, using relevance score of two documents and the query to use the partial order as the binary-classification label to calculate the loss. Paddle Fluid provides two commonly used loss functions for Pairwise methods. For API Reference please refer to :ref:`api_fluid_layers_rank_loss` and :ref:`api_fluid_layers_margin_rank_loss`. + +More +==== + +For more complex loss functions, try to use combinations of other loss functions; the :ref:`api_fluid_layers_dice_loss` provided in Paddle Fluid for image segmentation tasks is an example of using combinations of other operators (calculate the average likelihood probability of each pixel position). The multi-objective loss function can also be considered similarly, such as Faster RCNN that uses the weighted sum of cross entropy and smooth_l1 loss as a loss function. + +**Note**, after defining the loss function, in order to optimize with :ref:`api_guide_optimizer_en`, you usually need to use :ref:`api_fluid_layers_mean` or other operations to convert the high-dimensional Tensor returned by the loss function to a Scalar value. \ No newline at end of file diff --git a/doc/paddle/api_guides/low_level/layers/math.rst b/doc/paddle/api_guides/low_level/layers/math.rst new file mode 100644 index 0000000000000000000000000000000000000000..e00df723d79a58f8a9b2451a2d0cd5ad4d3ef415 --- /dev/null +++ b/doc/paddle/api_guides/low_level/layers/math.rst @@ -0,0 +1,186 @@ +.. _api_guide_math: + + +数学操作 +######### + +Paddle提供了丰富的数学操作,以下列出的数学操作都是对目标张量进行逐元素的操作。其中,如果二元操作的两个输入有不同形状,会先进行 :code:`broadcast`. 
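+下面给出一个简单的示意(仅为说明 broadcast 行为而添加的示例,其中的形状与变量名均为假设,并非本文档原有内容):
+
+.. code-block:: python
+
+    import numpy as np
+    import paddle.fluid as fluid
+
+    # 两个形状不同的输入:x 为 [2, 3],y 为 [3],计算时 y 会先被 broadcast 成 [2, 3]
+    x = fluid.layers.data(name='x', shape=[2, 3], dtype='float32', append_batch_size=False)
+    y = fluid.layers.data(name='y', shape=[3], dtype='float32', append_batch_size=False)
+    out = fluid.layers.elementwise_add(x, y)
+
+    place = fluid.CPUPlace()
+    exe = fluid.Executor(place)
+    exe.run(fluid.default_startup_program())
+    res = exe.run(fluid.default_main_program(),
+                  feed={'x': np.ones((2, 3), dtype='float32'),
+                        'y': np.array([1., 2., 3.], dtype='float32')},
+                  fetch_list=[out])
+    print(res[0])  # [[2. 3. 4.] [2. 3. 4.]]
+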
部分数学操作还支持数学操作符,比如: :code:`+`, :code:`-`, :code:`*`, :code:`/` 等。数学操作符不仅支持张量,还支持标量。 + + +一元操作 +================== + +exp +------------------ + +对输入 :code:`Tensor` 逐元素做 :code:`exp` 操作。 + +API Reference 请参考 :ref:`cn_api_fluid_layers_exp` + +tanh +------------------ + +对输入 :code:`Tensor` 逐元素取正切。 + +API Reference 请参考 :ref:`cn_api_fluid_layers_tanh` + +sqrt +------------------ + +对输入 :code:`Tensor` 逐元素取平方根。 + +API Reference 请参考 :ref:`cn_api_fluid_layers_sqrt` + +abs +------------------ + +对输入 :code:`Tensor` 逐元素取绝对值。 + +API Reference 请参考 :ref:`cn_api_fluid_layers_abs` + +ceil +------------------ + +对输入 :code:`Tensor` 逐元素向上取整。 + +API Reference 请参考 :ref:`cn_api_fluid_layers_ceil` + +floor +------------------ + +对输入 :code:`Tensor` 逐元素向下取整。 + +API Reference 请参考 :ref:`cn_api_fluid_layers_floor` + +sin +------------------ + +对输入 :code:`Tensor` 逐元素取正弦。 + +API Reference 请参考 :ref:`cn_api_fluid_layers_sin` + +cos +------------------ + +对输入 :code:`Tensor` 逐元素取余弦。 + +API Reference 请参考 :ref:`cn_api_fluid_layers_cos` + +round +------------------ + +对输入 :code:`Tensor` 逐元素四舍五入取整。 + +API Reference 请参考 :ref:`cn_api_fluid_layers_round` + +square +------------------ + +对输入 :code:`Tensor` 逐元素取平方。 + +API Reference 请参考 :ref:`cn_api_fluid_layers_square` + +reciprocal +------------------ + +对输入 :code:`Tensor` 逐元素取倒数。 + +API Reference 请参考 :ref:`cn_api_fluid_layers_reciprocal` + + +reduce +------------------ + +对输入 :code:`Tensor` 在指定的若干轴上做reduce操作,包括:min, max, sum, mean, product + +API Reference 请参考: +:ref:`cn_api_fluid_layers_reduce_min` +:ref:`cn_api_fluid_layers_reduce_max` +:ref:`cn_api_fluid_layers_reduce_sum` +:ref:`cn_api_fluid_layers_reduce_mean` +:ref:`cn_api_fluid_layers_reduce_prod` + + +二元操作 +================== + +elementwise_add +------------------ + +对两个 :code:`Tensor` 逐元素相加,对应的数学操作符为 :code:`+` + +API Reference 请参考 :ref:`cn_api_fluid_layers_elementwise_add` + +elementwise_sub +------------------ + +对两个 :code:`Tensor` 逐元素相减,对应数学操作符 :code:`-` + +API Reference 请参考 :ref:`cn_api_fluid_layers_elementwise_sub` + +elementwise_mul +------------------ + +对两个 :code:`Tensor` 逐元素相乘, 对应数学操作符 :code:`*` + +API Reference 请参考 :ref:`cn_api_fluid_layers_elementwise_mul` + +elementwise_div +------------------ + +对两个 :code:`Tensor` 逐元素相除, 对应数学操作符 :code:`/` 或 :code:`//` + +API Reference 请参考 :ref:`cn_api_fluid_layers_elementwise_div` + + +elementwise_pow +------------------ + +对两个 :code:`Tensor` 逐元素做次幂操作, 对应数学操作符 :code:`**` + +API Reference 请参考 :ref:`cn_api_fluid_layers_elementwise_pow` + +equal +------------------ + +对两个 :code:`Tensor` 逐元素判断是否相等, 对应数学操作符 :code:`==` + +API Reference 请参考 :ref:`cn_api_fluid_layers_equal` + + +less_than +------------------ + +对两个 :code:`Tensor` 逐元素判断是否满足小于关系, 对应数学操作符 :code:`<` + +API Reference 请参考 :ref:`cn_api_fluid_layers_less_than` + + + +sum +------------------ + +对两个 :code:`Tensor` 逐元素相加。 + +API Reference 请参考 :ref:`cn_api_fluid_layers_sum` + +elementwise_min +------------------ + +对两个 :code:`Tensor` 逐元素进行 :code:`min(x, y)` 操作。 + +API Reference 请参考 :ref:`cn_api_fluid_layers_elementwise_min` + +elementwise_max +------------------ + +对两个 :code:`Tensor` 逐元素进行 :code:`max(x, y)` 操作。 + +API Reference 请参考 :ref:`cn_api_fluid_layers_elementwise_max` + +matmul +------------------ + +对两个 :code:`Tensor` 进行矩阵乘操作。 + +API Reference 请参考 :ref:`cn_api_fluid_layers_matmul` diff --git a/doc/paddle/api_guides/low_level/layers/math_en.rst b/doc/paddle/api_guides/low_level/layers/math_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..74f484aaba7d436df43d48bfc904bd3adf1a5095 --- /dev/null +++ 
b/doc/paddle/api_guides/low_level/layers/math_en.rst @@ -0,0 +1,186 @@ +.. _api_guide_math_en: + + +Mathematical operation +########################### + +Paddle provides a wealth of mathematical operations. The mathematical operations listed below are all elementwise operations on the target tensor. If the two inputs of the binary operations have different shapes, they will be processed first by :code:`broadcast`. Some mathematical operations also support mathematical operators, such as: :code:`+`, :code:`-`, :code:`*`, :code:`/`, etc. Math operators not only support tensors but also scalars. + + +Unary operation +================== + +exp +------------------ + +Perform an :code:`exp` operation on each input :code:`Tensor` element. + +API Reference: :ref:`api_fluid_layers_exp` + +tanh +------------------ + +For the input :code:`Tensor`, take the tanh value of each element. + +API Reference: :ref:`api_fluid_layers_tanh` + +sqrt +------------------ + +For the input :code:`Tensor`, take the square root of each element. + +API Reference: :ref:`api_fluid_layers_sqrt` + +abs +------------------ + +For the input :code:`Tensor`, take the elementwise absolute value. + +API Reference: :ref:`api_fluid_layers_abs` + +ceil +------------------ + +Round up each input :code:`Tensor` element to the nearest greater integer. + +API Reference: :ref:`api_fluid_layers_ceil` + +floor +------------------ + +Round down each input :code:`Tensor` element to the nearest smaller integer. + +API Reference: :ref:`api_fluid_layers_floor` + +sin +------------------ + +For the input :code:`Tensor`, take the elementwise sine value. + +API Reference: :ref:`api_fluid_layers_sin` + +cos +------------------ + +For the input :code:`Tensor`, take the elementwise cosine value. + +API Reference: :ref:`api_fluid_layers_cos` + +round +------------------ + +Round each element of the input :code:`Tensor` to the nearest integer. + +API Reference: :ref:`api_fluid_layers_round` + +square +------------------ + +Square the input :code:`Tensor` in elementwise order. + +API Reference: :ref:`api_fluid_layers_square` + +reciprocal +------------------ + +For the input :code:`Tensor`, take the reciprocal in elementwise order. + +API Reference: :ref:`api_fluid_layers_reciprocal` + + +reduce +------------------ + +Perform reduce operations on the input :code:`Tensor` along the specified axes, including: min, max, sum, mean, product + +API Reference: +:ref:`api_fluid_layers_reduce_min` +:ref:`api_fluid_layers_reduce_max` +:ref:`api_fluid_layers_reduce_sum` +:ref:`api_fluid_layers_reduce_mean` +:ref:`api_fluid_layers_reduce_prod` + + +Binary operation +================== + +elementwise_add +------------------ + +Add two :code:`Tensor` in elementwise order, and the corresponding math operator is :code:`+` . + +API Reference: :ref:`api_fluid_layers_elementwise_add` + +elementwise_sub +------------------ + +Subtract two :code:`Tensor` in elementwise order, and the corresponding math operator is :code:`-` . + +API Reference: :ref:`api_fluid_layers_elementwise_sub` + +elementwise_mul +------------------ + +Multiply two :code:`Tensor` in elementwise order, and the corresponding math operator is :code:`*` . + +API Reference: :ref:`api_fluid_layers_elementwise_mul` + +elementwise_div +------------------ + +Divide two :code:`Tensor` in elementwise order, and the corresponding math operator is :code:`/` or :code:`//` . 
+ +API Reference: :ref:`api_fluid_layers_elementwise_div` + + +elementwise_pow +------------------ + +Do power operations on two :code:`Tensor` in elementwise order, and the corresponding math operator is :code:`**` . + +API Reference: :ref:`api_fluid_layers_elementwise_pow` + +equal +------------------ + +Judge whether the two :code:`Tensor` elements are equal, and the corresponding math operator is :code:`==` . + +API Reference: :ref:`api_fluid_layers_equal` + + +less_than +------------------ + +Judge whether the two :code:`Tensor` elements satisfy the 'less than' relationship, and the corresponding math operator is :code:`<` . + +API Reference: :ref:`api_fluid_layers_less_than` + + + +sum +------------------ + +Add two :code:`Tensor` in elementwise order. + +API Reference: :ref:`api_fluid_layers_sum` + +elementwise_min +------------------ + +Perform :code:`min(x, y)` operations on two :code:`Tensor` in elementwise order . + +API Reference: :ref:`api_fluid_layers_elementwise_min` + +elementwise_max +------------------ + +Perform :code:`max(x, y)` operations on two :code:`Tensor` in elementwise order . + +API Reference: :ref:`api_fluid_layers_elementwise_max` + +matmul +------------------ + +Perform matrix multiplication operations on two :code:`Tensor`. + +API Reference: :ref:`api_fluid_layers_matmul` diff --git a/doc/paddle/api_guides/low_level/layers/pooling.rst b/doc/paddle/api_guides/low_level/layers/pooling.rst new file mode 100644 index 0000000000000000000000000000000000000000..56d90c190aeb4f2bd3d20135681d0135f54378b9 --- /dev/null +++ b/doc/paddle/api_guides/low_level/layers/pooling.rst @@ -0,0 +1,80 @@ +.. _api_guide_pool: + +##### +池化 +##### + +池化的作用是对输入特征做下采样和降低过拟合。降低过拟合是减小输出大小的结果,它同样也减少了后续层中的参数的数量。 + +池化通常只需要将前一层的特征图作为输入,此外需要一些参数来确定池化具体的操作。在PaddlePaddle中我们同样通过设定池化的大小,方式,步长,是否是全局池化,是否使用cudnn,是否使用ceil函数计算输出等参数来选择具体池化的方式。 +PaddlePaddle中有针对定长图像特征的二维(pool2d)、三维卷积(pool3d),RoI池化(roi_pool),以及针对序列的序列池化(sequence_pool),同时也有池化计算的反向过程,下面先介绍2D/3D池化,以及RoI池化,再来介绍序列池化。 + +-------------- + +1. pool2d/pool3d +------------------------ + +- ``input`` : 池化操作接收任何符合layout是:\ ``N(batch size)* C(channel size) * H(height) * W(width)``\ 格式的\ ``Tensor``\ 类型作为输入。 + +- ``pool_size``\ : 用来确定池化\ ``filter``\ 的大小,即将多大范围内的数据池化为一个值。 + +- ``num_channels``\ : 用来确定输入的\ ``channel``\ 数量,如果未设置参数或设置为\ ``None``\ ,其实际值将自动设置为输入的\ ``channel``\ 数量。 + +- ``pool_type``\ : 接收\ ``avg``\ 和\ ``max``\ 2种类型之一作为pooling的方式,默认值为\ ``max``\ 。其中\ ``max``\ 意为最大池化,即计算池化\ ``filter``\ 区域内的数据的最大值作为输出;而\ ``avg``\ 意为平均池化,即计算池化\ ``filter``\ 区域内的数据的平均值作为输出。 + +- ``pool_stride``\ : 意为池化的\ ``filter``\ 在输入特征图上移动的步长。 + +- ``pool_padding``\ : 用来确定池化中\ ``padding``\ 的大小,\ ``padding``\ 的使用是为了对于特征图边缘的特征进行池化,选择不同的\ ``pool_padding``\ 大小确定了在特征图边缘增加多大区域的补零。从而决定边缘特征被池化的程度。 + +- ``global_pooling``\ : 意为是否使用全局池化,全局池化是指使用和特征图大小相同的\ ``filter``\ 来进行池化,同样这个过程也可以使用平均池化或者最大池化来做为池化的方式,全局池化通常会用来替换全连接层以大量减少参数防止过拟合。 + +- ``use_cudnn``\ : 选项可以来选择是否使用cudnn来优化计算池化速度。 + +- ``ceil_mode``\ : 是否使用ceil函数计算输出高度和宽度。\ ``ceil mode``\ 意为天花板模式,是指会把特征图中不足\ ``filter size``\ 的边给保留下来,单独另算,或者也可以理解为在原来的数据上补充了值为-NAN的边。而floor模式则是直接把不足\ ``filter size``\ 的边给舍弃了。具体计算公式如下: + + - 非\ ``ceil_mode``\ 下:\ ``输出大小 = (输入大小 - filter size + 2 * padding) / stride(步长) + 1`` + + - ``ceil_mode``\ 下:\ ``输出大小 = (输入大小 - filter size + 2 * padding + stride - 1) / stride + 1`` + + + +api汇总: + +- :ref:`cn_api_fluid_layers_pool2d` +- :ref:`cn_api_fluid_layers_pool3d` + + +2. 
roi_pool +------------------ + +``roi_pool``\ 一般用于检测网络中,将输入特征图依据候选框池化到特定的大小。 + +- ``rois``\ : 接收\ ``LoDTensor``\ 类型来表示需要池化的 Regions of Interest,关于RoI的解释请参考\ `论文 `__ + +- ``pooled_height`` 和 ``pooled_width``\ : 这里可以接受非正方的池化窗口大小 + +- ``spatial_scale``\ : 用作设定缩放RoI和原图缩放的比例,注意,这里的设定需要用户自行计算RoI和原图的实际缩放比例。 + + +api汇总: + +- :ref:`cn_api_fluid_layers_roi_pool` + + +3. sequence_pool +-------------------- + +``sequence_pool``\ 是一个用作对于不等长序列进行池化的接口,它将每一个实例的全部时间步的特征进行池化,它同样支持 +``average``, ``sum``, ``sqrt`` 和\ ``max``\ 4种类型之一作为pooling的方式。 其中: + +- ``average``\ 是对于每一个时间步内的数据求和后分别取平均值作为池化的结果。 + +- ``sum``\ 则是对每一个时间步内的数据分别求和作为池化的结果。 + +- ``sqrt``\ 则是对每一个时间步内的数据分别求和再分别取平方根作为池化的结果。 + +- ``max``\ 则是对每一个时间步内的数据分别求取最大值作为池化的结果。 + +api汇总: + +- :ref:`cn_api_fluid_layers_sequence_pool` diff --git a/doc/paddle/api_guides/low_level/layers/pooling_en.rst b/doc/paddle/api_guides/low_level/layers/pooling_en.rst new file mode 100755 index 0000000000000000000000000000000000000000..d7fcb53db1157060ce278fa88902b7da29df37dd --- /dev/null +++ b/doc/paddle/api_guides/low_level/layers/pooling_en.rst @@ -0,0 +1,80 @@ +.. _api_guide_pool_en: + +######## +Pooling +######## + +Pooling downsamples the input features and reduces overfitting. Reducing overfitting is the result of reducing the output size, which also reduces the number of parameters in subsequent layers. + +Pooling usually only takes the feature maps of the previous layer as input, and some parameters are needed to determine the specific operation of the pooling. In PaddlePaddle, we also choose the specific pooling by setting parameters like the size, method, stride, whether to pool globally, whether to use cudnn, and whether to use the ceil function to calculate the output. +PaddlePaddle provides two-dimensional (pool2d) and three-dimensional (pool3d) pooling, RoI pooling (roi_pool) for fixed-length image features, and sequence pooling (sequence_pool) for sequences, as well as the reverse (backward) process of pooling calculations. The following text describes 2D/3D pooling, then RoI pooling, and finally sequence pooling. + +-------------- + +1. pool2d/pool3d +------------------------ + +- ``input`` : The pooling operation receives any ``Tensor`` that conforms to the ``N(batch size) * C(channel size) * H(height) * W(width)`` layout as input. + +- ``pool_size`` : It is used to determine the size of the pooling ``filter``, which determines the size of data to be pooled into a single value. + +- ``num_channels`` : It is used to determine the number of ``channel`` of the input. If it is not set or is set to ``None``, its actual value will be automatically set to the ``channel`` quantity of the input. + +- ``pool_type`` : It receives one of ``avg`` and ``max`` as the pooling method. The default value is ``max`` . ``max`` means maximum pooling, i.e. calculating the maximum value of the data in the pooled ``filter`` area as output; and ``avg`` means average pooling, i.e. calculating the average of the data in the pooled ``filter`` area as output. + +- ``pool_stride`` : It is the stride size in which the pooling ``filter`` moves on the input feature map. + +- ``pool_padding`` : It is used to determine the size of ``padding`` in the pooling, ``padding`` is used to pool the features of the edges of feature maps. The ``pool_padding`` size determines how much zero is padded to the edge of the feature maps. Thereby it determines the extent to which the edge features are pooled. + +- ``global_pooling`` : It means whether to use global pooling. 
Global pooling refers to pooling using ``filter`` of the same size as the feature map. This process can also use average pooling or the maximum pooling as the pooling method. Global pooling is usually used to replace the fully connected layer to greatly reduce the parameters to prevent overfitting. + +- The ``use_cudnn`` : This option allows you to choose whether or not to use cudnn to accelerate pooling. + +- ``ceil_mode`` : Whether to use the ceil function to calculate the output height and width. ``ceil mode`` means ceiling mode, which means that, in the feature map, the edge parts that are smaller than ``filter size`` will be retained, and separately calculated. It can be understood as supplementing the original data with edge with a value of -NAN. By contrast, The floor mode directly discards the edges smaller than the ``filter size``. The specific calculation formula is as follows: +    + * Non ``ceil_mode`` : ``Output size = (input size - filter size + 2 * padding) / stride (stride size) + 1`` +     + * ``ceil_mode`` : ``Output size = (input size - filter size + 2 * padding + stride - 1) / stride + 1`` +     + + +related API: + +- :ref:`api_fluid_layers_pool2d` +- :ref:`api_fluid_layers_pool3d` + + +2. roi_pool +------------------ + +``roi_pool`` is generally used in detection networks, and the input feature map is pooled to a specific size by the bounding box. + +- ``rois`` : It receives ``LoDTensor`` type to indicate the Regions of Interest that needs to be pooled. For an explanation of RoI, please refer to `Paper `__ + +- ``pooled_height`` and ``pooled_width`` : accept non-square pooling box sizes + +- ``spatial_scale`` : Used to set the scale of scaling the RoI and the original image. Note that the settings here require the user to manually calculate the actual scaling of the RoI and the original image. +  + +related API: + +- :ref:`api_fluid_layers_roi_pool` + + +3. sequence_pool +-------------------- + +``sequence_pool`` is an interface used to pool variable-length sequences. It pools the features of all time steps of each instance, and also supports +one of ``average``, ``sum``, ``sqrt`` and ``max`` to be used as the pooling method. Specifically: + +- ``average`` sums up the data in each time step and takes its average as the pooling result. + +- ``sum`` take the sum of the data in each time step as pooling result. + +- ``sqrt`` sums the data in each time step and takes its square root as the pooling result. + +- ``max`` takes the maximum value for each time step as the pooling result. + +related API: + +- :ref:`api_fluid_layers_sequence_pool` diff --git a/doc/paddle/api_guides/low_level/layers/sequence.rst b/doc/paddle/api_guides/low_level/layers/sequence.rst new file mode 100644 index 0000000000000000000000000000000000000000..3c2dba96cb2c7c2378afdba78cfcd05de1b484ae --- /dev/null +++ b/doc/paddle/api_guides/low_level/layers/sequence.rst @@ -0,0 +1,112 @@ +.. 
_api_guide_sequence: + +######## +序列 +######## + +在深度学习领域许多问题涉及到对 `序列(sequence) `_ 的处理。 +从Wiki上的释义可知,序列可以表征多种物理意义,但在深度学习中,最常见的仍然是"时间序列"——一个序列包含多个时间步的信息。 + +在Paddle Fluid中,我们将序列表示为 :ref:`cn_api_fluid_LoDTensor` 。 +因为一般进行神经网络计算时都是一个batch一个batch地计算,所以我们用一个LoDTensor来存储一个mini batch的序列。 +一个LoDTensor的第0维包含该mini batch中所有序列的所有时间步,并且用LoD来记录各个序列的长度,区分不同序列。 +而在运算时,还需要根据LoD信息将LoDTensor中一个mini batch的第0维拆开成多个序列。(具体请参考上述LoD相关的文档。) +所以,对这类LoDTensor第0维的操作不能简单地使用一般的layer来进行,针对这一维的操作必须要结合LoD的信息。 +(例如,你不能用 :code:`layers.reshape` 来对一个序列的第0维进行reshape)。 + +为了实行各类针对序列的操作,我们设计了一系列序列相关的API,专门用于正确处理序列相关的操作。 +实践中,由于一个LoDTensor包括一个mini batch的序列,同一个mini batch中不同的序列通常属于多个sample,它们彼此之间不会也不应该发生相互作用。 +因此,若一个layer以两个(或多个)LoDTensor为输入(或者以一个list的LoDTensor为输入),每一个LoDTensor代表一个mini batch的序列,则第一个LoDTensor中的第一个序列只会和第二个LoDTensor中的第一个序列发生计算, +第一个LoDTensor中的第二个序列只会和第二个LoDTensor中的第二个序列发生计算,第一个LoDTensor中的第i个序列只会和第二个LoDTensor中第i个序列发生计算,依此类推。 + +**总而言之,一个LoDTensor存储一个mini batch的多个序列,其中的序列个数为batch size;多个LoDTensor间发生计算时,每个LoDTensor中的第i个序列只会和其他LoDTensor中第i个序列发生计算。理解这一点对于理解接下来序列相关的操作会至关重要。** + +1. sequence_softmax +------------------- +这个layer以一个mini batch的序列为输入,在每个序列内做softmax操作。其输出为一个mini batch相同shape的序列,但在序列内是经softmax归一化过的。 +这个layer往往用于在每个sequence内做softmax归一化。 + +API Reference 请参考 :ref:`cn_api_fluid_layers_sequence_softmax` + + +2. sequence_concat +------------------ +这个layer以一个list为输入,该list中可以含有多个LoDTensor,每个LoDTensor为一个mini batch的序列。 +该layer会将每个batch中第i个序列在时间维度上拼接成一个新序列,作为返回的batch中的第i个序列。 +理所当然地,list中每个LoDTensor的序列必须有相同的batch size。 + +API Reference 请参考 :ref:`cn_api_fluid_layers_sequence_concat` + + +3. sequence_first_step +---------------------- +这个layer以一个LoDTensor作为输入,会取出每个序列中的第一个元素(即第一个时间步的元素),并作为返回值。 + +API Reference 请参考 :ref:`cn_api_fluid_layers_sequence_first_step` + + +4. sequence_last_step +--------------------- +同 :code:`sequence_first_step` ,除了本layer是取每个序列中最后一个元素(即最后一个时间步)作为返回值。 + +API Reference 请参考 :ref:`cn_api_fluid_layers_sequence_last_step` + + +5. sequence_expand +------------------ +这个layer有两个LoDTensor的序列作为输入,并按照第二个LoDTensor中序列的LoD信息来扩展第一个batch中的序列。 +通常用来将只有一个时间步的序列(例如 :code:`sequence_first_step` 的返回结果)延展成有多个时间步的序列,以此方便与有多个时间步的序列进行运算。 + +API Reference 请参考 :ref:`cn_api_fluid_layers_sequence_expand` + + +6. sequence_expand_as +--------------------- +这个layer需要两个LoDTensor的序列作为输入,然后将第一个Tensor序列中的每一个序列延展成和第二个Tensor中对应序列等长的序列。 +不同于 :code:`sequence_expand` ,这个layer会将第一个LoDTensor中的序列严格延展为和第二个LoDTensor中的序列等长。 +如果无法延展成等长的(例如第二个batch中的序列长度不是第一个batch中序列长度的整数倍),则会报错。 + +API Reference 请参考 :ref:`cn_api_fluid_layers_sequence_expand_as` + + +7. sequence_enumerate +--------------------- +这个layer需要一个LoDTensor的序列作为输入,同时需要指定一个 :code:`win_size` 的长度。这个layer将依次取所有序列中长度为 :code:`win_size` 的子序列,并组合成新的序列。 + +API Reference 请参考 :ref:`cn_api_fluid_layers_sequence_enumerate` + + +8. sequence_reshape +------------------- +这个layer需要一个LoDTensor的序列作为输入,同时需要指定一个 :code:`new_dim` 作为新的序列的维度。 +该layer会将mini batch内每个序列reshape为new_dim给定的维度。注意,每个序列的长度会改变(因此LoD信息也会变),以适应新的形状。 + +API Reference 请参考 :ref:`cn_api_fluid_layers_sequence_reshape` + + +9. sequence_scatter +------------------- +这个layer可以将一个序列的数据scatter到另一个tensor上。这个layer有三个input,一个要被scatter的目标tensor :code:`input`; +一个是序列的数据 :code:`update` ,一个是目标tensor的上坐标 :code:`index` 。Output为scatter后的tensor,形状和 :code:`input` 相同。 + +API Reference 请参考 :ref:`cn_api_fluid_layers_sequence_scatter` + + +10. 
sequence_pad +---------------- +这个layer可以将不等长的序列补齐成等长序列。使用这个layer需要提供一个 :code:`PadValue` 和一个 :code:`padded_length`。 +前者是用来补齐序列的元素,可以是一个数也可以是一个tensor;后者是序列补齐的目标长度。 +这个layer会返回补齐后的序列,以及一个记录补齐前各个序列长度的tensor :code:`Length`。 + +API Reference 请参考 :ref:`cn_api_fluid_layers_sequence_pad` + + +11. sequence_mask +----------------- +这个layer会根据 :code:`input` 生成一个mask,:code:`input` 是一个记录了每个序列长度的tensor。 +此外这个layer还需要一个参数 :code:`maxlen` 用于指定序列中最长的序列长度。 +通常这个layer用于生成一个mask,将被pad后的序列中pad的部分过滤掉。 +:code:`input` 的长度tensor通常可以直接用 :code:`sequence_pad` 返回的 :code:`Length`。 + +API Reference 请参考 :ref:`cn_api_fluid_layers_sequence_mask` + diff --git a/doc/paddle/api_guides/low_level/layers/sequence_en.rst b/doc/paddle/api_guides/low_level/layers/sequence_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..20049e730e0a10ee708bdf90806788623a117a0a --- /dev/null +++ b/doc/paddle/api_guides/low_level/layers/sequence_en.rst @@ -0,0 +1,110 @@ +.. _api_guide_sequence_en: + +######## +Sequence +######## + +Many problems in the field of deep learning involve the processing of the `sequence `_. +From Wiki's definition, sequences can represent a variety of physical meanings, but in deep learning, the most common is still "time sequence" - a sequence containing information of multiple time steps. + +In Paddle Fluid, we represent the sequence as :ref:`api_fluid_LoDTensor`. +Because the general neural network performs computing batch by batch, we use a LoDTensor to store a mini batch of sequences. +The 0th dimension of a LoDTensor contains all the time steps of all sequences in the mini batch, and LoD is used to record the length of each sequence to distinguish different sequences. +In the calculation, it is also necessary to split the 0th dimension of a mini batch in the LoDTensor into a number of sequences according to the LoD information. (Please refer to the LoD related documents for details. ) +Therefore, the operation for the 0th dimension of LoDTensor cannot be performed simply by a general layer. The operation of this dimension must be combined with the information of LoD. +(For example, you can't reshape the 0th dimension of a sequence with :code:`layers.reshape`). + +In order to correctly implement various sequence-oriented operations, we have designed a series of sequence-related APIs. +In practice, because a LoDTensor contains a mini batch of sequences, and different sequences in the same mini batch usually belong to multiple samples, they do not and should not interact with each other. +Therefore, if a layer is input with two (or more) LoDTensors (or with a list of LoDTensors), and each LoDTensor represents a mini batch of sequences, the first sequence in the first LoDTensor will be only calculated with the first sequence in the second LoDTensor, and the second sequence in the first LoDTensor will only be calculated with the second sequence in the second LoDTensor. To conclude with, the *i'th* sequence in the first LoDTensor will only be calculated with the *i'th* sequence in the second LoDTensor, and so on. + +**In summary, a LoDTensor stores multiple sequences in a mini batch, where the number of sequences is batch size; when multiple LoDTensors are calculated, the i'th sequence in each LoDTensor will only be calculated with the i'th of the other LoDTensors. Understanding this is critical to understand the following associated operations.** + +1. sequence_softmax +------------------- +This layer takes a mini batch of sequences as input and does a softmax operation in each sequence. 
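+For example, the minimal sketch below (an added illustration; the toy scores, shapes and variable names are assumptions, not part of the original guide) builds a mini batch of two sequences with lengths 3 and 2 and applies this layer:
+
+.. code-block:: python
+
+    import numpy as np
+    import paddle.fluid as fluid
+
+    # One score per time step; lod_level=1 marks the input as a batch of sequences.
+    scores = fluid.layers.data(name='scores', shape=[1], dtype='float32', lod_level=1)
+    out = fluid.layers.sequence_softmax(input=scores)
+
+    place = fluid.CPUPlace()
+    exe = fluid.Executor(place)
+    exe.run(fluid.default_startup_program())
+    data = np.array([[1.], [2.], [3.], [4.], [5.]], dtype='float32')
+    # Two sequences of lengths 3 and 2 share the 0th dimension (5 time steps in total).
+    lod_data = fluid.create_lod_tensor(data, [[3, 2]], place)
+    result = exe.run(feed={'scores': lod_data}, fetch_list=[out], return_numpy=False)
+    # Within the first sequence (3 steps) the outputs sum to 1, and likewise for the last 2 steps.
+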
The output is a mini batch of sequences in the same shape, but it is normalized by softmax within the sequence. +This layer is often used to do softmax normalization within each sequence. + + Please refer to :ref:`api_fluid_layers_sequence_softmax` + + +2. sequence_concat +------------------ +The layer takes a list as input, which can contain multiple LoDTensors, and every LoDTensors is a mini batch of sequences. +The layer will concatenate the i-th sequence in each batch into a new sequence in the time dimension as the i'th sequence in the returned batch. +Of course, the sequences of each LoDTensor in the list must have the same batch size. + + Please refer to :ref:`api_fluid_layers_sequence_concat` + + +3. sequence_first_step +---------------------- +This layer takes a LoDTensor as input and takes the first element in each sequence (the element of the first time step) as the return value. + + Please refer to :ref:`api_fluid_layers_sequence_first_step` + + +4. sequence_last_step +--------------------- +Same as :code:`sequence_first_step` except that this layer takes the last element in each sequence (i.e. the last time step) as the return value. + + Please refer to :ref:`api_fluid_layers_sequence_last_step` + + +5. sequence_expand +------------------ +This layer has two LoDTensors of sequences as input and extends the sequence in the first batch according to the LoD information of the sequence in the second LoDTensor. +It is usually used to extend a sequence with only one time step (for example, the return result of :code:`sequence_first_step`) into a sequence with multiple time steps, which is convenient for calculations with sequences composed of multiple time steps. + + Please refer to :ref:`api_fluid_layers_sequence_expand` + + +6. sequence_expand_as +--------------------- +This layer takes two LoDTensors of sequences as input and then extends each sequence in the first Tensor to a sequence with the same length as the corresponding one in the second Tensor. +Unlike :code:`sequence_expand` , this layer will strictly extend the sequence in the first LoDTensor to have the same length as the corresponding one in the second LoDTensor. +If it cannot be extended to the same length (for example, the sequence length of the second batch is not an integer multiple of the sequence length of the first batch), an error will be reported. + + Please refer to :ref:`api_fluid_layers_sequence_expand_as` + + +7. sequence_enumerate +--------------------- +This layer takes a LodTensor of sequences as input and also specifies the length of a :code:`win_size`. This layer will take a subsequence of length :code:`win_size` in all sequences and combine them into a new sequence. + + Please refer to :ref:`api_fluid_layers_sequence_enumerate` + + +8. sequence_reshape +------------------- +This layer requires a LoDTensor of sequences as input, and you need to specify a :code:`new_dim` as the dimension of the new sequence. +The layer will reshape each sequence in the mini batch to the dimension given by new_dim. Note that the length of each sequence will be changed (so does the LoD information) to accommodate the new shape. + + Please refer to :ref:`api_fluid_layers_sequence_reshape` + + +9. sequence_scatter +------------------- +This layer can scatter a sequence of data onto another tensor. This layer has three inputs, one is a target tensor to be scattered :code:`input`; +One is the sequence of data to scatter :code:`update` ; One is the upper coordinate of the target tensor :code:`index` . 
Output is the tensor after scatter, whose shape is the same as :code:`input`. + + Please refer to :ref:`api_fluid_layers_sequence_scatter` + + +10. sequence_pad +---------------- +This layer can pad sequences of unequal length into equal-length sequences. To use this layer you need to provide a :code:`PadValue` and a :code:`padded_length`. +The former is the element used to pad the sequence, it can be a number or a tensor; the latter is the target length of the sequence. +This layer will return the padded sequence, and a tensor :code:`Length` of the length for each sequence before padding. + + Please refer to :ref:`api_fluid_layers_sequence_pad` + + +11. sequence_mask +----------------- +This layer will generate a mask based on :code:`input`, where the :code:`input` is a tensor that records the length of each sequence. +In addition, this layer requires a parameter :code:`maxlen` to specify the largest sequence length in the sequence. +Usually, this layer is used to generate a mask that will filter away the portion of the paddings in the sequence. +The :code:`input` tensor can usually directly use the returned :code:`Length` from :code:`sequence_pad` . + + Please refer to :ref:`api_fluid_layers_sequence_mask` diff --git a/doc/paddle/api_guides/low_level/layers/sparse_update.rst b/doc/paddle/api_guides/low_level/layers/sparse_update.rst new file mode 100644 index 0000000000000000000000000000000000000000..55357da92d6fbbc66b32d22052ac35c50601bcbb --- /dev/null +++ b/doc/paddle/api_guides/low_level/layers/sparse_update.rst @@ -0,0 +1,45 @@ +.. _api_guide_sparse_update: + +##### +稀疏更新 +##### + +Fluid的 :ref:`cn_api_fluid_layers_embedding` 层在单机训练和分布式训练时,均可以支持“稀疏更新”,即梯度以sparse tensor 结构存储,只保存梯度不为0的行。 +在分布式训练中,对于较大的embedding层,开启稀疏更新有助于减少通信数据量,提升训练速度。 + +在paddle内部,我们用lookup_table来实现embedding。下边这张图说明了embedding在正向和反向计算的过程: + +如图所示:一个Tensor中有两行不为0,正向计算的过程中,我们使用ids存储不为0的行,并使用对应的两行数据来进行计算;反向更新的过程也只更新这两行。 + +.. image:: ../../../images/lookup_table_training.png + :scale: 50 % + +embedding使用例子: +--------------------- + +API详细使用方法参考 :ref:`cn_api_fluid_layers_embedding` ,以下是一个简单的例子: + +.. code-block:: python + + DICT_SIZE = 10000 * 10 + EMBED_SIZE = 64 + IS_SPARSE = False + def word_emb(word, dict_size=DICT_SIZE, embed_size=EMBED_SIZE): + embed = fluid.layers.embedding( + input=word, + size=[dict_size, embed_size], + dtype='float32', + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Normal(scale=1/math.sqrt(dict_size))), + is_sparse=IS_SPARSE, + is_distributed=False) + return embed + +以上参数中: + +- :code:`is_sparse` : 反向计算的时候梯度是否为sparse tensor。如果不设置,梯度是一个 :ref:`Lod_Tensor ` 。默认为False。 + +- :code:`is_distributed` : 标志是否是用在分布式的场景下。一般大规模稀疏更新(embedding的第0维维度很大,比如几百万以上)才需要设置。具体可以参考大规模稀疏的API guide :ref:`cn_api_guide_async_training` 。默认为False。 + +- API汇总: + - :ref:`cn_api_fluid_layers_embedding` diff --git a/doc/paddle/api_guides/low_level/layers/sparse_update_en.rst b/doc/paddle/api_guides/low_level/layers/sparse_update_en.rst new file mode 100755 index 0000000000000000000000000000000000000000..f97ef279b8f8bc8dd5fb5beb24d13dc9815eb942 --- /dev/null +++ b/doc/paddle/api_guides/low_level/layers/sparse_update_en.rst @@ -0,0 +1,45 @@ +.. _api_guide_sparse_update_en: + +############### +Sparse update +############### + +Fluid's :ref:`api_fluid_layers_embedding` layer supports "sparse updates" in both single-node and distributed training, which means gradients are stored in a sparse tensor structure where only rows with non-zero gradients are saved. 
+In distributed training, for larger embedding layers, sparse updates reduce the amount of communication data and speed up training. + +In paddle, we use lookup_table to implement embedding. The figure below illustrates the process of embedding in the forward and backward calculations: + +As shown in the figure: two rows in a Tensor are not 0. In the process of forward calculation, we use ids to store rows that are not 0, and use the corresponding two rows of data for calculation; the process of backward update is only to update the two lines. + +.. image:: ../../../images/lookup_table_training.png + :scale: 50 % + +Example +-------------------------- + +API reference :ref:`api_fluid_layers_embedding` . Here is a simple example: + +.. code-block:: python + + DICT_SIZE = 10000 * 10 + EMBED_SIZE = 64 + IS_SPARSE = False + def word_emb(word, dict_size=DICT_SIZE, embed_size=EMBED_SIZE): + embed = fluid.layers.embedding( + input=word, + size=[dict_size, embed_size], + dtype='float32', + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Normal(scale=1/math.sqrt(dict_size))), + is_sparse=IS_SPARSE, + is_distributed=False) + return embed + +The parameters: + +- :code:`is_sparse` : Whether the gradient is a sparse tensor in the backward calculation. If not set, the gradient is a `LodTensor `_ . The default is False. + +- :code:`is_distributed` : Whether the current training is in a distributed scenario. Generally, this parameter can only be set in large-scale sparse updates (the 0th dimension of embedding is very large, such as several million or more). For details, please refer to the large-scale sparse API guide :ref:`api_guide_async_training`. The default is False. + +- API : + - :ref:`api_fluid_layers_embedding` diff --git a/doc/paddle/api_guides/low_level/layers/tensor.rst b/doc/paddle/api_guides/low_level/layers/tensor.rst new file mode 100644 index 0000000000000000000000000000000000000000..87f953b5a0199fc26f73c9bc020bdb76dedf09f8 --- /dev/null +++ b/doc/paddle/api_guides/low_level/layers/tensor.rst @@ -0,0 +1,141 @@ +.. _api_guide_tensor: + +######## +张量 +######## + +Fluid 中使用两种数据结构来承载数据,分别是 `Tensor 和 LoD_Tensor <../../../user_guides/howto/basic_concept/lod_tensor.html>`_ 。 其中 LoD-Tensor 是 Fluid 的特有概念,它在 Tensor 基础上附加了序列信息。框架中可传输的数据包括:输入、输出、网络中的可学习参数,全部统一使用 LoD-Tensor 表示,Tensor 可以看作是一种特殊的 LoD-Tensor。 + +下面介绍这两种数据的相关操作。 + +Tensor +======= + +1. create_tensor +--------------------- +Tensor用于在框架中承载数据,使用 :code:`create_tensor` 可以创建一个指定数据类型的Lod-Tensor变量, + +API reference 请参考: :ref:`cn_api_fluid_layers_create_tensor` + + +2. create_parameter +--------------------- +神经网络的训练过程是一个对参数的学习过程,Fluid 使用 :code:`create_parameter` 创建一个可学习的参数。该参数的值可以被operator改变。 + +API reference 请参考::ref:`cn_api_fluid_layers_create_parameter` + + + +3. create_global_var +--------------------- +Fluid 使用 :code:`create_global_var` 创建一个全局tensor,通过此 API 可以指定被创建 Tensor 变量的数据类型、形状和值。 + +API reference 请参考::ref:`cn_api_fluid_layers_create_global_var` + + +4. cast +--------------- + +Fluid 使用 :code:`cast` 将数据转换为指定类型。 + +API reference 请参考::ref:`cn_api_fluid_layers_cast` + + +5. concat +---------------- + +Fluid 使用 :code:`concat` 将输入数据沿指定维度连接。 + +API reference 请参考::ref:`cn_api_fluid_layers_concat` + + +6. sums +---------------- + +Fluid 使用 :code:`sums` 执行对输入数据的加和。 + +API reference 请参考::ref:`cn_api_fluid_layers_sums` + +7. fill_constant +----------------- + +Fluid 使用 :code:`fill_constant` 创建一个具有特定形状和类型的 Tensor。可以通过 :code:`value` 设置该变量的初始值。 + +API reference 请参考: :ref:`cn_api_fluid_layers_fill_constant` + +8. 
assign +--------------- + +Fluid 使用 :code:`assign` 复制一个变量。 + +API reference 请参考::ref:`cn_api_fluid_layers_assign` + +9. argmin +-------------- + +Fluid 使用 :code:`argmin` 计算输入 Tensor 指定轴上最小元素的索引。 + +API reference 请参考::ref:`cn_api_fluid_layers_assign` + +10. argmax +----------- + +Fluid 使用 :code:`argmax` 计算输入 Tensor 指定轴上最大元素的索引。 + +API reference 请参考::ref:`cn_api_fluid_layers_argmax` + +11. argsort +------------ + +Fluid 使用 :code:`argsort` 对输入 Tensor 在指定轴上进行排序,并返回排序后的数据变量及其对应的索引值。 + +API reference 请参考: :ref:`cn_api_fluid_layers_argsort` + +12. ones +------------- + +Fluid 使用 :code:`ones` 创建一个指定大小和数据类型的Tensor,且初始值为1。 + +API reference 请参考: :ref:`cn_api_fluid_layers_ones` + +13. zeros +--------------- + +Fluid 使用 :code:`zeros` 创建一个指定大小和数据类型的Tensor,且初始值为0。 + +API reference 请参考: :ref:`cn_api_fluid_layers_zeros` + +14. reverse +------------------- + +Fluid 使用 :code:`reverse` 沿指定轴反转 Tensor。 + +API reference 请参考: :ref:`cn_api_fluid_layers_reverse` + + + +LoD-Tensor +============ + +LoD-Tensor非常适用于序列数据,相关知识可以参考阅读 `LoD_Tensor <../../../user_guides/howto/basic_concept/lod_tensor.html>`_ 。 + +1. create_lod_tensor +----------------------- + +Fluid 使用 :code:`create_lod_tensor` 基于numpy数组、列表或现有 LoD_Tensor 创建拥有新的层级信息的 LoD_Tensor。 + +API reference 请参考: :ref:`cn_api_fluid_create_lod_tensor` + +2. create_random_int_lodtensor +---------------------------------- + +Fluid 使用 :code:`create_random_int_lodtensor` 创建一个由随机整数组成的 LoD_Tensor。 + +API reference 请参考: :ref:`cn_api_fluid_create_random_int_lodtensor` + +3. reorder_lod_tensor_by_rank +--------------------------------- + +Fluid 使用 :code:`reorder_lod_tensor_by_rank` 对输入 LoD_Tensor 的序列信息按指定顺序重拍。 + +API reference 请参考::ref:`cn_api_fluid_layers_reorder_lod_tensor_by_rank` diff --git a/doc/paddle/api_guides/low_level/layers/tensor_en.rst b/doc/paddle/api_guides/low_level/layers/tensor_en.rst new file mode 100755 index 0000000000000000000000000000000000000000..4a14864d2e6a10d6b8e3fb10ef6769b4cfb3ed52 --- /dev/null +++ b/doc/paddle/api_guides/low_level/layers/tensor_en.rst @@ -0,0 +1,141 @@ +.. _api_guide_tensor_en: + +######## +Tensor +######## + +There are two data structures used in Fluid to host the data, namely `Tensor and LoD_Tensor <../../../user_guides/howto/basic_concept/lod_tensor_en.html>`_ . LoD-Tensor is a unique concept of Fluid, which appends sequence information to Tensor. The data that can be transferred in the framework includes: input, output, and learnable parameters in the network. All of them are uniformly represented by LoD-Tensor. In addition, tensor can be regarded as a special LoD-Tensor. + +Now let's take a closer look at the operations related to these two types of data. + +Tensor +======= + +1. create_tensor +--------------------- +Tensor is used to carry data in the framework, using :code:`create_tensor` to create a Lod-Tensor variable of the specified the data type. + +API reference : :ref:`api_fluid_layers_create_tensor` + + +2. create_parameter +--------------------- +The neural network training process is a learning process for parameters. Fluid uses :code:`create_parameter` to create a learnable parameter. The value of this parameter can be changed by the operator. + +API reference : :ref:`api_fluid_layers_create_parameter` + + + +3. create_global_var +--------------------- +Fluid uses :code:`create_global_var` to create a global tensor and this API allows you to specify the data type, shape, and value of the Tensor variable being created. + +API reference : :ref:`api_fluid_layers_create_global_var` + + +4. 
cast +--------------- + +Fluid uses :code:`cast` to convert the data to the specified type. + +API reference : :ref:`api_fluid_layers_cast` + + +5.concat +---------------- + +Fluid uses :code:`concat` to concatenate input data along a specified dimension. + +API reference : :ref:`api_fluid_layers_concat` + + +6. sums +---------------- + +Fluid uses :code:`sums` to sum up the input data. + +API reference : :ref:`api_fluid_layers_sums` + +7. fill_constant +----------------- + +Fluid uses :code:`fill_constant` to create a Tensor with a specific shape and type. The initial value of this variable can be set via :code:`value`. + +API reference : :ref:`api_fluid_layers_fill_constant` + +8. assign +--------------- + +Fluid uses :code:`assign` to duplicate a variable. + +API reference : :ref:`api_fluid_layers_assign` + +9. argmin +-------------- + +Fluid uses :code:`argmin` to calculate the index of the smallest element on the specified axis of Tensor. + +API reference : :ref:`api_fluid_layers_argmin` + +10. argmax +----------- + +Fluid uses :code:`argmax` to calculate the index of the largest element on the specified axis of Tensor. + +API reference : :ref:`api_fluid_layers_argmax` + +11. argsort +------------ + +Fluid uses :code:`argsort` to sort the input Tensor on the specified axis and it will return the sorted data variables and their corresponding index values. + +API reference : :ref:`api_fluid_layers_argsort` + +12. ones +------------- + +Fluid uses :code:`ones` to create a Tensor of the specified size and data type with an initial value of 1. + +API reference : :ref:`api_fluid_layers_ones` + +13. zeros +--------------- + +Fluid uses :code:`zeros` to create a Tensor of the specified size and data type with an initial value of zero. + +API reference : :ref:`api_fluid_layers_zeros` + +14. reverse +------------------- + +Fluid uses :code:`reverse` to invert Tensor along the specified axis. + +API reference : :ref:`api_fluid_layers_reverse` + + + +LoD-Tensor +============ + +LoD-Tensor is very suitable for sequence data. For related knowledge, please read `Tensor and LoD_Tensor <../../../user_guides/howto/basic_concept/lod_tensor_en.html>`_ . + +1.create_lod_tensor +----------------------- + +Fluid uses :code:`create_lod_tensor` to create a LoD_Tensor with new hierarchical information based on a numpy array, a list, or an existing LoD_Tensor. + +API reference : :ref:`api_fluid_create_lod_tensor` + +2. create_random_int_lodtensor +---------------------------------- + +Fluid uses :code:`create_random_int_lodtensor` to create a LoD_Tensor composed of random integers. + +API reference : :ref:`api_fluid_create_random_int_lodtensor` + +3. reorder_lod_tensor_by_rank +--------------------------------- + +Fluid uses :code:`reorder_lod_tensor_by_rank` to reorder the sequence information of the input LoD_Tensor in the specified order. + +API reference : :ref:`api_fluid_layers_reorder_lod_tensor_by_rank` diff --git a/doc/paddle/api_guides/low_level/metrics.rst b/doc/paddle/api_guides/low_level/metrics.rst new file mode 100644 index 0000000000000000000000000000000000000000..4044483704a37a779b6e65a41565fd2a3400ed4f --- /dev/null +++ b/doc/paddle/api_guides/low_level/metrics.rst @@ -0,0 +1,51 @@ +.. 
_api_guide_metrics: + + +评价指标 +######### +在神经网络训练过程中或者训练完成后,需要评价模型的训练效果。评价的方法一般是计算全体预测值和全体真值(label)之间的距离,不同类型的任务会使用不同的评价方法,或者综合使用多个评价方法。在具体的任务中,可以选用一种或者多种评价方法。下面对常用的评价方法按照任务类型做介绍。 + +分类任务评价 +------------------ +分类任务中最常用的是二分类,而多分类任务也可以转化为多个二分类任务的组合,二分类任务常用的评价指标有准确率、正确率、召回率、AUC和平均准确度。 + +- 准确率: :code:`Precision` ,用来衡量二分类中召回真值和召回值的比例。 + + API Reference 请参考 :ref:`cn_api_fluid_metrics_Precision` + +- 正确率: :code:`Accuracy` ,用来衡量二分类中召回真值和总样本数的比例。需要注意的是,准确率和正确率的定义是不同的,可以类比于误差分析中的 :code:`Variance` 和 :code:`Bias` 。 + + API Reference 请参考 :ref:`cn_api_fluid_metrics_Accuracy` + + +- 召回率: :code:`Recall` ,用来衡量二分类中召回值和总样本数的比例。准确率和召回率的选取相互制约,实际模型中需要进行权衡,可以参考文档 `Precision_and_recall `_ 。 + + API Reference 请参考 :ref:`cn_api_fluid_metrics_Recall` + +- AUC: :code:`Area Under Curve`, 适用于二分类的分类模型评估,用来计算 `ROC曲线的累积面积 `_。:code:`Auc` 通过python计算实现,如果关注性能,可以使用 :code:`fluid.layers.auc` 代替。 + + API Reference 请参考 :ref:`cn_api_fluid_metrics_Auc` + +- 平均准确度: :code:`Average Precision` ,常用在Faster R-CNN和SSD等物体检测任务中。在不同召回条件下,计算了准确率的平均值,具体可以参考文档 `Average-precision `_ 和 `SSD: Single Shot MultiBox Detector `_。 + + API Reference 请参考 :ref:`cn_api_fluid_metrics_DetectionMAP` + + + +序列标注任务评价 +------------------ +序列标注任务中,token的分组称为语块(chunk),模型会同时将输入的token分组和分类,常用的评估方法是语块评估方法。 + +- 语块评估方法: :code:`ChunkEvaluator` ,接收 :code:`chunk_eval` 接口的输出,累积每一个minibatch的语块统计值,最后计算准确率、召回率和F1值。:code:`ChunkEvaluator` 支持IOB, IOE, IOBES和IO四种标注模式。可以参考文档 `Chunking with Support Vector Machines `_ 。 + + API Reference 请参考 :ref:`cn_api_fluid_metrics_ChunkEvaluator` + + +生成任务评价 +------------------ +生成任务会依据输入直接产生输出。对应NLP任务中(比如语音识别),则生成新字符串。评估生成字符串和目标字符串之间距离的方法也有多种,比如多分类评估方法,而另外一种常用的方法叫做编辑距离。 + +- 编辑距离: :code:`EditDistance` ,用来衡量两个字符串的相似度。可以参考文档 `Edit_distance `_。 + + API Reference 请参考 :ref:`cn_api_fluid_metrics_EditDistance` + diff --git a/doc/paddle/api_guides/low_level/metrics_en.rst b/doc/paddle/api_guides/low_level/metrics_en.rst new file mode 100755 index 0000000000000000000000000000000000000000..358e4d33652788c7c3a5b683a9f940631d43bf5b --- /dev/null +++ b/doc/paddle/api_guides/low_level/metrics_en.rst @@ -0,0 +1,50 @@ +.. _api_guide_metrics_en: + + +Metrics +######### +During or after the training of the neural network, it is necessary to evaluate the training effect of the model. The method of evaluation generally is calculating the distance between the overall predicted value and the overall label. Different types of tasks are applied with different evaluation methods, or with a combination of evaluation methods. In a specific task, one or more evaluation methods can be selected. Now let's take a look at commonly used evaluation methods grouped by the type of task. + +Classification task evaluation +------------------------------- +The most common classification task is the binary classification task, and the multi-classification task can also be transformed into a combination of multiple binary classification tasks. The metrics commonly adopted in the two-category tasks are accuracy, correctness, recall rate, AUC and average accuracy. + +- :code:`Precision` , which is used to measure the proportion of recalled ground-truth values in recalled values ​​in binary classification. + +  For API Reference, please refer to :ref:`api_fluid_metrics_Precision` + +- :code:`Accuracy`, which is used to measure the proportion of the recalled ground-truth value in the total number of samples in binary classification. 
It should be noted that the definitions of precision and accuracy are different and can be analogized to :code:`Variance` and :code:`Bias` in error analysis. + +  For API Reference, please refer to :ref:`api_fluid_metrics_Accuracy` + + +- :code:`Recall`, which is used to measure the ratio of the recalled values to the total number of samples in binary classification. The choice of accuracy and recall rate is mutually constrained, and trade-offs are needed in the actual model. Refer to the documentation `Precision_and_recall `_ . + +  For API Reference, please refer to :ref:`api_fluid_metrics_Recall` + +- :code:`Area Under Curve`, a classification model for binary classification, used to calculate the cumulative area of ​​the `ROC curve `_ . :code:`Auc` is implemented via python. If you are concerned about performance, you can use :code:`fluid.layers.auc` instead. + +  For API Reference, please refer to :ref:`api_fluid_metrics_Auc` + +- :code:`Average Precision`, commonly used in object detection tasks such as Faster R-CNN and SSD. The average precision is calculated under different recall conditions. For details, please refer to the document `Average precision `_ and `SSD Single Shot MultiBox Detector `_ . + +  For API Reference, please refer to :ref:`api_fluid_metrics_DetectionMAP` + + + +Sequence labeling task evaluation +---------------------------------- +In the sequence labeling task, the group of tokens is called a chunk, and the model will group and classify the input tokens at the same time. The commonly used evaluation method is the chunk evaluation method. + +- The chunk evaluation method :code:`ChunkEvaluator` receives the output of the :code:`chunk_eval` interface, and accumulates the statistics of chunks in each mini-batch , and finally calculates the accuracy, recall and F1 values. :code:`ChunkEvaluator` supports four labeling modes: IOB, IOE, IOBES and IO. You can refer to the documentation `Chunking with Support Vector Machines `_. + +  For API Reference, please refer to :ref:`api_fluid_metrics_ChunkEvaluator` + + +Generation/Synthesis task evaluation +---------------------------- +The generation task produces output directly from the input. In NLP tasks (such as speech recognition), a new string is generated. There are several ways to evaluate the distance between a generated string and a target string, such as a multi-classification evaluation method, and another commonly used method is called editing distance. + +- Edit distance: :code:`EditDistance` to measure the similarity of two strings. You can refer to the documentation `Edit_distance `_. + +  For API Reference, please refer to :ref:`api_fluid_metrics_EditDistance` \ No newline at end of file diff --git a/doc/paddle/api_guides/low_level/model_save_reader.rst b/doc/paddle/api_guides/low_level/model_save_reader.rst new file mode 100644 index 0000000000000000000000000000000000000000..d14e19b48e678faafd5c88bf108053dcd0ebb894 --- /dev/null +++ b/doc/paddle/api_guides/low_level/model_save_reader.rst @@ -0,0 +1,59 @@ +.. 
_api_guide_model_save_reader: + +######### +模型保存与加载 +######### + +模型的保存与加载主要涉及到如下八个API: +:code:`fluid.io.save_vars`、:code:`fluid.io.save_params`、:code:`fluid.io.save_persistables`、:code:`fluid.io.save_inference_model`、:code:`fluid.io.load_vars`、:code:`fluid.io.load_params`、:code:`fluid.io.load_persistables` 和 :code:`fluid.io.load_inference_model`。 + +变量、持久性变量和参数 +==================== + +在 :code:`Paddle` 中,算子(:code:`Operator`)的每一个输入和输出都是一个变量(:code:`Variable`),而参数(:code:`Parameter`)是变量(:code:`Variable`)的子类。持久性变量(:code:`Persistables`)是一种在每次迭代结束后均不会被删除的变量。参数是一种持久性变量,其在每次迭代后都会被优化器(:ref:`api_guide_optimizer`)更新。训练神经网络本质上就是在更新参数。 + +模型保存API介绍 +==================== + +- :code:`fluid.io.save_vars`:通过执行器(:ref:`api_guide_executor`)保存变量到指定的目录中。保存变量的方式有两种: + + 1)通过接口中的 :code:`vars` 指定需要保存的变量列表。 + + 2)将一个已经存在的程序(:code:`Program`)赋值给接口中的 :code:`main_program`,然后这个程序中的所有变量都将被保存下来。 + + 第一种保存方式的优先级要高于第二种。 + + API Reference 请参考 :ref:`cn_api_fluid_io_save_vars`。 + +- :code:`fluid.io.save_params`:通过接口中的 :code:`main_program` 指定好程序(:code:`Program`),该接口会将所指定程序中的全部参数(:code:`Parameter`)过滤出来,并将它们保存到 :code:`dirname` 指定的文件夹或 :code:`filename` 指定的文件中。 + + API Reference 请参考 :ref:`cn_api_fluid_io_save_params`。 + +- :code:`fluid.io.save_persistables`:通过接口中的 :code:`main_program` 指定好程序(:code:`Program`),该接口会将所指定程序中的全部持久性变量(:code:`persistable==True`)过滤出来,并将它们保存到 :code:`dirname` 指定的文件夹或 :code:`filename` 指定的文件中。 + + API Reference 请参考 :ref:`cn_api_fluid_io_save_persistables`。 + +- :code:`fluid.io.save_inference_model`:请参考 :ref:`api_guide_inference`。 + +模型加载API介绍 +==================== + +- :code:`fluid.io.load_vars`:通过执行器(:code:`Executor`)加载指定目录中的变量。加载变量的方式有两种: + + 1)通过接口中的 :code:`vars` 指定需要加载的变量列表。 + + 2)将一个已经存在的程序(:code:`Program`)赋值给接口中的 :code:`main_program`,然后这个程序中的所有变量都将被加载。 + + 第一种加载方式的优先级要高于第二种。 + + API Reference 请参考 :ref:`cn_api_fluid_io_load_vars`。 + +- :code:`fluid.io.load_params`:该接口从 :code:`main_program` 指定的程序中过滤出全部参数(:code:`Parameter`),并试图从 :code:`dirname` 指定的文件夹或 :code:`filename` 指定的文件中加载这些参数。 + + API Reference 请参考 :ref:`cn_api_fluid_io_load_params`。 + +- :code:`fluid.io.load_persistables`:该接口从 :code:`main_program` 指定的程序中过滤出全部持久性变量(:code:`persistable==True`),并试图从 :code:`dirname` 指定的文件夹或 :code:`filename` 指定的文件中加载这些持久性变量。 + + API Reference 请参考 :ref:`cn_api_fluid_io_load_persistables`。 + +- :code:`fluid.io.load_inference_model`:请参考 :ref:`api_guide_inference`。 diff --git a/doc/paddle/api_guides/low_level/model_save_reader_en.rst b/doc/paddle/api_guides/low_level/model_save_reader_en.rst new file mode 100755 index 0000000000000000000000000000000000000000..bc3f575c7a70cb7772c50a39ac8d96dada506bb7 --- /dev/null +++ b/doc/paddle/api_guides/low_level/model_save_reader_en.rst @@ -0,0 +1,59 @@ +.. _api_guide_model_save_reader_en: + +####################### +Save and Load a Model +####################### + +To save and load model, there are eight APIs playing an important role: +:code:`fluid.io.save_vars`, :code:`fluid.io.save_params`, :code:`fluid.io.save_persistables`, :code:`fluid.io.save_inference_model`, :code:`fluid.io.load_vars`, :code:`fluid.io.load_params`, :code:`fluid.io.load_persistables` and :code:`fluid.io.load_inference_model` . + +Variables, Persistables and Parameters +================================================ + +In :code:`Paddle` , every input and output of operator( :code:`Operator` ) is a variable( :code:`Variable` ), and parameter( :code:`Parameter` ) is a derived class of Variable( :code:`Variable` ). Persistables (:code:`Persistables`) are variables that won't be deleted after each iteration. 
Parameter is a kind of persistable variable which will be updated by optimizer ( :ref:`api_guide_optimizer_en` ) after each iteration. Training of neural network in essence is to update parameters. + +Introduction to APIs for saving a model +======================================== + +- :code:`fluid.io.save_vars`: Variables are saved in specified directory by executor( :ref:`api_guide_executor_en` ). There are two ways to save variables: + + 1)Set :code:`vars` in the API to assign the variable list to be saved. + + 2)Assign an existed program( :code:`Program` ) to :code:`main_program` in the API, and then all variables in the program will be saved. + + The first one has a higher priority than the second one. + + For API Reference , please refer to :ref:`api_fluid_io_save_vars`. + +- :code:`fluid.io.save_params`: Set :code:`main_program` in the API with the model Program( :code:`Program` ). This API will filter all parameters( :code:`Parameter` ) of targeted program and save them in folder assigned by :code:`dirname` or file assigned by :code:`filename` . + + For API Reference , please refer to :ref:`api_fluid_io_save_params`. + +- :code:`fluid.io.save_persistables`: :code:`main_program` of API assigns program( :code:`Program` ). This API will filter all persistables( :code:`persistable==True` ) of targeted program and save them in folder assigned by :code:`dirname` or file assigned by :code:`filename` . + + For API Reference, please refer to :ref:`api_fluid_io_save_persistables`. + +- :code:`fluid.io.save_inference_model`: please refer to :ref:`api_guide_inference_en`. + +Introduction to APIs for loading a model +======================================== + +- :code:`fluid.io.load_vars`: Executor( :code:`Executor` ) loads variables into the target directory. There are two ways to load variables: + + 1):code:`vars` in the API assigns variable list to be loaded. + + 2)Assign an existed program( :code:`Program` ) to the :code:`main_program` field in the API, and then all variables in the program will be loaded. + + The first loading method has higher priority than the second one. + + For API Reference, please refer to :ref:`api_fluid_io_load_vars`. + +- :code:`fluid.io.load_params`: This API filters all parameters( :code:`Parameter` ) in program assigned by :code:`main_program` and load these parameters from folder assigned by :code:`dirname` or file assigned by :code:`filename` . + + For API Reference, please refer to :ref:`api_fluid_io_load_params` . + +- :code:`fluid.io.load_persistables`:This API filters all persistables( :code:`persistable==True` ) in program assigned by :code:`main_program` and load these persistables from folder assigned by :code:`dirname` or file assigned by :code:`filename` . + + For API Reference, please refer to :ref:`api_fluid_io_load_persistables` . + +- :code:`fluid.io.load_inference_model`: please refer to :ref:`api_guide_inference_en` . \ No newline at end of file diff --git a/doc/paddle/api_guides/low_level/nets.rst b/doc/paddle/api_guides/low_level/nets.rst new file mode 100644 index 0000000000000000000000000000000000000000..5e7f1fef7239b65e87a1cf3a7714aad6167deb8a --- /dev/null +++ b/doc/paddle/api_guides/low_level/nets.rst @@ -0,0 +1,62 @@ +.. 
_api_guide_nets: + +########### +复杂网络 +########### + +在处理复杂功能时,我们通常需要写大量的代码来构建复杂的 `神经网络 `_ 。 +因此,为了方便用户更加容易地搭建复杂网络模型,我们提供了一些比较常用的基本函数模块,以此来简化用户的代码量,从而降低开发成本。 +这些模块通常是由细粒度的函数根据一定的逻辑拼接组合而成,实现代码请参考 `nets.py `_ 。 + +1.simple_img_conv_pool +---------------------- + +:code:`simple_img_conv_pool` 是由 :ref:`cn_api_fluid_layers_conv2d` 与 :ref:`cn_api_fluid_layers_pool2d` 串联而成。 +该模块在图像分类模型中广泛使用,比如应用在 `MNIST `_ 数字分类的问题。 + +API Reference 请参考 :ref:`cn_api_fluid_nets_simple_img_conv_pool` + + +2.img_conv_group +---------------- + +:code:`img_conv_group` 是由 :ref:`cn_api_fluid_layers_conv2d` , :ref:`cn_api_fluid_layers_batch_norm`, :ref:`cn_api_fluid_layers_dropout` 和 :ref:`cn_api_fluid_layers_pool2d` 组成。 +该模块可以实现多个 :ref:`cn_api_fluid_layers_conv2d` , :ref:`cn_api_fluid_layers_batch_norm` 和 :ref:`cn_api_fluid_layers_dropout` 的串联单元与一个 :ref:`cn_api_fluid_layers_pool2d` 的组合。 +其中, :ref:`cn_api_fluid_layers_conv2d` , :ref:`cn_api_fluid_layers_batch_norm` 和 :ref:`cn_api_fluid_layers_dropout` 的数量都可以分别控制,从而得到多样的组合。 +该模块广泛使用在比较复杂的图像分类任务中,比如 `VGG `_ 。 + +API Reference 请参考 :ref:`cn_api_fluid_nets_img_conv_group` + + +3.sequence_conv_pool +-------------------- + +:code:`sequence_conv_pool` 是由 :ref:`cn_api_fluid_layers_sequence_conv` 与 :ref:`cn_api_fluid_layers_sequence_pool` 串联而成。 +该模块在 `自然语言处理 `_ 以及 `语音识别 `_ 等领域均有广泛应用, +比如 `文本分类模型 `_ , +`TagSpace `_ 以及 +`Multi-view Simnet `_ 等模型。 + +API Reference 请参考 :ref:`cn_api_fluid_nets_sequence_conv_pool` + + +4.glu +----- +:code:`glu` 全称 Gated Linear Units, 来源于论文 `Language Modeling with Gated Convolutional Networks `_ ,由 :ref:`cn_api_fluid_layers_split` , :ref:`cn_api_fluid_layers_sigmoid` 和 :ref:`cn_api_fluid_layers_elementwise_mul` 组成。 +它会把输入数据均分为2等份,并对第二部分求 `Sigmoid `_ , 然后再与第一部分数据求点乘得到输出。 + +API Reference 请参考 :ref:`cn_api_fluid_nets_glu` + + +5.scaled_dot_product_attention +------------------------------ +:code:`scaled_dot_product_attention` 来源于论文 `Attention Is All You Need `_ ,主要是由 :ref:`cn_api_fluid_layers_fc` 和 :ref:`cn_api_fluid_layers_softmax` 组成。 +对于输入数据 :code:`Queries` , :code:`Key` 和 :code:`Values` 按照如下公式求出 :code:`Attention` 。 + +.. math:: + Attention(Q, K, V)= softmax(QK^\mathrm{T})V + +该模块广泛使用在 `机器翻译 `_ 的模型中,比如 `Transformer `_ 。 + +API Reference 请参考 :ref:`cn_api_fluid_nets_scaled_dot_product_attention` + diff --git a/doc/paddle/api_guides/low_level/nets_en.rst b/doc/paddle/api_guides/low_level/nets_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..b31d834917dad9bc1223f511d14673ceb171bcb3 --- /dev/null +++ b/doc/paddle/api_guides/low_level/nets_en.rst @@ -0,0 +1,59 @@ +.. _api_guide_nets_en: + +################ +Complex Networks +################ + +When dealing with complex functions, we usually need to code a lot to build a complex `Neural Network `_ . +Therefore, in order to make it easier for users to build complex network models, we provide some common basic function modules to simplify the user's code and reduce development costs. +These modules are usually composed of fine-grained functions combined based on certain logics. For implementation, please refer to `nets `_ . + +1.simple_img_conv_pool +---------------------- + +:code:`simple_img_conv_pool` is got by concatenating :ref:`api_fluid_layers_conv2d` with :ref:`api_fluid_layers_pool2d` . +This module is widely used in image classification models, such as the `MNIST `_ number classification. 
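+A minimal usage sketch is shown below. The input is assumed to be a single-channel 28x28 image (as in MNIST), and the filter and pooling sizes are only illustrative:
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+
+    # a single-channel 28x28 image, e.g. one MNIST digit
+    img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32')
+
+    # conv2d followed by pool2d in a single call
+    conv_pool = fluid.nets.simple_img_conv_pool(
+        input=img,
+        num_filters=20,     # number of convolution filters
+        filter_size=5,      # convolution kernel size
+        pool_size=2,        # pooling window size
+        pool_stride=2,      # pooling stride
+        act='relu')
+    prediction = fluid.layers.fc(input=conv_pool, size=10, act='softmax')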
+ +For API Reference, please refer to :ref:`api_fluid_nets_simple_img_conv_pool` + + +2.img_conv_group +---------------- + +:code:`img_conv_group` is composed of :ref:`api_fluid_layers_conv2d` , :ref:`api_fluid_layers_batch_norm`, :ref:`api_fluid_layers_dropout` and :ref:`api_fluid_layers_pool2d`. +This module can implement the combination of multiple :ref:`api_fluid_layers_conv2d` , :ref:`api_fluid_layers_batch_norm` , :ref:`api_fluid_layers_dropout` and a single :ref:`api_fluid_layers_pool2d`. +Among them, the number of :ref:`api_fluid_layers_conv2d` , :ref:`api_fluid_layers_batch_norm` and :ref:`api_fluid_layers_dropout` can be controlled separately, resulting in various combinations. +This module is widely used in more complex image classification tasks, such as `VGG `_. + +For API Reference, please refer to :ref:`api_fluid_nets_img_conv_group` + + +3.sequence_conv_pool +-------------------- + +:code:`sequence_conv_pool` is got by concatenating :ref:`api_fluid_layers_sequence_conv` with :ref:`api_fluid_layers_sequence_pool`. +The module is widely used in the field of `natural language processing `_ and `speech recognition `_ . Models such as the `text classification model `_ , +`TagSpace `_ and `Multi view Simnet `_. + +For API Reference, please refer to :ref:`api_fluid_nets_sequence_conv_pool` + + +4.glu +----- +The full name of :code:`glu` is Gated Linear Units, which originates from the paper `Language Modeling with Gated Convolutional Networks `_ . It consists of :ref:`api_fluid_layers_split` , :ref:`api_fluid_layers_sigmoid` and :ref:`api_fluid_layers_elementwise_mul`. +It divides the input data into 2 equal parts, calculates the `Sigmoid `_ of second part, and then performs dot product of the sigmoid vlaue with the first part to get the output. + +For API Reference, please refer to :ref:`api_fluid_nets_glu` + + +5.scaled_dot_product_attention +------------------------------ +:code:`scaled_dot_product_attention` originates from the paper `Attention Is All You Need `_ , mainly composed of :ref:`api_fluid_layers_fc` and :ref:`api_fluid_layers_softmax` . +For the input data :code:`Queries` , :code:`Key` and :code:`Values`, calculate the :code:`Attention` according to the following formula. + +.. math:: + Attention(Q, K, V)= softmax(QK^\mathrm{T})V + +This module is widely used in the model of `machine translation `_, such as `Transformer `_ . + +For API Reference, please refer to :ref:`api_fluid_nets_scaled_dot_product_attention` diff --git a/doc/paddle/api_guides/low_level/optimizer.rst b/doc/paddle/api_guides/low_level/optimizer.rst new file mode 100644 index 0000000000000000000000000000000000000000..c597900ecba3b40d584a3b53ba9366b1e5b00435 --- /dev/null +++ b/doc/paddle/api_guides/low_level/optimizer.rst @@ -0,0 +1,92 @@ +.. _api_guide_optimizer: + +########### +优化器 +########### + +神经网络最终是一个 `最优化问题 `_ , +在经过 `前向计算和反向传播 `_ 后, +:code:`Optimizer` 使用反向传播梯度,优化神经网络中的参数。 + +1.SGD/SGDOptimizer +------------------ + +:code:`SGD` 是实现 `随机梯度下降 `_ 的一个 :code:`Optimizer` 子类,是 `梯度下降 `_ 大类中的一种方法。 +当需要训练大量样本的时候,往往选择 :code:`SGD` 来使损失函数更快的收敛。 + +API Reference 请参考 :ref:`cn_api_fluid_optimizer_SGDOptimizer` + + +2.Momentum/MomentumOptimizer +---------------------------- + +:code:`Momentum` 优化器在 :code:`SGD` 基础上引入动量,减少了随机梯度下降过程中存在的噪声问题。 +用户在使用时可以将 :code:`ues_nesterov` 参数设置为False或True,分别对应传统 `Momentum(论文4.1节) +`_ 算法和 `Nesterov accelerated gradient(论文4.2节) +`_ 算法。 + +API Reference 请参考 :ref:`cn_api_fluid_optimizer_MomentumOptimizer` + + +3. 
Adagrad/AdagradOptimizer +--------------------------- +`Adagrad `_ 优化器可以针对不同参数样本数不平均的问题,自适应地为各个参数分配不同的学习率。 + +API Reference 请参考 :ref:`cn_api_fluid_optimizer_AdagradOptimizer` + + +4.RMSPropOptimizer +------------------ +`RMSProp优化器 `_ ,是一种自适应调整学习率的方法, +主要解决使用Adagrad后,模型训练中后期学习率急剧下降的问题。 + +API Reference 请参考 :ref:`cn_api_fluid_optimizer_RMSPropOptimizer` + + + +5.Adam/AdamOptimizer +-------------------- +`Adam `_ 的优化器是一种自适应调整学习率的方法, +适用于大多非 `凸优化 `_ 、大数据集和高维空间的场景。在实际应用中,:code:`Adam` 是最为常用的一种优化方法。 + +API Reference 请参考 :ref:`cn_api_fluid_optimizer_AdamOptimizer` + + + +6.Adamax/AdamaxOptimizer +------------------------ + +`Adamax `_ 是 :code:`Adam` 算法的一个变体,对学习率的上限提供了一个更简单的范围,使学习率的边界范围更简单。 + +API Reference 请参考 :ref:`cn_api_fluid_optimizer_AdamaxOptimizer` + + + +7.DecayedAdagrad/ DecayedAdagradOptimizer +------------------------------------------- + +`DecayedAdagrad `_ 优化器,可以看做是引入了衰减速率的 :code:`Adagrad` 算法,解决使用Adagrad后,模型训练中后期学习率急剧下降的问题。 + +API Reference 请参考 :ref:`cn_api_fluid_optimizer_DecayedAdagrad` + + + + +8. Ftrl/FtrlOptimizer +---------------------- + +`FtrlOptimizer `_ 优化器结合了 `FOBOS算法 `_ 的高精度与 `RDA算法 +`_ 的稀疏性,是目前效果非常好的一种 `Online Learning `_ 算法。 + +API Reference 请参考 :ref:`cn_api_fluid_optimizer_FtrlOptimizer` + + + +9.ModelAverage +----------------- + +:code:`ModelAverage` 优化器,在训练中通过窗口来累计历史 parameter,在预测时使用取平均值后的paramet,整体提高预测的精度。 + +API Reference 请参考 :ref:`cn_api_fluid_optimizer_ModelAverage` + + diff --git a/doc/paddle/api_guides/low_level/optimizer_en.rst b/doc/paddle/api_guides/low_level/optimizer_en.rst new file mode 100755 index 0000000000000000000000000000000000000000..f135a297c5e5ce8ac88c7f69abafbded40496249 --- /dev/null +++ b/doc/paddle/api_guides/low_level/optimizer_en.rst @@ -0,0 +1,90 @@ +.. _api_guide_optimizer_en: + +########### +Optimizer +########### + +Neural network in essence is a `Optimization problem `_ . +With `forward computing and back propagation `_ , +:code:`Optimizer` use back-propagation gradients to optimize parameters in a neural network. + +1.SGD/SGDOptimizer +------------------ + +:code:`SGD` is an offspring class of :code:`Optimizer` implementing `Random Gradient Descent `_ which is a method of `Gradient Descent `_ . +When it needs to train a large number of samples, we usually choose :code:`SGD` to make loss function converge more quickly. + +API Reference: :ref:`api_fluid_optimizer_SGDOptimizer` + + +2.Momentum/MomentumOptimizer +---------------------------- + +:code:`Momentum` optimizer adds momentum on the basis of :code:`SGD` , reducing noise problem in the process of random gradient descent. +You can set :code:`ues_nesterov` as False or True, respectively corresponding to traditional `Momentum(Section 4.1 in thesis) +`_ algorithm and `Nesterov accelerated gradient(Section 4.2 in thesis) +`_ algorithm. + +API Reference: :ref:`api_fluid_optimizer_MomentumOptimizer` + + +3. Adagrad/AdagradOptimizer +--------------------------- +`Adagrad `_ Optimizer can adaptively allocate different learning rates for parameters to solve the problem of different sample sizes for different parameters. + +API Reference: :ref:`api_fluid_optimizer_AdagradOptimizer` + + +4.RMSPropOptimizer +------------------ +`RMSProp optimizer `_ is a method to adaptively adjust learning rate. +It mainly solves the problem of dramatic decrease of learning rate in the mid-term and end term of model training after Adagrad is used. 
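+Most of the optimizers in this guide are used in the same way: construct the optimizer and call :code:`minimize` on the loss. Below is a minimal sketch using :code:`RMSPropOptimizer` on a toy regression network; the learning rate and layer sizes are only illustrative:
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+
+    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
+    y = fluid.layers.data(name='y', shape=[1], dtype='float32')
+    y_predict = fluid.layers.fc(input=x, size=1)
+    avg_loss = fluid.layers.mean(
+        fluid.layers.square_error_cost(input=y_predict, label=y))
+
+    # minimize() appends the backward pass and the parameter-update
+    # operators to the default main program
+    optimizer = fluid.optimizer.RMSPropOptimizer(learning_rate=0.001)
+    optimizer.minimize(avg_loss)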
+ +API Reference: :ref:`api_fluid_optimizer_RMSPropOptimizer` + + + +5.Adam/AdamOptimizer +-------------------- +Optimizer of `Adam `_ is a method to adaptively adjust learning rate, +fit for most non- `convex optimization `_ , big data set and high-dimensional scenarios. :code:`Adam` is the most common optimization algorithm. + +API Reference: :ref:`api_fluid_optimizer_AdamOptimizer` + + + +6.Adamax/AdamaxOptimizer +------------------------ + +`Adamax `_ is a variant of :code:`Adam` algorithm, simplifying limits of learning rate, especially upper limit. + +API Reference: :ref:`api_fluid_optimizer_AdamaxOptimizer` + + + +7.DecayedAdagrad/DecayedAdagradOptimizer +------------------------------------------- + +`DecayedAdagrad `_ Optimizer can be regarded as an :code:`Adagrad` algorithm incorporated with decay rate to solve the problem of dramatic descent of learning rate in mid-term and end term of model training. + +API Reference: :ref:`api_fluid_optimizer_DecayedAdagrad` + + + + +8. Ftrl/FtrlOptimizer +---------------------- + +`FtrlOptimizer `_ Optimizer combines the high accuracy of `FOBOS algorithm `_ and the sparsity of `RDA algorithm `_ , which is an `Online Learning `_ algorithm with significantly satisfying effect. + +API Reference: :ref:`api_fluid_optimizer_FtrlOptimizer` + + + +9.ModelAverage +----------------- + +:code:`ModelAverage` Optimizer accumulates history parameters through sliding window during the model training. We use averaged parameters at inference time to upgrade general accuracy of inference. + +API Reference: :ref:`api_fluid_optimizer_ModelAverage` + diff --git a/doc/paddle/api_guides/low_level/parallel_executor.rst b/doc/paddle/api_guides/low_level/parallel_executor.rst new file mode 100644 index 0000000000000000000000000000000000000000..f2ae56c032480d474d1ec263451a5971831f1343 --- /dev/null +++ b/doc/paddle/api_guides/low_level/parallel_executor.rst @@ -0,0 +1,67 @@ +.. 
_api_guide_parallel_executor: + +##### +数据并行执行引擎 +##### + + +:code:`ParallelExecutor` 是以数据并行的方式在多个节点上分别执行 :code:`Program` 的执行器。用户可以通过Python脚本驱动 :code:`ParallelExecutor` 执行, :code:`ParallelExecutor` 的执行过程: + +- 首先根据 :code:`Program` 、 :code:`GPU` 卡的数目(或者 :code:`CPU` 的核数)以及 :ref:`cn_api_fluid_BuildStrategy` 构建 :code:`SSA Graph` 和一个线程池; +- 执行过程中,根据Op的输入是否Ready决定是否执行该Op,这样可以使没有相互依赖的多个Op可在线程池中并行执行; + +:code:`ParallelExecutor` 在构造时需要指定当前 :code:`Program` 的设备类型, :code:`GPU` 或者 :code:`CPU` : + +* 使用 :code:`GPU` 执行: :code:`ParallelExecutor` 会自动检测当前机器可以使用 :code:`GPU` 的个数,并在每个 :code:`GPU` 上分别执行 :code:`Program` ,用户也可以通过设置 :code:`CUDA_VISIBLE_DEVICES` 环境变量来指定执行器可使用的 :code:`GPU` ; +* 使用 :code:`CPU` 多线程执行::code:`ParallelExecutor` 会自动检测当前机器可利用的 :code:`CPU` 核数,并将 :code:`CPU` 核数作为执行器中线程的个数,每个线程分别执行 :code:`Program` ,用户也可以通过设置 :code:`CPU_NUM` 环境变量来指定当前训练使用的线程个数。 + +:code:`ParallelExecutor` 支持模型训练和模型预测: + +* 模型训练: :code:`ParallelExecutor` 在执行过程中对多个节点上的参数梯度进行聚合,然后进行参数的更新; +* 模型预测: :code:`ParallelExecutor` 在执行过程中各个节点独立运行当前的 :code:`Program` ; + +:code:`ParallelExecutor` 在模型训练时支持两种模式的梯度聚合, :code:`AllReduce` 和 :code:`Reduce` : + +* :code:`AllReduce` 模式下, :code:`ParallelExecutor` 调用AllReduce操作使多个节点上参数梯度完全相等,然后各个节点独立进行参数的更新; +* :code:`Reduce` 模式下, :code:`ParallelExecutor` 会预先将所有参数的更新分派到不同的节点上,在执行过程中 :code:`ParallelExecutor` 调用Reduce操作将参数梯度在预先指定的节点上进行聚合,并进行参数更新,最后调用Broadcast操作将更新后的参数发送到其他节点。 + +这两种模式通过 :code:`build_strategy` 来指定,使用方法,请参考 :ref:`cn_api_fluid_BuildStrategy` 。 + +**注意** :如果在Reduce模式下使用 :code:`CPU` 多线程执行 :code:`Program` , :code:`Program` 的参数在多个线程间是共享的,在某些模型上,Reduce模式可以大幅节省内存。 + +鉴于模型的执行速率和模型结构及执行器的执行策略有关,:code:`ParallelExecutor` 允许你修改执行器的相关参数,例如线程池的规模( :code:`num_threads` )、为清除临时变量 :code:`num_iteration_per_drop_scope` 需要进行的循环次数。更多信息请参照 :ref:`cn_api_fluid_ExecutionStrategy` 。 + + +.. code-block:: python + + # 注释: + # - 如果你想在ParallelExecutor中指定用于运行的GPU卡,需要在环境中定义 + # CUDA_VISIBLE_DEVICES + # - 如果你想在ParallelExecutor中使用多CPU来运行程序,需要在环境中定义 + # CPU_NUM + # 首先创建Executor。 + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + exe = fluid.Executor(place) + # 运行启动程序仅一次。 + exe.run(fluid.default_startup_program()) + # 定义train_exe和test_exe + exec_strategy = fluid.ExecutionStrategy() + exec_strategy.num_threads = dev_count * 4 # the size of thread pool. + build_strategy = fluid.BuildStrategy() + build_strategy.memory_optimize = True if memory_opt else False + train_exe = fluid.ParallelExecutor(use_cuda=use_cuda, + main_program=train_program, + build_strategy=build_strategy, + exec_strategy=exec_strategy, + loss_name=loss.name) + # 注释:对于test_exe,loss_name是不必要的。 + test_exe = fluid.ParallelExecutor(use_cuda=True, + main_program=test_program, + build_strategy=build_strategy, + exec_strategy=exec_strategy, + share_vars_from=train_exe) + train_loss, = train_exe.run(fetch_list=[loss.name], feed=feed_dict) + test_loss, = test_exe.run(fetch_list=[loss.name], feed=feed_dict) +- 相关API : + - :ref:`cn_api_fluid_ParallelExecutor` + - :ref:`cn_api_fluid_BuildStrategy` diff --git a/doc/paddle/api_guides/low_level/parallel_executor_en.rst b/doc/paddle/api_guides/low_level/parallel_executor_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..faad6688300667c76a90e9dc0f26012eb328dc9d --- /dev/null +++ b/doc/paddle/api_guides/low_level/parallel_executor_en.rst @@ -0,0 +1,76 @@ +.. 
_api_guide_parallel_executor_en: + +############################## +Parallel Executor +############################## + +:code:`ParallelExecutor` is an upgraded version of Executor, in addition, it supports model training of :code:`Program` in parallel with data. Users can use the Python script to run :code:`ParallelExecutor`. The execution process of :code:`ParallelExecutor` is as follows: + +- First it builds :code:`SSA Graph` and a thread pool based on :code:`Program`, the number of :code:`GPU` cards (or :code:`CPU` cores) and :ref:`api_fluid_BuildStrategy` ; +- During execution, it executes the Op depending on whether the input of Op is ready, so that multiple Ops that do not depend on each other can be executed in parallel in the thread pool; + +When constructing :code:`ParallelExecutor`, you need to specify the device type of the current :code:`Program`, namely :code:`GPU` or :code:`CPU` : + +* execution on :code:`GPU` : :code:`ParallelExecutor` will automatically detect the number of currently available :code:`GPU` s, and execute :code:`Program` on each :code:`GPU` . The user can also specify the :code:`GPU` that the executor can use by setting the :code:`CUDA_VISIBLE_DEVICES` environment variable; +* execution on multi-threaded :code:`CPU` : :code:`ParallelExecutor` will automatically detect the number of currently available :code:`CPU` s, and take it as the number of threads in the executor . Each thread executes :code:`Program` separately. The user can also specify the number of threads currently used for training by setting the :code:`CPU_NUM` environment variable. + +:code:`ParallelExecutor` supports model training and model prediction: + +* Model training: :code:`ParallelExecutor` aggregates the parameter gradients on multiple nodes during the execution process, and then updates the parameters; +* Model prediction: during the execution of :code:`ParallelExecutor`, each node runs the current :code:`Program` independently; + +:code:`ParallelExecutor` supports two modes of gradient aggregation during model training, :code:`AllReduce` and :code:`Reduce` : + +* In :code:`AllReduce` mode, :code:`ParallelExecutor` calls AllReduce operation to make the parameter gradients on multiple nodes completely equal, and then each node independently updates the parameters; +* In :code:`Reduce` mode, :code:`ParallelExecutor` will pre-allocate updates of all parameters to different nodes. During the execution :code:`ParallelExecutor` calls Reduce operation to aggregate parameter gradients on the pre-specified node, and the parameters are updated. Finally, the Broadcast operation is called to send the updated parameters to other nodes. + +These two modes are specified by :code:`build_strategy`. For how to use them, please refer to :ref:`api_fluid_BuildStrategy` . + +**Note**: If you use :code:`CPU` to execute :code:`Program` in multi-thread in Reduce mode, the parameters of :code:`Program` will be shared among multiple threads. On some models , Reduce mode can save a lot of memory. + +Since the execution speed of the model is related to the model structure and the execution strategy of the executor, :code:`ParallelExecutor` allows you to modify the relevant parameters of the executor, such as the size of thread pool ( :code:`num_threads` ), how many iterations should be done to clean up temporary variables :code:`num_iteration_per_drop_scope` . For more information, please refer to :ref:`api_fluid_ExecutionStrategy`. + + +.. 
code-block:: python + + # Note: + # - If you want to specify the GPU cards which are used to run + # in ParallelExecutor, you should define the CUDA_VISIBLE_DEVICES + # in environment. + # - If you want to use multi CPU to run the program in ParallelExecutor, + # you should define the CPU_NUM in the environment. + + # First create the Executor. + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + exe = fluid.Executor(place) + + # Run the startup program once and only once. + exe.run(fluid.default_startup_program()) + + # Define train_exe and test_exe + exec_strategy = fluid.ExecutionStrategy() + exec_strategy.num_threads = dev_count * 4 # the size of thread pool. + build_strategy = fluid.BuildStrategy() + build_strategy.memory_optimize = True if memory_opt else False + + train_exe = fluid.ParallelExecutor(use_cuda=use_cuda, + main_program=train_program, + build_strategy=build_strategy, + exec_strategy=exec_strategy, + loss_name=loss.name) + # NOTE: loss_name is unnecessary for test_exe. + test_exe = fluid.ParallelExecutor(use_cuda=True, + main_program=test_program, + build_strategy=build_strategy, + exec_strategy=exec_strategy, + share_vars_from=train_exe) + + train_loss, = train_exe.run(fetch_list=[loss.name], feed=feed_dict) + test_loss, = test_exe.run(fetch_list=[loss.name], feed=feed_dict) + +**Note**: :code:`fluid.Executor` and :code:`fluid.ParallelExecutor` are two completely different executors. First of all, their execution objects are different. The execution object of :code:`fluid.Executor` is :code:`fluid.Program` and the execution object of :code:`fluid.ParallelExecutor` is Graph. Secondly, their execution schedules are different. :code:`fluid.Executor` runs one by one according to the order of operators in :code:`fluid.Program`. :code:`fluid.ParallelExecutor` executes concurrently according to the dependencies between nodes in Graph. + +- Related API : + - :ref:`api_fluid_ParallelExecutor` + - :ref:`api_fluid_BuildStrategy` + - :ref:`api_fluid_ExecutionStrategy` diff --git a/doc/paddle/api_guides/low_level/parameter.rst b/doc/paddle/api_guides/low_level/parameter.rst new file mode 100644 index 0000000000000000000000000000000000000000..af7b63f1fd6dba5dcbdf60181a379398cef9542b --- /dev/null +++ b/doc/paddle/api_guides/low_level/parameter.rst @@ -0,0 +1,167 @@ +.. _api_guide_parameter: + +######### +模型参数 +######### + +模型参数为模型中的weight和bias统称,在fluid中对应fluid.Parameter类,继承自fluid.Variable,是一种可持久化的variable。模型的训练就是不断学习更新模型参数的过程。模型参数相关的属性可以通过 :ref:`cn_api_fluid_ParamAttr` 来配置,可配置内容有: + +- 初始化方式 +- 正则化 +- 梯度剪切 +- 模型平均 + +初始化方式 +================= + +fluid通过设置 :code:`ParamAttr` 的 :code:`initializer` 属性为单个parameter设置初始化方式。 +示例如下: + + .. code-block:: python + + param_attrs = fluid.ParamAttr(name="fc_weight", + initializer=fluid.initializer.ConstantInitializer(1.0)) + y_predict = fluid.layers.fc(input=x, size=10, param_attr=param_attrs) + + +以下为fluid支持的初始化方式: + +1. BilinearInitializer +----------------------- + +线性初始化方法。用该方法初始化的反卷积操作可当做线性插值操作使用。 + +可用别名:Bilinear + +API请参考::ref:`cn_api_fluid_initializer_BilinearInitializer` + +2. ConstantInitializer +---------------------- + +常数初始化方式,将parameter初始化为指定的数值。 + +可用别名:Constant + +API请参考::ref:`cn_api_fluid_initializer_ConstantInitializer` + +3. MSRAInitializer +------------------ + +该初始化方法参考论文: https://arxiv.org/abs/1502.01852 + +可用别名:MSRA + +API请参考::ref:`cn_api_fluid_initializer_MSRAInitializer` + +4. NormalInitializer +--------------------- + +随机高斯分布初始化方法。 + +可用别名:Normal + +API请参考::ref:`cn_api_fluid_initializer_NormalInitializer` + +5. 
TruncatedNormalInitializer +----------------------------- + +随机截断高斯分布初始化方法。 + +可用别名:TruncatedNormal + +API请参考::ref:`cn_api_fluid_initializer_TruncatedNormalInitializer` + +6. UniformInitializer +-------------------- + +随机均匀分布初始化方式。 + +可用别名:Uniform + +API请参考::ref:`cn_api_fluid_initializer_UniformInitializer` + +7. XavierInitializer +-------------------- + +该初始化方式参考论文: http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf + +可用别名:Xavier + +API请参考::ref:`cn_api_fluid_initializer_XavierInitializer` + +正则化方式 +============= + +fluid通过设置 :code:`ParamAttr` 的 :code:`regularizer` 属性为单个parameter设置正则化。 + + .. code-block:: python + + param_attrs = fluid.ParamAttr(name="fc_weight", + regularizer=fluid.regularizer.L1DecayRegularizer(0.1)) + y_predict = fluid.layers.fc(input=x, size=10, param_attr=param_attrs) + +以下为fluid支持的正则化方式: + +- :ref:`cn_api_fluid_regularizer_L1DecayRegularizer` (别名:L1Decay) +- :ref:`cn_api_fluid_regularizer_L2DecayRegularizer` (别名:L2Decay) + +Clipping +========== + +fluid通过设置 :code:`ParamAttr` 的 :code:`gradient_clip` 属性为单个parameter设置clipping方式。 + + .. code-block:: python + + param_attrs = fluid.ParamAttr(name="fc_weight", + regularizer=fluid.regularizer.L1DecayRegularizer(0.1)) + y_predict = fluid.layers.fc(input=x, size=10, param_attr=param_attrs) + + +以下为fluid支持的clipping方式: + +1. ErrorClipByValue +------------------- + +用来将一个tensor的值clipping到指定范围。 + +API请参考::ref:`cn_api_fluid_clip_ErrorClipByValue` + +2. GradientClipByGlobalNorm +--------------------------- + +用来将多个Tensor的global-norm限制在 :code:`clip_norm` 以内。 + +API请参考::ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` + +3. GradientClipByNorm +--------------------- + +将Tensor的l2-norm限制在 :code:`max_norm` 以内。如果Tensor的l2-norm超过了 :code:`max_norm` , +会将计算出一个 :code:`scale` ,该Tensor的所有值乘上计算出来的 :code:`scale` . + +API请参考::ref:`cn_api_fluid_clip_GradientClipByNorm` + +4. GradientClipByValue +---------------------- + +将parameter对应的gradient的值限制在[min, max]范围内。 + +API请参考::ref:`cn_api_fluid_clip_GradientClipByValue` + +模型平均 +======== + +fluid通过 :code:`ParamAttr` 的 :code:`do_model_average` 属性设置单个parameter是否进行平均优化。 +示例如下: + + .. code-block:: python + + param_attrs = fluid.ParamAttr(name="fc_weight", + do_model_average=true) + y_predict = fluid.layers.fc(input=x, size=10, param_attr=param_attrs) + +在miniBatch训练过程中,每个batch过后,都会更新一次parameters,模型平均做的就是平均最近k次更新产生的parameters。 + +平均后的parameters只是被用来进行测试和预测,其并不参与实际的训练过程。 + +具体API请参考::ref:`cn_api_fluid_optimizer_ModelAverage` diff --git a/doc/paddle/api_guides/low_level/parameter_en.rst b/doc/paddle/api_guides/low_level/parameter_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..fe46687ce8cf23aa7e628fdedad0c9b8b11e2e46 --- /dev/null +++ b/doc/paddle/api_guides/low_level/parameter_en.rst @@ -0,0 +1,175 @@ +.. _api_guide_parameter_en: + +################## +Model Parameters +################## + +Model parameters are weights and biases in a model. In fluid, they are instances of ``fluid.Parameter`` class which is inherited from fluid, and they are all persistable variables. Model training is a process of learning and updating model parameters. The attributes related to model parameters can be configured by :ref:`api_fluid_ParamAttr` . The configurable contents are as follows: + + +- Initialization method + +- Regularization + +- gradient clipping + +- Model Average + + + +Initialization method +======================== + +Fluid initializes a single parameter by setting attributes of :code:`initializer` in :code:`ParamAttr` . + +examples: + + .. 
code-block:: python + + param_attrs = fluid.ParamAttr(name="fc_weight", + initializer=fluid.initializer.ConstantInitializer(1.0)) + y_predict = fluid.layers.fc(input=x, size=10, param_attr=param_attrs) + + + +The following is the initialization method supported by fluid: + +1. BilinearInitializer +----------------------- + +Linear initialization. The deconvolution operation initialized by this method can be used as a linear interpolation operation. + +Alias:Bilinear + +API reference: :ref:`api_fluid_initializer_BilinearInitializer` + +2. ConstantInitializer +-------------------------- + +Constant initialization. Initialize the parameter to the specified value. + +Alias:Constant + +API reference: :ref:`api_fluid_initializer_ConstantInitializer` + +3. MSRAInitializer +---------------------- + +Please refer to https://arxiv.org/abs/1502.01852 for initialization. + +Alias:MSRA + +API reference: :ref:`api_fluid_initializer_MSRAInitializer` + +4. NormalInitializer +------------------------- + +Initialization method of random Gaussian distribution. + +Alias:Normal + +API reference: :ref:`api_fluid_initializer_NormalInitializer` + +5. TruncatedNormalInitializer +--------------------------------- + +Initialization method of stochastic truncated Gauss distribution. + +Alias:TruncatedNormal + +API reference: :ref:`api_fluid_initializer_TruncatedNormalInitializer` + +6. UniformInitializer +------------------------ + +Initialization method of random uniform distribution. + +Alias:Uniform + +API reference: :ref:`api_fluid_initializer_UniformInitializer` + +7. XavierInitializer +------------------------ + +Please refer to http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf for initialization. + +Alias:Xavier + +API reference: :ref:`api_fluid_initializer_XavierInitializer` + +Regularization +================= + +Fluid regularizes a single parameter by setting attributes of :code:`regularizer` in :code:`ParamAttr` . + + .. code-block:: python + + param_attrs = fluid.ParamAttr(name="fc_weight", + regularizer=fluid.regularizer.L1DecayRegularizer(0.1)) + y_predict = fluid.layers.fc(input=x, size=10, param_attr=param_attrs) + +The following is the regularization approach supported by fluid: + +- :ref:`api_fluid_regularizer_L1DecayRegularizer` (Alias:L1Decay) +- :ref:`api_fluid_regularizer_L2DecayRegularizer` (Alias:L2Decay) + +Clipping +========== + +Fluid sets clipping method for a single parameter by setting attributes of :code:`gradient_clip` in :code:`ParamAttr` . + + .. code-block:: python + + param_attrs = fluid.ParamAttr(name="fc_weight", + regularizer=fluid.regularizer.L1DecayRegularizer(0.1)) + y_predict = fluid.layers.fc(input=x, size=10, param_attr=param_attrs) + + + +The following is the clipping method supported by fluid: + +1. ErrorClipByValue +---------------------- + +Used to clipping the value of a tensor to a specified range. + +API reference: :ref:`api_fluid_clip_ErrorClipByValue` + +2. GradientClipByGlobalNorm +------------------------------ + +Used to limit the global-norm of multiple Tensors to :code:`clip_norm`. + +API reference: :ref:`api_fluid_clip_GradientClipByGlobalNorm` + +3. GradientClipByNorm +------------------------ +Limit the L2-norm of Tensor to :code:`max_norm` . If Tensor's L2-norm exceeds: :code:`max_norm` , +it will calculate a :code:`scale` . And then all values of the Tensor multiply the :code:`scale` . + +API reference: :ref:`api_fluid_clip_GradientClipByNorm` + +4. 
GradientClipByValue +------------------------- + +Limit the value of the gradient on a parameter to [min, max]. + +API reference: :ref:`api_fluid_clip_GradientClipByValue` + +Model Averaging +================ + +Fluid determines whether to average a single parameter by setting attributes of :code:`do_model_average` in :code:`ParamAttr` . +Examples: + + .. code-block:: python + + param_attrs = fluid.ParamAttr(name="fc_weight", + do_model_average=true) + y_predict = fluid.layers.fc(input=x, size=10, param_attr=param_attrs) + +In the miniBatch training process, parameters will be updated once after each batch, and the average model averages the parameters generated by the latest K updates. + +The averaged parameters are only used for testing and prediction, and they do not get involved in the actual training process. + +API reference :ref:`api_fluid_optimizer_ModelAverage` diff --git a/doc/paddle/api_guides/low_level/program.rst b/doc/paddle/api_guides/low_level/program.rst new file mode 100644 index 0000000000000000000000000000000000000000..f51b7468a339721ec95fdfb77507f10a08e9d5e3 --- /dev/null +++ b/doc/paddle/api_guides/low_level/program.rst @@ -0,0 +1,145 @@ +.. _api_guide_Program: + +######### +基础概念 +######### + +================== +Program +================== + +:code:`Fluid` 中使用类似于编程语言的抽象语法树的形式描述用户的神经网络配置,用户对计算的描述都将写入一段Program。Fluid 中的 Program 替代了传统框架中模型的概念,通过对顺序执行、条件选择和循环执行三种执行结构的支持,做到对任意复杂模型的描述。书写 :code:`Program` 的过程非常接近于写一段通用程序,如果您已经具有一定的编程经验,会很自然地将自己的知识迁移过来。 + + +总得来说: + +* 一个模型是一个 Fluid :code:`Program` ,一个模型可以含有多于一个 :code:`Program` ; + +* :code:`Program` 由嵌套的 :code:`Block` 构成,:code:`Block` 的概念可以类比到 C++ 或是 Java 中的一对大括号,或是 Python 语言中的一个缩进块; + +* :code:`Block` 中的计算由顺序执行、条件选择或者循环执行三种方式组合,构成复杂的计算逻辑; + +* :code:`Block` 中包含对计算和计算对象的描述。计算的描述称之为 Operator;计算作用的对象(或者说 Operator 的输入和输出)被统一为 Tensor,在Fluid中,Tensor 用层级为0的 :ref:`Lod_Tensor ` 表示。 + + + +.. _api_guide_Block: + +========= +Block +========= + +:code:`Block` 是高级语言中变量作用域的概念,在编程语言中,Block是一对大括号,其中包含局部变量定义和一系列指令或操作符。编程语言中的控制流结构 :code:`if-else` 和 :code:`for` 在深度学习中可以被等效为: + ++----------------------+-------------------------+ +| 编程语言 | Fluid | ++======================+=========================+ +| for, while loop | RNN,WhileOP | ++----------------------+-------------------------+ +| if-else, switch | IfElseOp, SwitchOp | ++----------------------+-------------------------+ +| 顺序执行 | 一系列 layers | ++----------------------+-------------------------+ + +如上文所说,Fluid 中的 :code:`Block` 描述了一组以顺序、选择或是循环执行的 Operator 以及 Operator 操作的对象:Tensor。 + + + + +============= +Operator +============= + +在 Fluid 中,所有对数据的操作都由 :code:`Operator` 表示,为了便于用户使用,在 Python 端,Fluid 中的 :code:`Operator` 被一步封装入 :code:`paddle.fluid.layers` , :code:`paddle.fluid.nets` 等模块。 + +这是因为一些常见的对 Tensor 的操作可能是由更多基础操作构成,为了提高使用的便利性,框架内部对基础 Operator 进行了一些封装,包括创建 Operator 依赖可学习参数,可学习参数的初始化细节等,减少用户重复开发的成本。 + + +更多内容可参考阅读 `Fluid设计思想 <../../advanced_usage/design_idea/fluid_design_idea.html>`_ + +.. _api_guide_Variable: + +========= +Variable +========= + +Fluid 中的 :code:`Variable` 可以包含任何类型的值———在大多数情况下是一个 :ref:`Lod_Tensor ` 。 + +模型中所有的可学习参数都以 :code:`Variable` 的形式保留在内存空间中,您在绝大多数情况下都不需要自己来创建网络中的可学习参数, Fluid 为几乎常见的神经网络基本计算模块都提供了封装。以最简单的全连接模型为例,调用 :code:`fluid.layers.fc` 会直接为全连接层创建连接权值( W )和偏置( bias )两个可学习参数,无需显示地调用 :code:`variable` 相关接口创建可学习参数。 + +.. 
_api_guide_Name: + +========= +Name +========= + +Fluid 中部分网络层里包含了 :code:`name` 参数,如 :ref:`cn_api_fluid_layers_fc` 。此 :code:`name` 一般用来作为网络层输出、权重的前缀标识,具体规则如下: + +* 用于网络层输出的前缀标识。若网络层中指定了 :code:`name` 参数,Fluid 将以 ``name值.tmp_数字`` 作为唯一标识对网络层输出进行命名;未指定 :code:`name` 参数时,则以 ``OP名_数字.tmp_数字`` 的方式进行命名,其中的数字会自动递增,以区分同名OP下的不同网络层。 + +* 用于权重或偏置变量的前缀标识。若在网络层中通过 ``param_attr`` 和 ``bias_attr`` 创建了权重变量或偏置变量, 如 :ref:`cn_api_fluid_layers_embedding` 、 :ref:`cn_api_fluid_layers_fc` ,则 Fluid 会自动生成 ``前缀.w_数字`` 或 ``前缀.b_数字`` 的唯一标识对其进行命名,其中 ``前缀`` 为用户指定的 :code:`name` 或自动生成的 ``OP名_数字`` 。若在 ``param_attr`` 和 ``bias_attr`` 中指定了 :code:`name` ,则用此 :code:`name` ,不再自动生成。细节请参考示例代码。 + +此外,在 :ref:`cn_api_fluid_ParamAttr` 中,可通过指定 :code:`name` 参数实现多个网络层的权重共享。 + +示例代码如下: + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + x = fluid.layers.data(name='x', shape=[1], dtype='int64', lod_level=1) + emb = fluid.layers.embedding(input=x, size=(128, 100)) # embedding_0.w_0 + emb = fluid.layers.Print(emb) # Tensor[embedding_0.tmp_0] + + # default name + fc_none = fluid.layers.fc(input=emb, size=1) # fc_0.w_0, fc_0.b_0 + fc_none = fluid.layers.Print(fc_none) # Tensor[fc_0.tmp_1] + + fc_none1 = fluid.layers.fc(input=emb, size=1) # fc_1.w_0, fc_1.b_0 + fc_none1 = fluid.layers.Print(fc_none1) # Tensor[fc_1.tmp_1] + + # name in ParamAttr + w_param_attrs = fluid.ParamAttr(name="fc_weight", learning_rate=0.5, trainable=True) + print(w_param_attrs.name) # fc_weight + + # name == 'my_fc' + my_fc1 = fluid.layers.fc(input=emb, size=1, name='my_fc', param_attr=w_param_attrs) # fc_weight, my_fc.b_0 + my_fc1 = fluid.layers.Print(my_fc1) # Tensor[my_fc.tmp_1] + + my_fc2 = fluid.layers.fc(input=emb, size=1, name='my_fc', param_attr=w_param_attrs) # fc_weight, my_fc.b_1 + my_fc2 = fluid.layers.Print(my_fc2) # Tensor[my_fc.tmp_3] + + place = fluid.CPUPlace() + x_data = np.array([[1],[2],[3]]).astype("int64") + x_lodTensor = fluid.create_lod_tensor(x_data, [[1, 2]], place) + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + ret = exe.run(feed={'x': x_lodTensor}, fetch_list=[fc_none, fc_none1, my_fc1, my_fc2], return_numpy=False) + + +上述示例中, ``fc_none`` 和 ``fc_none1`` 均未指定 :code:`name` 参数,则以 ``OP名_数字.tmp_数字`` 分别对该OP输出进行命名:``fc_0.tmp_1`` 和 ``fc_1.tmp_1`` ,其中 ``fc_0`` 和 ``fc_1`` 中的数字自动递增以区分两个全连接层; ``my_fc1`` 和 ``my_fc2`` 均指定了 :code:`name` 参数,但取值相同,Fluid 以后缀 ``tmp_数字`` 进行区分,即 ``my_fc.tmp_1`` 和 ``my_fc.tmp_3`` 。 + +对于网络层中创建的变量, ``emb`` 层和 ``fc_none`` 、 ``fc_none1`` 层均默认以 ``OP名_数字`` 为前缀对权重或偏置变量进行命名,如 ``embedding_0.w_0`` 、 ``fc_0.w_0`` 、 ``fc_0.b_0`` ,其前缀与OP输出的前缀一致。 ``my_fc1`` 层和 ``my_fc2`` 层则优先以 ``ParamAttr`` 中指定的 ``fc_weight`` 作为共享权重的名称。而偏置变量 ``my_fc.b_0`` 和 ``my_fc.b_1`` 则次优地以 :code:`name` 作为前缀标识。 + +在上述示例中,``my_fc1`` 和 ``my_fc2`` 两个全连接层通过构建 ``ParamAttr`` ,并指定 :code:`name` 参数,实现了网络层权重变量的共享机制。 + +.. 
_api_guide_ParamAttr: + +========= +ParamAttr +========= + +========= +相关API +========= + +* 用户配置的单个神经网络叫做 :ref:`cn_api_fluid_Program` 。值得注意的是,训练神经网 + 络时,用户经常需要配置和操作多个 :code:`Program` 。比如参数初始化的 + :code:`Program` , 训练用的 :code:`Program` ,测试用的 + :code:`Program` 等等。 + + +* 用户还可以使用 :ref:`cn_api_fluid_program_guard` 配合 :code:`with` 语句,修改配置好的 :ref:`cn_api_fluid_default_startup_program` 和 :ref:`cn_api_fluid_default_main_program` 。 + +* 在Fluid中,Block内部执行顺序由控制流决定,如 :ref:`cn_api_fluid_layers_IfElse` , :ref:`cn_api_fluid_layers_While`, :ref:`cn_api_fluid_layers_Switch` 等,更多内容可参考: :ref:`api_guide_control_flow` diff --git a/doc/paddle/api_guides/low_level/program_en.rst b/doc/paddle/api_guides/low_level/program_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..3a380229560796e4d9347211196ed58969c8f45f --- /dev/null +++ b/doc/paddle/api_guides/low_level/program_en.rst @@ -0,0 +1,143 @@ +.. _api_guide_Program_en: + +############### +Basic Concept +############### + +================== +Program +================== + +:code:`Fluid` describes neural network configuration in the form of abstract grammar tree similar to that of a programming language, and the user's description of computation will be written into a Program. Program in Fluid replaces the concept of models in traditional frameworks. It can describe any complex model through three execution structures: sequential execution, conditional selection and loop execution. Writing :code:`Program` is very close to writing a common program. If you have tried programming before, you will naturally apply your expertise to it. + +In brief: + +* A model is a Fluid :code:`Program` and can contain more than one :code:`Program` ; + +* :code:`Program` consists of nested :code:`Block` , and the concept of :code:`Block` can be analogized to a pair of braces in C++ or Java, or an indentation block in Python. + + +* Computing in :code:`Block` is composed of three ways: sequential execution, conditional selection or loop execution, which constitutes complex computational logic. + + +* :code:`Block` contains descriptions of computation and computational objects. The description of computation is called Operator; the object of computation (or the input and output of Operator) is unified as Tensor. In Fluid, Tensor is represented by 0-leveled `LoD-Tensor `_ . + +.. _api_guide_Block_en: + +========= +Block +========= + +:code:`Block` is the concept of variable scope in advanced languages. In programming languages, Block is a pair of braces, which contains local variable definitions and a series of instructions or operators. Control flow structures :code:`if-else` and :code:`for` in programming languages can be equivalent to the following counterparts in deep learning: + ++----------------------+-------------------------+ +| programming languages| Fluid | ++======================+=========================+ +| for, while loop | RNN,WhileOP | ++----------------------+-------------------------+ +| if-else, switch | IfElseOp, SwitchOp | ++----------------------+-------------------------+ +| execute sequentially | a series of layers | ++----------------------+-------------------------+ + +As mentioned above, :code:`Block` in Fluid describes a set of Operators that include sequential execution, conditional selection or loop execution, and the operating object of Operator: Tensor. + + + +============= +Operator +============= + +In Fluid, all operations of data are represented by :code:`Operator` . 
In Python, :code:`Operator` in Fluid is encapsulated into modules like :code:`paddle.fluid.layers` , :code:`paddle.fluid.nets` . + +This is because some common operations on Tensor may consist of more basic operations. For simplicity, some encapsulation of the basic Operator is carried out inside the framework, including the creation of learnable parameters relied by an Operator, the initialization details of learnable parameters, and so on, so as to reduce the cost of further development. + + + +More information can be read for reference. `Fluid Design Idea <../../advanced_usage/design_idea/fluid_design_idea.html>`_ + +.. _api_guide_Variable_en: + +========= +Variable +========= + +In Fluid, :code:`Variable` can contain any type of value -- in most cases a LoD-Tensor. + +All the learnable parameters in the model are kept in the memory space in form of :code:`Variable` . In most cases, you do not need to create the learnable parameters in the network by yourself. Fluid provides encapsulation for almost common basic computing modules of the neural network. Taking the simplest full connection model as an example, calling :code:`fluid.layers.fc` directly creates two learnable parameters for the full connection layer, namely, connection weight (W) and bias, without explicitly calling :code:`Variable` related interfaces to create learnable parameters. + +.. _api_guide_Name: + +========= +Name +========= + +In Fluid, some layers contain the parameter :code:`name` , such as :ref:`api_fluid_layers_fc` . This :code:`name` is generally used as the prefix identification of output and weight in network layers. The specific rules are as follows: + +* Prefix identification for output of layers. If :code:`name` is specified in the layer, Fluid will name the output with ``nameValue.tmp_number`` . If the :code:`name` is not specified, ``OPName_number.tmp_number`` is automatically generated to name the layer. The numbers are automatically incremented to distinguish different network layers under the same operator. + +* Prefix identification for weight or bias variable. If the weight and bias variables are created by ``param_attr`` and ``bias_attr`` in operator, such as :ref:`api_fluid_layers_embedding` 、 :ref:`api_fluid_layers_fc` , Fluid will generate ``prefix.w_number`` or ``prefix.b_number`` as unique identifier to name them, where the ``prefix`` is :code:`name` specified by users or ``OPName_number`` generated by default. If :code:`name` is specified in ``param_attr`` and ``bias_attr`` , the :code:`name` is no longer generated automatically. Refer to the sample code for details. + +In addition, the weights of multiple network layers can be shared by specifying the :code:`name` parameter in :ref:`api_fluid_ParamAttr`. + +Sample Code: + +.. 
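code-block:: python
+
+    # A small supplementary sketch (not part of the original sample code): after a layer
+    # is added to the default Program, you can list the parameters Fluid has created and
+    # check the automatically generated names. The fc layer here is only for illustration.
+    import paddle.fluid as fluid
+
+    x = fluid.layers.data(name='x', shape=[8], dtype='float32')
+    fc = fluid.layers.fc(input=x, size=4)  # creates fc_0.w_0 and fc_0.b_0
+
+    for p in fluid.default_main_program().global_block().all_parameters():
+        print(p.name)  # expected: fc_0.w_0, fc_0.b_0
+
+The complete sample code referred to above is as follows:
+
+.. 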
code-block:: python
+
+    import paddle.fluid as fluid
+    import numpy as np
+
+    x = fluid.layers.data(name='x', shape=[1], dtype='int64', lod_level=1)
+    emb = fluid.layers.embedding(input=x, size=(128, 100))  # embedding_0.w_0
+    emb = fluid.layers.Print(emb)  # Tensor[embedding_0.tmp_0]
+
+    # default name
+    fc_none = fluid.layers.fc(input=emb, size=1)  # fc_0.w_0, fc_0.b_0
+    fc_none = fluid.layers.Print(fc_none)  # Tensor[fc_0.tmp_1]
+
+    fc_none1 = fluid.layers.fc(input=emb, size=1)  # fc_1.w_0, fc_1.b_0
+    fc_none1 = fluid.layers.Print(fc_none1)  # Tensor[fc_1.tmp_1]
+
+    # name in ParamAttr
+    w_param_attrs = fluid.ParamAttr(name="fc_weight", learning_rate=0.5, trainable=True)
+    print(w_param_attrs.name)  # fc_weight
+
+    # name == 'my_fc'
+    my_fc1 = fluid.layers.fc(input=emb, size=1, name='my_fc', param_attr=w_param_attrs)  # fc_weight, my_fc.b_0
+    my_fc1 = fluid.layers.Print(my_fc1)  # Tensor[my_fc.tmp_1]
+
+    my_fc2 = fluid.layers.fc(input=emb, size=1, name='my_fc', param_attr=w_param_attrs)  # fc_weight, my_fc.b_1
+    my_fc2 = fluid.layers.Print(my_fc2)  # Tensor[my_fc.tmp_3]
+
+    place = fluid.CPUPlace()
+    x_data = np.array([[1],[2],[3]]).astype("int64")
+    x_lodTensor = fluid.create_lod_tensor(x_data, [[1, 2]], place)
+    exe = fluid.Executor(place)
+    exe.run(fluid.default_startup_program())
+    ret = exe.run(feed={'x': x_lodTensor}, fetch_list=[fc_none, fc_none1, my_fc1, my_fc2], return_numpy=False)
+
+
+In the above example, neither ``fc_none`` nor ``fc_none1`` specifies the :code:`name` parameter, so the outputs of these two layers are named ``fc_0.tmp_1`` and ``fc_1.tmp_1`` following the ``OPName_number.tmp_number`` pattern, where the numbers in ``fc_0`` and ``fc_1`` are automatically incremented to distinguish the two fully connected layers. The other two fully connected layers, ``my_fc1`` and ``my_fc2`` , specify the :code:`name` parameter with the same value, so Fluid distinguishes their outputs by the ``tmp_number`` suffix, namely ``my_fc.tmp_1`` and ``my_fc.tmp_3`` .
+
+For the variables created inside these layers, the ``emb`` layer and the ``fc_none`` , ``fc_none1`` layers name their weight and bias variables with the default ``OPName_number`` prefix, such as ``embedding_0.w_0`` , ``fc_0.w_0`` and ``fc_0.b_0`` , which is consistent with the prefix of each layer's output. The ``my_fc1`` and ``my_fc2`` layers preferentially use ``fc_weight`` , the name specified in ``ParamAttr`` , as the name of the shared weight. The bias variables ``my_fc.b_0`` and ``my_fc.b_1`` fall back to using the :code:`name` given in the operator as their prefix.
+
+In the above example, the two fully connected layers ``my_fc1`` and ``my_fc2`` share their weight parameter by constructing a ``ParamAttr`` and specifying its :code:`name` parameter.
+
+.. _api_guide_ParamAttr:
+
+=========
+ParamAttr
+=========
+
+==================
+Related API
+==================
+
+
+* A single neural network configured by the user is called a :ref:`api_fluid_Program` . It is noteworthy that when training neural networks, users often need to configure and operate multiple :code:`Program` , for example a :code:`Program` for parameter initialization, a :code:`Program` for training, a :code:`Program` for testing, and so on.
+
+
+* Users can also use :ref:`api_fluid_program_guard` together with the :code:`with` statement to modify the configured :ref:`api_fluid_default_startup_program` and :ref:`api_fluid_default_main_program` .
+
+
+* In Fluid, the execution order inside a Block is determined by control flow, such as :ref:`api_fluid_layers_IfElse` , :ref:`api_fluid_layers_While` and :ref:`api_fluid_layers_Switch` . 
For more information, please refer to: :ref:`api_guide_control_flow_en` diff --git a/doc/paddle/beginners_guide/basic_concept/broadcasting.rst b/doc/paddle/beginners_guide/basic_concept/broadcasting.rst new file mode 100644 index 0000000000000000000000000000000000000000..3afb091ae801a7f44e9311a54e2c5d2c142f43e9 --- /dev/null +++ b/doc/paddle/beginners_guide/basic_concept/broadcasting.rst @@ -0,0 +1,99 @@ +.. _cn_user_guide_broadcasting: + +========= +广播 (broadcasting) +========= + +飞桨(PaddlePaddle,以下简称Paddle)和其他框架一样,提供的一些API支持广播(broadcasting)机制,允许在一些运算时使用不同形状的张量。 +通常来讲,如果有一个形状较小和一个形状较大的张量,我们希望多次使用较小的张量来对较大的张量执行一些操作,看起来像是较小形状的张量的形状首先被扩展到和较大形状的张量一致,然后做运算。 +值得注意的是,这期间并没有对较小形状张量的数据拷贝操作。 + +飞桨的广播机制主要遵循如下规则;如果两个张量的形状遵循一下规则,我们认为这两个张量是可广播的(参考`Numpy 广播机制 `): + +1. 每个张量至少为一维张量 +2. 从后往前比较张量的形状,当前维度的大小要么相等,要么其中一个等于一,要么其中一个不存在 + +例如: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.enable_imperative() + + x = paddle.imperative.to_variable(np.ones((2,3,4), np.float32)) + y = paddle.imperative.to_variable(np.ones((2,3,4), np.float32)) + # 两个张量 形状一致,可以广播 + + x = paddle.imperative.to_variable(np.ones((2,3,1,5), np.float32)) + y = paddle.imperative.to_variable(np.ones((3,4,1), np.float32)) + # 从后向前依次比较: + # 第一次:y的维度大小是1 + # 第二次:x的维度大小是1 + # 第三次:x和y的维度大小相等 + # 第四次:y的维度不存在 + # 所以 x和y是可以广播的 + + # 相反 + x = paddle.imperative.to_variable(np.ones((2,3,4), np.float32)) + y = paddle.imperative.to_variable(np.ones((2,3,6), np.float32)) + # 此时x和y是不可广播的,因为第一次比较 4不等于6 + +现在我们知道什么情况下两个张量是可以广播的,两个张量进行广播语义后的结果张量的形状计算规则如下: + +1. 如果两个张量的形状的长度不一致,那么需要在较小形状长度的矩阵像前添加1,只到两个张量的形状长度相等。 +2. 保证两个张量形状相等之后,每个维度上的结果维度就是当前维度上较大的那个。 + +例如: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.enable_imperative() + + x = paddle.imperative.to_variable(np.ones((2,1,4), np.float32)) + y = paddle.imperative.to_variable(np.ones((3,1), np.float32)) + z = x+y + print(z.shape) + # z的形状: [2,3,4] + + x = paddle.imperative.to_variable(np.ones((2,1,4), np.float32)) + y = paddle.imperative.to_variable(np.ones((3,2), np.float32)) + z = x+y + print(z.shape) + # InvalidArgumentError: Broadcast dimension mismatch. + +除此之外,飞桨的elementwise系列API针对广播机制增加了axis参数,当使用较小形状的y来来匹配较大形状的x的时候,且满足y的形状的长度小于x的形状长度, +axis表示y在x上应用广播机制的时候的起始维度的位置,当设置了asis参数后,张量的维度比较顺序变成了从axis开始,从前向后比较。当axis=-1时,axis = rank(x) - rank(y), +同时y的大小为1的尾部维度将被忽略。 + +例如: + +.. code-block:: python + + import paddle + import numpy as np + + paddle.enable_imperative() + + x = paddle.imperative.to_variable(np.ones((2,1,4), np.float32)) + y = paddle.imperative.to_variable(np.ones((3,1), np.float32)) + z = paddle.elementwise_add(x,y,axis=1) + # z的形状 [2, 3, 4] + + x = paddle.imperative.to_variable(np.ones((2,3,4,5), np.float32)) + y = paddle.imperative.to_variable(np.ones((4,5), np.float32)) + z = paddle.elementwise_add(x,y,axis=1) + print(z.shape) + # InvalidArgumentError: Broadcast dimension mismatch. + # 因为指定了axis之后,计算广播的维度从axis开始从前向后比较 + + x = paddle.imperative.to_variable(np.ones((2,3,4,5), np.float32)) + y = paddle.imperative.to_variable(np.ones((3), np.float32)) + z = paddle.elementwise_add(x,y,axis=1) + print(z.shape) + # z的形状 [2, 3, 4, 5] + # 因为此时是从axis=1的维度开始,从前向后比较维度进行广播 diff --git a/doc/paddle/beginners_guide/basic_concept/broadcasting_en.rst b/doc/paddle/beginners_guide/basic_concept/broadcasting_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..84b2f5b937b2f0046ad3fec3eac04f2494693fbd --- /dev/null +++ b/doc/paddle/beginners_guide/basic_concept/broadcasting_en.rst @@ -0,0 +1,101 @@ +.. 
_user_guide_broadcasting:
+
+============
+Broadcasting
+============
+
+PaddlePaddle provides broadcasting semantics in some APIs, like other deep learning frameworks, which allows operating on tensors with different shapes.
+In general, broadcasting is the rule describing how the smaller tensor is "broadcast" across the larger tensor so that the two operands end up with compatible shapes.
+Note that no data copy happens during broadcasting.
+
+In PaddlePaddle, two tensors are broadcastable when the following rules hold (ref: Numpy broadcasting):
+
+1. each tensor has at least one dimension;
+2. when comparing their shapes element-wise from the trailing dimension to the leading one, two dimensions are compatible when
+they are equal, or one of them is 1, or one of them does not exist.
+
+For example:
+
+.. code-block:: python
+
+    import paddle
+    import numpy as np
+
+    paddle.enable_imperative()
+
+    x = paddle.imperative.to_variable(np.ones((2,3,4), np.float32))
+    y = paddle.imperative.to_variable(np.ones((2,3,4), np.float32))
+    # The two tensors have the same shape, so they are broadcastable.
+
+    x = paddle.imperative.to_variable(np.ones((2,3,1,5), np.float32))
+    y = paddle.imperative.to_variable(np.ones((3,4,1), np.float32))
+    # Compare from the trailing dimension to the leading one:
+    # 1st step: y's dimension is 1
+    # 2nd step: x's dimension is 1
+    # 3rd step: the two dimensions are equal
+    # 4th step: y's dimension does not exist
+    # So x and y are broadcastable.
+
+    # In contrast:
+    x = paddle.imperative.to_variable(np.ones((2,3,4), np.float32))
+    y = paddle.imperative.to_variable(np.ones((2,3,6), np.float32))
+    # x and y are not broadcastable, because at the first step from the tail
+    # x's dimension 4 is not equal to y's dimension 6.
+
+Now that we know when two tensors are broadcastable, the shape of the resulting tensor is computed by the following rules:
+
+1. If the numbers of dimensions of x and y are not equal, prepend 1 to the shape of the tensor with fewer dimensions until the two shapes have equal length.
+2. Then, for each dimension, the resulting dimension size is the larger of the sizes of x and y along that dimension.
+
+For example:
+
+.. code-block:: python
+
+    import paddle
+    import numpy as np
+
+    paddle.enable_imperative()
+
+    x = paddle.imperative.to_variable(np.ones((2,1,4), np.float32))
+    y = paddle.imperative.to_variable(np.ones((3,1), np.float32))
+    z = x + y
+    print(z.shape)
+    # z's shape: [2, 3, 4]
+
+    x = paddle.imperative.to_variable(np.ones((2,1,4), np.float32))
+    y = paddle.imperative.to_variable(np.ones((3,2), np.float32))
+    z = x + y
+    print(z.shape)
+    # InvalidArgumentError: Broadcast dimension mismatch.
+
+In addition, PaddlePaddle's elementwise APIs add an axis parameter to the broadcasting semantics. When a smaller tensor y is broadcast against a larger tensor x,
+and y has fewer dimensions than x, axis indicates the dimension of x at which broadcasting starts.
+In this case, the comparison of dimensions runs from front to back, starting at axis. When axis=-1, axis = rank(x) - rank(y),
+and trailing dimensions of y with size 1 are ignored.
+
+For example:
+
+.. 
code-block:: python + + import paddle + import numpy as np + + paddle.enable_imperative() + + x = paddle.imperative.to_variable(np.ones((2,1,4), np.float32)) + y = paddle.imperative.to_variable(np.ones((3,1), np.float32)) + z = paddle.elementwise_add(x,y,axis=1) + # z'shape [2, 3, 4] + + x = paddle.imperative.to_variable(np.ones((2,3,4,5), np.float32)) + y = paddle.imperative.to_variable(np.ones((4,5), np.float32)) + z = paddle.elementwise_add(x,y,axis=1) + print(z.shape) + # InvalidArgumentError: Broadcast dimension mismatch. + # axis is indicated, comparation between dimentions starts at axis. + + x = paddle.imperative.to_variable(np.ones((2,3,4,5), np.float32)) + y = paddle.imperative.to_variable(np.ones((3), np.float32)) + z = paddle.elementwise_add(x,y,axis=1) + print(z.shape) + # z'shape [2, 3, 4, 5] + # Start comparation at axis=1 from forward to backward. \ No newline at end of file diff --git a/doc/paddle/beginners_guide/basic_concept/executor.rst b/doc/paddle/beginners_guide/basic_concept/executor.rst new file mode 100644 index 0000000000000000000000000000000000000000..dda20dc164aa4af5c217ff0b6f999ef6dd214948 --- /dev/null +++ b/doc/paddle/beginners_guide/basic_concept/executor.rst @@ -0,0 +1,40 @@ +.. _cn_user_guide_Executor: + +======= +Executor +======= + + +飞桨(PaddlePaddle,以下简称Paddle)的设计思想类似于高级编程语言C++和JAVA等。程序的执行过程被分为编译和执行两个阶段。 + +用户完成对 Program 的定义后,Executor 接受这段 Program 并转化为C++后端真正可执行的 FluidProgram,这一自动完成的过程叫做编译。 + +编译过后需要 Executor 来执行这段编译好的 FluidProgram。 + +例如上文实现的加法运算,当构建好 Program 后,需要创建 Executor,执行startup Program 和训练 Program: + +.. code-block:: python + + import paddle.fluid as fluid + import numpy + + a = fluid.data(name="a",shape=[1],dtype='float32') + b = fluid.data(name="b",shape=[1],dtype='float32') + + result = fluid.layers.elementwise_add(a,b) + + # 定义执行器,并且制定执行的设备为CPU + cpu = fluid.core.CPUPlace() + exe = fluid.Executor(cpu) + + exe.run(fluid.default_startup_program()) + + x = numpy.array([5]).astype("float32") + y = numpy.array([7]).astype("float32") + + outs = exe.run( + feed={'a':x,'b':y}, + fetch_list=[result]) + + # 打印输出结果,[array([12.], dtype=float32)] + print( outs ) diff --git a/doc/paddle/beginners_guide/basic_concept/fluid_basic_concept.rst b/doc/paddle/beginners_guide/basic_concept/fluid_basic_concept.rst new file mode 100644 index 0000000000000000000000000000000000000000..a915c2e3d4b8debf2cbabc7634ae3095566f2131 --- /dev/null +++ b/doc/paddle/beginners_guide/basic_concept/fluid_basic_concept.rst @@ -0,0 +1,389 @@ +================================ +PaddleFluid设计思想和基本使用概念 +================================ + + + +Paddle Fluid 是用来让用户像 PyTorch 和 Tensorflow Eager Execution 一样执行程序。 +在这些系统中,不再有模型这个概念,应用也不再包含一个用于描述 Operator 图或者一系列层的符号描述, +而是像通用程序那样描述训练或者预测的过程。 + + +深度学习平台的演化 +================ + +时至今日,深度学习已成为事实上最流行的机器学习技术。学术界多年研究加上工业界的长期实践提出了若干有效的基本建模单元: +全连接,卷积,循环神经网络等;设计各类训练技巧:初始化方法,跨层连接,各类 norm 技术等; +发明了各种新的优化算法:Adadelta,Adam 等; +各类固定的网络结构:highway, residual, attention 等纷纷涌现,不胜枚举。 +学术界工业界多年的付出共同促成了深度学习方法今日的影响力。 + +学术研究和生产实践中积累了大量的知识,能够很好的解释神经网络中基本模块各自独的学习能力和特性。 +基本模块和训练技术的组合能够搭建出千变万化的神经网络模型。 +基本模块和训练技术是有限的,但他们的组合却是千变万化,这是深度学习方法的魅力所在,也是难度所在。 + +正是这样高度的模块化特性,研究者和工程师们都在努力避免重复造轮子以提高研究和生产的效率, +又进一步催生了深度学习平台技术的发展,深度学习框架已演变成为 AI 基础设施中重要的一部分。 +从 Theano,到 DistBelief,到 TensorFlow;从 Caffe 到 Caffe2; +从 Torch 到 PyTorch;从 PaddlePaddle 到 PaddleFluid, +深度学习平台技术也经历了两代的演化,并向着第三代平台技术迈进。 + +站在历史发展的今天,当我们准备切换尝试使用一个新的深度学习平台作为支持自己学习和研究的工具时, +平台技术都发生了哪些演化,能够为我们的带来什么便利呢? + +先让我们来看看深度学习框架解决的三大问题: + +- 如何描述计算以支持未来潜在会出现的新模型? +- 如何高效利用异构设备最大化算力? +- 如何利用网络中的计算机进行分布式计算来处理千万亿级别的数据? 
+ +以上三个问题中的第一个和使用者研究者最为密切相关。 +这篇文章我们通过分析 PaddleFluid的设计理念, +来了解一个深度学习框架如何抽象深度学习模型,来看看我们的使用经验如何在不同深度学习平台之间过度和迁移。 + +如何描述计算 +============= + +让我们首先来看看 PaddleFluid 如何描述机器学习模型 + + +PaddleFluid之 :code:`Program` + +如何描述计算很大程度决定了一个神经网络框架计算功能的完备性。 +深度学习模型和方法历经二十多年的发展:“依次执行一组计算的前向, +再以和前向计算相反的顺序执行反向计算,中间无分支无交互”, +这样的模型结构已经无法满足研究者和千千万万框架使用者的想象力。 + +从 `PaddleFluid 的设计目标 `_ 来看, +在如何描述机器学习模型这一核心问题上,PaddleFluid 的目标是: +创造一种新的计算描述方式,不但能够描述至今为止人们已知的主流神经网络模型,并且能够支持未来会出现的任意模型。 + +PaddleFluid 是如何做到支持未来出现的新模型这一目标呢?PaddleFluid 的设计选择是: +对用户来说,用一段 :code:`Program` (在 PaddleFluid 内部会被转化为一种叫作 :code:`ProgramDesc` 的描述语言), +而不是用计算图来描述机器学习模型。 :code:`Program` 用符合用户使用直觉的方式, +提供一种新的描述语言能够描述任意复杂的机器学习模型。 + +对所有计算机专业同学学习编程语言的第一课一定是建立对“程序语言的三种执行结构:顺序执行,条件选择和循环执行”的认识。 +计算机世界的所有可计算逻辑都是由这三种执行结构表示,用这三种结构描述的逻辑是可计算的。那么同样道理, +对一个神经网络框架来说,如果可以和程序语言一样提供对这三种执行结构的支持,那么将可以描述任意复杂的, +可被计算机计算的机器学习模型。PaddleFluid通过提供对这三种执行结构的支持,来做到对任意复杂模型的描述。 + +具体来说: + +1. Fluid 的核心设计理念都可以类比到程序语言,如果已经有写程序的经验,那么使用 Fluid 构建神经网络模型的体验,将非常接近写程序; + +2. 在 PaddleFluid 中,用户不会显示地感知“计算图”这样的概念,一个机器学习模型被描述为一个 Fluid :code:`Program` (Fluid 内部称之为 :code:`ProgramDesc` ); + +- 一个 Fluid :code:`Program` 由一组嵌套的 :code:`Block` 构成。 :code:`Block` 的概念可以类比到 C++ 或是 Java 中的一对大括号,或是 Python 语言中的一个缩进快; +- :code:`Block` 中的计算由顺序执行、条件选择或者循环执行三种方式组合,构成复杂的计算逻辑。 + +3. Fluid :code:`Program` 中包含对计算和计算对象的描述。计算的描述称之为 Operator;计算作用的对象(或者说 Operator 的输入和输出)被统一为 Tensor。 + +在描述计算和计算的作用对象这一问题上,各个深度学习框架的选择是相同的,如果有一个平台的使用经验,那么将非常容易在各个平台之间进行迁移。 + +核心使用概念 +============= + +下面,我们将更详细地了解核心使用概念在PaddlePaddle的使用方法。 + +数据表示和计算的对象:Tensor +-------------------------- + +Tensor 是向量矩阵概念的扩展,是神经网络模型计算操作的基本对象。这在是今天所有主流深度学习平台的共同选择。 + +可以简单地将 Tensor 理解为一个 N 维向量,它可以有任意多的维度。一个 Tensor 具有两个基本特征: + +1. 数据类型:每个 Tensor 的所有元素具有同样的、已知的数据类型; + +2. 大小(或者说形状):即维度的个数(rank,阶)以及各维度的长度。 + +Tensor 某些维度的长度在定义模型阶段可能是未知的,在实际算法执行时才能确定。例如一个 mini-batch 中包含的样本数目(batch size),或者是一个 mini-batch 中序列的最大长度。 + +PaddleFluid中的Tensor +"""""""""""""""""""""" + +PaddleFluid 中也使用 Tensor 作为神经网络中输入输出数据的统一表示。Tensor 的概念在今天主流的深度学习平台中都是完全相同,可以在各个深度学习框架之间直接无缝迁移。 + +在 Fluid 中也同样存在三种特殊的 Tensor: + +1. 模型中的可学习参数 + +模型中的可学习参数生存期和整个训练任务一样长,会接受优化算法的更新。在 PaddleFluid 中同样以 :code:`Variable` 表示; +用户在绝大多数情况下都不需要自己来创建网络中的可学习参数,Fluid 为几乎常见的神经网络基本计算模块都提供了封装。 +以最简单的全连接模型为例,下面的代码片段会直接为全连接层创建连接权值 WW 和偏置( :code:`bias` )两个可学习参数, +无需显示地调用 variable 相关接口创建可学习参数。 + + +:: + + import paddle.fluid as fluid + + y = fluid.layers.fc(input=x, size=128, bias_attr=True) + +2. 输入输出Tensor + +整个神经网络的输入数据也是一个特殊的 Tensor,在这个 Tensor 中, +一些维度的大小在定义模型时无法确定(通常包括:batch size; +如果 mini-batch 之间,数据可变,也会包括序列的最大长度,图片的宽度和高度等),在定义模型时需要占位; +PaddleFluid 中使用 :code:`fluid.layers.data` 来接入输入数据, :code:`fluid.layer.data` 需要提供输入 Tensor 的 形状信息, +当遇到无法确定的维度 时, 相应维度指定为 None ,如下面的代码片段所示: + +:: + + import paddle.fluid as fluid + + x = fluid.layers.data(name="x", shape=[2, None, 3], dtype="int64") + +3. 
常量 Tensor 在 PaddleFluid 中需要通过组合 Tensor 和 :code:`fluid.layers.assign` 来实现。 + + +计算原语:Operation/Operator +---------------------------- + +Tensor 是今天所有主流深度学习框架的统一数据表示(输入、输出、中间计算结果、模型的可学习参数都是 Tensor)。 +另一方面,对数据的操作,在主流深度学习框架中也高度统一为:Operator/Operation。 +在中文中,通常我们会习惯将其称之为算子。 + +注:在 PaddleFluid 中使用 Operator 称呼对 Tensor 的操作。 + +Operation/Operator 接受多个 Tensor 作为输入,输出若干个 Tensor,表示了从输入到输出的变化。 + +PaddleFluid中的Operator +"""""""""""""""""""""""" + +PaddleFluid 支持的所有算子,可以在 `API 帮助文档 `_ 中查看。 + +为了便于用户使用,在 Python 端,Fluid 中的 Operator 被进一步封装入 :code:`paddle.fluid.layers` , +:code:`paddle.fluid.networks` 等模块。这是因为:一些常见的对Tensor的操作可能是有更多基础操作构成, +例如:l2 norm 内部由 reduce、elementwise_add,scale 等多个 Operator 组合计算逻辑完成, +为了提高使用的便利性,框架内部对基础 Operator 进行了一些封装,包括创建 Operator 依赖可学习参数, +可学习参数的初始化细节等,减少用户重复开发的成本。 + +对所有深度学习框架都面临同样的封装,在绝大多数情况下,用户很少会直接与框架底层的 Operator 直接打交道,而是使用框架提供的 layers,networks 等模块,降低开发的代码量。不论是什么样的概念,他们在各框架之间的本质和作用都是相同的:对 Tensor 的变换。 + +总结 +>>>>>> + +不论叫作 Operation、Operator 还是 layers,他们在各深度学习平台中的含义和作用都是相同的:对 Tensor 的变换。是一个深度学习平台提供的基础计算能力。可以在每个平台各自的 API 帮助文档中查到。 + +在各个深度学习平台都已加入 ONNX 项目的今天,每个深度学习平台提供给大家的基本算子都已趋同,与此同时,每个平台也各有其特点,会提供一些独特的算子,方便某一类任务的开发。 + +构建模型并执行 +-------------- + +整个训练任务运行方法如下: + +Fluid中的Program和Executor +""""""""""""""""""""""""""" + +1. Fluid 使用 :code:`Program` 描述神经网络模型,对用户来说,并没有计算图的概念。 +用户定义的所有 Tensor 以及对 Tensor 的操作:Operator 都会被加入一段 :code:`Program` 中; + +一段 Program 由嵌套的 :code:`Block` 构成,但用户无需显示地创建 :code:`Block` 或是显示地注意到 :code:`Block` 的存在; +在 Fluid 程序中, :code:`Block` 是在调用 :code:`while_op` , :code:`if_op` , :code:`parallel_do` 等特殊 :code:`Operator` 时,由这些 :code:`Operator` 来创建; +对用户使用来说,只需要知道自己正在向一段 Fluid Program 中添加变量( :code:`Tensor` )和操作( :code:`Operator` )即可。 + +2. Fluid 利用 :code:`Executor` 来执行一段 Fluid :code:`Program` 。 + +为进一步理解 Fluid 中 :code:`Executor` 的作用,需要先解释一下 Fluid 程序的执行流程。 下图展示单机上,Fluid 程序的执行流程: + +.. figure:: fluid_local_train.jpeg + + :scale: 50% + :align: center + + Figure.1 + + Fluid本地训练任务执行流程图 + +1. Fluid 设计思想和灵感非常类似于程序设计语言,和高级编译语言 C++/Java 编写程序的过程非常类似,Fluid 程序执行分为两个重要阶段:编译时和运行时; + +2. 编译期,用户通过调用 Fluid 提供的算子,向一段 :code:`Program` 中添加变量(Tensor)以及对变量的操作(Operators 或者 Layers)。用户只需要描述核心的前向计算,不需要关心反向计算,分布式下,异构设备下如何计算; + +3. 原始的 :code:`Program` 在平台内部转换为中间描述语言: :code:`ProgramDesc` ; + +4. 编译期最重要的一个功能模块是 Transpiler。Transpiler 接受一段 :code:`ProgramDesc` ,输出一段变化后的 :code:`ProgramDesc` ,作为后端 Executor 最终需要执行的 :code:`Fluid Program` ; + +最为常用的 Transipler 包括: + +1. 内存优化 Transipler:通过对变量读写依赖关系分析,插入内存回收 Operator 以维持运行过程中较小的内存开销; + +2. 分布式环境下的 Transpiler:接受用户定义的 local Program ,生成 Parameter Client 和 Parameter Server 执行的两段 :code:`Program` 。 + +3. 后端 Executor 接受 Transpiler 输出的这段 :code:`Program` ,依次执行其中的 Operator(可以类比为程序语言中的指令),在执行过程中会为 Operator 创建所需的输入输出并进行管理。 + +从上面的过程中可以看到,Fluid 程序的执行过程分为:编译器的定义 :code:`Program` ,和创建 :code:`Executor` 运行 :code:`Program` 。 + :code:`Executor` 执行一段 :code:`Program` 的过程是不可交互和不可中断的。 + +在 Fluid 中,可以创建多余一段 :code:`Program` 。默认情况,一个 PaddleFluid 程序中存在 2 段 Program: + +1. :code:`fluid.framework.default_startup_program` :其中定义了创建模型参数,输入输出,以及模型中可学习参数的初始化等各种操作; + +- :code:`default_startup_program` 可以由框架自动生成,使用时无需显示地创建; +- 如果调用修改了参数的默认初始化方式,框架会自动的将相关的修改加入 :code:`default_startup_program` 。 + +2. :code:`fluid.framework.default_main_program` :定义了神经网络模型,前向反向计算,以及优化算法对网络中可学习参数的更新; + +- 使用 Fluid 的核心就是构建起 :code:`default_main_program` 。 + +3. PaddleFluid 中的 :code:`Scope` 类似于 TensorFlow 中的 collection 这一概念,但在 Fluid 中 :code:`Scope` 是框架后端概念,用户无法直接操作。因此,在使用框架时无需关心。 + +总结 +""""" + +Fluid 中通过 Executor 来执行一段用户定义的 Fluid :code:`Program` 。 +1. Executor 连接了 Fluid 的前端和后端; + +2. 
Executor 接受用户定义的原始模型(一段 :code:`Program` ),通过调用系统中不同功能更的 :code:`Transpiler` 完成对原始 :code:`Program` 的变化,进行优化。 + +完整实例:如何完成一个机器学习模型的训练 +=================================== + + + +这一节,我们以 MNIST 手写数字识别问题 —— 机器学习任务的“Hello World”问题和数据,为例,通过一个可以运行的完整实例,来学习上文介绍的概念如何在PaddleFluid 平台使用。 + +步骤1:定义数据 +---------------- + +PaddleFluid 中以 :code:`fluid.layers.data` 来接收输入数据。 + +:: + + import numpy as np + import paddle + import paddle.fluid as fluid + + # define the input layers for the network. + x = fluid.layers.data(name="img", shape=[1, 28, 28], dtype="float32") + y_ = fluid.layers.data(name="label", shape=[1], dtype="int64") + +Fluid 中 Tensor 的第 0 维度固定为 batch size。在上面代码段中,图像输入 :code:`x` 的形状为:[1, 28, 28]。这三个维度的含义分别是:channel 数目,图像的高度和宽度。 + +实际上 Fluid 框架内部,一幅图像输入是一个 4-D Tensor,所有 Tensor 的第 0 维固定为 batch size。框架内部会自动为batch size进行填充占位。无需对batch size指定填充占位。 + +如果除去 batch size(第 0 维度)外,如果 Tensor 某一维度的大小只能在运行时确定,可以在该位置上直接指定 :code:`None` 进行占位。 + +步骤2:定义模型 +-------------- + +通过调用 Fluid 提供的算子定义含有一个隐层的神经网络。Fluid 模型的分为模型结构和优化方法两部分。这一点与 TensorFlow 程序十分相似似,使用概念可以直接对应进行迁移。 + +:: + + # define the network topology. + y = fluid.layers.fc(input=x, size=10, act="softmax") + loss = fluid.layers.cross_entropy(input=y, label=y_) + avg_loss = fluid.layers.mean(loss) + + # define the optimization algorithm. + optimizer = fluid.optimizer.Adam(learning_rate=1e-3) + optimizer.minimize(avg_loss) + +Fluid 使用 Program 而不是计算图描述模型,一般情况下,用户无需关心 Program 的细节,当调用以上 layers 时,会向一个全局的 Program: :code:`fluid.framework.default_main_program` 中插入变量(Tensor)和对变量的操作(上述代码段中的 layers 和 optimzier)。 + +步骤3:参数初始化 +---------------- + +如上文介绍,Fluid 程序中的 Executor 是连接 Fluid 前端和后端的接口。 + +默认一个Fluid模型存在至少两段 Program。用于初始化网络中的可学习参数的那一段 :code:`Program` 叫作 :code:`fluid.default_startup_program()` 。 + +只有执行器 executor 可以执行 Fluid Program,因此,在初始化网络中的可学习参数之前,需要首先创建一个 Fluid executor。 + +:: + + # define the executor. + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + +在以上代码段中, :code:`place` 用于告诉 executor 一段 Fluid Program 在何种设备上执行, +常见的有 :code:`fluid.CPUPlace()` 和 :code:`fluid.CUDAPlace()` 。 + +步骤4:数据输入 + 执行模型训练 +---------------------------- + +我们在步骤 2 中定义的神经网络模型最终被插入一段叫做 :code:`fluid.framework.default_main_program` 的 Fluid Program 中。 + +网络可学习参数初始化之后,可以通过让执行器 Executor 执行这段 :code:`fluid.framework.default_main_program` 来进行训练。 + +:: + + train_reader = paddle.batch( + paddle.reader.shuffle(paddle.dataset.mnist.train(), buf_size=5000), + batch_size=BATCH_SIZE) + feeder = fluid.DataFeeder(place=place, feed_list=[x, y_]) + + for pass_id in range(100): + for batch_id, data in enumerate(train_reader()): + loss = exe.run( + fluid.framework.default_main_program(), + feed=feeder.feed(data), + fetch_list=[avg_loss]) + print("Cur Cost : %f" % (np.array(loss[0])[0])) + +从上面的代码片段中可以看到,Fluid 程序的训练过程和 TensorFlow 程序的训练过程非常接近, +都放在一个 :code:`for` 循环中,循环读取一个 mini-batch 数据, +调用执行器执行 Fluid :code:`default_main_program` :接收 mini-batch 输入,在其上进行前向,反向和参数更新计算。 + +`注:上面程序使用了 Fluid 内置的 MNIST 数据,和我们提供给 TensorFlow 示例程序的 MNIST 数据完全一样。` + +步骤5:观察模型效果 +----------------- + +以上步骤已经构成了完整的 Tensorflow 模型训练程序,每个 batch 观察一次 loss,可以直观看到模型的迭代效果: + +.. figure:: fluid_mnist.png + + :scale: 40% + :align: center + + Figure.2 + + Fluid MNIST手写数字识别任务代价下降曲线 + +附:完整代码 +------------ + +:: + + import numpy as np + import paddle + import paddle.fluid as fluid + + def main(): + BATCH_SIZE = 128 + + # define the input layers for the network. 
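+        # Note: shape=[1, 28, 28] means [channel, height, width]; the batch dimension
+        # (dim 0) is filled in automatically by the framework and is not specified here.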
+ x = fluid.layers.data(name="img", shape=[1, 28, 28], dtype="float32") + y_ = fluid.layers.data(name="label", shape=[1], dtype="int64") + + # define the network topology. + y = fluid.layers.fc(input=x, size=10, act="softmax") + loss = fluid.layers.cross_entropy(input=y, label=y_) + avg_loss = fluid.layers.mean(loss) + + optimizer = fluid.optimizer.Adam(learning_rate=5e-3) + optimizer.minimize(avg_loss) + + # define the executor. + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + + train_reader = paddle.batch( + paddle.reader.shuffle(paddle.dataset.mnist.train(), buf_size=5000), + batch_size=BATCH_SIZE) + feeder = fluid.DataFeeder(place=place, feed_list=[x, y_]) + + for pass_id in range(100): + for batch_id, data in enumerate(train_reader()): + loss = exe.run( + fluid.framework.default_main_program(), + feed=feeder.feed(data), + fetch_list=[avg_loss]) + print("Cur Cost : %f" % (np.array(loss[0])[0])) + + if __name__ == "__main__": + main() diff --git a/doc/paddle/beginners_guide/basic_concept/fluid_local_train.jpeg b/doc/paddle/beginners_guide/basic_concept/fluid_local_train.jpeg new file mode 100644 index 0000000000000000000000000000000000000000..0a495901fafb85987e34acc3c454fb87e8160fca Binary files /dev/null and b/doc/paddle/beginners_guide/basic_concept/fluid_local_train.jpeg differ diff --git a/doc/paddle/beginners_guide/basic_concept/fluid_mnist.png b/doc/paddle/beginners_guide/basic_concept/fluid_mnist.png new file mode 100644 index 0000000000000000000000000000000000000000..e5ad0ba058c863cf68ef0789e58fcf67b3115fdb Binary files /dev/null and b/doc/paddle/beginners_guide/basic_concept/fluid_mnist.png differ diff --git a/doc/paddle/beginners_guide/basic_concept/index_cn.rst b/doc/paddle/beginners_guide/basic_concept/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..19a97536d0c363a0488d15be22e1cc03664cfdf0 --- /dev/null +++ b/doc/paddle/beginners_guide/basic_concept/index_cn.rst @@ -0,0 +1,25 @@ +############ +基本概念 +############ + +本文介绍飞桨核心框架中的基本概念: + +- `编程指南 <./programming_guide/programming_guide.html>`_ : 介绍飞桨的基本概念和使用方法。 +- `Variable `_ : Variable表示变量,在飞桨中可以包含任何类型的值,在大多数情况下是一个Lod-Tensor。 +- `Tensor `_ : Tensor表示数据。 +- `LoD-Tensor `_ : LoD-Tensor是飞桨的高级特性,它在Tensor基础上附加了序列信息,支持处理变长数据。 +- `Operator `_ : Operator表示对数据的操作。 +- `Program `_ : Program表示对计算过程的描述。 +- `Executor `_ : Executor表示执行引擎。 + +.. toctree:: + :hidden: + + programming_guide/programming_guide.md + variable.rst + tensor.rst + lod_tensor.rst + operator.rst + program.rst + executor.rst + diff --git a/doc/paddle/beginners_guide/basic_concept/index_en.rst b/doc/paddle/beginners_guide/basic_concept/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..8f8924576f6d9dec56dc3c78977e2bc024fcc8d4 --- /dev/null +++ b/doc/paddle/beginners_guide/basic_concept/index_en.rst @@ -0,0 +1,18 @@ +############ +Basic Concept +############ + +This paper introduces the basic concepts of Paddle: + +- `Guide to Fluid Programming <./programming_guide/programming_guide_en.html>`_ :introduces the basic concept and usage of Paddle. +- `LoD-Tensor User Guide `_ : LoD-Tensor is a high-level feature of Paddle. It adds sequence information on the basis of tensor and supports processing variable length data. + + +.. 
toctree:: + :hidden: + + programming_guide/programming_guide_en.md + lod_tensor_en.rst + + + diff --git a/doc/paddle/beginners_guide/basic_concept/lod_tensor.rst b/doc/paddle/beginners_guide/basic_concept/lod_tensor.rst new file mode 100644 index 0000000000000000000000000000000000000000..f9e1ccde75e488d315bd599dd5c5582df3f980d7 --- /dev/null +++ b/doc/paddle/beginners_guide/basic_concept/lod_tensor.rst @@ -0,0 +1,397 @@ +.. _cn_user_guide_lod_tensor: + +========= +LoDTensor +========= + +LoD(Level-of-Detail) Tensor是Paddle的高级特性,是对Tensor的一种扩充。LoDTensor通过牺牲灵活性来提升训练的效率。 + +**注:对于大部分用户来说,无需关注LoDTensor的用法。** + + +变长序列的解决方案 +================ + +现在主流的训练框架都采用batch的训练方式,即一个batch中包含多个样本。在nlp的任务中,一个batch中包含N个句子,句子的长度可能会不一致,为了解决这种长度不一致问题,Paddle提供了两种解决方案:1)padding,即在句子的结尾(或开头)添加padding id(建议的方式);2)LoDTensor,tensor中同时保存序列的长度信息。 + +对于padding的方式,会增加框架的计算量,但是对于大部分nlp任务,可以通过分桶、排序等机制,使得一个batch内的句子长度尽可能接近、能够降低padding id的比例,padding对于训练的计算量影响可以忽略。而且可以通过引入mask(记录哪些位置是padding id)信息,来移除padding id对于训练效果的影响。 + +但是对于一部分nlp任务来说,一个batch内的句子长度无法做到接近,比如聊天任务,需要计算query和多个答案之间的相似度,答案必须在一个batch中,这些答案的长度差异可能会非常大,最长的几百个token,最短的10几个token,如果采用padding的方式,计算量会增加几十倍,这种场景非常适合LoDTensor。LoDTensor存储了样本的长度信息,不需要增加padding的词,能给大幅减少计算量,从而提高训练的速度。 + +LoDTensor将长度不一致的维度拼接为一个大的维度,并引入了一个索引数据结构(LoD)来将张量分割成序列。LoDTensor进行了维度拼接之后,rank大小和之前padding的方式不一致,在一些运算(如dot attention)逻辑比padding方式要复杂。 + +**注:如果训练样本无法通过排序、分桶等手段,使得一个batch内的样本的长度非常接近,推荐用户使用LoDTensor;其他情况下,建议用户使用padding的组网方式。** + +LoD 索引 +=========== + +为了更好的理解LoD的概念,本节提供了几个例子供您参考: + +**句子组成的 mini-batch** + +假设一个mini-batch中有3个句子,每个句子中分别包含3个、1个和2个单词。我们可以用(3+1+2)xD维Tensor 加上一些索引信息来表示这个mini-batch: + +.. code-block :: text + + 3 1 2 + | | | | | | + +上述表示中,每一个 :code:`|` 代表一个D维的词向量,数字3,1,2构成了 1-level LoD。 + +**递归序列** + +让我们来看另一个2-level LoD-Tensor的例子:假设存在一个mini-batch中包含3个句子、1个句子和2个句子的文章,每个句子都由不同数量的单词组成,则这个mini-batch的样式可以看作: + +.. code-block:: text + + + 3 1 2 + 3 2 4 1 2 3 + ||| || |||| | || ||| + + +表示的LoD信息为: + +.. code-block:: text + + [[3,1,2]/*level=0*/,[3,2,4,1,2,3]/*level=1*/] + + +**视频的mini-batch** + +在视觉任务中,时常需要处理视频和图像这些元素是高维的对象,假设现存的一个mini-batch包含3个视频,分别有3个,1个和2个帧,每个帧都具有相同大小:640x480,则这个mini-batch可以被表示为: + +.. code-block:: text + + 3 1 2 + 口口口 口 口口 + + +最底层tensor大小为(3+1+2)x640x480,每一个 :code:`口` 表示一个640x480的图像 + +**图像的mini-batch** + +在传统的情况下,比如有N个固定大小的图像的mini-batch,LoD-Tensor表示为: + +.. code-block:: text + + 1 1 1 1 1 + 口口口口 ... 口 + +在这种情况下,我们不会因为索引值都为1而忽略信息,仅仅把LoD-Tensor看作是一个普通的张量: + +.. code-block:: text + + 口口口口 ... 口 + +**模型参数** + +模型参数只是一个普通的张量,在Fluid中它们被表示为一个0-level LoD-Tensor。 + +LoDTensor的偏移表示 +===================== + +为了快速访问基本序列,Fluid提供了一种偏移表示的方法——保存序列的开始和结束元素,而不是保存长度。 + +在上述例子中,您可以计算基本元素的长度: + +.. code-block:: text + + 3 2 4 1 2 3 + +将其转换为偏移表示: + +.. code-block:: text + + 0 3 5 9 10 12 15 + = = = = = = + 3 2+3 4+5 1+9 2+10 3+12 + +所以我们知道第一个句子是从单词0到单词3,第二个句子是从单词3到单词5。 + +类似的,LoD的顶层长度 + +.. code-block:: text + + 3 1 2 + +可以被转化成偏移形式: + +.. code-block:: text + + 0 3 4 6 + = = = + 3 3+1 4+2 + +因此该LoD-Tensor的偏移表示为: + +.. code-block:: text + + 0 3 4 6 + 3 5 9 10 12 15 + + +LoD-Tensor +============= +一个LoD-Tensor可以被看作是一个树的结构,树叶是基本的序列元素,树枝作为基本元素的标识。 + +在 Fluid 中 LoD-Tensor 的序列信息有两种表述形式:原始长度和偏移量。在 Paddle 内部采用偏移量的形式表述 LoD-Tensor,以获得更快的序列访问速度;在 python API中采用原始长度的形式表述 LoD-Tensor 方便用户理解和计算,并将原始长度称为: :code:`recursive_sequence_lengths` 。 + +以上文提到的一个2-level LoD-Tensor为例: + +.. 
code-block:: text + + 3 1 2 + 3 2 4 1 2 3 + ||| || |||| | || ||| + +- 以偏移量表示此 LoD-Tensor:[ [0,3,4,6] , [0,3,5,9,10,12,15] ], +- 以原始长度表达此 Lod-Tensor:recursive_sequence_lengths=[ [3-0 , 4-3 , 6-4] , [3-0 , 5-3 , 9-5 , 10-9 , 12-10 , 15-12] ]。 + + +以文字序列为例: [3,1,2] 可以表示这个mini-batch中有3篇文章,每篇文章分别有3、1、2个句子,[3,2,4,1,2,3] 表示每个句子中分别含有3、2、4、1、2、3个字。 + +recursive_seq_lens 是一个双层嵌套列表,也就是列表的列表,最外层列表的size表示嵌套的层数,也就是lod-level的大小;内部的每个列表,对应表示每个lod-level下,每个元素的大小。 + +下面三段代码分别介绍如何创建一个LoD-Tensor,如何将LoD-Tensor转换成Tensor,如何将Tensor转换成LoD-Tensor: + +* 创建 LoD-Tensor + +.. code-block:: python + + #创建lod-tensor + import paddle.fluid as fluid + import numpy as np + + a = fluid.create_lod_tensor(np.array([[1],[1],[1], + [1],[1], + [1],[1],[1],[1], + [1], + [1],[1], + [1],[1],[1]]).astype('int64') , + [[3,1,2] , [3,2,4,1,2,3]], + fluid.CPUPlace()) + + #查看lod-tensor嵌套层数 + print (len(a.recursive_sequence_lengths())) + # output:2 + + #查看最基础元素个数 + print (sum(a.recursive_sequence_lengths()[-1])) + # output:15 (3+2+4+1+2+3=15) + +* LoD-Tensor 转 Tensor + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + # 创建一个 LoD-Tensor + a = fluid.create_lod_tensor(np.array([[1.1], [2.2],[3.3],[4.4]]).astype('float32'), [[1,3]], fluid.CPUPlace()) + + def LodTensor_to_Tensor(lod_tensor): + # 获取 LoD-Tensor 的 lod 信息 + lod = lod_tensor.lod() + # 转换成 array + array = np.array(lod_tensor) + new_array = [] + # 依照原LoD-Tensor的层级信息,转换成Tensor + for i in range(len(lod[0]) - 1): + new_array.append(array[lod[0][i]:lod[0][i + 1]]) + return new_array + + new_array = LodTensor_to_Tensor(a) + + # 输出结果 + print(new_array) + +* Tensor 转 LoD-Tensor + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + def to_lodtensor(data, place): + # 存储Tensor的长度作为LoD信息 + seq_lens = [len(seq) for seq in data] + cur_len = 0 + lod = [cur_len] + for l in seq_lens: + cur_len += l + lod.append(cur_len) + # 对待转换的 Tensor 降维 + flattened_data = np.concatenate(data, axis=0).astype("float32") + flattened_data = flattened_data.reshape([len(flattened_data), 1]) + # 为 Tensor 数据添加lod信息 + res = fluid.LoDTensor() + res.set(flattened_data, place) + res.set_lod([lod]) + return res + + # new_array 为上段代码中转换的Tensor + lod_tensor = to_lodtensor(new_array,fluid.CPUPlace()) + + # 输出 LoD 信息 + print("The LoD of the result: {}.".format(lod_tensor.lod())) + + # 检验与原Tensor数据是否一致 + print("The array : {}.".format(np.array(lod_tensor))) + + + + +代码示例 +=========== + +本节代码将根据指定的级别y-lod,扩充输入变量x。本例综合了LoD-Tensor的多个重要概念,跟随代码实现,您将: + +- 直观理解Fluid中 :code:`fluid.layers.sequence_expand` 的实现过程 +- 掌握如何在Fluid中创建LoD-Tensor +- 学习如何打印LoDTensor内容 + + + +**定义计算过程** + +layers.sequence_expand通过获取 y 的 lod 值对 x 的数据进行扩充,关于 :code:`fluid.layers.sequence_expand` 的功能说明,请先阅读 :ref:`cn_api_fluid_layers_sequence_expand` 。 + +序列扩充代码实现: + +.. code-block:: python + + x = fluid.layers.data(name='x', shape=[1], dtype='float32', lod_level=1) + y = fluid.layers.data(name='y', shape=[1], dtype='float32', lod_level=2) + out = fluid.layers.sequence_expand(x=x, y=y, ref_level=0) + +*说明*:输出LoD-Tensor的维度仅与传入的真实数据维度有关,在定义网络结构阶段为x、y设置的shape值,仅作为占位,并不影响结果。 + +**创建Executor** + +.. code-block:: python + + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + +**准备数据** + +这里我们调用 :code:`fluid.create_lod_tensor` 创建 :code:`sequence_expand` 的输入数据,通过定义 y_d 的 LoD 值,对 x_d 进行扩充。其中,输出值只与 y_d 的 LoD 值有关,y_d 的 data 值在这里并不参与计算,维度上与LoD[-1]一致即可。 + +:code:`fluid.create_lod_tensor()` 的使用说明请参考 :ref:`cn_api_fluid_create_lod_tensor` 。 + +实现代码如下: + +.. 
code-block:: python + + x_d = fluid.create_lod_tensor(np.array([[1.1],[2.2],[3.3],[4.4]]).astype('float32'), [[1,3]], place) + y_d = fluid.create_lod_tensor(np.array([[1.1],[1.1],[1.1],[1.1],[1.1],[1.1]]).astype('float32'), [[1,3], [2,1,2,1]],place) + + +**执行运算** + +在Fluid中,LoD>1的Tensor与其他类型的数据一样,使用 :code:`feed` 定义数据传入顺序。此外,由于输出results是带有LoD信息的Tensor,需在exe.run( )中添加 :code:`return_numpy=False` 参数,获得LoD-Tensor的输出结果。 + +.. code-block:: python + + results = exe.run(fluid.default_main_program(), + feed={'x':x_d, 'y': y_d }, + fetch_list=[out],return_numpy=False) + +**查看LoDTensor结果** + +由于LoDTensor的特殊属性,无法直接print查看内容,常用操作时将LoD-Tensor作为网络的输出fetch出来,然后执行 numpy.array(lod_tensor), 就能转成numpy array: + +.. code-block:: python + + np.array(results[0]) + +输出结果为: + +.. code-block:: text + + array([[1.1],[2.2],[3.3],[4.4],[2.2],[3.3],[4.4],[2.2],[3.3],[4.4]]) + +**查看序列长度** + +可以通过查看序列长度得到 LoDTensor 的递归序列长度: + +.. code-block:: python + + results[0].recursive_sequence_lengths() + +输出结果为: + +.. code-block:: text + + [[1L, 3L, 3L, 3L]] + +**完整代码** + +您可以运行下列完整代码,观察输出结果: + +.. code-block:: python + + #加载库 + import paddle + import paddle.fluid as fluid + import numpy as np + #定义前向计算 + x = fluid.layers.data(name='x', shape=[1], dtype='float32', lod_level=1) + y = fluid.layers.data(name='y', shape=[1], dtype='float32', lod_level=2) + out = fluid.layers.sequence_expand(x=x, y=y, ref_level=0) + #定义运算场所 + place = fluid.CPUPlace() + #创建执行器 + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + #创建LoDTensor + x_d = fluid.create_lod_tensor(np.array([[1.1], [2.2],[3.3],[4.4]]).astype('float32'), [[1,3]], place) + y_d = fluid.create_lod_tensor(np.array([[1.1],[1.1],[1.1],[1.1],[1.1],[1.1]]).astype('float32'), [[1,3], [1,2,1,2]], place) + #开始计算 + results = exe.run(fluid.default_main_program(), + feed={'x':x_d, 'y': y_d }, + fetch_list=[out],return_numpy=False) + #输出执行结果 + print("The data of the result: {}.".format(np.array(results[0]))) + #输出 result 的序列长度 + print("The recursive sequence lengths of the result: {}.".format(results[0].recursive_sequence_lengths())) + #输出 result 的 LoD + print("The LoD of the result: {}.".format(results[0].lod())) + + +FAQ: +======= + +问:如何打印variable的lod 信息 + +答: + +1. 可以使用 `executor.run` 将你需要查看的 `variable` fetch 出来,然后打印其 lod 信息,注意运行时设置 `executor.run` 方法的 `return_numpy` 参数为 `False`。 + + .. code-block:: python + + results = exe.run(fluid.default_main_program(), + feed={'x':x_d, 'y': y_d }, + fetch_list=[out],return_numpy=False) + lod_tensor = results[0] + print (lod_tensor.lod()) + +2. 可以使用fluid.layers.Print() + + .. code-block:: python + + y = fluid.layers.data(name='y', shape=[1], dtype='float32', lod_level=2) + + fluid.layers.Print(y) + + +总结 +======== + +至此,相信您已经基本掌握了LoD-Tensor的概念,尝试修改上述代码中的 x_d 与 y_d,观察输出结果,有助于您更好的理解这一灵活的结构。 + +更多LoDTensor的模型应用,可以参考新手入门中的 `词向量 <../../../beginners_guide/basics/word2vec/index.html>`_ 、`个性化推荐 <../../../beginners_guide/basics/recommender_system/index.html>`_、`情感分析 <../../../beginners_guide/basics/understand_sentiment/index.html>`_ 等指导教程。 + +更高阶的应用案例,请参考 `模型库 <../../../user_guides/models/index_cn.html>`_ 中的相关内容。 diff --git a/doc/paddle/beginners_guide/basic_concept/lod_tensor_en.rst b/doc/paddle/beginners_guide/basic_concept/lod_tensor_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..a5702fe7bb878f5d7cfe0c5a97c34b498c18439a --- /dev/null +++ b/doc/paddle/beginners_guide/basic_concept/lod_tensor_en.rst @@ -0,0 +1,373 @@ +.. 
_user_guide_lod_tensor: + +##################### +LoD-Tensor User Guide +##################### + +LoD(Level-of-Detail) Tensor is a unique term in Fluid, which can be constructed by appending sequence information to Tensor. Data transferred in Fluid contain input, output and learnable parameters of the network, all of which are represented by LoD-Tensor. + +With the help of this user guide, you will learn the design idea of LoD-Tensor in Fluid so that you can use such a data type more flexibly. + +Challenge of variable-length sequences +====================================== + +In most deep learning frameworks, a mini-batch is represented by Tensor. + +For example, if there are 10 pictures in a mini-batch and the size of each picture is 32*32, the mini-batch will be a 10*32*32 Tensor. + +Or in the NLP task, there are N sentences in a mini-batch and the length of each sentence is L. Every word is represented by a one-hot vector with D dimensions. Then the mini-batch can be represented by an N*L*D Tensor. + +In the two examples above, the size of each sequence element remains the same. However, the data to be trained are variable-length sequences in many cases. For this scenario, method to be taken in most frameworks is to set a fixed length and sequence data shorter than the fixed length will be padded with 0 to reach the fixed length. + +Owing to the LoD-Tensor in Fluid, it is not necessary to keep the lengths of sequence data in every mini-batch constant.Therefore tasks sensitive to sequence formats like NLP can also be finished without padding. + +Index Data Structure (LoD) is introduced to Fluid to split Tensor into sequences. + +Index Structure - LoD +====================== + +To have a better understanding of the concept of LoD, you can refer to the examples in this section. + +**mini-batch consisting of sentences** + +Suppose a mini-batch contains three sentences, and each contains 3, 1, 2 words respectively. Then the mini-batch can be represented by a (3+1+2)*D Tensor with some index information appended: + +.. code-block :: text + + 3 1 2 + | | | | | | + +In the text above, each :code:`|` represents a word vector with D dimension and a 1-level LoD is made up of digits 3,1,2 . + +**recursive sequence** + +Take a 2-level LoD-Tensor for example, a mini-batch contains articles of 3 sentences, 1 sentence and 2 sentences. The number of words in every sentence is different. Then the mini-batch is formed as follows: + +.. code-block:: text + + + 3 1 2 + 3 2 4 1 2 3 + ||| || |||| | || ||| + + +the LoD to express the format: + +.. code-block:: text + + [[3,1,2]/*level=0*/,[3,2,4,1,2,3]/*level=1*/] + + +**mini-batch consisting of video data** + +In the task of computer vision, it usually needs to deal objects with high dimension like videos and pictures. Suppose a mini-batch contains 3 videos, which is composed of 3 frames, 1 frames, 2 frames respectively. The size of each frame is 640*480. Then the mini-batch can be described as: + +.. code-block:: text + + 3 1 2 + 口口口 口 口口 + + +The size of the tensor at the bottom is (3+1+2)*640*480. Every :code:`口` represents a 640*480 picture. + +**mini-batch consisting of pictures** + +Traditionally, for a mini-batch of N pictures with fixed size, LoD-Tensor is described as: + +.. code-block:: text + + 1 1 1 1 1 + 口口口口 ... 口 + +Under such circumstance, we will consider LoD-Tensor as a common tensor instead of ignoring information because of the indices of all elements are 1. + +.. code-block:: text + + 口口口口 ... 
口 + +**model parameter** + +model parameter is a common tensor which is described as a 0-level LoD-Tensor in Fluid. + +LoDTensor expressed by offset +============================= + +To have a quick access to the original sequence, you can take the offset expression method——store the first and last element of a sequence instead of its length. + +In the example above, you can compute the length of fundamental elements: + +.. code-block:: text + + 3 2 4 1 2 3 + +It is expressed by offset as follows: + +.. code-block:: text + + 0 3 5 9 10 12 15 + = = = = = = + 3 2+3 4+5 1+9 2+10 3+12 + +Therefore we infer that the first sentence starts from word 0 to word 3 and the second sentence starts from word 3 to word 5. + +Similarly, for the length of the top layer of LoD + +.. code-block:: text + + 3 1 2 + +It can be expressed by offset: + +.. code-block:: text + + 0 3 4 6 + = = = + 3 3+1 4+2 + +Therefore the LoD-Tensor is expressed by offset: + +.. code-block:: text + + 0 3 4 6 + 3 5 9 10 12 15 + + +LoD-Tensor +============= +A LoD-Tensor can be regarded as a tree of which the leaf is an original sequence element and branch is the flag of fundamental element. + +There are two ways to express sequence information of LoD-Tensor in Fluid: primitive length and offset. LoD-Tensor is expressed by offset in Paddle to offer a quicker access to sequence;LoD-Tensor is expressed by primitive length in python API to make user understand and compute more easily. The primary length is named as :code:`recursive_sequence_lengths` . + +Take a 2-level LoD-Tensor mentioned above as an example: + +.. code-block:: text + + 3 1 2 + 3 2 4 1 2 3 + ||| || |||| | || ||| + +- LoD-Tensor expressed by offset: [ [0,3,4,6] , [0,3,5,9,10,12,15] ] +- LoD-Tensor expressed by primitive length: recursive_sequence_lengths=[ [3-0 , 4-3 , 6-4] , [3-0 , 5-3 , 9-5 , 10-9 , 12-10 , 15-12] ] + + +Take text sequence as an example,[3,1,2] indicates there are 3 articles in the mini-batch,which contains 3,1,2 sentences respectively.[3,2,4,1,2,3] indicates there are 3,2,4,1,2,3 words in sentences respectively. + +recursive_seq_lens is a double Layer nested list, and in other words, the element of the list is list. The size of the outermost list represents the nested layers, namely the size of lod-level; Each inner list represents the size of each element in each lod-level. + +The following three pieces of codes introduce how to create LoD-Tensor, how to transform LoD-Tensor to Tensor and how to transform Tensor to LoD-Tensor respectively: + + * Create LoD-Tensor + +.. code-block:: python + + #Create lod-tensor + import paddle.fluid as fluid + import numpy as np + + a = fluid.create_lod_tensor(np.array([[1],[1],[1], + [1],[1], + [1],[1],[1],[1], + [1], + [1],[1], + [1],[1],[1]]).astype('int64') , + [[3,1,2] , [3,2,4,1,2,3]], + fluid.CPUPlace()) + + #Check lod-tensor nested layers + print (len(a.recursive_sequence_lengths())) + # output:2 + + #Check the number of the most fundamental elements + print (sum(a.recursive_sequence_lengths()[-1])) + # output:15 (3+2+4+1+2+3=15) + +* Transform LoD-Tensor to Tensor + + .. 
code-block:: python + + import paddle.fluid as fluid + import numpy as np + + # create LoD-Tensor + a = fluid.create_lod_tensor(np.array([[1.1], [2.2],[3.3],[4.4]]).astype('float32'), [[1,3]], fluid.CPUPlace()) + + def LodTensor_to_Tensor(lod_tensor): + # get lod information of LoD-Tensor + lod = lod_tensor.lod() + # transform into array + array = np.array(lod_tensor) + new_array = [] + # transform to Tensor according to the layer information of the original LoD-Tensor + for i in range(len(lod[0]) - 1): + new_array.append(array[lod[0][i]:lod[0][i + 1]]) + return new_array + + new_array = LodTensor_to_Tensor(a) + + # output the result + print(new_array) + + * Transform Tensor to LoD-Tensor + + .. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + def to_lodtensor(data, place): + # save the length of Tensor as LoD information + seq_lens = [len(seq) for seq in data] + cur_len = 0 + lod = [cur_len] + for l in seq_lens: + cur_len += l + lod.append(cur_len) + # decrease the dimention of transformed Tensor + flattened_data = np.concatenate(data, axis=0).astype("float32") + flattened_data = flattened_data.reshape([len(flattened_data), 1]) + # add lod information to Tensor data + res = fluid.LoDTensor() + res.set(flattened_data, place) + res.set_lod([lod]) + return res + + # new_array is the transformed Tensor above + lod_tensor = to_lodtensor(new_array,fluid.CPUPlace()) + + # output LoD information + print("The LoD of the result: {}.".format(lod_tensor.lod())) + + # examine the consistency with Tensor data + print("The array : {}.".format(np.array(lod_tensor))) + + + + + +Code examples +============== + +Input variable x is expanded according to specified layer level y-lod in the code example in this section. The example below contains some fundamental conception of LoD-Tensor. By following the code, you will + +- Have a direct understanding of the implementation of :code:`fluid.layers.sequence_expand` in Fluid +- Know how to create LoD-Tensor in Fluid +- Learn how to print the content of LoDTensor + + + +**Define the Process of Computing** + +layers.sequence_expand expands x by obtaining the lod value of y. About more explanation of :code:`fluid.layers.sequence_expand` , please read :ref:`api_fluid_layers_sequence_expand` first. + +Code of sequence expanding: + +.. code-block:: python + + x = fluid.layers.data(name='x', shape=[1], dtype='float32', lod_level=1) + y = fluid.layers.data(name='y', shape=[1], dtype='float32', lod_level=2) + out = fluid.layers.sequence_expand(x=x, y=y, ref_level=0) + +*Note*:The dimension of input LoD-Tensor is only associated with the dimension of real data transferred in. The shape value set for x and y in the definition of network structure is just a placeholder with little influence on the result. + +**Create Executor** + +.. code-block:: python + + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + +**Prepare Data** + +Here we use :code:`fluid.create_lod_tensor` to create the input data of :code:`sequence_expand` and expand x_d by defining LoD of y_d. The output value is only associated with LoD of y_d. And the data of y_d is not invovled in the process of computation. The dimension of y_d must keep consistent with as its LoD[-1] . + +About the user guide of :code:`fluid.create_lod_tensor()` , please refer to :ref:`api_fluid_create_lod_tensor` . + +Code: + +.. 
code-block:: python + + x_d = fluid.create_lod_tensor(np.array([[1.1],[2.2],[3.3],[4.4]]).astype('float32'), [[1,3]], place) + y_d = fluid.create_lod_tensor(np.array([[1.1],[1.1],[1.1],[1.1],[1.1],[1.1]]).astype('float32'), [[1,3], [2,1,2,1]],place) + + +**Execute Computing** + +For tensor whose LoD > 1 in Fluid, like data of other types, the order of transfering data is defined by :code:`feed` . In addition, parameter :code:`return_numpy=False` needs to be added to exe.run() to get the output of LoD-Tensor because results are Tensors with LoD information. + +.. code-block:: python + + results = exe.run(fluid.default_main_program(), + feed={'x':x_d, 'y': y_d }, + fetch_list=[out],return_numpy=False) + +**Check the result of LodTensor** + +Because of the special attributes of LoDTensor, you could not print to check the content. The usual solution to the problem is to fetch the LoDTensor as the output of network and then execute numpy.array(lod_tensor) to transfer LoDTensor into numpy array: + +.. code-block:: python + + np.array(results[0]) + +Output: + +.. code-block:: text + + array([[1.1],[2.2],[3.3],[4.4],[2.2],[3.3],[4.4],[2.2],[3.3],[4.4]]) + +**Check the length of sequence** + +You can get the recursive sequence length of LoDTensor by checking the sequence length: + +.. code-block:: python + + results[0].recursive_sequence_lengths() + +Output + +.. code-block:: text + + [[1L, 3L, 3L, 3L]] + +**Complete Code** + +You can check the output by executing the following complete code: + +.. code-block:: python + + #Load + import paddle + import paddle.fluid as fluid + import numpy as np + #Define forward computation + x = fluid.layers.data(name='x', shape=[1], dtype='float32', lod_level=1) + y = fluid.layers.data(name='y', shape=[1], dtype='float32', lod_level=2) + out = fluid.layers.sequence_expand(x=x, y=y, ref_level=0) + #Define place for computation + place = fluid.CPUPlace() + #Create executer + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + #Create LoDTensor + x_d = fluid.create_lod_tensor(np.array([[1.1], [2.2],[3.3],[4.4]]).astype('float32'), [[1,3]], place) + y_d = fluid.create_lod_tensor(np.array([[1.1],[1.1],[1.1],[1.1],[1.1],[1.1]]).astype('float32'), [[1,3], [1,2,1,2]], place) + #Start computing + results = exe.run(fluid.default_main_program(), + feed={'x':x_d, 'y': y_d }, + fetch_list=[out],return_numpy=False) + #Output result + print("The data of the result: {}.".format(np.array(results[0]))) + #print the length of sequence of result + print("The recursive sequence lengths of the result: {}.".format(results[0].recursive_sequence_lengths())) + #print the LoD of result + print("The LoD of the result: {}.".format(results[0].lod())) + + +Summary +======== + +Then, we believe that you have known about the concept LoD-Tensor. And an attempt to change x_d and y_d in code above and then to check the output may help you get a better understanding of this flexible structure. + +About more model applications of LoDTensor, you can refer to `Word2vec <../../../beginners_guide/basics/word2vec/index_en.html>`_ , `Individual Recommendation <../../../beginners_guide/basics/recommender_system/index_en.html>`_ , `Sentiment Analysis <../../../beginners_guide/basics/understand_sentiment/index_en.html>`_ in the beginner's guide. + +About more difffiult and complex examples of application, please refer to associated information about `models <../../../user_guides/models/index_en.html>`_ . 
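+
+Besides fetching a variable with ``return_numpy=False`` and calling ``.lod()`` on the result, as done in the complete code above, you can also insert ``fluid.layers.Print`` into the network to dump a variable (including its LoD) at execution time. A minimal sketch, reusing the ``y`` definition from this guide:
+
+.. code-block:: python
+
+    y = fluid.layers.data(name='y', shape=[1], dtype='float32', lod_level=2)
+    fluid.layers.Print(y)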
diff --git a/doc/paddle/beginners_guide/basic_concept/operator.rst b/doc/paddle/beginners_guide/basic_concept/operator.rst new file mode 100644 index 0000000000000000000000000000000000000000..2d7199380429d5fc482ffcd6178a38e0a221d051 --- /dev/null +++ b/doc/paddle/beginners_guide/basic_concept/operator.rst @@ -0,0 +1,57 @@ +.. _cn_user_guide_Operator: + +======= +Operator +======= + +在飞桨(PaddlePaddle,以下简称Paddle)中,所有对数据的操作都由Operator表示 + +为了便于用户使用,在Python端,Paddle中的Operator被封装入 :code:`paddle.fluid.layers` , :code:`paddle.fluid.nets` 等模块。 + +因为一些常见的对Tensor的操作可能是由更多基础操作构成,为了提高使用的便利性,框架内部对基础 Operator 进行了一些封装,包括创建 Operator 依赖可学习参数,可学习参数的初始化细节等,减少用户重复开发的成本。 + +例如用户可以利用 :code:`paddle.fluid.layers.elementwise_add()` 实现两个输入Tensor的加法运算: + +.. code-block:: python + + import paddle.fluid as fluid + import numpy + + a = fluid.data(name="a",shape=[1],dtype='float32') + b = fluid.data(name="b",shape=[1],dtype='float32') + + result = fluid.layers.elementwise_add(a,b) + + # 定义执行器,并且制定执行的设备为CPU + cpu = fluid.core.CPUPlace() + exe = fluid.Executor(cpu) + exe.run(fluid.default_startup_program()) + + x = numpy.array([5]).astype("float32") + y = numpy.array([7]).astype("float32") + + outs = exe.run( + feed={'a':x,'b':y}, + fetch_list=[result]) + # 打印输出结果,[array([12.], dtype=float32)] + print( outs ) + + +如果想获取网络执行过程中的a,b的具体值,可以将希望查看的变量添加在fetch_list中。 + +.. code-block:: python + + #执行计算 + outs = exe.run( + feed={'a':x,'b':y}, + fetch_list=[a,b,result]) + #查看输出结果 + print( outs ) + + +输出结果: + +.. code-block:: python + + [array([5.], dtype=float32), array([7.], dtype=float32), array([12.], dtype=float32)] + diff --git a/doc/paddle/beginners_guide/basic_concept/program.rst b/doc/paddle/beginners_guide/basic_concept/program.rst new file mode 100644 index 0000000000000000000000000000000000000000..78b174a2dcbaaae096b0f7716e3ca915937ded54 --- /dev/null +++ b/doc/paddle/beginners_guide/basic_concept/program.rst @@ -0,0 +1,51 @@ +.. _cn_user_guide_Program: + +======= +Program +======= + + +飞桨(PaddlePaddle,以下简称Paddle)用Program的形式动态描述整个计算过程。这种描述方式,兼具网络结构修改的灵活性和模型搭建的便捷性,在保证性能的同时极大地提高了框架对模型的表达能力。 + +用户定义Operator会被顺序的放入Program中,在网络搭建过程中,由于不能使用python 的控制流,Paddle通过同时提供分支和循环两类控制流op结构的支持,让用户可以通过组合描述任意复杂的模型。 + +**顺序执行:** + +用户可以使用顺序执行的方式搭建网络: + +.. code-block:: python + + x = fluid.data(name='x',shape=[None, 13], dtype='float32') + y_predict = fluid.layers.fc(input=x, size=1, act=None) + y = fluid.layers.data(name='y', shape=[1], dtype='float32') + cost = fluid.layers.square_error_cost(input=y_predict, label=y) + + +**条件分支——switch、if else:** + +Fluid 中有 switch 和 if-else 类来实现条件选择,用户可以使用这一执行结构在学习率调节器中调整学习率或其他希望的操作: + +.. 
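code-block:: python
+
+    # 补充示意(非原文示例):原示例片段中的 global_step、zero_var 未给出定义,
+    # 这里补齐相关变量的定义,构成一个自洽的 Switch 用法示意;
+    # global_step 在实际训练中通常是步数计数器,这里仅用常量演示分支逻辑。
+    import paddle.fluid as fluid
+
+    lr = fluid.layers.tensor.create_global_var(
+        shape=[1], value=0.0, dtype='float32', persistable=True, name="learning_rate")
+
+    global_step = fluid.layers.fill_constant(shape=[1], dtype='float32', value=0.0)
+    zero_var = fluid.layers.fill_constant(shape=[1], dtype='float32', value=0.0)
+    one_var = fluid.layers.fill_constant(shape=[1], dtype='float32', value=1.0)
+    two_var = fluid.layers.fill_constant(shape=[1], dtype='float32', value=2.0)
+
+    with fluid.layers.control_flow.Switch() as switch:
+        with switch.case(global_step == zero_var):
+            fluid.layers.tensor.assign(input=one_var, output=lr)
+        with switch.default():
+            fluid.layers.tensor.assign(input=two_var, output=lr)
+
+文档中原有的示例片段如下:
+
+.. 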
code-block:: python + lr = fluid.layers.tensor.create_global_var( + shape=[1], + value=0.0, + dtype='float32', + persistable=True, + name="learning_rate") + + one_var = fluid.layers.fill_constant( + shape=[1], dtype='float32', value=1.0) + two_var = fluid.layers.fill_constant( + shape=[1], dtype='float32', value=2.0) + + with fluid.layers.control_flow.Switch() as switch: + with switch.case(global_step == zero_var): + fluid.layers.tensor.assign(input=one_var, output=lr) + with switch.default(): + fluid.layers.tensor.assign(input=two_var, output=lr) + + + +关于 Padldle 中 Program 的详细设计思想,可以参考阅读 `Fluid设计思想 <../../advanced_guide/addon_development/design_idea/fluid_design_idea.html>`_ 。 + +更多 Paddle 中的控制流,可以参考阅读 `API文档 <../../../api_guides/low_level/layers/control_flow.html>`_ 。 diff --git a/doc/paddle/beginners_guide/basic_concept/programming_guide/programming_guide.md b/doc/paddle/beginners_guide/basic_concept/programming_guide/programming_guide.md new file mode 100644 index 0000000000000000000000000000000000000000..cc010e7e00a2c7015777dec6870d1340b482248b --- /dev/null +++ b/doc/paddle/beginners_guide/basic_concept/programming_guide/programming_guide.md @@ -0,0 +1,331 @@ + +# 编程指南 + +目前飞桨(PaddlePaddle,以下简称Paddle)已经同时支持命令式编程模式(动态图)和声明式编程模式(静态图)两种编程方式, +本文主要侧重于介绍声明式编程模式的编程方法,关于命令式编程模式编程方法,请参考[命令式编程模式机制-DyGraph](../dygraph/DyGraph.html)。 + +阅读完本文档,您将了解在Paddle声明式编程模式编程方式中,如何表示和定义数据变量,以及如何完整的组建一个深度学习网络并进行训练。 + +## 数据的表示和定义 + +Paddle和其他主流框架一样,使用Tensor数据结构来承载数据,包括模型中的可学习参数(如网络权重、偏置等), +网络中每一层的输入输出数据,常量数据等。 + +Tensor可以简单理解成一个多维数组,一般而言可以有任意多的维度。 +不同的Tensor可以具有自己的数据类型和形状,同一Tensor中每个元素的数据类型是一样的, +Tensor的形状就是Tensor的维度。关于Tensor的详细介绍请参阅:[Tensor](../tensor.html) 。 + +在Paddle中我们使用 `fluid.data` 来创建数据变量, `fluid.data` 需要指定Tensor的形状信息和数据类型, +当遇到无法确定的维度时,可以将相应维度指定为None,如下面的代码片段所示: + +```python +import paddle.fluid as fluid + +# 定义一个数据类型为int64的二维数据变量x,x第一维的维度为3,第二个维度未知,要在程序执行过程中才能确定,因此x的形状可以指定为[3, None] +x = fluid.data(name="x", shape=[3, None], dtype="int64") + +# 大多数网络都会采用batch方式进行数据组织,batch大小在定义时不确定,因此batch所在维度(通常是第一维)可以指定为None +batched_x = fluid.data(name="batched_x", shape=[None, 3, None], dtype='int64') +``` + +除 `fluid.data` 之外,我们还可以使用 `fluid.layers.fill_constant` 来创建常量, +如下代码将创建一个维度为[3, 4], 数据类型为int64的Tensor,其中所有元素均为16(value参数所指定的值)。 + +```python +import paddle.fluid as fluid +data = fluid.layers.fill_constant(shape=[3, 4], value=16, dtype='int64') +``` + +以上例子中,我们只使用了一种数据类型"int64",即有符号64位整数数据类型,更多Paddle目前支持的数据类型请查看:[支持的数据类型](../../../advanced_guide/data_preparing/feeding_data.html#fluid)。 + +需要注意的是,在声明式编程模式编程方式中,上述定义的Tensor并不具有值(即使创建常量的时候指定了value), +它们仅表示将要执行的操作,在网络执行时(训练或者预测)才会进行真正的赋值操作, +如您直接打印上例代码中的data将会得对其信息的描述: + +```python +print data +``` +输出结果: + +``` +name: "fill_constant_0.tmp_0" +type { + type: LOD_TENSOR + lod_tensor { + tensor { + data_type: INT64 + dims: 3 + dims: 4 + } + } +} +persistable: false +``` + +在网络执行过程中,获取Tensor数值有两种方式:方式一是利用 `paddle.fluid.layers.Print` 创建一个打印操作, +打印正在访问的Tensor。方式二是将Variable添加在fetch_list中。 + +方式一的代码实现如下所示: + +```python +import paddle.fluid as fluid + +data = fluid.layers.fill_constant(shape=[3, 4], value=16, dtype='int64') +data = fluid.layers.Print(data, message="Print data:") + +place = fluid.CPUPlace() +exe = fluid.Executor(place) +exe.run(fluid.default_startup_program()) + +ret = exe.run() +``` + +运行时的输出结果: + +``` +1571742368 Print data: The place is:CPUPlace +Tensor[fill_constant_0.tmp_0] + shape: [3,4,] + dtype: x + data: 16,16,16,16,16,16,16,16,16,16,16,16, +``` + +方式二Fetch_list的详细过程会在后文展开描述。 + +## 数据读取 + +使用 `fluid.data` 创建数据变量之后,我们需要把网络执行所需要的数据读取到对应变量中, 
+具体的数据准备过程,请阅读[准备数据](../../../advanced_guide/data_preparing/index_cn.html)。 + +## 组建网络 + +在Paddle中,数据计算类API统一称为Operator(算子),简称OP,大多数OP在 `paddle.fluid.layers` 模块中提供。 + +例如用户可以利用 `paddle.fluid.layers.elementwise_add()` 实现两个输入Tensor的加法运算: + +```python +# 定义变量 +import paddle.fluid as fluid +a = fluid.data(name="a", shape=[None, 1], dtype='int64') +b = fluid.data(name="b", shape=[None, 1], dtype='int64') + +# 组建网络(此处网络仅由一个操作构成,即elementwise_add) +result = fluid.layers.elementwise_add(a,b) + +# 准备运行网络 +cpu = fluid.CPUPlace() # 定义运算设备,这里选择在CPU下训练 +exe = fluid.Executor(cpu) # 创建执行器 +exe.run(fluid.default_startup_program()) # 网络参数初始化 + +# 读取输入数据 +import numpy +data_1 = int(input("Please enter an integer: a=")) +data_2 = int(input("Please enter an integer: b=")) +x = numpy.array([[data_1]]) +y = numpy.array([[data_2]]) + +# 运行网络 +outs = exe.run( + feed={'a':x, 'b':y}, # 将输入数据x, y分别赋值给变量a,b + fetch_list=[result] # 通过fetch_list参数指定需要获取的变量结果 + ) + +# 输出计算结果 +print "%d+%d=%d" % (data_1,data_2,outs[0][0]) +``` + +输出结果: +``` +Please enter an integer: a=7 +Please enter an integer: b=3 +7+3=10 +``` + +本次运行时,输入a=7,b=3,得到outs=10。 + +您可以复制这段代码在本地执行,根据指示输入其他数值观察计算结果。 + +如果想获取网络执行过程中的a,b的具体值,可以将希望查看的变量添加在fetch_list中。 + +```python +... +# 运行网络 +outs = exe.run( + feed={'a':x, 'b':y}, # 将输入数据x, y分别赋值给变量a,b + fetch_list=[a, b, result] # 通过fetch_list参数指定需要获取的变量结果 + ) + +# 输出计算结果 +print outs +``` + +输出结果: +``` +[array([[7]]), array([[3]]), array([[10]])] +``` + +## 组建更加复杂的网络 + +某些场景下,用户需要根据当前网络中的某些状态,来具体决定后续使用哪一种操作,或者重复执行某些操作。在命令式编程模式中,可以方便的使用Python的控制流语句(如for,if-else等)来进行条件判断,但是在声明式编程模式中,由于组网阶段并没有实际执行操作,也没有产生中间计算结果,因此无法使用Python的控制流语句来进行条件判断,为此声明式编程模式提供了多个控制流API来实现条件判断。这里以[fluid.layers.while_loop](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api_cn/layers_cn/while_loop_cn.html)为例来说明如何在声明式编程模式中实现条件循环的操作。 + +while_loop API用于实现类似while/for的循环控制功能,使用一个callable的方法cond作为参数来表示循环的条件,只要cond的返回值为True,while_loop就会循环执行循环体body(也是一个callable的方法),直到 cond 的返回值为False。对于while_loop API的详细定义和具体说明请参考文档[fluid.layers.while_loop](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api_cn/layers_cn/while_loop_cn.html)。 + +下面的例子中,使用while_loop API进行条件循环操作,其实现的功能相当于在python中实现如下代码: + +```python +i = 0 +ten = 10 +while i < ten: + i = i + 1 +print('i =', i) +``` + +在声明式编程模式中使用while_loop API实现以上代码的逻辑: + +```python +# 该代码要求安装飞桨1.7+版本 + +# 该示例代码展示整数循环+1,循环10次,输出计数结果 +import paddle.fluid as fluid +import paddle.fluid.layers as layers + +# 定义cond方法,作为while_loop的判断条件 +def cond(i, ten): + return i < ten + +# 定义body方法,作为while_loop的执行体,只要cond返回值为True,while_loop就会一直调用该方法进行计算 +# 由于在使用while_loop OP时,cond和body的参数都是由while_loop的loop_vars参数指定的,所以cond和body必须有相同数量的参数列表,因此body中虽然只需要i这个参数,但是仍然要保持参数列表个数为2,此处添加了一个dummy参数来进行"占位" +def body(i, dummy): + # 计算过程是对输入参数i进行自增操作,即 i = i + 1 + i = i + 1 + return i, dummy + +i = layers.fill_constant(shape=[1], dtype='int64', value=0) # 循环计数器 +ten = layers.fill_constant(shape=[1], dtype='int64', value=10) # 循环次数 +out, ten = layers.while_loop(cond=cond, body=body, loop_vars=[i, ten]) # while_loop的返回值是一个tensor列表,其长度,结构,类型与loop_vars相同 + +exe = fluid.Executor(fluid.CPUPlace()) +res = exe.run(fluid.default_main_program(), feed={}, fetch_list=out) +print(res) #[array([10])] + +``` + +限于篇幅,上面仅仅用一个最简单的例子来说明如何在声明式编程模式中实现循环操作。循环操作在很多应用中都有着重要作用,比如NLP中常用的Transformer模型,在解码(生成)阶段的Beam Search算法中,需要使用循环操作来进行候选的选取与生成,可以参考[Transformer](https://github.com/PaddlePaddle/models/tree/develop/PaddleNLP/PaddleMT/transformer)模型的实现来进一步学习while_loop在复杂场景下的用法。 + +除while_loop之外,飞桨还提供fluid.layers.cond 
API来实现条件分支的操作,以及fluid.layers.switch_case和fluid.layers.case API来实现分支控制功能,具体用法请参考文档:[cond](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api_cn/layers_cn/cond_cn.html),[switch_case](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api_cn/layers_cn/switch_case_cn.html)和[case](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api_cn/layers_cn/case_cn.html#case) + +## 一个完整的网络示例 + +一个典型的模型通常包含4个部分,分别是:输入数据定义,搭建网络(模型前向计算逻辑),定义损失函数,以及选择优化算法。 + +下面我们通过一个非常简单的数据预测网络(线性回归),来完整的展示如何使用Paddle声明式编程模式方式完成一个深度学习模型的组建和训练。 + +问题描述:给定一组数据 $$,求解出函数 $f$,使得 $y=f(x)$,其中$X$,$Y$均为一维张量。最终网络可以依据输入$x$,准确预测出$y_{\_predict}$。 + +1. 定义数据 + + 假设输入数据X=[1 2 3 4],Y=[2 4 6 8],在网络中定义: + + ```python + # 定义X数值 + train_data=numpy.array([[1.0], [2.0], [3.0], [4.0]]).astype('float32') + # 定义期望预测的真实值y_true + y_true = numpy.array([[2.0], [4.0], [6.0], [8.0]]).astype('float32') + ``` + +2. 搭建网络(定义前向计算逻辑) + + 接下来需要定义预测值与输入的关系,本次使用一个简单的线性回归函数进行预测: + + ```python + # 定义输入数据类型 + x = fluid.data(name="x", shape=[None, 1], dtype='float32') + y = fluid.data(name="y", shape=[None, 1], dtype='float32') + # 搭建全连接网络 + y_predict = fluid.layers.fc(input=x, size=1, act=None) + ``` + +3. 添加损失函数 + + 完成模型搭建后,如何评估预测结果的好坏呢?我们通常在设计的网络中添加损失函数,以计算真实值与预测值的差。 + + 在本例中,损失函数采用[均方差函数](https://en.wikipedia.org/wiki/Mean_squared_error): + + ```python + cost = fluid.layers.square_error_cost(input=y_predict, label=y) + avg_cost = fluid.layers.mean(cost) + ``` + +4. 网络优化 + + 确定损失函数后,可以通过前向计算得到损失值,并根据损失值对网络参数进行更新,最简单的算法是随机梯度下降法:w=w−η⋅g,由 `fluid.optimizer.SGD` 实现: + + ```python + sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.01) + sgd_optimizer.minimize(avg_cost) + + ``` + + 让我们的网络训练100次,查看结果: + + ```python + # 加载库 + import paddle.fluid as fluid + import numpy + + # 定义输入数据 + train_data=numpy.array([[1.0],[2.0],[3.0],[4.0]]).astype('float32') + y_true = numpy.array([[2.0],[4.0],[6.0],[8.0]]).astype('float32') + + # 组建网络 + x = fluid.data(name="x",shape=[None, 1],dtype='float32') + y = fluid.data(name="y",shape=[None, 1],dtype='float32') + y_predict = fluid.layers.fc(input=x,size=1,act=None) + + # 定义损失函数 + cost = fluid.layers.square_error_cost(input=y_predict,label=y) + avg_cost = fluid.layers.mean(cost) + + # 选择优化方法 + sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.01) + sgd_optimizer.minimize(avg_cost) + + # 网络参数初始化 + cpu = fluid.CPUPlace() + exe = fluid.Executor(cpu) + exe.run(fluid.default_startup_program()) + + # 开始训练,迭代100次 + for i in range(100): + outs = exe.run( + feed={'x':train_data, 'y':y_true}, + fetch_list=[y_predict, avg_cost]) + + # 输出训练结果 + print outs + ``` + + 输出结果: + ``` + [array([[2.2075021], + [4.1005487], + [5.9935956], + [7.8866425]], dtype=float32), array([0.01651453], dtype=float32)] + ``` + + 可以看到100次迭代后,预测值已经非常接近真实值了,损失值也下降到了0.0165。 + + 恭喜您!已经成功完成了第一个简单网络的搭建,想尝试线性回归的进阶版——房价预测模型,请阅读:[线性回归](../../../user_guides/simple_case/fit_a_line/README.cn.html)。更多丰富的模型实例可以在[典型案例](../../../user_guides/index_cn.html)中找到。 + + +## 进一步学习 + +如果您已经掌握了基本操作,可以进行下一阶段的学习了: + +跟随这一教程将学习到如何对实际问题建模并使用Paddle构建模型:[配置简单的网络](../../coding_practice/configure_simple_model/index_cn.html)。 + +完成网络搭建后,可以开始在单机上训练您的网络了,详细步骤请参考[单机训练](../../coding_practice/single_node.html)。 + +除此之外,使用文档模块根据开发者的不同背景划分了三个学习阶段:[快速上手](../../index_cn.html)、[典型案例](../../../user_guides/index_cn.html)和[进阶指南](../../../advanced_guide/index_cn.html)。 + +如果您希望阅读更多场景下的应用案例,可以参考[典型案例](../../../user_guides/index_cn.html)。已经具备深度学习基础知识的用户,也可以从[进阶指南](../../../advanced_guide/index_cn.html)开始阅读。 diff --git 
a/doc/paddle/beginners_guide/basic_concept/programming_guide/programming_guide_en.md b/doc/paddle/beginners_guide/basic_concept/programming_guide/programming_guide_en.md new file mode 100644 index 0000000000000000000000000000000000000000..13b19eceb8e6fe78037c9f7288cd662608aae5d1 --- /dev/null +++ b/doc/paddle/beginners_guide/basic_concept/programming_guide/programming_guide_en.md @@ -0,0 +1,452 @@ + +# Guide to Fluid Programming + +This document will instruct you to program and create a simple nueral network with Fluid API. From this guide, you will get the hang of: + +- Core concepts of Fluid +- How to define computing process in Fluid +- How to run fluid operators with executor +- How to model practical problems logically +- How to call API(layers, datasets, loss functions, optimization methods and so on) + +Before building model, you need to figure out several core concepts of Fluid at first: + +## Express data with Tensor + +Like other mainstream frameworks, Fluid uses Tensor to hold data. + +All data transferred in neural network are Tensor which can simply be regarded as a multi-dimensional array. In general, the number of dimensions can be any. Tensor features its own data type and shape. Data type of each element in single Tensor is the same. And **the shape of Tensor** refers to the dimensions of Tensor. + +Picture below visually shows Tensor with dimension from one to six: +

*(figure: Tensors with one to six dimensions)*
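For a quick feel of how shape relates to rank, the minimal sketch below (the variable names are only illustrative) creates constant Tensors of rank 1, 2 and 3 with `fluid.layers.fill_constant`; the rank is simply the length of the `shape` argument:

```python
import paddle.fluid as fluid

# The rank of a Tensor equals the length of its shape.
vec  = fluid.layers.fill_constant(shape=[3],       value=1.0, dtype='float32')  # 1-D Tensor, shape [3]
mat  = fluid.layers.fill_constant(shape=[2, 3],    value=1.0, dtype='float32')  # 2-D Tensor, shape [2, 3]
cube = fluid.layers.fill_constant(shape=[2, 3, 4], value=1.0, dtype='float32')  # 3-D Tensor, shape [2, 3, 4]
```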

+ + +There are three special kinds of Tensor in Fluid: + +**1. Learnable parameters of models** + +The lifetime of learnable parameters (such as network weight, bias and so on) of model is equal to the time of training task. The parameters will be updated by optimization algorithms. We use Parameter, the derived class of Variable, to express parameters. + +We can create learnable parameters with `fluid.layers.create_parameter` in Fluid: + +```python +w = fluid.layers.create_parameter(name="w",shape=[1],dtype='float32') +``` + + +In general, you don't need to explicitly create learnable parameters of network. Fluid encapsulates most fundamental computing modules in common networks. Take the fully connected model as a simplest example, The codes below create connection weight(W) and bias(bias) for fully connected layer with no need to explicitly call associated APIs of Parameter. + +```python +import paddle.fluid as fluid +y = fluid.layers.fc(input=x, size=128, bias_attr=True) +``` + + +**2. Input and Output Tensor** + +The input data of the whole neural network is also a special Tensor in which the sizes of some dimensions can not be decided at the definition time of models. Such dimensions usually includes batch size, or width and height of image when such data formats in a mini-batch are not constant. Placeholders for these uncertain dimension are necessary at the definition phase of model. + + +`fluid.layers.data` is used to receive input data in Fluid, and it needs to be provided with the shape of input Tensor. When the shape is not certain, the correspondent dimension is defined as None. + +The code below exemplifies the usage of `fluid.layers.data` : + +```python +import paddle.fluid as fluid + +#Define the dimension of x : [3,None]. What we could make sure is that the first dimension of x is 3. +#The second dimension is unknown and can only be known at runtime. +x = fluid.layers.data(name="x", shape=[3,None], dtype="int64") + +#batch size doesn't have to be defined explicitly. +#Fluid will automatically assign zeroth dimension as batch size dimension and fill right number at runtime. +a = fluid.layers.data(name="a",shape=[3,4],dtype='int64') + +#If the width and height of image are variable, we can define the width and height as None. +#The meaning of three dimensions of shape is channel, width of image, height of image respectively. +b = fluid.layers.data(name="image",shape=[3,None,None],dtype="float32") +``` + +dtype=“int64” indicates signed int 64 bits data. For more data types supported by Fluid, please refer to [Data types currently supported by Fluid](../../user_guides/howto/prepare_data/feeding_data_en.html#fluid). + +**3. Constant Tensor** + +`fluid.layers.fill_constant` is used to define constant Tensor in Fluid. You can define the shape, data type and value of Constant Tensor. Code is as follows: + +```python +import paddle.fluid as fluid +data = fluid.layers.fill_constant(shape=[1], value=0, dtype='int64') +``` + +Notice that the tensor defined above is not assigned with values. It merely represents the operation to perform. If you print data directly, you will get information about the description of this data: + +```python +print data +``` +Output: + +``` +name: "fill_constant_0.tmp_0" +type { + type: LOD_TENSOR + lod_tensor { + tensor { + data_type: INT64 + dims: 1 + } + } +} +persistable: false +``` + +Specific output value will be shown at the runtime of Executor. There are two ways to get runtime Variable value. 
The first way is to use `paddle.fluid.layers.Print` to create a print op that will print the tensor being accessed. The second way is to add Variable to Fetch_list. + +Code of the first way is as follows: + +```python +import paddle.fluid as fluid +data = fluid.layers.fill_constant(shape=[1], value=0, dtype='int64') +data = fluid.layers.Print(data, message="Print data: ") +``` + +Output at the runtime of Executor: + +``` +1563874307 Print data: The place is:CPUPlace +Tensor[fill_constant_0.tmp_0] + shape: [1,] + dtype: x + data: 0, +``` + +For more information on how to use the Print API, please refer to [Print operator](https://www.paddlepaddle.org.cn/documentation/docs/en/1.5/api/layers/control_flow.html#print). + +Detailed process of the second way Fetch_list will be explained later. + +## Feed data + +The method to feed data in Fluid: + +You need to use `fluid.layers.data` to configure data input layer and use ``executor.run(feed=...)`` to feed training data into `fluid.Executor` or `fluid.ParallelExecutor` . + +For specific preparation for data, please refer to [Preparation for data](../../../advanced_guide/data_preparing/index_en.html). + + +## Operators -- operations on data + +All operations on data are achieved by Operators in Fluid. + +To facilitate development, on Python end, Operators in Fluid are further encapsulated into `paddle.fluid.layers` , `paddle.fluid.nets` and other modules. + +It is because some common operations for Tensor may be composed of many fundamental operations. To make it more convenient, fundamental Operators are encapsulated in Fluid to reduce repeated coding, including the creation of learnable parameters which Operator relies on, details about initialization of learnable parameters and so on. + +For example, you can use `paddle.fluid.layers.elementwise_add()` to add up two input Tensor: + +```python +#Define network +import paddle.fluid as fluid +a = fluid.layers.data(name="a",shape=[1],dtype='float32') +b = fluid.layers.data(name="b",shape=[1],dtype='float32') + +result = fluid.layers.elementwise_add(a,b) + +#Define Exector +cpu = fluid.core.CPUPlace() #define computing place. Here we choose to train on CPU +exe = fluid.Executor(cpu) #create executor +exe.run(fluid.default_startup_program()) #initialize network parameters + +#Prepare data +import numpy +data_1 = int(input("Please enter an integer: a=")) +data_2 = int(input("Please enter an integer: b=")) +x = numpy.array([[data_1]]) +y = numpy.array([[data_2]]) + +#Run computing +outs = exe.run( +feed={'a':x,'b':y}, +fetch_list=[result.name]) + +#Verify result +print "%d+%d=%d" % (data_1,data_2,outs[0][0]) +``` + +Output: +``` +a=7 +b=3 +7+3=10 +``` + +At runtime, input a=7,b=3, and you will get output=10. + +You can copy the code, run it locally, input different numbers following the prompt instructions and check the computed result. + +If you want to get the specific value of a,b at the runtime of network, you can add variables you want to check into ``fetch_list`` . + +```python +... +#Run computing +outs = exe.run( + feed={'a':x,'b':y}, + fetch_list=[a,b,result.name] +#Check output +print outs +``` + +Output: +``` +[array([[7]]), array([[3]]), array([[10]])] +``` + +## Use Program to describe neural network model + +Fluid is different from most other deep learning frameworks. In Fluid, static computing map is replaced by Program to dynamically describe the network. This dynamic method delivers both flexible modifications to network structure and convenience to build model. 
Moreover, the capability of expressing a model is enhanced significantly while the performance is guaranteed. + +All Operators will be written into Program, which will be automatically transformed into a descriptive language named ProgramDesc in Fluid. It's like to write a general program to define Program. If you are an experienced developer, you can naturally apply the knowledge you have acquired on Fluid programming. + +You can describe any complex model by combining sequential processes, branches and loops supported by Fluid. + +**Sequential Process** + +You can use sequential structure to build network: + +```python +x = fluid.layers.data(name='x',shape=[13], dtype='float32') +y_predict = fluid.layers.fc(input=x, size=1, act=None) +y = fluid.layers.data(name='y', shape=[1], dtype='float32') +cost = fluid.layers.square_error_cost(input=y_predict, label=y) +``` + +**Conditional branch——switch,if else:** + +Switch and if-else class are used to implement conditional branch in Fluid. You can use the structure to adjust learning rate in learning rate adapter or perform other operations : + +```python +lr = fluid.layers.tensor.create_global_var( + shape=[1], + value=0.0, + dtype='float32', + persistable=True, + name="learning_rate") + +one_var = fluid.layers.fill_constant( + shape=[1], dtype='float32', value=1.0) +two_var = fluid.layers.fill_constant( + shape=[1], dtype='float32', value=2.0) + +with fluid.layers.control_flow.Switch() as switch: + with switch.case(global_step == zero_var): + fluid.layers.tensor.assign(input=one_var, output=lr) + with switch.default(): + fluid.layers.tensor.assign(input=two_var, output=lr) +``` + + +For detailed design principles of Program, please refer to [Design principle of Fluid](../../../advanced_guide/addon_development/design_idea/fluid_design_idea_en.html). + +For more about control flow in Fluid, please refer to [Control Flow](../../api/layers.html#control-flow). + + +## Use Executor to run Program + +The design principle of Fluid is similar to C++, JAVA and other advanced programming language. The execution of program is divided into two steps: compile and run. + +Executor accepts the defined Program and transforms it to a real executable Fluid Program at the back-end of C++. This process performed automatically is the compilation. + +After compilation, it needs Executor to run the compiled Fluid Program. + +Take add operator above as an example, you need to create an Executor to initialize and train Program after the construction of Program: + +```python +#define Executor +cpu = fluid.core.CPUPlace() #define computing place. Here we choose training on CPU +exe = fluid.Executor(cpu) #create executor +exe.run(fluid.default_startup_program()) #initialize Program + +#train Program and start computing +#feed defines the order of data transferred to network in the form of dict +#fetch_list defines the output of network +outs = exe.run( + feed={'a':x,'b':y}, + fetch_list=[result.name]) +``` + +## Code example + +So far, you have got a primary knowledge of core concepts in Fluid. Why not try to configure a simple network ? You can finish a very simple data prediction under the guide of the part if you are interested. If you have learned this part, you can skip this section and read [What's next](#what_next). + +Firstly, define input data format, model structure,loss function and optimized algorithm logically. Then you need to use PaddlePaddle APIs and operators to implement the logic of model. A typical model mainly contains four parts. 
They are: definition of input data format; forward computing logic; loss function; optimization algorithm. + +1. Problem + + Given a pair of data $$,construct a function $f$ so that $y=f(x)$ . $X$ , $Y$ are both one dimensional Tensor. Network finally can predict $y_{\_predict}$ accurately according to input $x$. + +2. Define data + + Supposing input data X=[1 2 3 4],Y=[2,4,6,8], make a definition in network: + + ```python + #define X + train_data=numpy.array([[1.0],[2.0],[3.0],[4.0]]).astype('float32') + #define ground-truth y_true expected to get from the model prediction + y_true = numpy.array([[2.0],[4.0],[6.0],[8.0]]).astype('float32') + ``` + +3. Create network (define forward computing logic) + + Next you need to define the relationship between the predicted value and the input. Take a simple linear regression function for example: + + ```python + #define input data type + x = fluid.layers.data(name="x",shape=[1],dtype='float32') + #create fully connected network + y_predict = fluid.layers.fc(input=x,size=1,act=None) + ``` + + Now the network can predict output. Although the output is just a group of random numbers, which is far from expected results: + + ```python + #load library + import paddle.fluid as fluid + import numpy + #define data + train_data=numpy.array([[1.0],[2.0],[3.0],[4.0]]).astype('float32') + y_true = numpy.array([[2.0],[4.0],[6.0],[8.0]]).astype('float32') + #define predict function + x = fluid.layers.data(name="x",shape=[1],dtype='float32') + y_predict = fluid.layers.fc(input=x,size=1,act=None) + #initialize parameters + cpu = fluid.core.CPUPlace() + exe = fluid.Executor(cpu) + exe.run(fluid.default_startup_program()) + #start training + outs = exe.run( + feed={'x':train_data}, + fetch_list=[y_predict.name]) + #observe result + print outs + ``` + + Output: + + ``` + [array([[0.74079144], + [1.4815829 ], + [2.2223744 ], + [2.9631658 ]], dtype=float32)] + ``` + +4. Add loss function + + After the construction of model, we need to evaluate the output result in order to make accurate predictions. How do we evaluate the result of prediction? We usually add loss function to network to compute the *distance* between ground-truth value and predict value. 
+ + In this example, we adopt [mean-square function](https://en.wikipedia.org/wiki/Mean_squared_error) as our loss function : + + ```python + cost = fluid.layers.square_error_cost(input=y_predict, label=y) + avg_cost = fluid.layers.mean(cost) + ``` + Output predicted value and loss function after a process of computing: + + ```python + #load library + import paddle.fluid as fluid + import numpy + #define data + train_data=numpy.array([[1.0],[2.0],[3.0],[4.0]]).astype('float32') + y_true = numpy.array([[2.0],[4.0],[6.0],[8.0]]).astype('float32') + #define network + x = fluid.layers.data(name="x",shape=[1],dtype='float32') + y = fluid.layers.data(name="y",shape=[1],dtype='float32') + y_predict = fluid.layers.fc(input=x,size=1,act=None) + #define loss function + cost = fluid.layers.square_error_cost(input=y_predict,label=y) + avg_cost = fluid.layers.mean(cost) + #initialize parameters + cpu = fluid.core.CPUPlace() + exe = fluid.Executor(cpu) + exe.run(fluid.default_startup_program()) + #start training + outs = exe.run( + feed={'x':train_data,'y':y_true}, + fetch_list=[y_predict.name,avg_cost.name]) + #observe output + print outs + ``` + Output: + + ``` + [array([[0.9010564], + [1.8021128], + [2.7031693], + [3.6042256]], dtype=float32), array([9.057577], dtype=float32)] + ``` + + We discover that the loss function after the first iteration of computing is 9.0, which shows there is a great improve space. + +5. Optimization of network + + After the definition of loss function,you can get loss value by forward computing and then get gradients of parameters with chain derivative method. + + Parameters should be updated after you have obtained gradients. The simplest algorithm is random gradient algorithm: w=w−η⋅g,which is implemented by `fluid.optimizer.SGD`: + ```python + sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.01) + ``` + Let's train the network for 100 times and check the results: + + ```python + #load library + import paddle.fluid as fluid + import numpy + #define data + train_data=numpy.array([[1.0],[2.0],[3.0],[4.0]]).astype('float32') + y_true = numpy.array([[2.0],[4.0],[6.0],[8.0]]).astype('float32') + #define network + x = fluid.layers.data(name="x",shape=[1],dtype='float32') + y = fluid.layers.data(name="y",shape=[1],dtype='float32') + y_predict = fluid.layers.fc(input=x,size=1,act=None) + #define loss function + cost = fluid.layers.square_error_cost(input=y_predict,label=y) + avg_cost = fluid.layers.mean(cost) + #define optimization algorithm + sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.01) + sgd_optimizer.minimize(avg_cost) + #initialize parameters + cpu = fluid.core.CPUPlace() + exe = fluid.Executor(cpu) + exe.run(fluid.default_startup_program()) + ##start training and iterate for 100 times + for i in range(100): + outs = exe.run( + feed={'x':train_data,'y':y_true}, + fetch_list=[y_predict.name,avg_cost.name]) + #observe result + print outs + ``` + + Output: + ``` + [array([[2.2075021], + [4.1005487], + [5.9935956], + [7.8866425]], dtype=float32), array([0.01651453], dtype=float32)] + ``` + Now we discover that predicted value is nearly close to real value and the loss value descends from original value 9.05 to 0.01 after iteration for 100 times. + + Congratulations! You have succeed to create a simple network. If you want to try advanced linear regression —— predict model of housing price, please read [linear regression](../../../user_guides/simple_case/fit_a_line/README.html). 
More examples of model can be found in [User Guides](../../../user_guides/index_en.html). + + +## What's next + +If you have been familiar with fundamental operations, you can start your next journey to learn fluid: + +You will learn how to build model for practical problem with fluid: [The configuration of simple network](../../coding_practice/configure_simple_model/index_en.html). + +After the construction of network, you can start training your network in single node. For detailed procedures, please refer to [Single-node training](../../coding_practice/single_node_en.html). + +In addition, there are three learning levels in documentation according to developer's background and experience: [Beginner's Guide](../../index_en.html) , [User Guides](../../../user_guides/index_en.html) and [Advanced Guide](../../../advanced_guide/index_en.html). + +If you want to read examples in more application scenarios, you can go to [User Guides](../../../user_guides/index_en.html) .If you have learned basic knowledge of deep learning, you can read from [Advanced Guide](../../../advanced_guide/index_en.html). diff --git a/doc/paddle/beginners_guide/basic_concept/tensor.rst b/doc/paddle/beginners_guide/basic_concept/tensor.rst new file mode 100644 index 0000000000000000000000000000000000000000..0cc44fec09c0d0546085f80175bdbcff756e2a25 --- /dev/null +++ b/doc/paddle/beginners_guide/basic_concept/tensor.rst @@ -0,0 +1,26 @@ +.. _cn_user_guide_tensor: + +========= +Tensor +========= + +飞桨(PaddlePaddle,以下简称Paddle)和其他框架一样,使用Tensor来表示数据。 + +在神经网络中传递的数据都是Tensor。Tensor可以简单理解成一个多维数组,一般而言可以有任意多的维度。 +不同的Tensor可以具有自己的数据类型和形状,同一Tensor中每个元素的数据类型是一样的,Tensor的形状就是Tensor的维度。 + +下图直观地表示1~6维的Tensor: + +.. image:: ../image/tensor.jpg + + +**Paddle 高级特性** + +:ref:`Lod-Tensor ` + +对于一些任务中batch内样本大小不一致的问题,Paddle提供了两种解决方案: + +1. padding, 将大小不一致的样本padding到同样的大小,这是一种常用且推荐的使用方式; +2. :ref:`Lod-Tensor ` ,记录每一个样本的大小,减少无用的计算量,LoD 牺牲灵活性来提升性能。 + +如果一个batch内的样本无法通过分桶、排序等方式使得大小接近, 建议使用 :ref:`Lod-Tensor ` 。 diff --git a/doc/paddle/beginners_guide/basic_concept/variable.rst b/doc/paddle/beginners_guide/basic_concept/variable.rst new file mode 100644 index 0000000000000000000000000000000000000000..22edb42b3a0caa0e0d5647899420a617fc30b7f7 --- /dev/null +++ b/doc/paddle/beginners_guide/basic_concept/variable.rst @@ -0,0 +1,61 @@ +.. _cn_user_guide_Variable: + +========= +Variable +========= + +飞桨(PaddlePaddle,以下简称Paddle)中的 :code:`Variable` 可以包含任何类型的值变量,提供的API中用到的类型是 :ref:`Tensor ` 。 + +后续的文档介绍中提到的 :code:`Variable` 基本等价于 :ref:`Tensor ` (特殊的地方会标注说明)。 + +在 Paddle 中存在三种 :code:`Variable`: + +**1. 模型中的可学习参数** + +模型中的可学习参数(包括网络权重、偏置等)生存期和整个训练任务一样长,会接受优化算法的更新,在 Paddle中以 Variable 的子类 Parameter 表示。 + +在Paddle中可以通过 :code:`fluid.layers.create_parameter` 来创建可学习参数: + +.. code-block:: python + + w = fluid.layers.create_parameter(name="w",shape=[1],dtype='float32') + + + +Paddle 为大部分常见的神经网络基本计算模块都提供了封装。以最简单的全连接模型为例,下面的代码片段会直接为全连接层创建连接权值(W)和偏置( bias )两个可学习参数,无需显式地调用 Parameter 相关接口来创建。 + +.. code-block:: python + + import paddle.fluid as fluid + y = fluid.layers.fc(input=x, size=128, bias_attr=True) + + +**2. 占位 Variable** + +在声明式编程模式(静态图)模式下,组网的时候通常不知道实际输入的信息,此刻需要一个占位的 :code:`Variable`,表示一个待提供输入的 :code:`Variable` + +Paddle 中使用 :code:`fluid.data` 来接收输入数据, :code:`fluid.data` 需要提供输入 Tensor 的形状信息,当遇到无法确定的维度时,相应维度指定为 None ,如下面的代码片段所示: + +.. 
code-block:: python + + import paddle.fluid as fluid + + #定义x的维度为[3,None],其中我们只能确定x的第一的维度为3,第二个维度未知,要在程序执行过程中才能确定 + x = fluid.data(name="x", shape=[3,None], dtype="int64") + + #若图片的宽度和高度在运行时可变,将宽度和高度定义为None。 + #shape的三个维度含义分别是:batch_size, channel、图片的宽度、图片的高度 + b = fluid.data(name="image",shape=[None, 3,None,None],dtype="float32") + + +其中,dtype=“int64”表示有符号64位整数数据类型,更多Fluid目前支持的数据类型请查看: :ref:`Paddle目前支持的数据类型 ` 。 + +**3. 常量 Variable** + +Fluid 通过 :code:`fluid.layers.fill_constant` 来实现常量Variable,用户可以指定内部包含Tensor的形状,数据类型和常量值。代码实现如下所示: + +.. code-block:: python + + import paddle.fluid as fluid + data = fluid.layers.fill_constant(shape=[1], value=0, dtype='int64') + diff --git a/doc/paddle/beginners_guide/book/fit_a_line/README.md b/doc/paddle/beginners_guide/book/fit_a_line/README.md new file mode 100644 index 0000000000000000000000000000000000000000..182f383be1c119b8d31ad7b9836eaa1bd9f775ed --- /dev/null +++ b/doc/paddle/beginners_guide/book/fit_a_line/README.md @@ -0,0 +1 @@ +../../../../external/book/01.fit_a_line/README.md diff --git a/doc/paddle/beginners_guide/book/fit_a_line/image b/doc/paddle/beginners_guide/book/fit_a_line/image new file mode 100644 index 0000000000000000000000000000000000000000..d05fb9cdba7453d935f59f0eda3519234ede4459 --- /dev/null +++ b/doc/paddle/beginners_guide/book/fit_a_line/image @@ -0,0 +1 @@ +../../../../external/book/01.fit_a_line/image/ \ No newline at end of file diff --git a/doc/paddle/beginners_guide/book/fit_a_line/index.html b/doc/paddle/beginners_guide/book/fit_a_line/index.html new file mode 100644 index 0000000000000000000000000000000000000000..4eaff6b23c0874911cbefcad24a6f0e70ae4ded7 --- /dev/null +++ b/doc/paddle/beginners_guide/book/fit_a_line/index.html @@ -0,0 +1,65 @@ + + + + + + + + + + + + + + + + + +
+
+ + + + + + + diff --git a/doc/paddle/beginners_guide/book/image_classification/README.md b/doc/paddle/beginners_guide/book/image_classification/README.md new file mode 100644 index 0000000000000000000000000000000000000000..67baed3b2c16ef2c4cc6fdd2d70c2950abc51381 --- /dev/null +++ b/doc/paddle/beginners_guide/book/image_classification/README.md @@ -0,0 +1 @@ +../../../../external/book/03.image_classification/README.md diff --git a/doc/paddle/beginners_guide/book/image_classification/image b/doc/paddle/beginners_guide/book/image_classification/image new file mode 100644 index 0000000000000000000000000000000000000000..afd28996d9fca9c55d5ea34043ac24fcc86c213c --- /dev/null +++ b/doc/paddle/beginners_guide/book/image_classification/image @@ -0,0 +1 @@ +../../../../external/book/03.image_classification/image/ \ No newline at end of file diff --git a/doc/paddle/beginners_guide/book/image_classification/index.html b/doc/paddle/beginners_guide/book/image_classification/index.html new file mode 100644 index 0000000000000000000000000000000000000000..cfad639633af2f36df160e9197b20aad041c25b2 --- /dev/null +++ b/doc/paddle/beginners_guide/book/image_classification/index.html @@ -0,0 +1,65 @@ + + + + + + + + + + + + + + + + + +
+
+ + + + + + + diff --git a/doc/paddle/beginners_guide/book/index_en.rst b/doc/paddle/beginners_guide/book/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..2242e72520a098c75a33ed2ee53df30f698451f5 --- /dev/null +++ b/doc/paddle/beginners_guide/book/index_en.rst @@ -0,0 +1,14 @@ +Book +====== + +.. toctree:: + :maxdepth: 1 + + fit_a_line/README.md + recognize_digits/README.md + image_classification/README.md + word2vec/README.md + recommender_system/README.md + understand_sentiment/README.md + label_semantic_roles/README.md + machine_translation/README.md diff --git a/doc/paddle/beginners_guide/book/label_semantic_roles/README.md b/doc/paddle/beginners_guide/book/label_semantic_roles/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e2d5b31a09fd6cfaf4a0d2b83b7ab6a2f246761d --- /dev/null +++ b/doc/paddle/beginners_guide/book/label_semantic_roles/README.md @@ -0,0 +1 @@ +../../../../external/book/07.label_semantic_roles/README.md diff --git a/doc/paddle/beginners_guide/book/label_semantic_roles/image b/doc/paddle/beginners_guide/book/label_semantic_roles/image new file mode 100644 index 0000000000000000000000000000000000000000..0eb76d55feab0c443e7b1fab3da870b193f166b2 --- /dev/null +++ b/doc/paddle/beginners_guide/book/label_semantic_roles/image @@ -0,0 +1 @@ +../../../../external/book/07.label_semantic_roles/image/ \ No newline at end of file diff --git a/doc/paddle/beginners_guide/book/label_semantic_roles/index.html b/doc/paddle/beginners_guide/book/label_semantic_roles/index.html new file mode 100644 index 0000000000000000000000000000000000000000..c761fff4ecb0254b42cb983244ac777d2135bbf6 --- /dev/null +++ b/doc/paddle/beginners_guide/book/label_semantic_roles/index.html @@ -0,0 +1,65 @@ + + + + + + + + + + + + + + + + + +
+
+ + + + + + + diff --git a/doc/paddle/beginners_guide/book/machine_translation/README.md b/doc/paddle/beginners_guide/book/machine_translation/README.md new file mode 100644 index 0000000000000000000000000000000000000000..d4bd0a650934b19d12bff622852b40c7c25bc476 --- /dev/null +++ b/doc/paddle/beginners_guide/book/machine_translation/README.md @@ -0,0 +1 @@ +../../../../external/book/08.machine_translation/README.md diff --git a/doc/paddle/beginners_guide/book/machine_translation/image b/doc/paddle/beginners_guide/book/machine_translation/image new file mode 100644 index 0000000000000000000000000000000000000000..ed4d2366ec7d3fb3d03e0e4643cf9025bed4f698 --- /dev/null +++ b/doc/paddle/beginners_guide/book/machine_translation/image @@ -0,0 +1 @@ +../../../../external/book/08.machine_translation/image/ \ No newline at end of file diff --git a/doc/paddle/beginners_guide/book/machine_translation/index.html b/doc/paddle/beginners_guide/book/machine_translation/index.html new file mode 100644 index 0000000000000000000000000000000000000000..11c45827afc5adfb4a8e858f433498a225d44553 --- /dev/null +++ b/doc/paddle/beginners_guide/book/machine_translation/index.html @@ -0,0 +1,65 @@ + + + + + + + + + + + + + + + + + +
+
+ + + + + + + diff --git a/doc/paddle/beginners_guide/book/recognize_digits/README.md b/doc/paddle/beginners_guide/book/recognize_digits/README.md new file mode 100644 index 0000000000000000000000000000000000000000..7728b1504f5919289f36c226ca182ace6a20378d --- /dev/null +++ b/doc/paddle/beginners_guide/book/recognize_digits/README.md @@ -0,0 +1 @@ +../../../../external/book/02.recognize_digits/README.md diff --git a/doc/paddle/beginners_guide/book/recognize_digits/image b/doc/paddle/beginners_guide/book/recognize_digits/image new file mode 100644 index 0000000000000000000000000000000000000000..925f1617634b19104d7795071cbaae7b55bd84df --- /dev/null +++ b/doc/paddle/beginners_guide/book/recognize_digits/image @@ -0,0 +1 @@ +../../../../external/book/02.recognize_digits/image/ \ No newline at end of file diff --git a/doc/paddle/beginners_guide/book/recognize_digits/index.html b/doc/paddle/beginners_guide/book/recognize_digits/index.html new file mode 100644 index 0000000000000000000000000000000000000000..310d1f18a2e84f72f0a64ca1fd4255a2db278fa0 --- /dev/null +++ b/doc/paddle/beginners_guide/book/recognize_digits/index.html @@ -0,0 +1,65 @@ + + + + + + + + + + + + + + + + + +
+
+ + + + + + + diff --git a/doc/paddle/beginners_guide/book/recommender_system/README.md b/doc/paddle/beginners_guide/book/recommender_system/README.md new file mode 100644 index 0000000000000000000000000000000000000000..43f2638277fa8a7e33f9a9ed60ea79ba78277825 --- /dev/null +++ b/doc/paddle/beginners_guide/book/recommender_system/README.md @@ -0,0 +1 @@ +../../../../external/book/05.recommender_system/README.md diff --git a/doc/paddle/beginners_guide/book/recommender_system/image b/doc/paddle/beginners_guide/book/recommender_system/image new file mode 100644 index 0000000000000000000000000000000000000000..a4e97ee1ae98c330733670be080c86d5aa24fc8d --- /dev/null +++ b/doc/paddle/beginners_guide/book/recommender_system/image @@ -0,0 +1 @@ +../../../../external/book/05.recommender_system/image/ \ No newline at end of file diff --git a/doc/paddle/beginners_guide/book/recommender_system/index.html b/doc/paddle/beginners_guide/book/recommender_system/index.html new file mode 100644 index 0000000000000000000000000000000000000000..ab36764c0de1fe7021c11a43cb18569901bb43a2 --- /dev/null +++ b/doc/paddle/beginners_guide/book/recommender_system/index.html @@ -0,0 +1,65 @@ + + + + + + + + + + + + + + + + + +
+
+ + + + + + + diff --git a/doc/paddle/beginners_guide/book/understand_sentiment/README.md b/doc/paddle/beginners_guide/book/understand_sentiment/README.md new file mode 100644 index 0000000000000000000000000000000000000000..125de5942c97084edcb1fe3c62ab951458274199 --- /dev/null +++ b/doc/paddle/beginners_guide/book/understand_sentiment/README.md @@ -0,0 +1 @@ +../../../../external/book/06.understand_sentiment/README.md diff --git a/doc/paddle/beginners_guide/book/understand_sentiment/image b/doc/paddle/beginners_guide/book/understand_sentiment/image new file mode 100644 index 0000000000000000000000000000000000000000..c519bc94e3b50cc5bb31e7f601f6c274400345a2 --- /dev/null +++ b/doc/paddle/beginners_guide/book/understand_sentiment/image @@ -0,0 +1 @@ +../../../../external/book/06.understand_sentiment/image/ \ No newline at end of file diff --git a/doc/paddle/beginners_guide/book/understand_sentiment/index.html b/doc/paddle/beginners_guide/book/understand_sentiment/index.html new file mode 100644 index 0000000000000000000000000000000000000000..464dc5a316457fe84c69435358ceb5da95c0bbdd --- /dev/null +++ b/doc/paddle/beginners_guide/book/understand_sentiment/index.html @@ -0,0 +1,65 @@ + + + + + + + + + + + + + + + + + +
+
+ + + + + + + diff --git a/doc/paddle/beginners_guide/book/word2vec/README.md b/doc/paddle/beginners_guide/book/word2vec/README.md new file mode 100644 index 0000000000000000000000000000000000000000..b1bbd5bc8664692d3c34ee0b2b33cd7390683ab1 --- /dev/null +++ b/doc/paddle/beginners_guide/book/word2vec/README.md @@ -0,0 +1 @@ +../../../../external/book/04.word2vec/README.md diff --git a/doc/paddle/beginners_guide/book/word2vec/images b/doc/paddle/beginners_guide/book/word2vec/images new file mode 100644 index 0000000000000000000000000000000000000000..dd59e54f0c4cf9058d375650889b52a8996d1418 --- /dev/null +++ b/doc/paddle/beginners_guide/book/word2vec/images @@ -0,0 +1 @@ +../../../../external/book/04.word2vec/image/ \ No newline at end of file diff --git a/doc/paddle/beginners_guide/book/word2vec/index.html b/doc/paddle/beginners_guide/book/word2vec/index.html new file mode 100644 index 0000000000000000000000000000000000000000..b91741b3ec72216c2c93e21a2c93d3a2a3c4568b --- /dev/null +++ b/doc/paddle/beginners_guide/book/word2vec/index.html @@ -0,0 +1,65 @@ + + + + + + + + + + + + + + + + + +
+
+ + + + + + + diff --git a/doc/paddle/beginners_guide/coding_practice/configure_simple_model/index_cn.rst b/doc/paddle/beginners_guide/coding_practice/configure_simple_model/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..cee9ba796020cf9d6767ced07dacaee157e4022b --- /dev/null +++ b/doc/paddle/beginners_guide/coding_practice/configure_simple_model/index_cn.rst @@ -0,0 +1,88 @@ +.. _user_guide_configure_simple_model: + +############## +配置简单的网络 +############## + +在解决实际问题时,可以先从逻辑层面对问题进行建模,明确模型所需要的 **输入数据类型**、**计算逻辑**、**求解目标** 以及 **优化算法**。PaddlePaddle提供了丰富的算子来实现模型逻辑。下面以一个简单回归任务举例说明如何使用PaddlePaddle构建模型。该例子完整代码参见 `fit_a_line `_。 + +问题描述及定义 +############## + +问题描述: 给定一组数据 :math:``,求解出函数 :math:`f`,使得 :math:`y=f(x)`,其中 :math:`x\subset X` 表示一条样本的特征,为 :math:`13` 维的实数向量;:math:`y \subset Y` 为一实数表示该样本对应的值。 + +我们可以尝试用回归模型来对问题建模,回归问题的损失函数有很多,这里选择常用的均方误差。为简化问题,这里假定 :math:`f` 为简单的线性变换函数,同时选用随机梯度下降算法来求解模型。 + ++----------------+----------------------------------------------+ +| 输入数据类型 | 样本特征: 13 维 实数 | ++ +----------------------------------------------+ +| | 样本标签: 1 维 实数 | ++----------------+----------------------------------------------+ +| 计算逻辑 | 使用线性模型,产生 1维实数作为模型的预测输出 | ++----------------+----------------------------------------------+ +| 求解目标 | 最小化模型预测输出与样本标签间的均方误差 | ++----------------+----------------------------------------------+ +| 优化算法 | 随机梯度下降 | ++----------------+----------------------------------------------+ + +使用PaddlePaddle建模 +################### + +从逻辑层面明确了输入数据格式、模型结构、损失函数以及优化算法后,需要使用PaddlePaddle提供的API及算子来实现模型逻辑。一个典型的模型主要包含4个部分,分别是:输入数据格式定义,模型前向计算逻辑,损失函数以及优化算法。 + +数据层 +------ + +PaddlePaddle提供了 :code:`fluid.data()` 算子来描述输入数据的格式。 + +:code:`fluid.data()` 算子的输出是一个Variable。这个Variable的实际类型是Tensor。Tensor具有强大的表征能力,可以表示多维数据。为了精确描述数据结构,通常需要指定数据shape以及数值类型type。其中shape为一个整数向量,type可以是一个字符串类型。目前支持的数据类型参考 :ref:`user_guide_paddle_support_data_types` 。 模型训练一般会使用batch的方式读取数据,而batch的size在训练过程中可能不固定。data算子会依据实际数据来推断batch size,所以这里提供shape时不用关心batch size,只需关心一条样本的shape即可,更高级用法请参考 :ref:`user_guide_customize_batch_size_rank`。从上知,:math:`x` 为 :math:`13` 维的实数向量,:math:`y` 为实数,可使用下面代码定义数据层: + +.. code-block:: python + + x = fluid.data(name='x', shape=[13], dtype='float32') + y = fluid.data(name='y', shape=[1], dtype='float32') + +该模型使用的数据比较简单,事实上data算子还可以描述变长的、嵌套的序列数据。更详细的文档可参照 :ref:`user_guide_prepare_data`。 + +前向计算逻辑 +------------ + +实现一个模型最重要的部分是实现计算逻辑,PaddlePaddle提供了丰富的算子。这些算子的封装粒度不同,通常对应一种或一组变换逻辑。算子输出即为对输入数据执行变换后的结果。用户可以灵活使用算子来完成复杂的模型逻辑。比如图像相关任务中会使用较多的卷积算子、序列任务中会使用LSTM/GRU等算子。复杂模型通常会组合多种算子,以完成复杂的变换。PaddlePaddle提供了非常自然的方式来组合算子,一般地可以使用下面的方式: + +.. code-block:: python + + op_1_out = fluid.layers.op_1(input=op_1_in, ...) + op_2_out = fluid.layers.op_2(input=op_1_out, ...) + ... + +其中op_1和op_2表示算子类型,可以是fc来执行线性变换(全连接),也可以是conv来执行卷积变换等。通过算子的输入输出的连接来定义算子的计算顺序以及数据流方向。上面的例子中,op_1的输出是op_2的输入,那么在执行计算时,会先计算op_1,然后计算op_2。更复杂的模型可能需要使用控制流算子,依据输入数据来动态执行,针对这种情况,PaddlePaddle提供了IfElseOp和WhileOp等。算子的文档可参考 :code:`fluid.layers`。具体到这个任务, 我们使用一个fc算子: + +.. code-block:: python + + y_predict = fluid.layers.fc(input=x, size=1, act=None) + +损失函数 +-------- + +损失函数对应求解目标,我们可以通过最小化损失来求解模型。大多数模型使用的损失函数,输出是一个实数值。但是PaddlePaddle提供的损失算子一般是针对一条样本计算。当输入一个batch的数据时,损失算子的输出有多个值,每个值对应一条样本的损失,所以通常会在损失算子后面使用mean等算子,来对损失做归约。模型在一次前向迭代后会得到一个损失值,PaddlePaddle会自动执行链式求导法则计算模型里面每个参数和变量对应的梯度值。这里使用均方误差损失: + +.. 
code-block:: python + + cost = fluid.layers.square_error_cost(input=y_predict, label=y) + avg_cost = fluid.layers.mean(cost) + +优化方法 +-------- + +确定损失函数后,可以通过前向计算得到损失值,然后通过链式求导法则得到参数的梯度值。获取梯度值后需要更新参数,最简单的算法是随机梯度下降法::math:`w=w - \eta \cdot g`。但是普通的随机梯度下降算法存在一些问题: 比如收敛不稳定等。为了改善模型的训练速度以及效果,学术界先后提出了很多优化算法,包括: :code:`Momentum`、:code:`RMSProp`、:code:`Adam` 等。这些优化算法采用不同的策略来更新模型参数,一般可以针对具体任务和具体模型来选择优化算法。不管使用何种优化算法,学习率一般是一个需要指定的比较重要的超参数,需要通过实验仔细调整。这里采用随机梯度下降算法: + +.. code-block:: python + + sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) + +更多优化算子可以参考 :code:`fluid.optimizer()` 。 + +下一步做什么? +############## + +使用PaddlePaddle实现模型时需要关注 **数据层**、**前向计算逻辑**、**损失函数** 和 **优化方法**。不同的任务需要的数据格式不同,涉及的计算逻辑不同,损失函数不同,优化方法也不同。PaddlePaddle提供了丰富的模型示例,可以以这些示例为参考来构建自己的模型结构。用户可以访问 `模型库 `_ 查看官方提供的示例。 diff --git a/doc/paddle/beginners_guide/coding_practice/configure_simple_model/index_en.rst b/doc/paddle/beginners_guide/coding_practice/configure_simple_model/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..991ff76f87ad82a54c0b6ff8b78803feb6afcf0b --- /dev/null +++ b/doc/paddle/beginners_guide/coding_practice/configure_simple_model/index_en.rst @@ -0,0 +1,91 @@ +.. _user_guide_configure_simple_model_en: + +####################### +Set up Simple Model +####################### + +When solving practical problems, in the beginning you can model the problem logically, and get a clear picture of **input data type** , **computing logic** , **target solution** and **optimization algorithm** of model. +PaddlePaddle provides abundant operators to implement logics of a model. In this article, we take a simple regression task as an example to clarify how to build model with PaddlePaddle. +About complete code of the example,please refer to `fit_a_line `_ 。 + +Description and Definition of Problem +###################################### + +Description : Given a pair of data :math:``, figure out a function :math:`f` to make :math:`y=f(x)` . :math:`x\subset X` represents the feature of a sample, which is a real number vector with 13 dimensions; :math:`y \subset Y` is a real number representing corresponding value of the given sample. + +We can try to model the problem with a regression model. Though lots of loss functions are available for regression problem, here we choose commonly used mean-square error. To simplify the problem, assuming :math:`f` is a simple linear transformation funtion, we choose random gradient descent algorithm to solve problem. 
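In formulas (a standard textbook formulation rather than anything specific to PaddlePaddle): with the linear hypothesis :math:`f(x) = Wx + b`, the mean-square error objective and the gradient descent update with learning rate :math:`\eta` are

.. math::

    L(W, b) = \frac{1}{N}\sum_{i=1}^{N}\left(W x^{(i)} + b - y^{(i)}\right)^{2}

.. math::

    W \leftarrow W - \eta \frac{\partial L}{\partial W}, \qquad b \leftarrow b - \eta \frac{\partial L}{\partial b}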
+ ++--------------------------+-------------------------------------------------------------------------------------+ +| input data type | sample feature: 13-dimension real number | ++ +-------------------------------------------------------------------------------------+ +| | sample label: 1-dimension real number | ++--------------------------+-------------------------------------------------------------------------------------+ +| computing logic | use linear model to generate 1-dimensional real number as predicted output of model | ++--------------------------+-------------------------------------------------------------------------------------+ +| target solution | minimize mean-squre error between predicted output of model and sample label | ++--------------------------+-------------------------------------------------------------------------------------+ +| optimization algorithm | random gradient descent | ++--------------------------+-------------------------------------------------------------------------------------+ + +Model with PaddlePaddle +####################### + +After getting clear of the of input data format, model structure, loss function and optimization algorithm in terms of logic, you need to use PaddlePaddle API and operators to implement logic of model. A typical model includes four parts: format of input data, forward computing logic, loss function and optimization algorithm. + +Data Layer +----------- + +PaddlePaddle provides :code:`fluid.data()` to describe format of input data. + +The output of :code:`fluid.data()` is a Variable which is in fact a Tensor. Tensor can represent multi-demensional data with its great expressive feature.In order to accurately describe data structure, it is usually necessary to indicate the shape and type of data. The shape is int vector and type can be a string. About current supported data type, please refer to :ref:`user_guide_paddle_support_data_types_en` . Data is often read in form of batch to train model. Since batch size may vary and data operator infers batch size according to actual data, here the batch size is ignored when shape is provided. It's enough to care for the shape of single sample. For more advanced usage, please refer to :ref:`user_guide_customize_batch_size_rank_en` . :math:`x` is real number vector of :math:`13` dimenstions while :math:`y` is a real number. Data layer can be defined as follows: + +.. code-block:: python + + x = fluid.data(name='x', shape=[13], dtype='float32') + y = fluid.data(name='y', shape=[1], dtype='float32') + +Data in this example model are relatively simple. In fact, data operator can describe variable-length and nested sequence data. For more detailed documentation, please refer to :ref:`user_guide_prepare_data_en` . + +Logic of Forward Computing +--------------------------- + +The most important part of a model is to implement logic of computing. PaddlePaddle provides lots of operators encapsulated in different granularity. These operators usually are correspondent to a kind or a group of transformation logic. The output of operator is the result of transfomation for input data. User can flexiblely use operators to implement models with complex logics. For example, many convolutional operators will be used in tasks associated with image tasks and LSTM/GRU operators will be used in sequence tasks. Various operators are usually combined in complex models to implement complex transformation. PaddlePaddle provides natural methods to combine operators. 
The following example displays the typical combination method: + +.. code-block:: python + + op_1_out = fluid.layers.op_1(input=op_1_in, ...) + op_2_out = fluid.layers.op_2(input=op_1_out, ...) + ... + +In the example above, op_1 and op_2 represent types of operators,such as fc performing linear transformation(full connection) or conv performing convolutional transformation. The computing order of operators and direction of data stream are defined by the connection of input and output of operators. In the example above, the output of op_1 is the input of op_2. It will firstly compute op_1 and then op_2 in the process of computing. For more complex models, we may need to use control flow operators to make it perform dynamically according to the input data. In this situation, IfElseOp, WhileOp and other operators are provided in PaddlePaddle. About documentation of these operators, please refer to :code:`fluid.layers` . As for this task, we use a fc operator: + +.. code-block:: python + + y_predict = fluid.layers.fc(input=x, size=1, act=None) + +Loss Function +-------------- + +Loss function is correspondent with the target solution. We can resolve the model by minimizing the loss value. The outputs of loss functions of most models are real numbers. But the loss operator in PaddlePaddle is only aimed at a single sample. When a batch is feeded, there will be many outputs from the loss operator, each of which is correspondent with the loss of a single sample. Therefore we usually append operators like ``mean`` after loss function to conduct reduction of losses. After each forward iteration, a loss value will be returned. After that, Chain derivation theorem will be performed automatically in PaddlePaddle to compute gradient value of every parameter and variable in computing model. Here we use mean square error cost: + +.. code-block:: python + + cost = fluid.layers.square_error_cost(input=y_predict, label=y) + avg_cost = fluid.layers.mean(cost) + +Optimization Method +--------------------- + +After the definition of loss function, we can get loss value by forward computing and then get gradient value of parameters with chain deravation theorem. Having obtained the gradients, parameters have to be updated and the simplest algorithm is the random gradient descent algorithm: :math:`w=w - \eta \cdot g` .But common random gradient descent algorithms have some disadvantages, such as unstable convergency. To improve the training speed and effect of model, academic scholars have come up with many optimized algorithm, including :code:`Momentum` , :code:`RMSProp` , :code:`Adam` . Strategies vary from optimization algorithm to another to update parameters of model. Usually we can choose appropriate algorthm according to specific tasks and models. No matter what optimization algorithm we adopt, learning rate is usually an important super parameter to be specified and carefully adjusted by trials. Take random gradient descent algorithm as an example here: + +.. code-block:: python + + sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) + +For more optimization operators,please refer to :code:`fluid.optimizer()` . + +What to do next? +################# + +Attention needs to be paid for **Data Layer**, **Forward Computing Logic**, **Loss function** and **Optimization Function** while you use PaddlePaddle to implement models. +The data format, computing logic, loss function and optimization function are all different in different tasks. A rich number of examples of model are provided in PaddlePaddle. 
You can build your own model structure by referring to these examples. You can visit `Model Repository `_ to refer to examples in official documentation. diff --git a/doc/paddle/beginners_guide/coding_practice/index_cn.rst b/doc/paddle/beginners_guide/coding_practice/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e1f5f5c5c8782c3d69c64437fd90c964d80bf374 --- /dev/null +++ b/doc/paddle/beginners_guide/coding_practice/index_cn.rst @@ -0,0 +1,13 @@ +############ +编程实践 +############ + +如果您已经掌握了基本概念中的内容,期望可以针对实际问题建模、搭建自己网络,本模块提供了一些 Paddle 的使用细节供您参考: + +.. toctree:: + :maxdepth: 1 + + configure_simple_model/index_cn.rst + single_node.rst + save_load_variables.rst + diff --git a/doc/paddle/beginners_guide/coding_practice/index_en.rst b/doc/paddle/beginners_guide/coding_practice/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..53f9376f9805fe0e69af386a61d7723f338962a5 --- /dev/null +++ b/doc/paddle/beginners_guide/coding_practice/index_en.rst @@ -0,0 +1,14 @@ +############ +Coding Practice +############ + +If you have mastered the basic concepts and you expect to model and build your own network according to the actual problems, this module provides you with some details about the use of paddle for your reference: + +.. toctree:: + :maxdepth: 1 + + configure_simple_model/index_en.rst + single_node_en.rst + test_while_training_en.rst + save_load_variables_en.rst + diff --git a/doc/paddle/beginners_guide/coding_practice/save_load_variables.rst b/doc/paddle/beginners_guide/coding_practice/save_load_variables.rst new file mode 100644 index 0000000000000000000000000000000000000000..56f53792726cc4375f1e2706f9522590f3febd2c --- /dev/null +++ b/doc/paddle/beginners_guide/coding_practice/save_load_variables.rst @@ -0,0 +1,292 @@ +.. _user_guide_save_load_vars: + +################################ +模型/变量的保存、载入与增量训练 +################################ + +模型变量分类 +############ + +在PaddlePaddle Fluid中,所有的模型变量都用 :code:`fluid.framework.Variable()` 作为基类。 +在该基类之下,模型变量主要可以分为以下几种类别: + +1. 模型参数 + 模型参数是深度学习模型中被训练和学习的变量,在训练过程中,训练框架根据反向传播(backpropagation)算法计算出每一个模型参数当前的梯度, + 并用优化器(optimizer)根据梯度对参数进行更新。模型的训练过程本质上可以看做是模型参数不断迭代更新的过程。 + 在PaddlePaddle Fluid中,模型参数用 :code:`fluid.framework.Parameter` 来表示, + 这是一个 :code:`fluid.framework.Variable()` 的派生类,除了具有 :code:`fluid.framework.Variable()` 的各项性质以外, + :code:`fluid.framework.Parameter` 还可以配置自身的初始化方法、更新率等属性。 + +2. 长期变量 + 长期变量指的是在整个训练过程中持续存在、不会因为一个迭代的结束而被销毁的变量,例如动态调节的全局学习率等。 + 在PaddlePaddle Fluid中,长期变量通过将 :code:`fluid.framework.Variable()` 的 :code:`persistable` + 属性设置为 :code:`True` 来表示。所有的模型参数都是长期变量,但并非所有的长期变量都是模型参数。 + +3. 临时变量 + 不属于上面两个类别的所有模型变量都是临时变量,这种类型的变量只在一个训练迭代中存在,在每一个迭代结束后, + 所有的临时变量都会被销毁,然后在下一个迭代开始之前,又会先构造出新的临时变量供本轮迭代使用。 + 一般情况下模型中的大部分变量都属于这一类别,例如输入的训练数据、一个普通的layer的输出等等。 + + + +如何保存模型变量 +################ + +根据用途的不同,我们需要保存的模型变量也是不同的。例如,如果我们只是想保存模型用来进行以后的预测, +那么只保存模型参数就够用了。但如果我们需要保存一个checkpoint(检查点,类似于存档,存有复现目前模型的必要信息)以备将来恢复训练, +那么我们应该将各种长期变量都保存下来,甚至还需要记录一下当前的epoch和step的id。 +因为一些模型变量虽然不是参数,但对于模型的训练依然必不可少。 + +save_vars、save_params、save_persistables 以及 save_inference_model的区别 +########################################################################## +1. :code:`save_inference_model` 会根据用户配置的 :code:`feeded_var_names` 和 :code:`target_vars` 进行网络裁剪,保存下裁剪后的网络结构的 ``__model__`` 以及裁剪后网络中的长期变量 + +2. :code:`save_persistables` 不会保存网络结构,会保存网络中的全部长期变量到指定位置。 + +3. :code:`save_params` 不会保存网络结构,会保存网络中的全部模型参数到指定位置。 + +4. 
:code:`save_vars` 不会保存网络结构,会根据用户指定的 :code:`fluid.framework.Parameter` 列表进行保存。 + + :code:`save_persistables` 保存的网络参数是最全面的,如果是增量训练或者恢复训练, 请选择 :code:`save_persistables` 进行变量保存。 + :code:`save_inference_model` 会保存网络参数及裁剪后的模型,如果后续要做预测相关的工作, 请选择 :code:`save_inference_model` 进行变量和网络的保存。 + :code:`save_vars 和 save_params` 仅在用户了解清楚用途及特殊目的情况下使用, 一般不建议使用。 + + +保存模型用于对新样本的预测 +========================== + +如果我们保存模型的目的是用于对新样本的预测,那么只保存模型参数就足够了。我们可以使用 +:code:`fluid.io.save_params()` 接口来进行模型参数的保存。 + +例如: + + +.. code-block:: python + + import paddle.fluid as fluid + + exe = fluid.Executor(fluid.CPUPlace()) + param_path = "./my_paddle_model" + prog = fluid.default_main_program() + fluid.io.save_params(executor=exe, dirname=param_path, main_program=None) + + +上面的例子中,通过调用 :code:`fluid.io.save_params` 函数,PaddlePaddle Fluid会对默认 +:code:`fluid.Program` 也就是 :code:`prog` 中的所有模型变量进行扫描, +筛选出其中所有的模型参数,并将这些模型参数保存到指定的 :code:`param_path` 之中。 + + + +如何载入模型变量 +################ + +与模型变量的保存相对应,我们提供了两套API来分别载入模型的参数和载入模型的长期变量,分别为保存、加载模型参数的 ``save_params()`` 、 ``load_params()`` 和 +保存、加载长期变量的 ``save_persistables`` 、 ``load_persistables`` 。 + +载入模型用于对新样本的预测 +========================== + +对于通过 :code:`fluid.io.save_params` 保存的模型,可以使用 :code:`fluid.io.load_params` +来进行载入。 + +例如: + + +.. code-block:: python + + import paddle.fluid as fluid + + exe = fluid.Executor(fluid.CPUPlace()) + param_path = "./my_paddle_model" + prog = fluid.default_main_program() + fluid.io.load_params(executor=exe, dirname=param_path, + main_program=prog) + + +上面的例子中,通过调用 :code:`fluid.io.load_params` 函数,PaddlePaddle Fluid会对 +:code:`prog` 中的所有模型变量进行扫描,筛选出其中所有的模型参数, +并尝试从 :code:`param_path` 之中读取加载它们。 + +需要格外注意的是,这里的 :code:`prog` 必须和调用 :code:`fluid.io.save_params` +时所用的 :code:`prog` 中的前向部分完全一致,且不能包含任何参数更新的操作。如果两者存在不一致, +那么可能会导致一些变量未被正确加载;如果错误地包含了参数更新操作,那可能会导致正常预测过程中参数被更改。 +这两个 :code:`fluid.Program` 之间的关系类似于训练 :code:`fluid.Program` +和测试 :code:`fluid.Program` 之间的关系,详见: :ref:`user_guide_test_while_training`。 + +另外,需特别注意运行 :code:`fluid.default_startup_program()` 必须在调用 :code:`fluid.io.load_params` +之前。如果在之后运行,可能会覆盖已加载的模型参数导致错误。 + +通过numpy数组设置模型参数值 +=========================== + +用户可以灵活地使用numpy数组设置模型参数的值,具体示例如下: + + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + main_prog = fluid.Program() + startup_prog = fluid.Program() + with fluid.program_guard(main_prog, startup_prog): + data = fluid.layers.data(name="img", shape=[64, 784], append_batch_size=False) + w = fluid.layers.create_parameter(shape=[784, 200], dtype='float32', name='fc_w') + b = fluid.layers.create_parameter(shape=[200], dtype='float32', name='fc_b') + hidden_w = fluid.layers.matmul(x=data, y=w) + hidden_b = fluid.layers.elementwise_add(hidden_w, b) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(startup_prog) + + for block in main_prog.blocks: + for param in block.all_parameters(): + pd_var = fluid.global_scope().find_var(param.name) + pd_param = pd_var.get_tensor() + print("load: {}, shape: {}".format(param.name, param.shape)) + print("Before setting the numpy array value: {}".format(np.array(pd_param).ravel()[:5])) + pd_param.set(np.ones(param.shape), place) + print("After setting the numpy array value: {}".format(np.array(pd_param).ravel()[:5])) + + # 输出结果: + # load: fc_w, shape: (784, 200) + # Before setting the numpy array value: [ 0.00121664 0.00700346 -0.05220041 -0.05879825 0.05155897] + # After setting the numpy array value: [1. 1. 1. 1. 1.] 
+ # load: fc_b, shape: (200,) + # Before setting the numpy array value: [-0.098886 -0.00530401 -0.05821943 -0.01038218 0.00760134] + # After setting the numpy array value: [1. 1. 1. 1. 1.] + +预测模型的保存和加载 +############################## + +预测引擎提供了存储预测模型 :code:`fluid.io.save_inference_model` 和加载预测模型 :code:`fluid.io.load_inference_model` 两个接口。 + +- :code:`fluid.io.save_inference_model`:请参考 :ref:`api_guide_inference`。 +- :code:`fluid.io.load_inference_model`:请参考 :ref:`api_guide_inference`。 + + + +增量训练 +############ + +增量训练指一个学习系统能不断地从新样本中学习新的知识,并能保存大部分以前已经学习到的知识。因此增量学习涉及到两点:在上一次训练结束的时候保存需要的长期变量, 在下一次训练开始的时候加载上一次保存的这些长期变量。 因此增量训练涉及到如下几个API: +:code:`fluid.io.save_persistables`、:code:`fluid.io.load_persistables` 。 + +单机增量训练 +========================== +单机的增量训练的一般步骤如下: + +1. 在训练的最后调用 :code:`fluid.io.save_persistables` 保存持久性参数到指定的位置。 +2. 在训练的startup_program通过执行器 :code:`Executor` 执行成功之后调用 :code:`fluid.io.load_persistables` 加载之前保存的持久性参数。 +3. 通过执行器 :code:`Executor` 或者 :code:`ParallelExecutor` 继续训练。 + + +例如: + + +.. code-block:: python + + import paddle.fluid as fluid + + exe = fluid.Executor(fluid.CPUPlace()) + path = "./models" + prog = fluid.default_main_program() + fluid.io.save_persistables(exe, path, prog) + + +上面的例子中,通过调用 :code:`fluid.io.save_persistables` 函数,PaddlePaddle Fluid会从默认 :code:`fluid.Program` 也就是 :code:`prog` 的所有模型变量中找出长期变量,并将他们保存到指定的 :code:`path` 目录下。 + + +.. code-block:: python + + import paddle.fluid as fluid + + exe = fluid.Executor(fluid.CPUPlace()) + path = "./models" + startup_prog = fluid.default_startup_program() + exe.run(startup_prog) + main_prog = fluid.default_main_program() + fluid.io.load_persistables(exe, path, main_prog) + exe.run(main_prog) + +上面的例子中,通过调用 :code:`fluid.io.load_persistables` 函数,PaddlePaddle Fluid会从默认 +:code:`fluid.Program` 也就是 :code:`prog` 的所有模型变量中找出长期变量,从指定的 :code:`path` 目录中将它们一一加载, 然后再继续进行训练。 + + + +多机增量(不带分布式大规模稀疏矩阵)训练的一般步骤为 +========================== + +多机增量训练和单机增量训练有若干不同点: + +1. 在训练的最后调用 :code:`fluid.io.save_persistables` 保存长期变量时,不必要所有的trainer都调用这个方法来保存,一般0号trainer来保存即可。 +2. 多机增量训练的参数加载在PServer端,trainer端不用加载参数。在PServer全部启动后,trainer会从PServer端同步参数。 +3. 在确认需要使用增量的情况下, 多机在调用 :code:`fluid.DistributeTranspiler.transpile` 时需要指定 ``current_endpoint`` 参数。 + +多机增量(不带分布式大规模稀疏矩阵)训练的一般步骤为: + +1. 0号trainer在训练的最后调用 :code:`fluid.io.save_persistables` 保存持久性参数到指定的 :code:`path` 下。 +2. 通过HDFS等方式将0号trainer保存下来的所有的参数共享给所有的PServer(每个PServer都需要有完整的参数)。 +3. PServer在训练的startup_program通过执行器(:code:`Executor`)执行成功之后调用 :code:`fluid.io.load_persistables` 加载0号trainer保存的持久性参数。 +4. PServer通过执行器 :code:`Executor` 继续启动PServer_program. +5. 所有的训练节点trainer通过执行器 :code:`Executor` 或者 :code:`ParallelExecutor` 正常训练。 + + +对于训练过程中待保存参数的trainer, 例如: + + +.. code-block:: python + + import paddle.fluid as fluid + + exe = fluid.Executor(fluid.CPUPlace()) + path = "./models" + trainer_id = 0 + if trainer_id == 0: + prog = fluid.default_main_program() + fluid.io.save_persistables(exe, path, prog) + + +.. code-block:: bash + hadoop fs -mkdir /remote/$path + hadoop fs -put $path /remote/$path + + +上面的例子中,0号trainer通过调用 :code:`fluid.io.save_persistables` 函数,PaddlePaddle Fluid会从默认 +:code:`fluid.Program` 也就是 :code:`prog` 的所有模型变量中找出长期变量,并将他们保存到指定的 :code:`path` 目录下。然后通过调用第三方的文件系统(如HDFS)将存储的模型进行上传到所有PServer都可访问的位置。 + +对于训练过程中待载入参数的PServer, 例如: + + +.. code-block:: bash + hadoop fs -get /remote/$path $path + + +.. 
code-block:: python + + import paddle.fluid as fluid + + exe = fluid.Executor(fluid.CPUPlace()) + path = "./models" + pserver_endpoints = "127.0.0.1:1001,127.0.0.1:1002" + trainers = 4 + training_role == "PSERVER" + config = fluid.DistributeTranspilerConfig() + t = fluid.DistributeTranspiler(config=config) + t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers, sync_mode=True, current_endpoint=current_endpoint) + + if training_role == "PSERVER": + current_endpoint = "127.0.0.1:1001" + pserver_prog = t.get_pserver_program(current_endpoint) + pserver_startup = t.get_startup_program(current_endpoint, pserver_prog) + + exe.run(pserver_startup) + fluid.io.load_persistables(exe, path, pserver_prog) + exe.run(pserver_prog) + if training_role == "TRAINER": + main_program = t.get_trainer_program() + exe.run(main_program) + +上面的例子中,每个PServer通过调用HDFS的命令获取到0号trainer保存的参数,通过配置获取到PServer的 :code:`fluid.Program` ,PaddlePaddle Fluid会从此 +:code:`fluid.Program` 也就是 :code:`pserver_startup` 的所有模型变量中找出长期变量,并通过指定的 :code:`path` 目录下一一加载。 + + diff --git a/doc/paddle/beginners_guide/coding_practice/save_load_variables_en.rst b/doc/paddle/beginners_guide/coding_practice/save_load_variables_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..2471ccfdf4607df5dc2591d99f6186949923fee9 --- /dev/null +++ b/doc/paddle/beginners_guide/coding_practice/save_load_variables_en.rst @@ -0,0 +1,218 @@ +.. _user_guide_save_load_vars_en: + +###################################################### +Save, Load Models or Variables & Incremental Learning +###################################################### + +Model variable classification +############################## + +In PaddlePaddle Fluid, all model variables are represented by :code:`fluid.Variable()` as the base class. Under this base class, model variables can be divided into the following categories: + +1. Model parameter + + The model parameters are the variables trained and learned in the deep learning model. During the training process, the training framework calculates the current gradient of each model parameter according to the back propagation algorithm, and updates the parameters according to their gradients by the optimizer. The essence of the training process of a model can be seen as the process of continuously iterative updating of model parameters. In PaddlePaddle Fluid, the model parameters are represented by :code:`fluid.framework.Parameter` , which is a derived class of :code:`fluid.Variable()` . Besides various properties of :code:`fluid.Variable()` , :code:`fluid.framework.Parameter` can also be configured with its own initialization methods, update rate and other properties. + +2. Persistable variable + + Persistable variables refer to variables that persist throughout the training process and are not destroyed by the end of an iteration, such as the global learning rate which is dynamically adjusted. In PaddlePaddle Fluid, persistable variables are represented by setting the :code:`persistable` property of :code:`fluid.Variable()` to :code:`True`. All model parameters are persistable variables, but not all persistable variables are model parameters. + +3. Temporary variables + + All model variables that do not belong to the above two categories are temporary variables. This type of variable exists only in one training iteration. After each iteration, all temporary variables will be destroyed, and before the next iteration, A new set of temporary variables will be constructed first for this iteration. 
In general, most of the variables in the model belong to this category, such as the input training data, the output of a normal layer, and so on. + + +How to save model variables +############################ + +The model variables we need to save are different depending on the application. For example, if we just want to save the model for future predictions, just saving the model parameters will be enough. But if we need to save a checkpoint for future recovery of current training, then we should save all the persistable variables, and even record the current epoch and step id. It is because even though some model variables are not parameters, they are still essential for model training. + + +Difference between save_vars、save_params、save_persistables and save_inference_model +########################################################################## +1. :code:`save_inference_model` will prune the inference model based on :code:`feeded_var_names` and :code:`target_vars` , this method will save the ``__model__`` file of the pruned program and the persistable variables in the program. + +2. :code:`save_persistables` this method will not save model, it will save all the persistable variables in the program. + +3. :code:`save_params` this method will not save model, it will save all the parameters in the program. + +4. :code:`save_vars` this method will not save model, it will save the given parameter by user. + + :code:`save_persistables` this method is useful for increment training or checkpoint training, it can save persistable variables in program comprehensively, such as parameter variables, optimizer variables, if you need increment training or checkpoint training, please choose this one. + :code:`save_inference_model` this method is useful for inference, it will save persistable variables and pruned program, if you need program and variables for follow-up high performance inference, please choose this one. + + :code:`save_vars 和 save_params` there methods are only needed in particular cases, we suppose you already know the purpose of there APIs, there are not recommended for use normally. + + +Save the model to make prediction for new samples +=================================================== + +If we save the model to make prediction for new samples, just saving the model parameters will be sufficient. We can use the :code:`fluid.io.save_params()` interface to save model parameters. + +For example: + +.. code-block:: python + + import paddle.fluid as fluid + + exe = fluid.Executor(fluid.CPUPlace()) + param_path = "./my_paddle_model" + prog = fluid.default_main_program() + fluid.io.save_params(executor=exe, dirname=param_path, main_program=None) + +In the example above, by calling the :code:`fluid.io.save_params` function, PaddlePaddle Fluid scans all model variables in the default :code:`fluid.Program` , i.e. :code:`prog` and picks out all model parameters. All these model parameters are saved to the specified :code:`param_path` . + + + +How to load model variables +############################# + +Corresponding to saving of model variables, we provide two sets of APIs to load the model parameters and the persistable variables of model. + +Load model to make predictions for new samples +================================================ + +For models saved with :code:`fluid.io.save_params` , you can load them with :code:`fluid.io.load_params`. + +For example: + +.. 
code-block:: python + + import paddle.fluid as fluid + + exe = fluid.Executor(fluid.CPUPlace()) + param_path = "./my_paddle_model" + prog = fluid.default_main_program() + fluid.io.load_params(executor=exe, dirname=param_path, + main_program=prog) + +In the above example, by calling the :code:`fluid.io.load_params` function, PaddlePaddle Fluid will scan all the model variables in :code:`prog`, filter out all the model parameters, and try to load them from :code:`param_path` . + +It is important to note that the :code:`prog` here must be exactly the same as the forward part of the :code:`prog` used when calling :code:`fluid.io.save_params` and cannot contain any operations of parameter updates. If there is an inconsistency between the two, it may cause some variables not to be loaded correctly; if the parameter update operation is incorrectly included, it may cause the parameters to be changed during normal prediction. The relationship between these two :code:`fluid.Program` is similar to the relationship between training :code:`fluid.Program` and test :code:`fluid.Program`, see: :ref:`user_guide_test_while_training_en` . + +In addition, special care must be taken that :code:`fluid.default_startup_program()` **must** be run before calling :code:`fluid.io.load_params` . If you run it later, it may overwrite the loaded model parameters and cause an error. + + + +Prediction of the used models and parameters saving +####################################################### + + +The inference engine provides two interfaces : prediction model saving :code:`fluid.io.save_inference_model` and the prediction model loading :code:`fluid.io.load_inference_model`. + +- :code:`fluid.io.save_inference_model`: Please refer to :ref:`api_guide_inference` . +- :code:`fluid.io.load_inference_model`: Please refer to :ref:`api_guide_inference` . + + + +Incremental training +##################### + +Incremental training means that a learning system can continuously learn new knowledge from new samples and preserve most of the knowledge that has been learned before. Therefore, incremental learning involves two points: saving the parameters that need to be persisted at the end of the last training, and loading the last saved persistent parameters at the beginning of the next training. Therefore incremental training involves the following APIs: +:code:`fluid.io.save_persistables`, :code:`fluid.io.load_persistables` . + +Single-node incremental training +================================= + +The general steps of incremental training on a single unit are as follows: + +1. At the end of the training, call :code:`fluid.io.save_persistables` to save the persistable parameter to the specified location. +2. After the training startup_program is executed successfully by the executor :code:`Executor`, call :code:`fluid.io.load_persistables` to load the previously saved persistable parameters. +3. Continue training with the executor :code:`Executor` or :code:`ParallelExecutor`. + + +Example: + +.. code-block:: python + + import paddle.fluid as fluid + + exe = fluid.Executor(fluid.CPUPlace()) + path = "./models" + prog = fluid.default_main_program() + fluid.io.save_persistables(exe, path, prog) + +In the above example, by calling the :code:`fluid.io.save_persistables` function, PaddlePaddle Fluid will find all persistable variables from all model variables in the default :code:`fluid.Program`, e.t. :code:`prog` , and save them to the specified :code:`path` directory. + + +.. 
code-block:: python + + import paddle.fluid as fluid + + exe = fluid.Executor(fluid.CPUPlace()) + path = "./models" + startup_prog = fluid.default_startup_program() + exe.run(startup_prog) + main_prog = fluid.default_main_program() + fluid.io.load_persistables(exe, path, main_prog) + exe.run(main_prog) + +In the above example, by calling the :code:`fluid.io.load_persistables` function, PaddlePaddle Fluid will find persistable variables from all model variables in the default :code:`fluid.Program` , e.t. :code:`prog` . and load them one by one from the specified :code:`path` directory to continue training. + + +The general steps for multi-node incremental training (without distributed large-scale sparse matrices) +========================================================================================================= + +There are several differences between multi-node incremental training and single-node incremental training: + +1. At the end of the training, when :code:`fluid.io.save_persistables` is called to save the persistence parameters, it is not necessary for all trainers to call this method, usually it is called on the 0th trainer. +2. The parameters of multi-node incremental training are loaded on the PServer side, and the trainer side does not need to load parameters. After the PServers are fully started, the trainer will synchronize the parameters from the PServer. + +The general steps for multi-node incremental training (do not enable distributed large-scale sparse matrices) are: + +1. At the end of the training, Trainer 0 will call :code:`fluid.io.save_persistables` to save the persistable parameters to the specified :code:`path`. +2. Share all the parameters saved by trainer 0 to all PServers through HDFS or other methods. (each PServer needs to have complete parameters). +3. After the training startup_program is successfully executed by the executor ( :code:`Executor` ), the PServer calls :code:`fluid.io.load_persistables` to load the persistable parameters saved by the 0th trainer. +4. The PServer continues to start PServer_program via the executor :code:`Executor`. +5. All training node trainers conduct training process normally through the executor :code:`Executor` or :code:`ParallelExecutor` . + + +For trainers whose parameters are to be saved during training, for example: + +.. code-block:: python + + import paddle.fluid as fluid + + exe = fluid.Executor(fluid.CPUPlace()) + path = "./models" + trainer_id = 0 + if trainer_id == 0: + prog = fluid.default_main_program() + fluid.io.save_persistables(exe, path, prog) + + +.. code-block:: bash + hadoop fs -mkdir /remote/$path + hadoop fs -put $path /remote/$path + +In the above example, the 0 trainer calls the :code:`fluid.io.save_persistables` function. By calling this function, PaddlePaddle Fluid will find all persistable variables in all model variables from default :code:`fluid.Program` , e.t. :code:`prog` , and save them to the specified :code:`path` directory. The stored model is then uploaded to a location accessible for all PServers by invoking a third-party file system (such as HDFS). + +For the PServer to be loaded with parameters during training, for example: + + +.. 
code-block:: python + + import paddle.fluid as fluid + + exe = fluid.Executor(fluid.CPUPlace()) + path = "./models" + pserver_endpoints = "127.0.0.1:1001,127.0.0.1:1002" + trainers = 4 + Training_role == "PSERVER" + config = fluid.DistributeTranspilerConfig() + t = fluid.DistributeTranspiler(config=config) + t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers, sync_mode=True) + + if training_role == "PSERVER": + current_endpoint = "127.0.0.1:1001" + pserver_prog = t.get_pserver_program(current_endpoint) + pserver_startup = t.get_startup_program(current_endpoint, pserver_prog) + + exe.run(pserver_startup) + fluid.io.load_persistables(exe, path, pserver_startup) + exe.run(pserver_prog) + if training_role == "TRAINER": + main_program = t.get_trainer_program() + exe.run(main_program) + +In the above example, each PServer obtains the parameters saved by trainer 0 by calling the HDFS command, and obtains the PServer's :code:`fluid.Program` by configuration. PaddlePaddle Fluid will find all persistable variables in all model variables from this :code:`fluid.Program` , e.t. :code:`pserver_startup` , and load them from the specified :code:`path` directory. diff --git a/doc/paddle/beginners_guide/coding_practice/single_node.rst b/doc/paddle/beginners_guide/coding_practice/single_node.rst new file mode 100644 index 0000000000000000000000000000000000000000..d63379fc6fdc949441d8961004472429b6ca5d0b --- /dev/null +++ b/doc/paddle/beginners_guide/coding_practice/single_node.rst @@ -0,0 +1,136 @@ +######## +单机训练 +######## + +准备工作 +######## + +要进行PaddlePaddle Fluid单机训练,需要先 :ref:`user_guide_prepare_data` 和 +:ref:`user_guide_configure_simple_model` 。当\ +:ref:`user_guide_configure_simple_model` 完毕后,可以得到两个\ +:code:`fluid.Program`, :code:`startup_program` 和 :code:`main_program`。 +默认情况下,可以使用 :code:`fluid.default_startup_program()` 与\ :code:`fluid.default_main_program()` 获得全局的 :code:`fluid.Program`。 + +例如: + +.. code-block:: python + + import paddle.fluid as fluid + + image = fluid.data(name="image", shape=[None, 784], dtype='float32') + label = fluid.data(name="label", shape=[None, 1], dtype='int64') + hidden = fluid.layers.fc(input=image, size=100, act='relu') + prediction = fluid.layers.fc(input=hidden, size=10, act='softmax') + loss = fluid.layers.cross_entropy(input=prediction, label=label) + loss = fluid.layers.mean(loss) + + sgd = fluid.optimizer.SGD(learning_rate=0.001) + sgd.minimize(loss) + + # Here the fluid.default_startup_program() and fluid.default_main_program() + # has been constructed. + +在上述模型配置执行完毕后, :code:`fluid.default_startup_program()` 与\ +:code:`fluid.default_main_program()` 配置完毕了。 + +初始化参数 +########## + +参数随机初始化 +============== + +用户配置完模型后,参数初始化操作会被写入到\ +:code:`fluid.default_startup_program()` 中。使用 :code:`fluid.Executor()` 运行 +这一程序,初始化之后的参数默认被放在全局scope中,即 :code:`fluid.global_scope()` 。例如: + +.. code-block:: python + + exe = fluid.Executor(fluid.CUDAPlace(0)) + exe.run(program=fluid.default_startup_program()) + +载入预定义参数 +============== + +在神经网络训练过程中,经常会需要载入预定义模型,进而继续进行训练。\ +如何载入预定义参数,请参考 :ref:`user_guide_save_load_vars`。 + + +单卡训练 +######## + +执行单卡训练可以使用 :code:`fluid.Executor()` 中的 :code:`run()` 方法,运行训练\ +:code:`fluid.Program` 即可。在运行的时候,用户可以通过 :code:`run(feed=...)`\ +参数传入数据;用户可以通过 :code:`run(fetch=...)` 获取输出数据。例如:\ + +.. 
code-block:: python + + import paddle.fluid as fluid + import numpy + + train_program = fluid.Program() + startup_program = fluid.Program() + with fluid.program_guard(train_program, startup_program): + data = fluid.data(name='X', shape=[None, 1], dtype='float32') + hidden = fluid.layers.fc(input=data, size=10) + loss = fluid.layers.mean(hidden) + sgd = fluid.optimizer.SGD(learning_rate=0.001) + sgd.minimize(loss) + + use_cuda = True + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + exe = fluid.Executor(place) + + # Run the startup program once and only once. + # Not need to optimize/compile the startup program. + startup_program.random_seed=1 + exe.run(startup_program) + + # Run the main program directly without compile. + x = numpy.random.random(size=(10, 1)).astype('float32') + loss_data, = exe.run(train_program, + feed={"X": x}, + fetch_list=[loss.name]) + + # Or use CompiledProgram: + compiled_prog = fluid.CompiledProgram(train_program) + loss_data, = exe.run(compiled_prog, + feed={"X": x}, + fetch_list=[loss.name]) + +多卡训练 +####################### +在多卡训练中,你可以使用 :code:`fluid.CompiledProgram` 来编译 :code:`fluid.Program` ,然后调用 :code:`with_data_parallel` 。例如: + +.. code-block:: python + + # NOTE: If you use CPU to run the program, you need + # to specify the CPU_NUM, otherwise, fluid will use + # all the number of the logic cores as the CPU_NUM, + # in that case, the batch size of the input should be + # greater than CPU_NUM, if not, the process will be + # failed by an exception. + if not use_cuda: + os.environ['CPU_NUM'] = str(2) + + compiled_prog = fluid.CompiledProgram( + train_program).with_data_parallel( + loss_name=loss.name) + loss_data, = exe.run(compiled_prog, + feed={"X": x}, + fetch_list=[loss.name]) + +注释: + +1. :ref:`cn_api_fluid_CompiledProgram` 会将传入的 :code:`fluid.Program` 转为计算图,即Graph,因为 :code:`compiled_prog` 与传入的 :code:`train_program` 是完全不同的对象,目前还不能够对 :code:`compiled_prog` 进行保存。 +2. 多卡训练也可以使用 :ref:`cn_api_fluid_ParallelExecutor` ,但是现在推荐使用 :ref:`cn_api_fluid_CompiledProgram` . +3. 如果 :code:`exe` 是用CUDAPlace来初始化的,模型会在GPU中运行。在显卡训练模式中,所有的显卡都将被占用。用户可以配置 `CUDA_VISIBLE_DEVICES `_ 以更改被占用的显卡。 +4. 如果 :code:`exe` 是用CPUPlace来初始化的,模型会在CPU中运行。在这种情况下,多线程用于运行模型,同时线程的数目和逻辑核的数目相等。用户可以配置 ``CPU_NUM`` 以更改使用中的线程数目。 + +进阶使用 +############### + +.. toctree:: + :maxdepth: 2 + + test_while_training.rst + diff --git a/doc/paddle/beginners_guide/coding_practice/single_node_en.rst b/doc/paddle/beginners_guide/coding_practice/single_node_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..3e69c281c55e1b5367a9795a83d4f8825e3d607e --- /dev/null +++ b/doc/paddle/beginners_guide/coding_practice/single_node_en.rst @@ -0,0 +1,126 @@ +##################### +Single-node training +##################### + +Preparation +############ + +To perform single-node training in PaddlePaddle Fluid, you need to read :ref:`user_guide_prepare_data_en` and :ref:`user_guide_configure_simple_model_en` . When you have finished reading :ref:`user_guide_configure_simple_model_en` , you can get two :code:`fluid.Program`, namely :code:`startup_program` and :code:`main_program` . By default, you can use :code:`fluid.default_startup_program()` and :code:`fluid.default_main_program()` to get global :code:`fluid.Program` . + +For example: + +.. 
code-block:: python + + import paddle.fluid as fluid + + image = fluid.data(name="image", shape=[None, 784], dtype='float32') + label = fluid.data(name="label", shape=[None, 1], dtype='int64') + hidden = fluid.layers.fc(input=image, size=100, act='relu') + prediction = fluid.layers.fc(input=hidden, size=10, act='softmax') + loss = fluid.layers.cross_entropy(input=prediction, label=label) + loss = fluid.layers.mean(loss) + + sgd = fluid.optimizer.SGD(learning_rate=0.001) + sgd.minimize(loss) + + # Here the fluid.default_startup_program() and fluid.default_main_program() + # has been constructed. + +After the configuration of model, the configurations of :code:`fluid.default_startup_program()` and :code:`fluid.default_main_program()` have been finished. + +Initialize Parameters +####################### + +Random Initialization of Parameters +==================================== + +After the configuration of model,the initialization of parameters will be written into :code:`fluid.default_startup_program()` . By running this program in :code:`fluid.Executor()` , the random initialization of parameters will be finished in global scope, i.e. :code:`fluid.global_scope()` .For example: + +.. code-block:: python + + exe = fluid.Executor(fluid.CUDAPlace(0)) + exe.run(program=fluid.default_startup_program()) + +Load Predefined Parameters +=========================== + +In the neural network training, predefined models are usually loaded to continue training. For how to load predefined parameters, please refer to :ref:`user_guide_save_load_vars_en`. + + +Single-card Training +##################### + +Single-card training can be performed through calling :code:`run()` of :code:`fluid.Executor()` to run training :code:`fluid.Program` . +In the runtime, users can feed data with :code:`run(feed=...)` and get output data with :code:`run(fetch=...)` . For example: + +.. code-block:: python + + import paddle.fluid as fluid + import numpy + + train_program = fluid.Program() + startup_program = fluid.Program() + with fluid.program_guard(train_program, startup_program): + data = fluid.data(name='X', shape=[None, 1], dtype='float32') + hidden = fluid.layers.fc(input=data, size=10) + loss = fluid.layers.mean(hidden) + sgd = fluid.optimizer.SGD(learning_rate=0.001) + sgd.minimize(loss) + + use_cuda = True + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + exe = fluid.Executor(place) + + # Run the startup program once and only once. + # Not need to optimize/compile the startup program. + startup_program.random_seed=1 + exe.run(startup_program) + + # Run the main program directly without compile. + x = numpy.random.random(size=(10, 1)).astype('float32') + loss_data, = exe.run(train_program, + feed={"X": x}, + fetch_list=[loss.name]) + # Or use CompiledProgram: + compiled_prog = fluid.CompiledProgram(train_program) + loss_data, = exe.run(compiled_prog, + feed={"X": x}, + fetch_list=[loss.name]) + + +Multi-card Training +####################### +In multi-card training, you can use :code:`fluid.CompiledProgram` to compile the :code:`fluid.Program`, and then call :code:`with_data_parallel`. For example: + +.. code-block:: python + + # NOTE: If you use CPU to run the program, you need + # to specify the CPU_NUM, otherwise, fluid will use + # all the number of the logic core as the CPU_NUM, + # in that case, the batch size of the input should be + # greater than CPU_NUM, if not, the process will be + # failed by an exception. 
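    # Note: this snippet continues the single-card example above. It assumes
    # that use_cuda, exe, train_program, loss and x are already defined there,
    # and that "import os" has been added for the os.environ call below.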
+ if not use_cuda: + os.environ['CPU_NUM'] = str(2) + + compiled_prog = fluid.CompiledProgram( + train_program).with_data_parallel( + loss_name=loss.name) + loss_data, = exe.run(compiled_prog, + feed={"X": x}, + fetch_list=[loss.name]) + +Notes: + +1. :ref:`api_fluid_CompiledProgram` will convert the input Program into a computational graph, and :code:`compiled_prog` is a completely different object from the incoming :code:`train_program`. At present, :code:`compiled_prog` can not be saved. +2. Multi-card training can also be used: ref:`api_fluid_ParallelExecutor` , but now it is recommended to use: :ref:`api_fluid_CompiledProgram`. +3. If :code:`exe` is initialized with CUDAPlace, the model will be run in GPU. In the mode of graphics card training, all graphics card will be occupied. Users can configure `CUDA_VISIBLE_DEVICES `_ to change graphics cards that are being used. +4. If :code:`exe` is initialized with CPUPlace, the model will be run in CPU. In this situation, the multi-threads are used to run the model, and the number of threads is equal to the number of logic cores. Users can configure `CPU_NUM` to change the number of threads that are being used. + +Advanced Usage +############### + +.. toctree:: + :maxdepth: 2 + + test_while_training_en.rst diff --git a/doc/paddle/beginners_guide/coding_practice/test_while_training.rst b/doc/paddle/beginners_guide/coding_practice/test_while_training.rst new file mode 100644 index 0000000000000000000000000000000000000000..612d8b36a25188dc1cf18427e1df99d667037302 --- /dev/null +++ b/doc/paddle/beginners_guide/coding_practice/test_while_training.rst @@ -0,0 +1,121 @@ +.. _user_guide_test_while_training: + +################## +训练过程中评测模型 +################## + +模型的测试评价与训练的 :code:`fluid.Program` 不同。在测试评价中: + +1. 测试评价不进行反向传播,不优化更新参数。 +2. 测试评价执行的操作可以不同。 + + * 例如 BatchNorm 操作,在训练和测试时执行不同的算法。 + + * 测试评价模型与训练模型可以是完全不同的模型。 + +生成测试 :code:`fluid.Program` +################################# + +通过克隆训练 :code:`fluid.Program` 生成测试 :code:`fluid.Program` +======================================================================= + +用 :code:`Program.clone()` 方法可以复制出新的 :code:`fluid.Program` 。 通过设置 +:code:`Program.clone(for_test=True)` 复制含有用于测试的操作 :code:`fluid.Program` 。简单的使用方法如下: + +.. code-block:: python + + import paddle.fluid as fluid + + image = fluid.data(name="image", shape=[None, 784], dtype='float32') + label = fluid.data(name="label", shape=[None, 1], dtype="int64") + + prediction = fluid.layers.fc( + input=fluid.layers.fc(input=image, size=100, act='relu'), + size=10, + act='softmax' + ) + loss = fluid.layers.mean(fluid.layers.cross_entropy(input=prediction, label=label)) + acc = fluid.layers.accuracy(input=prediction, label=label) + + test_program = fluid.default_main_program().clone(for_test=True) + + adam = fluid.optimizer.Adam(learning_rate=0.001) + adam.minimize(loss) + +在使用 :code:`Optimizer` 之前,将 :code:`fluid.default_main_program()` 复制\ +成一个 :code:`test_program` 。之后使用测试数据运行 :code:`test_program`,\ +就可以做到运行测试程序,而不影响训练结果。 + +分别配置训练 :code:`fluid.Program` 和测试 :code:`fluid.Program` +===================================================================== + +如果训练程序和测试程序相差较大时,用户也可以通过完全定义两个不同的 +:code:`fluid.Program`,分别进行训练和测试。在PaddlePaddle Fluid中,\ +所有的参数都有名字。如果两个不同的操作,甚至两个不同的网络使用了同样名字的参数,\ +那么他们的值和内存空间都是共享的。 + +PaddlePaddle Fluid中使用 :code:`fluid.unique_name` 包来随机初始化用户未定义的\ +参数名称。通过 :code:`fluid.unique_name.guard` 可以确保多次调用某函数\ +参数初始化的名称一致。 + +例如: + +.. 
code-block:: python + + import paddle.fluid as fluid + + def network(is_test): + image = fluid.data(name="image", shape=[None, 784], dtype='float32') + label = fluid.data(name="label", shape=[None, 1], dtype="int64") + hidden = fluid.layers.fc(input=image, size=100, act="relu") + hidden = fluid.layers.batch_norm(input=hidden, is_test=is_test) + ... + return loss + + with fluid.unique_name.guard(): + train_loss = network(is_test=False) + sgd = fluid.optimizer.SGD(0.001) + sgd.minimize(train_loss) + + test_program = fluid.Program() + with fluid.unique_name.guard(): + with fluid.program_guard(test_program, fluid.Program()): + test_loss = network(is_test=True) + + # fluid.default_main_program() is the train program + # fluid.test_program is the test program + +执行测试 :code:`fluid.Program` +################################# + +使用 :code:`Executor` 执行测试 :code:`fluid.Program` +======================================================= + +用户可以使用 :code:`Executor.run(program=...)` 来执行测试 +:code:`fluid.Program`。 + +例如 + +.. code-block:: python + + exe = fluid.Executor(fluid.CPUPlace()) + test_acc = exe.run(program=test_program, feed=test_data_batch, fetch_list=[acc]) + print 'Test accuracy is ', test_acc + +使用 :code:`ParallelExecutor` 执行测试 :code:`fluid.Program` +=============================================================== + +用户可以使用训练用的 :code:`ParallelExecutor` 与测试 :code:`fluid.Program` +一起,新建一个测试的 :code:`ParallelExecutor` ;再使用测试 +:code:`ParallelExecutor.run` 来执行测试。 + +例如: + +.. code-block:: python + + train_exec = fluid.ParallelExecutor(use_cuda=True, loss_name=loss.name) + + test_exec = fluid.ParallelExecutor(use_cuda=True, share_vars_from=train_exec, + main_program=test_program) + test_acc = test_exec.run(fetch_list=[acc], ...) + diff --git a/doc/paddle/beginners_guide/coding_practice/test_while_training_en.rst b/doc/paddle/beginners_guide/coding_practice/test_while_training_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..a8bebd78c89f51273dfc3f342b84490c4908c65e --- /dev/null +++ b/doc/paddle/beginners_guide/coding_practice/test_while_training_en.rst @@ -0,0 +1,109 @@ +.. _user_guide_test_while_training_en: + +############################## +Evaluate model while training +############################## + +:code:`fluid.Program` for model test and evaluation is different from the one for training. In the test and evalution phase: + +1. There is no back propagation and no process of optimizing and updating parameters in evaluation and test. +2. Operations in model evaluation can be different. + + * Take the operator BatchNorm for example, algorithms are different in train and test. + + * Evaluation model and training model can be totally different. + +Generate :code:`fluid.Program` for test +####################################### + +Generate test :code:`fluid.Program` by cloning training :code:`fluid.Program` +============================================================================ + +:code:`Program.clone()` can generate a copied new :code:`fluid.Program` . You can generate a copy of Program with operations applied for test by setting :code:`Program.clone(for_test=True)` . Simple usage is as follows: + +.. 
code-block:: python + + import paddle.fluid as fluid + + image = fluid.data(name="image", shape=[None, 784], dtype='float32') + label = fluid.data(name="label", shape=[None, 1], dtype="int64") + prediction = fluid.layers.fc( + input=fluid.layers.fc(input=image, size=100, act='relu'), + size=10, + act='softmax' + ) + loss = fluid.layers.mean(fluid.layers.cross_entropy(input=prediction, label=label)) + acc = fluid.layers.accuracy(input=prediction, label=label) + + test_program = fluid.default_main_program().clone(for_test=True) + + adam = fluid.optimizer.Adam(learning_rate=0.001) + adam.minimize(loss) + +Before using :code:`Optimizer` , please copy :code:`fluid.default_main_program()` into a :code:`test_program` . Then you can pass test data to :code:`test_program` so that you can run test program without influencing training result. + +Configure training :code:`fluid.Program` and test :code:`fluid.Program` individually +===================================================================================== + +If the training program is largely different from test program, you can define two totally different :code:`fluid.Program` , and perform training and test individually. In PaddlePaddle Fluid, all parameters are named. If two different operations or even two different networks use parameters with the same name, the value and memory space of these parameters are shared. + +Fluid adopts :code:`fluid.unique_name` package to randomly initialize the names of unnamed parameters. :code:`fluid.unique_name.guard` can keep the initialized names consistent across multiple times of calling some function. + +For example: + +.. code-block:: python + + import paddle.fluid as fluid + + def network(is_test): + image = fluid.data(name="image", shape=[None, 784], dtype='float32') + label = fluid.data(name="label", shape=[None, 1], dtype="int64") + hidden = fluid.layers.fc(input=image, size=100, act="relu") + hidden = fluid.layers.batch_norm(input=hidden, is_test=is_test) + ... + return loss + + with fluid.unique_name.guard(): + train_loss = network(is_test=False) + sgd = fluid.optimizer.SGD(0.001) + sgd.minimize(train_loss) + + test_program = fluid.Program() + with fluid.unique_name.guard(): + with fluid.program_guard(test_program, fluid.Program()): + test_loss = network(is_test=True) + + # fluid.default_main_program() is the train program + # fluid.test_program is the test program + +Perform test :code:`fluid.Program` +################################### + +Run test :code:`fluid.Program` with :code:`Executor` +======================================================= + +You can run test :code:`fluid.Program` with :code:`Executor.run(program=...)` . + +For example: + +.. code-block:: python + + exe = fluid.Executor(fluid.CPUPlace()) + test_acc = exe.run(program=test_program, feed=test_data_batch, fetch_list=[acc]) + print 'Test accuracy is ', test_acc + +Run test :code:`fluid.Program` with :code:`ParallelExecutor` +===================================================================== + +You can use :code:`ParallelExecutor` for training and :code:`fluid.Program` for test to create a new test :code:`ParallelExecutor` ; then use test :code:`ParallelExecutor.run` to run test process. + +For example: + +.. code-block:: python + + train_exec = fluid.ParallelExecutor(use_cuda=True, loss_name=loss.name) + + test_exec = fluid.ParallelExecutor(use_cuda=True, share_vars_from=train_exec, + main_program=test_program) + test_acc = test_exec.run(fetch_list=[acc], ...) 
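The examples above are fragments. For reference, below is a minimal end-to-end sketch that evaluates while training with :code:`Executor` . It reuses the network from the cloning example above and feeds randomly generated batches, so ``random_batch`` and ``BATCH_SIZE`` are only illustrative placeholders rather than part of the official API:

.. code-block:: python

    import numpy
    import paddle.fluid as fluid

    # Build the network and clone the test program *before* adding the
    # optimizer, exactly as in the cloning example above.
    image = fluid.data(name="image", shape=[None, 784], dtype='float32')
    label = fluid.data(name="label", shape=[None, 1], dtype="int64")
    prediction = fluid.layers.fc(
        input=fluid.layers.fc(input=image, size=100, act='relu'),
        size=10,
        act='softmax')
    loss = fluid.layers.mean(
        fluid.layers.cross_entropy(input=prediction, label=label))
    acc = fluid.layers.accuracy(input=prediction, label=label)

    test_program = fluid.default_main_program().clone(for_test=True)

    adam = fluid.optimizer.Adam(learning_rate=0.001)
    adam.minimize(loss)

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())

    BATCH_SIZE = 16

    def random_batch():
        # Placeholder data; replace with batches from a real data reader.
        return {
            "image": numpy.random.random((BATCH_SIZE, 784)).astype('float32'),
            "label": numpy.random.randint(0, 10, (BATCH_SIZE, 1)).astype('int64'),
        }

    for step in range(100):
        exe.run(fluid.default_main_program(),
                feed=random_batch(),
                fetch_list=[loss])
        if step % 20 == 0:
            test_acc, = exe.run(program=test_program,
                                feed=random_batch(),
                                fetch_list=[acc])
            print("step %d, test accuracy %.4f" % (step, float(test_acc)))

In a real task, :code:`random_batch` would be replaced by batches produced by your data reader, and only the evaluation data would be fed into :code:`test_program` .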
+ diff --git a/doc/paddle/beginners_guide/dygraph/DyGraph.md b/doc/paddle/beginners_guide/dygraph/DyGraph.md new file mode 100644 index 0000000000000000000000000000000000000000..2e0d095ade986147a6f10b8fbd0d0835a5a4c7e1 --- /dev/null +++ b/doc/paddle/beginners_guide/dygraph/DyGraph.md @@ -0,0 +1,815 @@ +# 命令式编程使用教程 + +从编程范式上说,飞桨兼容支持声明式编程和命令式编程,通俗地讲即静态图和动态图。其实飞桨本没有图的概念,在飞桨的设计中,把一个神经网络定义成一段类似程序的描述,也就是用户在写程序的过程中,就定义了模型表达及计算。在声明式编程的控制流实现方面,飞桨借助自己实现的控制流OP而不是python原生的if else和for循环,这使得在飞桨中的定义的program即一个网络模型,可以有一个内部的表达,是可以全局优化编译执行的。考虑对开发者来讲,更愿意使用python原生控制流,飞桨也做了支持,并通过解释方式执行,这就是命令式编程。但整体上,这两种编程范式是相对兼容统一的。飞桨将持续发布更完善的命令式编程功能,同时保持更强劲的性能。 + +飞桨平台中,将神经网络抽象为计算表示**Operator**(算子,常简称OP)和数据表示**Variable**(变量),如 图1 所示。神经网络的每层操作均由一个或若干**Operator**组成,每个**Operator**接受一系列的**Variable**作为输入,经计算后输出一系列的**Variable**。 +
图1 Operator和Variable关系示意图
+ +根据**Operator**解析执行方式不同,飞桨支持如下两种编程范式: +* **声明式编程范式(静态图模式)**:先编译后执行的方式。用户需预先定义完整的网络结构,再对网络结构进行编译优化后,才能执行获得计算结果。 +* **命令式编程范式(动态图模式)**:解析式的执行方式。用户无需预先定义完整的网络结构,每写一行网络代码,即可同时获得计算结果。 + +举例来说,假设用户写了一行代码:y=x+1,在声明式编程下,运行此代码只会往计算图中插入一个Tensor加1的**Operator**,此时**Operator**并未真正执行,无法获得y的计算结果。但在命令式编程下,所有**Operator**均是即时执行的,运行完此代码后**Operator**已经执行完毕,用户可直接获得y的计算结果。 + +## 为什么命令式编程越来越流行? + +声明式编程作为较早提出的一种编程范式,提供丰富的 API ,能够快速的实现各种模型;并且可以利用全局的信息进行图优化,优化性能和显存占用;在预测部署方面也可以实现无缝衔接。 但具体实践中声明式编程存在如下问题: +1. 采用先编译后执行的方式,组网阶段和执行阶段割裂,导致调试不方便。 +2. 属于一种符号化的编程方式,要学习新的编程方式,有一定的入门门槛。 +3. 网络结构固定,对于一些树结构的任务支持的不够好。 + +命令式编程的出现很好的解决了这些问题,存在以下优势: +1. 代码运行完成后,可以立马获取结果,支持使用 IDE 断点调试功能,使得调试更方便。 +2. 属于命令式的编程方式,与编写Python的方式类似,更容易上手。 +3. 网络的结构在不同的层次中可以变化,使用更灵活。 + + +综合以上优势,使得命令式编程越来越受开发者的青睐,本章侧重介绍在飞桨中命令式编程的编程方法,包括如下几部分: +1. 如何开启命令式编程 +2. 如何使用命令式编程进行模型训练 +3. 如何基于命令式编程进行多卡训练 +4. 如何部署命令式的模型 +5. 命令式编程常见的使用技巧,如中间变量值/梯度打印、断点调试、阻断反向传递,以及某些场景下如何改写为声明式模式运行。 + + +## 1. 开启命令式编程 + +此文档介绍的内容是基于2.0 alpha,请安装2.0 alpha 版本,安装方式如下: + +``` +pip install -q --upgrade paddlepaddle==2.0.0a0 +``` + +目前飞桨默认的模式是声明式编程,可以通过paddle.enable_imperative()开启命令式编程(也可以通过with paddle.imperative.guard()的方式启动): +``` +paddle.enable_imperative() +``` + +我们先通过一个实例,观察一下命令式编程开启前后执行方式的差别: + + +```python +import numpy as np +import paddle +from paddle.imperative import to_variable + +data = np.ones([2, 2], np.float32) +#x = paddle.data(name='x', shape=[2,2], dtype='float32') +x = paddle.nn.data(name='x', shape=[2,2], dtype='float32') +x += 10 +exe = paddle.Executor() +exe.run(paddle.default_startup_program()) +out = exe.run(fetch_list=[x], feed={'x': data}) +print("result", out) #[[11, 11], [11, 11]] + +# 命令式编程 +paddle.enable_imperative() +x = paddle.imperative.to_variable(data) +x += 10 +print('result', x.numpy()) #[[11, 11], [11, 11]] + +``` +* 命令式编程下,所有操作在运行时就已经完成,更接近我们平时的编程方式,可以随时获取每一个操作的执行结果。 +* 声明式编程下,过程中并没有实际执行操作,上述例子中可以看到只能打印声明的类型,最后需要调用执行器来统一执行所有操作,计算结果需要通过执行器统一返回。 + +## 2. 
使用命令式编程进行模型训练 + +接下来我们以一个简单的手写体识别任务为例,说明如何使用飞桨的命令式编程来进行模型的训练。包括如下步骤: + +* 2.1 定义数据读取器:读取数据和预处理操作。 +* 2.2 定义模型和优化器:搭建神经网络结构。 +* 2.3 训练:配置优化器、学习率、训练参数。循环调用训练过程,循环执行“前向计算 + 损失函数 + 反向传播”。 +* 2.4 评估测试:将训练好的模型保存并评估测试。 + +最后介绍一下: +* 2.5 模型参数的保存和加载方法。 + +在前面章节我们已经了解到,“手写数字识别”的任务是:根据一个28 * 28像素的图像,识别图片中的数字。可采用MNIST数据集进行训练。 +![](https://ai-studio-static-online.cdn.bcebos.com/f8ffb092f6354d8c9c0219224db0e87b5490c5715cc346cf87b7098b2c3c2069) + +有关该任务和数据集的详细介绍,可参考:[初识飞桨手写数字识别模型](https://aistudio.baidu.com/aistudio/projectdetail/224342) + +### 2.1 定义数据读取器 + +飞桨提供了多个封装好的数据集API,本任务我们可以通过调用 [paddle.dataset.mnist](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/data/dataset_cn.html) 的 train 函数和 test 函数,直接获取处理好的 MNIST 训练集和测试集;然后调用 [paddle.batch](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/io_cn/batch_cn.html#batch) 接口返回 reader 的装饰器,该 reader 将输入 reader 的数据打包成指定 BATCH_SIZE 大小的批处理数据。 + + +```python +import paddle + +# 定义批大小 +BATCH_SIZE = 64 + +# 通过调用paddle.dataset.mnist的train函数和test函数来构造reader +train_reader = paddle.batch( + paddle.dataset.mnist.train(), batch_size=BATCH_SIZE, drop_last=True) +test_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=BATCH_SIZE, drop_last=True) +``` + + + +### 2.2 定义模型和优化器 + +本节我们采用如下网络模型,该模型可以很好的完成“手写数字识别”的任务。模型由卷积层 -> 池化层 -> 卷积层 -> 池化层 -> 全连接层组成,池化层即降采样层。 + +![](https://ai-studio-static-online.cdn.bcebos.com/f9e59d727d68437aaaad8cee410e564c7a80063367bd4fcd9f710a1480ee338c) + + +在开始构建网络模型前,需要了解如下信息: + +> 在命令式编程中,参数和变量的存储管理方式与声明式编程不同。命令式编程下,网络中学习的参数和中间变量,生命周期和 Python 对象的生命周期是一致的。简单来说,一个 Python 对象的生命周期结束,相应的存储空间就会释放。 + +对于一个网络模型,在模型学习的过程中参数会不断更新,所以参数需要在整个学习周期内一直保持存在,因此需要一个机制来保持网络的所有的参数不被释放,飞桨的命令式编程采用了继承自 [paddle.nn.Layer](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/Layer_cn.html#layer) 的面向对象设计的方法来管理所有的参数,该方法也更容易模块化组织代码。 + +下面介绍如何通过继承 paddle.nn.Layer 实现一个简单的ConvPool层;该层由一个 [卷积层](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/Conv2D_cn.html#conv2d) 和一个 [池化层](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/Pool2D_cn.html#pool2d) 组成。 + + +```python +import paddle +from paddle.nn import Conv2D, Pool2D + +# 定义SimpleImgConvPool网络,必须继承自paddle.nn.Layer +# 该网络由一个卷积层和一个池化层组成 + +class SimpleImgConvPool(paddle.nn.Layer): + def __init__(self, + num_channels, + num_filters, + filter_size, + pool_size, + pool_stride, + pool_padding=0, + pool_type='max', + global_pooling=False, + conv_stride=1, + conv_padding=0, + conv_dilation=1, + conv_groups=1, + act=None, + use_cudnn=False, + param_attr=None, + bias_attr=None): + super(SimpleImgConvPool, self).__init__() + + self._conv2d = Conv2D( + num_channels=num_channels, + num_filters=num_filters, + filter_size=filter_size, + stride=conv_stride, + padding=conv_padding, + dilation=conv_dilation, + groups=conv_groups, + param_attr=None, + bias_attr=None, + act=act, + use_cudnn=use_cudnn) + + self._pool2d = Pool2D( + pool_size=pool_size, + pool_type=pool_type, + pool_stride=pool_stride, + pool_padding=pool_padding, + global_pooling=global_pooling, + use_cudnn=use_cudnn) + + def forward(self, inputs): + x = self._conv2d(inputs) + x = self._pool2d(x) + return x +``` + +可以看出实现一个 ConvPool 层(即SimpleImgConvPool)分为两个步骤: +1. 定义 \_\_init\_\_ 构造函数。 + +在 \_\_init\_\_ 构造函数中,通常会执行变量初始化、参数初始化、子网络初始化等操作,执行这些操作时不依赖于输入的动态信息。这里我们对子网络(卷积层和池化层)做了初始化操作。 + +2. 
定义 forward 函数。 + +该函数负责定义网络运行时的执行逻辑,将会在每一轮训练/预测中被调用。上述示例中,forward 函数的逻辑是先执行一个卷积操作,然后执行一个池化操作。 + + +接下来我们介绍如何利用子网络组合出MNIST网络,该网络由两个 SimpleImgConvPool 子网络和一个全连接层组成。 + + +```python +# 定义MNIST网络,必须继承自paddle.nn.Layer +# 该网络由两个SimpleImgConvPool子网络、reshape层、matmul层、softmax层、accuracy层组成 +class MNIST(paddle.nn.Layer): + def __init__(self): + super(MNIST, self).__init__() + self._simple_img_conv_pool_1 = SimpleImgConvPool( + 1, 20, 5, 2, 2, act="relu") + self._simple_img_conv_pool_2 = SimpleImgConvPool( + 20, 50, 5, 2, 2, act="relu") + + self.pool_2_shape = 50 * 4 * 4 + SIZE = 10 + self.output_weight = self.create_parameter( + [self.pool_2_shape, 10]) + + def forward(self, inputs, label=None): + x = self._simple_img_conv_pool_1(inputs) + x = self._simple_img_conv_pool_2(x) + x = paddle.reshape(x, shape=[-1, self.pool_2_shape]) + x = paddle.matmul(x, self.output_weight) + x = paddle.nn.functional.softmax(x) + if label is not None: + acc = paddle.metric.accuracy(input=x, label=label) + return x, acc + else: + return x +``` + +在这个复杂的 Layer 的 \_\_init\_\_ 构造函数中,包含了更多基础的操作: +1. 变量的初始化:self.pool_2_shape = 50 * 4 * 4 +2. 全连接层参数的创建,通过调用 [Layer](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/Layer_cn.html#layer) 的 [create_parameter](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/Layer_cn.html#create_parameter) 接口:self.output_weight = self.create_parameter( [ self.pool_2_shape, 10]) +3. 子 Layer 的构造:self._simple_img_conv_pool_1、self._simple_img_conv_pool_2 + +forward 函数的实现和 前面SimpleImgConvPool 类中的实现方式类似。 + +接下来定义MNIST类的对象,以及优化器。这里优化器我们选择 [AdamOptimizer](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/optimizer_cn/AdamOptimizer_cn.html#adamoptimizer) ,通过 [Layer](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/Layer_cn.html#layer) 的 [parameters](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/Layer_cn.html#parameters) 接口来读取该网络的全部参数,实现如下: + + +```python +import numpy as np +from paddle.optimizer import AdamOptimizer +from paddle.imperative import to_variable + +paddle.enable_imperative() +# 定义MNIST类的对象 +mnist = MNIST() +# 定义优化器为AdamOptimizer,学习旅learning_rate为0.001 +# 注意命令式编程下必须传入parameter_list参数,该参数为需要优化的网络参数,本例需要优化mnist网络中的所有参数 +adam = AdamOptimizer(learning_rate=0.001, parameter_list=mnist.parameters()) +``` + +### 2.3 训练 + +当我们定义好上述网络结构之后,就可以进行训练了。 + +实现如下: +* 数据读取:读取每批数据,通过 [to_variable](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/to_variable_cn.html#to-variable) 接口将 numpy.ndarray 对象转换为 [Variable](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/fluid_cn/Variable_cn.html#variable) 类型的对象。 +* 网络正向执行:在正向执行时,用户构造出img和label之后,可利用类似函数调用的方式(如:mnist(img, label))传递参数执行对应网络的 forward 函数。 +* 计算损失值:根据网络返回的计算结果,计算损失值,便于后续执行反向计算。 +* 执行反向计算:需要用户主动调用 [backward](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/fluid_cn/Variable_cn.html#backward) 接口来执行反向计算。 +* 参数更新:调用优化器的 [minimize](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/optimizer_cn/AdamOptimizer_cn.html#minimize) 接口对参数进行更新。 +* 梯度重置:将本次计算的梯度值清零,以便进行下一次迭代和梯度更新。 +* 保存训练好的模型:通过 [Layer](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/Layer_cn.html#layer) 的 [state_dict](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/Layer_cn.html#state_dict) 获取模型的参数;通过 [save_dygraph](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/save_dygraph_cn.html#save-dygraph) 对模型参数进行保存。 + + +```python +import numpy as np +from paddle.optimizer import 
AdamOptimizer +from paddle.imperative import to_variable + +paddle.enable_imperative() +# 定义MNIST类的对象 +mnist = MNIST() +# 定义优化器为AdamOptimizer,学习旅learning_rate为0.001 +# 注意命令式编程下必须传入parameter_list参数,该参数为需要优化的网络参数,本例需要优化mnist网络中的所有参数 +adam = AdamOptimizer(learning_rate=0.001, parameter_list=mnist.parameters()) + +# 设置全部样本的训练次数 +epoch_num = 5 + +for epoch in range(epoch_num): + for batch_id, data in enumerate(train_reader()): + dy_x_data = np.array([x[0].reshape(1, 28, 28) for x in data]).astype('float32') + y_data = np.array([x[1] for x in data]).astype('int64').reshape(-1, 1) + + img = to_variable(dy_x_data) + label = to_variable(y_data) + + cost, acc = mnist(img, label) + + loss = paddle.nn.functional.cross_entropy(cost, label) + avg_loss = paddle.mean(loss) + avg_loss.backward() + adam.minimize(avg_loss) + mnist.clear_gradients() + + if batch_id % 100 == 0: + print("Loss at epoch {} step {}: {:}".format( + epoch, batch_id, avg_loss.numpy())) + +model_dict = mnist.state_dict() +paddle.imperative.save(model_dict, "save_temp") +``` + + +### 2.4 评估测试 + +模型训练完成,我们已经保存了训练好的模型,接下来进行评估测试。某些OP(如 dropout、batch_norm)需要区分训练模式和评估模式,以标识不同的执行状态。飞桨中OP默认采用的是训练模式(train mode),可通过如下方法切换: + + ``` +model.eval() #切换到评估模式 +model.train() #切换到训练模式 +``` + + +模型评估测试的实现如下: +* 首先定义 MNIST 类的对象 mnist_eval,然后通过 [load_dygraph](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/load_dygraph_cn.html#load-dygraph) 接口加载保存好的模型参数,通过 [Layer](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/Layer_cn.html#layer) 的 [set_dict](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/Layer_cn.html#set_dict) 接口将参数导入到模型中,通过 [Layer](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/Layer_cn.html#layer) 的 eval 接口切换到预测评估模式。 +* 读取测试数据执行网络正向计算,进行评估测试,输出不同 batch 数据下损失值和准确率的平均值。 + + +```python +paddle.enable_imperative() +mnist_eval = MNIST() +model_dict, _ = paddle.imperative.load("save_temp") +mnist_eval.set_dict(model_dict) +print("checkpoint loaded") + +mnist_eval.eval() + +acc_set = [] +avg_loss_set = [] +for batch_id, data in enumerate(test_reader()): + dy_x_data = np.array([x[0].reshape(1, 28, 28) + for x in data]).astype('float32') + y_data = np.array( + [x[1] for x in data]).astype('int64').reshape(-1, 1) + + img = to_variable(dy_x_data) + label = to_variable(y_data) + + prediction, acc = mnist_eval(img, label) + + loss = paddle.nn.functional.cross_entropy(input=prediction, label=label) + avg_loss = paddle.mean(loss) + acc_set.append(float(acc.numpy())) + avg_loss_set.append(float(avg_loss.numpy())) + +acc_val_mean = np.array(acc_set).mean() +avg_loss_val_mean = np.array(avg_loss_set).mean() +print("Eval avg_loss is: {}, acc is: {}".format(avg_loss_val_mean, acc_val_mean)) +``` + +### 2.5 模型参数的保存和加载 + +在命令式编程下,模型和优化器在不同的模块中,所以模型和优化器分别在不同的对象中存储,使得模型参数和优化器信息需分别存储。 +因此模型的保存需要单独调用模型和优化器中的 state_dict() 接口,同样模型的加载也需要单独进行处理。 + +保存模型 : +1. 保存模型参数:首先通过 minist.state_dict 函数获取 mnist 网络的所有参数,然后通过 paddle.imperative.save 函数将获得的参数保存至以 save_path 为前缀的文件中。 +1. 
保存优化器信息:首先通过 adam.state_dict 函数获取 adam 优化器的信息,然后通过 paddle.imperative.save 函数将获得的参数保存至以 save_path 为前缀的文件中。 + * [Layer](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/Layer_cn.html#layer) 的 [state_dict](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/Layer_cn.html#state_dict) 接口:该接口可以获取当前层及其子层的所有参数,并将参数存放在 dict 结构中。 + * [Optimizer](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/optimizer_cn/AdamOptimizer_cn.html#adamoptimizer) 的 state_dict 接口:该接口可以获取优化器的信息,并将信息存放在 dict 结构中。其中包含优化器使用的所有变量,例如对于 Adam 优化器,包括 beta1、beta2、momentum 等信息。注意如果该优化器的 minimize 函数没有被调用过,则优化器的信息为空。 + * [paddle.imperative.save](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/save_dygraph_cn.html#save-dygraph) 接口:该接口将传入的参数或优化器的 dict 保存到磁盘上。 +``` +# 保存模型参数 +1. paddle.imperative.save(minist.state_dict(), "save_path") +# 保存优化器信息 +2. paddle.imperative.save(adam.state_dict(), "save_path") +``` +加载模型: +1. 通过 paddle.imperative.load 函数获取模型参数信息 model_state 和优化器信息 opt_state; +1. 通过 mnist.set_dict 函数用获取的模型参数信息设置 mnist 网络的参数 +1. 通过 adam.set_dict 函数用获取的优化器信息设置 adam 优化器信息。 + * [Layer](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/Layer_cn.html#layer) 的 [set_dict](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/Layer_cn.html#set_dict) 接口:该接口根据传入的 dict 结构设置参数,所有参数将由 dict 结构中的 Tensor 设置。 + * [Optimizer](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/optimizer_cn/AdamOptimizer_cn.html#adamoptimizer) 的 set_dict 接口:该接口根据传入的 dict 结构设置优化器信息,例如对于 Adam 优化器,包括 beta1、beta2、momentum 等信息。如果使用了 LearningRateDecay ,则 global_step 信息也将会被设置。 + * [paddle.imperative.load](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/load_dygraph_cn.html#load-dygraph) 接口:该接口尝试从磁盘中加载参数或优化器的 dict 。 +``` +# 获取模型参数和优化器信息 +1. model_state, opt_state= paddle.imperative.load(“save_path”) +# 加载模型参数 +2. mnist.set_dict(model_state) +# 加载优化器信息 +3. adam.set_dict(opt_state) +``` + + +## 3. 多卡训练 + +针对数据量、计算量较大的任务,我们需要多卡并行训练,以提高训练效率。目前命令式编程可支持GPU的单机多卡训练方式,在命令式编程中多卡的启动和单卡略有不同,多卡通过 Python 基础库 subprocess.Popen 在每一张 GPU 上启动单独的 Python 程序的方式,每张卡的程序独立运行,只是在每一轮梯度计算完成之后,所有的程序进行梯度的同步,然后更新训练的参数。 + +我们通过一个实例了解如何进行多卡训练: +>由于AI Studio上未配置多卡环境,所以本实例需在本地构建多卡环境后运行。 + +1. 
本实例仍然采用前面定义的 MNIST 网络,可将前面定义的 SimpleImgConvPool、MNIST 网络结构、相关的库导入代码、以及下面多卡训练的示例代码拷贝至本地文件 train.py 中。 + + +```python +import numpy as np +import paddle +from paddle.optimizer import AdamOptimizer +from paddle.imperative import to_variable + +place = paddle.CUDAPlace(paddle.imperative.ParallelEnv().dev_id) +paddle.enable_imperative(place) +strategy = paddle.imperative.prepare_context() +epoch_num = 5 +BATCH_SIZE = 64 +mnist = MNIST() +adam = AdamOptimizer(learning_rate=0.001, parameter_list=mnist.parameters()) +mnist = paddle.imperative.DataParallel(mnist, strategy) + +train_reader = paddle.batch( + paddle.dataset.mnist.train(), batch_size=BATCH_SIZE, drop_last=True) +train_reader = paddle.incubate.reader.distributed_batch_reader( + train_reader) + +for epoch in range(epoch_num): + for batch_id, data in enumerate(train_reader()): + dy_x_data = np.array([x[0].reshape(1, 28, 28) + for x in data]).astype('float32') + y_data = np.array( + [x[1] for x in data]).astype('int64').reshape(-1, 1) + + img = to_variable(dy_x_data) + label = to_variable(y_data) + label.stop_gradient = True + + cost, acc = mnist(img, label) + + loss = paddle.nn.functional.cross_entropy(cost, label) + avg_loss = paddle.mean(loss) + + avg_loss = mnist.scale_loss(avg_loss) + avg_loss.backward() + mnist.apply_collective_grads() + + adam.minimize(avg_loss) + mnist.clear_gradients() + + if batch_id % 100 == 0 and batch_id is not 0: + print("epoch: {}, batch_id: {}, loss is: {}".format(epoch, batch_id, avg_loss.numpy())) + +if paddle.imperative.ParallelEnv().local_rank == 0: + paddle.imperative.save(mnist.state_dict(), "work_0") +``` + +2、飞桨命令式编程多进程多卡模型训练启动时,需要指定使用的 GPU,比如使用 0,1 卡,可执行如下命令启动训练: + + +``` +CUDA_VISIBLE_DEVICES=0,1 python -m paddle.distributed.launch --log_dir ./mylog train.py +``` +其中 log_dir 为存放 log 的地址,train.py 为程序名。 +执行结果如下: + +``` +----------- Configuration Arguments ----------- +cluster_node_ips: 127.0.0.1 +log_dir: ./mylog +node_ip: 127.0.0.1 +print_config: True +selected_gpus: 0,1 +started_port: 6170 +training_script: train.py +training_script_args: [] +use_paddlecloud: False +------------------------------------------------ +trainers_endpoints: 127.0.0.1:6170,127.0.0.1:6171 , node_id: 0 , current_node_ip: 127.0.0.1 , num_nodes: 1 , node_ips: ['127.0.0.1'] , nranks: 2 +``` + +此时,程序会将每个进程的输出 log 导出到 ./mylog 路径下,可以打开 workerlog.0 和 workerlog.1 来查看结果: + +``` +. +├── mylog +│ ├── workerlog.0 +│ └── workerlog.1 +└── train.py +``` + +总结一下,多卡训练相比单卡训练,有如下步骤不同: +1. 通过 ParallelEnv() 的 dev_id 设置程序运行的设备。 +``` +place = paddle.CUDAPlace(paddle.imperative.ParallelEnv().dev_id) +paddle.enable_imperative(place): +``` +2. 准备多卡环境。 +``` +strategy = paddle.imperative.prepare_context() +``` +3. 数据并行模块。 + +在数据并行的时候,我们需要存储和初始化一些多卡相关的信息,这些信息和操作放在 DataParallel 类中,使用的时候,我们需要利用 model(定义的模型) 和 strategy(第二步得到的多卡环境) 信息初始化 DataParallel。 +``` +mnist = paddle.imperative.DataParallel(mnist, strategy) +``` +4. 数据切分。 + +数据切分是一个非常重要的流程,是为了防止每张卡在每一轮训练见到的数据都一样,可以使用 distributed_batch_reader 对单卡的 reader 进行进行切分处理。 用户也可以其他的策略来达到数据切分的目的,比如事先分配好每张卡的数据,这样就可以使用单卡的 reader ,不使用 distributed_batch_reader。 + +``` +train_reader = paddle.incubate.reader.distributed_batch_reader(train_reader) +``` + +5. 单步训练。 + +首先对 loss 进行归一化,然后计算单卡的梯度,最终将所有的梯度聚合。 +``` +avg_loss = mnist.scale_loss(avg_loss) +avg_loss.backward() +mnist.apply_collective_grads() +``` +6. 模型保存。 + +和单卡不同,多卡训练时需逐个进程执行保存操作,多个进程同时保存会使模型文件格式出错。 +``` +if paddle.imperative.ParallelEnv().local_rank == 0: + paddle.imperative.save(mnist.state_dict(), "worker_0") +``` +7. 
评估测试。 + +对模型进行评估测试时,如果需要加载模型,须确保评估和保存的操作在同一个进程中,否则可能出现模型尚未保存完成,即启动评估,造成加载出错的问题。如果不需要加载模型,则没有这个问题,在一个进程或多个进程中评估均可。 + +## 4. 模型部署 + +### 4.1 动转静部署 +命令式编程虽然有非常多的优点,但是如果用户希望使用 C++ 部署已经训练好的模型,会存在一些不便利。比如,命令式编程中可使用 Python 原生的控制流,包含 if/else、switch、for/while,这些控制流需要通过一定的机制才能映射到 C++ 端,实现在 C++ 端的部署。 + +
* 如果用户使用的 if/else、switch、for/while 与输入(包括输入的值和 shape )无关,则可以使用如下命令式模型部署方案:
    * 使用 TracedLayer 将前向命令式模型转换为声明式模型,并可以将模型保存后做在线 C++ 预测;
    * 所有的 TracedLayer 对象均不应通过构造函数创建,而需通过调用静态方法 TracedLayer.trace(layer, inputs) 创建;
    * TracedLayer 使用 Executor 和 CompiledProgram 运行声明式模型。
+ + + +```python +from paddle.imperative import TracedLayer + +paddle.enable_imperative() +# 定义MNIST类的对象 +mnist = MNIST() +in_np = np.random.random([10, 1, 28, 28]).astype('float32') +# 将numpy的ndarray类型的数据转换为Variable类型 +input_var = paddle.imperative.to_variable(in_np) +# 通过 TracerLayer.trace 接口将命令式模型转换为声明式模型 +out_dygraph, static_layer = TracedLayer.trace(mnist, inputs=[input_var]) +save_dirname = './saved_infer_model' +# 将转换后的模型保存 +static_layer.save_inference_model(save_dirname, feed=[0], fetch=[0]) +``` + + +```python +# 声明式编程中需要使用执行器执行之前已经定义好的网络 +place = paddle.CPUPlace() +exe = paddle.Executor(place) +program, feed_vars, fetch_vars = paddle.io.load_inference_model(save_dirname, exe) +# 声明式编程中需要调用执行器的run方法执行计算过程 +fetch, = exe.run(program, feed={feed_vars[0]: in_np}, fetch_list=fetch_vars) +``` + +以上示例中,通过 [TracerLayer.trace](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/TracedLayer_cn.html#trace) 接口来运行命令式模型并将其转换为声明式模型,该接口需要传入命令式模型 mnist 和输入变量列表 [input_var];然后调用 [save_inference_model](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/TracedLayer_cn.html#save_inference_model) 接口将声明式模型保存为用于预测部署的模型,之后利用 [load_inference_model](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/io_cn/load_inference_model_cn.html) 接口将保存的模型加载,并使用 [Executor](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/executor_cn/Executor_cn.html#executor) 执行,检查结果是否正确。 + +[save_inference_model](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api_cn/dygraph_cn/TracedLayer_cn.html#save_inference_model) 保存的下来的模型,同样可以使用 C++ 加载部署,具体的操作请参考:[C++ 预测 API介绍](https://www.paddlepaddle.org.cn/documentation/docs/zh/advanced_guide/inference_deployment/inference/native_infer.html) + +* 如果任务中包含了依赖数据的控制流,比如下面这个示例中if条件的判断依赖输入的shape。针对这种场景,可以使用基于ProgramTranslator的方式转成声明式编程的program,通过save_inference_model 接口将声明式模型保存为用于预测部署的模型,之后利用 [load_inference_model](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/io_cn/load_inference_model_cn.html) 接口将保存的模型加载,并使用 [Executor](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/executor_cn/Executor_cn.html#executor) 执行,检查结果是否正确。 + +保存的下来的模型,同样可以使用 C++ 加载部署,具体的操作请参考:[C++ 预测 API介绍](https://www.paddlepaddle.org.cn/documentation/docs/zh/advanced_guide/inference_deployment/inference/native_infer.html) + + +```python +paddle.enable_imperative() +in_np = np.array([-2]).astype('int') +# 将numpy的ndarray类型的数据转换为Variable类型 +input_var = paddle.imperative.to_variable(in_np) +# if判断与输入input_var的shape有关 +if input_var.shape[0] > 1: + print("input_var's shape[0] > 1") +else: + print("input_var's shape[1] < 1") +``` + +* 针对依赖数据的控制流,解决流程如下 1. 添加declarative装饰器; 2. 
利用ProgramTranslator进行转换 + +1) 添加declarative装饰器 +首先需要对给MNist类的forward函数添加一个declarative 装饰器,来标记需要转换的代码块,(注:需要在最外层的class的forward函数中添加) + +```python +from paddle.imperative import declarative + +# 定义MNIST网络,必须继承自paddle.nn.Layer +# 该网络由两个SimpleImgConvPool子网络、reshape层、matmul层、softmax层、accuracy层组成 +class MNIST(paddle.nn.Layer): + def __init__(self): + super(MNIST, self).__init__() + self._simple_img_conv_pool_1 = SimpleImgConvPool( + 1, 20, 5, 2, 2, act="relu") + self._simple_img_conv_pool_2 = SimpleImgConvPool( + 20, 50, 5, 2, 2, act="relu") + + self.pool_2_shape = 50 * 4 * 4 + SIZE = 10 + self.output_weight = self.create_parameter( + [self.pool_2_shape, 10]) + + @declarative + def forward(self, inputs, label=None): + x = self._simple_img_conv_pool_1(inputs) + x = self._simple_img_conv_pool_2(x) + x = paddle.reshape(x, shape=[-1, self.pool_2_shape]) + x = paddle.matmul(x, self.output_weight) + x = paddle.nn.functional.softmax(x) + if label is not None: + acc = paddle.metric.accuracy(input=x, label=label) + return x, acc + else: + return x + +``` + + +2) 利用ProgramTranslator进行转换 + + + +```python +import paddle + +paddle.enable_imperative() +prog_trans = paddle.imperative.ProgramTranslator() +mnist = MNIST() + +in_np = np.random.random([10, 1, 28, 28]).astype('float32') +label_np = np.random.randint(0, 10, size=(10,1)).astype( "int64") +input_var = paddle.imperative.to_variable(in_np) +label_var = paddle.imperative.to_variable(label_np) + +out = mnist( input_var, label_var) + +prog_trans.save_inference_model("./mnist_dy2stat", fetch=[0,1]) +``` + +### 4.2 动转静训练 + +由于命令式编程在执行的时候,存在python与c++交互,由于计算图的构建,会引起命令式编程在部分RNN相关的任务性能比声明式编程要差,为了提升这类性能的性能,可以将命令式转换为声明式模型的方法进行训练,转换方式非常简单,仅需要对给MNist类的forward函数添加一个declarative 装饰器,来标记需要转换的代码块。 + +```python +from paddle.imperative import declarative + +# 定义MNIST网络,必须继承自paddle.nn.Layer +# 该网络由两个SimpleImgConvPool子网络、reshape层、matmul层、softmax层、accuracy层组成 +class MNIST(paddle.nn.Layer): + def __init__(self): + super(MNIST, self).__init__() + self._simple_img_conv_pool_1 = SimpleImgConvPool( + 1, 20, 5, 2, 2, act="relu") + self._simple_img_conv_pool_2 = SimpleImgConvPool( + 20, 50, 5, 2, 2, act="relu") + + self.pool_2_shape = 50 * 4 * 4 + SIZE = 10 + self.output_weight = self.create_parameter( + [self.pool_2_shape, 10]) + + @declarative + def forward(self, inputs, label=None): + x = self._simple_img_conv_pool_1(inputs) + x = self._simple_img_conv_pool_2(x) + x = paddle.reshape(x, shape=[-1, self.pool_2_shape]) + x = paddle.matmul(x, self.output_weight) + x = paddle.nn.functional.softmax(x) + if label is not None: + acc = paddle.metric.accuracy(input=x, label=label) + return x, acc + else: + return x + +``` + + + +## 5. 使用技巧 + +### 5.1 中间变量值、梯度打印 + +1. 用户想要查看任意变量的值,可以使用 [numpy](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/fluid_cn/Variable_cn.html#numpy) 接口。 + +``` +x = y * 10 +print(x.numpy()) +``` + +来直接打印变量的值 + +2. 查看反向的值 +可以在执行了 backward 之后,可以通过 [gradient](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/fluid_cn/Variable_cn.html#gradient) 接口来看任意变量的梯度 + +``` +x = y * 10 +x.backward() +print(y.gradient()) +``` + +可以直接打印反向梯度的值 + +### 5.2 断点调试 + +因为采用了命令式的编程方式,程序在执行之后,可以立马获取到执行的结果,因此在命令式编程中,用户可以利用IDE提供的断点调试功能,通过查 Variable 的 shape、真实值等信息,有助于发现程序中的问题。 + +1. 
如下图所示,在示例程序中设置两个断点,执行到第一个断点的位置,我们可以观察变量 x 和 linear1 的信息。 + +![](https://ai-studio-static-online.cdn.bcebos.com/b9bade026bea4ae797d26dcd4590452d0d563574df6b4e1cbedd0645dcbcb349) +![](https://ai-studio-static-online.cdn.bcebos.com/c2a9096e653044849b98d94758a4ac3a77025351c1134453b2c8d18dc8ad8a73) + +2. 同时可以观察 linear1 中的权重值。 + +![](https://ai-studio-static-online.cdn.bcebos.com/e46576c64de84fa780830e1146afda0acc67fb20ea43452dadfc4949a3aad684) +![](https://ai-studio-static-online.cdn.bcebos.com/c00a6152805a492485ba0bdde773b2ac7f544f56a0364038aa2d0681ed8d0483) +![](https://ai-studio-static-online.cdn.bcebos.com/f9bc8a52eaa24181a6a6832e992feb9e726afa17764146c38fd69e8d008e7994) + + +### 5.3 阻断反向传递 + +在一些任务中,只希望拿到正向预测的值,但是不希望更新参数,或者在反向的时候剪枝,减少计算量,阻断反向的传播, Paddle提供了两种解决方案: [detach](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/fluid_cn/Variable_cn.html#detach) 接口和 [stop_gradient](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/fluid_cn/Variable_cn.html#stop_gradient) 接口,建议用户使用 detach 接口。 + +1. detach接口(建议用法) +使用方式如下: + +``` +fw_out = fw_out.detach() +``` + +detach() 接口会产生一个新的、和当前计算图分离的,但是拥有当前变量内容的临时变量。 + +通过该接口可以阻断反向的梯度传递。 + + +```python +import paddle +import numpy as np + +paddle.enable_imperative() +value0 = np.arange(26).reshape(2, 13).astype("float32") +value1 = np.arange(6).reshape(2, 3).astype("float32") +value2 = np.arange(10).reshape(2, 5).astype("float32") + +# 将ndarray类型的数据转换为Variable类型 +a = paddle.imperative.to_variable(value0) +b = paddle.imperative.to_variable(value1) +c = paddle.imperative.to_variable(value2) + +# 构造fc、fc2层 +fc = paddle.nn.Linear(13, 5, dtype="float32") +fc2 = paddle.nn.Linear(3, 3, dtype="float32") + +# 对fc、fc2层执行前向计算 +out1 = fc(a) +out2 = fc2(b) + +# 将不会对out1这部分子图做反向计算 +out1 = out1.detach() + +out = paddle.concat(input=[out1, out2, c], axis=1) +out.backward() + +# 可以发现这里out1.gradient()的值都为0,同时使得fc.weight的grad没有初始化 +assert (out1.gradient() == 0).all() +``` + +2. 
stop_gradient 接口 + +每个 Variable 都有一个 stop_gradient 属性,可以用于细粒度地在反向梯度计算时排除部分子图,以提高效率。 + +如果OP只要有一个输入需要梯度,那么该OP的输出也需要梯度。相反,只有当OP的所有输入都不需要梯度时,该OP的输出也不需要梯度。在所有的 Variable 都不需要梯度的子图中,反向计算就不会进行计算了。 + +在命令式编程下,除参数以外的所有 Variable 的 stop_gradient 属性默认值都为 True,而参数的 stop_gradient 属性默认值为 False。 该属性用于自动剪枝,避免不必要的反向运算。 + +使用方式如下: + +``` +fw_out.stop_gradient = True +``` + +通过将 Variable 的 stop_gradient 属性设置为 True,当 stop_gradient 设置为 True 时,梯度在反向传播时,遇到该 Variable,就不会继续传递。 + + +```python +import paddle +import numpy as np + +paddle.enable_imperative() +value0 = np.arange(26).reshape(2, 13).astype("float32") +value1 = np.arange(6).reshape(2, 3).astype("float32") +value2 = np.arange(10).reshape(2, 5).astype("float32") + +# 将ndarray类型的数据转换为Variable类型 +a = paddle.imperative.to_variable(value0) +b = paddle.imperative.to_variable(value1) +c = paddle.imperative.to_variable(value2) + +# 构造fc、fc2层 +fc = paddle.nn.Linear(13, 5, dtype="float32") +fc2 = paddle.nn.Linear(3, 3, dtype="float32") + +# 对fc、fc2层执行前向计算 +out1 = fc(a) +out2 = fc2(b) + +# 将不会对out1这部分子图做反向计算 +out1.stop_gradient = True + +out = paddle.concat(input=[out1, out2, c], axis=1) +out.backward() + +# 可以发现这里out1.gradient()的值都为0,同时使得fc.weight的grad没有初始化 +assert (out1.gradient() == 0).all() +``` diff --git a/doc/paddle/beginners_guide/hapi.md b/doc/paddle/beginners_guide/hapi.md new file mode 100644 index 0000000000000000000000000000000000000000..42d74904124dbcf9202d1f328ab2b9227ae3b57d --- /dev/null +++ b/doc/paddle/beginners_guide/hapi.md @@ -0,0 +1,199 @@ +# 高层API介绍 + +## 简介 + +PaddleHapi是飞桨新推出的高层API,PaddleHapi是对飞桨API的进一步封装与升级,提供了更加简洁易用的API,进一步提升了飞桨的易学易用性,并增强飞桨的功能。 + +PaddleHapi面向从深度学习小白到资深开发者的所有人群,对于AI初学者来说,使用PaddleHapi可以简单快速的构建深度学习项目,对于资深开发者来说,可以使用PaddleHapi快速完成算法迭代。 + +PaddleHapi具有以下特点: +- 易学易用: 高层API是对普通动态图API的进一步封装和优化,同时保持与普通API的兼容性,高层API使用更加易学易用,同样的实现使用高层API可以节省大量的代码。 +- 低代码开发: 使用飞桨高层API的一个明显特点是,用户可编程代码量大大缩减。 +- 动静转换: 高层API支持动静转换,用户只需要改一行代码即可实现将动态图代码在静态图模式下训练,既方便用户使用动态图调试模型,又提升了模型训练效率。 + + + +在功能增强与使用方式上,高层API有以下升级: +1. 模型训练方式升级: 高层API中封装了Model类,继承了Model类的神经网络可以仅用几行代码完成模型的训练。 +2. 新增图像处理模块transform: 飞桨新增了图像预处理模块,其中包含十数种数据处理函数,基本涵盖了常用的数据处理、数据增强方法。 +3. 提供常用的神经网络模型可供调用: 高层API中集成了计算机视觉领域和自然语言处理领域常用模型,包括但不限于mobilenet、resnet、yolov3、cyclegan、bert、transformer、seq2seq等等。同时发布了对应模型的预训练模型,用户可以直接使用这些模型或者在此基础上完成二次开发。 + + +![](https://raw.githubusercontent.com/PaddlePaddle/FluidDoc/hapi/doc/fluid/beginners_guide/image/hapi_gif.gif) + + +## 目录 + +* [特性](#1) +* [快速使用](#2) +* [新增功能](#3) +* [使用示例](#4) + + +##

<a name="1"></a>特性
+ +### 易学易用 + +高层API基于飞桨动态图实现,兼容飞桨动态图的所有功能,既秉承了动态图易学、易用、易调试的特点,又对飞桨的动态图做了进一步的封装与优化。 + +### 低代码开发 + +相比较与动态图的算法实现,使用高层API实现的算法可编程代码量更少,原始的动态图训练代码需要20多行代码才能完成模型的训练,使用高层API后,仅用8行代码即可实现相同的功能。 + +使用普通API与高层API实现手写字符识别对比如下图,左边是普通动态图API的实现,右边是使用高层API的实现,可以明显发现,使用高层API的代码量更少。 +![](https://raw.githubusercontent.com/PaddlePaddle/FluidDoc/hapi/doc/fluid/beginners_guide/image/new_hapi.png) + + +### 动静统一 + +高层API中实现了动静统一,用户无需感知到静态图、动态图的区别,只需要改一行代码即可实现将动态图代码在静态图模式下训练。动态图更方便调试模型,静态图的训练方式训练效率更高。 + +高层API默认采用静态图的训练方式,我们可以使用 fluid.enable_dygraph() 切换到动态图模式下运行。 + +``` +fluid.CUDAPlace() +# 一行代码切换动态图训练模式 +fluid.enable_dygraph(place) + +# 声明网络结构 +model = Mnist("mnist") +# 定义优化器 +optimizer = fluid.optimizer.SGDOptimizer(learning_rate=0.001, parameter_list=model.parameters()) +# 调用prepare() 完成训练的配置 +model.prepare(optimizer, CrossEntropy(), Accuracy(), inputs, labels, device='gpu') +# 调用 fit(),启动模型的训练 +model.fit(train_dataset, val_dataset, batch_size=100, epochs=1, log_freq=100, save_dir="./output/") +``` + +##

<a name="2"></a>快速使用
+ +以mnist手写字符识别为例,介绍飞桨高层API的使用方式。 + +### 1. 搭建网络结构 + +使用高层API组建网络与动态图的组网方式基本相同,唯一的区别在于,使用高层API组建网络需要继承Model这个类,而普通的动态图组网是需要继承dygraph.Layer类。 + +高层API组网方式如下 +``` +from paddle.incubate.hapi.model import Model, Input +from paddle.incubate.hapi.loss import CrossEntropy + +class Mnist(Model): + def __init__(self, name_scope): + super(Mnist, self).__init__() + self.fc = Linear(input_dim=784, output_dim=10, act="softmax") + + # 定义网络结构的前向计算过程 + def forward(self, inputs): + outputs = self.fc(inputs) + return outputs + +``` + +### 2. 训练准备 + +在开始训练前,需要定义优化器、损失函数、度量函数,准备数据等等。这些过程均可以在高层API Model类中的prepare函数中完成。 + +``` +# 定义输入数据格式 +inputs = [Input([None, 784], 'float32', name='image')] +labels = [Input([None, 1], 'int64', name='label')] + +# 声明网络结构 +model = Mnist("mnist") +optimizer = fluid.optimizer.SGDOptimizer(learning_rate=0.001, parameter_list=model.parameters()) +# 使用高层API,prepare() 完成训练的配置 +model.prepare(optimizer, CrossEntropy(), Accuracy(), inputs, labels, device='gpu') +``` + +### 3. 启动训练 + +使用高层API完成训练迭代过程时,使用一行代码即可构建双层循环程序,去控制训练的轮数和数据读取过程。 + +``` +from paddle.incubate.hapi.datasets.mnist import MNIST as MnistDataset +# 定义数据读取器 +train_dataset = MnistDataset(mode='train') +val_dataset = MnistDataset(mode='test') +# 启动训练 +model.fit(train_dataset, val_dataset, batch_size=100, epochs=10, log_freq=100, save_dir="./output/") +``` + +高层API中通过fit函数完成训练的循环过程,只需要设置训练的数据读取器、batchsize大小,迭代的轮数epoch、训练日志打印频率log_freq,保存模型的路径即可。 + +##

<a name="3"></a>新增功能
+ +除了使用高层API实现一行代码启动训练外,还新增了以下功能: +- transform 数据增强模块 +- paddlevision 模型调用模块 + +### transform +vision.transform。图像预处理模块transform包括一系列的图像增强与图像处理实现,对处理计算机视觉相关的任务有很大帮助。 + +下表中列出Transform支持的数据处理和数据增强API,如下所示: + +| transform的数据处理实现 | 函数功能 | | +| :-------- | :----- | :---- | +| Compose | 组合多种数据变换 | +| Resize | 将图像转换为固定大小 | +| RandomResizedCrop | 根据输入比例对图像做随机剪切,然后resize到指定大小 | +| CenterCrop | 以图像的中心为中心对图像做剪切 | | +| CenterCropResize | 对图像做padding,padding后的图像做centercrop,然后resize到指定大小| | +| RandomHorizontalFlip | 随机对图像做水平翻转 | | +| RandomVerticalFlip | 随机对图像做垂直翻转 | | +| Permute | 将数据的的维度换位 | | +| Normalize | 用指定的均值和标准差对数据做归一化 | | +| GaussianNoise | 给数据增加高斯噪声 | | +| BrightnessTransform | 调整输入图像的亮度 | | +| SaturationTransform | 调整输入图像的饱和度 | | +| ContrastTransform | 调整输入图像的对比度 | | +| HueTransform | 调整图像的色调 | | +| ColorJitter | 随机调整图像的亮度、饱和度、对比度、和色调| | + +使用方法如下: +``` + +from paddle.incubate.hapi.vision.transforms import transforms +import cv2 + +img_path = "./output/sample.jpg" +img = cv2.imread(img_path) + +# 使用Compose 将可以将多个数据增强函数组合在一起 +trans_funcs = transforms.Compose([transforms.RandomResizedCrop(224), + transforms.RandomHorizontalFlip(), + transforms.BrightnessTransform(0.2)]) +label = None +img_processed, label = trans_funcs(img, label) + +``` + +上述代码的效果图如下: + +![](https://raw.githubusercontent.com/PaddlePaddle/FluidDoc/hapi/doc/fluid/beginners_guide/image/hapi_transform.png) + + +### paddlevision + +paddlevision中包含了高层API对常用模型的封装,包括ResNet、VGG、MobileNet、yoloV3、darknet、BMN +transformer等等。使用这些现有的模型,可以快速的完成神经网络的训练、finetune等。 + +使用paddlevision中的模型可以简单快速的构建一个深度学习任务,比如13代码即可实现resnet在Imagenet数据集上的训练: + +![](https://raw.githubusercontent.com/PaddlePaddle/FluidDoc/hapi/doc/fluid/beginners_guide/image/paddlevision.png) + + + +##
<a name="4"></a>更多使用示例
+ +更多的高层API使用示例请参考: +- [bert](https://github.com/PaddlePaddle/hapi/tree/master/examples/bert) +- [image classification](https://github.com/PaddlePaddle/hapi/tree/master/examples/image_classification) +- [BMN](https://github.com/PaddlePaddle/hapi/tree/master/examples/bmn) +- [cycleGAN](https://github.com/PaddlePaddle/hapi/tree/master/examples/cyclegan) +- [ocr](https://github.com/PaddlePaddle/hapi/tree/master/examples/ocr) +- [TSM](https://github.com/PaddlePaddle/hapi/tree/master/examples/tsm) +- [yolov3](https://github.com/PaddlePaddle/hapi/tree/master/examples/yolov3) +- [transformer](https://github.com/PaddlePaddle/hapi/tree/master/examples/transformer) +- [seq2seq](https://github.com/PaddlePaddle/hapi/tree/master/examples/seq2seq) +- [style-transfer](https://github.com/PaddlePaddle/hapi/tree/master/examples/style-transfer) diff --git a/doc/paddle/beginners_guide/image/hapi_gif.gif b/doc/paddle/beginners_guide/image/hapi_gif.gif new file mode 100644 index 0000000000000000000000000000000000000000..5d261d2f375e1800858c71bf12158a73edb7d5e9 Binary files /dev/null and b/doc/paddle/beginners_guide/image/hapi_gif.gif differ diff --git a/doc/paddle/beginners_guide/image/hapi_transform.png b/doc/paddle/beginners_guide/image/hapi_transform.png new file mode 100644 index 0000000000000000000000000000000000000000..c93947320c4d8d4a32135ce0873f45a5f5b8e9f1 Binary files /dev/null and b/doc/paddle/beginners_guide/image/hapi_transform.png differ diff --git a/doc/paddle/beginners_guide/image/new_hapi.png b/doc/paddle/beginners_guide/image/new_hapi.png new file mode 100644 index 0000000000000000000000000000000000000000..c0ea7e407b2f6b38a116ca96008d4f84ca21d88f Binary files /dev/null and b/doc/paddle/beginners_guide/image/new_hapi.png differ diff --git a/doc/paddle/beginners_guide/image/paddlevision.png b/doc/paddle/beginners_guide/image/paddlevision.png new file mode 100644 index 0000000000000000000000000000000000000000..eaef430b14bf1f107396b083d38d4a952f1744f2 Binary files /dev/null and b/doc/paddle/beginners_guide/image/paddlevision.png differ diff --git a/doc/paddle/beginners_guide/image/tensor.jpg b/doc/paddle/beginners_guide/image/tensor.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e45c28bd4b250c54e1ea380b55ee6507d4859a7f Binary files /dev/null and b/doc/paddle/beginners_guide/image/tensor.jpg differ diff --git a/doc/paddle/beginners_guide/index_cn.rst b/doc/paddle/beginners_guide/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c8d6d26db13464b4f6f33511bbc1c720ec649a5e --- /dev/null +++ b/doc/paddle/beginners_guide/index_cn.rst @@ -0,0 +1,197 @@ +快速上手 +=========== + +飞桨2.0概述 +----------- +在保持1.x版本工业级大规模高效训练和多平台快速部署优势的前提,飞桨2.0版本重点提升了框架的易用性,主要在用户交互层进行了优化,降低学习门槛,提升开发效率。不管对于初学者还是资深专家,都能够更容易地使用飞桨进行深度学习任务开发,加速前沿算法研究和工业级任务开发。 + +此版本为测试版,还在迭代开发中,目前还没有稳定,后续API会根据反馈有可能进行不兼容的升级。对于想要体验飞桨最新特性的开发者,欢迎试用此版本;对稳定性要求高的工业级应用场景推荐使用Paddle +1.8稳定版本。此版本主推命令式(imperative)开发模式,并提供了高层API的封装。命令式开发模式具有很好的灵活性,高层API可以大幅减少重复代码。对于初学者或基础的任务场景,推荐使用高层API的开发方式,简单易用;对于资深开发者想要实现复杂的功能,推荐使用动态图的API,灵活高效。 + +跟1.x版本对比,飞桨2.0版本的重要升级如下: + ++------------+--------------------------------------+-----------------------------------------+ +| | 飞桨1.x版本 | 飞桨2.0版本 | ++============+======================================+=========================================+ +| 开发模式 | 推荐声明式(declarative) | 推荐命令式(imperative) | ++------------+--------------------------------------+-----------------------------------------+ +| 组网方式 | 推荐函数式组网 | 推荐面向对象式组网 | 
++------------+--------------------------------------+-----------------------------------------+ +| 高层API | 无 | 封装常见的操作,实现低代码开发 | ++------------+--------------------------------------+-----------------------------------------+ +| 基础API | fluid目录,结构不清晰,存在过时API | paddle目录,整体结构调整,清理废弃API | ++------------+--------------------------------------+-----------------------------------------+ + +开发模式 +-------- + +飞桨同时支持声明式和命令式这两种开发模式,兼具声明式编程的高效和命令式编程的灵活。 + +声明式编程模式(通常也被称为静态模式或define-and-run模式),程序可以明确分为网络结构定义和执行这两个阶段。定义阶段声明网络结构,此时并未传入具体的训练数据;执行阶段需要用户通过feed的方式传入具体数据,完成计算后,通过fetch的方式返回计算结果。示例如下: + +.. code:: python + + import numpy + import paddle + # 定义输入数据占位符 + a = paddle.nn.data(name="a", shape=[1], dtype='int64') + b = paddle.nn.data(name="b", shape=[1], dtype='int64') + # 组建网络(此处网络仅由一个操作构成,即elementwise_add) + result = paddle.elementwise_add(a, b) + # 准备运行网络 + cpu = paddle.CPUPlace() # 定义运算设备,这里选择在CPU下训练 + exe = paddle.Executor(cpu) # 创建执行器 + # 创建输入数据 + x = numpy.array([2]) + y = numpy.array([3]) + # 运行网络 + outs = exe.run( + feed={'a':x, 'b':y}, # 将输入数据x, y分别赋值给变量a,b + fetch_list=[result] # 通过fetch_list参数指定需要获取的变量结果 + ) + #输出运行结果 + print (outs) + #[array([5], dtype=int64)] + +声明式开发模式的优点为在程序执行之前,可以拿到全局的组网信息,方便对计算图进行全局的优化,提升性能;并且由于全局计算图的存在,方便将计算图导出到文件,方便部署到非python语言的开发环境中,比如:C/C++/JavaScript等。声明式开发模式的缺点为,由于网络定义和执行阶段分离,在定义的时候并不知道所执行的具体的数据,程序的开发和调试会比较困难。 + +命令式编程模式(通常也被称为动态模式、eager模式或define-by-run模式),程序在网络结构定义的同时立即执行,能够实时的到执行结果。示例如下: + +.. code:: python + + import numpy + import paddle + from paddle.imperative import to_variable + + # 切换命令式编程模式 + paddle.enable_imperative() + + # 创建数据 + x = to_variable(numpy.array([2])) + y = to_variable(numpy.array([3])) + # 定义运算并执行 + z = paddle.elementwise_add(x, y) + # 输出执行结果 + print (z.numpy()) + +飞桨2.0推荐开发者使用命令式编程,可以使用原生python控制流API,具有灵活,容易开发调试的优点;同时为了兼具声明式编程在性能和部署方面的优势,飞桨提供了自动转换功能,可以将包含python控制流的代码,转换为Program,通过底层的Executor进行执行。 + +组网方式 +-------- + +飞桨1.x大量使用函数式的组网方式,这种方法的好处是写法很简洁,但表达能力偏弱,比如:如果我们想要查看fc隐含的参数的值或者想要对某一个参数进行裁剪时,会很困难,我们需要操作隐含的参数名才能访问。比如: + +.. code:: python + + import paddle.fluid as fluid + + data = fluid.layers.data(name="data", shape=[32, 32], dtype="float32") + fc = fluid.layers.fc(input=data, size=1000, act="tanh") + +飞桨2.0推荐使用面向对象式的组网方式,需要通过继承\ ``paddle.nn.Layer``\ 类的\ ``__init__``\ 和\ ``forward``\ 函数实现网络结构自定义,这种方式通过类的成员变量,方便地访问到每个类的成员,比如: + +.. code:: python + + import paddle + + class SimpleNet(paddle.nn.Layer): + def __init__(self, in_size, out_size): + super(SimpleNet, self).__init__() + self._linear = paddle.nn.Linear(in_size, out_size) + + def forward(self, x): + y = self._linear(x) + return y + +高层API +------- + +使用飞桨进行深度学习任务的开发,整体过程包括数据处理、组网、训练、评估、模型导出、预测部署这些基本的操作。这些基本操作在不同的任务中会反复出现,使用基础API进行开发时,需要开发者重复地写这些基础操作的代码,增加了模型开发的工作量。高层API针对这些基础操作进行了封装,提供更高层的开发接口,开发者只需要关心数据处理和自定义组网,其他工作可以通过调用高层API来完成。在MNIST手写数字识别任务中,对比动态图基础API的实现方式,通过使用高层API可以减少80%的非组网类代码。 + +使用高层API的另外一个好处是,可以通过一行代码\ ``paddle.enable_imperative``\ ,切换命令式编程模式和声明式编程模式。在开发阶段,可以使用的命令式编程模式,方便调试;开发完成后,可以切换到声明式编程模式,加速训练和方便部署。兼具了命令式编程实时执行,容易调试的优点,以及声明式编程全局优化和容易部署的优点。 + +以下为高层API的一个基础示例 + +.. 
code:: python + + import numpy as np + import paddle + import paddle.nn.functional as F + from paddle.incubate.hapi.model import Model, Input, Loss + from paddle.incubate.hapi.loss import CrossEntropy + + #高层API的组网方式需要继承Model,Model类实现了模型执行所需的逻辑 + class SimpleNet(Model): + def __init__(self, in_size, out_size): + super(SimpleNet, self).__init__() + self._linear = paddle.nn.Linear(in_size, out_size) + def forward(self, x): + y = self._linear(x) + z = self._linear(y) + pred = F.softmax(z) + return pred + + #兼容声明式开发模式,定义数据形状类型,如果不使用声明式编程模式,可以不定义数据占位符 + inputs = [Input([None, 8], 'float32', name='image')] + labels = [Input([None, 1], 'int64', name='labels')] + + #定义模型网络结构,包括指定损失函数和优化算法 + model = SimpleNet(8, 8) + optimizer = paddle.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=model.parameters()) + model.prepare(optimizer, CrossEntropy(), None, inputs, labels, device='cpu') + + #切换执行模式 + paddle.enable_imperative(paddle.CPUPlace()) + + #基于batch的训练 + batch_num = 10 + x = np.random.random((4, 8)).astype('float32') + y = np.random.randint(0, 8, (4, 1)).astype('int64') + for i in range(batch_num): + model.train_batch(inputs=x, labels=y) + +更多高层API开发的模型和示例请参考github Repo: +`hapi `__ + +基础API +------- + +飞桨2.0提供了新的API,可以同时支持声明式和命令式两种开发模式,比如paddle.nn.Linear,避免在两种模式下使用不同的API造成困惑。原飞桨1.x的API位于paddle.fluid目录下,其中部分组网类的API,只能用于声明式开发,比如:fluid.layers.fc,无法用于命令式开发。 + +飞桨2.0对API的目录结构进行了调整,从原来的paddle.fluid目录调整到paddle目录下,使得开发接口更加清晰,调整后的目录结构如下: + ++---------------------+-----------------------------------------------------------------------------------------------------------+ +| 目录 | 功能和包含API | ++=====================+===========================================================================================================+ +| paddle.\* | paddle根目录下保留了常用API的别名,当前包括:paddle.tensor, paddle.framework目录下的所有API | ++---------------------+-----------------------------------------------------------------------------------------------------------+ +| paddle.tensor | 跟tensor操作相关的API,比如:创建zeros, 矩阵运算matmul, 变换concat, 计算elementwise\_add, 查找argmax等 | ++---------------------+-----------------------------------------------------------------------------------------------------------+ +| paddle.nn | 跟组网相关的API,比如:输入占位符data/Input,控制流while\_loop/cond,损失函数,卷积,LSTM等,激活函数等 | ++---------------------+-----------------------------------------------------------------------------------------------------------+ +| paddle.framework | 基础框架相关的API,比如:Variable, Program, Executor等 | ++---------------------+-----------------------------------------------------------------------------------------------------------+ +| paddle.imperative | imprerative模式专用的API,比如:to\_variable, prepare\_context等 | ++---------------------+-----------------------------------------------------------------------------------------------------------+ +| paddle.optimizer | 优化算法相关API,比如:SGD,Adagrad, Adam等 | ++---------------------+-----------------------------------------------------------------------------------------------------------+ +| paddle.metric | 评估指标计算相关的API,比如:accuracy, cos\_sim等 | ++---------------------+-----------------------------------------------------------------------------------------------------------+ +| paddle.io | 数据输入输出相关API,比如:save, load, Dataset, DataLoader等 | ++---------------------+-----------------------------------------------------------------------------------------------------------+ +| paddle.device | 设备管理相关API,比如:CPUPlace, CUDAPlace等 | 
++---------------------+-----------------------------------------------------------------------------------------------------------+ +| paddle.fleet | 分布式相关API | ++---------------------+-----------------------------------------------------------------------------------------------------------+ + +同时飞桨2.0对部分Paddle +1.x版本的API进行了清理,删除了部分不再推荐使用的API,具体信息请参考Release +Note。 + +.. toctree:: + :hidden: + + basic_concept/index_cn.rst + dygraph/DyGraph.md + hapi.md + diff --git a/doc/paddle/beginners_guide/quick_start.rst b/doc/paddle/beginners_guide/quick_start.rst new file mode 100644 index 0000000000000000000000000000000000000000..81f99aabed2c21fa9efa0e4acfa598d7724ed6cb --- /dev/null +++ b/doc/paddle/beginners_guide/quick_start.rst @@ -0,0 +1,174 @@ +Quick Start +============= + +Quick Installation +-------------------- + +PaddlePaddle supports quick installation by pip. Execute the following commands to finish quick installation of the CPU version: + +.. code-block:: bash + + pip install paddlepaddle + +If you need to install the GPU version, or look up more specific installation methods, please refer to `Installation Instructions <../beginners_guide/install/index_en.html>`_ + + +Quick Usage +------------- + +First, you need to import the fluid library + +.. code-block:: python + + import paddle.fluid as fluid + +* Tensor Operations + + +The following simple examples may help you quickly know about Fluid: + +1.use Fluid to create a one-dimensional array with five elements, and each element is 1 + +.. code-block:: python + + # define the dimension of an array and the data type, and the parameter 'shape' can be modified to define an array of any size + data = fluid.layers.ones(shape=[5], dtype='int64') + # compute on the CPU + place = fluid.CPUPlace() + # create executors + exe = fluid.Executor(place) + # execute computation + ones_result = exe.run(fluid.default_main_program(), + # get data + fetch_list=[data], + return_numpy=True) + # output the results + print(ones_result[0]) + +you can get the results: + +.. code-block:: text + + [1 1 1 1 1] + +2.use Fluid to add two arrays by bits + +.. code-block:: python + + # call elementwise_op to add the generative arrays by bits + add = fluid.layers.elementwise_add(data,data) + # define computation place + place = fluid.CPUPlace() + exe = fluid.Executor(place) + # execute computation + add_result = exe.run(fluid.default_main_program(), + fetch_list=[add], + return_numpy=True) + # output the results + print (add_result[0]) + +you can get the results: + +.. code-block:: text + + [2 2 2 2 2] + +3.use Fluid to transform the data type + +.. code-block:: python + + # transform a one-dimentional array of int to float64 + cast = fluid.layers.cast(x=data, dtype='float64') + # define computation place to execute computation + place = fluid.CPUPlace() + exe = fluid.Executor(place) + cast_result = exe.run(fluid.default_main_program(), + fetch_list=[cast], + return_numpy=True) + # output the results + print(cast_result[0]) + +you can get the results: + +.. code-block:: text + + [1. 1. 1. 1. 1.] + + +Operate the Linear Regression Model +------------------------------------- + +By the simple example above, you may have known how to operate data with Fluid to some extent, so please try to create a test.py, and copy the following codes. + +This a a simple linear regression model to help us quickly solve the quaternary linear equation. + +.. 
code-block:: python + + #load the library + import paddle.fluid as fluid + import numpy as np + #generate data + np.random.seed(0) + outputs = np.random.randint(5, size=(10, 4)) + res = [] + for i in range(10): + # assume the equation is y=4a+6b+7c+2d + y = 4*outputs[i][0]+6*outputs[i][1]+7*outputs[i][2]+2*outputs[i][3] + res.append([y]) + # define data + train_data=np.array(outputs).astype('float32') + y_true = np.array(res).astype('float32') + + #define the network + x = fluid.layers.data(name="x",shape=[4],dtype='float32') + y = fluid.layers.data(name="y",shape=[1],dtype='float32') + y_predict = fluid.layers.fc(input=x,size=1,act=None) + #define loss function + cost = fluid.layers.square_error_cost(input=y_predict,label=y) + avg_cost = fluid.layers.mean(cost) + #define optimization methods + sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.05) + sgd_optimizer.minimize(avg_cost) + #initialize parameters + cpu = fluid.CPUPlace() + exe = fluid.Executor(cpu) + exe.run(fluid.default_startup_program()) + ##start training and iterate for 500 times + for i in range(500): + outs = exe.run( + feed={'x':train_data,'y':y_true}, + fetch_list=[y_predict.name,avg_cost.name]) + if i%50==0: + print ('iter={:.0f},cost={}'.format(i,outs[1][0])) + #save the training result + params_dirname = "result" + fluid.io.save_inference_model(params_dirname, ['x'], [y_predict], exe) + + # start inference + infer_exe = fluid.Executor(cpu) + inference_scope = fluid.Scope() + # load the trained model + with fluid.scope_guard(inference_scope): + [inference_program, feed_target_names, + fetch_targets] = fluid.io.load_inference_model(params_dirname, infer_exe) + + # generate test data + test = np.array([[[9],[5],[2],[10]]]).astype('float32') + # inference + results = infer_exe.run(inference_program, + feed={"x": test}, + fetch_list=fetch_targets) + # give the problem 【9,5,2,10】 and output the value of y=4*9+6*5+7*2+10*2 + print ("9a+5b+2c+10d={}".format(results[0][0])) + +.. code-block:: text + + get the result: + + 9a+5b+2c+10d=[99.946] + +The output result should be a value close to 100, which may have a few errors every time. + + + + diff --git a/doc/paddle/beginners_guide/quick_start_cn.rst b/doc/paddle/beginners_guide/quick_start_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ed1fb9ea00bf472f4c906deb16251fe53929e758 --- /dev/null +++ b/doc/paddle/beginners_guide/quick_start_cn.rst @@ -0,0 +1,180 @@ +快速开始 +=========== + +快速安装 +---------- + +PaddlePaddle支持使用pip快速安装, 执行下面的命令完成CPU版本的快速安装: + +.. code-block:: bash + + pip install -U paddlepaddle + +如需安装GPU版本的PaddlePaddle,执行下面的命令完成GPU版本的快速安装: + +.. code-block:: bash + + pip install -U paddlepaddle-gpu + +同时请保证您参考NVIDIA官网,已经正确配置和安装了显卡驱动,`CUDA 9 `_ ,`cuDNN 7.3 `_ ,`NCCL2 `_ 等依赖,其他更加详细的安装信息请参考:`安装说明 `_ + + +快速使用 +------------- + +首先,您需要导入fluid库 + +.. code-block:: python + + import paddle.fluid as fluid + +* Tensor操作 + + +下面几个简单的案例,可以帮助您快速了解Fluid: + +1.使用Fluid创建5个元素的一维数组,其中每个元素都为1 + +.. code-block:: python + + # 定义数组维度及数据类型,可以修改shape参数定义任意大小的数组 + data = fluid.layers.ones(shape=[5], dtype='int64') + # 在CPU上执行运算 + place = fluid.CPUPlace() + # 创建执行器 + exe = fluid.Executor(place) + # 执行计算 + ones_result = exe.run(fluid.default_main_program(), + # 获取数据data + fetch_list=[data], + return_numpy=True) + # 输出结果 + print(ones_result[0]) + +可以得到结果: + +.. code-block:: text + + [1 1 1 1 1] + +2.使用Fluid将两个数组按位相加 + +.. 
code-block:: python + + # 调用 elementwise_op 将生成的一维数组按位相加 + add = fluid.layers.elementwise_add(data,data) + # 定义运算场所 + place = fluid.CPUPlace() + exe = fluid.Executor(place) + # 执行计算 + add_result = exe.run(fluid.default_main_program(), + fetch_list=[add], + return_numpy=True) + # 输出结果 + print (add_result[0]) + +可以得到结果: + +.. code-block:: text + + [2 2 2 2 2] + +3.使用Fluid转换数据类型 + +.. code-block:: python + + # 将一维整型数组,转换成float64类型 + cast = fluid.layers.cast(x=data, dtype='float64') + # 定义运算场所执行计算 + place = fluid.CPUPlace() + exe = fluid.Executor(place) + cast_result = exe.run(fluid.default_main_program(), + fetch_list=[cast], + return_numpy=True) + # 输出结果 + print(cast_result[0]) + +可以得到结果: + +.. code-block:: text + + [1. 1. 1. 1. 1.] + + +运行线性回归模型 +----------------- + +通过上面的小例子,相信您已经对如何使用Fluid操作数据有了一定的了解,那么试着创建一个test.py,并粘贴下面的代码吧。 + +这是一个简单的线性回归模型,来帮助我们快速求解4元一次方程。 + +.. code-block:: python + + #加载库 + import paddle.fluid as fluid + import numpy as np + #生成数据 + np.random.seed(0) + outputs = np.random.randint(5, size=(10, 4)) + res = [] + for i in range(10): + # 假设方程式为 y=4a+6b+7c+2d + y = 4*outputs[i][0]+6*outputs[i][1]+7*outputs[i][2]+2*outputs[i][3] + res.append([y]) + # 定义数据 + train_data=np.array(outputs).astype('float32') + y_true = np.array(res).astype('float32') + + #定义网络 + x = fluid.layers.data(name="x",shape=[4],dtype='float32') + y = fluid.layers.data(name="y",shape=[1],dtype='float32') + y_predict = fluid.layers.fc(input=x,size=1,act=None) + #定义损失函数 + cost = fluid.layers.square_error_cost(input=y_predict,label=y) + avg_cost = fluid.layers.mean(cost) + #定义优化方法 + sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.05) + sgd_optimizer.minimize(avg_cost) + #参数初始化 + cpu = fluid.CPUPlace() + exe = fluid.Executor(cpu) + exe.run(fluid.default_startup_program()) + ##开始训练,迭代500次 + for i in range(500): + outs = exe.run( + feed={'x':train_data,'y':y_true}, + fetch_list=[y_predict.name,avg_cost.name]) + if i%50==0: + print ('iter={:.0f},cost={}'.format(i,outs[1][0])) + #存储训练结果 + params_dirname = "result" + fluid.io.save_inference_model(params_dirname, ['x'], [y_predict], exe) + + # 开始预测 + infer_exe = fluid.Executor(cpu) + inference_scope = fluid.Scope() + # 加载训练好的模型 + with fluid.scope_guard(inference_scope): + [inference_program, feed_target_names, + fetch_targets] = fluid.io.load_inference_model(params_dirname, infer_exe) + + # 生成测试数据 + test = np.array([[[9],[5],[2],[10]]]).astype('float32') + # 进行预测 + results = infer_exe.run(inference_program, + feed={"x": test}, + fetch_list=fetch_targets) + # 给出题目为 【9,5,2,10】 输出y=4*9+6*5+7*2+10*2的值 + print ("9a+5b+2c+10d={}".format(results[0][0])) + +.. 
code-block:: text + + 得到结果: + + 9a+5b+2c+10d=[99.946] + +输出结果应是一个近似等于100的值,每次计算结果略有不同。 + + + + diff --git a/doc/paddle/book/fit_a_line/README.md b/doc/paddle/book/fit_a_line/README.md new file mode 120000 index 0000000000000000000000000000000000000000..bd2a37a3c4f2897d1f62ec205c5e0f2f51000b11 --- /dev/null +++ b/doc/paddle/book/fit_a_line/README.md @@ -0,0 +1 @@ +../../../../external/book/01.fit_a_line/README.md \ No newline at end of file diff --git a/doc/paddle/book/fit_a_line/image b/doc/paddle/book/fit_a_line/image new file mode 120000 index 0000000000000000000000000000000000000000..d05fb9cdba7453d935f59f0eda3519234ede4459 --- /dev/null +++ b/doc/paddle/book/fit_a_line/image @@ -0,0 +1 @@ +../../../../external/book/01.fit_a_line/image/ \ No newline at end of file diff --git a/doc/paddle/book/image_classification/README.md b/doc/paddle/book/image_classification/README.md new file mode 120000 index 0000000000000000000000000000000000000000..52413a256afad4df447e622495b62ddb7a5eef3f --- /dev/null +++ b/doc/paddle/book/image_classification/README.md @@ -0,0 +1 @@ +../../../../external/book/03.image_classification/README.md \ No newline at end of file diff --git a/doc/paddle/book/image_classification/image b/doc/paddle/book/image_classification/image new file mode 120000 index 0000000000000000000000000000000000000000..afd28996d9fca9c55d5ea34043ac24fcc86c213c --- /dev/null +++ b/doc/paddle/book/image_classification/image @@ -0,0 +1 @@ +../../../../external/book/03.image_classification/image/ \ No newline at end of file diff --git a/doc/paddle/book/index_en.rst b/doc/paddle/book/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..2242e72520a098c75a33ed2ee53df30f698451f5 --- /dev/null +++ b/doc/paddle/book/index_en.rst @@ -0,0 +1,14 @@ +Book +====== + +.. 
toctree:: + :maxdepth: 1 + + fit_a_line/README.md + recognize_digits/README.md + image_classification/README.md + word2vec/README.md + recommender_system/README.md + understand_sentiment/README.md + label_semantic_roles/README.md + machine_translation/README.md diff --git a/doc/paddle/book/label_semantic_roles/README.md b/doc/paddle/book/label_semantic_roles/README.md new file mode 120000 index 0000000000000000000000000000000000000000..0b3329adad969993a9ebf770b1fc34363bf900b2 --- /dev/null +++ b/doc/paddle/book/label_semantic_roles/README.md @@ -0,0 +1 @@ +../../../../external/book/07.label_semantic_roles/README.md \ No newline at end of file diff --git a/doc/paddle/book/label_semantic_roles/image b/doc/paddle/book/label_semantic_roles/image new file mode 120000 index 0000000000000000000000000000000000000000..0eb76d55feab0c443e7b1fab3da870b193f166b2 --- /dev/null +++ b/doc/paddle/book/label_semantic_roles/image @@ -0,0 +1 @@ +../../../../external/book/07.label_semantic_roles/image/ \ No newline at end of file diff --git a/doc/paddle/book/machine_translation/README.md b/doc/paddle/book/machine_translation/README.md new file mode 120000 index 0000000000000000000000000000000000000000..a3507c5a5f4181e2279c1e256920b7e6c93b6c05 --- /dev/null +++ b/doc/paddle/book/machine_translation/README.md @@ -0,0 +1 @@ +../../../../external/book/08.machine_translation/README.md \ No newline at end of file diff --git a/doc/paddle/book/machine_translation/image b/doc/paddle/book/machine_translation/image new file mode 120000 index 0000000000000000000000000000000000000000..ed4d2366ec7d3fb3d03e0e4643cf9025bed4f698 --- /dev/null +++ b/doc/paddle/book/machine_translation/image @@ -0,0 +1 @@ +../../../../external/book/08.machine_translation/image/ \ No newline at end of file diff --git a/doc/paddle/book/recognize_digits/README.md b/doc/paddle/book/recognize_digits/README.md new file mode 120000 index 0000000000000000000000000000000000000000..7106e07a423d3a8184b62da2401eb7bb3f307031 --- /dev/null +++ b/doc/paddle/book/recognize_digits/README.md @@ -0,0 +1 @@ +../../../../external/book/02.recognize_digits/README.md \ No newline at end of file diff --git a/doc/paddle/book/recognize_digits/image b/doc/paddle/book/recognize_digits/image new file mode 120000 index 0000000000000000000000000000000000000000..925f1617634b19104d7795071cbaae7b55bd84df --- /dev/null +++ b/doc/paddle/book/recognize_digits/image @@ -0,0 +1 @@ +../../../../external/book/02.recognize_digits/image/ \ No newline at end of file diff --git a/doc/paddle/book/recommender_system/README.md b/doc/paddle/book/recommender_system/README.md new file mode 120000 index 0000000000000000000000000000000000000000..8a1e846978fcb3d96e2d226e1baea1797cd1761c --- /dev/null +++ b/doc/paddle/book/recommender_system/README.md @@ -0,0 +1 @@ +../../../../external/book/05.recommender_system/README.md \ No newline at end of file diff --git a/doc/paddle/book/recommender_system/image b/doc/paddle/book/recommender_system/image new file mode 120000 index 0000000000000000000000000000000000000000..a4e97ee1ae98c330733670be080c86d5aa24fc8d --- /dev/null +++ b/doc/paddle/book/recommender_system/image @@ -0,0 +1 @@ +../../../../external/book/05.recommender_system/image/ \ No newline at end of file diff --git a/doc/paddle/book/understand_sentiment/README.md b/doc/paddle/book/understand_sentiment/README.md new file mode 120000 index 0000000000000000000000000000000000000000..1133fe46ddd0d6c12f16dc3adc2ee7372cfb1f95 --- /dev/null +++ b/doc/paddle/book/understand_sentiment/README.md @@ -0,0 +1 
@@ +../../../../external/book/06.understand_sentiment/README.md \ No newline at end of file diff --git a/doc/paddle/book/understand_sentiment/image b/doc/paddle/book/understand_sentiment/image new file mode 120000 index 0000000000000000000000000000000000000000..c519bc94e3b50cc5bb31e7f601f6c274400345a2 --- /dev/null +++ b/doc/paddle/book/understand_sentiment/image @@ -0,0 +1 @@ +../../../../external/book/06.understand_sentiment/image/ \ No newline at end of file diff --git a/doc/paddle/book/word2vec/README.md b/doc/paddle/book/word2vec/README.md new file mode 120000 index 0000000000000000000000000000000000000000..7dfb712d27ab15cb33b2557b1b886d7d369d44f0 --- /dev/null +++ b/doc/paddle/book/word2vec/README.md @@ -0,0 +1 @@ +../../../../external/book/04.word2vec/README.md \ No newline at end of file diff --git a/doc/paddle/book/word2vec/images b/doc/paddle/book/word2vec/images new file mode 120000 index 0000000000000000000000000000000000000000..dd59e54f0c4cf9058d375650889b52a8996d1418 --- /dev/null +++ b/doc/paddle/book/word2vec/images @@ -0,0 +1 @@ +../../../../external/book/04.word2vec/image/ \ No newline at end of file diff --git a/doc/paddle/book_cnls b/doc/paddle/book_cnls new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/doc/paddle/design/algorithm/images/asgd.gif b/doc/paddle/design/algorithm/images/asgd.gif new file mode 100644 index 0000000000000000000000000000000000000000..4a0da7bf6df9326a2aab1638b77c5455c18b8c4e Binary files /dev/null and b/doc/paddle/design/algorithm/images/asgd.gif differ diff --git a/doc/paddle/design/algorithm/images/theta_star.gif b/doc/paddle/design/algorithm/images/theta_star.gif new file mode 100644 index 0000000000000000000000000000000000000000..dd24d33e124396be3fc410c9b12f33148f64efe2 Binary files /dev/null and b/doc/paddle/design/algorithm/images/theta_star.gif differ diff --git a/doc/paddle/design/algorithm/index_cn.rst b/doc/paddle/design/algorithm/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0883a9dc9c457f393ac1bdc930cb47ebcb0a25d9 --- /dev/null +++ b/doc/paddle/design/algorithm/index_cn.rst @@ -0,0 +1,7 @@ +梯度更新算法 +------------ + +.. toctree:: + :maxdepth: 1 + + parameter_average.md diff --git a/doc/paddle/design/algorithm/index_en.rst b/doc/paddle/design/algorithm/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..59fe68dcf79ce2ef90b9adc829a0db45a4f0b3dc --- /dev/null +++ b/doc/paddle/design/algorithm/index_en.rst @@ -0,0 +1,7 @@ +Gradient Update Algorithm +-------------------------------------- + +.. toctree:: + :maxdepth: 1 + + parameter_average.md diff --git a/doc/paddle/design/algorithm/parameter_average.md b/doc/paddle/design/algorithm/parameter_average.md new file mode 100644 index 0000000000000000000000000000000000000000..a77062cb16a20f102e1f81cef1214d47dda5f60c --- /dev/null +++ b/doc/paddle/design/algorithm/parameter_average.md @@ -0,0 +1,74 @@ +# Averaging Parameter in PaddlePaddle + +## Why Averaging +In a large scale machine learning setup where the size of the training data is huge, it could take us a large number of iterations over the training data before we can achieve the optimal values of parameters of our model. Looking at the problem setup, it is desirable to obtain the optimal values of parameters by going through the data in as few passes as possible. 
+ +Polyak and Juditsky (1992) showed that the test performance of simple average of parameters obtained by Stochastic Gradient Descent (SGD) is as good as that of parameter values that are obtained by training the model over and over again, over the training dataset. + +Hence, to accelerate the speed of Stochastic Gradient Descent, Averaged Stochastic Gradient Descent (ASGD) was proposed in Polyak and Juditsky (1992). For ASGD, the running average of parameters obtained by SGD, is used as the estimator for
<img src="./images/theta_star.gif"/> (the optimal parameter value). The averaging is done as follows:

<p align="center">
<img src="./images/asgd.gif"><br />
</p>
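To make the bookkeeping concrete, the running average can be maintained incrementally next to the ordinary SGD update. The snippet below is a minimal NumPy sketch of that idea; the variable names and the toy gradient are illustrative only and are not taken from the PaddlePaddle implementation.

```python
import numpy as np

theta = np.zeros(4)               # parameters updated by plain SGD
theta_avg = np.zeros_like(theta)  # running average used for evaluation

for t in range(1, 101):
    grad = np.random.randn(4)     # stand-in for a real mini-batch gradient
    theta -= 0.01 * grad          # ordinary SGD step
    # incremental form of theta_avg = mean(theta_1, ..., theta_t)
    theta_avg += (theta - theta_avg) / t
```

At test time the averaged copy would be swapped in for the raw parameters, which is exactly the temporary-copy procedure described in the testing/saving steps below.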
+ +We propose averaging for any optimizer similar to how ASGD performs it, as mentioned above. + +### How to perform Parameter Averaging in PaddlePaddle + +Parameter Averaging in PaddlePaddle works in the following way during training : +1. It will take in an instance of an optimizer as an input, e.g. RMSPropOptimizer +2. The optimizer itself is responsible for updating the parameters. +3. The ParameterAverageOptimizer maintains a separate copy of the parameters for itself: + 1. In theory, the values of this copy are the average of the values of the parameters in the most recent N batches. + 2. However, saving all N instances of the parameters in memory is not feasible. + 3. Therefore, an approximation algorithm is used. + +Hence, overall we have have two copies of the parameters: one for the optimizer itself, and one for the ParameterAverageOptimizer. The former should be used in back propagation, while the latter should be used during testing and should be saved. + +During the testing/saving the model phase, we perform the following steps: +1. Perform the delayed operations. +2. Save current values of the parameters to a temporary variable. +3. Replace the values of the parameters with the averaged values. +4. Perform testing and/or save the parameters. +5. Restore the values of the parameters once done. + +### How to implement Averaging of Parameter in PaddlePaddle + +We can add the ParameterAverageOptimizer op to the graph through Python API. Using this approach, we manually add this op to the graph and direct the output of the optimizer op to this op during training. + + **Advantages**: + - Allows for greater flexibility to the users of PaddlePaddle. Using this approach, the users can plug different optimizers into ParameterAverageOptimizer by passing in the optimizer to the op. + - Makes it easy for the users to customize and extend the framework. + + **Disadvantages**: + - Implementation requires re-writing the averaging methodology in Python. + +### Low-Level implementation + +In the new design, we propose to create a new operation for averaging parameter updates (ParameterAverageOptimizer). For now, we can add an op that takes in the following as input: +- the optimizer +- the window_size to keep the updates + +The ParameterAverageOptimizer op can be like any other operator with its own CPU/GPU implementation either using Eigen or separate CPU and GPU kernels. As the initial implementation, we can implement the kernel using Eigen following the abstraction pattern implemented for [Operators](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/rmsprop_op.h). We also want to support the case when the Trainer/Optimizer runs on the GPU while ParameterAverageOptimizer runs on a CPU. + +The idea of building an op for averaging is in sync with the refactored PaddlePaddle philosophy of using operators to represent any computation unit. The way the op will be added to the computation graph will be decided by the [layer functions](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/modules/python_api.md#layer-function) in Python API. + +### Python API implementation for ParameterAverageOptimizer + +Based on Polyak and Juditsky (1992), we can generalize the averaging of updates to any optimizer. The input to the op would be the following: +- Any optimizer (RMSProp , AdaGrad etc.) +- A window size. The op keeps accumulating updated parameter values over a window of N batches and takes an average. 
Move the averaged value to a buffer when window is full to avoid loss of precision. + +Using the ParameterAverageOptimizer op, any user can add the operation to their computation graphs. However, this will require a lot of lines of code and we should design Python APIs that support averaging. As per the PaddlePaddle [Python API design](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/modules/python_api.md), the layer functions are responsible for creating operators, operator parameters and variables. Since ParameterAverageOptimizer will be an operator, it makes sense to create it in the layer functions. +We will have a wrapper written in Python that will support the functionality and implement the actual core computation in C++ core as we have done for other [Optimizers](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/rmsprop_op.cc) + +#### Creation of the ParameterAverageOptimizer operator +There are two ways for creating the ParameterAverageOptimizer op: +1. We create the op immediately while building the computation graph. +2. We add the op in a lazy manner, just before the backward pass, similar to the way the optimization ops are added. + +The proposal is to add the op immediately while building the computation graph. + +#### High-level API + +In PaddlePaddle Python API, users will primarily rely on [layer functions](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/modules/python_api.md#layer-function) to create neural network layers. Hence, we also need to provide parameter average functionality in layer functions. diff --git a/doc/paddle/design/concepts/README.md b/doc/paddle/design/concepts/README.md new file mode 100644 index 0000000000000000000000000000000000000000..8ded0ad22f4013a521bf3bee260565dc5cf855ae --- /dev/null +++ b/doc/paddle/design/concepts/README.md @@ -0,0 +1,174 @@ +A few months ago when we were trying to replace CMake with Bazel, @emailweixu suggested that we rewrite those handy Bazel functions using CMake. Now it seems that it's the right time to get this done, as we are facing problems from the porting of Majel and the development of new the parameter server using Go and C++. + +Here are some initial thoughts. Your comments are welcome! + +# Required CMake Function + +I think we need only the following few CMake functions to make a project description mean and clean: + + + + + + + + + + + + + + + + + + + + + + + + + + +
| C++        | CUDA C++   | Go         |
| :--------- | :--------- | :--------- |
| cc_library | nv_library | go_library |
| cc_binary  | nv_binary  | go_binary  |
| cc_test    | nv_test    | go_test    |
+ + +- The `_library` functions generate .a files from source code. +- The `_binary` functions generate executable binary files. +- The `_test` functions generate executable unit test files. They work like `_binary` but links `-lgtest` and `-lgtest_main`. + +The difference between `nv_` functions and `cc_` functions is that the former use `nvcc` instead of the system-default C++ compiler. + +Both `nv_` and `cc_` functions enables C++11 (-std=c++11). + +Also, + +- to describe external dependencies, we need `external_library`. +- to build shared libraries, we need `shared_library`. + +## An Example Project + +Suppose that we have aforementioned functions defined in our `/cmake` directory. The following example `CMakeLists.txt` describes a project including the following source files: + +- tensor.h +- tensor.cc +- tensor_test.cc +- ops.h +- ops.cu +- ops_test.cu +- api.go +- api_test.go + +Suppose that ops.cu depends on CUDNN. + +```cmake +# cc_binary parses tensor.cc and figures out that target also depend +# on tensor.h. +cc_binary(tensor + SRCS + tensor.cc) + +# The dependency to target tensor implies that if any of +# tensor{.h,.cc,_test.cc} is changed, tensor_test need to be re-built. +cc_test(tensor_test + SRCS + tensor_test.cc + DEPS + tensor) + +# I don't have a clear idea what parameters external_library need to +# have. @gangliao as a CMake expert would have better ideas. +external_library(cudnn + ....) + +# Suppose that ops.cu depends on external target CUDNN. Also, ops.cu +# include global functions that take Tensor as their parameters, so +# ops depend on tensor. This implies that if any of tensor.{h.cc}, +# ops.{h,cu} is changed, ops need to be re-built. +nv_library(ops + SRCS + ops.cu + DEPS + tensor + cudnn) # cudnn is defined later. + +nv_test(ops_test + SRCS + ops_test.cu + DEPS + ops) + +# Because api.go defines a GO wrapper to ops and tensor, it depends on +# both. This implies that if any of tensor.{h,cc}, ops.{h,cu}, or +# api.go is changed, api need to be re-built. +go_library(api + SRCS + api.go + DEPS + tensor # Because ops depend on tensor, this line is optional. + ops) + +go_test(api_test + SRCS + api_test.go + DEPS + api) + + +# This builds libapi.so. shared_library might use CMake target +# api_shared so to distinguish it from above target api. +shared_library(api + DEPS + api) + +``` + +## Implementation + +As above example CMakeLists.txt executes, each function invocation adds "nodes" to a dependency graph. It also use this graph to generate CMake commands including `add_executable`, `add_dependencies`, `target_link_libraries`, and `add_test`. + +## Using Package Manager For Go + +Building Go binaries and libraries need to satisfy their dependencies, generally +we can do `go get ./...` to download and compile all external dependencies. The +problems are: + +1. `go get` will always get the latest code from the default branch of the + remote repo, so changes of dependents might break the build. This is very + different with what we already have in `cmake/external` which download a + specific version or commit id of the dependency. +1. Some locations can not access external dependencies through the internet, as mentioned + in https://github.com/PaddlePaddle/Paddle/issues/2605. Using package management + tools can package the dependencies as a "vendor" package, which can be mirrored + at many cloud file hosting, so users what to compile paddle by themselves can + download this "vendor" package from a mirror site. 
+ +### Choose A Suitable Tool + +As mentioned by @wangkuiyi, [Here](https://github.com/golang/go/wiki/PackageManagementTools) +list dozens of Go package managers. We choose the tool using following principles: + +- Most "active" projects with more stars, more pull requests or commits +- Widely used project + +After comparing all these projects, we shall choose between the most popular +tools: Godep and Glide. + +Here's a brief comparison between Godep and Glide +: https://github.com/Masterminds/glide/wiki/Go-Package-Manager-Comparison. There are +also many complaints about using `Godep`. There's also a new "official" pakcage +management tool has been started at: https://github.com/golang/dep to resolve +such problems, but it's currently at Alpha stage. So the best choice now is +glide obviously. + +### Manage Go Packages + +- Dependencies: `go/glide.yaml` will store the dependencies and their versions which + is directly imported by paddle. `go/glide.lock` will store all dependencies recursively + with their commit id. Builds will "lock" to these packages if we don't `glide up` + them +- Vendor package: `go/vendor` directory will generated when running `cmake` command. `cmake` + will download the code corresponding to `go/glide.lock`. If we put a vendor folder + under `go/`, cmake will just check the commit id to the packages under the folder, + if commit id matches, there will be no download at all. diff --git a/doc/paddle/design/concepts/block.md b/doc/paddle/design/concepts/block.md new file mode 100644 index 0000000000000000000000000000000000000000..15d3d67f55719d5f72fae5472a253d2b00f4da53 --- /dev/null +++ b/doc/paddle/design/concepts/block.md @@ -0,0 +1,375 @@ +# Design Doc: Block and Scope + +## The Representation of Computation + +Both deep learning systems and programming languages help users describe computation procedures. These systems use various representations of computation: + +- Caffe, Torch, and Paddle: sequences of layers. +- TensorFlow, Caffe2, Mxnet: graph of operators. +- PaddlePaddle: nested blocks, like C++ and Java programs. + +## Block in Programming Languages and Deep Learning + +In programming languages, a block is a pair of curly braces that includes local variables definitions and a sequence of instructions or operators. + +Blocks work with control flow structures like `if`, `else`, and `for`, which have equivalents in deep learning: + + + + + + + + + + + + + + + + + + + + + + +
| programming languages | PaddlePaddle         |
| :-------------------- | :------------------- |
| for, while loop       | RNN, WhileOp         |
| if, if-else, switch   | IfElseOp, SwitchOp   |
| sequential execution  | a sequence of layers |
+ + +A key difference is that a C++ program describes a one pass computation, whereas a deep learning program describes both the forward and backward passes. + +## Stack Frames and the Scope Hierarchy + +The existence of the backward pass makes the execution of a block of PaddlePaddle different from traditional programs: + + + + + + + + + + + + + + + + + + + + + + + + + + +
| programming languages  | PaddlePaddle                     |
| :--------------------- | :------------------------------- |
| stack                  | scope hierarchy                  |
| stack frame            | scope                            |
| push at entering block | push at entering block           |
| pop at leaving block   | destroy when minibatch completes |
+ + +1. In traditional programs: + + - When the execution enters the left curly brace of a block, the runtime pushes a frame into the stack, where it realizes local variables. + - After the execution leaves the right curly brace, the runtime pops the frame. + - The maximum number of frames in the stack is the maximum depth of nested blocks. + +1. In PaddlePaddle + + - When the execution enters a block, PaddlePaddle adds a new scope, where it realizes variables. + - PaddlePaddle doesn't pop a scope after the execution of the block because variables therein are used by the backward pass. So it has a stack forest known as a *scope hierarchy*. + - The height of the highest tree is the maximum depth of nested blocks. + - After the processing of a minibatch, PaddlePaddle destroys the scope hierarchy. + +## Use Blocks in C++ and PaddlePaddle Programs + +Let us consolidate the discussion by presenting some examples. + +### Blocks with `if-else` and `IfElseOp` + +The following C++ programs shows how blocks are used with the `if-else` structure: + +```c++ +namespace pd = paddle; + +int x = 10; +int y = 1; +int z = 10; +bool cond = false; +int o1, o2; +if (cond) { + int z = x + y; + o1 = z; + o2 = pd::layer::softmax(z); +} else { + int d = pd::layer::fc(z); + o1 = d; + o2 = d+1; +} + +``` + +An equivalent PaddlePaddle program from the design doc of the [IfElseOp operator](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/execution/if_else_op.md) is as follows: + +```python +import paddle as pd + +x = minibatch([10, 20, 30]) # shape=[None, 1] +y = var(1) # shape=[1], value=1 +z = minibatch([10, 20, 30]) # shape=[None, 1] +cond = larger_than(x, 15) # [false, true, true] + +ie = pd.ifelse() +with ie.true_block(): + d = pd.layer.add_scalar(x, y) + ie.output(d, pd.layer.softmax(d)) +with ie.false_block(): + d = pd.layer.fc(z) + ie.output(d, d+1) +o1, o2 = ie(cond) +``` + +In both examples, the left branch computes `x+y` and `softmax(x+y)`, the right branch computes `fc(x)` and `x+1` . + +The difference is that variables in the C++ program contain scalar values, whereas those in the PaddlePaddle programs are mini-batches of instances. + + +### Blocks with `for` and `RNNOp` + +The following RNN model in PaddlePaddle from the [RNN design doc](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/dynamic_rnn/rnn_design_en.md) : + +```python +x = sequence([10, 20, 30]) # shape=[None, 1] +m = var(0) # shape=[1] +W = var(0.314, param=true) # shape=[1] +U = var(0.375, param=true) # shape=[1] + +rnn = pd.rnn() +with rnn.step(): + h = rnn.memory(init = m) + h_prev = rnn.previous_memory(h) + a = layer.fc(W, x) + b = layer.fc(U, h_prev) + s = pd.add(a, b) + act = pd.sigmoid(s) + rnn.update_memory(h, act) + rnn.output(a, b) +o1, o2 = rnn() +``` +has its equivalent C++ program as follows + +```c++ +int* x = {10, 20, 30}; +int* m = {0}; +int* W = {0.314}; +int* U = {0.375}; + +int mem[sizeof(x) / sizeof(x[0]) + 1]; +int o1[sizeof(x) / sizeof(x[0]) + 1]; +int o2[sizeof(x) / sizeof(x[0]) + 1]; +for (int i = 1; i <= sizeof(x)/sizeof(x[0]); ++i) { + int x = x[i-1]; + if (i == 1) mem[0] = m; + int a = W * x; + int b = Y * mem[i-1]; + int s = fc_out + hidden_out; + int act = sigmoid(sum); + mem[i] = act; + o1[i] = act; + o2[i] = hidden_out; +} +``` + +## Compilation and Execution + +Like TensorFlow, a PaddlePaddle program is written in Python. The first part describes a neural network as a protobuf message, and the rest executes the message for training or inference. 
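As a rough illustration of this split, the sketch below uses the fluid Python API that appears elsewhere in these docs (it is not code from this design): the first half only builds the protobuf description of the network, and the second half hands that description to an executor.

```python
import numpy as np
import paddle.fluid as fluid

# Describe: build the ProgramDesc (a protobuf message); nothing is computed here.
main = fluid.Program()
startup = fluid.Program()
with fluid.program_guard(main, startup):
    x = fluid.layers.data(name='x', shape=[4], dtype='float32')
    y = fluid.layers.fc(input=x, size=1)

# Execute: run the description with an executor.
exe = fluid.Executor(fluid.CPUPlace())
exe.run(startup)  # initialize parameters
out, = exe.run(main,
               feed={'x': np.random.rand(2, 4).astype('float32')},
               fetch_list=[y])
```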
+ +The generation of this protobuf message is similar to how a compiler generates a binary executable file. The execution of the message is similar to how the OS executes the binary file. + +## The "Binary Executable File Format" + +The definition of the protobuf message is as follows: + +```protobuf +message BlockDesc { + repeated VarDesc vars = 1; + repeated OpDesc ops = 2; +} +``` + +The step net in above RNN example would look like + +``` +BlockDesc { + vars = { + VarDesc {...} // x + VarDesc {...} // h + VarDesc {...} // fc_out + VarDesc {...} // hidden_out + VarDesc {...} // sum + VarDesc {...} // act + } + ops = { + OpDesc {...} // matmul + OpDesc {...} // add_two + OpDesc {...} // sigmoid + } +}; +``` + +Also, the RNN operator in above example is serialized into a protobuf message of type `OpDesc` and would look like: + +``` +OpDesc { + inputs = {0} // the index of x in vars of BlockDesc above + outputs = {5, 3} // indices of act and hidden_out in vars of BlockDesc above + attrs { + "states" : {1} // the index of h + "step_net" : + } +}; +``` + +This `OpDesc` value is in the `ops` field of the `BlockDesc` value representing the global block. + + +## The Compilation of Blocks + +During the generation of the Protobuf message, the Block should store VarDesc (the Protobuf message which describes Variable) and OpDesc (the Protobuf message which describes Operator). + +VarDesc in a block should have its name scope to avoid local variables affecting parent block's name scope. +Child block's name scopes should inherit the parent's so that OpDesc in child block can reference a VarDesc that is stored in the parent block. For example: + +```python +a = pd.Variable(shape=[20, 20]) +b = pd.fc(a, params=["fc.w", "fc.b"]) + +rnn = pd.create_rnn() +with rnn.stepnet(): + x = a.as_step_input() + # reuse fc's parameter + fc_without_b = pd.get_variable("fc.w") + rnn.output(fc_without_b) + +out = rnn() +``` +The method `pd.get_variable` can help retrieve a Variable by the name. The Variable may be stored in a parent block, but might be retrieved in a child block, so block should have a variable scope that supports inheritance. + +In compiler design, the symbol table is a data structure created and maintained by compilers to store information about the occurrence of various entities such as variable names, function names, classes, etc. + +To store the definition of variables and operators, we define a C++ class `SymbolTable`, like the one used in compilers. + +`SymbolTable` can do the following: + +- store the definitions (some names and attributes) of variables and operators, +- verify if a variable was declared, +- make it possible to implement type checking (offer Protobuf message pointers to `InferShape` handlers). + + +```c++ +// Information in SymbolTable is enough to trace the dependency graph. So maybe +// the Eval() interface takes a SymbolTable is enough. +class SymbolTable { + public: + SymbolTable(SymbolTable* parent) : parent_(parent) {} + + OpDesc* NewOp(const string& name=""); + + // TODO determine whether name is generated by python or C++. + // Currently assume that a unique name will be generated by C++ if the + // argument name is left default. + VarDesc* Var(const string& name=""); + + // find a VarDesc by name, if recursive is true, find parent's SymbolTable + // recursively. + // this interface is introduced to support InferShape, find protobuf messages + // of variables and operators, pass pointers into InferShape. 
+ // + // NOTE maybe some C++ classes such as VarDescBuilder and OpDescBuilder should + // be proposed and embedded into pybind to enable python operation on C++ pointers. + VarDesc* FindVar(const string& name, bool recursive=true); + + OpDesc* FindOp(const string& name); + + BlockDesc Compile() const; + + private: + SymbolTable* parent_; + + map ops_; + map vars_; +}; +``` + +After all the description of variables and operators is added into SymbolTable, +the block has enough information to run. + +The `Block` class takes a `BlockDesc` as input, and provides `Run` and `InferShape` functions. + + +```c++ +namespace { + +class Block : OperatorBase { +public: + Block(const BlockDesc& desc) desc_(desc) {} + + void InferShape(const framework::Scope& scope) const override { + if (!symbols_ready_) { + CreateVariables(scope); + CreateOperators(); + } + // should run InferShape first. + for (auto& op : runtime_table_.ops()) { + op->InferShape(scope); + } + } + + void Run(const framework::Scope& scope, + const platform::Place& place) const override { + PADDLE_ENFORCE(symbols_ready_, "operators and variables should be created first."); + for (auto& op : runtime_table_.ops()) { + op->Run(scope, place); + } + } + + void CreateVariables(const framework::Scope& scope); + void CreateOperators(); + + // some other necessary interfaces of NetOp are listed below + // ... + +private: + BlockDesc desc_; + bool symbols_ready_{false}; +}; +``` + +## The Execution of Blocks + +Block inherits from OperatorBase, which has a Run method. +Block's Run method will run its operators sequentially. + +There is another important interface called `Eval`, which takes some arguments called targets and generates a minimal graph which treats targets as the end points and creates a new Block. After `Run`, `Eval` will get the latest value and return the targets. + +The definition of Eval is as follows: + +```c++ +// clean a block description by targets using the corresponding dependency graph. +// return a new BlockDesc with minimal number of operators. +// NOTE: The return type is not a Block but the block's description so that this can be distributed +// to a cluster. +BlockDesc Prune(const BlockDesc& desc, vector targets); + +void Block::Eval(const vector& targets, + const framework::Scope& scope, + const platform::DeviceContext& dev_ctx) { + BlockDesc min_desc = Prune(desc_, targets); + Block min_block(min_desc); + min_block.Run(scope, dev_ctx); +} +``` diff --git a/doc/paddle/design/concepts/cpp_data_feeding.md b/doc/paddle/design/concepts/cpp_data_feeding.md new file mode 100644 index 0000000000000000000000000000000000000000..745d4fa007ae437789857bfd8bbb776d53e69686 --- /dev/null +++ b/doc/paddle/design/concepts/cpp_data_feeding.md @@ -0,0 +1,204 @@ +# C++ Data Feeding + +While using Paddle V2 API for training, data feeding completely depends on the Python code. To get rid of the Python environment and achieve the goal of "wrapping the whole training by a while loop op" in Paddle Fluid, a C++ data feeding mechanism is required. + +In this document, we show the fundamental design of a C++ data feeding process, which includes data reading, shuffling and batching. + +## Overview + +![](images/readers.png) + +## Reader + +In order to handle the above-mentioned problem, a new concept called 'Reader' is introduced. `Reader` is a series of inherited classes which can be held by our `Variable` and they are used to read or process file data. + + +### ReaderBase + +`ReaderBase` is the abstract base class for all readers. 
It defines the interface for all readers. + +```cpp +class ReaderBase { + public: + // Reads the next batch of data. (A 'batch' can be only one instance) + // If the next batch doesn't exist, it throws an exception + virtual void ReadNext(std::vector* out) = 0; + + // Checks whether the next instance exists. + virtual bool HasNext() = 0; + + // Reinitializes the reader and read the file from the beginning. + virtual void ReInit() = 0; + + virtual ~ReaderBase(); +}; +``` + +### FileReader + +`FileReader` is derived from the `ReaderBase`. It is still an abstract class and will further be derived by Readers of respective specific format. + +```cpp +class FileReader : public ReaderBase { + public: + explicit FileReader(const std::vector& dims); + + void ReadNext(std::vector* out) override; + + protected: + virtual void ReadNextImpl(std::vector* out) = 0; + + private: + std::vector dims_; +}; +``` + +A file reader binds with a single file and reads one data instance at a time. Each type of file reader shall implement its own `ReadNextImpl()`, `HasNext()` and `ReInit()`. + +The `ReadNextImpl()` is invoked by `ReadNext()`. Besides invoking `ReadNextImpl()`, `ReadNext()` is also responsible for checking the output, making sure that each shape of `LoDTensor` in `*out` is consistent with the one in `dims_`. + +### DecoratedReader + +A decorated reader takes another reader(both file reader and decorated reader are OK) as its 'underlying reader'. It gets data from its underlying reader, does some processing on them(shuffling, batching or something else), then yields processed data. The output data of a decorated reader can be a single instance or a batch. `ShuffleReader` and `BatchReader` are both decorated readers. + +```cpp +class DecoratedReader : public ReaderBase { + public: + explicit DecoratedReader(ReaderBase* reader) : ReaderBase(), reader_(reader) { + PADDLE_ENFORCE_NOT_NULL(reader_); + } + + void ReInit() override { reader_->ReInit(); } + + bool HasNext() const override { return reader_->HasNext(); } + + protected: + ReaderBase* reader_; +}; +``` + +Both the `FileReader` and `DecoratedReader` share exactly the same interface as defined in `ReaderBase`. So they can be decorated for multiple times: We can **shuffle** a reader's outputs and then **batch** the shuffled outputs. The interface consistency also allows related ops use readers without knowing their underlying type. + +### MultipleReader + +All `FileReader` binds with a single file and are single-threaded. However, sometimes we need to read data from more than one file. In this case, it's not enough to only have `FileReader` and `DecoratedReader`. + +So `MultipleReader` is introduced. It is also derived from `ReaderBase`. A `MultipleReader` holds several prefetching `FileReaders` and these readers run concurrently. Another pivotal part of a `MultipleReader` is a buffer channel. The channel collects data yield by all prefetching readers and makes subsequent OPs or decorated readers be able to fetch data without concerning about multiple readers scheduling. + +![](images/multiple_reader.png) + +This graph shows how a `MultipleReader` works with three prefetching file readers and two GPUs. There is a queue of files which are going to be read. Each time when a prefetching file reader is free(complete reading from one file), it fetches a new file from the queue. Each prefetching file reader runs in a separated prefetch thread and dumps their outputs to the same channel. 
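The following is a minimal sketch of this prefetch-threads-plus-channel pattern, not the actual `MultipleReader` implementation: `Channel` is a simplified stand-in for the buffer channel, and integers stand in for the data read from files.

```cpp
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>

// A tiny bounded blocking queue standing in for the buffer channel.
template <typename T>
class Channel {
 public:
  explicit Channel(size_t cap) : cap_(cap) {}
  void Push(T v) {
    std::unique_lock<std::mutex> lock(mu_);
    not_full_.wait(lock, [&] { return q_.size() < cap_; });
    q_.push(std::move(v));
    not_empty_.notify_one();
  }
  T Pop() {
    std::unique_lock<std::mutex> lock(mu_);
    not_empty_.wait(lock, [&] { return !q_.empty(); });
    T v = std::move(q_.front());
    q_.pop();
    not_full_.notify_one();
    return v;
  }

 private:
  std::mutex mu_;
  std::condition_variable not_full_, not_empty_;
  std::queue<T> q_;
  size_t cap_;
};

int main() {
  Channel<int> channel(4);
  // Three "prefetching file readers", each dumping its data into the channel.
  std::vector<std::thread> readers;
  for (int file = 0; file < 3; ++file) {
    readers.emplace_back([&channel, file] {
      for (int i = 0; i < 5; ++i) channel.Push(file * 100 + i);
    });
  }
  // The consumer (a decorated reader or an op) sees a single stream of data.
  for (int i = 0; i < 15; ++i) std::cout << channel.Pop() << " ";
  std::cout << "\n";
  for (auto& t : readers) t.join();
  return 0;
}
```

However many producer threads exist, the consumer only ever interacts with the channel, which is the property the `MultipleReader` design relies on.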
+ +To the subsequent two decorated readers, the `MultipleReader` is **a single reader**. They don't need to concern about how prefetch readers are scheduled. They only need to invoke `MultipleReader::ReadNext()` to get the next data from the buffer channel. + +### ReaderHolder + +Different readers belong to different class types. This leads to a problem: How can we drop them into `Variable`s and fetch them out by a unified method? For example, if a Variable holds a `BatchReader`, we can not get it by the following code: + +```cpp +var->Get("batch_reader"); +``` + +We would have to write: + +```cpp +var->Get("batch_reader"); +``` + +This requires that in order to get a reader from a variable, every time, we must know the reader's type exactly. This is nearly impossible. + +To solve this problem, we introduce `ReaderHolder` as a wrapper. It acts as an empty decorator of `ReaderBase`, which hides reader's type. With `ReaderHolder` we are able to fetch all types of readers by `var->Get("...")` and regard the obtained object as a reader. + +## Related Operators + +To create and invoke readers, some new ops are introduced: + +### Operators That Create Readers + +Each reader has its creation op. File readers' creation ops have no input and yield the created file reader as its output. Decorated readers' creation ops take the underlying readers as inputs and then yield new decorated readers. + +However, direct usage of file readers' creation ops is not recommended because a file reader can only read one file via a single thread. Using `OpenFilesOp` is a better choice. + +### OpenFilesOp + +The `OpenFilesOp` is the creation op of `MultipleReader`. It takes no input but requires a list of file names as one of its attributes. The newly created `MultipleReader` then creates its own prefetching readers according to given file names. + +To make sure that created prefetching readers match file formats, we need a name prefix rule to append file format tags to file names, as well as a file reader registry mechanism to map file format tags to their corresponding file readers' constructors. + +### HasNextOp + +`HasNextOp` is used to check whether the next data batch exists via the reader's `HasNext()` interface. + +### ResetOp + +`ResetOp` is used to reset a reader via its `ReInit()` interface. + +### ReadOp + +A reader is only a Variable. It cannot trigger the reading process by itself. So we add the `ReadOp` to execute it. A `ReadOp` takes a reader Variable as its input. Each time it runs, it invokes the reader‘s `ReadNext()` function and gets a new batch of data(or only one instance of data, if we use file reader directly). The output data of a reader are in the form of `std::vector`, so the `ReadOp` also needs to split the vector and move LoDTensors to their respective output Variables. + +## Program with Readers + +A `Program` holds readers as its persistable variables. These variables are created by `CreateReaderOp` or `OpenFilesOp`. These ops shall run only once. So they shall be settled in the `startup_program`. `HasNextOp`, `ResetOp` and `ReadOp` are required by training loop, so they shall be in the `main_program`. + +The ops of a `startup_program` with readers would be like this: + +``` +multiple_reader = open_files_op(...) +batch_reader = create_batch_reader_op(multiple_reader) +double_buffer_reader = create_double_buffer_op(batch_reader) +... 
(other initializers) +``` + +The forwarding ops of the corresponding `main_program` would be like this: + +``` +not_completed = true +pass_count = 0 +while_op(not_completed) { + has_next = has_next_op(double_buffer_reader) + if_else_op(has_next) { + batch_data = read_op(double_buffer_reader) + ... (subsequent training ops) + } else { + reset_op(double_buffer_reader) + increase_op(pass_count) + not_completed = less_than_op(pass_count, reqiured_pass_num) + } +} +``` + +A few important considerations for these programs are as follows: + +1. `not_completed`, `pass_count` and other variables shown above are all Fluid Variables. + +2. The multiple\_reader is the batch\_reader's underlying reader, and the batch\_reader is the double\_buffer\_reader's underlying reader. `read_op`, `has_next_op` and other reader related ops will only invoke the top-most reader. In this case, it's the double\_buffer\_reader. + +3. All readers exist in both `startup_program` and `main_program`. And they are persistable. + +### Simplify Configuration by MultiPassReader + +The Program configuration mentioned above is complicated. Users need to be very familiar to concepts of Program and Block to prevent making mistakes in their code. To make the usage of C++ readers more friendly to new users, we introduce `MultiPassReader`. + +`MultiPassReader` is a decorated reader. A multi-pass reader is used to continuously yield data for several training passes. It takes the number of passes to run as one of its attributes('pass_num') and maintains a counter to record how many passes it has completed. Each time its underlying reader reaches the EOF, the multi-pass reader checks whether it has completed the training of given number of pass. If not, the underlying reader will be re-initialized and starts a new pass automatically. Before completing the whole training, the return of MultiPassReader's `HasNext()` will always be `true`. + +With `MultiPassReader`, the startup program would be like this: + +``` +multiple_reader = open_files_op(...) +batch_reader = create_batch_reader_op(multiple_reader) +multi_pass_reader = create_multi_pass_reader_op(batch_reader) +double_buffer_reader = create_double_buffer_op(multi_pass_reader) +... (other initializers) +``` + +The forwarding part of the corresponding `main_program` would be like this: + +``` +not_completed = true +while_op(not_completed) { + batch_data = read_op(double_buffer_reader) + ... (subsequent training ops) + not_completed = has_next_op(double_buffer_reader) +} +``` diff --git a/doc/paddle/design/concepts/executor.md b/doc/paddle/design/concepts/executor.md new file mode 100644 index 0000000000000000000000000000000000000000..c436c57587a6e0e95edec635bb3ac47efd5eac28 --- /dev/null +++ b/doc/paddle/design/concepts/executor.md @@ -0,0 +1,29 @@ +# Executor Design Doc + +## Motivation +In [fluid](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/motivation/fluid.md), we encourage the user to use deep learning programming paradigms to describe the training process. When the user-written Python program is executed, it will first create a protobuf message +[`ProgramDesc`](https://github.com/PaddlePaddle/Paddle/blob/a91efdde6910ce92a78e3aa7157412c4c88d9ee8/paddle/framework/framework.proto#L145) that describes the process and is conceptually like an [abstract syntax tree](https://en.wikipedia.org/wiki/Abstract_syntax_tree). + +The executor runs the `ProgramDesc` like an interpreter. 
`ProgramDesc` contains the intrinsics (operators in this case) and variables which will be used, executor explicitly executes the stored precompiled code. + +## Overview + +An executor takes a `ProgramDesc`, a `block_id` and a `Scope`. The `ProgramDesc` is a list of blocks and each block contains the protobuf definition of all the parameters and operators in the block. The `block_id` specifies the entrance block. And the `Scope` is the container of all the variable instances, which is persistent throughout different runs. + +## Executor + +The `Executor` explicitly executes all the intrinsics (operators here) in the `block_id`th block of a `ProgramDesc`. Essentially, it instantiates Variables and Operators, then runs all the operators in sequence one-by-one. +It is very similar to how a push stack frame works when entering a block, following which it cleans up all the temporary variables when a mini-batch is finished. It does not however, have the stack frame pop process. + +### The interface +```c++ + Executor(places); +``` +A executor does not own any computing resources, a user can only construct an executor using the specified places. + +### Running an Executor + +``` + void Run(ProgramDesc, Scope, block_id, create_local_scope); +``` +An `Executor` only provides a unified way to execute `ProgramDesc`. `ProgramDesc` is the target that will be executed, the `Scope` specifies the variable container, the `block_id` indicates the entrance block and `create_local_scope` is a boolean that states whether it will destroy the temporary variables after the execution is finished. diff --git a/doc/paddle/design/concepts/functions_operators_layers.md b/doc/paddle/design/concepts/functions_operators_layers.md new file mode 100644 index 0000000000000000000000000000000000000000..1f86b99e5197c3e0b85fd76fe704520ef21b06d3 --- /dev/null +++ b/doc/paddle/design/concepts/functions_operators_layers.md @@ -0,0 +1,128 @@ +# Design Doc: Functions, Operators, and Layers + +In a DL system, we can compose one or more fine grained operators into a coarse grained one. For example, the FC layer can be composed of a multiplication operator and an add operator. + +Historically, some fine grained operations are known as operators, and some coarse level ones are known as layers. But we need a well-defined separation. + +In general, operators are those very fine grained operations, e.g., mul and add. In the implementation, we can write them as C++ functions: + +```c++ +template T add(T x, T y) { return x + y; } +template T mul(T x, T y) { return x * y; } +``` + +Then we can wrap them into operators which are C++ classes and can be created from Python bindings by name. A C macro can do this. For example, the following macro invocation + +```c++ +#define MAKE_FUNCTION_OPERATOR(mul); +``` + +generates + +```c++ +template class mulOp : public OperatorBase {...}; +REGISTER_OP(mulOp, "mul"); +``` + +so that in Python we can create operator mul by: + +```python +X1 = Var() +X2 = Var() +Y = Var() +paddle.cpp.create_operator("mul", input=[X1, X2], output=Y) +``` + +Also, at the same time, we can compose a coarse level C++ operator class by composing functions `mul` and `add`: + +```c++ +template +class FCOp : public OperatorBase { + public: + void Run(...) { + add(mul(Input("X"), Input("W")), Input("b")); + } +}; +REGISTER_OP(FCOp, "fc"); +``` + +We need to support such composition in Python as well. To do so, we need a higher level Python wrapping of operator creation than `paddle.cpp.create_operator`. 
This higher level operator API should be compatible with the layer API. + +Let's explain using an example. Suppose that we are going to compose the FC using mul and add in Python, we'd like to have Python functions `mul` and `add` defined in module `operator`: + +```python +def operator.mul(X1, X2): + O = Var() + paddle.cpp.create_operator("mul", input={X1, Y1}, output=O) + return O + +def operator.add(X1, X2): + O = Var() + paddle.cpp.create_operator("add", input={X1, X2}, output=O) + return O +``` + +Above code snippets are automatically generated. Given them, users can define + +```python +def layer.fc(X): + W = Var() + b = Var() + return operator.add(operator.mul(X, W), b) +``` + +If we don't have `operator.mul` and `operator.add`, the definiton of `layer.fc` would be complicated: + +```python +def layer.fc(X): + W = Var() + b = Var() + O1 = Var() + paddle.cpp.create_operator("mul", input=[X, W], output=O1) + O2 = Var() + paddle.cpp.create_operator("add", input=[O1, b], output=O2) + return O2 +``` + +We'd like to have Python bindings to operators in package `paddle.operator`, and Python compositions of operators in package `paddle.layer`. So we have the following concepts in above illustrative example: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
|                        | mul          | add          | fc          |
|------------------------|--------------|--------------|-------------|
| C++ functions/functors | mul          | add          |             |
| C++ operator class     | mulOp        | addOp        | FCOp        |
| Python binding         | operator.mul | operator.add | operator.fc |
| Python function        |              |              | layer.fc    |
+ + +This is how we differentiate layer and operators in PaddlePaddle: + +- those defined in C++ and have a lightweighted Python wrapper in module `operators` are operators; whereas +- those who don't have C++ implementations but a Python implementation that compose C++ operators are known as layers. diff --git a/doc/paddle/design/concepts/images/multiple_reader.png b/doc/paddle/design/concepts/images/multiple_reader.png new file mode 100644 index 0000000000000000000000000000000000000000..b22126b31db4982c13fc3a0827805e6aaf955046 Binary files /dev/null and b/doc/paddle/design/concepts/images/multiple_reader.png differ diff --git a/doc/paddle/design/concepts/images/parallel_executor_overview.dot b/doc/paddle/design/concepts/images/parallel_executor_overview.dot new file mode 100644 index 0000000000000000000000000000000000000000..40753cb140540c08d9d4c449b8d377e315280436 --- /dev/null +++ b/doc/paddle/design/concepts/images/parallel_executor_overview.dot @@ -0,0 +1,83 @@ +digraph G { + subgraph cluster_init { + label="Initialization" + startup_program [label="startup", shape=box] + node_w_g0 [label="W\nGPU0"] + startup_program -> node_w_g0 [label="Initialize"] + node_w_g1 [label="W\nGPU1"] + node_w_g0 -> node_w_g1 [label="broadcast"] + } + + subgraph cluster_train { + label="forward_backward" + + subgraph cluster_gpu0 { + label="GPU0" + fc_0 [label="fc\nGPU0", shape=box] + hidden_0 [label="hidden\nGPU0"] + node_w_g0 -> fc_0 + fc_0 -> hidden_0 + loss0 [label="loss\nGPU0"] + hidden_0 -> loss0 [label="many ops omitted"] + scale_loss_0 [label="scale_loss_gradient\nGPU0", shape=box] + loss_g0 [label="loss_grad\nGPU0"] + scale_loss_0->loss_g0 + + fc_g_0 [label="w_grad\nGPU0", shape=box] + loss0 -> fc_g_0 + loss_g0 -> fc_g_0 + hidden_0 -> fc_g_0 + } + + subgraph cluster_gpu1 { + label="GPU1" + fc_1 [label="fc\nGPU1", shape=box] + hidden_1 [label="hidden\nGPU1"] + node_w_g1 -> fc_1 + fc_1 -> hidden_1 + loss1 [label="loss\nGPU1"] + hidden_1 -> loss1 [label="many ops omitted"] + scale_loss_1 [label="scale_loss_gradient\nGPU1", shape=box] + loss_g1 [label="loss_grad\nGPU1"] + scale_loss_1->loss_g1 + + fc_g_1 [label="w_grad\nGPU1", shape=box] + loss1 -> fc_g_1 + loss_g1 -> fc_g_1 + hidden_1 -> fc_g_1 + } + } + + all_reduce_w [label="Merge Gradients(AllReduce)", shape=box] + fc_g_0 -> all_reduce_w + fc_g_1 -> all_reduce_w + + fc_g_0_merged [label="w_grad\nMerged\nGPU0"] + fc_g_1_merged [label="w_grad\nMerged\nGPU1"] + all_reduce_w -> fc_g_0_merged + all_reduce_w -> fc_g_1_merged + + subgraph cluster_optimization { + label="Optimization" + subgraph cluster_opt_gpu0 { + label="GPU0" + sgd_0 [label="SGD Op\nGPU0", shape=box] + + fc_g_0_merged -> sgd_0 + node_w_g0 -> sgd_0 + optimized_w_0 [label="Optimized W\nGPU0"] + sgd_0 -> optimized_w_0 + } + subgraph cluster_opt_gpu1 { + label="GPU1" + sgd_1 [label="SGD Op\nGPU1", shape=box] + + fc_g_1_merged -> sgd_1 + node_w_g1 -> sgd_1 + optimized_w_1 [label="Optimized W\nGPU0"] + sgd_1 -> optimized_w_1 + } + } + + +} diff --git a/doc/paddle/design/concepts/images/parallel_executor_overview.png b/doc/paddle/design/concepts/images/parallel_executor_overview.png new file mode 100644 index 0000000000000000000000000000000000000000..d890c0ffee3b38dc7cb74a2b56c2ab4831532211 Binary files /dev/null and b/doc/paddle/design/concepts/images/parallel_executor_overview.png differ diff --git a/doc/paddle/design/concepts/images/readers.png b/doc/paddle/design/concepts/images/readers.png new file mode 100644 index 
0000000000000000000000000000000000000000..fd59168ce16c9e2a0ef45303c28c997cfd7740be Binary files /dev/null and b/doc/paddle/design/concepts/images/readers.png differ diff --git a/doc/paddle/design/concepts/index.html b/doc/paddle/design/concepts/index.html new file mode 100644 index 0000000000000000000000000000000000000000..646689dfe1ff74bf617962153b632742be7ac1a8 --- /dev/null +++ b/doc/paddle/design/concepts/index.html @@ -0,0 +1,238 @@ + + + + + + + + + + + + + + + + + +
+
+ + + + + + + diff --git a/doc/paddle/design/concepts/index_cn.rst b/doc/paddle/design/concepts/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..dcdc894937ff328e6002623275ca3c65e87b2bb0 --- /dev/null +++ b/doc/paddle/design/concepts/index_cn.rst @@ -0,0 +1,19 @@ +核心概念 +------------- + +.. toctree:: + :maxdepth: 1 + + README.md + cpp_data_feeding.md + functions_operators_layers.md + program.md + variable.md + var_desc.md + tensor.md + tensor_array.md + lod_tensor.md + block.md + scope.md + executor.md + parallel_executor.md diff --git a/doc/paddle/design/concepts/index_en.rst b/doc/paddle/design/concepts/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..b85a3055746facaa642e8fc899976b58435f1ef2 --- /dev/null +++ b/doc/paddle/design/concepts/index_en.rst @@ -0,0 +1,19 @@ +Core Concepts +-------------------------------------- + +.. toctree:: + :maxdepth: 1 + + README.md + cpp_data_feeding.md + functions_operators_layers.md + program.md + variable.md + var_desc.md + tensor.md + tensor_array.md + lod_tensor.md + block.md + scope.md + executor.md + parallel_executor.md diff --git a/doc/paddle/design/concepts/lod_tensor.md b/doc/paddle/design/concepts/lod_tensor.md new file mode 100644 index 0000000000000000000000000000000000000000..d488b546d26200c3bd3e867d49df85e2f30eb48e --- /dev/null +++ b/doc/paddle/design/concepts/lod_tensor.md @@ -0,0 +1,211 @@ +# Design Doc: LoD (Level-of-Detail) Tensor + +Like other deep learning systems, PaddlePaddle supports training models from sequence data. Also, like other systems, PaddlePaddle represent a mini-batch of sequences as a Tensor. What is different is that PaddlePaddle doesn't require all sequences in a mini-batch to be of the same length. Thus no need for padding zeros. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
|                | TensorFlow | PaddlePaddle |
|----------------|------------|--------------|
| RNN            | Support    | Support      |
| recursive RNN  | Support    | Support      |
| padding zeros  | Must       | No need      |
| blob data type | Tensor     | LoDTensor    |
+ + +PaddlePaddle achieves this flexibility by passing through a new data type, *LoD Tensor*, which is a Tensor attached with segmentation index known as *LoD*, between operators. The LoD index doesn't only segment a tensor, but also recursively segments sub-sequences. This document presents the design of LoD and LoDTensor. + + +## The Challenge: Variable-length Sequences + +Most deep learning systems represent a mini-batch as a Tensor. For example, a mini-batch of 10 images, each of size 32x32, is a 10x32x32 Tensor. Another example is that each mini-batch contains N sentences, where each word is a D-dimensional one-hot vector. Suppose that all sentences have the same length L, we can represent this mini-batch by a NxLxD tensor. + +Both examples show that the elements of sequences are usually of the same size. In the first example, all images are 32x32, and in the second one, all words are D-dimensional vectors. It doesn't make sense to allow variable-sized images, as that would require transformations like convolution to handle variable-sized Tensors. + +The real challenge is that in most cases, sentences have variable lengths, and we will need an index data structure to segment the tensor into sequences. Also, sequences might consist of sub-sequences. + + +## A Solution: The LoD Index + +To understand our solution, it is best to look at some examples. + +### A Mini-Batch of Sentences + +Let's imagine a mini-batch of 3 variable lengths sentences composed of 3, 1, and 2 words, respectively. We can represent the mini-batch by a (3+1+2)xD tensor plus some index information: + +``` +3 1 2 +||| | || +``` + +where each `|` represents a D-dimensional word vector. The numbers, 3, 1, and 2, form a 1-level LoD. + +### Recursive Sequences + +Let check another example of a 2-level LoD Tensor. Consider a mini-batch of three articles with 3, 1, and 2 sentences, and each sentence consists of a variable number of words: + +``` +3 1 2 +3 2 4 1 2 3 +||| || |||| | || ||| +``` + +### A Mini-Batch of Videos + +LoD tensors generalize to the case where elements are higher dimensional objects, like images. Suppose that a mini-batch contains videos of the same frame size 640x480. Here is a mini-batch of 3 videos with 3, 1, and 2 frames, respectively. + +``` +3 1 2 +口口口 口 口口 +``` + +The underlying tensor is of size (3+1+2)x640x480, and each `口` represents a 640x480 image. + +### A Mini-Batch of Images + +In traditional cases like a mini-batch with N fixed-sized images, the LoD Tensor representation is as + +``` +1 1 1 1 1 +口口口口 ... 口 +``` + +In this case, we don't lose any information by ignoring the many 1's in the index and simply considering this LoD Tensor as a usual Tensor: + +``` +口口口口 ... 口 +``` + +### Model Parameters + +A model parameter is just a usual Tensor, which, just like the above example, is a **0-level LoD Tensor**. + + +## The LoD Tensor + +Let us revisit above example of the 2-level LoD Tensor + +``` +3 1 2 +3 2 4 1 2 3 +||| || |||| | || ||| +``` + +It is indeed a tree, where leaves are elementary sequences identified by **branches**. + +For example, the third sentence in above example is identified by branch <0,2>, where 0 indicates the first article with length 3, and 2 indicates the third sentence in this article with length 4. 
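As a small illustration (a sketch, not PaddlePaddle code), the length-based LoD of this example can be converted to the offset form introduced in the following sections, after which branch <0,2> maps to a half-open range of words:

```c++
#include <cstddef>
#include <iostream>
#include <vector>

// Length-based LoD of the 2-level example: 3 articles with 3, 1, 2 sentences,
// and per-sentence word counts 3 2 4 1 2 3.
typedef std::vector<std::vector<size_t>> LoD;

// Convert lengths to offsets by accumulation, e.g. {3, 2, 4} -> {0, 3, 5, 9}.
std::vector<size_t> LengthsToOffsets(const std::vector<size_t>& lens) {
  std::vector<size_t> offsets(1, 0);
  for (size_t len : lens) offsets.push_back(offsets.back() + len);
  return offsets;
}

int main() {
  LoD lod = {{3, 1, 2}, {3, 2, 4, 1, 2, 3}};
  std::vector<size_t> article_offsets = LengthsToOffsets(lod[0]);   // 0 3 4 6
  std::vector<size_t> sentence_offsets = LengthsToOffsets(lod[1]);  // 0 3 5 9 10 12 15

  // Branch <0,2>: the third sentence of the first article.
  size_t article = 0, sentence_in_article = 2;
  size_t sentence = article_offsets[article] + sentence_in_article;  // global sentence index 2
  std::cout << "branch <0,2> covers words [" << sentence_offsets[sentence]
            << ", " << sentence_offsets[sentence + 1] << ")\n";  // prints [5, 9)
  return 0;
}
```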
+ +### The LoD Index + +We can save the LoD index in the above example + +``` +3 1 2 +3 2 4 1 2 3 +``` + +in a not-full 2D matrix: + +```c++ +typedef std::vector > LoD; +``` + +where + +- `LoD.size()` is the number of levels, or the maximum length of branches, +- `LoD[i][j]` is the length of the j-th segment at the i-th level. + +## The Offset Representation + +To quickly access elementary sequences, we adopt an offset representation -- instead of saving the lengths, we save the beginning and ending elements of sequences. + +In the above example, we accumulate the length of elementary sequences: + +``` +3 2 4 1 2 3 +``` + +into offsets + +``` +0 3 5 9 10 12 15 + = = = = = = + 3 2+3 4+5 1+9 2+10 3+12 +``` + +so we know that the first sentence is from word 0 to word 3, and the second sentence from word 3 to word 5. + +Similarly, the lengths in the top level LoD + +``` +3 1 2 +``` + +are transformed into offsets of elements/words as follows: + +``` +0 3 4 6 + = = = + 3 3+1 4+2 +``` + +## Slicing of LoD Tensors + + +When we use the above 2-level LoD Tensor as the input to a nested-RNN, we need to retrieve certain sequences. Here we define the sequence identified by branch as the **-slice**. + +For example, the <2>-slice of above example is + +``` +10 15 +10 12 15 + || ||| +``` + +and the <2,0>-slice of above slice is + +``` +10 12 + || +``` + +## Length Representation vs Offset Representation + +The offset representation is an implementation-oriented decision and it makes understanding the idea behind LoDTensor difficult. +Hence, we encapsulate this implementation detail in C++ and expose the original length representation in our Python API. +Specifically, we call this length representation `recursive_sequence_lengths` and users can use the following code to set or get the `recursive_sequence_lengths` of a LoDTensor in Python: +```Python +# length representation of lod called recursive_sequence_lengths +recursive_seq_lens = [[3, 1, 2], [2, 2, 1, 3, 1, 2]] +# Create a LoDTensor that has the above recursive_sequence_lengths info. +# This recursive_sequence_lengths will be converted to an offset representation of LoD in the C++ implementation under the hood. +tensor = fluid.LoDTensor(lod) + +# Set/Change the recursive_sequence_lengths info of LoDTensor +tensor.set_recursive_sequence_lengths([[3, 1, 2]]) +# Get the recursive_sequence_lengths info of a LoDTensor (the offset-based LoD representation stored in C++ will be converted +# back to length-based recursive_sequence_lengths), new_recursive_seq_lens = [[3, 1, 2]] +new_recursive_seq_lens = tensor.recursive_sequence_lengths() +``` diff --git a/doc/paddle/design/concepts/parallel_executor.md b/doc/paddle/design/concepts/parallel_executor.md new file mode 100644 index 0000000000000000000000000000000000000000..3745ef5d28df64d5f6280cd90e3bfec9b6a601e7 --- /dev/null +++ b/doc/paddle/design/concepts/parallel_executor.md @@ -0,0 +1,104 @@ +# ParallelExecutor + +## Background + +Neural network models are defined as a `ProgramDesc` in Fluid. The `ProgramDesc` can be executed by an interpreter(i.e. the `executor` concept in Fluid). The instructions or operators in a `Program` will be executed, and the results will be fetched in Python side. + +The executor is a very naive interpreter. It runs operators one by one. We can use `Parallel.Do` to support data parallelism, however, lacking device information in `ProgramDesc`; it is not possible to optimize the performance of `Parallel.Do`. + +We want a `ProgramDesc` can be run on different nodes. 
It is better not to contain device information in `ProgramDesc`. However, we can write a high-performance interpreter, which can hold an alternative intermediate representation of `ProgramDesc`, to take full usage of Multi-GPUs. + +ParallelExecutor is an interpreter of `ProgramDesc` which will [out-of-order execute](https://en.wikipedia.org/wiki/Out-of-order_execution) `Program` in data parallelism mode and maximise the utility of Multi-GPUs. + + +## Overview of MultiGPUs logic + +The ParallelExecutor takes the startup program and main program as inputs. The parameters will be initialised on `GPU0` by startup program and will broadcast to multi-GPUs. The main program will be duplicated into multi-GPUs. The gradient will be merged during each iteration, and each device will optimize parameters independently. Since the gradients on each device will be merged before parameter optimization, the parameters will be the same on each device and it does not need to be broadcast the parameters. + +![alt](images/parallel_executor_overview.png) + +There are several optimizations for this logic. + +1. We use an alternate representation in ParallelExecutor. It because the device information is critical for performance optimization. +2. The execution is out-of-order, i.e., an operator will be executed whenever the inputs of the operator are ready. + * GPU is a high-performance device; only one CPU thread cannot fulfil one GPU. So there is a thread pool to execute operators. + * Out-of-order also helps transpilers to generate `ProgramDesc`. It is no need to concern about the best order of performance when implementing a transpiler. +3. The streams of computation, merge gradients and fetch data are different. + +The performance of `ResNeXt152` on `TitanX` which `batch_size=12` is shown below. + +| Number of GPUs | 1 | 2 | 3 | 4| +| --- | --- | --- | --- | --- | +| Image/Sec | 17.9906 | 25.771 | 36.911 | 48.8428 | +| Speed Up | N/A | 1.43247029 | 2.05168255 | 2.71490667 | + + +## Static single assignment Graph + +[Static single assignment form](https://en.wikipedia.org/wiki/Static_single_assignment_form)(`SSA` for short) is a common form for compiler optimization. To implement concurrent execution, we uses an `SSA` graph as an intermedia representation of `ProgramDesc`. + +The `Program` is a directed acyclic graph, since a variable can be assigned multiple times. We enforce a variable will be assigned once, by adding version number to varaibles. We parsing the `Program` into a `SSA` graph. Also, ProgramExecutor duplicate `Program` into multi-devices. We also add a device number to varaibles and insert `NCCLAllReduce` into Graph. + +The data structure of `SSA` graph is: + +```c++ +struct VarHandleBase { + OpHandleBase* generated_op_; + vector pending_ops_; + + string name; + Place place; + size_t version; +}; + +struct OpHandleBase { + vector inputs_; + vector outputs_; +}; + +struct SSAGraph { + // vars on each devices. + // * the vars in each map in vector is on different device. + // * the map is mapping a variable name to variable handles + // with different versions + vector>> vars_; + + // All ops + vector ops_; +}; +``` +The variable handles are the wrapper of `Variables`. The operator handles are the wrapper of `OperatorBase`. Some `OpHandle` is not an `OperatorBase`, such as `NCCLAllReduceOpHandle`, because `AllReduceOpHandle` will use new device contexts. 
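As a rough sketch of how such a graph drives execution (the scheduling algorithm itself is spelled out in the "Execute SSA Graph" section below), the toy code below counts unresolved inputs per operator and runs every operator whose count drops to zero. `ToyOp` is a simplified stand-in, not the real `OpHandleBase`:

```c++
#include <iostream>
#include <queue>
#include <string>
#include <vector>

// Simplified stand-in for OpHandle/VarHandle wiring: each op records how many
// inputs it is still waiting for and which ops consume its outputs.
struct ToyOp {
  std::string name;
  int pending_inputs;           // inputs not generated yet
  std::vector<int> downstream;  // indices of ops consuming this op's outputs
};

int main() {
  // A tiny graph: fc -> loss -> w_grad, and scale_loss -> w_grad.
  std::vector<ToyOp> ops = {
      {"fc", 0, {1}},          // 0
      {"loss", 1, {3}},        // 1
      {"scale_loss", 0, {3}},  // 2
      {"w_grad", 2, {}},       // 3: waits for loss and scale_loss
  };

  // Ops whose inputs are all ready can run in any order (or concurrently).
  std::queue<int> ready;
  for (size_t i = 0; i < ops.size(); ++i)
    if (ops[i].pending_inputs == 0) ready.push(static_cast<int>(i));

  while (!ready.empty()) {
    int cur = ready.front();
    ready.pop();
    std::cout << "run " << ops[cur].name << "\n";  // a real executor dispatches to a thread pool
    for (int next : ops[cur].downstream)
      if (--ops[next].pending_inputs == 0) ready.push(next);
  }
  return 0;
}
```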
+ +When the `ProgramDesc` converted into an `SSA` Graph, the [data hazard](https://en.wikipedia.org/wiki/Hazard_(computer_architecture)) problem is also need to be taken care. The dummy variables, which represent the dependency between operators, will be manually inserted into SSA graph to resolve the [data hazard](https://en.wikipedia.org/wiki/Hazard_(computer_architecture)) problem. + +## Execute SSA Graph + +The SSA graph can be out-of-order executed by an approximate [topological sorting](https://en.wikipedia.org/wiki/Topological_sorting) algorithm. The algorithm is + +1. Maintaining a map of an operator and its needed input number. +2. If a variable is not generated by an operator, i.e., `var.generated_op == nullptr`, decrease the needed input number of its pending operators. +3. If there is an operator which needed input number is decreased to zero, just run this operator. +4. After run this operator, just mark the variables are generated and repeat step 2 until all variables are generated. + +Running an operator can be asynchronized. There is a thread pool to execute an `SSA` graph. + +## Synchronize GPU Kernels + +The GPU is a non-blocking device. The different streams need be synchronized when switching streams. In current implementation, the synchronization based on the following algorithm: + +1. `OpHandle` will record `DeviceContext` that it is used. +2. In `OpHandle::Run`, if the `DeviceContext` of current operator is different from `DeviceContext` of any input variable, just wait the generate operator of this input variable. + +The `wait` are implemented by two strategies: + +1. Invoke `DeviceContext->Wait()`, It will wait all operators on this device contexts complete. +2. Uses `cudaStreamWaitEvent` to sending a event to the stream. It is a non-blocking call. The wait operators will be executed in GPU. + +Generally, the `cudaStreamWaitEvent` will have a better perforamnce. However, `DeviceContext->Wait()` strategy is easier to debug. The strategy can be changed in runtime. + +## What's next? + +* Merging gradient of dense parameters has been done. However, the merging of sparse parameters has not been done. +* The CPU version of Parallel Executor has not been implemented. The out-of-order logic will make CPU compuatation faster, too. +* A better strategy to merge gradients can be introduced. We can shrink the gradients from `float32` to `int8` or `int4` while merging. It will significantly speed up multi-GPUs training without much loss of precision. +* Combine multi-Nodes implementation. By the benifit of out-of-order, sending and recving operator can be an blocking operator, and the transpiler does not need to concern about the best position of operator. diff --git a/doc/paddle/design/concepts/program.md b/doc/paddle/design/concepts/program.md new file mode 100644 index 0000000000000000000000000000000000000000..243bdb738542d9f6172970af857f64dea04d93c3 --- /dev/null +++ b/doc/paddle/design/concepts/program.md @@ -0,0 +1,139 @@ +# Design Doc: PaddlePaddle Programs + +## Compile and Execution + +A PaddlePaddle program consists of two parts -- the first generates a `ProgramDesc` protobuf message that describes the program, and the second runs this message using a C++ class `Executor`. 
+ +A simple example PaddlePaddle program can be found in [graph.md](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/others/graph.md): + +```python +x = layer.data("images") +l = layer.data("label") +y = layer.fc(x) +cost = layer.mse(y, l) +optimize(cost) +train(cost, reader=mnist.train()) +``` + +The first five lines of the following PaddlePaddle program generates, or, compiles, the `ProgramDesc` message. The last line runs it. + +## Programs and Blocks + +The basic structure of a PaddlePaddle program is some nested blocks, as a C++ or Java program. + +- program: some nested blocks +- [block](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/concepts/block.md): + - some local variable definitions, and + - a sequence of operators + +The concept of block comes from usual programs. For example, the following C++ program has three blocks: + +```c++ +int main() { // block 0 + int i = 0; + if (i < 10) { // block 1 + for (int j = 0; j < 10; j++) { // block 2 + } + } + return 0; +} +``` + +The following PaddlePaddle program has three blocks: + +```python +import paddle as pd // block 0 + +x = minibatch([10, 20, 30]) # shape=[None, 1] +y = var(1) # shape=[1], value=1 +z = minibatch([10, 20, 30]) # shape=[None, 1] +cond = larger_than(x, 15) # [false, true, true] + +ie = pd.ifelse() +with ie.true_block(): // block 1 + d = pd.layer.add_scalar(x, y) + ie.output(d, pd.layer.softmax(d)) +with ie.false_block(): // block 2 + d = pd.layer.fc(z) + ie.output(d, d+1) +o1, o2 = ie(cond) +``` + +## `BlockDesc` and `ProgramDesc` + +All protobuf messages are defined in `framework.proto`. + +`BlockDesc` is straight-forward -- it includes local variable definitions, `vars`, and a sequence of operators, `ops`. + +```protobuf +message BlockDesc { + required int32 parent = 1; + repeated VarDesc vars = 2; + repeated OpDesc ops = 3; +} +``` + +The parent ID indicates the parent block so that operators in a block can refer to variables defined locally and also those defined in their ancestor blocks. + +All hierarchical blocks in a program are flattened and stored in an array. The block ID is the index of the block in this array. + +```protobuf +message ProgramDesc { + repeated BlockDesc blocks = 1; +} +``` + + +### Global Block + +The global block is the first one in the above array. + +## Operators that Use Blocks + +In the above example, the operator `IfElseOp` has two blocks -- the true branch and the false branch. + +The definition of `OpDesc` shows that an operator could have some attributes: + +```protobuf +message OpDesc { + AttrDesc attrs = 1; + ... +} +``` + +and an attribute could be of type block, which is, in fact, a block ID as described above: + +``` +message AttrDesc { + required string name = 1; + + enum AttrType { + INT = 1, + STRING = 2, + ... + BLOCK = ... + } + required AttrType type = 2; + + optional int32 block = 10; // when type == BLOCK + ... +} +``` + +## InferShape + +With this design, the InferShape function should take the following parameters: + +```c++ +void InferShape(int current_block, + int current_operator, + ProgramDesc* program // might change VarDesc values. + ) { + ... +} +``` + +where + +- `current_block` indices into `ProgramDesc::blocks`, +- `current_operator` indices into `BlockDesc::ops`. 
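To illustrate how these two indices might be dereferenced together with the parent-ID lookup described above, here is a hedged sketch that uses simplified in-memory structs in place of the real protobuf messages generated from `framework.proto`:

```c++
#include <iostream>
#include <string>
#include <vector>

// Simplified stand-ins for the protobuf messages.
struct VarDesc { std::string name; std::vector<int> shape; };
struct OpDesc { std::string type; std::vector<std::string> inputs; };
struct BlockDesc { int parent; std::vector<VarDesc> vars; std::vector<OpDesc> ops; };
struct ProgramDesc { std::vector<BlockDesc> blocks; };

// Resolve a variable name in the current block, falling back to ancestor
// blocks via the parent ID.
VarDesc* FindVar(ProgramDesc* program, int block_id, const std::string& name) {
  for (int b = block_id; b >= 0; b = program->blocks[b].parent) {
    for (auto& var : program->blocks[b].vars)
      if (var.name == name) return &var;
    if (b == 0) break;  // the global block has no parent
  }
  return nullptr;
}

void InferShape(int current_block, int current_operator, ProgramDesc* program) {
  const OpDesc& op = program->blocks[current_block].ops[current_operator];
  for (const std::string& in : op.inputs) {
    VarDesc* var = FindVar(program, current_block, in);
    std::cout << op.type << " input " << in
              << (var ? " found, rank " + std::to_string(var->shape.size())
                      : " not found")
              << "\n";
  }
  // A real implementation would write the inferred output shapes back into
  // the VarDesc entries of `program`.
}

int main() {
  ProgramDesc program;
  program.blocks.push_back({0, {{"x", {10, 20}}}, {}});                     // global block
  program.blocks.push_back({0, {{"y", {10, 1}}}, {{"mul", {"x", "y"}}}});   // child block
  InferShape(/*current_block=*/1, /*current_operator=*/0, &program);
  return 0;
}
```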
diff --git a/doc/paddle/design/concepts/python_data_feeding.md b/doc/paddle/design/concepts/python_data_feeding.md new file mode 100644 index 0000000000000000000000000000000000000000..f68cbbf7864e68db2a595de0e21d20a0bb89404c --- /dev/null +++ b/doc/paddle/design/concepts/python_data_feeding.md @@ -0,0 +1,130 @@ +# Python Data Feeding + +In the former implementation of Paddle Fluid, there are two ways to feed data: + +- Use `reader_op` in backend C++ side. This method only supports data feeding from recordio files and random data generators, but supports many kinds of `decorated_readers`. For examples, `double_buffer_reader` uses two threads to achieve better performance: one for time-consuming I/O operations, and the other for `Executor::Run()`. See [C++ Data Feeding](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/concepts/cpp_data_feeding.md) for details. + +- Feed data directly using `DataFeeder.feed()` in Python codes. It is more flexible than the first way. Many kinds of preprocessing steps can be performed before feeding using Python or any other languages, instead of adding many uncommon `operators` in C++ side. But this method is less efficient: the program cannot read the next mini-batch data before `Executor::Run()` ends. Moreover, `decorated_readers` such as `double_buffer_reader` cannot be used for better performance. + +In this document, we design a Python Data Feeding process combining the efficiency of the first way and the flexibility of the second way. A data queue `LoDTensorBlockingQueue` is designed to be shared by the Python and C++ side, while `LoDTensorArray` is pushed into the queue in Python side and `reader_op` in C++ side reads out the data from the queue. + + +## Design of LoDTensorBlockingQueue +`LoDTensorBlockingQueue` is a blocking queue with a fixed `capacity` and accepts `std::vector` with shapes indicated by `dims`. Since `LoDTensorBlockingQueue` must be constructed using `capacity` and `dims`, it cannot be a `Variable` type. Therefore, a `LoDTensorBlockingQueueHolder` is designed to defer construction of `LoDTensorBlockingQueue`. + +```C++ +class LoDTensorBlockingQueueHolder; + +class LoDTensorBlockingQueue { + friend class LoDTensorBlockingQueueHolder; + private: + // `LoDTensorBlockingQueue` can only be constructed by + // `LoDTensorBlockingQueueHolder::InitOnce()` + LoDTensorBlockingQueue(size_t capacity, const std::vector& dims); + + public: + size_t Size() const { return queue_.Size(); } // Get the current size of the queue + + size_t Cap() const { return queue_.Cap(); }// Get the capacity of the queue + + void Close() { return queue_.Close(); } + + bool IsClosed() const { return queue_.IsClosed(); } + + // Block if Size() == Cap() + // Return false only when queue_.IsClosed() == true + bool Push(const std::vector &lod_tensor_vec); + + // Block if Size() == 0. 
+ // *Success == false when queue_.IsClosed() == true + std::vector Pop(bool *success = nullptr); + + private: + // Use reader::BlockingQueue as the inner data structure + BlockingQueue> queue_; + std::vector dims_; +}; + +class LoDTensorBlockingQueueHolder { + public: + // Call the constructor of `LoDTensorBlockingQueue` to create queue_ + // `InitOnce` can only called once, otherwise an exception would raise + void InitOnce(size_t capacity, const std::vector& dims) { + PADDLE_ENFORCE(queue_ == nullptr); + queue_.reset(new LoDTensorBlockingQueue(capacity, dims)); + } + + const std::shared_ptr& GetQueue() const { return queue_; } + + private: + std::shared_ptr queue_; +}; +``` + +There are some major things that must be concerned: +- `LoDTensorBlockingQueueHolder` should be a `Variable` in global scope, so that `reader_op` can find it when reading data. +- A `Variable` of `LoDTensorBlockingQueueHolder` but not `VarDesc` must be created in Python code before `Executor::Run()` so that `Executor::Run()` can get the feeding data when it is called. +- `Create_reader_op` should accept the name of the `LoDTensorBlockingQueueHolder` variable as an input. + + +## Release of the GIL in pybind +`Pybind11::gil_scoped_release` is used to release GIL (Global Interpreter Lock) when `LoDTensorBlockingQueue::Push()` or `Executor::Run()` method are invoked in Python side, making `LoDTensorBlockingQueue::Push()` and `Executor::Run()` run in parallel. + + +## Design of PyReader +`PyReader` is a reader which holds a `LoDTensorBlockingQueue` object. +```C++ +class PyReader : public ReaderBase { + public: + explicit PyReader(const std::shared_ptr& queue); + + void ReadNext(std::vector* out) override { + bool success; + *out = queue_->Pop(&success); + if (!success) out->clear(); + } + + void ReInit() override { return; } + + private: + std::shared_ptr queue_; +}; +``` + + +## Design of CreatePyReaderOp +`CreatePyReaderOp` is used to create the `PyReader` object. It requires an input `blocking_queue` which indicates the name of the `LoDTensorBlockingQueueHolder` variable. +```C++ +class CreatePyReaderOp : public framework::OperatorBase { + public: + using framework::OperatorBase::OperatorBase; + private: + void RunImpl(const framework::Scope& scope, + const platform::Place& dev_place) const override { + auto* out = scope.FindVar(Output("Out")) + ->template GetMutable(); + if (out->Get() != nullptr) return; + + const std::string& queue_name = Input("blocking_queue"); + auto* queue_holder_var = scope.FindVar(queue_name); + PADDLE_ENFORCE(queue_holder_var != nullptr); + auto* queue_holder = queue_holder_var + ->template GetMutable(); + out->Reset(new PyReader(queue_holder->GetQueue())); + } +}; +``` + +## Design of Python codes +The design of Python codes are as follows. First, we construct a variable of `LoDTensorBlockingQueueHolder` and init it with given parameters, returning the `LoDTensorBlockingQueue` object after initialization. After that, a layer of `CreatePyReaderOp` is constructed and accepts the name of the `LoDTensorBlockingQueueHolder` variable. The `LoDTensorBlockingQueue` object and result of the layer are both returned. 
+```Python +def py_reader(capacity, shapes): + queue_name = unique_name.generate("lod_tensor_blocking_queue") + var = global_scope().var(feeder_name) # create LoDTensorBlockingQueueHolder Variable + feed_queue = core.init_lod_tensor_blocking_queue(var, capacity, shapes) # init the queue + out = create_var() + create_py_reader_op_with_queue_name( + inputs={'blocking_queue': queue_name}, + outputs={'Out':[out]}) + return out, feed_queue +``` diff --git a/doc/paddle/design/concepts/scope.md b/doc/paddle/design/concepts/scope.md new file mode 100644 index 0000000000000000000000000000000000000000..dcf76649357aaef80d6bc1a933ece8c4c1063547 --- /dev/null +++ b/doc/paddle/design/concepts/scope.md @@ -0,0 +1,124 @@ +# Design of Scope in Paddle + +## Overview + +Scope is an important concept in programming languages, which defines a program region that a set of bindings between names and entities applies. In a specific scope, a valid name is uniquely associated with an entity, such as a variable. And in another scope, this name may refer to other entity or nothing at all. It clearly restricts the visibility and validity of names in a program. Hence **Scope** is introduced to PaddlePaddle to manage variables in context. But different from the original abstract concept, Scope now becomes an object with two important attributes: + +- Scope is an association of a name to variable. +- Variables in a parent scope can be retrieved from local scope. + +A detailed explanation of these two attributes goes as following. + + +## Scope is an association of a name to variable. + +Scope is an association of a name to variable. All variables belong to `Scope`. You need to specify a scope to run a Net, i.e., `net.Run(&scope)`. One net can run in different scopes and update different variable in the scope. + + +1. Scope only contains a map of a name to variable. + + All parameters, data, states in a Net should be variables and stored inside a scope. Each op should get inputs and outputs to do computation from a scope, such as data buffer, state (momentum) etc. + +1. Variable can only be created by Scope and a variable can only be got from Scope. User cannot create or get a variable outside a scope. This is a constraints of our framework, and will keep our framework simple and clear. + +1. Scope only contains methods that are used to Create and Get Variables. Scope do not contain Operators and have no information to run them. + `Net` is designed to drive the computation and Scope only contains a map of variables. There is no computation logic inside a `Scope`. Scope just handles the lifetime management of variables. + - `Create` is used to create a Variable by its name and add the mapping relation. + - `Get` is used to find a Variable by name. + +1. Every variable only belongs to one certain Scope. + + Variable can not belong to many scopes. If you want to use variables from parent scope, you can use `parent scope`. + +1. Scope should destruct all Variables inside it when itself is destructed. User can never store `Variable` pointer somewhere else. + + Because Variable can only be got from Scope. When destroying Scope, we also need to destroy all the Variables in it. If user store `Variable` pointer to private data member or some global variable, the pointer will be an invalid pointer when associated `Scope` is destroyed. 
+ +```cpp +class Scope { + public: + Variable* Var(const std::string& name); + const Variable* FindVar(const std::string& name) const; + + private: + std::unordered_map> vars_; +}; +``` + + +## Parent scope and local scope + +Just like [scope](https://en.wikipedia.org/wiki/Scope_(computer_science)) in programming languages, `Scope` in the neural network can also be a local scope. There are two attributes about local scope. + +1. We can create local variables in a local scope. When that local scope is destroyed, all local variables should also be destroyed. +2. Variables in a parent scope can be retrieved from local scopes of that parent scope, i.e., when user get a variable from a scope, it will try to search this variable in current scope. If there is no such variable in the local scope, `scope` will keep searching from its parent, until the variable is found or there is no parent. + +```cpp +class Scope { + public: + Scope(const std::shared_ptr& scope): parent_(scope) {} + + Variable* FindVar(const std::string& name) const { + auto it = vars_.find(name); + if (it != vars_.end()) { + return it->second.get(); + } else if (parent_ != nullptr) { + return parent_->FindVar(name); + } else { + return nullptr; + } + } + + private: + std::shared_ptr parent_ {nullptr}; +}; +``` + +In `Scope` class, there is a private data member called `parent_`. `parent_` is a smart pointer to its parent scope. When user `Get` a variable by its `name`, the `name` will be searched inside the current scope. If the variable cannot be found locally and parent scope is not a `nullptr`, the variable will be searched inside that parent scope. `parent_` pointer's default value is `nullptr`. It means that the scope is a global scope when `parent_` is nullptr. + +A local scope is very useful when we implement Recurrent Neural Network. Each timestep of an RNN should be a `Net`. Each `Net` of timestep (`StepNet` for short) should use an independent local scope. Just like variables in a while loop is inside a local scope in programming languages. By using a single `StepNet` and changing local scope, we can implement an RNN easily. + +## Interface Design + +```cpp +class Variable { + private: + Variable() = default; + friend class Scope; +}; + +class Scope { + private: + Scope(const std::shared_ptr& parent = nullptr); + + public: + static std::shared_ptr Create(const std::shared_ptr& parent = nullptr); + + // return nullptr if not found. + Variable* FindVar(const std::string& name) const; + + // return if already contains same name variable. + Variable* Var(const std::string& name); + + private: + std::shared_ptr parent_; + std::unordered_map> vars_; +}; +``` +## Only scope can create a variable + +To ensure `only scope can create a variable`, we should mark `Variable`'s constructor as a private member function, and Scope is a friend class of Variable. And then only `Var` can construct `Variable`. + +## When scope destroyed, all variables inside this scope should be destroyed together + +The scope hold unique pointers for all variables. User can `FindVar` from scope, but he should not hold this pointer as a member variable. Because when scope is destroyed, all variables inside this scope will be destroyed together. + +## Sharing a parent scope + +Local scope contains a `parent_` pointer. It is a linked-list for scopes. Using a `shared_ptr` because when a local scope is using, its parents cannot be destroyed. + +Also, as the parent scope is a `shared_ptr`, we can only `Create()` a scope shared pointer. 
We cannot construct a scope variable, because it cannot be passed to other scope as `parent` pointer. + +## Orthogonal interface + +`FindVar` will return `nullptr` when `name` is not found. It can be used as `Contains` method. `Var` will return an `Error` when there is a name conflict locally. Combine `FindVar` and `Var`, we can implement `Var` easily. diff --git a/doc/paddle/design/concepts/tensor.md b/doc/paddle/design/concepts/tensor.md new file mode 100644 index 0000000000000000000000000000000000000000..12a62b7012036e868f965ad5ef9c4f992d67f652 --- /dev/null +++ b/doc/paddle/design/concepts/tensor.md @@ -0,0 +1,189 @@ +# Tensor: An Unified Data Type in PaddlePaddle + +## Pain Point + +In this week, we discussed several potential weaknesses of PaddlePaddle caused by rapid iteration and development to promote new business products on the line in recent four years. For instance, current Matrix/Vector implementation in PaddlePaddle are long and tedious to read, which interfered seriously with the contribution of both fresh and professional engineers. More seriously for this issue, it will also become too challenging to maintain over time. + + +## Learn from Majel + +Consequently, we decide to refactor PaddlePaddle step-by-step. First, refactor and replace Matrix/Vector to Tensor, a modern terminology in the deep learning system. Fortunately, we can learn from Majel how to define a Tensor. + +To simplify heterogeneous resource allocation in any dimensions (1-9) and types (double, float, float16), Majel consists of several primitives such as `Dim`, `Place` and `Array`, all of them are standard C++ class templates. + +1. `Place`: memory location [i.e. CPU/GPU]. +2. `Allocation`: heterogeneous resource allocator [i.e. 20MB in GPU]. +3. `Dim`: size of each dimension. [i.e. Dim<4>({10, 2, 5, 1})] +4. `Array`: dynamic array consists of `Place`, `Dim`, and a pointer to memory. + +If you dig deeper into Majel source code, you will find Majel heavily use `boost.variant`. The variant class template is a safe, generic, stack-based discriminated union container, **offering a simple solution for manipulating an object from a heterogeneous set of types in a uniform manner**. Whereas standard containers such as std::vector may be thought of as "multi-value, single type," variant is "multi-type, single value." + +As a simple example, consider the following: + +```c++ +#include "boost/variant.hpp" +#include + +class my_visitor : public boost::static_visitor +{ +public: + int operator()(int i) const + { + return i; + } + + int operator()(const std::string & str) const + { + return str.length(); + } +}; + +int main() +{ + boost::variant< int, std::string > u("hello world"); + std::cout << u; // output: hello world + + int result = boost::apply_visitor( my_visitor(), u ); + std::cout << result; // output: 11 (i.e., length of "hello world") +} +``` + +In Majel, `DDimVar` is derived from `Dim`, `DArrayVar` is from `Array`. + +```c++ +template +struct Dim { +... +int head; +Dim tail; +} +``` + +```c++ +template +class Array : public Buffer { + ... 
+private: + Dim size_; + Dim stride_; + T* ptr_; +}; +``` + +```c++ +typedef boost::variant Place; +typedef boost::variant, Dim<2>, Dim<3>, Dim<4>, Dim<5>, + Dim<6>, Dim<7>, Dim<8>, Dim<9>> DDimVar; +typedef boost::variant< + Array, + Array, + Array, + Array, + + Array, + Array, + Array, + Array, + + Array, + Array, + Array, + Array > DArrayVar; +``` + +Because `variant` may be thought of as "multi-type, single value", we can utilize it to implement unified interfaces for PaddlePaddle. + +`DDim` plays two kinds of roles in Majel. First, it is used to indicate the size of a tensor. For example, we can construct a new `DArray` by following way: + + ```c++ + DArray arr = make_darray(make_ddim({2,3}), 0.0f); + ``` + It means that `arr` will be a two-dimension tensor, or a matrix. The size of its first dimension is 2 and the second is 3. All the element value of `arr` will be initialized as 0.0 . + + The second meaning of `DDim` is tensor index. For example, if we want to access the value in the 1st row and 2nd column of `arr` and set it to 1.0, we can do like this: + + ```c++ + arr[make_ddim({0, 1})] = 1.0; + ``` + +## Implement Tensor in Paddle + +We want to create a Tensor class to replace Vector and Matrix, and to support high-dimensional data. The operations on Tensor are implemented in both CPU and GPU. We also want to make sure that the Tensor interface is friendly to its callers. + +Tensor is only responsible for describing computing. It will not take charge of memory allocation policy, handles of some CUDA library context(e.g. cublasHandle, cudnnHandle), and dispatching CUDA kernels. Paddle has realize the initialization and resources management of hardware. + +Before writing code, please make sure you already look through Majel Source Code and grabbed the design philosophy of `DArray` in Majel. + + +### Memory Management +`Allocation` manages a block of memory in device(CPU/GPU). We use `Place` to decribe memory location. The details of memory allocation and deallocation are implememted in `Allocator` and `DeAllocator`. Related low-level API such as `hl_malloc_device()` and `hl_malloc_host()` are provided by Paddle. + +### Dim and Array +#### Dim + +`Dim` decribes the dimension information of an array. + +`DDimVar` is an alias of a specializd class of boost.variant class template. + +`DDim` is introduced to represent a dynamically sized dimension. + +For example: + +``` +Dim<2> d1 = make_dim(3, 3); +DDim d2 = make_ddim({1, 2, 3}); +``` + +You must appoint a concrete sized dimension to Dim, whereas DDim can represent a dynamically sized dimension. +#### Array + +`Array` represents for a tensor with specific type and size. + +`DArrarVar` is an alias of a specialized class of boost.variant class template. + +`DArray` is introduced to represent a dynamically typed array. + +For example: + +``` +Array a1(Dim<2>(2, 2)); +DArray a2 = make_darray(make_ddim({3, 4}), 0.0, CpuPlace()); +``` + +You must appoint the type and dimension of a Array, whereas DArray can represent a dynanmically typed array. + + +Please reference the section of `Learn from Majel` for more details. + +### ArrayView + +`ViewIterator` is a class template which implements basic iterator operation, including increment(++), decrement(--), dereference(*), equality comparisons(==) and so on. + +`ArrayView` is an encapsulation of `Array`, which introduces extra iterator methods, such as `begin()` and `end()`. The `begin()` method returns an iterator pointing to the first element in the ArrayView. 
And the `end()` method returns an iterator pointing to the pass-the-end element in the ArrayView. + +`ArrayView` make the visting and manipulating an array more efficiently, flexibly and safely. + + +A global function `make_view` is provided to transform an array to corresponding arrayview. + +``` +template +ArrayView make_view(const Array& in) { + return in; +} +``` + +A global function `make_iterator` is provided to make iterator of an array. + +``` +template +ViewIterator> make_iterator(const Array& in, Dim idx) { + return make_iterator(make_view(in), idx); +} +``` + +### Basic Operations + +The operations that manipulate DArray are defined as global functions, such as `ones`, `zeros`, `reshape`, `gemm` and so on. + +An array will be trasformed into an arrayview and then passed to the operation launching on a specific device(CPU/GPU). diff --git a/doc/paddle/design/concepts/tensor_array.md b/doc/paddle/design/concepts/tensor_array.md new file mode 100644 index 0000000000000000000000000000000000000000..3d149edb5f7f7f2e58d7b3d8a6e433ab47c82e2c --- /dev/null +++ b/doc/paddle/design/concepts/tensor_array.md @@ -0,0 +1,271 @@ +# Design for TensorArray +This design doc presents the necessity of a new C++ class `TensorArray`. +In addition to the very simple C++ implementation + +```c++ +class TensorArray { + public: + explicit TensorArray(const LoDTensor&); + explicit TensorArray(size_t size); + + private: + vector values_; +}; +``` + +We also need to expose it to PaddlePaddle's Python API, +because users would want to use it with our very flexible operators `WhileLoop`. +An example for a RNN based on dynamic operators is + +```python +input = pd.data(...) +num_steps = Var(12) + +TensorArray states(size=num_steps) +TensorArray step_inputs(unstack_from=input) +TensorArray step_outputs(size=num_steps) + +W = Tensor(...) +U = Tensor(...) +default_state = some_op() + +step = Var(1) + +wloop = paddle.create_whileloop(loop_vars=[step]) +with wloop.frame(): + wloop.break_if(pd.equal(step, num_steps) + pre_state = states.read(step-1, default_state) + step_input = step_inputs.read(step) + state = pd.sigmoid(pd.matmul(U, pre_state) + pd.matmul(W, step_input)) + states.write(step, state) + step_outputs.write(step, state) # output state + step.update(state+1) + +output = step_outputs.stack() +``` + +## Background +Steps are one of the core concepts of RNN. In each time step of RNN, there should be several input segments, states, and output segments; all these components act like arrays, for example, call `states[step_id]` will get the state in `step_id`th time step. + +An RNN can be implemented with the following pseudocode + +```c++ +Array states; +Array input_segments; +Array output_segments; +Parameter W, U; + +step = 1 +seq_len = 12 +while_loop { + if (step == seq_len) break; + states[step] = sigmoid(W * states[step-1] + U * input_segments[step]); + output_segments[step] = states[step] // take state as output + step++; +} +``` +According to the [RNN roadmap](https://github.com/PaddlePaddle/Paddle/issues/4561), there are several different RNNs that PaddlePaddle will eventually support. + +Currently, the basic RNN implementation supported by PaddlePaddle is the `recurrent_op` which takes tensors as input and splits them into `input_segments`. + + +Since a tensor cannot store variable-length sequences directly, PaddlePaddle implements the tensor with level of details (`LoDTensor` for short). 
+Segmenting the `LoDTensor` is much more complicated than splitting a tensor, that makes it necessary to refactor the `recurrent_op` with `LoDTensor` segmenting support. + +As the next step in RNN support, `dynamic_recurrent_op` should be introduced to handle inputs with variable-length sequences. + +The implementation is similar to `recurrent_op`. +The key difference is the way **the original input `LoDTensors` and outupts are split to get the `input_segments` and the `output_segments`.** + + +Though it can't be built over `recurrent_op` or `dynamic_recurrent_op` directly, +the logic behind splitting a tensor or a LoD tensor into `input_segments` remains the same. + +## Why `TensorArray` +The logic behind splitting the inputs to segments, states and outputs is similar and can be shared in a seperate module. + +The array of `states`, `input_segments` and `output_segments` would be exposed to users when writing a dynamic RNN model similar to the above pseudo codes. + +So there should be an array-like container, which can store the segments of a tensor or LoD tensor. + +**This container can store an array of tensors and provides several methods to split a tensor or a LoD tensor** . +This is where the notion of `TensorArray` comes from. + +## Introduce TensorArray to uniform all the three RNNs +TensorArray as a new concept is borrowed from TensorFlow, +it is meant to be used with dynamic iteration primitives such as `while_loop` and `map_fn`. + +This concept can be used to support our new design of dynamic operations, and help to refactor some existing variant-sentence-related layers, +such as `recurrent_op`, `RecurrentGradientMachine`. + +In [our design for dynamic RNN](https://github.com/PaddlePaddle/Paddle/pull/4401), +`TensorArray` is used to segment inputs and store states in all time steps. +By providing some methods similar to a C++ array, +the definition of some state-based dynamic models such as RNN can be more natural and highly flexible. + +## Dynamic-operations on TensorArray + +`TensorArray` will be used directly when defining dynamic models, so some operators listed below should be implemented + +```python +# several helper operators for TensorArray +def tensor_array_stack(ta, tensor): + ''' + get a tensor array `ta`, return a packed `tensor`. + ''' + pass + +def tensor_array_unstack(tensor, ta): + ''' + get a `tensor`, unstack it and get a tensor array `ta`. + ''' + pass + +def tensor_array_write(ta, index, tensor, data_shared): + ''' + get a `tensor` and a scalar tensor `index`, write `tensor` into index-th + value of the tensor array `ta`. + `data_shared` is an attribute that specifies whether to copy or reference the tensors. + ''' + pass + +def tensor_array_read(ta, index, tensor): + ''' + get a tensor array `ta`, a scalar tensor `index`, read the index-th value of + `ta` and return as the `tensor`. + ''' + pass + +def tensor_array_size(ta, tensor): + ''' + get a tensor array `ta`, return the size of `ta` and return as the scalar `tensor`. + ''' + pass +``` + +It is trivial for users to use so many low-level operators, so some helper methods should be proposed in python wrapper to make `TensorArray` easier to use, +for example + +```python +class TensorArray: + def __init__(self, name): + self.name = name + self.desc = TensorArrayDesc() + + def stack(self, name=None): + ''' + Pack the values in a `TensorArray` into a tensor with rank one higher + than each tensor in `values`. + `stack` can be used to split tensor into time steps for RNN or whileloop. 
+ + @name: str + the name of the variable to output. + ''' + tensor = Var(name) + tensor_array_stack(self.name, tensor) + return tensor + + def unstack(self, input): + ''' + Unpacks the given dimension of a rank-`R` tensor into rank-`(R-1)` tensors. + `unstack` can be used to concatenate all the time steps for RNN or whileloop. + + @input: str + the name of input tensor + ''' + tensor_array_unstack(tensor, self.name) + + def write(self, index, value, data_shared=True): + ''' + Write value into index of the TensorArray. + If `data_shared` is set to True, than the index-th value in TensorArray will + be shared with the tensor passed in. + + @index: str + name of a scalar tensor + @value: str + name of a tensor + @data_shared: bool + ''' + tensor_array_write(self.name, index, value, data_shared) + + def read(self, index, output): + ''' + Read the value at location `index` in the `TensorArray`. + + @index: str + name of a scalar tensor + @output: + name of a output variable + ''' + tensor_array_read(self.name, index, output) + + + def size(self, output): + ''' + Return the number of values. + + @output: str + name of a scalar tensor + ''' + tensor_array_size(self.name, output) +``` + +## LoDTensor-related Supports +The `RecurrentGradientMachine` in Paddle serves as a flexible RNN layer; it takes varience-length sequences as input, and output sequences too. + +Since each step of RNN can only take a tensor-represented batch of data as input, +some preprocess should be taken on the inputs such as sorting the sentences by their length in descending order and cut each word and pack to new batches. + +Such cut-like operations can be embedded into `TensorArray` as general methods called `unpack` and `pack`, +these two operations are similar to `stack` and `unstack` except that they operate on variable-length sequences formated as a LoD tensor rather than a tensor. + +Some definitions are like + +```python +def unpack(level): + ''' + Split LodTensor in some `level` and generate batches, if set `sort_by_length`, + will sort by length. + + Returns: + - a new `TensorArray`, whose values are LodTensors and represents batches + of data. + - an int32 Tensor, which stores the map from the new batch's indices to + original LoDTensor + ''' + pass + +def pack(level, indices_map): + ''' + Recover the original LoD-arranged LoDTensor with the values in a `TensorArray` + and `level` and `indices_map`. + ''' + pass +``` + +With these two methods, a varience-length sentence supported RNN can be implemented like + +```c++ +// input is the varient-length data +LodTensor sentence_input(xxx); +TensorArray ta; +Tensor indice_map; +Tensor boot_state = xxx; // to initialize rnn's first state +TensorArray::unpack(input, 1/*level*/, true/*sort_by_length*/, &ta, &indice_map); +TessorArray step_outputs; +TensorArray states; + +for (int step = 0; step = ta.size(); step++) { + auto state = states.read(step); + // rnnstep is a function which acts like a step of RNN + auto step_input = ta.read(step); + auto step_output = rnnstep(step_input, state); + step_outputs.write(step_output, true/*data_shared*/); +} + +// rnn_output is the final output of an rnn +LoDTensor rnn_output = ta.pack(ta, indice_map); +``` +the code above shows that by embedding the LoDTensor-related preprocess operations into `TensorArray`, +the implementation of a RNN that supports varient-length sentences is far more concise than `RecurrentGradientMachine` because the latter mixes all the codes together, hard to read and extend. 
diff --git a/doc/paddle/design/concepts/var_desc.md b/doc/paddle/design/concepts/var_desc.md new file mode 100644 index 0000000000000000000000000000000000000000..5835d91142fb3bc521cb81f1bc414371108d749f --- /dev/null +++ b/doc/paddle/design/concepts/var_desc.md @@ -0,0 +1,100 @@ +# Design Doc: Var_desc + +## Background +PaddlePaddle divides the description of neural network computation into two stages: compile time and runtime. At compile time, the neural network computation is described as a `ProgramDesc` whereas at runtime an `Executor` interprets the `ProgramDesc` to compute the operations. + +PaddlePaddle uses proto message to describe compile time program because : + +1. The computation program description must be serializable and saved in a file. +1. During distributed training, the serialized program will be sent to multiple workers. It should also be possible to break the program into different components, each of which can be executed on a different worker. + +The computation `Program` consists of nested `Blocks`. Each `Block` will consist of data(i.e. `Variable`) and `Operations`. The concept to represent them is in the table below. + + + + + + + + + + + + + + + + + + + + + +
|   | compile time | runtime |
|---|---|---|
| Data | VarDesc(proto) | Variable(cpp) |
| Operation | OpDesc(proto) | Operator(cpp) |
+ + +## Definition of VarType + +A VarDesc should have a name, type and whether or not it is persistable. There are different kinds of variable types supported in PaddlePaddle, apart from the POD_Types like: `LOD_TENSOR`, `SELECTED_ROWS`, `FEED_MINIBATCH`, `FETCH_LIST`, `STEP_SCOPES`, `LOD_RANK_TABLE`, `LOD_TENSOR_ARRAY`, `PLACE_LIST`, `READER` and `CHANNEL`. These are declared inside `VarType`. A `VarDesc` then looks as the following: + +```proto +message VarDesc { + required string name = 1; + required VarType type = 2; + optional bool persistable = 3 [ default = false ]; +} +``` + +## Definition of TensorDesc + +```proto +message TensorDesc { + // Should only be PODType. Is enforced in C++ + required Type data_type = 1; + repeated int64 dims = 2; // [UNK, 640, 480] is saved as [-1, 640, 480] +} +``` + +The `Type` here comes from the enum defined inside of `VarType` : + +```proto +enum Type { + // Pod Types + BOOL = 0; + INT16 = 1; + INT32 = 2; + INT64 = 3; + FP16 = 4; + FP32 = 5; + FP64 = 6; + + // Other types that may need additional descriptions + LOD_TENSOR = 7; + SELECTED_ROWS = 8; + FEED_MINIBATCH = 9; + FETCH_LIST = 10; + STEP_SCOPES = 11; + LOD_RANK_TABLE = 12; + LOD_TENSOR_ARRAY = 13; + PLACE_LIST = 14; + READER = 15; + CHANNEL = 16; +} +``` + +A TensorDesc describes `SelectedRows` and `LoDTensor`. For details of `SelectedRows`, please reference `SelectedRows` . + +## Definition of LodTensorDesc + +```proto +message LoDTensorDesc { + required TensorDesc tensor = 1; + optional int32 lod_level = 2 [ default = 0 ]; +} +``` + +A LoDTensorDesc contains a tensor and a lod_level. + +## Definition of Variable in Python + +For Variable in Python, please reference `Python API`. diff --git a/doc/paddle/design/concepts/variable.md b/doc/paddle/design/concepts/variable.md new file mode 100644 index 0000000000000000000000000000000000000000..03ddb9faee76f69d621a0e3b04e77fe5ddf98e49 --- /dev/null +++ b/doc/paddle/design/concepts/variable.md @@ -0,0 +1,52 @@ +# Design Doc: Variable + + +Variable is also known as *blob* in MxNet and Caffe2. It is the input and output type of operators, where a neural network is a graph of operators. + +## Requirements: Lazy Memory Allocation + +For the flexibility of a DL system, a variable should be able to contain any typed value -- a tensor in most cases, but could also be some integer IDs or a scope of other variables in the case of RNN. + +To use the minimum amount of memory, we would like that a variable allocates memory only when it has to, or, lazy memory allocation. Let's take the following example: + +```cpp +Variable vr, v1, v2; + +Tensor* t1 = new Tensor(); +Tensor* t2 = new Tensor(); + +Randomize( + /* malloc */ v1.GetMutable().mutable_data(DDim(100,200)), + /* size */ t1.Size()); + +Randomize( + /* malloc */ v2.GetMutable().mutable_data(DDim(200,300)), + /* size */ t2.Size()); + +Mult( + /*result*/ vr.GetMutable().mutable_data(SizeOfMult(v1, v2)), + /*input1*/ v1.Get().data(), + /*input2*/ v2.Get().data()); +``` + +We see that a variable holds nothing until `Variable::GetMutable()` allocates a tensor and puts it in the variable. Similarly, a tensor gets its memory until `Tensor::mutable_data()`. + +This syntax for lazy memory allocation when we call `Randomize` and `Mult`, those functions that mutate the variable, so it saves us some line of C++ code. + + +## Implementation: Type Hiding + +To make memory allocation lazy, we cannot assume that we know the type held by a variable at definition time. 
In other words, `class Variable` cannot be a template `template class Variable`. + +Because we don't know the type `T`, we cannot save a `T*` as `Variable's` data member. Instead, we save an interface object `Placeholder`, which can return the pointer to the saved object via `Placeholder::Ptr()` as `void*`. + +But anyway, Variable needs to know `T` so could it `delete(ptr)` and so could `Variable::Get` checks the expected type and the saved object's type. + +We save `T` in `PlaceholderImpl`, the implementation of `Placeholder`. Please be aware that `PlaceholderImpl` is a class template and `T` is passed in as a template parameter. + +Because `PlaceholderImpl` knows `T`, it can save and return `typeid(T)` for the type comparison in `Variable::Get` and `Variable::GetMutable`. + + +## Conclusion + +The technique type hiding utilizes C++ class templates, interface and derivation, and C++ RTTI (typeid). This combination saves us from defining something like `caffe2::TypeMeta`, which takes hundreds of lines of C++ code. diff --git a/doc/paddle/design/concurrent/channel.md b/doc/paddle/design/concurrent/channel.md new file mode 100644 index 0000000000000000000000000000000000000000..df67438bcc741ac521b00ee962fc13c93db21182 --- /dev/null +++ b/doc/paddle/design/concurrent/channel.md @@ -0,0 +1,139 @@ +# Channel Design + +## Introduction + +A Channel is a data structure that allows for synchronous interprocess +communication via message passing. It is a fundemental component of CSP +(communicating sequential processes), and allows for users to pass data +between threads without having to worry about synchronization. + +## How to use it + +Paddle offers python APIs to open and close channels, along with sending +and receiving data to/from a channel. + +### Create a channel + +Creates a new channel that takes in variables of a specific dtype. + +- **fluid.make_channel(dtype, capacity=0)** + - **dtype**: The data type of variables being sent/received through channel + - **capacity**: The capacity of the channel. A capacity of 0 represents + an unbuffered channel. Capacity > 0 represents a buffered channel + +``` +ch = fluid.make_channel(dtype=core.VarDesc.VarType.LOD_TENSOR, 10) +``` + +### Close a channel + +Closes a channel. Any pending senders and receivers will be awoken during +this time. Receivers can still receive from a closed channel, but senders +are not allowed to send any additional data to the channel (Paddle will +raise an exception if users try to send to a closed channel.) + +- **fluid.channel_close(channel)** + +``` +fluid.channel_close(ch) +``` + +### Send data to a channel + +Sends a variable to a channel. Currently, variables of dtype `LoDTensor`, +`LoDRankTable`, `LoDTensorArray`, `SelectedRows`, `ReaderHolder`, and +`ChannelHolder` are supported. + +By default, the data of the Variable is moved from the sender to the receiver, +however the user can optionally copy the data before performing the send. + +- **channel_send(channel, variable, is_copy=False)** + - **channel**: The channel to send the variable to + - **variable**: The variable to send to the channel + - **is_copy**: If set to True, channel_send will perform a variable assign + to copy the source variable to a new variable to be sent. + +``` +ch = fluid.make_channel(dtype=core.VarDesc.VarType.LOD_TENSOR) +var = fill_constant(shape=[1],dtype=core.VarDesc.VarType.INT32, value=100) +fluid.channel_send(ch, var, True) +``` + +### Receive data from a channel + +Receives a variable from a channel. 
The data of the variable is moved to the +receiving variable. + +- **channel_recv(channel, return_variable)** + - **channel**: The channel to receive the variable from + - **return_variable**: The destination variable used to store the data of the + variable received from the channel + +``` +ch = fluid.make_channel(dtype=core.VarDesc.VarType.LOD_TENSOR) +var = fill_constant(shape=[1],dtype=core.VarDesc.VarType.INT32, value=-1) +fluid.channel_recv(ch, var) +``` + +## How it Works + +Channels provides a simple interface for different threads to share data. +To support the synchronization requirements, channels utilizes a series of +internal queues, locks, and conditional variables. + +### QueueMessage + +QueueMessage encapsulates the state of the channel send/receive operation to be +put in the **sendq/recvq**. It contains a condition variable used to lock the +thread (when there are no available sends/receives). In addition, it contains +a callback function to notify a thread when the QueueMessage is being +processed by the channel. + +### Queues + +- **buff_**: This queue holds the data buffer in a buffered channel. The +capacity is set to the capacity of the channel. This data buffer is not +used in an unbuffered channel. + +- **sendq**: This queue holds the QueueMessage of any pending senders of a +channel. When a thread performs a channel_send operation on the channel, the +channel_send operation will put a new QueueMessage on the sendq and block the +current thread under two conditions: + 1. The channel is buffered and is full + 2. The channel is unbuffered and does not have a receiver + +- **recvq**: This queue holds the QueueMessage of any pending receivers of a +channel. When a thread performs a channel_recv operation on the channel, the +channel_recv operation will put a new QueueMessage on the recvq and block the +current thread under two conditions: + 1. The channel is buffered and there is no data on the buff_ + 2. The channel is unbuffered and does not have a sender + +### State diagram + +#### Channel Send + +
<p align="center">
<img src="./images/channel_send.png"/>
</p>
+ +#### Channel Receive + +
<p align="center">
<img src="./images/channel_recv.png"/>
</p>
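The state diagrams above can also be summarized in code. Below is a rough, simplified sketch of the send path implied by the queue/blocking rules described earlier; only `buff_`, `sendq`, and `recvq` come from the text, and every other name (the payload type, locking, `QueueMessage` fields) is an assumption rather than the actual channel implementation:

```cpp
#include <condition_variable>
#include <deque>
#include <mutex>

// Hypothetical sketch of a channel's send path.
struct QueueMessage {
  int item;                          // payload (a Variable in the real channel)
  std::condition_variable cv;        // used to park the blocked thread, as described above
  bool notified = false;
};

struct ChannelSketch {
  size_t capacity_;                  // 0 means unbuffered
  std::mutex mu_;
  std::deque<int> buff_;             // data buffer of a buffered channel
  std::deque<QueueMessage*> sendq;   // blocked senders
  std::deque<QueueMessage*> recvq;   // blocked receivers

  void Send(int item) {
    std::unique_lock<std::mutex> lock(mu_);
    if (!recvq.empty()) {            // a receiver is already waiting: hand the item over directly
      QueueMessage* r = recvq.front();
      recvq.pop_front();
      r->item = item;
      r->notified = true;
      r->cv.notify_one();
      return;
    }
    if (capacity_ > 0 && buff_.size() < capacity_) {
      buff_.push_back(item);         // buffered channel with free space: no blocking
      return;
    }
    // Buffered-and-full, or unbuffered with no waiting receiver: park this thread on sendq.
    QueueMessage self{item};
    sendq.push_back(&self);
    self.cv.wait(lock, [&] { return self.notified; });
  }
};
```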
+ +## Limitations and Considerations + +### Variable Copy + +In golang, variables in channels are copied from the sender to the receiver. +In Paddle, the data from our variables are **moved** from sender to receiver. +As a result, these variables should not be used after they are sent. We +provide a flag in channel_send method to allow users to copy the variable to +be sent before it is sent. + +Please note that this is acheived by adding an **assign** operator and creating +a temporary variable that is sent in place of the original variable. Please +note that **assign** operator has limited support for only certain variables +datatypes. diff --git a/doc/paddle/design/concurrent/concurrent_programming.md b/doc/paddle/design/concurrent/concurrent_programming.md new file mode 100644 index 0000000000000000000000000000000000000000..ecee3f8b95812e029cf1f74e1debb4ea3baf99f9 --- /dev/null +++ b/doc/paddle/design/concurrent/concurrent_programming.md @@ -0,0 +1,193 @@ +# Design Doc: Concurrent Programming with Fluid + +With PaddlePaddle Fluid, users describe a program other than a model. The program is a [`ProgramDesc`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/framework/framework.proto) protobuf message. TensorFlow/MxNet/Caffe2 applications generate protobuf messages too, but their protobuf messages represent the model, a graph of operators, but not the program that trains/uses the model. + +Many know that when we program TensorFlow, we can specify the device on which each operator runs. This allows us to create a concurrent/parallel AI application. An interesting questions is **how does a `ProgramDesc` represents a concurrent program?** + +The answer relies on the fact that a `ProgramDesc` is similar to an abstract syntax tree (AST) that describes a program. So users just program a concurrent program that they do with any concurrent programming language, e.g., [Go](https://golang.org). + +## An Analogy + +The following table compares concepts in Fluid and Go + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Go | Fluid |
|---|---|
| user-defined functions | layers |
| control-flow and built-in functions | intrinsics/operators |
| goroutines, channels | class ThreadPool |
| runtime | class Executor |
+ + +## An Example Concurrent Program + +To review all above concepts in an example, let us take a simple program and writes its distributed version. + +Suppose that we want to parallelize a naive Fluid program (written in Go and calling Fluid's Go binding) that multiplies two tensors. + +```go +import "fluid" + +func paddlepaddle() { + X = fluid.read(...) + W = fluid.Tensor(...) + Y = fluid.mult(X, W) +} +``` + +Please be aware that the Fluid's Go binding provides the default `main` function, which calls the `paddlepaddle` function, which, in this case, is defined in above program and creates the following `ProgramDesc` message. + +```protobuf +message ProgramDesc { + block[0] = Block { + vars = [X, W, Y], + ops = [ + read(output = X) + assign(input = ..., output = W) + mult(input = {X, W}, output = Y) + ], + } +} +``` + +Then, the default `main` function calls `fluid.run()`, which creates an instance of the [`class Executor`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/framework/executor.h) and calls `Executor.Run(block[0])`, where `block[0]` is the first and only block defined in above `ProgramDesc` message. + +The default `main` function is defined as follows: + +```go +func main() { + paddlepaddle() + fluid.run() +} +``` + +## The Concurrent Version + +By parallelizing the above program, we could support very big tensor X by splitting into small pieces {x_1, x_2, ...} and sent each piece to worker process/node for parallel multiplication. + +In this case, we can write a transpiler that takes a `ProgramDesc` message that represents the above example program and outputs two `ProgramDesc` messages, one for running on the master process/node, and the other one for worker processes/nodes. + +### The Master Program + +The master program could look like the following: + +```protobuf +message ProgramDesc { + block[0] = Block { + vars = [X, L, Y], + ops = [ + read(output = X) + kube_get_workers_addrs(output = L) + Y = tensor_array(len(L)) + parallel_for(input = X, output = Y, + attrs = {L, block_id(1)}) # referring to block 1 + ] + } + + block[1] = Block { + parent = 0, + vars = [x, y, index], + ops = [ + slice(input = [X, index], output = x) # index is initialized by parallel_for + send(input = x, attrs = L[index]) + recv(outputs = y, attrs = L[index]) + assign(input = y, output = Y[index]) + ] + } +} +``` + +The equivalent Fluid program (calling the Go binding) is: + +```go +func main() { //// block 0 + X = fluid.read(...) + L = fluid.k8s.get_worker_addrs() + Y = fluid.tensor_array(len(L)) + fluid.parallel_for(X, L, + func(index int) { //// block 1 + x = X[index] + fluid.send(L[index], x) + y = fluid.recv(L[index]) + Y[index] = y + }) +} +``` + +An explanation of the above program: + +- `fluid.k8s` is a package that provides access to Kubernetes API. +- `fluid.k8s.get_worker_addrs` returns the list of IP and ports of all pods of the current job except for the current one (the master pod). +- `fluid.tensor_array` creates a [tensor array](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/framework/lod_tensor_array.h). `fluid.parallel_for` creates a `ParallelFor` intrinsic, which, when executed, + + 1. creates `len(L)` scopes, each for the concurrent running of the sub-block (block 1 in this case), and initializes a variable named "index" in the scope to an integer value in the range `[0, len(L)-1]`, and + 2. creates `len(L)` threads by calling into the `ThreadPool` singleton, each thread + 1. creates an Executor instance, and + 2. 
calls `Executor.Run(block)`, where `block` is block 1 as explained above. +1. Please be aware that block 1 is a sub-block of block 0, so ops in block 1 could refer to variables defined in block 0. + +### The Worker Program + +The worker program looks like + +```go +func main() { + W = Tensor(...) + x = fluid.listen_and_do( + fluid.k8s.self_addr(), + func(input Tensor) { + output = fluid.mult(input, W) + }) +} +``` + +where + +- `fluid.listen_and_do` creates a `ListenAndDo` intrinsic, which, when executed, + 1. listens on the current pod's IP address, as returned by `fliud.k8s.self_addr()`, + 2. once a connection is established, + 1. creates a scope of two parameters, "input" and "output", + 2. reads a [Fluid variable](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/framework/variable.h) and saves it into "input", + 3. creates an Executor instance and calls `Executor.Run(block)`, where the block is generated by running the lambda specified as the second parameter of `fluid.listen_and_do`. + +## Summarization + +From the above example, we see that: + +1. Fluid enables the imperative programming paradigm by: + 1. letting users describe a program, but not a model (a sequence of layers, or a graph of operators), and + 2. call the `fluid.run` function that runs the program implicitly. +1. The program is described as a `ProgramDesc` protobuf message. +2. Function `Executor.Run` takes a block, instead of a `ProgramDesc`, as its parameter. +3. `fluid.run` calls `Executor.Run` to run the first block in the `ProgramDesc` message. +4. `Executor.Run`'s implementation is extremely simple -- it doesn't plan the execution nor create threads; instead, it runs on the current thread and execute intrinsics/operators' `Run` method sequentially as they appear in the `Block.ops` array. +5. Intrinsics/operators' `Run` method might create threads. For example, the `ListenAndDo` operator creates a thread to handle each incoming request. +6. Threads are not necessarily OS thread; instead, they could be [green threads](https://en.wikipedia.org/wiki/Green_threads) managed by ThreadPool. Multiple green threads might run on the same OS thread. An example green threads is Go's [goroutines](https://tour.golang.org/concurrency/1). diff --git a/doc/paddle/design/concurrent/csp.md b/doc/paddle/design/concurrent/csp.md new file mode 100644 index 0000000000000000000000000000000000000000..8059b53fd7e524c87002cf3ef318c6ac77bb1c6b --- /dev/null +++ b/doc/paddle/design/concurrent/csp.md @@ -0,0 +1,251 @@ +# Design Doc: CSP in PaddlePaddle Fluid + +## Motivation + +Concurrent programming is important for deep learning. Few example applications are: + +1. The main thread keeps reading the next mini-batch while another thread uses the GPU for computing. +2. The main thread performs the computation while another thread uploads the local gradients from each trainer to the parameter server. + +Most DL systems, including TensorFlow, Caffe2, and MxNet, can asynchronously execute operators in a graph. However, Fluid doesn't have the concept of a graph at all, as the design goal of Fluid is that of a programming language. + +## Concurrent Programming Models + +There were many concurrent programming models, implemented in various forms: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| concurrent programming model | implementation |
|---|---|
| mutex | types and functions in standard libraries |
| semaphore | types and functions in standard libraries |
| communicating sequential processes (CSP) | Go programming language |
| actor model | Erlang programming language |
| message passing | MPI |
| bulk synchronous parallel (BSP) | Pregel distributed programming framework |
+ + +Since Fluid was designed to be a programming language, we would like to implement CSP in Fluid. + +### CSP v.s. Actor Model + +A well-known implementation of Actor Model is the Erlang programming language. In Actor Model, *processes* could send messages to another process and receive messages from another process given the process IDs. We can find the three ingredients, process with ID, send, and recv, in MPI too. Indeed, we can rewrite Erlang programs in Python + MPI with possibly fewer lines of code. Our concern with Actor Model is that it doesn't seem reasonable to implement process management in a programming language's runtime library; instead, it should be the operating systems' responsibility to manage processes and libraries like MPI for send/recv. + +## CSP in Fluid + +Fluid has two fundamental control-flows: *if-else* and *while*. If we are to implement CSP, we need the following: + +1. a new data type: *channel* and operators *send* and *recv*, +1. *goroutine* or thread, and +1. a new control-flow: select. + +We also need Python wrappers for the above components. + +The type *channel* is conceptually the blocking queue. In Go, its implemented is a [blocking circular queue](https://github.com/golang/go/blob/68ce117cf17b8debf5754bfd476345779b5b6616/src/runtime/chan.go#L31-L50), which supports send and recv. + +The `select` operation has been in OS kernels long before Go language. All Unix kernels implement system calls *poll* and *select*. They monitor multiple file descriptors to see if I/O is possible on any of them. This takes O(N) time. Since Linux 2.6, a new system call, *epoll*, can do the same in O(1) time. In BSD systems, there is a similar system call *kqueue*. Go's Linux implementation uses epoll. + +It might be a good idea to implement Fluid's select using epoll too. In this design doc, we start from the O(N) way so that we could focus on Python binding and the syntax. + +### Type Channel + +Fluid supports many data types: + +1. Tensor, +1. Row-sparse Tensor +1. LoD Tensor, +1. Tensor array, etc + +Each data type is registered in the [`framework.proto`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/framework.proto#L117-L127) as an enum value. To add a new type channel, we need to add a new type enum. + +To expose a C++ type to Python, we need to edit the [`pybind.cc`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/pybind/pybind.cc) file. [Here](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/pybind/pybind.cc#L120-L164) is an example how we expose C++ class LoDTensor. + +## Syntax Design + +### Create Channel + +In Go, we create a channel by specifying the element type and buffer size: + +```go +ch := make(chan int) // a channel without buffer +ch1 := make(chan int, 100) // a channel that can buffer 100 ints. +``` + +In Fluid, we should be able to do the same: + +```python +ch = fluid.make_channel(dtype=INT) +ch1 = fluid.make_channel(dtype=INT, 100) +``` + +In addition to that, we want channels that can hold more complex element types, e.g., Tensors of float16: + +```python +ch = fluid.make_channel(dtype=Tensor, etype=float16) +``` + +or Tensors of Tensors of float16 etc. + +The point here is that we need a consistent way to compose types, like in C++ we can have `Tensor...> >`. + +### Send and Recv + +Go's CSP implementation depends on data type *channel*. There are two types of channels: + +1. The unblocked channel, or buffered channel, is a blocking queue with a non-zero sized buffer. 
The sending to buffered channel blocks if the buffer is full, and the receive operation blocks if the buffer is empty. +1. blocked channel, or unbuffered channel, is a blocking queue with no buffer. Both sending and receiving block with unbuffered channels. + +There are four types of actions with a channel: + +1. Create a channel + + ```go + ch := make(chan int) // this is an unbuffered channel + ch := make(chan int, 100) // this is a buffered channel of 100 ints. + ``` + +1. Send + + ```go + ch <- 111 + ``` + +1. Recv + + ```go + y, ok <- ch + ``` + +1. Close + + ```go + close(ch) + ``` + + Please be aware that a closed channel is not a nil channel, which is `var ch chan int`. + +There are some [axioms with channels](https://dave.cheney.net/2014/03/19/channel-axioms): + +1. A send to a nil channel blocks forever + +1. A receive from a nil channel blocks forever + +1. A send to a closed channel panics + +1. A receive from a closed channel returns the residual values and then zeros. + +In Fluid, we have [buffered channels](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/details/buffered_channel.h) and [unbuffered channels](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/details/unbuffered_channel.h) + +The following program illustrates the Python syntax for accessing Fluid buffers. + +```python +import fluid + +buffer_size = 10 +ch = fluid.make_channel(dtype=INT, buffer_size) + +# Now write three elements to the channel +with fluid.while(steps=buffer_size): + fluid.send(ch, step) + +fluid.close_channel(ch) + +with fluid.while(steps=buffer_size): + fluid.print(fluid.recv(ch)) +``` + +The following example shows that to avoid the always-blocking behavior of unbuffered channels, we need to use Fluid's goroutines. + +```python +import fluid + +ch = fluid.make_channel(dtype=INT) + +with fluid.go(): + fluid.send(ch) + +y = fluid.recv(ch) + +fluid.close_channel(ch) +``` + +### Select + +In Go, the `select` statement lets a goroutine wait on multiple communication operations. A `select` blocks until one of its cases can run, then it executes that case. It chooses one at random if multiple are ready. + +```go + +ch1 := make(chan int) +ch2 := make(chan int, 100) + +x := 0 + +for { + select { + case ch1 <- x: + x := x + 1 + case y <- ch2: + fmt.Println("Received on channel") + default: + fmt.Println("Default") + } + } + +``` + +In Fluid, we should be able to do the same: + +```python +ch1 = fluid.make_chan(dtype=INT) +ch2 = fluid.make_chan(dtype=INT, 100) + +sel = fluid.select() + +with sel.case(ch1, 'w', X): + fluid.layers.increment(X) + +with sel.case(ch2, 'r', Y): + fluid.print("Received on Channel") + +with sel.default(): + fluid.print("Default") + +``` + +In the above code snippet, `X` and `Y` are variables. Now let us look at each of these statements one by one. + +- `sel.case(ch1, 'w', X)` : This specifies that we are writing to `ch1` and we want to write the integer in variable `X` to the channel. The character `w` is used here to make the syntax familiar to write syntax in Python I/O. + +- `sel.case(ch2, 'r', Y)` : This specifies that we would like to read the result from `ch2` into variable `Y`. The character `r` is used here to make the syntax familiar to read syntax in Python I/O. + +- `sel.default()` : This is equivalent to the default in Go `select`. If none of the channels are ready for read or write, then the fluid code in the default block will be executed. + +## Example Programs + +### 1. RPC between Trainers and Parameter Servers + +### 2. 
Concurrent Minibatch Loading diff --git a/doc/paddle/design/concurrent/go_op.md b/doc/paddle/design/concurrent/go_op.md new file mode 100644 index 0000000000000000000000000000000000000000..d55dcbc937cb673836bea057e6fbdc9c915cdc39 --- /dev/null +++ b/doc/paddle/design/concurrent/go_op.md @@ -0,0 +1,231 @@ +# go_op Design + +## Introduction + +The **go_op** allows user's of PaddlePaddle to run program blocks on a detached +thread. It works in conjuction with CSP operators (channel_send, +channel_receive, channel_open, channel_close, and select) to allow users to +concurrently process data and communicate easily between different threads. + +## How to use it + +``` +channel = fluid.make_channel(dtype=core.VarDesc.VarType.LOD_TENSOR) + +with fluid.Go(): + # Send a tensor of value 99 to "channel" on a detached thread + tensor = fill_constant(shape=[1], dtype='int', value=99) + tensor.stop_gradient = True + fluid.channel_send(channel, tensor) + +# Receive sent tensor from "channel" on the main thread +result = fill_constant(shape=[1], dtype='int', value=-1) +fluid.channel_recv(ch, result) +``` + +The go operator can be accessed by using the fluid.Go() control flow. This +will create a new sub block, where the user can add additional operators +to be ran on the thread. + +**Note:** Since back propegation is currently not support in the go_op, users +should ensure that operators in the go block does not require gradient +calculations. + +## How it Works + +Similar to other control blocks, go_op will create a sub block and add it +as a child to the current block. Operators and variables defined in this +block will be added to the go sub_block. + +In addition, the go operator will create a new child scope whose parent is +the global scope. Please refer to [block captures](#block-captures) for more +information. + +When Paddle executor runs go_op, go_op will take the sub_block and pass it to +the executor.run method (along with a newly created local scope) on a detached +thread. + +An example of the generated program description is shown below. Take note of +the **go_op** in particular. It is added as an operator in the current +block (in this example, block0). The **go_op** contains a `sub_block` +attribute, which points to the id of the block that will be executed in a +detached thread. + +``` +blocks { + idx: 0 + parent_idx: -1 + vars { + name: "return_value" + type { + type: LOD_TENSOR + lod_tensor { + tensor { + data_type: INT64 + } + } + } + } + vars { + name: "status_recv" + type { + type: LOD_TENSOR + lod_tensor { + tensor { + data_type: BOOL + } + } + } + } + ... + ops { + outputs { + parameter: "Out" + arguments: "channel" + } + type: "channel_create" + attrs { + name: "data_type" + type: INT + i: 7 + } + attrs { + name: "capacity" + type: INT + i: 0 + } + } + ops { + inputs { + parameter: "X" + arguments: "channel" + } + type: "go" + attrs { + name: "sub_block" + type: BLOCK + block_idx: 1 + } + } + ops { + inputs { + parameter: "Channel" + arguments: "channel" + } + outputs { + parameter: "Out" + arguments: "return_value" + } + outputs { + parameter: "Status" + arguments: "status_recv" + } + type: "channel_recv" + } + ... +} + +blocks { + idx: 1 + parent_idx: 0 + vars { + name: "status" + type { + type: LOD_TENSOR + lod_tensor { + tensor { + data_type: BOOL + } + } + } + } + ... 
+ + ops { + outputs { + parameter: "Out" + arguments: "fill_constant_1.tmp_0" + } + type: "fill_constant" + attrs { + name: "force_cpu" + type: BOOLEAN + b: false + } + attrs { + name: "value" + type: FLOAT + f: 99.0 + } + attrs { + name: "shape" + type: INTS + ints: 1 + } + attrs { + name: "dtype" + type: INT + i: 3 + } + } + ops { + inputs { + parameter: "Channel" + arguments: "channel" + } + inputs { + parameter: "X" + arguments: "fill_constant_1.tmp_0" + } + outputs { + parameter: "Status" + arguments: "status" + } + type: "channel_send" + attrs { + name: "copy" + type: BOOLEAN + b: false + } + } +``` + +## Current Limitations + +#### Scopes and block captures: + +Paddle utilizes [scopes](./../concepts/scope.md) to store variables used in a +block. When a block is executed, a new local scope is created from the parent +scope (ie: scope derived from the parent block) and associated with the new +child block. After the block finishes executing, then the local scope and +all associated variables in the scope is deleted. + +This works well in a single threaded scenario, however with introduction of +go_op, a child block may continue to execute even after the parent block has +exited. If the go_op tries to access variables located in the parent block's +scope, it may receive a segmentation fault because the parent scope may have +been deleted. + +We need to implement block closures in order to prevent access to parent +scope variables from causing a segmentation fault. As a temporary workaround, +please ensure that all variables accessed in the go block is not destructed +before it is being accessed. Currently, the go_op will explicitly enforce +this requirement and raise an exception if a variable could not be found in +the scope. + +Please refer to [Closure issue](https://github.com/PaddlePaddle/Paddle/issues/8502) +for more details. + +#### Green Threads + +Golang utilizes `green threads`, which is a mechnism for the runtime library to +manage multiple threads (instead of natively by the OS). Green threads usually +allows for faster thread creation and switching, as there is less overhead +when spawning these threads. For the first version of CSP, we only support +OS threads. + + +#### Backward Propegation: + +go_op currently does not support backwards propagation. Please use go_op with +non training operators. 
diff --git a/doc/paddle/design/concurrent/images/channel_recv.png b/doc/paddle/design/concurrent/images/channel_recv.png new file mode 100644 index 0000000000000000000000000000000000000000..c06cd15ae7b8a8c94d5742f6675e389081fcf789 Binary files /dev/null and b/doc/paddle/design/concurrent/images/channel_recv.png differ diff --git a/doc/paddle/design/concurrent/images/channel_send.png b/doc/paddle/design/concurrent/images/channel_send.png new file mode 100644 index 0000000000000000000000000000000000000000..006ebb4a5a4bcd32c97847e9fb7729a740255f7c Binary files /dev/null and b/doc/paddle/design/concurrent/images/channel_send.png differ diff --git a/doc/paddle/design/concurrent/images/select_op_workflow.png b/doc/paddle/design/concurrent/images/select_op_workflow.png new file mode 100644 index 0000000000000000000000000000000000000000..719ed76f9d542d6c4f20c30f27656bb53325aa85 Binary files /dev/null and b/doc/paddle/design/concurrent/images/select_op_workflow.png differ diff --git a/doc/paddle/design/concurrent/index_cn.rst b/doc/paddle/design/concurrent/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e47135e9fc42760898083710e0a6767252a0225b --- /dev/null +++ b/doc/paddle/design/concurrent/index_cn.rst @@ -0,0 +1,8 @@ +并发编程 +------------ + +.. toctree:: + :maxdepth: 1 + + concurrent_programming.md + parallel_do.md diff --git a/doc/paddle/design/concurrent/index_en.rst b/doc/paddle/design/concurrent/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..0727e75798b2a869588f80d3cce7a886554e4ffb --- /dev/null +++ b/doc/paddle/design/concurrent/index_en.rst @@ -0,0 +1,8 @@ +Concurrent Programming +------------------------- + +.. toctree:: + :maxdepth: 1 + + concurrent_programming.md + parallel_do.md diff --git a/doc/paddle/design/concurrent/parallel_do.md b/doc/paddle/design/concurrent/parallel_do.md new file mode 100644 index 0000000000000000000000000000000000000000..943adf2b26df851d8ecaaaaf3a24311b286b83e3 --- /dev/null +++ b/doc/paddle/design/concurrent/parallel_do.md @@ -0,0 +1,163 @@ +# Design Doc: Parallel_Do in PaddlePaddle + +In PaddlePaddle, we use parallel_do primitive to represent multithread data parallel processing. + +## Design overview + +The definition of a parallel_do op looks like the following + +```c++ +AddInput(kInputs, "Inputs needed to be split onto different devices").AsDuplicable(); +AddInput(kParameters, "Parameters are duplicated over different devices") + .AsDuplicable(); +AddInput(kPlaces, "Devices used for parallel processing"); +AddOutput(kOutputs, "Outputs needed to be merged from different devices").AsDuplicable(); +AddOutput(kParallelScopes, + "Scopes for all local variables in forward pass. 
One scope for each device"); +AddAttr(kParallelBlock, + "List of operaters to be executed in parallel"); +``` + +A vanilla implementation of parallel_do can be shown as the following (`|` means single thread and +`||||` means multiple threads) + +``` +In the forward pass + | Split input onto different devices + | Copy parameter onto different devices + |||| Compute forward pass in parallel + | Merge output from different devices + +In the backward pass + | Split output@grad onto different devices + |||| Compute backward pass in parallel + | accumulate param@grad from different devices to the first device + | Merge input@grad from different devices +  | Copy param@grad to the place of parallel_do_op +``` + +This implementation allows to write mixed device program like this + +```python +W1 = fluid.tensor(size=[100,20], parameter=true) +W2 = fluid.tensor(size=[20,15], parameter=true) + +data = layers.data() + +gpu_places = layers.get_place(use_gpu=True) +# parallel processing on multiple GPUs +pd = ParallelDo(gpu_places) +with pd.do(input=data): + prediction = softmax(fc(fc(data, W1), W2)) + write_output(prediction) +prediction = pd() +loss = cross_entropy(prediction, label) +``` + +And the programDesc are like the following + +``` +# start_program will be run by executor(CPUPlace), all w1, w2 will be allocated on CPU +start_program +{ + vars: w1, w2 + ops: init(w1), init(w2) +} + +main_program +{ +block0 { + vars: data, places, w1, w2, w1_grad, w2_grad, + ops: data, get_place, parallel_do(block1), + parallel_do_grad(block2), + sgd(w2, w2_grad), + sgd(w1, w1_grad) +} +block1 { # the forward pass + parent_block: 0 + vars: data, h1, h2, loss + ops: fc, fc, softmax +} +block2 { # the backward pass + parent_block: 1 + vars: data_grad, h1_grad, h2_grad, loss_gard, local_w1_grad, local_w2_grad + ops: softmax_grad, + fc_grad + fc_grad +} +} +``` + +## Performance Imporvement + +There are serial places we can make this parallel_do faster. + +### forward: split input onto different devices + +If the input of the parallel_do is independent from any prior opeartors, we can avoid this step by +prefetching the input onto different devices in a seperate background thread. And the python code +looks like this. +```python +pd = ParallelDo(gpu_places) +with pd.do(): +    feature = get_data_from_prefetch_queue(gpu_places) + prediction = my_net(feature) + write_output(activation) +``` + +### forward: Copy parameter to onto different devices + +We can avoid this step by making each device have a copy of the parameter. This requires: + +1. `fluid.default_start_up_program()` to be run on all devices +1. In the backward, allreduce param@grad at different devices, this requires + 1. `backward.py` add `allreduce` operators at parallel_do_grad + 1. `allreduce` operators need to be called in async mode to achieve maximum throughput +1. apply gradients related op(i.e. cliping, normalization, decay, sgd) on different devices in parallel + +By doing so, we also avoided "backward: accumulate param@grad from different devices to the first device". 
+And the ProgramDesc looks like the following + +``` +# w1, w2 will be allocated on all GPUs +start_program +{ +block0 { + parallel_do(block1) +} +block1 { + parent_block: 0 + vars: w1, w2 + ops: init(w1), init(w2) +} +} + +main_program +{ +block0 { + vars: data, places, w1, w2 + ops: data, get_place, parallel_do(block1), + parallel_do_grad(block2), # append_backward + parallel_do(block3) # append_optimization + +} +block1 { + parent_block: 0 + vars: data, h1, h2, loss + ops: fc, fc, softmax +} +block2 { + parent_block: 1 + vars: data_grad, h1_grad, h2_grad, loss_gard, w1_grad, w2_grad + ops: softmax_grad, + fc_grad, allreduce(places, scopes, w1_grad), + fc_grad, allreduce(places, scopes, w2_grad) +} +block3 { + parent_block: 0 + vars: lr + ops: sgd(w2, w2_grad), + sgd(w1, w1_grad) +} +} +``` diff --git a/doc/paddle/design/concurrent/select_op.md b/doc/paddle/design/concurrent/select_op.md new file mode 100644 index 0000000000000000000000000000000000000000..2c193f5a2b44bdeb9aab50856db1637cf81a870f --- /dev/null +++ b/doc/paddle/design/concurrent/select_op.md @@ -0,0 +1,265 @@ +# select_op Design + +## Introduction + +In golang, the [**select**](https://golang.org/ref/spec#Select_statements) +statement lets a goroutine wait on multiple communication operations at the +same time. The **select** blocks until one of its cases can run, then +executes the case. If multiple cases are ready to run, then one case is +choosen at random to be executed. + +With the introduction of CSP for Paddle, we mimic this behavior by +creating a ***select_op***. + +## How to use it + +The **select_op** is available as a c++ operator. However most users +will prefer to use the much simplier Python API. + +- **fluid.Select()**: Creates a select operator and adds it to the current +block within the main program. Also creates a sub block and adds it to the +main program. This sub block is used to hold all variables and operators +used by the case statements. + +Within the select block, users can add cases by +calling **select.case** or **select.default** method. + +- **fluid.Select.case(channel_action, channel, result_variable)**: Represents +a fluid channel send/recv case. This method creates a SelectCase block +guard and adds it to the Select block. The arguments into this method tells +the select which channel operation to listen to. + +- **fluid.Select.default()**: Represents the fluid default case. This default +case is executed if none of the channel send/recv cases are available to +execute. 
+ +**Example:** +``` +ch1 = fluid.make_channel(dtype=core.VarDesc.VarType.LOD_TENSOR) +quit_ch = fluid.make_channel(dtype=core.VarDesc.VarType.LOD_TENSOR) + +x = fill_constant(shape=[1], dtype=core.VarDesc.VarType.INT32, value=0) +y = fill_constant(shape=[1], dtype=core.VarDesc.VarType.INT32, value=1) + +while_cond = fill_constant(shape=[1], dtype=core.VarDesc.VarType.BOOL, value=True) +while_op = While(cond=while_cond) + +with while_op.block(): + with fluid.Select() as select: + with select.case(fluid.channel_send, channel, x): + # Send x, then perform Fibonacci calculation on x and y + x_tmp = fill_constant(shape=[1], dtype=core.VarDesc.VarType.INT32, value=0) + assign(input=x, output=x_tmp) + assign(input=y, output=x) + assign(elementwise_add(x=x_tmp, y=y), output=y) + with select.case(fluid.channel_recv, quit_channel, result2): + # Exit out of While loop + while_false = fill_constant(shape=[1], dtype=core.VarDesc.VarType.BOOL, value=False) + helper = layer_helper.LayerHelper('assign') + helper.append_op( + type='assign', + inputs={'X': [while_false]}, + outputs={'Out': [while_cond]}) +``` + +## How it Works + +### Program Description + +``` +blocks { + idx: 0 + ... + // Create "case_to_execute" variable + ops { + outputs { + parameter: "Out" + arguments: "fill_constant_110.tmp_0" + } + type: "fill_constant" + attrs { + name: "force_cpu" + type: BOOLEAN + b: false + } + attrs { + name: "value" + type: FLOAT + f: -1.0 + } + attrs { + name: "shape" + type: INTS + ints: 1 + } + attrs { + name: "dtype" + type: INT + i: 2 + } + } + // Create "select" operator. + // inputs: + // X: All input variables used by operators within the select block + // case_to_execute: Variable filled in by select_op when it determines + // which case to execute. + // + // outputs: + // Out: All output variables referenced by operators within select block. + // + // attrs: + // sub_block: The block id containing the select "cases" + // cases: Serialized list of all cases in the select op. + // Each case is serialized as: ',,,' + // where type is 0 for default, 1 for send, and 2 for receive. + // No channel and values are needed for default cases. + ops { + inputs { + parameter: "X" + arguments: "fill_constant_103.tmp_0" + arguments: "fill_constant_104.tmp_0" + } + inputs { + parameter: "case_to_execute" + arguments: "fill_constant_110.tmp_0" + } + outputs { + parameter: "Out" + arguments: "fill_constant_110.tmp_0" + } + type: "select" + attrs { + name: "sub_block" + type: BLOCK + block_idx: 1 + } + attrs { + name: "cases" + type: STRINGS + strings: "0,1,channel_101,fill_constant_109.tmp_0" + strings: "1,2,channel_102,fill_constant_108.tmp_0" + } + } + ... +} +``` + +The python select API will add the **select_op** to the current block. In addition, it will +iterate through all it's case statements and add any input variables required by case statements +into **X**. It will also create a temp variable called **case_to_execute**. This variable is +filled in by the select_op after it has completed processing the case statements. + +If there are no available cases to execute (ie: all cases are blocked on channel operations, and +there is no default statement), then the select_op will block the current thread. The thread will +unblock once there is a channel operation affecting one of the case statements, at which point, the +**select_op** will set the **case_to_execute** variable to the index of the case to execute. + +Finally the select_op will call executor.run on the **sub_block**. 
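As a concrete illustration of the `cases` attribute format above (`'index,type,channel,value'`), the following sketch shows how the Python API could serialize its case descriptors; the `Case` tuple and the `serialize_cases` helper are hypothetical and not part of the actual Fluid implementation:

```python
from collections import namedtuple

# Hypothetical descriptor for one select case; type 0 = default, 1 = send, 2 = receive.
Case = namedtuple('Case', ['index', 'action', 'channel_name', 'value_name'])

def serialize_cases(cases):
    """Encode each case as 'index,type,channel,value' as described above.

    Default cases carry no channel or value, so those fields stay empty.
    """
    serialized = []
    for c in cases:
        if c.action == 0:  # default case
            serialized.append('%d,0,,' % c.index)
        else:
            serialized.append('%d,%d,%s,%s' %
                              (c.index, c.action, c.channel_name, c.value_name))
    return serialized

# Reproduces the strings shown in the ProgramDesc above.
print(serialize_cases([
    Case(0, 1, 'channel_101', 'fill_constant_109.tmp_0'),
    Case(1, 2, 'channel_102', 'fill_constant_108.tmp_0'),
]))
```

The sub-block listed next then pairs each case index with an `equal` operator and a `conditional_block`, so that only the body of the chosen case runs.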
+ +``` +blocks { + idx: 1 + parent_idx: 0 + ... + // Fill a tensor with the case index (ie: 0,1,2,3,ect.) + ops { + outputs { + parameter: "Out" + arguments: "fill_constant_111.tmp_0" + } + type: "fill_constant" + attrs { + name: "force_cpu" + type: BOOLEAN + b: false + } + attrs { + name: "value" + type: FLOAT + f: 0.0 + } + attrs { + name: "shape" + type: INTS + ints: 1 + } + attrs { + name: "dtype" + type: INT + i: 2 + } + } + // Create an "equal" operator to compare the case index with the "case_to_execute" + // tensor (which was filled in by the select op). + ops { + inputs { + parameter: "X" + arguments: "fill_constant_111.tmp_0" // case 0 + } + inputs { + parameter: "Y" + arguments: "fill_constant_110.tmp_0" // case_to_execute + } + outputs { + parameter: "Out" + arguments: "equal_0.tmp_0" + } + type: "equal" + attrs { + name: "axis" + type: INT + i: -1 + } + } + // Use the output of the "equal" operator as a condition for the "conditional_block". + // If the condition evaluates to true, then execute the "sub_block" (which represents + // the select case's body) + ops { + inputs { + parameter: "Params" + } + inputs { + parameter: "X" + arguments: "equal_0.tmp_0" + } + outputs { + parameter: "Out" + } + outputs { + parameter: "Scope" + arguments: "_generated_var_0" + } + type: "conditional_block" + attrs { + name: "is_scalar_condition" + type: BOOLEAN + b: true + } + attrs { + name: "sub_block" + type: BLOCK + block_idx: 4 + } + } + ... + // Repeat the above operators for each case statements inside the select body +} + +``` + +Cases are represented by a **conditional_block operator**, whose's condition is set as the output of +equal(**case_to_execute**, **case_index**). Since each case index is unique in this sub-block, +only one case will be executed. + +### select_op flow + +

+
+

+ +The select algorithm is inspired by golang's select routine. Please refer to +http://www.tapirgames.com/blog/golang-concurrent-select-implementation for more information. + +## Backward Pass + +TODO diff --git a/doc/paddle/design/data_type/float16.md b/doc/paddle/design/data_type/float16.md new file mode 100644 index 0000000000000000000000000000000000000000..aad578f5067723ea1a0e048fb811bb806720666d --- /dev/null +++ b/doc/paddle/design/data_type/float16.md @@ -0,0 +1,183 @@ +# Design Doc: float16 + +## Why float16 +Half precision (float16) is a binary floating-point format that occupies 16 bits in memory. float16 is half the size of traditional 32-bit single precision format (float) and has lower precision and smaller range. + +When high precision computation is not required (which is usually the case at least in the deep learning inference stage), using float16 data type could potentially + +- reduce storage space, memory bandwidth, and power usages; +- increase the chance of data fitting into a smaller cache of lower latency; +- provide arithmetic speed up if supported by hardware. + +## Survey of current float16 support +A brief survey of float16 support on different compilers, hardwares, and libraries can be found below. Interested readers can refer to [link1](https://github.com/PaddlePaddle/Paddle/issues/4853) and [link2](https://github.com/Xreki/Xreki.github.io/blob/master/multi_data_types_in_dl_framework/ppt/float16_and_quantized_type.md) for more info. + +The goal of float16 is to serve as a key for the executor to find and run the correct version of compute method specialized for float16 in operator kernels. It should be compatible with various natively supported float16 implementations including `__half` for cuda, `float16_t` for ARM, and `Eigen::half` for Eigen to make writing customized float16 kernels easier. + +### Compiler +- nvcc supports `__half` data type after CUDA 7.5. +- `__fp16` or `float16_t` is supported as storage type for gcc >= 6.1 and clang >= 3.4. +- `__fp16` or `float16_t` is supported as arithmetic type for gcc >= 7.1 and clang >= 3.9. + +### Hardware +- `__half` is supported on GPU with compute capability >= 5.3. +- `__fp16` is supported as storage type for ARMv7-A, ARMv8-A, and above. +- `__fp16` is supported as arithmetic type after ARMv8.2-A (currently, the only microarchitecture implementing ARMv8.2-A is ARM Cortex-A75, which is announced in May 2017. There seems to be no application processors currently available on market that adopts this architecture. It is reported that Qualcomm Snapdragon 845 uses Cortex-A75 design and will be available in mobile devices in early 2018). + +### Libraries +- [Eigen](https://github.com/RLovelett/eigen) >= 3.3 supports float16 calculation on both GPU and CPU using the `Eigen::half` class. It is mostly useful for Nvidia GPUs because of the overloaded arithmetic operators using cuda intrinsics. It falls back to using software emulation on CPU for calculation and there is no special treatment to ARM processors. +- [ARM compute library](https://github.com/ARM-software/ComputeLibrary) >= 17.02.01 supports NEON FP16 kernels (requires ARMv8.2-A CPU). + +### CUDA version issue +There are currently three versions of CUDA that supports `__half` data type, namely, CUDA 7.5, 8.0, and 9.0. 
+CUDA 7.5 and 8.0 define `__half` as a simple struct that has a `uint16_t` data (see [`cuda_fp16.h`](https://github.com/ptillet/isaac/blob/9212ab5a3ddbe48f30ef373f9c1fb546804c7a8c/include/isaac/external/CUDA/cuda_fp16.h)) as follows: +``` +typedef struct __align__(2) { + unsigned short x; +} __half; + +typedef __half half; +``` +This struct does not define any overloaded arithmetic operators. So you have to directly use `__hadd` instead of `+` to correctly add two half types: +``` +__global__ void Add() { + half a, b, c; + c = __hadd(a, b); // correct + c = a + b; // compiler error: no operator "+" matches these operands +} +``` +CUDA 9.0 provides a major update to the half data type. The related code can be found in the updated [`cuda_fp16.h`](https://github.com/ptillet/isaac/blob/master/include/isaac/external/CUDA/cuda_fp16.h) and the newly added [`cuda_fp16.hpp`](https://github.com/ptillet/isaac/blob/master/include/isaac/external/CUDA/cuda_fp16.hpp). + +Essentially, CUDA 9.0 renames the original `__half` type in 7.5 and 8.0 as `__half_raw`, and defines a new `__half` class type that has constructors, conversion operators, and also provides overloaded arithmetic operators such as follows: +``` +typedef struct __CUDA_ALIGN__(2) { + unsigned short x; +} __half_raw; + + +struct __CUDA_ALIGN__(2) __half { +protected: + unsigned short __x; +public: + // constructors and conversion operators from/to + // __half_raw and other built-in data types +} + +typedef __half half; + +__device__ __forceinline__ +__half operator+(const __half &lh, const __half &rh) { + return __hadd(lh, rh); +} + +// Other overloaded operators +``` +This new design makes `c = a + b` work correctly for CUDA half data type. + +## Implementation +The float16 class holds a 16-bit `uint16_t` data internally. +``` +struct float16 { + uint16_t x; +}; +``` + +float16 supports the following features: + - constructors / assignment operators that take input from primitive data types including bool, integers of various length, float, and double. + - constructors / assignment operators that take input from `__half` on cuda, `float16_t` on ARM, and `Eigen::half` on Eigen. + - conversion operators to primitive data types and half precision data types on cuda, ARM and Eigen. + - overloaded arithmetic operators for cuda, arm, and non-arm cpu, respectively. These operators will take advantage of the cuda and ARM intrinsics on the corresponding hardware. + +To support the above features, two fundamental conversion functions are provided: +``` +float16 float_to_half_rn(float f); // convert to half precision in round-to-nearest-even mode +float half_to_float(float16 h); +``` +which provides one-to-one conversion between float32 and float16. These twos functions will do different conversion routines based on the current hardware. CUDA/ARM instrinsics will be used when the corresonding hardware is available. If the hardware or compiler level does not support float32 to float16 conversion, software emulation will be performed to do the conversion. + +## float16 inference +In Fluid, a neural network is represented as a protobuf message called [ProgramDesc](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/concepts/program.md), whose Python wrapper is a [Program](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/modules/python_api.md#program). 
The basic structure of a program is some nested [blocks](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/modules/python_api.md#block), where each block consists of some [variable](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/modules/python_api.md#variable) definitions and a sequence of [operators](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/modules/python_api.md#operator). An [executor](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/concepts/executor.md) will run a given program desc by executing the sequence of operators in the entrance block of the program one by one. + +### Operator level requirement +Each operator has many kernels for different data types, devices, and library types. The operator will select the appropriate kernel to run based on, among other things, the data type of the input variables. By default, every Fluid operator has a float data type kernel that takes float variables as input and generates float output. + +This means that if we provide float input to the first operator in a program, then each opeartor will use float kernel to compute float output and send it as input to the next operator to trigger the float kernel. Overall, the program will run in float mode and give us a final output of float data type. + +The same principle applies if we want a program to run in float16 mode. We provide input variable of float16 data type to the first operator, and then one by one, each operator in the program will run the float16 kernel (provided that each operator in this program has float16 kernels registered) until we finally obtain a float16 output variable. + +So the preliminary requirement for float16 inference is to add float16 kernel to operators that are needed in a specific kind of program. For example, float16 inference on an image classification neural network like Vgg or Resnet, typically requires the following operators to have float16 kernels: convolution, pooling, multiplication, addition, batch norm, dropout, relu, and softmax. Please refer to [new_op_en](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/dev/new_op_en.md) for details of how to add new kernels to an operator. + +### Variable level requirement +Operators including convolution and multiplication (used in fully-connected layers) takes as input not only the variables generated by the preceding operators but also [parameter](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/modules/python_api.md#parameter) variables, which contains the trained weights to apply to the input data. These weights are obtained in the Fluid training process and are by default of float data type. + +When these operators are running in float16 mode, the float16 kernel requires those parameter variables to contain weights of Fluid float16 data type. Thus, we need a convenient way to convert the original float weights to float16 weights. + +In Fluid, we use tensor to hold actual data for a variable on the c++ end. [Pybind](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/pybind/tensor_py.h) is used to bind c++ tensors of certain data type with numpy array of the correponding numpy data type on the Python end. Each common c++ built-in data type has a corresponding numpy data type of the same name. However, since there is no built-in float16 type in c++, we cannot directly bind numpy float16 data type with the Fluid float16 class. 
Since both Fluid float16 and numpy float16 use uint16 as the internal data storage type, we use c++ built-in type `uint16_t` and the corresponding numpy uint16 data type to bridge the gap via [Pybind](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/pybind/tensor_py.h). + +The following code demonstrates how to do the tensor conversion. +```Python +# var is the variable of float weights +# tensor is a numpy array of data copied from the tensor data in var +# fp16_var is the variable that will contain float16 weights converted from var +tensor = numpy.array(var.get_tensor()) +fp16_tensor = fp16_var.get_tensor() + +# After the original tensor data is converted to numpy float16 data type, +# view(numpy.uint16) is used so that the internal memory of the numpy array +# will be reinterpreted to be of uint16 data type, which is binded to +# Fluid float16 class via pybind with the help of uint16_t built-in c++ type +fp16_tensor.set(tensor.astype(numpy.float16).view(numpy.uint16), GPUPlace) +``` + +### Consistent API requirement +The basic inference in float16 mode requires users to feed input and obtain output both of float16 data type. However, in this way, the inference APIs are not consistent between float16 mode and float mode, and users may find it confusing and diffcult to use float16 inference since they need to do extra steps to provide float16 input data and convert float16 output data back to float. To have consistent API for different inference modes, we need to transpile the program desc in some way so that we can run float16 inference by feeding and fetching variables of float data type. + +This problem can be solved by introducing a type-casting operator which takes an input variable of certain data type, cast it to another specified data type, and put the casted data into the output variable. Insert cast operator where needed can make a program internally run in float16 mode. + +### float16 transpiler +Put all the above requirements in mind, we designed a float16 inference transpiler that can tranpile a float32 mode inference program desc to a float16 mode one. + +Given a float inference program and the corresponding variables of float32 weights in the [scope](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/concepts/scope.md), +this transpiler mainly does the following modifications: + +1. Insert cast operators at the beginning of the program so that the input float data will be converted to float16 data type before feeding to subsequent operators to invoke the float16 kernel. + +2. Insert cast operators at the end of the program so that the output float16 data will be converted back to float data type before users obtain the result. + +3. For each parameter variable of float weights, create in the scope a corresponding variable of float16 weights which are converted from the corresponding float weights and add this new float16 variable to the program. + +4. Update the operator information in the program so that each relevant operator use the newly created float16 variable instead of its float counterpart. 
+ +Below is an example of usage: +```Python +# Get the float inference program +[float_inference_program, feed_target_names, + fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) + +# Prepare the float input data +tensor_img = numpy.random.rand(1, 3, 32, 32).astype(numpy.float32) + +# Running inference_program in float mode +float_results = exe.run(float_inference_program, + feed={feed_target_names[0]: tensor_img}, + fetch_list=fetch_targets) + +# Use float16 transpiler to speedup +float16_inference_program = float_inference_program.clone() +t = fluid.InferenceTranspiler() +t.float16_transpile(float16_inference_program, GPUPlace) + +# Running +float16_results = exe.run(float16_inference_program, + feed={feed_target_names[0]: tensor_img}, + fetch_list=fetch_targets) +``` + +As we can see from the example above, users can simply use the `float16_transpile` method provided by the infernece transpiler class on an existing float inference program to run inference in float16 mode. + +### Speedup on GPU +Currently, Fluid inference in float16 mode is only supported on Nvidia GPU device. There is no motivation to support float16 inference on non-ARM CPUs because float16 is not natively supported there and float16 calculation will only be slower than its float counterpart. + +Nvidia started to support its native float16 data type (which has the same internal memory representation as Fluid float16 class) on CUDA 7.5. Moreover, float16 speedups on common computational intensive tasks including GEMM (general matrix-matrix multiplication) and convolution are supported since cublas 7.5 and cuDNN 5.0. + +Recently, the introduction of [tensor core](https://devblogs.nvidia.com/programming-tensor-cores-cuda-9/) in volta architecture GPUs and the support of tensor core calculation in CUDA 9.0 and cuDNN 7.0 make float16 truly superior to float in certain deep learning applications. Please refer to this [benchmark report](https://github.com/kexinzhao/Paddle_benchmark/blob/master/float16_benchmark.md) for more details. diff --git a/doc/paddle/design/data_type/index_cn.rst b/doc/paddle/design/data_type/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b60167b6b1599df69dfc5073ebf32bdbb0a316ec --- /dev/null +++ b/doc/paddle/design/data_type/index_cn.rst @@ -0,0 +1,7 @@ +数据类型 +------------ + +.. toctree:: + :maxdepth: 1 + + float16.md diff --git a/doc/paddle/design/data_type/index_en.rst b/doc/paddle/design/data_type/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..6a88d17943f49134a2d00363845e919537ff4545 --- /dev/null +++ b/doc/paddle/design/data_type/index_en.rst @@ -0,0 +1,7 @@ +Data Type +------------ + +.. toctree:: + :maxdepth: 1 + + float16.md diff --git a/doc/paddle/design/dist_train/README.md b/doc/paddle/design/dist_train/README.md new file mode 100644 index 0000000000000000000000000000000000000000..fe1283e51d537b0580301eca0a4be3d4f35d9120 --- /dev/null +++ b/doc/paddle/design/dist_train/README.md @@ -0,0 +1,57 @@ +## Distributed training overview doc + +Currently Paddle Fluid use parameter server architecture to support distributed training. + +For synchronous and asynchronous training, the differences are mostly in the logic of parameter server. Now we have already support synchronous training. + +### Synchronous training + +The training process of synchronous training is: + +![synchronous distributed training](./src/sync_distributed_training.png) + +1. Pserver + 1. 
sets `barrier_condition_` to 0 and waits for trainers to send gradients.
1. Trainer
   1. The trainer reads a minibatch of data and runs forward-backward with its local parameter copy to get the gradients for the parameters.
   1. The trainer uses the split op to split all the gradients into blocks. The split method is determined at compile time.
   1. The trainer uses send_op to send all the split gradients to the corresponding parameter servers.
   1. After the trainer has sent all the gradients, it sends a `BATCH_BARRIER_MESSAGE` to all pservers.
   1. The trainer calls GetVariable on the pservers and waits for `barrier_condition_` on each pserver to become 1.
1. Pserver
   1. The pserver counts the number of `BATCH_BARRIER_MESSAGE`s received.
   1. When the count of `BATCH_BARRIER_MESSAGE` equals the number of trainers, the pserver knows it has received all gradients from all trainers.
   1. The pserver runs the optimization block to optimize the parameters.
   1. After optimization, the pserver sets `barrier_condition_` to 1.
   1. The pserver waits for `FETCH_BARRIER_MESSAGE`.
1. Trainer
   1. The trainer uses GetVariable to get all the parameters from the pservers.
   1. The trainer sends a `FETCH_BARRIER_MESSAGE` to each pserver.
1. Pserver
   1. When the number of `FETCH_BARRIER_MESSAGE`s reaches the number of trainers, the pserver knows all the parameters have been fetched. It then goes back to step 1 and sets `barrier_condition_` to 0.

### Asynchronous training
In the above process, there are two barriers for all trainers to synchronize with each other. In asynchronous training, these two barriers are not needed; the trainer can just send gradients to the pservers and then get the parameters back.

The training process of asynchronous training can be:

![asynchronous distributed training](./src/async_distributed_training.png)

1. Pserver:
   1. Each parameter has a queue to receive its gradients from trainers.
   1. Each parameter has a thread that reads data from the queue and runs the optimize block, using the gradient to optimize the parameter.
   1. An independent thread handles the RPC call `GetVariable` for trainers to get parameters back. (Maybe here we should use a thread pool to speed up fetching the parameters.)

1. Trainer:
   1. The trainer reads a batch of data and runs forward and backward with its local parameter copy to get the gradients for the parameters.
   1. The trainer splits all gradients into blocks and then sends these gradient blocks to the pservers (each pserver puts them into its queue).
   1. The trainer gets all the parameters back from the pservers.

### Note:
There are also some conditions that need to be considered. For example:

1. Whether a trainer needs to wait for the pserver to apply its gradients before getting the parameters back.
1. Whether we need a lock between parameter update and parameter fetch.
1. Whether one parameter must live entirely on one server, or whether it can be split and sent to multiple parameter servers.

The above architecture of asynchronous training can support different modes; we can test these problems in detail in the future.
diff --git a/doc/paddle/design/dist_train/async_update.md b/doc/paddle/design/dist_train/async_update.md
new file mode 100644
index 0000000000000000000000000000000000000000..38cb6f8e791ee0744503d6f03e5824a412c0a6d7
--- /dev/null
+++ b/doc/paddle/design/dist_train/async_update.md
@@ -0,0 +1,61 @@
# Design Doc: Asynchronous Update With Distributed Training

## Background

For the typical synchronous distributed training, some significant steps are as follows:

1. A trainer process computes the gradients and **sends** them to the parameter server (PS) nodes.
1. After a PS node has received the gradients from all the trainers, it aggregates the
gradient variables for the same parameter into one gradient variable, applies the aggregated
gradient to the respective parameter, and finally uses an optimization algorithm (SGD, Momentum, ...)
to update the parameters.
1. The trainer waits for the PS to finish the optimization stage and then GETs the parameters from the PS,
so all the trainers end up with the same parameters.

In Synchronous Distributed Training, there is a **barrier** on each PS that waits until all trainer processes
have completed running the current mini-batch. After that, all trainers can continue to run the next
mini-batch. So the overall performance of Synchronous Distributed Training depends
on the slowest node.

In Asynchronous Distributed Training, we don't need to wait for a global mini-batch; the optimizer on
the PS runs immediately when a gradient is uploaded to the PS from one trainer. This mode scales
better and yields higher throughput. In this design doc, we will introduce how to
implement Asynchronous Distributed Training based on PaddlePaddle Fluid.

## Design



The figure above gives a global view of the asynchronous update process; we use
the parameter `w1` as an example to introduce the steps:
1. The gradient variables may be distributed on different GPU cards; aggregate them once they
are all calculated.
1. Split the gradient variable into multiple blocks according to the number of PS
instances and then send them.
1. The PS runs an `Optimize Block`, using a specified optimization algorithm to update
the specified parameter.
1. The trainer fetches the latest parameter from the PS before running the forward Op which depends
on that parameter.
1. Broadcast the received variable onto multiple GPU cards and continue to run the next
mini-batch.

### Trainer

- For multi-device distributed training, we first need to aggregate the gradient
variables placed on different devices and then schedule a `SendVars` operator to
send the gradient variables to the multiple PS instances.
- Schedule a `FetchVars` operator to fetch the latest parameters from the PS before running
the forward ops.
- There could be a large number of gradient variables to send, so we need a separate
thread pool (IO thread pool) whose number of schedulable threads is larger than that of the
computing thread pool, to avoid competing for thread resources with computation.

### Parameter Server



- Multiple trainer instances may want to optimize the same parameter at
the same time; to avoid races, we need one `BlockingQueue` for each gradient
variable so the gradients are processed one by one.
- We need a `Map` structure that maps a gradient variable name to the `OptimizeBlock` which
can optimize the respective parameter.
diff --git a/doc/paddle/design/dist_train/dist_train_nccl2.md b/doc/paddle/design/dist_train/dist_train_nccl2.md
new file mode 100644
index 0000000000000000000000000000000000000000..b8b8427811cddcddf872db5badfd37c96a76c3e3
--- /dev/null
+++ b/doc/paddle/design/dist_train/dist_train_nccl2.md
@@ -0,0 +1,35 @@
# Distributed Training with NCCL2

We design a pattern that can enable training with `ParallelExecutor` and
use [NCCL2](https://developer.nvidia.com/nccl) as its collective
communication library.

In `ParallelExecutor` we can use `AllReduce` or `Reduce` and `Broadcast`
to do multi-GPU training.
And if we initialize NCCL2 communicators as +ranks in a distributed environment, we can simply run the `ParallelExecutor` +as a distributed program! The only thing that may be different than in +the single node version is that we need to broadcast the NCCL unique ID +to all the nodes and initialize communicators using that ID, so NCCL2 +can know each other as ranks. + +To achieve this feature, we introduce a new operator: `gen_nccl_id` op, +so we are ***not*** "bind to" running NCCL2 with MPI, we can run it in +whatever platform you like. + +It has two running modes: + +1. Generate and broadcast mode, which should be used on trainer 0; +1. Listen and fetch mode, which should be used on trainers other than 0. + +In both two modes, this op can save the NCCL ID into current scope as a +persistable variable, Then we can insert this op at the end of +"startup program" of fluid, so that all workers can get the same ID to +initialize NCCL communicator objects. + + + +The above figure indicates the general process when training with NCCL2 +distributed. Each trainer has the number of communicators equal to the +number of GPUs, but the ranks should match the global ranks number: here +we have total 8 GPUs, so `nranks==8`, for each trainer, the ranks should +be from 0 ~ 3 on trainer 0 and 4 ~ 7 on trainer 1. diff --git a/doc/paddle/design/dist_train/distributed_architecture.md b/doc/paddle/design/dist_train/distributed_architecture.md new file mode 100644 index 0000000000000000000000000000000000000000..c89b79e72cd16067cdf7f166bd45410b4d359283 --- /dev/null +++ b/doc/paddle/design/dist_train/distributed_architecture.md @@ -0,0 +1,197 @@ +# Design Doc: Fluid Distributed Training Architecture + +## Abstract + +PaddlePaddle version 0.10.0 uses the "trainer-parameter server" architecture. We run multiple instances of trainers (where each trainer runs the same model) and parameter servers for distributed training. This architecture serves well, but has few limitations: + +1. There is a need to write special code that handles tasks which should only be run on a single trainer. E.g., initializing the model, saving the model etc. + +2. Model parallelism is hard: It would need all the if-else branches conditioned on the trainer ID to partition the model onto the trainers, and eventually manually writing out the inter-model-shard communication code to communicate between different trainers. + +3. The user can not directly specify the parameter update rule: This would need to modify the parameter server code and compile a new binary. This makes things more complicated for researchers: A lot of extra effort is required to make this work. Besides, the training job submission program may not allow running arbitrary binaries. + +This design doc discusses PaddlePaddle's new distributed training architecture that addresses the above mentioned limitations. + +## Analysis + +The assumption is that the user writes the trainer program in either Python or C++. + +### Limitation 1 + +There are two basic functionalities in the trainer program: + +1. The training logic such as loading / saving the model and printing out the logs. +2. The neural network definition such as the definition of the data layer, the fully connected layer, the cost function and the + optimizer. + +When we train using PaddlePaddle v0.10.0 in a distributed fashion, multiple instances of the same Python code are run on different nodes, hence both: the +training logic as well as the neural network computation logic, is replicated. 
+ +The tasks that only need to be run once belong to the training logic. Hence if we only replicate the neural network computation part, and do **not** +replicate the training logic, the limitation mentioned above can be avoided. + +### Limitation 2 + +Model parallelism means that a single model is partitioned into different components and each node runs one of the component separately. This comes at the extra cost of managing the +inter-model-shard communication between nodes. + +PaddlePaddle should ideally be able to modify the neural network computation and figure out the support for model parallelism automatically. However, the +computation is only specified in Python code which sits outside of PaddlePaddle, hence PaddlePaddle can not support the feature in this setup. + +Similar to how a compiler uses an intermediate representation (IR) so that the programmer does not need to manually optimize their code for most of the cases, we can have an intermediate representation in PaddlePaddle as well. The compiler optimizes the IR as follows: + + + +PaddlePaddle can support model parallelism by converting the IR so that the user no longer needs to manually perform the computation and operations in the Python component: + + + +The IR for PaddlePaddle after refactoring is called a `Block`, it specifies the computation dependency graph and the variables used in the computation. + +### Limitation 3 + +The user can not directly specify the parameter update rule for the parameter server in the Python module, since the parameter server does not use the same computation definition as the trainer. Instead, the update rule is baked inside the parameter server. The user can not specify the update rule explicitly. + +This could be fixed by making the parameter server also run an IR, which can be different to the trainer side +For a detailed explanation, refer to this document - +[Design Doc: Parameter Server](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/dist_train/parameter_server.md) + +## Distributed Training Architecture + +The revamped distributed training architecture can address the above discussed limitations. Below is the illustration of how it does so: + + + +The major components are: *Python API*, *Distribute Transpiler* and *Remote Executor*. + +### Python API + +Python API is the Python library that user's Python code invokes, to read the data, build the neural network topology, and start training, etc. + +```Python +images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype='float32') +label = fluid.layers.data(name='label', shape=[1], dtype='int64') +... +predict = fluid.layers.fc(input=conv_pool_2, size=10, act="softmax") +cost = fluid.layers.cross_entropy(input=predict, label=label) +avg_cost = fluid.layers.mean(x=cost) +optimizer = fluid.optimizer.Adam(learning_rate=0.01) +optimizer.minimize(avg_cost) + +train_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.mnist.train(), buf_size=500), + batch_size=BATCH_SIZE) + +place = fluid.CPUPlace() +exe = fluid.Executor(place) + +for pass_id in range(10): + for data in train_reader(): + loss, acc = exe.run(trainer_prog, + feed=feeder.feed(data), + fetch_list=[avg_cost]) +``` + +The code above is a typical local training program, the "Training Program" is built using helper functions such as +`fluid.layer.fc`. The training is done by calling `Executor.run` +iteratively. 
+ +For more details, the implementation of IR is [Program](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/concepts/program.md), and `ProgramDesc` is the protobuf type. + +[Executor](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/concepts/executor.md) simply runs the `ProgramDesc`. For local training you generally use +`Executor` to run the program locally. For any kind of distributed training, you can use +`RemoteExecutor` to specify desired distributed training method with some optional arguments. + +### Distributed Transpiler + +The Distributed Transpiler automatically converts the IR (in protobuf format) to partitioned IRs. Then +the Remote Executor dispatches the new IRs to Remote Executors across the cluster. +Below are the steps that are followed : + +1. User only need to change `Executor` to `RemoteExecutor` to change local program to distributed program. +1. `RemoteExecutor` calls `Distributed Transpiler` to "transpile" user's program to several IRs representing a + distributed training program: + 1. Parse configurations from `RemoteExecutor`. + 1. Determine the type of distributed program, can be DataParallelism, ModelParallelism or Streaming. + 1. Partition the `ProgramDesc` according to type and add `send` / `recv` OP pair on the boundaries. Take + DataParallelism type for example, it removes the optimization operators and add a `send` OP to the + "trainer" role, then add the optimization operators to the parameter server role within the `recv` OP. +1. Dispatch the partitioned graph to different `RemoteExecutor` in the cluster. +1. `RemoteExecutor` on each node run the received `ProgramDesc` utill the end. + + +### RemoteExecutor + +As shown in the graph, `RemoteExecutor.run` sends the IR to the cluster for Execution. +You can also use parameter `fetch_list` to interactively fetch variable back to local for +log printing. + +The Python `RemoteExecutor` is derived from `Executor` class. + +```python +exe = RemoteExecutor( + feed=feeder.feed(data), + fetch_list=[avg_cost], + job_desc=JobDesc( + jobname, + num_trainer, + num_pserver, + cpu_per_trainer, + gpu_per_trainer, + mem_per_trainer, + cpu_per_pserver, + mem_per_pserver + )) +for data in train_reader(): + loss, acc = exe.run(trainer_prog, + feed=feeder.feed(data), + fetch_list=[avg_cost]) +``` + +`JobDesc` object describe the distributed job resource specification to run on +Cluster environment. + + + +`RemoteExecutor.run` sends the `ProgramDesc` and +[TrainingJob](https://github.com/PaddlePaddle/cloud/blob/unreleased-tpr/doc/autoscale/README.md#training-job-resource) +to a server in the cluster which executes `RemoteExecutor.listen`. This server is responsible +to start the final Kubernetes Jobs to run the different role of `ProgramDesc` from `ConfigMap`. + + +### Placement Algorithm + +Our first implementation will only support "trainer-parameter server" placement: the parameters, initializers, and optimizers are all placed on the PaddlePaddle runtimes with the parameter server role. Everything else will be placed on the PaddlePaddle runtimes with the trainer role. This has the same functionality as the "trainer-parameter server" architecture of PaddlePaddle v0.10.0, but is more generic and flexible. + +In the future, a more general placement algorithm should be implemented, which makes placements according to the input IR, and a model of device computation time and device communication time. Model parallelism requires the generic placement algorithm. 
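As a rough sketch of the simple heuristic described above (the `op` dictionaries, the `param_names` argument, and the `place_ops` helper are made up for illustration and are not the actual transpiler API):

```python
# Hypothetical "trainer-parameter server" placement: ops that initialize or update
# parameter variables go to the pserver role, everything else to the trainer role.
PARAM_OP_TYPES = {'uniform_random', 'fill_constant', 'sgd', 'adam'}  # initializers / optimizers

def place_ops(ops, param_names):
    placement = {}
    for op in ops:
        touches_param = any(v in param_names for v in op['inputs'] + op['outputs'])
        role = 'pserver' if touches_param and op['type'] in PARAM_OP_TYPES else 'trainer'
        placement[op['name']] = role
    return placement

# Example: the forward computation stays on the trainer, the sgd update goes to the pserver.
ops = [
    {'name': 'fc_0', 'type': 'mul', 'inputs': ['pixel', 'w1'], 'outputs': ['h1']},
    {'name': 'sgd_0', 'type': 'sgd', 'inputs': ['w1', 'w1@GRAD'], 'outputs': ['w1']},
]
print(place_ops(ops, param_names={'w1'}))
```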
+ + +### Local Training Architecture + +The local training architecture will be the same as the distributed training architecture, the difference is that everything runs locally, and there is just one PaddlePaddle runtime: + + + + +### Training Data + +In PaddlePaddle v0.10.0, training data is typically read +with `data reader` from Python. This approach is +no longer efficient when training distributedly since the Python +process no longer runs on the same node with the trainer processes, +the Python reader will need to read from the distributed filesystem +(assuming it has the access) and send to the trainers, doubling the +network traffic. + +When doing distributed training, the user can still use Python data +reader: the training data are sent with `Executor.run`. However, should +be used for debugging purpose only. The users are encouraged to use +the read data OPs. + + +## References: + +[1] [TensorFlow: Large-Scale Machine Learning on Heterogeneous Distributed Systems](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/45166.pdf) + +[2] [TensorFlow: A System for Large-Scale Machine Learning](https://www.usenix.org/system/files/conference/osdi16/osdi16-abadi.pdf) diff --git a/doc/paddle/design/dist_train/distributed_lookup_table_design.md b/doc/paddle/design/dist_train/distributed_lookup_table_design.md new file mode 100644 index 0000000000000000000000000000000000000000..b2b3396ed547ea7bc1bc5e754c89e03ebc943814 --- /dev/null +++ b/doc/paddle/design/dist_train/distributed_lookup_table_design.md @@ -0,0 +1,89 @@ +# Design Doc: Distributed Lookup Table Operator + +A distribute lookup table operator in PaddlePaddle where the table could be out +of the memory of a computer. + +## Background + +A lookup table operator is well-used in deep learning for learning the +representation, or the +[*embedding*](http://www.cs.toronto.edu/~fritz/absps/ieee-lre.pdf), of +symbols. + +### The Forward Algorithm + +The forward algorithm of the lookup table is a multiplication of the +input vector x and the lookup table matrix W: + +$$y = x * W$$ + +When x is a sparse vector of symbols, the above multiplication +simplifies into looking up rows in W that correspond to symbols in x, +denoted by W(x). Please be aware that W could be huge and out of the +memory, so we'd need a distributed storage service, which supports the +lookup of rows. + +The following figure illustrates the multiplication of x with two +non-zero elements, or say two symbols, and a lookup table W: + +![lookup table](./src/lookup_table.png) + +### The Backward Algorithm + +The backward algorithm computes W'(x) using W(x). W'(x) has the same +the scale of size as W(x) and is much smaller than W. + +To optimize W given W', we can do simple SGD update: + +$$W = f(W') = \lambda * W'$$ + +or some more sophisticated algorithms that rely on both W' and W: + +$$W = f(W, W')$$ + +The following figure illustrates the backward pass of the lookup +operator: ![lookup table training](./src/lookup_table_training.png) + +## Distributed Lookup Table +### Problem 1: The lookup table may be very large. + + In the condition like the search engine and recommendation system, the number of feature Id may be very large, say 100,000,000,000, then for a float value lookup table of size 8, the total size of the table is: + + ``` + 100,000,000,000 * 8 * 4(Bytes) = 2980.23 GB + ``` + +### Solution: Distributed storage + +1. 
Paddle use [SelectedRows](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/modules/selected_rows.md) as the storage format for the lookup table, the lookup table parameter will be split to multi-machine according to the hash of the feature ID, and data will also be split and send to the same machine to prefetch the parameter. + +1. For common parameters, the trainer will get the whole parameter for training, but for the big lookup table, the trainer can not store the whole parameter. Because the input data feature is very sparse, every time we only need a few parameters for training, so we use `prefetch_op` to only prefetch the parameter needed to trainer. + +### Problem 2. The Id in the lookup table is not sure before training. + + The feature Id is calculated by the hash function because the feature data source is so large, we can not get all the Id before training. So we can not initialize the table before training. + +### Solution: Id auto growth + +At the beginning of training, paddle only malloc the memory for the lookup table at parameter server side, the Id and it's value will not be initialized. During training, when a parameter server received an Id, if it is already in the lookup table, it will return the existing parameter, if the Id does not exist, paddle will add it into the lookup table and initialize the value for it. + +### Problem 3: parameter load and save + +For common parameters, paddle use trainer to save and load them. But for distributed lookup table, trainer cannot do this because it's large size. + +### Solution: Parameter server side save and load + +Paddle support parameter server side save and load for distribute lookup table. Each machine of parameter servers will only save and load part of the whole table. + +## Architecture +The whole architecture of the distribute lookup table is as below: + +### Training steps: +1. Read a batch of data, the data is feature ids. +1. The input ids will be split by `split_ids_op` with the same hash function of the lookup table. +1. The `prefetch_op` use the split result to prefetch parameters back from the lookup table. +1. Run forward-backward to get the gradient of the lookup table. +1. `split_ids_op` split the gradient and then use `send_op` to the parameter server. +1. parameter server update the table with the received gradient. + +![distribute lookup table](./src/distributed_lookup_table.jpeg) diff --git a/doc/paddle/design/dist_train/distributed_traing_review.md b/doc/paddle/design/dist_train/distributed_traing_review.md new file mode 100644 index 0000000000000000000000000000000000000000..c09b7c99159ace9b3df989f803ede20bc3585d92 --- /dev/null +++ b/doc/paddle/design/dist_train/distributed_traing_review.md @@ -0,0 +1,44 @@ +# Parallelism, Asynchronous, Synchronous, Codistillation + + +For valuable models, it’s worth using more hardware resources to reduce the training time and improve the final model quality. This doc discuss various solutions, their empirical results and some latest researches. + +# Model Parallelism +In some situations, larger and more complex models can improve the model quality. Sometimes, such models cannot fit in one device. Sometimes, parts of the model can be executed in parallel to improve speed. Model Parallelism address the issues by partitioning a single model and place the shards on several devices for execution. 
+ +A common way of model parallelism is partition the logic of “gradient application” to parameter servers, while leaving the forward and backward computation at training servers. + +More flexible model parallelism is challenging. For example, multi-level-single-direction LSTM can be partitioned by layers, while such solution is not helpful for bi-directional LSTM. Different models can have quite different ways of partitioning and the benefits also depend on the underlying hardware. Framework needs to provide flexible APIs for user to define the customized partition scheme. For example, in TensorFlow, user can use tf.device() to specify the device placement. In MxNet, mx.AttrScope(ctx_group='dev1') does similar things. Recent research proposes to automatically find the optimal partition scheme with Reinforcement Learning, which is essentially solution space search algorithm that could cost a lot of extra hardware sources. + +# Data Parallelism +Data Parallelism runs the same model on multiple devices, each taking in a partition of the input batch. It’s more commonly used for a few reasons. It generally applies to common SGD mini-batch training. Compared with model parallelism, which requires users to carefully partition their model and tune for good performance, data parallelism usually involves no more than calling an extra API and speed up is more predictable. + +# Asynchronous Training +In asynchronous training, it usually involves a set of trainers and a set of parameter servers. The parameter servers collectively hold a single copy of shared parameters. While the trainers each holds a unique copy of model and trains the model independently. Each trainer pulls parameters from parameter servers and sends gradients to the parameter servers independently. Similarly the parameter servers applies the gradients to parameters as soon as the gradients are received and sends parameters whenever they are requested. + +In theory, asynchronous training is not safe and unstable. Each trainer is very likely using stale copy of parameters and parameters are also likely to apply stale gradients. However, in practice, especially for large-scale nonconvex optimization, it is effective [1]. Compared with synchronous solution, which will be discussed later, asynchronous distributed training is easier to implement and scales to a few dozen workers without losing much performance due to network communication or other overhead. Besides, asynchronous training can make progress even in case of random trainer failure in the cluster. + +Many production models, such as [3], are trained with distributed asynchronous solutions due to its scalability and effectiveness in practice. However, asynchronous training has its limitations. Usually, it’s not as stable as synchronous training. A warm-up phase is sometimes needed. Learning rate is usually smaller compared with synchronous training and decay is also often needed. Normally, asynchronous training doesn’t scale beyond 100 trainers. In other words, when putting more trainers beyond that, the model cannot converge faster. + +# Synchronous Training +Unlike asynchronous training, synchronous training requires step barriers. Parameter servers needs to wait for gradients from all trainers before they are applied to parameters and trainers will always pull the latest parameters. + +An obvious advantage of synchronous training is that the behavior is more clearly defined. Usually, it's more stable than asynchronous training. 
Learning rate can be set larger and for some vision tasks, the final accuracy can be slightly higher. (In my practical experience, for some models, it can actually be worse). + +Synchronous training usually faces scalability and performance issues, if not carefully implemented or deployed. In [2], native synchronous training can be 20%~40% slower than asynchronous training. A common trick to avoid slowness, discussed in [1] and [2], is to have backups. N+M replicas are scheduled while only the first N is needed for the training step the proceed. + +Similar to asynchronous training, the benefit of synchronous training diminishes quickly. Depending on the models, increasing the number of trainers (effectively batch size) beyond a point won’t delivers faster converge time or better final model quality. + +# Codistillation +Codistillation is a technique that tries to scale the training further. A few training instance (each training instance can be distributed) are performed during the same period. Each training instance has extra losses that comes from the prediction of other training instances. (likey teacher and student) The training process converges faster and usually converge to a better model quality. [4] + + +# Reference + +[1] Jeffrey Dean, Greg Corrado, Rajat Monga, Kai Chen, Matthieu Devin, Mark Mao, Andrew Senior, Paul Tucker, Ke Yang, Quoc V Le, et al. Large scale distributed deep networks. + +[2] Jianmin Chen, Rajat Monga, Samy Bengio, and Rafal Jozefowicz. Revisiting distributed synchronous SGD. + +[3] Yonghui Wu, Mike Schuster, Zhifeng Chen, Quoc V Le, Mohammad Norouzi, Wolfgang Macherey, Maxim Krikun, Yuan Cao, Qin Gao, Klaus Macherey, et al. Google’s neural machine translation system: Bridging the gap between human and machine translation. + +[4] LARGE SCALE DISTRIBUTED NEURAL NETWORK TRAINING THROUGH ONLINE DISTILLATION diff --git a/doc/paddle/design/dist_train/index.html b/doc/paddle/design/dist_train/index.html new file mode 100644 index 0000000000000000000000000000000000000000..bc9fff28bab3b618d59bca40bd63e766e92b07bc --- /dev/null +++ b/doc/paddle/design/dist_train/index.html @@ -0,0 +1,121 @@ + + + + + + + + + + + + + + + + + +
+
+ + + + + + + diff --git a/doc/paddle/design/dist_train/index_cn.rst b/doc/paddle/design/dist_train/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ed6f3dda271d2de58d92aa7ec804fa9e68dfc48a --- /dev/null +++ b/doc/paddle/design/dist_train/index_cn.rst @@ -0,0 +1,9 @@ +分布式训练 +------------ + +.. toctree:: + :maxdepth: 1 + + distributed_architecture.md + distributed_lookup_table_design.md + parameter_server.md diff --git a/doc/paddle/design/dist_train/index_en.rst b/doc/paddle/design/dist_train/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..f84688f168021113bd933802709bcd787b474bca --- /dev/null +++ b/doc/paddle/design/dist_train/index_en.rst @@ -0,0 +1,9 @@ +Distributed Training +--------------------- + +.. toctree:: + :maxdepth: 1 + + distributed_architecture.md + distributed_lookup_table_design.md + parameter_server.md diff --git a/doc/paddle/design/dist_train/mpi_enabled_design.md b/doc/paddle/design/dist_train/mpi_enabled_design.md new file mode 100644 index 0000000000000000000000000000000000000000..4ad3afc7b7522c60460c6f1f387f9415d3738778 --- /dev/null +++ b/doc/paddle/design/dist_train/mpi_enabled_design.md @@ -0,0 +1,46 @@ +# MPI-enabled PaddlePaddle Design doc + +# Background +When we do distribute multi GPU training, the communication overhead between servers become the major bottleneck, because of the following reasons: +1. Must copy at least once from GPU to CPU memory so that the data can be ready to transfer. And for the pserver side, copy data from CPU to GPU introduce more overhead. +2. GPU->CPU data transfer is 10 times slower than data transfer between GPUs or between PCIe devices. +3. TCP connections can not make full use of RDMA 100Gb devices. + +We will use OpenMPI API to PaddlePaddle, which can bring two benefits to PaddlePaddle: +1. Enable RDMA with PaddlePaddle, which bring high-performance low latency networks. +2. Enable GPUDriect with PaddlePaddle, which bring the highest throughput and lowest latency GPU read and write. + +# Change list +* Compile args: Need add compile args to enable MPI support. +* Execute args: Need add execute args to assign when and how to use MPI operations. +* New ops: Need new op ```mpi_send_op``` and ```mpi_listenandserve_op``` to support MPI send and receive. +* Transpiler optimized: Which can add ```mpi_send_op``` and ```mpi_listenandserve_op``` to the running graph. +* MPI utils package: Need MPI utils package as the low-level API supported. + +## Compile args +Because MPI or CUDA need hardware supported, so we will add compile args to enable MPI support and control compiling.Add ```WITH_MPI``` compile args to control MPI to use or not. If the ```WITH_MPI``` is ```ON```, compile system will find openMPI codes in configuration. We should prepare openMPI environment before compiling. + +## Execute args +Launch the script using the ```mpirun``` launcher, For example: ```mpirun -np 3 -hosts node1,node2,node3 python train.py```. By doing this, We can number the actors (trainer/pserver/master) with o .. (n-1). The node's number is the Rank of the calling process in a group of comm (integer), The MPI processes identify each other using a Rank ID. We have to create a mapping between PaddlePaddle's nodes and their Rank ID so that we can communicate with the correct destinations when using MPI operations. 
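For instance, each process launched by `mpirun` can recover its Rank ID from the standard OpenMPI environment variables `OMPI_COMM_WORLD_RANK` and `OMPI_COMM_WORLD_SIZE` (also used in the transpiler section below); the role mapping in this sketch is hypothetical and only illustrates the idea of mapping ranks to PaddlePaddle nodes:

```python
import os

def get_mpi_rank_info():
    """Read the standard OpenMPI environment variables set by mpirun.

    Returns (rank, world_size); both are None when not launched via mpirun.
    """
    rank = os.environ.get('OMPI_COMM_WORLD_RANK')
    size = os.environ.get('OMPI_COMM_WORLD_SIZE')
    if rank is None or size is None:
        return None, None
    return int(rank), int(size)

# Hypothetical role mapping: rank 0 acts as pserver, the remaining ranks as trainers.
rank, size = get_mpi_rank_info()
if rank is not None:
    role = 'pserver' if rank == 0 else 'trainer'
    print('rank %d of %d runs as %s' % (rank, size, role))
```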
+ +## New ops +We won't replace all the gRPC requests to MPI requests, the standard gRPC library is used for all administrative operations and the MPI API will be used to transfer tensor or selectRows to Pservers. The base of this idea, we create two new operators to handle requests and receives, the two operators are ```mpi_send_op``` and ```mpi_listenandserve_op```. They are a little similar to [send_op](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/send_op.cc) and [listen_and_serv_op](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/listen_and_serv_op.cc), also, We will build a new module to package MPI send and receive process. + +### mpi_send_op +Very similar with ```send_op```, we will replace gRPC code which used to send gradient with ```mpi_module```, at the same time, we will wrap it with ```framework::Async```. + +### mpi_listenandserve_op +Very similar with ```listen_and_serv_op```, we will replace gRPC code which used to receive gradient with ```mpi_module```, at the same time, we will wrap it with ```framework::Async```. + +## Transpiler optimized +**We can get env ```OMPI_COMM_WORLD_SIZE``` and ```OMPI_COMM_WORLD_RANK``` to distinguish use MPI or not, If we use openMPI, the variable in env must exist.** + if confirm to use MPI, we will modify ```send_op``` to ```mpi_send_op``` in distribute_transpiler, and modify ```listenandserve_op``` to ```mpi_listenandserve_op``` also. + +## MPI utils package +In this package, We will write openMPI low-level API to use MPI. +The API included in this package are: +* MPI send and receive module, We will build a new module to package MPI send and receive process. MPI send and receive are different to gRPC, the MPI [recvice](https://www.open-mpi.org/doc/v1.8/man3/MPI_Irecv.3.php) must know receive buffer size and receive buffer element. For this reason, We have to make communications twice, the first one is to send metadata about gradient through gRPC, the second one is the real communication through MPI which send gradient data to mpi_listenandserve_op. +The detailed flow is below: +![](https://github.com/seiriosPlus/Paddle/blob/mpi_enabled/doc/fluid/design/dist_train/src/mpi_module.png) +* MPI global configurations, which store the Rank ID and the mapping in global variables, for example: +gRPC client : MPI nodes :``` 127.0.0.1:32004 : 3 ``` diff --git a/doc/paddle/design/dist_train/multi_cpu.md b/doc/paddle/design/dist_train/multi_cpu.md new file mode 100644 index 0000000000000000000000000000000000000000..38222d083084ebfca3099ce96b47868c42d55101 --- /dev/null +++ b/doc/paddle/design/dist_train/multi_cpu.md @@ -0,0 +1,43 @@ +# Design Doc: Execute the Program with Multi CPU + +## Abstract + +This Design Doc propose an approach to make the user-defined Op graph +running with multi-CPU, we will use an auto transpiler to convert the user-defined +Op graph to a multi-CPU Op graph, and run `ParallelDo` Op to run the graph. + +## Transpiler + + + +After converted: + + + +## Implement + +- `Multi-CPU Transpiler` will convert the graph to a multi-CPU graph + which would be executed with multi-threads. +- `BlockingCounter` will `Init/Decrement` an atomic counter, and Blocking `Wait` + for the atomic counter become `0`: + ```cpp + BlockingCounter bc(thread_count); + for (int i = 0; i < thread_count; ++i) { + thread_pool->Start([&bc] {bc.DecrementCount(); }) + } + bc.Wait(); + ``` +- `ParallelDo` Operator + - Initialize a thread pool which is a Singleton. 
+  - Use a block id as the input, create and run the specified Block in an independent scope
+    for each thread.
+  - Initialize a `BlockingCounter` instance and wait until all threads are done.
+- `Split` Operator will split the Input Tensor into a TensorArray.
+- `Merge` merges all the gradients calculated by the different threads
+  with a `mean/sum/max/min...` method, and then runs the Optimizer Op to optimize `W`.
+
+## TODO
+
+- Improve the optimizer stage with multiple threads, since we could
+  assign the parameters to different threads and execute the
+  optimizer with multiple threads.
diff --git a/doc/paddle/design/dist_train/parameter_server.md b/doc/paddle/design/dist_train/parameter_server.md
new file mode 100644
index 0000000000000000000000000000000000000000..148791b63d435a0b1a953c4fefa25aed579e0484
--- /dev/null
+++ b/doc/paddle/design/dist_train/parameter_server.md
@@ -0,0 +1,106 @@
+# Design Doc: Parameter Server
+
+## Abstract
+
+We propose an approach to implement the parameter server. In this
+approach, there is no fundamental difference between the trainer and
+the parameter server: they both run subgraphs, but subgraphs for
+different purposes.
+
+## Background
+
+The previous implementations of the parameter server do not run a
+fluid sub-program. Parameter initialization, optimizer computation, network
+communication and checkpointing are implemented twice, on both the
+trainer and the parameter server.
+
+It would be great if we could write code once and use it on both the
+trainer and the parameter server, since this reduces code duplication and
+improves extensibility. Given that after the current refactoring we are
+representing everything as a computation graph on the trainer,
+representing everything as a computation graph on the parameter
+server becomes a natural extension.
+
+## Design
+
+### Distributed Transpiler
+
+The *Distributed Transpiler* converts the user-defined fluid program
+into sub-programs to be scheduled on different nodes with the following
+steps:
+
+1. OP placement: the OPs will be placed on different nodes according
+   to a heuristic that minimizes the estimated total computation
+   time. Currently we will use a simple heuristic that puts parameter
+   variables on parameter server workers and everything else on trainer
+   workers.
+1. Add communication OPs to enable the communication between nodes.
+
+We will need these OPs: *Send*, *Recv*, *Enqueue*, *Dequeue*.
+
+Below is an example of converting the user-defined graph to the
+subgraphs for the trainer and the parameter server:
+
+(figure: the user-defined local graph)
+
+After converting:
+
+(figure: the converted distributed graph)
+
+1. The parameter variable W and its optimizer program are placed on the parameter server.
+1. Operators are added to the program.
+   - *Send* sends data to the connected *Recv* operator. The
+     scheduler on the receiving node will only schedule the *Recv* operator
+     to run when the *Send* operator has run (the *Send* OP will mark
+     the *Recv* OP runnable automatically).
+   - *Enqueue* enqueues the input variable; it can block until space
+     becomes available in the queue.
+   - *Dequeue* outputs a configurable number of tensors from the
+     queue. It will block until the queue has the required number of
+     tensors.
+
+### Sparse Update
+
+For embedding layers, the gradient may have many rows containing only 0 when training.
+If such a gradient uses a dense tensor for parameter optimization,
+it could waste memory, slow down the calculations and waste
+bandwidth during distributed training. A small sketch of the row-wise sparse alternative follows.
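To make the waste concrete before introducing the remedy, the sketch below compares a dense embedding gradient with a row-wise sparse representation that keeps only the non-zero rows. It is illustrative only: the sizes and variable names are made up, and NumPy arrays stand in for the framework's tensors.

```python
import numpy as np

# Hypothetical sizes: a 10000-word vocabulary with 8-dim embeddings.
vocab_size, emb_dim = 10000, 8

# In one mini-batch only a few word ids appear, so only those rows
# of the embedding gradient are non-zero.
touched_rows = [3, 42, 9981]
dense_grad = np.zeros((vocab_size, emb_dim), dtype=np.float32)
dense_grad[touched_rows] = np.random.rand(len(touched_rows), emb_dim)

# A row-wise sparse representation keeps only the touched rows and their values.
sparse_rows = touched_rows                  # which rows are non-zero
sparse_values = dense_grad[touched_rows]    # shape: [len(touched_rows), emb_dim]

print("dense elements to send :", dense_grad.size)                        # 80000
print("sparse elements to send:", sparse_values.size + len(sparse_rows))  # 27
```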
+In Fluid, we introduce [SelectedRows](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/modules/selected_rows.md) to represent a list of rows containing +non-zero gradient data. So when we do parameter optimization both locally and remotely, +we only need to send those non-zero rows to the optimizer operators: + + +### Benefits + +- Model parallelism becomes easier to implement: it is an extension to + the trainer - parameter server approach. We can have several "Transpilers" + to achieve different goals. +- User-defined optimizer is easier to add - user can now express it as + a sub-program. +- No more duplication logic inside the trainer and the parameter + server mentioned in the background section. + +### Challenges + +- It is important to balance the parameter shards on multiple + parameter servers. If a single parameter is very big (for example: some + word-embedding, fully connected, softmax layer), we need to + automatically partition the single parameter onto different + parameter servers when possible (only element-wise optimizer depends + on the parameter variable). +- In the "Async SGD" figure, the "W" variable on the parameter server + could be read and written concurrently. See + [here](https://github.com/PaddlePaddle/Paddle/pull/6394) for more + details about concurrent program in Fluid. + +### Discussion + +- Can the Enqueue OP be implemented under our current tensor design + (put the input tensor into the queue tensor)? +- *Dequeue* OP will have variable numbers of output (depending on the + `min_count` attribute), does our current design support it? (similar + question for the *Add* OP) + +### References + +[1] [TensorFlow: Large-Scale Machine Learning on Heterogeneous Distributed Systems](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/45166.pdf) diff --git a/doc/paddle/design/dist_train/src/async_distributed_training.png b/doc/paddle/design/dist_train/src/async_distributed_training.png new file mode 100644 index 0000000000000000000000000000000000000000..3b53ab59c0cd7b44b2956f16f1adc47fe85909d3 Binary files /dev/null and b/doc/paddle/design/dist_train/src/async_distributed_training.png differ diff --git a/doc/paddle/design/dist_train/src/async_pserver.graffle b/doc/paddle/design/dist_train/src/async_pserver.graffle new file mode 100644 index 0000000000000000000000000000000000000000..d2301611774fcb3866473e3e6470568d1e1312cf Binary files /dev/null and b/doc/paddle/design/dist_train/src/async_pserver.graffle differ diff --git a/doc/paddle/design/dist_train/src/async_pserver.png b/doc/paddle/design/dist_train/src/async_pserver.png new file mode 100644 index 0000000000000000000000000000000000000000..7d900b0c0eb291c67537b9cf93227c671bafdc73 Binary files /dev/null and b/doc/paddle/design/dist_train/src/async_pserver.png differ diff --git a/doc/paddle/design/dist_train/src/async_update.graffle b/doc/paddle/design/dist_train/src/async_update.graffle new file mode 100644 index 0000000000000000000000000000000000000000..3a631888688a0d564a873fcb16d943958c91223e Binary files /dev/null and b/doc/paddle/design/dist_train/src/async_update.graffle differ diff --git a/doc/paddle/design/dist_train/src/async_update.png b/doc/paddle/design/dist_train/src/async_update.png new file mode 100644 index 0000000000000000000000000000000000000000..3e8db973f45d6d9ac8dcce1dc7878067e79e6dcc Binary files /dev/null and b/doc/paddle/design/dist_train/src/async_update.png differ diff --git a/doc/paddle/design/dist_train/src/compiler.graffle 
b/doc/paddle/design/dist_train/src/compiler.graffle new file mode 100644 index 0000000000000000000000000000000000000000..8cc678fea3c820103e7ce81f7a5d625d6c1d92de Binary files /dev/null and b/doc/paddle/design/dist_train/src/compiler.graffle differ diff --git a/doc/paddle/design/dist_train/src/compiler.png b/doc/paddle/design/dist_train/src/compiler.png new file mode 100644 index 0000000000000000000000000000000000000000..65d34f841afce9756def07dd8ecb9ca44e658bfe Binary files /dev/null and b/doc/paddle/design/dist_train/src/compiler.png differ diff --git a/doc/paddle/design/dist_train/src/dist-graph.graffle b/doc/paddle/design/dist_train/src/dist-graph.graffle new file mode 100644 index 0000000000000000000000000000000000000000..941399c6ced8d5f65b6c595522b770c88259df4b Binary files /dev/null and b/doc/paddle/design/dist_train/src/dist-graph.graffle differ diff --git a/doc/paddle/design/dist_train/src/dist-graph.png b/doc/paddle/design/dist_train/src/dist-graph.png new file mode 100644 index 0000000000000000000000000000000000000000..3546b09f1c2ee3e4f60f519d5e47f823f08051a7 Binary files /dev/null and b/doc/paddle/design/dist_train/src/dist-graph.png differ diff --git a/doc/paddle/design/dist_train/src/distributed_architecture.graffle b/doc/paddle/design/dist_train/src/distributed_architecture.graffle new file mode 100644 index 0000000000000000000000000000000000000000..d1b60141342232e06227c2d430ebc60ec349a907 Binary files /dev/null and b/doc/paddle/design/dist_train/src/distributed_architecture.graffle differ diff --git a/doc/paddle/design/dist_train/src/distributed_architecture.png b/doc/paddle/design/dist_train/src/distributed_architecture.png new file mode 100644 index 0000000000000000000000000000000000000000..29c7b0c0783f97c6d33b1db1ed484d6a2b9dd356 Binary files /dev/null and b/doc/paddle/design/dist_train/src/distributed_architecture.png differ diff --git a/doc/paddle/design/dist_train/src/distributed_lookup_table.graffle b/doc/paddle/design/dist_train/src/distributed_lookup_table.graffle new file mode 100644 index 0000000000000000000000000000000000000000..65dfdbbacd219739db6ddfdf243cc16c3c4e8d1e Binary files /dev/null and b/doc/paddle/design/dist_train/src/distributed_lookup_table.graffle differ diff --git a/doc/paddle/design/dist_train/src/distributed_lookup_table.jpeg b/doc/paddle/design/dist_train/src/distributed_lookup_table.jpeg new file mode 100644 index 0000000000000000000000000000000000000000..5353a16fd329f62ff893d32706b9c3c0bcc46a07 Binary files /dev/null and b/doc/paddle/design/dist_train/src/distributed_lookup_table.jpeg differ diff --git a/doc/paddle/design/dist_train/src/distributed_training.graffle b/doc/paddle/design/dist_train/src/distributed_training.graffle new file mode 100644 index 0000000000000000000000000000000000000000..1168801bc1fadfce310a74cb3110695bd1629f6b Binary files /dev/null and b/doc/paddle/design/dist_train/src/distributed_training.graffle differ diff --git a/doc/paddle/design/dist_train/src/fluid_lookup_remote_table.graffle b/doc/paddle/design/dist_train/src/fluid_lookup_remote_table.graffle new file mode 100644 index 0000000000000000000000000000000000000000..96ca6d48f43bd9f49c6861dab006e2037873db87 Binary files /dev/null and b/doc/paddle/design/dist_train/src/fluid_lookup_remote_table.graffle differ diff --git a/doc/paddle/design/dist_train/src/fluid_lookup_remote_table.png b/doc/paddle/design/dist_train/src/fluid_lookup_remote_table.png new file mode 100644 index 0000000000000000000000000000000000000000..afa25ab3b4e427bc595a855b12ab966478e01ed0 Binary 
files /dev/null and b/doc/paddle/design/dist_train/src/fluid_lookup_remote_table.png differ diff --git a/doc/paddle/design/dist_train/src/local-graph.graffle b/doc/paddle/design/dist_train/src/local-graph.graffle new file mode 100644 index 0000000000000000000000000000000000000000..19e509bd9af3c1e9a3f5e0f16ddd281457a339c5 Binary files /dev/null and b/doc/paddle/design/dist_train/src/local-graph.graffle differ diff --git a/doc/paddle/design/dist_train/src/local-graph.png b/doc/paddle/design/dist_train/src/local-graph.png new file mode 100644 index 0000000000000000000000000000000000000000..ada51200f793a9bb18911e7d63cfdb3244b967d7 Binary files /dev/null and b/doc/paddle/design/dist_train/src/local-graph.png differ diff --git a/doc/paddle/design/dist_train/src/local_architecture.graffle b/doc/paddle/design/dist_train/src/local_architecture.graffle new file mode 100644 index 0000000000000000000000000000000000000000..49fcc663ebe3824aa234e3a67aadf285cb417877 Binary files /dev/null and b/doc/paddle/design/dist_train/src/local_architecture.graffle differ diff --git a/doc/paddle/design/dist_train/src/local_architecture.png b/doc/paddle/design/dist_train/src/local_architecture.png new file mode 100644 index 0000000000000000000000000000000000000000..14adc9fd72b855bb9f74fbf2c84ac9ec0cf2b122 Binary files /dev/null and b/doc/paddle/design/dist_train/src/local_architecture.png differ diff --git a/doc/paddle/design/dist_train/src/lookup_table.png b/doc/paddle/design/dist_train/src/lookup_table.png new file mode 100644 index 0000000000000000000000000000000000000000..72dfe3547f731d0d090338afb206b0549dff472e Binary files /dev/null and b/doc/paddle/design/dist_train/src/lookup_table.png differ diff --git a/doc/paddle/design/dist_train/src/lookup_table_training.png b/doc/paddle/design/dist_train/src/lookup_table_training.png new file mode 100644 index 0000000000000000000000000000000000000000..cc7cc4aeb3b885850fe2f70f19fb84d5873bed1e Binary files /dev/null and b/doc/paddle/design/dist_train/src/lookup_table_training.png differ diff --git a/doc/paddle/design/dist_train/src/mpi_module.png b/doc/paddle/design/dist_train/src/mpi_module.png new file mode 100644 index 0000000000000000000000000000000000000000..e6b6a3e5d6f68baeeb67d7f71154bd8d85f32b6f Binary files /dev/null and b/doc/paddle/design/dist_train/src/mpi_module.png differ diff --git a/doc/paddle/design/dist_train/src/multi-threads.graffle b/doc/paddle/design/dist_train/src/multi-threads.graffle new file mode 100644 index 0000000000000000000000000000000000000000..e71173715fff92a0a933d0c7d83599ba948552c6 Binary files /dev/null and b/doc/paddle/design/dist_train/src/multi-threads.graffle differ diff --git a/doc/paddle/design/dist_train/src/multi-threads/multi-threads@3x.png b/doc/paddle/design/dist_train/src/multi-threads/multi-threads@3x.png new file mode 100644 index 0000000000000000000000000000000000000000..e40a869987dbbf5019d4cb03c1dab55b74d6c9f9 Binary files /dev/null and b/doc/paddle/design/dist_train/src/multi-threads/multi-threads@3x.png differ diff --git a/doc/paddle/design/dist_train/src/multi-threads/single-thread@3x.png b/doc/paddle/design/dist_train/src/multi-threads/single-thread@3x.png new file mode 100644 index 0000000000000000000000000000000000000000..4083aebfdd45af5fbac25fa2c4176bc08c3cb44a Binary files /dev/null and b/doc/paddle/design/dist_train/src/multi-threads/single-thread@3x.png differ diff --git a/doc/paddle/design/dist_train/src/ncc2_design.graffle b/doc/paddle/design/dist_train/src/ncc2_design.graffle new file mode 100644 index 
0000000000000000000000000000000000000000..7d2753bbb03bc28c7a0054bb0aa424deb072ffbf Binary files /dev/null and b/doc/paddle/design/dist_train/src/ncc2_design.graffle differ diff --git a/doc/paddle/design/dist_train/src/ncc2_design.png b/doc/paddle/design/dist_train/src/ncc2_design.png new file mode 100644 index 0000000000000000000000000000000000000000..da0d5ee81f5dfeb4ca1356601b0bb5870456e3d6 Binary files /dev/null and b/doc/paddle/design/dist_train/src/ncc2_design.png differ diff --git a/doc/paddle/design/dist_train/src/paddle-compile.graffle b/doc/paddle/design/dist_train/src/paddle-compile.graffle new file mode 100644 index 0000000000000000000000000000000000000000..a6348cc3dbcaca923c6e794681b2edb85cb9f8f6 Binary files /dev/null and b/doc/paddle/design/dist_train/src/paddle-compile.graffle differ diff --git a/doc/paddle/design/dist_train/src/paddle-compile.png b/doc/paddle/design/dist_train/src/paddle-compile.png new file mode 100644 index 0000000000000000000000000000000000000000..e0f13d551ac41afaec627a57dea79356464bf0bf Binary files /dev/null and b/doc/paddle/design/dist_train/src/paddle-compile.png differ diff --git a/doc/paddle/design/dist_train/src/remote_executor.graffle b/doc/paddle/design/dist_train/src/remote_executor.graffle new file mode 100644 index 0000000000000000000000000000000000000000..41b2067311694b56d211a4f32d1b76884eeffd2d Binary files /dev/null and b/doc/paddle/design/dist_train/src/remote_executor.graffle differ diff --git a/doc/paddle/design/dist_train/src/remote_executor.png b/doc/paddle/design/dist_train/src/remote_executor.png new file mode 100644 index 0000000000000000000000000000000000000000..744e2fb2e0f1bbe058e991ba7b2a09000965ee79 Binary files /dev/null and b/doc/paddle/design/dist_train/src/remote_executor.png differ diff --git a/doc/paddle/design/dist_train/src/sparse_update.graffle b/doc/paddle/design/dist_train/src/sparse_update.graffle new file mode 100644 index 0000000000000000000000000000000000000000..08d689a58f83698d8c1158ee3990ed8abf3a7a9a Binary files /dev/null and b/doc/paddle/design/dist_train/src/sparse_update.graffle differ diff --git a/doc/paddle/design/dist_train/src/sparse_update.png b/doc/paddle/design/dist_train/src/sparse_update.png new file mode 100644 index 0000000000000000000000000000000000000000..8c872e6ac479f7d1b818a4a207956c43155d0ad7 Binary files /dev/null and b/doc/paddle/design/dist_train/src/sparse_update.png differ diff --git a/doc/paddle/design/dist_train/src/sync_distributed_training.png b/doc/paddle/design/dist_train/src/sync_distributed_training.png new file mode 100644 index 0000000000000000000000000000000000000000..e4f9a221fea4b7238e8a1d84e609c0371f6ef7a2 Binary files /dev/null and b/doc/paddle/design/dist_train/src/sync_distributed_training.png differ diff --git a/doc/paddle/design/dynamic_rnn/2_level_rnn.dot b/doc/paddle/design/dynamic_rnn/2_level_rnn.dot new file mode 100644 index 0000000000000000000000000000000000000000..5d77865061ca7bbbfcf254dd938f09aef5553505 --- /dev/null +++ b/doc/paddle/design/dynamic_rnn/2_level_rnn.dot @@ -0,0 +1,56 @@ +digraph G { + + rnn [label="1st level RNN" shape=box] + + subgraph cluster0 { + label = "time step 0" + + sent0 [label="sentence"] + sent1 [label="sentence"] + + rnn1 [label="2nd level RNN" shape=box] + + sent0 -> rnn1 + sent1 -> rnn1 + } + + subgraph cluster1 { + label = "time step 1" + + sent2 [label="sentence"] + sent3 [label="sentence"] + + rnn2 [label="2nd level RNN" shape=box] + + sent2 -> rnn2 + sent3 -> rnn2 + } + + subgraph cluster2 { + label = "time step 2" + + sent4 
[label="sentence"] + sent5 [label="sentence"] + + rnn3 [label="2nd level RNN" shape=box] + + sent4 -> rnn3 + sent5 -> rnn3 + } + + + para0 [label="paragraph info 0"] + para1 [label="paragraph info 1"] + para2 [label="paragraph info 2"] + + rnn1 -> para0 + rnn2 -> para1 + rnn3 -> para2 + + para0 -> rnn + para1 -> rnn + para2 -> rnn + + chapter [label="chapter info"] + rnn -> chapter +} diff --git a/doc/paddle/design/dynamic_rnn/2_level_rnn.png b/doc/paddle/design/dynamic_rnn/2_level_rnn.png new file mode 100644 index 0000000000000000000000000000000000000000..0537a75beb175c0c284717421f7aa908da2a5038 Binary files /dev/null and b/doc/paddle/design/dynamic_rnn/2_level_rnn.png differ diff --git a/doc/paddle/design/dynamic_rnn/index_cn.rst b/doc/paddle/design/dynamic_rnn/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1d224d22cf7103616f44115db01f0ae55f1cb88a --- /dev/null +++ b/doc/paddle/design/dynamic_rnn/index_cn.rst @@ -0,0 +1,8 @@ +动态RNN +------------ + +.. toctree:: + :maxdepth: 1 + + rnn.md + rnn_design.md diff --git a/doc/paddle/design/dynamic_rnn/index_en.rst b/doc/paddle/design/dynamic_rnn/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..2bd9cba60e59adf281dddaa1979171f8d3ac30f2 --- /dev/null +++ b/doc/paddle/design/dynamic_rnn/index_en.rst @@ -0,0 +1,8 @@ +Dynamic RNN +------------ + +.. toctree:: + :maxdepth: 1 + + rnn.md + rnn_design_en.md diff --git a/doc/paddle/design/dynamic_rnn/rnn.dot b/doc/paddle/design/dynamic_rnn/rnn.dot new file mode 100644 index 0000000000000000000000000000000000000000..c1141cd9c981bb3cbf50d8bf7a6ed210280d79a5 --- /dev/null +++ b/doc/paddle/design/dynamic_rnn/rnn.dot @@ -0,0 +1,87 @@ +digraph G { + label = "simple RNN implementation" + + ranksep=2; + + //graph [nodesep=1, ranksep=1]; + + node[nodesep=1] + + subgraph cluster0 { + label = "global scope" + rankdir = TB + W + boot_memory + input + output + } + + subgraph cluster1 { + label = "step-scope 0" + rankdir = TB + memory0[label="memory"] + prememory0[label="pre-memory"] + step_input0[label="step input"] + step_output0[label="step output"] + } + + subgraph cluster2 { + label = "step-scope 1" + rankdir = TB + memory1[label="memory"] + prememory1[label="pre-memory"] + step_input1[label="step input"] + step_output1[label="step output"] + } + + subgraph cluster3 { + label = "step-scope 2" + rankdir = TB + memory2[label="memory"] + prememory2[label="pre-memory"] + step_input2[label="step input"] + step_output2[label="step output"] + } + + stepnet [shape=box] + stepnet0 [shape=box, style=dashed] + stepnet1 [shape=box, style=dashed] + stepnet2 [shape=box, style=dashed] + + + edge[color=blue] + boot_memory -> prememory0 [label="init" color="blue"] + memory0 -> prememory1 [label="copy/reference" color="blue"] + memory1 -> prememory2 [label="copy/reference" color="blue"] + + edge[color=black] + W -> stepnet0[constraint=false, style=dashed] + W -> stepnet1[constraint=false, style=dashed] + W -> stepnet2[constraint=false, style=dashed] + + memory0 -> stepnet0[style=dashed] + prememory0 -> stepnet0 -> step_output0[style=dashed] + + memory1 -> stepnet1[style=dashed] + prememory1 -> stepnet1 -> step_output1[style=dashed] + + memory2 -> stepnet2[style=dashed] + prememory2 -> stepnet2 -> step_output2[style=dashed] + + input -> step_input0 + input -> step_input1 + input -> step_input2 + + step_input0 -> stepnet0 [style=dashed] + step_input1 -> stepnet1[style=dashed] + step_input2 -> stepnet2[style=dashed] + + step_output0 -> output + step_output1 -> output 
+ step_output2 -> output + + stepnet0 -> stepnet[style=dashed] + stepnet1 -> stepnet[style=dashed] + stepnet2 -> stepnet[style=dashed] + +} diff --git a/doc/paddle/design/dynamic_rnn/rnn.jpg b/doc/paddle/design/dynamic_rnn/rnn.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9867e404cf959df0dce6ded5222b466c788fb840 Binary files /dev/null and b/doc/paddle/design/dynamic_rnn/rnn.jpg differ diff --git a/doc/paddle/design/dynamic_rnn/rnn.md b/doc/paddle/design/dynamic_rnn/rnn.md new file mode 100644 index 0000000000000000000000000000000000000000..52783c86a710c605fbec6565b31f6fba5ac719b2 --- /dev/null +++ b/doc/paddle/design/dynamic_rnn/rnn.md @@ -0,0 +1,153 @@ +# RNNOp design + +This document describes the RNN (Recurrent Neural Network) operator and how it is implemented in PaddlePaddle. The RNN op requires that all instances in a mini-batch have the same length. We will have a more flexible dynamic RNN operator in the future. + +## RNN Algorithm Implementation + +

+(figure: the RNN operator unrolled into a full network)
+ +The above diagram shows an RNN unrolled into a full network. + +There are several important concepts here: + +- *step-net*: the sub-graph that runs at each step. +- *memory*, $h_t$, the state of the current step. +- *ex-memory*, $h_{t-1}$, the state of the previous step. +- *initial memory value*, the memory of the first (initial) step. + +### Step-scope + +There could be local variables defined in each step-net. PaddlePaddle runtime realizes these variables in *step-scopes* which are created for each step. + +
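A minimal sketch of this per-step bookkeeping is shown below. It is illustrative pseudocode only: the `step_net` callable and the plain-dict scopes are assumptions made for the sketch, not the framework's actual `Scope` class or RNN operator.

```python
def run_rnn(step_net, step_inputs, parent_scope):
    """Run step_net once per time step, each step in its own scope (sketch)."""
    step_scopes, step_outputs = [], []
    for x_t in step_inputs:
        scope = {"parent": parent_scope, "step_input": x_t}  # 1. create the step-scope
        scope["step_output"] = None                          # 2. init step-local variables
        step_net(scope)                                      # 3. run the step-net in this scope
        step_scopes.append(scope)
        step_outputs.append(scope["step_output"])
    # The RNN operator composes its output from the per-step outputs.
    return step_outputs, step_scopes
```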

+(Figure 2 illustrates the RNN's data flow)
+ +Please be aware that every step runs the same step-net. Each step does the following: + +1. Creates the step-scope. +2. Initializes the local variables including step-outputs, in the step-scope. +3. Runs the step-net, which uses the above mentioned variables. + +The RNN operator will compose its output from step outputs in each of the step scopes. + +### Memory and Ex-memory + +Let's give more details about memory and ex-memory using a simple example: + +$$ +h_t = U h_{t-1} + W x_t +$$, + +where $h_t$ and $h_{t-1}$ are the memory and ex-memory (previous memory) of step $t$ respectively. + +In the implementation, we can make an ex-memory variable either "refer to" the memory variable of the previous step, +or copy the memory value of the previous step to the current ex-memory variable. + +### Usage in Python + +For more information on Block, please refer to the [design doc](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/concepts/block.md). + +We can define an RNN's step-net using a Block: + +```python +import paddle as pd + +X = some_op() # x is some operator's output and is a LoDTensor +a = some_op() + +# declare parameters +W = pd.Variable(shape=[20, 30]) +U = pd.Variable(shape=[20, 30]) + +rnn = pd.create_rnn_op(output_num=1) +with rnn.stepnet(): + x = rnn.add_input(X) + # declare a memory (rnn's step) + h = rnn.add_memory(init=a) + # h.pre_state(), the previous memory of rnn + new_state = pd.add_two( pd.matmul(W, x) + pd.matmul(U, h.pre_state())) + # update current memory + h.update(new_state) + # indicate that h variables in all step scopes should be merged + rnn.add_outputs(h) + +out = rnn() +``` + +Python API functions in above example: + +- `rnn.add_input`: indicates that the parameter is a variable that will be segmented into step-inputs. +- `rnn.add_memory`: creates a variable used as the memory. +- `rnn.add_outputs`: marks the variables that will be concatenated across steps into the RNN output. + +### Nested RNN and LoDTensor + +An RNN whose step-net includes other RNN operators is known as an *nested RNN*. + +For example, we could have a 2-level RNN, where the top level corresponds to paragraphs, and the lower level corresponds to sentences. Each step of the higher level RNN also receives an input from the corresponding step of the lower level, and additionally the output from the previous time step at the same level. + +The following figure illustrates feeding in text into the lower level, one sentence at a step, and the feeding in step outputs to the top level. The final top level output is about the whole text. + +

+(figure: a two-level RNN, where sentences feed the lower level and its step outputs feed the top level)

+
+```python
+import paddle as pd
+
+W = pd.Variable(shape=[20, 30])
+U = pd.Variable(shape=[20, 30])
+
+W0 = pd.Variable(shape=[20, 30])
+U0 = pd.Variable(shape=[20, 30])
+
+# a is the output of some op
+a = some_op()
+
+# chapter_data is a set of 128-dim word vectors
+# the first level of LoD is sentence
+# the second level of LoD is a chapter
+chapter_data = pd.Variable(shape=[None, 128], type=pd.lod_tensor, level=2)
+
+def lower_level_rnn(paragraph):
+    '''
+    paragraph: the input paragraph, segmented into sentences
+    '''
+    rnn = pd.create_rnn_op(output_num=1)
+    with rnn.stepnet():
+        sentence = rnn.add_input(paragraph, level=0)
+        h = rnn.add_memory(shape=[20, 30])
+        h.update(
+            pd.matmul(W, sentence) + pd.matmul(U, h.pre_state()))
+        # get the last state as the sentence's info
+        rnn.add_outputs(h)
+    return rnn
+
+top_level_rnn = pd.create_rnn_op(output_num=1)
+with top_level_rnn.stepnet():
+    paragraph_data = top_level_rnn.add_input(chapter_data, level=1)
+    low_rnn = lower_level_rnn(paragraph_data)
+    paragraph_out = low_rnn()
+
+    h = top_level_rnn.add_memory(init=a)
+    h.update(
+        pd.matmul(W0, paragraph_data) + pd.matmul(U0, h.pre_state()))
+    top_level_rnn.add_outputs(h)
+
+# output only the last step
+chapter_out = top_level_rnn(output_all_steps=False)
+```
+
+In the above example, the construction of the `top_level_rnn` calls `lower_level_rnn`. The input is an LoD Tensor. The top-level RNN segments the input text data into paragraphs, and the lower-level RNN segments each paragraph into sentences.
+
+By default, the `RNNOp` will concatenate the outputs from all the time steps.
+If `output_all_steps` is set to False, it will only output the final time step.
+

diff --git a/doc/paddle/design/dynamic_rnn/rnn.png b/doc/paddle/design/dynamic_rnn/rnn.png new file mode 100644 index 0000000000000000000000000000000000000000..e139e373fe8396782044cfd936fdde624f8c66fe Binary files /dev/null and b/doc/paddle/design/dynamic_rnn/rnn.png differ diff --git a/doc/paddle/design/dynamic_rnn/rnn_2level_data.dot b/doc/paddle/design/dynamic_rnn/rnn_2level_data.dot new file mode 100644 index 0000000000000000000000000000000000000000..1d85ae2617a915ad0ad8288d848b607cc37ad297 --- /dev/null +++ b/doc/paddle/design/dynamic_rnn/rnn_2level_data.dot @@ -0,0 +1,75 @@ +digraph G { + chapter [label="chapter"] + + subgraph cluster0 { + label = "paragraph 0" + + top_rnn0[label="top rnn step 0" shape=box] + + p0 [label="paragraph 0"] + p1 [label="paragraph 1"] + } + + subgraph cluster1{ + label = "paragraph 1" + + top_rnn1[label="top rnn step 1" shape=box] + + p2 [label="paragraph 0"] + p3 [label="paragraph 1"] + } + + subgraph cluster_p0 { + label = "sentence 0" + + low_rnn0 [label="low rnn step 0" shape=box] + s00 [label="sentence 0"] + s01 [label="sentence 1"] + + low_rnn0 -> s00 + low_rnn0 -> s01 + } + + subgraph cluster_p1 { + label = "sentence 1" + low_rnn1 [label="low rnn step 1" shape=box] + s10 [label="sentence 0"] + s11 [label="sentence 1"] + low_rnn1 -> s10 + low_rnn1 -> s11 + } + + subgraph cluster_p2 { + label = "sentence 1" + low_rnn2 [label="low rnn step 0" shape=box] + s20 [label="sentence 0"] + s21 [label="sentence 1"] + low_rnn2 -> s20 + low_rnn2 -> s21 + } + + subgraph cluster_p3 { + label = "sentence 1" + low_rnn3 [label="low rnn step 1" shape=box] + s30 [label="sentence 0"] + s31 [label="sentence 1"] + low_rnn3 -> s30 + low_rnn3 -> s31 + } + + + chapter -> top_rnn0 + chapter -> top_rnn1 + + top_rnn0 -> p0 + top_rnn0 -> p1 + top_rnn1 -> p2 + top_rnn1 -> p3 + + + p0 -> low_rnn0 + p1 -> low_rnn1 + p2 -> low_rnn2 + p3 -> low_rnn3 + +} diff --git a/doc/paddle/design/dynamic_rnn/rnn_2level_data.png b/doc/paddle/design/dynamic_rnn/rnn_2level_data.png new file mode 100644 index 0000000000000000000000000000000000000000..4be81b2430717a6a506342a09fc26899568574c6 Binary files /dev/null and b/doc/paddle/design/dynamic_rnn/rnn_2level_data.png differ diff --git a/doc/paddle/design/dynamic_rnn/rnn_design.md b/doc/paddle/design/dynamic_rnn/rnn_design.md new file mode 100644 index 0000000000000000000000000000000000000000..cecfcd3307ae4c4fa603220a360e9e124069fa58 --- /dev/null +++ b/doc/paddle/design/dynamic_rnn/rnn_design.md @@ -0,0 +1,242 @@ +# RNN 变长输入设计 +对变长序列的学习,现有主流框架比如 tensorflow, pytorch, caffe2, mxnet 等均使用了padding的方式, +即将一个mini-batch内不同长度的序列补0到固定长度参与计算。 + +现有Paddle包括 `RecurrentLayerGroup` 在内的RNN均实现了无padding的变长序列支持,本文也将基于该模块的思路,设计重构后的变长序列支持。 + +## 背景介绍 +由于tensor必须有明确的shape,因此基于tensor 的主流框架在存储变长序列时, +必须用zero-padding的方式将变长序列补全为固定shape的tensor。 + +由于padding是一种框架实现变长序列的妥协, 从用户角度,在使用RNN类模型时自然会比较介意padding的存在, +因此会有pytorch中对非padding方式变长序列支持长篇的讨论[3]。 + +由于padding对内存和计算会有额外的消耗,tensorflow和mxnet均使用了bucketing来进行优化[1][2], +但不管是padding还是bucket,对于用户都是额外的使用负担。 + +因此,**paddle原生支持变长序列的方式,能直接满足用户对变长序列的最直接的需求,在当前主流平台中可以算是一大优势**。 + +但对变长序列的支持,需要对目前框架做一些修改,下面讨论如何在最小修改下支持变长序列。 + +## 多层序列数据格式 `LODTensor` +目前 Paddle 会将一个mini-batch内的数据存储在一维的内存上, +额外使用 `Argument.sequenceStartPositions` 来存储每个句子的信息。 + +Paddle里使用 `Argument.subSequenceStartPositions` 来存储2层的序列信息,更高维度的序列则无法直接支持; + +为了支持 `N-level` 序列的存储,本文将序列信息定义成如下数据结构: + +```c++ +std::shared_ptr>> lod_start_pos_; +``` + +或者更明确的定义 + +```c++ +typedef std::vector level_t; +std::vector lod_start_pos; +``` + +这里的每一个 `level_t` 存储一个粒度(level)的偏移信息,和paddle目前做法一致。 
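As a concrete illustration of the nested offsets (the numbers below are made up): a mini-batch holding one chapter with 2 paragraphs, where the first paragraph has 2 sentences of 3 and 2 words and the second has 1 sentence of 4 words, could be encoded as follows. Each level stores cumulative start positions, and the flat data array holds all 9 words back to back.

```python
# lod_start_pos: one offset vector per level (illustrative numbers only).
lod_start_pos = [
    [0, 2, 3],     # level 0: 2 paragraphs, made of sentences [0,2) and [2,3)
    [0, 3, 5, 9],  # level 1: 3 sentences, made of words [0,3), [3,5), [5,9)
]

# The words of paragraph 0 span positions [0, 5) in the flat data array:
first_sent, last_sent = lod_start_pos[0][0], lod_start_pos[0][1]
word_begin = lod_start_pos[1][first_sent]   # 0
word_end = lod_start_pos[1][last_sent]      # 5
print(word_begin, word_end)
```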
+ +为了更透明地传递序列信息,我们引入了一种新的tensor 称为 `LODTensor`[4], +其关于tensor相关的接口都直接继承自 `Tensor`,但另外添加了序列相关接口。 +如此,在操作一个 `LODTensor` 时,普通 `Op` 直接当成 `Tensor` 使用, +而操作序列的 `Op` 会额外操作 `LODTensor` 的变长序列操作的相关接口。 + +`LODTensor` 具体定义如下: + +```c++ +class LODTensor : public Tensor { +public: + size_t Levels() const { return seq_start_positions_.size(); } + size_t Elements(int level = 0) const { + return seq_start_positions_[level].size(); + } + // slice of level[elem_begin: elem_end] + // NOTE low performance in slice seq_start_positions_. + // TODO should call Tensor's Slice. + LODTensor LODSlice(int level, int elem_begin, int elem_end) const; + + // slice with tensor's data shared with this. + LODTensor LODSliceShared(int level, int elem_begin, int elem_end) const; + + // copy other's lod_start_pos_, to share LOD info. + // NOTE the LOD info sould not be changed. + void ShareConstLODFrom(const LODTensor &other) { + lod_start_pos_ = other.lod_start_pos_; + } + // copy other's lod_start_pos_'s content, free to mutate. + void ShareMutableLODFrom(const LODTensor &other) { + lod_start_pos_ = std::make_shared < + std::vector>(other.lod_start_pos_.begin(), + other.lod_start_pos_.end()); + } + +private: + std::shared_ptr>> lod_start_pos_; +}; +``` + +其中, `lod_start_pos_` 使用了 `shared_ptr` 来减少存储和复制的代价, +可以认为 `LODTensor` 是 `Tensor` 的扩展,几乎完全兼容原始 `Tensor` 的使用。 + +## 框架支持 +### 框架现有的 `Tensor` 调用替换为 `LODTensor` +为了实现 `LODTensor` 的传递,框架里很多 `Tensor` 都需要变成 `LODTensor`, +简单实现,直接 **把之前所有的`Tensor` 全部替换成 `LODTensor`,这里可以直接修改 `pybind.cc` 里面创建`Tensor`的接口**。 + +此外,用户有可能需要感知序列的存在(比如序列的可视化需要解析模型中输出的序列),因此一些序列操作的API也需要暴露到 python 层。 + +### `lod_start_pos` 随着Op调用链传递 +框架需要支持下列特性,以实现`lod_start_pos`的传递: + +1. 以 `shared_ptr` 的方式实现传递 + - 不修改 `lod_start_pos` 内容的作为 consumer + - 修改 `lod_start_pos` 的作为 producer + - 约定 consumer 只需要复制传递过来的 `shared_ptr` + - producer 需要创建自己的独立的内存,以存储自己独立的修改,并暴露 `shared_ptr` 给后续 consumer + - 由于传递过程是以复制`shared_ptr`的方式实现,因此框架只需要传递一次 `lod_start_pos` + +2. 对于不感知 `lod_start_pos` 的Op足够透明 +3. 需要修改 `lod_start_pos` 的producer Op可以在 `Run` 时更新自己的 `lod_start_pos` 数据 + +具体的设计分为以下3小节 + +#### `load_start_pos` 的传递 + +- 对于不需要修改 `lod_start_pos` 的情况,调用 LODTensor的 `ShareConstLODFrom` 接口实现复制 +- 需要修改的,调用`ShareMutableLODFrom` 接口自己分配内存以存储修改 + +#### 框架透明 +传递这一步需要加入到网络跑之前的初始化操作中,并且只需要初始化一次,基于当前框架设计的初步方案如下 + +- 在 Op 的 `attrs` 中添加一项 `do_mutate_lod_info` 的属性,默认为 `false` + - 有需要修改 `lod_start_pos` 的Op需要在定义 `OpProto` 时设置为 `true` +- `OperatorBase` 的 `InferShape` 中会读取 `do_mutate_lod_info` ,并且调用 `LODTensor` 相关的方法实现 `lod_start_pos` 的复制。 +- `OperatorBase` 中添加一个 member `is_lod_inited{false}` 来保证传递只进行一次 + +一些逻辑如下 + +```c++ +class OperatorBase { +public: + // ... + void InferShape() { + if (!is_load_inited) { + bool do_mutate_lod_info = GetAttr("do_mutate_load_info"); + // find a input having LOD to copy + auto lod_input = ValidLODInput(); + for (auto &output : outputs) { + if (do_mutate_load_info) { + output.ShareMutableLODFrom(lod_input); + } else { + output.ShareConstLODFrom(load_input); + } + } + is_pod_inited = true; + } + + // call op's InferShape + // ... + } + +private: + // ... 
+ bool is_lod_inited{false}; +}; +``` + +如此,`lod_start_pos` 的信息的传递对非OLD的Op的实现是完全透明的。 + +#### `lod_start_pos` 的更新 +上一小节介绍到,对于需要修改 `load_start_pos` 的Op,`OperatorBase` 会分配一块自己的内存以存储修改, +Op在 `Run` 的实现中,操作更新自己的 `load_start_pos` , +而所有依赖其 outputs 的 op 会通过共享的指针自动获取到其更新。 + +## 根据长度排序 +按照长度排序后,从前往后的时间步的batch size会自然地递减,可以直接塞入 Net 做batch计算 + +比如原始的输入: + +``` +origin: +xxxx +xx +xxx + +-> sorted: +xxxx +xxx +xx +``` + +经过 `SegmentInputs` 之后,每个会有4个时间步,每个时间步的输入如下(纵向排列) + +``` +0 1 2 3 +x x x x +x x x +x x +``` + +为了追踪排序前后序列的变化,这里用 +```c++ +struct SortedSeqItem { + void *start{nullptr}; + void *end{nullptr}; +}; + +std::vector sorted_seqs; +``` +来追踪序列排序后的位置,并添加一个新的接口 + +```c++ +std::vector SortBySeqLen(const LODTensor& tensor); +``` + +由于输入序列的顺序变化,以下现有的接口需要针对性地修改: + +- InitMemories, memory需要根据 `sorted_seqs` 重新排列 +- SetmentInputs +- ConcatOutputs + +此外,由于 `sorted_seqs` 需要被 `RecurrentGradientOp` 复用,因此会变成 `RecurrentOp` 一个新的output输出, +之后作为 `RecurrentGradientOp` 的一个输入传入。 + +## InitMemories +由于序列顺序的变化,`boot_memories` 的batch上的element的顺序也需要对应重新排列。 + +## SegmentInputs +`SegmentInputs` 会依赖 `sorted_seqs` 的信息,将原始的序列按照排序后的序列顺序,从横向切割,转为每个step中的inputs。 + +即下面的转变: +``` +origin: +xxxx +xx +xxx + + | + | + \ / + ! +0 1 2 3 +x x x x +x x x +x x +``` +## ConcatOutputs +`ConcatOutputs` 需要 + +- 将每个时间步的输出重新还原为原始输入的序列顺序(以防止Infer阶段顺序打乱) +- 将每个序列concat 为规则的mini-batch表示 + +## 参考文献 +[Tensorflow Bucketing](https://www.tensorflow.org/versions/r0.12/api_docs/python/contrib.training/bucketing) + +[mxnet Bucketing](http://mxnet.io/how_to/bucketing.html) + +[variable length input in RNN scenario](https://discuss.pytorch.org/t/about-the-variable-length-input-in-rnn-scenario/345/5) + +[Level of details](https://en.wikipedia.org/wiki/Level_of_detail) diff --git a/doc/paddle/design/dynamic_rnn/rnn_design_en.md b/doc/paddle/design/dynamic_rnn/rnn_design_en.md new file mode 100644 index 0000000000000000000000000000000000000000..84ded216efd00d8bc1b043ba5f49374ee3fb5944 --- /dev/null +++ b/doc/paddle/design/dynamic_rnn/rnn_design_en.md @@ -0,0 +1,175 @@ +# Varient Length supported RNN Design +For the learning of variable length sequences, the existing mainstream frameworks such as tensorflow, pytorch, caffe2, mxnet and so on all use padding. + +Different-length sequences in a mini-batch will be padded with zeros and transformed to same length. + +The existing RNN implementations of the PaddlePaddle is `RecurrentLayerGroup`, +which supports the variable length sequences without padding. +This doc will design fluid's RNN based on this idea. + +## Multi-layer sequence data format `LODTensor` +At present, Paddle stores data in one mini-batch in one-dimensional array. + +`Argument.sequenceStartPositions` is used to store information for each sentence. + +In Paddle, `Argument.subSequenceStartPositions` is used to store 2 levels of sequence information, while higher dimensional sequences can not be supported. + +In order to support the storage of `N-level` sequences, we define sequence information as the following data structure. + + +```c++ +std::shared_ptr>> lod_start_pos_; +``` + +Or more clearly defined here + +```c++ +typedef std::vector level_t; +std::vector lod_start_pos; +``` +Each `level_t` here stores a level of offset information consistent with paddle's current practice. + +In order to transmit sequence information more transparently, we have introduced a new tensor called `LODTensor`[1]. +Its tensor-related interfaces all inherit directly from `Tensor`, but it also adds serial-related interfaces. 
+Thus, when working with a `LODTensor`, ordinary `Op` is used directly as `Tensor`. +The `Op` of the operation sequence will additionally operate the relevant interface of the `LODTensor` variable-length sequence operation. + +The definition of `LODTensor` is as follows: + + +```c++ +class LODTensor : public Tensor { +public: + size_t Levels() const { return seq_start_positions_.size(); } + size_t Elements(int level = 0) const { + return seq_start_positions_[level].size(); + } + // slice of level[elem_begin: elem_end] + // NOTE low performance in slice seq_start_positions_. + // TODO should call Tensor's Slice. + LODTensor LODSlice(int level, int elem_begin, int elem_end) const; + + // slice with tensor's data shared with this. + LODTensor LODSliceShared(int level, int elem_begin, int elem_end) const; + + // copy other's lod_start_pos_, to share LOD info. + // NOTE the LOD info sould not be changed. + void ShareConstLODFrom(const LODTensor &other) { + lod_start_pos_ = other.lod_start_pos_; + } + // copy other's lod_start_pos_'s content, free to mutate. + void ShareMutableLODFrom(const LODTensor &other) { + lod_start_pos_ = std::make_shared < + std::vector>(other.lod_start_pos_.begin(), + other.lod_start_pos_.end()); + } + +private: + std::shared_ptr>> lod_start_pos_; +}; +``` +Among them, `lod_start_pos_` uses `shared_ptr` to reduce the cost of storage and replication. +`LODTensor` can be thought as an extension of `Tensor`, which is almost completely compatible with the original `Tensor`. + +## How to support the framework +### Replace `Tensor` with `LoDTensor` +To implement the passing of `LODTensor`, most `Tensor` in the framework need to be replaced with `LODTensor`. +Simple implementation, directly **replace all previous `Tensor` with `LODTensor`** , where you can directly modify the `Tensor` interface created in `pybind.cc`. + +In addition, the user may need to perceive the existence of a sequence (such as the sequence of the visualization needs to parse the output sequence in the model), so some of the serial operation APIs also need to be exposed to the python layer. + +### Transmit `lod_start_pos` along with the Op call chain +`lod_start_pos` is passed along with the Op call chain +The framework needs to support the following features to implement the transmit of `lod_start_pos`: + +1. Implement the transfer as `shared_ptr` + - Do not modify the contents of `lod_start_pos` as a consumer + - Modify producer of `lod_start_pos` as producer + - Conventions consumer only needs to copy `shared_ptr` passed over + - producer needs to create its own independent memory to store its own independent modifications and expose `shared_ptr` to subsequent consumer + - Since the transfer process is implemented by copying `shared_ptr`, the framework only needs to pass `lod_start_pos` once. + +2. Op is transparent enough not to sense `lod_start_pos` +3. Producer Op that needs to modify `lod_start_pos` can update its `lod_start_pos` data when `Run` + +## sorted by length +After sorting by length, the batch size from the forward time step will naturally decrement, and you can directly plug it into Net to do the batch calculation. 
+ +For example, the original input: + +``` +origin: +xxxx +xx +xxx + +-> sorted: +xxxx +xxx +xx +``` + +After `SegmentInputs`, there will be 4 time steps, the input of each time step is as follows (vertical arrangement) + +``` +0 1 2 3 +x x x x +x x x +x x +``` + +In order to track the changes before and after sorting, use here + +```c++ +struct SortedSeqItem { + void *start{nullptr}; + void *end{nullptr}; +}; + +std::vector sorted_seqs; +``` +To track the position of the sequence after sorting, and add a new interface + +```c++ +std::vector SortBySeqLen(const LODTensor& tensor); +``` +Due to the sequence of input sequences, the following existing interfaces need to be modified: + +- InitMemories, memory needs to be rearranged according to `sorted_seqs` +- SetmentInputs +- ConcatOutputs + +In addition, because `sorted_seqs` needs to be multiplexed with `RecurrentGradientOp`, it will become a new output of `RecurrentOp`. +It is passed in as an input to `RecurrentGradientOp`. + +## InitMemories +Due to the sequence change, the order of the elements on the `boot_memories` batch also needs to be rearranged accordingly. + +## SegmentInputs + +`SegmentInputs` relies on the information of `sorted_seqs` to cut the original sequence from the horizontal to the input of each step in the sorted sequence order. + +the transition is as follows: +``` +origin: +xxxx +xx +xxx + + | + | + \ / + ! +0 1 2 3 +x x x x +x x x +x x +``` +## ConcatOutputs +`ConcatOutputs` needs + +- Restore the output of each time step back to the original input sequence order (to prevent the order of Infer phase from being upset) +- Concat each sequence as a regular mini-batch representation + +## references +1. [Level of details](https://en.wikipedia.org/wiki/Level_of_detail) diff --git a/doc/paddle/design/execution/if_else_op.md b/doc/paddle/design/execution/if_else_op.md new file mode 100644 index 0000000000000000000000000000000000000000..26d140f06db4ecefa86be015eaa731ffddc6910c --- /dev/null +++ b/doc/paddle/design/execution/if_else_op.md @@ -0,0 +1,51 @@ +# The `IfElse` Operator + +PaddlePaddle's `IfElse` operator differs from TensorFlow's: + +- the TensorFlow version takes a scalar boolean value as the condition so that the whole mini-batch goes to either the true or the false branch, whereas +- the PaddlePaddle version takes a vector of boolean value as the condition, and instances corresponding to true values go to the true branch, those corresponding to false values go to the false branch. + +## Example + +The following PaddlePaddle program shows the usage of the IfElse operator: + +```python +import paddle as pd + +x = minibatch([10, 20, 30]) # shape=[None, 1] +y = var(1) # shape=[1], value=1 +z = minibatch([10, 20, 30]) # shape=[None, 1] +cond = larger_than(x, 15) # [false, true, true] + +ie = pd.ifelse() +with ie.true_block(): + d = pd.layer.add(x, y) + ie.output(d, pd.layer.softmax(d)) +with ie.false_block(): + d = pd.layer.fc(z) + ie.output(d, d+1) +o1, o2 = ie(cond) +``` + +A challenge to implement the `IfElse` operator is to infer those variables to be split, or, say, to identify the variable of the mini-batch or those derived from the mini-batch. 
+
+An equivalent C++ program is as follows:
+
+```c++
+namespace pd = paddle;
+
+int x = 10;
+int y = 1;
+int z = 10;
+bool cond = false;
+int o1, o2;
+if (cond) {
+  int d = x + y;
+  o1 = d;
+  o2 = pd::layer::softmax(d);
+} else {
+  int d = pd::layer::fc(z);
+  o1 = d;
+  o2 = d + 1;
+}
+```
diff --git a/doc/paddle/design/execution/index_cn.rst b/doc/paddle/design/execution/index_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..ed31b017429d168b2466d8f6b423f48bd5d78d1f
--- /dev/null
+++ b/doc/paddle/design/execution/index_cn.rst
@@ -0,0 +1,8 @@
+执行流程
+-------------
+
+.. toctree::
+   :maxdepth: 1
+
+   switch.md
+   if_else_op.md
diff --git a/doc/paddle/design/execution/index_en.rst b/doc/paddle/design/execution/index_en.rst
new file mode 100644
index 0000000000000000000000000000000000000000..fcf846da348ff0bed707c42718e08314998fbac0
--- /dev/null
+++ b/doc/paddle/design/execution/index_en.rst
@@ -0,0 +1,8 @@
+Execution Process
+--------------------------------------
+
+.. toctree::
+   :maxdepth: 1
+
+   switch.md
+   if_else_op.md
diff --git a/doc/paddle/design/execution/switch.md b/doc/paddle/design/execution/switch.md
new file mode 100644
index 0000000000000000000000000000000000000000..1c337bd7159b25e594c2f91f9a143b3f4bc3c8e8
--- /dev/null
+++ b/doc/paddle/design/execution/switch.md
@@ -0,0 +1,31 @@
+# Design Doc: Switch
+
+## Background
+
+Many programming languages provide `switch` as a generalization of `if-elif-else`. We want to add it to Fluid.
+
+The following example shows the usage of `fluid.switch`.
+
+```python
+a = fluid.Var(10)
+b = fluid.Var(0)
+
+with switch() as switch:
+    with switch.case(fluid.less_equal(a, 10)):
+        fluid.print("Case 1")
+    with switch.case(fluid.larger(a, 0)):
+        fluid.print("Case 2")
+    with switch.default():
+        fluid.print("Case 3")
+```
+
+## The Semantics
+
+1. A `switch` control-flow checks cases one by one.
+1. The condition of each case is a boolean value, which is a scalar; this differs from the `fluid.if_else` control-flow, whose condition can be a vector of boolean values.
+1. It runs the first matched case, or the default case if there is one.
+1. Once it matches a case, it runs the corresponding branch and only that branch. It is as if there were a C `break` keyword at the end of each case.
+
+The above program should print, and print only, "Case 1".
+
+The implementation of the backward pass of the `switch` control-flow is easier than the backward pass of `if_else`, because `switch` runs at most one branch, whereas `if-else` could run more than one branch.
diff --git a/doc/paddle/design/index_cn.rst b/doc/paddle/design/index_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..31b62a5eb3cd9b5b68d51abcd001fd5b8c39a914
--- /dev/null
+++ b/doc/paddle/design/index_cn.rst
@@ -0,0 +1,19 @@
+设计思想
+------------
+
+.. toctree::
+   :maxdepth: 1
+
+   motivation/index_cn.rst
+   execution/index_cn.rst
+   concepts/index_cn.rst
+   data_type/index_cn.rst
+   memory/index_cn.rst
+   multi_devices/index_cn.rst
+   dynamic_rnn/index_cn.rst
+   concurrent/index_cn.rst
+   algorithm/index_cn.rst
+   network/index_cn.rst
+   modules/index_cn.rst
+   interface/index_cn.rst
+   dist_train/index_cn.rst
diff --git a/doc/paddle/design/index_en.rst b/doc/paddle/design/index_en.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d7cf2dfa28db586da2ad43a491480357e1dd1b2a
--- /dev/null
+++ b/doc/paddle/design/index_en.rst
@@ -0,0 +1,18 @@
+Design
+------------
+
+.. toctree::
+   :maxdepth: 1
+
+   motivation/index_en.rst
+   execution/index_en.rst
+   concepts/index_en.rst
+   data_type/index_en.rst
+   memory/index_en.rst
+   multi_devices/index_en.rst
+   dynamic_rnn/index_en.rst
+   concurrent/index_en.rst
+   algorithm/index_en.rst
+   network/index_en.rst
+   modules/index_en.rst
+   dist_train/index_en.rst
diff --git a/doc/paddle/design/interface/index_cn.rst b/doc/paddle/design/interface/index_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..69a8d9bad4fe88935b9fa87757abf0105ca8eb75
--- /dev/null
+++ b/doc/paddle/design/interface/index_cn.rst
@@ -0,0 +1,4 @@
+多语言接口
+------------
+
+TBD
diff --git a/doc/paddle/design/interface/index_en.rst b/doc/paddle/design/interface/index_en.rst
new file mode 100644
index 0000000000000000000000000000000000000000..22abc71f984aa5da7151d5ebf0c3bdbcc69a3624
--- /dev/null
+++ b/doc/paddle/design/interface/index_en.rst
@@ -0,0 +1,4 @@
+Multi-Language Interface
+------------------------
+
+TBD
diff --git a/doc/paddle/design/ir/overview.md b/doc/paddle/design/ir/overview.md
new file mode 100644
index 0000000000000000000000000000000000000000..83ef97c99efeaf27a27f93f0cd3857c0f1bc812e
--- /dev/null
+++ b/doc/paddle/design/ir/overview.md
@@ -0,0 +1,185 @@
+## Motivation
+
+There is a `gap` between the `Program` defined by
+the user and the `Executable` that can be scheduled
+efficiently on heterogeneous hardware, either locally
+or in a distributed setting.
+
+Usually, the `gap` is bridged by
+
+* A series of transformations with a defined order.
+
+* These transformations usually involve
+`insert, delete, clustering, split, dependency analysis`.
+
+* A simple way to verify and debug each transformation.
+
+* Flexibility to add, remove or customize transformations to fit
+the requirements of various algorithms (models) and hardware scenarios.
+
+Some other trends also push us toward a better unified pattern.
+
+* The deep learning framework is built around the concept of graphs.
+To leverage tools such as compilation (e.g. TVM and nGraph) or
+cross-framework conversion (e.g. ONNX), we also need an intermediate
+representation that can be connected to the rest of the ecosystem.
+
+
+We need a unified pattern that naturally supports the requirements
+described above. The pattern should fit training, inference
+and other offline serialized model transformations.
+Learning from LLVM and other deep learning frameworks, we draft the
+design below.
+
+
+## Design
+
+### Major Concepts
+
+#### Node
+
+`Node` represents an operation that performs some computation or
+a variable that is an input or output of an operation.
+
+`Node`s are connected to other `Node`s via inputs and outputs.
+
+Other properties (maybe device placement information) can be added
+to `Node` in the future if it's a
+common requirement of many other `Pass`es. Otherwise, it should live
+in a `Node` wrapper class that is private to some `Pass` or be
+a local member of a `Pass`.
+
+#### Graph
+
+`Graph` contains a list of `Node`s, which are connected to
+each other via inputs and outputs.
+
+TODO: Better definitions for the graph.
+
+`Graph` can also contain `Attribute`s. `Attribute`s
+can be `any` thing. For example, it can be a list of "wrapper"
+nodes. The `wrapper` nodes compose `Node`s and provide
+helper methods for execution or transformation. `Attribute`
+can also contain other things that describe some properties of
+the `Graph` or `Graph` nodes. `Attribute` can be passed
+across `Pass`es. However, it should be used with care.
+ +```cpp +class Graph { + public: + explicit Graph(const ProgramDesc &program); + + bool Has(const std::string &attr_name) const; + + template + AttrType &Get(const std::string &attr_name) const; + + template + void Set(const std::string &attr_name, AttrType *attr); + const std::unordered_set &Nodes() const; + + // Create a normal variable with non-null VarDesc. + ir::Node *CreateVarNode(VarDesc *var_desc); + + // Create a normal runnable operator with OpDesc. + ir::Node *CreateOpNode(OpDesc *op_desc); + + // Create a control dependency var that connects 2 operations. The + // var doesn't hold any data. Other than that, it's no different from + // other var, considering dependency analysis. + ir::Node *CreateControlDepVar(); + + // A more free style way of creating a graph node. Mostly use for test + // or "copy" from another node. Avoid using it if possible. + ir::Node *CreateEmptyNode(const std::string &name, ir::Node::Type type); + + // Clear all node information of the graph and return the ownership of the + // nodes. + std::vector> ReleaseNodes(); +}; +``` + +#### Pass + +`Pass` represents a transformation of `Graph`. Its input +is a `Graph` and its output is also a `Graph`. For example, +a `Pass` can simply print out the `Graph`. A `Pass` +can also fuse some `Graph`'s `Node`s. + +```cpp +class Pass { + public: + + std::unique_ptr Apply(std::unique_ptr graph) const { + // Some correctness check. + auto new_graph = ApplyImpl(std::move(graph)); + // Some correctness check. + return new_graph; + } + + // Get a reference to the attributed previously set. + template + AttrType &Get(const std::string &attr_name) const; + + // Set a pointer to the attribute. Pass takes ownership of the attribute. + template + void Set(const std::string &attr_name, AttrType *attr) ; + + // Set a pointer to the attribute. Pass doesn't take ownership. Caller + // should delete the attribute. + template + void SetNotOwned(const std::string &attr_name, AttrType *attr); + + protected: + virtual std::unique_ptr ApplyImpl(std::unique_ptr graph) const = 0; +}; + +// In my_pass.cc +class MyPass : public Pass { + protected: + std::unique_ptr ApplyImpl(std::unique_ptr graph) const override { + // do something. + return graph; + } +} +REGISTER_PASS(my_pass, MyPass) +.RequirePassAttr("places") +.RequireGraphAttr("dep_vars"); + + +// To use the pass. +auto my_pass = ir::PassRegistry::Instance().Get("my_pass"); +graph = my_pass->Apply(std::move(graph)); +// Note: to force link my_pass.cc, in the code: +USE_PASS(my_pass); +``` + +#### Optimize + +`Optimize` contains a series of `Pass` with defined order. +`Optimize` transforms a `Graph` that only contains raw +modeling logic to a `Graph` that can be run efficiently while +maintaining the original modeling logic. + + +### Optimize Process + +* Program is first converted to Graph. +* Graph goes through a series of Pass +* Graph is transformed from raw model logic to a +form that is efficient to execute. + +``` +// Program->ProgramToGraph->Graph->Pass1->Graph->Pass2->Graph->Pass3->Graph->Executor +auto graph = Graph(program); +graph = PassRegistry::Instance().Get("op_fuse_pass").Apply(std::move(grah)); +// For more complex Pass, Optimize Process can provide Pass attributes. 
+auto mem_opt_pass = PassRegistry::Instance().Get("memory_optimization_pass"); +mem_opt_pass.SetNotOwned("optimize_level", 1); +mem_opt_pass->Apply(std::move(graph)); +graph = PassRegistry::Instance().Get("multi_devices_pass").Apply(std::move(grah)); +graph = PassRegistry::Instance().Get("multi_devices_check_pass").Apply(std::move(grah)); +Executor exe; +exe.Run(graph); + +``` diff --git a/doc/paddle/design/memory/README.md b/doc/paddle/design/memory/README.md new file mode 100644 index 0000000000000000000000000000000000000000..6964c543eaa7557d279c470a27a1a830544fb4a4 --- /dev/null +++ b/doc/paddle/design/memory/README.md @@ -0,0 +1,141 @@ +# Region-based Heterogeneous Memory Management +## Design + +### Usage + +To allocate 4KB CPU memory: + +```cpp +p = memory::Alloc(platform::CPUPlace(), 4*1024); +``` + +To allocate 4KB memory on the 3rd GPU: + +```cpp +p = memory::Alloc(platform::CUDAPlace(2), 4*1024); +``` + +To free memory and check the so-far used amount of memory on a place: + +```cpp +auto pl = platform::CUDAPlace(0); +p = memory::Alloc(pl, 4*1024); +cout << memory::Used(pl); +memory::Free(pl, p); +``` + +### API + +In `paddle/memory/memory.h` we have: + +```cpp +namespace memory { +template void* Alloc(Place, size_t); +template void Free(Place, void*); +template size_t Used(Place); +} // namespace memory +``` + +These function templates have specializations on either `platform::CPUPlace` or `platform::CUDAPlace`: + +```cpp +template<> +void* Alloc(CPUPlace p, size_t size) { + return GetCPUBuddyAllocator()->Alloc(size); +} +``` + +and + +```cpp +template<> +void Alloc(CUDAPlace p, size_t size) { + return GetGPUBuddyAllocator(p.id)->Alloc(size); +} +``` + +Similar specializations exist for `Free` and `Used`. + +### Implementation + +`GetCPUBuddyAllocator` and `GetGPUBuddyAllocator` are singletions. + +```cpp +BuddyAllocator* GetCPUBuddyAllocator() { + static BuddyAllocator* a = NULL; + if (a == NULL) { + a = new BuddyAllocator(new CPUAllocator /*backup allocator*/, ...); + } + return a; +} + +BuddyAllocator* GetGPUBuddyAllocator(int gpu_id) { + static BuddyAllocator* as = NULL; + if (as == NULL) { + as = new BuddyAllocator*[platform::NumGPUs()]; + for (int gpu = 0; gpu < platform::NumGPUs(); gpu++) { + as[gpu] = new BuddyAllocator(new GPUAllocator(gpu) /* backup allocator */, ...); + } + } + return as[gpu_id); +``` + +#### `BuddyAllocator` + +`BuddyAllocator` implements the buddy allocation algorithm. Its constructor takes parameters only related with the algorithm: + +```cpp +BuddyAllocator::BuddyAllocator(initial_pool_size, max_pool_size) { + ... +} +``` + +Please be aware that **`BuddyAllocator` always allocate aligned memory**, aligned on 32-bytes, which can hold a `BuddyAllocator::Block` object: + +```cpp +class BuddyAllocator { + private: + struct Block { + size_t size; + Block* left, right; + size_t index; // allocator id + }; + ... +}; +``` + +Because BuddyAllocator has the meta-data of each block, it can trace the used memory -- record the amount returned by `Alloc` freed in `Free`. Instead, `CPUAllocator` and `GPUAllocator` doesn't know the size of freed memory block and cannot do the trace. + +#### System Allocators + +The `GPUAllocator` and `CPUAllocator` are calls *system allocators*. They work as the fallback allocators of `BuddyAllocator`. + +## Justification + +I got inspiration from Majel and Caffe2, though above design look different from both. + +### Caffe2 + +In Caffe2, `Tensor::mutable_data()` allocates the memroy. 
In particular, [`Tensor::mutable_data`](https://github.com/caffe2/caffe2/blob/v0.7.0/caffe2/core/tensor.h#L523) calls [`Tensor::raw_mutable_data`](https://github.com/caffe2/caffe2/blob/v0.7.0/caffe2/core/tensor.h#L459), which in turn calls [`Context::New`](https://github.com/caffe2/caffe2/blob/v0.7.0/caffe2/core/tensor.h#L479). + +There are two implementations of `Context`: + +1. [`CPUContext`](https://github.com/caffe2/caffe2/blob/v0.7.0/caffe2/core/context.h#L105), whose [`New` method](https://github.com/caffe2/caffe2/blob/v0.7.0/caffe2/core/context.h#L131) calls [`g_cpu_allocator.get()->New(size_t)`](https://github.com/caffe2/caffe2/blob/v0.7.0/caffe2/core/context.cc#L15) to allocate the memory. + +1. [`CUDAContext`](https://github.com/caffe2/caffe2/blob/v0.7.0/caffe2/core/context_gpu.h#L99), which has a data member [`int gpu_id_`](https://github.com/caffe2/caffe2/blob/v0.7.0/caffe2/core/context_gpu.h#L202). This looks very similar to class `majel::CUDAPlace`, who also has an `int id_` data member. `CUDAContext::New(size_t)` calls [`g_cub_allocator->DeviceAllocate(&ptr, nbytes)`](https://github.com/caffe2/caffe2/blob/v0.7.0/caffe2/core/context_gpu.cu#L355) to allocate the memory. + +### Majel + +In Majel, there are basically two allocator types: + +1. `cpu::SystemAllocator`, which has similar functionality to `caffe2::CPUContext::New/Delete`. +1. `gpu::SystemAllocator`, which has similar functionality to `caffe2::CUDAContext::New/Delete`. + +However, memory allocation is not via these two allocators. Instead, these two allocators are defined in hidden namespaces. + +In Majel there are hidden global variables like: + +1. `cpu::SystemAllocator g_cpu_allocator`, and +1. `vector g_gpu_allocators(NUM_GPUS)`. + +Programs allocate memory via a BuddyAllocator, which can take the `g_cpu_allocator` or a `g_gpu_allocators[gpu_id]` as its *fallback allocator*, so that if BuddyAllocator cannot find a block in its memory pool, it extends its memory pool by calling the fallback allocator's `New(size_t)`. diff --git a/doc/paddle/design/memory/images/control_flow_graph.png b/doc/paddle/design/memory/images/control_flow_graph.png new file mode 100644 index 0000000000000000000000000000000000000000..3579998e58d07abc50bd3332128d4733a391cb3b Binary files /dev/null and b/doc/paddle/design/memory/images/control_flow_graph.png differ diff --git a/doc/paddle/design/memory/images/dataflow_equations.png b/doc/paddle/design/memory/images/dataflow_equations.png new file mode 100644 index 0000000000000000000000000000000000000000..c10f7f69f4007952e5b0394edaa04efa1cfbb658 Binary files /dev/null and b/doc/paddle/design/memory/images/dataflow_equations.png differ diff --git a/doc/paddle/design/memory/images/deep_learning.png b/doc/paddle/design/memory/images/deep_learning.png new file mode 100644 index 0000000000000000000000000000000000000000..026becc4d94e01e407dacb2a5314a0e5723334ff Binary files /dev/null and b/doc/paddle/design/memory/images/deep_learning.png differ diff --git a/doc/paddle/design/memory/index.html b/doc/paddle/design/memory/index.html new file mode 100644 index 0000000000000000000000000000000000000000..acbf00a6d5c3e0c380b8fc32823e04a9f543646b --- /dev/null +++ b/doc/paddle/design/memory/index.html @@ -0,0 +1,205 @@ + + + + + + + + + + + + + + + + + +
+ + + + + + + diff --git a/doc/paddle/design/memory/index_cn.rst b/doc/paddle/design/memory/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c507c638bd1a6eb428175ed2756a6ecfc6cca198 --- /dev/null +++ b/doc/paddle/design/memory/index_cn.rst @@ -0,0 +1,7 @@ +内存管理 +------------ + +.. toctree:: + :maxdepth: 1 + + memory_optimization.md diff --git a/doc/paddle/design/memory/index_en.rst b/doc/paddle/design/memory/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..f7526437a73a09b300f05e138084755f5528b242 --- /dev/null +++ b/doc/paddle/design/memory/index_en.rst @@ -0,0 +1,7 @@ +Memory Management +------------------- + +.. toctree:: + :maxdepth: 1 + + memory_optimization.md diff --git a/doc/paddle/design/memory/memory_optimization.md b/doc/paddle/design/memory/memory_optimization.md new file mode 100644 index 0000000000000000000000000000000000000000..b712a9ea0bdf97cbfaf1f84ddf0405090f971c49 --- /dev/null +++ b/doc/paddle/design/memory/memory_optimization.md @@ -0,0 +1,217 @@ +# Memory Optimization + + +## Problem + +In a lecture from Andrew Ng, he attributes the recent sucess of AI due to a combination of these: + +- Availability of Big Data +- Supercomputing power to process this Big Data over very large neural networks +- Modern algorithms + +Following graph shows the details: + +![](images/deep_learning.png) + +Larger model usually bring better performance. However, GPU memory is limited. For example, the memory size of a GTX TITAN X is only 12GB. To train complex and large models, we have to take care of memory usage. Besides, memory optimization is also necessary in both online/mobile inference. + +## Solution + +### Basic Strategy + +There are some basic strategies to improve memory usage, including in-place operations and memory sharing. + +#### In-place Operation +In a relu activation operator: + +$y = \max(x, 0)$ + +If the variable x is not used in any other operator, we can make an in-place operation. In other words, the memory block of variable y and variable x will be the same. In-place operations will save 50% memory occupancy immediately. + +#### Memory Sharing + +Not all operators support in-place operations. Memory sharing is a more general strategy. + +Following is an example: + +``` +a = op1(b, c); +d = op2(a) +e = op3(d, f) +``` + +In this case, variable a is no longer used, and op2 does not support in-place operation. After op2 finishes, we can put the memory of variable a to a memory pool. Then, variable e can share the memory of variable a from the pool. + + +### Live Variable Analysis + +It's not enough to only have some basic strategies. The pre-requisite of memory optimization is to know if a variable is still "live" after an operation. + +In our design, the neural network topology is defined as a program. Luckily, [live variable analysis](https://en.wikipedia.org/wiki/Live_variable_analysis) is a classic problem in compilers which can be used in many stages, such as register allocation. + +In compilers, the front end of the compiler translates programs into an intermediate language with an unbounded number of temporary variables. This program must run on a machine with a bounded number of registers. Two temporary variables a and b can fit into the same register, if a and b are never "in use" at the same time. Thus, many temporary variables can fit in few registers; if they don't all fit, the excess tempory variables can be kept in memory. 
Therefore, the compiler needs to analyze the intermediate-representation program to determine which temporary variables are in use at the same time. We say a variable is "live" if it holds a value that may be needed in the future, so this analysis is called liveness analysis.

We can learn these techniques from compilers. There are mainly two stages in live variable analysis:

- construct a control flow graph
- solve the dataflow equations


#### Control Flow Graph
To perform analysis on a program, it is often useful to make a control flow graph. A [control flow graph](https://en.wikipedia.org/wiki/Control_flow_graph) (CFG) in computer science is a representation, using graph notation, of all paths that might be traversed through a program during its execution. Each statement in the program is a node in the flow graph; if statement x can be followed by statement y, there is an edge from x to y.

The following is the flow graph for a simple loop.

![](images/control_flow_graph.png)

#### Dataflow Analysis

The liveness of a variable "flows" around the edges of the control flow graph; determining the live range of each variable is an example of a dataflow problem. [Dataflow analysis](https://en.wikipedia.org/wiki/Data-flow_analysis) is a technique for gathering information about the possible set of values calculated at various points in a computer program.

A simple way to perform dataflow analysis of programs is to set up dataflow equations for each node of the control flow graph and solve them by repeatedly calculating the output from the input locally at each node until the whole system stabilizes.

- Flow Graph Terminology

A flow graph node has out-edges that lead to successor nodes, and in-edges that come from predecessor nodes. The set *pred[n]* is all the predecessors of node n, and *succ[n]* is the set of successors. In the control flow graph above, the out-edges of node 5 are 5 --> 6 and 5 --> 2, so *succ[5]* = {2, 6}. The in-edges of 2 are 5 --> 2 and 1 --> 2, so *pred[2]* = {1, 5}.

- Uses and Defs

An assignment to a variable or temporary defines that variable. An occurrence of a variable on the right-hand side of an assignment (or in other expressions) uses the variable. We can define the *def* of a variable as the set of graph nodes that define it, or the *def* of a graph node as the set of variables that it defines; similarly for the *use* of a variable or graph node. In the control flow graph above, *def(3)* = {c}, *use(3)* = {b, c}.

- Liveness

A variable is *live* on an edge if there is a directed path from that edge to a *use* of the variable that does not go through any *def*. A variable is *live-in* at a node if it is live on any of the in-edges of that node; it is *live-out* at a node if it is live on any of the out-edges of the node.


The calculation of liveness can be solved by iteration until a fixed point is reached. The following is the recursive formula (a small code sketch of this iteration is shown below, after the in-place attribute note):

![](images/dataflow_equations.png)

### Memory optimization transpiler

Finally, we take the basic strategies and the liveness analysis techniques learned from compilers to implement our memory optimization transpiler.

#### add in-place attribute

In-place is a built-in attribute of an operator. Since we treat in-place and other operators differently, we have to add an in-place attribute for every operator.
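As referenced above, the dataflow equations can be solved by plain fixed-point iteration. The following is only a generic sketch of that computation, not PaddlePaddle's transpiler code; the `nodes`, `succ`, `use` and `defs` maps are illustrative stand-ins for whatever the control flow graph provides.

```python
def liveness(nodes, succ, use, defs):
    """Solve the liveness dataflow equations by fixed-point iteration:

        live_in[n]  = use[n] | (live_out[n] - defs[n])
        live_out[n] = union of live_in[s] for every s in succ[n]
    """
    live_in = {n: set() for n in nodes}
    live_out = {n: set() for n in nodes}
    changed = True
    while changed:
        changed = False
        for n in reversed(nodes):  # liveness flows backwards, so reverse order converges quickly
            new_out = set()
            for s in succ[n]:
                new_out |= live_in[s]
            new_in = use[n] | (new_out - defs[n])
            if new_in != live_in[n] or new_out != live_out[n]:
                live_in[n], live_out[n] = new_in, new_out
                changed = True
    return live_in, live_out


# The running example: a = op1(b, c); d = op2(a); e = op3(d, f)
nodes = ["op1", "op2", "op3"]
succ = {"op1": ["op2"], "op2": ["op3"], "op3": []}
use = {"op1": {"b", "c"}, "op2": {"a"}, "op3": {"d", "f"}}
defs = {"op1": {"a"}, "op2": {"d"}, "op3": {"e"}}

live_in, live_out = liveness(nodes, succ, use, defs)
# live_in["op1"] == {"b", "c", "f"}, live_out["op2"] == {"d", "f"}, live_out["op3"] == set()
```

Running it on the three-operator example used in the dataflow analysis section below reproduces the live-in/live-out sets listed there.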
+ + +#### contruct control flow graph + +Following is the ProgramDesc protobuf of [machine translation](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/tests/book/test_machine_translation.py) example. + +- Block0: + +``` +lookup_table +mul +... +while(sub-block idx 1) +... +array_to_lod_tensor +cross_entropy +... +while_grad(sub-block idx 2) +read_from_array +array_to_lod_tensor +... +``` + +- Block1 + +``` +read_from_array +read_from_array +... +write_to_array +increment +write_to_array +less_than +``` + +- Block2 + +``` +read_from_array +increment +... +write_to_array +write_to_array +``` + +We can transfer all the operators and variables in ProgramDesc to build a control flow graph. + +```python +class ControlFlowGraph(object): + def __init__(self, Program): + self._sucessors = defaultdict(set) + self._presucessors = defaultdict(set) + self._uses = defaultdict(set) + self._defs = defaultdict(set) + self._live_in = defaultdict(set) + self._live_out = defaultdict(set) + self._program = Program + + def build(self): + pass + + def dataflow_analysis(self): + pass + + def memory_optimization(self): + pass + + def get_program(self): + return self._program +``` + +#### Make dataflow analysis + +We follow the guide from compilers and try to solve the dataflow equation to get liveness of every variable. If the live-in of an operator node is different from the live-out, then we can make memory sharing. + +For example: + +``` +a = op1(b, c); +d = op2(a) +e = op3(d, f) +``` + +The dataflow analysis result is: + +``` +live_in(op1) = {b, c, f} +live_out(op1) = {a, f} + +live_in(op2) = {a, f} +live_out(op2) = {d, f} + +live_in(op3) = {d, f} +live_out(op3) = {} +``` + +After op1, we can process variable b and variable c; After op2, we can process variable a. After op3, we can process variable d and variable f. + +#### memory sharing policy + +A memory pool will be mantained in the stage of memory optimization. Each operator node will be scanned to determine memory optimization is done or not. If an operator satifies the requirement, following policy will be taken to handle input/output variables. + +``` +if op.support_inplace(): + i --> pool + pool --> o +else: + pool --> o + i --> pool +``` + + + +## Reference + +- [Lecture Notes From Artificial Intelligence Is The New Electricity By Andrew Ng](https://manavsehgal.com/lecture-notes-from-artificial-intelligence-is-the-new-electricity-by-andrew-ng-4712dcbf26e5) +- Modern compiler implementation in ML, by Andrew W. Appel +- [Optimizing Memory Consumption in Deep learning](https://mxnet.incubator.apache.org/architecture/note_memory.html) diff --git a/doc/paddle/design/mkldnn/acquire_api/acquire_api.md b/doc/paddle/design/mkldnn/acquire_api/acquire_api.md new file mode 100644 index 0000000000000000000000000000000000000000..622f777c30494d7f3b0a62011e4e1d52ba42a2cd --- /dev/null +++ b/doc/paddle/design/mkldnn/acquire_api/acquire_api.md @@ -0,0 +1,76 @@ +# Design Doc: MKL-DNN Acquire API + +MKL-DNN kernels that are using MKL-DNN API tend to be quite complex due to: +* number of MKL-DNN api calls needed, which in fact are mostly repeated across all MKL-DNN kernels +* caching mechanism of MKL-DNN objects (conceptually the same across all paddle MKL-DNN kernels) +* still evolving MKL-DNN API which makes paddle MKL-DNN kernels difficult to maintain + +Hence Acquire API was created to wrap around MKL-DNN API that address above defined issues. 
+ +### Common functionality +Each MKL-DNN kernel is essentially creating MKL-DNN memory objects followed by creation of MKL-DNN computational primitives and as a last step, execution +of created MKL-DNN primitives is triggered. Creation of mentioned MKL-DNN primitives require at least few calls to MKL-DNN API (for each MKL-DNN object) and code is much more complex when caching of created objects is added. Moreover code is pretty similar across MKL-DNN kernels, hence Acquire API was designed to provide easy to use way of creating and caching mentioned MKL-DNN objects. Having common code implemented inside Acquire API, to be used in operators, require less effort when creating given operator. It also makes integration of MKL-DNN kernels shorter and less prone to errors. + +### Details of Acquire API +Basic element of Acquire API is so called Handler. There is Basic MKLDNNHandler class which is implementing a code common to all operators using Acquire API . On the picture below rightmost nodes (Nodes grouped with "Basic MKLDNNHandler") represent common functionality used by Softmax and activation MKL-DNN kernels. Apart from basic MKLDNNHandler, there are derived handlers that are implementing functionality that is specific to given operator eg. Constructing caching key for given operator and add some non-standard function for getting workspace memory objects (Nodes grouped with "Derived handlers"). Leftmost nodes are entry functions (Compute) of Softmax and activation MKL-DNN kernels. + +![](images/acquire.svg) + +Caching MKL-DNN objects is already implemented in Basic MKLDNNHandler, so most of the time when implementing derived handler you do not have to consider caching. + +### Usage of Acquire MKL-DNN for MKL-DNN kernels implementation + +#### 1. Creating MKLDNNHandler +As a first step one need to create derived handler for his target MKL-DNN kernel (operator). For LRN op it would be LRNMKLDNNHandler that inherits from MKLDNNHandlerT. +Goal of derived handler is to provide operator specific functionality: creating key to caching, creation of Forward and Backward MKL-DNN primitive descriptors. +It is best to look into existing examples of derived handlers and implement new one by analogy. + +Example code of calling created LRN MKLDNNHandler: + + const float alpha = ctx.Attr("alpha") * static_cast(n); + const float beta = ctx.Attr("beta"); + const float k = ctx.Attr("k"); + bool is_test = ctx.Attr("is_test"); + + auto dims = paddle::framework::vectorize(x->dims()); + + platform::LRNMKLDNNHandler handler(dims, n, alpha, beta, k, x->format(), + is_test, dev_ctx, ctx.GetPlace(), + ctx.op().Output("Out")); + +#### 2. Creating MKL-DNN Memory objects +Once we have a derived handler, then it is time to get needed MKL-DNN memory objects. Memory objects either can wrap Tensor data or allocate data on its own. +Family of functions to get Memory objects are: +* AcquireSrcMemory +* AcquireDstMemory +* AcquireDiffDstMemory +* etc... + +They do expect Tensor to be passed as a parameter to each of them so then MKL-DNN memory object is wrapping Tensor (recommended way). If this is not possible +like in a case of some of workspace memory objects then avoiding passing Tensor will trigger creation of MKL-DNN memory object with its own allocation. + +Example usage based on LRN MKL-DNN kernel: + + auto src_memory = handler.AcquireSrcMemory(x); // x is input tensor of LRN + auto dst_memory = handler.AcquireDstMemory(out); // out is output tensor of LRN + +#### 3. 
Creating MKL-DNN computational primitives +Once We got Handler and MKL-DNN memory objects then we are to get computational MKL-DNN primitive. This is done with AcquireForwardPrimitive (For forward pass op) and AcquireBackwardPrimitive (for grad pass op). + +Example usage based on LRN MKL-DNN kernel: + + lrn_p = handler.AcquireForwardPrimitive(*src_memory, *dst_memory); + +#### 4. Execution of MKL-DNN computational primitives +Having memory objects and computational primitive we may trigger its execution . Example for LRN op: + + std::vector pipeline = {*lrn_p}; + mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait(); + +#### 5. Registering MKL-DNN memory format in corresponding Tensor +Last step is to register MKL-DNN output memory object format inside of Output tensor eg. set Tensor::format_ to MKL-DNN enum that corresponds the way Tensor data is arranged (NCHW, NCHW16C etc.) This enum can be taken from dst memory object (wrapper to Output tensor) in Forward pass or from diff_src memory object (wrapper to X_grad Tensor). + +Example of registring MKL-DNN format in output tensor: + + out->set_layout(framework::DataLayout::kMKLDNN); + out->set_format(platform::GetMKLDNNFormat(*dst_memory)); diff --git a/doc/paddle/design/mkldnn/acquire_api/images/acquire.svg b/doc/paddle/design/mkldnn/acquire_api/images/acquire.svg new file mode 100644 index 0000000000000000000000000000000000000000..a304a6c8ad84bde9b9eb28341d6f4f173b32d698 --- /dev/null +++ b/doc/paddle/design/mkldnn/acquire_api/images/acquire.svg @@ -0,0 +1,111 @@ + + + + + + +%3 + +cluster_A + +Derived Handlers + +cluster_B + +Base MKLDNNHandler + + +Node0x490c380 + +SoftmaxMKLDNNKernel::Compute() + + +Node0x4915e90 + +SoftmaxMKLDNNHandler::SoftmaxMKLDNNHandler<forward>() + + +Node0x490c380->Node0x4915e90 + + + + +Node0x49164c0 + +MKLDNNHandlerT::AcquireSrcMemory() + + +Node0x490c380->Node0x49164c0 + + + + +Dst + +MKLDNNHandlerT::AcquireDstMemory() + + +Node0x490c380->Dst + + + + +Node0x491bca0 + +MKLDNNHandlerT::AcquireForwardPrimitive() + + +Node0x490c380->Node0x491bca0 + + + + +Node0x4ab38f0 + +MKLDNNActivationKernel::Compute() + + +Node0x4b2e4f0 + +ActivationMKLDNNHandler::ActivationMKLDNNHandler<forward>() + + +Node0x4ab38f0->Node0x4b2e4f0 + + + + +Node0x4ab38f0->Node0x49164c0 + + + + +Node0x4ab38f0->Dst + + + + +Node0x4ab38f0->Node0x491bca0 + + + + +Node0x496cfc0 + +MKLDNNHandlerT::AcquireForwardPrimitiveDescriptor() + + +Node0x4915e90->Node0x496cfc0 + + + + +Node0x4b2e4f0->Node0x496cfc0 + + + + + diff --git a/doc/paddle/design/mkldnn/acquire_api/index_en.rst b/doc/paddle/design/mkldnn/acquire_api/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..1038704070862a3c6a5c98df156260fe0bca6e47 --- /dev/null +++ b/doc/paddle/design/mkldnn/acquire_api/index_en.rst @@ -0,0 +1,7 @@ +MKL-DNN Acquire API +-------------------------------------- + +.. 
toctree:: + :maxdepth: 1 + + acquire_api.md diff --git a/doc/paddle/design/mkldnn/acquire_api/scripts/acquire.dot b/doc/paddle/design/mkldnn/acquire_api/scripts/acquire.dot new file mode 100644 index 0000000000000000000000000000000000000000..3332fc6eb3def2743408845d7c09630034e69a66 --- /dev/null +++ b/doc/paddle/design/mkldnn/acquire_api/scripts/acquire.dot @@ -0,0 +1,53 @@ +digraph { + rankdir=LR + weight=0.5 + concentrate=true + splines=ortho + newrank=true +nodesep=1 + + node[width=4.4,shape=box] + + Node0x490c380 [shape=record,label="SoftmaxMKLDNNKernel::Compute()\l"]; + + Node0x4ab38f0 [shape=record,label="MKLDNNActivationKernel::Compute()\l"]; + + subgraph cluster_A { + label="Derived Handlers" + node[width=7.4,shape=box] + style=dotted +// Dummy[shape=record,label="", color=invis]; + Node0x4915e90 [shape=record,label="SoftmaxMKLDNNHandler::SoftmaxMKLDNNHandler\()\l"]; + Node0x4b2e4f0 [shape=record,label="ActivationMKLDNNHandler::ActivationMKLDNNHandler\()\l"]; + } + + subgraph cluster_B { + label="Base MKLDNNHandler" + style=dotted + node[width=6.2,shape=box] + Node0x49164c0 [shape=record,label="MKLDNNHandlerT::AcquireSrcMemory()\l"]; + Dst[shape=record,label="MKLDNNHandlerT::AcquireDstMemory()\l"]; + Node0x491bca0 [shape=record,label="MKLDNNHandlerT::AcquireForwardPrimitive()\l"]; + Node0x496cfc0 [shape=record,label="MKLDNNHandlerT::AcquireForwardPrimitiveDescriptor()\l"]; + } + +Node0x490c380 -> Node0x4915e90[style="bold"]; +Node0x490c380 -> Node0x49164c0; +Node0x490c380 -> Node0x491bca0; +Node0x490c380 -> Dst; +Node0x4915e90 -> Node0x496cfc0; + + +{rank=same Node0x4ab38f0 Node0x490c380 } // Compute level +{rank=same Node0x4915e90 Node0x4b2e4f0 } // Derived Handler level +{rank=same Node0x49164c0 Dst Node0x491bca0 Node0x496cfc0 } // Compute level + + + +Node0x4b2e4f0 -> Node0x496cfc0 +Node0x4ab38f0 -> Node0x49164c0 +Node0x4ab38f0 -> Dst +Node0x4ab38f0 -> Node0x491bca0 +Node0x4ab38f0 -> Node0x4b2e4f0[style="bold"] + +} diff --git a/doc/paddle/design/mkldnn/caching/caching.md b/doc/paddle/design/mkldnn/caching/caching.md new file mode 100644 index 0000000000000000000000000000000000000000..83e2e7da0120ff2befc8f799db83b6eb6f3d3dc8 --- /dev/null +++ b/doc/paddle/design/mkldnn/caching/caching.md @@ -0,0 +1,91 @@ +# Design Doc: MKL-DNN Data Caching + +Fluid MKL-DNN integration is having caching mechanism to store MKL-DNN objects. Purpose of storing MKL-DNN objects is: +* Transfer of MKL-DNN objects from Forward ops to Grad ops. +* Store once created MKL-DNN objects in order To avoid MKL-DNN recreation + +### 1. General architecture +Basic idea is that MKL-DNN cache is hash map where key name is pointing to MKL-DNN object stored in mentioned hash map. +In more detail MKL-DNN Cache is stored inside MKL-DNN Device Context and it consists of three level of unordered maps based structure. Picture below outlines mentioned architecture: + +![](images/cache.svg) + +MKL-DNN cache is to work in both single-threaded and multi-threaded execution scenarios (both inference and training) as well as in a situation when memory constraints are applied. To address memory constraint problem, clear cache mode was introduced. + +To make MKL-DNN cache working with multi-threading scenarios: +* Top level cache separates entries per session +* reading and modifying operations on MKL-DNN cache are ensured to be thread safe +* cache key contains Thread ID (when applicable) + +The design of MKL-DNN cache is to support dynamic shapes scenario (BERT model for example). 
Since MKL-DNN primitives are sensitive to src/dst shapes, a new primitive needs to be created whenever a new input shape arrives. That means many primitives may end up cached and the MKL-DNN cache could consume lots of memory. By introducing the second level cache we can treat these primitives as a group: once the memory limit is reached, we can clear a whole group instead of just one primitive and release more memory.

Performance-wise it is better to clear a group of primitives at once rather than to erase single primitives based on FIFO. Moreover, when searching the cache it is also more efficient to have primitives grouped by input shape, so that when referring to a specific primitive we only consider the primitives in its group, not all registered MKL-DNN cached objects.

Currently the model's input shape that serves as the key for the second level cache is not related to the input shape which is part of the key in the first level cache.

##### a. Operational modes of MKL-DNN cache
The default mode is the normal operating mode of the MKL-DNN cache, suitable for single- and multi-threaded execution, i.e. the Thread ID is part of the cache key name that is used for referring to cached objects.

Cache clearing mode is a special operating mode designed for single-threaded usage (the Thread ID is not part of the cache key). In this mode, when the set capacity of the cache would be exceeded by adding the next entry, the registered entries corresponding to the oldest input shape used by PaddlePaddle in the given session are erased (refer to _section b._ for details on erasing cache entries based on input shape). The input shape is registered as a key for the second level cache in _AnalysisPredictor::MkldnnPreSet()_.

##### b. Adding object to MKL-DNN cache

Picture | Description
---------------|---------------------------------------------------
![](images/set_blob.svg) | Updating an object to be stored under a given key (name) is done via the _SetBlob_ method. As mentioned earlier, the MKL-DNN cache is a three-level architecture (session, input shape and name), hence we start by getting the current session ID, and right after that a critical section is entered to make sure no two threads update the MKL-DNN cache at the same time. Having the session ID, we check whether a cache for this session ID exists (third level cache). If not, a new cache object is created. Inside the cache object corresponding to the running session, we check whether the currently used *input shape* has its own cache object (second level cache). If no cache object for the current input shape exists, the corresponding cache object has to be created. If the MKL-DNN cache is operating in cache clear mode and the number of cached objects (second level cache) within the currently active session exceeds the set capacity, then the cache entries for the oldest input shape are removed, i.e. the one cached object (second level cache) corresponding to the oldest input shape, which contains a number of first level cache objects, is erased, and then the cache object for the current input shape is created. Finally, when the second level cache object for the given input shape has been acquired, we look for the cached object pointed to by the hash key (name) inside of it (first level cache). If the requested object does not exist then it is created, and if it does exist then the given **data** is assigned as the cached object pointed to by **name**.


##### c. Getting object from MKL-DNN cache
Picture | Description
---------------|---------------------------------------------------
![](images/get_blob.svg) | Getting a cached object (given by key (name)) is done via the _GetBlob_ method. The general idea, i.e. traversing the three cache levels, is the same as in _SetBlob_; the difference is that when a cached object (at any cache level) does not exist, a null object is returned.

### 2. Clearing MKL-DNN cache
MKL-DNN cache clearing is a solution created to address the situation when the memory consumed by the cache outgrows the physically available resources of a given instance (a virtual machine running PaddlePaddle).

The MKL-DNN device context provides a method for erasing cache entries, namely _MKLDNNDeviceContext::ResetBlobMap()_. Currently _ResetBlobMap()_ is used in three situations:
* Inference of integer based MKL-DNN models
* Releasing the Executor object
* Cache clear mode

##### a. Inference of integer based MKL-DNN models
Execution of Int8 MKL-DNN models consists of two stages:
* Warm-up run
* Int8 inference execution

At the warm-up stage, FP32 inference is performed and the ranges of signals and weights are gathered for the MKL-DNN kernels that have an int8 implementation. Based on the recorded ranges, scale parameters are determined to be passed to the MKL-DNN int8 primitives. This stage is executed by FP32 kernels, and once Int8 inference execution starts we no longer need the cached FP32 MKL-DNN primitives. This is the reason why clearing of the cache is called after the warm-up run stage.

##### b. Releasing Executor object
Additionally, erasing the MKL-DNN cache is performed when the Executor object is to be destroyed. Here the clearing procedure was introduced to enforce an empty MKL-DNN cache when starting to execute MKL-DNN unit tests. To be more precise, MKL-DNN unit test files like *test_conv2d_mkldnn_op.py* contain a number of testing scenarios. Without clearing the MKL-DNN cache, all of those tests would share cached primitives. This is not needed and previously had an impact on the shape of the hashing key, which had to be modified (extended) to make sure a cached primitive is not reused in a situation when it should be recreated. In other words, unit tests share input and output names of Tensors, which would require the hash key to be equipped with the attribute data of the given MKL-DNN primitive to avoid false reuse of MKL-DNN primitives. Hence, in order not to complicate the hashing key, clearing of the cache was introduced when the Executor object is destroyed; that way, testing scenarios are isolated from each other, i.e. each scenario is executed with a cleared MKL-DNN cache.

##### c. Cache clearing mode
Another situation when clearing of the cache happens is the cache clearing mode: *platform::kMKLDNNSessionID_CacheClearing*. In this mode, when a new entry is to be added to the cache, the size of the cache is compared with the given capacity, and once there is no space for the next object in the cache, the MKL-DNN cache is partially cleared. By default the cache does NOT work in clearing mode, i.e. the cache will store all objects it is given. To enable MKL-DNN cache clearing mode one needs to set the capacity of the MKL-DNN cache with *SetMkldnnCacheCapacity* (by default the capacity is set to 0, meaning no clearing based on the size of the cache; any non-negative value is allowed and its meaning is the size of the second level cache, i.e. the number of different input shape groups that can be cached).

Cache clearing mode is to be used in single-threaded scenarios where available memory resources are limited. In particular it is to be used when the executed model accepts inputs of various shapes (NLP models).

### 3. Transfer of MKL-DNN objects & Tensors from Forward ops to Grad ops.
+ +Best performance of MKL-DNN kernels is achieved when efficient data arrangement (memory layout) is used as a holder for input/output memory objects and when +number of conversions (reorders) among memory objects is limited to minimum (Not much time is wasted on rearranging data). +Choosing optimal memory layout is done by allowing MKL-DNN computationally heavy primitives (convolution, Inner Product) to choose best memory arrangement based on requested size of data, attributes of algorithm to be executed and architecture of platform. To help limit number of conversions (reorders), information on chosen memory layout and chosen MKL-DNN implementation, is shared among Forward and backward (grad) kernels of given operator instance. In particular forward MKL-DNN kernel decides which MKL-DNN implementation to use and then its decision (in a form of MKL-DNN primitive descriptor) is send to backward MKL-DNN operator. Transfer of MKL-DNN primitive descriptor from +Forward to backward kernel is done via Caching mechanism. + +Also MKL-DNN caching mechanism is used for transferring whole MKL-DNN memory allocations when there is no Tensor connecting Forward and Backward kernels available to be used for that purpose. An example could be a workspace MKL-DNN memory object in pooling (Max pooling) MKL-DNN kernel which transfers indices of maximal values (inside each pooling window) to the backward kernel where those indices are utilized for fast backward pooling computation. + + +##### Note on multi threading execution scenario +PaddlePaddle's program(models) can be executed by single thread as well as multiple threads. Graph's operators can be executed by different threads . In particular forward kernel of given operator may be executed by different thread +than backward kernel of the same instance of operator. In that situation hashing keys used for transferring MKL-DNN primitive descriptors cannot contain Thread ID specific values as key has to be shared among forward and backward kernel. Hence primitive descriptors (to be transfered to backward kernel) are using hashing key that does not contain any thread ID based value. As a result thread safety mechanics (critical section etc.) was employed to +ensure MKL-DNN cache consistency. + +### 4. Store once created MKL-DNN objects in order To avoid MKL-DNN recreation +While MKL-DNN computational algorithms are fast to be executed, preparing to execution e.g. Creation of computational primitives and its primitive descriptors takes significant time (From 2% up to 40% for latency mode inference, depends on Platform instruction sets and MKL-DNN version). We can save some time on recreation +of computational MKL-DNN primitives and its primitive descriptors, by storing once created MKL-DNN objects in a cache and refer to them in subsequent iterations when needed. diff --git a/doc/paddle/design/mkldnn/caching/images/cache.svg b/doc/paddle/design/mkldnn/caching/images/cache.svg new file mode 100644 index 0000000000000000000000000000000000000000..3edfa194646617afc073b33b63cb9b2fd55a16c1 --- /dev/null +++ b/doc/paddle/design/mkldnn/caching/images/cache.svg @@ -0,0 +1,72 @@ + + + + + + +Q + +cluster_bm + +BlobMap + +cluster_sm2 + +ShapeMap of Session ID=1 + +cluster_km + +KeyMap of X shape + +cluster_sm + +ShapeMap of Session ID=0 + +cluster_km + +KeyMap of X shape + +cluster_km2 + +KeyMap of Y shape + + +G + +... + + +C + +... + + +B + +second object of ShapeX + + +A + +first object of ShapeX + + +F + +... 
+ + +E + +second object of ShapeY + + +D + +first object of ShapeY + + + diff --git a/doc/paddle/design/mkldnn/caching/images/get_blob.svg b/doc/paddle/design/mkldnn/caching/images/get_blob.svg new file mode 100644 index 0000000000000000000000000000000000000000..99b49c73e475d4df288e251634cc33275344f930 --- /dev/null +++ b/doc/paddle/design/mkldnn/caching/images/get_blob.svg @@ -0,0 +1,119 @@ + + + + + + +G + +cluster_A + +Critical section + + +A0 + +Get session ID + + +A1 + +cache[ID] exists? + + +A0->A1 + + +ID + + +A3 + +Get cache[ID] + + +A1->A3 + + +yes + + +B + +return null object + + +A1->B + + +no + + +A4 + +cache[ID][shape] exists? + + +A3->A4 + + + + +A5 + +Get cache[ID][shape] + + +A4->A5 + + +yes + + +A4->B + + +no + + +B1 + +cache[ID][shape][name] exists? + + +A5->B1 + + + + +B1->B + + +no + + +B3 + +Get cache[ID][shape][name] + + +B1->B3 + + +yes + + +B4 + +return object of cache[ID][shape][name] + + +B3->B4 + + + + + diff --git a/doc/paddle/design/mkldnn/caching/images/set_blob.svg b/doc/paddle/design/mkldnn/caching/images/set_blob.svg new file mode 100644 index 0000000000000000000000000000000000000000..3d035f30f6be7e1daca3475d330087ea84c40892 --- /dev/null +++ b/doc/paddle/design/mkldnn/caching/images/set_blob.svg @@ -0,0 +1,188 @@ + + + + + + +G + +cluster_A + +Critical section + + +A0 + +Get session ID + + +A1 + +cache[ID] exists? + + +A0->A1 + + +ID + + +A + +Create cache[ID][shape] + + +A5 + +Get cache[ID][shape] + + +A->A5 + + + + +A2 + +Create cache[ID] + + +A1->A2 + + +no + + +A3 + +Get cache[ID] + + +A1->A3 + + +yes + + +A2->A3 + + + + +A4 + +cache[ID][shape] exists? + + +A3->A4 + + + + +A4->A5 + + +yes + + +B + +Clear cache mode? + + +A4->B + + +no + + +B1 + +cache[ID][shape][name] exists? + + +A5->B1 + + + + +B->A + + +no + + +D + +Cache full? + + +B->D + + +yes + + +B2 + +Create cache[ID][shape][name] + + +B1->B2 + + +no + + +B3 + +Get cache[ID][shape][name] + + +B1->B3 + + +yes + + +B2->B3 + + + + +B4 + +assign data to cache[ID][shape][name] + + +B3->B4 + + + + +D->A + + +no + + +F + +Erase cache[ID][oldest shape] + + +D->F + + +yes + + +F->A + + + + + diff --git a/doc/paddle/design/mkldnn/caching/index_en.rst b/doc/paddle/design/mkldnn/caching/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..97addd974723a67f4c1ab4ad8fb12a26976834d4 --- /dev/null +++ b/doc/paddle/design/mkldnn/caching/index_en.rst @@ -0,0 +1,7 @@ +MKL-DNN Data Caching +-------------------------------------- + +.. toctree:: + :maxdepth: 1 + + caching.md diff --git a/doc/paddle/design/mkldnn/caching/scripts/cache.dot b/doc/paddle/design/mkldnn/caching/scripts/cache.dot new file mode 100644 index 0000000000000000000000000000000000000000..a921c77233efd285a508f89dddd7e39d98b1e63c --- /dev/null +++ b/doc/paddle/design/mkldnn/caching/scripts/cache.dot @@ -0,0 +1,39 @@ +digraph Q { + + node[shape=record] + + + subgraph cluster_bm { + label="BlobMap"; + + subgraph cluster_sm2 { + label="ShapeMap of Session ID=1"; + subgraph cluster_km { + label="KeyMap of X shape"; + G[label="..."]; + } + } + subgraph cluster_sm { + label="ShapeMap of Session ID=0"; + + subgraph cluster_km { + label="KeyMap of X shape"; + C[label="..."]; + B[label="second object of ShapeX"]; + A[label="first object of ShapeX"]; + } + + subgraph cluster_km2 { + label="KeyMap of Y shape"; + F[label="..."]; + E[label="second object of ShapeY"]; + D[label="first object of ShapeY"]; + } + } + } + +} + +// For DefaultSessionID Key is having TID inside, for anything else eg. clearing mode , named session ID. 
no TID in key. ParallelExecutor is workign in default mode +// +// diff --git a/doc/paddle/design/mkldnn/caching/scripts/get_blob.dot b/doc/paddle/design/mkldnn/caching/scripts/get_blob.dot new file mode 100644 index 0000000000000000000000000000000000000000..a5c66a9d02b2a5b099875db9aef6ac3779d8f75e --- /dev/null +++ b/doc/paddle/design/mkldnn/caching/scripts/get_blob.dot @@ -0,0 +1,35 @@ +digraph G { + +node[weight=100] +// Session ID +A0[label="Get session ID"] + + subgraph cluster_A { +label="Critical section" +style=dotted +labeljust="r" + +A1[shape=diamond,label="cache[ID] exists?"] +A3[label="Get cache[ID]"] +A4[shape=diamond,label="cache[ID][shape] exists?"] +A5[label="Get cache[ID][shape]"] + +B[label="return null object"] +B1[shape=diamond,label="cache[ID][shape][name] exists?"] +B3[label="Get cache[ID][shape][name]"] +B4[label="return object of cache[ID][shape][name]"] +} + +A0 -> A1 [label="ID"] +A1 -> B [label="no"] +A1 -> A3 [label="yes"] +A3 -> A4 +A4 -> B [label="no"] +A4 -> A5 [label="yes"] + +A5 -> B1 +B1 -> B3 [label="yes"] +B1 -> B [label="no"] +B3 -> B4 + +} diff --git a/doc/paddle/design/mkldnn/caching/scripts/set_blob.dot b/doc/paddle/design/mkldnn/caching/scripts/set_blob.dot new file mode 100644 index 0000000000000000000000000000000000000000..a32ea9b3017e28e6bd2af64257ab4a6cfa8ed075 --- /dev/null +++ b/doc/paddle/design/mkldnn/caching/scripts/set_blob.dot @@ -0,0 +1,53 @@ +digraph G { + + +// Session ID +A0[label="Get session ID"] + +subgraph cluster_A { +label="Critical section" +style=dotted +labeljust="r" + +A[label="Create cache[ID][shape]"] +A1[shape=diamond,label="cache[ID] exists?"] +A2[label="Create cache[ID]"] +A3[label="Get cache[ID]"] +A4[shape=diamond,label="cache[ID][shape] exists?"] +A5[label="Get cache[ID][shape]"] + +B[shape=diamond,label="Clear cache mode?"] +B1[shape=diamond,label="cache[ID][shape][name] exists?"] +B2[label="Create cache[ID][shape][name]"] +B3[label="Get cache[ID][shape][name]"] +B4[label="assign data to cache[ID][shape][name]"] +D[shape=diamond,label="Cache full?"] +F[label="Erase cache[ID][oldest shape]"] +} + +A0 -> A1 [label="ID"] +A1 -> A2 [label="no"] +A1 -> A3 [label="yes"] +A2 -> A3 +A3 -> A4 +A4 -> B [label="no"] +A -> A5 +A4 -> A5 [label="yes"] + +// Shape +// Get blob +B -> A [label="no"] +B -> D [label="yes"] + +D -> A [label="no"] +D -> F [label="yes"] +F -> A + + +A5 -> B1 +B1 -> B2 [label="no"] +B2 -> B3 +B1 -> B3 [label="yes"] +B3 -> B4 + +} diff --git a/doc/paddle/design/mkldnn/data_transformation/data_transform.md b/doc/paddle/design/mkldnn/data_transformation/data_transform.md new file mode 100644 index 0000000000000000000000000000000000000000..4a6c7de12446b6b3899a5f94d63be959e6489983 --- /dev/null +++ b/doc/paddle/design/mkldnn/data_transformation/data_transform.md @@ -0,0 +1,39 @@ +# Design Doc: MKL-DNN Data Transformation + +When fluid is using MKL-DNN engine to execute program, not all operators are having mkl-dnn kernels and some of operators are executed by CPU. MKL-DNN kernels of operators expect input Tensors to be provided in MKL-DNN layout , while Paddle(CPU) kernels are expecting input Tensors to be of Paddle layout. 
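Before going through the individual scenarios, the overall decision can be summarized with a small sketch. This is an illustrative model only (the `Layout` enum and the function below are hypothetical, not actual Fluid code); the exact behaviour of each branch is described in the scenarios that follow.

```python
from enum import Enum

class Layout(Enum):
    PADDLE = 1   # plain arrangement used by Paddle(CPU) kernels
    MKLDNN = 2   # (possibly blocked) arrangement used by MKL-DNN kernels, e.g. NCHW16C

def transform_needed(producer_layout: Layout, consumer_layout: Layout) -> str:
    """Decide what has to happen to a Tensor handed from one kernel to the next."""
    if producer_layout == consumer_layout:
        return "none"
    if consumer_layout is Layout.MKLDNN:
        # Paddle -> MKL-DNN: cheap relabelling (set the MKL-DNN layout flag and
        # pick a format matching the tensor rank); no data movement.
        return "relabel as MKL-DNN"
    # MKL-DNN -> Paddle(CPU) kernel or fetch op: may need a real reorder
    # if the data sits in a blocked format such as NCHW16C.
    return "convert back to Paddle layout (reorder if blocked)"

# Example: an MKL-DNN kernel feeding a Paddle(CPU) kernel.
print(transform_needed(Layout.MKLDNN, Layout.PADDLE))
```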
+ +We can distinguish following scenarios(presented below on the picture): +* Paddle(CPU) kernel is followed by MKL-DNN kernel +* MKL-DNN kernel is followed by Paddle(CPU) kernel +* MKL-DNN kernel is followed by fetch operator + + +![](images/data_transform.svg) + + +### Paddle(CPU) kernel is followed by MKL-DNN kernel +In a situation when Paddle(CPU) kernel finished execution, its outcome is one or many Tensors of Paddle layout. Each of those +Tensors to be feed into MKL-DNN kernel, needs to be transformed to be of MKL-DNN layout. For this scenario conversion of Paddle Tensor to MKL-DNN Tensor is done by: +* changing layout flag to MKL-DNN +* picking MKL-DNN format that match Paddle Tensor rank +* Rearrange dims order to NCHW + +Those are computationally cheap operation as there is no real data rearrangement. More information on conversion from Paddle layout to MKL-DNN Tensor can be found in relevant [document](../nhwc/nhwc.md) + +This scenario is drawn on the picture with bold lines. Starting from Paddle(CPU) op on the left side , following arrows drawn in bold and finishing with MKL-DNN op on the right side of picture. + +### MKL-DNN kernel is followed by Paddle(CPU) kernel +In this situation MKL-DNN kernel finished its execution and as a result it produced one or more output Tensors. Each of those Tensors are of MKL-DNN layout and to be fed into Paddle(CPU) kernel, +they need to be converted into Paddle layout. In a detail MKL-DNN Tensor arrangement (mkl-dnn memory format) is checked if it is compatible with Paddle(CPU) layout and if positive then +just layout of Tensor is set as Paddle and mkl-dnn format is set to ``undef``. In case when MKL-DNN Tensor data arrangement is not compatible with Paddle layout then actual data arrangement +is performed. For example MKL-DNN Tensor is 4D and having format ``NCHW16C`` and to convert it into Paddle layout of ``NCHW`` we need to rearrange data to be ``NCHW`` format. To do so +MKL-DNN Reorder primitive is created that can do data rearrangement. + +This scenario is marked on the picture with outlined, empty inside arrows. Starting from MKL-DNN op on the left side , following empty arrows finishing with Paddle(CPU) op on the right side of picture. +### MKL-DNN kernel is followed by fetch operator +This situation is similar conceptually to previous section, but because fetch operator is an operator without kernel then it does not share data transformation code with operators that are having kernel registered. +Hence execution flow looks a bit different, although conceptually conversion of MKL-DNN Tensor into Paddle(CPU) Tensor is the same as in a described above + +This scenario is marked on the picture with regular arrows. Starting from MKL-DNN op on the left side , following regular arrows finishing with fetch op on the right side of picture. +### GPU and MKL-DNN kernels interoperability. 
+Currently Fluid is not supporting execution of programs by using combination of MKL-DNN and GPU kernels diff --git a/doc/paddle/design/mkldnn/data_transformation/images/data_transform.svg b/doc/paddle/design/mkldnn/data_transformation/images/data_transform.svg new file mode 100644 index 0000000000000000000000000000000000000000..1b135cbe62615d1217885d02c0284b94a52093ec --- /dev/null +++ b/doc/paddle/design/mkldnn/data_transformation/images/data_transform.svg @@ -0,0 +1,138 @@ + + + + + + +Q + +cluster_in + +Possible +i +  order operator types + +cluster_out + +Possible +i+1 +  order operator types + +cluster_Trans + +Transformation of Tensors + + +TransData + +TransformData + + +toMKLDNN + +Label Tensor as MKL-DNN + + +TransData->toMKLDNN + + + + +fromMKLDNN + +TransDataLayoutFromMKLDNN + + +TransData->fromMKLDNN + + + + + + +mkldnnop2 + +MKL-DNN op + + +toMKLDNN->mkldnnop2 + + +MKL-DNN Tensor + + +innerMKLDNN + +innerTransDataLayoutFromMKLDNN + { +MKL-DNN Reorder +} + + +fromMKLDNN->innerMKLDNN + + + + + + +cpuop2 + + +Paddle(CPU) op + + +innerMKLDNN->cpuop2 + + + + +Tensor + + +fetchop + +Fetch op + + +innerMKLDNN->fetchop + + +Tensor + + +cpuop + +Paddle(CPU) op + + +cpuop->TransData + + +Tensor + + +mkldnnop + + +MKL-DNN op + + +mkldnnop->TransData + + + + +MKL-DNN Tensor + + +mkldnnop->innerMKLDNN + + +MKL-DNN Tensor + + + diff --git a/doc/paddle/design/mkldnn/data_transformation/index_en.rst b/doc/paddle/design/mkldnn/data_transformation/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..c383a4183cb1e6da9e782010e35f99d9cbf1ac41 --- /dev/null +++ b/doc/paddle/design/mkldnn/data_transformation/index_en.rst @@ -0,0 +1,7 @@ +MKL-DNN Data Transformation +-------------------------------------- + +.. toctree:: + :maxdepth: 1 + + data_transform.md diff --git a/doc/paddle/design/mkldnn/data_transformation/scripts/data_transform.dot b/doc/paddle/design/mkldnn/data_transformation/scripts/data_transform.dot new file mode 100644 index 0000000000000000000000000000000000000000..5e0a03bf7c71106202d8c7094829fec0d9e13875 --- /dev/null +++ b/doc/paddle/design/mkldnn/data_transformation/scripts/data_transform.dot @@ -0,0 +1,46 @@ + +digraph Q { + + rankdir=LR + node[shape=box] + + TransData[label="TransformData"] + toMKLDNN[label="Label Tensor as MKL-DNN"] + fromMKLDNN[label="TransDataLayoutFromMKLDNN"] + innerMKLDNN[label= {MKL-DNN Reorder}>] + + + node[shape=circle] + + subgraph cluster_in { + label=i order operator types> + style=dotted + cpuop[label="Paddle(CPU) op",style=bold] + mkldnnop[label="MKL-DNN op",shape=doublecircle] + } + + subgraph cluster_out { + label=i+1 order operator types> + style=dotted + cpuop2[label="Paddle(CPU) op",shape=doublecircle] + fetchop[label="Fetch op"] + mkldnnop2[label="MKL-DNN op", style=bold] + } + + + cpuop -> TransData[label="Tensor ", style=bold] + mkldnnop -> TransData[label="MKL-DNN Tensor ", color="black:invis:black"] + mkldnnop -> innerMKLDNN[label="MKL-DNN Tensor"] + toMKLDNN -> mkldnnop2[style=bold, label="MKL-DNN Tensor "] + innerMKLDNN -> cpuop2[label="Tensor ", color="black:invis:black"] + innerMKLDNN -> fetchop[label="Tensor "] + subgraph cluster_Trans { + label="Transformation of Tensors" + style=dotted + TransData -> toMKLDNN[style=bold] + TransData -> fromMKLDNN[color="black:invis:black"] + fromMKLDNN -> innerMKLDNN[color="black:invis:black"] + + } +} + diff --git a/doc/paddle/design/mkldnn/inplace/images/inplace.svg b/doc/paddle/design/mkldnn/inplace/images/inplace.svg new file mode 100644 index 
0000000000000000000000000000000000000000..69439d30643fd9b40e74498a3b94401413c5c181 --- /dev/null +++ b/doc/paddle/design/mkldnn/inplace/images/inplace.svg @@ -0,0 +1,120 @@ + + + + + + +G + + +cluster_0 + +in-placed + + + +e1 + +relu + + + +b + +b + + + +e1->b + + + + + +e2 + +elementwise_add + + + +e + +b + + + +e2->e + + + + + +e3 + +elementwise_mul + + + +g + +g + + + +e3->g + + + + + +a + +a + + + +a->e1 + + + + + +b->e2 + + + + + +e->e3 + + + + + +d + +d + + + +d->e2 + + + + + +f + +f + + + +f->e3 + + + + + diff --git a/doc/paddle/design/mkldnn/inplace/images/multi-output-inplace.svg b/doc/paddle/design/mkldnn/inplace/images/multi-output-inplace.svg new file mode 100644 index 0000000000000000000000000000000000000000..c046424e5197498689b3db1e776415fca32e5033 --- /dev/null +++ b/doc/paddle/design/mkldnn/inplace/images/multi-output-inplace.svg @@ -0,0 +1,265 @@ + + + + + + +G + + +cluster_before + +before + + +cluster_0 + +to be in-placed + + +cluster_after + +after + + +cluster_0b + +applied in-placed + + + +op1 + +elementwise_add + + + +c + +c + + + +op1->c + + + + + +op2 + +top_k +inputs_vars{c} + + + +d + +d + + + +op2->d + + + + + +e + +e + + + +op2->e + + + + + +op3 + +top_k +inputs_vars{c} + + + +g + +g + + + +op3->g + + + + + +h + +h + + + +op3->h + + + + + +c->op2 + + + + + +c->op3 + + + + + +a + +a + + + +a->op1 + + + + + +b + +b + + + +b->op1 + + + + + +op1b + +elementwise_add + + + +cb + +a + + + +op1b->cb + + + + + +op2b + +top_k +input_vars{a} + + + +db + +d + + + +op2b->db + + + + + +eb + +e + + + +op2b->eb + + + + + +op3b + +top_k +input_vars{a} + + + +gb + +g + + + +op3b->gb + + + + + +hb + +h + + + +op3b->hb + + + + + +cb->op2b + + + + + +cb->op3b + + + + + +ab + +a + + + +ab->op1b + + + + + +bb + +b + + + +bb->op1b + + + + + diff --git a/doc/paddle/design/mkldnn/inplace/images/unwanted-inplace.svg b/doc/paddle/design/mkldnn/inplace/images/unwanted-inplace.svg new file mode 100644 index 0000000000000000000000000000000000000000..2ee14e458f630021559996dc63209793cd7400a2 --- /dev/null +++ b/doc/paddle/design/mkldnn/inplace/images/unwanted-inplace.svg @@ -0,0 +1,74 @@ + + + + + + +G + + +cluster_0 + +in-placed + + + +e1 + +softmax +<oneDNN> + + + +c + +b + + + +e1->c + + + + + +e2 + +layer_norm +<Paddle CPU> + + + +e + +a + + + +e2->e + + + + + +c->e2 + + + + + +a + +a + + + +a->e1 + + + + + diff --git a/doc/paddle/design/mkldnn/inplace/index_en.rst b/doc/paddle/design/mkldnn/inplace/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..7c9c2f838e7f5138d4ed3e2157aa9c0a4de81f5d --- /dev/null +++ b/doc/paddle/design/mkldnn/inplace/index_en.rst @@ -0,0 +1,7 @@ +MKL-DNN IN-PLACE execution support +-------------------------------------- + +.. toctree:: + :maxdepth: 1 + + inplace.md diff --git a/doc/paddle/design/mkldnn/inplace/inplace.md b/doc/paddle/design/mkldnn/inplace/inplace.md new file mode 100644 index 0000000000000000000000000000000000000000..ccad26878661cea915b6a7157c0ade62dc99ca4c --- /dev/null +++ b/doc/paddle/design/mkldnn/inplace/inplace.md @@ -0,0 +1,97 @@ +## Introduction + +PaddlePaddle is implementing concept of in-place execution of some of operators. +The idea of in-place execution is present on following picture: + +![](images/inplace.svg) + +Exemplary graph presents three operators where one of them (type of elementwise_add) is to be performing in-place computation. In-place computation means that input variable (Tensor) is used for both input and output. 
This means that one of inputs will be overwritten with computational results. In presented picture in-place operator (elementwise_add) is +having two input nodes: *b* and *d* and output *b*. So *b* is used for input and output and underneath it is represented by a one, shared Tensor. So this means that variable *b* is initially holding some input data and after the operator computation, input data is lost and replaced by computation's result. + +Currently assumption is that if operator can have in-place processing then all its kernel (including oneDNN) should be able to work properly in in-place mode. To match this functionality oneDNN integration was extended to support in-place execution for some of its operators: +- activations +- softmax +- elementwise_add +- gelu* +- sum** + +Adventages of in-place computation are: +* lower memory usage +* improved performance of operators + +To have in-place computation, we need to analyze graph to search for where in-place execution could happen +and then make some of variables to be shared by input and output of in-place capable operator. + +Hence there are two parts of in-place support: +- in-place execution support within an operator +- oneDNN inplace C-API pass + +#### in-place execution support within an operator +For in-place execution, oneDNN primitive needs to have the same oneDNN memory object passed as input (src) and output (dst). More precisely, we check if pointers to allocated buffers are the same for input and output +and this indicates if we use one oneDNN memory object or two. For example: + +`auto src_memory_p = handler.AcquireSrcMemory(x);` + +`auto dst_memory_p = x->IsSharedBufferWith(*y) ? + src_memory_p : handler.AcquireDstMemory(y);` + +#### oneDNN in-place pass +As mentioned earlier, idea of in-place pass is to locate operators with oneDNN kerenels that can perform in-place execution and then modify output node's variables to match input node's variable of the operator. + +##### Identifying operators with oneDNN kernels capable of in-place execution +This identification is a result of two checks: +- Whether operator does have *inplaceInferer* structure +- Whether operator is on a list of oneDNN's in-place supported operators + +*InplaceInferer* is a struct that declares a mapping (one of inputs to one of outputs) indicating that +considered operator can perform in-place execution and both vars (mentioned input and output in *InplaceInferer*) will +share a tensor. This is not enough for oneDNN in-place C-API execution as oneDNN library may not provide in-place +computation for all required (to have in-place execution) operators of PaddlePaddle and some of operators would have to +simulate in-place computation through the external buffer which would not bring any benefits, so there is no point enabling those in-place computations for C-API inference. + +##### Restrictions +oneDNN in-place pass is taking advantage of graph pattern detector. So pattern consists of: +Node (Var 1) -> Node (oneDNN Op to be inplaced) -> Node (Var2) -> Node (next op - any type, oneDNN/native CPU - after in-placed one) -> Node (Var3) +Pattern is restricted so that in-placed to be op is of oneDNN type. Due to fact that some operators have +more than one input and their output may be consumed by more than one operator it is expected that pattern +maybe detected multiple times for the same operator e.g. once for one input, then for second input etc.. 
+ +Just having oneDNN operator capable of in-place is not enough to have in-place execution enabled, hence follwing rules +are checked by oneDNN in-place pass: +1. If input node to in-place operator is also an input to different operator, then in-place computation cannot be performed, as there is a risk that other operator consuming in-placed op operator will be executed after in-placed operator and therefore get invalid input data (overwritten by in-place computation). +2. If after in-placed operator there is another operator that is reusing in-place op's input var then in-place cannot happen unless next op can perform in-place computation. Next picture presents the idea. + +![](images/unwanted-inplace.svg) + +In the picture we are seeing that in-place pass is considering to enable in-place execution for softmax oneDNN kernel. All is fine, but next operator after softmax is layer norm (non-oneDNN). Layer norm is already reusing input of softmax due to some earlier memory optimization pass being applied. If we make softmax op to perform in-place computation, then +it will also make layer norm to work in-place (b -> a). The thing is that layer norm cannot work in-place (InplaceInferer is not present), so if we force it do so layer norm will produce invalid result. + +##### In-place pass modification to graph when applied + +When sub-graph is aligned with restrictions then in-place computation can be enabled. This is done by: +1. Changing the name of output node of in-place op to be match input node of in-place op. +2. Renaming output var in output lists of node representing operator. +3. Changing the name of input var in next op inputs list. +4. If next Op is performing in-place computation then we need to updated next op's output as well not to break its + in-place computation. +5. if there are multiple operators after our in-place operator then we need to update all of them (their input vars). Idea is presented in the following picture: + +![](images/multi-output-inplace.svg) + +We can see that there are two *top_k* operators after *elementwise_add* operator that is set to work in-placed. Each of *top_k* is having its own list of input vars, so we need to rename relevant input var to new name. As in-place pattern +consists of: input node -> in-place op -> output node -> next op -> next op's output. For presented graph, there will be 8 patterns detected: +- b -> elementwise_add -> c -> top_k (left one) -> d +- b -> elementwise_add -> c -> top_k (left one) -> e +- b -> elementwise_add -> c -> top_k (right one) -> g +- b -> elementwise_add -> c -> top_k (right one) -> h +- a -> elementwise_add -> c -> top_k (left one) -> d +- a -> elementwise_add -> c -> top_k (left one) -> e +- a -> elementwise_add -> c -> top_k (right one) -> g +- a -> elementwise_add -> c -> top_k (right one) -> h + +Important thing is to remember original name of output, before it is renamed, so later we can +replace this original name in all of next op instances. + +\* oneDNN gelu kernel is able to perform in-place execution, but currently gelu op does not support in-place execution. + +\*\* sum kernel is using oneDNN sum primitive that does not provide in-place exection, so in-place computation is done faked through external buffer. So it was not added into oneDNN inplace pass. 
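To make the renaming steps listed above more concrete, the following is a small self-contained sketch of the rewrite performed when in-place is applied. The dictionaries used for operators are hypothetical stand-ins, not the actual C++ pass or graph data structures.

```python
def apply_inplace(op, consumers):
    """Rewrite variable names so that `op` writes its result into its input.

    This mirrors the renaming steps listed above: the output var takes the
    input var's name, and every consumer of the old output name is patched
    (including the outputs of consumers that are themselves in-placed).
    """
    in_name = op["inputs"][0]    # the input variable that will be reused
    out_name = op["outputs"][0]  # the original output variable name

    # Steps 1-2: rename the output of the in-placed operator.
    op["outputs"][0] = in_name

    # Steps 3-5: update every operator that referenced the old output name.
    for nxt in consumers:
        nxt["inputs"] = [in_name if v == out_name else v for v in nxt["inputs"]]
        if nxt.get("inplace"):
            # Keep an already in-placed consumer consistent with the rename.
            nxt["outputs"] = [in_name if v == out_name else v for v in nxt["outputs"]]


# The multi-output example from the picture above:
# elementwise_add(a, b) -> c, consumed by two top_k operators.
ops = [
    {"type": "elementwise_add", "inputs": ["a", "b"], "outputs": ["c"], "inplace": True},
    {"type": "top_k", "inputs": ["c"], "outputs": ["d", "e"]},
    {"type": "top_k", "inputs": ["c"], "outputs": ["g", "h"]},
]
apply_inplace(ops[0], ops[1:])
assert ops[1]["inputs"] == ["a"] and ops[2]["inputs"] == ["a"]
```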
diff --git a/doc/paddle/design/mkldnn/inplace/scripts/inplace.dot b/doc/paddle/design/mkldnn/inplace/scripts/inplace.dot new file mode 100644 index 0000000000000000000000000000000000000000..7a5d22bb2922d6977e23c7dca30676d848180bd4 --- /dev/null +++ b/doc/paddle/design/mkldnn/inplace/scripts/inplace.dot @@ -0,0 +1,23 @@ +digraph G { + overlap=false + e1[label="relu"] + e2[label="elementwise_add"] + e3[label="elementwise_mul"] + + a -> e1 + e1 -> b + b[label="b"] + e[label="b"] + + subgraph cluster_0 { + label="in-placed" + b -> e2 + d -> e2 + e2 -> e + } + + + e -> e3 + f -> e3 -> g + +} diff --git a/doc/paddle/design/mkldnn/inplace/scripts/multi-output-inplace.dot b/doc/paddle/design/mkldnn/inplace/scripts/multi-output-inplace.dot new file mode 100644 index 0000000000000000000000000000000000000000..9778f5f18726422befc06125b1030facc7fb22a2 --- /dev/null +++ b/doc/paddle/design/mkldnn/inplace/scripts/multi-output-inplace.dot @@ -0,0 +1,63 @@ +digraph G { +subgraph cluster_before { + label="before" + style=dotted + op1[label="elementwise_add"] + op2[label="top_k\ninputs_vars{c}"] + op3[label="top_k\ninputs_vars{c}"] + + + c[label="c"] + + subgraph cluster_0 { + style=solid + label="to be in-placed" + a -> op1 + b-> op1 + op1 -> c + } + + + c -> op2 + c -> op3 + + op2 -> d + op2 -> e + op3 -> g + op3 -> h +} +subgraph cluster_after { + label="after" + style=dotted + op1b[label="elementwise_add"] + op2b[label="top_k\ninput_vars{a}"] + op3b[label="top_k\ninput_vars{a}"] + + + cb[label="a"] + ab[label="a"] + bb[label="b"] + db[label="d"] + eb[label="e"] + gb[label="g"] + hb[label="h"] + + subgraph cluster_0b { + style=solid + label="applied in-placed" + ab -> op1b + bb-> op1b + op1b -> cb + } + + + cb -> op2b + cb -> op3b + + op2b -> db + op2b -> eb + op3b -> gb + op3b -> hb +} + +} diff --git a/doc/paddle/design/mkldnn/inplace/scripts/unwanted-inplace.dot b/doc/paddle/design/mkldnn/inplace/scripts/unwanted-inplace.dot new file mode 100644 index 0000000000000000000000000000000000000000..a772736c31fb6a520c8e2761326550211a2edf54 --- /dev/null +++ b/doc/paddle/design/mkldnn/inplace/scripts/unwanted-inplace.dot @@ -0,0 +1,17 @@ +digraph G { + + e1[label="softmax\n"] + e2[label="layer_norm\n"] + + c[label="b"] + e[label="a"] + subgraph cluster_0 { + label="in-placed" + a -> e1 + e1 -> c + } + + c -> e2 + e2 -> e + +} diff --git a/doc/paddle/design/mkldnn/nhwc/images/nhwc-grad.svg b/doc/paddle/design/mkldnn/nhwc/images/nhwc-grad.svg new file mode 100644 index 0000000000000000000000000000000000000000..efc69b1cd11a639842cc59742a5f270564d52f62 --- /dev/null +++ b/doc/paddle/design/mkldnn/nhwc/images/nhwc-grad.svg @@ -0,0 +1,226 @@ + + + + + + +G + + +feed_op + +Feed Op +Paddle + + +tensor_feed + +Tensor feed_op +layout=kNCHW +NHWC dim format + + +feed_op->tensor_feed + + + + +fetch_op + +Fetch Op +Paddle + + +mean_op + +Mean Op +Paddle + + +tensor_output_mean + +Tensor mean op +layout=kNHWC +NHWC dim format + + +mean_op->tensor_output_mean + + + + +mean_grad_op + +Mean Grad Op +Paddle + + +tensor_mean_grad + +Tensor mean grad op +layout=kNHWC +NHWC dim format + + +mean_grad_op->tensor_mean_grad + + + + +conv_mkldnn + +Conv Op +data_format=NHWC +MKL-DNN + + +tensor_mkldnn + +Tensor conv op +layout=kMKLDNN +NCHW dim format + + +conv_mkldnn->tensor_mkldnn + + + + +conv_grad_mkldnn + +Conv Grad Op +data_format=NHWC +MKL-DNN + + +tensor_conv_grad_mkldnn + +Tensor Conv Grad op +layout=kMKLDNN +NCHW dim format + + +conv_grad_mkldnn->tensor_conv_grad_mkldnn + + + + +pool_mkldnn + +Pool Op +data_format=NHWC 
+MKL-DNN + + +tensor_mkldnn2 + +Tensor pool op +layout=kMKLDNN +NCHW dim format + + +pool_mkldnn->tensor_mkldnn2 + + + + +pool_grad_mkldnn + +Pool Grad Op +data_format=NHWC +MKL-DNN + + +tensor_pool_grad_mkldnn + +Tensor Pool Grad op +layout=kMKLDNN +NCHW dim format + + +pool_grad_mkldnn->tensor_pool_grad_mkldnn + + + + +tensor_mkldnn->pool_mkldnn + + + + +tensor_pool_grad_mkldnn->conv_grad_mkldnn + + + + +tensor_conv_grad_mkldnn->fetch_op + + + + +tensor_input_mean + +Tensor pool op +layout=kNHWC +NHWC dim format + + +tensor_mkldnn2->tensor_input_mean + + + + +tensor_mean_grad_mkldnn + +Tensor Grad mean +layout=kMKLDNN +NCHW dim format + + +tensor_mean_grad_mkldnn->pool_grad_mkldnn + + + + +tensor_input_mean->mean_op + + + + +tensor_input_mean->tensor_mean_grad + + +InferShape + + +tensor_output_mean->mean_grad_op + + + + +tensor_mean_grad->tensor_mean_grad_mkldnn + + + + +tensor_feed2 + +Tensor feed_op +layout=kMKLDNN +NCHW dim format + + +tensor_feed->tensor_feed2 + + + + +tensor_feed2->conv_mkldnn + + + + + diff --git a/doc/paddle/design/mkldnn/nhwc/images/nhwc.svg b/doc/paddle/design/mkldnn/nhwc/images/nhwc.svg new file mode 100644 index 0000000000000000000000000000000000000000..85eed3e7eddebbe92033444f9a4d8ea662f62854 --- /dev/null +++ b/doc/paddle/design/mkldnn/nhwc/images/nhwc.svg @@ -0,0 +1,151 @@ + + + + + + +G + + +feed_op + +Feed Op (Input signal) +Paddle + + +input_feed + +Tensor Input signal +layout=kNCHW +NHWC + dim format + + +feed_op->input_feed + + + + +feed_op2 + +Feed Op (Filter data) +Paddle + + +filter_feed + +Tensor filter data +layout=kNCHW +NCHW dim format + + +feed_op2->filter_feed + + + + +fetch_op + +Fetch Op +Paddle + + +tensor_fetch + +Tensor fetch_op +layout=kNCHW +NHWC + dim format + + +fetch_op->tensor_fetch + + + + +conv_mkldnn + +conv Op +data_format=NHWC +MKL-DNN + + +tensor_mkldnn + +Tensor conv op +layout=kMKLDNN +NCHW dim format + + +conv_mkldnn->tensor_mkldnn + + + + +pool_mkldnn + +pool Op +data_format=NHWC +MKL-DNN + + +tensor_mkldnn2 + +Tensor conv op +layout=kMKLDNN +NCHW dim format + + +pool_mkldnn->tensor_mkldnn2 + + + + +tensor_mkldnn->pool_mkldnn + + + + +tensor_mkldnn2->fetch_op + + + + +input_feed2 + +Tensor Input signal +layout=kMKLDNN +NCHW dim format + + +input_feed->input_feed2 + + + + +input_feed2->conv_mkldnn + + + + +filter_feed2 + +Tensor filter data +layout=kMKLDNN +NCHW dim format + + +filter_feed->filter_feed2 + + + + +filter_feed2->conv_mkldnn + + + + + diff --git a/doc/paddle/design/mkldnn/nhwc/index_en.rst b/doc/paddle/design/mkldnn/nhwc/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..24bd3544a5d6f0ea4a109c8d0ac2627548e12527 --- /dev/null +++ b/doc/paddle/design/mkldnn/nhwc/index_en.rst @@ -0,0 +1,7 @@ +MKL-DNN NHWC support +-------------------------------------- + +.. toctree:: + :maxdepth: 1 + + nhwc.md diff --git a/doc/paddle/design/mkldnn/nhwc/nhwc.md b/doc/paddle/design/mkldnn/nhwc/nhwc.md new file mode 100644 index 0000000000000000000000000000000000000000..19fca60d20e5eca6f6d1f8d26bda86cc1f5ba38a --- /dev/null +++ b/doc/paddle/design/mkldnn/nhwc/nhwc.md @@ -0,0 +1,82 @@ +# Design Doc: MKL-DNN NHWC support + +This document describes design & implementation of ``NHWC`` models using MKL-DNN engine. For overall +description of Tensors interoperability among Paddle and MKL-DNN Tensors please follow relevant [document](../data_transformation/data_transform.md) + +### Introduction + +PaddlePaddle does support execution of program/model using ``NCHW`` as well as ``NHWC`` data arrangement. 
The reasons for introducing these data arrangements are:
+* the execution performance of some non-MKL-DNN operators may be faster with ``NHWC`` than with the ``NCHW`` data arrangement
+* convenience of use, as sometimes the user already has the data prepared in the ``NHWC`` data arrangement.
+
+The choice between ``NCHW`` and ``NHWC`` is controlled with the ``data_format`` attribute of the following operators:
+* conv
+* conv transposed
+* pool
+* LRN
+* batch norm
+
+Other operators (those without ``data_format``) are implemented so that they execute properly regardless of the layout, for example elementwise operations.
+
+Having operators control the layout (data arrangement) of their input and output data allows,
+in theory, models that work partially on ``NCHW`` and partially on ``NHWC`` data arrangements. However, it was agreed that a given model will only use one type of data arrangement during its execution.
+Hence either all ``data_format`` attributes are set to ``NCHW`` (the default) or all to ``NHWC``; there is no support for having some operators with ``data_format`` set to ``NCHW`` while others use ``NHWC``.
+
+
+Another element to consider is that in the PaddlePaddle ``NHWC`` data layout, as supported by the non-MKL-DNN CPU implementations, the ``NHWC`` data arrangement is only applicable to the input signal, e.g. the parameters of the listed operators
+always use the ``NCHW`` PaddlePaddle layout.
+
+The final element is that the PaddlePaddle data layout changes how the shape of the data looks. For example, ``NCHW`` data with a shape of [2, 3, 4, 5], when transformed to ``NHWC`` data, will have a shape of [2, 4, 5, 3]. This is different from the MKL-DNN shape description, which is always in ``NCHW`` order even if the data underneath is ``NHWC``, ``NCHW16C`` or other.
+
+### Architecture of ``NHWC`` support in MKL-DNN integration
+
+Initially the request for ``NHWC`` and ``NCHW`` execution of a program was implemented explicitly, i.e. by having MKL-DNN work on the selected data arrangement. This proved to be very inefficient in terms of performance, as
+MKL-DNN is designed to work on data arrangements of its own choice (for example the blocked formats ``NCHW16C``, ``NCHW8C`` etc.) rather than being forced to use the ``NHWC`` or ``NCHW`` data layout.
+
+The current solution is that MKL-DNN kernels work on the data layout best suited for their performance, but
+upon completion of the final MKL-DNN operator there has to be a conversion (reorder) to either the ``NCHW`` or ``NHWC`` Paddle data arrangement. An important note is that the last operator executing a MKL-DNN kernel may not have a ``data_format`` attribute, hence there is a need to store the information on what PaddlePaddle layout to convert to from the MKL-DNN layouts. For this purpose we keep a global variable per thread (Thread Local Storage).
+
+To address the difference in shape description, a mechanism for shape transformation was added: *platform::MatchShapeToLayout()*, which performs the needed shape modification upon entering and exiting the MKL-DNN execution of operators (a small sketch of this reordering is given at the end of this document).
+
+The described architecture, applied to a simple execution of an ``NHWC`` model consisting of a convolution followed by pooling, is presented in the following picture:
+
+![](images/nhwc.svg)
+
+
+#### Notes on ``NHWC`` grad ops support
+
+The corresponding grad MKL-DNN kernels of the operators listed at the beginning of this document also support
+``NHWC`` model execution.
+
+All the design concepts described in the previous section apply to MKL-DNN grad operators as well. However, there
+is also one additional element.
Some grad operators, like *mean*, infer the shape of their output based on
+the shape of the data produced during the forward pass. In that situation the kernel actually has no need to operate on the actual data, as only the shape is needed to infer the grad output's shape. In this scenario there originally was no data transformation of the given variable, hence in particular no change of the Tensor's shape happened. This could result in a wrong shape being sent to the *InferShape* of the grad op. This behaviour was modified to create a dummy Variable that carries the shape of the data in the Paddle format expected by the grad operator.
+
+The described situation is presented in the following picture:
+![](images/nhwc-grad.svg)
+
+### Implementation guidelines
+
+Instead of modifying each MKL-DNN operator to match the described architecture, common code was modified, which consists of modifications to:
+* the data transformation code
+* *InferShape* of each operator supporting the ``data_format`` attribute
+* an overload of the *GetKernelTypeForVar* method added to each of those operators.
+
+Hence, when enabling ``NHWC`` data arrangement support for any operator, we need to extend its *InferShape* and *GetKernelTypeForVar*.
+
+#### *InferShape()* modifications
+This modification is related to the fact that the MKL-DNN kernel operates on data with its shape described in ``NCHW``
+order, hence we need to make sure that even if ``data_format`` has the value ``NHWC``, *InferShape* will still work in ``NCHW`` order.
+
+A snippet from *PoolOp::InferShape()* that illustrates the idea of the modifications to *InferShape*:
+
+    // MKL-DNN Kernels are using NCHW order of dims description
+    // so we ignore data_format consideration for MKL-DNN kernel
+    const bool channel_last = (this->IsMKLDNNType() == false) &&
+                              (data_format == "NHWC" || data_format == "NDHWC");
+
+#### *GetKernelTypeForVar()* overloading
+
+When performing data transformation we need the value of ``data_format``; this value is acquired
+inside *GetKernelTypeForVar()*, and based on it the *data_layout* of the kernel type is set, to be later
+used by the data transformation code.
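To illustrate the difference in shape description mentioned earlier (PaddlePaddle reorders the shape for ``NHWC`` data, while MKL-DNN always describes dims in ``NCHW`` order), here is a small Python sketch of the reordering that a helper such as *platform::MatchShapeToLayout()* performs; the function names are made up for the example, and this is not the actual implementation:

```python
def nchw_to_nhwc_shape(shape):
    """Paddle-style shape reorder: [N, C, H, W] -> [N, H, W, C]."""
    n, c, h, w = shape
    return [n, h, w, c]


def nhwc_to_nchw_shape(shape):
    """Reorder back to the NCHW dim order that MKL-DNN kernels always use
    to describe dims, regardless of the underlying data layout."""
    n, h, w, c = shape
    return [n, c, h, w]


# The example from this document: NCHW [2, 3, 4, 5] becomes NHWC [2, 4, 5, 3].
assert nchw_to_nhwc_shape([2, 3, 4, 5]) == [2, 4, 5, 3]
assert nhwc_to_nchw_shape([2, 4, 5, 3]) == [2, 3, 4, 5]
```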
diff --git a/doc/paddle/design/mkldnn/nhwc/scripts/nhwc-grad.dot b/doc/paddle/design/mkldnn/nhwc/scripts/nhwc-grad.dot new file mode 100644 index 0000000000000000000000000000000000000000..cceb8f6ccd322f545612e3eba9d00a55a3d77cff --- /dev/null +++ b/doc/paddle/design/mkldnn/nhwc/scripts/nhwc-grad.dot @@ -0,0 +1,45 @@ +digraph G { +splines=ortho +rankdir=LR +feed_op[shape=circle,label="Feed Op\n\nPaddle"] +fetch_op[shape=circle,label="Fetch Op\n\nPaddle"] +mean_op[shape=circle,label="Mean Op\n\nPaddle"] +mean_grad_op[shape=circle,label="Mean Grad Op\n\nPaddle"] +conv_mkldnn[shape=circle,label="Conv Op\ndata_format=NHWC\n\nMKL-DNN"]; +conv_grad_mkldnn[shape=circle,label="Conv Grad Op\ndata_format=NHWC\n\nMKL-DNN"]; +pool_mkldnn[shape=circle,label="Pool Op\ndata_format=NHWC\n\nMKL-DNN"]; +pool_grad_mkldnn[shape=circle,label="Pool Grad Op\ndata_format=NHWC\n\nMKL-DNN"]; +tensor_mkldnn[shape=rectangle,label="Tensor conv op\nlayout=kMKLDNN\nNCHW dim format"] +tensor_pool_grad_mkldnn[shape=rectangle,label="Tensor Pool Grad op\nlayout=kMKLDNN\nNCHW dim format"] +tensor_conv_grad_mkldnn[shape=rectangle,label="Tensor Conv Grad op\nlayout=kMKLDNN\nNCHW dim format"] +tensor_mkldnn2[shape=rectangle,label="Tensor pool op\nlayout=kMKLDNN\nNCHW dim format"] +//tensor_input_data[shape=rectangle,label="Input Data\nlayout=kNCHW\nNCHW dim format"] +tensor_mean_grad_mkldnn[shape=rectangle,label="Tensor Grad mean\nlayout=kMKLDNN\nNCHW dim format"] +//tensor_fetch[shape=rectangle,label="Tensor Fetch_op\nlayout=kNCHW\nNHWC dim format"] +tensor_input_mean[shape=rectangle,label="Tensor pool op\nlayout=kNHWC\nNHWC dim format"] +tensor_output_mean[shape=rectangle,label="Tensor mean op\nlayout=kNHWC\nNHWC dim format"] +tensor_mean_grad[shape=rectangle,label="Tensor mean grad op\nlayout=kNHWC\nNHWC dim format"] +tensor_feed[shape=rectangle,label="Tensor feed_op\nlayout=kNCHW\nNHWC dim format"] +tensor_feed2[shape=rectangle,label="Tensor feed_op\nlayout=kMKLDNN\nNCHW dim format"] + +feed_op -> tensor_feed -> tensor_feed2 -> conv_mkldnn -> tensor_mkldnn -> pool_mkldnn -> tensor_mkldnn2 -> tensor_input_mean -> mean_op + +mean_op -> tensor_output_mean + +tensor_input_mean -> tensor_mean_grad[xlabel="InferShape", style=dashed] + +tensor_output_mean -> mean_grad_op + +mean_grad_op -> tensor_mean_grad -> tensor_mean_grad_mkldnn -> pool_grad_mkldnn -> tensor_pool_grad_mkldnn -> conv_grad_mkldnn -> tensor_conv_grad_mkldnn -> fetch_op + +{rank="same" mean_op; mean_grad_op; tensor_output_mean} +{rank="same" pool_mkldnn; pool_grad_mkldnn} +{rank="same" conv_mkldnn; conv_grad_mkldnn} +{rank="same" tensor_mean_grad; tensor_input_mean} +{rank="same" tensor_mkldnn2; tensor_mean_grad_mkldnn} +{rank="same" tensor_pool_grad_mkldnn; tensor_mkldnn} +{rank="same" tensor_conv_grad_mkldnn; tensor_feed} +{rank="same" fetch_op; feed_op} + +//tensor_mkldnn2 -> tensor_mean_grad[label="Infer shape"] +} diff --git a/doc/paddle/design/mkldnn/nhwc/scripts/nhwc.dot b/doc/paddle/design/mkldnn/nhwc/scripts/nhwc.dot new file mode 100644 index 0000000000000000000000000000000000000000..dee0df615a14b739613aea9c3478e3abb4a627a3 --- /dev/null +++ b/doc/paddle/design/mkldnn/nhwc/scripts/nhwc.dot @@ -0,0 +1,17 @@ +digraph G { +rankdir=LR +feed_op[shape=circle,label="Feed Op (Input signal)\n\nPaddle"] +feed_op2[shape=circle,label="Feed Op (Filter data)\n\nPaddle"] +fetch_op[shape=circle,label="Fetch Op\n\nPaddle"] +conv_mkldnn[shape=circle,label="conv Op\ndata_format=NHWC\n\nMKL-DNN"]; +pool_mkldnn[shape=circle,label="pool Op\ndata_format=NHWC\n\nMKL-DNN"]; 
+tensor_mkldnn[shape=rectangle,label="Tensor conv op\nlayout=kMKLDNN\nNCHW dim format"] +tensor_mkldnn2[shape=rectangle,label="Tensor conv op\nlayout=kMKLDNN\nNCHW dim format"] +tensor_fetch[shape=rectangle,label=layout=kNCHW
NHWC dim format>] +input_feed[shape=rectangle,label=layout=kNCHW
NHWC dim format>] +input_feed2[shape=rectangle,label="Tensor Input signal\nlayout=kMKLDNN\nNCHW dim format"] +filter_feed[shape=rectangle,label="Tensor filter data\nlayout=kNCHW\nNCHW dim format"] +filter_feed2[shape=rectangle,label="Tensor filter data\nlayout=kMKLDNN\nNCHW dim format"] +feed_op -> input_feed -> input_feed2 -> conv_mkldnn -> tensor_mkldnn -> pool_mkldnn -> tensor_mkldnn2 -> fetch_op -> tensor_fetch +feed_op2 -> filter_feed -> filter_feed2 -> conv_mkldnn +} diff --git a/doc/paddle/design/modules/backward.md b/doc/paddle/design/modules/backward.md new file mode 100644 index 0000000000000000000000000000000000000000..397fea5ce14c9e32fcaa2cfab3019e07c2df0979 --- /dev/null +++ b/doc/paddle/design/modules/backward.md @@ -0,0 +1,158 @@ +# Backward Building + +## Motivation + +In Neural Network, most models are solved by the backpropagation algorithm(known as **BP**) at present. Technically, BP calculates the gradient of the loss function, then propagates it back through the networks following the chain rule. However, when configuring the model structure, users do not need to define the backward part. So a mechanism is required by the framework which can complete the model's backward part automatically according to the given forward part. + +When implementing a specific `op`, the developer is also asked to implement its backward version, called `grad_op`. A `grad_op` takes gradients of its corresponding `op`'s outputs, and calculate gradients of the `op`'s inputs. During the building of a model's backward part, the framework creates each forward `op`'s `grad_op`, and then string them together in reverse order of forwarding part. In this way, gradients spread from the end to the beginning of the model, in another word, from the loss to parameters. + +## Challenges + +The motivation of backward building is apparent. However, implementation it correctly is not so easy. In the **Fluid** design, a deep learning model is described by `Program`, `Block`, `Op` and `Variable`. The `Block` itself can be nested. It means that the `op`s and `variable`s are scattered across different blocks rather than all be gathered in a single graph. Our backward building algorithm shall visit blocks in recursive order and be able to insert `grad_op`s and new created `variable`s into the right place. + +## Usage + +Although the whole algorithm is comprised of many functions, only one is exposed as API: + +```python +def append_backward(loss, parameter_list=None, no_grad_set=None): + """ + Append backward part to main_program + + Args: + loss(Variable): The variable generated by the cost function. + parameter_list(list): Parameters that need to be updated by optimizers. + If None, it means all parameters need to be updated. + + no_grad_set(set): Variables that have no gradients in Block 0. + If None, the set will be generated inside the function and + contains all variables with `step_gradient=True` from all blocks. + + Return: + (list[Variable]): list of (parameters, gradients) pair. + """ +``` + +By invoking this API, the framework appends backward part of the program where the `loss` is. It takes three arguments. `loss` means the final loss value. It must be a scalar and is usually the output of the loss layer. It is also where the gradient generated and backpropagation starts. `parameter_list` marks all parameters needs updating. If it's `None`, all parameter will be updated by optimizers. `no_grad_set` marks variables without gradient. 
if all outputs of some `grad_op` are in `no_grad_set`, the `grad_op` will not be run. + +This API will be invoked automatically before optimizer building. +As a result, in most cases, users do not need to invoke the API by themselves to append backward part. + +## Implementation + +The implementation of backward building algorithm is in `backward.py` file. The whole algorithm can be divided into two independent parts: creating `grad_op`s and creating new variables. + +### Creating `grad_op`s + +The creating of `grad_op`s is implemented by: + +```python +def _append_backward_ops_(target, + block, + target_block, + no_grad_dict, + grad_to_var): + """ + Create all grad ops, and insert them into given block + + Args: + target(Variable): the target variable of forward pass + block(Block): the block where forward ops are + target_block(Block): the block which is going to hold new generated grad ops + no_grad_dict(dict): + key(int) block index + val(set) a set of varibale names. These varibales have no gradient + grad_to_var(dict)(output argument): + key(str): grad variable name + val(str): corresponding forward variable name + """ +``` + +Given a `block`, the function will traverses all `op`s in this block in reverse order, gets corresponding `grad_op` from the C++ core via `core.get_grad_op_desc()`, then append it to `target_block`. + +However, some specific `op`(e.g. `while_op`, `if_else_op`) can hold its own sub-block. For these sub-blocks contains `op`s as well, the `grad_op` creating should be recursive. + +During the reverse traversal, we check each `op` whether it has an attribute named `sub_block`. If so, it means there is a sub-block and we need to deal with it first. After creating a new block whose father is the one in `op`'s attribute, we invoke `_append_backward_ops_()` recursively, assigning the new block to parameter `target_block` and the one in `op`'s attribute to `block`. The *pseudo-code* shows this process: + +``` +******* pseudo-code ******** +for op in reversed(block.ops): + if op has an attribute named 'sub_block': + Get the sub-block(`s_block`) from op's attribute. + Create a new block(`grad_s_block`), whose father is `s_block`. + Invoke _append_backward_ops_(), with `block=s_block` and `target_block=grad_s_block` + + Invoke `core.get_grad_op_desc()` to get op's grad_op. + Insert name correspondings between variables and their gradients of the grad_op to grad_to_var + Assign grad_s_block to grad_op as it's 'sub_block' attribute. + Append grad_op to current target_block. +``` + +The first invoking of `_append_backward_ops_()` is initiated by `append_backward()`, in which parameters `block` and `target_block` are all assigned with root block(the block with index 0). + +### Corner Cases of `grad_op` Creating + +In the previous section, we show the regular process of `grad_op` creating. However, in some corner cases, the conventional algorithm is not enough to get the correct result and appending handling is required. These additional processes run after the algorithm mentioned above and do some special adjusts on its output `grad_op`s. + +#### Shared Variables + +If a variable is read by more than one `op` in the forward pass, its gradient is likely to be written by more than one `grad_op`s in the next backward pass. To make the gradient result being the sum of all `grad_op`s' outputs instead of the last running one, we assign each output with a temporary variable and then add a `sum_op` to add them up. 
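As a purely illustrative sketch of this renaming-and-summing step (the real logic lives in `_addup_repetitive_outputs_` in `backward.py`; the helper name and the dict-based op description below are made up for the example), using the temporary-variable naming described just below:

```python
def addup_repetitive_outputs(grad_ops, grad_name):
    """If several grad ops write `grad_name`, rename each write to a temporary
    variable (e.g. w@GRAD@RENAME@i) and append a sum op producing `grad_name`."""
    writers = [op for op in grad_ops if grad_name in op["outputs"]]
    if len(writers) <= 1:
        return grad_ops
    temps = []
    for i, op in enumerate(writers):
        tmp = "%s@RENAME@%d" % (grad_name, i)
        op["outputs"] = [tmp if v == grad_name else v for v in op["outputs"]]
        temps.append(tmp)
    grad_ops.append({"type": "sum", "inputs": temps, "outputs": [grad_name]})
    return grad_ops


grad_ops = [
    {"type": "mul_grad", "inputs": ["out1@GRAD"], "outputs": ["w@GRAD"]},
    {"type": "mul_grad", "inputs": ["out2@GRAD"], "outputs": ["w@GRAD"]},
]
grad_ops = addup_repetitive_outputs(grad_ops, "w@GRAD")
# The two writers now output w@GRAD@RENAME@0 / w@GRAD@RENAME@1,
# which the appended sum op adds up into w@GRAD.
```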
+ +For the debug convenience, if the final gradient name is `w@GRAD`, it's corresponding temporary variables will be named as `w@GRAD@RENAME@0`, `w@GRAD@RENAME@1`... + +See function `_addup_repetitive_outputs_` in `backward.py` for implementation details. + +#### No Gradient Variables + +In our framework, variables can be marked as *no_gradient*, it means that the gradient of this variable is unnecessary and can be considered as zero in model training. Apparently, when all the outputs of some `grad_op` are marked as *no_gradient*, the `grad_op` itself can be skipped in backward pass. + +Another situation is all the gradient inputs of some `grad_op` are marked as *no_gradient*, which means all of them can be considered as zeros. For `grad_op`s are in essence the propagation of gradients, all the outputs are definitely zeros when all gradient inputs are zeros. Therefore the `grad_op` can also be skipped. + +It should be noted that all these zero gradients still need to be creating and initialized by something, otherwise following `grad_op`s who take these gradients as inputs take the risk of using uninitialized memory. In our code, we employ `fill_zeros_like_op` to initialize them as all zeros. + +This features are implemented in function `_remove_no_grad_branch_`. It checks new created `grad_op`s one-by-one, removes who can be skipped and inserts `fill_zeros_like_op` when its necessary. We can get the `no_grad_set` from the `_append_backward_ops_` argument `no_grad_dict` or generate it on the fly by scanning all variables' `no_gradient` attribute(True or False). + +### Creating Backward Variables + +Up to now, we have completed all creating and adjusting jobs of `grad_op`s. However, backward variables have not been created. Now they are only represented by `grad_op`'s input and output arguments. The backward variable creating job will be done by: + +```python +def _append_backward_vars_(block, + start_op_idx, + grad_to_var, + grad_info_map): + """ + Create new variables required by backward pass. + + Args: + block(Block): the block where new variables will be created + start_op_idx(int): Only variables required by ops in block.ops[start_op_idx : ] will be created + grad_to_var(dict): + key(str): grad variable name + val(str): corresponding forward variable name + In most cases, this dict is generated by _append_backward_ops_() + grad_info_map(dict)(output argument): + key(str): forward variable name + val(tuple): a tuple of (str, int), str is the corresponding grad name, int is the block index + """ +``` + +Given a `block`, this function traverses all the `grad_op`s in it(The argument `start_op_idx` indicates where the grad_op sequence starts.) and creates all the uncreated outputs. The *pseudo-code* shows this process: + +``` +for op in block.ops[start_op_idx : ]: + + if op has an attribute named 'sub_block': + Get the sub-block(`s_block`) from op's attribute. + Invoke _append_backward_vars_(), with `block=s_block` + + for var_name in op.all_output_names(): + if block.has_var_recursive(var_name) or var_name is the name of empty variable: + continue + create a new variable named 'var_name' in block + if grad_to_var.has_key(var_name): + set grad_info_map[grad_to_var[var_name]] as a tuple of (var_name. 
block) + + do op's var type inference + do op's shape inference +``` diff --git a/doc/paddle/design/modules/batch_norm_op.md b/doc/paddle/design/modules/batch_norm_op.md new file mode 100644 index 0000000000000000000000000000000000000000..a1bb7f709c94583682f481fc093baed003c4e820 --- /dev/null +++ b/doc/paddle/design/modules/batch_norm_op.md @@ -0,0 +1,134 @@ +# Batch Normalization + +## What is batch normalization + +Batch normalization is a frequently-used method in deep network training. It adjusts the mean and variance of a layer's output, and make the data distribution easier for next layer's training. + +The principle of batch normalization can be summarized into a simple function: + +``` +y = (x - E[x]) / STD[x]) * scale + bias +``` + +`x` is a batch of output data of a certain layer. `E[x]` and `STD[x]` is the mean and standard deviation of `x`, respectively。 `scale` and `bias` are two trainable parameters. The training of batch normalization layer equals to the learning of best values of `scale` and `bias`. + +In our design, we use a single operator(`batch_norm_op`) to implement the whole batch normalization in C++, and wrap it as a layer in Python. + +## Differences with normal operators + +`batch_norm_op` is a single operator. However, there are a few differences between `BatchNormOp` and normal operators, which we shall take into consideration in our design. + +1. `batch_norm_op` shall behave differently in training and inferencing. For example, during inferencing, there is no batch data and it's impossible to compute `E[x]` and `STD[x]`, so we have to use an `estimated_mean` and an `estimated_variance` instead of them. These require our framework to be able to inform operators current running type (training/inferencing), then operators can switch their behaviors. + +2. `batch_norm_op` shall have the ability to maintain `estimated_mean` and `estimated_variance` across mini-batch. In each mini-batch, `estimated_mean` is iterated by the following equations: + +``` +if batch_id == 0 + estimated_mean = E[x] +else + estimated_mean = estimated_mean * momentum + (1.0 - momentum_) * E[x] +``` + +The iterating of `estimated_variance` is similar. `momentum` is an attribute, which controls estimated_mean updating speed. + +## Implementation + +Batch normalization is designed as a single operator is C++, and then wrapped as a layer in Python. + +### C++ + +As most C++ operators do, `batch_norm_op` is defined by inputs, outputs, attributes and compute kernels. + +#### Inputs + +- `x`: The inputs data, which is generated by the previous layer. +- `estimated_mean`: The estimated mean of all previous data batches. It is updated in each forward propagation and will be used in inferencing to take the role of `E[x]`. +- `estimated_var`: The estimated standard deviation of all previous data batches. It is updated in each forward propagation and will be used in inferencing to take the role of `STD[x]`. +- `scale`: trainable parameter 'scale' +- `bias`: trainable parameter 'bias' + +#### Outputs + +- `y`: The output data. +- `batch_mean`: The mean value of batch data. +- `batch_var`: The standard deviation value of batch data. +- `saved_mean`: Updated `estimated_mean` with current batch data. It's supposed to share the memory with input `estimated_mean`. +- `saved_var`: Updated `estimated_var` with current batch data. It's supposed to share the memory with input `estimated_var`. + +#### Attributes + +- `is_infer`: *bool*. If true, run `batch_norm_op` in inferencing mode. +- `use_global_est`: *bool*. 
If true, use `saved_mean` and `saved_var` instead of `E[x]` and `STD[x]` in trainning. +- `epsilon`: *float*. The epsilon value to avoid division by zero. +- `momentum`: *float*. Factor used in `estimated_mean` and `estimated_var` updating. The usage is shown above. + +#### Kernels + +The following graph showes the training computational process of `batch_norm_op`: + + + +cudnn provides APIs to finish the whole series of computation, we can use them in our GPU kernel. + +### Python + +`batch_norm_op` is warpped as a layer in Python: + +```python +def batch_norm_layer(net, + input, + output, + scale, + bias, + use_global_est = False, + epsilon = 1e-6, + momentum = 0.99): + mean_cache = scope.new_var(name = 'estimated_mean', trainable = False) + var_cache = scop.new_var(name = 'estimated_var', trainable = False) + batch_mean = scope.new_var(name = 'batch_mean') + batch_var = scope.new_var(name = 'batch_var') + batch_norm_op = Operator('batch_norm_op', + x = input, + estimated_mean = mean_cache, + estimated_mean = var_cache, + scale = scale, + bias = bias, + y = output, + batch_mean = batch_mean, + batch_var = batch_var, + saved_mean = mean_cache, + saved_var = var_cache, + is_infer = False, + use_global_est = use_global_est, + epsilon = epsilon, + momentum = momentum) + net.append_op(batch_norm_op) + return output +``` + +Because Python API has not been finally decided, the code above can be regarded as pseudo code. There are a few key points we shall note: + +1. `estimated_mean` and `estimated_var` are assigned the same variables with `saved_mean` and `saved_var` respectively. So they share same the memories. The output mean and variance values(`saved_mean` and `saved_var`) of a certain batch will be the inputs(`estimated_mean` and `estimated_var`) of the next batch. + +2. `is_infer` decided whether `batch_norm_op` will run in training mode or inferencing mode. However, a network may contains both training and inferencing parts. And user may switch `batch_norm_op`'s running mode in Python `for` loop like this: + +```python +for pass_id in range(PASS_NUM): + # ... + net.train() # run training model + if pass_id % 100 == 0: + net.infer(test_image) # run inferencing model + # ... +``` + +`is_infer` is an attribute. Once an operator is created, its attributes can not be changed. It suggests us that we shall maintain two `batch_norm_op` in the model, one's `is_infer` is `True`(we call it `infer_batch_norm_op`) and the other one's is `False`(we call it `train_batch_norm_op`). They share all parameters and variables, but be placed in two different branches. That is to say, if a network contains a `batch_norm_op`, it will fork into two branches, one go through `train_batch_norm_op` and the other one go through `infer_batch_norm_op`: + +
+ +
+ +Just like what is shown in the above graph, the net forks before `batch_norm_op` and will never merge again. All the operators after `batch_norm_op` will duplicate. + +When the net runs in training mode, the end of the left branch will be set as the running target, so the dependency tracking process will ignore right branch automatically. When the net runs in inferencing mode, the process is reversed. + +How to set a target is related to Python API design, so I will leave it here waiting for more discussions. diff --git a/doc/paddle/design/modules/evaluator.md b/doc/paddle/design/modules/evaluator.md new file mode 100644 index 0000000000000000000000000000000000000000..de9605b0e67a035ab1ef1e4cafbe838f83bc5807 --- /dev/null +++ b/doc/paddle/design/modules/evaluator.md @@ -0,0 +1,58 @@ +# Evaluator Design + +## Problem Statement + +During training or inference, we provide an evaluation function to measure the model performance, for example, accuracy, precision, etc. In the operator based framework design, the data passes through the network pipeline batch by batch. As a result, inside the operator, we only calculate the metrics for one minibatch. Thus, we need to provide a mechanism to calculate the metrics for each N pass/batch the user wants. + +## Evaluator Design +Currently, every operation is expressed in the graph. We divide the evaluator process into three steps. + +1. Initialize the metric state and add it into the block. + +2. Calculate the concerned metrics for every mini-batch. The single evaluator operator is only responsible for calculating the necessary statistics for one mini-batch. For example, the accuracy operator only calculates the accuracy for a minibatch data if run once. + + +3. Merge the mini-batch statistics to form the evaluation result for multiple mini-batches. When it comes to distributed training/Multi-GPU training, aggregate the value from different devices. + +## Implementation +This design is shown in the Python API. +Each metric operator needs to caculate the metric statistic and return the batch-aware states. Python side is responsible for accumulating the states for each pass. + + +```python +class Evaluator(object): + """ + Evaluator Base class. + """ + def __init__(self, name, **kwargs): + """ + Different evaluator may has different metric states. E.g, Accuracy need two variables, total and right sample counts. + Auc need four variables, `true_positives`, + `true_negatives`, `false_positives` and `false_negatives`. So every evaluator should create its needed variables and append to main_program + + The initialization of Evaluator should be responsible for: + create metric states and append to the main_program + """ + pass + + def _update_ops(self, input, label, **kwargs) + """ + Add mini-batch evaluator caculate operators to the main_program. + Add increment operator to accumulate the metric states. + """ + + + def reset(self, executor, reset_program=None): + """ + Reset metric states at the begin of each pass/user specified batch number. + Execute the reset_program to reset the states. + """ + + + def eval(self, executor, eval_program=None): + """ + Merge the mini-batch statistics to form the evaluation result for multiple mini-batches. + Execute the eval_program and return the result. 
+ """ + return eval_result +``` diff --git a/doc/paddle/design/modules/images/batch_norm_fork.dot b/doc/paddle/design/modules/images/batch_norm_fork.dot new file mode 100644 index 0000000000000000000000000000000000000000..4bc47713cba2cb23f1b34fffe6426ef10ac3a9df --- /dev/null +++ b/doc/paddle/design/modules/images/batch_norm_fork.dot @@ -0,0 +1,25 @@ +digraph ImageBatchNormForkGragh { + subgraph cluster_before { + Prev [label="...", shape=plaintext]; + Rnn [label="rnn_op", shape=box]; + BatchNorm [label="batch_norm_op", shape=box]; + Fc [label="fc_op", shape=box]; + After [label="...", shape=plaintext]; + Prev -> Rnn -> BatchNorm -> Fc -> After; + label="original"; + } + + subgraph cluster_after { + Prev2 [label="...", shape=plaintext]; + Rnn2 [label="rnn_op", shape=box]; + BatchNorm2_1 [label="train_batch_norm_op", shape=box]; + BatchNorm2_2 [label="infer_batch_norm_op", shape=box]; + Fc2_1 [label="fc_op", shape=box]; + Fc2_2 [label="fc_op", shape=box]; + After2_1 [label="...", shape=plaintext]; + After2_2 [label="...", shape=plaintext]; + Prev2 -> Rnn2 -> BatchNorm2_1 -> Fc2_1 -> After2_1; + Rnn2 -> BatchNorm2_2 ->Fc2_2 ->After2_2 + label="forked"; + } +} diff --git a/doc/paddle/design/modules/images/batch_norm_fork.png b/doc/paddle/design/modules/images/batch_norm_fork.png new file mode 100644 index 0000000000000000000000000000000000000000..aded62bce5bc268b7a3ef4dc96c89fe21d6ea955 Binary files /dev/null and b/doc/paddle/design/modules/images/batch_norm_fork.png differ diff --git a/doc/paddle/design/modules/images/batch_norm_op_kernel.png b/doc/paddle/design/modules/images/batch_norm_op_kernel.png new file mode 100644 index 0000000000000000000000000000000000000000..a99ce81ff3bf42880ebbd6a1297de3bf038e09b2 Binary files /dev/null and b/doc/paddle/design/modules/images/batch_norm_op_kernel.png differ diff --git a/doc/paddle/design/modules/images/feed_forward.png b/doc/paddle/design/modules/images/feed_forward.png new file mode 100644 index 0000000000000000000000000000000000000000..d312371a04c26aa6cd196e0bd1f51becb425180b Binary files /dev/null and b/doc/paddle/design/modules/images/feed_forward.png differ diff --git a/doc/paddle/design/modules/images/feed_forward_regularized.png b/doc/paddle/design/modules/images/feed_forward_regularized.png new file mode 100644 index 0000000000000000000000000000000000000000..677e99bfd9f8e72ed9fe4b27127af2ced202f447 Binary files /dev/null and b/doc/paddle/design/modules/images/feed_forward_regularized.png differ diff --git a/doc/paddle/design/modules/images/l1_regularization.png b/doc/paddle/design/modules/images/l1_regularization.png new file mode 100644 index 0000000000000000000000000000000000000000..e1b9c7a44f94dc027598a98da93ddb8133190972 Binary files /dev/null and b/doc/paddle/design/modules/images/l1_regularization.png differ diff --git a/doc/paddle/design/modules/images/l2_regularization.png b/doc/paddle/design/modules/images/l2_regularization.png new file mode 100644 index 0000000000000000000000000000000000000000..d5c2fcbc2ccae75ad083162e5a2dceb0210be298 Binary files /dev/null and b/doc/paddle/design/modules/images/l2_regularization.png differ diff --git a/doc/paddle/design/modules/images/loss_equation.png b/doc/paddle/design/modules/images/loss_equation.png new file mode 100644 index 0000000000000000000000000000000000000000..14212ec8d36c803de96bde8a9a4b5591bd20434e Binary files /dev/null and b/doc/paddle/design/modules/images/loss_equation.png differ diff --git a/doc/paddle/design/modules/index_cn.rst b/doc/paddle/design/modules/index_cn.rst new 
file mode 100644 index 0000000000000000000000000000000000000000..b25783f0f5120991c29ba31b7b512bd4c183eecf --- /dev/null +++ b/doc/paddle/design/modules/index_cn.rst @@ -0,0 +1,14 @@ +代码结构和重要模块 +----------------- + +.. toctree:: + :maxdepth: 1 + + backward.md + python_api.md + regularization.md + infer_var_type.md + optimizer.md + prune.md + register_grad_op.md + net_op_design.md diff --git a/doc/paddle/design/modules/index_en.rst b/doc/paddle/design/modules/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..2108156e080996916f2650448f0a56f998757204 --- /dev/null +++ b/doc/paddle/design/modules/index_en.rst @@ -0,0 +1,14 @@ +Code Structure and Important Modules +------------------------------------- + +.. toctree:: + :maxdepth: 1 + + backward.md + python_api.md + regularization.md + infer_var_type.md + optimizer.md + prune.md + register_grad_op.md + net_op_design.md diff --git a/doc/paddle/design/modules/infer_var_type.md b/doc/paddle/design/modules/infer_var_type.md new file mode 100644 index 0000000000000000000000000000000000000000..d9d5397becba2ef1806d9341cd49cd9aabbf4a6a --- /dev/null +++ b/doc/paddle/design/modules/infer_var_type.md @@ -0,0 +1,78 @@ +# Design Doc: InferVarType + +## The Problem Posed + +The variable in our design can hold variant types. Such as `LoDTensor` and `SelectedRows`. An operator should be able to inference the variable types of its output. + +For example, a `lookup table` operator takes two `LoDTensor`; one is a float tensor as the embedding table, the other is an int tensor as word ID. The gradient operator of `lookup table` will generate a `SelectedRows` as its output. A `sum` operator can take both `LoDTensor` and `SelectedRows` as its inputs and will generate a `LoDTensor` if any of its inputs is `LoDTensor`, otherwise, the `sum` operator will generate `SelectedRows` as its output. + +The variable type will be constant at runtime. Every variable's type can either be set by the user (input data and parameter) or be inferred by the operator in compile time. + +## Proposed Solution + +The `InferVarType` is a compile-time function which is registered to each operator. The inferface of that function is: + + +```c++ +using InferVarTypeFN = std::function< + void (const OpDescBind& /*op_desc*/, BlockDescBind* /*block*/)>; +``` + +It takes an operator description as its input and will write the output variable type and store them in block description. + +The `InferVarTypeFN` will be registered in `OpInfo`, to replace `infer_var_type_` field. The `OpInfo` should be + +```cpp +struct OpInfo { + InferVarTypeFN infer_var_type_; + ... +}; +``` + +The default `InferVarType` will set output type as `LoDTensor`. It can be done by `GetInferVarType()`. + +```cpp +void DefaultInferVarType(const OpDescBind& op_desc, BlockDescBind* block) { + // set the output type of variable as `LoDTensor`. + // ... +} + +struct OpInfo { + InferVarTypeFN infer_var_type_; + InferVarTypeFN GetInferVarType() const { + if (infer_var_type_) { + return infer_var_type_; + } else { + return DefaultInferVarType; + } + } +}; +``` + +## Register InferVarType + +We provide a thin base class for registering an `InferVarTypeFN`. To use a base class will ease the implementation of registry since we can detect the registry entry is an `InferVarTypeFN` or not. + +```cpp +class VarTypeInferer { +public: + virtual void operator()(const OpDescBind& op_desc, BlockDescBind* block) const = 0; +} +``` + +Operator developers can write the specialize `VarTypeInferer` as follow. 
+ +```cpp +class SpecialVarTypeInferer : public VarTypeInferer { +public: + virtual void operator()(const OpDescBind& op_desc, BlockDescBind* block) const { + // .. own logic + } +} +``` + +Then user can register the `InferVarType` just like `GradOpDescMaker` and `OpInfoMaker`. + +``` +REGISTER_OPERATOR(some_op, OpType, SpecialVarTypeInferer, ...); +``` diff --git a/doc/paddle/design/modules/net_op_design.md b/doc/paddle/design/modules/net_op_design.md new file mode 100644 index 0000000000000000000000000000000000000000..e64ac2fb1c6898bfeb883250347da3d9a4757b97 --- /dev/null +++ b/doc/paddle/design/modules/net_op_design.md @@ -0,0 +1,250 @@ +# Network Design + +`Network` is the container and controller of a set of operators, +user can build a real network from a `NetDesc` which is a protobuf message +and use `Network.Run()` to run all the operators in the network. + +A network object knows all Operators belonging to this network. Variables, +which are inputs and outputs of these operators, +are created and managed by a hierarchy of Scope objects. + +## API + +### Net +To make the `Network` extendable, a base class is defined like this + +```c++ +// operator's index stored in a network. +typedef int OpIndex; + +// The minimum a network should be implemented. +class Net { + public: + // run all the operators and return success(true) or not, with all the + // variables are located in `scope`. `context` describes the detail execution + // environment for ops. `begin` and `end` specify the scope of `ops_` to run, + // If no positive indexes are provided, all operators in `ops_` will run. + virtual Error Run(Scope *scope, OpContext *context, OpIndex begin = -1, + OpIndex end = -1) const = 0; + + // Add an Operator according to `def`. + virtual OpIndex AddOp(const proto::OpDef &def) = 0; + + // Add optimizer operators acctording to `attrs`. + virtual Error AddOptimizerOps(const OptAttrs &attrs) = 0; + + // Add backward operators. + virtual Error AddBackwardOps() = 0; + + // Infer the shapes of variables required by operators in the network. The + // `scope` will be mutated according to the inferred shapes. + + static std::unique_ptr Create(const NetDesc &def = NetDesc()); +}; +``` + +All network implementations should build networks from a protobuf message which +describes the structure of a real network; `Run` method should be implemented by +all implementations to offer a universal method to forward or backward compute a network. + +`Net::Create` is a method of factory pattern and can be implemented like + +```c++ +std::unique Net::Create(const NetDesc& def) { + switch (def.model_type()) { + case NN: + return new Network(def); + case Recursive: + return new RecursiveNet(def); + case Recurrent: + return new RecurrentNet(def); + } + return nullptr; +} +``` + +Network is designed as the container of operators. to make it more extendable, +we decouple it from the related variable resources. + +`Run(Scope* scope)` takes the scope as a argument so that it can run in different scopes. + +Finally, `Net` can be used as followed + +```c++ +Scope default_scope; +OpContext default_context; +auto net = Net::CreateNet(def); + +if (net) { + net.Run(&default_scope, &default_context); +} +``` + +### `PlainNet` as a simple implementation of `BaseNet` + +A very basic implementation is as follows. All it does is simply to run every operators in sequence. + +```c++ +class PlainNet : public Net { + public: + // Create a network describe by `def`. NetDesc is the definition of a network. 
+ PlainNet(const NetDesc &def); + + // Infer all the operators' input and output varialbes' shapes, will be called before every mini-batch + training. + virtual Error InferShape(Scope *scope) override; + + // Run all the operators with the `scope`, if no scope is provided, default + // scope will be used instead. If no OpContext is provicded, default context will be used. + virtual Error Run(Scope *scope = nullptr, OpContext *context=nullptr, OpIndex begin = -1, + OpIndex end = -1) const override; + + virtual OpIndex AddOp(const proto::OpDef &def) override; + + virtual Error AddOptimizerOps(const OptAttrs &attrs) override; + + virtual Error AddBackwardOps() override; + + protected: + // Create operators accordding to `def`, will be called by the constructor. + Error BuildNet(const NetDesc &def); + + // Add a operator which is identified as `type` and has attributes described + // in `attrs`, the `inputs` are the keys of readonly input variables, + // `outputs` are keys of mutable output variables. An `OpIndex` will be + // returned to indicate the offset of the new operator in `ops_`. + OpIndex AddOp(const std::string &type, const std::vector &inputs, + const std::vector &outputs, + const OprAttr &attrs = OprAttr()); + + private: + // the operators owned by `Network`. + std::vector ops_; +}; +``` + +`PlainNet` will create operators so that a private member `ops_` is defined, +the operators are created by `CreateNet`, and each operator is created by `AddOp`. + + +## PlainNet Usage +`PlainNet` can be used to define and run a network as follows + +```c++ +// create an empty scope located on CPU device. +Scope scope(CPUPlace()); + +// create and init variables described in `net_desc`. +scope.CreateVariables(net_desc); +scope.InitVariables(net_desc); + +// create a network according to `net_desc` +auto net = Net::CreateNet(net_desc); +// Add more operators if needed. +net->AddOp(add...); +net->AddOp(fc...); + +net->AddBackwardOps(); +net->AddOptimizerOps(); + +// run the network providing the `scope`. +net.Run(&scope); +``` + +## `NetBuilder` as a C++ syntax wrapper +This is a detailed description of the user-related C++ network API, and may not needed in the prototype development stage. + +The `NetBuilder` will give users a much simpler syntax as follows to create a network, and demonstrates how to use the `BaseNet`'s raw interfaces. + +```c++ +Variable* fc_out = builder.AddOp("fc", input=image, size=100, activation="Sigmoid"); +Variable* prediction = builder.AddOp("fc", input=fc_out, size=10, activation="Sigmoid"); +Variable* loss = builder.AddOp("cross_entropy", input=prediction, label=label); +Variable* avg_loss = builder.AddOp("mean", loss); + +builder.BackwardFrom(avg_loss) +builder.AddOptimization(1e-4, "adam"); +builder.Run(); +``` + +`NetBuilder` will call `Net` 's virtual functions to change the real network structure, here is a sample definition + +```c++ +class NetBuilder final { + public: + NetBuilder(Net* net) : net_(net) {} + + Variable* AddOp(const string& type, const vector& inputs, + size_t size, Activation act) { + // much code here. + // ... + net_->AddOp(def); + need_rebuild_net_ = true; + net_->InferShape(); + // ... + } + + Error BackwardFrom(const Variable& cost); + + Error Run(Scope* scope, OpContext* context, bool need_backward = true) { + // backward. + if (need_backward) { + if (need_rebuild_net_) { + AddBackwardOps(); + AddOptimizerOps(); + } + net_->Run(scope, context); + return; + } + // just forward. 
+ net_->Run(scope, context, 0, last_forward_op_); + } + + protected: + Error AddBackwardOps(); + Error AddOptimizerOps(); + + private: + Net* net_; + OpIndex last_forward_op_{-1}; + bool need_rebuild_net_{true}; +} +``` + +### Compatibility with RNN + +Benefitting from the decoupling of `PlainNet.Run` and `Scope`, `PlainNet` is compatible with future RNN design, +for example we can implement a simple recurrent neural network as follows + +```c++ +// copy some `vars` form `source` to `target` +void Copy(const Scope &source, Scope &target, + const std::vector &vars); + +Scope default_scope; +// some initial mutations on `default_scope` here. + +auto rnn_step_net = PlainNet(rnn_step_net_def); + +// Create rnn's states, the last scope is used to store rnn outputs. +Scope *rnn_states = new Scope[num_states + 1]; + +for (int i = 0; i < num_states + 1; i++) { + // Initialize all rnn state scopes, copy parameters and so on. + rnn_states[i].CreateVars(rnn_step_net_def); + Copy(default_scope, rnn_states[i], rnn_related_vars); + // Prepare rnn's inlinks, just copy inlink variables to each state. + Copy(default_scope, rnn_states[i], inlink_vars); +} + +// Run the rnn. +for (int i = 0; i < num_states; i++) { + rnn_step_net.Run(rnn_states[i]); + // Copy current state's state variables to next state, the related variables + // are named like "previous_state_xxx". + Copy(rnn_states[i], rnn_states[i + 1], pre_state_vars) +} + +// Copy rnn's final outputs to `default_scope`. +Copy(rnn_states[num_states], default_scope, outlink_vars); +``` diff --git a/doc/paddle/design/modules/optimizer.md b/doc/paddle/design/modules/optimizer.md new file mode 100644 index 0000000000000000000000000000000000000000..8a8c86f1efca2e621315c80c6dc2f32f9a70ec31 --- /dev/null +++ b/doc/paddle/design/modules/optimizer.md @@ -0,0 +1,91 @@ +# Optimizer Design + +## The Problem + +A PaddlePaddle program, or a block, is a sequence of operators operating variables. A training program needs to do three kinds of works: + +1. the forward pass, which computes intermediate results and the cost(s), +1. the backward pass, which derives gradients from intermediate results and costs, and +1. the optimization pass, which update model parameters to optimize the cost(s). + +These works rely on three kinds of operators: + +1. forward operators, +1. gradient operators, and +1. optimization operators. + +It's true that users should be able to create all these operators manually by calling some low-level API, but it would be much more convenient if they could only describe the forward pass and let PaddlePaddle create the backward and optimization operators automatically. + +In this design, we propose a high-level API that automatically derives the optimisation pass and operators from the forward pass. + + +## High-level Python API to describe the training process + +1. User write code to describe the network: + + ```python + images = layer.data("images") + labels = layer.data("labels") + w1 = pd.var("w1") + b1 = pd.var("b1") + hidden = layer.fc(images, w=w1, b=b1) + cost = layer.mse(hidden, labels) + ``` + + The above code snippet will create forward operators in [Block](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/block.md). + + +2. Users create a certain kind of Optimizer with some argument. + + ```python + optimizer = AdagradOptimizer(learing_rate=0.001) + ``` + +3. Users use the optimizer to `minimize` a certain `cost` through updating parameters in parameter_list. 
+ + ```python + opt_op_list = optimizer.minimize(cost, parameter_list=[w1, b1]) + ``` + The above code snippet will create gradient and optimization operators in Block. The return value of `minimize()` is list of optimization operators that will be run by session. + +4. Users use Session/Executor to run this opt_op_list as target to do training. + + ```python + sess.run(target= opt_op_list, ...) + ``` + +### Optimizer Python interface: + +```python +class Optimizer(object): + """Optimizer Base class. + + """ + + def __init__(self): + pass + + def create_optimization_pass(self, parameters_and_grads): + """Add optimization operators to update gradients to variables. + + Args: + parameters_and_grads: a list of (variable, gradient) pair to update. + + Returns: + optmization_op_list: a list of optimization operator that will update parameter using gradient. + """ + return None + + def minimize(self, loss, parameter_list): + """Add operations to minimize `loss` by updating `parameter_list`. + + This method combines interface `append_backward()` and + `create_optimization_pass()` into one. + """ + params_grads = self.create_backward_pass(loss, parameter_list) + update_ops = self.create_optimization_pass(params_grads) + return update_ops + +``` + +Users can inherit the Optimizer above to create their own Optimizer with some special logic, such as AdagradOptimizer. diff --git a/doc/paddle/design/modules/prune.md b/doc/paddle/design/modules/prune.md new file mode 100644 index 0000000000000000000000000000000000000000..2925338d36c9f61fa0c044e51cacf8026b386982 --- /dev/null +++ b/doc/paddle/design/modules/prune.md @@ -0,0 +1,63 @@ +# Prune + +## Motivation + +We want to support running inference, training and checkpointing in one `ProgramDesc`. We implement +`void Prune(const ProgramDesc* input, ProgramDesc* output)` function, which takes a `ProgramDesc` +and generate a pruned `ProgramDesc`. + +## Challenge + +Pruning need to support both variables and operators being evaluation targets. Consider the following +different situations. + +```python +# Case 1: run foward pass. +cost_np = session.run(target=cost) +# Case 2: run backward passing. +opts_np, _ = session.run(target=[cost, opt]) +# Case 3: run checkpointing +_ = session.run(target=checkpoint) +``` + +## Solution + +To support evaluation of operators, we add `is_target` field in the `OpDesc`. + +```c++ +message OpDesc { + required string type = 3; + repeated Var inputs = 1; + repeated Var outputs = 2; + repeated Attr attrs = 4; + optional bool is_target = 5 [ default = false ]; +}; +``` + +To support evaluation of variables, we add [fetch_op](https://github.com/PaddlePaddle/Paddle/pull/4599). +For each variable in the `target`, we insert a `fetch_op` into the `ProgramDesc` with `variable` being +`fetch_op`'s input. Then we also set `fetch_op` is a target. + +### Algorithm + +If an operator needs to be run, it must fall into one of the following cases: + +1. It is the target. +2. It is depended by some other ops, meaning its output is some other op's input. + +The first case can be checked by `op_desc.is_traget()` . 
The second case can be implement as + +```c++ +bool HasDependentVar(const OpDesc& op_desc, const std::set& dependent_vars) { + for (auto& var : op_desc.outputs()) { + for (auto& argu : var.arguments()) { + if (dependent_vars.count(argu) != 0) { + return true; + } + } + } + return false; +} +``` + +Then the whole algorithm can be implemented as the following [code](https://github.com/tonyyang-svail/Paddle/blob/prune_impl/paddle/framework/prune.cc). diff --git a/doc/paddle/design/modules/python_api.md b/doc/paddle/design/modules/python_api.md new file mode 100644 index 0000000000000000000000000000000000000000..527e36e7cac1ab515ca0bc56018eaaae18a4d0f6 --- /dev/null +++ b/doc/paddle/design/modules/python_api.md @@ -0,0 +1,325 @@ +# Design Doc: Python API + +Due to the refactorization of the PaddlePaddle core, we need Python classes to construct corresponding protobuf messages that describe a DL program. + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Python classes | Protobuf messages |
|----------------|-------------------|
| Program        | ProgramDesc       |
| Block          | BlockDesc         |
| Operator       | OpDesc            |
| Variable       | VarDesc           |
+ + +Please be aware that these Python classes need to maintain some construction-time information, which are not part of the protobuf messages. + +## Core Concepts + +### Program + +A `ProgramDesc` describes a [DL program](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/concepts/program.md), which is composed of an array of `BlockDesc`s. The `BlockDesc`s in a `ProgramDesc` can have a tree-like hierarchical structure. However, the `ProgramDesc` onlys stores a flattened array of `BlockDesc`s. A `BlockDesc` refers to its parent block by its index in the array. For example, operators in the step block of an RNN operator need to be able to access variables in its ancestor blocks. + +Whenever we create a block, we need to set its parent block to the current block, hence the Python class `Program` needs to maintain a data member `current_block`. + +```python +class Program(objects): + def __init__(self): + self.desc = core.NewProgram() # a C++ ProgramDesc pointer. + self.blocks = vector() + self.blocks.append(Block(self, -1)) # the global block + self.current_block = 0 # initialized to the global block + + def global_block(): + return self.blocks[0] + + def current_block(): + return self.get_block(self.current_block) + + def rollback(): + self.current_block = self.current_block().parent_idx + + def create_block(): + new_block_idx = len(self.block) + self.blocks.append(Block(self, self.current_block)) + self.current_block = new_block_idx + return current_block() +``` + +`Program` is an accessor to the protobuf message `ProgramDesc`, which is created in C++ space, because the InferShape function is in C++, which manipulates `VarDesc` messages, which are in turn members of `BlockDesc`, which is a member of `ProgramDesc`. + +`Program` creates the first block as the global block in its constructor. All parameters and their initializer operators are in the global block. + +### Block + +A [Block](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/concepts/block.md) includes + +1. a map from variable names to an instance of the Python `Variable` class, and +1. a list of `Operator` instances. + +```python +class Block(objects): + def __init__(self, program, parent_idx): + self.desc = core.NewBlock(program.desc) + self.program = program + self.vars = map() + self.ops = vector() + self.parent_idx = parent_idx + + def create_var(self, ...): + return Variable(self, ...) + + def _create_global_var(self, ...): + program.global_block().create_var(...) + + def create_parameter(self, name, ...): + # Parameter is a subclass of variable. See Parameter section for details. + self.vars[name] = Parameter(self._create_global_var(...), ...) + return self.vars[name] + + def append_operator(self, ...): + self.ops.append(Operator(self, ...)) + + def _prepend_operator(self, ...): # Parameter's ctor prepands initialize operators. + self.ops.prepend(Operator(self, ...)) +``` + +`create_parameter` is necessary because parameters are global variables, defined in the global block, but can be created in some sub-blocks. For example, an FC layer in the step block of an RNN operator. + +`_prepend_operator` is necessary because the constructor of `Parameter` needs to create the initialize (or load) operator of the parameter, and would like to put it in the *preamble* of the global block. + +### Operator + +The `Operator` class fills in the `OpDesc` message and calls the C++ function `InferShape` to infer the output shapes from the input shapes. 
+ +```python +class Operator(object): + def __init__(self, + block, # Block + type, # string + inputs, # dict + outputs,# dict + attrs # dict + ): + self.desc = core.NewOpDesc(block.desc, type, inputs, outputs, attrs) + core.infer_shape(self.desc, inputs, outputs) + + def type(self): + return self.desc.type() +``` + +`Operator` creates the `OpDesc` message in C++ space, so that it can call the `InferShape` function, which is in C++. + +### Variable + +Operators take Variables as its inputs and outputs. + +```python +class Variable(object): + def __init__(self, + block=None, # Block + name=None, # string + shape, # tuple + dtype="float32", # string + lod_level=None # int + ): + if name is None: + name = unique_name_generator() + self.name = name + self.block = block + self.desc = core.NewVarDesc(block.desc, name, shape, lod_level) + self.writer = None +``` + +Please be aware of `self.writer`, that tracks operator who creates the variable. It possible that there are more than one operators who write a variable, but in Python space, each write to a variable is represented by a Variable class. This is guaranteed by the fact that **`core.NewVarDesc` must NOT create a new `VarDesc` message if its name already exists in the specified block**. + +### Parameter + +A parameter is a global variable with an initializer (or load) operator. + +```python +class Parameter(Variable): + def __init__(self, + block=None, # Block + name=None, # string + shape, # tuple + dtype="float32", # string + lod_level=None # int + trainable, # bool + initialize_op_attrs, + optimize_op_attrs): + super(Parameter, self).__init__(block, name, shape, dtype, lod_level) + self.trainable = trainable + self.optimize_op_attrs = optimize_op_attrs + block.prepend(Operator(block, # Block + initialize_op_attrs['type'], # string + None, # no inputs + self, # output is the parameter + initialize_op_attrs) +``` + +When users create a parameter, they can call + +```python +program.create_parameter( + ..., + init_attr={ + type: "uniform_random", + min: -1.0, + max: 1.0, + }) +) +``` + +In above example, `init_attr.type` names an initialize operator. It can also name the load operator + +```python +init_attr={ + type: "load", + filename: "something.numpy", +} +``` + +`optimize_op_attrs` is not in the `VarDesc` message, but kept in the Python instance, as it will be used in the Python space when creating the optimize operator's `OpDesc`, and will be in the `OpDesc` message. + +## Layer Function + +A layer is a Python function that creates some operators and variables. Layers simplify the work of application programmers. + +Layer functions take `Variable` and configuration parameters as its input and return the output variable(s). + +For example, `FullyConnected` take one or more variable as its input. The input could be input data or another layer's output. There are many configuration options for a `FullyConnected` layer, such as layer size, activation, parameter names, initialization strategies of parameters, and so on. The `FullyConnected` layer will return an output variable. + + +### Necessity for reusing code between layer functions + +There are a lot of code that can be reused. Such as + +* Give the default value of configuration. e.g., default initialize strategy for parameters is uniform random with `min = -1.0`, `max = 1.0`. and default initialize strategy for bias is to fill zero. +* Append the activation operator. +* Create a temporary variable. +* Create parameter. +* Generate a unique name. +* Add a bias. +* ... 
+ +A mechanism to reuse code between layer functions is necessary. It will be around [150 lines of code](https://github.com/PaddlePaddle/Paddle/pull/4724/files#diff-823b27e07e93914ada859232ae23f846R12) if we write a `FullyConnected` layer without any helper functions. + + + +### Comparision between global functions and helper class + +The `FullyConnected` layer will be as follow when we provide global functions: + +```python +def fc_layer(input, size, param_attr=None, bias_attr=None, act=None, name=None): + if name is None: + name = unique_name("fc") + input = multiple_input(input) + param_attr = default_param_attr(param_attr) + param_attr = multiple_param_attr(param_attr, len(input)) + + # mul + mul_results = [] + for ipt, attr in zip(input, param_attr): + shape = ipt.shape[1:] + [size] + w = g_program.global_block().create_parameter(shape, ipt.dtype, name, attr) + tmp = create_tmp_var(name) + g_program.current_block().append_op("mul", {ipt, w}, {tmp}) + mul_results.append(tmp) + + # add sum + ... + # add bias + ... + # add activation + ... + return out +``` + +We can provide many helpers functions for layer developers. However, there are several disadvantages for global helper functions: + +1. We need a namespace for these methods, then layer developers can quickly figure out what method they can use. +2. Global functions will force layer developers to pass its parameter time by time. + +So we provide a helper class, `LayerHelper`, to share code between layer functions. The `FullyConnected` Layer will be as follow. + +```python +def fc_layer(input, size, param_attr=None, bias_attr=None, act=None, name=None): + helper = LayerHelper(locals()) # pass all parameter to LayerHelper + + mul_results = [] + for ipt, param in helper.iter_multiple_input_and_param(): + w = helper.create_parameter(shape=ipt.shape[1:] + [size], dtype = ipt.dtype) + tmp = helper.create_tmp_variable() + helper.append_op('mul', {ipt, w}, {tmp}) + mul_results.append(tmp) + + pre_bias = helper.add_sum(mul_results) + pre_activation = helper.add_bias(pre_bias) + return helper.add_activation(pre_activation) +``` + +We not only use the fewer lines of code to write `fc_layer` but also make the code clearer to understand. At the same time, layer developers can figure out what function they can invoke by typing `helper.` in a python editor. + + +### Implementation of layer helper + +We just keep all parameters of a layer function as a dictionary in layer helper as a private data member. Every method of layer helper will look up the dictionary after it is invoked. In that way, we can implement a layer helper for all layer functions even some layer does not contain some operator. For example, The `activation` is used by the FullyConnected layer or convolution layers, but a cross-entropy layer does not use it. The example code of `add_activation` are: + +```python +class LayerHelper(object): + def __init__(self, **kwargs): # kwargs is short for `keyword arguments` + self.kwargs = kwargs + + def add_activation(self, input_var): + act = self.kwargs.get("act", None) # default value is None + if act is None: # do nothing if no act + return input_var + + tmp = self.create_tmp_var(self) + self.append_op(type=act, input=input_var, output=tmp) + return tmp +``` + +### Return value of layer functions + +The layer will return a Variable, which is also the output of an operator. However, outputs of a layer function have more attributes than an operator. There are parameter variables, and their gradient variables need to return. 
To return them is useful. For example, + +1. Users can debug the network by printing parameter gradients. +2. Users can append attributes to a parameter, such as, `param.stop_gradient=True` will make a parameter stop generate the gradient. We can fix the parameter value during training by using this attribute. + +However, it is good to return a Variable for layers, since all layers and operators use Variables as their parameters. We can just append a `param` field and a `grad` field for layer function since the Python is dynamic typing. + +The sample usage is + +```python +data = fluid.layers.data(...) +hidden = fluid.layers.fc(data, ...) +... + +executor.run(fetch_list=[hidden.param, hidden.param.grad], ...) +``` + + +## Optimizer + +[Optimizer Design Doc](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/modules/optimizer.md) diff --git a/doc/paddle/design/modules/register_grad_op.md b/doc/paddle/design/modules/register_grad_op.md new file mode 100644 index 0000000000000000000000000000000000000000..484b7a5ad709d5afe7641d08da7fc35d8bccc2dd --- /dev/null +++ b/doc/paddle/design/modules/register_grad_op.md @@ -0,0 +1,92 @@ +# Design Doc: Gradient Operators Registration + + +## The Problem Posed + +Currently, for each C++ operator class definition, a *gradient operator creator* function is registered, which takes as input a C++ operator instance and returns the corresponding gradient operator instance. + +However, we noticed two problems with the current design: + +1. As we decided to separate the *compilation* and the *execution* phases, we need to change the creator to take an `OpDesc` protobuf message in a `ProgramDesc` and inserts corresponding `OpDesc` messages into the `ProgramDesc` message. + +1. For some operators, the gradient computation can be written in terms of existing operators. For example, the gradient of *minus* operator consists of two operators -- an *identity* operator followed by a *scale* operator. Hence the registration mechanism needs to support mapping from an operator to a set of operators for the gradient computation. + +## The Current Implementation + +Instances of the C++ class `OpInfo` are stored an associative map whose key is the operator type. The `grad_op_type` indicates the associated gradient operator type. An operator can create the gradient operator by invoking `OpInfo::creator_` of the gradient operator. The pseudo code is as follows + +```cpp +struct OpInfo { + std::function creator_; + std::string grad_op_type_; + ... +}; + +map OpInfoMap; + +OperatorBase* CreateGradientOperator(const OperatorBase& op) { + return OpInfoMap.at(op.Type()).creator_(...); +} +``` + +## Proposed Solution + +The mapping relationship between an operator and its gradient operators is a function. The interface of this function is: + +```cpp +// (OpDesc) --> vector +std::function(const OpDescBind&)>; +``` + +The function takes an `OpDescBind` of the forward operator and returns one or many gradient operator descriptions. `OpDescBind` is a C++ wrapper for the protobuf message `OpDesc` for rapid manipulation of `OpDesc`. + +The `GradOpDescMaker` will be registered in `OpInfo` and will replace the `grad_op_type_` field. The `OpInfo` should look like + +```cpp +struct OpInfo { + std::function>(const OpDescBind&)> grad_op_maker_; + ... +}; +``` + +The `grad_op_maker_ ` is a `nullptr` if the operator does not have any associated gradient operators. 
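To make the intended use concrete, here is a minimal sketch of how a backward-building routine could consume `grad_op_maker_`; the function name `AppendGradOps` and the exact lookup code are illustrative assumptions rather than the actual implementation:

```cpp
// Illustrative sketch only. It assumes the OpInfo/OpInfoMap definitions above;
// AppendGradOps is a hypothetical name for the backward-building step.
void AppendGradOps(const OpDescBind& fwd_op,
                   std::vector<std::unique_ptr<OpDescBind>>* backward_ops) {
  const OpInfo& info = OpInfoMap.at(fwd_op.Type());
  if (info.grad_op_maker_ == nullptr) {
    return;  // this operator has no associated gradient operators
  }
  for (auto& grad_op : info.grad_op_maker_(fwd_op)) {
    backward_ops->push_back(std::move(grad_op));
  }
}
```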
+ +We propose a base class called `GradOpDescMakerBase` to let operator developers generate `Gradient Operators` easily. The public interface of that class is + +```cpp +class GradOpDescMakerBase { +public: + GradOpDescMakerBase(const OpDescBind& ); + virtual std::vector> operator()()const = 0; +}; +``` + +We can convert `GradOpDescMakerBase` to `std::function>(const OpDescBind&)>` by + +```cpp +using GradOpMaker = ...; +std::function(const OpDescBind&)> func; +func = [] (const OpDescBind& fwd_op) { + GradOpMaker maker(fwd_op); + return maker(); +}; +``` + +We can write many helper functions since the `GradOpDescMakerBase` is a class now. The basic helper functions get the variables of `Input`, `Output`, `InputGradient` and `OutputGradient` in the forwarding operator. + +We should change register macros at the same time. In the current solution, there is no difference between forwarding operators and backward operators. So `REGISTER_OP` just register one operator. If the `REGISTER_OPERATOR ` contains `OpProtoAndCheckerMaker` and `GradOpDescMaker`, we just list them in the same macro. It can be done by a macro contains `__VA_ARGS__`. + +The user interface should be + +```cpp +vector MinusOpGradMaker(OpDesc) {...} +REGISTER_OPERATOR(minus, MinusOp, MinusOpProtoAndCheckerMaker, SumOpGradMaker); +// Developers can still manually implement gradient operator. +REGISTER_OPERATOR(minus_grad, MinusGradOp); +``` + +The interface of current `REGISTER_OP` macro could not be changed. In `REGISTER_OP`, it will invoke `REGISTER_OPERATOR` two times and generate GradOpDescMaker inside. + +```cpp +REGISTER_OP(minus, MinusOp, MinusOpProtoAndCheckerMaker, minus_grad, MinusGradOp); +``` diff --git a/doc/paddle/design/modules/regularization.md b/doc/paddle/design/modules/regularization.md new file mode 100644 index 0000000000000000000000000000000000000000..665f32c9bb1f5d4926a4c7ddbbe44daddfd883cc --- /dev/null +++ b/doc/paddle/design/modules/regularization.md @@ -0,0 +1,66 @@ +# Regularization in PaddlePaddle + +## Introduction to Regularization +A central problem in machine learning is how to design an algorithm that will perform well not just on the training data, but also on new data. A frequently faced problem is the problem of **overfitting**, where the model does not make reliable predictions on new unseen data. **Regularization** is the process of introducing additional information in order to prevent overfitting. This is usually done by adding extra penalties to the loss function that restricts the parameter spaces that an optimization algorithm can explore. + +### Parameter Norm Penalties +Most common regularization approaches in deep learning are based on limiting the capacity of the models by adding a parameter norm penalty to the objective function `J`. This is given as follows: + +
$$\tilde{J}(\theta; X, y) = J(\theta; X, y) + \alpha \, \Omega(\theta)$$

The parameter `alpha` is a hyperparameter that weights the contribution of the norm penalty term `omega` relative to the standard objective function `J`.

The most commonly used norm penalties are the L2 norm penalty and the L1 norm penalty. These are given as follows:

##### L2 Regularization:

$$\Omega(\theta) = \frac{1}{2} \lVert w \rVert_2^2$$

##### L1 Regularization

$$\Omega(\theta) = \lVert w \rVert_1 = \sum_i \lvert w_i \rvert$$
+ +A much more detailed mathematical background of regularization can be found [here](http://www.deeplearningbook.org/contents/regularization.html). + +## Regularization Survey + +A detailed survey of regularization in various deep learning frameworks can be found [here](https://github.com/PaddlePaddle/Paddle/wiki/Regularization-Survey). + +## Proposal for Regularization in PaddlePaddle + +### Low-Level implementation + +In the new design, we propose to create new operations for regularization. For now, we can add 2 ops that correspond to the most frequently used regularizations: +- L2_regularization_op +- L1_regularization_op + +These ops can be like any other ops with their own CPU/GPU implementations either using Eigen or separate CPU and GPU kernels. As the initial implementation, we can implement their kernels using Eigen following the abstraction pattern implemented for [Activation Ops](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/accuracy_op.h). This abstraction pattern can make it very easy to implement new regularization schemes other than L1 and L2 norm penalties. + +The idea of building ops for regularization is in sync with the refactored Paddle philosophy of using operators to represent any computation unit. The way these ops will be added to the computation graph, will be decided by the [layer functions](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/modules/python_api.md#layer-function) in Python API. + +### Computation Graph + +Below is an example of a really simple feed forward neural network. + +
The Python API will modify this computation graph to add regularization operators. The modified computation graph will look as follows:

*(figure: the same feed-forward network with regularization operators added)*
+    +### Python API implementation for Regularization + +Using the low level ops, `L2_regularization_op` and `L1_regularization_op`, any user can add regularization to their computation graphs. However, this will require a lot of lines of code and we should design Python APIs that support regularization. An example of such an API can be seen in [Keras](https://keras.io/regularizers/). As per the PaddlePaddle [Python API design](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/modules/python_api.md), the layer functions are responsible for creating operators, operator parameters and variables. Since regularization is a property of parameters, it makes sense to create these in the layer functions. + +#### Creation of Regularization ops +There are two possibilities for creating the regularization ops: +1. We create these ops immediately while building the computation graph. +2. We add these ops in a lazy manner, just before the backward, similar to the way the optimization ops are added. + +The proposal is to add these ops in a lazy manner just before the backward pass. + +#### Storage of Regularization attributes + +Since we want to create the regularization ops in a lazy manner, the regularization attributes (type of regularization and weight of regularization penalty) can be stored as attributes of the `Parameter` class. This is because regularization is a property of the parameters and storing regularization properties with Parameters also allows for shared parameters. + +#### High-level API + +In PaddlePaddle Python API, users will primarily rely on [layer functions](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/modules/python_api.md#layer-function) to create neural network layers. Hence, we also need to provide regularization functionality in layer functions. The design of these APIs can be postponed for later right now. A good reference for these APIs can be found in [Keras](https://keras.io/regularizers/) and also by looking at Tensorflow in [`tf.contrib.layers`](https://www.tensorflow.org/api_guides/python/contrib.layers). diff --git a/doc/paddle/design/modules/selected_rows.md b/doc/paddle/design/modules/selected_rows.md new file mode 100644 index 0000000000000000000000000000000000000000..7836ec3cd8f8351c105953ef35f8a7656f4c23d7 --- /dev/null +++ b/doc/paddle/design/modules/selected_rows.md @@ -0,0 +1,74 @@ +# Design Doc: Selected Rows + +`SelectedRows` is a type of sparse tensor data type, which is designed to support `embedding` operators. The gradient of embedding table is a sparse tensor. Only a few rows are non-zero values in this tensor. It is straight-forward to represent a sparse tensor by the following sparse tensor data structure: + +```cpp +class SelectedRows { + private: + vector rows_; + Tensor value_; + int height_; +}; +``` + +The field `height_` is the first dimension of `SelectedRows`. The `rows` are the indices of the non-zero rows of `SelectedRows`. The `value_` field is an N-dim tensor of shape `[rows.size() /* NUM_ROWS */, ...]`, which supplies values for each row. The dimension of `SelectedRows` satisfies `[height_] + value_.shape[1:]`. + +Suppose that a SelectedRows-typed variable `x` has many rows, but only two of them have values -- row 73 is `[1, 2]` and row 84 is `[3, 4]`, the `SelectedRows` representation would be: + +``` +x = SelectedRow { + rows = [73, 84], + value = [[1, 2], [3,4]] +} +``` + + +## SelectedRows in Protobuf + +`SelectedRows` is a type of `Variable`. 
`VarDesc` in protobuf should describe the `SelectedRows` information. Only the tensor dimension of a `SelectedRows` will be described in compile-time because the `rows_` and `value_` are dependent on the training data. +So we use `TensorDesc` to unify `data_type` and `dims`. A LodTensorDesc contains a `TensorDesc` and `lod_level`. The description of `SelectedRows` is a Tensor description. + +```proto +message TensorDesc { + required DataType data_type = 1; + repeated int64 dims = 2; // [UNK, 640, 480] is saved as [-1, 640, 480] +} + +message LodTensorDesc { + required TensorDesc tensor = 1; + optional int lod_level = 2; +} + +message VarDesc { + required string name = 1; + enum VarType { + LOD_TENSOR = 0; + SELECTED_ROWS = 1; + } + required VarType type = 2; + optional LodTensorDesc lod_desc = 3; + optional TensorDesc selected_rows_desc = 4; + optional bool persistable = 5 [ default = false ]; +} +``` + +## InferShape for Selected Rows + +Just like `LoD` information, `InferShape` method will infer the output tensor type as well. The operator should decide whether its output is a `SelectedRows` or `Dense` tensor. + +For example, the gradient operator of `TableLookup` will always generate `SelectedRows`. Its `InferShape` method should be like following + +```cpp +void TableLookupGrad::InferShape(context) { + ... + context.SetDataType("Embedding.Grad", kSelectedRows); +} +``` + + +## Sparse Operators + +There are several operators that need to be written to support `SelectedRows`. These are: + +1. Operators which generate `SelectedRows` gradient. e.g. Gradient of `TableLookupOp`. +2. Optimize operators which support `SelectedRows` gradient. e.g. `SGD` or `AdaGrad` for `SelectedRows`. However, there should be only one `SGD` operator. `OpWithKernel::Run` should select a suitable kernel for both `dense` tensor or `SelectedRows`. diff --git a/doc/paddle/design/motivation/api.md b/doc/paddle/design/motivation/api.md new file mode 100644 index 0000000000000000000000000000000000000000..bc222564e3ec28e306ca0572b6a23104f6e9cbc5 --- /dev/null +++ b/doc/paddle/design/motivation/api.md @@ -0,0 +1,261 @@ +# PaddlePaddle Design Doc + +## Ingredients + +As our design principle is starting from the essence: how could we +allow users to express and solve their problems as neural networks. +Some essential concepts that our API have to provide include: + +1. A *topology* is an expression of *layers*. + +1. A layer could be any kind of computation, including *cost*. + +1. Some layers have parameters, some don't. Most costs don't have + parameters. + +1. In some topologies, layers share parameters. For + example, + [the network for training a ranking model](https://github.com/PaddlePaddle/Paddle/issues/1311#issuecomment-279121850). + +1. At programming time, users specify topologies and possible sharing + of parameters. PaddlePaddle can figure out and create parameters + required (and possibly shared) by one or more topologies. + + +## Starting from Examples + +As a summarization +of +[our disucssion](https://github.com/PaddlePaddle/Paddle/issues/1315), +let us present two examples here: + + +### Example 1. Sharing Parameters between Layers + +We use +the +[3-branch ranking](https://github.com/PaddlePaddle/Paddle/issues/1311#issuecomment-279121850) model +in this example. 
For your convenience, I copy-a-paste the model's +topology as follows: + +``` +A -> f -\ +Q -> f --> cost +B -> f -/ +``` + +The following program trains the topology including the cost, and then +use the sub-network in the trained topology in inference: + +```python +def f(in): + e = paddle.layer.embedding(in, parameter_name="embedding") + o = paddle.layer.softmax(e, parameter_name="semantic") + return o + +# Create 3 topologies (subnets), they share parameters because all +# correspoinding layers have the same parameter names. +fA = f(paddle.layer.data(input_name="A")) +fB = f(paddle.layer.data(input_name="B")) +fQ = f(paddle.layer.data(input_name="Q")) + +topology = paddle.layer.less_than( + paddle.layer.cross_entropy(fA, fQ), + paddle.layer.corss_entropy(fB, fQ)) + +# Derive parameters required in topology and create them in model. +parameters = paddle.parameters.create(topology) + +# Estimate parameters used in topology from data. +paddle.train(topology, parameters, reader=read_ranking_model_data) + +# Inference using fA (or fB or fC, as they share their parameters). +[testA, testB, testQ] = read_ranking_model_data() +print "The sematic-vector of testA: ", paddle.infer(fA, parameters, testA) +``` + + +### Example 2. Sharing Parameters between "Models" + +We use GAN in this example. In the following example program, `d0` and `d1` +correspond to the two networks in the following figure: + + + +```python +def G(in): + # over-simplified example as G has only one layers: + return paddle.layer.fc(in, parameter_name="G") + +def D(in); + # again, over-simplified: + return paddle.layer.fc(in, parameter_name="D") + +# Construct the first topology, which contains both D and G. +# By learning this topology, we update parameters of G. +d0 = paddle.layer.should_be_false(D(G(paddle.layer.data()))) + +# Construct a second topology d1, which contains only D. By +# training this topology, we update parameters of D. Note +# that d1 share parameters with d0. +d1 = paddle.layer.should_be_true(D(paddle.layer.data())) + +# Create parameters from a list of multiple topologies (models) for +# the chance to share parameters between these topologies. +parameters = paddle.parameters.create([d0, d1]) + +# Iterative training of GAN. +for ...: + train(d0, parameters, reader=read_from_rng, immutable_parameters={"D"}) + train(d1, parameters, reader=read_from_realistic_images) + +# Use d1 for inference: +print "D thinks a batch of images are realistic ", infer(d1, parameters, read_mnist_images) +``` + + +### Summarization + + +Above two programs reveal some important design concerns: + +1. Users describe a topology as an expression of layers. Every layer + has a *parameter name*. If the users don't specify it explicitly, it's automatically generated as a unique name. By + specifying the parameter name, users can specify the sharing of + parameters between layers and even between topologies. + +1. `paddle.parameters.create` figures out parameters required by one + or more topologies from parameter names of layers. It creates these + parameters and returns a `ParameterSet` object, which is in essence + a map from *parameter names* to *parameters*. + +1. At training and inference time, `paddle.train` and `paddle.infer` + requires both a topology and the parameter set that holds the parameters of that topology. There are some reasons: + + 1. This prevents users from forgetting to call + `paddle.parameters.create`. + 1. `paddle.train` needs to know which parameter set to update. + 1. 
Users could load another (pre-trained) parameter set and use it + with a topology in `train.infer`. + +1. By specifying the `immutable_parameters` parameter of + `paddle.train`, we can forbid the update of these parameters. + + +## Reader + +Not all programming frameworks allow users to define I/O functions. +An example is Google MapReduce, which can only read from text, +SSTable, and RecordIO files. Hadoop MapReduce allows users to define +readers and writers by deriving from base classes `Reader` and +`Writer`. The former is less flexible but also less error-prone. We +decide to provide the flexibility to users to define their readers. + + +There are some open questions here: + +1. **Should a reader return a Python dictionary?** + +1. **How to map multiple outputs from a reader to multiple data layers?** + +1. **How to easily compose some existing readers to read more data and + feed a topology with more data layers?** + + +## Training + +The recommended way to training a model is to call `paddle.train`, +which simply calls `paddle.trainer.Default`, a global variable of +type `paddle.trainer.SGD`. Equivalently, we can do + +```python +opt = paddle.trainer.SGD(..., paddle.updater.Adam(...)) +opt.train(topology, parameters, reader=read, ...) +``` + +### Updater + +Please be aware that a trainer can accept an updater as its data +member, where an updater is a class derived from +`paddle.trainer.Updater`. This is to make it easier to customize +trainers, as discussed +[here](https://github.com/PaddlePaddle/Paddle/issues/1319). + +### Event Handler + +`paddle.train` and `paddle.trainer.XXX.train` take an optional +parameter `event_handler`, which should be either `None` or a function +that handle some events: + +1. BeginTraining +1. EndTraining +1. BeginIteration +1. EndIteration +1. BeginPass +1. EndPass + +where EndPass is sent if and only if the reader yields +`end_pass=True`. + +An example as follows: + +```python +def event_handler(event): + if ininstance(event, paddle.event.EndIteration): + print paddle.test(...) + +paddle.train(topology, parameters, reader, event_handler) +``` + +If we are writing a PaddlePaddle program in and for iPython/Jypyter, +we can use metaplotlib in the event handler to plot a curve of +cost/error versus iterations, as shown +[here](https://blog.dominodatalab.com/interactive-dashboards-in-jupyter/). + +### Distributed Training + +If users want to do distributed training on a cluster, s/he should +call `paddle.dist_train` and provides access tokens to the cluster as +a parameter. + +For example, if the user has a TLS certificate that allows him to +access a Kubernetes cluster, s/he should be able to call + +```python +paddle.dist_train(model, + trainer=paddle.trainer.SGD(..., + paddle.updater.Adam(...)), + reader=read, + k8s_user="yi", + k8s_token="kube_cluster_tls.pem", + k8s_job="hello", + num_parameter_servers=15) +``` + +The pseudo code of `paddle.dist_train` is as follows: + +```python +def dist_train(topology, parameters, trainer, reader, ...): + if os.getenv("KUBERNETES_SERVICE_HOST") == None: + image_name = k8s_user + '/' + k8s_job + docker_build(image_name) + docker_push() + kube_ctrl_start_job(image_name, k8s_user, k8s_token) + else: + rank = kube_list_containers_in_job_and_return_current_containers_rank() + if rank == 0: + master() + elif rank < 15: + parameter_server() + else: + trainer.train(model, reader=read) +``` + +Please be aware that if a process is running on the Kubernetes +cluster, it will have some environment variables pre-defined. 
+ +If `dist_train` doesn't see these environment variables, it knows +that it's running on users' personal computer, and it should work as a +*launcher*. Otherwise, it knows that it's running on the cluster and +need to figure out its role as either the master, or a trainer, or a +parameter server. diff --git a/doc/paddle/design/motivation/fluid-compiler.graffle b/doc/paddle/design/motivation/fluid-compiler.graffle new file mode 100644 index 0000000000000000000000000000000000000000..c933df2cb855462c52b2d25f7f9a99b95652961d Binary files /dev/null and b/doc/paddle/design/motivation/fluid-compiler.graffle differ diff --git a/doc/paddle/design/motivation/fluid-compiler.png b/doc/paddle/design/motivation/fluid-compiler.png new file mode 100644 index 0000000000000000000000000000000000000000..1b0ffed2039c91a3a00bbb719da08c91c3acf7bb Binary files /dev/null and b/doc/paddle/design/motivation/fluid-compiler.png differ diff --git a/doc/paddle/design/motivation/fluid.md b/doc/paddle/design/motivation/fluid.md new file mode 100644 index 0000000000000000000000000000000000000000..bb311cfc20cfa1fb2d40b4b2c55136d53cbc48ac --- /dev/null +++ b/doc/paddle/design/motivation/fluid.md @@ -0,0 +1,140 @@ +# Design Doc: PaddlePaddle Fluid + +## Why Fluid + +When Baidu developed PaddlePaddle in 2013, the only well-known open source deep learning system at the time was Caffe. However, when PaddlePaddle was open-sourced in 2016, many other choices were available. There was a challenge -- what is the need for open sourcing yet another deep learning framework? + +Fluid is the answer. Fluid is similar to PyTorch and TensorFlow Eager Execution, which describes the "process" of training or inference using the concept of a model. In fact in PyTorch, TensorFlow Eager Execution and Fluid, there is no concept of a model at all. The details are covered in the sections below. Fluid is currently more extreme in the above mentioned idea than PyTorch and Eager Execution, and we are trying to push Fluid towards the directions of a compiler and a new programming language for deep learning. + +## The Evolution of Deep Learning Systems + +Deep learning infrastructure is one of the fastest evolving technologies. Within four years, there have already been three generations of technologies invented. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Existed since | model as sequence of layers | model as graph of operators | No model |
|---------------|-----------------------------|-----------------------------|----------|
| 2013 | Caffe, Theano, Torch, PaddlePaddle | | |
| 2015 | | TensorFlow, MxNet, Caffe2, ONNX, n-graph | |
| 2016 | | | PyTorch, TensorFlow Eager Execution, PaddlePaddle Fluid |
+ + +From the above table, we see that the deep learning technology is evolving towards getting rid of the concept of a model. To understand the reasons behind this direction, a comparison of the *programming paradigms* or the ways to program deep learning applications using these systems, would be helpful. The following section goes over these. + +## Deep Learning Programming Paradigms + +With the systems listed as the first or second generation, e.g., Caffe or TensorFlow, an AI application training program looks like the following: + +```python +x = layer.data("image") +l = layer.data("label") +f = layer.fc(x, W) +s = layer.softmax(f) +c = layer.mse(l, s) + +for i in xrange(1000): # train for 1000 iterations + m = read_minibatch() + forward({input=x, data=m}, minimize=c) + backward(...) + +print W # print the trained model parameters. +``` + +The above program includes two parts: + +1. The first part describes the model, and +2. The second part describes the training process (or inference process) for the model. + +This paradigm has a well-known problem that limits the productivity of programmers. If the programmer made a mistake in configuring the model, the error messages wouldn't show up until the second part is executed and `forward` and `backward` propagations are performed. This makes it difficult for the programmer to debug and locate a mistake that is located blocks away from the actual error prompt. + +This problem of being hard to debug and re-iterate fast on a program is the primary reason that programmers, in general, prefer PyTorch over the older systems. Using PyTorch, we would write the above program as following: + +```python +W = tensor(...) + +for i in xrange(1000): # train for 1000 iterations + m = read_minibatch() + x = m["image"] + l = m["label"] + f = layer.fc(x, W) + s = layer.softmax(f) + c = layer.mse(l, s) + backward() + +print W # print the trained model parameters. +``` + +We can see that the main difference is the moving the model configuration part (the first step) into the training loop. This change would allow the mistakes in model configuration to be reported where they actually appear in the programming block. This change also represents the model better, or its forward pass, by keeping the configuration process in the training loop. + +## Describe Arbitrary Models for the Future + +Describing the process instead of the model also brings Fluid, the flexibility to define different non-standard models that haven't been invented yet. + +As we write out the program for the process, we can write an RNN as a loop, instead of an RNN as a layer or as an operator. A PyTorch example would look like the following: + +```python +for i in xrange(1000): + m = read_minibatch() + x = m["sentence"] + for t in xrange x.len(): + h[t] = the_step(x[t]) +``` + +With Fluid, the training loop and the RNN in the above program are not really Python loops, but just a "loop structure" provided by Fluid and implemented in C++ as the following: + +```python +train_loop = layers.While(cond) +with train_loop.block(): + m = read_minibatch() + x = m["sentence"] + rnn = layers.While(...) + with rnn.block(): + h[t] = the_step(input[t]) +``` + +An actual Fluid example is described [here](https://github.com/PaddlePaddle/Paddle/blob/bde090a97564b9c61a6aaa38b72ccc4889d102d9/python/paddle/fluid/tests/unittests/test_while_op.py#L50-L58). 
+ +From the example, the Fluid programs look very similar to their PyTorch equivalent programs, except that Fluid's loop structure, wrapped with Python's `with` statement, could run much faster than just a Python loop. + +We have more examples of the [`if-then-else`](https://github.com/PaddlePaddle/FluidDoc/tree/develop/doc/fluid/design/execution/if_else_op.md) structure of Fluid. + +## Turing Completeness + +In computability theory, a system of data-manipulation rules, such as a programming language, is said to be Turing complete if it can be used to simulate any Turing machine. For a programming language, if it provides if-then-else and loop, it is Turing complete. From the above examples, Fluid seems to be Turing complete; however, it is noteworthy to notice that there is a slight difference between the `if-then-else` of Fluid and that of a programming language. The difference being that the former runs both of its branches and splits the input mini-batch into two -- one for the True condition and another for the False condition. This hasn't been researched in depth if this is equivalent to the `if-then-else` in programming languages that makes them Turing-complete. Based on a conversation with [Yuang Yu](https://research.google.com/pubs/104812.html), it seems to be the case but this needs to be looked into in-depth. + +## The Execution of a Fluid Program + +There are two ways to execute a Fluid program. When a program is executed, it creates a protobuf message [`ProgramDesc`](https://github.com/PaddlePaddle/Paddle/blob/a91efdde6910ce92a78e3aa7157412c4c88d9ee8/paddle/framework/framework.proto#L145) that describes the process and is conceptually like an [abstract syntax tree](https://en.wikipedia.org/wiki/Abstract_syntax_tree). + +There is a C++ class [`Executor`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/framework/executor.h), which runs a `ProgramDesc`, similar to how an interpreter runs a Python program. + +Fluid is moving towards the direction of a compiler, which is explain in [fluid_compiler.md](../fluid_compiler.html). + +## Backward Compatibility of Fluid + +Given all the advantages from the removal of the concept of a *model*, hardware manufacturers might still prefer the existence of the concept of a model, so it would be easier for them to support multiple frameworks all at once and could run a trained model during inference. For example, Nervana, a startup company acquired by Intel, has been working on an XPU that reads the models in the format known as [n-graph](https://github.com/NervanaSystems/ngraph). Similarly, [Movidius](https://www.movidius.com/) is producing a mobile deep learning chip that reads and runs graphs of operators. The well-known [ONNX](https://github.com/onnx/onnx) is also a file format of graphs of operators. + +For Fluid, we can write a converter that extracts the parts in the `ProgramDesc` protobuf message, converts them into a graph of operators, and exports the graph into the ONNX or n-graph format. 
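As a rough illustration of such a converter, the sketch below walks the `ProgramDesc` and hands each variable and operator to a builder; `OnnxGraphBuilder` and its `AddValueInfo`/`AddNode`/`Finalize` methods are hypothetical names used only to outline the idea, not an existing API:

```cpp
// Hypothetical exporter sketch: OnnxGraphBuilder is an assumed helper type,
// not part of Paddle or ONNX. The real work is mapping each OpDesc onto an
// equivalent ONNX node (or a small subgraph of nodes).
void ExportProgramToOnnx(const ProgramDesc& program, OnnxGraphBuilder* builder) {
  const BlockDesc& global_block = program.blocks(0);
  for (const VarDesc& var : global_block.vars()) {
    builder->AddValueInfo(var);   // declare tensors (inputs, weights, outputs)
  }
  for (const OpDesc& op : global_block.ops()) {
    builder->AddNode(op);         // translate one operator into ONNX
  }
  builder->Finalize();            // serialize the resulting graph
}
```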
diff --git a/doc/paddle/design/motivation/fluid_compiler.md b/doc/paddle/design/motivation/fluid_compiler.md new file mode 100644 index 0000000000000000000000000000000000000000..ef1dc7167ac18c9517ed1fa036e32c78d1d36bff --- /dev/null +++ b/doc/paddle/design/motivation/fluid_compiler.md @@ -0,0 +1,110 @@ +# PaddlePaddle Fluid: Towards a Compiled Programming Language + +As described in [fluid.md](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/motivation/fluid.md), when a Fluid application program +runs, it generates a `ProgramDesc` protobuf message as an intermediate +representation of itself. The C++ class `Executor` can run this +protobuf message as an interpreter. This article describes the Fluid +compiler. + +![](fluid-compiler.png) + +## ProgramDesc + +Before we go deeper into the idea of compiled language, let us take a +look at a simple example Fluid application. + +```python +import "fluid" + +func paddlepaddle() { + X = fluid.read(...) + W = fluid.Tensor(...) + Y = fluid.mult(X, W) +} +``` + +This program consists of a [block](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/concepts/block.md) of three operators -- +`read`, `assign`, and `mult`. Its `ProgramDesc` message looks like +the following + +```protobuf +message ProgramDesc { + block[0] = Block { + vars = [X, W, Y], + ops = [ + read(output = X) + assign(input = ..., output = W) + mult(input = {X, W}, output = Y) + ], + } +} +``` + +## Transpilers + +We can write a transpiler program that takes a `ProgramDesc`, e.g., +the above one, and outputs another `ProgramDesc`. Let us take some +examples: + +1. *Memory optimization transpiler*: We can write a transpiler that + inserts some `FreeMemoryOp`s in the above example `ProgramDesc` so + to free memory early, before the end of an iteration, so to keep a + small memory footprint. + +1. *Distributed training transpiler*: We can write a transpiler that + converts a`ProgramDesc` into its distributed version of two + `ProgramDesc`s -- one for running by the trainer processes and the + other for the parameter server. + +In the rest of this article, we talk about a special kind of +transpiler, *Native code generator*, which takes a `ProgramDesc` and +generates a `.cu` (or `.cc`) file, which could be built by C++ +compilers (gcc, nvcc, icc) into binaries. + +## Native Code Generator + +For the above example, the native code generator transpiler, say, the +CUDA code generator, should generate a `main` function: + +```c++ +void main() { + auto X = fluid_cuda_read(...); + auto W = fluid_cuda_create_tensor(...); + auto Y = fluid_cuda_mult(X, W); +} +``` + +and the definitions of functions `fluid_cuda_read`, +`fluid_cuda_create_tensor`, and `fluid_cuda_mult`. Please be aware +that each function could just define a C++ instance of an operator and +run it. For example + +```c++ +paddle::Tensor fluid_cuda_read(...) { + paddle::Tensor t; + paddle::operator::Read r(&t, ...); + r.Run(); + return t; +} +``` + +For computational operators that have multiple *kernels*, each for a +specific hardware platform, for example, the `mult` operator, the +generated code should call its CUDA kernel: + +```c++ +paddle::Tensor fluid_cuda_mult(const paddle::Tensor& a, + const paddle::Tensor& b) { + paddle::Tensor t; + paddle::operator::Mult m(a, b, ...); + Mult.Run(cuda_context); +} +``` + +where `cuda_context` could be a global variable of type +`paddle::CUDADeviceContext`. 
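For symmetry with `fluid_cuda_read` and `fluid_cuda_mult`, a generated `fluid_cuda_create_tensor` might look like the sketch below; the `Resize`/`mutable_data` calls are assumptions about the tensor API made only for illustration:

```c++
// Sketch only: shape handling and allocation calls are illustrative.
paddle::Tensor fluid_cuda_create_tensor(const std::vector<int64_t>& dims) {
  paddle::Tensor t;
  t.Resize(paddle::make_ddim(dims));            // set the static shape
  t.mutable_data<float>(paddle::CUDAPlace(0));  // allocate memory on the GPU
  return t;
}
```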
+ +## Multi-Block Code Generation + +Most Fluid application programs may have more than one blocks. To +execute them, we need to trace [scopes](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/concepts/scope.md). diff --git a/doc/paddle/design/motivation/index_cn.rst b/doc/paddle/design/motivation/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7706e73eca644ed6db772fd77da947395313237f --- /dev/null +++ b/doc/paddle/design/motivation/index_cn.rst @@ -0,0 +1,10 @@ +设计动机和目标 +------------- + +.. toctree:: + :maxdepth: 1 + + api.md + refactorization.md + fluid.md + fluid_compiler.md diff --git a/doc/paddle/design/motivation/index_en.rst b/doc/paddle/design/motivation/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..10b64b257c604ced6b957d6d6018e8a363f00fac --- /dev/null +++ b/doc/paddle/design/motivation/index_en.rst @@ -0,0 +1,10 @@ +Design Motivations and Goals +-------------------------------------- + +.. toctree:: + :maxdepth: 1 + + api.md + refactorization.md + fluid.md + fluid_compiler.md diff --git a/doc/paddle/design/motivation/refactorization.md b/doc/paddle/design/motivation/refactorization.md new file mode 100644 index 0000000000000000000000000000000000000000..f883dab3c9f65036ed4d575074dac938953d292d --- /dev/null +++ b/doc/paddle/design/motivation/refactorization.md @@ -0,0 +1,269 @@ +# Design Doc: Refactorization Overview + +The goals of refactoring include: + +1. Making it easy for external contributors to write new elementary computation operations. +1. Making the codebase clean and readable. +1. Designing a new computation representation -- a computation graph of operators and variables. +1. Implementing auto-scalability and auto fault recoverable distributed computing with the help of computation graphs. + +## Computation Graphs + +1. PaddlePaddle represents the computation, training and inference of Deep Learning models, by computation graphs. + + 1. Please refer to [computation graphs](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/others/graph.md) for a concrete example. + +1. Users write Python programs to describe the graphs and run them (locally or remotely). + +1. A graph is composed of *variables* and *operators*. + +1. The description of graphs must be serializable/deserializable, so that: + + 1. It can be sent to the cloud for distributed execution, and + 1. It can be sent to clients for mobile or enterprise deployment. + +1. The Python program does two things + + 1. *Compilation* runs a Python program to generate a protobuf message representation of the graph and send it to + 1. the C++ library `libpaddle.so` for local execution, + 1. the master process of a distributed training job for training, or + 1. the server process of a Kubernetes serving job for distributed serving. + 1. *Execution* executes the graph by constructing instances of class [`Variable`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/framework/variable.h#L24) and [`OperatorBase`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/framework/operator.h#L70), according to the protobuf message. + +## Description and Realization of Computation Graph + +At compile time, the Python program generates a protobuf message representation of the graph, or a description of the graph. + +At runtime, the C++ program realizes the graph and runs it. + + + + + + + + + + + + + + + + + + + + + + + + + + +
| | Representation (protobuf messages) | Realization (C++ class objects) |
|-----------|-----------|----------|
| Data      | VarDesc   | Variable |
| Operation | OpDesc    | Operator |
| Block     | BlockDesc | Block    |
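To make the runtime realization step concrete, here is a minimal sketch under the assumptions above; `Scope::Var` and `OpRegistry::CreateOp` are used for illustration, and the actual executor code may differ:

```cpp
// Sketch: realize one BlockDesc by creating Variable and Operator instances.
void RealizeBlock(const BlockDesc& block_desc, Scope* scope,
                  std::vector<std::unique_ptr<OperatorBase>>* ops) {
  for (const VarDesc& var_desc : block_desc.vars()) {
    scope->Var(var_desc.name());                    // VarDesc -> Variable
  }
  for (const OpDesc& op_desc : block_desc.ops()) {
    ops->push_back(OpRegistry::CreateOp(op_desc));  // OpDesc -> Operator
  }
}
```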
+ + +The word *graph* is interchangeable with *block* in this document. A graph consists of computation steps and local variables similar to a C++/Java program block, or a pair of parentheses(`{` and `}`). + +## Compilation and Execution + +1. Run a Python program to describe the graph. In particular, the Python application program does the following: + + 1. Create `VarDesc` to represent local/intermediate variables, + 1. Create operators and set attributes, + 1. Validate attribute values, + 1. Infer the type and the shape of variables, + 1. Plan memory-reuse for variables, + 1. Generate the backward graph + 1. Add optimization operators to the computation graph. + 1. Optionally, split the graph for distributed training. + +1. The invocation of `train` or [`infer`](https://github.com/PaddlePaddle/Paddle/blob/release/1.2/python/paddle/v2/inference.py#L108) methods in the Python program does the following: + + 1. Create a new Scope instance in the [scope hierarchy](../../concepts/scope.html) for each run of a block, + 1. realize local variables defined in the BlockDesc message in the new scope, + 1. a scope is similar to the stack frame in programming languages, + + 1. Create an instance of class `Block`, in which, + 1. realize operators in the BlockDesc message, + + 1. Run the Block by calling + 1. `Block::Eval(vector* targets)` for forward and backward computations, or + 1. `Block::Eval(vector* targets)` for optimization. + + +## Intermediate Representation (IR) + +```text +Compile Time -> IR -> Runtime +``` + +### Benefits of IR + +- Optimization + ```text + Compile Time -> IR -> Optimized IR -> Runtime + ``` +- Automatically send partitioned IR to different nodes. + - Automatic Data Parallelism + ```text + Compile Time + |-> Single GPU IR + |-> [trainer-IR-0, trainer-IR-1, pserver-IR] + |-> Node-0 (runs trainer-IR-0) + |-> Node-1 (runs trainer-IR-1) + |-> Node-2 (runs pserver-IR) + ``` + - Automatic Model Parallelism (planned for future) + + +## Operator/OpWithKernel/OpKernel + + + +### Operator + +* `Operator` is the fundamental building block of the user interface. + * Operator stores input/output variable names and attributes. + * The `InferShape` interface is used to infer the shape of the output variables based on the shapes of the input variables. + * Use `Run` to compute the `output` variables from the `input` variables. + + + +### OpWithKernel/Kernel + +* `OpWithKernel` inherits `Operator`. +* `OpWithKernel` contains a Kernel map. + * `OpWithKernel::Run` get device's kernel, and invoke `OpKernel::Compute`. + * `OpKernelKey` is the map key. Only device place now, but may be data type later. + + + +## Why separate Kernel and Operator + +* Separate GPU and CPU code. + * Make Paddle capable of running without GPU. +* Make one operator (which is a user interface) and create many implementations. + * For example, same multiplication op can have different implementations kernels such as FP16 kernel, FP32 kernel, MKL, eigen kernel. +--- + +## Libraries for Kernel development + +* `Eigen::Tensor` contains basic math and element-wise functions. + * Note that `Eigen::Tensor` has broadcast implementation. + * Limit the number of `tensor.device(dev) = ` in your code. +* `thrust::transform` and `std::transform`. + * `thrust` has the same API as C++ standard library. Using `transform`, one can quickly implement customized element-wise kernels. + * `thrust`, in addition, supports more complex APIs, like `scan`, `reduce`, `reduce_by_key`. 
+* Hand-writing `GPUKernel` and `CPU` code + * Do not write in header (`.h`) files. CPU Kernel should be in cpp source (`.cc`) and GPU kernels should be in cuda (`.cu`) files. (GCC cannot compile GPU code.) +--- +## Operator Registration + +### Why is registration necessary? +We need a method to build mappings between Op type names and Op classes. + +### How is registration implemented? +Maintaining a map, whose key is the type name and the value is the corresponding Op constructor. + +--- +## The Registry Map + +### `OpInfoMap` + +`op_type(string)` -> `OpInfo` + +`OpInfo`: + +- **`creator`**: The Op constructor. +- **`grad_op_type`**: The type of the gradient Op. +- **`proto`**: The Op's Protobuf, including inputs, outputs and required attributes. +- **`checker`**: Used to check attributes. + +--- +## Related Concepts + +### Op_Maker +It's constructor takes `proto` and `checker`. They are completed during Op_Maker's construction. ([ScaleOpMaker](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/scale_op.cc#L37)) + +### Register Macros +```cpp +REGISTER_OP(op_type, op_class, op_maker_class, grad_op_type, grad_op_class) +REGISTER_OP_WITHOUT_GRADIENT(op_type, op_class, op_maker_class) +``` + +--- +## Registration Process +1. Write an Op class and its gradient Op class, if required. +2. Write an Op maker class. In the constructor of this class, describe the inputs, outputs and attributes of the operator. +3. Invoke the macro `REGISTER_OP`. This macro will + 1. Call maker class to complete `proto` and `checker` + 2. Using the completed `proto` and `checker`, it will add a new key-value pair to the `OpInfoMap` + +--- +## Backward Module (1/2) +### Create Backward Operator +- Mapping from forward Op to backward Op +![backward](https://gist.githubusercontent.com/dzhwinter/a6fbd4623ee76c459f7f94591fd1abf0/raw/61026ab6e518e66bde66a889bc42557a1fccff33/backward.png) + +--- +## Backward Module (2/2) +### Build Backward Network +- **Input**: a graph of forward operators +- **Output**: a graph of backward operators +- **Corner cases in construction** + - Shared Variables => insert an `Add` operator to combine gradients + - No Gradient => insert a `fill_zero_grad` operator + - Recursive NetOp => call `Backward` recursively + - RNN Op => recursively call `Backward` on stepnet + - RNN Op => recursively call `Backward` on stepnet + + +--- +## Scope, Variable, Tensor + +* `Tensor` is an n-dimension array with type. + * Only dims and data pointers are stored in `Tensor`. + * All operations on `Tensor` are written in `Operator` or global functions. + * Variable length Tensor design [LoDTensor](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/concepts/lod_tensor.md) +* `Variable` instances are the inputs and the outputs of an operator, not just `Tensor`. + * `step_scopes` in RNN is a variable and not a tensor. +* `Scope` is where variables are stored. + * map + * `Scope` has a hierarchical structure. The local scope can get variables from its parent scope. + +--- +## Block (in design) +### the difference between original RNNOp and Block +- As an operator is more intuitive than `RNNOp`, +- Offers a new interface `Eval(targets)` to deduce the minimal block to `Run`, +- Fits the compile-time/ runtime separation design paradigm. + - During the compilation, `SymbolTable` stores `VarDesc`s and `OpDesc`s and serialize to a `BlockDesc` + - When graph executes, a Block with `BlockDesc` is passed. It then creates `Op` and `Var` instances and then invokes `Run`. 
+ +--- +## Milestone +- Take Paddle/books as the main line, the requirement of the models motivates framework refactoring, +- Model migration + - Framework development gives **priority support** to model migration, for example, + - the MNIST demo needs a Python interface, + - the RNN models require the framework to support `LoDTensor`. + - Determine some timelines, + - Frequently used Ops need to be migrated first, + - Different models can be migrated in parallel. +- Improve the framework at the same time +- Accept imperfection, concentrate on solving the specific problem at the right price. + +--- +## Control the migration quality +- Compare the performance of migrated models with old ones. +- Follow the google C++ style guide. +- Build the automatic workflow of generating Python/C++ documentations. + - The documentation of layers and ops should be written inside the code. + - Take the documentation quality into account when submitting pull requests. + - Preview the documentations, read and improve them from a user's perspective. diff --git a/doc/paddle/design/multi_devices/index_cn.rst b/doc/paddle/design/multi_devices/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1f8439e8623e1c1ae9a12c24d08079f0ec3d761f --- /dev/null +++ b/doc/paddle/design/multi_devices/index_cn.rst @@ -0,0 +1,9 @@ +多设备支持 +------------ + +.. toctree:: + :maxdepth: 1 + + operator_kernel_type.md + kernel_selection.md + kernel_hint_design.md diff --git a/doc/paddle/design/multi_devices/index_en.rst b/doc/paddle/design/multi_devices/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..819e9c5d77b2abf8da0e2ce6f494ea5174c1d0a2 --- /dev/null +++ b/doc/paddle/design/multi_devices/index_en.rst @@ -0,0 +1,9 @@ +Multi-Device Support +---------------------- + +.. toctree:: + :maxdepth: 1 + + operator_kernel_type.md + kernel_selection.md + kernel_hint_design.md diff --git a/doc/paddle/design/multi_devices/kernel_hint_design.md b/doc/paddle/design/multi_devices/kernel_hint_design.md new file mode 100644 index 0000000000000000000000000000000000000000..b01753ef0fcf667feb0a29dd180d4b1d23d9a457 --- /dev/null +++ b/doc/paddle/design/multi_devices/kernel_hint_design.md @@ -0,0 +1,59 @@ +# Kernel Hint Design + +## Problem +In PaddlePaddle's [Design](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/execution/switch.md), one Operator may have multiple kernels. Users may have some personal preference to choose a certain type of kernel for an operator, such as `force_cpu` to choose a CPU kernel, `use_cudnn` to choose a CUDNN kernel, we need to provide a way for users to do this. + +In the current design, we use KernelType to describe one kernel. + +```cpp +struct KernelType { + Place place_; + DataType data_type_; + LayoutType layout_; +}; +``` + `place_` `data_type_` and `layout_` can be got from the input tensors of the operator, `GetActualKernelType(inputs)` use inputs to infer the proper kernel key that fit the incoming data, but users can not directly configure it. + +The [design](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/execution/switch.md) also provides a virtual method `GetExpectedKernelType` that user can overload and use to choose the KernelType they want to use. + +So we should send the information user defined in proto to `GetExpectedKernelType` for choosing a kernel. + +The problem is, how should we define and send the information for `GetExpectedKernelType` to use? + +## Solution + +### Potential choice +1. 
Do nothing, let the user add the information they want to operator‘s attribute and get them inside `GetExpectedKernelType`, this can work properly. But there is a little problem that users may define many kinds of hints for the same purpose, such as `force_cpu`, `use_cpu`, `cpu_kernel` to choose CPU kernel, and `use_cudnn`, `force_cudnn`, `cudnn_kernel` to choose CUDNN kernel. + +2. Pre-define all the needed option and use a single attr key such as `kernel_hint` for the user, this is not so flexible if the user wants to define some more kind of hint. + +### Final choice +To provide enough flexibility while avoiding confusion definition, we can define some global constants for these attribute names, such as `force_cpu`, `use_cudnn`, `use_mkldnn` for a user to choose. + +In C++ + +```cpp +const std::string kForceCPU = "force_cpu"; +const std::string kUseCUDNN = "use_cudnn"; +const std::string kUseMKLDNN = "use_mkldnn"; + +KernelType GetExpectedKernelType() { + if (Attr(kForceCPU)) { + return KernelType(CPUPlace, ...) + } else { + ... + } +} +``` + +In Python code + +```python +FORCE_CPU = core.kForceCPU() + +def xx_layer(..., force_cpu=false): + layer_helper = LayerHelper(...) + layer_helper.append_op( + type="xx", + attr={FORCE_CPU: force_cpu}) +``` diff --git a/doc/paddle/design/multi_devices/kernel_selection.md b/doc/paddle/design/multi_devices/kernel_selection.md new file mode 100644 index 0000000000000000000000000000000000000000..4d2aab87b8cf30d03075e96cc4c67070efaf963a --- /dev/null +++ b/doc/paddle/design/multi_devices/kernel_selection.md @@ -0,0 +1,101 @@ +# Kernel Selection + +## Background +Every operator has many kernels because there are multiple data types, places, data layout, library type that Fluid supports. We use the `OpKernelType ` to describe kernel types that operators can hold. + +The `OpKernelType ` is as follows: + +```cpp +struct OpKernelType { + Place place_; + DataType data_type_; + DataLayout data_layout_; + LibraryType library_type_; +}; +``` + +- The `place_` is a descriptor of the device, e.g., CPUPlace, CUDAPlace. + +- The `data_type_` is the data type that this kernel performs on, e.g., `FP32`, `INT64`. Note that one kernel may have inputs with different data types. However, it will be a major `data_type`. For example, the `cross_entropy` takes `int64` as it label, and `double`/`float` as its input logit and output cost. The major `data_type` of `cross_entropy` is `float` or `double`. + +- The `data_layout_ ` is useful for some computational library. One example is that MKLDNN uses many kinds of layout, such as `nChw8c`. Each kind of layout will invoke the different kernel. + +- The `library_type_` describes the computational library, e.g., `MKLDNN`, `CUDNN`. + +## Problem + +We register a kernel for every operator and every kernel type ideally. However, it is impracticable for the following situations. + +1. Some operators, like CRF, are complicated and inefficient to be implemented on GPU. The CRF operator will only have a CPU kernel. +2. Some operators will take too many memory. It is better to force them into CPU. However, the rest of operators in this neural network will be performed on GPU, i.e., model parallel problem. +3. Some layout and place are particular. One example is that MKLDNN uses `nChw8` and there is no other library uses `nChw8c`. + +Take one situation to give a detailed explanation, if we have two Operators: OP1 and OP2, OP1 has one output `op1_to_op2`, and `op1_to_op2` is the input of OP2. 
+ +If OP1 and OP2 run on the same place(for example CPUPlace), then `op1_2_op2` can be used directly by OP2. + +``` +OP1(CPUPlace) + | + op1_2_op2 + | +OP2(CPUPlace) +``` + +If OP1 and OP2 run one different place, then OP2 cannot `use op1_2_op2` directly. + +Problems under these situations are similar. We can formalize this problem as follow. + +We register kernels with types $KT = \{kt_1, kt_2, kt_3, ...\}$ for one operator. The inputs of this operator should be run on kernel type $kt_{?}$, which the $kt_{?} \notin KT$. How to cast the input of this operator from $kt_{?}$ to any of kernel type in $KT$. + +## Solution: data transform + +It is clear that transforming inputs of an operator to adapt another kernel type is not related to the particular operator. So we should register these transformation methods as global methods. + +We can infer kernel type for each input of an operator. We let this kernel type as `actual kernel type for var`, which means this kernel type is the kernel type that can process this input variable. + +We can get a kernel type by 1) The configuration of operator description. (Users may want to force use `MKL` for `conv` operator). 2) The place of the current executor. (Executor is running on GPU). This kernel type is what we expect the operator will be performed on. We let this kernel type as `expect kernel type`. + +We transform the input data from `actual` to `expect` if the actual kernel type is not as same as expect kernel type. + +The algorithm is described as following + +```cpp +void OperatorWithKernel::Run( + const Scope& scope, + const platform::Place& place) const { + ExecutionContext ctx(...); + auto expected_kernel_key = this->GetExpectedKernelType(ctx); + + Scope& new_scope = scope.NewScope(); + + for (auto& var_name : this->Inputs()) { + auto* tensor_in = GetTensor(var_name); + auto kernel_type_for_var = this->GetKernelTypeForVar(...); + if (kernel_type_for_var.place_ != expected_kernel_key.place_) { + auto* trans_var = new_scope.Var(var_name); + auto* out = TransformData(expected_kernel_key, + kernel_type_for_var, + *tensor_in); + SetTensorToVariable(...); + } + } + + auto kernel = kernels.find(expected_kernel_key); + kernel->Compute(ExecutionContext(...)); +} +``` + +then the actual process for the multi-device above will be: + +``` +OP1(CPUPlace) + | +op1_2_op2(on CPU) + | +[transform](from CPU to GPU) + | +op1_2_op2(on GPU) + | +OP2(CUDAPlace) +``` diff --git a/doc/paddle/design/multi_devices/operator_kernel_type.md b/doc/paddle/design/multi_devices/operator_kernel_type.md new file mode 100644 index 0000000000000000000000000000000000000000..5e391bd62b4f4e123a9a6f35b7adf5726f205635 --- /dev/null +++ b/doc/paddle/design/multi_devices/operator_kernel_type.md @@ -0,0 +1,91 @@ +# Design Doc: The Keys of Operator Kernel Type +## Problem +An operator can have different kernel implementations, and each operator will have a map to store the related kernels. Fluid uses `OpKernelType` as a key to identify a unique kernel. Before an operator runs, a certain type of kernel must be chosen via a key of `OpKernelType`. Currently, `OpKernelType` is defined as follows: + +```cpp +struct OpKernelType { + platform::Place place_; + proto::DataType data_type_; +}; +``` +For more details, please refer to [codes](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/framework/operator.h#L348-L374) in github. + +It contains two keys, `Place` and `DataType`. And these two keys will be hashed to a unique key to represent a certain type of kernel. 
However, these two keys do not provide enough information. We need a more complete representation of `OpKernelType`. + +We often implement a kernel of an operator with some computing library on certain device(place). Please note that computing library and device do not have a one-to-one correspondence. A device can have a lot of computing libraries and a computing library can also support different devices. + +For example, Eigen library supports Nvidia GPU/AMD GPU/CPU and MKLDNN library supports Intel CPU/Intel FPGA. Both `Place` and `Library` should be a key of `OpKernelType`. + +Different DataTypes, such as fp64/fp32/int8, will obviously have different kernels. But different data layout of a Tensor will also lead to different implementations. Please refer to the batch norm operator [kernels](https://github.com/PaddlePaddle/Paddle/blob/a948fac4d0ad7e0412d373b8aabeb711c2899563/paddle/operators/batch_norm_op.cc#L180-L209) as an example. Data layout should also be taken into consideration. + +## Solution + +There are four keys to determine a kernel type of an operator: `Place`/`Library`/`DataType`/`Layout`. + +```cpp +struct OpKernelType { + platform::Place place_; + platform::Library library_; + proto::DataType data_type_; + framework::Layout layout_; +}; +``` + +The details are as follows: + +### Place + +`Place` is defined as: + +```cpp +typedef boost::variant Place; +``` + +`Place` represents the device memory where data is located. + + +### Library + +One operator kernel is usually implemented based on one library. `Library` is defined as a enum variable: + +```cpp +enum Library { Plain, MKLDNN, CUDNN }; +``` + +We use `Plain` enumerator to represent default library. Since most operators in Fluid are implemented based on the `Eigen` library, we take `Eigen` library as the `Plain` enumerator. +A library usually has a corresponding `DeviceContext` which contains some handles needed for computation. Fluid now has two default DeviceContexts for CPU and CUDA, namely, `CPUDeviceContext` and `CUDADeviceContext`. `CPUDeviceContext` contains an Eigen library handle and `CDUADeviceContext` contains an Eigen library handle and a cuBLAS handle. + +If we want to support new library, a new enumerator need to be added to `Library` and a corresponding new `LibraryDeviceContext` need to be created. + + +### DataType + + +`DataType` is defined in [framework.proto](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/framework.proto). Currently, int32/int64/fp32/fp64 are supported. + +### Layout + +Actually, a Tensor is a view of a block of memory. Besides a pointer to the memory, we also have to get some other descriptions of this block of memory, such as shape(ddim), stride, and layout. + +Different layout leads to different implementation of the operator kernel. There are mainly 4 principles we have to follow to support layout in our Fluid framework. + +- We take layout as a data member of Tensor. Layout is actually a enum variable. If Fluid is built with MKLDNN, then the memory format in MKLDNN will also be added into this enum variable. + +- Users have to set layout for input data. And some operators like fill_constant/random, also have to set layout for generating data. Of course, we can have some default layout, like NCHW. + +- The inference of Layout is at run-time, not at compile-time. + +- Every operator has to implement different kernels for different layouts. Let's take MKLDNN as an example. 
If we want to implement an MKLDNN convolution operator, we have to implement all the kernels for different layouts, which are listed [here](http://intel.github.io/mkl-dnn/structmkldnn_1_1memory.html). And we will have a special macro to register kernels for MKLDNN operators. + +`Layout` is also defined as a enum variable: + +```cpp +enum Layout { + kNCHW, + kNHWC, +#ifdef PADDLE_WITH_MKLDNN + knChw8c + ... +#endif +}; +``` diff --git a/doc/paddle/design/network/deep_speech_2.md b/doc/paddle/design/network/deep_speech_2.md new file mode 100644 index 0000000000000000000000000000000000000000..256ac69510222c35e69164f0c318c944bdd55598 --- /dev/null +++ b/doc/paddle/design/network/deep_speech_2.md @@ -0,0 +1,235 @@ +# DeepSpeech2 on PaddlePaddle: Design Doc + +We are planning to build Deep Speech 2 (DS2) \[[1](#references)\], a powerful Automatic Speech Recognition (ASR) engine, on PaddlePaddle. For the first-stage plan, we have the following short-term goals: + +- Release a basic distributed implementation of DS2 on PaddlePaddle. +- Contribute a chapter of Deep Speech to PaddlePaddle Book. + +Intensive system optimization and low-latency inference library (details in \[[1](#references)\]) are not yet covered in this first-stage plan. + +## Table of Contents + +- [Tasks](#tasks) +- [Task Dependency](#task-dependency) +- [Design Details](#design-details) + - [Overview](#overview) + - [Row Convolution](#row-convolution) + - [Beam Search With CTC and LM](#beam-search-with-ctc-and-lm) +- [Future Work](#future-work) +- [References](#references) + +## Tasks + +We roughly break down the project into 14 tasks: + +1. Develop an **audio data provider**: + - Json filelist generator. + - Audio file format transformer. + - Spectrogram feature extraction, power normalization etc. + - Batch data reader with SortaGrad. + - Data augmentation (optional). + - Prepare (one or more) public English data sets & baseline. +2. Create a **simplified DS2 model configuration**: + - With only fixed-length (by padding) audio sequences (otherwise need *Task 3*). + - With only bidirectional-GRU (otherwise need *Task 4*). + - With only greedy decoder (otherwise need *Task 5, 6*). +3. Develop to support **variable-shaped** dense-vector (image) batches of input data. + - Update `DenseScanner` in `dataprovider_converter.py`, etc. +4. Develop a new **lookahead-row-convolution layer** (See \[[1](#references)\] for details): + - Lookahead convolution windows. + - Within-row convolution, without kernels shared across rows. +5. Build KenLM **language model** (5-gram) for beam search decoder: + - Use KenLM toolkit. + - Prepare the corpus & train the model. + - Create infererence interfaces (for Task 6). +6. Develop a **beam search decoder** with CTC + LM + WORDCOUNT: + - Beam search with CTC. + - Beam search with external custom scorer (e.g. LM). + - Try to design a more general beam search interface. +7. Develop a **Word Error Rate evaluator**: + - update `ctc_error_evaluator`(CER) to support WER. +8. Prepare internal dataset for Mandarin (optional): + - Dataset, baseline, evaluation details. + - Particular data preprocessing for Mandarin. + - Might need cooperating with the Speech Department. +9. Create **standard DS2 model configuration**: + - With variable-length audio sequences (need *Task 3*). + - With unidirectional-GRU + row-convolution (need *Task 4*). + - With CTC-LM beam search decoder (need *Task 5, 6*). +10. Make it run perfectly on **clusters**. +11. 
Experiments and **benchmarking** (for accuracy, not efficiency): + - With public English dataset. + - With internal (Baidu) Mandarin dataset (optional). +12. Time **profiling** and optimization. +13. Prepare **docs**. +14. Prepare PaddlePaddle **Book** chapter with a simplified version. + +## Task Dependency + +Tasks parallelizable within phases: + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Roadmap   | Description                                | Parallelizable Tasks |
|-----------|--------------------------------------------|----------------------|
| Phase I   | Simplified model & components              | Task 1 ~ Task 8      |
| Phase II  | Standard model & benchmarking & profiling  | Task 9 ~ Task 12     |
| Phase III | Documentations                             | Task 13 ~ Task 14    |
+ + +Issue for each task will be created later. Contributions, discussions and comments are all highly appreciated and welcomed! + +## Design Details + +### Overview + +Traditional **ASR** (Automatic Speech Recognition) pipelines require great human efforts devoted to elaborately tuning multiple hand-engineered components (e.g. audio feature design, accoustic model, pronuncation model and language model etc.). **Deep Speech 2** (**DS2**) \[[1](#references)\], however, trains such ASR models in an end-to-end manner, replacing most intermediate modules with only a single deep network architecture. With scaling up both the data and model sizes, DS2 achieves a very significant performance boost. + +Please read Deep Speech 2 \[[1](#references),[2](#references)\] paper for more background knowledge. + +The classical DS2 network contains 15 layers (from bottom to top): + +- **Two** data layers (audio spectrogram, transcription text) +- **Three** 2D convolution layers +- **Seven** uni-directional simple-RNN layers +- **One** lookahead row convolution layers +- **One** fully-connected layers +- **One** CTC-loss layer + +
+Figure 1. Architecture of Deep Speech 2 Network.
+ +We don't have to persist on this 2-3-7-1-1-1 depth \[[2](#references)\]. Similar networks with different depths might also work well. As in \[[1](#references)\], authors use a different depth (e.g. 2-2-3-1-1-1) for final experiments. + +Key ingredients about the layers: + +- **Data Layers**: + - Frame sequences data of audio **spectrogram** (with FFT). + - Token sequences data of **transcription** text (labels). + - These two type of sequences do not have the same lengthes, thus a CTC-loss layer is required. +- **2D Convolution Layers**: + - Not only temporal convolution, but also **frequency convolution**. Like a 2D image convolution, but with a variable dimension (i.e. temporal dimension). + - With striding for only the first convlution layer. + - No pooling for all convolution layers. +- **Uni-directional RNNs** + - Uni-directional + row convolution: for low-latency inference. + - Bi-direcitional + without row convolution: if we don't care about the inference latency. +- **Row convolution**: + - For looking only a few steps ahead into the feature, instead of looking into a whole sequence in bi-directional RNNs. + - Not nessesary if with bi-direcitional RNNs. + - "**Row**" means convolutions are done within each frequency dimension (row), and no convolution kernels shared across. +- **Batch Normalization Layers**: + - Added to all above layers (except for data and loss layer). + - Sequence-wise normalization for RNNs: BatchNorm only performed on input-state projection and not state-state projection, for efficiency consideration. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Required Components           | PaddlePaddle Support                       | Need to Develop |
|-------------------------------|--------------------------------------------|-----------------|
| Data Layer I (Spectrogram)    | Not supported yet.                         | TBD (Task 3)    |
| Data Layer II (Transcription) | `paddle.data_type.integer_value_sequence`  | -               |
| 2D Convolution Layer          | `paddle.layer.image_conv_layer`            | -               |
| DataType Converter (vec2seq)  | `paddle.layer.block_expand`                | -               |
| Bi-/Uni-directional RNNs      | `paddle.layer.recurrent_group`             | -               |
| Row Convolution Layer         | Not supported yet.                         | TBD (Task 4)    |
| CTC-loss Layer                | `paddle.layer.warp_ctc`                    | -               |
| Batch Normalization Layer     | `paddle.layer.batch_norm`                  | -               |
| CTC-Beam search               | Not supported yet.                         | TBD (Task 6)    |
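As a brief aside on the decoders referenced above (the greedy decoder in Task 2 and the CTC beam search in Task 6): greedy CTC decoding is just a collapse-repeats-then-drop-blanks rule over per-frame argmax labels. The sketch below is a framework-independent illustration; the blank id and the example sequence are arbitrary assumptions. The beam search variant with an external scorer is discussed in the following sections.

```python
# Framework-independent sketch of greedy CTC decoding (illustrative only):
# collapse consecutive repeated labels, then drop the CTC blank symbol.
# Using blank_id = 0 here is an assumption made for this example.
def ctc_greedy_decode(frame_label_ids, blank_id=0):
    decoded = []
    prev = None
    for label in frame_label_ids:
        if label != prev and label != blank_id:
            decoded.append(label)
        prev = label
    return decoded


# Per-frame argmax ids -> collapsed token ids
print(ctc_greedy_decode([0, 3, 3, 0, 0, 5, 5, 5, 0, 3]))  # prints [3, 5, 3]
```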
+ + +### Row Convolution + +TODO by Assignees + +### Beam Search with CTC and LM + +
+
+Figure 2. Algorithm for CTC Beam Search Decoder. +
+ +- The **Beam Search Decoder** for DS2 CTC-trained network follows the similar approach in \[[3](#references)\] as shown in Figure 2, with two important modifications for the ambiguous parts: + - 1) in the iterative computation of probabilities, the assignment operation is changed to accumulation for one prefix may comes from different paths; + - 2) the if condition ```if l^+ not in A_prev then``` after probabilities' computation is deprecated for it is hard to understand and seems unnecessary. +- An **external scorer** would be passed into the decoder to evaluate a candidate prefix during decoding whenever a white space appended in English decoding and any character appended in Mandarin decoding. +- Such external scorer consists of language model, word count or any other custom scorers. +- The **language model** is built from Task 5, with parameters should be carefully tuned to achieve minimum WER/CER (c.f. Task 7) +- This decoder needs to perform with **high efficiency** for the convenience of parameters tuning and speech recognition in reality. + + +## Future Work + +- Efficiency Improvement +- Accuracy Improvement +- Low-latency Inference Library +- Large-scale benchmarking + +## References + +1. Dario Amodei, etc., [Deep Speech 2 : End-to-End Speech Recognition in English and Mandarin](http://proceedings.mlr.press/v48/amodei16.pdf). ICML 2016. +2. Dario Amodei, etc., [Deep Speech 2 : End-to-End Speech Recognition in English and Mandarin](https://arxiv.org/abs/1512.02595). arXiv:1512.02595. +3. Awni Y. Hannun, etc. [First-Pass Large Vocabulary Continuous Speech Recognition using Bi-Directional Recurrent DNNs](https://arxiv.org/abs/1408.2873). arXiv:1408.2873 diff --git a/doc/paddle/design/network/images/LOD-and-shape-changes-during-decoding.jpg b/doc/paddle/design/network/images/LOD-and-shape-changes-during-decoding.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8b0d90f7b9d8184b314b0ee4e521f53eb5f1b455 Binary files /dev/null and b/doc/paddle/design/network/images/LOD-and-shape-changes-during-decoding.jpg differ diff --git a/doc/paddle/design/network/images/beam_search.png b/doc/paddle/design/network/images/beam_search.png new file mode 100644 index 0000000000000000000000000000000000000000..7f7e35f34223162d0f7f0ed97375909c43b830ae Binary files /dev/null and b/doc/paddle/design/network/images/beam_search.png differ diff --git a/doc/paddle/design/network/images/ds2_network.png b/doc/paddle/design/network/images/ds2_network.png new file mode 100644 index 0000000000000000000000000000000000000000..1a5b2184d47928cc2849d5a7c8ea2d8cf5337e11 Binary files /dev/null and b/doc/paddle/design/network/images/ds2_network.png differ diff --git a/doc/paddle/design/network/index_cn.rst b/doc/paddle/design/network/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3557d55fe4dbae1f712e0760ca15111ec6f6792d --- /dev/null +++ b/doc/paddle/design/network/index_cn.rst @@ -0,0 +1,7 @@ +复杂网络设计 +------------ + +.. toctree:: + :maxdepth: 1 + + sequence_decoder.md diff --git a/doc/paddle/design/network/index_en.rst b/doc/paddle/design/network/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..73a7137236bdf0548d35721609351d6deca3013b --- /dev/null +++ b/doc/paddle/design/network/index_en.rst @@ -0,0 +1,7 @@ +Complex Network Design +------------------------ + +.. 
toctree:: + :maxdepth: 1 + + sequence_decoder.md diff --git a/doc/paddle/design/network/sequence_decoder.md b/doc/paddle/design/network/sequence_decoder.md new file mode 100644 index 0000000000000000000000000000000000000000..ec6c99a1cf97d8cf597e0535eff14e913430fda6 --- /dev/null +++ b/doc/paddle/design/network/sequence_decoder.md @@ -0,0 +1,229 @@ +# Design: Sequence Decoder Generating LoDTensors +In tasks such as machine translation and visual captioning, +a [sequence decoder](https://github.com/PaddlePaddle/book/blob/develop/08.machine_translation/README.md) is necessary to generate sequences, one word at a time. + +This documentation describes how to implement the sequence decoder as an operator. + +## Beam Search based Decoder +The [beam search algorithm](https://en.wikipedia.org/wiki/Beam_search) is necessary when generating sequences. It is a heuristic search algorithm that explores the paths by expanding the most promising node in a limited set. + +In the old version of PaddlePaddle, the C++ class `RecurrentGradientMachine` implements the general sequence decoder based on beam search, due to the complexity involved, the implementation relies on a lot of special data structures that are quite trivial and hard to be customized by users. + +There are a lot of heuristic tricks in the sequence generation tasks, so the flexibility of sequence decoder is very important to users. + +During the refactoring of PaddlePaddle, some new concepts are proposed such as: [LoDTensor](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/concepts/lod_tensor.md) and [TensorArray](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/concepts/tensor_array.md) that can better support the sequence usage, and they can also help make the implementation of beam search based sequence decoder **more transparent and modular** . + +For example, the RNN states, candidates IDs and probabilities of beam search can be represented all as `LoDTensors`; +the selected candidate's IDs in each time step can be stored in a `TensorArray`, and `Packed` to the sentences translated. + +## Changing LoD's absolute offset to relative offsets +The current `LoDTensor` is designed to store levels of variable-length sequences. It stores several arrays of integers where each represents a level. + +The integers in each level represent the begin and end (not inclusive) offset of a sequence **in the underlying tensor**, +let's call this format the **absolute-offset LoD** for clarity. + +The absolute-offset LoD can retrieve any sequence very quickly but fails to represent empty sequences, for example, a two-level LoD is as follows +```python +[[0, 3, 9] + [0, 2, 3, 3, 3, 9]] +``` +The first level tells that there are two sequences: +- the first's offset is `[0, 3)` +- the second's offset is `[3, 9)` + +while on the second level, there are several empty sequences that both begin and end at `3`. +It is impossible to tell how many empty second-level sequences exist in the first-level sequences. + +There are many scenarios that rely on empty sequence representation, for example in machine translation or visual captioning, one instance has no translation or the empty candidate set for a prefix. + +So let's introduce another format of LoD, +it stores **the offsets of the lower level sequences** and is called **relative-offset** LoD. 
+ +For example, to represent the same sequences of the above data + +```python +[[0, 3, 6] + [0, 2, 3, 3, 3, 9]] +``` + +the first level represents that there are two sequences, +their offsets in the second-level LoD is `[0, 3)` and `[3, 5)`. + +The second level is the same with the relative offset example because the lower level is a tensor. +It is easy to find out the second sequence in the first-level LoD has two empty sequences. + +The following examples are based on relative-offset LoD. + +## Usage in a simple machine translation model +Let's start from a simple machine translation model that is simplified from the [machine translation chapter](https://github.com/PaddlePaddle/book/tree/develop/08.machine_translation) to draw a blueprint of what a sequence decoder can do and how to use it. + +The model has an encoder that learns the semantic vector from a sequence, and a decoder which uses the sequence encoder to generate new sentences. + +**Encoder** +```python +import paddle as pd + +dict_size = 8000 +source_dict_size = dict_size +target_dict_size = dict_size +word_vector_dim = 128 +encoder_dim = 128 +decoder_dim = 128 +beam_size = 5 +max_length = 120 + +# encoder +src_word_id = pd.data( + name='source_language_word', + type=pd.data.integer_value_sequence(source_dict_dim)) +src_embedding = pd.embedding(size=source_dict_size, size=word_vector_dim) + +src_word_vec = pd.lookup(src_embedding, src_word_id) + +encoder_out_seq = pd.gru(input=src_word_vec, size=encoder_dim) + +encoder_ctx = pd.last_seq(encoder_out_seq) +# encoder_ctx_proj is the learned semantic vector +encoder_ctx_proj = pd.fc( + encoder_ctx, size=decoder_dim, act=pd.activation.Tanh(), bias=None) +``` + +**Decoder** + +```python +def generate(): + decoder = pd.while_loop() + with decoder.step(): + decoder_mem = decoder.memory(init=encoder_ctx) # mark the memory + generated_ids = decoder.memory() # TODO init to batch_size s + generated_scores = decoder.memory() # TODO init to batch_size 1s or 0s + + target_word = pd.lookup(trg_embedding, gendrated_ids) + # expand encoder_ctx's batch to fit target_word's lod + # for example + # decoder_mem.lod is + # [[0 1 3], + # [0 1 3 6]] + # its tensor content is [a1 a2 a3 a4 a5] + # which means there are 2 sentences to translate + # - the first sentence has 1 translation prefixes, the offsets are [0, 1) + # - the second sentence has 2 translation prefixes, the offsets are [1, 3) and [3, 6) + # the target_word.lod is + # [[0, 1, 6] + # [0, 2, 4, 7, 9 12]] + # which means 2 sentences to translate, each has 1 and 5 prefixes + # the first prefix has 2 candidates + # the following has 2, 3, 2, 3 candidates + # the encoder_ctx_expanded's content will be + # [a1 a1 a2 a2 a3 a3 a3 a4 a4 a5 a5 a5] + encoder_ctx_expanded = pd.lod_expand(encoder_ctx, target_word) + decoder_input = pd.fc( + act=pd.activation.Linear(), + input=[target_word, encoder_ctx_expanded], + size=3 * decoder_dim) + gru_out, cur_mem = pd.gru_step( + decoder_input, mem=decoder_mem, size=decoder_dim) + scores = pd.fc( + gru_out, + size=trg_dic_size, + bias=None, + act=pd.activation.Softmax()) + # K is an config + topk_scores, topk_ids = pd.top_k(scores, K) + topk_generated_scores = pd.add_scalar(topk_scores, generated_scores) + + selected_ids, selected_generation_scores = decoder.beam_search( + topk_ids, topk_generated_scores) + + # update the states + decoder_mem.update(cur_mem) # tells how to update state + generated_ids.update(selected_ids) + generated_scores.update(selected_generation_scores) + + decoder.output(selected_ids) 
+ decoder.output(selected_generation_scores) + +translation_ids, translation_scores = decoder() +``` +The `decoder.beam_search` is an operator that, given the candidates and the scores of translations including the candidates, +returns the result of the beam search algorithm. + +In this way, users can customize anything on the input or output of beam search, for example: + +1. Make the corresponding elements in `topk_generated_scores` zero or some small values, beam_search will discard this candidate. +2. Remove some specific candidate in `selected_ids`. +3. Get the final `translation_ids`, remove the translation sequence in it. + +The implementation of sequence decoder can reuse the C++ class: [RNNAlgorithm](https://github.com/Superjom/Paddle/blob/68cac3c0f8451fe62a4cdf156747d6dc0ee000b3/paddle/operators/dynamic_recurrent_op.h#L30), +so the python syntax is quite similar to that of an [RNN](https://github.com/Superjom/Paddle/blob/68cac3c0f8451fe62a4cdf156747d6dc0ee000b3/doc/design/block.md#blocks-with-for-and-rnnop). + +Both of them are two-level `LoDTensors`: + +- The first level represents `batch_size` of (source) sentences. +- The second level represents the candidate ID sets for translation prefix. + +For example, 3 source sentences to translate, and has 2, 3, 1 candidates. + +Unlike an RNN, in sequence decoder, the previous state and the current state have different LoD and shape, and an `lod_expand` operator is used to expand the LoD of the previous state to fit the current state. + +For example, the previous state: + +* LoD is `[0, 1, 3][0, 2, 5, 6]` +* content of tensor is `a1 a2 b1 b2 b3 c1` + +the current state is stored in `encoder_ctx_expanded`: + +* LoD is `[0, 2, 7][0 3 5 8 9 11 11]` +* the content is + - a1 a1 a1 (a1 has 3 candidates, so the state should be copied 3 times for each candidates) + - a2 a2 + - b1 b1 b1 + - b2 + - b3 b3 + - None (c1 has 0 candidates, so c1 is dropped) + +The benefit from the relative offset LoD is that the empty candidate set can be represented naturally. + +The status in each time step can be stored in `TensorArray`, and `Pack`ed to a final LoDTensor. The corresponding syntax is: + +```python +decoder.output(selected_ids) +decoder.output(selected_generation_scores) +``` + +The `selected_ids` are the candidate ids for the prefixes, and will be `Packed` by `TensorArray` to a two-level `LoDTensor`, where the first level represents the source sequences and the second level represents generated sequences. + +Packing the `selected_scores` will get a `LoDTensor` that stores scores of each translation candidate. + +Packing the `selected_generation_scores` will get a `LoDTensor`, and each tail is the probability of the translation. + +## LoD and shape changes during decoding +

+ +

+ +According to the image above, the only phase that changes the LoD is beam search. + +## Beam search design +The beam search algorithm will be implemented as one method of the sequence decoder and has 3 inputs: + +1. `topk_ids`, the top K candidate ids for each prefix. +2. `topk_scores`, the corresponding scores for `topk_ids` +3. `generated_scores`, the score of the prefixes. + +All of these are LoDTensors, so that the sequence affiliation is clear. Beam search will keep a beam for each prefix and select a smaller candidate set for each prefix. + +It will return three variables: + +1. `selected_ids`, the final candidate beam search function selected for the next step. +2. `selected_scores`, the scores for the candidates. +3. `generated_scores`, the updated scores for each prefix (with the new candidates appended). + +## Introducing the LoD-based `Pack` and `Unpack` methods in `TensorArray` +The `selected_ids`, `selected_scores` and `generated_scores` are LoDTensors that exist at each time step, +so it is natural to store them in arrays. + +Currently, PaddlePaddle has a module called `TensorArray` which can store an array of tensors. It is better to store the results of beam search in a `TensorArray`. + +The `Pack` and `UnPack` in `TensorArray` are used to pack tensors in the array to an `LoDTensor` or split the `LoDTensor` to an array of tensors. +It needs some extensions to support the packing or unpacking an array of `LoDTensors`. diff --git a/doc/paddle/design/onnx/images/project_structure.png b/doc/paddle/design/onnx/images/project_structure.png new file mode 100644 index 0000000000000000000000000000000000000000..ab1c2ff23cfff586516876684348bb15bd2084fc Binary files /dev/null and b/doc/paddle/design/onnx/images/project_structure.png differ diff --git a/doc/paddle/design/onnx/onnx_convertor.md b/doc/paddle/design/onnx/onnx_convertor.md new file mode 100644 index 0000000000000000000000000000000000000000..bc1665d7c33eb54cb63e5306a439c1ca67016d1e --- /dev/null +++ b/doc/paddle/design/onnx/onnx_convertor.md @@ -0,0 +1,131 @@ +# Background + +[ONNX (Open Neural Network Exchange)](https://github.com/onnx/onnx) bridges different deep learning frameworks by providing an open source graph format for models. The models trained in other frameworks can be converted into the ONNX format to execute inference by utilizing the built-in operators in ONNX - this is called a **frontend**. With the inverse conversion (called a **backend**), different frameworks can share any models supported by ONNX in principle. Now most mainstream frameworks have joined the ONNX community, e.g. Caffe2, PyTorch, and MXNet etc. And there is a momentum driving more and more vendors to begin supporting ONNX or even choose ONNX as the only machine learning runtime in their devices. + +Therefore, it is necessary to enable the conversion between PaddlePaddle and ONNX. This design doc is aimed at implementing a convertor, mainly for converting between **Fluid** models and ONNX (it is very likely that we may support older v2 models in the future). A complete convertor should be bidirectional - with a frontend AND a backend, but considering the importance, the we will start with the frontend i.e. Fluid models to ONNX models. + + +# How it works + +ONNX has a [working list of operators](https://github.com/onnx/onnx/blob/master/docs/Operators.md) which is versioned. 
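To ground the discussion, the snippet below shows the kind of object a frontend ultimately has to emit, built with ONNX's Python helpers (`make_node`, `make_graph`, `make_model`). The operator (`Relu`), tensor names and shapes are arbitrary illustrative choices, not part of the convertor design.

```python
# Hedged illustration of the frontend's target format: a one-node ONNX model.
# The op, names and shapes are placeholders chosen only for this example.
import onnx
from onnx import helper, TensorProto

# Graph-level inputs/outputs.
x = helper.make_tensor_value_info("x", TensorProto.FLOAT, [1, 3, 224, 224])
y = helper.make_tensor_value_info("y", TensorProto.FLOAT, [1, 3, 224, 224])

# One ONNX node; a convertor would emit one or more of these per Fluid op.
relu_node = helper.make_node("Relu", inputs=["x"], outputs=["y"])

graph = helper.make_graph([relu_node], "toy_graph", inputs=[x], outputs=[y])
model = helper.make_model(graph, producer_name="fluid-onnx-example")

# Run ONNX's built-in structural checks on the generated model.
onnx.checker.check_model(model)
```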
+ +When prioritizing implementation of a frontend over a backend, choice of coverage of Fluid -> ONNX operators comes down to choices of models to be supported (see section `Supported models`). Eventually, this will allow us to reach a really-wide coverage of all operators. + +Here are a few major considerations when it comes to converting models: + +- **Op-level conversion**: How to map the inputs, attributes, and outputs of each Paddle operator to those of the ONNX operator. In several cases, these require transformations. For each direction (frontend vs. backend), a different conversion mapping is needed. +- **Parameters (weights) initialization**: Setting initial parameters on different nodes. +- **Tensor data type mapping** (Note: Some ONNX data types are not supported in Fluid) +- **Network representation adaption**: Fluid `ProgramDesc` include nested blocks. Since ONNX is free of nesting, the `ProgramDesc` ops need to be traversed to only include ops from the global scope in the root block. The variables used as inputs and outputs should also be in this scope. +- **Model validation**: There are two kinds of validations that are necessary: + 1. We need to ensure that the inference outputs of the ops in run inside a model are the same as those when running the ONNX converted ops through an alternative ONNX backend. + 2. Checking to see if the generated nodes on the graph are validated by the internal ONNX checkers. +- **Versioning**: ONNX versions its op listing over versions. In fact, it has versioning on 3 different levels: ops, graphs, and ONNX models. This requires that we are conscious about versioning the convertor and updating tests and op convertor logic for each release. It also implies that we release pre-trained ONNX models upon each version release. + +One thing that makes this conversion more feasible in Fluid's case is the use of a static IR - the `ProgramDesc` - as opposed to a dynamic graph, as created in the cases of frameworks like PyTorch. + + +# Project structure + +

+ +

+ +The project contains four important parts: + +* **fluid**: The directory that contains wrappers for fluid related APIs. Fluid has provided some low-level APIs to parse or generate the inference model. However, directly using these low-level APIs makes the code tediously long. This module wraps low-level APIs to provide simplified interfaces. + +* **onnx**: This is a Python package provided by ONNX containing helpers for creating nodes, graphs, and eventually binary protobuf models with initializer parameters. + +* **onnx_fluid**: Contains two-way mapping (Fluid -> ONNX ops and ONNX -> Fluid ops). Called from `convert.py`, the program uses this mapping along with modifier functions to construct ONNX nodes with the help of ONNX's `make_node` helper. It also contains mapping between datatypes and tensor deprecation / amplification logic. + +* **convert.py**: The interface exposed to users. This will traverse the global program blocks/variables and construct the write-able model. + + +# Usage +The converter should be designed to very easy-to-use. Bidirectional conversion between a Fluid inference model and an ONNX binary model will be supported. Model validation will also provided to verify the correctness of converted model. + +* Convert Fluid inference model to ONNX binary model + + ``` + python convert.py --fluid_model --onnx_model validate True + ``` + +* Validate the converted model + + ``` + python validate.py --fluid_model --onnx_model + ``` + +The conversion and model validation will be completed consecutively, finally output a readable model structure description. And for the converse conversion, users only need to exchange the input and output. + + +# Challenges and mitigation + +## Cycles + +Cycles are unsupported in ONNX. In Paddle, the `while` op is the most prominent example of a cycle. + +*Resolution*: We won't support models with `while`s which can't be substituted until ONNX adds support for such ops. + +## Sequences + +Sequence processing operators like `sequence_expand`, `sequence_reshape`, `sequence_concat`, and `sequence_pool` are not supported by ONNX as well, because they do not support non-padded datatypes like LoDTensors. + +*Resolution*: Since the runtimes using our ONNX exported graphs won't be using LoDTensors in the first place, such sequence operators should be mapped to ONNX ops that will do the necessary transposing ops with the knowledge of the padding and shape of the Tensors. + +## Ops that can't easily be mapped + +There are ops that just aren't possible to map today: + +**Control flow operators** + +Paddle supports control flow ops like `If/Else` and `Switch` (if we ignore the CSP operations like `select` for now). ONNX has `If` support in the experimental phase. + +*Resolution*: Map Paddle's `If/Else` to ONNX's `If`, but ignore other control flow operators until ONNX brings support for them. + + +**Non-existent in Fluid** + +There are several ONNX operators that are not available in Fluid today, e.g. `InstanceNormalization`, `RandomUniform`, `Unsqueeze`, etc. + +*Resolution*: For the initial phase, we can choose to not support ops that our models don't care for and are subsequently not available in Fluid. However, for ops that we think might be necessary for Fluid users also, we must implement them on our side and support the ONNX conversion to them. This list is TBD. + + +**Concurrency** + +ONNX does not have any considerations for concurrency right now. + +*Resolution*: There are two ways to approach this: + +a. 
We choose to not support concurrent models. +b. We only support `go_op`s (basically threads) shallowly. This could mean that we enqueue `go_op` ops prior to gradient calculations OR even prior to the entire graph, and that's it - since `go_op`s do not have support for backprop anyways. One of the core target use cases of `go_op`: batch reading - can be handled through this approach. + + +**Overloaded in Fluid** + +There are ops in ONNX whose job can't be accomplished by a single corresponding Paddle operator (e.g. ), but a collection of operators. + +*Resolution*: Chain multiple Paddle operators. + + +## Lack of LoDTensors + +As stated above, ONNX only supports simple Tensor values. + +*Resolution*: Deprecate to plain old numpy-able tensors. + + +## Reconstruction from deprecated ONNX ops + +For higher-level Fluid ops, such as a few offered by the `nn` layer that do not have direct corresponding mappings but can be converted to ONNX by chaining a series of ops without cycles, it would be useful to map them back to the higher-level Fluid ops once converted back from the deprecated ONNX graphs. + +*Resolution*: Graphs that have the deprecation from Paddle -> ONNX. When converting back from ONNX, if we encounter the identical graphs by doing a forward search, we can replace the subgraphs with the matching ONNX op. + + +# Supported models + +As mentioned above, potential risks may come from the conversion of sequence-related models, including the LodTensor, ```if/else``` and ```while``` operator. So a good choice is to focus on some important feedforward models first, then implement some simple recurrent models. + +- Feedforward models: common models selected in PaddleBook, e.g. VGG, ResNet and some other models proposed by application teams. +- Recurrent models: language model, stacked LSTMs etc. diff --git a/doc/paddle/design/others/auto_gradient_check.md b/doc/paddle/design/others/auto_gradient_check.md new file mode 100644 index 0000000000000000000000000000000000000000..773b7b6a767541f28c27f247c1ad8c9a8a2d0ccf --- /dev/null +++ b/doc/paddle/design/others/auto_gradient_check.md @@ -0,0 +1,150 @@ +## Auto Gradient Check Design + +## Background: +- Generally, it is easy to check whether the forward computation of an Operator is correct or not. However, backpropagation is a notoriously difficult algorithm to debug and get right because of the following challenges: + 1. The formula for backpropagation formula should be correct according to the forward computation. + 2. The Implementation of the above shoule be correct in CPP. + 3. It is difficult to prepare an unbiased test data. + +- Auto gradient checking gets a numerical gradient using forward Operator and uses it as a reference for the backward Operator's result. It has several advantages: + 1. Numerical gradient checker only needs the forward operator. + 2. The user only needs to prepare the input data for forward Operator and not worry about the backward Operator. + +## Mathematical Theory +The following documents from Stanford have a detailed explanation of how to compute the numerical gradient and why it is useful. 
+ +- [Gradient checking and advanced optimization(en)](http://deeplearning.stanford.edu/wiki/index.php/Gradient_checking_and_advanced_optimization) +- [Gradient checking and advanced optimization(cn)](http://ufldl.stanford.edu/wiki/index.php/%E6%A2%AF%E5%BA%A6%E6%A3%80%E9%AA%8C%E4%B8%8E%E9%AB%98%E7%BA%A7%E4%BC%98%E5%8C%96) + + +## Numerical Gradient Implementation +### Python Interface +```python +def get_numerical_gradient(op, + input_values, + output_name, + input_to_check, + delta=0.005, + local_scope=None): + """ + Get Numerical Gradient for the input of an operator. + + :param op: C++ operator instance, could be an network. + :param input_values: The input variables. Should be an dictionary, whose key is + variable name, and value is a numpy array. + :param output_name: The final output variable name. + :param input_to_check: The input variable with respect to which the gradient has to be computed. + :param delta: The perturbation value for numerical gradient method. The + smaller the delta, the more accurate the result. But if the delta is too + small, it will suffer from the numerical stability problem. + :param local_scope: The local scope used for get_numeric_gradient. + :return: The gradient array in numpy format. + """ +``` + +### Explanation: + +- Why do we need an `output_name` + - An Operator may have multiple Outputs, one can compute an independent gradient from each Output. So the caller should specify the name of the output variable. + +- Why do we need `input_to_check` + - One operator can have multiple inputs. Gradient Op can calculate the gradient of these inputs at the same time. But Numerical Gradient needs to calculate them one by one. So `get_numeric_gradient` is designed to calculate the gradient for one input. If you need to compute multiple inputs, you can call `get_numeric_gradient` multiple times each with a different input. + + +### Core Algorithm Implementation + + +```python + # we only compute the gradient of one element a time. + # we use a for loop to compute the gradient of each element. + for i in xrange(tensor_size): + # get one input element using the index i. + original = tensor_to_check.get_float_element(i) + + # add delta to it, run the forward op and then + # get the new value of the result tensor. + x_pos = original + delta + tensor_to_check.set_float_element(i, x_pos) + y_pos = get_output() + + # Subtract delta from this element, run the op again + # and get the new value of the result tensor. + x_neg = original - delta + tensor_to_check.set_float_element(i, x_neg) + y_neg = get_output() + + # restore old value + tensor_to_check.set_float_element(i, original) + + # compute the gradient of this element and store + # it into a numpy array. + gradient_flat[i] = (y_pos - y_neg) / delta / 2 + + # reshape the gradient result to the shape of the source tensor. + return gradient_flat.reshape(tensor_to_check.get_dims()) +``` + +## Auto Gradient Check Framework + +Each Operator Kernel has three kinds of Gradient: + +1. Numerical gradient +2. CPU kernel gradient +3. GPU kernel gradient (if supported by the device) + +The numerical gradient only relies on the forward Operator, so we use the numerical gradient as the reference value. The gradient checking is performed in the following three steps: + +1. Calculate the numerical gradient +2. Calculate CPU kernel gradient with the backward Operator and compare it with the numerical gradient. +3. Calculate GPU kernel gradient with the backward Operator and compare it with the numeric gradient. 
(if supported) + +#### Python Interface + +```python + def check_grad(self, + forward_op, + input_vars, + inputs_to_check, + output_name, + no_grad_set=None, + only_cpu=False, + max_relative_error=0.005): + """ + :param forward_op: used to create backward_op + :param input_vars: numpy value of input variable. The following + computation will use these variables. + :param inputs_to_check: the input variable with respect to which the + gradient will be computed. + :param output_name: The final output variable name. + :param max_relative_error: The relative tolerance parameter. + :param no_grad_set: used to create backward ops + :param only_cpu: only compute and check gradient on cpu kernel. + :return: + """ +``` + +### How to check if two numpy arrays are close enough? +if `abs_numerical_grad` is nearly zero, then use absolute error for numerical_grad. + +```python +numerical_grad = ... +operator_grad = numpy.array(scope.find_var(grad_var_name(name)).get_tensor()) + +abs_numerical_grad = numpy.abs(numerical_grad) +# if abs_numerical_grad is nearly zero, then use abs error for +# numeric_grad, instead of relative error. +abs_numerical_grad[abs_numerical_grad < 1e-3] = 1 + +diff_mat = numpy.abs(abs_numerical_grad - operator_grad) / abs_numerical_grad +max_diff = numpy.max(diff_mat) +``` + + +#### Notes: +The Input data for auto gradient checker should be reasonable to avoid numerical stability problem. + + +#### References: + +- [Gradient checking and advanced optimization(en)](http://deeplearning.stanford.edu/wiki/index.php/Gradient_checking_and_advanced_optimization) +- [Gradient checking and advanced optimization(cn)](http://ufldl.stanford.edu/wiki/index.php/%E6%A2%AF%E5%BA%A6%E6%A3%80%E9%AA%8C%E4%B8%8E%E9%AB%98%E7%BA%A7%E4%BC%98%E5%8C%96) diff --git a/doc/paddle/design/others/dcgan.png b/doc/paddle/design/others/dcgan.png new file mode 100644 index 0000000000000000000000000000000000000000..15e8e290a111ff43900934341365cb4360d87d28 Binary files /dev/null and b/doc/paddle/design/others/dcgan.png differ diff --git a/doc/paddle/design/others/gan_api.md b/doc/paddle/design/others/gan_api.md new file mode 100644 index 0000000000000000000000000000000000000000..b00c0c5706d7243bbb46916b38ea066709827cf8 --- /dev/null +++ b/doc/paddle/design/others/gan_api.md @@ -0,0 +1,253 @@ +# Design for GAN + +GAN (General Adversarial Net [https://arxiv.org/abs/1406.2661]) is an important model for unsupervised learning and widely used in many areas. + +It applies several important concepts in machine learning system design, including building and running subgraphs, dependency tracing, different optimizers in one executor and so forth. + +In our GAN design, we wrap it as a user-friendly easily customized python API to design different models. We take the conditional DC-GAN (Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks [https://arxiv.org/abs/1511.06434]) as an example due to its good performance on image generation. + +

+
+Figure 1. The overall running logic of GAN. The black solid arrows indicate the forward pass; the green dashed arrows indicate the backward pass of generator training; the red dashed arrows indicate the backward pass of the discriminator training. The BP pass of the green (red) arrow should only update the parameters in the green (red) boxes. The diamonds indicate the data providers. d\_loss and g\_loss marked in red and green are the two targets we would like to run. +

+ +The operators, layers and functions required/optional to build a GAN demo is summarized in https://github.com/PaddlePaddle/Paddle/issues/4563. + +

+
+Figure 2. Photo borrowed from the original DC-GAN paper. +

+ +## The Conditional-GAN might be a class. +This design we adopt the popular open source design in https://github.com/carpedm20/DCGAN-tensorflow and https://github.com/rajathkmp/DCGAN. It contains following data structure: + +- DCGAN(object): which contains everything required to build a GAN model. It provides following member functions methods as API: + +- __init__(...): Initialize hyper-parameters (like conv dimension and so forth), and declare model parameters of discriminator and generator as well. + +- generator(z, y=None): Generate a fake image from input noise z. If the label y is provided, the conditional GAN model will be chosen. +Returns a generated image. + +- discriminator(image): +Given an image, decide if it is from a real source or a fake one. +Returns a 0/1 binary label. + +- build_model(self): +build the whole GAN model, define training loss for both generator and discrimator. + +## Discussion on Engine Functions required to build GAN +- Trace the tensor and variable dependency in the engine executor. (Very critical, otherwise GAN can'be be trained correctly) +- Different optimizers responsible for optimizing different loss. + +To be more detailed, we introduce our design of DCGAN as following: + +### Class member Function: Initializer +- Set up hyper-parameters, including condtional dimension, noise dimension, batch size and so forth. +- Declare and define all the model variables. All the discriminator parameters are included in the list self.theta_D and all the generator parameters are included in the list self.theta_G. +```python +class DCGAN(object): + def __init__(self, y_dim=None): + + # hyper parameters + self.y_dim = y_dim # conditional gan or not + self.batch_size = 100 + self.z_dim = z_dim # input noise dimension + + # define parameters of discriminators + self.D_W0 = pd.Variable(shape=[3,3, 1, 128], data=pd.gaussian_normal_randomizer()) + self.D_b0 = pd.Variable(np.zeros(128)) # variable also support initialization using a numpy data + self.D_W1 = pd.Variable(shape=[784, 128], data=pd.gaussian_normal_randomizer()) + self.D_b1 = pd.Variable(np.zeros(128)) # variable also support initialization using a numpy data + self.D_W2 = pd.Varialble(np.random.rand(128, 1)) + self.D_b2 = pd.Variable(np.zeros(128)) + self.theta_D = [self.D_W0, self.D_b0, self.D_W1, self.D_b1, self.D_W2, self.D_b2] + + # define parameters of generators + self.G_W0 = pd.Variable(shape=[784, 128], data=pd.gaussian_normal_randomizer()) + self.G_b0 = pd.Variable(np.zeros(128)) # variable also support initialization using a numpy data + self.G_W1 = pd.Variable(shape=[784, 128], data=pd.gaussian_normal_randomizer()) + self.G_b1 = pd.Variable(np.zeros(128)) # variable also support initialization using a numpy data + self.G_W2 = pd.Varialble(np.random.rand(128, 1)) + self.G_b2 = pd.Variable(np.zeros(128)) + self.theta_G = [self.G_W0, self.G_b0, self.G_W1, self.G_b1, self.G_W2, self.G_b2] +``` + +### Class member Function: Generator +- Given a noisy input z, returns a fake image. +- Concatenation, batch-norm, FC operations required; +- Deconv layer required, which is missing now... 
+```python
+class DCGAN(object):
+  def generator(self, z, y = None):
+    # input z: the random noise
+    # input y: input data label (optional)
+    # output G_im: generated fake images
+
+    if self.y_dim: # conditional GAN: concatenate the label with the noise
+      z = pd.layer.concat(1, [z, y])
+
+    G_h0 = pd.layer.fc(z, self.G_W0, self.G_b0)
+    G_h0_bn = pd.layer.batch_norm(G_h0)
+    G_h0_relu = pd.layer.relu(G_h0_bn)
+
+    G_h1 = pd.layer.deconv(G_h0_relu, self.G_W1, self.G_b1)
+    G_h1_bn = pd.layer.batch_norm(G_h1)
+    G_h1_relu = pd.layer.relu(G_h1_bn)
+
+    G_h2 = pd.layer.deconv(G_h1_relu, self.G_W2, self.G_b2)
+    G_im = pd.layer.tanh(G_h2)
+    return G_im
+```
+
+### Class member function: Discriminator
+- Given an image, returns the logit of whether it is real or fake.
+- Concatenation, Convolution, batch-norm, FC, Leaky-ReLU operations required;
+```python
+class DCGAN(object):
+  def discriminator(self, image):
+    # input image: either generated images or real ones
+    # output D_h2: binary logit of the label
+
+    D_h0 = pd.layer.conv2d(image, w=self.D_W0, b=self.D_b0)
+    D_h0_bn = pd.layer.batch_norm(D_h0)
+    D_h0_relu = pd.layer.lrelu(D_h0_bn)
+
+    D_h1 = pd.layer.conv2d(D_h0_relu, w=self.D_W1, b=self.D_b1)
+    D_h1_bn = pd.layer.batch_norm(D_h1)
+    D_h1_relu = pd.layer.lrelu(D_h1_bn)
+
+    D_h2 = pd.layer.fc(D_h1_relu, w=self.D_W2, b=self.D_b2)
+    return D_h2
+```
+
+### Class member function: Build the model
+- Define data readers as placeholders to hold the data;
+- Build the generator and the discriminator;
+- Define two training losses, one for the discriminator and one for the generator.
+If we have an execution dependency engine to back-trace all tensors, the module building our GAN model will look like this:
+```python
+class DCGAN(object):
+  def build_model(self):
+    if self.y_dim:
+        self.y = pd.data(pd.float32, [self.batch_size, self.y_dim])
+    self.images = pd.data(pd.float32, [self.batch_size, self.im_size, self.im_size])
+    self.faked_images = pd.data(pd.float32, [self.batch_size, self.im_size, self.im_size])
+    self.z = pd.data(pd.float32, [None, self.z_dim])
+
+    # step 1: generate images by generator, classify real/fake images with discriminator
+    if self.y_dim: # if conditional GAN, include the label
+        self.G = self.generator(self.z, self.y)
+        self.D_t = self.discriminator(self.images)
+        # generated fake images
+        self.sampled = self.sampler(self.z, self.y)
+        self.D_f = self.discriminator(self.G)
+    else: # original version of GAN
+        self.G = self.generator(self.z)
+        self.D_t = self.discriminator(self.images)
+        # generate fake images
+        self.sampled = self.sampler(self.z)
+        self.D_f = self.discriminator(self.G)
+
+    # step 2: define the two losses
+    self.d_loss_real = pd.reduce_mean(pd.cross_entropy(self.D_t, np.ones(self.batch_size)))
+    self.d_loss_fake = pd.reduce_mean(pd.cross_entropy(self.D_f, np.zeros(self.batch_size)))
+    self.d_loss = self.d_loss_real + self.d_loss_fake
+
+    self.g_loss = pd.reduce_mean(pd.cross_entropy(self.D_f, np.ones(self.batch_size)))
+```
+
+If we do not have a dependency engine but blocks, the module building our GAN model will look like this:
+```python
+class DCGAN(object):
+  def build_model(self, default_block):
+    # input data in the default block
+    if self.y_dim:
+        self.y = pd.data(pd.float32, [self.batch_size, self.y_dim])
+    self.images = pd.data(pd.float32, [self.batch_size, self.im_size, self.im_size])
+    # self.faked_images = pd.data(pd.float32, [self.batch_size, self.im_size, self.im_size])
+    self.z = pd.data(pd.float32, [None, self.z_dim])
+
+    # step 1: generate images by generator, classify real/fake images with discriminator
+    with pd.default_block().g_block():
+      if self.y_dim: # if conditional GAN, include the label
+        self.G = self.generator(self.z, self.y)
+        self.D_g = self.discriminator(self.G, self.y)
+      else: # original version of GAN
+        self.G = self.generator(self.z)
+        self.D_g = self.discriminator(self.G)
+      self.g_loss = pd.reduce_mean(pd.cross_entropy(self.D_g, np.ones(self.batch_size)))
+
+    with pd.default_block().d_block():
+      if self.y_dim: # if conditional GAN, include the label
+        self.D_t = self.discriminator(self.images, self.y)
+        self.D_f = self.discriminator(self.G, self.y)
+      else: # original version of GAN
+        self.D_t = self.discriminator(self.images)
+        self.D_f = self.discriminator(self.G)
+
+      # step 2: define the two losses
+      self.d_loss_real = pd.reduce_mean(pd.cross_entropy(self.D_t, np.ones(self.batch_size)))
+      self.d_loss_fake = pd.reduce_mean(pd.cross_entropy(self.D_f, np.zeros(self.batch_size)))
+      self.d_loss = self.d_loss_real + self.d_loss_fake
+```
+Some remaining confusions and problems with this design:
+- D\_g and D\_f are actually the same thing, but have to be written twice; i.e., if we want to run two sub-graphs conceptually, the same code has to be written twice if it is shared by both graphs.
+- It requires the ability to create a block anywhere, rather than only inside if-else or RNN constructs;
+
+## Main function for the demo:
+Generally, the user of the GAN demo only needs to do the following things:
+- Define a DCGAN object;
+- Build the DCGAN model;
+- Specify two optimizers for the two different losses, each with respect to its own parameters.
+```python
+# pd for short, should be more concise.
+import paddle as pd
+import numpy as np
+import logging
+
+if __name__ == "__main__":
+    # dcgan class in the default graph/block
+    # if we use a dependency engine as tensorflow does,
+    # the code will be slightly different, like:
+    # dcgan = DCGAN()
+    # dcgan.build_model()
+    with pd.block() as def_block:
+      dcgan = DCGAN()
+      dcgan.build_model(def_block)
+
+    # load mnist data
+    data_X, data_y = load_mnist()
+
+    # Two subgraphs required!!!
+    with pd.block().d_block():
+      d_optim = pd.train.Adam(lr = .001, beta= .1)
+      d_step = d_optim.minimize(dcgan.d_loss, dcgan.theta_D)
+    with pd.block().g_block():
+      g_optim = pd.train.Adam(lr = .001, beta= .1)
+      g_step = g_optim.minimize(dcgan.g_loss, dcgan.theta_G)
+
+    # executor
+    sess = pd.executor()
+
+    # training
+    for epoch in xrange(10000):
+      for batch_id in range(N / batch_size):
+        idx = ...
+        # sample a batch
+        batch_im, batch_label = data_X[idx:idx+batch_size], data_y[idx:idx+batch_size]
+        # sample z
+        batch_z = np.random.uniform(-1., 1., [batch_size, z_dim])
+
+        # alternate between one discriminator step and one generator step
+        if batch_id % 2 == 0:
+          sess.run(d_step,
+                   feed_dict = {dcgan.images: batch_im,
+                                dcgan.y: batch_label,
+                                dcgan.z: batch_z})
+        else:
+          sess.run(g_step,
+                   feed_dict = {dcgan.z: batch_z})
+```
+
+## More thinking about dependency engine vs. block design:
+- What if we just want to run an intermediate result? Do we need to run the whole block/graph?
+- Should we call eval() to get the fake images in the first stage, and then train the discriminator in the second stage?
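+
+To make the first question concrete, the following sketch reuses the hypothetical `pd` executor and `feed_dict` semantics from the pseudocode above; it is an illustration of the question rather than an existing API:
+```python
+# A minimal sketch, not a working API: with a dependency engine, running an
+# intermediate target such as dcgan.G should only trigger the operators that
+# dcgan.G depends on (the generator forward pass), not the discriminator or
+# either of the losses.
+batch_z = np.random.uniform(-1., 1., [batch_size, z_dim])
+fake_images = sess.run(dcgan.G, feed_dict={dcgan.z: batch_z})
+
+# With the block design, the analogous request is less clear: we would either
+# need a block that contains only the generator forward pass, or a way to run
+# a subset of a block's operators up to dcgan.G.
+```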
diff --git a/doc/paddle/design/others/graph.md b/doc/paddle/design/others/graph.md new file mode 100644 index 0000000000000000000000000000000000000000..7519a65df835a39fe14f6ef45530afff170191ff --- /dev/null +++ b/doc/paddle/design/others/graph.md @@ -0,0 +1,70 @@ +# Design Doc: Computations as a Graph + +A primary goal of the refactorization of PaddlePaddle is a more flexible representation of deep learning computation, in particular, a graph of operators and variables, instead of sequences of layers as before. + +This document explains that the construction of a graph as three steps: + +- construct the forward part +- construct the backward part +- construct the optimization part + +## The Construction of a Graph + +Let us take the problem of image classification as a simple example. The application program that trains the model looks like: + +```python +x = layer.data("images") +l = layer.data("label") +y = layer.fc(x) +cost = layer.mse(y, l) +optimize(cost) +train(cost, reader=mnist.train()) +``` + +### Forward Part + +The first four lines of above program build the forward part of the graph. + +![](images/graph_construction_example_forward_only.png) + +In particular, the first line `x = layer.data("images")` creates variable x and a Feed operator that copies a column from the minibatch to x. `y = layer.fc(x)` creates not only the FC operator and output variable y, but also two parameters, W and b, and the initialization operators. + +Initialization operators are kind of "run-once" operators -- the `Run` method increments a class data member counter so to run at most once. By doing so, a parameter wouldn't be initialized repeatedly, say, in every minibatch. + +In this example, all operators are created as `OpDesc` protobuf messages, and all variables are `VarDesc`. These protobuf messages are saved in a `BlockDesc` protobuf message. + +### Backward Part + +The fifth line `optimize(cost)` calls two functions, `ConstructBackwardGraph` and `ConstructOptimizationGraph`. + +`ConstructBackwardGraph` traverses the forward graph in the `BlockDesc` protobuf message and builds the backward part. + +![](images/graph_construction_example_forward_backward.png) + +According to the chain rule of gradient computation, `ConstructBackwardGraph` would + +1. create a gradient operator G for each operator F, +1. make all inputs, outputs, and outputs' gradient of F as inputs of G, +1. create gradients for all inputs of F, except for those who don't have gradients, like x and l, and +1. make all these gradients as outputs of G. + +### Optimization Part + +For each parameter, like W and b created by `layer.fc`, marked as double circles in above graphs, `ConstructOptimizationGraph` creates an optimization operator to apply its gradient. Here results in the complete graph: + +![](images/graph_construction_example_all.png) + +## Block and Graph + +The word block and graph are interchangable in the desgin of PaddlePaddle. A [Block](https://github.com/PaddlePaddle/Paddle/pull/3708) is a metaphore of the code and local variables in a pair of curly braces in programming languages, where operators are like statements or instructions. A graph of operators and variables is a representation of the block. + +A Block keeps operators in an array `BlockDesc::ops` + +```protobuf +message BlockDesc { + repeated OpDesc ops = 1; + repeated VarDesc vars = 2; +} +``` + +in the order that they appear in user programs, like the Python program at the beginning of this article. 
We can imagine that in `ops`, we have some forward operators, followed by some gradient operators, and then some optimization operators. diff --git a/doc/paddle/design/others/graph_survey.md b/doc/paddle/design/others/graph_survey.md new file mode 100644 index 0000000000000000000000000000000000000000..faf70f47206e5973333b8feac0178e42827b03fa --- /dev/null +++ b/doc/paddle/design/others/graph_survey.md @@ -0,0 +1,232 @@ +## Survey on Graph + +Neural network framework often provides symbolic API for users to write network topology conveniently. This doc manily focus on symbolic API in most popular neural network frameworks, and try to find out how to parse symbolic configuration to a portable file, such as protobuf or json. + +### Mxnet + +The core concept of symbolic API is `Symbol`. Mxnet implements `Symbol` class in C++, and export to Python using C-API. Please refer to the comments in Mxnet: + + +`Symbol` is help class used to represent the operator node in Graph. +`Symbol` acts as an interface for building graphs from different components like Variable, Functor and Group. `Symbol` is also exported to python front-end (while Graph is not) to enable quick test and deployment. Conceptually, symbol is the final operation of a graph and thus including all the information required (the graph) to evaluate its output value. + + +A simple network topology wrote by Symbol is as follows: + +```python +def get_symbol(num_classes=10, **kwargs): + data = mx.symbol.Variable('data') + data = mx.symbol.Flatten(data=data) + fc1 = mx.symbol.FullyConnected(data = data, name='fc1', num_hidden=128) + act1 = mx.symbol.Activation(data = fc1, name='relu1', act_type="relu") + fc2 = mx.symbol.FullyConnected(data = act1, name = 'fc2', num_hidden = 64) + act2 = mx.symbol.Activation(data = fc2, name='relu2', act_type="relu") + fc3 = mx.symbol.FullyConnected(data = act2, name='fc3', num_hidden=num_classes) + mlp = mx.symbol.SoftmaxOutput(data = fc3, name = 'softmax') + return mlp +``` + + + +Varible here is actually a Symbol. Every basic Symbol will correspond to one Node, and every Node has its own AnyAttr. There is a op field in AnyAttr class, when a Symbol represents Variable(often input data), the op field is null. + +Symbol contains a data member, std::vector outputs, and NodeEntry cantains a poniter to Node. We can follow the Node pointer to get all the Graph. + +And Symbol can be saved to a Json file. + +Here is a detailed example: + +``` +>>> import mxnet as mx +>>> data = mx.symbol.Variable('data') +>>> print data.debug_str() +Variable:data + +>>> data = mx.symbol.Flatten(data=data) +>>> print data.debug_str() +Symbol Outputs: + output[0]=flatten0(0) +Variable:data +-------------------- +Op:Flatten, Name=flatten0 +Inputs: + arg[0]=data(0) version=0 + +>>> fc1 = mx.symbol.FullyConnected(data = data, name='fc1', num_hidden=128) +>>> print fc1.debug_str() +Symbol Outputs: + output[0]=fc1(0) +Variable:data +-------------------- +Op:Flatten, Name=flatten0 +Inputs: + arg[0]=data(0) version=0 +Variable:fc1_weight +Variable:fc1_bias +-------------------- +Op:FullyConnected, Name=fc1 +Inputs: + arg[0]=flatten0(0) + arg[1]=fc1_weight(0) version=0 + arg[2]=fc1_bias(0) version=0 +Attrs: + num_hidden=128 + +``` + + +### TensorFlow + + +The core concept of symbolic API is `Tensor`. Tensorflow defines `Tensor` in Python. Please refer to the comments in TensorFlow: + +A `Tensor` is a symbolic handle to one of the outputs of an `Operation`. 
It does not hold the values of that operation's output, but instead provides a means of computing those values in a TensorFlow [Session](https://www.tensorflow.org/api_docs/python/tf/Session). + +A simple example is as follows: + +```python + # Build a dataflow graph. + c = tf.constant([[1.0, 2.0], [3.0, 4.0]]) + d = tf.constant([[1.0, 1.0], [0.0, 1.0]]) + e = tf.matmul(c, d) + + # Construct a `Session` to execute the graph. + sess = tf.Session() + + # Execute the graph and store the value that `e` represents in `result`. + result = sess.run(e) +``` + + +The main method of `Tensor` is as follows: + + +```python +@property +def op(self): + """The `Operation` that produces this tensor as an output.""" + return self._op + +@property +def dtype(self): + """The `DType` of elements in this tensor.""" + return self._dtype + +@property +def graph(self): + """The `Graph` that contains this tensor.""" + return self._op.graph + +@property +def name(self): + """The string name of this tensor.""" + if not self._op.name: + raise ValueError("Operation was not named: %s" % self._op) + return "%s:%d" % (self._op.name, self._value_index) + +@property +def device(self): + """The name of the device on which this tensor will be produced, or None.""" + return self._op.device +``` + + +Tensor can be taken as target to run by session. Tensor contains all the information of Graph, and tracks data dependency. + + +Here is a detailed example: + + +``` +>>> import tensorflow as tf +>>> c = tf.constant([[1.0, 2.0], [3.0, 4.0]]) +>>> print c.graph + +>>> d = tf.constant([[1.0, 1.0], [0.0, 1.0]]) +>>> print d.graph + +>>> e = tf.matmul(c, d) +>>> print e.graph + +``` + +### Dynet + + +The core concept of symbolic API is `Expression`, and Dynet defines `Expression` class in C++. + + +A simple example is as follows: + +```cpp +ComputationGraph cg; +Expression W = parameter(cg, pW); + +Expression in = input(cg, xs[i]); +Expression label = input(cg, ys[i]); +Expression pred = W * in; +Expression loss = square(pred - label); +``` + +The input data and parameter are also represented by Expression. Every basci Expression corresponds to a Node. And input data is also a Node. + +Expression has a data member ComputationGraph, and ComputationGraph will be modified in users' configuring process. Expression can be a running target, beacuse Expression contains all dependency. + + +Here is a detailed example: + +write topology in C++ + +``` +ComputationGraph cg; +Expression W = parameter(cg, pW); +cg.print_graphviz(); + +Expression pred = W * xs[i]; +cg.print_graphviz(); + +Expression loss = square(pred - ys[i]); +cg.print_graphviz(); +``` + +compile and print + +``` +# first print +digraph G { + rankdir=LR; + nodesep=.05; + N0 [label="v0 = parameters({1}) @ 0x7ffe4de00110"]; +} +# second print +digraph G { + rankdir=LR; + nodesep=.05; + N0 [label="v0 = parameters({1}) @ 0x7ffe4de00110"]; + N1 [label="v1 = v0 * -0.98"]; + N0 -> N1; +} +# third print +digraph G { + rankdir=LR; + nodesep=.05; + N0 [label="v0 = parameters({1}) @ 0x7ffe4de00110"]; + N1 [label="v1 = v0 * -0.98"]; + N0 -> N1; + N2 [label="v2 = -1.88387 - v1"]; + N1 -> N2; + N3 [label="v3 = -v2"]; + N2 -> N3; + N4 [label="v4 = square(v3)"]; + N3 -> N4; +} +``` + +### Conclusion + + +Actually, Symbol/Tensor/Expression in Mxnet/TensorFlow/Dynet are the same level concepts. We use a unified name Expression here, this level concept has following features: + +- Users wirte topoloy with symbolic API, and all return value is Expression, including input data and parameter. 
+- Expression corresponds with a global Graph, and Expression can also be composed. +- Expression tracks all dependency and can be taken as a run target diff --git a/doc/paddle/design/others/images/graph_construction_example.bash b/doc/paddle/design/others/images/graph_construction_example.bash new file mode 100755 index 0000000000000000000000000000000000000000..35e6997abd17588e17a82d448918fc1b3bd7220e --- /dev/null +++ b/doc/paddle/design/others/images/graph_construction_example.bash @@ -0,0 +1,11 @@ +cat ./graph_construction_example.dot | \ + sed 's/color=red/color=red, style=invis/g' | \ + sed 's/color=green/color=green, style=invis/g' | \ + dot -Tpng > graph_construction_example_forward_only.png + +cat ./graph_construction_example.dot | \ + sed 's/color=green/color=green, style=invis/g' | \ + dot -Tpng > graph_construction_example_forward_backward.png + +cat ./graph_construction_example.dot | \ + dot -Tpng > graph_construction_example_all.png diff --git a/doc/paddle/design/others/images/graph_construction_example.dot b/doc/paddle/design/others/images/graph_construction_example.dot new file mode 100644 index 0000000000000000000000000000000000000000..e115f9844bae6ad24f638c8ed4749cea8aff06a9 --- /dev/null +++ b/doc/paddle/design/others/images/graph_construction_example.dot @@ -0,0 +1,68 @@ +digraph ImageClassificationGraph { + ///////// The forward part ///////// + FeedX [label="Feed", color=blue, shape=box]; + FeedY [label="Feed", color=blue, shape=box]; + InitW [label="Init", color=blue, shape=diamond]; + Initb [label="Init", color=blue, shape=diamond]; + FC [label="FC", color=blue, shape=box]; + MSE [label="MSE", color=blue, shape=box]; + + x [label="x", color=blue, shape=oval]; + l [label="l", color=blue, shape=oval]; + y [label="y", color=blue, shape=oval]; + W [label="W", color=blue, shape=doublecircle]; + b [label="b", color=blue, shape=doublecircle]; + cost [label="cost", color=blue, shape=oval]; + + FeedX -> x -> FC -> y -> MSE -> cost [color=blue]; + FeedY -> l [color=blue]; + InitW -> W [color=blue]; + Initb -> b [color=blue]; + W -> FC [color=blue]; + b -> FC [color=blue]; + l -> MSE [color=blue]; + + ////////// The backward part ///////// + MSE_Grad [label="MSE_grad", color=red, shape=box]; + FC_Grad [label="FC_grad", color=red, shape=box]; + + d_cost [label="d cost", color=red, shape=oval]; + d_y [label="d y", color=red, shape=oval]; + d_b [label="d b", color=red, shape=oval]; + d_W [label="d W", color=red, shape=oval]; + + cost -> MSE_Grad [color=red]; + d_cost -> MSE_Grad [color=red]; + l -> MSE_Grad [color=red]; + y -> MSE_Grad -> d_y [color=red]; + + x -> FC_Grad [color=red]; + y -> FC_Grad [color=red]; + d_y -> FC_Grad [color=red]; + W -> FC_Grad -> d_W [color=red]; + b -> FC_Grad -> d_b [color=red]; + + ////////// The optimizaiton part ////////// + + OPT_W [label="SGD", color=green, shape=box]; + OPT_b [label="SGD", color=green, shape=box]; + + W -> OPT_W [color=green]; + b -> OPT_b [color=green]; + d_W -> OPT_W -> W [color=green]; + d_b -> OPT_b -> b [color=green]; + + ////////// Groupings ////////// + + subgraph clusterMSE { + style=invis; + MSE; + MSE_Grad; + } + + subgraph clusterFC { + style=invis; + FC; + FC_Grad; + } +} diff --git a/doc/paddle/design/others/images/graph_construction_example_all.png b/doc/paddle/design/others/images/graph_construction_example_all.png new file mode 100644 index 0000000000000000000000000000000000000000..261611a5721f9aa97874f7e6d897fe48cf667db2 Binary files /dev/null and 
b/doc/paddle/design/others/images/graph_construction_example_all.png differ diff --git a/doc/paddle/design/others/images/graph_construction_example_forward_backward.png b/doc/paddle/design/others/images/graph_construction_example_forward_backward.png new file mode 100644 index 0000000000000000000000000000000000000000..4c69687f4a6a181138f3df72ce5e8aa48487b5be Binary files /dev/null and b/doc/paddle/design/others/images/graph_construction_example_forward_backward.png differ diff --git a/doc/paddle/design/others/images/graph_construction_example_forward_only.png b/doc/paddle/design/others/images/graph_construction_example_forward_only.png new file mode 100644 index 0000000000000000000000000000000000000000..e668c16e0cac73acb4e5dc2b1827557ae77126b4 Binary files /dev/null and b/doc/paddle/design/others/images/graph_construction_example_forward_only.png differ diff --git a/doc/paddle/design/others/parameters_in_cpp.md b/doc/paddle/design/others/parameters_in_cpp.md new file mode 100644 index 0000000000000000000000000000000000000000..6b13e38cff7d3c6269c8d77643b4ec8cecbe126c --- /dev/null +++ b/doc/paddle/design/others/parameters_in_cpp.md @@ -0,0 +1,41 @@ +# Design Doc: The C++ Class `Parameters` + +`Parameters` is a concept we designed in PaddlePaddle V2 API. `Parameters` is a container of parameters, which makes PaddlePaddle capable of sharing parameter between topologies. We described usages of `Parameter` in [api.md](./api.md). + +We used Python to implement Parameters when designing V2 API before. There are several defects for the current implementation: +* We just use `memcpy` to share Parameters between topologies, but this is very inefficient. +* We did not support sharing Parameters while training. We just trigger `memcpy` when start training. + +It is necessary that we implement Parameters in CPP side. However, it could result a code refactoring for PaddlePaddle, because PaddlePaddle was designed for training only one topology before, i.e., each GradientMachine contains its Parameter as a data member. In current PaddlePaddle implementation, there are three concepts associated with `Parameters`: + +1. `paddle::Parameter`. A `Parameters` is a container for `paddle::Parameter`. +It is evident that we should use `paddle::Parameter` when developing `Parameters`. +However, the `Parameter` class contains many functions and does not have a clear interface. +It contains `create/store Parameter`, `serialize/deserialize`, `optimize(i.e SGD)`, `randomize/zero`. +When we developing `Parameters`, we only use `create/store Parameter` functionality. +We should extract functionalities of Parameter into many classes to clean PaddlePaddle CPP implementation. + +2. `paddle::GradientMachine` and its sub-classes, e.g., `paddle::MultiGradientMachine`, `paddle::NeuralNetwork`. +We should pass `Parameters` to `paddle::GradientMachine` when `forward/backward` to avoid `memcpy` between topologies. +Also, we should handle multi-GPU/CPU training, because `forward` and `backward` would perform on multi-GPUs and multi-CPUs. +`Parameters` should dispatch the parameter value to each device, and gather the parameter gradient from each device. + +3. `paddle::ParameterUpdater`. The ParameterUpdater is used to update parameters in Paddle. +So `Parameters` should be used by `paddle::ParameterUpdater`, and `paddle::ParameterUpdater` should optimize `Parameters` (by SGD). + + +The step by step approach for implementation Parameters in PaddlePaddle C++ core is listed below. 
Each step should be a PR and could be merged into PaddlePaddle one by one. + +1. Clean `paddle::Parameter` interface. Extract the functionalities of `paddle::Parameter` to prepare for the implementation of Parameters. + +2. Implementation a `Parameters` class. It just stores the `paddle::Parameter` inside. Make `GradientMachine` uses `Parameters` as a class member. + +3. Make `Parameters` support Multi-CPU and Multi-GPU training to prepare for sharing `Parameter` between topologies. +Because we need share `Parameters` between topologies, it is `Parameters`'s response to exchange Parameters between GPUs. +`GradientMachine` should not handle how to exchange Parameters because `GradientMachine` only used to train one topology and we need to support train many topologies in Paddle, i.e., there could be many GradientMachines use one `Parameters`. + * We should use a global function to exchange Parameters between GPUs, not a member function in `Parameters`. The `MultiGradientMachine` invoke this function, which uses `Parameters` as this function inputs. + * The MultiGradientMachine contains many functionalities. Extracting the Parameters exchanging logic could make MultiGradientMachine clearer and simpler. + +4. Make `Parameters` as an argument for `forward/backward` function, not a data member for `GradientMachine`. For example, `forward` could be `forward(const Parameters& params, ...)` and `backward` could be `backward(Parameters* params, ...)`. After this step, Paddle could share `Parameters` between topologies. + +5. `ParameterUpdater` is invoked by `GradientMachine` and `Trainer`, but it updates `Parameters`. In the end of this code refactoring, we could change `ParameterUpdater` directly uses `Parameters` to make `ParameterUpdater`'s implementation clear. diff --git a/doc/paddle/design/others/releasing_process.md b/doc/paddle/design/others/releasing_process.md new file mode 100644 index 0000000000000000000000000000000000000000..5b321611687043cb402ada3589cdba0e927cd9ec --- /dev/null +++ b/doc/paddle/design/others/releasing_process.md @@ -0,0 +1,68 @@ +# PaddlePaddle发行规范 + +PaddlePaddle使用git-flow branching model做分支管理,使用[Semantic Versioning](http://semver.org/)标准表示PaddlePaddle版本号。 + +PaddlePaddle每次发新的版本,遵循以下流程: + +1. 从`develop`分支派生出新的分支,分支名为`release/版本号`。例如,`release/0.10.0` +1. 将新分支的版本打上tag,tag为`版本号rc.Patch号`。第一个tag为`0.10.0rc1`,第二个为`0.10.0rc2`,依次类推。 +1. 对这个版本的提交,做如下几个操作: + * 修改`python/setup.py.in`中的版本信息,并将`istaged`字段设为`True`。 + * 编译这个版本的Docker发行镜像,发布到dockerhub。如果失败,修复Docker编译镜像问题,Patch号加一,返回第二步 + * 编译这个版本的Ubuntu Deb包。如果失败,修复Ubuntu Deb包编译问题,Patch号加一,返回第二步。 + * 使用Regression Test List作为检查列表,测试Docker镜像/ubuntu安装包的功能正确性 + * 如果失败,记录下所有失败的例子,在这个`release/版本号`分支中,修复所有bug后,Patch号加一,返回第二步 + * 编译这个版本的python wheel包,并发布到pypi。 + * 由于pypi.python.org目前遵循[严格的命名规范PEP 513](https://www.python.org/dev/peps/pep-0513),在使用twine上传之前,需要重命名wheel包中platform相关的后缀,比如将`linux_x86_64`修改成`manylinux1_x86_64`。 + * pypi上的package名称为paddlepaddle和paddlepaddle_gpu,如果要上传GPU版本的包,需要修改build/python/setup.py中,name: "paddlepaddle_gpu"并重新打包wheel包:`python setup.py bdist_wheel`。 + * 上传方法: + ``` + cd build/python + pip install twine + twine upload dist/[package to upload] + ``` +1. 第三步完成后,将`release/版本号`分支合入master分支,并删除`release/版本号`分支。将master分支的合入commit打上tag,tag为`版本号`。同时再将`master`分支合入`develop`分支。最后删除`release/版本号`分支。 +1. 编译master分支的Docker发行镜像,发布到dockerhub。编译ubuntu的deb包,发布到github release页面 +1. 
协同完成Release Note的书写 + + +需要注意的是: + +* `release/版本号`分支一旦建立,一般不允许再从`develop`分支合入`release/版本号`。这样保证`release/版本号`分支功能的封闭,方便测试人员测试PaddlePaddle的行为。 +* 在`release/版本号`分支存在的时候,如果有bugfix的行为,需要将bugfix的分支同时merge到`master`, `develop`和`release/版本号`这三个分支。 + +## PaddlePaddle 分支规范 + +PaddlePaddle开发过程使用[git-flow](http://nvie.com/posts/a-successful-git-branching-model/)分支规范,并适应github的特性做了一些区别。 + +* PaddlePaddle的主版本库遵循[git-flow](http://nvie.com/posts/a-successful-git-branching-model/)分支规范。其中: + * `master`分支为稳定(stable branch)版本分支。每一个`master`分支的版本都是经过单元测试和回归测试的版本。 + * `develop`分支为开发(develop branch)版本分支。每一个`develop`分支的版本都经过单元测试,但并没有经过回归测试。 + * `release/版本号`分支为每一次Release时建立的临时分支。在这个阶段的代码正在经历回归测试。 + +* 其他用户的fork版本库并不需要严格遵守[git-flow](http://nvie.com/posts/a-successful-git-branching-model/)分支规范,但所有fork的版本库的所有分支都相当于特性分支。 + * 建议,开发者fork的版本库使用`develop`分支同步主版本库的`develop`分支 + * 建议,开发者fork的版本库中,再基于`develop`版本fork出自己的功能分支。 + * 当功能分支开发完毕后,向PaddlePaddle的主版本库提交`Pull Reuqest`,进而进行代码评审。 + * 在评审过程中,开发者修改自己的代码,可以继续在自己的功能分支提交代码。 + +* BugFix分支也是在开发者自己的fork版本库维护,与功能分支不同的是,BugFix分支需要分别给主版本库的`master`、`develop`与可能有的`release/版本号`分支,同时提起`Pull Request`。 + +## PaddlePaddle回归测试列表 + +本列表说明PaddlePaddle发版之前需要测试的功能点。 + +### PaddlePaddle Book中所有章节 + +PaddlePaddle每次发版本首先要保证PaddlePaddle Book中所有章节功能的正确性。功能的正确性包括验证PaddlePaddle目前的`paddle_trainer`训练和纯使用`Python`训练模型正确性。 + +| | 新手入门章节 | 识别数字 | 图像分类 | 词向量 | 情感分析 | 语意角色标注 | 机器翻译 | 个性化推荐 | +| --- | --- | --- | --- | --- | --- | --- | --- | --- | +| API.V2 + Docker + GPU | | | | | | | | | +| API.V2 + Docker + CPU | | | | | | | | | +| `paddle_trainer` + Docker + GPU | | | | | | | | | +| `paddle_trainer` + Docker + CPU | | | | | | | | | +| API.V2 + Ubuntu + GPU | | | | | | | | | +| API.V2 + Ubuntu + CPU | | | | | | | | | +| `paddle_trainer` + Ubuntu + GPU | | | | | | | | | +| `paddle_trainer` + Ubuntu + CPU | | | | | | | | | diff --git a/doc/paddle/design/others/simple_op_design.md b/doc/paddle/design/others/simple_op_design.md new file mode 100644 index 0000000000000000000000000000000000000000..c93c21affe9d6c79bde30e652248f2464401c8f3 --- /dev/null +++ b/doc/paddle/design/others/simple_op_design.md @@ -0,0 +1,202 @@ +## Interaction between C++ and Python + +Users employ API in Python to describe their own network, however, the network construction actually happens in C++. so Protobuf is introduced to send the message between Python and C++. + +The Interaction between Python and C++ can be simplified as two steps: + +1. C++ tells Python how many Ops there are, and what parameter do users need to offer to initialize a new Op. Python then builds API for each Op at compile time. + +2. Users invoke APIs built by Python and provide necessary parameters. These parameters will be sent to C++ for finishing the Op construction task. + +### Message from C++ to Python + +We define a Protobuf message class `OpProto` to hold message needed in the first step. What should an `OpProto` contain? This question is equivalent to “What message do we need to offer, to build a Python API which is legal and user oriented and can use to describe a whole Op.” + +Following message are necessary: + +1. Op's name, and its simple comment. +2. Input and output variable number; each variable's name, type, and comment. +3. Op's attributes; each attribute includes name, type, comment, **default value** and **value range**. 
+ +So `OpProto` can be defined as follows: + +```proto +enum AttrType { + INT = 1; + FLOAT = 2; + STRING = 3; + INTS = 4; + FLOATS = 5; + STRINGS = 6; +}; + +message AttrValue { + AttrType type = 1; + optional int iv = 2; + optional float fv = 3; + optional string sv = 4; + repeated int ivs = 5; + repeated float fvs = 6; + repeated string svs = 7; +}; + +message AttrProto { + required string name = 1; + required string comment = 2; + required AttrType type = 3; +}; + +message VarProto { + required string name = 1; + required string comment = 2; + required bool is_tensor = 3; +}; + +message OpProto { + repeated VarProto inputs = 1; + repeated VarProto outputs = 2; + repeated AttrProto attrs = 3; + required string type = 4; + required string comment = 5; +}; +``` + +To generate Python code automatically: + +```python +def create_python_ops_creatation_functions(): + op_protos = paddle.framework.OpRegistry.get_all_op_proto() + for type_name in op_protos: + op_proto = op_protos[type_name] + def __impl__(**kwargs): # User must use key word args in Paddle API + inputs = [kwargs.get(ipt.name, "") for ipt in op_proto.inputs] + outputs = [kwargs.get(opt.name, "") for opt in op_proto.outputs] + attrs = [cast_to_op_attr(attr, kwargs.get(attr.name, None)) for attr in op_proto.attrs] + opdesc = (input, outputs, type_name, attrs) + return paddle.framework.OpRegistry.CreateOp(opdesc) + __impl__.__doc__ = create_doc_string(op_proto) + globals()[type_name] = __impl__ + +create_python_ops_creatation_functions() +``` + +### Message from Python to C++ + +To hold message needed in the above second step, we define Protobuf message class `OpDesc`. It is used to hold user-specified parameters in Op describing. + +```proto +message OpDesc { + required string type = 1; + repeated string inputs = 2; + repeated string outputs = 3; + map attrs = 4; +}; +``` + +## OpProto Register + +Every Op has its own `OpProto`. For using convenience, we need to register them and record all their messages. For each `Op` class, we define a corresponding `OpMaker` class, in whose constructor we implement the `OpProto`'s building process. `OpMaker`'s constructor will be invoked by another function `OpRegistry::RegisterOp()`. + +```cpp +class OpProtoMaker { +public: + OpProtoMaker(OpProto* proto): proto_(proto) {} +protected: + OpProto* proto_; + void AddInput(const std::string& name, const std::string& desc) {...} + void AddAttr(const std::string& name, const std::string& desc, TypeId type) {...} + void AddComment(const std::string& comment) { ... } +}; + +class OpRegistry { +public: + using OpCreator = std::function; + + template + static void RegisterOp(const std::string& name) { + gCreators_[name] = [](const OpDesc& desc) { + return new OpType(desc); + }; + OpProto& opProto = gProtos_[name]; + OpMaker()(&opProto); + } + + static map gCreators_; + static map gProtos_; +}; + +template +class OpRegister { + public: + OpRegister(std::string type) { + OpRegistry::RegisterOp(type); + } +}; + +#define REGISTER_OP(op_class, op_maker_class, type_name) \ + class op_class##Register { \ + private: \ + const static OpRegister<#op_class, #op_maker_class> reg; \ + }; \ + const Register op_class##Register::reg(#type_name); + +class CosineOp { +// ... 
+} + +struct CosineOpProtoMaker : public OpProtoMaker { + CosineOpProtoMaker(OpProto* proto) : OpProtoMaker(proto) { + AddInput("input", "input of cosine op"); + AddAttr("scale", "scale of cosine op", float).Default(1.0).GreaterThan(0.0); + AddType("cos"); + AddComment("This is cos op"); + } +} + +REGISTER_OP(CosineOp, CosineOpProtoMaker, cos); +``` + +In `REGISTER_OP(CosineOp, CosineOpProtoMaker, cos)`, we register not only `CosineOp` but also `CosineOpProto`. As fields of `CosineOpProto`, the default value and value range of `scale` are also registered here. + +## Python API + +Python APIs are divided into two types, high-level API and low-level API. + +### High-Level API + +High-level API is called by users directly, so it should keep its style consistent with existing V2 APIs. + +Here is a sample about how a define a fc layer: + +```python +hd = fc_layer(input=data, size=56, with_bias=True, activation="sigmoid"); +``` + +`hd` is the output of `fc_layer` and it's a `variable`. It can be further sent into other layers as input. + +The definition of `fc_layer()`: + +```python +def fc_layer(input, size, with_bias, activation): + attr_map = {"size":size} + check_attrs(attr_map) + w = make_variable('w') + if with_bias: + b = make_variable('b') + else: + b = None + fc_output = make_variable('fc_output'); + fc_op(input, w, b, fc_output, attr_map) + act_output = make_variable('sigmod_output'); + if activation == "sigmod": + sigmod_op(fc_output, act_output); + elif: + # ... + return act_output; +``` + +### Low Leval API + +In above sample, `fc_op` and `sigmod_op` are low-level API. They build `OpDesc` and invoke corresponding C++ code. + +*TODO* diff --git a/doc/paddle/design/others/test.dot b/doc/paddle/design/others/test.dot new file mode 100644 index 0000000000000000000000000000000000000000..62c69b8fc8010a26a54a6ee8ef1488aad94d747a --- /dev/null +++ b/doc/paddle/design/others/test.dot @@ -0,0 +1,35 @@ + +digraph Test { + z -> generator -> G_img; + G_img -> discriminator -> D_f -> d_loss_f; + label0 -> d_loss_f -> d_loss; + + img -> discriminator -> D_t -> d_loss_t; + label1 -> d_loss_t -> d_loss; + + d_loss -> d_loss_t[color=red, style=dashed]; + d_loss -> d_loss_f[color=red, style=dashed]; + d_loss_t -> D_t[color=red, style=dashed]; + d_loss_f -> D_f[color=red, style=dashed]; + D_t -> discriminator[color=red, style=dashed]; + D_f -> discriminator[color=red, style=dashed]; + + D_f -> g_loss; + label2 -> g_loss; + + g_loss -> D_f[color=green, style=dashed]; + D_f -> discriminator[color=green, style=dashed]; + discriminator -> G_img[color=green, style=dashed]; + G_img -> generator[color=green, style=dashed]; + + discriminator [color=red, shape=box]; + generator [color=green, shape=box]; + z [shape=diamond]; + img [shape=diamond]; + label0 [shape=diamond]; + label1 [shape=diamond]; + label2 [shape=diamond]; + + d_loss [color=red]; + g_loss [color=green]; +} diff --git a/doc/paddle/design/others/test.dot.png b/doc/paddle/design/others/test.dot.png new file mode 100644 index 0000000000000000000000000000000000000000..4e121a40b9f7b2232d7cdda315bad15926446f55 Binary files /dev/null and b/doc/paddle/design/others/test.dot.png differ diff --git a/doc/paddle/design/quantization/channel.png b/doc/paddle/design/quantization/channel.png new file mode 100644 index 0000000000000000000000000000000000000000..b68b5bb9b7e6c450ffbaa1add71bb91a8bc0ecf0 Binary files /dev/null and b/doc/paddle/design/quantization/channel.png differ diff --git a/doc/paddle/design/quantization/dequant_formula.png 
b/doc/paddle/design/quantization/dequant_formula.png new file mode 100644 index 0000000000000000000000000000000000000000..117770e29cdee78a4c40fee3736e1c49e0e74428 Binary files /dev/null and b/doc/paddle/design/quantization/dequant_formula.png differ diff --git a/doc/paddle/design/quantization/dynamic.png b/doc/paddle/design/quantization/dynamic.png new file mode 100644 index 0000000000000000000000000000000000000000..ee3c89043b29f4fde895160584c7d0106788a945 Binary files /dev/null and b/doc/paddle/design/quantization/dynamic.png differ diff --git a/doc/paddle/design/quantization/fixed_point_quantization.md b/doc/paddle/design/quantization/fixed_point_quantization.md new file mode 100644 index 0000000000000000000000000000000000000000..6969bf38221aa58fdd39ece2843649f76777be64 --- /dev/null +++ b/doc/paddle/design/quantization/fixed_point_quantization.md @@ -0,0 +1,110 @@ +Fixed-point quantization uses lower bits, for example, 2-bit, 3-bit or 8-bit fixed point to represent weights and activations, which usually are in singe-precision float-point with 32 bits. The fixed-point representation has advantages in reducing memory bandwidth, lowering power consumption and computational resources as well as the model storage requirements. It is especially important for the inference in embedded-device deployment. + +According to some experiments, the apporach to quantize the model trained in float point directly works effectively on the large models, like the VGG model having many parameters. But the accuracy drops a lot for the small model. In order to improve the tradeoff between accuracy and latency, many quantized training apporaches are proposed. + +This document is to design a quantized training framework on Fluid. The first part will introduce how to quantize, The second part will describe the quantized training framework. The last part will illustrate how to calculate the quantization scale. + + +### How to quantize + +There are many ways to quantize the float value to fixed-point value. For example: + +$$ r = min(max(x, a), b)$$ +$$ s = \frac{b - a}{n - 1} $$ +$$ q = \left \lfloor \frac{r - a}{s} \right \rceil $$ + +where, $x$ is the float value to be quantized, $[a, b]$ is the quantization range, $a$ is the minimum value and $b$ is the maximal value. $\left \lfloor \right \rceil$ denotes rounding to the nearest integer. If the quantization level is $k$, $n$ is $2^k$, for example, $k$ is 8 and $n$ is 256. $q$ is the quantized integer. + + +The quantization we applied is parameterized by the number of quantization levels and maximum absolute value: + +$$ M = max(abs(x)) $$ +$$ q = \left \lfloor \frac{x}{M} * (n - 1) \right \rceil $$ + +where, $x$ is the float value to be quantized, $M$ is maximum absolute value. $\left \lfloor \right \rceil$ denotes rounding to the nearest integer. For 8 bit quantization, $n=2^{8}=256$. $q$ is the quantized integer. + + +Wether the *min-max* quantization or *max-abs* quantization, they also can be represent: + +$q = scale * r + b$ + +We call *min-max*, *max-abs* as the quantization arguments, also call them quantization scale or quantization range. + + +How to calculate the quantization scale (or maximum absolute value) for inference will be described in the last part. + + +### Training Framework + +#### Forward pass + +The forward pass is simulated quantization, see Figure 1. + +The training framework is as following figure. + +

+
+Figure 1. Forward in training with simulated quantization. +

+ +- Firstly, both input and weight will be quantized to 8-bit integers. +- Second, do the multiplication (or convolution) operation with integers. +- Third, dequantize the multiplication (or convolution) results to 32-bit float point. +- Finally, do bias-addition in float type of 32 bit. Here, the bias is not quantized. + +For general matrix multiplication (GEMM), quantize for $X$ and $W$: + +$$ X_q = \left \lfloor \frac{X}{X_m} * (n - 1) \right \rceil $$ +$$ W_q = \left \lfloor \frac{W}{W_m} * (n - 1) \right \rceil $$ + +Do GEMM: + +$$ Y = X_q * W_q $$ + + +Dequantize $Y$: + +$$ +\begin{align} +Y_{dq} &=\frac{Y}{(n - 1) * (n - 1)} * X_m * W_m \\\ + &=\frac{X_q * W_q}{(n - 1) * (n - 1)} * X_m * W_m \\\ + &=(\frac{X_q}{n - 1} * X_m) * (\frac{W_q}{n - 1} * W_m) +\end{align} +$$ + +From these formulas, dequantization also can be moved before GEMM, do dequantization for $Xq$ and $Wq$ at first, then do GEMM. The forward workflow in training is equivalent to following framework. + +

+
+Figure 2. Equivalent forward in training with simulated quantization. +
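+
+The equivalence derived above can be checked numerically with a short NumPy sketch. This is only an illustration of the max-abs formulas; the helper name `quantize_max_abs` is made up for the example and is not a PaddlePaddle API:
+
+```python
+import numpy as np
+
+def quantize_max_abs(x, num_bits=8):
+    # max-abs quantization: q = round(x / M * (n - 1)), with n = 2^num_bits
+    n = 2 ** num_bits
+    m = np.abs(x).max()
+    return np.round(x / m * (n - 1)), m
+
+X = np.random.randn(4, 8).astype(np.float32)
+W = np.random.randn(8, 3).astype(np.float32)
+n = 2 ** 8
+
+Xq, Xm = quantize_max_abs(X)
+Wq, Wm = quantize_max_abs(W)
+
+# integer GEMM followed by de-quantization ...
+Y_dq = (Xq @ Wq) / ((n - 1) * (n - 1)) * Xm * Wm
+# ... equals a float GEMM on the individually de-quantized operands
+Y_eq = (Xq / (n - 1) * Xm) @ (Wq / (n - 1) * Wm)
+
+assert np.allclose(Y_dq, Y_eq)        # the two forward workflows match
+print(np.abs(Y_dq - X @ W).max())     # quantization error w.r.t. the float GEMM
+```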

+ +We use this equivalent workflow in the training. In our desigin, there is a quantization transpiler to insert the quantization operator and the de-quantization operator in the Fluid `ProgramDesc`. Since the outputs of quantization and de-quantization operator are still in floating point, they are called faked quantization and de-quantization operator. And the training framework is called simulated quantization. + +#### Backward pass + +See Figure 3. The gradients are calculated by dequantized weights and activations. All inputs and outputs are float point with 32-bit. And in the weight updating process, the gradients will be added to the original weight, not the quantized or dequantized weights. + +

+
+Figure 3. Backward and weight updating in training with simulated quantization. +
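+
+As a rough sketch of this scheme (a toy NumPy regression assuming the max-abs formulas above; this is not the actual transpiler output), the gradients flow through the de-quantized weights while the update is applied to the float32 master weights:
+
+```python
+import numpy as np
+
+def fake_quant_dequant(w, num_bits=8):
+    # max-abs quantize, then immediately de-quantize ("fake" quantization)
+    n = 2 ** num_bits
+    m = np.abs(w).max()
+    return np.round(w / m * (n - 1)) / (n - 1) * m
+
+rng = np.random.RandomState(0)
+W = rng.randn(8, 3).astype(np.float32)            # float32 master weights
+for _ in range(100):                              # toy regression towards W_target = 1
+    X = rng.randn(16, 8).astype(np.float32)
+    Y_true = X @ np.ones((8, 3), np.float32)
+    W_dq = fake_quant_dequant(W)                  # forward pass uses de-quantized weights
+    Y = X @ W_dq
+    grad_W = X.T @ (2 * (Y - Y_true) / len(X))    # gradient w.r.t. the de-quantized weights
+    W -= 0.05 * grad_W                            # but the update modifies the float weights
+```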

+ +So the quantization transipler will change some inputs of the corresponding backward operators. + +### How to calculate quantization scale + +There are two strategies to calculate quantization scale, we call them dynamic and static strategy. The dynamic strategy calculates the quantization scale value each iteration. The static strategy keeps the quantization scale for different inputs. + +For weights, we apply the dynamic strategy in the training, that is to say, the quantization scale will be recalculated during each iteration until the traning is finished. + +For activations, the quantization scales are estimated during training, then used in inference. There are several different ways to estimate them: + + +1. Calculate the mean of maximum absolute during a window. +2. Calculate the max of maximum absolute during a window. +3. Calculate the running mean of maximum absolute during a window, as follows: + + $$ Vt = (1 - k) * V + k * V_{t-1} $$ + + where, $V$ is the maximum absolute value of current batch, $Vt$ is the running mean value. $k$ is a factor, such as 0.9. diff --git a/doc/paddle/design/quantization/quantization_backward_and_optimization.png b/doc/paddle/design/quantization/quantization_backward_and_optimization.png new file mode 100644 index 0000000000000000000000000000000000000000..84f8235ab87cb631992b691f8e05b9c0b6c93da2 Binary files /dev/null and b/doc/paddle/design/quantization/quantization_backward_and_optimization.png differ diff --git a/doc/paddle/design/quantization/quantization_equivalent_forward.png b/doc/paddle/design/quantization/quantization_equivalent_forward.png new file mode 100644 index 0000000000000000000000000000000000000000..df49c864537c047c785da12d24893e54ce0a5341 Binary files /dev/null and b/doc/paddle/design/quantization/quantization_equivalent_forward.png differ diff --git a/doc/paddle/design/quantization/quantization_forward.png b/doc/paddle/design/quantization/quantization_forward.png new file mode 100644 index 0000000000000000000000000000000000000000..0913f61621bb6533bcb10bd1d18120ccaaa96cff Binary files /dev/null and b/doc/paddle/design/quantization/quantization_forward.png differ diff --git a/doc/paddle/design/quantization/static.png b/doc/paddle/design/quantization/static.png new file mode 100644 index 0000000000000000000000000000000000000000..8d2faaf6e727944824aaa5e1363c0b596e58aeb0 Binary files /dev/null and b/doc/paddle/design/quantization/static.png differ diff --git a/doc/paddle/design/quantization/training_quantization_model_format.md b/doc/paddle/design/quantization/training_quantization_model_format.md new file mode 100644 index 0000000000000000000000000000000000000000..55811b3721ef4e551bac1ab68dc8caafd70e2db8 --- /dev/null +++ b/doc/paddle/design/quantization/training_quantization_model_format.md @@ -0,0 +1,143 @@ +# 量化训练模型格式说明 + +PaddlePaddle框架主要支持动态量化和静态量化两种量化训练模式。其中,动态量化会在每次推断过程中动态计算量化比例系数的值,而静态量化则对不同的输入采用相同的量化比例系数值。 对于权重而言,在训练过程中采用动态量化模式。换句话说,在每次迭代过程中量化比例系数均会被重新计算得到直至训练过程结束。 对于激活而言,可以选择动态量化模式也可以选择静态量化模式。若选择使用静态量化模式,则量化比例系数会在训练过程中被评估求得,且在推断过程中被使用(不同的输入均保持不变)。除此之外,卷积权重的动态量化亦包括两种形式:1)Tensor-wise量化,即直接求取整个权重Tensor的量化scale值(单一值);2)Channel-wise量化,即对权重Tensor按照channel维度进行分片,然后求取每个通道Tensor的scale值。 + +## 1. 
Tensor-wise量化 +### 1.1 动态量化 + +动态量化主要通过`fake_quantize_abs_max`op实现,该op对输入tensor进行量化并输出值域在-127~+127范围内的量化tensor。`fake_quantize_abs_max`op在对输入tensor进行量化时使用的量化scale是动态计算出来的,即取输入tensor元素的绝对值最大值。动态计算出的量化scale会作为反量化op的一个输入,用于求取反量化tensor。下面是对`fake_quantize_abs_max`op的整体描述: + +``` +fake_quantize_abs_max { + inputs { + X(Tensor): 激活tensor或权重tensor + } + + outputs { + Out(Tensor): 已量化tensor + OutScale(Tensor): 动态计算得到的量化scale,其元素个数为1(tensor-wise量化) + } + + attrs { + bit_length(int): 量化bit数,如8-bit + } +} +``` + +### 1.2 静态量化 + +与动态量化不同,静态量化的量化scale是在量化训练时通过**窗口滑动平均**或者**窗口绝对值最大值**等方法计算求得的。静态量化主要通过`fake_quantize_moving_average_abs_max`op或者`fake_quantize_range_abs_max`op实现,它们利用输入的量化scale将输入tensor量化到-127~127值域范围内。`fake_quantize_moving_average_abs_max`op和`fake_quantize_range_abs_max`op的输入和输出格式都是一样的,不同点在于op内部计算量化scale时使用的策略不同。`fake_quantize_moving_average_abs_max`op使用一个窗口内绝对值最大值的滑动平均值作为量化sacle,而`fake_quantize_range_abs_max`op使用一个窗口内绝对值最大值的最大值作为量化sacle。下面以`fake_quantize_moving_average_abs_max`op为例,对其进行整体描述: + +``` +fake_quantize_moving_average_abs_max { + inputs { + X(Tensor): 一般为激活tensor + InScale(Tensor): 量化训练中计算求得的scale + } + + outputs { + Out(Tensor): 已量化tensor + OutScale(Tensor): 量化训练中计算求得的scale,用于继续传递到反量化op + } + + attrs { + is_test(bool): 指明是量化训练过程还是预测推断过程 + bit_length(int): 量化bit数,如8-bit + moving_rate(float): 滑动平均衰减系数 + } +} +``` + +### 1.3 反量化 + +无论是静态量化还是动态量化,在量化计算完成后都需要进行反量化操作,该操作即是通过`fake_dequantize_abs_max`op实现的。具体来说,`fake_quantize_abs_max`op负责将fp32数值映射到int8值域(-127~127),而`fake_dequantize_abs_max` op是将int8数值映射到fp32值域。 + +

+
+

+ +根据[量化训练的原理](https://github.com/PaddlePaddle/models/blob/develop/PaddleSlim/docs/tutorial.md#1-quantization-aware-training%E9%87%8F%E5%8C%96%E4%BB%8B%E7%BB%8D)可知,`fake_dequantize_abs_max` op主要通过公式1-3-1进行反量化操作。在实现中,`fake_dequantize_abs_max` op将激活scale作为Variable(Tensor)进行输入($X_{scale}$),将公式1-3-1中关于scale的剩余部分作为max\_range属性(即公式1-3-2)。`fake_dequantize_abs_max` op的整体描述如下: + +``` +fake_dequantize_abs_max { + inputs { + X(Tensor): 输入tensor + Scale(Tensor): 激活scale + } + + outputs { + Out(Tensor): 已反量化tensor + } + + attrs { + max_range(float): 根据公式1-3-2和公式1-3-3计算所得 + } +} +``` + +## 2. 卷积权重Channel-wise量化 +### 2.1 分channel量化 + +分channel量化与动态量化类似,也是将输入tensor量化到-127~+127值域范围内,不同之处在于分channel量化会对tensor按照channel维度进行分片,然后求取每个通道tensor的scale值。在PaddlePaddle框架中,`fake_channel_wise_quantize_abs_max`op实现了分channel量化的逻辑。注意,目前仅对权重进行分channel量化,对激活是不进行分channel量化的,并且分channel量化只作用在卷积操作上(包括`conv2d`和`depthwise_conv2d`)。下面是对`fake_channel_wise_quantize_abs_max`op的整体描述: + +``` +fake_channel_wise_quantize_abs_max { + inputs { + X(Tensor): 权重tensor + } + + outputs { + Out(Tensor): 已量化tensor + OutScale(Tensor): 分channel计算得到的scale,其元素个数与输入tensor的通道数相同 + } + + attrs { + bit_length(int): 量化bit数,如8-bit + } +} +``` + +### 2.2 分channel反量化 +若对卷积权重进行了分channel量化,则反量化操作时必须采用分channel反量化。`fake_channel_wise_dequantize_max_abs`op实现了分channel反量化的逻辑,它的输入Scales包括两个scale tensor,即激活scale和权重scale。根据2.1节的描述可知,权重采用的是channel-wise量化而激活采用的是tensor-wise量化,所以激活scale对应的tensor仅包含一个值而权重scale对应的tensor包括输出通道数个值。下面是对`fake_channel_wise_dequantize_max_abs`op的整体描述: + +``` +fake_channel_wise_dequantize_max_abs { + inputs { + X(Tensor): 输入tensor + Scales(Tensor List): 一般包括两个tensor,且第一个为权重scale,第二个为激活scale + } + + outputs { + Out(Tensor): 已反量化tensor + } + + attrs { + quant_bits(int list): 一般包括两个整数值,分别为求取Scales中不同scale值时对应的量化bit数。 + } +} +``` + +## 3. 注意点 + +1) 8-bit量化训练中采用有符号的int8进行量化,且所有的scale都是没有除以127。 + +2)以上描述中仅`fake_dequantize_abs_max`op将scale的一部分作为属性值,其他op的scale均作为输入Variable(Tensor)。 + +3)若之后为量化训练添加新的量化op或反量化op,**建议使用Variable(Tensor)作为scale的数据传递方式**。因为量化训练的主要目的就是为了求取合适的量化/反量化scale,而将这些scale信息作为tensor的方式存储下来会方便后续scale数据向其他格式的转换。 + +## 4. 附录图解 + +

+
+图1: 动态量化,其中卷积权重已预先进行量化 +

+ +

+
+图2: 静态量化,其中卷积权重已预先进行量化 +

+ +

+
+图3: 分channel量化,其中卷积权重已预先进行分channel量化 +

diff --git a/doc/paddle/faq/index_cn.rst b/doc/paddle/faq/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..27884511d1664d2fb1e9730b3d12c1a6f1fbb3c1 --- /dev/null +++ b/doc/paddle/faq/index_cn.rst @@ -0,0 +1,29 @@ +############## +常见问题 +############## +如果您在使用Paddle框架开发过程中遇到了使用咨询类的问题,希望快速得到官方的答疑和指导,可以先来FAQ中查阅 + +FAQ模块根据用户的常见问题给出经验性的指导意见以及一些开发tips,能让您快速解决棘手问题并快速验证自己的想法。 + +FAQ以常见问答对的形式收录了用户在使用Paddle框架时的高频使用咨询类问题,包括 `安装类 <../faq/install_cn.html>`_ 、 `模型框架类 <../faq/train_cn.html>`_ 、 `其他常见问题 <../faq/others_cn.html>`_ 。 其中 `模型框架类 <../faq/train_cn.html>`_ 包含了 `数据处理 <../faq/train_cn.html#id1>`_ 、 `模型搭建 <../faq/train_cn.html#id4>`_、 `模型训练 <../faq/train_cn.html#id6>`_、 `应用预测 <../faq/train_cn.html#id14>`_、 `参数调整 <../faq/train_cn.html#id15>`_ 几类问题。 + +如果FAQ无法解决您的问题,您也可以在PaddlePaddle的 `Github Issue `_ 中进行提问,我们会有专门的技术人员为您解答。 + +FAQ问题集: + +.. toctree:: + :maxdepth: 1 + + + install_cn.md +.. toctree:: + :maxdepth: 2 + + + train_cn.md +.. toctree:: + :maxdepth: 1 + + + others_cn.md + diff --git a/doc/paddle/faq/install_cn.md b/doc/paddle/faq/install_cn.md new file mode 100644 index 0000000000000000000000000000000000000000..31a4e88107e7959651c4865b381da0dcf523e88f --- /dev/null +++ b/doc/paddle/faq/install_cn.md @@ -0,0 +1,153 @@ +# 安装类FAQ + +##### 问题:Windows环境下,使用pip install时速度慢,如何解决? + ++ 解决方案: + +在pip后面加上参数`-i`指定pip源,使用国内源获取安装包。 + ++ 操作步骤: + +1. Python2情况下,使用如下命令安装PaddlePaddle。 + + `pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple/` + +2. Python3情况下,使用如下命令安装PaddlePaddle。 + + `pip3 install paddlepaddle -i https://mirror.baidu.com/pypi/simple/` + +您还可以通过如下三个地址获取pip安装包,只需修改 `-i` 后网址即可: + +https://pypi.tuna.tsinghua.edu.cn/simple +https://mirrors.aliyun.com/pypi/simple/ +https://pypi.douban.com/simple/ + +------ + +##### 问题:使用pip install时报错,`PermissionError: [WinError 5]` ,如何解决? + ++ 问题描述: + +使用pip install时报错,`PermissionError: [WinError 5]` , + +`C:\\program fiels\\python35\\Lib\\site-packages\\graphviz`。 + ++ 报错分析: + +用户权限问题导致,由于用户的Python安装到系统文件内(如”Program Files/“),任何的操作都需要管理员权限。 + ++ 解决方法: + +选择“以管理员身份运行”运行CMD,重新执行安装过程, 使用命令sudo pip install paddlepaddle + +------ + +##### 问题: 使用pip install时报错,`ERROR: No matching distribution found for paddlepaddle` ,如何解决? + ++ 问题描述: + +使用pip install时报错,`ERROR: Could not find a version that satisfies the requirement paddlepaddle (from versions: none)` + +``ERROR: No matching distribution found for paddlepaddle` +![图片](https://agroup-bos-bj.cdn.bcebos.com/bj-febb18fb78004dc17f18d60a009dc6a8bd907251) + ++ 报错分析: + +Python版本不匹配导致。用户使用的是32位Python,但是对应的32位pip没有PaddlePaddle源。 + ++ 解决方法: + +请用户使用64位的Python进行PaddlePaddle安装。 + +------ + +##### 问题: 在GPU上执行程序报错,`Error:Segmentation fault`,如何解决? + ++ 问题描述: + +在GPU版本为`paddlepaddle_gpu-1.8.4.post87-cp27-cp27mu-manylinux1_x86_64.whl`的环境上执行一个程序,出现`Error:Segmentation fault`。如果将`place`修改“cpu”,则程序可正常运行。 + ++ 报错分析: + +造成该报错的原因通常是环境不匹配导致的。安装时,GPU版本为`paddlepaddle_gpu-1.8.4.post87-cp27-cp27mu-manylinux1_x86_64.whl`,`post87`表示需要在CUDA8.0、cuDNN7.0进行编译。如果机器上没有安装对应版本的CUDA和cuDNN,会导致执行程序时报错。 + +此外值得注意的是,配置PaddlePaddle的GPU版本,不仅需要CUDA和cuDNN版本匹配,还需要与PaddlePaddle版本匹配。出现类似错误时请检查这三个程序的版本是否匹配。 + ++ 解决方法: + +CUDA的安装可参考:https://docs.nvidia.com/cuda/archive/10.0/index.html;cuDNN的安装可参考:https://docs.nvidia.com/deeplearning/cudnn/install-guide/#install-windows。 + +------ + +##### 问题: 本地使用import paddle时报错,`ModuleNotFoundError:No module named ‘paddle’`,如何解决? 
+ ++ 报错分析: + +原因在于用户的计算机上可能安装了多个版本的Python,而安装PaddlePaddle时的Python和import paddle时的Python版本不一致导致报错。如果用户熟悉PyCharm等常见的IDE配置包安装的方法,配置运行的方法,则可以避免此类问题。 + ++ 解决方法: + +用户明确安装Paddle的python位置,并切换到该python进行安装。可能需要使用python -m pip install paddlepaddle命令确保paddle是安装到该python中。 + +------ + +##### 问题: 使用PaddlePaddle GPU的Docker镜像时报错, `Cuda Error: CUDA driver version is insufficient for CUDA runtime version`,如何解决? + ++ 报错分析: + +机器上的CUDA驱动偏低导致。 + ++ 解决方法: + +需要升级CUDA驱动解决。 + +1. Ubuntu和CentOS环境,需要把相关的驱动和库映射到容器内部。如果使用GPU的docker环境,需要用nvidia-docker来运行,更多请参考nvidia-docker。 + +2. Windows环境,需要升级CUDA驱动。 + +------ + +##### 问题: 使用PaddlePaddle时报错,`Error: no CUDA-capable device is detected`,如何解决? + ++ 报错分析: + +CUDA安装错误导致。 + ++ 解决方法: + +查找“libcudart.so”所在目录,并将其添加到“LD_LIBRARY_PATH”中。 + +例如:执行`find / -name libcudart.so`, 发现libcudart.so在“/usr/local/cuda-10.0/targets/x86_64-linux/lib/libcudart.so”路径下, 使用如下命令添加即可。 + +`export LD_LIBRARY_PATH=/usr/local/cuda-10.0/targets/x86_64-linux/lib/libcudart.so$LD_LIBRARY_PATH` + +------ + +##### 问题: 如何升级PaddlePaddle? + ++ 答复: + +1. GPU环境: + + + `pip install -U paddlepaddle-gpu` + +或者 + +`pip install paddlepaddle-gpu == 需要安装的版本号(如2.0)` + +2. CPU环境: + +`pip install -U paddlepaddle` + +或者 + +`pip install paddlepaddle == 需要安装的版本号(如2.0)` + +------ + +##### 问题: 在GPU上如何选择PaddlePaddle版本? + ++ 答复: + +pip install paddlepaddle-gpu==需要安装的版本号+'.post'+CUDA主版本+CUDNN主版本 例:pip install paddlepaddle-gpu==1.8.4.post97表示需要在CUDA9.0、cuDNN7.0进行安装。更多安装信息请见官网:https://www.paddlepaddle.org.cn/start diff --git a/doc/paddle/faq/others_cn.md b/doc/paddle/faq/others_cn.md new file mode 100644 index 0000000000000000000000000000000000000000..8842edfc3dc32bf774203fefd67707b4e0ba8898 --- /dev/null +++ b/doc/paddle/faq/others_cn.md @@ -0,0 +1,20 @@ +# 其他常见问题 + + +##### 问题:import paddle.fluid后logging模块无法使用,如何解决? + ++ 答复:操作方法可以参考[#issue17731](https://github.com/PaddlePaddle/Paddle/issues/17731)。 + +---------- + +##### 问题:使用X2paddle 从Caffe 转Paddle model时,报错 `TypeError: __new__() got an unexpected keyword argument 'serialized_options'` ,如何处理? + ++ 答复:这是由于ProtoBuf版本较低导致,将protobuf升级到3.6.0即可解决。 + +---------- + +##### 问题:Windows环境下,出现"Windows not support stack backtrace yet",如何处理? + ++ 答复:Windows环境下,遇到程序报错不会详细跟踪内存报错内容。这些信息对底层开发者更有帮助,普通开发者不必关心这类警告。如果想得到完整内存追踪错误信息,可以尝试更换至Linux系统。 + +---------- diff --git a/doc/paddle/faq/train_cn.md b/doc/paddle/faq/train_cn.md new file mode 100644 index 0000000000000000000000000000000000000000..3c677ac0b6ab2b85527405988419ee72251811fc --- /dev/null +++ b/doc/paddle/faq/train_cn.md @@ -0,0 +1,317 @@ +# 框架类FAQ + + +## 数据处理 + +##### 问题:如何在训练过程中高效读取数量很大的数据集? + ++ 答复:当训练时使用的数据集数据量较大或者预处理逻辑复杂时,如果串行地进行数据读取,数据读取往往会成为训练效率的瓶颈。这种情况下通常需要利用多线程或者多进程的方法异步地进行数据载入,从而提高数据读取和整体训练效率。 + +paddle1.8中推荐使用两个异步数据加载的API: + +1. DataLoader.from_generator,有限的异步加载 + +该API提供了单线程和单进程的异步加载支持。但由于线程和进程数目不可配置,所以异步加速能力是有限的,适用于数据读取负载适中的场景。 + +具体使用方法及示例请参考API文档:[fluid.io.DataLoader.from_generator](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/io_cn/DataLoader_cn.html#id1)。 + +2. DataLoader,灵活的异步加载 + +该API提供了多进程的异步加载支持,也是paddle后续主推的数据读取方式。用户可通过配置num_workers指定异步加载数据的进程数目从而满足不同规模数据集的读取需求。 + +具体使用方法及示例请参考API文档:[fluid.io.DataLLoader](https://www.paddlepaddle.org.cn/documentation/docs/en/api/io/DataLoader.html#dataloader) + +---------- + +##### 问题:使用多卡进行并行训练时,如何配置DataLoader进行异步数据读取? 
+ ++ 答复:paddle1.8中多卡训练时设置异步读取和单卡场景并无太大差别,动态图模式下,由于目前仅支持多进程多卡,每个进程将仅使用一个设备,比如一张GPU卡,这种情况下,与单卡训练无异,只需要确保每个进程使用的是正确的卡即可。 + +具体示例请参考飞桨API [fluid.io.DataLoader.from_generator](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/io_cn/DataLoader_cn.html#id1) 和 [fluid.io.DataLLoader](https://www.paddlepaddle.org.cn/documentation/docs/en/api/io/DataLoader.html#dataloader) 中的示例。 + +---------- + +##### 问题:在动态图使用`paddle.dataset.mnist.train()`获得数据后,如何转换为可操作的Tensor? + ++ 答复:调用`fluid.dygraph.to_varibale(data)`,即可将data数据转化为可以操作的动态图Tensor。 + +---------- + +##### 问题:如何给图片添加一个通道数,并进行训练? + ++ 答复:如果是在进入paddle计算流程之前,数据仍然是numpy.array的形式,使用numpy接口`numpy.expand_dims`为图片数据增加维度后,再通过`numpy.reshape`进行操作即可,具体使用方法可查阅numpy的官方文档。 + +如果是希望在模型训练或预测流程中完成通道的操作,可以使用paddle对应的API [paddle.fluid.layers.unsqueeze](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/layers_cn/unsqueeze_cn.html#unsqueeze) 和 [paddle.fluid.layers.reshape](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/layers_cn/reshape_cn.html#reshape)。 + +---------- + +##### 问题:有拓展Tensor维度的Op吗? + ++ 答复:有,请参考API [paddle.fluid.layers.unsqueeze](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/layers_cn/unsqueeze_cn.html)。 + +---------- + +##### 问题:如何从numpy.array生成一个具有shape和dtype的Tensor? + ++ 答复:在动态图模式下,可以参考如下示例: + +``` +import paddle.fluid as fluid + +with fluid.dygraph.guard(fluid.CPUPlace()): + x = np.ones([2, 2], np.float32) + y = fluid.dygraph.to_variable(x) +``` + +具体请参考API [paddle.fluid.dygraph.to_variable](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/to_variable_cn.html#to-variable) + +---------- + +##### 问题:如何初始化一个随机数的Tensor? + ++ 答复:使用`numpy.random`生成随机的numpy.array,再参考上一个问题中的示例创建随机数Tensor即可。 + + +## 模型搭建 + +##### 问题:如何不训练某层的权重? + ++ 答复:在`ParamAttr`里设置learning_rate=0。 + +---------- + +##### 问题:`stop_gradient=True`的影响范围? + ++ 答复:如果fluid里某一层使用`stop_gradient=True`,那么这一层之前的层都会自动 `stop_gradient=True`,梯度不再回传。 + +---------- + +##### 问题:请问`fluid.layers.matmul`和`fluid.layers.mul`有什么区别? + ++ 答复:`matmul`支持broadcast和任意阶相乘。`mul`会把输入都转成两维去做矩阵乘。 + +---------- + + + +## 模型训练&评估 + +##### 问题:在CPU上进行模型训练,如何使用多线程? + ++ 答复:可以参考使用[ParallelExecutor API](https://www.paddlepaddle.org.cn/documentation/docs/zh/1.3/api_cn/fluid_cn.html#parallelexecutor)。 + +---------- + +##### 问题:如何提高单CPU多线程利用率? + ++ 答复:线程数是设备同时并行执行程序的个数,可以将线程数设置为“CPU的个数*CPU的核数”。可以通过 + +`os.getenv("CPU_NUM")`或者`os.environ['CPU_NUM'] = str(2)`获取相关参数值。 + +---------- + +##### 问题:使用NVIDIA多卡运行Paddle时报错,`Error:NCCL ContextMap`或者`Error:hang住`(log日志打印突然卡住),如何解决? + ++ 答复:参考[NCCL Tests](https://github.com/NVIDIA/nccl-tests)检测您的环境。如果检测不通过,请登录[NCCL官网](https://developer.nvidia.com/zh-cn)下载NCCl,安装后重新检测。 + +---------- + +##### 问题:多卡训练时启动失败,`Error:Out of all 4 Trainers`,如何处理? + ++ 问题描述:多卡训练时启动失败,显示如下信息: + +![图片](https://agroup-bos-bj.cdn.bcebos.com/bj-13d1b5df218cb40b0243d13450ab667f34aee2f7) + ++ 报错分析:PaddlePaddle安装版本和多卡训练不匹配导致。 + ++ 解决方法:排查当前安装的PaddlePaddle是否支持并行训练。如果是开发者编译的Paddle,请在编译时打开 `WITH_DISTRIBUTE`选项。 + +---------- + +##### 问题:训练过程中提示显存不足,如何处理? + ++ 答复:这是一种常见情况,你可以尝试调整`batch_size`大小,也可以更改网络模型,或者参考官方文档[显存分配与优化](https://www.paddlepaddle.org.cn/documentation/docs/zh/1.5/advanced_usage/best_practice/memory_optimize.html) 。建议用户使用[AI Studio 学习与 实训社区训练](https://aistudio.baidu.com/aistudio/index),获取免费GPU算力,速度更快。 + +---------- + +##### 问题:GPU显存占用过高,如何解决? 
+
+----------
+
+##### 问题:`stop_gradient=True`的影响范围?
+
++ 答复:如果fluid里某一层使用`stop_gradient=True`,那么这一层之前的层都会自动`stop_gradient=True`,梯度不再回传。
+
+----------
+
+##### 问题:请问`fluid.layers.matmul`和`fluid.layers.mul`有什么区别?
+
++ 答复:`matmul`支持broadcast和任意阶相乘。`mul`会把输入都转成两维去做矩阵乘。
+
+----------
+
+
+
+## 模型训练&评估
+
+##### 问题:在CPU上进行模型训练,如何使用多线程?
+
++ 答复:可以参考使用[ParallelExecutor API](https://www.paddlepaddle.org.cn/documentation/docs/zh/1.3/api_cn/fluid_cn.html#parallelexecutor)。
+
+----------
+
+##### 问题:如何提高单CPU多线程利用率?
+
++ 答复:线程数是设备同时并行执行程序的个数,一般可以将线程数设置为“CPU的个数*每个CPU的核数”。可以通过`os.getenv("CPU_NUM")`查看当前设置的线程数,或在程序启动前通过`os.environ['CPU_NUM'] = str(2)`设置线程数。
+
+----------
+
+##### 问题:使用NVIDIA多卡运行Paddle时报错,`Error:NCCL ContextMap`或者`Error:hang住`(log日志打印突然卡住),如何解决?
+
++ 答复:参考[NCCL Tests](https://github.com/NVIDIA/nccl-tests)检测您的环境。如果检测不通过,请登录[NCCL官网](https://developer.nvidia.com/nccl)下载NCCL,安装后重新检测。
+
+----------
+
+##### 问题:多卡训练时启动失败,`Error:Out of all 4 Trainers`,如何处理?
+
++ 问题描述:多卡训练时启动失败,显示如下信息:
+
+![图片](https://agroup-bos-bj.cdn.bcebos.com/bj-13d1b5df218cb40b0243d13450ab667f34aee2f7)
+
++ 报错分析:PaddlePaddle安装版本和多卡训练不匹配导致。
+
++ 解决方法:排查当前安装的PaddlePaddle是否支持并行训练。如果是开发者自行编译的Paddle,请在编译时打开`WITH_DISTRIBUTE`选项。
+
+----------
+
+##### 问题:训练过程中提示显存不足,如何处理?
+
++ 答复:这是一种常见情况,您可以尝试调小`batch_size`,也可以更改网络模型,或者参考官方文档[显存分配与优化](https://www.paddlepaddle.org.cn/documentation/docs/zh/1.5/advanced_usage/best_practice/memory_optimize.html)。建议使用[AI Studio学习与实训社区](https://aistudio.baidu.com/aistudio/index)训练,获取免费GPU算力,速度更快。
+
+----------
+
+##### 问题:GPU显存占用过高,如何解决?
+
++ 答复:建议调整`FLAGS_fraction_of_gpu_memory_to_use`,并检查`batch_size`。设置较小的`batch_size`能降低显存消耗;`FLAGS_fraction_of_gpu_memory_to_use`默认值为0.92,当申请不到需要的显存时会直接报内存不足。如遇到此情况,可以先检查一下GPU卡是否被其他程序占用,再将该值调小后启动程序。
+
+----------
+
+##### 问题:GPU内存不足,报错 `Error:Out of memory error GPU`,如何处理?
+
++ 问题描述:
+
+![图片](https://agroup-bos-bj.cdn.bcebos.com/bj-3cbc8370534cb998f321af9b32aa2859403d9c9d)
+
++ 解决方案:
+
+  1. 检查当前模型是否占用显存过高,可尝试减小`batch_size`;
+  2. 开启以下三个选项:
+  `# 一旦Tensor不再使用立即释放显存垃圾;若设为1.0,则垃圾占用显存达到1GB时才释放`
+  `export FLAGS_eager_delete_tensor_gb=0.0`
+  `# 启用快速垃圾回收策略,不等待cuda kernel结束,直接释放显存`
+  `export FLAGS_fast_eager_deletion_mode=1`
+  `# 该环境变量设为0表示初始不预分配显存,按需申请`
+  `export FLAGS_fraction_of_gpu_memory_to_use=0`
+
+----------
+
+##### 问题:如何提升模型训练时的GPU利用率?
+
++ 答复:有如下两点建议:
+
+  1. 如果数据预处理耗时较长,可使用py_reader或multiprocess_reader加速;
+
+  2. 如果想提高GPU计算量,可以增大`batch_size`,但是注意同时调节其他超参数。
+
+  以上两点均为比较通用的方案,其他的优化方案和模型相关,可参考相应models示例。
+
+----------
+
+##### 问题:使用CPU或GPU时,如何设置`num_threads`?
+
++ 答复:
+
+  1. 如果是CPU,最大可以设置到当前CPU的内核数。
+  2. 如果是GPU,会受显卡多处理器的寄存器数目限制。例如GeForce 8800GT的显卡,最多有8192个寄存器。假设每个线程需要16个寄存器,则最多只能有512个线程。线程数再高,寄存器中的数据会被切换到显卡显存,反而降低执行效率。
+
+----------
+
+##### 问题:如何处理变长ID导致程序内存占用过大的问题?
+
++ 答复:请先参考[显存分配与优化文档](https://www.paddlepaddle.org.cn/documentation/docs/zh/1.5/advanced_usage/best_practice/memory_optimize.html)开启存储优化开关,包括显存垃圾及时回收和Op内部的输出复用输入等。若存储空间仍然不够,建议:
+
+  1. 降低`batch_size`;
+  2. 对index进行排序,减少padding的数量。
+
+----------
+
+##### 问题:Executor与ParallelExecutor有什么区别?
+
++ 答复:如果没有指定Scope,所有的Executor都会共享一个Scope,即`global_scope`。
+
+1. `fluid.Executor`执行对象是Program,可以认为是一个轻量级的执行器,目前主要用于参数初始化、参数加载、参数模型保存。
+
+2. `fluid.ParallelExecutor`的执行对象是Graph,ParallelExecutor内部会将Program转为Graph,这样更便于对模型进行分析。
+
+----------
+
+##### 问题:训练过程中如果出现不收敛的情况,如何处理?
+
++ 答复:不收敛的原因有很多,可以参考如下方式排查:
+
+  1. 检查数据集中训练数据的准确率,数据是否有很多错误,特征是否归一化;
+  2. 简化网络结构,先基于benchmark实验,确保在baseline网络结构和数据集上的收敛结果正确;
+  3. 对于复杂的网络,每次只增加一个改动,确保改动后的网络正确;
+  4. 检查网络在训练数据上的Loss是否下降;
+  5. 检查学习率、优化算法是否合适,学习率过大会导致不收敛;
+  6. 检查`batch_size`设置是否合适,`batch_size`过小会导致不收敛;
+  7. 检查梯度计算是否正确,是否有梯度过大的情况,是否为NaN。
+
+----------
+
+##### 问题:Loss为NaN,如何处理?
+
++ 答复:可能是网络设计的问题,Loss过大(Loss为NaN)会导致梯度爆炸。如果没有改网络结构,但是出现了NaN,可能是数据读取导致,比如标签对应关系错误。
+
+----------
+
+##### 问题:在AI Studio上使用GPU训练时报错 `Attempt to use GPU for prediction, but environment variable CUDA_VISIBLE_DEVICES was not set correctly.`,如何处理?
+
++ 答复:需要在Notebook环境中增加:`%set_env CUDA_VISIBLE_DEVICES=0`。
+
+----------
+
+##### 问题:使用GPU训练时报错,`Error:incompatible constructor arguments.`,如何处理?
+
++ 问题描述:
+  ![图片](https://agroup-bos-bj.cdn.bcebos.com/bj-3779aa5b33dbe1f05ba2bfeabb2d22d4270d1929)
+
++ 报错分析:`CUDAPlace()`接口没有指定GPU的ID编号导致。
+
++ 答复:`CUDAPlace()`接口需要指定GPU的ID编号,接口使用方法参见:[paddle.fluid.CUDAPlace](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/fluid_cn/CUDAPlace_cn.html)。
+
+----------
+
+##### 问题:增量训练中,如何保存模型和恢复训练?
+
++ 答复:在增量训练过程中,不仅需要保存模型的参数,也需要保存模型训练的状态(如learning_rate)。使用[save_persistables](https://www.paddlepaddle.org.cn/documentation/docs/zh/1.5/api_cn/dygraph_cn.html#save-persistables)保存模型训练的参数和状态;恢复训练时,使用[load_persistables](https://www.paddlepaddle.org.cn/documentation/docs/zh/1.5/api_cn/dygraph_cn.html#load-persistables)进行恢复。
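+
+若使用2.0风格的动态图接口,下面是一个等价的保存/恢复示意(以 `paddle.save`/`paddle.load` 为例,网络结构和文件名均为示意写法,具体请以所用版本文档为准):
+
+```python
+import paddle
+
+layer = paddle.nn.Linear(10, 3)   # 示意用的网络
+opt = paddle.optimizer.Adam(learning_rate=0.001,
+                            parameters=layer.parameters())
+
+# 保存:同时保存模型参数和优化器状态(learning_rate等)
+paddle.save(layer.state_dict(), "model.pdparams")
+paddle.save(opt.state_dict(), "model.pdopt")
+
+# 恢复训练:加载后分别设置回网络和优化器
+layer.set_state_dict(paddle.load("model.pdparams"))
+opt.set_state_dict(paddle.load("model.pdopt"))
+```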
+
+----------
+
+##### 问题:训练后的模型很大,如何压缩?
+
++ 答复:建议您使用飞桨模型压缩工具[PaddleSlim](https://www.paddlepaddle.org.cn/tutorials/projectdetail/489539)。PaddleSlim是飞桨开源的模型压缩工具库,包含模型剪裁、定点量化、知识蒸馏、超参搜索和模型结构搜索等一系列模型压缩策略,专注于**模型小型化技术**。
+
+----------
+
+
+
+## 应用预测
+
+##### 问题:load_inference_model在加载预测模型时能否用py_reader读取?
+
++ 答复:目前`load_inference_model`加载的预测模型还不支持py_reader输入。
+
+----------
+
+
+
+## 参数调整
+
+##### 问题:如何将本地数据传入`fluid.dygraph.Embedding`的参数矩阵中?
+
++ 答复:需将本地词典向量读取为NumPy数据格式,然后使用`fluid.initializer.NumpyArrayInitializer`初始化`fluid.dygraph.Embedding`的`param_attr`参数,即可加载用户自定义(或预训练)的Embedding向量。
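+
+一个最小示意如下(其中词向量文件路径、变量名均为假设):
+
+```python
+import numpy as np
+import paddle.fluid as fluid
+
+# 假设本地预训练词向量已保存为 [词表大小, 向量维度] 的numpy数组
+pretrained_emb = np.load("word_embedding.npy").astype('float32')
+vocab_size, emb_dim = pretrained_emb.shape
+
+with fluid.dygraph.guard():
+    embedding = fluid.dygraph.Embedding(
+        size=[vocab_size, emb_dim],
+        param_attr=fluid.ParamAttr(
+            initializer=fluid.initializer.NumpyArrayInitializer(pretrained_emb),
+            trainable=True))  # 若不希望继续训练该参数,可将trainable设为False
+```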
+
+------
+
+##### 问题:如何实现网络层中多个feature间共享该层的向量权重?
+
++ 答复:将所有网络层中`param_attr`参数里的`name`设置为同一个,即可实现共享向量权重。如使用embedding层时,可以设置`param_attr=fluid.ParamAttr(name="word_embedding")`,然后把param_attr传入embedding中。
+
+----------
+
+##### 问题:如何修改全连接层参数,如:weights、bias、optimizer.SGD?
+
++ 答复:可以通过`param_attr`设置参数的属性,如`fluid.ParamAttr(initializer=fluid.initializer.Normal(0.0, 0.02), learning_rate=2.0)`,其中若将`learning_rate`设置为0,该层就不参与训练。也可以手动输入参数进行修改,但是会比较麻烦。
+
+----------
+
+##### 问题:使用optimizer或ParamAttr设置的正则化和学习率,二者什么差异?
+
++ 答复:ParamAttr中定义的`regularizer`优先级更高。若ParamAttr中定义了`regularizer`,则忽略Optimizer中的`regularizer`;否则,使用Optimizer中的`regularizer`。学习率的设置原则与此相同。
+
+----------
+
+##### 问题:如何导出指定层的权重,如导出最后一层的*weights*和*bias*?
+
++ 答复:使用`save_vars`保存指定的vars,然后使用`load_vars`加载对应层的参数值。具体示例请见API文档:[load_vars](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/io_cn/load_vars_cn.html#load-vars) 和 [save_vars](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/io_cn/save_vars_cn.html#save-vars)。
+
+----------
+
+##### 问题:训练过程中如何固定网络和Batch Normalization(BN)?
+
++ 答复:
+
+1. 对于固定BN:设置 `use_global_stats=True`,使用已加载的全局均值和方差(`global mean/variance`),具体内容可查看官网文档[BatchNorm](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/BatchNorm_cn.html)。
+
+2. 对于固定网络层:如 stage1→stage2→stage3,假设stage2的输出为*y*,设置 `y.stop_gradient=True`,那么stage1→stage2整体都被固定,不再更新。
+
+----------
+
+##### 问题:优化器设置时报错`AttributeError: parameter_list argument given to the Optimizer should not be None in dygraph mode.`,如何处理?
+
++ 错误分析:必选参数缺失导致。
+
++ 答复:飞桨框架1.7版本之后,optimizer的设置中需要传入必选项`parameter_list`。
+
+----------
+
+##### 问题:`fluid.layers.pool2d`的全局池化参数和设置参数有关系么?
+
++ 答复:如果设置了`global_pooling`,则设置的`pool_size`将被忽略,不会产生影响。
+
+----------
diff --git a/doc/paddle/guides/dygraph_to_static/basic_usage_cn.rst b/doc/paddle/guides/dygraph_to_static/basic_usage_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1a249870e65dd6e1f6471300f837ccf8f77c3d12
--- /dev/null
+++ b/doc/paddle/guides/dygraph_to_static/basic_usage_cn.rst
@@ -0,0 +1,142 @@
+基本用法
+==============
+
+PaddlePaddle主要的动转静方式是基于源代码级别转换的ProgramTranslator。其基本原理是通过分析Python代码,将动态图代码转写为静态图代码,并在底层自动使用静态图执行器运行。这种转换方式使得用户可以灵活使用Python语法及其控制流来构建神经网络模型。除此之外,PaddlePaddle另外提供一种基于trace的动转静接口TracedLayer,若遇到ProgramTranslator不支持但可以用TracedLayer运行的情况,可以将其作为备选方案。
+
+基于源代码转写的ProgramTranslator
+-----------------------------------
+
+ProgramTranslator基于源代码转写进行动态图转静态图,其基本原理是通过分析Python代码,将动态图代码转写为静态图代码,并在底层自动使用执行器运行。其基本使用方法十分简便,只需要在要转化的函数(该函数也可以是用户自定义动态图Layer的forward函数)前添加一个装饰器 ``@paddle.jit.to_static`` 。一个转化例子如下,可以直接运行被装饰函数得到结果:
+
+.. code-block:: python
+
+    import numpy as np
+    import paddle
+
+    @paddle.jit.to_static
+    def func(input_var):
+        # if判断与输入input_var的shape有关
+        if input_var.shape[0] > 1:
+            out = paddle.cast(input_var, "float64")
+        else:
+            out = paddle.cast(input_var, "int64")
+        return out
+
+    paddle.disable_static()
+    in_np = np.array([-2]).astype('int')
+    input_var = paddle.to_tensor(in_np)
+    func(input_var)
+
+
+若要存储转化后的静态图模型,可以调用 ``paddle.jit.save`` 。我们定义一个简单全连接网络SimpleFcLayer,并在SimpleFcLayer的forward函数上添加装饰器:
+
+.. code-block:: python
+
+    import numpy as np
+    import paddle
+
+    class SimpleFcLayer(paddle.nn.Layer):
+        def __init__(self, feature_size, batch_size, fc_size):
+            super(SimpleFcLayer, self).__init__()
+            self._linear = paddle.nn.Linear(feature_size, fc_size)
+            self._offset = paddle.to_tensor(
+                np.random.random((batch_size, fc_size)).astype('float32'))
+
+        @paddle.jit.to_static
+        def forward(self, x):
+            fc = self._linear(x)
+            return fc + self._offset
+
+
+存储该模型可以使用 ``paddle.jit.save`` 接口:
+
+.. code-block:: python
+
+    import paddle
+
+    paddle.disable_static()
+
+    fc_layer = SimpleFcLayer(3, 4, 2)
+    in_np = np.random.random([3, 4]).astype('float32')
+    input_var = paddle.to_tensor(in_np)
+    out = fc_layer(input_var)
+
+    paddle.jit.save(fc_layer, "./fc_layer_dy2stat", input_spec=[input_var])
+
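+
+保存后的静态图模型可以通过 ``paddle.jit.load`` 重新载入并直接用于预测。以下为一个简单示意(假设载入路径与上文保存路径一致,仅演示基本用法):
+
+.. code-block:: python
+
+    # 载入保存的模型,得到一个可直接调用的Layer
+    loaded_layer = paddle.jit.load("./fc_layer_dy2stat")
+    loaded_layer.eval()
+    pred = loaded_layer(input_var)
+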
+
+基于trace的TracedLayer
+------------------------
+
+trace是指在模型运行时记录下其运行过哪些算子。TracedLayer就是基于这种技术,在一次执行动态图的过程中,记录所有运行的算子,并构建和保存静态图模型。一个使用例子如下:
+
+我们还是定义一个简单的全连接网络作为例子,注意这里不需要像ProgramTranslator那样在forward函数上添加装饰器:
+
+.. code-block:: python
+
+    import numpy as np
+    import paddle
+
+    class SimpleFcLayer(paddle.nn.Layer):
+        def __init__(self, feature_size, batch_size, fc_size):
+            super(SimpleFcLayer, self).__init__()
+            self._linear = paddle.nn.Linear(feature_size, fc_size)
+            self._offset = paddle.to_tensor(
+                np.random.random((batch_size, fc_size)).astype('float32'))
+
+        def forward(self, x):
+            fc = self._linear(x)
+            return fc + self._offset
+
+
+接下来是TracedLayer如何存储模型:
+
+.. code-block:: python
+
+    import paddle
+    from paddle.jit import TracedLayer
+
+    paddle.disable_static()
+
+    fc_layer = SimpleFcLayer(3, 4, 2)
+    in_np = np.random.random([3, 4]).astype('float32')
+    # 将numpy的ndarray类型的数据转换为Tensor类型
+    input_var = paddle.to_tensor(in_np)
+    # 通过 TracedLayer.trace 接口将命令式模型转换为声明式模型
+    out_dygraph, static_layer = TracedLayer.trace(fc_layer, inputs=[input_var])
+    save_dirname = './saved_infer_model'
+    # 将转换后的模型保存
+    static_layer.save_inference_model(save_dirname, feed=[0], fetch=[0])
+
+
+载入的模型可以使用静态图方式运行:
+
+.. code-block:: python
+
+    place = paddle.CPUPlace()
+    exe = paddle.Executor(place)
+    program, feed_vars, fetch_vars = paddle.io.load_inference_model(save_dirname, exe)
+    fetch, = exe.run(program, feed={feed_vars[0]: in_np}, fetch_list=fetch_vars)
+
+
+但是也正如上面阐述的原理,trace只是记录了一次执行所涉及的算子。若用户的模型代码中包含了依赖数据条件(包括输入的值或者shape)的控制流分支,即根据数据条件触发运行不同的算子,则TracedLayer无法正常工作。比如下面的例子:
+
+.. code-block:: python
+
+    import numpy as np
+    import paddle
+
+    def func(input_var):
+        # if判断与输入input_var的shape有关
+        if input_var.shape[0] > 1:
+            return paddle.cast(input_var, "float64")
+        else:
+            return paddle.cast(input_var, "int64")
+
+    paddle.disable_static()
+    in_np = np.array([-2]).astype('int')
+    input_var = paddle.to_tensor(in_np)
+    out = func(input_var)
+
+
+如果对上述样例中的 ``func`` 使用 ``TracedLayer.trace(func, inputs=[input_var])`` ,由于trace只能记录if-else其中一个分支运行的算子,模型就无法按用户期望的那样根据input_var的形状进行if-else控制流的保存。类似的控制流还有while/for循环的情况。
+
+比较ProgramTranslator和TracedLayer
+------------------------------------
+
+基于源代码转换的ProgramTranslator对比基于trace的TracedLayer,前者能够处理依赖数据条件的控制流分支。因此我们更推荐用户使用ProgramTranslator,如果遇到问题再以TracedLayer作为备选方案。
+
diff --git a/doc/paddle/guides/dygraph_to_static/basic_usage_en.rst b/doc/paddle/guides/dygraph_to_static/basic_usage_en.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f8b30dd74a3475371079d133dc879210466c8a68
--- /dev/null
+++ b/doc/paddle/guides/dygraph_to_static/basic_usage_en.rst
@@ -0,0 +1,139 @@
+Basic Usage
+=============
+
+The recommended way to transform a dygraph model to a static graph is the source-code-translation based ProgramTranslator. The basic idea is analyzing Python source code and turning it into static graph code, then running the static graph code using an Executor. Users can use Python syntax, including control flow, to build neural networks. Besides, PaddlePaddle provides another tracing-based API for transforming dygraph to static graph, called TracedLayer. You can use it as a back-up API in case ProgramTranslator has problems.
+
+ProgramTranslator
+-------------------
+
+The basic idea of the source-code-translation based ProgramTranslator is analyzing Python source code and turning it into static graph code, then running the static graph code using an Executor. The basic usage of ProgramTranslator is simple: put a decorator ``@paddle.jit.to_static`` before the definition of the function to transform (the function can also be a method of a class, e.g., the ``forward`` function of a user-defined imperative Layer). An example is:
+
+.. code-block:: python
+
+    import numpy as np
+    import paddle
+
+    @paddle.jit.to_static
+    def func(input_var):
+        # if condition depends on the shape of input_var
+        if input_var.shape[0] > 1:
+            out = paddle.cast(input_var, "float64")
+        else:
+            out = paddle.cast(input_var, "int64")
+        return out
+
+    paddle.disable_static()
+    in_np = np.array([-2]).astype('int')
+    input_var = paddle.to_tensor(in_np)
+    func(input_var)
+
+To save the transformed model, we can call ``paddle.jit.save`` . Let's take a fully connected network called ``SimpleFcLayer`` as an example and put the decorator on the ``forward`` method of ``SimpleFcLayer`` :
+
+.. code-block:: python
+
+    import numpy as np
+    import paddle
+
+    class SimpleFcLayer(paddle.nn.Layer):
+        def __init__(self, feature_size, batch_size, fc_size):
+            super(SimpleFcLayer, self).__init__()
+            self._linear = paddle.nn.Linear(feature_size, fc_size)
+            self._offset = paddle.to_tensor(
+                np.random.random((batch_size, fc_size)).astype('float32'))
+
+        @paddle.jit.to_static
+        def forward(self, x):
+            fc = self._linear(x)
+            return fc + self._offset
+
+
+Call ``paddle.jit.save`` to save the above model:
+
+.. code-block:: python
+
+    import paddle
+
+    paddle.disable_static()
+
+    fc_layer = SimpleFcLayer(3, 4, 2)
+    in_np = np.random.random([3, 4]).astype('float32')
+    input_var = paddle.to_tensor(in_np)
+    out = fc_layer(input_var)
+
+    paddle.jit.save(fc_layer, "./fc_layer_dy2stat")
+
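+
+The saved model can then be loaded back with ``paddle.jit.load`` and called directly for inference. Below is a minimal sketch (assuming the same saving path as above):
+
+.. code-block:: python
+
+    # Load the saved model as a callable Layer
+    loaded_layer = paddle.jit.load("./fc_layer_dy2stat")
+    loaded_layer.eval()
+    pred = loaded_layer(input_var)
+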
+
+TracedLayer
+-------------
+
+Tracing means recording the operators when running a model. TracedLayer is based on this technique. It runs the dygraph program once, records all operators, then constructs the static graph model and saves it. Now take a glance at a usage example:
+
+Define a simple fully connected network; note that we don't add a decorator before the ``forward`` function as we did in the ProgramTranslator example:
+
+.. code-block:: python
+
+    import numpy as np
+    import paddle
+
+    class SimpleFcLayer(paddle.nn.Layer):
+        def __init__(self, feature_size, batch_size, fc_size):
+            super(SimpleFcLayer, self).__init__()
+            self._linear = paddle.nn.Linear(feature_size, fc_size)
+            self._offset = paddle.to_tensor(
+                np.random.random((batch_size, fc_size)).astype('float32'))
+
+        def forward(self, x):
+            fc = self._linear(x)
+            return fc + self._offset
+
+Save the model by TracedLayer:
+
+.. code-block:: python
+
+    import paddle
+    from paddle.jit import TracedLayer
+
+    paddle.disable_static()
+
+    fc_layer = SimpleFcLayer(3, 4, 2)
+    in_np = np.random.random([3, 4]).astype('float32')
+    # Turn numpy ndarray into Tensor
+    input_var = paddle.to_tensor(in_np)
+    # Transforming imperative mode into declarative mode by TracedLayer.trace
+    out_dygraph, static_layer = TracedLayer.trace(fc_layer, inputs=[input_var])
+    save_dirname = './saved_infer_model'
+    # Save the transformed model
+    static_layer.save_inference_model(save_dirname, feed=[0], fetch=[0])
+
+Load the model and run it in static graph mode:
+
+.. code-block:: python
+
+    place = paddle.CPUPlace()
+    exe = paddle.Executor(place)
+    program, feed_vars, fetch_vars = paddle.io.load_inference_model(save_dirname, exe)
+    fetch, = exe.run(program, feed={feed_vars[0]: in_np}, fetch_list=fetch_vars)
+
+However, as tracing only records the operators of one execution, if the user's code contains Tensor-dependent control flow (depending on the Tensor value or shape), that is, different Tensors can cause different operators to be executed, then TracedLayer cannot handle this case. For instance:
+
+.. code-block:: python
+
+    import numpy as np
+    import paddle
+
+    def func(input_var):
+        # if condition depends on the shape of input_var
+        if input_var.shape[0] > 1:
+            return paddle.cast(input_var, "float64")
+        else:
+            return paddle.cast(input_var, "int64")
+
+    paddle.disable_static()
+    in_np = np.array([-2]).astype('int')
+    input_var = paddle.to_tensor(in_np)
+    out = func(input_var)
+
+If we apply ``TracedLayer.trace(func, inputs=[input_var])`` to the above example, tracing can only record the operators of one branch of the if-else, so the model cannot be saved as the user originally intended. Similar situations apply to while/for loops.
+
+Comparing ProgramTranslator and TracedLayer
+-------------------------------------------
+
+Compared to the tracing-based TracedLayer, the source-code-translation based ProgramTranslator can handle Tensor-dependent control flow. So we recommend users to use ProgramTranslator and keep TracedLayer as a back-up plan for cases where ProgramTranslator doesn't work.
+
diff --git a/doc/paddle/guides/dygraph_to_static/debugging_cn.md b/doc/paddle/guides/dygraph_to_static/debugging_cn.md
new file mode 100644
index 0000000000000000000000000000000000000000..40bc3e002eadc30f03383cf4d262931179001322
--- /dev/null
+++ b/doc/paddle/guides/dygraph_to_static/debugging_cn.md
@@ -0,0 +1,204 @@
+# 调试方法
+
+本节内容将介绍动态图转静态图(下文简称:动转静)推荐的几种调试方法。
+
+> **注解:**
+>
+> 请确保转换前的动态图代码能够成功运行,建议使用 [paddle.jit.ProgramTranslator().enable(False)](../../api_cn/dygraph_cn/ProgramTranslator_cn.html#enable) 关闭动转静功能,直接运行动态图,如下:
+
+```python
+import paddle
+import numpy as np
+paddle.disable_static()
+# 关闭动转静功能
+paddle.jit.ProgramTranslator().enable(False)
+
+@paddle.jit.to_static
+def func(x):
+    x = paddle.to_tensor(x)
+    if x > 3:
+        x = x - 1
+    return x
+
+func(np.ones([3, 2]))
+```
+
+## 断点调试
+使用动转静功能时,您可以使用断点调试代码。
+例如,在代码中调用 `pdb.set_trace()`:
+```Python
+import pdb
+
+@paddle.jit.to_static
+def func(x):
+    x = paddle.to_tensor(x)
+    pdb.set_trace()
+    if x > 3:
+        x = x - 1
+    return x
+```
+执行以下代码,将会在转化后的静态图代码中使用调试器:
+```Python
+func(np.ones([3, 2]))
+```
+
+运行结果:
+```bash
+> /tmp/tmpR809hf.py(6)func()
+-> def true_fn_0(x):
+(Pdb) n
+> /tmp/tmpR809hf.py(6)func()
+-> def false_fn_0(x):
+...
+``` + +如果您想在原始的动态图代码中使用调试器,请先调用 [`paddle.jit.ProgramTranslator().enable(False)`](../../api_cn/dygraph_cn/ProgramTranslator_cn.html#enable),如下: +```python +paddle.jit.ProgramTranslator().enable(False) +func(np.ones([3, 2])) +``` +运行结果: +```bash +> (10)func() +-> if x > 3: +... + +``` + +## 打印转换后的代码 +您可以打印转换后的静态图代码,有2种方法: + +1. 使用被装饰后的函数的 `code` 属性 + 如下代码中,装饰器 `paddle.jit.to_static` 会将函数 `func` 转化为一个类对象 `StaticLayer`,可以使用 StaticLayer 的 `code` 属性来获得转化后的代码。 + ```Python + @paddle.jit.to_static + def func(x): + x = paddle.to_tensor(x) + if x > 3: + x = x - 1 + return x + + print(func.code) + ``` + 运行结果: + + ```bash + + def func(x): + x = fluid.layers.assign(x) + + def true_fn_0(x): + x = x - 1 + return x + + def false_fn_0(x): + return x + x = fluid.dygraph.dygraph_to_static.convert_operators.convert_ifelse(x > + 3, true_fn_0, false_fn_0, (x,), (x,), (x,)) + return x + ``` + +2. 使用 `set_code_level(level)` 或环境变量 `TRANSLATOR_CODE_LEVEL=level` + + 通过调用 `set_code_level` 或设置环境变量 `TRANSLATOR_CODE_LEVEL`,可以在日志中查看转换后的代码: + + ```python + @paddle.jit.to_static + def func(x): + x = paddle.to_tensor(x) + if x > 3: + x = x - 1 + return x + + paddle.jit.set_code_level() # 也可设置 os.environ["TRANSLATOR_CODE_LEVEL"] = '100',效果相同 + func(np.ones([1])) + ``` + 运行结果: + + ```bash + 2020-XX-XX 00:00:00,980-INFO: After the level 100 ast transformer: 'All Transformers', the transformed code: + def func(x): + x = fluid.layers.assign(x) + + def true_fn_0(x): + x = x - 1 + return x + + def false_fn_0(x): + return x + x = fluid.dygraph.dygraph_to_static.convert_operators.convert_ifelse(x > + 3, true_fn_0, false_fn_0, (x,), (x,), (x,)) + return x + ``` + `set_code_level` 函数可以设置查看不同的AST Transformer转化后的代码,详情请见 [set_code_level](../../../paddle/api/paddle/fluid/dygraph/jit/set_code_level_cn.html)。 + +## 使用 `print` +`print` 函数可以用来查看变量,该函数在动转静中会被转化。当仅打印 Paddle Tensor 时,实际运行时会被转换为 Paddle 算子 [Print](../../api_cn/layers_cn/Print_cn.html),否则仍然运行 `print`。 +```python +@paddle.jit.to_static +def func(x): + x = paddle.to_tensor(x) + + # 打印x,x是Paddle Tensor,实际运行时会运行Paddle Print(x) + print(x) + + # 打印注释,非Paddle Tensor,实际运行时仍运行print + print("Here call print function.") + + if len(x) > 3: + x = x - 1 + else: + x = paddle.ones(shape=[1]) + return x + +func(np.ones([1])) +``` + +运行结果: +```bash +Variable: assign_0.tmp_0 + - lod: {} + - place: CPUPlace + - shape: [1] + - layout: NCHW + - dtype: double + - data: [1] +Here call print function. +``` + +## 日志打印 +ProgramTranslator在日志中记录了额外的调试信息,以帮助您了解动转静过程中函数是否被成功转换。 +您可以调用 [`paddle.jit.set_verbosity(level)`]((../../../paddle/api/paddle/fluid/dygraph/jit/set_verbosity_cn.html)) 或设置环境变量 `TRANSLATOR_VERBOSITY=level` 来设置日志详细等级,并查看不同等级的日志信息。目前,`level` 可以取值0-3: +- 0: 无日志 +- 1: 包括了动转静转化流程的信息,如转换前的源码、转换的可调用对象 +- 2: 包括以上信息,还包括更详细函数转化日志 +- 3: 包括以上信息,以及更详细的动转静日志 + +> **注意:** +> +> 日志中包括了源代码等信息,请在共享日志前确保它不包含敏感信息。 + +可以在代码运行前调用 `paddle.jit.set_verbosity` 控制日志详细程度: +```python +paddle.jit.set_verbosity(3) +``` +或者设置环境变量 `TRANSLATOR_VERBOSITY`: +```python +import os +os.environ["TRANSLATOR_VERBOSITY"] = '3' +``` + +运行结果: +```bash +2020-XX-XX 00:00:00,123-Level 1: Source code: +@paddle.jit.to_static +def func(x): + x = paddle.to_tensor(x) + if len(x) > 3: + x = x - 1 + else: + x = paddle.ones(shape=[1]) + return x + +2020-XX-XX 00:00:00,152-Level 1: Convert callable object: convert . 
+``` diff --git a/doc/paddle/guides/dygraph_to_static/debugging_en.md b/doc/paddle/guides/dygraph_to_static/debugging_en.md new file mode 100644 index 0000000000000000000000000000000000000000..2ca87c976b0a9fc30dc588d1b4a8f814463f42de --- /dev/null +++ b/doc/paddle/guides/dygraph_to_static/debugging_en.md @@ -0,0 +1,202 @@ +# Debugging Methods + +This section will introduce several debugging methods recommended by Dynamic Graph to Static Graph (hereafter called Dynamic-to-Staic). + +> **NOTE:** +> +> Please ensure that the dynamic graph code before transformation can run successfully. It is recommended to call [paddle.jit.ProgramTranslator().enable(False)](../../api/dygraph/ProgramTranslator_en.html#enable) to disable Dynamic-to-Static, and run dynamic graph code as follows: + + +```python +import paddle +import numpy as np +paddle.disable_static() + +# Disable Dynamic-to-Static +paddle.jit.ProgramTranslator().enable(False) + +@paddle.jit.to_static +def func(x): + x = paddle.to_tensor(x) + if x > 3: + x = x - 1 + return x + +func(np.ones([3, 2])) +``` + +## Breakpoint Debugging +When using Dynamic-to-Static, you can use breakpoints to debug. + +For example, call `pdb.set_trace()` in your code: +```Python +import pdb + +@paddle.jit.to_static +def func(x): + x = paddle.to_tensor(x) + pdb.set_trace() + if x > 3: + x = x - 1 + return x +``` +Executing the following code will land the debugger in the transformed static graph code: +```Python +func(np.ones([3, 2])) +``` + +```bash +> /tmp/tmpR809hf.py(6)func() +-> def true_fn_0(x): +(Pdb) n +> /tmp/tmpR809hf.py(6)func() +-> def false_fn_0(x): +... +``` + +Calling [`paddle.jit.ProgramTranslator().enable(False)`](../../api/dygraph/ProgramTranslator_en.html#enable) before executing the code will land the debugger in the original dynamic graph code: +```python +paddle.jit.ProgramTranslator().enable(False) +func(np.ones([3, 2])) +``` + +```bash +> (10)func() +-> if x > 3: +... + +``` + +## Print Transformed Code + +There are two ways to print the transformed static graph code: + +1. Use the attribute `code` of the decorated function + + In the following code, the decorator `paddle.jit.to_static` transforms `func` into a class object `StaticLayer`. You can use the `code` attribute of `StaticLayer` to get the transformed code. + ```Python + @paddle.jit.to_static + def func(x): + x = paddle.to_tensor(x) + if x > 3: + x = x - 1 + return x + + print(func.code) + ``` + ```bash + + def func(x): + x = fluid.layers.assign(x) + + def true_fn_0(x): + x = x - 1 + return x + + def false_fn_0(x): + return x + x = fluid.dygraph.dygraph_to_static.convert_operators.convert_ifelse(x > + 3, true_fn_0, false_fn_0, (x,), (x,), (x,)) + return x + ``` +2. Call `set_code_level(level)` or set environment variable `TRANSLATOR_CODE_LEVEL=level` + + You can view the transformed code in the log by calling `set_code_level` or set environment variable `TRANSLATOR_CODE_LEVEL`. 
+ + ```python + @paddle.jit.to_static + def func(x): + x = paddle.to_tensor(x) + if x > 3: + x = x - 1 + return x + + paddle.jit.set_code_level() # the same effect to set os.environ["TRANSLATOR_CODE_LEVEL"] = '100' + func(np.ones([1])) + ``` + + ```bash + 2020-XX-XX 00:00:00,980-INFO: After the level 100 ast transformer: 'All Transformers', the transformed code: + def func(x): + x = fluid.layers.assign(x) + + def true_fn_0(x): + x = x - 1 + return x + + def false_fn_0(x): + return x + x = fluid.dygraph.dygraph_to_static.convert_operators.convert_ifelse(x > + 3, true_fn_0, false_fn_0, (x,), (x,), (x,)) + return x + ``` + `set_code_level` can set different levels to view the code transformed by different ast transformers. For details, please refer to [set_code_level](../../../paddle/api/paddle/fluid/dygraph/jit/set_code_level_en.html)。 + +## `print` +You can call `print` to view variables. `print` will be transformed when using Dynamic-to-Static. When only Paddle Tensor is printed, `print` will be transformed and call Paddle operator [Print](../../api/layers/Print.html) in runtime. Otherwise, call python `print`. + +```python +@paddle.jit.to_static +def func(x): + x = paddle.to_tensor(x) + # x is a Paddle Tensor, so it will run Paddle Print(x) actually. + print(x) + + # The string is not a Paddle Tensor, so it will run print as-is. + print("Here call print function.") + + if len(x) > 3: + x = x - 1 + else: + x = paddle.ones(shape=[1]) + return x + +func(np.ones([1])) +``` + +```bash +Variable: assign_0.tmp_0 + - lod: {} + - place: CPUPlace + - shape: [1] + - layout: NCHW + - dtype: double + - data: [1] +Here call print function. +``` + +## Log Printing +ProgramTranslator can log additional debugging information to help you know whether the function was successfully transformed or not. + +You can call [`paddle.jit.set_verbosity(level)`](../../../paddle/api/paddle/fluid/dygraph/jit/set_verbosity_en.html) or set environment variable `TRANSLATOR_VERBOSITY=level` to enable logging and view logs of different levels. The argument `level` varies from 0 to 3: +- 0: no logging +- 1: includes the information in Dynamic-to-Static tranformation process, such as the source code not transformed, the callable object to transform and so on +- 2: includes above and more detailed function transformation logs +- 3: includes above and extremely verbose logging + +> **WARNING:** +> +> The logs includes information such as source code. Please make sure logs don't contain any sensitive information before sharing them. + +You can call `paddle.jit.set_verbosity` to control the verbosity level of logs: +```python +paddle.jit.set_verbosity(3) +``` +or use the environment variable `TRANSLATOR_VERBOSITY`: +```python +import os +os.environ["TRANSLATOR_VERBOSITY"] = '3' +``` + +```bash +2020-XX-XX 00:00:00,123-Level 1: Source code: +@paddle.jit.to_static +def func(x): + x = paddle.to_tensor(x) + if len(x) > 3: + x = x - 1 + else: + x = paddle.ones(shape=[1]) + return x + +2020-XX-XX 00:00:00,152-Level 1: Convert callable object: convert . 
diff --git a/doc/paddle/guides/dygraph_to_static/error_handling_cn.md b/doc/paddle/guides/dygraph_to_static/error_handling_cn.md new file mode 100644 index 0000000000000000000000000000000000000000..bb92cb80aa7a2485e2203177be2b3a4813602d91 --- /dev/null +++ b/doc/paddle/guides/dygraph_to_static/error_handling_cn.md @@ -0,0 +1,160 @@ +# 报错信息处理 + +本节内容将介绍使用动态图转静态图(下文简称:动转静)功能发生异常时,[ProgramTranslator](./program_translator_cn.html)对报错信息做的处理,以帮助您更好地理解动转静报错信息。使用动转静功能运行动态图代码时,内部可以分为2个步骤:动态图代码转换成静态图代码,运行静态图代码。接下来将分别介绍这2个步骤中的异常报错情况。 + +## 动转静过程中的异常 +在动态图代码转换成静态图代码的过程中,如果ProgramTranslator无法转换一个函数时,将会显示警告信息,并尝试直接运行该函数。 +如下代码中,函数 `inner_func` 在调用前被转换成静态图代码,当 `x = inner_func(data)` 调用该函数时,不能重复转换,会给出警告信息: + +```python +import paddle +import numpy as np + +paddle.disable_static() + +@paddle.jit.to_static +def func(): + def inner_func(x): + x_tensor = paddle.to_tensor(x) + return x_tensor + data = np.ones([3]).astype("int32") + x = inner_func(data) + return x +func() +``` + +ProgramTranslator打印的警告信息如下: + +```bash +WARNING: doesn't have to be transformed to static function because it has been transformed before, it will be run as-is. +``` + +## 运行转换后的代码报错 + +如果在动转静后的静态图代码中发生异常,ProgramTranslator 会捕获该异常,增强异常报错信息,将静态图代码报错行映射到转换前的动态图代码,并重新抛出该异常。 +重新抛出的异常具有以下特点: + +- 隐藏了部分对用户无用的动转静过程调用栈; +- 转换前的代码会给出提示:"In User Code:"; +- 报错信息中包含了转换前的原始动态图代码; + +例如,运行以下代码,在静态图构建时,即编译期会抛出异常: + +```python +import paddle +import numpy as np + +paddle.disable_static() + +@paddle.jit.to_static +def func(x): + x = paddle.to_tensor(x) + x = paddle.reshape(x, shape=[-1, -1]) + return x + +func(np.ones([3, 2])) +``` + +运行结果: +```bash +Traceback (most recent call last): + in () + func(np.ones([3, 2])) + File "paddle/fluid/dygraph/dygraph_to_static/program_translator.py", line 332, in __call__ + raise new_exception +AssertionError: In user code: + + File "", line 7, in func + x = fluid.layers.reshape(x, shape=[-1, -1]) + File "paddle/fluid/layers/nn.py", line 6193, in reshape + attrs["shape"] = get_attr_shape(shape) + File "paddle/fluid/layers/nn.py", line 6169, in get_attr_shape + "be -1. But received shape[%d] is also -1." % dim_idx) + AssertionError: Only one dimension value of 'shape' in reshape can be -1. But received shape[1] is also -1. +``` + +上述报错信息可以分为3点: + +1. 报错栈中,涉及代码转换过程的信息栈默认会被隐藏,不进行展示,以减少干扰信息。 + +2. ProgramTranslator处理后的报错信息中,会包含提示"In user code:",表示之后的报错栈中,包含动转静前的动态图代码,即用户写的代码: + ```bash + AssertionError: In user code: + + File "", line 7, in func + x = fluid.layers.reshape(x, shape=[-1, -1]) + File "paddle/fluid/layers/nn.py", line 6193, in reshape + attrs["shape"] = get_attr_shape(shape) + File "paddle/fluid/layers/nn.py", line 6169, in get_attr_shape + "be -1. But received shape[%d] is also -1." % dim_idx) + ``` + 其中,`File "", line 7, in func` 是转换前的代码位置信息,`x = fluid.layers.reshape(x, shape=[-1, -1])` 是转换前的代码。 + +3. 新的异常中,包含原始报错中的的报错信息,如下: + ```bash + AssertionError: Only one dimension value of 'shape' in reshape can be -1. But received shape[1] is also -1. 
+ ``` + +运行以下代码,在静态图运行时,即运行期会抛出异常: + +```Python +@paddle.jit.to_static +def func(x): + x = paddle.to_tensor(x) + two = paddle.fill_constant(shape=[1], value=2, dtype="int32") + x = paddle.reshape(x, shape=[1, two]) + return x + +func(np.ones([3]).astype("int32")) +``` + +运行结果: + +```bash +Traceback (most recent call last): + File "", line 10, in () + func(np.ones([3]).astype("int32")) + File "paddle/fluid/dygraph/dygraph_to_static/program_translator.py", line 332, in __call__ + raise new_exception + +EnforceNotMet: In user code: + + File "", line 7, in func + x = paddle.reshape(x, shape=[1, two]) + File "paddle/tensor/manipulation.py", line 1347, in reshape + return paddle.fluid.layers.reshape(x=x, shape=shape, name=name) + File "paddle/fluid/layers/nn.py", line 6209, in reshape + "XShape": x_shape}) + File "paddle/fluid/layer_helper.py", line 43, in append_op + return self.main_program.current_block().append_op(*args, **kwargs) + File "paddle/fluid/framework.py", line 2880, in append_op + attrs=kwargs.get("attrs", None)) + File "paddle/fluid/framework.py", line 1977, in __init__ + for frame in traceback.extract_stack(): + +-------------------------------------- +C++ Traceback (most recent call last): +-------------------------------------- +0 paddle::imperative::Tracer::TraceOp(std::string const&, paddle::imperative::NameVarBaseMap const&, paddle::imperative::NameVarBaseMap const&, paddle::framework::AttributeMap, paddle::platform::Place const&, bool) +1 paddle::imperative::OpBase::Run(paddle::framework::OperatorBase const&, paddle::imperative::NameVarBaseMap const&, paddle::imperative::NameVarBaseMap const&, paddle::framework::AttributeMap const&, paddle::platform::Place const&) +2 paddle::imperative::PreparedOp::Run(paddle::imperative::NameVarBaseMap const&, paddle::imperative::NameVarBaseMap const&, paddle::framework::AttributeMap const&) +3 std::_Function_handler >::operator()(char const*, char const*, int) const::{lambda(paddle::framework::ExecutionContext const&)#1}>::_M_invoke(std::_Any_data const&, paddle::framework::ExecutionContext const&) +4 paddle::operators::RunProgramOpKernel::Compute(paddle::framework::ExecutionContext const&) const +5 paddle::framework::Executor::RunPartialPreparedContext(paddle::framework::ExecutorPrepareContext*, paddle::framework::Scope*, long, long, bool, bool, bool) +6 paddle::framework::OperatorBase::Run(paddle::framework::Scope const&, paddle::platform::Place const&) +7 paddle::framework::OperatorWithKernel::RunImpl(paddle::framework::Scope const&, paddle::platform::Place const&) const +8 paddle::framework::OperatorWithKernel::RunImpl(paddle::framework::Scope const&, paddle::platform::Place const&, paddle::framework::RuntimeContext*) const +9 paddle::operators::ReshapeKernel::operator()(paddle::framework::ExecutionContext const&) const +10 paddle::operators::ReshapeOp::ValidateShape(std::vector >, paddle::framework::DDim const&) +11 paddle::platform::EnforceNotMet::EnforceNotMet(std::string const&, char const*, int) +12 paddle::platform::GetCurrentTraceBackString() + +---------------------- +Error Message Summary: +---------------------- +InvalidArgumentError: The 'shape' in ReshapeOp is invalid. The input tensor X'size must be equal to the capacity of 'shape'. But received X's shape = [3], X's size = 3, 'shape' is [1, 2], the capacity of 'shape' is 2. + [Hint: Expected capacity == in_size, but received capacity:2 != in_size:3.] 
(at /paddle/paddle/fluid/operators/reshape_op.cc:206) + [operator < reshape2 > error] [operator < run_program > error] +``` + +上述异常中,除了隐藏部分报错栈、报错定位到转换前的动态图代码外,报错信息中包含了C++报错栈 `C++ Traceback` 和 `Error Message Summary`,这是 Paddle 的 C++ 端异常信息,经处理后在 Python 的异常信息中显示。 diff --git a/doc/paddle/guides/dygraph_to_static/error_handling_en.md b/doc/paddle/guides/dygraph_to_static/error_handling_en.md new file mode 100644 index 0000000000000000000000000000000000000000..22c9eb8b37a93c68c0e20a83844be14a560bd6a7 --- /dev/null +++ b/doc/paddle/guides/dygraph_to_static/error_handling_en.md @@ -0,0 +1,160 @@ +# Error Handling + +This section will introduce the error information when an exception occurs, so as to help you better understand the Dynamic-to-Static error information. +When running the transformed static graph code, the internal procedure can be divided into two steps: the dynamic graph code is transformed into the static graph code, and the static graph code is run. We will introduce the error reporting in these two steps. + +## Exceptions in Dynamic-to-Static Transformation + +If ProgramTranslator cannot transform a function, it will display a warning message and try to run the function as-is. + +In the following code, the function `inner_func` is transformed before calling. When calling `inner_func` in `x = inner_func(data)`, it is not allowed to transform repeatedly, and a warning message will be given: + +```python +import paddle +import numpy as np + +paddle.disable_static() + +@paddle.jit.to_static +def func(): + def inner_func(x): + x_tensor = paddle.to_tensor(x) + return x_tensor + data = np.ones([3]).astype("int32") + x = inner_func(data) + return x +func() +``` + +The warning message is as follows: +```bash +WARNING: doesn't have to be transformed to static function because it has been transformed before, it will be run as-is. +``` +## Exceptions in Running Transformed Code + +When an exception occurs in the transformed code by ProgramTranslator, the exception is caught and the error message is augmented. It maps the error line of the static graph code to the un-transformed dynamic graph code, and then re-raises the exception. + +Among the features of the re-raised exception: + +- Some useless call stacks of Dynamic-to-Static are hidden; +- A prompt will be given before the un-transformed code: "In User Code:"; +- The error message includes references to the original dynamic graph code before transformation; + +For example, if executing the following code, an exception is raised when the static graph is built, that is, at compile time: + +```python +import paddle +import numpy as np + +paddle.disable_static() + +@paddle.jit.to_static +def func(x): + x = paddle.to_tensor(x) + x = paddle.reshape(x, shape=[-1, -1]) + return x + +func(np.ones([3, 2])) +``` + +```bash +Traceback (most recent call last): + in () + func(np.ones([3, 2])) + File "paddle/fluid/dygraph/dygraph_to_static/program_translator.py", line 332, in __call__ + raise new_exception +AssertionError: In user code: + + File "", line 7, in func + x = fluid.layers.reshape(x, shape=[-1, -1]) + File "paddle/fluid/layers/nn.py", line 6193, in reshape + attrs["shape"] = get_attr_shape(shape) + File "paddle/fluid/layers/nn.py", line 6169, in get_attr_shape + "be -1. But received shape[%d] is also -1." % dim_idx) + AssertionError: Only one dimension value of 'shape' in reshape can be -1. But received shape[1] is also -1. +``` + +The above error information can be divided into three points: + +1. 
In the error stack, the call stacks related to the code transformation process are hidden by default and not displayed, so as to avoid confusion. + +2. In the error message processed by ProgramTranslator, a prompt "In user code:" will be included, which means that the following error stacks contains the original dynamic graph code, that is, the code written by the user: + + ```bash + AssertionError: In user code: + + File "", line 7, in func + x = fluid.layers.reshape(x, shape=[-1, -1]) + File "paddle/fluid/layers/nn.py", line 6193, in reshape + attrs["shape"] = get_attr_shape(shape) + File "paddle/fluid/layers/nn.py", line 6169, in get_attr_shape + "be -1. But received shape[%d] is also -1." % dim_idx) + ``` + `File "", line 7, in func` is the location information of un-transformed code, `x = fluid.layers.reshape(x, shape=[-1, -1])` is the un-transformed code. + +3. The new exception contains the message that the exception originally reported, as follows: + ```bash + AssertionError: Only one dimension value of 'shape' in reshape can be -1. But received shape[1] is also -1. + ``` + +If execute the following code, an exception is raised when the static graph is executed at runtime: + +```Python +@paddle.jit.to_static +def func(x): + x = paddle.to_tensor(x) + two = paddle.fill_constant(shape=[1], value=2, dtype="int32") + x = paddle.reshape(x, shape=[1, two]) + return x + +func(np.ones([3]).astype("int32")) +``` + +```bash +Traceback (most recent call last): + File "", line 10, in () + func(np.ones([3]).astype("int32")) + File "paddle/fluid/dygraph/dygraph_to_static/program_translator.py", line 332, in __call__ + raise new_exception + +EnforceNotMet: In user code: + + File "", line 7, in func + x = paddle.reshape(x, shape=[1, two]) + File "paddle/tensor/manipulation.py", line 1347, in reshape + return paddle.fluid.layers.reshape(x=x, shape=shape, name=name) + File "paddle/fluid/layers/nn.py", line 6209, in reshape + "XShape": x_shape}) + File "paddle/fluid/layer_helper.py", line 43, in append_op + return self.main_program.current_block().append_op(*args, **kwargs) + File "paddle/fluid/framework.py", line 2880, in append_op + attrs=kwargs.get("attrs", None)) + File "paddle/fluid/framework.py", line 1977, in __init__ + for frame in traceback.extract_stack(): + +-------------------------------------- +C++ Traceback (most recent call last): +-------------------------------------- +0 paddle::imperative::Tracer::TraceOp(std::string const&, paddle::imperative::NameVarBaseMap const&, paddle::imperative::NameVarBaseMap const&, paddle::framework::AttributeMap, paddle::platform::Place const&, bool) +1 paddle::imperative::OpBase::Run(paddle::framework::OperatorBase const&, paddle::imperative::NameVarBaseMap const&, paddle::imperative::NameVarBaseMap const&, paddle::framework::AttributeMap const&, paddle::platform::Place const&) +2 paddle::imperative::PreparedOp::Run(paddle::imperative::NameVarBaseMap const&, paddle::imperative::NameVarBaseMap const&, paddle::framework::AttributeMap const&) +3 std::_Function_handler >::operator()(char const*, char const*, int) const::{lambda(paddle::framework::ExecutionContext const&)#1}>::_M_invoke(std::_Any_data const&, paddle::framework::ExecutionContext const&) +4 paddle::operators::RunProgramOpKernel::Compute(paddle::framework::ExecutionContext const&) const +5 paddle::framework::Executor::RunPartialPreparedContext(paddle::framework::ExecutorPrepareContext*, paddle::framework::Scope*, long, long, bool, bool, bool) +6 
paddle::framework::OperatorBase::Run(paddle::framework::Scope const&, paddle::platform::Place const&) +7 paddle::framework::OperatorWithKernel::RunImpl(paddle::framework::Scope const&, paddle::platform::Place const&) const +8 paddle::framework::OperatorWithKernel::RunImpl(paddle::framework::Scope const&, paddle::platform::Place const&, paddle::framework::RuntimeContext*) const +9 paddle::operators::ReshapeKernel::operator()(paddle::framework::ExecutionContext const&) const +10 paddle::operators::ReshapeOp::ValidateShape(std::vector >, paddle::framework::DDim const&) +11 paddle::platform::EnforceNotMet::EnforceNotMet(std::string const&, char const*, int) +12 paddle::platform::GetCurrentTraceBackString() + +---------------------- +Error Message Summary: +---------------------- +InvalidArgumentError: The 'shape' in ReshapeOp is invalid. The input tensor X'size must be equal to the capacity of 'shape'. But received X's shape = [3], X's size = 3, 'shape' is [1, 2], the capacity of 'shape' is 2. + [Hint: Expected capacity == in_size, but received capacity:2 != in_size:3.] (at /paddle/paddle/fluid/operators/reshape_op.cc:206) + [operator < reshape2 > error] [operator < run_program > error] +``` + +In the above exception, in addition to hiding part of the error stack and locating the error to the un-transformed dynamic graph code, the error information includes the c++ error stack `C++ Traceback` and `Error Message Summary`, which are the exception from C++ and are displayed in Python exception after processing. diff --git a/doc/paddle/guides/dygraph_to_static/grammar_list_cn.rst b/doc/paddle/guides/dygraph_to_static/grammar_list_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..15232af266a0fc9d76fd9788158f9af5dcc3d17d --- /dev/null +++ b/doc/paddle/guides/dygraph_to_static/grammar_list_cn.rst @@ -0,0 +1,122 @@ +支持语法列表 +============== + +ProgramTranslator本质是把Python运行语法转写为PaddlePaddle静态图代码,但是Python语法的表达能力和PaddlePaddle静态图表达能力存在不同,这使得一些代码无法被转换。 + +本章节我们将详细讲述在动转静过程中支持转化哪些语法,不支持哪些语法,并且讲述如何改写代码能够解决语法不支持的场景。 + +动转静支持的语法分为以下几个大类: + +控制流相关关键词 +------------------ + +控制流指if-elif-else,while等能够控制程序语句执行顺序的关键字。PaddlePaddle静态图通过cond,while_loop API来实现条件判断和循环,如果动态图Python控制流的判断条件或循环条件依赖 PaddlePaddle Tensor,动转静后会被转化为等价的PaddlePaddle控制流接口,否则仍然使用Python控制流逻辑运行。在动转静过程中这些关键字的转化情况为: + +1. if-elif-else 条件 + +当 ``if <条件>`` 中的条件是Tensor时,ProgramTranslator会把该if-elif-else语句转化为等价的cond API语句。否则会按普通Python if-elif-else的逻辑运行。需注意cond支持的Tensor只能是numel为1的bool Tensor,所以请使用这种Tensor进行条件判断,其他Tensor会报错。 + +2. while 循环 + +当while循环中的条件是Tensor时,ProgramTranslator会把该while语句转化为等价的while_loop API语句,否则会按普通Python while运行。需注意while循环条件中的Tensor只能是numel为1的bool Tensor,所以请使用这种Tensor进行条件判断,其他Tensor会报错。 + + +3. for 循环 + +3.1 ``for _ in range(__)`` 循环 + +ProgramTranslator先将其转化为等价的Python while循环,然后按while循环的逻辑进行动静转换。 + +3.2 ``for _ in x`` 循环 + +当x是Python容器或迭代器,则会用普通Python逻辑运行。当x是Tensor时,会转化为循环中每次对应拿出x[0], x[1], ... 。 + +3.3 ``for idx, val in enumerate(x)`` 循环 + +当x是Python容器或迭代器,则会用普通Python逻辑运行。当x是Tensor时,idx会转化为依次0,1,...的1-D Tensor。val会转化为循环中每次对应拿出x[0], x[1], ... 。 + +4. break,continue + +ProgramTranslator 可以支持在循环中添加break,continue语句,其底层实现原理是对于要break,continue的部分在相应时候使用cond在一定条件下跳过执行。 + +5. return + +ProgramTranslator 支持在循环,条件判断中return结果而不需要一定在函数末尾return。也能够支持return不同长度tuple和不同类型的Tensor。其底层实现原理是对return后的部分相应使用cond在一定条件下跳过执行。 + + +一些需要转化的运算类型 +------------------------ + +1. +,-,*,/,**, >, <, >= , <=, == 等Python内置运算 + +由于静态图有重载这些基本运算符,所以这些被ProgramTranslator转化后都适用相应重载的运算符,动转静支持此类运算。 + +2. 
and,or,not 逻辑运算 + +Python内置and,or,not逻辑运算关键词,ProgramTranslator在语句的运算时会判断逻辑运算关键词运行的对象是否是Tensor,如果都是Tensor,我们将其转化为静态图对应的逻辑运算接口并运行。 + +3. 类型转化 + +动态图中可以直接用Python的类型转化语法来转化Tensor类型。例如x是Tensor时,float(x)可以将x的类型转化为float。ProgramTranslator在运行时判断x是否是Tensor,如果是,则在动转静时使用静态图cast接口转化相应的Tensor类型。 + +Python 函数相关 +--------------------- + +1. print + +如果x是Tensor,在动态图模式中print(x)可以打印x的值。在动转静过程中我们把此转化为静态图的Print接口实现,使得在静态图中也能打印。如果print的参数不是Tensor,那么我们没有把相应print语句进行转写。 + +2. len + +如果x是Tensor,在动态图模式中len(x)可以获得x第0维度的长度。在动转静中我们把此转化为静态图shape接口,并返回shape的第0维。另外如果x是个TensorArray,那么len(x)将会使用静态图接口control_flow.array_length返回TensorArray的长度。对于其他情况,动转静时会按照普通Python len函数运行。 + +3. lambda 表达式 + +动转静允许写带有Python lambda表达式的语句,并且我们会适当改写使得返回对应结果。 + +4. 函数内再调用函数 + +对于函数内调用其他函数的情况,ProgramTranslator也会对内部的函数递归地进行动转静,这样做的好处是可以在最外层函数只需加一次装饰器即可,而不需要每个函数都加装饰器。但需要注意,动转静还不支持函数递归调用自己,详细原因请查看下文动转静无法正确运行的情况。 + +报错异常相关 +-------------- + +1. assert + +如果x是Tensor,在动态图中可以通过assert x来强制x为True或者非0值,在动转静中我们把此转化为静态图Assert接口支持此功能。 + + +Python基本容器 +--------------- + +1. list:对于一个list如果里面元素都是Tensor,那么动转静会转化其为TensorArray,静态图TensorArray可以支持append,pop,修改操作。因此ProgramTranslator在元素皆为Tensor的list中支持上面三种操作。换言之,其他list操作,比如sort无法支持。对于list中并非所有元素是Tensor的情况,ProgramTranslator会将其作为普通Python list运行。 + +2. dict:ProgramTranslator会将相应的dict中的Tensor添加进静态图Program,因此使用dict是动转静支持的语法。 + +动转静无法正确运行的情况 +-------------------------- + +1. Reshape后的变量调用其shape作为PaddlePaddle API参数。 + +具体表现比如 ``x = reshape(x, shape=shape_tensor)`` ,再使用 ``x.shape[0]`` 的值进行其他操作。这种情况会由于动态图和静态图的本质不同而使得动态图能够运行,但静态图运行失败。其原因是动态图情况下,API是直接返回运行结果,因此 ``x.shape`` 在经过reshape运算后是确定的。但是在转化为静态图后,因为静态图API只是组网,``shape_tensor`` 的值在组网时是不知道的,所以 ``reshape`` 接口组网完,静态图并不知道 ``x.shape`` 的值。PaddlePaddle静态图用-1表示未知的shape值,此时 ``x`` 的shape每个维度会被设为-1,而不是期望的值。 + +遇到这类情况我们建议尽量固定shape值,减少reshape操作。 + +2. 多重list嵌套读写Tensor + +具体表现如 ``l = [[tensor1, tensor2], [tensor3, tensor4]]`` ,因为现在动转静将元素全是Tensor的list转化为TensorArray,而PaddlePaddle的TensorArray还不支持多维数组,因此这种情况下,动转静无法正确运行。 + +遇到这类情况我们建议尽量用一维list,或者自己使用PaddlePaddle的create_array,array_read,array_write接口编写为TensorArray。 + +3. Tensor值在被装饰函数中转成numpy array进行运算 + +具体表现为在被装饰函数中没有返回Tensor时就使用 ``numpy.array(tensor)`` 将Tensor转化为numpy array并使用numpy接口进行运算。这种情况在动态图下因为Tensor有值是可以正常运行的,但是在静态图时由于Tensor只是组网变量,在没有运行时没有数值,因此无法进行numpy运算。 + +遇到这种情况我们建议在动转静的函数中尽量使用PaddlePaddle接口替代numpy接口进行运算。 + +4. 一个函数递归调用自己 + +ProgramTranslator还无法支持一个函数递归调用自己,原因是递归常常会用 ``if-else`` 构造停止递归的条件。然而这样的停止条件在静态图下只是一个 ``cond`` 组网,组网并不能在编译阶段决定自己组多少次,会导致函数运行时一直组网递归直至栈溢出,因此ProgramTranslator还无法支持一个函数递归调用自己。 + +遇到这种情况我们建议将代码改为非递归写法。 + diff --git a/doc/paddle/guides/dygraph_to_static/grammar_list_en.rst b/doc/paddle/guides/dygraph_to_static/grammar_list_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..57a7998028e94be550a7d66084c16a74f6ffc3fb --- /dev/null +++ b/doc/paddle/guides/dygraph_to_static/grammar_list_en.rst @@ -0,0 +1,124 @@ +Supported Grammars +==================== + +The key part of ProgramTranslator is transforming Python grammar into PaddlePaddle static graph code, but there exists difference between Python and PaddlePaddle static graph which causes some limitation of the code transformation. + +In this section we will talk about the supported grammars and unsupported grammars, also give some suggestions when the grammar is unsupported. + +There are several kinds of supported grammars: + +Control flow keywords +--------------------- + +Control flow means those keywords that controls the execution order of program statements, for example ``if-elif-else, while`` . 
Conditional operation and loop were implemented as ``cond, while_loop`` APIs in PaddlePaddle static graph. If the condition of a Python dygraph control flow depends on PaddlePaddle Tensor, the ProgramTranslator will convert the control flow into equivalent PaddlePaddle control flow APIs, else it will still be executed as Python control flow. The transformations of those control flow keywords are listed below: + +1. ``if-elif-else`` statements + +If the condition of ``if `` is Tensor, ProgramTranslator will turn this ``if-elif-else`` statement to equivalent PaddlePaddle static graph ``cond`` statements, otherwise the ``if-elif-else`` statement is executed as normal Python conditional statement. Note that ``cond`` API only accepts input conditional Tensor with numel equals to 1, so please use this kind of Tensor to write dygraph conditional statement, other Tensors will cause error. + +2. ``while`` loop + +If the condition of ``while`` is Tensor, ProgramTranslator will turn this ``while`` statement to equivalent PaddlePaddle static graph ``while_loop`` statements, otherwise the ``while`` statement is executed as normal Python ``while`` loop statement. Note that ``while_loop`` API only accepts input conditional Tensor with numel equals to 1, so please use this kind of Tensor to write dygraph loop condition statement, other Tensors will cause error. + +3. ``for`` loop + +3.1 ``for _ in range(__)`` loop + +Firstly, ProgramTranslator will transform it into equivalent Python while loop, then convert dygraph to static graph by same logic of ``while`` loop. + +3.2 ``for _ in x`` loop + +If ``x`` is a Python container, iterator, or generator, it will be executed as original Python statement. Otherwise ``x`` is a Tensor, ProgramTranslator will transform the loop into PaddlePaddle static graph loop and fetches ``x[0], x[1], ...`` as loop iteration variable in each loop iteration. + +3.3 ``for idx, val in enumerate(x)`` loop + +If ``x`` is a Python container, iterator, or generator, it will be executed as original Python statement. Otherwise ``x`` is a Tensor, Program +Translator will transform the loop into PaddlePaddle static graph loop. The ``idx`` will be transformed to 1-D tensor with value ``0, 1, ...`` and the ``val`` will be transformed to ``x[0], x[1], ...`` in each loop iteration. + +4. ``break, continue`` + +ProgramTranslator supports ``break, continue`` statements in loop. ProgramTranslator will add some PaddlePaddle static graph ``cond`` statements to skip execution of corresponding part when ``break, continue`` condition is meet. + +5. ``return`` + +ProgramTranslator supports ``return`` in a conditonal block or loop body, not necessary to be at the end of a function. It also supports returning tuple with various length of Tensors with different dtype. The implementation is adding some PaddlePaddle static graph ``cond`` statement to skipparts of code when ``return`` is triggered. + + +Some Python basic operators +--------------------------- + +1. ``+, -, *, /, **, >, <, >= , <=, ==`` etc. + +Because PaddlePaddle static graph overrides those Python basic arithmetic operators and comparison operators, ProgramTranslator can support those operators. + +2. 
``and, or, not`` logical operators + +Python has ``and, or, not`` keywards as basic logical operators, ProgramTranslator will check whether the variables of the logical operators are Tensors, if they are Tensors, ProgramTranslator replaces the ``and, or, not`` statements into corresponding PaddlePaddle static graph logical operator and run it. + +3. Type casting + +In dygraph mode, users can use Python type casting grammar. For instance, if ``x`` is a Tensor, ``float(x)`` casts the data type of ``x`` to float. ProgramTranslator will check whether ``x`` is a Tensor during run time, if it is, the casting sentence will be modified to PaddlePaddle static graph ``cast`` API so that its dtype can be changed in the dygraph to static transformation. + +Python functions +------------------------------ + +1. ``print`` + +In dygraph mode, ``print(x)`` will print Tensor value if ``x`` is a Tensor. ProgramTranslator converts the built-in ``print`` to PaddlePaddle static graph ``Print`` API during dygraph to static graph transformation if the arguments are Tensors, otherwise ProgramTranslator won't convert the ``print``. + +2. ``len`` + +If ``x`` is a Tensor, ``len(x)`` can get the length at 0-dimension of ``x`` in dygraph mode. ProgramTranslator turns it to PaddlePaddle static graph ``shape`` API and returns the 0-dimension of the ``shape``, else if ``x`` is a TensorArray, then ``len(x)`` will be transformed to static graph API ``control_flow.array_length`` to return the length of TensorArray. In other cases, the ``len`` function will be executed as Python built-in ``len`` + +3. lambda expression + +ProgramTranslator supports Python lambda expression and it modifies code to return the expected result. + + +4. Calling function + +If the transformed function calls another function, ProgramTranslator also transform the called function. The benefit is that users can add one decorator at the outside function to do transformation, no need to add the decorator for each function. Note that ProgramTranslator doesn't support +that a function calls itself recursively, the details is in the unsupported grammars section below. + + +Errors and Exceptions +--------------------- + +1. ``assert`` + +If ``x`` is a Tensor, ``assert x`` statement can assert ``x`` to be ``True`` or non-zero value in dygraph mode. ProgramTranslator converts the statement into PaddlePaddle static graph ``Assert`` API to support this grammar. + + +Python containers +----------------- + +1. ``list``: if all elements in a list are Tensors, then ProgramTranslator converts it to TensorArray. PaddlePaddle static graph TensorArray supports append, pop, and modify, other list operations such as sort cannot be supported. When not all elements in a list are Tensors, ProgramTranslator will treat it as normal Python list. + +2. ``dict``: ProgramTranslator will add the Tensors in a dict into PaddlePaddle static graph ``Program``, so ``dict`` is supported by ProgramTranslator. + +Unsupported grammars +-------------------- + +1. Use the shape of output tensor of ``reshape`` + +For example, ``x = reshape(x, shape=shape_tensor)`` , then use ``x.shape[0]`` to do other operation. Due to the difference between dygraph and static graph, it is okay in dygraph but it will fail in static graph. The reason is that APIs return computation result in dygraph mode, so ``x.shape`` has deterministic value after calling ``reshape`` . 
However, static graph doesn't have the value ``shape_tensor`` during building network, so PaddlePaddle doesn't know the value of ``x.shape`` after calling ``reshape``. PaddlePaddle static graph will set -1 to represent unknown shape value for each dimension of ``x.shape`` in this case, not the expected value. + +We suggest to set fixed shape value as much as possible, reduce the reshape operation. + +2. List of list of Tensor + +For example: ``l = [[tensor1, tensor2], [tensor3, tensor4]]``, because ProgramTranslator transformed a list whose elements are all Tensors into PaddlePaddle static graph TensorArray, but TensorArray doesn't support multi-dimensions, ProgramTranslator cannot run this case. + +We suggest to use 1-D list at most time, or use PaddlePaddle API ``create_array, array_read, array_write`` to control TensorArray. + +3. Convert Tensor to numpy array and do operation + +For example, user doesn't return Tensor in the decorated function but call ``numpy.array(tensor)`` to convert Tensor to numpy array and then use numpy API to compute on it. In dygraph mode, it is okey because Tensor has value, but Tensor is variable for building network in static graph mode, it doesn't contain value if not in static graph running time, so we cannot do numpy calculation on it. + +We suggest to use PaddlePaddle APIs to replace numpy API in this case. + +4. A function calls itself recursively + +ProgramTranslator doesn't support a function calls itself recursively, the reason is that recursive function usually uses ``if-else`` for a condition to stop the recursion, the stop condition will be transformed to a ``cond`` in static graph mode. Since ``cond`` just builds network, it cannot determine how many times it recursively builds network during network built stage, so the function will recursively call itself and build network until stack overflow. Due to above reason, ProgramTranslator cannot support a function calls itself recursively now. + +We suggest to write non-recursive function in this case. diff --git a/doc/paddle/guides/dygraph_to_static/index_cn.rst b/doc/paddle/guides/dygraph_to_static/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2e1e51e2308171a012f2c60cbc67e20c06e525ec --- /dev/null +++ b/doc/paddle/guides/dygraph_to_static/index_cn.rst @@ -0,0 +1,39 @@ +############### +动态图转静态图 +############### + +动态图有诸多优点,包括易用的接口,Python风格的编程体验,友好的debug交互机制等。在动态图模式下,代码是按照我们编写的顺序依次执行。这种机制更符合Python程序员的习 +惯,可以很方便地将大脑中的想法快速地转化为实际代码,也更容易调试。但在性能方面, +Python执行开销较大,与C++有一定差距。因此在工业界的许多部署场景中(如大型推荐系统、移动端)都倾向于直接使用C++来提速。 + +相比动态图,静态图在部署方面更具有性能的优势。静态图程序在编译执行时,先搭建模型 +的神经网络结构,然后再对神经网络执行计算操作。预先搭建好的神经网络可以脱离Python依赖,在C++端被重新解析执行,而且拥有整体网络结构也能进行一些网络结构的优化。 + +动态图代码更易编写和debug,但在部署性能上,静态图更具优势。因此我们新增了动态图转静态图的功能,支持用户依然使用动态图编写组网代码。PaddlePaddle会对用户代码进行 +分析,自动转换为静态图网络结构,兼顾了动态图易用性和静态图部署性能两方面优势。 + +我们在以下链接介绍PaddlePaddle动态图转静态图的各个部分: + +- `基本用法 `_ : 介绍了动态图转静态图的基本使用方法 + +- `内部架构原理 `_ :介绍了动态图转静态图的架构原理 + +- `支持语法列表 `_ :介绍了动态图转静态图支持的语法以及罗列不支持的语法写法 + +- `InputSpec功能介绍 `_ :介绍了动态图转静态图指定输入InputSpec的功能和用法 + +- `报错信息处理 `_ :介绍了动态图转静态图的报错信息处理方法 + +- `调试方法 `_ :介绍了动态图转静态图支持的调试方法 + + +.. 
toctree:: + :hidden: + + basic_usage_cn.rst + program_translator_cn.rst + grammar_list_cn.rst + input_spec_cn.rst + error_handling_cn.md + debugging_cn.md + diff --git a/doc/paddle/guides/dygraph_to_static/index_en.rst b/doc/paddle/guides/dygraph_to_static/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..5471b2e12985f7795bbc475e07ee775bc8a74657 --- /dev/null +++ b/doc/paddle/guides/dygraph_to_static/index_en.rst @@ -0,0 +1,35 @@ +####################### +Dygraph to Static Graph +####################### + +The imperative-style coding of PaddlePaddle takes advantage of flexibility, Pythonic coding, and easy-to-debug interface. In dygraph mode, code immediately executes kernels and gets numerical results, which allows users to enjoy traditional Pythonic code order. Therefore it is efficient to transform idea into real code and simple to debug. However, Python code is usually slower than C++ thus lots of industrial systems (such as large recommend system, mobile devices) prefer to deploy with C++ implementation. + +Static graph is better at speed and portability. Static graph builds the network structure during compiling time and then does computation. The built network intermediate representation can be executed in C++ and gets rids of Python dependency. + +While dygraph has usability and debug benefits and static graph yields performance and deployment advantage, we adds functionality to convert dygraph to static graph. Users use imperative mode to write dygraph code and PaddlePaddle will analyze the Python syntax and turn it into network structure of static graph mode. Our approach retains both the usability of dygraph and portability of static graph. + +We introduce the transformation of dygraph to static graph in the following links: + +- `Basic Usage `_ : Introduce the basic usage for transforming dygraph code into static code. + +- `Architecture `_ : Introduce the architecture of ProgramTranslator. + +- `Supported Grammars `_ : Introduce the grammars supported by ProgramTranslator and list unsupported grammars. + +- `Introduction of InputSpec `_ : Introduce the usage of InputSpec to specify the input signature from dygraph to static program. + +- `Error Handling `_ : Introduce the error handling by ProgramTranslator. + +- `Debugging Methods `_ : Introduce the debugging methods when using ProgramTranslator. + + +.. toctree:: + :hidden: + + basic_usage_en.rst + program_translator_en.rst + grammar_list_en.rst + input_spec_en.rst + error_handling_en.md + debugging_en.md + diff --git a/doc/paddle/guides/dygraph_to_static/input_spec_cn.rst b/doc/paddle/guides/dygraph_to_static/input_spec_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..26db16711283e109f8925993af2c521be13fc737 --- /dev/null +++ b/doc/paddle/guides/dygraph_to_static/input_spec_cn.rst @@ -0,0 +1,200 @@ +.. _user_guide_dy2sta_input_spec_cn: + +InputSpec功能介绍 +================= + + +在PaddlePaddle(下文简称:Paddle)框架中,可以通过 ``paddle.jit.to_static`` 装饰普通函数或 Layer 的最外层 forward 函数,将动态图模型转换为静态图执行。但在动转静时,需要给模型传入 Tensor 数据并执行一次前向,以保证正确地推导出网络中各 Tensor 的 shape 。此转换流程需要显式地执行一次动态图函数,增加了接口使用的成本;同时,传入实际 Tensor 数据则无法定制化模型输入的shape,如指定某些维度为 None 。 + +因此,Paddle 提供了 InputSpec 接口,可以更加便捷地执行动转静功能,以及定制化输入 Tensor 的 shape 、name 等信息。 + + +一、InputSpec 对象构造方法 +------------------------- + +1.1 直接构造 InputSpec 对象 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +InputSpec 接口在 ``paddle.static`` 目录下,用于描述一个 Tensor 的签名信息:shape、dtype、name。使用样例如下: + +.. 
code-block:: python + + from paddle.static import InputSpec + + x = InputSpec([None, 784], 'float32', 'x') + label = InputSpec([None, 1], 'int64', 'label') + + print(x) # InputSpec(shape=(-1, 784), dtype=VarType.FP32, name=x) + print(label) # InputSpec(shape=(-1, 1), dtype=VarType.INT64, name=label) + + +InputSpec 初始化中的只有 ``shape`` 是必须参数, ``dtype`` 和 ``name`` 可以缺省,默认取值分别为 ``float32`` 和 ``None`` 。 + + + +1.2 根据 Tensor 构造 InputSpec 对象 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +可以借助 ``InputSpec.from_tensor`` 方法,从一个 Tensor 直接创建 InputSpec 对象,其拥有与源 Tensor 相同的 ``shape`` 和 ``dtype`` 。使用样例如下: + +.. code-block:: python + + import numpy as np + import paddle + from paddle.static import InputSpec + + paddle.disable_static() + + x = paddle.to_tensor(np.ones([2, 2], np.float32)) + x_spec = InputSpec.from_tensor(x, name='x') + print(x_spec) # InputSpec(shape=(2, 2), dtype=VarType.FP32, name=x) + + +.. note:: + 若未在 ``from_tensor`` 中指定新的name,则默认使用与源Tensor相同的name。 + + +1.3 根据 numpy.ndarray 构造 InputSpec 对象 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +也可以借助 ``InputSpec.from_numpy`` 方法,从一个 Numpy.ndarray 直接创建 InputSpec 对象,其拥有与源 ndarray 相同的 ``shape`` 和 ``dtype`` 。使用样例如下: + +.. code-block:: python + + import numpy as np + from paddle.static import InputSpec + + x = np.ones([2, 2], np.float32) + x_spec = InputSpec.from_numpy(x, name='x') + print(x_spec) # InputSpec(shape=(2, 2), dtype=VarType.FP32, name=x) + + +.. note:: + 若未在 ``from_numpy`` 中指定新的 name,则默认使用 None 。 + + +二、基本使用方法 +------------------ + +动转静 ``paddle.jit.to_static`` 装饰器支持 ``input_spec`` 参数,用于指定被装饰函数每个 Tensor 类型输入参数的 ``shape`` 、 ``dtype`` 、 ``name`` 等签名信息。不必再显式地传入 Tensor 数据以触发网络层 shape 的推导。 Paddle 会解析 ``to_static`` 中指定的 ``input_spec`` 参数,构建网络的起始输入,进行后续的模型组网。 + +同时,借助 ``input_spec`` 参数,可以自定义输入 Tensor 的 shape ,比如指定 shape 为 ``[None, 784]`` ,其中 ``None`` 表示变长的维度。 + +2.1 to_static 装饰器模式 +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +如下是一个简单的使用样例: + +.. code-block:: python + + import paddle + from paddle.jit import to_static + from paddle.static import InputSpec + from paddle.fluid.dygraph import Layer + + class SimpleNet(Layer): + def __init__(self): + super(SimpleNet, self).__init__() + self.linear = paddle.nn.Linear(10, 3) + + @to_static(input_spec=[InputSpec(shape=[None, 10], name='x'), InputSpec(shape=[3], name='y')]) + def forward(self, x, y): + out = self.linear(x) + out = out + y + return out + + + paddle.disable_static() + + net = SimpleNet() + + # save static model for inference directly + paddle.jit.save(net, './simple_net') + + +在上述的样例中, ``to_static`` 装饰器中的 ``input_spec`` 为一个 InputSpec 对象组成的列表,用于依次指定参数 x 和 y 对应的 Tensor 签名信息。在实例化 SimpleNet 后,可以直接调用 ``paddle.jit.save`` 保存静态图模型,不需要执行任何其他的代码。 + +.. note:: + 1. input_spec 参数中只支持 InputSpec 对象,暂不支持如 int 、 float 等类型。 + 2. 若指定 input_spec 参数,则需为被装饰函数的所有必选参数都添加对应的 InputSpec 对象,如上述样例中,不支持仅指定 x 的签名信息。 + 3. 若被装饰函数中包括非 Tensor 参数,且指定了 input_spec ,请确保函数的非 Tensor 参数都有默认值,如 ``forward(self, x, use_bn=False)`` + + +2.2 to_static函数调用 +^^^^^^^^^^^^^^^^^^^^ + +若期望在动态图下训练模型,在训练完成后保存预测模型,并指定预测时需要的签名信息,则可以选择在保存模型时,直接调用 ``to_static`` 函数。使用样例如下: + +.. 
code-block:: python + + class SimpleNet(Layer): + def __init__(self): + super(SimpleNet, self).__init__() + self.linear = paddle.nn.Linear(10, 3) + + def forward(self, x, y): + out = self.linear(x) + out = out + y + return out + + paddle.disable_static() + net = SimpleNet() + + # train process (Pseudo code) + for epoch_id in range(10): + train_step(net, train_reader) + + net = to_static(net, input_spec=[InputSpec(shape=[None, 10], name='x'), InputSpec(shape=[3], name='y')]) + + # save static model for inference directly + paddle.jit.save(net, './simple_net') + + +如上述样例代码中,在完成训练后,可以借助 ``to_static(net, input_spec=...)`` 形式对模型实例进行处理。Paddle 会根据 input_spec 信息对 forward 函数进行递归的动转静,得到完整的静态图,且包括当前训练好的参数数据。 + + +2.3 支持 list 和 dict 推导 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +上述两个样例中,被装饰的 forward 函数的参数均为 Tensor 。这种情况下,参数个数必须与 InputSpec 个数相同。但当被装饰的函数参数为list或dict类型时,``input_spec`` 需要与函数参数保持相同的嵌套结构。 + +当函数的参数为 list 类型时,input_spec 列表中对应元素的位置,也必须是包含相同元素的 InputSpec 列表。使用样例如下: + +.. code-block:: python + + class SimpleNet(Layer): + def __init__(self): + super(SimpleNet, self).__init__() + self.linear = paddle.nn.Linear(10, 3) + + @to_static(input_spec=[[InputSpec(shape=[None, 10], name='x'), InputSpec(shape=[3], name='y')]]) + def forward(self, inputs): + x, y = inputs[0], inputs[1] + out = self.linear(x) + out = out + y + return out + + +其中 ``input_spec`` 参数是长度为 1 的 list ,对应 forward 函数的 inputs 参数。 ``input_spec[0]`` 包含了两个 InputSpec 对象,对应于参数 inputs 的两个 Tensor 签名信息。 + +当函数的参数为dict时, ``input_spec`` 列表中对应元素的位置,也必须是包含相同键(key)的 InputSpec 列表。使用样例如下: + +.. code-block:: python + + class SimpleNet(Layer): + def __init__(self): + super(SimpleNet, self).__init__() + self.linear = paddle.nn.Linear(10, 3) + + @to_static(input_spec=[InputSpec(shape=[None, 10], name='x'), {'x': InputSpec(shape=[3], name='bias')}]) + def forward(self, x, bias_info): + x_bias = bias_info['x'] + out = self.linear(x) + out = out + x_bias + return out + + +其中 ``input_spec`` 参数是长度为 2 的 list ,对应 forward 函数的 x 和 bias_info 两个参数。 ``input_spec`` 的最后一个元素是包含键名为 x 的 InputSpec 对象的 dict ,对应参数 bias_info 的 Tensor 签名信息。 + + +更多关于动转静 ``to_static`` 搭配 ``paddle.jit.save/load`` 的使用方式,可以参考 :ref:`user_guide_model_save_load` 。 \ No newline at end of file diff --git a/doc/paddle/guides/dygraph_to_static/input_spec_en.rst b/doc/paddle/guides/dygraph_to_static/input_spec_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..c7c62ffc668f24dc9b25aa9a371152d26125250e --- /dev/null +++ b/doc/paddle/guides/dygraph_to_static/input_spec_en.rst @@ -0,0 +1,196 @@ +.. _user_guide_dy2sta_input_spec_cn: + +Introduction of InputSpec +=========================== + + +In PaddlePaddle(Referred to as "Paddle"), The dygraph model can be converted to static program by decorating the outermost forward function of Layer with ``paddle.jit.to_static`` . But actual Tensor data should be feeded into the model to ensure that the shape of each Tensor in the network is correctly deduced in transformation. This transformation process needs to explicitly execute the forward function, which increases the cost of the interface. Meanwhile, the way that need feed Tensor data fails to customize the shape of inputs, such as assigning some dimensions to None. + +Therefore, Paddle provides the InputSpec interface to perform the transformation more easily, and supports to customize the signature of input Tensor, such as shape, name and so on. + + +1. 
InputSpec interface +------------------------- + +1.1 Construct InputSpec object +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The InputSpec interface is under the ``paddle.static`` directory. It's used to describe the Tensor's signature information: shape, dtype, name. See example as follows: + +.. code-block:: python + + from paddle.static import InputSpec + + x = InputSpec([None, 784], 'float32', 'x') + label = InputSpec([None, 1], 'int64', 'label') + + print(x) # InputSpec(shape=(-1, 784), dtype=VarType.FP32, name=x) + print(label) # InputSpec(shape=(-1, 1), dtype=VarType.INT64, name=label) + + +In InputSpec initialization, only ``shape`` is a required parameter. ``dtype`` and ``name`` can be default with values ``Float32`` and ``None`` respectively. + + + +1.2 Constructed from Tensor +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +An InputSpec object can be created directly from a Tensor by using ``inputSpec.from_tensor`` method. It has same ``shape`` and ``dtype`` as the source Tensor. See example as follows: + +.. code-block:: python + + import numpy as np + import paddle + from paddle.static import InputSpec + + paddle.disable_static() + + x = paddle.to_tensor(np.ones([2, 2], np.float32)) + x_spec = InputSpec.from_tensor(x, name='x') + print(x_spec) # InputSpec(shape=(2, 2), dtype=VarType.FP32, name=x) + + +.. note:: + If a new name is not specified in ``from_tensor`` , the name from source Tensor is used by default. + + +1.3 Constructed from numpy.ndarray +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +An InputSpec object can also be created directly from an Numpy.ndarray by using the ``inputSpec.from_numpy`` method. It has same ``shape`` and ``dtype`` as the source ndarray. See example as follows: + +.. code-block:: python + + import numpy as np + from paddle.static import InputSpec + + x = np.ones([2, 2], np.float32) + x_spec = InputSpec.from_numpy(x, name='x') + print(x_spec) # InputSpec(shape=(2, 2), dtype=VarType.FP32, name=x) + + +.. note:: + If a new name is not specified in ``from_numpy`` , ``None`` is used by default. + + +2. Basic usage +------------------ + +Currently, the decorator ``paddle.jit.to_static`` support ``input_spec`` argument. It is used to specify signature information such as ``shape`` , ``dtype`` , ``name`` for each Tensor corresponding to argument from decorated function. Users do not have to feed actual data explicitly to trigger the deduction of the shape in the network. The ``input_spec`` argument specified in ``to_static`` will be analyzed to construct input placeholder of the network. + +At the same time, the ``input_spec`` allow us to easily define input Tensor shape. For example, specifying shape as ``[None, 784]`` , where ``None`` represents a variable length dimension. + +2.1 Decorator mode of to_static +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +A simple example as follows: + +.. code-block:: python + + import paddle + from paddle.jit import to_static + from paddle.static import InputSpec + from paddle.fluid.dygraph import Layer + + class SimpleNet(Layer): + def __init__(self): + super(SimpleNet, self).__init__() + self.linear = paddle.nn.Linear(10, 3) + + @to_static(input_spec=[InputSpec(shape=[None, 10], name='x'), InputSpec(shape=[3], name='y')]) + def forward(self, x, y): + out = self.linear(x) + out = out + y + return out + + + paddle.disable_static() + + net = SimpleNet() + + # save static model for inference directly + paddle.jit.save(net, './simple_net') + + +In the above example, ``input_spec`` in ``to_static`` decorator is a list of InputSpec objects. 
It is used to specify signature information corresponding x and y. After instantiating SimpleNet, ``paddle.jit.save`` can be directly called to save the static graph model without executing any other code. + +.. note:: + 1. Only InputSpec objects are supported in input_spec argument, and types such as int, float, etc. are not supported temporarily. + 2. If you specify the input_spec argument, you need to add the corresponding InputSpec object for all non-default parameters of the decorated function. As above sample, only specifying signature information x is not supported. + 3. If the decorated function includes non-tensor parameters and input_spec is specified, make sure that the non-tensor parameters of the function have default values, such as ``forward(self, x, use_bn=False)`` . + + +2.2 Call to_static directly +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +If we want to train model in dygraph mode and only expect to save the inference model after training with specified the signature information. We can call ``to_static`` function directly while saving the model. See example as follows: + +.. code-block:: python + + class SimpleNet(Layer): + def __init__(self): + super(SimpleNet, self).__init__() + self.linear = paddle.nn.Linear(10, 3) + + def forward(self, x, y): + out = self.linear(x) + out = out + y + return out + + paddle.disable_static() + net = SimpleNet() + + # train process (Pseudo code) + for epoch_id in range(10): + train_step(net, train_reader) + + net = to_static(net, input_spec=[InputSpec(shape=[None, 10], name='x'), InputSpec(shape=[3], name='y')]) + + # save static model for inference directly + paddle.jit.save(net, './simple_net') + +In the above example, ``to_static(net, input_spec=...)`` can be used to process the model after training. Paddle will recursively convert forward function to get the complete static program according to ``input_spec`` information. Meanwhile, it includes the trained parameters. + + +2.3 Support list and dict derivation +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +In the above two examples, the arguments of the decorated forward function correspond to the InputSpec one to one. But when the decorated function takes arguments with a list or dict type, ``input_spec`` needs to have the same nested structure as the arguments. + +If a function takes an argument of type list, the element in the ``input_spec`` must also be an InputSpec list containing the same elements. A simple example as follows: + +.. code-block:: python + + class SimpleNet(Layer): + def __init__(self): + super(SimpleNet, self).__init__() + self.linear = paddle.nn.Linear(10, 3) + + @to_static(input_spec=[[InputSpec(shape=[None, 10], name='x'), InputSpec(shape=[3], name='y')]]) + def forward(self, inputs): + x, y = inputs[0], inputs[1] + out = self.linear(x) + out = out + y + return out + + +The length of ``input_spec`` is 1 corresponding to argument inputs in forward function. ``input_spec[0]`` contains two InputSpec objects corresponding to two Tensor signature information of inputs. + +If a function takes an argument of type dict, the element in the ``input_spec`` must also be an InputSpec dict containing the same keys. A simple example as follows: + +.. 
code-block:: python + + class SimpleNet(Layer): + def __init__(self): + super(SimpleNet, self).__init__() + self.linear = paddle.nn.Linear(10, 3) + + @to_static(input_spec=[InputSpec(shape=[None, 10], name='x'), {'x': InputSpec(shape=[3], name='bias')}]) + def forward(self, x, bias_info): + x_bias = bias_info['x'] + out = self.linear(x) + out = out + x_bias + return out + + +The length of ``input_spec`` is 2 corresponding to arguments x and bias_info in forward function. The last element of ``input_spec`` is a InputSpec dict with same key corresponding to signature information of bias_info. diff --git a/doc/paddle/guides/dygraph_to_static/program_translator_cn.rst b/doc/paddle/guides/dygraph_to_static/program_translator_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8a7f7ca27b5adc5548c145d6dd4f86e19201ef15 --- /dev/null +++ b/doc/paddle/guides/dygraph_to_static/program_translator_cn.rst @@ -0,0 +1,42 @@ +内部架构原理 +============== + +TracedLayer的原理就是trace,相对简单,因此我们在这里不展开描述。本节将主要阐述ProgramTranslator基于源代码将动态图代码转化为静态图代码。 + + +转化过程发生在用户开始调用被装饰的函数,转换过程在装饰器中实现。我们将内部涉及的过程分为以下几步: + +函数与缓存 +------------ + +动态图转静态图的主体是函数(Function)。对于函数内包含的PaddlePaddle接口,如果是仅计算相关算子代码语句,那么因为PaddlePaddle动态图和静态图接口一致,我们不需要额外转换这些代码为静态图代码。但是对于动态图,此类代码接口是直接运行计算和返回结果,而对于静态图此类代码接口其实是组网。那么如果被转化的函数被调用多次,动态图转静态图后会多次组网添加对应算子,这显然会导致问题。为了解决这个问题以及为了加速动转静转化过程,我们维护了被装饰器装饰的函数(Function)与其输入形状(shape),数据类型(dtype)映射到被转化后组网的Program的缓存(Cache)。当要被转化的函数命中缓存,我们直接用对应存储的Program运行静态图得到结果,否则我们才进行语句转化,并且转化成功后的Program存储进缓存。 + +动态图源码转AST(抽象语法树) +------------------------------ + +动态图转静态图的最核心部分类似一个编译器,解析动态图代码语句为AST,再对应AST进行改写,最后反转回成静态图代码。从函数转化为代码字符串可以使用Python的inspect.getsource。从字符串Python提供了自带的 `ast `_ 库来解析字符串为AST,但是由于Python2,Python3的语法略有不同,为了避免我们需要额外处理这些Python2,Python3的不同情况,我们使用了统一Python2,Python3的开源AST处理 `gast库 `_ 。这些接口使得函数转化为AST没有本质上的困难。 + +AST改写和静态图源码转换 +------------------------- + +这部分为动转静最核心的部分,我们对支持的各种语法进行ast转写。其中最重要的Python控制流,if-else,while,for循环被分别分析转化为PaddlePaddle静态图接口cond,while_loop等接口实现。我们对想转化的每一种主要语法创建一个Transformer(这里的Transformer是Python ast转写的概念,而不是自然语言处理NLP领域的Transformer),每个Transformer扫一遍AST并进行对应的改写。最后被转化完成的AST我们使用gast提供的接口转回成源码。 + +静态图源码作为动态图一部分运行的技术 +-------------------------------------- + +为了动静转化更加易用和被转化的代码能在动态图中复用,我们在拥有源码后运行生成Program,并将这个Program作为一个大op,包装成动态图的一个op,这样既能把用户的代码转为静态图提速或者保存部署,另一方面如果用户想在Python层使用生成的静态图代码作为动态图的一部分继续训练或者别的动态图运算也是可以直接使用。 + +易用性与Debug功能在动转静过程的实现 +------------------------------------- + +正如AST转写类似编译器,而一般编译器都会提供debug断点,报错,输出一些中间代码等功能。我们在进行动转静时,万一用户的动态图代码出错,或者用户想断点调试,或者用户想看看被转化后的静态图代码是否符合其预期,我们也希望能够像编译器一样提供这些易用性功能,使得动转静兼顾性能和部署同时还具有易用性。我们这里将列出这些功能的实现方式 + +A. 报错对应到动态图代码行。由于被转化后的静态图代码和原动态图代码不同,Python运行出错时会报静态图的错误,因此我们在每一次AST转写时添加AST节点对应的原动态图代码行等信息,在Python报错栈中将静态图的报错转化成对应的动态图源码报错 + +B. 设置断点功能。我们保留了被转化后代码的中的pdb.set_trace(), 用户可以使用这种方式进行断点调试 + +C. 查看最后转化的静态图代码。我们输出为一个StaticLayer class,这个StaticLayer可以直接被调用,但是也存储转化后的代码,可以调用StaticLayer.code来获得转化后的代码。 + +D. 输出中间转化状态代码,甚至不同语法Transformer转化的代码,比如经过for循环转化后代码是什么样的。我们开放接口设定了log level来让用户可以打印中间状态转化的代码。 + + diff --git a/doc/paddle/guides/dygraph_to_static/program_translator_en.rst b/doc/paddle/guides/dygraph_to_static/program_translator_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..e130bdac0c81fc4bae325c8bc4c8319cc11bb634 --- /dev/null +++ b/doc/paddle/guides/dygraph_to_static/program_translator_en.rst @@ -0,0 +1,41 @@ +Architecture +============== + +The basic idea of TracedLayer is tracing, it is relatively simple so we won't expend here. This section will talk about the source code transformation of ProgramTranslator. 
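Before walking through the internal steps, here is a minimal, hedged sketch of the user-facing entry point that the rest of this section explains. The function name ``depend_tensor_if`` and its input values are invented for illustration; the sketch only relies on the public ``paddle.jit.to_static`` decorator and on the ``code`` attribute of the resulting ``StaticLayer`` described near the end of this section.

.. code-block:: python

    import paddle
    from paddle.jit import to_static

    # A toy dygraph function. Decorating it hands its source code to
    # ProgramTranslator; the actual transformation happens on the first call.
    @to_static
    def depend_tensor_if(x):
        # Tensor-dependent Python control flow such as this `if` is the kind
        # of grammar that the AST transformers rewrite into the static graph
        # `cond` API, as explained below.
        if paddle.mean(x) > 5.0:
            x = x - 1.0
        else:
            x = x + 1.0
        return x

    paddle.disable_static()  # dygraph mode, as in the other guides

    x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
    out = depend_tensor_if(x)  # first call: transform the source, then build and run the Program

    # The decorated object stores the converted source; `code` returns the
    # generated static graph code (see "Error handling and Debug" below).
    print(depend_tensor_if.code)

Calling the decorated function once is what triggers the pipeline described in the following subsections: looking up the cache, parsing the source into an AST, rewriting the AST, and running the resulting Program as part of dygraph.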
+ +The transformation is implemented in the decorator so transformation happens when user calls the decorated function, the procedure includes these steps: + +Function and cache +-------------------- + +The entity for transforming dygraph to static graph is the decorated function. For the PaddlePaddle APIs in the function, since they are same code under dygraph mode and static mode, we don't have to transform those code. However, those APIs are computation in dygraph model while they are building network in static graph mode, if the transformed functions are called multiple times, those APIs will build network multiple times in static graph, which can cause problem. To solve it as well as speed up the transformation, we maintain a cache that maps from function, input shapes, input data types to the Program built by the transformed function. If the function hits cache, we run the stored Program in static graph mode to get result, else we do the code transformation on the function and store the transformed Program into the cache. + +From dygraph source code to AST (Abstract Syntax Tree) +-------------------------------------------------------- + +The core of transforming dygraph to static graph is similar to a compiler, we parse the dygraph code into AST, change AST, then turn it back into static graph code. We use Python ``inspect.getsource`` to get the source code string of the function. Python provides `ast `_ library to parse string code into AST, but Python2, Python3 have slight grammar difference. To avoid the work to handle different grammars, we used an open source AST library `gast `_ that provides compatibility AST among various Python versions. There is no essential difficulty to turn function into AST with these library. + +Transform AST and turn it to static graph code +------------------------------------------------ + +This part is the key part in ProgramTranslator, we modify AST for supported grammars. Those important Python control flows, such as ``if-elif-else, while, for`` loop are converted to PaddlePaddle static graph API ``cond, while_loop`` and so on. We created a Transformer (AST-to-AST Transformer in Python, not the Transformer in Natural Language Process) to transform each grammar. Every Transformer scans AST and modify it. Lastly, we turn AST back to source code string by ``gast`` library. + +Running static graph code as part of dygraph +---------------------------------------------- + +In order to increase usability and re-use the transformed static graph code in dygraph, we wrap the generated Program as an dygraph op, the op can run the forward and backward computation of transformed Program. Then we can not only speed up dygraph code or save it for deployment, but also enable user to run part of their dygraph code in static graph mode so that they can continue training or other dygraph computation in their dygraph code. + +Error handling and Debug +-------------------------- + +Compiler usually supports debug functionality like breakpoint, throwing exception, print some mid-level codes. ProgramTranslator is similar to a compiler, users may would like to set breakpoints for debugging, or see whether the transformed static graph code is expected. So we also implemented those error handling and debug functionality. Here we list those functions and their implementation. + +A. Report errors/exceptions on dygraph code line. 
Because the transformed static graph code is different to original dygraph code, when Python executes the static graph code, the exceptions will be reported at static graph code. To locate the corresponding dygraph code, we attach some informations such as line number on AST nodes when we transform AST, then we can re-write the static graph exception to the corresponding dygraph code exception. + +B. We support ``pdb.set_trace()`` when running ProgramTranslator, user can add this line to set breakpoints. + +C. Check the transformed static graph code. Our transformed output is a Python class named ``StaticLayer``, this class can be called, but it also stores the transformed code string. Users could call ``StaticLayer.code`` to get the converted code. + +D. Print mid-level transformed code, such as what's the code after transforming ``for`` loop. We provide APIs to set log level to let user check the mid-level code. + + diff --git a/doc/paddle/guides/images/Axis_2.0.png b/doc/paddle/guides/images/Axis_2.0.png new file mode 100644 index 0000000000000000000000000000000000000000..32e4113315cfaadb7ef5ea1b33bc5b4750310611 Binary files /dev/null and b/doc/paddle/guides/images/Axis_2.0.png differ diff --git a/doc/paddle/guides/images/ComplexTensor_2.0.png b/doc/paddle/guides/images/ComplexTensor_2.0.png new file mode 100644 index 0000000000000000000000000000000000000000..bf09984a70d96d035670180e036116a83c9b195e Binary files /dev/null and b/doc/paddle/guides/images/ComplexTensor_2.0.png differ diff --git a/doc/paddle/guides/images/Tensor_2.0.png b/doc/paddle/guides/images/Tensor_2.0.png new file mode 100644 index 0000000000000000000000000000000000000000..6ac481de976dbba3dae7b7defcc4827cb9dc18c1 Binary files /dev/null and b/doc/paddle/guides/images/Tensor_2.0.png differ diff --git a/doc/paddle/guides/images/load_2.0.png b/doc/paddle/guides/images/load_2.0.png new file mode 100644 index 0000000000000000000000000000000000000000..dcbc89f0f5cbe43c4a443eb803ceffa39b610481 Binary files /dev/null and b/doc/paddle/guides/images/load_2.0.png differ diff --git a/doc/paddle/guides/images/save_2.0.png b/doc/paddle/guides/images/save_2.0.png new file mode 100644 index 0000000000000000000000000000000000000000..5a2fe671437d837ed96490e411896ed42d1d0203 Binary files /dev/null and b/doc/paddle/guides/images/save_2.0.png differ diff --git a/doc/paddle/guides/index_cn.rst b/doc/paddle/guides/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..22c01a2181ef8af898663ddcf6af1708ee994c5b --- /dev/null +++ b/doc/paddle/guides/index_cn.rst @@ -0,0 +1,24 @@ +######## +使用教程 +######## + +PaddlePaddle (PArallel Distributed Deep LEarning)是一个易用、高效、灵活、可扩展的深度学习框架。 + +您可参考PaddlePaddle的 `Github `_ 了解详情,也可阅读 `版本说明 <../release_note_cn.html>`_ 了解新版本的特性。 + +让我们从学习PaddlePaddle基本概念这里开始: + + +- `Tensor概念介绍 `_ : 飞桨中数据的表示方式,Tensor概念介绍, +- `版本迁移 <./migration_cn.html>`_:介绍 Paddle 1 到Paddle 2的变化与Paddle1to2转换工具的使用。 +- `动态图转静态图 <./dygraph_to_static/index_cn.html>`_:介绍 Paddle 动态图转静态图的方法 +- `模型存储与载入 <./model_save_load_cn.html>`_:介绍 Paddle 模型与参数存储载入的方法 + + +.. 
toctree:: + :hidden: + + tensor_introduction.md + migration_cn.rst + dygraph_to_static/index_cn.rst + model_save_load_cn.rst diff --git a/doc/paddle/guides/index_en.rst b/doc/paddle/guides/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..5a9837507b0bdd242c3f547ecaa31ce773c8fa72 --- /dev/null +++ b/doc/paddle/guides/index_en.rst @@ -0,0 +1,21 @@ +######## +Guides +######## + +PaddlePaddle (PArallel Distributed Deep LEarning) is a +simple, efficient and extensible deep learning framework. + +Please refer to `PaddlePaddle Github `_ for details, and `Release Note <../release_note_en.html>`_ for features incorporated in current version. + +Let's start with studying basic concept of PaddlePaddle: + +- `Introduction to Tensor `_ : Introduction of Tensor, which is the representation of data in Paddle. +- `migration tools <./migration_en.html>`_:how to use migration tools to upgrade your code. +- `dynamic to static <./dygraph_to_static/index_en.html>`_:how to convert your model from dynamic graph to static graph. + +.. toctree:: + :hidden: + + tensor_introduction_en.md + migration_en.rst + dygraph_to_static/index_en.rst diff --git a/doc/paddle/guides/migration_cn.rst b/doc/paddle/guides/migration_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..efd559c5fe334ba44fb8cdbee15e3f7dbd1ed7c3 --- /dev/null +++ b/doc/paddle/guides/migration_cn.rst @@ -0,0 +1,234 @@ +版本迁移 +==================== + +飞桨框架v2.0-beta,最重要的变化为API体系的全面升级以及动态图能力的全面完善。下文将简要介绍Paddle +2的变化。 + +主要变化 +-------- + +在飞桨框架v2.0中,我们做了许多的升级。首先,全面完善了动态图模式,相较于静态图而言,动态图每次执行一个运算,可以立即得到结果,能够使算法的开发变得更加高效。此外,本版本对API目录,进行了较大的调整。将API体系从1.X版本的 +``paddle.fluid.*`` 迁移到了 ``paddle.*`` 下。原则上,Paddle +2仍支持Paddle 1下的所有语法。但是,我们会逐步废弃掉 ``paddle.fluid`` +下的API,强烈建议您将Paddle 1的代码迁移到Paddle +2下,以避免后续带来不必要的麻烦。下文将介绍手动与自动两种方式,来完成Paddle +1到Paddle 2的迁移。 + +手动将Paddle 1 的代码迁移到 Paddle 2 +------------------------------------ + +本节将介绍如何将您的代码手动的从Paddle 1迁移到Paddle 2。 + +1、API的变化 +~~~~~~~~~~~~ + +对于Paddle +1下的API,您可以通过我们提供的API升级表(TODO),查看每个API的升级关系,从而手动完成修改。 +### 2、句法的变化 在Paddle 1中,通过 ``with fluid.dygraph.guard():`` +开启动态图模式,在Paddle 2.0-beta中,可以直接通过 +``paddle.disable_static()``\ 开启动态图。 + +Paddle1to2 自动迁移您的代码到Paddle2 +------------------------------------ + +Paddle 2 包含了许多API的变化,为了节约您将代码从Paddle 1迁移到Paddle +2的时间,我们提供了自动迁移工具–Paddle1to2,能够帮助您快速完成代码迁移。 + +安装 +~~~~ + +Paddle1to2可以通过pip的方式安装,方式如下: + +.. code:: ipython3 + + ! pip install -U paddle1to2 + +基本用法 +~~~~~~~~ + +Paddle1to2 可以使用下面的方式,快速使用: + +.. code:: ipython3 + + ! paddle1to2 --inpath /path/to/model.py + +这将在命令行中,以\ ``diff``\ 的形式,展示model.py从Paddle 1转换为Paddle +2的变化。如果您确认上述变化没有问题,只需要再执行: + +.. code:: ipython3 + + ! paddle1to2 --inpath /path/to/model.py --write + +就会原地改写model.py,将上述变化改写到您的源文件中。 +注意:我们会默认备份源文件,到~/.paddle1to2/下。 + +参数说明如下: + +- –inpath 输入文件路径,可以为单个文件或文件夹。 +- –write + 是否原地修改输入的文件,默认值False,表示不修改。如果为True,表示对文件进行原地修改。添加此参数也表示对文件进行原地修改。 +- –backup + 可选,是否备份源文件,默认值为\ ``~/.paddle1to2/``\ ,在此路径下备份源文件。 +- –no-log-file + 可选,是否需要输出日志文件,默认值为False,即输出日志文件。 +- –log-filepath + 可选,输出日志的路径,默认值为\ ``report.log``\ ,输出日志文件的路径。 +- –no-confirm + 可选,输入文件夹时,是否逐文件确认原地写入,只在\ ``--write``\ 为True时有效,默认值为False,表示需要逐文件确认。 +- –log-level 可选,log级别,可为[‘DEBUG’,‘INFO’,‘WARNING’,‘ERROR’] + 默认值:\ ``INFO`` +- –refactor 可选,debug时使用。 +- –print-match 可选,debug时使用。 + +使用教程 +~~~~~~~~ + +开始 +^^^^ + +在使用Paddle 1to2前,需要确保您已经安装了Paddle 2.0-beta版本。 + +.. code:: ipython3 + + import paddle + print (paddle.__version__) + # TODO change to paddle 2.0-beta + + +.. 
parsed-literal:: + + 0.0.0 + + +克隆\ `PaddlePaddle/models `__\ 来作为工具的测试。 + +.. code:: ipython3 + + ! git clone https://github.com/PaddlePaddle/models + + +.. parsed-literal:: + + Cloning into 'models'... + remote: Enumerating objects: 8, done. + remote: Counting objects: 100% (8/8), done. + remote: Compressing objects: 100% (8/8), done. + remote: Total 35011 (delta 1), reused 0 (delta 0), pack-reused 35003 + Receiving objects: 100% (35011/35011), 356.97 MiB | 1.53 MiB/s, done. + Resolving deltas: 100% (23291/23291), done. + + +查看帮助文档 +^^^^^^^^^^^^ + +paddle1to2 会随着 paddle +2.0-beta安装。所以您可以直接通过下面的方式,查看帮助文档。 + +.. code:: ipython3 + + ! paddle1to2 -h + + +.. parsed-literal:: + + usage: paddle1to2 [-h] [--log-level {DEBUG,INFO,WARNING,ERROR}] + [--no-log-file] [--log-filepath LOG_FILEPATH] --inpath + INPATH [--backup [BACKUP]] [--write] [--no-confirm] + [--refactor {refactor_import,norm_api_alias,args_to_kwargs,refactor_kwargs,api_rename,refactor_with,post_refactor}] + [--print-match] + + optional arguments: + -h, --help show this help message and exit + --log-level {DEBUG,INFO,WARNING,ERROR} + set log level, default is INFO + --no-log-file don't log to file + --log-filepath LOG_FILEPATH + set log file path, default is "report.log" + --inpath INPATH the file or directory path you want to upgrade. + --backup [BACKUP] backup directory, default is the "~/.paddle1to2/". + --write modify files in-place. + --no-confirm write files in-place without confirm, ignored without + --write. + --refactor {refactor_import,norm_api_alias,args_to_kwargs,refactor_kwargs,api_rename,refactor_with,post_refactor} + this is a debug option. Specify refactor you want to + run. If none, all refactors will be run. + --print-match this is a debug option. Print matched code and node + for each file. + + +Paddle 1的例子 +^^^^^^^^^^^^^^ + +这里是一个基于Paddle 1实现的一个mnist分类,部分内容如下: + +.. code:: ipython3 + + ! head -n 198 models/dygraph/mnist/train.py | tail -n 20 + + +.. parsed-literal:: + + with fluid.dygraph.guard(place): + if args.ce: + print("ce mode") + seed = 33 + np.random.seed(seed) + fluid.default_startup_program().random_seed = seed + fluid.default_main_program().random_seed = seed + + if args.use_data_parallel: + strategy = fluid.dygraph.parallel.prepare_context() + mnist = MNIST() + adam = AdamOptimizer(learning_rate=0.001, parameter_list=mnist.parameters()) + if args.use_data_parallel: + mnist = fluid.dygraph.parallel.DataParallel(mnist, strategy) + + train_reader = paddle.batch( + paddle.dataset.mnist.train(), batch_size=BATCH_SIZE, drop_last=True) + if args.use_data_parallel: + train_reader = fluid.contrib.reader.distributed_batch_reader( + train_reader) + + +使用Paddle1to2进行转化 +^^^^^^^^^^^^^^^^^^^^^^ + +paddle1to2支持单文件的转化,您可以通过下方的命令直接转化单独的文件。 + +.. code:: ipython3 + + !paddle1to2 --inpath models/dygraph/mnist/train.py + +注意,对于参数的删除及一些特殊情况,我们都会打印WARNING信息,需要您仔细核对相关内容。 +如果您觉得上述信息没有问题,可以直接对文件进行原地修改,方式如下: + +.. code:: ipython3 + + !paddle1to2 --inpath models/dygraph/mnist/train.py --write + +此时,命令行会弹出下方的提示: + +.. code:: ipython3 + + "models/dygraph/mnist/train.py" will be modified in-place, and it has been backed up to "/Users/chenlong/.paddle1to2/train.py_backup_2020_09_09_20_35_15_037821". Do you want to continue? [Y/n]: + +输入\ ``y`` +后即开始执行代码迁移。为了高效完成迁移,我们这里采用了原地写入的方式。此外,为了防止特殊情况,我们会备份转换前的代码到 +``~/.paddle1to2`` 目录下,如果需要,您可以在备份目录下找到转换前的代码。 + +代码迁移完成后,会生成一个report.log文件,记录了迁移的详情。内容如下: + +.. code:: ipython3 + + ! 
cat report.log + +注意事项 +~~~~~~~~ + +- 本迁移工具不能完成所有API的迁移,有少量的API需要您手动完成迁移,具体信息可见WARNING。 + +使用Paddle 2 +~~~~~~~~~~~~ + +完成迁移后,代码就从Paddle 1迁移到了Paddle 2,您就可以在Paddle +2下进行相关的开发。 diff --git a/doc/paddle/guides/migration_en.rst b/doc/paddle/guides/migration_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..6694086966958da182596cd554c9a4c708b2180f --- /dev/null +++ b/doc/paddle/guides/migration_en.rst @@ -0,0 +1,7 @@ +.. _guides_migration: + +================== +Migration Tools +================== + +how to use migration tools to upgrade your codes. diff --git a/doc/paddle/guides/model_save_load_cn.rst b/doc/paddle/guides/model_save_load_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..73d0f00724ab993974dfeed74ef350bf1bf27214 --- /dev/null +++ b/doc/paddle/guides/model_save_load_cn.rst @@ -0,0 +1,742 @@ +.. _user_guide_model_save_load: + +############# +模型存储与载入 +############# + +一、存储载入体系简介 +################## + +飞桨框架2.x对模型与参数的存储与载入相关接口进行了梳理,根据接口使用的场景与模式,分为三套体系,分别是: + +1.1 动态图存储载入体系 +-------------------- + +为提升框架使用体验,飞桨框架2.0将主推动态图模式,动态图模式下的存储载入接口包括: + +- paddle.save +- paddle.load +- paddle.jit.save +- paddle.jit.load + +本文主要介绍飞桨框架2.0动态图存储载入体系,各接口关系如下图所示: + +.. image:: https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/paddle/guides/images/save_2.0.png?raw=true +.. image:: https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/paddle/guides/images/load_2.0.png?raw=true + +1.2 静态图存储载入体系(飞桨框架1.x) +---------------------------- + +静态图存储载入相关接口为飞桨框架1.x版本的主要使用接口,出于兼容性的目的,这些接口仍然可以在飞桨框架2.x使用,但不再推荐。相关接口包括: + +- paddle.io.save +- paddle.io.load +- paddle.io.save_inference_model +- paddle.io.load_inference_model +- paddle.io.load_program_state +- paddle.io.set_program_state + +由于飞桨框架2.0不再主推静态图模式,故本文不对以上主要用于飞桨框架1.x的相关接口展开介绍,如有需要,可以阅读对应API文档。 + +1.3 高阶API存储载入体系 +--------------------- + +- paddle.Model.fit (训练接口,同时带有参数存储的功能) +- paddle.Model.save +- paddle.Model.load + +飞桨框架2.0高阶API存储载入接口体系清晰,表意直观,若有需要,建议直接阅读相关API文档,此处不再赘述。 + +.. note:: + 本教程着重介绍飞桨框架2.x的各个存储载入接口的关系及各种使用场景,不对接口参数进行详细介绍,如果需要了解具体接口参数的含义,请直接阅读对应API文档。 + + +二、参数存储载入(训练调优) +####################### + +若仅需要存储/载入模型的参数,可以使用 ``paddle.save/load`` 结合Layer和Optimizer的state_dict达成目的,此处state_dict是对象的持久参数的载体,dict的key为参数名,value为参数真实的numpy array值。 + +2.1 参数存储 +------------ + +参数存储时,先获取目标对象(Layer或者Optimzier)的state_dict,然后将state_dict存储至磁盘,示例如下: + +.. 
code-block:: python + + import numpy as np + import paddle + import paddle.nn as nn + import paddle.optimizer as opt + + BATCH_SIZE = 16 + BATCH_NUM = 4 + EPOCH_NUM = 4 + + IMAGE_SIZE = 784 + CLASS_NUM = 10 + + # define a random dataset + class RandomDataset(paddle.io.Dataset): + def __init__(self, num_samples): + self.num_samples = num_samples + + def __getitem__(self, idx): + image = np.random.random([IMAGE_SIZE]).astype('float32') + label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64') + return image, label + + def __len__(self): + return self.num_samples + + class LinearNet(nn.Layer): + def __init__(self): + super(LinearNet, self).__init__() + self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM) + + def forward(self, x): + return self._linear(x) + + def train(layer, loader, loss_fn, opt): + for epoch_id in range(EPOCH_NUM): + for batch_id, (image, label) in enumerate(loader()): + out = layer(image) + loss = loss_fn(out, label) + loss.backward() + opt.step() + opt.clear_grad() + print("Epoch {} batch {}: loss = {}".format( + epoch_id, batch_id, np.mean(loss.numpy()))) + + # enable dygraph mode + place = paddle.CPUPlace() + paddle.disable_static(place) + + # create network + layer = LinearNet() + loss_fn = nn.CrossEntropyLoss() + adam = opt.Adam(learning_rate=0.001, parameters=layer.parameters()) + + # create data loader + dataset = RandomDataset(BATCH_NUM * BATCH_SIZE) + loader = paddle.io.DataLoader(dataset, + places=place, + batch_size=BATCH_SIZE, + shuffle=True, + drop_last=True, + num_workers=2) + + # train + train(layer, loader, loss_fn, adam) + + # save + model_path = "linear_net" + + param_state_dict = layer.state_dict() + paddle.save(param_state_dict, model_path) + + opt_state_dict = adam.state_dict() + paddle.save(opt_state_dict, model_path) + + +2.2 参数载入 +------------ + +参数载入时,先从磁盘载入保存的state_dict,然后通过set_state_dict方法配置到目标对象中,示例如下: + +.. 
code-block:: python + + import numpy as np + import paddle + import paddle.nn as nn + import paddle.optimizer as opt + + BATCH_SIZE = 16 + BATCH_NUM = 4 + EPOCH_NUM = 4 + + IMAGE_SIZE = 784 + CLASS_NUM = 10 + + # define a random dataset + class RandomDataset(paddle.io.Dataset): + def __init__(self, num_samples): + self.num_samples = num_samples + + def __getitem__(self, idx): + image = np.random.random([IMAGE_SIZE]).astype('float32') + label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64') + return image, label + + def __len__(self): + return self.num_samples + + class LinearNet(nn.Layer): + def __init__(self): + super(LinearNet, self).__init__() + self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM) + + def forward(self, x): + return self._linear(x) + + def train(layer, loader, loss_fn, opt): + for epoch_id in range(EPOCH_NUM): + for batch_id, (image, label) in enumerate(loader()): + out = layer(image) + loss = loss_fn(out, label) + loss.backward() + opt.step() + opt.clear_grad() + print("Epoch {} batch {}: loss = {}".format( + epoch_id, batch_id, np.mean(loss.numpy()))) + + # enable dygraph mode + place = paddle.CPUPlace() + paddle.disable_static(place) + + # create network + layer = LinearNet() + loss_fn = nn.CrossEntropyLoss() + adam = opt.Adam(learning_rate=0.001, parameters=layer.parameters()) + + # create data loader + dataset = RandomDataset(BATCH_NUM * BATCH_SIZE) + loader = paddle.io.DataLoader(dataset, + places=place, + batch_size=BATCH_SIZE, + shuffle=True, + drop_last=True, + num_workers=2) + + # load + model_path = "linear_net" + param_state_dict, opt_state_dict = paddle.load(model_path) + + layer.set_state_dict(param_state_dict) + adam.set_state_dict(opt_state_dict) + + # train + train(layer, loader, loss_fn, adam) + +.. note:: + ``paddle.load`` 接口可能仍会改动,后续可能改为仅返回一个单独的dict。 + +三、模型&参数存储载入(训练部署) +############################ + +若要同时存储/载入模型结构和参数,可以使用 ``paddle.jit.save/load`` 实现。 + +3.1 模型&参数存储 +---------------- + +同时存储模型和参数,需要结合动静转换功能使用。有以下三项注意点: + +(1) Layer对象的forward方法需要经由 ``paddle.jit.to_static`` 装饰 + +经过 ``paddle.jit.to_static`` 装饰forward方法后,相应Layer在执行时,会先生成描述模型的Program,然后通过执行Program获取计算结果,示例如下: + +.. code-block:: python + + import paddle + import paddle.nn as nn + + IMAGE_SIZE = 784 + CLASS_NUM = 10 + + class LinearNet(nn.Layer): + def __init__(self): + super(LinearNet, self).__init__() + self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM) + + @paddle.jit.to_static + def forward(self, x): + return self._linear(x) + +若最终需要生成的描述模型的Program支持动态输入,可以同时指明模型的 ``InputSepc`` ,示例如下: + +.. code-block:: python + + import paddle + import paddle.nn as nn + from paddle.static import InputSpec + + IMAGE_SIZE = 784 + CLASS_NUM = 10 + + class LinearNet(nn.Layer): + def __init__(self): + super(LinearNet, self).__init__() + self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM) + + @paddle.jit.to_static(input_spec=[InputSpec(shape=[None, 784], dtype='float32')]) + def forward(self, x): + return self._linear(x) + + +(2) 请确保Layer.forward方法中仅实现预测功能,避免将训练所需的loss计算逻辑写入forward方法 + +Layer更准确的语义是描述一个具有预测功能的模型对象,接收输入的样本数据,输出预测的结果,而loss计算是仅属于模型训练中的概念。将loss计算的实现放到Layer.forward方法中,会使Layer在不同场景下概念有所差别,并且增大Layer使用的复杂性,这不是良好的编码行为,同时也会在最终保存预测模型时引入剪枝的复杂性,因此建议保持Layer实现的简洁性,下面通过两个示例对比说明: + +错误示例如下: + +.. 
code-block:: python + + import paddle + import paddle.nn as nn + + IMAGE_SIZE = 784 + CLASS_NUM = 10 + + class LinearNet(nn.Layer): + def __init__(self): + super(LinearNet, self).__init__() + self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM) + + @paddle.jit.to_static + def forward(self, x, label=None): + out = self._linear(x) + if label: + loss = nn.functional.cross_entropy(out, label) + avg_loss = nn.functional.mean(loss) + return out, avg_loss + else: + return out + + +正确示例如下: + +.. code-block:: python + + import paddle + import paddle.nn as nn + + IMAGE_SIZE = 784 + CLASS_NUM = 10 + + class LinearNet(nn.Layer): + def __init__(self): + super(LinearNet, self).__init__() + self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM) + + @paddle.jit.to_static + def forward(self, x): + return self._linear(x) + + +(3) 使用 ``paddle.jit.save`` 存储模型和参数 + +直接将目标Layer传入 ``paddle.jit.save`` 存储即可,完整示例如下: + +.. code-block:: python + + import numpy as np + import paddle + import paddle.nn as nn + import paddle.optimizer as opt + + BATCH_SIZE = 16 + BATCH_NUM = 4 + EPOCH_NUM = 4 + + IMAGE_SIZE = 784 + CLASS_NUM = 10 + + # define a random dataset + class RandomDataset(paddle.io.Dataset): + def __init__(self, num_samples): + self.num_samples = num_samples + + def __getitem__(self, idx): + image = np.random.random([IMAGE_SIZE]).astype('float32') + label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64') + return image, label + + def __len__(self): + return self.num_samples + + class LinearNet(nn.Layer): + def __init__(self): + super(LinearNet, self).__init__() + self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM) + + @paddle.jit.to_static + def forward(self, x): + return self._linear(x) + + def train(layer, loader, loss_fn, opt): + for epoch_id in range(EPOCH_NUM): + for batch_id, (image, label) in enumerate(loader()): + out = layer(image) + loss = loss_fn(out, label) + loss.backward() + opt.step() + opt.clear_grad() + print("Epoch {} batch {}: loss = {}".format( + epoch_id, batch_id, np.mean(loss.numpy()))) + + # enable dygraph mode + place = paddle.CPUPlace() + paddle.disable_static(place) + + # 1. train & save model. + + # create network + layer = LinearNet() + loss_fn = nn.CrossEntropyLoss() + adam = opt.Adam(learning_rate=0.001, parameters=layer.parameters()) + + # create data loader + dataset = RandomDataset(BATCH_NUM * BATCH_SIZE) + loader = paddle.io.DataLoader(dataset, + places=place, + batch_size=BATCH_SIZE, + shuffle=True, + drop_last=True, + num_workers=2) + + # train + train(layer, loader, loss_fn, adam) + + # save + model_path = "linear.example.model" + paddle.jit.save(layer, model_path) + + +.. note:: + 后续仍会优化此处的使用方式,支持不装饰 ``to_static`` 也能够通过 ``paddle.jit.save`` 直接存储模型和参数。 + + +3.2 模型&参数载入 +---------------- + +载入模型参数,使用 ``paddle.jit.load`` 载入即可,载入后得到的是一个Layer的派生类对象 ``TranslatedLayer`` , ``TranslatedLayer`` 具有Layer具有的通用特征,支持切换 ``train`` 或者 ``eval`` 模式,可以进行模型调优或者预测。 + +载入模型及参数,示例如下: + +.. code-block:: python + + import numpy as np + import paddle + import paddle.nn as nn + import paddle.optimizer as opt + + # enable dygraph mode + place = paddle.CPUPlace() + paddle.disable_static(place) + + BATCH_SIZE = 16 + BATCH_NUM = 4 + EPOCH_NUM = 4 + + IMAGE_SIZE = 784 + CLASS_NUM = 10 + + # load + model_path = "linear.example.model" + loaded_layer = paddle.jit.load(model_path) + +载入模型及参数后进行预测,示例如下(接前述示例): + +.. code-block:: python + + # inference + loaded_layer.eval() + x = paddle.randn([1, IMAGE_SIZE], 'float32') + pred = loaded_layer(x) + +载入模型及参数后进行调优,示例如下(接前述示例): + +.. 
code-block:: python + + # define a random dataset + class RandomDataset(paddle.io.Dataset): + def __init__(self, num_samples): + self.num_samples = num_samples + + def __getitem__(self, idx): + image = np.random.random([IMAGE_SIZE]).astype('float32') + label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64') + return image, label + + def __len__(self): + return self.num_samples + + def train(layer, loader, loss_fn, opt): + for epoch_id in range(EPOCH_NUM): + for batch_id, (image, label) in enumerate(loader()): + out = layer(image) + loss = loss_fn(out, label) + loss.backward() + opt.step() + opt.clear_grad() + print("Epoch {} batch {}: loss = {}".format( + epoch_id, batch_id, np.mean(loss.numpy()))) + + # fine-tune + loaded_layer.train() + dataset = RandomDataset(BATCH_NUM * BATCH_SIZE) + loader = paddle.io.DataLoader(dataset, + places=place, + batch_size=BATCH_SIZE, + shuffle=True, + drop_last=True, + num_workers=2) + loss_fn = nn.CrossEntropyLoss() + adam = opt.Adam(learning_rate=0.001, parameters=loaded_layer.parameters()) + train(loaded_layer, loader, loss_fn, adam) + + +此外, ``paddle.jit.save`` 同时保存了模型和参数,如果您只需要从存储结果中载入模型的参数,可以使用 ``paddle.load`` 接口载入,返回所存储模型的state_dict,示例如下: + +.. code-block:: python + + import paddle + import paddle.nn as nn + + IMAGE_SIZE = 784 + CLASS_NUM = 10 + + class LinearNet(nn.Layer): + def __init__(self): + super(LinearNet, self).__init__() + self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM) + + @paddle.jit.to_static + def forward(self, x): + return self._linear(x) + + # enable dygraph mode + paddle.disable_static() + + # create network + layer = LinearNet() + + # load + model_path = "linear.example.model" + state_dict, _ = paddle.load(model_path) + + # inference + layer.set_state_dict(state_dict, use_structured_name=False) + layer.eval() + x = paddle.randn([1, IMAGE_SIZE], 'float32') + pred = layer(x) + + +四、旧存储格式兼容载入 +################### + +如果您是从飞桨框架1.x切换到2.x,曾经使用飞桨框架1.x的接口存储模型或者参数,飞桨框架2.x也对这种情况进行了兼容性支持,包括以下几种情况。 + +4.1 从 ``paddle.io.save_inference_model`` 存储结果中载入模型&参数 +------------------------------------------------------------------ + +曾用接口名为 ``paddle.fluid.io.save_inference_model`` 。 + +(1) 同时载入模型和参数 + +使用 ``paddle.jit.load`` 配合 ``paddle.SaveLoadConfig`` 载入模型和参数。 + +模型准备及训练示例,该示例为后续所有示例的前序逻辑: + +.. 
code-block:: python + + import numpy as np + import paddle + import paddle.fluid as fluid + import paddle.nn as nn + import paddle.optimizer as opt + + BATCH_SIZE = 16 + BATCH_NUM = 4 + EPOCH_NUM = 4 + + IMAGE_SIZE = 784 + CLASS_NUM = 10 + + # define a random dataset + class RandomDataset(paddle.io.Dataset): + def __init__(self, num_samples): + self.num_samples = num_samples + + def __getitem__(self, idx): + image = np.random.random([IMAGE_SIZE]).astype('float32') + label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64') + return image, label + + def __len__(self): + return self.num_samples + + image = fluid.data(name='image', shape=[None, 784], dtype='float32') + label = fluid.data(name='label', shape=[None, 1], dtype='int64') + pred = fluid.layers.fc(input=image, size=10, act='softmax') + loss = fluid.layers.cross_entropy(input=pred, label=label) + avg_loss = fluid.layers.mean(loss) + + optimizer = fluid.optimizer.SGD(learning_rate=0.001) + optimizer.minimize(avg_loss) + + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + + # create data loader + dataset = RandomDataset(BATCH_NUM * BATCH_SIZE) + loader = paddle.io.DataLoader(dataset, + feed_list=[image, label], + places=place, + batch_size=BATCH_SIZE, + shuffle=True, + drop_last=True, + num_workers=2) + + # train model + for data in loader(): + exe.run( + fluid.default_main_program(), + feed=data, + fetch_list=[avg_loss]) + + +如果您是按照 ``paddle.fluid.io.save_inference_model`` 的默认格式存储的,可以按照如下方式载入(接前述示例): + +.. code-block:: python + + # save default + model_path = "fc.example.model" + fluid.io.save_inference_model( + model_path, ["image"], [pred], exe) + + # enable dygraph mode + paddle.disable_static(place) + + # load + fc = paddle.jit.load(model_path) + + # inference + fc.eval() + x = paddle.randn([1, IMAGE_SIZE], 'float32') + pred = fc(x) + +如果您指定了存储的模型文件名,可以按照以下方式载入(接前述示例): + +.. code-block:: python + + # save with model_filename + model_path = "fc.example.model.with_model_filename" + fluid.io.save_inference_model( + model_path, ["image"], [pred], exe, model_filename="__simplenet__") + + # enable dygraph mode + paddle.disable_static(place) + + # load + config = paddle.SaveLoadConfig() + config.model_filename = "__simplenet__" + fc = paddle.jit.load(model_path, config=config) + + # inference + fc.eval() + x = paddle.randn([1, IMAGE_SIZE], 'float32') + pred = fc(x) + +如果您指定了存储的参数文件名,可以按照以下方式载入(接前述示例): + +.. code-block:: python + + # save with params_filename + model_path = "fc.example.model.with_params_filename" + fluid.io.save_inference_model( + model_path, ["image"], [pred], exe, params_filename="__params__") + + # enable dygraph mode + paddle.disable_static(place) + + # load + config = paddle.SaveLoadConfig() + config.params_filename = "__params__" + fc = paddle.jit.load(model_path, config=config) + + # inference + fc.eval() + x = paddle.randn([1, IMAGE_SIZE], 'float32') + pred = fc(x) + +(2) 仅载入参数 + +如果您仅需要从 ``paddle.fluid.io.save_inference_model`` 的存储结果中载入参数,以state_dict的形式配置到已有代码的模型中,可以使用 ``paddle.load`` 配合 ``paddle.SaveLoadConfig`` 载入。 + +如果您是按照 ``paddle.fluid.io.save_inference_model`` 的默认格式存储的,可以按照如下方式载入(接前述示例): + +.. code-block:: python + + model_path = "fc.example.model" + + load_param_dict, _ = paddle.load(model_path) + +如果您指定了存储的模型文件名,可以按照以下方式载入(接前述示例): + +.. 
code-block:: python + + model_path = "fc.example.model.with_model_filename" + + config = paddle.SaveLoadConfig() + config.model_filename = "__simplenet__" + load_param_dict, _ = paddle.load(model_path, config) + +如果您指定了存储的参数文件名,可以按照以下方式载入(接前述示例): + +.. code-block:: python + + model_path = "fc.example.model.with_params_filename" + + config = paddle.SaveLoadConfig() + config.params_filename = "__params__" + load_param_dict, _ = paddle.load(model_path, config) + +.. note:: + 一般预测模型不会存储优化器Optimizer的参数,因此此处载入的仅包括模型本身的参数。 + +.. note:: + 由于 ``structured_name`` 是动态图下独有的变量命名方式,因此从静态图存储结果载入的state_dict在配置到动态图的Layer中时,需要配置 ``Layer.set_state_dict(use_structured_name=False)`` 。 + +4.2 从 ``paddle.io.save`` 存储结果中载入参数 +---------------------------------------------- + +曾用接口名为 ``paddle.fluid.save`` 。 + + ``paddle.fluid.save`` 的存储格式与2.x动态图接口 ``paddle.save`` 存储格式是类似的,同样存储了dict格式的参数,因此可以直接使用 ``paddle.load`` 载入state_dict,示例如下(接前述示例): + +.. code-block:: python + + # save by fluid.save + model_path = "fc.example.model.save" + program = fluid.default_main_program() + fluid.save(program, model_path) + + # enable dygraph mode + paddle.disable_static(place) + + load_param_dict, _ = paddle.load(model_path) + + +.. note:: + 由于 ``paddle.fluid.save`` 接口原先在静态图模式下的定位是存储训练时参数,或者说存储Checkpoint,故尽管其同时存储了模型结构,目前也暂不支持从 ``paddle.fluid.save`` 的存储结果中同时载入模型和参数,后续如有需求再考虑支持。 + + +4.3 从 ``paddle.io.save_params/save_persistables`` 存储结果中载入参数 +----------------------------------------------------------------------- + +.. note:: + 以下方式仅为暂时解决方案,后续计划会在 ``paddle.load`` 接口支持此功能。 + +曾用接口名为 ``paddle.fluid.io.save_params/save_persistables`` 。 + +此处可以使用 ``paddle.io.load_program_state`` 接口从以上两个接口的存储结果中载入state_dict,并用于动态图Layer的配置,示例如下(接前述示例): + +.. code-block:: python + + # save by fluid.io.save_params + model_path = "fc.example.model.save_params" + fluid.io.save_params(exe, model_path) + + # load + state_dict = paddle.io.load_program_state(model_path) diff --git a/doc/paddle/guides/tensor_introduction.md b/doc/paddle/guides/tensor_introduction.md new file mode 100644 index 0000000000000000000000000000000000000000..ace9c23cb495415aa058ed7e7ef706bb923feb60 --- /dev/null +++ b/doc/paddle/guides/tensor_introduction.md @@ -0,0 +1,497 @@ + + +# Tensor概念介绍 + +飞桨(PaddlePaddle,以下简称Paddle)和其他深度学习框架一样,使用**Tensor**来表示数据,在神经网络中传递的数据均为**Tensor**。 + +**Tensor**可以将其理解为多维数组,其可以具有任意多的维度,不同**Tensor**可以有不同的**数据类型** (dtype) 和**形状** (shape)。 + +同一**Tensor**的中所有元素的dtype均相同。如果你对 [Numpy](https://www.paddlepaddle.org.cn/tutorials/projectdetail/590690) 熟悉,**Tensor**是类似于 **Numpy array** 的概念。 + +### 目录 + +* [Tensor的创建](#1) +* [Tensor的shape](#2) +* [Tensor其他属性](#3) +* [Tensor的操作](#4) + + +---------- + +##

Tensor的创建

+ +首先,让我们开始创建一个 **Tensor** : + +### 1. 创建类似于vector的**1-D Tensor**,其rank为1 +```python +# 可通过dtype来指定Tensor数据类型,否则会创建float32类型的Tensor +rank_1_tensor = paddle.to_tensor([2.0, 3.0, 4.0], dtype='float64') +print(rank_1_tensor) +``` + +```text +Tensor: generated_tensor_1 + - place: CUDAPlace(0) + - shape: [3] + - layout: NCHW + - dtype: double + - data: [2.0, 3.0, 4.0] +``` +特殊地,如果仅输入单个scalar类型数据(例如float/int/bool类型的单个元素),则会创建shape为[1]的**Tensor** +```python +paddle.to_tensor(2) +paddle.to_tensor([2]) +``` +上述两种创建方式完全一致,shape均为[1],输出如下: +```text +Tensor: generated_tensor_0 + - place: CUDAPlace(0) + - shape: [1] + - layout: NCHW + - dtype: int32_t + - data: [2] +``` + +### 2. 创建类似于matrix的**2-D Tensor**,其rank为2 +```python +rank_2_tensor = paddle.to_tensor([[1.0, 2.0, 3.0], + [4.0, 5.0, 6.0]]) +print(rank_2_tensor) +``` +```text +Tensor: generated_tensor_2 + - place: CUDAPlace(0) + - shape: [2, 3] + - layout: NCHW + - dtype: double + - data: [1.0 2.0 3.0 4.0 5.0 6.0] +``` + +### 3. 同样地,还可以创建rank为3、4...N等更复杂的多维Tensor +``` +# Tensor可以有任意数量的轴(也称为维度) +rank_3_tensor = paddle.to_tensor([[[1, 2, 3, 4, 5], + [6, 7, 8, 9, 10]], + [[11, 12, 13, 14, 15], + [16, 17, 18, 19, 20]]]) +print(rank_3_tensor) +``` +```text +Tensor: generated_tensor_3 + - place: CUDAPlace(0) + - shape: [2, 2, 5] + - layout: NCHW + - dtype: double + - data: [1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20] +``` +上述不同rank的**Tensor**可以可视化的表示为: + +
+
图1 不同rank的Tensor可视化表示
+ + +你可以通过 Tensor.numpy() 方法方便地将 **Tensor** 转化为 **Numpy array**: +```python +print(rank_2_tensor.numpy()) +``` +```text +array([[1.0, 2.0, 3.0], + [4.0, 5.0, 6.0]], dtype=float32) +``` + +**Tensor**不仅支持 floats、ints 类型数据,也支持 complex numbers 数据: +```python +rank_2_complex_tensor = paddle.to_tensor([[1+1j, 2+2j], + [3+3j, 4+4j]]) +``` +```text +CompleTensor[real]: generated_tensor_0.real + - place: CUDAPlace(0) + - shape: [2, 2] + - layout: NCHW + - dtype: float + - data: [1 2 3 4] +CompleTensor[imag]: generated_tensor_0.real + - place: CUDAPlace(0) + - shape: [2, 2] + - layout: NCHW + - dtype: float + - data: [1 2 3 4] +``` +如果检测到输入数据包含complex numbers,则会自动创建一个**ComplexTensor**,**ComplexTensor**是Paddle中一种特殊的数据结构, +其包含实部(real)与虚部(imag)两个形状与数据类型相同的**Tensor**,其结构可视化表示为: + +
+
图2 ComplexTensor的可视化表示
+ +**Tensor**必须形状规则,类似于“矩形”的概念,也就是,沿任何一个轴(也称作维度)上,元素的数量都是相等的,如果为以下情况: +``` +rank_2_tensor = paddle.to_tensor([[1.0, 2.0], + [4.0, 5.0, 6.0]]) +``` +该情况下将会抛出异常: +```text +ValueError: + Faild to convert input data to a regular ndarray : + - Usually this means the input data contains nested lists with different lengths. +``` + +上面介绍了通过Python数据来创建**Tensor**的方法,我们也可以通过 **Numpy array** 来创建**Tensor**: +```python +rank_1_tensor = paddle.to_tensor(Numpy array([1.0, 2.0])) + +rank_2_tensor = paddle.to_tensor(Numpy array([[1.0, 2.0], + [3.0, 4.0]])) + +rank_3_tensor = paddle.to_tensor(numpy.random.rand(3, 2)) +``` +创建的 **Tensor** 与原 **Numpy array** 具有相同的 shape 与 dtype。 + +如果要创建一个指定shape的**Tensor**,Paddle也提供了一些API: +```text +paddle.zeros([m, n]) # 创建数据全为0,shape为[m, n]的Tensor +paddle.ones([m, n]) # 创建数据全为1,shape为[m, n]的Tensor +paddle.full([m, n], 10) # 创建数据全为10,shape为[m, n]的Tensor +paddle.arrange(start, end, step) # 创建从start到end,步长为step的Tensor +paddle.linspace(start, end, num) # 创建从start到end,元素个数固定为num的Tensor +``` + +---------- +##

Tensor的shape

+ +### 基本概念 +查看一个**Tensor**的形状可以通过 **Tensor.shape**,shape是 **Tensor** 的一个重要属性,以下为相关概念: + +1. shape:描述了tensor的每个维度上的元素数量 +2. rank: tensor的维度的数量,例如vector的rank为1,matrix的rank为2. +3. axis或者dimension:指tensor某个特定的维度 +4. size:指tensor中全部元素的个数 + +让我们来创建1个4-D **Tensor**,并通过图形来直观表达以上几个概念之间的关系; +```python +rank_4_tensor = paddle.ones([2, 3, 4, 5]) +``` + +
+
图3 Tensor的shape、axis、dimension、rank之间的关系
+ +```python +print("Data Type of every element:", rank_4_tensor.dtype) +print("Number of dimensions:", rank_4_tensor.ndim) +print("Shape of tensor:", rank_4_tensor.shape) +print("Elements number along axis 0 of tensor:", rank_4_tensor.shape[0]) +print("Elements number along the last axis of tensor:", rank_4_tensor.shape[-1]) +``` +```text +Data Type of every element: VarType.FP32 +Number of dimensions: 4 +Shape of tensor: [2, 3, 4, 5] +Elements number along axis 0 of tensor: 2 +Elements number along the last axis of tensor: 5 +``` + +### 索引 +通过索引能方便地对Tensor进行“切片”操作。Paddle使用标准的 Python索引规则 与 Numpy索引规则,与[ndexing a list or a string in Python](https://docs.python.org/3/tutorial/introduction.html#strings)类似。具有以下特点: + +1. 如果索引为负数,则从尾部开始计算 +2. 如果索引使用 ``:`` ,则其对应格式为start: stop: step,其中start、stop、step均可缺省 + +* 针对1-D **Tensor**,则仅有单个轴上的索引: +```python +rank_1_tensor = paddle.to_tensor([0, 1, 2, 3, 4, 5, 6, 7, 8]) +print("Origin Tensor:", rank_1_tensor.numpy()) + +print("First element:", rank_1_tensor[0].numpy()) +print("Last element:", rank_1_tensor[-1].numpy()) +print("All element:", rank_1_tensor[:].numpy()) +print("Before 3:", rank_1_tensor[:3].numpy()) +print("From 6 to the end:", rank_1_tensor[6:].numpy()) +print("From 3 to 6:", rank_1_tensor[3:6].numpy()) +print("Interval of 3:", rank_1_tensor[::3].numpy()) +print("Reverse:", rank_1_tensor[::-1].numpy()) +``` +```text +Origin Tensor: array([0, 1, 2, 3, 4, 5, 6, 7, 8], dtype=int64) +First element: [0] +Last element: [8] +All element: [0 1 2 3 4 5 6 7 8] +Before 3: [0 1 2] +From 6 to the end: [6 7 8] +From 3 to 6: [3 4 5] +Interval of 3: [0 3 6] +Reverse: [8 7 6 5 4 3 2 1 0] +``` + +* 针对2-D及以上的 **Tensor**,则会有多个轴上的索引: +```python +rank_2_tensor = paddle.to_tensor([[0, 1, 2, 3], + [4, 5, 6, 7], + [8, 9, 10, 11]]) +print("Origin Tensor:", rank_2_tensor.numpy()) +print("First row:", rank_2_tensor[0].numpy()) +print("First row:", rank_2_tensor[0, :].numpy()) +print("First column:", rank_2_tensor[:, 0].numpy()) +print("Last column:", rank_2_tensor[:, -1].numpy()) +print("All element:", rank_2_tensor[:].numpy()) +print("First row and second column:", rank_2_tensor[0, 1].numpy()) +``` +```text +Origin Tensor: array([[ 0 1 2 3] + [ 4 5 6 7] + [ 8 9 10 11]], dtype=int64) +First row: [0 1 2 3] +First row: [0 1 2 3] +First column: [0 4 8] +Last column: [ 3 7 11] +All element: [[ 0 1 2 3] + [ 4 5 6 7] + [ 8 9 10 11]] +First row and second column: [1] +``` + +输入索引的第一个值对应axis 0,第二个值对应axis 1,以此类推,如果某个axis上未指定索引,则默认为 ``:`` 。例如: +``` +rank_3_tensor[1] +rank_3_tensor[1, :] +rank_3_tensor[1, :, :] +``` +以上三种索引的结果是完全相同的。 + +### 对shape进行操作 + +重新定义**Tensor**的shape在实际编程中具有重要意义。 +```python +rank_3_tensor = paddle.to_tensor([[[1, 2, 3, 4, 5], + [6, 7, 8, 9, 10]], + [[11, 12, 13, 14, 15], + [16, 17, 18, 19, 20]], + [[21, 22, 23, 24, 25], + [26, 27, 28, 29, 30]]]) +print("the shape of rank_3_tensor:", rank_3_tensor.shape) +``` +```text +the shape of rank_3_tensor: [3, 2, 5] +``` + +Paddle提供了reshape接口来改变Tensor的shape: +```python +rank_3_tensor = paddle.reshape(rank_3_tensor, [2, 5, 3]) +print("After reshape:", rank_3_tensor.shape) +``` +```text +After reshape: [2, 5, 3] +``` + +在指定新的shape时存在一些技巧: + +**1.** -1 表示这个维度的值是从Tensor的元素总数和剩余维度推断出来的。因此,有且只有一个维度可以被设置为-1。 +**2.** 0 表示实际的维数是从Tensor的对应维数中复制出来的,因此shape中0的索引值不能超过x的维度。 + +有一些例子可以很好解释这些技巧: +```text +origin:[3, 2, 5] reshape:[3, 10] actual: [3, 10] +origin:[3, 2, 5] reshape:[-1] actual: [30] +origin:[3, 2, 5] reshape:[0, 5, -1] actual: [3, 5, 2] +``` + +可以发现,reshape为[-1]时,会将tensor按其在计算机上的内存分布展平为1-D Tensor。 +```python 
+print("Tensor flattened to Vector:", paddle.reshape(rank_3_tensor, [-1]).numpy()) +``` +```text +Tensor flattened to Vector: [1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30] +``` + +---------- +##

Tensor其他属性

+### Tensor的dtype
+
+**Tensor**的数据类型,可以通过 Tensor.dtype 来查看,dtype支持:'bool','float16','float32','float64','uint8','int8','int16','int32','int64'。
+
+* 通过Python元素创建的Tensor,可以通过dtype来进行指定,如果未指定:
+
+    * 对于python整型数据,则会创建int64型Tensor
+    * 对于python浮点型数据,默认会创建float32型Tensor,并且可以通过set_default_type来调整浮点型数据的默认类型。
+
+* 通过Numpy array创建的Tensor,则与其原来的dtype保持相同。
+
+```python
+print("Tensor dtype from Python integers:", paddle.to_tensor(1).dtype)
+print("Tensor dtype from Python floating point:", paddle.to_tensor(1.0).dtype)
+```
+```text
+Tensor dtype from Python integers: VarType.INT64
+Tensor dtype from Python floating point: VarType.FP32
+```
+
+Paddle提供了**cast**接口来改变dtype:
+```python
+float32_tensor = paddle.to_tensor(1.0)
+
+float64_tensor = paddle.cast(float32_tensor, dtype='float64')
+print("Tensor after cast to float64:", float64_tensor.dtype)
+
+int64_tensor = paddle.cast(float32_tensor, dtype='int64')
+print("Tensor after cast to int64:", int64_tensor.dtype)
+```
+```text
+Tensor after cast to float64: VarType.FP64
+Tensor after cast to int64: VarType.INT64
+```
+
+### Tensor的place
+
+初始化**Tensor**时可以通过**place**来指定其分配的设备位置,可支持的设备位置有三种:CPU/GPU/固定内存,其中固定内存也称为不可分页内存或锁页内存,其与GPU之间具有更高的读写效率,并且支持异步传输,这对网络整体性能会有进一步提升,但其缺点是分配空间过多时可能会降低主机系统的性能,因为其减少了用于存储虚拟内存数据的可分页内存。
+
+* **创建CPU上的Tensor**:
+```python
+cpu_tensor = paddle.to_tensor(1, place=paddle.CPUPlace())
+print(cpu_tensor)
+```
+```text
+Tensor: generated_tensor_0
+  - place: CPUPlace
+```
+
+* **创建GPU上的Tensor**:
+```python
+gpu_tensor = paddle.to_tensor(1, place=paddle.CUDAPlace(0))
+print(gpu_tensor)
+```
+```text
+Tensor: generated_tensor_0
+  - place: CUDAPlace(0)
+
+```
+
+* **创建固定内存上的Tensor**:
+```python
+pin_memory_tensor = paddle.to_tensor(1, place=paddle.CUDAPinnedPlace())
+print(pin_memory_tensor)
+```
+```text
+Tensor: generated_tensor_0
+  - place: CUDAPinnedPlace
+
+```
+### Tensor的name
+
+Tensor的name是其唯一的标识符,为python 字符串类型,查看一个Tensor的name可以通过Tensor.name属性。默认地,在每个Tensor创建时,Paddle会自定义一个独一无二的name。
+
+```python
+print("Tensor name:", paddle.to_tensor(1).name)
+```
+```text
+Tensor name: generated_tensor_0
+```
+
+----------
+##

Tensor的操作

+
+Paddle提供了丰富的Tensor操作的API,包括数学运算符、逻辑运算符、线性代数相关等100余种API,这些API调用有两种方法:
+```python
+x = paddle.to_tensor([[1.1, 2.2], [3.3, 4.4]])
+y = paddle.to_tensor([[5.5, 6.6], [7.7, 8.8]])
+
+print(paddle.add(x, y), "\n")
+print(x.add(y), "\n")
+```
+```text
+Tensor: eager_tmp_2
+  - place: CUDAPlace(0)
+  - shape: [2, 2]
+  - layout: NCHW
+  - dtype: float
+  - data: [6.6 8.8 11 13.2]
+
+Tensor: eager_tmp_3
+  - place: CUDAPlace(0)
+  - shape: [2, 2]
+  - layout: NCHW
+  - dtype: float
+  - data: [6.6 8.8 11 13.2]
+```
+
+可以看出,使用 **Tensor类成员函数** 和 **paddle API** 具有相同的效果,由于 **类成员函数** 操作更为方便,以下均从 **Tensor类成员函数** 的角度,对常用**Tensor**操作进行介绍。
+
+#### 数学运算符
+```python
+x.abs() #绝对值
+x.ceil() #向上取整
+x.floor() #向下取整
+x.exp() #逐元素计算自然常数为底的指数
+x.log() #逐元素计算x的自然对数
+x.reciprocal() #求倒数
+x.square() #逐元素计算平方
+x.sqrt() #逐元素计算平方根
+x.sum() #计算所有元素的和
+x.asin() #逐元素计算反正弦函数
+x.add(y) #逐元素相加
+x.add(-y) #逐元素相减
+x.multiply(y) #逐元素相乘
+x.divide(y) #逐元素相除
+x.floor_divide(y) #逐元素相除并取整
+x.remainder(y) #逐元素相除并取余
+x.pow(y) #逐元素幂运算
+x.reduce_max() #所有元素最大值,可以指定维度
+x.reduce_min() #所有元素最小值,可以指定维度
+x.reduce_prod() #所有元素累乘,可以指定维度
+x.reduce_sum() #所有元素的和,可以指定维度
+```
+
+Paddle对python数学运算相关的魔法函数进行了重写,以下操作与上述结果相同。
+```text
+x + y -> x.add(y) #逐元素相加
+x - y -> x.add(-y) #逐元素相减
+x * y -> x.multiply(y) #逐元素相乘
+x / y -> x.divide(y) #逐元素相除
+x // y -> x.floor_divide(y) #逐元素相除并取整
+x % y -> x.remainder(y) #逐元素相除并取余
+x ** y -> x.pow(y) #逐元素幂运算
+```
+
+#### 逻辑运算符
+```python
+x.is_empty() #判断tensor是否为空
+x.isfinite() #判断tensor中元素是否是有限的数字,即不包括inf与nan
+x.equal_all(y) #判断两个tensor的所有元素是否相等
+x.equal(y) #判断两个tensor的每个元素是否相等
+x.not_equal(y) #判断两个tensor的每个元素是否不相等
+x.less_than(y) #判断tensor x的元素是否小于tensor y的对应元素
+x.less_equal(y) #判断tensor x的元素是否小于或等于tensor y的对应元素
+x.greater_than(y) #判断tensor x的元素是否大于tensor y的对应元素
+x.greater_equal(y) #判断tensor x的元素是否大于或等于tensor y的对应元素
+```
+
+同样地,Paddle对python逻辑比较相关的魔法函数进行了重写,以下操作与上述结果相同。
+```text
+x == y -> x.equal(y) #判断两个tensor的每个元素是否相等
+x != y -> x.not_equal(y) #判断两个tensor的每个元素是否不相等
+x < y -> x.less_than(y) #判断tensor x的元素是否小于tensor y的对应元素
+x <= y -> x.less_equal(y) #判断tensor x的元素是否小于或等于tensor y的对应元素
+x > y -> x.greater_than(y) #判断tensor x的元素是否大于tensor y的对应元素
+x >= y -> x.greater_equal(y) #判断tensor x的元素是否大于或等于tensor y的对应元素
+```
+
+以下操作仅针对bool型Tensor:
+```python
+x.reduce_all() #判断一个bool型tensor是否所有元素为True
+x.reduce_any() #判断一个bool型tensor是否存在至少1个元素为True
+x.logical_and(y) #对两个bool型tensor逐元素进行逻辑与操作
+x.logical_or(y) #对两个bool型tensor逐元素进行逻辑或操作
+x.logical_xor(y) #对两个bool型tensor逐元素进行逻辑异或操作
+x.logical_not() #对bool型tensor逐元素进行逻辑非操作
+```
+
+#### 线性代数相关
+```python
+x.cholesky() #矩阵的cholesky分解
+x.t() #矩阵转置
+x.transpose([1, 0]) #交换axis 0 与axis 1的顺序
+x.norm('fro') #矩阵的Frobenius 范数
+x.dist(y, p=2) #矩阵(x-y)的2范数
+x.matmul(y) #矩阵乘法
+```
+需要注意,Paddle中Tensor的操作符均为非inplace操作,即 ``x.add(y)`` 不会在**tensor x**上直接进行操作,而会返回一个新的**Tensor**来表示运算结果。
+
+更多Tensor操作相关的API,请参考[class paddle.Tensor](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/tensor/creation/Tensor_cn.html)
diff --git a/doc/paddle/guides/tensor_introduction_en.md b/doc/paddle/guides/tensor_introduction_en.md
new file mode 100644
index 0000000000000000000000000000000000000000..18f7f18afdabd9e38e07337d83305532904f28e0
--- /dev/null
+++ b/doc/paddle/guides/tensor_introduction_en.md
@@ -0,0 +1,502 @@
+# Introduction to Tensor
+
+PaddlePaddle (hereinafter referred to as Paddle), like other deep learning frameworks, uses **Tensor** to
+represent data.
+
+**Tensor** can be regarded as a multi-dimensional array, which can have as many dimensions as needed.
Different **Tensor** can have different data types(dtype) and shape. + +The dtypes of all elements in the same Tensor are the same. If you are familiar with [Numpy](https://www.paddlepaddle.org.cn/tutorials/projectdetail/590690), **Tensor** is similar to the **Numpy array**. + +### Contents + +* [Creation of Tensor](#1) +* [Shape of Tensor](#2) +* [Other attributes of Tensor](#3) +* [Method of Tensor](#4) + +---------- +##

Creation of Tensor

+
+First, let's create a **Tensor**:
+
+### 1. Create a **1-D Tensor** like a vector, whose rank is 1
+```python
+# The Tensor data type can be specified by dtype; otherwise, a float32 Tensor will be created
+rank_1_tensor = paddle.to_tensor([2.0, 3.0, 4.0], dtype='float64')
+print(rank_1_tensor)
+```
+```text
+Tensor: generated_tensor_1
+  - place: CUDAPlace(0)
+  - shape: [3]
+  - layout: NCHW
+  - dtype: double
+  - data: [2.0, 3.0, 4.0]
+```
+
+Specifically, if you input only scalar data (for example, a float/int/bool), a **Tensor** whose shape is [1] will be created.
+```python
+paddle.to_tensor(2)
+paddle.to_tensor([2])
+```
+The above two lines are completely equivalent; the Tensor shape is [1]:
+```text
+Tensor: generated_tensor_0
+  - place: CUDAPlace(0)
+  - shape: [1]
+  - layout: NCHW
+  - dtype: int32_t
+  - data: [2]
+```
+
+### 2. Create a **2-D Tensor** like a matrix, whose rank is 2
+```python
+rank_2_tensor = paddle.to_tensor([[1.0, 2.0, 3.0],
+                                  [4.0, 5.0, 6.0]])
+print(rank_2_tensor)
+```
+```text
+Tensor: generated_tensor_2
+  - place: CUDAPlace(0)
+  - shape: [2, 3]
+  - layout: NCHW
+  - dtype: double
+  - data: [1.0 2.0 3.0 4.0 5.0 6.0]
+```
+
+### 3. Similarly, you can create a multidimensional Tensor whose rank is 3, 4, ... N
+```python
+# There can be an arbitrary number of axes (sometimes called "dimensions")
+rank_3_tensor = paddle.to_tensor([[[1, 2, 3, 4, 5],
+                                   [6, 7, 8, 9, 10]],
+                                  [[11, 12, 13, 14, 15],
+                                   [16, 17, 18, 19, 20]]])
+print(rank_3_tensor)
+```
+```text
+Tensor: generated_tensor_3
+  - place: CUDAPlace(0)
+  - shape: [2, 2, 5]
+  - layout: NCHW
+  - dtype: double
+  - data: [1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20]
+```
+The visual representation of the **Tensor**s above is:
+
+
Figure1. Visual representation of Tensor with different ranks
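+
+As a quick check against Figure 1, the rank of each tensor created above can be read from its `ndim` and `shape` attributes (a minimal sketch that assumes the `rank_1_tensor`, `rank_2_tensor` and `rank_3_tensor` variables defined above):
+```python
+# Ranks and shapes of the tensors created above
+print(rank_1_tensor.ndim, rank_1_tensor.shape)  # 1 [3]
+print(rank_2_tensor.ndim, rank_2_tensor.shape)  # 2 [2, 3]
+print(rank_3_tensor.ndim, rank_3_tensor.shape)  # 3 [2, 2, 5]
+```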
+ + +You can convert **Tensor** to Numpy array easily Tensor.numpy() method. +```python +print(rank_2_tensor.numpy()) +``` +```text +array([[1.0, 2.0, 3.0], + [4.0, 5.0, 6.0]], dtype=float32) +``` + +**Tensor supports not only floats and ints but also complex Numbers data: +```python +rank_2_complex_tensor = paddle.to_tensor([[1+1j, 2+2j], + [3+3j, 4+4j]]) +``` +```text +CompleTensor[real]: generated_tensor_0.real + - place: CUDAPlace(0) + - shape: [2, 2] + - layout: NCHW + - dtype: float + - data: [1 2 3 4] +CompleTensor[imag]: generated_tensor_0.real + - place: CUDAPlace(0) + - shape: [2, 2] + - layout: NCHW + - dtype: float + - data: [1 2 3 4] +``` +If the input data contains complex Number, a **ComplexTensor** is automatically created. **ComplexTensor** is a special data structure in Paddle. **ComplexTensor** consists of two **Tensor**, one is real part and the other is imaginary part. **ComplexTensor** can be visualized as follows: + +
+
Figure2. Visual representation of ComplexTensor
+
+**Tensor** must be "rectangular" -- that is, along each axis, the nested lists must all have the same length. For example:
+```python
+rank_2_tensor = paddle.to_tensor([[1.0, 2.0],
+                                  [4.0, 5.0, 6.0]])
+```
+An exception will be thrown in this case:
+```text
+ValueError:
+    Faild to convert input data to a regular ndarray :
+     - Usually this means the input data contains nested lists with different lengths.
+```
+
+The way to create a **Tensor** from Python data is described above. We can also create a **Tensor**
+from a numpy array:
+```python
+rank_1_tensor = paddle.to_tensor(numpy.array([1.0, 2.0]))
+
+rank_2_tensor = paddle.to_tensor(numpy.array([[1.0, 2.0],
+                                              [3.0, 4.0]]))
+
+rank_3_tensor = paddle.to_tensor(numpy.random.rand(3, 2))
+```
+The created **Tensor** will have the same shape and dtype as the original Numpy array.
+
+If you want to create a **Tensor** with a specific size, Paddle also provides these APIs (a runnable example follows the list):
+```text
+paddle.zeros([m, n])             # All elements: 0, Shape: [m, n]
+paddle.ones([m, n])              # All elements: 1, Shape: [m, n]
+paddle.full([m, n], 10)          # All elements: 10, Shape: [m, n]
+paddle.arange(start, end, 2)     # Elements: from start to end, step size is 2
+paddle.linspace(start, end, 10)  # Elements: from start to end, number of elements is 10
+```
+
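+As a quick, hedged illustration of the creation APIs listed above (the shapes and values here are arbitrary examples, not part of the original API descriptions), assuming Paddle's imperative mode as in the rest of this guide:
+```python
+import paddle
+
+print(paddle.zeros([2, 3]).numpy())      # 2x3 matrix of zeros
+print(paddle.full([2, 3], 10).numpy())   # 2x3 matrix filled with 10
+print(paddle.arange(0, 10, 2).numpy())   # [0 2 4 6 8]
+print(paddle.linspace(0, 1, 5).numpy())  # 5 evenly spaced values from 0 to 1
+```
+Each call returns an ordinary **Tensor**, so everything described below (shape, dtype, place, reshape) applies to these tensors as well.
+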

+----------
+## Shape of Tensor

+
+### Basic Concept
+
+The shape of a **Tensor** can be obtained through **Tensor.shape**. shape is an important attribute of **Tensor**, and the following concepts are related to it:
+
+1. shape: Describes the number of elements along each of the tensor's dimensions.
+2. rank: The number of the tensor's dimensions. For example, the rank of a vector is 1 and the rank of a matrix is 2.
+3. axis or dimension: A particular dimension of a tensor.
+4. size: The total number of elements in the tensor.
+
+Let's create a 4-D **Tensor** and visualize it to represent the relationship between the above concepts.
+```python
+rank_4_tensor = paddle.ones([2, 3, 4, 5])
+```
+
+
Figure3. The relationship between Tensor shape, axis, dimension and rank
+ +```python +print("Data Type of every element:", rank_4_tensor.dtype) +print("Number of dimensions:", rank_4_tensor.ndim) +print("Shape of tensor:", rank_4_tensor.shape) +print("Elements number along axis 0 of tensor:", rank_4_tensor.shape[0]) +print("Elements number along the last axis of tensor:", rank_4_tensor.shape[-1]) +``` +```text +Data Type of every element: VarType.FP32 +Number of dimensions: 4 +Shape of tensor: [2, 3, 4, 5] +Elements number along axis 0 of tensor: 2 +Elements number along the last axis of tensor: 5 +``` + +### indexing + +Paddle follows standard Python indexing rules, similar to[ndexing a list or a string in Python](https://docs.python.org/3/tutorial/introduction.html#strings) and the basic rules for NumPy indexing. indexing is used to work on Tensor "slice". It has following characteristics: + +1. negative indices count backwards from the end +2. colons, : , are used for slices: start:stop:step + +For **1-D Tensor**, there is only single-axis indexing: +```python +rank_1_tensor = paddle.to_tensor([0, 1, 2, 3, 4, 5, 6, 7, 8]) +print("Origin Tensor:", rank_1_tensor.numpy()) + +print("First element:", rank_1_tensor[0].numpy()) +print("Last element:", rank_1_tensor[-1].numpy()) +print("All element:", rank_1_tensor[:].numpy()) +print("Before 3:", rank_1_tensor[:3].numpy()) +print("From 6 to the end:", rank_1_tensor[6:].numpy()) +print("From 3 to 6:", rank_1_tensor[3:6].numpy()) +print("Interval of 3:", rank_1_tensor[::3].numpy()) +print("Reverse:", rank_1_tensor[::-1].numpy()) +``` +```text +Origin Tensor: array([0, 1, 2, 3, 4, 5, 6, 7, 8], dtype=int64) +First element: [0] +Last element: [8] +All element: [0 1 2 3 4 5 6 7 8] +Before 3: [0 1 2] +From 6 to the end: [6 7 8] +From 3 to 6: [3 4 5] +Interval of 3: [0 3 6] +Reverse: [8 7 6 5 4 3 2 1 0] +``` + +For 2-D **Tensor** or above, there is multi-axis indexing: +```python +rank_2_tensor = paddle.to_tensor([[0, 1, 2, 3], + [4, 5, 6, 7], + [8, 9, 10, 11]]) +print("Origin Tensor:", rank_2_tensor.numpy()) + +print("First row:", rank_2_tensor[0].numpy()) +print("First row:", rank_2_tensor[0, :].numpy()) +print("First column:", rank_2_tensor[:, 0].numpy()) +print("Last column:", rank_2_tensor[:, -1].numpy()) +print("All element:", rank_2_tensor[:].numpy()) +print("First row and second column:", rank_2_tensor[0, 1].numpy()) +``` +```text +Origin Tensor: array([[ 0 1 2 3] + [ 4 5 6 7] + [ 8 9 10 11]], dtype=int64) +First row: [0 1 2 3] +First row: [0 1 2 3] +First column: [0 4 8] +Last column: [ 3 7 11] +All element: [[ 0 1 2 3] + [ 4 5 6 7] + [ 8 9 10 11]] +First row and second column: [1] +``` + +The first element of index is corresponds to Axis 0, the second is corresponds to Axis 1, and so on. If no index is specified on an Axis, the default is ':' . For example: +``` +rank_3_tensor[1] +rank_3_tensor[1, :] +rank_3_tensor[1, :, :] +``` +These three are exactly the same. + +### Manipulating Shape + +Manipulating shape of Tensor is important in programming. 
+```python
+rank_3_tensor = paddle.to_tensor([[[1, 2, 3, 4, 5],
+                                   [6, 7, 8, 9, 10]],
+                                  [[11, 12, 13, 14, 15],
+                                   [16, 17, 18, 19, 20]],
+                                  [[21, 22, 23, 24, 25],
+                                   [26, 27, 28, 29, 30]]])
+print("the shape of rank_3_tensor:", rank_3_tensor.shape)
+```
+```text
+the shape of rank_3_tensor: [3, 2, 5]
+```
+
+Paddle provides the reshape API to manipulate the shape of a Tensor:
+```python
+rank_3_tensor = paddle.reshape(rank_3_tensor, [2, 5, 3])
+print("After reshape:", rank_3_tensor.shape)
+```
+```text
+After reshape: [2, 5, 3]
+```
+
+There are some tricks for specifying a new shape:
+
+1. -1 indicates that the value of this dimension is inferred from the total number of elements and the other dimensions of the Tensor. Therefore, one and only one dimension can be set to -1.
+2. 0 means that the actual dimension is copied from the corresponding dimension of the original Tensor, so the position of any 0 in the new shape can't exceed the rank of x.
+
+For example:
+```text
+origin:[3, 2, 5] reshape:[3, 10] actual: [3, 10]
+origin:[3, 2, 5] reshape:[-1] actual: [30]
+origin:[3, 2, 5] reshape:[0, 5, -1] actual: [3, 5, 2]
+```
+
+If you reshape a Tensor to [-1], it is flattened to a 1-D Tensor, which reveals the order in which it is laid out in memory.
+```python
+print("Tensor flattened to Vector:", paddle.reshape(rank_3_tensor, [-1]).numpy())
+```
+```text
+Tensor flattened to Vector: [1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30]
+```
+
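+To make the -1 and 0 rules above concrete, here is a minimal, hedged sketch (it builds a fresh tensor instead of reusing `rank_3_tensor`, whose shape has already been changed above):
+```python
+import paddle
+
+x = paddle.ones([3, 2, 5])
+print(paddle.reshape(x, [-1]).shape)        # [30]: the single -1 is inferred as 3*2*5
+print(paddle.reshape(x, [0, 5, -1]).shape)  # [3, 5, 2]: 0 copies the original dim 0, -1 is inferred
+```
+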

+----------
+## Other attributes of Tensor

+
+### dtype of Tensor
+
+The data type of a **Tensor** can be obtained from Tensor.dtype. Supported dtypes are: 'bool', 'float16', 'float32', 'float64', 'uint8', 'int8', 'int16', 'int32', 'int64'.
+
+* If a Tensor is created from Python elements, its data type can be specified by dtype. Otherwise:
+
+    * For Python integer data, an int64 Tensor will be created
+    * For Python float data, a float32 Tensor will be created by default. You can change the default dtype by set_default_type.
+
+* If a Tensor is created from a Numpy array, its data type remains the same as the original dtype.
+
+```python
+print("Tensor dtype from Python integers:", paddle.to_tensor(1).dtype)
+print("Tensor dtype from Python floating point:", paddle.to_tensor(1.0).dtype)
+```
+```text
+Tensor dtype from Python integers: VarType.INT64
+Tensor dtype from Python floating point: VarType.FP32
+```
+
+Paddle provides the **cast** API to change the dtype:
+```python
+float32_tensor = paddle.to_tensor(1.0)
+
+float64_tensor = paddle.cast(float32_tensor, dtype='float64')
+print("Tensor after cast to float64:", float64_tensor.dtype)
+
+int64_tensor = paddle.cast(float32_tensor, dtype='int64')
+print("Tensor after cast to int64:", int64_tensor.dtype)
+```
+```text
+Tensor after cast to float64: VarType.FP64
+Tensor after cast to int64: VarType.INT64
+```
+
+### place of Tensor
+
+The device can be specified when creating a Tensor. There are three kinds of devices to choose from: CPU / GPU / pinned memory.
+Pinned memory has higher read and write efficiency when exchanging data with the GPU, and it supports asynchronous data copy, which can further improve network performance. The disadvantage is that allocating too much pinned memory may reduce the performance of the host, because it reduces the pageable memory available for storing virtual memory data.
+
+* **Create Tensor on CPU**:
+```python
+cpu_tensor = paddle.to_tensor(1, place=paddle.CPUPlace())
+print(cpu_tensor)
+```
+
+```text
+Tensor: generated_tensor_0
+  - place: CPUPlace
+```
+
+* **Create Tensor on GPU**:
+```python
+gpu_tensor = paddle.to_tensor(1, place=paddle.CUDAPlace(0))
+print(gpu_tensor)
+```
+
+```text
+Tensor: generated_tensor_0
+  - place: CUDAPlace(0)
+```
+
+* **Create Tensor on pinned memory**:
+```python
+pin_memory_tensor = paddle.to_tensor(1, place=paddle.CUDAPinnedPlace())
+print(pin_memory_tensor)
+```
+```text
+Tensor: generated_tensor_0
+  - place: CUDAPinnedPlace
+
+```
+### name of Tensor
+
+The name of a Tensor is its unique identifier, a Python string, and it can be obtained from ``Tensor.name``. By default, Paddle generates a unique name for each Tensor when it is created.
+
+```python
+print("Tensor name:", paddle.to_tensor(1).name)
+```
+```text
+Tensor name: generated_tensor_0
+```
+
+----------
+##

Method of Tensor

+
+
+Paddle provides a rich set of Tensor operation APIs, including mathematical operators, logical operators, linear algebra operators and so on; there are more than 100 of them in total. These APIs can be called in two ways:
+
+```python
+x = paddle.to_tensor([[1.1, 2.2], [3.3, 4.4]])
+y = paddle.to_tensor([[5.5, 6.6], [7.7, 8.8]])
+
+print(paddle.add(x, y), "\n")
+print(x.add(y), "\n")
+```
+```text
+Tensor: eager_tmp_2
+  - place: CUDAPlace(0)
+  - shape: [2, 2]
+  - layout: NCHW
+  - dtype: float
+  - data: [6.6 8.8 11 13.2]
+
+Tensor: eager_tmp_3
+  - place: CUDAPlace(0)
+  - shape: [2, 2]
+  - layout: NCHW
+  - dtype: float
+  - data: [6.6 8.8 11 13.2]
+```
+
+As you can see, the Tensor class method produces the same result as the paddle API, and the class method is more convenient to invoke.
+
+### mathematical operators
+```python
+x.abs() #absolute value
+x.ceil() #round up to an integer
+x.floor() #round down to an integer
+x.exp() #calculate the exponential of each element (base e)
+x.log() #calculate the natural logarithm of each element
+x.reciprocal() #reciprocal
+x.square() #calculate the square of each element
+x.sqrt() #calculate the square root of each element
+x.sum() #calculate the sum of all elements
+x.asin() #calculate the arcsine of each element
+x.add(y) #add element by element
+x.add(-y) #subtract element by element
+x.multiply(y) #multiply element by element
+x.divide(y) #divide element by element
+x.floor_divide(y) #floor-divide element by element
+x.remainder(y) #mod element by element
+x.pow(y) #raise to a power element by element
+x.reduce_max() #max of all elements; a specific axis can be given
+x.reduce_min() #min of all elements; a specific axis can be given
+x.reduce_prod() #product of all elements; a specific axis can be given
+x.reduce_sum() #sum of all elements; a specific axis can be given
+```
+
+Paddle overrides the magic functions related to Python mathematical operations, so the following expressions are equivalent to the methods above:
+```text
+x + y -> x.add(y)
+x - y -> x.add(-y)
+x * y -> x.multiply(y)
+x / y -> x.divide(y)
+x // y -> x.floor_divide(y)
+x % y -> x.remainder(y)
+x ** y -> x.pow(y)
+```
+
+### logical operators
+```python
+x.is_empty() #judge whether the tensor is empty
+x.isfinite() #judge whether each element is a finite number (not inf or nan)
+x.equal_all(y) #judge whether all elements of two tensors are equal
+x.equal(y) #judge whether each element of two tensors is equal
+x.not_equal(y) #judge whether each element of two tensors is not equal
+x.less_than(y) #judge whether each element of tensor x is less than the corresponding element of tensor y
+x.less_equal(y) #judge whether each element of tensor x is less than or equal to the corresponding element of tensor y
+x.greater_than(y) #judge whether each element of tensor x is greater than the corresponding element of tensor y
+x.greater_equal(y) #judge whether each element of tensor x is greater than or equal to the corresponding element of tensor y
+```
+
+Paddle also overrides the magic functions related to Python logical operations.
Like this: +```text +x == y -> x.euqal(y) +x != y -> x.not_equal(y) +x < y -> x.less_than(y) +x <= y -> x.less_equal(y) +x > y -> x.greater_than(y) +x >= y -> x.greater_equal(y) +``` + +The following operations are targeted at bool Tensor only: +```python +x.reduce_all(y) #Judge whether a bool tensor is True for all elements +x.reduce_any(y) #Judge whether a bool tensor exists at least one element is True +x.logical_and(y) #logic and operation for two bool tensor +x.logical_or(y) #logic or operation for two bool tensor +x.logical_xor(y) #logic xor operation for two bool tensor +x.logical_not(y) #logic not operation for two bool tensor +``` + +### linear algebra operators +```python +x.cholesky() #cholesky decomposition of a matrix +x.t() #matrix transpose +x.transpose([1, 0]) #swap axis 0 with axis 1 +x.norm('pro') #Frobenius Norm of matrix +x.dist(y, p=2) #The 2 norm of (x-y) +x.matmul(y) #Matrix multiplication +``` +It should be noted that the class method of Tensor are non-inplace operations. It means, ``x.And dd(y)`` will not operate directly on Tensor x, but return a new Tensor to represent the results. + +For more API related to Tensor operations, please refer to[class paddle.Tensor](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/tensor/creation/Tensor_cn.html) diff --git a/doc/paddle/images/1.png b/doc/paddle/images/1.png new file mode 100644 index 0000000000000000000000000000000000000000..67daf566f91aab570e60971c4ea8e2be876e214d Binary files /dev/null and b/doc/paddle/images/1.png differ diff --git a/doc/paddle/images/2.png b/doc/paddle/images/2.png new file mode 100644 index 0000000000000000000000000000000000000000..43367777f41449a666e7a3b571f09ac5d5dfb1ae Binary files /dev/null and b/doc/paddle/images/2.png differ diff --git a/doc/paddle/images/2_level_rnn.dot b/doc/paddle/images/2_level_rnn.dot new file mode 100644 index 0000000000000000000000000000000000000000..5d77865061ca7bbbfcf254dd938f09aef5553505 --- /dev/null +++ b/doc/paddle/images/2_level_rnn.dot @@ -0,0 +1,56 @@ +digraph G { + + rnn [label="1st level RNN" shape=box] + + subgraph cluster0 { + label = "time step 0" + + sent0 [label="sentence"] + sent1 [label="sentence"] + + rnn1 [label="2nd level RNN" shape=box] + + sent0 -> rnn1 + sent1 -> rnn1 + } + + subgraph cluster1 { + label = "time step 1" + + sent2 [label="sentence"] + sent3 [label="sentence"] + + rnn2 [label="2nd level RNN" shape=box] + + sent2 -> rnn2 + sent3 -> rnn2 + } + + subgraph cluster2 { + label = "time step 2" + + sent4 [label="sentence"] + sent5 [label="sentence"] + + rnn3 [label="2nd level RNN" shape=box] + + sent4 -> rnn3 + sent5 -> rnn3 + } + + + para0 [label="paragraph info 0"] + para1 [label="paragraph info 1"] + para2 [label="paragraph info 2"] + + rnn1 -> para0 + rnn2 -> para1 + rnn3 -> para2 + + para0 -> rnn + para1 -> rnn + para2 -> rnn + + chapter [label="chapter info"] + rnn -> chapter +} diff --git a/doc/paddle/images/2_level_rnn.png b/doc/paddle/images/2_level_rnn.png new file mode 100644 index 0000000000000000000000000000000000000000..0537a75beb175c0c284717421f7aa908da2a5038 Binary files /dev/null and b/doc/paddle/images/2_level_rnn.png differ diff --git a/doc/paddle/images/3.png b/doc/paddle/images/3.png new file mode 100644 index 0000000000000000000000000000000000000000..481021ef306e2596818aab7fe17a570754f63635 Binary files /dev/null and b/doc/paddle/images/3.png differ diff --git a/doc/paddle/images/4.png b/doc/paddle/images/4.png new file mode 100644 index 
0000000000000000000000000000000000000000..4279f41e06de459f18b9a622539511d555e9a0af Binary files /dev/null and b/doc/paddle/images/4.png differ diff --git a/doc/paddle/images/LOD-and-shape-changes-during-decoding.jpg b/doc/paddle/images/LOD-and-shape-changes-during-decoding.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8b0d90f7b9d8184b314b0ee4e521f53eb5f1b455 Binary files /dev/null and b/doc/paddle/images/LOD-and-shape-changes-during-decoding.jpg differ diff --git a/doc/paddle/images/LoDTensor.png b/doc/paddle/images/LoDTensor.png new file mode 100644 index 0000000000000000000000000000000000000000..75369f5378309e0f304b83f6bb69bdb195eac079 Binary files /dev/null and b/doc/paddle/images/LoDTensor.png differ diff --git a/doc/paddle/images/asgd.gif b/doc/paddle/images/asgd.gif new file mode 100644 index 0000000000000000000000000000000000000000..4a0da7bf6df9326a2aab1638b77c5455c18b8c4e Binary files /dev/null and b/doc/paddle/images/asgd.gif differ diff --git a/doc/paddle/images/batch_norm_fork.dot b/doc/paddle/images/batch_norm_fork.dot new file mode 100644 index 0000000000000000000000000000000000000000..4bc47713cba2cb23f1b34fffe6426ef10ac3a9df --- /dev/null +++ b/doc/paddle/images/batch_norm_fork.dot @@ -0,0 +1,25 @@ +digraph ImageBatchNormForkGragh { + subgraph cluster_before { + Prev [label="...", shape=plaintext]; + Rnn [label="rnn_op", shape=box]; + BatchNorm [label="batch_norm_op", shape=box]; + Fc [label="fc_op", shape=box]; + After [label="...", shape=plaintext]; + Prev -> Rnn -> BatchNorm -> Fc -> After; + label="original"; + } + + subgraph cluster_after { + Prev2 [label="...", shape=plaintext]; + Rnn2 [label="rnn_op", shape=box]; + BatchNorm2_1 [label="train_batch_norm_op", shape=box]; + BatchNorm2_2 [label="infer_batch_norm_op", shape=box]; + Fc2_1 [label="fc_op", shape=box]; + Fc2_2 [label="fc_op", shape=box]; + After2_1 [label="...", shape=plaintext]; + After2_2 [label="...", shape=plaintext]; + Prev2 -> Rnn2 -> BatchNorm2_1 -> Fc2_1 -> After2_1; + Rnn2 -> BatchNorm2_2 ->Fc2_2 ->After2_2 + label="forked"; + } +} diff --git a/doc/paddle/images/batch_norm_fork.png b/doc/paddle/images/batch_norm_fork.png new file mode 100644 index 0000000000000000000000000000000000000000..aded62bce5bc268b7a3ef4dc96c89fe21d6ea955 Binary files /dev/null and b/doc/paddle/images/batch_norm_fork.png differ diff --git a/doc/paddle/images/batch_norm_op_kernel.png b/doc/paddle/images/batch_norm_op_kernel.png new file mode 100644 index 0000000000000000000000000000000000000000..a99ce81ff3bf42880ebbd6a1297de3bf038e09b2 Binary files /dev/null and b/doc/paddle/images/batch_norm_op_kernel.png differ diff --git a/doc/paddle/images/beam_search.png b/doc/paddle/images/beam_search.png new file mode 100644 index 0000000000000000000000000000000000000000..7f7e35f34223162d0f7f0ed97375909c43b830ae Binary files /dev/null and b/doc/paddle/images/beam_search.png differ diff --git a/doc/paddle/images/ci_build_whl.png b/doc/paddle/images/ci_build_whl.png new file mode 100644 index 0000000000000000000000000000000000000000..232762b82a9ae3e979a1f38a7beb715c87438f40 Binary files /dev/null and b/doc/paddle/images/ci_build_whl.png differ diff --git a/doc/paddle/images/compile_run_time.png b/doc/paddle/images/compile_run_time.png new file mode 100644 index 0000000000000000000000000000000000000000..0bc9b2fd0e81b4851e6d96171ccb9a05d0f42a48 Binary files /dev/null and b/doc/paddle/images/compile_run_time.png differ diff --git a/doc/paddle/images/compiler.graffle b/doc/paddle/images/compiler.graffle new file mode 
100644 index 0000000000000000000000000000000000000000..8cc678fea3c820103e7ce81f7a5d625d6c1d92de Binary files /dev/null and b/doc/paddle/images/compiler.graffle differ diff --git a/doc/paddle/images/compiler.png b/doc/paddle/images/compiler.png new file mode 100644 index 0000000000000000000000000000000000000000..65d34f841afce9756def07dd8ecb9ca44e658bfe Binary files /dev/null and b/doc/paddle/images/compiler.png differ diff --git a/doc/paddle/images/control_flow_graph.png b/doc/paddle/images/control_flow_graph.png new file mode 100644 index 0000000000000000000000000000000000000000..3579998e58d07abc50bd3332128d4733a391cb3b Binary files /dev/null and b/doc/paddle/images/control_flow_graph.png differ diff --git a/doc/paddle/images/dataflow_equations.png b/doc/paddle/images/dataflow_equations.png new file mode 100644 index 0000000000000000000000000000000000000000..c10f7f69f4007952e5b0394edaa04efa1cfbb658 Binary files /dev/null and b/doc/paddle/images/dataflow_equations.png differ diff --git a/doc/paddle/images/dcgan.png b/doc/paddle/images/dcgan.png new file mode 100644 index 0000000000000000000000000000000000000000..15e8e290a111ff43900934341365cb4360d87d28 Binary files /dev/null and b/doc/paddle/images/dcgan.png differ diff --git a/doc/paddle/images/deep_learning.png b/doc/paddle/images/deep_learning.png new file mode 100644 index 0000000000000000000000000000000000000000..026becc4d94e01e407dacb2a5314a0e5723334ff Binary files /dev/null and b/doc/paddle/images/deep_learning.png differ diff --git a/doc/paddle/images/dist-graph.graffle b/doc/paddle/images/dist-graph.graffle new file mode 100644 index 0000000000000000000000000000000000000000..941399c6ced8d5f65b6c595522b770c88259df4b Binary files /dev/null and b/doc/paddle/images/dist-graph.graffle differ diff --git a/doc/paddle/images/dist-graph.png b/doc/paddle/images/dist-graph.png new file mode 100644 index 0000000000000000000000000000000000000000..3546b09f1c2ee3e4f60f519d5e47f823f08051a7 Binary files /dev/null and b/doc/paddle/images/dist-graph.png differ diff --git a/doc/paddle/images/distributed_architecture.graffle b/doc/paddle/images/distributed_architecture.graffle new file mode 100644 index 0000000000000000000000000000000000000000..d1b60141342232e06227c2d430ebc60ec349a907 Binary files /dev/null and b/doc/paddle/images/distributed_architecture.graffle differ diff --git a/doc/paddle/images/distributed_architecture.png b/doc/paddle/images/distributed_architecture.png new file mode 100644 index 0000000000000000000000000000000000000000..29c7b0c0783f97c6d33b1db1ed484d6a2b9dd356 Binary files /dev/null and b/doc/paddle/images/distributed_architecture.png differ diff --git a/doc/paddle/images/ds2_network.png b/doc/paddle/images/ds2_network.png new file mode 100644 index 0000000000000000000000000000000000000000..1a5b2184d47928cc2849d5a7c8ea2d8cf5337e11 Binary files /dev/null and b/doc/paddle/images/ds2_network.png differ diff --git a/doc/paddle/images/executor.png b/doc/paddle/images/executor.png new file mode 100644 index 0000000000000000000000000000000000000000..b29c0d779e3d46b779b5baeabe3176adaeb00a6d Binary files /dev/null and b/doc/paddle/images/executor.png differ diff --git a/doc/paddle/images/feed_forward.png b/doc/paddle/images/feed_forward.png new file mode 100644 index 0000000000000000000000000000000000000000..d312371a04c26aa6cd196e0bd1f51becb425180b Binary files /dev/null and b/doc/paddle/images/feed_forward.png differ diff --git a/doc/paddle/images/feed_forward_regularized.png b/doc/paddle/images/feed_forward_regularized.png new file 
mode 100644 index 0000000000000000000000000000000000000000..677e99bfd9f8e72ed9fe4b27127af2ced202f447 Binary files /dev/null and b/doc/paddle/images/feed_forward_regularized.png differ diff --git a/doc/paddle/images/fluid-compiler.graffle b/doc/paddle/images/fluid-compiler.graffle new file mode 100644 index 0000000000000000000000000000000000000000..c933df2cb855462c52b2d25f7f9a99b95652961d Binary files /dev/null and b/doc/paddle/images/fluid-compiler.graffle differ diff --git a/doc/paddle/images/fluid-compiler.png b/doc/paddle/images/fluid-compiler.png new file mode 100644 index 0000000000000000000000000000000000000000..1b0ffed2039c91a3a00bbb719da08c91c3acf7bb Binary files /dev/null and b/doc/paddle/images/fluid-compiler.png differ diff --git a/doc/paddle/images/fluid_examples.png b/doc/paddle/images/fluid_examples.png new file mode 100644 index 0000000000000000000000000000000000000000..aa99472c0f914cde128fd7b3bd8dc29ac24f94b6 Binary files /dev/null and b/doc/paddle/images/fluid_examples.png differ diff --git a/doc/paddle/images/fluid_module_1.png b/doc/paddle/images/fluid_module_1.png new file mode 100644 index 0000000000000000000000000000000000000000..554782ba54e43efc3d6babbb94e3cac3530ac649 Binary files /dev/null and b/doc/paddle/images/fluid_module_1.png differ diff --git a/doc/paddle/images/fluid_module_2.png b/doc/paddle/images/fluid_module_2.png new file mode 100644 index 0000000000000000000000000000000000000000..4219efccbb1e87839adf6b5720fe46808b7d2fcf Binary files /dev/null and b/doc/paddle/images/fluid_module_2.png differ diff --git a/doc/paddle/images/graph_construction_example.bash b/doc/paddle/images/graph_construction_example.bash new file mode 100755 index 0000000000000000000000000000000000000000..35e6997abd17588e17a82d448918fc1b3bd7220e --- /dev/null +++ b/doc/paddle/images/graph_construction_example.bash @@ -0,0 +1,11 @@ +cat ./graph_construction_example.dot | \ + sed 's/color=red/color=red, style=invis/g' | \ + sed 's/color=green/color=green, style=invis/g' | \ + dot -Tpng > graph_construction_example_forward_only.png + +cat ./graph_construction_example.dot | \ + sed 's/color=green/color=green, style=invis/g' | \ + dot -Tpng > graph_construction_example_forward_backward.png + +cat ./graph_construction_example.dot | \ + dot -Tpng > graph_construction_example_all.png diff --git a/doc/paddle/images/graph_construction_example.dot b/doc/paddle/images/graph_construction_example.dot new file mode 100644 index 0000000000000000000000000000000000000000..e115f9844bae6ad24f638c8ed4749cea8aff06a9 --- /dev/null +++ b/doc/paddle/images/graph_construction_example.dot @@ -0,0 +1,68 @@ +digraph ImageClassificationGraph { + ///////// The forward part ///////// + FeedX [label="Feed", color=blue, shape=box]; + FeedY [label="Feed", color=blue, shape=box]; + InitW [label="Init", color=blue, shape=diamond]; + Initb [label="Init", color=blue, shape=diamond]; + FC [label="FC", color=blue, shape=box]; + MSE [label="MSE", color=blue, shape=box]; + + x [label="x", color=blue, shape=oval]; + l [label="l", color=blue, shape=oval]; + y [label="y", color=blue, shape=oval]; + W [label="W", color=blue, shape=doublecircle]; + b [label="b", color=blue, shape=doublecircle]; + cost [label="cost", color=blue, shape=oval]; + + FeedX -> x -> FC -> y -> MSE -> cost [color=blue]; + FeedY -> l [color=blue]; + InitW -> W [color=blue]; + Initb -> b [color=blue]; + W -> FC [color=blue]; + b -> FC [color=blue]; + l -> MSE [color=blue]; + + ////////// The backward part ///////// + MSE_Grad [label="MSE_grad", color=red, 
shape=box]; + FC_Grad [label="FC_grad", color=red, shape=box]; + + d_cost [label="d cost", color=red, shape=oval]; + d_y [label="d y", color=red, shape=oval]; + d_b [label="d b", color=red, shape=oval]; + d_W [label="d W", color=red, shape=oval]; + + cost -> MSE_Grad [color=red]; + d_cost -> MSE_Grad [color=red]; + l -> MSE_Grad [color=red]; + y -> MSE_Grad -> d_y [color=red]; + + x -> FC_Grad [color=red]; + y -> FC_Grad [color=red]; + d_y -> FC_Grad [color=red]; + W -> FC_Grad -> d_W [color=red]; + b -> FC_Grad -> d_b [color=red]; + + ////////// The optimizaiton part ////////// + + OPT_W [label="SGD", color=green, shape=box]; + OPT_b [label="SGD", color=green, shape=box]; + + W -> OPT_W [color=green]; + b -> OPT_b [color=green]; + d_W -> OPT_W -> W [color=green]; + d_b -> OPT_b -> b [color=green]; + + ////////// Groupings ////////// + + subgraph clusterMSE { + style=invis; + MSE; + MSE_Grad; + } + + subgraph clusterFC { + style=invis; + FC; + FC_Grad; + } +} diff --git a/doc/paddle/images/graph_construction_example_all.png b/doc/paddle/images/graph_construction_example_all.png new file mode 100644 index 0000000000000000000000000000000000000000..261611a5721f9aa97874f7e6d897fe48cf667db2 Binary files /dev/null and b/doc/paddle/images/graph_construction_example_all.png differ diff --git a/doc/paddle/images/graph_construction_example_forward_backward.png b/doc/paddle/images/graph_construction_example_forward_backward.png new file mode 100644 index 0000000000000000000000000000000000000000..4c69687f4a6a181138f3df72ce5e8aa48487b5be Binary files /dev/null and b/doc/paddle/images/graph_construction_example_forward_backward.png differ diff --git a/doc/paddle/images/graph_construction_example_forward_only.png b/doc/paddle/images/graph_construction_example_forward_only.png new file mode 100644 index 0000000000000000000000000000000000000000..e668c16e0cac73acb4e5dc2b1827557ae77126b4 Binary files /dev/null and b/doc/paddle/images/graph_construction_example_forward_only.png differ diff --git a/doc/paddle/images/l1_regularization.png b/doc/paddle/images/l1_regularization.png new file mode 100644 index 0000000000000000000000000000000000000000..e1b9c7a44f94dc027598a98da93ddb8133190972 Binary files /dev/null and b/doc/paddle/images/l1_regularization.png differ diff --git a/doc/paddle/images/l2_regularization.png b/doc/paddle/images/l2_regularization.png new file mode 100644 index 0000000000000000000000000000000000000000..d5c2fcbc2ccae75ad083162e5a2dceb0210be298 Binary files /dev/null and b/doc/paddle/images/l2_regularization.png differ diff --git a/doc/paddle/images/layer.png b/doc/paddle/images/layer.png new file mode 100644 index 0000000000000000000000000000000000000000..e46db4c9c6f5b65ff274b498b716b11de343a8b0 Binary files /dev/null and b/doc/paddle/images/layer.png differ diff --git a/doc/paddle/images/learning_rate_scheduler.png b/doc/paddle/images/learning_rate_scheduler.png new file mode 100644 index 0000000000000000000000000000000000000000..75210095cdc3a25919a62d38d2ce93ca782a3123 Binary files /dev/null and b/doc/paddle/images/learning_rate_scheduler.png differ diff --git a/doc/paddle/images/local-graph.graffle b/doc/paddle/images/local-graph.graffle new file mode 100644 index 0000000000000000000000000000000000000000..19e509bd9af3c1e9a3f5e0f16ddd281457a339c5 Binary files /dev/null and b/doc/paddle/images/local-graph.graffle differ diff --git a/doc/paddle/images/local-graph.png b/doc/paddle/images/local-graph.png new file mode 100644 index 
0000000000000000000000000000000000000000..ada51200f793a9bb18911e7d63cfdb3244b967d7 Binary files /dev/null and b/doc/paddle/images/local-graph.png differ diff --git a/doc/paddle/images/local_architecture.graffle b/doc/paddle/images/local_architecture.graffle new file mode 100644 index 0000000000000000000000000000000000000000..49fcc663ebe3824aa234e3a67aadf285cb417877 Binary files /dev/null and b/doc/paddle/images/local_architecture.graffle differ diff --git a/doc/paddle/images/local_architecture.png b/doc/paddle/images/local_architecture.png new file mode 100644 index 0000000000000000000000000000000000000000..14adc9fd72b855bb9f74fbf2c84ac9ec0cf2b122 Binary files /dev/null and b/doc/paddle/images/local_architecture.png differ diff --git a/doc/paddle/images/lookup_table.png b/doc/paddle/images/lookup_table.png new file mode 100644 index 0000000000000000000000000000000000000000..72dfe3547f731d0d090338afb206b0549dff472e Binary files /dev/null and b/doc/paddle/images/lookup_table.png differ diff --git a/doc/paddle/images/lookup_table_training.png b/doc/paddle/images/lookup_table_training.png new file mode 100644 index 0000000000000000000000000000000000000000..cc7cc4aeb3b885850fe2f70f19fb84d5873bed1e Binary files /dev/null and b/doc/paddle/images/lookup_table_training.png differ diff --git a/doc/paddle/images/loss_equation.png b/doc/paddle/images/loss_equation.png new file mode 100644 index 0000000000000000000000000000000000000000..14212ec8d36c803de96bde8a9a4b5591bd20434e Binary files /dev/null and b/doc/paddle/images/loss_equation.png differ diff --git a/doc/paddle/images/multi-threads.graffle b/doc/paddle/images/multi-threads.graffle new file mode 100644 index 0000000000000000000000000000000000000000..e71173715fff92a0a933d0c7d83599ba948552c6 Binary files /dev/null and b/doc/paddle/images/multi-threads.graffle differ diff --git a/doc/paddle/images/multi-threads@3x.png b/doc/paddle/images/multi-threads@3x.png new file mode 100644 index 0000000000000000000000000000000000000000..e40a869987dbbf5019d4cb03c1dab55b74d6c9f9 Binary files /dev/null and b/doc/paddle/images/multi-threads@3x.png differ diff --git a/doc/paddle/images/multigpu_allreduce.graffle b/doc/paddle/images/multigpu_allreduce.graffle new file mode 100644 index 0000000000000000000000000000000000000000..cb5bc420ceafe8ba4c87694d44ee4e5e4ad06779 Binary files /dev/null and b/doc/paddle/images/multigpu_allreduce.graffle differ diff --git a/doc/paddle/images/multigpu_allreduce.png b/doc/paddle/images/multigpu_allreduce.png new file mode 100644 index 0000000000000000000000000000000000000000..87a1b3e8f6dd4a713ec9df9f0037d1da04e9178a Binary files /dev/null and b/doc/paddle/images/multigpu_allreduce.png differ diff --git a/doc/paddle/images/multigpu_before_convert.graffle b/doc/paddle/images/multigpu_before_convert.graffle new file mode 100644 index 0000000000000000000000000000000000000000..6c35ab1b21fb76ceae82d3693ed0d085b5bc0855 Binary files /dev/null and b/doc/paddle/images/multigpu_before_convert.graffle differ diff --git a/doc/paddle/images/multigpu_before_convert.png b/doc/paddle/images/multigpu_before_convert.png new file mode 100644 index 0000000000000000000000000000000000000000..9c8f7711165d80a2fa3911280fdee91855a401b1 Binary files /dev/null and b/doc/paddle/images/multigpu_before_convert.png differ diff --git a/doc/paddle/images/multiple_reader.png b/doc/paddle/images/multiple_reader.png new file mode 100644 index 0000000000000000000000000000000000000000..b22126b31db4982c13fc3a0827805e6aaf955046 Binary files /dev/null and 
b/doc/paddle/images/multiple_reader.png differ diff --git a/doc/paddle/images/op.dot b/doc/paddle/images/op.dot new file mode 100644 index 0000000000000000000000000000000000000000..c8ad839cb88788e9b5906402257cc7bbc3ddcb54 --- /dev/null +++ b/doc/paddle/images/op.dot @@ -0,0 +1,4 @@ +digraph sample { + graph [rankdir=TD]; node [shape=record]; + op [label="{Operator| InferShape()=0\lRun()=0\l | map<string, string[]> inputs_\lmap<string, string[]> outputs_ \l AttributeMap attrs_\l}"]; +} \ No newline at end of file diff --git a/doc/paddle/images/op_op_with_kern_class_diagram.dot b/doc/paddle/images/op_op_with_kern_class_diagram.dot new file mode 100644 index 0000000000000000000000000000000000000000..8f24e9ea83acf879c7008f2d97113c0a4cc111c3 --- /dev/null +++ b/doc/paddle/images/op_op_with_kern_class_diagram.dot @@ -0,0 +1,38 @@ +digraph sample { + graph [rankdir=TD]; node [shape=record]; + op [label="{Operator| InferShape()=0\lRun()=0\l | map<string, string[]> inputs_\lmap<string, string[]> outputs_ \l AttributeMap attrs_\l}"]; + op_with_kern [label="{OpWithKernel | InferShape()=0\lRun()\l | map<OpKernelKey,OpKernel>kernels_ }"] + op_kernel [label="{OpKernel | Compute()=0}"] + op_kernel_key [label="{OpKernelKey| Place place\n...}"] + + op -> op_with_kern [dir=back, arrowtail=onormal] + op_with_kern -> op_kernel [arrowhead=vee, label="contains many"] + + { + rank=same; + op_with_kern + op_kernel + } + + op_kernel -> op_kernel_key [style=invis] + + { + rank=same; + op_kernel + op_kernel_key + } + + op_with_kern -> op_kernel_key [arrowhead=vee, label ="\nas map key"] + + mul_op [label="MulOp"] + op_with_kern -> mul_op [dir=back, arrowtail=onormal] + mul_kernel [label="template <typename Place>\lclass MulOpKernel\l"] + op_kernel -> mul_kernel [dir=back, arrowtail=onormal] + mul_op -> mul_kernel [arrowhead=vee, label="register many"] + + { + rank=same; + mul_op; + mul_kernel; + } +} \ No newline at end of file diff --git a/doc/paddle/images/op_with_kernel.dot b/doc/paddle/images/op_with_kernel.dot new file mode 100644 index 0000000000000000000000000000000000000000..4f5af4f7b5f5a69693a058c99eb658900136077a --- /dev/null +++ b/doc/paddle/images/op_with_kernel.dot @@ -0,0 +1,26 @@ +digraph sample { + graph [rankdir=TD]; node [shape=record]; + op [label="{Operator}"]; + op_with_kern [label="{OpWithKernel | InferShape()=0\lRun()\l | map<OpKernelKey,OpKernel>kernels_ }"] + op_kernel [label="{OpKernel | Compute()=0}"] + op_kernel_key [label="{OpKernelKey| Place place\n...}"] + + op -> op_with_kern [dir=back, arrowtail=onormal] + op_with_kern -> op_kernel [arrowhead=vee, label="contains many"] + + { + rank=same; + op_with_kern + op_kernel + } + + op_kernel -> op_kernel_key [style=invis] + + { + rank=same; + op_kernel + op_kernel_key + } + + op_with_kern -> op_kernel_key [arrowhead=vee, label ="\nas map key"] +} \ No newline at end of file diff --git a/doc/paddle/images/operator1.png b/doc/paddle/images/operator1.png new file mode 100644 index 0000000000000000000000000000000000000000..3975b06f615b7a88dfc11e71b6451fdf4ce42d60 Binary files /dev/null and b/doc/paddle/images/operator1.png differ diff --git a/doc/paddle/images/operator2.png b/doc/paddle/images/operator2.png new file mode 100644 index 0000000000000000000000000000000000000000..b7bb1fae2050d3a70797517bc20dbbdef3dfcb7c Binary files /dev/null and b/doc/paddle/images/operator2.png differ diff --git a/doc/paddle/images/paddle-compile.graffle b/doc/paddle/images/paddle-compile.graffle new file mode 100644 index 
0000000000000000000000000000000000000000..a6348cc3dbcaca923c6e794681b2edb85cb9f8f6 Binary files /dev/null and b/doc/paddle/images/paddle-compile.graffle differ diff --git a/doc/paddle/images/paddle-compile.png b/doc/paddle/images/paddle-compile.png new file mode 100644 index 0000000000000000000000000000000000000000..e0f13d551ac41afaec627a57dea79356464bf0bf Binary files /dev/null and b/doc/paddle/images/paddle-compile.png differ diff --git a/doc/paddle/images/place.png b/doc/paddle/images/place.png new file mode 100644 index 0000000000000000000000000000000000000000..14e77511d639af155e5a3725cde05323e0cc94f2 Binary files /dev/null and b/doc/paddle/images/place.png differ diff --git a/doc/paddle/images/pprof_1.png b/doc/paddle/images/pprof_1.png new file mode 100644 index 0000000000000000000000000000000000000000..8e9edbf377672d0ef40f2fc7bd39e746923550cb Binary files /dev/null and b/doc/paddle/images/pprof_1.png differ diff --git a/doc/paddle/images/pprof_2.png b/doc/paddle/images/pprof_2.png new file mode 100644 index 0000000000000000000000000000000000000000..172ba20399ba974d27f4c072425277b69b02520b Binary files /dev/null and b/doc/paddle/images/pprof_2.png differ diff --git a/doc/paddle/images/print_fluid_program.png b/doc/paddle/images/print_fluid_program.png new file mode 100644 index 0000000000000000000000000000000000000000..e8e459e1b3d5c8706b3caa05dc371db8d46df4a5 Binary files /dev/null and b/doc/paddle/images/print_fluid_program.png differ diff --git a/doc/paddle/images/profiler.png b/doc/paddle/images/profiler.png new file mode 100644 index 0000000000000000000000000000000000000000..d57b71ca88aaba5d05584a6219d84214e285a1e1 Binary files /dev/null and b/doc/paddle/images/profiler.png differ diff --git a/doc/paddle/images/program_desc1.png b/doc/paddle/images/program_desc1.png new file mode 100644 index 0000000000000000000000000000000000000000..0656336914ece957f2e5bb4d70ad337a63e31d88 Binary files /dev/null and b/doc/paddle/images/program_desc1.png differ diff --git a/doc/paddle/images/program_desc2.png b/doc/paddle/images/program_desc2.png new file mode 100644 index 0000000000000000000000000000000000000000..db5bfa1231345add8661b4f8ef0fc9d861f40d24 Binary files /dev/null and b/doc/paddle/images/program_desc2.png differ diff --git a/doc/paddle/images/raw_input.png b/doc/paddle/images/raw_input.png new file mode 100644 index 0000000000000000000000000000000000000000..0725f92d2b169c2b59ec7c68b402859c2a2dd1d8 Binary files /dev/null and b/doc/paddle/images/raw_input.png differ diff --git a/doc/paddle/images/readers.png b/doc/paddle/images/readers.png new file mode 100644 index 0000000000000000000000000000000000000000..fd59168ce16c9e2a0ef45303c28c997cfd7740be Binary files /dev/null and b/doc/paddle/images/readers.png differ diff --git a/doc/paddle/images/remote_executor.graffle b/doc/paddle/images/remote_executor.graffle new file mode 100644 index 0000000000000000000000000000000000000000..41b2067311694b56d211a4f32d1b76884eeffd2d Binary files /dev/null and b/doc/paddle/images/remote_executor.graffle differ diff --git a/doc/paddle/images/remote_executor.png b/doc/paddle/images/remote_executor.png new file mode 100644 index 0000000000000000000000000000000000000000..744e2fb2e0f1bbe058e991ba7b2a09000965ee79 Binary files /dev/null and b/doc/paddle/images/remote_executor.png differ diff --git a/doc/paddle/images/rnn.dot b/doc/paddle/images/rnn.dot new file mode 100644 index 0000000000000000000000000000000000000000..c1141cd9c981bb3cbf50d8bf7a6ed210280d79a5 --- /dev/null +++ b/doc/paddle/images/rnn.dot @@ 
-0,0 +1,87 @@ +digraph G { + label = "simple RNN implementation" + + ranksep=2; + + //graph [nodesep=1, ranksep=1]; + + node[nodesep=1] + + subgraph cluster0 { + label = "global scope" + rankdir = TB + W + boot_memory + input + output + } + + subgraph cluster1 { + label = "step-scope 0" + rankdir = TB + memory0[label="memory"] + prememory0[label="pre-memory"] + step_input0[label="step input"] + step_output0[label="step output"] + } + + subgraph cluster2 { + label = "step-scope 1" + rankdir = TB + memory1[label="memory"] + prememory1[label="pre-memory"] + step_input1[label="step input"] + step_output1[label="step output"] + } + + subgraph cluster3 { + label = "step-scope 2" + rankdir = TB + memory2[label="memory"] + prememory2[label="pre-memory"] + step_input2[label="step input"] + step_output2[label="step output"] + } + + stepnet [shape=box] + stepnet0 [shape=box, style=dashed] + stepnet1 [shape=box, style=dashed] + stepnet2 [shape=box, style=dashed] + + + edge[color=blue] + boot_memory -> prememory0 [label="init" color="blue"] + memory0 -> prememory1 [label="copy/reference" color="blue"] + memory1 -> prememory2 [label="copy/reference" color="blue"] + + edge[color=black] + W -> stepnet0[constraint=false, style=dashed] + W -> stepnet1[constraint=false, style=dashed] + W -> stepnet2[constraint=false, style=dashed] + + memory0 -> stepnet0[style=dashed] + prememory0 -> stepnet0 -> step_output0[style=dashed] + + memory1 -> stepnet1[style=dashed] + prememory1 -> stepnet1 -> step_output1[style=dashed] + + memory2 -> stepnet2[style=dashed] + prememory2 -> stepnet2 -> step_output2[style=dashed] + + input -> step_input0 + input -> step_input1 + input -> step_input2 + + step_input0 -> stepnet0 [style=dashed] + step_input1 -> stepnet1[style=dashed] + step_input2 -> stepnet2[style=dashed] + + step_output0 -> output + step_output1 -> output + step_output2 -> output + + stepnet0 -> stepnet[style=dashed] + stepnet1 -> stepnet[style=dashed] + stepnet2 -> stepnet[style=dashed] + +} diff --git a/doc/paddle/images/rnn.jpg b/doc/paddle/images/rnn.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9867e404cf959df0dce6ded5222b466c788fb840 Binary files /dev/null and b/doc/paddle/images/rnn.jpg differ diff --git a/doc/paddle/images/rnn.png b/doc/paddle/images/rnn.png new file mode 100644 index 0000000000000000000000000000000000000000..e139e373fe8396782044cfd936fdde624f8c66fe Binary files /dev/null and b/doc/paddle/images/rnn.png differ diff --git a/doc/paddle/images/rnn_2level_data.dot b/doc/paddle/images/rnn_2level_data.dot new file mode 100644 index 0000000000000000000000000000000000000000..1d85ae2617a915ad0ad8288d848b607cc37ad297 --- /dev/null +++ b/doc/paddle/images/rnn_2level_data.dot @@ -0,0 +1,75 @@ +digraph G { + chapter [label="chapter"] + + subgraph cluster0 { + label = "paragraph 0" + + top_rnn0[label="top rnn step 0" shape=box] + + p0 [label="paragraph 0"] + p1 [label="paragraph 1"] + } + + subgraph cluster1{ + label = "paragraph 1" + + top_rnn1[label="top rnn step 1" shape=box] + + p2 [label="paragraph 0"] + p3 [label="paragraph 1"] + } + + subgraph cluster_p0 { + label = "sentence 0" + + low_rnn0 [label="low rnn step 0" shape=box] + s00 [label="sentence 0"] + s01 [label="sentence 1"] + + low_rnn0 -> s00 + low_rnn0 -> s01 + } + + subgraph cluster_p1 { + label = "sentence 1" + low_rnn1 [label="low rnn step 1" shape=box] + s10 [label="sentence 0"] + s11 [label="sentence 1"] + low_rnn1 -> s10 + low_rnn1 -> s11 + } + + subgraph cluster_p2 { + label = "sentence 1" + low_rnn2 
[label="low rnn step 0" shape=box] + s20 [label="sentence 0"] + s21 [label="sentence 1"] + low_rnn2 -> s20 + low_rnn2 -> s21 + } + + subgraph cluster_p3 { + label = "sentence 1" + low_rnn3 [label="low rnn step 1" shape=box] + s30 [label="sentence 0"] + s31 [label="sentence 1"] + low_rnn3 -> s30 + low_rnn3 -> s31 + } + + + chapter -> top_rnn0 + chapter -> top_rnn1 + + top_rnn0 -> p0 + top_rnn0 -> p1 + top_rnn1 -> p2 + top_rnn1 -> p3 + + + p0 -> low_rnn0 + p1 -> low_rnn1 + p2 -> low_rnn2 + p3 -> low_rnn3 + +} diff --git a/doc/paddle/images/rnn_2level_data.png b/doc/paddle/images/rnn_2level_data.png new file mode 100644 index 0000000000000000000000000000000000000000..4be81b2430717a6a506342a09fc26899568574c6 Binary files /dev/null and b/doc/paddle/images/rnn_2level_data.png differ diff --git a/doc/paddle/images/scope_variable_tensor.png b/doc/paddle/images/scope_variable_tensor.png new file mode 100644 index 0000000000000000000000000000000000000000..59b0de6fb36f9f6b469227c05760a7612bb30b4d Binary files /dev/null and b/doc/paddle/images/scope_variable_tensor.png differ diff --git a/doc/paddle/images/single-thread@3x.png b/doc/paddle/images/single-thread@3x.png new file mode 100644 index 0000000000000000000000000000000000000000..4083aebfdd45af5fbac25fa2c4176bc08c3cb44a Binary files /dev/null and b/doc/paddle/images/single-thread@3x.png differ diff --git a/doc/paddle/images/sorted_input.png b/doc/paddle/images/sorted_input.png new file mode 100644 index 0000000000000000000000000000000000000000..ff601128368ee179e3fd33e5e295a9ddd3dcbaeb Binary files /dev/null and b/doc/paddle/images/sorted_input.png differ diff --git a/doc/paddle/images/sparse_update.graffle b/doc/paddle/images/sparse_update.graffle new file mode 100644 index 0000000000000000000000000000000000000000..08d689a58f83698d8c1158ee3990ed8abf3a7a9a Binary files /dev/null and b/doc/paddle/images/sparse_update.graffle differ diff --git a/doc/paddle/images/sparse_update.png b/doc/paddle/images/sparse_update.png new file mode 100644 index 0000000000000000000000000000000000000000..8c872e6ac479f7d1b818a4a207956c43155d0ad7 Binary files /dev/null and b/doc/paddle/images/sparse_update.png differ diff --git a/doc/paddle/images/test.dot b/doc/paddle/images/test.dot new file mode 100644 index 0000000000000000000000000000000000000000..62c69b8fc8010a26a54a6ee8ef1488aad94d747a --- /dev/null +++ b/doc/paddle/images/test.dot @@ -0,0 +1,35 @@ + +digraph Test { + z -> generator -> G_img; + G_img -> discriminator -> D_f -> d_loss_f; + label0 -> d_loss_f -> d_loss; + + img -> discriminator -> D_t -> d_loss_t; + label1 -> d_loss_t -> d_loss; + + d_loss -> d_loss_t[color=red, style=dashed]; + d_loss -> d_loss_f[color=red, style=dashed]; + d_loss_t -> D_t[color=red, style=dashed]; + d_loss_f -> D_f[color=red, style=dashed]; + D_t -> discriminator[color=red, style=dashed]; + D_f -> discriminator[color=red, style=dashed]; + + D_f -> g_loss; + label2 -> g_loss; + + g_loss -> D_f[color=green, style=dashed]; + D_f -> discriminator[color=green, style=dashed]; + discriminator -> G_img[color=green, style=dashed]; + G_img -> generator[color=green, style=dashed]; + + discriminator [color=red, shape=box]; + generator [color=green, shape=box]; + z [shape=diamond]; + img [shape=diamond]; + label0 [shape=diamond]; + label1 [shape=diamond]; + label2 [shape=diamond]; + + d_loss [color=red]; + g_loss [color=green]; +} diff --git a/doc/paddle/images/test.dot.png b/doc/paddle/images/test.dot.png new file mode 100644 index 
0000000000000000000000000000000000000000..4e121a40b9f7b2232d7cdda315bad15926446f55 Binary files /dev/null and b/doc/paddle/images/test.dot.png differ diff --git a/doc/paddle/images/theta_star.gif b/doc/paddle/images/theta_star.gif new file mode 100644 index 0000000000000000000000000000000000000000..dd24d33e124396be3fc410c9b12f33148f64efe2 Binary files /dev/null and b/doc/paddle/images/theta_star.gif differ diff --git a/doc/paddle/images/timeline.jpeg b/doc/paddle/images/timeline.jpeg new file mode 100644 index 0000000000000000000000000000000000000000..38ec3f80c982857531f30a8bb0fa26ea5bf05385 Binary files /dev/null and b/doc/paddle/images/timeline.jpeg differ diff --git a/doc/paddle/images/tracing.jpeg b/doc/paddle/images/tracing.jpeg new file mode 100644 index 0000000000000000000000000000000000000000..3a49fc4f8a401a9463b0157e2f38c164ca02dcc5 Binary files /dev/null and b/doc/paddle/images/tracing.jpeg differ diff --git a/doc/paddle/images/transpiler.png b/doc/paddle/images/transpiler.png new file mode 100644 index 0000000000000000000000000000000000000000..422973c0dc7aa2b544d2fc86a97ace706388cb9e Binary files /dev/null and b/doc/paddle/images/transpiler.png differ diff --git a/doc/paddle/images/user_interface.png b/doc/paddle/images/user_interface.png new file mode 100644 index 0000000000000000000000000000000000000000..ffc94e3d8945ec6291460afd90e8fcc600828390 Binary files /dev/null and b/doc/paddle/images/user_interface.png differ diff --git a/doc/paddle/index_cn.rst b/doc/paddle/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..de09e58d9c1124033d7375d939d8f126acb1c18e --- /dev/null +++ b/doc/paddle/index_cn.rst @@ -0,0 +1,20 @@ +.. PaddlePaddle Fluid documentation master file, created by + sphinx-quickstart on Thu Jun 7 17:04:53 2018. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +############## +欢迎使用 Fluid +############## + +.. toctree:: + :maxdepth: 1 + + + install/index_cn.rst + guides/index_cn.rst + tutorial/index_cn.rst + api/index_cn.rst + faq/index_cn.rst + release_note_cn.md + diff --git a/doc/paddle/index_en.rst b/doc/paddle/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..23d44e5db43745efca0e66d94cc8caee7e5c5d24 --- /dev/null +++ b/doc/paddle/index_en.rst @@ -0,0 +1,10 @@ + PaddlePaddle Fluid +========================== + +.. toctree:: + :maxdepth: 1 + + install/index_en.rst + guides/index_en.rst + api/index_en.rst + release_note_en.md diff --git a/doc/paddle/install/FAQ.md b/doc/paddle/install/FAQ.md new file mode 100644 index 0000000000000000000000000000000000000000..4d079b67e17511762d8578353e3741f7b857bb24 --- /dev/null +++ b/doc/paddle/install/FAQ.md @@ -0,0 +1,110 @@ + +# **FAQ** + +- 报错“nccl.h找不到” + + > 请[安装nccl2](https://developer.nvidia.com/nccl/nccl-download) + +- 报错`Cannot uninstall 'six'.` + + > 此问题可能与系统中已有Python有关,请使用`pip install paddlepaddle --ignore-installed six`(CPU)或`pip install paddlepaddle --ignore-installed six`(GPU)解决 + +- CentOS6下如何编译python2.7为共享库? + + > 使用以下指令: + + ./configure --prefix=/usr/local/python2.7 --enable-shared + make && make install + + + +- Ubuntu18.04下libidn11找不到? + + > 使用以下指令: + + apt install libidn11 + +- Ubuntu编译时出现大量的代码段不能识别? + + > 这可能是由于cmake版本不匹配造成的,请在gcc的安装目录下使用以下指令: + + apt install gcc-4.8 g++-4.8 + cp gcc gcc.bak + cp g++ g++.bak + rm gcc + rm g++ + ln -s gcc-4.8 gcc + ln -s g++-4.8 g++ + +- 遇到paddlepaddle.whl is not a supported wheel on this platform? 
+ + > 出现这个问题的主要原因是,没有找到和当前系统匹配的paddlepaddle安装包。 请检查Python版本是否为2.7系列。另外最新的pip官方源中的安装包默认是manylinux1标准, 需要使用最新的pip (>9.0.0) 才可以安装。您可以执行以下指令更新您的pip: + + pip install --upgrade pip + 或者 + + python -c "import pip; print(pip.pep425tags.get_supported())" + + > 如果系统支持的是 linux_x86_64 而安装包是 manylinux1_x86_64 ,需要升级pip版本到最新; 如果系统支持 manylinux1_x86_64 而安装包 (本地)是 linux_x86_64, 可以重命名这个whl包为 manylinux1_x86_64 再安装。 + +- 使用Docker编译出现问题? + + > 请参照GitHub上[Issue12079](https://github.com/PaddlePaddle/Paddle/issues/12079) + +- 可以用 IDE 吗? + + > 当然可以,因为源码就在本机上。IDE 默认调用 make 之类的程序来编译源码,我们只需要配置 IDE 来调用 Docker 命令编译源码即可。 + 很多 PaddlePaddle 开发者使用 Emacs。他们在自己的 `~/.emacs` 配置文件里加两行 + `global-set-key "\C-cc" 'compile` + `setq compile-command "docker run --rm -it -v $(git rev-parse --show-toplevel):/paddle paddle:dev"` + 就可以按 `Ctrl-C` 和 `c` 键来启动编译了。 + +- 可以并行编译吗? + + > 是的。我们的 Docker image 运行一个 [Bash 脚本](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/scripts/paddle_build.sh)。这个脚本调用`make -j$(nproc)` 来启动和 CPU 核一样多的进程来并行编译。 + +- 在 Windows/MacOS 上编译很慢? + + > Docker 在 Windows 和 MacOS 都可以运行。不过实际上是运行在一个 Linux 虚拟机上。可能需要注意给这个虚拟机多分配一些 CPU 和内存,以保证编译高效。具体做法请参考[issue627](https://github.com/PaddlePaddle/Paddle/issues/627)。 + +- 磁盘不够? + + > 本文中的例子里,`docker run` 命令里都用了 `--rm` 参数,这样保证运行结束之后的 containers 不会保留在磁盘上。可以用 `docker ps -a` 命令看到停止后但是没有删除的 containers。`docker build` 命令有时候会产生一些中间结果,是没有名字的 images,也会占用磁盘。可以参考 [这篇文章](https://zaiste.net/posts/removing_docker_containers) 来清理这些内容。 + +- 在DockerToolbox下使用book时`http://localhost:8888/`无法打开? + + > 需要将localhost替换成虚拟机ip,一般需要在浏览器中输入:`http://192.168.99.100:8888/` + +- pip install gpu版本的PaddlePaddle后运行出现SegmentFault如下: + + @ 0x7f6c8d214436 paddle::platform::EnforceNotMet::EnforceNotMet() + + @ 0x7f6c8dfed666 paddle::platform::GetCUDADeviceCount() + + @ 0x7f6c8d2b93b6 paddle::framework::InitDevices() + + + > 出现这个问题原因主要是由于您的显卡驱动低于对应CUDA版本的要求,请保证您的显卡驱动支持所使用的CUDA版本 + + + + +- MacOS下安装PaddlePaddle后import paddle.fluid出现`Fatal Python error: PyThreadState_Get: no current thread running`错误 + + - For Python2.7.x (install by brew): 请使用`export LD_LIBRARY_PATH=/usr/local/Cellar/python@2/2.7.15_1/Frameworks/Python.framework/Versions/2.7 && export DYLD_LIBRARY_PATH=/usr/local/Cellar/python@2/2.7.15_1/Frameworks/Python.framework/Versions/2.7` + - For Python2.7.x (install by Python.org): 请使用`export LD_LIBRARY_PATH=/Library/Frameworks/Python.framework/Versions/2.7 && export DYLD_LIBRARY_PATH=/Library/Frameworks/Python.framework/Versions/2.7` + - For Python3.5.x (install by Python.org): 请使用`export LD_LIBRARY_PATH=/Library/Frameworks/Python.framework/Versions/3.5/ && export DYLD_LIBRARY_PATH=/Library/Frameworks/Python.framework/Versions/3.5/` + + + +- MACOS下使用自定义的openblas 详见issue: + + > [ISSUE 13217](https://github.com/PaddlePaddle/Paddle/issues/13721) + +- 已经安装swig但是仍旧出现swig找不到的问题 详见issue: + + > [ISSUE 13759](https://github.com/PaddlePaddle/Paddle/issues/13759) + +- 出现 “target pattern contain no '%'.”的问题 详见issue: + + > [ISSUE 13806](https://github.com/PaddlePaddle/Paddle/issues/13806) diff --git a/doc/paddle/install/FAQ_en.md b/doc/paddle/install/FAQ_en.md new file mode 100644 index 0000000000000000000000000000000000000000..7e0da11d2a542ec2356d8d0d22abf968213ca306 --- /dev/null +++ b/doc/paddle/install/FAQ_en.md @@ -0,0 +1,130 @@ +*** + + +# **FAQ** + +- How to compile python2.7 as a shared library under CentOS6? + + > Use the following instructions: + + + + ./configure --prefix=/usr/local/python2.7 --enable-shared + make && make install + + +- Ubuntu18.04 under libidn11 can not be found? 
+ + > Use the following instructions: + + apt install libidn11 + +- When Ubuntu compiles, a lot of code segments are not recognized? + + > This may be caused by a mismatch in the cmake version. Please use the following command in the gcc installation directory: + + apt install gcc-4.8 g++-4.8 + cp gcc gcc.bak + cp g++ g++.bak + rm gcc + rm g++ + ln -s gcc-4.8 gcc + ln -s g++-4.8 g++ + + + + +- Encountered paddlepaddle*.whl is not a supported wheel on this platform? + + > The main reason for this problem is that there is no paddlepaddle installation package that matches the current system. Please check if the Python version is 2.7 series. In addition, the latest pip official source installation package defaults to the manylinux1 standard, you need to use the latest pip (>9.0.0) to install. You can update your pip by following these instructions: + + pip install --upgrade pip + or + + python -c "import pip; print(pip.pep425tags.get_supported())" + + > If the system supports linux_x86_64 and the installation package is manylinux1_x86_64, you need to upgrade the pip version to the latest; if the system supports manylinux1_x86_64 and the installation package (local) is linux_x86_64, you can rename this whl package to manylinux1_x86_64 and install it again. + +- Is there a problem with Docker compilation? + + > Please refer to [Issue12079](https://github.com/PaddlePaddle/Paddle/issues/12079) on GitHub. + +- What is Docker? + + > If you haven't heard of Docker, you can think of it as a virtualenv-like system, but it virtualises more than the Python runtime environment. + +- Is Docker still a virtual machine? + + > Someone uses a virtual machine to analogize to Docker. It should be emphasized that Docker does not virtualize any hardware. The compiler tools running in the Docker container are actually run directly on the native CPU and operating system. The performance is the same as installing the compiler on the machine. + +- Why use Docker? + + > Installing the tools and configurations in a Docker image standardizes the build environment. This way, if you encounter problems, others can reproduce the problem to help. In addition, for developers accustomed to using Windows and MacOS, there is no need to configure a cross-compilation environment using Docker. + +- Can I choose not to use Docker? + + > Of course you can. You can install development tools to the machine in the same way that you install them into Docker image. This document describes the Docker-based development process because it is easier than the other methods. + +- How hard is it to learn Docker? + + > It's not difficult to understand Docker. It takes about ten minutes to read this [article](https://zhuanlan.zhihu.com/p/19902938). + This can save you an hour of installing and configuring various development tools, as well as the need for new installations when switching machines. Don't forget that PaddlePaddle updates may lead to the need for new development tools. Not to mention the benefits of simplifying the recurrence of problems. + +- Can I use an IDE? + + > Of course, because the source code is on the machine. By default, the IDE calls a program like make to compile the source code. We only need to configure the IDE to call the Docker command to compile the source code. + Many PaddlePaddle developers use Emacs. They add two lines to their `~/.emacs` configuration file. 
+ `global-set-key "\C-cc" 'compile` + `setq compile-command "docker run --rm -it -v $(git rev-parse --show- Toplevel): /paddle paddle:dev"` + You can start the compilation by pressing `Ctrl-C` and` c`. + +- Can I compile in parallel? + + > Yes. Our Docker image runs a [Bash script](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/scripts/paddle_build.sh). This script calls `make -j$(nproc)` to start as many processes as the CPU cores to compile in parallel. + +- Docker needs sudo? + + > If you develop with your own computer, you will naturally have admin privileges (sudo). If you are developing from a public computer, you need to ask the administrator to install and configure Docker. In addition, the PaddlePaddle project is working hard to support other container technologies that don't require sudo, such as rkt. + +- Is compiling slow on Windows/MacOS? + + > Docker runs on both Windows and MacOS. However, it is actually running on a Linux virtual machine. It may be necessary to pay attention to allocate more CPU and memory to this virtual machine to ensure efficient compilation. Please refer to [issue627](https://github.com/PaddlePaddle/Paddle/issues/627) for details. + +- Not enough disk? + + > In the example in this article, the `--rm` parameter is used in the `docker run`command to ensure that containers after the end of the run are not retained on disk. You can use the `docker ps -a` command to see containers that are stopped but not deleted. The `docker build` command sometimes produces some intermediate results, an image with no name, and it also occupies the disk. You can refer to this [article](https://zaiste.net/removing_docker_containers/) to clean up this content. + +- Can't I open `http://localhost:8888/` when using the book under DockerToolbox? + + > You need to replace localhost with virtual machine ip. Generally type this in the browser: `http://192.168.99.100:8888/` + +- After the pip install gpu version of PaddlePaddle runing, the SegmentFault appears as follows: + + @ 0x7f6c8d214436 paddle::platform::EnforceNotMet::EnforceNotMet() + + @ 0x7f6c8dfed666 paddle::platform::GetCUDADeviceCount() + + @ 0x7f6c8d2b93b6 paddle::framework::InitDevices() + + > The main reason for this problem is that your graphics card driver is lower than the corresponding CUDA version. Please ensure that your graphics card driver supports the CUDA version used. + + +- `Fatal Python error: PyThreadState_Get: no current thread running` error occurs when importing paddle.fluid after installing PaddlePaddle on MacOS. + + + - For Python2.7.x (install by brew): Please use `export LD_LIBRARY_PATH=/usr/local/Cellar/python@2/2.7.15_1/Frameworks/Python.framework/Versions/2.7 && export DYLD_LIBRARY_PATH=/usr/ Local/Cellar/python@2/2.7.15_1/Frameworks/Python.framework/Versions/2.7` + - For Python2.7.x (install by Python.org): Please use `export LD_LIBRARY_PATH=/Library/Frameworks/Python.framework/Versions/2.7 && export DYLD_LIBRARY_PATH=/Library/Frameworks/Python.framework/Versions/2.7` + - For Python3.5.x (install by Python.org): Please use `export LD_LIBRARY_PATH=/Library/Frameworks/Python.framework/Versions/3.5/ && export DYLD_LIBRARY_PATH=/Library/Frameworks/Python.framework/Versions/3.5 /` + + +- Use customized openblas under MACOS. See issue for details: + + >[ISSUE 13217](https://github.com/PaddlePaddle/Paddle/issues/13721) + +- Swig has been installed but there is still a problem that swig can't find. 
See issue for details: + + >[ISSUE 13759](https://github.com/PaddlePaddle/Paddle/issues/13759) + +- The question "target pattern contain no '%'." appears. See issue for details: + + >[ISSUE 13806](https://github.com/PaddlePaddle/Paddle/issues/13806) diff --git a/doc/paddle/install/Tables.md b/doc/paddle/install/Tables.md new file mode 100644 index 0000000000000000000000000000000000000000..5333d6a2473f490da295936e6da30719b117f2d1 --- /dev/null +++ b/doc/paddle/install/Tables.md @@ -0,0 +1,532 @@ + +# 附录 + +## **编译依赖表** + +

| 依赖包名称 | 版本 | 说明 | 安装命令 |
|------------|------|------|----------|
| CMake | 3.10, 3.11, 3.15, 3.16(推荐), 3.17 | 3.12/3.13/3.14 版本存在官方Bug,请跳过该版本 | |
| GCC (Linux Only) | 4.8 / 5.4 | 推荐使用CentOS的devtools2 | |
| Clang (MacOS Only) | 9.0及以上 | 通常使用MacOS 10.11及以上的系统对应的Clang版本即可 | |
| Python(64 bit) | 2.7.x 或 3.5+.x | 依赖libpython2.7.so 或 libpython3.5+.so | `apt install python-dev` 或 `yum install python-devel`;如果安装python3请访问Python官网 |
| SWIG | 最低 2.0 | | `apt install swig` 或 `yum install swig` |
| wget | any | | `apt install wget` 或 `yum install wget` |
| openblas | any | 可选 | |
| pip | >=9.0.1 | | `apt install python-pip` 或 `yum install python-pip` |
| numpy | >=1.12.0 | | `pip install numpy` |
| protobuf | >=3.1.0 | | `pip install protobuf` |
| wheel | any | | `pip install wheel` |
| patchELF | any | | `apt install patchelf`,或参见github patchELF官方文档 |
| go | >=1.8 | 可选 | |
| setuptools | >= 28.0.0 | | |
| unrar | | | `brew install unrar`(For MacOS),`apt-get install unrar`(For Ubuntu) |
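上表中来自系统包管理器或 pip 的依赖,可以参考下面的命令一次性安装(示例以 Ubuntu 环境为准,仅供参考;CentOS 请替换为上表中对应的 yum 命令,CMake、GCC 等的具体版本请按上表要求选择):

    # 系统侧依赖(Ubuntu 示例;CentOS 请使用上表中的 yum 命令)
    apt install -y gcc g++ make swig wget patchelf python-dev python-pip
    # Python 侧依赖(版本要求见上表)
    pip install "numpy>=1.12.0" "protobuf>=3.1.0" wheel "setuptools>=28.0.0"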


+## **编译选项表** + +

| 选项 | 说明 | 默认值 |
|------|------|--------|
| WITH_GPU | 是否支持GPU | ON |
| WITH_AVX | 是否编译含有AVX指令集的PaddlePaddle二进制文件 | ON |
| WITH_PYTHON | 是否内嵌PYTHON解释器 | ON |
| WITH_TESTING | 是否开启单元测试 | OFF |
| WITH_MKL | 是否使用MKL数学库,如果为否,则使用OpenBLAS | ON |
| WITH_SYSTEM_BLAS | 是否使用系统自带的BLAS | OFF |
| WITH_DISTRIBUTE | 是否编译带有分布式的版本 | OFF |
| WITH_BRPC_RDMA | 是否使用BRPC RDMA作为RPC协议 | OFF |
| ON_INFER | 是否打开预测优化 | OFF |
| CUDA_ARCH_NAME | 是否只针对当前CUDA架构编译 | All:编译所有可支持的CUDA架构;可选:Auto,自动识别当前环境的架构编译 |
| TENSORRT_ROOT | 指定TensorRT路径 | Windows下默认值为'/',Linux下默认值为'/usr/' |

+ + + + + +**BLAS** + +PaddlePaddle支持 [MKL](https://software.intel.com/en-us/mkl) 和 [OpenBlAS](http://www.openblas.net) 两种BLAS库。默认使用MKL。如果使用MKL并且机器含有AVX2指令集,还会下载MKL-DNN数学库,详细参考[这里](https://github.com/PaddlePaddle/Paddle/tree/release/0.11.0/doc/design/mkldnn#cmake) 。 + +如果关闭MKL,则会使用OpenBLAS作为BLAS库。 + +**CUDA/cuDNN** + +PaddlePaddle在编译时/运行时会自动找到系统中安装的CUDA和cuDNN库进行编译和执行。 使用参数 `-DCUDA_ARCH_NAME=Auto` 可以指定开启自动检测SM架构,加速编译。 + +PaddlePaddle可以使用cuDNN v5.1之后的任何一个版本来编译运行,但尽量请保持编译和运行使用的cuDNN是同一个版本。 我们推荐使用最新版本的cuDNN。 + +**编译选项的设置** + +PaddePaddle通过编译时指定路径来实现引用各种BLAS/CUDA/cuDNN库。cmake编译时,首先在系统路径( `/usr/lib` 和 `/usr/local/lib` )中搜索这几个库,同时也会读取相关路径变量来进行搜索。 通过使用`-D`命令可以设置,例如: + +> `cmake .. -DWITH_GPU=ON -DWITH_TESTING=OFF -DCUDNN_ROOT=/opt/cudnnv5` + +**注意**:这几个编译选项的设置,只在第一次cmake的时候有效。如果之后想要重新设置,推荐清理整个编译目录( rm -rf )后,再指定。 + + + +
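如果需要在已有编译目录上重新设置编译选项,可以参考下面的流程(示例,假设编译目录名为 build,具体选项请按需替换):

    # 编译选项只在第一次 cmake 时生效,重新设置前先清理编译目录
    rm -rf build && mkdir build && cd build
    # 重新执行 cmake,并按需指定 BLAS/CUDA/cuDNN 相关选项
    cmake .. -DWITH_GPU=ON -DWITH_TESTING=OFF -DCUDNN_ROOT=/opt/cudnnv5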

+## **安装包列表** + +

| 版本号 | 版本说明 |
|--------|----------|
| paddlepaddle==[版本号],例如 paddlepaddle==2.0.0b0 | 只支持CPU对应版本的PaddlePaddle,具体版本请参见Pypi |
| paddlepaddle-gpu==[版本号],例如 paddlepaddle-gpu==2.0.0b0 | 默认安装支持CUDA 10.0和cuDNN 7的对应[版本号]的PaddlePaddle安装包 |

+ +您可以在 [Release History](https://pypi.org/project/paddlepaddle-gpu/#history) 中找到PaddlePaddle-gpu的各个发行版本。 +> 其中`postXX` 对应的是CUDA和cuDNN的版本,`postXX`之前的数字代表Paddle的版本 + +需要注意的是,命令中 paddlepaddle-gpu==2.0.0b0 在windows环境下,会默认安装支持CUDA 10.0和cuDNN 7的对应[版本号]的PaddlePaddle安装包 + + +
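例如,要通过 pip 安装上表中 2.0.0b0 版本的安装包,可以参考以下命令(示例;安装 GPU 版本前请确认本机的 CUDA/cuDNN 与所装包一致):

    # CPU 版本
    pip install paddlepaddle==2.0.0b0
    # GPU 版本(默认对应 CUDA 10.0 和 cuDNN 7)
    pip install paddlepaddle-gpu==2.0.0b0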

+ +## **多版本whl包列表-Release** + +

| 版本说明 | cp27-cp27mu | cp27-cp27m | cp35-cp35m | cp36-cp36m | cp37-cp37m |
|----------|-------------|------------|------------|------------|------------|
| cpu-mkl | paddlepaddle-2.0.0b0-cp27-cp27mu-linux_x86_64.whl | paddlepaddle-2.0.0b0-cp27-cp27m-linux_x86_64.whl | paddlepaddle-2.0.0b0-cp35-cp35m-linux_x86_64.whl | paddlepaddle-2.0.0b0-cp36-cp36m-linux_x86_64.whl | paddlepaddle-2.0.0b0-cp37-cp37m-linux_x86_64.whl |
| cuda10_cudnn7-mkl | paddlepaddle_gpu-2.0.0b0-cp27-cp27mu-linux_x86_64.whl | paddlepaddle_gpu-2.0.0b0-cp27-cp27m-linux_x86_64.whl | paddlepaddle_gpu-2.0.0b0-cp35-cp35m-linux_x86_64.whl | paddlepaddle_gpu-2.0.0b0-cp36-cp36m-linux_x86_64.whl | paddlepaddle_gpu-2.0.0b0-cp37-cp37m-linux_x86_64.whl |
| win_cpu_mkl | - | paddlepaddle-2.0.0b0-cp27-cp27m-win_amd64.whl | paddlepaddle-2.0.0b0-cp35-cp35m-win_amd64.whl | paddlepaddle-2.0.0b0-cp36-cp36m-win_amd64.whl | paddlepaddle-2.0.0b0-cp37-cp37m-win_amd64.whl |
| win_cuda10_cudnn7_mkl | - | paddlepaddle_gpu-2.0.0b0-cp27-cp27m-win_amd64.whl | paddlepaddle_gpu-2.0.0b0-cp35-cp35m-win_amd64.whl | paddlepaddle_gpu-2.0.0b0-cp36-cp36m-win_amd64.whl | paddlepaddle_gpu-2.0.0b0-cp37-cp37m-win_amd64.whl |
| mac_cpu | - | paddlepaddle-2.0.0b0-cp27-cp27m-macosx_10_6_intel.whl | paddlepaddle-2.0.0b0-cp35-cp35m-macosx_10_6_intel.whl | paddlepaddle-2.0.0b0-cp36-cp36m-macosx_10_6_intel.whl | paddlepaddle-2.0.0b0-cp37-cp37m-macosx_10_6_intel.whl |

### 表格说明

- 纵轴

cpu-mkl: 支持CPU训练和预测,使用Intel mkl数学库

cuda10_cudnn7-mkl: 支持GPU训练和预测,使用Intel mkl数学库

- 横轴

一般是类似于“cp27-cp27mu”的形式,其中:

27:python tag,指python2.7,类似的还有“35”、“36”、“37”等

mu:指unicode版本python,若为m则指非unicode版本python

- 安装包命名规则

每个安装包都有一个专属的名字,它们是按照Python的官方规则来命名的,形式如下(下文给出一个拆解示例):

{distribution}-{version}(-{build tag})?-{python tag}-{abi tag}-{platform tag}.whl

其中build tag可以缺少,其他部分不能缺少

distribution: wheel名称

version: 版本,例如0.14.0(要求必须是数字格式)

python tag: 类似'py27', 'py2', 'py3',用于标明对应的python版本

abi tag: 类似'cp33m', 'abi3', 'none'

platform tag: 类似 'linux_x86_64', 'any'
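以上表中的一个安装包为例,按照上述命名规则可以拆解如下(仅作示意;`pip debug` 命令假设本机 pip 版本不低于 19.2):

    # paddlepaddle_gpu-2.0.0b0-cp37-cp37m-linux_x86_64.whl
    #   distribution : paddlepaddle_gpu
    #   version      : 2.0.0b0
    #   python tag   : cp37(即 Python 3.7)
    #   abi tag      : cp37m
    #   platform tag : linux_x86_64
    # 查看当前环境支持的 tag,便于确认该 whl 是否可以安装
    pip debug --verbose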

+## **多版本whl包列表-dev** +

| 版本说明 | cp27-cp27mu | cp27-cp27m | cp35-cp35m | cp36-cp36m | cp37-cp37m |
|----------|-------------|------------|------------|------------|------------|
| cpu-mkl | paddlepaddle-latest-cp27-cp27mu-linux_x86_64.whl | paddlepaddle-latest-cp27-cp27m-linux_x86_64.whl | paddlepaddle-latest-cp35-cp35m-linux_x86_64.whl | paddlepaddle-latest-cp36-cp36m-linux_x86_64.whl | paddlepaddle-latest-cp37-cp37m-linux_x86_64.whl |
| cpu-openblas | paddlepaddle-latest-cp27-cp27mu-linux_x86_64.whl | paddlepaddle-latest-cp27-cp27m-linux_x86_64.whl | paddlepaddle-latest-cp35-cp35m-linux_x86_64.whl | paddlepaddle-latest-cp36-cp36m-linux_x86_64.whl | paddlepaddle-latest-cp37-cp37m-linux_x86_64.whl |
| cuda9-cudnn7-openblas | paddlepaddle_gpu-latest-cp27-cp27mu-linux_x86_64.whl | paddlepaddle_gpu-latest-cp27-cp27m-linux_x86_64.whl | paddlepaddle_gpu-latest-cp35-cp35m-linux_x86_64.whl | paddlepaddle_gpu-latest-cp36-cp36m-linux_x86_64.whl | paddlepaddle_gpu-latest-cp37-cp37m-linux_x86_64.whl |
| cuda9-cudnn7-mkl | paddlepaddle_gpu-latest-cp27-cp27mu-linux_x86_64.whl | paddlepaddle_gpu-latest-cp27-cp27m-linux_x86_64.whl | paddlepaddle_gpu-latest-cp35-cp35m-linux_x86_64.whl | paddlepaddle_gpu-latest-cp36-cp36m-linux_x86_64.whl | paddlepaddle_gpu-latest-cp37-cp37m-linux_x86_64.whl |
| cuda10-cudnn7-mkl | paddlepaddle_gpu-latest-cp27-cp27mu-linux_x86_64.whl | paddlepaddle_gpu-latest-cp27-cp27m-linux_x86_64.whl | paddlepaddle_gpu-latest-cp35-cp35m-linux_x86_64.whl | paddlepaddle_gpu-latest-cp36-cp36m-linux_x86_64.whl | paddlepaddle_gpu-latest-cp37-cp37m-linux_x86_64.whl |
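上表中的 whl 包下载到本地后,可以直接用 pip 安装本地文件,例如(示例,文件名请替换为实际下载的安装包):

    # 安装本地下载的 whl 包,-U 表示覆盖已安装的版本
    pip install -U paddlepaddle_gpu-latest-cp37-cp37m-linux_x86_64.whl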


+## **多版本whl包列表(gcc8.2)-develop** +

| 版本说明 | cp27-cp27mu | cp27-cp27m | cp35-cp35m | cp36-cp36m | cp37-cp37m |
|----------|-------------|------------|------------|------------|------------|
| cuda10.1-cudnn7-mkl | paddlepaddle_gpu-0.0.0-cp27-cp27mu-linux_x86_64.whl | paddlepaddle_gpu-0.0.0-cp27-cp27m-linux_x86_64.whl | paddlepaddle_gpu-0.0.0-cp35-cp35m-linux_x86_64.whl | paddlepaddle_gpu-0.0.0-cp36-cp36m-linux_x86_64.whl | paddlepaddle_gpu-0.0.0-cp37-cp37m-linux_x86_64.whl |


+ +## 在Docker中执行PaddlePaddle训练程序 + + +假设您已经在当前目录(比如在/home/work)编写了一个PaddlePaddle的程序: `train.py` (可以参考 +[PaddlePaddleBook](https://github.com/PaddlePaddle/book/blob/develop/01.fit_a_line/README.cn.md) +编写),就可以使用下面的命令开始执行训练: + + cd /home/work + docker run -it -v $PWD:/work hub.baidubce.com/paddlepaddle/paddle /work/train.py + +上述命令中,`-it` 参数说明容器已交互式运行;`-v $PWD:/work` +指定将当前路径(Linux中PWD变量会展开为当前路径的绝对路径)挂载到容器内部的:`/work` +目录: `hub.baidubce.com/paddlepaddle/paddle` 指定需要使用的容器; 最后`/work/train.py`为容器内执行的命令,即运行训练程序。 + +当然,您也可以进入到Docker容器中,以交互式的方式执行或调试您的代码: + + docker run -it -v $PWD:/work hub.baidubce.com/paddlepaddle/paddle /bin/bash + cd /work + python train.py + +**注:PaddlePaddle Docker镜像为了减小体积,默认没有安装vim,您可以在容器中执行** `apt-get install -y vim` **安装后,在容器中编辑代码。** + +

+ +## 使用Docker启动PaddlePaddle Book教程 + + +使用Docker可以快速在本地启动一个包含了PaddlePaddle官方Book教程的Jupyter Notebook,可以通过网页浏览。 +PaddlePaddle Book是为用户和开发者制作的一个交互式的Jupyter Notebook。 +如果您想要更深入了解deep learning,可以参考PaddlePaddle Book。 +大家可以通过它阅读教程,或者制作和分享带有代码、公式、图表、文字的交互式文档。 + +我们提供可以直接运行PaddlePaddle Book的Docker镜像,直接运行: + +`docker run -p 8888:8888 hub.baidubce.com/paddlepaddle/book` + +国内用户可以使用下面的镜像源来加速访问: + +`docker run -p 8888:8888 hub.baidubce.com/paddlepaddle/book` + +然后在浏览器中输入以下网址: + +`http://localhost:8888/` + +就这么简单,享受您的旅程!如有其他问题请参见[FAQ](#FAQ) + +

+## 使用Docker执行GPU训练 + + +为了保证GPU驱动能够在镜像里面正常运行,我们推荐使用 +[nvidia-docker](https://github.com/NVIDIA/nvidia-docker)来运行镜像。 +请不要忘记提前在物理机上安装GPU最新驱动。 + +`nvidia-docker run -it -v $PWD:/work hub.baidubce.com/paddlepaddle/paddle:latest-gpu /bin/bash` + +**注: 如果没有安装nvidia-docker,可以尝试以下的方法,将CUDA库和Linux设备挂载到Docker容器内:** + + export CUDA_SO="$(\ls /usr/lib64/libcuda* | xargs -I{} echo '-v {}:{}') \ + $(\ls /usr/lib64/libnvidia* | xargs -I{} echo '-v {}:{}')" + export DEVICES=$(\ls /dev/nvidia* | xargs -I{} echo '--device {}:{}') + docker run ${CUDA_SO} \ + ${DEVICES} -it hub.baidubce.com/paddlepaddle/paddle:latest-gpu diff --git a/doc/paddle/install/Tables_en.md b/doc/paddle/install/Tables_en.md new file mode 100644 index 0000000000000000000000000000000000000000..85487d42d391485421db07797b61fe873fe0c57b --- /dev/null +++ b/doc/paddle/install/Tables_en.md @@ -0,0 +1,532 @@ + +# Appendix + + +## Compile Dependency Table + +

| Dependency package name | Version | Description | Installation command |
|-------------------------|---------|-------------|----------------------|
| CMake | 3.10, 3.11, 3.15, 3.16 (recommended), 3.17 | There is an official bug in versions 3.12/3.13/3.14, please skip them | |
| GCC | 4.8 / 5.4 | devtools2 is recommended for CentOS | |
| Clang (MacOS Only) | 9.0 and above | Usually the Clang version shipped with MacOS 10.11 and above is sufficient | |
| Python (64 bit) | 2.7.x or 3.5+.x | depends on libpython2.7.so or libpython3.5+.so | `apt install python-dev` or `yum install python-devel`; if installing python3, please go to the Python official website |
| SWIG | at least 2.0 | | `apt install swig` or `yum install swig` |
| wget | any | | `apt install wget` or `yum install wget` |
| openblas | any | optional | |
| pip | at least 9.0.1 | | `apt install python-pip` or `yum install python-pip` |
| numpy | >=1.12.0 | | `pip install numpy` |
| protobuf | >=3.1.0 | | `pip install protobuf` |
| wheel | any | | `pip install wheel` |
| patchELF | any | | `apt install patchelf`, or see the github patchELF official documentation |
| go | >=1.8 | optional | |
| setuptools | >= 28.0.0 | | |
| unrar | | | `brew install unrar` (For MacOS), `apt-get install unrar` (For Ubuntu) |
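As a rough reference, the dependencies in the table that come from the system package manager or pip can be installed in one pass as follows (a sketch for Ubuntu only; on CentOS use the corresponding `yum` commands from the table, and pick the CMake/GCC versions the table requires):

    # System-side dependencies (Ubuntu example; see the table for the yum equivalents)
    apt install -y gcc g++ make swig wget patchelf python-dev python-pip
    # Python-side dependencies (version requirements as listed above)
    pip install "numpy>=1.12.0" "protobuf>=3.1.0" wheel "setuptools>=28.0.0"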


+## **Compile Option Table** + +

| Option | Description | Default |
|--------|-------------|---------|
| WITH_GPU | Whether to support GPU | ON |
| WITH_AVX | Whether to compile PaddlePaddle binaries with the AVX instruction set | ON |
| WITH_PYTHON | Whether the Python interpreter is embedded | ON |
| WITH_TESTING | Whether to turn on unit tests | OFF |
| WITH_MKL | Whether to use the MKL math library; if not, OpenBLAS is used | ON |
| WITH_SYSTEM_BLAS | Whether to use the system's BLAS | OFF |
| WITH_DISTRIBUTE | Whether to compile the distributed version | OFF |
| WITH_BRPC_RDMA | Whether to use BRPC RDMA as the RPC protocol | OFF |
| ON_INFER | Whether to turn on prediction optimization | OFF |
| CUDA_ARCH_NAME | Whether to compile only for the current CUDA architecture | All: compile for all supported CUDA architectures; optional: Auto, automatically detect the architecture of the current environment |
| TENSORRT_ROOT | Specify the TensorRT path | The default value is '/' on Windows and '/usr/' on Linux |

**BLAS**

PaddlePaddle supports two BLAS libraries, [MKL](https://software.intel.com/en-us/mkl) and [OpenBLAS](http://www.openblas.net/). MKL is used by default. If you use MKL and the machine contains the AVX2 instruction set, the MKL-DNN math library will also be downloaded; for details please refer to [here](https://github.com/PaddlePaddle/Paddle/tree/release/0.11.0/doc/design/mkldnn#cmake).

If you disable MKL, OpenBLAS will be used as the BLAS library.

**CUDA/cuDNN**

PaddlePaddle automatically finds the CUDA and cuDNN libraries installed in the system at compile time and runtime. Use the parameter `-DCUDA_ARCH_NAME=Auto` to enable automatic detection of the SM architecture and speed up compilation.

PaddlePaddle can be compiled and run with any cuDNN version after v5.1, but try to keep the cuDNN version used for compiling the same as the one used at runtime. We recommend using the latest version of cuDNN.

**Configure Compile Options**

PaddlePaddle references the various BLAS/CUDA/cuDNN libraries by specifying their paths at compile time. When cmake runs, it first searches the system paths ( `/usr/lib` and `/usr/local/lib` ) for these libraries, and also reads the related path variables. These options can be set with the `-D` flag, for example:

> `cmake .. -DWITH_GPU=ON -DWITH_TESTING=OFF -DCUDNN_ROOT=/opt/cudnnv5`

**Note**: These compile options only take effect the first time cmake is run. If you want to change them later, it is recommended to clean the entire build directory ( rm -rf ) and then run cmake again with the new options.

## **Installation Package List**

| Version Number | Release Description |
|----------------|---------------------|
| paddlepaddle==[version code], such as paddlepaddle==2.0.0b0 | Only supports the CPU version of PaddlePaddle of the corresponding version; please refer to Pypi for the specific versions |
| paddlepaddle-gpu==[version code], such as paddlepaddle-gpu==2.0.0b0 | By default installs the PaddlePaddle package of [version number] built against CUDA 10.0 and cuDNN 7 |

+ +You can find various distributions of PaddlePaddle-gpu in [the Release History](https://pypi.org/project/paddlepaddle-gpu/#history). +> 'postxx' corresponds to CUDA and cuDNN versions, and the number before 'postxx' represents the version of Paddle + +Please note that: in the commands, paddlepaddle-gpu==2.0.0b0 will install the installation package of PaddlePaddle that supports CUDA 10.0 and cuDNN 7 by default under Windows environment. + + + +
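For example, to install the 2.0.0b0 packages listed above with pip (a sketch; before installing the GPU package, make sure the local CUDA/cuDNN versions match the build):

    # CPU version
    pip install paddlepaddle==2.0.0b0
    # GPU version (built against CUDA 10.0 and cuDNN 7 by default)
    pip install paddlepaddle-gpu==2.0.0b0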

+ +## **Multi-version whl package list - Release** + + +

| Release Instruction | cp27-cp27mu | cp27-cp27m | cp35-cp35m | cp36-cp36m | cp37-cp37m |
|---------------------|-------------|------------|------------|------------|------------|
| cpu-mkl | paddlepaddle-2.0.0b0-cp27-cp27mu-linux_x86_64.whl | paddlepaddle-2.0.0b0-cp27-cp27m-linux_x86_64.whl | paddlepaddle-2.0.0b0-cp35-cp35m-linux_x86_64.whl | paddlepaddle-2.0.0b0-cp36-cp36m-linux_x86_64.whl | paddlepaddle-2.0.0b0-cp37-cp37m-linux_x86_64.whl |
| cuda10_cudnn7-mkl | paddlepaddle_gpu-2.0.0b0-cp27-cp27mu-linux_x86_64.whl | paddlepaddle_gpu-2.0.0b0-cp27-cp27m-linux_x86_64.whl | paddlepaddle_gpu-2.0.0b0-cp35-cp35m-linux_x86_64.whl | paddlepaddle_gpu-2.0.0b0-cp36-cp36m-linux_x86_64.whl | paddlepaddle_gpu-2.0.0b0-cp37-cp37m-linux_x86_64.whl |
| win_cpu_mkl | - | paddlepaddle-2.0.0b0-cp27-cp27m-win_amd64.whl | paddlepaddle-2.0.0b0-cp35-cp35m-win_amd64.whl | paddlepaddle-2.0.0b0-cp36-cp36m-win_amd64.whl | paddlepaddle-2.0.0b0-cp37-cp37m-win_amd64.whl |
| win_cuda10_cudnn7_mkl | - | paddlepaddle_gpu-2.0.0b0-cp27-cp27m-win_amd64.whl | paddlepaddle_gpu-2.0.0b0-cp35-cp35m-win_amd64.whl | paddlepaddle_gpu-2.0.0b0-cp36-cp36m-win_amd64.whl | paddlepaddle_gpu-2.0.0b0-cp37-cp37m-win_amd64.whl |
| mac_cpu | - | paddlepaddle-2.0.0b0-cp27-cp27m-macosx_10_6_intel.whl | paddlepaddle-2.0.0b0-cp35-cp35m-macosx_10_6_intel.whl | paddlepaddle-2.0.0b0-cp36-cp36m-macosx_10_6_intel.whl | paddlepaddle-2.0.0b0-cp37-cp37m-macosx_10_6_intel.whl |

+ + +### Table instruction + +- Vertical axis + +cpu-mkl: Support CPU training and prediction, use Intel MKL math library + +cuda10_cudnn7-mkl: Support GPU training and prediction, use Intel MKL math library + + +- Transverse axis + +Generally, it is similar to "cp27-cp27mu", in which: + +27:python tag, refers to python2. Similarly, there are "35", "36", "37", etc + +mu:refers to unicode version python, if it is m, refers to non Unicode version Python + +- Installation package naming rules + +Each installation package has a unique name. They are named according to the official rules of Python. The form is as follows: + +{distribution}-{version}(-{build tag})?-{python tag}-{abi tag}-{platform tag}.whl + +The build tag can be missing, and other parts cannot be missing + +distribution: wheel name + +version: Version, for example 0.14.0 (must be in numeric format) + +python tag: similar to 'py27', 'py2', 'py3', used to indicate the corresponding Python version + +abi tag: similar to 'cp33m', 'abi3', 'none' + +platform tag: similar to 'linux_x86_64', 'any' + + + +
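Taking one package from the tables above as an example, the naming rule breaks down as follows (illustration only; the `pip debug` command assumes pip 19.2 or newer):

    # paddlepaddle_gpu-2.0.0b0-cp37-cp37m-linux_x86_64.whl
    #   distribution : paddlepaddle_gpu
    #   version      : 2.0.0b0
    #   python tag   : cp37      (Python 3.7)
    #   abi tag      : cp37m
    #   platform tag : linux_x86_64
    # List the tags supported by the current environment to check whether this whl can be installed
    pip debug --verbose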

+## **Multi-version whl package list - dev** +

| version number | cp27-cp27mu | cp27-cp27m | cp35-cp35m | cp36-cp36m | cp37-cp37m |
|----------------|-------------|------------|------------|------------|------------|
| cpu-mkl | paddlepaddle-latest-cp27-cp27mu-linux_x86_64.whl | paddlepaddle-latest-cp27-cp27m-linux_x86_64.whl | paddlepaddle-latest-cp35-cp35m-linux_x86_64.whl | paddlepaddle-latest-cp36-cp36m-linux_x86_64.whl | paddlepaddle-latest-cp37-cp37m-linux_x86_64.whl |
| cpu-openblas | paddlepaddle-latest-cp27-cp27mu-linux_x86_64.whl | paddlepaddle-latest-cp27-cp27m-linux_x86_64.whl | paddlepaddle-latest-cp35-cp35m-linux_x86_64.whl | paddlepaddle-latest-cp36-cp36m-linux_x86_64.whl | paddlepaddle-latest-cp37-cp37m-linux_x86_64.whl |
| cuda9-cudnn7-openblas | paddlepaddle_gpu-latest-cp27-cp27mu-linux_x86_64.whl | paddlepaddle_gpu-latest-cp27-cp27m-linux_x86_64.whl | paddlepaddle_gpu-latest-cp35-cp35m-linux_x86_64.whl | paddlepaddle_gpu-latest-cp36-cp36m-linux_x86_64.whl | paddlepaddle_gpu-latest-cp37-cp37m-linux_x86_64.whl |
| cuda9-cudnn7-mkl | paddlepaddle_gpu-latest-cp27-cp27mu-linux_x86_64.whl | paddlepaddle_gpu-latest-cp27-cp27m-linux_x86_64.whl | paddlepaddle_gpu-latest-cp35-cp35m-linux_x86_64.whl | paddlepaddle_gpu-latest-cp36-cp36m-linux_x86_64.whl | paddlepaddle_gpu-latest-cp37-cp37m-linux_x86_64.whl |
| cuda10-cudnn7-mkl | paddlepaddle_gpu-latest-cp27-cp27mu-linux_x86_64.whl | paddlepaddle_gpu-latest-cp27-cp27m-linux_x86_64.whl | paddlepaddle_gpu-latest-cp35-cp35m-linux_x86_64.whl | paddlepaddle_gpu-latest-cp36-cp36m-linux_x86_64.whl | paddlepaddle_gpu-latest-cp37-cp37m-linux_x86_64.whl |
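After downloading one of the whl packages above, it can be installed directly from the local file, for example (replace the file name with the package you actually downloaded):

    # Install a locally downloaded whl; -U replaces any existing installation
    pip install -U paddlepaddle_gpu-latest-cp37-cp37m-linux_x86_64.whl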


+## **Multi-version whl package list(gcc8.2)-develop** +

| Version description | cp27-cp27mu | cp27-cp27m | cp35-cp35m | cp36-cp36m | cp37-cp37m |
|---------------------|-------------|------------|------------|------------|------------|
| cuda10.1-cudnn7-mkl | paddlepaddle_gpu-0.0.0-cp27-cp27mu-linux_x86_64.whl | paddlepaddle_gpu-0.0.0-cp27-cp27m-linux_x86_64.whl | paddlepaddle_gpu-0.0.0-cp35-cp35m-linux_x86_64.whl | paddlepaddle_gpu-0.0.0-cp36-cp36m-linux_x86_64.whl | paddlepaddle_gpu-0.0.0-cp37-cp37m-linux_x86_64.whl |


+ + +## Execute the PaddlePaddle training program in Docker + + +Suppose you have written a PaddlePaddle program in the current directory (such as /home/work): `train.py` ( refer to [PaddlePaddleBook](https://github.com/PaddlePaddle/book/blob/develop/01.fit_a_line/README.cn.md) to write), you can start the training with the following command: + + + cd /home/work + docker run -it -v $PWD:/work hub.baidubce.com/paddlepaddle/paddle /work/train.py + + +In the above commands, the `-it` parameter indicates that the container has been run interactively; `-v $PWD:/work` specifies that the current path (the absolute path where the PWD variable in Linux will expand to the current path) is mounted to the `:/work` directory inside the container: `Hub.baidubce.com/paddlepaddle/paddle` specifies the container to be used; finally `/work/train.py` is the command executed inside the container, ie. the training program. + +Of course, you can also enter into the Docker container and execute or debug your code interactively: + + + docker run -it -v $PWD:/work hub.baidubce.com/paddlepaddle/paddle /bin/bash + cd /work + python train.py + + +**Note: In order to reduce the size, vim is not installed in PaddlePaddle Docker image by default. You can edit the code in the container after executing ** `apt-get install -y vim` **(which installs vim for you) in the container.** + +

+ +## Start PaddlePaddle Book tutorial with Docker + + +Use Docker to quickly launch a local Jupyter Notebook containing the PaddlePaddle official Book tutorial, which can be viewed on the web. PaddlePaddle Book is an interactive Jupyter Notebook for users and developers. If you want to learn more about deep learning, PaddlePaddle Book is definitely your best choice. You can read tutorials or create and share interactive documents with code, formulas, charts, and text. + +We provide a Docker image that can run the PaddlePaddle Book directly, running directly: + +`docker run -p 8888:8888 hub.baidubce.com/paddlepaddle/book` + +Domestic users can use the following image source to speed up access: + +`docker run -p 8888:8888 hub.baidubce.com/paddlepaddle/book` + +Then enter the following URL in your browser: + +`http://localhost:8888/` + +It's that simple and bon voyage! For further questions, please refer to the [FAQ](#FAQ). + + +

+## Perform GPU training using Docker + + +In order to ensure that the GPU driver works properly in the image, we recommend using [nvidia-docker](https://github.com/NVIDIA/nvidia-docker) to run the image. Don't forget to install the latest GPU drivers on your physical machine in advance. + +`Nvidia-docker run -it -v $PWD:/work hub.baidubce.com/paddlepaddle/paddle:latest-gpu /bin/bash` + +**Note: If you don't have nvidia-docker installed, you can try the following to mount the CUDA library and Linux devices into the Docker container:** + + + export CUDA_SO="$(\ls /usr/lib64/libcuda* | xargs -I{} echo '-v {}:{}') \ + $(\ls /usr/lib64/libnvidia* | xargs -I{} echo '-v {}:{}')" + export DEVICES=$(\ls /dev/nvidia* | xargs -I{} echo '--device {}:{}') + docker run ${CUDA_SO} \ + ${DEVICES} -it hub.baidubce.com/paddlepaddle/paddle:latest-gpu diff --git a/doc/paddle/install/compile/compile_CentOS.md b/doc/paddle/install/compile/compile_CentOS.md new file mode 100644 index 0000000000000000000000000000000000000000..cdec66e07eaa455dc22f454ff0ff6a35d1ef03c8 --- /dev/null +++ b/doc/paddle/install/compile/compile_CentOS.md @@ -0,0 +1,293 @@ +# **CentOS下从源码编译** + +## 环境准备 + +* **CentOS 版本 (64 bit)** + * **CentOS 6 (不推荐,不提供编译出现问题时的官方支持)** + * **CentOS 7 (GPU 版本支持CUDA 9.0/9.1/9.2/10.0/10.1 CUDA 9.1 仅支持单卡模式)** +* **Python 版本 2.7.15+/3.5.1+/3.6/3.7 (64 bit)** +* **pip 或 pip3 版本 9.0.1+ (64 bit)** + +## 选择CPU/GPU + +* 如果您的计算机没有 NVIDIA® GPU,请安装CPU版本的PaddlePaddle + +* 如果您的计算机有NVIDIA® GPU,请确保满足以下条件以编译GPU版PaddlePaddle + + * **CUDA 工具包10.0配合cuDNN v7.3+(如需多卡支持,需配合NCCL2.3.7及更高)** + * **CUDA 工具包9.0配合cuDNN v7.3+(如需多卡支持,需配合NCCL2.3.7及更高)** + * **GPU运算能力超过1.0的硬件设备** + + 您可参考NVIDIA官方文档了解CUDA和CUDNN的安装流程和配置方法,请见[CUDA](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/),[cuDNN](https://docs.nvidia.com/deeplearning/sdk/cudnn-install/) + +* 请确保您已经正确安装nccl2,或者按照以下指令安装nccl2(这里提供的是CentOS 7,CUDA9,cuDNN7下nccl2的安装指令),更多版本的安装信息请参考NVIDIA[官方网站](https://developer.nvidia.com/nccl): + + + wget http://developer.download.nvidia.com/compute/machine-learning/repos/rhel7/x86_64/nvidia-machine-learning-repo-rhel7-1.0.0-1.x86_64.rpm + rpm -i nvidia-machine-learning-repo-rhel7-1.0.0-1.x86_64.rpm + yum update -y + yum install -y libnccl-2.3.7-2+cuda9.0 libnccl-devel-2.3.7-2+cuda9.0 libnccl-static-2.3.7-2+cuda9.0 + + +## 安装步骤 + +在CentOS的系统下有2种编译方式: + +* 使用Docker编译(GPU版本只支持CentOS 7) +* 本机编译(不提供在CentOS 6下编译中遇到问题的支持) + + +### **使用Docker编译** + +[Docker](https://docs.docker.com/install/)是一个开源的应用容器引擎。使用Docker,既可以将PaddlePaddle的安装&使用与系统环境隔离,也可以与主机共享GPU、网络等资源 + +使用Docker编译PaddlePaddle,您需要: + +- 在本地主机上[安装Docker](https://hub.docker.com/search/?type=edition&offering=community) + +- 如需在Linux开启GPU支持,请[安装nvidia-docker](https://github.com/NVIDIA/nvidia-docker) + +请您按照以下步骤安装: + +1. 请首先选择您希望储存PaddlePaddle的路径,然后在该路径下使用以下命令将PaddlePaddle的源码从github克隆到本地当前目录下名为Paddle的文件夹中: + + `git clone https://github.com/PaddlePaddle/Paddle.git` + +2. 进入Paddle目录下: `cd Paddle` + +3. 
创建并进入已配置好编译环境的Docker容器: + + * 编译CPU版本的PaddlePaddle: + + + + `docker run --name paddle-test -v $PWD:/paddle --network=host -it hub.baidubce.com/paddlepaddle/paddle:latest-dev /bin/bash` + + > --name paddle-test为您创建的Docker容器命名为paddle-test; + + + > -v $PWD:/paddle 将当前目录挂载到Docker容器中的/paddle目录下(Linux中PWD变量会展开为当前路径的[绝对路径](https://baike.baidu.com/item/绝对路径/481185)); + + + > -it 与宿主机保持交互状态,`hub.baidubce.com/paddlepaddle/paddle:latest-dev` 使用名为`hub.baidubce.com/paddlepaddle/paddle:latest-dev`的镜像创建Docker容器,/bin/bash 进入容器后启动/bin/bash命令。 + + + + * 编译GPU版本的PaddlePaddle(仅支持CentOS 7): + + + + `nvidia-docker run --name paddle-test -v $PWD:/paddle --network=host -it hub.baidubce.com/paddlepaddle/paddle:latest-dev /bin/bash` + + > --name paddle-test为您创建的Docker容器命名为paddle-test; + + + > -v $PWD:/paddle 将当前目录挂载到Docker容器中的/paddle目录下(Linux中PWD变量会展开为当前路径的[绝对路径](https://baike.baidu.com/item/绝对路径/481185)); + + + > -it 与宿主机保持交互状态,`hub.baidubce.com/paddlepaddle/paddle:latest-dev` 使用名为`hub.baidubce.com/paddlepaddle/paddle:latest-dev`的镜像创建Docker容器,/bin/bash 进入容器后启动/bin/bash命令。 + + + > 注意:hub.baidubce.com/paddlepaddle/paddle:latest-dev内部安装CUDA 8.0。 + + +4. 进入Docker后进入paddle目录下: + + `cd paddle` + +5. 切换到较稳定版本下进行编译: + + `git checkout [分支名]` + + 例如: + + `git checkout release/1.5` + + 注意:python3.6、python3.7版本从release/1.2分支开始支持 + +6. 创建并进入/paddle/build路径下: + + `mkdir -p /paddle/build && cd /paddle/build` + +7. 使用以下命令安装相关依赖: + + For Python2: pip install protobuf + For Python3: pip3.5 install protobuf + + 注意:以上用Python3.5命令来举例,如您的Python版本为3.6/3.7,请将上述命令中的Python3.5改成Python3.6/Python3.7 + + > 安装protobuf。 + + `yum install patchelf` + + > 安装patchelf,PatchELF 是一个小而实用的程序,用于修改ELF可执行文件的动态链接器和RPATH。 + +8. 执行cmake: + + >具体编译选项含义请参见[编译选项表](../Tables.html#Compile) + >请注意修改参数`-DPY_VERSION`为您希望编译使用的python版本, 例如`-DPY_VERSION=3.5`表示python版本为3.5.x + + * 对于需要编译**CPU版本PaddlePaddle**的用户: + `cmake .. -DPY_VERSION=3.5 -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release` + + * 对于需要编译**GPU版本PaddlePaddle**的用户: + `cmake .. -DPY_VERSION=3.5 -DWITH_GPU=ON -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release` + + > 我们目前不支持CentOS 6下使用Docker编译GPU版本的PaddlePaddle + +9. 执行编译: + + `make -j$(nproc)` + + > 使用多核编译 + +10. 编译成功后进入`/paddle/build/python/dist`目录下找到生成的`.whl`包: `cd /paddle/build/python/dist` + +11. 在当前机器或目标机器安装编译好的`.whl`包: + + For Python2: pip install -U(whl包的名字) + For Python3: pip3.5 install -U(whl包的名字) + + 注意:以上涉及Python3的命令,用Python3.5来举例,如您的Python版本为3.6/3.7,请将上述命令中的Python3.5改成Python3.6/Python3.7 + +恭喜,至此您已完成PaddlePaddle的编译安装。您只需要进入Docker容器后运行PaddlePaddle,即可开始使用。更多Docker使用请参见[Docker官方文档](https://docs.docker.com) + +> 注:PaddlePaddle Docker镜像为了减小体积,默认没有安装`vim`,您可以在容器中执行 `yum install -y vim` 来安装 + + +### **本机编译** + +1. 检查您的计算机和操作系统是否符合我们支持的编译标准: `uname -m && cat /etc/*release` + +2. 更新`yum`的源: `yum update`, 并添加必要的yum源:`yum install -y epel-release`, 并提前安装[OpenCV](https://opencv.org/releases.html) + +3. 安装必要的工具`bzip2`以及`make`: `yum install -y bzip2` , `yum install -y make` + +4. 我们支持使用virtualenv进行编译安装,首先请使用以下命令创建一个名为`paddle-venv`的虚环境: + + * a. 安装Python-dev: + + For Python2: yum install python-devel + For Python3: (请参照Python官方流程安装) + + * b. 安装pip: + + For Python2: yum install python-pip (请保证拥有9.0.1及以上的pip版本) + For Python3: (请参照Python官方流程安装, 并保证拥有9.0.1及以上的pip3版本,请注意,python3.6及以上版本环境下,pip3并不一定对应python版本,如python3.7下默认只有pip3.7) + + * c.(Only For Python3)设置Python3相关的环境变量,这里以python3.5版本示例,请替换成您使用的版本(3.6、3.7): + + 1. 
首先使用``` find `dirname $(dirname + $(which python3))` -name "libpython3.so"```找到Python lib的路径,如果是3.6或3.7,请将`python3`改成`python3.6`或`python3.7`,然后将下面[python-lib-path]替换为找到文件路径 + + 2. 设置PYTHON_LIBRARIES:`export PYTHON_LIBRARY=[python-lib-path]` + + 3. 其次使用```find `dirname $(dirname + $(which python3))`/include -name "python3.5m"```找到Python Include的路径,请注意python版本,然后将下面[python-include-path]替换为找到文件路径 + 4. 设置PYTHON_INCLUDE_DIR: `export PYTHON_INCLUDE_DIRS=[python-include-path]` + + 5. 设置系统环境变量路径:`export PATH=[python-lib-path]:$PATH` (这里将[python-lib-path]的最后两级目录替换为/bin/) + + * d. 安装虚环境`virtualenv`以及`virtualenvwrapper`并创建名为`paddle-venv`的虚环境:(请注意对应python版本的pip3的命令,如pip3.6、pip3.7) + + 1. `pip install virtualenv` 或 `pip3 install virtualenv` + 2. `pip install virtualenvwrapper` 或 `pip3 install virtualenvwrapper` + 3. 找到`virtualenvwrapper.sh`: `find / -name virtualenvwrapper.sh`(请找到对应Python版本的`virtualenvwrapper.sh`) + 4. 查看`virtualenvwrapper.sh`中的安装方法: `cat vitualenvwrapper.sh`, 该shell文件中描述了步骤及命令 + 5. 按照`virtualenvwrapper.sh`中的描述,安装`virtualwrapper` + 6. 设置VIRTUALENVWRAPPER_PYTHON:`export VIRTUALENVWRAPPER_PYTHON=[python-lib-path]:$PATH` (这里将[python-lib-path]的最后两级目录替换为/bin/) + 7. 创建名为`paddle-venv`的虚环境: `mkvirtualenv paddle-venv` + +5. 进入虚环境:`workon paddle-venv` + +6. **执行编译前**请您确认在虚环境中安装有[编译依赖表](../Tables.html#third_party)中提到的相关依赖: + + * 这里特别提供`patchELF`的安装方法,其他的依赖可以使用`yum install`或者`pip install`/`pip3 install` 后跟依赖名称和版本安装: + + `yum install patchelf` + > 不能使用yum安装的用户请参见patchElF github[官方文档](https://gist.github.com/ruario/80fefd174b3395d34c14) + +7. 将PaddlePaddle的源码clone在当下目录下的Paddle的文件夹中,并进入Padde目录下: + + - `git clone https://github.com/PaddlePaddle/Paddle.git` + + - `cd Paddle` + +8. 切换到较稳定release分支下进行编译: + + `git checkout [分支名]` + + 例如: + + `git checkout release/1.5` + +9. 并且请创建并进入一个叫build的目录下: + + `mkdir build && cd build` + +10. 执行cmake: + + >具体编译选项含义请参见[编译选项表](../Tables.html#Compile) + + * 对于需要编译**CPU版本PaddlePaddle**的用户: + + For Python2: cmake .. -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release + For Python3: cmake .. -DPY_VERSION=3.5 -DPYTHON_INCLUDE_DIR=${PYTHON_INCLUDE_DIRS} \ + -DPYTHON_LIBRARY=${PYTHON_LIBRARY} -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release + + > 如果遇到`Could NOT find PROTOBUF (missing: PROTOBUF_LIBRARY PROTOBUF_INCLUDE_DIR)`可以重新执行一次cmake指令。 + > 请注意PY_VERSION参数更换为您需要的python版本 + + + * 对于需要编译**GPU版本PaddlePaddle**的用户:(**仅支持CentOS7(CUDA10.0/CUDA9)**) + + 1. 请确保您已经正确安装nccl2,或者按照以下指令安装nccl2(这里提供的是ubuntu 16.04,CUDA9,cuDNN7下nccl2的安装指令),更多版本的安装信息请参考NVIDIA[官方网站](https://developer.nvidia.com/nccl): + + + i. `wget http://developer.download.nvidia.com/compute/machine-learning/repos/rhel7/x86_64/nvidia-machine-learning-repo-rhel7-1.0.0-1.x86_64.rpm` + + + ii. `rpm -i nvidia-machine-learning-repo-rhel7-1.0.0-1.x86_64.rpm` + + + iii. `yum install -y libnccl-2.3.7-2+cuda9.0 libnccl-devel-2.3.7-2+cuda9.0 libnccl-static-2.3.7-2+cuda9.0` + + 2. 如果您已经正确安装了`nccl2`,就可以开始cmake了:(*For Python3: 请给PY_VERSION参数配置正确的python版本*) + + For Python2: cmake .. -DWITH_GPU=ON -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release + For Python3: cmake .. -DPYTHON_EXECUTABLE:FILEPATH=[您可执行的Python3的路径] -DPYTHON_INCLUDE_DIR:PATH=[之前的PYTHON_INCLUDE_DIRS] -DPYTHON_LIBRARY:FILEPATH=[之前的PYTHON_LIBRARY] -DWITH_GPU=ON -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release + + 注意:以上涉及Python3的命令,用Python3.5来举例,如您的Python版本为3.6/3.7,请将上述命令中的Python3.5改成Python3.6/Python3.7 + + + + +11. 
使用以下命令来编译: + + `make -j$(nproc)` + + > 使用多核编译 + + > 如果编译过程中显示“Too many open files”错误时,请使用指令 ulimit -n 8192 来增大当前进程允许打开的文件数,一般来说8192可以保证编译完成。 + +12. 编译成功后进入`/paddle/build/python/dist`目录下找到生成的`.whl`包: `cd /paddle/build/python/dist` + +13. 在当前机器或目标机器安装编译好的`.whl`包: + + `pip install -U(whl包的名字)`或`pip3 install -U(whl包的名字)` + +恭喜,至此您已完成PaddlePaddle的编译安装 + +## **验证安装** +安装完成后您可以使用 `python` 或 `python3` 进入python解释器,输入`import paddle.fluid as fluid` ,再输入 + `fluid.install_check.run_check()` + +如果出现`Your Paddle Fluid is installed succesfully!`,说明您已成功安装。 + +## **如何卸载** +请使用以下命令卸载PaddlePaddle: + +* **CPU版本的PaddlePaddle**: `pip uninstall paddlepaddle` 或 `pip3 uninstall paddlepaddle` +* **GPU版本的PaddlePaddle**: `pip uninstall paddlepaddle-gpu` 或 `pip3 uninstall paddlepaddle-gpu` + +使用Docker安装PaddlePaddle的用户,请进入包含PaddlePaddle的容器中使用上述命令,注意使用对应版本的pip diff --git a/doc/paddle/install/compile/compile_CentOS_en.md b/doc/paddle/install/compile/compile_CentOS_en.md new file mode 100644 index 0000000000000000000000000000000000000000..4a21b30dc73ffbb6108517add6c145eada07138d --- /dev/null +++ b/doc/paddle/install/compile/compile_CentOS_en.md @@ -0,0 +1,304 @@ +# **Compile on CentOS from Source Code** + +## Environment preparation + +* **CentOS version (64 bit)** + * **CentOS 6 (not recommended, no official support for compilation problems)** + * **CentOS 7 (GPU version supports CUDA 9.0/9.1/9.2/10.0/10.1 CUDA 9.1, only support single-card mode)** +* **Python version 2.7.15+/3.5.1+/3.6/3.7 (64 bit)** +* **pip or pip3 version 9.0.1+ (64 bit)** + +## Choose CPU/GPU + +* If your computer doesn't have NVIDIA® GPU, please install CPU version of PaddlePaddle + +* If your computer has NVIDIA® GPU, and the following conditions are met,GPU version of PaddlePaddle is recommended. + + * **CUDA toolkit 10.0 with cuDNN v7.3+(for multi card support, NCCL2.3.7 or higher)** + * **CUDA toolkit 9.0 with cuDNN v7.3+(for multi card support, NCCL2.3.7 or higher)** + * **Hardware devices with GPU computing power over 1.0** + + You can refer to NVIDIA official documents for installation process and configuration method of CUDA and cudnn. Please refer to[CUDA](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/),[cuDNN](https://docs.nvidia.com/deeplearning/sdk/cudnn-install/) + +* * If you need to use multi card environment, please make sure that you have installed nccl2 correctly, or install nccl2 according to the following instructions (here is the installation instructions of nccl2 under ubuntu 16.04, CUDA9 and cuDNN7). For more version of installation information, please refer to NVIDIA[official website](https://developer.nvidia.com/nccl): + + + wget http://developer.download.nvidia.com/compute/machine-learning/repos/rhel7/x86_64/nvidia-machine-learning-repo-rhel7-1.0.0-1.x86_64.rpm + rpm -i nvidia-machine-learning-repo-rhel7-1.0.0-1.x86_64.rpm + yum update -y + yum install -y libnccl-2.3.7-2+cuda9.0 libnccl-devel-2.3.7-2+cuda9.0 libnccl-static-2.3.7-2+cuda9.0 + + +## Installation steps + +There are two compilation methods under CentOS system: + +* Compile with Docker(GPU version only supports CentOS 7) +* Local compilation (no official support for compilation problems under CentOS 6) + + +### **Compile with Docker** + +[Docker](https://docs.docker.com/install/) is an open source application container engine. 
Using docker, you can not only isolate the installation and use of paddlepaddle from the system environment, but also share GPU, network and other resources with the host + +Compiling PaddlePaddle with Docker,you need: + +- On the local host [Install Docker](https://hub.docker.com/search/?type=edition&offering=community) + +- To enable GPU support on Linux, please [Install nvidia-docker](https://github.com/NVIDIA/nvidia-docker) + +Please follow the steps below to install: + +1. First select the path where you want to store PaddlePaddle, then use the following command to clone PaddlePaddle's source code from github to a folder named Paddle in the local current directory: + + `git clone https://github.com/PaddlePaddle/Paddle.git` + +2. Go to the Paddle directory: `cd Paddle` + +3. Create and enter a Docker container that meets the compilation environment: + + * Compile CPU version of PaddlePaddle: + + + + `docker run --name paddle-test -v $PWD:/paddle --network=host -it hub.baidubce.com/paddlepaddle/paddle:latest-dev /bin/bash` + + > --name paddle-test names the Docker container you created as paddle-test; + + + > -v $PWD:/paddle mount the current directory to the /paddle directory in the docker container (PWD variable in Linux will be expanded to [absolute path](https://baike.baidu.com/item/绝对路径/481185) of the current path); + + + > -it keeps interaction with the host,`hub.baidubce.com/paddlepaddle/paddle:latest-dev` use the image named `hub.baidubce.com/paddlepaddle/paddle:latest-dev` to create Docker container, /bin/bash start the /bin/bash command after entering the container. + + + + * Compile GPU version of PaddlePaddle (only supports CentOS 7): + + + + `nvidia-docker run --name paddle-test -v $PWD:/paddle --network=host -it hub.baidubce.com/paddlepaddle/paddle:latest-dev /bin/bash` + + > --name paddle-test names the Docker container you created as paddle-test; + + + > -v $PWD:/paddle mount the current directory to the /paddle directory in the docker container (PWD variable in Linux will be expanded to [absolute path](https://baike.baidu.com/item/绝对路径/481185) of the current path); + + + + > -it keeps interaction with the host,`hub.baidubce.com/paddlepaddle/paddle:latest-dev` use the image named `hub.baidubce.com/paddlepaddle/paddle:latest-dev` to create Docker container, /bin/bash start the /bin/bash command after entering the container. + + + > Note: hub.baidubce.com/paddlepaddle/paddle:latest-dev internally install CUDA 8.0. + + +4. After entering Docker, go to the paddle directory: `cd paddle` + +5. Switch to a more stable version to compile: + + `git checkout [name of the branch]` + + For example: + + `git checkout release/1.5` + + Note: python3.6、python3.7 version started supporting from release/1.2 branch + +6. Create and enter the /paddle/build path: + + `mkdir -p /paddle/build && cd /paddle/build` + +7. Use the following command to install the dependencies: + + + For Python2: pip install protobuf + For Python3: pip3.5 install protobuf + + Note: We used Python3.5 command as an example above, if the version of your Python is 3.6/3.7, please change Python3.5 in the commands to Python3.6/Python3.7 + + > Install protobuf 3.1.0 + + `yum install patchelf` + + > Installing patchelf, PatchELF is a small and useful program for modifying the dynamic linker and RPATH of ELF executables. + +8. Execute cmake: + + > For details on the compilation options, see the [compilation options table](../Tables.html/#Compile). 
+ > Please attention to modify parameters `-DPY_VERSION` for the version of Python you want to compile with, for example `-DPY_VERSION=3.5` means the version of python is 3.5.x + + * For users who need to compile the **CPU version PaddlePaddle**: + + `cmake .. -DPY_VERSION=3.5 -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release` + + * For users who need to compile the **GPU version PaddlePaddle**: + `cmake .. -DPY_VERSION=3.5 -DWITH_GPU=ON -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release` + + > We currently do not support the compilation of the GPU version PaddlePaddle under CentOS 6. + +9. Execute compilation: + + `make -j$(nproc)` + + > Use multicore compilation + +10. After compiling successfully, go to the `/paddle/build/python/dist` directory and find the generated `.whl` package: `cd /paddle/build/python/dist` + +11. Install the compiled `.whl` package on the current machine or target machine: + + For Python2: pip install -U (whl package name) + For Python3: pip3.5 install -U (whl package name) + + Note: For the command involving Python 3, we use Python 3.5 as an example above, if the version of your Python is 3.6/3.7, please change Python3.5 in the commands to Python3.6/Python3.7 + +Congratulations, now that you have successfully installed PaddlePaddle using Docker, you only need to run PaddlePaddle after entering the Docker container. For more Docker usage, please refer to the [official Docker documentation](https://docs.docker.com/). + +> Note: In order to reduce the size, `vim` is not installed in PaddlePaddle Docker image by default. You can edit the code in the container after executing `yum install -y vim` in the container. + + +### **Local compilation** + +1. Check that your computer and operating system meet the compilation standards we support: `uname -m && cat /etc/*release` + +2. Update the source of `yum`: `yum update`, and add the necessary yum source: `yum install -y epel-release`, and install [OpenCV](https://opencv.org/releases.html) in advance + +3. Install the necessary tools `bzip2` and `make`: `yum install -y bzip2 `, `yum install -y make` + +4. We support compiling and installing with virtualenv. First, create a virtual environment called `paddle-venv` with the following command: + + * a. Install Python-dev: + + For Python2: yum install python-devel + For Python3: (Please refer to the official Python installation process) + + + * b. Install pip: + + + For Python2: yum install python-pip (please have a pip version of 9.0.1 and above) + For Python3: (Please refer to the official Python installation process, and ensure that the pip3 version 9.0.1 and above, please note that in python3.6 and above, pip3 does not necessarily correspond to the python version, such as python3.7 default only Pip3.7) + + * c. (Only For Python3) set Python3 related environment variables, here is python3.5 version example, please replace with the version you use (3.6, 3.7): + + 1. First find the path to the Python lib using ``` find `dirname $(dirname + $(which python3))` -name "libpython3.so"``` . If it is 3.6 or 3.7, change `python3` to `python3.6` or `python3.7`, then replace [python-lib-path] in the following steps with the file path found. + + 2. Set PYTHON_LIBRARIES: `export PYTHON_LIBRARY=[python-lib-path]`. + + 3. Secondly, use ```find `dirname $(dirname + $(which python3))`/include -name "python3.5m"``` to find the path to Python Include, please pay attention to the python version, then replace the following [python-include-path] to the file path found. + + 4. 
Set PYTHON_INCLUDE_DIR: `export PYTHON_INCLUDE_DIRS=[python-include-path]` + + 5. Set the system environment variable path: `export PATH=[python-lib-path]:$PATH `(here replace the last two levels content of [python-lib-path] with /bin/) + + * d. Install the virtual environment `virtualenv` and `virtualenvwrapper` and create a virtual environment called `paddle-venv`: (please note the pip3 commands corresponding to the python version, such as pip3.6, pip3.7) + + 1. `pip install virtualenv` or `pip3 install virtualenv` + + 2. `Pip install virtualenvwrapper` or `pip3 install virtualenvwrapper` + + 3. Find `virtualenvwrapper.sh`: `find / -name virtualenvwrapper.sh` (please find the corresponding Python version of `virtualenvwrapper.sh`) + + 4. See the installation method in `virtualenvwrapper.sh`: `cat vitualenvwrapper.sh`, this shell file describes the steps and commands + + 5. Install `virtualwrapper` as described in `virtualenvwrapper.sh` + + 6. Set VIRTUALENVWRAPPER_PYTHON:`export VIRTUALENVWRAPPER_PYTHON=[python-lib-path]:$PATH` (here replace the last two levels content of [python-lib-path] with /bin/) + 7. Create virtual environment named `paddle-venv`: `mkvirtualenv paddle-venv` + +5. Enter the virtual environment: `workon paddle-venv` + +6. Before **executing the compilation**, please confirm that the related dependencies mentioned in the [compile dependency table](../Tables.html/#third_party) are installed in the virtual environment: + + * Here is the installation method for `patchELF`. Other dependencies can be installed using `yum install` or `pip install`/`pip3 install` followed by the name and version: + + `yum install patchelf` + > Users who can't use yum installation can refer to patchElF github [official documentation](https://gist.github.com/ruario/80fefd174b3395d34c14). + +7. Put the PaddlePaddle source cloned in the Paddle folder in the current directory and go to the Paddle directory: + + - `git clone https://github.com/PaddlePaddle/Paddle.git` + + - `cd Paddle` + +8. Switch to a more stable release branch for compilation (support for Python 3.6 and 3.7 is added from the 1.2 branch): + + - `git checkout [name of target branch]` + + For example: + + `git checkout release/1.5` + +9. And please create and enter a directory called build: + + - `mkdir build && cd build` + +10. Execute cmake: + + > For details on the compilation options, see the [compilation options table](../Tables.html/#Compile). + + * For users who need to compile the **CPU version PaddlePaddle**: + + + For Python2: cmake .. -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release + For Python3: cmake .. -DPY_VERSION=3.5 -DPYTHON_INCLUDE_DIR=${PYTHON_INCLUDE_DIRS} \ + -DPYTHON_LIBRARY=${PYTHON_LIBRARY} -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release + + + > If you encounter `Could NOT find PROTOBUF (missing: PROTOBUF_LIBRARY PROTOBUF_INCLUDE_DIR)`, you can re-execute the cmake command. + > Please note that the PY_VERSION parameter is replaced with the python version you need. + + + * For users who need to compile the **GPU version PaddlePaddle**: + + 1. Please make sure that you have installed nccl2 correctly, or install nccl2 according to the following instructions (here is ubuntu 16.04, CUDA9, ncDNN7 nccl2 installation instructions), for more information on the installation information please refer to the [NVIDIA official website](https://developer.nvidia.com/nccl/nccl-download): + + i. 
`wget http://developer.download.nvidia.com/compute/machine-learning/repos/rhel7/x86_64/nvidia-machine-learning-repo-rhel7-1.0.0-1.x86_64.rpm` + + ii. `rpm -i nvidia-machine-learning-repo-rhel7-1.0.0-1.x86_64.rpm` + + iii. `yum install -y libnccl-2.3.7-2+cuda9.0 libnccl-devel-2.3.7-2+cuda9.0 libnccl-static-2.3.7-2+cuda9.0` + + 2. If you have already installed `nccl2` correctly, you can start cmake: *(For Python3: Please configure the correct python version for the PY_VERSION parameter)* + + For Python2: cmake .. -DWITH_GPU=ON -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release + For Python3: cmake .. -DPYTHON_EXECUTABLE:FILEPATH=[您可执行的Python3的路径] -DPYTHON_INCLUDE_DIR:PATH=[之前的PYTHON_INCLUDE_DIRS] -DPYTHON_LIBRARY:FILEPATH=[之前的PYTHON_LIBRARY] -DWITH_GPU=ON -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release + + + Note: For the command involving Python 3, we use Python 3.5 as an example above, if the version of your Python is 3.6/3.7, please change Python3.5 in the commands to Python3.6/Python3.7 + + + +11. Compile with the following command: + + `make -j$(nproc)` + + > Use multicore compilation + + > If “Too many open files” error is displayed during compilation, please use the instruction ulimit -n 8192 to increase the number of files allowed to be opened by the current process. Generally speaking, 8192 can ensure the completion of compilation. + +12. After compiling successfully, go to the `/paddle/build/python/dist `directory and find the generated `.whl` package: `cd /paddle/build/python/dist` + +13. Install the compiled `.whl` package on the current machine or target machine: + + `Pip install -U (whl package name) `or `pip3 install -U (whl package name)` + +Congratulations, now you have completed the process of compiling PaddlePaddle natively. + +

+### ***Verify installation***
+
+After the installation is complete, you can use `python` or `python3` to enter the Python interpreter, run `import paddle.fluid as fluid`, and then run `fluid.install_check.run_check()` to verify that the installation was successful.
+
+If `Your Paddle Fluid is installed succesfully!` appears, the installation was successful.
+
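+For example, a one-line check from the shell (shown for Python 3; use `python` for Python 2) could be:
+
+    python3 -c "import paddle.fluid as fluid; fluid.install_check.run_check()"
+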

+### ***How to uninstall*** + +Please use the following command to uninstall PaddlePaddle (users who use Docker to install PaddlePaddle should use the following command in the container containing PaddlePaddle. Please use the corresponding version of pip): + +* ***CPU version of PaddlePaddle***: `pip uninstall paddlepaddle` or `pip3 uninstall paddlepaddle` +* ***GPU version of PaddlePaddle***: `pip uninstall paddlepaddle-gpu` or `pip3 uninstall paddlepaddle-gpu` + +Users installing PaddlePaddle with Docker, please use above commands in the container involved PaddlePaddle and attention to use the corresponding version of Pip diff --git a/doc/paddle/install/compile/compile_MacOS.md b/doc/paddle/install/compile/compile_MacOS.md new file mode 100644 index 0000000000000000000000000000000000000000..9881cb37704dd9f8a3efb76e3892b8d76c18af05 --- /dev/null +++ b/doc/paddle/install/compile/compile_MacOS.md @@ -0,0 +1,217 @@ +# **MacOS下从源码编译** + +## 环境准备 + +* **MacOS 版本 10.11/10.12/10.13/10.14 (64 bit) (不支持GPU版本)** +* **Python 版本 2.7.15+/3.5.1+/3.6/3.7 (64 bit)** +* **pip 或 pip3 版本 9.0.1+ (64 bit)** + +## 选择CPU/GPU + +* 目前仅支持在MacOS环境下编译安装CPU版本的PaddlePaddle + +## 安装步骤 +在MacOS系统下有2种编译方式: + +* Docker源码编译 +* 本机源码编译 + + +### **使用Docker编译** + +[Docker](https://docs.docker.com/install/)是一个开源的应用容器引擎。使用Docker,既可以将PaddlePaddle的安装&使用与系统环境隔离,也可以与主机共享GPU、网络等资源 + +使用Docker编译PaddlePaddle,您需要: + +- 在本地主机上[安装Docker](https://hub.docker.com/search/?type=edition&offering=community) + +- 使用Docker ID登陆Docker,以避免出现`Authenticate Failed`错误 + +请您按照以下步骤安装: + +1. 进入Mac的终端 + +2. 请选择您希望储存PaddlePaddle的路径,然后在该路径下使用以下命令将PaddlePaddle的源码从github克隆到本地当前目录下名为Paddle的文件夹中: + + `git clone https://github.com/PaddlePaddle/Paddle.git` + +3. 进入Paddle目录下: `cd Paddle` + +4. 创建并进入满足编译环境的Docker容器: + + `docker run --name paddle-test -v $PWD:/paddle --network=host -it hub.baidubce.com/paddlepaddle/paddle:latest-dev /bin/bash` + + > --name paddle-test为您创建的Docker容器命名为paddle-test,-v $PWD:/paddle 将当前目录挂载到Docker容器中的/paddle目录下(Linux中PWD变量会展开为当前路径的[绝对路径](https://baike.baidu.com/item/绝对路径/481185)),-it 与宿主机保持交互状态,`hub.baidubce.com/paddlepaddle/paddle:latest-dev` 使用名为`hub.baidubce.com/paddlepaddle/paddle:latest-dev`的镜像创建Docker容器,/bin/bash 进入容器后启动/bin/bash命令。 + +5. 进入Docker后进入paddle目录下: + + `cd paddle` + +6. 切换到较稳定版本下进行编译: + + `git checkout [分支名]` + + 例如: + + `git checkout release/1.2` + + 注意:python3.6、python3.7版本从release/1.2分支开始支持 + +7. 创建并进入/paddle/build路径下: + + `mkdir -p /paddle/build && cd /paddle/build` + +8. 使用以下命令安装相关依赖: + + For Python2: pip install protobuf==3.1.0 + For Python3: pip3.5 install protobuf==3.1.0 + + 注意:以上用Python3.5命令来举例,如您的Python版本为3.6/3.7,请将上述命令中的Python3.5改成Python3.6/Python3.7 + + > 安装protobuf 3.1.0。 + + `apt install patchelf` + + > 安装patchelf,PatchELF 是一个小而实用的程序,用于修改ELF可执行文件的动态链接器和RPATH。 + +9. 执行cmake: + + >具体编译选项含义请参见[编译选项表](../Tables.html#Compile) + >请注意修改参数`-DPY_VERSION`为您希望编译使用的python版本, 例如`-DPY_VERSION=3.5`表示python版本为3.5.x + + * 对于需要编译**CPU版本PaddlePaddle**的用户: + + `cmake .. -DPY_VERSION=3.5 -DWITH_GPU=OFF -DWITH_TESTING=OFF -DWITH_AVX=OFF -DCMAKE_BUILD_TYPE=Release` + + > 我们目前不支持MacOS下GPU版本PaddlePaddle的编译 + +10. 执行编译: + + `make -j$(nproc)` + + > 使用多核编译 + +11. 编译成功后进入`/paddle/build/python/dist`目录下找到生成的`.whl`包: `cd /paddle/build/python/dist` + +12. 
在当前机器或目标机器安装编译好的`.whl`包: + + For Python2: pip install -U(whl包的名字) + For Python3: pip3.5 install -U(whl包的名字) + + 注意:以上涉及Python3的命令,用Python3.5来举例,如您的Python版本为3.6/3.7,请将上述命令中的Python3.5改成Python3.6/Python3.7 + +恭喜,至此您已完成PaddlePaddle的编译安装。您只需要进入Docker容器后运行PaddlePaddle,即可开始使用。更多Docker使用请参见[Docker官方文档](https://docs.docker.com) + +> 注:PaddlePaddle Docker镜像为了减小体积,默认没有安装`vim`,您可以在容器中执行 `apt-get install -y vim` 来安装 + + +
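+作为参考,下面把上述 Docker 编译步骤串成一个示意脚本(仅作示意:镜像、分支与 Python 版本请按您的实际环境调整):
+
+    # 在 Mac 终端中执行(假设已安装并登录 Docker)
+    git clone https://github.com/PaddlePaddle/Paddle.git && cd Paddle
+    docker run --name paddle-test -v $PWD:/paddle --network=host -it hub.baidubce.com/paddlepaddle/paddle:latest-dev /bin/bash
+
+    # 以下命令在容器内执行
+    cd /paddle && git checkout release/1.2
+    mkdir -p /paddle/build && cd /paddle/build
+    pip3.5 install protobuf==3.1.0
+    apt install patchelf
+    cmake .. -DPY_VERSION=3.5 -DWITH_GPU=OFF -DWITH_TESTING=OFF -DWITH_AVX=OFF -DCMAKE_BUILD_TYPE=Release
+    make -j$(nproc)
+    cd /paddle/build/python/dist && pip3.5 install -U ./*.whl
+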

+### **本机编译** + +**请严格按照以下指令顺序执行** + +1. 检查您的计算机和操作系统是否符合我们支持的编译标准: `uname -m` 并且在`关于本机`中查看系统版本。并提前安装[OpenCV](https://opencv.org/releases.html) + +2. 安装Python以及pip: + + > **请不要使用MacOS中自带Python**,我们强烈建议您使用[Homebrew](https://brew.sh)安装python(对于**Python3**请使用python[官方下载](https://www.python.org/downloads/mac-osx/)python3.5.x、python3.6.x、python3.7.x), pip以及其他的依赖,这将会使您高效编译。 + + For python2: brew install python@2 + For python3: 使用Python官网安装 + + > 请注意,当您的mac上安装有多个python时请保证您正在使用的python是您希望使用的python。 + +3. (Only For Python2)设置Python相关的环境变量: + + - 请使用`find / -name libpython2.7.dylib`找到您当前使用python的`libpython2.7.dylib`路径,并使用`export LD_LIBRARY_PATH=[libpython2.7.dylib的路径] && export DYLD_LIBRARY_PATH=[libpython2.7.dylib所在的目录的上两级目录]` + +4. (Only For Python3)设置Python相关的环境变量: + + - a. 首先使用 + ```find `dirname $(dirname + $(which python3))` -name "libpython3.*.dylib"``` + 找到Pythonlib的路径(弹出的第一个对应您需要使用的python的dylib路径),然后(下面[python-lib-path]替换为找到文件路径) + + - b. 设置PYTHON_LIBRARIES:`export PYTHON_LIBRARY=[python-lib-path]` + + - c. 其次使用找到PythonInclude的路径(通常是找到[python-lib-path]的上一级目录为同级目录的include,然后找到该目录下python3.x或者python2.x的路径),然后(下面[python-include-path]替换为找到路径) + - d. 设置PYTHON_INCLUDE_DIR: `export PYTHON_INCLUDE_DIRS=[python-include-path]` + + - e. 设置系统环境变量路径:`export PATH=[python-bin-path]:$PATH` (这里[python-bin-path]为将[python-lib-path]的最后两级目录替换为/bin/后的目录) + + - f. 设置动态库链接: `export LD_LIBRARY_PATH=[python-ld-path]` 以及 `export DYLD_LIBRARY_PATH=[python-ld-path]` (这里[python-ld-path]为[python-bin-path]的上一级目录) + + - g. (可选)如果您是在MacOS 10.14上编译PaddlePaddle,请保证您已经安装了[对应版本](http://developer.apple.com/download)的Xcode。 + +5. **执行编译前**请您确认您的环境中安装有[编译依赖表](../Tables.html#third_party)中提到的相关依赖,否则我们强烈推荐使用`Homebrew`安装相关依赖。 + + > MacOS下如果您未自行修改或安装过“编译依赖表”中提到的依赖,则仅需要使用`pip`安装`numpy,protobuf,wheel`,使用`homebrew`安装`wget,swig, unrar`,另外安装`cmake`即可 + + - a. 这里特别说明一下**CMake**的安装: + + + CMake我们支持3.10以上版本,推荐使用CMake3.16,请根据以下步骤安装: + + 1. 从CMake[官方网站](https://cmake.org/files/v3.16/cmake-3.16.0-Darwin-x86_64.dmg)下载CMake镜像并安装 + 2. 在控制台输入`sudo "/Applications/CMake.app/Contents/bin/cmake-gui" –install` + + - b. 如果您不想使用系统默认的blas而希望使用自己安装的OPENBLAS请参见[FAQ](../FAQ.html/#OPENBLAS) + +6. 将PaddlePaddle的源码clone在当下目录下的Paddle的文件夹中,并进入Padde目录下: + + - `git clone https://github.com/PaddlePaddle/Paddle.git` + + - `cd Paddle` + +7. 切换到较稳定release分支下进行编译: + + `git checkout [分支名]` + + 例如: + + `git checkout release/1.2` + + 注意:python3.6、python3.7版本从release/1.2分支开始支持 + +8. 并且请创建并进入一个叫build的目录下: + + `mkdir build && cd build` + +9. 执行cmake: + + >具体编译选项含义请参见[编译选项表](../Tables.html#Compile) + + * 对于需要编译**CPU版本PaddlePaddle**的用户: + + For Python2: cmake .. -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release + For Python3: cmake .. -DPY_VERSION=3.5 -DPYTHON_INCLUDE_DIR=${PYTHON_INCLUDE_DIRS} \ + -DPYTHON_LIBRARY=${PYTHON_LIBRARY} -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release + + >`-DPY_VERSION=3.5`请修改为安装环境的Python版本 + +10. 使用以下命令来编译: + + `make -j4` + +11. 编译成功后进入`/paddle/build/python/dist`目录下找到生成的`.whl`包: `cd /paddle/build/python/dist` + +12. 
在当前机器或目标机器安装编译好的`.whl`包: + + `pip install -U(whl包的名字)`或`pip3 install -U(whl包的名字)` + + > 如果您的电脑上安装有多个python环境以及pip请参见[FAQ](../Tables.html#MACPRO) + +恭喜,至此您已完成PaddlePaddle的编译安装 + +## **验证安装** +安装完成后您可以使用 `python` 或 `python3` 进入python解释器,输入`import paddle.fluid as fluid` ,再输入 + `fluid.install_check.run_check()` + +如果出现`Your Paddle Fluid is installed succesfully!`,说明您已成功安装。 + +## **如何卸载** +请使用以下命令卸载PaddlePaddle + +* **CPU版本的PaddlePaddle**: `pip uninstall paddlepaddle` 或 `pip3 uninstall paddlepaddle` + +使用Docker安装PaddlePaddle的用户,请进入包含PaddlePaddle的容器中使用上述命令,注意使用对应版本的pip diff --git a/doc/paddle/install/compile/compile_MacOS_en.md b/doc/paddle/install/compile/compile_MacOS_en.md new file mode 100644 index 0000000000000000000000000000000000000000..f838a623a095f9294639280e5270469584fcfa7f --- /dev/null +++ b/doc/paddle/install/compile/compile_MacOS_en.md @@ -0,0 +1,229 @@ +# **Compile on MacOS from Source Code** + +## Environment preparation + +* **MacOS version 10.11/10.12/10.13/10.14 (64 bit) (not support GPU version)** +* **Python version 2.7.15+/3.5.1+/3.6/3.7 (64 bit)** +* **pip or pip3 version 9.0.1+ (64 bit)** + +## Choose CPU/GPU + +* Currently, only PaddlePaddle for CPU is supported. + +## Installation steps +There are two compilation methods in MacOS system: + +* Compile with docker +* Local compilation + + +### ***Compile with Docker*** + +[Docker](https://docs.docker.com/install/) is an open source application container engine. Using docker, you can not only isolate the installation and use of paddlepaddle from the system environment, but also share GPU, network and other resources with the host + +Compiling PaddlePaddle with Docker,you need: + +- On the local host [Install Docker](https://hub.docker.com/search/?type=edition&offering=community) + +- Log in to Docker with Docker ID to avoid `Authenticate Failed` error + +Please follow the steps below to install: + +1. Enter the terminal of the Mac + +2. Please select the path where you want to store PaddlePaddle, and then use the following command to clone PaddlePaddle's source code from github to a folder named Paddle in the local current directory: + + `git clone https://github.com/PaddlePaddle/Paddle.git` + +3. Go to the Paddle directory: `cd Paddle` + +4. Create and enter a Docker container that meets the compilation environment: + + `docker run --name paddle-test -v $PWD:/paddle --network=host -it hub.baidubce.com/paddlepaddle/paddle:latest-dev /bin/bash` + + > --name paddle-test name the Docker container you created as paddle-test, + + > -v $PWD:/paddle mount the current directory to the /paddle directory in the Docker container (the PWD variable in Linux will expand to the current path's [Absolute path](https://baike.baidu.com/item/绝对路径/481185)), + + > -it keeps interacting with the host, `hub.baidubce.com/paddlepaddle/paddle:latest-dev` creates a Docker container with a mirror named `hub.baidubce.com/paddlepaddle/paddle:latest-dev`, /bin /bash starts the /bin/bash command after entering the container. + +5. After entering Docker, go to the paddle directory: + + `cd paddle` + +6. Switch to a more stable version to compile: + + `git checkout [name of the branch]` + + For example: + + `git checkout release/1.5` + + Note: python3.6、python3.7 version started supporting from release/1.2 branch + +7. Create and enter the /paddle/build path: + + `mkdir -p /paddle/build && cd /paddle/build` + +8. 
Use the following command to install the dependencies:
+
+        For Python2: pip install protobuf==3.1.0
+        For Python3: pip3.5 install protobuf==3.1.0
+
+    Note: We used the Python3.5 command as an example above; if your Python version is 3.6/3.7, please change Python3.5 in the commands to Python3.6/Python3.7
+
+    > Install protobuf 3.1.0.
+
+    `apt install patchelf`
+
+    > Install patchelf. PatchELF is a small and useful program for modifying the dynamic linker and RPATH of ELF executables.
+
+9. Execute cmake:
+
+    > For details on the compilation options, see the [compilation options table](../Tables_en.html/#Compile).
+    > Please remember to set the `-DPY_VERSION` parameter to the version of Python you want to compile with; for example, `-DPY_VERSION=3.5` means the Python version is 3.5.x
+
+    * For users who need to compile the **CPU version PaddlePaddle**:
+
+        `cmake .. -DPY_VERSION=3.5 -DWITH_GPU=OFF -DWITH_TESTING=OFF -DWITH_AVX=OFF -DCMAKE_BUILD_TYPE=Release`
+
+        > We currently do not support the compilation of the GPU version PaddlePaddle under MacOS.
+
+10. Execute compilation:
+
+    `make -j$(nproc)`
+
+    > Use multicore compilation
+
+11. After compiling successfully, go to the `/paddle/build/python/dist` directory and find the generated `.whl` package: `cd /paddle/build/python/dist`
+
+12. Install the compiled `.whl` package on the current machine or target machine: (For Python3: Please select the pip corresponding to the Python version you wish to use, such as pip3.5, pip3.6)
+
+        For Python2: pip install -U (whl package name)
+        For Python3: pip3.5 install -U (whl package name)
+
+    Note: We used the Python3.5 command as an example above; if your Python version is 3.6/3.7, please change Python3.5 in the commands to Python3.6/Python3.7
+
+Congratulations, now that you have successfully installed PaddlePaddle using Docker, you only need to run PaddlePaddle after entering the Docker container. For more Docker usage, please refer to the [official Docker documentation](https://docs.docker.com/).
+
+> Note: In order to reduce the size, `vim` is not installed in the PaddlePaddle Docker image by default. You can edit the code in the container after executing `apt-get install -y vim` in the container.
+
+
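+As a concrete illustration of the Python version substitution mentioned in the notes of steps 8 and 12 above, a sketch of the same in-container build for Python 3.7 would look roughly like this (whether `pip3.7` is available depends on the image you use):
+
+    # Inside the container: build and install for Python 3.7 instead of 3.5 (sketch)
+    pip3.7 install protobuf==3.1.0
+    apt install patchelf
+    cmake .. -DPY_VERSION=3.7 -DWITH_GPU=OFF -DWITH_TESTING=OFF -DWITH_AVX=OFF -DCMAKE_BUILD_TYPE=Release
+    make -j$(nproc)
+    cd /paddle/build/python/dist && pip3.7 install -U ./*.whl
+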

+### ***Local compilation*** + +**Please strictly follow the order of the following instructions** + +1. Check that your computer and operating system meet our supported compilation standards: `uname -m` and view the system version `about this Mac`. And install [OpenCV](https://opencv.org/releases.html) in advance. + +2. Install python and pip: + + > **Please do not use the Python initially given by MacOS**, we strongly recommend that you use [Homebrew](https://brew.sh/) to install python (for Python3 please use python [official download](https://www.python.org/downloads/mac-osx/) python3.5.x, python3.6.x, python3.7.x), pip and other dependencies, This will greatly reduce the difficulty of installing and compiling. + + For python2: brew install python@2 + For python3: Install using Python official website + + + > Please note that when you have multiple pythons installed on your mac, make sure that the python you are using is the python you wish to use. + +3. (Only For Python2) Set Python-related environment variables: + + - Use `find / -name libpython2.7.dylib` to find your current python `libpython2.7.dylib` path and use `export LD_LIBRARY_PATH=[libpython2.7.dylib path] && export DYLD_LIBRARY_PATH=[libpython2.7.dylib to the top two directories of the directory]` + +4. (Only For Python3) Set Python-related environment variables: + + - a. First use + ```find `dirname $(dirname + $(which python3))` -name "libpython3.*.dylib"``` + to find the path to Pythonlib (the first one it prompts is the dylib path for the python you need to use), then (below [python-lib-path] is replaced by finding the file path) + + - b. Set PYTHON_LIBRARIES: `export PYTHON_LIBRARY=[python-lib-path]` + + - c. Secondly use the path to find PythonInclude (usually find the above directory of [python-lib-path] as the include of the same directory, then find the path of python3.x or python2.x in the directory), then (the [python-include-path] in the following commands should be replaced by the path found here) + + - d. Set PYTHON_INCLUDE_DIR: `export PYTHON_INCLUDE_DIRS=[python-include-path]` + + - e. Set the system environment variable path: `export PATH=[python-bin-path]:$PATH` (here [python-bin-path] is the result of replacing the last two levels of [python-lib-path] with the path after /bin/) + + - f. Set the dynamic library link: `export LD_LIBRARY_PATH=[python-ld-path]` and `export DYLD_LIBRARY_PATH=[python-ld-path]` (here [python-ld-path] is the [python-bin-path]'s parent directory ) + + - g. (Optional) If you are compiling PaddlePaddle on MacOS 10.14, make sure you have the [appropriate version](http://developer.apple.com/download) of Xcode installed. + + +5. Before **compilation**, please confirm that the relevant dependencies mentioned in the [compilation dependency table](h../Tables.html/#third_party) are installed in your environment, otherwise we strongly recommend using `Homebrew` to install related dependencies. + + > Under MacOS, if you have not modified or installed the dependencies mentioned in the "Compile Dependency Table", you only need to use `pip` to install `numpy`, `protobuf`, `wheel`, use `homebrew` to install `wget`, `swig`,then install `cmake`. + + - a. Here is a special description of the installation of **CMake**: + + Since we are using CMake3.4 please follow the steps below: + + 1. Download the CMake image from the [official CMake website](https://cmake.org/files/v3.4/cmake-3.4.3-Darwin-x86_64.dmg) and install it. + + 2. 
Enter `sudo "/Applications/CMake.app/Contents/bin/cmake-gui" –install` in the console + + - b. If you do not want to use the system default blas and want to use your own installed OPENBLAS please read [FAQ](../FAQ.html/#OPENBLAS) + +6. Put the PaddlePaddle source cloned in the Paddle folder in the current directory and go to the Paddle directory: + + - `git clone https://github.com/PaddlePaddle/Paddle.git` + + - `cd Paddle` + +7. Switch to a more stable release branch to compile: (Note that python 3.6, python 3.7 version are supported from the 1.2 branch) + + `git checkout [name of the branch]` + + For example: + + `git checkout release/1.5` + + Note: python3.6、python3.7 version started supporting from release/1.2 branch + +8. And please create and enter a directory called build: + + `mkdir build && cd build` + +9. Execute cmake: + + > For details on the compilation options, see the [compilation options table](../Tables.html/#Compile). + + * For users who need to compile the **CPU version PaddlePaddle**: + + + For Python2: cmake .. -DWITH_FLUID_ONLY=ON -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release + For Python3: cmake .. -DPY_VERSION=3.5 -DPYTHON_INCLUDE_DIR=${PYTHON_INCLUDE_DIRS} \ + -DPYTHON_LIBRARY=${PYTHON_LIBRARY} -DWITH_FLUID_ONLY=ON -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release + + > ``-DPY_VERSION=3.5`` Please change to the Python version of the installation environment. + +10. Compile with the following command: + + `make -j4` + +11. After compiling successfully, go to the `/paddle/build/python/dist `directory and find the generated `.whl` package: `cd /paddle/build/python/dist` + +12. Install the compiled `.whl` package on the current machine or target machine: + + `pip install -U (whl package name)` or `pip3 install -U (whl package name)` + + > If you have multiple python environments and pips installed on your computer, please see the [FAQ](../Tables.html/#MACPRO). + +Congratulations, now you have completed the process of compiling PaddlePaddle using this machine. + +
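+To recap the Python 3 path through steps 4 and 9-12, a minimal sketch of the whole sequence is shown below; every bracketed placeholder must be replaced with the paths found on your own machine in step 4:
+
+    # MacOS native Python 3 CPU build (sketch; placeholders come from step 4)
+    export PYTHON_LIBRARY=[python-lib-path]            # path to libpython3.x.dylib found in step 4
+    export PYTHON_INCLUDE_DIRS=[python-include-path]
+    export PATH=[python-bin-path]:$PATH
+    export LD_LIBRARY_PATH=[python-ld-path]
+    export DYLD_LIBRARY_PATH=[python-ld-path]
+
+    cd Paddle && git checkout release/1.5
+    mkdir build && cd build
+    cmake .. -DPY_VERSION=3.5 -DPYTHON_INCLUDE_DIR=${PYTHON_INCLUDE_DIRS} \
+             -DPYTHON_LIBRARY=${PYTHON_LIBRARY} -DWITH_FLUID_ONLY=ON -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release
+    make -j4
+    cd python/dist && pip3 install -U ./*.whl
+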

+### ***Verify installation***
+
+After the installation is complete, you can use `python` or `python3` to enter the Python interpreter, run `import paddle.fluid as fluid`, and then run `fluid.install_check.run_check()` to verify that the installation was successful.
+
+If `Your Paddle Fluid is installed succesfully!` appears, the installation was successful.
+
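+Because several Python installations can coexist on MacOS, it may also help to confirm that the interpreter you verify with is the one the wheel was installed into, for example:
+
+    which python3                      # should point at the Python used for the build
+    python3 -m pip show paddlepaddle   # shows the installed package for that interpreter
+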

+### ***How to uninstall*** + +Please use the following command to uninstall PaddlePaddle (users who use Docker to install PaddlePaddle should use the following command in the container containing PaddlePaddle. Please use the corresponding version of pip): + +* ***CPU version of PaddlePaddle***: `pip uninstall paddlepaddle` or `pip3 uninstall paddlepaddle` + +Users installing PaddlePaddle with Docker, please use above commands in the container involved PaddlePaddle and attention to use the corresponding version of Pip diff --git a/doc/paddle/install/compile/compile_Ubuntu.md b/doc/paddle/install/compile/compile_Ubuntu.md new file mode 100644 index 0000000000000000000000000000000000000000..74954df2a1db1f79cc64643aeaa08c8dc64c8f0e --- /dev/null +++ b/doc/paddle/install/compile/compile_Ubuntu.md @@ -0,0 +1,274 @@ +# **Ubuntu下从源码编译** + +## 环境准备 + +* **Ubuntu 版本 (64 bit)** + * **Ubuntu 14.04 (GPU 版本支持 CUDA 10.0/10.1)** + * **Ubuntu 16.04 (GPU 版本支持 CUDA 9.0/9.1/9.2/10.0/10.1)** + * **Ubuntu 18.04 (GPU 版本支持 CUDA 10.0/10.1)** +* **Python 版本 2.7.15+/3.5.1+/3.6/3.7 (64 bit)** +* **pip或pip3 版本 9.0.1+ (64 bit)** + +## 选择CPU/GPU + +* 如果您的计算机没有 NVIDIA® GPU,请安装CPU版的PaddlePaddle + +* 如果您的计算机有 NVIDIA® GPU,并且满足以下条件,推荐安装GPU版的PaddlePaddle + * **CUDA 工具包10.0配合cuDNN v7.3+(如需多卡支持,需配合NCCL2.3.7及更高)** + * **CUDA 工具包9.0配合cuDNN v7.3+(如需多卡支持,需配合NCCL2.3.7及更高)** + * **CUDA 工具包8.0配合cuDNN v7.1+(如需多卡支持,需配合NCCL2.1.15-2.2.13)** + * **GPU运算能力超过1.0的硬件设备** + + 您可参考NVIDIA官方文档了解CUDA和CUDNN的安装流程和配置方法,请见[CUDA](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/),[cuDNN](https://docs.nvidia.com/deeplearning/sdk/cudnn-install/) + +* 请确保您已经正确安装nccl2,或者按照以下指令安装nccl2(这里提供的是ubuntu 16.04,CUDA9,cuDNN7下nccl2的安装指令),更多版本的安装信息请参考NVIDIA[官方网站](https://developer.nvidia.com/nccl): + + + wget https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1604/x86_64/nvidia-machine-learning-repo-ubuntu1604_1.0.0-1_amd64.deb + dpkg -i nvidia-machine-learning-repo-ubuntu1604_1.0.0-1_amd64.deb + sudo apt-get install -y libnccl2=2.3.7-1+cuda9.0 libnccl-dev=2.3.7-1+cuda9.0 + +## 安装步骤 + +在Ubuntu的系统下有2种编译方式: + +* 用Docker编译(暂不支持Ubuntu18.04下GPU版本) +* 本机编译 + + +### **用Docker编译** + +[Docker](https://docs.docker.com/install/)是一个开源的应用容器引擎。使用Docker,既可以将PaddlePaddle的安装&使用与系统环境隔离,也可以与主机共享GPU、网络等资源 + +使用Docker编译PaddlePaddle,您需要: + +- 在本地主机上[安装Docker](https://hub.docker.com/search/?type=edition&offering=community) + +- 如需在Linux开启GPU支持,请[安装nvidia-docker](https://github.com/NVIDIA/nvidia-docker) + +请您按照以下步骤安装: + +1. 请首先选择您希望储存PaddlePaddle的路径,然后在该路径下使用以下命令将PaddlePaddle的源码从github克隆到本地当前目录下名为Paddle的文件夹中: + + `git clone https://github.com/PaddlePaddle/Paddle.git` + +2. 进入Paddle目录下: `cd Paddle` + +3. 
创建并进入满足编译环境的Docker容器: + + * 编译CPU版本的PaddlePaddle: + + + + `docker run --name paddle-test -v $PWD:/paddle --network=host -it hub.baidubce.com/paddlepaddle/paddle:latest-dev /bin/bash` + + > --name paddle-test为您创建的Docker容器命名为paddle-test; + + + > -v $PWD:/paddle 将当前目录挂载到Docker容器中的/paddle目录下(Linux中PWD变量会展开为当前路径的[绝对路径](https://baike.baidu.com/item/绝对路径/481185)); + + + > -it 与宿主机保持交互状态,`hub.baidubce.com/paddlepaddle/paddle:latest-dev` 使用名为`hub.baidubce.com/paddlepaddle/paddle:latest-dev`的镜像创建Docker容器,/bin/bash 进入容器后启动/bin/bash命令。 + + + * 编译GPU版本的PaddlePaddle: + + + + `nvidia-docker run --name paddle-test -v $PWD:/paddle --network=host -it hub.baidubce.com/paddlepaddle/paddle:latest-dev /bin/bash` + + > --name paddle-test为您创建的Docker容器命名为paddle-test; + + + > -v $PWD:/paddle 将当前目录挂载到Docker容器中的/paddle目录下(Linux中PWD变量会展开为当前路径的[绝对路径](https://baike.baidu.com/item/绝对路径/481185)); + + + > -it 与宿主机保持交互状态,`hub.baidubce.com/paddlepaddle/paddle:latest-dev` 使用名为`hub.baidubce.com/paddlepaddle/paddle:latest-dev`的镜像创建Docker容器,/bin/bash 进入容器后启动/bin/bash命令。 + + + > 注意:hub.baidubce.com/paddlepaddle/paddle:latest-dev内部安装CUDA 8.0。 + + +4. 进入Docker后进入paddle目录下: + + `cd paddle` + +5. 切换到较稳定release分支下进行编译: + + `git checkout [分支名]` + + 例如: + + `git checkout release/1.5` + + 注意:python3.6、python3.7版本从release/1.2分支开始支持 + +6. 创建并进入/paddle/build路径下: + + `mkdir -p /paddle/build && cd /paddle/build` + +7. 使用以下命令安装相关依赖: + + For Python2: pip install protobuf + For Python3: pip3.5 install protobuf + + 注意:以上用Python3.5命令来举例,如您的Python版本为3.6/3.7,请将上述命令中的Python3.5改成Python3.6/Python3.7 + + > 安装protobuf。 + + `apt install patchelf` + + > 安装patchelf + 这是一个小而实用的程序,用于修改ELF可执行文件的动态链接器和RPATH + +8. 执行cmake: + + >具体编译选项含义请参见[编译选项表](../Tables.html#Compile) + + >请注意修改参数`-DPY_VERSION`为您希望编译使用的python版本, 例如`-DPY_VERSION=3.5`表示python版本为3.5.x + + * 编译**CPU版本PaddlePaddle**: + + `cmake .. -DPY_VERSION=3.5 -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release` + + * 编译**GPU版本PaddlePaddle**: + + `cmake .. -DPY_VERSION=3.5 -DWITH_GPU=ON -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release` + +9. 执行编译: + + `make -j$(nproc)` + + > 使用多核编译 + +10. 编译成功后进入`/paddle/build/python/dist`目录下找到生成的`.whl`包: `cd /paddle/build/python/dist` + +11. 在当前机器或目标机器安装编译好的`.whl`包: + + For Python2: pip install -U(whl包的名字) + For Python3: pip3.5 install -U(whl包的名字) + + 注意:以上涉及Python3的命令,用Python3.5来举例,如您的Python版本为3.6/3.7,请将上述命令中的Python3.5改成Python3.6/Python3.7 + +恭喜,至此您已完成PaddlePaddle的编译安装。您只需要进入Docker容器后运行PaddlePaddle,即可开始使用。更多Docker使用请参见[Docker官方文档](https://docs.docker.com) + +> 注:PaddlePaddle Docker镜像为了减小体积,默认没有安装`vim`,您可以在容器中执行 `apt-get install -y vim` 来安装。 + + +### **本机编译** + +1. 检查您的计算机和操作系统是否符合我们支持的编译标准: `uname -m && cat /etc/*release` + +2. 更新`apt`的源: `apt update`, 并提前安装[OpenCV](https://opencv.org/releases.html) + +3. 我们支持使用virtualenv进行编译安装,首先请使用以下命令创建一个名为`paddle-venv`的虚环境: + + * a. 安装Python-dev(请注意Ubuntu16.04下的python2.7不支持gcc4.8,请使用gcc5.4编译Paddle): + + For Python2: apt install python-dev + For Python3: apt install python3.5-dev + + * b. 安装pip: (请保证拥有9.0.1及以上版本的pip): + + For Python2: apt install python-pip + For Python3: apt-get update && apt-get install -y software-properties-common && add-apt-repository ppa:deadsnakes/ppa && apt install curl && curl https://bootstrap.pypa.io/get-pip.py -o - | python3.5 && easy_install pip + + * c. 安装虚环境`virtualenv`以及`virtualenvwrapper`并创建名为`paddle-venv`的虚环境: + + 1. `apt install virtualenv` 或 `pip install virtualenv` 或 `pip3 install virtualenv` + 2. 
`apt install virtualenvwrapper` 或 `pip install virtualenvwrapper` 或 `pip3 install virtualenvwrapper` + 3. 找到`virtualenvwrapper.sh`: `find / -name virtualenvwrapper.sh` + 4. (Only for Python3) 设置虚环境的解释器路径:`export VIRTUALENVWRAPPER_PYTHON=/usr/bin/python3.5` + 5. 查看`virtualenvwrapper.sh`中的安装方法: `cat virtualenvwrapper.sh`, 该shell文件中描述了步骤及命令 + 6. 按照`virtualenvwrapper.sh`中的描述,安装`virtualwrapper` + 7. 设置VIRTUALENVWRAPPER_PYTHON:`export VIRTUALENVWRAPPER_PYTHON=[python-lib-path]:$PATH` (这里将[python-lib-path]的最后两级目录替换为/bin/) + 8. 创建名为`paddle-venv`的虚环境: `mkvirtualenv paddle-venv` + + 注意:以上涉及Python3的命令,用Python3.5来举例,如您的Python版本为3.6/3.7,请将上述命令中的Python3.5改成Python3.6/Python3.7 + +4. 进入虚环境:`workon paddle-venv` + +5. **执行编译前**请您确认在虚环境中安装有[编译依赖表](../Tables.html#third_party)中提到的相关依赖: + + * 这里特别提供`patchELF`的安装方法,其他的依赖可以使用`apt install`或者`pip install` 后跟依赖名称和版本安装: + + `apt install patchelf` + + > 不能使用apt安装的用户请参见patchElF github[官方文档](https://gist.github.com/ruario/80fefd174b3395d34c14) + +5. 将PaddlePaddle的源码clone在当下目录下的Paddle的文件夹中,并进入Padde目录下: + + - `git clone https://github.com/PaddlePaddle/Paddle.git` + + - `cd Paddle` + +6. 切换到较稳定release分支下进行编译,将中括号以及其中的内容替换为**目标分支名**: + + `git checkout [分支名]` + + 例如: + + `git checkout release/1.5` + +7. 并且请创建并进入一个叫build的目录下: + + `mkdir build && cd build` + +8. 执行cmake: + + >具体编译选项含义请参见[编译选项表](../Tables.html#Compile) + + * 对于需要编译**CPU版本PaddlePaddle**的用户: + + For Python2: cmake .. -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release + For Python3: cmake .. -DPY_VERSION=3.5 -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release + + * 对于需要编译**GPU版本PaddlePaddle**的用户:(**仅支持ubuntu16.04/14.04**) + + 1. 请确保您已经正确安装nccl2,或者按照以下指令安装nccl2(这里提供的是ubuntu 16.04,CUDA9,cuDNN7下nccl2的安装指令),更多版本的安装信息请参考NVIDIA[官方网站](https://developer.nvidia.com/nccl): + + + i. `wget https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1604/x86_64/nvidia-machine-learning-repo-ubuntu1604_1.0.0-1_amd64.deb` + + ii. `dpkg -i nvidia-machine-learning-repo-ubuntu1604_1.0.0-1_amd64.deb` + + + iii. `sudo apt-get install -y libnccl2=2.3.7-1+cuda9.0 libnccl-dev=2.3.7-1+cuda9.0` + + 2. 如果您已经正确安装了`nccl2`,就可以开始cmake了:(*For Python3: 请给PY_VERSION参数配置正确的python版本*) + + For Python2: cmake .. -DWITH_GPU=ON -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release + For Python3: cmake .. -DPY_VERSION=3.5 -DWITH_GPU=ON -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release + + 注意:以上涉及Python3的命令,用Python3.5来举例,如您的Python版本为3.6/3.7,请将上述命令中的Python3.5改成Python3.6/Python3.7 + +9. 使用以下命令来编译: + + `make -j$(nproc)` + + > 使用多核编译 + + > 如果编译过程中显示“Too many open files”错误时,请使用指令 ulimit -n 8192 来增大当前进程允许打开的文件数,一般来说8192可以保证编译完成。 + +10. 编译成功后进入`/paddle/build/python/dist`目录下找到生成的`.whl`包: `cd /paddle/build/python/dist` + +11. 
在当前机器或目标机器安装编译好的`.whl`包: + + `pip install -U(whl包的名字)`或`pip3 install -U(whl包的名字)` + +恭喜,至此您已完成PaddlePaddle的编译安装 + +## **验证安装** +安装完成后您可以使用 `python` 或 `python3` 进入python解释器,输入`import paddle.fluid as fluid` ,再输入 + `fluid.install_check.run_check()` + +如果出现`Your Paddle Fluid is installed succesfully!`,说明您已成功安装。 + +## **如何卸载** +请使用以下命令卸载PaddlePaddle: + +* **CPU版本的PaddlePaddle**: `pip uninstall paddlepaddle` 或 `pip3 uninstall paddlepaddle` + +* **GPU版本的PaddlePaddle**: `pip uninstall paddlepaddle-gpu` 或 `pip3 uninstall paddlepaddle-gpu` + +使用Docker安装PaddlePaddle的用户,请进入包含PaddlePaddle的容器中使用上述命令,注意使用对应版本的pip diff --git a/doc/paddle/install/compile/compile_Ubuntu_en.md b/doc/paddle/install/compile/compile_Ubuntu_en.md new file mode 100644 index 0000000000000000000000000000000000000000..c858da3fe4f73c9c5ab942142a9f647b6b537fda --- /dev/null +++ b/doc/paddle/install/compile/compile_Ubuntu_en.md @@ -0,0 +1,278 @@ +# **Compile on Ubuntu from Source Code** + +## Environment preparation + +* **Ubuntu version (64 bit)** + * **Ubuntu 14.04 (GPU version supports CUDA 10.0/10.1)** + * **Ubuntu 16.04 (GPU version supports CUDA 9.0/9.1/9.2/10.0/10.1)** + * **Ubuntu 18.04 (GPU version supports CUDA 10.0/10.1)** +* **Python version 2.7.15+/3.5.1+/3.6/3.7 (64 bit)** +* **pip or pip3 version 9.0.1+ (64 bit)** + +## Choose CPU/GPU + +* If your computer doesn't have NVIDIA® GPU, please install CPU version of PaddlePaddle + +* If your computer has NVIDIA® GPU, and the following conditions are met,GPU version of PaddlePaddle is recommended. + * **CUDA toolkit 10.0 with cuDNN v7.3+(for multi card support, NCCL2.3.7 or higher)** + * **CUDA toolkit 9.0 with cuDNN v7.3+(for multi card support, NCCL2.3.7 or higher)** + * **CUDA toolkit 8.0 with cuDNN v7.1+(for multi card support, NCCL2.1.15-2.2.13)** + * **Hardware devices with GPU computing power over 1.0** + + You can refer to NVIDIA official documents for installation process and configuration method of CUDA and cudnn. Please refer to[CUDA](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/),[cuDNN](https://docs.nvidia.com/deeplearning/sdk/cudnn-install/) + +* * If you need to use multi card environment, please make sure that you have installed nccl2 correctly, or install nccl2 according to the following instructions (here is the installation instructions of nccl2 under ubuntu 16.04, CUDA9 and cuDNN7). For more version of installation information, please refer to NVIDIA[official website](https://developer.nvidia.com/nccl): + + + wget https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1604/x86_64/nvidia-machine-learning-repo-ubuntu1604_1.0.0-1_amd64.deb + dpkg -i nvidia-machine-learning-repo-ubuntu1604_1.0.0-1_amd64.deb + sudo apt-get install -y libnccl2=2.3.7-1+cuda9.0 libnccl-dev=2.3.7-1+cuda9.0 + +## Installation steps + +There are two compilation methods in Ubuntu system: + +* Compile with docker (GPU version under Ubuntu 18.04 is not supported temporarily) +* Local compilation + + +### **Compile with docker** + +[Docker](https://docs.docker.com/install/) is an open source application container engine. 
Using docker, you can not only isolate the installation and use of paddlepaddle from the system environment, but also share GPU, network and other resources with the host + +Compiling PaddlePaddle with Docker,you need: + +- On the local host [Install Docker](https://hub.docker.com/search/?type=edition&offering=community) + +- To enable GPU support on Linux, please [Install nvidia-docker](https://github.com/NVIDIA/nvidia-docker) + +Please follow the steps below to install: + +1. First, select the path where you want to store PaddlePaddle, and then use the following command to clone the source code of PaddlePaddle from GitHub to the folder named Paddle under the current local directory: + + `git clone https://github.com/PaddlePaddle/Paddle.git` + +2. Enter the Paddle Directory: `cd Paddle` + +3. Create and enter a Docker container that meets the compilation environment: + + * Compile CPU version of PaddlePaddle: + + + + `docker run --name paddle-test -v $PWD:/paddle --network=host -it hub.baidubce.com/paddlepaddle/paddle:latest-dev /bin/bash` + + > --name paddle-test names the Docker container you created as paddle-test; + + + > -v $PWD:/paddle mount the current directory to the /paddle directory in the docker container (PWD variable in Linux will be expanded to [absolute path](https://baike.baidu.com/item/绝对路径/481185) of the current path); + + + > -it keeps interaction with the host,`hub.baidubce.com/paddlepaddle/paddle:latest-dev` use the image named `hub.baidubce.com/paddlepaddle/paddle:latest-dev` to create Docker container, /bin/bash start the /bin/bash command after entering the container. + + + * Compile GPU version of PaddlePaddle: + + + + `nvidia-docker run --name paddle-test -v $PWD:/paddle --network=host -it hub.baidubce.com/paddlepaddle/paddle:latest-dev /bin/bash` + + > --name paddle-test names the Docker container you created as paddle-test; + + + > -v $PWD:/paddle mount the current directory to the /paddle directory in the docker container (PWD variable in Linux will be expanded to [absolute path](https://baike.baidu.com/item/绝对路径/481185) of the current path); + + + > -it keeps interaction with the host,`hub.baidubce.com/paddlepaddle/paddle:latest-dev` use the image named `hub.baidubce.com/paddlepaddle/paddle:latest-dev` to create Docker container, /bin/bash start the /bin/bash command after entering the container. + + + > Note: hub.baidubce.com/paddlepaddle/paddle:latest-dev internally install CUDA 8.0. + +4. After entering Docker, enter the Paddle Directory: + + `cd paddle` + +5. Switch to a more stable release branch for compilation: + + `git checkout [name of the branch]` + + For example: + + `git checkout release/1.5` + + Note: python3.6、python3.7 version started supporting from release/1.2 branch + +6. Create and enter /paddle/build Directory: + + `mkdir -p /paddle/build && cd /paddle/build` + +7. Use the following command to install dependencies: + + For Python2: pip install protobuf + For Python3: pip3.5 install protobuf + + Note: We used Python3.5 command as an example above, if the version of your Python is 3.6/3.7, please change Python3.5 in the commands to Python3.6/Python3.7 + + > Install protobuf + + `apt install patchelf` + + > Install patchelf + This is a small but useful program, it can be used to modify dynamic linker and RPATH of ELF executable + +8. 
Execute cmake: + + > For the specific meaning of compilation options, you can read [Compile options table](../Tables.html#Compile) + + > Please attention to modify parameters `-DPY_VERSION` for the version of Python you want to compile with, for example `-DPY_VERSION=3.5` means the version of python is 3.5.x + + * Compile**CPU version of PaddlePaddle**: + + `cmake .. -DPY_VERSION=3.5 -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release` + + * Compile**GPU version of PaddlePaddle**: + + `cmake .. -DPY_VERSION=3.5 -DWITH_GPU=ON -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release` + +9. Execute compiling: + + `make -j$(nproc)` + + > Use multicore compilation + +10. after compiling successful, enter `/paddle/build/python/dist` Directory and find generated `.whl` package: `cd /paddle/build/python/dist` + +11. Install the compiled `.whl` package on the current machine or target machine: + + For Python2: pip install -U(whl package name) + For Python3: pip3.5 install -U(whl package name) + + Note: For the command involving Python 3, we use Python 3.5 as an example above, if the version of your Python is 3.6/3.7, please change Python3.5 in the commands to Python3.6/Python3.7 + +Congratulations, now you have completed the compilation and installation of PaddlePaddle. You only need to enter the Docker container and run PaddlePaddle to start using. For more Docker usage, please refer to [official docker documentation](https://docs.docker.com) + +> Note: In order to reduce the size, `vim` is not installed in PaddlePaddle Docker image by default. You can edit the code in the container after executing `apt-get install -y vim` in the container. + + +### ***Local compilation*** + +**Please strictly follow the following instructions step by step** + +1. Check that your computer and operating system meet the compilation standards we support: `uname -m && cat /etc/*release` + +2. Update the source of `apt`: `apt update`, and install openCV in advance. + +3. We support compiling and installing with virtualenv. First, create a virtual environment called `paddle-venv` with the following command: + + * a. Install Python-dev: (Please note that gcc4.8 is not supported in python2.7 under Ubuntu 16.04, please use gcc5.4 to compile Paddle) + + For Python2: apt install python-dev + For Python3: apt install python3.5-dev + + * b. Install pip: (Please ensure that pip version is 9.0.1 and above ): + + For Python2: apt install python-pip + For Python3: apt-get udpate && apt-get install -y software-properties-common && add-apt-repository ppa:deadsnakes/ppa && apt install curl && curl https://bootstrap.pypa.io/get-pip. Py -o - | python3.5 && easy_install pip + + + * c. Install the virtual environment `virtualenv` and `virtualenvwrapper` and create a virtual environment called `paddle-venv` : + + 1. `apt install virtualenv` or `pip install virtualenv` or `pip3 install virtualenv` + 2. `apt install virtualenvwrapper` or `pip install virtualenvwrapper` or `pip3 install virtualenvwrapper` + 3. Find `virtualenvwrapper.sh`: `find / -name virtualenvwrapper.sh` + 4. (Only for Python3) Set the interpreter path for the virtual environment: `export VIRTUALENVWRAPPER_PYTHON=/usr/bin/python3.5` + 5. See the installation method in `virtualenvwrapper.sh`: `cat virtualenvwrapper.sh`, this shell file describes the steps and commands + 6. Install `virtualwrapper` according to the installation method in `virtualenvwrapper.sh` + 7. 
Set VIRTUALENVWRAPPER_PYTHON:`export VIRTUALENVWRAPPER_PYTHON=[python-lib-path]:$PATH` (Here, replace the last two directories of [python-lib-path] with /bin/) + 8. Create a virtual environment called `paddle-venv`: `mkvirtualenv paddle-venv` + + Note: for the above commands involving Python 3, we use Python 3.5 as an example. If your Python version is 3.6 / 3.7, please change Python 3.5 in the above commands to Python 3.6 / Python 3.7 + +4. Enter the virtual environment: `workon paddle-venv` + +5. Before **executing the compilation**, please confirm that the related dependencies mentioned in [the compile dependency table](../Tables.html/#third_party) are installed in the virtual environment: + + * Here is the installation method for `patchELF`. Other dependencies can be installed using `apt install` or `pip install` followed by the name and version: + + `apt install patchelf` + + > Users who can't use apt installation can refer to patchElF [github official documentation](https://gist.github.com/ruario/80fefd174b3395d34c14). + +6. Clone the PaddlePaddle source code in the Paddle folder in the current directory and go to the Paddle directory: + + - `git clone https://github.com/PaddlePaddle/Paddle.git` + + - `cd Paddle` + +7. Switch to a more stable release branch to compile, replacing the brackets and their contents with **the target branch name**: + + - `git checkout [name of target branch]` + + For example: + + `git checkout release/1.5` + +8. And please create and enter a directory called build: + + `mkdir build && cd build` + +9. Execute cmake: + + > For details on the compilation options, see [the compilation options table](../Tables.html/#Compile). + + * For users who need to compile the **CPU version of PaddlePaddle**: (For Python3: Please configure the correct python version for the PY_VERSION parameter) + + For Python2: cmake .. -DWITH_FLUID_ONLY=ON -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release + For Python3: cmake .. -DPY_VERSION=3.5 -DWITH_FLUID_ONLY=ON -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release + + + * For users who need to compile **GPU version of PaddlePaddle**: (*only support ubuntu16.04/14.04*) + + 1. Please make sure that you have installed nccl2 correctly, or install nccl2 according to the following instructions (here is ubuntu 16.04, CUDA9, ncDNN7 nccl2 installation instructions), for more information on the installation information please refer to the [NVIDIA official website](https://developer.nvidia.com/nccl/nccl-download): + + i. `wget http: / /developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1604/x86_64/nvidia-machine-learning-repo-ubuntu1604_1.0.0-1_amd64.deb ` + + ii. `dpkg -i nvidia-machine-learning-repo-ubuntu1604_1 .0.0-1_amd64.deb` + + iii. `sudo apt-get install -y libnccl2=2.2.13-1+cuda9.0 libnccl-dev=2.2.13-1+cuda9.0` + + 2. If you have already installed `nccl2` correctly, you can start cmake: *(For Python3: Please configure the correct python version for the PY_VERSION parameter)* + + For Python2: cmake .. -DWITH_FLUID_ONLY=ON -DWITH_GPU=ON -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release + For Python3: cmake .. -DPY_VERSION=3.5 -DWITH_FLUID_ONLY=ON -DWITH_GPU=ON -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release + + + Note: We used Python3.5 command as an example above, if the version of your Python is 3.6/3.7, please change Python3.5 in the commands to Python3.6/Python3.7 + +10. 
Compile with the following command:
+
+    `make -j$(nproc)`
+
+    > Use multicore compilation
+
+    > If a "Too many open files" error is displayed during compilation, please use the command `ulimit -n 8192` to increase the number of files the current process is allowed to open. Generally speaking, 8192 is enough for the compilation to complete.
+
+11. After compiling successfully, go to the `/paddle/build/python/dist` directory and find the generated `.whl` package: `cd /paddle/build/python/dist`
+
+12. Install the compiled `.whl` package on the current machine or target machine:
+
+    `pip install -U (whl package name)` or `pip3 install -U (whl package name)`
+
+Congratulations, now you have completed the process of compiling PaddlePaddle natively.
+
+
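+For reference, the GPU path through steps 9-12 can be strung together roughly as follows. This is only a sketch for the Ubuntu 16.04 / CUDA 9.0 / Python 3.5 example used above; adjust the NCCL package versions and `PY_VERSION` to your environment:
+
+    # Install nccl2 (Ubuntu 16.04 / CUDA 9.0 example from this guide)
+    wget https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1604/x86_64/nvidia-machine-learning-repo-ubuntu1604_1.0.0-1_amd64.deb
+    dpkg -i nvidia-machine-learning-repo-ubuntu1604_1.0.0-1_amd64.deb
+    sudo apt-get install -y libnccl2=2.2.13-1+cuda9.0 libnccl-dev=2.2.13-1+cuda9.0
+
+    # Configure, build and install the GPU wheel for Python 3.5
+    # (run from the Paddle/build directory created in step 8)
+    cmake .. -DPY_VERSION=3.5 -DWITH_FLUID_ONLY=ON -DWITH_GPU=ON -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release
+    make -j$(nproc)
+    cd python/dist && pip3 install -U ./*.whl
+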

+### ***Verify installation***
+
+After the installation is complete, you can use `python` or `python3` to enter the Python interpreter, run `import paddle.fluid as fluid`, and then run `fluid.install_check.run_check()` to verify that the installation was successful.
+
+If `Your Paddle Fluid is installed succesfully!` appears, the installation was successful.
+
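+If you built the GPU version, you may additionally want to check that the installed package was compiled with CUDA support; in the fluid API of these releases a check along these lines should work:
+
+    python3 -c "import paddle.fluid as fluid; print(fluid.is_compiled_with_cuda())"   # expected to print True for a GPU build
+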

+### ***How to uninstall*** +Please use the following command to uninstall PaddlePaddle: + +- ***CPU version of PaddlePaddle***: `pip uninstall paddlepaddle` or `pip3 uninstall paddlepaddle` + +- ***GPU version of PaddlePaddle***: `pip uninstall paddlepaddle-gpu` or `pip3 uninstall paddlepaddle-gpu` + +Users installing PaddlePaddle with Docker, please use above commands in the container involved PaddlePaddle and attention to use the corresponding version of Pip diff --git a/doc/paddle/install/compile/compile_Windows.md b/doc/paddle/install/compile/compile_Windows.md new file mode 100644 index 0000000000000000000000000000000000000000..1628169aaa3998d0dd9a934513654efc73ddbb37 --- /dev/null +++ b/doc/paddle/install/compile/compile_Windows.md @@ -0,0 +1,115 @@ +# **Windows下从源码编译** + +## 环境准备 + +* **Windows 7/8/10 专业版/企业版 (64bit)** + * **GPU版本支持CUDA 9.0/9.1/9.2/10.0/10.1,且仅支持单卡** +* **Python 版本 2.7.15+/3.5.1+/3.6/3.7 (64 bit)** +* **pip 版本 9.0.1+ (64 bit)** +* **Visual Studio 2015 Update3** + +## 选择CPU/GPU + +* 如果您的计算机没有 NVIDIA® GPU,请编译CPU版的PaddlePaddle + +* 如果您的计算机有NVIDIA® GPU,并且满足以下条件,推荐编译GPU版的PaddlePaddle + * **CUDA 工具包 9.0/9.1/9.2/10.0/10.1 配合 cuDNN v7.4+** + * **GPU运算能力超过1.0的硬件设备** + +## 安装步骤 + +在Windows的系统下提供1种编译方式: + +* 本机编译(暂不支持NCCL,分布式等相关功能) + + +### **本机编译** + +1. 安装必要的工具 cmake,git 以及 python: + + > cmake我们支持3.10以上版本,但GPU编译时3.12/3.13/3.14版本存在官方[Bug](https://cmake.org/pipermail/cmake/2018-September/068195.html),我们建议您使用CMake3.16版本,可在官网[下载](https://cmake.org/download/),并添加到环境变量中。 + + > python 需要 2.7 及以上版本, 可在官网[下载](https://www.python.org/download/releases/2.7/)。 + + * 安装完python 后请通过 `python --version` 检查python版本是否是预期版本,因为您的计算机可能安装有多个python,您可通过修改环境变量的顺序来处理多个python时的冲突。 + + > 需要安装`numpy, protobuf, wheel` 。 请使用`pip`命令; + + * 安装 numpy 包可以通过命令 `pip install numpy` + * 安装 protobuf 包可以通过命令 `pip install protobuf` + * 安装 wheel 包可以通过命令 `pip install wheel` + + > git可以在官网[下载](https://gitforwindows.org/),并添加到环境变量中。 + + > 如果您使用的是Python2,还需要安装[Microsoft Visual C++ Compiler for Python 2.7](https://www.microsoft.com/en-us/download/details.aspx?id=44266) + +2. 将PaddlePaddle的源码clone在当前目录下的Paddle的文件夹中,并进入Padde目录下: + + - `git clone https://github.com/PaddlePaddle/Paddle.git` + - `cd Paddle` + +3. 切换到较稳定release分支下进行编译: + + `git checkout [分支名]` + + 例如: + + `git checkout release/1.7` + + 注意:python3.6、python3.7版本从release/1.2分支开始支持 + +4. 创建名为build的目录并进入: + + - `mkdir build` + - `cd build` + +5. 执行cmake: + + > 具体编译选项含义请参见[编译选项表](../Tables.html#Compile) + + * 编译**CPU版本PaddlePaddle**: + + `cmake .. -G "Visual Studio 14 2015 Win64" -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release` + + * 编译**GPU版本PaddlePaddle**: + + `cmake .. -G "Visual Studio 14 2015 Win64" -DWITH_GPU=ON -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release` + + 默认为Python2,Python3请添加: + + > -DPY_VERSION=3(或3.5、3.6、3.7) + + 如果你的设备信息包含多个Python或CUDA版本,你也可以通过设置路径变量,来指定特定版本的Python或CUDA: + + > -DPYTHON_EXECUTABLE: python的安装目录 + + > -DCUDA_TOOLKIT_ROOT_DIR: cuda的安装目录 + + 例如:(仅作示例,请根据你的设备路径信息进行设置) + + `cmake .. -G "Visual Studio 14 2015 Win64" -DCMAKE_BUILD_TYPE=Release -DWITH_GPU=ON -DWITH_TESTING=OFF -DPYTHON_EXECUTABLE=C:\\Python36\\python.exe -DCUDA_TOOLKIT_ROOT_DIR="C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\v10.0"` + +6. 使用Blend for Visual Studio 2015 打开 `paddle.sln` 文件,选择平台为 `x64`,配置为 `Release`,开始编译。 + +7. 编译成功后进入 `\Paddle\build\python\dist` 目录下找到生成的 `.whl` 包: + + `cd \Paddle\build\python\dist` + +8. 
安装编译好的 `.whl` 包: + + `pip install -U(whl包的名字)` + +恭喜,至此您已完成PaddlePaddle的编译安装 + +## **验证安装** +安装完成后您可以使用 `python` 或 `python3` 进入python解释器,输入`import paddle.fluid as fluid` ,再输入 + `fluid.install_check.run_check()` + +如果出现`Your Paddle Fluid is installed succesfully!`,说明您已成功安装。 + +## **如何卸载** +请使用以下命令卸载PaddlePaddle: + +* **CPU版本的PaddlePaddle**: `python -m pip uninstall paddlepaddle` + +* **GPU版本的PaddlePaddle**: `python -m pip uninstall paddlepaddle-gpu` diff --git a/doc/paddle/install/compile/compile_Windows_en.md b/doc/paddle/install/compile/compile_Windows_en.md new file mode 100644 index 0000000000000000000000000000000000000000..0eb652a7dcc6fd1da43d7539a7b6b1736bc63562 --- /dev/null +++ b/doc/paddle/install/compile/compile_Windows_en.md @@ -0,0 +1,120 @@ +# **Compile on Windows from Source Code** + +## Environment preparation + +* **Windows 7/8/10 Pro/Enterprise(64bit)** + * **GPU Version support CUDA 9.0/9.1/9.2/10.0/10.1, and only support single GPU** +* **Python version 2.7.15+/3.5.1+/3.6/3.7(64bit)** +* **pip version 9.0.1+(64bit)** +* **Visual Studio 2015 Update3** + +## Choose CPU/GPU + +* If your computer doesn't have NVIDIA® GPU, please install CPU version of PaddlePaddle + +* If your computer has NVIDIA® GPU, and the following conditions are met,GPU version of PaddlePaddle is recommended. + * **CUDA toolkit 9.0/9.1/9.2/10.0/10.1 with cuDNN v7.4+** + * **GPU's computing capability exceeds 1.0** + +## Installation steps + +There is one compilation methods in Windows system: + +* Direct native source code compilation(NCCL, distributed and other related functions are not supported temporarily) + + +### ***Direct native source code compilation*** + +**Please strictly follow the following instructions step by step** + +1. Install the necessary tools i.e. cmake, git and python: + + > CMake requires version 3.10 and above, but there are official [Bug](https://cmake.org/pipermail/cmake/2018-September/068195.html) versions of 3.12/3.13/3.14 when the GPU is compiled, we recommend that you use CMake3. 16 version, available on the official website [download] (https://cmake.org/download/), and add to the ring Environment variables. + + > Python requires version 2.7 and above, which can be downloaded from the [official website](https://www.python.org/download/releases/2.7/). + + * After installing python, please check whether the python version is the expected version by `python-version`, because you may have more than one python installed on your computer. You can handle conflicts of multiple pythons by changing the order of the environment variables. + + > `numpy, protobuf, wheel` are needed to be installed. Use the 'pip' command. + + * To Install numpy package you can use command `pip install numpy` + + * To Install protobuf package you can use command `pip install protobuf` + + * To Install Wheel package you can use command `pip install wheel` + + > Git can be downloaded on the [official website](https://gitforwindows.org/) and added to the environment variable. + + > If you are using Python 2, you need to install [Microsoft Visual C++ Compiler for Python 2.7](https://www.microsoft.com/en-us/download/details.aspx?id=44266) + +2. Clone the PaddlePaddle source code to the Paddle subdirectories of the current directory and go to the Paddle subdirectories: + + - `git clone https://github.com/PaddlePaddle/Paddle.git` + - `cd Paddle` + +3. 
Switch to a more stable release branch for compilation: + + `git checkout [name of the branch]` + + For example: + + `git checkout release/1.7` + + Note: python3.6、python3.7 version started supporting from release/1.2 + +4. Create a directory called build and enter it: + + - `mkdir build` + - `cd build` + +5. Execute cmake: + + > For details on the compilation options, see [the compilation options list](../Tables.html/#Compile). + * For users who need to compile **the CPU version PaddlePaddle**: + + `cmake .. -G "Visual Studio 14 2015 Win64" -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release` + + * For users who need to compile **the GPU version PaddlePaddle**: + + `cmake .. -G "Visual Studio 14 2015 Win64" -DWITH_GPU=ON -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release` + + Python2 by default,Python3 please add: + + > -DPY_VERSION=3 (or 3.5、3.6、3.7) + + If your device information contains multiple Python or CUDA, you can also specify a specific version of Python or CUDA by setting the corresponding compile options: + + > -DPYTHON_EXECUTABLE: the installation path of python + + > -DCUDA_TOOLKIT_ROOT_DIR: the installation path of CUDA + + For example: (for instance only, please set it according to your actual installation path) + + `cmake .. -G "Visual Studio 14 2015 Win64" -DCMAKE_BUILD_TYPE=Release -DWITH_GPU=ON -DWITH_TESTING=OFF -DPYTHON_EXECUTABLE=C:\\Python36\\python.exe -DCUDA_TOOLKIT_ROOT_DIR="C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\v10.0"` + +6. Use Blend for Visual Studio 2015 to open `paddle.sln` file, select the platform `x64`, configure with `Release`, then begin to compile + +7. After compilation successfully, go to the `\paddle\build\python\dist` directory and find the generated `.whl` package: + + `cd \paddle\build\python\dist` + +8. Install the generated `.whl` package: + + `pip install -U (whl package name)` + +Congratulations, you have completed the process of compiling PaddlePaddle successfully! + +### ***Verify installation*** + +After the compilation and installation is completed, you can use `python` to enter the Python interface, input `import paddle.fluid as fluid` and then `fluid.install_check.run_check()` to verify that the installation was successful. + +If `Your Paddle Fluid is installed succesfully!` appears, it means the compilation and installation was successful. + + +### ***How to uninstall*** + +Please use the following command to uninstall PaddlePaddle: + +* ***CPU version of PaddlePaddle*** : `pip uninstall paddlepaddle` + +* ***GPU version of PaddlePaddle*** : `pip uninstall paddlepaddle-gpu` diff --git a/doc/paddle/install/compile/fromsource.rst b/doc/paddle/install/compile/fromsource.rst new file mode 100644 index 0000000000000000000000000000000000000000..6af43229020691d31b5c1e935f5ff81bc36da6f6 --- /dev/null +++ b/doc/paddle/install/compile/fromsource.rst @@ -0,0 +1,11 @@ +=========================== +**从源码编译** +=========================== + +.. toctree:: + :maxdepth: 1 + + compile_Ubuntu.md + compile_CentOS.md + compile_MacOS.md + compile_Windows.md diff --git a/doc/paddle/install/compile/fromsource_en.rst b/doc/paddle/install/compile/fromsource_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..970a0abcb59f687fb78ac976be60b4ce4a963f7b --- /dev/null +++ b/doc/paddle/install/compile/fromsource_en.rst @@ -0,0 +1,13 @@ +============================== +**Compile From Source Code** +============================== + +You can also choose to compile and install PaddlePaddle in the way of source code compilation. 
However, due to the diversity of the native environment, complicated problems may occur when compiling the source code, which may cause your installation to fail. In order to ensure your smooth installation, it is recommended that you prefer the normal installation method. + +.. toctree:: + + + compile_Ubuntu_en.md + compile_CentOS_en.md + compile_MacOS_en.md + compile_Windows_en.md diff --git a/doc/paddle/install/index_cn.rst b/doc/paddle/install/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8fe0f24e58e9454c89631a7cd404e3c4dc90d863 --- /dev/null +++ b/doc/paddle/install/index_cn.rst @@ -0,0 +1,249 @@ +.. _install_introduction: + +######### + 安装说明 +######### +本说明将指导您在64位操作系统编译和安装PaddlePaddle + +1. 操作系统要求: +============================ + +* Windows 7 / 8 / 10,专业版 / 企业版 +* Ubuntu 14.04 / 16.04 / 18.04 +* CentOS 6 / 7 +* MacOS 10.11 / 10.12 / 10.13 / 10.14 +* 操作系统要求是 64 位版本 + +2. 处理器要求 +============================ + +* 处理器支持 MKL +* 处理器架构是x86_64(或称作 x64、Intel 64、AMD64)架构,目前PaddlePaddle不支持arm64架构 + +3. Python 和 pip 版本要求: +============================ + +* Python 2 的版本要求 2.7.15+ +* Python 3 的版本要求 3.5.1+/3.6/3.7 +* Python 具有 pip, 且 pip 的版本要求 9.0.1+ +* Python 和 pip 要求是 64 位版本 + +4. PaddlePaddle 对 GPU 支持情况: +================================= + +* 目前 **PaddlePaddle** 仅支持 **NVIDIA** 显卡的 **CUDA** 驱动 +* 需要安装 `cuDNN `_ ,版本要求 7.6+(For CUDA9/10) +* 如果您需要 GPU 多卡模式,需要安装 `NCCL 2 `_ + + * 仅 Ubuntu/CentOS 支持 NCCL 2 技术 +* 需要安装 `CUDA `_ ,根据您系统不同,对 CUDA 版本要求不同: + + * Windows 安装 GPU 版本 + + * Windows 7/8/10 支持 CUDA 9.0/10.0 单卡模式,不支持 CUDA 9.1/9.2/10.1 + * 不支持 **nvidia-docker** 方式安装 + * Ubuntu 安装 GPU 版本 + + * Ubuntu 14.04 支持 CUDA 10.0/10.1,不支持CUDA 9.0/9.1/9.2 + * Ubuntu 16.04 支持 CUDA 9.0/9.1/9.2/10.0/10.1 + * Ubuntu 18.04 支持 CUDA 10.0/10.1,不支持CUDA 9.0/9.1/9.2 + * 如果您是使用 **nvidia-docker** 安装,支持 CUDA 9.0/9.1/9.2/10.0/10.1 + * CentOS 安装 GPU 版本 + + * 如果您是使用本机 **pip** 安装: + + * CentOS 7 支持 CUDA 9.0/9.1/9.2/10.0/10.1,CUDA 9.1 仅支持单卡模式 + * CentOS 6 支持 CUDA 9.0/9.1/9.2/10.0/10.1 单卡模式 + * 如果您是使用本机源码编译安装: + + * CentOS 7 支持 CUDA 9.0/9.1/9.2/10.0/10.1,CUDA 9.1 仅支持单卡模式 + * CentOS 6 不推荐,不提供编译出现问题时的官方支持 + * 如果您是使用 **nvidia-docker** 安装,在CentOS 7 下支持 CUDA 9.0/9.1/9.2/10.0/10.1 + * MacOS 不支持:PaddlePaddle 在 MacOS 平台没有 GPU 支持 + +请确保您的环境满足以上条件。如您有其他需求,请参考 `多版本whl包安装列表 `_ . + +5. PaddlePaddle 对 NCCL 支持情况: +================================= + +* Windows 支持情况 + + * 不支持NCCL +* Ubuntu 支持情况 + + * Ubuntu 14.04: + + * CUDA10.1 下支持NCCL v2.4.2-v2.4.8 + * CUDA10.0 下支持NCCL v2.3.7-v2.4.8 + * Ubuntu 16.04: + + * CUDA10.1 下支持NCCL v2.4.2-v2.4.8 + * CUDA10.0/9.2/9.0 下支持NCCL v2.3.7-v2.4.8 + * CUDA9.1 下支持NCCL v2.1.15 + * Ubuntu 18.04: + + * CUDA10.1 下支持NCCL v2.4.2-v2.4.8 + * CUDA10.0 下支持NCCL v2.3.7-v2.4.8 +* CentOS 支持情况 + + * CentOS 6:不支持NCCL + * CentOS 7: + + * CUDA10.1 下支持NCCL v2.4.2-v2.4.8 + * CUDA10.0/9.2/9.0 下支持NCCL v2.3.7-v2.4.8 +* MacOS 支持情况 + + * 不支持NCCL + +第一种安装方式:使用 pip 安装 +================================ + +您可以选择“使用pip安装”、“使用conda安装”、“使用docker安装”、“从源码编译安装” 四种方式中的任意一种方式进行安装。 + +本节将介绍使用 pip 的安装方式。 + +1. 需要您确认您的 操作系统 满足上方列出的要求 + +2. 需要您确认您的 处理器 满足上方列出的要求 + +3. 
确认您需要安装 PaddlePaddle 的 Python 是您预期的位置,因为您计算机可能有多个 Python + + 如果您是使用 Python 2,使用以下命令输出 Python 路径,根据您的环境您可能需要将说明中所有命令行中的 python 替换为具体的 Python 路径 + + 在 Windows 环境下,输出 Python 路径的命令为: + + :: + + where python + + 在 MacOS/Linux 环境下,输出 Python 路径的命令为: + + :: + + which python + + 如果您是使用 Python 3,使用以下命令输出 Python 路径,根据您的环境您可能需要将说明中所有命令行中的 python3 替换为 python 或者替换为具体的 Python 路径 + + 在 Windows 环境下,输出 Python 路径的命令为: + + :: + + where python3 + + 在 MacOS/Linux 环境下,输出 Python 路径的命令为: + + :: + + which python3 + +4. 检查 Python 的版本 + + 如果您是使用 Python 2,使用以下命令确认是 2.7.15+ + :: + + python --version + + 如果您是使用 Python 3,使用以下命令确认是 3.5.1+/3.6/3.7 + :: + + python3 --version + +5. 检查 pip 的版本,确认是 9.0.1+ + + 如果您是使用 Python 2 + :: + + python -m ensurepip + python -m pip --version + + 如果您是使用 Python 3 + :: + + python3 -m ensurepip + python3 -m pip --version + +6. 确认 Python 和 pip 是 64 bit,并且处理器架构是x86_64(或称作 x64、Intel 64、AMD64)架构,目前PaddlePaddle不支持arm64架构。下面的第一行输出的是 "64bit" ,第二行输出的是 "x86_64" 、 "x64" 或 "AMD64" 即可: + + 如果您是使用 Python 2 + :: + + python -c "import platform;print(platform.architecture()[0]);print(platform.machine())" + + 如果您是使用 Python 3 + :: + + python3 -c "import platform;print(platform.architecture()[0]);print(platform.machine())" + +7. 如果您希望使用 `pip `_ 进行安装PaddlePaddle可以直接使用以下命令: + + (1). **CPU版本** :如果您只是想安装CPU版本请参考如下命令安装 + + 如果您是使用 Python 2,安装CPU版本的命令为: + :: + + python -m pip install paddlepaddle==2.0.0b0 -i https://mirror.baidu.com/pypi/simple + + 或 + + python -m pip install paddlepaddle==2.0.0b0 -i https://pypi.tuna.tsinghua.edu.cn/simple + + 如果您是使用Windows系统请使用以下指令: + + pip install paddlepaddle==2.0.0b0 -f https://paddlepaddle.org.cn/whl/stable.html + + 如果您是使用 Python 3,请将上述命令中的 **python** 更换为 **python3** 进行安装 + + (2). **GPU版本** :如果您想使用GPU版本请参考如下命令安装 + + 注意: + + * 需要您确认您的 GPU 满足上方列出的要求 + + 如果您是使用 Python2,请注意用以下指令安装的PaddlePaddle在Windows、Ubuntu、CentOS下只支持CUDA10.0: + :: + + python -m pip install paddlepaddle-gpu==2.0.0b0 -i https://mirror.baidu.com/pypi/simple + + 或 + + python -m pip install paddlepaddle-gpu==2.0.0b0 -i https://pypi.tuna.tsinghua.edu.cn/simple + + 如果您是使用Windows系统请使用以下指令: + + python -m pip install paddlepaddle_gpu==2.0.0b0 -f https://paddlepaddle.org.cn/whl/stable.html + + 如果您是使用 Python 3,请将上述命令中的 **python** 更换为 **python3** 进行安装。 + +8. 验证安装 + + 使用 python 或 python3 进入python解释器,输入import paddle.fluid ,再输入 paddle.fluid.install_check.run_check()。 + + 如果出现 Your Paddle Fluid is installed succesfully!,说明您已成功安装。 + +9. 更多帮助信息请参考: + + `Ubuntu下安装 `_ + + `CentOS下安装 `_ + + `MacOS下安装 `_ + + `Windows下安装 `_ + + +第二种安装方式:使用源代码编译安装 +==================================== + +- 如果您只是使用 PaddlePaddle ,建议使用 **pip** 安装即可。 +- 如果您有开发PaddlePaddle的需求,请参考:`从源码编译 `_ + +.. toctree:: + :hidden: + + install_Ubuntu.md + install_CentOS.md + install_MacOS.md + install_Windows.md + compile/fromsource.rst + Tables.md + diff --git a/doc/paddle/install/index_en.rst b/doc/paddle/install/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..f3d4be6815a70bf03e6dfd2287388e4b2243c3cb --- /dev/null +++ b/doc/paddle/install/index_en.rst @@ -0,0 +1,253 @@ +.. _install_introduction_: + +============================ + Installation Manuals +============================ + +The manuals will guide you to build and install PaddlePaddle on your 64-bit desktop or laptop. + +1. Operating system requirements: +================================= + +* Windows 7 / 8 / 10, Pro/Enterprise +* Ubuntu 14.04 / 16.04 / 18.04 +* CentOS 6 / 7 +* MacOS 10.11 / 10.12 / 10.13 / 10.14 +* 64-bit operating system is required + +2. 
Processor requirements: +========================== + +* Processor supports MKL +* The processor architecture is x86_64(or called x64, Intel 64, AMD64). Currently, PaddlePaddle does not support arm64. + +3. Version requirements of python and pip: +========================================== + +* Python 2 requires version 2.7.15+ +* Python 3 requires version 3.5.1+/3.6/3.7 +* Python needs pip, and pip requires version 9.0.1+ +* Python and pip requires 64-bit + +4. PaddlePaddle's support for GPU: +================================== + +* Currently, **PaddlePaddle** only supports **CUDA** driver of **NVIDIA** graphics card. +* You need to install `cuDNN `_ , and version 7.6+ is required(For CUDA9/10) +* If you need GPU multi-card mode, you need to install `NCCL 2 `_ + + * Only Ubuntu/CentOS support NCCL 2 +* You need to install `CUDA `_ , depending on your system, there are different requirements for CUDA version: + + * Windows install GPU version + + * Windows 7 / 8 / 10 support CUDA 9.0 / 10.0 single-card mode, but don't support CUDA 9.1/9.2/10.1 + * don't support install using **nvidia-docker** + * Ubuntu install GPU version + + * Ubuntu 14.04 supports CUDA 10.0/10.1, but doesn't support CUDA 9.0/9.1/9.2 + * Ubuntu 16.04 supports CUDA 9.0/9.1/9.2/10.0/10.1 + * Ubuntu 18.04 supports CUDA 10.0/10.1, but doesn't support CUDA 9.0/9.1/9.2 + * If you install using **nvidia-docker** , it supports CUDA 9.0/9.1/9.2/10.0/10.1 + * CentOS install GPU version + + * If you install using native **pip** : + + * CentOS 7 supports CUDA 9.0/9.1/9.2/10.0/10.1, CUDA 9.1 supports single-card mode only + * CentOS 6 supports CUDA 9.0/9.1/9.2/10.0/10.1 single-card mode + * If you compile and install using native source code: + + * CentOS 7 supports CUDA 9.0/9.1/9.2/10.0/10.1, CUDA 9.1 supports single-card mode only + * CentOS 6 is not recommended, we don't provide official support in case of compilation problems + * If you install using **nvidia-docker** , CentOS 7 supports CUDA 9.0/9.1/9.2/10.0/10.1 + * MacOS isn't supported: PaddlePaddle has no GPU support in Mac OS platform + +Please make sure your environment meets the above conditions. If you have other requirements, please refer to `Appendix `_ . + +5. PaddlePaddle's support for NCCL: +=================================== + +* Support for Windows + + * not support NCCL +* Support for Ubuntu + + * Ubuntu 14.04: + + * support NCCL v2.4.2-v2.4.8 under CUDA10.1 + * support NCCL v2.3.7-v2.4.8 under CUDA10.0 + * Ubuntu 16.04: + + * support NCCL v2.4.2-v2.4.8 under CUDA10.1 + * support NCCL v2.3.7-v2.4.8 under CUDA10.0/9.2/9.0 + * support NCCL v2.1.15 under CUDA9.1 + * Ubuntu 18.04: + + * support v2.4.2-v2.4.8 under CUDA10.1 + * support NCCL v2.3.7-v2.4.8 under CUDA10.0 +* Support for CentOS + + * CentOS 6: not support NCCL + * CentOS 7: + + * support NCCL v2.4.2-v2.4.8 under CUDA10.1 + * support NCCL v2.3.7-v2.4.8 under CUDA10.0/9.2/9.0 +* Support for MacOS + + * not support NCCL + + +The first way to install: use pip to install +============================================ + +You can choose any of the four ways to install: "use pip to install", "use Conda to install", "use Docker to install", "compiling from the source code" + +This section describes how to use pip to install. + +1. You need to confirm that your operating system meets the requirements listed above + +2. You need to confirm that your processor meets the requirements listed above + +3. 
Confirm that the Python where you need to install PaddlePaddle is your expected location, because your computer may have multiple Python + + If you are using Python 2, use the following command to output Python path. Depending on your environment, you may need to replace Python in all command lines in the description with specific Python path + + In the Windows environment, the command to output Python path is: + + :: + + where python + + In the MacOS/Linux environment, the command to output Python path is: + + :: + + which python + + If you are using Python 3, use the following command to output Python path. Depending on your environment, you may need to replace Python in all command lines in the description with specific Python path + + In the Windows environment, the command to output Python path is: + + :: + + where python3 + + In the MacOS/Linux environment, the command to output Python path is: + + :: + + which python3 + +4. Check the version of Python + + If you are using Python 2,confirm it is 2.7.15+ using command + :: + + python --version + + If you are using Python 3,confirm it is 3.5.1+/3.6/3.7 using command + :: + + python3 --version + +5. Check the version of pip and confirm it is 9.0.1+ + + If you are using Python 2 + :: + + python -m ensurepip + python -m pip --version + + If you are using Python 3 + :: + + python3 -m ensurepip + python3 -m pip --version + +6. Confirm that Python and pip is 64 bit,and the processor architecture is x86_64(or called x64、Intel 64、AMD64)architecture. Currently, PaddlePaddle doesn't support arm64 architecture. The first line below outputs "64bit", and the second line outputs "x86_64", "x64" or "AMD64" : + + If you use Python 2 + :: + + python -c "import platform;print(platform.architecture()[0]);print(platform.machine())" + + If you use Python 3 + :: + + python3 -c "import platform;print(platform.architecture()[0]);print(platform.machine())" + +7. If you want to use `pip `_ to install PaddlePaddle, you can use the command below directly: + + (1). **CPU version** : If you only want to install CPU version, please refer to command below + + If you are using Python 2, command to install CPU version is: + :: + + python -m pip install paddlepaddle==2.0.0b0 -i https://mirror.baidu.com/pypi/simple + + or + + python -m pip install paddlepaddle==2.0.0b0 -i https://pypi.tuna.tsinghua.edu.cn/simple + + If you are using Windows environment, please use the following instruction: + + python -m pip install paddlepaddle==2.0.0b0 -f https://paddlepaddle.org.cn/whl/stable.html + + If you are using Python 3, please change **python** in the above command to **python3** and install. + + + (2). **GPU version** : If you only want to install GPU version, please refer to command below + + + Note: + + * You need to confirm that your GPU meets the requirements listed above + + If you are using Python2, please attention that PaddlePaddle installed through command below only supports CUDA10.0 under Windows、Ubuntu、CentOS: + :: + + python -m pip install paddlepaddle-gpu==2.0.0b0 -i https://mirror.baidu.com/pypi/simple + + or + + python -m pip install paddlepaddle-gpu==2.0.0b0 -i https://pypi.tuna.tsinghua.edu.cn/simple + + If you are using Windows environment, please use the following instruction: + + python -m pip install paddlepaddle_gpu==2.0.0b0 -f https://paddlepaddle.org.cn/whl/stable.html + + If you are using Python 3, please change **python** in the above command to **python3** and install. + +8. 
Verify installation + + After the installation is complete, you can use `python` or `python3` to enter the Python interpreter and then use `import paddle.fluid as fluid` and then `fluid.install_check.run_check()` to verify that the installation was successful. + + If `Your Paddle Fluid is installed succesfully!` appears, it means the installation was successful. + + +9. For more information to help, please refer to: + + `install under Ubuntu `_ + + `install under CentOS `_ + + `install under MacOS `_ + + `install under Windows `_ + + +The second way to install: compile and install with source code +=============================================================== + +- If you use PaddlePaddle only, we suggest you installation methods **pip** to install. +- If you need to develop PaddlePaddle, please refer to `compile from source code `_ + +.. toctree:: + :hidden: + + install_Ubuntu_en.md + install_CentOS_en.md + install_MacOS_en.md + install_Windows_en.md + compile/fromsource_en.rst + Tables_en.md diff --git a/doc/paddle/install/install_CentOS.md b/doc/paddle/install/install_CentOS.md new file mode 100644 index 0000000000000000000000000000000000000000..b10573a1774c2624252e4ebeb69501678b964744 --- /dev/null +++ b/doc/paddle/install/install_CentOS.md @@ -0,0 +1,125 @@ +# **CentOS下安装** + +## 环境准备 + +* **CentOS 版本 (64 bit)** + * **CentOS 6 (GPU版本支持CUDA 9.0/9.1/9.2/10.0/10.1, 仅支持单卡)** + * **CentOS 7 (GPU版本支持CUDA 9.0/9.1/9.2/10.0/10.1, 其中CUDA 9.1仅支持单卡)** +* **Python 版本 2.7.15+/3.5.1+/3.6/3.7 (64 bit)** +* **pip 或 pip3 版本 9.0.1+ (64 bit)** + +### 注意事项 + +* 可以使用`uname -m && cat /etc/*release`查看本机的操作系统和位数信息 +* 确认需要安装 PaddlePaddle 的 Python 是您预期的位置,因为您计算机可能有多个 Python + + * 如果您是使用 Python 2,使用以下命令输出 Python 路径,根据的环境您可能需要将说明中所有命令行中的 python 替换为具体的 Python 路径 + + which python + + * 如果您是使用 Python 3,使用以下命令输出 Python 路径,根据您的环境您可能需要将说明中所有命令行中的 python3 替换为 python 或者替换为具体的 Python 路径 + + which python3 + +* 需要确认python的版本是否满足要求 + + * 如果您是使用 Python 2,使用以下命令确认是 2.7.15+ + + python --version + + * 如果您是使用 Python 3,使用以下命令确认是 3.5.1+/3.6/3.7 + + python3 --version + +* 需要确认pip的版本是否满足要求,要求pip版本为9.0.1+ + + * 如果您是使用 Python 2 + + python -m ensurepip + + python -m pip --version + + * 如果您是使用 Python 3 + + python3 -m ensurepip + + python3 -m pip --version + +* 需要确认Python和pip是64bit,并且处理器架构是x86_64(或称作x64、Intel 64、AMD64)架构,目前PaddlePaddle不支持arm64架构。下面的第一行输出的是"64bit",第二行输出的是"x86_64"、"x64"或"AMD64"即可: + + * 如果您是使用 Python 2 + + python -c "import platform;print(platform.architecture()[0]);print(platform.machine())" + + * 如果您是使用 Python 3 + + python3 -c "import platform;print(platform.architecture()[0]);print(platform.machine())" + +* 默认提供的安装包需要计算机支持MKL +* 如果您对机器环境不了解,请下载使用[快速安装脚本](https://fast-install.bj.bcebos.com/fast_install.sh),配套说明请参考[这里](https://github.com/PaddlePaddle/FluidDoc/tree/develop/doc/fluid/install/install_script.md)。 + +## 选择CPU/GPU + +* 如果您的计算机没有 NVIDIA® GPU,请安装CPU版本的PaddlePaddle + +* 如果您的计算机有NVIDIA® GPU,请确保满足以下条件并且安装GPU版PaddlePaddle + + * **CUDA 工具包10.0配合cuDNN v7.6+(如需多卡支持,需配合NCCL2.3.7及更高)** + * **CUDA 工具包9.0配合cuDNN v7.6+(如需多卡支持,需配合NCCL2.3.7及更高)** + * **GPU运算能力超过1.0的硬件设备** + + 您可参考NVIDIA官方文档了解CUDA和CUDNN的安装流程和配置方法,请见[CUDA](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/),[cuDNN](https://docs.nvidia.com/deeplearning/sdk/cudnn-install/) + +* 如果您需要使用多卡环境请确保您已经正确安装nccl2,或者按照以下指令安装nccl2(这里提供的是CentOS 7,CUDA9,cuDNN7下nccl2的安装指令),更多版本的安装信息请参考NVIDIA[官方网站](https://developer.nvidia.com/nccl): + + + wget 
http://developer.download.nvidia.com/compute/machine-learning/repos/rhel7/x86_64/nvidia-machine-learning-repo-rhel7-1.0.0-1.x86_64.rpm + rpm -i nvidia-machine-learning-repo-rhel7-1.0.0-1.x86_64.rpm + yum update -y + yum install -y libnccl-2.3.7-2+cuda9.0 libnccl-devel-2.3.7-2+cuda9.0 libnccl-static-2.3.7-2+cuda9.0 + + +## 安装方式 + +CentOS系统下有3种安装方式: + +* pip安装(推荐) +* [源码编译安装](./compile/compile_CentOS.html#ct_source) +* [Docker源码编译安装](./compile/compile_CentOS.html#ct_docker) + +这里为您介绍pip安装方式 + +## 安装步骤 + +* CPU版PaddlePaddle: + * 对于Python 2: `python -m pip install paddlepaddle==2.0.0a0 -i https://mirror.baidu.com/pypi/simple` 或 `python -m pip install paddlepaddle==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` + * 对于Python 3: `python3 -m pip install paddlepaddle==2.0.0a0 -i https://mirror.baidu.com/pypi/simple` 或 `python3 -m pip install paddlepaddle==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` +* GPU版PaddlePaddle: + * 对于Python 2: `python -m pip install paddlepaddle-gpu==2.0.0a0 -i https://mirror.baidu.com/pypi/simple` 或 `python -m pip install paddlepaddle-gpu==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` + * 对于Python 3: `python3 -m pip install paddlepaddle-gpu==2.0.0a0 -i https://mirror.baidu.com/pypi/simple` 或 `python3 -m pip install paddlepaddle-gpu==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` + +您可[验证是否安装成功](#check),如有问题请查看[FAQ](./FAQ.html) + +注: + +* 如果是python2.7, 建议使用`python`命令; 如果是python3.x, 则建议使用`python3`命令 + + +* `python -m pip install paddlepaddle-gpu==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` 此命令将安装支持CUDA 10.0 cuDNN v7的PaddlePaddle。 + + +* 默认下载最新稳定版的安装包,如需获取开发版安装包,请参考[这里](./Tables.html#ciwhls) + + +## **验证安装** +安装完成后您可以使用 `python` 或 `python3` 进入python解释器,输入`import paddle.fluid as fluid` ,再输入 + `fluid.install_check.run_check()` + +如果出现`Your Paddle Fluid is installed succesfully!`,说明您已成功安装。 + +## **如何卸载** +请使用以下命令卸载PaddlePaddle: + +* **CPU版本的PaddlePaddle**: `python -m pip uninstall paddlepaddle` 或 `python3 -m pip uninstall paddlepaddle` + +* **GPU版本的PaddlePaddle**: `python -m pip uninstall paddlepaddle-gpu` 或 `python3 -m pip uninstall paddlepaddle-gpu` diff --git a/doc/paddle/install/install_CentOS_en.md b/doc/paddle/install/install_CentOS_en.md new file mode 100644 index 0000000000000000000000000000000000000000..b21c1bb23dfea2ede0c2e06f9c83ec3fe91a8fe8 --- /dev/null +++ b/doc/paddle/install/install_CentOS_en.md @@ -0,0 +1,128 @@ +# **Install on CentOS** + + +## Environmental preparation + +* **CentOS Version(64 bit)** + * **CentOS 6 (GPU version supports CUDA 9.0/9.1/9.2/10.0/10.1, only supports single card)** + * **CentOS 7 (GPU version supports CUDA 9.0/9.1/9.2/10.0/10.1, CUDA 9.1 only supports single card)** +* **Python version 2.7.15+/3.5.1+/3.6/3.7 (64 bit)** +* **pip or pip3 version 9.0.1+ (64 bit)** + +### Note + +* You can use`uname -m && cat /etc/*release` to view the local operating system and bit information +* Confirm that the Python where you need to install PaddlePaddle is your expected location, because your computer may have multiple Python + + * If you are using Python 2, use the following command to output Python path. Depending on the environment, you may need to replace Python in all command lines in the description with specific Python path + + which python + + * If you are using Python 3, use the following command to output Python path. 
Depending on your environment, you may need to replace python3 in all command lines in the instructions with python or with the specific Python path + + which python3 + +* You need to confirm whether the version of Python meets the requirements + + * If you are using Python 2, use the following command to confirm that it is 2.7.15+ + + python --version + + * If you are using Python 3, use the following command to confirm that it is 3.5.1+/3.6/3.7 + + python3 --version + +* It is required to confirm whether the version of pip meets the requirements. The version of pip is required to be 9.0.1+ + + * If you are using Python 2 + + python -m ensurepip + + python -m pip --version + + * If you are using Python 3 + + python3 -m ensurepip + + python3 -m pip --version + +* You need to confirm that Python and pip are 64 bit, and the processor architecture is x86_64 (also called x64, Intel 64 or AMD64). Currently, PaddlePaddle does not support the arm64 architecture. The first line below should output "64bit", and the second line should output "x86_64", "x64" or "AMD64": + + * If you are using Python 2 + + python -c "import platform;print(platform.architecture()[0]);print(platform.machine())" + + * If you are using Python 3 + + python3 -c "import platform;print(platform.architecture()[0]);print(platform.machine())" + +* The installation package provided by default requires computer support for MKL +* If you do not know the machine environment, please download and use the [Quick install script](https://fast-install.bj.bcebos.com/fast_install.sh); for instructions please refer to [here](https://github.com/PaddlePaddle/FluidDoc/tree/develop/doc/fluid/install/install_script.md). + +## Choose CPU/GPU + +* If your computer doesn't have an NVIDIA® GPU, please install the CPU version of PaddlePaddle + +* If your computer has an NVIDIA® GPU, please make sure that the following conditions are met and install the GPU version of PaddlePaddle + + * **CUDA toolkit 10.0 with cuDNN v7.6+ (for multi-card support, NCCL 2.3.7 or higher)** + * **CUDA toolkit 9.0 with cuDNN v7.6+ (for multi-card support, NCCL 2.3.7 or higher)** + * **Hardware devices with GPU computing power over 1.0** + + You can refer to the NVIDIA official documents for the installation process and configuration methods of CUDA and cuDNN. 
Please refer to [CUDA](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/) and [cuDNN](https://docs.nvidia.com/deeplearning/sdk/cudnn-install/) + +* If you need a multi-card environment, please make sure that you have installed nccl2 correctly, or install nccl2 according to the following instructions (these are the installation instructions for nccl2 under CentOS 7, CUDA 9 and cuDNN 7; for more versions, please refer to the NVIDIA [official website](https://developer.nvidia.com/nccl)): + + + wget http://developer.download.nvidia.com/compute/machine-learning/repos/rhel7/x86_64/nvidia-machine-learning-repo-rhel7-1.0.0-1.x86_64.rpm + rpm -i nvidia-machine-learning-repo-rhel7-1.0.0-1.x86_64.rpm + yum update -y + yum install -y libnccl-2.3.7-2+cuda9.0 libnccl-devel-2.3.7-2+cuda9.0 libnccl-static-2.3.7-2+cuda9.0 + +## Installation method + +There are three installation methods under the CentOS system: + +* pip installation (recommended) +* [Compile From Source Code](./compile/compile_CentOS_en.html#ct_source) +* [Compile From Docker Source Code](./compile/compile_CentOS_en.html#ct_docker) + +Here we introduce the pip installation + +## Installation steps + +* CPU version of PaddlePaddle: + * For Python 2: `python -m pip install paddlepaddle==2.0.0a0 -i https://mirror.baidu.com/pypi/simple` or `python -m pip install paddlepaddle==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` + * For Python 3: `python3 -m pip install paddlepaddle==2.0.0a0 -i https://mirror.baidu.com/pypi/simple` or `python3 -m pip install paddlepaddle==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` +* GPU version of PaddlePaddle: + * For Python 2: `python -m pip install paddlepaddle-gpu==2.0.0a0 -i https://mirror.baidu.com/pypi/simple` or `python -m pip install paddlepaddle-gpu==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` + * For Python 3: `python3 -m pip install paddlepaddle-gpu==2.0.0a0 -i https://mirror.baidu.com/pypi/simple` or `python3 -m pip install paddlepaddle-gpu==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` + +You can [verify whether the installation succeeded](#check); if you have any questions, you can refer to the [FAQ](./FAQ.html) + + +Note: + +* If it is python2.7, it is recommended to use the `python` command; if it is python3.x, it is recommended to use the `python3` command + + +* `python -m pip install paddlepaddle-gpu==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` This command will install the PaddlePaddle that supports CUDA 10.0 and cuDNN v7. + + +* The latest stable installation package is downloaded by default. For the development installation package, please refer to [here](./Tables.html#ciwhls) + + +## ***Verify installation*** + +After the installation is complete, you can use `python` or `python3` to enter the Python interpreter and then use `import paddle.fluid as fluid` and `fluid.install_check.run_check()` to verify that the installation was successful. + +If `Your Paddle Fluid is installed succesfully!` appears, it means the installation was successful. 
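The interpreter steps above can also be run as a single non-interactive command. This is a minimal sketch that assumes the `python3` (or `python`) on your `PATH` is the interpreter the wheel was installed into:

    python3 -c "import paddle.fluid as fluid; fluid.install_check.run_check()"

On success it prints the same `Your Paddle Fluid is installed succesfully!` message as the interactive check.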
+ + +## ***How to uninstall*** + +Please use the following command to uninstall PaddlePaddle: + +* ***CPU version of PaddlePaddle***: `python -m pip uninstall paddlepaddle` or `python3 -m pip uninstall paddlepaddle` + +* ***GPU version of PaddlePaddle***: `python -m pip uninstall paddlepaddle-gpu` or `python3 -m pip uninstall paddlepaddle-gpu` diff --git a/doc/paddle/install/install_MacOS.md b/doc/paddle/install/install_MacOS.md new file mode 100644 index 0000000000000000000000000000000000000000..d7d386558547203af51bfcf7e99cc530b75862dd --- /dev/null +++ b/doc/paddle/install/install_MacOS.md @@ -0,0 +1,102 @@ +# **MacOS下安装** + +## 环境准备 + +* **MacOS 版本 10.11/10.12/10.13/10.14 (64 bit) (不支持GPU版本)** +* **Python 版本 2.7.15+/3.5.1+/3.6/3.7 (64 bit)** +* **pip 或 pip3 版本 9.0.1+ (64 bit)** + +### 注意事项 + +* 确认需要安装 PaddlePaddle 的 Python 是您预期的位置,因为您计算机可能有多个 Python + + * 如果您是使用 Python 2,使用以下命令输出 Python 路径,根据的环境您可能需要将说明中所有命令行中的 python 替换为具体的 Python 路径 + + which python + + * 如果您是使用 Python 3,使用以下命令输出 Python 路径,根据您的环境您可能需要将说明中所有命令行中的 python3 替换为 python 或者替换为具体的 Python 路径 + + which python3 + +* 需要确认python的版本是否满足要求 + + * 如果您是使用 Python 2,使用以下命令确认是 2.7.15+ + + python --version + + * 如果您是使用 Python 3,使用以下命令确认是 3.5.1+/3.6/3.7 + + python3 --version + +* 需要确认pip的版本是否满足要求,要求pip版本为9.0.1+ + + * 如果您是使用 Python 2 + + python -m ensurepip + + python -m pip --version + + * 如果您是使用 Python 3 + + python3 -m ensurepip + + python3 -m pip --version + +* 需要确认Python和pip是64bit,并且处理器架构是x86_64(或称作x64、Intel 64、AMD64)架构,目前PaddlePaddle不支持arm64架构。下面的第一行输出的是"64bit",第二行输出的是"x86_64"、"x64"或"AMD64"即可: + + * 如果您是使用 Python 2 + + python -c "import platform;print(platform.architecture()[0]);print(platform.machine())" + + * 如果您是使用 Python 3 + + python3 -c "import platform;print(platform.architecture()[0]);print(platform.machine())" + +* 默认提供的安装包需要计算机支持MKL + +## 选择CPU/GPU + +* 目前在MacOS环境仅支持CPU版PaddlePaddle + +## 安装方式 + +MacOS系统下有3种安装方式: + +* pip安装(推荐) +* [源码编译安装](./compile/compile_MacOS.html#mac_source) +* [Docker源码编译安装](./compile/compile_MacOS.html#mac_docker) + + +这里为您介绍pip安装方式 + +## 安装步骤 + +* CPU版PaddlePaddle: + * 对于Python 2: `python -m pip install paddlepaddle==2.0.0a0 -i https://mirror.baidu.com/pypi/simple` 或 `python -m pip install paddlepaddle==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` + * 对于Python 3: `python3 -m pip install paddlepaddle==2.0.0a0 -i https://mirror.baidu.com/pypi/simple` 或 `python3 -m pip install paddlepaddle==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` + +您可[验证是否安装成功](#check),如有问题请查看[FAQ](./FAQ.html) + +注: + +* MacOS上您需要安装unrar以支持PaddlePaddle,可以使用命令`brew install unrar` +* 如果是python2.7, 建议使用`python`命令; 如果是python3.x, 则建议使用`python3`命令 + + +* 默认下载最新稳定版的安装包,如需获取开发版安装包,请参考[这里](./Tables.html#ciwhls) + + +* 使用MacOS中自带Python可能会导致安装失败。对于**Python2**,建议您使用[Homebrew](https://brew.sh)或[Python.org](https://www.python.org/ftp/python/2.7.15/python-2.7.15-macosx10.9.pkg)提供的python2.7.15;对于**Python3**,请使用[Python.org](https://www.python.org/downloads/mac-osx/)提供的python3.5.x、python3.6.x或python3.7.x。 + + +## 验证安装 +安装完成后您可以使用 `python` 或 `python3` 进入python解释器,输入`import paddle.fluid as fluid` ,再输入 + `fluid.install_check.run_check()` + +如果出现`Your Paddle Fluid is installed succesfully!`,说明您已成功安装。 + +## 如何卸载 + +请使用以下命令卸载PaddlePaddle: + +* `python -m pip uninstall paddlepaddle` 或 `python3 -m pip uninstall paddlepaddle` diff --git a/doc/paddle/install/install_MacOS_en.md b/doc/paddle/install/install_MacOS_en.md new file mode 100644 index 0000000000000000000000000000000000000000..8e819e7974f5338c5e272e9e5d8585d709698015 --- /dev/null +++ 
b/doc/paddle/install/install_MacOS_en.md @@ -0,0 +1,101 @@ +*** +# **Install on MacOS** + +## Environment preparation + +* **MacOS version 10.11/10.12/10.13/10.14 (64 bit)(not support GPU version)** +* **Python version 2.7.15+/3.5.1+/3.6/3.7 (64 bit)** +* **pip or pip3 version 9.0.1+ (64 bit)** + +### Note + +* Confirm that the Python where you need to install PaddlePaddle is your expected location, because your computer may have multiple Python + + * If you are using Python 2, use the following command to output Python path. Depending on the environment, you may need to replace `python` in all command lines + + which python + + * If you are using Python 3, use the following command to output Python path. Depending on the environment, you may need to replace `python` in all command lines + + which python3 + +* You need to confirm whether the version of Python meets the requirements + + * If you are using Python 2, use the following command to confirm that it is 2.7.15+ + + python --version + + * If you are using Python 3, use the following command to confirm that it is 3.5.1+/3.6/3.7 + + python3 --version + +* It is required to confirm whether the pip version meets the requirements. The pip version is required to be 9.0.1+ + + * If you are using Python 2 + + python -m ensurepip + + python -m pip --version + + * If you are using Python 3 + + python3 -m ensurepip + + python3 -m pip --version + +* Confirm that Python and pip is 64 bit,and the processor architecture is x86_64(or x64、Intel 64、AMD64)architecture. Currently, PaddlePaddle doesn't support arm64 architecture. The first line of output from the following command should be "64bit", and the second line should be "x86_64", "x64" or "AMD64". + + * If you are using Python 2 + + python -c "import platform;print(platform.architecture()[0]);print(platform.machine())" + + * If you are using Python 3 + + python3 -c "import platform;print(platform.architecture()[0]);print(platform.machine())" + +* The installation package provided by default requires computer support for MKL + +## Choose CPU/GPU + +* Currently, only the CPU version of PaddlePaddle is supported in the MacOS environment + +## Choose an installation method + +Under the MacOS system we offer 3 installation methods: + +* Pip installation (recommend) +* [Source code compilation and installation](./compile/compile_MacOS.html#mac_source) +* [Docker source code compilation and installation](./compile/compile_MacOS.html#mac_docker) + + +We will introduce pip installation here. + +## Installation steps + +* CPU version of PaddlePaddle: + * For Python 2: `python -m pip install paddlepaddle==2.0.0a0 -i https://mirror.baidu.com/pypi/simple` or `python -m pip install paddlepaddle==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` + * For Python 3: `python3 -m pip install paddlepaddle==2.0.0a0 -i https://mirror.baidu.com/pypi/simple` or `python3 -m pip install paddlepaddle==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` + +You can[Verify installation succeeded or not](#check), if you have any questions, please check[FAQ](./FAQ.html) + +Note: + +* On MacOS you need to install unrar to support PaddlePaddle, you can use command `brew install unrar` +* For python2.7, we suggest command `python`; for python3.x, we suggest command `python3` +* Download the latest release installation package by default. To obtain the development installation package, please refer to [here](./Tables.html#ciwhls) +* Using Python native to MacOS can cause installation failures. 
For **Python2**, we recommend the python2.7.15 provided by [Homebrew](https://brew.sh) or [Python.org](https://www.python.org/ftp/python/2.7.15/python-2.7.15-macosx10.9.pkg); for **Python3**, please use python3.5.x, python3.6.x or python3.7.x provided by [Python.org](https://www.python.org/downloads/mac-osx/); a minimal example is sketched below. + + +
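A minimal sketch of that recommendation, assuming [Homebrew](https://brew.sh) is already installed and that its `bin` directory comes before `/usr/bin` on `PATH` (the default Homebrew setup):

    # install a non-system Python 3, then install PaddlePaddle into it
    brew install python
    which python3        # should now point to the Homebrew interpreter rather than /usr/bin/python3
    python3 -m pip install paddlepaddle==2.0.0a0 -i https://mirror.baidu.com/pypi/simple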

+## ***Verify installation*** + +After the installation is complete, you can use `python` or `python3` to enter the Python interpreter, input `import paddle.fluid as fluid` and then `fluid.install_check.run_check()` to verify that the installation was successful. + +If `Your Paddle Fluid is installed succesfully!` appears, it means the installation was successful. + +

+## ***How to uninstall*** + +Please use the following command to uninstall PaddlePaddle: + +* `python -m pip uninstall paddlepaddle` or `python3 -m pip uninstall paddlepaddle` diff --git a/doc/paddle/install/install_Ubuntu.md b/doc/paddle/install/install_Ubuntu.md new file mode 100644 index 0000000000000000000000000000000000000000..8004e674aecb0a8a073578a035e41a05fc584206 --- /dev/null +++ b/doc/paddle/install/install_Ubuntu.md @@ -0,0 +1,126 @@ +# **Ubuntu下安装** + +## 环境准备 + +* **Ubuntu 版本 (64 bit)** + * **Ubuntu 14.04 (GPU 版本支持 CUDA 10.0/10.1)** + * **Ubuntu 16.04 (GPU 版本支持 CUDA 9.0/9.1/9.2/10.0/10.1)** + * **Ubuntu 18.04 (GPU 版本支持 CUDA 10.0/10.1)** +* **Python 版本 2.7.15+/3.5.1+/3.6/3.7 (64 bit)** +* **pip或pip3 版本 9.0.1+ (64 bit)** + +### 注意事项 + +* 可以使用`uname -m && cat /etc/*release`查看本机的操作系统和位数信息 +* 确认需要安装 PaddlePaddle 的 Python 是您预期的位置,因为您计算机可能有多个 Python + + * 如果您是使用 Python 2,使用以下命令输出 Python 路径,根据的环境您可能需要将说明中所有命令行中的 python 替换为具体的 Python 路径 + + which python + + * 如果您是使用 Python 3,使用以下命令输出 Python 路径,根据您的环境您可能需要将说明中所有命令行中的 python3 替换为 python 或者替换为具体的 Python 路径 + + which python3 + +* 需要确认python的版本是否满足要求 + + * 如果您是使用 Python 2,使用以下命令确认是 2.7.15+ + + python --version + + * 如果您是使用 Python 3,使用以下命令确认是 3.5.1+/3.6/3.7 + + python3 --version + +* 需要确认pip的版本是否满足要求,要求pip版本为9.0.1+ + + * 如果您是使用 Python 2 + + python -m ensurepip + + python -m pip --version + + * 如果您是使用 Python 3 + + python3 -m ensurepip + + python3 -m pip --version + +* 需要确认Python和pip是64bit,并且处理器架构是x86_64(或称作x64、Intel 64、AMD64)架构,目前PaddlePaddle不支持arm64架构。下面的第一行输出的是"64bit",第二行输出的是"x86_64"、"x64"或"AMD64"即可: + + * 如果您是使用 Python 2 + + python -c "import platform;print(platform.architecture()[0]);print(platform.machine())" + + * 如果您是使用 Python 3 + + python3 -c "import platform;print(platform.architecture()[0]);print(platform.machine())" + +* 默认提供的安装包需要计算机支持MKL +* 如果您对机器环境不了解,请下载使用[快速安装脚本](https://fast-install.bj.bcebos.com/fast_install.sh),配套说明请参考[这里](https://github.com/PaddlePaddle/FluidDoc/tree/develop/doc/fluid/install/install_script.md)。 + +## 选择CPU/GPU + +* 如果您的计算机没有 NVIDIA® GPU,请安装CPU版的PaddlePaddle + +* 如果您的计算机有 NVIDIA® GPU,并且满足以下条件,推荐安装GPU版的PaddlePaddle + * **CUDA 工具包10.0配合cuDNN v7.6+(如需多卡支持,需配合NCCL2.3.7及更高)** + * **CUDA 工具包9.0配合cuDNN v7.6+(如需多卡支持,需配合NCCL2.3.7及更高)** + * **GPU运算能力超过1.0的硬件设备** + + + 您可参考NVIDIA官方文档了解CUDA和CUDNN的安装流程和配置方法,请见[CUDA](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/),[cuDNN](https://docs.nvidia.com/deeplearning/sdk/cudnn-install/) + +* 如果您需要使用多卡环境请确保您已经正确安装nccl2,或者按照以下指令安装nccl2(这里提供的是ubuntu 16.04,CUDA9,cuDNN7下nccl2的安装指令),更多版本的安装信息请参考NVIDIA[官方网站](https://developer.nvidia.com/nccl): + + + wget https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1604/x86_64/nvidia-machine-learning-repo-ubuntu1604_1.0.0-1_amd64.deb + dpkg -i nvidia-machine-learning-repo-ubuntu1604_1.0.0-1_amd64.deb + sudo apt-get install -y libnccl2=2.3.7-1+cuda9.0 libnccl-dev=2.3.7-1+cuda9.0 + + + +## 安装方式 + +Ubuntu系统下有3种安装方式: + +* pip安装(推荐) +* [源码编译安装](./compile/compile_Ubuntu.html#ubt_source) +* [Docker源码编译安装](./compile/compile_Ubuntu.html#ubt_docker) + +这里为您介绍pip安装方式 + +## 安装步骤 + +* CPU版PaddlePaddle: + * 对于Python 2: `python -m pip install paddlepaddle==2.0.0a0 -i https://mirror.baidu.com/pypi/simple` 或 `python -m pip install paddlepaddle==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` + * 对于Python 3: `python3 -m pip install paddlepaddle==2.0.0a0 -i https://mirror.baidu.com/pypi/simple` 或 `python3 -m pip install paddlepaddle==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` + +* GPU版PaddlePaddle: + * 对于Python 
2: `python -m pip install paddlepaddle-gpu==2.0.0a0 -i https://mirror.baidu.com/pypi/simple` 或 `python -m pip install paddlepaddle-gpu==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` + * 对于Python 3: `python3 -m pip install paddlepaddle-gpu==2.0.0a0 -i https://mirror.baidu.com/pypi/simple` 或 `python3 -m pip install paddlepaddle-gpu==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` + +您可[验证是否安装成功](#check),如有问题请查看[FAQ](./FAQ.html) + +注: + +* 如果是python2.7, 建议使用`python`命令; 如果是python3.x, 则建议使用`python3`命令 + +* `python -m pip install paddlepaddle-gpu==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` 此命令将安装支持CUDA 10.0 cuDNN v7的PaddlePaddle。 + + +* 默认下载最新稳定版的安装包,如需获取开发版安装包,请参考[这里](./Tables.html#ciwhls) + + +## 验证安装 +安装完成后您可以使用 `python` 或 `python3` 进入python解释器,输入`import paddle.fluid as fluid` ,再输入 + `fluid.install_check.run_check()` + +如果出现`Your Paddle Fluid is installed succesfully!`,说明您已成功安装。 + +## 如何卸载 +请使用以下命令卸载PaddlePaddle: + +* **CPU版本的PaddlePaddle**: `python -m pip uninstall paddlepaddle` 或 `python3 -m pip uninstall paddlepaddle` + +* **GPU版本的PaddlePaddle**: `python -m pip uninstall paddlepaddle-gpu` 或 `python3 -m pip uninstall paddlepaddle-gpu` diff --git a/doc/paddle/install/install_Ubuntu_en.md b/doc/paddle/install/install_Ubuntu_en.md new file mode 100644 index 0000000000000000000000000000000000000000..2a08b8c54ca6e10431ee5913b9eca0c7423efc72 --- /dev/null +++ b/doc/paddle/install/install_Ubuntu_en.md @@ -0,0 +1,129 @@ +# **Install on Ubuntu** + +## Environment preparation + +* **Ubuntu version (64 bit)** + * **Ubuntu 14.04 (GPU version supports CUDA 10.0/10.1)** + * **Ubuntu 16.04 (GPU version supports CUDA 9.0/9.1/9.2/10.0/10.1)** + * **Ubuntu 18.04 (GPU version supports CUDA 10.0/10.1)** +* **Python version 2.7.15+/3.5.1+/3.6/3.7 (64 bit)** +* **pip or pip3 version 9.0.1+ (64 bit)** + +### Note + + +* You can use `uname -m && cat /etc/*release` view the operating system and digit information of the machine +* Confirm that the Python where you need to install PaddlePaddle is your expected location, because your computer may have multiple Python + + * If you are using Python 2, use the following command to output Python path. Depending on the environment, you may need to replace Python in all command lines in the description with specific Python path + + which python + + * If you are using Python 3, use the following command to output Python path. Depending on the environment, you may need to replace Python 3 in all command lines in the description with Python or specific Python path + + which python3 + +* You need to confirm that the version of Python meets the requirements + * If you are using Python 2,use the following command to confirm it is 2.7.15+ + + python --version + + * If you are using Python 3,use the following command to confirm it is 3.5.1+/3.6/3.7 + + python3 --version + +* You need to confirm that the version of pip meets the requirements, pip version is required 9.0.1+ + + * If you are using Python 2 + + python -m ensurepip + + python -m pip --version + + * If you are using Python 3 + + python3 -m ensurepip + + python3 -m pip --version + +* Confirm that Python and pip is 64 bit,and the processor architecture is x86_64(or called x64、Intel 64、AMD64)architecture. Currently, PaddlePaddle doesn't support arm64 architecture. 
The first line below should output "64bit", and the second line should output "x86_64", "x64" or "AMD64": + + * If you are using Python 2 + + python -c "import platform;print(platform.architecture()[0]);print(platform.machine())" + + * If you are using Python 3 + + python3 -c "import platform;print(platform.architecture()[0]);print(platform.machine())" + +* The installation package provided by default requires computer support for MKL +* If you do not know the machine environment, please download and use the [Quick install script](https://fast-install.bj.bcebos.com/fast_install.sh); for instructions please refer to [here](https://github.com/PaddlePaddle/FluidDoc/tree/develop/doc/fluid/install/install_script.md). + +## Choose CPU/GPU + +* If your computer doesn't have an NVIDIA® GPU, please install the CPU version of PaddlePaddle + +* If your computer has an NVIDIA® GPU and meets the following conditions, we recommend installing the GPU version of PaddlePaddle + * **CUDA toolkit 10.0 with cuDNN v7.6+ (for multi-card support, NCCL 2.3.7 or higher)** + * **CUDA toolkit 9.0 with cuDNN v7.6+ (for multi-card support, NCCL 2.3.7 or higher)** + * **Hardware devices with GPU computing power over 1.0** + + + You can refer to the NVIDIA official documents for the installation process and configuration methods of CUDA and cuDNN: [CUDA](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/), [cuDNN](https://docs.nvidia.com/deeplearning/sdk/cudnn-install/) + +* If you need a multi-card environment, please make sure that you have installed nccl2 correctly, or install nccl2 according to the following instructions (these are the installation instructions for nccl2 under Ubuntu 16.04, CUDA 9 and cuDNN 7). For more versions, please refer to the NVIDIA [official website](https://developer.nvidia.com/nccl): + + + wget https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1604/x86_64/nvidia-machine-learning-repo-ubuntu1604_1.0.0-1_amd64.deb + dpkg -i nvidia-machine-learning-repo-ubuntu1604_1.0.0-1_amd64.deb + sudo apt-get install -y libnccl2=2.3.7-1+cuda9.0 libnccl-dev=2.3.7-1+cuda9.0 + + + +## Choose an installation method + +Under the Ubuntu system, we offer 3 installation methods: + +* Pip installation (recommended) +* [Source code compilation and installation](./compile/compile_Ubuntu.html#ubt_source) +* [Docker source code compilation and installation](./compile/compile_Ubuntu.html#ubt_docker) + +We will introduce pip installation here. 
+ +## Installation steps + +* CPU version of PaddlePaddle: + * For Python 2: `python -m pip install paddlepaddle==2.0.0a0 -i https://mirror.baidu.com/pypi/simple` or `python -m pip install paddlepaddle==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` + * For Python 3: `python3 -m pip install paddlepaddle==2.0.0a0 -i https://mirror.baidu.com/pypi/simple` or `python3 -m pip install paddlepaddle==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` + +* GPU version of PaddlePaddle: + * For Python 2: `python -m pip install paddlepaddle-gpu==2.0.0a0 -i https://mirror.baidu.com/pypi/simple` or `python -m pip install paddlepaddle-gpu==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` + * For Python 3: `python3 -m pip install paddlepaddle-gpu==2.0.0a0 -i https://mirror.baidu.com/pypi/simple` or `python3 -m pip install paddlepaddle-gpu==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` + +You can [verify whether the installation is successful](#check); if you have any questions, please see the [FAQ](./FAQ.html) + +Note: + +* For python2.7, we recommend using the `python` command; for python3.x, we recommend using the `python3` command. + +* `python -m pip install paddlepaddle-gpu==2.0.0a0 -i https://pypi.tuna.tsinghua.edu.cn/simple` This command will install PaddlePaddle supporting CUDA 10.0 with cuDNN v7; a quick way to confirm CUDA support is sketched after these notes. + + +* The latest stable installation package is downloaded by default. For the development installation package, please refer to [here](./Tables.html#ciwhls) + + +
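If you installed the GPU package, the following minimal check confirms that the wheel you got was actually built with CUDA support; note that `is_compiled_with_cuda` only reflects how the package was built, not whether the local driver and CUDA runtime are usable:

    python3 -c "import paddle.fluid as fluid; print(fluid.is_compiled_with_cuda())"

If this prints `False` after installing `paddlepaddle-gpu`, the interpreter is importing a build without CUDA support (for example an older CPU wheel that is still installed).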

+## ***Verify installation*** + + After the installation is complete, you can use `python` or `python3` to enter the Python interpreter and then use `import paddle.fluid as fluid` and then `fluid.install_check.run_check()` to verify that the installation was successful. + + If `Your Paddle Fluid is installed succesfully!` appears, it means the installation was successful. + +

+## ***How to uninstall*** + +Please use the following command to uninstall PaddlePaddle (users who use Docker to install PaddlePaddle should use the following command in the container containing PaddlePaddle. Please use the corresponding version of pip): + +* ***CPU version of PaddlePaddle***: `pip uninstall paddlepaddle` or `pip3 uninstall paddlepaddle` + +- ***GPU version of PaddlePaddle***: `pip uninstall paddlepaddle-gpu` or `pip3 uninstall paddlepaddle-gpu` diff --git a/doc/paddle/install/install_Windows.md b/doc/paddle/install/install_Windows.md new file mode 100644 index 0000000000000000000000000000000000000000..103318d57522c5111f66d73dfee72800cdebf6d4 --- /dev/null +++ b/doc/paddle/install/install_Windows.md @@ -0,0 +1,96 @@ +# **Windows下安装** + +## 环境准备 + +* **Windows 7/8/10 专业版/企业版 (64bit)** + * **GPU版本支持CUDA 9.0/9.1/9.2/10.0/10.1,且仅支持单卡** +* **Python 版本 2.7.15+/3.5.1+/3.6/3.7 (64 bit)** +* **pip 版本 9.0.1+ (64 bit)** + +### 注意事项 + +* 确认您安装PaddlePaddle的 Python 是您预期的版本,因为您计算机可能有多个 Python,使用以下命令 + + python --version + + * 如果您是使用 Python 2,输出应是 2.7.15+ + + * 如果您是使用 Python 3,输出应是 3.5.1+/3.6+/3.7+ + + 如果您使用的是Python2,还需要安装[Microsoft Visual C++ Compiler for Python 2.7](https://www.microsoft.com/en-us/download/details.aspx?id=44266) + +* 如果不符合您预期的版本,使用以下命令查看python的路径是否是您预期的位置 + + where python + + * 如果您是使用 Python 2, python2.7的安装目录应位于第一行 + + * 如果您是使用 Python 3, python3.5.1+/3.6+/3.7+的安装目录应位于第一行 + + * 您可以通过以下任意方法进行调整: + + * 使用具体的Python路径来执行命令(例如C:\Python36\python.exe对应 Python 3,C:\Python27\python.exe对应 Python 2) + * 在环境变量中,将您预期的安装路径设置在第一顺序位(请在控制面板->系统属性->环境变量->PATH中修改) + +* 需要确认pip的版本是否满足要求,要求pip版本为9.0.1+ + + python -m ensurepip + + python -m pip --version + +* 需要确认Python和pip是64bit,并且处理器架构是x86_64(或称作x64、Intel 64、AMD64)架构,目前PaddlePaddle不支持arm64架构。下面的第一行输出的是"64bit",第二行输出的是"x86_64"、"x64"或"AMD64"即可: + + python -c "import platform;print(platform.architecture()[0]);print(platform.machine())" + + +* 默认提供的安装包需要计算机支持MKL,如果您的环境不支持,请在[这里](./Tables.html#ciwhls-release)下载`openblas`版本的安装包 +* 当前版本暂不支持NCCL,分布式等相关功能 + +## 选择CPU/GPU + +* 如果您的计算机没有 NVIDIA® GPU,请安装CPU版的PaddlePaddle + +* 如果您的计算机有 NVIDIA® GPU,并且满足以下条件,推荐安装GPU版的PaddlePaddle + * **CUDA 工具包 9.0/10.0 配合 cuDNN v7.4+** + * **GPU运算能力超过1.0的硬件设备** + +注: 目前官方发布的windows安装包仅包含 CUDA 9.0/10.0 的单卡模式,不包含 CUDA 9.1/9.2/10.1,如需使用,请通过源码自行编译。 + +您可参考NVIDIA官方文档了解CUDA和CUDNN的安装流程和配置方法,请见[CUDA](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/),[cuDNN](https://docs.nvidia.com/deeplearning/sdk/cudnn-install/) + +## 安装方式 + +Windows系统下有2种安装方式: + +* pip安装(推荐) +* [源码编译安装](./compile/compile_Windows.html#win_source) + +这里为您介绍pip安装方式 + +## 安装步骤 + +* CPU版PaddlePaddle: + * `python -m pip install paddlepaddle==2.0.0b0 -f https://paddlepaddle.org.cn/whl/stable.html` + +* GPU版PaddlePaddle: + * `python -m pip install paddlepaddle_gpu==2.0.0b0 -f https://paddlepaddle.org.cn/whl/stable.html` + +您可[验证是否安装成功](#check),如有问题请查看[FAQ](./FAQ.html) + +注: + +* `python -m pip install paddlepaddle_gpu==2.0.0b0 -f https://paddlepaddle.org.cn/whl/stable.html` 此命令将安装支持CUDA 10.0(配合cuDNN v7.4+)的PaddlePaddle。 + + + +## 验证安装 +安装完成后您可以使用 `python` 进入python解释器,输入`import paddle.fluid as fluid` ,再输入 + `fluid.install_check.run_check()` + +如果出现`Your Paddle Fluid is installed succesfully!`,说明您已成功安装。 + +## 如何卸载 + +* **CPU版本的PaddlePaddle**: `python -m pip uninstall paddlepaddle` + +* **GPU版本的PaddlePaddle**: `python -m pip uninstall paddlepaddle-gpu` diff --git a/doc/paddle/install/install_Windows_en.md b/doc/paddle/install/install_Windows_en.md new file mode 100644 index 
0000000000000000000000000000000000000000..4f2569e6c602c552507c09a8ff999b30fb420dd4 --- /dev/null +++ b/doc/paddle/install/install_Windows_en.md @@ -0,0 +1,94 @@ +# **Installation on Windows** + +## Environment Preparation + +* **Windows 7/8/10 Pro/Enterprise(64bit)** + * **GPU Version support CUDA 9.0/9.1/9.2/10.0/10.1, and only support single GPU** +* **Python version 2.7.15+/3.5.1+/3.6/3.7(64bit)** +* **pip version 9.0.1+(64bit)** + +### Precautions + +* Confirm the Python you install PaddlePaddle is the version you expected, because your computer may have more than one python, use the following command: + + python --version + + * If you are using Python 2, the output should be 2.7.15+ + + * If you are using Python 3, the output should be 3.5.1+/3.6+/3.7+ + + If you are using Python 2, you need to install [Microsoft Visual C++ Compiler for Python 2.7](https://www.microsoft.com/en-us/download/details.aspx?id=44266) + +* If Python doesn't match your expected version, use the following command to see if Python's path is where you expect it to be: + + where python + + * If you are using Python 2, The installation directory for python2.7 should be on the first line + + * If you are using Python 3, The installation directory for python3.5.1+/3.6+/3.7+ should be on the first line + + * You can adjust it in any of the following ways: + + * Use specific Python paths to execute commands(e.g. C:\Python36\python.exe corresponding to Python 3,C:\Python27\python.exe corresponding to Python 2) + * By modifying the environment variable, set your expected installation path in the first order (please modify it in control panel -> system properties -> environment variable -> path) + +* Confirm whether the pip version meets the requirements. The pip version is required to be 9.0.1+ + + python -m ensurepip + + python -m pip --version + +* Confirm that Python and pip is 64 bit,and the processor architecture is x86_64(or x64、Intel 64、AMD64)architecture. Currently, PaddlePaddle doesn't support arm64 architecture. The first line of output from the following command should be "64bit", and the second line should be "x86_64", "x64" or "AMD64": + + python -c "import platform;print(platform.architecture()[0]);print(platform.machine())" + + +* The installation package provided by default requires the computer to support MKL. If your environment does not support MKL, please download the `openblas` version of the installation package in [here](./Tables.html#ciwhls-release) +* Nccl, distributed and other related functions are not supported in the current version. + + +## CPU or GPU + +* If your computer doesn’t have NVIDIA® GPU, please install the CPU version of PaddlePaddle + +* If your computer has NVIDIA® GPU, and it satisfies the following requirements, we recommend you to install the GPU version of PaddlePaddle + * *CUDA Toolkit 9.0/10.0 with cuDNN v7.4+* + * *GPU's computing capability exceeds 1.0* + +Note: currently, the official Windows installation package only support CUDA 9.0/10.0 with single GPU, and don't include CUDA 9.1/9.2/10.1. if you need to use, please compile by yourself through the source code. + +Please refer to the NVIDIA official documents for the installation process and the configuration methods of [CUDA](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/) and [cuDNN](https://docs.nvidia.com/deeplearning/sdk/cudnn-install/). 
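Before choosing between the CPU and the GPU package, it can help to confirm which CUDA toolkit and driver are actually present. A minimal sketch for the Windows command prompt, assuming the CUDA toolkit's `bin` directory is on `PATH` and the NVIDIA driver is installed:

    nvcc --version
    nvidia-smi

`nvcc --version` reports the installed CUDA toolkit version, and `nvidia-smi` reports the driver version and the GPUs it detects; if either command is not found, install or repair the corresponding NVIDIA component before installing the GPU version of PaddlePaddle.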
+ +## Installation Method + +There are 2 ways to install PaddlePaddle on Windows: + +* pip installation (recommended) +* [source code compilation and installation](./compile/compile_Windows.html/#win_source) + +We would like to introduce the pip installation here. + +## Installation steps + +* CPU version of PaddlePaddle: + * `python -m pip install paddlepaddle==2.0.0b0 -f https://paddlepaddle.org.cn/whl/stable.html` + +* GPU version of PaddlePaddle: + * `python -m pip install paddlepaddle_gpu==2.0.0b0 -f https://paddlepaddle.org.cn/whl/stable.html` + +There is a checking function below for [verifyig whether the installation is successful](#check). If you have any further questions, please check the [FAQ](./FAQ.html). + +Notice: + +* `python -m pip install paddlepaddle_gpu==2.0.0b0 -f https://paddlepaddle.org.cn/whl/stable.html` This command will install PaddlePaddle that supports CUDA 10.0(with cuDNN v7.4+). + +## Installation Verification +After completing the installation process, you can use `python` to enter python interface and input `import paddle.fluid as fluid` and then `fluid.install_check.run_check()` to check whether the installation is successful. + +If you see `Your Paddle Fluid is installed succesfully!`, your installation is verified successful. + +## Uninstall PaddlePaddle + +* ***CPU version of PaddlePaddle***: `python -m pip uninstall paddlepaddle` + +* ***GPU version of PaddlePaddle***: `python -m pip uninstall paddlepaddle-gpu` diff --git a/doc/paddle/install/install_script.md b/doc/paddle/install/install_script.md new file mode 100644 index 0000000000000000000000000000000000000000..f35ded4316b9d30f5c9efc23e239d8b8cf8d8ebd --- /dev/null +++ b/doc/paddle/install/install_script.md @@ -0,0 +1,52 @@ +# 辅助安装脚本 + +## 使用方法 + +下载脚本至本地后,使用命令`/bin/bash fast_install.sh`启动脚本 + +### Ubuntu和CentOS + +脚本会执行以下几步: + +1. GPU检测 + + 检测您的机器是否含有我们支持的GPU,如果有,会安装GPU版本的PaddlePaddle,否则会安装CPU版本。 + (PaddlePaddle目前支持NVIDIA[官网](https://developer.nvidia.com/cuda-gpus#collapseOne)列出的,算力7.0以下的GPU和v100系列的GPU) + +2. CUDA,cuDNN检测 + + 检测您的机器是否安装我们支持的CUDA,cuDNN,具体地: + + 1. 在`/usr/local/` 及其子目录下寻找 `cuda/cuda8/cuda9/cuda10` 目录下的`version.txt`文件(通常如果您以默认方式安装了CUDA)。 如果提示未找到CUDA请使用命令`find / -name version.txt`找到您所需要的CUDA目录下的“version.txt”路径,然后按照提示输入。 + 2. 在`/usr` 及其子目录下寻找文件 `cudnn.h` , 如果您的cuDNN未安装在默认路径请使用命令`find / -name cudnn.h`寻找您希望使用的cuDNN版本的`cudnn.h`路径并按提示输入 + + 如果未找到相应文件,则会安装CPU版本的PaddlePaddle + +3. 选择数学库 +脚本默认会为您安装支持[MKL](https://software.intel.com/en-us/mkl)数学库的PaddlePaddle,如果您的机器不支持`MKL`,请选择安装支持[OPENBLAS](https://www.openblas.net)的PaddlePaddle + +4. 选择PaddlePaddle版本 +我们为您提供2种版本:开发版和稳定版,推荐您选择测试验证过的稳定版 + +5. 选择Python版本 +脚本默认会使用您机器中的Python,您也可以输入您希望使用的Python的路径 + +6. 检查[AVX](https://zh.wikipedia.org/zh-hans/AVX指令集)指令集 + +7. 使用[Python virtualenv](https://virtualenv.pypa.io/en/latest/) +脚本也支持按您的需求创建Python的虚拟环境 + +以上检查完成后就会为您安装对应您系统的PaddlePaddle了,安装一般需要1~2分钟会根据您的网络来决定,请您耐心等待。 + + +### MacOS + +脚本会执行以下几步: + +1. 选择PaddlePaddle版本 +我们为您提供2种版本:开发版和稳定版,推荐您选择测试验证过的稳定版 + +2. 检查Python版本 +由于MacOS自带的Python通常依赖于系统环境,因此我们不支持MacOS自带的Python环境,请重新从Python.org安装Python,然后根据提示输入您希望使用的Python的路径 + +3. 
检查是否支持[AVX](https://zh.wikipedia.org/zh-hans/AVX指令集)指令集 diff --git a/doc/paddle/release_note_cn.md b/doc/paddle/release_note_cn.md new file mode 100644 index 0000000000000000000000000000000000000000..440006a10d6df7a8296b97facc5c65585e39f095 --- /dev/null +++ b/doc/paddle/release_note_cn.md @@ -0,0 +1,270 @@ +# 版本说明 + +## 重要更新 + +本版本为飞桨框架v2.0的测试版,最重要的变化为API体系的全面升级以及命令式编程(动态图)能力的全面完善。本版本系统优化了飞桨基础API的目录结构,全面修复了历史遗留的相关问题,并对API做了充分补充,特别是提供了更为完善的高层API功能;同时提供了对动态图的量化训练、混合精度训练的支持,动静转换实现了完备的语法支持,并且易用性大幅提升,动态图相关功能趋于完善,推荐使用动态图模式。此外,推理库的C++接口也做了升级优化,推理库对量化模型的支持以及推理性能都有了全面增强。 + +## 训练框架 + +### 基础API + +#### 兼容性说明 + +- Paddle 2.x版本推荐用户使用位于paddle根目录下的API,同时在paddle.fluid目录下保留了所有的Paddle 1.x版本的API。按照设计,Paddle 1.x版本训练的代码,不做任何修改,即可在Paddle 2.x版本上正常运行;Paddle 1.x版本训练保存的模型,可以使用Paddle 2.x版本进行推理。 + +#### 目录结构调整 + +- 在2.0-alpha版本的基础上,本版本对于目录结构进行了一些调整,调整完最新的目录结构如下: + + | 目录 | 功能和包含的API | + | :--- | --------------- | + | paddle.* | paddle根目录下保留了常用API的别名,当前包括:paddle.tensor, paddle.framework目录下的所有API | + | paddle.tensor | 跟tensor操作相关的API,比如:创建zeros, 矩阵运算matmul, 变换concat, 计算add, 查找argmax等 | + | paddle.nn | 跟组网相关的API,比如:Linear, Conv2d,损失函数,卷积,LSTM等,激活函数等 | + | paddle.static.nn | 静态图下组网专用API,比如:输入占位符data/Input,控制流while_loop/cond | + | paddle.static | 静态图下基础框架相关API,比如:Variable, Program, Executor等 | + | paddle.framework | 框架通用API和imprerative模式的API,比如:to_tensor, prepare_context等 | + | paddle.optimizer | 优化算法相关API,比如:SGD,Adagrad, Adam等 | + | paddle.optimizer.lr_scheduler | 学习率衰减相关API | + | paddle.metric | 评估指标计算相关的API,比如:accuracy, auc等 | + | paddle.io | 数据输入输出相关API,比如:save, load, Dataset, DataLoader等 | + | paddle.device | 设备管理相关API,比如:CPUPlace, CUDAPlace等 | + | paddle.distributed | 分布式相关基础API | + | paddle.distributed.fleet | 分布式相关高层API | + | paddle.vision | 视觉领域API,比如,数据集,数据处理,常用基础网络结构,比如resnet | + | paddle.text | NLP领域API, 比如,数据集,数据处理,常用网络结构,比如transformer | + +#### API别名规则 +- 为了方便用户使用,API会在不同的路径下建立别名,比如`paddle.add -> paddle.tensor.add`,推荐用户优先使用较短的路径`paddle.add` + +- 所有framework, tensor目录下的API,均在paddle根目录建立别名;除少数特殊API外,其他API在paddle根目录下均没有别名。 + +- paddle.nn目录下除functional目录以外的所有API,在paddle.nn目录下均有别名;functional目录中的API,在paddle.nn目录下均没有别名。 + +- 以下为一些特殊的别名关系,推荐使用左边的名称: + - paddle.sigmoid -> paddle.tensor.sigmoid -> paddle.nn.functional.sigmoid + - paddle.tanh -> paddle.tensor.tanh -> paddle.nn.functional.tanh + - paddle.remainder -> paddle.mod -> paddle.floor_mod + - paddle.divide -> paddle.true_divide + - paddle.rand -> paddle.uniform + - paddle.randn -> paddle.standard_normal + - Optimizer.clear_grad -> Optimizer.clear_gradients + - Optimizer.set_state_dict -> Optimizer.set_dict + - Optimizer.get_lr -> Optimizer.current_step_lr + - Layer.clear_grad -> Layer.clear_gradients + - Layer.set_state_dict -> Layer.set_dict +#### 常用API名称变化 + +- 此版本使用Tensor表示数据,创建张量API, paddle.fluid.dygraph.to_variable修改为paddle.to_tensor +- 加、减、乘、除使用全称,不使用简称 +- 对于当前逐元素操作,不加elementwise前缀 +- 对于按照某一轴操作,不加reduce前缀 +- Conv, Pool, Dropout, BatchNorm, Pad组网类API根据输入数据类型增加1d, 2d, 3d后缀 + + | Paddle 1.8 | Paddle 2.0-beta | + | --------------- | ------------------------ | + | paddle.fluid.layers.elementwise_add | paddle.add | + | paddle.fluid.layers.elementwise_sub | paddle.subract | + | paddle.fluid.layers.elementwise_mul | paddle.multiply | + | paddle.fluid.layers.elementwise_div | paddle.divide | + | paddle.fluid.layers.elementwise_max | paddle.maximum | + | paddle.fluid.layers.elementwise_min | paddle.minimum | + | paddle.fluid.layers.reduce_sum | paddle.sum | + | paddle.fluid.layers.reduce_prod | paddle.prod | + | paddle.fluid.layers.reduce_max | paddle.max | + | 
paddle.fluid.layers.reduce_min | paddle.min | + | paddle.fluid.layers.reduce_all | paddle.all | + | paddle.fluid.layers.reduce_any | paddle.any | + | paddle.fluid.dygraph.Conv2D | paddle.nn.Conv2d | + | paddle.fluid.dygraph.Conv2DTranspose | paddle.nn.ConvTranspose2d | + | paddle.fluid.dygraph.Pool2D | paddle.nn.MaxPool2d, paddle.nn.AvgPool2d | + +#### 新增API +- 共计新增140个API,具体参考[链接](https://github.com/PaddlePaddle/Paddle/wiki/Paddle-2.0beta-New-API-List)和API文档 + - 新增环境设置API:paddle.set_default_dtype, paddle.get_default_dtype, paddle.set_device, paddle.get_device, paddle.manual_seed + - 新增Tensor操作API:numel, chunk, masked_select, isfinite, isinf, isnan, sort, topk, Flatten, dim, tile + - 新增组网API: Linear, Bilinear, Embedding, linear, bilinear, embedding + - 新增视觉组网类API:Conv1d, ConvTranspose1d, MaxPool1d, MaxPool2d, MaxPool3d, AvgPool1d, AvgPool2d, AvgPool3d, AdaptiveMaxPool1d, AdaptiveMaxPool2d, AdaptiveMaxPool3d, ReflactionPad1d, ReflactionPad2d, ReflactionPad3d, ReplicationPad1d, ReplicationPad2d, ReplicationPad3d, ZeroPad2d, ConstantPad1d, ConstantPad2d, ConstantPad3d, PixelShuffle, Upsample, UpsamplingNearest2d, UpsamplingBilinear2d, conv1d, conv_transpose1d, avg_pool1d, avg_pool2d, avg_pool3d, max_pool1d, max_pool2d, max_pool3d, adaptive_max_pool1d, adaptive_max_pool2d, adaptive_max_pool3d, adaptive_avg_pool1d, adaptive_avg_pool3d + - 新增文本处理组网类API: SimpleRNN, LSTM, GRU, MultiHeadAttention, Transformer, TransformerEncoder, TransformerEncoderLayer, TransformerDecoder, TransformerDecoderLayer + - 新增激活类API:ELU, Hardshrink, Hardtanh, PReLU, ReLU6, Tanh, Tanhshrink, Softmax + - 新增归一化API:BatchNorm1d, BatchNorm2d, BatchNorm3d, SyncBatchNorm, InstanceNorm1d, InstanceNorm2d, InstanceNorm3d, weight_norm, remove_weight_norm, batch_norm, instance_norm, layer_norm, normalize + - 新增Dropout类API:Dropout2d, Dropout3d, AlphaDropout, dropout, dropout2d, dropout3d + - 新增相似度、损失函数类API:CosineSimilarity, PairwiseDistance, CTCLoss, KLDivLoss, BCEWithLogitsLoss, MarginRankingLoss, SmoothL1Loss, consine_similarity, binary_cross_entropy, binary_cross_entropy_with_logits, cross_entropy, ctc_loss, l1_loss, mse_loss, margin_ranking_loss, nll_loss, smooth_l1_loss + - 新增分布式通信类API: broadcast, all_reduce, reduce, all_gather, scatter, barrier + - 新增概率分布类API: Distribution, normal, bernoulli + - 新增Optimizer相关API:step, AdamW + - 新增数据集相关API:Dataset, IterableDataset, TensorDataset, Sampler, RandomSampler, BatchSampler, DistributedBatchSampler + +#### 修复和完善API +- 共计修改和完善155个API,具体参考[链接](https://github.com/PaddlePaddle/Paddle/wiki/Paddle-2.0beta-Upgraded-API-List)和API文档 +- 修复随机数生成相关的API,包括:种子设置paddle.rand, randn, randint, randperm, dropout, Uniform, Normal等 +- 以下API对应的底层C++ OP进行了代码升级,理论上可以实现兼容,但不排除会出现少量不兼容的情况:linspace, concat, gather, gather_nd, split, squeeze, unsqueeze, clip, argmax, argmin, mean, norm, unique, cumsum, LeakyReLU, leaky_relu, hardshrink, embedding, margin_ranking_loss, grid_sample, affine_grid +- 增加了relu6和Sigmoid激活函数的 oneDNN支持 + +#### 多设备/分布式训练API +- 动态图单机多卡训练 + - 新增paddle.distributed.spawn(func, args=(), nprocs=-1, join=True, daemon=False, **options),用于启动动态图多卡训练。 + - 新增paddle.distributed.init_parallel_env(),用于初始化动态图多卡训练的环境。 + - 新增paddle.distributed.get_rank(),用于获取多卡训练时当前进程的rank。 + - 新增paddle.distributed.get_world_size(),用于获取多卡训练时参与训练的总进程数。 + + - 分布式集合通信 + - 新增paddle.distributed.broadcast(tensor, src, group=0),将指定进程上的tensor广播到所有进程。 + - 新增paddle.distributed.all_reduce(tensor, op=ReduceOp.SUM, group=0),对所有进程的指定Tensor执行归约操作,结果返回给所有进程。 + - 新增paddle.distributed.reduce(tensor, dst, op=ReduceOp.SUM, 
group=0),对所有进程的指定Tensor执行归约操作,结果返回给指定进程。 + - 新增paddle.distributed.all_gather(tensor_list, tensor, group=0),聚合所有进程的指定Tensor,结果返回给所有进程。 + - 新增paddle.distributed.scatter(tensor, tensor_list=None, src=0, group=0),将指定进程Tensor列表中的Tensor分发到所有进程。 + - 新增paddle.distributed.barrier(group=0),同步所有进程。 +### 高层API + +- 新增飞桨高层API,对模型开发过程中常见的组网、训练、评估、预测、存取等操作进行封装,实现低代码开发,MNIST手写数字识别任务对比命令式编程模式实现方式,高层API可减少80%执行类代码。 + +- **数据管理** + - 统一数据加载使用方式 + - 数据集定义,继承`paddle.io.Dataset`进行实现。 + - 多进程数据加载,使用`paddle.io.DataLoader`。 + - 新增`paddle.io.IterableDataset`用于流式数据集,并在`paddle.io.DataLoader`中支持对其进行并发加速。 + - 新增`paddle.io.get_worker_info`用于`paddle.io.IterableDataset`中划分子进程数据。 +- **模型组网** + - 新增常见Loss接口`paddle.nn.loss.*`和Metric接口`paddle.metric.*`的封装 + - 发布基于高层API实现的12个模型 + - Transformer,Seq2seq,LAC,BMN,ResNet,YOLOv3,VGG,MobileNet,TSM,CycleGAN,Bert,OCR + - 发布于[PaddlePaddle/hapi](https://github.com/paddlePaddle/hapi)仓库 +- **模型执行** + - 新增Model类`paddle.Model`封装,封装模型开发过程中常用的基础功能,包括: + - 提供`Model.summary`接口,用于查看动态图组网的网络结构与参数数量。 + - 提供`Model.prepare`接口,用于指定损失函数和优化算法。 + - 提供`Model.fit`接口,实现训练和评估,可通过callback方式实现训练过程中执行自定义功能,比如模型存储等。 + - 提供`Model.evaluate`接口,实现评估集上的预测和评估指标计算。 + - 提供`Model.predict`接口,实现特定的测试数据推理预测。 + - 提供`Model.train_batch`接口,实现单batch数据的训练。 + - 提供`Model.eval_batch`接口,实现单batch数据的评估。 + - 提供`Model.text_batch`接口,实现单batch数据的测试。 + - 提供`Model.save`/`Model.load`接口,支持动态图训练模式存储推理模型。 + - 新增Callback接口`paddle.callbacks.*`,用于模型执行接口,进行日志记录、Checkpoint模型存储等,用户可继承`paddle.callbacks.Callback`进行自定义。 +- **领域API** + - 新增视觉(CV)领域接口`paddle.vision` + - 新增Dataset接口`paddle.vision.datasets.*`,对常用数据集进行封装,支持数据的随机访问 + - 新增Resize, Normalize等24种常见的数据预处理接口`paddle.vision.transforms.*` + - 新增图像分类骨干网络和预训练参数 + - `paddle.vision.models.lenet` 或 `paddle.vision.lenet` + - `paddle.vision.models.vgg` 或 `paddle.vision.vgg` + - `paddle.vision.models.resnet` 或 `paddle.vision.vgg` + - `paddle.vision.models.mobilenetv1` 或 `paddle.vision.mobilenetv1` + - `paddle.vision.models.mobilenetv2` 或 `paddle.vision.mobilenetv2` + - 新增自然语言处理(NLP)领域接口`paddle.text` + - 新增Dataset接口`paddle.text.datasets.*`,对常用数据集进行封装,支持数据的随机访问 + - 新增领域组网接口`paddle.text.*` +- **自动断点重启** + - 新增接口 `train_epoch_range`:可以在静态图上实现基于epoch粒度的 `checkpoint` 自动保存和自动加载功能,支持自动断点重启。 + +### 功能优化(含分布式) + +#### 动态图转静态图 + +- **ProgramTranslator新增语法支持** + + - 新增对return语法动转静支持,使得动转静时可以在if-elif-else或者循环条件中提前return,也能return不同类型的tensor或None。 + + - 新增对print语法动转静支持,使得print(tensor)也能在动转静中打印出tensor。 + + - 新增对for遍历Tensor,for enumerate遍历Tensor,for遍历TensorList,for enumerate遍历TensorList几种语法的动转静支持,使得循环处理Tensor的相关操作在动转静中能够灵活使用。 + + - 新增对assert语法动转静支持,使得assert tensor也能在动转静中保证tensor为True(bool类型)或者非0(其他数据类型)。 + + - 新增对数据类型cast的转写支持,使得float(tensor), int(tensor) 等类似的动态图类型转化语句也能在静态图中进行类型转化。 + +- **ProgramTranslator易用性优化功能** + + - 将动转静的返回类型从callable函数改为class StaticLayer,这个class可以调用.code,.main_program等接口更轻松获取转化后的静态图信息。 + + - 增加 set_verbosity 和 set_code_level 接口,可以让用户设置log级别来查看动转静运行过程的log或者查看中间状态转化的代码。 + + - 新增InputSpec,可以指定动转静时输入Tensor变量形状和数据类型。 + + - 优化了动转静运行下如果出错显示的报错信息,使动转静后静态图运行错误的代码也能汇报到原动态图错误的代码行,并且删除python栈中动转静部分报错,使报错信息更多与用户代码相关。 + + - 动转静支持用 pdb.set_trace() 进行断点调试。 + +- **优化部署模型存储载入接口** + + - 新增 paddle.jit.save 接口用于动转静模型的保存,使接口更加易用,删除旧接口ProgramTranslator.save_inference_model 。 + - 新增 paddle.jit.load 接口用于载入静态图格式存储的预测模型,包括paddle.jit.save和paddle.io.save_inference_model保存的模型,模型载入后可在动态图下用于模型推理或者模型训练调优。 + +#### 混合精度训练 +- 增加了动态图混合精度的支持,ResNet-50模型在V100上使用混合精度相比于fp32训练加速比为2.6。 + +#### 量化训练 + +- 新增`ImperativeQuantAware`类,提供动态图量化训练功能,目前支持对Conv2D、Linear等层的量化,支持的模型类型包括MobileNetV1/MobileNetV2/ResNet50等。 +- 
模型经动态图量化训练后,使用`ImperativeQuantAware.save_quantized_model`接口保存的量化模型可利用Paddle-Lite推理库进行预测部署。 +- 静态图量化支持Conv2d_tranpose量化,支持Linear使用per-channel形式量化。 +#### 性能优化(含分布式) + +- 简化动态图模式下DataLoader底层实现逻辑,降低读取线程开销,进一步提升数据读取效率,提升模型整体训练速度。经测试MobileNetV1在V100单卡、BatchSize=128的场景下整体训练速度提升34%。 +- 动态图组网API升级和性能优化,大量动态图API将直接调用自动生成的Pybind接口,提升性能。 + +#### 动态图基础功能 + +- 支持多卡训练时配置Embedding等API使用稀疏参数梯度更新的功能。@威行 +- 增加Tensor类成员函数,包括Tensor().abs()、Tensor().add()、Tensor().cos()等120余个。 +- 增加Layer的dir()接口,可以方便地查看Layer中属性和函数。 +- 增加optimizer.set_lr()接口,用户可以在动态图模式下中灵活调整学习率。 +- 增加全局参数初始化方式的接口set_global_initializer,可定义全局的参数初始化方法。 +- 增加了对动态训练和推理的oneDNN(原MKL-DNN)支持。Resent50 oneDNN动态训练可以使用(Minist数据集) + - Added oneDNN support for dynamic training and inference. Resent50 oneDNN dynamic training with minist dataset is enabled. + +#### 调试分析 + +- 将框架内仅100处使用LOG(FATAL)抛出异常的写法统一改为使用PADDLE_THROW,优化由于框架不支持某种行为而导致的报错格式与内容。 +- 完善框架内Signal Handler实现,优化执行遇到系统Signal错误时的报错格式与内容。 +- 优化框架报错栈格式,将编译时python报错栈移至原生报错栈下方,提升报错信息阅读体验。 +- 累计进一步完善约1300余条框架内检查报错的错误类型与提示文案,提升框架整体调试易用性。 +- 动态图报错信息增强,动态图下Pybind层的报错信息进行系统性增强,提升用户体验。 + +### Bug修复 + +- 修复动态图Layer使用add_parameter接口可能意外出现AttributeError的问题,增强输入检查。 +- 修复无法正常打印int_8与uint_8类型的Tensor的问题,使数据可以正常输出。 + +#### 依赖库升级 +- 升级oneDNN(原MKL-DNN)从1.3至1.5版本 + - Upgrade oneDNN from 1.3->1.5 +## 推理 + +### Paddle Inference + +#### API +- 全面升级推理C++ API,推荐使用新版API。原API暂时保留,但使用时会报 warning,计划未来会删除;新版API主要是从规范命名、简化使用方法角度做的升级,重要变化包括: + - C++ 接口新增 `paddle_infer` 命名空间,包含推理相关接口; + - `ZeroCopyTensor` 更名为 `Tensor`,作为推理接口默认输入输出表示方式; + - 简化 `CreatePaddlePredictor` 为 `CreatePredictor`,只保留 对`AnalysisConfig` 的支持,不再支持其他多种Config; + - 新增服务相关的工具类,比如 `PredictorPool`,便于创建多个predictor 时使用。 + +#### 功能升级 +- 升级算子版本兼容信息注册表以支持更精确的Op版本信息,提升推理兼容性。 +- 新增对TRT 7.1版本的适配支持。 +- Paddle-TensorRT增强对 PaddleSlim 量化模型的支持,涵盖CV上检测,分类,分割等多个任务。 +- Python端推理新增对用户自定义OP支持。 +- CPU端增加了`elementwise_add` 和`elementwise_mul` INT8 oneDNN(原MKL-DNN)内核支持。 +- 提升了CPU端测试量化模型的易用性,支持同时对比测试原始模型和量化模型。 +- 新增对Jetson Nx硬件的适配支持。 +#### 性能优化 +- 新增 conv + affine_op pass,在6248机器上,MASK-RCNN fp32单线程性能提高了26%。 + - Added conv + affine_op pass, MASK-RCNN single thread performance is improved by 26% (1.26x) on machine 6248 +- 新增fc + gru pass和oneDNN(原MKL-DNN) GRU fp32内核,使得GRU fp32模型4线程推断速度在机器Intel Xeon 6248上提高 20%。 + - Added fc + gru fuse pass and enabled oneDNN gru fp32 kernel, speeding up GRU fp32 model inference on 4 CPU threads by 20% (1.2x) on machine Intel Xeon 6248 +- 增加了对许多Op的oneDNN inplace支持(人脸feature fp32模型提速2%) + - Added support for oneDNN inplace support for many operators (speedup 2% for Feature model) +- 优化的oneDNN LRN op,使得GoogleNet fp32模型提速1% + - Optimized LRN operator (speedup 1% for GoogleNet) +- 升级了量化模型的转换和优化 @intel + - Improved the transformation and optimization of quantized model +- 优化了CUDA 的ArgMin, ArgMax OP,使得该OP的二进制大小从60M下降至1.3M + +#### Bug修复 + +- 修复CPU下的mask-rcnn推断错误的问题 + - Fix mask-rcnn inference error under CPU inference +- 修复CPU多线程量化模型和推断过程中出现的错误 + - Fix the CPU multithread inference on oneDNN quantized INT8 models diff --git a/doc/paddle/release_note_en.md b/doc/paddle/release_note_en.md new file mode 100644 index 0000000000000000000000000000000000000000..b9b4eb294e66561a611bb3e81c6e0383ae892315 --- /dev/null +++ b/doc/paddle/release_note_en.md @@ -0,0 +1,276 @@ +# Release Note + +## Important Update +This version is the beta version of PaddlePaddle Framework v2.0. The most important change is the full upgrade of the API system and the comprehensive improvement on the imperative programming (dynamic graph) capability. 
This version systematically optimizes the directory structure of PaddlePaddle basic APIs, comprehensively fixes relevant issues left over from the past, fully supplements APIs, and especially provides the better high-level API functions. It also provides support for the quantitative training and mixed precision training under a dynamic graph. Perfect syntax support is implemented in the dynamic-to-static conversion. The usability is improved substantially. Dynamic graph-related functions tend to be perfect. In addition, the C++ APIs for the inference library are upgraded and optimized. Both the support of the inference library for quantitative models and the inference performance are fully enhanced. + +## Training Framework + +### Basic APIs + +#### Compatibility Description + +For Version Paddle 2.x, users are recommended to use APIs in the paddle root directory. In addition, all the APIs of Version Paddle 1.x are reserved in the paddle.fluid directory. Codes for Version Paddle 1.x training are not changed according to the design, that is, models saved for Version Paddle 1.x training can run on Version Paddle 2.x normally and inference can be performed using Version Paddle 2.x. + +#### Directory Structure Adjustment +- Based on the 2.0-alpha version, this version has made some adjustments to the directory structure. The latest adjusted directory structure is as follows: + + | Directory | Functions and Included APIs | + | :--- | --------------- | + | paddle.* | The aliases of commonly used APIs are reserved in the paddle root directory, which currently include all the APIs in the paddle.tensor and paddle.framework directories | + | paddle.tensor | APIs related to tensor operations such as creating zeros, matrix operation matmul, transforming concat, computing add, and finding argmax | + | paddle.nn | Networking-related APIs such as Linear, Conv2d, loss function, convolution, LSTM,and activation function | + | paddle.static.nn | Special APIs for networking under a static graph such as input placeholder data, fully connection fc and control flow while_loop/cond | + | paddle.static | APIs related to the basic framework under a static graph such as Variable, Program, and Executor | + | paddle.framework | Universal APIs and imprerative mode APIs such as to_tensor | + | paddle.optimizer | APIs related to optimization algorithms such as SGD, Adagrad, and Adam | + | paddle.optimizer.lr_scheduler | APIs related to learning rate attenuation | + | paddle.metric | APIs related to evaluation index computation such as accuracy and auc | + | paddle.io | APIs related to data input and output such as Dataset, and DataLoader | + | paddle.device | APIs related to device management such as CPUPlace and CUDAPlace | + | paddle.distributed | Distributed related basic APIs | + | paddle.distributed.fleet | Distributed related high-level APIs | + | paddle.vision | Vision domain APIs such as datasets, data processing, and commonly used basic network structures like resnet | + | paddle.text | NLP domain APIs such as datasets, data processing, and commonly used basic network structures like transformer | + +#### API Alias Rules +- For the convenience of users, APIs will create aliases in different paths, such as `paddle.add -> paddle.sensor.add`. Users are recommend to use the shorter path `paddle.add`. + +- All the APIs in the framework and tensor directories are aliased in the paddle root directory. Except for a few special APIs, all other APIs have no aliases in the paddle root directory. 
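To make the alias rules above (and the renaming table that follows) concrete, here is a minimal sketch, not part of the original note, assuming a 2.0-beta install; the tensor values are arbitrary:

```python
# Minimal sketch of the 2.0-beta naming conventions; assumes a 2.0-beta
# install, values are illustrative only.
import paddle

paddle.disable_static()                            # imperative (dynamic graph) mode

x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])     # replaces fluid.dygraph.to_variable
y = paddle.to_tensor([[0.5, 0.5], [0.5, 0.5]])

z = paddle.add(x, y)    # alias of paddle.tensor.add; the elementwise_ prefix is gone
s = paddle.sum(z)       # reduce_sum -> sum: no reduce prefix for axis-wise reductions

print(z.numpy(), s.numpy())
```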
+ +- All the APIs in the paddle.nn directory, except those in the functional directory, have aliases in the paddle.nn directory. All the APIs in the functional directory have no aliases in the paddle.nn directory. + +- The following are some special alias relations. It is recommended to use the names on the left. + - paddle.sigmoid -> paddle.tensor.sigmoid -> paddle.nn.functional.sigmoid + - paddle.tanh -> paddle.tensor.tanh -> paddle.nn.functional.tanh + - paddle.remainder -> paddle.mod -> paddle.floor_mod + - paddle.divide -> paddle.true_divide + - paddle.rand -> paddle.uniform + - paddle.randn -> paddle.standard_normal + - Optimizer.clear_grad -> Optimizer.clear_gradients + - Optimizer.set_state_dict -> Optimizer.set_dict + - Optimizer.get_lr -> Optimizer.current_step_lr + - Layer.clear_grad -> Layer.clear_gradients + - Layer.set_state_dict -> Layer.set_dict + +#### Name Change of Commonly Used APIs + +- This version uses tensor representation data, creates tensor APIs, and changes paddle.fluid.dygraph.to_variable to paddle.to_tensor +- Addition, subtraction, multiplication, and division use full names only +- For the current element-by-element operation, no elementwise prefix is added +- For operating by a certain axis, no reduce prefix is added +- For Conv, Pool, Dropout, BatchNorm and Pad networking APIs, 1d, 2d, and 3d suffixes are added according to the input data type + + | Paddle 1.8 | Paddle 2.0-beta | + | --------------- | ------------------------ | + | paddle.fluid.layers.elementwise_add | paddle.add | + | paddle.fluid.layers.elementwise_sub | paddle.subract | + | paddle.fluid.layers.elementwise_mul | paddle.multiply | + | paddle.fluid.layers.elementwise_div | paddle.divide | + | paddle.fluid.layers.elementwise_max | paddle.maximum | + | paddle.fluid.layers.elementwise_min | paddle.minimum | + | paddle.fluid.layers.reduce_sum | paddle.sum | + | paddle.fluid.layers.reduce_prod | paddle.prod | + | paddle.fluid.layers.reduce_max | paddle.max | + | paddle.fluid.layers.reduce_min | paddle.min | + | paddle.fluid.layers.reduce_all | paddle.all | + | paddle.fluid.layers.reduce_any | paddle.any | + | paddle.fluid.dygraph.Conv2D | paddle.nn.Conv2d | + | paddle.fluid.dygraph.Conv2DTranspose | paddle.nn.ConvTranspose2d | + | paddle.fluid.dygraph.Pool2D | paddle.nn.MaxPool2d, paddle.nn.AvgPool2d | + +#### Added APIs +- Added a total of 140 APIs. 
See [Link] (https://github.com/PaddlePaddle/Paddle/wiki/Paddle-2.0beta-New-API-List) and the API document + - Added environment setting APIs: paddle.set_default_dtype, paddle.get_default_dtype, paddle.set_device, paddle.get_device, paddle.manual_seed + - Added tensor operation APIs: numel, chunk, masked_select, isfinite, isinf, isnan, sort, topk, Flatten, dim, tile + - Added networking APIs: Linear, Bilinear, Embedding, linear, bilinear, embedding + - Added vision networking APIs: Conv1d, ConvTranspose1d, MaxPool1d, MaxPool2d, MaxPool3d, AvgPool1d, AvgPool2d, AvgPool3d, AdaptiveMaxPool1d, AdaptiveMaxPool2d, AdaptiveMaxPool3d, ReflactionPad1d, ReflactionPad2d, ReflactionPad3d, ReplicationPad1d, ReplicationPad2d, ReplicationPad3d, ZeroPad2d, ConstantPad1d, ConstantPad2d, ConstantPad3d, PixelShuffle, Upsample, UpsamplingNearest2d, UpsamplingBilinear2d, conv1d, conv_transpose1d, avg_pool1d, avg_pool2d, avg_pool3d, max_pool1d, max_pool2d, max_pool3d, adaptive_max_pool1d, adaptive_max_pool2d, adaptive_max_pool3d, adaptive_avg_pool1d, adaptive_avg_pool3d + - Added text processing networking APIs: SimpleRNN, LSTM, GRU, MultiHeadAttention, Transformer, TransformerEncoder, TransformerEncoderLayer, TransformerDecoder, TransformerDecoderLayer + - Added activation APIs: ELU, Hardshrink, Hardtanh, PReLU, ReLU6, Tanh, Tanhshrink, Softmax + - Added normalization APIs: BatchNorm1d, BatchNorm2d, BatchNorm3d, SyncBatchNorm, InstanceNorm1d, InstanceNorm2d, InstanceNorm3d, weight_norm, remove_weight_norm, batch_norm, instance_norm, layer_norm, normalize + - Added dropout APIs: Dropout2d, Dropout3d, AlphaDropout, dropout, dropout2d, dropout3d + - Added similarity and loss function APIs: CosineSimilarity, PairwiseDistance, CTCLoss, KLDivLoss, BCEWithLogitsLoss, MarginRankingLoss, SmoothL1Loss, consine_similarity, binary_cross_entropy, binary_cross_entropy_with_logits, cross_entropy, ctc_loss, l1_loss, mse_loss, margin_ranking_loss, nll_loss, smooth_l1_loss + - Added distributed communication APIs: broadcast, all_reduce, reduce, all_gather, scatter, barrier + - Added probability distribution APIs: Distribution, normal, bernoulli + - Added optimizer-related APIs: step, AdamW + - Added dataset-related APIs: Dataset, IterableDataset, TensorDataset, Sampler, RandomSampler, BatchSampler, DistributedBatchSampler + +#### Fixing and Improving APIs +- Modified and improved a total of 155 APIs. See [Link] (https://github.com/PaddlePaddle/Paddle/wiki/Paddle-2.0beta-Upgraded-API-List) and the API document +- Fixed APIs related to random number generation including: seed setting paddle.rand, randn, randint, randperm, dropout, Uniform, and Normal +- Upgraded the codes of the underlying C++ operators corresponding to the following APIs to theoretically achieve compatibility without excluding slight incompatibility: linspace, concat, gather, gather_nd, split, squeeze, unsqueeze, clip, argmax, argmin, mean, norm, unique, cumsum, LeakyReLU, leaky_relu, hardshrink, embedding, margin_ranking_loss, grid_sample, affine_grid +- Added oneDNN support for the relu6 and Sigmoid activation functions + +#### Multi-device/Distributed Training APIs + +- Single-Machine Multi-Card Training Under a Dynamic Graph + - Added paddle.distributed.spawn(func, args=(), nprocs=-1, join=True, daemon=False, **options),which is used to start multi-card training under a dynamic graph. + - Added paddle.distributed.init_parallel_env(), which is used to initialize the environment of multi-card training under a dynamic graph. 
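As a hedged sketch (not from the original note) of how the launch APIs above combine with the rank and collective APIs listed next, assuming two visible devices and a toy tensor payload:

```python
# Hedged sketch: dynamic-graph multi-card launch plus a collective call.
# The process count and the toy tensor are illustrative assumptions.
import paddle
import paddle.distributed as dist

def train():
    dist.init_parallel_env()                   # initialize the multi-card environment
    rank = dist.get_rank()                     # rank of the current process
    data = paddle.to_tensor([float(rank + 1)])
    dist.all_reduce(data)                      # op defaults to ReduceOp.SUM per the notes
    print("rank", rank, "sum across ranks:", float(data.numpy()))

if __name__ == "__main__":
    # documented signature: spawn(func, args=(), nprocs=-1, join=True, daemon=False, **options)
    dist.spawn(train, args=(), nprocs=2)
```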
+ - Added paddle.distribued.get_rank(), which is used to get the rank of the current process during the multi-card training. + - Added paddle.distribued.get_world_size(), which is used to get the total number of processes participating in training during the multi-card training. + +- Distributed Collective Communication + - Added paddle.distributed.broadcast(tensor, src, group=0), which broadcasts a tensor of a specified process to all the processes. + - Added paddle.distributed.all_reduce(tensor, op=ReduceOp.SUM, group=0), which performs the reduce operation on specified tensors of all the processes and returns results to all the processes. + - Added paddle.distributed.reduce(tensor, dst, op=ReduceOp.SUM, group=0), which performs the reduce operation on specified tensors of all the processes and returns results to specified processes. + - Added paddle.distributed.all_gather(tensor_list, tensor, group=0), which gathers specified tensors of all the processes and returns results to all the processes. + - Added paddle.distributed.scatter(tensor, tensor_list=None, src=0, group=0), which distributes tensors in a specified tensor list to all the processes. + - Added paddle.distributed.barrier(group=0),which synchronizes all the processes. + +### High-level APIs + +- Added PaddlePaddle high-level APIs to encapsulate common operations such as networking, training, evaluation, inference, and access so as to implement low code development. In the MNIST handwritten digit recognition task versus the imperative programming implementation mode, high-level APIs can reduce 80% of executable codes. + +- **Data Management** + - Unified data loading and usage method + - Dataset definition, which is implemented by inheriting `paddle.io.Dataset`. + - Multi-process data loading using `paddle.io.DataLoader`. + - Added `paddle.io.IterableDataset`, which is used for a streaming dataset and supports its concurrent acceleration in `paddle.io.DataLoader`. + - Added `paddle.io.get_worker_info` for dividing child process data in `paddle.io.IterableDataset`. + +- **Model Networking** + - Added the encapsulation of the common loss API `paddle.nn.loss.*` and metric API `paddle.metric.*` + - Released 12 models based on high-level API implementations, including Transformer, Seq2seq, LAC, BMN, ResNet, YOLOv3, VGG, MobileNet, TSM, CycleGAN, Bert, OCR. The code can be found in [PaddlePaddle/hapi](https://github.com/PaddlePaddle/hapi). + +- **Model Execution** + - Added class API `paddle.Model`, which encapsulates the common model development methods: + - API `Model.summary` to view the network structure and the number of parameters of the dynamic graph networking. + - API `Model.prepare` to specify a loss function and an optimization algorithm. + - API `Model.fit` to implement training and evaluation, which can implement the execution of user-defined functions such as model storage by callback. + - API `Model.evaluate` to implement the computation of inference and evaluation indexes on the evaluation set. + - API `Model.predict` to implement specific test data inference. + - API `Model.train_batch` to implement training on a single batch of data. + - API `Model.eval_batch` to implement evaluation on a single batch of data. + - API `Model.text_batch` to implement testing on a single batch of data. + - API `Model.save`/`Model.load` , which supports storing an inference model in dynamic graph training mode. 
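Putting the `paddle.Model` methods above together, a hedged end-to-end sketch; the toy dataset and two-layer network are assumptions for illustration, and exact argument names may differ slightly in the beta release:

```python
# Hedged sketch of the high-level paddle.Model workflow; the dataset and the
# network are toy stand-ins, and argument names are assumptions, not a contract.
import numpy as np
import paddle

paddle.disable_static()

class RandomDataset(paddle.io.Dataset):
    """Custom dataset defined by inheriting paddle.io.Dataset, as described above."""
    def __init__(self, n=256):
        self.x = np.random.rand(n, 16).astype('float32')
        self.y = np.random.randint(0, 2, (n, 1)).astype('int64')
    def __getitem__(self, idx):
        return self.x[idx], self.y[idx]
    def __len__(self):
        return len(self.x)

net = paddle.nn.Sequential(
    paddle.nn.Linear(16, 32),
    paddle.nn.ReLU(),
    paddle.nn.Linear(32, 2),
)

model = paddle.Model(net)
model.prepare(paddle.optimizer.Adam(parameters=model.parameters()),
              paddle.nn.CrossEntropyLoss(),
              paddle.metric.Accuracy())
model.fit(RandomDataset(), epochs=1, batch_size=32, verbose=1)
model.evaluate(RandomDataset(64), batch_size=32, verbose=1)
model.save('output/toy_model')     # dynamic-graph training-mode save, per Model.save above
```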
+ - Added callback API `paddle.callbacks.*` as a model execution API, which performs logging and Checkpoint model saving, etc. Users can customize a callback by inheriting `paddle.callbacks.Callback`. + +- **Domain APIs** + - Added computer vision (CV) APIs `paddle.vision` + - Added dataset API `paddle.vision.datasets.*`, which encapsulates common public datasets and supports random access to data. + - Added 24 common data preprocessing APIs `paddle.vision.transforms.*` such as Resize, Normalize, etc. + - Added image classification backbone network and pre-training parameters: + - `paddle.vision.models.lenet` or `paddle.vision.lenet` + - `paddle.vision.models.vgg` or `paddle.vision.vgg` + - `paddle.vision.models.resnet` or `paddle.vision.resnet` + - `paddle.vision.models.mobilenetv1` or `paddle.vision.mobilenetv1` + - `paddle.vision.models.mobilenetv2` or `paddle.vision.mobilenetv2` + - Added natural language processing (NLP) APIs `paddle.text`. + - Added dataset API `paddle.text.datasets.*`, which encapsulates commonly-used datasets and supports random access to data. + - Added networking API `paddle.text.*`. +- **Automatic Breakpoint Restart** + - Added API `train_epoch_range`, which implements the epoch-level `checkpoint` autosave and autoloading functions on a static graph and supports automatic breakpoint restart. + +### Function Optimization (Including Distributed) + +#### Dynamic Graph to Static Graph + +- **Added Syntax Support for ProgramTranslator** + + - Added dynamic-to-static support for the return syntax so as to return in advance or to return different types of tensors or none in if-elif-else or loop conditions during the dynamic-to-static conversion. + + - Added dynamic-to-static support for the print syntax so that print (tensor) can also print out a tensor in the dynamic-to-static conversion. + + - Added dynamic support for “for traversing a tensor”, “for traversing a tensor using enumeration”, “for traversing a TensorList”, and “for traversing a TensorList using enumeration” syntaxes so that operations related to the circular processing of tensors can be flexibly used in the dynamic-to-static conversion. + + - Added dynamic-to-static support for the assert syntax to ensure that an assert tensor can be true (bool type) or non-0 (other data types) in the dynamic-to-static conversion. + + - Added support for the transfer of cast of data type so that type conversion of similar conversion statements of dynamic graph type such as float (tensor) and int (tensor) can also be performed in a static graph. + +- **ProgramTranslator Usability Optimization Function** + + - Changed the dynamic-to-static return type to class StaticLayer from callable. This class can obtain converted static graph information more easily by calling .code,.main_program, and other APIs. + + - Added set_verbosity and set_code_level APIs so that users can set a log class to view a log in the dynamic-to-static running process or a converted code in intermediate state. + + - Added InputSpec to specify the shape and data type of an input tensor variable. + + - Optimized an error message displayed in case of error in the dynamic-to-static running so that codes with running error in the static graph after dynamic-to-static conversion can also be reported to the original error code line in the dynamic graph; deleted some dynamic-to-static errors from python stacks so that an error message is more related to user codes. 
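A hedged sketch of the conversion workflow these items describe: decorate a dynamic-graph function, pin the signature with InputSpec, then read back the converted code. The `paddle.jit.to_static` decorator path and the `paddle.static.InputSpec` location are assumptions based on the 2.0-beta layout, and the toy function is illustrative only:

```python
# Hedged sketch of dynamic-to-static conversion with an InputSpec; import
# paths are assumed from the 2.0-beta layout.
import paddle
from paddle.static import InputSpec

@paddle.jit.to_static(input_spec=[InputSpec(shape=[None, 8], dtype='float32')])
def scale_if_positive(x):
    # tensor-dependent control flow and early return are supported, per the notes
    if paddle.mean(x) > 0:
        return x * 2
    return x

y = scale_if_positive(paddle.randn([4, 8]))
print(scale_if_positive.code)   # converted static-graph code via the StaticLayer-style wrapper
```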
+ + - Support performing a breakpoint test using pdb.set_trace() during the dynamic-to-static conversion. + +- **Optimized Deployment of Model Storage and Loading APIs** + + - Added paddle.jit.save API, which is used to save a dynamic-to-static model so that the API is easier to use; deleted an old API ProgramTranslator.save_inference_model. + - Added paddle.jit.load API, which is used to load inference models including models saved by paddle.jit.save and paddle.io.save_inference_model. After being loaded, models can be used for model inference or model training optimization in a dynamic graph. + +#### Mixed Precision Training +- Added the support for mixed precision of dynamic graphs. The ratio of the speed when the ResNet-50 model is trained on V100 using mixed precision to the speed using fp32 is 2.6. + +#### Quantitative Training + +- Added `ImperativeQuantAware` class. The dynamic graph quantitative training function is provided. Currently, the quantization of Conv2D, Linear, and other layers are supported. The supported model types include MobileNetV1/MobileNetV2/ResNet50. +- After dynamic graph quantitative training is performed on a model, inference deployment of any quantitative model saved using an `ImperativeQuantAware.save_quantized_model` API can be performed using a Paddle-Lite inference library. +- As for static graph quantization, Conv2d_tranpose quantization as well as Linear quantization in the form of per-channel is supported. + +#### Performance Optimization (Including Distributed) + +- Simplified the DataLoader underlying implementation logic in dynamic graph mode, reduced the thread reading overhead, and further improved the data reading efficiency and the overall model training speed.The overall training speed of MobileNetV1 in a scenario of single V100 card and BatchSize = 128 is increased by 34%. +- Upgrade and performance optimization of dynamic graph networking. A large number of dynamic graph APIs will directly call an automatically generated Pybind API, improving the performance. + +#### Basic Functions for Dynamic Graph + +- Support the function of updating the gradient using a sparse parameter by configuring embedding and other APIs. +- Added over 120 member functions of Tensor type, including Tensor().abs(), Tensor().add(), and Tensor().cos(). +- Added dir() API for a layer to facilitate viewing the attributes and functions in the layer. +- Added an optimizer.set_lr() API so that users can flexibly adjust a learning rate in dynamic diagram mode. +- Added a global parameter initialization method API set_global_initializer to define a global parameter initialization method. +- Added oneDNN (former MKL-DNN) support for dynamic training and inference.Resent50 oneDNN dynamic training with minist dataset is enabled. +- Added oneDNN support for dynamic training and inference. Resent50 oneDNN dynamic training with minist dataset is enabled. + +#### Debugging Analysis + +- Uniformly changed the wording of LOG (FATAL) throw abnormal at just 100 points to PADDLE_THROW; optimized the error format and content caused by non-support of the framework for a behavior. +- Improved Signal Handler implementation within the framework; optimized the error format and content when system signal error occurs during the execution. +- Optimized the framework error stack format. The python error stack occurring during the compilation is moved to below the native error stack to improve error message reading experience. 
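To round off the dynamic-graph items above, a small hedged illustration of chained Tensor member functions and `optimizer.set_lr()`; the SGD settings are arbitrary:

```python
# Hedged illustration of the dynamic-graph basics listed earlier in this
# section: chained Tensor member functions and in-place learning-rate updates.
import paddle

paddle.disable_static()

x = paddle.to_tensor([-1.0, 2.0, -3.0])
print(x.abs().cos().numpy())      # Tensor member functions such as .abs()/.cos()

linear = paddle.nn.Linear(4, 4)
opt = paddle.optimizer.SGD(learning_rate=0.1, parameters=linear.parameters())
opt.set_lr(0.01)                  # optimizer.set_lr(), added for dynamic graph mode
print(opt.get_lr())               # get_lr is the preferred alias of current_step_lr
```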
+- Further improved an accumulative total of about 1,300 error type and prompt copywritings of check errors within the framework to enhance the overall debugging usability of the framework. +- Enhanced dynamic graph error messages. Error messages on the Pybind layer under a dynamic graph are systematically enhanced to improve user experience. + +### Bug Fixing + +- Fixed the problem that AttributeError may unexpectedly occur when the add_parameter API is used on a layer under a dynamic graph; enhance the input check. +- Fixed the problem that tensors of int_8 and uint_8 types cannot be normally printed so that data can be normally output. + +#### Dependency Library Upgrading +- Upgraded oneDNN (former MKL-DNN) to Version 1.5 from Version 1.3. +- Upgrade oneDNN from 1.3->1.5 + + +## Inference + +### Paddle Inference + +#### API +- Fully upgraded the inference C++ APIs. The new version of the APIs is recommended. The original APIs are reserved tentatively, but give a warning during use, and are planned to be deleted in the future. The upgrade to the new version of the APIs mainly involves naming standardization and usage method simplification. The important changes include: + - adding a `paddle_infer` naming space for the C++ APIs, containing inference-related APIs. + - renaming `ZeroCopyTensor` to `Tensor` as the default input/output representation method for the inference APIs. + - simplifying `CreatePaddlePredictor` to `CreatePredictor` and reserving the support for only `AnalysisConfig`, not for other Configs any more. + - adding service-related utility classes such as `PredictorPool`, which can be used when multiple predictors are created. + +#### Functional Upgrading +- Upgraded the operator version compatibility information registry to support more accurate Op version information and improve inferential compatibility. +- Added the adaptive support for Version TRT 7.1. +- Paddle-TensorRT enhances the support for the PaddleSlim quantitative model. Multiple tasks such as detection, classification, and segmentation on CV are covered. +- Added the support for user-defined operators for Python-side inference. +- Added the kernel support for `elementwise_add` and `elementwise_mul` INT8 oneDNN (former MKL-DNN) on the CPU side. +- Improved the usability of CPU-side test quantitative models. A simultaneous comparison test of original models with quantitative models is supported. +- Added the adaptive support for Jetson Nx hardware. + +### Performance optimization +- Added conv + affine_op pass. The MASK-RCNN fp32 single thread performance is improved by 26% (1.26x) on machine 6248. + - Added conv + affine_op pass, MASK-RCNN single thread performance is improved by 26% (1.26x) on machine 6248 +- Added fc + gru pass and enabled oneDNN (former MKL-DNN) GRU fp32 kernel, speeding up GRU fp32 model inference on 4 CPU threads by 20% on machine Intel Xeon 6248. + - Added fc + gru fuse pass and enabled oneDNN gru fp32 kernel, speeding up GRU fp32 model inference on 4 CPU threads by 20% (1.2x) on machine Intel Xeon 6248 +- Added oneDNN inplace support for many operators (speedup 2% for the feature fp32 model). + - Added support for oneDNN inplace support for many operators (speedup 2% for Feature model) +- Optimized oneDNN LRN operator (speedup 1% for the GoogleNet fp32 model). + - Optimized LRN operator (speedup 1% for GoogleNet) +- Improved the transformation and optimization of quantitative models. 
+ - Improved the transformation and optimization of quantized model +- Optimized the ArgMin, ArgMax operator of CUDA so that the binary system size of the operator is decreased to 1.3 M from 60 M. + +#### Bug Fixing + +- Fixed the mask-rcnn inference error under CPU inference. + - Fixed mask-rcnn inference error under CPU inference +- Fixed the error occurring in the CPU multithread inference on quantitative models. + - Fixed the CPU multithread inference on oneDNN quantized INT8 models diff --git a/doc/paddle/tutorial/cv_case/convnet_image_classification/convnet_image_classification.ipynb b/doc/paddle/tutorial/cv_case/convnet_image_classification/convnet_image_classification.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..9fe485839bead2e9054eb73d14ef2be9802f51fa --- /dev/null +++ b/doc/paddle/tutorial/cv_case/convnet_image_classification/convnet_image_classification.ipynb @@ -0,0 +1,362 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 使用卷积神经网络进行图像分类\n", + "\n", + "本示例教程将会演示如何使用飞桨的卷积神经网络来完成图像分类任务。这是一个较为简单的示例,将会使用一个由三个卷积层组成的网络完成[cifar10](https://www.cs.toronto.edu/~kriz/cifar.html)数据集的图像分类任务。" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 设置环境\n", + "\n", + "我们将使用飞桨2.0beta版本。" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2.0.0-beta0\n" + ] + } + ], + "source": [ + "import paddle\n", + "import paddle.nn.functional as F\n", + "from paddle.vision.transforms import Normalize\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "\n", + "paddle.disable_static()\n", + "print(paddle.__version__)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 加载并浏览数据集\n", + "\n", + "我们将会使用飞桨提供的API完成数据集的下载并为后续的训练任务准备好数据迭代器。cifar10数据集由60000张大小为32 * 32的彩色图片组成,其中有50000张图片组成了训练集,另外10000张图片组成了测试集。这些图片分为10个类别,我们的任务是训练一个模型能够把图片进行正确的分类。" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "cifar10_train = paddle.vision.datasets.cifar.Cifar10(mode='train', transform=None)\n", + "\n", + "train_images = np.zeros((50000, 32, 32, 3), dtype='float32')\n", + "train_labels = np.zeros((50000, 1), dtype='int32')\n", + "for i, data in enumerate(cifar10_train):\n", + " train_image, train_label = data\n", + " train_image = train_image.reshape((3, 32, 32 )).astype('float32') / 255.\n", + " train_image = train_image.transpose(1, 2, 0)\n", + " train_images[i, :, :, :] = train_image\n", + " train_labels[i, 0] = train_label" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 浏览数据集\n", + "\n", + "接下来我们从数据集中随机挑选一些图片并显示,从而对数据集有一个直观的了解。" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAj0AAAI8CAYAAAAazRqkAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+j8jraAAAgAElEQVR4nOy9S5PkupLn93MAJCMiH1V1HtP3thazmcWYFtpooZXMZiF9x1nOUt9FWmkhk7VJZqPu2+q+r3NOVWVmBEkAroUDJIIZWVV5bmXdbnV6WVQwGXyAAAj/4+8PiKryKq/yKq/yKq/yKq/y/3dxf+0CvMqrvMqrvMqrvMqrfAt5BT2v8iqv8iqv8iqv8m9CXkHPq7zKq7zKq7zKq/ybkFfQ8yqv8iqv8iqv8ir/JuQV9LzKq7zKq7zKq7zKvwl5BT2v8iqv8iqv8iqv8m9CwrMODkGHoUdEcN4jAihY2LuiWUkpgSriHOI8CIgIIoavUoqkGO2cnO1bFbvYuchT0fQCy0/abKNP7C/bUk+XuokTWX6oZyvrM21ue3b+co2yLQLBO7x3KErKkLOiqH1rubqwnB+84MofrtSBKqSsZIUxZeakjyvnL5Q3b97ob37zm6992Vf5jPzd3/3dn1T1x699XZEn35b2mLO/XzpdxfZ+l/aJCCEEnHN479ntdnjvz8qXUiKWMUNEcM4t3yE8HsJSSqSUUFWmaWKaJlSVXMebT8hz60T167+bXT/o7nAFlDGm/Cey1t9SjyJl6Nzsf3Tu9jyaUbCe8GjPRtpfz+tJdbu/jKW6/n1etfZ3W9/nda/LGK7NYF6PWcbnVv9ovW49pi3H+ZPohZ3jw8NXfzffvn2nv/3t3178rdU3Vay5Nm148Y/PST34cn8uqhs5056XClN+e3SZTV/Y6NtHt/3ab4k82ji759/9n//Hk235LNAzDD3/7X/8j3RDx9XNFaELaEqkeUZzYjyeuPv4kRQT3W7PcHWNCwHfdYRhQIGPP/2ZD3/6A3GeSacT6TQW0OPsgyxgR3StyQoqAFQgoeTSwXN53qwGNOrLVPcrSirbToSAs2/n6bsOJ46M2jVVybkOsnmpTcHAjReDPJ1Cp4ID9k7ZiQGYd28O3N7uUZQPx8jdGEk583CaOc0RULwoDuiD8N2152oQvBO6zuOdMEXl/TFynDP/+z8fn9NEXyy/+c1v+M//+T9fHOQvKauXkvZeTymcp5Tnv7YcUyLCf/pP/+n/eanrVwCQcz7bb5MO+SToaZXJr1D8Z3977x/dswUrrex2O7777jv2+z3ff/89/+E//AfevHmDqk2gcs7c3d3x5z//mXme6bpuAUY3Nze8e/fuDPjknPnw4QPv379nHEf+4R/+gb//+79nnmfGcWSapk8+RwVLT70X7f5tPX8t2R2u+O//x/8ZEXBOyiTT0XXe6tA5fPBlYunwIZyBwLbORQTv/dImPoQyYbWJaNtO2z6yKuAL/afpLwYmqxJPtj8ncrYxdFuvFXzmnIkxnv1dj9FmX8r5rD+oKinO5DJGxzmSk4HiGONyr/N7ruVNuQFceW3Pv/vf/tev/m7+9rd/y3/5L/9LU20V6Ky6K5WJMALi1rrevi/LptgVLgGaR+24gENdz3nq/S71nTXbzVw7NsOK712z7/wW2/puy9V+X779E+WSzXZhGWT5w35QleXR/qf/4b97si2fBXpEHMN+TygvnN0I66DJBoAQ7IUKncd1DucdEhziy0N7K6c4xXlBfaVtVjZIavuIIKorCBIpLM/K6ChSOtDKwCiFOarIH0HJ5AJekMK2OPBe8M7Zi5XzcnNHhTqKqH17ETwGdDoRenF4YOfhECA4Rx8cwUl5l4z9MraHZQBBFBEFJyQVojpUBcmCiiNqJqmQn+gDX1O2A/lfC/A85/f2BfrXBnxeUs5nwSZP1dFT+75GfW6vcYndqZ+u6xiGgd1uR9/3OOfOrlGZoMPhQIxxOd45xzAMhBAWZgjAOUff9+x2O5xz7HY7DofDwvZsQc2iZJ9kHJ5+rpcSEaHre5wI3q+gx8ZdMSBUgYtzuPL83vtFUValWfd7X4GRX+qrgh7gTMFKZXzKBK9li1ZdalpOtYzJG5pAHUjSoojOAVLbJ325rzYA0hSvruN/U8Z6nhNBnYNswFDzOdBrj1+ZoNqnrCyCoLKlKF5GlM3EorIjsrJUtdzaPPP5dgU+Txe4PZ7lPlvgcy7SAjE274Gsp9q1ZLnGAoQWsEOz//EY9KsAz/K452Wpu4UMBQOgwpcQr88CPT4E3rx7VxrNuBPNSpoTmiKiMPQ9iuKGnjAEJHh7QTtnYDYIBCu5y+CyK7XkEXX2bAvTU7/t8Za2BCCTForU9mbEyKLaxpiJKJPJmshknDgDXQIuQOgcwTlcyhCzvXAr3LFGznb/4AzsOIGdOPbO4UW46R3XnSN4x64PDF6IBT/FlM1UhaDiEdFKaKFOiDjGJDgVkng8wpxgzgUMPaeBfqV8Saf81Lmfky9lk77kWpcU6Espo69pDvoWYLIyD58COZ+rr68BgKuiucQi1E81aQ3DwM3NDVdXVxwOB0TkjEEREXa7HV3Xoap47+m6bjm3AqAtY+icY5om3r59y93dHdM0ISILW1AZhnp83d6awL7lJKCK846r66vC3HicE5wIrgAgRJA6Cy9/gyn5WhctAPI+LEyP88YOrc91uX2ctGCn3koKYGgVOIXNKaaSopQ1e5I3MNMCnZbRcc4Y97rPpWRcUSps0aKMLzCRFeiKQGHkddN2udzb+qIu5aw2AFVF06rEX0qqRcIwh67Po0oWRaVADRVQd1ZXVRbgUyfun5CzczWvQEar7WOV9ulb60gFSOdAhgIsTB+ajq3fSzNQ2b06BrRjz+cYnycnXlIKUZt7Ob0q04oDvjLosRnIUMw/ZgZaqMhsjeG9MzIjOFxwiDeWR7yxQtWKJc7KKx7IYmxKPrcpS30EWfdLwSCuWCPzcpwsrJ9IZX/qWS0zpAV4FOrXCd7V+lTcYhBbEWytSAcYUSUEEToRghN679gV01RXgFAFp5qbDr8wPevDZYztUTXgo9n+ThjT8614jJcc3P+abNKvlV9rUnsOs/K15Uvu+5xy/Fog/CWzvOrDE0JY2JtqmtkCtHpca6pxztF13bJdjwcz8/V9D7AAoxZotce25X2q/r51f3WFAbNnrOBlNXUBVHv/UuKG3anfi0nLr9tmEtuaOjeAp2GLyqWX71Uhr0yEajUx1HZTjHhxLA4CDeBpGRtttnPRrFvleG7iWfcb2yM4cahktIC+VuGu98qoOnKu18fuxcuOsRcBW92uk3bgUkEegwYWgHF+8OP37VE52n2fZFX0nOE5Y3V4tL0tSv1t+05deq8v3/7Cu3hWNxUgVvhXGI4zff1peRboAdDyouXycCKO4AMqzkxFocxC+oAOA/ji0OwDqkroA/2uI3nFeYcL3i4WHZqKuSwlcsogBjAW8FOd+Zb7G4DJuWzXWYlWEKS1OvAIitA7z77vCd6z6zqudwPBe+I8M2
kBcq6A7nLNWu+dF4bgcAJX3nMdAt4Jt7vA7a5DHGSHofeCmVPOxYa8DlaCLObSlCGiSIakCSdCzJk5KvExMH8R+daAZKtInnP/T537NeXX3udbmUC+hXzOvPilz7pVYNX3pAKerusWQBJjXFiZc/NMVeAr6NmaZOrxIYSFXbi5ueGHH35gHMfFITrGyMPDw8IofIv+9DwxgCZOFlZG6sBXwE5lfCwIwp57y/Qs7E6zfwE9yHodzuvvsS+JjaIV9MCW6blkQjJ3hXLJMyBSWTUB8L4oWcFzXv9bsHCmELMjSwaMuXLqFvboEbiooCorzikuq1lpnENe3IdAFzC3uDy0jE9jVBLA5Yw+Yt5AVc7mzO31q2yBRX3+BRgs2w27s6IoK2sDKuxCLcvDUtblx0tPfIGl2gK4rbTs1iN9oGdGt6XeTBraR+Grm7fqLTIUh1/FidD3A06VbgjsDj0+OKJ3xOANJImQxTrj7jCQxh05ekLKhJQgQxqVeDJmZJ4mZq1Ov64QWPUltfsbO1TMV2q2JCnlOmeA7FxfbEr70HO7PzB0Hfu+43a/p/eeeRwZRcgpQoJye5yzmZKIAZ597/BOuOk7boeO4By3h4Hbg5n13h+PfDidkCmCwjxHkkJ2HsRT3jWbtaGkpOSUF7yqas8wZwNzL/1OfooJ+LWmqy8591NI/9ee+yXHPgfU/ctQgH8d+Vrgd8seVMBTfXj2+z273Y79fr+YpOZ5XgBRywhVsNMq8/b61Vel73u89+Sc+fHHHzkcDozjiC8Kdpomcs6M47j0t3ZA/pyDcj3+pRyZnXP0u93if1isTIvSc858cxBwspqrztmdc/8eKSdXR+jl3AqeHrE7K0sOurnOKq2iUs2Lz1S9V8siwdbkZCBMVZFkEz5t6nfbNiKyXr8FWVo8OzdMwnovEMmIFOdotXrNOYM3F42XElUz1xW80zAnFXysjI9ZKM4nCAsrRQN6GuatyiXAs95nhVcLwmmARK1D3exfaCUtekitHI+lApb1vEvgdfueXSr70+ZlXW5v5sAKE5uPPg2qWnke6CkvSKucHcU5Dgihox96fLFD44UslYJ1qGZC8ITOk53SpUyXHGQlJoVgACAVOrc2tGOd2diVKOjeOmztGFJfTrT6Kps/0HKN4mzsA0PXMYSOXdfReY9PCfF+QeOlngs1bLbnoXPse4/3ws3QczP0dL6Cnh1ZM2OM3E+TPb/a7KK4BC3PVP8XxABb6Wg1qiBjDFB+YaX7XKX+XEbm18rXYp5ewlzxl4Col5avxVh86pl+zfNumZ6qlCvbU1meqtTqsVWBt6aKlrWozER7bVijx3LO7Pd7RMxcVAFWPeapZ/rUjHT7HC8iUsvXKCmjr4vik4tRPltAWLfPyt2cJxvQ+Bj06FogtgxQVd4t+Fv9f8pFlvNaqXV75phctwtbs7BBmzJd3HayMD2t+eyc+ZHluou+kNZR++WkHcdbZmWBIUU/1N/O+58WlqcqecrzN2TMBcBzDjrOy/MIeOllkFKPrl/rbusbq9PJ+bn1uEtsz1a2+9uyPPptYana76affuGQ9yzQ40TY7fZMo2OeJlPmxdnOi9APHUM/WDglSiqzFOc7XNejgE8TPh3JaUamGRkni3CaEyLJTGMVvLWsTekZUkFUiywr8lfFFdBSvHqo2NmXlz2IGADKkGNiPo0WBRAjQQXE4b2BI6GAnuBwThh6z74PeC9c9x3Xux7vhKGYuUSN0rX4LnvRc5lZ2AtdHO5SLbsuZbUfrLR1NvItfXo+J38t1uMvBSmv8uXy1Ex++/eXtMf5THVVwtURuUZsVX+b7b2qwmrPOzPTbADedoCtx7eswvX1Nbe3t/R9z93dHV3XWUh0cW6u537Js30TkRVE2KTPtltAY3X72Ix1yfyHyMIMwWXgVI9dQU/9yOJTtJ3Vn3/Oil+3zoBPba8KUCqTVH9rj3u83ya66oScrEqcOPDrMZUNqsC3Mk/LPgXzphRwehY59vVFF9BT6+Ox6l4ZFZVttFo7WT4HofWaT4GEejczH563z1MmwHNzV1PaM9BTN8y8WH9v0wJ8yoKw3f+pMWdp/6WCyjELCNyW9yubt5z3XF/f8OAcD8d7a0znCMNAHwLDrmN/2FnEQUqkGMko/bBnd3WLc8IYYOwzOU7M9/fE+ztSzORpYvYZtIKeWoGZBMVfx1UstObgqf/lXKsfVwFP8Vx3CN4FnBN68XgVJCuaE6dpxgO9CHsRgnh679h7O94HR+iMedoNHbtdwDvHVd9xPXR4ETrv6Jwj5RqC6RHJZLV9SQt0U8UJaE7meMfaoRVBnTcmTY31Sd/Ip+dzskXfX0O+FivxKl9PWvbk0sD1nOvU7xbwVN+dYRiWaK1hGM6SEdZPdVKuTNBTvjxPPUOd4VdzWt/3vHv3jpQSx+ORu7s73r9/T0ppMXdty/4U4/hNQI/UZ1n9eeptvV/z8dRnrcDiEuuzgpjq07M6c9f9ZjLb7m/AjNiktzIMOddcLHVaeg58VkX9uO5qG9fns3bSss2j9qvnWF1UE1lawYG3RCItI3hu3lKcK5G52abCIiVFiSr6gu1pY3kqdVr0V2VpFqzzGHC0/c/qQ1j8emrgzRPFfoqpwa3QKT8FenRtw/YSK9BdvVxVQclnv7ff27JcYnW2rOlFdn4xl5Vrynr+JcDzueZ8XvQWQhc6vA/20OUOznuc9/gQysfjVUs4Injn6YLZ57XvYe7JDphG1Jsjs9mZG9pRZOkM1hcqIyIN6K00G8WmW+GDHe8xxywnghcrvxNZUbVmckqoZjrvcT7gBII4Om++O8FbKLo4oQ9+YXWG4OmDL0kO7bq50KV1ILGGqWUtZa8AiJUSbr/W884ssC8mlwb2dmBaynKh8z6lILbXbeUphfEU9dn+9imK9Dn3eq58ifL/lwbinlue59TVc673FONTfXa2JphtP7vEXCzjwyfK0R5TkxfW/D055wVIXZppbvv39pleXgrCEc4jreruDTvzVJLBi58FRBUQVMDJpfZqiwNY1K3IMnS1LIOJrgqnTsSXgc3uKY/qdfWPkpJvZGveWkFM6QtSw1usEFLLfWHMOndsdlDSlqh5VTR18XKSiy5suRpo2AsakPCILVuj5arqQtd6Vl375ZPvgxRtrStgWMpQJxoL4Lo82VnIhcUu0ejm5fO0mexTTM+z/i6F0fpgVe/LWqbPDU/PY3qc47A/kFKk73tiigy7HVe3Nwxdz9B7hsFAwWlOjMeJOWXQDudHnHfkMaLJqBpNmRQtY7HzjmE3mHN0CPRDb5VYkgZWpsdeCEWmxBQzIokU08Ki+NqHvYNgLWwJACFqRlLGp0gU8DkRUsQVpijkTCcOUaUDM3sJSPBmEvMB3+/N5DUM+J0lECPXPECJhCem6pMj1MRBwXtc8ARR9sGx8zWCwQafrMJJHVEdIsqcIb0o7fp5oFFT/8/zvMyG+75fcqPU2Xc9vk39X51E21ldG6VTo3ZSSpxOpyVTbp2Zee/Z7/dL6PHnlNyrfD35FIh9juK/BHa2vjytP08LbGo/q
yxNG43UAprHs9RzqTP+yh5dXVn+m5ubG25ubpZ+vc3U/Cmm51uIOIsSleLfKFCDtGyS6R777mxBoiUkBOfWsH4uKHmbDBqbgNgEbskDRE3YCnWCqVIBj4EyV2f9TY4ZO7xVguV6DrxfI5pquHvraAycAVKR6vuzAqSUIj76wuSvwcsrU2n3VM2F1ajPIuYLmmobZ3J6Qaan/FvD8VeAUAGGVuTAY9BjddP47NiORbE/q1uWuXglSCpvsDRX+W8pwdkP6xNVRFuZlhb02Gnnfj2fm5w8V5T2XrBWn35RfTwvOaH33N7copoZdnvmnNldXXP73XfsdzuCKL3LxrbcnzjdjZymiRwdYAOX0xmXV9BTk4T50LO7MgWnKS/5bVKM5GjGWy/28saU0bsTeppISYjzhJDxCIMXupqDpzggn1Lml1NijpksQo4ep4rPiS5FnGaSOJw4OhG06+hUSd6hbsD1gDrwA35/bYP1bkfY7xEgzxNpGokamfXBkgumAnqwQavrOoY+EDy8HZRDVxkqa6U5wYdZOEWQpIyZdXT5hlI7YUppCfN9eHjg48ePpJS4vr7mzZs3izJqlz6YpomUEh8/fuTDhw9n/hLOOQ6HA/v9nhAC19fXeO+X4+/u7pakcSkldrsdP/zww6NInVd5OWln+E+BzEsMyFPXuQR2KtitoKY1W3nvl6zMu91uSUrYRnK15avmi7Zs9bvdrqCnLl8xDAPv3r3j7u5uAdz39/ePnvs5rOTXFBu7bG3DGjlasYqBmMsOy+fmrTbMPyzXWxjkRumCZX52ziYwBqpqPa+mK6tHcyPIRYEKWB62BfjUSjpvh8WZtzyEa4GqQs5pyfvWgtqcwTm7v3OhjCeJHBMpRCiBIFJsAYgsEWEuO2NZ1JxuF3MXIM50DKol9P3lxOogG7PEJRb9034MZ/2toKZPvYPnYL1uNAeshggso3YxSaEr6FqRzPlJFRnVQ7KeHV6327I85ZT+KXnEsrKSh7kBiut92yM+Lc80b0EoKc19oYdDCISup+t7PBmvpSMCKSbSnIjloxk8yQiyXBzSKpMjYjl7VMDVBzI7bl7yR3hzWpNkL7FzSKE8jeK0tbF8MTcFb7OXWcFWONGyPpd1dFXz+/GqtvRDoY5TWe9FRApCL/DEOZwLOB+Q0CGht/tmBZ/KVMOzrCHW0M51JhWcrdHVh6Z5Crp2sY4J34bR+Bx7UkFLXbcoxsgwDKSUziIv1gEqLz4Sp9NpWfixgp7aX9oXIee8XL/mUUnJbOCtQ+KXlPcl6uBLr/HU338Ndmp7z7+0PJdYn+cAnxYAXfo8BZK2fjytz8ml70tlbhVMZRhTSgvwqn35UyaCv0Z7rkBHmm02+x5HXp1/tiHrGFC5oBvaunSy3r9VJvVvVV3G23rRRbc2xxixsh6/Ppc0+wtyWgBYPitvfWYz7+Tz51oeqFxbtSmz6RGn69IXUgCRqC0zlDWXv79Giz0tqmZ/Ec77TwWe67Ovsu1z9ndRFLK2yXqdz5Thqa0z5qktFwuSWVt3BUAVrNbD1nMf79te9+n3tUFjn3uehulZ71Xr5dPyLNCjCmlOBBd4c/uW3f7A9dWew/Weoe/QOJGnhOZInGfm08h0GkEdmsuieXIiyAOiieP9yHiKoErolK4rC+iVD0hZdLTMTgpQmufEcUyEWLz/NZlPjiiDNxOXc2L7nDATERcBSwkeydQlV6p5JmblmDJezWGv954OJciAdAHXdbhhh9sfbAHVw4FwMKrca8ZjrNToB2LYsTuN/HkSfpmMXg1e8c5McHVQAcvTo6pMSTmNiYc5MydlitnC+F9wkL30YrUDaJ0ZjePI+/fvl/wpt7e3C8PThhrXWfNPP/3E73//+yURXEqJEALv3r1jHMclcudwOJBS4sOHD/z+979fAFaMkdvbW968ecP19TVwvojll5gdnhoILh2/3bd9OT8FjJ66/0sCtZeSxzPQ55d7C25alqdmR65mq9bBuY3qqoxPZXkq0/MpcNMqymqWrSbXGgZf75Fz5urqaunHv/zyy5LQcM0zc7l+vpUIxbwlbgE6vqxf6J1bzFveX3ZeVoWUzMn0dJyIMSIIu33PsOvPQACwLk2x6J0a/br6H1aQUX1iZFmQOa9qUZvFKF0137AsHu2cKxPJ0sfquoQLUySklMlJyZqZpsQ8JstdNs/EecZMUtX5vEboFmDhvLEpYFGyWRcmo/pSesGyyLps0b4vyfSoMRPSsB2LY67C+ZIN62lt6P3ql2Tz6cIJ4Mr6jatsJ18NNCpMzvp/KV6DGtr9rd5RtMllJM3P0iw9cQ6eVvDzmOlpJyFPVlszDjWlsLqhLIrKQvQ1Y8NXZnpUlThGguv44fsfUQd977m67glBiMfEOEZSmpimE+PxyPgwEsfI+DAiTuj8TO8nhMR0PDIeZ7MpO3CDDZBD39N35i8TgkVLqSrTODPPkWmOPIyRKRpL1HmH5oSQCSQc2YBLCHjvmBlxblocsOZs2Z47Z6sOe4FxTuQ54sqsoPczST0DIEOP73v8/kC4usZ3PeHqmu76huA9h6HnUGaNaXcN+2sejid+mpT3k9mfUzyR00Tx115YpJQjMWXGOXF/mvh4smSGo1IWHX1Z0LPthO3suyqN4/HITz/9xOl0YhgGfvzxx0UJ1UE2xsj9/T2n04k//OEP/Nf/+l8XAFMXinx4eGAcRw6HA7e3t7x9+5Z5nvnpp5/43e9+t5jU5nnm+++/5ze/+Q3ffffdUt5LkUXPMXk91zzWHv8lZo+/5F5fQz7FVnypfMo/5kulRkzV7wpaahLCYRgeAZr2mGo6rQBoC3paP7FLZZ3nmXmelwG3ZRrrdW5ubhYA/uc//3lhfdpBeivfCsBWHxtjXYxhsSSpqxkrLNtr8tRaV4gQ50yMmZwydx/v+fjxDkH47vs3DF2H8wUwebcwB+uEeWUgbNX0BCqY946zsGov1GWZrczmbIv6+ldZc6qxiNTJa6Mgc5nYpWI6yTmDRlIyAHp6mHi4n0gpM00j0zgBma7LhJBMxZXll6BEcvmAKsbmZzUmXkEkWUJb70CzRfAiSHo50GPWhRIer1w2bzUJ+eqw0eYUavujY01gmOWMY1vObRm4tRzlXjx+v7X9fUPbCKyLZi9sTPkuILUc+ihk/blsz/lxKyCs9ViBT9ZMXqLGqn9Ty0x+Wp6/DEWJtOq6DgmOrhMzeTlIImhBYXXl9Zws4FyLE5r4iAsRIRNjsk6/IMnaOdfZYgiBLgSr9ILqslKiLwK4bNanbN3B62z+PSIlSaINdq4MIFpnM1C2jXpRSVRiJamW/DpqbjWurCHmXE2nDN4WUsUHfNfT7Xb4lBj2e3b7AwnHsNuzGwZicswSiUS8wxYOdCwvXVbIxXE5pkTGHJtfOjnhJbnUGesMuJqqnvL1qAqjLiVQlU/122pBULvYY4yReZ4X01h7zr9GuWTu+tfA9jyXzXpKWpbrKbNVy048ZfraMo/nkTiX71t/b9me9viWgdpGkD2HNfy1TNgXSTUv1T/rOMVanzR1e/ZpZs85G+iZ
55nTcUSAOB8KCKxtZEzS2RqOKw/QbBcFe4kpbRRva6IwHaTrJSr1UH/MDRsh59eowChnJcZESpl5ikzTDChOlOCbQi+3LnVQWBGjRPJi1hIxk5e5sui3M2+x5pdr97cfkXOGZDEBLt8rYKQq+AYsGeNhQMDqVRGVtTVlA3ae2mY1R67wtz22/lLAsp6Xe8v6XDbVcfbb+uznjbHuWz2OFqZsAZD1/C9ryOcxPTkzPhzZXV9xc3VNv98hMoM7ARF0Is5HpvHIPD0Q55E0T+QYkXkGILlIdAZM5mliHmOh346kaE53GjNyEELw7LqBobcsqn3oSTsDBj4MXN+MVmGajWrVTI4j5DLgpWT7fM/tmPF+Ijth9o5cZk/B20uvMRMBshrz2Xl88ITeE3rL1ZOJHMcjPs/2e2eD5v4wIENPQLj94Qf84YrTODI5z3B1xTxP3N+951LiFmUAACAASURBVHS8Q1AGn+gkM84z97+85+505BSVU1KmXGYH0HS9l5F2NePzl89mF3VmfHV1xXfffcc0TVxfXy85QirAAeu8wzAA8ObNG3788ccl6muapsUsdn19vURlVR+Lm5sbvv/++4XpiTHy7t07drvdovTae12yfz9HAX1KiX/KdNa+sNvvS9f41wB0npJfW59bkNI6Le/3e66urpbQ8TY6q363rE8FJO2Co9VfrILvlqWs5TidTvzyyy/knJfr1WUpatnq/eZ5Xo6p1/mXIEvUVp291x9Ks1z022nC26dp4uOHe+Zp5p//6Q/80//7zwCkNNF3gX7ouL65puuHQvQUk8HGSqDtvcUheER0wwbZkYqg2WgXJSOtksp2TLY1jJZ7uJJUMEVdmKkUtczghRSVaTJfv48f77n7+AERePfdnt1+XyLNKOH0zpj7rluYnuxsLR8zyZmrAdkYHyUhSciavmrbtaLYJNqRVx8nMNZrYb8ejxerSYjizG314WqovWJmu4Xdqaw9y3et4/qti1P6eautbMo53C0EYOmHlv2ujXm3968yPufj3mLWrLeXFrCvxEYrLehjKdca4VZ7U64mQZr7l/O/RGM+C/TklDneH9nt97y5vuX6zS1zvOM0jqScUZ2Ypzum8YF5fCBNJ9I0lUopqI+IYAtbxbmEmwNxemA8zjbryuDFcvr4a89+ty8OzAbnsyrXN5E52kvqxZzvUk7MpwdinElx5vTwwDxPEEbezbDrRiLKKErC2i8Xg2ecE3NpsCSC6zyhC4Te0w2O0BvoeRjvkBgQ73Cdp8s9N3KLK5Eh725ueCeOaZ7pr6/5/sfvmcaRn/78Jz6+/9kiFNJEzpH7hyO//zByNx85RTgmGJcB4Qtb8FdKNV1twU51Iq65TJxzXF9f8+OPPy7RW3W/qtnZgSXixnvPu3fvFrbmeDxyOp0Wn56bm5tF4VVQdXt7y48//riwQfU++/1+8blqfTM+9Uxb+UvMTJ/y16m/PeUE+6JMwAvK59ieT/2+/VTQUyOxrq6uuLm5IYTAfr9fwEYLei4BoMr4gimD4/FIjPFRuHYFv9Ucm1LicDhwOBwWX57FPLQBPV3XnT3np+rnpdvVzP0V9FRVdGbIWMxfK/BhCZZQhWmceP/Le47HE//wD7/j//q7/xswv6Cbmyv2hz39MHBza2CSPBf/GmVxAmkVpNT8OAZ6nFPElfpoZ/yuMDWAkkwxZyW7XBgKJZVErcayVFYnM8+ZFDMxKpoNQMWYmUZjjj+8/8if//wnnIP9/kfk3RXOuyV/kDhHCB2h60EhSVr8inxSNJXQ8ZiIWVEi4oSs8eUaU8vyQoKZBWEFPGW7/tH61zgn5MKEOUfJUaRkFVwWwx7agGKRhb1ru+fSgmJ1vyT9FWlu34CVBvYI602kMEtar1+BTj5nXOp11izU5070C4vpWtCjj8bOhd0p71tmBTqWgqCsV1mDjBTENRXyCXm2eYtinwze04UA6pm9Qyk5cpaXJS+RWbrYAxVICJZXJxeHNYBcF35zZemKEkLuvSU2rDl6qGjWB0Jal5hwIqQUmRzEeSLOAU1p6Ty7YYAMsxmUiKxOzapKdm59zeusya+zp0oX5mx25JgjMSWcT4Y8y3muJGjEOXZ7c9YN3nN82DGPO3JOpFlIyeFDRMXZgqT6eNmJl+R5WtDTpuqvHbH6QlRFUcFHBUQVHNUO3TqAVmUHnGXUveQv1PpbtHRuvV9rTvucstn6elyiUT93jUumqUvHtM++vfYls8q3kC9hvF7a12h7/a1D86WIrC8xbVWpwHie57NcLu2ssfXleQoItya1p+71KXnRtpVaj63iaH5u+t364WzAyJoXU/I0TZxGM29N48Q8R7o5Lj4Y1SyEFDW32HuEJXK1aKz1floYH0At6d9qY1oeAlD7OdVzVr3UMgeqdZ3CAvDEl+Pckv05lRQnztflGdwZI1YBoLkxVKdp8/1w2QCZliSIjlySyS6GpxeTLSfGwoG1B7UMjDEYUkPJ1eq6YMYzwFOqy5rh4mPoZvvRnc/uXzebkfPx9So2a2mhs0053wmPwMgnuPbL71Z7r2VsrR1JbWHzzXM8Jc/L0+Mct1d7bvY9h85xCEJyHbtwIGtgiBP57oZT8Hzo7snzzHw6NmBWDfAUpsfQfEl6uNtz++aGvuv4d3/zN/z4ww+WOv67d7x588ZAUPHRUQw9L2iyfDQnM6flWJieI3GeOY0Tv/l3d4zjxKzKSRNRldM08uH+jinO3Ln35I/3zDHSBccwdAxDR+h9QZCJpDMpZnCOcTRFPsee+4cb7h/uCF1Hr4meHSlHnIN+CIjL3Nxe4Z2lTx/HkwEzAfGeOSXLPbRQgvKsAfjXyDzP/OM//uMZ+IHHSktVz0wKp9OJ+/v7M2UG5yDp48ePjON4Fg1Tz3XOcTqdAHh4eGCeZ37++WceHh7OgJCI8Kc//Ynj8XgGINrtdvCv96jShje3oORLQMGnQFMFZFVxVxPMVtE+pXD/JcrW5v6XyNY8WKO2KovTOjBXs1XdXz+tc3Ot2+qj8/79e373u99xPB7PnJ3fvHnDu3fvcM4xDANv3rwh57ywTPWeLUPXms7a5IdPvQvfUrwzpS+2mGBZemJdQX1J6uhs0mdMTJlxZyXFtIBDgL7rUZRpjrx/f8c8J27fviVGxQfBScB1AVAyqRjYWxGcdgge5xTva7TR4rJcgEvNdJxL/ZUdZXLpGsUb58xc/DrHKTOOZv4ahj3X1ztUlYcHJetHUhZwgdDt8UHohgPD7grv6/payZge7wmlzyRHydIs5GLacgLelnSGHGE+ofPES0md5qs4EF9Wkl+pEi0OwUVjL0o7k4unsixskCBkdPGFOgMqZeJf79oCOaklKWyPUJiis/4t59dTWVdWX4uF1pXMdTUtmaykgboGzEqJ/VNbDmphmtBSFsgl99JSBU2RnCtPkuuaaboAn23ZLVLvKzM9zgu31ztuDj1XveMQQCQg7gqRgT5NpPtbjiHwh/4nUpyYxtNZpUECiQWoeEP0CIf9nu+LH8d/89vf8rd/+7dm+nhjfiCVkvaF5tZCt0F50+pq5Vr
8eFJiOp1IMTLNM/d3J3OE08QxJaJm3n/8wD/98Q8cj0f+HBP3f/wTOkEXHP2uN9DTefAZFSHrxBzNGVpO1nm7uefu/oa7h490Xb9GNuSMCwZ6fADRa3aDX6Khpmlkyop4T0yZmPNi6q4D2EsOutM08bvf/e4sgWAr7ey3BRSX/Gng3MzTOiu3zs/VLOG953Q68fPPP5NzXqK62uvX83755ZczELE1x7URXes6O+c5XVr2agug2meoz7HNB9M+n4gsJhjn3OKf1CZWbAHgv3TZ1mH7vZVPAaJLpj0RWcLQW+Czzcrcgp5aty0walMf/PLLL/z93/89d3d3i8mssoo3NzeLOe3t27eo6nLdClBblqdlnkIIC3v5OXlx85asyQmrVd97R993ZwEeleGRsiaVFto/ayameAZ6ut78XOY58sv7j0xz5PvjSEwZxNH1wcY6lERq/Fyq4hVEPaIWSu+9Yv7CuviK5Aya1EwNYtetGZ2VtLAT4kxxxdmicWPMjGPmNGZyhsPVgTdvfwDgT3++J2VHTIK4jtDb2o5DAT3OCSnOxDhbJJsLC+gxPV/e9+LOIgXUCQnyDPOJPJ5erC1VSoCPOKTkcFPNprNq2RrQs4AVXcFC6RQNh6bL8StEWKPwDCDZ2FMj2k1dNgyeFD0Ga3BPAzjMdFWZlBXcGKiutz4HTcsaZlLLBm0EFiiu0lSsZTSSIi7PWYGLOI8UXyalROFVcKbVFLeG7ulZfTwtz05OWPPM2DrihpxtBgJD1zH0PTlGuhCoK6Ro00gilcsosy1XB7668rJ9alhr33V2LeeWtb3WysEqICcsP4Qi2e6YnUDuyHVdrMGWggg5Q45EVaZ5ZNd35Bzpu0Dwa46gugBfTf2+MHaiy2BTO62qIdVUPjVzKWqzISdSQkSLicjJUv5q/1ztqS278JzWeZ6o6iM2ZjtLb4FMe94lZd6yPi3QaRVpe69pmpZ9FSBty1CXCNgyJ1tzXAvMKui55GPyOZNHfY4ty7b1U6nPUxVyZSNqcsWWsfqW8iXmrS85t92u9fwl171kWtyakS75/WzNS9tPe716n+2nBZrtc1y61qfq4EvlWwCf1WxTTDkX62c9Ft2aLuwvezcD1SRg72EmJcsT5lz1x1jsTuvMvv6nUMNMLYDV/ChNGbuFRbDoXW30qxTzVOP3odW8rcyx+PEky0tW8YBqHR9thm+Kz/ITOedNZZbXy8Kpzyf/i0GuMiWlfPZw2RZ9zomcIjnOX7XtWhGkLPLaRN3hTIFqySSdK9QpgKjqgeVL1nZYZNPSZ22/VkbVKGeaperglZBZ+3NLs7AhVOpltVbpOaOzLZUuNVA7kFywa610UvuMq3lXFkaoNuVawuIPtkI1tm/AJXlmnp5MHh/QqcfFIyHZiuTXw4APwpATXZo5nU789E+/8ObqQDxNRrXGiKoQOk/XuzJLvmK/s2iO3/72N/z2t79h6Ad++P57bq6vCcHTd93i/CUF4LSVpShaFg2VMtNByyq8OSKa8KL0vSN4W14iEEgoTiNxfsPptEOOJz7eXvPghOvrHcPOTFu+80gQxENXUrsjrsxed4TQ4ZwwzxNZMz44mwGpMqeJrNGoysIA5ayc5pG7h3vuHh4Yp4k5RnvhYRnlRKq9+WWkzpqtaj9tPmqV1vnKxSsr0m6312jNVRUQ1O0aUVNn8Vs/n8qmtNdszXHt7L2Vp5RldYauZXjKfLW951ZJX11dneWTqfXRsjzVIftbyVYZfqmJZvtsbds9xe597rpbNmXL3Gx9e9rvrb9PNZFVhu27777j3//7f8/xeDw7fhiGxXn+7u6ODx8+ACyruVeTWduH2vQIn0rH8K1FWJke52ySVFdW34LExXUGcwz2xQenPm9Kanmx3rxBszLs9oAnZeF4nHj/yz2hDxwOe3Z7u38sk7isyhQTMSZLCjsm8pzp+8C7twcO+x7nHWEIOO+JKfJwPDFN0XKplHHZ/HHqxKqCFOX4MHL38UiMyZieo2Xtz3LPlDwo/OnnBx6OyjQLWQdcd4N4uHtI/P4Pv5RFoS3xa/Cevgu4oTADQuM9Y4kJc56Yxw/M4wPzOPLw4Wem4/HF2tI5x831VWlYtyhvX4BqismWWsq2DuU8zwYay8hv4M3htKwDqZXJKW1en06NgLDW12XCreXJK0cA5d0ti1Qac1Oi2hqWpQIbrUvC5zIO5DK5zLoAS7ujmM8UBbcsw8PKvlQCYLmF5nIPA6LLE5UCOy2AfPnFCBfvrB6zFl+vbP5kSRNf8vo+z5E5Z9J0ROcBH0/42LEbBt7srhj6wJUXDsExTiP//O4P3B4OjPdlMckyoOz6jv3VgA+eN2++4+2b7xiGgb/97W/57W9/Q9/33NzccH19XV5uYUnHVJzSbEZj4MYazVC7VV4qSF7NZqsZJzB0Hg3WAQZn30EUcllM8+GeX95cMzjl+mqgL6Yt3zlzaPaW98f3PeJs/Z5ht7NF/Zwwx4msCT8JUmZOMY4kLeHzDjN7RWWcRu7u77k/PnCaJuaYSLmiWGk608tJSon3798Dj5P+tbNn4Cw5XMvWtH4QTzmCbv1pWiDQKteqcKoCquVqz6/HVgVV11H63Ppc9dyaD6gCmdY5uy1v/fsSa9DeqwU6LdtQGZ8aYt+W4aXlEpD7nDwOHT1PiPZrylDr6FNgpwU8TzkzV9BTTVjff/897TpvFUg75xb28OPHj/z8889Ln6vZlodhWNqo9oV6ndYc+df05bEC1EmEMcQiXMxvZGVlUTCy7LDjQ9fRZWV/uOL21oJKhmEHeHIWjseZ9+/vCX2HqgMxdTDnRMqWIPDhOHKaZlJMHO9OTKeZq6uhsOKB0DnC0OF8R1Z4OGYejrHk1ZktWCVr846YQ7L565z4+MFAT4xKnM3HZZzvuX+wZYP+/POR+5OSkgADvg8Iyv0xMk0Geq72gcPg6frA4TAsKRPrGmFmXTA/npwnpvE908MHpuOJ44efGB8eXqwpvXdcXx8WBa2qeOfogq0WEGNimmzR7XGcyLlk0m6mvAvoAUQTNUN2XWgVLE2kE232120WXx/H6vDuXGXB6rpn5igkrsT/qy7mrboSgpFkllagsoqiq4ksawk0grVPygp4Vn1uUhdhXexlWogVpTBiypnbULmORalZ7H7UWN5dJaZMyp8fZ5+fnFALwEi23AQ5WIptzTjEGrQ4kwXv6bwneb9kVQ4h0HcGKIa+Yxh6hmrTb1ZdXu+n59stv2Z7z7brMYstUTO15ird6UrteSd0waPZItG6ztN1pQw1vft2RiVrA0rdoev6Lu3gaeBZzM+nGZxiSkxxZi4rzOeKdptO+dLSshitWeKp4y75wXzKBNH+/SVK5FNg5Uv3199+7f2ee6/ttVqm5Ckm6SXlqbbc/r01Y36t8m37Qgt+njJfXSrjJRBax4TqGO2cW8LW2+dp14sDFtNpaxKtdXDJT+xTcokNfSm5bMK69L616001Y9wZc1ZVIyUCyhLGTuPMaRzpcubU93jfoQIxzcScCuiZGCvoOY5M44z3wjzb7+LMVCbJTFTzbE
tHpJiYRhvfNNt6hzSgJ6vl35lqmHrClt3BluBxs4HZmBTEl0HXIq8gk3KEnMheiFGIvji9nkUKr6uv2/PXyVwixZmUIjlFy+f20lLum3PVBZ5q7tLmOyNlUQ9ZbEYWjVZ0Ykn2KNWUUxCGLkttnMfRqRTHclj8YC1QxkyFWXPN29KQMsVJmXrNqlNZ6/ZR/y/gR+vNlgen2jpr3xSRxU0GWH2BmpZaNz/zTpZ71rb9kvfy+eat6UQaj0wPH5l8YkwjDz4T+846YppxKTM4x+1hz3x1YOgCnbP8Om/e3vD2+9uSlO4NNzfvLAHeviSiUyXNM+PpWBoHo7rEkt85qfk0yiBfabVctutglhJxngvFWisGEIeW1OtB4ND39N6i0t7e3tA5YTd0DPseHxyh9yVqzBBsjhF1ieRtIFBfHHdLCKgPER9ma1wHQcLis5OykhQ+3N3zhz/+mQ93DzycRhsECjZf7PMvrCOds1XPz9v3HGC2jEubD6WdbW5Zgu21nspj0/ri1N9as1CriNpBvu3Y5w6d55/K4tTzgIUZ2MolRV2Pr3XQsg77/X5ZLb7mHAKWXDA1KV5VvN9KLoGJS8c8dc6ldtqC3S+5d+scXCOs2qUntozc5wBX285XV1eIyNlCuKq6sDaVKby7uyPnvCRC7Pt+6e8tk1dZn9bEdUm+tB6+hqwmPfCh1KlrAdCm3dy5Qsmai5P3NSGM/PzTRx7u70vft5m8957TOPPh4z0+BA6HK3aHA6pYNGkFPePEOEc0KXGcyTHx5vaK6/0eL2b+P84Z3wc+3p34pz++5+7uxDwnTseJFKsJg1LnK+Mxz4l5KuavXNYhBMI88zAZ8zBpR3f4Dp+L76RGNCemMRPHB5woOc7EEYa+4+qq53AorDS5AEIDHDFG5mnidH/P8cNH8jSh04hLL5enJ+fMdDoxzTMPDw/ENLMrkcpd19k6i9kYijHDmB22KoY58AqCl0CQvgCFbIkOadYfUzW/0YI2BMwMRjEnlb7hnceLwztPNwz0fWeTgmUCQAMABShLimQtelQXS4vdp/rrGK5ZFiVpskBbAkVrS98VZ3xxeC8LETIeldT4VVUMX41lFUzVPnQ2+VgmNA0g+4w8j+lRJc8jaTwyP3xkcgmfJo5eSZ2ZeZw3W9zgHDe7HfPVwag8sUK9e3vDv/vxe7q+53C44erqDd579vud2TnBIq70nGq2gdTZOjNuXZPGFKGhZFUhlYpIOZUkhdGqrtBe4kqCLefwouz7jqye68Oe25srgoOu8/RDwDkhdIILdi8DAjOSHSlEUnlZYvFZ8pqJ0ROjO3O8ro6DkjNJlQ93D/zxp5+4fxg5nkZipeRcXZ2dL2q8v0Rq5FFrxqr1XRmrrcIHlmiYrTmpvU5rFqqUfLu/KsYtYGrNFtsIqC1zsC1Xu92aAupvbRmfmtlvn6k1tbUKvQKdNttwlQoWvffM8/xNGJ7tM3xu/1MgbwuYLtXTU/49W1antkGtnxqh1ZponirXpeu3C5QOw4CqOcOP47hERN7f3y+A6P7+npQS+/2ew+Gw9OdaztoXKrCtwKdlP/9aUn16bF2tMs6dhSSft5mvS0qU4Aun5uN0dXWwyQpwLBGSMZrzsHOOh4eRX365s3baHeiGXQE95lSccuZhmhnnZBFZJR794WHkh3dv2A8DLgS6mHFd4MPHI3/44wc+3J2YxsjD/cQ8J+MAykzO1nFaCAXq7C6rkrREePmMG6O1u98T9ntASMnG85Qm5oc7Ho4lCismUkjMu47xdCDOc9FFBQ2ytnWcZ8b7I8ePdxAjTNOLgh5VZRpHjqcj79//zDhO3NzeMBx6xAtTVsZkKVimDFMWkgqqbgE9ne8QNxgj4jJOqt9qJuYZsIVTXaFzhLyyRKwgGecBj3OB0O/Z7QbmeWKcJrDUvAuIgmrhcMao5WQ5jrRyOksDsrRi+U0LIKrsCwX0OCf0NZIylNQCOZPmiam8c1JNMguY0qV/VLap3lFZg4CyfpkTM/yq5ITmQ5NjJM0zKVgEi5NC7+Oo0N6JEJyzj/co5mzmvTebsHdnodlazFK5pCyXhY4zydUpttBzHoFq/3OGMysVKOLOy1wpM7XIgsWc5ASn5w6Xy8rFJcqqzrBUm1nggjpZZhFZPV1KZvMEXFYDyw1mVYSUM/OcbJ2tukhbva6s3OBL68ttZFSr9LYmgbq/ZX2qL0V7fHtdq5/1mEtmlUvlaX2Ktn4WW9bnkvJs/X9aILM10219drblvVQHFRBeAou1foAloutbyqW6gE/X+yUAdMk897nrbYHP1odnu77VpetvzUefAqa1nbbbW1PapX3b3y6xXZf607cwa9nNV8a3/eZsX1vOut81fzuc9+ZzKFVB2Pp+qcyM5zJZS1lRN5OK38hcI7ty8bGLZnBxWlb4zhVEZLIkmCMOSvh5Kp+8bCNCcSqw8XKZkMsyutfFSetBy6y+joXlWcU5nHpcCITQIyRERqAEQpT8RM47glik11Kt0lDouqipdVmMlxBdo8u2CXZz+c1Slth36/tT68BXdkwMBKSyzHrKFQzUxypMT6lfofg0qdV+Xb6irvXYJsStDsuVuxFZ2fiUpbTJ0igXDBEry3Jm3Wr2V2nfv1z+vtgCajfSyi7V95DHk5LS678I+Dyb6ZGcSdPEw917NB5J8wHvpOTfEHww0JOmmd47dsEcz5wY+NwPPX1wBO9K1J557Kc4M08X7P1iD2Oo3dbdcM7R9T2hs2RPocyKVMvyCNkQfYqNTTmZbd8BQS1fgBNb6wtgN+y4ur4qnZIl/N8HcF0ZVFJE4lrFORWTTDyR72e8d2aawZTesBPEhbJ4qaH3mOE4znx8ODKOs0VtSV2vpsyGAKkpy19IUkp8+PDhkfJ/6ru2R3Uq3TpUXnLobRULPF6XaetIXFmS1i9ja45qo7paQNOGvrfArC51sXW0ruaQrRmtKmg4Z3pqPbTmtL7vF3NLPbct03Zts5eWLXDZ5gpqgdmlNmmv0co2XcCla1YGJ4SwLP1QWZYaPVXr7lMm0da06dy60nS7DEVbn21fCsHCsne7HTc3N6SUluityjTVe1ezV0ppYaJE5CySsJapfn9LELu0pSsTw2U19fLMSwg7xbxVjnVWT12/YxgiqgEfBhRPRkg4WzddhdOc0eOMuIQbM86fqGzMqt+EUO7XB8uBM3TBfHxOo7HT0wTecXc/cnw4MZ5G5ikxTjNzrJPU2i+hjp+VsapKculLoriS3FDyTI4PVB8U33egnj58j94MtnzG6WeY3iNgiVNlJHSBq9trdgdLcuidgxCIoUOkB/oC/IQcX65dFVtTTCRwuL5lp5luGIgS0AT348wvHx4sSWMWcqr6XcjZ6ihGZRzLe+AsUk1QUhzJaUaAEMyXFgRR2TA9Bi4T2MLgKOEUDQSnSJyNaLB3yRcCwxM6852bxol5MreDRjWytqSCGotUf1m0l6ilw0aWfHqqlgwzBHOoFzHyQptrgqI5slrbKjyuugLQkg7GuRLxBu4L3tHnMz2Fjjrd35Fnj+ZE1w/EmPBe6IIxL
nme6UTYBY9zGGgBdn1HCJ7gBUuzncxjPc7MQvNinCtb15grnPeF3RHUOYuyCpZYC+9AszE4JwsvN699m+EgUjI32rW7MhMa+p79/oCII2kmZosekI7VvFWef6FkM0BmnGbGKZvzszdnaDNtdYSuhGmqkHHkDOMUeTiOTFM005YUwLMAvToovNzLWP0etiafz0XthMbZfAt62uzL9TotuNmet51lw6rU6uKj1S+m9eOpi5W24CbGuGSNbq/ZZu2tShlsUcrj8fhIidbn2z5T/dRke9V/pzV/1fJUhd3W6UsrzEssRQWDl467VPct2NmaoC75WLXHtsBjt9udAZ+6cOx2nayttKAH1qVN2rQH2/7a9qUW9BwOh2XtrerT04Ke6hAdY1x+A8sN1bbdpTJ+CzEw89h/TmQd6JfjjAbBiS/5eCCEnn7Yk9XhQoeKsfAZNfMJAjGTZMam0/OiVFzhZcStCWG9g6GkEOlLqolxnFEgCqgo98eZ8TgyjzPTnJhK4sHzNjcWqkxlbXJ3xmAZw++XnDozmjIqjuD3dMHcKPqDJ/gbNE0cf46M6QEkcToeSXOk7zvC4Ol3wZ7JCUIg+ICTAHQliaNwwc3vq4kqxGhJaHeHwSbn3pHEkzIcp8iH+wemacaJx0mNkhVyqi2ihdESvMNWl0fRbD6rIjAYO3BmZrJzWUxSGSFpAT2T+fIYsChMFFKsHIIPgWHorT/lhjlvDWlPQwAAIABJREFU+qcxQ+t6Yivb0gAfKceUvERaGBsD8X4B9ueAh+V+OZvTfQ0qMnDQwC6p1pric/sS0Vtk85kxk1YmzjNxjuYJXpaVABuo6ssZKAkx62y4vZ4a53Y2yKieIf9lNqDrjKvSgCxe25UCrYOER5wvs0VdKLF2zDqjur23xepCgpxIAmjGkhTay5jrcr4NVWmJ1Y2W1GTriaVklG5Kq5NVdeDLxaZd6cWVEDyrlPXzQnSP6rpY6Bb0XDJvbGe8W8XZmoKeAk9VabVKFM6dmuunAo4tW1ITArZsRTtD364JVstZ/WxquSqLdCkXTd23zd3Sgpn2mVql1P7e1tVfEgL+l0rLirTbTwGeKtv2uvR7y3K1q6O3y0hs+8ql+18ybbXm0i3jdun5qji3Lg+yXfutHr9tsxaEP1WH23Z/KQC0MCMtGyIV4FQzV1uP9duVMdKitKY5Ms0WPl4nVc75wprbmCfF/FPDkgUI3i3sed+v7XjYGyO23xvo9yGQtASN5LJCek6LG4IWPxBDaq4Mdm4d0+qSBGrP4NCyZpYxAvW5rF2U4Cl50CojK5ZduSrNRqlWrqE2Z2t27vqeftjZeo3TiRoCDl8/dD1n5TSO+C7QB4eKhyyot2SLSS1XT5wj3pcJfXFJPltWolEFVddp1R+NqWwBIy0TihTTGGUJCzOlmZ9tXtIKWKKf8i4UFxFZFvpepbqKLIxM3S56+8zIJatryZnJWpuxs7I1hUQQTGcu/Zr1+jT3BF3fCQo4fOL9beWZ0VtKmiOTKnd6xHuz/0q3o5tmRBOilpDq/v4e78UyHjthV/xj+l1XWB4rpC4DmyMnt7zUVXwJfa80r2YlSzFlVGRaXmLvHEPXEYJDRej6k9mrmcjjRMxaMMs643DOZn/DsOfm9g39bs9pHtHTiZwToXP0vZU3ckKjJRrsuoFhf4WqMM0P5n0ulmvh+HDCh4DzPeI6FJjmxJwy45RsFhQzc6xOfZuGUngpsFOlzdPTtu/W2Xj7e/vdKorWQbXteC0TUhMSttIqxHp8LUd1at6aM9pkdpU9qkzPJXNUNV3U81tQVYFUHRRb80YtRzWvtJmXa7nbEOqqaOv9WwD30qCnBQktkGgVcwtmP6XgL8klcNT661xdXS0rp799+5abm5uSYX23tNHWJLktwxbstICxNVtWIHWJXVRV9vs93333HarK27dvub29Xcyc9ZjadpW52+/3eO85Ho+PgNg3F2FZ7Lj6Fopb02c4qeat8g56W8onqTInSEn5cHfkD3/8iePxxN3DCfA439MHTydFsYaAuA40E6eJOE0EL7y9OXB72BOC52q/Yxh6QtdxfXvDsN8TusD+5op+N3AaJ06/fOB0mjmdJqbjyHQqucfmSE65GRNWIEcxlIiEAuIK+44iXcaHjDjY7Xfs9x7nwfdm3jJFaFPpNGMTzJjMJBYs754Xc/A2k6DgCAgOh/LD3/wN11dXTOPIx1+uOJ1KcsJ/+uWrN+U8z/zj7/6Z4bDn+t0b+t2A63u8H8AHxlm5fzgyHk/sh4F+7wkORDwSKqPhoDg1t+hHEXIxY82xmqhASt4bYXX9ELV1uwTFJSWlcWHTXJ5BE30f2O87+qEv71q/mkyrL1IDYNroKjQjJeqM1mpRZ/YOcjI/YGNGdsv7H0pASE0lkAsgE+8R70G1pHYx1qdon0VNLtagJgfQp+T50VspMeeIphmRRMYRdg90MaNpRuOEZgtX9OLou2AvV1c6d7fOWgyZZ8v22PiBtLOZOjjV/aoGa2O0qICsjpACLvkyewmEriNn8F2Pj2UxT6rjlq6NUp39nKfvdxyurgn9DKfAlJSYoyXf6r3NQmIiOfMDCaFn6PdkFZybSpihOSiP44RPma6fbSX10innknZ9jkqM5iho/URe0pJ1UXK2hUFb5dOaF9p6t6a3AtakbnDu1FzNSC0oqedVJea9X0BJC65ahqSN2toCEHjMLtS1nGr48nbleO894zieKc9WKqCp5o3j8biYyaoYKB7OEjTWc9sFK1vQU0VVzxill5TnMBFPMXnb7fb47d+1j9Qw/uvr6yW5aF0HqzUtbn26LpWhNW21TE8FPTVibssuttt933N7e4uIcH19zeFweMQm1rarfWi32wE8ar+n6uOlZennTpo+34CeOtlwq7NyigYC55i5fxj56ZcPHI8nHo5TmcEHXDfgwmBjjg/gzEUh54TMivPC9WHHD+9u6EPg9ubA1X4g9D03796yv75CnUO7HnxA7x7Iv3y0nDuTreg+j7amV44Rzbmw9iWgWhvQIx4ksbIC9uyeBM4mFn0IXB3Uito7/BAAZ2Nnqus31XfSlL3VD6zRb47ggpn/RHDffcfV1RXj6YR0nu4FkxPGOfKHP/7E4eYa7Qd2KgQC/c7jfGBKMJ4mTg9HAoIMOyu3OEtMWRZ8qj46ltpEK5lVQI8SU2Woq8mpsh4eX8Hmwshk5jmCJhxKkIQTLS4kHaEbrL1czSVULBxFT9WAnjIFLo7T+cyshlZfHbupYoFIKUW7jGqJpMZy+nUdOVmkmCYrZ3AOX1Ld6Pz/UffmTY7kSJbnT3GYkX5FRmRWVW+vrMh8/+/UMz2900d1ZVXG4U7aAUD3DwWMcAt6HNXpmb0IoTjDnTTaRejD06dPK3u43Tv1OymCNDZ6q5n/8vhO0FODlRTQjEhmXVbO56n2TUkGerRal4tuzbrEufqzTaAXcNO+BN2U/flr2klvQRg6uqs71u31LZh+PuHadZHtX3utD5GIENbVLohW8ZwLdReC0ZOqNT9upfLiPCHaqbQA+NyBVhUWNZvxnHKl
cY5Nw8ZGql5LptuLmUBsaUbTBGF45UFHiXFbxWSBlW9+eLOUqphpR140pZyEXTecsUyZMagx3nxOPzAWstwXuCD1hr2V9fsb+64tzKQqP765iIKWNdYLx+T7+7Zhj3fP+733P//mtC6PFh0GW1Cl9MIfy9jwttg7OB4DuM9c3szBCI7FgYK4yl4FNqbud7jL/WLuPGkbNWa5T5hTo/UkukpkmNMo00ek7TosYEjHjEOrrxjtDtqWJxpqOIp88ZGzpu5oWYIvcv1+rynWbm4wM5nfBSGW3BSyX0e3bXtzg/cFwK7pBZUuVpTtQSmVNhSpXjonM4zYv6NOXMMh2Jx5fWSuJIWI4IBU/VeVbUXPJlTiSTGGNmSAVDwp0OZAwlLsRFU9QxZaY5c5wyLznzaZ55SIkQOuK10PdrtPl2xwrvNuDz2Xhv389it4s//iw6vERNl69xARRWYLTBrXoBsS7okNcA57OKqZVFYQ30GvBpCvaVZfDBs7+64ub6imWaqEWI85Ekhj54+s7hnWccB/aj+iNdXY1cXamH1937e27v7/DeEYLTruAVSs0qdGUNMdeS57L1FVyBx3bub3zoOkSTRejFc9ZTvVCdozpPNQbrMs53YAo+9IRuxDm3VbNs5dwtkCqlEGvagM6qnwrB4Zwll3yRAmrrLQURi/eGvtMeW2Mf2A8qMZCaKWkmixCXSfcFa/E+4I3B2YHgRTWgRt2QKzAtkYeHZ55fXvj0+MyPH36iFBiubnj/8kyqhaGxMGtazqz7/wZ2VFayLJF5iZwmNcatVVorCNs0ly2tKa3pqFQwkZfDRK75r47H/w+mR3XBatHYNgN0go43I1/95hvGceCf/vmf+Nf/639ydb1nN45cX+1wzqo54Op7UluTxFKwVzv87RVpWpoh3gTzQkyZOM3MCMdaeC6RqWROVOb1jtVGJRSpTLlqBCioIFYq8wZ6IjFDUjPgDWGvEUDzTKYmrZyKyfD8+MSPf/6B6XhkPwYe3ylV9/j4zMPDE6fjxL//+//Dv/3bvzOdZh5+emKZW5leE+RWVDckKbfLpukEQ2u8afT6rWh8bbS2dqx9i2OlRNfLRzOOqzE3/wbHft+rJqoWcsnt5tReXABD37PrR0SEvEzk5QQ1U2rk4fEBI7AbOsaua1GIYRwV9MxxITafjlwrKZ3FqbVFc7k1hBVTcUUwprzqSVMa7VoqlKQAZ71eGuNALkZrQUslZiEW7Ql1nBeOk0Yh5tMTxqrD7lfv3/HVV++wwXJ9d8O333+HMZZSNa2KGMQGxARc6Li5/4bd1S0+dNzcvWcY942G9qzNG2r9Qvrg73zoPZahCs4Fgh8R0xr8YeiYuKrP7GpiVyohFRyCHTtMNyK+p1pLSrNev9MT+fgRSqKWBasNuBBrEadUsw07rBsx1hPGe3x3A2Ko1oNRr6Pr2ztl5lJkOb2Ql5m0HDg+eeL0zDmnX+mGPfu7d/hu4Hic6d0zyxIZTKLOR06SeI6R+WUhx8JyOPLy+KLMYmz3X8l4FgKzRqXicNZjC8xL5GmKLOLYL4ldStgq5OMLNiZKjmo/kBJLShznxPMp8bwsfHh85sNpou97YhSGMW/i/bcb1AsQcQl6Pv/5HMDrjz+jxIW/upBcMiS/9F7r0VjV7fVXQLQBsHMouX7TzuJV9wC3o5TC9e0Nt3e3xGXh8HLg+eGJUio3V3sV2nc9Nzc33N7e4rzn+nrH1fUe7x1372+4vbvGOoP3ouCcQsyRXBIVTYMXlBVei1BqUVfhFQBRf6Gtxt/xWEX2a/ZAEJwPuok7R3GeIgbnKt5nsJUQRvpuxHlHLolU0sUYtv2zZS709ijkmlqbIIN1AVssYmoDghm1/lNn4+Atfe8Zu8B+DFyNHYO3SM3kZoRY7QlTCr7rGcZebUCGgNzuAZjmyMtxIcbMNC98+OmBx8cnPnz8xB9/+EAuhd3NHd/+5pFUMtc3e+Ci47xRPx+qVmUqg56Yl4VpjhxPE88HBT1VMtrTgdZRXgGPylIV1rmXE0uMf3U8fjXTc44E2uUXtiZ2zjt10BwGhnFgt9+x2+8Zeu1EbVsJu92ijoY+S1UDsNRRAdt12F5/Fu+1hazN2uBwPYeLfEu9PL9tnahbmmalErcvGth5RQlf/lTbjaS6jrholcH6RS1MpxPHw5HTaeJ0OinzM82kqH4Gl5GY1DPFSD2nhqTRhcYIZv25pQvOzdx+zQj97YfIxUZ8wRDXBiY2+3fRiiXbeoKVahRIUDVV2Xp2JTTqqMVQswrFjah6P9qEEY1u7KZTOqcpaj27h58ZmjOzaNqiQVHzu82wakshnp+/rdEt9dJISJ1Y7fkqNNfccCkVTEFSxWXbzMKUsl270BvrKFgKBhGLcT3GdTgfGHZ7+nHXWk1oKunc5PB8rd9SkP7zo0WDtaV4a8HUjGseSLY9pnRiWSexnnOLDkvRyjhKQmpuWbqWkmy0u22WDWK1oapY10ChY23iZk0bLyvYEshSSbJQg8MV1zYhXcSc0z5mznu8SwSrbHKwEEwhS8FLwZHJFEzRNBY5IXnBZBUvW0k4Ue8PJ/ZCM6bgPefcWMRW3ZIzIpGaEyU3pqAxAyVrg+W4qB7DiCXGjIvnqpS3OS5WtYu15PL2Xh+Xy7/42YJxMccv6ZxfetdXrNEFaHn1ihfvuOp56hno/Kxn1yWbZNYiDcE5q0yNd0ytl19VCn5bF53TlIh2Du/oum7z9PHetzYI2jhYKzQTuURlhMmtHY6CnlzOlUilXghp3/jY9h9W8kznpzGGIit5IG1+Wayoya2xtgVcZbvmK9vTruy2p6xVX1Ka47R8gWFe32bto9h8rpwxuNb0WeUG5dzTsH1fpQKrMaKIENP5vNbGoikpS7rE2PQ3KnXIJW/+apfpvXXPWRm5Vau5Mj+5VYYWKWdero2pXrragtuyGaX+teNXd1kvMbWyu6y9k5rfjjGG29sbvv/Nd1xdX/Ptd99we3fDuNvhm5vpOgjbBiXN+EoqtuvBWEzXcfXdN4i1pGnmuR84hg4zT/R5JqQTNWUGIBr1jsniqGKwIk1DomSfNL2RqYL1GYehxIIkrf1X2jO37ytIUl3I6gYbl8zTwzNxXvjz0DF0mv/86dMjHz8+ME0zf/7jDzw9Pm+0XFmRKOcbkgImK5hwqRKSNlPdYQiifiVXxtEJrQTes/ZjeZtDmqnU+aajgTzQ65JTZq0Isu1GF6MuzCKC8z3GeYwYvAguBGrJLJNjmYRcCy9z5jidMMYwDsLQOWqFmIXUnDtztedJsLVqLg0UFW0Cm9eJLq0prAKXJWkKrOnKtzX2FbHfFtw5Vk6LVltNEeasXjsGpxEDDhP29Ls7hnFkd/MVV3ffYr3HugHjtc2FmE47xiMUhMPhhDWRGCveHzDW0fX9Vq6uTqj/mD5NWvK7ICiT6Ko2+rXMjOOBvcl4EVJuAC5HfElQoqaIRedGXl7I8aggwoDbeuwEbFBxtAl7bNghxoPz2vJCVp2Wfq1dskuJ5DyT84lSlrZBmyZyVQ2RGI8Ypz2wnKYthMI+W97tL
UusKr52uuA+upldhRQrJQk5qnuzE4s3anpqnRZLWGfoJCHxCKaQ5xfitKdYp3O1FGqK5OVEno/kZULijE8LZplJL0dOTwfKkDl1gwLwN01vXUZpjT252KDrut1c7NkrMN3+cYFNXm3tvyaS2qLc84vUFq1tRogibe08n+vr11jR//n9RQxd6Li9vlbD0nnisSZKA7CUBSkOb4Vx6Aldx83tNXfv7nDOMu665tZcyXkmzgu5JA7TM9NypLReV6muhrd583ZaAe0/IrdVSmVeFt3865pRaEGGNGVGypRWWt73HRll0ftO3eGXJBB1jTZikPqZ5YDQQKW6NotRBkS/pLHYovKSQSumx6FjN3TsOmV7xi4QjG1l7+1+KwVM2Roz11KaZY2+bc6pOdHn1rsv0HW9AlLnyaVoT8d2K+YUmU4HkjXE05F4PFKy6nZirsRUmJfEdIpMc2RZMilmBTsNKCvTc6ZfVGqhP8e5gba/cvxqR+bcQE9tanBntbbfec/93S2//e1vuL2/4+tvvuLu/o5+6DcEt73GBSVLuyC2AR6bM9di6K+viNOM8QGsxxwPdMcnuuMT5MQIFKNiqFgtCcGKtjRY02dcgB4XCl4sueZ2kXKbDPXCd0gBmGnpCWOEOCeePj0xHb2WLZeItYYPHx/48OEn5jnyw58/8fT4REqFkpQp0bdXFGqKQNEmiSaDzxByJYiwt4adaHXXnbeMVoWWtTFca9uGv/chog3bVHCtwE+RdWo+RAIoVahRlgLb4Dxd0B4rzgWMC9pSInRY08THYphzoaTE8fjCcjpijSHjEavXOWbdeGuFXO1Fn7hGhVGoYnQi16b1qVX77soZ9MzpLEhffX1WFg3RCVLQaGHaQE9hjjAl9cJwrR2IkYANe/qrdwy7Hbvbb7i6/x4fAv14QzfsGyPiqFhiXPjp4wdenp9BwB61D5QPgavrW/phUJ1QFax7W0fm9VhBTy2CKxHJJyBjXGTHxJUpeBwpByoWlxdyiVAsNUVqPQKVtDyT4kmj7uARaWPuO1y3Q6zHhCsk7EAsmKDjJcpaCqvoXBdFZY4mcj6Ry9JoTnumbQ1gNC2GdRjnCMFixWFw2GpJqTL3hrvBknLlkzsyZlEjtAQ5CTSHcNuM22rzTjLG0JkIy4EqiTy9sJxeFBRVMLlScyRPR9J8oMwzZplwacHME/HpwOnTC2XJHPtR053ljUBPowS2XlNbZHx2whf5TPTXAI9cuERvXNEly8Pq5LT+8vUm8QpIrdTEGo2/Ai9nk0Mx62RbudnLF3yNvGptcM1A13fc3l5DjpyeHrA1qSltniEr6AlOGMeBvu+5ub3h3VfvdXxdwRi9v5Y56WaaFh6fP/J8fKDUzFxmYm0+YquhXYWtV9hKmrxh6rnW2gT9yioXODvMb4URiUxFrGZKMIZx6BmaqakstTFZ2qBZ1nRl+wyypRSbbNwo2LFWsA34WCeEztEPHd0l6AmeXR/YBY8TgxOdu6waoJXxbMLmSoXW+y6lrFqqouRHFzr6LtKFTtu05NyyO8rrp7QwTQfdV48HltNRi4GW2PzUKvOcOZ0ipzmyzIkYdS8SVzajzvO9WCEnmlc7y2L4W3KVv76W9jPQvhrz6cbo6ftO01khbI0MV/3Fa+r0/DqXtKwYg/EO23XUqqkuN3TYkrBebcdtXRtcNgqf2vCNXKTb2Ha/tRJjo/11NdxOpb76UOfnSTMG0GoCIc4L06Qb2zxNzLM6FuecWBX6P7tQX7yGVekuo26dVtDeZGLwxmhndtGvNzdAexUGrgvtxU2FXBgyauSyaoDW6jRM1Yoh7zG14EKHDz3JRJgmctXS5lwUZOo7re7Ua2XCpQ6gQm2Ap03BvN5DUtW4S1SXtaarNvdCvZm2+6lUZWPWdFhaK/OqLhBq835uiaICwoHQDfjQ6ybvtTuw8x3qnWQo1SCtdDIumgM3soAIOSUtkxeaR4/g3mqD/Hw0a+tNljO1Jmj+OqZqybmRglSz0czlYnM9p2DPwsc1ctqqQ1p6QpqY+bxztM2EJr1dtXI1b1VVtUWFtM1bLuY8lc/8R5pZ5NYfyOh510rOgpFC5wzBqddKaazbWk1mtS00uQEEWdnfDUS0UvximkZgBRiXX+cUyMYQ5LOv1z8mvcXPgMmrX2/Zqwuwc7GJ18vncrHErUhoFSBfPkPqRb+2Xzi1quvmmkbdVtLLN7x47denqy9uWuDlvWtM8vrCjbZtvjJrWsVa0wCPbT5ceZMwnN/jIn3Cei+3a1g/vxDr9zcdyPPbimjlYGvRJCKvOtQLrV3EpfkgOrZmXStXwHPJZq9R3sU6Dk0T2j6ftAyCbddvTWtZa1pTYLO1PVqrpNavsgGfizJxzsUn6xtvvkFbEk42oFsacEpJKzhTTgqasqYdc0tzp1zU6y63fmONkDClwmfntQI9WAPfSv0bltlf3WVdW3ypCh9j2A8D79/f8//x9uZdjlzJlefvbb4AiCUXksVa1KORNN1n5vt/l+6WTuuIXVVikcwlFgDu/rb5w+y5e2SVusRuhvwQjMwIJALw5/7M7Nq1e4dx5Fdff8XXX73n/s0bTqeTQFtFlJY161H084vqokKhrNl4dR4zDFjnGN6/EZOy52eOH//E4fMH3LJwicKzSVUEBmup4B3Oj4TObxm8gUSUEXFeXt5bwiM3v1UBPKf+Ss3sMMckmjpPZ5liMYbz5Sp2FqngrOcwjuRcWDRD3Sor+VqsIRkZJVxKwkepyqoRSXEP9NYxGE8xhmiEQ/J6h/S/S1UvI+2hxkb4q4Y2NpDVb8kawzwLydxai+96um7Aec+79++5e/sVzjnu7t+T4kyMC9//4TuBZ0vhEg3p6YqzhnHo6Iej3EI7peu0jiNm5QlJkJlmuWGkjKlUY3R6q2wcqtzEtxzBiA2FGOBJTrTkyrSI+FepUJ24eN/c3/Pm7RvGceBv/vbv+du//y+MhwNff/trbu7fi4FpGHBhWP1k4rJwvZz5+OO/8uP3f5CKJS6UHPGh43RzR6+j64fTLaH76/oR/4fLqacgEeeJYhPOZYJf6E2hs0lYSbpZFE0QU47E5UKpWcUdm5R/VbEz8TLzXcC6gA0DNhxokxQ1zgicGlEYT9A2FB0oUZCeFMnzE2WZqFVEzvAOmbEMgMV3I8YFMA7jPE6J9r5WfM4Yl6m2UMj4ItocN8cg6q9q/SLJTKLx8sBSq0yZWEVOnUHPRcJUgyWzpU3tIUEjg/AurMe4jmo8KVWWOb9oN/2ih+aRRivk9dstmdlH+vUf7L/+hb/uAm8TEKz7xOfFPzMrgrAWc9Vom7h+8RZ2z9GLUMoJXrxu3b+6tuyDd9zcHCl54TD2eKO8wDwTlwveQc0LlowzWdXaM7ayJkSGKor/dcBlz5E7bHAUCqlGGU7RwLvD87f39IrDIu2XWefBG3wH1lWwiVRkYyq54EqWGOgdReOqqZWUIk5jovdqZG1YkXhLk/0QPqixVcjBVFVKFk2yNs3WDx0F6LuO4zhyGAbG4MTzEVYxX5DzFVOiZEHWzXXC6xisMXLuU0bV
RqombVbkO6pR5QhR175eJ6yxnM8d49hhDcznZ+bzkyRCSyQvC/OSeHw68/DwzLQkrhfxJwOLDVuCuE3BVRo521qjKcZfBwl+tsu6Q6C5VjmdDiNfv3vH8XSUpOf9e+7fCNs+mG2cvap+zVaNs2a3aNaYq8Bm1YkDqwuB4e0bbN/hH0eOf7zj8KcRay3D+cyslZvNiZoy2ID1I753CvVK7rrkKhX6n//69WFgRYSclYqiXVAliqv0+amwTBPGmDUjLUWC7OFw0OkZsWdYE/gKWCgWsk5pLTlh1ZEd7/DVEjD0xjFYT9asPr0mzlN1EitnUoorcUzIvVvSI+2iTFySnKVV/twQhpFuSITQ8e7rjtu330jry0qrcZknprnw8dOFFBeu8cr5ciF4L2jKeICKkEdL1pH4iUKiVkuqmaUUYoLnWXQbGlHWGEttlih1qw5NrQSzVS+1Nm0pMZW9xiQ6K+p8br3n5v6eb3/7W47HE7/727/j//r7/8wwjNzeveHm9k5gVRwVq2rd4hJ9Pj/z8Yfv+dP//GdiXLieH5mnCz4EDsdbun4g9AOnu7f04/haK7ktKaohVCecsQxdoQuJwVc616jYVddepjBFPv5KrQmDJjYGQC1eYPW3MzbgwoDxIyBickWnvWgIKrJhCjWrvEh6yvxMibPcm0ad543HuAFjPK4fsF5I0FaTnmbr4nPF5gw2U02ilMw49pyOgZQKORZyzJrEV+FJALVYSvGr3ok1UjVbKrZmLFaI2rXIg9XljQrqYKNJj5WWYEoVM+dVjuI1jxedqS+/yV/64e455svnvEyOpKLfv1jLU3ZI+dqrZ9ssd4ckz3n7Yd3OXdONkX13h/UU2Zc77zidDlATh6HD24ojUfNCmq9ECyXNkujoKIFppF7VjwLw3mPsiK+Z6ivd0NGaPcINRJOx1g1oSLC2vl817RFyMt7iOxHircbhxF2UYgs+aTfEOYoTpN9QySnqYIHBq6p7rjtrBudwyhWU67nqnieFbGoE4irJSjeIynMfAodx5ND3jN4StOi2ZvN6LFUK4FQl8cd5XMoYU3C2aPIl91YVEEZTHid8tyTTt3FJTJcJawyXc+B58FgD0/mZ6fwsgpRRdJnmJfL4dOHh4Zk5Zq7XWeKOsQK27GJqVaSxjeE7V/5dKA/8b7S32kVjdEY+eC++HeNA13f44FcL+1orFEOb0Gpweilbxm0VDs8lk5SJWqsIh9VawEqlaUMgdDIFVjCiHbNEXClEJzPooRNdh/4gAaYAVFG6tE2id0WANh58u0HbpuidTBU4a+i7QN8HZa23aqvBkyKpHbx8ClGUXNYLbysE5TdVow92yZZpyZagDiuj35hWL73KUWFFV1aPLVW4lUSiBTCZjhC1dmkfrIlPRd6zczLp1Anh0DuZBrDOMYxHhsOBuDgWMjFHbZGoQJzAfPqeGtGvsfnr2pYqVTg8xlblSm0/q61dWORusGVDMtYJr7p5q7WNKHQd3ouI5vF4w/F0ZDwcxSS0H9ZpLIHpxSxMrvu2oSt0WwW6jXFmmSdKznjn19ZO7HuF7l/3WCFfJRvWWnaIwW7hYa34W0DaWlt1/xS9360SjJstYONQaetqfbKo29YV1S2UEqlVKk7p/2eF+bV4aufSbueztZbaFJ2xKmKKkelGV3UN7a4ltn3ONW7XbYOUP0ubta2lyG0IodtYJwKo6M/YfrexMlUWuozvAsGLDsrryRC8LMs2NbH27aqfR9tLDbVZiWz6/D3SopXxl+/5z6/L3c/bBoWiPm0PgO0d7b/Ul8gUlS/f+a7qlPfivZOWldsIr1TlgpXy8rG2W7fEqiFVRtER5zy+dEK2pm5Izg6VM6Z5O0rS82eZ3C94rMX0njit1zy7Ni61Sswze8FFed9tr2FtJe5ef22BbaFN/tneN/HP13jbw7ZWlNm/5jpxrHtcaQLBOnNrKrUI9aGqo3pVZ4P9ryu5iHjvklZx32rE2zHlQtXuQoqRZYksMbKoDlqzmNkiwy7hWZGedi+sp+uvHj8/6VGSYKfS4Ldv7vjm17/i7v6Ot1+9ox8HnJrzpCw9/KQfqtSqwl8yIWStSI3XWpmWmWmZ5USbtvjQWYfvOvrjgTe//hZbCzEu3H1+5HK5UiosQDIGHwLHuyPd0BNT4nK5EGPkw4cHPj8tXOeIy3UbB7fiV0I1hOAYevE3uTkeeHt/ous8tzcn3tzf4pzjOk1czhO5CCISk2z43nmcC6Sc+dP3P6jXSVa57EzjGa0VRrvgncV2HX4Y8N7jxhEXOm3ZJaZavqQF/mJHKZnr5bxqI0hfVUYOS1U3ZuvlZrDq+WKMTpQ5jLGM45F3X33DMIx8882v+eZXv6EfegkKwZPiQimF8TCyTFc+/vivPHz8EWsqwxiwSnyOaRGz0Fxk0iHKyHtMgsq095armtIaaP3dnOt2g6dErYVQIWNxNqtEj1LzKuqFBbf397x5+5ZhGPiHf/gH/p///J85Ho/8+re/4/7NW1Er7YY1Wa7KXzHV0ve9IIB54XA6cbi5Zbqcefj8E9fpIpu4SZA8xI7ZztT5ddtb7YYvWkAIvG8oxgrKaOScZArOeJzvdNTcU7EU5Vyldr3WqsmAxYQR299gradQSfOzoEVZEBcAYzxIdmUTAAAgAElEQVTS8irUtFBz0nVZNOHJ5GWm5ATGYV0niE4VAicgk3zzlRKjJj1e9hs8wQRcKdiUMU42T3d9ouDkc1VIVQJlLkWU7SsssXCNCWMrrpMJQ2xPqUesvcP6gOtv8cORnCJcVcnZdvih0h8HTiHzK3NDeCM6KN0g0gv/NbySvUitkGMjza27/Dq1UthaCnpvSiLJqp8mfoiagqzJHi+kPv5yzraDjnbJcjX7sMgWXWppnaY123zxsn8pCOmYsfeOYRgpKdF3nXBMLKBaMclZlvnCdD2L/MUyKz9M81ZN8oxOuZrq6K0TrRvYvfntLcvesSU9q+nlKx3GWkLXk4gss3QMssY4Y8SqxwXpQiQL0ckaOdcmkeS8W532Egdyeb/WOJz1a7Jo2ca3pzkSY5QYpG4EklBIrz9FkQdYgb4KIC4JwmV1lGxJ1UAx2CyNJCe9RawRZeR5SeRUma4z0zUyT4kcEdQYw3SJfPjxE8/DBWtlEs85wzS3f5t4fHjk+fFZhoJ+/MCHj59JpTIloXYJUT6s1+N66bUiXOuklAox/tIj60qycsHTjx3BO27vbvnqV1/x5t1b7t++VWfkIE7Vi4j0xWUhLhIA5yjig9KnFP2FWivny5nz5SIZvS63c47b21v60xFrDG+++YZx6Mkp8eb5wjLN0v8MnWyOwTOcRsLQMU1XPnz4wPVywYaef/mfP+AenrGpKMpQVB9HKr+gYk3eO97cH/j22zeMQ8+7t2/45puv8N7z4cMDf/rhEzEmFn0YYziMB8bxIKJm88zz8xMxCq+g1KaDglYkWuU2CXxNepwmPbbrMUk4RHMqr3Y71lK4Xi+SaMxxVT9OuakgO6yT9y7kN6kAnKIkBsM4Hnj39j2H44mvvvoVX3/zrY4sBkIXyDn
hnOPm5obpeuH3Y6d9+oQnYhDzyZQW5lmSyXlRR/vc+tIqS94I1CAcsS84PSVnaSuWQiqQqzo6m51tAYIaGmO5u7vn17/+DYfjkf/77/6B//L//n8cxgO39/fc3d1rstMebSuXRLXrAs5Zco4cjicOx5u1qpqmK8EZEceLFqJn4QrzXzfC+yWOogijMUaaAiZQrKEoAVzOixOpAS/aOuv5UdsVYyR2Giv2GjYMuP4GYyxlnsnxWYOwIjcYqu0wxktyE6+UOFNrIedZ0R7hbtVcMCpcKPpHYGsCHVvPM0Dj9PTy+23FO9XmSBlswuWMDQPVOHI15FpXLRBJkrekZ5oSxkI3WLrYYWxP4Yi1t1jX4cItfjhAXMBdKEQwCd8Z+sNAGSpfj5WDbsDB7uT4X+OoVaw0NMmvAKVQc16Tn5oU1XQicCdtQAl9G/Kjo+Tq2A17oG8HO/z5G2CFysz++S+TnjaJ1cx+5NW+fL0vsB6jr28MzjtRaG9Jj+q4mSLGvika4jwxTxIwU1x0GrDBGbKvGkX8QAZCBHFs79f+OQSg+8IKAb5m0mPEWLvkSq6z8gk1mTEiqeB8B9YQEd2oQlVqxva+rW14zApXSzvJOi3iNweCnMUWSayT8koQnpfEMkdsFT/Fkou4K7R9FYNMYjoownTLIFJeWeyaa5WvWJnemqdMioXpujBPiWXK5ARC2DBM18jHnz7T9YHDoefNm1u8t8w6kh5j4vPDMz/9+IF5Xvjxwyc+fX6UYRUnk9sNW9yu2R28uA5MbNzTv3b87LvWqr+W85JkuE76/aHrcN6jhIAVLm9+IymroZgSm2otOBfxXmWsp4moyVATZRJkQYJuBbphgHik5Iy1nq5fwFhsCBjV4xiOA76T9trlcqGUIoJWXjkcVrVntLfvjaeaQtd5hkHac+PYczjIyOAwdvR90MBvV5+YtS1MXUcEV2FB20TcdpBhC5uV1a6sInBgKuLrs5TCkovwWLL8+bWQnkqDLXctrrqx4FfxLsRWwurNt6I9TtpDwzgyjKO0tbwXxMp74WsAXdcxDAPUsgqKFQu2JExZ38munVVeXDt7iHWFOpXJX3bt0q2NJee3eaChHktt67XaspCJw56ua49BWlqqrfNyJPjlht9at85ZfNfRDSNdXNYJLxkTba0Xu8HT/yFHu/8k8AnCZSiiT76190zjtzQUz22tjopOAEkLqK41pMDmdZ1WVONdDKbuhMdKmwbL2qYQ/kvJgsQYjJiBVssqNKayAlX5IeLK3doTCm0rJG+dTJ/Zxv1xnmLbe9QWqYIkRadCTBWic4oJ77V10gJea1nquanGKhrlcV6Qw8EZGbg0EJx8dfYVV3WnGLzh94pMSBakP5YyV6peOZ/b7tLaJNv3q1lXUpPjfcDff55dYNkF2oaQ/OXji6C0Pr99jj9/9ovfuG/PanuraNwoJe+kRdgVkltBsraN2p9bcrT/SGvy9gLG+jc+zy9z7NWrV0S2JY1tX2U3tWi+fEsbcXd9vfX5u7bgX1ya7SzXKhpl66TTdlmtCeT6XMkmNQnSVnCjI2jcq0X4q3HJ5KT7gLF4J1QH57Lcm4o0NdmR7ffJHttEDGNKco8m0eYxRnSCTCs823KuCevuM1ZBPxt15n91/DwiszH43tMNPYf7G7q+4/jmjuPdLce7W7pxAMzWjtBgOi0L5/OZGBMfP37khx8/kFLa0A8jTP6WmNzd3XE6iMfK6XDkdDjKeg4D9f1XcvNn4QkZYwSS02zfBYdxhsv5jA+ep6cn5ilzOt0yjmdKtbjzjEkakA8e7y2n08i7tzf0XeCr93f87jdfMQ6iaTCOHVQIrpLzJMaiSXqUxhhy9uTiRQPIFXwvWjspG1KUW9JV8EWRkmwwSVoRz9OMrZXOeWKqDH5hzpnP88Q1JdIrCaBJy1uM5VpFUDGU2rZ+qWoNdb0ojbV0g2M8nej6nvdff81v/+Y/cTje8O79VwyHAz4E0RZSEux4OHD/9g3TtePh4y3nxyM5LqRrIs2zEPAQPKVqQtPk1XMqxCgJYMo6LUBVu5CsI+h5bW9JqwtyFM8mY3TySB2KrXV0ncd5x+Fw5Ob2juPpxOn2juPplnEcCWHQd7Rtji/2HyMGpsYa+nHk7de/olrP8/Mj0xLFubtkfJ1wNRGcZRg93X+ITk+r5uV9p2qYi8NUy1wh4olUOitjwt4HQt8T+lEQnzZW3tIBAxUnyZOSJ9NyJc5iH9GSOowRcqoxq6aHqN9mUlzIeRbl5RQpuQjCY8DWjKsF4wMyIm+pRSa6rMsU02T0dXwV3YM6h8MyHkdOpzti6DiTWaZnSjXEBFNsU3+F6QrGZKx9omTHMk/M37yllDfUErHV4SpUEthCtQ6Cpb+xnIaB0RgG70iqXDv2HcE5hr57nWWsbSpu/askBIr0SJ6mAVD3QqNJqlTrEsDqGmiFiA9gcOBaoNtNNOk+vP2+7Tnov2wclPZnee0/j7N/EUVqiJFVmMzo/Z4zVU2cZahCpiDjfMXUwjxfmaYZ64KS06Vla3WYpnU4mj5L46WZKrwgo23Aqnw/jMV4v6E9a07wWolPSx7sSttIRdr4OWVC3+vwgBTJ3juZOK2s6vPr9L5syngd5PA+CIkbQ+PBiRF448QJqmpdh8mRnBFTTuOJURKVrIh5rSL9YbRwLMbiQi9tZRcI/SB8KVPEiZ3C89PCxw8PXC8z5+cFZ3qGzvPu7YGb268opXBdzlzmJxEQzcj9XR3GBrwfKMWQcuVynZnnmes8MS8y7NCGDlqS1a49a6oqWbckWd9/KhiT/uqK/MykB3xwdGNguDnSjwPj7YnxRh4hdDoiXNYKqxTpJZ4vV5Zl4cefPvIv3/2eZVmUWS5tprvbG+5vT3Rdx+3pxND3dKHjMAwcxoOIvgWPd4IACW/CrjdiIxhXVah7fn4iZ9FLeXq8cDwexDcnldUOo+sDd/dHuj5wf3fkV1/fMwwdX7+/529++zXD0NNMQ3POOF8peSGnRcYBs5AycwnkGkQd0lV8sIDbCTNJsuMqOEleMeomfpkiNRe8c8wJOudZSuZxWZhSIr2avotUiEWnt6QXujOTNJqR63q2jcQ4x3A4MIwH7t++45tvf8PxdOJOW5sN4QHZV/pxwLkbus5xc3vk5nQgLpZLOlMW8UszpmJNk9rZ0KcmQ57VJLYU1dqpWVGdQiqbZsRGf6jUGiVR7WCwTmQWvFhLhBAYxgPH0w3Hk7SnDocT/fr+W4Ly5xuhMQgXC+j7gdu37zHdgfHpkefzhWIDNc3k6wM1Squr76Dzr431bElaC0IZy1wdpliWCrFWEpVq/aqPEkJH1w0Y5zX5XRRNaEGh8X1klDuliTg/SyLvg9puOKoRufGm3tpI8SlFsnK7coqUkrElUJ3Dacg1aQGgFFHertVgQ6V65U7p61VkPNl30t/vh4Hj4YbFeZb5LIGlGlKBJYoZ7bwUllkkEAwXUW5OkWU+U8tVAmPtcDgKMuVSrBM+oQ/SSvGWu6GD4Oh84OYw0oeOvnullmWtkOJW1crJWQni+oPtuaXsEA
y7/Zv1knOsJOe1DQarHhEoQm/0V9V1LHgrqM2W5GK2QYS1RfTl0Sr5L/Cfxj3S7+zRHGljZ1KMxGXGUIlq/eNDJ61sZLK1GhmGWOFzdsiYDs4YdQ0QUFKjuf4bgw4o2C2Be7Vj1xqoiObYHCMpJrCOvi9YWlyzVIO0wRStbug3IHY33mOsVS6pJD2roWotbOst5qLWeowVLZwYM9FmUpJWULZ23TcpRoy4jQj/mtDhbK/F0Yh1Hk/Gm4St8hoPn594frowzw5nO7rOcFStswr88OFfufzpLCahem+LhETAexlOSQWmeWGaFtG+i6J35q3HW6etyo3Q3fi+stwNxeLfbQD8s5EepwJHPni1pg9b68hI/7S1tIryMeISmaaJeZ65XK6czxeWZV79Qpy1nA4HtTsQy/teW2Y++NUN3DU/ErM5hK8jiHo0iLuNJHsvF4b8WxW48g4fHH0fOB4P9EPgeBw5HAb6IdD1QW9wrTKbkFLJ6knV2nzb5Ijc0sJul9YWa3uOJhveEOdaab5QqRSWLFm2NVGsFUphTknaW38drfvfP/ZV3Prl5XTZCgnrw3lPNwz0g7SDul7aQ/tkp73uCr23m1E3SAH4dIPWjXrlldTmjSU3YC7K5cRsE1gvHutw6opwCFFSNzcN3HvBOxFFrJsP086LyWovXaB//Ti7/5sVItciwHm6EIh9z+F45ObmlpIXSjCU2OMtDL4QXhnoaZhUW9GKJoi5EC3EXEkVUpF2VztXq+DgGkTaeqCeZS0JTVo9q4gfW1XdFFFbEH0hdlj2f2+ty7/wZ+VqCNoDVRVgjba5djNl7b/V4sPnIBs7kvSUL66dthUWRHOnNhTEWoyzK0ps1ptb4HIZwRUk2XuPDftk0b/Yd37pY2tC7f++u0/bnrILpmb/nPX48u+7JGVtGbO2stv3/80O1u7faplCM0t+SbnY7y0vpymztlfm6cr1cmG6XojzvAVuNgufv9T2ri8+g/7v3/jY27378gO9uJ4qK1L02sfLVnejSmz703p9v4hpKx7XXmR9/kov31EB9JVV1FGlGqxbH0Y165rhtyQeor/jiiSWsFEBnHMSQ73HVSONcoWCc25m0Y2S4uiC0B4AiQ1W3Nv38doYvfeMTIRuFIV2TTYQg5dL1/hLumz1RRLPej3/r46flfRYYziEwGkcuL+7Z7w5cnd3x2E8Mmpml3OGlFnmmet1IsXETx8+8N2//E8ulyu///0f+ed//o5liTTzsq4L3N3ecjrecDweePf+HV9/+42Mww8jXRC3b6dicph2g28XxZeHs46+H8hjZBxH+qFnGDpKrdwuJ/qh5/3X9/zt3/2Wm5sDh7Hj/m4kBEfvDaVkpjkzXyeul6uQtuYrXWcxJuCzJWSBIn2AighhGduQHiFHZy+TbKaa9eJtixtL5nnJXJLCdsxaWUNUAb38akiPHLL3S0JZNGisiEG7gazB6qTP6eaeb779LcfTDV9982t1Fj/QdZ3CpKtilUjEX8/M0yPz9UKaryJEmJWMaIPwbWyk2EI2ibkkrikTU+GSDEsy5GKZcyXpGGUuciaLBrY2utuIy/vkpqmFi3lhIHhRJM0x8vT4QE6Jp8dHnp+fyTkzDOPKJ9tk/1tsadMwmmIZVCreMQ4dwf0D3/7qW2pJ5OVKVcPOGidq+evuv/+nC+mcF2+cIuojS0o8XireGu6c4fMk12WXLLlqy8N6RWwCBaOE8ULRqUtjwM5XNeys5OUqCCdgjfrimApmUWJzQ0MjpQhylOOixYMQjS0Gk0V/o2KwaV6THqkG9Z62AWOTJiKBF1HViFP1eLgl+IHn50cMHdRCjJXzNZEzzFGuH2MsxXtKFyhDjzkcsKcTtuuwhxE3DGRXMH2mzpJiOGPpMLjgGU9Hur4T8m3f451fE+TXWEucoEirdYKpAhG3jy9PZP9lrchaVth+pO2V9dDEouZNcqBqofZlfrCZEmugWtMxo4FPCpuWpOz5im0DaYWvoP4z03wm5cSffv9H/uUf/4nz0xM/ff9H0nylZjG4taZireiwldUkNq1q4006YgOUXrxpPR/697o7GdZQ7ebLWF852alUcpVpyraHmDa17MC5IG0q5TpmWzd6TRunb8Fc6uf1dZppKCAT0uop6awMARmg73rScMDZhXFcSKnSeU82cM3it/ccpY1ssbhYsFjCEDgcO8JwkOnp8YgLAXLERCe8PjxxKcxzxrqR8XCDtSLMevfmLcYaYo58fvxISpGhH+m8gCQqKylxwDiWXIm56P6zS6S1oGoyJW2aGJWFMRn1/ENbgb8wkdlob/vQ9ZxOR463t+qiPsrJTVnaVrkQl8g8TcQYefj8wPff/8Dz85k//uF7/viH71mWRZGijmHoSX9bxOTxdMPt/T1v372R6sp4zQbZslq9nPizgqTu3qulCx25H+j7nq7vCF2gr5XT6UDXR969e8Pvfvdr7u9v6TvL8eBxzpCWiTifSUvier3w+PRESokYF7wXbR5XDEmDrxi2J2rNOmlioFq8t6qkWUW/q7SsXQlXpjKXrL1biLnxFw12lwG/5mFoeicVWw151yJZayHVaXHOMx5PvHn/NTe3d9y/fc/p9o5hGGlaDlKmZ0yVVkZcrizXM8t01RF2mfiRYOm16g5Um8nGEKtjypaUYcqGJUvbI1a7VgFFSbPNBkNa9YL+GUUBvU60eCdcMbFLkYTHO0vOicv5TCmF8+WZ6+UCiG3EMAxyNe2g+yZU+eIaA/pOfMnqOHI6HjWhK5SsY9px4Xp+IC4bR+PV1lIVxJvJZ1RjYAs89Z7nKBMyN7klPVJpOedlmquqFkeRfn9KBUzFLTPOSHurJJmgMYjsgVUF52ZCKQhvFCXYLI+cRa4iN90NI8mP0cmvkhZQcbFSpIQrGLATxmaM69T/6yUPQzSWjiTfEcIIJlARUbZpFq5AzoIeGmOE7Nx5audh6DHjAdMFbD9g+x5rMqZboJON02lUDV3HcRwYxkHb7AHrXlOnx4BOuK3XoEGi+x6CXI8WELU9rS+hgBCiRdRQwLqhAkX4NO3bf5HQu1IIVmxwhydCm40UsGTbkFdUEChLJF6v5Jy5XJ55ePzIssz8+Iff86/f/Qvn52c+f/iJtMxQ0ppEWYXHN0L81gqzys1e10CR+TVAtA9fjZoYayq2o+vVHUn+NY99e0ryriYDgqiEazfC2Eq18p5E9dgokto05/Q1zIYQWWtWxDtrkeKt07aQtGhT12Ow9P3MsmS8cxQMS8nYDJcU5b1gCVWSH7xojQ1dj+97xvGAC4ESF01MZKw9psqyFIbR0Q8HvB+4vb3n3buvsdby8PiJw3gixlkn9ESXCeNE8TwXMJZUKjHX1ZR1/dR1l/DUrUinFei1ASBVY/AvjPQYI2OawQe6II+mFClExcQ8L+ScmaZJJrJiIkYJgFWDVUvRpR0t/J8mbvSlh1VtUOdWP6w/+TNIc3unL/5mrejwtLYViIbQ7e2Jm9OJ0/FE6AxDrxciVZVmhcNi1ZLCqm+JKYbaPHuoK8jravOJ2UhY1hr5XHqTWSNqx
d61ilYu7D0E3z7p64HnOwTaNF8b5VRo0rV5IMmUStCJPa9+atZaSs3SezdQUhaH9loF1SgS+ObLE/P1mTiLLUVtI5s7lDyVQkyb9lHTlhCPLSsTJ21ygD1/YbuGLKIQKjpfdfsMVkUn21f9XCImuGCdY55mrsJ2pe97cs4vq/jdRmpWBKzuEEd5Qku6xGYBqELmDt3wqq2Q9hZXuNtYqZB0MyxI33xJMKfKkqoSw+uqLG5N1skJsQRZlsiia+tMJTit8PPGqqylUI1c3NVWajHKBXnZithyx7oF8vU+b89p01saoFpblCzk4sYz2rde1tblJiJorGcbU5bIby3Souo6fD/gu4GCY06SrHVLwTpBF2Np01uaMDurU4ly3bdW2I4C8yqLaaywjVvF25KVlsxsSM+uPFn1Z8zL96YtMAO7+29rPUpuYUQ5H9Z2b61tb6trW8Ias1u/ZgYse7jmKNvv0dePy8L1fCXlxFWTnrjMPH7+yPnpkfPzM/N0XQuFtq82NHVrwW4Bfzu2jWTdM3ehoo3sS7bRzkVDePZP/CUW7i8dbeJuF9PafVnKulehLXdRbNavbQ0U3aade9YlpfkWrn/f/eZ1T2jnrG5dBnXXI1dpbUXdQ00tOCRmWQxO1e2dk24A2VGalIXq04n2mRf/RW2HeY2bwQf60Amu47xMORtDrln17kSLLaW8yqXUHZJpttP44lhFGTV+VQ2y/54k9mclPd463hxvub254e3tnSA9wwBZLuznp2c+fPjIvCzM08z5cialxMPjg7S9EA+Roe9lJFUNKGVKR2So+1l1WpJUgda+RCfbRW1bb1T/90LFUo/Ws3TecXt35N37O5zzhK7HWcf7r97xd3/3t5xuTohujxAZL8+PGAsxLuRiBHaLCdwsuiBtOi1vqE2pleQSvSq2mmrofKF4vXCtjOx5C2PnOHhPqZU5QypGBAnrrndrdKbpVWOlIEohiA5PLhWTZFNsBn/GGvo+MB5HQggcho7gxY13vjzz05/+iHOe6XLmenmmlEyKEznOUDMlT9Q8UVLi/PRZuFxKWCwVcq5crgsPTxemmPj8PPF0mQBLNZ7qAljtN+tGbbQ9VlRML6WMtQWr03xdJw9r3cq5sdbSBUfnxV4kLTMPnz8RujM//PA9f/j9Ow7HI6VWHV33Ky+saX40z6C2gbT9VNoPGhBasuQ9lSoq4l23ivi93lIK98RWizdG0MMs9g8UuCyVn57FXb4Lmfc3mVQiyc/U7oJ1C/Mcuc7zWpGfL09A4f5mIN/0OGvwpuCNBEHqDonUgCuCkXkNeqWC6qwLb6ptTk0UTu1DKu2PklSKueiEMV6I/7YDJ2hNyZLMVp0CMc5g/YDvj/hisT5TmahkraItzgeOb95y8/WvCcPI1Yz86+eE95XPFxi6JAG/GLITiYub08AwdIISKrewtRW+DDC/7FpaTDdAEXXx2hKeXWA3uyC6kpEb2WGXGW3vUZ5fSibHKInMSnxF2wxiCXN5EuRTRO5ESgQjBs/WCJqXlpmiCrvTvJCT2NS0fVh0sxZKyczXK89Pj8rtPPP0/ECKkU8fPvHDH39gnmami3gx1VKoY49Ro1znZP/pe+FRWSeSBcJHVuyqiTbCyr/XEyKaM/rHtZQ02/l47aNWVoJ2lUkMPTeiUZNyU08XRDJVuUdiziwpr3+OOSPopmdgwCAChiHI54uLJekHb8mLoeIQI+uEVSHaSPGFpQRklACeciQtBY/jgCFUQ58rDktnhbw/dAO+74jGCjpoIt1w4HS6pRZL8Cf6rse7wBg6DqHDOsft4cS7+zfEtMiAUhewzvB0mXi6nLlcrjw8PfN4PrPMC4taODXu04vujnZHjCZOYDAOLaTMLon8Xx8/k9NjOQ4jp+HAaTxyPBwZQi/JS0xM14mHzw9cp4lpnrhcLiRVRs4la9VlCTr1EJe8GkzmVORCSBuL35Ttzm3w7FpN2K16aZNbX97iraq0znI4DtzeHRjHkbu7N3TdwNu3b/j22285Ho+UkshVblIDzPMFYy0pZbplIbtExpAy62RR0ztoSZBVDol3DgpqxyAqtVmrFm8MvbeMwZGVDGj07jC2jRpuSMarlpMayL0XboXJhVITlKKZvWzwXXAcBm1Fdl5FxCpxnnj8/BFj4OnhM08Pn8g5scxX4nKBWnAm44xsriUt2vqoNEXZUirzEjlfZ6aYOE8L5ylKwtJ79ZZpQbYIjC39DzBCpJO1MKt7sffibN6Ie14DX7PHMAZSikzLgps9nz9/5KcPP3GcrhxPN7x9+44CBGNU7RUd/zUvT9/uO5J7mwaeATti92ubjbb30KburATFXBUtq5U5VZ4mqRjvpsJ5ykJUDBE/TVjnmabIZZpJKfP0/MzD44Mkm0R6n/DO0HsZCGg2JE2nZ61Wa93pcagMPmY3glu2hGdttRThvK2x3QjXpEaB+a3HKqmZIlwuuQosWI986XB+wIWKsRe0nBf+RPC4vmM43XB48w4XehbT8eks7ejzDJ0XdK/ve227e8bjkduTtDpbS2ivufKKKwmuEyXaskMJ9AQpxrnBtQ0927X+XmY8jRgs6FnOonjd2gbyFBmnLikxn8+cHx6IMfL8/Mw0XQHwGmxKzsLRiwtRJ3NjjKy8H2NIKbIoenO9XHj8/JG4LEzTlfP5kZQS56cLDx8fxV+pJGqJws3IEWmRZ5wV5ebmxC4Jz254pGXLO8SmNv0k5fUBcjtuEIJiPK+Np8sbKn+BuN8GKNYJLVj1bHLd3MZLKcScmFPEACkLb0diqYy4U6uikGZFB0UPrqqelN2S1ZTAVFItRL12piwUi1Ar4sJVyVnifTDizdWFgA8dFIh+oVZDCNL2ShG8G+mccAM77+m8aNsd+oGb44mUFg7DQBe8trUSl+uV8/XC5XrlcoYHlPoAACAASURBVJ2JMa5gx3r/thVa7781cslt0gYdqqxqKX99PX+mIrOIrtnd9JaxhpSkSpqmifPlwvV6lSQHgfz7vuN0OpFi5PnpSt+J7k1SZ+Rkk8zoXy547yQbXQWUZIFl5DVRshDCfHDr+LqzqIJl1XYRLxJ55xyH44Hb2xuGYeTm5kTX9RyUgOu9l7aOSvCLEGHAlbySzIwxuJiwbq9sKhdsqVr5KbHMWwfqeNuWwCFKGcFC7yyDd/oZleBWKi6z9XJf+14EWmvArl5DrYIG7wUZsc4yDgOnQ48PgSEYXI3YslDjlTTLG03zmThfRA10vrAsV6iipFus3FwlR4WwRfdHIM209nGBVYzSWNXiMBuxVXgI23XRro225C31bZvIBgXvp+zkEN6NVLnzdeL89EjJmceHz3z+/IkQAl3XE7pOUSJP8G692VaGT6vAFbLePskOdv+iZfsah3CZvKICyhMrot/SppUSot2zZJhSxi/ANGPOV6xzTNPC5bqQcuZ8mZimBWMq07RwvTq8M5jOYzu/3ndWN5s1GWjnoX55JswKr6+803XRtsmT1uIytVCNaAaRo3CJNOm1pQPMhiZphWd2QVd6/8rFcmZFa/q+x/pABZa4SDBIkcVK
sty4X2WfmLV15iX0/nqLyZZYrQj2di23k2pM+3Zd13l9gd1rmIYUtc9T2mSdtiorsl+XwrIsfP74kU8fPkjS8/TE9XoVaoMmHDVn4jyRU5Tk5bpLejTRKCmRoqC683RluV5IMZLjJGTYkkWnyUDznKrrujXOStsPBHW22uL6suBoe8N67LbQunvWCtGyS9absOOrHdtQxVqgf9neMtsexdqiboH9i0Je5VPMrtW1nom1bSkFRnv9VbsI1nvEWuXyaWGHMVjrCa6nM52KzQYx/XVOC7qN61kA45xo3fUJbwLeepyxeIwWKeAt9MEL17X51akfWtvPX4Buu8/78n5+OaXd7sX1mlZA5d8zW/AzOT2W0HX0w8DhdOR0e0MulcfzE6VWvv/xT3z3++84ny+cjkfu7+/x3nM8nvjVrwKlVEIYmK4L58uFGD9wvV6Z55mffvrA7//wR+6eb/jNb3+llvIQvHzYnDKPDw+cn884ZzkcRoZBRqXHYRT0qLLqSxStfIy1HI8H/uZvfsNX7+/p+4HTzY2M1Q0Hbm5u8T6QUmReDLkk+uHA4XijAU8IVyklqrGiGpkzNhWKzVv2meXC6rvAOPYk51gukVm74sGAs9A7w/u+4804kmvlHDumnLmkzLVMLCXJxWVekol/8cMYcEH7tip25SxdH3DWMIyBm9NA8DJdN44C+R9vLEN5wC8XcnJcLkLUfn5+4vL0SMqZeRGkDyq+s7ggaE3NjQNQWaZIVOf0mKLwnayh7zuqFaSmGr3Zsqp6J+GFZf2aapV2ko7LN8yh3fCSeHuVKnDCA9XqUCxDBLb/6YfvKaXQ9T3np0c+f/pACIFxPDAMI845joeRYRg2fpcV3khOWdVjKzFFITK3wKKJRy5JeQqvd4gX2olcpW1VKCwGTIqQCslYLsZTcHxeCj88Xnj2hvA00318wmC4TpHLFNUDbWJeJqxBtFKmieAtt8eem2OnhN5uJYzbNvLejraxa/ZRayVX8dIpLVgXHZ01Epgb9aEWKLWdM4MpiVQiOE8oN4L8uEBSWX2pmFWmwnt8cISgZNHOYULA94HjzZG37+6pxvH5aZbpPR26yDHTDx2/+c3XuHCP850QsItOUTWthLWFs2Ycr3v8G6iS0YzHGKhlS8PWxGcNEnU9qSINoOavOZOXhRwjOSeuT8/Soj5f+Mf/+t/4/b98R4wLT49PTBexgei8I3jhVVmd0Mw5c10WUhahVtOUyBEVd4MUO0nb2pUiauxUBpOhdxSvSGCW4DUGS7ASMLvgGMeeYegJnaI9De3dfbamYF33n9049SLbrVNDhoraHb3qJiuFcNd11Ayzixi1Scg5idZblcLEOik+rXpdrdyxuiVMFYgpcZ0maXP141ZbKclbhmYq2cjek2JURWtRQneq79N1PX1/EH6pEZ5p143cnd5z0x85HW853N7TH2XCsRpProZUIVZIVELf8e7NPafhgE0eFwOmOkYLPi6YbDk4y5vbE6kmuhGcKpujbfBcZL9sJtS2yGRWoxY4r8mZVQFijCJlbTllAa1FhGfdL4z0GIOgPF2g70WrZZpnrpczS4w8PD7w08cPnJ/PGANv3r6h6zqOxyM3N3cAPD4+c/rjn8DAB/uZZRFhsqenJz5+/EQuWdpiOeGKVFy2Vv3+lYeHB9WEkQ8rFXlHqBoo16Jmu5K7vuP9+7ekJAjP8XgihIBzMvVhjfA8ckmYbESwrR+wTn5/ylHg2mXBB79WEZltmshW6Xl77+hDwFbwVgW0ayWYSgeM1nATPPe9GIs6mwmqJNnZuI4B7wDb1zuMTu9Yyfj7vpNEJzhOh543bw7CgwmOvldiWjCEesWkWcSu1L5ifn5mPj+TcmZaJq7LTAVCDrgia9P2qLL6wIibbmqmldYQFP6sGOFXaIG2F7tsFUxeIf2X1ZBMdRVKEb0ZGe1U8qmiMSU3bgM8PT6QUhYfuCxu6SEEjscTh8MR7z03Nzccj0dFAoTfUUuV6jXJprLMMynFtYpuStEpLa/O6THGEkIvwahArhknGDVYQzaGGRFePKfCw3VhcRVXJ5yMH3KdE+erjBanLCictQZHwdZM52VkWUZiLbkaQm0ok1s5VcbIdd88hkSNtqGi8miBertf9fvan5c2TNbbPANZN0aP726oVVrPSdvhpQra27S4nBpvSn/EYb1nGAdOpyO5Gj49Xdf2+/n5yvUyczyOvHl74r4cdeqr2Ww0RE+usFrt63e4ZFG3fWyH+JgXP4emOwVbZ+vFXyq0SRgRW5VpqBQX4iSyIk+fP/H0+TNPj0989z/+B//03/47cVl4enxkulyxxjB2nuCtWnEYnBFJjTlFUskqbaHK+NbQOStWHbVgapZEyDbpEUNnCjZYihOphOwKGOj0tZ2B4KS10nVhRQrs/rzoORHEpn3eKmT0yk5Xa4c7ln12/RoLtx0G9S5UfbmGEBe9vhsau7dGavzANpCwoaiVUmRCer0/XpwCSXSK0pSbVt6esC62M1Z9L3txCNBLJYSB8XjD6XDL8XCiGw/4fgDnqSqlUjQ2ZcAHz+l0ZPAdZjaYq8UUQzAGlyMUS2fheOjJ1WP6ArZZwOxFR2EdwNihhU1F3zYtLS02GsKzm2sDVH/v36F8/7ORnm7oCV1YxQErahPfHLFVkLDCOjrcdT3jOGKM4Xg4cDweqKXQ94HmY9Sada1maVCWjLkm5mnh8fGRn378IImHVvN919H3Yhexbpi611r9/ZhKyJ1OcYVVSNGgGbIRgp88ZHqlQe5ycYoGwtaHzbyADmmx16wCn6uOgi7eYAyDqYzBMzhLZw2uGhYvZpBL3XqvexjxtQ7hvgjRtw+CaB2GjrubA13wHA4dp8Og9iCWrrMrZykEaYvlUsXMshZS54iLw1vIxRLTNiJbWhW2BjzlQek0n3UWHxyuGow39EWqgZhlLD0mqSp9kukgmQRU52BvdxOAsiYhuHVj3dsjyTRKW9vdZESMLLPwD87Pjwy9yLvH6cJ0FqTnen5gHEessau3WK3CZRPfskJaFr32dUPVhLgpEb/uoZVTTZIc6qa3Xse1qZ9WliIcH1Olj9+1arHKdCHWQs5q2lmZlsz5mli8JXTL2m7oE4ROuWxuI/o620Zpy5qoVhWY3DFS1s2r1CLGmkWSobVV2O4xha+xFhsXUozYIgaUIpGRWGZBF5d51u/JBKC3Fh86grYquy6Qi9jeBGegKNfLW7wzK++rXT//1rn+jwB52ML4WtG++NmazOx+Ztr3zQr7VEU2SsnkFImLEI+v5zPX52fisvDw+TOPnz7x/PTM8/MzF21ZLTESc5I1zoBxYkaL0YGvjaMlQVUd1K2lDzL9ZmpVmceXoq7ZVqIpqrdlZDDQQNeF9RGCFxXu4Lf21noK9miPytKu/a+ND9LWqqn1y0ZU1grp30N+/d8+tO2yT2xakdBiqL659R9sk0ny/aajJmFyE08tu8Tnxb2iSc4mrJvWwmxL3luby9L5QGcdwyBK+/14ENsJVVyvzgliaxoya17cy/tCR/hE4s+nMOTmJWaMcjKbwLHwhHz
o8F1HNUaAhFRfJDl/3kpeF3SNMewS6r92/Kykx3nH/bu3nO5u6QbheHC9Ms0L1+nK9ToJA3tJUKHve8bDyN39Hd988zXOWZ6fznz69MDj4xMfPz6oG3feera2kdUkB53nSIoTz8/P/OM//jP/9N//ib7v+N3vfsNXX73leDziQ6DveznldT3DdF0n/eWS6IIXXRFrcLZpbFRyEkRiWRaW+SoXSZw1C5exZtkAhIS3zPMGI+6mBmRyx6yoQrWGzjl6L9Swd53j3lt65/hq6LgNjozFY5mx2HnmcJ15WtLL4uOVKkprLeM4MPY997cn+iDtrPdv7xj6ji5YDoPHqWiZUw0M57x6vzTPFple612md1J5h0vBksm1sNTCEiUJzhVylcRlLolYE9VAGLyYxBon1YcLSnCW6YaYEpepY4lxS5w06Ykxra2veV6EUK68KmOsEPxoqHZZEZgUE1lRmWuKTOdnjDXMz595+PGPQhLU8XxjjGy8rZWjG5a85m40W5OMlvC28d5SknJEXu8opTCdz2RTyIgYmkDbstGlUphyJlF5MoWPZHoDB1M4iSwlGEvfCb9tiZll0anEtPB0TjhneLpEbp8mnLX0faDTYCRTi9Ia7btAp1MlxmYwTa3ZgHUiQwCagBpIi25gRhEgoxMuwu0rxlAWETTL2VIYsK4ThffrlZwSHz/9xIeffmJZZh4fz0zTQsXQ33Wc7u4ZDkdu7+64u70l5crN45nn3hMdUDzOFE30O46jWEx4vwuyu47Wxit71RXdJTNf6DPXdj53MFlll/DA6usC1DZAkDPT5czTw2fiPPPjv37Phx9+IM4zn376icdPn7icL3z33Xd8/8MPoqivxaAgEJlQnCIwyqUExEam4gx0qo84do77mwN98DgD3lYtCNfKlpQqsV1jORGzIK/D6Ybh9kTXC//y5ubIcDjQdV7PQdnOwVpgaCt0/R2KqVQJxCLYqV5cOWM0Mah5m2B7jeMF0mPtihT3fY+14j6w5/cYazDVYKqFbGiDGca7FdVDn5+iOB0YZDBjTXZyFo2skpkm8bRamnTMavrrcCbQhY43d/ccxwPH4cDbN99wO95I++twwoVe7j9jKAayscINNIa8hVqsqXhTVXh0oS5FlQIWvNX46OVBtfTDyPF0g3Ge0+mW082tEN2vF8xkVCHdsZK4/iIKsBHDZY+WPemvHT9vestahuOBfhzWzBuUH7HIqHmDm0EUcLuu4zCO3N7erGai9/d3EnQP41oxb4Qlqw7QkjemKAt3Pl/48YcPfPfdHxiGnr7vcM5qsJsVTZAT2g7XNISqVBxrtb3TEcmqIptTFGKe9lq3TFkMOeOyKLs8vlB9bOvQEMhVn8dou8uJqusxBO46R+8cR+84WEsyluwCzjjmWgnO4a0VaXB0Qu/nLNDPOIxBJlTGjtubkcPQc3s68PV7SXq8M3QKZcunzCs8us+mW8VBCdQcSNkSk2deLKlAjHoDVpAhVCmyUsmkKn1m7x3OCOp0GA/0oddxWb2mcsZ7WKIKNtbt98aY1+Tn4gxpZf8DmI3YpqiOJAENfSgrQT4n4ZCl6Znrk92CzDoOvG2mzQ5lW3+z/r9tukEnxqpC0q+L20kSGKNwebIVpCeXpm8jaEpUVHNOlQuVZCuWQm8S3oD1Rib5LIAVpC0XYoQLCWv1nCdJgAZNbpy1ck+GhHeWNFRS50XE01cxrm0b4urcrjVaLVo8tPhttQWaVdSw6CivVpZ2wPgJ6wrzNDFdRRbjehGEYlmWVR+saYn044H+cGAchAeYc6UPYgJrcaTgqMXRd9rK7TwhSMt7XeG1o7LHXF5zTVuyU7/4Xfrbyz5Y1xdfhNhWNVZoq1W9AuMyM18vzNPEw6eP/PinP7FME59+/JHHT5+4Xic+fvzE49PzFoQVrXBJ1stZWQtfBUltQc0aKY6clQnVYx9kL7GGTp3p958upcripWhKJbEkKSS6cRAOT9/rXi/ist47TWS2ZHAl6b7IQLWNtHNZb/dBLUUSnqI8INUWe7XDsCY7zSDVOvGRRBMiYC2KZA8x614CRltSipy3966xKUXZt7IW4uge17zMUowSm7VT0eJeayd5HxgPJ06nG47DgcPNHeNwlAGebhCkR8607J8t+UHnjs36MXFIAkzN1CRJDy5jnXRyqoXarp0gFJlcKv0wMPQDxljpGMW45gLtaMjwhvTtUD5dZGct7pdub1lrGfqe4MOqN1CyZMu1nWiVw45xYZ4n5imsjur7CYq+X9YxxFqlHxe8wJlWpdRrrUzTxOPDEw+fH3l4eOTh4Yl5Xnh4eOJ4PGCMZZpk3E3gOvHg2WwJ/FrdW3jRkmoBr1bhXaS0aNITV0PRlhmXuodBd5t0W3KzG6PcVWdWYT9vDMFaJY2JloLSLXRjadmtwVSZinGvKJHurOPu9shxHLg5Dox9YBw9wRu8EyqEW/VI9ho1+h71/EmmzQqbrujpCjGreByoQazFqpBd88QKxuNwWgEFhhDU4FLG/mNK5JKEr6zBT5KeSnLitj5rnzv5pKOfuxHjupv4YpsskokRK/YLeqpzAde6KQYlz7Hd3GbtxOqn1ErEsLZ1rLEEHe0WRPClpcWrHFXGQFeVWdMg/41421D9ZCqzOtKfOrmnOytmhqHvKVV0fcyzqC+39qGthlgcsQZyMdgiYmW2AHMmJ7lmSoEYVR02iFSKoWpwFG2mptO1nlN0bVWkMKXEopVpxpCbP5uL2LhgsqCzcVlW+B6E2Ou8o+tFJ2Q8jBxOJ4bDkW5Q/SVbOR5H7u9OpJwZxp4lRoah53Qc6YJfA+x6z9f1f/9BR335aLu+rnWTDdy9ue1r1dSsfW1tziIF3DRNTNcrz+czD49PLPPM4/nC00UczaMaKWPMCw/BhsKvSKY+vBP+oneGvrMEbW31wTEEh7cqdWCh7ZUASywYoyT0rEERCF6tGXxYicvCh2moTkti8m5dtnOzGZDu+Via8NQdSrQ7n6926N7Y0OikE8iYpoS/FfuNv9O2/bUdZQ2szaNtpXNOKiAqu1BDjUytZP3sbf9r10/rUEislvgtBGePM04TLJH6WAMUqBqyvk/rsa5iFLUt1pLJLCXLQI9pgQ2yLbqZapz7/3l70+1IsuNK9zuTTxEAEplZA0VRUt++a/X7P073WqRaJUpkFXPAEJP7mfqH2XEPZJVYLKkgl4KIQgKICD+T2bZtezsREpSSX3uYbWNtI7lSS4Sf5LMkqLKXta7eJouh59DfyIT9ZeUt56Qja+ghF9JlJl1m6QJYImleuJzPXE4nDs8HHj4/kFPi/v5easLBa7nrDdY59rsdXecxBoahZ7ebmMaRLnRY4yi58PHDJ/71X/+dh88P/OH3/8I///47hrHHO8eyLLx9d+Tb33yzdor1/UjwElWHrgPqijw135akreI5i59W4wSczwdyysQUWeKsQnuLOEUn6Xh4sVCUPNQCg4pOWI3EpUXd0BnD6Cx75wjW0BuD114jp2JbLogLuPNWPW2KcFxeCerp+8D/94+/YRwCb28HeiUsj73Bu4I1To
MebW+0fisy68FaYuucEc+UZvAoUIG0d0v2Z3EWqg9CiqPS9fI7xoDXin/wnttpx9j30uG1LDoeiXGw0haLBEsgqqE5CxfkPM889SdiTCwxcbrMlFywzlBrO7gb+lGFkGc8xRQSibm1FFlEBNgaeic8Dxlf3ViviHYtozRIGa3vutXyQpACWeAlpVfn9Ijy7Vlq5kK6wNQih5V2gGTVBDmXwkOudAbe7nrevh+ZOqnpT+NErjCXD3z/KKraS07MUbzyXO6gTMJHWyAkg6HgjguuSKDTKZdDOHQW6wX1HILFe0PwjnlK9EHum6BFkpk3jumyiP5LSlk3VilvdTkQyzPGBS6XC6fTWZXgZ1BO0jj2dOOA8x3vv/2ab3779/TTxJv7e0Y1Qvzmm/fc7NU+RQ8D5x37/Y5xGBSyN6IyDrTw9idR9l9/NKFpIa9lLNY9p3F0Nt2ShgXVFz+zmb4meeTI6Xjg46dPnI5H/vinP/PP3/0ry7zw9PDI4fkgxO45Ur0cDa3t31mDC0H2KCPz21urYquW4ISAvO8dnTdMfc/91DOoWOvQB6UVWGmBxnBZIqeLlKTnnAkqfRKGPWHYEbqBPnQEZwgWMbosEVHcXj17dB9uvZsNzdf1XKwGPGm7Z20fr8I/eV02c12rBZfLmct5lqDfWozfmiIEPZeE2NQKEamalIz1QoLWg2GdIZd5JseEtZabacd+txfE1zviLMniyZ7XgK89Ss7EOTKfLvSuw1ZH53uC71eOjXVOxslJ0ie8LLDe40JPNQ4TItUHSpI9tMQzNeU1eZfCi5NuXGfkcwSPqVX5QvIaxrpVU6mCmmxXahJ6g3MF6yNYlcpQ3ztB7DOlZjWozqIz9zPXLyMyW8swjgLtVyVxpkxJ4uGSUyLHRIqJZZ65XM5474gpKtJjtKQykFIi9EIqLqUIsVBZ+o0QWUrleDzz6dNnPn9+4NPHBz59emAYez59fmCcBpzznE9nYbTXSgg90vtzrZxqyC5fwWNpLY+kJKaIMc2kOCshW/2CtN24lUK2rO9625PAh/V/N7hVat3ahWANnW1oj1hdWJUPtxoFN7VRasUVgymvV95yzvL2/pahc9ztA30jKms2box4D0lW5/BetVFEvFzRBL2P2167xYIN6TFKCDVGLAK81FwtFa9oWOsg6HxgGAJj31FLxTu0vCKRyOLQzEA2zYZcVBWCFGJzxFoj6JBpw7WR0tfnbEhPror0VOEdrVQtI8S4Lf9oJoemgTuCZCHk3eA39edWHqm1kI0oDb/mJSWhBNWoKa/R92vWTLAWKLYQa+VSKtlCdY5xGtgPnmma2O9uyAXG8Qg2COWmij2DqYY5e0IR7k7JEuiK32iGNEu5w2m3nLWEzuGUIJwGaXnuQsbZQi0aHBkPStSU8awsMXKZFyGxG0t1Tr7aBdyMsUW4CvO8tv8KssVqMuu7jmm/Z393Sz9ODJOoimMMN/uJoW8He+OC2G1D1cPqGt3dEEyucfZXulrW+hKR2HShtuYAGsJcBQlZuwdLK3k0A+BCjAvn04nj8SS8yodHlnnh+enA8XAU1DtnOYQAVY3YUB5tHbZ6n5wzdEGCnt4bxl4Mm6feM3aeoRNn+rGXRNVYh3GttCM7QMpZVNadpVSD7zpC6LTpxOMasnR9P8oXaI2OEQ0NMq20VV/+/NVmZerfpuD7X7kkB26lqKRSLLKHWbfJXxjt9C1YNffdupsoRoUk9VL0JqdErkl04aYdnSb5lCSGoJoErCWgFfGqoo+nzgcGg9MuXtfGtyE9xrx4TWOltG8qGOepVpCeRCXq64ow4rbvWOPWQEhih3KFcrWWfHktETLVwC63TjTWJiK5X1fI/Rrca8dabWjQf3z9oqCnlMzxcMB1gUDFeidcl0W6KJJqPuQk4kldCPRdT3B+hYpTFCXG0+lMXOIV+VO7JlSBV1jnkVmVnc+nk7yGDtblPHM4HBmnkfNl5jLP1ArD0DLqLRhZtyvdHFZYrHVm5dZ5lrd2XXUDbxBgUba/0Rrr9WJpJQRjxKsk6ESOxpBoli8S3TQRrtqeg7rKiuBi14vR4Oo2a19nc7VAsBVvhGmfo0ZfrkU9Vuo8umAEG2kCkFtgUxFvrGod6GZmfMB64Up13uAapO0DRg1YlxSJWerRDXlJpbCkjLVSf54vMylmVfWeiSmJloUtMgattFRlobQArakwC5nRriWq63b3ipUD21S897LRVxRtk4AldMLzELT2SuZMy68N5WlkxWHo6UJY5eGds5JVRSMByStecnv9urGY1lWnhrYYtLQnx2nWsmuqEEshFemkE7dlGX6rpUwJMGUMi3FkE6SttmV0NVNNopKxFUKVMbcVcrK4anBZkCbvRLAyp0IXBAlKYyAEtwaRIFywlCFpBaPkSjWFYjK4iLWinZXX0rOecQZc5wnjKJyQaWQYJ7pBJChWUTrTjAuvpem05fclsIL+2TW4eOVzcnvxxju5ejNGyyVr7HX9Py0oWg//a06QzNjgA8M4knMWHTLnMVZlMirUKlxM6wWBtxQhKVtLcJ6g3W19CHROOuAGRXd6L/6FnZMmktAFUbf2fkU0MI3ThSZCikTWKgctEnyybj1Xbfa1nRVtLr8ciOvkZOtSavyx7VGRpHQ7IV7vqrWuDuhNl0bmnBXE/KrkVI0kv+u9WZNO1QUzogsmlh9gawuUJMBrwYOzjuDlHNmEEdcMkDbnre5eVv21VmsHWqKofnhX99cacNZTnREB376XubNkGAK1ocyNv9QFXN9JgKdWLlSxhfE+EHzS/UrejSTRMvite7JedeYW7UKUj3JV2mrzROkWf+36RUFPjJE/f/9n+mFgd3dL6DoOz88cng8cz2dOxxPLZSYtCw7Dftpxd3PD0KtrdS6cTic+fvzE0+Mzh8Nhzaack/JA1wWolWWZmS8zTw8PfPjhBx4ennh+fhafl1r4+OlB+Qvw6dNnHh+fGcekG9x6HK6DvR5YSDdNLomYFi7LhRhnCdyitLouUbzDcs4s87JqsdQiC7PSuBpb3bTVJXvnqCEIYmANhUJQIhpN2dI5ipU2wFILpSSwlXEK3HjZVLOKLP4tLXj/mctauAkVQ6LOC3GuEDy174WE4Qq0eixaPzdbl5IEa4aCOPZWW6kBcAWTDDZLMDB2Pb7r1rIQ1pBz5un5ieNJW89r1S4ReGLmAC4VZAAAIABJREFUpL40x+cTyywlrvmykGKWcogX8rdVM0hrLClXMA7rDD5AXwxea8CyYZirhVGxc5SySVGRMiPBWNf5tdyy2/XcjkFJ6dKdUmHlAQnsK1+dc0xDT98FyX61+yfnxOV8XgmHr3VZaxl32lKvSGkqiUtaVnmA1DJb3dxyhUupnFLCp8pYE85V5Wlo2VWzsaKq3dF0LHbEWkcJYrlSS2ZZDKlKltpVCGpA6VJTYC6QEqZknK30IeNtZRgcb28DQ+8IzjGoAN0SC6e5krIEtLGI31qIM0s+YK2XgCe3OaRikMbQ7yZu3r2nGwbefPUVd+/fiQda34kWl/LvvAbgrRtLNIT0cL1KcsGsH
musR8BrKmkVSGIVQb5Getq70STq6j3V9gONr7ISnTerCWsN4zRx//YtoeuZdntC15NypWBZssgPTOPEbujld/MCRVrWe2cJ1hCcYz8NDKpSvh879bUz7HpD5wxD59lPUjZ3ztF3PWJSDKnIoWqaCJ0RgTyvWjAt+KEK0itNJsLNFJUYOeqbTbNpsCtm+/xGukQxbZwUJTINDduIw695lVw4Ho+rCG9cFoyROW6Mfr5cMLZQ7db63QKdSlXpF0G3l3nhfD5LVcM6gvVrICyBC5iu0+46p3ulGi23gLlKwOQxeCwBi29Kyu2NV3VAMEVkADTossbQhQ7noUyV5S5jl0ixjqUk0H2uyUy4saefBkEJvcUGh6uFfoiMMVEwdF2/SeDUquKzdeVyYYpaVWWaorV12pBCRugLllwi+W9A1H8h0lM4Hg/kWgh9T62sKE9ctuCg5Ib0dCvPwai8fIyJ8/nMWXUgWgRnjbmqbaopm9bqT8eTlrAiST946+g6Hs+cLzPzvOCcXzkbDeJaNT6uruuIsWkM5ZzIRSLx7XsSVa8aMHy52VwZULKVhBrS441wWux1WcRadQ6/Ul2ushi9t/Q26OsJUffnoLr/7GWAzmlmm6V1uxiEd2Msa+osYT8N7Vn3+fqSDLwhPQVcwHiBTUM/MIxTO2cB6fY7O496ga/ZdSmFmhKxyDw5XmbmiwQ9yyVSUsZZR/UGZ4t6bFntKmqlL9k0nWtdQ2a1DmllgVrFYdw5mUvOV1wLMr1XFVBLpxpQ1krZr+ndicyaZlf6sbxtSI9saMMgQU9KjloSrxS7buNpRIPKGKPt+kYNyhOFiimmcUAV6ZHDIlVpZ49F0TKrvj0N6WlGojprCpZsAsWK2ScuUEkspmdhwVble1XFAXMRjkKulLnIGJpCcBFnC+NiCa6Ss5jBGireOaKiPCmLcN3S/LxMxrgoPnXrVNRgXN6idIZMI90w0k8j/TiuPIWG9MgZqXoxjRBbq3KKWpBg1wFeewpaKek1L6mJ0MpVlOvX02jMbP/ZUJ4GjzS0x7S7Ujc0OoTAMIykVAihU06FpRqJr7CC9AzDIMFBArJRZNjgjWgc9d4L2hM8Q9/TB0fwhqGzBG9U1DQo4ukUPbVX5aXt/q9CdG2rXtFFNECRUt3aov7l/W8gxvX36zZg8qyslMSXv/+6Y9m6KlvlQMi4RagPL8o02++0oPyFaKFq4jXkqJSC9eCwlFK3vag5ohtP0XLQS6QHRQC1akFrtmmCiC8rIhJcmvW8a5IdVHChw3U9xVjsMMPQ8dJjBkwnZWbrZAM1zkCVZFUI63HlerWDtHHsjFT2MKouXUpZW/prrpqEb9y3hvT83PXLgp5cOB2PVGCYduAc8yw6NsfjiXkWXk2D2NZDw1hiiuQiUe/DwwOPj09cLpe1G6DrOsZpZNQOi7bxOO/oh55+nsXpWEsPRTsRrh9tMshku55512f1Rly87uTSwrWY/L2AilnrqQ0zMlJRWXfCFVIuBYcQRLM1jN5B8ARjRa/HCYxonAGn5RllyJu8dXqBIj1f+pL8ilcrBWGMkkgroRMRN6cEM+vEngJdcKD5bW18GjX8LIV5XjieLqScOV0WzvOM4pywusXLfUopMZ/PLOeZUgtLzqQmOWAXjLXSgn46sVwWzcbl92vJLHGWRTwb6lHPiFqJRUX4ciEm6eqyRrkIbK9Phcu8SEBVCktMoi1lDMGBCVY0R7xjHDrptjOiZVMrJC0/VrOhBJlCTAbI+OpwCTCOUiSwMq8c9RjtrISruV9fHiqelmFrMGMNMRsOFyF2DmFh158xFS4X8eDKpZARfY6KYc6FMssYeSUZ1pqJuZKq02YPQ7bKQzBZfbScerB6vC0E77Eu4ztVUnWG0AVBaoNnjpVExeRKniPxMpNyIZeFgpeDuupBpwdLzEVgctfRj3v6SUTWmnCi/aIFtt2sa0i9HSwS6DWjW75InAyvtjDbu2ut1Dq/XsRaQgJc39SK89Rr4nPbu9q/y2dpCvkA027HNE5QoAvdqksVvJeASJOxqnwaV4uQiaWgIU7eVdAXg5OSUS3SrVhE0bo0ek21IgRaJOFoau6xVG2C2EqUwsfUdbvuRcIxeXHX12xzG5PNLkS+VVuAeB0ktke9Qshe8Vrb6lsHnLWrQehqqdF0rFRQ0hnRvsrFYb2MS5uGjWMKDRHazkMxhBWPwyUu6lW5eRiKQnPrLG4eXM2U2V8JJhoVemS7Q1VRtqqjZAzOS0t76Hu6aSKHoHNDKRt9wLaObMdqiupDkN/JhdB3dF1HSlLqQrlAxnndvz3GBYzqw0kHmYy30UqEc1pKDb+yTk9KiY8f/sJ+WQjjRAaeDwc+f/rM0+HI8XAQlAfoQmCvjHLnHJfzhVIrHz584LvvvuPp8Ynn5ydtVffsb8Sra7/f0fU9LXfr+479zY6cM+M40PUBY8RtVtxZRRTxdL7gvF/LUG3Cr6BFZd3QGrEsl+tqr05I1Lzvy6AI1oFeQZ72PGdhrZdCoDI4SymOrgvsxxFvDfu+o2+cC+8oa9Cjpp8m46MlZPU3yZmcrhbwr3wZIzo9oq9hlRPSib+U8yr5L5BqrnXtkmoBT61GS4GVlAtPz2c+fH4gpsQlRi6LGBDOS2KcpOuqlgwlk3Lm+emJ0/FIKYU5JZZ8LWAoxLXz6cISo6Aoocc7T86Fi6ogz0vk+XhiidKdV1eez5YcW9Oc1RvnSr6/xMRlWSilElMmKoLYuz1uDAQHu7Hj7e0kEGuO1BKppXKJiaUKBLsU6TCQebGwRDnEMSOlKrHQWbz9+cX4X7mcs9zd3Yhu1SJddZtvj8Hbxpdr5o4ODJxS4YenxLOHvBwxS8ICj48HVeMtxAqLdRQslzlS61E3vLBqYUlG3mGxzCYIubJWalnAZOEKdQO2VgZXmPpIcIXQVfoh03eV3TRyd3/P0A+c5kLxiSUW5vTM6XwUNNcn/Dlp0COt9BvOU3DeYvuJm7dfM+wmdvs7uq7xeRSxrFdHXa2qBSQwWKmN8yE8ph8jra2j8hULI6XCPMunKg1lvcrWnQU8Yu9xtT9cabhca7q3EM0aw263YxwnpmnH+3fveHt/Tx86nh+feQ5POGsZh5FpnCS4SQGKuqAvF2pacFRsyVitgLsqRW4LULRZ3FZyMto6nUlZJA+WVDlHIcAvMXOJWfmL2kNlVPohBEUS5GE70T5oAQTGfKGYrUGTljzlg5sNTfJOEg9FsxoC1vwAX+/aqgTiECCdsV6DcOmQVeOhKsFiRcxd9+MoqKkxisTJuGY1bQZWVDrnxPl8woB2HAtCnlIWoV/jxJMrJooT2yRvndp8qKtBvxHIC1JpKO0+NWkSoFU4nLWEvseq8bhxVsxQa4EiiZS/7oDVHLjUSl8mJuvBeabdnnGaqAhK2xIa6zrdYxw29FjfawLXULCKNR5jKl3fMYwT09T/7Ij8oqCn1sLlciH0AzFFQhKVx8s8c7lcWBY5FBoctzqxG7NaVFwuZw6HZ54PzyxROm2cdm41ESrR7pHXtM4JIa4L6wCjA9IIYi8sIurW
yikLoS2MusHgL9Ceq8RAS07rfvblYrhaY6ZlEbVByAKtSVcS2jrsVHjPENSHxjTtg8aMt8J9aVo9Tkm5Vctbr7cczcrXaNG9D0HaUp22pxsVzsrSsdUCicYSkJJUU0aOnC9CNp5j5BLTejBa5ZjUnKhFSoZxnkmqoLykRMxZ0ZpC0r95uUg3XfWezntZuFUkBXJKnC8zT88HLvOibY9yqF9rObSyaROM9EoMX5J0UpSqGWcStW7hDcgBEZz6qFmoMVOzJZuCz5CN8EhW+L1CToZiDRhPykF4Taa5LL8u0iO1dllrDeVqNcVVoVYzPmnXl6AnlcppqZRUObnIyUs5dlkENS21rkhPI5tnDWh9AWtVW0QhamscGE8hAEXQMN2gPE40q1wRNM0XXMg4L99rSrX9MJAo+DmSKRh7JuUqQpRNFsHIOlmVrtvacmBcIAwjXT9pWUuy61X3hCvk98Xz7b82ZHgDFOBVQ53tqlVk/NtzLdfXVtoqRoS8rlqtW2ikJ9QVOLUFPhikqcQ6SqkMKgqXliRimopAOCeaaQbpQKUYajaiXm9YkR5BJRryU9ebKR5QZg1KRZA7y7wrmx1KKkXQu8rVOxWUYe3usVZNTLeW5laavLphXKPzL/TUKrrHKk9oRXrMep9eM+ipsKJPKynZbHpe1rQC3JqVC+BtLUFFPMV57ipYv5r3VoO6WgpJ0cEUW1NR1gYPldjQsaFod7MmvG3cpQlE0DVbK6UKRaEiEem6OhqKqHsrxlBCwA+D+P2VIoEyrUtZSlToV0vFeU8o4IN4Hnolu9t1D2dNvI31+nBr0NOgz1YVk+YVIUf/HCPkFwU98iEMpRTO5wvFWJ6PRw4HedRS6IdBtTJGQtfhQ5APolmWVT2Tvu+Yz/OmeaJw8tpSrGTCYei5u7ulVri9u+HmZs/qkZXz6i0SF+ETsfJgzLphyRhpprSqdGr3jU7EUr6ErBX/MRKUmQaHWvMCDjdUaa2LCVOErFlTglywWod2GGzR6BdRgZUWRYU2dcEWJWuVVy5tgUxY771kH0Fcs70PhE5IZ00AsKJVPCNQd0qFlGQzO57OPB6OxJh4epY5EFMm5sySM6tYpNHAToPDkgspyt/JpbDMiSWlNejJLSi9WpzeSsZAtXgr3W9o9pNy1hqv1J83Nc+2sEU/xhrxQgNIWdAd4fcUYhZNmyVG5mXBIIKH87JIYl0EyStFfj5pR1/KhZRkoFo3mOhCLSxLudroXve4bJ2RAKOWufoYCM5qZ9a2aeUKUekRBilPJioxiahgE/F0zuJrxWPx1qoCawdW2r7XTdKY1UbGqTBdCNKN4azDWuHsdVbu/+QzX/cLe58ZXeJNd2F0iX7sVu6CcQ4/eIqHXTG8r45liSyqBZZVaj9m2Y3D0BGGga7v6YeJYZjoh1HaoovU/oU7Vq7KGutx+eIQ3ZKm9TuKtFyvh9cbS6hU5dm1xKyVKHUjUlma1QdCf02DcA1CWlmslQDlswG5iLt6jMrDTFKKcE7GS5FuGtKk+5E+xRSYU1EUArploSLik7XIV6rFmYT3EkQtWUocc4ZjEjPhmCvzVRDbUMngA34YCd1AGEYpUfY91kugvo1Zu1+KzBv5rMKbAZrOmUBAcNV9CtvPvybOY4BmxOq8Ey61EV2y9Uxp0ghuS4Kd2Xyk5pyIygmiVu2qtHjnxB5HaQpF0ZWYkvrPVfWuRJPosnYiW2vWQMN7Lz5Y3kuAqcl/K6kaPedkX5Czyuje0YgP1ViqC5JMlUxVfSRrytr5KkwJYeH4avHGEXJeX3tzVZfP0xIX5zzDsKPrBkC4uLU2lXttYBA9aH424uGX6vQgLdk5JR6fn7HzzIdPn/nw8RNPzwf2ux1v79/Q9z23b+4Yx5F+6FWPQTaT4D3TbiRnMRF1/iSw4zoBJYNulvP72z3ffPs1wzjy7Tdf8fnjZ5Zl4fHxgePpQFoWkaO/XBiHXjKLtkHVjQ8oWUlTtdwCn9bibEy+gvEqrF5Toi1UdAIKU1z+cM1aQ0+ROl8k+LlE6hIxuWJTkjp4NdicdI8SklmTFbeSClBLIS+JOCfp/Ep14+29wmWMoevFP60fRqzzOO/xocNYq7V3WYymGkiFagoxRU4nkQ749PDEXz49sMTIx8+PfPj8KCx7WDOMlCpLzKJS7MQTS7riMsssAcv5vHCeFwqS/WU9dJxyoJwV4cQhOKKBFGTzs0b0eC6LeHLl2gJdRVaMUXl6id6s/g7UFaWSM0CEEq2xnM4XuuDJKXI4DhwmseTwpuKtchFSZolJCLZLYUmNRyZ/z5rM6RzX7gPn/KsHPaVULueZoe+52+/oQtB7syMV8UFLitwsMXOalQwJpFwxpTIvmYtNWCOBfPAOLHQ10NVOOtaMx+oGIxmcbN7BC2TvrWUatAvLO3a7nr4PeGsZ+kBwjsklvu4v7H2iqzN7nulYJJhylkwFHwg3O2z1dHfvuPuNdPZ8+P4H/u2770jnMzEV5kXmaJj2jDf39OPA7u4t+9s3K4G5qihlUZkKXQFXX4xqKoklgrUtk4V1AVa2Dq4v86Nf+aq1kJfzC0S6aeMIJ0K0TqoefkYPyuu27lXPpyE9DdXImZIr8XyRx+VCWmYshr7rViSiVG3rLlVbxgWdaQ0HxMKcoMsZYypL9HgHg5eg5+IM8yLotvOC1htrWarjVDy5WnIxpCL7arPtsdYxhJH+5o5uGBlubhn20mUmQY9Z/Z7WWG+9cWZNmE25Pv+q3LPWcWcUgdT723RhXucSNKT4Std1ZJtFd6foGVQzJS6yV3kn5RrbhGoFcc8pcZ6FY0cuWqaG0AWxY7JCT8hZLJLOl1noJIqY5yIJWU7iOVi7irOOPgiXplN0NYSA8wGcyEdY6/TsrCu3rJa6+paZKqGGBazz+NZyrqCEoFuip24MokPnFX/0lZoq2Ri6caDrO2JalPweMNYx7e8Y9vc45xmHia4bKCUzX06kNMu8VMsoUX1z0BC9v3L9cqTHiGjgEqWlcl5EI2eeZ6ZxJCgvpOvC6kbdtGZqVdjOu9WCYmv33iDaNhkxIq09TiMxRjHIHAeFs4wyulVtV8tnG1TZXnPLcmjBz/qtFevken1sy2iDJKGJmAlUf/UHacZ1JhdRbVYM3ujEsEY2HRr7XCE/i2mxlWxQVxnVq65D0KBCOyuCwoLWYX3QSL9od2fZNhqUMJwSMWXmZeF8vjBraWueF0UVVJ3aFHyMKvhmMDVgnNFsQw6Ykht6JFBsqhL0WNsg7qbqLJlP1izFqg5E42eVqrddg54Gk25ZXtOmuB65dT0Lv8vWtXMvWuGwxRSp9Uo8rLTOP3n/uWwdP23zzBTMquRv8K6uc+jVrioEbqrw6VYxOCP6OKkI+lZUe8NHCU5tsZRqNdEQhetG0HdGynUOhzMqKFfb5rJpF4nLul0fnZdHCI5p7BhVQX0aOkl6XOK2s+x9xBfDlGd8gYJj0azdWItzHZhAcAHrB8ByPJ4x3q/8raybMdbhQofvhs252QfduDUbbT97dZmWcbMFcF/e2IaX8AI5fsW4p1ZKTop
ayHu2KJG3Wm1pEdQYRUlWNGct77Rgv24xWq1btq9dts1KCFBUTtGk2tZGowBsxb9cpSxaTBU7EA18SpUjp1ppcjBVvO9c8TLXrGWulRnxFyw4LZlL8ckokdfoWDodR+flLDFXZN4f3X9jNguWhuQpQtaqDOv9kZd8dZQHfZ1WZhfh2ZdHjAFJoI1R+QvRQ7MIZ6b9jBzwG9JDQ5C1dG90rIoCBjGlbetreF87YzQhtcrz28xQvxAkXD+C7OUyk1SvTh8iHqBnmQZJWmdZP6Rp+4QTMcuW7DvqGiM0pLghx9Y6nO8InQgQh37Eh17Q3RQxRdy/6kqA33b3nxvTXxb0aJ0t18p8uZCNcHWm3Q5rHfdv3/LV1+/ZTRO3t7er4aJFBsi5yjj2vLm7E8f1w0nUJ2thmWeOhwO1Fm5uJmKUDgNroes8w9Bxe7vj7ds7TqfA6fTE+Sw3q3nKpBSJcSHGZZtQBhU30mDqSpX3+lG0JpoVRlw7t1ZOhiG3MoXyIkxBd4BKjYWaMsuSSbM43polYlIWZWOtT/oCHY7OBkytBCqhVgIqihfEy8gaec+vCaNX3cXX2mkTiWriYNqumLQ7K6XE8/HEw+MzS0w8Pj1xOB3VaFbIpc400LN1DAjaI9WoAtVKXb9asYGwBuMCVlqLsM0nzZh1E3ZWF4Z1OFs1GxVEr9YWiAhiIQemaAixlg+38LYFby3YAZUH0A3isiTc6cISHR8+P2JKkhb0YOmasGIuRPWuSsXQGs829eOrWrMxq1DXa17eO756+4Zp6Hl7e0PfBy7zQkX4SkbfY6lWM2LRMemKoysWT6XrI/2w4ExlMpA8pGpxdqRzk3J6LFkJ0sE77byy7MZBOt2sZezFiDR4x/5mYhg7LXsFvLOEurCvjr7OmAg5HShJeAtLSWQcdrAMuz0mDDjf48MIxvL48CQbYYjYGDHeQ63048jt/RvGaWLc7dS3ScrSrdPlpaDoz1/rkVivj1r9/ddcl7WS4rwG5XJIi6CdvGw7yFVPqGqZS/cxCXxUr6ay8tlqrSqW2fbJqCbKScan79cUMOkBu3JkjMEoP0pCLFHLjdZwTJVLKThTObmKa0r0rfpmo9gIGEtxgeSgWlXn7oM2UoiQqzOWYgP4HuN7ETR1ytezyufArOut3a+1Q2pdg1sQa1pJcEW+5GFaafAVx9IgZaRawNkszU8t8inbZ7hGFNs4NhHCfM1ZvealKXG9aqBq0WaOUsV4uQJKLQCrXVqCpIzDwG63Y5omuq5X9WuxDrrK+a8+iCSftco5XozMsXSlV9XwBbnnUkq0Vwr/1oslTcUoLlNflti8X5HGJq/Q9VIpCl1P6AZyitg4i44Vhmo0+EEbbl7IO/z09YvLW9Z5Uk48nU/MCpXe3d1xe3vHN19/xT/90z9JmevdO7zzaz3aacBws9/zzTdfsduNPD08y40smfPpxOdPn5jnCzf7kdvbvZCcnWGaekzNvH9/z+n4DYfDM4fDA4fnR4IX1dtlvjBfLsyXM/PltA7C+s6V55Ny2tQdV7XPzR8lpUhSApiMtVlb6HNrqUMi1ZKqcEmWQr2IHUc6L6TTRTbamCFnvLFgRYK9s44bPJPtiKUyV3k/CSEPul4ms00Z+4o6Pdugypha7zeJ+JbpmUwxhiUlDmex+vj0+YHvf/gofj2HM4/PRyUjahkHwDiqaQ73IjRnjRyyxcsGFauj2EDFYQJ44yXIy8KdsRbhA2jtOfhA8EFQCpcoRTqzVgg3V6JyjYwtGHX2tWZDhQwriCGBkp4o7XChwvE8sywLzhrm84nPnzzOGqahY+iDLGYXVuKuc17un1GfKYWat6TJ8NMIwq97dSHwj3/3DUPfcavlrafjUQQK5wVfLC5J2SK4QN9J0OZzFDSOwjgsTGPA24IdLV02ZGO5Dbdcwo1o9BQoWe7pbhoZh54QPO/f3nJ3sxMla21Bd94x7Qb6XvWDGqFyOVOeO+p8JJ4t5+MDaRG7izknUs3sRsfd23v66RYfBsKwx2B5ejowTHvmRdBGs8wYYHd3x1fffsu0m7h984bQCUJQq3CWYAt8f3zISVDzJZfnx92b8NMdXb/uVUphPp9a1KwoRsBUu5YWaXpDdXtei/CchA+Y12CPlsVXSDEyny/yuFxY1KzZO8duGnU9KJLfTmYk2fVhkEy9isBrLZlYCqc0K/G6YKt279CyikqpRkrPGHw/EnYG6z23+45340AIwruyJVOMpbgeE0ZMP2FCj/EBowrRDWW/HoE1eYWVXwaN5Nt+uCI8EHneJsMaRL3SZazIBJhqyD6zirxqEvIisFgROiOJd4xURBS4EZOvrVGKzm0DGCcWEpIESmNJU9d2TTfOBbrQMfQjN/sb7t/cc3N7K36XXa80jyYFsYaG8jn0RlpbcE64OYIi5bUsXFd6wSaq65zskdflrYoh2UqwEFIW4dCuE9FDRadF8qZnGndY7+mHPb4bSHHhssyYFCGLmFelUqolFVjyy/X6U9cvLm+hGYNA/wlrnMiZG8s0TUy7id1+Rz/0Qrxbf01ung+eYehJKYlsvpFJK46xC957yT5yQlyZ6loX7vvAOPaktIgInLdYJzOniQ02wUGZQxsJsJU7XrSiX0G3pUG/12RHxerae3+RQbRZUZB6Z5ZHymKlIOQ/YbIXI9ouqTYHbwEwFQ/BKkzYOrvEOqGRkf4bLqOrf90hNnCyTeiUkpp5ikvzPC/MyyxO2KVQjccYr6WxVulVRCULadBZLWkpHCl9H1YJmm4Txqsvg4XVqX3N4q7adysrZFu17NTgWK2DvsheNCfZsugvvi+t+QIhewrkBaedP6mI/YULYL0s/GAcXnHeFRBoc+a/IZNsl7WGaRzou05c04NnjouQUzXlLrViW+lQB9gasLXiEM0d50T1OlhL76XLy3QdthuoWCVHymZ2M41Mk5Sy72/33L/Zy33zdu3KnKaergus3SIYsod57sh5IVtPrVKmytVI557unD504oweBrphwBgn3A4nPDzbzAqpyk0b6Efx12robAVlyrLCM+aLARHA78dr7ae7eras9ueB9P/sJfuh0AKUv1YdjfO4Hkm1vcft+bVWzyrUdt3B2uQ6clK+hwRHBuGeyF6cN+6T2RARo8KdUs431GKpORGTqHuL2bIuqoZCFCXOZwl+OgJDV/BUxmqo1mFcAJQriaUYqzY9dtuXjKExyBtw86P7f5WoyhNeQHKbJltDz/RzvSrSw1a2sY07pu+h1YK+QBLXs6lkLVG1c2kL7q5LmddVifbnStnmRXsnRjvznHOrpo3wePz6/tqfhi3w2T7Jti/L85eftV5vgLRhkxKnHDFmtbWxRdH/1gyhr/+lIKNTvTirWk2mbNWIalZYidVG5W8Yk18U9ORaOc4zsRZR7XTizHx3e0foet69veeNEpjHcVwZ2asqpIG+69jvd1gDO92kU0qkGHl+eiKnyOmng/qWAAAgAElEQVTwhvl8XuFpYyolR0Kw7HY9kHj3/g0pLdzc3nB7t9fN1RPjwul02AauooMsUWQzJ2yPUsQBu1x/76
r1/YXgV8sQkAMkZamPL1EIlTlmTpfE+SydF41FHSy4KETgnswwR3AzqRQOS+ScEyf1mlqSEmRTc4N/rY21BTqbyFuDvzGGeZ45HE/ElPj88MgPHz4zzzOPj88czhdi1EzbyIGLCm81nkVz9JbSkWxYlUwqcu/mZWGOgnLFlMRNW4NfUd4Eiv5t4DwnKoZlSZzmpDo7iSVmQZqyklXXoEc3ytrylYb2mDXQ1ruggouaPenzaipLBhaxoKgmkopkvCGB82KaF1JRPpRhyU41gVgP/sZ3uRbGe43Le8e7+xu8d/SdzPWp9Nzd7hhit5aYDagVgIz3fDpzPhxFDM8V5rwIT8k5qbdbhx0GumFS6FxuqbWW3TgyjsMqaleS3LeSJft0vgjKmdW7KQSJsrYTR2FpsSYoxopvm3HYbsR3vbacN9TwZSIQgudmP2Cs4fZ2x+3dDdNuRz/2a6eTaaknLzfs/2hVvSiBGfOT669enwqvcNUqDttrC74xkBIsEaviOMYIL6M6FYIzhqp6Om3O23b2qz5oLXCeZz4/PXM8HDkcz1wukswI6i3o9xKjrG1rhIQePP3Q883X77i9vZGyRooqiLdwOhzU8DlTU5KvpZKT/M3n44WnT4/MS+LNfuDm7XvGceLr92/5x7/7lr4PPH9+5POHD6RcOJwv/OXjA/04s3/7lvcxU61bjWzXm78ectsHNcatMV5rbmsNuz8aLtPKZa956SGu/Jm6Jn5XFIqqBcMiXcBYkWMQ3lPlPEty2YLVtkennEkxUq3DVYOthpyav2CL6VQ+xnt2uz3WOO7u3nBze8fNzS3TuFOtLcPKEOeL+wpXwZbeN4TD432l2LJKEaw3ee3Yaskfa1ItsbHyIXN7r5ukhrl6zaKWKrlI1SQ3wdRW5tN7a5rqt//5kOYX21A8nc8YL+hO74TH87t/+Ad2047dfsf9mzcKVfXqqbHpwEBlHAfu39wx9B03N3umYWBeFuI88/njRy7Hgfdv7zg+3eGD05ZHYbD3neXubqTvLcvyDdPUM00T79694eZmous8y3Lm8XGLnCtSG9xNO0II6qi+kFMrZSVSlq8xLrKYc9a2uLpuIPWLZdO6eHLMXObE+Szu8o/HhefDskLlxoiFQbaZmUxfwPiZpUrX0HG5MKfIMRdO88xFg505a7fE2m3ya18GTKusmi2j0E96Op359PDIPC/85cNH/vXf/szlcuF8mTmeziv5UcA8u2betcJZa8orubjIZI82Yqxkked5Zo6ik3Ndq645qVMuJOUG5CyBxZIqS0wcTgvLEjmeI/MiAVArc7WuE0OVgM4I8X7LHuS9vEhSrrKlNuZZA6OoQlhzrpxm0fLpu0wIUYQPlbAvCMfmnByCU80T4bI497qoXfCOv/vmrXwGreFYD8ZBTJkxBPbjIN1zpYr4Zal8/PTA9z8IhE7MnJczFkNwnq4bRBV12mP3t1Ki1bljrWXse/rQYazBG0+KEpQ0xNQ7hy2V2gfRq8LhOyccFPXyqtUQi9hM4B2mm/Au4IcdoR8J/dhqEPJ3UWKxMQxDx36yeG959+4N7756yzhNdOO0Zq1Yqx18m6YJsGbNL6dB/cnnX/7Mqu/16wzdT77GEgXpMUWUfIWvgGbFDuNEadd7hw1lDXpqkQaTNtcNmzFsrYXD6cT3f/nI4XDi0+OBw3kWqQMj9gi5VI6XC5cYhWMxdHT9wHh7yz/9//+Tf/jd3wnqmSK5JObLzKdPnzmdz1Q9hAVtF12lkiv5+w8cPpx5PEX230y8/ebvub9/w+9+8zX/63/+A+PQ8X9//898fnxiiWcenk88p+8J3cDN+6/4zRzBik5XQy3Jm+XASmJtXSFN5fhKz+W60WA9WI2RSoE1PzUMv85l2FSYnZOSUMlEpTUIri87kjRkJCqiIzari8GcMpcURUhVrZKMMcQYRbDTWrFJrKxeXnJmqQ6bFa7o3a1nv7vl7u6Od2/fcX//lr7vCb57gQh9CU2vna61oWgyBtaDx6zt8KY1EtWGpaPLfEPeVo23IpIlKbe/qyK9LUgERN5EhGdTyhQjxuDNHFx+URwOrHNiYvprKzJXxJHZV1FydN7Tdx3TJCWtaRzp+l5ddRvCow/9sHIgeFLyIoilon21FKJyKeIiwUcj74GQlYXnIZ1fw9AxTZJpdp2qNqocd0pxi1R1oKT05WgS3q2M1VrU19LWNYzYgp4vN0ddRA3RSGsHkvBKFh18geuEZrVkWIrI6s85c1FEY1ENhtS6kHJeu5oEoXzFw/LK76RF8q2NO+XMsizM88zlMnO+XKRTa5EssJay6t6s6PNayrk6GJrYIqjUT9F7VladnBfu560dsgE1VLKT7i7nHEnVk2NuwmZlLVG2hbaBcut39CGHrE6KH1edat3WflU0rxisEVQPRTiczXqoVEiCbhkrHW5SPtLMW2gUOPeif/ZVLmsNQx+osOocdcVLS6vLTH3PzTTinROvrSTluuOpwwUvmVSyG/eCrR16dXm226Z03V1nMFoC1fFs2aavxMUqbw/6rmxNBaUVa1oA0XzSnKA9V5B2XbW1toSxcTc67wlB1M+7LqyILhoYXSM912PQDr2XZOUfX9doz/b1dZdlba/V1n/VQ0XXiLhrF0VvDM1PbEPCpAOozXC5x2VFVVcx2RhXhNRYOY6Krs+YxJalGiOaSSEw7Xbc3t1RayHGhVIS526WAM2J5UpaNOhRqYqcK6E/Uo0nYzFOJDKGaWLc79jf3jD1Hf04qgWBlb3nMhMLzMuih2MhlJfWG1926rYxf7nWJAFeu9uukL42B177+rJkk9mI9S2JaGuhKYyv/o9Fghg5l1iFDtscaYHT1lFa1rNve91m9SSl5L4fVvKyb9269advRdtL2x7bPo++gvAqrZzTImjIFaxWtzmoiZj8x9XUvt5z2/3SR9UfWgPWeuW2zrU1FCua9rd0yf7ioKdakZ6+e/OGYRi4v7/nZr9j2o30/SAu094TfEfohJmP4UW5qN1J7x3j0NNE6y7nM6VkDodnHh8fCcHTdZ7QyYKKaQGESDXtpHW974UoaZ0s+tPpyOVy1gGTWzKMo6rVilx3+qLElXMS5CdGUlp08uRtZ9P3LaiQRJmXFDnNkiU9n08cjidSTByXyKUFPeKWhrPgcianTKiFcoFDiRo4idHpUkR5s+lVSH2c11uUxqzCgbVqySOJD1XOhaenIx/+8sDpfObjxwcePj9zuSwvVHCNM9pS3vgaV5uJzpiiGWrVjTtrgHNS4b+NQ9BIhvLDq9O3MdSUMJp9Lkvk+XAixsjxvCFO0oKpQ2Y3GQQZvpZNbGrdawms3Q69Jy8Cnyu0kCte0brhaOAr8gSinmpdxRZRsLa54KwlZ7A285qXtZb9blorRxVByG72O0otdN4zdR3OiDXI6TITc1bLDOFPJOPBqOcSAVskSDx9PnL8nBQRlHtpjVmTG2huyPLCJcvm5L3jZtcx9oGh63j39o7dNGLSBXP5jIlH4uVAzSKI6L2n393iu4lpusGHHuu8BsMyGn0fuL3b4Wymd5XBy2fY7
8Rjy7qNH7CtnZ+IUHSszXrg/PXrRQb7uqAdxhhC12nFYTvQW9aLE0KpTERVETdGROGq7FsZSZxqrZznE+d5YV4i//Ivf+QPf/iO0/nC8/NBxEmtwXuD86J79Xg+c5pnBmMIfcfN3S23d7e8ubvl/q6Vtxbxd5oGhs4xz/PaEFI1qZlnCXp86Hk+Rx4fj/zud3/H+3f33N3dcLefmIbA2Afe3O359tuvOR5P/OXhmT9/+gzG8f333/OnP/2J3TTx7u0t3f2tJLcpUbSDR9TY/RVi284ZcWLfzmEduNIUrtuJ/nqBz8qVchVrnbT5G1bu6RokIEoExZR1f7FW+CvFFIoRc2acpRZZc/6KOtKoCaVUvBNTWWsd47Bj6EctcfV4H5imPdM0qZSIW4OeUhUsU1ZAqU0+RcpQpbYATt6vTj+Zk01xmZcBabOxkCRU9lxB91FhV9EOWpa4CgxL+c5SciKlKHpAbqEYKwhjjroWqvJhN1032Y/++vWLW9artXTjwPv377i5ueHm5oa7u1uGcSCETsUIHcFLictaq1YRi6Is0mJmqHTBsZsGrIXL+cLpeCYuM48Pj3z6+EkQnTHQDwGoko2SsQ5ubiam3SjcojHgnPg1XQ4nsS64iib3+z3TOOK90xLWou9JSlw5yffiMhPjsgVnsAY+K3k7J3LKXOaF58uZOEceDkcenp7EDTxWEaursOqd2ErxiYsxuAzPZSHMgvoFrVPnKs7X3ko7sa2NrPs6C9IYox1bXjg3BWLMHI9i/fD50xN/+ve/cDge+fzwxIcfHphjFGuRLghRVeFhCXrk81x32TfkqJWd5kXaZHOpX5S34lqnXl06rCV0Xhy3FwnGjDHMy8Lzk3AIlkUQMnQh2lWYajvwZPGW9Xn798a3ATZV8O3urM9WqUshLyk6wIpKUQolGYyRDLzxp2Oua/eIs+mLv//rX845bm/2K1mycZrMem82Nerj6QwGlhgJwYMV4mg0nmgGRFrAY7IjV/j3h0f+/WEhrtwpteqwdhXZbAlGVbi7FgjecrvrGXvPfhz53W++4v72hsDMjgc6zpBmTI54I3y/25u3dLtbevXM8l66+op0DDBMHe/e3TANVjy8fMY7x91+WtXfWxcKZt2F22C+uNYx+RnYxqzBXkMvf/rv/VqXNYZuGGidZ22uCaQv7wcvGb7UFpLwVkpzI6+U0tDtysePD/zw8TPn84X//X/+wP/+P39gWRZSzFjvcRb6IdD1jsu8kD5+4ul0plhLGAbevH3L/ds3vHt3z1dv31Ap5LQoH7Kw3N9ufld6T2IqXC6JlAp3d2/ADTw9H3n//p7ffvMV027g7d2em0nEK9+9u+N3//BbjqcTj5ff8+HDB5aYePP2njf39+z3exy/5c1+FAHLuIioHwbrO4zXDL/t19TtfrTgsSU19mpS/DcEsN57KOBcpBZHRA73FIV/tTZiGChWUXElHANUW8E5yYEV8cMY5RLaDWmtMj8ksJHqx266YRonvA/s97eiVN737G9uVNZB+DwbonPV3JNbg09dO5oNFmy92hPNBrBp1itKCT9GRY0RIECkLzZ9tmVJLJeFRWVRZC5ZUozEZcG4QjEei1FgQh6tO7aZf3tv6cKvzOmRQp5RPywxKeu67icFhq4f297yctNoHR5eCcsS/UqtUohxBSeeZPr7yjgxaEYnhNGNdS6Kwcu8rANZEb7CVkJpiFPrLsgvS1tXOj7yR+V/BKmoq5R9rht5NuWsHAnxkakt5NWrgPqnKMJaChnpSquo+3WD4y2Y0shvr3tQrp4sDdGozYy1EGNmmSPzZWGZEzFm2SSt1SzK6AbcHq2z7ctXUdj26m/ntYMkr5yenIUUjbUrn66NH1XamdHAM2lJMCvv6j8KKK73s5cE1UqtdkVFXpRAXt6hqy8v/729rkDUwjNpKIC855fz57VRdNEDcWvZZ/Vya9obiIw8VRBW5y2u6BpVdKQai4YzZG0zLrUyx8zxPKv9htTU4aolWMIk/chmJTsH56BmYgyUUnk+ngne05sFb2eMiZgScUXIuE2Yzih/Y72NVwPprCV0gZwCnc10TnhTTYjxRefOT1xbB89fH5BV2+Xly//3XPo5rvGnLdD+ouyu5V3gxb9JuXHzsDsexJz5eDxx0gYFZ5X8aY36JHop4xqzkmGdc8rRDKsuU63CRKnFUYokKqXIJt32gRgLxkjQM00TN/sdFctumuj6QBcaBUKSPklwe0oVLlgr78zzzOl0wlrLssxipGtQZWlBvoxtgYDcqa0zt3WwfbmOt7XZ9pfXG0tWNOZ6zrWS4/WYiqhje4tNhBDh62g53lq7Cqy2TtZt/5ZnchYbnPWqcCxCnX3fMwwCTvjVsVx+v9a6diRfBz7rYw3MCqYY9WBDJBPMF3ttw9W+vK8rjrD9zWY31MRe19fRQL+UgjVlJTTX2rhcIrK6bQ9bA8vPbbW/KOix1q6E5ZvbW25uBeFpNXRjHN6JRb13KjZkDDnbqwAjrx1T3lt2U4+1lRhnTieoNXM6n/j8+QEfHNPSMy6dbuBXRNQmogcsCq3O88LnD585Hk+sGirWYqohNqsC1TBIaWGeL7oBzJyPR07HM0mFDXVa6f/LQlkWKa/klMlLFG2KnLHe0I2ysXc4qnHr+mrdQn5VoFbRLlMlSvaOZv1t13Uron6+oi35r3AZMQM1zmrrr2gJtda/lAuXeeF8lpq6s57gxRenU+2H4K26BWs91TlR6TTr7JYAKqW17V3gybJG6w1BKw2i9yo+WJU/47zYB6gnWW6LQz+DCFqVLxCdq9ry1bJYl0cFKCpgSOPlbWPOBuNuG8uWjV2v8FYzR3+u/U4T2JJ5YPm5Q/a/fBkU4WDdyIyWm9qHa/ckhMDtfkfKYkfxfF44n2c+ffzEh+cjJRVRxe2sqmQLTyjVzSQSWHk9bYPbtH9l3WQqpyURS2HJFWM/8fnpyOQz3wxiQ2FqxKcZUxOhXjh2B/zRMt3CvRvphiSChk5at431jONOzBpzJOVZdGBQ1rbZpCna4bHuvlfdkF8eQi/xyR9fbd9pZYDr+/prX6UULstFkqtcXhwSkgHIlLJZkLaU1MZnPTwKz08nHh6OzEvkj//2A9/98Xsul5kPHz8RkyCPb+5veHN/p0bIEoAcT2f2P3ygDx1j33N3s+f9/R23NztKTjw/P0mgVSKb8/Z2wBm93ylDSWLV0wfP+7f37HZ7dlPPELwY/9ZMWmYWEs5U9ruB4CxfvXvDb3/zFZfLgimRP/3xXxnHgdvR8f52pAte7AeyfA7fj/huWIezYY40zSBrMM7TBPNYUZ+y+V690mVgRZTbIZ6vkr+qHcC1Vqx3+F500owTVb+K7GtJvQmrbd5UaKmoqBqyxxn5t3HcMfQTznt20w3jMOGcox8mutCvZ/TL8KC9FpjSiMbS7CAadsIvah2/Qo+wOG81SFaNKEQzTfh9uheiP18qtoolVc7Kic11pTysoIMiuylF4jJjXRbNP2PIKaoWlSKLGhjW4qAmBFr469cvCnqcs+xudlLSenPH3d0dznnx
61Dpf+/EPCwEieaNscQkqI1IoDdX9IQPlt1Njw9wPh8xVlCB4/EowUFw7OeRaRZFVyEqSiDjVUFSnN8NMSZOxxPff/8DD58fxaSsH7WzRswKW20yqnLz5XLhcHgmLjPn04nT81E7GdCDTLNZlQ9vxN6cC2mJ1BSpJQuhcuqoFXzocaHbygoN5eLq6DUal1oDwVJXMS05NhyqTIrBNtj2V76MMbjQrCfUFNRkhHhsSFE8sU6nmRQzzgWMcXQ+SDeSNQQnRqDWIL5AzlMqyjdIotGRBcZtwaaUt4rAlHFRpEf+XTIbbU/WYMyHIIsuZSF7l82QFIRXsvFrroSycrkKdeqLryAHhLn693aXN/I9a8Z9XSrbCNPXwdHVo/3fta6Q+bJ89grjibnaDLfvts2s+RKBIeiBVoFcDEuG83nmcl44xb+wzBkTAqGKbkusRnWmCrEUom440o9Vr15re2qAXAvHpcAC7ixilt4abntId3A/gCXj64IlY5YLvj5j+srtubDYgXGcGYeO/W6U1n/rmaYbOt+zzGfmkwR2mQBoWasFPchBs5Y8moYMaFmgvd+Np0B9OU/0n9cSaK1gWsfJK0U9pRbO84mV71ZfSi4ITpy2wNo2aFTkIUqp/PCXz3z33Z84nS784Z//xO9//28sMWJdwbpC1wfevX/D//gfv8U6R1wEGXo+HLj9458Zup5pGHh7d8vX798y9oGSIk8PD3KTEDNlY4ToLNzACjlrMGQoWfiCQ+f59qt3pCLaVt6rNU/JLMsZisWZyt1+Ig093371jn/6+99wPF14Ohz44//9PSEE7neBb+/34hFGwVIw1tKPC2FcMMauCugSbMi6NlU7fFq5s90uUyglUtNr8u02Rf+2RzVkO6sFSI5Skgyt09k7qhESuVAGJdkuteJ6g3dhTWhqEdPXhuqG0HH/5p77+3d47xnHPcMw6T7kMahQoQ9s+8O2v0mnq5owR+kUS0rnyMq1oZk5e4sPXhuIEjlLINx+t1ZEfsIK2uxCEH8u/h9vb9rdSJIkCYqambsDIBlnZlZ1bff2zL79sP//98zs7L6e3q7uPCIZJAB3t0P3g6iaOxh5VE4Hy/MhiQBBwC8zUxUVFUE3rq51R9T2ALA2qJIUv84zQkrQmJBE+B01w13ce49DtaBHy+9ekT9c3nK5aEpaJ8v0tjvpZvLfPd8WjN2nGRTvZTGWXNX0HzKaNuSckHNA6+/jhBaMxOXRs0jtqsrrsiIlohOq0p1meWG9zFJJTC7FFC+pFVMsCxIzHdQdnNgsMlcnKsPhRJbpAEEaE0UXQVEmF6PaC0s1C3pCEEiKdLYFbzYvlSBI75R5nU36RL5/+Nbh9F1m3EuWNtk6gXlDRTrCfAOPutHjDVzqxq7+s5mcOr+4R4h9j/rf7Y7ALoBI6AJrN3eY7LJ82T5zf4zb/uq2qMn+ly/+4jdIjxvQ/OK5/MJHfe2tIzzbd/ZvfQFP+bgDqHVzGEcSIC34VRG4fxqte0xYTW/LoX7ebnCyXZAPbN5NDbzGRQSjAEsWrFEQQDPUgAaJFeucif5MKy7z0om8NP2NNMEFxw8DdP8e6a/3I/dgZ4f4qV/gl9n9fpzp7dMvhuAv/f1X3qpB+c7p6fMotGfizjoV4/81CLRxvlvWFefzFefLjPP5isuF2lrTMeAwkFIwjQOOp4PpN1Uo6hc0hRSphr4JFxY7Kwwm2cgQIGKCiL0EEfp49dZ6cl0bghSI+WQR/efpdORwMAHb1hTP5zPyurK7d1mRl5W+VEK7CwlsTgk2b2sIvE8BbORahSgTZCgsE5XuZ7U1UbzOtpVcbhFnPtU+t+3f6/MvcFtmEkuWKVGglrzxeNhZSkBgHFnSGk3tmFGeNcfLL5Xbdlinzb+eQG5+g82CRfDdfh9CGMyZhRCDGAY9HqhDQj+GroXWEeLbqVkBM7ttu4eT9O1m8c4Gr7Nti87vViv/MNJDAvEB42iKyBI6cdIN0KKxzju58Wah2pQWvf281Wp6PFS+rTljhlLzRCq0rnSqPR6sBTf0M+UO6ezMYodULRXeldS/M5K7oFAsy4zr9YLnp2f89NMnzNcr1nnB5XxGKXQE56IgxgjnTZLzamUy+swcpgE6RIzDgLu7E78rmJ1DILs+pWhwb+3thHQhZ/Y4DNGM6LbsiXAsvzO9VnmLO8syhpUBQzeHVeNbBaTB+CGB59M9lby7KiVmVw2hw+u5ZCwLOSDOymcbPjMaUe+HsknRX+u/2/7zG6ibJLbm4bVhx+iIdZ8tPBiyGMXjnZu1qgcijoS87PASRASoaX5427TCgmZwX0PozqK7IN/zJ7EJ13fwdTcv9/Kc2bHp7p/+KzXVYzCYePfmDsfDiG8+vME3H9/hOi8sIcQEKHA8KT5qQikVz+cLLperwfQsX/aA92XEA0dDtBsuiggaGs5zQajGeG4F0IZ2nVHmn9HSguOnM/7t0wXDOODuSOf4IUUgXyHzE+CCoqUgBMFpzaiqiKpmXglADPHZLRq3E6PNT7sFwEuUPe7u98tuatbtb19jo47VzL0xSQCoK5xIP26CkWLK94Ja6XxTSsV/fP8J//Kvf8X5POPz05ko7Rjx/v09Pn5zj8Nxwp/+/C0+fHwLbQ3z9ROenz7j/HxBaxXjOGAax47YJ/NRzKXa9xpPrZnzUTAkzfRc/DxzvyrmJaNURQyKNBDxKSUgZ/K+SqlEFmpDSoJvv3mPZWEJ/Pn5CSLSBWzXccA40NSWlYSCcD1bcGUm1wBd2w2li8tgLfEWGAtRwJpXGnm+1uZjI4h5B9a+LmhtaBY8eEDTDKEujZISDTQ+Bji+Y6Q7OkRQ1oLcMlQUQxpwOt5hHEfcne5wf/eAaFpbQ2LQ05R8O5eR2SAv9LlNm3WXeWXGZFnIsapQNCplA6gtmGYQqHWXl16SrYVAw5BGDANlPxAiJOkGAmA3vHZZs4+sVgtKXhC0IuQEiQGta1EZqd/Gh2ilTlX9ykhPjAFv3tzhzoKelAKIR5iwkAshRZaUgO3e7wEICMO1SqRotKDH/Xoq0DupQgiQVtDyQPnzQn8rEjbZahuCWut1tJNdTc10c1fuUbBlK/NMctznpyf8+MMnXC9n5DWzJdvajJMHHSliHMytuma0kqHgxHqaKIQkkYGOyq71T6hmOg70/8m5bG7gqpRtDy5oZ9LuynKZeDARt4z8628CCL87Rieir9sCZiTxYYiIETBfRwyJXB62LPO5iFCrqDI6L7lgWWbk2rAsBevKspP2gMWl6TjQqO9gQQSw2XJ0dM5Et0qFul6Ol5zANwrwi+XcDnj0ABhWmui/7d/hSEAPXESYcWBr1YZNTFWpydNiRWiWPQWrr8NREUZd4pnIK24C59cA0C3YaW33vd4uC3JzRGjm++7tHUqp+PThLb795j0u84pSlaaqDTjeDYjTHUphSUWN0DwvGaWxddTV018epgdDnsWHENCgOC8FdSFkTyFLxSozruFnFDkzKRp/MOXnCe8eThhTxDEUPISMJK2XAlKMeLuwky8q7ydOhOiQvU/oXdBujxoastW
1PmRDKfb3hceurr/1+znl/9rWlEKlnrX7dRXfl2LKx037/ooIltywrBQ2/Pfvf8L/+y//hsv5ilpJDk8p4cOHD/in//07HI8j/vwP3+Kbb95iXTP+/a8/4vPnR1wuM1prmMZxp31EDg6R9wLnV0pX1/Z0nRwe9KSGI6zWwoaIXDEMLI/EAJQiyIXmOvwAACAASURBVCVANWBdVszzglYbhhTw3bcfkUvB+XzGDz/8SAQhr3h6/IxlTDgdRuiB/Jc2q6l7CP2bhoFioWFLyOOQum+iSxqqKudzt+t4hc3nkRA2wn2KEUNMQFJUVGj0YEPQqkKUPMilWHernWIm0gnTMPG4M8+3BsWQRpyOd+zMOj3g/u4B7lQeLHmplVUGR3s86On3OTNQdpHt0J1mAQ+DHuu9NmROCsdCzgvWde4dg5QSEbRRIVbigmtN4WVCYUmjd53a9ak1Q9YZoSW7fkaw9/KWCMgYEggqUAu05N+9Jn/Ye8vLUTeZ6x7Tx++UZLaaRF94di/Z8Xp3Fag9UBm5t1rRSiFSYl1XDtV5UBPdsdX9efqNv+1CJ5SVas7suTuFt6rQqNvNKoIW7e8dgoR1ngXj/UQz6oR0oi1v0GAGfYSdAfc84oQsYSvvcXIOaGIaNcm6al4toWSWHiQAJmK5/yrBVroK3ooo2DR59qWtjskYsfGGlGaPvnD4Lb/RXv27+220u/G93LWHeP09rrmDHS7kgQpflr5o+ftfHuf+fPTM/0UJZM/h8T1W3VAEnyw6xOpH5tHVK5dCbreXgdytkSa7NKSjHSJ2H0dlWWEiN23OFUUpLZGSADZpH6YJy2FkoFIVtWydXKF/yS/t0/4KmwYISGbMjYlCRsPaKooEhKbIuwB0SBE5RUisOAwFCPwb3ppCW5Ilk4zftjviVnnZr93WndKvuWM/uj3fJ2s377+Zsb/+pgpT1QVC2xbkXvrQrdMlgIueJwa+OOVC7ZM1F3ItByaY4zRgmkZMhxFD2nW8+fwr2k16g42B1hRVFKmxwwigj5Y0O2GuU/EC4XEBuda2Ds0QFc26kbwdugXtiys5JbHrrQwDRWzdNJaIg1j5xIKYRsd3kYAaqnUigtpZygQNYZsLXHWeHmIF2l4v6OnXTn9t3nE01J6Lv/biPTsk8iZls6knGAoUw37d82aKYNd3my9fjskv5rzdGNmmOUsA4WgNqLguuO2Argr3/rrtNNzuja28pftpHtsQ4z9uS1zk+nwxMG2Ovem6/o3tD3ZvCcaRUbpqRS0ZMZLU6vXErdsFvV9/m1x2ZTCDZDvfwx7oD4UKa641F2gLmK8zCdBp6K7gYYo4He/w8PAGx8MJZW3mMRIMcQo4HA8MXhq9S3IpyCtLMOfzFeenC9vOM7OnlCLUCFoiFbFYOScIpindtHhK73xg4LKsK/LKuneUBrTMmxKF/xaSlD2yddNUbYJS2S0TYzApgLhl7195y7ng+//4HqfjER/ev0WcRqAW6kBoQxRgSGx5bE1RLD9yASgvGbqpbGu0zyitIZulRzbI2pn8vCkMTVFQzTkENCFpL9gC3Il/vePLCHVt81uhz47eIDgMeLBN5LbYB3Wuz0svpV2AcvOT9y3bJD343QJBlR0GexOQ9WV1E3D0N/494p6OSuzKWr5vUA5I+MRlgSM2K5F3b+7wz//4HeYl46fHC358fEZtRFIpEgh8++4BpXzX0YQfPz2axEFBLmZDoZuomd4EESS4QoCYIoYAaKFAZ66KIhHV2uabAlJJlL2uK+RJSWQ+BrwdAhAo1b+uBZCM9v/9iKdKKY239yfq9sSIu+OE03HaEp9d8Nd6AL5T+u1CU65dsuPkNUCCkd5difM1LqMqrktm8LFWOAdr8IaDBgR1Q192egKCNa94Pl+xrCuu80xNMTS8eTjhw/uPGMcB3/3pA969v8c4JiAoLleKqg5DxLt3bzCMA3746ZHO4DHg+XLF9z/+hHFIeHs/4WjodjMUggPMuSJswrAZgW3kAC7zist1xporxpYgYUBMAWOJRBPFOwTtlFrFQCG4u7vHh2++Qa0V43TAsrIT9HgYEUICxBTIGyBQ43xxX5onZkEoi+Aluk41UNM1er2gh80zmWUZ56NYQ0/1JNsI9lEixjiasnBCSAMR0NqwOJetKsq8EulZG6RSg2sIAw7jEdM4IcUB0NAfqvu11weABzswMrSpeltAQ+HA0oNoL3V5ukpdqM3agyKDO+6mSR9opVYSgxojuYOlq5pX1EJtvDVnrCWjau0xQ6srJAPQiLrSkBqq7NpTImINFaiKkoG8rliXr6zILAIMQ0CMIAGt0mU9DLZYGXve3m3S0dhNDgZHeU1RNpVeJyn1PnzWiChpHyq0ViytIq+EfIdpImFrmHA63eHDh49Y5gWiEW/fvO0Tca0Nx+Oxe89U84fJmRo0l/MVz8+XTm5WAEOLEKGOTkBACg1QtngfD+wkm44TDqcD9qQw1YbLGbjCFnmtvcYtqIjgJB7Sxm+ISSBR0FpAlIDaFDElHKapE0tfYyul4Pv/+AHv3j7gzXGCRmHQo42u2yI96CkmiqZKASjnOIVAozco2GKo1tJc3MeM2j4lEwFyZEjVFg7LXOsNehTsXpLeNkltmI3U6TV5518QhVK7FlRHFs/c7b7dL1C3ZYlfD3x4/zYmswK6P0MRNZoM4o6gv9NI4QBXsFjPDOsVIbubbY9I7DMfom0MejrG6kidne93b+8wDgk5F6ThJ8w5o9SGw3TCdDjyvh8DpiFgWVf899OEYaAJ7NPTFZfLYtYdm+Gst1xvqRzPOTl9AQUNrYC2IrCAxw/GssXWGvKaEQU4yIj2cICEgHWt+HwpqAo8rj/gf366IMaE7z6+xZ8/vsM0Dvju41srKYSeYAAbWsLzAHJSeoDm5263MLhAu91rr6ni25oStfKdAxOQw8HU3RVI6g0WtrBBsa7kXM3LYkEPy4b3b474yz9+i8Nhwsdv7vHu/Z3JZ1RcLhfU2sjteveAYUg43R0wTANCDDhfLvj+x084HkYM6T2GwRbiXKy0SfIwRBAlUDcphJ64ojGAu1wXLGtB1QFxECRErG7ZIzTA9eY68r8iUgg43XNur7UhQrvPHtF0K6m2baFEqx14qpab9KAnkDO65GxaU9rHxOttSq5JKybC4ws6dYbUIz0FIgLGNJJyoIoIqiBjXVF0tftQURbrXMoVwfzYxjjgOB5JYA4W9DRqb/VSlnh34xZo+TotNn9ZddLMTDefq2q2SLphojYXGwJn3VitWXLVdnpPrVqQ1HqA2aqJAmd6YdLk1rTX7Lq0uhpXJ6IOqTcX9YEIRTOeUSnk3K7rVw56+H2eNW6liq1TiydDtpUIfYlxdMv/U/fRcC8s3QKeflC7PxTpEenWIcD3OF8gGjF6GJIZPApibB2Z8P3t36/aW+ZUzXMLClobo6MxbthHNIv17N5xZt4HG6K1tULz3rDOJg39M4MtMg4pMwDY/QlgkeyvQ6L/2a21huv1isM4YF1XTGNCLcXOrS8Glh3cALD9sPrvHTXo0Ku8eL8vvP1//h
lbKeq2+OEJkQL9ev8efLkvl26ChWIfdlve2oQE0fdMYOzX7d9AR5b8+sru971UpwonIG22Afb5fSz8ym5/xW1/ajbOyo67YvepOIIBWPDIRwiBZGHAOHYbmd/NU8dhwPFA5PPuOOH+dMQ6ZLTK8d5UuycalVxbh7ojiLwNgb5lIo3JCGTXMeb3yXYwDRb/WGWJtf8ARUNt5hi/FkBXhFhxviw4n2bqEM1HzPPKpgmNSHCuoX++o3Y8C9rLV9trfGrQf3/v622q5Dn1XQDHSi2VUIpyTCoEQZyh4qWfgmwcqRADokYMw4DpMGI6DMbRY8m/gSUx78rxZo9oWjAkCVfM8wKBdvf1qg1roSUPRKBC/7kYBGjRfgIexeRS+iOWgFypkuxClyKwpEZ7AgFDC52cLMJSVCsNFfu5ekuyeelkdx7Rh7Pu5qB9mV23O+5Vr2fvXNYvv68nBdjQZHIevXgrHZHpyRUAWNdegHXQ7bTreiK4Pw99s3VZd9NTT9a2xGkT8n1xlvY1OHUbl/0Mvq0B/Wtv1l5PFL3C4/HAi3nS9kHMasWbWG5iA2zrQtufm9/Y/qDLuiIvC0oa4d5UnCz3ZYmbv7CDq/SYqgXF1JbXdcEyL7heZprfLQu1CGrduCTibd8AhCegFXJeoHrDLXFuzDgm1EpNmYc39P46ng4YBpKF/cSw/ZIS2MtCc9O+ZgfBOFGB9DAl3B0Gy3KTlbcC4pA42BVYFx5Pa4qaKZ4koLs7IWnBvt2+eJS/ES2ICLlDbisoOgNx3USYvvK2LAv+23/7v/HNh3eIqHj7cE+/nGL+WJXKqOMwYG1099XWbNJl95xnZIAgVkVMigYTMBxGSKioLXcn3dvJxret64lZh8vts3NBKm1M1sxJEzeB4MtgbAtIaHJr3xU8bnbpgn1w8zdsFoBVWFZauTwGCaiRBqP65Xjd5USvv3HC4iTZdhnl/py3/hOcMG2CcIg6CGXcnTz89uGIXBqaDKgqgLKsezpNOB5G/Nd/+jO+fX+PXCqenueO9OTKUiez6soMsBas1xk1syU9lmdIXXmdg6BKQ9OwJUa6lcZSEGpZCWhyPB1wmBLO64y1Zlq/LBmzUkdkXgo+n68YhoSfPp/xw89PGFLEuzd3eLin3s/xeMBhGg2xjLvGC18MNiFUwJ5by75Em/dfCb2rreHnz88bSgmiruuUiZBJwBSiSVqE3ln5+HTGp8cnzOsKBfDu/VuoAh+/fYtvvn3ANI44HCNCYJDejPvjHDyIIkTBdJxwf38HEcHPj484n884ng4oteAyz3aNqa3V1FAaGDocI+1psFFMr/OCnx8vyLnisI5YlfYna11QdcUQKTkB66KKqSEWLqQVwHA8IJSG6/kZ8zIjhoDrcsKSK4M3CQjJljIvP2OjGrlWVrAATVJCDETHWofwXmfTprT8MJNm20lrIFGUVqk9Y+Ol0z/UO1mp7bPOM6oqJhxMTZmkaARq4lFlebOVqLVaDtfYOeVTnkVB6hLqTdk54mtiadZcQDJ1D0ahnumSlCzOl7MuqhQRWoI0TrbS/RkjbOoglcWqKbVms4MyC6iSUWuBN5IAlBSoFuxgWVCNauBm5rxDTJ3ZOLo5f+XuLdWGdV0wTQe7gI607EhTTK35fvuv6abEXIoHPZuD93KdsSwritlFOBwtwbRrLHh1Y8poLd496DGycoz0a2ptQEoj7u4e6Lqc2Cm2j167m7CZndGe3rLIaEHPkHA8jDjdTVSFTYT2Wc7aLvyyrnh+fjZiIZGbGKgie5yogzFNRKBqrbher1gWTkzNHqImzlQaRBpKpUjia5HslmXF//gf/w8uTx9wN0bM795AYkJIU4eBU6QYYc2ZELLR/wWR+UWQPshiUsRCgmOysphIQI4NMbB7q+4Czps0zNFB4SRRhd9TrcRZLIMtpW6ZkGx/+zKq4Ht6jtSzAy9/MWG/LXLt0Z2bVx1h8EwC6FmYBkXTiKZfEjw6DKwKvZV8fsVNboKcTbJ9H3DCqm6GYDWw/AZmjYMpXB+PIx7uDsil4ZIDrqsFPSnhdJwQA/D2fkLAR9TW8Py84jLTrHetlZN5U1znjDVX5GXF088/Y7lckdcZl6eCdSEM3kRQ0VBtfwBmtNUUfwMCNIITboxmMpwgsSBXYM4N57Xg88JF+Ok848fHJ6QU8enzGT88PmMaE/7h2/f47sNbTOOAjyHiME22GFp3CZxAzMCne5hha4RwhBP4MuT+WlutjW3m/n3geV/XjBgihhiRY7JGigARdqU+nS/4+fMT1pyhAN6+YwfPh49v8eHjA8YxQVBtDHBByZn+WU0bVZ6jYJpG3N+fsK4Fj4+fcbnOOJ2OCDEgV7L7SjMLmaZEfRqDnmQoNktd5FTOS8bT84xSKo5lQpGGYYhY64DaVusGRdclS0mRKg++AkjTASFVnM9nXJcVQQTXNWMpXAtC2oKebeXhfd6rBIYkUv8s0u5GHZ18XU5PztlauLUv6jFEtMggg+UjJuRe4qeXH9GQZrpLtTakkAB2rLPrNtDnMqXR2vW9U6vRC9ACXK++iCO/lqV51YZjjvo63W7Ggp4bACYwaIQIOVt2jiUkhNiggc0PokRlQop2/n0ONQHZSjFDBj/kN9Ez0/ZVBaql83x1XSCV6KVgggwCUuUbRAxYsZLc721/rLylG0vbT0PPhcTbOLeLvYefnLC8cWrWG1FAJ0ABnmVt5SNxZCdsWZd3FxB1bj0gUjupsC6oGDcy8LYYaD+em8d2NPCF2O8Pb+HLxbM89MUwl2o3NTsI/ETs0Q3dfUd/HeQNKLbPr6bTELp41+tkIT4Y15wxLwvmeUZIA5JSu6eaPPseev21bVsQNtRGwtaCuC8tvdyH7XkHXbfMyyHp/YINH6z9X1x8dNsHfsZtx93L/f3i5d/M2jfo1xc853xsu7VVu//2z/2am6VTNwCwZUx9J/fo1m6/brJAa/ENLCtAKgaNKGrCZzH28RdtcYutYZrUEkdFMp5ebQ0xRKy5Yo2Cch2BkiEacfWyVsfj9mUkdDFEqHatrGQlNvTF3qQyAn8/2Tgb0gbzl9pwXagTdb6u3UPsdFxxPDCIGMfQg54vzuoX1+/vdT2//L4+l7SGFlpvGJRA4nfdnXdqgAUzf45bN9bu/uiljN1zwJsVBlJQjJdBVDzjuiw9WVMBu++cb2fXLoh7MzUEbL+n9ox5twVgzYIlC6oGplE2V1QVVLi9AYnaLn5XWkO0RMjLeCmg0wf2DB1PeLpWlh+5l98FlsS93jVVbPMcp//tPr+dS7QLee7npv6eTv/YEkcPnjZx342/8xLlVe19V7Y2KaAN4oz8l2vjfvMSGHZ3o6E+PQGw+b53JnuiZ8ls/3l7ZuyztH8kv66nqdu7nepg6z0an++H52/TH7btj5W3VDHPKw6H0m/uTX/D2A6tmCMyeqbbWkHJq3UXfMaPP32PZZ7x6dMnPP78Geu6YF3WHpyM04jDYTQdm2DcAmAAW9eHccTxMGIaB8QoWJYrnp5+pgjWfKUdfSB/gHweC4zajgzbb66wPQwGBSIUE
U0j1qI4X1aKeKoRrsHsXY39mNcFZV0hAI7DiDhEqAbktUHbSii1Aa0CtVUs84pldqSHSqq5VFwuK+Y1Q5oitAxpm8/R195U6SX2fD7jP374AZfrGcN4wOF0jxATLpcF67oa5GgWEZbF9ZjPEBOo9HvBF8ZkBGcnuG9EUfIHWvMuH3W+IwBA6tYxBQsytDm/idFKU1fElr4/XqLxHFxFb451C8J/YfuVhc0H+cu/2u4f07AQyhbEXXeZI58KGNz+dwx+/GS/mFTCbnLpXRxyi1j4sU7jgLcPd6hNcaoRubFT8eE0YEyJmblQSTlKwP2JqKZ/picExTgj1+uMv8aGp0fB81nw9DQgN0FptjQKFbWj2AIY2dkoQTANCafDgBQD7u5PkDCgISGmEcfjhDgkvJuOGI4nQALWwlZt//7vP50RQsD5WvD9T0+YhgF/+e6Cbz+cMU0Dvvv4AR/evrk9i78QrPIWu5n6X2UTEaSJKEkKLBH52BIACN5tw7JSq7z/l0qPs7Uq7o4H3J2OGIaEu7uDdeg1Q94tkVOx+SegaCMBXQIOxxPevxfM84J5WTsq/dPjZ5yXBSFFjMcD4jAwETbhziaAsrEOGQ2LkIxdcsVSiGbIWiDXK+IasOSAaw4mIugIBy1tYjfEJAqnreH5esZ5XRBF8PlyxfD4jJQiDncjDhh5WQK6YrAHhaEJUmimp4XeKKGqpq32mi3r2suAPkNJoKegJkBhJN5cME4Tcs4cO+ayLgY1e0JMs2WWOafx0A1Ep2nCME4mwCj9OxFY8hWYgrero5lQrAY2JrG8a5UVQzMlBM67HkD5EVnAAzc2htp1Z2AGVevcU1qUmAcn4wSY/q4lTYHoYPRHpFiv2nrr6s3021KEGvs4CAI2ArlCNTaF9t/a/mB5iwulk+x6rdTJvsIa3z6zVLAltVTW7q7XMx4fP+F6veLz4yOen57NALR0VGAYB0zHiYhPtMQObBmPQu+gaRyoCh0E6zrj+Zk3b87Z2toTRJglepnN66bd0ZUrErrA4lacAmmXAaVQwApQ84taeBEsWAEUQdmeGYJgCAk6UB+C7draF0CAtdZ1KViXzItkk86aC66XFdeFpaS2LkAtXQfla2+O9FwuV/z00ydcLmdMhxPu1oo0DFjXgjVnKoS21ls/90EPPwiET+GdV9oDH21gJiLM3Gi1IT279GDH1TmhVmuW1rMxkWBEWAuqFd2gj8AEM7Wbe90Wcu3wUYfdOsrTMyi8+MOX5+nFq5YPeWMRs+pqA64xIPMuRlfKfe3+kC+3LaDBvpMtbEHPLwNQHuIJxmHAwx2vT8WAigEiwJSAIfb1pSdx9NlzpW66PPMj+Z3P5wvackGiyhYkRJTGLL91J2zr/IFgGCKmA1XfD9OA+9OEIUUcDxMQBjSJCImaM2loePf+LT588xEhBjw+X/H4dMFaKn789IRPn8lD+fw0YxoCpiF1hPPueMDpcMS7h/svAp39v2/BR/3FoOhrbSJmUBwCBisRQY0nYd+vUtFgnmjWLMOgp6FUIA0D3r1lSet4nMwD0NE4oDVnQ/D8VxUUZXY9HQ4QGTAMMx4fn5HSBU0Vj0/PqE9PGMYRD+/eYjoeiTYIOSYBvqBxfnEvPC7WxqcsBTqTwB4zcF4Yo3vQg10i7WW9YRgAVczXK+bM8tbzdabP4xDRIiCDVwRAA2f1W8/mB0OIAjb+qSo9wFp7XaTH5Q56GiLseg3K+SubFVK2DiYXnAyDiVJiq7A4WR2A0SYOGMcRwzghJheyFOMHsZwk1dcf7QmRVrd0CNAW7JxpnxtosSXU9HLpGdsbdYRHtrJvEIq1MuhproJIZ4EQ++cFAVqg6XYMgigWAO14wSHGniz6etFqZbIbG2qJXXsppmjr9rau/N72h7u3umaJR4Mme7rBc5tGicNsrfpF3bg867JYgLKxsh0xCi4t7YhYL3Ns3R3eziwSWC5LCeputAaF+gNAF9XajmGLNLtujq3ErVJVGKoWkhDdKSWjZqtJgiiNgPdLAMwTB9RecNKstg4t9xKZtdIzYrYV5EW2LRKAkH5tdfpPbyJsg402+H0wrWtmR0xxQzztehzepeeSAwRjdnCpDYzezbcrbwU777+MebxcXBxL1f5c7HO3c3UbjvRXLKrR/Qf2/Xv57t0n9CBIdu/5rZzevr+XADdkxbMi39XXhM9vdufl8/6a9MC0n0Lc3lp+3v00+XX2N4r9dLSoQ9L2/9vrvQUM3dakv47+WX6umeWBJN1h7Ny8093IstU44P44IsWIQ4rWIUZZiJQigkYcDiPuTweEFJFLI2IqYkmPL8INuQhCaFhWKkrHmIy3oMYpQb+3PamzG+j2FP8Wcvif3XpW7F53vojbz7A7kdVlP7y8pRuabXtYGzuoQgg3eldrJpGZ1jFWLmrmfm0t5DAdMnF6QmsIbhJZWc4PcT9a+Ng6jbbuS09qaqVyOYqNE+OcsPkAplPD572zGujkaZWAXBoyOwq6HpgEYaOFLT9+vkIg/NPUrSkCEA1BaK2jxK+5uf2dB0EewLTmnLttf6q1dss2GPs8qrqnmGz36CYy6fpSnIH29Aqu1c5Zs+CEveTWTo7d3LGbz+D7ppCw2y/uAV6Og+37+fsb2gN28w9cBHibi2T/ibvXekTj95PdB9qU3MqmpsP3lTk9QQSHgfB2igEpcsdrpTUDFTJ1a+O2m/18fsbPn37CPF/xw/ff4/u//gfm64ynnz8jLyQ/Din2ktaYEqJl6FrY/ioiIA4XgbXifL6iVHZz5FIwHi428cIm9oDnyzMaGA0O42TaFIIxBkwx4pgS7sYBbUpGyCMvZ9UZj7nYhN2gFvQAgPbS3SZdfxgiJnPGXdYK0QxtwLoU5LUQCYzUCBEBUmSkG0LEeEhIMSEG7YM+xIQ4UlDNpdO/9jakhO8+vofJJZFb1a64ro2qtl30zInZ7HChmd1grcaBbHy1QM9QIS42NHtNZhNQm6JFCxVsUu25QyA5uk9CVXug1G98YZmztQYEpcL1bgHne/ivLnbZJ/5t8P7itlv990P4JvzxKEYVbnK3r5urTWLBMjU6T3M0U5PpdfSWtm23Lzs5h+24NrK4TzR+WFuw5p8Dk/GPPcMjYZAlstg/00Jc2S4EUTDCCSK81iLopNfa6PvFBIYL5vEYMCpwd3fEtx/f43iccDoe8P79PaYxYYgRh5EQ+eOnz/j+33/AaurL93d3CDHiL3/6Bv/1v/wFKSX89fufMY0J15kcnu9/fkIDuSKtAi0Dn54XIAge1oq/XGasuRj/JVl3zCZLocrOPbTWF4BXDHk4R01DvwaufdY964KrtQvWa8F1XZFzxdXKUbVUzGvBkgv38XzFWrIFCAXZ+Hprrnxu0gLF2ofXpSJnQ80lYTiemEzOV3Z7ScMyUyIgxYQ40deqJzsgOkCvvNq1WqCKWrTLD8BIthCWu71pw32qIMCQrAMWQF0L6poprRBmCKjcXKBYK+dZJOmNL7a+U1Q3MWgOMeAwjEgxoS/+r4jDCoTCnrWxg7kolmXF8/mCdaFWXFMGlrUp
lmVBrRWDKq0qjDaShsECmu09tTZ2HsYIYRcOIMGQJUpCoFRDz62i4VC3ejduRS0kBTcVVAU5VZVaQvTXoz5SbYqgQIsDE4QgpntFX69eNrVOZIAmxjFFVgFEEaSZ3ZCvoO0m+NlP6DdsK34w56NaUcU6/tBQq2vkCRuAfmf74+KEicQ4r8My6Ckbb0JCd0iFkYuv1wuenh5xvV7x+PPP+PTTJyzXGVdz/oWC7eDjCDeyDHYjlkbeBAluVOZSbbheF+TSEFNE04bRvLrSmCy4ibjOVyBEDMOIkFK3dUiBIlpjCjgOFD5ackVd2YVQSsFynW3CN9VHMR2LFPrVoU6GIOhIVr0AObPlXKvi/Dzjellt2eAFizHg/n7C8TAgJWCYHOHaJOFDjBgOR8Rh4M38CluMAe/fPqCUjGWZjWDesLbMLsY4ICZ2t4wxIg0khKeYqIgdxFj+JygTAAAAIABJREFUzeBy01mwRY4+Z+i1WshG1Gsk7ezCe3NpxpbtOBIQrCTT/YcEUAlo0m5BsP0/HLXpZa1fmtZuiff+Wv//L4Az6uAOuGgLtoBHRXrrr+AW7dkUyF9327LIbUneIy8dffkiyPP3b1w3QUAK2EqExsWSFwjPl3twm10a2t6R4K7WbDldCIJDYg3//ds7/NP/9hFvHu7w5v6EP337HsdpRAyCMTBi/u/4n/jXf/0rnq9XnI4n3N/fYxwHfPPhLf7xH77FMHBKW9cVz+OC40+PnJgbJ3NtglaApysTtdKoGJwLA4QhDX0sUs7fjqcpKjb+mPrhvsImAhoRCxd/nqltAQimowMR6FyxZCpTL2smUlur6eJQR6e0iuu6QMHFa83meVbVUB90+5ymipIby/q1oYWIOE4U5pwXlApAlMkcIjQJDoml3ADZ9Mgq0LI1l2gDYKJzxvHyGdEtIbx7B2pG03YNhpTsmgr5fpV0gUvMCLIipQANYGJMZgJMfcDQJisDjYUBWowoo2KIlTO46C8N9a98PRnAOQqXS8H1OmOZV5S1GArDMv66rkR6YkRsFc41Te4b1hiM9hb3XcOPl2B64qHKC2H3qXu0edBDzkxAq7Q/4vTNcMRL9bw3NoHYKMJOXjFJAh/f2NAgR6vExkkI5OEE0tNptQQq/3tbPvCLUy68CUa6mjWo8iwBpLsGK08q5llQ9SsjPSL03hmMyAiwxQ2VEGngEVIRtym5Ka2hltK1eUrOqKV0aE9tgm4me907DFwN1WA3VUVVllsk0NWlNkWsjN5bawgxYFSak4aY7LsIc9eS2eVQKtAqlYcBpCAY7O9zJOGrSYAzgRooc67WDSZWa3YuSdgtJn7xvcxHiE5s0bWuIrEJwrIOd5tvqtQYahUh0SMnDsPNAvV1tw2K3gcttQUuSqKdp7SjsNn+26OJZRBbW3hf6MWJ7vv6ebuBOm/2RG47vPZoxC32KR3u9df26EzPxL0k6u9xMT77iN4piN2L+HId+/XTr9hPlz1s0N258AH9+uh533oXpe6DG7n5/f7nvjuvx4iKHrTwF8BmVb+7HD06/DKYYncJboIDLtYMmsfpgMMxs4U4RUgIePtwh4e7E+7vjjgdJxxGOnxHESTjIjhJPkZaYzDLTUQdLaAKIVCQr9DIOMaA2BSkMfAARVzUdM9PuE01N8K6nYv9PfrK17TZwsISu4U9wuDHuXAqws64NWNZC13KrcSVS8G8rnTBDuRGQiknkAsX2lwbNbSgmx5Ls6DHyldUyW1byauRo7OuhdSNQTGEBa2qUQbYOEJbmmoJgZVUoKym3AxafyJw+gJMtkAsUAjVRhdreAiOTJnVQakNuVZ6gZGEBq4pPJ5gjSQpNVOLBmqshlwC4VfH+FfYxEukLtux86hquzFmWy9x1UpScwi0q2ibxcP+vbV5u3uz8pPu5j8iIATQBL11XWE6Oixv9RKkv998Bbs3WufCto3UvHs4f9L94BzpYUnPJw0YW4Hjz7vHWK5S+95bMUQ/xpsJSrZEUywhQR+jenN+fm37Q0HPOCT85c/fYBonDBFAXVFaQWkZqoI4DEjlyAwJhMFVGy6XM37++RPO5zOePn/GcmW3Vlkp5KequF4K1vmMIPS3cmfzEJKJbynmlU69InTNZXATME4D0pCQUsTpRAn102nGNE6AkpiLVpCGEcvlGbrOkLJilIqHKSGdBiw5YBT01kvT30PRhrWZ81TYJNf3ddY0REjka1UbL7gqEAVpGixgMx2GIWI8HnG8myj5/nDA8TBhqhVpijQITBHxcGAL+fA6SI9vXYgqVxQV5EaSdtQKSKUlRSDhUawLK6Zo4l6V3KamECn9Jg4CDJGlunFIKNNohoEClYLaBLFWuHDrL27CIDu8cJlXgZUd7WETlraNONn2lhV90P1WRvcrv5Ff/o0lHMz6eVTok7o/89V+H0m84sZyTLRaf/iVwb8PgrbgyCeOVrXbRuwnEOftQYCIfdDqn+O8Av+eDQnR5oFYxHS4w+le0MIRf8oRxzdXTGPCm4cDxjHh4f6AP33zDqcjScvTRF8/dpDwGh+mCXd3b6AyYBiPiOMdQhqQNeLpUjAMtCd49+YO0zTgzf0J98cDlphxndlFBABjCjgdB5wOI6ZpxDAMvaTiSGNTktQB3JzP145hW2u4XM+G7jjCyXPILaApvakeH5/xHz/+jHXJOF+umBdqnT0+nVG1GmpiJF/x5It3bC4bwXjJ1vFmAVMpdR/Jo5SK60rNJawV18sMKJBiwuPxTIQsUuYgmBBrSsb/DEBMamNaSFGAmCehLZhimKM2Fjxq2BBCs4youaCuxRKpASEMiJWCqe7dXbWgaTHEqqIWGpCOw8BgOQSMA0tdUQKmIRp5/vU2iREKIm9uApvNXqfR0A0SGGTkXBAqzX6vJUMFuMwLS1qtmQRLhARBKQXXy4xaFfNCr8PgiPoOTHCh+ejjFCwv8b0NFMhuVgIOJLU3+mnlapwp0+zREBCqq8sBoQaoKGquyCYu3Bp9OQVAmyZrDxIGOGbwqoWARC0ZNa8o69oBES91EvDY5k+xh1aS+EUFCESwnIsW/oZu5z8U9KQY8fHdG7bcCSgIVAqWlUF4LBMGJYqRhHLzqg3LPOP8/Izn8zOu1yvWdTXBpmLaOmqmokRltI7ASCZ6GoCURrSmmK80roMwO+yDa4iIKWAYEkq+w3QYoa3hen2DYYiodUQQwVAy8nyFlhVSCxIajmOAlIQkQGgRtQox+cgJcGkV0YxAWwgUUBM3suNEH8NWR25uZ6Egu3ww5KQy45GUMIwjxiNVog/HCYfDgNYS0hjZ1hkjwuHAVr/4ugNSFWbFQa5FaQHeDCvRYGht25Ie0CfSoHaT9V3kTRoEfSIZTBgyeLboC6mTXmExpO6wJws2yC/fYUi22rDEtYMcAEA2GfKbjMHLMy9Wqg3pePH6F0/2WIadM5iQqaA7STsedoOK7RaNvwfawyRBQMjXB7/239m7XiBtnilZMGeZ/3ZetlBOvvj7fdDzcm888LGTLwHDcMB0ELQw4l0NGE8r7o4Dvvt4j9NhxOk44MPbOxzGWyF
RqpUrmijGYcDheERpAXGYEIcDJCbUFnBdWJ6GBJzuDogpEjGaRkCBvDYs5qVGgnTCODJZCjFtnS8+ub44t7uY7lU3F4HVjoR4UMlEsDZBbRGtAZ+fr3j8fCbas6ydmHyer1jbCtc8E1dON4RMAaylUnG7NVyXBctKfmXOpXfTJnPurqVizRWlKpH7eUHNJIJP1xUxEWEfjN8zDBGn08hzK8DgqLh1ZsFCOjEVbohCYZ5SjvhQlIX3siryUpDXFUECpqlgHCuSAnHl4qdQ5LKgVAZ+68pW8BCoEp9Ms21MDHBTjDiNY7deeY1N7NophF1sZuBZnLDspGtDYGqr5Me0ippXNAWWnQQDr0lCADuBl3WBQqwDuiKGbexy7LSuB6I7yRFvvRQotAY0MS6PWmdyZfmT4BpRu9LYpaytQYN2WygRYaKZaejNoIdIWrMgRGAZVqOJeKsVWgpaoVNDdQPW5uLBu0RRsZXBOjpk6WZjeVN3KNPvbX+4Zb2sq01//LmWhsvcUBowTAcclEqRQwyQIQLKlvVq8FXbTSZibWoqdqLVO8MsoAhbliI2eTZs5YP9ErPbSzu5CtQKrYVBjAsXtgJXh3bmvs8tElinRAwduYlVEXXrXBLLBn3/KBDVY6TN7FLB3l4b1NXg42GIGA8DhjF10TCY6rQTrSXGm6DgNTZvJeV+WU0fJMIBJpNu3V3DMGCcmB0Nw4hoGVNToEbWW5mBWBNscMuNADcQBdA7UfrgE7EaL6/tzWIs2zn3gKeHFX08bFd/by/SSxK7431Zetle35+V29b3Xr351XO47YLaCzdYyj5a8uDrFTfp/5deUrwN6r7cgb6f/f4WC1Ta7h1f7rvqy3Nn7/6F9/XrJegt7cdpAkLEcaJlyZASUojwBhOPFbdrwn1IQ8Ld8UAEJI5UETfzUr/GwcZxjVRFH1NESwkxZOIcQgHDg+mBDQMbM7rUwP5g9yfQy00vSrGvtXGNaBYzkxcDCEoV5CJoDbhcSV5eVwq9uup5qQ2ajUweIpMTEcQGBDu/7N6qvaM0ZyIPxXy1WJ1QtNA6ekr9l7Z1xoqgsGWVhNnGhBhVsRYi31E5xllOjEjRSj5u2robr27y6r/Yu935o4GIPB3VFbkKUuW7qu3nVj7Sm3IN7KeXaRrcnuWVBud2iL1ry/fDg5h9qd4394V0SQ8O0dv3VuP3iMQu8Iv93WuoN7vsALTWnecDLFDU0MtFBgVamQu9ZOXIy74sJ85dtE6YVpsFL1xnW2NFphub9mPeTMX5WQyAqgdCnujo7oTI7VO/X7aql1FRmlvw/Pb2h4KeWgo+//Qjcqm4LhmlUO3052dCa3cPb/Du4zcYxhGn4wFv7o4IQXBdrshlRamF1vHCSDEOASmOhE+zomZenGFMGKcRNBFNCClRVyAwC1IYzA7XXbCef2O6C1NWaF6gS4JqhdXj0PKMdbliXWcsZUbWioyGFoE40d1WYkQ0+WwUQcs2MGKExGFr44zRuAH0XxIBUpQ+qNkxMDDosVb2mAIe7iacpmTv5zAOQXAcBpbyINAQ+4B/ja01xfN1pT6QERcRBEj0KxumCXf3d0gp4f404v3DyUwoyUESCGIpiJFqtxDjTlXWkGtTlNAw1tRdt6sFvaGSa8FsdbvJm4kgbpYmhiDoRnp0Q8ut1dOQiX2rrq/I+5+7TX7hWR9UX0Q9X06HXtoS2x/KCu2QKKDvv7dB/702L3MxONyCwA0Yk/4731ny1BRJAkTY1dKaGAnVJ8FN42gLOL1EttetMWQCXudvXUpfhIH9MSVMp3uoRAwROI2CIXDstALkVm2/+u72CPLh/g7//I9/ts4jxWJtz/d3k7moB0TQsTqI4OF0xLu7O1zjinVecRHFGIF3D0f8+bv3uD8d8O7NCcfDaJM0et7kC9PL87sd72tdWJayWiu4zitKYXKyZnbTrFlxmYnOPj9f8PjzE0pm2SeaSexSCmorUCj5g4EdaS78pworaVnQYy3rqhTAc6mPaGg2DI1vhYvTklmOiNrQakSUhoCAGCqkBcQmmJtJkERBGsQC3gGH4UAKhARaQhhqsU+I+2mX7RbwXh8BMOcMXC+IMaDJiKYJEDbVNCVnqbRqFj+K4oghWAYByH8pDR2lf42tB2pKWYDVHQh0x3+0ayaWgSiAUgrmkomMR1ZPvCs2mBnssiyo5TOGccGbdxfMi3XTBZrGqlJXL5twrkunxCCQMUFSBMz7DqQWoyHyZzPn9Er6QykkMkuoKJnnVwNbxgWCdVkwX2bTEGKnYwjCCo7Js2gxdKdSrDjnBXldsCwzlnnGsszdimJjTPJ6B78RsEOwYB1cAmgFNe1E8XvbHzQcrWb6tuLx6Yo1F5yvK356vGDJBW/fX6BCcatW7jBGTnI5k1BXtaKZaaMGTkwp+kwagSZ90o4p8SINVsay0krrt73duEbO8uiVkaCxvWsGysqEopA0qW5wlhmEFa28sQQIKfSbMBqnqIqiKG9SsQAM3pKcki0mDRJY1hnHiHEgQnWcjpjGyVE91Mrg7DglTAOlKVtdAa0kX46xt3pX+5vXRHqWXC3bo6CZCImIIVLtdpwOGMcBp7sDHt7cYUixR9oAjFuzzwgaQmhIufbJN8bKIKkJUokokauKO3cLgBoaMzy7/sFW6P06uodstxKi3gQ/++f77Tcz85cn+FcCnS/On/2voykO+1gAJ/7f3zfiAXqozPPnljEvEa09SubBkBsGc1HnlMzzffs126LvAdXtMfr3A9iIjfY3HNMJ0/EBaZgQ0JA0Iyi7adTKoBLMiylgQwSgOB4mxA8jfb2WjOcrTU4PE4VKQwCSBAyEKXAYRtxNE0SBMXFxjqI4HUe8e3PC3emA03GiuCKsVddRwxdJ48Zx+nsgPSQsr7maP2DBZc6oteG6VDyfLem8Lnj6fEatFdM44HCgqOtaCpY8Q/U26IkxIQSWt5Y1Y1lzR4Y6SmRoONAbgni8laWSWgtyLQyOBEAt9EpsDaEqU9EGrNakGYIgFl77KTWosgs0SETq91m70eTZzrlf/k2jTZWmtrquZpkBxOhjv3S0oCp6YNGUDSmw15mwoHsfvubm++zolCdnvMyeut+ipK2x065pQ5TR1Mldt4lzK3XVGoZasSyLacsBkgbydawTeV1XAC4GKGghYIzC7kwJaFqtT8FbUpwjU1kdM1SPJPZqCYyV5pSjvWZ+Ty0F/klBxJCfjaPjpa1mflvs2qOGX3Ex3B2Ks6GrZm+iMOTfEFCbK3o3299wMf9weSuvGeuyUh1zzbjOGcuyGElrRV4XhCCYr4LnxJLG9XpBqdngUKIdAN2CkxhRURq1akAIO0SvY1uEZ1ySZMEITUSjeecMGIYBKZGwNg7Jsl30hVCsdVoC22MbnMOiyOo6AR5OiaXzJCA7QhFhbZEmdMjOBPLygl0g9yzyspznLs5sd+uKfk6tJV+jRaoGLTaELyaAr7kp2L3RIAhpRAqK4XDE6eEd4jDgdDrg7dt7kq1H4+VEYba0gyBdMMszXy9NhhChYNkrRkZvzgXiArgJHHYBuI
7uyO0M0OOJDb1oe1QHt4HO7y5K4jnE7gu2dfXFW+XLl3coye3nWLDjr+++R/r0+3qbf5vfc/vy1jbH7kjW/ncvAqJ9uQtg8NH80/cQ++492+nWnp3v3+86SzVZx2IgQhvUTGD8XJl5K2+nsH3m/vMCxe1iJI+vqbI85TIahHv7gktNsYhpTDhMI6ZpoKL7MFgnqgVwu++wA2fX364U4cT41wR6VEGOnRHLS2lYS8U8E1G/zgXnK4OexdrtWzUR1JVjaq2ZyA0UCcLrDgCVmjkKNaPHuisNbIG7g4J+DhnT8y5mUkhkhQlStM5W6zDTzQyaSYwFwkql4FIo6RGkoko0VNADAe0lLgGg3jwCGOnXPkuw+T35TwCCaPtJHhg13vhhxNQVTau1OxesWdDa63F6GFDWLvZarTOr2MNFOSGhl9sARTUUuSpLccEDtsYyJwM5QWsBoQQzZHYeVsImHutdcQotFRXsgKw1QlPcRCSlgfRm2wPn0dkDntipkc1tLfficymF5dWSCURAoSEYjaRANaJphXdp0YuTaCTLW/bcJQ7su+TlPM6D4mxq81inMvg4/Z1r8seQntrw/PSE5/MF3//wCZfrgiVXnOeCUhUSBMfTAes64fIs+PQjd+rz+RmXyzNyyUBQHE4HautIwGDZvpaJ/Bug81lsxDHggWA6DGg4wgl2LLUkPNzf4XCYEAPVW5NpCQmEg3pUiuodDnSrBZBbw1IbzrVhrhVRognwBWrN2ABcS8VSMhQN0xjYHSLWKmidIAMomhhEMI4j7k6HvpCotyrWgrI2tBigg0DA0k5eaW0RQkCrrQ8AhGilrteJfGpTPM0ZKY0YTw+IKeHd+4/4h3/6Z5zu7nA8jnhzf0RKEXU5I18fodV4A6X2G42kRLVAh5mCd96wpGU1+GrePtZCOuRq2WVDbIoWFGgC8i0t+PMAUL0ejp5teJfRfvMF9m9pXbxZs2QrI6p8+b79T939fLnuORtEgJtJ/xXXx+27Zc/j2XhNe3Pg29BtQy32bewexHpDC0nNAWGXD/+S3g8XLSs5B4WvNNRbCYA0nGB2FSEiDgKJLlQWOmdPfNHdlTe1l+OM+zYEBAUOkeVvBROpmGyR9QlTgHEIOB4GSADelRMQGqZxwIe393h7f4fDNGKIsYtq0niUicmeJ+ZB9k3A93Uv4e5cKua5YlkLrjPR2OfnBT/8+Ih5oVXN5+cVubS+OAGKZc24phkCQUFFMc2SaVRMMLFML1kq398FDD3CAYCgCNZEkExUFQq0YNl6YvLpbcMkge/5cA1RIsY49EYMT5JqbbjMl/5OP4si+4RhGzEMWA3D93s1AJJCrwLEIVKHzZeLYMluLtBaGFQJFddUAS2mCC2CeZ1fFY1treF8veI8X3GZF8wLO7GuhsxM44TRPLOKSQQ0VcyV61OzbmAYh0ZaQ0bm9WhAa0RTrtcrrpcr6liR0oQQRzDEm+lv1xrKuqCUjHFIGKNZnAQqlAcbZ82651pRlLUhZ2s+qbYmajMxYs7t2cjm1+sF18uZvpeiiIHz/LpOKPXKcm1doJV+jjkvWNcZeZ2xrBcsy8XQKnZ0Q5Vc6w6ey5YEOdpj84KYnYaZ0P3uNfmDhqMN67Lger3i+ekZ5+uMtTTMK8mw02HCfL1AlYThWjO0NcxlxZpnChaJIk0JUGAMAaMFPbyCo002drEB69IBYF1aow5Eg4xky26OA07HA0m31k4dbaC4FoITc93XoyrrvGvjIwZgQCCRWRUwD7HcGrJFqAMaEEwLoTETA9gJYjgHUkxULBZBKblHrs1IXoLYW/EYDLFG2n2aEovMISnr3a8E96gCS2lAEpzGA8bpgPu37/Hdn/+Ch7dvcJgGPNwfkGLA+fFHfK4z8qoQCzY2GfRgfKatNBVioCdKC0ipIdXWJ1BH4FyAjKrW1TKTbeG5ISLv4M59KW2/3SIQf/v28k9+7xO4NH/5PnnxL5+2/x4BzxdbR8XQg4X+i93WA/OO1G2vO8+HwcOXbfAvz7WjR1sGhr6AkewvQPJOR9oA+Dorlob3svUu6Nn2u/XPDJGBWRLZBd0vgCHTeolRMA5Ulj4dRtR2wDiRc3iYJkzm38da1mY/wZ1nIPcSUTTE/dU2VWsXzttjXjKez1dcrjMuc8bjM93igwQk08YJAuTMC9CgqOJCoQmDBXWtkF/VVLvRJcDrDUPWo+ebdq5jDB0M1eAjINr12JWhe3Doit42xhW9o7LWxmYY1Zt7c8/l48br32JAMz4L1X3NiDQIfZ3sESI5XCkRQWxNUbRR0NbnEtkSgoZtLnnNTe08r+arteaCtRSTBShIyTiikeOrYFubiu1fUCXXtKPbzZeorsGTV1I2xKwZROKmmq+C2oAlF+R1gRqHq1Yf182aBxwVYvdWKxWtbCi62DVW40qpKjUOVZFdh69kBp2BY6+UFbWuLKm23EnOtbFjq1S2rZeysn298j1+D/Srs5usbtYFT4j2j9/Z/rjhqPXrV4feQsA4JigE40gn5BQo+a7wbisvH3GQ9EFELBSAUGfHyhydeQ+172k0M8NEgjHQYWzC2gZcKmySgpGvMqCK3BTD8TOWUnA5n02cy0jTQ0RtNEuEhC4IBgg1YTTYd7r0O+z7YTYcNEJNRqQ2OYwO8frMHkNACw0xSofhYZYbodEhOARha17gcUncTwJfdwsx4s3b9zjd3ePDN3/C4XDCh2+/xbsPH3F3/4AhCYbRdTaCyZE3dPNPMX+dF5MXYDX8GOCGr9GyPXG9EDf4u/2zLZNTQwocteAv7R6S34wi9qWt3+bybJl6Ty6x7Rb2v9tvHvGowjk7rnTaH5Cbf/fjeuXtl77DkRtODv7a73+G6IZSdUrh33QM++tzS+LuJqzufSLblb29FpZC+PeLbsEHbG4Tn1PYitQNEgGYOBCgwOl4wLu3D1hzofnmifydh7sTpiFhMMsJak1pD7yBrSNwP5FuKNffcCr+F7faGp6eZyw5m1o0UZ9cm/FUxHiOstNe8WDVTq2fZmGCmCxYoL4851NPCNUPaHdsHXHpa8le/6Vfot25ERvPYohAX5N2SYuPjQAPYr0ssf9+v4F09/BQy99ODgd5YCwXscQWlInrRhPG7ViX3fwL/C1r5H9qU6DzYfbzUohsmgmRJaVmBPK1UJG5WAkSgt4t1+czAwR63zlYeWDAEYyPZcmDo6fg+SolW1DoHVaGoAv7A7sVRScwq1F3xBzpK1SpyaaG3GtTNDUCc7dXVlD4sNgDUK2797A1Zf9vCEGRjWXlCJ9asKNw0S631lB1jm+z6/y1kZ6meJ4XnNcVa2Xf/jBMOB3vEWPC6XTAaUxIKaAosMI9xDmhBAAyxB64iLWRiwgOxyPuTycOzOonshEOzCRi3T+whNKUrfO1eMeCAloYobYGqGDNBY9PJF0fj0d8ulxxOB6gtaCuM7QVhACKGY4jagPWskPH7AYNMWGUAxGqIUICJ8YhiaFUgnEIJEkGChzGptuEbd1YIVGsMcWIMQUMQQBJGI8n4DBZBwV5TzEwO02vqMg8TQf8l//z/
8K7Dx/xT//8f+D+4Q3u37zFN9/+CePhgJpn5OWZNdqnR+TaCIVXH3AW1PbuK7vGQRARMQYzhlQ6l5VSkXJByIKgnDF7wNQnREAQqH+jAIxADlCASgXUBpLActguAwc2tGdf5rrtttktWv5+u9Z7TOElInKzCTo0Tj2iaJ2DFDvrprkSLBjyI3v9jROeL/jbBAtsSIw/399Wnbuzz7QF2JilNqnwzf3vbgKCLmvrq+HttnWVcaLWXcLTCd8vztWGl23Xo2GnoeMLqn1+D3pALY9hUPzp24D7u3uT/8/dePP+SFFQVxEuxQnf+0l3O1liSUk/RIepXmFbc8W//NtPyJWmqLXRV+uymK4OBMM4IprwY+hBqV0GIU8kGNdmGiccRxKca6nM4FURU0Kytm+3COHp3Oi9bcfhq7X1oCfstMlkd29RbysAjT5nm96RGFoUzKKGCENTlrIpfBr61e78HgDb0k0kAgK2q2dFDAHLKiatFiCB+m7NZ5Md/MdrGGi07Bww98h7pU3VxB5roQaOAhIipmlEGxJiiCjW5DOvC54vFxSqBRqqJmhasK7W0diTPreBiIA0lDLjenmyzqg3gBY45CnSoFqw5hnX+QLVAct6RJnYNBNDADSgKrA08OfasCwNy0puDgYDBczUU9R0hzJ1oUqdAVkhoTIRcW6Qzr28Ja1CUEF16gLVzE47KfZ3FMSFVBjiwHsJCg0bkuP/oYdzCrUIYr1OAAAgAElEQVRuzb9FZ/KPIT2gcmc2caUGQGLA4TBhGEbWx82XSwMYb6sZijkC4ukIQFIdsVCkMeFwOnBxa4zk/n/23iXG1izL7/qtvff3OCci7o2br8qsruoquvphQJhGlmUb22KCmLSRPMASEiCwkBozMJ54AMJCLY8QFpInPCRPQJYsbASWPEDuFgZ3g4VNuxu73QgL+lHl7npl3mdEnHO+x957MVj7e0TcuDfvrcq4WZV5Virynjjxne/sb7/Wf/3XY6eUSsCxAaOm1LfJOdMdYBzsns4ZihQs5TSLMI4Dl7sdu33HYRhIztEeWrwojQdXqN+qDjivDFEZNRf/+LKhOefwzrK0nIfprJYpEl4QKi9UpaT9dNA9qnOanTrQUqHYO4eXpR6RdxVCZemlBQQ6Vr7sO1qPPgQevPMub7/zHl/88le4f/6AdrPl3vkDQlXTHS65epaKMvEWVBnzNYAwMzFrypGyISJzgKmddK2rQNOVIl5ZgJPFKSV09hr0kInpWSvmRblfv/TlnbaOs1mYBRYFyvrojRufnT5T7jPH7cwszwS+pmvXzMcbkNWzf2xA9+ppboLCCdit+LuXP8faSr/lK+e1P60JigU/bWzXmLfVq+fGcsW+zNNgBZyASWl7lNMTR9u2LHE5eX6+9Z3X55WZglz1oyoysZUrpvGuhjWlzNPLvaWpl2MghnEsx0asmJ45eHfpfpn2lRCoKjsYuK5qM6CwtZUnV6Ws3Cr5ehXziVljYnlyOdZhJnZKqQYWJsbaUkBtYWNWpXiWtT4lLgAyuy1X43fLnNX1v1qCmpPNAwsKdsUICzPXMN133ULrG0uCseK3YQGzdyFa9NiqYOo0Pr7MxVwCd8c00o89MaVSe8rOc8z2wNMDLTXcZmbDauSMcShV2eNEh82wwGJxpmsw91KOIELKDkmF4SndmmKemR5LmQfxMNfZyRavFNN0RMYIFOCyqimlGotba9Idef77UnuqZIPNBoeuAE+ZH+U928+muhJmjC3TZWK1Xj6erwV6fvjljrnM15VXWGtvUl9+IrJu8CfY3W+6H178fd97S+7WxXU3935Rk197aOX5ibF+59rXvMqjvKBh67dfxXXx8UPy8ZvoncirdPBrd9rryevM14/rpet83Q+I3MJ2fmoNOMonJh83lPI69SZE5CPgG99fk47ymvIVVX33k77pcSw/NTmO52dHjmP52ZJPfDyPY/mpyQvH8rVAz1GOcpSjHOUoRznKD6vc7WmWRznKUY5ylKMc5Sg/IHIEPUc5ylGOcpSjHOVzIZ9J0CMiPycif/bTbsdRvnc5juEPn4jIV0Xk1z/tdhzlbuW4Nj978nka088k6DnKUQDEjgw/yg+BHMfq8yXH8T7KpyWfGdAjIv+xiPy/IvK/Az9V3vtpEfm7IvJrIvLXReRBef/3l/f+gYj8haN1+oMhLxjDr4nI3xSRXxGR/01Efk95/10R+R9E5JfLzx8u7/+ciPxlEfk7wF/+9J7mcyteRP6SiPzfIvILIrJ5yTr82yLyF0Xk7wN/RkT+hIj8uoj8QxH5pXKNL2v0l8vn/71P9ek+p3Jcm589+dzqzPXJ1T+sP8DvA/4RsAXuAb8B/Fng14B/qVzz54G/WF7/OvCHyuv/FPj1T/sZPu8/LxnDvwX8RLnmDwD/S3n9V4A/Ul7/KPD/lNc/B/wKsPm0n+nz9gN8FYjAT5ff/xrwb75kHf5t4L9cff4fAT9SXp+Xf38W+HPldQP8feCf+rSf9fP0c1ybn72fz7PO/KxQjH8U+OuqugcQkb8BnGAb5y+Wa/5b4L8XkXPgTFX/j/L+XwH+2Jtu8FGek9vGsAX+RWzcpuua8u+/DPwzq/fvichpef03VPXwRlp9lJvy26r6D8rrXwG+xi3rcHX9X129/jvAfyMifw34H8t7/wrwe0XkXyu/3wd+Avjtu2j8UW6V49r87MnnVmd+VkDPUT6b4oCnqvrTL/jbH1TVbv1m2Wh3b6BtR7ld+tXrBJx/zPXzWKnqnxKRPwD8DPArIvL7sLK1f1pVf/4Tb+lRvh85rs2j/FDKZyWm55eAP17iB86AfxVbXE9E5I+Wa/4t4BdV9SlwWTZXgH/9zTf3KLfIbWO4B35bRP4EgJj88+X6XwD+9PRhEblt8z3Kpy/PuGUd3nahiHxNVf+eqv4nwEfAl4GfB/59EanKNT8pIidvoN1HWeS4Nj978rnVmZ8JpkdVf1VE/irwD4EPgV8uf/q3gf9aRLbAbwF/srz/7wJ/Sew42F/ENuajfIrykjH8N4D/SkT+HFAB/1255j8A/gsR+TVsHv8S8KfeeMOP8iryonV4U/6CiPwExu78LWycfw2LFfpVMargI+CP33mLjzLLcW1+9uTzrDM/l8dQiMipql6V1/8h8IGq/plPuVlHOcpRjnKUo/zAyWdJZ34mmJ7vQX5GRP4j7Pm/Afw7n25zjnKUoxzlKEf5gZXPjM78XDI9RznKUY5ylKMc5fMnn5VA5qMc5ShHOcpRjnKUl8oR9BzlKEc5ylGOcpTPhRxBz1GOcpSjHOUoR/lcyBH0HOUoRznKUY5ylM+FvFb21knt9XxTgQggIII4h/iAiJBzJqdkZ1ygWIy0ULUt7eYE5zw+VISqseqcAoIAiqYRTQOqGY0DOQ52f98gvi6va8R/TJPl2j+L3IzX1umfFwRyywt/WX6bXujtl8qLrrn9tqu2La363d/9XR4/fvyiK79n2W5bPb9/CvM4rZpU/jeVkV+d1zI1cL5aVs93/RGfv15WHSJM94ac1cZdQecBtO8XICPEDFltvvkQEOdx
At7ZdZozKeW5rTlbg5wTnPOIWEVY58q8FRBZYf6bPayQc0ZzBlVSHMkpoppJMZZ5bm3PSlkOfu6z1WOs5jpcXlw8VNV3XzY234u88847+pWvfrX06aslJ6wf+dU+8+JpuPzlxbPge5EXfV5uXHPL8L2ktd97G2wK253/yTe+wcOHDz/xtelD0Kqpbny7zN+9HqvgA1VV45yjbVq22w3OO4L3+LIvO+dwspqQN+V7SGaZ2pBzIsZY/k30fU+MkZQS49iRij5IKa/2Wrl1bKa/y3pfQcpYCs77spZtDwje23O3LXXdIE7wPuC9v3ZfufbYN2fOIv/Xr/7qJ742z+7d03ffew/NSs5p7rdpF1wrCecd3gcQ0KyknEBvzuOlzXYr2+9ijMQY7T7O2z53TRnK+qtKOxTNSkwRzZmcs41XXvZRpuvKv9MNZdUWVciaydP+yzKlcrmPiNA0DU3T4JyjqiqqEFbjsiiQm7Mx58zQ94zjCKq2L6uaLvAB553t+brMy4uLyxeO5WuBnvubip/9Iz+Kk4ALLeICfrOlvv8AV9X0hz27y6fEcWTMSh8VdY4vf+2f5Wv/3O9ne3qfB+9+kXfe/wqhavBeCN5BTvRPv8nw7Jvk4UD/6Ov0T34HXIV/8GP4e1/CVRuq8y8Szt6ZlSFMk3kZDFyZTiK4ZWpBLiOhQF4GJ6stxnlIV0prUmAiCwCYfl//u54Qzi2Kbv252/YVWZoLKFkz0wSbfn7mj93NESfn90/52T/5M8uGpFP73bxRhmCbZoyRcRzJOa+e166dNhibdAXsrnpUSEBGBHwZExHBExA8KWV2u4GuH8kKEU9WwXlHVdV47zgkx8POsxuFenvCvbe/QLM9oa0855uKOji6rufi2RXjMDIOie4wkJPSti2npyeEEGiaQNtWeO/wVaCqKsQ5myBuWcaKbTrD/sB4OJDGgcuPvs3uyYfEoefZo4fsnj0lJuWqS3RjRlygak9xobFxdRkEvBfqpmzEAv/rz/9P37iL8fzKV7/K3/3l/7OMZ5rH6qbI6l9ZzclroGf1er1F62TslD/M91Jd3fc62M3zq6LSrhkFcm2Hm17q8hWvBHpe6Q+vfaPVR1f9MYMI5/jDf/APffyHvwepmoqv/NM/xrInGEC3OWSgJydr01sP3uWD977EdnPC7/nJH+df+Onfy9nZKef37/P2gweEENi0G9q2xbnbiX1Zj/e0R7Lsa8oKvJf3UzEA9ocdDz/6kN3+isePn/Ab/99v8uTJE549e8Q3v/mbXF09o+t7Li93RSkLFONARPDlnjlnU/KAD46qcthscmR1iHhOTs7ZnpxTVTUP3nqH8wdvs91u+amf+km++tWvUjcN9x+8xem9e7OOEMAhBOdsicsyQ28C/XtN9YmvzXffe48//5/958RhoLu6IsURMBeL2EaBeANym5MTTu+f40OgOxzY7XbknPCAL3rO9ILt1ymbcRdj5KMPP+Lho4cIwvbkhM1mY33glr52ftJHSkojqpm+63jy+DGH/Y6+63j29DF9dyDHyDj05BjRnIhjj2pGsH183i2LntofOi4urwx8ZRij6YMhJoYx4rzna1/7MX78x3+c7XbDF99/jy9+4T3T/5pAzbjMZc9Yj81ht+Prv/lbfPfb3yalRHc4MI6jjffb77DZnjCmzK6PDMnWyy/8zf/5hWP52nV6UobswKvMu1JUcApRtai4gvBysgeJkTgMjMNgVkDOSEG9GjOaR7qrJ3RPPiSPB+LFI+LuGYgnue/ikiD1Fg0NhAZxnqpq8CFMXc/cGDVL3tiSiUdayzJpVJU0vUbmzVYKE6AyWRzuuQUislgjE+K+/jd9IeiZ3rf9Re27Vk9x/W5vTtZATWYFd/29qR+u2yvC2o5btkq7xF1XtagK2ew5sgoxaVkkEHMmquAdoAn1EBOkUUjRoYlrnaNSlKQIPvgyrlBHT/aZKkCQREAJmgma8VlwMSAy2jOx3KPYmaBK0BHnlZiVyivBles0kVIkJZ2tItGMFtBaOnPF8kwW3d3KNNvtUV7OyoheN/qnsVW9/ofplcp6LZXn0WUGLJba+h1ri8rtK/HaF3Cjiz7p/nrF+93st8lKnV7fds2dyLQZrfpt6k/ULHSAOIwcDgdU4cmzp3znw+9ytbsixpHgPXVdz2zIba2e2KO1ZM1oYV/HGEk5liYsAGj6WM6KDxV13RbGSWZmZWGZjGmyPcSZd4CbjM7CJpsN4hAnpCyIynwv7wPO+dn6997T9wPDMCLOGQswwx27m5Zfp7UupXtv55s+YREhVAFB0U1Dip6cEuMwoCkheLxzZUGCOEGc4IKnqipy9mhOhWVWYhyJcSSrkmImTgaOwHa7BYRQTSzh0m8igvcG1jUnchZyAQje2d9C8DR1jWhmdEIc+nlPkHlvZGHgcy4sn5Z5kkkz+2378MTq+BBo6ppNW9M2NZV3CHkmIdA0G87pJhsG1FXFdrMhpYQXYRwjoaoIzl+zjl5lRF8L9CgwZHt0r4KoI2UhZcUlZVAY1EBPyhmNEUGIfUe33yNS0Xc9Y4yoOEg9Ejty7Lj86He4/N1/jA4HZPddZPcQxJEvr9Dm27j2jJQh4QhVSzh7m1DVZkFqmm1KnbXgDUtxsl6yzos6q9qkmRFJ6WYneOeKDhSs8vZK4esEaJa+mTbCrCsmiGmDWMADTGxK2TRW+nANfBa25K5ladd6gYi4G5SwvafkG+6wBRzpanPO5bU31FO2bmcbkhrYQSFGGEboeiVn6FMiJvDOkUYheKXPjmEw8BNDRrPZb4qQRawPg6Nqa3zl8cERnJJTpvHQ+B4vQq1CE51ZTUmQKEtfTwvNeXOdAZU4XHAkMjEo0WPoPkXGviNmJSVBs6AiqCbsjE3rK+ds47ZN/27D56xHzBUozuFe4rKYAM/NDeJ5N2Z5f/qC8k3LmipjfwOpi8pypWhZP7zSfJ6VUvnON6CWrska0NwEPLPFPK+RO2qEYptoUcwz/plma7a5jUJ3OPD06WOqqiYEARKbzYYvf+nLpJTZbrZ45zm/d/85pkeeGw8x921SUo7EmNjtd3R9V95PqCac97RNQwiBnJWm2RBCw2HXFeY3IyhebB/1hRHOWQ0E+eIGzsVQUEjFJa0KVSWE4GdFqyoIDu8r6lDjfSCOkaurS1JK7K527HZ7sipjyqZbpnErk0jLhjttx/PY3fEW68QAZ64CTR3IKXE47OmGjjEOeKkAb4yXKy6u4Km1BixkZOgO9MW9vj907Pd7e38cGYdhdh09eOttAGKKpAJoxFt/OxGqKhC8IyVHioORD6iB46rCCUg+ZWwauv2ObrcjpWgWu9g+NgNaII2ZQzeQUqIfRoaYDfhkY6EU8D5QNxVVVXF2uuX+2SmbtqWtAqLJ5kAyNkmxz8XCUtv3CZKV7WZDPj8n5zyTJ4qgzhfQpDjWnpMXy+uBHi1xFQVYTBZ8UgpCK4CHyTAoYCEl0hiJcZyp96wZTSOMPWnsGPZXdFdP0WFP2F/gD1cgQkxCHgbc2FMdnhH6vdHyOU02edkf1rEps41Zflv
ar4UyXlBlnq0mLXd0gBagM5FH871mH6fdc82A2EKeLNvle5drrrMlIIjesu4mw/nOMc/Kdr/B8ixgbXkGa/e1J+OmWprYcVNaWvpOrv19/jcbSEwZUsIm/AgxqTE8kiGXeJ6oaCoWKPMXzPoBZy4xgBAyBIc6JQh4yXhRPIJX8BijuvhetLA0GNXsQrGMKoKvcM5YHu/Ai4HsnFPZpAsNv/J9X2PJ5ll6vR/uSkS4fU6tWjADnhsX3cZgLHEWRXGwWGCyenXDhJ6/zQCPW63Il0/qm7d4k4znzee/CXiev+buxlMwJpiZJYNp31HbiFGFGCN935NS5urqisdPntAeDtw7u8dutwfEYiFube1t7S+jpBaDMo4Dfd+XeI+RlCMhhBIbZ9c7F4wtCFVhdBamZ2YGZHEL+gK+1AmklbFX2IE1S1Twsv0uUuJVHJoz4zAS/Mg4jiWmpazJqf+uPdM0fjce+66BtRiQcS6gDjR7hnEwBa/ZGOKVgbvuqxA8OTtG52Z3VkyJYTQ92vc9wzDgnKNpW5q2sX7sIKVhBnnm5nJz/09uKrvYjHznhOA9uapwQAzBZkLZFye4vDaWMtaeGGNhnApwLfs6gJTnqKpAXVU0VUVdWYiBrJXB5HHJiz7Owjz3qxBom4acM14cKSRSNpCb8qIsX4W9ey3QI87hm41Zw36KhxA0jobY4mhPqxR6tKg/kaJMPJojY79H00A6PCPun5KGA7tnj9hdPCOPHb4/4PsREHLs0CC45HGXF7B9QtUOtCf3afJZwSXPWyxr1Wz9ugRXXzdM13xQLr85VMN0KwNZ8iJLeJqsi0K/vuDW18gtn1+UyAyV3gjgYW7TekNf05lTG1KyIEUtQWRGIYM6SkOFlBMx55nlMZJZyQLZlWdKtiDstUOTI8VEf9Ex7gdyhpzsR7wj1xn1HvD47AgqhGSsomj5YWKlFO89DiHniHhTnhWJRhIepQW2qIGeFUmRxUC7WYAZkQTijEvSDDnidMRpQkiIOXCNXRELlpY5lkvLj20mE6X+puQau/lxF9586yWTzoCmlj5bgWUVJkey6cDZSmDiK9fc5Ss1awJQn1Ox+AsPaGEo4GYfTq9EsBi1MCmXmrpuCMH25wmAm6Gp17p1jd9SWdc5Z/q+YxwGhnHkcnfJ5dUVKUW6fsc49ma1n92jbTcEX7FtTqlCjXPT97dUocG5CucCTlKx2tOsGxBBk5KizjFK096ctbjAyzxykzslZeI4oDlQN+ZOq6uKEPzsPrPPWw9NoGsdIM3UleXlXU+zGCOPHj0ieEdbm7JHwFeBoBknjpTN2zDGyBhHG+kpPEPAe0+oasR5fN+b/lUtQdsZESGlTN8bmBqGYQa6KWd8TOYqHF1heiKH/Z5h6IgxMnQdcRzK+Kel8fMytl3d7mdgDVXGMTIWsBlTLqrfmElX9r5N23Lv3hl13XB6sqWuK+rgLQGFiawoCSMUt62WESuuSs1aYtPaGfQY0MqkPBI1vZZ19Hqgx3uqs/tQFA2Updgf7N+U0JSN7SiWwmS5uRBwIZDiSH/1hNE5+iffoXvybdJw4Om3vsGzD79Ljj0udUjqbCN1GaTDtwNx8x0GaWlO7rM5O6e9d/+a0jbmROZ2Td+N5pndMUtpcWsskoBon9S6dE1hemYgsg4snOcDc1ZQ+dbn438oijlfY4W0sBULteOYrKwJPN6lxpz6bR2MrDf6CSzQebaislGYqJJcJhV2JaZELBlNM/sCOFGc2GQeh0QcktHzo6AR8pjoLzvifjQmMYu5CL0jNQEXPCo1IThwQj0kQsr4lHHqFisGzHJwDiUWJkepNbLVDk/mRJR7oubeklXXOpl50VzApwVQVqQU8DlS5Q6vA76An+JIwjkIFD+86PJpZ9aaoWHQ2+OKP1GxzZKPnTMLOH/du5d/CpW0jp0wfnTSJCvgowvwUfTal960yq4bKSvs9AMkbwLAighVVVk/ymSsZVKOC6NY3MziIFSOqvY0bcNms2XTbqjrZo5/UYGkGVf24+k7pqQCVSWOI8Norop9cWn1Q8+HDz/k6cVThrHj6bOP2O8vaZqWt996l5OTU042Z3zw3pepaouxbNsTTrb36LueKrQ41+FcXrl4pRg+SkpKHGxvTrpkQeYspORx6kBtaYo4Uoz0h4PFhzQtm7Zl024slsh7xDkUU8zqmN0d85oo26ysLGKZ/3c3MvQDX//61zk93fLeu++waVsQqJsaFxwxJoYhmmKvOw7dgaqKhT0LxpRUNS1CTom+H/C+A4QcIEyxXTFytd8Z+1fcPzbOiys2FEYnxUi3v2QYOosvGgdyuX5ROqv4RJaMKU0Wz2jgeKTrB2LMxS1VmDo3xQ957t+/zwfvv0/bNrz91lucbBqqUFF5b3ulKmiCKW5MzTBWzPjVAtTbusXVxvT0Vcc4jgxDpB8Tmke7btKZHyOvGcgsuFBxbZZMPjk19wOzwrwBKwrnqZpJ40AWGLsdw/6C2O/pD7sSNT4ieUTU3FdORgTFUzF0B8Jhj/OVRZ/nZOlSuJnKm8ZNJqW2otCWINyJVVl9YBpoVZhjEFh14tpSve7emn5/EZMzr7bp0zqBpeevWZRWYV1eMhrfj0y059Imnf+dQE8ubF1KSzq4lSVYgNEkKa5SxgsXYqDTWB9VAz1jbws89cVlNWbiYSR14wK0FNS7AqQzeIcj43zGZUWy8acl9s+UZwE/kI02LyxUIBM04UlUKDVKkJuB67YxqBjgsuUuRGwsssbC8uTyMwULq22qrJieWbGv6HQmgHt3KvNjsc5q07/1z69ALc4regLr12iZG9b0DR+bPvfC7jUbk+u5eLPNn5K86OvfRLPMVasz8EllQ5drw7QwihPI9t7jg7mA5ni72WC7HuKrqz0z5VQYXWMbhqFnGHq6/sCh29P3By6vLrjaPaUdNjSNuVK8BFJOcxyg9xUh1HhfGSOBs/U1AyxKeyhp3FPW5zKdtABq1fUawnRHirOhG3wgBAsEXtLyjXnMuuLbVxaqFMWw3nvvcixzTlztrvDesmCzWtCx8w5fslezLuniKaUydm72Ka2zZJ0zcCfZQKT3fh7bXNj4WO5TWjB3QS62XUrR2KBhIOdEWmXmTsms1xehzq6nXGK7lvaW35WZmZvYd+ccdV1xsrXswbapSykFV75nPRmKfl5UbPlOGyDvPaEEqqdozE5KupRimD/38fvY62dvpYzzHh8sKyDnbGltqqADqTd/o0WW2+QchpH9/oqM4CRTScIJdLsL+sOeNHSMYyKqpSeiDnJZKM7jJODxRBWSWnT3lEY9x0+IFJdHaajYCKvMXWtbrAJaaM+ckdhjkeMRGJj8q+pr1m6qZbNfROZONnNiShxbL6MpSPb59HedN4opwHACGVOdmRdkHd+JSAGkRnFOYGeKG0jEMVoMVEqksrjWLFsqi2B+ZpjBwcT0DF1iHBJkJQ+QR4GYkTESykTOZXycZnwwQIIK2fUWLxY74tjjhpoQQLVdNvdJNEMeIQ84HfB5wJMITghiabLeC26qE+HczPQYfVuszmibh6
RE5TJt5RCtOD8/QxGSQp8cYzYoNGRHyuuWyMoAeHPyHFR+iZZez83bZO0gmJVl+f/S6zr/QVejMS/F+RN3IMoSA/QJo5HbbiUveP+TFOegae3b8uTmSMY+qlvi7hSlaj1VA6FSxA3EfMUQI4fugsurZ8Q4cHXvlP2hsxINTOBcqIInBFO8+8OO3e6KMUYuLp5xtbtiGAcury7ouo5+6BjGjiH2IHC5uzRd4AKaM957qhDMfVHX9l1lb3bOUzct3gdTjmlScEKUPJc8cKWDJ2AyGQ8Wz+OoQ82muLXunZ7y4Pw+2+2Wtx484K0H51R1Q1vXTMnuvsQBeVnc0M+Nq9z9eIrYOA7jQOg9MZlbKOcEAlVVoQHquqIK3oK+U+Iw7s34LCDGavdkQgh452iaZgZvqQQv52wp3X0xUPtS38aJUHmH98YYWd2xZPE9InhXgqZ7A0J935FinMNUdGUAWx2mzBhT+U5liRISqqpmu2kJIXB6csLJ1kByXbLYjMpxrBHwYnQzbxRrEiOlZJ6kqRZgYbis7c4CU0q2+MfJawYyKzEmKhcIzYYQKtKctpYhKeN4af7BpKQoII5DN/Ds2RO6oScPV7jhAg/0l08ZLi9I40Dfj4zZG2LNCc0eW5oBcYGggVaFRi14aRgGhu5g7FFxpxldVuItnEdCBc6DePBVYYMmF5ggOSNxD2kAIll7IEOlaNiCuLm8zzQIy9CuBmUKri0mhDiZwc7CLq3Bj1oglwgiYR5sAx1r0KOvMobfm0yby2psp/E1FK/zhI4xMfRjeW2Bg1lvTtZVDFeJeQELYXVi8zz2iXG04GQdBB0Fl5S6i4Qhr0E/EgTvEk4FcYksnpQicfAM3R4NHh+AfGrW5ErViyZIPaQen3uCHozx8Z5KKoJzNMUdIG7hwVXNTTdOQXmxZxgHXMo0PnHSBtra4fw73D9/i5iUXZ/oRqtH8eTywO4wWEzP3LEGZt8UY3ErNyjXr7gGy14AeK69Pzd/Zb1si9cAACAASURBVIYx5c9Nb2Vms6x8pzFwhYldf/LmpgbcQn2+VBaduAr0vYs+frN4FTDlfHoqtt6yKcwYHYhlQBE8NIDAZlPRbCAEhXCgj49RqbjYedrHNW2z5eRky/n5W9R1PYMeJ0Lb1DR1RUwjzy6e8vjJQ4Zh4OGjhzx99oyUM/0wEOPIEDv23Y5u2DHGgaxwGa4Q8aScZsCzaTf0286K0ImxE8FXbDenpjRjZhyL0soHBhkpvM5cR8fNu6vgxOPE2Jy2aTk9OaFpGt556wEfvP8e25MTfuSD9/nSBx/gfCD7MCe2BLHSF06E4KTUujG5DuDvUArw0pzpugNZp/3ViALvbR+aiyxWNd579v3BahvNAcJT1qxS1Q1OhO12w3azAdSCmkthyAsRq7MzjnTdgcvLSxvvOlCXsh45DmiOCKXAq/MMKdLt9/R9xzAMxGFYig1PRSZjoi/ExjBaSEPKauu8/LRty4Pzc+qm5q3z+5zfP6Opa9q6KjGSmHXrFh2yeDUW5sbKfdg1cYxzG3I04xxVvLNCnFpSqF7FwHxtpkdLEOtUidlJRjKYQ6NURkyZ4oFAoFTnHBDvGQPE3qyzNPakOJqPUNUsGXNkGOMjUuhRK96UZ0JMZ3rN2jRZDqUYnuY57RiniFNEfLHmS8ilQoHRkCOqI2hheoLFbCxwhWsb9XN7q06WcHGZ3OZS08U9lrMV63MlKv8219KL0ofvQtYoewlWNqtC8+LeyqXq8VQbYrIA0BXoK6DHihIWEeb75KjmRUwOkgUzSlZ8AY4z6JncV+pKamNCJZJzJCf7sWrJC6sws3xqdR/IydykxTXlCnfnULwT8yuXwMISPgBktDCKjozkiKh9JnhBxNM2AfFYyrpE8AmGiHf9db09kYBT596xfLwz9MVUyMfVnllP6+sAiDLPzQozIDJZ6hMLuwp9Xn/segOuf9fHPInc8uq5a24+0wsff/rDzQt0NpQ+YRLppSICIQgpY+tDzdXtZirELrKAZ8F5cF6BSM49MWXGsacfOkQcQ8luEnElPsaAQAyekP28P/dDR9/37A97dvtdKRiYZ8PW3BoJVBiGnpSMvZiMOXPDuJmJmGu7SKk0DECpESMZJ54pu/EGFF+9Kv+JZX1VwRilpqnNZdK2bNqGTdsiztGXvpoC6yfILTfvPLHt3D3wmeOmUsLFWIzbVMbUza7JpRSCsS5TVtrkAjTWzFxaU/2bme0pm6cTmf8uYixRHMcS0zMZKjpXmp+MXws8XlXYLvGZN2XxRKxjPyeAMrkdrT5UU9cz61eFYEHcdhdmC3f+fXn7tr67/p3LRZPbdGIHP3nQoxnyAMmhY0cmFz+eITyVwqq4gGpGMfosjj3D7gKNAx1bOm9KB1V83eBCoI1nRn3mSBw7YuwBwVVbnG8JdUO1OcPVLfjAMA7sdhdoKVIVoyHBPHZoKgu81HrwdUt9ch9fNYSqpdmc4X3NOI70+wNp7Eh5IMUDqpn2rOWksWq6FlRc/KLXR2O2cxdlCZpKLrRaRtBUt0fEl5iTVXfqGsHfcG+laQDvZkma2ypem0xjjAx9by6sbFZZVkhxnb2VCggoc3QysmV6zikuuACM4k5SheQdqbLuYXQz09O4RAjWD6kscPGC3zikMrbA+2wBkT4iDJB7JNcEjQRNaI7kNKKaII0QB0gDQkTIiCgpW52pDLjsCFiwZFLLHjGL2qqJpmwW6USlCop3zH0wxBKIGaMF8pXNbGIcc87FEIA57/bTkoUWebXLb2wc8+8rwkpVGXrL+kgpst9d0neWIm0UvcP5QNOeUdWtZZ5UNc7ZlrMon+/v0V5dXsYkPacSn/v9Nkh0V+IEts6RnTIG4zCHEcjZMlYSjL0pJQ+MlaDJ0Zm5SPCB2p2wrXfkpHTdga7vS5n+UtPEicWSqAGabhgZYqIfI/tuz+XuoiQuaCkrMaIJHBWo0Pe21g6HjrEEQKsqzgdCqKjqhrppqZu2gDSbPHEccaEvRpSn7j1RdOaGRawQXdO0c+AzusS1+Koi1BVVXVO3DVVdowrDMIBzFgKB1fkSV5WjOArTDvOePSn8uxYp45lTZL+7oju4uSWghFATx4hznqEf6A4GVLtDz35/sHo7YqaaYAHCWrK+usNhTvsex55xKARCivPeWwVPUwecCHXwM9OD2F5l1xggcQJ67x7bTUuMkZPtxsrMxGTVmQsQa5rGgHLK9GPxTkxtFOH++X3u3z+jKW6uya3mRJjCLWXGPAWgOrM6rRxNsZtmK8uAnC8AKIntybEA8jFGq8/0itkirwl6FIkdOCUPHnKFuhp1WwM74lGpUFFURrRkQ439gf3FY6vMmO9RuUwIntZ7ms3GfLre45utKZTe0ukQR6jP8PUJvqqpzx7g2xMIgUN/YHz6iBhHdhfPjDocB4arS2LfI6L4kj5cb8+49+6PUG9PaU/Puf9uoNl4umHg6eUVY7cjjj1jv0Nz5ly3tPcVXzlErXr0GhWXqWxFsAA1+AdkcurIqQeFpFNtHk+oNoRS/2XZey1GRmQKEp6QNFNV7
jsjCHT23+bizrHNzyy4VFxZuZBhWoKXMRat9IBOmsAZjRw8BekrwdtkDqU4H4CWQn5kc20xOlxSQp3wvbFKQ0pWNMsLbuuRSnA4DpJJRHw1YoUoHC4HQh6o1QLbY+zQHGE8oLGDOCAuIs44wqRKF7Fy7NkRCDi8gb1owXhjVOJorNYwFN93VkQdobjAUoqF4lWGITOOU3abVaiaUjB1Kh+9rtT8acj3uLmvwc4MeCaAnjKHqyt2VxcMfceHH36TJ08eIgJtW1NXgbrZcP72Fzk5PSdUNduT+1SNn/GXfD+Nu9bQV7nNYqC8/NLrV6x/e1P4zItwLwSyKDFkskA/RFBhHKE7JA6HSIqWkdW7SKocaejouz3eeZw2tOGMcRi4urpkfzjMAa4pJZwIw2jzPqXEvhvoh5Fu6Lm4uuTJ04clvq7EWBbQEqQhpszh0DGMkcuzXTlvy4JhQzDA0jQb2s0Jm21X7GABp4xDjz9AihE0MB4CXkq9rmK1N3XLpj3BOWftLaAgVBV1U1E3Nc22ZbPd0rQtGTh0BhbMELMSKS44gkwqzurI2URejM95Tt/h4HoR4jiy318uQd+FWQmhoqnLESFTDTzMzrbgdcH5gPMBEStL4LOl6Oc00ncHu74U+JtibJ1YtlZbBXLbIGCp4t7IBp3uXYKNg/e0TcXJpi3naCWGwcDsOAzsd7v5fK++GMYxK0M5bsICme15H9w74523HlBXgdPthroURZxcjFI63RhCoAShTxlYomvO2lhEH8LsJoyuzOEyl4d+IGopiPwK4/Ga7q2Sl1Msa82WV4Wbvkpu/CyWcRoHUzyj+YiFXCrp+kLTBkJl6D/liMtWK8VXDb5q8VWF8xXiPYjVGshDTxwHum7PYb+z2KCrC2LXmWUuCRGjFavtGYriQk0cB3w1ZSkM1mljz3iw80XGYVzcNqqzdYSyZJk4ZtBj6fDGaqUU7bBUhYwnq0WqP49elCklfO1eynl5fZceEYWFvr4WjZ9m0DMFU0/VqyckPrkwRHUOIvVuOvxTCR4qP71vQXJg/aV58iOZFSdOCVHxuZQvLwUJxQsuCBKcFRWUEh/kMo5SL0eTZVVpwmlEcjQ3WE6W4aXpxrlCVkhTcymkWTLFkpYCiNkKI45J58ySyXUqLLEjEz2dMkvdorwax3l8Fxr3Va2Q70euUfV64w/fq+jN1+V5ciaOI33X0fcHdrtLLi+eIiLEsaZpKsYY2ZzsqeoNirm5Q84zDX697euN7vk6Wy+VFz3fLcrs47viZruuM18f5wb8pMTOZgJ1gnNKdMVKnrNr8lxCwoI7lVTQaXY2NmMcCLGyOLxoSR8pJWKKFucSAiEEUi6uK7U5n1KpF6OAekRLbRlPsciLm3uMpJjmGj/KxMjYWXN2/wo8uKDgjDFywVaT8yXrrLhGbDNxeGftclMpjazlWIry46dq525OwIgxWRiDDdJcUHZmJnl+Pl1LMLyjvdY8kcW9Fa1Ar8zlLOx7nUwFF7FaZqosjjlj8ygV8nMWXDYOOV6rb2NFzrQchzOb5s4RiqvLezd7WFTMAPVe5mMo0DLIMGeR5ZytOn5K+HGcs8hyzsSs+FQATy4ZhmLVoeu6MrAT/FyzZ1o6K3SwuMxlVc/MFt08LFNpBSegbin9AqUocs7XYm8/Tl6zOKGnrk8MKMfRTkZ3IzqOKB4ZeoKOMBdwK5MtjgyHPXHweB2RuMd7R2wqclMQnMpyJmgcEY0IHmHEMUyHDhiYiInucEGMhkIvnj3lsN8Rh57d48cM+x1ODPQ4ydTtlsuLC6p2y+bsnGePHlJvT+kPO66ePSKOBp7Gbm/KyVWcvfUFxthbzE8pxZ3iSCpFn0LTEpoWUIZux9Dt0JwYhgPj0CPO0W7vU29O8b5Cq2mgpHjLlmW4CgG69nOX5IDmTHfoSn2FVCZPKjQhJXXbrDOR5WBAr4rXKStLcIUCqIIQglG5lQ/G9IjRzH5ix9QWlqrACJqweJ46I6OxXVX0xJRsvdcODeDFI6EmOo9UFaFNSDWw0R1+/xHEC8gRGQfICRc7XO4RjYSyuXuxLbAfLeXVHXqc7BFnGVexgKFusJLqqhkSFgsmlh45lgqg/WjugJQLM5TszLC5OBdSKN/C8uRyoN4bkJmFe37EZ9D2Wmpbrl8/lgDHYej48Lvf4rvf+SZ9d+DDD3+XJ48/wjlhs21o6oq62XI4RE7OntJuTnj3C4nTs3MrNLZpTSEWZPK8SfAJyPeNT96UQ+u6pKQ8veotmL8Rix+LJUFDlBCg3XiqSmna4r6YTOjS3DH17A8XpDzw8PG3aL9psRXoRM66uaYPqMVcquDF0YSKbWOF4OJodVnMQyUlzm6qiZLRnEjRspFEhNOzU9p2wxgH3n77HZxzdPHAZfeUcRxJaSCRyC6TKCwBxniEZoP3gXsPHvDgnXdx3huoPnQAbE9OCOWQ4G7oeXr5lLpvwDnGVIKCSwyJaphdbsVDxqJqJ/y+xCTemYgddprcUh5gqvcmSjGYiq5MFqRrhvakJ2zrEAzkpUI6zGZBMcjmRAJVNCdjVLzQVB6aurgNLVvPDMFq3pOmtG9jn1wxVi27KuVsANn72TAeSoq7VUQu5U0KYAbYNDVtY+diBS8lUnepYWZjMFUcnwziQgCQ5pPZ52QgihGKliKMPXGMHPqBYRxMf5W7v0oyxGuBHieOdnNKjgN5vLKiQAi5+F5lVKo8n4pkbi6EPA50uwgipAP0F0a9jY0ntcGsg6bFNxvAEKioFSZ02uPxuIlhwmJRLh8/Zn95yTAOPH36lN1+x9h1XHz4Id3V5So1PuNDoN5urHjW6T3uvfM+dXti9QrGUqsgjgx9bx3sPPfeeZ+TocOpMQuo0u32dFdXKMr27Izt/TNAuXrykN2Th1bpsus49AMhVLz1/o9y/o4nVJmmzSVlsvgl0wR41oFZZcBWVOFdLcicM/urAxllLFa20ZTFkp+bU9I+y6INKHWJ6QlOqUp9mqoS6mC0beWF4It1MZ1BZTPIqGXFFncqizZZ7R1VZRitpLmKklxpRwjca1skBHOjugTS4RkJl3vUOchG9SuKJxPKMqidnfvinGMYYwE0ZjHFaBYkripxaLDvIrvDCGSaALWvzErTyJATQ0p0ZcGlrAzRjkqJqbBiSKHqSyFLzUiKc1zYpyU3CwG+2oeW9HNXFFQcOg67S/a7Hd/8na/zW7/xj+n6A08efYeLZ48L6GlpmopQtdx7+JR2e87Z2X00C2mMNG1LFd6iCsv2o7e0b6r1bNjjzQAQXYHDTwPwAIwp89Gzjqp2bFMgVM6qnWcDPqGC7YkdtFhVgbaty+GcatY/MIwdV/vH9EPAfTfTx0uCD8bCOFsPm3ZL22zwLnCyPWXTbvHiaeua03Zj+1ne06eIKDh1uOzmWlmSM5TaPsM42Blf98+tlowoT54+oWlbHj37iMeHh+z6K2PEJYLLJFmKEoa65d7ZOVVd884XPuD9L32JECquLi+5ury0tPgq4CuP8559d+Dhk4dUoWKIiX0/UIWKe2dnnG635FyR0sbABcYg
IUvFZqUwviUt+65EgOA9KS3HLqPl7DR0PgBUsffSaLFaVvKlZLS2Vp/HeWehAdnWRE6xuAlLFXjynJoefIEZdcBLjROhaSqqyheD1dbXdJZVKoHuUxB0SqbzUkq4uiacnc0B1nHK5Mpm8M1xqEV3GMixQphVcHhRS3gqWbZS+kCnhAdXYoIpgSIrD8uUoJSSndOVYmK33zP05o7th8HO8nSClgztj5PXc28JzIXccrYUMhEoxCpZLNuGJZ3VUBolwNcoOa92PMEontGVSH9vgceIQwulapZFxkr3eqMGckZzJMWBceis+uQ4EEdL0RvHwc4jIZMZcZJx3pnLLHgry101jH1HmgotqU3+YegB6A97hm5PVdc4Mr4g6aHb0x2uULV4n9Aa1TccdnT7S6t0WQYjVTUpjvOgzYWXZKLL16CndK/ctHrvbjFarE4mlSBClZsAjNUEkmJECk4X4tVLyQgQqJwBHfMlO4LzC9NTArhlCngvoCqXaDbxipTU3CBW+mAK9stYfE9VCS5MFThKYLlmiBOprwVYmGUnbim0PJ3bIwVwTvUuUkyo0zJn/UzTmsUyjUvJGCxMUC5Wmrm1MBfvxM6t+3cavfJHuWOm5ybDc23p6/XrXiTrsbfnsVFwWo4Wmaz6YbDCdYc9u90VfX+wQxS7fTmSI6NaE1ImVDuyeoIP9N2Boe+srkapEVI2lWttXyoxy4S7r7f8ZfjnVd1P19bizT+96rq7m/WpqvTR2M4YTaGlSQFQmFejLi1AtBQnVEDsvNviah8RUYbhwOFwZcGg83lynslNaXElzbR08OKpvDfALiwsZS77RJ7Kd+rs0o8xIkFwtbOigVVFU9c0TVNO/Z6q2Rd3jMyrlimLKVQVVVVbteXNllBVZb6NlkBR4mAQK6Y4DpY5NgyDHcGQdc4ymw44tTlWasKUhArleoZsvmuDZLWvLm7cFb0/0W/TJlLaPZ2CbvGBuZS3KaAHy8zKBZxMR+OoCF4K5U4uhqfMZ56F4labAoqTCEns4OZ1FtmUTg9WGHAyHHPOuGjZ1l6Z3VtL5rGWMIMyV68t1JV+mRb8RE6u1u20d05jNO2hE0M21S1KubhWNaPqlj33Y/aA1wI9OWf23Q5NA7lYr2sKKiUhZTfTXYbUKLTU9KzFNeKgH4VeomVJ+R4pFK0d4miBwomEph5nRVlI/QU5ZVx3RZM7vBPOTjc0mxP6zo6uqLen5Ngz7p+Sh846YxyROM7F53yo500dnVLgrRT3o2/9E77ebGk3J3gnBG/X9Ps93e4KzUp9sqE52QKwv7xkf3mJolRtS9W0JfaIksViEffDGOcgrkmuH2S4FC4szbozEaAk9aNi1TrtoFTPTa3isYUjKEGVqjA9tVfqUgmgCo4quEKFWu2EKdtgOeaifLMqWRzqdd5omSZ48BaglpMxitlSx70mPA7VZNW4pzlW6OCpuQKWtVfZoYieqmQP+FJc0f4NjpIKr+YvJ5VMBmgaA1deplPELFNgiIkxmiUmbjpx+MayVr3BWrxZtuBF7q1XaYUWi0rV6n7YQZMKOUKKpBS5ePqQy2eP2e93fPTRt3j69CNLj+52FueHoCmSoxVvPFxdEPsRjSMffeuUsTtwcnqGd5DiSKgCzeYEH6rihriN8Xm1h3mt5fLpkDivJDkrV1cH+sozjCM+OHAZvIIolfc0lZ/jNSa2VaaYGwEniZw6Io79AZShKDSLlXHOsam3NPWGumoI4thULZozbd1w7+ycvu/Y73cMw8FYvnL8QFZziasoQzrw6OmH1K2BlfOzc5q6pR8PNNuGe9xjP17RNA39WJGTnQI+xczZidwQqprTs/u0my1vvf0O733hfUJV0TYtVagKm2QFEhVlHA+kPOBDwDtjveq6pqkqqlCRVen6nlBZ8ghuKoEiJRaoVPcdpwSEu5EYIw8/+sj0I+m5aTdlt5qx4BAvxnLHRC7p6s45C/b15Xy1MMU62an3Fl/Xl7hZ6EvQsD22lLqrDocvZQKWfcJOX6+M4RFHqMrRJbpk9U7lCAA72b0ceGpYZPJOmKuzWIdMEEQ0o6WETVILnBRx+MrCDqyR3n5UTefPICpfA9hQSvzIVOqnhGTEaPrWKaU01EvlNUFP4mp/AeQSczMh0FSAg5CzxWysrd9caFfTd1a91mWhIhHUKuXixA5Pc4JVTDbFE2OP9HYGSe4vcLX5pV2CJkPtG6p775GqewzDgG9a9vs9/f6SZ98dGNNoAV7DYO3sOi4vL2zCFWQ7B1GVXTMNI/uLC0KwM58mSnDoDgx7i/uRqrK2IHRDpO8jPgTe/eAD3vrCe7iqKtH5vjBNGcaBiTWBwkA4P1cKnQK+4HYL9JMUQfDTCeEl60phDgCc2ykWpx6KOqo0U5HxQO2hLUaFLUY314kwpqfE06yC3+YiVDrZeSvqOWdcqSwa00jcm8Ug5HKMREnrn2pXRIvczynNpwhbiYIacRscniBKHax2CKpoFWwjUSzWRrJZQZJQDPTobAUDJZg7xkw/WGqk4aSyeTi7xrDqZMlkSvihXXO3Q7ka05f//nEyZfCllLi8vOTZswtjQseeFDtSHHny8Ds8e/Ihh8OO73z7Gzx69G1yjhbflxPgyGkkR4u9i30GLhj2l3jg8ukj7p+/RdNUaI40my2hrvFlvaxBj5SJ8rqerbvs7tmavcOA5pQyF5d7U3J7A++hFurW4YPgWkd9YunHjlW8nVgWpQIikzUMKXUcDk9npt6yZRxtvaGtW5p6y0mz5f72/vx+E2r2hysePvoO/XBgjCP7fk8/9jjnjcEJgX7c8d1H32TMA9vNCWPs2LYnpJTYnLQ0bc1uuGDTNvRDVTIdKYqwpOBnJVQN984fcHJyyjvvfYH3P/giVV3bOWJVxTgOPLt4QrrsS8ZOR8yW1SUq5Kg0dUNTt1R1Q8yZ5tDZQdeF7UUsk7SqarzzJWZpvFvQM458+O3vUDUVm9MtofbFKLFg62n8vAjqPM6ZKT4qdjxEygxiJ0M652iaGidVcTWlOXGj7/Z0ux0wuZfM4N5uWtq2BjyilXktVoaaF8FXFZT7V7UVR0RWujEvAIj9np0aozYrCMTAV4l9dbLE8FBqviE25smlAuI8PlgMEeIRH5CsKMOSDZYtYUQwttlRmMbyb0YZS9C9qOJDxvHxqOc1KzJDzKUGQImZmF0iOtFc08YgK5NzReFhwMdYvCXVTUsGlBjlgJYDHe0sJrV4AJdRYmEGHK6chh68w1cWe1E1DXXO5DTgyknDqsWFo2UQCgPlnClouUafC2Pfcbi6xIdqBj0Ase8ZDnvb9EKA3rpviBbQ5VNlKegUBkAmq8JGKk/KEUodAusbS2tf9/RttvodyPy9pZTXBNpXgMfauvLdSDmokMl1xMqimKLsl4JRN09xXz/iHDEygR4xF5eDcpjoYpFYtW0DFKJ5dVBdydaa5lgp/zz9budjrayeqRT9+riI1XM6odTjKedw6RKol/JUMZulYdZhy/QuNPrN8XuTxMJt4GetqF+ktNcU9VJuPhKH3n5iyZQ87Om6A8PQE9N
oruspw3G9J6BWQgA7HXvoD/RVRd9tGPqOYehxoSruhdWesZr/63n5oj6cpyYLR3nXq+eujZLJnWpnMwlEh086u+TW1vqa3XMyOVCWSZlLkH1BPVACVr04HFb12EozpHnNuQKMFC1uhGTZrnEk+EzIwWpc5Ug/dOwPO0Tg0O1LYsrEAkxMgR0XEKc4jhvivJ3QXtU1dVVTN40VtasqQrC6b8uhzoWZjxH1JcEkjkTnrHRFirjk5urxzxmZ4pBQQjRu+qU/8XE09tRnY9BnR27Zk9bLcL3+Jjd8VoufciVDaQYfsyclLxl8q9Iq096HLmbE+t/nsieF2QU2ubic93Mcj476/JxXnQOQJ3NlpSaYy06WPtby+prxO7dFQHQGNZNJPPULrIdp3WnMew1w7Ty5F8nrMT2a6bquMFJLyh3Fx6iZGVBoeRgnzBVtp0UpskrIm0BSSjAWhaGlQxGSjIZbRXDJ6D+HEMRbgHOd8QxIyAQXwD/gJJ2zv9wwHC4BiGNPfwWRoaR3Wv2grGIZRDO7UTr70DOOz2YatER9mQ81ltor3hcrwhHqmqpuqOqG7XbL6dkZ7eaEutkgvkZcMGbglolmOrIo1mws7DrO585kIiSwOKyp5s40vdJUKJHpQEADc7WXUv/C4gmCtzG2eAE/U6Hr6qLXvraAAjf1rUgJJPa2eGVEsrEu3lc4F1eB0DrHEjnBjvsIjrnQJ9Mi11I3qKRjCgTBDjGtKuY1Uqqh+eBLGi2zVWNWYGSIkRgTXT9y6IZSm8KKN84p/ToF8eV5M8qayJPVJRTm8g3JLXNnPQ4vPHqCZUOcLslZORwOXF08YRx7njx+xNNHDxmGjq4/lAm8GD6adfX9UycrOQ90h0tUI0jmow/v0w89Z+cPaE9P8JWtEefrZRN8E8B/JQq3sEqvSTN9AuK9462zMztjUPM8j9JgFc0H8YwhI1U5S86VTEkv+AIcVcQO3sXm6JRLaw5bEMmMDGZBi9B3V3SHy1LUzxIS+nigGzu6sWeII/u+49B3eO+JmgljZTGa3/0dnl0+pm1anj77iKZuCS6UU9Y9F5dP8F7ZbmrQyCBmRHgn1HVAHZyebHnw4Jyzs3vcu3fGpm0KSyxkTSVuaLTswRSJ0UqfuJTo+wPeBVIcuLpsZ2O2667Kwag6B8fWdc2D+2+x3ZzMaftTHbH/n7s3644kSbL0PlFVW9wBRERutU33zPDwif///5DDw+bpri0zFsDdzUxVhQ8iqmaIjFyiKpFVRTvpCQTgcDc300XkVmrWoQAAIABJREFUypV7X+ZeRh7u74lDZEyDBxOB6FItgrhSfWW9bSwXE27ctkzejBMaUvR1zcy982rzIrtki3qH6DwaWjoNicFbxadhYBiSBzO24YhAislpF/t6Z5o9FngK9IR9WYwna87m604nsAUTcImFZChWCtaoJCjqZXFa0lhsw5WUCaV4DKAHXBwfqc+/O87C1vIfXFFfYjSOmpcwf+r4Gzg9F1dXNRddlO5l1SLOFvW1jh+LKZ6nay2j6J+oFNgcZvSMvipkQsN2kM0sJYIETiERQyKgjKykVKlhYHr1mhInPpxmnt6/pVY1AuW62Z5EcU2VvhJ87zKv641arqCm65J7EBfsMwvu2WRckddvBk53I9M8cb478/DwwHS6Y5z3oKcJYn0c3VuGuwc/tR6zpBde9hvPSg/WHB6cNs8tQ8kqqtngxSk6Odm4O03JM7k2RzOvO6I9nzqangcSIE4QBrsnIbovjXinSfTWeNxQ1idccBQgRQtea+t2MHi1TbwUIIqa704MBIkdacSzX4mmGaKotY2WQqFSNbO5gNt1WbncFnKFteAaPfTgx3QyGgLV0CGDk1rN/lc5fmTA/FhZpgWjz++ZzZPL9cK3333Lutz49i9/4e23fzZ9rOt1f8OPstT9AtsgKwWu13es25VcNtI08/j0yFfrjS+/+S3z6UxMAyE0o8pfN+CBA8j0/Z9++nihU0wh8uXDK5aSeVoXt30p5NU2uFUjayhIAomCJAtS42CEfMRK1iaJZRtdLTavzbDZtJIii1u0wO32yO3yzoTwBtuYb9uF23bjupkH3dPtyvV2JcTIVjIpRa7rlWW7MjqX5nw6M7jg3sPdG4ZhpNZMChb01Lzy5BtiDMI0JkjC/cOZr778godXpuZ7Os2+Rgi1ZnLZm1RMR8iCH5HAGq+IQvYNvmohSODd+9CR/pwzpVbuzneU32XevP6CYRh4uLsnjePL3EhsnXu4v9/vUxDSkBiniRgD27qx3kwE8HK58uHtB/Jmn0s82Y/VJGBEA7VksmvCrbcriyce85iYpoEYAvcns+aQlhw6ch99XglCSib2WGsxL8VqQfM0GTfLDtukt9U6m6+3m/GxfB2xJqWm8B0YYnROpzB69+62SkejarV2d0RMf68WD5a0ixM6PkxHKtvRv23cp+hJUvLgxx0PfsY6+9neW9WJSFQvOfgJHZcpVZ6Zu4GXSJ7B1ofqfVttOgRWD6Jyh/bpjtiqBy62GGh1B9bm5RKsrbEpWYZgF8hhFJrokzYt7GfJqbpZqrfluax5E9NqNqh950VcLXQkpZGYRtIw+gKedjSjZ88ewH3i3mjfKNq/X26jbCWbZ/wrbaiOX9+O9rTo8KDPE3ZhsV18KnxUKvz0YVdPep29PdS7uyy4lM7Taa8fxOvgfh+tROmLARDqjiY2H6DQr7si7H5A7Z4/H5X2vA4FtbFGBx+fQbIi2vkCx0DBaxB7UBV+GF35RY/DGvGpd2siaT/qqu7wcMukWgtrkB29a+/TyhZarftjH+fHkqadV/B7YCiYZerr2iwMNmrNhOpJlJdH9nvRT+7vujy/+PFCpyMijOOIZmEtpee6ubDPyxZsiyfbIrYme7NSS0YFt8Jpw7GVTPDuzWAdQFYKWgm1WDdjFNZtYctWLmoCptUh6VIrUgQR8+1qgp0ikFMydeY4enOIIlL7vWxoYpujBNOBiSk6AtLm9/NSqY2f/WF7kRGsU7Ey1rotDO45JtmCDFWz2KmlEIP5hm3bioh1gb1kqbKTgH2sHOkgx8/x8UPw8pSIe1nWfu1qW1/QnlymlBiHgRgDg4sC7vQAW5NCAyFaaS00wAKOybf/0/dinpWi2klIK1NoQ9cP+0Jbv/lo3duXju+V8nb4ve+Qhzf0GdDO5XvLQlt77O9+atv8fE5PwfQZnARqm9fuixsIVq6hxZRt42jiRFgPv9L1X6IvtLU6QqDQDCKDIzpIcNf0hGg1UaK8IXXj+t17wprQNFPPoGPm+nilVEHiRBgqw3S2d84bGxFtbW9Oii2OFDQNnS3bVU0pMIwGD06jGbxZe6X5eMWUePP117z56hvGeebV1//O+fUfGKaJYb7vgY80LaNey21fd0hOfTDsN/jlFvmqynXZ0GTN/RoCVc2SQrWZi1q4N8XANJrIlrkpJ4YYGENgdKQnpkTyjrVjlP7xgtI2RAtmXKLAr4GVrhxNiwPzdCIFU7QekyGHhiaWvlj04LTs+hGn85nz+c44AeNIJ2nbGYCYANxabFIHNa
sLUNZc2XLpomkaEhIqcRhJk5nrqm8utUIQpUYrecWlgGZHe2wTIgREUu9++EcfPxbwhBDAlXAfHh5IydR8pxR4OM8stytjrATd2LaFGJUQXM17s47OGCLjOHmLq3RYPQQrA8doWis537he4XI5c3l8zzyfmOY7hmEmMBja+Otdln+qYxwG/vC733NdF95eHlnyxuVy4e3bla1kSlbWzfrHpQYGFWow6nwS2+ilQmrZovqYVQPTTehQ0Q22AHkofJf+TNwAETKFrIXbuvCnv/yRd+/fueloMfShilm1ZKVmJRAoSc0eYLNydErG/YoxMsTENI6EENi2jcFLSgOVUSZEhdPd5MRWZV1vvH//DhHhdr11Xl3TkVGt5JK5Lub1lsvGbbH3um1X3j+93RHnEFDvzK21crt74G6arBw0zQhCzi9HZNZWUiy2VlWtxCWyLCtB3GYju+1PKca3isFK66t1qk1lI9fNyk8pMg5Wuro7TcxvHkgxcH934u48237rQEBf8VqS4iTMIEKIjZlJX0c32bhdrpQtOzrm3pm5kELgNE9sW0Ax/7eSm2dhJYTEOA6u7Oy2TOpdwTF0rs0QbB8kCFveCKEgKSDVEDmhkoK4waohk6gF6Hj1YV1MomDdrCJglZRIAyF+6vi8oAeDqRXrnKFvYLaYmm7LQYKoRbdNQwCIqAc8tsGZeB3ArsocW7lFhBgGNI32oYY7iCe0bGy3t5RlhZxR3qGXAsMJ7hM6V9PhqQGJEzEpw3RGJDpbX8wdWzNZrStty3Bbq6nzVtsQFbhPgWkcSTEwn0/cn8+m/DnfMc33xGHkzTe/4/XXv/Og5984vfm9me5Nd5jBojwLenqX1kflHyu/cYhkXy4DUVVu64ZWY1upC1Jt3iaudc8u0pwI42g6D9EUYHvQE4JH+HHv0lJrpfz4/drRCN72/EANwWnrJsmO2O2W+UQdkpfPjBuDqnUL1Ragls7DaUHP+XTi7s6Cnj2o9LDHkYqsyrIVH5PRuhpQV1g2wnI1WAmiENPIMFoXQcuotUISpSaQdSOGG001HBcQs2zHF5x/4uMYjAKklLi7u6fWwv1p4s3re5bbFS1X8vLEslxRXal1pdZMSZZQBDFewOi6HuMYSSl40GOtsUoklxvbdeV6OXN5sqAHFe7v3xzK3i83/v+Zj2EY+P1vf8fTciN+mLht1m7+/t1bQ6GL+b1RhZBgIzitUro2VUhCchkkQ3UBFXTLlM2aOQoV0cIWN76tf6ZcbyiVp/XGLS+spfD2cuFxNfPnKI7CAGWtZKAGc4EvqRJEWVcXyhNDxkVgHk883L1iSAOBwJAGhggaQQcQAue72YKeoKzbwvv31mF7vV27YrE0HofaXL8tV1SVZXVENwiP1/ekZG3qKey2C9WDnuX+NQ/ne4KqSZKkifLSMj1YR96yrhTvFJMDMtGW+VJMiyjEQF4yl9sTtVTnL62mbzUNyDSShsjdm3u++eYrxnHg9at7Xj3cUWvl8fE9l6enHUHx19/NoZ+jT7WYrQiaueqVLVq5rXlvxRQZppHBZUBKzWRDKawrW2vnZ8UYnX9lJGtLHAMqeuBm2inlsiE1EEoklKblpqY9VWXv1FILGqs3V6zLxu22WtlXQcRb3uUFgp6G5nR3cT20WLNn0jsYuf+uX3jAiI/S74d9lU8yr9vvwTJrFXMgb8aQWgpVVrTcIAsaLlAi23pjWRbWdTUfmuI+U04+rfpRa33L7Glfj5+JHbrvJKpk2f8wGuozTqRhIqZGXDZ0yqkS/hLt033MdTlcp+9d8Zc5FNv4qYoWC0jNh6vpIzxn67esPcYDu18+5u40YPL7tgLPSivHYK+XnvbP39tLg3EUjCjt1clqglsaPChTFzkUIThUKiH20hg96JFnk8K0RpyMzE70NAK3HMav/V1bcNXbDdWVb1vZ8hjMhuBlhpZhfe9+v/zxt0D2z8nOoVcem9ActTAOJvNfa3byegAiGiIajEuxjwUL9vfxErzcgCGt6j5PLipa8tY3uM/+TIfV9IDQ/0seIqadMtTCOI4U1Jywnd8mPF9zO7Doa5mVeuk+obCXugz1MeSk6aaYkWlm26xdeNvMqyt7p2vD7Ps89/fQVl5zWE5D61BVkNYxBjFsJl6IGA8zNoRbe6lEoaPLkjdYb4CYHMWzEvr+6Zv4oGqliljnr+CcHktY1EvhzZAzbwvbap2DMVorfM75xe6lKt2oc8vb3t3bBVulbxDFy1hVmxWF10h079ZrJaQYTIZjHAfGceglLa32uz2m2oOq53u0DZZj6co6xrxLzHlktVaC+yV+rEl2+JT9PDmWIA/PaEGeHNbfltjvJUz793Ev6CXAJgjriH7OmXzco44lr5+Y+J8X9AiMQ8JEppNvDs+DHBMEbRfVgyQN/YI0qwMRdUEmaxGWGElxtAtTV6RmKrBlIz8VCo8FrtUksy8fnliuN6ouZFkpJGpIlPHPqNeSt9vVYE3XGqnVymrSZmPJUEpXvBT3tgl+bkoDCYz0bBdcQZR5mJjuXzOMM6dXX3D3+kuGcSKNM5hphgtvVdeuadyUHj89O9p7HdvXXzLRLbXyuK4QBC2mMGgq202Qim70NsSB03xmGiPnOXGaknVGqWn4+Pa2pxINVkef8TsaiiDimZl/SFMrzqB25RRbFCRNCKPplHg3gimVZv9aXK3V6+DRYNk0zkgcwZ2JgwslmqGIKS8vTxvfvb+yFfdEcxGveYpMkxEbVZQqigZIU2COEzkXru+fuN0WUFM/jSE5AjZ6mdSEyGquO/fpH1je+rFg4TgMny9S+/fDMCDnO2IM3D888OrVK67XxNPTmaenkRgKBe3+Qib81uZ4IKbB0T3frpw8uRXl6ekDb7/9s/PoKm+++A3zZAalwk+uX89O/lgN/lumjsKz7i379vCDX+mQEDidTpAiOSpz3tCSeXs+I7UypuRBkAX0G1YGqMVKKSJKKpBaZ6o3XYiqJYabNypshbxthFhIYUElQVCKVGoKiMIUJsKcPLAJoGLvs2RKe79jI0R16YlayHVztXuB+tQJzvenwbuyjFQtqtyWK+8/vGNZl55cgiGs4zgSU2IYB0IKpt6OlazK0WRTQDbfU0Q4DQNTbJpbtmYsV3j33Z+oJXM+vyIOs63pL3SUnPnrX/7Cljduy82QkxBNBNAThDYvDHEpXlranB9jtiB3pxMpRe7PJ+7uTozjwJdfvOGLN68ZUmQck++6eyOR6QAF177bZVsMeTJV5VoMqTHCc6Xk1cRo1JpGmiitlkyhlQlLF0a0RgX7u9vtuM6p/1973JWcawviaFCxqeU2UAJoqd3bTb1bupTC7bqwLiZB8P79B54uVxShhtCNv/Vn6i19ZtAjjEOiaiSQUA3WVumS03Xf/hy9aVLlB+lsz64tELdurBiENEZq8qAnV6CgFbZcWcvGWuDb28bbNbBthfcfnrhcFqvTrh/IuVKwrvcCPYptAyB45ppCYh4GokQChYHqG+1uiBZ80hj1FXAJcHOWtQBI0sR4fsU4n5gf3nB+9cZQn3G2dlGVnrloC
MQEzXtr31B2dOQn2Ve/8FFVedo201nImVaNDK5fE721UgikOHCaZ+Zp4HQKzFMgRYFcjeSlHbDpR8syPtbrea7d43mHZzY7ksihHOKlESfpmQy5IQI5ZySYWFktBYItGHGYdv7XoZXeWUNQlbXA28cby5Y7/B+C8OWbe4bxhAShSqG6rn8ahXkQM9vzoCdIYB6iye/HwDgOKMbvWYqpOB/b93/t4+egPR1dsz/wf4j/rdfhh8QQT6QYuLu75/7+FTFG3r07MQwjJWQz+M2O4vgcN6AtEmN6FuhXrWzbwrplrpcPvP3ur2y5kuJAXhdDH/w8fm680dDkv3saycff/vr3LYTAfDohY0IHYauZbVm4O5/QYvyKdCDpZ59/taqVrBCKmAq+8e2CdXapQhF0xbtaK7dbtgaNtFJDIiQIs9tcSGQaI4PYWCor1KJkqXAraKFLXfS1rAL+3stqju4lK1qsG/P+LJymO0KwOZf9Xt/WhcfH9137qRRzI3/95jd88cU9AzYOQ7SgpvF6TIBxs0QIRaWgUs1ZfBwJzdRWM2hlpfD+3Z9Z15Xb/Y3p/JqqL5eQ5Jz59tu/knPmeruSS2FIiXma93XJUawdudpRHQnC5EHPkBIP93e8en3POAy8ef2KN69fEWNANYM2Hs9h33NisYJp1OHXLmeyt5Kj2lvHi6tld96lBz21ZhOBdMSsawQ5Tp7zxk1r5wt1tWWDiGxPSabFBKbTe7TZqI62dSHXpuScMyUXltuN63VhWTc+fHjk8eliQMk0E1xcUYPXc3/i+LzuLd8c+gLjP27jfl84pQFez2C0drS1TGgIp/Z2ZAuYvARVlTUrt83ahK9L5boIWy7mhr2ZXsqyFXI2PYtNldJO0E/aYFWnVcfCJpEaIEkLeJqVwVH0S/qHq45GFOeN4B0L67qgItyuT1yfHl0g0aLhECN5HC07CREtJ3QaEQldI2E/y3ajxNGe/Tq95KLrYOT+3h/tka3V0QjnYUerjsrRLaLXXSr8GMvZ55AOY3+yjV21v3d3aznsd20CSoiEtsoGWxxKzP0aRvV75WWs/nH6jitYr3sbox6cqo01cLTffTmqux7btA6GFBUTRdu2jSCRIRRSaG2b7kas7AuGv+2vHfN8bnnr2bNbGdLv7rG0cVyk1a/bUdtI1TZG9cSm5ELJxqIV66a2jcofOWfWdWW4Wckhb14K8ZqmNIUvPY7Vj45f6OL+4Kv8+mAPrVsxBBNhbXYu1pH4kQZWA1nY19Mq9PLSvqU3mJn+gdoWWRX39xJED7210srKggaLKkNQDz729Uqr7vN+r6M8Gyci9TBOXDbEj1qtg0xEKGVlKytBhJytuw8MPbKN1kelBIJUS7ZF+lv3so1WihZvLLBEutRiau957Wv4stx++RvoR+Mdtu63Juuifq3VUbK27z0rC2nrsjKkexgS0zRxPp28nJVoDR32mqU3n1iH697JqqqdPiLtdRscJGqBZO2QxZ7EesBLL2Xu96zt963Dt2Qbd4mExtiaCI/Dra9L/ZwxZPM4x/peoPu6UhwFq4e1YxfiPLzBzzg+U5wQrmvLwnYYK6BuyrlvNYqSvVSBR4SCEZ3HGIgCk8BEJaowlcy0rQCsubDmypqVP75f+evjxpKVv16Vt4s6urOxba1zR3uNsItvHa6DLdp2XiFbsBREGIIyx4pbflmZLUBk56goym3LiAib3rjlal0CufL+8QMxJe7+839xvn8gxsR8vmeez4QYmeYzwzQzjCNffvUNr15/wTCOvPniK+7uX/ka5DKNnwxwXrC+ZbPJ10B73yBinx0nmXtZZkhW8pmngXGAGEz3plC7y68trU44jP7wqL8RnJ9xf4S+mbVPftxbpBO+8IU2kqL5MwWZEBGHgFOfBHHbqFot8ACoNpmrtHb32EYqkkbSMFO0GHHZtUtqTeTNzuJ2WXm6mO/QJkIW4basfPvtW/7612+JMVHuKvVkMg5TGpnHE7c1c1vVXNxDaNqW/1JHQ3rsH/bQntEHVIV13bhcrqaGuy3UvHUVYS1mWJi3wjgtFjw71zCXwm1drUuuCjH+kccPTwRJfPu7vyBExnnmdP9AGmxBrD86Fz4dlfxisco/6OZJMBE7reYrNc+zex6aSm87+kZyOFGtxnULCCQ5NAhEQkrEUJEckcFaxktQNqnefQtyKL2ICER1pXIlZeMqxmQl6ZwzecsuChhsA8XLGY40lFLQUtnSynK7UXOx7lyXkcjrwuP7t4QYqXUh19vz95fA49M7s8TYNoII8zjTrVOqITmFhaIrQbAONO/MMj2iSpFAvDyyZetUTeOZp8vTi91DVWuqqWppk4iLq3qib4HKzttpR/X/qa+/r1+9Yp5nfv/73/D73/+WlALbthhhmerlO1Nk1moijIYWubK2Wtt+dsueIRk/DFULZr15pUhxE2X1AM1KmGUrtHDTqqURLZvt91qME7auoMrpfGJIpqUTnToAgpbmXahd8FBEmOeTcWFpiY2dT9k2lmUh58Ky3LgtN9bVyoTLuhBVGc5nYjIis/rXnzo+M+hRblv10lS7gWrW8XaLOzSd1UpB/UM0FEUiY4ikAFOACSWoMtbMsFnIdN0Kt025bYW/vFv4j++uLFn59pJ5v1QnNtHb2p4d30MYevzffy1q9cMxCjqaX83gSsNGzqU7g5daWbMtuWtRwmoB0NP1Qnr7F+O8eNQdQmQ+3TPPd4SYmM73jPMd0zzzh3//n3z9m99zOptS83x+eD6pj/BIC7NfMObxC0RrnQdckj7sQY80A1GrGU/jwJAqMVinRlH1+m4TB7FwMx5QjuDdCA3FOt6gFh7vOBf9J8+CHoQgCTNLDN5FYO2vqsFq+46u1UZ07gGwZTjVW+Mt8AGJAzFNJC1UCs0Rt9ZAzjbpl8vG9f2FosoqWNCzrrx/957v3r5jSImBxCCRlAbm6cw0nwhh5cPTjRS2jvj8ax47rPscNLfsfdsyt+uNnLduU6HuiVZKJYZIzpVxNA2VkGzBtDllnSylKOjIMFyYpzPv337HOMyca2E6n/l5S9Snr++nUoifuhPH5/wDAJ5nhyUN0cr/w8A8T9aFlAtl20nfTYBQgUZPaehNECUinfcgIRJSpFZBUoAiHvTA5p56UZJvltK/etyBiFKidTeGKJStsqwbeS2uEePk9mDljCihj4mqu9CdlsoQE2MaCSKUvPL0+N7KPHqj6NU/f0KidWFero+sq5WzjWphQU8MVgJRipWU1XkmasiO4VamfVOyILcnts024BAnLrfLi91DVWXbFrv27Z5Kx5mti86Vl3tSKLvfVYzRuDz3d9zd3fHNN9/w3//7vxGC8Mf/+k/++OGt2XGoBT1BpJObpaFhSNdYakbgiRb0gEr1oMdw1VoalcPsZWopbGUzfmqMxHHwhh6hGY1u68Ll6dHPGfR8MkFGiZ3nY4GRddGt68a6WNAzDVMHGmpD8tSSo80NTpd1tcakbWPdVpZ1ZfQxHaIHOyEacf0njs8WJ6Tdvo5/NcS0p4K0tPDIQHcQkk2ERYQsoMGIosZ1Kaha
3TH4KeIaSxe5+iuddDEJIrzBAEJZCG3GhAG8K61IJIIAVjVHLOzPPM2ZX5NJkyB9l1i+5WswISlPChlfozjw7cO+gZw6+/iVm/fb0LZozElAjaHUk2WWmAHiHnTM6JGITk8jPEDnJkrIXh2jy4O2OMpJSIKY7jDOZGobVKjMa4hW6JO4Vve7YBQlPba8YaNqqzh/0yO5OUs8lmcdqgVQcwTWlF0D5FgzXQez3cn8WNHDnoTt/Q9v2XnUv89HbPxkGmHGWOUyxHOPGRp2v3WR7+Lfs1KKgf51OgXEzb3b8nYq5coIcsWLhK248h+2cRm2/t4LIrZ9iZoQ/Rzt1P7vL3eIrj5fHc+573NXB8v4OzrpPQw/z/yPi8iswObFqtQ6BGScQYXCBh1Lb7V7dtYVmixRg0U3youvLrym6/4K52VFzA0YGB3+x+H6Nt7pQzcbPYhObujdoatdWBes11YQyTBKU1QasJw9YK63qjtThidEIQskaUidYiVRu5VRTIaTahIgHVOmKGSjHXVnU6OTjq6fFL0Z+D08CgbjUqtSrbVhGBVrehUGoNQ9i+xOgxHtrvN+yogR2EyXj0zXKHv13oqgvaLuP2+B2bkwqoxQ8EB1OODVRBtEMmweiTYBt3HG/f63cukA5+xt/3+7UrcR1riuZK8M5wULNutK+5/jVFJDj7ZErOFAvk4LEIAVszGTRGthCofv49PivixugvNA53wICtBGKItG69+31Wd1GFj6SqDZtDRaQNytu+2+OWGPfM9lgkRCWmRkoTOU8OeAulFIIopUATM0C22gaIuGd6is/vAdiOxXnUZcZQJVeyMQaSu8xyiuQp+VyqSxG7nhgzEgLTfOJ0ungsiQEdY69214A68OkgXLUrCHGB/0IGiRg4iTEaQPwA9MRgACQ48Esp4kt4gH2RDryFGDMxZaQDnRDcot9FapebQZRaCkUCWpVbfccWbkieSKUR3r4jxsjpPJFzQhCSGNOzrSvv379nXVd+ePs919sz67qwbbsBjHYFZqxCXAK1Vq63K8/PT6g23r17x8PDhZwzKQhpsBaOWpqiZaXVDa3bwSh1qCddl+jQh33NijBkC/syftEh/YcxWfmBabWfwREn6DH+TYZJecDih3Evlz/+/eMrZ3r7/ZJAlDBcSSFG2+8wmN/aGlvdXB44IO6iBMzA6UK6g4+jEf/RuX7wh3F7PC747mx/bNzBw7/46T4+m+lZ15W5mA80hECKkWnK/jp43AZA4/n5idYqt9sztay0ms3FMUCPOghw1OjnrQ4hTSHKAe3aLWkSmeeJyEytE4hQnOGpzvDU1uyYMRADECKqmK/bAVitK9frRggwzzOXhwspRmLKZH2gBzO3cWIBNNL9netqlOy6Ktum1GrnHAfNniyAOQVSzKSUPQbYLJxWYV0by63Yb6mCriCRECckpI9Q+c821NyODTGuSWS4tkyZfRCUOu7//lrVhKVIt95sVGdl1AVpbbZe7N6aR79vKDDLzVyFHjhJMFamiR9Ld9eUQquHWK92H2y9myRhXGc3S8zNcTCO+q3wzwmCGr1m1yrNXHlq7r8oBnrmEJhSJEQhz+bGbDGypkQJxmI07BEIHsPwC42Dpd4DlmOMtJZ8XeKCrKJFCekQMDqAzQ5czTptw9UjwY6rujM0IkpMyYRlEObT2QTltrKuiylUYCtmDDU1V3Bz9q1qB9B6YDZ2g+hudACkzmwQxnqK0a57njPzKZshEyBGE9Z5yuRpJqXEw8MrHl+9IsXM5XJhnj02LXaAZyt9yHVXFjTFvEk/zar8a4aIcD6f74yH/j5wF9MTRUjR1lePyVJVQkymwETM2IrJAFMMHlzOUFZVnQUrG7TIKoGgsFXl29s73q2VFhJl/iMtT8Rk7q2ck8VqhUiUMJj+1irfff89b3/4nuen9yzryrJYUDMqw73VZUOMkcvbt3z77bfuWjRZknNmnjKnqbvePOZMFV0WdFmgbtS67Uyys+T7DA47ajwkuL5xGfCSKcxHFuWIbfrKUhjuO3Rnu5Cu7ZxdDbsx2t2dH3Ap4/eOy/IOQHh83nhfLW7GDHJxhi0TPJD8NGVSiNy2lXdP71nbBtjnB1vuMQ2tNdoAPMcT+zP66xOI5lPnvY8OavSDTxzAjhxe/8j4TKaHjzZhCMEYlxDcOgm0GlyAFcq2WbCpu4D6VPUFt1/kAejs5vbdRfRXxsqEYZnZ97o746AQO4IUIbiS1gYiBry0KZVGa5BzRHDXFYEoESQOt6kitAqtmqBohRHQ2ZpaoK27o7pi2R/Bs7h2i9mAUz+GsS7dVRiikiQTXph21dZQcQiqulM+dz+rdxvn/gA7YDh+xa7NEErrc+EH+NDVog4uLJjbFU2nT+nzujNIx2y8/mO6L5p+Woz1I3L37hAqfnyR/qaj7uNmVbk7WheQMUBy1ioHi1VrocciGTtU2d3NwZ9f3qb8cBwAUBe+soMavROyH87uB2DyDijKWMf2NxnWoTE+kRiTZXM5mzMyZRwQr1sxt4aag7fv3dba+MlPxhfpPkei4tEkMtznw4oVBwcJYmPIqc64Js8uTSl5nF749O/59fb7010U8oJ7U9yVBLu8PZ5XBz2AuVrDLi2jz1lw422XP9EBQV8TbpR3WYQOtrzVRhNjsm+3G0/XlSqB5bZSojH7p/NEctAzxUSUMNxxrTWu12dWZ/37e3Yt0IWq1EopJnu2dWVZFkSE6/XG8/OVKW9omxCdXCF3w1ChFmgV3HXuV8JgRgaZc9i/MnK+Bsv4yel+wdE9MB3CyPEPdx9kLLvBVH3qYD/pR7kT3j0Gs6/lLhuCx/bEEMkhMqVMTona3PjrRu9RP7vu/fT5HX/z49Pthxlf/TNzcS/Zfxzs/JT5/Ksajh6FaIgWQBhjJOfEPE1EMaurlpVNlHVb2NaFLSdjZugxNdqzn+8uo+sH+YQOFv82LkS1NbT6Rq32ur/fo1ZN4HoauDmGwQFOyhbAmVIYsTridD3+W83PZ9sqy23zDbyNtNdthbYJrUEtRuMHAU0MIOH7E1voFTCh0qqAWryHqmfIVFhbAenp3j//6IJGMYXQMGAYEFCh1mIgTNsuTAQDbzEgUSwY0FS7Ue3JVZD7hIc9IvsC7+tUD+dR3a+vDngQY3W2aixPre73PwLbvgYOLq6+4HcXWA903u+hHGjZfk5m7RwfAJ7zAAAgAElEQVR3Swf1+yOIEjE3V9RGaJUYAnNIzCmgIXJKiZYTTQIlBmoQIwd9vn/pYeceDgrdr7m1kW0ZU6S1yT8bhy9O6PF3HVAc4hF6AH9nVFHbf9osqaEWtFk5iU51d0sT6bE3YbitOoANyK6M2JXzB1c15lPogrgzUOJxgz3misGEhGDP/SG+KbUKrRRasFgfUfGSDEp3UQ6zeAj6T9nYP99QVbZtu/v3cYRgmY0i0MRWlggDWNjZCkj1e2juZ0QIauxj8+82oAUZ8VPdJd1djbfbjaenGwV44pmFQPAg6JhMUeYQDmyGnettWQAlpYiK8BCCZU426G77XtoiuAvl7bt3XK9XFs8Ij
jEyT4nTnD1eKRtAFeEkgTkIURvnemVGB3tlWYFu6CJoXxu2kVExAyV4dugBerzEZPq93Q1EW1K+hnbROgw64F63h50UCD3mBg4G/lFP7vFY93F698ZmdHYO6fFXVq7hcr4wJSMvTtNEirbHtm0jxUhpjaVunjnbqGaycJSZONBGnFwbgnkHewrssVR7WIHD4f1Y/XP9en4MIP1EBPt5oGdY6sGtuUiKiTxNpBip9cTDw4WyrYCyrTdqWVmuT9xuz8QonE8zIs0f4RhkPuTKwZjzzXv4e3/hG3NYF6XQisUbWWq7u01GQO3OqKAVoZFi5DRHywLJkRg9qyyEQQ2XBlpNMK+3lbffP1NrA6n2UKglUjZBG2xbY9uaBw0mtAnahFaFbbPF0Woxf3SDWgQ0+yXZZ2tT1rJQm7njXmK01rg+P5NFIAaSg9gk5owptVCapfuL7rE4MRq4kRjNOnTQE2NmitmpYq/hos0tboYW3cOU9rimUhtb9YBuMa93a8pSGqWnR5e2Z3+0na3YmcfdSu9Bk8Yw7KBHHOCo7L5jwZnDsG9M+/A96ElAEgc9rRFbIWvkEjIPOSIhkeaJkGeqCEsIlCDUoGxUqq4vMo8/Ptw46Rk5fjG1VWopxgykwFxmZ2nYGQ0MMIizCtEViRkQ5oJq1TI3u1uqNgc9ZbG6WW0DTyPGAVi3EqOXQKA1Qqm0YaT3gEoZ58t+WqM8gelwoafgRDdcTKDvwbQ9azREYcqBORvoiTQomxlDYaOoZSNqNSavK88haHvygigSmsWhvZCubM0YljtGVPc1f8yy6/Fw41T8RXTg39l48ewci7+0+1Td/Gp1T0/ue6pQ2LaNp6cnvv/+HWtrfLcWnkq12KAczJ2IuOwQz9CzUAeCIB4YnkU4e4ZZLz9hun9XlqVV/uVf/gXwPe3gLadAzsagGiuXSTHyzevXfPX4yCkFfvOQmc/mvutegB1KGLpLMaAxoiHS+rN0U+/lQI+RUl47SdtQ+oMpPICe4xBfc4Z5AhowOZyjB9pD1Z09q4eyH2a0+z7aFSkGJyyGJ+eJ7OUYOqY/zTNvHl9zOZ1Nf0UrMTGlCRSWbWUtG0/L1eLycC8OPY7MAK24cTzi7rzu1XENN47JP+ME/T7o3fRBl03+iSPwObyWceN+fHw207NToz1uwIWqu5tSjKDJrcDN3Td1PEaW1EC7hwmXnQLrdNig0jovOP6pI5J/F4S6I6bDxxWsGJV/Tg5KsKfQhrBTd8fr6zYfygg6LqUSgg4/a60GWHqmUBtKmfHchptGqcVAWAc6dkE4K2U4rRbLOvvJFObnDrV6N1XErT5x+WDgp1Pdu2uqrz6/N7unBHEra7ABrW9yxnc/XIuDqWGfOwMpxvh0V4fFfvSNst/UHdDs1kM/1cExHQDPnYvtAHjuXsv91huZQpjCc7VvDzXrOQokMaWbPeiyusLcgCoAzVJ3f+FxFAJHCwq30nrG1Ng3R0bMv7rviQ4owe5mG2UITFFapqMeEhb0w2P6Xt6V+f093j93L2OO32++IUVgBGZ8YMUe3TemjL2mT8CL8bns8UxOrRUNBtBVGs3l0xC4xz3wgZB+qVFr3QGLyC7f6IqlM3c6YkKO82QAs42g3eAfaGrK04wO8Yfu039gT5sa27NtG2ut3G4r162Y3BygByI9I9VAToyRmBLTKSDRXWspIWIlMMIo5tozaZWyFtZ1Qb3+07quqCo5hQFmLXPMXC4TcAoBzYkyC0i6lzGyQxlbwz0A3SHiYA4+BTl+3qGjhIDLKOl66NOjs537Old6qpS5ozykw6ICAEX0KGP7npfDv/a9jAhRDgUgfd5jjMx5Yp6mESTfWbicc9/1xM3KrdQuIHQ8+ZrcoaQZTL6GObjMG77PdkDfT+YIQj/CMIdrPEqre8Dz4/P5WaCn37SUIud55nI+MU0Tp2m2ANVW0cvF/LilUMuKiHCarUDZlG3BphiNNpOwu7d0Bzl9/3XIoX43+0ebQt0K6+3GertZkbTVUKexN5OBCmnEqpRSqZsJ6RAgTkbnT1PifJ6IKYxpaq0ZS7RaxtZalHUzN8vturFtnjp5sCi0RVQtzTXGSKa5pZncZRUom7IEc6210mloAY0IYTAetSilVm63ja3UF3NvgS3I3APWooWEBrGCYJtCiIVKNYEmHpydAhI8eDXs1GgIXSHoCIrsLq6UvdJrDIN67jW47li9brE4eNxdWgfQwr26OarVD90ADMXoPud2ALx9xqUrxB3Uja8H8ZR0TLE0Y6OCtuHqSjSShy1nLSTdiG5dZhFa28hto4a/ypP81w+/zcMV7UHNwg5mR1p4f0iw1H354DCH+yMYqBeEKub26+mtvR5LBz84AFIHfM1pvuYR6YIpY0s9/+B3RQ7M0PF9pbnRs7NASoi7cozJal1FwdxZKTj7g2sKaGWhCBbjo4BWgkQDQWosdESRmFDp8XjaJdL47yVGa42npydgzwT9cMjdctUxTx0MHavgpxTH6xgi1Wv8hCpICoTUjCHXRmuBSqXoMajcf2lYhAzA0AOlo+yMYg+Uf3p+Nu++u5M66GqtWxcHS08b2spgJHLOQ9fk7Bl6Kdt1xTiy23q6fa+ovQMZ1x+HOBWNCY0NTYkWExLbqDL9cmO3/nbWpYP5UVhgv8WDsriXd00tdb+2htQ67n0M0cFGo3bDSnYQEIIMt3FwN2SMlgg0TzMxRE7TTIqJ0zzz5es3nE8nPx07v5gzpSnrthJuV9Z1RRpoSJRQaHj5AD/lKWamNJnnIAaSM0pmuJq82MrGtq0uh/ZyL6YzuotW8cjIA2LjeMfYdwAdNPzF8dmSOAQhp8TD5czrxwdyzpzOZ0KMTFHIEVotXgF2RYCH84XzPHOaJqacmFIcGU7jLN3wAvYbKLj/d1dU4rehLAvXpyduz8+sz1e2643WipVTP5+pVYmxUYpyW1Zu1xWtlRAzp5NlcMxz4vIwk1KglMq6bLSqbFthWYu5mdbGdWnUBstSWG+mlGMUYvPaFpqBjCCkCDG5Ug12/tWztLRugAkYyzbwlNuQvEZJY1sb21Z5frLCir02zc89uuU058zlfGZKXkfGFdMGyLb4dViWnoRAzLKDnri7AWMTpMc4bSvLYi7OlKzYmAgOqkzgWZXkHpsjfbKN4fIA1+IB4h0c2TLp1oW6FeuLXvcg9r2SJ2O/KGouCr+dwcGOWSOm1Hcx5EcddZbMjYCXYpBWidpIKiRtZG1ErUy6kRogwhR6NWmhtpvFbv3C4w7w+Bw2sNTh5sKmNa9FJXtKu8oQeAYKGe4toSFYUkAMHisnDdFqxeL8WWvZ4+e03bGdnQYUDJjEGEb4zH7uIISDeLAF0BBqdxOI0oMYUxJTjkHIyYuCir03Z8tOSQECdk613NjahkiAutHKbFkprRLrCWtF0RlpQbBgZ5NRDuReyB4ppfD9999/NJefcnXdgZ5eYBJzrffsqnmamObZ1sIxLT8HSAHJyVqV1EYTHRmZpRSLwxF3S+/02Si6JsGUWvRyHP34y7ry7vrMVjaaCkX34qXHUgB9ynOM
lhEZhClnTqeThwhEpuxZaJ6dlno9JQ+xGLWG+rn1+4WDIQc8krweUJogNYu5TAmNLxtv1+sTHZkXGczNB7r8UDS1r/q+ZwRFSy/yKqRktZLAQGbwfabaW35YZl1y47yDw5wil9OZy+nCPM18+cWXFsuTM68eHpnz5AV3zXV9Wm6IWCHJGCLL7YY0YxhbzDSpPqe2rh5OF14/vDLGL3RGSQxUq4WZXG/PXG9Xkwmt0jpjrNXBm3uI1Oamw6CjkOjraBi3P3F8ZkzP7tKyuhhp1LeIMUJKtJxpTiHj1ltKyWvV7FRXGELUTtgBPj2dsSu2/rudQuuXpq2N+J2RGeBF9YL3fYpB0Kh31pKAFzbc03ljDNTaF5j7tIu5VnqMTmumkFunZsXaE4gDBe1BlZ39kG5AuIBqUN2yMZccg643QSbDNVad9Snl5QQr7CCkz6VtLrvDQzCKMR4S90DAYfDJ7tKSbp2427HXcoJDpV9sxv0uDHRu/97pzw8ztnZgs5/7eCm7iTGUAbswGSLkKEvY19K9C+uOWPVr8uvzaxPdrTOhB+pBwN1d7p4TZydVmwujXzCQ+QPw0F98aAl3t9buFu50qx6+e/gefd578bLjnuxAoPvqP+Gzd1pvZH8eTvBTRlyfky7Agd2j9cH19vPqGUpBGIVB9+vuzAIuaNVcuaHQJIA2Wi2EUNzwasM42Q2v+7X2UqN4WZDjOMqx+/u6X+deZ8iZNK/pk2q16sr+3SBCC1aIXA+uf/DgZldO+6brG0c4bBiXeYdg+YMro5TCum5Uha11xnY/lrAHuUpOpAAetTsycztLZbE6iRATKcTx97t2Rkcjuv9An3//rIrFN3XQhshet+dFh0mlDyTM/lr2z5gu1/GJ3UW/G3cySJ1DUUFsfTZsLR/DT6R/RsQZfiv+OE0Tl/OZR6+AfT6dmaaJ1qwTQWuWoDBNEwBTtjT2FCItNFKInqIR3EgRpjxxmk87KPWYntp0xCHVZmEixi6K1c9Tter53jdIpQ190aXAvu0O4Mdde4Pt+Qvjs91bUSxdd4rC7MGBl9mi6lsOlCmhzZqQlm1DEF6/es35dOY0nZhiJonVLjmKzEFkyZ3cHX8Dr8ypSnDq3PLGq9dRcYpNdgtoCIIuDIIVaeiyvdTG7bYRY2BdNq7XzRs2VpalWAxPUcrqmV9191ZaG4boGzxhkWY24XmabHFFY4SMurVCWqpC3YrVmqGxsAAbtSq3ZfOGpYVaDhH/LzCOit7SeL2Qovde2lCmZQGnUENM+wY6gl9nSOjZc01prQyXhngxuz0t0hauxYDoyI6r6tWT6VEzezq16t4+wv7dhgA4Zi/0z+7KtG+Zo43gCkH24OyR2fFn7hEwyrIEIIfAnBJTTpymmfN8chdZzxfpytF+K1GBj5XYy40OUmS8HoJfdbe+xFxIx4DwnVFw9/Oop3S8I7srqjM421a43RZqrSzLwrKu5h5xprKDkeOcqu5r8CPww8HmOVL96pW5VXf3Y5/DsKetDyjWrLWFubYC6ixGCma4mJJWYENCgiA0CqElJCoSKkpE2owcyksM2uIXGvKJ9el/gcNe6W6QHksJwWOvzGDr8M2q9rrV1bqxYrVWLMlUPZnB65+puRVHWlyPLA3h4EozNjGIIJu552/ram1ktrpnXbIbVj3oPAYBsdYkp/nEq8dXXk/pxMPjxZjKPFnZAQk8psQlJXKAnGWP92v4mrBA/N4kWNwopyotJlqM1JjYYmSLL+neOsqd/uqA3O+EjNwZZRze3l97kSHESVTbs6lXe4VhXAQx92Dypr8xGVM2zye++fobvnj9hsv5wm9//Tu+eP16ZG/lnA+gp/L8/Mz5T//CbbnxcLkY27PcWMvGst4spT0ke0jki9df8PVXX5FTHvFdYHW6tlKotfHu7Q+8ffeOWgvbtrCtK00bt+3GutnrdVusnRHWaeFDd3IPe4HP05OfDXqSGCt6SsI5C6eceHWeySnfKZ8ehIrAV2++4uHyyDxNzHn6APTsgMf37+Gq7n9f1YSN1DYodGoh0ojS17zlFJk8DL4/g1Ogyft3dUFdvYqysC4bz9eFWipbqWxL9YrJ4rUkQAkENWURJBPEMgbUq15JCEyjoaoFgsUotFZZnt+zFrMwy7ZSNlM0SzGh0JqyOrukzip9UB/wZx+mxI0ZCymRU2I+nYghsqGcbjfPYDEXnIgQvChfBw3J6eRK8zT35qCn+rlHr/8gwzrTpmy1UTEfb1Gl6l4eoEfedOBzx/xwqP2ju3V6DPQEPmBtxpvj5QDCMMBYH51ckrv/cCZHmGLklCfmnHg4nXk8X4wlaN5ygd5U0WJdRAuiLytY7y7zeLkOKPp8KZ55mRLSGhJkFCE8xm/0+erW4VEMd2YSfD6atZ15fn626rrXG8u6OmvZCxzuAOXI5nV2oh93D5j+dGG+DszU9VlwJRyijHV5wFC02tgciLZmlc5FlBaUFp1p0BVVM2JUC6HeiDETUoNQDPAXt7pVB7i/ox5fYPx5oPPRJ+nARz1urTahek2x2qrPsQ4RqyLQgjUSbntsVxMBj6XbNlNSW6uUpqZrDVV6aQlBQiQewIhVhw7IbWHZCtfrwlYK19u6u+r9umK0vl8hBov7CxeiN0X98s0bpmniy6/e8NXXb0gpMU0npnk2kLutyFbcrbzQdLELCyZIRHqWW3T/bEaSIA1azpRcLKQhBdb00qDn3sDfQeP9252Vup/X3XiHne1xOsgqewZI7vbruKnHK1pMjbFc03wi54nLwyN/85u/4Zuvf83j4yN/97d/z1dffkVMidPpNGrzlLLSWuPd27c8PDxyfX7i/dN7Hs4XluXGVjaWdaG1SooTOZ8IMfH1V1/zm9/8xoLaU3bQY73ZbrcbpRS+/fZbvv32T5RSeH56z+36RKmF99f3PN1MjjxJsIbRHueJs+UfMf4Cw731E7bMX5W91dNBrVCbpYTmFOlR8QpeZM8UQIppxFnci0+ll9vebchPRLWP2I+u+ZpnY/VMkQ+vdbeyD5wG3V0zWAKgFduI1ljUmn/2zClz0wjavCWEUQjcrdKDVd0p1E7HdpTdamULyalIj2+oXrm47l3crR6NC6FfwJCU+/+Z8vCqusHjAz4MEhxWyzBK3PWgDkoPLpNPCe2usPY52DMbOrOlftR+nE5bfpjeqPcH3hV3X2fOefY1dWR75PiZj85xvz/7TMtge4RjrEwkhuhWc6V2VwO7URxkD6D+pccOgIaT7sDm+F8/AIwfff9w73fAcqib5GUjxsOV7IeARZCP7ve962w/rw7EPj6h7k71sgM+SeLreD/eDsrU8+FbAyuVgZVGaIqG7lo2pqhpgRYdDHq9IRGsYaal1nd30EuOT137j7nURiyauvzve0v3Oetur0GxDSaNjw0Id2u0MdeDQBgyQ32hD3nhtW+kAyPd+/BVXyN+ceOHgwZLathpPW8bFB3oZOb5RM6JaT4xz2e/Rpvj0AJSN6hdpticm0g6gIwD098pQQ2BFq1560uOj4HM4byOb+MA6SCT+7+7G2tIx86cezX
l5ADSZNMB9KRAcqN/nmayJx5dzmcuFzPQHx4eeXh49EayFu/aWqNs2d1QhfPpbBX2a+VyPhNDoJSNlCKtNVKamfKJGBOvHl/x6tVrBz2WcQd4MdBMKcXY4NvNq4BXqx1WC2vZWIvF2kX3QBiINZcdh7swXu+C3TMTf3x8HtMTLBOrd79VbaQUefX4yOl0tgXuwVTrtlnXXVVKXXl6ese6JLQVTx3d6/wMATWaQPmkjwq5Cq2xLUaDLbcnnp/fc72+Z1muNG2EGGlUWrENViqsBbbCaDIYPKBq24zNad4Z3errFG631YBHbZS1OVUc/TaJMwxejM+ZHwlWiTpN84iKTx6o/XC5cD7NtFpJQbjGYJlmy0KpV8/YcreO3gOdg0x6kSF4Rhbi96u6QDMrf9s2At7B2YMBu3ZRp5GbNFrpQtNcCE2GjAF6fFRBRCi1EYLR3Ld1ZS0eLL41tupg0OOdmrsxeiPCOxYnGIM36vGopyJH68Nm2Wb2mdJrNuFpu6FX8N1dWh/Jnk8o0DD4PEtTt4cQQyLFCW2VSjEBLmZh9vL/OadB8f67HBK88/W44F4DqRBawyoVZ0IwKz/G1QBvKYhXYCqljAayy3Jl3Raz1p6fuN2uzo7uWT/3cSiYgXJwm32SofsAlB2VRiTC5IxP2NmdlLy/nvSK7bsxNZR5s/UZZFeahoasKKE2oZWKqjVNvklgKxshZPJWCHEeRTMtu/CXLzj5qXEP/vf3DNT5c+vB38Y+Bwm2fr1vlpjlYQZJ6y1kTBFtpbAprBWW5gkMIRMIIFajKkSr/tyZoJAcpNQKcWOzeAJ6eQOwqtEWE2rGorUnqSNNPoRwqFPW95R3Xk+TpWm3QtAVqjnuWjOuP6gQEl6vRiEnepCXXM62dnIiNaXN8y8wS70FhAtIf31kJQf7LHisaTqAGw9GjmHcr4fLmYfzhRjDyI42160BnX1fGIg8Xx6Y5pnL5YG//ds/8NXX33A+nfnmm294fHzcmbo+F9lCVUSV7csbl9OJVw8PvLpcPMjd9YYqMU3kbMkAr1+/5s2XX5L8fDqrVTab11orl8uFL754QykbT+/e8fz0nm1b+dN3f+L7H75nLRspZeJTotbK83JFyup4ope+Oax9R+ODm/iR8VmSOEjgfDpZry0HIlNKfPHqNY+Pj5RWTZFp43q9UttmRYzKyrt3P1hhrFasnkky5LkLP9kpKkbY4rii1irLYk3plusT75/e8v79W7b1Pa1VQoyoBhdKja1Y5tXaa+KIFdBrrbKsZSjjXmG5lMq2VrdadcTUxDB58HWwHjW+B6NaHksIkdP5wsPl4v5oc/XllHj96hVfvH5NddCTQmC53Xj39i2l9ErEatTx8Hfb9e//f6EhFqsjWM+cDWe7qgGUrVQCQg7JUk679WZoz9wabtkLmNvRk3D8o4C5OdetDIYHscKDt3Vl3QqtWVmAzV153aWnqsPVd7cUjpVVPdYHj1OZvFprt3bsHopZmEPZxt0aGp/RnWH8BBgS4WPA4669FBI5zDQKm65oc1eSRGLI7kPP5JxfcDJ/bOxsi923OLLWwN0/Xp3ZQM9GjG0EiYLfZ1+j27ZRvBP6sj6zrjd3aV253W4O3Hdrth+nu4WaqrvOLBtqNBntbjDprjgvc/khWyQ9TVetgb0H1o+GwVhBwtgLonns2IgfwmpR9ZJ+qmLVCD0Ts1E97mdjqwUJyQpP5pUQJ8sqrMHazrxQ4dA/Nz4Eh39uHF2B6sxUa1YkUqKOGJoYAknsMUAPzux4D8OtFtZSWJtyq8qtQdJorYecNQkpE3O+Y/JCykynEydViCurKtIB82YGc4iRPOXh9q6tEVqjlI1lXRGxwn5G3NieCtHYjJg9fKEmYr1BCX7eitaKREEmB/hB0By8wmhEWiNMmbBu1nJhecnCoc4NS4+TcxDQQc9hvmL0LFkRcp6Y55neMLhnO09TdgYl8uUXb/jyiy+GsT15fZ05Z6YYR4PZ6A26Hx5fMZ9PnM8Xfvvbv+HNl1+SU+Z8fhiByt3yPhYhzTEi2tiW1Zmfjd7CqAPJEDMpW5r6fJo5XS6+v13/OuPaDdXr9RtutyulFJ7evuXp/TuWdeHx3/4/XE4XlmUhSiQQ2EqhBzHXngFW2104DXAXB/xj4/OYHmHQmEfF3GuANEygaTOWxgoSFkrBirmFwDbPbGUF8sgqEBGqyqjj0cOFgRFk1/xml22zjr0eZNWGBdMLT+1esF7gru2cLHoQ4q23OKhtVGzu2VnN72hTs/CHq6Xr4PFvF8pB9iwl6Uqmp+vpCBSWoUh2OpZxJ52n660w9GWBTz9XuvNFewNRuO+BhNPFTr4delT105d+0w/LsLutvCLaoMY7izN6Lxl+tu/4IcYzn1jMRxagr8NBsbvSPF6jw5r+elx3twr00F/syDLI/ltH996BoB7XeXQj7Ip/Bxs/oqNedrgl2d1aPR7AztlutCn5MLIgu3vJ3BFC7y+nTYe1ZvR3GcxP769kI9zfx34ihzW+g6q/cPqHGzdeC3TXloRD0kL/mf43/3Drbx89OwrDLOxNjX2x2X/BFqUWz8TbCGp1ZrYSRsHMlxp/KZ5nBzY/fpzhkjrKqg/W8R61tn/HX+1ytD+a0lo4pLD3ud670wseJxeN4YytmX5wwy60iqrHEnqGT8/62o/Q3d2uhD++A/QCjMcdOa7XP6N9UfQg+l4osTWCQpinF2042s8DGHphvNfn4SDL+p6JoxRAb1ZtRsI8z8wn6xn3+PDA48OjG1bTaPw9p8R0qNEUPDv38vjI6XzmdDpxuVw4n86jfVSM0fVs2zeJn1/wc9HUiGrB58a0Bqtz1IOkk/VIy1NmyhMShFotlkxVITLWockKr+S+bdRaCCFwOp05zSfAkoKmZGDMjJjgXXI6PfaJxf+zu7ckcDqdmNyFE6IFBW/uylrLxlIWSq18/8N3/L9//CeWdfX4HxN033z1Fet6JU+Z83zmdLpgrg+lFKfEZe/lIr6Ka608vXvH9fpMWa0/y94bSlCJNCJFI5tGNnfZmPtIXEj1+Jze60pBjBOPKRKj3WDrn9VdK+KZVh1E+T3XRmiWVtdqoW4rtEiLCW2JVoXb7Zn372RYwuu6sG2bydYYCE0sBXrUX7KNbgXb3HL6CYrhrxm2ISxtcZ5nUorWJXmzLsmt6t7NfAAX9h6cYkK/dYbEXV6qXl/HFcxWy6hE2psaqipbr7YMlBaorYNcP8Gj8DqkxVtQeu8/Y/PTrZFpyp5BtYOhzk6Ap++G0GWgV+bFaHNXpGnUkPrAivBjWoFFO9HWCrfblfdRaLVyuz1TtsUaMaY46pnU2ojp5RTkR+MOqO2pxNZ+wP7eQX+rhbqqv8eo15PiSoxXEFu/rTZn36qVbtBGqSu17kxpzyfvaap9DruF1124IlaLJedEKRb8XaoHKPslmGzbU8U7K9erp4NYTQ/P6jDMYhNq5UBF2nkAACAASURBVBWSrdFW3LXj2V7NFGEVoSCjV6GRwer+LzsDlYpKAypIQ7k6M2lV2Gt9uYy8AQAGC2C1aczlU4
cLsdbdxSD7x02BuSzpAHbE9mCulNRTl72SfvLaRBX1YpG2xxqNqr1dTBtVli2AeSLPJ6bTqaMrVCGfKpeHB0LO5G0l5IlSi8U3OlMw58z5dCIG6/P0cJpJMfLwcDE9kydiiKNcSCmWWRsQaM1rxbghptjcjS7uVlJEY3fLe7JJtpCMUK3kSXr1YGzSC499L/ZHclCxG1NWSNDcjXM+8frhFSln3rz+gi89G+rh8YHHV4+klHj1+Mirh1cGauLOBuVR/LcbXeZKPF/O5HliyhMPF6uxF8Sy+8q2jedWq8+j7T0r/FuH0dAfIUTiZC6tmBJpmumu62Y1WgZDbGtyvx/G8k2EWpnmmbIVYkh8+eXXxJhYlpuZHkG4LQvLtnJbblQMHyCyVxbf7/JPmovPc28FYT7Ng14LIaD0YnSJpSxc1yulFr77/k/84z/931yvV0/xNGvy+fk3NN2YppnL6cL5/ICIsK7mXlIYzdCG9aGW4nx7fmK5XWl1ZV2eaVos2A6hSaSJg54WKQ17VNuE1YORmwqq5gqzYcHWwat8ilihwuK1edalsDj9vltNGOBRiwdqpVC2FW2RljLkTAOW6xX1jKbr8zPLsrKumwmjEK2PT7e+cPtbIUpgTpba/6lU6p9jGIWaHfRYoOCy3LjergbMFFrtVEtjZBsEhoXcg1ltN3gKpVq7jtosBmrdXMi50Kw9Ar+zDQjN8u/s+D3Boz9s545U+RA8g8xBTzfRc/Jmt3cWown8GBfzO4c9luTo3upUvwSrQzFNebAdo1KoM44WD6FApTW43p7pLRjW9UbZVutFN02WJhvCYCR/8XEAPjZvPd2VQTtvpbAWE3K1KLUw+uZ0IXJUsupgCdRcQXpAqXg2YLAUZm3mf+/3cbiZfO2d5oktBKoX7bS1oAP3djpOpHNTBnrMisUqpXvvr85ImI1grjqwbM7mrI7YCSIq1irEQY8qBMvuJjSzg8b1YYbOVpdxz5a1uZH2MqCng5IjozlNE4+Ppuy2beO2WHkA6QUEj8xsB4GDnXHWznxdozBn74uURtPo0DkWlIYEa0xa1WI1t1ZZSyWkhARzcSUvfDjNp8EUKDCpcnl8Rdo2Si1M52Wso16tPafEKecBei7zTIyBh4cHTuezZ3ZFZwssDjOsxfe3EhSkqbW16G7xsRoN9OAVtS3gy3RKuvja7kznCzJ2wAF8dMATvet92N/3dR6xz56mmdePr5jnmd///m/5h3/4A6fTiTdfvuHLr74kpcRpPnM6nT9gQ/Hq2P1G2JxLCExTNoMsBKZsXRR6TbXajLFdbzfqodktuLfE189977tInuYRrJyn2dZMLdRekf9gxeh4Vmf3hFgrZT5Rt0ZOVurl8fGR2+1q61uVp+dn3r5/y/v37+heE3HjpVvhP9W1BX9ll/VjxkbZCstiAY9rWVm2hVL36OzbzUCPVqPNn69Xnq/XEWeBI91lrayLKcQoRqENe91Bz3K9si43tJmbq5ZKrcUK+VULCq6N8ejWrM/7yEBwDLa7IT5A4T0mpXdc/5E7Me7Hx4/mNQh6rZDiLgAP8hTfdEdXTTAh3FOo790DLzOOx79bN3r/euBpt6zste7AQzuQuc8a6Yqi11eprVOndI7Iwc+nzg0GlTlcTrv7cMf5PdjvUEfIj9Gzz0I41qBhPPd7MMBQ6AXP7LfvjBTl4KrSwXyYEG/DxdOaMRC1VkINlFoJL6Qg//LoMXP7O6YQGUJJPZajl5kIh9i6HotTRz81vV8P+6+M3+qPzgreCT//7N4ItYPQXmulT8r+aeiVkXuw5/6dneTue0UO53K84H7d4ltvdzGLn6M0BigChjtmuMk7O9Zetj3M8cqBcT9H49hDrazdRbIbCnYPtDenHopd7463FycN+x2DkRXX72Gvamy4IRFIOZMnC1yd8jSMp2OZgdwakyvBUK3TevW9UYoB4pwis4OeeZqYZ+vbNc3W3qgXv5WwBwE7MWfzh51rT5/vcVrN2RKr6t5GSEHv/I0ncHx4n19qDvu9vGdg+xr2jDcRT4ywPdEL/J1OJy7nCw8PD5zPZx4fHnl8ePTMtpl5Pu1A1Sf4aNBZTFtzltvLhzjg6wHuo8BvtUbEtfbO6X4M7fd5Z6xw5tSyfeP9mqQzPB5s3GWoyxqA3uD2TkeDxxVaY9mc0oiPvStCCbv+ZN+v9iN/eU4+C/TU2nj/7j0gnE5nnp+eefvDW7777ntSTN6zpVC18qfv/oV/+uM/cVsWtFZasdidsm0st6tbeidO8xkQY1QW670ichBdvilVlVY6zVZAn5F2Yysr799/y+32nlI3rtcb67ZRa2O51VFF2WrheDxJ2d0sTSNmBkeUXmwQk4RBzQcc1alvR0+HxWBsT0V7t+ltpTjNuFyffVKbuf88cn3dvHKIWMYJ3gsnB/Ph0hqy9YaOLytcrZjcOkBZCIkpB1N0xTeDqmW14IClkz7h0LNqcFUm3NZq8SFr8Q7pqt56whVwV4pdaPVjHNbvoH3Fg1QdkMYow6IXzBVl3ZgP5fA9kNVqyEx39D5+zOgUcOpMj3gH7lF1uoMFMde1CylEqc0Ld7XC9da1u5mclYY8PVmvmhh5vl6tEe8vPHYlyACd2vYYtlJ7DJvPW4Wy1X0P3IEbYwF65ghdQQp3jJvdK7dcaSNuDva6P7EHVwZBY2CeEjEYo1AOxkk/+yBYCwmxuiM5mTuyVWdCVUlhZ2pjTIi3qNZm1wWY8ozRFSOWUYTJBty9FjQQ6r4Odx42EoKxmyFY9e2XGooxVIJfX59IdWZYrYGtqXd1BSejOngvkEl3ealaTSYRIjB5DbEpwhwaU7A+Y4paFptEqzafhDydmS+NKUQezo+0PHM6nfjmq695fHgkp8zj5cI8QI+7r0vh9XIbTN/WypC5vbRFb40SRDhNmfNsGbCPlwuvHx/t72mPZwlxIqTJFHCrBqIEWpio6QzaKGw0sVi059uKbo2QEufLA3nq6X0me0UsrEGC/JmZ+HmGyJ7O31P6LTPLYmUmDwA+n0+8enwg58Svf/Urfv83v+d0PvG73/2Ov/27v2eeZy6XCw8PljQTvZgs6Iiv2w1x7H6sK9XfX2TXqZaltcfxdANuXW5Uz7bt+zwmY3RyiCO2S1HSNFt7E9d3qkZubOvKcr3u3hE3emotQw5bxwNzmz8/37hdF18TXntLLSv6fD5RtRhLlQIqkVB2mP4RU/dzg57WKu/fv6c1JcXM+/l932r2gQiaTDH+8O4H/vnbf2bdVuvpUgqC8Pz8xNu3P7iPemaeLF1wuRXW28YIrvQNGz1wWhBiX6+ipLCSQqXUjfdPP7AsV2qzmJReJK+sXchbvFBrnh7uJJMclKPF9uy1hqzIlSIxYtna5uPfs1S7ksPRcrFeJGWjbEYbXm83S9v3DKYyFC/jd4LHmMQQOE9WDqBthbU+U8vLu0Rqbc7U9QC6hEShivVnEkBLoVWPjRAskFm8+GLuBRqxLC81t+JWjNXZSmP1OkS9xdZBLPuQTut8ZN2K4KnJYVj50avvHoOEc4rktJc9T57uWasVz
uugrpRiyiBYM0oRi22I3majF5Q0ZSGdjBs1Iyw+BmozkFuWQnMWczS29Fiz2y3S42lemrH7s6Mrb/UGrrUOZrRWY0TN1WuNdIuH/Q7lJO67d/rTlFD26+qWXRec3bLbubvWrOFvOCgwK6RmXc9JgalFYjS3KNXqunRWBrwCfBIHP4GcTB6Uza1Wep2SaWdCCLvbyygklABiIq968XyaN2D1vjBRdTB9veHp/lrculWCVl5ySp1jci7DmRsHl+YS96rUHfSoJaGl4IxNY8jQcAA9CasonkNgCjAFZRJzeanTKOpxjhKFPJ85VUjzicdf/5b59RtO84mvv/yah8uDuUq8zVAHPTjAXss23F2tw2dh7IVRTwaYp4nzyd1b5wuPD1bgdVvLcLeb8yf4tBVUCqgQ4kzw9VqIxlZoY7ltbPVKniY0zVzy7Bdo2VNBAiRrOP1iw5kR6XpMAlEiyeu2TXnifDqTYuLNmy/41a++4XSa+d3vfscf/uEfOJ/P/OrXv+Z3v/2tuc1z2jNBO3vpbotae8PWvvcsM3O73QzUePxcD5S+SyQQaLWyrusIKo7BXWGnE+fzA3maRgFZgOiZZCHGIVsswWHldrt59m3bgfC2GR5ojXUpI1v6et1YFssgvZwnzqdsoCdlzueZ2jbmnLwWoIcpuOHSmefPGZ8FesaJr6sFGrkm026ZRSCbBXi73Sy9tVjwmnqw2LqsXNOVGKMXA6yIwrpsrIv75w/VTgfoEUjS66soORVqtBTrdbNHbdYnq7hF22qPQbC+H23E99jhu+K0azts+k676U679ZUxXFEHKm8IJH+II+FezGm4PDwbxqRlGC9F9gy4GKPHP/0ySnKPpO81Vaxi7YcuiXtE7S6OwHhPtWd5+DF/5B4eXx9gzniS/izdnXQsBrgHsxoYMZXQe6j19Nce1Ld3mLbDN69HdDyuhKPb6gi89ufeb+7oCtPu4mrWib41X0+eBdVdDyM17d/xuMsu6/Pp5N1wNbrQ7MHB9vfdvSKy39sQZNRBuYt/8meRPag5HuqL9EBk/P9BjO1rHlMzild25Rj6/PfXvdK6+LGCZ6js/ZiCmBKOMRGTuRZj9CyVTszh5+wJC06m0DPcOnMFh7XgC/RF96ffnnHP+6OLjvHTxrTZfVWSF4oVQGu1Arbqe2RY+c6cCsNctUvxfbwfGfCA5ZiIMY806pwtSwh3Vxib42trsAYWeFxbA7mP0wqeQeCrBBG8eKEB7uZGoRK8REAbRKJ2t90IZLa5Q7zSvkSqNOv3Vc3gItRR9b5HPluX8P0cXnIMt0zXGcOg2zN8U4xMOXM+nzmfT1zOZ399Zp6s4nVf30KfNN2XqMtNC7f0vx3WaJdVTff+gQzZfXRv7/GHQQ4y3/d6Zw4HE6x7zE8txuaVrbCt2ziWtaPxzurrRtPGtla2pdCasq0b67oRglCydUCoPQGmyx6v6dfUDVCxrGiV+7ihnzKTnwV6Sql8++23TNMTz89XUsrszLagA/TAUhZum/XlMHrFKx9v73l6urorIZGSBxyWRiuefu6+xk7F9Q0b+2YVmCfIyTbXdXGXVmusq9UGss8b/duaZWSZgtLh6opRSNno1aiM5p/bVliXXkbfmSG/2Tlld791K8uDa6VbWkJOVp8gjtggBghAuvBNo/WNgbhk6YTTxHZbqM8LTbYX3Y6C+ALcRr2b7vs3kWOrqXaU2C1nXHh0wwLxGkYmsG7LxjpqIXkcDyCx+9YH0jOF6kF9dqSdgU4xjoyrOSdvjGrukJTCUKp9LVlxLI/hsWhUYjS3V62N2/WZp/fvqbW5m2TvC9ZdWka82VoVPxlLFbU6GEd/fI/nqVosQL6q96ExYBd7AO8LukL+8jCJaG7dXjW5jKrJ0IVZBz0+V53JYc/ys5TSE/M8IRLIU/T925mhw6+Kdeme5/fcbovdW2fTYgxYcpcpxNAzTYIi4k192ZVRijJcWjH0eD+YT0pzoXe+WNyDMbcy1tO2FcpmoCclLxKp6plnxd2fC9u2juvvBpC5s/Y93O9nr2/1UsBHRIjZ4lnm+WTVdOMeqN9ZZdHGadrfP02Zh9NMCLBeb+ZicDdzqyarTtEYnhwgByVJo0e4NKN5KEQKgSqBPF+4hJk0n7icHznPD6gq3337A3+q33lmVaH30qu1epFBk6HVmR71khXmpooHQ8b20jRNnOcTKUW+/vprfv2rX5NS5Pp84/np6ixioHfynlIkp+B1aYQpziBKjZFKopSNH5b3PD09Mc+FfHogJo8ximI1nhpoqC/eRm3EinqdsF53J0pkniYezxemPPHN11/z93/3tzw+PPDNr37Fb377G+Z55ny+ACaHWzUXHkPvBHcvN9zzPlil1hp1mgYYDWJrB9QZTq9/d7OWEgwpADElcrb2RL3Rq6QIdc8ErM4MAaOgZK2Vp/fv+eH7t5SycbvduLqrq/MCClTvfKANIy3cpXZ9skQS1cqyPFlyUxUeT6/51ZvKbVtRfgCxVhW3ZaHoNowYfoKs/cyYnsK3335HSom3b9+5T/BQ4yIK6qDHKrgxEGgPfn0uV+pWTKjIHhNin5ExIbsVul9EpGcBC+c5WuyJKotX9K21cVsLW7FigKdsBeQsGNNYn9qs7UNtaj5CTKk1b2IpQVjWjdti1ZnF432MQg+kaTI6uVXQDq46GLPsnhQDrfd+Ybd+TSF6bYo8GaUoigoW43Q+c54nIsItRV4W8tjo1airC8dtM7YtxcCUrZ1G72BvX9iVeFWzqBBhq+bWak1Z1sK6lt2l0o0F2C2dYBTzCNDsoMcBZXDw2At2neZMTqZkL+fJlKCnSvYYnujR/zuD5ML0bCXU38VgMVelWMaWW8UhmOsUX1+oB852ayxa5sP5fPLz7+B44/lqlXnFFX+/niPoac48/mJj0JaHt9pe7bZbYKoNq3K7FyE8gp4eYGlBk5ODHovpCCEwnzLzlA/0Q1+vJuSLV1Wd5yugNK1A83s7uCS3/K0ZqjWk1Tvr23o0xbtgczBLvU/cw+OFV69fGQhmr47ZPG7JficON8y63tg2a4oqsl/70dJ1ss7vHyODfrjEX2p/CsQpGbNysSymLIEpJKII5f8j7s26I7mObM3PzuDuEQEgM0lKVFXdtfrh3v//k7pvr65bkigyBwAxuPsZ+sHsHPdIqiSRi5BcApHIREx+JrNt2/b2zkpdhTEGPjxp+/Hj8cCHRy0LXZ5fePnyhZwy87xyu2kJe3DC4NRDMUjFS1FjTmr3vUs4VjxZIAyBQ4QwjBymE4fpwO068+XTJ15ezl2Nu2XwKanZcFoTsxnQVsysFLrvW0MJFT11uk6niRAC/36eqQRCjJxfz7w8n1FxRVNTd47jceJwmAjecTodOUSVGyl4imSWLHyZE19ezhzWzMPDwjistk+of6ALgHe85crUtGELeJrYYHC6f43DwPFwZBpHvvnwDf/j3/+Dp6dH3n/zgd/97nfGU9OETKVEShfFDHEDDahVu0oFpQI41SIKMSraXkx/LSn3Rs9gHa/L9cJ8u1kJerDOKu08dd7jgkeC
127UhuyUQk5qKq7ojsrW5Jx5fn7m08dPLOvK68szX758sURTCcrKycK6fSupZJVCEEtMTDLEmf10zcJpfMC9C1yXmduqMidrWo0bpMkTddtT/tb1C8tbTfOhRXv2921Gi1Cz4aYdzsOGvf1u7botCqkZhN3hZH2hdkjss2TZ/ScXsQ6txhRv78++7LlVVn07B74utbSNy4AOK62x+6UdNOladC00YnPLiPeX2L+3tlAtp+xFvJoy53Ze3JdWtrLDW19bZruREEsjCBoS8DNV5P7YhvIZV8pKiR3qtjHern3HyQbz3ltCKIthK2dZ4GMBzxCDabzsuDtOaLYPiNyNdSun1OK6zEL3FZMdUdrG0N2NxS4g3yE8G7djKz20DOa+DLi7Uf+Kq97/UP/aXwNtXPbvu2u69HHZyrkbZ6dQTfJBxO+QD7uXTlG2WgZtHS/qYaVHa+MAmXdbDzp3AY9sY+LkfgwQIUho0Jx1E0W1IqnbexArZWhw602vqOCLp5SGBmrm3ZoO7kd+u0fs/1bebn22g71zpu6hJrt0rLTzScXgjtPI6ai6Ny5nyImcEkNcFaEGs8aZCE4Yg2PwVoIMpryOlmlzrRRDYT2aVCjRdWHuX2pBsizaTVuK2la0btVlXcgpb+VKwHuz97Fuy+w3xMyJalrdbjeutxvRmiyaGGaTxWilbVBLDJwSv/Xm6UHZVHxbe32zL3AV0ycQkELJ+v0tr32X3V+bNO0M2CNfVG0ycSLbvgLKjy3bSm73oXcFg3VjalRRt016ey+1obhutx82FKqVh2UrfdZqXWDZVPiVspJa0FNKp7zknHvX9rqszLeZ5aa8VkI1YEOscwwDLdRvCzaagojmM87pfdDEPCtP6KuD3N4irQDx965f2LJeqdUkobssna7+2r/bb9ZC6WKDmxcSljlSN3E4LCjq88FVKAbbmRKs/taW/5VcSVJartgRCWwzLeYyLA1SaxuhdQAh9W7AoXWZaJDU2vtUuE4DlTFGpmHQAxPdoMW+NzSnqSg7cQzDSDWbh8qtIxxxCPgYqFRyzWQbsZIT6yom2PaPQXW/+mrBTd0FKFaLb8hOseh7c1PXryai2FpCK0pcXqxsmFLeSfTXvti9NOK4dHPWTqqTJhpoBHbvOEyK7gwx8PhwNAn2yOPpoOqjYvCruE6aLlWJs/OiNWHV7xkRgbTOXMbYD9F2kLaOoLZ6Ople3/79l4jpUqjuxzBOjCnvYP7SDy3vPY1M+i/R6dldFbodwV1QZvcA9HvZJRvFkMloWTSYj91acVlLFtkCy2GYCH5AN7QAVbPpd0+PiDwpjL7eyFnV1Of5onw+e93GQ/AOJepastTeX+OBOO/wQbu0wjgQp0kz5nFgnCZUWbrx5+qWZLEDpKoKkcYh6KFcK6DGwLM4BNMpafeFVi6THn/0hoc3uESEcYjWhbYFoEW2JKQlBqfDxPfffcPxcODb90/84bsPxBBYbzeW283Q28yyaIkvhsBgJfpAxdsOWnJLJCu3VLguieoD7njgOB5YS+EvP37iuv7Auq68PGvZsmX8vQPIDt+0JuZFg579+ZBLIaBBjzjBJb2Pigwtxv1Skm+MoYukllqZbzPzbQFqT4LECUMMhKiB6/E0MU0jToQYRz787g94J2QcL5eLBolhUSFGJyw37Qx9y6vN74pNP5Q8j4EIauxqPmfLyjIvPH/+wjLPNAuX9thmciwiTOPAOA2WOG6xcREtFZVaWZeFtK49CxQwhG1ERMhR1+w4HrbSs32PQfAUJK8s51fSLbCsC9fLtQc8yzyTS+Z2vfJ6PrOuiecvz3z8+JG0rMzXmdvlBrUyHo7Uw0nHOyUtPefMxy/PfHp5RgUxdaPV8fMEI5lnW8drTlzOZ9Z50cagfbvnjgv8t65frNOj5L9sYKixPloGSO3aHNkGsrGtZddt4004TIPU2je+ziWv1ZRQ9fUKZbNksMBi8/TQ00us3bwJsFXEnMz1lXTj0t8XZ1C6wXjiDPIz7ko1CJtaew1WRDrKoK622vUDlZxWFXTq0Bf2+7FnSKltvs4Roh6ardbaxd5yJotQ/hlBD9wFPKDBXml2HBTr5NHOKG+Cfe1u6uNL/1xLyswp23PsymE9aXcG7W4lrdbVFJzv2VtzJA/BGV9BOQuPDwemcWQcI0+PD4xD7CRLEWFZM5frrO3HdWFGtXN88BwOyvW5XUdiDFCVL9bMN2MMDFHHuHEg9mR2ui9cI00r9I44RRjMi6qXsaRtLH6XlfyL0J529bFu86olEi1r3pCevd7UprehA6kqxEnLU26lkKxcmwmu6vozAmRwgYeHI9N4IOXE9frKmhaW+UZKN5p0kZaBbVOvG7LW/r63CYhxekLAOc/x9MDj+w8ahHotWwBmV7P0z9totNX2I2pV9fWi3k5N1iJb8Foa2mvXhnI15OUO8/nNrzYnO3rWUPG6JVft9w7TyLcf3vH0cOL7b7/hf/zb77R9PGdq1k7FlCqr7W171Y2SW+aeeX25cr7MlJRZcuayrLgoHMPAdDxRrjc+ff6BHz5+7IhPUzJu92c/b7RTZ+1BT4s4fUtgXWvkuL+HLcGstRqXbCAG5aW8vrzy8vLak9s2SK0hJcTAt99+4N2Tekx9/4ff8/ThHZRCvlx4vd4ITihhZWiyFK523dW3u+5hQSUUFyjQnOybNc9qJOB1WXh5VjG+hvSAilSOVmpeDxNp1b1tsFKX3s7UXydZ56omYlt5OMYB79XMU3CMowb6XvaaWLby8sp6U3RsXhbO57PZ0Whres6J8/nC5+dn1mXl+csXPv70SVGgJVNmQ9YyONFz5LYs3BaVcfnzDz/wx7/8aKVl6zBzwjQ1oVjVhvLm/j6vC8l4iTU3rlALev7+aPwKccK2K7EN5FffuyiZbTDbZitUcRRRBMESOi3/1LrptZT7DpNqLaf2pF0kSQmjBh2KWGC1B/F7ane3WTVR4U5Cds4OaWnJvD7OeDmbUGB73vZWShc224SWNv+iluW3pLDB1XcllPYz9CCob9b1HxvEX3NVuNuk/tpL1a/+rLwVeuah88xKW7aZ7t3i2+MbEf3++nlHCR2Bad4zzSFY/9x//rpEZWVHbxosvomaSdkUmr/aXHVcttfvnQl3v1u5DwAa18PIoS3jdpsXzb58JzbJdQN522zy7rN9tR6/7hhp134O2KfdEJbdczXUSh2eVaTOuUoMmRBFResChGD3yXSwxG0QmYjxOao3Iut20vRDfMOO2cT1Wm68/ex9MM8mDX6a+nI2QZ4uAkq1ji/rMKsb4boa12xfYuv35quF0OZzX7MNgfq1A/QPXC3cKznTBANz3x43or0mYL7z3aI1h1Q2ATjnMqLZnyZwZh9UnCM7Dy5T3axNHgDeq6J4iOBES10N+RRFvbwZf+5Ljq07qO3b3qt0R4cgoPttbXPy/lO3dZJLQXLG+60ZoO3NuqzayYG9ZsEV5dnNy4I4p4FXysgugSylkpa5P9Y544O81SX390iTbkOCRdGpfYdpKxu1hoPWzdzkINIwkMcR5x1QcK0RoEaEeL/
G7b6IrT8vW/nKixKpxUHxwUAFLTvvu5Ebql+bwWfOpuxbrNyVugJzMlSppNS761oSA3abixqVavlTA+M1Jdam+p4TpSScE9YVWkWmSu0BYjK9n1KznZONv7LtJX/r+sVBjziNrJUBr3XBKg3xYfuEdsMbiTjZhrSTFWJ7SON1tB7C3UacNxJoEwQLhgiC3QAAIABJREFU3nGYBqZDxO7jph3givUeiHUFNTLqLmiR9jzNr0ko2VGco3lM9bxe6JmAl0rNC6UISYRimWrOiZy0/jrfbrhqE691JdWCFxUFw2ECTLUfMq1+Sk6kksnrQjWi9NtFPdXkBO5LWq3WrpOpZZgKfRdngY3d7zVXLWlVzSSTLQz6Iab/3Xfg9fIfG7G5OZaLg+g93im0eTyMTKNG+4dpYBwjQwzE4LrGTlvfwXsOk7a4DsPAMIxqS2Cv31AYVdPVTMLLVl4LRp4rzmndvFZqSSQrAa7LipZGHblC6GiWZU0um3BmMU0fe28i1ib95ukk203f7nlD1DqBvtZOXKZaebW118u9dketFe88h8OBx8dHfPAMg2MYHOIKcToTxgvOCeNoopZFmK+qw6FUp4XVTus4BOIQ7J68gHiacnlbI84SIu1C0fJ0NXmHimp3PJwe8TEyTMfekTMvV+b5Ygd9AlbECafTxPGkqrUpJ/XUy5XrsnCbV53/tskWI1buS4C1BVrOm8qEiS+6jSz9NsOoflPKZ6kEcRSvpP2Skx6coi7lh0n5PIdxMH5PYDVV8JIz823mcp0B4Xh65HB6ABFS1eaDZVmZv1z5PC+AI54emT4cKAi3AufrzG1Z8SFyPD4Am8TDz4Ie41zcbje+fAksi75uu1c+eELU8lV7POwCc0T1YEomAT5nSjDibvBMh4MFAa0sqQT5Yvozl8uVZVkYxxFi4JoyURwPwTN5R1pmLl9+olzPupnVtEFfbzKOWLOFkou1bDNwNG2ep8cHPrx/x2GaGGPkcr6wLiuXy5nX1xdFH42bJcAwjozmffn09MjTu0dCCLx7fODhdLxLFgVTebY9djQVbYfgXcQ7s6GYoiZ0JVPLAia8mpLeV7Uh0a+6JiQnXMmwrqTbVUnM51euL1/UZum2MFZhEDOStQ5PKtR1IVd4fX3hp+cXlrTyfD4zr6tVCFZqUSS51IUlaZDozZKkVktq7HzKPej5xwIe+KVBT0csbEMyyKLeLX7L7lqPv6EWuUXadfPJaIEFKKzZRAg77NCQj7J7LIoGeO8Zx1EhvGzGg0Xb1EUyzeMEQ1iM42rIir7HEBxD8xAT1UCtsrWhgyFHjRiEHoTtp9IWuukUFNFMw1WL5INCclK1/Tt4vV/eVcSVHs4ItrkXbfdUIcD8pouxomWofcCjxFXNihsKANrKmqu2eJbadI4qa1YejSrpaqmLyh0/q0GlPdCt+6DSxlJaICgW0DiGITCNuqEP0cpc0TJaU/PdHzrBOzX5RIilMoyaoSTTleq1c0NpGodoQzJcnxtFBFcqiWyZqvKUYLEsWXZtrirYV8QRwkopuUvJg3UmVYfZA7/5JX2Rtkx61yHXOEat+8mSi7Y2NdtsxFI7pLxnMCXYEAKHQ2CcPM5lwqHgRw0YhuiJUchpJ2xIprCQsnK5Yhw7pO586K+RbcMVqTQLNqAb8SrippPGe890OBKHERdHvI9UhJQyr5ezCqv5jHMZ74WTD0wH5Y8sS0ZWSGul1JVluRk6+9XBXZqSrL0P0X3A23sQ68R5a+yu5MIyL6SUic5DyBaobxYAMXjGIdpXsKTAUxbpxNN1mbleLogIh9MDwziq6GpVEcfsZ1ZxvC4J7wMfDkce33/DmgrX51eu5ytrzjgfGacD3pmPk2lyOJM8KLX0oOd8PrOk1WQqtuDGe/Xsaqj3PuhpAbcmKRrY5B1yrnv+gHJhWpZflSdWFIFVUTw1sXTTxFqFKQaGxwcmP+o8+fSJ2+efqDVDXtgpzv72V1t71mghCMGb32EIHI9Hnh4fOEz683y7scwznz9/5qefftQ28HUhW2u4ctdGQghcvnnPfHuvJaC84mvBe5X3GEyKJcSobu3imUJkGiYNhgg4vO5LLqr2TknkVXPtBMy2r9dSuuNAzRkpGalFifLzTFpmluuF2+XMsiy4BAO6TrxDxX1RptySNi/Kz8/PLClxvt5UwLZYSbasgHU7Z03InFeEsgU3dbf/3qP4f39V/jrvLSO5QlH9Beu4UJZ+tSTS1Hz7g/Q/rWy1vcF6B6dvj9gOCUteuxJjDOoMfpgOqgNxa15eGyTXv2R7rm0r3+qWHX7HRNKc/Z5sr9v/sEOgkI0D0R77NaHM2UFeqx7K1VppvXd6eNZKrY7aognaYXEfeL3ltXXDbJt7C0Xb+O3LpaVYxG9Qdm9225W0Kvupdw/v7o1DvXWo7DuqfGuTbKWifof1+52QWG0tiiaE12G5lqVX0lq7SGZq3JseVLen3iGLHSbd7k/7YBUQ05Aq3dtgN6dQIbiGWvom9OK494f5p1w6gvcz/y78tHGv3Wurdcnp4a/P4jvJXxWIS/U0Ym/TQ2qE8I7w2VyqGLJWF22hdQFwRkrU91V7yaneC42J3e+6n6ObP540HZJloVBZ06IIjyRECs5nxAuQKHVFimxwOBo0aHu7jk9rT99eqzbRY73qRj5V7Zm9mMYbjJyNSStvFKxVWaxkJ9tvt/th9X1EHFW2IkUpdM+7JtgnBW4lMRe0Q6pUdSV3ric5pQ2m80iFECMDW1eeD972yoamVrKo15Pva/ge0XEmUHkf9Oz3iK9uxG5ttscC1OL7WObsyFl/TsZTCmZJUuumCp6SIiaUolUDu7dd0+CNxtLJbp+TVrLXkpa6nFeaenIrWVwuF87nsyJ9OVHX1faaqioNObPOM+t8Q2omLYsigHiI1nzjDF0KEeejau1Y4CWmzCh9g1cIvxtgl0pNRUWFu3Glwfy2d9ZdNUfgzjusa9TViuy4Nv1cKWUrjeWVnBZqLap03srhsoEiOui7Q2Z3f+8X4t9flb8s6KmgJb1CrgtI0sO/dTY4OkFtH0zUAj01bhONFlgowVhblbXzpQUdPdgQDSrGcVTVymniD9//G99++4F5vvHHP/1ROwnyJsvedHM2uHzb/r29rqsZyQmpTq22eofS7tDeLdjCpji8v+nOWj6dCEMIDCaqF4MjRC39xSAUokWtumuVWplTZs3S2zxL1kEfoifK+KaBjzg9sAKa2ZZs76FWpCg6oRAi5FXJaKWq2GCtlaUU1rIR8bbOdrF5wV0AE4K5/IqYqZwukBgVvdEuoGD8Hb9tWhVKla5xmUs1jkIjqpr31hCMPEwnIV/Pr3z6+JHbPHN+eWWe1b/N44heoIqWX5Me6k07Qg9hrHZu89Hq0XlZNePB7DGc4GohyqZAPAZtp9e0tP4ja/E3ubaw/6vwvx+KrT4tVmp1quIranI4DGMPeqoFJjknXl9fdMyicJgGwOGIeBkQqnpcmbCgc0IYhJwKl9srywLeRYa14sNk9gCQ8RoYVc3qVSxuH0S34Ay8C9CI6y6AeK7XK8
+XsyY8bgZ/RVwhjJVxUpFIHFzNZiUnT06eWiCGyOkU1YblurJW9cML0RGzN12j2tuDSy2QVSBuaWshv+1hSbXmhpRB9AByqKDq4AKuUQvEU12A3VeVQMaRqmMpwi3pGpfzTPKvFITP15kv15mUM5frQh2PFOe5IbAkXW8uEiaHr+DGwqEWml3L13tT38NKYc0ZHyLOr5032dCaEBrSszf33c3humUktWpZ0jmnKsV3ruKa9LTStQZA+vo4hxsnCo6U4Xq5IbcZWa6EXIjeQdbH/KNlkV83kNKVwEMY8c4zjhOH6WgSC4F50RLm2rqhcubz58/8+OOPrGklYGi4CE+HAxyPxOA5k/F5YRgHjtFzOqh0wWG0MpYPDIcTYZhwLuCHA+IVKcOCmVoKsiyQElIVxQFgzeSzojgZ2zqouFoM6tfvNanbQkA4DiOjDxoWJM1a8pxIeaWiVIPQkoWcSKaVNV+euby8IFQOQ2CMym8S7yxxQQGVu2DnPiH+JSP4y3R6sGCPQiorPW5rGYYTFTJqnTitRNQ20LaT7jt7LLCR2gThvspHjXWsiqMqYHU8Hvnw/gO/++73nM+v/PTTR2quPQptiMXd1z5QMadlNe/LprdjnUW7T1u3t6d/Y59B+g/opLZDXNufPcHrIg/eDlYRwFsDUCsLqhZG+19ByDukp2XcXxNwf6try9ah+RQBSBGNzLebbyWtCmjAk0uhoNBnNnJZ26bas9eeuW2ckia3rgGhN1l1/bM336oYA6FrNQj7adO/Su0WGO1Qqr4SjJtDf4zCsefXF86XK/Nttjp1IfqG9miGWppFiKGRPbtskDt0vlctiZINuarK7HUUggjFSmXRayCnD673K/afcG1IxA7tuUMD6J5mqnmk3YnjGC1rb8GfBuTX25XgAw8Ph3Y3UDmHqOhgNW+2iqEozoRAF663jHMjuY6E7NWHrqB2mVVo3lcFu027cW9cj+7mLB4R9WCa54UvXz6T0sp4yEynjHjwoTKMmDRFYVkXvQ95opYRqsP7iB+jduuthewTrkpHu1p5Bdo8Kz2YzrmR999YhqBCzYosNuSl7UdxCLtg1lsLkwfRgFAtHJwe+gXWZEnJvJLON3KFH55f+PHl1aT9Iy6OVOdZcZCyPr/3eFEeVrBkRoNQerCyJ/q7VUu8cY7WHeuMAO970ONtr2x7Qv+4HUm/J9c3gnqIkdPD6WeB0l2zQW0myTBXSLZ3zbcFqYWQZo6lMNg6+DqJ/a0vobWZq2O4c75zDqN1966rdhA2X8p1Xfj8+Qs//vSjqpp7p0mUc4ScONRCDYGbF7wU0jgyv3tgnR97B7QPAz5EwngkTielj/gRTLSQbKeOqtPqV9s1BEiVclvJt4XiBIKdW2Z2q9mwGoxW0xMaY6RYNzTKs2ZOlSTJ1rIGbxmgZHJaSOvCOl9Yri+IwOSP+KHZDEkvxatuVDuTfx7wCP944POLkZ62KfVXkO1bfwu2WW1oifTNaytB2WSwKEdF6LaySv9YdpoGr7XraRq77LqW2VReP5n3lhMhelX8DDFY986mCdQXLPqawcjOftdO3csmFS1NNGi2NLGoTVBNMJl+W8gd6RFR4mYIHbbqQY9intotUbVrLUumRGPR14oXr231b13i2o9ji2F75LkhFK17YkN16m6T4n7G2XNIC4Sb2FUvZd2XtJoI4Z1QVs8Ct5+76eNdILgFKUpSlu77o+72alHSBM72JTakoYr6wUUM5W3Q9652XKgaAtZWjtWxKVXlD9qyUG7abu7vnuufffU5Kht6qety/9n/m8f2aN8CwaI6Meuq0vI+gPjU4yjvPR4VBuyKyDWREixLxrmVWmeCVyXV1h5uG4qiZn3eNKJ7x31pO0IuWfVLcmFeVtZFtT7CABhiu3+UyTajwnZOxehaMGF6Usk6RxpPpH3y1vUF9A6xWulBz1slJD+7+jKs9v8tyahVD5FclZi8WitlqkIRDXqqC+Aj1EoqQlmVnLrmSjZ5D+88LkTAFOmXbNlj7A0ZjZArbW/Y7f2INmOEEKjVKQ/sdNL123zRZJN76OKjzaG73VzY9l6gWS1473l6euLp6Ylm0tzxnt3jmtJ4rnBLlbVUXM4M8xW3LrrPi507rpWgd0Sy3/za6APW/6ZIWEr9PBTb5+dlYbFSvKoNJ1JacdUjtVCc60haLUXvt9NEMsaBYTwQx1GRnTgiPlDwJCvdKm+1IKXgd3hwbeWt3Z4/58zr9cbtfKY6oZrbd/EOhmB2MBrAhmY0nCvFF+pq40DpjRRC6cmMoKX0IQRqyXpmRj0/ow/aYdjKNdu7tHG+3/vvf/7Hrl/B6dkFPPaSrm2k0M3MpOx4Lk7uiH92xKgVgAUl3osGPbJfT7X5wxFj5NsP73j37h0xRKIPLDcVP7q8Xnh5PuO9MI6RcDho5jqEbnIYvAngWR2SqryZYAeyHlh2nNWtRbBlw9Baldt207Ie1eNRsTxhCoEhtBKOCWi1gGe3S4gIqRSGlJht051jIK0ZDww4ggVUb3G1AObrS0TMRZ2+seZau51ELlnJbbWiak17fGfLylugo+20wcpYsVtbxBA2pCe2UpfpTXhtrYyW3UQfiSEyhNhb1r2j6wlpGWA1/RfP9Xrj+fmFdV35y0+feH5+Zp4XnLjuvh6Cx1uA0gIsgFIzKRnsveN01LyhNWI3sJVDW5efKj7vAv6m97PrQPznXNtG2+BT2RGZ9R56qjNj24agYQdIQ+jsUFcF9pVSEl+evzDPN3xwPD15Tg+Kyh1PB/w02L4peISab1zPFz5/uiIsiMuIRL15WvPG1UJAEOeJDgZvnXxAw9iqaxwVuF5nLuknKo7z5cLLy9XI4yPHw6jvPXtc2RzYQblDefGkFKgF0pxIi4rr3a4Xlnk2FMcUo53qRwmG2gYVC9QYVp8vhPh2Q9ih013gaQFPxpRYKixVuBXHNQvntfJ8y4QAaxJmBooLlFiRg0CpnNfM9cuVXCuXVEgy6KE0HvHjRFoTf/nhRz59+oLzgdPpidFKMQ8PD1Z6MBE52Rh2AkgIjHZgxfHAOB1Z11XJ8E3fLMRuaeJtb9gSpI3ILJZQtiYD54TT8cjxeOyJ57YNb3N14/lUXm+Jec3k243rn/7I+uUzATj4yOBsHxjrVtJ5i8smcgvIIDMvs1L9LOgK1mm1LDOX64WcEq+XM5frhZQSs2hpKzjHwTveTyNOIHrH42FiOhx5//5bPnz/b8Rh5PD4DeH4jiqOpTrSavtTSjjzoxyqFZdrYSlJRUcNfBARvlxn/t8//pkvP/2kCTyahBzfP/HNf3zPcBhhOJAeHliTdkGWgzZ+zNcbl5eLlWYLYTDjUUH9LSucxoFvH04s6wA14S2/GIaBIUYQSBTybnPa02Jatlb5+gz7+0HQrzxRd5kjPSDrh0Hn1di/64A1AqQdE2Ktyn4LekLYPUcLnmg3I/LwcOD90yOqzeHIpuo43xZu1xvDEDkeVcTOe8cwhr7BDyHQ9HhqVrJdUxpu99C121g3roizkh3QSYXa5bQFPaMJRnknjDEw3pW6Wonj5
4ORa4XktC0zq/FfCokgjoMLBLmHf9/yagT0Pcy2ZZNKXlYDVlVdLWwZp15tbOloSlP23AeeLdAJYePxtPu0/dl3FK61k4dm1Ne0edrbtHKgtliuSM7aGfD5M/M88/z8wvVyZVlXxmFkiIM9zz1y1HRjWseIApJtI8Vk0+8FI10VQlGCqZppujtHa2hzJm9ijf+MS7Y/9ENJxIj6DT1r4n8ty94CnxafNz+0bK2rANdLYZ5nU2494uRACMIYBxiOuh800napzHPlcl40r6wJ8NY+G4zcr+vQ0dprnXHuNgRBkVB9U8u6cLtlcoHrbeF6URHCw2Gg5oB4j9SAq2FXVgeKUJIjL1rOXG7ZTIXN6mBZ7sqlYO7looTTaZosyBFq1bJcMz59i+tONHT7W/s347gh5CqsBdYi3BJc1kIoQslCIqi3ny/IAOTCfLvwfLlpx6VTnR7nPMQBPx5Yy43n85U//fAjIUQ+ZMdDdYylcjg1LSrQYlvpiKHeL88wDtqpN4xMhwOlFJMGGYwAHZnG0QLJ0L2lmg3CliS7PgYNBZ6mkWlUBeKGMu8fq3dI71HKhZfzwuWWWM6vfHx+4fx6JpREFE90Hl8hSsTXf4KchDS0opCSBmVtn/AW5K3Nv6pkbvNiitaJxszzzjEvB1XA9pq0HeLAYRw5nh44Pb0nDhPx+A43Papi/py4JQ3kHbUHPcVXBqkkKpdaWKyKEWz9PS+JP3/+wse//EiocKhqyl2r8N333zNIIAftQnMxIFlBwVqq6iVdF5UcCKrLhYglyMqxHULgcRpZgmdeVbxUEzIti1aMz2ZCyH0do5URsQrJz6+/j/78ahjB26G/3+g7PGaoSetk2jqa6qbT04X/sABoK0Gxe9sOXQTBeBIxqihWM7crZhjYSkyTdXV574ij7xyPhkbVlsm3UpUh8V7D3LvYRHY3cH947E733SLddSNIi3FaWCB3h2XbzNkHdhi31Nnm7xXl+OXg3T941Z3tRoNdWwt7LZvQYt0pN+8+0Vessg0p292PhqDspc03dr/8lT+7Dtcq0hN60OOtFNrmlbMMyjuty7cqYEX1h27zzO12Y02pd+p4f+8ts7sV9nGMYVX3n9MCHxGajXHv8MIIv/a9FO2s2UTa7PDK98HS217b55LdHP36z50fhXlTSUN0tOykrfs2L2oTcBP69BeMfJmpVbjNK86rqmspQqnCPGfSqro4tT2PphLU4mztmUed7SMNxdt/kmyHXKGQM6xLJlUM4dEVFHwghkE5Y/b8VLmTZFiXyrq09526fULT5flqSvfkrCFnmxWFBj1vtzh376Gtod06GqaJw+MDcQgcHx4ZpiNhPODjCE7LUdU75S6Wih8dAxGfC0ciJR4s6BGSPf84HRjHCXGe0+MTj+/OeB84nI6M06SJ3TgxTRM649tY6kTSBHEnViG7Tk1/z09sSUUl9cCl29K0/bY/zeZ3uCwz8zjov+2Xk2yVhlboSrlyuSZuS2a9XFiXpSvFK3/TK4erbt2LbzaONoc6bQJQuyTdy4q9fikbsbohOVLVXFZq0YTPmUSME9OqOjBMB8KgCsuIsMwz86IJ6nVeuS2K4kSvFBIvgosCXljnmcvlzO1yNWqI7pHLfMU7YRgHQqn4rDp5fS8XR8ARNV1RfmxR1/RJPDlEMo7oClFWVPCjKM8KYfJq6+REOI4jc1qpYOT3QAFCUd06LQeu5LxuAKh97924NNLB379+VdDjLVPXA7ppnPQjwxaAKreKazosLbKtnWwVPSYyR1d6FQzGMu5EcFpvnobA6TDweJrIufB6nrldV9blRgye0+HA4TDxzYcPPD09alYxKPSeUuZ2uWnLYslcr6ocqcGOEqiHGJimwXhFems78FGthc61NvJdICRC9E4DlBb8uLYAtSy0pdINGZOerWB6PI5ir63B3SFGRh96xvNbX83ojX5Aa5CTsqISuVSzmKCTl+suAOrQcpsTVrsXux+tjKXWHfo5VEfE+E5WxlLUZyMvT0NkiFGJ6+OgrrtOtSc0+NF5451uFl68Eamdco1K5XK78eNPH3k9X6i0rrGBoUHrYl5vLR5FD0Idr9r9xOibYduULXaxgxJxZBzOAghXKmS9j3ldu4N39537p10bZNcQnnaoKKPQCMS7TjXAhMFaYN8OfUwIz7hxXghBx3lZMrXMeJdYZngdEobzUhFut5nza2JeNpJppaqfmoMgAedhcJp5Ry+MQdd8PxyqkqlzzuRauN0Sz5eFlCvBRwZzhT5NRx4OT8TgEVcpix4gapCpG+eyFNZFOR9pXchptbJD6mXrHvg0JLeva2+oqyDS9IzeCCHQhNj2P9UnmaaJp8cnYowcnx54+u4bhmnk+9//nne/+3eOhwPjMCDjpGheKUjUvXY6OoaqY/KEkE1MVg1F6t3nXeaFQuDw+A5BGIdJdWWGgW++ec/pdNI3aMKpzWS0+bqpcJzu9WEYOvLbxBxzKZR5sY/Zsw12maKqEpsESSktsdU9ehg21WE1j9XOy80Au3H8hDVByoKsK/L8jCwrrhS8jwzuiKuZWAK+vmF5i8YH2+tXYUEjatdr50I2nTZQIcbTNFJK0PJbThqEBNv8QmB8fOTp999zOJ04Pr4jjiOlwsePP/H585mUM9frldusnmYPx8n2VM/pMDEOkdv1yo9/+jOvz88aUHshiJDmmWEKfPjuPZIKMickFw2AfWQUr+uAgYzqytVVCdInIh+mJ2ourMmxXBKFxEtNvNTCiuDjiD9p92+MgcPxqE4Lw4iEEVD39VxV9uJ6u7AuM81/KxdF5Jd17SakezeAv3X9ckVmS3hby6KWqLYoK7c11IIey8obeVi/mq4KhIa07LUnWpRPJdjzx+C6SJ0iNRfWdSanFS9aYprGkdPxxMPpQZGeGHBeFL6eTY210A34hK2lHSrDsAUZ0v/Tmn6bb9j9RvfXkJ4GBLXSkGxptcYKLVWmBXcWKGpEpKhWVEn5tyRLtix4b6/QMn31gtnbTOyIy/tQe3c4tmCu2UZoQNNKWpuVREPu9iWtVs6KPljnk/IAGtoTW7nMgmgntWffpSohM2ed9Ou68nq+8PL6yjCMjIdT5w8EI5nTyHtsyEwLfrrk/g7paxpAgjUfSpsZXWJpa28uhcWQBCdCtc/wr7j2ZVhVON6X3uiInl7awXaHdgq6jqIFQqY9JRiKknUN5eyYb9XmtS78eV5YloxaQBWDqjUkqkUJr1TLGp0Qzc+uIT0t65VsXVOmZTLfbqy5cJiEw3gghMAQB8Y4EUKgVNX/KKWwzjBfdY4vs5bDVQV6pZTVVt7m47T75H3t9S5E+657xBuisNi+gQZb+MowjDw8PDKNE6cPT3zzu98zHCbeffMth4cnxlE/OyEoEuA76I53A+JVt8YNI34YAWGt2n3Z0NFc1C37elupoqR0Z9j8OAxaQjkddWRM86g5bOduDVA7+t5Iyvv71P69dXt1Y9i6fW41ozTX9mSGmVQVXoytBOZ70LMsC8uy9hKtPo9Qq6fi8SVzXBaGlLUhwXk8EVe97iXlLUdyj+XT
UbGfNYBUXSMNUffGb6xFkAxI6Wh5dSo7EaeJw+MTh+OROE34ECkpczmf+fjjD6xr4nq9cLvdCMGTnx5Jx0mrJeWBkkeulwtfvnzi+fNnnMBolA9XIUbHEA7UVUVGyYUQI9GptKEqiXrtQi5mMJ7Boarb+EryCyueQrX/woqweM86jqy1UJwgg85bGQ4wjABk68TLORGjZ54juWTz3tLAWG+jyqzQObd/+/rFisyg0FsIJrIkptlgSE+xg2AzlNx1tMgW1QrNw6eVtwQf7ns1hAa36QE4jpFpHFh9ZpoG1pSBwjSNzMuoWQBsDH7LEOZ54XK5Mc8L8zxzm1fW1WqlkjfEJq7k4i3IMjXnO97H7jbsriYuJSLkjN2FjXsC9C40aUECdGSieeTA5jJrb+nNrq4k3Q78/Z9pmTl337++nEW0GqttZSMtUWxdWe3n5pguYkRmr9lyNE3GdooOAAAgAElEQVQbbwjREKMGKX5Dg9yutKUqn6pzVPMmIdCQKMTk7g0qbXOvbSzFRB/EEB0nvoNxBult9gdtNrZA1bLY3pJvCIAi2MZ0qlsLL871n//JcA9AD2qb3kvOpsMkmweS9DW7L0PU7e1KvzmWudtt2v1Z4frSA3tVrW6aLp6CgM31Pp7UXnZ2ogFZr5i0+8hWegRMkt5TnVPyuwfnK7UmlmWmlGT2C4lSNUjKKVtprfTPpKBXS9Y2yFJMFb51DHnrJgH6Gs3ZvAPfatBEGzfGwfPwMOHEc3p44LtvvmOcJo7vHnn/u++I08S7p3ccT48Mw4DfBaw5Ze0QatmUQ4P2DG5VccVk5YNK7R1s65qYl7Uju1JLV9C/XC7sOXQqYZFNQE8fvyyLdtFaMvM1Ut24cbqvWMJVt+SiAjklkikAq4ea7q1xiJxOB+Wa2cRTaYEWvNedYfBuDtWt4zSjnW1JGo/MIf5tmkXon2pD/vt96P/eJ56VTLXTyXmPixGqR7JDsu6t0zhyOByYDgdrksnUsnJ5/kRFz7VPP/yZzz/+aOMxdzK5rzN5mfDec7u8EoeB2/XCp49/4fX5WdH1oFWL4ByTj4o0loLzuu8VFHWpFFVivihhWVLBrdnERL1y69zGfa0CocBUwFc4VUeujrUK2QeVa3EOmSbkMPUkzYmQS+ZwnViWmZQT5+uFZVl0fiPIvGjJOic2783//vpFoy1UnCuEGDhMYevEcTvVzd050aoDu+7G3o0FWtpqHkoqYx96y3nremr1w2maeP/ugffvH0lJFSviELndRlJaLAjTRbYuWlqYZ9Vluc0Lnz6/KOKT1ItGMwz1XhGpHJaVXDIhqOfTU7ROof4e98uSux1PSbQ6cZfWVoF0Ai4Cnq2VudgizlWh4VybW/GGjLmdxtBbXBXUV6psgoQtC6Fu5S0NJFob9x77aOW+Jjzoe1vqED3jcF/S8t5xPIwcprEHRq3La4iDIjrec5wUdm0dXuoh0+Dg2oOnEKSX4prdSc6qIyTimKYDudmBeOugo6oia5+HO2i/2ACL27p0UG6KYIMi7g7pEqfuv3Ec9DCoi2bAqD9MSqlrE/1zLz2c29iuZsXRJPpTyr3+X0ul+q0k3TvSdgtVaOrjRZEe31DPhs5q5p/7AeQoVpIKwXE4jMrBy0Ku2ZC8htopnK5Se3QNrw19qyYWq7wp7z2Hw0SplWGIjCM4V8n5yuurBt/JArxam69c7hM3eEO4RAObVo5vFixaTtXylbq5h34wtT2juc63UsVvfTUhvqen9/z7v/1fHI8PPDw88s233zGOE+PDkeM3HwhDZBgmpsPJ9r2F1cZ4Xi68nrU7LbNQqjYp9zb2WhVFSUrgTiWTLAiZ55llWTVgSOpk7US4Xs9E7y1xMHVrK2+V/V5iQY9rqCq7wNUSjz5N7WrJoAYym/eZ941OEXh698T3f/g93jnjn6k0xXN45uXllZwzt9ts7u9tpxJDGbWU4qRyLRrcBhHEC/HNLWI0tP5rpRf1Td0Cn+ZlGQdHHKImBjnhS8I7x4dvvuXb3/2OaZo4HSc8ibK88qf//X84X15YloW//PiRjx8/a8l9V579Mk6EOFARMlqaz8vC5fkLy+1K9J7jNCo3dhh5//SozR8ucIojwTmWOvPjpx/wzpNuN9aXMyVlJh85RhVfdMYvcyJk79WzUYSxgl91iZ9wfKiRVSqHEBiGkRo8/sM7nPn8nY4npmmi1MJsjuzLsvDTx4+8nl+5dRqDCpReb7fu8fm3rl9tOBqjquc2wlVzu+4Kilik3VO/VubRf1TYdZOrDt58sEQYjNchFo0LKMoz6aGZcuG2rOSqZOjD4cC6JoPx6RnL9XJjWVaut4XX1wvXWclsq9kRaNCz0lZfiJ6YCzGqn48alhoa1WfpV5N3n9VDN2YTrKLluYt2VZNC70XLdPbt8XdQ19vlkvpe20a122TaZ9pQqD58d1dDBDZbCSMpi5UozB9LA1tnbauB0WryvpXCnDe/oNi1mHp7q6E8WjtqJcANSdKDqt2nanC78QliZMhNzk367SyWuVa2YLx5ujWkR5zvULk+v42ffnD70T6/EaRV2XlzoW+ZpTS0gp8jhG9z7bLJWo2crkFAsgy6FO3CcfbRqmyLckNuBHpr8v5e0Eu5rREBsLq6omg6rxqxW/lbpWSqqJ2BooAYQmjNCg0VrbV3unWU0VQLaxWagndBS97eHN9LSSzzDYCU9DD8evl4L104FbDsGmpVF2ftuAzm3q4ChltXX93Gdqtvvskltgc+PDzwh+//wPv333A6PfDNdxr0xOOB8f0TPkbEBZzXrrLb5Uxa9XPnAvOqAW6qmWQdZ0lpF8prWmbSOlNrZa3ZzBu38m41blq1IGKZ27ZUOgJTiqI9+3LVXVeVBZb3m8h24xpZfGsCuV8lQ/S4QRPHcRp5enoiBM+6aKkypcR8m7ndZnuOZbeXaaBe2VCeXKt6QFWheqx77e3W5v6T/rcTRhrY1fYqNfodvNciUgkEC3oOhwOnh5O6EwwBR6bmzMunv/CXP/8n87Lw8dMXPj+/6P0bBu2Qcx4XBsRFcoXrWlkKlLSSrhfKuhBCYDkcGIaBZZoYBq2RShzADbjgyDVxvr7qHLvMrF9eVe9qPBBPQvWBGrU7z7dgwfbMUCFaJTkijHhWKlcH1wglBsLhiH88EWLk/bt3PJweqFX5pylnbrcboEHwJV65XG6kpGjj2gQ1/1p0ubt+Oa4n9M6aGANexIIe7MDQ3zFnrm2il40Y3AKZdjAqsdUrXC1iWaB06F1dYZ2JMEWcK0zDoN1HVYmRTrTc0TKUZUlcb5qxzMu6QwXsfXYSokcPTOkbxmqbhTMStTPX7K95IO0k2HyydMKKHRCtO6kTQqWVggw9ENT0rxQLrqQHBE28622PSukH8n6i7GJTtoUqu+qPvq+G1EgLRHZdWht3RxG8rrvTRMr2SM+gOjzNjybEYGWUDRxWDY/aXw+caurkQkqFNSlnI1kGqPpAsR9Sm9Jr2T5SwQ6+9u9b0NI+X+PxtLHT5xNqM8lhe25owYCKdpWy8yD6J3J62sGVc2ZeVxU
9W1blGeVspViH8zY3t0+yZeGy03RuZ5EFCTlriTtXms6DXXqvNjVVGFyzLilI0pJKkN0aqLtlJcoqKraWG5F+rdJF9JzJTyBCDMIQXQ9WQmjzZkNg9sic89KD6IpTKxNRVEsDam199q5px5i2F7Jl5Db/tRTxNmvTO8fpdOLp8YkPHz7w4cM3jOPEMKhhq4giHUUSa5pZFh3T2+XC+eWFnBKXy43X80XLPeLJBPscvvN1dMnrZ3Dm16WxryWrTu0nqtdgVanQreyoQW5loxPoOJq3me1htGC2/3cbD9it8Rb4cL8fBS+EoCXNdV15ef6COMd8m5nnhZwyr6/KW2kI0bY/mddUybhS9IyqmTUXpGZyESLy37Q+/3aXglyVv3oY21/1uySGQgVHtPMwUgg1EwwBPFk33eAdrjajzpWUV1JauS0zl9uNCiwpE8KKc544FkLUrjFMS8s7CEMAryXV0+nAGCPTpKTnxqt0Fry09U0FFwJ+nCAUpjgyWDmMWlnXhSTCQmEZAsVvlQFqpaSVYh1bUguuVKQIHuvmbJ2cDSV3jmjB9HQ4MC8zuZQuQJx3dkd/jwf7Czk9JtQVA8fjgeM0aTeN23N6GjWwGkmOO82d4PcqyJr1CUp4jb79Wdu1EVEinWgX0GEcOR0Olmk5xmHkHCKfxk+8eMeyrpxfz1xvC+uaeDWkRwmQuXdSOXEW6zhq1cO0FLjeEiLqNHw6rpSqCqNhGFVvp2T9olp7nm6uzm/8iCptEe0PFOlaDC2A886RixDMf2iPmgTjibxV5xa0HIjOwcr2l5ukO30sYUNg9yqq3m1BTAyeGDcNlnFQcvLxoATz4L2Vt1QMLfrtMw6DaqBox5bCq8rJ0q6AJkngXQuqnZWQhHlOLIu2Zb5ctJyZ1qRdJ2FgXVT7QstzpfMJqGwmoM5poZndZxYsSNMW2dZRWA0mL9lmedGsVwRTHtUDeahTtybxssH8b321oCQbGvryeubL82snAJdSjPi7oWnRLE+aAmwPEFvQAxYEidbhkx5+wYtWBUVQewisDt+sIsRq+84CsJmUknIE0qpdKUW7yAx3JYkmDqlUlmzmtuLJTgmccdBmBec93lW8r8bz8n2DTCkTgrlC12LIUe0ml7VWbV1qB86OpBzDYIGFIKJKxGK/gwXb+vfyZmMaY+QPf/gD//Hv/8H/+p//k++++94sPRTJzE73urKufPr0mT/9+S/KVbxcuLw8W2ArVikUJGhHjIgjjgficLDJItYSjXLTbJMutpcL4EcNdqjastwC4brnLNDuo80WsdClBZuyta3z84dh77LHkIoeWSJRuxQjl8uF/+8/L9RSeHl+5fx6sQDL9zF0Lpo/myMOg1pelKI+TilR08rreUXWwhAcRTyTf8uERHXNGtL1NaKu86gFabqnigjTOHA6TEpodsLoIDrPdx8+8P3794wx8DgEYtFAJ6835tuF6zzz+eWFHz5/0fb8hr6HwOPDieNBOWIhDgwu4r2oBpU1jjw9PmpAFQdOpwcG8wsLTu+pq5aUI4QpMPgDvgqDeA4u4FCk93x5pVC5SebydKDWSkyFuKpnV3p9Ib+u5FqQkhjWTJWKr6p5Fr2z8vWIOCs1e8/1pgr7zjtCHPj06TPXyxVEuPmIy/W/G4h+/YruraZQqu2DXiA6MauNSsYgUirNJaxXbISutdPahr1tmqGXujb9lob0SEN6grcDsTINA4Ju1K19HkN6brcby5q43WbmRbVDmknmVjDDIlfL/EphLRkRc+TNamtfK1bCaO1nG3rVDoV99xaGDtSv4O/2SntExznBlU2Ia9O5+Tl5+k0ue/K6faSvNHm2YKd9CkXzTMnYhB+/7thqgoQNnRuicnM2vy3XOTzOeVNkNlO+aKahRSgl9QDEubZx6ttuaHnKhTVpOXNZlMiXK1Ya85S8defph/3KJRixMhQg96UoZ3MOoIopPze1drsjjSwJaECG087FYIaXNPzjnxX0tHJrNSLjym1eSCmxJBXlbO3DDZGKg3UJChu5lD0OuH9+urCaozYrL7V9wua52+axN2i9NNK489SU1LnY3KRqsWSB2udiKrAkrf/Xxpiumr0Pw2jBtnIMG6rqd4fXdh+E6hp6Zd+qvceyoRzd7sSQXd17XN8jnFkxbAjCpgL8W1/OOR5OJx4fH/nw/gPffPjAvKy8nK9amhcNalOtXK5XPn78qK3J5zOX1xclllrZSzu2Cn4worof8MHI+HWPMAi4VoKnxy7BklMB9Wxqt7HFgLvMupV7+xi0pNASJSe7dXCH9tx/fi2TWSdTWSlloZbCsly5nG/knPjy+ZkXK+GM04FhmBQFHLwK3DmvzQwxKIxoTS2lFlZDEkutjLRx/m3HsN8Ddsr3u41W9r9gkaCwaRIp4q26U4NXXZvoPYdp4jSNZncErmZcydScyFmTlnlduN7m/roVFQNUY2fTtGpaaC4wDZoEjcPA6TAyjiMxDEzDQPBmPdJMYWxdu6pAxeQCoToiwlAFqUYvSQuZyhKEZQz6MRNIqEjO5PmiwXVVDSJFrFxvbug2JSZuOAwjPqph93Q4MBnvrJ0bLpcNSfo71y8Oekqp5nlVyCmbJo2zQdODu4qSx5pL1XbKNB6H25AeW1z6IdtzsB1UdgAX68bKOff14pyVtXIhrckOPu3QWnPRfv7aMvQ2AcoGt7YTtbb3qBvBumbOlxvruhKD43QcKbHJ5JudRi2aEiH4EDoxVJzXzcOg3pJ1Qnfegm3QzjZxX5RY2A7ZlnG+9SEpSFfKzFZfKKUYd6b2Etf+EfSsXnqG4qSVsUIvY41D4DAqEfxwGC2T2BRVNeiJ+G6qqnX/YqhJkR1J1F4TO5QrkIzzseZKzkpyzQVyKuRUrNtsyyobzI5zxmS1vacderYBtHm7GUkqmtMP8rpBz5TWrdYO+7pFSzRkYPc0/4rrZ2PYNmHVBKloF2YLZL2Vj2s11e2sNXJN4Hclae87ly/YJq2k3xYAR8Trz8OkiFuphZOV1vK6sLy+kOeZAERb/51UTMWXSnDaUehixA2a8U3HA8fDER8CkNHuy8YhaWta0ZiG9LTNvycRFUKKnfS439C9DzivpaC931sPegztuQ99f/urFPUUvM43Ltcr87xyuVxJKXFJC1+WK0vO/PlPf+bPf/wvrtcry+3GfFEkxIVICCPiPOOxMKKdaExls+QREAsU1VupJTq1VfK03GAzR8Rt5P+7LapN/Eot6tmlAUsTfTQ0vWcru1Jx7/j7erfR+bAuM8tyJefE68sXXp8/k3Pm/HrmctYMfxiuVvrzHE+PTJPOjyp1Q6wciPcQvHpYWWk9ibC+5VbbkquWpdGSCbuNjbsoirg8HI8EHzgdj7x7fFA5j5LxJRFEGJ1TV/SsqvgJc6GnMoRAjpHJOp1zVr5mLjqejWOre2um2TKVJGQSKyvLDaTMFB9xaSX7iODweN3rq5CqrpiEp1ZtQYhVmNE9cimZW0kUYPEjKUQQR46OXAVKYU0za1pIOXG5ZV7mFaQS5xvhetGuwGU277GCCwEpvmtANbHZEC
IxRkqtxsX7rYnMtQXNSgZe/QreEetmENbM4LRD1LawNsZinjaW7fegBxUJdFbmafJmoF5ItQjFyhbLsmj2YiJ+XgI5FSWzXW+czxdeXs/kWtXh1bhEe45w08CgqUnSuBxqkna9LqS8Glm2MI2RaYw8WhSsJTlzm0U0Io8qj64ZoOlHrAvrutrruy5a2IOGAqV602HYYPam6fLX1EN+q0v9cSZ1F3fqiJ1WNW8sGxhi21njQtB5OM5UTVtb6jiOnI6jiWANnA7K0TmdjpwOqpA9jaN1ZqkcfXOznpfEmgrVVda8CdMpr0DuvkqpHeaf18ySYM2wrpVlTizrquNqnC0pFU9Dz1RDAizpyvaZYkTMR6nmVVmg9g6klj4m3ulB6krA01BJnQv0RwCGDCh3wg6Cv0Ou+y2vXqK0z2kelDTFqSbq5SRva9E53DAQhwFqNR7QckdKbVIDDakdYzA+nXU9BQ0MQhy6seTp4ZFxOpiHnSIky+XC5x9+4Hp+hZypy6zf2eZ8Q40qEMeJ6XjCh8A4jRxOBw1Sa6LkFSg9wAG6/AINhbMnbAa2UHv3D3ecLzZ0R5wiOhbYB+M1lGr6VnZf3mT80H12XhJfnl8RNzDPK+fzlTUlPr584b9++oHLfOPPf/oj//v//n+4WutwSbrpx2Ekjge8D7z75ve8Q60hHk6PjFHb8L1EGi5TnZW1LFAohmxSiiE8thbqFphW2cjdbZ/XIahdj6dUzdyzCfTtI/FiHY4qmWHNIIJ5GQ5U4Hx+5eXlM+u68Kf/+k/+/F//R8uwa1KyughDHAlBCbsfvv0dj0/viUPgoTwx1YN6VoWBMEaKVFKMzClRvXB1jvJWIpM2ltna8lsjbjuLEKV7aILo+fD+PX/4/e+ZxpGnx0c+fPhA8J50eWV9eYGceayV8HpBBPIEt1U7In2tPI4jQYTH44Gn08yaM+d5IS0rRbQSU6y71LGq6WiFtFzJK+TF49dXUtByVvIT3mlQo8o8G8dWz+xA9NH+XQjW6ZoRVkxPaPgA4xMSInWI1Kj7y20Q5sGxLjM//eXCX56v1OQ4vnxhGoRhnFQN/HDQEqULIJ6ci1UHIsOgCfXhcNTg/rp0l4G/df1CpMcOo4pNUpWq142lZdYNR/k5jNn+Xax81c4yDT/MoqJNiF6OgMalaG2RDSZ1FiTUoqhT06ZYkyI8qejmsT1ne1/1bmPtubq9ZEqFXBLOCbdZ+UHeOcqkMtxqD6F6CoKpC1u3WYO9u+eSZZN3NhW0Eo1YYGWLbk/EelugB5DOgXBFdTvEtW4nver2q/aepcPZHfFxTaTS9xbzaJtW8E67scZBa9ODGbO2oMd7PYCTFkZbVpSN2C0G+e6Taj0QqhKYS0MtTDXaxPKk+SqAEjDtXjvZWrI3PrONmWtB0h6e2co77bFa7nT4Yr/fEEn0NfoaEH29avf2X3Ft/KwN5m5ZeCnFDq3WObhpaQEkE5iDLVjQ0uXW3dSsXxrPr3G0ghFuYxw4Ho9MhyPO66HrQ+AaB5bzRdHFdWWthSrCNvt0dbU6yzBMjIejltRHtUNwzlHKSkliJFrrKNIBwrN97na1gB2aMKeVwHbaLtjr6/rY0KsQogU9lYq2cb/1sOZcTGpgtjKlcqIulzOfP3/ifL3w6aef+PTTj1wvlzs0YRgPDGvCh8h4fOSUVusyrJ0gGkUItraKoHu5qKRG64JpWInc7e0b902EXeBDR9U3HbBCFXcf89v71C7bddOS2pXP1Gi5arlmvjHPMy/Pz3z8+BNpXWnyPoIQ4qzq3MPAOB0ZhpFSB4Z1IaQAPlAj4B3VO4oTsgj/P3Hvth25kWQLbnN3ABFBZqZUVd3n9Mz8/x/Nw6x5mbOmu7pukjJJRgQAv9h5sIs7mFTp0qIaWhQjg3EBHO7m27aZbashoII0Y+i9DtVw039Z5agxp8ZapBhxXmZ8fHzE+XzGp08f8e233yDFiC0S7iUDpWBaN1DJIDBaBFoU7RxixhQjaoretRwEUNYkfGN63DloAk9YNZe4IbSIggoqARwSKBTJR0RAkrbADnoAIMaEEsXZjyBEltYnLURUSuAQNUcngFJEnGfU0wIwo6wn5O2EPQJbCLjVAjAh5B1hWwEilJxRqlRl11YRlTk05jUELXxJCalUZex/Wh7kF4e3eqWK3kSQMxSWKyCbIjtFCTbxPtkErPxYk8hl0UFoP5nIXaWYFbiIBoQsgEARoAQgYls33O8r7vc77uuGvGeUXDSIZV/Qz9i6TZHOwIYG0UbQ3RFKxWocIOeK+106MF/mhFIYlNjbY/gNoJ7dDv9GhiWAWt7LeIQgcuPwUEgHExYCey+7SgQvW05gaaEARsmS32EKl2ZYeuWZhiLJmsQGBTQR8zw50FmW2cMlxgZZzyJmBR2BALbcn6RAp8PQ4N9p97CXpY/A2xKv+5YJB03MVm0zhDWhAM5yqULUJndA4uQsATFcAr8YXQ/4b2PrG+BenD3n1UkBIoL4OzI9doyyAiae6QBIf5dielYF0yRUMQBvBNlfrwY69QT0mDrYtxCvaYIETWBuDC2RB0KUPIXT+YJPf/gDTqcT9nXFy2fCvq7OuoAZ87IouzNJryllesxhkTJkUi0p7pWZft09L9DCm0aJ23VXbQRbS3EPcUxqttwQGQBNtuau2v1uNCwIjCilxXtB2rKU5Gp4/36/48sP3+Pl5Rm3l2eAqyre99A4gRUcMLb7HbeXZ9Q9Y338iLyt4JQknKUMbkXrukjUvPKWqxVvQJhttrGvvol6/o1uoHY/rARe7EU8OCIAVGfHRAg76AkkSfLMwO36gidtHrxvq1aHSqWvXjHEzosg5bbecLtOmPKENAeEIOXfnDPWNKHsBdfbDfu6oaSAiRLQ3ldHi4f/MYs2VVBncZomnM9nqZx6/IAPnz7hcj7jw4cPeHx8FEZzu2MNAtZ4nsFBGnLSQggzwC1j3yuevzxj3TO2dQNXkeY4TdLqRdIOAiQLg7HXDEZRJlo22RSF8ZcISsUUGyKpXh60lxtpDhQAoogQEoAg/fuq3ts4gdIMignL9oBzviNQRcvS6JRA4EiIl7Mo9j88YHp8lPkUCDnvoBiw7yv2bUVrFWlaRHG6GYkQXIB2mqSS2/J7fur4FVKU6h/rBhW8tEyUyhz4tE4ziVJmAQGoNclGgmAVw+4t91inNLSzvC9ujFIL1vWO6/UFUhmSQJTwfH3B09MTPn/5gm3bsd7vyNsOpgAoJSbgxs6bwNpHSEIXDa6CHATwcGveguG+7vjy5Ypl2nCOEfnhgoAJaRaUGVTMLkRrEd9VeV3nBAIQDPQw2zVKXs0Ea1ypp0FACvF9E2BtA2MrC2TdHNkF7JCz0/gjAAkBmmwnIoQxRpxPCx4uUgXw8HDC48OCEKOIW82zg0PmIJUljVA1vJTiBFFFFn5cwo/qmWsehoAda0fAWqHUf6SRYGdkguUcMIGjga2usSJhCwnHxJQQUrJh0bwPBnNFK9InqBGh4AhOReSr8zhB6V1jwCLJdSI0/DTp+j7HKCvgs4mhjBhj3zdcIWJyl
8vFDcekqtgAXOAQkHtujVsnu7fOAKkTkAQsUIhoAHJr6pHPOJ0uWJaT9HBqDc9PX/BnAurTkyQ360Z9/vgJ//Z//l+4XB6Q5hmn0xkUIq7XF3z58gU5Z5RG2IsabbDPm6gK34GEgZqSgLEpTUgpgcHy/lrATdoYlFw8p8erf5JUApWqvftyGeZfw7thHhA4TNhbwPN9Rwl3VdTe0WrF09MX/OX//1/4/PkH5JKBumOK2l4hSYHHnqsASSK8xM8AJBn04XLG+uFRxiUSahT3smg/I2ZheiTXRu3Yq8R2YWOLs++ie1bVMa0KXnjIpeoVnz2twBTpzc43Z0S4nUEs4Onz99/hL3/+D+R9w3q/STPmJNVHKc2SRqBJraU03F4+o+wr5mUGUFDL7qHKQFGqCFdR8z2niNBm1On9FJklvCx6XdaOKIbk6+h8PuPDp084LQv++C9/wv/4n/+Gy8MFj5rTEwho2x0vKQrQnGY0kr0mCYmFtq+4Xnf87T/+jr1k3EpDq1Ki/3iaEafF20LFKI7jrexom9jaqq9PKeKyTJiTpS+YZIMTiLCqRnkuoGpj31Ya2iageJoXLKczYprwzSXhD/cHpLZg5oydS1+XyyeEknG6X3HOK1otqJxxX2+oreB2fcbpdMI0zYhxRghRxWcJUcGOhbcAwjLfRHbmJ45fdLd5vHpYGKaHPAYE00NKHp7i4bc+Tx0AAPDkOQz5CP7y1ulQGkamF+UAACAASURBVPpllZxdqdEl573EZiix9P+TFpXpJjnQtEbnS0KrvFrylzKIWRe0brAQ7yWEHtKSbyD/Nqc0jcHwHAD2v0pypLUqgL/PMtj9Lb/xYaCCmREdmFXf6BozqJIgeBh70sOPRpR4pVaUSi2psItaYt5ZAdtyjZERECKPpeuxaN9IKfix4s/GYAzXeI8sfpWTMcw/YBx3Bh9K0QaGknrYIzTSfBHNSWgjN6g5Z1HzE4bzEhCk/4aGAsZ18d9wHMOp5PcP6tEL+GnIpajKbTcYMUbM8+z/di/cQFRQxV0DOsrseCNOvW5WZsQYN1HpTUiLNH4ttSIuC8I0SW6FiobGecHlUbzdNM2YNaRVagU9v0A6dPfCCh7WnLRjEGYhhCSdm4OoZyet/mS9P601xHoUtTQtHMtNAmUghM70ML87iGVENAb22hCLabH0Xlc3rdQCoHL/8ApKgEC5KusiveiMScv7jlIyiFhBuszfUkXoD9AKPkvmH1jK16DHgE7W5rr22NeM2nGp3uraR8YUjPIYY05WyRFlSgpoVty9S3rxeWyFE8yMkneIfIQ+Vodl3zfM+6brLwIQp04AUkXkhJwDynsvT3dou4NkDYBjSpjnGfOySBf7yxnn8wWn8xnLaREmfkoqXEtAjOA0a381BkUAtaJUxnrfsJeMit4mZ0rSeJuIgSCOODdV4NY+iyXL/E/MCFHUmi3aYLbTRGolPSXq2hHHlRuh5oa6CnO6cMM5EBJXLGVDrjtQCVQioCHjmCaEeRLGZ1mQlgW1RtRNehaWGlBLVtBK3kvP5qLsj2JPkrLMP1fm5VeFt0aq3jYmQA2rDlQDMAGITRqKNS0pn1MXOwokicyANB6NvolQT4izUvMA8UqvzyCKiPGEECfksiMEYJ7Fg0spIGbZkDxTneGcqqiIdiXfBha9lsNYtQ5iWHJ8AlnS8qReRkKaog98V6K2Hio05H6QTDbbzEOviIGCHmWTfUwTWWfu91uRAmD6ppwsSdXykiy5kPuGYkKMUUvTp0kbiao6buwsNqwiqxRrZCmSAdJiIw5YQINEzG5YSSl6GuaavFRWn4Pj1rV3pIpHwFhQ5V0GLIvX6XiG6SZ1sNo0R4MHj0wUi5X961+tYavuATV9rY8n7LuOQO33PnpCcweERNYPz4BuV9IG4MDH8yuIDqyPHF2jxkJXosWlzkKzMAghlAamipBEBqI2BiKBVIxyPp9x+fARhYFaC9ImyYiXDx+xXB4wny7SPy1NMif0caqMWgsoaDmsVv4BkGaIFER7Jk2YZtGooSCZg7D5FwTwL2epMgsUxKtU1k+inKQ92qirk9M7MrDQ1cAaYhdaFaU0bOsdVUP8TbuQn04LLg9SKBC0TJ0BXK8riGSTD2jYtxu4ZdxfnnB9+gFJmzHHYM6dhvswztfurPqZWQisDkxPyeogsLNn8FkHdyp66K3b1gNA0u9pUpIJZgnN1X0Hlyytis6zFk6cMC+LbLIpYp0maXVSKmreQMQo24a8rQrChfWXqININ7RaUEtAfadWP1/d1P4LfViHHMkgGnbSvkjWJQFI04zpdAbFJG1HFikMiGiIVBFjwPzxW5z/8K+IJev6EyOZpoaQ7H4UAFW2IwpOHpBWPTIkP5Kqb0lio22+EwEUVQpBnL+QpL1JiJIdxY1BKaFC7MC2b7hdn5HyhuWkLUqi6LkJaG2YlwmPHz+ilox7ZNBahWW1PEsfuZ7bK/O1eBui47z958cvL1lXZD6yF+NNi7rrUQgyICwLpBVRAzstQolHBT2WyGytCwjQxMIhGa5JDf79fkUpu2h1LA+YpgXbdkMIjNNJROTmKSJnSZyrzeLUgGmwgKC5JHL+gaQCgBo6StQJQ5BqoVKqg4N5XrCcZhXgS65JYqExTUkAsYQCmJNPdWb5LlN57puQAiOdeFbebuPzXoe3DjmwI4sYsj0oE1Cd9RBqVhSzQxA13GWRpqDzLMDH/maHNLk0MKAJsSSqTsafRGoglUl0vsTAhw3KwBx6O5PWvNEhm0AgCeCJSa6raV8zS8mUjgaaNK/3gJkkMRWwBDJntkIM6tn2EKmq4ckcq03aoej4ySxQBoJYxQt7q5Hf+xgNgbcMITiLaPfT8nfMa7fcHhGPnJ0hsVYWxv+V1hDMabDlUyXZNzSAQ5Ey1SA6QVOtEvqbF8zLjBMzPvzpTwjnM2qp2BX0fPz0DR4+foPz+dI5HAZiWjDNJzACSqsI+wqpSBMRUnFkAihM4gXOJyyni8yFxh62oQBvhTLPk1dnzfOCNEm5r7Sx2UG1gx6rgsN7Vvyw6E856IkRuRU8vTxj2+54ub6glQxww8PlhP/jf/4rlmURZg0yLz9/eZH+SKVg3QrW6xP2EPH0wxnnUxJgN9wzsbdHhsZOprPxfeNhTX5tLMnG5oWP3rgdY66RfKa6NjywdOqUEIBtOWFf72Bm3J6fkbc7uDV8fPyEb7/9iBSTaLUs0pfp+nLC7XZHzjt++P4HPN1uaC1ju2tTTgqgOAGUhn0BKCCUjZFrfrd7eYTHyjirc9DcnPRK2GlKB92yQIRpWbA8PCLWivPlEZfHRwQK4LqDa0baNpz/5d/w4bYJa5t3TKWAuYL5Dm7SE7DWDa1mECooNGEIYU6LFH/kpm2UAlCj7s8haPqGylFMJw1fqxYUAvJeQCmrA9qwtwpqFdf1hvT5O6Qp4XR5xDlvKkRLWBbJBzo/SIVWKRnpMyGE5ppt4iBrfwe2RuIqyFhKzyEzB/hn2NlfWLLuPN0/ucudRg8c1HOOWhlkrRnCYVO3fI/ooINgZahjKKi2Ct4Z
atxTq5Xi4rzxfTcfn0+YnPX55J0hgRDlFIQVwoLmJChwkhmd+TWL14iMLHw0iblNNx4pCCaSlpY55n8mqwd5DImCIPh5F/+ubMepo4nw8MUQkUWq3M2RR3lzUzrwVtjWlIHMaEBGEKkW9jogHfngf+eTmwlsofh5E/xxtrrvz4dMUaTgLj4cAwTYzjxMPpwPk4WflzNPG7cRg4TCOHaTQC3l1W/p6b0k07V+OpLKu95sw1zXx9fmaZV251YdYbcYhoaDS/nlUXbvMzQuD6+MT10ewmvj49ccs3crX28HEwomGrHe3Zl0OFjbwn0rw9Xa3cIeasHsIR+IDqR5b5wKcfMpfnF5vsogmfNRVyizQN1LKS13lT8m7V3Ljt33bP1dCo0TS1au4O3MqQQCcrAYgjq6XA5eoEa4WXywu1wfV65XhM3G6fSGng9GAEdSESdUA0cjwc+f33vyeeTu96Ld8u0MArjZm+aMcY74KefRF/a0PRF6PX3V6d+GmSA61VR5TKvnCGntwou4GwJST2VTmMSvLOuGGoZv4sQpC2L4r9HDxhsUzTs8kNJfZSl6NE+2IX/Ricw6XsfA/2c7wPfPrP3pK9f26c35Nvp6qsOW8JuWDPaMmG4sTg86AEShOWEonNBFCtG6lxu2VW97s6nU6czx9NqM9NBlWVeIhomja+TMPugQ8fC2UtJOBBYBTMtbwWKNWS2FUhKF8vytdn5fFiT0tPwkMQxiExjYnDYF6CKSZu8+IVEGvBPx6PjIfE4KV8VTUR0ZRo2liaSYvUpuTSEXig2Vzh2N2mkv/h4cRxmvx6W1BcqCxk14Jrloy1gjazLfrHBz0CcQxOAK4GtWkjJKvrthqoqyE9kszrJojZ2Q+jO6irss4ZQViuZTNTk7WaEnNTWi60bA9gqWZnH0SYPMIMGMm5Ft1UmltTNCohCiboG0jTXdCTrKtASqDSoGI8HLo4lngH2l5Pjcm0iHoZz9RdrYNI4pEQT0BkZaQwUhs8zStfr5lcM1+uX3icHw1m10wYkhG+U3WXTYuiFTPVy5oprTIRGYO1db/XUlmq8ulpsQ6k0RCfYxJ0Em9nbAxiQc+8OK+ngQYXTfIMupe3wBVHm0GxT0831pz5+nTh69OVISrfHCIfDwlNgo4jKUZfiO6DHt14Roc0EIMwTgNjCsbx1sa6WntCDNZJF0LgOA3wcNzEv4ZgzJGmlTUXcm0suXLLpiVjDuh2Pw2jPaiC8M0psdaJNTekCFqEeSksa+E2Z5BgHJ3DgXEcOU72CkEYBuuAGMfBS1rDhly8JyrQN5vwrHybc7bOrNVIzbd54eVyoYyZWWdmrsQUSBMMR5uQl/XGy8sz2oTL8wvXpxe7d9f1/+ftXZscyY4zzcfPLQJAZlWTtJFmZ8z2//+j/bb7QTYmiWqSXZWZACLOxfeD+wmgmpwR24ypkEFV7MoLEJdz3F9/L2zN0B5EbREbg601WnUpcHxsKgdxceDkVzMgizESJBOkABfglVoD3741Pt7NbCzEKQJIDI+K7W1Qt80Xt4oO8+MJTxvJCGp2+XgopY5DzVmydazBLTFqsHw+cZTzdruz7Y37dmVZBtv2Z1JOXF4vLOtKlMwSX0hh5eXlC19eLpxP699ALP6xx68Ln2dUZm7wz3+3V/hh458/5zlv6lEY6SFtb838kyYS4TC6FT3H/evFRLA13ca4SilKTkoIVvSEMN+P4WvPpcYYA/GNWuVhkjHH5QaDc0QsmBjgYUDnGiBDnb3wCYGDs/W3CN7/p3HfZweODqC25vYntlb1MdhbZYxhe2R0LzINtF4d/RzU3dDMbbPGQRVyXnl9/YkYAvt9o942Q8ZDQrIjmUGNWI5a+kCzlPa8VVJtSB+0W6fvhvB0UTrw/aq8X5WPm3pz/phwpGTj+lIS61LM+qLk4/zFZEq0ZS3mQeRIlTjCJBqQ2o6iZxKm7a7S47oGfCoTIccEi13bvZqNAsPuFh1zvzEE2sRCnf6Pl6wbOVFRgnq3cZgQ2pdYByaMLmgzdZt2RT2kS10J5D8Oo7aB1IFUh/emGeExJrOHYU4KHi95GmmFx2ghzmLHTOKiq2yid3YS4tG1TxmaOBFv8pMeacSPr0ESIsUHMgWwMNHa4NYbbSgft42P204djXutVL8oKoKkZN2Sj818WZkn9zDa6/hNIZ+XwaXAkEDHwuVUDe3ag6DTP8cJm63hEKsTnqOZLZYcWUsyEm8MFg2BGoF03zzFvpkccUDtYp9rmPRVHDodPopQVapY4WReRVYUTYi3q/nmWIfQSRFWOikKbaiPDe3Tte65Xm08eAljeETD3DiGF7yHd5iRkBU0eIBtjuYf4WjWNF8Lv375/WN+F+HI25rPhj5ms592zNHvjAgJEg4vHREbTXYGkc7QBhrM4LOLubkSGH5jqlpoIdJtVBFNGBCTmJlaF1o0iS3MsfDcqGwjipGD6xc8piQGSzE3b6aGdpBRqfMZjsmKETKDjhIZ/U6rZnsADTxzS8IjN+0gvDP8T99EZT5BDwXgHNGlFFB1YYJMDkWnd9uktKuhVjJlw/0gUc7C7rOP5/HNHMcJcmzyf13gwDMC8reOZzWY/+U4hwAE8Z8NEs2n6olPYOKDbGh+ikLOGNITxLyQwq+RJ316O0Zfn+dv4LywpxGoNZfzeZ4EdHl6cfx8a1pnwSNPX/v3HcrD6vTzDrs3bdxt+XwpDoaIIyWPdWsMQ196M2rH8BDPKeHu7mmm4UmtpI/41mMS6XRQ85GDMISsw9Dy2p1kbA1/G5MH5JlZ6movHiPg7g7brXf31IHa+tO9Zw2HzIxFX9PRObTUeSYcLbS1yWoB2+ePf/OfaEWNc4N7O0b2quPp99nnDUdxNU/E/r+9Gr+p6AlBKKdivBu121V7s/FPN7VV2y3BXGX+XqVeFd0C2oSmDyvwY84LSFWk2Zk2nxqHyNz4LmBREzkOlxqL8ykMPjP1TCAviVzMGbcsy+GQm4tle8hWqS2gNPcl3ME7l2XNpGLIQsymyJKwACtKRDgRwwUhonICVvqAX96+82/fvrG3xs/vV/58vdIZjLLTk0GbsWTymvHaH8RIXN3REXW+Se02n9yooNvnjUUk0NNKH519b8joLAG2u7lbZwaLpyZfu3Bv0BAup8DrupCXwn/7cuZ//OGFnJKhCtXGSR8f7/z7z3/ivjW+ffvO97cbJcEfbiu3zQpfPQkxJIYq99bYW0V0EPtO0EaOkdfzwloyXS1HrQe4751fvn9w33dO68LvXi3JPWonu/dPrTt3J+fuQ9i70BW2beN6u6JjUBLUNRtyocF0ECKUMEgMkipfTont68rHvfLHv4BoBQ1EMY+enCIlWVhfdGJ0KYmUE+uyOMzrKOR/yRYJUQI5ZZa8sJSVdT2jCHHJ9NRpESQ0ojbCEEZT2s2795AIFu/MkN18qIaS44A0jMcTE6kY6dv4bfa5QorEEEG82Ak2AllP7mAeM6fThZzPtJ653zZut29o36F+wDCTwbIUWwylUDn5c2fET2GQEizJ3m8qC6eL+8IcnhiBNjKjZ1BXA
2rzYmEc16PkwOvLas+gf2tKCtrprZIkkHpk6QszibvRaKXR2/i7Osp/1PHMkbD36xpjOEwSZ9P3KAYejeUz/2cek/ul7v9i/AkrAFM2szoRJTqCI2ANmxdDOXmhH5Sch4+07Pl7/Bp53PVeiE0zQSt6IoNk29cIjGHcyOBiAEOBPXJmokC+Zf06RLT3Xxc+f+eh+sRT/McfghnrpRgpuRBiII9BydkboYHhLIpqo1Zr9q8fO2/fTEVa/QXC9boR5M3u+T6gG2hgzvIC7n0WoqHwS47kmIhDWbZEqsrYGrd9sN8NoLhvg60pH/fO7qqtKHqEOrfeuO07Qztb7dw3y7R7e3+n9s4sX0UbMgQZFW07I8Sne+aJtxnMF0hDsvu590PpLQjBeTptr+ybRYzctp26Ny/6LGFcRIkRklrDFZ+IzH/51+v/9pr8ZqQnlWSPlBrZtWUgNnMlxmTqow7Mw8ZZ1ltk1IQ26KO7o+okQtvXSAdpflv7TWIPj1hXJ2ZhE4Oak2+YvCGHw1I0uD5F6yhTopTVzI6SjTBCDAyNhDSQbmosywXphBjJJZKX4KGXyWXoRrhUIsiJIC+IJFRWBiuqg/fbN/7jz+/ca+WP72/86eMdgpK/COnFxmuntJLKAgxHstyFdFhhZ1W8yfuaDvZheVSfVfQowohWUPS6M3pnd6gzAVmU1QnHNxX2IQxHEE7FOCtfzgu/fz2Rc+TtQxltp6oFUf7l+xv3rfL2fuX9trEk4bYltmoqOlUbF6oqtVdu1Thist+gVpZsI6oAaIjcq6IZ3u+dP37beL/eeD0PJBROHdYwiMnk9q037nV3tCzSyAzEzeo2U/6shTa6dzQDER+XYG1OjMplidzPhjCWJKAN0eguprPw9j+TSdmXJZO8CE9pZsU0/kv2SLUNMgVb6ErK5LzQuhKzoHHQfTHsmBR5NHMDFx9Dp+l9I42Qu8VxRJDk+6xEQgouLW3U3XrMlOLhxhuTHEhPWTAkICdOZ/Peud+F9/ed260y9iv142dGvVqA6GklpUhnYefM8LFnivYe1xIopwQIMWSWxcbQo0V6Tcbz6gnRdBAxHx5Bxv/RASkFTqeIDlOKtWFoFhhvSEMmjEAeGVVDQ4caX6j36WHy+ccDyZneRE7+9KJHsFH+PGYg5/PxXBxMFRhwIKDAg4gfhXUNlMVDWtMgRv9dXoAEETOMDBD8WTkQvx/kGHMCOJFcfM3rfm1cnYQwRqR3+5yG9AyfdOkTymSikfnTH0TWacI4i7z52/+zYw7XPrPosXUuRvfzihEYqHpMy2hH3I6OQesVHXC/3Xl7uxqB/5BKC9tWEa5YgrrHSgDEYGhtgMCAoEgUyhI4LYk4YMlQqlJjpOaNKs05R8ptMzFB83sbNe6P+tRhq9XWydq4syMI9/vmsS1e9GBmhYxmDvxdDXGKE90RcM5mdHuY6bszf+mh79FBc+PU3gfb/c62V7v359cw/B6ElCPreSXl/9wa5LfbxFrFw8SRDqWTM7uzxz1EhOR3oIZAEyPsTuT1eIDnj/Ub9sHfxys/HrNd7x6jf9AD+vTAkOOkSjzGUuJGgRIMNjdzMs/AdDXIhNhm7kcMEZGIJWlloIAm0MwYGZFIU6gulb1eN94/btzrzv1u7pIERXogDCsUhv/f8TDK4zyY59mDif6D6uU3X6C/8zIGIZdCDGKptr0R1TrdKVWto1sRhhFL1UcmOT2hHEnI0V5z4ZxF8VAzqjJoVKiuikpB7OGaapw+qL1b0dOMzB3CsA4c3Kq80e/mI/HL2523jxvdiX2DgBThxYnrQRXqNKP0lG1MCbaUjOowLpBjo/OBnMqA4/o85clNJRnBNvd5z88x6Oy0p9kmMpdUQz0/mf7BfOMxRtbTmXOtdAl87YN128hZWRZPjE9KLuPgYiHjWIhiminW9oAoUKvZTth9aUZ9aLfirtgml6KNrQSeYl4e78t3MH+r08xMbXEczbk/5rhsZpjNifw255cUvMmJpJSs2PQE+xCjEWXHfH/OAVG/vgH7O7YxqCO7xonBRs99+M8tJPfnCZKAZM+p+46pO8Aei/VnXc0nXs2PhxyrwvNbeNxfemz+8+tngQD2vE3e7iRCi5j4Y6I3pUDJxnGM0RpN+4YZN8PB57Mcbeu8me9W/X0e72+eL/W8pon0BI87EHRE0AzT9UuMB2LeKN1/qGWyTQTJ6A9z7Pfr88bxmX88F8cbPP73f4XIYKIYtsb8iMbZO3kM2hT1yUMhNsurnPe0olQnRmuwvU7FxvKTF2OFloEEKZgoJAbhhLBEYR9wi8Iu6tMTISdIQxyl1YfXnTfmrfvoGLHrgqlzxzFqwtMLXL2HIUXDKS2K0lv373mUSIqPpqM7grfh2Yx6PGfq8SSOgbiFib2KBGIyz7GypM8oepQhw1yEvfCRqOZPMgJlBLRlaEqik2kIykcT4jXQgGvr7NVh1bku4QQm9/kJBLxxsM0FU+4sOXBa7KHMeRCSmczN4FJCRGI+XiGthGhjqpgX4xLcr2z1g9sd7ttgb90UKFJY1oX1nA0hChcCCeEC+gVIjH6ijTNK4P2283a7ct83/uV//Yn/91/+F1ur3GSwiZkM9jWjp0TUThym/rAbchy3fB/Qmm0qtdm81kaH9vqs3TLFyO9+/xO9NrbzxmgDWoVtQ3s3BdDtZqPKAD1a5kpOmZfTyvm88uVS+Hq2IkhHoLVg45+IFzv2me7VNrJv150/vcO9Zv7w0tmbkdTft8r32w36QLYNaY2m8NKFhUht8PbLjREqP//ljf/n//t3/vTLG3/43Su3Jnx9PfPPP638/ssXyinT7xvUDjRiTJRcbHQV4VQSqoOlZJKToMW7jmOnHi7bF3VvJmU9ZV5eTxAi66lQluxhgelAF0vJlKV4wQ1dZ0erPIbun3us5wv/9D//b17/sLG1xj/v1YzQtCLjBnRk7PZ3NfVD825tcjTM70rce0XZtsq2WUL097d3JHy4E6qJFRQbVYr4GGgaFWJFiMltHXFg0HWwbbv557Q7Y7uh7Q6ayFFgJM/sMQZIWldKvJBT4rwWXi4LKUVO54XTuhqCmzItdXQ0uhbGbhw5EXuPtjA7fw+BYnwxVVxJMkgx8/rylfPphRwKKXxBxpnROv12pe07reyM2hz2+qzN0hb6uaHP48GM+Fsb/jOJ1776+Glz48Aaxd7FfcHMPDMG4XxKnNZEiEoujZy7IzsNEeNRGIl8FiCGmtl4Zvf/ztP7EXTm9PEoEkdvZj7qG6LtbUKQgsjJrlFYgeYIh/G4lOjPkl3DMYKj5U/Gisd5m591noMHCjT/++Qa6fjc8RZgNVvwPc6Dh4ZH/EzHKjCj1+F8uPNlYVkujAH3rXO7V2v+7o3v73cA1lw4eexHlMQIyXzuxHIscxAuS+D1FCkS+V1ceAmZ29ud8ec3xjd75odEyhqJN+WXDVMRi6BiI+29Ce26WzNktnaoGu8ziTVPMQyWNDilQQnDx+fBCiZsP9juOx+3zUCGuU8HYUnmeaZjsPUbW+s+MeqMZuHUZj2BTZs87dsc8bON
tmIkryvx78g4/M1Iz8NvYconrWuiKyEHYrFOL0unYGdJs1CjEAbsCHSOoscaMpeYe0ce3MNAwCIpxFQXKQolWxdiFalnlQS8oxPEdOdIyI7u2CtEK3qQSuvBQ86U1o2ghUDOmaUUUioEWRBJwAn0ApIZutD1hA643Xa+f9+4bnf+9Oc3/vgff2Hvlb5ExmIGjqlB7qASacN5FH4WrW52pGk85Vt1m3QPRwg+6wgxcLmcLegzFVrvjH2nqjA8QXvrPv6I7mmjdnOdlsx5yZxL5Owy7XsWSg7UZt2iqho5bgyLuwBue+P9ZpXuvQ5at7HBVju3vUIfhL0hrRGioWmdQOvK+7azj8of//LBv/zbN/79z9+57srrlxfqEM5LQqPlucQ+THYwLEixpIh4nMhabGOeFu/miv3oHHhm0ompGEJQSo6cTgUVUzBEDxONyUnLPmJNKR3f/4z0/JfMQoBcCl9+9wdOfdAl0IiWL1dv9O0NHZWx3xjbGzoatTvRXKepom0KJZsLK8D9duN2u9N6ozZlq9Xn8A+eiEhGiG52Vxmj+xrh3R4Tt7BxhIWBVrRtaNuhV3pQRk8Mmb5KwtAIJdvILpnJ47rYSHVZzBU7hEco5hiRsFshNlQczbCLKphKUMSKtBBsI01H0VNYlwtLeSFJJsgKusDYGbvS742xNetUPh0deKioHkjEhIcfhYX9/a8dhR/y7cfPs//uXCAn1pZcSClwPmcu52xFT95IqWLbFdg4pjNaY/QGDEfnBtAZuoN4Tt+Yv994OlZwzK4dD2zdrehxHiNADCdSVEN2VA9kbr5zSylMDC1WxA1A41FkPfu2/Bofnxwf1VnwHP8CTq/41EMBFwrIJNhPnx49sLgDDBWBtCTi+QIE0sdmeFrr3G47t/vNrq8OsxfR4Ofd7vMggeQcnyUFziWwxMjXpfA1r3yg/KUE3qOtgz0Ewog0jSzZUJ+BNR2AKa52Q317V1q1EfGaApcSkSim3IvqGZoD0W7sFrVx2RhK3Xa2252uEIsFbEfn4iyloH1Q5Y72jnZHBLsXsaoHMh9TRKJTWNaVlA1lTiV/jjkh4h9kDE8gnhdODxiY4ZK3aCjNkmBdhBQsyqAkd+xVMwoEn9bGWeioq0mcFCnic/1gMGwWco6UHJ07kSyFNT6KnMMyeuaka0TV4GqReKhapuJLkKOKRRMhrMSQSfFMimdb1NWScRVTWbXR6KM7JFyQLrQktGlOB0frY/la47jX5+aqYrJBg2v18FiQA3n4nEMQcggEd/hPI9JFCGMY0gP0WuktEmMiZptHxxyP9zzdrZVA6w9TOJmogRerwe+N1g31ibHxftv59mEGeaZSsBGnin92f0mwkxRiIPoGJk+jzbmITaXdw5fjyeAqRVcMWff5PDYQ8YJuqmSOdtBPlF+smCK5mC9TWYzInXPxe2+SSP1bFL/ezD3q6Qd+5mHPnj1cdr+10elA23fqtjM89b5vHR0W7lqb3YHyOO3sWdmz/cxt62x3k5huG7Qa6R1aS088DHuuzEDNeXgMdERUOqMvtJoQSbSqqHqDEyKhLMgQ5/G5jYR353PUe3xCRxClD0IdhHtDgjxtfMCIBIrdPwQQg7ytMEt+f0QvetTCb3WQQiJpIjYx9VCrqNwZtcK2w1ahNlvfPrnoeRQ8x3/g2J2FH7g+8+sPozZRptGnyNzoXRLs4/4QAsXjUUx1aJzHIONXhcFEiQyfMNTn6e8Ma2y1H+NqQ1+s6JlFyXDORu+eBj43Q/8MGuzn2Hi0EeMGRJvRBMFwhWKqQ4JzYPBiaqrBxrHO2hGeziVP5+QxRtJP7kis0Bv00Kl79fMwGwJ1JDIBliA+0eEZRQFCKpF1LbTWDXVdjQRtHMLka9z8HL6Ium2DoTEuTglCjlCiUJKwZAMiah906eQ4WDKsxRRdo6mpYu2d8mBAidekNg7PMVByskZ4zRZZMRXUnSPO6KgbfI1S6RCn9NztB8TXAYScMqO4HUEItG6hwxbcGo5zNBVwQ4fVH//J8dsDRwm2KW5mFa1bgB4s2LMNxt6gKmEJnEq0E32BVc1TJwfzPqlNuO1w2+zpylk4FZsV4zeznddoBnQZLufIy0uklMCX14XXL8mysc6no+KL6USIBYkJlQWz+sv0sYJkVBshrqQ8yLmaY6V280+o0HZYlpUl/YGcT6T8hVx+j0hyo7UKasTH+35jqxu5JH7/01dq79ypbDSLyhAI2pGhMJp1RL7QazCpdhcbYzUMFWl9kIJ1PDHmTyt8osBLjIyUOK8RFUtpbu3V2PIfH4Q10fZqqhpfFJeX1eSSdO5t5+16JcXA7W5mhL01InBKAU2RRYSsCl253is/S+V92ykSuF+ro3RPG64XYqFEQo5IjkQCiyaimsIuJyugj7DZFB6Fh3NucjRkaimJy3l1jxgOR+3R++FLknN0o8SZDWTcLyvuABGW9cSXL8nGW5ev5PVCSonz+WwqwWj8nkkO7e5ui0szj4H0px6KtoZe74y9sdfB+1apXblvd95vH2Zl3xpt35xfYbEOemweNqbIyc4r2Oy+NTN1vG3Cti+MMag10lrG7umJnHiR6tdVuwV1ak989DMxFrY9oq0RBXKOnE6JHLobHnYv/GdYr8W9uDSC1pXb1olVoU4d7AAAIABJREFUue8DuTYn3CZySnYO+sISvgKDmEwpgghBMkg+igAJru6KG6NWggprzZQqFsGyf7foldbQmxHs5XSy0elTDfKpV/TY0H9EMQxNnXJ2+1pzrW/AJBpH/3smZ/vcOS+OjnkMSynEgFEGkiJhEMPOVEzZ/TA5O9Vf4/HfdDC6KXv6sKig7nw9UyK5eWx307w+QyKfx0wQQyeFza9LQ4LFD4R8IubdCuoDNY2MntDhfCu1QGNFjSN0jLcGD4aoHNYN0ynd+F/tuOc/5/opdbfolrZZgZ6SEcVDMKLxejoRgrDvO7f73f17TPUoEgh5YT2vjKHkEknZ7tvzsnJeLF9Qa0U9zZyuSHXjVlVOQThFeMnwpUBa4adLoL0E9g5sldgqqo2fXm3tve9KfzMbFgOqxPO0QKMVs7Ek1lNmzZGfvlz45z984eWSySmwlOhNvTW1ojBaZ7/vdFWkQ2hKjJElJpaYDeQIkbUsbluSOa1WJO2tHkqxjnFFY4osi0VY9NHZ6t0Ko//k+O1ID34jt06rDVpARvLq0iBQmiI5UUI2ElVR8tlQlG0X3hfrwluDu3fSKViIqAiHoytMcyrx0VZkXSKlRNY1s67ZjeUKKWfvVoovbIlppayaUCchQ/YF2pAhixSwh2DmmqCZFF/I6UxKX8jpi/08vdEdKjW+SqX1SoyBy+lEGx1pwdLho5KkH7I/VfOWeWQNWezDXMzHRHm8A5ryvs/aKAUo5hyJ5oKG4OiVJ14HYW87oVZyDCzuc5SWjAp0BrU3tr3Solio7DHyUFIwcnMS83wAZW+dsZmL75/jDWmmgjqviSVHa8yymBIhBpfrOafLPLPJKbh6z5vASSL2TWwSl6NDFkZeNjRweukAHslg3VEKjyylMeSpQPF
pu1je1romJETW04lyOhOj5ceZculhCvdc9BhNKDw8UD7xMNh/oHtD7zt9b+wfd7ba+dh2vn1czWOjD5o7U6uqQ8jqDr0W4Bk9Uw84iiMzCxVaT0zj0N7tHo0xMUb0TcZFA+qb5hj0Hhl9QUI0pGdYwGyKgdOaKckM8m73zQo35HgWnrktx8YaQRvoZovcsoAuFj/CSEQ5ISjFDdXMz6scRc/Bth6DPu6omjy/7EpqQO/obUN3s2HQbUNah21H+n/BSOSpqvphbDPHIXM0+3RbmTLLUFBLQ/cRbgiOSEaWZWFZVmIMrEthKdmKxtgIcRyo6Y/vwVAYQyg6s+gRZsJ1Q3Uw2rBstu4cxYqTYB8FkI3y7afbmNGI1D0MRjAeEdL88gTboAQICaSgLCjBEKU+9wlHoo7Cfc7YHmKGZ+PG6WNlx+cVPPMUjm4hyjNtq5RIjAVxH7I5YkSVvVokTIjmiWW0j8Qimam8swZEOS0Lp8Wyt/Zrd1m7gUjSldCFhFKCrfVLhDXBSHAuxveJbXAbnc5g0c55ge5+V89G1Yfhg4hLas09PedEKZHTKfP6svLlkpniD0VJ3bydBWxk1awJEg10Nb+p7hyeID4ATMkpCImeDb0MLZK8gdq725tMtXYKaBv0Uamt/qeX5DcVParQq9KrMqowmpix2TBzs9l5WFmntL3j+lgbcYhSEpzWQIpQm7G9wXxPVq9+n3llo1uujgQPNXWFSUp2wu2VSblYNx+dzzOVF5LtT7Uxl0hyN8nCuiy8XE6UHLmcV07rmVwySz6x5DM5X0j5TCknkMgYFd1nNzyQA6EQ4mTREx6cpKe5rT2TDwNCcTB2krmJgqRI6JEgwYqhTyRLSgis64KG+EPRE1w+2deF7XSyxdL5MNMwcH6sMdSqbw1HwTOGQdQpBCt8/KVq13UMpaFse+dDqm1KMVLiLDACEszM8RifiaLepaUYeLmc+LrtXC4nSore9Q72beMe8TRsdVsDjj9tj/BOWXxT10dYLhi8Ovw9zCyZEE0ynRLIVA+5RHtaJhxuxHjh6mZeIqZS+XQKCIC4u7d2mnYGw56bFIjNeUcaabVz3+phtDf19KPbuFZVXSU5Rycz5G/ml/la4ETFSZwNc7P0nDkd+vDgCEqIGQkRHYGYz4S4UhLkFVICWqPIRmwdaUpjIEOReGJQaCMRKGhYUR9Ni1v455JJiztg64ro2e6BZNcXv19Hs4dRwji6fbRibMNjlmSjQR8FqqNN/p1HIfxZx7xXHujb04hP5vjwychN7b47FIUiJv9fF0IIrOvKuq6EEFg8PmWaRYYYnEcyWYbz8KJfp2P9HAc9cY18MK/K4TU2mo1Be4NW7V4ZXejdVb0xs66rUwzMqA+8sO7dbRGskLLZc7RxxugQNpAdiO57N8/FNDC0rVl5FD32GaZBqSu/FCAcXJH5+T7lEFNFDkfxrYF4jP2CBFLMHlA8KHmhx0HKxeMpLN9RxZSGMYgppD0C6HA+fh4hdXP4HwFDfbpzEzEuaRQcSQr0oMQKoQ2nkAg5BVI0ZVfwERwxuBpb0GT3zHlJnEpkzWbiWrJ55lkGm90xhg6Zr9f1Xni7Z1ofNIk0b+yHj9iCiMUfBdsrA2ZuHAIkMe5QVEWrQm8c8SF2A5ropo+/fR2ejt9W9Ay4vXVTG91trk8PxGrjLVFbZIk26rp9t1ytkoS1CJKU1wtIzNSm6Ghc382k7+UU+P1P6eG7E+1kvL01rh+W55KzsJTIuiTO54WXl5VUEufzheV8IoRITCsSjTsQwgmRhEhGWRjDuDqn88U4QUtiPUV6ryzLwunlTEqZ19d/4uvLf6eUC7FcyMtX7GHZ+bg1um4glZA6UYd9vpSpPbC3Sh4RFbP+lmHQ+hj20M/5dVNjxreoGFAmpFNBUiD1Qt8HW6+H+dg/+kgx8rufXtEQGbmgEuhD2XtnKCzFIPHWu5HS1JR4S7HRXMe+9navhGBIz7QKj8ApR6QnzjFyCZGusI9OrYMhg1/anftHZc2JTGQN5ZG6nuwhv+3VjCODGU+GqKxL4n/+X3/g69cLl9PCy2WhJGH0yrdffqFdIykKS7JOyVyVg7vvPowxrRCy2z+E6Ubri4ZaZsxcGOiwdOEkgoTkSM/JXKldBeZPyCHBb63ZuZPgVv6fLN/yGftAuffKvW00FUKJFI1UlFJN6fDxvvHLL1e2rR5yfsCQntFsEQ3Co+GfSIvzLw4pmvGjfG86OkOTJFve2e1uURghJFKZRqGFy+UPlKWQk3BeAzkJvTeWbWcMK8rk405rg5QyVVd6i1DO5PjF8/xOrBdH3HJiWZJztJQYXD02Grgs/vr2F263X0CVGJQUrAuN+kEMV2vMTIBIp3PbNvaP6iMRQ/laELpD/Z95mMDhEQT6LMU+CPhPhTYKIaQjGuByOfPy8kKMkcvlzOVyJoRZsFs3bpv+JGUP29DUeG+mxBtMCbltmeoOytNQb6q6hvFWWqdulnpdG8b/6jA00XtCiby8/sRPv/8fppAV4wsK8P72je/f/kTvldY/aP0GMihjp+uGhGhj72hIoo4CugCeA+Y36yMo2VVGXkSNnhhhjtMsSkh9Df7MhiQEYTkV6l7Z606rzfc2O88hFJZ8oSyZFBdSKvQxzIbB6Q1j+EuUNSfakm0E5p48PvhFsWe3VUPZUk/o1ojVwIU0lCRKjnA+R+rXTNyE97qz904BzsX8YEbHFWBYsZHjoZIq2cajl0X46SSULHx9ybxeFl7OC8Gl8AAvcDSwM6pka51vu/DdbUX2oeh9J4rwskSW/OBizYI0SUbEmutwD8SZZyYY8bnZa+z/4PGWDqXeh81oq6A9whBkmMRcVB7K367UzUY7aRVCsX8rWbiESOvKt9KJbhNfsnBao200novUu7JvnfttJviKozzBipZi87xcjFQqMm+USWZ2lIeEkZgjEhI5FYL43DQZBJ9yoSwnYkyclhdO5YXsRU9azoAQrwnzsTEFg0x1zw9ITyTMhWPCOA6NDEc7umIbCSZR1AiTLCsihBrQDeNM/ZYL9BuOIMLptBrCk7JFUgxDeiy81VGD7vB134CB5UHav7Wh7H0Qh8l++4H0KDkEegiUEChBaEOoaryDAdy00bBRS61u2BgM8iYYobj1zrYbChCT8a5yCnx9PbOsC2XeAzGgo3O/3ZEmnJbEmsrhMjvHV8/OyAbjO7GRB1pxxCmg5kEVreNIKZCH+T1NhDEEU2wdAY7aD4h9blbgpPzPn27Ze8CQnjo6SkBiMjfzZO91Kilut8r9vh9RFfBU9DD7/L8eroqPhR55RvPcPRCAPkdhzYqXbWvmD6RmICkxksoLy+link8nu06xd2Iyo0yNO3v/ILQOEugaGSPQdWGEMyNkpLxQzl9MxZGj57CZm/AkdvZ9Y+wbtMrgyl4BHWRbsAjSiLEish0f2HigndY2C4sM0e0NYMizyPjzjgcxeRyFup3/ee8+uCoHCCTi6GM0JHs9kXPykMoXbyajG+TZiLe3xowOmhmEE+F5vgv0KA7mMzRR6IccXX1U0WqnNxtptQ
ZKoA/31ZcTp9PvOJ1eCBJNfqxQd0V5M4S0Y8o+OhIVSWoNLXdgxbhEPL2/aQz1eJZns+hLLUa0bgdl4KHoUv+uT0LUMQ5P73Kgv/Z8+L9KJMVCjsVJ90bIPWZEOCfK36U5wQe0T6TvQTLXuf64Km4I0IYZ/7pQJDiSknNgWSPNXc6tCYCcAgXz7kmTQiCG/sQYySlxWrONpTOcV6OlnJbIkhNLiYdHkDjKFaPtLdftztutsNXOBrwPA5n7AK2dFAODcCCzHiEGYuPzECK9B+poP6KOE2ns40Ct/0/Hbyp6eleu33crRmq3yHoV0EBUSF1NsRMD2qzjZXh2U8PI+EFYs6XIntbIuhjUtxRDg1IWyhLJS6ZV84wQqbYYqXUT7chwGoShxw3k1Gdm9o5I9OInEcSgQhUzUURNPqjRZI/mYFsIMZOiVZWm6Oq03SINWr3T+m6pz3Vn33cLc7ze+Xi/0Vrnut247htEJS6DuJjLpDYl+gzaRicOC/szi9qf4mhQ78MyUj6pDZFg7tqEyEgZgknDqUYkDVbx+CJkcknUHI+nk2rvg702ogh1b97hmQdT8piGHAOLmHOoJyZZIYwQfASonigcBHpzbpUMavVZ8BBi2A0+78bBMXt1g0PFzatGHzQBzVO5NdGKRzt3iLNmcTP/t+BzZPd/gCN8dmC2DLEHl0o+DAqjGxIan0aOXLnn47lL/7zDPa5EjKuWkmWVDXf57ha8uu/Ns8vmTskP5yB4QesuEP7+nz6L/HqLmAgZj9cQJyVz5JyZzLkiPdB7tGepWtMUJaBRHC0wdLPtlda6ExOnJ4wQthvxIxFTpvdKb7tZ+/v9Zhv/RASUUe+MemP0yvX7f3B7+xOCsi6OAMbB+aRcTpmgSmyD2JX9LtQqdPM4JRX7melViAuPcPBPOX4cI/147zzUiY/UdBvxLcvC6XQyW4nTiWXJxwgWxnF9xiwIRj84MBPhBOfKyOTJ2O88RlzHqB5+GLthBXGMiZzkQNPM4DuT8gmkkPMKZHNgHsp2r5grMKS0IgS63lH3WTIbD48RkU4MpuCxfC+18VGyMeZfvT+swzTKhalMUc9ftETOY+/4tEMevMOUzCU8eaMkIugY1GqNRuuVve3mSxNNlg3C/V65XndrEPdmtBGM9BslWEaiK5JlOIopRhIW/DqNYXwgjwoKQQwt7XqsZ1kCJ0lIDwwZ/H5PlJMV3bXbiDDoIGgnqB7+2KZZMDBgDONo+YDDm07zDlpz4uW0kFLnW60wqo3HIyAzkmKYbx5KVaX6SFKCi40cRe+9P/G0bA3OKf1dk5HfVPS0vfPHf3kzclo1+HUJwovLky8lcnZ1zb4N7huMalEGawRNsL5Gzl8SA7h+KG/fTKL305fI19dALoHTxdjq+z54/6j88ou5g1qQpc3j91qptSChO8fAVye/FCoJQkHiQpBICJbpI1ohJWJIZkcfB2gnlzOn0ysxFtZycrM1odWNtt3po3O7/sK2vbHvd27XN96+v7FtO3/6+Y2ff36nts5H3bnWHcmwhsSSEiErMQ1iUh91mKmX8IQ2iBBaOLJw6l7Zr+NYoP7Rh4TA+no22Nhi6S3kbmu20OTOOWfj7bQ7900cCajuswH32pHREdRg7b260/JgWTIB4ZIjNVmRm0akTNm+r6dhwNgH9VbRLuTVku17G9AHu2es9drJKSExcior5xSta8HGbzRDBanKKT+I7xYU2pncRmuePJQ2pWMDn+THoBCxYiEVJbstdCGwuMP3spg5oYRwhIsOHaZe7DhRXRwRmyjf5xY9s3YOMZHWE1kiWhvtvtH64LZX3j5ubFvltu22iTAl6mrojZM+rTB/KGumFNr+7swPnWUWjmiqGxBa0Tq6jTZatYJ2hAryQY93RHZuHwn6RgzQkxWoOh4WOHtrbNtuPAhHPBTY7jeu7+9G8IzxcFYOzAJXHv5fOqC/o/3D0Mr6hrZ3Ygx8/Xrh5eXM6RT476+Zf/7vJxsBaCVq534HLoHwiyGPsQRCCpx+J5RXyMujKPxHH7b4T1PHmVI/+ZJWfOXsWXFl4eSk+vP5zMvL68F3LMVMOWOyomc4ojM5k4dUmIeUeG6QFu1gZNbZROoIB8plkvHumhMBrAlYloWcbLxYXd6fyonl9AdiPFHWrwgnei9cr3fevn/QWycnYV1/D3Ta2Hl//9lI2aGBb3jCRoobQTqxJFK28XEqplY7zh821pLNiNVgHMEpd6e7/YiYd9OnFz05kDRaAnlKlJzNwTxEWhu8v12JMXjRY+Z9KWVSKaDwp59/4d///c/01lmXlbWYIedaEmGxfL8UGj10ggyKwIJ5iwUC2rrREe4bNzGOXUyB02VlhMi6Vu7VRkgpnOmS+anB6++ErcHb2wf/+m8/c73eSTrHZOZ6lcU4m4EdbRujYki9N5whCyf3Lvv964pE4bZ3vm3fGO1qfF1ZrPABttYQrSjKNjq7m16OA9zwkaVa0bcsq51Lsea5pH+wI/MYhvTocNjSW8KS7UN2gXAqh0Kmt0FvSm8+XxWv+BaTSJ8WQ3rGUJYiLMVkeusaOZ8TMVlcgC3AHmvQO62HY5QyT4aBlE+QrM0mzLcHj6Ngpk4Hpnf39AXJycZeMRZicEIkYgZudTe5et3obad3R3q2jW3buV/vfLxfqW1wbZVrq4QshE2IzSv6bh2UgEvY1WFA70SUw5dmVrza+LSBs4iQihFLY/J5qQxwlGX4TT2GEqTTR6J3bHaKbYStK/uwZbDWxr5XRlcvJiMkG3MVgS7QxSBuVT1EsBPpGW0wgqM83TrLhvn3jKgkMauEXArLyV12dRjB0bkJozV6sEU7yPTf4RglHgD4LDIPeYLabG0iPQ4Dh6jGIxDcgNBkr3NEYOR6j52YEMhj6uDw0edjPL++riFFQk8wTP1kCcqdvVW2agjK/Fp4jKcQ3OKdY4Tl/+L35Vx1Hr9vTgfm6BadCI94xpy6pXxnRFAavQmt3qlBGEEIze4PnU67ap5OzTOxZpcKSm+Vuu9P+9Rj2HRs5jqHLwP6G4w30E7UG4EbKUVKGZYLVjKpZC6vyTpkhtnp3wLrzXLnJAihmIAovUAoePPyWVfRizx9cHnUr5dPfw/0IOfEsthmejqduFzOh3eUEeznT3Snlecx1SRn60SWZsE1i1x1rtavbuyJ8szr71dAJPiIQulxEIMygjldL+VEymdiXgFT+tUdrlezuni5ZM6nlRBMqqyu7BkDc4uP6vEEHdxPKEZrIlM0tOfZoLF3iM0/hyvPjPsUMVWX/T0cKtnPOayReCjohOFrh6lMdSh7rYQmtG7By1bky4F+XK8b3/7yndY6X16U9JpJKmjmGIkFiZ5ZJkZUxtY/Q8r1afRo1zoEp5HU4bmVpmiOeUFDoWggnhJ1CAHlTz8H7o6qB7o1GW4cbJpNVydrt2cfa/zmOC2EifRAjJ0Sg6m9h3Gw5lPcx6Cq8ZT23qgei3QkymNUkhnpYV5fgaQYifrveCh/m2R9wNimBM82Ny3OsxEhh+iJ55EtVreSH7RhzPOh4ifbuuDTKfD6Y
ovc5RRYixFHT4uZMYl042ykdECZ3UldHjBr/g/dxh6CQYcSEkESYUrX/UEFrLPxh0dETe0FJnUPGYI5wtZhao7r/cr7xxutV97vv3Dbb9S2owJlWRgEYrzZpjlsxKFukS5EJ0DaRt9twG3S0GgXOgS1VUzVHCw7hg2OYH9+YhdydPtT9YGAm0H11qkKQwZ9BFKwGArVxMgLqoOMkUFRK2xDsNFXiJODFSgxsAQzdFxzhLDQVXnfKrfWyGKeKL11cwdtMJo9yN1IUqYR6Tbe0N7p+2aGcjNRWKDEwGldWCLmPeLhlz/MaIKNpxB5/CnimwteDHmsCb6xBS+WngqAw+5f5+jq6U8vHh6duWG9+onX8fnovXO73rneN273je9v79S98vF+Z7/eqHtjtH3eaHbN/bMf52ke8vyH+iTraSzGU+HEY+xlfDD/+oP/IR5VIGhvaKtoTEZyjNMU0qIt1L9Wh2mej2dW3XQv6A/vD3Buls5hG6hRb0sJ5HRCcMNGh/ZHH1yvGyKD2+3EdjdFS1oCKQcKidevxcISQySkFYmZy2VhKdFG5J90/O3RkRwqwVxMmRVjdGXW4m7gjuiMKfl3ivrMKIRDqT9/zzQkfi56zBgy+PdMg9eBEYCTI2mR6W8zzVURMw6NIqRuHkzCw0g2egNrSsDGtt8tqbtWVBeidEJQ7lun94COZIrQ7iTrPlFFnN+XnFwbnhoYO2LA40fszlR1hrojkXjh94NU+JMOW1vHgdzVpnDTw0Jj3+26ttbYa2WouodcQ4Hrx51aB60N9t0MCntTUjSrB3vMguXFqa3J2fk5Rz5dMOSzd/Um0BukI7B7WqlMT6hObwNjNTSWHDifjDt3WaNbjQivl2g83VNhLfbvtibIw/BVrVYIqNcJsMTAmgIBtSic3fbbIY3Kkx8PhvBOFDKINyGu5h2etTdUbaQd/sGOzNqhfjOia3PuxnqO5DWxSmBNifO6UnLkGo3kutXG2gJ9BOKwm3FdAjEJv/8pMqr5D/z0JfL1IqQcuLwUzl9O3LfB+XJnOdmcEzHZX+02b98rEDwQsSpRQUoihcUyt+JCiIttTtNBegxGq4y+E1Mmp9WIlflEyAvEbGkv9QNF+OX7z/zHf/wrtW5sY2MbN7pa/tj58krKjZJvyAj2O1o0WWwIBCnktNp77Erbt4PXFBAnZtuSogNoAg2kCdIi2sKnwQRzK4piTtfRWWtLyqBmlHV3mF010VI2yWmIpFRQlOQvI89NNd8gpm7QsyoncxAkEHk9nbisC7V3/vX7Gz9fP6xSb52qG9oDbc3EYFC55MCIAUnKcCfYPjo7jRYDJUWWUyHFyHnJ/O7rmVM2z4icozmCy6PLlRAsm8V5OzIfkCEgFnwoUW1DG1hqe1KCe5+ot7ZTSjyGuBPodCt9bMbBCz3UJPCfPN06Nqxt2/nLX77x/e2Dj+uNv/zlF7Z9Z98aHx87vRlxnNGPeEJFH0HWx/t09IpH0SMcgBiAL2rzDTx93/zjuehRNV8dcV+q7WYjEgkmSw7z3djPHL2ZU6gTeXsbrqqL9vXMgnamNiszDVzHo7j5csn89PWMyGDfAnW3NWDfGh/XN7Z74dsvF75/F9bVVC3LRSiqlJOlydvoZnJSLlwu5qvy6+iHf+QxSfDztM7OPIRguXdfvvgIy8ZbIVqA7tDmKJt76KA+PrefFA7kxtABOVyLny6/WtiyKbkyOsdrYbEmDVO8zi3JcrQsLjOl4FdkcO5Cz0peCqdz8bibYHE9o/P2/s4ff/4P7veN18uJ/f5CjMK+bbTdUA6RRqCZIV6251KCufWu6+rO9cYd+uFqBJAQKT4uNYdob5ir2aAcn/gzH05fCyzcttNaZYzB1flU4mg/mBhk37uPtwopL4Dw9v3K9WNnDCWyI3q3QkYyOQ63aYjktDqVZHCOypICSymG6Iii2thrR/B0di94ckksSzexAKbQG32w3yv3OmDsvF4Sazm52Wshp8Bljfz0asHTv//pwutlYSnJvNid1BPcAHTgNJdg1IqXHPi6Jrba+b41rvfd1tRenXAOMZshrjwjz/79yeeZbRhxPrhtRf47mpHf7tOzu5zSYTKK8yBEzJslGiNfghyhk1318BuUMF10hWURzmdDSE6L2WKnLCzFnHSHDrKbalknqC7f42+8nHxLcJgvPojMcwGZnhdjmm2ZAVL0+AqCEXtVlD6q8Vb2Kx/Xb+z1TqNTpU0wnZQzQ1398qTUMvv14O8l2kOpzSF6oBtx07rfYIF3T94+qoYOWJrtJyMEYotItOG9ZZco6IjUaO9lEnbRSIzmmQNmDB/tiwnRVC6iVgiEoGgwhCgHG5W95sxP64m9Nb6nG29ihYGoMlo3cKu7l0o0sAvUFDPOzleEUYehJ2Ju27NwW0pmXbKreMKPSM/xWa3g+aHlnV9n7c8DNRDrduXoBqdXi9/7Mv8+4f6JhvAgBYv5WH120TPHFRYVsXO73bldb3x8fLBtG60O2mbGcTYm940M/Fl4oKHHh/gB9HlswHbabOT867tT57c/VUMyz9p8DkO3dPVmXhudaWDJUaTqaP7qNqZzxYs64mvXyzfsA4HyFHAMMjUr+4XLeUFkED2Govfu6rVKjMK2DZP4JiuiYrbPn/LEriJimhZiNMfZzyx45rU8NmVHDicykJJJ00spplx1mfqDjzMYrlaaF3F6ilnd/vBTm+P959cUhNg9YMw5U7pZMWQV8uRPTnTO0c1gIwd7n8YXy442xBitQWhToWUOxLfbjRSEqysxexuM/sQjmuOSeTrUDAbniFllPCGLHMhlFEOKdYiN5oMgzjf7ocj/RLTn4MnxQO9679S6HQR/cbjNih6T0cc4yNk+yHavBjIotNape0MTrpIzJCHK3PfUzn2cXlvRx/yP9cHW83is/cGjHFTn6WtTAAAgAElEQVTFqRf2/Bilo4F2Sg7EkFiKqbdyipxOkfOaKDlwWrIpKJPZkzRHex5xKXZ5rE4wddgSjdoi2ui1MnRQW6V3k6MXIDGNRR/L9DSZBFe6qToia4Xcf3b89hgKv+nmTZNCYC2Z0xpZnDjqxbeNScagD3GHWoOhcHvwkoXLxTbZ8zmwrtaJlxJJWUjdkJBgIo9j3twnV8AhSpuZPhQ70U3lcojEkOhjp9Y7o1UYd78IJj+OuRBzRkOkYgtsHZVt3Olj8L6/s7Gxs7Nr5d53l9kF2gjmT9OrfT7npvRqIagyZh/6TLQE+mBUfPM15j1DvEgzVZHGwPhECB07dY+qP4jPhU0J0scgOPwYx4MfECVQghHTJvBtc87JRTIiYu1CGEKP4eAHnYPwKsIWhEsMnGJw191pkieMNtBmGU5Nu/FoxmCkiJqhDikYirPkyOW0sJbM5bRwWorlviRT6D1k1fLoFI5Cx//f3ECfxjQgztkw0rmI10jeZYQwIy+eS1J1ddk4nLVVcev98fcoKf9Bl3QW9zOozz+PPJkHTnROjbOUXPliLrozM+zZlO5R/+h0pvQRR5A5Ppj//kDC/lp5eJSMB+8GjBMAP5b3emzCVnjGaD87
xXCgLGb2qV6fPVQfo5t9gjKjVZoV4WrohhGxd++8O9vWuV4tCrs222TtvZo1xQ9XWZ/f2+cdNsrj2JxKMWQjpQeHZxoLmrrJxkZj2lzo9NKZ9f28QPJAIHnc78xGzUe9Y9hmrRoRcbhEEyLmxCwhmX9XUCTO4sqeZeBY562Aq/R283N3MkQiRr68nvmnf/oD920jihU2rQ3q1tjvhiStixKISAoEMjkuFqsRCzkuNi4TZfjdNMbk7nCMVWahN6/dI/7icX9/5hF8NJlzAufTmGu2FTzTWDF1yAVHuIyeoQijYS9Vc2+OVsjoGNR9p4fAungGpUCQBmqux3I8ZdbkhKc1ceYF2tir0TXQqpq9SO30tqO9EyXwci6ImH/by2VxpCfx06s1AedTNm6VjMcagRUoMSarCcbweynwshZ+/3rmtjdutfPtuoGq02Os0Ek5+YjSiht9vlLyY2NmiuL+d9Wvv63oUaALMizZVYdSYuT1vPDlJbGuhRwsGBJcftYHrVlOXwxmdQ8dCYHz2ULvBDgvkdNi1ftyTpTVUIBchFgAU7jRm7nj9vGYeYcDZbJCJzvatCSDVLe283F/Y7t/EEMjR+taQiqk05lYFvY+uPdO643rvvF2v1J753r/xhvvNKlc28bbfvMCLrr0dvC+3ahuzNf2Tr83hAgN0rC4CaOwubV+7YxmskTFir6gnvycIz2blP4wO/iEYw4Gohg6l93NOpdii50M7jUxghJJ5J7RMYh5JZWTbfxiPTCq5LKSsoXiDbnSxpUmO5KNVJkVfgrCfwvCpoHvKXDNgTYGH1XZejcq0660aAVHn0YNRWhJyRJIIVKi8cZeToU/fL1wPq1cVrNBX7JJJkN8EAj/Ct15ellNYFXNAaNiSiL5/3l7sy5HkiNL8xNVNTMA7h5LZpLs6nPmbf7/v5qe6apuMiPcHTAzVRWZBxE1ILKqSGY1nZYHGRG+ALboInLlyr25+6+Yc5ScOHmvgcvj3helUy8HRrJkhP+n8iC18mGHL+5h6tjdC+ku4+B6R2ZGTh2NDWmajeXkm+L7ezhgRxfW4ZL9Q1ATiKYIJhkbbReSGLiPHXNzaIjcMTKso6ZhNpyO9xoExDtHSgJhcPPPof84REWTJJ9vrbt+WHNxTNftcB5QNnHPnpqPoCcnF6lTgxpCce/vlV+/VVovbOtE7zPDt0kIg8tAwsZa+5FBz7AEySW7g3Ryf6aXTy/MIRo6L9MhldDaBgyLAg/8HG0lEpUYqIGiHXuFZV+nEMxCtd6iC0+HhlWJ/EAgTzgykUipxnsqyXy9QDvat+gKc8QtJUN1ZVu/I7JRFjhdPrs6d/nC6elMbZ1f//KdP//bn9lqY71Wrt837w57KZQ0IZZJdgql/JnT/MwyPyHi6sQ9gv3RdHJ0/A2EcATx5sHcaD4YBskfdYhAzpkyTSyL87BUO3MrHvSIkYaCtERjjUjIhPg6NOWJubi7fIpKhiD01ri+v5NzYi7PTJeTBz0anUPqSs1jZqYj0BkNGOEXGPYzrRvbCnsz9qa0zcvh5/OJr1++sERS+fy0UCLo+fq8MBUJh3UliZf9Rwtlkol5ciRSe8N6Y0qJX14uNArXrfK27vzbX74jwHK6kJczIkNyxP0M97qF7+UYy2P+jTKw0rSGeOZfP36/99YPqKtvmlNxRdSSfSN0yDxKWqEX9OijM94glyDnIU44nFLAoq6PkjLEGEDSsKi/bygHofRhH0vim/nwWMkSFeZWaXWFbEwpunQku3hbLmBuJlpN2fvObV/Ze2VrG80aTfqB9IygR3v2LhMNk0TlYMofbvMExEkkxyMTDr0bv6jItyQmpCQ+2mWd47zuUf/R8ZHS0a0kXQ6is+FdWUOR0zsFfOGYo6tGpFHK7iXJ7DoSo5tgRlj8g5nFFT/FJFzYx2bl984Evzei9MhqRq97EsLba5S0fDLOpTCVHIPzXhZ4FNIDolQqP9yJH0pdDDj1RwG+++vePnzf1B9RDiLTlONrHyU98HiMstuhPP1ATh1oD8RmmDwoKRnm4vcr54ajG1HWsHRHNh4QgsPoUsbQfUDMeJyb99sbNwgeNt07giSRcf72iu6btWfHxPoQSA9R6g70qMdma8EhEh12IH3cIHx10AMJUNWjrLDv5p2D+kCCl0eNmntm+c86RhnH552XtB51XmAE2iPY9efnOksDhXtENcaAiO+Ne2x2F/R7eOb++6PDKbDdQKe9I9azeQmirAe39z/92Xe0VxAhW3NftyKcTjOW3JZgvW3+G30Qdj2I6q1g3XlfSRy5z9klR3IaPmp+3gM5OYJtiy4o7kjPD88vhu1HaaHdP0QYWjJmOTqDze8rgSZDqE6743pr/mRUA60a6tKGk7njvFuPNnws1mQLrTf74XUEdo/r4X2zxjmKAyW1uyitujbbsmQuZ19nz2cvb11O/ve5JMSaB2/2UPKOJCENjqalYy4tc+H55GN0zin0vr3s5kLDUTHCr+c/9aF8+LBxDX/r+N1Bjy+m3AePeSuhdu82Em2eCeCaASndnW5jXzse4A8tmFGrluDveOdXp7bGVndaVeraaZvSFH79vpOmxLka89MNK5lp6qjMLL2TZOV2a+S0sK6vfP/+v9jWN87nhcv5iXmekJxZW0OA1+3GX97f2Frluq98v71TtdH6yq473TpXGpt0uigNJ9V2M1ox5JxJEfGepkRenBtg2rFOkMM6iKv7Ss4UyUwyUWQCc2Ss9YZ2oWmmH6qF//jjgA8P77LpEDk7AsaUKcXLTe5J5ZNJtTkUnhIpZ4+z9f6yaK2T1h3SDK33va68r52qiunGlJzMl8LQ0BR67bQU150cDtXssvGpZOZ54unpdBjcPZ1mnpaJZXan9JyGXP5AGcZmHQEAd5+aOxb6mO05IuTSNdGFphEkRCB1kJaPRxMbfFDFRmY5xvxwC/+nHMd8bPTuhH3tNdpV69E+jlYvGxVBexCye0X76mVaTZiGL9IoDZirhPcWgmFyHy8yoIVAliCFxszmXVgOE+NIQY+yYUgLSCOTDpIzQJLuMD0e+CbxbsuShbm4LEZLRg7vHu2NnUqPREJVaQbXq/Itb07AtR0vazmc747Vmdbg/V0R6by9dl5fOyUrpxNMUxCrNRbV0en3wYcHeMK0jI7Y0bXmbb6tKffOmMDRzEVPsdHWrkegbvehHv0wI0gZyQTHD5m32jq/KkrK/u2Ck6PBeER9Rq+NYj1QHwmUVr0TSHXDUOr+zvr+KynfqJqo3dV6ta+IVUQa85R4upwRlE8vZz5/ujDPhefLC+flQpkmpjyRY5zJ8KZK6h0/41k1i7H6sF1BzAGOe/aRz3PcVtWQYGiheB7I8vC7EoEcFiGICxI6WVwDZQYSEfjloI+4llOKBMa0Br9tID3q3LmesTxKbJ5UT5O/zzwryzJzOjVKGGXPHU5dmZZG7cblcuL5PLEsmWnyPS5LJEyTN5RY72gdtiRy8LA0fOs4eFiFlNzuYlfvAvvp5cIvX15oincGl2gCedDmmcIwOkUzinN3gkYQQXa7t+X91eN3Epm
9lTm6449E11pHm2DZvPxlQrJKkU6WMTmdxKbqRqWaoEzDsyfUIZcZSZmehKqdvTdu+877ulJ3ZX0z9psxr0pabqy9c75UWGZ2UeZpYuudZVnAMvQJNFP3d67f/5W6X/n69Sf+8PMXzpcXtgRv+0bdN/7y9o3/5y//xm1fWdvO236jW4diUDomxo2da2p03BG6ho7Etih8yqSeWGqmVCNNQlkkAgSj75VujZQTy9PCPHmX0pIWpjS5T1Gr3r5ZE6oFtYT9HQ/xv3JIEpbT4q7LpxPTNPGIBuTQABHx55tTEJxN3dAz6iUjPlHT4DW5SB97h9qQ1knaMVVut51f97E8dk7ZM7MsHaxj3ahbO4i2KXVIypxxxv48s5wXvnx+5vl55uVy5uvLmcvJ69zLVA449GjBheNPLLqUIgu9139HW3pkx5KwIGOnZEcL/sGNkHtpdbydrzGC9R/BctNR5v1gN+e4PlOvxfe20etG21fa7urh27q6d41YzEvnjPSwb9B2o+3vzoexjNkwg4yF24x13di2LeJJD3IcLo/MXxI5z6Tk6qja2tFIcOjpSPdX6qSkvoBGqVFl0J47zdxN2UXywvutZC6L6zS1Bq2YcwabcruumHZat8Pu5Pv3xnbtkVHvCNXVyKeF02kil8K+w7dfG63Cn/93Y54qy2L8/LMxzxJBn0azxCNX6mMOEUe4y5Q8k55HN6IHZqr1aD3msJGUo6RFghTdbE42j6TC7og7QNeMahCZ75DgMS88GXUbEyG5uF+gZeCBrCR1VfSUXFk9j1KgBOlcMSq93zDd6N2ou4FMqEz0tPi+UF8R28hUypK5TC9kEX76+sIvP78wTROfPj3z8vQcqs9nspUItickFboqDaGaQO/03Z0DuA9VeFhRBwL7oUGsjTXA2HfXfCvhFec6S87H8e68iXmZkSS8fn9n30NaohCkVlimiXmaAWHfN/Ztc3SHhunuFYS+g7rYq7aCVh8jJfuYT5IoQTvoCE/PFy83q7B077Tu5t6K3Yx5mjhfTmE+7fQSEWUpTkuZp0Jdd7atob2hPaE9RdDT3V0ACeNqX1NexCiTstaZ//7LZ1b1bu/XJrw1D+ZrdZsiEWGZFw90xDm+kuRAdBUPAGutd1T3rxy/m9PjYnVRrhFhwMSupeFEOEEQsyNTfqzSDLTn6FYaaE9y7RSJbHHIoveAp71VHfaK8032zm1rkBPr3lj3imJMu3vomApWd6wLbb+xbTd6XenNmeG5uI9W653dOmvd3UJiW1n7zrWtdAsRpuATNJQuRhejBdqjGJYMmb0UlMWQHK2h+WHUj7IN5vwdUrxcNVOJSTgWJ5QHoPAfftx9eh4EGx+e0SC9jdc9ZfTAZzyjR52a4/VY13yIPHqITXUMC4PKpA/dPfg46i3IfabIwQ+4l+C8xXLyktaUw2k93Ul6eOA9ljcvgcrjxR1fP+7Hj3fnGN+PZTF5+MFj6bzvFXGdcv8rMCw7/hnlrbFxDRNIU+/g0fiztfC1OgI4QrzNuQOuhdOPdml/rzvK48aSO61ucY1RChQhazlI3hLnMgjNDuseJ+m/ebhiExuzo3JHYMS9VOEGJhG4oGEoakFBic8fmzue7Y1uulYrtBrIUkWo3qpbpoPXoB1q9ZLKtivbpnFv/Ex+vMWPCN/HHWNNzNnbwHOWuGd+P3VYiQSn6mjhP8qxrmE1AnzsN47oRALaiaBpdLRG2dt8YxnInQ3j0Qfle5Eca7iXJ/TBl+1H6oWjPWa+IauuGM27+DIoCdMWCKCjeTlP5CTOI1lOzFNhnmam4oj0YZRrXlcQydhIxvXOTQppqJi797kvD4jtR6ulj3vuCKPeeTVhITLW4aGkLUmCM3iXG0jJTz9ncYNjcxHXwdtChov8I+ScuDc1yMNn3l/jc0sp7hmZM2oe9GRzr75SQmcpp2NcjXPyxiG3azkWw4cGikf9J7gLwk7Z/LkjnOeJp9NM6cq6KlmVrj/OPD9X52vaQN2P0qz/zJBP+FvH7y5vHRub+MRpXdm2zpRhSoJQyOLKypdLQnLifMrMJ++qAWFbPfM/yUyJdssynZnPTyBC7cbajLVBNaFJogtUjF0ME2ElMeHaMP/6euNKo5TC5XVz0lcHXQ2tMOXO07wxpVgE4m7utfLr9TvXVvm2vnFrG5tVNq2sWummHqBohgQqiVzmH8oj2nGX1+TEMQ+ZfUM/PSWWxTvXttaxHUSF3DOTTSRL3pY+xE+yT2KZhLxkxMp9s/6AwzUQhvv4Q9Bi4Zs2kJEDLvHnLoENe/eLl7y8LBemo73Tm5fzOkZP0ZkRJQmLaL0kV2lO4po+no32WLwNskLymm5OxcW45oXL+eIlrnlmKlEvJkqvInQNw03T0ENKHoT/AMHcM777Pb5zSO7f9JdPOlxXZgRN430iIDginfF7x237+IV1XIf70+20urJt71zfv7FtG703tNfQu3pINMQDG9+b1O8T+IYm0w/XMDrAUhrBUnA3TLBkJPMuRF/zWmxy/fhM3ygFtOCmvQ0l0dS8hGLBBjGj9kptV8/crCHsgKF9w/rmCEQorvtzaEzZix1mjhL5w2iYhHM8+tBxF0w7g33f6XrFLPPtm7HMjaenxNev3jQ7lNyFTE5ubPzRz3GOzpUUJWUPdJpvZEHQJQK7kYRkC5FHiftPi+cb8wqik9aDxdYzPbq2NH5GRCipUFKgvMPDUACZHyL/Gmd754EJmSTFg2oTV1Fm6MHg5UttdL1hlmlto9rN29h75bQoywRz+CDmlHh5XqIzKSNitLqTUqIpSHUzWskVSRNdXXDytm603rneNtZ9c5mUSUgluDUlH3N45GUfdagq7+8r+7azhQN9koycSvj3jSR//IavrTm5bEtOQktGi808FzkczHNIvwhO9vc5R6Dn7qwuI0A++g3SfZ3o/qHLMvOk4t6Lu1GbAxtFXDx0JJruMxjmo8l5Ob5u3EnT4iPGDZ6jo3DbKzkpaZ6YUtx7cx+vCeP5NPHzpwtr7Wx647pXMKWqy7x412mOIPye0JgpGkml4dwh+zv2y99X3opMXIWAO91b53r1GuJcnJRaMpwW4eVTZt7hNBdOp4mSvG5/uzbqLuSycL4sSCpMpxfOz58xgde3K9d15X03VhWaZGoS9qSs4hDmu99ars14+/Mr+VWjnXpyx/Oq1LeGbspPnyb+7//riS+f5njygBjrvvKvf/5ffF+vXLXypjeqdW6686473ZSsmaK+ROaUKcnfI8WeYGakuZNfPJPJIaudgDl5INhq5/besJvAJJRWWGz2hUGza1H4SAcREpnyNLk3VP6YoEdEgkPlbeOt3+X+vSOkHbXvA80zuwcnIk54C6XcWmsYwTVac30HbY1mRg0WfiuJ6jw9JAtzEjR1cqouRW7gPlkGCax4Upk6lFSYpoXTcubl+YUvn55YSmKeMiW7js/h7q0uI6DmXXNTzj4ZLbKDcVnBs5H0yEeJARKlLid6ewdGnkaGGxkMHMiWPqBe8APAFZyefw48oKbU/ca2vnN7/8brt3/jdrt6CTkPAuPQ9EgIUf
v3Oua4eu9uzBOj9DGc40e2J6KYuXmhIz3dN0cEenWk9YegJyEyAdmF5nTHbKJrYk+FFj+vYStS641t+0bXHdPqwY4ptzzx/npCUmY5PXG+vPhip5V5MjQ7v6daSFMEb8wvzI77NPRTzIzb6qWXfc9cLhXVhS+fJ/70py94m3amiPtQpLIcarC/RYH+UUdKidNpce+kCOhd+FQPTaKBOGmUJTCgOOlAEl7e0N0TM/NnYOAu1d0J6nsv3qKPuM2MbiTJnJczTE5cNjKkyRElh38gelFdi6z6uImg0pXtEypeCjPpBN8aUwtnd+c51mpcN8/qU/ZW6CSJy2nm6fTsitPzifNpiuBPXeAV6FQ0Os9IC8hEV+M99Jda77zd3ln3jZSF06VQZjcLXnKKBgx88//AqKd35fu3N2ptbKtfe0kTKRVKmckjER/Bl0ctlJI4nWbX9En9sGOYihvqAmHsGmtbMiceRzdYTt4t7STy5mrWWNhucHAMDTifz0zzmb029M1ROMmZPE1BrpajeWCeMueTo3BTwtcBheHwLp6/UMRL1dqV27Yfdh9Tmf1ZGhQUEny9LMhUuO2V933n19fd0aLefKzkjDGTIjHytd6lCTRqIiCkMvF3xDz/NaRnJMIiHG7braVQufTDeTqC4rLuuYRUuDga4OQ6zyKSFFKaSGUK+qlnf8Mp3oJEq+KZjCX/exMvo/XWg0AtTCiZ5CaWrzu6dZZyovUz92XKz1O1s9eddd/YaXTRQ/p6vHDLtoim73YN/h3fqNMUe6YZ2YyszsZ37pmQemQ+HUjibHTzl5rcBzwMNBnJQhpt0R91HGWeOxqhR6nyr8P4EXMfv3uPvh/+bfbQnOY+S94FHTB8+m35KEpjGkTZCDYdnHvsMgtNinx3mx7y6eMaNFRARxbw127jf/b9H9bCKPMNnY8ffvnhh8d9OQIru9/ff85xz+xVG727V5xwVzI+SifJIeoUMHqSIag4SnsjEPTRPsQIR6WTA0oXhlbTCJIYCEWQl/3+5ChoednKzDdx1+AIKD6CaBfXqxHwVHdfj4DY3z6Tc6H3MwMJGdWYe1B2ROzxgP1JH2BFZNhDaiBXZd8r25bY96Ha628qR0nno4UJ/RhdlPfTHkkBxzUdpYOB6Jtw2OGII2SjhT+IcljP7nVngjYXqzQT3N83HyjevYoXCxIwSlqHzpmE/13cI5MgFY/7FIQ/UR9nvmeMMeEbpfaOKuGSPTyhEvOSopMnkccc19C7GoC6xiocNZ6uRquN1txrbkgaJBN6TySVB6HQY6Z+KNIzuCmteQOAanCq5KEDNNbOcT4+x7y0CQnNRk89SlSA2FGpO6gmMRctxsfRmMW4wHia44tGCPvJIfKoZqFxN4Qwc3D15OFrw9oikaMEbbHIjf31WKu5xwcqobHnj+xYNxMuLHuKezUlL1UPuvx47+P4zdTzcRr0BZG/a27+/pb1OJnR8q0mrJsPpOuqXFdnupDh+euZbuZZei6RbSqpKCkn5ssTT59/ZpoL0+UC04yacmvKr9eN91tlQ2D2iHgRJZ2VNCWmz4n0POqZAuK1+dutY7Wju9KuHd06z5fRuuiTw0K4y472Rg2RNidi1u7EXTPIZWiyJOacOZXik3xvVG3eSaGNPRCSKRYjN250SFDFQBM05+7sa+VWbpAF7QWb3OxzzzstdYoI8xT6Gh+4wGoEFy01kqaQ+w+Niz46VSyIn16G5EHp+I5mGCUCETHnS+WSMc20JNxwmJXWqL35xhRdIbU5+TkazQNJiuqWuaS9KNSts10r26VRt07dXTzRioeWBvSQDhjIivuxjDkv9yjkh/rVvdwxdDtUuy+q3Ul4GoukBEtUUj4sLMy6r8BjAQs04Qj4VMM885/TvZWzC4W9PM9gZ9r+wrZNlJKc6J0S0yTMiy9u59PE09lbf//8604qO7Uq1xtcb9WD+uQgpEZ2mCJCUr0jYceCJ/Hzg29D8kDX0tEB5d1l3lnmPMAgwmpD+7Ce2KLzE5KUMDG+b7YSqNa+XV1OYazrZpQkLNMUgXiLO2OxcLvStvO/xlhwlFMVWm3su/sh1dZpzRVsyd66+7iBfNThd83/Q6OcGnIN41oGf0PMyEOVto+AzxBtmFZPJEyjo8mvrzYvcd12Ya0eyHcrKN49dFku3tGZs7t9l4mDdxUZu+nCoQHFzgiODkFE8XvtgZDP/2FnkZKXQPOSKYvRDbddKI6quYSCa67U3jGrkfwMXpHQNUpzpGNq+wY7AogRuPsYHRZEoqE8PIK5wdL/oKN35fX1NrZNMKE3Y1srqM8TLZF8pHSoMN95ryMpDjQbDf6Tf6+UiZF8DC6kJAukR1xOJvwrkbAVwlvSVSPZiPJRSsI0lyiRZqQMnp6QS3Dncg6boIR1Zas+X+kaUgKDqeqxwXVTbuvuYyWaI3JKzElZst/3bJ3ZGp3G0wSfzoWtKVWhGkiI0R7J8dH+Psj58fWR2/yN43cSmSNrSDlIx4VuxnWFWmGZldfrTtdEOk98/nKK8sz94c14i3LOmdPLZz799Ed3Or4kWBLaKm9N+V+vV65rZzXgdCKZcX42TiipCOVJyKdxYrHprcr1tbFfO7YZ/b2hN+XTk6tBD/NRJ5c296mRBtJJRZhPhZ5gj24llwP3oCdL4pQLz9OMxOJhocRcW2MdgliBMhTxDTmLx8OiCauJrrC+bZ7FZqHXjE4JK0Y/Kzo7Pphm7974KANg13joUfIhCJ39CHpcb+he/pmmCSvljg5x7DGYQZla6Cs0yjSRZ6+x71l4i0x8q405jFxzEI9797bSHHGIRBYrQfZOCNKNequsbxvreWG9NbZbJ50EPbmYk2E08xJXU6V2Dz5ztzsCgQRo8UARl4fvMUpRja5Exqi0EMrzmnEEPTkfUOthjRCdM2aOMnUzP5cQzvtnHFNJvDwtaDuxTJ2lfKHWjWkqnELQrhSYZ0d7LpeJ5+cFEeHpXzdyXlm3zv/813f+/OsbvRvz5C2uQygkZXF5imNTGbfP76vL4Pv9dOVYbyuv6h1+1pvzjnKOEpMiKXkg1G6+qEfnJwVECpK81HbXPPJOtW19i88p5DwhuFBpWlzMranQjm7A8bpLZSjGIZio7ni9rsK6JeruGXoKGF/yaPH+YNROoEiMfnXOn426wbGlB8nbOslcOTp3/zeY14Szd51qH3ISUHfYdhxhwcMAACAASURBVEdFrjflffONT5Nr5szzwpeXLx70FC8pT2WBo7GiYdYCSQq0h6EVk3EUR2JNxTklqSM0352mxIKLUjaES2hBue5PPOOuWF8xE/bmrc0gXhGQEkFPofeCjYAGJ+t2G8GX38hB8L63+Q8Iy/WaLDhoH3X0rvz6lzdKKZzCCLk1Zb3ttNqZitBnR3UkJZZugXjj9xcYavcetOqBqkyTC/8BaN1ozcuZzK6fNqXEHJ/rZrTQYp+qtdJ794Bicu5tyk4czwWvroR8R0pEpca7JnMplCzUtrOtOxqCg6coXZkkVHz/vG4r77c1bEc8yC4p8fmUmE7O0ypamdWR3JcZvj5NrE3ZF
Db1LMrvz/2ZJkvRK+MBvAx0+u8Ie/4LSM+Ph9eDYzp0cyn7rswCecqkKf0AwXoXo/hNLsMGori4lRiK0NQVIWv3jJtQz0zZ3GMjQypAHotPXGzyqenigRH1R7vg0UhkHKUX9575scySkv+bFOd5wIeEAvH4vHspRoPvAKP90bMN/50QjIqJTgQUvXWMRO+CZoIXEp0+mG/MH7y23ktQisub3MtbozzDQ1A0eCzEPbnTVIy7XoscworuizP6afAASzVgTyWLi8sZR9HhjhjY+NOD5bFo9YCJ/e+P3WOP1/Xwevj3kaUHvPyf3ZMBQY8g9p7cD2RjnOVIMeSO8HBHeh6FCf8pnB5GWdmzO7OJ3hZa84zvdJrJSSjF5e5TgstlDm8q4Xw2lsUTgpTSIVamOcbtb1DmQ7iRhwBIJDRiZDxYLz3esXZ/ja4uYLSAj7KcWcdJx3ZfzJIjemrqqIHZ8bN+Pin0SgbaFDoeJFJ8+ABOHxfH+yiIQGo8twee1uNr/OQ/45CHMSzcPYyOc+XesebNFR3Rzuh8tFD8dXuUQB67q9oP0+gW65ATRbx09UOXjzyOdyc9S/ApvVypjgqQA32K8hax5slAn8RlILiXTUtgWnfVcF/nO1Fii81/ILUSGlw+le+l67uVTah4x9ke0GPcsmPtH3fRfvzzQ46xnujYETyA6d1LsCkJ6lDoUbr9968BHMR1qB3JmhxZ8Z1nyAPikeRempLjPjyMZxvjKJ5CSmQI2YHQipYf57ccn8eBoBGq9yPokdA6gyHZYaTWkL3RU6KWgqoc6GlCyXjyO2Whaygyjzrdw/ohY+19WMKNmC9/xyP5fUGPeLDiYlU+0FqH1Zy49H4zfn3vbF24nCEvhXJyA88hez9TKLidQHou2Ay9wKY77baz1crruvK+VdatO8zVvfZfBnk4QcPckTy+7irGXs5IBaQR3B9/MLfbytsbnJ9OtFYxbZSUeL44ae9qlV1XmiraGtaccyDmUj1JFGl4qcrA1hW7rZgpSY0Sm9qSC6eUKSnzND/xPF+ooryflO1iIIpMDcnq3aZTIs3FhbVEvb2YTrdGZ3Sg/OMPNWPbN1JytVdxSdVDyXestoOrI6GebPIg42ehQo0dgInIvQFdwXk8U8IkURW6uTFsEaFI7HuhVooY0gO2F0gxrnUotiZlnoT/+T//N9t24/PLhbn8hD1F149kH5+KM7LUfdq8Ey0CArV7tSDmjg5JAYPaldqMpsa2hUqvCZVMD3nwpLGp+5neAyvuPKJRYqvNu0iu6/4hz/G3x7LM/OGXL7w8B1lYP2HWKTkxRztsEiNl59UtS+F8cgRU7Y3r7ZXrtfLnv+yk9EYfe4bdOTISC87sLZskcQfwKVpKR4u1qvsoNb23m/t47iBhRyOhJJt84YsOeu/ozO73o5ZRmxyKt0DWMLAaG62Tqn0jEUqZDy2TYgXVKYJPNzH1JCcdUyulxFQcJZumHKKFvhG13n171+4L+d8pgPZ/dJihbUVSiU3HScRHEEgDqThXqvl9QEGCy4OXenvwmvam7LXTFd5vnevNycNbF2oIU57PM6enl3BtPzPPs2tzkQ65BTtKTBnXVx9k50ekp8XPtAhPWswNf97+bCORGmXyEfTgvT8pJSw5ikP3MrkjPRM5zY4A7aHpZBJCmtGWFwHCcOLWI7n1MeMlzI5QHdnbW+jIfMyRcubl+ZNr4wx+jKQjKY/ios+hUmjV1926K9utBjIqZJldouEIAIQsBSHKWxYNMYHs9abBpRxl3QfFfZODCHwPYjyQSsMJQL1qf/CtptnRHoKjM4QRJSHZy6LLdHI6B45y+88p61bZq/L2Vmn9jZIS7aczRc++p0sjpRbdXI1ZOl2MjMY64RYWxOONHeIhWPXF/O/NK39X0CMCEm3nOrK15pFoQsiTMn83ll34+gUup4I8TWjuaPH2SaQwpYk5ZdJLQRfPkq/7xvv+xrpXvt2uvK472965VWNX9ZJPdqXjmEpeFhlfTykCHiFPoM1bskm+kV+vN75PjcvzmVY3tFdKFj4/PzPrCdY3fn1/p7ad1qsL7Km3uGanKLlmTK8Oq203bF3BlBSaO0kSp1R4ygtTLrwsz3w6vVBz5/rU2d9BpVPLlV52KOIo11xo0oHdFysazSqNjH3QAmuqrNsWm0s59BMeRIqPFEgQ0uCwCEdboPZwtVY5soERlY+gQnNCp4yi9Cqg7gUzWxgemhudlhxaIdIYRMc0pmNTbu9uC2LWWU7w7dvCH375zMuz+4BJEfLsiCHdUKs0E7KaZ7PJN1criqUI3kaAZiNYgb12tuYbxLop626oJWoutBQSAtlCB8jvDmNLj4yzR9DTur/f+3Xl9X39kOf42+N8WviX//YT2i/Ms3E+KSV718YwqHRlYW8zLtkbDRzZ+JXrtfD6tvH//estnnlkgxr8N4tnnmCeprBE8BbraYrNKXlQ2Jp7zPU2AgWLZ6yI+MYtkg+TYKOj2XyuH7ocrk5e2+Sk/6NkEeiRue+U9uCwiDAvE8vZO4Fs/GfKvq3s+8YgaQ/VbNeqGgTawnIqTLMHGr17Z1JPzde/IFp/5GGm9HqDNLn7vBRHftNATRqSdl+FrUGUjL0ENtAPPThut61x21zr7PW98vrePFnJE8hEmYXTcuLr589M08zT5cKyzKF/4yV5P7E7quABiwKugWRWQEoENxljHxlNBD0DmWBQK4OKKQdqMerDYhPCjOvMFNApPnMmyxzNEUrDeVgDRb83GXhgUawEQGIojUGEr7tfv6qLFz424Pyjj5wLnz59DZ6az58koY+kinXnNKbkqsOt+TzZt87tWuldKXlmzrMjXSNZI0q6UgIp3aMLWCOB7k4HM1ct9znmpamkStfYWyQEgAKlc0TIv2fDM1CEaZqZ5oL2SttXXw+6+hohiTk6a0tOtN6orSJmR/fWbWt8/77x6/fNien1C5f8maUklyqYnZs20Vik05MLqKbozlKFbl7HsvhMT8jv6+8dkf/rx+8ub6VDsGocd32NHl0AeUSJSdAsaBG08FD2cEKslQieRGnmCsx7b27QaFFiYDDa7QGufChRSND+JFzWc7De8x1WxYiSyNCPCegXwqTU9RCOxV3tEHCQQCYSIGpYZ/SJIoGKiDjpOCEUSYfh6ZQLJRcsC1OZmKbi0G0J8phr8EP2+vIBH1oEDx+4uBpOaiMWkOP5jmc7cFKDgxF/YMY8/Ps35/ibfw7XeMlxzTKi9AMauv/58DnjU4SHwCSk3LdtJ2cig2jU5nIF3hg2ApDxOxYL42OZ4h7T+Tb8UM4KVWnPcuLfcOfu8O9//4DJ4Xj/8X5dRydJ46MPifudpgJlYpnh6eJGo8NaRMS7Frt6QJNDCE3VFc2nqTBNPVrTB2l9XN1vPkvkCBhy8Xbg8XWJjP6HboohtjbCxSFyFl8nQdLHqf1YWrm/7GF0MMaSREAmgReMsjX3rHBolIzDbJxP3J8kh4u7X3/8XPx/IF1HufTDjnsH3tF08cNnRokv/vQShWsmOQ3vzm1T8zJWba5UXaM0rOblzZST
Jx0lM03TQWDOg7R/P6VYEkZJJZ6Z99/E/XfZApPsyEOgN0eUwz05kkieDo8zRqegv3+K3ldHefzvSfzvA5kYScfjXvfDn8d+wYjdj7KYqKI9Sn8fWHoWoKQcFj4eW/glB+I8gsJYM7QrKt4J3eOVEwxByMNT8JgLx4VyGAEfZfl7ynx0VIl30bp22f2eP5K/j+B0lJEefvc4Xx1+YSkMme+Ch04L4ZhbxH2vrbNuO0mEddvZdkcFS4H5GN8D9+MofA4y/vG8VQ7uJA+/dz/fv378rqAn5cTpyxL00gmRhDZF99BWOSX0VOhLYp8zt+Iy8TYrujg8e8pCL24et6bK9/0VEF5vV77fNvba2DWRy5liylwqS/GOn5LCwV0SU56cFyTCaUpM2b1X5s+Ffmrsb43X95WtNhTl7eoS50/PN65vN57OC5pxbaGUOSWhoGQ6qXfY/JrSlJlSoUgim7dfmrpa5WSFhLFMMzIv5Jz49PTMy/nClCeeT594mp99kdkTc1moVvmmiXfD1fmeZlgKBRciO7Ew6cS5XVh0fqjZ/mOP3jtvb2/kUlgCyk7JEbMRKB4LnuCEdBskcF/4eyiMmro4Yeudpp0e3CQElnni5fmCtoaKouIlw8myL18KbVN6jUEbs8TLFOKt+1N27tfscP+37zfer1dEhJ9/+kxrndNl4SU9Mc2Fbe9cb7vrBi1KTsnNcJNQok48+vYwx5WGqv/WlL150FOb0Zr/XE1+bZLAskLSozZvGgTo7mWx3j2D3Ktyu218//7GX769fchz/O3hm3dGKJTsvj4OIT8GMcHjiGc0eAMpFeZ5YlnM7UmWE95t0UnJM2URQm3dy0GnxTurXDl38sU7OHWeSErsdUONVUNUzcvSpQhL6KdYh55zaPzI0Z6ssSv8sNkdi/pYIscTdXFC1UoaGSuD19JCBwiqGk0aKQmnQHbOp8zPP838/MuFyyXzdPGut/QQLI3W4o89DNjiOgSG8jGD3tiAHe86666LRPCvImHcW2OrFVUL9fqOGtSWaOYeh5fnLzy/fGKeZ37++Re+fv1KKYXz6XwnyKoMBQnfbHyU+Y6WQCxTxDCbUC2AIjKBTbEpFZDquqvWudu5gKTieh8eGnAXglxIcvLnqhOmE2ZCq64L19W4bcq6aXRIJobChmo/uCZde7S1B9F5bJ5bpaUe69YIFD/wEHPT5ZwRyQylbQ8MhvK4t3O/vt0QgX2rbHvFzEh5Yoaj2UzHGDTQ5mlbM6XHY6kxvvMh++LzJJeJeT757+eZFEhgbd15ucIhDyACrXsCUZzR8kOqYSKUXDiXhSywBNI7AqMxXk9T4vk0IcCf7crr+4ap8T9O3s29zJk//nTm53TyRFwzGRctPEnngq/F6+DOApUW+uyxho/j73yQvy/oKcLly0wiU8TbXOvWuF2bG7udsgc9c6bOmWtxuWmbDT07h+VSoE0uI36rFdtfsQ7frhvfr5s7HmsmTRcKyjTdWMIFOomHWyklluVEmSeywJwTRQSK8lRmrCu3aaf+udFuEfS8d3Q3Xl5uXN+urJcZmwvL88KcE6eUmMSDHukd27wVOYl73xQZI87F3JK5InQBlvnE8nymlMKXl098enqh5Inz9MJpenKIkomny4W1b9he0dqwLNh5wmbPumeZMIzSCqftTOnuk/IRh6ry+vbGVAp6PoevSsighzP9kedlbxF0fkWIABL6TFG3bdpp2hxSj+8jzjGRlycXKgyzVtFoR9fkqErbabXd9xLxLLRkQaZEmgplXijzRNedX7+90vuOmfD16yf2vfHpyzPT+YRJYd2V99vmPmZqzDljJZPEKMlRANf58C2sG96hZUZVYw/SZ61C67751tRp0tzQMfcj6NEIgrt6p5aT+ZVtb+zV+Ty/fnvlz3/+9iHP8YdD8FJr8mC8FA8qpgLeauzcHedZCKPt2dGpRMqFZZlpDZf+X86oFYQ9YKwoLYU2iwc9E7lkLpeFJTqm9t21UogMcCSivtA7klKyn1cpwrK4DL6q0KvzBVqHvnNID7h/1PCieyQ+x/wwz559Y3ONIkJDxBiE8rDYUKObi2+Wkricz5xPheenxC8/L/zLv1xY5szzs3teCf9RF+VHBj+GsbtWUTdMHCW9a5E01CpIoJLRcbntjXVtqCq3rXJdd1eDr8paPXLJ5UwuJ0qaeXr5yh/+9EeWZeann37iy5fPwfFbSKlgxlFuOQx0I7B04qo4Qo2LXPY+BXq2Y7Z5+cYqYnvsmO77ltLoCi1ImhmChzABiZxO5HxGSB7w6IwqbNvOLa7ptnbWrTFU4yx0jHof7fmD3BznL4NPRuivSSCxfGj3lh9uPTHPkVgOsb8U8ilDVLVVvr9eI2noB9eoFOfneLfhnVeq6l1xh65VTLUKbGqkDs3uDQilTMzL2TGRWcnmpf+6Xum7B6au+OwRztR8vpckR9DzgNsyTYWny5kpu0JzEbvjddHcdJozz+fZEXs1Xt9W74gUuO2d01LoKVMuFyQJrZfw4lNOJJ5JVHN0cu/+5DY1dvPEOIcFEWZI74/dNf/p8buJzHnyNuwp/E8MJbdg5s9CmhJpTsiUjvKNiT+oJHC4LkiQjGMZ62rUHpG3wWD+J/H2O5MhxDREAqM1VfzPKUWrnBkmLuyXSnKEIvnC2dSVh4dNgpREcOocTpOoNok/ZB0wYiz4Y407ZJgCv3NeTI5XYYoAYvgQCQG1Z4d7nWw7PsyQ7PdghBope3t00o+Tu3eriR5t4/3IZFP3FkUZEOcoJ0a2jumxifzw0gEV3zca4OBmJBt4enCHdGTP9y4AuE+qccsPODz5fVFLIQvhQVet/Xh1p2EdkLUeHTiBZsT3LHGcr+Ftj87ruQcxDxXO4xoVL3eO94Z4n3H9D/D50Axx4c5O/SeVt/79RnwM2jvk/9ufsLtNxijz5Cyh0+KeekeWP4b+w1vJ8eY/vufjlw64W0aGOz7PN6WhA3P4DcWZ/1inHxnufXz9x/fAf9af20AmPegx7Z67hADnGJIlNqJ5ypzmwjQHOhgI2WGuihxT/8OOOG8v1fYYhBJBn6Otw0R0oK2jjLU33yz32tlqP9bVHtyMLJkc2jvTPLMsC/M8e2JX3NojhWL3Uf470FeilDi6GGMuj5Ur5eCBOflYpfjzEnXEx2Co6vmzfry399fxdfJx38Hn40BSe1AoHK0cgdlDNxH3eXsvhdm4vcfPf7BMT9yfAMaSkHKUlkL/xlG0+xw9TJsh7hP3OpHDrMcg/1EUNq4/rlbhoftwXOM9cE4mJPPqyyhv/bX5+uM3x9fTQV1I2DFOHkuYw5trSJQQ62rt7p+JCGtVtuZyEn10MoSnWk7eOp+kP5TKwvw3CWYxTsfr70hEflfQk7Pw/MUduZ8CHbitN76/KnurXF5mPv18YloKlz8snD/NlFOiSWdv3pkjmJvMJXGX7nxCO7Q3FzFqLVF3r7eKwpQypzn7jS5AdkLWpUws80ROKTyYCr03rtcblUqaM+U5s/RMFqWJsglsdHdSv12ZZOZ0SiQKZ5TPy8SU3fW7W6Z2Y5Z
M68EDCj8qxagFWmQ76VyYzy6uNc+Fubjy7b6vrK3SuvLteuNt3djZeZONdVY3TistSt6JkmayZEpNTGlhksWDvw84zCw8mVz7cohOzZMjeFOZWObZyb+MQe5b3QhsDrE5c9uKbd9otdP6kKV3FG6aJkyEvUzUXFCCZ1A901jj5QjagFm9ddEKyJSYzgvL05lcM0olNZfHX7fO6/tGWRa2XcmTUZtL83swM7ZnP+9WPfjuI8sDr6EPraeuLpho0FtICuAkui49dH4qnRQTeAjbKbUqvXt2vG+Ndavc1p3rbeX9evuQ5/jjQw3oWyvKTqqw7wHfPyA9rtRcj2dn0emm6h45JRtP58JPXxbWLbFvzqPSnugplNCB1hrrunnHhip73T3bDqVfH1tKKemBZxCID93Psyut4oGVBafOzIMsGzIPo0sJHL0ZHYZ6LNX38elL+CgZ1OoO86rKvu3sUTK4B6kTy/zM1y9nXl4Wfv75hV9++UzJwrK4kKOPH7diSPkc3JLf5r3/yMdoQTQP0nhwXiR0W7S7uKOZsu+Ndd3p3dGd9+tO78bWOuvuc7tMJ8p8IufC5y9f+fz5K/M886c//ZFffv7ZM/anJ05zlNPj+u5Eb08GJEXZUh5kKWRsaIaq666YuZWHagKrmG6YZsy8mSWl2MAkx6IuQMEYpa45SmWJVoXWPNC5ro3rbY+gzsvQ/uwBsWP+3tcnDZSQH4IFRkAeSO1H+uKJcCA70+weYgRqM44RtA4BSYhuyGlCkjBPs/PlIvmTsGxptR66O24u3FGUzZQpzLL36tIvRV1jLWf3p+rm7KhunsSrOTqjISkSsUtweJ3Qb/icK3mCbG7aHbIkBr6fxWNN2VlZ05Q5L16evJxnnp4WSu10SXxfG7eunH99h8mTjGVOzJOv3dM08ek8satSU0ZTZQ+Uz7qXfhNgQXs5Kj5/4/jdnJ6nzwvny4kvXz4xLxPvt4J8u7FX4/xp5tMfT0zLxPJp4vJpJk3Ctu/0LXDEZAGtC5InUjlBgy6V25ZoValVsC7QHcU5T9mzfuezkXPmPBXOMRieosVyrzt7a1TtyJyYXhKzZJJC3V2gapXOum2s6xWScrlMTKKcMT7PE8sEKSmVTu3mys6Ds8RAINxPqk/ZOSfniensgdc8T0y5YAq3beN6bdTe+cu68rpv1NR4O+2sk/NCyA2yUqRQAuHJJEpemNr5wzg93rK+e0CndvB5avFOrtOyuEJ1zM6hZWQ90JCAjwfZt7bGtu301uitHTXdkhJlKg7PlhKCliEgWDu1KVvt3KpXaTMelEwJenbeB1NmOp9YLhfynmm6IzVBmritDdhYzpVtV8rsXVNqgtqdCgcc+khOuucIeiwVlzeAQGaal7x6pnfXqmgROCOC1uYda3AEDhpdYq0brXp5a9sqawQ919vHd2/5Qq7RcbSTxNirhXhXQqQxgh7VEfSoQ+NqaN9JopQMl3Pm6+eFbcu8vVVMXTW8Bo8OXPLfzDOu2qov6IwMlMiylVxG0Oln6UhikOh7oHRtaIAE+dgCDT0uzrvNRonZuMP2D9jrA9oV3J26crt6yWBbK/teA0nx38xpYZ6FL19OfPp05pefXvjDz58f0A3DW+XdN0zSiZT+j+XN/uah5gGjcz4AS3RzpLi1Tt0j0Fkrb28rrSnX287rdaP1ULPtgCQ+fXpimZ+Y55kvX3/hX/7lvzHPM7/8/JWfvnwm53x04iH3ZMHMg0mL7JymD3pTgQwHeuGNsm4NgzVUd7QLZg3VDW0pgh51+Y/jvkaAhRssh+weZsHjaca2e3njtjbeb/UoYY356xwhgSi9j7KWRgB0RyzGnLXjz7GWfeThon7JlZGnsPEIFMZRK19DPehxrKKUmfPTJRoF0kN3rSDxu7UbVWug1hH0mLGpMqnf5z2I6607cpgi6MlBE07qopMe1Pr7DPFvVy4HDFrriGqIm5ZoXnBylieOnp8IdnRaIzBPxYMePOi5PJ1IW+NWO+9rI1Uh/3qlYkwl8dOnM5+fT0fiPeXMrspKJFutkdiw1uKzvYIgObGUmbn87erI7/feinLEgNsk4aUkhDScbEenFgOq4hDaurt2//v3fhQ/GrBezgmZvPVQs2HZQvgsHpAGN2HUbxPOQclCmhP55EFPTureK1PC0qj3xkDpfvME7l1gIq5RIhEFP5y2IY7OZAkBPifKebtydw2EIPfWtlO70nqlaqPRD1Gt+x0NpVfuk2HoxvwdaN1/7YhMSJDwdfEPSzqy8iEAd4deR6Z0oJ0/lLg0Jm5YV8QPDtY/Ivdka+xd6pm9RmnpDk8ed+NwjR7jY7QzD9NHM6KrS6l7Y99bbKLD8f3hPgo/XM+4tcdttscNO148dgv5D41Fy2/BfYO3kTnqne/kC7R7O/3zjocA/fEZMsqJ+h+8fHIOIbN5SpxPhZSgtUKthZah10LvE5i5Unn2MmXJLhV/3DeNqR9cMF9EfVF/JAYfBrbEqhm1ox82ovEAH+fD35gXx8Y2AnQN7oQOnsfoUHkoPcQiqvoA1Y/bdi9Ux5c/sr417hf3wozd24d779ToQm210yJ5aMeY84DCkRhvU/YS1uIE9XlmDnS85OwdXOmO5I4SHgTp3zi0qYxj6B/34HiWIlGO8kYVS8W72qVjUuJnFJEhoHgvG7rqtI+Bgz9kgcj2ITg7vKtGZ+/9Zt2Hxb8vvd/P9WHV/WHefmzQM0q9/1FJ9gfvwRGYCaHncw/UjvWLH9cbX6Mey/gRTOkI6O7Py+ChLPuwoMc/h2mzcCewq0lYiHR/NjgR3SzRxRNMM4HQ2TIiQn743Ecvr6lkWveqj8Y+V3tn2ytqmb259cjQ3RsUi+H51S1kNrJ38Q3eafxYJGR//fhdQU/vxvuvjbYlkBvTqVJ1pyfxwCQLGu2VrQnb1fkr2hXpLqueeib3RDbBrNLaFVUo0ng6J1oR1m6s1SO4p8tCmQqGsulONY9s931ju93IJVPrxnKa3SS0tMNS4vl85tQK2WBRRxE+nRfkGerUSCSutxt5L6ypojlhUkjaKdEipwiZHH8PdWEBnRK6dBDYqPy6fidLou0be7mi3Xh/27m97zQ1XrVzs07P3lXgWawgLRZ+cUKviGI7rLcrbeOoT/+jD1Xjel0pOdF7O7g3OvnfU0mctCOWvbTzsDhISiRzI719c7Ro3Ta2fXNNFvMOHbFEUSX12NACOdPWaXujbnvwuGIC4I5oEqWQBe/naLjYWusVLFqrs6sMd4W9ur/N//h//zfTPNP7Tt1vmClZ4cvTGUs5JqwEf0RCN9YfqKov7q0Lex+qBVG35q42DRKBWpgmjrmh3rFVa2XdKtfrytt14+1643rduN4+XpxQjkUsggfzNmJHKfUQrvNkYY8FdJSIjKnA0yUzz8J/+9OFqQi1dd5eZ17fT/TWuV6fud2uDxvFY4Dli+4wWPSyZ5hKHpl3BBw/IHB6BJ3jPk1jQgAAIABJREFUa6re8jwaVx/RBz123lHGkh+C2GGnAg7Lj5KHRoAHQdjMbmiJOMK37ZW//OV6aA+VPNRsC/
Ps3YSpKFLujdMfdRg5SjOGWHRKNl/7trXy/rbRm7JtjevVyb21G9odEVjmE0+nC6UU/vCHP/DHP/6JeZn5489f+eWnT0yl8HRZmMrYXO+fLBHw+HB3DTIzg55IeicMD2Vgs7GhJlKaEIqXHAxPLPsUKuzNET6idCcjoYjnq+PZh4+ewro2rqFXs+47TUfS+NtkJM4+EjZP6nogPRJUwoGaPwZFdxT/Iw4ztzZBzMeaWZS37nM150JK5hSD7GVzF1bUw9dveIk/8iZbCxVq8z1Wmz+bvbolSTYXWh1cR2zsYuJJZziwO+ztJfz32x7zMdOlOImee76RAsURicRozpQkTFlYSnBwY+ZiUBHSNFEscbmc+PzpwrxV+nfjtm2+D91udN0jqGncWqXkzOl0Yp4XTBLL6US5PLG3BmXism507ez7RuvuV3m4KfyN43cFPdqN67dO3Ss9rZQ1IVNHzoKUAjlh4m1lrQrchlaGC/glgdwTqUctrjeauWJnFuPplLzFfUu07CWX509PPD0/eQfW9sa13mh75fUvV26vV3LJNN1Y2gwlkZ4n0uL+SE9fz5jMFBHOOTOJ8BwdKC2MCGV1/ZKtCLok18xRoxBokiUCYKYN6F1Ai2InQcWobee2NgRosrKlGevG7XVjvVY6cE3CloYEeb4HPX0gHB1lp9HQHeR2o61gHxb0KLfbSikZ1U5OiVwyqs6TKtNEUyWFJ5GrFksM/BRZl/M4Wuvs286271jvZHzDEBNyk7uyRgetbozX9kbdqxMT1d3tD1JeZDNjiXT+Taf3hmBMc3E5SPHgrVbl7W1lbX92Xyw6KdR+L9PkdhUhXHrPZSXImkK3hJrXt5vim4fh2hpBtBwmlRbno+FaTbSBa3cIeNudy3O9bbxfb7xfV663jds/SZF5ICXjWk0V5O6+DL75qLra9wh4MJiKcL5kTpqZsvDyPNO78va28HY9e9CzbqzrFuWwfmTfw5xT1dWz970Fz6kdX2/1/vP6UCYd6OCjjhIQHTejNTVHYDVEBT3geVRpGUGXI2vVf+/hHAfS4zwUX7TL5Llia86P+fbNA7qcvZ22lEQpM8/Pk5cnzCiL8HEtBgNddC0csw5i1O7l0t6U23vl+6836u6lwW1z42NLGVJGUmJezjx/+sw8L/zhlz/w3//lX1iWma+fn/j6+TlczQtTeKT5LQ14SUbmHBtcmC/TjTQQBFx1N4ZaPAEhpyneIiPmXlw9FcSIMdewoaR9B3GjqSB4OUqMGWPbvDzcu7LvgSTHPfKPdJFRHxED8VAOWx990D8bTvXHeIkx8YHtW2rGvu+IGLV6l24KR97hAVdKPsbuKBGLRIBznLNfb++OGh+BZxhvqrq9iAdZRqpu69CaRUI3qhl2v241rDfnqqorVd+uK3WvkDI2zZByNAEFP2qAsbgi+3kplHQPgHIajUVuMlJNkDJRyJwuC59ezkxT4bpuocDuJdrrbuScaHRuvVFK4TPwlBK5FM7nJ+bThdoakguXZWOvO99ejbX//+29bah025bf9RtzzrVW1d77ebv3ntt2WlqxxZCA2hpD6Gg0gn4xCQmI0DEEkygoEY0fEozkg1EEBRGJRm1fMMROB4JKQyPoB5W0SZt0B2I6kfYFWolGO0l77z3n2XtX1VrzZfhhjLlW7X2ec87z3D77nHvPqfFQ7HqqVlWtteZac47xH//xH0cvPjq7oD7G3rHhqEt4D5ZXJpqcfeqDoqDVBgoaNTRvS69b49GziFnY0l29eiqINTUbByNtDWMiTdEhVuujYwrAfcrWVX3UEJPgvbPUOHJddyD6YuBeb1VDFLJWK91TXyrVa/89ldJLIbVZqWPx56toHVBRihPHCk7kbYqKIQsBk+avwfpsNT9P4hMx0qxfSXRYUx3q3AoXPnVT1AXzlMGVesVThZbC7M7HNhn1v2by4K84hKlqIoFBWYsSeqXMQ7EsnGfStphfVmT07LEhAuiZEKSwpkLB+rXlUwYp1sdFTOV0XoovyJUQdasiPDuxqjaBN/XqL+2QrVeNEVZOk5Vts8EKHc3VjR/QvEqwuCPQe8E9rW3jIWf32UM4e3MorH6mQ9xepePifE1gGKM5hLXR6miLXG2EaPenuuiiad8YupPduQlipeCtKXnphQBKTh11Md5TJ8IbOHgmqHYWyffFtPM3HmUp/N1+bP7XK5rOXxNwiN3TcckbCXvKpjVzxg29zLYd6gRcG8cYqzUdVX28C5+6qQddHe2pTde0bS5lFRmsnr5Vd+JDikiITOPAfjdZSms3Mo3Ju21Hq1gT1gXQfqp7yd3xWT0femuZldiq1n5AVzWCx5OUX1edgC3WFFikpxbrerl2p+ncdeopxtb63N4DLz3bivNczQNH3/7/hnN6PmhPPYBv+O1zKoA6urymqM6cMdu+V8s93O+emu33sjkwHQGyyagLG65zGdv8VL3su/OimiN2Z7UBZ79p92DVjsY/rMxVtXu9RgEiKagJEktEo30+V2sC29ScvXFI1GbOXYz+/SJbFTdQHMnNrZFbNT5h2wjqKZjDrq1ZBXevDOPRGH+EvWN6q3H/+p6xJHSoDDkyNCFORmZrizK/NsEvUkNHW/Snq8j+2WDl63Hwcr1ArIYCqELEFqsUhBfPr3jxbLKI5VkiXUdKE+Z7mI+NdlLCnSDHCMFRm9YQDSakFqM1I50UIlQJlAAqgbkKB1dID1qIzcTycgycWqQKLBHKKFbBlQtLqbSinHLhsGQrXZaITAGNUFohU4xO1Bq0TCQw7ieu9ns0BHKK1JjIrfDNw4G71yeHFzPaMsM4sH+RmK4mYhwZpsH0Vt4iR/ntWC2Vb37zNbtpQG/21kZg3NoKGEepUqnGU9ItBeHyLqbpkUzXZ7/bQbPcr9RKqA0tleVQWeYTNbvaNoagpHHgimB8J12oLZuiN6YzYXC45/e1674qKQq7sd8w5pwowuFw4hvv3zHnQgomOhmD8Yhe3Ow4Xk9c7wJ6Fb2RXTKVaGApjWO25ranqizNSX5xZBh3FnWLSb43dXTOU2ClNJdCMJ6FdVTP3B9PvL47cnc4cpwzp/z0JetgjmCM1pMnRiGGgslYmPaHpfaAGFGioZkOncchooM1+NnvZJ00uyRAU0PVcjGl4JxnarHKqDkv5GLVWPNcHdVR8uKOX7VUTHcEe5fp2tr6OSvz98q5JuRsCNy8KHq0PmptjX470mMtLlqraDUnfqZSqiFrrTXrpRmMGMl+IIi1qhjdEdCm3N8bOiUC89Gcnt0uMQ7BIPaW2F8p025i2leG8WnHUTW4yKDdcMfjwuv371lyZjlVDvfWayxg/LYkgd3Vjuuba9KQePXVr/K1r3+dcRr5yqtXvHp15Z2+A2DjVzqCK7JFKc5ntIBrk4uIHog1FTQoIq4h5PlGdefMGsI6ShSSOaQBQqq0Zv2B1Dosr1lYCyKcl9ksJbnk4ilHr4pcZSK2QKeLJ60Kz9gcr2KNUNXAMlZ4Yj25Tzt252ZITvLWE80qix0p75zEjcOkfj8a2b+547EeL65zFT0Vn2dvqWE97
vKcCc0Z7Nm0bqyS1SpKj6cTt/f2m6faKI7uLHOxIKTadQcJkYSGAVKkLguHpbCUTC2VeZlp2piGyNVuIMbAfkzc7Me1t5cdkKAkVE3zKY2Rr37tBctSOOaF2+M9uVZOTZnVfIUaB06SrK1TLpTTgRgTWWGaF8SFiaf9nlOM5NPRSc2Gape3QO3eyenR2jjeHag6IDurbtGY2FXrAtqyTZCCpX/KoO7oBIbnkZAiRItEjDDcuT1qugEYkXK/27HbvTB05rohu8bS4D4IMTRqVGQUJFkz0yoVVUs+qVSHDwUZ/IFSvEPvIsqxGkTey2alKblFFk2mgCGBOkaaCiUIp9qo2fKdt/dHKsqwG0nPR4PGqatGZGwVqjCExH6/52Z3bT1ThhFNidO8cH9/4vbOy3XnTM2FcBWJV4kdEyEODGOy3kNPU7xFrY3Xr+8o+4khmfCiBGg6YlUVjaqe3vJHR/P6w27ARJCG7iaCOGy6LJAzTWDWZqXx2RWbAURIaSCRiLUy50LufBm1jH/v9NuRnmD+K0lgGiLDYBFErobOnZbGX/3F19zdHxmTVfwNUdiNife+dkMuBS0DUxw9ZRFIlj63RXgpFIVZAxlLaY0hkYbJuwZHCtGRKI92qus/lWrHVq2/0VIqx5Oltw7HmXnJLPkzIDKLTZEhRAKJGA0ytnLi1nFRR9h61UzzyQlCSMbJEOuHZporsjqWHdUyZ6ixLEdyPtK0cVqOLNnUVvOiDqsrea7UYpHi8ZjdgSrc3R5YlrxyaTrhuzgSlLNymk3sUaSRF9OrMfSlozeGKJtYoRUQoI6KSHNk4oxs3Ym7QZimgXFIzqFQTseFvNhKM5/M6dnvEtMYmKZCSpOlElietEFlH0iLsGHJNnaHY+aD2wPzvFAWa4arFcZhZDcOVtG631tV7Tjw3ntf4Xt/2XtM08j19TXXN/sVvbKy/47KO5wcxTO5tk2EFf3r2i6R6EGqAoEWdKuIPEPW7F0wnZ2eIbZKNJXsi3zzzhPugWiFaqm8qlaS3uUjcu0E5jM0UzpR+8wtEJOfCGKFEKYLpz2n4L/zoVP9pE6QIKvKvTX57JIf4SOdHoAWIMSO1G4cuBiicYAUTrpYMNIsNZsXW8vCUpHcGMKW3rIGyjOHgwUVSzXV5latuWktXSPH5RgkQUwQIpXCcSmclsyyzNwdDpRSmMbItTs6V7vRSuPj1kwVhRRHUtohEohp4NXLPbk0vnV3y9UHI0suhgJXG7CSBgjJrp9qqGsKkVaVZV4Yh5GXz19ytd+TRDgOIyUtVFXmVnyd+nh7R3FCJwAmg4fTEF3eP7hasq5aEue9VLo4YUW9XLluDo9LxTcVO1F03YQOtzYjcWldpcT7e/2mNAIYfo85FPvgSu6ETWxBR5yv4WFGgCJKxqNMgeLE1rkW5lyovQx5sXJZiqEZiPF6VOz+tUXBZc97OaR2kN7+RQJJjImes1BnpSSlZaUW7QHXgyqXT9tUTTk3pWi6KilaJOV46Iq2rgQLT9c9vBzWfHmQsGr6WE2ya3ggLp1v5ZJxSB4ZNmgmlW6ddixuP2+BIb4vgriCb3D1T/u92npvK11Rlly8p5pY1DIvheNxIUZhN0CuyTvGd9aEpTpLM7lz7WJpwbgM1q8pdFYaPRVkJ+AMrl6rS1wlt1r6p7okwFNXiLCCzuvAPXr0Hk5d28Rne99/M3cgHvT48UNFbQw9itcmXq1lk9yQXB2n2feGYPdBQKhJSdXuymFo5CXSarNrrzbSEDetEnckc1bSYOmbFC2oycUm51x80W7Z7y9L9QSszNv23eahtbN0MOJ7RzJ3k6E81kE6rv22Os/Csttn4nzChpI9TLI8wUh60URTR8l6dZaJt1ZHAPA5Ig3W42+aRna7HdM0sJtMcHBwx05km4sseegTVp9L2e7zdT/0bCGWDW9QzlKna5rqYcXdxrnBnGrpd5vdX32O1vW3ZEVzes+pngpdp9B+3Jynbx8m19b5CKvSDSt52QM2WX9wc4aeaI59uE/9Z92pcaSnB3pvtAdpVE9p9RVQ+3c/2Nxv6Yf8OPXf7X0Am4q3p8BFHns6ka3qyx8EdcpHV7/qaWd7XhzpM0V6WyN6atJ8Y0WaBdQp+BqgeK+3SMP4vNHXCHFeGkG98NvTcdrL+u1+qKWsfL21Au8tb8l3EydMwvNXA9P1judfu2bYj6RJmPaB4OKbazOMBG0UNEKNykymUrgtjfFUGSRwE65BdoBwnxdeHzIgSJxJw8nK3GOhSSHXwrKcyLVQWrUSudE4CNc3O6ZdosbGHI1Yi1iZZS+JbWrS+0UHsozEmNBQaS4FfoqNW5kpqpQq5BZoFe5vT9x9455yqsxH63yLwNVeac8jYbAS/TBEi5uLwbEahNNSSDITYjRUQxNShV0ceT5eM9eF+7uZu/cr5VjY7zOqC7sU2O8D05Se7H4spfGNb92x5Mo4DhSXu7+6UlLsaLNd+H0RR7Gu6LrBytFLx0mJ0IZeemFCZgFKitSUaB5xj9c7tDTK7Uy9XwgI+5CAykLl6DKBoaM7rTGK8Hy349XNjfERkpVNLnnh9u7AaS58cHfg/lQ4LNWqqBZLb+2+ccv+KnC9H5i/5wXjPrJrA5Mk9qPdWKdWOJbFFpMhEdKIREN50rAzJ8gJ8IYeFyRU54dVcslG5s6Z05I5zgv3hyN39/ccTlZd8Fkh6opXZsmMaqbpjLUtKFSdUbW2AbhYGWcVTeagVSAQ42iaK4bbrpN1r2ZDlBQ9um/mLIZohEy7DNw9aAIeQNRqys6lVI7Hhew6P31xs2XQxOpyaZxOpih8OGZuX8/kUpmXysm1WpblxHy6t8q+ZWZZTmhzlVl3WExV2oKHYTCEL0Rhv0/sdsnbA0ykZATc6Eq5McJ+FxhHmEaYJmUYlJS0i4o/4SAaUjafCq9vjyzLwuk4czgW8tKcu2O9D/dX17x88YJxGPnae1/he7/36+ymkecvnvHy+Q3RtXdKsbm1czaMpOwBgECQRgyG5loa1Ii/lN4CwysyxUuFfSG39giy8iBVzyaszrNgEzy0vLU5141OJO4tNCxIOBytEKCTl1vbREbt+jM0M8Z4/jNYQCI0l92IzVKEnDsAq2fgwqKE9Vp9CusIo/bA3XWDuoKFVbylNbjdnLjzhby7OjYOYS1MwNFYD2IsT0hVZamGkGVVisLSlMOckWRaXVW802RR5mzXWymNk6egGZQWAtTGXBo1RDSNtgZMiiRre7GoAQRUIRQlqjlPzQt1UsGEEgWuBws2whC5enbF8xfPmEtFswX8TQQZBxgHc8JloWHXbW4NaZlalTu9JR9nlpw5Ho4s82IdHpL4dfbx9m7ihEG4fpbY3Qw8e7FnupqQqIQJm2iaidcpoMnQC6LSImQpVsXUGkMuDETCsGeyxkCcCtyf7EqYpsyuLIac5IXGQlZbXHIt1kQuqCFOQ2R/teP6es+ieUWScIdHFFOarA1RoRKpEn3QhRIMATpI5ZaFRZvxhxahVeXucOCDD24pp0o5KsvByNm8SKRjIdVg1RxOBtZqFxDSmFMh
...[base64 PNG data elided: the rendered output is the 5x5 grid of 25 randomly chosen CIFAR-10 training images, each labeled with its class name]...\n", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']\n", + "\n", + "plt.figure(figsize=(10,10))\n", + "sample_idxs = np.random.choice(50000, size=25, replace=False)\n", + "\n", + "for i in range(25):\n", + " plt.subplot(5, 5, i+1)\n", + " plt.xticks([])\n", + " plt.yticks([])\n", + " plt.imshow(train_images[sample_idxs[i]], cmap=plt.cm.binary)\n", + " plt.xlabel(class_names[train_labels[sample_idxs[i]][0]])\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 组建网络\n", + "\n", + "接下来我们使用飞桨定义一个使用了三个二维卷积(`Conv2d`)且每次卷积之后使用`relu`激活函数,两个二维池化层(`MaxPool2d`),和两个线性变换层组成的分类网络,来把一个`(32, 32, 3)`形状的图片通过卷积神经网络映射为10个输出,这对应着10个分类的类别。" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "class MyNet(paddle.nn.Layer):\n", + " def __init__(self, num_classes=1):\n", + " super(MyNet, self).__init__()\n", + "\n", + " self.conv1 = paddle.nn.Conv2d(in_channels=3, out_channels=32, kernel_size=(3, 3))\n", + " self.pool1 = paddle.nn.MaxPool2d(kernel_size=2, stride=2)\n", + " \n", + " self.conv2 = paddle.nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3,3))\n", + " self.pool2 = paddle.nn.MaxPool2d(kernel_size=2, stride=2) \n", + " \n", + " self.conv3 = paddle.nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3,3))\n", + "\n", + " self.flatten = paddle.nn.Flatten()\n", + " \n", + " self.linear1 = paddle.nn.Linear(in_features=1024, out_features=64)\n", + " self.linear2 = paddle.nn.Linear(in_features=64, out_features=num_classes)\n", + " \n", + " def forward(self, x):\n", + " x = self.conv1(x)\n", + " x = F.relu(x)\n", + " x = self.pool1(x)\n", + " \n", + " x = self.conv2(x)\n", + " x = F.relu(x)\n", + " x = self.pool2(x)\n", + " \n", + " x = self.conv3(x)\n", + " x = F.relu(x)\n", + "\n", + " x = self.flatten(x)\n", + " x = self.linear1(x)\n", + " x = F.relu(x)\n", + " x = self.linear2(x)\n", + " return x" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 模型训练\n", + "\n", + "接下来,我们用一个循环来进行模型的训练,我们将会:\n", + "- 使用`paddle.optimizer.Adam`优化器来进行优化。\n", + "- 使用`F.softmax_with_cross_entropy`来计算损失值。\n", + "- 使用`paddle.io.DataLoader`来加载数据并组建batch。\n" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "epoch_num = 10\n", + "batch_size = 32\n", + "learning_rate = 0.001" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "start training ... 
\n", + "epoch: 0, batch_id: 0, loss is: [2.331658]\n", + "epoch: 0, batch_id: 1000, loss is: [1.6067888]\n", + "[validation] accuracy/loss: 0.5676916837692261/1.2106356620788574\n", + "epoch: 1, batch_id: 0, loss is: [1.1509854]\n", + "epoch: 1, batch_id: 1000, loss is: [1.3777964]\n", + "[validation] accuracy/loss: 0.5818690061569214/1.1748384237289429\n", + "epoch: 2, batch_id: 0, loss is: [1.051642]\n", + "epoch: 2, batch_id: 1000, loss is: [1.0261706]\n", + "[validation] accuracy/loss: 0.6607428193092346/0.9685573577880859\n", + "epoch: 3, batch_id: 0, loss is: [0.8457774]\n", + "epoch: 3, batch_id: 1000, loss is: [0.6820123]\n", + "[validation] accuracy/loss: 0.6822084784507751/0.9241172075271606\n", + "epoch: 4, batch_id: 0, loss is: [0.9059805]\n", + "epoch: 4, batch_id: 1000, loss is: [0.587117]\n", + "[validation] accuracy/loss: 0.7012779712677002/0.8670551180839539\n", + "epoch: 5, batch_id: 0, loss is: [1.0894825]\n", + "epoch: 5, batch_id: 1000, loss is: [0.9055369]\n", + "[validation] accuracy/loss: 0.6954872012138367/0.8820587992668152\n", + "epoch: 6, batch_id: 0, loss is: [0.4162583]\n", + "epoch: 6, batch_id: 1000, loss is: [0.5274862]\n", + "[validation] accuracy/loss: 0.7074680328369141/0.8538646697998047\n", + "epoch: 7, batch_id: 0, loss is: [0.52636147]\n", + "epoch: 7, batch_id: 1000, loss is: [0.70929015]\n", + "[validation] accuracy/loss: 0.7107627987861633/0.8633227944374084\n", + "epoch: 8, batch_id: 0, loss is: [0.57556355]\n", + "epoch: 8, batch_id: 1000, loss is: [0.83717]\n", + "[validation] accuracy/loss: 0.69319087266922/0.903077244758606\n", + "epoch: 9, batch_id: 0, loss is: [0.88774866]\n", + "epoch: 9, batch_id: 1000, loss is: [0.91165334]\n", + "[validation] accuracy/loss: 0.7194488644599915/0.8668457865715027\n" + ] + } + ], + "source": [ + "val_acc_history = []\n", + "val_loss_history = []\n", + "\n", + "def train(model):\n", + " print('start training ... 
')\n", + " # turn into training mode\n", + " model.train()\n", + "\n", + " opt = paddle.optimizer.Adam(learning_rate=learning_rate, \n", + " parameters=model.parameters())\n", + "\n", + " train_loader = paddle.io.DataLoader(cifar10_train,\n", + " places=paddle.CPUPlace(), \n", + " shuffle=True, \n", + " batch_size=batch_size)\n", + " \n", + " cifar10_test = paddle.vision.datasets.cifar.Cifar10(mode='test', transform=None)\n", + " valid_loader = paddle.io.DataLoader(cifar10_test, places=paddle.CPUPlace(), batch_size=batch_size)\n", + "\n", + " for epoch in range(epoch_num):\n", + " for batch_id, data in enumerate(train_loader()):\n", + " x_data = paddle.cast(data[0], 'float32')\n", + " x_data = paddle.reshape(x_data, (-1, 3, 32, 32)) / 255.0\n", + " \n", + " y_data = paddle.cast(data[1], 'int64')\n", + " y_data = paddle.reshape(y_data, (-1, 1))\n", + " \n", + " logits = model(x_data)\n", + " loss = F.softmax_with_cross_entropy(logits, y_data)\n", + " avg_loss = paddle.mean(loss)\n", + " \n", + " if batch_id % 1000 == 0:\n", + " print(\"epoch: {}, batch_id: {}, loss is: {}\".format(epoch, batch_id, avg_loss.numpy()))\n", + " avg_loss.backward()\n", + " opt.step()\n", + " opt.clear_grad()\n", + "\n", + " # evaluate model after one epoch\n", + " model.eval()\n", + " accuracies = []\n", + " losses = []\n", + " for batch_id, data in enumerate(valid_loader()): \n", + " x_data = paddle.cast(data[0], 'float32')\n", + " x_data = paddle.reshape(x_data, (-1, 3, 32, 32)) / 255.0\n", + " \n", + " y_data = paddle.cast(data[1], 'int64')\n", + " y_data = paddle.reshape(y_data, (-1, 1)) \n", + " \n", + " logits = model(x_data) \n", + " loss = F.softmax_with_cross_entropy(logits, y_data)\n", + " acc = paddle.metric.accuracy(logits, y_data)\n", + " accuracies.append(np.mean(acc.numpy()))\n", + " losses.append(np.mean(loss.numpy()))\n", + " \n", + " avg_acc, avg_loss = np.mean(accuracies), np.mean(losses)\n", + " print(\"[validation] accuracy/loss: {}/{}\".format(avg_acc, avg_loss))\n", + " val_acc_history.append(avg_acc)\n", + " val_loss_history.append(avg_loss)\n", + " model.train()\n", + "\n", + "model = MyNet(num_classes=10)\n", + "train(model)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAYgAAAEKCAYAAAAIO8L1AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+j8jraAAAgAElEQVR4nO3deXyU5dn3/89BSEgIWyCsCbtAABWRCKioKKK0KLQqora1Wi2tt0tt77a39fFR6tL6dLHW1l8f0UerdUHFqqioFcTihibBCkKQfUmAECAEQvbM8ftjhjiEAYJkmCzf9+s1L+da55iRzHeu67zO8zJ3R0REpK5WsS5AREQaJwWEiIhEpIAQEZGIFBAiIhKRAkJERCJSQIiISERRDQgzm2RmX5rZGjO7LcLyPma20Mw+M7OlZvbNsGW/Cm33pZldGM06RUTkYBatfhBmFgesAiYCeUAWcKW7rwhbZxbwmbv/zcyGAfPcvV/o+XPAaKAXMB8Y7O41USlWREQOEs0jiNHAGndf5+6VwGxgap11HOgQet4R2BJ6PhWY7e4V7r4eWBPan4iIHCeto7jvNGBz2HQeMKbOOjOBf5nZzUAycH7YtovrbJtW9wXMbAYwAyA5OXlURkZGgxQuItJS5OTk7HD3rpGWRTMg6uNK4O/u/kczOx34h5mdWN+N3X0WMAsgMzPTs7Ozo1SmiEjzZGYbD7UsmgGRD/QOm04PzQt3HTAJwN0/NrNEILWe24qISBRFsw0iCxhkZv3NLAG4AphbZ51NwAQAMxsKJAKFofWuMLM2ZtYfGAR8GsVaRUSkjqgdQbh7tZndBLwNxAGPu/tyM7sbyHb3ucB/A4+a2U8JNlhf48HLqpab2QvACqAauFFXMImIHF9Ru8z1eFMbhIjI0TOzHHfPjLRMPalFRCQiBYSIiESkgBARkYgUECIiEpECQkREIlJAiIhIRAoIERGJSAEhIiIRKSBERCQiBYSIiESkgBARkYgUECIiEpECQkREIlJAiIhIRAoIERGJSAEhIiIRKSBERCQiBYSIiESkgBARkYgUECIiEpECQkREIlJAiIhIRAoIERGJSAEhIiIRKSBERCQiBYSIiESkgBARkYiiGhBmNsnMvjSzNWZ2W4TlfzKz/4Qeq8xsd9iymrBlc6NZp4iIHKx1tHZsZnHAw8BEIA/IMrO57r5i/zru/tOw9W8GRobtoszdT4lWfSIicnjRPIIYDaxx93XuXgnMBqYeZv0rgeeiWI+IiByFaAZEGrA5bDovNO8gZtYX6A+8GzY70cyyzWyxmX0remWKiEgkUTvFdJSuAOa4e03YvL7unm9mA4B3zWyZu68N38jMZgAzAPr06XP8qhURaQGieQSRD/QOm04PzYvkCuqcXnL3/NB/1wHvcWD7xP51Zrl7prtndu3atSFqFhGRkGgGRBYwyMz6m1kCwRA46GokM8sAUoCPw+almFmb0PNU4ExgRd1tRUQkeqJ2isndq83sJuBtIA543N2Xm9ndQLa77w+LK4DZ7u5hmw8FHjGzAMEQuz/86icREYk+O/B7uenKzMz07OzsWJchItKkmFmOu2dGWqae1CIiEpECQkREIlJAiIhIRAoIERGJSAEhIiIRKSBERCQiBYSIiESkgBARkYgUECIiEpECQkREIlJAiIhIRAoIERGJSAEhIiIRKSBERCQiBYSIiESkgBARacJqAs7Okoqo7FsBISLSRH24ZgcX/eUDbnh6CdG4+VvUbjkqIiLRsbawhN/Oy2V+7nbSU5K48dyBUXkdBYSISBNRtK+SPy9YzdOLN5IYH8dt38jgmjP6kRgfF5XXU0CIiDRyldUBnvp4Aw8tWE1JRTVXjenDrecPJrVdm6i+rgJCRKSRcnfeXr6N3765ko07SzlncFf+1+ShDO7e/ri8vgJCRKQRWpZXzD1vrODT9bsY3L0df7/2NMYP6XZca1BAiEijUVUTYHdpFcVllewurQo+yqrYXVpJcdlX04GAc8Hw7lw4vEfUzr/HytbiMn7/9pf8c0k+XZITuO/bJzI9szet447/RacKCBFpcBXVNRTXfrkHv+CDX+6VtV/yxeHTpVUUl1VRUlF9yH22MuiYFE+ntgmUVdbwxrKttG/TmotG9GJaZjoje3fCzI7ju2xY+yqqeWTROmYtWkvA4YbxA/mv8QNpnxgfs5oUECJSLztLKvhs0252lFQc8ot//6/8sqqaQ+6ndSujU9v42i/7Hh0SGdKjPZ2SEujUNj70SKBTUuh5UgId28bTvk1rWrUKBkAg4Cxev5M52Xm8/Fkez326iQFdk7lsVDqXnppO9w6Jx+tjOWY1AeelJXn84e0v2b63gotH9OKXFw6hd+e2sS4Ni0bniljIzMz07OzsWJch0mwUl1Xx6fpdfLR2Bx+v3cnKbXsPWB4fZwd+kdd53jHsC35/IKQkJ5CcENegv/T3llfx5rJtvJizmawNRbQyOGtQV6ZlpnP+0O6N+hTUR2t2cO8buazYuoeRfTpxx+RhjOqbclxrMLMcd8+MuEwBISIApZXVZG0o4qO1O1i8difL8osJOLRp3YrMfimcMTCVMf0706tTEp3axpMU37Bf9A1hw459vLQkj5dy8thSXE7HpHimjOjFZaPSOTm9Y6Opd11hCb+Zt5L5uQWkdUrif76RwcUn94xJfQoIETlIeVUNn23azcdrd/DR2p18nrebqhqndStjZJ9OnD4wldMHdGFkn06N+ld4JDUB5+O1O3kxZzNvfbGNiuoAg7u347JR6XxrZBrd2sfmFFTdjm7/de5AfnBm/5h+vjELCDObBPwZiAMec/f76yz/E3BuaLIt0M3dO4WWfR+4I7TsXnd/8nCvpYAQObyqmgBL84prAyFnYxEV1QFaGZyU1pHTB6ZyxsAuZPZLoW1C82me3FNexeufb2VOzmaWbNpNXCtj/OCuXDYqnQlDu5PQOvpXB1VWB/jH4o08tGA1e8uruGJ0H356/mC6to9uR7f6iElAmFkcsAqYCOQBWcCV7r7iEOvfDIx09x+YWWcgG8gEHMgBRrl70aFeTwEhDam6JsCCldt5PmszRaWVpKe0JT0libROSaSnJNVON+Zf1jUBJ3frHj4KBULW+l3sqww2Hmf0aM8ZoUAYPaAzHWJ4pczxtLawhDk5efxzSR4FeypIaRvP1FPSuGxUOsN7dWjwUzzuzr9WFPDbebls2FnKWYNSuWPyMIb0OD4d3eojVgFxOjDT3S8MTf8KwN1/e4j1PwLucvd3zOxKYLy7/yi07BHgPXd/7lCvp4CQhrCtuJzZWZuY/elmtu0pp0eHRPqnJpO/u4wtu8uoDhz495Larg1pKftD46vgSO+URFpK0nH9Je7urN5ewkdrgoHwyfpdFJdVATCwazKnD+xS247QJcpDNDR2NQHn/dWFzMnJ418rCqisDpDRo33tKaiGGMLii/xi7nl9BZ+s38Wgbu34X5OHHveObvVxuICI5r/eNGBz2HQeMCbSimbWF+gPvHuYbdMibDcDmAHQp0+fY69YWq
...[base64 PNG data elided: the rendered output is the matplotlib line plot of validation accuracy per epoch, rising from about 0.57 to about 0.72]...\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "plt.plot(val_acc_history, label = 'validation accuracy')\n", + "\n", + "plt.xlabel('Epoch')\n", + "plt.ylabel('Accuracy')\n", + "plt.ylim([0.5, 0.8])\n", + "plt.legend(loc='lower right')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## The End\n", + "\n", + "从上面的示例可以看到,在cifar10数据集上,使用简单的卷积神经网络,用飞桨可以达到71%以上的准确率。你也可以通过调整网络结构和参数,达到更好的效果。" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.3" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/doc/paddle/tutorial/cv_case/convnet_image_classification/convnet_image_classification.rst b/doc/paddle/tutorial/cv_case/convnet_image_classification/convnet_image_classification.rst new file mode 100644 index 0000000000000000000000000000000000000000..9305d3994fad93d9807e28b81e26088e8c93b314 --- /dev/null +++ b/doc/paddle/tutorial/cv_case/convnet_image_classification/convnet_image_classification.rst @@ -0,0 +1,255 @@ +使用卷积神经网络进行图像分类 +============================ + +本示例教程将会演示如何使用飞桨的卷积神经网络来完成图像分类任务。这是一个较为简单的示例,将会使用一个由三个卷积层组成的网络完成\ `cifar10 `__\ 数据集的图像分类任务。 + +设置环境 +-------- + +我们将使用飞桨框架2.0beta版本。 + +.. code:: ipython3 + + import paddle + import paddle.nn.functional as F + from paddle.vision.transforms import Normalize + import numpy as np + import matplotlib.pyplot as plt + + paddle.disable_static() + print(paddle.__version__) + + +.. parsed-literal:: + + 2.0.0-beta0 + + +加载并浏览数据集 +---------------- + +我们将会使用飞桨提供的API完成数据集的下载并为后续的训练任务准备好数据迭代器。cifar10数据集由60000张大小为32 +\* +32的彩色图片组成,其中有50000张图片组成了训练集,另外10000张图片组成了测试集。这些图片分为10个类别,我们的任务是训练一个模型能够把图片进行正确的分类。 + +.. code:: ipython3 + + cifar10_train = paddle.vision.datasets.cifar.Cifar10(mode='train', transform=None) + + train_images = np.zeros((50000, 32, 32, 3), dtype='float32') + train_labels = np.zeros((50000, 1), dtype='int32') + for i, data in enumerate(cifar10_train): + train_image, train_label = data + train_image = train_image.reshape((3, 32, 32 )).astype('float32') / 255. + train_image = train_image.transpose(1, 2, 0) + train_images[i, :, :, :] = train_image + train_labels[i, 0] = train_label + +浏览数据集 +---------- + +接下来我们从数据集中随机挑选一些图片并显示,从而对数据集有一个直观的了解。 + +.. code:: ipython3 + + class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] + + plt.figure(figsize=(10,10)) + sample_idxs = np.random.choice(50000, size=25, replace=False) + + for i in range(25): + plt.subplot(5, 5, i+1) + plt.xticks([]) + plt.yticks([]) + plt.imshow(train_images[sample_idxs[i]], cmap=plt.cm.binary) + plt.xlabel(class_names[train_labels[sample_idxs[i]][0]]) + plt.show() + + + +.. image:: https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/paddle/tutorial/cv_case/convnet_image_classification/convnet_image_classification_files/convnet_image_classification_001.png?raw=true + + +组建网络 +-------- + +接下来我们使用飞桨定义一个使用了三个二维卷积(\ ``Conv2d``)且每次卷积之后使用\ ``relu``\ 激活函数,两个二维池化层(\ ``MaxPool2d``\ ),和两个线性变换层组成的分类网络,来把一个\ ``(32, 32, 3)``\ 形状的图片通过卷积神经网络映射为10个输出,这对应着10个分类的类别。 + +.. 
code:: ipython3 + + class MyNet(paddle.nn.Layer): + def __init__(self, num_classes=1): + super(MyNet, self).__init__() + + self.conv1 = paddle.nn.Conv2d(in_channels=3, out_channels=32, kernel_size=(3, 3)) + self.pool1 = paddle.nn.MaxPool2d(kernel_size=2, stride=2) + + self.conv2 = paddle.nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3,3)) + self.pool2 = paddle.nn.MaxPool2d(kernel_size=2, stride=2) + + self.conv3 = paddle.nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3,3)) + + self.flatten = paddle.nn.Flatten() + + self.linear1 = paddle.nn.Linear(in_features=1024, out_features=64) + self.linear2 = paddle.nn.Linear(in_features=64, out_features=num_classes) + + def forward(self, x): + x = self.conv1(x) + x = F.relu(x) + x = self.pool1(x) + + x = self.conv2(x) + x = F.relu(x) + x = self.pool2(x) + + x = self.conv3(x) + x = F.relu(x) + + x = self.flatten(x) + x = self.linear1(x) + x = F.relu(x) + x = self.linear2(x) + return x + +模型训练 +-------- + +接下来,我们用一个循环来进行模型的训练,我们将会: - +使用\ ``paddle.optimizer.Adam``\ 优化器来进行优化。 - +使用\ ``F.softmax_with_cross_entropy``\ 来计算损失值。 - +使用\ ``paddle.io.DataLoader``\ 来加载数据并组建batch。 + +.. code:: ipython3 + + epoch_num = 10 + batch_size = 32 + learning_rate = 0.001 + +.. code:: ipython3 + + val_acc_history = [] + val_loss_history = [] + + def train(model): + print('start training ... ') + # turn into training mode + model.train() + + opt = paddle.optimizer.Adam(learning_rate=learning_rate, + parameters=model.parameters()) + + train_loader = paddle.io.DataLoader(cifar10_train, + places=paddle.CPUPlace(), + shuffle=True, + batch_size=batch_size) + + cifar10_test = paddle.vision.datasets.cifar.Cifar10(mode='test', transform=None) + valid_loader = paddle.io.DataLoader(cifar10_test, places=paddle.CPUPlace(), batch_size=batch_size) + + for epoch in range(epoch_num): + for batch_id, data in enumerate(train_loader()): + x_data = paddle.cast(data[0], 'float32') + x_data = paddle.reshape(x_data, (-1, 3, 32, 32)) / 255.0 + + y_data = paddle.cast(data[1], 'int64') + y_data = paddle.reshape(y_data, (-1, 1)) + + logits = model(x_data) + loss = F.softmax_with_cross_entropy(logits, y_data) + avg_loss = paddle.mean(loss) + + if batch_id % 1000 == 0: + print("epoch: {}, batch_id: {}, loss is: {}".format(epoch, batch_id, avg_loss.numpy())) + avg_loss.backward() + opt.step() + opt.clear_grad() + + # evaluate model after one epoch + model.eval() + accuracies = [] + losses = [] + for batch_id, data in enumerate(valid_loader()): + x_data = paddle.cast(data[0], 'float32') + x_data = paddle.reshape(x_data, (-1, 3, 32, 32)) / 255.0 + + y_data = paddle.cast(data[1], 'int64') + y_data = paddle.reshape(y_data, (-1, 1)) + + logits = model(x_data) + loss = F.softmax_with_cross_entropy(logits, y_data) + acc = paddle.metric.accuracy(logits, y_data) + accuracies.append(np.mean(acc.numpy())) + losses.append(np.mean(loss.numpy())) + + avg_acc, avg_loss = np.mean(accuracies), np.mean(losses) + print("[validation] accuracy/loss: {}/{}".format(avg_acc, avg_loss)) + val_acc_history.append(avg_acc) + val_loss_history.append(avg_loss) + model.train() + + model = MyNet(num_classes=10) + train(model) + + +.. parsed-literal:: + + start training ... 
+ epoch: 0, batch_id: 0, loss is: [2.331658] + epoch: 0, batch_id: 1000, loss is: [1.6067888] + [validation] accuracy/loss: 0.5676916837692261/1.2106356620788574 + epoch: 1, batch_id: 0, loss is: [1.1509854] + epoch: 1, batch_id: 1000, loss is: [1.3777964] + [validation] accuracy/loss: 0.5818690061569214/1.1748384237289429 + epoch: 2, batch_id: 0, loss is: [1.051642] + epoch: 2, batch_id: 1000, loss is: [1.0261706] + [validation] accuracy/loss: 0.6607428193092346/0.9685573577880859 + epoch: 3, batch_id: 0, loss is: [0.8457774] + epoch: 3, batch_id: 1000, loss is: [0.6820123] + [validation] accuracy/loss: 0.6822084784507751/0.9241172075271606 + epoch: 4, batch_id: 0, loss is: [0.9059805] + epoch: 4, batch_id: 1000, loss is: [0.587117] + [validation] accuracy/loss: 0.7012779712677002/0.8670551180839539 + epoch: 5, batch_id: 0, loss is: [1.0894825] + epoch: 5, batch_id: 1000, loss is: [0.9055369] + [validation] accuracy/loss: 0.6954872012138367/0.8820587992668152 + epoch: 6, batch_id: 0, loss is: [0.4162583] + epoch: 6, batch_id: 1000, loss is: [0.5274862] + [validation] accuracy/loss: 0.7074680328369141/0.8538646697998047 + epoch: 7, batch_id: 0, loss is: [0.52636147] + epoch: 7, batch_id: 1000, loss is: [0.70929015] + [validation] accuracy/loss: 0.7107627987861633/0.8633227944374084 + epoch: 8, batch_id: 0, loss is: [0.57556355] + epoch: 8, batch_id: 1000, loss is: [0.83717] + [validation] accuracy/loss: 0.69319087266922/0.903077244758606 + epoch: 9, batch_id: 0, loss is: [0.88774866] + epoch: 9, batch_id: 1000, loss is: [0.91165334] + [validation] accuracy/loss: 0.7194488644599915/0.8668457865715027 + + +.. code:: ipython3 + + plt.plot(val_acc_history, label = 'validation accuracy') + + plt.xlabel('Epoch') + plt.ylabel('Accuracy') + plt.ylim([0.5, 0.8]) + plt.legend(loc='lower right') + + + + +.. parsed-literal:: + + + + + + +.. 
image:: https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/paddle/tutorial/cv_case/convnet_image_classification/convnet_image_classification_files/convnet_image_classification_002.png?raw=true + + +The End +------- + +从上面的示例可以看到,在cifar10数据集上,使用简单的卷积神经网络,用飞桨可以达到71%以上的准确率。你也可以通过调整网络结构和参数,达到更好的效果。 diff --git a/doc/paddle/tutorial/cv_case/convnet_image_classification/convnet_image_classification_files/convnet_image_classification_001.png b/doc/paddle/tutorial/cv_case/convnet_image_classification/convnet_image_classification_files/convnet_image_classification_001.png new file mode 100644 index 0000000000000000000000000000000000000000..3db75a95728c4c29509026cf967057b1dca88852 Binary files /dev/null and b/doc/paddle/tutorial/cv_case/convnet_image_classification/convnet_image_classification_files/convnet_image_classification_001.png differ diff --git a/doc/paddle/tutorial/cv_case/convnet_image_classification/convnet_image_classification_files/convnet_image_classification_002.png b/doc/paddle/tutorial/cv_case/convnet_image_classification/convnet_image_classification_files/convnet_image_classification_002.png new file mode 100644 index 0000000000000000000000000000000000000000..488abd9af3265a97637339ee6ebc34fec742105e Binary files /dev/null and b/doc/paddle/tutorial/cv_case/convnet_image_classification/convnet_image_classification_files/convnet_image_classification_002.png differ diff --git a/doc/paddle/tutorial/cv_case/image_search/image_search.ipynb b/doc/paddle/tutorial/cv_case/image_search/image_search.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..d9cdaa7ea2c8fa7d2262b51ab2fcd2d10b0b98ce --- /dev/null +++ b/doc/paddle/tutorial/cv_case/image_search/image_search.ipynb @@ -0,0 +1,650 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "jg3_YfhpC1JG" + }, + "source": [ + "# 基于图片相似度的图片搜索\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 简要介绍\n", + "\n", + "图片搜索是一种有着广泛的应用场景的深度学习技术的应用,目前,无论是工程图纸的检索,还是互联网上相似图片的搜索,都基于深度学习算法能够实现很好的基于给定图片,检索出跟该图片相似的图片的效果。\n", + "\n", + "本示例简要介绍如何通过飞桨开源框架,实现图片搜索的功能。其基本思路是,先将图片使用卷积神经网络转换为高维空间的向量表示,然后计算两张图片的高维空间的向量表示之间的相似程度(本示例中,我们使用余弦相似度)。在模型训练阶段,其训练目标是让同一类别的图片的相似程度尽可能的高,不同类别的图片的相似程度尽可能的低。在模型预测阶段,对于用户上传的一张图片,会计算其与图片库中图片的相似程度,返回给用户按照相似程度由高到低的图片的列表作为检索的结果。\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 环境设置\n", + "\n", + "本示例基于飞桨开源框架2.0版本。" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 34 + }, + "colab_type": "code", + "id": "hhxOPw1SC1JI", + "outputId": "6130c395-d0e3-4566-86c6-fd4de9aed371" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2.0.0-beta0\n" + ] + } + ], + "source": [ + "import paddle\n", + "import paddle.nn.functional as F\n", + "import numpy as np\n", + "import random\n", + "import matplotlib.pyplot as plt\n", + "from PIL import Image\n", + "from collections import defaultdict\n", + "\n", + "paddle.disable_static()\n", + "print(paddle.__version__)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 数据集\n", + "\n", + "本示例采用[CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html)数据集。这是一个经典的数据集,由50000张图片的训练数据,和10000张图片的测试数据组成,其中每张图片是一个RGB的长和宽都为32的图片。使用`paddle.dataset.cifar`可以方便的完成数据的下载工作,把数据归一化到`(0, 1.0)`区间内,并提供迭代器供按顺序访问数据。我们会把训练数据和测试数据分别存放在两个`numpy`数组中,供后面的训练和评估来使用。" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "colab": { + 
"base_uri": "https://localhost:8080/", + "height": 68 + }, + "colab_type": "code", + "id": "czLF_NIuC1JW", + "outputId": "be9661e0-af81-4137-96fb-1dadb96c0726" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "(50000, 3, 32, 32)\n", + "(50000,)\n" + ] + } + ], + "source": [ + "cifar10_train = paddle.vision.datasets.cifar.Cifar10(mode='train', transform=None)\n", + "x_train = np.zeros((50000, 3, 32, 32))\n", + "y_train = np.zeros((50000, 1), dtype='int32')\n", + "\n", + "for i in range(len(cifar10_train)):\n", + " train_image, train_label = cifar10_train[i]\n", + " train_image = train_image.reshape((3,32,32 ))\n", + " \n", + " # normalize the data\n", + " x_train[i,:, :, :] = train_image / 255.\n", + " y_train[i, 0] = train_label\n", + "\n", + "y_train = np.squeeze(y_train)\n", + "\n", + "print(x_train.shape)\n", + "print(y_train.shape)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 68 + }, + "colab_type": "code", + "id": "lJDRE4a9QaJo", + "outputId": "ceed15eb-9c2c-4051-8009-0444136b5462" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "(10000, 3, 32, 32)\n", + "(10000,)\n" + ] + } + ], + "source": [ + "cifar10_test = paddle.vision.datasets.cifar.Cifar10(mode='test', transform=None)\n", + "x_test = np.zeros((10000, 3, 32, 32), dtype='float32')\n", + "y_test = np.zeros((10000, 1), dtype='int64')\n", + "\n", + "for i in range(len(cifar10_test)):\n", + " test_image, test_label = cifar10_test[i]\n", + " test_image = test_image.reshape((3,32,32 )) \n", + " \n", + " # normalize the data\n", + " x_test[i,:, :, :] = test_image / 255.\n", + " y_test[i, 0] = test_label\n", + "\n", + "y_test = np.squeeze(y_test)\n", + "\n", + "print(x_test.shape)\n", + "print(y_test.shape)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 数据探索\n", + "\n", + "接下来我们随机从训练数据里找一些图片,浏览一下这些图片。" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 357 + }, + "colab_type": "code", + "id": "JhjoI3w7C1JY", + "outputId": "4bd42a22-2a48-4296-f0af-5172d8f46793" + }, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAVQAAAFUCAIAAAD08FPiAAEAAElEQVR4nLT9yZI0W7IuCGm31jIzd4/mb/bOzHPuOffcQi5ScgERJgwQHgNhxAReABHeABHegCkPgDBhxKhEQGrAkKagqIKq25w2M3fzNxHh7ma2ljYMlrmHx793nnMLCpM/I2O7e5i7my1dqvrpp5/i//7/8B8BACExMwsDgGqzCGACRHcHD4Bg4pIyIZqZqlr/Z4YRRIAR7hYRSNxUz6eTqjJRGfJuv3e3r58/H49PdZmtNQBAIiJKKU3TdDgc7u7u3eHTl6+fP33+9POnui5ZEhETxrTbffeb3x7u75Owm51OR11XggB3N/3N7/78v/vf+x88vv8IAH/7D3/7v/nf/W//s3/9/5YygrAhOiESsaBkZFSMxhgFWViERQgZjaAhNIAwIAM2TBqg3gCdCZgC0DEMw4VpzANjcneAgAiA8Ah312YRKMSEhIFhbk3dgilIGFOqgS9zLBoe5A7h2NSX1po6OFizVu2/8y//w//J//B/9Fd//s8A4N9+Xf/X/8mP/8mPpywohIgIAP2nQwAExvUnACAAISAA9v+G/jji9ScFAEAgAkBEXH/2Yzs/wPV7Ibw+Gzdn285w8ywgEgWjEyiGR1CD/N/6ePgf/4cf/vl9BoD/53/5f/tf/K/+5/+n/8v/ccxDKJ2fGziOYy5FJDFG+OLhgMh1tS9fzozw4eN+vy8oNC/65ecTEnz3/TTtxdxLobv9MOQELucX/+kP8+m0Oi8O7oHz2Z6+zAF+91C+/83+L//i3YeP4zClpv7z5/mPv3/567/+dHxZU5LDffn+d7txFKvgqhA67OLd97lMKUwihJhPx/rHP3z9V//yv/8/+5/+L/+b/8F/GwB++nr+j/8ff/+3Pz4TIxECIQIQEgIg4M0lR7hc0P4rvLkhN88CAAARvV5h8u2JCPCACAIgZOZEyAAEgEiv6wERCIAQCYCQCAABr58Hgvpd8giz+PCQ/9Vf3d3vU39Q4BdH/PKhyyKI/jP+1EuuC+LyFxAAAQjEyMyICIh9aSJuH8/dzcwdwh0AiJCpHwABEGGq1hphhDl0w9v+/fITeoRDOAABAAESoRAyACMxMSMIEiOGuQUAOmAwXT/w9u3x9T+BEAkJAZn7FYYIiIjwq/EHAAKgA0Q4BkS4gQEFMAEDMIBBgEcAAiJiN57+HT0uNxtvvwmaiWpmQNiM/9awu3EG3Bg/xO0pXtfajWFfltf/H4w/UAkCASNYQ8w4br4PUSSGITOQNHbzCAezQHKK7ly2U6XMCOARapaEmDEXYsZh4JJZDZmRGYkAEZiBOFIGmVJALCvUFZjYHEyjrrauba0pDQSIzMSJRUiEcxImrtWZnJFZBBFywVJKFlkVTMMtWvW2ujb/J9f7f8Xj9nT47SPRrxzCt8efXvav/+LtqREgfu1U2yFIGACAEAiBV+MOuvxd9L0kIsItwMzc3d0jAsI9Arx7/v7WDuB9uXq4uwc4IUiSlKQxu/UAgZCQkCJcTdd1jQi3huDCBMIiRIDuHu5tWVZh8AIAbgru8fqVby6Bh7u5NY9EQQhISEVYGBmDiLKwEBIAmNfaLNwoRAIzUt9KwQGMHLe7EQ6BTCTMQsREDBwObmHWXX6YOSIiMxBqeLhFOIQ5KjNKzpQQCSLCvLkTS6EgNwMIZgk0jVBC6P8uhwfpOq6zBCMTxrbpBABg3ywgLrtxXyiMcfnYb9dV/8aX3781frx1QP2PIgD8Txn/ZQu4PouIiGSECwEjhgdplLWW8O2kBJHIpgLv7gto9sXPpt6iuYUHIYIDOiIGEU67IdxVdVlBBs4D7e+kFHp8yHmQdTUkyAlTQgps2SXbmPDxtxMCf/rJdF2WrMsc6+IvL/XL11MZYRhRUioljUMehmwK41BY+PSsWuH+Lg8DS5LDQe7v9kS0nM/rrBF2ftbTU1tOFn7ZeAHCPdwDMRDBERAd/PUSX7ZQgEu49frIZofb5aTXa95P3+M4p7i+CMPBwSEAkTCCLjbuN0FFjwe3N+/3FLrxEgT0qKR7XQy83PB+SF/4iFss0c33mwMvi+Cf9PwIQIhC5NRN16w1RIBw7F8vgLbPhxDg5lrbAnMAmCojDiW7sDBjgJszc5hpXQgDEV3VTS9L3/3th43QCA1vEQhIDJQoMhNhMFFmYoRwM3dXdbcQZEZkwgQEAYHU74NvrpOZEnMSFkZCAkfzbe9DpH5FERCQAsAczD1CARTQkClShABgAEaAI6AQBQoEABJhkGKoExlQwKtFAQZQIAeyAwIBsm8XDzCue9/tPk8Rr3cVb/4vboz/sok49HjsNTi8eWsIAMfbdesX47+8w80C6OvGAhEwQZgHGRT3HLCtbibcD/Junz8+7EKzHh3qsqqbu1ZHBHQiYKJgpjEzQIS1lLAUTpmGIY8jPzzmlPh0BggohZgATJm1jEGJ338Y3Onl6yKMuzELo4ETk7o3MwcgwTzQ7pAfHycmRkS1qKuZQUqKCIXMQhAEgULZVmvN55PNJ62Luv/JBf+PHf2PtsgOf/EE3Ljlm50gIq6pwbenu652vP3Zb2Vclu31uQDEG8//y1MKMSPCtpQBe2wagEBIiA6Er7nlzW3fFj7CJUYACARgRAIGZjSuZqCqyxLhbZl1XV0VzJGIAtEBMbxZ8+pVAdEjmHm/m7YYOyDMIYKIQl1hBQAzM7cA6+9pbq8hKAaSAbYADg8kISSCbt0ghAkpwltTrbUjFACITJRYMjqEBUADjQh0gGCmlCSnlJkRo39Vx4gwAEgpEUl4mLmGq5sFeIBFAAYRBENQODoGeQQHBqIQEHNitgh1qwiuaBqK3veI/lWIbMrLXZkTIhIbJQcxQAfsIdeWMb06CduM/8aRX3PC60MENz6fbjy5vzomwqCLa49tZ3hdvrG9763xAxEJDgQOoRbokEnkunxF6N1+99t3998/3qtKe/FocFzWuXrT5g5gzBgppSSQM+RMwjlnmQ45D4xE08D395kQAc0tyogQvp4qgI0Tlkn2e1kWN50h1vu7hFwUXAbcHUSKkDAlLoiH+/Ibu8/l/Onnl3Vt4RjVP38+Ho8w7MCi7aZSSg7lMNG11tnbqtbsujlubhKRkAgx6MbP35jWtibj9XcEIKIbk7748O0mvEYWV+vfri4BxiXtw0u0++rn8Wrrr6aO8PpqB4IA6vkmbVZ+NX4koovxAyBE4KtPwOsuFRBm1kEI3O45mr9ml4SIEGHqqroudV3runhYPXOEt1ZbrdYamAMRIAWhKWp0dKJ/NiKiLeGPbV8Jh3BVD9PL+oOAjoR03385iHAYZdoJCSEjCYlA4mByRiTsGSm4eQ/aAQIJkSIogiAA0JGImMLD+rWkC/KC0RMqB3BmZKFcMqG4OYBpa+H96rKDQUcJiILQAcMCHDJL5pRLFslBqO5VG2NQZMIAipToujZCa3v5h/XzZyNCSi6F8k6mO0qlGXj09YL4avy/kkfiay5z3RRew/43IJP7W+PHvhy3M98Yfw8JbmMUxCBiRiYgcI
AABibm6yuYaBzKfhyGLEo8TXK4S1KiNFob1tXb3O+jE/FYcLfnceRSkpTMCRGjFJQEjJEzhkNKFO6eEIMkcxlTyewthhyHHe52OeWk6CCRBkyFkBEJJcG0Y6YxIp6fT3gOYvTw2swhOHNrvq6ViRk5MyhZFpomHkemm4zszSX+r+34xf7xT7zydfPtSV//b389S1w8/z92dMAPCTsQteFbAX0LQAR0CEAI86gNAoiZmBCJIPyyj3BPKdRaW+bT+Xh8mc/nui7m1u3VwwGCAQnBo+NG4RHq7tEXM4rIBeojJCYiBAIkRIjYQgBJSYiRJMLVG2x+EABAhO4fpw8fDx4MRESCRIyA4OHYXbAHdNANCBCBOIDcAtw6lsVEgtzzcwdzC60OzkaAhBFgiDAMiViSpAiq7hHu5uAgIsBEjg6E5ISIIBFkGug05pxkGKdJUjGKajrPLuhjHteW0pl2Y2Lawr+2nH7+m//07/9f/0ZYMBXM0+7dbz7+s/9GGT8gkgb2L4J4ky3Eq0/oiOIvl+wlz9/u+esyIgAERkQAor6gLjFC32MjtpVEABfsYNsuiHuuhwCAhkEMQHyT0yKyCCAurbkpZdvd8Z5GB2ge51P7/PNxPhu6C/k05of7fH+f8yARSc2rNlNrtXrfrpgQgRPnw4gODkhEHJQQP7wrd5OMU+HEimDoBpYyBISHMUNmzIlald1UlsUjzCMQMGWZdmUYCmIgeCmFgYkUKUneffw4pfQalnfP3zGybyzym8t9sbwef2G8tcV+MS+36DWyiA7V99395gS3b3XZlK+30TGumXQHDPGK2V3f9Zf5usBNTNIzaSQCDwKK7vYCzK0tSzudwzzl3P8hAhFGIISZNqutLfP59HI+Hk/Hl2VZ2lrNbHPOPTgkZERwBwD3CIJABIINOtQezCMSIQkSATIR9dJcEhERVWXiDmI5uOkGlgMAM05TvrsbAgQ7PAfoFqahtUcJ21ZBTCRCHJIBGQwMPBCEAYkl3DuaD9FLOABxSe4RmTElwR7wq5qpuyMEbYkTQwds0ALQDMFDa0jgIGUo45AzJzF20SCTTIjIqyqzT1Omi/Fbq8effv/09/82l8RpgDxxBHz8bTo8OjIgu4cBBlJcYSV6DYL6lkwXEOe6ut4i/K8LqG8JvMWA+Evjxw73wLZYN4B0M37qOyNess4AoJscFgACyYGaOQBKwSlJKUzEFjDvWk50eq7rYiVxGWkcab+XnKVVXCHMIsJbCyNyAyQGD2KSzNwrRwFhwQB3+2Qjp0LApIAGpI49kHN1ZmBCZEoJhDmxIIMDSqAkyUmIxAPMHMOJsBRmybv99OHD8Mb4AZDeFGf+CX99U9h7+/ifeDncnNwNIgI64ku//gdwuYOXXyKuDyDe4Aq/CBBBMDa/T0QesW1sBHjJ6sN9Xer55fjy5bPXVso4TeO035eSOVEg6VqX0/nl6+fT09PL89M6z9ZWVVU1cw+zvuddYvYIs3D3CE4yHfZSMgHWZX769GWZZ/cApO47ApBF8pD3+/3D42MpxXR2i4gg5pJLa/aKQhBIwjKQpCKSRXIE1NXq2hZvrYX1vAWRkuScUwpkd9Q1qnsIbewDR3ezcEXOIpwlCzMCEgezsSAnMY3TsqxzM41wJCIMDPfAa/BPbt4MQt3XEMJS0pgyA1I4RhCjlGwpALAYM/t+zMLXm27ttNjLzB5iaBVgf4a5QcWUCmJSCAhwoNg2G6Bbz49Il5z/jfFf0ODroowNhg4AINhK11s9s78+INA9ICAcX2FC3KI3BOh4qiEYoFMAgDM5Xlx/AHpgC0wdQB2EEMYizOQWU0nvHw7nk37+8qLa8ogogAQEgK7smjAs3A2aubUA9AQSiMQEAGgGFuBIEcNADhgYHtZBxEEEiELD0ETQA8OtrerNKTDnDBxmiojg6BqquIJbXSioZBjGnHO8e1euxt/zfUJkIrxWRKhfCXxrdIDUE/HX++LocNlYe7brN4XYLZ7y2KD58LDmbgEYLAQ9eX0NMjaT7iXebubxdhcAdyTAjgZEePhb7y9b7f0CJlw+6eVfx7HNa2vLsuq6um34X9iQh+Ru59Px5cvnrz/9+PLly/nlpa1LuLmbmkcEIaWUx2kgZrPWmvZaISBmSdNun4aia13O8zrX+bwgCTIQobnVWgMgDRkBHx7fpZQRzMDUHZCRBJHfxlKG4imDJGbuZTm3CgAe7hHRXbdkLoNIigBtbqER3vEyDOhwHQL0YjISYXfIRMjSHQ801WVZl1XdgFBEGBGjF/OxL3cwR1eH6lgDUpCTIAvnXntERswQARa+amWGqbx6fghAAzJMDsmjWYNltfMai4owcUE3BHAkB7oF8665PV0z/G1lwcWi39YCrkbeKSJX47/UpoAgwp168LytENqW7pYaEjmT91C4r8WtpPq6EGOzHKKUWBjKyIxoTTHzUMZpcqC2LMgJciJCBPdoLZoyhgc0jeauLbYwlbyJBQapkYGgMGOS5BhuUVtoQ3NXDAdTN0kUkQix1fbydT0+r/PJKCUGRKMIWNUwICVqDHWu6DgU2k2Qs/M3S4yuXJpv82+4ybq3v6DrLbl5FV1+vmYOePvkBWwL9wbaeq0IMvx/cVywnIALPez2WUHmQPAA9LDY8m8A8J5AIF4CAyYWIkMgVTufTq2uZc1N6/Pnz18//fz06efz84sti7bmrqqtqRHRNE2Hw91vfvO7Mg7zupzP5+U8h3lK6fBw9+E3vyGmL58+Leea8jhCmvZ3ZRiYaF3Xr18/r8sc6hi0G3ePj+8RqSfjHoBAeZyuBuPh1WuNKlAAVK3v5a1pNV1DHTFzEsooA8mAyOAK7oB90/ZwNKeGFDmLGyASRA9dAAKCkEDMvdY6z+t5bbV6dNgIg7kXRcLdHUGDrIEtDtVSizDX4j5QHqdxmCSJCAtThFdd53omhDHlK+JHxDmNQ5mGLMIUCti0zbMuS95BIu7AjBH6ZaXhzVK89SR4g9LHbcnqigvB9qoL9Lth2tvJEMLRETZQrudN/bUdi94SIkZwDAokRGZ8g2wzOHfGJGMEsoAkRHBVBQggSwmmnYi4MOVMjGzN2lpbU8opgFqzVd0MkII1mB3VPJyqZeA0pCQJiCIwAshtXdoyr6d1WcMcoAxZ6x6BTi/1pz+ef/zDy7JY82kYEwO4e211PjMiiej8srZFEWi/h+8bHU+r2huEEy8xz9X5++tGcI1DKRARL4/0mAuvv+M1GIYLMhcIBIEUW0LtEdaorT3PwAjc3i/eYgf4jUlvfnt7Y7e+d8SvFOml+/YLeg7X3X3LBjsEREQkJInEiQgjrKmbmmldl+Pz8/H55fh8XE8naLY9rq21RsySsgOkUqbdgcsgech5Bo9SynTYlWnn7oFMqUx3D5PT7v6xjAMjLueTmUL4sq5tWZbz3JY1D4OwELOa19qa6vX7BICFaTSFFs6m5groPWhCdEZAZCAOpkDcOEmIyCQUgMg9yyHEnMUUL3Qmu1xzdgAzmNe2rK1p9LqHBzYzBwSwgHA3i7BgUORIGMyu5IhATHkou2k4p
jBMRxzCbqRinGm11Os47T4CVHNzrv7FkPTuOY53mUQkUstdg0Yb2GpqW+TeiyvVYTm0cfk0GrdTM0tTyrIEyZh5HQZD+Cq7cxbNbtet3wA2HlgB7QmZwJHKobIjohRKbE3MbW4MTgZNQmYlyQqZWTx02kTYPnq3B5kp5s2suT1bOzk7ZJRUxR2z1KsZZyh/Mq9Kc9NU3v3sx73kZ/o/PgWcUQPNTuPS+A/IOXai3sgTmIQlbLxXNx1cokNbPHdEzwxcqjJtaIAK6u4FbpKClFIBIH0AXABq6L6FMFfTJ2fXcW+9P+kqkMw7vfv3r9m9WXJvrqxaul8u/mKm626MU4EIcQw5OL01/89JNf/uTjJ6erht0q77cScNAcQQltcaGq2Buo2LyG0BxS1QZ4uPOfn58bQUHfbE7apmVidyDitmuaNkWOMcQQA2CVeTYgJFpsaxcjU1hyN6S66wdmZsTiBqZkBV1okfDnqg/mCIpgSNL28enTp6t463D27evm61f+5QurSK4FSlzJQJVEuFT4/aApvLz9/Vpk6AqmDMIoblOeZnLhQG276fvm5KRfraBvQ9M0TI16IA4AbvXcVGulxIioVlpd3DL4zDjH6G2yNknACRQkD3kaZRrLNEqepOTqqe2mho5EjMyUcoFpHK6vb7/6+ntzeP7eeydn5yG2sWlX3DWrTYgNANW+zlGVqJIpKgLYRA2oiO1vb0+SXZ5fnJy1/SohxzL5ze307bdXw6xptSoGL97cvby+U7Ru3XSb3sCG4W43TPOk4zjvp7mJUTVHsE1COyHEYo7DML+52t9cF2K4fBY3p+HsHDebFPisb+X2dbm7Hu9uMgjIiYjYsNNxKCKQYiTus8S7m+FutDlzSuny4vTi/CTGA58fMTLEauFDqI7GBIEpEiaODadIpH6q4aSTPnEXPTA3kfqW+zZ1TXe+6d+9XL9z3j877S5PN6cnp7HpBXB3t33R+XDzBsrQDEXvTJNzc84xeogpRCZGImIOaF1DMeCCoHNHA6omS4RFoRSfC8xiokvaAQ6LONuDGlkIdHHaUYiJoWGMTIQqngGVyJsUVqtETJPYVGyeTYsDO5oDqIhO01zMnLmNadXGs03fvnP67OJ0nubvv/s+MqCVSKiM4GpqYm5OBpSacHKyfv/5s1/9/Cc/++SDPnKZBgUHpBAjERqouquCADqiKWhxVIyIAbxqjwektxF+m5MTBZ9B27blwEfjGA6cUmxjUwUYzBQB3XXhx1TQ7MHMd9mYiSgGTKE2VMiMRKDMIHM1GnQCsCpzXpcqJE6h3/R9e3G3P7t8utqccAhwDCcWCMay7ePCUCYA92AmivQ46zeruOEQ6WTTWePgEgO2rXc9bk661SqGIGZesgrN5sUxz4KuIvPO8gSq7q4iUopIVi0AmVnaFtoGYhSCSUuWonka8jyVOWuZXQuY1A/tiE4E7gwhUWNFZC6727tXL1844OnpyWq9TqnlEENcxXZFFMzhgXvMg6XM3dUAMDYNxTQWAynbUbpJjRUJ8li2+3HMZRTLU7nZz69v93fDiJG7yE0idc9MIjDPWcSKWAqB0TctnTQxBCMWcNQC495v3uTU0Xsfrk9P282aV13DxENrfU/THkou+50hiAOYEhOFGJDCXKDs5OXVsB1NudPA8SBEezwVZuLAFBkTEzIxQQwUkaoTMQMSrhrYtNwnXiVvUzhbp+cX3cXper0+OTtZPT3tLzbNWR82fdd1HcVGkfugNG+2NOetMBpqlnnUuQUNWrKUkkVEDd2JMDJEBreDSNtC6wQXEHURFwU3R4CABxoJuuCj8cWIXSRpuA2YGCMzAGVTByf2JlIXAMldTUEF1MEAzV1FVUTNBMCZqInYt2HVhoiApuNuO+zuTDKjOwMYilRHkep3S33fffj+uz/79MPPPnz+/Mk56exW1AyIiQMiuYMbFPHstjDSBNmByZ3ctYpsoRM8LPmFvl8VE5N8uFlLXKNqZs4hBKZ5zmoGAdwVazuimgQhGYABoiMjIhM20ZugjIpOaiwF58nnSWwWMKAaJUCsHAIHcyJKHOKm2Zz2p5t21XBc2HfVrRyXXkWVEkSESjZgYhONMdxzFBc0oDJh37RPThKDmE7EFLp+c9qdXXQts4x53g+lZHUFInNQVTcNromgjWRapnEYxmkcc87ZQVLy0MWmCYy5ZCnZTKWUuSIgTBTBmam2dk0re8oZQiQWLzZN824/be8oBPKSat0rEHDDsXGgWuK1B0rkuJgCIwJQDKvNWme5fnFzt9v/4bv5zdY3m9g0xIgG0J/3OurL6/nb13fXu2k27VchRnCbwaxrYiAaQURstx8iUZfCpmlWPa/XFIIAEGFkbBzmENLpyeX56XqVHFX2+/08jate5RSnQUopu10hjk3qNxsaRp6z3u7GWfXlm3HMwJGxCWRSijwUgIDAmAK1idqGA1XtysUngbAOiTbgKvIq0qbhdRc+fNL/y08vP37v6enls369bmOI5KSZwBHUywjgwXPfJ/DT3EQ0CUyqedheA8A05e3dsN1N81QatoYQndBAzQGBAwJAESjFcwZRV3NCiIEQwJc038nAyIkeeJC4QSk4Cyhi5MjAARO2BkbogZAKmIlnsVltFlMDc1MpOat75WinFFdt7NtkUr69evOXf/2b/+k//Mff/PWvx902MgKjiZupGiAFYnYOFxenv/rFz/7lL3/2/Mn5pktWpGQpZma44BWcxLSUMokVVXNiiAGJySOYBDTHUswXQumhz9+1LZuU2asyuS+gIqtiMSEwI5qJiAC4anATcIaFaU++8IcIgSkQ9Q2sGmlYGFkcVGSe8jyITOZ6GNNLeaIy7QiIkFJo+rZftV0K/LgkuahmMFd7E180NIJWPPbDlbm6zxKENoWL09QldJuQHJvUdWnVM6pNMu/3u2HYFS0c2d1zLujeBOpTIA9uOpexlNldiDQlJ8aUQgyElKWoanEVVVVRV18KnYiO1bkJa8G8Op4xOoFFtD5RSGHVcJuQK105JgrRl9yykpQf5DC2JCMhcGxSu1p1J+el0M2Yt+Pc3uWULLKHRKlpd6N9f7V/cT3usxkSR4oRNSu6hxgCBXeas6hILXEFgrbllNBNRFEKmDGHkJrUtk0MjYmUnO+2w7AbADQEJ3A1MgmIMaSmbbDtxLAUh33Wm0H3o4U4hyLJNGe5L5ITYorUJe4TtwkZKWDNPQ+UKwB1Nk/gLWJDGBn6ht857T98dnL+zkW3WiOA5DztJE+TyFzNXrLZDDhjHCGaGGYJ05zCSABquJ9kl20u5mYNI1T38IOiuzlk9ZrhV+ElWhjbNX5bQkx57KEIBi5qRczJEUwRkHQZpOCOau5qXhzFSIwqcF9NRBw8MEWChiCgm8rN9vbXv/n9n//5X/3m17/9/tvvizgTFFR3NXADQqSYUr9effD82a9+9unPP/3wfNUHAGfyQGzoCipqim5QzKS4qqu4uSOBIQhYdgyM5pgLAPhDpkJomwaVZtcYGMDMpOrSuAq4VWEBM5Eyu4kEUp2jUQUlIHJlzNaBT4Fp3c
FJX7qQAzYFxSzPUzvvJY9uM3sAuNelXMx/FyACxyalJgZGroDnAwS7zvwQGJGqtPCB8PQA8r8gItW1gHtkWvV8so6BiNih9iBIyjxLHqZpt9/dFJPUREQsc2EEw6iiE8xmIjYBSUoQkRyJGUJiQlPNosUku6r7EVRREZlIWIU08Ggd7KQYrG3p/LT78PmTkNLF+arrkwAZIXJwDG6+XHO/3y3NTURLKbU/HSOnrrt4/pRSunr55u5mV24mkwmhcPCYwljg6sZudy4eOcTq0VwbJIDMAXHFTWsqSuYRPUVOkZigZMnFh53l7KmN3SoAyjyPU87zONxt9/thnCcvM4JyoJQSc4iOjOgxcgKAhneGo043Q0Ecm0y9e87lOPmRkLsmrNuwbjiGhTa7eLuaKaiYFfWiMEsQJ7UsOhadzIuDHbhtcym7/bC/247TbEihW2cI16Pf3Mnt9ZiHfdDcoK4SdynGrhs9bDVOIqZzJDvrkAgDkDmq+iS2z1aKgwED8qI6VstNC/S/digfSCxU8I+pmBJldctqRXMRQOxibCLHRfkDA1JDVO21HbXmumwWXIOR5/nurnz7zbf//j/85//wH//i669e7bdTaKKDiZZi4khAbIBN07737pPPP/3gVz/96NPnzxKQTIUThhgaV3Cb81wki9Q3Iq7NFCR0d7NS8QuB3TEXqD6p95M/peSKyZUQS8nTNOU8lZI4kFltfSG4i+R5Vjdtu6b2+uFgOHPQ/3EkxDZ6m3LkzMhE5FbKPM+j5gk0AwAAV7D+oYQnbo7mRRcQP947ZN1v/CFwCAERSm3CwLFW++gwUzPRJbeKhB6DMIOjgYvOc5mmnPcio+rsXmEagCAIDu5iotncxLwQGRMBoxMxU4hULR1dxKy46TE2x4NXMCKAL5KJCz6BnSI0LZ9sOvSzkJqTzSo1yTQocfWxNjMDqTHLQwXfqtNTq4BmDojccFylZtNPxcbiQ87TkM1LTKQQppycAgcIYXGeXmiaVAlOwO4mRGYNQpc4BWCsZn1LDEbs5jJPw91dLvt5GqZhnOdsZgweYmwq87wITKKTOMUU2CfXSX2fbRRvEyJjZSjd5/yE1HBoIzccAh9o4zW+NK/ikkWgaDBdR19FGMzHbNtRtmPZ5MIxi9p+N1y9ub15c7cfJ0NOJ6ihvct4N+j17Sj7fQtZUC2wtG2CODrNEDKESfOsJkuEAWJe1HMBNXdwYiQAJqjphx/YiLWzxT+gkS4BHREgZJFslnNBZMaAACIGtcFVRdYN1UzU1OzYjGewaZhfvr79zd/87q9+/bu//eLbu+2QizqTmhZVMTcgpEAUTk/Wn3/20a8+//TjD969OF3LULQoR8RAgamgaSk5uygi1eIfBUKrznWLDymKVZ8feCDeWSd/jI4QtTjAOIzb7V3fxBiYmFSLqRICOEgpd3fbMQ1t0wbi1DSMhIvaZ13IF51PYyzomSAxOkKRXOZJ8+wyAzEgiCtilT1282xuIjTmcZzHqUxSo5baeq3AjMPOD2A51yGDRywRPliWfUmvfJjybqeBqCQJDNVWS83LlEveA+bUEADGSIjOCOCGVMTctZgbEWBAJEVi4OWNwCp2CY8c49rXrYHksgQ6RkYErELGwI4JYxP6VUsIITVtt6LUYolA0SkYIFaMmKhbecxSBEQkZAQ00XnOu3FbLJ9crtpV269Xb17H8ds8jaoYQmo3p+veebefVKcqe0nERAErkJUczYksEfYxrDpu2QMoIyFj19E4y+4uD/v5+hrKTNNuzqMUcYAQYxdiG7vWFHa78W4/X+8mp7g5uwDwu5s3V3fDbsrOsDnrzk6bvtVV3x6TN8RFNo8JiAG0uqOai0oRK2pZQYzUumDP1lCAvp9wmuzqbn59M5ycTog0juX6ze133765urrdjsU4NkOg1o1inkTHieexi9KiUcEZvXCcqDFk4ChAAiZA2XBSK+KuYO5MGNKSOOIyghDtqBl7rDnfG1QTQYzUpNC2wdGGeZYiJsZMCCSGcy5SxMwdgBzNTb3kUuYiTNAkJ0IkuLu++09/9Vd/+V9+8+XX393uRwVw5llEZKlFOVII3KT47tOz/+ZPf/5nf/r5s8uzGEKBomrBuBpNQsWiixEyM6VAHCIxqWMpqpUksAgaVNE3dn+g3luLG4hgqvvd7pZDIiIEYlqtVyLCTMxs5re3d+7OHM3h/PysZYZFFQ8BrQrsqmtxzeAZQQgYQKVYma3MJtlCAnRzRXRjQteio6hYwf1+u93e7ff7UrId85IqZFn33sBLd+/BDHkb32cupqXoNMNucGYoRWJwcnQDVZciRSYH4QiExHxI8RyQqsRLcbcAS1ERmZAjUu1TCaACKKBWUJM5HFR6oZYjHTAAVXwyICEHcIspedsyBgpNjJ1TQyEipOqI6l71bAu4PjwvrrbWRIvDskopWay0TWj7CA5qZRj2yGjmyBwikUMMXktADo4ElakCBMaABI7QEK7buO64CYBuZc5mhBRihBQQ3cZ98YLzWGT2IoTIaiE5I5GoDrPdDuXqdnZyS5rdrq6H7T5zE7smXT7dnG1ioNw2ke4nP1BAjkuNzyonVtSLuggUQXVwI/QmwFmHO8WXE4zZXm/L2c10draXLPu73c3V7d2rq/3tbsouFLNSaEtsGi5Tb0Oi+SxYJCgG2biUoiGkGAgDzqxuc3ECn8zMF6XZsGBUD+v4w7ZxFYKschCPt35EJzJERTAmi+x88LURk2Eu81zUHACYyF2LzKJzkZwCGcRc8rCfv/rm21//5g+//f2Xr6+3U1FDdMQsUoqqsyMCcts2Ty/OPvng3Z998uGH773TpqAiRdXMghLzEWPjhBAixRRSrK18Ukd0U6KlQUagaoiGj08mzNMkpuZq6rvtHYmCipsS03rT5zKnJqaUAPn2drff73PWXDQ2KTUtASI6mDuou4nJXPJccjEVcCEENFSFUqxk1RJInNxUgMyM3Esu+zlnUb+9e/PmzdXt7U3O2Q+gqwWTSciM1VaZiFR1STQe23F5hTcYFIO52H7MzK6iKRqBgy1FOlWxKsNPBFQ9tnxReqoVFHMAZwACJkoYeqIECO7F0A0MCBDYTPxAjl6CFEJEPuhFISARB3GU1FhysILcIDSADVEiSIusvxuAo+vD0YdEKcYmpcCMiGBWkc4mNtjIQJzw9CwxXd7dNre3u2kupexUHUwYjWrOVY0l0IGRAhqAK7QBT1ZpswopimvZ3mZVNG8D08m6c3fJOKmjt44+ZzelXIzjHKZZ1PajDLPuZpt03tn1VMqrN7cF4PLJxdnF5tl50ybQ2QPfF2IRgSNWIW0wh6JW++lqbFr9j5CcwRPAqqF2BgPfZXi5l/52Pnm9HcLd/vWb8ea63G3jnDcexUS2gtNdamIiOQ9jm8qmRUTaFiCHETWixZZNYtbgIvtBhQ0YOGCIyFy3Qz/0rJcNx2oEW4XaA2Cot/TBKHNVzWUuIUAXqWvYgdzJQOass8xDKRWrGQhNZZ4n1WJQHEMxG8f9d19/89vf/uEPX3z76vXdVMyRDEzdRU3cg
QIhI9Jm3f3k4/c//+zj58+erNqmlDyXeZwVCYOyi9cYmAkwctc3KTVEZABi5u5cSaaRHWzOWVVjhBAf1S9DYHL0YFSkTGOGIqACYBRote7fefqsbRpRHcbp5aurV69fX13fDNPUdA0xrft1CNGdANHAqvKhAiiiEkrtwYv4PM/Dfhr21CeKCUENVBVKGffj3TiOc9Y311evXr+6ubkppSwDZ6E41tYeHfGivpC+tWJ/Hy/L5EDVEXwuPuWlLhHAa2Bi4JU7ubQOCQAQa8sC0BEhGCgZIlGk2IVmxekEMIoU9aloKSoLhNvcDkorCE4EMRDWkq9YERExc5C55O2Qx6yzAuIEE7ZJY+NczU3F6qtocbt3tq1MZqKaPIMjMnOKydTVjRFS4q7hvottFwDVbzXPxU0JvZpjRcZIFAIFopBC6CKym8YWsUucAsUQEXyecimKZIAUY3IDEy3izChq01TmWRwsROpWDITiVJxm8e1Y7iapKJT1pn/n3cuLy82qdbR5FPLHIRmag5qDgqpl8aKgBl77VAAEuGhKAKMHRCZ0w332m325vt07Sb5+I3e3VObGDREUrVhBxVa4Dd4F7QJ0CQ2xIKpCAUewRO4VuWKu2UpdhpyOPqqVMXf/AxwW5PoQAfLbO7+bmhQxZ6AUU0yMzKK+n4tYNlSjCs40NzDVIqIqTjblfHNnd9dvfvvb3//ud1+8eHl1t92Lm5qKSlEVMzUwoMiha+I7Ty9++flnP//ZJxenG0YUKSVbUQAjzqIOiAaANUJMMcRADmhmqlpdi5hCSuSAaiCKIUAI9LCGEU42m1nF8t5GkbEMObsWdzWwVd9++N57bdPsh+n16zdfff3NV19/nVK6vr3hxO7+/rvPN5sTp8BOtUlNIVAMGKOHoGoEzkV1P+6vr3fX18xNy0gBAVxkHsft9u76brcbR3n58sXLly9vrm9VlJeW/lEbv0os0QK4NxM3EdEKRbgfYkjMyMHRzcGc1MEwGFatZGX0RdBv2a2Rlp9LLSkbOoZA6O6OoUndadufxnQqyrrfFYUpj/OMKmAKlZx3ALsaU2haZEZ3n6Z8d7fd74ZpmvOcfS5WVDMgtdzlZiP9aUyrFUF2RBUXdRHVcs+Ec3dXcRWgSISGGGNc9evUtOZGWP2FAB1D4DxPorJzd3UkJ8eqhJSYUuSUQrduV6eb1DCYQs4+TmbK1DEuPh1VyykkBgfVReo0Zx8G2e7KPGvTpWfxtF213ABPqL6fcs6iqU3P3j1/8s7l03efdn3SMozjnNXLg06fm1sWHTIigKGJutnBnBgdGG2xj1fxnE0VmxgaQwTIRYb93NJMZYogGGpjyAENmYghsCb2iB5qX4coOATEYEBkyTKoVUX+UK01C7lZMVSCUJ2ycMkusXbz8R5Lv3gz3VsQQAVimqihOjF6CAQx4axq46RWOIVE7NmshologQGBDHyapuurqxfffvvr3/7+66++u73b55zFRKyIimjFBJCqN4HOTjcff/D8z/7FL/7kZ5+u2iQ5G7iCi7urw2RRMSWo6G8AAgQ1NYdiVqSYOXGolD0iCEwhQIwY6BHEL6z6PmgRVpFc3EWKEM7TuL27vbl5c3NzDeBXV9svv/zqiy+++uLLLzmG7bgPTVApZZ6fPXunXa3bftUwOkDTdf3mpF2tct8RAjKDaNkOt6+u4tVV6tepWZThxzwNw66m+ttdvn5zfXX1Zrvdmjkv0uVgtYtQFcy86t7bcbGmxywFZl6v1ydnM9ucqCCpiAyjSLEmQCSixVYUqyDics/BHbgKJgFiSAGczCCltunPUnuC2JtYlnnKPBceJhj28zwPKrO71mXUzAg9jcRsRDTPcntbdttxt9uXnEnNFaz6WIzc5KbAprMGozlGyVon/3ieVO/TfnczVSV1IFNQc2JuAjMTsxMqVRti9HFcFxFXR8OChgYBQ2RKAdsY2j6t193p6appI6iWYRjybKa5mAGohrotmIO4gbuqATgT5ILICQOWYfYZxhmULauNRQ0ppEgprE/6p0/Pnjw9Oz9bxUjTKKUEILKH+Zi5TcXGXE0zYAmUah15qf1XQFwppmIB6aRhhACJA6KJKBVe0hnCCgthCAEIobL0GCEwVoFuQmBERkAwNkHT5OpgAYAreV3dHP2wq99LPTL6ofi3sLvwnuT/8HSqkCzWcN8NXd1UVETVMCy2FujkxuhEVFynorvt7ttvvvvm66++e3F1fbsdp1kkq4u5+jIcufr3rvr+w/ff/fynn3z+k4/ff/6szFlFnbE2DMwdigFgCMxcdW/QANxc3UW16ti5mqiZGSMxUSAoP2DEhLZJZCSsRfJMpIhNSjGyadnttq9fv7y7u/v6q5d//Ve/+f0fvvjmu+9TE/bTWGS+vb25vb355ONPnj1//+LJ0w06g69PTy+ePr26vNSz0xAYRb1Yvh2uv3+JT19uzi5XJ2eJyA3zPI3DbhiHYdjvttPd7d3tzXY/TAbAgYmJFkNgNUURzJlq7wsRmAkBLWoM9wi/lNI7z55h6CyPmndadnO52+4HhrJqQhtD5BACcwhE6O5ISBgd3Yyl1lE4pWYVOIFTSk2/OkNMw6S7YdzudZhsLrQb7OWL29vbN/M0MNF6tW5ScgN3ARfm2LatGeY5ibYOQoQE4OgOKMole9ln491+9irkJllFTcQuT4McDSEBwMHMcileFmF5AIwptm1qGmZ0ANEipvH0fG0GZkjA41C8eEBOTIExpbjqu/WqW3VtTMFKhsBEJHO5vRsYQA0MSRyLuJapogzrBQ3crM82qUfgfc5lNxYZp/08T6IQ+OzJ2WrTbTbd6Wm36mMIwAxdm0TaXWqIw3HSuJnNs80TNkQhGkItVB9A3KbFSjEtXoqDWkf0dMVrTiXFtgkxVI0GQCYMVWCTAkMgpCOrHBbTawPyAmC4mO+pgklAhaqziovxVDVPUTNcJnFt9FdmGiDhQkoiZ1zgJg8ORKDAGInZ3Ypks7lozl4KZYPiYMXRgB0DAgQcFXbTfPPm5ssvvvzu2++GsZijahadkJwJiYiNRImMIdDTi/Nf/eJn/+JPPn//+dP1urvNkk2ool/QHReZSqtCMwAOoA4AIObqgBQAQdVFQIojOAESElTyyIN6eYgxoJNGyHneBUKhNqUmJSAQKXd3t6q3X3zx5ZdffvXq1Zvb221sg4ITAzE2KYjp3Thebu/OLy5XbWqLAHFIKTUt5AyWoVjZjsOLK3/n1ZP3PzzNufPeTMdxGMfRrJIAUMqSYdbG/nHDN3NRQKkEfgOwakeCAFxX+MNBiCmFFHicfJ7ysN8N+9txf+2Wu8hNiJE5xhRTjDEyU0rNapUoBHHQhbsZgzQIDWE0S6UkVby9m65vt2+ub/b7W5mHm5vt9y9vb29u8jzFEEqObUJwNANVY7auY2aWwlKSajDVCmR2YjdSJ80Kw8TiB3CxqVhRm8bBH+z8BqDmZlJE51wZVdiB95agep04uBsRNm3br70fpcxesosqQnVDADMT8ykLbvchEKpaKUjBSWcp6M4cncmtdvyxmM+5eFVh
ahISKaMgzubzXAysOKS+3aw3/aZfn3RtF5tITRNiosCEoVHTbppS295zesHBFE0r6QEetNJUTUtlQi5uWYzeEpxG7CKVhmOiNlhQYKaAzAFDwMS4TP7qOFHfgsiRzJYYfbGqUEA1BCOCRVzYFnIIeI1JKmQEl95yhcjwQfveqh3jo41f1U3NCWrnXE0VYFJwJXDQbGKGFTgEwODgrnm6vX7z4vsX33334uWrK1XLUkSySiYGYq4sMHdqYtP33YfvvfvLn/3kZ59+cnF6kkIAADNTRXHySjuFRXEMKhCk+kU7WmUXAqqDqKlpmEUtcAA1yEWxenccJ39gQqSeOc9jDAGDNk3qutbRmWmex91u+v7li1evr+Y5m4Ooi2pRHef5+vYGvoFvX77anJxcPn325Ozsna6D6+ucy4LcdaQCsptuXl/bq6vt3XYcx1VZFym73TBOOcZmvaI8hZh6ADIFJgLEurSZKRzuCxFVPN89ln9ZiA+zRXXY3b15/frm6tXdzdX29vUw3M7TVstUe3O0AFNT3/er1er09OzygrtVB8RObAYFcZ5LIIgRYsBxGGYpb25uXl9dvXz18u7uWuZht7u7fnMzDhO4xsDkZQxH515gpnnOIURCUKWSUWTphTITUKDACiRS47aDXR9WsfEHuaWDORTzojZl309ZxBi8aInR3VNgRDfJVR6fOTQhtRRnRxAVB7RKVnHObjfbQUWZvE/cN82qa2PLJY/myjESEBGGyIzBhnHYjXnWtkuFZbjbzrO+2e7nLClRv24uL85PLk7X52dNlyqri8BjpNSkGBgdDGAjslqtK1ocDn1+ClhnQxWdseqTl1WySXEVA/CAHkIF3Vgk4wZDwgSQgGIKwSywxwNegBcYI+FBB08cioE7IDpj5aeoi5HXBAERURer1Vr2MTg0jcxxgY0KKEF1+3CtwKt7xVuzinsuZO5GxQgQRE2RCZuAYDJZ0bDwbMFcVfJ2e/PdN19/8+WXV1dXt9utu6nMB8MQJwshBiRgwvWqff/5O59/+vHnn37ywfN3m5SqYoa55eJSfTQCx8CRnNnc1EyzeLaszsgMxFVWZC6CDqbeJE9tLGr7OaujeQMHl+4QYwyMIeDUNLWr5O6IGBMzY8757u7u9evXb65vchF3VLVcZM5lLiWb3O12N7ffA9LJ2dmT07MPT05Xt9tyfWNT9qJcPGQvQ757c6tX17vtfprmnHMueRiGnPOmOyVsQzDC4IZmdVPDZcldBDuXA7FW1IiJFOytZGye5++++foPv/vy9s31sLsbh7tSRvQiksuYTRQAY4ht26xWMk5ehM1iuy91mapVXXdk4hSrpG3Mkm+2t29urq+vr7d3N3kax3GYJlGlwIzYqDEqIxAAmgEAiTIgRyZETFGZoy/RSnSvth1VsKLajxt4tWTQh31+ByjqWUEMs/msqAoRoQpIWVVc9gOGTFEU1NGRKYTUeiSK5IQeUkxdL+bDcGsla2IwalKPTOJiTuDsjpO6mjnqKDaJZ0UCMnfXUsCx5aYNq745PV8/eXZ+ennWn26IOedJRNGNK2s8MAElh7Zrm/aBem+NyQnhQD5TqxGpaTYp5grojuRUWVsO4opeguXg0KBHghgpOEUy5irChfevWRs5gLpQ34EB1AHVxcDNIyIScUBGIMMqiVkHlC/iIocdZGkXL7LNbmDipo+2/hqOmqOYq5gjuDkypMDqxi6ohTGQ18RCiuZc5nkc85TrTFYrouWYwyo4khJoCPH0dPWTTz74/Ccfv/fs6brv3XwuRcEcXdVEwRxiCEQxRoqEdcMvaqqa1dAd0BVNDbKaiU+ThiApi7sPc2EOD08mtG3jTBZpt48OnkuZxqFpQtP2TFQk7/f7N29ubm7viqgDiug8l3GaxLTpemR6ffX66voWiDepfb5avyN2eX232g6YS8rWZcgO25ut3GyH/ThP8zTPuUzjMJQiMbRMBL5TARM0BUeEsFgsAaF73fYx1KUeAjgQEajg407/MOx+/V/+8j/9+V9LFnQj8iZyv+qa2Ay2L1gQiGPk1AAFUd/txlxeIUAWAXCOITATEi2aBDGkaG5DHsZpdMu0YD8xhTZx08QYYoohhZBCiO5QiiBy2/QxREIgcuYeyYCsanOIesimimZY53CFHdRI0lQeKvnMxWYxoAQcgInJuoj9KnZt2zSBAMCAwFR1nudhKLsxZ7Wm69KqX7WhaygFbPq+P7mcZ/32q2+3N7egqsbTDIVBSlDDWUjUxjnPeRad56yzBY8sFJCZGNs+ri/Pur47P1+fbPpu3aa2wRTMXAwZiDEgowEIQGKm4DGEBZ5wfxxomoqlmBQrxU3NZAnliGsxFhjB3ECyZchAINJ1IZLHAAEgIFT+bx0SAGBIYj6LmwGTMyAHIEYVL+rV7ykyL0KdBIs7M4Kbq/rRgfKg4ISLTF+1c3QzscqqP5wHVrQ5BnIGAUfwEDmGmBIhaINZoVQ6gFhRzWIFGFer1cXp6W4cxWQ3mtpc7WgIyQDnnGOktqMnl6e//Plnn//04/W6F9WcS5Esrk6A5u6a59mF+7QKlFJkRArBgTRrLmoixVwFUJ0McBYbhlzMAlMgTkSb/hG+N3AIEJja1DYdE6vaXIqadau+W3VqCgSOjoQhBgWw6gxvJmZAIK53w/Dy6s1cpAXax2aiEAxInMRMnBTHInfDWHb73TgM0zjOo1l2gMCxSV2ebRzm/XaQImAHYNWiZbyIpgSiGAITEZibExE6CdND09Gc55fff/vtV18ETl3TrFZd6rvNepOYu7ab85znQkRd17Vt27YtE6qplDJNGcBjk1IIITAIzqaIGFIgJicPAdo2InSE2MaEa0SkUFsSTCGEmBIA5lyIQteuCCnPGcHaJoZIFNFRi+QimoqZgRuWIvM0zdMsrgf0UTm2+sxszmWcS0jRoWa73LTctiFEZqIFrABOlFVtLnmap6KlCSGmkDoOkQCVIvWbPnW42e7cTebCiMaBAoeQ0CyXUlSMWImm2QSw2ayatu1XKTVE7CHGputWq/7sbN33bYhEzM4osiC/q7KLeR0pVM/kIAd/n8WoAhR39yI1w69OiLjQ6arpOULVhA3uRWUcJ1XruIXGmaC64tJi5lPJAVgd3WyRksFAiIxolL2iGxexTmIPEVJYSq/uoOZEbuZac+VFKAYAwPwg0lO7A/7oROpqEJEcsWhBNCRM6IkdgnfsRkpYvJYUEThQ13VPn1xKcSMKiV/f8N0djGZaCiKBu4nEwJfnJ++/9+zjD997551LZhymYZ4nNSEGChQBXVVcUBUkulYtEQ7Bo2IMWtRVzbSogQIBRwMbVKa5IGAXInftoy4/QDAHJu66ru9XMSREFHMM4fTy4vRss9/vV6d350/Prncjb2ccJ8iAjJWRMs6jgxd3BRKjLDrMw4gsITmSgmdwANi5b4uUOW/ncZv367wP7F3XMXLf9nnY3Vy9efPqdZ4mBmOEgBjqzu9U6y2RqIuBmNDEQLmmZIGacN/tc7U8jWUeY8dNCOt+dX5yenl2vlp1QD6Ow9XrK9WyWq1
W6/V6vSamPOc8zd1UAKBpUkohBFaVcdyJCrrE0HTrFTDNU8lzKRslpya2CCwiZuIoFCilSMRqzpy6rtcsb66uSxbm2KQUuwhss0yNqjsCEAFNU767uVGVksVNpGSV+8mvZuM87cehdabgTBgjNy1XwJY5MdfqtVMQIDWbpYxSSqRGzPdT2e1lmvb9avC0apouNmF1ujKpDXaOKfVdJ6rbuzvIOWET5mhkDdH52cXZ+enpxbppWF3cDYlC5LaLISJVtGWKIZpqIZRaVjRVEzFQKfM4DblMD1SJXNVLNhF1RzUzXWwXF9rh4RYSATFFhI4gzzAMBbI3gSNR0xIzMRpB9XBFdVRb5FMiMzGlSMyV7w9JMTPGgGZVXs4peohITu6gamgAjOag6mpuh5LvfVGpAk3wUUPZ3Ma5DHPhGBhhFgUX1xJBqQ0NQx/Qg5sXdSDyyAwYI7d9XK9Xp6vNZrNZdS+6lyndEI/D4G4igg6n6/6TD9//yScfPXt20XZplnk/zcM0IOJq1aeYEkMCw8Kg5rlk4kgcAxoAIqYY1cGzmGiW4lB7IkjB0QAViCARR3qE8glMIYXYtX3f9k1qYowxpq5fXT55cn5xmu5u9uP44UcfZMHvvrtahAAYU0xM7K5qXvtMaiDFi4qiGQRjMjN1N6DBbScu83Q37Lf77enYr/pmteoTNym1WW5fXb15dfV6LnPtS1ST3oVjXVdjAgrLgF8qxVhl4B5ifJCJYghtSl3X9l3f9X3bdl3XcSQi3G53OVutxDgYIoXA0DSEBA4pxaaJqYlS8lz2VkRVgTyWyBAZIYUYKQWOXeqJgoiqZrHR0SvviDiGkFJqJvCq34KEHEII0chAZkCMIYQQAyfmMOz3BwjT2ywFotC1m836vG1XMTXI3KSw6VIbOZLV9k5NiGKE9fqk5oXzLClGAnDRkjOwA3fMbdOtCeMqZxUDQOLQNKnve1VNfZdLCSEUybe3t8Th6ZPLs/PTk9MVRyxlFsm1JRZjZCKAynGMDsCUTAwATXXOUoqaWsFghm3TH3N+M6j0DiRDtLrLVghj3ZhxQXQiMdbWLaKH7KIqAmPWvqk8UFpwGY6qUBTMHcGo2n9FioE4EDqCwKzIiqwE4ISMDMROAQKiGyBi5U+SARKQoZHXT2YOWgHrB8raIwi5Q1HNReYi0UnUASCAmRmBhwCbPgYyNSsGpWrLGCBz6BkdiSi1qem71Xr9uu9vrq/3+22eZ2jsnScXP/30w5989MGT03UXSaW4a0Bk4ibEJsZg5jGErtGiAOSi0zBl4so0qBoNEV3IIhoBRlBn2nTcRiLghkJiRHqkFhXapm1S0zfd2HZdUwlj6/PT86dPnl08OW/aBIhqMYZOBHIuCIgEq7brmrYJsYiSocvCTCEF4sp684o2MPfRbS9apul2e3t3e7O/6LsmbFbrJvUU0pDzi+vX39+8Hm3WCEIGC0QerNJ7EZhAGYFQCERNARWluKjfh5fMvF5vTk/P1/16tV63XcshmsOcBYuN4yxFc1bTYZ7K9m6X2qZru8C8yDCaqjISIbEZlqI5Zxum7W6suT1zDBQhwOxTiCnGSBxlmkVyziWEuOoDuOV5GodhmkYVdejrzldyGXeDmrZdxxQ4ceBQ6cvEHOtcSnycME1q33v+iZil1KQmxRSbFJtATIBm8zRt77ZzmQm5bbu+P39yaePzOecionkuw34qpbj7ar1+9913V6teVSWXOc+I1Pd906QUg5qeXQ5u1jSNuw/jyMynp6dd34aAojJPYy7ZTBGhLr0H6wgk5tN1qIebz7NM0zxPcy5ZpLzz7MMUm+POL5OVSUKjHA56jId/iABrpo1INZIFR7eExmDZKYtNhUTRnIix2iyXAkUc3AJjYI9EgSsBBCmQC7ABVQkuRGamgE4G6BQWiCcr1sIeADKBExiwA1QgsqqZgixG1G8dqGbzNGskD8wckIyYCT1FOtt0m1VjjpPodpiGcS5lzrOUomBwdrJqV+3Zxfk777zz4sXlt998/fVXX0y7XdfGT95/95efffrTj9477+IKDSJB7G21Io4hJkLAPAPTqu/MvChMuezu9kWFQq3+R0Bg0MROkRQAQRNDv0oUY2paowFxrQABAABJREFUMx/3k2JxtPtqPytEoI5jH5pVSDsK0TEhr1J7ulozqM1ldzJ91zQ2zdN2l6eZiAqjzzk5MlAQw7nIMEEu5mYhmLESFRMzCE4FfMy53G231zfbN2/uTpp1CKcXXSAap+nNzc13L168uHo1zKOYgKKhVtNKc4UFww9pTkw05klLRnDVkkeZ5+mBOQQGjjHGEAMRidk0zwbGewKXeZ6HYSylFBTCDIgpNbkvMQY1BXMiiDFOeVLV7XY/7MdcipoiThxiE5uqSsxcd76m7RpwH6ehlFnNQogll8BBzcZh3O22bhBDcjfOVKTsdjtzk6KaVbLlXMYpZ1Ex0yol8MC0o237Tz78fLM640AhhibFGDgwuWmZ85s3b27e5GmQtm1jSDHG0IaTNZai4ziN45zi7OYxpm7Vn55suq4lJi0yjiMxn5+dpSaqior0vYB7iAEc5lKYeb1ac6CSM6Gk1Wm1qFCVec4i4lZnU+ib/vTstO/7lBI45KxzzvM4Fynu9uTy3abplslvLnOWSYhmBMfFeQEeJNZLYI2AtIiEW3BjcHQopeTs8wwJ1LiAe8lYCpgCoWFwdSRQ9ODKYLXSigEwAIKhFJfipZgKaOPrziMtNI4qCrHUcWmRiECosB8DcwQBL2DyMO2vGkQiAkjEVJvqWW0qwkiBgYjVEdWklHmaZJ69KKgyckwxUUpt03bNqot9itF13N1uuvTZB+98+Oz82aZbB2+hEAciBg4GrHU1KpnAqtYNs6sauJlobW0SATFGBiY2JqvuB2gcgRNyxFls71mqGPhx5/dhwhBaxzWFFXJSt92odzsYS3I8Td1AUe6G629ffvv7P3z94nW9cePt9Wmi9NmHRJxmhd0o4yAAGaCozHOZgTMUBCSICjZBwVdvxlfX+xevr9k3jk/Sqii+2Y3ffffNt998/erFC50AFERmYQjCRGD1HAtMQVyUGUueVRwBpECeYL/dqughH/NcZJ5LTMJ5Kir7PS2tmUNJHRFjCEzs4HmW/X4AcBFx1ypByCGYWSm52svXqjKSjjC712c6EYcQU4qIWEp2t4POGC/6JCLzNCNRkZJiZQSaqILbfjcwcwipthumOatpzvMw7PfDcIT39u3q80//dH5/cjBErwVm5iAl7+62efhi2n+7u/UUOuNmnDVE6vo+EKAxY1z3mxhiv1o1KSEjU+jb3pIh7GOIF2fPQuC77dZ17hpyM1ERUdQAwFpamW27GwOHy8uLft0Twn7Yfffdd7vbuyIaA52edv3Z03eefnB2dhZjIqSaydf1y92btuva/jD5TfJYJuAwoesiy1gzHSJYjJQRENzAFBAMHNAhAJKbFcwTD2A0CkMBVRFwQ2ImBkUVcmYKgULDseHUMlIISlERso07uxvFzdcNn6/Qz2jdAxIQgFUolFmVx4aDyEhtCb
B78LnYBJYf6BFCYGIkcHEDNDXF0dQUGEwaboKjYxa7248vX12N01RZti1XMepZRUw8BX736ZNNm87akMdtH+H504uLlhobo2r0FBABQIpJtmGWUgp6iYzetTFGIm6auF51sUlQVyswBKrbHgCqqmQ1c9Aiow7DvBfZjTNhZw8Rfs+ev9et+7Pzy6Zpf/L5LzenF6r67N1nJ2dPuu4UwC5mfP58/OlP9ze347vfv3DEQNz37Qfvv/snv/xlYGY6+eCDn7y53WHJTwjeDeH90KwRBRQdE4anZkFGf3L68198/uEHz8/O15vNk7Y749S2DT25ePYvfvUv+/bEq5APIhOFwEi4yJ8VIaKuaZhBRcwUwEykjPpnf/a/Wq839Uy6rv3JTz4moq7vUgwA6FWKsNqyAzAhM0cOxHzQAjMzFRVYEkGvFxKqx+fhAMD6SrkUM6PaNg6MiLXjzos63LJ1uLuaEWGMkZmrYzgiIVTFcUQicyxiqmruUvIwDp999lHXtfVcYoiX50/cKwZtSXgJg2hOvB529uEHd2cn27Pzs5RSKZkD9/0KAYdhVBFiTrHp+j4wF5UQqG87BxjHMYRweXFBRG2zLSWHwOaecxZVMEPilBpVTWmTAj959nS9XhHhfr8HS317J6IxxJPT0ydPLi8v3js9OVlMrevJ122o1l8ObLi02rzzy/8WCZp2xSFC9cBdxEmRsAqOICyUvsXubp2x2eOogTm0gfpgLSmZgC3LcgiBCLxyHBCIMCQOiVJiYjagk4zdzk73ejuoqncxnPZ0eYqrDpnrho+VP2W1zneY/MsKYC5F5v1w+ZM/S/36cF/44qyvpu9IEFJwBFELBFUaPAZAB1Br3Tcnq7ZLbYopMBM6ojgVw2IIzG1KKpe7d840Dwn9dN0+vTzvV31qQkiRQgJkEiBTEmNgBORAmBLFiMwp4SqEZpFhMnNFoirCA4iqqlXMA0AUWN0pOMGmbx6a9uD1y5fMHFMy02G3zzm7e2rierNOKQFAKWUYxt1+v73bzTkvvAWmtmnW6x4Rh/00zcvoiQjVy5UPCDwCLOCDG4TQr7umaULkFGPTNEhcVHf74fr6ZpzGg4nTgaxf5xG4mx/7ugdO3sLrXW8273/wYdf1ADDnfPX6eppmIlo8/KrR+dLIXcLLY+3Wl1d70OR5fFQcwUHkpaJpzd/6lQPUhhU8eNX7EPFRvQgfv80C+10UiLTt2ieXF01dyx8+bSGbQV3OamN/v9+LSIxVttmwMhod9IDRQqSFAVH56cwAoKqIGGNExBrFIN7jqCrahZAcXEWRMMbEgRBQVXOeRbT+SQghxtg0iUNtyxyQMlXY8DEXRqZh+/LrPGyr/fXhXA6ntNzv5cGjk5c6ZEU7iCRWbz+8J2UeOCoIuBTnEGmJ1OqPZlAURL2y6xkxMETG+inweAvgwBS7/3K48uZmGrv16sn7oekAQNWmWUR0Kbdi1bhbWji8PADmrmal1DjxKDeFfuSlVfspd5XiZrjoG8cQiPBA5K4Wmot6XLXxPmpYY4U2HS5GxS4saMUDY+q+fWkOlQMfmNsU+FBaQvcfGff/fPzz8c/H/+IP+vuf8s/HPx//fPwv8Qj5IY0UjuEU3Lee8e0vD4I6fPu7t8jPbz39UVT1g8cf/fIHL3R45luBytGyJ5fx+ub7ad4dZD3ffgVzcHOtkkMOC6izCuYwPpY5+GPv/k9zuJtb26wuzp6n2AJAEb3dzbkIEb31UfDR1b+/LXAM/A7POYiWvP3Mh2nOsc5+fIXaeVte5sHLOwD8sTHx1tMciChFrp99GIYvv/xyt9vdv+LDUfb2lfDjbw/5Cz66McuP9489xES/9TrVBKk2UI+nVHMiAFc78CT+yMeoX1er1ccff9z3PQC4q+pomtVcxEqRUqTkInKIwd0PLYOjfHrN72qR6YAdfnAv3A/yIX64TweZuiN+1dwfeLrg8VyXMP/+hi2vtuja3982JwRmXK3Wl5fPUrOUlsLV9vb+ftTi9vLPcjcXGz6i+2GEP7wpx0l4GD/HgQWL8sHxkzwcR/go5/IfvNrjWwIHd8DDeTFgwOXJ1zff/d//zf/ti6//c+SWOByKylVgG8xdzPNsw+jzZLmoqSFIiNC0oe2462IIuHic22P18H/io3aFPvngT/8P/7v/67tPPwWA2938b//TF9+93vVNCEzLlKgT+dApq6jYI0AIYVE65QMmzdzqPKQD633pqBtUA0oErJhpPCBtiIAAeHmdQ4pZL74fSw81vQY4GF1gZdf5MvrUvO/SsyebNkUA+OKLL/71v/7X//k//2cACCGYGdbeO1Um1T1r6/jj8VdEFEI4mjUBIPPCvagriy9QkkpgPqbwbmYiUkqpNY76dnXahxBS07jbMEzzPJc816IJLmh+g8OqISIA8Ktf/epf/at/9ctf/hIAVIf99re73cthkNvb8erq9vXrm1cvr+/uhpxV1HURhQEOGBOHgMxk7qWIFJXi7sBc9xt3B1WU4iWbqVl1pQxVOBZC4BgDEZp5KWUaJ1FFoAOhH6ruOEdixiPfwc1zllIkZ1U5UJZQmwQnK/rVn/7L/+P/6f/y/L2P68ALY56hLjqIiFiHDjHhcbAh4gIlX1a0g1D/o9WbDjU1gOrBBAcHlIfVsEf1FLiPC+4X2vumyg+OWoldzE8P0L9wWCamef/F13/xl3/zbxOvmKM7uOOyiKA5uLrnjPs9DnsfBy25OMwhaNNwvw6bk6ZtmUIVJKxIw/9Kh0qe8lhPoT4yZ/nm5fb3X7/Z9E0KVRZ9KQMdrm2deAchqsO2HwIzEVf58MO+d0So1p2hbnh1xIeFzVT1AoEIAmEgrlhq4nts66I2cxwDB3weUZ3/iwYq+GJxqwcy3Ha7/Xf/7t/9xV/8xf8/lwiX/QiZOYR4XDsOQs8IAFZpJ4eZX/uOVP2mQ6xbYV0L+tXKzO62u2G/lzL/3W+93+/v7u7q90XmN2++/v773716tXv5avvyxe2rlzcvX97c3Q05W1EzreU84AAxcgjIAc0glyK5UreQuLblzR1UqKqYaJ38ABQWo6cQOMZIBG6QSx7HSUUAaIkjHk5+IqAFNGXukqUUzVlVHZzAHVBT1M3KAeF/+9//n4+ntiiuVALDg1gKHHwpLNJx/Bz380O08YNAa7lVfiymPipxP37OH7nNh6e+9bg//PfB2x5DBCKOsW/SSaAVQJBFRREJjViYLQZKKUbuGqYIOuIkunefStY8sa8SUUwRiBB1MQz640PiH/NQCg4YY3OvrIzISKHiVpmIoAqb1c0YoDbUajfhwTVBrLIwgREBnbGi049BmkONCKto+AKkrStLXTuYKSz0XOTDSnOortevx+D5sPzjEjUcdgOkZSAup8LMNWZ+eNT9HI5WZQ92/ocjDgDqNzWoqWCKY8W7Pp+ZOARCUtOKNVDVCnCMMfZ9f3Z2HkIYhqGUYmYppc3mxN2bptk2ab/bqUq1g8MDTKMex7D/qE2Qs3z34vqv/+arP/z2xXff323v8n4vOVvO5M6HiL8qeIAYYLZqkqMaTKEy1
VUN1bziU4zdkQIhQ9WwgMXNxMRwLssFNwvmnVdnMHesai3ggJXuC67uALVZ7tAAQ2wh+gG/AI4+78dhN+gjMY+HGd2PH77gyA8/OQDU/eYfGhofn/9PMau8OoBqEk1moYiK1LjXOXjb0apvU1p5WY97amO5vQ27fZnmXCYlgpLJNTAS85Ks/Feb/AAQWJjCsT6CsDirV3OYEJCJ6sSmY3aFx4amLX06AMKDlckhMT7s0nWqwOJSdAi5jm2+KsFcN9iDajAunciHt+vQW6r30B3oUS736O3qUSc/HYBQx3bjH5v8tPgy4kFcedmAKn0iBA4cakjiAKZGRDFGrrALtZznOWeaJgBo2ma9Wq/Xa2aqWKzqQ3H8wESUUnIPlZwBh8lfSsk5z/MMALUzWs9F1ba74fXr22++efXdd9tpRBFCjA68ZIzHy7R0T1G1XkSsau7LPguLQAh6tTdiqJWfxWTHKjkEHA8dVDqyXI8b3iFlrrdvKWkBHOK1wx2v2bIJSJlE8OGuGg737NFRHzlu1DWAPF4CxMVMp0Id4UFh6fiE49dHgf6jleYHv/7B3/7w8MUb6ADFeTAu3aHMOOzBqw5sMVVzdyLhkJnTerO6OD8L2I8D9KuRwjQX3w06TcXB58lLQXcmxAXr8V9t7h/zosM7IiITBl5CcV629LrgHrBQUCNhV8X7Sen3a/WDa3hMaBeubR2XXkdiLX2isy84JV+aw0sKfSwaQRXghoOTCtX3elTPAThAIR68d9d1dbY/nOTH3xLdE2cfDsD6hKWQcfg/xtR1XUqJQ3DzXLKZB+aYUtO1bj7st7gfVM1MAECkjOMeEedpmvNce+/DMIrIftiXnGtZoUYVx4kEh4rV4VyOZ+O1ZAIOVZgJmQkDIDiYocEDskldu46XFADA9P5yAsP9ONeKF6gCAzXUrwH0g8pjvV3Hr4e/reuBIQMuQ4MOFR9wqimjEzGBBaZHOgvh7x2ZhzXs0eQnIodHc+/vPn6kovqPfZhBzjaNZmoinguqYy2DBcVc0AyJICVA9HmW3V6bW8M7zVlMfdhr13vXYWD8r1ru+7Gj2kU3kWJA5hoGHyc/AMDSpCA6Vqke1bqPW/OPHg+GtcMhqsNHf2BLTbk+yY9fl22gfkgif/ReP/6edXetgLEak8NhUP1wia8SCcwHG8iqPBuYliQfmUPX9ZvNyWq9DiGICrhzCE3TrtZrU725eXP1+urlyxf7YWfmOecai5RScslStO7tVXSAiGKKMcQYIzG5Q7VlqIBOPCCg7i+sgRSVbK7VkjvWRlHdp7EG/A86KbAE6UuWe1ABXypiAEi8LDIObmpVjwwAnOvOuATzh1orVirSsjwt1xwADo5x/LC85stuu8iTIDO+ZYAd3prYb/VLHhAz7/kzi5yeudNRP/RRLe+HwCF/GB4gOAC5uy3Rz8NB4A/gVvh2n+v+eLxTLn8oRXMWExeFYuRQRxEDQi5w/WYigM1qRgSHITZDt5KUvGSZxbout03qOg7McYGieS0R/bHP8E93IGKTqGtCk0JgAoda8CNyB4JqBYY1La+2h1Y1sAFr3xAdyI+GmbhoZjogEJHTIbZckLbEeAgV8VBBv18M6lbu6OCVZ7nYq7ujO7mDI9jShXBYOPb3EHJErOS/+uPituBuZnwUY1n2ESSmOtMR0c2RKFbmHCMAqjkgtm13+eTJ8+fPz87OmDnGGFPTtu1qtSqlvHz54m//9g+qmnMep7GUou6EqCIL/JEoxtgSdX1fayKH3gYCgIrihABuZohUG4LHwW3q06jTqCaMHgmDHWrKy2XA+0Vz0f+qE5CWesjyiEPdjQOHBYjpli0vf7CEC+DL5Tz2BvEHo9GPUSoCHGA7WKu5NRMGBzcDUCJ/vPEfdv6HYRj82ML81qLwcC24H7E/No4f7vn/86fRAdP6D5h4iEAsxMVMwLniW9uGIzMhEup+K6DDuJtDAPesOofgTUsx0iyYi42j7LYZgdsGYkR661L9VzwIITKlSDEQES0G8oR8UCtGxGppslRdHaouTV017aA5+5bsPBKSIzhVapcvEmnE97oCy2EPbpaBgd+TW/ExMuytKON+p3r4nMNwOuZ0h7rdo3FVRXfoMPkNaquPY4ohRSI2p9OTs3efP//0008/+fSTp0+epKZJMYUYm6Zpm3bO+enTy7ZtpnEE8Ks313meQgxeexWqGCjG2HVdCBEWh/WlPllroWXORQTLfdr8MDVVtd1+3u1yLmBOABVdbHWFXHa2RzXuZYIsc//+EoI7EXCgWNmiZmqGDmhWscPL9D9qHVed4of36GHb/+F7+eLn8Ng36YBneXhfwvHq133+rU27dmUf5uEPV4f7jODxPH0cgv5IWr88/mBtOT7+ww7CfVRy+PrWN/Ug9q6X1SZPU6aCAbhJeHLaNJGssEkp+/Jml29QKFiINfbAtosnZ90QgYinWW7vRBTXPfddbFsKkQ45QDUJfRhw/BOmBksX7Qj24Jr6LriR+u545DsYYj2Zg708LJcdDw0yqAaZ6FhnptfGvNfyHoVQ15HDbVx2ncPoq2XoJdSsDCY6EBvuJ/7y7Dom7NFNrMWzWjOvM79tWyISKaqyvMZBrLFOf0QEt+pF1TTNar1uuz6m7tnTZ59//rOf/vSnP/nJZ0+eXNa2Xx2izJTncrLpUwxm2q9XX3/zzfZuS4Q5z9u7u5JzCKFt2/V6E2LQUhyQY6w9D5FScjHzumdW5MBb90VUb++G69vdMIuYQbVjdq2X517muy4D952Rwzrih1jAAQCqo1YITYjR3BTIEUvJZgKIAAaIeAjBDq8GcD+njhcZD37xS4KGy9ZN9SFHQySisIyhw7Hs/A9xTo8+7t95PNz/F0TTgw/1I9P+h8fb8/3hw3/X3zu4Pe7Fh2hnF/r0Hdnvc57QPLQNn59R1zZobZnL9nbabfVum4tKiBQShUTEabVhDmDOAD7N4m4mZOLgoXXieFxt/648+h/9oEO/6yB6sUzPQ3uvVvwAFkAUBnZDUFuqedVVnOBg1Fu7M44Gh/owVAU+RERevGjx2L2zQ6nPF014MF/e/e++BPcO18dHzOZ5rm22Ov/rDGTmaQIAN4DK1FtO6oAtMnMK3Hb9xcXlu8/fu7i47Nebd549++zTTz/6+KMPPvjg/OykAlxqkRwcSpG2aypxZbVanZ6dvnr1ehyGm5ubaRylZKhnBeamqgJAHELd82g5VSV3Xpg2IXtpUjietJnNcx7HXMSrH17ljj7oiSxfj5fpfjYhVEf7+4oLHf1AECmk1NbFr0g2U7+/jv8z5hEdV9+6jkIF2SAsVQTEJcB7+EfLzu8PyzmHZvLyzof4//jgW4UQxAewu2XWvv1xj/HC8beGeChMLo88oK8d1497CMfSkr7/UMsnOP5NSv7suX6UdXsn08CWLYVwuoL1uln1rZvfXO+/f2Hj3w63W1VzjrhaBU7crzC1KOqlaCk+zaa5yGxmrAZ9H2PCqjoN8HDB+Sfc//G+s11NqJaSXL1GdAR1Hp7NdQEgwsWSBSs0AA9QnFo9MQNEqHFl
TUMJa+jrhItyNlG9mcsy4nBoHwAuVcYlAIXDDvGo6PMgIDzkyWbjONZUHx7s/CGEOtXzPJkdOl+1TFU/E0Bg3pyefvjRx//iX/zLjz76+OT05Ozs9PLi4uzsdLM5iSkduJXL6DpiHJsmnZ2fPrk8//Krr7/88utxHE1lGgZAmCYexz0BSinMoetXIUYAV5GSs8wTWkmEoYkMNs6+WXWBH00H94o1O+Dg6vx6UOP4wQLpy559SAwA6swHdc0iasAxNk2b2hRynOdxnicRB60JPz1+pftRAg9+Wso4sNTp6mpA1REREcHoByt3eGvaH7/SD577VhXg8baPaMv6hoeV7T4jeFzSe+uq/Oj3jyOI+wd9qWH4YvIGdJAkAg5wemrvvGsnJzCPKAPIrDbN03ZKSF0X3nknAKXvX5K9tt1OgZ2Ie4ociAORuiOWav+p6qpIuTZcOgghAj0spT64if9EB90LCiwo5T/+zDpCiCrn+DD5jyJ5dQ9YRkrVN/HD7UV/PIIqRg0NoPpY1By+Rht0+O+Hx1t36tGv3Cvk5pj51858SmkZLgg4z0XejrEdIMR0dnbx/vsffP7zn//ks89OTjd937VNk1KTYqrNOVj2BTAHImDmEEKTUts1XZu6vk0xgts87MFsv99KnmepYlAWKKBpSIlwAQailkjOkRGpyiF2TXq0Fx7EcI5bulVnOXO4Lw/jW+WMt4/DUDJ3qJ667EgcQiIOxGxLHU8MsTYJjtn58vXwI9aA/wd1FjgMIXc0tLfwuPV4tPMfv3mYz791Ix9mBA/z/7fihQe38NHrv30JlnNx+NGF4BCwOkBFMNSxuFzpJdRaFkMib1o7OYXz86i5mbb46hv4/e9ud7f7ftU/f7/7xZ+1zxP94QsM3/o8zgpl1VPTEiAjMTgTEGFUMAfLIr6fxas1Q1qtmlQbJegP4hF8vAT8I0cBeBxhsPDG6+NWq/YA4Hj0mSVEd0BCMAB0Pqji4lJ/rnCRBYoLAEu9AJcSQDWGMTdXBUSn4EiwNJYMAZAqm8DvWTLHcXh/9x63tevvf7w8DCGEimANTAORj2Ptri2f1wEcUmwuLi6fv/fe++89f+/5s7ZtYoqETBwWuNPD7cr9QXrsJ7Am9LZJZ5uT8/OzzWr1u9+efPm3v71589pVwC0SIJnLrFYqPDa4MwEErlt1IDSJKdzXfd29hjBYXQ1wIb6gGyHComOyDOsH06T+7YP7CnC8UBVN5o5FjTg0Tc8h1lqbUlZbsAN1V/djkuFLGWYBYeKjyQiHkbPEZUsRB/HxjhXeYjU9DAQevMSPdOPeCgSOT3u7XoD3aCF8nE0cg8UfxgWH6XXMYw4L6sFixQHtR5oBFoL2nVLncwjTLRKWPIrKeHLipk4sIXgMgOAIzgxEi0TEYSoQAANUC0aDSQ6MlQCAKRDx22/5T3r8eGphyx5+BFoshUDH6kGDaHRA3S6D7HBB6zSuf7K8ARIgl5y327t5HCTnEFO7PotN9zD6e2udgzrOzAG9hsB03Fz+vjWw9uFijLH28WKMIvM81wYjOJgZc1ytuqdPn3700UcffPDB5fnZetXFlKgKn/542HXcdSmEANAyYZPiul+1XZtCTJFl3nuex/0dAnV9H2IyR1OVMlWrU8SFRQGIBC6Llcujwb9sdYiA6IdamyNUQPxbn+lIUnr7kz56YhUmNWaPFAJiSp2ZFwAQMVcCjBER0R2q1Y/pAthCMHC3ynZxP7KcDgvQoQrwY8fbO//x+NH8/3j+D785zm4ze4jiWk7r4cn/IJr4o/UjB6hBEcCRVkSHtkwlcqEr4f1iZgrz5PMoTRxDCF2bzs/b9z5YpeClSIh6c73PWsosganvExCvVqFtyR3U3LXOkgrEQMBQ32ca1S27k2narFPCv7fm9Y9zPIy8ForOEqcvv68VuJrXIRLXXcjAsV7/uhIsu/rxpSrBj2jJ9h2QmENs53H8/tuvXr/8fh6HfrV576Ofnl48jbHlcMAQw1JzcYDFw97RzR7jPP2wOT2q9j7cPyrrZhgGd0sp0eK/CkxUZ5+5lWIpde+8++5PfvKzX/7y559+/GHXNarKVidmpfRU6+Nlwh/f6zBuiTks0s/MyJhiIoTt9Zu8H16KEOHzD95fn5yY+bjfv3n9ctrvlx4GYsWvG3ngFB44Di9vdaS2HOxk4UB+/eHYOG7Iy/f3afBxlSFHrNgjZA5FQiCOKbqZiZq5KDF3XRtiRERVLbkUEXdzUzA1FVVD1EWO0HSp1TgdrtMidvTWfDvs/Mc0lpaR9/CG4YMDDv2/h6d3fPzhCcODl1wKAo/bePcxgYPDccm4D+UPykzLDBdRyflY1WBm5PuN2AzzyPOA0kqDhVBS8pNTNkFV5FD227wfJym5afTsPFCI602MiUpxLyYubuCmAF5x1EBoblqKGZhnMGBCgBAXpvo/Sanvjx3HAP4QU9bC+H1Wv+Ch6FFJqM7VOiKOr0J4yCERmcndx/3u+vWLb776/XfffDlP4+n509XJeduviQKHdCh1Li8AhztUreZq/W/ZKhHgwLt89OEfTwlVrSXAlHJF2mvVFAN090Ch67p333335z//+S//5Bcff/TBk8vztkmE98SFh6/8VlPpOGIXxzDEw8aNT95cnp6db05Op3FouuaDTz49uzhX1durqzyPVjK5HyY/OyECapTID2XvAA4YneUkEe4XoKMO3X3I9dZRo9iqg5aQAgCakTmamTuIlGkaQ7iv3QMAEseY2n7dNM0y+UspUkzNrJiIVcsEVVEzVRFBtCUee/AR+ThIDkc4Tr/l01WIANwvVzUTO5Krjk97eLnfevzRbw91AiKi+0LT/fri7qYGcNyyDhcJMXDkw1ogoOM8Xr2+mueZidq2XW82q7ZLtCzMrjgPad4l6VBQyCc1RLJ+nVZ9EPWbm/nuJpsN7draTc8hNk1yB/dcRM1cdZkktcMGbuaoiqqmVmor163ZrAOnUDfUQ2/rcNP/8Y56zXD555D/L5MXYKHTIhOEI8N2EWTHmhECHAujlcNbXdAADcxRERgpcpqG/ffffPW3v/v1l7//9csX34qqqN7dXp+cPUlt5x7rRUH3e+zZEt47ABAaGdLitnLISx8HeG9NfjPLOYuUaaJDzQ9rUKtqq371/vsf/OIXv/xv/7v/7vOf/eTp5XnbcEohxBhCXPKuw+vVYtDD/O/B7ovuTORgtcipZuoAsesun797dn72yU9/en55nuf5ZdO8efVq3u+xCKEjMyAZoCCiSZPCMexHcHIjN3D1JeQhWPRL8cA5eRQoLHs+HRJdICJOqT05OYsxiUjOMs1FRMDNZNrPAxKmFMFNpDg4h5Darltturat1J+kqiKqRaRoKS6iZlqpiKqci4i4aVWc9KoxixiJwkO65Vs5P/oSlxw6Q/bW/n+4uI/zlR8854eP+6Hw8zA6cHdRAYcK4Sa+X0EcUcw0Tyqacx7naRyG65vrF9+/GMYhELd9e3py9u7Tp5+89+Gq6wDAjPIY532UjXkjyDMxcLSoGlMLBdA5YOr7JnbOqSfuEZMUMxd
RFbEQwCLoAa7gTtW6xdxNfJqMsBAwo4ITRyReYJVvlyv/cY9Ky72f/EvkWT3n+S1o3vLJYQGcLNceHoB/wWq7ntgd53G6evH9X//l//S73/zlN1/9fr+/S00n508cDAmOATzWBgDAUj1YtlIHQEZffJwAl2Kj/z1Xo46EivGvkz9UzyNEB2i79vnz55999ulPP/vkg/febZsK7a9k3qq3K2KiaggQqmDQoRJTm3B2CByXMjRzCCGl0LZN2/ebs7MTPLl4cvHk2dOT05NxGLc3dxwTIDuq+YKbcFjQDQ99O3BRE6H7gufxljjWzhw9DvXtuDcggCOH0LX9Zr3ZnJwGCsO4N/XAWiuIZjpOs6qUWR1sEWJOTdu2TdPG1C5qPuhuJlJEsszZiqiZiLAIiwQWFdGDooGbmRYGD0HDY2Go8GiNPi6oP1bnhz+yHMCD6ODRoK3CzEyVfVxVk6pYNbgTURGZxgEA+9Wq67pq7epqNXqaS9ne3V5f31xdvb568+bq6urNm6vXV1fjOBFijLHru1/9/Bfn//uTOvndsExhHoLmGU1TJ9ph6iRPebstbk3fpRibvqxmQ4NkHhAh52JWzAwBmSBGKAWkuFSwpSMwoQEiqeE0OGhxm1S8X4fUIgdHPAY8/3g7/9JjN0C+3/wPvQ8iAIRq1sfHtbhKYh8iPTtCdBEf+DQsWzYSpaYt83x99fq3v/nL/+Hf/j9/9zf/Zc5j27bvvrtar9YnJ6f9akVEZksvUBX8KJ2xzHY/IgjqXlHDj2M75K2j9i0fEnhrReiQEboTcqB+tXr69Ok7z56dbPo21bTAD5fAVWS32w37/ThOzHR6dtr1q5SQmBzA1ERyJecQcYyJmIlTamCDcH5xdvn0cswzE5ycrLu2RQAVy0Vm0WkR+jUyB0RzzKUMY97P5SDaDEiYUptSO04GiodyNeCC710CsGVtqBWZwwJcf9827cXFxWZz1radZKnROhOklKoIdTOO4zgOwz6LIEJIqevatu2YAgBWghcHAgQzkRwzB8lFRYmFSJgFE4qKlKKlmJmrmQV0jMGIw8OdIrgessHlrhIgGNmBTLB8rcL2x7z/MOAQDqdKuHgGwmFXN7X6IUoRkSJF1FRqhO2GSKWUcb8nhPVms16v+tWKQ1DVYiZmd7vdy++/f/ny5cuXL16/vnpzfX13d7vbL+LiAMhE5PDf/6//N/VMzCDPPM9BsrsagTUpbzbkosOuuEvfbihszvydKYfdoNM8i01FCxHEiEAhRExC86zjUCy7WZ1jDBUb5WBiedY9Zjc3iK1RaoD5UJeAY1njhxvfP7hGcIywaKl7wPGSL1C/g9SZ+z32CA8Q8HrTDu1ReMCNPxTeVO5urv7m13/xl//pf/ztb/7q22+/JoKLi0sEjKltmi6mdgHPAVSkiB1RbEs5HBCOhB441BH+Aed5P6AOGymH0DTNerVar/q2WQS4QgjHPpGU+e7mzdXrq5s3tykldI8hxBjrOVf5+f0wjNMUYrw4P+/7PgQOIRL5ZrO5fPJkLoJgXZsWr6EiwzjvhuluGF3EK/cT0N3nXHb7YTtOch8dV6Ac42HFOx73zZNlza1JPx5+48wUY1qt1pvN6Wq1AkABgco6DSGmJQk1M5Wydzezyjds275pWiI+wLuWTCmEQICEXFhcVERKyCqKtZ9CWSkcfOAJzZkV6RGLN4DW0VqJY+TgTgCH6iAdmnEV0EQOfOApA9YyHgU8yAzYUWcdVWyep7vb29evXt/d3AzDkOdiSz1C1dUdVUoZJ3bvu7Q+WZ1cnMauLQ6jyG6crt68+farr1+9enV3e7vfD3PJogoO9WxKljzPHz5/P+eK2QR3nApNOZYSVdTKmII/uWj6Vq9vh2Jz12HXXLTx02lYf/v91Wt9PY3jXOaiGQja2EAXRDEMWbUUERUAx8CBmWNkBJCcVXQuRXYla2hH7npu2tCkGGNt/MIRC3+Y7YR1Yj7qg/19E2TZ7Q8A3weTrZYjkGp5DHTpAAOhMx103Q/lW6uJ3FLCWHj41e1puLv+6g+//v/8m//HX/35v79+8woAzEDNFNCBASMCV3QBLFVDW1r6iAzIS4PBD30AhIOwX61p/fCcHrSga6j0aDdxt1oJ4xBwqQQ7MzdtG1NDyDV9KXm+vnr1zd9++d03L/vVat13m826X6/qW5jqzc3N9y9evbi67trm8599EgMRNoEZKXbt6snlpYjO8xgYY+ScZZ7zdjdc39xdXd+aHaB7AO6ec97tx7vd/uik5AeLbgM8gOfBzREcqxpKDZCPZ7dIdwAipdiuN5uTk9O27RFpznORwiG0dRozV3m15e4TxRCapu26ruu6pmkAq98AmrlMBQlCZKKQmhBjdVTTPOcixVWlqBsQUp1xUNDNHLMj+4NbEyrx0I/EgIr/rsHMAXrsFRd2LD/6UlWuPyzloEPdtaKtSinb3e765ub6zZvrN9e73Xbcj3Oec85SREzVwUVxnqkURk1d6M82oe+EeRS9GaY3N9evvntxd3M7T5OIACEShxDAq57hPOyH3W53vDHmkAXGzOOUplHaOMdeVp2mZB7LrMBciAp47ZJancY5FzUBIGJgphDQjecR8uySDZwCYWCOAZd2F6KJF3Uba+rpuYA0mBLEiCEczCzhrc3+//edHx7klXjUzFwUdKqrbC2yLp2/+gd4/zpARMdCLhFFDtv99ovf/vVf/fn/+Jv/8ufffP2FmTcpnZyePn//ow8/+dk7733U9mvEULfze3jJIhOCdE/jeHzca+T8A070MNnqi1OIseu61apvuz61XWoaDulBsOqH+pGairmJlP1+H2JDhNM4TtO83+2urq76rp3Hd1wFPAIwAMYYNpvNOI67vQN4jLEUGcdxt9vth2Ga5nrF64V1hyJFRETlvrcNIGIiZvdwR7vP/X/07B2JuWmarlv1/TqENM95mqacs6khUQqhih3knCsCQkTwwIBo2y7Ghjk6EGBFX3txQXSsiiqIEJAAVI04cC5aCpMQkaqZmUgGRBUBT0DhYcASajpjcBR/qU7KwNUAGxcGF1UWOUCoF9LREbX+ygDBjdCpMpNBzIY83dzdbYfBmVLfJdOplHF3t9/vpRRRFUASjXOBYT/uborOnsibCE2Xkba57Kd5HoaSBd0DV7dxIiaTRYqqIs2PJ+MOxXDMdLdPqy233DTsTSpdY9DQWHiYYLfd7q7+5u6m2W7Lbj+M01TyUjoyLYRIyDF421CZaR7EHQJCQEdVIOfgwGQaTEDVpmxiOs82Bk2Jmya2bWhajgmJj0C1mvr9g8sBx97rsaF8+BkP3x0kn2vGDa4Ooda+ax/CDRyQCA9gSEQgpBj47ubqf/h//7/+47//Ny9ffKvmhHB+cfknf/rf/OJXf/bZz//s6bsfpXaFTFCFPg9C0H4cI0vHCpHQDwW/KigKgHDApt6fy6HLvUCQl/x4kQyrMXb1pTHHGNP55fnTd56dnT9ZrU/hmBW4g3uIzfnFpRRNbZtiPD07MdcXL146QNskN0PwJoaIQOC8IOjryyuApxjappnnAO
RNm8Zp2u/32+1WJTNBE6uO03L9A7lLbuO9BIabF7Es7n5sOi5ESzSosAJcyphLcd/dU2hPNmf9as0c5qlc7d6UkkOgpmmqKlHTJLNFYmQYhmkazS3Ftm26tukCMxG5cxUvRcJgAmC1hWD1o4RAoS4GXIgtaNO0VQtwniYzNy/gEfGepAQAIasCgIE5IboRUgImAK+TnxwMK6fLAAFca08H0YkkICJGIEQSRUXwSpcqMo1DLhkR275FAnWd5gEIzNXdvDpwirAUm+dptxvykMk1BEhNQdoVnVRrRThSNVlmrCyoY73b/7/s/Ve7JEeSJQgKUVUzc3JJMCAAJCna1dU1PbO9L/v/H+fbnd7palJViawkIMEudWJmSkRkH1TNr18AWZ21PTvdu1v2BS4i3K8TM1MicuTIOXiOsVn1eMp4PMKh47Xve68+jMFpP3gjdzzaeDze3af9g8uJUyo5TpJzZWeTCZkwYiBc9V6yxlEzgGNgUgCplS4mYmIlwqymCgAipmoiNaOxItIJ+oDslmH7VCb/503+n2yLXIbUszc9xdq2lB4aUl1d6JsOLyORSjnsHt5/+7tf/f1//N1vvi4pAYAhOO8url68+eyrr372p9ev3k5xzrkYNJDf7JyicfYN/4+rcdQ1RUXBrLbxDqt16FbVT9Gs6v8AO7+9ukKi9XpNCMO6n6bxm2/ejdO8Xg3rYVgN3Wbdv7q+CF3X9x0tFuwVq+66bjX0MXVq6pwnIhERKUQUvOtCNXFtZ0cIkkPnn0AyAxDVRQSzjT06C7naw7X2dxILHlYXFxfDsC5i1ZW5EYSZnXPclLe1/pvZOfbMLnR91/Xed0S1mFv1HLmaAzcdf7OTSHFtBWHnzMDY0bLmIWIRQVItyj48A/zGMkOrVpuoMtLgOseEUAMAA4PKI8qiZsoLr0MJCyM7t/YdE0XVWEqOKcdYYiazEEJ/sTWyefaieZrDat2p9JpLyoVTNhXHJh5913WM7FnIiSGIerFcpYcRtYYfCgRQV7yqaKdQy9dnsTWCiI1j2Xu36VZdAON5RbLqHJnLo8b9XObJhAAIzSRlLeo9OTJPwKgMyszU96ZOIs6zQOUiYb3xiyek5776z6O1wWmWkuVS5lhCh6t1GFbOe2LG5tH2TAvgpyPE01HB/mUzNAIwIwQwA6l1WztV7gCxCfPX36gcG1MwRTUQAOeo74L3HTr/eHf7u9/++ld//59uPn6fUyYER1jE5mm+v394eNil1ECgChIQWa1sNbblUnBUa6mtIwAjA1RDVMP68I/LRadTs3anTjvQgkESAhTJcZ7HcZznVLEFYjIzg+b/S8Sr9SaE7vrqupQkef7w4f7rr7++ublfr9eff/bqX/3Zz19eblb9V6HrL64uyXeAVOnwRLwa+rRZxTznkgkdAnnvur7r+27u+84zES9X1gAxp+KeTZiliKqNCVpr43hW7azYTL1x3rn15uLi8mq7ufChTzkDADtUVcfkXGsozkUMwPmwXiEhxTllKc65KlVIxAgMwACkZqiVjKZaE32pO5DyQqFhZuMqvASkZtBvELuOSrSu655N/vvbT3UPKaIxJULqu56ZgJQQ2APW8lbWIgJgjpAJGRGIxJEPgYZ1cK6oai55mtM8lzkF5o59YKzFhlDKoHblfPBdVoxZR5GSM6VcRAI7ZYTgDdmSYsnO0BkkBDUrpihGQIRYFARqM/oCEP9obMUExwl3e0fUZe0FEBxngTxlSaYioIjmQAGkoCB78MSBkKuzLJJzHszJWolyyQpmQFxX/Vr5IjTvqOGjhqqkYiVLkZhSjEmrENx6HYgQ0M7EUP/Y+F+fDqSFyLigzmY11sYGOlXkvxYdK/FWFdSwhlgpTfvHmdmtNlc3n97/w3/526//4T/vHu/NbPGENAPzPrjQ1ZOq1QRb4F9stayzomELK0y1VnlqZdEq81f/+SFBFfZRKcfj8cP7D+/ff/jln/zpar2haqkrtmzeVG1CGTGl6fCYUpx3jw+3NzfjOK56B/blZjNsLy996P0wNJUJMwMgor7vujmYQU6FKKnaMAzbi4uLiysVC23y1wzDQs5Iflhv6YxIWtXTZPknth8nzxpszZBEjl3frzbby9V6S03FzJh5GIZWvEQzABGJuahq5Qx53xF5L4KExA7sqX2+LriICkvyZWYqUlt97CQ/3hbolpd5qo3hkm30z9oUwL377a8VQNRSLuM0qwF3ARwbq3M4dIEBJYkVAEBH1DvqqpaqdxR83/cxFheGjqlTC1lzgVzMqw5T4jnPcbTDge8fVsepE0oQplKOs/AhxuNkc8SSgkFxHJkLkKBWFgojEmACqRLqUgX1wYCQySHYAn0ttwGBCAFJzU2R7x6gFJfzVkpIJRmmko2gis4xKqLVsMYY1KN1Dp3jpmDtmIDLKgBYomIKzjMAZtFcNOdK3BCslFZyjleglJLM8/GQckpTzrEUdey8Y3atMPTHz4haOSliTo3PKHNW6TqNBlWFfQlPtE1rkru1uxSIRDRLvP30/rdf/2dJ+e1Xv7y/vfnbf/+/fv0P/znOMyMAYgV9+mF4++WXn3/5ZT90YIJUVaAMAEUBlVBNao+/QlOVNbNaIwBDggWIMVWTH3L86kmdjhONylSNyIha+UxEdrvHr7/++vXr17/85c8vtptuWBHVhkMhq7gTA5Gyq5IEjmkIYQi13Y8csw9d6NfkAyCBCiyRPDH64BHpuD/u9o/DesilrNebl69e37954NAFH5C4UdEMUk5hPW6vXp4ECAnROfaOJSGoIQMszLgqJV37bIoaMw3rzXZ9tbm4DL6LKatF00VRt4YQaqKWUxqnueJ8TG69WQffdb5JTZWsZtKuT+XHMLFDYCYDAJVc1NQEKj1JaZEJqpOhajOaGZGgUR33p8l/uHuvgKKWk0xzLAbmPTICaMdEXdeRA0VEAmKqkulEAITeURc4Ks6GIXvv2IBT9jnneUYAisVUcRp5nFbH2SdBoAJuQrdCN1CYKQtrBBgJQkDqPBtko4CI3l/gEBlHSfvjMccIIqJW+1YADLSgyU8WlRUwCx4mMHNgazWf9EgOpBSCZpksBmjomZgguOqsTo5R6hMIjrHzZD17QlMgZjPkYgQCmhWAyYjBEInYOW/GTgtzQHSl0ByTKvRddkw0IC5s7dNE+K/Of9X6R1VRTwv5AkbjEjkvsMLpF7CxjgDNgAyYaBoPv/v1r+5vPr777vf73cNvfv13dzcfzKSyEhAxhHB9/eqLr37x5vO3oQtmissYhbMPbvS59hi0j13whRYJ1Mn/Bwzw7KfYf/Z0Rmag4zy+e//hm2+++/jx49u3b4kwBFf7E6qeuIEROGi1P2CioevWq5X3buh73/UcOg6BiEUKKABxpSYIgIjs94fff/Ptze3N5dW2H1Z9371+9erxYce+QyQDfJr8KZHvhs3mfOcHeyL9LUuuAbS20Cqc773vhuHi4nKz2voQAFGyqkm9a4vsCdTGnSxSpGmdOEfed10/MLMBlCIiKqKqmQycQ0J2jn1gcmgmTAxipZQTSbruDGCgo
MwNjFwyJvshtx/ynlpRD8mpACopCfhc1obXE6wD+r7HrjPvgdgZkACIUFFSCyl3B3E0saNqAU+lWIyqkgBRFHLuRXtVVTIAAZd9n9aYOKRuJXEeS9ph2Tu4GMJBdaTZRFdd1/edWw/7OP3+++9uHu7GeUpFCM0ATZKVgprJ5NlgQjMCIxCAJICRCL2hV+KumwAmxOIYmbGSA/reAWLoyHt2VXwdzUxABQyItPO0CkEVcymlADpX5dbVjDwRswEbkpho/Q+AXUBKaUoiJfiIAIRhNbhWFjc9hc7nuOuPJkrbFWsMrXjqsESAxVRjMcZZVgYmZnaemNuGXDKhBHQe3f5h9+tf/f3f/Zd/P43j4bATEwQ0IFXtgr988fLtV794+7M/efn6LRioSLWYOFUYqIFYCIRmsFjmYKtrVmXA1mZuIib6h3b+J7fJs/IZ2qmeYFZKPhwO9w/3t7d3D/f3wRPBAEvbnBmYFkVz4BCgeo4N/epim7qu215chmGLrlczkwRaAAmsq4FvTno4jt9+9+7f/+//8ft33739/PVXP/vql7/85evXr3b7A7Kb51hKMcAKtxCRmp78RaAV/yUlEWODk6NGhSRAzcTQh26zudheXl1sL70LpUixQkSOfE1VRYos4EiRoqbMzq+C82Hoh/V6HXxAwobVx3ScplySA2AmHyrlJ7AjVS0he3Y553rFtfGmpXXL19EjmnMrH1YzlKfJjzYCIBgxMXkyZEVw2boi61lWaR7c7Lcb3JpuSB1qsZKLxgxgFJMgF9GUS5DMqoagqjGnLEVETdWpERAjQRWLAjAEVg0AjsjIOVYk8A5XPlwAjoJouB5WQ9/5vt+zs802lPzo6JhT1lLUwCyrmufOPTsZAABQIK086yR4nMGIyHszcw4QS+hRAYCBnJUEYOAcECOSAdRZZmiCBgQCDMGRKpkqECL7SqoyMAoOyIlREUoplgyqJiYAaMBFqBTZ7xMhOAZQcN45h0h2Blb8U/n/SbLh9Oep1E9VpOUJM1OtyaPEnKSUGOM8z/N4TDFaKb/7x3/49PHdzad3j7v7WqU7ca7NoBuGn/3iT375Z//q+uWbrhvSPIvqskUgLLXsmlUZ1RDyKe44zWCDWjSqXoA/cUbMT/TeH9ChVdulB0ARHcfx4WF3d3f3+PhwdTGsOodEgNz6Y2rPgaopKDC5brVeZ9Gu61brDYcOkFQKgiBoJf4BEIJpyYf948PD3ePu4fbuPpfMLnz5xZfbi+2L66sicnv3oKqGBESqRqrM7nzbr133uehSIIfmflhvFyKT60K/Xm9Xw9o7D2ClJAMIPlT9Qlv0v2oAUYqKGrMLXbdarYd+1YVAzLAgvhXPU5VSEhGKeFVnxpUt7dhDMEYuUkTESnnKEM1UlbTigmKmzEj8zAHXAUerOyY6JgfEwOgIhgn8POePj4dsdLm2l1f6+roMXSqWUkrHyVTQew/YZ/XHkW9vXYwUvBFGlaQlFTEzD8RAYIhM4Jm95y4QkRbBXFyFOpwOnVv7oN7lboVEIXRgWHZHTfMXrl9dvXjU1WOOu+k4lwwGJZToeLvqmc+Q2Fq1QANUZKdqU5l1NtcBEve6ch5Wa+vXthaaxvJwN6dZtLZ9IVPl0lWlSjWoiZS6ypHzzOw6M6RCRub6YOhygcOUp3lKMSM7FSsqomDAKnY8liqZnmPuh9D3LnTkfOXnwVnM8qNN8vSn7q0nuh8hVBeIxUysbiAqopLneTwc9vf3tzcf39/efnq4vd3vHudpfLi/+fDum5RmJhYV5LqFWu38W28v/upv/i//6q//l27YpFREzNSUlkbd5wX7CvVXSl6N+ZcEZIl6dSk3nJ1Ofbbruq7rYkyl1MtSs0lCAwIQtNq0awrTFPf7/f39w+7xUd6+QBQ1rSwMJKxC+s0Uk5zr+tX2QhC9D13fIaKKVAFWcgRIZgKqapLStN/dlXx8+fJit7943B3ff7h9eHgchn6z7qe4enh8FJXaiLr4G+kZyAlmVkSKSiP4Nd4TVFlUx74fNuv1xTCsCd08TyIiKkwM3tUlujkCAyqaFMhFzcx1PoS+H1ahAa7Qgr7GFAQAyxI1FiDJmlPpQ+hc1ZauHXECdc8/r38tCEvrvGbXNNHPJj/mSuUBVEOtBlFqGuc5P+7t0w0WdSgwuDx1EWTKJcZYphnBHAYn6vaTu71z37/naeZVn5kPJR8kTyWDYU+uR9cBBmYOHIbeb9bELDFjyi4VUBE2CM4PRwrBsyPn0Pm5yN3xMKYkBF1HL7f9sOm6FU8loYGkMs/+8nJw7llLWyPEtLmlAhlEjzMCsHgOHfhe2FPoWKRKWamBGiKSVZRLBUykJMuplKKJAMGZIBGgCFa7uOBW2w2gm6YsGY6EGVS1ccAr/0MUilmcbRqBzHLKKcqwdt1APqCrmmBLxviT8/+HR53tp2XjFHSqzfO4f7y9/fju+3fffnj/3c2H93d3N48P98fDfhqnlGaRrCKASMy1jmVmzvlhtfrZz//sL/7yf/rq538eQn8KIPGMvF45BadgBX/0pZ40fBY+zo+/OyyNPfiDyGehJ1bJbARShZTSbrd7/+79+/fvvnr7YrvuyTl0YOSRHBIDmEoxEWIcVqsXr18OmxUTrTcrJlAtaEBMxKxgcdrHOR7HeHd/c/Pp+5IfXr/gw6Hf7Q67w/HTzc163a8364vNynte5kxdeU8SeWf3Bc3OXBwXtkVNu3wIg/e9GZZcikRVRaqiCRUJqRceK01Y1ICI0YW+7/re+46ds0rcaZJggITeOZEsOReROaqYiOaUc/Bd8N6xAwR+kjNEPX3MeVDcKDLPzsVpGwlqKmYFTMxMphLvH/PNrdzde+827gV6PoqMUxzjLCmxiu94teIcSx7v8f5D//CRYzZZ7xHfz9NNjLsUEfDSdy+5e8Pu0nHvCOcOtRhxOozlOOo0Sc4KYojkHAfvQuDggd1dka8P+0PKK+brlxef/cUXq8t1d8kRFcVKLjFOL19t6w2rwxKtphZIFRYnRRIFOc6WEh6ZO2+hK11HXd9LMi1qqvVl7IAJpXbvzDod5TileS65zITUh+CcJyrkguu6rutfbK+Y/M5Gi1BWhYDGmDKoZ1LHnmqiTggedJ1mnA+z83me82rr1xeh7zEgVgdXbcilneGXtnTi20LPa7PQTr9ggNTabg/73W9//fdf/93f/t3f/e2Hd983pxopKqXuIc33ERqdruo+rbbrv/xXf/M//y//tz/9k3/98sVn0xxTzpWlTQZyNvlhke45sYmg0j6p1Vxq07FVGb2mr/Q0zuqQEymlUKXrIC2NRm09wTqrcGn03u12v/3d716/uvrisxfrVbfdrhhMDch17HszK3ECKZ7xYrsehr4UARMicwymBXxHznmPaT7u7z69+/DpN9/dPTzcav5o5e711ZwjffrUjVG/f/++792fX/zpdrPqQ3BMRU1tWaqomY8vJ4JV7lWlXk5CpGZnTeRcIPKqeDxGIgAQ58gxM6OaWGkNjYBU1OaUzazruhD61Wrouh6ZKo/OsDZOKJp5x0PfAUqx
lHMRAUsmIi5LDkVC13e9d1w9kWxxGa22aGaGSNgyyBrHyDkW41Sb01NTaVBVxXlOu/0+Hkc07b2jLoB3j0WOWeIcLcdQU5WAMB6nx1t4uCvHPeQymdwjfR/jTYq7nNDggdLMseq1EQHmICYKmI5TmaYyTZKz1QYDRHIcgsfglNyN6Xdx3he5NGKnr+frjgbbhMETiEouc4T1tjsL+xsZ9iR01QrUhrlAMcxoMRY/Ru+tD2ICaUwqpfrTg6payUnjpPEI0yjznOeouShXRrWKYWFTdFRZjIBgKbHKEDgXF1MsJoTs2YJDYaxGoaDOkE2kmE2jGuXqjUzB+aYIjH9ot2xT3kB1KSTTaUY2Qp8h5pwf7u/ev/vum9/++ubm9vzlJ0rkQsutOy+aGSN3XY+I93d3vluTD8iL6a4uFJYFjX9m4rNwimoiQtSIBmZIJ2nQHx1VgUJ/Eg843TBrVflxHL/97rury82XX7zeblbefd45LzqpiquNSqYI5hyx8/3AqmqaTIpqIVAEM5U4xd3D3fffffer33z7n75+P837L1/Z1VbWQz4cMpPMU7m5ub2+XBNAP3R9CN65krKZAjQfo/PTJoIuuC5wjKDVPAyR2bNjH0LwPXMwAGm6WobIzrhigdASCRCTUiRnYcdd1w/Dahg653wTbTmFdtYEVIiAoNqtSLVPMlEVBVHUKrHqvfPMDgmZmZikSDVKAEDTBs/8OLx0tW3BtMG7BqIppWk6TJOYra8u3eXWNmthl7LkXCBlTAljtL3G494Oh/jhBh53lHMqejtNd0j3iKPzmciKaBZf5hcqm0IezfIs86QAVRfXNBtK7Ruq+5iWokYZeWYuK5fVpbHMOY/HQzd2tL3g4FQAEQOQ8+eaoFCVUPHJXRsBGJCRHCqqlphlShkkso2VEeMCdd6TmqZSSjrsp/koecIYMWUzIO+rB0QuooqqGcnx4bArMZJCjrmosePgzEHJVgCUUH2gIlTmrNX9g50PW7CU9RinJFBKCbRd0eCJeREmfoaP4zIZas2/1dANePklXMBuREJ2QIzsgu/ojPZLBE1cC58+gYAN0VBTzu/efasAHz58+MWf/tVf/9t/9/LN2/ol7FR/O5uZYHDapxGBGU8y/qf5XmNLomfAkjazLK1436k9XuuyW0uTdaCCEqBjzjl//+5dCO6zNy8vtxcvX7zYblHShDibCrtAxMTegBXQTMwEtCCIZyAysJymuHu4e/f9+1/9+jd/9/U3/+kfvgsdffXZV9uLgPqINs7Tfr9T5nIcJwALwdUm4ikmNWXjxiM5OxFCHPow9KFkkGJmSrT03q3WzL4UVVMiEDERyRmIFRCcJwQU0yJlnpOKmEFwbui71dB7V03HWxkHALHaJZpoMckpx7mkBFrIEYGJiCmIWVYDESlBuz506tATcQhBWZmpVgpVBYnMyJDtuX+ba9JPi/EzAKAqE4ahlwvnXLD1ENGKZDB2iMgEanAYZZ7UmY5HeTjAmOZiR7U7K/fsxhCEiM0EcyzzqHqQPAINaGiQc1QwajVrBWothaceDBFMZCUwbdfO0OSY0VJOJSVn+jS/0c4ZC3/gqLiVA0RDUpWcCbKJFAZjRuewQw7IImapSIyaChozsWdgBOcYCckMQAtqFZhMUeN0gKJWDIldH9hwCIiAuYipOG9OyJUqTZUAXNcNjDwXyaY5pgmSZweIXc8AZICAP71htvJ5A64Rq4HA2ZKHSKHrL69fvnrz9rO3XwFizjmlOc5TLgWXGhwROu8ACCp9gKjkdHv7aZqnjx/ep5y++uWfvnj9uS6YyVkR8fyzcKk4YN3w6VQShB8sFz88Winv+R2zs/9blYohYgQRmefp5tOn3/7umzevX3/xxec+eILMhByTD0Po1i70yESAqhXlNTDVknMpMeb7h8dvv/v+t998/4+//fYff/f+m+8+XF6sjtMXYl3ve+89gOUcpyNO06wiVJczBBGRIp7cj8+FiDrvvHdo2VSZXdf1lxeXq/XGd50pznEWyVUnUVVFLCURLU4MEItIKTnO0VQdcS1Mc6MHGzZ3VLQq4lNKSXGcjvvdbpwOc5wBVH0hYlNEIitOWUpJufick0/e++BcYH7SGq8NrBU3PFN6bIdjH8zAqlcDIQCQ4nrt/VebNGqaU5SSpj1o6jbXXdeRsqQ47sd8/wCkliNOWRVGwz3ygXDy3rreE3uxYhSLFMpHtYPJhtmRmWmVjgWExQ4W63g+5ZLFsa2G1YtrNaSowFDXc5VihVRMpGgpetZu+VMHkvIiaIVInp2hH4igg8KgKNqx2/jg2GVRg9IhUWDugijORYsiUhvtBlzIGRKxmpaUkuaiBgTMys6Fy1W/7t3D4ahSMqn3AGuv2UQOuShzGPrQ6ToWOsZdiXn3OKWiV7Dyngxr98jTBtqyTaDK5q4g0NJETguZvAWlq9Xmq5//GQAS8cs3b03Lw8Pt73776/u7W21NdFDhPUSK06xZEaBIPux383QYh8fXn302T8ciBdA9xU1NuxEaSFcTeqhFW6yL9bO9sc7A5/hlI5+0v1CrMyxT3s7TCbNzVMEhlpQ/vP/463/87ctXL1Mpl9uh7wLRMYRhs9FhDf0qOMcitbfASczz8bh/eLi9vfvddx/+89e//+13H25uH27ud7v9jhk+fDpcX7mffXa1Wpeu3xPHnPI8zTnnIqImoqWkWIqq7+BESjo7G2ZmJBUFpW7oLrcXb16/6YZVKpJSckK1PRwJRUGzxpgtFkRUMNGiJiZCiOxMS05xDs6hBWSuq64BqEiKaZqmw+7xcNgfD/uYZjUhBCZm57nae7IjTugYZ8aFbOZDN3R9CIEdq0KKMZWsKs+KMqfJ31rPqgdytXxw4MAHN0ydxIfdfEhTjKzarbfBd4w+Osxxng57REMTFi0GR8U9weScdl3YbJzznCXOTqxotDnnEWBGCAgGimg1Ljzd+YWVCApQkCJzCcGtVj2ybSaH6obgOocEZopWXaH1j5CNweVdzZCIqRv6jfcv19wz2JyRIPReTC1HZ7jynTpjZjXosxRFMzZgA1LjjKwAZlKggGWDAoAGqqUg4+AHRc6FzUjBENExCGuJGSCqRkDu++AEReKUNM6ikDy7vqdmtPnsez9lmw0RV7BKgmsiEm2yIIJz4erFa0Ai4rdf/RLRHu5uNpcvbj5+SHFUUWRfRfDm6Xjz8X1KqfbRShEV8M7VQLT5Xi0XriJBdJJPpvY4Ip52kX8KrlgOOsF7rWDxhJ81FPIHb2HGhJ4dix4eHr7/9tu/u9xO8/Tq5fV2vXKO16v11WW6uMoXCj74UrKUZCVPu93txw+fPn589/HmN999+PvfvHv36X6c5uM4zzGO4/jh48P1VXh1tQbcIoWKLy6MO1AVLaWUrLJw9myxKG3fy0pRKYpAXddfXV5dX7/YbrfILuZjKcXMmMkHD4DOWUx5HOc55VKyqiIakRHUJVBLTtN4RNPSdcxcr7aqllxijNM47neP4/E4z2OR2sBfAy7vXXDOEztyjjwj8bKyMrs5hTmELngPhCWLmGLlX//
ocGKlugwhnnfHAxBZsmQ2S4kpOYOUk7eOgisODhLHHJ0nRmDChLQz2QMm53m9vrh+0Xcdxjwe99GSqmTRWXU29GZogKat0/Ksz5xrzgE4AR6QR3LK3nnHm9UqwPrFZrgYNLBSTQ9BqFpunI2ZZUi1ngmrIiMKqAIkooFptRrevuz/6ucvXgw+H+fpMO2P0/3+MOaRxS7WG2RQLaqqnVNFERajolQMnUIxlaKIip6EnBmoIioyAJo4xs3KEwNApKjCqA7EOTSb4oHJVv1VcJ3mFZodUiyzHmxKM4ZeS3raEfF0Fq2iR3WPrd2FjWDfanCt8u5Cf/3q8832SkWIeJ6Of/nX/+7x4XY8PGYRF7o4z3ef3n3z21/d39+ISusEBAuh226vL7YvQ7dm9q1Q3HiipopUUUZsDFwEWOSCcVmklpTlCSh4tvOf/HlPd+c0zhDO8Eio8gPIiIFoza4ntGm8//D+79E+ffp0/eLl1eXVxWZ7eXXx6uX+xf7w4njs+pBL1nm243h/c/fb33/7+/efvr9//HR/uH88znPW5sigMc4fPt1eXITP32ymucsZwcAHH0JwITCzikouKmKGBBV7XHgLAACgqsfDNM/ZubBeX33+9u3F1SUSTjGOx+M8z1V4t+9XPnQANKcM/Fh2+zmOUkrf++CdI0YAFYlp1r3EeeqCR2JTLSIll1xKijGllGKUkg2sVnXMrIihmKpmKUTeee/BOw/eByRS1ZJijhGRnPPOOWZXSfkAdjIae5r8Mc913hNXt3cwAxVNM8yzmUHwAcGcr2I2aoTYeX+5CTlxYBTVcY7HaZ/iBEyhW602Fxfboe8tJWQ9pGNK0ZJkhQjkW1CHrKcRBdCcpNEAiuERaA80AaGRZ9et+75Ht/HYI1AlkQGYIMgfkfPXZc1ahkHgHK9W3auX67cXvYzDcTfc3B1UbRxnJl5tt75js6wqWkzEcsIkGAsmUVckixRSBnCOlEgBVEgLIlAphQEDAwTW3qGVVLeQjk1Bck5lLiWtQne5XgdGERhTTJIlmRSLsag+P51n/6pi+6aGNQmwBZZDaOJcPoRhWDM5JJZSLq9fT9NhGo8K6kN/d/MhxxmQpKlztPnnfLi6fn398k3fr4hYVduCA0uHABi2rvUl0zj1sNVt4iy++smc/3yrJyKAk9arnRYOpGX2Izokz9x555lMddwfosrd4269vbm8uL66ur6+vrq5vXzx4u7F9VXXhSJZpwi7w92nu6+/+f7bu4dPx3icF217qQVTzTnt9vvb++3NQ0pRpimD2Wq9qmKeTKSqUmpFrmlwG5Ce4SuqlmIRsdWwub5++eLFy27od/vHcTxO05RSct51IVTGjiEKzuTcSQ7HEXpERlAVKUlUUpoZcfYeEFW1FGlSN1V7Vypp7YkZXW+7mpIqVk15EzMlQge+0vqkiKpljMzO++CDB3MEZ/Ycp8k/xWOdfdXhFAzVYD7a461oCutu8+Liih0gkZE3JATwq9WrX/xMXr8Ch3Gc9u8/JdHjeEwIF31/ud5crlbd0EnPQnmYBpxnnsQKFHMR1EARwJ22EyJgMgIFE4CicAB8NMqGfYHOuB+6sDbtLVGS0pBxlSIlm5TzxaxtP3XPql2T1WiD0QAJ2JCLakkpx4MV7Zl5FQCuDF3MOqeyudz0qxA8mUmeUoxlGmWKdkzGuTiIDqSAZQVVNGVDJ4TJsKjFOZOzrneeYB0YxDRlM2PnjSmDmMl0PPaAr15cyqovSTWVmCVlKaWMx6iLU3Wtv9QibLXh0Ab2A4iBguopH61BuJEYZUlVSG6Zs67bbroNMQcXDrvD3c3tx3fv56k5Y9d41zn38s3nrz/7oh9WLblfUm9cXDJaJX+po7bpWpeDRjisv29o8odAv6o3cVoCltUeoJoOcTNCQUBH5JnIeSQ0s6I2HY+7Od4+7Ibh/vLy6uLi6vLiYrtZbzZ98E5BISbYTfuH/bd3D3dTnAyKAQGBmaqYFjNRgRzT8Th9vN3Hed7tD2Z6dX356tXLzXogbJPSVA1YzQxJyNdhD8uNUTUmd3V1/eLFq/V6I6DHcdztdinFSsU1Be+DD92ccxZJUhQ0BI/KngjNNMdccoqxSIFldwLARtSUVoo/uX62pgGTp9W+miiDiEopqeRkol3XO+8dETmoWvSplJJLKd6sJwCPQM8hchfTBFCRHWJGADLiKeX7x1Fnj2v0Gxq6HggP0xxH1YlNDZlxvQJPBpC9i4T1XboQ1l3XO9cFVwh76/uhE+8RqRhOhlURG5EcLMKUREokZNkkqya1UXFPBVPhmKHz5AzZEiasOb4iAKlUSZU/WDRGQFMoxcBURIvZnNGBsrfDQNM4pzV57Jg4OF4N/eXF5SC6vtgM69AHAihpnOOUx07HWd0oPqY4W2YowkXJjM08kBeh6GnOeYojQEEwJkTPJpYcoSkSKmBBEC3jeFw514eX3XoQgc6H+91uN+2naZrHdJr8dZOpLX0CjQxDJ+ANfjD5K/e+5v81IgAA5MpHYXbOs3PMwbnO+xB8V3KqJXUivrh68eXP//Tzr34e+l5aM5GC4uL0XFH9RnVZVIDb2ER4+mlmTdfulNyf7gUiQJVI5LamLDoC9e/MzK7K8gOYEZIjJGZDNNVSJBXJqajN05Riyvvj/PB4GPpu6L33zA4hF93P43G+G+OoVm0YAQGa4kUxVUXIOR8Oxw8fbuMcj8cpePf65fWbN6+GLqQYNZeSc0lJyeUi5nwGL+Rt2fxVLWeRYpXGaQq5lGmapmkW1Yq5IC/lGyL2vu/7ktOsRVMsORWt9b6SS5aWkNTQqpH5m20M0hJsVZJ+yz7qnV3EHNoLiKiU4lxxS8nQwExqr49IoZyzYzT3QyslV3IGACRkQCUgZuo6BZnjOD5o2qd5Pb6Ua0D4dP9wnGOpPH0D59mt+jLNu5jGUrKZIwreB2YCIzQfXOi9D+yYBCyaHlRi/fqEjpjQAZESCkM2nbTEIjGXaDiLdX7sj2PxJn1JRcdIwuzRkzGi6dLe+XzGK1ThH0Q0FLE4Ss6pYIpZj2MiwLnDC9bdvj9uPLGhUooECtvt1tj363XoKbhiloPDrgurDU+zdfu8P85HxpS9Cqg5gw7AA3kBzsJjTv74mPLIVAiMHFnn1kNg1tS0lqAUySmOnRPNm83l5eXV54fp999+9+2HMt4+pjFWrhO0G25FjYopVQo8LBq8TVl5meSGtuzGCK3sZoQAogWLAiBjct4R8hdf/mL/eBPnw82nIqaOueuHL776xZ//5d989Ys/d10oOYk00zIGZXKIRstUfcrzl0CdThr1p0biJrNEZ5V/ZCYiXDQ4FtgQEMyYyXkOIYQQqndYXdcQDIjUIJciYCCOUAkA0WJFSmM5hhACdZ0bOg9qcYxzygkRuKawiKZFSs6plFx58qWU/W7//XeSU5rn+WL76vPPXn/25mXwPB2TpJTjnOaxoPdDAvPZuIA/MXzVLGUZx/nxce/DY9d1xSTOUUSgSY+ufNdHKZCzC2GzYXbOO/6Y5umwS+PRSiZCqgIvTQUBmiGtVQEVhC
XpWmb3IqQIVjvasW6c1T+EiNktIVmDVBgQA7Jalf1U0ZJNQzE7tyABR4YGhpWuKKpm6BigEFmReLuLx/04lhkQbu/vj3OsXQLMzN5xCJLS9Lg/xFgAHDFStRMQkSazWa3lo+mskkEdAoCQocMaoELVx8tmETAjZSYDQk/OY6DiqQQHzhECmqIRNQFR0HOjrB8cpiDZprHs7+cYI7BmsfFIZEA9Pm7ocV9224JeSWA8alJHbh2G7ebqVT8w4dFskuKkqIrrZ2OXvQvBuZgEjNW8QmfKAlgMkhhGEolElRQrgOgMQkdiVoqCVIVjE5NY8n4eX4B89vLyxeUWJea4v7l9rymfgGUDKFK7PpZpz8h1XlXBdABYBHPafK8ai3VaLn1dAIoGgigq5MLnX/5STdi5m0/fFynOudVq/Ys/+au3X/3J5uI6VbuLJtr31H1IS9DxjCyIP5j8YDU5JjIAd0a9QgTn2XsXOt91AZf3rRksM1d1eu8dM5k9SRgBUd3XzIzOeBCqklNUsZzTPGMIGIMHsBRLMUTssPY/1W75kkrJWs2/EUVtmuaUZpWCANvt+s3r6xdXF4SmKTFoR9gzZQBSURNAD89oZGAAqZT9fue474Jn7zy77XqLrnbir9gHUYsxmoGBakqaosSY5znHqJKJmJmR/YKYmoGhyQn8BmjmqmiGutT/sUn0ITlqfyovCE/5VG0QbrAMoWNXfZlKySJFtZjK+bm4QAEMDKQNJTGNidW2224+5tubhw/Hu/eP92Y2zZOo1kgSidQs56ylWFEVE0B0LGBZJZZiidCkTNkKFIGDlqTJGTIagpGxQwIrqmRkhmAERgSOmbrg3dCHzWZ1teHLNV9c+NWafah+Xtw2vWpUYU8pjIEpmqIhohYbj/L4MN18eMgxr1aeyXECMhQo09E97vR+rRCMpBz3U1HPg3erbnPx+fpyBXor9lh0FM1QKIxGUILrum6dCyH0YqEI56IxzVOeNUUmC47NvAGaFQUFVWRDJ6itzQ2R2HcF4dP+cXPY/OzLz64vVqgv43z/zTfO6xO33xSS2JyU2YiQEZjIMbEjB081jpYb1n2iSu/VbXXBQaiVd62Ycj+8/vJPr159/os/+etp3OcckXAY1pvLq8sXr0TBmiBq7Sppe1Gd7XSa8LXqsOz/WEOPht2gVbc+ROeeIn9ECsH3Q/WcCrW1p/L8TBUJnXOIKK2haAlz9WQ4BExkaKemeq1qsiZVCbNkmY91xSJywQePQKpaSkk5l5JrtszUtAdSKilFJthu+pfXFy9fXF6sey1FUh68e7FeBYUkJg4SSGYXnigzQEQuMJAdjntTco4ur6+uLi7D0JMP5AI4n6Ucj4d5nOLhGKfpsHs47B+n/d7i7MyU2LCCiITAi6iP1N39VNtFU1BFNQYDM2Ri513X98Mq9L3zHSJbM4hQMwEzUZnjVGVIHLvQ9SG4LvSmppLNimkyK+d2Ss6ha0J0IGqgRWqlkwiIIWvejUcdJ1VVKYDkna/BhojO86SqVVSUyJlRUZDak2RkglKgZMtiEWxCc9g4k0TGpMgIhOSIHfngQhdC8CG4VR+2q24zdOt1t16F1eC6jtlDtU/UCoGoIjDCs1W53iURS3O5v58OuwlUVz2/2IY+dGSOAIjK1SYQUIo0K1mSx8e5aA6y7a+8C69W6ytVFnBiXTXnDcHYxHFBLFMi0L4oQ8YiSW0upeQcpURQIYCKD5uJmQIqkTIbE4ApGrBnYopS9uPxfrdjEyLpOg4eHT+xZRQgi8ZiDoANFFtMZqCN7E0tzIYl/DE9kW8BTAGBkOyEw6uyc12/Wm8urq/fmBSRhISh64hZVaQ63EHNwwEXwY5nl7dOgKU+h40+DCduQM1EGJ9xrhGhqu45x865s8mv2jrNEM6stWtMcNrnCcmqhOdCByAyaO1JIiImSUoCACbvDIizVSPwnHNOItI+Y2G2qmqa0zC4q8vt69fXF9t1cHw4jCWlnvl6GNaAMcsMeLQiCkHTyaQCsMoCaSlxnPb3D0wMoXM9Dd47JBZVTUlizMejxDgdjoeHu2k8aM5sglyHvjNyxr62M0Pro7SmiGptgcPaCdCEq5axsQR3DREW1SIi2VSy5JTmapznfdBqguACGKgWg6wNNXw6XG1gl4oFGGmxlNMUc5yhFGF27HyKqRStNR41a4bbyN73lXpSZ7sIpKyi5PwQQq+oACllm7MWYGVndbaSASE4YAcucN+H1apfrVeb7Xq9Wq2Hfr3uN0PXB191dcEEq1oqYrPNNAEypUzItMz/luwY5lz2h/zx44MV+/LN9u2r4c3L/mIdgguEAJp7zxebwWPQ0sUJd3uNIr3JanamV+RekSkBMfaGkQHEmcNikMbpqHOKqeSUY7IpzofjbpwPczqmql9iAghqUlS0iIkgQHAsTkGzGTrnus53oYupfP2bbz50ft3hbjwoG3dnEtFmRS2roTLgop+h1aAHmIB5wcoN1JqrBiI4QPgR86k2CEiRWacmAY3IzjGjgqpIa6kFRIRK6GEk11A+eqopNjGdE/m3wrWVDvRE2qNGRXxakZcOmSrZv8QQiPQEpCkuMnjNZGRJKFSfKoW1GZ4QAUi1mlIU1QqpqDJaKZgTiVaYUEsBq+KrBKfTIwS0oQtvP3v59s3LofeqkmKUlHrC674zopzzmOQxlpLmkA70FC0bmAAKO1Irh+OjYsmQtvE4rDbsguSSUy5zLDGWeZbjEabZ5cKqVp3UnaMQ0HlxrkmGCRQCKaaGJnVdq1O+UTqMUMFMcpk15kR7JmKo40xMihQRMxEpogXAmF3wXoqKiqoiUkxTcPnHXFhXpJjWPgRVtZzLHPM0xWnWHKuPHafC2m45wpPoUlViaHQaMStFYkzzHEXNDHIpKaWYYtFCngKHzqP36D0yIzv0nkMXVn2/2aw26/V2s1qvVquh74eudVcSgorkYicoDFHBTAnMlD0Tn59MXVq0xY3SeXp1PXzx+frNNV+sqfPACCBERp7ZQTDtY8m7sYwxBZtW+zxFFl0xXxEhYRAdS4nzPB2P+WE/3u+Ou13KiXKxnDWmOM5jjFNKc6mC7E1htQpiGFZbK6LCVt1sgyPPCGr73fjp+0cPcLl1cxr3McupHWmZsSfzCwNQg6rpYohmZNg4ke3XFAyBqU4VUHoWEVnTAlJT04qyMlftktPbIwK10J0IgZlctYNZrPjw6SKf/fmDHQnPDnwKImofRzW8wPPhSI3QTEhoy9KxxBM1f4FqGMlEYFSwKl6oSFGp3A8pmAEjoVTMPDgAYjWuymOAoGaI0AW+3A5v31y9frEhk3k6zsexxOjNyDEiClGPmUVLkq0VXpZTBGBGZjQCkTIn0VHBQZbUjSORk1QTYdGcJcY8x5yilnwCYmtuCqYgxQBUpKiKqGipdfynDiyrSuig1RfFEAzUpIAySrU4bf3erSbryBBREZkYDaTkNCECQM4RrIiwPZe9c3GeVKEUK0VVLOU8pzhN8TimeRIDC8EZYi6LSL6CmdVyx1KooFpaLyUfj4d+CBfj1kBjng/H3RwPh
rFfo3Nh6LkbXD+4Ljgf2Dl27PrOr4e+D10fODghiioypyTCTEQAqkaGy9ggAjA2RDAtzjl8slOp14gIue/9y1erwfOL624zoKcRpVQwHAs4WnU8MLNiV2A6zOn+cHDy2D88Ph7G69lWqzURgXFJ8HB/uP149/Hdp9uPj4/3c5wBLQCwmRUrWZKCIhqC1jytadmYMaJRdYDBQhgcMmHvkcDmcdw/xg/fT2nOqzUCpSlPP8/6TPEaK9dbcZGFrrJVDS+rqrdgpou2ZXXjQ6AKfmBjCKg9Ze8VeHdUieTNqhtbqR5OkTUhujb5myNw7bW3Jflftn2ERtRv3OTWubMg06fj9MBTgb9+4lNsj9WXppZ/TxLFWpXVTlRirlKcZDW7tGpTnasqrqJYKaYJUMCk69xm0znn5whZCJFENeeMABfr4c2LzdtX26u1T9OhTMfp8VCmiUUJkYjNYyB2gKT60jt/imKIQvAh+FLb0BHEJKZZVGB3UAUtoiJoCiqWRUXg1EKvQIBQFE0sxQrNqdVyn5kBQbVIQWpQNlV4S8QQmLjS9RjJMZMjduyoSulSTUZMtaiVWq8xRBDIOUopRWYwjZFzTucLritSzKCGfoDATN558Zq9lgBFkdhcdrlUUxq1oqJqolYdChttsIafKcXD8UiPj92cupzjNB8R59XKuj50PQ+961auH1zXLexD4uD80IXgHCMwlOpEWJTNWucYAhIwEVcBlFrsEBMF/UEkU1lZTNz3dP1i1TEyW8lxmo5QUmH1hKwEiKyzcBKLc4rHlHbTpPLg7z598+6bbnvx5vV2NbhSePeQv//24d13Hz++/7h7OMSIps4zEDoDMRNDqUK3TAC6IGUNqCIgUkMANI/bdS+KXfCiNk5x9zB++HgYp7y94GGDftX5fqCzQKZh7GenVrv7tKaAC0UHTvgwUy3y61IO+sGBTfGJnCPm1u3Y5hqcqgMAgITI3C59vfo/ZobXszT7o3zJa8zIWN2OsYkYnaBtAAJg5mpJUzuQ62fUNs8qFVJpckvKiyFg8I4RZkc5ZQBz3jF5RF+jpGFwV5cDk1dNolXgDczMMa0Gd7ldXW3X686D5JRU5gipsDUY0xCYCbzTEDZLU3c9EaK65dZOC1QAVc0p5yQltwgcG2Jn1ewOFlQEDUB0IWo08Ratt691ORgbUstSquAtkAkiOfY+eOc9OcdMrpr7MBMTMVR2XpGkiqUUM1SzknKeU9GYS0LUlHwu6Zwa4+oq7D0icCsYqqZUjnM8jqk75nGSKZWUrIhpUa1hirZmQW10QiVRANFSxkO5+Rh95wEMWdeD9H1Yb4d+8KGj0CF7dJ7qSsbInl0X2FHrLAQTQNJKP5U68IiQ2VgFSlFtLPeSy5QkLZ7xlb5tgOAcB08hIGo5THOZ4+DHdchDwN6xMx8wzdMeGRTn3XiYJI0lTeVBbn7fff2/Z1KDv7jerudp/+nD7a//4Zv37747HI5SIIRt6AbnPBqUnMy0KesZE0tl2TCSAouZqKCYoSFzcLQKJAaqdhijpDJPeRolC/p+uHjRX7zoXr155YN7mqjPTbjrw4ZQRaREgcBqy13TZXOIcHLMaPzGU7WdFoNNZnaeHdcMG04aH6dVpEbyLSRo34SaygTCKeVvDQZPX+6s/t+S+tOiUz8XvWPvuDF8KqObsI63Ou4JgQmZagyyeNEDPfMqBgAwImbvBSDmdDhOD497Vdtuh953iCQqMaWu55fXl0Xh8ShlTKIgZojoHQ1dN3RD57vAAWsoKWZiQGREpSRVqTmY9z54T09YDEixUlRqU7SreIZDwOppVEX/EBSwNvCALuwLbdwGQmBmdsTI1IjTYKZFU0Yzb+oQudpeopVaQGEMnnzvXPDsHXFdQ4FJ2SmTAopoNp0VM3mtoRm5kmzMJUpKUiBmyTmfR5du2TcA0ZiBGNHYe+SAPjAHF4YSksSoKavm1pWoJrq4SYkUMMPajI3A3oWuOA/EFDpaD3699sO67zrHHtkBkGJDdIAZXVVfQAXSVvKse7giqKlVqSQUZTVLpQoomWrOZZ7T9JzkV3EpdI6dMy2Ws4hYLhqz9jN0ZAEgEAV3ICoC4+NxPsYUxRLZIR3e3fwuDD44fLW9lDw+3N7f3xyOjzkXYg4hbEI3MBOIqAmU0krsi35YbSSqVxSwWh0aGTI79t4AU5Z5yiVJjkXNfOcvX6xevllvLv3mTJWIACrF1TEB4nM604lvA4xAjI6ACdABGDY7osYDAoBl09ZTod6q50ybqT943wXApzPFgJM5SiscLAn/AgPBj44fPkS1VOnIOTqx+wibAvHJCtoxBs9cmS5Y/WfIYQ0XmtCFAZgKezesh261GtarLHb3sCsi21XniFJOOecs4h1dbYf9cf5wMyLm4Nn3Yb0eLlbd9Tq8ePmSXSha6d/V3awZOBcpKoKGIFaD2/PD2lG5VgzgEB0BMoN3ROyqFaWZWNEnM0nEykwDMmJi73zwxLyk7apZsxhqlT1HIKs2tATGpuCEXCFHyIYs5GucBEhmWBQFSQALWTLNWC2UEMmEfSSXkArgucjL2eQ3MDMxhQyZDGt/YAiEzNRhl3yfbY4yzbkkgaKwOLWfBF4dYkfoGJGQPbMj9syOvcPOo2ckV9u1wMhUi2QRzQqKAcQZmBIBcpWjJ7NFd2yp6tXyoRpkg6KmUnKJcT4e56OcI7Gt+cVUKw5J1HVglMRSmfdT4qId5p5t1RXmqRR6HOVwTNl8GDa+D1N5fPfpN2Uar4eLbddZSaxuO1wJIHHoVltyHkGVorNcIFcXQ9ViUkBL+2w49RRW/1klq0KOjJ49R0mqRfqOh6v+8y+3rz7vgTP5/ISQEwZHg2d2bAZZVaQVfBYVDWRC79AROjZHyK5VbYqyqJo27cmG0eJJEBDABI0X9dJlLANAy/DxVMlDOMvwAdvvwxLr07LMnd+AtnI/rxHWzZ/IMzeVHzRcIG1eYADnXO+dZ17CDWrkQnhSuVEzKeaYVl33+dvP/uJf/9X28nKcplwyaZmm8fb2NsbknEdEUPn+3a1z986Xy4vNm89e/OznX7y82nZM68DAYT+nvu/AIBsWMYlZYxQpKkIGVjRHyTn+gBtT7xAAGbCZN3PIzgcfAjpmMBMtOc15nlQyQdUaMAUroEjsPYSeQu/IkamJliwCItCK4zmbOWQmJlfRDkVWYykWSwGHFFxH6IBQsZQ8IWYfiAk4GJloRRAFRJWoeA/OsSP23rNz5yJrbqEY1lEKC+WbgJgddkzogQR9x95jyUK6kAippnDERJ5oYPSEQICVFMFITI7QEXCdwwBAIKAVKixSzFRJSkUnCFFrhGdgCG2sV3BYoQUZIICiVrSkkuYyx/IU9i9HrXstIHGdEqha/f6kRLWZNGoizCXBMUI2Rh/8sOYuCOT9eC9Tjt2FXl4PHByFPnBWNCIzFMlgWTWqJYMskkSySlYt1hQ3qbFtalhtqKpFDUGJ0RBq6sQEmwt/8aK/uu7XWxfLDFhOSCwCOEbvyDlSAyvV8RlqNl59R9wy+ZmtDhJd5H6gIXe1OFsn
/zKlK474U6n6EvAvfxoo8LQQwNM7t7n+Y2ThDx3LOoKMizzAouPRkhFm73zv2dciAxIs6UZr0aqv02oqYKqFGa8utl9+9TYEB6Alznd3d47h4fFAxCoa5zmE8PL66uLi6quvPv/Zzz7/2c8/v9gMmnOepuPhsJ+SIqFZAkhmcZ7zNJkWNGMAyxKnPM3PostaolzOvz1EDMDI1ESZRZSQQEGKIiigIoEikKkRkEvIAATIleYrqBk5u06VoJCBGXXgA3e9J4etbkQGKGKFGImU2DnnAIpaMsuVCoiI1Ua4ytyIAFjddBQX4dZngB+1pvHmJokGBlrUDMQQFZiQgmNCdORAnYN2q8i5qg/BRB6pQ6NFcFabawsQmqsuY4aAaARkrS+dBARN1GqIZQiWKpUAK4cVKuhF6IlqWchOzQ1ohiaVGHU+wprORz0hBAAlBCD0HSMjdepySbmUMqcCopKtGGO/Cv3gh4FDMFZFFMtCGdnYMSmBlphzjFHjaJAAI4IQqkpOKUlJpglMqtqyQS3wcRMlMy0ippByRswAEKcEVrqOw2V/+aoPHQKIaTYttgRmiHUkoaNGoqtK8YTgmByBY3QOvVsQMjAxK6opQy5QDWF8tWhsbBwCAGZ0tXgPcBYEVnreosy1CPAh2ZKVt1R++d2nUv+TAMETOFEv/g/XFTMTNVGr5j/V/gcNajOMd74LvnPBE/LTbn8iCVZszKxC+qhZ5HG/Gz7dfHj3/cV2+OrLzzfrdY6hxEhAx8N4PI5zjFKKGv3iF1+8/uzNv/nrv/rqyzerwYPmw353++l2v9uN86yGTCiII+hDnONhjyrOLLCzXKbDdD2ORdrOj9V/hgmxACgAI2VyHr0hFAPNNVpFNZ/Zcm0BRVRkckQKICCixySjZsfGVJtwQNhD6BnAlWRE1A9hvR4uLlbON0MO1Vwkl0JqAlCcs9ARkwthKMWXnEoRRBCxFKUUFQFTJvBgiJBVNaWY4vwM8Ksfr2CVPFGTkEb7AGhS2GCOsHMOgRySY+dcrT1UB0FiBN9cjkxVRaGI1o6MGtvVfYeoEVOUwbmlXFBvr5opqCkSGrVXqRkpAPOJQA6msHSi61LH+tHxg1iAkAAcMTlAL5CzRYkouVgxJMc+kPcCJjlLVoJsVFbsi2aAijAs2LZEsclsBiiO8Qn1FFyCYFQjU1AwNROxLJqL1O7sWmk/HmdR8R31V932MjhfHXEUnp/LKbGvle2nbZnQEfhl/+e6xjVjTyhiRUDVmNpeuSD2WBEWz4vZYtPybbg7Y0VWn4DGBWYDxCdogBqwV+sq5zXj//rRitJwwiMQDAmJ2TnnfK3+AHIlF8LT3Ifl5mttOCcz1aIa03w47MbDTtMl9Y5UoJQc8/E4frq5G6cJzS4uL7/68su/+Ms/+5/++i9ev77WMk+HHZT5kWye54eH3XGciQkNp3G6G8d5nFCyB+ic01SOx+OLaSpnE4YIiQyxEKELPgzWrcx5UY2qWVSgIpdgvlcDNK1lCqwvE7VcRE2ItHblAABQYeKud0QsSoToO9f31K3ZexZFEZQMpVgRK6Vyoky1gNVGKZaCOVckzlQIjKkm0uaqSsYJmTpflt0C/NS+K0dMBlZMshR9AneMkJiZiZc+gkonMKyi8Frbp612IrXsUtQqfb+GcVyRJiQkh+CpLRYAgEhopihmBtVTCFFNS3UwNKNaILZKfoI6KapJ0hnzcjFMrUtvffAcz2IHgRVcFo4zFcmERkzEWKyUKRfRmAsjDsF1AClfZXJ5LlLAu0Deq5YiOWes5Xw0IGRDUmQTtQbamqiIlqoiUURSLrn9kZJ0nksSdb1fbXm1ds4xGoA6tHMdpeWEaum+tdDASSe7WTFb1eAGVSjN2LOWbBEJmdAREBkjMtceMHSVFFmXDFgQfrDTtAe0utuf6vGtCNC2htNsXIZGndjtCVvKhs+mPcKJ1rdYR7T/iJC9qy71WNsxT2sFYBOVRERatAEBuEhGRu+cDw7QUpx297caj5Lk8f7usD8cDsfHx91xnBzTxeX2zcurLz9/dXWx6jxNc6ncu3kc97vH25vbUkM158qcdocxTjOURGCdcyWXwzi+mqfylFoigiGKQSJHq82w3tJ6DcQSU8plNsuARkTecd8F5rAk1A1WUTMVZ7pcewRVKwWZYRjIeYeITYPA5nGUaoiNAFWxF+tOiFqKHA8RDNh1KpZmS1FTTog49OvQ9ezYDEtWlQRmhNZ1vvZWPE1+IjYwQCJm9o6YDQxVDElNFq64IZLjxtCooJFpaz+klk42nndDmYjIOTBtYm8tf2sjyREZAhlpS2WrBRybGdZCKqKaEFARbNZvtlRL/xmZ5tNAAoDa5yxqRamYE2h0KhFt2hcCrgCiGqaS5pzGhFyiqCI5q1k1KoOwCSmqGahUJR9QIUBTQ9FSVIvkokVVikoWSXXIxRKjlgLG5Hs/rLt+8FQVus5n0jJjTps/UeuTP01RWjh8zZHNln0VoXFj6bRMgGNkJrdM/vPlv23hyySvEQEtUcbpa7UlAH80+ZdD/8kQ4GyJhmfLNTZjDEAUM1SpBPxWRwCkOhwqxAQIQGaQiYig77uh74PzWuTw8BD3VLLc3+4Oh+M8xxhTztnMAcDQ8SoQa7KEknNOeZ7i8Tjtdvv7+8eYihqwYxGpPD+VTKBBpOSyi9Mup9POT4RD79YrPycWtG4w3ymHDFRQE1pEywAGTOAC9yEEh0hmJkUAgKoisImKipQK0ZhaKUhMfe+44oXSIsqctRRuq6Y2oEYVxbBkyMlMgYuaWs6QM+RkxGTmiLx33sxUM6KZKRJ0wffdk+koADjnXGUaILE5MkICRKUAoManyV8HyYl51fRGKipL5JxjrrgHABgqsCF7T2AMbRu20403A0MPRAgKBnhm8YLANawgMLPsNEmOJUuzOQOyRTrGqr/WH7kO1KtmJcs0xnkuoEqIWAuVoszUh8F3gQYCU9HJqZZ5TsomIAUkVglhVsNSvIoJZLVKzjQpjWWptfVApUgqEotmVanFlCQlFp2LGrjQ99166PveB48EIvLjvbKFS1Tl+2r0jtxs+5oxhp2sba2Beo5bawwz1sItEzYDNCLmM7ZO0+V4msULsHfS7TjNfDvN/4VP+aPL3oYJLHn/c/KVNY6q1d7xSvAnIMIaLkpD2AkcGtUWAENENu6AsRH/qLYqOXHkcNWvNqvNetgwuuPuWFKKc7rdHcfjKEWIyDvnvGMCzVM8PhwfnA6rXDSlcpjS7jDv9tNud5xjLlKqwaxKqV6XhJYNSsrHlOZSTtmlY7q6XL1+tQW2qWQOZhhzyYZSNCuoWt1TrGBJWZDU+eoyCkTYhc4xEVQi/KwqzjkAFCmI6Lw31RhjKkVVwcg5p6pxTipG5BAZgFSh2vyidQigYqJVfpUQPQKLWM7KLAa1m0LUCgJ0wXWdxx/s/IBY+42UQFp2ZaALxHS6qQtH05o6RUW1DREXA+5WZofWakwM4CrbSFWamJpVY8ZqQHmSoFwwInRIpxATHQGhtqBHDACk1ZEIntWi/6ijjkrD2l4
BbFXVyASccQeu527oO0TI4j2TiZUsDhwC1tUOkMwILDT02bKRICjWoaOimnNJRWKWVKRUsaEsFpNMc4nZlNh1YXW5Wl2sQu+ZsZmL/vg42/brpKtIKIKdaDk/mIJN5RMAoRUF2h9HztU7fBZgnKF49UIiagvhKvm+9vfWp39gy/vjfd7aj59UWLAFNFCoTZ9WIXOkEzu4CmXWnJGqq9Vyc0/LDhGRA9c5T446FxhIUpkOk4y7NE3jnHbjnKdIah0x++CDC8yoUuZp2u+sFKUwp3IY0/4YxynGOZXS9Hq12m+aGWDdsZJIlJK0nE6KGbebcH29UpJDE/HIqYCBSKXx1tI+shVK0dQkqBKhCjpC5eX6gtYcrdFYyCGiKdZ5W4rUUhozlyIp55IKswdkaFQ3QeQudEQEJgZG2PiRBpizmEXVDAi15VS1kGPvybsGn9XD1XCdmBRRwVRFc4EsoIKLkn9rF22sTDsdJ0jGzIoUrE1XVaxgEWxtjiotYtCTEE0bTkgNBzNVqGZti1A8IjhiQu89MIGQgYBAbXBpK8hP0Fj/0IGIyM71PTuuSgnKKGhFs5CSQxeAV34IXQDaIiALqnj2AxMZZKhqC4gOCZANyMCZCYAYlFzSPMU0z6nMMaUs1cwp5GyHYz7OMkUzh8O231xtr19sV9vBBWoNym1enE2pijKa1mCfG7ut5oN1Da7k7yqUX/P/CsYZETCCZwpMnoldJcrU4KoV6E/23ni+EsCS3Feo/bRKLLgjnpB/OH+VAQDVrQBP2Nzz5eFUfTUTqU54aGBY+5TUkMAhOALGpucHSA7REXliR+hOsQ8zU4eEpJjH+f7jzeRZ45xjnFKZc8ZcBiBzXpl97zdd54mt6DTORZACzLEcpniYYsoChsExI1hWqVsRIiNTlX2ECt88YbHM2Pd8uQ3o1m6kx8N4nKWUUqryGRBbqJgsCMURYizREbMQgiOUFJNDxyiiU8yiwkmZ2DlGBKs2XgUAnOfgA7NjAwUsxbIUBSNVFAEp6jh0IbBzlbQNIHX6llJSzPMUKyQHZFKKaaEGuf3AsefE6TCoQu4lJchC1Uy0wuxtS6+uWnUW150fmbm2nEppA4eZnXMqDAsdHdWklCItmjmb/ERElWAk1UMQQdr4MkAkT8qkFW1WMZVa7jEzE0EROLsxZiZaiuTnrX6nAVpRcWLXCod1+bDS+qJBlQFDCLV+AkhZxBA9MiErqkAukg20VR6hGBSzbJAVYipxSsdxng9jnOaSC6iSmeaC+7GkAkYcgh+2/eoydBtynRoUkQo6pCJZ9JkBiYqKGC3yRUvs1foZrNEYns5Nm+6/EQATOapyWs1R6wmKg0U4dbEy+cG1atF+DeuWNeGpom9nP59N72Vyn7OGTvellJKzlCJtSyAlJSQlBOba6LjobtWuFCA0QK7mNUamaEAEVK08FRUF0qRmqXgikCJSYqlegEYiXg0QHSCLyZzm42wKPht1cDyMu93heBhLjKaZGQyVzKRJQ9UU1ZJIyrnkcgpyAQDRPNuqBw7BeSBUAtuPcvLfBUCqxDTTLGYAiQrWHmqyVr51qKYxZ6lVIiLvAyFW662cBRGzB5+YGEtJMZaUiqmoggiogAl6h56zFjATFU25SNGSLZdcchbJBpnQKmuic7juuffkz/M+AIcNdWh3yXK2nK0ItmasqmLaRkFDlPVJY5SZVTsmUmnUkUrMbHYzAGxGrf6nstC4wYAUGdkxA2AxLWDSiostDAQAqIPCMQAIqIph1a411ZI0JyvlbJBpzjHGEUyZw/LwSSJaoWpMGlfmi6gWKTml42HWqA746MbDPK5W62Gz8b4joMBIWggtmsQc57gvkggA0NRUtYhGkanomHOMMc1zOY4yTTZHSAlyoaJYjELvNtfD9kXYXnX9CpTGWE67vonkGMec40lrwUxTkZTrP7HK2bfQWvVU46TTo3U6Yy2ow2K4ZWCq1d+krtw1ejO1k1FOA/zO18mnjXtZVls83/x4zpeK+gJoHXh1YW49fMtv1grzPE9D5509fdGKA4tzDEqmyKSm2cQARY0MjZwhKS6kHyZGRkBQRTRASszRVT5w46iKKIiKikklh1k6jPv7XWC/ulCfDWZ9fNzv7h6OD49lHqFMAihiVXxBTaqoiKgmKXOMMUbJcgZRGlruqHSdG0I3BBy8IzOTSbOVIsUEDVprecUA2rKgiFrLNMxogGIqKkUKGDjnqEJYBXJR1coYqvojVkWLilSHKgUjNlcYJR4RIOeYczUcqgp99SYJgDBbCOyHcLkZXmz77eCep/zg/uJf/V/r5i9gpRqp5mwVvLZ2J0+vUFAxPc/5mdn7wIQqbRNGXOpEdfJZTayhKjOdTX4gJE8OEMSs4GnAWAOyoLWhkmNArIHwScen5DxNxz/5k3+z6tf1u/Xd+hdf/Y0BeBeY3E9OfgBEdEsUYFYkzXk8REnisJKSq1bSxvuOib1zve8RueSS0jzHg5RURdfMQFVUU5G56CwlFdGcbI6WEsQIKUPOpkbguF91ly+GzWVYb5zrAVCsWngbAIBoyTn+8qu/6bt2Lt7xZy/WCLiUwZqoPTTwrG2t2ExkT5e9lQOdo8oFOqGG0Dj+Fas4CQG3OH2h8bQfT8Ojfmg1c6nP2rMnl8WhBiUVLDI12wwdc3t+s9n823/7PxPhdr3pva9Fw1qTNQPv3BB8F7wjZCYgrO0xaOiJHVJFMGuNmGuU2pJHxNq5jlXfrbUTQ1t+ABDJ0Xo9vHx5tb3YdpuVCx2QA7f6MkEIqzevXsU4AqOa5dSaVWosKSpRJOccY/qbv/m3682mzRbfv3z9SyZznVfAmGx3iLf3x8fjNKeSpdQEtprrAKCqtSY4FQClhtc2yTOpYgTWCM6ADIJFaoNyra5U+1U2tCIqItUsiMExOQanKlWksIY8zc2tWSwIM3hPqyFcb1fXF6vr7ertF38ewvB0B7//7uvTGv8Us9kJ7DmHluz0a08FADyNwPNYsIH37Q3Qzt/rNGQWiP9ZKaj+8vLry9B8jjO1tVSl79cvX74NoQeAlOf7h3dzPOLzxObZR8JSVFjiYK3l+EXHptIQiLkWPRaeafXJUFU5SaC1OLyN9rbZqbVEpHIQrEljITM1bzVGrMqD55fLTE37bvPi6vPgBwAoovtjTEWfCHbP5tpPAn4LSrqcCC6/cXYxzh788eX5Q4//Uc8/nY8ZMGPnfS1JTNP07t33x+ORiWlpcG2wLSACcG3jP/uidcjUUzitUU+rVGv6gOUUnwM/dnq6VZy9d1zrnEiAKCK1EFhylfdriER72VKwUGv1+fV68/nnb/thAAApaTzelTLXBVUVpJK4RHVht/zwotRx1mbL8qOR3Zb86GkinOzDzgGXtledTg1xKcW0jfIp0VquSP291hRRL4Bz3Per1eaFcy0ufqam8i/Hvxz/cvz/z/HPK5b9y/Evx78c/z9zuN3hvjVcIi1xAiPgSUm0VXmejv8vWC/Oy5BnjwLA87j1CQKHn/j9/36HqqWsckYp/8nw7Ow7nwK4Cjk/RX9np9hedP6Wdp
5D4LPA/vypnyjwnV9HfPZPM3OMnauZOBSx45xz0Z++wH+gWPuUtTzjFZ+eXfKAn3rtT7w5PvvvJ1/7gwGwJJpWRc1awlGdlBYGxFNo/lPf5A99t+cD8J/47gDQuMFVP4cQnfNEtET///Tn/PD9G0iHJzgO3H/5+v/hfLfqL4LvEcG7MPQ9kSu1MAdLzuCcZ3auY9efrssT0vwssfw/6aj9Sed0RTiDLWrCeP6trApamJ3gw7N58VOLxf/px+lcctGPt8fDXOCMALC4ElbcEp5Or56OthwVar1pUf5r9Tc4vQga5NEY57Z05yMsCnsATWNy6TI8uWuB1iy8Yrpt1FdOfvs4MxDVi4F//jKsAgHAcc7/8Tf3nx4nVzXDTl8Lq+JA0/JYxAEIFg+v2tPbZBtb62mb9s3IBFrrMSzVYTjBArVxoWmcn3/ZBoAyLsJAVdGoQqPLEDjRmdQ0eHe56oNjqC69uRRtndOn83nK5Ze5QMs/frxAnNogfnT88CFEy6XEOM3zNE2jd+7q6mXXDdborm3UwIIPLgWhpzLtyfxA1XIRBlh1wZ0m/83Dx75bV7EgQjJVAEHElKOqITKioWXH0HWuC2sfLhx39b4wuVPi8Iewg/8+k2pholXf2+VG1lW0mRYRNTHL/wExDxGb5rI/JAOQ1qIM1dTh/HouiwXWCqBq01iFZqXTJvnSxU14GqtVDOmEUgE0dJNaI3+FLetyg018h2BR+YXa1tVGHAEv3QnL5EfEsuhf5KI3j/N3N0ffehCX709GqEDnkx8XgBWJ0GFTi6AmbdnkSWqPQ2tnayTlp6uCrZSjgKcOqHYCAK1hvHaY8dPkr0vCSSasFSINKv0MbDgVPaGYpSpaVdfL5cOfllUArN4ycDb5z56ls8n/B2cHAhioSs5xHA/H436cDsGHYVg5559W9tOA0VpoX2DoNvmf3LmKSEzZE/XenTyInOPO+349rNfrC8ZqEh5znnOeDMC5Xsp8HD+AzuvB9f0mdC+Cv/RhG/ymC5XIB1AVCc9CodPxbFFYtpifPuE/4qjvRUt7P/wowsenqOZk/KKV9aOmqaRpPhTJiNb5fj1snevNyOxJ7u5/hAOx6ha2TX4xVaDaCoPQtPqb6S2ggVUOqZI21ZRlQpiBGbVkYKkf1cdRgE50/EVXayHy13vZaJpNd4/AAI2qFSBUr7mnZqM62hZ17ad7guAYAuNTY1h7vPb2P03+0xSs/7kmFYOIAFXIFxWw7bfVvKkVAmlpLoOWzagRNGFVWiIaWM6lWQ/Xyd8iIjxflhYE3qqWzLPdgQjJlloKLjdrmcYncSN6igKefpKBQbvmy9eBszD82aGmJTcOyDwfJE9KVsqsMrjQM7tTO7ypQRW7Wr6/IZjasoCbquVSUpzBObOnUp/bH27BSr64MFsDoGqe50NKY9EEJjnu4/xw//CPUg5D36/XLzYX82ZtwN5xr5oRKmGkdbb8GEE8P/lTxPd/ymGmkvOUypxLFBEDmOL08Hgb8+yZtpvt6xdfXKyvmIf/AYGM0ypWx5Yu/PuqOQda14U2jFq7HVGN/0zPrzlqXRCeRviSzVdX3LpAtF3shIBAi5ahgT6EbSkwQiMEM9RzUX9ttTfC0zLT3gqAsXUZLMzt9gRi6wVtp4aIXBVRiYg8ETEter5VpkitBtvWlJJ4mXj4PCaqpWqqXSatLfB0VQmboNBiTtZe/vz6/4FxakvJ7dSAfb5ktM3pD7722ac8FYmX62GNMwEAVkqe5+N+d3d3836/u8vpGLoeVEvKq/VF6Ial3GillOrVs4QUZmCVlowt4C855TnNm9X6erUG7+snum+//Q+Xl69CIAALHFTLcXrMOQGC5jlPD+Phu/uHf0jp4MPF5dVXyJtheE3EyFDKVMDUjIiYApFHtNPy187KTtnVcs7tWYV/VhRQb95ZOfPH17Z9alOZs6JxjPcPu5ubu5t5ntjxcR4/fHwf4zQE9/rFG1BlxPXaO+anL/zfK1V5fiyDrH2TtllXCNYW+q2dBlt13IHWDvOcuostUNKnt2nXv237bQDWIB4rFIJVdxMQGkmQkICQT5klVj5hVS1RMyOsOrY/OJHTZtsmGDYcAUAXQbH2RavEWJPxRSCqVKW2eQPAAoC0EBfByOxEQGoXatEFb2sLNYlAeJr8iERc3Y1NT4+d7cD4tIn+sG5vIlJKabyEJ1wFGpXqPAhdxAhOP9WQYJFLNxXJJcVS8pKcooFK9Z4XS2kej48Pdx8+ffjd/d274+HOh+Hx7d3L1z+/unqz2lyELgBAjGmex+l4SIsmv1UWnIiBEXtVm1OOOaccX7968/nL16thVb+h+/jxH3MeX15/th626jrVMs+7lCMiSTzMh4/j/rt5982cDsZXSMPrN7GKMpWSpMymBQCc67qARHyejWCzZLFFOahtCfbfPLvsD6yslQMlRQEhBC9SDuPjzd2H799/ezjs2blxHj9++jinefCupHS5vd6sLof+Crj7b/k+/58+TruQAVRp6GVRqDHXaUOhJWL/keDk03HaZ+pOZU9/NzVZ2jGMm35+BeUW2LBpTQPUDRhbzqvQZuFzqtdynMQF6Gk/PYW+cPrqPzhlPNmwQO3mR0RuyN+i14ZmTzwoAwCgRQF5meanXKOFUe1RaJJQ2FKe+pLl008n8qNxaq2PWgAB9Qwk/PGC8eNL0aZl1pJzjnOc5vGYU1Srre2gpjmnUoqIxnkaj4+P9+/vPv3u8f798XDn/JASHMe4e9yvtld93wHAPM/TeDgedjFFW3Sn1LRIMUDnOzWKuaRSUomgllI6fR13d/+BOczzWHJkQNVcypzTQYpI2sf5saQD6UQ6RfWic43ftMQpl5j2JpmYu7BhCkSBneHzma3abFVElZGJ2Xm/ZHItBlhecZYVnKGjSwRhBq1/5dSicv5JqprLnGKc58TsvL8U0cfd4fb27u7+dr/fAdgU424/ppwS2eCHx8f74/Xu+jKGsKoYAv6QL/bf+aiNtfUC1KhXW5xK2EboE9USABFr0n82yay6HaEu9linANhq71bbc0yb9x0AoqfFIQuoCqkWBTABQyRFIObWvFkLw0aiZtnUiraWkOUU8CQghT8sqSzNRfXvUJm9ADV/MVBRAAEDA7Lm+EFYWyEZTAngNPnPw5ynq0e4cF2XHAoQ2kLTEv4KVUALaM727ROkd55NwLIsmTW9S6gKFNWx8CnkOCEIS1mxfYtc8ng8jofdbv9w2D8edruU5uW+mIjMcco5l6Ixxmnaz4e78fH9NN7meOQwGvbjXG5v991qU3f+FOd5HqfjMaWoUjP96gJWANl1A7qghiKa8rgdViXn0+m4aTrO87E2ljjyIjnFwzTe5HmX4y7Hg6Y70wlARCXlPMd4PB7GqSQp4/QAWvp+WA/XTA6JsTK0WxKHZlZyGsfjfr+LcQI1Ivah64Zhvd6ErmeuRjP2dJ1Olxl/YhNRgOYMCmAiVR2sPaUyHo/H42GaZu+7rnOHw/7d+3ffvXt3HA8pJQTIMU9jVJVh3XvXEbFameY9O8cU6Kx48T/UcZrKL
XTGZY0yaiOukazpmYlBO57G9FME8ZQ1NKzb1FRMS21rRyFFdlaVZ4grzo5VdA+bLodZUVEpCcwQxRAN2X5aXkUJtCqDn19gbBMGYMmT25ypm9eS41iVKq/SeIQN8zc7XQzE52jCkjTBUyJu53BT7VQ6N65bLIrPJr89WZGdvS0UkZxz7UQxMwTw7JqM/tmSQcvStkQcNZuyaTze3n66v/10f3+7290f94eUEy6t0CJljlPOJRfNOac0l+mxjHeS9iozl3Kk+1zwcEgcVuwcgJWcUkppnkopVunqqtVpG8n5YcNdjxRKydP4uHvxqpz1wjkwVNGUppim4FciMo+Px8dv58O3aX4QzQCzx0nJifop4s3dLsl3hhbzuD/eEdr1xfX1xVSbR5mrV3ctzoKpzPN8f3/3/vtvH+9v0jyaCfv+8vr1z37xy1evPuuHFTrXugnoJC1TN6yFKX2SSq5dG9bCJ8mposGMDAA5l4fHx8eHu5jmrutCgE93n3719a/effx2uxmGru98QB0/pgdCfv3isy+/+Pnr1591fbc73MY8X25fD91m6Qv4J5CF/w5HnRVViJgXGw81U0SwKnPB1CaeQFX7sMaDMLNq43Uag6rN3NmaIxoiVjlkhGIkyVALOuXgPHBLbqntvJ4JjLSgiuWccpznUSSyFg6dX7/AsFr2uadjqecvAn5nT8DpEXzaodGWuVW7XhUIQQVViZuqCTCCITKALfv7aXrXJu22nzccetE0r7MMzUBBdcl7nqKPs4tOFQ3x9ESBN9N5isd5ZjSVklJGsxBC8I6de+qXVUOANgPUqva4moqU3ePDt9/+/sOH7x/ubg+HfUpJVVtxRFWl5JxLkSxQRE3FUoKYIRcCYRIos8zHksnGWNtUxUxFVAoAEJEaZJGSU54jkDfuPRt7yLkcdo+H3WP9zXq4nCQlSTXUUCii8zwf9g/7+/dpvgU0H4x6YecCeccuF3087KfpYXe8edh9BNRX1y8O13dz3Kc0vlBZr9D7gcibQUrz4+PN7afvP3349uHuQxoPaurDSkq5ur5crTfkfKDqH0O2DAaA2m+bpSQk9L4/mdjhUhPSXOZ5RMTgXW3gn+f5H3/z+/fvv0Msm80q5un+8e7m7tPD4yOaOfLb1arvkNER8arfdGGVUtrvH1R1vSrb9Yv/EXC+f/IwifMYJwP1wZNzSB4AtKghAQeAn+QstB0MKyhlVqPWpjMLvgGISECsUtLxwaCE9dZ5ZodVcEFLVksGhnOBkrHMlmMpKc2Hw+EhpyOIDJur67d/0V++Adf/oHrypD6Kz23/WsD37OeCoqtphePaxl01I7UiZoSAaFWX3JqAITMDViXkKvu4wKUVbHuKfuoHa+vROlvnn91+W9q2zqKpUvLj46f7/YMjNJGYIqiF4IPzzrfmYjCon147MWsWVKNgkbLfPd7dvXt4uNkfHuIca/WCaWlnbCQHINDquWtIJZmYWhHDQiDEYiwKBQxUrGpDaxEAZGYDEBUFBQZm9IG8Z3KU0aSk6jF3Oh13PMTVkHOuwoBQRGOW41huH2Ka5uBhvYJ1sM5Z1/luPfSrdSp0e/vp/cfffLj7Xmz+eL16df3yzcvfffn6U0r65hVfXr5x7ETL8fj48ePvP7z/3f7xUzw+SpqY2PUdY4lpHOcDdz06H3ygGsAul19MY5yncU9EF1t2nathGUHTc5Os4/HRDNartXceAPb7w//9f/sPX3/9dxfb7tWri/v9bdGsksng4f7RjK4vX/f9ZujXZmbgdvvjbnfXd/7q8qpza6bQ5pLqAjj8vw9NLlncf+tqstTnCUBB5Li7++53/1hkfv3Z6+3VVTdskSjNkwF6v0Ie0HWA/AzIM2hm33jaXG2B3MGgeoqhEZB3anl39z1C/mzz8836gkMn5uOYcpxSPMb5kObHMu1l2mk8isYY98fDzTQfS9GLVz/D0L3sV0zhx4QZPtl+np8dAsAzsK3myk01sHEwK1cPzUDATBuDSZrPV63lETOjGTLVSL2+0JoAQa2GVORoOW8AOJsGp3rD80v/FILUx2Ic37//+v2n77x3CCClgGlFPpxnVyEJMGi4y2Jmf0ocDFLM87xnlmEVuj4wO/ZdCAMQ51JKipInlRkgAggSpKM+mD8mKzkD4cbxsO6o75E7A4xRdvupHEsSkVylcICcsqPQh74fVttVxfy0YAjk/HMln2nEeYI5ypxKn3MRKYKpuCnyNJJnFTE0jVn8Sgskc4cxyqfbjze3nx53jwKxmligmceh96+8q04Dc8rzp7vff/v+7z9+/DYdpzJFibHr+i1fOacp7Xe7jykd5/XFxfZF6IYTEVOlTPNxno7zNDrvZbVR63CBWbRoNn14vHv3/htm9+LFq6EfAGCe429++81//M+/ev1iexxfoudhHXofNsPqcJzinPf7ERYdoOPhiKAECTYDXm6dq1WIUxH3BJP9t8zc/8MPHI+H3//mV+PxPh6/ePvVV599+YtuWBuJGKBFNAZwTRlvGfQAcCqPWC1uSQYtaLkGBWpVebWopjjvHm4/oMWL69Ww7r0Lababdx8e727n6THOuzI/yrzXeNA8Zk2qs1pUMMVOVdRUVfkEyC7HOdr/fH4BnCb/CYU724Gb7thTe3hDAeonGICBmFTP3Obr07J81bOaoj192DMg/vxb1k9Y1sSmQI+6cCTqUUp8fPz2083XwXdEZLXoQMzIrjpmn7SGkQh9pQjV9JWAiBwYDsMQQldEENn5jl2HHMQglpJTlDSqjAAjYSKWyadp56Y9ChiDUsAwkN8wBUZAOuphklzm43EuUZm569xm7Ya1Cx32ve9WyKS5SElltcF+dW4EDS7GYYpuPJbDOIUwmWVFB7wCWmcd51T2Y757iN4X10/d5nZziUnl9vbDYZqZh86tOx8YV1bCNM13D+9cCBRKCMNh3H3//je/eff/vP30qYOVRYyHeav2WeAuUImPD58eJE/DsHnz5s82V699t0YilbmkMU77krMaE22K5aKFgMxMtUzz/LB7/P7d73/19X/arLe//MWfw/YKAHKRx93+46cHLdYPq8+/0ItN92L7YhX6EPbTnL777rtSJMeZED59eFfS9u1nL15cX718ebnddmq5SCKsFG5ZIAj65+7ez/Ghp/1fm1T7Eyz0T71Jy11r5z9g9Y4mmuf5m29+e/vx99PhI2r8/LPPLtYvV4GySBEQkEJFDJv47TKml95vNSmm2dIEUhjUkMUIPQd2Ukoa7/f3H+5vbzSP3YrN8stX5fHh8Hf//n/7/pvfpXkPGgMVB0IgZjlaDn13/erN9vp1v/1s++rnm6u37FdmqCq22BAgAGJ1dvkRLFen3dk1WQCzRS+4ljvo9CxQC/urAOzT6gBQKQe1IcHUFMGaXhk81RSeR/bPopDze1czCa0g6PnObXmKN8fp+1J6ZgdgiETYEXovXiolkV0IHbkuhIGwa0RBQyb2vnMUiAiATA2J2QUxnFOZU8JSKKSSBikrgCPjkXlSOXZr71duPiI4QK/cq1+r6xQBkmSDeZoOD3f7HMtq
HfrV+uL68vI6OE/siDGpZYPS9eP2yraX7Phs51/11U17krLTEpih72i1GoZhk+Yp5yQlqfKcfUnCcX9MYmRzSsiuD51zTEAl8Yy6g6OW71KOU953IUzz8ePNtw+H76Z8QEZHKxdC1w9dF8zSw93ddHiYD/d9N6Td7uLF2zBcILHInON+Hh/NLHQXq801aB7WVyGsDSnmeHd/97vvvvntb3/1j7/+T28/ezvPc7uTLTaQOabjcT7sjheb4WLdOUdFrZTHu9uHOCfPrgtOS1YpgIzEajaOx3H6lmntXe9c8J69D103MOMPxst/4/EDMvIfeahWL3Dnfdf1AwDuH253DzcIsh467UMRzSlno0ycBJOBaKXxYAX8iFAVcorzcTc93miegoNutekvP3e+R43j4f7T97999/tff/zwfcmTwHg8POxvbu/vHv/hb//X7775neZ56N2bl5cXF+vQd0DepAwXVy+++PMXn/1yuPh8uHjdr1+R605Y4+l4Vuf/0c5/HnGfY/LQFo5TV84CBy6cHYTnm/dCRoDG3oFmaQJP7/ws8Dj/xB+CJQuzoZGjTjeipLSf51uTlWOPBM4Nzg+dXwXXO3bEzOy867qw6leb4Aaipqjo2HnXORecc4gEqlXCOuVymKY5xiglSQXvp5S8JMvzlOdCgJ33ZTU4R8wIVjTPAgVU03E6PjweHh4Pj3sV7boeDBk7BiAhU8gCUjTnXHKkMrGlc7ay++IL2mxl6B493Hi03nVhi1z6fFh7nEwjWIeMMdHtjpLkGEcO3PUDc89sppJjGVOSEaZDeXg43Nx9evfp2753hBLTgXAaBg7oB7rYXG8v15f9qp/m4/e///X9h3dpt+tdGD/ebK5ehG6F5BUkpnE83hDx5eXbi+3L3fZydfXq4tWX5vvH4/F333/zH/7j3/7j1//lw7e/yX/5b+Iy+Z2j66vVy6sVE5Uc7+7u+o47/7LvXR+63nsyBRXn/ND1V1ebzcUmK97vJ+D7Tzf7/T6puqG7uNhevXh5fX197Zi95/NS2R9z1Ln94/3fno+v/9oSYA1vglraqH6L7urFq3/9N/9uux52N79RycQYhoAUTEFzEoPMfs5wsDxrSamI2v+Ls/98liTJ8sWwI9w9RMorSrYYtQK7AGkEPtHID/z3YTSjESCxD292d6anZal7b8oQLs45/OCZVbdn9gF4DKtua1F9OyIz/MifQAIiZvIqMg/Dw7t37374z/G8W3Xu9ddf/d2L234B+/1u//7Pf/6X//eP3/3r49NDKXG/e//z9991oT0dz//+73/ePz0h6MtX98vff/vVb36zvrkj30wpt+u717/775Z3X3NYMrdMrEapyFXf9XLG8Nmvz5/ldQl3PYS/KviffVbX2cvln9t1WnfZy1ffhwofunTpVu0lKtHAPqMJ8G/+X19uobIer4gSuMwNoK5C7Uu3YCplymkgM+OGiAIvV939evmmbVYhdMwMSCoYfFgsl23bBR/apmlC65znKkWIF08ekVJSiSk2zuW2KSZFS8plGJrdDvbjefdpnA8DJOmoadaEDgM4GVKZxFQ156en4dMPh92nMY7JeWIgKtO0M5yrSbFJUc1SSiklFUnp/qjPV31vv7Kmif1ix2xgA0AbPC+X4+1daTsmcERMzs0J253MCYDFeQhNYEeIJWcdTypZyQwt56RilOWpbV3fErOtFyihsdR0fnF/82LZ98RlOj49Pf7y6cPP+Tx7pDGeuk+940DkkaloiekUvI9DOndPxG6xffnim4mWm/2cfn7/y5+//9Of//Kn/Yef37x4/Rm00DX+929uh9+/LkYuhEXniS577MC+C2HRBlBFwCIas+o57s857E+PTy0onA7nlMFxt93cfvP1199++23fL1tYPDv5/3VR4PoW/f9fOdjlVa90RDSDtlt+9c0fQOL30wcp82n/af/0qeu3zjWE6AiRDBxax4xGKKlc5OxqN0vIaBbn6Xh4GHczYXz19lWJp19++OUv//qfv//j//zh3Y9Ri4GVEY6mVsr5PDx8fJxiDKERYGoWzXK7vHnRLLY3GJr1i+3rv2uW9wIMBmRqReBvIt1lqkhfPou61nm+UMX/4LO6nj8AsCtFFa8xxPTaz3zhGQPWZukyxv+1M53Z5wNv17v6Vbix5yPAa9SxX3kRGFpxkD1gQAsGqOy1BIK+a2/7fumcB0QTCN4vV+uubYP31YYQAaUUKaVaBYjmknKKMeVUSlGTil5DVUZqfBfcArXV5GA2K8bkUFEmLXPMKqWIlHw6THGYtGTP5j0QlpLn8VRkJjDEytwUVRGRrJryPNlzo87X32TmEnwmv08SIDvnmIPdvUo3qoxAjMymhl9HXwTVDIHYKYCKlJjLOEqeVTKWrLl6ibA0jVsvXNs6pC7Ni9NT13C73qy6js7np2H4OE27KQ8ZdFQ4HjMfd6zI6JxvfWg4UGOgx/1uv5/PU7d6f5pze/dm4mZ/PJ+O5+E8xhhLkc/DpVXb/A9/ePvKjZF9JhZCdL7xXAH8XdPc3W6897vDtD8NuzEW1TFFR7he9o1zViTGPIx5uVzt9wcDfP3m6y+lZcXD4Bccyv/+0b3imulvelr4363/P9ec179U1JQzIK829zd3Lx9Xi5IOP/7pfy0x3r36drW5a5qWg0dE4rBaNF3H7YTjnOeYchEpgIDr9RrKq3l4A/n48POnn/4ytp1HdH/6tz//+N13H3/+Ps7ndtW7pmFzJctwGs6noRRB3/jVLS9uZnXnKH0s3dKv777pNq+cW0qxIjMAEJFBVWX/8mxYTyHqdYD6efaBxHRRbLzm4M8gmcu6HgwBBarOujEaIvHF9sKw7vixhnioWzJDU6wCsBeSIFwVjavbVQWJ1OAA13Gjgl65BnDZQhiS1RWJXuE6gOZc2fjysvVLNp+LJAsHmAnP/eIWmdmxZ+eZ2qZbLVdN0wCYSB5O52kapnEYhnEYxylOOaeUcylZzBCJ6SLziEjMjptmfbPV/MaV6en0cTpORA6RCLOiRSvZTNTmjL4LK2Qzq6lujhlBEhOYEQI5AgJzoAgqKiT2DDvnXJMQzWhOipIgKgZh7xw3FMg5JmImEiRaGls15jQkqqbLUkRSspJRCpRkqZgomEkIuF6Cd1wKD+JnbAK3i0XbLVIqh6bfbe8TMObMSTRr0SyQDZSFCjoh3wnlOac0xf3TiYdTCq4dTiUsHp92cTiAzATCz56kc/R3m+7li1X0zYw4iiYFFVBQcVhCc9vf7Ltgoh+fzvv98TRNwzwj2LJrtov+5XrVEQwlHw/77777ru363/7275aLVRMavGhD/1e068+O+uei8b92cAiqF/YJIqiqmRhg0y1Xm7vt7cvzLh2fHtCwCMSY+uWGnS8lh9Bu7u9C02HniIEJYtJcEIEadjI7TyJ5PB2fnuKkEkvWH/7yw8PHj3k8OAeefHAeVZUMEXzTrEPrl9ub19+8fPN2+/qV69YxyjTMy60RIoGCSXVlxYvM72cnwS9P8+xz+NyE48U18NqmwzXBX5eSAFDRYhXhD5WHS58ZywS
EUHKe5gkB27YNISCiqMYYVYzIMzGSIVY8MrEDvC524EvrYXAxqKrxALDqBxhcZMKvF6Fr/c2yedW3KwQadYpJD2Us9uSbPqaIqkzYeL9arNJN7NpWTad53O13w3BOOc0xjtM8p5hLKSoiAojO+fqLnWd2TQN904XFcvvitcXx+PFHKbtYkqoxoyJkkIxW9xntwje9q3iiIqXuNIuomTCT98SekCsmizD8atfqnvaRGEIg5xDBmLBxGpx4z96V6sINAEjVRuUigQKEhOjIGsQVXmRRKk4PDMyQGZsAKerjpyLZUBtHTQjUL5PywbXD5i5M8ypnTVliLilazpYTxoilCGiCki1JSXF2cy5lfvgLnx4M/DhGmx8XrmCLbUOfefje7EWK23kokhNzNChEyiSOcsPSdOqXj6eWVUtKT/v9PJ5FxExPedo2/IdX32zWm4/H/Mvj4cd3n+w///Gbb37Td+3bN2/atr9mYvycvJ9Xtr+Cf18n/AjIxFD926rq/f9h7LABfFFlrSgXQDWtVLeuX9+/+k0gS+PT+bgD385Zluuohqf9vm3bb0u5e/kitAvft8H5mHXOZqreykGO+08/PPzy58Pjx+E87B+fcpLD4ZBjbAN2XeiCcw5U1BpcbZcr14XVzc3rb9/84Z/vXr1dL5cWx8O77x8/fgAKanr7FkPYKJDYRZkYL0CsL5/JZd4Nf/WJXZdg1wWeAVzkdC8ekBf+QO3nCazWstUQy1fXOy3nefrl559LLjc32+Vq1XVdKbLf7+cpATIjseMQfNc1Tdf2XXDOcUUu1S3etabDqiOMeLUPMVEtYPSsg2Hnl6v77c3Y90uVosfdXIbDeTiMcZxGNBv2u5JjE5qb7e3XX/1mtVob6HkcPz48zKV0/cqFAESAbM4ZIIgCEXqPnoEcEBk5QZ8Ugg+Lu5cmeffhh9Nhd3r8lNLsmwY860VSSZ3H0Djn0ROBWU5ZsqhUgw9wgZpFaDrHgUXLPKnrPfKXl9CNY2aHBq4oghkRiFgRCqpF8HL4qygKGzE4RGJyWAENRkTExszIcP1iAACIiBljgnH080TMTdt6F5ILp2B741O7BpHqK44pY8qQC8QE06gpQkmiyWQuflGkgzlncHuAswkugob7dLOkefIvX/kQrnNc4tCv/GKjCAWsqFZWuzIWRnVk3nWtl0WQVZiXjsVH88UMDO62q29ev3j14uXdRO3i4XAaNaf944fHT79s1713jBcLkufX/1YdUHFdUu0I0Aj5vy7vw8UdpZLOri4YSARIENp+c/+VIzk/wTydTofHYZz2uwNAiHNcLpfrzbZpmiVQaFrPhOjQgUpxIqhpOj0dHz8d97vj4XzZZkmpdaIZlKzsKbRd3yx8u27XL5b3b2/f/Oblt3+/urkP7ObDUxzGMp5jinE+oUXPkoFNKtHm19u859fzIf4FZfNMSaj+FtOa9s2qHWxJ6SKHzgDMF5gQIxY2U0lxfnh8+PnHH2OKw3C32WyWi6WIPO124zirIAA6pqYJ/aJbrJab9aLvOh88M9ed/GWuYqYiJUkRUckiKqqllJTTzfrm/uYOmhYAiDg0bdcv226hpUwx4ZxSPg3T8bR/msfx8PApzhOx26y379592G5vQhuy6NNpUPLbO9cvnQ+OvSNygGiVGOg9OkZiIEJiI1cMHDK3Xbu5W738+rDfP+32UzpnZEfONx2hpTiAlTY4RmI2E6sRSzKIARAqoCpIAUAQgywo8qu9hjMxQ9CiWBfpdWpqqmBqWEQrgooYGYwMBJDEirt45RICU0ZEvWCp7LqMBWY8HvoPD106+7tVt1yz80exx2xPGU5ADIhMSN68YmegRiKQM0qxUlSzScac3ZsEOZsYqGQrZoJoXEozT/Kb3zR9f0X+N218+7vsg82jzqPFkUpmQ2fgk1nJgJFyYtJ+EzawfXfX7YVmdMDtVy9fvnnz1av7V1tou/U9Es9x8hDP+w/H3To4brsl++a5FsN/6eQjopqllFOKMc2I0HVtCBe/vb+ehP0XruuS6VItXyvmekrENX5x89I32K+a4+NP73/6/vjh5yxN0223d2+I/XA+Pz08llz65Sp0PfkmeALnqCgjSsrTMJ72p9PxjM41nruGCK2Uch4gi1vjan338v7NNzdvfrN++e36/utu+8L3K+cbB+CoRWzieS/pBEzVDpgVBa50GlOAX5F77fPU7DrWqzIapgJXEA1VPABcWDqqGlOe53kYhxKjqTnCEFzwLjhGBJU0T9PpeHh4/PTLu19yysfDfrlc9n0PhsM4zHNKSVQMALyntm1Xm+Xd7c12u9lsVsvlol/0zA6q5pdoTvPxeDocD6fjaRzHXEpKcZqnb7/+9puvvlktV5cAVvGRqOjItZ2fJgJI4/nxcDwdTsPpFKepqDr/8Zf3H25ubl+8fNGuNur60C+hXULTm2dgB8QAZlaQoGKr8eJxeam1RS2rQbvYfvW7YRzfvf8l7Q8lSuvc8vaFmh4PY8qTFJcbZkYpNo8xJ1EjZHSBWCHHxJSBWDTPZR5eZy3Pev5qF1cyXh6NUNAyXOggF3d3R4ykomjAQGRGlZ5pWo86gBkZGKIpIZC7ZMXD0e+elDI2L5p+gYq7uXycyynLjOQRiD8bdtbEyobOVNWJmpgqqFgnKMWqZ7kVBUNCL4LzjNt7cP76hrFP69sopZz2QqxgTM6TcwBsBa2gSsN0v3b9Yrl58fKt2t7CTI257nZ78/LN6836ZsmdX03ctON49B67tgFTKfmyrrtuky5Yw8uprKpVpGYV3iWlnM/H/X632++Y6P7F/Xa77fvlxVjm/1j3X42vasVgZlVm06RkiYjmHLvFumsEdNp9+knz6bj7wH7vQ9MvlqUs0jyfzXKMTb/wTQOBEQxyyfNQ/XJyySkVNgiOvfdtEwzZNct2cXvz8u2b3/3+1Te/u3vz7fL+q279gtteFA2MzbDzK27bxSaNn7oGXKX6AJCBaEUS/83SXEzFsErfISChoeIVk/9sNKiAJCJpLtM8H0+n0/l0Oh3zPIOBY2obH4JvnEPQInmexvPpeDwepmEw03l2hKAlA2CMKaWSs+QiOWVE88HN81ByjPM0Taf1erXerNqmQUIRTXM8Hk8Pj4+PD4+73f50Ouec5jiP0yCpTP+36fOzMFq10DJy3C7bpS5X0zCM+4f9PF6UNqcYAXAaxjkmcLxB71cdAyuQAoldkgB9YQYCABpV03lUMy1iBpUn129fbl+d1vdfDWPKKaFbol9riVOEaYhasveEhKXYPEYRIOdcYG9A2VBL1QoQy0nm8VxUnk37c6lylqoAjpABTEEBCoIZOFa4IMvNRMFA2ajaheKFoMkVhsGIACAAdHFYLMnmUaeDNGhd45peon3M6f2YpqJS3fxqJsgCoiB2dXq7uL2BGVptYK7KdOgAwARzhpxpLpTsMyQTUchH7+fGzdnFFICbpu0Do9PotTjDhlzX+N77jfdfhzb6vrhWOHjf9E3HHBTwdqXL2/tSEmLxxL5p2QdihurbI2JFVLJoyaWUUhC5azvngpkRU2h8iv
PDw4cffvz+x+9/ZOI//OEP9ptvgwuu81fXlVr54ufRAVZhrM/lxHXGUBOlXbWJJE1xPqKVrm2CQ/LdcnXz8sV9GZ9Ou/fj6fjwPvjQvHr9IgQ3n0/DcU+OybNrCNGglOPuvQvUr5cusCGYoYFj16+2N7f3r7Yvv96+/v3N69/cvXq7vHnR9hsOnZKTVFk2Wn3KOSxcaFebrvElBAIRVEWli5cjXPyyPkcxLSI5X8Q04SLxhwCMAFeqUUVogUmc5v1+9/j49OnTw+F4mOdRcqmqPo1j56g6rYKZSi6STbXv26YJi37RtI13TtXYUduAAaeUzqdTyglNc4rj+aQlHvbU9c16s1r0rfchp7zbHx4fd08Pj7vdfn84jsOYSozzPE3jZrWJc7yGJ2AEBCui5hx3i6VbguuQ/HxO03nKKSY3Y1SAi5VYUYtSLGefYorROceCyuycEICpolWjasBKyVQFUSmZMEkpbRMWXb++e/v6N/+M1B8Pu6yQoRlinJNMY5EkhGCAWSyngsRt96XxkqwqpgBiOcs8j+kq9gUA4FIBruaXBsaojGhIBCjgHKpXRCQBBJQqNsTKROyqwxISgQNiBr52aljhEgrzpNNZxmPhRr1n3+jZTmM+THlWFUauyC0zS0VFQOxi1n2pHgERUNU0q1zRF3VYeZHiLDFK1utuVs1SKVMuUy5T0VnJkBI5T8iQPfqGgoYFL5YcmgSY0Sm3hs4McsaDFAOpcC4CY3buYmdOZqAiSEpIIjKeT+NwnKfzPM8pZSa3XK7atmMi553vmsPx8OHDT7/8/MMvP/9MSHU0xcw3d3fsApO7cgY+g8//A4jZr4AunzNoydP5VObTQNp3YXu3aUJYb26Gm5vlqo3peDq+9x+7u9stVptkAMmgYzGLUqYyD4fdp1LOvsGuc4uF877b3rz46pvfvH77zf2br+9ef3P79reruzfNYutCD+DVIKuA1OW5Xjn0xI5C6BtXHGWrKgta92V/c9c1emktmAGwekJUfrYYVKcrBbNSSs75dDp9+vTw8Pjw+Ph0Op9KyabKhI6p4SrpZd5x17TOkyNywXdd1/Vd13WOnZrkXKSQMhB5RpDcgJU5TlISgsQJY4rM2C+bvm18aHLKT0+73e54PBzPp/NpGGOMIjmlOI7D+XSQLzRYQxMrKQkWQnGkwOT6dnm3vn2dsjaLrjm0SA8pJiSy+kpdRGKq47dY7Zgv7TQhoqmqFEA0AqqCVFkQAIC8C8TNYnP/+tu/b7rVfvc0THMSUg6b+7feeyuT5JRiLjnlLIAKWETk0jsUUzUlECsiOcbynKvgcjG5mO9KcUgMiBdZxeCxv2iiCRbIBUVNwRxD05JzdfSNrnAFHtanIAbvQAoMQzkfy3ROLRRiIg855anMU86q4p0QXtS0S6m9GZqCVmHaq5C+KuRsReCL96uBAaScxykNc5brw1QE27g/TcOQ59mKGFixsZCaRCbuuiZTM9Fyjvjh8XA8jnHOkjNY9g6bLrBng2rLUJrgN8v1Zr1abZawMEJukTm0qLp/evj47qenTx+G01FScc6t16vlcrFoG9821ra78/nnn757evyQchSBH3/4IeWYJb+NX9/dv1r2S0CuMy+iqn2GFXrxZSz4BQxH126/zoZtnvLh08Nx9+Ny4f7hn/6b9Xrh2/Vy++bFm73Az+8/Hd7//McyTl99/fs//P0/ru/uksE0nc9Pp9P+3Xn/y/n0eB7OAONy5Ryt+sX2q9/8/T/9D/+PN7/9p2Z1s1xv1jer0LSpUClaShSrwizXrTexKhSNlgpZ8gbUceURqklFItlfjzaQiRxdkNLVMVxVRXIpUUrKJUvJKjpN0/F0Oh4Ou/3+fD5P86SSGdTICK6iQwZgGly4v79Z9H11nm/btmkaH7yZ1aRdUhQBZgODrgmS593TUEo2y4RwOh5TmYjNMRJ5ERnHaZ5TTiWlnOakKkzmHbiqL/5522emKeZpGMs45OM5s2Djm14sLO+/blY3ks/7h/fuL/++e3woWYqKmhFT14SuCUxY/ccNsJpZe8dYNbdEWcDICqCpSjEmBvCEAcC1Xfvm29/fvnw9nofTedydhtPp8OLl3XR8HE+7835/3D0NhwPCKaZpHuNoelXORCBARwqlFMn5s5Y3AIA7RyWCwOAFnCK5au1aCwd0hGigRRExC6hZUWE2BXbuQhB1bHxxkjU1IYaGOEc4nfR0lGnKy6YACKAVkZQkFlEtikRIqqiCUhyoXZ1Wta64Va06HJeKHVBQvejJg1nKNs+S4hcUeSnldDgcHh9LmrTkyvswzUb1rhBZZhunkz6epu9//vj4aT8dR5S8aPFm0714ddP3rVzXlYloPg/nw35zWi1Xq65b9f2yX6zGYXj4+OHDTz99+P77025XinFwy9Viueo3XePbVtr2OE8fP/5yOp41Wyn2NI1SonPgHC5Xq+Vyeal+wcBMJIuISiZij1+UCy6o2MuQDxAZCck37Jqc5dOH9weX16tu3m6HMR4P4xxBFM3SNIzvTjHH2HYhg4TFpp6aUmQ4n0/H4xwnU1l07aLt15vbr75987t//IfXv/0nbNehbZcLNlM7Z5FS261qpImICgAmUnKKI+iknNyihe6GnIfPyitwMUj4Uq6YlpRSnD739iKSc8455jzmHEtOOeWU0jRNwziMwzgMY8yxlGJWnVHrawECRMzec9eG1bJfLpdWMjN3fR9CIOJSiuaS5llyHsf5omHpKE7TPI6lxCYwog7n0xzPgAXAANhMcy4qhshMFgKBITPkrDmh42drHlMtSdJUMo6DPB7mWVzTr7jpHXvXrtvFklybBbvFZhpHZuq6rnGu9a7x3vvgfCBm5xyxR3ZYl8HFwICUgRwhAwOSOXY+tOxbAEeuWTRtv94ut3k5Tu3xfDMNml+n4Xg67I67p/3Dp9PuaTzth/NhOJ/GaUxzlJJVxFCxiELJWXKU5yBG9zQUR9h6bD14owDgQrWwB1FM0SyDuwihgSKIqahpHdQDECEHQwGAyiQVZsxqacLdTg9HmaIWKSbJCmgyySBJFQsSAnotQUvQ0qCB40xYCLOZadEUdZ6zqoCrijXOBLJcEGJSsCTU8mV5XHLeP356+PTeExATOo+AWASJ0HszztN8fNh//273/S8PP7z79PR4GE5z3/hvv7pdhK9ulq/vbtcxFwBo226a4rv3Hz69f9cE1y0Wq+WmX66afp1T2n189/Tuw+7PPx+e9jM6CD4sD33rF559CNa1s5XhfMgiyIHEpnE6SHoIdLNayG9/6xwxuGu1UqbpHOMoMnnfrNzLevgvGRIRqYoLEJJDHwB4dZOOu/c56+np8U/8x+DD/jAOwymmo+iERMHDcNj98lOc5vObD+9++3f/p5v729XmBiwPp8PxPKeUS3ZNu26acHN3c3O/Wq39YsnYBtcE30IpQPwFDaJABiRoqmol5uk4Hz+WdIqcSG7l9oZ9ixQBtbZ79nlgcX3G8+mwf3pgZlONKcV5HqcxzlMpc85JSo4pTuNcSiEiQENQR2gIKlJKNpUq3iEI1LbrV
b/su8azQytojqhvG+8bVSUAWvVS0uNDPuyf9rujZAltaypxGpBMckJUlQJmFUCAiKZEnjGQ84ERr4ZcOk2QM3v/bBRjJiVpSaBO0nx8ejgMkVzjmkVo1+1ivVitQvfi6z9s3nw7xvGQ06Alee8b4sa5tu98tyBXhQ8ZkcRAzQSZkNB3LjSemdkRkfNUoT+ArlTrLCIX3NIF3y9NhSBLSdMwjcN5PB6H82E4Ho77x8dPHx4/fXx8/+G4f5xOhxJnxCKaY4I8iT0v++dkTHXChAVMDDwYMzoiQVPQjOgqaMsBUNVMRSggBIxQydsXv6jau6llZzHCNNp50PN5Pobz/vGpW3MsIoVL4qIuT8G0k7wgXXpeE5LCpDaLTLmkGDVFSbEYZtdl8gX56iGlgAAqoAX1mUptTunjh4d3P71bdw15H5GYedX4pmmIfJEyzfHxcf/hp/cP7z6dH/bHx+Pj07Do27tNQwib5fJmvdrvD1m0gkDGcdo/7cHUe9cvnpq2p9BpKXG/m96/nz49TLvTE7hI7BrsAq0Y+77xmxV3vkFpA5HDXEAjoOQ8nMb907B/Oq025HtCRwBpHvaHx2E45DIs+nXbbp276ghfpC+qtjSYWclpGs/n09Np/3Da7/efPo3HEwAcz9OcsqH6Btdrz+TJW07zh3c/zHFWpdfDV+vt2oCW2xdZACyg7eJ4AoBUbIo5plnL1GrvFTmrGTCDC1zEQCrOrZQSc55kPsfT03h4L3nwgbquLWoeP6P27ZL4n1X9Mc4//fTDX/7yY9f3iBjneZqn4Xye42SSRYqpxhSHYTCDru+C9wCgJipScozzWHImMGZsGh9Wixcv7l6/eLFaLxvnRNWzX3QdIk5TIoTNatX6YKrBhcY1u6fdaTjlFJGscd4REFHXeMfm3OcTrXVT40NgIqtKQFJMqQnoHXz2c1HVlOM8T9lCitM0nM/HEZDIn0MYuzmq2nKzWSxWzWK93NxKHst0BFXnGkBG15BvyfkKkUFksAvkh4lD2zU+BM/OOWauHid11p6LiQoRMhGTC8Ezk2MAsMWypJzSPMd5msfz+bC/e/z0+PHDw/273eOH49PjeNql6TgNB5OR7DleEVzFColgQshmsYDL5hx4NsfmEB2BI2AGB8gOiCs+kpHQyNADO6xKXKCgYoTgkNBQBOZJ9ocB8sP3f/oBqNVVEnYlhjj7Mq4lrjVuG3fbbu48+5THaT6dx6c5jjkXVUMDbiLaGboBfTSUy9BIES5dAH5eFcWYf/nl0w9/fvd6szDid1MKXft3X728u/GAJeV82B12u6OktAze3aydwHBICOx82/Xr9XrbNf2H+eFwOtM5TnOKSYtgjKLneDgNhlAMLSU8nflw5GmAHPdxOhRl0wVDCUzb1aJpNutuddu7lkvRKUnDJAq9J53Hx19+SlkhLMkFBkzz+Wn38XTaTdPp/v71/f1vum4FUGcpVc0OEIwIUp6H8+nhw48//+l/+fHf/+XjT9/vHz8BgIBlhQKk6JquyerW68Xq9l4lHR4fHj79dNjvv//uxZtvvn319u3LN69u7l/tth8+/PTjT9//ebffPR5T5ve3X39YLO5ers23HTohH0JYaecNIEcR0TKPafw0nZ/m4Smed/NwAIRuuS2CWdTXZcxlXvuZeXe5hvP5P/0v/59/+U9/vH9x33ad5BJTHIcx5/niAISYSx6GAQFNS/ZORMyMCVJJp9MhzrNJ6boQ7m43m+Xf/e43X331FV9OAiOgSBmHMU6jC+Fms159vf77v/u783n8+P7hj3/84//4//wfT4en1WrZt23ftd5R8E5ULmpmIKZSJxXEF21xEU1JmNU5c/TZPAFEdZzm0zAUi/MUcymiCmYqo8xJ4mRlSvP52G0X681mu16stmHzUvM8nseszMYAni0wVs1PdhcMIznngvee2TlyfDEUuwQmMRFRVTCjSgO48ADIIQESh7blENq+X67X2/vbV1999dtxPJ2Px93h6XH38MvjL395+Pm7h5/+0jf+uTOVQ0Wow9oCZCBVxkBUCQqDY3QEziEbOACuIsqGzMAAzgEqkiBd0H1VfwJywZwgZ5iTnseUx/2f//yz0nL9VrDtx2jD0YZdn8cWSli23qembZpSqCS2yJYmSVlVCIA0AjdmofBRcFLICkYGKVlOJuXLmDwXedydPn3cLUSL4fdPp3a1fH2zXS/VNM0xzVM2sWXbtETSNyj28eOxIIFBTOV4nCTrw8PhcbePpjlLzjmlPI5znOeiEQlC470qx9GV5AkQbYzzccpBjAhTQ8Jsp7FZd3fU9o3LXCaCpmAWc4Q+TWn38aglu4VQAOM0j4fjp9PpMEwDYpvzF7rll3am5DicjvunD+9//PjLXx5++renDz/Mw9HU0DWhaZeLBYXeqG26brXsttvl3d1adW7a7x8+vnt6fDqejsN0nOazC3yzvVuutvF2/vTxozwdno4DvHt8/8v7+/X9DXjLsZRBQsCNhHZFrc+O0hzj+TQ8/nB4ep+mo+aExKFdhmbDfpkKcMxyUfH5bA355Uopvn/38/d/+VOMw2KxVJWc8zTNItlVWQ4iKTLPMyKYCTOXUhDAeSqSx/FcUmLCJixevrj75uu3X3/19u3b1xVi7tlXeM84nEtOxOg9r9fLRbfMd7JernOK33//nWleLZfLZb9aLb1nqTaFWpXxCoAgmWnJJZaSixQEVac+QNdy2/DnA6NmU9Y5ZURjkC5Q9DynUrKaJCnZQGJM3MWUixnKarXoPCGpM2CnRqKIzjEFZvbsgmPnvXdUxwBMSIyMF43iC2wKTA1ETdXAFIsSI5dqNUnEwMyISC6wC6Hp2+VmJSqlTPN4PByeHr9e3r0Ky60BLrb3xP7zV+NQgAwIwQE4rjqCBmKlqCKaB+O6qUFRAgJLhqzeg/cYBEUsZ7ygJAGqsBplHSeYZ0tJp1SG6fxv372bdfutrLrl6jj2h33efcxxnB3bqi95SqtFGzwG7zbdZtGshinGmIpEKoGn3lKfFGezSKKUiFBF58lS+lJiitoY0zClPOVU7OlxaDLOsZSsMmfJElwT1r5BM8lTnLKU7bo5R0kxfvjw+C//65/aEB4fPj7udrvzEUA26wUaDec4jlOK03LZvHmx3fQBji3sgtJpP+Wi51KkNeeRnBLMkh5P4qnt6AY69BgNOrJopmZNSc28A4pJ/aBhzj4mmabDFIcppjk+s1u7SMkAMs3n6eefvvvpu3/7y5/+5fT0zluCHEPw3WJ19/Lbm9df3bx6221u2XfsgnfcNk2/bEseX7z98ee//Kv95//54f1PP3//77vHD/vdp6+/+d3XX30buna53bSHfd6dDvvTp5/fP27u3y63nZbp8RdDCy9Hf/+mu38r7IfjfPy0e3r3b4/vfzLDrt/evHi9un272LzqFtucSGXOCUWuIGT4leWNqqY0j9PxdGykRK0tRIqm6q6adxWWCgA5p8qGRADnSEFTnJ2j9Wrx9Vdv/8//3X/3T//4jy9e3K1Xi7ZtvfOMdD6dh/NJRdVESs4xTuNo5VKM3Nxs
/+mf/unly3tEcI66tnGODQG0LtSLWkFS5yHn+XB4OJ0PNqtQcR46IIB2sWzZfXajxaRUzC08rTuvm84BfNxJElVFzUVOJxdzSDHH8XzYhW7drW+Wq8Vm1fRtC4CgxsjOec/UOG5C5ft+0f8CqDipqkmAYASE7BCRVLWIFlUrAkWqXDiaMIFjYnY1GgBWHHJoiZVb6Da0+QpXryL41ZsXrnnm1VdDdWVbMyHRRQxeP/OzCM2gyEUDUVGRoSiIoHqUApftoNmlUiVAglTACJBBQIc5vvt0QIdN2yyWfn+Cw173T7NkWC40AEweWWLxZF3nXPAuNA40W4qxFIPIgG22laAJY+bROBexedY5faEnXzVequGUxlTiMH942HsiBGRyTdssuna7DN5Z1OxXi0ThMOTler3erBSpADSLfqVSUAD19nZtBqIkpgiyXi+++ebV3WaZd6fBPz4NSYE8c0PiFBmxarfmOcXjkB+dSOLGEWJICgpiwLkoi8xTpJCpR1433rdhtYYuFbm/u/He/U3mJ1Gprf54ftISV5tl2G7XNy/a1e3bb//x/s1vt69ed6stuwap9pDsA5c89+u70K4VyIf2w0/fjePw41++G4c5Trltuznnpmtv77ah6chynE7jdGqwxDiDFtg/UdOF2xfUNU3rnEfJMcWJuGPfL7dv13ffhnbLPpQMJasJ6edi/29W/YgKVqTEnLGCuAgVLzr+JiIV42KmIqYqYMCOmVwT/KJvNpvV16/f/P3f/+Gf/+kfv/36m8Wiv0BgTXOReZ6naZymIc5znKZffv5pv9sRsirkoufzICV75lJyTtkkX7xIFar5AKC6gE3w4Mh78p5yBm/ggBx7otB14bN1ugJksVjUgZFpF2jV8tHhhFZEihoCcC4iheZofOJmaKY0x42UZRJrhHvq+p4b71tHgck7dowXK+EK+NCrSAlc3ISqVREyVldyRVAxVRUTUDWJDMrOsfPMgdgBmCHWW52STuIiL6S785tXYbX9Vea3S6F2ZTOxVY8GRGOHPhAilgIiKsWs4nsRSwIooILFoXNAFxPXy/KvcqybBTZLQa9J9Tzjbj99fDc0IT8d4mmIKca2CYFdH6BhRUlTkpSzIIa2QQOwOI/DPM9o6BhCWPgQEFuA4yT7Kc/nCYZonwFLTLTo2vWy7fugXHzr9mn+419+enw6rperm/X6xa3fbvzqdrneLrhvXwl8/c/zNBUw8i60i4X3DJAk55iimTaBTufxxx8/7vYHifOrV9u//2//7na9On86/WLh3XcfpqSrpgVFmzKhUOMgYCYbYnz6tNPjCZ0DpKyqBmhc2lCmvtws59XK3axv7reb7e1q3YfGGUjbb1aLi4laBTsTEAAS+77v7l/ckv2erLy6v++6PgF1q/tXb/9huXlB3iPzZbV5xQ8S+5vQde365ubV26//7k//6//00w//9vDhlz//6U8//vh+sVje3Ky32+U//fPvl8tl3y5Co2M6Om+w6EltSkmORzifnW8o9M3qtt++WM6RqF3cvV6++KbbvLY6cP2sf33Vub2g+6/fCxG2je9a7z15RuPKr20q2UlEY4xS5EJkVDNjRGr7ZrtdrTfL9XL59u3rf/qHf/ztb7998/pl33dSSpyncTjnVNI0H46nx0+fDk9P5+NhHMZffvop55xSybnkLKWISClSpGS1YqpqoqqmlcXHoaHFsr25XTYtiUYmC64y10AUmbVpHH9WYVArRccpRysOLBA60kXAiW3MMWZhYhEtxTioD5lBDfOQj4envl3d3L5+85oXt1vqg28DO7pgt+XqOwpwWZJeYWCXcvry5zrhUGQCUaxPJSlmFWNHTtHVZFGy6JhlmPMwjMMUxylNx09pmi9go8+H/zMDGyqQohrBISJWSRY0Q9XK0gerkgmVlnHZvUOpmH+qSkhXUpeC8+Bb4AaMdU7leJp3T0Nw8XCap1wQjXu/aN2yc61HM0kpas6KEGJDhPMch2EYx1kVHHPftj6w+dZ8QYpQYo5zyfh5b+kdv9wu082679xs2rQOSnk6DinKabI5I3u33ATDpQ+uWXQth67fpDHPYwSgdtGH1jPlK6NMTUpHXm7KygfTfHe/uV2vF30nXUH2Q5Isuum7rmvSnD1Bt2hC44hIHc6MxwJlztX/Dg3JGBOA46g8JGqpX2yyoSmyIKvar2QWnuV/x365XOe7FwQFLW+2m8VyRf2qX7++vf+2aTe5ZNE6q7mI/qgqIIcQ/Lbt+3Xbr9l3rlnEuRyP3z98etd3ffDu1csXX331zc3tDSj60GQtQ5rZOULEIiVmHKemS9x0vl1ubl/nVHIW5xtyDbI3kaqOXb/4v7336+GntnF969tATajwQHLOeRe8D2iYS1GpphpWcwezWyzam9v1erNarxavXr54++Z+tWxLmvbzeZrGNMdSSprzPM3DMA6n43A652lM0/l8Pk/TFGNOOeciBsCMACaSVYpIVi0qZlqH1hwTl+xLPredY4/EagqIDGqmdPn17OGq0b1JIRMPWlIuKUKOWmYtCuzVzIqxVLlUsRxhGhL2MQly8PXVY7Rl1wZfq324KgbWzA/wTBHFAC/SI5ciqoKjFURBi0ESkyIlm5EKFjEsucScp1TGaZ6HY5zGOI0y7GA+ot09n8U6X8VOHAKbgpkAKl2+NERMV/VTQCYmMrrIsBsTeIdIKGag1a3JsN6qcrVCYm9NB85DHnUc0/k8tsGJFmYMPiwX/Xq1XC177ymlrCYxpVQEiQQg5TIMU4y5qCLSaYrekXMQWg7LdQd2zjMWd+UQQxvcb1/c+Ve3Mk80x67znahDTGoPh3EuZqyu1e2ag4NmToYuzTqf59PhzM7fvXzhll3R+QL8KKZz5Cm/An+/2ILHpm/sMJ8P02l3PO/2p/NZRG6Xa7fsIyE5XnryjEjUOKKGc5bTboxTLKZgxuiUXEk87st+t29Pmi08nKaCPqullL56/fb//n998aJZAFxZpmYgwOyXm3u1nOIpjU/n4QkobxYL75yozSnnHFXlmjjq2RcAKOgQCblZ3bz9wz8vmm6dopSMMn/HgA59G1arxavbm9foPIBKjtMcESciZtcIoc6pDFNrzBxuX7wxKU8f3ksc5vPOuYa5Gpw904KGyz1c+YiXzN80ru9c31BoiBCJkFG7jjabTdct2Tm+GvGxc875EELXtYtl0zTMDI5pv/tw2L3LKaUUU5y0CAFpQclSSqlgAdLSBwirVhdeFIppNhVTsGKSRZKVIlLADA0NMBuImojmXPZPjwAWWt+0oe0CEJYiOUtKZZ6+yHebadXEMoN5zvP5OE9TimWOYqIEaEam1XPQSlFOJbgY2tx2yOU4POC7nOY47U4vX796cbvdLPum9Y4vWhh2ZUVg7dsvjB+9HH67CpxceIVkGWVWNxfJKqnkKcucNaZSUtGSIE+Ujj4eeT7qfMjxSPkFPFuPO8/Ve/kiqmafiTIKhkAKyABUO+mr5NHlRi5qM0W0xldCJENQJEUzVDMk6FrftXA4yzinw2lMrQME8oGYmC/MhzSnOabznGL
KqrU4oaKaYr5YfBpSFu+5cZ7RceMCWiOTlx6vL59jXi/646I/54gIjecuOBDLSY/jMMQZIHlKS5T56RTYg0JJEsc4DKPzPj3u274pkgxNyZMYD7Et2hN3DUMfbKThk825HIfh+O7DfB6LKKgZYnROmDOAUwCwTkEFncCQdY4SRRSRPaADEJhFjymNdmyaD/vDKRaLMU/TbGOO/5f//nnOV9XajrXdSkocVzsrMY1Pw3loV1ObYk6JoSmilWz47PBX+/OC6AidC92m63LOX3/8OJ+HPEyax2XbtqHpmmXX3bquM7B5Ppc4aJkFTJFURMaTEoMagIbQNk0PZnkap+Mjs+9WW/I9PPPt/UJLenYhgnccPDuHjoyr8R5C17jterne3ITQXMoNRKLavYbgOLRMbGZFSjqNp6prKyWbFkL0HAicc+Rc0za+GnghAoEhoBEqYkHNUiRPOU1pHiVHFQUzAjLAAiaApjSM6elpF2N23nvfhNACI1EByAjeuwXilwB36WwQFWDOMs0px5hytXrhi8lc/T2iUkg4qyoDqkmeUkoxl5hTlFJiyrfbzWrRNcH5i7X45T81BLCLxdhlMlEbBKxfMSaxWHRKOiWNcyl5TjmNMU9zmudU5gnS4GReUAwQ2UajGJ0EBnpe9nu+mGqjIBFW8XNFq/O/wMgOtcKttL4GF6l0MxAFNc0CdSuLFyS6UiX3gJnRqmuHzh3hPMWyP09zcSH4Fl0wSFmOwxRznmOaUo4pSxE09UxNE6o4EFVfCau0QWZ2hA4ECbDX0tkNXdFyBpiRMpAYIEBAbIhAJJUyTGMZFPJow7l8PGydD6VQKRdTQzAi/tk3jkhAhCkHHxS2Y7w1ednwomdYNIVoijpmO4nsT2cZJsllvz9PU3qHPCAGMgfACD3TjePe1I9zTvmYNTsfVtA6WoAQUwhgkvefHv3jI6tIjHmaymYBn+2TP0OmoALqfdOu1qvXFsvTaRzP2e12yCvn71rqr2BsrSPZz9Opi0RFjkAEii60b775naYZ4jifHm62fdcgIyI6F1YcmrC4uTgmxKFIznEq9iApYo5EnMapxJxjjjGd9x+J2VWS3cUggC6wJDN6dgtwaSqrpzCoGaoRAHnnveu6tu9adh6x0jdLynmetTafSMAExMBUYxj1bd+E0HgXgne+qbNhJk8ETMgIVOnCV91/JcglzfNxOB9Oh6c0jSY1udWZFnDTLPrtHPX9+4fTeYS6P2RSM5Uqr1tu7155H34VzogIyYfQdZ2WXHJSExVSALCaJuunT1rLE9WYsveT76Ygc7J0KFHifD6P55cvtzc3m1W/aJvPUwAAVSl19WOIQKw1WqmlIkk0FZ2LpCIxznmcdB4gnjVHy5nm2Q1HOe/y6RNCplXX9m3TOmhWTfCLxZL5SyBzUgetGZDAGBFBrl9lzfUEAGD12GslJyFIxXIgqoEIXjh4ddOHRlxnA8DMi7ZZttKGFOM05VJAGwMgDiHHlI7DyDNPc0oiamAiVrIjKCLsuAbBSv5jQsfIDAimGVRBZ4bEeDVhS0U+HYePx6EVcYSe0AFoMctSNIsqKsMcx1isGE4TlGygQADMhHg0RDUFM0fYNS2ATZFB+p544fyyLYbjSQ5JjwDnXDQWET2dx6fz9EOxoxqjMaAnWDDdOtqgrURN7VggN9YEL61rrCzYNY5EIY5zUm1QqSQ/zy4nfA68vooHXJD9rl0s7yxriiXHc/V4UTVVrUjA55X2RT780jqIioKhc+H25RtN87T7cH6gxquji/AGU2iaFTdONc3TIk6nNI+pRFErKc7jichJUaSmWWzBJXYNXPq7y7D4gkiuheFfZ35y3vum4WrNDoYXbwsXgm+a5qKoQaqKVOVmrYpqsfMcGt94F5xvGt+1Td+2XduExrML7IPzgdlVoRS+pH1DAKvqgISlxGFYHPu2YZzH1opWARFFxIaafrHdvkgJgdr+NOiFhFeRqpVXXzabW+f888ept48Q2q6r28VcJBdVleomBGQAVXasHlrNuZQiCgomJZcSY0kpp6SSU8opbeJqueibNniuFn/1yzUTALlg6iGLxpJTllQBCblInGwebDrBtNf5HOcpjed02sXzLg+P6kndC2tQhZwPy2VYLlfEXzZKbpgLIBgZOnRUdceNq/0hqBpVBNTFGf4y1Ae42JEZGLoapulKPQOoJivE5H3ofLdsdbNKRXVOKYqUOQFi8Bw8EQKzK2IGQMRmEFMZpQxTdN41TeOqBxLXNQwwFTPJyXKch/NpGkaRSw8zxvTHH97//OP7f7xtO+c8MRqUJJKFmRd9+Opu+ca7xSg4pgkhV5zyJUwjABqaqgSDLWGDAAwZYXLQOewYUMBAcylTljmLFlCRoeRdzg/ndM5iSIjoGVcOiyNxpMSOOKI3Ns3FSkbjjvE2eFD6JDYj+uDYJPhuuVg/j8rXCoDMAIsicttvne+a5Trn0SC50KELUm19Lgibv5EYrbFD1QSYeLm+kZfT3cs3VEaZd1pURFUUAbwP/WqBDru8jPMU5zHO0zxPOaWUCqISt+3y5tYvRI0p+NA5twBzSBeSx6XWq2Bv+IKKQ+Km67vFuk6XzIQIyTliT7XF96HagcKV5ui9877pu74ydZumqdA3JvRMjXfEbEzAZMRISGBgRheQWbXfBazytYDZd62PqVuhkuSSk5RcgBDZO9+ha9nAN30odMGOmtQWBABUZLFaf7aBh0tvQo7ZkLDvqg+SKqQ8pJwNxFDJe0BXozEiIJqiU7RUrEwJp9KkhCYOyhGy5DnF+3G+Wa7Xi0XftiF4x8hGliTHXJKkWCwVzUWLllJES9KcLUeII6UzjPt8epyPj+fd43Dazed9iRNIXq5WaXs7JZ3y3Hd4f7tdLle/yvxzrLAMQYfiwDlkd/nmSDFXd2SzSwmqaMIXkUMCIgQCvmirgtaSSkEBiZznHimosGr2npuGi3EpFemgoppKoZQcqwEhohrkVIYplpKd4xYg+EBIzOS9a7s2BOfJUEVzmSUP4zyOX2h9pcjheDweT7ZxznG1eQU1T7xqm80ivFr394g8T8l0roIhVfhdDcGALnLRqBqKeIJJZSadkQoCozFZgxpYnQEbeTNvwKoo2eIMWX2FXCKKYJRyzsjOeceRwSlyYV+Y1beEL/rW1J1yGWY7JDQzJ34yp8905q1y/PGSwWt57p3nthHJUmYDQHZignadD9Tf9mvxzM9RBIl903WL1XJzMx2WQ9xLrrMrqwCYpgncBifBtZ2LCzfNNI7zNMR5MDNwreOw6L0ikSIBEzGiA7yyvuDKuP518vfeb2/uXrx644nAVCQjGCMuV5v1Zntzc9t2nXeu3jYzO+dC8CG0Xbvo2r5p2+ADU4W7KiE4QiQUQiVQAkOg6leuCqY1s4MaqpKpEQXn2qbRfuGQSirZl5JFCKH1rmnFMKsBe/YXY3m1UqcP9fCzD88/T7peiIDea9t0pc+lxJilSCkmWi4VMrjP3Y+hIoARKbCpQox4PJhKESk5lRhTirmUrNYDNegcghnEYjFKSjnmkouUUlSylY
wSIc0WR5uOMh7S+SkeH8fj07B/nMZTnEYRQUQX2nEuYQFt3y6W6+3N/Xq9de5Z5k/5GtMKaBAzvChGGShgAbt6KiupVe4xEfuWnWMKVYgLTEEzmUDFTYIaU+jwflJ3Ou0Pp5iLOKY2eHMOwbx3SKSARQGqbbOaSBqn+TgMYLpaLrxzIfjWe2Jsu2a9XXdt8KhachrHaRrHuZznVK6LfgbrSXrM3kTFcslFC3ta+mbp6Kb3923TF5lVUs45i6gi0sVIEeCzVICKxjkX1CHG1kMSlsohR9s41Y6yC6WAnDPM5osl4dvWtQFu+zb4EJ0TVZpnKWUwZTUhXZi2ikuhVtqO/GaxEgwuzedp+nF3HmJq0JanNF+f5Tr3Vau19MXdPQEiEKNvvWvBpIio5c/F/jVa0DVFIV6p9SoKF08K9k3jQiMCKRcRBVDiC0GDGZXYERp75A79gpoex7aUgtAYeaJASKREhhcnvKrPAfIZjXpRILoe/xCa12+/jiV7IlMtKRZJWsrNZvv27ddvXr9er1Zt29Klmma8Tv4IHZEjdIRYJ0oX915QRGQGvIIbCAj0ohRIdYdfcQMiCOIdda1nXLStL0mkiBQQwuJdNoxJxilnNUOupoh6mbwTmBW18kyVqOLIiCrJGZSYnfOh6bpeshDg+TxZUVAFEsI6AK9C4YXQ+yaga7OglHIa55jLUtRKsjhBHtHUiIoPHj2hqmrOuaQkKUpOUDKW5MqMeXZltjikcT8fH4fdp+HwOJ4PaRpLTiIK5BBdMZgFz1NcGb58+eqrt2/ubm82N/fPWxh3ebKLJB+IGSmIXnB+UK3bCRAI0bCKHZqyYVEUMaygADO4aIiAqIGZZyRroNA0lvMwx5xVLXhHgAZKSKIWUyliTKVWfSnlOcY5la7x6/X6drtatiE4RrRu0W23675rSEscxzhOOUtMkrJ8XsM0wb19desP9zfbxSzadbHNCh477+8W3U3rbz3YMA2EkUAdglwHxIgGWN0/jMgQEgMBimcMFNqm7dtm4TqCwIkURteNGQaYO8ZeBBydFWe1l4uGfdgRn5KkFBURCL2jpceNhzsHNwSdGWY5jDEzZGJhHko5ptx6mgyfdfyfM789w8pf5Y2QCCu/8osr1t/k+v/gMgNE9m3n2w6IRSXnOaWxlKiaa14lIGY0JERDdshoCCkmFVZgQAcVe26ApngRtPys2/crSs/18IeXr14bGCGalBxjyjGnuFkub25vb25uNptN37Z4zaiXGFJ//HXJTVfrTqjdRQU0iIiKmZiqiahkUFUtIGqipgVKMckGSSQhFueMgMyzGRXkxJznMqdxnHMqVlWKDFCBarFlakWh6F8/0mfWFREROed927aggoCmRnPKYqLFCNCYmauOmRbSPBMxcgeAOaYyJ7OTlFzmWUoSxaw6KYa1NCEQmKRZ4yjzCHkiyVgixMHimOZzmY5x2I+n3bB/Gs7HOc6lKBAheXYeAU3EN91iudxs1re32/Vq5Zie6ywAgCPSq6YQIgPiRYuJL1DZi70YIzCDsiqIgSlIKRKjigFyHf0iKmUxKWYAzJI1lYIpxjjFISUkWHStc6ymohZzmWJVU7h0FjkVNSXg5XL9zTdfv7rbOhTQoipd395u100b8pzmKZ2ndBzmVPR5nbxY9P/tP//DcckbzE+n80ul2Awx6m3X/bev7u68k8P5MZbYuKlzQEBKBmbE4JwiSi5gysjmKHnnCLz4RetublY3N/1q3SzZeJp90icLbq6dnd8iLKcE4FLKt42LxFFtMFEVD7YK/r5xLxp/0/l137ZNg0bpNP3LcYihw9W9b91qweRcE3y/DPxsSm7PtR0v552qBRNY0S/dNQISfHGku6T9vzqBl9fVgMj5bhn6pWsaK/MUD8Pw1E03fdqKFrsqajo0cnUQwioBDXPULAZVV//Sa2m9o6o4UCUpqe7Bnt2C8/7u7h6JQFVKzinmNKcYF13Tdb1zrvr8XGW8rwUEXHx76uyyDuCq+H9do4nmLCnLnEvMMeWUJEeVoqogYipggmYIAlgQFcAIkdATe+KASKqQxaa5THPJUm8bAEGBDQwN6k+S54KEl6/FPndWRMTOhaYhACbHjO48Hk9jiUXF0EG1xdWKvEmR+3VYv2LXUtOWmM5jmlOeU46lxDkNw+jOaXk/3d3fLALydJbxgMMTpJFNrMxpPE+nw3jYTcNxns55nkpJIorkuHHgG6DAzhmAK/nmdvvb3/7mm2++3qwWKvHxcRc85/QPXw4/e0RDvDQxl+CqBqJoClaQCdmcI994j4QZRUGIcgaRkkW0CBQ2AyUzFRMBVYuUh3hOSqHVrqOkKAqERgSgIGC5lJwrVVERQc1KFudo2fm+bzfr5Wa9kDyaGJNfrBbb7YqdP5ZTKnYY4v48jzEX+SJN0Hbdb/7w+3np6fTkdvtvubPl6eE43vT973731R3ix/TDObjF3cbWvVfTlMZhmrMkZDEw5sbRetn1feDgQIudh9ZRu100t2t3t3YNt/PsTtP4ND/l6TEbG75ofEs8Z41z7l3dVyUpgmo94evAX7fulaeOsYhAykwuxvzTMA5+3mCXiOM855QQrVQ9l//ChVBDcWV9VIlcQ6gOWP9Hsj4AgKkQUdOtuvVtv97EQXMah/OuP++n4TbHG9+0F48crMkW0VgkmBJoMVC4+H8DgBmqAvC1X3pWonze/AMAMPNyub5k6pylpJxSyanxHEIAgMrhqxHqS+av1cWlmbnYZYsUlVLl/IvEnKeUpxSnaZpKilqymVRakZkiGhMQG7ExAxEyOecahy0QiFnMFmOOuVQc4EVwvi4r60OhAVWxnf8omAISkRE55yqzg5iQzAxyziISU9EC5hiQ0FRLTnFwkr1vqTUiQqJUcsmqpqXoPOcuW4dtcEyt+hZtPPB8DPmsaZA4z9N5OO7Px+N4OkzTmHMSUSNG17BvQ9OHrg9N13YtmJV53KwW2/WqDU5zGufhdDosu04kfzn8IfBFHK3i+AwI0AxyxUCQeiQC1/nFwm2b0KgzxYIUC6QY5jHHc5pnkVyDZyUdZpg17eiJLdy8dKxL/sDjkBBE5SLqbiJSSixS5UTrZpWIQ3BtwwSllCmOJyJoV6vVarHZrsxwtzucpvh0mp/O02me5vyF2eOb9v7rb3IfyvsGm+7vty/849F+/Lhqm/tvvt5Kfvz+p9Dy29dfQd93nqbz8PMP7z48HMYpJ4EQ/OZm9fW3b+9uV+R5Ph+ffviRSoSll+0ivXoZFwvWfPrl4Yef//THj7v3h7QmenXXbxw3fSPsYrFTzsekcxQveBvw7wP/NuASyxTz9yeLPmxvLQGeh7hDjW4/qv307nFKqV20T/tTfuageoVJ42VreqF91LkEwnVNwYB6nQt+fkXt16kK4Aof0QKI3WK9unk53r0eSKWU8XQ4Hw796jCP96HtMXhGArokXTMXPJgyAAMrFhMxqR325/7bLur7drXg/uygDQBEFEJo2w5NNAQpwZpipo7Aey5SxnGs3jC/mlZiFc9TqGrAUnKO0zjO0xSnMcWp5KmkK
ecpz9M4jiq5Dgl8ZcZilT4i55EdMhMzqvNqdSBvWWUccZxURIHQsUN0Vv1RajOFQEyeXPDNM9wCVgNAwqpFyAimWL1r6k7ctKiUomolDSWnTITsmJEI8jRpycWxl0R+zUSOVIummLPgKeOSp2Y7hnTsxtwWTeOJJPWeitrD/nh8/PT4+DQOZ6vSgC6Ad+ZabLrQLhar1c1mfbNd3243CHba70CylXx4ehyZ1CTNU7y9f+6e7i4NDgAA0sWZpxY/UFGFClBUM2rOSlinAw4JHTkCj9gYBoZSLCsIoshl6ipjHL1JWPdbaFzwwyHP05hyETEEU4emaIb5Wt8SYxN41TeLhkFjmjXGMXjHjMykKjGW4/F8OJ7HKc6p5CLPRYmIuV1vvOb5tOskv1jyYPzLx33jXbtogxL3jiffrXparpvGKTMvDnaacpJs5r3jvututovbjSCIie9bL+jWC1gvY7+Ers8lP+Hh45A+HIaHcwbPkpjQk6mCRbVRdM5axDqkdRu+vl//dts2aE9z+WU/D2oT6IxEwQXyzgEXZaxGK0lL+rXu5V9fdsFYfT7m18rzb1rt50f/2ZdtZkJE3PT9cru6eWF5HA8P8zSMx/35sBtOR9/1jbt42dT8ZmAqTo0MEFCRKv0CzbCGHbhyzuFXmf/LhQCM6JkR0JAETBHNBFRSSmnWKlNRZ5PV6aiKeREWIEXUqnqb4nQ+n8fhPA9jSpOVWUu0kiTHGCOaBO8a78U775mY2ddNH11IUsBAhkhFWBPOpUwTxQimVTGNLh0TABka1b7F7Ato59fXpVomMKqdGAA77wGg1YoB1pzyNGWVAmqKaCoqBUUgnV0TODSEXlDEihVRtaLQxsnLuZPQ5ei1xOFITKvNXXG2e7CSYpzGGCO7QCGQa33T+W7ZLtfrzc12s75dL1eLpg9eSnIS4zyapDTFgmgqpaSS0/O236VSAJEcEhMROgLHNYITICiSgZWSTnaI4+Qmh8reXODgOXgXHLYrWC5YjVUgZR2iRnY5mYiKQYZm7l/4ly/v5QQPH572Txdl5MCuDZSi5OpXaIAIq2Vzt+2WPZc0Too5JeYW0HJOT4+742F8/+7T/mkvOTlERqKr0ujnS8CSaQFjD94pl9kspnTKDLQKODdTzDGfmHkcx3MqGQ3RiIwYFGDKsjvHGFMaJiH23aK52fJqlQCnIek4ftidT6c5zUlLJmKOIymXWWKyUWguYgqETAztZnH3hzdvvr4LbQhjfvp5p4fzKc2j4c1icdt3/WY9JHVkp9MIWLZtFWa6nJhrDrTPaJ0aiC9S5Zd3szre0oUF+lk5728z/yVUXNZpbdMvVvd5PE3HfY5pOu+Hw+Nhv3P9krvWQwWbmhkQITtwBgbeUAAFsLpD6aVck8vM9fKHXmS8nkkTXLpBxIs8s5Sc01xiKjnmlErJ9UopzfOsqsyu0qORhaqNhWhKcRzO8zTmOZpmguLAHCmCgqmrxgeg1c/TUcWnGZoR1IHIBTWopnGKQ8xxYskOzNX7qrHVKkYYkQjUrJQiUn4VlC/oXQUgZkJgEamwBkR2DqFFMKxFBPEwDCnFWYBKKaDInoOzzhcKZVacNJkUhwimZNDavLLjylwDCxTJ00Bt1y9XYMvu4SGE1vvGKXKzpHZB3aJbru5uti9f3L99/fpms+4cp2naf3p/Pj6V+WQlUS2CANVUpaqkPyP2zKMhGXl0HtAhMSnA1XjvUvkpWoailkgJhZ1x0saXxpXg2DvHTMTmFAA1K6hQVSLwTC6w771fNgFbBlmxVp1w8wjBU/FQCmZBA3BMq0VY9c6hTsM5ERJTC4AIKaXDYdg9HXePT/M4Ipi/ULqfLcZNpcSiSR1Aw8TYBFx3hCYpjmfG6HgmPk3zOc0KGFMas4BzTdcEg7ZpfAhJbYhlGrMm8L7D3ku3TL6JBXKM6TgeT1OeExRhhLb1m9vtNvjDcToPUadSQAmU0dA5WPT26s6+fYOLPkx53fTNLw8//PJhLHK77m/vN/cvbxTo5W1/Ok8m8++/umnDlzUMPH/dKtn9mT7W/3aX/1fzqWusuIQAJGLftP2m6bfsmpKTSs5xjtMwT2MX59B6x66ef0IgQma0K3EPiURq3FERrdUFXkCgf3MDAACgpWhJgAgqWnJJMc3TPE7TNA7n8/l8GoZhul6q6pwnAoDCzkJgJNAiJcV5GlOOIIJgjjUQBoeOgRADU/JSiiveGhEfnBMSASdk6pwxOTTTIllE52hjormEIuHqv1UujqjV85vAqnJhzlLSX0sUGKgqoVTbkRpSDYwIDdnV3wAmplC59yKSipYCBtXHoXHovUERRjHNagaYUcWL72DVc+kbV7JN4zhO8dPDDomOU5mUCzXgGDn0i9Xt26/uX754dbu9Wy83fUsq0/Fw2O12H9+P4wEgIwI4BmYkqmjl5zU/ALjpcHHC8cGkAXHmHJIDckrVCZkAwfhiPoGmoppnm0YFE2R1LTYtNEEaNCqiZoBkHfkFrxtuHTMQz3kG437VWYJhSCkVJWWEAKjGRZCQOh/6pgkOUkrzFJlps10ROwRM0/zp/ePT02EeRrTSOGqCu1jfXSOZqeT5nPMAnWdsUHLb4KsXy5JSSnEquouwT/B0mo5jzBcTN3Oh2zQ9M4cQ+q5ToFhMzYHvkFxp3ZlaKZxFypRlTHlOXEoLkBq/ur99+8//zevVIr97Sh+eTh8fsRwaLIZm3o1t97FfbZfrRd9qqw4JUvnww8dpktvgX7xY/8PvX65Xi2HKMWYr8f71b9aLFq5nuy4hqyPYdcJ2Oc8AUK3C6D+sSD+Hhmcn/3NHDVAp2943q9CuXOikxGpJhCqW5jSeY2B2fSBfN26IigRcx0EAyOAUrvV5ARAzg3IpUhQ+FyCfvxctJeU4ESCYVGX8HOd5Hk/Hw8PDw/v37x8fH0+n0zzPKSUAcBfMj4bAi8WCHeZU8bDRTDwxMzCZYwwOmYEMHaN3rml83zRd69u2CR4dgW+o7YJvG9+QYc45x2QxWVYvbqEXOp1UUxEBQETmK7pPc0pZUv71DAXN4LIDQAVDREKqf6lYP23vgjVL01oDEIIczikKqCEgu+BDaDyqaY1Ic4wA4F3E7DyUrvHL1XYY59MQHx6fPj4ejcMp6hgtWmtUTGW9CP/9f/O73//+97frlc7jw49/+ekv3/3pT3/aH/au4RBc07QheHUsjonIFOpw/VeZX1LF2AABZAPzJoJUgLwi1RIPGAFcHeqYmgiomIhKKcZIRlktCWRSZ2CKgmSOqWXf+zY4n1R2cdBCK79u+nax7IpKtKgkjhkQVSCwX3V96xsmn2IppapCkRnGmEsqp+NpPA8qhdAcUyVpP38S1ZKmY56PgEqeCWTRt2/e3KU5OeRxmoekQ5IplTmVWEQRg+PgKj/Fs2+QQ1YUMANCcoVwMtrPMmlSA5linuIppqKKROSctot5fXtaLo6P84GOJ8MIxmRsEA0+Tvk/fTidwsOyD56g5LSLMZqho65zq2V7u1m8uF2LoqpYTv32pgnPlHx+nTurzurfbvHwbxfr/3tX
Zf+40Idu3XRrlYRIpgpaTIpmkSQ5SmV1XFG3AABcAfxVvUPJzKRqPtTVe6ninX99OyIyns+H3Y4QQaXklHNMMQ7jcDwe9/v9p0+fPn36dDwe53kupdSev0aqtmlW6+Sdk5Kk5JKTgXoiIiAyInAO6ovAiM5xE1zXNn0T+q5pPXmyEKjtm9B1vs0GNM0lZShKRg01ZKym3sADlMvhJwpNdaYWKbmkWEqCXyXMKwoJr7PNCnStdAqtVQM57y8vaCUsxSRZVI2JqFJfpKAoaWFQ4OpXppLjMIy7w7ldncY5H6Mc5nIuZ9d0vl+v+xWAoiTM43bZbbvQQY67T/tPH/7yn//Tn//93777/rspztu7m9V6bSqmLTORd845A6oyi8+/GhccAgM6IgJVy9lUodp1Xb5tBs/IxQqWKj2iiIDe1JMqIki2ZNlIqeoMOEMyYC2cig+t91AsDjEncNSy9+vNEh0ckiXLIXgmIqVF6LarTetaLTiO8xxFEJBcznY4DCXlnASR2DkGQRUDqBC4z++aScnjPg17xwGBHPFqtVwvlznl4TQP82OSEnMGRA6hrpaBDEBB0EATaiFQQueQ0cC0FClTLpqb4Nj7HPN5nB9iHgEKcya/F/pPhxLG4d/e7d99PJxPkyQNzKZwmuT409N+Tqt/CX2LfXBd8GTSY9ncNNveNYxaLEetdAq7Gqw8f8egpulrEq0pn+rwv55Ju+wD9DrzswvK/RoU8ErhvIpt2EVnB1zTNP16sbqxEkVTybOpIAKBN+E0SRHjKpELhHDhbgAAXZf4aFDk4t5loAWqwqwYqeKXHqDkvHt8/PDuHRGAaslZJEnJ8zyfzudxGGKMpZTKkxcppYiaVuU1U2LOTUC+eNqq5Jwlm4mhIBpWw0EVAnDMznHT+D74Rde0nlu0JlBovW/b0LUGbk6iSuQ68kodACVRj+iJggGJGTtH1AbvkCoJJ5acPr9l9dAzOx+cIzBVMEDgunBBAIWChohKSOScNY2WIiWXIgiAMFfjolLEpnGOYik2iGG1MOQ5yZThx0+Hc4Ffns7G7lCINy+6vrtZr1+9uNss++CgpHQ+HBzB4dPH3U/f//KXP334+edPD59Op2OS3LRBSpGc0xxVjBhcCE3TEjvRz7Xg9fADVeXwGt0vh1sBUACsGkhCVhA0AmUEJkRG4qoAhGBgxURNqSAJMzIQeiCTSWYxyCIpyxQnSRRdapHbPhgu0lxYXRtCcD6g70O76BaevBUQUXKuiBaFKeY4Jck55aJ6IR3oZYj91zyWS9FFTOSqFVrjXSlCNE5RX72OYg6ehmk3nI6nXMqqc03j2kAKtjuNM8Smid45jwImqWQECQ48E3mXUjkeT+en45gtAcUCnw7j//TdOwP4+af3+91O5iloWTsUgKSapkk+zOcjL3veLJub1eJ22bx4sXpxu3x1t1p1DapJ1nquCCu79j9M1ZdjC2oX87TLs15P/N/UAv/FnwCXbITXjwqRAVByKnHKcSw5GaAaSiwg6AOzd44uYAKCquwAdBmMGxLXGaDoxemdzIDk+U2UUp52uw8fPhBfcD51XZ9SmuMcUySmpm2qH17OVHIRKQjIHIIPrqL9HYE5BigABU2lboq0goNqOXt5fEJGdMwEwGQIpoBFUypm6EQA0DtQIIUiRtXuTYnUkLWq6JshWrVMqen8+VVpZtVKu8oNIiAqALoKR7rUowQAzOy9903XLErRIiUXhcpCAkImVJDc+ObVqxdNvxyjxCLJym4sFlK/9Nu7+36xWK+Wd5vV67vtonGS5+N+P+z0sNufPrw/Pn788c//+vjpYUoJELpFF9pQDZGQ2MDYGInUGaCqwl9n/gJABs4MkbxDQLTKh0QjMmYEqIInRmjgwHv0HpgNzaCACpiwZshmZoBszqvvUJxMNkqZS9xZATTw2CaJjNxSaJtmaUsxaX3ThbYPrSOuPQkzMaEBpmzDlFMSVIEiORctYgClaM4ixQiIn3W9xK5drB0jocNL3WiIykiL9fJt09+/fPP63eP/9z/95ePhz9//8jSO0+/e3GxebG76dkjp3aeHD+cY2taxC1hUZU7JzBomBCyqMeXzNGIuayAVHnIcz59+ev9UssRhzDkbaOc4d8ExkdOlw03H21Vzd7u4v1ne3yxfbBd3N/3NerFaLZddH4hMCiCAw+qM8KvTb/VNRrNn8drsugioHfb1X1bn60tbrlghKrUouFRraoCIWsFcoCql5DhO0zCPZ01TBhjPx344L++KGWQpJmgGzgicXQCgCHyxErlcDMBGzlgDABoKqxnki9/A5fDnvNsfPnx6dN6hWanmxlK0FNWsaovFInjfd22MU5xTKcmuh9+Hpm1aH5zjOi8gKU4kVKorQHW01VIymNUw4b1rW79o2867lsE5ZHZAVCUhQvCOPbmGvFfvFJmNVE2tIFiNMt6hY666tc6FOoD4HFmpIl4vnwiAfX7WCxKjAoNRzdAQkdh5H5q25JTm2edsqobgFqsN+/nh8dC27ne///blm6+z8e40/vj+UzHYvr5/9eL25d32fru+Xa8XXeOZx+H8/unp3Y/v/u2P//rx/bs4HufzaR5PKsU5Dk0ITeOYRTTnXP0AqjBwxd+riqr8atVXBEgvZSIBYu3paqQ3rAQGU60oflWr03q6SCJANfcWARFUMyygAmoGLKqWi6VZUah1gckSZMLEjhi48w0geA6taxrXIULJKUvBUoYpnsc4zLkYNEwM1cS44j+vwO1naNDPIdk3LaNcwOcIAKpWmKxx2C1C8B37xU8fjuh/2B2H3f646ZtX22VoghAC6BynKWdQBM255CkmEfXIYJZKSaXEnDomt1gyUkrlPKVxjlAEAImRHJFn3/pl50Lwy9bd9M3tZnF/u7y9Wd5s+ttNv930y0UXfOt9YGJkJh/YIVMm/g+QZP/lq754l2R+gfnadTb4H19fioRrkWG10deSU8T5fByPT/N5D8QiasxgDQAisHM1kcGlB3j244iAHTk1QwcFVY0cI39B+F2NLsYQPF466ZJzVikEwgzee+eYGEPwXZtNCoEhEbN3znvvfd08gFppqno1VtsWUNEq0JnBqnw1E3Pwrm1C49izOUJENgQpCsjeOed8Pb0XQBVe/IIBzJG7eGYQ1cBbrTV+9blTlbQmJCajqn0DaEhIQGZMpEYCZKqGlw/HtW1TSu7mhFhARVWrokFlxRlI1zdv7l+9KthvbrLo/d3mxe3mxWax6kJDXObx4XT+9PHjT9//+MP3f/nxL989fvoYp5OUwgxNcM67EELwzjEjXuuV2qVctUBNi2l5/iwuR0BEyVgYEhuyAQM5IAUAdB4Z0ROwg9ozxEylgKPLQrWSSfWSaMwARSydocI20CiYZ2MWZ4VzEbOoKK0PTRecZ0UsJnPOCFQUYpJhHB92p4en45zyIuVF49vg/BXqeX3rwMCyykXB6noaHJN5MnRVz8bAwNgMQNiR57ZvF3O7aFzgbPk0Tw+n89O4iezbtn35YpsMTmMexjTOMs1piLHkggaVq1hdgpsGFZQQzJTIfGB
sXBt869l5WvXu5aa5Wzc363Cz7G6W/WbZr1dd3zdNF9rWN40DQkUTQm4CdX3oF46ByozOP8csfAb11S+u/rPLkvk6w8dLZUCXOGAGqnUg/ezI//VysA6svA++aV3bYRM0YS4pDYdx9/HQLfM8kW+46QgMkRH9RbOVgC5hQz//ZERkAvAMXDOMeTV2/DlbGqAaiIFYHSA4IFCQyqQCMa5dDzsM2ITgCBvvnPNV88M5x4RgQmiMBlBECoA6BjAtORWRivK8YuwAEJiQGapAeJWmxcpFAhSz6oktQIomhqIgasiIaFfjbL4okdfy/1dBuYrrEDHXrChmYFrFq6uhnpEAG4Axgyl57xCgEymLAjDNSVKe53kuIog4zdOf//1P7JvXb99+++3Xv/nmKwBrHDIBqk7n088fPr775Zcfvv/+w/v3h93T+bSfxkFyZDYffPA+eG4ch8CByTEFV2USXKVIQjXVqGgLlecTWSdyGVqaAJChA3JGBmxYAIWNGNGQlFShdg2CoHjpVO0i63GRTqmvRMmgCqhV2YYQ2QRFVZKIArI15LsQfPBRNBeYSzEBEZ2mtD9Mx/OUctFq78CYEeyibA5W+VwXdPuv9hYAQIRKZEB2sX1XVRMBKZbBpNA0RdXKUkRRPQzx43H8cJyWfQtAnh1YkiIxlSmWmERyqYbBl4+MUc2yCQKEwGuPiBCCX3TNovVN4HXvXqz97ba93/TbVb9e9Iu27Vrvg0PH5MkxsQ8cWt/2vl34ruO2JSgq+EX3/n/zwusvuAzha41/+Vg+H2/4Ncj3CvC5rPsqIT10/eLmPuXxLFnnKac8n46nx/eSU9Mvm8Wamdh5C82XguJCI/oybiCC2q+gVgagEwMfrnBXAICL4bKqISJgTatCrKhYe9EqW0fkHGHwrm8b7/0VSUuIhiqE5hgQVVWILDgC0JziRfbfAAnNrDaPlXREVEtcqgT1qntZhyemVq0n1KgiDOnCULIiYpBFLVf9nWf8kUs0M7vowFgVt9VrLX3ZjSMx1nWIkXOMAIRcF4SAaOdRoczTWKktc8y//PJL6Be/+e3vNuvNcrH0hDmO4/F82B8ePn78+Ycff/rpp59++HG3e8xpNi1M4BwG7y8SyMEFpuB9E3xoQtN4H9xnVdTrsdTLm/zscgjPmHGAFZ1Q1+eqGg2UzQEBgCpoJf8wKmGVQDS7fHvu+oYBAjqAYjEqCiirkDAoKllUD9CS984tujaExmbJOc8xp5hLKuMUT+eYi/V9g2CemAhzKUXqIgHQsFTv6Mpqe57iaowyElGpjP6Sc7YcJUcp2VTxaXc+HScVCCGgc0PM7x9Pf/z+07JvDsNwGuIw5THlWEquLxQhGALD5UNCKKCjFHK4XDRd4xadW/ZhtWhXXVi0YdX61dIt+7BcdH3Xdk1zKWn5MtdjcqHp2tW66Vc+LBwzUtFUYkrgvuyTrTrKqF7yP13HexeZqqtA+hW1ZwqIapfy6FeJ6qq9/Oy6YtPbfnX7+jfsvZlN+wfJaZ5n2D9oSRpXZMU3DXQdgFxMXQirNvul4kciBAc1KJsjU0JGRsTWIX3O/BUQpyaiRMZMiBzUGxGYmBYpub5CjKAAphWVRwBkSlXigKzqyhvRZTGCxGjGbACiaFeAcJUtFwA00OohaYYmqghmVbeFipIRFFVFVDXAqh3MiCQi8zQbRhUoUlKKKaYvczIzq0I0oopa3TMuDjtwmTlinY2gAoIjUEBCZmdUWwhmRJzmOE/DNKcY0zynNEz43Xf/r8Vyv9t99fp149xx//j48On9L798+vhxv9udTuc4TSolOGQKjtE5Cs45T865EFwTQvChCT6EENrggscv2ghIdTpUG/7nmZ+eJRO8jnMQABUMQEBBLkptlUcGaKiAjtBQ7RJN6hm8dKEIhiAFSkRSFEZ2yI4YHToXyLdN0zRN8I33weVCaFpSnPM8xXFKMRUEXPYtE4KYqpSi9e2pGT+J5iK/JloCAKhYHlMap1wsl5IllpxjtDjnacrjlOepPB2H9x8ehtPoEFvv0Ox8nn56/9g2PuU85zTMaU45SwZQJuTqI4lcXziH0AVuG7dchJtlv12264Vfr5r1olt1zbLxfePb1jWN883nFoyrGQoSOe+brmsXi26x8W0LxqBS8ixxSjGRl78qZP72ev5lfUlEcP17vB79K+7MwOrS7YIQQPoCVUdk5/vljanO00jIcj5BiSklHI4E6oP3y43L0ZsA6q8kuu2a82pcwioFCIZASAYYGJ9reFa9a2a+QncxBDSpFJCcq/gwVrlOJPKIzoChekaAWU3SIEzKjN4zMFn1mDQGADP58sEgYN2aIwExEFwUmrQi+fgSVoDssvigZ71x1T7O9SX+K0jM52epuq8qCBf1NLjUX9epSo2KF68dQhRDM8TKljQRBdPzGFVy5TOUmA6Pj9/967/Op9Pu9cvA7nzcPz0+fPrw4XDYz/Ok/z/2/qRXsiRbF8NWZ7Ybdz9NRGRmZWVV3SKfIJCaieBAgCBoorEmmgngzxIkAvo5AiRBkMDHCQmKvO/x3ltNZkZzznH3vbeZrUYD237iRNUTOZeeZxWQGa27b2vW+tbXuAtREs5JUmIhksSZSYS6580wDikPuW/+IZEIwI3atZ9Z3u1h3n6W3pzdyiuOfj1h3+QY2DuFPZyh3yyBFtDtvQDDwPpC2E2Ldz+pMILK3f8qcZolT2lIOA6SRsnTNBJxBAlzIiJEN99KLbVERM5yd5wSY1mbNhQK9zCPZlaalWat4zzfjPnBml4+Pl+fPm0tmmnVUpuV4tdVr9fyfN2ezuunp5dPnz7/+suvZHZKEh5o7enzUyA2sxbWwiI8dkE7EBAhCpOklJhHoinzaZbH0/j9u+PjaTpM6XgYT8fxMAxTkiGxJNpD05hFqMcq736V83y4ezceTsMwgFtZr60uTVfTas1ktL8p1N9O9fBWVL1yfaIPAmD/JxBuMOc3L3zl9ONtTrAvhm7UyuN4fPzwuzHP6/Pnen3W8qIetbWtFFpXKiVpS+7R8ciA6FfdfoUEYlcWefS/iyAJpTeTi4hQM1MzduZOGSJiwgQYYSoI6GYdQWeEJNT9fN2xu3maWasFXIUiZyFiJ2zuAOHWO7sIiH7KAvTMOcdbQgAEIhgGhUMA96DZHQhBIGJADsTb2vXov1OYQJgp569xXf288LA+mOyDvd6Y+I1y1ocuzIxE4eQOnekLiMgieZjnPfrO1Vpr2jiSgLYvv/x1fXn663//j4zkqq2VVjYzTUScOEkP8OWUWJglUSJOQjn1DT/koQMAOeXUu4zdYS1gP63C9gXzuvnddm4SdJo29hDuG3sJwSN2Y/id2hFI6OJfuwlAeI33dQjvhb+MMmZKmXmUNEk+5HEappySIItI1QjVUjvwa2ZWW6tNAyIDJ6Ek3Ip2XYJ1Ss/uV9sXEL4GG94WmbdS67o2w27WZM20ai/dtrIu66VsV29lTvC798d3s5iZmVcFdbcgQ3F0RGQiQWREAWCGnHjIQxYZCAeGaaTTIb9/ONwdxmFI85yPx/EwDiPzPl
khAgLsCczEQswinCQPQ8qZmXvOe9uWbb2UupobOObmf9Nc9mnl/0AxcJspvzkw/mbnA+4z+f3w2I8A+IoIIMtwOD0yCnovm0t4CyDvSU07drLPejHgRvCBDvv1YfBeWCBCAAe8vfZ7GwzUZXPosdPgCIABiYQlE+5mGogAQO4YAdp9zDrjllMgYq/5gdxB3bs0MCLMv07bOge5T0Tj5i8YvaaF6H+n79+U9WsPkHxvbB3A+2ktwhCoEF/b5teFFuFh1Nsv7AOAeAWGcIceEQEC0cGAAt0IAIhYZBiGzvwLjy5MJYDWmpd12ZYFgWAnfRCCMPUYviQsiRKzCIsIM6VeC+SUc045S8opiaTEkgj4div0srN7rnyzXwBA6haA0UODPKInBRACsfdjdH8MX11lgJGagDBQ/x8BABj0NUGIIEFzGh/n+ylPDMhIzDxwnqcpJQkLc6hLq03XrZTSWm1mqh7NPUKToTaDgKZmav2p9ffCBElIANxlSJw6ErAvMeCB8pzAWQLBQk3HVvJInJ1TSjwcsr2bcXs3aL1TbabWVKuGdvxwTxhHJmagznuUBCnJmHPnQBIakSWhIaecE6c0DmkaZRh5EBEWJEEigABEFmaRRJKSEDMzmq5lbS1AW8BrziUAAJTxSURBVNu261q2bauqzkhj+8aT65WPH7ET5l+3d3wF/Xsb/BZac4DXW3cHYV635htmKtwYAgGAiUfPVvOU8mh5AMc0TJJHkowk4agK0SGyXk33M4S8vz3cxe2v3ccu9e3/TYQp5ZxHSYJEZq5q5kaAwsJIfRSnrbi7ELpQcACimiNizkNOQ6IDoUcoojOGu9Wqboq42x4i0Rsz5p1ybBFofUhItwEJ7R0B2K5ZJALiXs70TEUBZCImjoA93e+rRPGNcuH2dSLxzvjHTn9FQul73yDACcD2BAoAYoKUBwAMQCTv0imASoDueLut+1NjQGZMTCLMqd8gIsJyu2UkyZCHlHozIMTSY1oIuWdwOhASJEb2MBX6lkcq12vtF0Sv6LpIlBCIo18XAWB98+/2ibuxlwgyI8srO6V3OJCIE1EmOY7jPIzosAvJRBDcWitVa7VSo5S6bqXWZmprqdtWWq0AVsBfzsREZauuxkwQoBGqVpuqh0eUWmrVTgXvn4SY8mEAmMQpnNDBTWvDcQjJOiSdxO5HqKdsesBQcDP1plq1q51wb296KEMgORICJxShJIkII2znmGEgELFgkiFzSiQMxHsxgrSjaiTcfRz6XWumvmkvnk211K2WWmpzdUdU1W+EcRHu++66uSffVhvsNv23Bq43Wn3zx+uGv927tyGpB+ANL3zd/DtwTUjCkiVllgwOPeydJRNJH9+aKzakfbDPzMjiTF3Y/8og6Nsi4k0R0us19SCPjmKam5kR0iCRWAgxPGynbWJrYaoBGBHMIuKIknJiAvcKoaHN3M3czBBvQ75vVQX9prLYe/GO58cuVd3fZkAAmrlCgBl4YGCQw84Rdd4tNN+ssYhQ1dYa836RI5J7QLibdagcb1ZqEdBLy9h7grjNqQCIOKU02NSyW1NtGBgN0IMIkIIQoRMwCROjCLCACIqQSL9TmAglJU6JRbDz1QPc3TSc90FlfzDeZyR/L+l9fvIO1eBb+5XbAtqXVEdcO8EDA8Gxp3czcGcH443vwTAPMhyoe10zuoUT4jAKo69LKVtdlrpWVfVabSu11tpq24qWqh5GDFZxuS498IsAhHuZCmpemlazqrasthicL1e9+fYj83R/SKOqRVigAyhZ9TL6kNsx6SlHPUhr4eYQBuZgaKbNm/dMt699G4SHVw/vAwQMkr6OCIG54yiERMDY/eTDw9QAgSg4mLMQ3SIivZlBADjCnhcD6G61ttY0uucpenh77cfcoZ90tFtcvA6X9nJwX8P45r7tq9Pj9eERfq0IEAHQghzshvndbuedZWKBREyJKUWH7ZFFMksCQFVT93AnB2ZMQ0qJbkxDp50vvC+Vv4FizX3Z1ut10SEjke2nNzAz7vflfi8TEiCYW2nFPYQlUrRGzCAMwX06YN3wGtBxVz5AdGaD70XuDd7svEaIfZgHPQTr9i05kvpuuk/qvUIW4u6rsYvft22t9avJintsW1nWBcJTUqKEu7R0n6L1yrmr+yIwwDsHtnOB1KwfW67mPeROSBLlDNaiFjW1ZMAMLIBMCB1p3s9rhO6Rz0iAhCzEzJ2/YB4A5g7IoRRIvJP2gIjBDE113bZS69veUv7X/6v/JcCuRuzf/ttxc+x5OLeH88o8eW12cMd7O5TMQtOQ7uf54XB8ON2NkkyNiKZpAIBtLdta163WpuZgarVZU70l8nlAsHQTsVtQHZLwrmTxG9TfzLZSrtftf/6f/KfH0+m2+SVNd5xFzMECPNDMtQ6t5MPd4VRqUW1u1nWnvc7DCDdX75DMbfMHRphbz9YGdwRDir5/KYRiv9+h+5wC0Z5j0PECIpbEJEJMARjRgda+MXcrjgjPaj34HMIR/Pjue77ZKjPj8ThEv/nhdkG/Ytk3kt3tgv1K44k3//4W/dtxProd8wAYXw8OD3Blm6MdudwP4W2QNMzH8f4hHQ6UcyBr/94cmDAllEQi2KW3u/0jACBYgAgMb7rLcRz/8Iffe0DOmRDVFCIAiJgyixBjt/+6uW50xy73SJJYJCVJScYhMRGERlj36ouw8Iiwfszh61355hLbRx2BXYDz9UzqnAqyPrdwwE4U6ClBLIkliYh71Fp/+OHHlIfbZ5n++Md/BQDjkEQEURAx3PcAy9uz2HdNIPTjDbrfgZvvslq3TjNQ09I9CMu61aW5mqB1pBEZg2SX1RJxz7Zkkd4AMLOQsCALIvcmq69FBH7twrrcmBDVrNTtD//B/3Qcp6+r4v/1X/w/b//694DR6xnwLe785vX6jPHWYRKRcC94d8k9QhcCdV6j9xyinXT0php6BWD7aCbezq32I/vVJSbc3dyPp9Pvfvf7eZ73dxmtr4ZbyQMR/jqc6bVz3B4QvO6VG2b+9en1H4yvsNgrgwl21B3efCPw9m2+RdZfv503A/x9FL9jdfH1p1jyMO9pSu5RSlPtl9ubN/w/9JD+7vX30P/rs3ot028/FJ2DYhauEN7ZdSTppm5Hv31FCHBrb16nB9+8+twu3X6q1vr5y5dtK7gDmP765l4hqK/1JnxdDbeOunN93lSiXzGQt8ORb4ugv8Xo/uYt9l/zdr++/jF724bd/cc95Xx3uu8WXbWWT58/buv6xh8BbsXim+/zb0HauDVFsT/wuP1neD8Uwt0toucO3dCEr1vgtejDW+f3irAifrMm8E35/voTfTDkPkzTu3ff5WE/y/DvJ5n//vXvX//+9f8PL/of/yX//vXvX//+9f+LL/nv/tv/dy9ver11K+roVlF8feFryYHwChfffpz6AGmPOur1rN/K6dfyPb7+kb3i2cv71+Lmzd8Gt99L3UeO0COsqVsPLw9zm+f5N7/5YRxHuEGdr0XlHnZq3v0hzQ0Ru12XiOzmZBG96IKu/NipTq+l9v/3GjtunyH2UnF/y
/+jr/h3FIZ/Xzqv6/rnP//5el1yzkwMgBG+w854E7wwp67e4FftFr7+uQjAXQrXGR7mHjvXb2dI3JyyiagjmHH7lTeqbPTIN2bu2ukAuDEr43UVeEfpLSCCEN29Nc3j+Pj9h15etmZP57U2lT3k6raW9hyr8Ogkrt4O7okf3UZEmBJ3m9a/bzlf+zN42wX8TSEbb76QbzjOt3YDkeI2W+kYz23t7O9NhKcxdXhfHS41mu0rdm/i/h3t1ev448aEhi4nsP5PABL3eWLXJ5mpqjYAEEksgsKIGPF3H/rtsvmb7+Irgvt331HvgwEywSl/jRCU/9P/8f/g7q2jYapIkFKWlCWlboBOAO5xE1h1s05CCHNr1mCfxwzjMAKStqZqfUf1QLz+OJkTIvZVRcwR8WrY6hDMQsSv1Ig+BO0NETHmUXhMNKSm+vL5pS6VDFpt12X5j/7j/+g/+8/+93/84z/0Vb4uqq1ztTknCrO6LC/PT7/+8vOyLcxyujv98Jvf3N3d52FABO2GsWXxcEkp5XEcZxG+beqbiOWr/OH2tXns4cVmQMjpGxHrmyfz9oF8XZq3gdhterfPS76umD/9+c//+X/+f/6v/qv/+rsP3x0PJwautZ7Pz7VVTpKGaTrc3Z3u3989PJwOd/MwDEIChIS+W08K4TgIQtRtW7ZyWfdYGmTMkrJISsLCktIw5ON0EKJWW92262UxMxEx1WVZiPB0OkzzmBObx2XZiraAIIrMFBF7AM9SoPlAXLby8fPTD3/8h//N/+5/+8PvfwKAp/Pyf/l//Lc/f3w5ztOQE0IQQceDAFDVt6rLVi/bVlq1MKJIiedB7ubhYR4fD/M45E4Rwht5B16hTQcPt31OeJvAww0j7ay+AIBdridd75aEifpMVyQHQG2t1LZtmztIToCE7qre1O7u5n/1H3x3PAwAcKnxX/7VPy4h+2kYcBNefPvwv0K7AcAQDAZadLtu63K5Xh1oON0P83E6zBC2vrxcnp+eP30OwIf37w4P98PxSGnYcUlHuAEUtx4eAAB3NglA1930OxwQA245avueDwQLUIsfZvhPfoDHcf9j5F//l/+Fq5WymVYwJ4K+8zklZiHsgQecUpKUmblXB0Rhrlst4c6ccpqmaSbiVpuqurlaq7WZ9UsViQS6gAuAhSFAVUsty7J4eMqDSGKWm2/cfieEOQnmUdJhkMO41fb5rx/Xl5UV6lZfrpeAWK7XfW8FtGq1GCAxAwbUdfn0888///lPf/rTPy/rdRiGd+/fhVr7UA+HAyJs27Ksl2W5eNgwDPPxhA/vaZr6jdAdBL8xr7hRw6LHrzVrtSFjxoF3DtlXSO9vjt/X3wi3X3Sbyd9uj862AACAy+Xyr//1v/6//V//77/97W+Ph7tQKNt2uZwDYj6djnf3d/fvHu/fnR8eH4+H45TGUVJiRA7HMMAIYZgHQbCyreu6XbaymbYIZEz9XOedgzRN493hkEm01baV5Xp195SSma3LwkzX0/FwnHIWD78sZa21aQPwQQgRXEOrta2hekbalvXXXz/n+aBtT4Yppf3LXz7923/++HB3nMeBGYUpJ2GkAKzNrmu9rOV5WddS1BpQ5MSnKb0/TcvdrPf1OA0sN/IuRp/t71itw+sE+xXVfd38ABS+g/xMyLs9QEopMTMAMnPKAYC16bKV8/nSmnMSJGZEVd+KWqDqPk5uBh+v/i8vkRMw7XjZjQH4Zi4GAYAOe/IPg2cwaK1d1/PL88dPnw3o8B6OD3zyjGCXp+3LLy+//OVjeLzf4KHx0QcZkzpC55vAPqt4u/mZeh2HgDfpqYOBdycuv5m7dbm9uddm6NA+yOsfI/PhEGY5JwgTJGYI6LMb6ty9PAzHu+M8z2kYw/16uVpTFrTQQFAzwsQsSNwl2czsZmzMLBFBOwsd1Lx1h0BEJBqEkbBqc/c8DMMwJEmInWsVt0M8kIAEhGXIAyANefBsRE4BDtM8Da/Z6R079XCECAfTOL+8/Jt//O/+6d/+489/+VPTend3LNvVWr08Pz08PiLCy8vTcj1v2zUwxmF4fHyX3OXxsbvZOlAwIchX/7pONw1yC1WrpW7bhkwgnLqhI96w3zcgNr4ZFfw9uur7vArfEpaZaBzzNA3MqLZdr1urDRBOp7sff/zpw3ffPT48ng7HeRgyE0J3zucIqRbuQcQG4M0xzMI985iPEmRBHggYbrZZiy0A2lpi23wQJnACx8SZRFLKiOM8ElPOuRvcMOJdnnKt5+tVWwuGlPJ8P+eUmZgA0b1t5f35+uF3vx/3EQwgglAIOaEyyZByTpKT9IuEOdzJQwLHnLi25uFMmFm6ABoJgLrk5Bsofy+2KehrKjER+W3z96EDRdzMx4hEJHUmHO3NDpEwIxJzEkAsZVPdmlYikWGQLGSw12X7Z4nEMXBkCia4bf7X8/z2xHuWKHoH1Bk8o2EYpyjQopzdkfWUow7YCMAFisDIoaFkhbRksIFcA+Pm8gZdp3z7Cwigq/V3C/uum/Qw68nC3aisOyv3za8UJTlgHF7XnqSUujRBCMecU9o7zB5ibeHDmB/fPRxPdyypbGVZVnPv9AOWFEgAHIh9x/ae2frMlagTngKgzzWxR3N1/SRRAkg5h/swDkMehAWAzBy9T0ex95aA3n0DE6dhGGJ0qI0BkXwaBn5bdXVdBqObblVfXp4+fvr106dfl+vZrIG3bhp/Pb88fblHhMv5edsWs0pE45C8bLMkCp9PpzSMQQlAEONrK/1mdgIIgWEQ9Erp/mbm0ovCr362uLc0+Dq27Eyw25AzIt78ZsQsQ04Dc0JkIMhZDsP4cH/37jTfjSnzbqETQLXWWtU0PGCt3YNHhHEQIFD3AgQiOSUZZARkc2+qUaqZQYQFlRbuxhRCICzA5MSIhIyBWAOtObntLheQiAZAbGoOKIl4yMM0CrGbOqeR0ng6vh7KzHSch8MhCweGMklOOI9JkkBEqdSlptGtARA8QpjnMR+m8TCN3QGFmRF7APTX++92pMYNvADfpX5xG/p3k0JgJr7BJPte2R+puxt1Z1jpZDl20y4L6IGBb6EoBBCMzJEYXjf/PoTcD3oAAKT9AXcwgcEzepiabVYv9fqlOcV2x3bKoMKMmdsgh8wNbCDP6CPDKKgYsZsQwKuLQF9bOwGIgAm6G0oEmIMiNAtF0M7d2t9iOBpBHV4b2b753RwjCDGlNM3TNA45ZzO7XK7btrm6CJ+Op7v7e4+otW6lXK7X3ES6QxKJe/QOPzySyOv8lol2X4W30OANZ+pZEDlnBBjHUSRBhFknQsTOWejxYLEbqzPhOA6o4QAWwBTjkPCVxxbQmTCEZFrO5+eX56dSVmG8f7hrddvW9fnpqZb68vSc84AIrW0BlhInYc1C7iOJa3un3x/uH9KIzIwQGN2wGAB25ImIGEQicjgScu4sdUCAm6cWvFIG3Xr3R9ivr1t10JEEupUDbwfeiESUmEaheciZaJgG/uH+cBwS+rp+2Z6eJR8e3n/4MaexLRVaFXhWbddS1UM4jTnfHXLiMCuAITmN8/Fu
SGkYPLDZMOTBPLpZFUTsfGcICDRH8E4rNrMbKwNhR6iIiVgjrZuabs8vbcjL8XjMid211basGx5Or6VySun9h4fzWrflEt7CiSlNY5rGERFKqeFq7s3NFJwJAYdhvD9Mjw/Hd/fj/ZGHhAFgZlGb7109vinvb0yRV7+lff/jLXqzqyqJmHut7uEQ5u6EiEbEytKFi5LHhEo3jki3rftarvXejjEYo/NO4VaD3K6HWyuHX3kEHJEwmte6PC/Pn85PHw2oPN5FexDQKUkaks/jMo+V8TDkQ5ZD5px59yhSCA8j9NvfQhDUbx/orO1OhgCCIL7RASP0BpsThpMx20D41gJDbqh8t0DdIah+MzNzQk/7qWta67Ys67Ku6+qRh0jdbqVj/GaGgK+Ji0TkqjuWu4PUKEni7UpHTCkRddUkm1mY7UOHmx4LIOBmG0AieRjQoKmRO5Mn+ebiBw/wTlz3Wkopq2sjgmEaKsZyvdbawqPVxrQigkVjwpgzuIAqAz5LTikdjsfpcMDwVxkavgYY3XB+cCcIIQDq3ob6SrW/te8dxMMgf0V/3yh3XnvEf6cOl3KeDvPd/d2HaRqrLWPy+1Me0duyNlWkFI3rerTatkuxUtirWyvuSJinSBCiCua1qQVgMwtOaQhApARAuK8Z6scPxp74S7e7rh/Dqt5Uwx0ZiZgwktCQBYmYw6yUqq2tTT0JEYSqrVvZ1vLqf5NE3r+7X7f2/CVM2zjkwzzc3c2HaWLCUirzXq97X+xBY5LDONyfjvf302nGJKBqrTX3vqZe6+y/a6L2h/MKdd0Ksm/4SK8W1maO3c6ni1dYKEMm7mRT2E+9b/8WQmACQaDbWrgNMG6P/rb5XymUFIFu0UrbrnU7W10ssCwvbT0L6ChkWWxIxyEX8Ewo4BzO4EjQmdO2mwbR7SO/Rf+8FwQEiD1rKOD1C+jXKGEEOUZlJ3yTPiQA4BGmaqpdFTsMmRDNlAlTGjPzdj7XZdm28vT0cj2ft1J392IJwl6+7vAi3s6OiLBuxd4JX73OTykiVPUVmOH+EiFij0D0vWwg7q5MXYTSRzEEkPNABrAVbM2B+A2LDmIn0XZxEu0ypCCExGxM/WeIRCSnlBFDjYiAOROBm7fS1mUt2xbmBNitw6DrS/fTvY8gIMy9aaiiKRAaoCtCOAF0XwRkQRJiRgJBirjhsX8vvLmdF29/kIin6fD47ruffvsP8zS8XH+19qJtq17Yyynzh7uBsqi9rOtlfWmtGEcwkUzpdBp//HA4JsDtum1tUyjKqrFqWeuXaVyGYUQW675MxMSchXPXHjLJTrEHN2sFaniPpclZiCVAkBMwJ8rjOLm2dbnWba3rRSGGlCGQmpG9mn1ASvzu4d4M5oG11pzleJjfPdzP85QYm+o8jzknA9DuweQhGFnoOA13x/kwo5DXpojQ8WN3cnfkztz3HV7pHPivlLtbc7VjXv0uRPd+EHT9PvTlAhoegchEmHNiF3NT1aZGGIhf+zHsm//m7uJ7Z9qfKUFH4AF6hocjYCfiW7juOUWhLTM283I9l+sLgY2JVaklnnIKa+Da48xYBCVRdyv5Gxpfnzrd6osbARzgptoiAIJg3An7BGBg2lZv/JpqDd23v3+6vi2NOSAkpXHMzMLMhGStlXVdlnVbFjdjlmEYpmnKOQFgKS08bgxXiJuvfj9PO1+gV/Ed43H3XkdD3yq8j9xv3QHuCARi9Ju8907uEMHEmBIMg7i7YU7yzb15K/4AMKU8jsM0DWUVvI3k3UHVmZ2o+xHsE11V0231JlMe6rZt1+t2vQiT9EDxblXY62BVrbWV2krtURNAREmQe5BGrzZ7RmFOeUh54CERS8918Z3X8LbE/9uDoH8twziMU1cSg5tu21rqeUS9H3gch8fjwYF+/fx5Ode6hhsJ8XiY7+6O339//w8/ngaw57+uZXV1rsEWWZ2jhnmrDZFbBHZ5FjOrsGXxnELYEZlRmDtYfJusAWNv+LlzRQFCmAOogqHX0DUAUZiQE4XQ149FhNOU7k9zojBtwjSOw+EwTENKjOYkhBG+1GpmiUnVEvGQcciUM+VMTBQI7maWAMHMfTdutV145v6NodttLdzwlO4L0gHXuAUQ72uiP5CAIALqGQVEYODuRMb0zSQPAYQgESYCIrhxShBvA7+dB4MBAI6ACIxg4aVtbVtb3cB0EKIw3666vGArHBYYvPuSedsWQM9jYohhOqIMAfxqV/BKKLlBSPun7qrU2xAZdkMlAAAPb6HF12e9fFIcwr//uvkJCRiSMAKmxOM0Pjw8HE/HccgiEhFtq8v1Wrfi5ggw5GHOw/v37w/HQxIqpZT6pKZZEhA5QOfVmKr5rmkHJPM9xK3XBXBDZbrQuplBaFN1d9g5LIyARg4eoIQA4e6qQEiE8zyDiLYyjm+z0/dO0CwQcJ4OD/cPjw8PWpbL5aXUGoAOsGxbqZZyTYlz4s5NsLZdX56nnO4Oh1a358+fCMLKOk4z877c3cPVt2V5eXq+ns/rsrTaPJyIOSeS2xHmFkQsOQ3DdDjNp+Px/n4+3o2HI6ehF2P7YduPgNh7AIw3eCJhShJhX15+DWufP/+8Xp9ct7spj99/4PEhj6eyLM+//vL0+Zk5D5LnMX+Y+Xe/nX7/+3e///GhLeX68blY3SI3HtI4SU6JCAJKMy/hN/E2om4AwjhIB8IxJRnHTERmaupNARGrAkcAA3Z5GgEoWduW85PWhRFSTmlEZiTi4e1jiYDwlPDubooYGUGIhAGidSOZIdHxMLy7PzLzdtzj8YYMRArQAgQIRchzMnckNHU1dzNEtD0wLN7Uf3HrHNHdAKL3FKqEZEwcvo9V3sg3urDQA3wHYhCZUBiEQSheTxZEzEQDRxak3jl1AfgtBhx6N06I1HFIIIRNfatb3a6trOFtZBQHbwtuF1ivUTY3sAgN3eq6LmdZhLyxtvQOaCZHdKTOj3u1TeiUGILo0qEbMIF+E5wjIENgqLXr9vK5PP+yfv5TjfvQf3iz+QkoKISJKCdJQ85jnqbpdHcUSdraRlszM/MuNVRiZEmSUkrjkJh53SrT1kkYFhYeTdXNAhyhJ7vdgrVwN5YhIt+1hegQrhbRe8sdANulkT2Eom+q1gCCEnZznHCKBvtY4M1x3x2GmTDn4XA4PTw8bNfn8/m51tJa9wBwgJIk5UHmMQ3GmAhMGYIRMKxt6/OnX62s9fo8z3PKSbj7yYFrrMv15dOX88vz9XKptXpEkjQfD3kcWDggWmudtmQp2fJSL/P6/Plwuj8+vpuO9zIeWPL+Xr+58/st8nVsqVqX5aW1RVs5v3yu2wrhQqlGLpGuq5XLppcLb9fj7PeTvH8cfvzx9MffP/7wm/vTnL5srYEUGIwz8Egp994KPMLQukVt75ojIIwAVax7jYp61UBCD+tMSgRkUxbopmuBQBbamtdSmrpFzoKcgtmQDPDv3AidMDh3w0gnQAQH91tyEOVEp+MkOZlFq1rXVQSHjISO6F0qKSI5BxGbOKubqqoi4k3SDUQ71PfmFOhwFnS3bgyMCCa
iLsaM7r55a+rQwZ14L9WJUJgSo7yRRhNAJhgFkux/HXYl+23z495/92IAAALCm1evq26L1g20ZQyhcFParuvnXy+SnHPZStVS2rJcnsldrCTweRwlj5AT8m5R9Aov/A0t32+Xvt8shTHMtUS9tsvn8vxre/kVts+kgmGvv0uEOtpLzJSHJMxmpm4owjk1M8wynY7DPAfA9bL6p8/rti3LAhg5PY7T9N13vG3bsizbVos202611S+y6KTF8D3TjJjp1Vwpel0fO0PUfHeLcbQw4n7/77xI2xRCJBGCqFmt5Xpdl2V7dVa9may4uRMysYzj+PDwcH3+gv6nbVuXdbksW6sOgCJpKKJVDqPQPMxZjg/305AzU1uv9XJenj5epmmexmkehpyFExG7QSmtrWcrV2+r1eJumebTdH93f5ymERBr3UwVANy9tXVbzk9/rpTHu3cfHn/46bvf/zHfPfb7PvbhQH9kXTW3M4pM2/nly6ePfyGmcGt1A4chZaTcQp6u9fx8jutZDL6bp/d30/c/PPz0x9//9Mc//PaPv+Vh/PT56a8fX54qFhppGDMn5t2thRBoYHSMphY32DwiEFQImZG5Aqyl3hq3XhRDBLBwHjzlJDlRoKtFA8OJZDAkhQSe3Hxd7Ly6vvUkC49wsOjBeoBASN7nDAAWBgHzlKdpYkkR0UqBaMIgcmM/EjJTSolZwkF7iGatRKT61ajSdAeMe3b4mzcQ5q+mA3vAUdyg/NvmN3TyLs9GZEYIcuUkX91vEGNgGCUS78wbRGCMnnf5SsXxXhNQhLtZg7p6XbyuUQto49AETgxcli9/+qdyXeR03wDLdml12a4vUCvVNSPc390PhxONMwi6RkDQXjU6wv7Ob0fdvv+jQ7ahrqUtz9vTz/XlV71+wXqZqB3yN/CyRLfoIWDBlBgptrLyylM5IFO3eOEIEhBmQNpaA8TampkR0TAMOSdJbG5Nrap2F3XcuUd4G2xFRKgbQTAnujGQvEcuxW2h7cd2t2RkpJ456QjARAk70cNKK+u2LVvZ6jfGd300g+jA1GF4JoaIWspyvS7LZV1bBGH3b3TDqBxpEMCBD1PnsKK1si1LmF5F5jEfDtM8T9M4iyR3rFXbtpoWCGX0nh4x5nScxuNxJqa6UWvFVUsx06LX6+X53AIvzy/rWigNEDgdjpyGuF33scvBv94w4V7KuiwX2g3LQzilNIiMhmkx1GuRtd1x+nCY/+F3H376w4+/+eM/fPfTb999eLdUvSzt4/P6tNnVoBEAuruj750GIljnxXbge/cCJ0eyXccaqt2C5nbRRJg5Wzg2RwxiRrQeqMtD9FIzUJVdsWiqxt/QFvq8t594+4KL14/fzbOHJCQ5DxkRrYlpVS232qRvvA7B7o6oN2r9V8yv/7pbMQNfP+2+5/3r/bAbJezU19vN4dGxAexum4BEzN8EECAAowv6ftq88cZD2HHvPjbx8LBorWzXy+Xly3Z90e0aWsgbu0oYg2Ndl08/l3XNy7kxny/X6/OX5eUz1MptWnNanj+nw90wnzgPju57BOsrvPF1zNA/SCf7oTdvS7k+lZeP25ef7fqFdElRB4H8Nt8OQNSNui0PgQgE6OW6blpQWP3xMM8iXNbFzFCSMN6dTszycrlIf7EEmCjnnPOgzd2ju6RD9x8gJojQ1lpXL3gIEiH1nX5Lc4hOdKevdiLh7tqcEQQgSZqnIWUJ4a21pWzXda3adB/G7Kd7f37MQdC9Uct6PZ/Pz+fz8+V8vl4u1WEcjillBESIplprqIq7k2DPhrHmrdWyrRi+LlLrwbQhYM5hHrVabdXdmXvsuQ9DTru/GjMjJAanqgGhbtrHV9tWns/Xy7oZUGvtt7//4+EhR8QuYYDwrl25UbUDuoU9EjATifCQ8pgPeThAmoypSabUhPzxw+P/5H/2H//hX/3D9OG7+e7EObf1fNni+arP1/q8uJGxyJiIAcC6jMmDiBIDct/fhCj95ozQ1iICgxilR0cDQnj0cOYw86bBGNTPV2GiiG6qBegYLk4zphlwb2EwAB3RsA+e8BWMjtdAv0Dsi6WzioAYwFGh13y9P38dtAAyMKEzSlA4dcOaAArgTmhB6iiTQ/SO/KYLet0wt8nr7oIBHTaHAO+sjN7DQtcwvJ0oATj4HluLPXUvENGgdwedbg8QaB7a6vV6fvr8y8vnXy9Pn+p6QasSDa2iGXWKe62tLNt23gI+ny/n83k9P6fAEbxcz8+ffqXpkO4f8zB2fj50xlnH+vbRxv4dMqEQgVer13r5ePn4p+35k5ezaBkYxm7ryfL2s0hrjRkTUmI+HCYWIgaUxMLENAw5zK7grjUIiWUYsgdUVWISESJUCwAQkZx3B1oyw9gjjYgpPAwcPAwgwiGsJ5v0yVcHBQCAmPgGYZp35yMAQhHJkkQEgEyt1dZdpezmzfH1FQ6x7/y26Xo5X16eLy/P6/W6reu6ro58mCnnFAGurdVWopXCzcaI6JdKiIhwI9Jaa9i2UU48jiMgmUUpbV0XVWNm5t24x91aLesajNDD58u2Lst6Pl/Pl+V6Xa5rXauuVR3Z3FNKgTDMB0h5Hyn3K+32OQgxpzTmgVm6tiIPwzxN02EeppEwrEge8iml9z9899s//P43f/gDnO4pD2zgsdUCZbOqpuYWhOSdsgA92jZ2jxn8SpACj1Cz0OhWgtwjRvirz/9OunDXViG0c32IGZE7kmrurtWNwMn89b4FuBnlf2VIQLyWeZ0TQXuyMtJrLhghIxpijxPcZaLu8RoagnHjoFNEUKcfcs8XQ0I03/1HaJcE9Y9xG/nvE+U3hcVtDYFbr9t3wtC3g1m/udpFP8x64gCEAvCtzEAAs36FLJeX58vzl/XyrMsF2obWyHX3Fet+hm1za5v5dr6s58t2OTuyD9m26/XLpzQfDu8+SM6UBkTpYWz7VHEfg+0nLIChVS3nev5le/q5Pv3VlmcJzQQDS+K3Fqu3zV9KFSEhyTm9f/dwvDs2NWAZptM4jNM01nUVRoNO+/JARMY8DCzMwhHRStWmTDSOgyMQk6qBdxEMBsBu+MwQCN4vNURE3mexCBCB1L1cEQPCzXu2coRQmsYxM6u5llqsVjUA4iStapfevH0hBmOE6rK8PD99evn88fryXMuqWkstyJmYOqe4Wqu1RLNppFqHpg2gh58xggvztlK4AkYf+hJRU9i27XI+q2oeck9FMtVlWcP9/BIY5tqa1lrK5bp8fj4/X5brWpsBymDb+pd/+ad1WwOhqv74+z9Op+QWDgCMFF83DBOdDvPj3QmQPDDAJKf5MN2dptNhoDBe4Sjy/bvjdz9+d3p/P90dbD4EZ2qAmKEFNCeAJCSSdy8sJEwYGAq71+x+pUUvUdzaZmbW1M0hghETsxATITOTJGBS81BbV2WCbrDFqbv9sIdva9HqFNK2NfwGLGH3b+5BsdATA9p+p3bKINEesdkPIw/oHnWC5u6ouo+K+hDXTQH6CfL18H9l8vQ4IifiV8hix1ZeZ0ydRELIBK+iDdizJ/YhQHh0N3r4Gk/Sf5
kBKUL3xrZOFACkAHr1CccQ8HANa962aKvVRddz287UKpnuzuQWYRaqwE6SxIPC0NS2zZhRa9StnL9cP4/njw+c0vTwXgZWB9vPI6ROJ0BHBI7wVrbry/X5l8unfy6Xj1GvI7QxcWbKhNiVqN8E9oA0M4BQQwAcx/Hdw4PkIY3TMB4RqbXS1qVs27IsEUAcDbypq6trlFJCGBGziAMgc2cIElVTNXOEIKYwNO+2/M3DJYS5pwPsFVlEN8bE/vRDg9ACUYhT4n6Kb9vWVPv90YlBXVAA3756Rala18vL9flLWS6trNaqqZo2JhHhlMXVK4C2FrhbADPT3sdgQIwIEaBWgWg/os3UDFXbtm2qCt1kgEBVS9ncmlsLa9aaaautXZbly5eXp8v1sjYHzpMDcm11KwVIAPlwekjD5IgdWH8bpiJCj6fDD+8ekLMFlFZTTo8Pd/f3h+Oc2XU6zfcJf/rdd9//5rtxHvd+aW+qHVRRG4UxMSQSEeoz0p3ysbtGgTsihFvT1sXN1hqY9akm71uTiZlEKCdnMSADMDfqmldjrIUli+SwUK3aNAxr+QrEAkDP6ez9+c7+iM6GomDArgF5cyvtHFPmHptzs2Dzm5JvV/XdZkhvfxfuRli7vdWNDXzjVvTN31s27FRr2BVkEACdO7gjA/CmpdxfHl5LuV5KHzrKMLAIs/TyyMyt1XDlMLDNtst2fS7X57a86HbxslL3L4xO/e5Nqgfe8sOZCanb/KErWvNtqS9fLh//kvKQp1mGHnXzlSDStVhgLepWry/L06/L08/1+Zdol4w2JByZkrAQm0dp2uybHSMRYB61WeszGxkeH98/PL6/f/e+tvanf/nnv27b589fLi+XuztPw6gkpdrluiBAK+XueHi8O0lKpVVqbX8q2tRdWyWiKc8AWku9Xq+lVABIaUgpJGBn9nGAd3AwqB9lTKjBRIdpFKLWai11WVckOsxzSqmYV/VOIoxvHz9CIHpoWS8v2/UlrFFv5FwhumCZU0oGjQjcFdGIICeZ52meRmHGcJGUB1VNLRxuFOmILvYADzNvALnPKFRrq+xGps1aqWVTVTNblm1dt+t1e75uzTEVYxbEaOal/SOQfPjhp/FwzPPMkj1iDw0ABIAk/OHu+IcfPuTpZEDnZSXGh4e70zwOzDlYhncf7ub/8B9+/OGHd0hc1pURSVJ4hF/BN7SK0RCAOIjCXdW7Gja6/a1ZM1NwDVXT1ta1Xi9RS44eaqsMLgQD05gTCSuSSzaePE04jIFSzcKaWzCVMY9M3GXQtZTa1tfDLCLM1awBdLtUcw/sOkai2EWeaHZrIwh3zjNz9BuyW67s9rhxu5l3K7/XBYA3Bu8eTPXtxXAjAiJx973cpVlft3ifzvdhoXc2xk2b9br5VZfnL59/+XK+XCSn99//cPfwmHNOktCjalmu17peIFq0xbbz8vLp+vyxXJ6sXlFLR1c9CBzA0R2tM4GAkVnSJKkEULijB7lSUGyX8y9/wTQc3n+fj/d91tonmAiB4BSm23X58uvl88/nzz/r8pypJcYh5cwkhELMKbl6WevaUbLXzd/vC1WvRbelts0YZcrDcRxXRALUZutaz5fVgtOgztLMt61gBIYnwsM4EBEjDSlxEmIKVWtNIzBCmCJuw5IOqpopoluIOOX0av3axwJM1NOOenITAlSzZs3cGRGRmYTBdkUmftuQ9T/FzbV6Ld4KhhF4N6Vk4ZzzOA7DkBtAzzF0d9VmZoQoIsIEjuHGjMzkjOG7GKnPfmgnHPVCQJjJXNdtBQyrTWttrfTk8GWrS6lLactWq6MEi6gQYm0vlyXPx19/+fn+/YfHnHMeoOec3+b8jDgK3U357vFAkpZtRKLDYc5CZJYRj+n43fv7hw8f5oc7SGSchAQRLNS9kJeEbWIwIGRADkLv0Z/m5q1ha9FKtOpeyTSZ5VYmXRF0IBQKBBOwEXxCOtAAyCX8Wtl82WQKO3oegTkQwQGJDA14b2NV1eyt1713XKuXeD1msfPuicjZO3R1S/5hipth8dd9vRNGXze/h8Nt838tDf6uDNx/85v925sCIkLa9/3t/7fL9JuVtE+h32z+Wr788vLnf/n05YsMOXsZfMvtDlJ2j7asy5fP23IGUNAt2lKvz7Zdoq7YKrphn+tGgOOuwO8IPSAgMkuXQnp0KMwZDNq2vXzmab4+fU7zXTowSbJdSVejbdi2dn1avvyyPX+0yxf2Oo4yZhmEu+NIL9w0FOlvWZDSFfymuq3t6cv1y/Hl7nh3GSaCqKqtNAgC5KpQnhfgAomRCAETk7tv2/rxV52n+XR3GubpkNOYM+yKkAYAgggs0zCoGiObagR6s6qVhSHGlIQweomG7sDCTElSJ3T2c1iYh2Fk2i2AoRtypZRS/iYRsl8SaqFKGMI9eSzcDADGcTweD/M8z9O4IqQsLNLKtizL9XqptQS4SAJHa199Yfq91FlNHJSSIIGH11aJKGXx8G25amtam6nusyTArelabWtWDZqDa+xZ3xFN4/np+Ze//vz++9/cPz4KIdxusn1yE97aCrZMfDge8nenmUiQU3i0okIwTjnPYwzZxpmmEceJxglD/foU7gxtInscxgGzJQpyDgVVc9VWylZqWaisVrfQksAPzDPjcAdZkmRkBApgdzEdiQ7TiESrto9r06en7fJcrgcbjzKf0nSQNKaUhTgctLm2CIc3dHiIiNraVredKPVmpEtAyuo7iN9N3YSZ9qK7K31e/8O/lhLQh1t+8xB7E0fxN0fAfig4OPhrafB6JMSu3Nu/9g45hEN3FOuuZrGbvQMARCvt079sf/pvrp+/UEoveh4uP/PpzvJgHkup5/NLaYUYGS2Bka4p3BFahEf0yaB7uIFZf+C7Zxh2M4UeUwyhHcxGgDDbrtvL5+df/izDdM+Sp4NZa8vl/PR5e/ncrk++PmO5cNRZYkx5TJITCyMTESMQI1MKHrOMWd7O+qTHEqGHqp/Py+fPL3enZ2Yu6xIIWmqSdDreX5d2vq5qygRClBIPkoYhMVKrtSBM85BUWpg2DTMmGnLucAAFTMPgHoxUa9VmzRX3GqDxngBDDETMO2+RECJUlQBkV5HuNtwRGoQYwYT87c3f15aHuWuHiVsrtVa1ne0vLFpblc1VEUCYGsC2lcvlel3WUuqY0qtJdC8eO9tzf0+ORJRSktQFUQ6I5r6sa1cEue0lSSBtVYtqVa/mzZF7OCuEm21Vz+fLp08fn758brX0PvXtmnX3Vqu1ImCHDEMWomSOtVmAgXtTXLbl6Xyhw+HheEfjicYBtCItiJjQRqgHDw5r0IKCQxmMEVxala3qutHiVCj7JHw/peOQxoElESUACGiOqqQsiENmRBwaqvoTrNdmqo0i5nEaiDlnSgMge3dvY0wCwvmrhs691rZuBXuEeHy9aCmIu+w/oAMrEJmZesRBxA4FvD7j1xu+/wHuuxe83cSgr7t9LxD8FrbVy3hEvGXXYGBg2M06EPbJXgBg3/x2G2C+BS/AWpw/2ec/1y9fkGSjtpWXcjxASi1gNSvaG
kRKiQWRULxxGEeo79FZ+yHn7r5nh1AYhFGv8926JZeCK0BX4KJVXc7XTz8P0+F4OJIQu0K9tsun9cvP28uvUM4jWUo8DeM0cmZOwrSDPBSEgEEYmWDgb6QKfTbd7Urosm4fn56neVTXuad8c5rH6cff/ig5//WXX9eypSHllJPIkPI0DgRhpRCBte3yXJZSSlNzA+RpmlJKSVIzq+MQ4UJYmFYqRCBCDoDgYEY9GkYSEbn1rtDD3Nyy8DiOQLRtW6uqphHgTKEapq9hsftuDVcPih7sorWUy/l8OV9UDZGFs6p/+vjxMgxDStZaN88ptV4uy/nlcrm7jCJDEoweT2xuCkwOYAAa4G6AMYyjA5gZigCSqq9bfblcLuerm6WURBJLqj2SxKyaNgc2chCHcNWtteuyfHn68vT0pWxbmEHALe8OAMAjSrVSDTw4Ophs5gDW0Fpr9en6cr1eLutWzY937+QxgTCCsAyJ8kg+2nU7V1VAGZCAoE2Z7g5zHjkmqAWXBcDSME7Heb4/HoYhAUVgGIa11pbFC4Ex3FKagTyJH3OU5qKFsD0MlMbUUrKcVQZ3lOSaWmMdxomI33wWXbeK8XXeHl1zGSS3Yt5De6AKM+3z3x7ny6lD+Nazr271P1LPEdV987+55/sv8JshVMTXe/52qjsSBnbXqD0lAHbYrzuX7hFp7tGzQG8PxqhcaX3GcgnAOJP51q4ZmCtEE8YxpyFnjkySEC2cvWdDBXqHM7C7VuyoRViYozcCImtotWd+aYQhBDMRcAOv6/r5l2Wc9N1jjCSIybfUlqQXay8Q65h4GjhnFKEOZ3Z+Q7wOL7xJqIC9mV2AdN4nElr4Wsrz5Tx+TuZ6OkzH4+F4d3c6HsbDdH9/PBynZb0yCwBo03Dod+84j8IoidetXC8vl3UDJMkZ8dDBCSA8HeZ5Gkqpl2XxJ3VTuNmm7hFEzOOQmbm1VmuYqYejd9cv2oeauGM5YR6mr7mob2/+Xj2be2ulrMv1cl7X1dy7QYCrrq1ZLThP4d5h79Z0K21Ztut1PQwDjsl368EeQsRAZO61NW3aVDvL3Gv1gNK0lHpd1/N1uS6LmeWUUzJJpu5VVV3VtTm4cu/kQt3Maqvn8/PLy9O6XFtrPW7t9YMQ8XQ4nu4fTnePx7u7/p2TGjITkmhqpshsHrWUui1aF5YZMUhSHoZxzIMYlieubZxOxOS6jJjumA/zxCStjdfcwGCcxsPhcP9wJ0M2CDVrZnXdqLMpPNSsaYsIIsqJ7w+ZEOcGMtG7U8KDPANcwbU/ZozuGPxNvx7QWtQSN4cnAAigQAAKusH+COC983e/bf4glr28Y2Z3r7UrKfvmZ7jt57f132sL4L5L9iB2A5W4cfvMrA/KAl+rhH3zh+3v3r/WDvbmD9flermeX9q2BtJ6vazho2bMyRkBBwYBwsSQGKXzGd3DfPfT65edmpkhUfdTMw+35oatbFqLmQKC9UkpIiIwYpi25by+fFqfPw6jpGEg3diLRMtoxDEI5m64S4zEQRxETn079BlrT9ikt8CGdF2zunci17XQx6cvHk6E0zQzy/396Xh3dPDf/+6HdV3N/Hy+/vrrxy9PL+tyScL3jw/HwyTCz5fzxy+fStk0HApvWxERBLw7HX/86cdxHLeyfvz0ZblellXDAbqilBgACHEYhpRzao0IVbVPogOiagMkj35OdG0vRPfb/luST2ePhplt67os52W51m2LCEQI11385RAu7m6tb08389ZaKdvlcgnNzOgewgyYiYVI1KNpKWtR1dew8qZW6nVZ1+fz5bpupTaPiKgaIWYWrtas1w8Ggeq3ahURPfx6vb68vFwvl61sEx34TUc6DMNPv/tpTvjbn37zcDr2QqN3TB4BhJwyMSL6POW6fLl8DoEPPE6UaJzH6XRM06C+hJX7YU4C10sVMyHNA6dxyDFSRq0tEVFyGAjHtAvttoIB4KCq27rWUmurxDxN85Dzuwc8HXSrzvN0/35qQ9oWPRcttVRDdNTa1mVdt8vrnonAcDZl2FU6AeQ93xN3/OtrzW6761HfeebuTCKSusD0BtBqADDHDt4hMEi/0vWmqthBQN/rB9hpOWFm4YGEgEBMr7KxPiC8MRE6ynA7Q+zrzd/Ufn0+//zlsnhwkudVk+g0TXmYZMw0DDEMkYYkWZjJHRBN98Jkn0S4NW3ulnhABItQM2qtuV+vy7KuqsrCfjPXQWAkQgvXVpbLy5dPnNLh7s5dwZ2ZhnFkp8TEJIgcyE7JKe06dNjxlCCORCDD2/h0YSa7KaQ7y+ECwEhJJKVhOszH40EIh2mex9xarUU/padt2a7X9Xr1WqyUMmZJqTsJwp5ajmhuVlSbMaHWKvN0mKZ2bM+Ho7a2Z+PGvuSxe2MRgQj3XHtmjAAk6yYYXwnNvXradZn/rleo6bYty3Ld1rW1igjCFBxI1HOOcyJV5z1rEDvVspS68BW95SREQESJM7IgUmut1rYtm7unlN3BPWpty7qeL9eXy2VbVlPrZ70BdBcT1VdX2V6G2j7KI3D3bSvL9bqua2ttmODtkZxSenz3iNam0z1PE3h4a2jUPUNYZDjMLAhWEHW7viwMx+mQU0bENA2Hx8f58SFNCbzMAw5CVpAEhICZ0pA7L8BaBTMeBLNgEnAkN6bWZWxu1kqtpahZYsnTPI4ZorlpU+XpcHw/XzDJtoRZNDaDMC+lrNtS6vVrqxygiqqdWoNxs5YlREdwfP2C+r9Z5+NEhKkTRWutO2zeNo+b6g2I3d0gdxrPreD/2vN/O6nfGwSPsAAEctqnfQDRO+PYScf9SRD1+c5XLYxZnNf6sjZPOUNqQAW5SdKc8zjRMCTJIDlLJoSwZhZN7dXRrGcLR1iE91w5B8d9RGK11lrrXpW8edsESBDoattyefrIwowhwowxJHHL7JgEmAVJgFhJgMQDPcDdAYEJiQOBUYa3gkBJkiLAbOt2jgxUka/LGhamjojh0Vq5vzsejnMiNgqRNIzTOE3puqzL5a8//3J+GT68f9/CIWgYx0MeUDgcaqllO59fXv7y5z/XbX14eJyH6fv3H3LOl+uyrlupLXa7DrDO/XcHABHBvRrryt69c8MgRBAiFKrU3j4Y6AoFAIgw07ILDYuaMkIeiITSkOd5TpIRsKylTYOpkrsQuVutpVBQqKnkzCmlJIKc1HxblnXd2lb6qotAVd227en5+fl8uV6vrTZE7P7S3LNYA0zN1FwtHAI9eDc16W+6xxZUbWratW6v+5+QhjymlJbmLSoEWrWyaKtFVYlxqi1lYmiMZgyJUq02WSAjD8Pxw3cPv/nx7k//siEOY0rM02ECJhJBQAgSFp5mH7I1JRHk3D2dAIOZhYkoILx7/zLzNM13H97Nx9nrElbdlMfT9HhwzfRpQYsUrOZrKWVbSr02La9zfg9ozVuLjAwMHgYWARb7F+FMFtFtdlzNOHbefL+7ay0QQDefbADo5I6e/rBzA7E7R9krk2c/dSA8oM96XnvfHS+MPlGLfdYQ1PO291xs3kU9iDAO+ZUG7QDVsAGnNMow
0TBgHjWnloSk51Nm4pwkQ1ixUmuttTW1RBQA3S+rt5Pm2jkxXRCEgPsg1m/v8cZc6P0uBURdr18+E+AgfDjMmUByNktokROyCBI7siE7sCGZhxkw0ZQzIQQyvgFiAUDGYSTEuhXw24DVolZ1WxEpibhZLWvdNm13knJptm01AokEiVR9u1601nk+BKI2tWbIRoDupq21WtX9CQHdGSnnkRHnYSKgQfJWq3tIEmZ57dbMjJkxoEN/FjeSx669vjGc/p33fkSEmWprpbVi1sJ9H/slzJnGQYTZ1JlgGJK1TOEpMbhrq4UCo1tVDLvBexqitFLacl1aqTc1GK7btqzr+Xo9Xy7LupkqM6ckcIuK9wDdcd395gdEp47rh90szIl5D1d/c9pHhKu2rQCiSiIQLXo9L9u2qhZirLaNI2eGxBAIJVerxbUxJ8p5enz/8MNv3/3mtwtTTpwYKQuKDPMsOe8aGuEAJumOYwy9+eKgJD6kPA1TmwMg1RqA8/1pfnw4nubQETo/OB3weIILOOBNnQC407lvR/Vtr162el7rASVD9EjiAAdCBzcwVQSInhjE1kE+BAi36KieB3ZPN7uF3HZLJcT9xwnRI3pn1FpT1b63O5C/dxTeXSK+cgg8wmNH3rqyhIiZhbvf740I2C0svz4aJJJhnu+Od8dpztNxlHmAIUFKlIR63QpUW71elsvlWnsqNkJ4tNbcrF9Yaho73bALBy1gpxX0MRPsg+ZdgiHgbrUtLyvTMo0CNgycEIg6YoaEDESBpEBFfdnKspW6lSTy/uH+MA5y8zL+uvkP8yGJhPpWirn3AUHXYq2l/vrpc2lbKeu2rdfzmXNuFpt5qdqvNLNwC/OOa9iyLC/nC5yppy6bWV0rIZQFz4SulochgIjTPIzH+ejQiTNYaj2fzz0LvTs3IKIVMzfT2zxmn/Sgdh9k/7as2x+rRYfY1KKTqGE3c2BADNNaHJqqW7PMDNPIhIkZwFurlRyDVJEQDoc5pTRMs8fWml2X/eavzQJg2eplWbatbKWspaipMDs4EnhI9DsnYIclu1oECTAMwFQ55TSk6TBP85zyEIjmgRFd1Kiq5+enl08fT3fTOM3BY9vay8uX6/Xi3lig2th0OIwjiiCE1mZ1cZsITpSH8e7h7rsff/P7fzgL+fLCEKfDLOOIkmHf6hDoEUiDICVgCgIiQhIgR5xQYjzOd81aa9tWe7JHvjswzizEPDQYLzqu57UEVwhFBaKcB3Oo5sJfb0s1e7ouv57PCvkwUg5ndKBOYenMRjcns9bzuTppukduAwmyW4S5QUTVpm7e07m1qVttrV/WEdG03zVVTfuy8Nd2oG/+3RByn++bab9TIhy6HIAkxDASISBxb5rjrUgJiVjGcXr3+O7x/eN8zOMseYCcWViSpCwJgKrq9bJ8/Pj5/PwMat3iNsxarRAxTkNAlGoRPuQhSYJdd4ivC7kPKb/qjwjI3dxbC10vy8sXAee7mRMKANBNI90llIGXtf7l548fP31+fnqehuGPv/vphw/vHg55r5BfN38WCfecUi+6vh5FCE31+VyaVeys5to4DQ6oiObQ1COQJeVhTDn1Q4WAwKPUYuAdjhmHnEXGnAlp3batVkQehiml1CPikJmZe3Pcex4R6eZZFqE77NIfWpcLddXw381gYWeC4R75FoQ7oNPlRBAQ5loaBJg5BiaRHkDdm/8IM4MW6ASmKQIQ+zFEqlq20rbSW32PWFtbtq201tRKa2pNnQGDmBJ35jkE3KjpDp3V5YQR0cwmxOPxePdwPx/mlHO/IeWV5ONW16suLzy4cAQ0W1tdLrVuzABIEarKtWposHmS5XI+j8dxHmYZRs7TfPfw7offJtflC6Lb8eE+zcegZBGtVveGCAREzCgZeWRJnBIDIni4TdbCHZFarefzJZinx8fpOKXEnIRlOq9x/XV9WbatNtPm4OEU3pW634it1P15uX58eTaXWuXEPAhLFqa+Untyu6nu5ln7cLorEUhQhFiZCgR0S9jOnm47SIgUO2jXVLu9j/VVcZPYRzes7TOC1+Mg3FTd9ebNjUbE5HIbISGgOwByaxpvPCNIeByHh7u7dw+Pw4FlRGRj7r7mmBjDoZlracuyrOuWd9IyIlMeMkBIkp0ECcHMItKtyODb0g9uuEbQnmOHXcrUtnI9r4SZg6eU0Im+ivy7OnPdti9Pz7/8+vHp89Nxnr97fGx3J5/kb0pliTdUtiRCiJIEkAN6KM3mYMM0yTAApyEYSRyiqjU14jROM43DmCSlkbDdHe5U3ZdzWBOReRrvT3fzNGYZzPyyXEupZqruSLSVUsyYeRxntf1Va1HVPvrqolRCQkYM8wAIi9gBSzMze/tg9nKOAJkoiyQRItiLKQNvvfDbY6SEOPXEAzfE4F1RaK/D3h1v7Zwh1VZbrc3UAoqGV/NSq5obhJk2VXNHBJEdee4acbfdKepGUe12gC4pPb5/992H747HY0qie2jp62dxspq9HsBG29ZyjWsLq8I8H8dhTn2f1mZrq1a0qc+nMU1TOjxKRgDiNBwf35OWxO5Wx7t7mU+YptaavXyJAoLOJJQmTiMNowxjnidOiUQAd804M9WtyNNnU5sP0zgOeUjE4sC2vnz68uXjr7/U6zlqjRA1rBalVK2La3llxZnpebl8fPq4rriMSef5/jDPchRKyH3cFeHdod5p9zyPW2ZU70p2bDeiY2bdFtq806Y6WBidVtzvg751CJCB+hFw205fmeSdP7gj+RGO6Eau7mah5q05cfOgeRrsdsd0b69hyKfDfDpOlADZQ6JbegEE7czFrisHiBswyZyGfDwdIbzWWnp5gsjCzOLWOukHdgC8f/odz+w/2mOJpHPL61YWWjgwRhxlICbac7HCXL3VUrZtbWULN0HMkgaRLj96u/+ltWaq3u03aMc2e4VtYeqGykVtbQZVnWwYkrBkkkB2DyVCV6auEaVpnI+qLUy85ZzvjofvP3yY5ykc17UUbbVpmKnZVmvUupYCAONUAKCUYrexirt3pTcQIxE6uHZ/ctwF2fs46JtRn7tDBAII8zAM0zimlJJwi9AA7zlm+1PcCSedtI/dfbVzRQMJqDfqqrZtddu22k0E1LQ1dW/uGtHMuvo0EC0i3JpR7Vg0dHImmMONluKA7tYdV2iYxnfv3j2+fzdNEzFHf/yvx5hZXZft8tIGTC15Uaw+IInkIeEglCS5w+baWtRmspbnp5fjab57eDdIAjOEyOPopzvX1a3l45HHGXgMqpxXhEgEiTOnAw8TT5NMcz4eeRwxZWDqLtjEJKXYfLRac+KcUsoJgGr1Cuuy6VYbEuWUKBiwa/AYXIYkbxl+a1leLs9l9ZiHEWEchhEJWIJov6t893HsTpq7PQ4xkgP67rN9Q+nc3N26z3e4hVncxntvsYZAAuSgV2fN2+r5OhqwV95+H+4BEaJVMmnMpEjkgafj9PbREGNKlBNl6UwB9MBXvkknMIdZmOGNPNDvcMnpcDhEuL68kMgwzUSYcxYmNcCeZ9tbnm/usq+TUETk7ibSakN
cKRg98ZSEqGfVuIM310pW50SPx3kWvj+ejmPOBOjNrb1Vj0rZNnOPbr8BiACm5hDaS2hmEnHA4gHVSWKc03SY8zDUZs9PXy7nl23ZvDkBMkLKMh8PvZ0bhnx/d/fDD9+llC+Xa22ac26qgOgB5tZUl3VtrT2/XAix83u7SL5DnJIzeef0djWI72AQBJHtcOwrWTt2ZgYSSU7T4Xg4nuZ5Wktx9qLezPYkacJuE2rm/RShPSHU1frsYQeCmmq5XM4v5523GK7uzaztZiPRc4dQBNU8rP8s3PB8DzQNc99H+2YKQUwpp8Px+Pj47v7+IaUMAO6Bb+SWqvby/Pzx519yW+/nkYlF8n0eVNxtwaKEjCDdYltSBMa6XK9Pn9cv94M7EYRpgMEgNB/QTYaJWdQMw3POIZKFM2dOIw8zzzMfTny6o3ECyYH7dRwQyMMk2U0JXIiJk7eo21ZtcB5lPM4YPEczMEX1aLXVut3f3fUKqG9+bdu2XY1sIK/HkyI6i3PqvhwW5A7qEO696ELc3UR63ebRyTCdauim7q6974uwm1n7Do/tt1fsMslAfN25t5/dR4Z7t087QtvpOHAD4Zg6gxse7g5+MwlFBCJAdIiGVgMJNYBiP144gjwsrBSvhcL5JmVzIGRKwxARyEue5vF4ZMIMYNqttEKIhHGPiO/3UC85VcOsh3L0DgdMrUIBZ4ycOSWRJIToZtAMqs5ov39//+P9MTzGnB+OOUG1Yq0s32z+rdbXkwX20YffuAHQ8xfVfN1qbW4enbsqaQDACOzmNl5rqy0nliTTPA6HoTsuTuNwM+rtFvDdVYnCOnVs79LMPed8nI/zPIlkZkYgB1KAWnXdFm9q7hjAPa09gtCEhEm+LWR2YxaWNMzH+XR/ON1ttflWfCtNrQ8vATioy96NEdCBe9ZWv4MwEKUDPWq2bNvL+bJuW9Nm7g6ubq2bVgECEZEQCVLrB5R6oO8pjmbRmtdm5ijRDxgnziIyTdPheJzmmZjDAfyNg18HyS7L5+fnxyGO2EQENTlvDalGSBrJQPKcSTB19yirZb0849Mvf4HtOuQkPX9DUjqcwl2YEYFdgRDzCIhJkkjmPPA48+GAhyMdjjhMQQkQwRTcAgwlpzyGabgSBKKEt6pbc+Q05OngDKBOLUwhuzcRFsrD+Na726w1K+DeLBtEIHWTX4jdRdC/xjK5q7o1Qso5D3nIeRCA1m9q6M26mal2izSwjuTBTRnXi9eIsOjc2Xh1eUTEDrBDNzTr+fEMOy50CydDBDIS2uGI2qrfNgwiiHASZHQMw9vfjEBCyMQY0FS35botS6hSeIcMzayWti6re5RSImAa5yzktbjt7I/bXA/2ErRP/2/EpP2i6/dDuFtrzUuhbSs5pzxk4l6/OLuODPNpJGRCYqJEhNDTlMs3m79qQ0ACRES3TrgNoB5zDegQAbXUVtU9Frluy3a5LA9390R0vV7WZV3Xrayrm45jeni8uz/e3T/c58y11Vrq5y+fzJxJeqnWk4HUrNdHwowDAsDd3ekPf/jp/u6IHTAKUou1+fPlvG5rberu1P0LutYbU+ZBvp1bBoIjekBQytPdfPfu9PBSWzN8am2DqGbWJ5TdI9BadBtnxp50Fu7u4CRsGEHQwi7r8nK5LNtatGCAgbew5qbdtIgEyfu37LuD+s2WM6A2LaWta4tAGFBS92AJFkoppZxZcgS5dyuDr2HgzePzVp5rYfY5NdbtutinRc8azjzOM7R6unucjydIsoT1M/T88oK6LYfx/nQ6ne7G0x13hN+DQsEahjOiiwAKceKUZRppnPAwwTRGTiEcSADoyBhBDkjgToAOViAUsOPsm1nLQ57m2cA0VM2AO58OHDmA3xzKEWAA1tu47gqrEdUcIDAcAwIpkCxAm27bulwvGHCY54f7+7u7uyHnWkpPfG+tVcTak/06N4yRSPqGZ+ZhGKgb9ZtHMb/xKveafK+m90zb6AAP7YPWzujvLU/0sAaIb2p+pKGT9/dUOkCiEGJhYUqI5KCtXs7ny8tZawUPpAhz9Xqt7fryYuq11mHIh2EkSu4Odgt1jN3egwA9+nYxR/TeQSAjgt9c0bsNsmorpZSaR52IwQEBUZixGx/jzWcUDMIiFOMbRrx4dzQA6OE+SH1OGUbdBsXdQWuPSbNCRc1rbeuyJhE3Na0AgEzeyzLwlPj+/jhNw1a288v5crlsW805q7pqZ/Hsoi0AQKIhpWkav/vu/e9//9P96VjXUmprxWszoCi15ZRXFmtmHoaxWwqTDGkY0kBvb8z9CCDiNMzH+e7hdH1XyrbWRRjADbwhEWNPGYNwjcAe04x7s7jbQQWARVhr12W5LNdlW2qt3HmW1mqz5oDIAtjlKL1w7F4f/V/6MKm11lqNIKaGiIG8cxWQkBiRIvpIaico3bZLbGrVjVNICm112crHp/J5c0pyKMoBEpqwSRrZQj1qRCiAb0J4Op6QmVgkDZBGQAitoRVbBQ8HAk4oIw0DzxOOY4wj5CHSEJT2Dtn7nLaiB+KISMAZAsCbtbIu5227QFhKNE2ETMxaW9Xq4OFCfxOktDvfIXQZeiklNyUxQiAMoR6qBma2btvlfH5+eoKIVqsw3d0dhVC1RZgIAbAZkgERIJCkblGWAWLbSodZmXnM2QNI6laUdOePdFP9Xfrptgf0dXYYIiL2VoV62ny3qFMl+aodJeZ5nud5ZpHoJzntwBwLo4ealroty3VbFlPdmUXuCNDMStncgomAGdWAqbcvrwOhnYZ8G/XFLfOqj63ezgP6OD7Cai3blqeDUQIDChJkJAQS6aaQO7PZwYId3x7KICLi5qYWSCmnYRjyMATF2mqrnSmtujPUnThq2Uzby/OXJDxN4zSk6TDOh8F0JKY0JGLKWaZpTDlH4PW6mXWzIG3aZ/YAQMCBDhg+DsMP33//u59+/OnHH6chP33+0qrWUtatNWAiOsxHVX+urdVG3ohBhESSMI/D+Koe+0qIIpKUeZ5d78v6bl2X5/MTIZE7m+XUsS5CDA+Dnf3Y/cuDuc+/BIK0qVos67Ksy7ZurZWhO5TXUkprCoAkqhHu2tANQ9GBHMl300x3dd8h6EoKyIyg6qbRv4ebCL0/3benWBAFJ+ABI+E14lOLv270ZWVR2kIlvlBbY30ZpqOnWWksIDaMU57T6fH44cfT44mYaRhxOgKLm4Y20AJqAISSaTzSdOD5AEN2pr4sdm8Jdwz0utWnp1AbpjseJ84CIaCllO3y/OX6/KlujRFPx2kO2TbaVlsv3sBGluPAbxJU+0nvEf1WfBlkSOOBRbIwCybiLsDTWq+Xy/OXpy9fPoG7a2WKJDSNo6qKyPF06EMZhCACSelwOhwO8zRN2trHTx+XZanFmOl4mCUP41quWy1bqU3NOorVmUJ9IUbATenCzMIDZ2YWSZ3dE+G1tWEaXzkLLHJ3f78+PHBK/Zp9TQi0ADPXUpZ127attfIaSImIQoRmXisAjnmeJKGZlbDWSaC3dbB3/P0E2GEMFtlZU70uiUACQuwIQa1l3dLUFIcAYkNWlk
AEFiIyd7zxIIxykzHecvs7tGaugAwEnDgNKSCKKtxcFzq5qrYmkeYDD2OOMEZkhjzw/f1xniYRigjVKiK11nXdpzbDOLUWa9k8GgAhSc8UcXBkziL39/c//vDDb77/4TAfwMMdWtXluqylOQ/hkSQnSRAYHswkvAN9Zje19ptXr8aIhWkYD4fD/eNxvR6eP8/T8zWdQS0zJUERgIjW4+pYiAHI999OSCQA0JqWqtu2P0trzcjVtJStrLVoIEneJUMG4X1kiNabKCEI3pG/cA9VBeJMeDM6sVJaa/o2d+DNzQ9b86fV/vzSitLL2X5Z4pcNzhUOAZlM2RRb9UpmcsppEKOJ81HGOzm8T8f3cpgRFCVhzrAz6zysgjuDoGQaZ0xTpOzIpt1IyBGJOSMIAmqL7Xz1skazwTXRCQnBUZuty3q5nK9rMeRxDuQkbEOOGDkLIaTjIX1jEH/7VKZtvV7WPHlraAYEWq1p0VJq3a6Xy3q9LMtlvV7APWe+XIjRc0rNdJ7mlFBYtu2yrquaTtM8DOlwOEzzWGsdr4OZAuA0jafTQfLQPcwQAglNscdv9nq2h9x0n4YOswtTSpIkpZw6t8/MgVBSeksqpZwpD8CiAa20tiybVnNjQvTAquvlclmupdVu490Z3eB7hSgsu6lG9ER6dVVT9W9TRrBjZkREFMzw1a1k16btar8INdtKva5byJjGFJwUwhEbEHh0m7CunTOkgsnfcvt7peruSGFh3pEXv+X2+u61CBHbts0zPjycvvv+u3kewfVyeclJfvjh3XcfPjw+PpraX/761/P5+fOnL4Ahkogk5/Fw5GaOpJyHBELuzVS1CdPpePjuu+9++u1P7x4fWtHr9Xp+WS7n5eV83apSBiDu2kw3I4B5HOc8oEOtZVmW67K8qsf6Y7yRoYkgSZ7m0/1dWa+Xl21ZtsuFA9IgItxzq8IcEYQBOQL3UWL/ygGwVi1b2dat1uqmHuoGrbaybetaigZxYiFmiQ5FuEPYTtNjYqScUktB1CJcVQOJWdyiNS1bvV6WdS2+WwP2Q+CmHvP4ctV//KVqLKeDmvtS/PNmYDExSeBEOCFgbdh8zKMc74Z0SvNpPh1lvDMZGyUhYCHEAEJEAaKIDEQkA5EgiRvWtVhVU/VQEuM80PzAMvbyeNtqPb+0slnbjowyTBHswUV9WbYvzy/NLV+vMgzMiSUN8yiUGPl4mpjfZigCdPdus7IsbVglPCOEtmW9fP74y7ZcIFxVu4moqwI4WKvb8qWtEdFaO51O8yQppc+fPl6vVw+/e3j48N17IuxEnXEcRHgYxnmejqdDAG21cUVJ5MAsoQrdoJw4qPMwAomBmRIjExIEYTBCtywPIHKmmw4ZADxga7aZTyw14Ol8/fXTr3/+9efz5YzumdPDfEiItaxgyr2LRgoPNQcPYhpyGoeckzBBx9bNTFtT1ZuMGJBJUpcyCglD7KZxHs6EhNDZ4ODhGG5RSns+XxvmU5o5JUVoe+yKqRsgCgMAWsCdor25a6TnQzL2AYmp1looAlwVXlH/6IMx67mI8zz97qffCOPHj78gxLvH++++e/e73/7OPJDwr3+Fz58/bdtGpCLDNOYIbOpqwZyGMZkHh4nqOObv3j1+//1393d3WfLzdVuuWymtNVd11UC0wKittqYImHM+HY+HYbTadl2//e3Nf1trCAichmE+Hu/fPa5LZ+dn5j3ghcIMnAKZ8sCSxAy6rgIM2HeieNlqbdWsdSVW7xS7vS0gppwPx5OIrOtS3KKvjteccuYEkLMLc+N4dYdQtVZba01N7WbmdEMJ4XWRrdU/XZrjNi3R3RZK1QHDg3uqFgZ6i7pWfLmI55jcwxCVCSRz0TGLEwXAUxrG+e4hzTOm5EhFrZaybVtdWllqlAbmzDYccLy/kzRjGoEEpVv04LacAWycZ+5MM2JiQRYANIvWNJAiEXLqTt4cSG8irmAXNAZQd/BUchtFBqbrtl5enj7+8tfl8sLChF2y23b/dq11c2WIcFXLA2/bYibrelmWi7mT0PPzF4gA7K7Kq0gax5GImjZVW9drrZuHMYMkSYlZQDLlKqZmfciyp2y5a9XqrkkIbkLBrvShNx/El9qu1U7AEf6ybL98fvo3//ynL09f0GMep+/uH07jKGhCyAFMFEgR4WqEOOQhj0NKqZ+Mbt4R8K+z/AjoF8luZk54ky2oWpcf7ckgEX039ntar2ukaTiBGCytnq/L8/llq1t//0QMQBY4pNyavtn8gEw85AzhYN62zav2mR/ebFD3tCUiNX9+fl6X5XS8e3i4m8asWuZpmKbx/v5eJHVT5wB/fqLW3DWW63bdyvPztTYdxznlBF3+Jnh3Ov7mu/fv7+8FsWxb3YpWR2CRlIdJvRlQrXq9LttaheQwjo/394dxrtsGgOu25fy1JPtKkETsrBsgScN8uItwp3Bo29OQtvXaWo0wQjdHEZnGQVJuattW21YjWgC5A0mUWndlAUAA9tkQsqQBs+TT/eNPP/1EzB9//flJbVtXDw8UpG54L0goFjmLee+jodt4YGU1ZcaUEDE8HNDf7haIgHA13VStECB13WfmcEInboDVmdXWl+WX538b6c/5MPPhwONhfnz35fOPx4e7YRTw2q7Pd8fxj//qP3z3mx/59GAaXz4+//LLp5///Nfz82INxGkgPB7l/vvx0X8z3r8b8AQoMs3H9x8wtpdfLtvyUtdjysxpTJnn43S6v1fHXBpxCuQAMmcEdDPU2MrXGLWI0GaqOggRUUIak5ymcUry/LRenp9enr5cry9DTklEiCAsMYZjLRuAzPOYcx5GHMfBXWszwAACa3p+efmnf/qnJH9pTWurtda7u5Ppf9AeFQlrqy+Xi7rlPAzDkHNm4UOMHXA2tT4rDI9a6uV8WdZlW9aUEoMTBOUMJHiL+Oifxdwva72stTkG0NbsvNany/p8XXNKpHYtlYmnjDttINz7CM4tSxrGYRhHZO5VnrpVVTXrEsKo+/7HzhG8ic1b61eF7YAWUk9ICTfrLFKIViputZppqU/Pn//817/8m3/7j+freZ4nTqmLKN2Jra3/i//07eYPYc7TkHqylnuraua4Rw93WUowc84ZEa+X5enzy/nlejocj/MpYEK0zrcjpmma7h/uS92Y+OVlWa+llLYtpZUe0k1MQkRpzIfj/PBw9+Hx8TAO0NunYlrd9JXrEbVpqV0WGEnSmHMWySI8jtp0HIfu/PV1y3SQlG4+gESInEc43HtY1e3KBC9Pn5fr2WpprrdOL4lIU4twNVV1M2RRIqqtlVpfSx7JwzAf5/v3gQyc7x4ff/fT74lonCZJWc2u50tzhOqGytIvDWRhEVcD20e3JGYRhtilbM2sminyN+xuc1drRUswMrEgZsKBMNxLs5dC5DgCetPr9VkhhmVM22k4PRLjS061bEBgZdmefvnu3en7d48Pj48yaV3Kr3/683//b/75n//pX56fLuGUgQ4i93f5ch6bbsN8Jzzk+ZFTHu4eWjnjp5+trnW5DsPAKaUsx+N8f3cwNeFqkJpCV
fPqiDUY0fZs7NtDie5u6iixTzl6HEO0sm7LpWzXWhaEASIBM4Ij3Sw1ANMwTONERCmnpl311l2gsJTaPn8Ohx4FZ2bucX53FklA2LRt2xYYwuzGZkQIRNTNgULELUytNS1LO5+/vDw/t1rmaYrjQGNCDUc1c9P6OhvvM0LfjXmimVZr6urgLNzthlTVhHnXoQf0LpBFcs45p5SQyAH6pW872QADsLuR3jgPex5QmKtqa826t2lPuA/vWD8CCksgaYC1Vpar0Xp+/vz5088//+WfXy4vh+NRcjZzCwrgp9/91P1O9s0fEVn4w8PDw+kwD1lbe3p6vly3Tb2Zo5myU+zhW6paVv3lr5//m//6H+taf/vb7w7z0NpW1/r0/Dxs27qtLPzdd98PwwTwa6tfrkvTZkyCEODkCsiYSO6Op/vjaUiJAbWZVrXmqt6K1U1LqdtWlhZNHYGSpBQhSG0ripxyGsch5yxvxjCdBmJmSIwEQBTdwYWTDPN0enj87gfEnsFuW5gpMmEH+AJAtbXa3Kw165N5RDS3rWy1tVprHobj3d37Dz88fvhhmI8aOM7zh/ffI+K7D9+f7h5Uo+ifrpezrYWXknIaxgEASIiFmjb3CHBwBuq1lJu1plttZE2DBo8D72W/W2jzVrSIwEHGY0oT54wArZ2tQbU6pg/zODAJGrmRcUZ4uJsP94ecyMp6ebmsl6d6/jyAb5uqIVks5+Uv//wv//yP/+Yvv3y6nK/hnhCOLMsTPX2i7fySaKDAd79LeT7RfJD5Pg0Hq7WtSxmGfHpIQ747TuU46noFo025abStuFdsRpk5j8LwpiBzb8VqMQYjcumelW6hTYtqiVBEB7AA7FWmhwZBkpTGcZofDoe5E/tLczMzJ4BMFKqtbM09EIkoiWThVKuua0mSkGhIQ2AgYKutlgoAnbrXQ9AIRc1KWZ9fvvz1z/98fv4y5jTJQ4aHAYpqqwalaFnmnhEEAIQ4JhmSEIGZeTREzRkPc76bp0MeciC5gUHvpAFCzZPIMI7zNKWcu2F8eLSmTXUf2fdhn3mog3mgh1o0c3UH767QQDfOP/Y3AoQMzCQ5AZFDWFu+/LqFXy/PbTtDVAILV2+gHl2+vXP3v25+DwAQojHlu2n2bN4sHH2rFg3NCACYgbrxKDRrl/P2lz/9LMRDTvD+xBRmsS6bNl3rBhDTNEHg5WW7nDeELQKFxLtEk2Se5/vT8XQ4jjm7WTUDw7BQtVatU+K2rW5b1WBEnqchXHW91tJW3hgIALTrt95YLEHnJ32lK3Y0A4CFcMjz8fj43r2W7dq2RctKiAw9LtFcfV3LWmofzW1bbeoIYOGqrWptqsM0zce7H37z2/9Pe+/VJNmRZGkqMXKJkyBJkCAFVE1VdU+PzMrs64js//8JOyM1XV2kUQCSRYS7X2JEVffBrkdk7cjuvu+UJyCSgoxEhBMzU1M95zvf/+Z3u+PNkoUc78Y9Eu2PRx/DsqYi9u9//eu0PEgqLtXRwDu2rUOrpmoIzb9hWnOe5+W8LL1ZLil3Oo56APDPVQyoBLS9g/tgN8F2DgOiiDMARwCAFcyBApma5lIhl30Vzam0kvAyuXVGgi4GH3twQUTSMq+nx/XpoZzP6TxVyWxqzheG0+daltX7jjh0+3vfDcQdxZFdrGLL+UzeD4gcvA8+eGSoqKnFbhNWxOrNWFBzlvLFaalac655FYfKpM5Vk5QXg5LSVMpq1loqLZHPMVPnO3bOuzD0Y9/1MXSIaKpVKrPrYq+95lJySmta1SyGznvnnOv7Ydzt+r7vuw4Ic85rTimlWlqCLBCjY+d9cN4750U1pbQs8+Vymi4n7KPkQJoYsgpoqutlWS+j6rb4EdEzeyY0IbS+Dzc3+3f19TLNAZ035KIe4Ro5aYDGRMG7LoYYPBM1e7eqSq0NVGlXl3E7vrbqtSkQa5X2UZfqgOGqQ0Zooz5H7JAZFchqyTXldZGa0gySxy46xtgNiJyrqBH7rg9/Vyk7Uyu5nk8XD+gVvGPHPvgOU1WxWmurN2wrqoHZg+HpPP3803siW5ZXb1+/GgcvxUxLWlZEdOgYeOj6sR+f3OQomSM1QMQhdu/evD3e7n10ZvV8ntlg7AYDzEVyKVWtiLT4bRdDPwzHwyGvy9+enqZ1IQVTy6UuaX04Pb2eZ5HnTDjEZuS6ym1e/mPzYcJhKEv/9LB0j4mJERhJxJY5JZGny5xzIXYKUFRyLaomzbDXbIjsxt3+7v7N1998sz/eXua1qDA7Ih7HIQSPgMx+XtbTZZqny5orOuqCb/3eppy8qjOg1ny5PDw9ve97zDksy7yT25vXbyL07ehH0Q7gPoR3Q3zb0V3QHZfee+9HYJfVRMW0ZMnCuCidkvLTIuHzNK8dW+9pCK47xjgeX3//3e3bt34cU1lruXRYDk4XsmoypcVUNeparKQlrdkEgePtu++H21eu27PvAF0utUxP6t0RycVOEHMt63xZ53PBDEBdXxkpcifVztMyX6bn98XMpKS6LuJYvTPTWvP58ugczfO55EWlmFapwITofN/1h8Oh6zpCcuyCI7LGYgnMnfehtfRqLWsu67oA4TjuugaA9D6E0HfdOO6qyufPj+vHT+en87ys7chttssQiveBvTeztKZSpQ3Vak0lLyLJNKGSlWU5PayHndaXxU9IhKC1ssf725uuD69f35+enh4/fi6XmUwdQiBCNJHKjse+G7ouBscEYKJizybm5mW0xu5vPoZNFdRUzBv8r906iRBVoSV/gGET7RKKiVTJy7rmWgGyitQcid7e3yG5vh8UcJ5TFUXyN+Pgvlz8zgUwnKYVBVCwi6EapGqlalUFwGbLtytC3XtH4ET0dJn0R0kptW0RgWLntCohSq6gGH3s4+BcIHJI4gijD4fD7u7uZrcfUllTKrkUMmAuJcuS07Kmec3nab5cZkW62e1ubm/GcXx6tCXl+TwRcBXza1rzepqm5e8z4dpMSdWa7h43UZSoVqsV1SoQsqMrKlfFcpWal7XWaU5VJHYsZkVa59dERbXxIwCJeWPj9sPQixm1boQBuzDux3ffvJuX6f3H95f5Un6u87ysuZhBbLFTtv14jNQYget6OZ0/dT12KazrhITPJwwAkGmHdN/F+y7sqPage0f7DsfeoY9JcJW6Jiss4jvN0rw4ZVkLqHNqgdBcN7ibu8Pd21f93R11XT4/lMvJ5bmT1GnxUiHXKnUxANW8LiklRrx7+3Y5P0leXRyk1uVyuXx+yOfP6J3k4nqQpsU4T/PjqVBCR95ZcD4wZlGZTvVythdwnda85nWp3olncTxP559//psjOD1+zmluSWpaK3jfhXg83n711Ve7cadVTKuJAKBzruWmxBiHYWCiUmuutcjogjscbrouEm1CzRDCOI5F6rJkx6dSZFmS977ByShLzpU4QYN/1bKk1JoBIiWXlMqa86JKeZnW82O63H75vjTJrJk69ofdru+7oY8OYD2dtTWywdr8AAmD4y74GJzjbajQzAiNSCBarwovAgC15gFTbJ5S1SqVDDa2XTP8bieb
GpgygmlVK6VKyVql6ZIDkQ9DiDF0fd/1hrQsaymihveHvb8argDADd2uljIvy7qelyQheGRXReecRc2FQITIBFWuoJVA7LxjMfnw6dPpfDo/TZfzBX+Hr17dOmYCqEs2Q4eu89E7j8wq4r073t3cv7o97HpiulzWtKbG774s67rmKa3neXp4PD08nR5P0+F4+Ort61evX5daH58e5zU9nCcAvxZlhlyWyzLnmp+DWjdNbSMhoTK2QHqppax5rWmyvOTLea1VDRRQDNZSpiWvgkk0lQKEVFsCfcm1NAKHbQ4nAiARTTkty+RjKDmXklIparBkBoMQ3Zu3r37/z79RrNXqz7/8UnKpohq8Q9Ir58o7N45D14cq6+Xy4KPGxUtNXb970SwAeICR6XbodiGuc6aqffQjOSTzrOxDBB4jiUa1OuUKPqvi0dOOsffOga7TJfTx3vu43/NuJ86vlzl9frDzmZaZ8oqlaLW16FJSM3D3yKWkmhcpq9VFdcrT54effvz017/CevbB1+ns+85ES9LpaT6/Pymy88BRqfcUF8pK5weYX+F1wZhpXlNe5uw5O0oEj1ovp8+EgCaqGaSSmaow0G7cv3391fff/eaw26/LlNclp1lV6Ap98Y4YFFRVCiH0fdfvxpubo/eh5Nx6fiJSalYF533oO/aReAWk1lQTkzXlWiU3J7tpqQmQnfdF1lzrvC5hdiawnC/r+VSW2eTZGNioGkhInpwLRAWnqiVlyUWqbIwkqR4xxBBj9C0XY8sObS7G1lkqogqOiVsQs7Urp5rRlkAhVQTNaq2qAsbUbDpoolKriRQFqGpFTNWYqS1MIA5dN+x2/TB0XSRmqVKKlFLf3t4E/8Xijz6CoWFKKReZWtUPhEWFCD07dgQARODZN0xEUzOXUuZlPZ3O0+MlLUsXAxO8vt8779ZlrdUAfS1Nv1ANLMRwe3s8Hg9E2C73pua8F9M152ld53U5TZdPDw/zkkLX3d7dv3n9anfY/fL+w+VynpdlSamPmZkdQLX/SdwHpiq1FjVBQXYspkuap+lyOp3W5QJSIM+YLiIizOaDMme1ac1zKlWFmIFYVUqttUrzOW7jFUAkbtCIUnLKa8ppTeucUpErTBYRWI53+6++efNwfhTUx4dTWlNpnk7d4j6RIHQuRgbUXJbzxVbPaHrIy3MV4xlve/d68HcBB7KnqplMmnQYxKygISE5JiSXjVAxBCOlffSHLgyjQyjrvDqH3dDH3Y66vhqs87ycznWesGbvsOvDYCCJcq1m6pzro4vBOU/OEWhdTg+ffv7pr3/+y/s//oXKUn3/7cMT97vGMr6c19PDZMR9h70ROWBf2NShONSXxpKplFzSmpNPjtgkTZDSwoT7XR88qTYtKTLzbtztx330PRpLtZRLSlkkgzX6PiIRE6lqrhWI+nFftJlZeJ7nnLOKIKIPwQDXrA8PT5fLNE1L8/mKaK01pbSmtKxZmzIPlayqpJoyk356fBQpjJzmVXOCFq3bngpYs6LXUmomMMppnc7n+TzVVECVmq0GMXjuY4wxBh8amaoBylRFa20TreYHN0KpWhuhaDvirSmQS6kIWqUCKHH7f3BTTImomVZTMRBAJMfsyAV0zpA8ewZiQwYK5LgLGjTnPHbhSyChI8cBfNf1yWBNKYt4M3bO2g9H2FQQTOQ9O0ZEEtgoWoRUcvnb+8/z5RwDedabw2/73tdaliWL4OnpdHp6nOeZo+9jvL253Q3jMl1SWkwsOO9iKFUBci51XpbT+fzw+OBc+OFX3//6hx/u71+Vkj9++PDLTz8t89lac9VjP0ZRFslfSi/NTDTnMmcpZuC8S3n9+Pnjxw/vf3n/8zxNjiAy7rwFqOgcDQOnCquky3pZ1lSSa0ZGhFqlwVsQyVFz4KBz7LwjJgUrUtY0X+Z5WuY159K2aatVaq5pOMRf/ebbbtf/+OMvnz88XJ4uqRQGxI3YXJnBefaBAWFdlpyBiUotz9bRzvGvjkO+iTdcuFavQm2WDpClmmazVYyF/Gp0FlurlUp753Zdd3/sx6NHrvNkw+1uf9z1fU8uaErrkpZ5zjkZQncYb0b0S96taV1XlerZ+uj3u/5w3I/7HQB8/uX9n//Hv/33P/zpxz/8GUo+u/Hb9w+0O6R1XXO6TOvTtAIFwO7mECkMHHrvta/S7Udkfn5fTERqqTknxmZiXZY5BA4eiYKpgAGR8z6M/UjoPn/8XEp9PD3OyznnVTSb2NUBo9p6YaLs/Lg/jrvdMIwAMM1Tzqkp5ZtG0xDXNT0+Pi3LsvXYVFLK67LM67otfibvMTp0rVdZCEgu07DvBlIghOBewCSmtuY0z3NHYHkFtMt0/vjx/dPjY0mZDJgpeN8FFyP74EPwMXbBe+aGLYOUUk1JAMkHRDRqiQwl5VSkbnAis6pSas05A1hRcUzsvfMRyQGYARtqFWskPWBiH8gHckEBc615Xi/T4n0Yh77vu76LzKS1WC1fnpeuMQ0Qsc0LtAEoEBoaWUQ345MhYbPBb1o3RAw+Jk6l1PP58vHjx4fHV1Xqc46tKtZaWgymq2HdH5oMU4rUsqma1iXNa3p4ePz8+fHp6Zxz9jEcDzfv3n11c3tc5vnTp49/+9u/f/jwS1pnRhx7t9/FOMSc4fz36nGRep6ePj9+TDVXVSSYp8v797/88svPP/3007LMIfixC7e7uO987whj9Pt9KGpPUxZd1kyE3ntmbLKe1uOALfQMkQgZDa3UTInmZbrMl2mZlpRSTqVmsWKN1dHxq6/ufReRPbOvVS/lXKugbWii0PnYBecdIjTaZHTuyzrGM93t4ptDvO0RFbBjIB4Y2aSWXE1yRaXIvRfPylbM5iWRWqkFqI995yKC5+5w44Yd+ggtlLZBjvroa+8oeIXOEUUaBmawGGgYunF/uHv3djgcAODy8fPDz+/P5/M5rVbKeVnny5TnqZYiKlnqKgKgXO2UAVZbSQ1sEZBN6PhFsdyOEiJ2jpiiRceoYEWqiRAgItcq52ky+5BznefldDkvaanaRl525bzVLbZLjJwfhocu9iFEM1vXJdestX2iAQiISFXWlNqNoI2HUkpLM2ykYmbOcwzURdcF9M4M2E1IBEzcceDgfQzPHXJVy6XMaQmoldC0zvNlvcyai0d2wfU+DiF2ffDBNaYnO4/MhlTURGTNNRUpokwEYFIl57ykdUmpNLKfiBrMy9L8isGz8z5GH7rBx76lmDYUXJaSS61WlYiKoRPkWhWWlGqtIto4esPQ74Y+dj4wNTz/y+JfUlKRlHPVSsywxWEJEWrFJIpYN8AXOlMrZVYzdM4zhb4Hg7oWJlWzXLKYNKSC994H57xr9uZ6BiT+8OZ1U6t59mvJ87KcpsvD09Mv7z89nc8pFefDd9999+bN29dvX1Wpf/63P/71r3/981/+8vDpU87LzX53f7d7dbtXgKeaSqk5v9CvSskfPv7873/7U5Kaak45Tefzw6ePnz59+vTxYyq567p57EWHqr303RBCf3MsRuHhjI/nWtVM15SCd2KNAAewQaDQABVBQau
UJS3V6mU6TdNlTsualiWlIkWtIqML3vnYx975wC4yu2Ve1yWtZTGtnfexC8M4DGPvvGvxI0TI3tEXJwwS+uiHXbzZ+Q7xJjgU8N7A8pLTXHUShz0e+z7uOu8UTtOHx3Vey46069zu/ujcSMMAw33xu4LeTIgsjN1we8T6yjo/TykvOaFQpJv9buxc38f98bB79ebNr3/ob45SIT2eZbqMY7h/c7Sa98eOoVpKaEIEGAwDCOFk8rfT8iFJiMJgli/9m7m+0G9apHLsun633+/HwXuPYLXmlOaUk4kwoqE/nS//+sc/gtEyp7VtqO0kRIMt3tfacFdUTRSRGR+abKwtpI3cYc2wp1dmz3XfuArpU0q5lFIFEX30qk6NzdgAgzW/cwDy6IJj9l3/rPBVsCxlzquTWtCslloSGw4hUqRIbvBd54OPW1Y9EBnAWlVqyjmntVEJipohkpmkuuac1pTmtaRSstSUi1lqUaUxxtjtd0M/9F0/7H0MTLi1BEVTnc7TOqc1iwE5RVKgLLKsRQ289yH4GFwXQ9+H/Tjc3uyLfIGL2hh+Iq39QExIpKbPFmdRMxMzYabgW3x4IwsQoiPyYFD2I4I4x1dG2uaXbt1UNVvX9TTPReTucJQiY9ch2prXp/P5/adPnx8fH0+XIhJDdzgcXr9+Pe7Hy3x5/Pz5D//jDz/++OPj4+OyzKbqHQ29HwafS+Pfq3zR6s81f/z4/sef/ipoa8mX5bJcpjRPyzqpZgCpmrPQWnmp7IXZHDlC75o1snksShFE1K2L2IAwm+tbRHPOy7pcLmfn+TJfljRtB74Vg6IgCGjAQOA8OR+IfEr5l5/ePz085WVVwBDDMA6H42F/OMSu88GTMRH6GJwLz2wCMZvFpmqzovMuduzNHKMAVPTsgDDwbox3Y7+PwLJCoYilWCJdUVdVrrAqgbFgUHIgarWKWWWS4C0GEnWqrBKZbo/9cd/Hvtvfv7r55oebr94hYp4nS3Pv8Kt3r8Ydl7Tcv7mNkREEUNnDuPf7Y8jGqejTZdHz7MPqwbjM82nWeoVeInkfuq4fxt3heHN7czMMHRMuy/Txw/sNs01M7Kvqw9PTsqTT6ZJSVqAt3wtfqJvXEC3bBihKeEXotdJpI91simlD1M1S0e4DIlWk5G18y0SGwhy8RSBH7MkFQy6Cc6pVkI2m/CJVBlUta1mmldmIHJH3sRvGduHu2PcuBucxOGQGbsQvSDVN01zk9JQu8zS3oALvgllNOeWcspQiCuycjz5oLaWZJmoVUGjEKhFdc2kTgVLLlOpTkkuqy1JqVcYKgCZYVEoRMSucFwJEc45jF26OewH7Zl6/XDKutiBiUQNwzIBYpRKR856ISbTWmnM2oBhd1/ngRgDMWauiATrPXR8cajcEH7wjZwppLfO0GJR1TYikZo+PT0/niyc+n863x0MILpV0Op9/ev9hWlZybtwf3rx9ezweQ4yX6fLXH//957/97cPP7y/ndiYDM7NjRABUpNaHa/3TK/2mlI+f3v/0y79T9FnKeTpLKZF5v+/73otIEXGeXXTGsGqB0tiSS1VBAKRGfdIq21yuJSeQAZERcS1lnpfT+cSeneclrdUykjlPwJ4FsxRAYCZEUBNCjl0Yx353GMZdt05OKw5Dvz8e7u7ubu5ux93gO9/GvJ7J+/h88ueqv5yWP36Yl0qvRrrxdAjMnr2nkSl4t+sD7YfxZnTBVSldjvu7oXahP+5x3y2ay3Ra1jq68FqNkbGqrmmapofLZZrmUqoL8YDs0DpP93fD/jBg6If7u5uvvx0Od8vlPD989lBe3e+Pr/s1zZfzadztx/3AHrFq6PHVq5FkXZM9PK2Xz5dlycGFCuZzzqfpWRWHSCH0w7Db7Y7Hm7v7N6+HsUcwfz5N81JFzYSJY+jMUJZUdZ7TvC6JnAdkayro7bx6Wf4bF87QDJvkt71rDdqhZldyynbZL7VIM4GLlNo0NAqAogDoYvT9OAx9jF0wcPNalzmrWM2yf/X1Wsp18VdaJpzP1XvfD6Ebjseb2/v7sR8DUCTf+cDshFkYjQiY0POS0sfPn9afflpO86d0Ws4XMjyOyARptVxNgJBcH0OI5sNaUjbRSBEV6yqFKpR1prWYLrVktQKQqiypZAHA4Ng64A7IgSnp6m2u9bIuc1rWtCqY77tlWQvSN0+X5+wwaPLebRdFbPcxrEhEIQQiKkXMjBi7zr16fby72e+GKFU/fnp6Oq8pC7Pt930X3OE47MYRkaWaKqhCM64DEJEjdkBogGvOD09PAJpymtcl5ULshnG32+27rlPTh6eH9x8+/PGP//r+/S/zNGsV54Lz7fbkr4Xfhmr8O2G/aSnrmiYEX02qJGLsxxhdMFGRuuZsBCE6YMhaNYuhppzUBLbICmoJv20/uTKfwAwASUxzYyLnJYAXKwaKhEzI6J0SVTQAIt+QA0zs2A9D7PvtH63SD/04jvvDcb8/9rvogjfUVlASuRdNLCI4V4gfKlgB8wQuUBf6IXRj1w9eO6bBhzEiwbpqP/rbu53udL/fd2OP0QEACzjXzCnPNok6r/nz0yUvSxcjI3rGPnA/uGHX47Afb27H29dM3fn9X08f/pamB7TUdZ3rd+yh7wcfXXvdvfe7sdd9v7iqWaboqVRGdaps6BoUpJ38RLHru2vABbIHcmaKzod+7EXNlImCDwaIzgla1hq6RMwItHlKt2pVt3D7xsaD5qVuKFXYSv6rUsZQgUxVSzUCIcMrRpKBQBXZhBlDYB/Yh5YmH4i9AjU4QymaskzlpVRmtTGV/ZIRqBt4P+5fvX333Q/f3xzvGMghe2YEFARpOQGM5N1lnhXh6XRCwFqK1EpADiCS864zduTYex9jB4Al55KLVfHIY98NLvYY2DBXBbVUm5W/ecKcc5Wq+iqdWhTzpaiYgWatktY0Tcu6VDNXqwBWdh8fHvOX2v7GIgFEInLeObeRDEIICAiGgErs7++P//TPP3z7zVe3h910nv7P//Zvpf48r6uB3dwebg6743F/d3tnCiWLczEEyEUJvRmG0L16+3bc7/7DDz90IXz+8PHpdJ7nCzDd3N31467rB0C8XKan89P7Dx8+fvr48PCwrLOqkmMK3vvYhxhCX8VSLkC+jXeuAK72IcPYha73lcAARzd0Idwe9pF9TamUEosXUCRU1ZJTETOUWrKAIiMxkxjAluj0RQpCi9ZEIFBUAyECDuTFqaGhICM59uRcdWYA6Jgd+ehccOhj9DH4GP04dCrWd30X+74fxmHshui8ExMRKVm+TLuIwb17c/Pxq5ulyoWAgI0j9SPfjsPdLo5sWNBZiKSqyjZ07tX93hRvxmE/dPsuBmYs2B9vYsdqJee15IJIWu3x4XR+fIgx9DHsuoCjBwIOLh6Ow+EuxGNd8+XT+8ef/nT+8D6VbPNAwTFpj52amRFS52hw2HnuwFUY2L31yz7VUqAICh0OR8fbPJmYu2Hs+xGZc6lPT6dpnRFAtHKIw54AjBDap2487I93t6/evi65mCq0BNkr00o3V/UG7W/I/uvW/0U4t2
2lvpiWWlNKpbpaRVU2mm7bQEyIMATfxc5510KTqrZUHueD44jY1TDu8fpcvMHrjOeMNfjo+rvj7bt33/zwu3+6e/NGkczAQFEq1gK1VClghoQ9W3pwn8gGzTurY+TOhdsh7kKMNnbs+y4GH7zzgCgqKopmZMCGDOjZAUIBXcAmsIWpxLggXaTkUqxWWFdeFpjnUlOp61zTZV2W5ZLyKipApAbLmpYPH99/+JTztYoBcJv4AMGwkVRt44fA9jIBgvduGLqbm8Pt3c1x7EEtBs+MCBqCf/X67t3bN/d3N/thBKB1LapA5IiByIMRs9/t/f54GMYdmq0lLSkBczcOh5ubELuUyzRNj0+Pnz5/ev/hw/lyLiUDgPMuhBhC3/muC9H5WCvUav0Y+h66rgv+RatMREMfxrHLIBUUAGLwITiHZEIAjA4a57nUzVtfUZXAdaHfD6KW12wqz7LKxj5sAYfUFE6MDfdJTA5ZDARaA5+JyTWUMDpiBuep2d/JABUJnGNDaCGYWxwkPks2rf3z/K6E4N69u/n8q/tP56UIOBet72vf177XIUJHqEakCNBGyl3nh6JSkbwDJnVEwcc+9vuBAhVNcnlYljl6PO77434HNTFzCM4HZoeGAMxdP3bdgAZSsuSl1LnomiXJIiQuetTaqYoBEHvno/MxxM6xea+xk5QkpSxJtGi/29F11IeELjgOAYiq6pISS6VGjCX2kdoUAgmY2XtPiON+Z6Jai4mCXOXu11NfRBoKQU11u/nRVhJqS3TWLd5HNdfScpWk2fGuQvq2+BHBO+ec8z7wNo737Lz3wYVgQCASuuH5M2ZqKaW0Jut6pyZqzQk+zatAU+9UKjmUlXPWklGEDOB8iR8fbs7z1wJj6Pzg+hDHYRxCN1DofTf2vXcBW/AGqFkjSWvNxaoQoJitJs4A0YhxRpfbZLuWkrOkVZdJ5otOl7IuS05zSVNas4g0IqCIrClN8+l0+jtXnwIotuyBLYLGOyLbEsJrrUgQAhvSmurT42U9zU+Pp/N5qkUYcT+OX7/76tc/fP/VmzcI+On9x2maasPfUyDnm12RmU3t6fRUSz2dz0D4+s1X+8M+dt3pPP3pz3/65cP7eZrWdUk5m2oDmcSuD7HzvgsuBHJEXKqq8X537Dt5epr62D+TIplwGLrjfswgVWupxUzXeUoGJopo5JgAixmBMZMBATIj9zfOd/24P66X+fT4NF+mXKuItNQgbDMjxz56FwP7a/AJExpuCQtGoK3OJ2IHjIIoYFlylpxrqjVXURBsn9ySU86rzyjKOedSq1bQ8mJSCp1/+83tPL09PszrKqTeOR87pwhLLo2py6Qq5BhDiFUAbM25auWaQarqCDD00QVzlGVZTh+lpF0P33/7+hDxcjmXWktJOc3OmQIaUHDBE9blXNLkOu6PQ9URZstVrSxStMSu1mykhMTRcx996QlJBV1WXyBmKEtZp5nGDq8kHwMQAiFrJksFoJZR+sUt3cBgMwHjpohxTczWcrO3bt6W6yfaxn2Nur+F+12rfnyO5QAQsCw1p1RLS0p+6Ry0yL82atkQfQ2kQVs6M3nfICE+vOQOLlL/uDz9t+lxHP1unZZPnyZyj5e57wdVM6lWii+5rymW7KT4qiwAqehlub1c/kUCHN4M4xhiRGZ23rvoQ+9jh8QC7cyxjWhYSlKrBmBWq5ZSV6mr2WTyqPIpLe+n81ObNKelpkXTrOtqtWzYGACD1is1lVLV1lTyutqXDb8GKyAmfVYXUgBsPUCtIgRISstaPnx4TEsh0XmaT4+ziAUfxmEYx93xcHz16o1Uefz0lIusKashMaZS1lyWdZVsy7JM53Ot5Xx67Mchdl2I3WWe3398/+NPP374+LGWggDOuT7GGEP00ccuhOg4OHKE4BBFQMQQOXg3DuPQ98+7MiI6zzE4AqwKhCpFZBNTERF63lJ6GJGJ20xjSzgcjY42P01V6pJSniznygLesQ/Oh7Dfj8fjcbc/DuOx7w/dMLBnJGp1OhJxM1kRAoKAJJW15CprFsml1qpt2l2LpJSWZVnW1UdGo3VdSxEQqKU8l7HMvDvu7t/edcN+XaRmUAVkIGdZRJKRKZMWhhi4d9GH0I89spgEpLahIzpP3pNjlTKdP9c070Ik1K4LBruikpMDqmCliC5Lmqcz+o8wL7kkwxx2YYQdR8rrWtdccwJQJEMmR97H6LrIJRIRKmJQKuSKM18ZiboOvgTggCpq4+Zdcau40fJfPqgtjuIFWYG4pXR9QQI1VEesqEIiZtYEf1dEtQG2nNkWvQaCVkRySVLqcz8ANgPNls0NV0Rlu1EAAjtPzEQsAC9dGAAAqGgnts/BxIGC1HmaPvz88fHREVlVq8VycSX3krtag0qo6gS8gBcjNQSMIXA730UFpKKkWhGSIRVrU3iTa0t/XeacstS6lHLJeZJyUblIfaz5cV0+zZfTuqw55ZKqFCvJSgYRVKDthdjopApSal3XlNa/88K4pppu4OQqLUIBgK9BdQiqprl8fjiVJME5qNvliwiHbhy6wao1Ao9nZo5qNC055QKUHk6n83R5fHw8T5dUMoIBAQK8fvMql3I6nf7tL3/68aefPn7+uObVoYshjMMwDuM4DtF3iC0KgVrYWjNDpVTnaQ3e97HbjQPzS+pwOyMQjBADkTgQa0JsdkzOMyIaO7CWCN0+eaZqZOSJfeDzcnm6TNVgTkJofYRu6HaH3Zu3r9+8/epw++rm5u729m5/OA77McaetqRPRERmMK25LGtep7ycp8u6PkrBmlWbGxIgZ5nn9TLN8zSHwTuglFPJxSrknL6AeaKhj/EQ+1CKLdOaS2kfiVzWUgqYIgih9NVj4Bjjqze9KtVMoEZYY4yhDyH4QK7UWqZ5On9awFIqT08pV3MhErV4Ilxzhacngb+Ep48cIztCUxdwd3sYdn1eU5qX5XKOffTB++iAYy6d7yIlJ2YZLAEUNEOsjqHrML4s/tZ90xfC2daYv/bl9CVGwux5kk9bPt9LvNZzxW7bPcDMDE3NoAVNIDVMLW1832tlAYCAtCV+bagnQGwiC2gM7yZGNgRsYCxQA6hmKeWc8/OCIc/uds+vj3gYaucmKNN8lvqgtVoRLUXSCqV4Fa/qDbyAV/CGwZAB0ZAd+8sjx4DeIXvgAOwNSZEUrW5XQFOzWmtKa0opLWkpadW8aFlEVqkJLNW61i0hUlGJsCU3KVTFpv1VuN4kzaCUmtKaS/q7xd+URtocwqhtgqJqIlpN1MAQSGnVUtYTGZooE3cxDEPnXWRyOdfzeXr4/ERI85JTlnnJ07KIwufHx9PldL6cnk7nXDIR+uj7rjOzeZpE5Jdffvn06VMphZk7H8Z+OBx2Y7/ru96xb0gfomaHsjbvFdF5nqsPbZj0Ir2Etn8LEnCbBRgQbvJHx7x9OggQiZkNoLagbxUEZMe+eN9HDF6AqqJjZh93+/39q/t3X7979/W3h+Obw/HusD/u9ofxcOi6gZ1DdgiMCEigklOalrzsytqdT2b942E+Hu6Xc9FOa65pTWqQqzTLD
xttNm5Vsxf6zSaRBHGOHKM3jwURRCoCaikGJq0zKWZF1JlF7wJ5Cw4UAKr3wceAjCDFSoKay7qc5ukyrU+nIsbdOLZ5DoMQmOparPrl5DoXYgihOdCDo4hAYCA1sedmqwcooEIEjV2DoM6z0Qb7r5qLvvj5AZ6fYVuwbS8wvLbe2le2pI1W+6gasbVQjOteeJ3hX+1betVfmCoaGAFtixy2RGZ4mQxcgZewceOJEACJwGBzyF1fer0WBnYl5714xgHI8XDY7W4Pw7jzXQfg1MyyNuYWECqgIhRBqIiihEZgBMDQCIZmxbDOsBIxAzkjhy32ErEt/lJrtfZKaC25lJzWnGrOVrJKllrajUVVpRVN1wqqTey3pmfbVJ/J8FhVWmn/JS3KeWIza5kx0ROYiVgttZQWk2HERN6B8fYiKRBCCygzpCo2z+v7D59zEgI+n+fzZVmXPM/rvK6fHz4/PT0ty6xaQ/C7/W4Yh77v+i6cHx+neV4uE5oNXe+970Ic+35/2McQQaHUqgJ2LRARgACYCcCaNDulNE1f+MbVSsml5hADI6mygiEiGTpq1NN2CgATeEcGgAakWqyqQS6atSgjeKfsgEPo+/3xeP/6zbuvv/7mV9/+6rsf7u6+ORzuQuhi7H2IHDomz84Tu7apmFWRXKUI6mWe9uMH1DA9puh3ec3LZXp8fAx9j+wACdERO+88qAqUq2WjrZZaynlePuk6G3kCYk+eES0E70Q6u2aFIqqKLUvKSTzHrhu9D46jD4GCN6hpeaolBwYCOp3nh6fzvAigM9RaGa0wQxcjIErKRaqrXHKpsUqsFo2BpTTFlkOEWtc8ncXmtCwghVtqBfrOuyqWKcs6rcvHabqRL2ywzx51U1VpMWfNaKPwXIlvRzk2sXnb6/G6/q8pVi+lxObixC1W2cyaErBVie3fNknRL0wBbfG3ygJawpKZbMb6bak8r/+taPjSCcN8GIfXx0M37mPXIwf2rslPmtUu5zWnXNaa1tL0fLXW3E6YWkspJZdaiuRqbYCIROzIBUMU0KJ1Tak+H87thxIR02padRMpaq0qoqVus6h2VFepUvVaNBHgBvnHhrbZ+iBfptu51lBpfGfHbApai21fRQ02QqQtNbExaWybrFjO9XKZcy6fPz8FHxlZjUqt8zpf5svj+fTh44fT5ZRKAjAf/DD0+/3Y9z0T1JxLSo5oCJ2LIcbYxdh3XYwRAVNOIorgnm992MhI5BCxOZzXdf2yJGsnv4kQgCMyMmVmw2YXI0IgwEY+ZnauKYS4moKRAmLojeIx4empIH9CLsO4u3v9+tvvvv/+hx++/fb7r959fXN80/f7lvCA5AjaxZQde+Q2N4leewMFghB2jjoT0gJ3N68fHz5/fP++mjnHIfbkQkunY3YYVAm85+sGBapa6jxPD2s+IcWhH7oukmMmpkCqThXUCIx084OpgJirzlfnHGAww1xE6rQucy1lWeZSqxoheRfavtNYkM45ZOeQqSkzS96akqJCyN5FMzUEcgRkOV1KLmuSnGotCdl5dh17YqdmtUsOtZZlt4stUPW6+gH0OaMctUlBt9uAQms4Xi2U+FzN/b8t/q2GaNDj9vXtY65gZG0jNrFrLs9VHPh/oz9Yi/29bk0bdGFrMeKXStjrXwBW8GrRoCMKXQxd9F1kx4CNIldKLnmpy1LmZV1zyVKKSNXaooo4NXF/hVrNDJCIHHtvgFXFSbEVudarUsFgqypRDbeBR61aipZSctYmSpeWLC5V1RAdt2Zpu/AbETQA32W6HG4Ozr1ck52KQoNGIYJu9QYAeOcNtzDUUgqoeXYOsQXbiWnKuUh9PJ9SWqRWNOecD3Fg5xRkWuaPnz9+evh8vlyKFO+D8+wcO2bP3NgGg2rzZhqRD76N7sAg53yeLlat6wbv/fb+mwGyc+QcEaLKM0r85R1qmzkbOEBkMiJjs80LisRbhGtTHQECYWQkJHS+292+UQj73VxW/4f//u9I636/e/fu69/9/p9/9/vfvHv35ng8Bh/URGrNJTdPNzvvffbaeReQPRJd91JjCvvdrf9VuLt79f5XP//p3/71D84/nS4GMu72oRtKVaQSPHsfUbnvw8viN8s5zefT6WElCHR3749jNQ/OATpAQDKyZnPlVqU2D0KtNVMGoC1QvFbT0tTsIjbsd3HcZzFVaB3Q6IkZ2hpCULVapORSkq6CErpIyMjN40RqdZ0f01ofHi5FKO6O/XgcDvsYOiYiAAa7u7t//fr1ePf10PcvK/8qv7XN1wiNmt+SlxGaeL9d1RFMcVuARFsdD/qFgBe+WMvXRYlk1pAzoIggVzzGNcTz+e9eH20dtar/5TKi22lv1kJOXtoE7SFVlqdp+njiSgGc930A5DYsYWTvmTyo1kHyKimXIiJo1awt/pyT1ooK3CpnfAmhV7MskktOKadcaqm1Vq0V0bz3TA6ATWFLKa61pLSuS1pTagz4VgttwWGuBfu1u69zTMRF6jRdvv3+29h1L4s/lwJgbQuowq03oAZIfCXVS/NWMKLRJrfSrIUQEErJ03TOKZsis49dT+wNdV6Xjw+fns6nZV0BzQdsEPKtoAP0RBgCmqVai2qL7zW1XHNa07wsJtbYLJ5bL1fNcMtoNb3CtV52Z0LwzIGYDRiMCFWtmFbRFr3qjJkbhoUBtDUAnPPOhf3h7qtv/gNQ34fPP//42J73bje8/eqrX//mtz/88NvdbgjBg7Ueil7vrRUISYm1KBC1dMpro4vZMbvYxZvb23EYSs2XaTqdLwb2zXffH293SzqrriIVTAMb8xcVpoGKaVHNFYGsFFlz0iqe2UXiVoxqS2ADbVZgqwbQ5r9iZrQspeSsWkvNJWVmHsZdF2NEqqpVCgEQA7V09C3gHTcFrFipkkomcoyOCNlHMLGqpazLfE4VhR35LtTeURUzZuIQ4+BC4OG4ez5hrPlfcq7eVWY1YzNibax8sBYwA+29bq8Abo3+Rq+41qvtiHreRL44xun6eP6OeEVVbrf99pSe/0r7MyRrEOy/vxQAGCI5JlErpdT6MoKVKtPT5fTpEcXQ0JETMXUrBhc6jpH76JiAUR2reiMmaxhNoQLobXueDrYhBbSMZWQDqKpmAyCKQJWS1jTPk0hp+gNEjw1R2e4QOa/rsqa05lSltkA/aCuFWvKaEXGLIHLsquo8H16/vffevyz+Ja2mGy2koabbq4PAxTSXLCpKhAot/QW2u5sgAjMZKBH44MzQDHJNNS1rTvOynKfLklKqhZnbJnqNV84MzhEGxwiRiHRNUmo2A4CmR0gpmVqTzWAMngnQVLWUXMwkt4Rma2y8bbERDSGOMRAYiZAjkTov85JSFSGEENgxIxGTc863KqTr+t3u8PW773/32/+sGsr8Bw8+r3PNyzB0b968/va7X7/7+gep7VZYiZR8O2sRCKlVM46JiTwgNWQLtGuKqbVSKfb98ebm62++McAQwj/9y+9i73766S+fP/88nR/A1l3vVMtLFWNI6rwb9vvI6LyPUmBZEjP1A4TQwuObX0FUURW1aZA9
1+oKS62wTLUUQQKRmtbkEMmw78R1kUFTmVMt66Le+WHovfebytMFQiRjNExLtkohdNHHEAdHaFKLQuiXMqeyXi5qVtYL+Voq+TAejl3nA5YwHp/nyaq6zOt0mVqQLlJjUhCAqciXi7/VZdtR/hyBuVX+z7UDXNt4254H2K51xEQNrW3QVEPbuOB5p2i/ef7oG6Dapvt6/oJt70BkYlUtOa9foOJE9HK+fPr0ueScckmpUj+sZhT9uO8P+7AfvWOoueRSS1VpA0bRUlLNqaTVRACAARtI3JCAHJJrMrKuHw6Hmxh7QLvM04f35TKnVFYSdOyJWsqrARsGdUQxOK5q4BAJCbZMNtPaYv0IgnchkPfeAEPH+8PIX5b9/+l/+8+mVqWaaCuQr7IHEtNcq4ERomMXnWe6WiVUEIwcE16NFoCqUEVLrWtKa8prybmWWiszD13XX1Gs3jnPzjMSkhjkWpY1l1pbuSWqpdZlXc0g+hhj6HxwjgGtpXC1bBVVRcDf/u63/TC0ZxJC//U3v/FMjftPjkqp57b4VQkgeGJmhOuKZcfku74/HG7evv3u9ZtfqfDXX6ff//4//tf/+n/M8/Rf/vf/8vv/+C9v3n3TH+/KkqQUs3rlyW+9EyRuka7tgPzi7G5nCyAgEIQYb+/uAWl/vIkxfvf9t8RARF3XnZ8Oquuu98ebr5zbdmUf+vuvfgfGKkhIznlTSykjQj/EEJjYAFS0iCjAtvgNkZkQmchLsXUVqYCMZlLSygh9jLELoQsKNq9LqUVVvHPjMPrgm6JZtUGk22aCjpxvV9vYMZOpxMNEw+Myr6pA5GOIAFSKoPP9/tB1IVDtdvd0fS5D3//zb39DCMM4hBC2NEUmADMVMGljnFajfjlWR9iuac89/2fApZqawtbrxga3ou3O1ZJtAZ7D29oG1OwB9sX1oQmIBLa6f5sKtD9FYCQ1raX+7te/7q+lct8Pv/7dP6lq38V+HIf9AXxcVcj7YdftxrAbHFNDSzcXISCYiWwYrZLantiko4Bt8TMiIztmNwy7481t3/VIOK/L/e2ry3ypUhGAnSPiLy4h1rKjTAXAiAgBtSlFVapu8TLee++jcwGRpOp3X/+qf76OAeAf//WPzw3U9mM9/wvXuxa8VGL4PKHd6jN8uU1tjZKrDFM3lTUgQNuaN2nr9X+F2wDGXjZdeK4sFFojefsbL12f65UPEKAfhvv7+xADANRa5stjyevzz9Yq4C2NdXPob7+u35+YidmF0HXdAIDzvJzP54eHzyKy3+/2+8N+f/AhtM/by/3x+updX4TrTPqLl+6lF4FgqqWUNpclothFQMhpLSVLLQbKRCHErj8wOwCQmufLQ0nL87dohvbWsMAtC2trettV0AbX1hfC9Qxsw5J2SQHgTQ+HANDuBmZGiMRMTazYBvDPN+QrOathONo31Stwqu1/jZnTWvTEjggJjV3wcUfsAGBN6f2HT/OytMiJ5/fg+kK+vEx/L6j5f/hPdn39rz8qfvHr5c/gi+/yxRd/+dbY85/8z18AgLClbgx9//ruPsYAAGldP77/ZZlnIiImZgdtQonI3D5LiC+f0C9+0KYvvPqTrvXN9bdblxNbRjARAaKq5Fza23T9lMHfP4mXd+rlvrh93+ePw7aAANDMYuxu9jfPlf+zWfIfj388/vH4X+tB/99f8o/HPx7/ePz/8fGPxf+Pxz8e/4s+/i9Dx078NJb3mgAAAABJRU5ErkJggg==\n", + "text/plain": [ + "" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "height_width = 32\n", + "\n", + "def show_collage(examples):\n", + " box_size = height_width + 2\n", + " num_rows, num_cols = examples.shape[:2]\n", + "\n", + " collage = Image.new(\n", + " mode=\"RGB\",\n", + " size=(num_cols * box_size, num_rows * box_size),\n", + " color=(255, 255, 255),\n", + " )\n", + " for row_idx in range(num_rows):\n", + " for col_idx in range(num_cols):\n", + " array = (np.array(examples[row_idx, col_idx]) * 255).astype(np.uint8)\n", + " array = array.transpose(1,2,0)\n", + " collage.paste(\n", + " Image.fromarray(array), (col_idx * box_size, row_idx * box_size)\n", + " )\n", + "\n", + " collage = collage.resize((2 * num_cols * box_size, 2 * num_rows * box_size))\n", + " return collage\n", + "\n", + "sample_idxs = np.random.randint(0, 50000, size=(5, 5))\n", + "examples = x_train[sample_idxs]\n", + "show_collage(examples)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 构建训练数据\n", + "\n", + "图片检索的模型的训练样本跟我们常见的分类任务的训练样本不太一样的地方在于,每个训练样本并不是一个`(image, class)`这样的形式。而是(image0, image1, similary_or_not)的形式,即,每一个训练样本由两张图片组成,而其`label`是这两张图片是否相似的标志位(0或者1)。\n", + "\n", + "很自然的我们能够想到,来自同一个类别的两张图片,是相似的图片,而来自不同类别的两张图片,应该是不相似的图片。\n", + "\n", + "为了能够方便的抽样出相似图片(以及不相似图片)的样本,我们先建立能够根据类别找到该类别下所有图片的索引。" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "-L3amT6BC1Jm" + }, + "outputs": [], + "source": [ + "class_idx_to_train_idxs = defaultdict(list)\n", + "for y_train_idx, y in enumerate(y_train):\n", + " class_idx_to_train_idxs[y].append(y_train_idx)\n", + "\n", + "class_idx_to_test_idxs = defaultdict(list)\n", + "for y_test_idx, y in 
enumerate(y_test):\n", + " class_idx_to_test_idxs[y].append(y_test_idx)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "有了上面的索引,我们就可以为飞桨准备一个读取数据的迭代器。该迭代器每次生成`2 * number of classes`张图片,在CIFAR10数据集中,这会是20张图片。前10张图片,和后10张图片,分别是10个类别中每个类别随机抽出的一张图片。这样,在实际的训练过程中,我们就会有10张相似的图片和90张不相似的图片(前10张图片中的任意一张图片,都与后10张的对应位置的1张图片相似,而与其他9张图片不相似)。\n" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "Bj3vLn2rC1Jq" + }, + "outputs": [], + "source": [ + "num_classes = 10\n", + "\n", + "def reader_creator(num_batchs):\n", + " def reader():\n", + " iter_step = 0\n", + " while True:\n", + " if iter_step >= num_batchs:\n", + " break\n", + " iter_step += 1\n", + " x = np.empty((2, num_classes, 3, height_width, height_width), dtype=np.float32)\n", + " for class_idx in range(num_classes):\n", + " examples_for_class = class_idx_to_train_idxs[class_idx]\n", + " anchor_idx = random.choice(examples_for_class)\n", + " positive_idx = random.choice(examples_for_class)\n", + " while positive_idx == anchor_idx:\n", + " positive_idx = random.choice(examples_for_class)\n", + " x[0, class_idx] = x_train[anchor_idx]\n", + " x[1, class_idx] = x_train[positive_idx]\n", + " yield x\n", + "\n", + " return reader\n", + "\n", + "\n", + "# num_batchs: how many batchs to generate\n", + "def anchor_positive_pairs(num_batchs=100):\n", + " return reader_creator(num_batchs)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "zIxWRe9GC1Jt" + }, + "outputs": [], + "source": [ + "pairs_train_reader = anchor_positive_pairs(num_batchs=1000)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "拿出第一批次的图片,并可视化的展示出来,如下所示。(这样更容易理解训练样本的构成)" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 153 + }, + "colab_type": "code", + "id": "XeAYtBaSC1J1", + "outputId": "f8e3ed0e-3f91-4e07-d83a-9c398d59ba59", + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "(2, 10, 3, 32, 32)\n" + ] + }, + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAqgAAACICAIAAACk6Y4UAAEAAElEQVR4nLz9SY9tWZYeiK1m732a21j7OncPj8iISCYrCRaZFAiKQAmoGTWQoB9Qo5qppAIkaKKJZtJUE82EgiaCfoImGqgGVYAkosgSk2CmMoOZkeHh4c17z9rbnGbv1WiwzzWz5xFJZhYEHRjc7ZldO/ece/Zezbe+9S38z/+Pfwru7g7uDg4ATggAjgino37n9R/uT9+724vXPH9fzwMOCBAcEcAIDNzAEIxcEdydDciRHPB0PgRwcEMwBEdY3sjNHAAB4fkLRH0u+odfnv0n//FPfvx6BQC//OVX/8X/6f/yr/7VvwkhxBhjjEQkIu5ORIiuagBARETEzAAgou4QAiEiAJi5qjBzSomIxMzN3CyG2LQtERURUTVVBydCIkIERiIic5+zqKo5ICITg7ubAgIxu3rOeZqmaZoQoe/aFAIgFpE5yzTnec7/6I/+/n/+P/9Pf//nPwGAmw/f/j//y//r99/8qu/7tm3aGJnQzM3MwSJ53+A4Hr/5/vs/+8tv/vmffPXhZtclv+75xxfRzP7qw/FxVAoRkcxR1HIRjs367OrLn/7sj/7oH6bU/Pkv/vLm9sZFA+GqTVebzbtX1+ebVdNg1/N20zwchz/+xTffT1He/Ozq7Rd/74tX/8EV/0H3cNkWDwkcoRTNc5nGYdgfD4/zuLd51PEo07H/7O9+/k//k+7qxwDw8cP3/9V/+X/76qtfOIoCiFHgZt2vEHAYRynFAdGNQTfr7t3rKzX/+vvv/upXX//iL/+ybZt/8o//4edvXsk8fP/+/b/+01+Mc/nRj77sVt08zdM0TdOc5yKzEnDbdpvN6urqYpqnr776lVv+8Y+uf/KTL3/8499/2E3/9//qn3///uPFefejz9/84R/+XNX/zZ/8+Xff3ZRsq9X6889/dHV10bYJ0IdxdLMYo6kejocfffmzf/Y//J+8ffcFAPz5L//t//6/+D/8i3/9L1dN08QmxGhueRjLOJVhJIDUtyEFdAczVItN6M76dtt3m46Q81RkFsni5hYBA1FDSGamAEDEjlgYHA0YiIEDUIC6lLSYzKXMotkt03zUw/2cR3EFAMDgYK6jWTEFdwRIRExMgQmRoGgZh+Gf/qN/8r/5X/yv//D3/wMA+JNp/N99+Ob/cTysmGNdyoinTY+nY9kaRPjCEgAAOIKdbAUBoQM6gAM6MgAhAqAgOAERoAOru5uhGYChIwCbB3AmJzACJ4BgSEDk5AACPpayG46P+/3D3e0swl0bVn3qe2eaRP4Hq7P/7Rc//fv9GgD+xb/8F//Zf/Y/+xf/zX8DALBcKhBgvRMA8Gq13OHfdeByYwBPn8Oz9UMHMARnxIao4djEaI7HeZ5ExcHAER3A6/uaAyOmxGer7tXF9nq7vVxtL1er8y4S4DBnJOq7lqyU/c3P/sM/+h/9T/9Xn/38DwDgw6/+2//6//y//PBn//Xl2VnX9cwIVlSOhJjSJVMvxmCz+z1TbmNMTIE8xdg068AtLQYOOFAIKTATgjocFR9m+nDkQVvkbbt5c/7mZ6uLd7HfNv2m6dZNt4pNz01DIVGMTMnMch6G+68O3/5LIrv60R91l78HvAWg3/nxuVueZgdPTUPEADAM49ff/GZ/OHIIhASEoprnDEBNm8Ahl5LneS5FVR1czWcxMVdzZu7b1EZuIoF5zsVVHcFdS8nuSkQMgIBmkA3dmAADYozODE6o4LOYqKmDKQggIjBhIGjYmxTatmtTk0JkRDB3cCMvqsNchrkcxnxxdvZHf/B3LrYbAHg8lj/96uH7jw/z+CjTznQ/T3fH/Qdg7C5/FLpLxYbTpl1ddutNt95wSnVZO7k9+0QHADcDAKw/wGXfIAA6oAGIm7mKm5maG5hgAXBwRGSiAICqhkghRiZktIf3v/7LP/5/5eP+3ec/6i/fWXftYYWBERnd5lx2h+lHZ+Gf/cHlu22qDyv8jqX/Nz4QX76+bpuXv/13nA5Pr0AE/K39iJ/GGk9G59nxAz7vzqejBjBwslu/dbH4w38jIgIz118hGgC/tHmG6ABmpqru8HRydwdwdwNAAwdz81PcdLo4B7f6kvrr+rDx9FaIxAxIDuQADhBjfHmFRMiEP/gMHdzMDMwU3YwQmZlDRA5qRdTVAQAIgHGJp9RMzQCAmZumSSGYas5ZSiaAbt03MSSmEIOaqhaEQECmWopmhWwITuo0GR7Fj1l7skhAiECExEhMvHwBM8ToGpnDpxeOqiBmRVWdPJK1FpgDEYYIiIwY2fu2iyGBCiMxUWSOTAQAbtWXMTORqqqb1/jMzBEokhNwjImIcs7zPKtqYEophRDcTbW4CYHFwDEyIhBBTLHrurbBvl/F+LwRArMhAoCqqaqqfrI8GTEQJ6bAQODqQAB0WpX1G3dAQDr9Ex3QkYAZnMEZFKF6SA6IxGgO1fETGLkzUUCO+OT4AVyLcQSKqJNrAHdImR1cM7g7srvaciUOyzdcHxEhIoFhYGT+wR7/rT267DUEwOW/n/zi5UGA1U0SvnjB4j2rCwR0CAZkwOoOpgiKoACATp9saXw6hwGYQzGtjzLnbEQYI8VIzPX+GOCHd/L/vwNP+5fQgRDpr78Sd1ezUrQUVTU1MwMiICIHyCWTFf/UkLqDqBeBopbUEImWNyUANEdRcHUABVclUnBwZwpqiISqgK5kyobu7hYYQQzmAqUk5KZp1u36uj9/t7n+rN2+is0qtn1supDakFoKCUMgCoB0ym3IgNy8FAnzjGFyZzM1UzczNzczUy0lz9M8DLFtr99+ntoeFvsDYoAGQIslFRV3xIIIqKYGy8en5qIq6mpugAwObuAI5mDuKqYKCA625IRuCtVqoyM4mgMqIhHWV6iBmKuCArjXpQrV2JqD2rLB5Xmjghqouaqpmpm9jBTdVcpY8lHLqJJds6uAGwIzGGjJoqTctJtPvOBvrx34dLngDzdkDauQq3sEdCQKXrNsZCA+uVZaXrGYmOW+AJCZIQYgBEACZlaiH/rDgEiADn76qm9OJ7f6Ir9/Sgaevn/xufhicvD5XtDrlnZYrtRx+eCx2kVCrBl/DXrc4RRiO6EjgKMjgD27+nplAABUtxw9bzp3NzN3I0JmevL9vvwtnrKWF+kMIRFVbMDdVbX+CTMTESIqgIoUEUCszqz+HWDd0U5Ejq7uZmDm5suDdUB3LGKupoTgrioAVq8LAIA4Nm1D1Ji1paQp931f3Vy9wsAca0hy+ovq9VUFVAuAizBiTE3qVpzmMulcLBsEhEAQCZxA1Espoo5ITUxn2+2677XkcRrG4UiEb9682a5XWiZUKSpTHjerLiBqtmnIuwkOJTQeM8R7xQ+TnecSRS5Dww3X58aqQVIIUWNydGdghtS2eLoXcHTHnP1wzHPOBr7qddN1ITDG5BE5xBRD38Z1m1ZdM0xj5NBE7pvYphDQ0RRMETDFlNVFRETapmlTSjGZKHhAIAJQ0/3hcDzuSylt065W667rzMo8H62MjNK3se8SES
Lhet27Uwp9Sm3bJiIEcEJsm1ZVcymllDnPRXIN2pZ1FJFaDqs2chQp6uYBMSCSIyIFREav7puRAwIZkCE5BQun3eNqRo7BY8scSAUAIIToDIAK0Zs2hMQhAgegCABgZlJUZpHJy2zxoJCQ9qUcVIsDuhaFbGZgjs6AEShgiMQUkMgF2Q2bgE+7AGr8QwREjogITnjKkmmJyU+BajUwy4ZyACBHOJkmhAUu9JMFMHQCD+7RvDVIBmTgAEJQCGZ0RQcAdmAHAqxZck14HFFUxznvj8f7+8djnowDNil0TUyJEcHAgAK+MGb+aYCyAI5/kyz/+fCTIXn6fnEwL+/6ZAGIKIbgDoEzq2s1n3XjAyIAoQG6u5cix3FqQ1ynPictwSiGEEKWsj8e0EpPToGf7sUd5mzDBN1UQhCgELg+i2DApjRlUDHyEjmTgzMxuXuASIJkZu4z2cjkKbUxcADMAo+DDbShzdX5q89e/ejvnr35O83lz7k5I2LigETEjPRpNIXIISAHgaBFj0MuuAfMIjoNhzwPeZ7KPEqe8jyXaZqO++Pu4fLtF/1/9M+q4zf3qdhUDAkCuJuUnKdpUvMshYgB0d2Q0BGL6JRLVldHZmKHGlu4AJh5mVV1CRKXDWGCLkBORE5IroCGpIEcCdRVvRRXMUBwIK+piqM7qENRx6LgRQVCCNVliFkWm4vMuRRRU39aP6ZlPNwedx+szCYj2KBlAhcGDK5Shv39SGnq+w3Ztno1OAHXiKcFWk9Gz0nysuOevCuB198TgoGZoSNzNEBXByCsHsRO50GozrCCaWoOhE3bUNuKKAAwk6OHaaZPnf//zzJ+/+R/8LRv6t052Ivvq7cmRPo04/fnv/zdGf+LK/xdGb/ZEqbVIAAAatB22sb1tMtviV441U8PP0EHTzihqgJgRasQHbzGGQ5g9eLdajSD1fiZgTsgUkzcdW3XNCkFYqqFCxFzcEQCBHfIOQMNMT1n/ATATIGJaPGf7mBmaiYiDuoE5BoIAzNxNGQpmlGyJGJHgIBgAApu7uYeEJg4hkhEOc9ZCqK3bdpuN+fbTZ5THo/5cCilEDVEPmfZj3k/ln2e9PExpPYj5nSALjw2Z7Re9alFQEY24sAhhhjdkgf0SB5D6vrThYOqHo/jw/3+4fE4zxOQyWbqU9A851ncKTRN17UBVxqjqoNDIEohdDFGJlDVUswMAWOMURQACKltWmZimkUUnAiZmXPO825S1RBC27Zd16cYRXKZB/Qc2bsu9l0bYyAOl5eXfa8pdEQBoEIIkYncfJ7ncZxyLiJiZs+LGgGYKHHsUkAuWsytBt+O7miO5qjq6u5MbOCiUqSICUHdoQ5YkwpDB0IgPuXNhEDAhBSoSTE2HCIQOydCBEdXdUlSkubZOCo3IbbzEXMeSl30xIgMoADkxEgBMPjiviuCRJ9kp/T002Uj0AmPgsXxv3g1nRxtRbGWNN1rxuHojl5BeyBwAI9gSa0z79WTARoWgBF9QAeCTOjMhEiOhEhAAOCI5qDuk5TDMO6P4zRnBaCmCV0X2hSYQt1aRJGZ/nbm6m92/PY5l4KmQ80eTpFS3fkVmnI3MFD7YZRhZkV0HOcDxX0cWg4dUSCMbVK3KWfJo6IOU9bTKjPHLDRmPEyOrAqUgrMjE4GROmVzVQjm6FbIALCae1RmCqrihqzG5I7qTkY0Fx8nnbB4qx3Hdnu5vn6Xzt8BpvpAzUzVrBSr2a6qmapKno+H++8f7+81T1m/Cc0AkIroPB7maZCcpcwqWaVoKWWap+FQ8mynErCZz6JDLgAQmdCklFJERE3MiZiIkJZY1BzEPIuag0MIDGZqimbmqirZ1CgQIQGC+YK4GgGCs0mFggRwnkAc0ei5aAyfuJSa7lc8AhzNUB2CB0QopnORuUguKqpm9uJRyjzeT8cbMgUrbqPJ6JINXOZjgSLTEIDJC2PFKvx3LKaXTuwpJT19X90HEoI5upEBLHF8MK8RC1S/z/QiG3dwRzUXMzF3wBhjaCIhmDszq1uo+OqLEk2owOQzSl5Pdvr2k9rY7/KRL/fH039PfwUAYEAAi31wIEA3WmKcl2yB59OffuCnM7wgEgD+7urScqmLOzYVKe6ymCBAcKtFTIAKipiqEiFzAIBSSkXvTw4eAKDGR7bg8/X05uALiFnBXHDVerOOSEhEgI5u5sUE0WMM59v1Z+/evH39+tWr6812FWKcpvnm5vb27v7+4XGaZkQchnGcMrxYJ0tOUUNiQHMHd1ET1aKKaORICAmBwUQ0z0XnSUxLTiEyOBISABA6IzoCI7pbznkcx3HqiHC9XqeUAhEAxNSqiOi9CITgwHgc9GHMx3E8Hg4iuTy8nz+c6bYJZ76yzWevztany0RCYg4hgjcAASxBo6lfIy8xZSnl483tb775fv845ZIpyDw05CUw3d8dSlFu281m8/rq8tXVxavri8BY8YkUA6FpKWXODg6ATBxDSDE2KbVtS4gqKqKl5MCh77uUQimzqjLzar1qmhYRyzyVPDFZ03DXpa5ruq5r2p64m2eVDGaOCCnFvu8AcDwM85xLLjlnAGB+seYQiYADxxQCIgzmpgDm4AZmpsUKqakKuROAIcCkGiB0CcjBXIuUnEtWcQd3bQRPgSaqACExBuRQOBChOQYgRwoETAjgSEYag3Af+p76lGDauYgUMAImVEQCdYDATuzg6nXvVdTE/WlvI1YuChMR05JJ4IsNhmiOJ7RtSfiXvHzB5U6bk9yCeePauDeuLXhCb906k85s5cAGajSo3amAW2aUGLxpMAQExiXgIEBUt6x6nPPj4TBME4bYxEh9H9omMDN5qIggYAz8dLUvw/dTxPLEQ/rdGf9vR/zV/7x8/QmhfP4ZcQVDQMzGOTMCEzUpohqqzm4VLEBYjJEDiOrkcMAhIrNDi9BG7ta9kRvgcZyOw/7m9q6UcrJjpNBOSvsRFWxWayM0DDFA4EqSImByYAMWCOAUABS4OKsHdUR3YkMCg0ahIU7G6nDMxcfHR1ndbfdDM0yrbgqRyzyUeZKSS5lkyvM0jONhHoY8DfM852kq010Zfg2WQ7qluEHsHNjAwIEoEHMMTdevQ0wcEjGfXb3p+vWTTc6lDONkUiJTpFr1RHBSdTUhJCRiRvNq8dhcVA0RRNgDqmIBcRUVQYTICZlruRMICLFhDmYpz6QqBIPIccqjOlMbYtM0bQhhybqXmNbdwaDmUu6i5mSuauAIxWTOZS4qqrpg/aes1DRPuzLdRyZGNZu1TJKnkmcDBO4ihC5hFzGSg4upAjGeFgQ++fnT8eS5T5tswZaq43EHM8cFRXZHMERwp9Orq390BTA3M1ErYqLujkjITBDQDACdEGLkJypbPX6Q8f/u428fWD+59AXvP3nzmqdzfQLo/gwKfPJu/iIe+Nu8f603malqLZXU8EJNCXUhMBEAWCUaVcPhLw5VrRh1dfxPP69XUx0/VAjPwAxUzeuSZaieG5FipBCark1nm/WbV1df/ujzt69fnZ+ftV3jiLvH/TiN97vHUvIwHt1gGKbj8TjN08sA8
eJJehGtxVCBlS1YeqVGW0lUpmaKtG5jlutxiM4NQMMdHwU9YAXv7fp+PrH3isMzn376ZpZ6pAZQaDA0BA7AKlwBf8JFDVcRynZYkx3t3f393f3r26Xqbl5uHw/LR8/LCfFxDZhhQAqsWbQU1oV5h6AyjWLsV5SCQ6TSWGcPVqd3d/t7263mx3XZdiinmZnp+PHN6jT9GpQ+8wO/gJqQABBABEZwRqG7I2xG2d1jsatBT0OzxLa0wkQDQRc7dVesWIsBF8ENSwDvjBVMzOPXd1qI3t3fX17f3d9e3V09PD0+P+4/unZS7IkZliSsgJgciFQzKDeTrmZTSzGDc5l2laUlqolOBapYHRcaVI2WU142tLTque9oUotJnN8zyOYwyNwYtMVZVsWoq7LqRWyjQWd0+Yq4U4ugVUETej7W774z+6m3H46uNezVRknmfd7epdM5FlniXnEOj29vqPfvaTn/3s85vba/IiYpKzuwZGpNilGJnQdZ5HNStFFBC4ryuOACIjIVXTNhFxtyK5lPKi3uWA6lgAC5BiAAZgteoB6prFF/GiDuxoRtQAH2auAgCMdTLmCt4KJ1Nz51bftUWBiATOFTmh5BfEtCrKIWpQzKuPHVcJIgKAEIO6IZI7ruIua8w3q2P1i0TczYpqEZUsMosEyG5aCMG8WJklZ1VTI2bTteFbm/31jqMRVIxO26BeHSXd1E2rwRk4IBlYAc1SZlmyZpHV3MmrHDg55Kdp+eb56f1hP+asHLhOCL0Oyeuh7Qat8WtW/ILHj1WeOUaKMYhonqpWA6wC3g1WW9kIzJXcQABANHT9q7u71/f3m75X06WUx+Pxyw8ff/v+43iYRQ2IkUlEPXLougrZNVBVFTMFa4F/bXjWwrQy6ZGI6jzZsVUua2GB7lUKKVQ1pIu9z0SBqxPEuUqrkE3HOstPKQ2bRJSniU15dbsAAMfLIxigfmoOFJNDrauokTABiTAl3mx319e3/TCUeQIciSNxZGJAcjdERKYUQup6RFdzsYpWdoXqC952Rl0bcJHBuLuVLMso4o5EIUnRnHXJ5qjB67pdEbJEtcYkBo4ICKqmborV55TcuXJgvFUVhC1Y+Dp3hqpvXCPTuRe9XkFEAAJnAETYEA4hJK5CKtX620OkGAMziep4PB2PJ5XLZeaEhi64DqMRzamqaBABE7M5AFCfhl1/dXNzw8BqqmbD0Pd9T8TWWZ9i36UuxlIKVCehanBaDUlMEYkJa31bu+Eq5XDYZydOg6NLzq4G4IGw7yKmsNsNm23fJY6x8o3YzaLHrAac1LgbdkQvkLhQqoGuO9R+NNYzsJXW5lA1wNWgjquqEri7VwgfNrVyNDxjz9bOGrzkqp8EcryAqvnFt7+QLsDbMbhuJ6icgDrUQ3YMhH2MfXghJptZXvI4TdM0bXa7frO5vr3bbncp5tev5+O+fPv+KKepLO6Wzg3Il/fx8u7X59bWmblZUYfNbnv1+vXbt59/fnt3N/Td8bifxzF17w3UdI4UNuhbUgXPaAWgqiQ2CjIgQPWGxHMrzx0RnLwqvBq+8JIrnwoMHMxFVc2orm5vgjYMQFjBfV6qyOl6QrhbET2O8ziXakVjrtM07veH0+kEADHEEGLg5LSpKsCUNh4Sph7Hfep2N5/9fHP/Rda43x/i9PUwvoumHBLWOsC8jUXUmMjc0MGrwHouNVz7RfdCqzqXqpmpaGTuhw0RqNs4C7qAOWLHDEihlDJOU0eGmyhbLIpps3v9+ifd3WdPs2AMv//tb98/PfYx5rK4yPNhenh4mt1v769/8Yuf/bN//lc//8l9svn0/KDqbt4FDsSBAwCWeR5Pp7lkAwip59hRkFxgWRbJRhQrCd0NRLWUvCzTsiznyhId2Sk4B4gMAZ3QvGQzMyzus+IiQZQxAJEzAkHFbKFJO2nXIwgNxFAVwQFDu3HUkB8UAdkdkY2AQydRzStNE4uq5QLiAShyY/qYawEwRHWUoiSgaGTg0QGxGGTRYymjvFCtzC1Lnsp0zDEiqZU5hIE5ETC4gS2mFWmJgqJFQbxlJYQAAVTBCYTOxy4BNFSgZhU19VXlWsAWkKJSC/3q5TzleVZVoEXtlO396fS7p6f3Sy4cQtcHjmePBHMHFVAHEDBVFbfiXl7OFQdTJcDtZoiqky2gXoWpiKqkQ51CuYGpQDbLSw7VVZn59u72/u6+Jzidjqdx3p9Oh3leFEI/JKeKxCT2ELD2YAraAqLgCrYaELYmIYCDQQwcmWMIiGjm0QkN7YyecEcEUa0akaXOPdaPgkQphC6GFDgEbtuounEqImIMgbebq7u7GfDR1CR3bry2BM+Haz3iiJA5xBD6gGIW0BgrwQFUNYSw3Q7Xu12Xkrsfj6fj6YSIXUpwRnARhRhSjLFLrgqYHZCaRyCaV0PWKquGzMQXaYyZTuPpeNg7MHFkBVMjjjEiERMTh4AN+1UxpeauATwggbtK0aKLuKo2A8g6FbaaZHgd6/iqG0QNcYcE1W657hk420kxQgBjhIi4QcCSx/G0GAGDQgFC8yWLHsajSDmdTtvrV0XXwO8O4l7dtJ0ICZCcmJwgAngIOKQuB4oppN1wdbe7vX99h4aTjMu0YLWyUAshbq9vb66vj7urnJfqTFakVEaiqopUh1400xxiNZSSMj89fID9ETgCE6OSF/SMLldXm8Rhu9sMQwwRiZwrP5DYkLJT6IXDZnf1isMZBgNBrN4hgCo3Q83eyx3Oo31boXuXFT/WzJTImHz92qXRzPqg77lPvTzwe3/9jzcHEBipC7TpuuFTxHVdPypCSLvd1c3t3Xa3mU4nQnJXhEIoUHuUF0oTiLDqp7dexvnV1qGZAlRiJXX9cHd/d3d/T4jjeJrnZRxPqot7bjSm1jFZE3RbswukZkVR2VKtQ14Z3gEd/FMtZfN1vI9VsctWiTYjhFrVEQMZZjVEiE01VwjUwVRlnpelKHFE5HEcn/eH5/1pHLM6JKrEFSZKHPvQEaKFNMRuF9I2dptud5+GKwodQoayh7Jnd6aIzEjUOHuiGpSZHBGr4rwsy7KUUlTlch1489bSUgoTUQixD8yUS5ZimhcwTQQpEBsAuBQlMpFYxOds4piGzevu6pd/8ReL+H7//PD+w8fT8bRMrjovRdyvb27+9E//5J/+5S//+Od//Oq6G5++FQURU/E6uwIHNctLnnMZl0UBYnJOzsFFScUIMYSAgGZFzavbuYrohWQvQG1yYU3STBUKmoupYQHLilnQnKKTOaxYmtoIb7UetJZMa6spEAJGInSucLiaBHp1kgV2DJwsttmJI6i51WKGiIgCOQVUMDE3RAMsRREM2MJKw3NHccxquTVqAAAMYHE7qEQtUpbRZVDaAHTo0RVcFU3NiqgDBopD2VSyIDPVFbwudQMFBEerPUAT06xZVBpaFqy4ZTRxNVWRUkQWkSnnQy4n9UOW50U+zsv7XI5AFntOAyFXgjmAVM80cAUQMKmCjGDlfLC4meRsUmK4ohj6OWVVq+y5FVNczyhxdFJxV/PEnHa7GFOIARByycfT8fnx8X
g4gHkXI0EgQDYgd4BaWLu6K7x4HLdDci3lm2cTQmROgSuhMVaLSDcBhWYshe4u6vU6LJUJAgAABBCYYggxUAhkF4G/coMq56RLfQYH4kzQlJ+rzy29LDoAZ+KQIoaoxAJO6LGpW1VwNcTAhLAss6qdTqc8L4SUYnyZoqyDknY2EjEzVXBgFRGv1QhYCBxjDGd+AoCZTdM0TlMIKRi4kYlxVfEAa6VNq/cqKEQD6UCwQwTXoxaoJo/odaQJYOi1f+9YwZLohg36sE5Vsdb0AZ0rRc+AAYk4IiTASBCYIKBJHicUQWcgrvvJESssqZxO09vjrPoSvMxQBTWwElGDXjZdSzOQopLV1AkppTjs0vYuocNmDg6Le9FS8SGpTwmZAbGUVN0Jmq2hYuuYioODquQum2azXMyzFMnqyMiEAQmNQAJhNwwpxtSnWG1bEZCIOYaYHKNDUNQoIXQ7vCDBBXN0h6oXW41dmy/CSwBEX0Gpa+fEK7s3EKfg7Rgx9+ZeV9ddy8QA7RxhX+r5y/b9S8R1AKDWOjo/fe4k1qAJBBCI+xh3Q7/p+zP4AhFDDCnFnGOXupvrm7vbu82mX6bl6enp4eGD29Il7FIKsas0gxUHsyY6bQh6fif1Hao3JDPM8yxSdpvtbrvZP+8fHx5/97vf/f7rr8pyQirZ7GhWwAxgIZTaQqqXBQHbCqwfEbCKtRIhBoRU9S8uq2Szav1uWFteqkZ1GGGMKFS1KEnBCwkBMtZBy4Kgbm4asoioIpKa7fdP375/eD7M4+JMYAAiZgIxhZD6bmBmhp5UddhNWGGUbrvtdqDgM6RJA3ktP4HJwFVVVFiZFAnJRaTkZZnneV6WuZTyEiyxtTJr4EdECiGpVry3ik3jYlIKk3YBEFOkPg2JHQCXYodpeTyOaX/or+7/+E9/qRS/ffg45vJwPMiygFlI3fb2/he//NP/3n/63/2zX/7x/U3nMhaBJWspvmSbl1LEzBcDqBMQhSBqy2nxkyAtxIm579MQU+duy1zUnBECAcMnTrYObZuKKolYdndQV1eggl4UxQCc12Vc5zUVS+TgzascQRUykwKYAYVqyekGYIyhWtE4iLmZomDkCIGziboboQJAIEBjQCYMTEggIOLuSAZYxMCMIlAEjgyEiNXkBj9RVAQ8AT8iFdOjl8F0MBhU+1KCzGxCQc21qKkDOu2GK0djwhQ5EVqjJbI7AahWsjaYoxUT0ZKlVIvSJefsqlwVfEHVithS7LjIh2l+Ny4fF92Lj4AlbS0kSD2G6M4mSuAA4ggAhqYACi5g4iog5czoM9NlGudx1Nvb0KXtsHWgqVoumLl5tVsEAFRQdHcXhyq/3w39vOTH5ydYpocP7z98825+fr6OEa52S1EVQ3VXNxEzR+J6AyuyARHqKWcX9QoTRKLEFIlbKoBEqKpOiMqVSUfOvACcigZYjnk5A0jrmCAwB+ZA3MJ4lagnQsKYUiLuVYNZdp+QnAncqenftoDdzgqOKXaJAwOKuyMDVf1AD4GZyFRPp8NpnMx0mavUOyCHxt1eBU5cLVuuk9BYC0esjtXnrzshmmiX0tkPwszmpcxL3lAwdhdRMQInNDU1UQStlnQA5u5MvmO4DXhPjm6PIHvUEGkhAxECJTMGJwAGQHCkqloKCMAAASEAMVIAD+iJMBIGRHYISEyhI+rRmdCYDkhfa36eZCrBGELDpnENBaXIsuTxhZkI5iTGReJSQtXkIgL0SjaVvJTled5/fC7T4qqIylHDrjDTMGJRXw6TSujiUJlHWAlujAwhUGDAariiZlIq3IvczayYZdcpS87ZxBwxEDfxajeuKSa3wZKvk2t0CAA9YHJnAxU38WgXWgeVvwNnEjlZu9dnNJ9VCt5a7p8fBHSu+NWhkmV/qGBHWIE50EaY7Vlsgb9mGudOTgvy/vKL2jP1aSJMgboYhq7r4kvFj4QhhJRS15W+7zf9MPRD13XMtCzjPB1NZmyK6VZ75vWD2frrmwBaywcqJtRqiq8uZXGVOu51N8vzPI7H4+EwjUfXxaGOgr2AOYAaWavq6mgPzNejB6tKRpVWI8LESETddwZ9ZiZmooYIYqZqeF6DhGRVaBAUsQgSYCA3M0YNKAXIzEopeZlLnufx9Pjw9PR0WLIbREJUhWk8Ej0NFClG9y1AcAAkjGljpnmZl/kIXpgcCBgblRsr5qjqwtein7iOxEtFljYXrPwCum4GKlJKqTeLQ4hL5uavbape3aer9IEbDz0hoQEWw6noYZrT4XjbXd3c3n/2+Y9/9OOfvP/24/P+8HyawP02be7fvPmjP/7jP/75z99+9pmV43iUcSrTojm7KJqSiFX1NKl2kIZiKLkqCkqMvr0aYogpRndzCWhiBk7E+AnYGmrsryKsKiDmtZ5UMEEQQ3PEhsZuKDkHWseNthbyxSEjKIA7EgQEZsZQN5+DqaEomaEzO8Z6iCMSgCEQAyRCpYAckahuX7c6WEZGNfemp4yr4I4TEFx4JQOAA2XuxjBA6CTEhXFy7x07tOAcTFnN3cRU1CTLQUwpAHIKsScO3CEwETW/dKuOcWpgInkp0zTPyzIueSlSFAFiQGI0NEdTF/Osdirycc7vi0+UNHbYD5Q65ETUdgg7EBgaYDUBAgO3ikk8z8UAgBBT4K522EPoYleKZrFscu6c1cXrbW81b4FuGGKMp2n6+Phg0+nw/KR5GZjebLeJwsf9YdSi5qamIjFQF2PHgOSMWMSXinJ7GThURXoeutTHyMyi68Ha/BpAK88QyREXtcOylMWeTuMllbcBEUI4q8cAIYI7AxKFEBgdykJqHQLGoI5Qswk4C2nVNpMzh8BcNUSNUAM7xbaO69DQXUpRzdVPt3q9V6QXINSpcAU9V/2AZhVWS31sOHqsakExSoyBX7R03NGgmrTVHBGJKKUA4EXEwRlXuSEEQOzQB4IevTNBlcGtkAsD16mZIjsGwOBYuy9VRBrWwM+AAZCAAkBAS+SRINS0FykQJaQOAQkL0+ygqrPa5GCKEasmSYWqmIgtxeUTjShwJ3NWZzKq9gHVWs3BVco45nE8qQkzxIQYZbE9ggueMErqmTR0IXYpMFO9jxUVQgiBAjooAYqC18OdERGxAy/uQSSXXNS8uTBjPVcVAFrMp0tidU2NAkAAYAcwDN9xTQ21D6pO7g6KXvEa549adZ3Wbn+VXFubT8hISkRORI7afhDrOj3P5i+wJrjeY6qKYC93vOJgzdzVK84N16205gVQlcScASJTiiGFFDicLZNrphxT7HXo+44DEyIjEFbet6jMpQCHDkDdV+FrXzH8blCdU8EBq+ZoBUKripQ8B7pH/JwQxuOBEMbxZCIxcEqMIO7FkbRh+mrOBYhEddO5SynuCuhEzAEBq+0mEXGImLqYLjLlVvGLigoia8V8AgIAEQI2zA86qDmqkUNgd9PIkAKKganlZZnH42n/iABPD0/HwwzYxe6KIIuU/dP7eTle6WIgyBSi5WV0k8DBVOflpGhXhw9xE6O6IasZmlbDxHY/VlqEA9SWeO3/5yVLeelemFnOJ
eclhDZ6yssCRJXX564hJKyvsKiWSUplOMeYgkDIjmOWcDxiOnTpJmJ8++rzt2+/+frrd0WfwDF12x/9+Isf/fiLrh9ykeP+eHw6HI/TaSpLcTdm7iOoomgp47KoGjCDkUNQs5yLuWzckTAQE1IYuhKg5OwaAvF3Yz8hkHtVSHMDo5U0rWCKCNQsz7Dyi7WB5JG9ZUyimkUXN3EHRMJoStiFQGQApuqqVgoCphSJGLO4GlsrLAkhxECJAge0WolW5AtHTqWYw6QOUNc+ECKSOwG1TOQcpYg0bXW4tb7XmDITgLupigTJLAvo7JoNJC/LlI88LaM8FPFtN2xj12EgZkZiREBmczEDBTc1XZbpeDg8P+8fclmYMXQp8YYgulZlLKqAFUUoiBpD2t5At4PUA0dwRHR2ZzB2pdqTdHvR4EGP4PxCdYMYwuu7uzev7+Om92oUBFx936y1Bdpsr7bZCE3VmKnrEhIej0fIo4x7yMv1dtikOGdNj8/7x4c8nxZzNQCz2A+317u+C6dpOk7zfhKHkrX4yznnFMIwDFd918cA7qaLuoE6WusDUVNyIgWYREpWmKf3z8/5rBWBgAjMxMwXWtG1E1uregLTvBzdIQYIoWsd07Ow8lq2gdXo7M2SFcG6AM4vM0xvLKa6XFdmgdXoV5vGCGCEVMGc7vWbqS75Zg8MUKf+hNz0Zde2A1HshtgNFBOGQM4UkGLoSih5MbXm3o5IhBwomAXJKnkUBdViBoQtUEZktxqeGZABGHw1imyTpmpeQg0uQ3XdC9b8Gwt4dp0cADADPAPOiELoLQzVBdXg/+RKBMDhPI6uJPP6dmm9zpWphYgGnjUXy8QeB+q2AVgent6JLKfDEqC7e/124GuyxJBi6ByJFItKkXKm3Wg1wCvqhkzOHKrnJ4LHGFKytXYGpIYDA68qmWZgjl5HsYiIK2RxfYO1AniJxUErsH1NTA2Aaj+6SlIh1Jq3CvhU16Gmirx27BEuXu+HHxdRvC7eleWG6wUFR6uKLHaWBPnkJbx24NECcarW6fhdcfsa+6t/VJ7nw/55nuj56XGajqVMqpMZoi9UGR9oSI2CuoIZFCpfzapZnrq5rx7RphuVPI/jw8eP8zSfxtPxcJjnSUoxE0QPMVGozqGIq8QmMVffhXk8ldLwElgHVqaVfMtMITBfEBQAGolf1BBczaSSlxAYGBHNzGytKQHIYHEVNfdm0GpmoMXLXJbT6UQPHx+en/ZqWg3a3Eh1lJzzdJq7Q4i71Ju7gIsWkbLMpz0STqfHAXsuE2hxsOqrer7p7ZZ4/SwrfVtXHv93bp830L+I1FyxlIL1nppVLhy4KQMSpEWqzTkVj9l4Kr4/Aj4n2rni9fb61d3rzbDtYg9I2+3V/avX19c3an46TcdxPk15KV6U1IJBdHQDNICiMmddsgApOSOwq5thw2WolLIwAYIReZ2PMyF9Gvcdfd0xil4R3ggG9VCsgKhzJ8mqJQtQfQM1clXZiKIiAERkRg6KFIgIzDWbZfFckJiDo7sV1UUcDMmQayuTmTnG4OoZ1BQJmZzNCVpLA9rbcUQn8HWtfAK2qQaajBgAogEJIrAbGceBtLgWNAGQEucCXZknUnteyvM0XXfjQNwTEDpXQDAU8Ay+qC5QTjrv8+nxtH+fyxy7mGxAUuYOlMyDUVARNXE3QGQO1CXsO+AOKFjFD7kGUPKKoyaoqjO1oWgQALjhVAAAmPlqM+w2G2HMZk2VrUrXrZtqxXoDI3EIXYDr7XbbdYnA8qwO7BIYQ4jKzCBjild9d5qnPGdRdXcO/Oru9nronh6fSH0WZ1FaQc0AAIQxxqvN9rrvGSFLdqg0XSBf0T7esP1mlkWWUsqU95c95YbqbxV/C8UVXQ9YHY3NW8LNgYl4hUACVKHidVhZK+8KvWAAQOQ6O6h5goMZrpUPAEDlhlRkkvv5aK51EDo6Na41EpG7K2ozUoXaJCAmeiG+1xM5BAzJiR25jSoYmZxRXat2ECISM3FkUnXDxb2ogpkiSrVCZyKggM5IXPF66zVfiyWvh0+rTWusoKYnXfvquFLDHbAAjYiZq7kcwQv1gwBr4AcK/J29XzsxKcUY4vqK5mAVssWRuk3c3W480/V9nzperKjOGKSLw93r3TZclxG8ECMBMDEGJQJQVQS0anzZpkZesSA1lWICwOCNRI+wAmkb16faG7h6m/fQ6uNqZgaAF1o7L49QVdXN3Bu5vK2CRlAhN2jYAzUQA1Uw9ZVFuwJcWtrXduYqqdAQ/ufr1oisiEiNv4TutSqpECRDInQxR6jevC+YgPO0IRAEpip/6GaVg3COLegeArvq48PHeT6Z6bfffv3hw7en8VBkRgwxlr6L5tZ2UWV2GoCjiOVi8zIv89E0rxfLCXnYbLue5un08eOHlGI/DPM0vf/2248f3u/3zyISOG43u9RviGPt12KbghEx57zUEFdyFQ0I7tWLs16WdhsvEiivDTZVBUCttqGNoQSASKZYjwJ3Ns3gZlayTyUsasWE3aNLhAJQljx9/Pjx48cPeTkQCodE1JFviRkhyVwmfibE65trIpiOT0uebD4Jh2XcZ166+RHKEcEJW/7bPlpLe1trsXWcW3x7+SRElFLXpa4dFqoiAojI5I3+Xp25LQbm0BnSLOpTKepCkfoMvBQ7qQSfqYt9Ir7b3tztbk/XJ6Jwd3N3tbti5sPxeEIveREHwEhhwNABlVJkUZjbvN/GOYsYOgZKKcSYUuoSuZdlnqeRyWNCABNRNQVsbh/n4FGHzAGMwRCQwNyRbBWoczOvtsKsjoaIIdAZW4a1RjB1rZRHrgZ3BMiAgbyYq1oREAMGF3HVMi+6CKABgUegCvuPGCNBAHMCDUbohmWyIlItcKWYiEUPWAU51EEQDV86cG6kJZSJAQMbcnBmIVRGoECcGCMjIihpHrYjzqc0HRX8OS+742HD1IMmj4RIoG4FdAGZrcy07Hl5xvnJpqdSZrNoNpuVEAaE6BAVeVaTZQHTjmEg9GpSgAIAhoy1zw929rJrCqDQzg9xXP1/28kSiQPAkktxNzIMEJg6ZmNQqMQwNTUECA6bbri+unp1e3vTdRv0DZSbkG52V6B+OI5HySVnYnrz6pXHWN5/LHIq7jHFz+5f3W0Gn/LxOCGTEQRHNVIEI6RAXZ9udrvrrs+yZBMxE9OAlSmBaijuCu6uaI4EWnR0mPOLqhoicuAYOBHHtXpemVbtge2sRQRs8DhfOcl4eYj4Ocy37nBDCdZgj03PCLDNeWkN/C/f1CI9tkBauUmV9ApsePbrcgd35dZzOO8XqIB7c1R3RW+3lTxEBKLaxUUEImRCd8qEpRpYV5GQGviIgUCpabnieqFwNQQEB2OTxieqKRC8HE6XSDKvI1ZWQEMMXrFcVTeOGpsWiRmYqSpPtgfWNCYMw9ClzkxUVUzUSvVA393sQBJK8gK3V1chQTHNS5jHMsTdq9e7Doajz7kJb1GIITKRQyF1c0Ngstql06KASOiMwISBw3rbbMWltfVgYAAKK3EXajcNKwTAHZoZUDXSuIz9
lcd/DvwOVf9onUy7gcEKOayBv9b9awvAz2N5wAv8/nfn/JcPXMf+dP6f1i6tjsleGcgG57bCxU8CBsLK86kzZm+NA0DEwJxSJKTANE+nZR7zMj8+fJymiYiur6/6fri9veu6TkUBkUMgovbxzErO8zwfR6gmn2416aW+6+/v725vb0PgvMzPz8/j6TRN0+Pj4zRN4DAM267rdrvbfthy9eZAw4qGRXTEvMyR6XQK4+mompmCAxCFGNPV9e0w7GIaYvrE972KoYgYEJ7L6Zr041nQr+7mBlvWRXzRsChn82gOhpbLeHye7fTum28/fvyYlwlBEZmIGQMRoZvk0VwIdTPEmFJ1j0NssFVQ8zJDmYGgSn2BGVA9iZsJWL1JLyXlp1xeRIwxcAzYKEkmKEiIQO4uIssyiyoRM4C6Z1E1K2Y9EBZLsyIvWTzPOu+XTb/dbnaB+frq+tX9QshX2ysEGsfxNB3BDVCgZJvKnK0YFedsuijMWZdsoiAC01RMlKn0qRvcY2CRTOhW6ZcKgK7uht6asxePtdw39apqia7OWhVm6kln4momi7gAQWjQ3wYtR69CnxQIAOrsw7HVIytl38HdRcuSvUCZs2clBgwIXHt3jWYCiMzoyEgoxUU1F5GiKqZipt6ENsyaH/en9BpyJ3NSJdDKJawSgwAMFIk7JwZwisJdT2ngmCQvj6ZxmeMcwaUQD2jkBTWrTKVMc55O03E8HpblqGgl8EQMjqzGrkTsaEI0A544CoYYYMBkXXKu1UDrTq6qLOc9UZcUoRGs3PeXD4I4cNgwZ7DsFRyBXYzkYITilkFqtZqYr/r+Znf15v7+5mq3jdyj7wJdd+Gq66Z5OR5PD/vTLCYOXdfdXl0txQKHU563fbfp+y4mcBCxLCqqZ1wTIgTCjmng0DEtxauDDGDT2XQHRagYomby4M5EkTnGT/b+WcRj3eIr4srqOf5yLLqZwzrxda8VP17kqS8HZ92KL6262vh9KTr83JitINA19tcfhJUF1YItVIB3/QFf8Stazk+2d1C5PFJVDqsAsTVKjGGdG3tDVblU+ItnokLs4MwUiLlNiCvgHtc6sFbyZ/lyMARBdsRG4lmzg4vrUFkn9dMRAgQkRAi1WK1JeVtzsNaon+wXby+wTtLX2kxADYwC97t083qLitshhQAKvRTKkw5he33bs4R8AssOYuTOhIwMKYXWqQd3D0EZs5CoO0Pt3FPjLzY0YxutrJG+fjqqfX8guhzUg3lTaPZPAT4vgd/BzRqz7fwxazm/rkFZ2fzm4K71ja5YfnKqPkcOUFUmVonZppZ8vguwtlzacw3tVsUYmOsVp5qlKL4sx/UUqNHUzLLkLOTWxmBE1Pfd1XbLzADkbpIlz7PmEohvbm7v7+5vb2+vrq6ZeVkWcIgpMgevaCORnJdpGveH/WbYnE77UjK4Icfrq6vPPvvx9c1NCCnGJCUv83Q4Hk/HIwJd7a63u+12s0ndJqaeORKRV2m3EAFBVaWUm+vdeLp+enqcxpOqImLqus1md31zl7rBnTbbLa8CC/XkVzUgAaeaYVlTBEc3NzVDc+LVNt2rSKhojXaxOBT1cVw+vHu3X/Srr37//uNjXkq1fndfGgmVQk0ZVPaIpdvcACYMmzhYt7vthvsYlRxBBQCMqs+iE/DlBqgHDjTSXAV98jn24yqcUntPlYjLxq2tWD3lCFMfmSlLyeKEmqzjbqPARXycM895pOnJn4d+c3tzX4rsrq8r+r3v+tPpZN9YsQKugQFdfcl5XqZFFvPisIiPi8zZEAJhMl3muZjOI0/D3KnmGDBFGvpIAcWKujlXvFD1332pYOoJKyZV7xQEXISVCDusLD8EIC9exmURodB1mAIwAoEWczRnJ+Cui1AHloEcvJiCoTk6ISEriIlKmbWYLYoKEZmc6smtKiD1iA6IyIzmrmoll7zkksXFTQNYnTyTG7bAf5FJI1ZeTkRmqAqYiABEjlTVUqxQbeURAEfsAAhzmD7Op6yap+lQ5jvUjRWSmWSyMkoep3mc5nGclxGpbK5y7J+AZ2TkABAcg1NwToJceiqAjDwgOwdoIletd4IGAHUaXVe+VRHvus6yKqnhuUeNuInpOnZialBmKQa0TalwUPfSjJIwEF31/dvb2/ub6/ub6z5G1JLQbjb91TC4+/Nx/PVX7z48HeKwjV0XAg8p/ezzz1/f3X7YP+6GwVVP4/x8mp5P02macxbzqhBniBABk2mQ4ujzPC15AQAOzMRtHO4ICFwFvQkjEQe+6sL9zXUM573vuZR5XpCCuBHSOrFf//sSjWr687I+zwG+ZkXn64PVwqlF0Jc+wLqmW/FgZ19bAECuuQkA+hnYBVC1iGpNxhUHuE4unPW8jOrD3Muy5GlGNWVe0wdr3kDnZMMdQFqGY+CIlXGOCNUZ3d2p6Vm+8MURq27cy6eo7cY6fVjZaQgAVWgU1/oQAWt/xdCY6ByozM2JavJUGVV2YTK6Vil5WTI4mEmRkksWKwWKuWtBA+x2HJy7hCE4YABH21FH275nmD0mkOgNYOzGxH3XOWL9fGCeS2GiUqROS5nIAUwdQIkQq9IOAjo2cSyn5hZWB/6+Xowa8sGpjbdgnUm1x0XgX5F252yy1iiXgb9aQtYRSg38aqArrdjB15riH6z46TwevshL6tNEsKZxDudP8DLqr8NzDERIYGDFvFzwxWOMr1+9+uLHPyZmd5ciOZdlnjZDN2wGZnr9+s3NzU3f9wBQVVlCDEQMQFbHrmVZ5uU4Hp+f355OR8mLuyOH3W736s3bzWZznmGUkokoMG83AwFeXV31fU8cmQNSpTkjE8cYAECkYqx1Wcbn/fM0nkouRNRvhs2w3eyuOEQRf/369aXvu7cxfpW1sNZFx2bqaNbK/pf7XNXDsB6MlJXGAh/3Ov7uw35a3n94PJ2mKv3tJuYqiFXbAxDdVEtZphGpT8MQ0xBTP1zdxW6LeGqYk8ZNMCTkCow+t84cVtwYAREQA32CIL3IlNdEbx36uysSUKMGQZYqn+/iGFLGsCDGrlRFbFDzNI5TzohUTIhJ1XMpj4+Px+moYEQeA5M7FJGSl1zmXKacT9NyPE05FwQyAXRCJK+U+Co9SRAC9X3CgCheweeOJFqQ6XLH1Jq+MgOt8sxMzYyByMEIqnOPomcTccQQrFpYIxgZMCBQiBxjBERUo4COoHVet/L769URdVUDWzH57Wqbm7u6uxMZU6h30FSqSpI3+pmZVin5VuZ8f8bPyIzMDazU8nxSrNBHJm8+skCOZJggkgGbo1kBoNnsGXwwIAESdwEVnI0WjwthCZ3tXk3d7gRhAnKowFByYggRKDgyIAVkbi6HtXPZ+JNobXUBqLlC7Y3XWYVjJAv0AvEJzLfbzd1uO59OU8mLKJl3MTLZVAoBpMCB06Yb7nbbt7c3N9uhDxzAsfZQzOe5HOfp3eP+w3F6mvPAcSDuEXd9f3tzmyWDF3TI83wSe56m45JzFhEzQAV3t0T0etO/3Q67xGZW8lJKZiIiJidzUffaHwjEfQiJORIl4kR+s92
G1aTHzKXIkjOFFvjP4bwhbAjPJywSVr5mO1NrEeVOaxegHqlQkR5r7/syBPiaBfjaNGi/rWUE4GC2qlC0pr9jxRwAA57fFbRRuvvF4e/uoiZi1Jwda13YBrnrDa4lN7i1ljsgtrKgNl2h9axhlcOooeQT68f1XcOZDY7nPz/tGr+kRPXgqte3ZcXmayri3/ksZrYsyziOgWNJqfYuSyniRVENvEF7U2QwYMFgREaIGLkj5uBeLASIAd2a8BUAMTNwaBWzeXUtZyKR1cm5bngzhLM8Nnj1sjaHKviFAR3MV3NuM3dFUENUIFOr86XLzR+kWiK0wXzLB721R9ZP3wL/SwACJ1vrNa1Vm8NK3vjkwp+nwrUFgFWrbr1Z7lD1mc+rcZ1dQYUoXt602oLkJoBj5qD+Evj7vv/ZH/0sxb41YbQpxoiKmhBx1w3VvaAC5s3sjI20ZsqnoiJScs4ixVTdwZFCiKlLHKj+1jN+raqhMFIMCan1OtqcZEWC1P52vbRmVkqu7GJCCinEEChGcMxZf/azL2pS8rIhGwscLhfh+obd0JVs3ZzuZg4eCTqCxakIPs3xKG4f96dpfD7MYh4jI7KbmKqBEpoDhNB33XXXbUPYMMXAKQ271A397pqYtWSRzJILghGYGRIBoKqpGVdcpDcheCcCCkC82ncAQLUpUzNLKQbmOh8gQjMVyWoWmACpri+RIqaIYDnr8/M8l5Jl2w8pBiIs7qcl70+jmUtWLaqqIcSpjP0wpE1KMYgwg5OaFJnycpqmw+l0PI2nwyilIDIAEYdh2JiVFPlqt7m92l1dbTebvus6DIARgjkC51xUkUN4qWF8bXIBGCJUx1oEd5slsyEyUgqYIqK7FgM3UHeuvUckwIAcQ2QOXQeOkgXJgNCoYgS98jls9VwnZojV9oLMWw5IBqBu6syOAQ1QtKhUpGIL8FpUsooYBSNAAgJtmpttN1XDQGxEK68mKFaNpoxaAufAbIQArkgECSNz6MTtADADfHQNXihm1hk0m2YtuUhZxIQY0s7SoHFg4GbtCeCA1WgbgBppaAWgQRs6VkGcc/GiwQVd0R3NUB3YOooxDucBbArhzf3d27vbh3HypdicASB0nQGUMRctKaXr3e7V7d3dbnc99AFsOh4YYLfduNPj8+lblQ/P+w+HUw6Jt6RE2Tw5pBBf3VwVKY8fvh3H8XA8TFn3S57U1NAdFVv+vaPwi1f3X9zeJE5P42hSVGTYbAIHFStSiS9CgUOMV9thm7qEmAgT4vXQhXWi5O5qWkqmzMFsBZLXLjvAGtmszXpRV4y5nWPdilJrkbAm3M3cplZXUDn/FyFw1Qldn8GG4XZ315axVC/pSmAHJEZc571Qm8D6HZ+OOtRiwAplOZ9thgSr0FldigDgznB+D+0dnL9aRc2hUhTqqzS9s+/XmMiwVvztkp3Vh9oo3GhVkDFreMi1Q+5n/scFUKF+p03zfDgcTD3FVOkOoqKgwFVbqYYrE8BKTg5s1fjKQNwLIBJDikyUvATJlfyLaxMcnICIUmxtb3cnYHevCB7zXON39XYRAVUn5i5GDoFqNmbm4iIV6O0Kqu5irkXMXu4LAISlqMNZh79yAHwN/Gs+6G3G3Dwoa1+lQeFBzcVB3KV25V5uaAtU0PLHygVv+Fxfx03ugNRSjvqEGWg7CT6ZsXj7pwGk67Tp/FUiGjbb6+ubtaBsNqAAQLxaQNQwZc31r/5vra6bIY9r+wArkKK+7xqH66RKVa15VxBTQERsXBKtGVcdGLUeMZ43Y62q6sI3wMqXrV0yz1lvrq/PtF13F5VSClrTflEHFQNE4OCESlxbURyaRzm5uoGJqUieRTyqIQCVjMsC0+zuDohEpEoVuknoBg6I7ME8FgFW7DlRHDD2TsnUtZSSJ8iTKxDVnJRUBRBCYAAkUnMvkqUUlcbv04tk2d1zWYrk1AcO2PI/MHerJSohVSt3US1S1LXRT3BZYtaicz+nGImxuLUWXGWOqasZM++P/WYzbK62fZ8CMyOSu8hymsbj6bQ/HsdpytOiquANlxsCpRQ2fbraDbtt30Vufk++ElAdTL3kcilGZO7TvIzT3GO04EDqjCAO4nkxdooxkDO5Ux3ilQABAABJREFUVqM6dVcVUSzCwEBmBJVoTFSFXBgcQMw8mxKZOORiuVgpru5EVR3DDQ0Bq5M6OgGAgakwikYzwFnKNGleQLKoqBVdZgkBQk/mAQzmcZ7GeZnzWX4YAIDYKTittqZmDQFtRogIBMTO5EQmBMyASISBUICXxmCICIkwEXdIAkE8qJpWJwHAhJw49AjcqLMACG6tTKymqn72AgPwqgQGa4Kynh6IXgUDAMEEUIEdXsaZzLTd9jdX25vt5jDPqpLBYxexOIK6FTQilWDKqiTZVWWaHNH7Pi/ldDztT+O7p6fDkq3O/iR3nCO4lGIlo0pym0TG4/F5yc+n02GeJ9UCAA4RYEv4Rd//ZLd9s+lPWaQskhfNGVIPYCqy5DwvS1Zg1Y6QYJMIO8KEGNzY7CUquosUKZlDwBrqiC/nyu0wagESbR1za8MHemXh1Vh7DvyA9HI96/itDnZbOeFn1ska79wamauqHrThPgE03t86AzsXNrnkeZ5LyZd7v6pfEiusjjxeLSmr+YI6rGyCJt/uLcIDtM61YUUC+Dn1ae/x8t1eBImV0UCw/sDLoMIBqijnxQ80NFv7i9WDW1bPkZe9b7bk+TQd3SzGxEwAriYOWjvpWKkBVa0BqpQ+IkUmBleR4tmliAphS6jAzUHXSXsrK6x25p3RvWkx1opLdUVT1Nlu9ftyV0FEfjHSq6qqBdVcXWrgL6IqF6JqAGHMBQAq3agRgapFXesptDWwBn5rS8RftCorh0LBKhHe4Tvdnnp7WuAnqCYszd0Z3AnR0Lh6jtdXbo2d8ydpR0C9pkhm4EAcAocYzrHfzE6naf98Im5Jsq8rqCJXVhz6KtDT/kVAIGQAIKx0uBojW/luvoohQU0HocLR6yZaR3f14KppT3vbCHqJsG1QOGxkWcTqpm4ObuoiZvLpIluWcRyR2SuH1l3UEDBUEAFxiBRiYKaG0nF3w1LoNPtxr9k49FcUrpFuive5jKCjqBNXIzFCICZUZC0unmcZuSTjzQaDYxiXJRsR9rRknScZj6UevY5EFCKrFHArObfBilQtfs9LzvNScr78LPM8zsu42SZAUhEEAI4VlVCWqk5kTafaa4caAYCQcljKXPYxMBIgqlePgLUB4i0RCSH0fbfdbbqhTzEEJkKUshxOp3Ecx2nMubiDm6k6M3ZdGvq43XYxpSFRF9FNSykA6ARZiqi6wmmcnh6eDvuDSqsuROTp+fjhYX/j2w4iKgADGnpxm4ScOosMRqAFPEtRh+bCYCwM5MgMAdEVsdS2laqrzaqLKpIV86nYIl4EHJADGFo2bwIBCGbkEIwcvWRBR47FHOZcpsWmEafR5mXRRUXAvHjQZSZVPx3Gh4+Px8Pp7J9kSCWEHFJlJjT5DK9zdOXKAMSGwjWmihtCpKqZ4kiAwSE0ZHnNKj0iBi
f32od1BANaMqziRS3T93aSnH298bwdX5Agl8dxTZAdTEFN1EaVqVwi4YECDNvus9evnDk+0SgZ+zTO1kWQXHQqY1me5tk3W9v0XQjgSjFZluOUPzw+fnh6/nA8TGKcEgBqzkMMUWVgeEfKbpZnUpnH0/40PR32+wt97VuAPxmGP76++iwFknI8Hp73T8s0SbaFxkK8FDkt88kBAIpCOC5TPHbmIbE6SF7m09HOvlbumouW4rEgEVJ1cGiFka9VyQpbAwCs0bna13vVpa8I6MpOw7WVCusEddUirVPjdtGhFWNnOKu5i0otcpCImQjQtEWnetitiQUiYs55HI/LNNtZUdF9Lvm0zIIeLFTEVou0dXRYJSgqUqEG8YugvLa12x2G70zrqZ0S5+83WDtGAEAEKzrn/H7qFWzovpc8Yp1RgJ+ta0V1WeZ8kcSAm2rOMhGC6AKEUJtzru0OVW4CYyBISIkINXBE7pABy7yU4zLus56Y3ch7twgQ3MHqtL01drVuCqqKTI1mgYrAVAHAQFoh9y5uJpYXE0WnZvcm4iImpXooorqrWS5S8nKZ8IQf3Sc4l9HutYK/uOr10teA1ppatQ/na+CvR0Tduw2m+ck4obZtzqV+zd7XPNSrPTYTniX6/Nzng08Df02FUuCuCymFFPjNdYyrMjQz39zsVGSlMrY1eb7xRKt47PltNbgHrKMgd2h3ANaeB9i5Inm5GrCir+tVOde3vu5L+N4D16L/goFSe5xeHc1ev7mrmAAA2G53//Sf/SdmVhHy9frXeMjE3HgulZWwjkgc3VAV52z7oxQnSlsK1xzuTMo8/V5lZI7UNEe9wdWp2pVG5EghbHa3929+nPpNlswUd5ubVHr87Immz6luWEesXl8x9sMQQ0Im8Gpr7N6gFfknf/pP+mFbP8tuu/2rf/ZXHPnm5qqLsbpdBY7qnpecc6l23OvkWhtYASr6jANHWgE4tvZhzpkbNNESijH0Q5dSquZaiKhapmlaliXnrGqVAK1mhJhS6Pq42/RX2+Fmtx26BEiEgUNwBFGpfqvTNO8Phz/++S+GYaifZRi2v/zFXzrY1fUmDRGDAwECuoAuQk4pBYoBAyvAYmoAlYwdmZiRAJmQAwTmeufq7ExdWt6s7ou6iKuhA4XoFcxSyaGIGAwDhooHLOIGzMHcc5FFvGRaJluOYkWJPHU87FIIpGLTtDx/fviLP/3Lzabdlw2Hv7i+ReIIEKDqc9U2uwE4VX2DWoUTWNX8r4lA69LXgU5cBfEMEdY5MgKtx0E7LGoXsO0wb4HfwBvL6LLqqqf7ReCvWGFYLUNM1RbTv7i526wdstD1d3/0s5+p3Z3yj4+nx8PzJAtEHpf88eFhHEcXCUA9p23qdkPXpUREMaXU97nI3ePzm/1hv8zFLcQEgFZyF8Krm93NdtgNHQNIXpYis9rDNL19OjxNc1Enhy3j26H/+c3tj26vb262Bfxqf7jfH744nLJYTB0iFdWx5GNexJQBNindX+2u+n6TQnC3ZfnFP/3LYbNp92W7+/O/+ufM1Pd9iOlcMKynYTt5zueYA1hT0KhXtQX+9aA5B/56adeSBdckrh19LzFhDfzo4KLSsgQi5upSUbFF9b8Old2HiIiVofPLf/JXw3a37pfhl7/8UwNPKTE3vYFaUtY3dRn44R8L/PBDgR/gHPjPn+48zGo08ZbWwPli1aYxvqgNt4tqYBeBv46J/+xP/nQY2n3pu+0vfvxXABA5roIHrdRteQwCEfAqpR+R+hC70G+6HUNnM5VrG7diM5H3jAkgAbATOlAL/Fbl49pOwXPkN3cQ9+wgTbBAoBTPi5s5VlUcap0SMcjZREAVzUABzK2I/MWf/8lmM7xc22+f8vkvF9H2O6CJ9Wn3y+fW7/90GPIDU5eXx3lojy+/AFe06cUPf7dl8PKOCF+WdQy0SciE0Hzfp1KkDnfWHtD6qudG0Se9oe82is7h5Lt//95nOj///W/4ge8GOCcYnyBrLl4mxrjdbkJgAJjn6Zt3Xx+PRzy7GawJ0EsCj5eDKGjfWbXo1RwQkOsYF9xNF/dmVAoAazt7vVLtfECmEFJHxFXngDmgC5YDWH7RY15HhpXOe57YwBkVYtYN29vXb1PXA8A0z7///e/H8RRCoLXdgk356zwgvEi2Li7f2t68uGI/nFQBVifLdd5Z2zyqdp7rwNqVrCG0HmRMVdG0rr92add+ApiaqA7D8Pr1m67rAGBepm/evxunYwiXwnHtOIaVkIorlm79CA25UhfBehjj+QO1eFi3l39yCLdv8fWsXN8pnJNNOKdE0Fymtf0AElBVhXJQMy26Gbaff/ajvh8AYDH7Ni+TKJ535TkovNwDx5f/4OW78vO7Oa+/9hIvx/ra+lr333c3zXcOkpfHD+6fc/+49uE2Mb4dNj0zAGgp0/NTmSdRF1VRqUNpcy+laPV5qb5YSE3XvEY+YnMvUkRUGjq2rQFCjIHPwlp1IZmDmFXdiXMpEomGEFII1UxvUcmipTL9aG3G1eR4VRAKgUMb3TuYD7vt689/lOp9macP734/jaf29YtV9g9fmVYgveRK5zPiH7qSL5Cq9Zq+PL67rlukfgnDL/VY/f76R937w2b75vMfdfWzLMv7Dx/GaTpL3V38kpd7+kkt9kMb/OWrcBmcPn2vcE4V1+/5B073y5f73lfaKecAbjZshjevXte9n8v8cf9uykeCS7Svu3/yWufNjrXLi8zECFh5vypV4aWWay9I9x9+i598aF/L7fW8qC5g59/+ksSs/lEXe9rcN9vN2zev6mcBWI1q/vD4w+MPjz88/vD4w+MPj/9/eNB//Fv+8PjD4w+PPzz+8PjD4w+P/195hGOulLCXOdDaj7vs7n2/K4D/eEv/B38Gzq2/8xPf776vIx2A737luy/uzoR9bOLQpZTH/dM8z9aUph0RuYrlU1inXbgiAKpesCEiM0Mlr/hLx6t+JxPSCqlVs1Iqdm2p1lL1++owrGLTAIC5uqxS4JBiIkQRaeg1cEJ0tyLFzZAa8Nbd1XTo+te3d11KADDN09fvvjyNpxBCoMBUvS8qKZhffCJa3+fMu7H210quhDNe1C5G4+fuUmuSNQAwNWXatdn+4gtcGZy1C92a8u2j03lyc35pd1fV3e7qix/9tI7Hcl4eHx6WZa7kxk9v4z/ajPve39fm+OVfoI7sLvvLLz/1w0Or/3d/eR1Gd31/d3sf632Zpi+/+vJ4OnLgF6j1aoqxtuDW8bQDEoXISAiVgSlNQb/OV8/okrUHTecreR6f4HlYUHl+2sgm7cnzFPe8X9ARjdAJrU4cwaHiKQND3+2227ch9ACQc3l83M/TTOBUqZpuRQ0cAlPX9Zurq9T3HOr7f7lC5tXKR6oxAyJyqAZMlwKnP/AoOR9Pp3maSy5mhggxpc12w8xSJC95nueqbYWtcb3Ogep9XT+5O5jZdrv9/PMfVQZs1ul5+kp82nRDikPAoRTdH/bTMqtVTAlzIA5VatZMvRSoMFFCjDF0MQxdQCIxLWpZtKrtqPg8FVMLAVPiv
o8xMBKpWl7KPJd5FDXnQJWgAQ5naAQAVP9N5gAAqlU3AggpcIghxhj7rtv02xR7gtTG2GYlL6qKDmomkkvJOS+m2pD4DswhdYk5YNu7hBcN//OfsJpoVI93MzUTb9CsKrJfF1udRzQxEFMn4pS6lFIIMcaUUuIQmz/Qy7bxf2C3ViAeQtUkkGIXlvb/6Pb7x7/n/5vO9D8Ug/4j37kegsycukBN2d2XcvZK+2TWcDFb/nSUe/F46cfjd9/VCkRTKcVstStcw1XjUqwzGb882r4zkv7O8XUeALm7e2Dq1s8CAOFXDxkA3QigATyrZWUN/FQNH88CLQBV72T9ZRdiYL5ehpe38b0BxqoDdn7+EuhxMZT47tziexew/m6/6ugnt2mTCACe9k//+//qX/3qt78qspgUNwsctt2wG7ZX26sUOgDiENOQuq4fhoGIRISZNtsNMue8iGjFtiJiDGHoYt91fT/EGEMI4zR9++H9l+/e/far3z3sn4qZI1AIjqgKy5IPhyOgb3eb7bDpY3d3dfv5m88ipefn/el4mubFwVKkUqbHp4+l5NB13CXkkFVP4/gnX/z0f/I/+s9+8tnnAPD177/8X/9v/ot/9zd/fXNzd7O93fXXQ7fpQuq6YTNs+m5IsQvVh8RFNavmIotVIrVnabjwIpAFcvFcdBEVbR5OTF4FFYCRUohD33UxRor8ghC2ecmncTqdyjhKXkwEXE1FTEXU6sVEZq/MsioQQVhETuPxP/nn/+J/8T//X/7pL/4cAB4fPv7v/uX/9svf/Xaz3aaUPtl3q0vY+c76DwhAXSyFC8xnG8UaOHqNvbTOU9cc5ZyOXG4Sulim3534rQuw/RZTLSX/9Kc/+x/8D//Hb99+BgBffvXlf/m/+i/+zV//N1evdt0mEXFIIXQxRCIydy/FtZhJNT+BfohXd9vUJVMtYzk9z7KoB6SIIRExOiIzcQpxCP0QkVFU3KqZnrs6kqdIAQHN8yLjoSyzqgARpsQhMUcIgUJAQhRAIo1pSbF0XDa9X+3IhfePDIbXV/DTn/7VP/+r/+nN9R8BwOPj/l/+y//qt7/6bW+lxyXhlMv84Tib4e0w/OSnP/2zf/Hf+dHPf7a73sWUKmoMiNQsi47j6fD8fDrtl3kMTNfXt9e76+32KqXucnTonyrIfvvtt3/93/7ffv13v37//mGeJg709u1nf/6Xf7Hd7R4/PLz7+t1vfv3b4+HYpYDkVXlYRAwMCNREZTYzJBb1cZn/8i//6j//z/9nP//jnwPA0/TV/+Hv/suD/uqXP/nlj+/+5Db+/MPT+H/5N//1r7781XHeI/lud3191W2vMSZ1z+NJHh/0eJIsuevCm9vrL97e/PyL+36ID+P88TS+ez7OWQJ0x6flN7/6OI/L9W387O3u53909+r+KsRuPC1ff/Xhd79++Pv/8DhOZXvXhSE0ilMhF8CCDgrBUhevd1eAeByPS17crYv9zdX969tXn72+/9mP/+jPf/5PPr/74w4/Z+gBIC/L+99/fToeCHCex8fHDx8+vHv37ss8nboUAzM4bHfXn33+o6vr2xhTjF3qemY2keqXWbkvNUPPOY+n0/F42B+fp3G/lIPoyWBBLEhKVHGTBM6qsCxlmWWeSt9vf/TZT16/+ez+7tXrN2/ffvbjq5u71G8wJICwVn0KddfjuhuhEkEiQF/Dn4g8fHiap7lusU+Oc3spNT896NtLwQ8c/746HH3vkKg/9b2Y7its5nJNXn75vO0vCxhvPEbY7YbPf/SqHxIALEW/+vZ0nAo1a436a70SKJhe8JR1zH/+BdUeY02a8EWLy2zlSKKDT/P4/PhRlilFZEYGIo4c+xCHiruClfjqZmecJ7yUY+cnKvLGq3qKVmV7kd22++LHrzZDqu8sPM0GK3nyIvCvLjMt8OtF4K+XHgH8Hw/8l0d4uw418NvL817Zliue/+VPvHy1yzu5XtEqVOSs632cl+W3X//u3/3tv5/mk+QFXLuQbjZXd1c3d9f3fRocKMRu2Pab7Xa73SLRPM/EtLvacYwixdQRiSmEELqYsCXFZGZqepzG9w8Pv/3667/51d+9+/g+qyp6pdsB0Dzl5+e9uW62w26z2fWbz169zcX6OHx4//D8vD+No5p2CXMeP3z8ZlomTIliAOa55Of9YRrH/+y//5/Wz3I8Hf/NX//r/9N//a9ev3rz6ubN7fbV1XA1pGHod9e76+1mt+l3KUZGcC+icylzXkaRSXVRnYrP2eYCucCcYc4+LTJnyVXWjZzJsG75QNSltBs2Q+r60AVCU1czER3n+flwen6an/d5GqVkMFEtWUWLgAOEGJGDgRu4ujg6Ei1Fng/g6Mfjod2Xef7Nr//+//k3//7q5qbvux8K/Jc78buB3y8CfzNoQyDEEBjBRatnBDQ4TQvg6GdRb/ihwP8Cqjwn0O5QpcNefrOo5GUGgGWe2305Hv/1v/nX/8f/87+6+2I37AZECjGkIaWu2i94zlAW1UXADBH6TXfzetdveiIsx7z/5rScirFjJE7VdgJDDLGP3TYOu44iqos7BCdQ12xE1vWcArDjMpanj8t4LFIqN4Fjx5wwJopr4Odow1CGrgyx7DZ6dwcg9Pg+aoHtoGrwZ396urkGAJim6e/+7lf/j3/7f78C3eI84GnKp6+fRnN8s93peLr/4vPdq9uQgjk07QTEIjbmfDwdD8/70/FpPu1jZEAMHLquDyH+YOCvsX+apt9/9fv/8Lf/4cvf/f50OsXIP/2j/d3r+5ulfPvu29/8/W/+3X/77x8fH/ouEnkuWcoiksUEyNWKyGymQHERPZyOZno6neovmvL+V+//rx/Kf7O5/ThcTYlh1nzIf/88/d1+eqKA2L/yhZ8fj8AT4jyO5f0HfdzPh3FMMfzoeL/4q831ZzscHpb8bjz95vnpcFrIw8M349/87bvTId/e4hf765k//1xuuqE/nebfffvNr7768Ld/dzocYXsPcQPmUAosI5QZbAYHoA62V/Dm1V2IdBj341SWBQLCze7mp5998ef2i6sbyPYZ4Ge4qirmnN/9/t3Txw9dSnmZPj5888273/72N//hdHxKHFJMKab7V2+6gKAFkVPqt9srYlqmWUTWCl9zKfM8jWON+8/Px4dxfFzKs9gRaSbOxIJUJRVQhZfF5jFPU5kn2e1uxv2fLtPPdPkx4ZyCEmbm14R9XrRSY5gxBZYix8PBAIbdNnadV0vqdbNVwPVhf6w7+JPQ30DoPxzy18D/nYr///PAv3LE/OKZT15y/U8LPpW21rproOav396un8WPU3k6LEwByWql72hMlWLdjDwrTWItLFrgJwA+EwhXSJ+5EwKS55yfDk/7/dNp/xTIXt1ddTFMpeRiRSn1u7v7t30/eBWZWUn1vpJlauA3R2jiegBrW66uhCJFSjH3z/WFhBoapJnOl2HV+61SQi96yLj2KS7nAZeF+bn1Desx/dLhwYur/QkOs/aaVll3gJUjcJEOnnG2l7fN3auz3wtNAJEoIAYXMAWsRkWVzunVRJ3AwLUq85uD
nsbJHUQ9pQ6ZUoh96od+GIah61IKgQmrvEDte2c1cUcKIXQKxU1MHQkCR0ykg4tkEC3jLBCsSNXODMwxxBgDKjBB4ND3GwHIKss0G/g8z+PxuIyTrz0xJExd13VdiJEjh8gcqvonVvh6iKFLKQR0Z5FKFHQqYEJqQE7kRB6oMkuNKAaWrCKuTsYESA4EyEgpxhSHLnZd6gIzmKu5mXLoAKLbqDYT5BLdiqnEKkPmgBwSEpu7oTta1Q2Jks1Pm2FHq2ovIsWYUtel1MX4SUX4/Wr+XG2/rJBPF19FZXcxDn1HgEvJuZQsIiaVhsrUHKlrA6Cut0+Rt2cuZpOlsGZp3QQ7iWo676yMDjGm8/FERMN22N0ON3fX/ba3bIiUQkyBuSMk6hKWoNlnU+NEqQ8IgShstr3x4GOIvpiLQpX1w0AUMSRI0QJr4EAxBkZkYM9QtABaCpgSdRwD6nyaVDKzmpsDqSIqSkEzJABDJwwEPSMAqIEIFAoeela1p+fl4QFKWS+sm+nodgqBElkEE4IuogGl5EiS82kcj/G4iYJapBYQS5HjtCzLrGIEIYQYGKvQOzQ3mhehmNo9O3MzAocYY0yp63tVD5H7YUjd0A3Ddrfd7XbbzWaZ5tSFqngQGK2AGoipMXvoDNCAMctScgjhHBjMYVlgzjbnZS7HJew3m81f/dmffPHZq6lMIcbbm9dLGX/z+785Tu/jsB16TSn3u4kf2BQ4JIWwiG+c+v7q2oabJZqfypJDzNtt78XJsEx0OFA3eKdLscLbePv57RfjMB40dkCR3GleZG/TKZfF1QFCgKHvrq532026vk2n8fj4cFhmWOa55HK93b26u7/a7DrocW2j5pzfvfvm/btv7m6vAwMh9H23vdq6FVcPqb++uX/9+vNXr95sd1fzlAk5hACARSwvAgimVkQOh+P7D9/s94dSci5TkaNB5mhMHFMfQiAuwAYOpfh4slqBEnHquR+62IUQKSQyy48P3yJCvxlA5JtvH6Z5jiHstts3r16Np/Fv/+Zvi+jP/uTnd69fh5SI/Xzcu3suOi0KK5mlbe5KSF8bd+vWRACwRs/74Tnc99rk5y8ivDABP6ng7SVOnY+Z89e/+3w9napfq6jlKucG7R0xQ2AKTLBGeEdkwsiVo1F5Ti8vfG5lMnhT2W+jOwfEal2AhE9Pz3/7N3/9+PihT+Hzt69e3X+x2fQfPz7sjx/fffvQDVdX11cp7VSVlAzBsPKhzl1zcANqTC1rUjqrNMfLuOBTgkjgl955uwwXw8d6Ic4yGT90Kz59fK+F2m7VJfMS/SWT85e27Xp7Ll7h+23Zl8QNAAH4k7eERDFwF8IAwESQUkppG0KPyO5kjmZeRGkpjrO5HafJzLNZ32s/DCl2KfX9MHR9n1JkYqwcWbWiNudS1IjjdnetTqKirupOzCn25rDclFKyag6Em364vbqOIVKVyiBCRCaKkbq06/rupuQx15eUeV6GNNxd35yV+yrRiJmrsRs05YbKGIPKpK/OAO7gaGzKGs0EXQFTdfQJLRF0JzMDQCzITZYViL1JsQSKTIE5hpBiYDAItZuCQQ1LgXl2EyA0BTMiJSNWd0COiGxQpRganiJKdoBhqFZJ7bMgM3FgYia+ABpcrJH11v5Q4G8io61dRhiYuxiH1DFRCBw50DJP2YpmUW2SiSu7+GVh8oXKY01u3SpV6cVHqpoScvXhdkL0aMzhJfAzDbtucz0M26EbOiFFb6QvrHRaRGf0EA0tJubUuoAxBgAcBsUCaqGoFhVHjPX9I6GhFQ8RuxBiZPagAJDRXUN1Z4/J1VJnotQN6O4mhmAcrIJV1N3BlUAzCzM6LlnGeU5BY89lkf0Rnvcuci6YzGRyORF3jEauDBrIHTySu+XD4enbb97tj4VCV7KoiqnmXE5zNreUQmQjcgA0EZHspuB+NjM8z/LOApYNN8NMHIiZq+8pMdfFl2KKMYUQQ3Sq+SSbI4h76wuTATlQYVy9k86fBVVIChXzomVaxg31P3r75u2re/ESQtxsrt4/fPjd178+Hh0XczJzCJFiilK8uI+LHKbc98rdpse0I53dy6w+cwcb59AhxTKUfX8ino/ipF5oGzc/enW9DABNbpKWoH0+7WU+2qygYQvDtk8pbIZ038Xluu9j3B+WPELsIPVEUed8HON+E97U0yzn/O3799988y4w7nYDMYWQiFgdSlEKEOLQDduUNjGkTIKIBGTuUnSaFzVxMwebl2l/eH5+fioqZov55LA4CmOVDeXqUgIGiAYu1SA9xrDd9lfXt13fIaGZ5DyDwzyfzMQd989PDw+PKrrdbss0T+P01e++RObPv/jx94f+ZrAUnRapk8W2Cuq3+YU470XD/zLwn9OH84HxA4G/fsEuAv/auq/lcA38l4Hj4py5PBLq8wYApm7uRWy4sEsGAMZqbkENnkXg0MyWzoEfX6ix5xlmFWOhNfBDtdkjJgBTLct0fH56v39+SK9uuo6urje77WZexodHOE37LFpkBtCLfeQAaGed/bWzUIcaK5zrJV+52IIvj3BxNlZjDCf0qqjuAGqqa/60AvHO9f35n5crepFFeJPmWC9wlRmA1Yahzgrq0MLar8DLlzj/rpY6XCYHa/qHdHm0I1FMaQu74KZE0Ke06Tap6wGTGhpCMZW8LKacF3Wb82IOs1pxpNQ5B+47TkndlyLMUK3CRLTkfBgXUei7zds3P7q/E1jRVUSBOQJSna4BKBFEDpFj4piX0m6GGRL2XX91tb26uuLAs5QiYqY559M4/+yLL7aruEpNVdW0ThlERbUU4K7yQKk53za27ppD+osBI64k0ZZhrkARahYo3r7UekWrFy9iqKGZnCxAStqllOIyh8LFVuE2MHRtzRpHQmbmLnAMxCFocsRhsyE+B4BWXq8oE3N40RG4eNS7/JIwf7rF26Khtm5cimAIAdnZhXhWn0/TcZwXEfWajbwsRiIOMRDz+UCo84Iacpj5RegMsKYxDuj4IgxQHxSw26X+uuPEyIQRCZASAYIsFQOJ2oQlCRk5UIjIjF51ESLGIRAQCHkBIIh95ETgLiaejRPG2PV9IudiWohMkSAQBPCAIBS43+CwHQipLMXKjD6rliJUPbSk+DSiakgpiKE6bTd+vbWQdJrkeFJdZ2MOajJrmSACKDiIm4ApojOYLNPjh28PxbN+XQrmUlTE1VQ0i4YYrm93t7fD7V0fY1qWaVl6kWJmleNsKwoUEX3NwaqfhDVDAjMFWV0v6klAtXlUMygyRzEo5sVBm1MJVP+04i7uenH4IDiDB6SohqdpIZKrzTbFAFQQzbEUOT48H37z1eNhPDjJbhsouBQ3h9MyP53o4TEkiFfbgTUOc0zHKO+xvKd4GqJud6nb+SY87crok4uTUPCE8Q3f+BCLFDcIHIT8VpcDT0/dONPiVxY2aCZo+mp3HeLV9W7zdJz3z/nquoO47Odvf/sBSqY/uv1ik3YAIFIeHj58/Pjh9au73dWGKQHyNJbn5zHnIkb3RYp4LhqLuQGCu2r1uZim8Xg6uNuw6cy
Lg5hnkbmUxXwxn9VmDqJGKkAMiOTuKmTC1YM6bfr7u9dX1zddP5j5NC8pLJFTzdjUUIo8PT6//+a9u3/z+vcI8PT4eHt/l0LsYnIgN1+72WDuc9ZxLjV6VFUDugj8a/htMcUBrEWBlxPALwP/Rb8Q8eI7L8fHqzyh1VTUUS9b/ZdFxRqYzz+1VvymZlltGJJcqEPiiqVkZmjVvVGl6hMRIr9MJ/w8YcDW2Wi6rQCGYMTAEU1tmk6lTCnR9dVwd3d9c7vr+th18eZm97zfdSkaWC5TKXOgHpGwSWW/aOrVz+QvCU+7OHX3ERKRXfQi2iM0b6JqgI313aqpiSkAEhNRBXCdr3cVKXq50J8czxfPYyvy19S/jiKqZJeZmlZkAzPVokobJJrOr1Pf2+WYwC88g8E+JSMiBU5dt+nSgABAEDl0sWNmBzTw6ggCZpaLSxG3LGIOqFaQIESKkUKYS0H3EELf9UTBzXPO4zQeTuM0L+7Qdz12jR2AWF0FG3QDARANCZgY3K24Wz7nRwhARF1Mu82267rczGtMpEyb/Orm7lzx12vYBFTdzEVNmHSt+AGpaamC49qiIXBs8Hugi39gvZb1r+c5NjXkNLbWVgX4nwW4AdDcci9diilmy2akbuQItELoAICAiChwDDFyiAwRGTebzRk+Ciu02M7KYvgDIf8/+qgpdJ01VwXwGEJk1opoEq2igeM8L9WkovZqtKr1UUyRubnZIzoxxxC6lDb90HUpxUQUmImZ652tUYvoxfC7XqDYx9iH875CQkqEDp5Vc5U1dyKkgBSJI4We4xBSH8nQsyNjFvGCwADoITASuWvVTEdoe3VF1FSZIzInAEaG2IUQYdh0zKwduwKpqeCUUbSmOx6ihaDMhIZ5woAgvSmINLnj9XqaSRm1nMCImKsLDrgRAoFZWfaPj+W47E8wLVpFy5nQDVQ9pS4vV2A3/XDX9YTgpSyrOwsBUmWaNPcwqJ4wL7d5XQtgIipFpdRBCzRV73raVKjc+k8L/OBAZgKmcDFJbN/uDg6iPs7ZfdKCQxe6Xjmqc8m2n/V4mE7ffjiCq9/1223q40AUwJDnTp+6xVI/BTemfUiHfjtdi3ZDcOawG4Y+dkkiiCyFnWLspEv9EO8R06QzIPSho0gl6GmzPN2c9nA8ptNCE0hRLaYShnR9PXAfur50Xci+//bwu/34QUt8u/sXm3ReZhACphhiCCpacjmdptNpMvWuU3NQ1Wka3bQscwghRTb3kieRWWRRE5zLNJ+W5ZRlLLKIFIeipqqu5g4qgm0aXx1WS1BFM2Tsh/56t7nruk3fbSIPTClwCMQE5MQpdiL29dfvnh6fvtz8buhTjGGz3dpqN3C5s91BxHNR95eKnxoYzc4EmLMImJ+r88sCwN3PBeTFUfH9wN+K/3Wkb6ayatGe18r6G+t31mde+vINGWwmZovotCwvcr/rrWmlPVcuBeALuA/oE1jCudPfyt6zYDsRAxqY5mU67p+m6dBFIkhMMM/jt99+s++6eZmPp2POC6xA10DedhEiMDtROw/b6B+rd99lZe+txvreiQsQCMlr1QhAgOAuJedpPJ2OSHR9e5eGXr21GNbCCxAczoLca4dl/ZAv/IbaZ6jVhZmVInnJyzLnZVnKQkSb7bDZbjabXQhcpI5ZDWpzAs6l1ottw/nd12bC5YchpBi7zbCNHJCxsnFCFfvUalhDAFAtULP8v1j7s15JkixNEDubiKqa2V18iTWXzqrumhlMYwA+kAD//zsBsqebmOqurqzMjIzN3e9ii6qKyFn4IGr3ekRWkyiShkRGpKf7dTNTVTnnfOdbrHmoh7q72uqxqD4v888PD+OQB5b9tHtzd5fToKbrup7Pl8u8LGXt/Uq3gYZu4+9hm86oXxYnQhERTpnzi59vIEK4qq3rej4+rynZ5uwPZrrU1sprgMr2+xGv45N5D1fna3EmxI4uX7FUiM8mfkQEisDrYXi9aL1nC7wutBAjutfby0folsCIQGyI2EYfx3VdqxUzdjfwF7ZW9K/9avsNHb1NkmS323cJU/9ropNL3TYX/lc0Al6v778C4r1e8Y6aWG+Broe+EGdJiGBhzRoS9vMp3EopayllrbW2UltEdAf/fmcxU0pp2o2H3b5/6iSZWXJnObz+/R0o+eytIlIv1dZFkgAikgUDdI5o3qqBBO+STMwZZZS0H8bb6eb2QECSVjoufl6dQBJFRABCAImQBAmRUJi30qKiFjeHiGiqpJizcKJhyq4OiEQgO0k8ZkA3nQu4W+aaxVgcEUJDW1RFK3C+6FoUkskQ+No6W2tnrSf0LDhwn2nAoQfyWp1PzzNcHo9lrUZCaUjjmFiEkCB8nf18hnUdpyoIyU0jLNCAEkJXrW6BV4y4PaxXLDL6/G4RZtaKt3KNDtv8Sl+s+T3cOsP6apwd2JWRDi9LTgAAj2hhzVW1qS/tcjl9vzwIx/0bONzGcAslHnCYeWxIAJW55Z3d3Ix3A0/UZNJxd9zjuq9pMot28aHwt/TmqwNpQgTKOSUmjgC3Sm8CG4tnziPeRuBFLwhxL4dxGIixol9g/qCf/rx+97Ha4rqW8tPDp11Lu7vduEsyjkhwah8unz6EBdj+P36zwA4AIOf09ZfvMvoX7+7HMT8+ni7n8+l0WeaS8yCchDlCL+fn+RLrsuScmJ2Ia7sA1HHk2nRejsfj0+XytK5nc43oCSkclMKxrtpqX9P0I4whxIyaYW6sLRHubw5f3t3c7cZpN+VppM4nSpLu7t+O4+Hx8fjf/9t/d9Pbw/43v/n23fsvu0pTUkJ+yU7aoHaz7Rnv1UGjB8faL3byLwc7XseSa2nfaGwR+JmfL77UCLoW/utpEa+FH8xd3dXDXv+Ozwr/FvK7tR1wnfjDQ82LttLstVPeHpBX7ti2lOj//lKufjnSxPXP8ZZPGCyYJJuXZV6Oz0+fPv50Pj5lEQy7nI7z6fjdn//cy+28rJ8+PR1u3/QcB4ToRxEx5SH1nZqbNVNtVqtB2JVh06ekCI/+JwHxV6IKoWs+BiIQgqvVZX78+OGH779jkT/8/b9P8o4kIdJL50NX+J0+u1Sf72muzClzs3BXa63UWkopdZnny+lcSjHXlNPd3Y3e3aHZuNv3RYk5RLyUhVfw4lcC4T5D/+oXuTPpUmIm63tq3wKmkIABANEAqvvcrJh5uHkARm8CatNSypTTmAczQwQhXtd1nufT6TwvS9UW4YgE3qcW25DyLdwRAcLDiTDnYT/t727eANB2PiG5eWvtfDotlzPiSyIimNlS6mFI7YV59fpdwpVr4kAO137zJY8gAq6hUBj990XnPvxq4v/FLRkB3sMqIhx6MM7VeRtJiJlBgIm4Nh/HPOZcRZkc0Deg4SrzwM29vb8fkSzINI3T5xN/XNM7PYL/FdrO/+dXbMnQPdy0daiECROnbgFuZkSQhFLiwRg8MYIQEoJ1/WG4B4JDIG5RZCsK8ZDzmAe/Ojow8yaX+axd+vy2C4/Q3lMRCXJCyoSOxLYRMgCREQVRCIUoMWdJ4yDEEASEBkBcQ7
vpdiADD0QJAgMBWzEv4Q29hfekRwu3gAAUTFkct2AEYebMghlcgoHRD6NMWTkbuLcF1zXmAtVgWWIpHgycXu8Id6+tNFsBDShUrblZGHf5rpvXVR2tzG6GlIh2424YUg6lMHLwMNWmqpaTwEsecD9lAjsoEnC90/rZH7G5Cpt5oGnTWrRVU91iHb1/2s1EwsytX48tDavf4dv/fr2zEbIAo9e1XM4rz+v82H764VNZl93B79/x+98Ni52J7GYc5yTQ5MYOt+32rt0f8DDQOMA0tFFaAmTXBhdLBqNMhNlkU10SIKFFGEFyMwhnTynt0ZFAhOCe9jsZJDMk1Gy7lpenc40FwRsWJV9dqZaMgZS06elyam1x0zfp75tu1ulJ5O39LbZlENB6OR8/HZ8/1bK4tzAOU7O11vMFlwhblyVVYVHhNC9r09ZH/8v5+XR+WtZTbbNHj9LtGxcJJw8EMEYHDAuA6EluEIDmUErT5kmGaTgMOackScDNnx+fW8wfPx4fH5+fnp+fnp+11rquKeX3P3346acPd2/evHn7htP0+fNiZtYswF4OoK2Ov8Kg2+F0FcG9zBTX4/86isc1RRZehxhAiyvKDfBSwgMgeiD4lmLwWeG//r7eGfyy8G//5aEWugW1vv6pV7r4a+HvYPrml/83hf8FXAXayC+B4Wa2LJeHh48Pn35+/PRhLYuICFFZbVnmy3JpTQGpNZvnQjwul6WuZeQRORCjj9B98t+soHtaLFGvxld63/UZ2SbwX7wxue6J+3wMxfRyev7+uz//l//0/0hZDrtpv5uG/YGENbD/RIROkd8+57a5oa3ku7lFaK1lWZZ1KetyuVxOx+Nyuazrejlfnp+ezWwah/1+N98f5tP9cjndv3n39s27PE3audWdAuIRV1Pwv6kXQRiIr90lQkBoWDWKLbIzQjs7sxn1zS1S81jNjrUV1Y6PJOIkgIGCnCUPKWfJGHA5HVspx9PpeDqeT+dlXZupuYWBNl3KYm7dUoeQEHlbk5gx8TRNb9+8z2kchl1gAAEievi6lrku58ux1eKI/Q5tzeZlSRSlLK9HGSJgT94Jd4vQ6/Xr22kiJmKMAPJunIIRuKVweP9921pqa5i3x6xTL8I8qKP5gGpqqm4OHijATCkzABB7bbYbx2UoKUlli36iXzfuL01u772ZOaWUhjwMw0vhv8J3Pbj+RY/5+lRsP+NfaQi2/79zFdXUTM2aegetXS2aKlPPrQMizElGkywkdzdIFB7rWh5Pp1IVgeKaCuduXabR21PzHgnoL/PG9oauO/6Xh9ndbVVbNUKYkROngVkYHHhAUXIUwGtsTWdjeriDKaZBhh33Fm0VbmtrxZxCEuR9QoFWm5mXSwOnMOo/gQjQCZ3CAwOZCTxaNa/AHAoQuKGP0wC3++Gw8zxoRFuyDqPmSqcZP51gXiCASOiF+hAQ1bSGhqAyrKWd21q0DSzhQeECMXLc7GBwdMZpn95+cb+b9lFRa5RqecwA5IaEQsSAFmEIvhH5EAnRXr4EBO8drBmYgjYjslZbLbWV3kGoqmoj5SC3pqam5mpb894TWhy66f1mUbWdYkT7aciFz6dV9Dja4fikf/7zn3744WFeys279A//pzd37xJ6fj+9l4Gwjm/8cFv2O5hubX+/vxt5BybuWN2jrbKqa0VywOYOBmidTshh7kvV1ppqE251Srs8TmkahYWkH0IiMsqugt3R3SnNNHFNRQZw0MuynE4VQVTbZT1aW5l8vi12VfQI02E3ronW+fF0Ov70w58eP/0QUUTCvNRymi8Pl1E9BDHUa12g1COhEJKbl1rXslyWyzJfmq7qRb0xgkhKkjAygCAxS6TkSKEWpdplrqrOzEF2WY7ny97s64DW1EQzDMO6rA9Pf/nwOP/Ldz/99z/++en5KEl2u4kRP3z89E///Mcvvvl6fzjsbw7D7rW79HBtrbWy7S0Rt7KM0JnL22GyUZD6QYrwYoQFV4bQS0DqL7b1ABuEBBt1aAus3Rb+L4vSfuLh539sO5eutfhvDp4+Bn9+JvX7WTof9bM3/pICuJ0T14idDVrYGoMX56Mwbcvl8vj48w9//cvj44dlOQPCbpqYZUiDq83LolXXpmVty9KSnJ4fn893p/2wy0kCvdbleDqvtQAAMoqISE4yMQtSRAB1tzgNwCCgwOBfBtQBgLzs3raPGh6q6/n88ccfEOHDH75/+/ZNHkfh0e0KDAAQ9sIfG2yzRY9bU2211VrXdZnP52W+LPPlfD6fj8dlnltr82U+HU+ICLc3Aj6Th+p8Oa2XmQNu37yVNCQSQHQAf8GtAfCz/vC16fvsYpnZvJyfnh+7V19PrqMIcgBHZmbniCjmax8x3Lkb9eQ8pDykNKSUiMO82lq8tbIul8vpfD6dj+fTudSyoY1GpZbT+aTWJKWUU5bciWM9yJJJSq1DnppqGvy6qoAAaNrKsjw9Py/LBRAdA8JL1cu8PD0/qurr53mZ2BG2iR+809v+ZuLHK/NxA1OvbSZRJ9wDY3A37ApDMw8NNN+WUhEIrtrRGeiagb57RvNh0HHIY85ZSiF1un6cl+ehc104pZSHYRyGIU/j+MuJ///315YJ1stzBPSAR9gS4/owaBEd2CfEaeBpHEW4tSllntfaNFR7xnYP39nGoI1oFn5Vv2zffRBE/NKpDAAcvJlVM/YAkMwyCAtBYNoLIoK4myMDIrIwM2OAN29rpUAWQqY85rCN4wgAkjBPCRmYsRVrq2l100DELicFh7BwBe8rxEBXBwjLSMRAjITCxAIonVzYtR6B4pklucOJ3RnAPnfWc/fS2qXWU6tINJd1rqW6J+Z+Igr4wBQDCyTjtN/v7u/vb25uo1Frti6NNu1+lpQRqda2zLOVAErhruZN1R1g853EslZAyuM07e8MEiHk6QDEqqZNzRQwuhgTCEXYIQc6GXWjmM7xcgrOGkzTbveiHCGiMecsuVVYQ4lWjaY4Fz19elwvq+zvxeb9wGm/5oQD8e4mdjsdR0w73u11mmhCEAUgUAc3HBQAPCy0qdsWY0nYw4qRCBkxVOF0vthg+XADiK0HMTNlCKAEzmPsJtytuEb26WZorZ6eLvNphUiuVqpCRM4ESp85lLi2Zb48mpXj8fl8+tD0knMAsFsQ19aO89zLNAaAGywWhJw4u8eyzKWupRXzgqSSnAGYYBiC2b0pIeeB8kB5AKJQi7W04CYNCQUDWpsvl8fz+cM0Qk6sNpSa57n99YfHHz88//Tx+dOnp7KuWfJ+mkz18fHpw4cP3//1+9/+7jd/9x/+7hcoY4CpamvXw+k6ISRJKVHXkm8w9BW6j+5T01cBmyFpJ6d4gL7M9gDXSrrh2n69R7YHedtmgntPrYqthlxjpzqJygHiSnh++ZlXLpF/TumHX078V7JxL/wbz+oFpcbNLmSrqnRV7yNCXdvz08NPP/7w17/+5enpo1kVYauHIQ2A2EotS1mXstZWq2nzUtrz4/PD4WMWuVzyvMxPT08/f/i4rAsLy5DHYbi5uXv39qv9dIPMBOgY4OGEdDU0C
/r1eSzXshrdYIgJxpTGnARgmeef//rd27f3d+/eHfiu+zYQAgMQboHMZtZqh/HLsiyXnoA+L/Plssznsi7as1dLNTPwANWMmJIcxnE/ZEFYL+dPH356/PigtX1b2vuvvh532bt6njAwALB3Fxifj4uB1xjv/mraPjx8+ssPfwEHERqnYcx5YB55yDIwDq6mYKtqMQOIIcluGPbjeLPbjXlILBhey1rmebmc5/m8zJd1XVpr87KcT6emTRJzkoQ5zLSWpg0gmBElMSECWoQ79qG8L9Bj++K9w0keYRBdgRDUfwX9mq72clO/tsF9RMe+5u9+exsjkpiQAK+Re4EeeD3z4WoLG4woCBmD0DWsWbNWmtUIDUZIiYIxPCqHGnShDwlTSsyI6jmlIQ1DGhIX4dbQoDPeYWMXELNIGoZx2u2naTftdsNumKYd0+uyvD8lnX7qV75nF7Pitdb+j+b+CIQei+aOEbRFjjJJMJEwA2AzX0o9XdbTZT5fLkxk7gCw203MNE2jedS6lFJK7a6bjkjdjHRDqq6rP+xcl41l8XoRXuulhamru0NQljRkToyElAcaFM6mxQBcEuUhpUEI0YsuNqtUztJVhZyEEyMBZ2ZhTCgM45S12OxlrqWq9qxhJkb3ULMW/WTxBqERGGaQEonkxMQSxnZRXzoRvud2MXNOKJREE6uTfT7CuMfc6uO8/Hi8XEYty2zaCGTf1ZABiA5MLhNLchkON3e3N/d39/cQYuqlKAIN4zTuxjwMADSflnZRhiMEGYSaq3o4IgkSI9HpOKMM+7dfvMPDbm2IcHe3l/HGA1U1wlLmcT/kIRGBGg6ezEYH7/olQOyOYs1tKeu7L76SlK/3FybOiSaGEVCCLN/EV7+feLjbTaOvRI+5zITMo6ebss8wTZ4H5CEkW/iyhsWQp0zsbi6Iu6E0XFttrZaoEZFYCBiChdI4ZdyhRcyX+dOnh+VyHgQBx/AKECwsq8ipVKoALJFLrc5tTLdC7A3Xs7khAjCNKY3TKNNw8/K81Fo/fPjhL3/976ZVtXiUaUcoSQ0RIwkFLvNalwKdlcKcENgjqjZVW0tRq0SeMgDRgCmlLAmZwtQv7eLAaZp2+2HISIxmkCdIu0Et0HNd/Pi0XM6fPn6g8Ieb2ykwn87w4w/H//rfvnt8mpNMGEgaifNIqSCER1nX5+en4/HJ9RfLym3jq7Yx8NxYOA95GMdpN0mSuLKE/SX90Cw8zNTNr8se0w2XC/3MZOuFCLYVeNtkI9cOfqOGg0fntMbLeATQ2QIRZC+F/2UdDxDg5l5abU1fIn0/wzi3RuX6BuDKRwyAq+4AAQA7p6jjGATRpd1q7eHh0w8/fP/TD99vhZ/psttPeWRJtenxdFxK7b4oSRIBnI7PP/3wQ10XVf3hpx9/+umnH3/+qdSy2x+m/W7aTV99/Zv/+R9YvkwT75g7+gvMfd3W39SvD7Jt4n+pn0Q05DTlnIWO6/rTD9/fvb3/5vd/d3P3ZpuyzMystqatE6g6or8uyzIv8zzPy7Is87quSy2L1hKmZuZN4xrlnZjGJImRIqK1Wut8OpWl/JQyOLr5ze09spAI9RUpczdjQdg0Wt2yIDB+cSJHFK2XdbamTFjbYONI0ziMzDwydTIDEuIgMhCnlHbDMOU8JGEIb6WVcjmfzqfj5Xw6n07ny6lpI6TW2uVyVrM08BBjGqRLOCw6QmKtu7EEWETTRuSUcjO1fte8gkthZlW1tFa0OoJ7mHZ0rtbWPl9bQg8pxwD014kfXnD17UK+9LXRUdQwc8MwQNs8l00iIoysh7UvXgtaAzQUBgiGhA5RGVTBg5ATpYFEiBHBkljOOaecRYQEUH8xAhMRsUjO4ziOu91uP+32w34Yx93//yb+vq/rZK7e2FwFi4RJGJGQXM2vohrCAFOvrdFKSNiatqbrWpelNNWN8HFtS9xdzWyDO/oXfP3Hv7Lkv/abhEHbl06BxJQyIaFpx6U4DZTHJMN272pt1pQKIhOTdGYgCvLILBwRyJSnPORuygBlUVPz5N69OiNcXQEQws0JNk5Fnz6C0Da0ycHDFwgnJJEMQ2YUGAfxiRPhOMIruQ/ATEstp3XZrAKQIhCJHVkB3cODKI9pd5hu39y/e3d7+2a/vyGScGjNwQGZJQkSq7q3UmxFczOv7k1NNQA4pUlSTjmFw+H29kvk3b2VFhCxm9LN3cSornr39u03ta7rmrIQobl6mJltbIvtcKVAbG5LLV9987ucrxmjQIknxrFP/GmqPMHdV1nyfoeDPqZcDnnO7JQg73icZNzLMEjKkpkQtQHAmAdibgireXU91fU4z6WtbpaZh0yDJKHNgQiJDYAcznJyc1VbazVdkXCkER2sVmMbhryjcbBkAJMMkYa3t+9Yx1ZZW1WdIdSd7Mq3BQAzvczPx9NHswZgSEjsnAw4mCNJsJiH1aKE5B45Q04EAFVbra1q9VBmJwqRQKJhJBFE9ApKbIjBbMwGhB7QmtdWzVrfdLm7anGvzycgnpvv14rffb/++c9P//LHH2qJL95+uR936IERWlS1AYCbXy6n0/FYyhph+Bl23ukYveR2NWeH322bZSIgCGkbza2fYWFu17E7zEPNeuF/1XJcSfiw7f+7FD38yvjEHscCuJVxgEB6PYn9Sji5LiI3p56tvoSZ16aq9vmR/Pl2H68T/OtA/8JQgO76trUmL5AAQXT9y+PDx4ePHy7n07rMaoUBolbN4zBOpenp+WleKyBLHqfxEObn4xHM1uXcVH/++PPj41OtKyIQh4fN6+VyOZWymCsidBg4NgrjNvG/UBFfXoLYwd4I76k2OKQ0DnnIya3+/MP3u8PNv/v3P9/evc3jDpFqKWWeL8fn0/H4/Hw8n0/zPK9rqbW01uyqonLTsBZunWge3UQakIlTTknE1cq6Irq5ZREPePz0aZ7Lp48Pu8PNOO3G3W467Kb9NO2mcRxzHrqzSi943gV6n+v4iYZpGve7sqxh1vfxIjKMwziNItkBEYglk6RhHCUlQQi3Ms+nZT5fLpfzZb6c1nVurS7Lcj6fAXw37foqQ72FGgkB7vIgu8Mkyg7uoUvRiE3p1EyZsyLedlN1D994ERtDey3lslwuy1kj3MLUSq3rus7LYq/SkegGTZ2AEqhBDlfnixdIC2KL+u4Wk+pWTVVrhCI6JVKkZtw0VKnMcH6Gy8VaNXBITEMmIoFAQEuCaughJAOnkRIRQQBJsu4SmrZcGnKgjg5t7SxLkjwM4zhO02632+/H3TQO44uRywtnZ3tA45rvfWWcfD7x/yuveDGpiM5J3BZ3EB7BzDlxJiFCEdwP6W43qhoimPqpzZ0KdZmX83ktVSO26KYIaGoB1Y2JRVuzLmzoeOD1GcbNfOLlJguUwAw8EjC2RRErIJIQZwZBwkaCaUhpYJlEMosIOkJ1q1rm2hMeEDkY0yAMSAhuHkGUOI+choGIzs9zXUohAIwxCxCZmzUDd0JMIpxZEiGjRlNDMMcwoiAEYIngcDcPdBT2/Y5HoWZxf++SrjwoDMYgMPA2EH55eJOQ
jvMiiI1kAW4NgDFN483N+9/+u7978+5dGiQlYRJECsfwaK4RoOamTuahGq21Vuda1qq1BvOwP9ztD7dpTLvD+EX+4u79fVVQA/dgxikTgt3e3bz54u03v/2NmRETEPbjWtWutAz0jYURzWxe67ff/qYH2AMAIWeZMPL5vKwCaeBpSsOEWcY7Rx4P4+nLOI3rubn5ILAb05vbw26cGJI1bZeThO2IOMlqoav+fHr+4eHh0/OTud6Ow9vDIfPudsq7YZfTgJQcaFWHcQdffNWqMkKrpuZDztN0GNNgpRm0xKG8v8jbNq07HvIu3/77u/Ktzxd7eHj47sc/nubTEnGcz+b2co8BGpASmId6hLrW7o8EmCQP4wCBtbbWGsCCiHnIgFHXWlpR1whDdEALNwAI64ogCwdJgojhVsraKtamp2OZl1qseYBwCsNSqwiXWk4zLK0+Ptd//KfHH76/LKcy5l1mTsy12aplnueOKLNIWcv5fD6fZ9eVZbii3p2W0ZUd4IjuAbWq+1Iqdr4kUUqCiN1vuB9rgIDQl+fet9MOyrSpy64W9K2putqGhQKCYyBczybEvsqEjhiiYcQWQ+8UDr2GAG4DVvfu6Qc1gVnU5k0/45Didc//GRJ4PSL6cN9Ps1fkAK8avm6VBhFmuq7z09PD6fhMBOM41GJg6loNAbJ4q5fT8fl0tsBxd5PfJU/5cjq2spT1zEnGUX73+28PNze7wz6PQ1N9fH4c8y5nJgzkIAYPwIAuAfBeN37F6QcQAOu/2jf3yJgHGae8208pcan1cjyenx7n52dyAMT5fH5+evr088+PD4+n0/lymdeyttb6ZXtBUzCCwF8m0r5L6RrqntKmquaK6AGASOCxXOZ5LufjqY+P074X/v3+sN/tdtO0y3nIOYmklKTTuX7l5yCSOKVYi5qHR+Vu5rIxRwKAkboFbiIKd21lWZfj6fl0Op7O59P5vMwX1cqMtbZ5vhBBTomIkJFiM0tCRhbJkEFBrfsgq5qZglmYewJqqmpmLzgRbI+BmTXTqq20puGqbs1qraWsrdXP5KbbLIcvZOnX//Sf9ZmW6bpOMLdS67IuqhXQJYsTVfOmmyvn5eSXs7digDENjMDE3HvmlWEpUSt4JKQBCYGA2Fg8pZREumfw34z7tPlZsCSRnPKQhyHnlNL/+6C2f8vrs28gOjk/3F3DuzpYPRILIe6GcZC8H0bdqH/eVJdaalMmHnLGbk6EgAgRoNfrk20zVPgl4gLQBa6/fmSgmz6QkKm1pXVISjvhhYAScWLiF3YeA4CZtaplqaadeMmYOMIxQQSjhxNqU0aUlIcpD1OupZlpa5ASM14TNDCQCBNSQmAIDAULh3DACOaN+hCBZhwa0pwpKJAF0uDT5FdfJUAAYWLC3p3vUs7MS6nqdm5asJrLNPLN/vbN+6++/Po3d2/eqFV3xwBVLWtd1/WyXGprHgEeAhszIMKatapeGwBZQ6qAjrE7TON+3NGuq8l7fyaECGG7ca8He9OvCHZWwzZCeFh01TcEQBA0t3mpb9+9S1eoPwKtsRaoq1sua5tZM6FBxSjElUkFPYtz1VpiYQ6FnZNDuIZVrYRUtZn7j6fnPz1++qcff/r+4eHpdCSIL24OCeH9YZcI91POkmvzZi6AU85CUqrWstZWCYVICBkRu+CQnXYyvh/e1rwmJ3HhYRgIwOpR2NzmUrT4aZ71tel39+beEP0q/TEAs9BQJ4rWBghsTbU5AIlUtQpApS5rj/XrKzuMiB7JkQhALdzcDBCgtW6ZgPOijw/r83G9lOYAOXMSFiIguqx1VVOFx8fy4efT+VRAkVLnkZuZ1dqWdQGCaT+JSK3tdDp9+vjxy6+ebu/2nDY1r5k3M3cECHMLAFWH2l6Ac2bOWRCpU25/TfTbPP8YGRhAmLFrkM1rIwRq0QAjkDu/3czdOku3N5CE0F0gUJHss8KP4NTJAEi9IXjZ9wegmddmvRX57MDb1NREvahsjLfXDuBKWOjDQ9/oXllxvf/w1so8X2otOaUhHXRIGJoQpjzc3t4eL/NPH38K1wgKa6a1rhdTmxHnSxp34+6w39/ffv3NF3f3b2SQZVkclEg6JMu8Bb72ZuYlA+faery+BFARQJAJUBAgELKM07C/Odzc3uZSsnA7ny8PH8XMHZ4fH37++cN33/31+fmoDmZ93HQMYuxwjvXGLbYgIgyHDo50n04EMgNz62h2TyfyAPBwb3Mty3w50yOlxD36YxzHcZrGcZr2+8N+vz8cDofdbjftdjDwa/1DDCRzLFVbqYIoqJe55FSyjCCp301M6KqnZZnX5TKfLufj8Xyc57m2tpZ1XReEmHZTQFeagoczcc6JBFsobg6NzMGMEoLOnZHS5RNIJNhN4oi25f1VHLaRT6N73FLYpo6IK0PlVZXSyaC8wURXsyvvyD+gX3n6eJVHAwCaxbzW5+N8WS/mJjkxCyCbRS1tWcp61rrYuihicCBzcIIAcnCscLy0/c7eVnbP0QkIZEQmKUnmTim4dhkb1aDf/QE9/he4Gz790iy3PwOE8fJ4bFoMjOvKeYNDoLeAv5j+PT7DOeBq/+cBV4eNxsxZ8jTk3bDLmYece2wGAJnZZV0fT6fLWsyimdXWatXaSiltrTUChBNsXf+GKVBHBIM2URq8smEjwC0g+iYKvXo001nBQldEAUDkRAARzbQ11OSYIKgtWhdrGm5dRanRtBfHYUpDFgSYny4t53EAD9zd7j1iPl1UWzMhR5HNOxgJXEwpCBC8E2AwlNH7ItGjGRi4MYZrNIQKxZA87YzIPsu2IJEknNyhqhVtEWauS9NFz1RzGsfhzds3X3zx9bff3r99Nx0OpZZaSi3l+Xj58YcfP3z48PT0sCxrBBLLkPNuGt/sx/00TGPKwoC4qD+cTo9zeZ7nN/Xuqy/fTaOoFvcWrkiIOBAThAChI4eZO6hqbebmcHUs2XKjiSixhovIbj++7MXN4nKsl5MCMhLWusRz9QXLR5z/hPQMt8hj7AWytnZ5fkhndPSbw8qYo5ku84CEcn5e1v/bn/7lv/78w18fnx4vc1Udmdq6JPD3+329vSUmJFzq0lpIGoWTiAgzgiNBuBCTNrXW2jJHmABnSe/zu5bqWs5rLXOUy1qentdPDw+XyzIvbWlwviymLxh2NK1Na9ryt8GBmUkNalWtajUguFRDIGJvqus6e8SyrqVW9yAC7CUSmFEyTyToddW2LucSERApIglxq3E54+NDfHq25j5O7XCT3tzv2OX57LW283O5zB46TEMGNkauawELjahq57Ww8P5ORMTMjs/H7/763d2b93l8v08jbLG8WkvdauG1jF6BdrxWU36h+ODV17lTtIk3VlxXMW3II3GEt1pb06bqHkHU1JZ5XuZVtboZy+Z5AohuoQEKpNsR6xROEN2vpFfGXvg90CHCsZnVqqqf7fi3I2+z2EJ8mfEB0XFjb78uCj9fyF6tdWIjM5giwDSNuzEnxiy0y3S7n96+fffx4eHT40NpDVGIk7X1UldtzT2AcHfYObzdHaZa16ZrUFJ
rgCaS8iBpSCLMhE03/OLFKZm5+zV9zurH2FRo5moWWkIbCd/c3b99/0VZ1v1u30p9fnhoa9FmT09PHz58evz4MM8zSUYWRHjxN+s7ls7GxGtXFISCGBFXf24GeNVlvGxGCNHBozMCIGDFQCDmS+pO3mkYpmm/2+/2N4eb/f6wvzn412+/uM0w8MuPIsQkCZMnxMQCgJ3DrdqQGNFNtVY9ns6ny+l8OV0ux9P5VGtBYrW2loWZRhi7Xg6jp7ZQSpmCwoCFSJCE2EQiOjMfzaCLYxmISBJf/d5//dqM9RCRaPNWJoqNOGEvTQwipsQ5p5QlJX7JOkd8sTj4nJixfYvuUYrORediVRssjahLjNB6lVEPDVdHBDd0Z3e2IPdoDZc15jXWiq2x5EQUCA1JSIRFSLo3tX+OYmx30kbMuZrymoX/enT+/+6FHSQkDCZwQiA0MICIYKCrZZ+bhlm4UUpM5Ikpp9Tz8oCYOeUULkCm/VtS2/ITiDjlnHPOKfVhoj8ZL8SXXyFkEWAtogUCEqIDhLmrOQN0D8rERBjm2tyLNnKrCEH1tNbSrjEJPd8DwENLI4TE7OhWXddoHOGoakBAQoBBTJ3sggQRqm6oCAagiHx9/gLR0CsgYk9LNPVAF3MP0AWJcWRU/Rwj21hIjogpD7d3+3H0aU9rXYpyTjf392+//PrtF1/evnkzjCMAtebH4+XTx59//vGH777768cPH06nYykFkYhTSnm3m55v9neH3d3NLg2DkxTDS/EgabUQxe3NKBzuLbzb7nar3XRt7dzcTb3WVtbq4ZyYuBuIExP2rwICPVzkdc/n6stZ2wq37w55T0Drel7PP8X8A81/ybTUOpwP2SeZ0FzNvcTT87lVyzJwYGhbHY7Lhx+env/Ln//yxw8/Py3rogoRlQlNd8w/7Q/7LmFgeXh8iqD7u7fTfnMNUdfoG2bs8vG21sW0UcNkeUoTITZY1NpRj0/L5Xipp9NpXWsr0Rpoixe0KdxVm7YmlBCJmIJCHFXRANxiXZu7turbnR1WWzGz1kprLQA5yJgwiKLf7UEBrWGr1JTCvVQMgEawrlAqrAXLSkXD3Fji7k4CU2t+uejTUysFRcYhU6AyoHcHcSRHCETJ+c2bNzc3e9VSSnl+ejoej/ZZCpyZtWafidx66YO4nhHXi7hFa21suD5B9Mq/meCTCKAyEjEjIYckRJaUEZFSMvdxzEO+nI9RSkEI4RgSIRE3D7Vaq3qfWcIjXgt/UKD1UumBDggBZpsC6PPHnza/MqQXQfNnvp4IXQGI8KpfeFU49NokTCKcRIhAW20YNCQUSkxZiMkRDbGlFLvdBMDLUspaW2tm5oDNakAQ8zANAZanqVmbl0tTO11O03Rgopxyp5xfVdz97fya1i9Mouq61nVel8vZWsmMqnB3906/jXVZhUQdHx6efv7wUEpZ5uUyL+rGOUnKXVGD2ywGjgwYDBQRW27L1s0F9L99SzKgbcQC7318QLh3vjr2CrKJBREp3FtZ2rpczk8PJCw555zHPA3/8B/+8Ptv3tzcjLCR330gyYcb2O0FMTGPeehAqda6QcTml3l5eHq6zJfW6lrW+XJ292EcOy5EiMJChDlniCAhZBJmcgzyFziFSZzAEYgMgAK2LrBfWubPYkTianbzok/t9/d2izv0vqvpy8NPhMMwTD0mcBgyiyRioR41eK22226/kybD0TyauQdwnhDoPM/eykAsm6FkBxqAqTceTCiEuTO4zVGN1obzCktBycLUrxoTJ06JiOGqU3APwBfBTHiEq5qaNtXakFBbe+kP4rMm5X+wxv/8+ffrI7at6BCBUQTRidSka72ZSIIzu7I1MzW7LPPxMnu4u/XwHmEhor6B7u9OVau2pk2bIsAw5JzzYb+/2R92u90wDkmENhLo9gERN6Pn66EcWqytGhp9rwgEIIEJ0pQkE3eeaNO2tnJUr5W5RIQ2RXIZOU+SBpEkBBzhzVvnLIJTGNZip/moVVtrQCED5zENUxedsLutS9FWO9XBAFlk3EnKLISIbJUwEJncTGszcBICk3UmQJqA54XNr+gFhFpVV2Ca7u6+/MPfv3n7fl6W02U5Hi85529/9+1X33zz7suvZBw0rF3Oj5+ev/vzn//x//jPf/3rn5+fH5aydtiHuwbH4vmJH1LajePhsJt2+2HaSRqRk+RUvCzZz0+Jo3Hi7ds0NNRwMO8uzK021WKltLUUQMg4pm1YYGLsHO0+onxeXdxjnR0jffnll7s7eTr/9OmhfPp+Pv+F6Tllp4ImsKL5wOnmcAMAZa6+nvejjpKJ+bis3/38/Z8+fvqXh8eHWT2EkbuxzWWtHx9Pf6Kf2qqPp4swHY/HaZwoSd7lkRMxZSV36vWbCMy9erksl1JaPqY39U2+YSfXaM/l6aGeVofSSmsQRgzOMLzUiAjopgaeGZklEUbUhiKIMLiFGza1tVRJvIcdIpppMzXXCIPoqJlHgLp5a50V2gxaC6CMFOpkBdXbsjQDlCFNO6KqgRWCETLh0Gk8HjUCmBMjOwAHEFN/LDnxuJvevn3z+3/3u/1u/PnDT515+sJp3g6HzT/tykKGjVPXFzeE3SfaA8KdEJ0ozBBCr1s5egUAmFmqiOQhM23uOcyUhzztd5LF/X65XB4/DefjcykLEd7sJyYuxfCyLMtsVRElCHGTWnUB+wbvex+QocexduOf1/UfwssQvxX1LW+kZ79CdA/T6+/b5juEjn33fT9JHnbTdDjsiOLx04ePWrPIbsxv9vl5l4cf88PTw6ePf9XWdrt75lzWxa12qksgLfMyz8t5vqi3y3za3915xMPTI5GEUVnq1199fXf7ZhwnkuymbhDkiIhGxPg52i8PH5+06nxaLqfz+XgM0/00qFYPkrzLIeGxNltKW8paS21NTY1EBFGSEFGvZhbbiv/FFPZqXr8d+xEA3jH+F5emzX4bgeJq4A2fxbmEb9QAc2/b4d2pxoxIyCRo6//1fwN4228yawpmQ0qCyD0NjwUB+wp3M3Q3nefL5Xy8LHMEaGtmir14EjIxs4gIE6WUI5xFehCjO7iZ8Na8CGEQBIaxZwkYOk8WmDCnYchDSon+Zs+NiCycc7ZwcVCzSmStpStv8Vr4aRjyNI3jOA45C/Uythn09YW3d1eUzfmZmllTi0DilIiag9qllhoQmTmlTH1YShTGAChEGOgOrl7VFJFRL3M9ntZxrMwJoUfeUyAD9jhpCNgiq38x0feOSlVrq6U4eC31Vx7X/9bXix6ib0g6sYa7mgIhCRCie6ha0bbUtqy1aC2t1aYA0A0c5QqV9KVgv7sIUYS7Rn8c8m4a9/txGochd03mry7Yr7uVDtZsNsmESMiJKHW0gIgwFLy5rtZm09Vg4yU6JUSBAEYBGVgwubsVBwOrHq1Z9bboeqmmBox5lGEa85hYCK6LiPAwtb57DGRIoAxMiEMghDVDR059+a/u3oDCowaFsS+yLPRyWRBpyNNuf3O4vbt58+bm3Vf3X369a7qbl/HpNAz5m9/+5u27d+N+j8RmtizL8+PDTz
98/6c//ve/fv/nUosDSP+SiQDAdPNrTyzDmDvbc5x24ziN05SHJKin3ZAEd/u9iGD/XlSj95PmEAZhEQZh2CVV61q1XaOWw8NV29raUmqo1z+06/PCu3F3F7dv397mHZ4uuS5wedD1MQ5KiQUtwjxQkTmJgIOrBjhKALoGXdb249Ppx6fTpag7MZJ0JmtoeJtL++nTcy3teb0kprIst4fDbrcfpmG/m8Zh6Pjl4tVdN/4ZuobVqmGoF8+UxnHcyW5M44A13JdaIcC3j/sq5XX3UuqyFBYEBgv0cDfr9k0dOCYD6pmOYBYK24qqZ60SInQbRIggBAd1h9KitU0xV9Xdo1orRdemFkbkTGEBblFWxWjaoqwOwZt6lrn/eBImIgcg5JGmd+/f/d3f//393WF3GIXp3bu3h8NrMif0c8qd8OpWdvXVsc0M7so8B0B0ROy30JY633VY1PHTawPI3FrrRLF+WAegpMZCQ06JD0Jxs8vLckKI3TgSUq3GjKUsburgAbjJp3sWn/c78bXwA6Jdhfy/Og22qDkMAN+89zE4giC66fVVi/W6gd44/9t2wwliTETenj/9eDk/52HYj2Pd7Y5DYoa5HM2eponubhuCHB/sbKpaHDDlMZBrU2vaylLXRXKuap8enjyiR99Mo4w5jcOUUCxAQQMN0SGA6BdntvzX/+c/tdoux8t8vsyXixDe394AxOl8XErpqb+sDhBq4USYUkoJERmDMLB/ZRFoYRAQQQF9NAQAdzNr/di9/spLcNeGG2/xG/HisQRbSHp0UwaHADVrrWlXdXhEgKmXVt/c70pZXx6YdV1LKYIEwnE1+1Z0qIYIW4RVXy5CbAlsxEkSIjALAKY0iHCSxEQpJQ+XlCQJOiIGwXb7CTFQBENgBAFiHnxzGSBw4ZSHaRwHfkFXtiYGeo7QHvaSk3qoWm2VAXyt0zjStSNDxJyHrmXIOREwU4/ojc5xc9cIiyBEZBFEKqWVqixpGNEI1Z2RwEytIQOCp5RSYmFOlL3HprjXUsx9bQURvKlgGocTc0bCiGEagxDN2YGu7nvXJf8rpxA6v8FUa61IxNrWdf0F2v/iUfA3hTWuu57+z80/epsAuhCo+7dquPdTbUhJhLIIAar62tpS6yWvacZ53Ug0IpyFc2IRIaJwqILm0rlimzBeGwsOg4xjGgfZCKPXPh46koefC0eAGPMkeZc4MwlRADKnceDEVh3MiCgUrYTVfrZ1G7v+wzb/A0cIAhREY65iavXSTK2umyxUUhoP47gfxl0GjlqLtsbBHYYgIDMD95SZKUItGvGQCEBVw4AZ0Z3U3asLIAJnMJPSeF3Zfbsnk+S3b7765qvfv3v7/s3bL2TYsYxTFhkOnHY5pWl3JzKGo6sZUCnr8fjw9PThfHooy+xEDrSsDaIK9YgHiQBzK6WclxMhMEsexmnc7w+H29sDhh120ziNwzCKSF92ePg1mgiECSBhIBEw47Kup9NxqVXVzcDDq7Z1Xc/Lcr7M5/9w+T//x/8I8AYAhiH/5tsvx/LVzWFoXtoC5YI2IysdhrQXEYQUlIkFwGqloJFzFtplAcS52lzMgDgNkyOioQchpJEBXbVG06d5met6bJchE4VXr/JDzjl99e79fjclyYLJ9FRrn2RAiHLOMKWMOdMw0TTu9tPNyFM6xPHpcnH8+OnTk2mUCrWWlynZLC6X9XS8BERpirDZZQIQESMRIw3IRAxhHlrVcxZhjCwiDIFm3loNCGFBFmQJt1qXZe7EiQggN2hqtelataxaq5kFYWiN58fzCecetYiYc059SBNJiTGJBAKaC9Ew5C++eP8f/qd/+M23X3/7u68R45uvv3r/5dcvMsvrAAdXxDm6MMc8VDeJzfaE4dXUD3oJ6E680IfD8EB07YnkzK21zpnduMXCx9Nxtx/vbg+H/fT+3T2/u2t1dmsE4R6qPg7JtQnTWq02a9DZab3kb4lSHt1xmgCxk09/QfjdyHqA5J3A15PvBIHdOYKDItyArxtZAA9EFEICCDXV1rSU+cLeSOfzp788PH6cbm50vLHzzSiZBSHPh5t6uJe3by7afJRGrtpWlLTbTeO0j+DDfvrq3f3NzYHzuK6Xp8dz0cIs+8N4mQ+13KC+YxLo7n2kFmqAgOlz2xv58MPPrelymcuy1lKEiAKYaC21qfYerfOf3cw2g7NA7E1tDd8s723zWjBV7yZwcfVv8Ot/bdP8yzL4BQraYsO2bzn6usx7rKIjgG8cuU79YGZhQeG0G9NnxZJSysMw5DzkxN0ehpAA0a70OfOotdZa3QwjiBhEkqeuCADEHC6JkyQiEhGPEBERAUUAEEjS6z4JkVMAoZH3qRIjKCDQvfeln0/wry3jlboiIhCBSIBgOeecReTzib/LhXtAOQUS9pVmdOW5d5dZZO4U4kC1cAeRnABMm6uiOwMIoRAmprT9pdAQ3IOQLKK0WlVrqwBADhdZnp5PwgkR3Sa/TTmFG7hT/ML5P349CDv0G6S1pqbtb1Kt/q2vrffrXtvuELZxIvtDZdGbALXNyte7x/sr+bsbgCGCCzEQALI5erAHB0RtGhBCnJKkbTfzqln4rGf5xchPRHmX8iF3tiPoBkl48zY37VfIQat78wgnBs6MjG5dXnO1rHW37VhNDtxK82ZeNTSQiQgks2ThlALUzdTVwwgJCDtvlDBSIhGKfvkjCICvghr0YAzEYLTA6PuOVrG114lfJN3fvX///ts39/f7/Q0EaGssJEjjMCbZqCGIqBBhXtel1cW9itAwZEduDlarqjt4EpQhEXNo67eBewNAWddlWdd1Kescbkm473AONzcEfYTqLlAbj5sJQLibiTUlgDDVVrvc0kut87pc5vl0uiyX+QXtzzl/8/XXY31IIz0edXmO+SlsIXaZkuxE3Iwcwr1Fa6YJZZd2mRMTrdoeTpePp/PcFJlvpt0uh6tCOAg4WBg3tBrQmvkyT8G7gUu05/Pp8enpfDre3RwGGWEcLstqqgHaRaOJE2QaMCeRJDLkxDu8va2KAJnmstzcjOd5BvDE/PLsu3sp7XwpQZSbAjoTbdgqGqAjMgAgAQSaG6ghBdHm8+UeZtrUAgKRO3PKvJ+n2qugGnbf5KrQGpgDIDBjb5HnSzEDbc6cxnFgZgQUpt0w5MSM2ExLa4AwDOn29varr778/R9+d/9u71qnKY2jfL5N7kUhYOuqu9nyK6a7TaKIfTkBPWcWPPzVBw87I2yjSOM1q+QlSJOYaKVai7UKcXvYv7vd74QGcNNWzBwCx5ytVUJ4Pl3OEbZNTQKIvcBZqWoemyZmAyp+dYgREqFdafOA13RzpuBtaNkM5ru4hhEYPAGAm2uNdSnz0eenm6RfHujjgbLy/j4POTGAYFCSfJh272/v39NXX0ArqjOLpKdzAuK7ezrshyHfjMM47ZBobRXb+VLP81KX58yHiZ/uhrc3N3H7Gxl6EmnrwW0Uv5QlA0hdF3dngiGLEPYP4G6CFMzurt55Dk1bM2vNWg8Esu4v2rpyzXt7ZOrble7E/vCrDUN0PftWgl+7Q
A/wjgfE1vJtLYFHYFhHVJg55zwM4zTspml/c3Nze3t7e3f7H/+3//Vmv2XYp5Tevn33xRdfCVFPB8Or0t16l4vYTJdlvVzmWqsHZGZi8jBEyDkDokekxB2lF04OIZKZZXuzGNxnZtqEqQEUYOamGurR/aASO7Joj1TCK9nzui7ua4uqahFucaWy/qLAIKKklNIgkpmEHAkZu+YkurxZxUxoyy+xnqnGKRE2revlvJyfQ2tmntIwpDQMOUlCQjVzM4JIzGpR6mq1hmsAmtJa1tPxjMBq1nSPsN/vGCHMKUAA+bP13Gfe17H9S2yxW66f8RVeCmgntSISxFYvYXPN6/6WG9cMrgBBN+6oa/WeibDJ8MKLxdVx38ybamlaekOn5kYAoErGZsru3qlC0H13iCDIAZiBybv4CpARqQcqxPWNQH+uA18+KPSJ/5CGm8wDAyFEeNNygXBvc0PE0AEJtVpnvPBA0/3Aia1YfzS8hfUQPy1DSvtpigFnQowQA8PO8PS+tWWVCLdm5grRmdqJklAkhI3EC4DChBaIkVMK2Q4kYSImTlbNWtVlbaGg+up+KSI3t2/u33xx2O8T57pczuEiiUQCCCMtM0C0aTexc3Wr64UoDofd+y++IJa16rJWDFyjhBkgpzxJGswat0Qrqm2bE2169uOyXtZ1Lut6mdfa7O3bd+Mw5JyYOYmkJCICsameybG/w8PNToZUm2rT3k8c6nBYh/1+ePfmJqUNUh5S/vrLr4ZyXGz+MM+nD3b+0HzlIYZEIkKtN4hrw4gwGGUc0hCUDOK0Ln/5+OH7p+OsiiJ3hxth1lbrus5lLrW6KgTyMDID5sYjj4dhSGMAXC7Ljz98n5m//vLbbvtdpTV1d8IQghBi4ZQTsYSTaWiLAtKmndy/2X3xxV2An07r3c3hJRYyAJrGUjSw5GaSKCUG5ICotXg4s3SRWveucERdKyJAkFm01rNlgInUKAJU1d2IKQ+ZBbCarQ4AxEkizAySJum+mtCalbXVZq7IgiIggiw05Hx7cxgHCbPLPD/XEsgi9+OUxykfbqdhul/n03J+mmcc999cm5hoTWtVSbQthQGsb9SRuqoWNp+Zbdgz08812i/N0DVHbJPJiXDOQ0qpz0sWvizLupy1LbuB95lub3dDolbZPZhlGofQGuGqrVStiiich0kkRcRaaqvNzAApMLrx9bZt/MXA/ypmgm4fZwGMIBQEHlsaFXbmIEBm4DCwCrVCWWN+rg8feP7w9S6m3715439Y1/fT7T1wusxNQygfhhvcvVvu3rX37ywc3r+hx8f8dE61hZDtRntzu8OQx+dPz8c6X/Ly0GQpVOv504ePXG53+Pbm7rdfVma6whDMfdD7BXYJslyO0Q/R5qYa7su87dvVOyOq1dZUS2tVtbbO4NJmrZgWaxv+7gYe4Qq+1TOHgIiei/jK1utkB0AM8JedSARsnD/sU01Ph5UkOQnnLfZlmKbdYX9zONze3Nzc3Nzc3t18/dUXOaeXYplTGofcIdvY9rrROz4MZGbWtHELmTAopRQIHv5a+D1YWCQRoog4bBO/e3daQxYRzoRdpr1x6dW8qTXz8CAIcGBpqnqNdIzryif6tF5Va63W2a0etbZaa8e+rjdZJ9Nn4czUs/Z6o+Hmqt5Um5IKWSB09ZCF968tCQuhIGZCEB7TMOTMzBDYitbWamkQIJnAI1TDGgLQ1lm4trosc4Aj6ZQBYMiC6rDlrP9Cx49bRvAWZdMTsFtrrfybJ36/ahXwMyDI1byqhntK2POjPVzNam2l1lpbbaqtmzV1t/5tlahmrUWpVFprasx0XRhyICBSVwwTMgS+hAci/IKF+6+8EK7ZSBF9crGwaKaua+v7PWKK7dIGdvZsEgpEsFCP5lbcKVAiMWbiQFyQkGhICZkVwgn75sSUt7hlA4fAcBJHgpQTEeYhCTFAvHAOJOcAdLfAQE6MwRm9ILi5Yhi94PwAQMTTuD/sbqZhTJRCrS1LsHJKxBKIYdWMtYlqW5fLfD661pzk7u4egC/zKmkhEpq5lJWYkJlyIhBkdKsAcKUWm4er6uVy6W0wMZdSbg43u92UepLVNAEAsQBiH5uIUISnceSUUrcJMVPLOxt2dRin4f7uRmQTizPL7e07rJfLun5gZ/sLtIxarubSDlcE0dRKtZVNJJOwpKwex2V9PF9qeB6G/c7omkzdRx43d0BgBsYgB2HMCZm1+rLOz88Px/10f7gdxgMiIrEDeTDi0KV+Y+Jxx5y9tLaWRaPI4MNh4Hzv6rf7+3WxP/zm78ZhfDnHJA/SnfSBAsWCSgszK6UFxpgJCcwbEqYQMnT3Xko766WPV8zswcxXanEEIrpv0JgZhIcZuPVtZafvRLOoDWrDcAyE2oxFk1BOcnPYHXZjuDPh0/OzBQ4p5cQAWsv5dPrw+PDzw8/fj/sv9rd/yMMtbGdfuPd4Se/C5s93gH2870N9r+6q2p9N+EzWex3eNuS9J9Qh9qwcAkAkdLVWl0SwXC7WDsL7MSdGg4iUMiOuN9PlnB8TCkaPseuQQYeizd3MkLaZvXsE/eoQ25JSgK4YBPT3YcjB23ab0MmRepvXaitzuxzbfG7LvMzH8/ODleMAl3djjF/dmg487GrAY7aLRgWPwLqGVs+57Xdx2NGb9/F0xPnkbVlHmd/fVzdr62nBC1VJ6m+GltBr1FixzI9leXavSN4FdZ1jgBvT4PUl5+NHM2+ttVLrutbWTLestv4vqrVq1VbUmmqzno0WHcz36xeAsKW89lFwc1F2N+2RI5vtJr547sLVC70zNlJKKSVJKSXJKQ/juJ/G25vd7eFwe3s7jgORDDnv9odpGCVlCGhW6TNQNtxbWdu6cM4sBACGHqEevtE/mcch280BCelycYecc0eX+k49ALvzq0hCAGJBCJbEwtG0twuJk3BG5IYBoBE9VbwblHpEMAFhZyQ0v5rqvVAYespcba3U1g2oXXU+n0+n0zzPL/cZIjKKUBJOQtxXUb5lI2jTKlgZq6AA6Frqss6qNcAYYRzym9sbQX801doSJwpp1Uqtl8ulL/WFKSYDAncliEQkItMgQ2JhDNd1uZzEjztm9thlc/BNm7l1ji/ZwJu3cl84eDONbuDsL6TreDHgwZ74Ci8J953kCy88PuhMH3c3j+7FEQGIKEwdDg1HBrDmqrEsbV5K7U0PYc4iTAhs5rW1roFBxJyEhREAmUXkyt3kjiXa5gJmfTfjV/jlRYLxOXQREaFmRZUa8FWe1FsOD9DQaF1C0R1DAMEWJ/cut8MWAR6rI8Jul/aepEJ1q00tYsw8YiKhGn60otVcUIgFCVD6NsO8cKIhieSUUhZmQEdwpJ7UkwDJTAFQAJmEyC2qCAmtahWAX5XGiIOkUfLAKbEIEAdTTwXkIInELMzmXsr6/PBpvhxVC0HsdjsPJlk4DSmNIul0AoswNEYXyQJBixDpVYqNSNDzUFTr8/MjEazL5f7Nu9vbm3Eab/a9ZJJsoq7el2ASRkIw7M2NO6VIgDHamKfx9v5W5KrjRUlwsx++nYZ0frP7
4u6Hj7tjowe3ploN2d0gAoGa2dNlBSgNyAl2u0FkIBILuCxlrg2AxpwxvNa61FbVuyLH1AOQE1qQBlZzaVZpafVS5ufnx4/jXjUSECqgE1OaJKEITwPuD26xPp6fLzHH2zLu5OZ+f293++GtLpLx8Nt3/+vN/mZrYkRu727evn0T6H2L7Obr2lprapaycM6Isc7Vq5ohAFuLACCKjv+7e9NKCMOgqVNWEcChNZ0vpVRTpdagNW0Kzfp6zN3BFFqLVtFdiNADS1uZYZc5Ce+n6f72JgkPSZ6Pp1VtGgemKOvpx+//+F//8T/95c//9PDpp29++7/85vf/l5vbr2DbbG7Zodeh/coQAoJO+drWv10R3Cf+bQx4gUJxaweRibvaC4F6agaiASACR3dRbdbn01CHcIZACiFIgkPiQVC6iB/BAJtqa95aXZa11toF1Z2765vM5FXHD5u49+Xswo45BhAkol7NOtsbQiC8Xubnj+dPH55//uHy9KksZ2trgCZoI60J5h2aoZf5rNbc5nXVn54fTotWv3z1re93efd7ng7ACczC1/CzhbpeJmbZ50vbr74AGdwOtDQ6NuGJMjSwNWA1aEbYDYzjXwMv5acf/mJmrdVWai2leyKYalhEZ9NZNVPtCH8/fsIDAjd/E+pC25f8IepTMVMnfTFypCsx80WYIVeDBuYkLGlTUw/X1ziOh910d7O/Pez3+4MkCQ8mzuMoJBFQa7VLcX+NszNrx+fHTx8/7IYhZ2Eih2iqHkgkIoMhQYSw5JRazmYhKXkEmwAAMXdWwJaIBxsTrav3cCNts0gSyQhI1HpiESJFX6TENQrphfgGv574X14evnlO1lqvYonPawwBcwekkRwdwg28m3cmLYxFKCkJBNZaVAuADQPf3txkETP99PCxlXKsz9a0ua6tzut6Pp9bbYCQRNyNBdU9IBgxEY3CQ2IRBPSqVlY4nyUJEHq4AgBevaoJeoYfSw/7TCJC1OGC1ubLeV3mf+PEv7UBvac3s02Dp5sZVGvq5n2Hvrm5mffM1o6rCHbqR+dbRq/oVZuZzytwv3Ip5RxpI39st18PgFLVrvHp92h3+Y7X+O/P3qR5qIE64fasm7o3jxagEQFh4R0PMgAAmxUVkCjMoDlEOKjw+Ob+5m7aB+OpLuFOBPc3h7thZOJzWZZjK9YInIkxCWJgUw/vkozEzJKYBAlJPjuXro8gAG/Fs4cuiQuHWxC+0q0JKJOMJBlFgCkInaCHT5h6kr6Aaa2dL5eHh0/L5SjUD0ZJCSRpUnMPbW1Z2Zt6qIcRMTB3PQ8R55xTTiJ9+WatFQwv63I+HyUJgJe6hnnOg0i6jm8Qm5AymNAdCLp3k3cBGDO9YLz9s6jpw/q06IPA7XpRK4HWldMRYRDOBAiojkXjaVmL6lm1gY3j0JquZs29uYfbaT43zYloS2uOCETv9GQIatE0zMA40A3AcibmWJdLc1aZCpBGBEvilJhSojFbHutafV3XORZpMWIep2FMdzeyZ727Gd6+2/9+HHbbVexk9SSO/VkI3WYxUzMkCkBCAmAzcEdtvi7VA1KSPHDOKQJrLQCbygiQMdA0avFltVItHFSxVm8Gaj2O2816YDK600t5M7PaSinUo1h205DzfhjH/f4g2sZRMNpy+qhr++5P//WPf/zHj59+BtrXur4+L5vk2KGH7G3oLwTQK5dr26df//FLz5z+AthE0H3lD53rpNZhSOrCMnNwc1WttaxzIg1vTMhEYT1Vs4YrQDCxB4aFWuuJM9qam0EEBiPC/2Di31j9HY9wwug+8MQQoLVFdxQJR/Dl+eHnP//zx+//8vDT9/Ppyb0hQco8pVCuA5YcVUs5XsrsbU26rnZ8jg+P69PpWGr8w/+0//LLlLMxgTeyVaKkpvBsx5QJuORUheuU4e4wGeS7Okba7W7ybkSk5rAaJA/b2IsWv0BaAOSf/+n/CPC+mXfbDDQinBzQPNy2ICOwUMMwcIUAIoDuWdOtUDo5JQg8Upb9fi9JwlqEIU4iMg1jSomTDDmP4zRN07SbhpwlsaSUrhP/MI7TOOacWSQnGbMIoruraavqbnVZ1gBX7UZ7rb7KxmqtP//847/8y3/vK+1xGIhZ3ZnTbjoMSU0qQpi2cCckEGARiIBGARBIfiUjqAZRmDsQXJfZgRg97VeSQGwedUJJIIRDOQIwHBEBN3IfI157xE41c+grfRFmptbUt3iY6C3RK8DcvVmjm/YGhHlYhFerpa3MiXFImDIKAoVVABPxabf/+9///s39GyT+83d/+fTx08PHh3VZ1rVealnXWmtzd0Bk4WI1p96DkWBIgkw0MmXpqY4OWtd5OTMIGtOGi1zXXERMSSQPaRjykCUxIrq2ts7LfDkty8XdPr/PNlkrbDaE/Vfoda0PfZsWnbWo1tS0tc4VNdN1LR2r7IxJVTN3IkyDUGIEYOYsQoTduUxE8nag1LVUQhwRe2np1NAkKSfOIkwEHqZWA4jaJubs6ZZxpZ68PPkB6MEBA1JmCQRt4WpRI2pAd4fugR8B6ADuem7OhowBAOoQ4a0NO/r69ssv3r1p3n4+Pfx8fGDh3335xZe3997gw/Hx4XI21SQyDMkdWJ2Zwgw3GJrRwXvsCDkgoHVpmG38WQAPYKfEGJu2UT0SE78IKwgwBeUQARRHJIRAC+hMJBQ0NTRbVj0dTw+fPi3n0zQlSdkwm1mrrbXWzweA3u9cEeQIDw1wYkopTdOUc2ICQA9VhOCUs0i4lXWptWLgbrfPeRBhJLjKRV/Yv2paW6mlNY8gpgiozWqpfj3L5vX8j//8n396/qcR7h5+On//3V/OTycITyyCKIxMoha1+mL21Orz5ULn46lcen/yNF+KKTBBeGkVwjFnvNad3nK4Wf9qhcH2Yxd755Hfvr29vb/R4pfLvFBTSsAppXEcDlkYyUQKcY1wU28APmNbKFoaxtu7u9/t6Itduj3kd4k3+2E3K2VZlrn7MdRSzUM4EbGWAhGtKubMPBp5KXY516en2TymKd/c7FIaiFJENQc1ZiNmDqDadC1YKtUSEWCO3RbRzFVdzc0wHLtn7XUbHIGhWk9nDVe3tsyXr776klnG3TREnjKBzeen74nLujy0dinlUuv6cl0iwtRaa/0m2Z6nLbnHXhhffSIy234Fr6Kw6//snfqVMgcA3XLbw8N6iBmzIhi4YiR01bJejs9WGNxyFoAopZ4vp/N8KZ3VKwzBalfngc5I7wSbCMTo+u8XoVn/O7spEnVPLcIINAwSHiSp+nJc2+Ui1sS1gT78/MM//ef/8te//PPp+cEippvDsBsFoQJqYLLweV0u5+fT2jDkNhfLqlGLnZ5pt7PjY1yOllnLGs8f6PhxgPJOS3poswx68w7NrdjJCfa3OI7D+7jFfJcPh7s3u5SaxqI9CQk8zFXdxPyzwVKsre5WazHTbqo75MyUqds1YLhbay3Au4nSC8nBPNZmzV66MwIAgxCi/X6apjFck9B+t9sf9reHm5wzEApLHoZpmva7KeeB+6S2IfHc536+eqgRhPfIqVp
qVVM16/xBa9parWVdXwq/u6/rcjqfLkhDTrtpTEkAKA/jkEYnrV0jZs1cIQDp6jCASIip0/WzC9JtnphhcACMfZ4IoHAYGAKlgKzuAX1fKsiJUxL3AOLuVwhJJHUd/y+ZlAhI2PcaQ3ZXDTeHCM8pj2NPZ/n893eQPPCV2wKBpVWiIrgmSJkSBtVSWi0eutsNv/vtb37z7W+RBZn+9//9PwXgablcLnNRdQ0MYAB1r+oK2pzHnAkzQOeoAgEkBE5pGnckksaUiK2ah0Un0vfCE8EiOacx5zHnnJNwD7a31mpZ1/pv2vHH9u34RlzUbcWk2lpbi6puMdyb3d72hXggMHcbPOiGUJtf9Its5IrV+2e4/Sbc6T9ug/XhM7AROh7DHZ6OXxv4E4IgZqExUVCUiOYALaIBAGzsXn/9XKHhbuidWYB9XcmG0zC9ublngXGUpZ6T0L//5tv7/d3pPF/aOiQuQClxyhwY2ADRQ7tHCAGQe4ArIPDQqYuA0cXSAI7h4AaBQISALCkNowNhHvLn9xghMfacwS223B08TLWAgLqD6bIsp+P58fG5zCeIfR4RhKr2BdPqqqWspa6tWaLkrB3F6j0BIklK4ziO4yDcjSMUwqGr/zDM1NXWdV3XtdQ150QMV6isXwuAMOgboC4ccjSH2vRzk6hlXf743X/75x/+77f0dnnUp48f62XlgCyShQdhEaka0aqGd22+mT5e5j9/+IjIx2WxcGIKAFB1VwDB13z4gC0g08xDFyiXMnjKSClJGjKxVLXzsjzbYpyHaXeThjTkISe1JbATKokwhbblWfMz4VfjlO7u09d7/mrgcaADwQsYs6k0EBgBEFgIxjyam7baOaduBJ5MrRS/LHG6bIZGxMM0hQi3Jp2j3tRWBg8oq5WlLYu2FrCZb5FZ1Ora3MIjNt857NAe9q0lE3h4rMv68dNDa7WaT+Nk5sPAzIaxLOePAUtZzq3Vpk3dXnrluO7Ot8IfL7wo32TZ1xVff/561X89F7rk27HvCV/yWBGpkwUQ0T1EHAAywzjwfuCE7nU5P5cqxIw6ZIBY1vL0fHw+z0vVFgFdH8Qo1os4bm8gANw7h+BvJ34AhY1s04diq61SIwjXauV40fkCYYRm6HVZno/Hx+PpshaQBDhQvsmHPSZodi5zLOvlcsHTwo405KkG26ZLGtV0vvD5iFmgLnh+4vPzgHazLvjpeKQ8v6MEUD8+zhgx7bLwSFBSqgNr5gaweqweHIhw5UX86tCV337zdSnr0/NjKSuT7HaH+zfvdtOOEIUoMdWyfnp4cLfDzc1umtLm1odrrU/Pp+fj8fn5udbav7swJYT9NN7d3TLB7e3Nt99+/cUXX7x9+zalVNai2jyiG9QQUd99vxZv03XRfruEqalaq7WULhww822oADezWuoyzy+7ZERKwyDDYB6KYGES2PmBOTEzmbmqNi3m1gPmFFpHoZLIQfJOMqUpi9zd3GYh31UM50RuNquUrnVdlW2t4FwqhyFyBnEJQjaMiEAkERlyzmNmJto61m73SIlkSEMgIBECJRFtLTEhwLjfI39W+GN7GrrQxTpBBr00JagJSoZcaUDHeZ7ndSm6CvPXX3/1hz/8Owt4en463NyhyKms52VOImNOO04RcWqlmLaIsGCXFOBIDtQt8AJwP+3fvv1y2h9QsLVyuTzWtYY7BjAhC5NHSjIOwzSM4zCIJCJUs75+u+YTfbYa33j61wiMePl1vDI+e4mMpqZNrQ/9Zmttp8vFzbftDW+CuZ7EsQHxEV0rAZvaNNSslFJr16FE9/3sY0SH9K/xIJRgy+7u7LhOHqzNyTSnhAB27SGu9xgK05B4HHgc0cndncJBAywCYYOJruQkIGDEjV4DQIzg4K4e1kwlyzfv77/12zeTCOHff/N7ICm1BbtMmBLyADwAcIBsxH7CjEiBXWtjApxwZBFEQGTCDIGqAaHaLTgEiSlPCRnTKONufPWWQAgiRyJAs7Duf7xZHFbIaGFoNi/z6XQ6Hk+tXMYxkWQPrrUta7mcl6Zlns/Pp5M7HGhgtlLm1kqpSwftRdIwDOM4ZSHCoB6s2uVQxP1AcvdSSinrOI0cuLVpG38IESGJxODmCtA1weamPbm1f5RS1+9+/PMf//SP74cv45LW0zmaCcqQeMp5GjoZwoiCGXY52W4CwDD/4eHZ3A3Qu0geCYESoVAgOpGThoMjOEGEB0bY2s6fjnIz3tzfC+dafV7KXOK42Me5KMmt2TCMwpATtc6ZdkIYhnwjBZ4/HXkP8ve7vbzZpzcT36ITxmdIDHFKu3G46ZbhUzZCHHJWbZ3ok2gIk7rqusCyYqnQDNWCqvFcU1pzTrVwU5nnrgDqVzaaemvNvXvjM0L36tZwJ0KmIAJi4M0COpgoSRIiQXDXLsp8eLoM47gbx/fvD2/uEaGs65P5upbS1D1+oU7qPbS747WnuBZ0+BzS34hQ4dehEuxlaQrRmgfCVUcefYfUc2CIyMwBKAtNOb27m94cxh17lPl8UWbcTVOotlpOy/rx8fRwXE5FawtmE+ZB8hYOrzovGG6bUxiGWV90v/b9EeHe3KoFRvPW6lrLsszhMKQRHHWuHC6JcspJMB8Ow/3dOL/3emOcYv9G3r5/+803g8Ty+MOicoJ2ASwJA8gaNQuPIhkO9zDttDU/HT0RtJXWeVrKobXhdNafnlSxnNDcyk9/LRRR6no3HdnPh5vju3g/HPbWThEL4ICYIAIJUKgToF4L/++//d35cgqzM5Kw3N/d/+Y3v7093Ho4E+bEl/O5udfabu/ub2/vDoebYRgQaS3l5vFp+OmndSlaNUlmpmmKw2E/jcM4pCR8c7N/+/bNmzd3u91IW/6SaSuq7tYCepF4qROd27kRcPsWV7W2vgHvg5xtqkAz09benr940fJu3kI5hylhMPuQYbdL05Rz5nCvtaylFl0DIpEkFqEYON3lcZeH+2l/kDQajindTAcR1tw7jWiqPOTMVa05OBBxuCcSMGMm34Tk/SQlZGHpu4+/kfFvbBcRMTeRFOaEGO5tqJLl8z/wuvgCsLgOo+AQylFXrAlrogoOS1lrrYAhzN3vzwHGYSAmA1hNq2mnME2E4VAgFF0RQYhT4mHAlJ1obYpYRfQNpjf377/46utxv2tt+fjhh+fHj/P81Mnb3TUxpzTmPI7jmDNJ6vIMVkPGwF8Q4v7m1cdtwm2HFOEb67qqllJaa97xXbO11GUtfRVChLIRaDwikJCCMcLCYDtHtsarWyt2rjsF0TVTo3+jqtZ5Z31pahYWwV0jDL3xt3BURETcXAReLx+IUM6UEqWEQWDiTLBxAnrBujYJ1yCvF4pI/wGAzOb2dD4+nY5fvr293x3+/tvfCcA4jM/retTL2ReXYCSkQHKUIAhKsRklB9g2/Xb3dsKQgA5/dP8W6zMqQngA9WyzRMgsiV9uMY+ora1rYSJwV2vWYw0RiKKnEZj7PM/H0/F8OVsrrWlSU9N1rctSLsvS2nqZ58uyAGDKBZFVa9PatCVOiCySul5PEgsGhfb8C4NQ7zZdpK
qlruu6Tq1KIrwKQ6HjvIHMKM5JxMOaWn844LN7zN3nZXk+niQNvAxtVTQSopwkZ0kiXUMOYUOiL+9u7g8HRL6s9cPj07qshk4EKXVegwthEiQiIElMWxCmE3QiDzqhjjLsp/0wTGawFCsNitpaq5GpNndFNKQgIfREgASRxXEu81Fpp76w+JBpzDgExueFP+X85Zff/Pa3f8DemwEwUxL28KUsa2m1+fG41nK8XHQtpgpISRCZKBzWtWlzszBHCFKjsnZ6difMGAKxdJbANl69zNmEOCTejWM3BBKWIeUkKQu561KXpZVl9Xkp2mwYcC1TbXUtbGZN0YzdJV6hi35pwt35s5Hmusr/VeHvNvqduAfXso/R45l7/ksPRoFAQgwiAGBizEK0G+X2MNzfjDejCKg392Yo7NnKGu3cni7L8ThflrY2M+/mAQ7giCDMibtB4XXF8Bkb6+Vtm7XT86fLcmbCcG9aayvLuoLDkEcM9KKCiENSEyKY20rTuHv3lt1acIscMqbdbRacj8cGY4VxsXKp7o5jJMBA9JR9jEhjs2iltVJRG6nnovx80adzO5UwRDyFtXg6ClkMQjpZ9jm07e65LI+1PEs7B02AiLgJ5t3q5x9H/vD7Pzw+PSzzEgFMfHdz/+3Xv7m7u5/LHG6S2Ink40PR0CCnNOxuD4eblJI2HccbN/jpxw8Lr9O0uznsDzf7ccwsHO4RaNqW5fLhg333l7U1tc6yXYuaYpdmaXO3iO5+zFtjCEiMBMSEZlbXPr+1rnqP2CJtzPTd11+p6ktNZeGUOCgy+27Euxt5ez/lPBaF86WcL8fzvFSrQJTHcU/Tnsb30+GL+7e3w5QBx8CDwcSy48mIHwnO7qau4rHfEVgGAwCEGN0n1xq6gtE6X9altd4TsDAYAtTQpt1y2MNfbIhf/20zM+hvHbbb7frq1VDd0BACDGJzOvAIMzLlqAKrYEbD2pq75zEL07JcTqcnZillLmUprRoAEGWkbMGtuWvyJgTdnmN3ezuNOwYyt3VdS1OAdH9vadh/+dVvfvt3/y4Jfvj5h++/+9Nf/vzflrogEUCwSE5pHMdpGHLOLOyIwKweKWWR7ir0t4I4f2WYbIX2avtjVpuWUub+TV6/qVZVm0ZAqS0AEns3bwUiQSJ2MwdE65Z54UQkkiCiUzG6SPKlrez2Jk5u151Ca9ZEWZmQZDNcou4nZ9ajjPSKSW51OyXMmZKQEAK7iqcUkkEboUNnRW2cV0Em5BeL38CNGsesZn/96QdkkEx//+233775AiC+f/z4p8cf//nprz8tn1ZQIHINbyaMRMi584jdFbqovZ+M4OSNVY0FUuqNUT9tg7YmK7xHsoI6vMKwbr6c5+fHZ4oO71cPR+Y8Dfvb3bgfJUtRu8zz8Xy6zAuGqrm5Fa3LWpal3y9rqbVqjYBlnd0dqDtMRUrd8SIByAZ4YSAQkSNiuJlpM49gpFZKWctayipCm1NVt+vGvsZhIkxJutIIApB+mY9CnPKN8I1W8YXAhLsSJFEaBJnqWtfa3PSQ01f3b1MaAvh5Wadp99PD46fjU4RPOWXBBsYQQ+aUZIItESZxEhkImbuCy+tuTO/u7/bTPoCqeXMKQEnMLEMiIXdbI3gYBo6UrQaBkEKD5WHFgcpJvQSNIQSxIULbp5mm6R/+4X8+TEmrAjgjSc/8EiaR0/nyl7/8+M9//Ev969P5NDcDjxiHgXnMQszoaqU18xCWcRoBgGkBCNUGrozAhN133M0NAxkDAtADHRGmYfrq3f3t/pCZB+YkaUhpmgZJ5BTnUj58On16eH56fj6fl+O5nC52WxIAu4/uk/vi0bM9+zkG19G9B11uMjnbRqreqV8TQMLDA9yucH5v1rEn68Am2+uoosOVozemdLNL79/cvLsdDyMncquVAcacUs6JpTR9ejo/nOZ5rk3d1AM2TlqrVa37qBGzMCv4pijAax7Py+FVyvr993/6+PGHPlL06BJ3Z2aLISLWywXMzykzYKt1rcUk9m9vD8zzag+fLuv5tJ5PmKmtxZphoDV/+PRkHu/f3A9T5oQJSBwpQTAEAYhjEGVUsIfj0/PZgCgPY29RGG7DUcu+IgiqN7N6aetxXR55fea8ZyYCN6vrXMDd37x5Lfy3d28t4HBzV5oi8rg7jNNhGHctwKxSIkwJhIOoeTQPQw5KzoKBeZym/eHm/s4J9vvd3e3d23dvhOmyXCJCJAPS+TKva9FW+5VWbeuyaqu+mfvYdfODxLwlHfZlOFJm9ojatOnm7UdEvX7Y1Uz588KyqQbAmUMkcooxoyRo6mptretc5qot5TzxbjeMN7v9zeF2f7gb8hDm1tTA3cGXVsmfhR+RaogimgBwIIEQMAEDDOQZfADF5bwi6unpPF+0ace2QuJzuDuuMtVuceg9lKe17pSxec+09jms1LSVVh0CCaxDzh7goBFBSJAYUoKVgtycWaZpJyzHp8ePP0/jbj9fTus6qzYESMQZKDskdQ9PEBkpmFMext1uGg8E5KYBwQCQBkgjT7vx7v7Nl1/fHHa727thtwvyRcvj8am0Qj0VK+dhGLoPsCOiWTPjdE0x+h++XhdOfjX7W0s5z8uyLPOytLb5HxBgeKg5InQTSWNj2uxhka72HxvxhyKuhFu8xmYzo1n/UplQhFMfPlP3mCcAsAg1J3Zw542Usa0hAcL0F/cYM+x3tN9TSpAkZAB0mAfsvsDh3dqgH+OfUSG7SQVgeCAgI0bE6XL6+THdfjrc3t588+VvAOLD5fTdw08/nj48LydvxMiG3gAc0NFVO72sJ9ohAm8Up+LRtJmJUGJBwi4ljfCgMDcDaKbuGqgOr4zLiDA3dRNiYCJKxJByGg/Tzf3NdLMn5rau8zxfLpfSKkPUprTW86rPp/l4Ps3zxaM1bX18K610f5VuwqXupWlpbWfmER1+CbtKOCM2lyIzC5A1DetYa8lJunPvtvEl2gzUA65KDCLrRO/XQxkDMZKrrIvD3LxiUnb25u1cl4peFl2rAuEuD+9uD/txD0CHaQpCjzgvl7WWMHfAMAf0CGeCPMiQ0ygpSxYZGRJuOnQbc7q/OezHgZkBmJmHzLeCxHzIkinca/gwppxQEjgQD4QCAKvrWddjLReFA5JwB8BePkse8tfffD2I17W4OxOJkIiklGUYf/r50/ffP5fitZo75JSoy2oSJUY3nS9zU4UI5Oj3c0c+cDfsYRjywCwRYObrUlTViRwAKBCDKBLjYTe+u7vdT9M+5yHlIUnOSQamzJfadruTpKGUgmhAkob9zd2Xw5Acdiy3+8Pdl198k3N+ucs6Hytgu15dI/eCz/WHq9f+q5uGx5Ylx0g9DxQgUPj/xdifPVlyJWmemC5nseVuvsQCJBLIrKysXmcoQyHngQ/86/nABwpFOD3TM1PVVVlIJJbY3P1uZnYWVeXDMfcIZFW3tEkIBHCER9zr1+yco6rf9/tc88ivJFHQ4HDT82HX3d2M9/txOziGiiJgQkTBMxOVnKc5T1Oe5rIsVat659j5rgtgVGuWKlWxMVTWY8oX/
IAvL5FyPH769PFn5zw7dk04iuQcKpHIcr0eQVTDSMrpupRSRdR5N/jec13KBetC15GK68pFdFZZJllinatBpNI5z+yNIYmwK+QEfeFYgAxcEeBauZbKLqNKMCCASGhEntATeCJHClqkXNNypOsDpg5sNinX8/nDh4f99vbu7r7vV/OIU3LgQhz2Q1YzJNdNqcI1LbnZh3QpVRueV+qc8zUt4Bzm5i0rynz79vV42DLR0I/j7gbVplTArBv37N3xvHTRv77b913QmnNK8zQ1fE3T0hugmoI1jRJaE5qKmJkjBAUm33XWshodkao+E2/Kdrv9HAixrrCNj6qNJZPSIoqlBlWpJtWkmHRMr4bdm/3ddntw/XhkfjQtaIza1zzOdTfpBPzPg3uMnfEGnCcENmTC4KkLLgYXowseNwQxz/5wCB9++fMPf16OR2kBErEz1bX4sybzbkSaKrUUrTnnZVmWeRGtyzxfzudpmj9rHUxTXuZlqhKJUZ6ZxaCIQkrIVpxlB7PHgIhd1203O8fu44f3JvXm9uZ8elqmSUvxSJ6cBwpAHYICZ9BCWNeEixi7MThPSLvtjgm72A83Nzh01XMm1NhtX73G4DCQMjydnlLOaVmAyIUQYvTeEbMSQq2uVnaMX/BH/3rHb9ytxjO05iapS86XaXp8fDpfr/OylFJNjRCZneN1h1bVUopI21UbsxyJnm21DU6BqKolJ1EtRUqVUmopNZXChEPnhz50XYwxeB/YOQUkZkAU0yIKVs2tEbDPYwF9SYdYnxaHhz3dHABUg7dhY57p8ohXp4gV0YiQeD19qAGaMSK09B5ERUXDxls3hlmW98ePN8ebb3NBwk/T+dP54Xw+zktiGYCoKEgVSKAmRYopEAJxcBCBoGBWtfmayaqoOO+YGAmWeREtzGZkWbQpG8AUSFoC2fq4ELre9/sh9r2Pnhy74LouxC7EPobggDCnNE3TMi9V1NCWlHPVT+fr4+ny9HTKOXuHatpg5CLF1D5TnpZ0PB3ZUd/7QQMyIUM7WbfgYhWppcxJqQiz77uUU6mhkpkRrht/QzI1Usgz1Ufa8eeLWZopyKLLtSxTsQvh1QflQI7wOi8FLYB65tB1oRv64DmwMQGSV9ylUj48fFpSul5mBkOt3isysLfeuaGnMVJsCRBSFYCYvItD3227fuy64JvB0MUOD6RMQESBDKSA1GjYMTsGZO1YeoaeHVS+HvPlmOEOMDrD1uReL8c8jp3kMUUnUhnRedd1Hbug5kv99NMvH3766UPO0nfddrvrh95Hx0yEuiyLNLshAhNpraYqtXjn9rtxO272+wMzXy7X8/ny9Hic50XVDBVc86C3hBxzjnbb8Xa7HfvBEYkWI+DI1EXygyGfL2ewfDjs37x9+8e/+7f7/f7h4+P5fJ6W893rbzfj5uVwWWtJKZUCX6poPx89RcyMV1MYtCm6gRHw6qtWMgAmcj4QYdEiJibiHGz7eH+z/erNze1+O/aeQMqSq2TPFLxzjmutp9P1PJVcUc2VsgDgZjt2fR9dqEVK0Xkp85KmOZWc5aU3yLRSQr/kxJhqTSqJPDEjgYEhIauWZb7mfLxePjmgLb/2Fq1mPV/np6Mnivc3HUBND4S0vUDfxa1eN3D29QHhxIMKu82oNFiiHktMGYMv7Ivr5rgpJYnS1QD7bp9nmKez1qXvO2IsPAHgodfN4CJi1zlmUM0lnfT0IVeYZ7ycHt/98vOf/vT917/5/R/++G9vbu7W26wAoo/D7mAcEDCEsGQtMhepBpWKXuf5Os9LTux6ABKF8ly8mho6N+42sQ9o4Fw0I1FF8D7wsNkj2vF8llrw/hBjAIeOAK06BlUwIvLBgESqKZJrKtMskqVWMyPnGdg5wlWBvS7zXSRCnP4V3TiCERiJQMk6cw0u+eCychEVFUCL3m26/jDstpuDH7c1dAtCMVQzYl68S9nEZCr5QZeHItJFMu8Zg0MPrhKb9xojdB4DOybfD4e+V3bXOZvCfD3bcxvjy01PG3iytblM2sm31lpqzjmX8ityn6rmkpe0qAG5Rhk3UwBFE1QEBs/m2NA8BBdjjJtx472/nM8qRbScTk+1ZIfYsTO2dfC+5sRgG75G3/X9uN3shmGMPjKvOdPDdgfeL6LHefFDGfo47G9eoy55ef/hQxF7eviIoC6EtlsQczMeOldaeNdfg6H/K9c6zV0r/1pbAVjEVAWxoXs8uiZEt8+Gn7YBtJ3/OTfoWSkMYM+KJjMxJhq6MHbx9c3usB1ijCF4ZDagJFoEirY/FozIgwKvcm59bph/6X9FBGbwDgCUPbJrsvl2AEFCI1q/0t4erE6kpjhAAyNbGwCCWrSel+s5TQXBMwnWLGmel2nKQaM4MyRgUClVikgFBMfgPZMzRNRVMp1RUaGqOZ4RCZaUzMR7JIcAa1+NUZ0z5z7HJBFTtxk2d7ths+n6nkPwwQXPziE3+6bUnHJTXZiBAqQimuV0uZzO5+s0Sy3VEYI2T2otFYGJAyIZQC1qVRFh6EIMPnaeyRuiKappAxiXUuelIIn3YVmHBpnBG5GaAAK1lCZUAFSk1jF+1nB8MTZWy4ssU4EFLRNmNLOlmmiteUKLnd9uBk/ek3OtzzzE6AIK2Ca2Qb7lVE0VrTqxAkYOhzGKVhWQNvsRUgFENjPPVLyvjj0FcuScY+LgocU0cgjBu465dxQZ0CyTmCWQTGI16fUpXU9JBQAIQPTLkh/MsABmogpQCZEduqAq5XyeP3z49O79h6enEyKNY7fdjsPQOd9MnkCAlxDW3qFZyZkQY4xD39/c7G4P+/3+QEin8/mT91YrgdUqhkbBIYCUzLTyr3It1ZScc95psdoak2ZihIzeM7Efx263G29vD/d3d73v5sO+lGncv4nPFb+q5pxzTu3lvTT24bnKf2nctvu5lWdGsAbdqqF9MScz01JAa+doO4a7w+b1zeZ22206cihaK1rLv3COSaTMcz6fL9dZCkZRJOLo/X6/HceNI05LqWJLqvUypZRyzqWKNm0ONhX5r1T9qpLSlJar98ytE1lRmYkL8dXjdXSzN+xsCmJBqy+LXY5OZMPFMXA5IdNuwc4i1qWXE+iRcYodiEMfCkQoYeRM81KcoxgkdDX0YmDIC5L3pD0bcR1dve8sEPoJzeywLzGaVWTHzGBW03yu8/vj+XqdoOTp+PTh+PRuv9+rfMbeuKRmHLY3d+P+1jkvtZ7PU7pe1cRAievpeP708UFqHYfbEHvvI7sAJkgKZKZKFcnQkTeF0+msAoRu6Deb3U5qKjXnVE7nk2MLDg0EYHV3MLL3HpFSMkVjYq1pns7X82VeZgAehl3nO+ecmaVaSkklZULo+67UejqeTqezfEGIM0VVUEETmIqAGFoNXTXSXK2KOKJNP9xtD5txS904uy6zzwwMGCs48hB6jNVcgvPVnU+YarIejIGRzfvAhlgMUMUqVYBEFj13m5uDC98S90P/yw/fXy/nRnW0FVW3NrZ0bWd99rQ/7yb4Ar19fvSt1pJKBmIGVlBVMCUTsAJKwJAaJ4XBRdd5
F4Z+iD6k+elySYByOj4RWB9DDiFXlaILAAIqwKJQjZwPQz/utvvbm7v9/mYcNn0XDWSaryFGRbrMy4ePj2J4e7Mfous2u1dvf/vHf/s/+tj/8MM/T5cn9mAE5NB5BnRiQM4RMbRU6l8V/QoN19mK/ka0BkA0RxgdDyGUoQczTz77ItIyDGi11K/T9zVzQURWfmAb+YjUUmstpRZACCEgooiAISN2fXfY9K9v9t++eXW3G4N3AJhF5yrnXE9zOl+nJVeBLMSmHgLQGt3RpEe/OsCpwLzYkq3bKHhaqk4L5gWl0GqdA0REdi2H0xo85PkdoFoT3xMxGAKa1apqFqPru7DZhBhdrTrNJVkOwVsIaJaXuZQslhkpeDRllQUZK2Y1I2UwNChqlbIBQs7Z1FQdCzvHgIBozDpE6OPnsSU7N+53N+luHDddP7CPhABazMrzBL6UnBsOeaUcihbRtOSUijRSdSmqIpobZ4LQB49ErpE9E0xaS2BmohgcbUckAnJatYjlaqlomhNgicGn1Ocl5S46QnCsJgZgLfua0AAVmvlbmlCZ6PM9pmZzystSXI2kVFVRYamYzZYpebIubp1nciyql2lhoNtDQKLzPINk02pqCiSGpRgUcVUVKHQJwRJBIEZkMNamTTS6hJima9psbw67vod2lzqgyK7r+th33dB1sesCE7YjaJnyfF2mlEoFPD8u1+OiIgBmoIb6MgCTWqbrw+nyXqugATOZOaTlcqnff//wX/7LP3348DGXvNvuxnHoOs/OzIopkfOOOYZQcqslqoj0fX93e3t3e3N7s9tuxi5GMAzeEWDNCU1Tzmbouw4Rclocu1zq4/lSpC4pV6BxHMB0Kel4PU5pyRVPp8uyTOPGhYDBA9hsdfJUzUEk7gK/TGHMLOecUo5rrsS69wNAO8S3Xb+UYit/woARgAxV1ciEkDx7BJS6qNRlnoKn/e32zd3u7av9fgyBRJZLESUE7zjErovBpJ6Pl/Npvl6nOUMGqOS6vu82w/3NYRg3CLB0FcilYg9P51LqsixFtbHizUDEWhD8iwJRRK6Xy/l89t6BdVUKmBFx3+lhZ2Pvfb/hCpSMa3FAgrgFoLJsH0tg3cJC7Dthzmxp8ulscHacImNhBfDEQOMuBD5dTsDWddZHCB6lgHOVKYFMEWG/9/db/PYuRuIbIjHp9yZYn44oBkgBTNP1eJ6WH3+2quH2bn9/vwF78/VvXsf4MoIB93Q6twY7ApnanMrD8TRdrmqKpkTlfH68nC5mNl+X6TI5vsypiFUAI0DQIlKRwDmuxZYlSbU+xvaRlzKnvICUaZmG7JkDICgjcJO/O+9dC7xRETVAUzJltMhIxL3nEJ13TlVVskpOy9UxjoP3DI6MUL8UxTVuiRmJYFJDNWIVgtBxCLQft57wbnO43d9vd7c0bGvsmDiixpQ3l4mlJkeIpJEgU3jKPlt156ICTFA8YlB1pbiSfIyxsiOkFLj2nkC6btjvDtP2EU2VgV3DHqC1Xy9NbkYH7D274F0VBDQnawoffj73t6RDUQFFtbbxg1ZoXc6CtVApyOIa1GXl0JpZzul81uv1DKpdCLDbJh/Skmqui4AaKTrfd8Ph5ubm9mZ/c7O/PRxux822i7HW3FRvD58ellQen873x1P95uvNEPMynZ8e5wxh2L9++8183eZ0RM3AjeKPrdZfS7H/ZsXfQrDX2gYhOLYuImxjCEufcy65CZAVaF3NmHnFaTUaOSKwYzOopRogiiFJ82Mxu1V9gxSc2479/c3u9f3N/c3hZtP6lrqUaqkkAYeFAOE5XfLZy++JeVVY/rrdpwrLDNMEFEAMSobpAqWtAEQGz9oCanWpETy/FiRARDJaYcdAaACoqibioI7e347d/Xb80A91NgTvve98ADQjB1SdeUfcuc776L0jB4qsZqYka4NUS01mUEo1xfaLgJHARJA1eo7+c52MTL6Pcejj0IXYed8jWM0gVcBERWqpJTeqgogJt1xmImLXerNm0Mjez1otIUSyTGQGhgAKkFJ9Op7ZOXacS44xMqJqraWmIjnXZUmAEBeflqXkVHOujKssDJvh3BDIcOUpgTb+86/vKLVcZMkaKnBBFQWxawIGKGKEqxNfwLJoSiU4rwBD9PvtsBt71+wbSFUxK2pjrXMdLhlNF7TA5L0nZDAya5znhJoRinNqUH2oANGTR+iDhy5QdBicOVdVy6LLJU/n+TKnmQnNYDpdjg9Pl+lpu9tXAIDOcd94i1XrZTqezh/BoCn7SnW5zp8+zd//8NOPP/08TTMSeM8xOueRmyQaVgE9wAvpFtQUELuh74eBmFSk1OqIuxi3m81ls6kl80xm2PUDImbnWqtyXlJOKeeqQMM4EGOu+Xg9zcuSs87TvKR5sx1CIOR6nR4d1ZpBSjXLXHZm8rzxw7OUj1X/lVq/7f3rv8MqjW09QARgMPY+xi46D2BaAQXH3r+527252x+2fe9MyyJa0Iic89477xQslzKnNKdURUu1a5kxxN12v91vx74LnnMuteaUUmtoNdmBqhLS80y2AUXqS82Pz0pPU2h9WlUh9h6Va41Oo5qlOj2c7MpddXxd+ly4pFCS5+pcRfPuosgkeeJliZbVGQFnwgoKWp3ppBXyI4Sn3tex4+icMHYRvTeRBRR3Hd2N/iZyQDRHScVRzWBIHr0PXey6YESTlTxdRYO7H7oxMu1e3e9eQq0AwP348y/tTlG1XOp8nY+nU5oXE0EVwpKXa56zmX16/zFd68P2zN4XLUgWg+8ixQhd55lRBVsaRyY8X05ZJ6lLWqbgsUgpWoUiMVn1AMTkPHvvPRPHEGvN0/VKoJsuDo3r45wLkdgRYc6ZCcC05uwcDwMwMWB/e+hXtCisDcGX/VWEMyBXcEBjF3exe7Pb73x3P950466OmxIH8h0YuJT6adr++LNeT8eAaRzkcKtOO6uxZJke53xVpOqwzOQDk3MhdGM3BBcB6IL6QMWRdd5htcNuHx3Ndeli8O65DU5s6FryjkfHbMiWRRDJvDCSiMTYfZ5ZPoeKWKsIFPXZwaRqis3I3hKl16yJZo9xTKaQ05KWWUW6rtvt9lnk4XS+TlOaigFwjONud/fq1f3d67ub+/3uMI7bGHoiFNGa6/Hp6Xz9UQFjv/3666+xpODcn/7pHz9+/JBz3mziN7958+rN/acPf54uD1UWrcUH9+KiW410X2z99sWvFQqr9hxMD47ZDW4cehEpuaZclpybAckM2Dn2LnpPjlqy37pcGNQiuZZc2vPZfhitlQAKykjR+e3Y7/a70A1Z8TwXIhS1VOqUynVJORcAbO3NKlJSSZxKDI05Tc/WwS9GMLBMeD21ST3Oc82zlcrEwA5NiZiQqeUZMfKLla8leFOD2SK20RUBmYLWCuncb/CroZ9vb+vlcuM70wDYUYhiNpMWCWTg2XVxjDHGuPJqG0RrKss101yXXGoVlWoAoLJqQpBVRMypI+f5BX628lEELIuwCqtSa7ca2GeSUpVSRHKu2bND5sh+s9EikKqUmhgIgawxGBUBoEhlNed
88DGEyOwuc60fHlPOT7thu9l0XWQiMBCRnPOSZzOLi09pKTnVUoqj1hdCBCUENLTVzrNmXqipruq/zxuMQq6gC9ACJCZVq6ln18XO+1it5pp8jUqgtU6lLDnvduNvvn47ifX/23/RKhVRkJEdAFXJc5LLtYBq7zAG6pCCJ/ZAaFjVIGct15TdpVabYuxr15ENIVQxVoNaCnGg0GfNj8vxw/Xp4XrMkjdjKMTL+fTpw08fPv253zJRF/gQ4hbIA4Bovczn4+Vh1fI3z725dx8uP//8/vHpiERdF4kNQBDZs3MumNq8pJSWWrNaJUYOTtDQkSJOeTlfjow49sNmHLfbrQ++7/o0jGgEAH3fEVH2rrXKROqcc5rLdZp9DOTIiMywVE1LzsuUczKL3oNaev/pp+M5oDkTqZJuwd/W0sHLJ9N4wNIsci8a/pfJ3fPwjpoaSsxaVhwCIIPv/H7bjX0PqqDVJO7G/rdf3R+2A1mWkmpOjND1XYwxdFFMzpfrPM+piCETg0G5zhcPOo6vD9vRMaZlOh7PHx9P794/fvx0vpznWgURHfOanW32Mop9efad8zeH2+XyxM6JSi0mqqSSSfMlL9OUl/NynD7+XOoFeguj6rbOg87FrmpZSGAmXhIyNTypAToXou8BvRnJkgp+mufz9envAz90frvtxg6jEo+9hShFllqUESKRq2pFl+N8yZlrleiNtt2wHbfj/rB1fsekH0eZEwaq0Tm3C7tdcO5zYemO52NTR4nosqRlXqZlKSlprlAKWpYySy5mNutFsqVUyblqhRzGzpXO1YFrccucpOCyZBN0jlOSOZ1EUqml6zoOwXW97wdyrBRU1BE7coF9gy+3AGYmcn1HaK7p870HIjWFZpyqQaUjxBaUFT1H/ysdma2ARzNEBQzkh2642xy+urm/HTYbpB3FnR/Id2d0WcGqoaLPEJbazXM+PlUttNtgHCLFPvReWBnPIF7FFfW10oLI7H0c4uI5GGCVUuuF0LjrI+OgWRTmAt1Sh/MVfQ3L7GpmEwIFQgJu2SJdEBBQrCaSc+fDr6hqX7QyVsKgvQTirv4baIpzco6da5GQ3geVfL1O0/VackKw2AWHOINqcLUXZt+Nm91uf3u4OewOfd+HEFr4IRGa6Thuc87Xac45m10fP338/k9/SvP8n/6X//T+w3tk+vbbr968vb97dROCnU/9w8P7vCxiIC/NOvrVxPJfXo23o6poigiM6IicY8ccnOti6KWTqlUVANuUMXjPjpnaBmON8JNLTSstrIq2NMhG8hdtRwpidqEaXZOIzEdEQBPRIjKncl3mUsUQXwijqlpqXXJi5j5G7x238K6XG8xACuQFyJkhTjPUDKirv9/U0FFDHRCSa5hBXE+igEDtSNCORoyIBEYqomXx2t0FL7sDv06nYavCak7YF8VUt6LKSI5cCJ133ntafxoGSeSa56fl/DAdP55P52mpKit0BlVYcYWnaSm1lF+BlRRArKF7RJ22wtpeGIiitdY2RxERx46d976LUWJMzjExAZABkbnVc2JmJgZA6FtwqgHOy1JqbulUUnUchwaZUNVSS8qLmeYcc0612TfrszGiHVP4y7nRCyfhVxIaZux6F4JLZ9BZXFFSg2IxUPAkiKkWSrMyeY5QLJY6LcnUDrvd27t0v99v+4djKkWF2SGSFEHVkiWjcSBGqN7YKxFgY/uAAUBFmAUtlQozQEeYXLRBfceOYqAOJdRTvvzw8O6n9++fjieruNvszfuLFZS8LI+XaSSJQ4TOvWpedzVLOU1p9sTs2AE3wMvj0+njp8enp1ND7pSScsYY2cykainlcr5cLtd5notUYkYmECpSH4+n8/WSrlcC3G7Gw36fxZioqiEyIKmISFWjtgurSM51nlMVucxXF33oI/tI6ErW6+WCWHfb7vZuM24Dslym07QgYTDTWmY/vpJf47phRZF9Nse3m/AL9sbz4LMVNqKmQqhqiFo9SefNEQfnouP92N/u+s5TSSKqjOid62L0IRpgzvUyzyllx54865IUgAMPm+7mZnd7u1OAy+VaSr6czw8fHx6fLqVAywNsqDQztX8tnU9ELpfp6enk2CtozgXQQqAKsthCOtOSLo/p46c0nTTqsiW7D1U4i2ZvSQ2gMkoGx0DW2lbCrKEzDFjUUsr1HcP5zf28+RruDryJ0cPGg990OA6Z/aVQEjWtiOZAtCy0LIAe1LA6CoAKQMxD36nQ3aFeZ+kjxWDoXT/4L/cXJ5IAAAxFzDQTadexI19UatVnIXw2A6BqUrUWICM0BDOpaSml1Co5LQXMdWE7dFvAqGDLvIgmJHCh32xvdjf3m+2GmcNQtSoBMpJD1irLdM3FDJldcExoKiWbafNpiZgJMce+5+C7FsySa06L/IvYd4RWF4ApU+z739y+/tu3v/3jm69f92OXq89CSWQqC5SKiXBBQxS1KrXz18Djp7OvFLc1bHZh8zr18huUxYRVrWTNk2YBB2TJSSUkU9CyyHK2WgDJEUUmh6hV2Ncw/5SD22ippoqYQBRFkDyQM+ysItaFlRz54N2XyitYwdCrYRletn4AgBZcAcRtAOWC5+Abdiu4vpY8X9P5eF7mOaeUckDvu+B96JyLXTdudvuhH2MI0QdEUKvEFqMPMY7bcbPbHO5u7169WdIiYimlf/qnf/rxL3/5P/7z//bxwwdinK9/8913X71+c/Pd7/8upd/8/T/85w/v39dSUk5FSmkUcLZ/wYgEfFm/W45Wi9lRATNC8EyOuTmXhtB579n5dvhzTL418GGV20vzQ9aack255iq51lqtSq1Fa62itbXu51xSOSORI1zjKkSq1FzykjIS9rELvqVIsQGUeTmeTwC22+7GsY8+fHmDoQEpQLWSVRHa/AV0pawAozEi4xrV0M4/uDoOWy+C1sEMgUNjBmBTqCVDqVvgrt++eu3KbQFFMUrGBViRWyZS6xQ0rh0Re++ZHRLMNT9O5x+ePvwfv/ylykNNc6liDCpWS0EVQC1Fzuf5fJ7rM2Pb1kelrb6yJg0QEpIgwqocrCJZpZgqIgcfnY9MMyEwEpMDA21OPWguOBFt4JqVLGwGZlVURbgWraVIFYuto2titdasIq3JX2otWp06FKI2VEe2L24lw/U5aI7Gl8sHvrsfDrf9Lx/zPKmva8/YQIIThFqxJqnXkgPHnrshhGlZciqB6Haz/e1Xb3/5dE6/vM9zasNHQ8cg3MaFBtQa0Gam1RCIlB0G7x1zxZpMUTKWLJbAW7/vBr/rbsew6WZbPj2d/vOP3//4wy8+082w2+26YbudwTb7rXeSl2M+au1oM3znPbxsftICa6VB7iwtcr0sx+P1eDynlE2lFDQrXR8JS1qmaZofj0/zPNdagSh21Dqg58vy6emHWotVCc5tNuPTaTpdlz52WiUtdVpyyeW6LIQrLglUc63LMqupN8fRh9C5EGrWktPlctxuw3e//+6Pf/fm5nZwAUqVWqH5WXNNWbL+KtHuc4vpmdvzLHL6fCyw57we0KZ5aCueipQlzWcJNoz9YTPc7r
ZjF4lFazapTBj9EELsut4Ar/N0nq/TkgBpHEZy+vA4Cdjdq9v7t69ev72/uTmogWN6eDia6rws87wAOgBuXURZU4Ophdc8QyQBAKZp/vv/4+//z//9P+0P+77vgbCL3XbjiullmlUWL7ZkfxU6SpFUB6wJa0LZkw3GrEzm1To0RoAKMqVSCYkDUiCtVpZUH7av6h/+h/ubv6H9PcQQJY8s/dj5/baMe1eX45wv1wUVOmZWggpkUkrBJFWuc3/UOPrNcDv2/Zt7d12EO2UPGPjXdSW4b7561e4wUZBmyQWruaTLVOZZy5yWy/XiS6mgHgmJBFeEMmilakpalmU+ns4E0d307QOtVaZprjX56FPWJem0GHoLgQwiOmRqfDsGLlTBG7ILaOoZTes8XaVWQzIzQHDeheAAAFSllJSXnBiRuu4z356ZN8PmZn+oUlSKlvR62PzN9uZvu+1vze/nCueTXSdZkpUa1DyAA26NSpEF0uSljqX2mrvztcMucgTmW8KKjKqKWE1VBBwZGtSqklXE0mzzpDk3FHHvfSBCAWDRVGaPxNoxjt55gKJ1MlTmJmIvtWRJueZcs/w6nQ+oVYgELQ4CDMyooQyI2iTbAIDQBc/ei9mS8lKX89Pp48eHp8djTkmkLstCqoDknQ/exxC8cwhYUq45g11TSmZWVcZx64JXU+fcuBlC9CJ6fKrTdL1czwAWu+CZuhAduy4Od/dfqdXTZa7Cx4eP85Laz11R1/idf/V6ljqYQRGppdQqYEIA3nGMvu/i4H0Ivh+G4AM5doSOmnMfzUxUi9SccwvNaQehVSmPAE0UJu0xbgPo3FZRfRYDrjY9qc4577zzjluOHYABpJQAQAY1awb8X7Uvmvi4Vm3ZXLA6LriV8cKIjMgtRwxoVXHSy8b/ooNoRtZadUl1mlNOuTccfNyxW/HwBkmxAht5ZE/OA6KoFrFcDIljDN6xIytmt+MmhHAtacl1OZdUq6I1awKKIgOaTSbzXFX/xUezxheIGj5zldauTEtNMLOW8+OcDyF0/bAVQ/RDP12X6zLPKS2lFiA1BTUBNEMxFAMBaIFq5FxLP2ogv9AYN2pWa1WptZbnX7XWys8lPyo2QHhDt4AarBCtX13scHcIu4N7z7lU0YwE4BxW0SVXNWMDx+BD6Vw1D3OK07LM85LnxQG8vrl5e3/3/vFpzrl35BwhOwZ0BJEoIHsgbk22aoAN5E/IbMxVFdGAkaML3RC3o9+NbtvDJi7OPlyOP14+/nh6eHc+7STuu804dvc3h0Ku24+dc1irLcUwwxefy3N7z8zMyNSsFJlTvl6ny3Wqa7YM+MA5VxW4nKbL5Xq5TiklVW3Gf0Rq+vzrdMm5EEJ2XsyKaK61j513TqvOS6o5G8iL6bTNt4CBkNFRywokYrOKqF1HN7f9N9/cvv1q3w0EWFfpMajqmiP41w/8lxv88/Xlxq+q8Izrg7bpqyEqO3Nknix63PRuN8bdJkbHTVLKhMw++OidN8CU8+lymdKC5NgFA25AVRfc6zf3b3/z9vZ2t9kMKial7nfb3XYzDMP5mmu1FtFT1VaWHDzD1b64z2qtx+PTx08fBGQoG2JXUiUB7220GS2Tqc42ZXwqXIrNqKFCZOyIAiKAMwsqjpi9JzSVbMWIDB2SJ6cAks8O4c3rr27f9Eq5JJrPLs0OJWwH9/VX0lOB07WiFuLKLhFmQiAHjmLHLlqRtCypVumDv7/bbitWzOCEPIzdF7huAPf/+J//JzNQMZWWgy5qUnJarnNJi5Zlvj59+vT++HS6nMu8aBGoWrKWRhFhIu/YQvDsEF2zSzWJ2fUypbSErkO+uu7jJdlmux3GsR/Gvuu76JE8sEOOvet6OzRZH4PVmtzltEzXkq+m4jyG4LsYW99Va1UpLUj47tXBhzWXNHj/5vWr33/3OzNDKZgub8n/u3779VL96Zfz6Xh+/2O+nhAbB1wRwENLC3cAaCZY6ohGIOH8yKWgCxxDP3QUvBJB8OgCGiKTSinztchUi1hRqEQtexjRGXI1MjVNUlJBuyVZmCbv3xOR1l8InxxerE5pWpZlyWme5ut5ulzPnxPtEOmzbYlbidNKNDJzLRKXGYGIyHlPzuVar5fp6cMvjx/fvX/3y/H0VKSa6TLPkDM5T1zLUpZrupwvRLyGqKmF2O12H3eHm/3NTQxxTS6n9YBBDLv95lv65tX9LZh1Pnz19dvfffeHw+FVF3fk8Le//Tvi+L3YZboUlSJFUAyfAfwve/3nC6zJEoTAoIouOTcVGSN2XdiMA5BzoYuGgGgqCmiARoCAjbSZq16mtKTU8gXqmjAmpYrU5hSTljT5bKTQlgkqIqICBogQgg/e+wYKoOejCiIzE1IXY9/FEILz7uVwuVozWlcbAYwQTMRQFcgR04oQomY7b9rzNexllfc18gyBAdSqc63n6/x0uZ6XwZN5AtAmCEA1IBBWUTVEY2vBhtq6CA3e3GAaTG7fjRXvL3ma5/Th3ZOUStSM2YBoRGRMKpYLfhYr4eqNwNVFIsBgpmvDzLTUWqWqrqD5rgvk2Ie4P4Tt/haML9fp/YdfPj18OB4fzLSdG4jYQEVLUWZ1LUE3ODf0wziOm81ms930Qw9gzVy0xqJpVS1VitQi4mXNuABiNjNaz1FIJvoyOfmsVgBmGDY4btF3AGxVjY18IACYc06ZECB4GhmZZMrLlcOy5Mt1evj4gOj3w/Dm7rD7qZ/zMkQOTM6AkZnRM0UmR0gG2MihTb/CLEoKaIrOhdhvbu7u71+9vX31+ub1XbcdE9PD5fEffv7zP73/6WrVvGuT6y52h+0O4xgPw+ijQ+986FzPX5wvn/2fLYOYW+ZirWXOac5ZqzYwuCosOaOV0/k8TUtVNcSqCrVQZkSUmgmkjz5G1/gSALDkRZ7qxfvoAyHVUkWqgsIqTQUic973Y2Tn0dCFAEiqplb6ge7u77/+5vD67WbcMmCqVcGIkNvm0TzeLwUMrnGaBIBfVjUv3f6XkT+ssDwCQDCtVZk0RNp0/rAdbvebu8Nu03cMzdEnTMAhOOcdOzOblvl8uXx6fDKku1ev2IXLabqcrmq62Wy//s2br75+M469d6QIQ9+9fX2fsl4mUfCfPj2l61RKm+c3jgdigwgRvXRhmbEfXDeyOVusWKp1KXa+xgD9WA9OsJbLBPNM50wiHplSBWH1LB4VjcEQq/kAY+eErKpQZSBxqJ33JB4M8wKXs+OHWMDNi12fREryHjaR//0f+uPt+O77B0h19iJmJ5onTtH7cRv2r7swsmoWkSXVLvDN7Q1yX1QNC7o8bLaOvxD3ffv1K1MQMammz2tlrTmlRaWg1Xk6PD4MT4+np+NyvZRpKanUVJIAMDETsNNlWXLKgGEYOh+8quRcc9Gc1VDnuV4umXzKwnPGYYGukxhL8L6p0T2jZ/bcCGJALgRw6KJL3rR4hyG4GGIj+JpIywiuNW++APg4doft/s39a2Kikvn8uJ8Xd77kdNYs+fh4+uX7dHkkWt3+CMCI3ORY6BgZgQEAKdmpaDpDN6CORFtv0
VqGhRGgA+TKwQaA6LgmSwFCxly4CrQFzJRgvaEFFAm8Wi9y8PCV9xAZIxSxKWnRKqp1JdP/av5K1IJ/1wTI1VCOBgSeXAs0BkXvfTcMoetKzpfr9adffvn0/qfT8ZhzMgADMlNTRFXRkrUCJLy6z+hLVWQ+n84PT5+2DwfvndQKZuyo1XeAWEthsv1+E7zvfBzHMS/54/sHMwoxLCnnDI3RlqUULQqqf13x45eHZ8T1oUIiQFSzUiWlDGZVlZi7rnSlrn1BFDNERSYVJFXLonNKp+s8LYusRmCr67beFp+VkfjcWmx08PXLLZeHkZp0wHvnHCOS2eoIGobBsRuGvus67/jLpwWepdNr277JLZpTsQnSaAUwG67rHj9v/K2aekH3A0KtOs3peL68fzp9ve932x4Dt2Q2Q4Jmq0Ft8UigBMCoigqopojWiBBlJh+7brvr+rfb3YfNpnNsptKypc2I0BgACBBUf/Ve4Nl/YQCqIgjPwYKgAFW1yorLbC0mUUWi3WbTdWOM4+V8hlrzMk3Xc2YiICR2zCpVrdRaCiVwwMzecwg+hOC9Z2ZENpPWvKnadv12pK+tAaBMCKSIau0VY2M2mBkSoVqLRX+5mGizCdudDwNQZ6bIgGHDDrGUmkuVahXYKzuoJjiVdF6Wp8v1/cNx6MY+xDe3h9++vUe2nAuZdEyB0Ht2hNwiHIqaqjk2MGQiQ0LnfHDO7Xe7N29fv37z+ubufnPYhX2XUB5PD7+8f/fzu/fTZT6M2839wEfpu54M0Kz3ofPRqfcWYhe6fqQvbrP2YAKCEgGimbZRWnPpqYBzJEZVrbRol1xyrdpirlQI0KB670Jk50LsgvMO2atBa+FoVQTwzhGSKgOEdjAlRiZgByHG7Tia8fWylAwptVjhZdy4+9fb1292/ciAtdQ2kI34spf/ygTz/LSve+dfl/7YJFnt0WmtXVMVNFNG7Lzbb+LNbnPYjrux72PwTFKqqBoYt8guIjHLOV+ul8s0V1F2HoxLkuPxnJZl2Ax3r25vbvbj2DOhiYIiI8UQ+hi9c/QMExQRBQNkeJkt/dqS3HXhu999XfSJugE5QkWftZvTvSwHvgxSpBasUMQvygLsALNVUUYlYhM0ZHDBhT7EiMWsHemkFlMKgLWqKF2n+pcfzh+SCHOumM4IkqK/bHbh5r4fAkmK6co0kmTsXzGV0G/77aHfv+oN9fFhnqZaCho6Hzz7QKpqgFiZfvV2HElSBawAYlqqmiAog/QRiYP34bD3h32Y36ZlsXmWaS5zyqnmUsUUS8nLfD6dj2Zg5jbjwOwv11xrRWJ2LVzMEw/oejE/z7LMZ7STASABed9y3rrou+CHPgxdjCF4Pw5xYDggCIGs9AYThGotdtQrUSE3IPLLTtnHfrvZec80z3i9yPHjP/755yHL25u7qEkhmc2Sq5kCoq4CZ2xlehBCw8oIzMSOu9HznfOWF3Q5BwFXwYoIuzRu6mEjt3scPFuFnOt51tPVHo75Op3VkqGhB0YmQsZGwwIVDP7t/X63i/sO+/kspeRcCZ0Jlq66EF66yoTtMEKO0TtQa11lAmegENjFzgVyUDHGbrvdjZvN+XRKJT88PX56fJBazdQMkNH7QM4jURXLKVcxRCbmEHxzwteU5+v16enTO/8zmOaUmriDmEKIXRf7vu9jH2MMLjp2j0+PHz58GLfb7W4fY4dMczp/evjleD4tOVUTRbEGCvrrjR9hxdZC8yJ4z7Wyd1yZkaj5F3OR3KYgVdiJgQqgUW3Bu6VqLvU0LU+X6TotbXc3MxX9Qu/f1paX1qKa6eeGAxCiERNzy45Zt3ZVRKQYAh8OwfvtdtOFgAiNtvu8YIEaqKFDQiJFANCGGlJVeQ4hMwMwU2yAcqYm8luT+ogJWtqYqFzn+eEJf/rw8et999UYiTtpKyAQqCBVMGs2CNTaNAqmCiIAIIaiqiWzAA0ZmfeBb6LvPTOBSK0VCaHVy2BNbug/Cy8RXmpnW/v9rcu5tppVTHUNHmhSm2VOw6Cbze7m9m6M4zHE4/tfPjnfnAuBPbNzSLWm83RcsT9mMQQizwwIUEVyLgBkJqXkWkuRLFpb1KJKUxWIqBK2Y6vpClFdC0JERjQkw887CjDzbjPsD0PYPrm98uAiu802sOF8huVSliqmls15VERZrDwtUzxHdvF2y7tx/Op2v/zxd3GM//TPP1zPaQg+eNc5doRQtcVEKxkHIDQM5pj74Mbtdrs/vPnq69///m8Od7fGJq5eefp0/vinP//507tPy9Oys/Fv3t751376NPtqZU6nx0cOIVSvyWMI/bjrxs3Lxm+qpdbnvFPw7EUkpWVZltKMk4ogVkWLSBVFw4bPKu0rKp7QBdju3G4bb262d69uxs1ILhpALVJyWZal5rKGzWtzWHn25DyyI8fofezCcLmkP//zu08fTufLrFLZ1e12HHdhGH2p6XpNBpmQ2LWWGLej71+NYb7o9jUdgD1H3rW5/no2aHe2VKtFGKGPvNt1b+53b17tDrux7zyClpxrqdi2TGRSEyul1nmej6eTCIybLVE4n6brZf7w8VPXh2+/++brb77quyi1FrHGq6y5pmm5nE6fPn789OnT9XrNudjLq2xnEfu1FQlgt93+3//n/+mP/+athZ44eHN+LvxwpA8/ws9/X4/XBcUIjUmYikEGq4CKIGCCtRC43sWbsRs8waypZrNJLS8ZVbJkkVKVz+flL//pL3VAN47sOwc9qWo5395w6O4223D/pi+ZnAsqGPYHI+s3fYgeyD89LU+n6l19+zYY+bku0E55mhCu5oZRxb1s/Gi6esxbajSsfBVDcA5jJIzo3NB1UcTlgilLKlKk1KpSLaXlcnnabPpxszPlYbhttA8VUTUV87Hf7A+7/U0/bJHZzKyU9eCJBrmwdznXJfnoeU5h6lLfdV0IXfCdZ+94rdFV0YRJGwYCwFSr7zZInzf+4EMf+xidVVmyPJ0uP31453NZCF8xOhOCFlIkgGSG2sZJbaAjgIZVEZSoOmaoOuZ8OV5nUNoZbg1HUXDhrEvdsb//2t8fCERSno+X/O6xXq7HY/7L9fxYckJG77su+uCRSWut18umhG9uNpu+/+b1Dubx03GakqiC5whIwxcPf3s7zpFz4BwYICiu1FLD4F0fnUMnCbz3XddtNhsiPD5tXXCGAIQmUEXQDJlZEBSlmtSsbeMHB4pIRGCopeaUSxWzUnJOSy5ZagV8+cPHoRtCiM55BGLnHAcXQmhf9F6x5noUmMlVBVVSwH8JVfzV1UKf1TkNfnXniwDA6pFe/fRmZqLtbrR2lGh7/NqyV22Gf3tO91qNBc+XrRGHLf9v/am+VKatAHXrsrVC/oN35FzwoUXIm8pf0YebU5qJib2oGoGSga1i+LV8hhUyC8+dm5b4sY5vEIyAEdgRMRWVx+v102VaqgpxWz/1JaeohZECtjUSTdEqagbRWgGMxNCAKiCiBcaeMTAxYa3aRqlrQdWw239Vja06kr+iLa2BKC2DHuBZWiFaSjWDruvGYex9n+IcQ+h87EIU
rUPogg+e/bxMc5pSzqrZzAhNgm9+yZwZAETUTOZlbqfMF0bK576MqhL82rCPz6QEIiJS+1J74Zzbbw83N4dh/yHuF6fUOY4boopFgapBUUUQqhWZEbOWc17CdQp+8K7b9N1+GP7229/6Ll6n5WfRsuRZM4JXJitai8ylGqirRuCwo+i725u7w+3tZrfb3Wy59zOU63yZ9FT4/P7p3T//9P30cd7V7X68+XpzF9B9vD5Mx/Pjx0+SUxi6bt/7Mlol8VbM3BcfQFOiGAIitiiplHJKzbxiBs9oEENRBcWyWmAsRDdu/GYTb2+H25vhcOhub8a7+/0wDsTREKVqySWnXHLWWluripl9CM4zOWBGYmQOnvqHT9eP7x8f0ErJonUIGDu33XbjJjIjgCE4BERrJdSLmfdXmhhVERWm9rlL8/UhQmv+P8vmzdbASS2i5CjEMIxxs+n63jNblSQ1mZgUQ8TgPQNUMDNdlnmal2ma1EiNRfLpOOVciHB/2L9++/rV63vfxSLl6fE0z8mM5yk9PBz//MPP7969f3p6WnKL9iUkWrUVzcX7zEVol4/+7dv7wy2BD0y+A+/OsznL5dPliS8TmIAQKIM5aPgWZShgWaWiQiS3D93bfRccnEq6Tg9z/jSbYOcMRSqYZqCsflqKsB+GLViXFivLUuYzIC7J7253u5teLYg4EeCBAM1HXwXPZ/3wKb//kLuuzgmzWMWEuFTNqgn16sqk9tlt4RSoJcYTgyMHJoBVpKhIleqEzMpSllpaizOEGF3XqCdmFUrJh/2Q8l3KIuqYhmWpXd/3fT+eLkT+cPdq3B38OJL3CmCqWMW0mIiASaNxIgBAI9deL0hM3nEM3Ec3dmEc+66L3rFzwTOtMBc0Eek2u5fNEhHZuehcdH4xe7pMfz6f/qFMlHI+PS3OfSMwupClgBo0Mdj6jQBgmRXNUJEQHJERzog/zdP/+u7TNZW3nf9dDH/nQu+7K01qQ3w1hN++Jqt1mueOr2lKP9mP0+n/99P3/3w+PSFB1x/2u2EzOu+XZX745f1hCP+3YP/+dvju5vd4uP3Tu+NpUYdch9J1w+FwcM69vBfngB06h86t7Hc0JEJGit73vWd1SQUZmd242RxubxDtl5++X6bzdDlP01Sk1qJcCyHjunkaITtmR0ZaEYgRjCA4NAMUUayIhbAASdOwpTlJuV5wPbgDEDIxOmSH7JHYgMih73Xchtv7LToAVPg1WOm5mwdg648bAZk5PCevEaGBpgVrrS/sFgKgRrxHc0Se2QAdIih0QYYYVa0wy5om2yb5CmCrfOGzc6g9ygqNVOzYO+89t3ih1VwHgOg8YlsEvXPsGMFq1V8VMQaggIbeBfahWDUBYQODNpJBxTU04lkoR0zM/MzuXSPHCNEFVsa9iXO4mJ5SvlZNhswEoiJZS1LJYNq0Ao1cq2gslWXWUsTQuIN4gG5nPgLWNtbghh8jU7OmVwBAVZNcy69sMM81zuo6JMS1fY7kqPU8mwgLGy2xfS8SOQBMKaUlA1D03XbYRh82sQ8hMLoz8+n8dIWplKKqBObZdWFxzGJWpbqcVeuSrvMyS61MiCu7GVTMpJ146KVno2bcfqpATCwICAJfRL8H9ne7V69uXm9vfhoeJjbnmagzKAqLUFFnQGrmRbE0LfSclwvFTSpzrnPO46b/7avX4243FwGAf/r7P328XLebIXpPalol5QKgMSsjwqbb9Ptvv/n97asboZpt/scf/+Fa5/PytMDVwnKZTu/fPfg5vO5uDm7YQNBc0/n4+OH99XjeHnbDYTPeHQYFrfr4dAoS/aayfzl5AagZqgqU4nKuqdRSRIqqQnP5tTu3qSPnZSklx473N+NXX92+eX24vRv3Oz8MFAIwK/GMJIhEzhxZFxDNA/g2j2Jkcg7JDGQlJqGSVe/Eu4aIZjQcR7/Zjdttv9n0ITpHsKJFsMGCCdWeu1ufyX0tiqxJNnLOZmvENgBKMy48w3NERQ0MlHwIkUNH6Kxous7LtKhIm2857300Zak2Qy15mqecKwDkoh8/Hq/XdL3M2+32j3/8w+/+8O3rN692ux069+nh8Ye//PzzLx/mpZ7P0+Pj6f3Hp59/+XS6JkQ25DZyAIMWG63PY8IXVb+Z5rqkdCFxiCyCcLrUh5/L+QNh4ehUrLIJCTr2wXkERSgos5YBtd92/avt8Js7T5Tz+Vrkh8fT+wniGDbeO2DGmsG5cfP2zVfj67vD269Tsn/+x79cLlUgVKgFRNm6oUPsU6J51pos5yLXdD7LT7+Ud++u7z6kw6E+nctuybGr5FRhUVvQ5qoJvnBbOGAPa/MV2QBQDAgIDBXXqII1aHwNnWFm9tQWIgfRu64n1VGN1LxqvFxTLjnnLKX62N/f3+9vb90wgHNtDssqoLUxG4utlm6RLxFgVquYPZ//kbKa986zi955j8E5YgYmoy8scAjOkfPsiRbTa84ntbTbWkwfQDal3on2urqD2wiW17W87apgBs26w4izyIfr9S+CP0o9Q72KQCl3Vm41i81wHMvHn7EnQlvmNB9P8/mhzk95OS7z0+X69GAgKVZdhjy44Odl+fD4IV3d+338zd1Wv/26H8f7YVtvgYlrrZt5ujvc+JeNn8B7ipFiJO+e12VAImoRFH1kK1iWlqmFMXa7/S4v0+Fw2Gy3ILWWuqTcmPWNasTYoGuADWUmakZtBkFQHVXQCs4Que/iGhSKbbxL1gTeIlWLIQBWUVQhUStV2VMPIXQbs37tofz1oM/+xVeAqKks1vaaAXjnpZbgfB9jcO7Z7k7czGOIjYIg3jpxffSigmjl+UABaygSrTJjE1jHpY0OpkTYdHzOuba5txUI1uAfcszsfAyBHSGRqtC/6FygASrC2qJ8hsYzEBHS+ibxZTy4+jGfWb7PvfUWNmMeN9wDWgE453Jc0iXlwTsSk9qiVhDJEVDDtTdRAZowk1ZQVWTvxhveHMz7WqekVJRaVdzu6PZ3NuSNiORcvrQmA3xR9Le5V1OuGQOQKrzg1JrsKcbY90PsOnauLKWqonOh78Zh7LIfu947DwApe2ZeB0lVC+GSl2m+ApqvxZfgidVqSnNOCVSJeOXMfb5svYe+qPqf9VZM9CXiFgDAcThs7u/2r7fbIW4YhIgAAxgDD8RCAQGKGVs1ZbNqmkGWWq5pOS/zbvEHGXt2r3a73//262meHz4di2gBNLGA6DgMfQyOBu/3u/5+v3tz8+b17ZvtYZjkcZ6PHy8fH5enRa4VFxQpOaOrXd9vx+gcHY+X6XT58OHTx0+fpvOlmDw8Pe1P5/E2CYXTMnfQ39Yav3heELR9gqpaisxLnZdaqzLy2A9dH0MARFhSyqmISN+71683X319+803r16/3m+3Xde1UquKJANoOTgqAIwt3pKfq3NCJGZsYC1VBQUjBOwj952L0TvvAGzY9LvdsN8Ou+3gXLNdr4ZQaGQRBSLvPb/Qu1bdRiltwNzw6m3XX490z4u/2ZoyjYSGVrXkmnMtqWipVa3WImbkuPOqRQ0BRWqal+t0zbkC0JLy8el
Uq3Wx3x+2v/3uN99+983Nzd7HUKodT9fvv//xH//0/XnKl2u6XNPlukxTKmKu3aq2JvKigRI8N56+FCSqSJYyo7EBatZ6epw//Vgf33G6olUDRTSHErAiYkBlKAgVUZggegq+xedpKnpJ9dM1vZ9ocCBAHiEQJqRu2H797R9e/e67w+tvPj1c/vz9JdVPhL1STtWuS0llqcUuZzuf5XxMyyJV9HSWdx/y41O6ztANOGdJpbjoCBVQEBS+jIFqjwy5qIaIDAKqBihIjoHJM2N1rKpYq4DWok6UzKClPyGAQ0AQRmTPgB6gM+hKBUAUlaqF1RFbP8Tdq1vfdbVWE2ET1AK1ipoAij5DV9YO7osWS8Sqml6WdE25KT6c4xjiKvVw1A3l5bNBRHbkPDGagSU1HjZfv7qVkvTj4+n0dF5yX6oHJqCGmkE0hLUtTcDQcCRAaHBd8j9f3v8YR//6zRD8ZT79kpafqzlZujLDX/758v8qethT56vqdJnleHYfH2/y8fe9OXW7XBdLw/QU0gUDjVV7TZtc/C8/TgE+OOu++uZtt91+9XVFTKVcp+v9/ouNHzEE6nvqew6+JcW3vampIJ13JiBIFUAQjYliCF2I0fvOR+mHWqWUqmo5JxFpGvMXa4AqIZNVEGt+VWk6si7gNvZdF4e+izE652MIXd+Z2dKuvBRRIhSDKppzvS4J0PqxH7add2RQEOCvBn22/r26hqw993hWsq3ziMiOZTQ0Dez6vh/6znmHiM6tTFIFIDMkCI6q5y66Ulkqtvju1QmEq6ZutQ+0qbg28Y4SIROp8IuhCGFFwjUhdTP3udWahy/z7Zc3szrzDGqu0oR8SI3v+zkwCIEIDXEdmxHYs52/kZvpWVHvAw1dEDA1nEp9vF4fu2h97BClFjTwoSPHRgTsMPTWxNXsSNUBWTUatsPdWxwPc5nnnM/ir+JqO9CueY5mqKpFtFYrJWX9XPHDs+ihbTUNm4EEZAYI63FhPbEABO/3+5ubu7vNdhtiV6uAI9+FOPT93FeklvFYtRhUYmUyRhRTUUk5X/CSpXQ+eh+i82ZWylJLWU9/Xxi9dJ3krQOexn5Ds6anN2Jmw6an+LyK+V3/6nbzZuzHEF3JoM+mOzeEYGQksohVKJVQmYEjk4Be0uU08XZwu2t3fnwKQ//27qb87e9yKd//sHn8dNSUB++3XbffbPbDuI/DYTvu7zd3b252/chkaNncOYeP6k5jRHJkAFr7/TiMsju4oZzy999/+PDz49OnhzxfCTSU+vh42n567A/3UfE0TcpblZcDmaEZozEBOSSiqni+lvM1l6LRxVe3d5vNaFiu0/n9+48ppRjD69e7//jvv/72t7eHw9gPvuVMQVVAcBCI2bsASALVDJhbRHWr3xTRXEvRATTTqtkAmGOZbTPErgs+ZEQbNsNuP+5342E7tgcXAEEVanOCQFXMUvvgvmT1S5VSCqI5t3YCzOyZjt0yyu159XZAqGgK8nQ+E+btwM51TKJa2u4egqtGqYiKlpTneZ4u0zTP85TmZZnmdHO4+du/+7t//x//3d/87d+8efuKHaVcTufpl3cf/vFP3/+f//CnKUmqoODMyMi79ZBizzJdBTBsJyCRZ1ff85sxASsEiqKScjo/PL3/ST6+GyCrCFUISiPoxlRqjiARl86VkWBg7NXwOp9/eidi5fF8mctcYaqUMwgqkfVOK/N2c/PNd//um7/7t93uVdVfBP7PXGMft8B5zvrh03w+zk+P9ulDfXqQ80lLZqQownPVCuhcdKETMDFpUScGoR2dHftfTceQPSmCkRg0j2wb+BMAEzEJIoQAYBWAsFIDUqqqgQoAoiIqWtNutMzsL/Jm0ACVHQxD6IZeajWpBELqQcTMBNAUn8/4rcZfSWO1SpZcpYq9hNppFcSqLquBslopX57JgJiosbARgTkM4+b+tko+prIsl+kqi9QmBm1zVEJDM1o3p+fXi1jBZikPSS7dZrh/Fcbh6dFPT48fLtdNKV9BjY8pX68ag/VRAV2unIpLC9cEDjZDPMRwNXYcmB04ELPaD52Uey3x4/vlHx0L3Pyb/7i/vcvEqdZ5nr7c+IkgdjwMbuhd2/ibxJ+JvWMm55CTGbEyWwguhtAm1VoqqHUhai95yaWRAhr1HA1NYQXcka3MiqomiEYEjqnv4/5me9jv9rvtMAzO+RB8jJ2oXK/Xy+V6ul5TyQAgpqKWSxmWxUB9jCE6z5ZVCX697f83rzb59uTZu0bx88zBh+B92w4Q0QCqmJk5RkL0TNG7LrhcXC5Mtf5qA4Nf6Yrtueg3MxWrtTJRKcU5Um2B34rrjB+fnRSro6dxQ//qIgAwqEUQhZhemEpAa9Nh/c/14LCKGlcdvz2/57X8R++pZY8mlV9O50hgMu5DCEgCeEk1L7UaKBG4oshF1ZuM6EIYiCV0Y9zscdjXhWSaTnN9uKYp1VK1bfqNgtV+Eu0s/S86L3/1cbR9x0StVmliZwR07Pt+vLm9u7m968fROc8ptVRnHwKt6kgzrFWKaKt+gAhMwRBENZWisB5HUAURSykiQgjrVPjZffEM438RhH7+XJs7ojVXvqRDInLEYQjb7bgZxu5iRRWQGYkoAFahalKtVoCqJYmCuFgdZSpwSe6SuvPUnY7HjWnc9m/vD/pv/3DYb3/488/z+dIz34zD27tXN5sxEoeO3ZZoWxd/RFdrPFE3hS4NIH3vnWM0InNUfVc3m+ofUvrL8cOP7z/mKZPpJriKNKV8vc7LkvyowYXoP/NIAIDQmNA5InaqlLOezulySSrmmD07Jq5StapI8R7evN1997v77767e/tm14DIVVo11YjezrH3bZBAYGZM8IzXRjCgttojIjoABRQDY0bvKQQfg3dMyNBHP3Rh6EIXWJo7BgnQiAEMFQwQ20/9yztKVGqtrbPQPs0m5vlSmvMi/zNAMytFUk3RQc5q2sYQjmoStSpWJNdcSy5pSfM0X87XeV6WJRtA1/X3r19/9/vvfvPb3/Rjn2tZrsvT0+nnd0//9Kcffvz5w4dPT1lQwJMDosBEBLi2BgFXfw6sfE/9NbkPAEArgnhiZwAqUvIyzzLNHoVBncBg7sBU2UQ1ghzIdg42jgbEviJcpeYpV50vaU4q5hT9IgTFPNfSzKtu8P1t7O+dvzW85hLm5BHj8Ww//rQgpodP+eFTefhYLidbJgKLsePYh24zHnZxu/dvvzrsdoP3qyGUMBigiprwl8++w+d6hJ47+s2RSwCGbLBSEgjFMRQBEagiVUDqKqc2KACArMzmHCEUR+CZnXfkyLCIZdOE5skE0BCUCIkYDHgFmwEYaRMhNdmyiraELzBdVVqiv0qhADOtii9HsmeBMrY5pO9iL9LHIUu4bnZ6PaXjQ7LaETEgKJIymZEZoa07BEJrEJdSZ0TpOr/b727vYBxVBUp9mOaNyD3KVmWXl3pFZafNEmOKUFXrFuE+9m/DMI0H3d9B1yO21JMarsftwzufZvjwEcZD/0dw41ZjLGB5WW6/mPETYYw8DGHofPCeGcjRSxaPRy
ZwpHLx4ANvNsPYdwxQlnw9n9M0DUNHfb9Mc0pLziRK3MrrtjfZ87tFQxBCBUbvOHbx5nb/9W/evHr96vZm3/f9+h1oOZXgGRGrGACKCoF6ghC5652ZoiMgMBAUWaVHX/r3Wszm+hVbl3Fo7rd1l2yVn3ueXyJi69e3b0JARwjgAiM7isAxuJC9d4W5kgjRM8YfQPF5N14rjOe9w6y1u5vSgIjQO3w+7bffivCc9gCfm/O/2hqxdf0UqqypMaLYjk7tzPksRCRr0BlsLOqWjPzyfttdDcXQkQ8ug/z56THnCWSP+/3dZj9X+IcPT+/O16XUIqrWKLxw2Ay/f/vq9ThsXHUxsnPkw0ib0/H4cLr88unh6TLNqbK1LABBxy54YhCrf/1mGoCIzF5+zmhma4e21JpLszdTF/vd7ub27tXNzW039GDogvfBBx+IuNQ6pUW4EoFYrVJUtck02lSeyBmwKaiKqcBqkTMAQ0JkMsTPRFdtg4XV+wjUtP2K1rDUCKtW4vObMTOtxuD2m+3NfpvzMWd15M1QoYhVtSwmVagkLSedQWAjCIJQp+qmPJznqScSEw+5247//o/f/e43b354fXf89ARSDl3/3Zu3XefOy/GkTw/0JJg7DF3EbicupDvtqjGiOeLofeDoMFCKerT6aXmC0yedgLinYF2AECutioku9je7/ebu6/AcZYsARMCOvI8ArmSaJzk9zZdLMgAEu1yvKeVUUq1zF93d/fAf/uM33357f3PbOddYs+t4y8yeFw2H1MyQ7XZXUYWqq9QUQdWkYaUQFNUMpJYiBQmda7M2CIzRkWMAqyVPtdbWveJme0ESK0VrVXmZ6639NqmsiLLKbV5IZe3xfDloq7VbR0QqiEpPYCH4cbvpiWziacmp1JqW5XqZpuucljRNy/l0KVUch9u729//ze//+Hd/+/s//GGz2z88Haefrk/H4/sPD3/5y4c//fnHx8ezKDEHRG/kDbAJg9f9b10nUBudrrE/W+LFy12mSmCd856gOEEO4HulaJJRa1DdmN07cuDMLCLdMN4xbtEGgD47NOZMqnpaNAs47pwPC7pF7WgiiAPFRfnxXIaPcxjnp2OeF5hmXGY7HuWXX4rUcr2UaZJ5slqJ0YcudEO4e73/zbdv33x1e/9qc7jph5G7iIxmgojeCuZpcc6+JCm7UkozJbd3va7NBmqACMa8tiyb+g+ByIgRKxC2fgipEZiaCmBlKwBCBG6935hXfVHjF6zj2Gcc2tr4JcJ1LQCERv1enSYrnNMMXnpDz1NAUZG/pqo9/4OcC0PfI/bdQKW40IOLFbG2GghfBvufWWVtxo8AapZFinfWdbzZdJsNj5uU5uV6uSA+qeSWhC61qmZAAQZwiAAsCMaK0XMfBt0e6quvbLMhE7Sitfij5+sJliUvC6SEzKGLGPuA0Prb/NmhADG6fnB959ZhtF8fYe8cI6GQZnOOfOC+67z3JeV5mkoqRLTf7VRknuZpuiIiNkENAJqYgqGKEYACgpm0efMwdrv97s2b+998/eb161eHwz6G0NxVRepMmHOIIQa/5FKgnd4ZDck7MhBDE1NRRTVSIPvV/vLfd63wDH7utONLNY3rfLfRoqKnSpiCW4Lz3vva+oXrKVHsv1rSPluJqgEwUQiNuQT0ax3yf/tqpxQzMFlPJqrarH2EwMhkSMiOyDMxMSMzsWeHhgLaPovnUbppI+mRJtMpF5NyO8TDMO4BLrn847uP//zxKYs2ZL6YCujtYV/R1Vf028FzKZdPv7g5bzdj9H7TxU3Xdd55ZqsAqmCAho6dAjpK9N8OUVg/Bq1acy0p55RXX+5ms725ub25udnudr6LIhp86GLcjJtpu9tstypCzcOoAOSR1tACI0B21FjMTK6NlwFepvf0bLR4+evb/6FV3Lm+YDVAbcocWHUHf/WyFRjddtztxt3Tcao1IQIjxI6D63gToGBNND3ZA2ZLhl6FalaY83yerz27YIoM+42LvDn0sUc6j52lDsRtYjd03ke4qIilEi5Lf53YIuHoXNdz7xyAL0Uc4abvuhiC85C8BLc/hjd/2IojXVyEePD9rutjP5D36Hzouv3N3Xi4Ye9/9XYMwShnOz5eHz6ejk/XnGqM0RSWZV5sUpWup9dvbr/59uZ3v7t7/Xob2T7TveF5XwXA5zS8dZAC1iD4IOuIq5npW1MMn11OairPY3gpGYEDc4N+1io5l5SzQrOCIRgaUClpmk5Al1I+7zAvqg15/veXjb9lPK6/DVFqbVnPpkaGaM5xCK7vwgigCxatS5qWy+VyPl2u1zmnmnKtFZjjsNne3L1689VXd6/uQ9ddrtMPf/nh/bt3D4/Hj58e370/fnw4XqZkwESO0LU95QUkhH81nFzLXzX7VcXfQsUIAAxTscuij5OkybKgVy4CF+OEQSEAgYJmK7PiFSFUCBacOmQGtSKkFLrO9+QWwaJWHCVCFPh0mv/xTz8+XMn3D+/fP56vuVTOC9QMWl3NkrKKAAD66MZtd7gd37y+efv1/dff3L96dbM/9F1HAAmsajE1QkNJMp+rc/Jl/8JN88TMjh0RPePLSNVEK5o1kWMqIqUCNEkvETA5FOEqosIiqFrVFMHMBECaWoSd964hv2Jg55AU16RIfB7KGii8lFxN44PIALhKtUBb6xHBzIhRicCo2X1EyPnP8yR8LvoNAb2P21G97/pOEZm8qVMlVUAVwrWj0wYVRq3P0wacZqYVrTJBjNz35L3zoeuH2sUZYJKanE0AF8QL8qW1O0zJjFDIwFVYQD5WuypW58FHNsFKFdGp21SOyuxoG8IYXHDMz8euL/kKiOQjx97FzsXgnWfnmBrsNDg0kqTJi/fOOx98RIDz5XK+nBFpu929fftWaz2djo9P65msbVYqFZqlnUi01fPmyXcx7vf7t1+/+uqr16/f3N8e9sPQEVEuampUFdTQmkyNGLm1YaABhVodprL6Qg1IYTVKfn6Oviy7AQBWHf3z06agYmJgDh2suvhmflvLAWaOnnvPMVB0XKSmytFzDL60pCsotdbWLFppR/YyoHue4j1fCFAKl1IZGdcsuNWC3Dz0DS7ycn1+8vEZ7EWoCNKiEg0QQdWYKKDztD5MzOSYvfPBueg8GlasrdXR3BmKVrPVUkDUyJJY9O6qNBulKsfr5ft37/7x3YMLARTqnHPOScvj8ZoqpimPv321pPr9n753w/7f/fE/7Mbh3/72a9blvJxqWY6na65GxEzsKSjWCo6Rfr2+2XPrBZ/fvplalVpKmZdlSbmKOu8PNzf3d692+10/DuyoVAk+jP3oblfHyNNmtyxLSjmXlMRcOLJbpE2V2HsXOh+j4xDQtdBDbTbFVe/QHj5cb0lrBuMmajV4fv5bViWuvCT86wOeOec3/W7T76N7XDCDZufdcIi7YX+33QwhIvLTx+Uf/vf3pw9zFKJKssiUltPlEhQ9WL/pXgcXCKZPjw9P5x9/+Ol4OsfA0nUs4iNebcqbEncBtuVMl0XKNOEGu5vtJnAoogBI7J1j9hAceh+j78abz
cP7fH0UvfpOdlRZ59ptNqHvXNeHYfBfoMcNQJq7NcnpmH/44eMPP3w8Hs+msN3uVOTp8UlK7jp3d3f7H/79b3/73e3tretCiyQHRKbWR12nJriOSdv0ujX3oR0Rms60PWQrvFxNq0FbjQ2oiC4pT9MUY2AXfIgqkLOmbEuGLFqL5Wy1WK16vV4ePr27THf/l/9rfn7YbX3mDVYJ19o/t9bSa4eSNRXX2j5ALcasTUCCjwRcqyxTujxdTsen0+V8vc4plSoA5Mbtftzubm/v7l/d+dDNS/rw4ePx+PS//K//65///Jfz8XqdcypQqpVqiAy23lOIq6NsvfWphW0hArT7b/0w4PMiwoiiUIosiz2elp8/TX96d778Mm3IE/AiNGE4h6FwRARWCXXeI74O/o2H6tygXpxbACsF7tyGh82Mx3Mqpi4GcHjJ5fLTu19OieOfyO2qwDxdgcOScbkS1I3WrtSJOHeD7W/926+Hb769/Zs/fPXq1W0XB6Za8kNJlaAiAoEDcVpyXWSZFx/zFzoScMMwPPsrQOXFBm2qqCppyaJ1WZKp+uAdP48usemxSYmYQZVEBZ49TUStPnOIrBVK1ppEuAHPXlq+65Pf5Du4Mpqfh7UEYGAvTXjAFmBORrY2S9fOwa+efW2TQCQX+s0GXA6xy7WYair1qWio1jmEFb2ydjmg/fmACObAqlpRU0SOXej7ros+xiVG9D4ZTKpZISFcAD+ZfVKd1MCQwaKaMySBycrP18vFPVo34DQRCNQipYbT0+Y6b8TGPriukxDAsb1shl+cOYkgBOoG13U+Nkqf886x9y4GB4IZ6+wFGWutp/MJ7d3PP/307t07Q9gdDq9fv87LQoQlJynFqjR8fGu1KVg7UzMTs2vRvXd3t69e3d3f3+5322HoYvRgpopKUJspFZGQmImJlNY0+dbVRkM1RENSJGUG/u+pLL+82v30OQpFWh1MrbZDICBrDRlqRB196R09QwBXR9h/c4Ldcr4RBaDWmnNlYkRWJftXbAf/1Rf7PLBH0VZCracYQvLMnhriFZ9Pcy/TVCTgRmUmRIekSGZQapVcBDQDzs4v1ZLYIpJKmVKaUvIIaFhzTilnyUWs2M/B4He7oXTu04efK31iDl+/fv16jL99c/+HT6/m6VyWnLM8y+DRDBEI/9VODH55663jNhEppaia93GzGe/uX93evxo3mxibcAyqcxoDgUrd1lpCiPOS5nlJOZFzS10AYJovopXJBRd6770jx0CgYFpNm/HgudVLz+EGX660n8t9bYrrtvGvRqtf3T+OffBdF8Y+DF0IS+DoebftX93fvLrZ3WzHLno1fThcxNLp1nNmvdhyLDZDtTqVNBWfJJup5Hx9un788PTw4eE0XUPHy+Kv86nrmDr0ETuIzJjRpjpdLglR7vbeUagpFdPg1JM2Z4ipDH3/N39z+OYrd3lUuXau3NYrnT+dHHmOfVE9Xy/qh5H37EJ79yKakiySjk/zw8PpeLwsSxJhbAGaVsjJ/mb46qvtb397+9Wbg3MZG+eqDW7w5UD1LHB9nqZTO1KBGRMiqDaplTGRAYmCiFZTACN2tUIVq1VzqUhYqs5L+fhwdme7zPO0pCVrSpoWLVlqscvl9PDpnQvfpPQZ1//sqRFEqC2Ua32KPp+sX7xziC3ZEggseNqOw3YcHGNJS5rmy/F0fjxeL9elFFEkF2M/bveHcbfvh1EMHh4fr/P5w4f48PDw93//Tz/99MsyS1UEDkiu6aMBAJ7RNc8v5L/3IiACBKNcy2kqH87px8f56Sn1TEA0C8AQ+7s3PGxMZZkunz7mj3k+Rbh4uLoyOjVnifRKuhCac+RgTS1WzhWuc1rKNT+cBd4BjiEM49iraZFaVRi9j3Hc+XGDhzt//yZ+9Zvh6292v/32brsZNFNJNdcktRhq8+aaAlSwKvAvxD3uu+++FdGcc0ppWRapVc0QzDmXUrleTkuaaxXn3M5twXk1wHb4bg5AQmY0c6sbw5id4Oo/Y6l4Oc/Onz13qtR10TleZ+kA8DJFwDa4W7dfwNXDo/g8eYS2DbQxQEusAfjCZAnwgjVTIibnhmHDLqPnywy5LI/z9TKla1bowq0LaMDNM2OGau1WYIMAIAK5qirFELqh344DdfE6sQEuprNYYawIk8GT6s+lPFVRBQ+0Z4xICnCqyw/TfDo9uQ/vyHkgaa2NrtRtuh46L2G/GQYKgZlUmiEGbDUZAgAgoe849j52PobgQ3Oet43fgwJC8b6a2el8/tM//4nhpx++//58fGK017e3969enZ4elmU+no55WUwNnbXCWRSLqYEhITnu+n63P7x+/erNm/vb25vNdmhaHodkoI5AEBlaRcDM7Ji8IzNWRaSGHwVVEwAxBGAH5tA5/Ku45C+q57X8bv9AAkJEh+iwaYxNazXQ2ti2RNz6/0JV2UAAAgCmqnOuS66lrpGi8mIG+6/t4La+DgBTBam15OwImYIIvTQJ2u/Ez9CuX50IrPWfAFgJCcWsUVbWOTYjO0A2I5NnQboHFTBBa12B1inhFvyO0CjU12sqVYA4U8ip5qpJzYg3Y7/bDcYkYiAeCJ1xKfLw8ePPiB+/uvM32w7x0+X0//7//n/u71/9P//H/3AY+7/9+lVazh8fz6cprSF3TZGsIPrlEwP4eVi+Sg8/P1IGYOa92+/3N7d3r998fXv3qut719TfhMFzLZQXJYZh6Nm7nUDONadluxmCx7HrHp8eUpqdc57IESKqqaAZEgOaCJupoSFSGwI8iyFhFVo0vYaikaka0kpksGe20UszhpCjG3s/Bo7e+di50cJ26H/z5vW/+cPv7w871ZrKcryew2jf/mGE33Qhx3KG4/t0/lCuD1WqFpAsaVlmK/Xh3fHjh8f5PM9pueTy6MQ7Gcd4fzjcLNtw7nq38cP+EU6/pHfZUeMlztfTPOW6FEtDdztUw8uHqwN7dXe3397edpFl7+F1uvCn3WNeMnt/Ol8+HR+3x+vv/ng3bgYAMLVabJnKPC3H41yymDasdZmmqWXGbrbht9/dfPvdYb/30as9z0TVTGsBgOcfJqy+fCBENrVSBUwRgTwiIYjlUhpAFQCrWJVaJAMYOVqSSEU1NuBS7XyZfnn/+PAoouU8LZcpX5eSllqySVWrkPM8XR/fvL3U51b/ujiI1lpaiW9m+EW99tIGaF8nZ4RGqGAlOLi92d3d7nJepktd5uv1eLoez2lJCuRj12/3m/1he7hh76clPTw9zvNZtYbolyW9f/dpWQTAMzsj12ZGpmuBgKt1DwFoBWXa2u9/1rrgqjd5eVjWOFnH5BX1WvRxLh+m+vFaoxNFnE1vDv13f/jDzZs3y3J999Nffvr47no6PbF9ZPvRQedIvWcHIRT1/sp+FjarIHWZ1EzPaVmqVAMFB1BFivdiVg1nDnPn5bAf37y9/eqr269+c/Pq1bi9ceNIsTMA1VJBJJLDyETGBKgMSkJUSJlcF5m+6Pe5m5vbWmtKaZ5n511OuZSWlanrLNOgde6JHBHbFxaHBt4AXJElAGj63Doi
QMBc5HS6iiICqdrN7YGoe/Z5a5va/Uq225bWdaYLaz++fR2fUajrt/8ry7s9r1tE7F2otaacrtfz8fj08XSs87x4d3v/ZjjsPZKZQq0gglVABVVZVVVqztd5WrrOQvDORe/It6hgWaqcqnxiQoOrQTYUw8Y7RoCiiAQFYTYppcgy0/UKiEYKCGgkQIVAtr0/3MTDjQsRAVss9a8XZCDCEFwcfFgv77z3zxW/VZCi5BgQUk4Pjw+a8ZdfftGS37y5PxwO43ZzvZyWOV0vU6mFiUE/a9fbis/MIcbNdrs/HG5ubg+Hw2YzxBgc88rgQFQiIapkjsgROSbn2DkHhtKic9rGD0CiBNqEQoGKJ/ffWfQjgkOITCMzIaQqtfH0Tc0IjY2IDETUQJmQSYgoV01FSssCe5Z9/Kop/69d9jJ0aJT4WkslL6zK+lcfwH/9as0yBGtNfyAgZvbMjpBQwdqQygzEBAGqaQVlU25DqCY/bR8CYfDsC01iOVXTmmKuueZS5yJJFYm8I23KwMiEqEq1zNfL9OCOnx6Ph95tunia5u9//OGnTw+vN+Pvvrp3RLthCOzxZfkyVdFaVMq/1hP5VbkPAEBIzBy8H4Yhhnj/6vXt3avNbud9aEJwJnKOmKjJdEII7DwAV5G8BEdgWhxRYD8v19XuYFW01pzNjJlQoFAlIjFrfRxcDROwog9WIRABgOpzW6n1qD+r/59ftlnVXGpazYSkmzF8/fr2m6/u7w7bGPzjaX46Xx4vR6lpHHnYdp30eTCtNF81fVqWXD3zeJnePzwFco9Pp6fpkmyprlSfMRoPiFuiHVNkyI6mbnB+UahPH6cqyxug4CgNnKJRML/Bfm9C5XwF6t3h0LtX7DcO9x7vcmTGcbpeS1nm6Xw5n5A6qfVXH4Nai6IZej8OMTqXLM/zQgSx87d349df375+s43RVPMajbNyshXXot7yMyJVxKpArVZyVlVCZM/eO0IrImTg28enVrVWSQqGGOarHp+WnBWJqsrHx2OSBf//7b1JkyTJkh6mi5n5EltulVlbV/ebfrOBAHji/z/xyANFSAIggJl5S3fXnktE+GJmqsqDmUdGvX4DUHgk2rokpTozKtLD3UzXT78PU85pmNI0p2HKMWbJZmoOmFlD8H3fMj+ffZWC6iciOpWTlw7AItSrZoVaGpHIGs/rPry8u3zz6vrF1ebzx0Gm47x/iuNIyG2zohDa9Xp7fdOvt9y0wzR++fL5/uHrcHwSSey9qY1jUiFih+QBz7RDtFaQoZT7YRF4VlhYNkpE8FcKiFRayWoAyE0b1tvu6oYGO4x5TjKTBkujzp0MoxwnHSKlmW0gUICHlCCD5tQ5vDENhlOjERAZMJsWK1bpPhUsqQ45aUrgnPUraJrmatff3V2+/e7Vm9d3t7dX213nvSGmJFOOExSsmXNEACAIBYBh7Cownl3B2NXlXFHK8b5pmj714zgej8M42GyZvd9st72sipXyCzTUlkSo+OmiHlZG4kv+XiavVS1O8/E4DMOY4qyaus43gc20YPuRoI4Jl8YKIIIhap0RhQKKNsNSnwZbaFkLef23CnD1wGBFDiKoxWn++vjl/c+/vH//8/uvn+d5tN1t+oe/Dz9837JjMJlmjQly0pxBikRIGo/D/devT1lGH6gKoymmlFOcUkwp/RPygckhOXA3jtdsCRQAmqLkDLZCvAO+qCpwZlwGupGAAvN2d/Hy1dvb29dNaECXsStVW2hlAQARXeNDW5y+Lxpx7JzzzJ4VFRmBSllP1bIaE2Pg9vLiYrvbMXNMeRznaZqBgGt9q/6Okus3TbNarS92F1eXl9vtpu867zwj1RJ1ZUlzzplz6hw5RscYmLPzZCSodSzQQAHZVAo5HpM4CRzO5uvAlojMAMAISw8RAQkcQsu49nzpWgbb63QUjaapQPOrchKJCYqlrJGV2VK2nCFnS7kcmSVjoALexPP6dZkxWCS+K5hc1JIkl0kqPZeZgSmUEKwc+YX875sBwaqhw4pMTESO2saRIzEQMJHMxo6pNFYELJtkzSyIRVmYUQgUMYMiQtcEMZnaNM9pijGOs845RzmM8+M0HeMcJXkKTGCOAYCSIYBkGMb5y8PDq8v+3e3lrJJTev/l/f+K//vPr1+8e3UxzqoKqKggiIXpX9OUU5TnGnntcS05dC05FcAdhxBWq7VjXq+3V9e3m+1F03TMXCLwZ8h9CdGRgcyA2JQJg/eb1RYNGh/G8ZBjTGlKeUpxLnByx6XYzBnLWA6BEVSiVAMkZIfsiBhq038BZ5Qvy14+RXpJ4/3x05f9+6fx65wPqvO6Dz++e3V7fT1Ow88fPv75w5f7x4dx3veBXl9ftV1r6JLKENPjePx82OeDpVlFcErQOpdymnlOq0Re21VYX3Y3L3fbbd+wc4mnAWSyNnhMIX7S8WH+6Kfri24tr3Zt66lZSxvGDhWCTR7WmO5gvgXuDbuMTOS2F7uma4+HB9U5jOz5mS0CER1TG5wnbDw3jTOjh/tpOMowzU3DV1erl3fXL++udpuVSDwOEYutZGMHnhwRI/A0yWGYn56G/WE8jnGadI6VwYlL246oWAUwLTz7CqhmGVTFREATq/hpzsScc/7pwyf+DEyIBKpFyqGA9pURXKCri9XbN7u///13fdcuAVkJr5NjX/SGANE0n8zBCUPDyCcSzfWm+/Hdy3/zDz/+/ofXK8bHn/8lPXyd7u8txu3uuuk3vm3X283u5hqYH/f7p6f7n37649f7L6XFjOwQHQIje0CyShmKxe09Bx8L9gCIylkvrshKzxGUrLBHPHee0AxUYpwB6fLq6rsffdLgVn/653/64+HhEZ0d5of/+H//b+EPjciY5hlpvLjpV96L6MNxOKaYCTYIXmENmJgVmRw7Mc6A7LrgPYloLPBJBMtJmhAuX/R3L1fff3/95vWLu7u73XbXhgaRJKWcWHOAhAyJSJgJLWfRnKWgNZhIKStMCvE8W3ZEBAhcEOOhkJgyEdKEzpFKIyJVKaMWaBXPHptqQcgthqQyM2QtKqk5xpyJDEAQFUDMYrVJlS+QTmYaFRdOFz618asRrpa4oqhxaQD+RVKpZqKKoiYKZjml4/5weHqK06SIuFrh5aVcXE4XV0qEpjnMEqPmpDlrTppFUhqR7+f0dDwO45Seng77vQ9x//i4f3p6GieJ6Z8JD8Rb5xt2pUPpuUIEij/rCD3ynOLheJzmMUlSEVALoW03m+7mdvfm7eb2jpyXLKJ2UpR7/iSI7NgXtkLvnGfnHNdZNyos7GJSQDrFCDNi8G6z2XRdN8/xcDiO05Sz+IYrHQ2AmSGhZ3I+hNB0Xbder9frBQleuqxWzHAl03UFBUvgGIKj4Jw4ZSOhUggTU1QUAFU0JSAgc9a48N/J+A0AwAF4rH942U9akEhIYlDiViJbatJ17MYMCCrVbOkJMCNhqWUJVqKiv2TdO+36kpIvd910YRP+Vy7z7LEQsGffcOMZmRmInOtXnhjnrFlMxPA50jmRXhqCIlDFSgAaYjYlA0eudaFrmimkaYyioll
zysM0H6Z5zClKds45YgqkhALgHDtHhjalNMY5miTTKPJwOPzTn3+SHJuGFEq1gKOKqmlWTapJNetf/5znnxHROWq79uLiIuf1erXdXlyGtqWKxl9gN/WWUdFYpoq4AHNOfbPqjZCCc1PbTOMwzW6ambBotCTCvwjOEGCZQNMC9C81Ji7sSlAUhEpkbKi/yvhzjl8eP/z8+Y8fv/68Hx68dxfbzdXlpuncp8evv3z59NOHL8fjsfGi2Iz79DDOTxn2D/GXj48f75/uj4OMIBnHqPfHqWvYtxh6ClvsVi31rrvouhdrbHg/zBoVB24RdtilmfTBKTd523n/4ubyu1V3SeCZ0EsGAL/m4De9vyZba+IskPMBALzzaslsBkzsjL8hIAUmbAJRQ41nQNz3zapr2jDFnH2gzaa9uOw3m6breI6gWWuriYEcEjoRnKb0+Dh+/PTw5ev+6Wk4DPMcJSVVVTBgRMbT4ExtqCiYGGpRnVK0bGjec2tVoR6GYTYzH5xjQjBGDJ6ZHTE1ntdduL1Z//Du+uXLixCelQeeg+eaZ+tSmS2TxQqAJ1KBsg+aJrx9++qH79/cXG7luJ+fHoaHr5Bi33U3r1+364soiZiy5Hka7u8/f/3y8fHhy+HwVDjHQIAInXOlrA+n316vpw6EWwUn1x1osEwVV5C3giqcs8SYieSUZoUoGLp+e3WzG6b8eBj/8PMvCZUI5zx9/fohePIemMh7aHzou/WcNYkdDRMqgo4AAVDKRPnS03TErmPzhZ9aCdixD23Y7vrbu+3rN7u33128uLnYbXYhNCCYs0nNLagMyDMCM5qQKmVB1TqhI5KSRrH8jeMvH1xNAYGda7FzzjdtGMd2noZxHOM8xZQKILBWQcr5xzrMQejK81OVlNI8z3OcY5xSHtWSY1hvmtdvbu5eXrcdAyTAXDjFQBCx0HadcBYFyi1Vj/K0dU6PTcucfmGdPW+LV7mznJJZESxVVUtzAoG+X794waEJ11fXOeZPHz6aSM6SU9H/lkUXV001ztNhnPaH4fGw38xxdXnVNM37X3758OnTl/0hxpSdf2zcruu6tnXeOw7BN8yOHZf4KQTfN2F/3H/4wx8+fPjw+HScphFVLy/d31xc8tu37ds3/uY6E2vKoKpSqQpPGcxpVrmIuFLNFgtwkgAk5ZRSUlAiCt4ZMCEw0arrmPnx4fHLly/jPBsAEZehSirkd4Q+NL5t2rZpmtC2bQiekbFUF4sHL0RpgFzLL+DIPGNw2Hg2CRmlsFqpiQmZCpCWY8KQ0UHD/gx3WYn7rMKxrRwsAvSGAcgDSJbHPGWRMaYp5wwopMYMhtnQO9cFLqQ9jSOHZmSNpzY4UTFQQONKQ6xEOZUSezVYzwnuyc8sl4P2jXr48tq63U75yHPCT4Rd69ertgmBiMXQebfZNEw0zBKzFI6RnCGLiBWCUnCEDtGBGWSDIi0HasaKXtiB75s2dnmcIpYp6ZynGccpzjlnNVH1zME5cJYR5s6tNqFrAjduFPnDl/tPj4coCkRjjA/D8HV/cMG5QG3vZSwEmGa5RMNnjTpYULIGp4+JxduC6/v+xYtbVXO+adoeEVSzKiCSIhZQmCoiOEYFBkQEIpZslROrVv7qQK9jIkdAZpYimCUAEBU1KZhDMzHLFeCFzEVQgtksLz3AZ5SaqJiJnfXHYo6f7t//4f0//fOf/8txnu9ubq+vroHsEPcP09eH8eswPTmCN7e7lsPxa/58/3B4tIev4+fPD/vHMc1mSvOI98PMoG2L20v/Yrt58+pme7VSRGTcz9P4OH36+DUfZWvbC3+ZUkhR8Nh3TVjLdy+af/jx7n+6uLgFpXkanw6fkPDi7rbtNmQYUx6O4+HwtN9/ynnyrjDHpJxmcDN5BXo++0zmPQbvmORwiATaBLdeteCw7Xm18W0H3mcXhBybOcKA6JFYDXKCw2H48PHplw9ff/n589f7wzjnmDKYMmKpGzITkKEVst4i0YAClnM2A8BCXe0YGNVSKjgL9K5lptAFT2AyBw+bdbte96t1v+7bvnMXm+b6yq97PtMaWyqwdXDj5IahjF+VAlJBhAOAipgJE7+4ubm9uXGO9sP+y8cPT/dfg/e7q7vvfvw9hvYPf/yXT58/zT+Nh+Phaf/4tH/KeS5JK1JhyUYxA5Nay4dleHw54ljbKWoIVbqlDo0DIirqUoJ9Di7VLKZ5HA9iAq53fdM03K+wXZGxJVVSIiNHvFl111cbRHi835uhD505DCE3gqBCkLJBqo0ZFEAtimwhNKvgG2DHfR+ur663m223bre79c3tbrttuw6Dp1LJtjJtqYagBGKYETOimZEYiJKaU0KRGGPMedYknXyTCbnTAypn1ZVj59h7NwfvvJ9Gz9MUYxLJaiVOgiXaPxVMaipGpM6R977twkqlaRsf3PX11e3t1cXlmgkMcuE/qkD6Aq/4FtZuhRZomfb6lonN6utsqd3C+Q9NrZIAEHNoms1m8+LFCx+CiHZd2/f9Zr1moKRSqQKQFE2QMlqpwYrz0HWchVMyxONxmOf5eBxyFhcaA5CuH7ue2m5uW++d59D4xrNn75xzwfumCdy3wGyb+7w/xnGeFQjBtrvw4ra5fmGrVWSWLCa59DEKyOX8s1ChULeF6BVKw7MywotITjkngZwlCwiAGRO1bUtEDw+Pn79+neYJaoLomBkB2Iwce++bpmIHnGdCLgg90FLC05q9I5cD4QgcgXcQHLXBoWJCLmUgVTImUwEFUxBTRiK1xgXC/1bGj1AoQahjDgwIlrLMOY8pzyICqFRMhQIIMRFV+ncoDHSmzOA9eSFRNrOMagakSoSYEcxUSWRBrv167hsAisauLip4uoj5qZZddzZ9VJf3/u7F1dN3LxsfiFjEvOfVuiGkMUrKKqpzlHGSKeVswsyrrlt53yI5QgFRREUnCGrKjI0LaNB3bd8GH9yqcZtN37YBmJxzXdusNfdN0wXfeI8AqXHkAAhWTdhdrqn1T3F+inPo/GbTeQB2qGDMuNn2CTIipCievSeRde675rz/en4rShWlEP0jAIbAG6cKgETs1DTnjAiADEA5a06Ss6iYGRARM1NJXWFpA5xxIFbLblYIfFLKp/Fuqv0CW2KSmv8hLoWbJRxTVQRYUP3fyDBmkcfh6X7/9TDsgf3lxdV2dznmeDzuH4fHOQ0Nw7ZpX11dgtDXn+4/fBy+fIqPD9NxOKa5EPzSJErAwRF4Dg4nyqNlmuM8ZRF1RDHn4SAeQtNt1/3NbnMLgoZdCOHu4vvr7dvL9evOb5+ejsNhGA/Ydm0frtt+I/Oc00iQyvk11XlKZoJgSNi0fdetiM6VOYGZHLsIlmLOMTvGENgp+cBd57vOsweiQn6Hc9RCshiTjFO8f9i///D505f7rw/HacpI3DVMqMFR14a2Cd47X1oCTM6Xk6rJJKYsCoDM5AN7EIxTOuxneZCUyBF7z6uu61tufN/3vFs3m22/2a5Wfds1rmugDdm5b2nhkQDJAKUWF0oJ5/lAYk1m6okrrFHlMKacU85i5trmcrPqr2+arj
uk9PXh/qdffjoenobhOMexcJMz88KvjWCoVVqmODY7/b3smlL3V6s0BJWxtcLJ0JCsalOeO37NknKeFYSZmVMI0HXQtsCkBiqCZLRuuuvN7tXNleQ8PB6HIY40ZSDKEIRBzSOTA3TBuaBKomPKamBo6lW94+22v73b/fDu3fX1dbNq+r7b7DrvWWVWzZqTaNacVdXEwJRIDQUpIxiSIySnAdkLGMgMZqBGjsn58wkGV1LmiqypdwWIuW07733TtlM3DYUOeRhTivWsL7QqQGhKWDupxOzMIMbkvNvOkdit16vNdnN5uQmNU02qUsiRABSgQoTLcFg10HW4F2o0eJalQZ3nLaVTq5X/8x4MEXJBprEn3F5cNF1z9/JVyskMag8DwNSK6ncWEc1ZJJevOYtIyinO8zzFaZoAzHufUvLB73a74Jzm7NgTM3qHRGqQJJtposiZS3rt59DMY87SNN311U3wTZbkfbi+fXH7+k272Y5Txod9TeUXy1VUqupnKVCHorBABgogFUsJiqgIgpo0jlEmOrQHNi85Ma1CCAB4//j45cvXaZ4NgZ3zvgxwIyKSc8577wpjuociJC9VEh1tAU+YIQADKiITOgbvoPGowZGRQ8tiJqpKZgTmwMAURKMlaY8AACIASURBVNUho0EI/htUv4FW4nhDQEJjBMfUeu696xhBbbIsSVQ1Z80IBkBGZHaS/LEyPgcGZmKiYFySaSZRAgM1Q2BGQiQ7CfMwkyqKkpIWfqq6xytMpUBfisivqokqVIZRE8k55Zzl5GO6rv3xx3frNjYuEKKoEqFvPBKpmohJlmlOT8d5mFOSjIR907XOBQQiMxAFEHTZLKkwYRsaj2QKwzzdDU+NozfX19uuywbJ8at8XE+hC6Fx7MmZWRLZ7trLi37Vtm9e3gTvvjw8UEOXV+u2bxjgcr262HZd23BLTcueeJqlcY2pbTbNzc1F8GcCsFWB2xZ7qEV/A5GRERQRTRRMVVJCsAp5Uk5JYswp5pSS5lyiS/ZM6LEUHalgM5ajiQRAhbwl5XmOVmnRzIpMEBou/aWK+AEVFSibEmo6qGimZGpyKh+fjPKsScCatu+69Yvr265ffdn//HX/eX98YtSXu83N+uLlxfVhn+P89Wk/Ph7imCJ58EQ5agIRlb5rr293200XvBnghz8/pPjp6X4Ahc16fXW5u7t9dXd99+ry7cvLty9v3jLwp6+fzfT6and5eWksH79++M//+b88PDy0jbu5ub7cH5lczslEmuDd7mK76sdpOOwfp3FUEXa83nbb7a1zC3NfZZQiFYoTHJ7S8RjBFElTSiFj0zRt1zlmNYuzHI/x8UEO+zQOcRjH4zgehuPhOKQswbvderXb7rrWI2Tnse+argtN59vGheBCYO8ZCVUli6QkC8yOnPNxzg9fD+9/eYoJ4zxmM09u1TY3L9av7tYXu6ZrrOk4NK4JvnHMkHOe1dJZlw0BSYGk9IcXLk4CoNL2Wjp4VhR9JM/z9LTfv3//4f3txbohZb++ub0DF7qtcHP/+PVP7z/81//6nz+8/yXnqCqGioBlnAYArRBpwam8X5PD+jv+osdfwo1C2YtYiOSgZAQloV4gCFC+q2IgzkEIEJwa5qaRJqhncwaSzJu76rcvL67udlfDcPhT0uP90+HrYOBUXGPo1NpATc+h6Zt2DVE1P44xRrRoKdroQ7dZX75+dfO7v3n94uYGHS72rDbRa9ZrSSSKJDKrgzaWAYgIGJ1vG1XOBilHCk1Ok+XU9evz4PI843/+W5EoKVRxzL4wxzC7wtGRUzar1hCrTDYscuTYtu12tw1NI5LZ+9Wqb7suBIcEkssLy7UvGLJKxXFy/EsXEYzOLqs8J1oeH5xhkc4NWVVcR0Iw9r7rOlig7OWBSs5F0H1J70RMcyEhzAUkLjnnnOrfRGSe577vb25mzRlUECpuS/TE/GcVrYBEXBn1DaDru5vrmzlGA/Oh2ex2dy9fbrc7BJKUkRmoYkq/2WEAUJJ7pSoElwEIgBGUCnErKqKgZohjOjweHHrTUmtxADAMw+FwjCnpko0VvorSivDel2mBwhBcuwx66mnVBgoBIJEBK5N4CgImhIaMmghzVskL5WVxxwpZjZDALLhnYqVvVkX3oWfqPPeN773riApH1sQZCgzewBCdAlBBdqmIpixWsEEVB6y5AmGtIkRrvFj1FUucx0wkRETn5ZTSRmHGMmFWYr6cOTFDBl20JkRyTDkX2XIAAGia8O7tq4u1BQpYcPsI5F1JNUxNROeYD8c4xpQkA0Dw7Ik9IhUwJoAAiVmSjAiNd46IlFLOhzQ6xpv1qmGfBMJmsBUPcWpqpOnMLKkkTSnlvmleXl0BogWkLmwudykpqq7b9u7yom3CzvJ22Kz6fp4luBYAk4xv39w2TTh/HAUNXkIiRTAVK2n1CaBQwbyWERBJRFVzShJjijHmHIviEzI6x1hmbk3NvMGCJyiYaoOcU5wHJCojFaqqBguBeOliMVQeG1MVUJM671d7T8WGl394vtR0TlOS5J3vmr5rOgR6ehqfHo+WZR3C3e7ien0ZXGcyiIKh+g6UGMzQCIGzWIy5b/3upt30HarJnIf7OUfxqe/b1c3m5s3tq7/53XdvXr65uXh1vbu92FyZQtN1Kmm77X0I0zz88v6X/+s//B+Hw/77d293u800DU0IpaPh2XlmY2dZhkJkiuxd27W7rt3ymVEumPyUbBrleMhxFh9c23k+EAAheM1uHCSndNiP+6f4+JCHY84xzjGOcRRJXcO7ddu3/W69vt5ddJ03VPbQND60rml927qm9T445wABS8Mxp1xapIjE3k1zbgOq2GGPIE0UIUYCDI5eXO9uX/QhZOe0gGsCs0mpnf9K0RJObPCVh1WX6VF8tn6LIIvK8Tj8+adfLrddw+BREzvsegv+OE5/fv/5D3/686dPH572j1giFEZiV1qjz7gvO3n5Z4r30++CJc61M793EvgqEPPqHM73GGiSFCW2zgEqgDjmdR82m27dN43nccwSk8w5jvHweDwOwzykOKWUEoDz1DI4AnOOAJ2hMzVUJQTvKQTu+7DbtK9f79798PLdu9e3dzcXuy0glD50ytkYi5gCcSZEJGZlAvPMiGaWzJDIEzWEnYHLCj4H9pxT0DT5pj2nKD0L/wFK0aecUq0hEXkfmDmEpuv6cRz2h+M4DCnOknMtizwPjKKZMfNq1Xddh1jKhExUGKILWWkpr8D512qwlxyxoA7K/n/eOxUJUHXllcwWZvXTK8oTLqSvtSGwcPvXFM+B8wErbgBqXQfAqO4TVVEFrR0DtUJhKDnmLFaYJpWKuRQRUdFKGqRW6BJOu0tOe2mpeDp2oWmCY8/ssFDqFBwpmJWXPd8BYmBnDpVQEAgs1xEnEERFUiJgNpakTw9PngKhFTkfNYtJYpJUeaYRkFSNwArlb3X8TSjyd6d8HM1OQFYqM10lMFOnRsXrF8RfZszZUqICHSvzCKpAakgAIKG8cz1SxcGc9OIIibrGbVq/bsPKuUCkYlmBUwYEBdVS2ih7S
[... remainder of base64-encoded PNG data omitted ...]\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 10,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "\n",
+ "examples = next(pairs_train_reader())\n",
+ "print(examples.shape)\n",
+ "show_collage(examples)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## A network that turns an image into a high-dimensional vector\n",
+ "\n",
+ "Our goal is to first map every image into a high-dimensional representation space and then compute similarities between images in that space.\n",
+ "The network below converts an image of shape `(3, 32, 32)` into a vector of shape `(8,)`. Some references also call this vector an `Embedding`; note that it is not the same thing as the word embeddings used in natural language processing.\n",
+ "The model consists of three consecutive convolutions and a global average pooling, followed by a linear (fully connected) layer that maps the features into an 8-dimensional vector space. To make the later cosine-similarity computation convenient, the output is normalized at the end with [l2_normalize](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/layers_cn/l2_normalize_cn.html) (this takes care of the denominator of the cosine similarity).\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "QKurk9QAC1J8"
+ },
+ "outputs": [],
+ "source": [
+ "class MyNet(paddle.nn.Layer):\n",
+ "    def __init__(self):\n",
+ "        super(MyNet, self).__init__()\n",
+ "\n",
+ "        self.conv1 = paddle.nn.Conv2d(in_channels=3,\n",
+ "                                      out_channels=32,\n",
+ "                                      kernel_size=(3, 3),\n",
+ "                                      stride=2)\n",
+ "\n",
+ "        self.conv2 = paddle.nn.Conv2d(in_channels=32,\n",
+ "                                      out_channels=64,\n",
+ "                                      kernel_size=(3, 3),\n",
+ "                                      stride=2)\n",
+ "\n",
+ "        self.conv3 = paddle.nn.Conv2d(in_channels=64,\n",
+ "                                      out_channels=128,\n",
+ "                                      kernel_size=(3, 3),\n",
+ "                                      stride=2)\n",
+ "\n",
+ "        self.global_pool = paddle.nn.AdaptiveAvgPool2d((1, 1))\n",
+ "\n",
+ "        self.fc1 = paddle.nn.Linear(in_features=128, out_features=8)\n",
+ "\n",
+ "    def forward(self, x):\n",
+ "        # three convs -> global average pooling -> 8-d embedding\n",
+ "        x = self.conv1(x)\n",
+ "        x = F.relu(x)\n",
+ "        x = self.conv2(x)\n",
+ "        x = F.relu(x)\n",
+ "        x = self.conv3(x)\n",
+ "        x = F.relu(x)\n",
+ "        x = self.global_pool(x)\n",
+ "        x = paddle.squeeze(x, axis=[2, 3])\n",
+ "        x = self.fc1(x)\n",
+ "        # L2-normalize so that dot products of embeddings are cosine similarities\n",
+ "        x = F.l2_normalize(x, axis=1)\n",
+ "\n",
+ "        return x\n"
+ ]
+ },
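Because the embeddings returned by `MyNet` are L2-normalized, a plain matrix multiplication between two batches of embeddings already yields cosine similarities; that is the property the training code further below relies on. A minimal, self-contained numpy sketch of this identity, with random arrays standing in for real embeddings (nothing here comes from the notebook itself):

```python
import numpy as np

# two batches of 8-d "embeddings", L2-normalized along the feature axis
a = np.random.randn(10, 8)
b = np.random.randn(10, 8)
a /= np.linalg.norm(a, axis=1, keepdims=True)
b /= np.linalg.norm(b, axis=1, keepdims=True)

# for unit vectors the dot product equals the cosine similarity
similarities = a @ b.T  # shape (10, 10), every entry in [-1, 1]
print(similarities.shape, similarities.min(), similarities.max())
```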
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The training of the model proceeds as shown in the code below:\n",
+ "\n",
+ "- The `inverse_temperature` parameter keeps the softmax in a region where its gradients are more pronounced (compare the `scale` applied after the dot product in [attention is all you need](https://arxiv.org/abs/1706.03762)).\n",
+ "- The computation first uses the network above to compute the high-dimensional representations of the first 10 images (the anchors) and of the last 10 images, and then uses [matmul](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/layers_cn/matmul_cn.html) to compute the similarity of every anchor with every one of the last 10 images, so `similarities` is a `(10, 10)` Tensor.\n",
+ "- The class labels constructed for [softmax_with_cross_entropy](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/layers_cn/softmax_with_cross_entropy_cn.html) are simply 0, 1, ..., num_classes - 1, so the training objective pushes the similarity of matching images towards 1.0 and the similarity of non-matching images towards -1.0. A standalone sketch of this loss construction follows right after this cell.\n"
+ ]
+ },
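The loss described in the list above can be written down in a few lines without any framework: row i of the scaled similarity matrix is treated as a vector of logits whose correct class is i. A minimal numpy sketch under that assumption (the function and variable names here are illustrative stand-ins, not part of the notebook):

```python
import numpy as np

def in_batch_softmax_ce(similarities, inverse_temperature=1.0 / 0.2):
    """Mean cross-entropy over the rows of an (N, N) similarity matrix; the label of row i is i."""
    logits = similarities * inverse_temperature
    logits = logits - logits.max(axis=1, keepdims=True)              # numerical stability
    log_probs = logits - np.log(np.exp(logits).sum(axis=1, keepdims=True))
    n = logits.shape[0]
    return -log_probs[np.arange(n), np.arange(n)].mean()             # diagonal entries are the correct classes

sims = np.random.uniform(-1.0, 1.0, size=(10, 10))  # stand-in for the (10, 10) similarities Tensor
print(in_batch_softmax_ce(sims))
```

The smaller the off-diagonal similarities are relative to the diagonal ones, the smaller this loss becomes.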
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 1000
+ },
+ "colab_type": "code",
+ "id": "v0qA9wX1C1KD",
+ "outputId": "e8d1b3bd-5412-4081-f30a-d2303b041578"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "start training ... \n",
+ "epoch: 0, batch_id: 0, loss is: [2.3078856]\n",
+ "epoch: 0, batch_id: 500, loss is: [1.9325346]\n",
+ "epoch: 1, batch_id: 0, loss is: [1.9889]\n",
+ "epoch: 1, batch_id: 500, loss is: [2.0410695]\n",
+ "epoch: 2, batch_id: 0, loss is: [2.2465641]\n",
+ "epoch: 2, batch_id: 500, loss is: [1.8171736]\n",
+ "epoch: 3, batch_id: 0, loss is: [1.9939486]\n",
+ "epoch: 3, batch_id: 500, loss is: [2.1440036]\n",
+ "epoch: 4, batch_id: 0, loss is: [2.1497147]\n",
+ "epoch: 4, batch_id: 500, loss is: [2.3686018]\n",
+ "epoch: 5, batch_id: 0, loss is: [1.938681]\n",
+ "epoch: 5, batch_id: 500, loss is: [1.7729127]\n",
+ "epoch: 6, batch_id: 0, loss is: [2.0061004]\n",
+ "epoch: 6, batch_id: 500, loss is: [1.6132584]\n",
+ "epoch: 7, batch_id: 0, loss is: [1.8874661]\n",
+ "epoch: 7, batch_id: 500, loss is: [1.6153599]\n",
+ "epoch: 8, batch_id: 0, loss is: [1.9407685]\n",
+ "epoch: 8, batch_id: 500, loss is: [2.1532288]\n",
+ "epoch: 9, batch_id: 0, loss is: [1.4792883]\n",
+ "epoch: 9, batch_id: 500, loss is: [1.857158]\n",
+ "epoch: 10, batch_id: 0, loss is: [2.1518302]\n",
+ "epoch: 10, batch_id: 500, loss is: [1.790559]\n",
+ "epoch: 11, batch_id: 0, loss is: [1.7292264]\n",
+ "epoch: 11, batch_id: 500, loss is: [1.8555079]\n",
+ "epoch: 12, batch_id: 0, loss is: [1.6968924]\n",
+ "epoch: 12, batch_id: 500, loss is: [1.4554331]\n",
+ "epoch: 13, batch_id: 0, loss is: [1.3950458]\n",
+ "epoch: 13, batch_id: 500, loss is: [1.7197256]\n",
+ "epoch: 14, batch_id: 0, loss is: [1.7336586]\n",
+ "epoch: 14, batch_id: 500, loss is: [2.0465684]\n",
+ "epoch: 15, batch_id: 0, loss is: [1.7675827]\n",
+ "epoch: 15, batch_id: 500, loss is: [2.6443417]\n",
+ "epoch: 16, batch_id: 0, loss is: [1.7331158]\n",
+ "epoch: 16, batch_id: 500, loss is: [1.6207634]\n",
+ "epoch: 17, batch_id: 0, loss is: [2.0908554]\n",
+ "epoch: 17, batch_id: 500, loss is: [1.7711265]\n",
+ "epoch: 18, batch_id: 0, loss is: [1.8717268]\n",
+ "epoch: 18, batch_id: 500, loss is: [1.5269613]\n",
+ "epoch: 19, batch_id: 0, loss is: [1.5681677]\n",
+ "epoch: 19, batch_id: 500, loss is: [1.7821472]\n"
+ ]
+ }
+ ],
+ "source": [
+ "def train(model):\n",
+ "    print('start training ... ')\n",
+ "    model.train()\n",
+ "\n",
+ "    inverse_temperature = paddle.to_tensor(np.array([1.0/0.2], dtype='float32'))\n",
+ "\n",
+ "    epoch_num = 20\n",
+ "\n",
+ "    opt = paddle.optimizer.Adam(learning_rate=0.0001,\n",
+ "                                parameters=model.parameters())\n",
+ "\n",
+ "    for epoch in range(epoch_num):\n",
+ "        for batch_id, data in enumerate(pairs_train_reader()):\n",
+ "            anchors_data, positives_data = data[0], data[1]\n",
+ "\n",
+ "            anchors = paddle.to_tensor(anchors_data)\n",
+ "            positives = paddle.to_tensor(positives_data)\n",
+ "\n",
+ "            anchor_embeddings = model(anchors)\n",
+ "            positive_embeddings = model(positives)\n",
+ "\n",
+ "            # (num_classes, num_classes) matrix of cosine similarities, scaled by 1/temperature\n",
+ "            similarities = paddle.matmul(anchor_embeddings, positive_embeddings, transpose_y=True)\n",
+ "            similarities = paddle.multiply(similarities, inverse_temperature)\n",
+ "\n",
+ "            # the positive for anchor i sits at column i, so the label of row i is i\n",
+ "            sparse_labels = paddle.arange(0, num_classes, dtype='int64')\n",
+ "            sparse_labels = paddle.reshape(sparse_labels, (num_classes, 1))\n",
+ "\n",
+ "            loss = F.softmax_with_cross_entropy(similarities, sparse_labels)\n",
+ "\n",
+ "            avg_loss = paddle.mean(loss)\n",
+ "            if batch_id % 500 == 0:\n",
+ "                print(\"epoch: {}, batch_id: {}, loss is: {}\".format(epoch, batch_id, avg_loss.numpy()))\n",
+ "            avg_loss.backward()\n",
+ "            opt.step()\n",
+ "            opt.clear_grad()\n",
+ "\n",
+ "model = MyNet()\n",
+ "train(model)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "v2izWWI9PutF"
+ },
+ "source": [
+ "## Model prediction\n",
+ "\n",
+ "Once the training above has finished, we can use the network to compute the high-dimensional vector representation (embedding) of any image. By computing the similarity between that embedding and the embeddings of the other images in the gallery, we can sort the gallery by similarity: the earlier an image appears in the ranking, the more similar it is to the query.\n",
+ "\n",
+ "Below we compute the pairwise similarities between all images in the test set and then display some of the most similar ones."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "IDE4kmUkPr_T"
+ },
+ "outputs": [],
+ "source": [
+ "near_neighbours_per_example = 10\n",
+ "\n",
+ "x_test_t = paddle.to_tensor(x_test)\n",
+ "test_images_embeddings = model(x_test_t)\n",
+ "similarities_matrix = paddle.matmul(test_images_embeddings, test_images_embeddings, transpose_y=True)\n",
+ "\n",
+ "indicies = paddle.argsort(similarities_matrix, descending=True)\n",
+ "indicies = indicies.numpy()"
+ ]
+ },
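When the `indicies` matrix computed above is used to look up neighbours, note that every image is maximally similar to itself, so column 0 of each row is normally the query image and the real neighbours start at column 1. A small numpy sketch of that slicing, with random data standing in for the real similarity matrix (the sizes and the `near_neighbours` name are illustrative assumptions, not values from the notebook):

```python
import numpy as np

near_neighbours_per_example = 10
similarities_matrix = np.random.rand(50, 50)         # stand-in for the test-set similarities
np.fill_diagonal(similarities_matrix, 1.0)           # self-similarity is the maximum possible value

indicies = np.argsort(-similarities_matrix, axis=1)  # per-row indices, most similar first
# column 0 is the query image itself; keep the next `near_neighbours_per_example` entries
near_neighbours = indicies[:, 1:near_neighbours_per_example + 1]
print(near_neighbours.shape)  # (50, 10)
```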
"iVBORw0KGgoAAAANSUhEUgAAAuwAAAKoCAIAAACjiz3RAAEAAElEQVR4nKT9SYxl27YlCM1iFXvvc45V7tdv8d77dQQhpCAzAokQIkWPFmQjBRJSIiQ6CEQLJHo0oZUSEkgpJJQNJDpIdGjRRQiJBkmCIILM/PHzl++/W7i7Vafae6+1ZkFjHTN3v+/9TwD7mvs1Nzt2bBermHPMMcbE/9V//58AALgRUmBCQkB0dxVREzVFxBACIpq7m7s7gCM6khO5O7tmcAI3IuXQCB2BEQgJAMzczEzNzdwdEAgQHdAM1UAcHQABAMHc3BER3F1EzUCd0RHdEQzBHUzdxaAaLLWeTqc/+cf/xn/1v/0/+eUf/2MA+M2Hx//d//Hf/9O/+Z5RA3OKIQ9DHAaK2YkNyMwBHAEBANwB+geYOwC4O7wcl88df/YVM/vZKz8//PI+AAAGDgCOBgCEQAhEFEPIOWKT5XA8H47H03xe6tzscG5PT+d/8o/+4H/43/qv/KM//A4A/vRf/kf/zv/0f/wf/Af/l+3mOobBzd3AwXdXu9/7ve9+9atvf/V7v7i7u05DIHY1AYTATEREgYgAkAgRkYgQCZEAEQkJiQgZGc2kyXKc9/eP9+8/vP/+h8ePD/Pp2EolMEIndFSx1kCqSyP3wDxM0/Xdm2FzBcPwfJz/8td/+/R8dKMhjde7G3B82u9PrVayf/LP/tl/97/3P/iTP/mHP7tDn32O8K9+uJxOx3Vd3SyEvNleA+KyHg+H+/3Tb9zq9dV15FBaY4q7zc003aXNDZjX08fl8PHw/LG2wnkSWebzD4fzx+d1f/90+umn8vxU13Nd5nVZZjDPeZiGadpux3HKeailPt/f/8Ef/YN/67/+3/zl7/0BAPztr//if/u/+ff+5X/8/0w5M4fPn/jvuiL/8h+uZqZqpp/9iCMiESMBIgE4+GXMMMdhmHIeh2EkIhEzVVUjopwzAJTapFWVBuAhBCIEAESMMRKRu7u79VkHoCJrWf7oT/7T/+a/9d/47he/DwB/+pd//u/8e/+Lf/+f/z+maSLi1oSHvH17k8aBzdp53t8/IMfbX36XrncG5GogYlVrrQAQcuIYgYmYOUTCvmpoK8VU0MFVrTVXNXB0dVNrVdbVavUqJmrufVHQ0tbjSdxpnKa3d3e//OV0c0spBebELNLOp2NTQWZCclWvpa3zP/0H/+i/81/7t//4V38AAPvD8Z//i//o/uEpxpBSyjnHGJmZiMzMzERERFprrTUxVVMHBAQiCiGknGKMKSRGAr8sWE1aa7W1JiKqagCOQEQcQkopp5RSYiIwNzdQr7Wu61prFREzI6IY4+uZ9JPpE7Kpnuf5NJ/O57OIINEvv/v2P/ev/Wu3t9cAcDo+/fmf/d8e738Ad3AnN5NWlqqtgXtgTDkyo7sjuLmjuYOaibsCAEJQhbKCqpkpgPXRZ+bgAAjMlFIIIcSYAMnMtVmrImJq5gaARERM5GalldaaiCFhiBHQzYWQUxxU8XSqpVZHBTAHBDd0/9Uf/cl/4b/0X377zbcAUA8f7//j/8Pp/Z+br+CNCIgihKlanGcqzdwM3JgACN1pXuvHx6cP948/vv+wP81mfnv75h/+p/743bu3OfE6l/c/3a9LDXGYxun6+pqZluXcpCgAEGFIQOwWVGxdm6kioZnUtiJYGGIac5gmp1CKxTBeX7/dTNcpjczsfWpaE5XqtUpdW/n67bt/9k/+6d3tHQD8eP8X//v/8//yz/7q/yrNCEJOm5jSkIlZHEqMNEwjU9SGU7z+5vpXV5ubzLHZ/Dz/NMu+QF2K7g8mjcY4DHmz2ewQYFmO4G3MlHMKMRuEKiCqqk1aaW1Zy7ysJzPhgDEgJ1Itp9MRVIaUp7yZNtfu8bw0NR9GGlMe0q5VeXi6X8rMUdOQp+nt73/zr/9n/+G/ebv9FgD+6j/5F//rf/d/9B/+3/9PwzCNwzDkMA5pHGKOgZhjjCmlECITcuQQQggYGGLicYiBAESIMeUUp3HYbilQs9JaVWnlvJ6eT2UuahoST9stUihFa7EiWsWb0LLI6bA+PM7f388/PMzfP6yHWdwALhvW6w6BDuDmDg4AaiqlLcuibfnP/xv/xX/3f/4/+6f/9J/0VwYD7Nu5gxEAASEgvHwgIn62OOOntdrd3fx1oUYARMCXn3QAA+9/W59IDuCADgBIDmSIBmDwEsSYm8NlGQZHcAS4XJC//DT4ZxuDX87xy22DQBmdUQkArLqiIwAHQEag/+/2zv+/DwQEdOxBjLu1ttTS5vn0+DyfTqVKNQCMhD8/KwQIxENM22nMeTQzEa2tmcnheLx/GPIwuuPVzWYcI4VABC9X9/JW/vL3yxe0yVybiwJiJM4xhhCGcdzuduvNjYoS4XI+Sy1mYu54WfT67XdVlVrLMgMiarVSEuhEYOpRlEoxIHJgRKQAyL91q/1nm/oX33n53suduIxJM1Nt6zofDs/rsrpDioMDIuB53s+nfSsngFpWFaLaGmMMqAjmIOhelsda9qozgCImROuBgGpTEwADNHVxN2IOIYzjdpqmYdimlIkDkgIxEP09j9j9NYhx6IP3t66t/3FwRAqBmAaiHsq7ad8xrU+mz0e0u6toxeoORGjmpm5miGSmANBEVERVwF1N+yhCRBHpEQUAvAYx0tpallJKD8T7KzkmTgmIHMBdXRXMwbzvZRCYQkBmRHpdUPob/uwye4xO7g5ggRHcVAEBmBCA+sW7ffnAARCRCMGNEAhc3d1MVWrV1jAEYO7JADEzOBBdphRzyDnkhC+PBhF77BICMzMAqOolelNV1Usg8nLyzAxIxMQvBwKqqbm6mppK66/X19cTIhAy82VRR+zvZk1ExER7nAQA/SXMFEIMIYQQiKi/vj+v2mQpq4hcgpsQUkpIn6VMZq7ax5a6mqq7uhu4maH2tc2sL8JgbqZu6j1ecVdFVTRzNQW3S6pmjoiE6OBqioqADZF6FNSfuZk6AMFlFF7yT1VVQ6cQgZkJHAHNVBUAjAiQCQD7XmP6xTRHxBBSyqNDBDAmJArAORgheGyqrYEhUQ+cYooB0QNTINgdTqXWccoMVsvSGtW1ijnHuNludtvdbrcFN5PVxEWagaMpckRwdGdQd9FqDsKgMfI4Zs6pmUmrrRgqa2vWmgID82UmuyEY9w9XBv18nDMC4yUPRnBCDwGIoImIQCuBYhrCZpdvNvlq4A2Zq4RgKXhWQLLqbXU1TMIsSA2RmPsjclUFVIP+7MAB1L1UKVVE3B2AEBS09hGt8DKwa61IiMiBgXv2IEVVAJyZQ4wxjiEOFOLn01bV1X47/eo5SeDAzD2z6hEA9v3eDAwAAcys1eYIAEiRDU3NVcAMEanPQULqqz4hMQGjMbohMAEz5sS7kW8mPq3BzNfmap/f7C+3/E8AymVgff7KIEAA2h8eOrNHBuw7LyEBGBIgAhKgec9d3E3VzB0cEYAMEZDo5QMN3cAAXAAUwdDcDd3ZgJ0IKBiQAYqROroDuvcFktADEoIzIpGbo4E7KrgDGoC5Ww99AB
wREPH1CSAYoTJKIo8IZA2kmq8eEoaMnCkMTtz3mpedBT/dEHxFaL5caj/befsg+G0k5gW5+TQgejzX/9eHPprXZdk/PD59vH/88Yd1WeMwxGmXr+8CMSJ++YtooHQ9br/76u10tRFt8zI/74/zPP/m+/PDw/7+fv7lL5ff/8Pvvn53e3WdY+S+pLqRwyXwdEB3RL8Egsu8fnx/fzqeVPVqu/3lL36x22wTp3EYt9vt9e31xw8fH+7vnx8f6/ms5uiOiIgEDqiqpiZNpcRTCima+xXIGLxVs3WlpoohEMUQWggY8pf7nAP0R+cv9/Dz6fOKhKFfzh0R3NzWdTken5/2Dw8P75d5Jghjnm6u3gDg8fBQyhPiE/Nq1cCl1saU6nxXxttxukGEOj/WchIR4kQYEZtqrW1Z17m21dEdpUkR1xjTZtzd3X613V6FOCCTmTZ1ihGZfys06Sve6/HpWvo/Lq9Hu1y4g6ohEHMah2G3vcpDxsBqWkud5+V0PpVS3J0uOQSag7sXK2spL8MV3RHBHfrfL39ePoXPf/Vno7SfYq11Weavvn5uTS6ny5w3m2GzQXBrTUSAyEVdTMwaIg0Dp0QpEUV0dzARExE3c6K+5BNiz9qZOSC6OyEIYS1uZhQCBEcA0+Ym/pL6uINhxwaBwF3FGd3MXKSs6+nEKY8hcghq5gghRzQ263stUgrjJg9XVxRCvxYm2m63opffoKqttQ7AdGikhxehr84h9NAhxJBz7miNqLTWtHW0ppa1uHsPQZg5MBMzxZBiZGZ8CYdbreu81lJqKe4eY+xZbEoppcgc6OVQ1XVdT+fz8XhYS1E3jmG72Q7DkHLe7XYc+PK8zFREpII7mMFl3omZmroROCgiqIlfdj9XMbdLqKzi7kQYAVxNzNTNehyMRAEDmomoqGDrzy6Co7qqaVNBRA5ACAagJiKt1labcIjDOMQYkYKZrkttzdwhBAwpILmZqFhzA+yZKQAAEuVpouu3RJEoMDETAoK5NClSSl1WFSFHDjENI4Wg+PW81Kf989Pz/vHpeSkNQY7PT0vVJm6Ou+31199+8+b2bjsNrSxsq63HZTmurWJKMQ4xjQwc0UzLOs8ONo3pasp3b3eG4f3jYT6uy6KSdZO2DaJxY2a+oKE9ARaE5laiNbxAWUAIATEyoiE4EFpgG4cIBOVY6iJaIEzb26++/vr6V1/tvo2Y5uNRVsmydUOCWtvBlpN5pY2HTByZIFoCFwRXKdYqOJoCO6IjVrXDeVmW2d2IMAChuFpRLSqNXAkAHMQgRk/5KgRmKm51baWJOVhIwzhNw7TLm6s4TK9Bv7u3Zq25WcfoEQHNEJBDzDHnmAITAzgRITGgm5mJqRgGREcXrfNqR2c+cM5xGpGjqosgY4DoHXZ0dQQPFDAYqCE6MGjAITNs4kBpEzQGHCJ/3Mtp1Uuggp9WsD6l+3TrWNlllftsTQ6X6B0EwcgdDRkZHDrMQohI3kMFAgCCC5iCCA7uCEDmSIB+CUegf8P740AlcAfs6ItCUGBREiM1bEIi4AroTj1sR4yKRIAQAADJyVVfFuz+0c+/x1k/21wIIBBEwtjXWBU3ca0ohcIAbhiyY8cJfn582k7/fz8+nQ29PAU3FWm11jovx6en+x/eP93fn54fAfHqzV3ebENgxfAzMCYw31xdv3vz5qu3N9PV1KScFkYyAH96nveHY2vvazVRKev6zbd319fbGGMIAYkQXyIqfzkVA3Nbz+vH9w/3H+5LWW5vbzbDNKQ85CHlnIechhxy4iFhZHqO83mWdVHxl3yZAE1N1qXWcmLGyDxwmgJrgKZeahEQitGNlirrUtS+jKt/DqR99o1P5Tx/KYJc0v7TfPp4//7Dw4+Pj+9rWQLGKW+sLQRw3N+r7nM+c1zaurhVkcaYrO1lfajzDhFqfTZZHSnECaI00VIPy3I+HE7HUymV1Q0IYkzTsLu5evvVV99utzfEyRGqFCRelznljH83GPP/EYlxMECMIeQ87jZXu+311fYqjwMmVtNSyul0Hvb70+m0rqtKc5ee/bv7ZdZe8AC+TDsHd3O//C5CAAd7iaq+vJ+X29uDmHVZe6h0GayIlCLljCpu1nFcdAdzNXckHoaYM3HAy5qB/a0uqdbLES7gc2BEN0NyN3UEBwfqKySAk7p1LOQSyXKvrQCBIyMwuDq6urS2rm1d81YuwAlhiJHdpbXaVEUMnAM5/nww9bHTy0b6cnQMxt2JiJljjCFG7vvpSzjSy0y1VunFIxFR6Rfd0XUKgZkohB7BvFamaq11La1WfYFVUkrDMOScYwyI6H6Bgmqt8zwv81xKERGOIec8TdM4jqEjMZ/B6ITAl/QMscMdTAagYO4m0nq9y8EQwM1VzNUByNxVDIBCcCZEdEQ3V7OeyLte8PHLTsDMEBAcRaTWVmpBdE+ZOQBwa1prrbU1EUDujx1JVa1JaU0RAnNghk5CMIMO63yOtcWQME3II2JCQ0cFF3AngIAODNaJCYgBjAnGFHPgGDwHZNLn/fG01DKX41yR0u7m9u7N7dfffv327m4IYTkd2mm/HsLB1GtxF3UNCMQxIhopWQOwxGGMuEnc1LHMvpy5AiPCcjQwA1TqsBwSA5ERSNA1LDPvRrRXMMYJlUmcAR0JhYlDcAPQJtpgyptteHOdvhn85vzY6nw67p+W9VjtJFCUbWlqM0AAsOa2qqJjRCRiCMDuqIpibuAGZqhqBkiOrGJNdK0Grg4VQBAsBehDGADMVVXB3bASSM8DHVCVlgWrCJXTJp5F9HVVUjVVhwvbARnpZS7nlBIHol7KMTNAN0fTTpJgBnTT2tqyqFRwCDlN19chDQ7gYGgOjgSEjr28wj3RCUYOIGbklSBFTMiE2Sggt2pFoJWmKvaSd/f/wAxMzMReUWTALxbkAK4Aiq7oCniBb93JpTkaxg4oOV5gC+i12E68cEBTNu/YtSGamnVkEdzJAYGw0zIsAAaHWBTPq62iolSbl+KmRuCRIQWKhAQWGXOMkYHIAB0MnS7VBkIiACagz0pNLxOGCANjZnRGAFAHMxPXpmXVsLIKpS3GCRg7Y6Unz/hZPaMvJdYLYr0KdokJelRo8Bqq/FZ2Dq9nhIgODuqmsq7r6Xh6eHz48cfH9x/3Hx+k1mGMV7e3N7c3m9vbsNnY6ojkn23xOadvvvl6nZ+ubzYxc1PgCDHyOIzTuByO5XxafvzxN/vD/fv3d3/0R7/6vV/94ptvvsnXIwVCdFN1M8C+X6E7SLN1LvvH/YefPp5Oh9PhtN1chRDfvXs3TkMc85auOYe8m6br7cPH+/v39/vHh/moChgoUCSGQNasrtKqlObMedpkHsKUW8DTLNLMa1lqe5iXx/uP0trP788Xt+pzPOYCWpp1RKC11swNAY+Hw/uPP318+HFZ9qYlUiBf11AYzdoj2NF4BV/cZvfqroCxrtXaoS4DoqoeASogURsXfWqK5+V0Ou8fH09Px9Y0qoaUhjRNV5u7u5uv3779dru9IYoGqF6HnKWu07Tp6OjPHnWPM34WtfSiwMvAoh5vh
BA24/bN3dtffvPL66vbwIFCgIAGrmZlXY/H09Pz08P9/X6/X9ezmgBSx3GRsM/p1yKvv47Mz5A/7oP0JZa5zFPEz8AiV00xfAqXHUDBlTBRJERXYWLCDlU6EcWcUspECObYMwvoRRgKMYYQQ4whxpTzMAwhRABTbe4qHaw1Q0BCBEYzra1Jq65K4BgIoaN81vHdjlkSAJi7iIm4qqsKeAg8pITu0KRWkXVt0taVTseTvizKqno6nZ6entxV1V7xMCJKKXU+ykuVhykwIJh7E1nmuYm8hjsd56IQppxD4MAxXHB16re0lNJaK6XUWrXPMoeUUt5uO/wSQuicpNfwqJSyLMu6rh0cGofxKqdxM6VhSDESkYio6OvcJ8IhxWnMeIGo1VRVTFVUpEdNItVcHZywjwNz6AWh/tzVDJg4peTOFV3E+sImTREByfoNQWREV/NW67LO83wGsJJS4IgUTaEWac3UIAIQBUQUabWura6iFhgASE3QO0j+Svn6LLxUcHEzE61lbaLVrZmt2mbUyuAMSEhmTY5HRUQOBlBV5nVZ16VJVakuFeqaN+mbt3e/+MW3777+6mp3Ra5gbdpspmkzpFxbVQR3lbYieIoZc5QxAliMDKBtnVtrWA6jlm1KKZbcnkBPqiZgQE4EIUAgDy4glZYzjQmkvswXdyxAhZjQe3ofANTE6ooRtu9u/uibmz/ahm+Oj/Wv/uzPPr7/sZRzk0W9UoA4ZB4ipBC3rKWVZRUVpkCWcszTuGVOtUJpvlQTlWoVwa6ur3JKz8/74+k4n1czzQOl5ClxTrzbjsMwIg610TzP0jRQGQa+utpSDHPR01EPp+e1NsFW/zD9679aYQcAL2sEOCEyERH2WTbkNKQYmfpgeqnJ9nXIBbysFdzMRGqra9VW3STFUFbJw8gxYCAEJ3JCQCZSN3QiIKKQEiOiVwEDa2pqQJzS3S0ay6lRdfJzKyD2Un2+5Gtmqj0V6Zss/mzVDXghfxmCwmXdYXRWVwDovFDECwMRHHq5nIgQ0ICJIoURAdEbAJi3DrwAIAD1copBcIqifG5+rn4qtAqKcxPqJCZQDWiRILIzQGRIQVPAMXkICECAzJ1nQ04O5MBEnYX8+UFERBwII/fJbWrNrLg0q2sToyo8GacRKALSFySfl5UdXqtNf+/hn/3gyz19RfJNVaWtdZ6X4/748f75/fv7H344PD6V8zyMeft2d/f25vbtXb6+tjDOrfwstOTAV9fbm7urcYoUkA04UOcepphTPJMfT+dl/3SuZdZWl7nUol9/+/XV9W4cEiJTIHd7DQ7KWubTupzW+TSfDudW21//5V8DOAV+y29ioDhmyhzGFDfjsJliHtOQn+7Tcjjaunqrak1V1RzEyNQRAIwYiCEgReeIwEWslvVwXI+nXjP/+4++y4pKr3eKyLzM67os69q0gcH+8PTh/sPz/kHtjCBOFHCtteSgkY9OZ/TVZFVZ3AXRAaUKCJWGjKQAZ8QGhKAJ/NSUSm21rstS1tWBElNKQ56G3W57e3V1d3V1u9neECZ3FytSCyN/UYv9HZfw6dOfjxB3QAohbTabd2+/fvfmqze3tymkeV7acjbqiwO4+5DC9W4DIgTw5LouywXzIEQAosv8u1SUXvAXs88j+F5A/BTEvOKu/Sa/1jV+NoadkELkGBD7NGMAcFUn7FscqDuo4+VtmTl39klKnAISxdh3erykSQhA0CnmnfPSl51+lsR9f++sOYd+bx2IkJkJkMDBzUS0tdaaIXRskQBAzUW0lNaqM5a1+EtmZmbrui7L0n/Pa8gSQwgvDN9Xfp+qmlsTqR19EXm9IRSYiUMMMaVwKSMiAKipSv9LOwBjZogYQggUckrTOL5GS72e1StZtdZaSm2tE5VyzsMw5HEYN1NIiRBFVdVExD5xhtx7Kn7B3cwvJTLtBBXRpiqAgAQ9uiYCQ2jNEYG48/gNCXuwixephpmZqIIbknfQCJHcq6qLNFPtL3NXNURA0xfej7mbY8dkDRGBGBgwBEIClYtsQzuG9RkEK02eHx4P779vENbm81xKq6biWk1WBhtiSIFTiA5QpSk6hYBECl5aPa+ltKbSQCq2EqwNAcccUgocGRQoxJiHPEx5mGIrplX1QhGNIRFjSqGrSkqty3xyqdnXEGqOGBhJ1QVI1VwB7TWIiWCkLcgy2In8Bb0AU6tVigqhKSIAx1wUnCNvt+mru92vxnD3/HH96fuf/vzP/5PHxx85KqKqKTohpbzZXL25GqYUERgVXEydnBiHIecYB2Y3qOd1ba04KAdMaSCHPRzaKvNpNVPEHCOlmPIQ0hBTCuAkCq212gwiiYUqUGq7f1je/zT/9P58Ws7OZRe+KqX8bI0ihEuEEXgY0jCklAIh1KoqcllDiBADErlbq1VqbbWWUte1SWugmiLV6ttNGzZjzJGIgBEDYae2GoADEjIzmCWCBMYm7iZAlOJ1jMByf9T9Sc5L+6JSr9bVEJ07+HcRWgP5heGI4OSGSAGVAJ3RELq2pS+igAjmgOTueCmXcYjjNNwQBqlLqad1Le5IFBjZgADZnQ2jYy5q9/vTcTWjLJCUolLQHFS0lUXralID2BCZwUyWFP12GzYj5UiRo6Mh9LkNDBBYQ0Am+hTHYAdIe7GbicgR1FWlgGMtc5n3QOfBBf0K8gY5IyI4vaznHUf5DMnqV/0a17xm2PQSJXa6IyJdvofoDuAirS7zfNgfHu73H97vf/jxeH+/HI8mMuV483b7za/evf3Ft5u3byFv54qOPwctCDEOnEamGDggGhAFpsZAgXCIuBtpf4gPT+fzUv7yL3/94f7p4f7pD/7w9/74H/zRN19/NW3GEMhMAIwYpch8ms/HWaqiE2E4n5Y/+7M/O54PHAkIrm+vhjEjc47T3ZinzWZ3dXt7d/fhh58ePrzff3yY9/tWxYp6tWA4xMw5QAiNvYECInPITMk8AgRTEsXfsfHjz/+J5CZd06Gi67ocjsfj6XieT8u61ibzctwfPpZ6BJyZDZmyFbVC7EOq4FJqabXYS71AHd0UsRJjYGUSQFEz4Aq2iqG6GSiiBs4hbkLcxjBOw3acdsOwSWlMaWTK7obVvdn5eE4p6SfKGb7uhfDz4tEXV+fu5sYhjuP49u27P/j9P7za7XSp79//+MP33x+OB3V3QiYehuHqajeO4931LsXYJUjLvIi1cIG48VLE/Sxyduicm17SvahD+i/u+ho16/fkczzmEjJ8dsrEHHIKRDFGNGRm7aqiF9KYAXl4ye4BYgybzWYcx5AzMnWZj6m6mnnfX9UBYoocmJAd3KyZcMyJ3EACmnX1o6qaCpi7OXMA6xFbx2O0llWJKAZjVhF3l9ZMmouCqgPZl/XKvt2G0OtF8RUX6VHF5wKltZbaahMx9w4svQqIOp+xV5oAwD7jTta1iOilFMY8DENOKcbIyDGEwCGEixiqljov8/l0Xpa5toYAMaVxHMdxjDESM3UmrCoyX4pZ0l4DZVNb1/l8PkVmcFdtrbZaizRxM1EVrYgQA4fIMXSCuEpzBzPFwIwM
ANZVX67eR4RIE5HampkiWggBAPTC6UU158jbtCECInQgNxIEMDIV0yaq5kZIKSXqELxaoCwKdS6lNvV+i1XFXsPrdZ7/8s///Pt/+c9P6nOVc6mliYq7OrknojGncUibaYgpAhPHkMZMgQy8iqy1SGuuCqYk1dd5PT2fD0/n4zUxE5AaAMWQxzxtY12WuVYRBDfgEJQv3HArpVQp7mUk3SWJ0SM2BDUFc3fsO5UxAbMzQSBgV4h2PcELVQncvIic11IXNAmIONbooJshX+++ebP5xdXm3Xrw/9e/+NPf/M1f7efvOS5Xb6chx1ZoOcnx+dzEhnEkG7bTbthp9dUUyDhQiCHlmBFtLbXUU1lLHlMOKTI2BG+iRbSou2t1hjCNaZqIyFWrmZsSIsWY8jQi+fPxfP/x8Jd/8fTrv9l///2x1HVz7X/47XOrL3y4joP2GgsCM4UUxjGNQ+LAptqaSG0OxjHkFGNKzFGlSaut6TyX87mcliZVASFHa7q4A6eQYiBCwl6kohdNC7iDoSM6IwT0AEaXVClsdwOS7IZ5IAMRqQIE7hfRg3cqjPcqNBGSdiTms+U2oBuiA3JP2ah/gsCXZdnBHQk/4y2+8hgdEcdpfPftLwLnw/Pj4RmXNrfmDMEhIUY1Ks2rQDE5LPp4sFU5TRPGCSkRJcZoqoizw6ywqjY1RxNtmtxwdUWfAEegyMhInd6L5ujYVRNfHIRdM9lVAcCk7iIRwV2t+Cq1lNNB1UmMs3KIPcbEFybuKzfms5vUP3lNM/3CTHilcKhp14mIWGum0pZ1mY+n56fD/cfDxw+n9x/X49FUxzHf3l199e27d999ffPubdxdVUirCvz2gYABMBIyISE6MyEyUIJAkBjHhCkSIuHT+fHp9Pz4DKa1FXMr6/rVu7dXV9sQGMBlLfunw08/vP/px/fH/XFdaqu6LmWpsznc3N66w93bu83VhmNfXsHFDGGcpruvvkJwra2uy/l8ElFyCBxDDiEHZTYAcQGAwASJUuZpDDdj3A2Rf7vc9sWWA25qLuu6HA6HeZk7O2S/f94fD+fzeV3XZiJtqXI0WIgqgAlQET5XQTRLwlBFmqqagQOCkzuZAYIROKJ22Y111QlVMVQjRBpyBtzl8U2MO+Ycw8Ah9vIDIHS+5vHw+PT0cf/8OIzRfucz+nsOdwAg4mEYb2/f3N2+mcbJmnz88P79jz/8+OOPx9Ox45l9O1zP11fXV9fXN4Fwt926eyAutfSS6IW/6a/88995Y/EFge3EWSBi/8IkAH4ewQAgAAeOQ46BvUZraqoirZXiIlybi6QNkrsxexexckgp5SHHPDhBZ59YB5+tp+Li4ByYnQnJzRtYCMFTUgCX5iIu4uqmzVpDBzfrDLeLAsEvb4juBIjurQmYioq7IQETITHTJwUcInY9c2fUXgKZEC7Uohf45EJkkSYqak6XhOeCu4QQKHDnlNnlp2prTZp0okwPCnuE1Am5KQRGpgtB0GqtpZZlWZZ5WZaltQbuHGOnv+RhiCFcpJvunTS7lnI4HAPjK3JpZud5PRxPsee+UqTW1qq2TpYCYkwxhEAxEPMFpBFVEXejwAyA7tqaLKuYGlEARHc1MHxJ2cy9SgPRVgWAQo/EIjFRlyw5spsLiaOYaYd1ici889U761jUXKT0SlkHivyz4LK19uHj/d/87W9OIufalia1u3Y4MYbMcRryOOZtaXkcYgppiBkxJHYEMRMDB+qM4EDkIufD4fD0uLu+DTGmkMyNY0zTOG03a52LrB3ZMhEVISIO7OD9ibcGY/btGLchRoro2FXlfUknciRgdkJgBgZDi5tt5lcyLEBTWgrMJ5HqIWR3GDJdTdt3d9+93f1ySlen9fnjh/cfH7/n6TTuPF9rHpBXNMRSydRrXWtJ6CFQUA8IjkCdpq3aAJxImTRGHweOgc2klbWV1U3HnN0dzFsV02SGImag5ijC7qwKSzGVdjyefvrx+fvvn374zfNP35/UxDyUWVx/N6JMiNzlzQAiTZqKqiOGmFJOw3bKOTMFbRWkamu1CKCqShEwdzEnlCFJB+gJgKCDH9TrJX3EG3aVIqADmaO5mpN14hkxOPlFC4eOXwQxppfaOf3uEknALnegRHyRMHZ1LRIi9GdsyBE7QdnNzC4gBAKCbbab3/uTP8l5+7d/9VelCc4nEVeICBkxr+KH4/x0mJ+P69zceIjTdso3abxSSsZRKQZTTps0rq6tlWWZjy41TBOQzrZYaQ1QFKZEmYEJwcFMzBTsM5DkNbpkZuZerCdmRVQLxOiAor7Mp+W0wFzDVNNY4rgNaeCYIAQkeolj+uLv2EGay10juPARuorG3dXVTaWt63o6LofT+Xgs51Nd5jrPdZnX+bgeT/V80nlB0xjC7vrqu1/98ps/+NXt198M1zcaR2kIoOCObvg5KeaFqmOI6OiGF+kaMbBRjoH7ejpO03Y7bQ/H8+l8+uu//qvn56cffvzhT/7kj3/5y1+8eXOHCM9Pjz99/+Nf/cVff/zx4/m4rMtSam2q7mGe21/95fdPT6er691mN41TpkCqAoCReZOHq+32zZvbctyvh8czNrUWA8Wc8mbkSE2tiRRTd4sIhBwmvsXRyL652+X4OY8ELwTjvtuam1opbVnn0+n4tH84nA6n83w8Hp+fD6fTcV0WUeVAwOJekbQLWhVhKS7SjmeJVAdqY7BOQXNEQXJgR0YAQO1IqCOriXTKpweVwDxc7bY7fDNtvo1x507SrJS21lVMWluWZdk/3f/0/m9/+vHXD08/Xt9u7TNyH3ymTPpyKl1GY/8WEcWUr69uvvvuu6vN7nQ4PN3f/81f/MXTw31tzcA5BiRw12U5r8v8tH+4urq+urrZXt1st5vz9fV8Ps/LaVnntS4iCi8yAn9VeL0I4DoX1O1C4XQzYhryiAi1VlWB3zX3ey7GzGnIIcbmVXVtReq61vNZSxXEPI2AnHpBixh7RaqncC/2QwAXMkST1mN5cOtUcHR0BCaGwDBkIrQK4uZiqk1qtdoI6YXzYe4GJNAau4YQc84hRkSstZq2TvQIKVIMwCF+Jksm5s1m08RCZCbuE7m2JipSW62lVamtvgRYIcY4xBRijDEG5u6q1CN4cxORUksptZbSTV+YOcWYU+oQT4wxxEDEhBiYwVxqXct6Op3O53leFhUJMeRhGIZhyHkYBmY2dxVBpl5DLK3O5/l4Oh0OB0J/5feI2um8Pj4fGd2stbaqNFBxU3BPKWw2U4wxBmR2AG2tnk/zumqrRBDRCRmb1lLW07yoWrggUzHkGCE6dGm/N9FSyroUorCZtsjBRLmr5TESRERzEHNTFwCLMTDxMsu8zPM6q7YQgqk3qXZxoTEEBdRXyrU6nKseq6yiTd30EpUQceQQY+I8YMpGSTEEZCfu2o2u7wUgp8jBm1NYVdQOh1O8f7y+uRtSos2GidPAk46tbcVWtYVM56W6qYtAiCnFwMHc3RsycKBpE6+nNA0DE0tXbnVMj+CyYAAQOYGCtLy9flXAuZN5rpJP57kVGDPlGNHHzXj3i2/
+6G76Wk9JawMuaae7r9J04zSJsSLRyCGN27rgss6Hk8xnC+Ng3MuopCDzfGhhQWYE2W7SBtKwmUT9+el4Oj4v64nZbr66MYOHp4fzcT6MABCHAWMgdxOhJjovsjwe5qUs5/PDx9Phua6zmxBhjDhEHPATaeEis3DzXu4Bc6u6ellrVXdiTsMwbYZxMw7TmFJgAGucyXLgHDJRakbiy1qlmTUFNQCnXv691G77c7wEMeaEZiCOYuhGIAqi4m1Z9LxoqVrlImQgJ39RvINbX3Ff3rIvel+sZ8EwOJB7VGPC7u5iAQ1Q+2oN5mAGiC86cfvkReLiJqoqZs2gGRYPs4R+eqK4rHY41f2xPh1WAR63MYSJ0zYOG6bkHJU4uAeOkpK2isTN3LXFwKD1vNal1SImmQAIMw8XJswldaMvs1Iioq5yDyEG5hCM2CAwB0JCBzevpdV11aZtXjAeOQ2cR0455oFCwM6Uu/Ca+mbVRQIOAHapLKs0kVakVim1zvNyPMz743w4rKdTXea6rFLWVletBVQDQM55utq9+fbrr37x7Zuvv55ubnjYFGBQg0/mOj/bEtF7hQrJ0aBj7RQ6oI0ERCEGzHEYUxpTQJTTMt9//LDW0kQOh9N3337LxE+PDx/ff/jw/n7/fKhrr9I3dQgh1GIffnp8fDjkHKbNsL3apByRPIQw5nRztUsEZBLYcoKciTRMKQ0pYUgNfJFaBXolTMkCYgiQEuxG3A7EP0fJPrs0AxFdluXp+enp+eFp/7g/Ph9P59PpfDyel2Upy+rgaQgxY0hARGCgZtaogLopeAtQdll5x2FgQAdyB1Lo+6r3iEkdwEAUioIUMGDzicJu3FwDXqc4Eic3gnBJg5flvC6np6f7x48/vf/w64eH9/P5SXR1sL/rWj5bFPqMuIQ3IcTr6+vb29vddofm9x8+vP/N9x/ff5jnc8qRU6LAyOQOKtqkVqlN1ByGcZo22+00ReYQKcQQSqi1dZrdRUjfa0u9xGk9qEYwgM7SiDGnNA2jqh1sr383jERAjMQUmUJjNQR1FRETsVaaARODKiFyt7syDx1LeClMXU7lAnhc5APdaqFDLP5i+GQX/o+5iUkzaSpNVQndLraa4p1Xa/bi+GCoZuCtVdEGpuDOgQMhcIwpvtaSkTCmGGNEAvWeh/fakUhr+snHDQJzTCmmFHLmEC/uBujm/sLvbS+meGJmhMgxphhf6kcpxcghEFFfDUop0mpZ17KWeZlLre4WYhinccjTOI4ppRgigLfLjRUxqa2uZTmf52VZW2ufjy8ECOgRHdGIHNkNAZncwMxTDCmFlJgZuqOhqi5LOZ1KK4QQWhUOZF6b1gvVRoGYkGJMIXTOk1tr6t6g26qJSayICGiBYkqJORIlIkNcEQHJmT1FJvKyrvN5LlLcxUx66IwIhJdiJ+HnmRgqkXHo1As2ADIiDiHmOKQ8DGmMMYcUQ4xddXyp2BNxV3EYkDkimUJp1QGWdd4/PRDhtmxjSu7QWnEXRk9MiagCiJm2qhxyShzCMLg5B1JkopjjMI27bUrJHc3R1PRlNgF2hpahqWsJmyui8DpdzKJqFhVVdOPAeTtd3V199fb626t0uz8saDpMNjls7kLcuHozB2LAAWJiIV2Xs83zfs+UgKeIDKqFoUpaU4whBHdgMqAO7/nFAAgsRry6HsFxKadSvRarK07DSMSlaKlaq5ci53M5n8tybsvZpSI655A4YOTMGD/fZNzADFQvSjVwVxVHUDUnDDHEIXUTSKYAwO5uHh0zBY8D5hHyqFzNm5u7IwMyIDmAqBMYEYCRmSG6S9c1BXd3wP5iNVkWabNXofMq+9NaajOzy6rhl73Wra8qfbEj6M49/rMghrIAV4kKhOCRPAdNJMGtqzcR3E0cXwQ73hVUBG5m9bB/+Ms//Q+RN4/Pp6f96bjq8+ynczmc2+EktaEhiWHBHEKmYROGDcVEIRIHIGIic4+UKtjcKjJvd1fo5m7rcjpXkKIzaR2YMAWmnJgwuBlTY2L6jJWA3XI4BHqpawcmDAwUcl9ImAjcWjtJW+fTaX2YqygQpSlvdpvd9TBtYoqA4NJEW2vyevv6r+nymbquy3k+Hw7reW7rIuuqtWopUorWKlW6wsZMEDQyx3HY3d68/e7br3/13e03X29ubuO4sRBBe1BJl5z2izgGEfiiQ0UyYrjYjV2EnwSEwRggEg+RpyFudunpcLp/2p9Pxz/903/5t7/+4euv3g1pWOdVanXTmFKT5tVExZwCkwme57VJU60ph5ub3dXVbrMbhzF7qdgqlnNEXc/PkdvtLsHEQ57AcZnrsrbzKuJGMTKR9oxcq1lxO6Mv4F9u/N3tDRy8pw4yL+ePDx8/fvxpf3w+ng/neVmW0qq0WltrAIZsHGNOiYnKWmoRExB1qYYmiTRcAV6FEFFduzMfOtoLtckdRNVUa/NSaC7oNObpltMtwkbacDwXVQ0h5pi22w0hnQ7P+/3jjz/8zdPjh3l+KuvJbCGSzwN/9+5L99kXXtRtn0nFIef87t27N3d3CPb8/Pj93/z6/sMHUxnGMY8DpWjkXZZD0YMPZlJF96cjP9xflZpzjsxXu91ms2kqpZTTfJ6XZV1Wae2iU6ZLXO7uZkBIIcRp2tze3m6nKRLPy3lZzuvyCh3Zz8AjRiKgTuvvgYOjIXn3fAZEpi4/ynEcKQZTZSRAUFMUAQK9eJCQcy9DY+LQqyum2kzMNACCq7YqZZV1kWW1tVhtlyXKwUSlSrfnQrrUtbUVWVmRHVDN1FVVCL3rnTGEGD759/TMQ1Ra01LLsqy1FRFxc0QIRDHEMYy90sQpEoee9KOhqzl6E1nbWkqp6+qqMYQQ4rQZY4wvUQsSEnFfc3q256XU03F/Pp/XdXG3EMIw5SFf55xjSkyx+wQaAhoQs6qu63I6n47nw1oWMycK2+3u6uo68GWzjEx32wxvRsLuMTF5TxbFmzoAxsQcuhTtYqvUqsznZT6LCnR0KSfmRDlGRDeHgBCZcgwxBEA000AciAlAe4GtrioNgFLGnDhwQkrsQhw4cIwUE6YICFrW0zqfgR0RXM3dECAwEjOYqzqHT6klEuVxGre7KBaa1FrUlYhTTNM0DcOU4sAhBeaYwjAwsZlJV82EEELHvs05xTQOTVqtFcAfn+6X9TxNY0qJiN20lrUus9TaOWKuVnR1pDzklOI4Dg7RtDhCNarOFgYeNsgBgcTMHP3TTuIICiamNeQd0idE2ZwBUghAjinG7TR889Wbb968uxpuk21QS2C/uuU2hbBxYyszmngMjmTuZYF6smNY4eEpOYeNJ0A7nJ4J9e56sxlzCOQGTdUhtArmgXEYh92Y9w6aMzKnt29u5rmoCtp4tfmaIy3zw7yc17W1poSeKDSggBhJh+x0HYgwp0D0ab6YgylIA5HOUUYEENXInHPkFNOYQwwO1ko1cYAgHmr1WkAruQbnRHHkqIAKACFETsmZm6GJEmowjsndHdG1CgBwSESMRBijxVC8Pp/a03lVm5cqh/OyrNVNCVzN1Dqtt1d+/FKcup
xpB/4+rWVhtSAeVh3m5vN8Iqi7De5Gv0owEHJHoNw7TYQQDBHQCcwBHXxdTz/98Lfq46n4ucjasFg8N90v7fEsVTENI4bo6BAzxIQxcE5pSMQBEKWz/l3JG4MBU4oJwMu6OpJiWi2Wqmqag6TAmxxTBEImZPptfgC+Ou4RERABByRmB2QCBvVW6/lU5+NS13J8Ph/PazWjRGnM0y4PU0gJEUyaSO2unZcghhAAzFxa60HMfDzWZTFpYMoA4Oba/MUXFN0jYwxpGNJmO13fXd2+u73+6na83oZpwBgBGQwuFRa7hNxflJOsg0AXJ2b3iyjZnVDZUcEMySFwTjHnmMaUcgLEh6f9/nB6nNdyXiNHE88xXl9vc06trYVATFThkh22tq5rKXNcqHOxAlMAoAZzW23eR1Swgq7b3SbHuNlcqcHz01kOZ/TFWzOgSybuhqbdQECluP9d6AU6QBciPT0/3j98PJ72L0xeBYXur2+mYtUgpjylYNpUm0vTVr2uwmAxEwGGwCG6C6iBm6u6ul2cihDUwJVUkyi1FgyvOF877wxSVSri4BYAAaBr4R8eHh4+vr//+MP59KxaTFezaiJ/tzjp0+O6rA5miBxTHKfperfLKZ0Oz/cfPjw+Ps7nc845DjnkjIG9uy68CGBUUbTW1o7Ho4lN0zjmIaSUYtwMo4ybIQ+nfD7HuYt7saMj7q2JqrpBDHG72ey2u6vtLjG3UkEuedbfc9I9trHLbmToRm7oTkgcQ8w55Mwpcgodp0R3QxA3dAVDM+vO9xdPlxd6fGeUaG0q4g5a1/V8rPNZ12KlQBMTMbk0/DDrYIwAokcjAATXViv1hJyB+FK1AjNyRUAU009SXjNbluV4OlaRIrXVTh91Iorcoak85DykzMyGIKbrWqQ2b11x17Uw53VdWqmB+Gq722yIkSIzApiIXdK/SwFC1ETaPM9Pz4/z+WymIYbtdkuIkDNx1zlx9yvuWvra2jqfD8fjeT6tdTbXwDHnvBk34zj2YhYAIMIQfAzuboExx4CIq3oVaOLi9kI4boiQu6g7REZstZTSag0xBEmchpBzSilkjjnGzJyI06WBDBpZjiEQmLQVVxF1M6bMxMyBAoODYy82WF9LuyngMh/Pp31IoUOtqta63Qh1xZzW+oUXEYcQUiY25EBMZkpIKefNZjOOU4yZOCBACJxzoOBm1G9CH1mXN2LkGCGis2uT2hbV2srce630/MlVwazXMsC9tupIbZryNKacHbxVA9civjYUj85jyIkoQBe+0Gf0eVMwQWuUNq+G3ap+PpXzuapi4BAjDzlupmkzblIYqQaTBlDGLV5POW6TAMxalTxEcMSqAME5o6vN65rOaxi2Dn7aL8S2G0dJnaWP5AExM0wcpmmXEm7aspZ2DkTg1ueDCqBR5CGlkOKcoxNoDFajsktdaoq+3WwCZTcwkwar+xf7i6g3sda8NavVShVHaoYUI5qcq13qQs7grMbNWARMxU0YpVY5FViFqzGDNadmUMS4qpozAQZm68bZKqW6e0xAHBCpO0gZYhU7L3UtXqo0qeiayJS9gaO7grn5BUj57ULFZ0c4VxOgFcaH0/rXv36Udnj3Vf7u7RDejENkImTTDh4TXQxVHaALmgKjmpzPx6WVc8HibBwpRs45jCPLEgDTtEEmbY2QlNzQ0xA3mxxCNJXzeZW61nXV1iJASjyk0EQWbSrKaSL1tamtZaCSCa6HmJk7enFRaX66rMuI70spIhABo9GFgQ2Ykox5HIc5BgSBVqiuPpd5lbXaImBO3QfPTU3V1Ho97rK0wGXJNzPTpq0hQoqUc4g5EgUTV0ZSJccQaEhxnNI45mHMm9vNtEsxM0V2RiXUV1fLrshuq7b1NcX3ro039e7MgYTgdOEfqKmZyotJvJE5EG2IuivG1Xbz8HQ4Hs7rXM/zgoZhu4mRU+LW4rygaRPRJl2FoTF1X/eOm5pb84ZuKFWOVgg0BBtz2lzvrq9vbu/eIIXp7pwf9/jx4bA/llK64JPdyb2JLaWspdiXQcyr9BeJHLxpXdb5eNw/H57Pp/2yzrVWUcOukHNQafN5Pi9gso4jM7Z+sagG2kKA3SZe73gcIUbt5DIRLc3NFMCFMSAGYoDgGJES0gC+be2qaC4i5kCchzRsx8mkPT7eP3x8//6H74+HvUlTUzB0oVq1li99L/6uAy8BAUeaps1uu80xtbX89P0P73/4sZbCMYQxc0pGXa98sR7CLjpCijECYGlN5TDP85DSMIxXm+10c7MbNlfjplxdn5a5NnE1DjzkbGbzPNfWwGEaxnd3b4aY6lLOx8Px/nF/2LdSPz0C/wKGdQA1UzNVQQQyDxcbWgFRQszDMGwnHjOkoATq6mC9lAsIDODgCiaqZmray3w9HBJtoq1paVpbq7Wcz6f9U53P3ipod5UmILooBhxczUWd0M0JOxjaHIljCsQhRHLrvOG2SiVwqsuyvAqUWpPn/f7j/X0TQcZ4aUHQOREhdSfdECIFU1mX+XQ+HQ6H+XyWtUuscW3lcD7VVslxO20YMcYQAkurZS3SAywHd221rWVdlmVd1vP5dDwdVXUc87TZSG2tlnVZp2mz2WyHcYxpIA7uUFrdPz4/75+Px5OBTFMepu2QhtgXhcCfxPpuKmuZD6WUwIS7gTiW6lVdnMRUtYpWkZZSynkY83Rzra204/5YvBGAm8yLlRVKDNvN9s2bN7txjCH2aUCEjgCBACkHZLeZcF4Wc0rDOAxTzpEIVJtqFenc+d4cQ6SV8/n5eHiMITAFcKqic6lNu24LCP323a/0VbXecxt3YoqIzBdNWe8RMQxDSrl7zxAZR0w5Bs6qVkopa5VmjkSRkBHIHBzQOF5ITypNa3GzzkxiQiYKIYSgUKS1ZoCdcRy494uL3mwtOicpAgoceQCOqEbUXQvILmuVuQlqgzC8ml+01u7vn+4/POS43Y1TDJS4d9pyB1STtR2bnUPyqzTdvH1LmE6hSlMKXq3tT7MpXF+bagOC1kpdV0S0ahSZYGTcAHjXCeW4y/F2yNfDsDnt9tDkaf/BpM3Lcng6rGthQtBkUgnibrMb85Yot2rH8/xIx/n8mEd9+9V1dyOcl9OPH/5WdH2dL24uqq1KbW1ZJaXi4LEBsgrWWvU0r2IU8sYpNnERM3UCiMwvXHKr1VuTVjEynaunWVKupo5IQw5IkQKDqYqWWt3AHJkVAJsaujI5M6QA3fKvEnY/x4DexKt6bVANBNC63RQimHfRK/EXytDQcGyeiw+npvdHWdbiEYchvrtGBU7YLbPsUoXv1EJHtZ76YhU4zWVpViw5p5CmFGNSyVoHSwwwbCZAku5DDqCmCB4DTWM0QVmhWNM6q0gIiZHQxU1Mm5k5kGFQCC71tMomtnOxIVJ65cP8Fu/CHES1CQQ0JscABOhASIBMrxKGnlKCiUvRtZRzOS+tNO9a9IumRexFoHhRWn/G+3VEj5ED9TsUmKl7L7phAEzMQ47jmMcp5ymPm5ynIY4ZY3Ci7m7ZbcvrsizHw+n5cTluPlfB2KUW+OKYi3BRu
hMZqnb6mRp0JS1bb1CRUsgxROaEdF+fZK3WvK7rMs+qQVoDcAq0yfHtV29zSuu6ns+nw7FzCOq6zpEcWzAmBjUtHHCzyWmYdrdvb96+vb694xDjrsTtNY9Tvn/aPz2t57O12s24Sm1raZfq5t9xqOqyLsfz8XA6nM7HdTmXstZWVR0dwRgcpbZ1Xp0UXVoJQ4bQYUD3SDom3I44DkjUrItq1EVAxUXVAVnJOWBg4kyUOQ6hZZdRWlYM7kTMOeUYg1qb58Pj44f7h5/2h4eyLDEE7s6dqirYqRr/iocDMIftdruZJlM9n05PHx/2z8/mFlLkGDDwpSnOq3nky88SkTuoqZrUJiri5hmohBSd4pjjMA55MAQCDCGkFN186TJU98Rhl0cr9Xw6nR4ejw8P8zIr+W/PkU9jDEHB1ZUUvBZbFpsXW1cvDTpdGsDAxdXF4eIzSQwOiI4XNzkm+8SGEJdWpZRWVlmrrlWW0tZlPZ3n474tC7qiOwJRCBQCBiZiVwU1F3NGuHTPuJDROBClEFJ0t9aKXqTUgEhflvS881gMPGK89IDsDNzAve5sZktZlvn88Pjw9PT4/HS/zmdv2ok4a63Px2NtEjieN1sVOZ9OOSV3X+altYZEXcTcWlvWZV2WdVnXdVnWBQCmaRynzdS76aU8TdN2u9vurja7qzQMRFhKPZwOy7o6WIxxmqZpM+WUiaI7fu7fo2ZzLYdlLaUEImditrVYM3ckMW1tFalqYmabcQNOpVSpjcgDO2JTtdaqqxVEF5lyCoiFOKXo4xhSQIZLWxywFMmGaCaOYZimlEYKbG5qVWQVqQAwjJtpswsxN6m11VJmsKQQVPC8lKfDaZXGRByQyY/H4ytJGQB6sxHiECPji19NtwRk5pQCEiIpEhA5E6QUu8GxqJQqZkDae9m8lJmIKQR0EBUT7ZwtZ3a+dPyJMYYovcdVznkYx3GakEC1qokrmBMAAwYAduCXrYT6CPfu04MMQEDxwvgFMPNWpVVJwZgxBo6Be66EGIEpDjFvhl24gpTfXH3DnoZ50dBCpoYy5WUcT2Pez+tZ2gqkhJUQI3tkJgiBxpTGKU+bdJV4gzZGGJNnpXo97aSdlnKcTa01UEkpp0A9l2ZHDinnrSYEDOuiKR2GwdJ2SnGIKZ5nrv64vcoc6PMpY+5mLmqlCjErmhGsTY+n+vh0Egibmw1FXNciUlAaAiJnJEAXd3O5UGYt4lIxrB5P1RzGoUOR3d3AzF/6NEkHPVHMACxHuNpw03gusC6g2gJBDgAAoffqcWDABiQOjuj0slgiMn+6EAAINNyBBamDYIO09SaL8LnEtXEzTqgIZhdKqxMBUgBnd2oKq9Hc8FS0GnBMYdwM29sIsXoptm4hVIOYMyAphdZqK0Vak1pcJUdGphLDTACmpuLMJrC2Vl+8AVqpUisSA8dV9LjacZUhIyYkpt6b73MlrwOqeamVzEEDeuRLJbs7XjgjpZRiGpCjExmiE3CkPEQDShHM0JGQyNRqrdI6c81fRSlgFykrkF0sHwjUhHrjG2aGroYGQG/SuFHADCGlYROHDYcExADopq3W9Xw6PD48vv/p/vvfPN8MUl/dIV+Zoi8fSL2c5xd7IgQzJ730AVZhVwBgZBwT6kSiWiqKLVrKPL//oVFCZjDwYUhfffXuH//j/8x2u/n48eP33/+m/NW8LGfVJusq87kmngLlQBxwuprGaXt99/bNu29u3rxJ08QhhC3m65vt7d31m6cP3//w+P7D4flxLWtb67oupVj7rRLMhbyMvVFPPRyP+/3+NJ+WsohWtSbStIkpgBI4tRep5Pmk0kgy5hgCUyIYBt9NMA0SWVurqrJWqQ1Uozlqr4dgZMgQNyFMxCNyEiU1bg2RaTNuUk4hUW3L4+PD0+P909PHuZziRDGPBAxOlxLqwu6/y/LmsytDvOjVAMDBQ+DdbpdzPp9OTw8Pp+OxtZaGFGJyInsZRz9jqOBLy0ZAAvYeI4BZWZaHtS3Dcbq+2tzspuvdME05JSZ2cyTEwIiuouV03n98fHr/4eMPPx6e92utDd1ydMYXOszPH4kTGqGBq9Z2Opb9c9kf6jxbFWRemWGIuIwhoBIShxgCxhgRuqEaI8fAHtiUXVRrq03KvK7n83o61fMsp7XOS5mXti5Si4swvpiJB8YLAz+6mKtcuv9oD0qNkDjGOAxhHGPIKkIXGRERcxiGYRxe935CTCltNhvuJjFDDvHSIgDgstuVZT0dT4/39z/9+P3D/YfT4dGljSmlEIBorfX5cCpViMI+PD98vI8xdiShc+P6Y4buY3mhAF/+mNkTdXOZzsaLwzBsNpurm9vbt2+226s0pMDBEdIQr2+v8pBzb33wQoT7vC4u5k+L3K9iSoy0nBRBtYm7cW/p1YpqMxBwP5/nuspvfv3j4XBk1mGg7uEnTbSJiba1aKvDOBLSOAw3tzfjZgw5EKNZL2cyEg1jJs553HIYVFG0iaytLSIlxPzmzbu7N3eb65uq6kQvRGOstR0O5/c/3a+tbrZTzoFQ53l+9bvzlz7qkbn72SPiZ+6CzqGTGIP3Zk2G7gwvtqVmtTVDBQVVa+6KSClGH8eApKJuTsTqIFUiW6ZMxDHFydEgpHF68+bNzZs343ZSba2VapI45JRTTERs4ui9n/mLnWR/FIgA6GSAnxAyIprGcTNNw6WE140Uc6ARcYxDvnn7ztLCc1WoV+kOCpo+u7VtTjxFCfS0nn74+P3D/v68eCAbhxYY4UKE98DDm+vvbnfvdvnWKx0fT8thOftx1T27bAZG4io8DhRDurnebDejm63zXIoiZvSMFANyCjGlOI6+mYYhjzHxtN2G/O0vf/luyPF1tQndspk658zFIBI5cKntvOphBoz5arwbxkz0YKVh9Cp4aEEqRK/kBXoPC2RHXisRIZI4U56mkBIyAyE4dYmog6qDmYOZmpHrlPDr2xxi/GFvs2BbqjgRUWIncAaMiIm9BRCDYiDu2rOnwJ2j9imI2d19S8VPBzVaMYxxsGk7jZtMIbg382Yvvo/ebSA9AAbkqO6HtZ2KVSEM4zBdD9ubYXMtxrlQKharmhg6AiBTMHJF0abH/WFKYTuk3qU+xc64danNoKmadHs+REQnhEBsyKX6CWV/rmPGIXFihn6bvjjI3Js2UsFGZKlzemKMjuiq7tq9fV76x3KInA2QOARvAk29t8Ry95hYmvTeNeadYORu4G5ISAw5hWmKHdYhgm7pclHBOigAOBhxGIa82cRhYo6qpktRaMvSDvv58cPT/fc/Pfzww/HjT/PzW/3MPPTVU+3S4u+LrxN2VS2CG7oZAEXDrqJ38goW0RNBZmxEKtpKRaM8RuaAkVIXlr4U3bolGjh2doKgKyByyjlvd7ubN29u3n61vbnL2x2GCCFEZMpjzBOH5KIuWtdlPZ9ErfOIzH9H+v+aLj8+Pn748OH+8f68nJvU3qrGTFTVmpl2VcqlNZeqSlUhDogBgSNOA223OI0eg6tYMytVm6K6gRNipJACjTFtUroKcQKIaEhkMRBSREqJI5iu
5+Px/HR//+GwfzrPR9FKDMgEBmiOgBjwhRT2r3RgN7TNaZqmGOPj/rDf70utDk4hYGS/NHT+HYrnT2gPflYDRmytLfNpjckI8zSkmDbjFAKjg4gSUcoZEAqsp1of7u/ff//Dw/v367xgDJ6idUMAAPhdAqtLv1Y3qaWeTvVwlHn21nrjkLrMEIiGHNwxRY6Jool6UBdxYLFOtQMDF62tLutyOJ2ens7HYznP7bzIeW3r2kpVaWgGl55KYAhujuam/tIdxMAMAE1Mm4hoMHdEeAFs8NKqjhhDiDHkFNOnrrxENOQ8jSOnizqLQ0e9pZW6rmsvID0/7R/uP95/eH94eijLMZDzZoLIzew8r/vnU2nKl9Z87O6irRdvP81Hh1crN3phqV14cP7a7oBijMMwbK+ubh/utldX42az2+2ub2/Gadxdb8dhfJnV/fl8gY07QDUvjkTBkdyo9yMiB0R7sdJxADAzabIu6+FwXM7z9e0wDLEURWQmEqZubtYp8W6+lmygQxnjkIBARBE5hsghhRBy5N7cW6z718y1Le6Sh6u37755++7babcx5K+++2XMcTtmFzw9r0Wwff9hWdq0QUR219fT+3Q9XVcZQjc1VlO33h7CzMSBOfQmU+3SIgC7zJSJuFt89WqWu2EXpgIzR0C+ZA7WJSeoZoBIxCnlDcZhs93udptpE3NaVy2llHkh5t6dwUSbViQiZujdHwEAP2lFftaaD1+4pReVrgNjTGHKccM0hjhur99g0li4tZIt19MZDs+ylqYWjHd3u7gb3C3G8LBHt3NKTigxKBIEjjlurqd3V8O7aHldSnmup8NhaYeGRxzOzC1ln5R2V4Mp7nbbPGQwF1ckDBxiCECB1UNIkYNl32zyZjOkIQKm3U349ruvUk6vl0OITBiYUuSUwzDEaYyGQR1Lg2lQ5dSzfzcHNwLrlFoxQFN2BVVw7PixOYnCsmpOFx29X/qwdROegI6OBEgvJTsL7LuJBekgtjTVEkxYwMAdHTAAIwT2pF7NUKAatK4RJmL6Eon55pe//3wo+/YEdKSQtin98vd+9as3wzYdCfYmi/qL8zlHhyAakcY4XMGi+4eHw+rDMG2mu6ubr8ftNcVcVmOuCORiWpuqdiEnAoUQVdrDh49a5oR4fXONRN2Iu9Q2z2vtGTwSEgXmMUcAq+ClQa16VH0+reNAN1cpx4QcLuzel2FmiAYuImgFwVEqmaMZDCMSi1Rr1bUCSAiQEucczFIIHtVVsDVbqzRRB0TgYUxmoKIivYdzR7KhL60pxyGHYYxMYNpMxU3dXU27uyAhJ448bra3d7vbmzwO4FDOa2nzUuS4Pz9+3D+8f7j/6cPH738sh6c6z68lmF5Cwt5Y8yItQe9NngHQEal32HNDADBySIzqoKZF1nI+rMeDLjOIBiRKA4bAOcYhOVqt635/+Bf//F8Q47qu83mWpkOemGPiMEUaI07Bt5t88+bmzdfvvv7uu5u3b+O4MQruCNpNQl/M3O7uZK3H/f50PDiRAXnvEvHl5u/uTeR8Pj89Pv7w4w9/8+u/ev/xp7LO3aJdLzUb994yU7vpZgePNTIGJiZi4hRps6HdjjYTxGC1UqvYCjQ3R8EQUxxj2g35JuerFLbucVmlNjGtKYbraQfIy1Ke948fPv7t8+GhlKVpMzNAU9PedBEBI7GjUPBLg4rLVXxmif152NGfEGKOsZuDIOL5dD6eTgZOMULoncAc3NC+jFo+v0sA9lJkckQidpV5XWqt+WoS025qctEouRNx1dZUDofD/Q8/ff+b39z/9P68Pzr4kBPFT3Lol73vk90dIl0SGlNZl3I4ttMZVSIiRFbzsiwmAo5DlfF6x4N7bUJciAUZL7YDfQxqXctyOJ6eDsfn5/U8WxWtTVtzNQTgXs+Fi7MwATqgG5hpU0NzUwUzd3I1aUpVoDVWVXe7rIZu7t3ULqTIIXxhnUw0DMM4jobAgUMIRKgqrZTj8/PT4+OHDx+fHx4Ph8N8OpZ10VYi4xB5zAxgx9PxsD/un05NcZy2MZkzm12wWOs+bxcFoXdolgm76a17Lz33OMYBzB1a6aHTctofp800bjfvvn43jUMkyjGmmFoTMOu+mURf1PgRoRu4BooEjL3FrgcCY3RzISYVMpfuR4yABBAYhxzGkYeBTEdzNvFaRcU6vCWtNdf96XgqCwYygNbEHZnDOExXV1c7zCEpYlvXZV5Oy3qSVpFos5nevvv2m+/+MG6m8erWo5Xz43aI3vzwNE+7v/j+p8cmvt1eT1NSmVPOrwgZ9orjS9OHHs3EGM2trMVUl2U2D8MQEb1JFQWOFDgSQYphzBmBpQk5ArODAUIOw5Q3OWdD7DoCM6WU0USlmQkxEqcx5GmaUko9Q16X5f7jh+Wwv9tMU+CyLDlGQyTiEBOH2PtJvaB9eOEMfEZYcLPWSlkXY8vIMm7Aw5B2Q77iMIQ4It6GIW3wri1zfTg/nv/29NPhcP/TY7bdV9df/zEN766/u/1us9nGyOf5PtCpSW3SQrAQh3G4ntI1tfj04fn5w9Ph/uE0P696sHTi7UybSoOOW77FrWnMaUQikUbE03YzDrs8XKmS+EIUiTgEy0Pa7MbNdogJHTZff3UXU/x8sSHCFGga083VcLWbpu1AIV5f+/WubjIfV7R2nFdfzrNpTSjmGHElRgQFBzDu3myMmAIGAmuu1VpVaWLKRqhqZkjE0DlwoTdSVF1W14aMOcLNhtw4WThCPJ29mAMAQ68omRGQIoKzQiCsSG49if8cibl5I7RujjZt5+3uOqfw7utffvUmT/oxFnE7qJS+qxoEDlMer/P0ZrP7Kq9+8J/4eGaO03S13d7lcdvUwQsiEzB6x4/ADYHAzKyptrqWhdHm+TxOY4wphDAMw1LqulZ3VzUgYCIiyikAOgO4SOVQVfenNSe8u8qEYBiAvtC+Q9cgY9ebqIILrxIohkBsrk2lSFtVCroSIjOHGJAAzDkiBFE05C79DkQBgD61bhDtwYyBhUDDmHPiEIjAjEyxOwx22SYicsg5bzfD1dW4u4p5VNFlOR4Op+NxPp/r8fn0/HjYP+wPj0/Hx0eTs7b2hfz9dep3ZMfMEHum0Cm+vTxzsTojoECg3lpp67ksx7qcTBq6BSIDRo5MkSCoqzueT8vz07OawMXsg5gSc4ohxEh5oO3INzfbt9989fbd19dv3oy7HcZkGBzR/SUhVBVRUX/puEeXfjWRmfh3ssnXsr7/+PE3P3z/04f3z/sntaqq0lTapWjXQYpuwHuxG0EkQLBOiCEmSolSgpCM0Xpkx8gGbsjMKeerYbwbprsYd4ijCpNKsIq+EkGMXtr5eH64f/jxw/u/PR733o0dLxw96RxVBHRiVfmZnPrvOcwsUOhsDCJSkdM8n5fFADCyExpeOCb0r/COl6oTuAOou5uutS7rutZCJZVlabVabzR/xnUtT89Pj+8/PD0+n05zqY0iDzFCDNa5Vf67sZjucGy1yrzW89yWJSIwMzE5AgOQOpUG82LEUqqouV9Sqt4T1rwLoLSVdT2e58N
pOZ6lVLROQTcA6HstAQGavXZS6UZb0H0EXyp27r2c5GqMxBdTqNeG9T1VJuKf94F6NdIV0659czdpZTkdD8+Pjx/ff/jxh8f7h/Pp1EoBcAYP5C+2ma210mpppVRF5ggOxmxuUlu7CNo7Wnlh7ACAMxM4Ijn0eru9sHnscmEiVltbS53n5XQm9820GXIextzbUhEFA1Ptznrrl0R4R++baG9a5WZq3ebatImYKSICkJkj0Wa7Gcd4c7MJwc/nk7pmzhaQqNUqogLgFEnMiq4mYABiVqoAUIoZKN3EnPJIIbxIJg0RQuAU02YzbrfbzWZDeSSGb7/9zttuyiRFxuF4f7+/e3sn7m/e3MVEx2N1+CQqghfvLqKLMp89xMCMbOaizcyaSFQGMBEBsFLIg3ajUUIP1IkwTMgIjggphkDMMaWcgTBqc1MGt1rK+SxF1AHdKCAQqKqoJCNVK2td1lYHqIareGxm7kQeHdkAUC+q1s5AdANT4sqft+wxMxNxUBnAKNIwDbtp2HIgRKCQElIKO4Ez8n3QgHNrT6fixdYW4+YaePPtzS7f3F2VISWAU60L4Rrj9c3VL3bjW/ZUzuXpp/uHHz8cn/ZLPQrPvCnDJGDS6qqInMcEUwhDpzYgE8cAgdRFjAAppXG3uzHXq+vddjuOUwwJASEPX5RgmDAwxoAxYAqUAqSIMdEw0BA5Bz6c7dTwuKgUrMZiBAABDQEVusCIAnGKNGSeIjC5XuavSG3aWJEvUydEAsDAGAIF7j34utwvJb8hZ4JgAmJrkbV2/11HQro4zlL3fyAHcGgIPwPHA8chj3Rz++arxUuVGPnm9s3VTRxFaF6KPIqu7u5G7jRNV7ff/oM37/7g+u6XavT2F/cfPj58vL83hylfE8V1WWoxUGK89N8GZnHodYSyLKY1gnWKiZmpGgDmYdj29ZF5KVWto7IQYyTiQAEBVOp6tsP5SN52Y3RNY4hI6VOajACIhBRSTGahOaMTKJiAC7qDN9O1lnNdz9JKdxECpI5xOKqTIXtkHvLIHLve7WI/CKhqpZZSSqmFCWImJK9tRbfu7NutTIGJOFAehmm7vbq+urvN42jux/3x8eHw/a9/ePj4PJ/Lci5lLXWtbS1SVwD53CQKAC4wHIfAbKpurmjuCIzgRK++MdD9WhEZAWxd5vP5WJaz1ILugcC6z4+IOICag4tqKe08n1url44xIQEBqiuCmlMI2+vtm3dv3n3z7c3bN3m7wZic2RGxA4xq0rSu5bA/fvzp/f379/vDUURijDhmqCGFzx18LkpiDkFU7x8ff/z44fmwPy8LgqpIrabN3R0M3cGJoW91DA4OL6BzAPbA6K/FOkH0EACBQsxi2Jw5bjbTm3HzVRqugYci7BCHTczZQeZWDufl8eH5w28+/Pr+/v58PNVVzdmdvZNavIFrL2MZsqi+9GX64sF84diLL/04zIExxhRCVNV1Wed5XmthRgpsAOCvMCsggCPa5w/7EzbTqeWubs2EAMKQAXyVtj8fp6enIuJqoiLSSq3zWs7n+bA/nJ72paojGTPGCDlRSl7XHllfmuS4vUIx6I6qUJuJ6rzKWqRUILSgAYw5bscpxhRyBgc5nqq2ZVlabaaGjkwBoBvcibuYaEdfvAkqMJIDWp8QITBdTCQBL80+uyl2d3h5LZ51vaGrouOQhnEYmAJdmO09nkG7gF6v5kovizIzMxt4lTava6tF27Ic9oenj8fn+/nwuJ739XwWESJ2ROtNDqwgmJv2jpGoJrW5eWACRFPtMIu7G+LFd8uBiDAG5X4TPtX/qOPnDgTASAwYzL1Ksfnh/YdW6/l0FJFf/Or3bm7f5iGaWS3l6emZEdpL13d3s1ZbLYZCQOhmarWJuSKAubSyEPgwZHQq3oj43ddvY6TdVSrL8enhYT6VzRQAQi1trbVpdfSYOCC1JtKkiraqtSpxTGkap83br769u3njjq2KaHQYmAkRxpx3mymSucyizWzNYB4woIs3aQsFf/f1mzzmr97dtbY+H38qdfkcUe7N7IjZEZoIEEZIMcS84SBRWnW32rrXoLm3MqvyGiiYoVQ1dSDmQIjIhImBiUybqgzDdZ4mSoRoqLWc51MI83FelmrSyAGZ4/lEgUOMgVIIQ8hK040N1wsNYMHNyCFUI2kOgC/GjASOYKiafQjal3dABA7IDKAK4IHTkDbbzfU4TUSivoqKo5NH1xiQB6ZtoAK4VmgP84/2m8NSvjKYvtpdj2+vtm+YXc3XYiFubq6+HmnTFj3dPz5/+PHp/cfD/tysxa2HTRyH2Ngen47FfbvN0xiQIxMCo4EVbet5L7J3TwGvpmn77bcDkg8jxQTQ2ZFyKmX2F+dxRAyRY2AmAJe2rmtwDogAMcXtFDbD7q3wqeHzqX145Oc9LTOLKhGbmQmCYWAaMt9s0pQxUnO3hh7IQJu1IpUIY9+QGZGYQkqUAgUWVQdwZisS2a8nSknAvBR52PfeA70PGiEhGjB31gQiulJXM35Rkg8cMwUAWtMwvn339ZDT7vpmmDC32WVYMXR6gBguxXiK4+23d9/9yfXtLxBCzrcpbGvxZS05jo4Etro6mBPSMAyOUEQvzjUiy7qCtTCklPO4mcZpMkc0HzcBkFprpUkVdfAQIxD2E0cATSkNYyu1zOfjog/7wug3G69CP0fliQiYkVEBzFSqNJTKylhbWZfT6fh8Ou6XeW6tOQBSYAI0J1e4WKrzkDNxMAVA5NAbipCqxcohcqgE4DmH/gteSuFODkgUeneZaRx3m+31drOdOIRW6tP98/vvP37/N98/fHgqc6tVenLl5g7tsm2/HKa2zGU+lxSG3tv8ZYHvfwwIqXcuM3RgASsix3W9P5/2x9Npnpe1tNWreFVuhs3ZoROhAVylitSOojgEIDBgdUcmcgxxyNvb6+t3b3Zvb8erHaToTH5xYEA309aWeT0fDg8fHz68//D0+LCuC4CnFNizlhRipE8lGJfWFbcyr8tpmed1Ka21JghmYl1Y5Jfia7dcQ3fzzpMwVzUB8IikUgLpDkwvAoIQiUNwTAbJYKB4PYxvYrgyG8yjegCKjNm81rWcl/3z4f3D04+Pzz8dTwcVV2NVsG6W5wamCEadReIqqtLc5O+W9/zW0EspMYda2nyel3mR1mLMMXT+URdUf2LFfDYRLzhEZ110PqmqltYCMufkYFXlvC7neXYiUy2llrLOy3w4zefzfDrObV6wiocQAoccIQbFT1WY33G6Zl6KnM5Saj2c2lKkCYZe6gEijinkcUjj5Ijruti61MNxXRYVBUDmAL0jdG+s444dpvMuewEkyszwidoFDgjIQAa90wkwgHd7JKIX/18HbWpN0J0cGRANOkuulyYuN+tnPBL3JrKWUlstra6lLfNpPT2en58Ozw+H58fldCzzuSznJtr7azsYkc2kgZFDJAopZTXpXlCu3XLvIuHoK5ibdmtnDgERtKMD8Akm6uffjblCd0Zw9954SdpaVlEJIQBgDBkRa6374/7+/p4RPtFdHVRB1S99HEVFpbRL40RVaWVJIY
zjhkMGDCnlzd0ds0s7H0/Lh49Px/282yiH3ERERUGBQcydrPU+kOKqCBhz3l7fvL2+/iqlnXtYS6mliJgpqTIzOsZa5fH+Q1kKICMp0RlgPVudz8vDw/542os2NZuXZV3Px+Np/kz6Dq8VJQ7EbKr9K/T/puzPnuzKljQ/zIc17OGMEQAy8w5VXc0W1TJRZiIfZdKT/n2ZRInNJru7qrruvZmJKaYz7GEN7q6HdSKAvLdFUttgMAAGIOKcs/davty/7/c5x0TsPRGp1AanRSRVK6VILRXZFGpWMTIOyEzeB+96x4wkRuS8D92w2Q370TNKXZfLFTECnItdainEDh0bYFXLuZYiBo784Po9Dvvqh4zBUADMGYKAmUK7jcHYDE2sVu3y+FaQATgiT6wCnvzYjZt+08c+eL7Rnk1NCcxAWoQfeyaPJEaSdD7P2k/DS4qb/djv4nYIoQOK1Zi5G7rRUpmuD8t1vrw8n58fT6ekDrZ9h+TQkSGXYllVjatiXZKZCBQxUdOqVrIidh1DDNvY9SG6EA1JqloVy6Xk/C1klKi12cg5IjQVLamsvDbyd9dh7MJIYTQehxACDZFfXtyScutEeWM06jxvOj4O3PvmUgLHHCIH3zpZamqtlYEO21mZnSPvyRsgofNmS60F0DxDH2joeew5J5fARLXtIHYbNiCj2c0q9gY+uV2OfRTLL6frmvL9+w/77XbcDewSqa9IKqZqzrEke3lZIQqFQ9i+h3AAAecWx53D4EA8eSNy7AjRRB3Rbrtl7+an51ISu4hEpWRQcZtxs93fv/9wdzzO81KqtJio0+ViAGDITN3QI+KyLiK1sVIcBxd6DqNofrlWNDHB61Lk+1ArbLwtvKXI1KymqkVBECHl9XR6fnx4eH58mq5TKQLo2Icbd8BEblH1yOwNsJIikAvEzGYAxIE8O+zMq7UGhyBgzUXWbGotp9F7F7vYj90wdEMfgydQnaf568fPn3/5fH58Wa+LZG3b5nd7WevB3l5LKfX56/nh04uDwOC8Z2YClNbpVRMwRAMnSEBKflL9OK+fz5eH0/l6nnRK9bqWKeciGbAYVGO4uZrIMZupb1W4md2QRCZYnQf03o/d+O44vLvz29E6r4hAiq0Xaaq1pnWdTqevn788fP78+PXzcjmjVsfgidlCjb0L4U0QK6LTfJ2XeUnp5XRWNOebZIxEVSqIoioWUZM2awAgFNWcixVDISwAtS4stRdv7riNtiFTbAGKgBF5w37rwxF5p7pJ2U1TUQDfB+fYrC7Ly9cv//z8/Ou8Pp+up7QWNUfOkVIRrHYLWmxeHUYENakmClVIhey/pFOGG6nuNhdBROc4hshEy7xezlNeVqg18hCdh6afVXlVp4AgtCw9sKZyuonU8SYMggoKmo1DF6OBrWnNpRbVlMs0Tefz9XK5TNM0LeuyrmktWsUj9l3YbDc++Gq11CJNjt7k4X+18YvUeV5fnuuUymVKawJR15SUVYSroGFwcbdhR3hBqTkhiFqT2ZApIBoocoNPkiNGRa2iVUWUHQ3DYIDtwNBQ04AIeJsHNXiXlKwmb9gnBSy5pGVZp9WN2XU9iJZSqlRAIMeICq+BUW+XNDf78/O6LqUKAF4vp8+//OX89KWsyzpPaZlLSuu6plQQ2cBExUxIa+z8brcnF/oeAMsy51rqK6ibAOBmQ9eqUqvIrS+LKEjoEG+QyObMFTB07JjJEYM1bE7b3lBMXh4f/1lVqgzDWEq+TNfn0/OXh6/eu2+dGEAFNvTkIgKoJGkyNLNSS825pOI4xH47jjsVG/r+/t27tF7/8R9/+cf/9Of/9E8/X87zpr/EODrvnXMUGAgUsuANbE3oHHfD0N/fv//7v/uH/f4uJbicT/N8qaUgslRbUyHClHldnr9+PXvHwbku8jCiQpqm0zTN0zX//PH5T3/+5enlHDznspzPjz/93fIWZtk67mDW9jBjcs636RIzg2NPdJMSCppVUFHJLZZCxUzAgAyRwYcYwjD2MTrkUs2Fzvu+67bHw/vYhZqXOV4Ie6Sg5HNO7HyMXd9tiN11TqfrnKshRz/swnjAuDHHKmKmRoBmqrWdJNGUTEBMilH5RiJDJMcc2GulwQ2HzWG/3YbAiGJmN96TqVUkq+QIvTN2wOyYIrOGzrlOMtcUPOw2/tgPBxc34Pr2KCQ4r3RRgXVerpfr9Zqw8yP24LioClOIIwJG39eij0/P83I1LAoKBIgMyAiRNI1xPt694zAqemICcIosgq0N+7ZwBU8xUgjsHCOiVlunVZopVAwAu442vRuHuBvd+314eOTzeZ7mNWeyITBT77DzODhgsFJRkUJPXRfHse+GwO6WKUmOuLViVVWUPHoXfOh96LTodCnLktaUUesQ8H4fiOx6xWWttcGnDFTtNqNs+2QTn31P7GXXGc5LyrmUGPthswkhMBlrBOeJiBE9cUHRqvN1/fzlqds/H9/tIvmcxdQYidkRIToehi6XPM1XqhY8F/Xeca7smAAhl8II3TAcjnfv3n847PZmD7asxA65TUmlYcwRmRx57wzUTJjZ+y5ELV3RwkV1StVfymVK9a95JKYGRQxKhZxISy0gUgx1Wdfz+Txdr+u6VlEg52PvQnDONbxWk+apmqjlUuWW3XrjqTOj986MEZsSsYpIcL64kg1rQUQhugVjIhiagGYtlGuZzufz88vl5bzOa01FbxMK/OsD+etVcn18ePn88TH6nsmPQ4To2eErHsPajgvGarQanVb79bT+8jK/TDktwovaonWuJZeMVgDEEAHZyLEDH+gGzyBV1RYsIaIoog4dhb7rdttut6W+A+9e9+hmzaglrZfz+enh4eHLp5eHL/nygmV1LWqWnXmETiFu4BXX3WyWy7o8PD9/fXyY5rnUqmpiIAqi1hAhVd7MKwBkIlqyaVEUoGpYxSM5gsDkiAjY1NS88x27DbqDC3sfjqp9XjkVKwIKRAWkLGm5nE6/fnn48+n8OeVpXnMpZuYBgzEpaQUVANVbEWOGDaAu1UpFKfj/Ez7824qGmL0PRC4vZZmWdVlLLiDmkNpqUqmaARq+amNuhH8kIwR3O86jqtZ6420H57u+b6NQBUyliq2ny3S6XK/n6TrNy7quOUtVQnR94KHvdxsiyNdSaoMt/fUIuV0qkq7X5XS2tWqp7D15JrJbGQXEzrsQkRkdx76zOsi6gEhaU6nN8Nhs2g01xs55QgJFKXVdV+fcMA6IZKpYcnOu3gZmROxccA5EZhOrCjf+q7V85zwv0/MLOueY/TgoARCwd4hMpsTU+iLfXovqmlJaV1NDs1LK+eXll7/8/PLwkcFUJOeaU06ppFQaKF1NRURrztWIc4h0W3jas9sCR1CbHUmlQXJvDSElVVEhBag3DQW1MBpFQCJkYkRUAIHbh2lqKCQqKSUi3u73l8u5SE0lN7Xym8RH1ZYk81oNPILmNddSCKrUssyTVmH2Pg6+G+O4JeA+RsfxZXn55deHP//89em8pqTGYh62vh82281+66OrzSnK5NgH33fdOPTj/f273//+jwj4y
y+fHh5fHh6/zNNkSrXYmioRDmPv2EpO3sG27/e7fn/sfcBc1lpFFdUspXS9TIRWNeVUtbUzvvtoAIAdhxAA4EaIca7J3UQaycwAfPARzaqBqrXUblAAZnLBOddvNsNmH0NkICpCxAyAoiCIFthx7N32wEiefZdzYecde0c+pfRyfno+XaqS7wfXbbnfgu+UULFRggxMDdvQwszUUMxqIavk3jhL1lD9WeustVMHHJznlr/TTpVtJK0FQMg77jqMnYVgOSNi6HsXB8SoxdUV6wLKiOx86JCd1FyLliK1tBxUFRPPPo5dtxlczELZ+aACInid1k+fHk+XF/bKHp137DyxZ1Q2IOAu9RxISEhApS5zPp2uGz+9FZdE4DzH6GNwwXvX7l+ptVhOyLfGWVMfYti46NFD3Qa4zlSKtNFeZHAoWHPNJtUAIUY/9CF2zju+NU3VtApAi1AyVBEzH8x1kYhC8Nm7nIgMmSB62m08okWGq+d5rSlJbhLF9uP7D+O7y7HviIIZlCop51IEB2YXAnYauugDOx+ZzcGm42k+/w//z//Xp6/5f//fwP3xDq6nNS3I4AMhqnOw32+Q9DK/rFlqrWB1HDpyrlRtnCiOYbvbvX///sP7H4ahP18utqyl1pTzklLKpaoiYMnZo+9iZOcMVlEMvRcDKSLMnkxhvkyXy+XbBwMN8X2Dt6rmYuvKdXFotSQ1uy7pep1qrcTsIrvQDZtt6DrHhKZaMqgSU60yp1WnuixFRJiU0Dv2jaTEREhspqWymIFazbKyyymLZDUporYWhAlVAWpJQRWvL9d1WkqudrMO66smAN7qGPzuyS+1fP36/OuvX4Zh43wQsUEtRPatWCBSFTWqFEvF55Q/nfPPT8un55RX0uJpzZjYkqvFKmgFVWxFLCCo+Ztd0AiIm8DKVFrdq8zsY4j94OOA7ADpZiZpIWG1ztP18cuXrx9/ffr8MZ9fombHiuiQSMkpxxyHHO6U/OsDQyFGAHx6fPr46ePD4+P5clpzrqqqVtXyKx7ZFKAp6qppBa2oimRCrDHgYeN++jB+eB8POwwRxbRqcLSncMf+HmizlpASznMVIR8GACe1XM5Pnz/908vp55S+pnxOueH4vZlX8wIgJEJYq5giGqOCVgMlRDSFspaS9HtRzPdnAHtlw9hrFdPgaquuOed5Xkpe05r7vo/B+eCBgIgdMxIrEJAnH9mxYyQCbnh6sVprzlJLkSrM3MVODRRJDea1qOXLtCypViMgZ8hIjjzE4Da7zTj2PnqpuUoppdygeoRvoum3S0Sm83k5XwP5EGPsOwQo6ySloKqjMPbb6LuyVhHtQucP1JIgXx6eq0jWCgAO0SEzkCNmIu9DDJ0UUWtd8sF7T0y5lnZMlSpmZgjOuz52tZSU1wK1NTRa5hKo1nk5f/5c80pWt+/u3H7rYgQmAHBq2OY6pdjbAcZMSnXEm/2+1vr589fTy+mXv/z8+Pnjpg/eOQPKWUrRWg3ZiIjZI7pUYU0mL1MIKYRXgQu2ZCBtrPIm3ZMbkun1kX3dns1MUVCbjUHotXZXMAGpVqveIF8AgCqllK+fP/37f4f3H94djveHu7vf/fTTH//4h67r2j+sVU7n6+PjaRwKgC3TjFrGDqWs15dnJPfuw++2uyN3PXfDbrNjw+vl+vGXr3/+8+evT1fqNrtd3Gz3h/3d++O7H3/48fe///12vzNCauZjH7tujN3QdV3X9f3QPT48fPz68HJ5+ec//enTr5+vl3VZSspCROMYnUPTOg7+/f3x3f3+/t3u7n53vDscD6MPndHwL3/6er0uRKbqi3dDP/D3s2QzMHOO22LunGPviLkBY3LJKtURM2EMvWNXyINRXlp4pTKgR+hi3O3vxu0RkE3MUUYVzWk5vzyZC5vi+8H5rt9QiP24PYgoIotqWstlffjydDqdzuTC0G9cP1Lo1XkAMGRsSnUAVIFXWRaqmavIEcLwdhhT1bzW63m5PiUv/XRZaqqkQEDtziAgADOtBupj4HGkzca6IS+TEvpx6DbbEEdTurxcSs7zdR631+Fwxz6knK6nl+vlkkt2Mfabca7abePh/nC8H2k3Q604c035Oi2Pj9PHXx5P1/OwccMYu55dACTzDkMkYkh5tkkpeyQ00Ov5+dMvH13d5VcOGSL54GKMXRf6LtyiNbLdUlul1EQrmEmNMYdh45F3QxwDvztuTK1FlDioWtJ61YZKNSLvOAbvCJu6yARKLS01uo0pAEmJ2Xvf947JMY5DT0AO3bTMUnWM6sANgcZOTpd8oXJtvLxXwwfdWD6/uRy5gMQiWnLJuUG4idijMpJj9sjOIyjjpuPlvH78+V+mHI7v/o7RcH5eppd5uShYtNiFrhtH8vB07uc01TUhWt9HDiEXmZaFiJz3u91udzjEvmf2QNQGB8uaShF55cqVUpHRsWe+MQN8QFXQKlLIkUKpeZU1578hw5rYjZ6uaaU8kxZanYBNqaY1mwExowE71w19Pw7eEarUlUCEmXKpVQojIAhYNSU08uxDYO8cOyZkAxALbSK0LrmsBWtFZDRQRTXIpdK6GioR1wqX07LMa8m1CQO/LYS/+cX3G4zO83q+zJfr0p+ua8rx6mJ0sQvDEEJwzMhACDQv9fH5/PXx5fw051lQPJuKJKukFUWwAimAogAAoBKIiNxshPSqpGybQq285nlZlyXlrCqI4Agd3LYWKGuaWg/m8+enh6/L9Yxl9awBsZkoVFQFRVC+2/ebUsQ5J6prSsuyLOuaa6nN8nVLjTIgROAbTFBAFZt9HBG8o7F3u0M83PXbQ4y9sUdyhLwh9w74rtpOSpcL5GylyWVEaknTdHp6/PXL5z+dp8+Ak2guFYqwKomwKDZRkJiJKoiiGiqCNAMYqZoKqfwt7O7735s2hQeRY991fQzdfJ1FJOcyz+vpMgFRrbXrYoi+791mHGLXs4s+DqEffYzOIVqLnkha6i0XXs2slZs8L+m6ppfL/HSeSm39QlAjUVpTzbV455wP/TB0fQSwN6lKW7K+7SffV2NqNWUp1Y19P47DbgemM2gSMy2E3PkYkM/TbIx+NzrP/ThqlXVaa1WDCmYOsKVItm7lzTTkzDkGRDFhYN8FEqdVRIQIVbSqgGn7Dm9CY4Sb/A2BAKTWOtUV7OoYUUdPMTjfd0RMIlaKlt/IrVszo+/73XY3T9M8zy/PL5fLNeW8HTp2vlZVNRFVEQRDxHYiMaNSasq51CzqCVmFtGVCtQRdu+Xotj9ow7+GOm3K7PYmIzZLRYMjADS1cMsAU5Vb69VAVYlaD2ZdJqs69n0fwqbv3zZ+UZ2m+fn5PM8rgqV19ShWmVCIuB82x+O7/fE+9mMchv3dkcWel9WHcLx/Z8zDbjtsNsNmu9/t7w93P7x//9OPP233O2RiYnIu+BjjEEJ0TZJiNefL8W64f7e5f94s0zmXnJIAFkBCZHbMRF3vuz7EoYt9P2729+9/PBz2IUYD+vBhn9IFwEClSjzsN818/lbGwKt97Dbecg6bi0GqtuM/ijF775zrGD0a1SRStUgC
MEcYvetj1/VjRSelqoqta12vspzTmt2Sut19GPrglZE49qSgBmXNS16nVJMYuLg5HHfHd7EfyUW4ibDwm0SUCW5FjAGZkTNk8N+MIwhI4JhC9BhDH33vXcBGy1MDhfbvpTkyvAvjZnP/fv7hDKzFhLcjdaF5c6bLtMyn6fw0Xzbb5eK6LldZrtN0ORXJse82+20yCdvYj9FHXwGLaCl1XlNO8PQ4vTyv17kAcFNiBUUfQwzRx8gOS011ESyRmJlxntPDw9PWP5fyHYeMkT374EMXvCdEk8AAykS+nepzXkW0VFXwPnjEGNpgBhQMTRlcWU1zashZJAyOAyOaajFFU8PbdMoEyZjZAIoCEvG8eO+8YwRgxBC8Wo/I3ucYSik1uHqD9qAxUy6Si0K15lf8q8shsQKWUtOa8ppqrW14UipoBUCP5BCqJ9v1NK9Vn67z5fF6eTqPwZbH6fT16+NXctxvOx8Om93gOtqftnOai4iYMYWAbMil2qf+Uxe77W7X9X3OuZRSRarINC/TvFRVJCJDRayqkCuhAYKKAKJzziKBVA3kSGRdZaLf+kZexSXN4aCllqzLbCUZgAAUIAVAJlQWKVKFCENwXXBosmq2rA22SwJs4ImQkM0cQPTcB++8ZyYEQGLywYBWgVrPSfKSF3ZETO62GlkuUmU1g5z1ck7LnEquIrcUg9em/esD8u3H7U8MUA1KlfN1To+PqpUdjkN//+54PO42m03nyapML9ev//Lr8+Mj6roBCD6qwwmn1UBabxab+YoAVMwQJJVS1W6HjhagI7WqVJEiAl9wGDe/+/3peLjv+40PnWoVqLXk6Xz9/OvHL79+fPj0cT6fWMSzI2pR7qAqKqsmtXmF6Qm0vG0wzvtuGDa7zTiO+PQo0sB2tRXpCoYELWTVRJUUQMvN6knMGHve7rrNvg9DoODBE3nvuz72Bx8+iG2nlVMGEUOkEDqteZqez6fHp8fPL6fPl8vXVCaFYoCGzoBFsFYtYlWs1FqraBUTJTVSInRoZApmjOiwmTXf1uSbz0cR9W0cCIDMHEPYbrfRx+v5ioSiMi2LPNp5mvoujkO3Hft393eH3W6/2W12+3F33OwPXdezo1qW6+lpnS81JVBgx8HH2A0GmJb1l09f//Lp85evX3/+9FUV3t2/G4YNIqUqL+drLWm33RBiDME7n9cppfy2ucJtovfXRQw00K1zw2bY7rf9bqe11nXWdS3ZSC0Auar1cs1aUUocOh9c6IZu2BpwTwaqDXVYq6oiG2qVrIuqEUI1fTm/BB/7vkfA0p44BDXJJckq67KAmogA4ausF8DsNp0BhFwuDw9VihKSd+Nu57teUqqi1Cqnt1XMue1uZ0jDMFwul8fHh+enJ3LucHf3w4/vg/Mvp3MpAtCitbSJckKIMQ65pOu1lpKXtRIwqLuNk15l9DeH87eisBWRtzbMTf97Y9i02EcCNMlVXo1tb38fUYFQa10vlwvSqRt2m8387j4v33Jt1GxN68vlBKcW0QDBQSmwHbq7+/fv3v30w09/N+z3wNiP/eGwCUSshTwcf7gXk36z6cc+dLHrYhdiH2PsgvcOselqKoIiFK2uGqlZLivC/K/+/rjf/tt/+NfvPn36+vGXr08P5+u0IOJu1w9DDN71XbcZd9vtdrffHY/H+/f3w9ABqEr6wx8PABcRMVUA/eHDLoTXUfLbEnBjdLZRhUNCNWuDODNNpVQitS6G2MXOc2iojEkV0TyiR2ADRGQfDUCWKukk589aVeMZ58nnTP2GUJ3DLgQAKrlO0/z8fLpOUzfud3fd/bv3m/3BdWNrrhgAttHua3ftDdprDSCArMhv/XFiHobN+7sPdIy/e/+7H3/8/W5zJHCS26fc3HKEZIiE7Lpx8+4Pf3BMp+Pmuk4LoTqqJlDWnC9SZ9AcQji/fI3DhlwotUzXi0juxwC0U6cUGRiWtM75+rJcztf55TRfzsvzQ0orWu20xLy6KjYYxTjGuO366BnWlEWVzQX0zntUvJ6v5/FSX4sYA1AFRWDvQx9jdN4RgaHpq+hEVVRSXnMpKXsfuhi9d8DtyWMkRCJgp0TA5IMj5ujZkVmpuVoxFGsAS3NkRKAtzKJIVVMwJAzetVgzdn7YjN0w5pzTmuZpMVvVzDPESNelXqYyrxWL5lr5dvr+rogBRDBQERNhxIbQNSvny5JOMyR1yp6ECTtP0QNarnlappfrJcj8vMyXojlQBwTsuR971/ndYX+Z5yUVMTBkIE8u9H3f90PfdX0/OO+LtI6JVtE1pZSSiDTGl92yeSsAt9Gpdw7RMxpZBUHHUsFXz+5buvj3FypYVUkll2WRdTYzIwIfyAfnPZKv5nzsfeun9p5AgwPNGaoR4ZpSV7zUUIVcI+4O3dD3PnjHDGDsnO96AXdZNRcbNiOCUUtU1RboWERqSVKr5KzzVNY1txHKfwlyBn+rWzAwEV1SoplySlWyqlyv07Km63U6Ho/bfujN59MVztcwrzs2c8SKSW1VMa0VaoWqzU36ih2palZqa5Uo2E0nrLUdGVsgct9//dO//NLF3nHAOyaCkvP1fHr8/OXLrx8fP39eLmctORAyc4OAIZFVrbWkZZnP5/V6sle3hZlVqSnnlFIuRVWbp9Ya7azZsgiQ234LqA0SYEiEhsyIyGpchXJ11Tp0vYu9jwO5ndg2S5czlmIAgqhVclrPT08fnx4+Pj19WuYXs5Wx9X1QAGqFUqwWrQqiBmAt/s0IsVnEmshWXx2ir/3k33w4318K6DiEru/HcRi98yE47zl2kZxbUl5zmedlnsM692iwGce+G7puQDNHFLzzwVeUxFwQDY0IYuC+78bNxoCuiDEGbjmCuZRq61qICxisKdcqABRj7Ps+xkiEtdRSitktjOnte/7bIgYBmSmEEGJkR4jcd52tqyxrLXmdZyaaL5es1bgt+oMZu9j37LoYCEByzmte1lVazB5YLeWGLjTNWczMe4+AOadahAlFNJcitRYDIiTmQE6a8kQEDBwRAjVVbJobaMdzCP24JeJ2rm/q2m8vBDHEGFJGRFWVWn3wP/70U2A8HjZWZc1lnhbnXKHy1loxM+fYwDvnzBwxgFiRKtVMxawdwm9xP43Je+M2vRYl0ARNpiLS0qyGoev77tbd1PrWoPr2zr9WlHmeT4+P0fsQwof7d29RnWpapKSygii1mxOQiHY+vPvh9z/+8Mft/l0cBvI2jtF79WT9ht+5/f37YxO8uMDIWKpcr0tOc5d817kYmMmkVjVrjcYWrpLyWoow6HbjGXd9wG3vX+730zQj4GbbjZt+M/Rd1znufOhiHMZhHLrICOs6l3Qhyl3XYNlMiJsx/lW0zW+46rfBIULLVCIkwgomonnNqOTQM4Vx3LVxg2npYwhE7TwBIRCJUVG56PogKYtkAysu5GVd0qoi3nlEMoVSZV1WJBp2x+1utzvedf3wXfLu6+ehZmCtj/L9E270G2YvAhJyCN0Qd8fj/fF4N/QbKZrmVYmQGbwnoja2A3Ku77fv3xtIhrSeAGrRhv1RkZrTcs3LhRD
WeQr96LtBDdK8mNZA5js37noMzMFVq9NarlNaVlmTLoutK0j1WmGd1cB6DgQdQgRzzQe85mTgApopIbBj34UQwzfdlQFU0dZlULwpvb1jgnYyqSpVzG66sVKtFkZD8GBMjgkBkYEYiJDZBT8M6Jg7TwyWay1Fs6JAU/Fjy2Y2BRGTqrXUqgKI4kSimkFAdq5x84MFVDEEYKboXYziQ76RX5e6ro2b9pvxi2snS1T1RJt+2G82Q9+VJf365en65anXvGMITJGY2Txj8FChpvVyvYSynEzL9rAfxtH3kZzzsWewcXvYbJd5ziJQbpYMZaJhGPq+d96DkbQHvU1+Si211qacA1CRUgXARDBE33WDcx0ACwujAxHPVs3p4Lv+e4DPq/6HGJCr2prrOq91XQiBnHPIzkMIgVz0G++7ftjtu3EYxxgd4G6Umuqy+mmuWICFHYqKd7cklL7vY/C+oZqCc12fhPQiFRyTlWVWES0l5ZzTuphJraXUkmvJr6O614HZb5+i1x7MbxC3BmCl5svlwgzjMAB2yzIv6/zy8y8fP9Nht3+32/3Y7zrB985txnGBuqqsudR1tjKLzBVyoaqERgCKTUoqBlKKIWiL6DZTgBs2zVRUSq0fP35BpJSyGv4h183Y53X5+PPPXz7++vTl03q9sKpn51qwtiNkJuchl3Uu52V9PF1eLtfyqlWqUs+X89evX3/+9dcvXz/nnJnJeRbjG/So6XVMzMBM1FShAiIxoTkiVyu8PBfJRAhdP9y9+yGOW3ZO0S8Zi5hZ9AzItdbz5frl+fnT548/vzx9ni5PCHm3CcS0JpiTrWtdi9VComSISOiY2Tlg0CqSSjt4N16EAaDchN2/+WC+/7WBWQt93G63uxgjEwXvhqE7Hg9rKZfrlHJec6615DXVUkVsmpbrZZqm+TpN292u6zqVPJ2f03yRvBBYKb1KI7rysq5mNo7ju3fvs/J1ToQ0z0tOpdQcQhyG7t37d8e7Q4ydShHVWmsbRd1sv7+5vt12jYjDhAia84KGQ99RGZbLeVmWr49fEPByuSoRdh7ZgRIQK/vYD/f7fee9lLTM8+l8LTkzYs35mrM06gaiZyakUoqKrMtaSkEAMxB7lS2T67qOnMu1pJyLVjQj5xxSAzejaFnS+fNXUQP2+1r73YaI9DUJ8tvn0l61qohst9s//vHvxiGAyTKfLy+nEGPXd0PqzCSlbCJpTaIQAwBCDCEEFwLXXE7l2gK5ENE516qidgHYW2fr7UIEMxBR56Dv+/1+t9mOIlW0qlaQ2zCFiFpjpiVeBefR5OXhYZ6mp+eXcRj/b//X/8vbrmpQyUHXd4RQayZEDr7fHn746e/ff/i9CDH57Tb0PaR0KloMJQRA8KZFlut6KUXKl6/P/9N//EtK5Xc/vfvw4XB/HLtItSQzAWsMLzaDlGpKJaeacy4l5SzeZD+43g0G2EU3DuH+buz6TpVEVMs8nZKkOef16fnTr7/+5cvHny+XS9cNIQR8yyW/VSx4o2e93nOtMQUAIlVFiVoYF9Vc1zXnVWqqwzDstuN2M+x3Yymr1no7ejJxYGUCVwxnsxe0lY3FhlKnaU4/f354fjmnNRHQuNmM43a73e73293xfrPZsAuiYHA7RCG0xc9MFW96wdfJK76K778jX5haTnlds8MKSH0/RB/zkqwoRedDxy4iMbkm1Gf0Ie62br0sn+0qpagCtybNrXuX06pSa1G/JA4zIFWpCFoYiMx3zg9d6LqVUiq2JqjiAY3ZsSuEtZSypHljdv/u/XazR6B1XRUqUs1ZmHt2BMam2HXxD7/74Q8/feheYwfMNOea1rKsuV+yQ3MIhIANsMpEwKhqjrCaqJpJlUJCQOislSVgBoDE3vV9jKFzhMGTaW2nETUyds47x8xkDRSJhETADMiuVTiGVAU0FcjaGD0IGEN07ELIyZfoK7MjQ0QwtSu3M8RvBH4OX2MUouOxj2Mfg+Okeprz8zkNqBahD+TYgIgdEptanuYTOLYy953b7g+b3c7HvhrMa64KqYCBD6Efes25rEVzrVqF2XkfkJwBqoJULUVqVSRyLoQQRaHUqmbEt3sdkb0PIQREJ7UgOK3ZgZLTcXBDH74Hd7Y6nwCRnBiVakuqdc2O0BsQi4mqmgIkooQoVXOWEnTDNMboOxe7gN5lqQDgvVdVzy6E0PV933Wxi957ciwEi+IpycMsU1JARu8Im8SLTV0trhIBgNabM6KWems1wf+mS0GAzAe33W1/+OFDiGGarqeXl4fHhzUt5+vVcsFu3XGIRjEwKGiqS1rLMteSqtYKVVC0mVdbB+41vKfhaRTMmln8dW1pR8zz+ar6SRWYwnyZ7u+PWvPnXz8+P3xdLhcoOTr2TKhKROydi13sB6q6mnNJ5GUq+L2q33Kt87qcLpfz5ZJKrs010yqot5ilxr/RFrxkhACO2JjIExJCQDew37lwF/sPPm6bF0QMrAWfWC71ZVm/vFx/frp8uq5PqZwB1uB004fgeXWMpsusNUnJqi0aCZmQoVmEDMwpgMJrQDNWUBD09F+293x3MbthGMdxZOfAFBC8d7v9ds3ZAGCCkloZI2ZoQCLahEil1LSu/dCjyTqf6jpJXgksl1JLXXNF8ilLKsW5sN3ufjDXXa6X83Va53VdEXEzjse7w939/WbszaTJ2v5GKPZfuhqXsU25TcpaHLvY9Wx2uV6vcnmZ59ZPYOZqUM1EgZyjoXNd5G4IIbhxjP2ALqR1haqJpnW51kpIaIQGDIgmIrXq6/EOAYEZmU0EiJpIQqwlohCAERNCa2wbGWiRnCcmNx9euu3Yb0dyDH99nr4NzlTNe39/f991se98SnOpyVo8mGPvnQ9eVRu5RQzA0LmGmaYYXaZ0ofmNbXyjCrRn5X/xwb15cJj7vjse92aaSxaRecmqdnNqmMnt9gbPjGi5pDWnyzR9/OVfpZS+/XeEXR8/vHvXebcuE5jFrju8+zAc7uNmL1kcg2fM6+XL419KmmIIgaOn3gxqWnJeS0kff/7yH/7df5zmPJ1+t04fZL3bDE7Kom2qTEzoDKgWy1nSklMuprW5cB2Hw36HRFKyVZmvU07ZgFpTuWYhomW9Pj59+vrw+cvXh1KrGasiAaa1qH57r/gG7KUbnxBAqrS4LilZpWitWquUIqWKIqk5droZ+th3Y6da07oSB0DSUjjPWC+Yz6QXgMUgqc61THm9TjM9P7/8+unh+fFJ1Xa73eF4d39/n4qI0ZpK21EbP9Q5x3xj88KbmerGQQdtxxP7zWptplJLrUVVkLDrOs+8TkuC2Q8djBj6DRFZizxsDbQuQHQVVUi9D6o8T3NOaV1Oy3JZL7NI9U6B1qoGgOQoRjeMoe9DCMHHAMS5wvman0/putZlNREGsLZpM3MIvutiCL5ayyQt0BLJquFStC41A2O5uxvfv9t5/zrmU6hVU6rrmueYiRo3Qcy5JssFM8DWHgGQNolUUSFjBVPTRvwzM+ccdS2ACgGkFkNqphFGZmpPegsoIQNC9tiyIV8xlwhIVayWJAItjMkxIS
EjxRCYHBBBsy2KzXPuAnn+zQDGMZFjip4l+j766Fu+qRXws4U1oSlsO4wOPREwAVmu6fnykoGig2EzbA/32/2hiM1Jl48PS6qPz+d1zkRhHLHrBOdler4s61LrbRFXJGntvjXXKiF2W2R2zk3zZZqh1tg5IkBU73yLPyXnpZLpUlS0LGh56P0wfGtdIgAhETpmEAoAXpRysZoNPLCoidScBXFey7NOCwe4Lt0wHLfdu2334za8H+P9Zrt1MacKhl2sqq1TyN57F12IkXwQ4tNS/vTl/Ovz/HCqaVmjnntYB7IOrWmRgieNXnJJmEsqKSUVATR445l9t5fc6F30rUHejN6h4x9+evdv/nf/+u///u/Hzbim9fT8/PnL58eHr0/Pz8v1+vPLQ0fuOG6jCwY2i5yW+TzPa5FioIhqBGKvSiG4RT/dogsMbju2vVrW2qkRquD5PEv9uC7p4y+//Pjj+z66vFxqWsjAO3bUYrqBGTn4bjNsD3fAMR6SjvtnceFwT+4tMZWYHTlnCLmWeZ5zTgoqVVWaoRkaJA3UVBDVyN4wreSYuzjshs394fjhw93dh3f97j2HWHIWsBAZ1CCXdXm+XP/pZfrL0/TlWiaKOO477G1wdrdx0bs6OE96ua4XKZrV2jtOAKagtwRpJqKAN4+s86WqkuDfJFt839MwAzVk4q4buq43wFprlYqEm7GXumdH3vHLsy41ieFagKbs+Nq1ph57A8tpJrS6TpIXrQnAMOVpXoEnpGDo1yJi0MV4PHhCTNM8q6JKiPH+eHz37t3d8YBoLy9P0/WSc1ZtxtHb3KPJt6HdfN/t+sH74FyL9ZCSOXQhxhjjvkpF//j8VERi6HzfY+gwRO46P4yu74HplHM1vd/tNjEiuzTPZV5RS9d1LeFazHIWFQUEMnNMamSiQMjBI6LmQkSqBtoIcuyjB3mNgayiomjgDKAq5oopYxFH7J0vTT373WthIu89IW432x9//PF8ejlfTvOythl8C5sGRMcEMSBVWWstFWw1iD7EEEIIzlRd2+SYiZCdU1WRpn0xhL+OO/heytbmBMS4P2yZOeVcSk255lyZX6HDpm0JYEbHDpCWlM/zdL2e31yWiMjstrv9f/Wv/+G438yXFxHhMIz7e/O+gI3bwUHN6fz185//53///7ienna77WbYdXEXnCcTUAEtl4eny8PpPJUvXR+875yr22BlFalVQY0MGYHNnAqUrLWaGojUXPJmS+9+vPfOP379/PT4/Kf/fBbV2A1F9Pn5si6rmqS0XJfTPF+XNSGyYvEJtcqH81zqN2Kvc+SZnXPEjsiZQVqT1FJLkpJqzSWntGYVJXSEzrSUsl6nCRzvxn0XAm9NFJJivV7iesL6VM+fZH0RksK4lpJkyeWyrL5mLak+P1+WZXk5XR4eXz5+/LTd7Pb7w3a33YzjOG76zTgMwziOXd/FEJxnQiJCQzRAwZZWp68xEr957FXVrBoqE0bvGPF8vdRc+joyu42Bc04FtOU2kwFABVUUH2iz3c5X+fPPnx8eHtf1XGUBzYjoSFOuLy8vojJuhru73Y/ufhg7FzyyKxWu1/zwePn0eJrWuVZkpVLMTLvOjdu7/bGPPQNlUCE2H12z+a0rTNcL6rkLshvr3dG2u+Acvb0WUS1V5lT8khFEpdTCXXAheEY0FQJgz84zyS0C1dRa5JlUURBTNDBmcr5l2qBKEQP24lAJnRI3qsIrDYvAkfNMCLfk2AZfV8tFSpZ1LetaVNQ5ioH73nsffPAjObO2RVDJ9XINY+f4+xRrRHBMfRdBrX0w0CbTobcwXpcLZZ6EBjCgak7ROwJUK9WKB+9Ctz++u3v3oYidL/OXr4/Pp+t1TlZrZCG8zZNbDI1ncgwmpea1ZFdrAbCmP7jVvbcbxZjZO0ds3jnH7BiZgVQKJLUVIDmugaiPv8mDAAACbCQtRKfmcqG0aslqgjESO2fOGVBSO0vJaXVgM5SsiSxGHHfRB+YQfB+74msz/hJSS15H5mowrfXrafnzw/mX5+VlAal5Y2XHNUS0RsQ1lSol1XXNy5yWJedUVP6XmzD4V1tlW8UBzXk3jP3d8QAEd8f94bh7vD9++vrl6fHx8nIuKZ/RqCbLsizzaV2nXJKYABq0SaI1hhMg6C2agRqY6dYsvbVE4LbJIapqKfVyueacr+fL5XTab7vooGPoHTDfmIJAYEQCWIAzRhe2sTtueXOXYP/DDxzeWpdWalXTru+6vp+maylZVE1URW7jI1VoSmZ9k3giMjrvhn7Y7w7v7z7cH++Ox91mfwjdjp3z6qlksCI6S33J+ddl/cu8/rrmcxVlNzAEIvUoiAZqaO1VIxL6gICADoHaegVWwaqhtNkWvLKB8JaV87/WPSPirutCjAhQa80p1ZIBNHg3jr1USct6kxIrLEnCnM6XSwzO+ahgOUXHaJK0JKlJVUTRgAQcuchhFMCcC5hF78a+2wx9zcUxdX1/d3c8HHYhhJzXdV2XZVGRZij7Gw3MX33P1MW4HYbdOHrvUJUMoCo5t9nsqmI2y6V0ofMxsmcXAsfIfc+bjSBUkWSapDj0fd8HpNVAaxqGkT0DQRUxywLiGp/Oe+cdMAGxMaloAUQzUW3Jx83FpqJYRbE5l25HZFRDUSiV1PoQYz9ALt8HDQKAmpVSaikppVKKqFZRZN5sd6IyXa+AULWKCN6KEXyb7LZU+/b/+OCGoXPOI0AVyTkDqIH+NnLu2/VmuVfVlFIp2Tnq+363307Lcr7M61r+Vo1kYE1EyNIU978xWxliCPF4PL4/7i4ouRQIQ+i6JjTvxyApf/74+Z//6R//w7//D6eXp91+N467GLedj9FRdNQ5Op+uJa1pKY+Pz85jcLhcg+YVzIADucg+3kTIilWgVCqlVjUx5jDevf/Re38+X86Xj//4n/7ycr6GbhSz8+WaczJsbMqiJqZAjDIlwlJS+uky1frKI0H03scYfIjeB3beTKsVqSKllpxrWVNKeU1g4D2SY2YEhKpaDcBF12+IqIqs8yJl1ZqwrJYVwGPYApmWPlU6X5fn63K5XKdpWZZ1mueU8jQtLy++i0/DMAzjuBnHcbMZNpvNZrPdbsfN2NrrIcQQfPDBec+OHbZJMrSN8zdrskopZZ6neZlLKbWWtC5lzeAwjpv2TIECACkQNISeCZA574ZhKGm9XC5fPj8s61mhhMiOPVNZ5vXx6WJWkWi33yA774MPQYnWlE+ny+fPT1+fXpTIjKEmSWpQ+8G//3Dc7COS5HwDY6lGJIfAtej5dJWc+04Y7HhgYsHv2peqVqqmXJdUGoeuCtUqUcTzbZBPjgmbX7ot0ioirYPSGilIhHRrtwCAARs7Y2da7RVh3Sx72CxMgGiiZu34+irUBG1NpFyXOeVSESEEt5YQo7kAACiK5Hzf224vd0va7TrH36SKDs0c8diP7ZkCUS2ZDLab/bA5ns8vV8kzxBUQsAih73Mf42bTdX0g8D6O+7t3P/3h733sP39++Pjp6Xy6iqJpKetikkota84GGoPbjn6IoOmcrl2iishd9
Gvy0/V6Op2bR2lZV0MiZMfs2QXHhEZaWCrUCdKJ67VzOaA5tY5/KxpTRQAmcExEHsDnQtdJLS+yheMxjJs9bwdHtKxFxK7EhpA0XZf1RWlT5z3U0XkEdY5vM5diyODYM3tRuK7p82n65eny9Xw6F6thoNiRiXd+7HmDZmte13m+LM/Pl5fny/W6rKvkbFIbS+m/uKFgs5O91TFqVkXn6/zl05fddrsbB0ewP+x223Ecuvfv7v/49384nS4Pzy+fPn35+edfHh6e0nVOy1JSTmptEM/oHALZK1YWwG5b9K1lioYA2kI6v6fWtCIQwHKp5/NFal7O8d2+85sA5tBQUZWYXVCgJcPlIifLYWf9fusP4w//QD/83e9DF9trEanX6zXnfDwep7npQK41FykFtcUwFasKBgTgkBrCCggQzLtwOBw+fPjxx3c/Hvb70McwDMzBs/OBxco6vaTl07T8ck2fi341monQYUDwACYGS1UTIZVS4DTDWowi9RtHzV4ruK4muWpRK0ZVFMyYyDmKVIumaylLeZvO/LW+BKxlFxJxCG3oCbXUdVmm6TJNc63VEfd9N4xjFZAlSTUxS1WmZY2XK4VQzUoeQnAORbWWlFLOuWpVMHAu9N1AhrxMS1UNIXrPh+M+xphLjV13f3/s+67WOs9TWpZSMrVF53sJzGt33N4MpQBMPA7D3eHw+w8fnHPPL0/LvJbLRVzo+v7D/bvdbldUwbCKFMnIzNFT9BYDE5KJpvV0OQn5w24XY4SSpfZm4ooX0FIKAAtXUgPWyuxiGI87YL7OyzwvqFBLqVJREQiQ2TlvrAIgosZNBtmAxgCmJuIAtv2w2R0C8Xa741crr6rmnE+n08vz8/l8vl4n1eqj3253448/7E+75Tqdnh5zyssyO2ZAYs/sGIm988xsZinlWmuIoe/6YRhU9HQ+lZLV1FSA3Bt0uF1t0AR0AxyJyDzP03Rd17Xv+3Hod9vNU3ea57XWKq9S4nYAFVVsDH5Hrbgn/ia6bJx0UNVS87wu62JBgSLuNQQaRv8yr//0T//x3/0P/+Of//Tpepm+PkwuviBF7/0Yw37s3u02JSt7JK+n83OFDFieOi/r4tlv9vebfdx0vY89AquYoWRZ15oNXLcZ9u9/ev/Hv3OInz9/XbL9058+/+lPv1ZgATRUdugjx973Q/Q+ApuBrdNaa0lp+ePlXKS+FTGhyZG6LnQdkbPmdJdaMog0OUFVMwQ0U0TwIYSu8yGyD4AemMmzF2zIGFcR6pZdVvPeJwWs4vVqLy/L54frl4fL6XQ2MyZW1ZKz1JpTmqbJPT8751wIIYS+74dxGMdxGIZW0Gy32+12N47j0Pcx+uDdbTAE3xp+ZqZV1nmZpvr58PX5fDr2u1KLaMm5lCIGCC1Jw7BR41WKam2B594HpFJE5rRep8VAehyCZyIsQkbeuzBsd5vdfhi3sR99F5LKdL0+Pj7++svnp8vp8P4Dor9cppqyZxjH8d0Px9jT0/PXeZ2BKIhDNh86Uy9Fpstc00pmqYdpgnWd5bt1TAVqhVx0zfW246jVCjlLdBQjY3BVhF8Pcjfyd8kqN14UOwdMCiBiaBWbQwioGOZqotkAmNl59i4QO0KqKmktKaW0rFIVgRyT99SaX80hVhVylesqOFfnso+eyAEAEHGI4x7fG9zf77z/ZuN3Tb/W9xsDRnSmYGKe+bDfvVz2nz53qeSE+5XYIBV2cTSoqjLXEob+3vlOBFIqCpxSSmlJ6yRmKlnL2rStTQziSAdfB84dzk4usgJyIDS+BcsYWBu3qYKJVKlqAUFRTWqtiNny1ZUrw7p1GBitgP+bQMsb1Zyd8xFdNPJVKK8SggBx1/XDbts5zPMMKbNIqsXWUq1MFz3N3bMIDCO1HIQ3lpUhkYFpTvl6WR6/PD29XNZ5MSOOHJgHpl3XHTbdAJbtmq5LTmWZ1nlO85xrgdqyfr8B7v7XLxNbl/Tw9THGEJhLXn/63U/H46EbunHsx02/2+02xwOy+/r4VFSnvKa0mJi06VRDPgNQO0yYto4MvH4HiNjiiAHIvp0nbyRhJFQDEVmzWC1c09aJODH0QMzeEaMRFoM5S5aKoB5t9OC6EDa7Ybd7O9q2xX2e5xYI3MinJtrgCiYqWbQ2tDoCARGpoSPXd8Nxf/zhww8//PDD/eFuGAbyPoTIRGgCOkF9svyzpJ9r+Vjqc62LVIGKpMxERlJNRKqJgVBJthZUYnYcenKORJxloCpYbpZCEG3AZhQgq7WorCL59Y27bZivPxDgtSMKrzZSa0OFWnPOy7KIagy9Y+77LhdJpZ0wrarMubhlxcsk0CgyPjo2q2nNy5pSaex79lHEHCAv86wKZsou9MMQuqGKxhDGzYYJ0rzknGotJnJbdbVp+PF10Wo90b/qjwuoskHHvI99EEtrZcNN7MM4YHBGLKrLur6cn1NVcq7t4uyc32wUbXl5BoP90PvQdTGY9gZmmVfJhsjkzVdNWWoFxBDCuNmgZ0FsOH6k1g00u2EAnaIIVkMAImSz1gQhNLBSitbqkfsYTaXrurdOTKn1+eX5y9cvl8t5WVcRCcGP47jdbbabMXj//OHpejqdn1/yugACEUbvAb0ZMXFjGZRaQMWHEHzohyHnDICtUdjOk/A3rRgFQ9W2/ppZrXldl2mahrH30W93m/1+l0qdLldJ0rQEN/YRMfnQus3ddr+/u3f+dfwK8OrO0lrKuuTz6bLKZZ/1w4cfogMp8+n8+OunX375+PHpfJ2miks1zgaOmfvgj9uxVPDEwggO1nWtVwnPOMeApW6GzfYuhG70cXA+qgCYGKkiCbCP8fDup+OHP3Sbo5asFIBD6Da+30o1qSJWUqkgOZpijBgcEYhIrjmtaVnXKWV5M4+0qTCSKpQqgGAiTT/c1O5pTbUWMGgEyBZO3fV9HMZuGNh7IEI0IPOeEEJ0PWBHGFkOBBlAqxDk6zVfn8/Xl5fTdZobVfmtqd9CqejV5dXGjiH6LnZd3w3DuN1sNtvdbrfdbrfbzWYYhn7oQ4iOOfhO5A2oCFJ0ncuc18fH54eHp7v+YCDkUG7R4nDTu97G92pmiEjkmcWQyft+M+6OBwpMRLvDgdnnlImmjVkX3d37d/u7+zhsOEbybLnkMs3zy/V8WqblcERyLFJLzUQkVkUkJblcpmVZfB+ZWKo5vnk9HRqQRU/eQ2s4v3X7DKCK5aJLEqCaC6SMMVD00jNWT2pO1LwoY4vtAzUjQFFlZseO4BZjY4pq1sS2pppqTbnmWgCMuWUl+RbkDGa1asllmdM8rbUIEXnmFnFdiuUiWSyJLVlLNQUBrOwLETOzCz503oDABw7xe1yMI2LnfOjGImTomwikD/H+sD1fd97HtfTF3a+uqzKB64ads+np4fEXH6/bf/UBOXz6/PV0ujQTyjw9AczLfM1pBQBRyNLm8QqWvE4bzPcx730qVXNxVQOhDmMHoN47JlLVlG8QpJw9qJHkqnOtV5ap02UItg/OkS3V2L7PgmpqD0J0xIFD57redwOFqAsrIAJGx4eh04gKq7fS
zcuUlzStsKZc7Rzj4yJ4OOwPO/bOqqII3dhnRUXXuV5fLuevj+tlDqBbJoXag38X/IdhfLfbRrNzlgtPNzECGCGqqQqY/ZX5CH7729+KBw0BOCd5eniuOV9P569fvvyrf3j88acf7u4Om9126Ht2/jhu5sPhOI7PwSem2ko+NCYgg3b7oYKhCtjta7zeyk0I2WZO3+Fc7buEUEO8ZcKYqKRSVxCq6LsYegphVUhVlqwrCREAyaleQ++2G5H2PLeFXnVd15eX06+fP3769PF8OpVciJDZtxyhNtRp2DNBMwZk6rr+w7sffvf73//dH//ueLzz5JzzzNGRY1AplzT9UpY/S/5nsC+OZsaiGeriZDEACJ2J0xVyhczkAZyyu2VsoFFQZAJyCOQMBWo1NMktpENBQZWzSFGoAPqb3eutBWoGLfvaWh57A7sSGWgj5OaScxEAh0g++L4P68qmRa02qjOliksSolxKv4a+Cwi2LDmlknJpwlOfBQyROKUkBlVr6Ddx3AUXFcB5djGYNIFkQbupXuzmG30FzLbrt6MxkXo5n58eH7c+3O12wbu+H9VJcHGzGfvdLmxH13fEfJmmn3/Bh8u0ABURTdl7v9/tKtg1pTXlejiQD10IBFBNkonWAuy2Q8Si8/mSZm3QhFpKO2KEGKGKZ8dEZlakmhkS3xwjCORYG5feDBwpWBuWSS6oDbnLb59KTumXX//yl1/+7EPohvGw323GzRA77xkMtNc//PHvVXSZJlDJeQUEFztDV4uZQq5VTaUWJuyDZ++rSsp1zSUXUaN2NsKb5/b2KCFSmwuZvZEZodRyOp26Pu6Ph+1u8/7H90qoWosUybchHxK5rus32zhuQt+72P34hz+GeCP2IkHw5B2Yasl1WerT4/nzly8//Hj9P/3b/7oneXn8/OnTL8/n56mmTJydViMVRARSXXIqRhzWoYsroDgyjxXrnBYi6F3od9v7H97f3b9DdKpQay1SWzuEfBh3d7/7479+/9MfioTLebrOGjeH/+a//T//3b/5r9eiL5fr18fPj88Pz6enVGtRCE27TagoLSBE7NvzYmBVdM3FppmyiIFKKWmt61rTnNZlXWc1DSE4cs53oetD1w/DOO4Pcbt1sQNElWRWmSG4LsKWeqr5vkjytqDkUitdKcvHac3TPK/L0ohThNT0pM1w8waqIQSQmuda1nW60Jn5wbfuT+z7YbPZbnfb7fYwbrbDOCLyv/k3r5lWCjXBOsu8rC/P518/frkbj++2fXRxrVClddvgFmKrglaZkNmz64EsKUKI7//u93G7LblE393dv1exz58/Pj8/7Jax6/2Pv/twPO5C58GRMRgVs0l1As0gAkWZITpWj6WW0/ny8y+fEeF0ugDYjgbqosPoMRDhEOj+0Jny4eC3OxjG6nxH30XB1KprquikqiCqdxg9DIE0eo1U1XwWduiIHN1udCLyPnhidmzsDAma8AcQgFRlTXmZl2WeRDVGF6MbxsGHAMjWEj5ySUtOSyq5SFUkKlV0ATMUtSK2ZE1F1wq5mCiJqc4KWJCIfHbRA2Kt9ZKsfncec957H0Lsh1IR0ImRAbPngXEzjjF265rnwkMJQ/AxRG9Y6lrKJ9FFRNc5fUof1eq6LrmkUnNO12V+ybk4FwR4LagAjiwQxT4OkbQs55eH52suGPz4jnwfPNfg7TpJLTeluohWVUUJFEFYC5SVMXeexo764AEgteDG31zNPIfIzCGEru/GodsMkq/ksNSc11Vzjj7chdih7h3Pzs3gsrHWhKtcnk5OxDuNfaypSC1mVYUKqQjOl7xeJlvXHu2wHbgPxtRFfxzCvu8Gx5JyyaVROpg5xlgrlpIrCPz/eZmaFM2YLxeotYoKEdZaS84p5zQOzgdTmE6nPM9aCqq2O/Q1uw5e6TCmpmK3B+yvihh4HSR9/z6+iXwBqGEyRK1W0cqEFHwYhpH7jg016wolV6w113nWtdZEoVqdFnsLgTMw01LK0voxOasqEwNCG3kgEKEzBELyzrkQQxcOd3cfPvzw7v79brMdQseIhISmWNdakpTHvP5ay88mv5g+tw4OSqCKzgTRAhWh6lgAwHkCZQVkQAZUhRasXrXFyRAyAzMwm1NrDaE36BUi4d94kxSs5Ua0ddpUpZacck6IAQCYGYlFrJTKXNh5ZvbeeU+1Yq1qoNUgi62l4pqkyk0ObLCm9IqBFDWtooRIRLkUQ2pqtQBA3jOxc4zEUltScjGTN3HZrbMJt3aitYbBd0++vbrGns8nRjxstzF2few3w+b4/n7Ybf3Qc/TseBm6nvBwnr4saRLTEPtheLfdzSV9kpqWKc1TCTF4773vu76YJRFE2G421AJASgUAZFKpKC54zxtXnTc15xyApZxVFYnWdU05K8BNudX2IVMDKDmt87wuc05rNfkuz64VB4pM3Tjs9rvD4TgMoyduAZPeu/u7Q1nfP319n9N0Pp+KiI9RgUSlSq2lVqmqxTN5LwA5K8xLKqWqGN5mCvS3nZjXB+42UUIEVZ3neVqmYTuGEHf7TZVa1kWlTjZnVTAgIh9it9luj3f9Zutitzkcmb91YjwbgeS0FuIQYhcHBoCSZb1en7/+6Zef/+Vf/vn5/LJWKUzVuaoszSZsZqK2ZneZN1WYkSKPcUC0RpprX3R3PI7bbUoNg9rI0OCc65zbHg7vf/zpeP8jMZHPu+P7vwf8V//wr1UtK3z6+vV//g///p//M2XNqWRyHpqzBbEhQ8g54u+XZQQkBSwiiIJIolDFcq055TWllAsi+IDsfOz7Ydj0w9iPm2HchH4E7xXVtCCQJ3IUySIgmxLcKkq2ulbBnDWlmkutVZ2jJm7FW9vrhupBREK4jS5UpaiZ5aY4JWImH0LX9cM4jtt9P276fmDm/+6/+29vH7NaSjWtRapUKWtaixQfd10fLQl7bjfhq66zoAkR+9B3474oFSNzeHx/f7i79+Q61/Xd5nK+nk+nqw8GXT/GYRxD3wHBmtM1zdPydJ1eRNdxcFXNdFWBvidyfp7quqavn59VbFlm79n7MnQCaogCps7rbuuYebfzXa/s5LtVHVQhV12SGksRIULP2KR0DCqGrgqzEYN31HnyBIjmWA0QkJgZDFUMoInEAYlrrcu6pmVpPjvPeDvZtaNhS5YRRVNqWIcmfhLLRUuBrJqrpWKp6FosV1ODqlBbZi4KsXAFQ6y1zkm+91+6EELsun7oSwVjX5ELcqNQ+dj1Q3e9XC/nS0Q4/PRuO/QlqVvnbjiKuGVeHh++1JpLKTkntYqEpaR1mlTRuQ7ApVrEIHru+/HYDx2Wr8/X59Mvf/70yGHz9//m/3B//wOxl1Jfnp4eHp5e4weQ2aWShy7y4BwTow8Ou55iR+BABKsL0urB16UM8QaiBUIOLgzduB9r2jiYWNI8XR+/PjjG47vDdru53xyRqRRZpjSdrufH5+n8Mk/PLy9ziEuXOsmmaoQqiDJzKXCdqiyy8Xg87H78w4+7uy1575kCWlnz+eVyenx5eng6ny+m2HU9c0e05nwuRW5UgteO499
c7ZD3Nn9VkWIm3vchBGSfiz6/XELsvPMiejmdS63Xy/Xr568//+nnl+fnWmpD82Izopi2yqUN6ERVDdRuVk947a++7Wdwa500jq+qmUKzMTVtNlQxQ47duNkeNvtjt9lCjNtc+fmM53xerpJT4N4XkCmVh/f2msqLhN75LsboYwiBvcecFcxESykqhtggnuRd6GM/bDeb/e7u/u79Dz+M261Uycu67XuHUPNS6zXlJ7UHpM+EzxnOYpNZIAseqSdyXoCK48wmfYdqLkRnhshaEEhJFsiraRUBMWNDr3orfEGViMgZAKE4UHXNi/XWurRvNcKrO6nNEcoyL8uyNG+Ad977gEgqmkvxhsSEiM41eCPAq31XVWsRMgAwUQXTkkqVamJqJlJb+iYRqVZgdo5JSi6ZfHXBAZAp1CI55ZyziKjezPSvFey3b/KbjqddRBydODynla5XI2bnd/v98cP73/3x9+NmLLVh63Tcjr9//8M5yz89PD5PCZhD8JvOP15ePOia1vly7ZyHceNC6GMPzIiMCLvtVnNeLxNU2Qy9ga1SoZRxu2ff2daQyHuPaCVnA0Xi0/l0na7zvAAgEBkimqCZmtVc1mm+nE/dZRTCXJK+ji1CDD/++JOx78cxxODZqemSxbSSVUTpOzzsux8+3K1pErRpSYjexMSkVFnXVKUgigiaKiFI0ZxrLRURCcjA3mxIt+kh3OaIdqsQG1oIAUxEcsrzNAFAF9zdYWulOIQvtV5LVTDPFILvh3F/OHabbQUgdt+xVIBQTfJ0vQwu3t+/2/TDpvPe43R+OZ0e/qd/+sd/+eXn59M5GwiSeSIIJiY5qwoRZpHH03nJ8bDtD7thvx8dQ1rWPvR3794f333oN1tyXtZcG6iJkL3n4LrQHe6P++NxdzgAcAj92IVa1sBIjsC5P/35z0B1Lcsq+Xq99kPPTCLZTIjAB2cQYxfewDCI5FxwIRIHH7rY9aa6MFsts1mt0ri9gOSCH8fNZr8fxk0/bkLXuxDBByMlRUPxSGiUUy15TctcJRnXlPN6XZbLlKYkqaK10Af4rgv59uBqq2laKcpIRqgqBmgIaKBVVlnTmi/XKz+/EDs1dY6n6f/+uvHruq45Jx9pGPwwhmEM/RiHcfC9hr5nB9Y06iqqhVHJ+djvtsf3Ff1lWRRks9uO/eZ+f2Sl8/Pl+cvDfLlczpeqCRlyKbkUQJ2X09fHn8/Xxyyz1PLhw7Ebl8tytbruj7tRB9R6Oafz45JSzTmHyJ6vfaDtntSKqhDisI1d8MNAxLXIkvPynbbP1mJzUiXtTINnx6iAVWmpWLVlzQM4CAG2PfQew82GXUzUpDIxEotBriYKCk3hlFUqihJCXsHUSjbnQ6MnMCERdt5R74NDkWoKuagqrEWuSedVctZSQdQE0LD9DKKoqmhq1RR0TXVd6/eTcUcE7KjrYypmQEpU4XaydSGM43jpLmnJ87IaMvuokGO/bnbzutZ1Wef5yzLPYhqCbx5zlUpILvi+H7M5W665CjMih37TB8jTPF/nMq2VNc/zGsNVAc+ny/l8vl6vKYuYETlzQlSAg8dhDBQRh8BjR8EDgoAKkRH97eFIARQJXHDd0I3bTZ1HmUOZlsv50uS1ovUdgGfqh76Prjf2VXSayhWueU05L12FHKmx56FtJ5ayzotW5cDdfvA/HMa7+z15Z2ayrM/X6/Xl5fHr49Pj0+Uy56Qizc/cVMrEQKogrT3+v+FCMGYMMYSuJ2YxvEwLP70g0nWaHGNK6fT88vL88vJ4SssC0BTkhgCMaMRtuxIT+G5P+23h8ttH3b77+fY9tN2brM2dOPg4hm5EF9EFjv3g7WhotAAs01Ssis15TZf14Xeay9sXyiWt65ryWqU6dt57KSI3V4oxsUMfOA79uN1ut4f97njYH/e73W6Ig0cCsXVaQEpOF5Uz8ZndiwtXwEW1NvRkLWJSCDk4ICJiNWTAQQyAoVYjVkYlc6igGWsB1aot9VjJilk1E2ihEoQEhsAIjPBb+9tt2K1mb4HiaqXWabpeL31wztSc8yFExx4Ba61qEMCbGRE551qwNBM1oq6qVkUQaEfEUqo2C4BBKz0NMhEBCRFQY1PeDOGOkVVqySWllHJ6JcS0cUej8Lx9z/AttuvtDiOsYHMptK5I7JwbN5tFJZmGdr+o1JwD8d1+d+CI/XBaEiKhKdSUGD1oLelyORGimsW+Z+cZaez75ns2Audd8N63tCSpYOaJibmAKpA0KJ8jRmbHPjTftwMDBWNEZEfeq0jSmpfl5ekRe8bgp8P9my2Z2e92h1TNxUCEVqWIlCJgxWNlWbFMXK+9t6HzPgRIUqrlLKVIrrWUUrUwGxhWNTCrSaqIKRC0+MxXDMF3Xcz2KdxSIgmIOcTQ9X3Xdb7JEGolMkbbbTqoO0lrZC61cojMhITee2JOy7Ks346WbYsl07TOOaftsOmPO0cfak3TfP769Pif/+Vf/vL5yzWnOUtSFERmctjCrNp02OacFTRG2kHsO7/powz92O9++OF3x7v3PvaG2PqJrTpTA2aM0fd9iNF579S4w653R8LqHIJHAJfSZb8f7o87gz9M8wIIKa3n83PR6h1757yzrvsNu0vNACnE2EY1CDhFj2ApLTXnahUArQXa201+1AQ0ZUllWYvVUhMx9DEysOQ1LesynURWdlCqlLVYSizqDJsB5vWMaP+FzpmCgbZGKxMiMzThqr7V92pVk5Sqmko+n5/rK3mcmPqx2x/GOLr7+81mG2LPFBAdEAI6VSiqGYAB1KTRNL0LXb89JMVVDajErhu34+6wT9fl6eHhL3/+08dffjldz/22Y/ZNc1ZKvl6vjw+PL+cHRQFn+/0QBtLnZ4Xc9WJCMdiEWlItiyISI4NW1QSQzbTW5JyLset750MLUs+lfssZNAARqDdvqDWfcypQRedUPUJ0LjhiQGYqgo6x2ZSgydKbopGsVJ3Xmkt7BgxMEcEhKqBUVatFVu/EeR+C5+C8I+w8kyFaTlprVdE2SJqzTklrsZsUgQiwyaTQtSrIMXufa0358ldLmVPJBBo6F0uoCoBUDEyM0Zxz+91uOl+f1+eU65Jqqj3zthvt8A4vp/N0Wi6X+Xy+xi784Q8/jeOQcxLhGIOP/fZwP2V4uCRZpVQVRXLROxfH/eE90+adGgHgw+Pj5TJdr/M8LyKypqRAQx88Y4RpgGXv5K4PQ7DOmadCKKrFbA2QPHyzjSECoCIZkBFiCL4bumEzptNwUVqnktflernmkuY8XZbT3Xl32G9iCG2PzMuL5CuVTJrlQlrEeQYEES25zmtZkyxZkWM/grNCmrQsIrymcnl+fvj69Onjl69fnl+er9dprcVKNVVMuZqJ90zsVS0nrCKvBII3Ke1fa2KI0DuOIXR91/UDEiPimuTx6bwuqQvsCLSWeVrSukpOqNqyS9RuPg/wCAYiUkquRQwRWsH82u/52yLmFXnX5BRwGyK3gBFCal6x0Bn5OUnClQU5hu125+MQ/fzsrufH8/Xykl6erg9PUm9FTCnl+eX589dPX5++Xu
cLE8XQLXU1qIAEZAbIwY/D5ng43r+73x53m+3Y90OkECmO/Sg5PX76PJ2+5PQY+3T3IfpBkUQFRELNca26ppxrreY9d951ziGyC5GT1HmZSkqazApRQSdkgARcq5badMZYK5qY1iY1JXBoaIJWUSvqW/fiO5fPb5ocJefz5RxDjLELjpl8DF0MkditOUOpdlP5kfehYXEctxiuZi1HNVIgg9vI71WZiM2ByGwtZfEWCMnsvY8+EGIpKae0LMu6JrkhTb55qODtp9cO0jfZlZmoFKmMfi3FLhcRFcBF7VLL/fGwG3pGyNNUctluD5tD+MP7d79jRsC0TKeHLw+gDqFKeTk/p5LXmodhdD7Erh82GyOc1jkvi6Ixk60JVCIRAJvoMq+ny7SWYtSgKeAchxDSkpz3XT/UUtQUiZg5EkstMl1zSo+fPq2y8tD94XD/do8RoufgOJiYiFjV11xRdVQgXebHX6anr/VytryAaik6z6XJp0spYhVQEYEJGAAUDVGBzBRvZj40hKZdRKTbG2mmYo2DwI5CjON2uz8c371/t99tnGcVuZ6vIhKc2296/9P7db9b1zVVzaAlLTmnbPrl4enusC/lW9ZYcI6RJOe0XNcuDn0Yd33K8Hx++fLy8uXl9PXlMlfJAgKEzI7VsTl3k+xXBQGtWpd1XRZX8tDthrt374/HH493v++HLaDd8BaMqKxac8lkFkkQq2gqZVEjqxVlJshQxZJWg/PLl2V6GTr3f/y3/waAzpfLly+f83qSIjE6pqBKQ+/fGB6qmnISkb7vj8fD4XjH7OZpiMGpVgDVk5aSq8iypOeXswpWgyzgiiTR03S9LPOaVh/c/d3d0HUOIKdpuj5qXUJAM6eZg8no3BC8JyCQ2/jvrWNmbbm7ccFVK5gimnNu6Doifs2iRee463tDmEpKUoLSMAZ2t9cSY/j9Hz48X/4YOvzpx/vtvmNvYjlVK1WEsC8T+wjU39yfCiqG5GK/GRWKlDUtjKQq67p++fL5v/9///f//v/zP3769Imj/4fjP+wOx3Gz9QGW9ZzSklNJqyTJfqC7Y9ftB+rTWjLhmkXZpeDFsWH0w9APGzdstIvo2MykFEGgRjgzE1GrRWuRv2pOEYJnbK2H2mr6arVA58P7rYtb1wfuAiCZAFYkInIe2ZFjbB+xaK1VUyqiiogt8OHN/QpmKoooznH7WswcGFeynEotep3ynHUxWqulArmitvk+ITIje3Lese+GftztfewAcLpepZhn/z1F3dU0q6hjCIGg6C0xQYHUmGgcx3EcT0+nUnVey3WtjhGtC8PdoF7Ky3VKy7KoCbELIeScADDGfrPbH4/3Iev4PM1LKbmsS8pFOsc+DJud73Y+ZblO8/lyfnh4nOYVkKpCVSDm2I+bCAOuhyDHUI+R+4iBDLSqlqpVtLIJg/5NsW0ARgTOuxhj7PsQOkSvRqrWjCNpna8XY6xouY+RAWvKWhfUTCZW63pZMaFGIoRaLeU6L3nJWhRcMOh7qEuazhNLBZrm9Pz49PJ0TjkxYdd5qTpryaVZ/JQd+eB8iCJmsFqy2/ntFXL9ekb4DWWVEJAAiQBZgVRUak25lJQ8AaOCVMlVVVCBDW6yl5scF28z4QZyYqPXg8ZfdVn/+p17bZK/VjCERNxiMocx9qMLHZCvirUYgniwnshzGHutudZpygHLDXH09t+a1JpympZpmqdIwarVIiIKgMQcXBiGYbffHe+Pdx/udofdMPTRR1+9Q6eil/P1l19/eXn4i9Wn/QH63V0cEKBUsZIxJ8pFStZSpErLriIAxz5QDCiprFOa1BJABi/2+iiriFlRrSoFVEAETY0Im7FXWRVUUPW3bNg3CD02EwSAgZVar9dr9HEcR+haHGv0PjoXqDbQmgJA68QAgBlwa7EyY6MsEN9y6ZAM9LWwBTNAAmpKL+KbDD+2K1iVpZR1XRscpWVhNV9S68R8IzG0Tsz3t5g1+VlhZQNJVdKcUi7XeTldr+/u7366v+uDL8syh5nJH9bUHQ5x6JldJN1Gvxv77WaMMaRclrTGnJCZahFTDs4QrvO15hwYXfBYRasBgKjlVGbRyzTNKQmoY4zRdTE2OYH3vuu6jFilKgAykvfAwMlJKefHpyUv1MeXD7+vr92+hohDQBUxE6sCZg6QTCmv5fK8fPl1fvxapmxz0pSsFClVq7xiqwVBuXEUzBmYFL2d6ImYCJFewR/y9sSCKjKF4EIMw9jvd5u7u8P93WG32/V9BNA152W+1lqx62OM98e97S2ldJnWx+ukNa/rZDWKVPj+Y2nPS1onnhCxSB3Gro9uLevHx6dPD4/P5+t1ScVQjIDRI0bGjsm7yICEXhTmKmrqGUGkLLPmbefCth/HYeNCl/JSq+jrvSEia04BIUaOAdb5dEKoSqgaOKMlLbNIrgrPDx+X64vUlXHHTMFh8Bg9SeA+OCIsRbhx49o7ZFpKKaWAKhNF730ICJKWIcRI7AHIxCrIamuTWVS1PmWOXRY5XafTdDlfJkB4enzebcbt0KHlZXnRurQ2H2i0UsZI99uuyhA85Cq5SinNDNZ6Qt9p8xEd0xDosBl+uDvG4FstKyLeu+1mNILzOi9SK9IffriLr64x7/nu3e6n39/5aPfHIXYgluaES4I5zTFuXBzJxxg9ERtC63QhUwixl5q6oJJKqes6vyh8fvj6y6dfPz98mdblsOm3h7v93X3sB7O1lFRyAiAVup4zFxj2buhos3O+yjpXIul7sn1kYJUQYwgd+rASaykFnWcKzNEMc66qSTWbEdFvNn4mdNyS6dp5xnLRVLQUM6XUiYpGjB0RmEq1SzUmGzocAAEpOnKErMDsiFqaDrbY8xvIA7RlABmSAd98w4QALT8ebpDVFgdvqIYKbMyIBGitBAsh9sNm3G7H/YGdL6mkZb0d9b67XJrPVYGAPGutYtba6ASGTNjH2HcdO19zndfCl0W1Ora+i/0mjP2WOXx9eCxVVK1Um+YEYP2wGYft3fGuL/bwfJ2m9fHxctY0HbpIPRLHLna+w6WcLsu8lmnJcyrMXoyAfeiG7f54N9BW5C6k+77bB/bYrKkmAmAkSihgbw7X15UMEcms7b3qXPDB+Ui+87EPrEOHd3fbcdOxZ1PLqZKiJ9RSCayxG7PYNa0raPTGCGZYqs5JsoKSYwcgtazzy+OX6Rqq4bTmp+dzKbbZbrbbfc31cpmfHk5Pz5dalkoQQ2MljLXeQD9gAPY21LsFiqB991qssdprLtWoVLsl5zoCAjJU0opaUYxuYDYQENVbCpKivJXDt4hVIrRvUt/vGwnwrT3e1mdrYVuGBISELno3jt1uv9tst853QA7JGzpRlkXzsiCBOYh93N7tyEGMvHl3pFcfPzONfTf00cCWJS0lQdVaBcwYOXi/2Wz2u/3+cNgf99v9brffD13fuRDEpzk/Pj3+/Muf//Ff/tPl+ddIs6jfv4cYQwiplrossixSq0nBWjAXWEplrDG60IM3yAWWi6xXhcom5IGBzKhUq6ItmbidGEwFAQgMUREUFE2soSK+Q
169vnVqRm9voEGtdZ4mz3672TIwITkOzkXvQggiUr+PAGxdljZR8s6zd8jMREAMgEgVSNuYquWDYxupMJHz5IMPcRjGYRii90lKTsu6zrUWUwFoc2v75jJ7u6fePve3asxMSi1LguYxyHWGy+VyeXl6fnp6en68m3/4sB9GMvOML0/PwzgMh/2w3fTDuNlujtvtu/fvP3z48PRyejmdAcnHwMxV6nWesmQFmGtipNj3oe8csaQ8pzWXmmy5iq61FBEBAeTYwAjsGM2HKC0tI0HJSfL/l7H/bJLrWNJ1QRchlkhVCgDF7u59Wpy55368Nv//D4zZmM0c0eJsxU2CAApVlWKpEO4+H1YWAHa32Z1lJI0EaURWZkaEh/v7Pq8gEaoRk9Q6H8/jMKjn44+fvylizExMV0aDqBQGaHxABR3G8vQ8fXqcnz/XrDoUHRMmDcTknQgmXTneGl3oYvTma5Kcy/r+I7GP0fvAgYnQQGuVnFOtYirBh81mc7g5PDzc3xx2fd+00ccr8lTXWiSnRUsm2N3c33dNW0WOl7GAnZNM08At7G/2d/d3/vWwFJXLMDwfj6nKMM/w+dF517ZNrvnT0+ePj0+XeVEDAmYmdhQdbzxvG7+Lvos+hmhGU9Yll1wym5RpPr8cL91h00/dNiOFUiVXqWJSrVbNuc5LcsEfdn3f+svx4/HpU6nmmLe9R10ux885LYD0+fFpupxOz59fXp4VUFRTXpyjTdc3wamUPCVJyb7hkdQqOS3jOPTDZek6sCscJueSUim51iyGamxapeQyLym2l9C2yKxmUEuahvPl8vNPP3V9++N377Z9q1qk1JJmU3DktWLfwA9vN7udOw3T03k4nqezLKkarCfrq0MeyQhc3/IPN93fvbv5r3/7465vpzmlXGqpjmnbt0B2XuZFVV388R9+t2mvrjEiatuw2cWmsW5DRmlK51zGvCzPp89Ns+PQ+9gH33smIFoXI4ERUWX0CKQlz2PKAnh5fHlOWvymudn0D9999+Z3P+4f3niu0zQuaa61hNA6ai+n53yaIORb8Nt7bjznVJ2nsG+329YeupppSUlkAYYqMgxLB36/P8QmllLmaUjL4Jxsd6Ft+y/uJEL0nry75mGowroTEloM5B0USaUQS9doVIChlE/jWKzuWrff+EPvNug3jfOBNj0Q8byUqobEhqS2EgmuNy5kZ+wEuSpiXbm/6hibwNI6Q9RKjoEdsjJwBACTjGCriWS3P7SbrWNfUp3Ow3A8zeNQ02LfKHvd+fQC5NB1nqmgACiprmY0QoghxKbx3q9yCzVi3wDqnEqC2jAYEjonpY5LBk5FIMam2xy2u9tNt3FVb/a742kYzsdal9PpAjUxI5AX0mHKL5f5PKUp65wVsAKyoUfXKAVg8q6JDfkQyK/pn1oEqpAiJ7DFcjb+T0NiVmGDcy40TbPZ9IeD1gkWa7200TfBU3De+TWbDBEcowYOnp2jBSCljCLi0SEaYBVLWQWJIznHMbJ3oJKXuSxF51SlVnah7VvHvqZipvM4tbNLXXCqvmli13bdNmVZUiolq35p7Jv+tqj85kewL/m5Ami2pklcDympwqqMqwWOGVVFzAwUddVC6Jf7+Nez83qa/d89awdHkQzQOdd1/W6/3R/2/aZDZjMmF4ydKotArQpoEFERqPENb10Td28e3Fdir0rNoGusRZfSrEUQwLFrQrvb7u5u725ub/f7w35/2B0Om82ub1oHpOOyLJfPz79++PTz49PH6fy8DdK3fjxydN4FEVmGUUomVJBsaYGUTarxCiQmUKelqhTUglbRBFZeClg1eI3ivPoqbG13MJJzjITyhV30H94xfa1j8HXurqpZJOVUS1UF71cdSAghGplIudqwpa6BggBgtgZtOF5btERAbFjZXbWBaAao109ubbC6EJuuafsYGyYqeZmmyzCc52n8ooa5EuJeNTGm1yDrtRPzmx9FQUutqYAaKVipYAopp3mZpnkZ5zIt+80mOOcI2NQHbjf97rA/PNy/efsuhIg+Hm7u370bYmxLqSEEVU3LUlVLdoaYpBAhiwo5X1SLzFVm1WSQAY2IQ3CE3pELft1h10RYA/PBG0CRWkWyVFQDRALQIjXlWet0PEv92sNQVdWqVsEqgjBiRAMp4zik83m5jGmYalZYJKp2jISYCbJqBQ2E3rtt17YhasFaVEzFDL0Lsek3u7Zt2DtDKyWllJWB1Zzzfd8dDofb29uH+9u+7xwZmcrafcjLOI7Lskgtnp1z3G/7bd/XUopI8OyKIFqMfnd7e3N786WIUdUxLS/TkAyQ5pwLIAQfqpTz5TKOIxh2sfHXzCfXBLeNftv6fRe76JvgETAVW3KdUqklY01kDMgGpAZiJgIiuE5RS9FaVQFC8LvdJjj8+eefn59PORUf/O3NjqAcnx5zWhz702VYxss8j+dxWmpFJmZi4raLXWDJyzIRwdeaX1WnVM7j4s8XH9sQQrO0taThckkpm4F3nvzaJDQpVc1EtUqpUkLThKbZNKFse6k1LcsyTefzBU2DZ1OulaWUbAsCENN2E3aHzU2q/cvFh5dUP6cxvW59tALFHPOmad7edv/w/eEfvr/5h+9vt32zZCkiIuoIm8AAOi5tUlUKNzd9417JsAjE4AO2HcUIgCWVccwyDpfHp1+7bri7/3F/825vhSiu+zSsfUE1yxOUGUqymmuVrLWCbe9uvmMOTX//5s3tu3ftbqN5UDOppeZcksyznE9lkcVvxfeh2Tcc1tk2ITM7TyGwg6xZkVxogCwl8wGda70Ly5xSSikviOB9DOGrVokIoqc2cvDMjHqFaQCtIimTKcmJcOMSq2+aiBUk0VLVaqlVRHxREwHPaIjsnAuwZp2agVwlEsTGjM7QAbECFTFTRRWtgmjeU2vBUDTDIhqdFWNjt6oUQavWNVuxSM5WJaU8D5dpuCzzmHP6Nh7OPT0+hdj2NyG4WEhByhWMQo7BHHPwvmkiIocYNrvdze1NWuY//fF/n54/OShpGRQdeHo6DXPRGJt+f3/z8MPh9t6F6Czt+u39zWGZp+H0/PJyvBwrM1XFS7ZhkXFO85yWCkullBOS7zdNRX+cMgHGLW9DXIInR6a1qk1WxdTIFXIT1BsL8g1/Bb95AAC9j327udvfl3fBl+lTwjKZVABtYmjaJgYXHHlCVHFsWrMbGRmqCagGiookgqtJA4lidJtN2B/aftuB86nqVGZk2u537BsfgxRJaUrLpJq9p/2hM2bXNL5tm24zT3m8DMs8rzooEFghlWtkKn4T/I4AjsAjMAATE3ug1fJhziqIghkBBMeeHZNb5VlILKpryqWoVJHXscKaPGCwpg8AXHEX1+nC69tFX2KUvr4U53m729zcHg43N13rTFNVa9ihC1px1ahWkTSXiiJs3LTb/e3u7ZsvRUwp5fnpcTxfdt0t3jVHe8w4mmkT42F3c3//9rvvvru9v+u6TdN1IXZt029iL8v8efz0+PzLp6e/HI/vcxpMwNTXZPPz87mqMBbEaoDmI5oUWcayFGNH7AgY1LTU
UqvwVRFmRaRoLaJVRdYahJGQ0KGL10YYIUdmQ1LVKkIqKPIFtHydx32Rm6xF2utf11apc977GILE0MR1/QuraK2yGuiY3drCRXK8th98QHJAqEQgigAqFQyF11ppHVqx42bT7zfbvfehlHx6eT4+Px2Pn8fxUkrRtdMGa9miZgbXWhl/+7Lhtbg0raK5moEZ4aqXMpQq0zjXVOdh6pqmbZrgmEyY0Xk+3N/+IKIumm8AqN/f/u53+HA7DpfL8Xw+nU7TZRAV6ntispyWUoZyBDVnQOQgePVemYFcaB073zTRMZpW0FpKXeZlGC6m1ndt27lquuSUazGpZMBEwTPmPC+TzOmLydrM1ES1qhUC9Q4CgoNcy7xM0zwvWaAollJI9dBG1/C5wrCUXBfS3AfX9+3dfu/YXc5z1ZprVYAYm81hf3v/0G16JEh5OZ1OoNVRiLG5vbs77A/73bbv+iYGNJ2XWfKiNdeU0jLN0zhNi3fcdN12t9tst00Tp0EMxLQ6xs2m393e7B/uD4eDe+1citmk5ZSXSRGAS1WpKvWoKiiVEXZdF53rooshxODbELrgm+Aaz4GAQQls41Ciz31TqpZS2n6329+1mx16b+v9xNjUiUhapFaIsdlsN9vNxjT/8pc//emPf5qmyXt/d3/vHc2Xi9bqvU+lpHlRqSkvY0oVLMS43xxi223aoMWnPDXRf6ESVNXTlB9Pk/Fg5NUseF/SsixTSUv03Gy32jZlzTutAoAIiqZo4gk3TQjN7s2bN8M0f3z8PIyjqdRSuqbxTRNcSGmZp7GUbAZtF25ub29ds7tNrvn4MqTLXNbjGVdPoGp07ru723/84f7//P3D39x126ABq+88cEvOEypo1pIjh1JqrbWzQvClq6RVsmgm9s4DQi2Sxmk6XY7ny3NVG5dLzrOarDcEEFWoprXWeZkuaTxKHh1BCE6Ftzf73//Xf5SqTb9pN7tut0U0EzIAAii5vDyfPj+ezqdcQPuhjCM0F/PRlcyl8LyIyQRWpUDKlZ3r4p4Yp2UUcbaaP7UiSWywaSE26N3X84WR+sDb1vnGEXs1VIVcLRdZlpJzHavOiy0LDgv87i7EEG5D40zHNB+LppKHKZ0a30VuomNmYMdrn0HU1GSdVK6haWAGaAalarWKNYMWUCOmGAnZkZNqcllqMS1oqGhEIlhSGkoVKU3fN00npst8SfOwzHPO6VtdhPv4+WWz1WZ/FxwzQTUFVQIivQYfydoLAHWOd7vt3/zN76SWnGawejk9KYXt7QMRe++bvr853Ny/eXt4+KHfbj2rGG22+0OqUypgls51nnMuy5Tqy1ymbGKshsAtNRGpMnvX7TF22dyoeMFtBFRxMQOYlFLmGtdpb64ylbCp+2Jf8cP/7kFGDqHZbvZy6yAFnWRyPiI7R8zkmJ1zngITofN+bRcjUgB0spQmOAYoBdABE/kmbg/d/qY/3G5iGwXJlsqLEAixR6IitdZStRqqD6418EbovGuja5rQNAbgAzMzcSVCszXHF1BXWMHXV84EWw+HCNEbOzNPgA4JTESzQBUCcMxxhSGiU1Ag4iqiWqTWUq0WWpWHq2caEA0IrlC7q+QC4AtG3RDBFIxeixgiJGBq23Bzs7u7vzncHrpIZRkRFR0B2ao9LaKplElSAcGIHp339G1lKbVezsfL+aTuhtCvlwjHuN/u3j68e/fu+zfv3u1vDqt6iVxkcFLkcj5//PT+1w9/ffn8cTwd5bJAqgZBM9SlzFwXowSs6JjJHKSs06JzAR8VSL0BmdYsIllVFLSaZtVcIct6dkM1lBUgcc0jM7nSNsBWHsKKlvl2kgzXDer67ikAgK5udLpKO7e7HSFXsdh2TV4wUylcoaiuqFv0PjCzggKsySPeeY/kDFERMQgSmDASG6Ko8qrldeuAeBdCk3OexuHz48fT8Wkezikl+UYX/sWRBPbl76+mpn/XttSqWlVBV6zz2ntSNTNZqpScpnGMMXp2hGsSKk4i2PXVNWOFGBvLGkIXyTnknMtwuUgppZTqPRGVeZnneVxSVfXkQtu1bcMxGq8xjqHtNjeHPYJdzs/LcCmlrEQ7NGhicN67EBxolpVMvypfiBFRBKp8M0o21SpazAqArCLcqlrSWMpSRQtyJRYEdtQ3jsBpMQOx7KLHEHzbtl1s16QRQ3RNcOQ2h8PucOj3Wx9DlWJK6Dlg673f7rYPD292210bIyFKLfM8T+O5LBNIlZxTXrTWENvddnP/8PZwextCqKrTskzzXKQS+67rttvtZtM37Vf6sAIUhUVt9V6pQTWtpTJo58Omibuu7RvfeAqOA3PwLvAaz4wMhloJ1DEAUDCu3pUQm92+3900/Y5cUCBEh6hqKoKlAruwv93e3d0x8el0+fTh/fuf/5JS8t7nNIUQaipoGrwXA5WV4YZEsFaORbWuUSdE7AP7AF9JyvJ4HH759JIqzrlexil6Z1IQhMCY2Eevzl1B3axE5KPz3jMzgZlUAmvbGGMgosswrBlY3nvvHCOpWVqWVcPqY9NvDy525pfuNLv1ZciXOxuAqgPovT+07cNue7tpnI7MANFhCOQ9gEoRRXLkPGrR7OFrEbOydthxCMGxV7OalmEcxnmoWsXyOJ2H4bgsl0BBpYIK4Qr3m/MylmXQkpmjZ1fJN9SEZsPMoevIBUXQsgARIBpgLnI6D8fTZV6ykeQCKck8oipKdaXAMueSskha30ViJ+JFLSUgVlVg5r5vRKQUca5UWUqdzb5kWkH01EcXGkfeqWEV42KEIEUyQhVNJZcCYhRd2HcRDSJyUkpVBqnLXM8uN5G7zjfBO3ZMQGv7X02BVhkfESIjEhpArapSsVYydSs72RE6QyfZ6maxoqZVDIC9ZyRUpddRgw8BRRDgFfn+7doH9/Ony73FB3Dko+G8igBX14SqppSnaboMZ1BA0v1u8/u//XG32/74/ds//flP/+N//o/n5+cQQmiaJrT9ZnOzP+z2++12H4JjLYFCp2EnPGYA9LltzsfPl48fj3O+LJCUjDy7EJq28XHDjl3wPiI5M6yejuSXSh8v5sgIwVRyKVm0Gi1Zpjl36V2y+G3d8m3UBRggsQ+x2+481N6rTC9aFoCqBqVU75whofPeEXfYbbb7mze3t9On7ed0PvuaseZUwVyI+0O3320Pm3YTQ0QgKFUEc5uxDvOUSpG0nhGIELoG2bmlLikbUgjORcfuGmT7Cgq5MqLgOq8l/KYVEwjetfS7HrBRCKJOBbSYZcnTMpnUxnP0PrZNcH6dOlAMTrRWxVwMZjElFUAkAwWrryO2lcy1QqivHRhadcBIigYGyFe0PHOIYb/r7h/2D29vD4ddE6imIJKrFJGkZlXrnMu45CmnauByTLMNT8u7u+f8qldQlXkaTsfnjJKKzfNMoF3o9vvD99/9+Pbtd91uE2PbxC7ExoeYpuXp6dOHn/70pz/86+PHX+ZxLMfFXhIkASPonWGjBCnpVCxXIFaJWBTGAnOpZEmAQhPIoORUy1LyUkpNAlkwCZViqVoptvJZ2MhwBT2ZVEHTAoD
gzBEieecdu2/LmPWY02usKppZVWWmEHy/3T28fXt3+3ZZchaMm13MRQBFQYuqCYBzzu22Ox+jmopqFTEkxw6J1UCBIXh0hICuVpq5lgKmzvsY27bbdN2GiJ+fn14+Pz49fpzGi0oBVYBrA/tVYoWrNviqj/kiiNFvnFZmIrpOTa9ksDULaq3rEA2sSK2zrtMs5zgGT2P68Ol5qvDxNHZN1xJ37DrPCNS1fd9t2D2llEuuoDpdpmmZZ6lCJN5ziOFw8F2XigBR03Q3N7fff/+d1LLM46UeS8o5pZKzqU3THBpFJue9rwVUoVZVWxPHCZiB8ZvFripSi0FRLbamYNcsy2Am5kl80NCAVDZzrSN0UpS5aYMDI+8jshPTlEuuRt7v2rZpu93NTew6ZEolXcYhlQzEm31/d3e3Pxy22633XnOZ53k8n6bLZZ4GyZnBUE2hdm1zf3f/9s3Dd9+97dtYc7pczp+fn56Pp6VU17W+aUIT3RcDzPUhBQ8YiQOtYb2qGF3LfNNvDn237ds2MEFFqevYEaGioiHpdWeh1TZdazJgF/q223Tbfei2wNEECVfpQhFFBdx2m//y+7+/v9ss8/zx46fnl+dpHpkZCVNeTBUBHNGVpMwcYthYT8EvIiKQUjqqSG2YLBkJ+y9p6SnX959e/vDXj+e5vJyHrgl949voNm3YtC15r4gKrMjEFBoOMfRdQ0Sl5KpyuZyXZU45udi0bQzBbzddrfWaoLHSnZwPxD40/fYmtL2QS3Veaq0KALT6I64dSFOTKssi86wpgXjn2HlS55SgaBWogIpuTbUGqBWpwhf3K1FoYtv1TdM712q1Zc7DMKSU276NbRzHy/Pzp9vdrQcjQTYm1wCtsUQitYgU4kDEbI0nj5HR0ZWSJ1VVAAnICXCqNs7zuEzVEpIpOBEsCZCuHaFlSvMkJaNz3W7bk4XxspSa5zQCRBFtmrjb9qrj85PO6eV8GU7nl/Lq5gMER9gE6lrH3lezXJTQQKEGX9SKghaZJX0eTFVu+vbQN43njY+u0pDTIKWAAYL3FD130beR28DerZJcc4zBU3DkHTOzmRZZDXXmETm4NX+XDYL3ndX9ArmUOWdQ59udbxw2bYxhe3vbbveu7adxSlNmPl7Z2d887uO5Ym+LhS23xpNhRjQwXP36IiJSVTV6f3vYvrk/PNztb2/2h13fbtqK+MuvH3MuBhh8bNredTvlZkg25cyaaymXxcaMk7jFgnBXuc/cV0aLAEbInnzkpvWx9bFlFxBYDUW1MJ8hDkKoxmjMBKa11iRSDZcqo9QX3Vdj+M+eqyCSGHwg6DxUoQK9g5pMsqkyYRNjiD5E3zQ+xhBccK5ZxtJu7o+fHodPH8oyxi40293hu+/397eb/cYHVk255LSkqhONCiRIyCDskJgYUaosmKrMkLJqVa0mVVlWcwi9litfWLCEKF8o/QAAEB19f9O+3LXFUSHJkJPKLEZWC4ghOOfYBWS3KsgA1qqD0YESVakoFYWQ8OpWeq3v8BXRC/Aa4/JN8bTWWWsdE5vm5mb3w+++/4d//C/fvX1wjKY5OyuFnHCtBVLJRastuSw5ZTUkJEmyzMtwPEupr0WM5pLG6TLUmipoyX3b9Lvd7cObh7dvb+8fXAzMDEBpycuUjk9PP//xj+//8qcPP78fzyen1hRi9WgQs/lkOXtJtExSsmo1YF2qZTSxlU9Qc851ycGYUcBUSJBEAYpxNSzKpUIuWhQMza363KomatVWGCGSrqzi1e7z7x67ssXXn85E1LHrN5vDzc3+cNv2mymdqgGy59BwrVyVnQFQCNA0cb8/+NgUk1IllyxmjhwiKqwauwhgBFhzJqQ0TVoqI3sfnA/MLFIvp9PLy9NwOZc0E60BWGsTZn15sLI34Ott5epS+k/WyVXV8zoVW71ZdP2lV3EVAJA3M0MbF3l6KYB3se26vus2vQ9sUjIQr/0kMkRkXptVAmjM6DyGxrV9s903m20UUQAm50JgHwxMzETXqCckQDHNuZDj2PQUnNRqVdaDSRGA0DG7b2IHVDXnNI2XmkcrM+aFS3aSraQ8zSnnLLWAKRGikSPP1DIhQ/BejRF9NUgpV0PXtEx9bGNs26ZrjSiVtOQiZt7Htm13u/39w/12uw0xmuqc8rws5/NlvFxKTqDW+BCCC552++3D23d393f9doemczqfLpfjeRhTxtDGzb7d7NuuXztz3y5/MRIjBXSrhQTRMfXO77p2v+m2fdt4Ai1Ws9WyQi1X8ZYZmoECrPlzUgo7apq27bdNt/WxLYpa1cy0as5FREJsdoebt2/fNRH+8r9/+vmnny7nk2ptmui9AzA1YSJDNAQDREYProMGnXNF5lymJU9S1ZQIS63zVwMgiOhlmj8fByNelhQcdtH1Xdz13X6Tu9hG79BIqhIRkiPfhG7LhDqNeZlzTrkUI4yqPjaIxAgCUGpdgQXOBx8FqijQlAqehiT28enl6XheStWvd9qrPU9Fcr5iIa92B2ZkAmZDMGCkVZKBpgwe4JuzRVWXvFzG2ZErgdAozbkWAUDno5p9fnpCjft+68H62LHfICGCRx/QOVsB5aKGRsiOI7JHR0BqVuFVvFiqjXO5jMu4LFkzR/Ath9Y57xEJlFe5iJSSc80LQoiMzpEvpUgxrViSDsM4T3G/23vfMHswLFJyLd+OYBQRiJznEF1A8G51yoCqq2a5aq5SVcacRCXVXE0ObdN515BXNGQGggKQDdJSL8MY2PomtI0L0bWNB3LsWVRFtZqCmqiJISEboSLLK12fAL3HNmoXtHEVygqoJOec83Fl5UmtUjKaODLP6Pg33XH3lEJT4mjNnlrlDpyArtc4EkMzYOaubW7227/7mx9//OFNDAhgTePfvX1jPm7uP/zrv/7h6ellSMtlltMkzG5VY3gTURmzHofx09PzOJyxziVjjTf+cOjAxTViGZmIwQfzUdgDsBnqOtBTt1q4HJNnNJVMOZsKcPKS6yLs7dsxDHyxByMgMLChEZhCEG1MGt/gLjpPYCJr15kdx8BNG/uub9quiV0ttLmZf25+fnyeLqo3+9vt99+9/du/vXv70G1aRlvmYR6HaZyWTEVHMd/1bdPEzbZjhznl4XzJ6bmKpJxLlaLqq4aOpMhau1xDPAhoxQsT/rtYgib4v32zl+/3lwxDkUsaRwE2JEZqScCDj+BcUatQ0WTVUQF5F5wHTDlBISVURCM0oPUIM/s6S7rSYtYb9xdfNRE5t4aPbHebH//md//Hf/tv/9f/8/+6PeyfHz++PD+meSTifrMBk8swiArPSpjRMhk5LSIg81jn6RuwkimUVKfjOKWCnv0u7m7evn3z/Y+H+/vN4cDEtco0TafT+eXz84eff/np3/73y6dPZZo9QBfixoduRy4t1XKddTxRSU4WZLGOELmmuhRW57jxVKtQznIeofHt1qF3np2hjVmNwJANyJRUtYiIaUUxgZrFipFehd7oAEERxFKppf4GcL/WMKsCZQ1lqOIad3tzd3/30La9AgzjdB4uuYgiEXsOTTSiRmMMXd
tttxt0bhbFIliLmq7VLBIxg3NIiGi6TJOJSimlVLgm9UCpNaU0DpdlHFXq6z6Apq8tly+f7FcZz5dS9d/Tv4iImNHodYSm186NGlw7dKu9G9Zwk5yrwlSstrv+/u7wD3//9z/ev3NGT58/Pn7+tFxOUynFwHwIfYcAi0pxDhDR+Rj7fnPousN2f/CNqyLny7iU/PzyUnJaUlIA5wM0taYm52xghti0DRJJLpKyrF9UQvTsvFv1118OmGkan18+Ty+f6uUFp4HKwmAr5ijnvMxDyUutlZjIgBCDZyAE0KXYXEqqlsXMx+2mdU0M0RtozmmZljnNgNxvdtvt4bC/6Tebtm3XTLtcl1IkLXmel5QKAMfY9Lv9pu83fXPY9bd3u9iGOaW8TKfT+XQZhzlV8u3+ze7hu83tfbfZh9h4dvi6j6maiJUqDsGRwjrvBENllVJKLoUdMKE6RHLOAFWrqtoqwVBTM4K1iKmNb5tu0+0Ovu0pBEzVVCSXNM/DMCji/vbm9v5us90uw8sf//SHP/zvf52mwXtu2xhis5piARHQdG3VIRJScMGQEauojZRzrSWBmk0pv4yLyDcaMoAqMs2LqhCaZwxnbkLommbTdrvNdo3cisEXYo7QgQNic8W4CJCYplLUxnmZRTSvPHcwH2Lfb6L3hlTH6Xi6zE8no+cxlc8vp6fjZZhmNQBAepW5g5mYLbUuqoJszEooiIoExD54R6B10Zp1TSXxAbz7wrrMuXz4+OlPf/rpc7fbNdtt13l2TJ6IEWwc88vn03Bctk0TicKb77tmh84TOI/ga0IXNRWrIiDgYWWhAOAVyY1ogEV1XtLnp/Pj82VcMjjsDr7bhd1t7Pe+aYkQ86ImCrby+4nJGMmzc+SYGAC1lk8fn8ySY+s6KgUQvXPR+fD1O2aYDZOhILJbcc1KkMnADIvostBCjGiilqqcUqonWIq83W720T90AaPLXVwcz6LHl+Onv/ySxksbXd+Hftttdu1OsBqKWWMWzBBRjdCRZ2IEMRVdOYu0Ipg8uy76fVsJZMmnJTmkmFQrGLwcc5pLnmuZGJfoJXxdLtcixocJfz1m12QsRBgcKZohMpLzIW76/v7u9uH+5ofv3z7cH4JDgILAiFiB5mLPw/zh+SxVEMi5CxjmUkHFgSrAojgs5XiZcipkiuaEvTIjRbqG2CIYKnFFB+AA2BCNoAJWIFMyYrNV7wXVnIIBOkAxqAb/WRvmy9YO6ySTCT2KVx8cYmx965lgxcsZIDgmjoF8S6Gj2PsYOt60Q8H9A2KI92/6t99vHt51tzdNDKYVqxmLYBUM5JrY0bZvu77p2mBaz7lolbTktORcRNTISBTzUtIiq4XoWknYCmL6T8gtRNQ0zW67aQS2RTdLHlK9ZBkyjEJJJEGuUnJVMQVTZObQBB9C03CwoqubWwsoKFkVWMOtdR0bKODX27fCaokDICTHa2q8C+Hu/uHv/u73//iP//Rf/v4fonfHl6dhnB4/v5hWRGqb2LadmVWrjOB4qlkdQa7FSaZa8BsBhpmqFlVRY0VCT03Xbna7/e6w7be1Sk7DNEzHp5fPHx5fPj7Np8kWi9hsgn+z6R6Yb7eZl3kYT0eWz9gouOigYdwwAuSLXZzl1qSaFEWn2Co2QH1gAUsAVbSqFLNiVgyKUQGrhvLao5BXfOlaX8JV96xW9ZtM3m+fdWsEAHDkum5zf/uw3x2kyDQNw/kyjXOpFYCcD0gUXGCCGH0bow8BnPeG4MFp1VcgOiISKl0d3uKc89575ysm1fVIqjnnUlLJuZayjn3x21f0mzXw73/pP37H1tQ7VkZFUAWQq7T7WtgaIK2XAjM0NTFBAQcagj/c7N+8ffP23feokK1e0kLDGXzAEILz25sb7z23bZjmy5wUqIld1/fbfrPbbDlyymWecyllnhcpmcnF2KIUB4CAaVmWZWHiru99bIiYkbTUktOqxfh31Vip5eX49Pjp13r8ZKdnHE6QFkMg733TAqLoWk8GYgL2CqyAVSFXWaplRSFmH1wIoe3IOzOppaSSq1TnXGz6w83dfn+72+2bpnXXeCBkZFql3LExI3a+7/rD4Xa77fs29m3g4KrqMs/jcDmehmFKFdi3m93tm8Pd2357iG3n2bvfdGJsjfM0NjQjI1I0hWJ1SgkRRMvsyBMExjbwmqFFa8KZiqwth/XCQuxi225vms2BfTQFySnP43QZpmFc0tRsNncPt7ub7TRdPv36y/tf/vr4+DEti/OMeIWIrFqp14k3EoJHdoisxrWIGaekUsUgiQ1Fpqr/nqpmWkQgI5giGDM5Kt6nLubtWJsmOsQmuG5qplQXgbaJaNUMwUVmZO+QwHRtP6RcSjWroswhNk2IbaOAwzwt08vleLxMp8s0LiWvot7froCqOuZyXpbTkiaxtouucZWdOYfOEyMTGnOtVczUF+IA8CpSrvV4PD0+PsuWaBca33gmZlJTlSpFRWoq43l4Ol/2++2ujQf0SC44gtBsQrtJpS5Jaq1IBbgiMBjBarswE5F5ms/Hy+fPx5eX85ILMDQb39/4/uCbjWMiyZIWzUkROPi1RWfLMqoool/XdFU9Ho8Gebtpao05FUPk4J37yolRg6XYkDQsis5aREZ23rdGQAqApqvOD5cspepcShUTA4cE1tzE2JNrmrYPYayqS720Paj5GNBzqbRM6rCC5prr4gsHh4CmQAjB8Sq2JQLniIkJUKoWMUAIgRtR0bomQojgPFUVSfOgNSGZSl7Dyb/9YN1zJjuVf/7p01TKXRd2nhoMHs2IvaftdsOMfeMf7m/ePNzutq1zYJprhY+Pz/+v/88f/vu//vnPf/n55eVkui4dBFv1PQZaDEm4qUBZAdQRKAILkiILBAC6il1WZ6iQCQAarHcPRDQgMQQxAQVFABbD1T2rQBVQfssfRgC01yIGroKPFRvnmNhBrSUlpxxjcH7VOqAZiNC41GxpEQcEVVHbdvvdD+H2/nB/097cWOySUk0qpQ6zDJNchjRnC+227eGwb4Kzmqbhcjl9fnp+fD49n8ZhrmIcYtPvkd085XFccq5rcp2u/js1xLVhj7o6dtcFY3DKOFrc7/u9o7tS5nk+jePLuBzHclzmS9IxS6q2qIkhx2bDvvO+7TskBkJ0DEQwUy3JoKxDJEKA9UDCNZ3r2nQDAyBkImbnfWi7frPbff/Dj//wj//0+9//l9ubu2kcjqfLX3/+9U9/+N9ay/DD9+/evbm7u3l46DfbfjicT8+ncRjzlKeaKmvD9mVqiWakGhD6xrE6AAIwFSGDbbvZNpvj6ZTGeXg5z6eLptL79m/f/Ci7pCKbwD/s+u8a/0AI8/z56fOnsrjGpeA6ci1Qq6pluchmLmOtQym5iDJQx7GpLgw4zTpZvdQ8zroULrVkscUwA1SHAARATEhGwGs7ipBZ6MrdEYDybS1ggGYE9ko+q0Su7fvbm9s3d2/7pj++HM+n4XI8pWWpIoTgY0AMCIigZqKguVRC4tBycN8qVFS1lmWZZymLqWipTOS9XwiryJIWN89dylVWlxPCa
rc3hVWf8+qXetVcvR4/X9aG4TfuV2DnNpvt5rDXClYVREAr6DofM1NRMVMhRACiV1G4d2G3293c3mx2u9h3EAMB9Tc3u5zOae7HsTkfCfHuzZvNbn9X8vPx/Mv7X8dpCcG1MRy2fdc2c1lqKQ4RyJEaER92uxT8PA7Krt9s0jQ9PT46dpvNbnM4bA+3bbspcx6LOiQ1yFKTfG2P57x8+PTLh1//vC1LM1/4cpRlmQEsNhHZxcg+rpHIQGjEueq41HFO5ykVQ9d0sdu0/YZ9EJMlpfP5lFJCoKZtbm9udvtDt9k3Te+CZ2ZiXrm9zvm27e0Aznup6rzvu36338UYyMSsjtMsJaUlXYbx+TTkUkO73R7ub+/f3Nzedl0XXGBi/kY+bqaqyWRhbB2SU0GAKljNssCwJO/MoXmErg23u82ua7smOEKtFQUBtKKZKiBh6JrNbX94aPuDAZW05PE8n15On1+GOVXkpr/97sc3XdP84Y//8pd/+V9Pnz6kZS61GGopmR0xM10TMggdI6zQPyLmxiBKUISY5tmkgq+ghaWS+6KJQQBCezUPkAGqgSpWg2S21GVYKhOhmSNqouu7Zr973O82h91mt+u3/bbrYhvZoVnNySdDFLVlXkqearF+t725uduHWBSnpL98fH5+Oc2p1IorpdAMrl1KMCQU0PMyP14uv7wc7x52929u2/1mWRPCAAkwRocWaynFiFgchi+fi4jO45LmwjvXtl2/3TQhpCXXkktVZvfwcOjbhnyZ0ul8efF+t/e3jSPHTdNuNtubWnUp51oz1Il4bb+yqqiJgaRlOT4dHz99fvr0fHoZslZqKLTcbrnbUmihZplTPV9yWcD72PVMADnVp6dfc2LvNk273ey3QG4asll6fjqZNgjZR/UNM7lvihhdUj0NpVi55LLtoI+u9T603geNoTaR+zYfh+U0lONYlqKqqssiUi/LfG6bO9F755sGNwLkm/h3fyNEoWu15OHzsy6zTDClNAMAgTEqWK2iIA6BGRxzDK5rffCeiFQtZS0ihhgaB4xBQRREs1pC0+ikAuQiKdVxrlPW32Qnjepslj/++jzl9N2hf7OJt43fNq4Bt5KdGOH2sHu4v9ltex8jmM0pP70Mf/jpw//6t7/8yx9/fn4+TtMCtlJMza57pZlWQDRn5ryRW0NHEUyMFEkBkQkA+OvVb8ViABmwY0JiVDRD+yquWjMVVAEVeJ1DfbtP46t4ll5t1rDafoyccy6yrhwKUVXlq/dURNUEsKSqrq5J3t4c7+7vq2i/7Ti2WUwus9aalnkaL8t4WeZRtbZtaCJ3HYOmNMzzdB4up3EY0pJUjX1YxaoGpJZVFV/zVFcDxerz/Y9pSqna+1P960l/jHQbYtOFGH3Xhd1mOgzz82V6vkzPlrTUIljNITTB+yaGGCOyWxOQa5XVOAOAhnXVpIKAXhsvBnC9Mq2qe78y+dp+s9vf3N7e3N1ttjskPp0vnz99+umv7//61/e/fngEESKHyCE23m+7tneEUARrvczJaQlQPMq3Qz5SiuRvm6aa14KbDPZ4Tv1j6h+XLk3H5+XzZ/n0iYehr9D32+b+DaqVaYp5udEaq4p3yh5iF4A2YkFdbNrofKhQMVICLXU+XnKqRuQ8peCk0mUsF0ufob6YTIBJoYpkxYIsDm2dRigiInkEB4aqAMZmACpWQQtYhv+sE7PWO4R91z08vPvu3fdt09Ukx5fT6XRJKYEZEREjO1qvWaXWmhcwJWLXRN8q+4i4toJ0ZRumZRqHc8kzgTmk6P0amVmtVFkJI2JmzMze1YL/8XX9///gyhXuWkBGAxRFqVqrqojqSlqUuuotzL5BY5tByeV0On1+fnaxa5sWY2h2282w3w03h3GQWo0YmffbW/TNeRgNKHrvHEpJeRlLrWi26TpTVKlo5ro2NVFrLYBN8GjgvSfEWlWR4nbbi3afHuuczOoK3l5xMt/8PMBMDflGAnmXCploVaArKpnJERKpWao6ZxuTTgWKsTFzbDhGdKxopZZSs5oy+6Zpd7vd7d3DdrsPsXXOX7sRRIiECBQCIjoXmqYDgxB9bGLbRlUZL2NdZtSqIjmXKUsxxhC6/c329r7fbtu2jd475xwy01elIhN2gfddOGy6LjBJFREBKGpF62yKSyUTMumWsNrLDLEN3hF7BAYVNKkG5Chumu0h9gcOba61zHPJU81TXkYz2Gw3u/3GRx7G07/+y//66d/+1zxPITggJUI1qSKIr0mxdIV/w2vyhYpZFQMgZnIOIJipOjVyX7oX8FWH943lAtAMQUBEUqrrFZYBvaPzNB8vw+68ub3Z3y7l5gb2AqKxjeTJsTMfxFfzVUupOWe3JBNpuu7+7i4X/fXTk3+5THWRYuQDEb1+Q9RAAa2qzUWfzsOffn1sN22330iILnhiRFMyWENrzNgogGuci1/s4uts1ZFrmrZtW+8dkFUtohmgOo8xsPea6uk0UvCRXOfiHYUuUGAXY7uNS+JptrJIXRDBwIi8mImqmkzj9Pnx6dOHz8eX8zwtEJCZ2RO5lXtQ51mGQcZBrLJzzjvvPZhJrtM4mSMT49A25NUMRHSeZ+cELDeNkgeVr7cxhHWUailLhTInaQNvGt8GCg6RMEYnZlWlFJuS5GpVVYrUWnMtWWoFIOJtzIwYY3h3OIT9Lu52eV4eC2U4d44YwQzUTAyy1rmuQkQFAvQkwMJOgVeVaCB0aKKKtVRMYBW0mKxIDDHVWiUVmxYZFp3yt6w7cJX9IJo/n54v54+d++Fm8/fvHr6/21JAqeXz0yNr+f7t7b5rgiMABuTztPzznz/9f//1l7+8f3k+piQM1MBKGF4/HMM1iBHQlNiQ5NUtJGqianhtVQugAq7I4lUvsp4N3rMjYsBrYgmg4Tr9NVWrVQQQfaQQkb75kl0dmMRI38oVYc1pDS3h2iZamePJVEFVRMHQiJAzpUI+kW8ReLfr1cCzk6rDeSwpTcMwT8M8naEuwVsXfdd1wQPqnJZxXoZ5nuYllSpIHKL3betCQwSi4j3HxpcsJdeUEmUQ1Os0beXkfnPATFn/5dP8zz+dXzL+eLd5e2juNv39zf4t1DRPp/Px8fHzr8/mjuYWnKBxXb/f9H3bOGZDdOx9CKGJIgKrWxhrxVqLGCrqdcYNq/ADkZzzITRd2/eb3X5/ONwcbu+6vh/G6S9//fmX9+8/vH//P//nP//08/tlzoTw9HxCYABMy+3NzZYQwAgUtGTJCWpGrV/eegREcy319+2Nt2AqPBj+26/Hj+n9+3nTboZ5TNPAw2XHxPtb/+4m/s0P5Ck/PspPP8u//vLry/HsXSV2gCgKKVHjh7c4ddRqyOIfZ/r0Uh5/Oad59G2MfRt3waCOp/NUpxFLCaSNE+ZaUQDMERAhKalCUQAkTwgmaACmJKJgYEKWTPO3nFtEwBXiCwTsvL+7ffiv/4//dntzl1J9OT2fTud5SQDovUdmQDCQJaVhOE/jkJfFpLLjEGK36X2ICCaqpdaS82ownuZJpQbvu7YN+4MLIcQgZhVAVM2UidbY
gZImqa+DU/vadHntx8CXY0Ov368vf1z/M1FBx91uG0NkQ5SquWgVQys5L9O0jEua5lKqrbUeYqn1dDzDX3+p5C5jukzpzdu3fdNyCN1ue5Meqtnx5fnx+WWY59/97m+6pt31GzDwzAb68dOvPsbY9/1md7O/JXLTOIJK04RpCsP5UlKuoqLmyIvo0/PzQnR4+4BN6Pa7Os/j+VhLVRF9TSADgBCa77/7MY9Dm0YaXgqTXS4hV0BvvqnoDBArGmqudZjznDUrKoaw6VwMoe0U8TwOuWTVSsTb3a5tN5vNtu82bdt7HwjZAAiQvnnAeR+a2ErJlQhiG5hRrQ7Hy/sPv+Zp3LStc14UjGOzjaFt9ze32/3ehwAGhOSJHLHjr50Y7/j+sPvd2/u73T4gzMNlnBdRFYACJAKmoEW05innJeV5TqXo7a7fdU3jvGMERzmZusjbfbPdu6YVxGmayzyyVCJk7zYhvvnd97u7w+X88stPf/mf/+O/P334+e3D9ub2Zk6LaEFCVTVge13Eq2WWicyoCqRcjsN0XpYKSD6geQKjYuj8VyAJvLowDQlw/RquXRkABLOK1cyIEJGMOIuWaZlzPQ3zh6dT33++2W/f3h/uDv1h0wTHyE3TOXYxpbzMs5Y6D0MM4eHmNvhmGOdaNc0/5Xmi1xLGrroBvVr1qr1cpn/+8/ux5Enln+b0tz9+d7vtHBhLlVxEZXX2uRB9aL6IL5h5028O+8N2u4tNFC2l5mkZzEpsnHOoluYsqUgpg6oYOd/dcGwB90REvnGxc6HBVKQkrVVEiYMhi1qt+Xw6f/j146cPny7nUYrFJnpmNKhZprEY1uGljBdLsyMgqQCGzOSj77qo19z7NC+DN2zb2HZmZuO4pHkKUYzdsuQv3Qsm3Da871gYl1oeT7VW84E2jT9sfNs4xyiAyBQ8Re9yNS0iBgqWqsicimhOZRtC7/hmv/v+sLntNt3NfeprOCXz/UPT9i4wOkOsqlPJxzRPNSnoGnqNDM6Tc8SEgTn6WFVP0/ySz8s4jWkBsCpaUq25FpEiVsxdFh2zzeU3wQPON41IHfM0pzyPKiU33vMa3lvzME6ts7aJm74LPpjBnMqHz+d//sMv//yn948v45zNgGFdhK/tDxUzU0RdY9UMDNUMQXWFWZjhtbtta2MeXlWla6+W3bWIMURYk75RkdRQUY2MiRGQgNt+Q/xVFkOvilnCq2fnup0jGLKiU8NS1KyYEtEq4jcwBVunT6ZgaoJaAR0qomhOKqXklPKyzNOYl7nWyWNBJPKRFbFi0bRM0zTO05xLVUP2kT350DTILtUias5zaIKbylpksUMzVCIEFEFV/OYyBlXtedZfzpViqsjJIJsp8771setuWQOJi15CcqO95KA+OoSac61nMUxFcs4mSkTBB4esXqVKzqXWWkVqrYAIRgxARN6H0DTdptvt9re3t/vD7Wa7c95fhhU5Wp8/P85LZg5thyaas74cL4iccpmXFBzOw2U+j/M453mpadHytX+ByBw2bXe77+46iwhittRhKs/nZ/1l9rHWDHnmcfRt0zTb6GNzf0ddEKjjxw/PLy8ffvrlz0wpxEPT7ZC6uVgfL4dOokaDZPQZ/Kfq/nqu85CbTJ356EVRT3NeclVELETA6FlVzQGwkQNkwAok9jpSwzVKQFEE0cwEtJr+JvQdQA2rAlZgJh/bfrs/3Nx1m93p8ukyTqlUXQMFmIFQRJa8DMN4Pp3GcSgpmVQiDN6lZVyzjkUkr2yVWnPOqVQz896bats0zHwlCus1tgmvx+ia8HQN0/6t9cjga3vyWsvgFfv7m+aNgRFj0zVN25ECVLFQQdXQpNbYNrGZp+CXecmlaFUwqKplnuXzU9EV1sy11of7e+e8ATV9f09vgel4uZRhHIYxuEBEYW2raJ3myefkQmBEt8Y4d52KAAgaBBe8CypFRAHREHOtvhZBoOB917q2gYGu5cs3P7D3/nC4v3/4nudBYkTiJR71POZcwciKGZoZiEmuMifJhkCeXOToyXM1yLVMaVEV713bdvvdod/sum4TQ0Ps+dqBgC/lCzPzdaiEHiBEYAIfuZR0Og4vp+M0z6DKIYbYqgKQIxdi1+/2+65tvVsDma416Lefi3fudrf54e72frtnsCMAKRZNxUSBzLCqFzQxlVpNC8BE5NY9ldroHK0kaAxN3O7bzY7YqWipdc4Zc65q3HSb3e7Nmwfy/Mtff/rTv/3Ly9NjKcn527Zv0HOtWUUMSAFlXRgKCoKG5EkU5qLjUl/mPOSamSqRgjMzYAVy/15A/s3ztZTGFYf2xUeKgKhmayExLdmNsz8Nx9N5HIbzzfb+ZrPt2xiC9z42vfcNk0M0ds6z2/Z9aPq/+fHHYUpPj8/zlNDWoCsEXFvQ+rpIcEo1lUs2FcIkFYjg3d1tGz29KuFsbex5dh6+6cRE75sQQnBEUCVVWQzTuokAMQHBypTW9DI8Arexf+vChlwIvqlASg45ELHZXGsRM+Ri6FKuw3D5+OHjh18/Pn1+yUtBoxVbmGdBJ0hiUMdzSROCRiSuVUqpIXrnXL9tiWvNpCZqi4GLjY8RRKzkPA1Lytp0uszyNekGwTuMDo1BVLSWZZZpxrxUE0vF2CEgaEVZdQZEalVEDEnRsmoVkVIn73eemflhnHQYaogq5kRiiO+2N/fdrnENERfVReq55kVFCMRErJgprK0DxsjcOl+qvISxscfpskx1SZJzKSVLLVJFi2IhKmprhPZvNDGb7baUPJNJsizpOCw/f/wMtYBJw7UIbtrY9dtuswuxkaofn45//On9//y3P//ppw+zOHT+GtpCqxUArwAKszXU8BVXYbbWDIC0BvLi6uZdaThASEjIzscQnVt3CECT1fWJgGasBmqVmLsmOO+Nwv5wWBP1rrv1axn12sCEq3+BSEBKlbIknUbW3LcxBnarqR0J8ergUFCBVHIqRdKylCWVVGopkotKRdAGzQVwLI4y1pQuc0JUg2XOw5DnWcQcB3KNI3bELpe6zLMBdputWUQc1CoxeE/MfnXwmmlZeIUCva51QvYV3XFBPeaXKb1/4YdteNiHN7uwa0J3+/Dd9gZ3Eo/FveTzInmczudxybWIVcPVWM1MgT2HhohMLeWcVnB6yVSyGTjHV8Bs02w2/X5/uLt/02+2zgcxu1zOr+8qf/f9j4fdYRrHaRjGYcg5P34+nofx8enFM2heLC2QJp3nMs1lyV9mZMg+7G7j4W1o9gEa3wBuikwTlTKq5bI4E18yTwPWgtPEpTJ659oQmkS0LNOn08sfVEbn3/TbB27uCjG5U4VsBrUWgyF256Z/ojjK1GboF+tmAQ+D89kARFGMEpIa8yr1VnbgCNFWa84VDi5m1UBWt7auKfHXdKXrqW9QBXIFUeVALnYcmlQU5zSnXETQeecc4ErsrPMyH48vl/N5GseUk2kFFTTNCdI8IKLBKoURfY2IqGZgKLWimXcuhGAqQGh6jQUy0ypVapVSa61AhISmBK9K9hXV91q+XFfBevJeF+nrRoZktKrFEKuKmSE
TEgKaD77bbDeHOl/Gy+VyOV+WaZFSRaqJLNOoqgwYXYRqkkq32ZRamPHm/h6YH5+eL8fj0+OTQ1pyUlUBq6qqCoCeGUTOp1PblM1mJ7U8Pj4Pw8U737XtMJQiVcDQcdu1Xde64EzVgrPA5tAQcY20+tog4yZs2/ZWOZjzHFsL/Vx+vaSzJVGrgKimpb7OP0LjY4vsBCCVPKexSCXCtutuDje77a5tOh8CIq8JEYRATMwrA+NawTjnELFUQcQmBmIyk3Gcfv7rL8PlFEPcb+/f3L9pmrZUMWQXGh+bEJsV9e5Wd4Gu3eCvI2VPfNtv02H/0O/AjEVVYMpSihJxQUIMQIE5gmS0mio8nYZSahGr29762Do0XPFYN22/I4NSKxgWgcuYTHW7OezuHg77m/Pp6Q//83/86Q//Gj1u7+9ibIl9bMCJU6miVgWrEiBCMbFsJGSU1S6pnpdyzDpXFCBhViS9Uqb4N52Y/2CKsytR+sovorUqx9e6nGhFGq2FfV7m6XJ6eowfd93tYXd3e3d3e3Nz2Pdt33YbZgjed33vfMDI333/3WXO73/5eBnmeckigsSra29Vqisgri5q0dNl+eOff8lpBqk1/c7/7Q/Nrmf2IKj11e1Av637UQyqSq01Gc6IKTQGZiIzYNh0N7HpnKNS0uV8qi+/huavLmxjtwPiJZdU1ICRGAHNai2LlmLA58v0/pdff/rzX399/+F8vJigI49KdbY55SUZGBFznYiqi6Ej4lqXea7OY2zcfr/pN5JTyRlyEec4Ru8cVEkl1VLMwKZJlkn01TW2Mi3RNLBSQOk5oC3ZSK0kvUjNKkbIzKpUjASwipQqREYEhJTBhloFQAB4mtqPn6dhovCTAUrW+37vb97uu82u7YP31UCJNQTzDjwbwCrd1aqIxkRswCI1lWk7/eQ3eV6WZZmXeV6yKBggMDNRRSY2YmLCb79jLoaACLUGq0WkTEkejyOpEtq+daTs46bp97HdIsdpTu8/Pv7ll/e/Pj4dLwOGDXAgYkAgvMYL6jU999rBXoN4X52euPo6V5IfEAM5IkdExISIzI69R0IzqKa45tyoGdC6kfjgfPB93zjnq1FwvwGR0bUZQ+uMeW2AAtiaC15zWaYlnweS2XLULjRdDMGRY3flZqmakq4anGowAySzbJKsJqt17QpJMQGZJYPZqgkXw6XoNNZawYVIAZFYzWqptVa8KuEcURUVVSXCENz196sqwpjoC7Lz9WsGVWGuYIuOSc5LOU7pefLHubnfxZveEwUIHJri/YDTvMzzMKdxSbmoApFz3vumbZxzTYwxNkhURZac53lZ8pJLNjC3bsbexSZu+q5v2+g9IZRaak01ZxNbXalm6EPbA62o42kc5nlezsNluKycUGc1aMWaJKVcyheLNbuw2T/s78om9A0EV5GKYM5Wsi4VpLKqC+xKYue5ixg8GKERO8fBWSTz4LKxCtQqKEJEBF5Vs5RiqkhggSl6X5x3wChoS0VFL4BA61xWxZTNOUBUBENDUqbVCaQoXwKfV0iLrcYQQ/tGbg3gnNvf3N2//d65sOl39/cP+5t7BRrnNKWcqqzdRGRk5tZ1sW2dd13XLfNcVqyNVK3FVEzE7FWi5UX0Kj9cI69EK5NTVTUjZrRVWVFSSp4dwjV3Sap79Y9cVU7w5f9yfdm09mVE7DqB+Xq2IBMTgKSUDEUMrjUMIiF5H2KDCC4E9OsbyTVXqVlLEallns9PTx9d0Cp5Sbvb26Ztdodd13YGcHN7W1Kep9mqkGdDFDMzI2JCklLSPGfMDrm9ixbD8cUxMTmEGNX6KpKWRcxwVbrEiEx5v03D5fzsDK/y/a+Hy3XriIimRByiU8TzbIvInKUKEqqZOkIkcp59oFUvkEoqudbKzm02/W5/uL257boNExNd85pWv9pVBnOVxNAK3gYE565KvVryOA7Hl+MyL0Tu5vb+/vbu5nDjfawiQOR9dM4hOQJAU/eNNvs3ZguE17E4MnHXdn2WZkpJzIx1JaoYgWNAQisidSy1josSlypzjts2tDGwa1zc+NCKYi1mRgacBb2P+7v7/c3BVMbTy+nxQxqOm9226zvnnK33SXTGhKJiWiuIqljN369/xAABAABJREFUKoJMShncOclQbQROiIpsSAB8zUCC3+xj/+nzGh/277SA9u8eVatFSrJlnqZxGIdpSVUNYmybpotdE7zzjmLbsvc+xDsXv5/yd99/d75Mnx6fyrT8J7/1qu4GWLLkMphJYCLQLnimNzfbLsYoWE3k9Ui5PkzYNL5rg/eIZAZiJLTSv6oxuxi20fdqknM+D5Nj2Gyfb5Zz1awmucqS65LKspRlTktakmiumop8/nz84x/+8v6vvz5/fk6peHKIYMVyLkkrJXBE7HCZjBGiQ0SQKga2LELEIVLb0mbDudgwrPkz+grRIBWoBmmGtMC3OpKV8O0ImGjXmgOeSUUATUuBuVYhDtEDUDW5Ulyv/QhUe/3/ixgizgvay+l88WSO2bnQAI3LlEuipo2IEYl8Q13PTcTogWgt3KVUBEPnSAFy0ZRrWKDUx/3n83Q5L5dzmouBGTCxrQ4dRu8ouG/nFuAIkJHWDFljX6uNS/0kQ0npft9+d7v1/SF0e9dsBNx5PP30/te//vphrgkY1RRVnbtmwZhZLXVNoF13FgTWVxMxwdUoRMTrcBmIwXl23jvvVj6GoQGUuib/KKCJWqmKxD5ybGLXtW3TdI1X1TLOJSVT+bKR0WqxIVxnzIRw1eiAFNWa0zyO8+lkeUyRlz5uZbPZdI4DOEcIq7LDOYzEEL11LLUtueRlnoa1AzHO4zBP4zIvac4I1HY9MqcqFRC5o9Bu2s6QcqnzNA/TUIs1bet8A8ClpFWbyUzoCA1VtUBdrX3fjMXBVGspNRdtWgUv5CaVNNbzsnw4LZuGdq3vmhhjn4uNS51TXpaUUqo511LFgCqjRnNITYhMfdvFplGkInWa5yWlLEnlVetlFp3rnPOmebrkZUpiWU2LmgEhSq7TNBHAtu+avus27TJ1T0+fz+fjMs1SCoN4sMpKUrTWovJlh3LO3+wf0j21Ia7WLxLwgOscDbV6Ke5yom3viPnH7/BmL6aUMgFQH8Kb/f3l9v8YaxL0ITQurir6rWid0lxhVkuoXjJEHrqIyASEuWLVPYiYZrQFdCFTNmYCMpMCK8+qMlWHQrB2YgiBgAFpdVsjFAP+qiuHpu1+//f/5EITY9u1/W67c87lUs6Xy5zSkjN759l5F7bb7d39/abrCEFrXZYlpSWnlPOS01JKrrnmslJgSnkdJ0ktWquUUmoBxBACMxuYaCq1Zp3Hceqaxnvf9ZtaMhGtEaFmq1xM1XRlbds6HAU1BQArpSwpp/zV0YOIgR0pXI4n4pF8dD6aY8e8AqkEjIlD1/WIWZS9X7k1kpc8L8s4l3l5+vXDPEwvT8f7t29/+N0PfRPZcNN27+7faCqfpve5pOAcMoHKauFnommcSi4xdrDdt23TNI1KiTG8PL0Y2P3uTdP3OefLeEnLIiX3XdNutpHISn7+8H
7tWv3WP25iVUGR2PmoxE1bN/upVkxulFJhDbQlBgJDqFVSSUtapnExxLbt94ebh4c3290+hAaJpFZAZWamaxeLrsFZaHadANn6/kRnqktaXl6OHz58mKe57/r9bv/u3Xfb3Y7ZIXFAIEJHxETXtoQSr/Kar/DL6yOiwzQ9n89k1sYGg49930xpUkhJtK6dQTQgpIDIRlUki0kZp9MyhSPut/27N+8chFv0Cr4KVlEzJvQxdJtN99337zaNe/n0/vHDXz3Lza7rtp0PwUC1GpgAGTlyhFWwVJnSslRJKhUZKlT0k0ICFBfNoSIpIhhVqIqmyN9WmHptvwHAVSN8tch+8z2E6xjn+h+/irqIHaARIyjYMJdSLnOqpWhsOh+bvXfBMXlHMXAMsWlj5968qX/3d383TmkYl2nO68SKiOxa2b+aV2GFbPjLVP70149SKyOoyf/5j7/vuq2DIrWYVP3mBuMc3xy2d3e7btP46KuxKBYtZOZDbOMuhoOJe3l5ejldTuep67DUZFCIDQmqyryk43k4v5zn8TzP05zTMM0v5+H9+49/+N9/Pn4+k3KgNYdcx/mypJRMqBCjsdO8KLvKmHxkA1XANItJLhm3e3d31yFTeMnDVGtdVLFtIxrMsNSiJWHNaN+o81//QMfcRfSEnZdcdBE1NQIEYnLeFKomUXBMuO6FBiJCYIYkALNKLWksdev4pomdU1drHI6/Pn1oAFyp0G0jBfaiqlIypgCekUnBrFYFQEQhJu/AwFfZN93fHu6G5fw5HV9kmceSC3hYbxLoCNpITVinJ68fzSvnjBDZyClIVr0kWRlfbdNOhYSiUJjFTtPy6fn4fL4IAAcviqtaa1XjrpPnqxD3+u28/mmIBgjrmgUCJCQ2IkMGIAM0QEIGBBWtorVIFREzVRND5ym6EGITYusc11pzStMwpKnVr0UMvAYikXNfiHJoaqpiWvOyLNO0TIvmuWbTugAUq4uVpm28c7zmkiMAsUNEWlU8Vqokq3PNY56HabxM4zSPyzJnU1qWis4VM46x6/sQnPNODDRJqUW0kvPb3ZY5Xoal5opIPnhey0ixWmu9kvn+nVxh1dxetRpiKGaL4FzstIifoA3SN7bfeAZaBBTJVhfSuh2ImGpFqNmpFFvzrp0jHwKA8yGUnMpSawFTx9yE0Memb1vnuJpVQAcYDbWaKiBAcblWNRFAZOf6PvabJkTu+nB+fpmGQdKsUkqtKEWl1q+NS0BAjyFQ9OwdEpKhAgMCOWCPYCwZyfRyqQYWvKLhPEnNUCoAhsB3fTyEIEICBMTE5qm4NGpZ5mqLagHdLlOHdvY8Ki6muahD3SMx0cJ6RvhsmsxWJ46VVTSPKESKYGRiBIZgBK8lOCABVcCvU3GAtu3+4R//6/3D2xBijG3bNMuyfPj48TwOqz5XVjIoWlMbBGhis91sovfrlVtUas15SWvgXbmmbdVSS73+UtZrVZMNjJlFdJqnx6fPz+dxnpdhGAhx07XNTbvd7kwV11Ak0dVSJHpNlbB1RqEmsiI2yjxPd/cP7jUteYUZiFgpmaiGjo29GSuAqWKtRIzEIfiGaFMthKAqNeeaPBNLqWnO8ziklOdpKcvCpqRqRVz0yziBKhICExAZoqwDoKsuR0VqKTnllHPu+/7+/t55l3OhtPT7DcdwOp1SzfM8TZeLpOy22LRN2/exbX2Mzjn67YJRFTMhACJmwBDavt9LxeAbrQKvfZSqdU5zzXlZ5pQzILRNc3t7e3f3cHd333a9AYmuAytzzjHTdXEaABDCOlSidaiECFLLvMzHl5fj8bTMk3P+/u7+7u7h5uY2No2oAqJj4tXY8+VMXFPLiVYZxbc/ySqELapDStWAfKiIvmljtblOAGU1cyKsE5jVUgMqOdUy52qq4uK931jcZwtLASlQipWshH63u7k5bHabzsr4+fH90+OvjNJ3TYiOCKusuAe7xt4YIIGiZtVZbFV/qEIlW4AzUmWqwAK07vdKZKRA//edmK+73Dcf4rc9mHVPJLp2TdSsVqsiuY7sjrvDU2gaF71vg0dUAjFRUO9c17b7w3673X6RGbweDng1o4LYVQaPAC6V+nRaEJ6bGJwPm83OgBvnHDl8JUyuDzP1m9hvY2gcOTYLJmKlAEGInXNNKZJTncY5p7Ia4tDUJGudpc61LiktwzA+v5xOT58v59O0jKfL8Hw8//rh8deffp3HtGk3riEIYAa51JRrMSOFGYXYSjHnhHDxiY2VmSSLRJACIbAKek/eAztZlkyEhN55YE9mLOJq5W9dltfEuFXx4sETVDYmSJOoIpIn58h5lXXRQuNYCapaFZO1EiUStWqWVWarWR0RK0BDMKf5+XLcsOsATaQPXQMQxTtxUAqAgjEgXsWIIqvpZ5XJBnb3zfah229i793FyASVXs06RLhy/b5d/U5VX4f+pMjIXpHEJGk5zfrr4+Xu9nxJNgvmVE9zOi95ESXvQ+tqIREstRKR946Z2LGaiogZOHh1OiMRoq0T+Gt/m53zipgFqtYiwlW890ykaxfRoIjmUg2JQ+Pbvtvum7YjgiWleTjO4zBPy3jfqHztxHjngnfeO/fqgUREBTUEkZqWKS1TlWqiaLJo0TylM89t6LvYtI3zDvG6mMAMxarUmvM0TZfLZRzneV5qETAKISK4XHTJ1cR81zVd3++3Pja56jLnYRhTzj74rt/e3B1E8Hi61JKbpnHerZ7asuRa9YrL/S3DgxAjc/COyRBW2pohsRkqmhqUgotRstI4duhdRG/ojYqCqAnYikfLOaeUlpTisqBzDoidCzGaowq6ypP22+0P3313uz+0PoDBlJaK4JoWiFdLLxjUlIZhGs7n8+VZTXxwm3773bv7nOaXx0/Pnz59fvw0nI51KVWqmso3jFsVWc7DdDxhE9F7jwRqc8mCbG3rmE0El1TOg1bxfUchEEKI0bTYUpopBa1tH5FoyVBK1VIwGxU2xI1YFTPVeSm3JT+qvhdNqomMkW7YbRAL1UfUi+pcQYTQEIpSNVJGZQJCYwNTVEYFQ1IgQDJgQ0WO3+hhu67/x3/8r+usnYmd9+fjS9V6Gc7Ok1kt2XJaYIQ8L5LrMkx3d/e3NzeHw2Gz23ZdR8xSq6igrdJn0zVm9fUBXQ3EFRCYeBjHX399/8///C8/v/+4HI+XCzvvt/v9/cObNw9vtttt9IGYai2vvuNrESMiUuvqz10lw/My/83f/G3f99fPxSyJJlEyIADGVY7swCCnvDZ6GRkbDNxs95TbJpdUUsoe1dTlparWVFVqnsfjo8qyfP7wodv+S2xbF/waEtj0nSLUdXhmRrU69l3bInHKdRiHz0+ffQhv3z00fTsuM10urouB8Obubpmnl8+fVeX44ZNDUkIk6vt+u9vWy+ic+zKBNVvnhRWACIDNHHPX9GCs3dbUyIGC5LxcxiEPaZguwzAQ0X5/c3f38ObN291u73xDyGJAhDE2ROjcV+/zOqBDJCYkR+w5xlBzfn4+Pn5+/Pjxo4re3t7f3T3c3z9sNlsiNrXVcMmEX5hUZgaIT
MjEjhgBTO0L2AcAmLjt+6brSq15mgRmRedCbDualrqkylUBDK7QCgJAh2CEVakAFa3kNvvbHzaH74q6YSpgrhadlxKCe/Pm4ebQAcj5+PTx08/Px0dEC9GBqSqA4UqCQQJbI14ZjUFY1UzJKTtlX4mLYDbMiqJswGBEiMAETpC/iUv+pkD5TUHxqtm68ia++bdEqApXgSUArH5sQHJspkXlMi3vPz6Sd00XYxu851zqME2i0LVrBq7CFdZ4lQ5ffVFoejW14Dr/AmAwBuPLZH/66UmF0Hi8LL//3bu73YaIid037lfgQBwRaAXBRUJiQ0cYmx4EX05Py1xEl6bBpt333dYTa05lPi6GOedc5nmZX15OP/35l6dPH8dxGMbhMizn85gv2YoVKAmSd2RgWUVMVz9vThXQpEIlrTUxExB475qGtSHoaHb29DGHDgSySpZSjVBsJsLQMBEDOrWvgut1JuoYmZB4xU4RIEO1XOpS0UJYDTaAlRmJIfK6b8uiukLiqwqZrg0QIChmo4pTCuTMYMnzcTpFogTa7/RmE9803DQBAQ3RDBAQic1M6hpIgFBESlGxSKHjNmDLEAErUF2/VIiApHitbH9bxFwBj9fLpzMkVRbDuerTZXn/+fyXXz83fbPZNKcpncZlWLIA0BVwCCAKCKtLyK2nF+Kq4FslXIBmiMiAtjpU4Xohw3UJm73q2mRVdV2bimBAxD42XWg6cr6q5pSX8Xx+fkzTaGql5N8sD3od6iO+5uGBmaisN9FxWSYzdc7F4D0JapaSE1SUpHUhJhVTUakCZrxenlVzzlIWtMoExuvtgIg9R/Ni4ELT97FtibDkPM15mbOIeO+bTbfZ7NquncaynjAhBmd+PVUK4vW1r82sb+pkQggMjbNAyrh2Q0CJDNaYQKwAZsSVEYk8oSNaScheSBTVwGo1y7WOczIa1XDO2cfGh0jRiVnOOYRwe3v7w3ff//7v/vb2cMOGKrrkBI6avgOkeUnznNO8lLTsdvncNQZZam6aZrfbHnYbArnb9S/7frtpXj43w/E4XTgNyu7rbExNlzrP6RIshxqAQ0758eVlUfPbbd+2LngvdWUiCapKsTSDaUVwhm1oqOvaPhixLYZJkAklIxRUxdVBlKUV6NvWKDyOY6l1MvGx7Xa72xgKTlnGkApVg4qIBMVBVlAyQAWkdfNeOV4GZEYAzoCBxPArumtd/sRoK2NHrdaqyswxhhijcy6lVEpRUyml5jpdxuPz6Xh/9913392rErmma+2K+78ijVaL/ToGMlglOIgI3rkY4jRPu91hmvP/+pd/eXp+XlK+DGPfTzcHCbF/8/aHd+/edW1TpaxtGBV91RO8VkZVRKSUkko67A99v7l+LoiFXCEXwdg59i6E0LQtIKaEarbOZoMPjj1euykqJg5iMN2AsffLZapzUSnzKDktx+MLORe7drvfb28Ou/sb18RUCwgwEgEwrJEoHgCKlJRTkWpo5Jwn9F3jakbvQaoLgYjyNKVpOn745Ii5a2suzBz8On3+zY3/ird8baMxcWwaZL/C4xTqksYyl2keh3FIKbHzfb95ePP24f7Nzc1N07RmtAInDdExIeLa0L06LteLBK7WQiaCUtJlOH389PH5+aWU0veb+/v7hzdvtpudD0Gqmhm/tofpuhUhwGsjGolx7WN/ETCtpzi1sW2bruRUSpEshuaYg4M2BOjNWhC1papUE1FAYERDQBBE712z297f3X6/278pWUsFUxFRAwjB7/a7GPH48vHDx/en8zGV1HjHxFf5JK4ptWYMQljVMkBBFg6KCOSBnbJTIEUUAQEUIDB3VY0rR9QQPP3HZsw12eT6+RDR+o9EtG7TK0Pr9YT9pj1zfWvQkNdQ9jnJ0/Hsguv6lj2zZx+bAKiAVTSlNA7jOI6llDV+Ab6ANtAAFMhgjUcFegWm4FLk6bgAPCFSLbI25Xdd9N/+IATkkB2SQyJECqBkYI7Rh6YmXfKw5BQ8tDF2Xd+3u03XEsoyH2ut86zzfL4Mx6enz7/88v7j+1+XaVrmZUmSkqx+OVNTrXUd6alew/bUpCIAiKIKVBFCRUL1RGZMaI0rCZ8fJ99pszUk856JELESUdM4cWzgnQvfNpbw1RW3fgcJAXm9dxgCMNhViEHkQ0CElhSkWsqWsxiArHXDaodBYyyoU6meqGUuAFnqnJfjfJbAwzYK7VpST+YVaG07MCAQmIKIiaqqlWo5axUH7Mk79IQeyeEqnV0bN/9J7xIcqIHqKvMhZCQAJUADR6J1zOnXx+P/+7//25jmv/8vPx7H5fk8noZJyas5M0Qi9o6Zybm1vCZmp16qSC5aV929AiAqsyMkIyJAEFnRb0BXk4FJLfVa1BiAEVFsYmi6frd3PqaUc05pHpfLcXj6ZDV33ZbxW+4FXPXuagr2Olg1rev0aZnGYV6WSNZ0ze3NJnq0MqssIAmspjRLLcu82pGMEGPgEMg7DoHbdrtmx0xzHselVHVIfWzazcbHhlxYcj0dx/MwT3My4Kbrtvv94f4uNk3KlpZc6zqL8WymVVVhRT6sANh11v4VRoQWWVqShpTIFFEAFVnWtPMVg+Sd88xupe6aIhs5dBGdol7lV0lMpjRnPV0G530Iwfngokf2yPTuu+9///u//6d/+q8//PBD1/Z5ySpCjnzwbRuryPl8eX5++fzpkxTcbjpHWspNyWm32XRt28TYBL7ZtG/u9m8f7o6fH58ePz59/PD0gbuu5S9XWNTkloUuXUki0Vw3HC9/+OOfzvNyuLl5d3uzf7htmPzNRpnhZqtdrIgoCo5cjOHNA3gQ1knhGFgr9FZ9mSCdORefScGWkjn6fnvostT3H8ZcLmIhBv7x+27Xpek5nJ9CSb4kcghIVtkUVcDMFIVWtZ8Zm5kqgTJiYFdNWRVVX4tNmJf5D3/608vxuN6riUlFSloQqe+7ZdmUUs0KIZZal5eXz49PUqTf9D/88MO7H7579/Zdv92uRYpzzCt0G66mIkJmJiYK3jdNs9n0MTZ9t/nxx785nS4//Pjj56en4+nleDxJURFjjtvd4R//8Z/ePdyv/MbXsABYrztfrv76KpQk5iZeU9+NSGLUtrVK6JyLse27w/7Gs0tpWc+AGGPXrG6IKipFajElz43vu11fhvn4+DIdL3laShapAjUbYqnFAFwTd3zvmlgysHIkdkjewDMBYSkl5xSbrt9tmr6bU5rzklXVoRFm1TmneZrzPEkuT+8/mEF7e5Baas7fZnFfzxckR95xAAQDUxBg8jGSN0JIOQ/DfD6fPnz69eX5aRgGx/7+7u3Dw5s3b9/udjsmxld5CgG/ijeupZFzDgCvvymCcxRjyDk9P3/+9PHDX39+bwY//PDju7ff3T889F2/mi8QjYn9amUismsH3lBfLQfrVvUfhL2EFDn0TW9NU0qd55SLqRqb7Zp46Nu+64rYp6fT6TItVs3WcFxSgJZD39989/D9w827m/39MI3TtKQ5G1jTxr5rY+Blufzxz3/48OuflpJdDEgrTBQNHFIEdEhgBBVr0jqJ
zQayZnqhA2JDtlU2BmsNQGTOEQVHjNIY9V37bYjCOsJZ24OAaOsdEOG1xYVmq5Drqkr/6jdfa5bXD1rXtgqSmI3T/Pj5BQnVNLbtbn9ouk3b9CIwTMunT58fP39OS1odUPgK4lo/vlc0F8GXZg2wAhaFl/NS/vi+pBTo/8fefzZLsiRZgpgSM3MS5LIkj1RVV9d0DxGMoHcBgYCsCAQi+IA/DlnBflsBsLMz09Ok6tHMvDwinJiZquKDmUfEzcxXrLvRPSPl9ermJREeTszNjh5VPQdB5M9/8WUHL9ZLJCAHziM7BkQxRBImQEZkQEZ25BterzevX73drC48h8bTND/ItDuM9Lzf3T+8u7398f37Hz68/5BjlmRZUBURmLjoIxdqqtqaWCnGqFMEAiCYqgEZGkPxBgihNcsP9zs6pNe+W2+6sG2JDN2AqG3HYA1i2/cNH5nLUnVgpovQPiggQWDsWxLECCo5KydjH1ZrRnBkkGMz7G1Eg4RznrJkLX6saEZoeTRgota5jYEQZJRZZpBxwKgYO50ouQtxDThANizQA1TBssg8SYyWsuVsqmSlmJ6ImcyIqXQOYV6g19l9cd57QOAcc03ZVkaxZi6AHg/TX//9t7PMSXWK0/3zMExJS8+HEKAruaLiY4flPoARSemorhwPINZkMuHRk6vwWmiqZqrV/dRK3su5EDh0Tbtq+w6AUowpxXmep2kaxxE1N6H7qL698D7HWbs8IlIC0pxjzCllH4Cc61arVec1N5KGNO0ljSqx1EASswcipqah4Nl5ct45F9TMuWRIWQxFnfOu7bq+B3JxlmE/PT3uDkNUxKZt1+vNZrvtus6QpvlwGMecVcxQREVzziVsPta4HVFXncjAOsprFxvn0bGYZSBBSmBQ1qfS5oGoZlnVxAobRuzIeVZTQ9OsYjHJHLOBEqEr2Mc755um6/Or5Nmv+vV2c9mv19M4qyo7bhrXt42qBO9zjO9/zONhR4ya86proW9Wfd81ocw2IfjAq8DYe248OhMZ9l3bnqIxMnXJ3ExZUVRE5+FwOAyHYQrkJ+YYnHQNOXZ95zZrW28SBUIiMpANfPE2d3yYp8c5vx8BGoLWbTD5tHYx+8QQAeaU2cPmshljp7YKfsrz5vqq+/JNc7G2Z98QtIehSWYZDUCFzEqSyMzQ0KrCkBWqkhyjd47NIvN5IbyIHIbhabfDRXmZmVvvN9utqojYNE0xJlWRrKVY5LA/PD09ZcljnPaHYb1eKwATkStCI3gsAmAix84xt02z6vv1ZnN5uWubBgxFZNWv15vN0/PzPA5adIlde3V5/a//8i+uLy9CCP5l/DrFmEUIajsN1GeNTtEYETYN9X3IrvXc9F3TtaENwQUfXIppjnPpzM+aJWcTBQPvfdv5tgltE9JhJATvSMZ+nuI4TSmJGAAjMrkm9Jttu13LfpdSYiSHGJCoSOPMMcYoJuQdOp5yHFM0RnROwNI8756e90/PaYp5np/vH4yoixHQhv2QkpTrdZ62KPl9IwBQQmYAI7CYc07DsLt/uL27+/D4+DjNc9O0F9vLL778+s3rNxeXFyE0KtkAiBgKqV66zJeZq/ihVrt4QgCd53m3e7q9/fD0/ERE6/X2iy++fPvmi361YqaYkqkSEhOUhxQWTrgEwLSkgcCgOAKlNOtRVwmAkQMHIHOkBM6RJAHG3KB1TXj16loUHaIjvHs+TLMWjWlEbLvu9du3b95+tVpfOt8iz0aQTb2ji81mtWrmONze/vh33/zd3e33ncPGs5UiRUWAgNwiBSQ0NLOUJM6IM2p2DOiKXk42QihcALEWesIFouDRs6JR34bPMTG4BJUVtNfi3zLnvTSPOW6lILgUQi/ECZpZTLrbj2b3SK7pVgh+jtg2wzjO337z3bff/XB3/zCndBweJSEFhkBU74ItewUDBAUEoDHmaRqIcNN/S4TIDKs3b9/mpu7Eco4pz+wSmlOpzQBQqm0InGfE0HWhX6379aZtezQTnXeH/TzrfuD7u/3D/d3Dw8Pj4/NuP4ASKGnp/MHaVQuEalY15c0Q7axD6qgfUrgrS1liSvOckPMUc2BFKLo5HlEUgQgbR0weMbSdP2/psYUbK9en4AJ22DYumqWoklKGCVjZOXMMxOAAmsapNlLk3w2qnndRqDAFoEjPTI3jtsybZmn2PO7h+TFEhWbbuI33PYIJsxioGoCqiMQocdaULWfJSYo2+ZL3IiYCNNPKxLzMV7p1v5rTLHlOyVQF1Ah9eYMisQ9R4w+3D8M0PO0PQPbwdEgZpjxnnTUjEXsfgvcK4NixK1oUgA7MF4dPJERkh0iGpBUDFlbPSs4JVSUXlesSvviuC223bvq1Cy0SJzHvqPFem5B9IO81alJJxfXi5Y0xgxNLi6C2hBpICpizqhoQ+SZAS5oI2fKEObFzzWrNzvkQOmbymAFEJauZqEnMiorsQwcOyTeNGu2Heb97erjf7/bTPGfnm6ub64vry4vrK9c2c4zDGB+fDvvDIauIyDTOOWfNWbKoZisyJKYG5ZslL4624njBk3cemLJyBk5oHigjZzADw6xqOaOBagnzsCTUnHNmgEjCAjmlakIAYAkBIyK5EFoAGHa79z/88P7m9c3Vddu27BgFc84AEoic41XXtY2fht37dz8M4yE49+rmarvddn3HiGked2OcsEy15p1bd6tpvX5adW04TWSEFoI1jbaoPMc0Rk1xu9m6ZtX5BjMcHnZ+mJrG992669fh5ia3KyUEibmj5OZhzU9Ph/ePw3fTQMR0c+O2TcMSCJ0SGjWISWjI4B73r5qQHm9ex2mzXm1f37hV6xoXDPqnoZ1sLpacyMiFBQGRXEzpSmqHGZxH55AdZSZJuW1O58LOrbcbAaPiKYPoveuaRlPquxUiHw6HaZqGIauYY9c0jaoSUUzp6elJDR4eH63eJi5Z11xHQiVjPKJ3viva5n3fhMDkdvvnlHLX9d55AMhZnp93Kt80oX316lVO+c3bN5vNppDIorLb7X989+6w30PpBmQq68V2s/7511/3XVdWfdeEpu9X1qw9t33vmpBMQHPw7BGTZsn5+flZVXPKIOrJtatw9epys+37tpn2B5O46psW22mc33+42x0GMQtNu726vnrz6tWbL0Lfidp+t8spIULjAxikaZ6GMZU+fk2zZQMQBBeaoKrzGA+Hx/fvHz7cFo/Tw2HIhM/DAIiS5jwnQ6IzQ7tjt0VZDAI5IrCYhzw8PDx8eP/+hx+/f37eqea+X3/x5os3r99e37xZ9asyBpCIEEohMxQBSKQi4YPHEsjSe844jsOH2w93dx/u7++Y+Vd//qtXr99cX910XQ+qWQSsvIsQ0MzKbGyLVi0REhBacdYEVT0Mh74PIicje1zIaybXBhcYskFKcx6tb/3VqmUfPKP3POc065xQBJWcay7Xr3/2s5svvgDfTsmiSFY10NA2b97eMMl33//93/zdf/7Nd7/ZHx6uLzYXrnfsHXtUAvBIPVFARwYGGk0pE2RSBQcc2AdEVkVRJAFnSM4RUiDyjhsPngT
8fPDqCqpdVaShGJu/1unMYQBIn6b/wvvO8bE/Pw8PD27dt3jxQkXealOr6cQXvdY0DkmvNyBqhLWa6Xl2POeZim/f3d9//mj3dvHyRGcNBaTZs2W5dVW+snO2JgQhGMgdABmRF7MJYTKPZeaOiiPQIAcCUwAusqJiJ3V4SuMUeD2pvtEUi1Ltd1vS5f3LxEHCPGVA1KbQxFjdg1MrYpAg5tRQ/MIQSS6EzVy5K1eV7KrK0tRR3X8yyjgfWABu+pmMM0Tvs9AFwvl/W61LZoNQBvVte6NuV1XeC6MpViFMyTtRFzbcu1zXOd/VqbYQtwTrBO0gIzkxIqgbp5a2BIwPaVHq4/wo5MwEHSEA7su5xQzJGt6VrmS1muNa9uhiGFlIbdREDW1M0dgYVTSj3GE5F8G4LdbP5btjttfWgAbjpfrzgv3c3aI+ZbLetyuZyOtayElNKwv7s/HHZ3h+FuGu53u/3+Xc3HfH2+Hj/bekFvm5aWAL7a+2R/fxdSuFyvy1ov5yXneqv2AEdwJwe+GYY6V761pMM22UDuBrseEGZQS10Xq/WWhNrp0bUSqmkrpYe5kzV01VKyuTWr1qo3NdXe1OOIRmSGpmDuSoBmSNBZXURC7h/W14AMHKBqu16vl8s5r0tesztcT9fr6Sq7xSy4Y+syFQV3FA6GAMXW0mq5IugQWYjMDB0ReQgyz+Xy9P4f/9P/8uG3n6v5m3fvJMQ37741hMvl+v79b2W97EYOCGAqJGMah2kad7ui7enl+Ov7T3/9+ddlybtpF2NAgHm+fPjwntDzd99r+8a13O+H63Wp9xrCF2UMkTNB1+4iOuJrVXXfFTbeBV6z2bofoqe3ApSynF+ef/v1p59/+tOnpydjHna73f3dbtoRInGgNJZSeq6iFi3F5+t6Pp6BqDXLa26t9l17EwUTgoO/9rp2LczriAOJRVLqPeISwteGCwcwERp3A1F8++23MQ0SopvPp1NeVqREEHuduW25eq+Dst6p4B2TdZRcYftIABnIdAYzy2VO+R5dD/sUwuHu/n7c+5xhXSGvc8655Upzo2sJgiF6iCCBY+AUJUVJiUMgEerlpMIsnIQZVXdfdcHcdhkzM2EeYwLEWmsp5fn5+Xq9hhAAIOecUnr3rsUYcl5zXnMuIQSR+DoGMvPW2rqu5/Plz3/+888///zp06fz+dxZ38Ph0AmVPjnyrZvmi4b99Zd5rXBy9w5ipmkahigiqrauuXugbm/h90wM9vXg9g8SoN2CcRH6wcfUGlTsaUa9aZa/Cth0cwVXNmVQ2ip0SHsCqncyhwnIoTkwAJjX3JaqM3DjAKZYal3ma8snWq9Q11aKu0P1gAF3K+UCpSIqOSOREzuJAfvvIyU6hiMid8Meah+8tlpKmZd5zauptdZijHHc3T28+e67Pzjgr7/9sq7r09PT548fggRwq2VtrXaFpapmaw5WSo4xIUIprWozLdYTF8y2ImrYrmu/Ft2xhohE/TjhPUaIiHu3fe+qSSEFZnRz/SLshRvb1y+9+YZkrDVt1VUD8zgMjFRrNQO1hu7VmqupqqEiqnMjfA1jhK7jiyEYYmChG2tKRuT90no1NVN3y2QGTgbkTgDoqnlZsq/QHJq7druICE/jOE1TELkRMfbqlbvdX8QphXGK0z7uDhJFgXf7q9ZmhdAxhoFDQATT2qpqa0iQxpRSvHv7ePfmcX9/J0SuLaOXVUtrtVRtrZ90iDAkGR3MrdXujDPTbnVQgJ4ryg7s2MPFEc16KpcDmXEPiOoOIUJiioQMQKyGLEj8ull2x+iwP1BMzuyIxDgERowi+zxKyQUZp3FMIaJha5YBOqO41FpzbQ7X44VrU63eKlrDbosiCFEAejpX91S4m7VW17wQUsmr54zQGnJgaWBqRT0XaAW9ETUKnkaf9uF+p3d3NI3IXFVzqQC9Ltr+BXvRLw9LCmmK44Horo4YtNR6Wa55LSddz16rAyA2ZGMikYFSBCA3Z8KYYpQgIkB9W+6U8OYruckywcxfx6zu0ANUHLDVkpfLfH25HJ9zWdGRJcbnw9394zffvOPvvnnz+GYckNZBzXFeoGbeKtR6a+KX9yE//fQXFo4//RVJrJHZ9gj2dmxEARS4FVHS9sfUo7d6RZP3HlsSRAYnIZwSQhgAGmAfS2wLjQGRSBgGkubVWiesTE2bWcN2+1okw+4kb7UaQWN0IUoimIKgUEAmRAb+co8BMcYkErC1jITff/fD/eEBmYf9/fV85eF49+bbkJJrLhWxuRp2o6w5qPlSu5bYAhMTuUNp7bnO78vPn3/65//1v/9/f/z1p+r47Xc/Shir+v3bb9TxdL5ejy8lIbSyXk5EeH/3ZtjvOMS16cvl/OHj57/+9Zc8r3e73TilEGTNy+n5cwxibx97AlrO9brkJZde67ndYE4I5Lgd0PpG1tekr6beG/hFACJgImY285rz08cP//if/qdff/3r+XwGCXdv396/+ebx8c047hAJgNQB5gvLgqzO8bLKn/788cPHC4DX2pal1FIcer0BCGIKKGDYqjsYIEqUYSchhsBdChSF9rthf384vH2Eu/hll0WMItM43j3eI+K4P6Q0uGFeyuf3nxY0cOtFE44ErXW0DK/7AqF51x/0kwSi2zbTctRWHLLQLLLqanUN+bpvdwehh92UHh7utDHieD3PtTdzLYtbQzJmkpBC5DTQMPA40DBwiBKTBEEznmIMKY4DTsM90xe5EsBWwCnC9w8PQWRZllKKiPimg2nX67W19vDwEKMAdGjZ7UjDMAy95KiP29Z1fXl5/uWXX56fn+/u7vb7/bIsALAsS9jCfuRfD5X6z+rI5tWd1GdJ7iYiKQ0AgGgpYU+J3MKXv0pTvQHQ7VG/7TwdnLK7q1ur1Vo11abqRDIMnciSSD31oGpfkQtZi+jS1Z2AFUEJG6P1+G0wBhUkITTIC66YdKQEifK5wuqmoUlU2FX21RjVd2EYd2/2d9+E3buVDkVjW2qzVWtGq/51DNk2RWXiHlLFfttJm+rpfIHeAhHjbr+fdof93cPu8DDtDs8vx/fvP/z800/Pnz8tlxNOO0LQlglgnCYkLDXnnJf5OsMlSESipn0kp75VnvZDBPVrjLAxLMy9tYmDBADcfGnakCjE1O8TZp52024c+1jl9fVKtvXruzGzhMQURPa7yb95e9jvaq6llFxqznldlyUvaym1Fq3NHBQ2wzMRSQjmrwI/64pgUAMAUmS/yaA3HONdodYdBbffzNFuB1tiEhKR/W53dzjsxzEwo1mPUNow3ddsDIlzbCjVBZyBJInsU7LARCEMEzGbVQJlJkjx/t2jq8UUx/3EIpqL5tlqrrVWNXXsjg8zaGqJaNzHYfJhJ631flD3V3uFOxI7dSxC7uZmYGGz0fXmEN+AopkCCFH0bsZm3pvu7u9foyKE5e7+8eHNu/HuTsaJKSBSGGz02Gxs1oqqOwSkqKprXl1pDOiyNF3MVmt1VXs58roQGrmTNkQGwrzKMs9E1FoBsMDM4NXBal2WGRFqyaBKjC6iYzAHW5pDs4A4
jR4FZc/7h/FwD/f7Mg0aA5KU3KAuIgkDAxv5108MdKlEH86G6Y7TIyKF5fz546e2PNf5k5dlZHGEUpem11avcbyfdo8xDgSb5bC1olo3TfuNFXawbXzXMSDe0urNzN0a9plNzfN8/pSXI9Y5WnH3tvrz9f0yPzK1u8MOeKQ0aG0NL8oDyhCJIKAQDCl9bYSRP//5v8B2CZkxEgmR9BBeooAkHZl28RNRnxtwkMDS27G5p3UiR6JAGMgB3Ttr52TKbtjV42qdje3ykNJaa0Wr9thk104/O6ITmTIgMBqBEqgQkLAzbGQVdJeAI/zuSDpOadqnmHiaEn/7TSvV3J0TWINWDqPs9kOrUCoXg6aoCuAkQmbkMXhDQe8H0mZWql5eTqcPf/n81z89Pz3Py6JOx+Ppr3/9q6RxmA5ETBSIhAARlZGZKMQoEgyQmHe7/WOzvNQ8r2NK3ZMdhtTMg4Td/j7EoRnOaz2e5/N1HobIt/Mldk/0F+vJpjx5JUBejWeISAS9tMDMlnl5/vTxL3/60z//4//68eN7J5nuHqaG2mhZ3VwB0a11pHKdYS1SlMoM9ZdnppeeSJtza7W5q7o3h4j+IDZCo7K6WyO2OPr0QMM0pBDQQl0H9rYboOZhN32tVSKiGP9/7P3JkiTZkiWI8XAHEVFVG3yI6U05V3V1oquBJmxABCywwRa/iS/AEhsQPgBNja6s6qyX+eINEeHuNqiqDHdgZiyuiJqaR7zMrEYXoZroXfewMFczFZXhDnwPHz7HD8Nwc3OLRMP+NoQIRsNw8p4QdctFarvQNTbbCsINgAAr4LXeJJi2wl61CrUWqpzZ+Z3VDJpNE+hMZCFAv4/V2EVXRpbsVRJqBlRVTcVqqaJYipRMObvQQSzgHSHSrsPgwi4e+n7feEKvmxFR33XDMOx2OwN4fHxsF6uqu91uGPqvvvqy77uLMB0z73Y779d8k6qWUlJKz8/P5/O5ZZG899M0pZREJOeMuAoiXwqqL8Teyzdtv95+v3UU51bajZk5J957JoZXM9j2aAAJaANMrTkUQWPrgjEaojGaYMtdm9YqxMwOVIEvC66YCGo1NLBKoGbAiEoM4o2dIRhUaH5xjKqpYnFOnWdAm8eaQTNKclj6bszyUBNl/YJdZx7Us3gvToqm09HmpQwOnUG4AemvDZTwqtkWsaWUHh4exvFEhH3f3969ETVAMsBa6tPD46ePH54fH9I81ZJL5hZOOGbnHTEBmojM81xrVVEkMgBbzXs3GaFWa8stiMGmOrCp3bFjZ2a1FrOq0sZCCwDVMfddH4MnBLMf7ZJfGhKRMTlz7fOc83VXSpGSy5KWZUnzPM3LNC1zq0GsVVqBZBXdVmittZacRaSWYqJNxBVUgRnY1FRqK6IQRUVQpM21r9WLqxqoY4pdjH3Xdd3hcHN7OPRdhwgqzVO1XrKcL2M/BudDFVhSKaKaayCiPgL3yJFCBwhSZtBCBMSBnUNC57wPDhFryjqOkhdRqQDVuJRSS2mF3K1Uh5m72qkKmMJGL7ZazQyZgRwyr1R7UVMB24IYbFiiNjEEM0MiNZAqLkRwPNzc0MbtI+Z+2A/7g+96DoHIEZJvQLWZgmVQEWNRTIvNczEtwRGRFLOCalKr2EQs2TvyiAyGDKo1lzROAEh5WbRUUjMVUZFU4QxoINPEFagn59A5IkMzA2S/u7V4qNRp2NNwS7sDDX0IrgKkUtJ5Sgox9thD4HgNJiNCgyHUIFUpKSMXN/S+mOazpGPAzBGG4JAoS12qnvNpFkFkEXXIYLpWXK7g+QowrzdjHSbISPxSMtvEQlVFzVTKVJYHtHnXoUM20znncp7rYufj7vHx5uOnd8uyO5+OeZq1QnA+BCIlBgjBX6Pj7j/+/d+paM2LKvhWSe0CsVuNHIAJebVMc0SEzMDeOXbeX/TNPblAoWfumKNDx0iNfAwM4FZuYPM9qLVKqVLW2KXVJDZLHoItiEFSx8QYHEYH5NARh0AxkHfoyBwpNjMFrZccrGO+OfRv7g93d7vzdKh9bAKpAqSO9gO+uQs3N33OphKIfBWcp1Ibbwu97dnUTEBrraUsmlktjefvfvub08On3eHucHNjwIr4w4ePLvZfffPz+/s3b+/fHLo4OAxkCLWL4XB7x6ETE/SuH3alyuOn53lMYCZmWWXJaRwnM2saIaWU45wfHo/3t/v7230MHuBSYASE1HwG24RDzZy2iWvjZlFN1NKEIprm5cN33//93/2P//D3/+Hbb//xPE6+3xcNYg+np+rjM/sAwKpWi5TS6rDUBFT1qKnNQyLSOCmoIIAFcAA5YGKbOJ/MRFxIPh8za6d9F/Zst7oElJTGxVH56ivVl9w4Enkf+n7Y72/Zud3h3jtv1XyIzLzi0KUCIDLapv+PrcAAiAAM0aFTA4W17BRUVrkkQ1MsFbhiUCKi4NFRyelRc6gVyfvdfdi9iaxDR3XnNXphkmmaf/+H50+PyzhZFVtyE11CNSishPXZLzH4u9u+3+/YXYgpcCFOt+Cx74f9Ybfb72utt7e3ZhZCOBwOt7c3d3d3zDzP0zzP87zknJrai6rknJlZRJqmCzMNw7DbDTF2XdeN4/j8/AwAIYRhGOzFx/WlNXPmFsS0eKihQbWa8xxjDCEgYloKIBCSWKNT0VUuuU0taKpaEUCBGq0ZFIXMmMjFEPqABkU0qS6qhoDQ8EFFQCJwjhEdq3kpXtXrAqbViCEQcAFURENDq4CNi5YRKrM5ooo6L8t5nGacC9ZC/hn8t+eMUyHzXcjd06g8IHdWc3n4HlCiG0lHvP0ad/1WKLtyldYVlB23khnTaRrH07GKiBTn3P6w3+9vhmF/c3t3/+btsuTjw4eS5iYOV0oC8DFG51ialBygYxdcIGiaYNRqrlc6NK9iesy8Oo1h4zuvCE2j9UqRpnloquQp+uhc09igGCI716D2a7T/MumvMZkZomciU/bOxdjEirRWybXkkvOaskzLkpdlSSmXXFLOy5JrFQOUKinltCQ1q6WaqKnWXNvEgoyyBTtghqYEKmtRJGzsRyOm4IfDYX97f3c4HPa7/TAMXfSmUks2BJW6UlO3sc9M+323673UPJ7OgMBWPVoceuoH4CBAIhWxgqzJCKZNz5xARbRkrUWkKkARK7XmZam5NLArxKYREMSakJcBQINXtFYAIOeBGIGaBU/Lytnm/gJN0LMlGkW0BbhqpUpOiWLobw4XJKaZfgQfmR1Sk11HAmIDp2BmBCqsBKhEBc1IwBk6Cky+ILFYLiZsFZpVHhMBQkUrNS2nRVS1VlDzimZaq1iBWgqJ6nke2A1ddyB0YCQKFYH39OY2BV9jr64zjsDBoSMANqkpTU/HkkvXDXhju7hz9lIBj4jRO0cwz9MxPWQAv0/379/WfE7jI+Txbt/1wcfQOe+d4+dx+vW33z0/f5qniq5nYDVrfPZGA1sVqXEtMd32SI1Rtw4OZjaDWkSkglWm2sVy2Lk3t4ehcwg6zXN84ilZmp7+8Pt/qCVH383zFKi8u4Fu72JgB4RqjVjzEsQs87nmPI8
nrZWa7FY/OBfMmrKegYJoYzsBIhIbETnemmNmTy5S2JHrmSJTaM+XEZCRPSEjt/xuXeVLVy7cKsPY9k7YxPwJmRyzOUYXOQyBY6DoKTjybIyCuOlXEPEVqEyEffS3h/7tm5slpXlalnlJS061LDWZLAGl84CC4GgIUY1GgJRW6zXkxg+3mixp1aYSqmUZT6Xk27dv9rsekMZp+v77758eH8bT8Wa/jzEyAGhRtBg633cUBvIBUUMX7+7ugajrDvOYmi1iNkml7FOqVQys5FxTXVIZp6kpo7/ag13KqFdMGc0s56qqUltAC6sAHwEg5FTOx/H73//w7bfff/f9w/mcq7DTaBYkY4Za80ScERmBVEEVzVBKzctca5NcZvbROU9MtIrVYiXsJLtUNKtWUasFOEOZ57lUKMssrIyZnGlwMeVcymeiF8TknOtCh031b5vywQxMzCoAqiA0R441SQ0rnxmxGZ02FpC12hkibBcPAIBgJFVzGs/PHz6FXJYfngePrs94C/E+7N+xj1bQEww9v7kLX7zrwOrPvh6+/zB9+FiejnVa2h0taUlCBapaRcb+7vCmCmyigutc1jo+AKaclrz00iGujkUiMgzDz3/+83fv3vZ9L6LjeDqfz96H8/lc67nJxhBZk7Abx/M8z6rqfRiGoe+HBsOM49hKow+Hw3XsctnuwFV99YURkkuWWhvuvjmurcXYl4jnVf9qyyw1bhuumcl1IVAycwiB2REGQCdiuVZrEqpmqo2oxYSOnDNzWVgq5bPVDIICcYGSMQo6BQMtZkJoKCOWJ9IpI5VjWp5OeZw0JPNgHMxYjUFKzTWN45kegLDrg9Ykx++gjsUfHc3eObh9A/oT2iotqmdmR5Ryen56mudZtDrH4/l07J9i7G5v7/Mym8EynrSmBuyJ1JYod963Fb3NMW0ZawLA7FdnlJbabzNgE49oMa41GtGmz6AitZSSF1UlQmaSWriRHpu3/QoEvLoQvIpjXkKZ1e8XCYCV1YMT9RpijbULpXSllNyyS6nknJc5zcuSlpJrJaJ5np+fj2Y6nk55SVJKs901NWC8gBgExtp42Y3bvbJyEBHAgRkReee60OxQCMCkViO94IvXk5hjut119/sYPTFKFQWtCNBsDo1Ys2itTdBh7aXMSK2GQKuIlirabOXJTEpZNXKaTHJjY7ZejbRunAw34NqMGnD/Usppq9IRGgAQbmGaqtbmyEMAhmporWrh1YRMzMTcBHbVDEwBCQ10m5eaNjmZqQrWrLMgIZjRMruaSYpWMgIzrkKZEYRUKJvmWkTaRIO+FYSqIhJUoSIwzcoOYgcgMDuVbKcZmHC/t5uD7QdzoRWaUUUWJcUF0ERzSiYWOchtvo6SCSk455mS1jmdPp0LPI9zGsmWPJ7IavT9fr8/HG5jiIQKCEPg43Ga03O1mcgDoG1F9WtJ6QvXdktBAjQJpTbdO2YDqEWlFNMlRguHcLjp797cHHadI0hzCjF+ehx/eJgffvhuPo2EoZZ6uw97t4ddt+7WNxz6pZvd3x3SMqPOy5zBsnNu2PsQO1CQXNOccsotTrW1qlYuHW5rhK5jv0fuDIKtvvCrnKtn570P3iFgE1SwJlfBaAzADZhFZmSmwByc75l2jvZ9vLvZ7YYYPDCoSAYTsibxBo6d89wFd6mCQQBPuO+7r97f11o/PTw2edU5L+PxyMRpPNeuy2lhZgqdI6ieUFlEVKHR9EjVgSipMPQe+oAxkHXxcP/2cHvLCPT0+OnTQyn5dHza73cKnGt9fnqSkpvoatdPIXgfuO+7VAyQpnGZp5RLMQOOTtdBo7ohciLU/n+dSl5Ts21ibqlFxpLreJrHKc1TWlKuUhXAESOhqi1LOj2PT48PD89SdHDhnUfa7W93+9vd4SaGQKSeyDehLudFbRqXp+fph6fvlmlkF3f7+7fv3+z3t9430TsjBGaWNC+fYDnJQiIq4H1BTyq4nFPJisWcliHe+tueuKwcrqtqi7aOEphqWiYikpJUitUMUhBbXMqw+su0Lew6QxC1eWI7AqzxDTpnaiCG4Ji9SZ2O343Pv/n+2+y99b0bDm/3b//i5ou/9KGrVZ8+PVqZnjogvP3Xf3P/i29u6G+/fj7mb78d//G3z7/+3acfPh7Pp6nkSfWcbT495Dwd+vjnP//6TakX00RibqRRDwDH4zHl5XQ6zvP8+z/8/vHhsdZSa/nVr37hvTezUvI4TufzeZqm02k8n0+lFCLyHrccUNmSTa7vu91uECkti9R1/eFwuLu7gy1tBGstcbl4EZRSWk5qHMePHz8ej0dVPZ6OMfq2AM9zombs1CqU2F1yyU1hUqo02xgkBnKrIxFqK51UU7WqTOy8JwyO0bDtIE1ECRSMEYN3wQyloFaZnvN4XLKds38s/aRRyAuo1NEks2m0tMNnsUXV6rku3xctEg7qOuec3Bh9MxxM6aAVxucxjQoL3AeEbOPvbXlS90k52XCD+RvY1FQ3TvsKXDOxdz6GICrLsozz6LxnYFOtpaDp4n2aR0RSydoUlbXV4hkSEBM1+XMRwAqAzK4bhhCjc56I29onm0lQrTXnZo1pKto0gVrRtYhoLa2axgevWr1zMWYz9d7nEgAvQ+Rll/wysV4LAgHoJV+CqxDKWlRBxo5DDJ1IlaEUKaXmVHNO85zHaWb24zhKqVLL0+PT+XQuKWvj6GuxCoKwKhOaka7VaUDNtL4tSAxgojJP8zmcmdgA+l5VQ7snRLSq+b8YcoNjuh/iF7dDHyICppzzojnLkivhoprSkqxWImEydmTGYgCEzZu7iXUJMTpm5yglqWPNudZKzpmIllLTDFIaAmC61jOqaC3VQLkFfe1oplKl6b03dl0Lwky05JKWRYqsdFCmUvJ5HOenb6Relb43Q8LmVFCbUNq6uVeAispgvZlT8bnYOMlzEq2goLnwuHiF5DlVy2pqhgYEhoiNW6YmYKsGPhMxciDvFamq5aqWJhFzRKaoKkVx2LnhRsBlF2oIWIEEkJunG3nvu74vUmuWJS+55irl0scI0RNFdgB8XvLz0+k0ff/DH37XBdv5PERWBefim3df9jGOp8fAfLsbxqnMj6XkhT1cjN7W7ei6zYZXrQEhukao6wzmtRaSLH0Hd3eHuze3+7ub/W7ofZRS++4W7ePHj78dH5/Oj6fGsuO6K+9IC0pxSFpF62feSV10aFwjm5CIMgt7iz1HH6HqMtIy4bxAys17saqu0ChsWpwAABg5ZBd3LuyFfC66lpggrUZs3hGybcEmEpIj9EQOmYnRMbngXd/FQ9cd+u6uj4ehG4YYAhNUNUGtRsromI0ZvOeuj10XrjeXCNDH8O7NbUolLfM0LYhUUj0fT6g4PZ32cSi1mnM5FOZ1h4HEhGZqiEagiOoZzJOa66OLga3GYX843N47RhENIWit5+PxtN+H4aYqzLmmpSSBuYhfUvAcguu6OM8FAKdpWZacS0HEbvCAVMo6zeWUlml2OqjUz3oAbgFASy+WUqbz+fnp+cP3D+cxmbJZ8xVZtX6laik1LRnVDrtD51
3Nd4zYD0PXD13fO0dohUAZjbk6tqx5zE9l/CGdv0vzxL7rgzrbHUJ3s/PBcZGiZoQ6lXy09FzSWIuIMDT9Hu8NqmqVcipLtVwd+/F8P8+l5OsNWUu/A4BInc5HEUnj6fT0kOZTLTOQEquBojlEt5HCXthAutXv2apBsu5UoXnFABKKSs7pQWQirJWhJi55KQKiiNyhu316GNM8PcBMdv7VN7t3t/Gbr+/u726Gfhh23kd1Lv9m+ZTOD1LOYImIpPQ5aymfVwM557wPSJBySnk5n87LMj8/P4/juCxzjHEcx6enx3leWmJoHM8552VJyzID4DD0LRJqjN0mxescNzvG8/mEiM75GFdPpVLKxXCgOSjNW1uWJee8LMs0Tc/Pz8/Pz0R0Pp27EMyQ2ZciIYTLaTNfIzEtVaC6ukuTAbUCMALDWi3NNc1TndEEOIDvoD+40LfyVDVtdFbvQ9d3QZKlSfIpHX9fTo9SWOugcgu2AwqgAvloZVapatl4NixiBgtGYHaeGc25yt7Hvnvjod/v6uytAplzZCWLjJrOsBxhUchnK4vJi68NXPNh1QAghHC4uXHePTw85JpbrdC11h+sOYcqUq2lLtvRCJ3zzkcAq6WoGvJiK7EarYV2ZiJSpYnt0pEAAQAASURBVJiKmUmtKS9Sq9qqG1Rrbda7sCZoGhjJTGQmZrKVjOHlzOFHDT+fEda2zrlMBGgGZMT0UvlvZiLNUl5qrcuSh2mpIgA2L1NelnkaDdQ5NkMDwOYZsWUaqBVxQBMyRSLY0peOnfPRt/RlwxG99811ZMVLr22TtpsMUrFmcowIWLOUPM9LEcNctWo5n01rCM7HgH1EBBEBQQBUk5pyKamUDAgxRCklLUtTadda8zxPz091HglRwVSsFbcatHqJ5s7TSmnNVvaLlCq6qpo06zkztZJLXpKKNmciJiy1jHOaTqdrkKyJnaqC4lpOvqrxECpAJQsA0dZAsOacxqPkBRVK41o7BjBFK2gVDdXYjFuBnxRrQqZr/R0akjEaoRIaWc71NJ0SCIkggIIj74M0m4nGOwGm1fEBkSg430dfs+gioEVL0Xr9fBDROULywRlKWs7j8VH6jsP7He92zgcf+37Y9zGWPIUQui5Ex1AnTQpGKz3VjFeK3lqRB7AJlrVPsca+WkkCjfcNJmjVketC7Puh63f9sB9iZ1VRXf84oVqeFxMDcP3QBdbgwTE0JZ3aik2umtOaySR6tuhTKUgmmoj3t/e73sW6LMu0TOM0zvO0zGlZaoKSS61FmlyMNnlEDlz3Q7h/9wZ89zRNp7QsJZuImiatqayV3QTIxIzkiQNxYNcF7mPYDf1uv7+92b+7PdwN/W3gQFBrzSUt6VwlA5oL7JwP0YXoYuf7oe9iR/RqqEfv7m8P85I/PT4SsYguUzo/nizp+eF53+0N2QIecUL2YASAQIxEyEKCKoJo0RERonPDEGP0Wl0XwzD00fu6LF0MaZnH8/F43N/43tC50Df9WlHTlErWeYLR8RjOJjAuc0q51EpoXWRCyCWXvO6o05Loi/emP2d2V9NWC67ABBCQkcZx/If/6e/+8de//s1vfleLvX/39f39u9u7G+98FdUqIBIcvXvTe79HaPNzAZVmdW6IqlKLlpJLzstUTMv5fPr+wx8enh/H8VxKxYVZ551LBzt+wV91vnsc03nJpdTjNH54eHo+naZ5UlHnXb+73d/u+26w0Jd0msZzOp7HZTJHb7/+Kl/ZQdhafSkGWkoax/N0Oh0fP3767rvj8SHlE1FhDkCeODoXyAVEd1l1tpVpXUJgGyC2GqgZQQWtoGezp36At198OQw7VchZpvPHZSnzXFz3RdJhmdP49MPxw+/L+fzhD0//3f/+b7752dvQ+a+/vmGPhMvj979++uG5LCn4cP/2/RdffHE43MfY0VaddMkgOOcATYRSnvOcmlwvAKjq1BKOT0+///3vn56eWnqoLYEAtnFfQt/3bcIxM+9933d938cYERkR/dYAIKV0Pp/O5/P5fD4eT+M4TtPU+L/XYExKKeccuzieJ8c+peJD10gbiMTsVG1bcta7SASI1mpz4YKem3kTnxc3Psvxw/Thd2k6FaNw+/7+z/9N/zYaExA5UCTE2MV+P+xvXDrL0/fL+LE+/Md6/sjudvBfUHdbuRPuoVbFZFbUKpo54EDUs+PQ7XadoBOn4ljJm6LtlaVEyA6NHVXCs+E4PksBRk++J98bBgW+pPnWqKLUSlSJ2bEP/u27t6r3peT4IZzOJxUBRCIX4tANu9D1pdZcJdfa1mit0vAN7+OwPzBRSosZzPPUAsclJQQytVqLNB6s1LVeQVRtJTm2YKiRe13wzvvgQvDNqDzGGH3sui6G0MUYzCDnsmlRtPFy0Za06x0aARjxFTZDjTgGpsYIFw0hA2EFZ8GrqPZdt9/vSykl52WeEwiwDUPnHDUr4CYhVqGtDEpqaxzDiNzyJ44dsXPOhRhi7GLf9z608OWS8LLt3OGaQl5LfXp4+vj9h91ux0Q55XnJx3EpAuy95SzPD2iq+x3c3Lq+A2KVCqqAWGuep/N0Oo3PTyY67AYAmKe5pGxmUsr4/CzLiFZNpFattUqzaQdrBNIGt2zBFFxM6ddwYRXjaV+lFiWi0EQNpFSRJDae54u8fZuLqqqoIa2uYZsBCinBRtoDQzLmanZeljqNCKDISs7QIxN7HyJ7IjZgNTa1KrlkMyMkdhxCQGY1RCBh0lxVHEBZSnZSHFRGAsfOQYUapIYqzNDqaYzVyATA1GEMXDpnRsEX1Gz1qo9Zc9KIXX+o7s1+nMfpcVmksGpP7Pth1/VDu2khxtB3xAymVjKkqka50fy9a3ahxNTyKWv9MtGl9qTtN1tJT8k5LYvVmWwR6UyNwDmOznfOBbWqjeRuzcUO+ujevzt89dXd2/v9MATQ3DIWeoX2AYBL49m0Wi1oAqCiZVmmbulAbnyMMfhgylYRBKEi1GxsWhvm3+ruzQytgqYuwDdfvtnfv51KOc3LcTpPaalFZVPIIgBCcsTOcWSKnofghxh2Xb8b+n6/2w39zdANnoJVqEVzkpKkLIbKTM6RCy5E3/Wx77t+WO2CLxdjZmjmmYNjM0spnU7n56fHp4ePy/n03W9/4wj3t28i7CqYUTajZiKKZAYCUjRnUuGGmjM7z95R9Rw8d8ENXZf6GLxP85jmeZmXvYLrQux3ClCWKaVZ8qSS1ZQQPXsTW3LORcSE0IJDBGtKWaU0nrPe3x6Y6TPGdWNCMZGpaM2nh0//8D/93X/8D//+08MnQu9RPdbIWX1Yci651Fqjd24V6iYwRdn8tGrJpZZaSykp5SWlnOaclnk+nc8PVetuiICxFiFLp+cfPljqJHsXP53LacpLzudlOU7naVlSzmbqnVNAF/cGCGpVSm39M+fDeColfbazXCXbmQmhpGU6n45PT+P5ZKbOMSEgSduPqBWzjK3mi3lNUbei61Y5s1W2ruoOgAAKVhFLCHq4Gb78+sv9zV1a6ul4zssP8/jpsSDFM3Rf5WKnp4f5qVhOOSVF+
rOH6bDvmHlJguhC1+0PB9wdbm5ufvmrn799ex+6+Bq9uEAaDAjee9UqTmKMjn3wobk5TtN0Pp8/fPihsRCI2K9rWBiGfrfb39zc3NzcNDmTFhhREwoEaGtDKfl4PP7hD394enp6fn4+nVZPx9PpfO1ifalaIuZhGHbDruu73bCPoQ8+htCFGPu+DyG0quzrDlZrfXp6/vDDB3RELrp4w6Hn4D0zMTE5YzKTWuZlPi3Vauh3Ip4cuI6I0RZDUKOszOIpQzmN6fmhHH/A8aPvKhcn2AMZUQe5wOmTLUepmTzGw47B6VxqGU1SBa5ggqRN1gOUQQULOATvito0pXl6BmH0O6EhY0fgi/FlHkNEx+y9C8GHsJamtyL23W63LAcDq6V452OMsevIuVzqknLKVdSImMkpCAAwsnMhhI4Jm125qpaSU0pNo0pFq5RmimSrbR80dylCJHbecUs4toceQgih8867ls4jct6HGLz3bZfVllL4460tBwh4SS1dxQqIhmYITWKJVM1IFQ3UmalpADWrtaZlITQDZca+i7UVi6tqlSpatTaRmG2WBmSkNQm5lqE6XgPrEEMIIcbYBsKFr/V5NgGgVnk+nj99el6SeOeqSM5lmuaSBQAgJ5iemQA9Uem4ZEU0WSs/0jJPp+fpeJyOR62azmdAKrXUIlIEGOfzuUymeakl59wSuSqiANYgkzZeAbaU2DaTIAISG1jTKlTVlq0PXRfIIZNUUVMghquNpRnUqqVKZdle3CamjXNjAAKQzRRsIczeleBBVY2MWIiNHLGLPiAxqZIoWgu7nKkRIpNz7NCxIpuhNj9uJmOyagTKJgyGqB5EpEApsYgnZQA1LawFrQIKIXnnu2BgzJStpJquBbsVARz54PcH/8W7G1EBKArmA5IDcgTUAK26aY8KkQ2dA0ByzA6IlFkcQxu4K1XViAxwFTnHDQkDBDGratlhMi4M6l3TyUAEQqO1xsmsWSh7D2jucOjevt2/fbffH0KMLBlqNcNrjiIAgDs9PgCoSRGtxUwQl1xQbEeM+6UnBlHMyWnxoB6tEmy4rJkpgAKaWkn5jFi+/uL+L/7qr3f7m6Ly8eHheD6lZUkNA1qzgC1oo0DQMe2D34cwdF30gRyJ1pTHdBqfp1OuWQEoONf5EKPz3vchxBBi7Ia+3+1i3/sYX+UdVr/7sszLOE7H5+cPH7777vvffvr4Wwc0BFSZ//yv/nXonJVazbZCYlt1vU3QlJGCd+wDhr7lmxxDII1kXcDo2TMCWKm1ViF2oevVqNZyfvp0fvwwnx9SmsUMDBwQKIqZABo1gbamFrjG9QhA7IhcSyVc1ktEBGLvg2MsaT4ejw/ff/ub//DvfvjtPw77ru+cpMfjQ7Z8RKRpaRUJ2RPd7fvOeVErIm2PvlQpuaalUfylSq1iOadlmRzJYe/ev7v72S9+EbtuPJ+fn06Pj4/H4/TdDycwP2WYs+amBQIFrKgZIoDwiCCm7Ds0QBPSjIBNSNk7dlegUiOuE1IXo4lMbmrSiD74w+1dVzKoidSScy1TLqYLITA5zz6QC8wOybN3CGaGm9o+GDSRUmwbKue47w73b+7fvv9qt78bpwXQ1TSBnk6nD+WceU+GgXExWR4+LX/376bTNL/7H7863NwNu70PvtSK/v1Xv3p7f9N/89X9X/3Fl8Hjd999uq6Aa/Q0btUvhM51XQy3hxtAQKBlSc/PT7VW51wpZbfbt0XLe991XasY2u/39/dv7u5u9/sDALUo3MxqLeM0EXEInpmfn5/H8++///47ADifzyktLUlRS1UzAHTOxRhagfcwDH3f+xAcs3O+hS/D0MXYOe988JuGjTlupR8AAPOSfv3rb//j3/87ZIj97ubm3e3tm5v7e3ezt36AzoMHwtppst0eRWH/LvU7dB2HvSOiCiplmqqkmZcAp1P57oE+PPXnqUtzrKcsls7nMXdiDuYFnj9hHgWhf/v2/m/+NSh9/MOH5x8epuOciwC5VkRjImyFoToU55BjEHazOe7c4W2k/jBSP0sM6np7Ub4goqHvb28Ow9DHGJ0Ljvl8OqlKM5+7vb1t9q3M7H0QtYenp2leUs4AxOxUJFs2Q0erWZKolpJzmnNelnkuJdnFfGqV9kIgJGbP7Fzw3scYYox933VdjKELIYQQnXNITNh0Y6EVG7bRobqaRb6SiLNX7cKPWceSXUiUlwGGF0gPFBGMmMxWm6t2wszoGGP0+8O+UY0vpolNdUVqaYDnJTi66IG1CPulkGNTObq8jqumHyLqFm+tTVRPY/70PC/CIUQikmq1SJ7nPC8m2WMNnXdgWarNE5cMZmVepqfn8XicT8eyLC0fkaZFTJuVGDsXgssZi2lZ5pxyWkotolv+HWAl47eIRTdmLzG5EGiTSWUkAFGtRK7b73e3tzf375x3JS2llgy4v78n57fHoqVKyTUTb6ppTXIEWcEEMhiAFbOkmkSqd/buHsquLrlmqQXUOUDH6D14MrQiWkWsighUQzUDq1rVgIP3wSlQyVmymAAiGrnmpA5m2FgdpXDKu1wRKxgqajFJaBWgqAKh92sBfy55ScvFA9KwRQpM0e16/7Po+puuP/C0LF3n2JUqU8ljzQc0medxmsdck/Pw/v3BlLhpETQBXMAmd6FWW0cGM5PGAQC8ROBgkc060wAGHgj7nXdsptWq1lKTmdTS+qr32A8cfLy/79+8G27uutAxs4EDEiAHyHAdxzgEsZZy1yoAFcDUJoNnYpiW4oMDLFJrLVKLlqx1i9lBV4MUAANVzbVMWqeB9c/e3PQ+HPs4jod5mVMtUlVVSAE2UgOBOoKI6ImYDCzVVKc0p+k8zucpTRWUY4zYc4i+c+y9i951bXvZhRi993Sldicix+dxGuel5IePD8+Pz89PT48PH5+fH+Z5JLMffvh97EI3DOMyC0ARnedccpGGCZsCGhE656IPcRj6/e14PtXa7CeUTB2BIyAEUCk51ZpbLaCplpzn8zgeT8t4LiUpAgIIEBqpNeGFln+2zS0BEXG3G96/e/+LX/7s/ft3h/3uZdNvYmXRnMxxOj/98PvffP/7b+fx6NDubnZ9P+Siy3xaxrlUmdKScqlVHNF4DJ64VC1Va61FJFcpRUtWWSupVQ1zySlNQ8c3h9uu63fDbtjtgveq9vR8PM3Tp6dcKwv4KlBNEKqjyiRNPaISySylJiTHQEzomZpYbwxhN/RD311U+wip6+JuP4ApM5ZaaikmNfZR2s2vNedWTjbNcyq51iotfa2ajZk4GDtCB8BozWe5bX4QCUGxaXgMu5vDzZvd7nbY3SJ3albTKc3L8ek5J2HcoevIJtNlmeonSbnSp6d8czf2uz1z85fku7vh9s3dm3eHYcC8jJ8+fk9yk3O5DJgGrG9z+0XHsRHASq1yPo8tlMk5q5r32Ii6bV9OxKXU83mqVR4fH47H4ziOteYl+WmamDilNC/z8Xh8eHigBzbTXAoYMBM7F2LXjtN13W43DLvdYb/f7Xb9bhdDcMyr/ITzXQw+eCJCwlXoTPQ1EqPPx+l3v/tw
Hp8I6fbw9u2bd++/eH/79j7e3XWdj5B9LiBeeafOYXdbqCPwngKyQ6gGZAbVKBW1Aoqx727jzVfdxLhAHXP69LCMKAqUEp2PrMkcwxA1TVr9cjpOz4/j8VyzEHs1aqarjMLY9GGYTTD2HMJwc3f3s2/8/WFikv0dxINxsG3sO3aHw+H+/r4fOu98c2icpzGlNJ7PKaX2ixUAAVNKqppyWZaypKWWimCllJozAZacS8k+F1FZ5nmZ55xTk8qFJk9F1GS0WlbReR/dik50XYyx6/suxuhd8N47dsRkq19L26xroyFtUAH+WMLnklG6vLLpI/9TcjIAgLTOLteYjZk1JUzvfde1HZvYSiBRUZFqKrWZJrz0c1qTp221fvmeV1n51pcuZGrcZMOvYhgwAzEsSlWIFRXQjI0YkBqOSMzFiEqVacq1UoMh5+X8+Dgej3meTcQ1u98iVURrRUIzQ1BQNdOSck6lZFFt5UYrtwk386hWSLHeVqR1K/0ic24ASEwuhjjsd3d3oetrTqnkpNLtdrSVWKtBlVpKra4J6xIDEDVN/ZZFMgET1VJlzEVFfReMXTEuTbWGHJFf/6rWojWVokVUoK7RMRCBqDMgjgCgWaWs5TRSwYpqUUBgUq4KufhcctHgDA0qWAbLIEW15lpLtTXNVlMp4/mF32MAxSgrebHAGLpwuB2y3HSTM1AEXZb5RKfoeiI+nR6ejg/LMhHp/qZ36LnVim4+8quIoKpIeyamuhVdrxV1L8ruiGxogo68M23WPRMiMFEpaTwfl2WKHd/d77rY3d4Owy44R6pWxUxJgarhRTd+Hf63d7uSyulUbFHZ1D1qSs+Pj/V0nl0MSGZWTZLVJJqkllrbCIBVkx5bjeI4Hn/7m79/38Vfsbu/vX9bizpKXSziTNVEqZU8iFSTYlC05pTnvJznaUrLnJe55kVrZcDofd/1hz4OO99HDg4ckXc+Bh87FwI3ywZ80b1IKf/+9z98/PSYS/74+Pjxuw/Pnx5OT89pXpgdAaRcfvjhh1TVhy7XmouU3Da30hw0kHHV5w5+f3Pz5v2XWusyzUxQi2qVZuIM1nQvl5oWkGo5z8fn8+Pj6fk4TwnBhcDs2TF7YjAU1dIA25Y1t7UUwnv+6v2b/+5/92//9m//zV/+5S/v7w5uGzAmtY4f89OxIn/44Yd////573/77beHm/3d3c3bN3fk3Ok0PT6dP358Op2nUosCOBe880vJjCgKurr8gCkhoe+cRwIAERMx53z0vBvczc1Nq/4V1S6G3TDs9/t5kTnlrMaOHCM38r0VNLFmW7W6sy+IjZdNCbnr+9v+brfbvbm/u7+5vRhAeu/v72/fn8dS8tIFH/xuP7x5+yalrFVqaSpcyzJP03gez6fxdD6fzss8lbTUnIpqKwpi9syBObLrmCORB2JAJ7VIFUDq+kPf3xFFJt/vnIHk+SYezy4cIWXJz1RnhAWsgAkoq2SQhLakuT4/jQp4uNmB3r691ceH4/w0P3x8+O//h2//4le/mP7P/xbgLazVSW3tcsRoZlLLioPl/Pj4+O23v3l8+NS4FCHEltrouu7m5jAMQ4ydcyfYNCseH5++/fa3p9OxSfmP49nMTqfzp0+fjqfjkpYYuxBC3+9i1+2GftjtGu4y9EPfd7GLIcTgvXOenVtXk00MhggIyTYh/MYdJuZLdRIise/nRf7+3//69PjQh+HucPP+3ZubN2/jmzf9/nDowz7QgHXwvtv3fddZdVDJhICZODjnHLoMYVavdhO/+bPDLd9Pd/jwu+ff/GF6esxlrrmYGhvGvvfUW8fcufH8qQAK5ngTXe9MAYEEfSav7Jx33lHwFoNzfe+Hndvd7t59+eZXv6Dd/pjSjCzdPsT+wu9x3t3d373/4n1oWoIiaVnGcT4dn5+eHqdprNK2XrDp+GOr/hM1EQHTWktJSSWfjrE9WTUbT6dpmmouiBBjdJ698z74EEP0McQu+OCc9+3VVoTJjlae5Rq1VFmjlfY/bHkNWwWelICoNJDmau1fm27mA7hqheklNHlF+12XjO0Drn6KuHKL8ep1JAQD51ZOsV3milfigdsvb4e6xCu0OirQ61/A1R2FXhJLRBRDHIbdfrdz3quKEKr1YITENWfVmqXWMdGciFrtH2suyzhKqcSOnCNENWAr1pZAU62WpVQsTRVOBdA5h8yOEZsgSHOHNiu1ijTyw6pybmpG6zpnhmC8DhwgptD13eGgdaCcNC3kw+VaWvBXVnGLVqVEwKaABCAASUFBSSxnOc9ZU+7MIVIRFiNFA3RAXskrebGSak1Lai51IFuanBE8O0XmQghawSqoYi22LLkuWUp2xBHNB4AsXHReC9BRDFKRJFJyqTmVJUnOUkvKaVrm09OXtW5BjNJS3DijQnFJDaxKDT7YACXnlOo8zWkySaimD4/fj/OzavGOyUXnVtZUAyLb42/io6a6FeS33kLX/XHrJ6agFWoVyWU+nYDZpZwAYJnGx4cPyzwNu3hzN7StGhOVRSa1ysjEpbglSSp4Hc+7uzc387TkMqecoa4FzKq21NlwqZwCcqN/F5NiWkwbsWbzYdlODSAt0w/f/fb7EEbX4Zv3BwMHlk3ryonRZvuba521qNa5lHNajsv0PJ+nnJPVygjRcez8EOK+j7vOd4GDI8/onYved9HFwM4jOXgteqFqSy7znItUUxi67v72Nr1fuhCazb1nds4jYc4pp5xLyUVqKSkXkbKOYjQkYqb+aTedz4h4fH4e+i7lklKZp2We55yXvEyq9fHxw4fvfhvi46dPHx8fPp2fH0tK3pP37NgH53zwYFhLFU0151KyqTQEN3bd4Xb/s6+//K/+1V/9q7/+i/dv74Pjq2uROp/Gxw85y+9/9/t//PWvH56e7t/c3N3e3N7cqIEqnM5LLnWas6iw4xA9+2CI1UBXEz4EUCJrxfWNy0bYePrE5Lo+DruhH4bGmXCNUhC9c83jpskuE4ICimk1E2v25uQaGQ7QyNjYAXnHw+Gwu73d99G7zUur9d/Yxd1uZ9rthuFw2KeU5pRTKjWVnFvBcFrSMs/TNJ6m0+l8PI3n43w+LvOU0lJLUW1xczHJatm0Iw7EHimYVtUFIHrvQ+xDiLHv0DGSjaedj5EYARLIZJqRsq2hZEWrjBJ8FanT8XFecpkCyscA3z9+V+vy9PDw/JvfTofdUOoVEkNNO4SIUMRqldPpfDwex3H69OnTH/7w3dPjw7LMiNh1LbXBIYTz+dx1fQgeEUW0ZfrGcXx+PorUrgut8oiIl2VR1WE3+BB2w76JxwzDsN/vht1uN+yHoe+7LsSwMhKA8Fq4epVUf8m5gUFbCD/TifHe3929PRzupMjDp0fIDw8hnD7d7W5uab8Pu/1+GPZ9PAS367t+2A93p/3Hc3f70e3v+r7bR+k8uRArxUkDSB1qJRx89zUMfB7cst9hHkMsAOoBIgEzWiTrOHUhi8ktQy+ka0IEwSFHIK/O1aZkxIjBc+zY72eI4wiaynFKQhhvOOzyhUpCiI0o2xJnjS7U0kDLPE/T1NC
US/bGrBV8ojVLy1V3uJSi83Ry3hEzEolUz7zf7RB3IYTGBfE+xBhiiN7HDZNzTfnuMnFbI5c23BPsetfYzNBgXevbM3kVkHzWrpNK19jM9fevP+GlbWDJVqR9UQwyurypEX22o12zba75N+tAvg5iAF6iHLPW63QLo1/e4rwLIYTgnXOlgpoiUcvLorCoigCZIDSdE0RirVVyATN2jokBDUSJlc0QTQV141GvcR0SNZ9r5whRVFrlf/NZwC0QhJd4y1aTOSAFUGrV9QhAQIzsERjVIJfXSv0vDvZXnD9rbxaErGhgzkDMxKBkUa0EpEZgTOwwRA4DhdCySeY8+uCAgG21UW1Cng6JvQFbU48HVhAxrApFTBQNkQWoKuRaak2qrdJMxIpKLlJSzsuc57mmpZSclnkcx+n5qFu5uAGqchXOtWWx1ACdi4jM6ECz1AqKKeU2V6FhDDHG0PXBcwBYo3RCapAXNsZRS/KDIpFjXs1M2uJ/sRUDUxCxmksxyAhQa04Lmdo8TyklQNvt+66LzrvmqmxGUkmAEVmBxUzVXT8ad/f+3p/HOc1LLmVsemuN3qDFCKEIKCMBQDWpm8DuJfUIYGjrzChSnh8+fDT7lPR5fx+MOoBiVhthG0HAMshk5Sj1oebHUp5rPmmdQSQ4t+vCYb+7u4n7ve87joHYAZMiMlHwIcQYYnQhonNAvIK0W2N2+/2hCojo/rC/ub39+quvn55P0zyVZi1ppma1SpOzzDkvS04pzXMTxUtLWpaUSsm1lNPT43w+mVnO+f7+zTynaV6q1MeHx/PpdDqdDHScxnEcmf15HHNKYEbMiJHI14qExApgIKrLsjx8epjHk6moFpH6/v37t3/5Z3/2829+8c1Xn0UwAGCqeSnHx+Onx6fffPvbP3z/odTyxZdvdvtd1w8qkrrQ92Hou1xERH2MN4ebGKNtI02k1lpVKpriys2XamomVVREgIQYfRf2t4d3b96E4EvJ8zw3chGioqm1FLRlsIKWAaqaIpNjJAIzRQRmcw5DcLe3w1dfvHl7twfNaTmr9gAbEqvm2A27gR2paq51ntM0LdM4TdO8zCmV6IehP9zeyruSU56XZTqPp+fz+fl0fJqOx/F8Xqa55FzLAumE6JACc/CxBzCpCxggqPfU7/r9zQEcA1noeuc9kqFVgIygJslMTAhUmDR6O/RUinoo5/HpeTpPj+njbyrAkuZjVYfhix+vDxs6SoiSc3l8fPrhhx+Ox+Pz8/N4PpdSVcFMxnGa56VtSnizG2srhF75UXvvmJnQiZiIItLN7c3+sA8+7PeHYdi3qqUWaHrviVZEv8GzukmHXE+2Lx1pm7uvIP91yIQYf/azn/3FX/z57/7xP4zPz48fnpdUpqVUOObnJyVy7IJzwTVNM/AhDDc3cX/jdzeH/e79bbzbxW7oMQ7F7dn5HevO2xAwSu92v6Rvvopv62CGjARABoZWWIVBmaVqnZeUysrVEhWjalwVpIqUopKkliznrOdinwr8LqFfqs45HW73f/U3fwbU/erP8+UyG1feRFQ15bykVEtpJIDtRsFKA1Fplou2UcThJQjUXNI8n4k5xi4Gv+u7Lm5RS/DOe2ZukvMvUcUWsYuombY13i7yE9uMv0rCbCn1ax7sZ0FMsxf4LHC5BtXgs/Di6llvB3wFojDzK8kD1ctPzV6OeY3WXNCg6/7z2fc//rjPf4KABOgAeDW5AGt+6ymnVHOquZo1GUBPDATWrA8IEJDZeWI2NYNCtKmnERCSKqz6ggTEjr13zrvgEYFEVKVVNgoAWJP0WO95C+6Z2DlCREGsUhub2QxLUcpVRFIqOdVaXpyfG9XDWrbCmo6XmjW/bGh63wjoAJk4+FARp+MZqoYw+DBwF12/D/sb6hxiRaJ4sNj1bMRE3jkElCrVpDbuFbHq6pZnRZQZ2VEIxM1TFKxWrUVKqVqyVlUQUSmllCbgvKRxTPNU0pLmaTmP6Xh+KRdHaKqOjoIjajp4hkasnjV6HXo1BUBS9TE6QCEybBKPQC2TtnW89UtztAdo6wKyYwJq0oKicikUIzMCdVq90xjMgJAcgBmqD+7u7hbRQgxNj4DZMTVn8LZncwKVHCDHV4q9+5s9EA6n8zTN85wBrirKQIuiAnCjoZnWVsn3mRfuNgWoSprGEfBscenPAg6A0BQMKlIim1BPIM9Qn00etBxVJ7LsSDrvuugOQ3d76G9v4n4g74HZDBSBmyUJs/OevedtnwSfmcARhhj63WCqncT9fnd/d/9+ybmUIpsFm9S8FqWWXHJaSkrLPC/zMs/zPI7jOI3TNM/znJYl51xqNbNSStOGJ4Snh4+Nx6Aq8zwt80xIuRQAaLs0NFENtZTiaikVAGstF2EP04qgRBC9u7+7vb+767uOEEsVWt1Y2p2EXOQ4Lh8+Pn/4+HQ6z+zIOT8M/TAMInVe5uCbTEMbsyJSakVrQhmma/2nCKjChj1U1Vq1FFNtapzKzA0GDyEA6JrtRUUoYLJG2JrMMlpB1NWKl9FMQavz/uZwOBxudvub9+/ff/3l+/2+z8s4np6l3gMEABDRaZqncfJMzkXvvGPn2Ucfh9gtQ1qWltRuNlq1llxSTss0jXfjeDyfnsfn5/PxOJ5O8zSleSolSV2RXYACYCpZKklNZuqYnHfGTOyYPCEDKJgQCK5OtkCIACp1Wabj6Vlr1ZpOWo61flrsfNSkUkyqG+7e7LthN1ykx3/cEHG9h973fU8IwzA09ZGm+Kcbb7OxONvWuxUltaCk67rdbnd7ezv0u67rvI/EFGLou37ohxj7EELjVF5WteuF7cdL2r+wecd3d4evvvry57/8xcPDp+dTms7jaSlcJZVWMd4YEM7MRCoTdl30XcTQH3b9V7fD3T52XcDYV7en0HeeQ/QcY2S3Qx4AO1VPCJ4MyRQrWDURNAUsIinlnGvJVcSar6IpahEpRUqqZSk5p5KWIkvVudg5w1JVTb7++v3bN/d5eUFi1luh2vbmTdaleUgNw4CE3vtScrObtVaQ0u5ZCxeYGlfbOedc6Lqu6/su9iGGGOPQdzEG11IbxNSk2AzMtFX42Pb5V8FQ88yAhpOt1MaNmwsv0MlPSKtcd61rOPNymZ/9DvyYU/PZT/8ozvNyJpdgDFpc8C8LYv7ZvmcGoo2YIQC0prkBGha1sirMTK1tMQXM1g3nFkvBWuOyHpGQmhzhKm1n0PyqVgMchwiIBQqolhazQkOkGrl0BSgNm2wlohkgNYcfZ0ClCqasoiWX0sREry6OEBk3hvYamCIQGIJhg7iQEcm52Hd1DstZJWcyJvTUgSEpIAiY1VYuwM4FF7vYDbuBAZeU5zTP81xLUVWRZinXkoXWHjRiKwhNAOi7Dk3ATNFqy5BcmChtpmtvVANZJYu256KlpJwXR4DmmmC3bX2GEYFR0ESFyJwLK2vDVmkhohaQIqy1FYBEeImGrd0dBkKCTfd5u4+G1PT5mY0Z1UwU1BRBPVPc9YjIfFF1AlWBdeojAxC1IrVKve51znddJ7bf78fTdHyaAMqly+j2pU
WEzVnoM3QUYLvBreerWamSkmAiBm8EUtRsRjiBfA/1A8hHkCPq5KAGpiG6fd/t9/1hvzvcdLtd6Dpkp2C6xqPbppdw9VvDzU171XnbxjkgMiA1uWBmh+zYRS+tqNxgzddpo7NpE5Zo4cyS0rIs0zSfx+l8Hk+n09h0OcZxmkZVe3z8eDw+Ier5+Pz49DzPC6I5ITRgIjVDRAEFKVISkgds851rUH/LZ4UQzNh7Hvr+3Rdf3tzeOx/P4/Tw8Ow8hxiGLjhekd6lwjnB01jPk4qhd84534W42++0yvk8IrGhlpKmaRaVeTwSk5oB6DrEWnGbqqnoqqaJaijr7lNrjWZaq57HUaV6z130wTEzmCWD2m6+1CI1i1VHFGL0PiBqKVJq6bvuq6+++cXPf/nui/d3d7d91wfvz8enp4dYf/Wz9lxqLU+Pj99/932apt0wxBhCCM75IXaHYQcAIpJymdIyzcs0z8s8z4hM4Bz3Q397d1/ep2VZlnGaxvN4Op5Pz9P5PJ7P87LUnFUSQE4Lp2XOKUmtUqoWrUsFAVQCMVBFQjRG9EDQpq95OuU8fnooZlCzVyjEGc0QI8eb2B0O9+/fffPz919+4f1qyn3J7LbZkIj6vv/yyy8Ph8M8LzmlXFJOKbdc5Spp2LSY14yAc810zDvnQgjBtyRFjDH2Xd8cp5137Nm5FoStpdfXu+dtJnoZiJ/5Cfy4/XgDjQjB4+397ud/8YsPjw//+IdP03mRLK4KqoAKAF7qcADAk1nKqZQip7Oj8sl/8swIgpwwVvbIrEQViRA6sM4smiKiOKrIFZwCrSLUBmunrLZOrQhk4NRYFaWAimk1VQWtCsWwgMvQGQfnyAR3YTjEwW36PXZ1mczsnO8AHFOIsR+Glq+8qOiqCqhtpUVNeso3m0xm9sE7F7x3zjVmdMPPmgOimWmteEHIL8s8EV3jMpcHRbzqLF/BNuspbwola5D7Rx/YVSDyY6xlffpXjJnPjvFZD7kkp7ZD4Uuc8PrrC770+lA/fvESylzdj7WZWUoyT5Wxeo+AoErIgQM4IAA2XCSlmmat1VSwGWq3/JaBloqiTNTULg3RgMh5Dh2xs60oazsVIiYARTUzqbmWXKoIAHjCzSapORA2FKHlj4ycQ+fReSOqtWLKqiqlSKlt5F5usmPnnXfsPbvmuNtWZEZQWAMBZwbB4a63sivzaZEqNaXMVjpNoVWCoCXUDFIcMw+EwzDsdswsp9O4TOM4zvO8ktYNTAWkgBSUgqWAlFpqleq0+sOOCRwDEwgBkrEjMocWwARUmKBzPhCRQdcPLyRlKdP4cDp9L3XX+cjkCd32FItoqblK8wBvDlEbLNgmwGZOhtBW25alZgArRaVWUWkO2cTUCuWkllV8vv26rbRghaaF19YsI0L2DStAVWhDFcHIRB03ynDK8zg9TMuz6ouSsguhk2Kx+1xzBWBVWTYABF3vJ/zxjUO7OwZZZaz5XPOETIZTWU5SPpl8APke9ZPDo+MUGPrgdtHfDv3Nrtvf9MPQdUPwEZlhBWPXkbzmjRvbfI00W5FAiwcvH60p5WWZW3wNYKqNQYfcEni2gvlt4KraCs9UqbWkXFPK85zGliUa1zDmfD7N85JzqaVUqQbQ9z0BEgEjklvP7HKeQAToVv1owFbOF7zru66p+HddOBwOP/vZ17f39+z8tCyPz0cf/G43dMFBm/UAioIAc+i63W53c4geEahWNRExkVU9uSCpD8RNYcOkmbeoNfLZWrjJK47lANiADBDQANR7pwal1GXJhIgYGpndTL13e+f3+1sfvNSGi5xNLcbIzCKl9ceuH969/+rrn/3i7bt3w9CbqdQ6Tss4LheRqFrr8+nTx4c/1DKn5TB0fd/3setjjBzJNQcE55zn6H0fQ+q7lIbmBJRLqbWUXHMqKS3zNE/j6Xx8Hk/Pp+PzeD7N5/Myn2uZmTmnPE1Tmuc0z6Iwtyq3eZaSTVv+lZEJqSVrqeacUhWbCNG7Q4t90QJxiHG/v3t39+bt3f3Nbv9S+v7jFkK4ubnZ7XYrRVzrGhe3Vqs0yw5rRjzk2Dnv16v23vvAzjE5asr/jr137FbnVwBq/mqwbfb/ycH3n9oMoMbOffnNl998/MXdr3//6ZxzrUXEG7KRGalRMTJARkQwKmZQl1wWMJ0pEqBIBUzgCjpBVxCLCZlEk6DiTBUxMxVyBYIBoyqCoSlC42KsHJG2T/FgbMomK9aB1iZORTYKRkpe0ZyktJzPyzRpfVFTvSzPiOi9c47NQt/3u91OtqarOjG0KKoVETu/etkScis8QmQkICRiB2shdIM2dSPArg/iemlv4MEFGUHELUL4CSxkg23sn5tQP+Od/PRPf/z9H+sqV6/bFol9Hhz/seP/z2hmIFVrkZyl8c2bUAKSI1ZiY6daiojkZZGSEc055x0zMSJq01NtKXIAaykLZnKe2LfzXzExXWWUwVTry/O2JsuLCD9iFIkZghkguUAuELOZ1ZTak6m1SKla9TrrsKoir/pMANQc4NtOeqstNwNE552PwXmHBLVmMeDSQ/IqRUFBEmpCrd45UvVMqY/MtIyn5XycT8/TPG8VYEQAZEJaQSpIgZK1lCoVvGsLIdiG0zQ0qHVc58QHMDNwplaq+tjhtr6bGUAByIgR2RM1BxFUUGuOiSZttUfk1c28ObczEZFjBDIwQwJmoyarLVZLBa0gAqvR+0ZbZdQNCWqsyot5HrSqEUAgWD26iVZcTkVrITQTAC3YMBJdrM5Wl2sSuvMuVFdbYp5d20QYADTTW90c4bY+iWuq96pDaHsDgppVgEntUeoPWnegj2A/yPRDnr7X/An12XPh3h128WY/3O76m11/08dd72PvnHdEK2JiCERE3HZK7NxGJVu7UJsoEPFaYEmqnI6nh0/PxNiEobbqqZeO2wYmvEwK695B1QBW6Xfvw263zzk1e5qWaZqaSup4nqfx/u5GUjEwsEagxXZD15oQXmPNtRqzdSgmx857DsEPfdzv93dvbu/evEHHc8p2Ojt2Zna378E7aAQWMNe591+9dRHRS0mziJ6OZyISldPpvCxzrTkGur19451fIUSEUsoyT+fxfD6dSynMLgzD3e1d3++IPSJKm79MYhfMaMnFOZ9ItKbTaTqe5px1vz/s9ocvvvpmt9+b6fH4/Iff/XY6ndmhiNaqSNj1u/3N3d39++HwBiguBUStFiuVsr6oqarmcfrh6fg7sCR1yWm3pF3wS8u++eCcd84TEQ9dPAxDm7xyydM8ncfpPE7TMrMr5IOPw7A/3L55m5Y5TedxPE+n4/n5eHp+TKnkrKen03g6ed8VkePT0+PHD8enh5Jms6Q6ApqDgcAjOjBCQzb0bkdMRN32BIE5xL7vhy72zJwRM+BFfbj1oFdb5Kbrv21CzVrJRCOLNh30FU1FxOaFSsir0w8Rw8XBrvXJVp+4QgUKDa3eZqh/weL00+3H22RVTXkG1Ps391//6hc/+4vvj1keHh7SeYbaIGBUIwAGYGx1HVoNyIiq6bnajMDAAFDQCXjBoIBM1WkJC
l5blILVqCgLupZjZxDCSmDXS0vTnDTCQlYAWsqv4fVGRExI6FA9lYBa5qffffvr3337TUrL9eW1oJ2IQuDPch8rT8WMmbzzAJhLUdXVVXRFWcDMsOnTm5haKRtqvc7grIobsLJtTre7ahfq4jV2Ai8/vX4EF9SEELeAdWtb37Krx3p51yUD9BkKAs33+Z97+ldN23R6ifyudrAv8+R1u76uS5rm6mQ/Z/ZcjmTQtIEbqL8GAm3fx+QEWdRqzTUlAMUQCDwHJtrsYKWuBGmgphCiss1h1tgNaiLQik1UrYpIVlVEatG3GkATkQFAJgCsqloEsPE8vPMBEa2WsswigkhVVEppA/kyXnLJy7I4JtXmskToGIleFiQAMSMRXG2uUFWlJBTBsoBjyVC1SplMEpkU4jpPdTqV0xMSLksax3E+nUrJREzsnPfk2oNR1GJaQEujfCA5ch6QRRRLRViXyG1VQwSHqEYILmIQvKq0YqZh6G4Ow36/D7EjZFSy2gwESdQFL6qqaEiIrolLGRN779m1bbuKVgBBVMcUPGpFLQAVGJmIHfk1B85AHEShFs1FahVEC4HcmhLUjYZrxNQSERUErIAWMmFAZ0YgBERkgaxjDQ3M2ZprYJGaAqhz5Pxa2dIenW0hjMFWFv6Tm4c2FwBW0BH0Byn7uigTAXyn6QOVT6yjw9r7cOj7u/3+9vZwd9jd7OKuc11Eds0DvQUT0Ap32RE7dttfZmK+MN5+PGKryOk0PT4eHVPTEvgjQQwAwGZWYBdAFVbhZmo7OWZqCfVd2eec5mma5mme9mlJUrKuepegYqLNzx7XUd1El9esFm2J9iYT4mIMXReatF0/RAOcl5SrEjI7qq/Jd0S03w9IWjVN5xOKTnNCOpWaj8fTPM9g0nXu/v7Qx950VY3POY+eALTkhGiNSXNzc7PbHdg7QhRrm3shQgDLORfvwWxWeX4+T9MiasPQ7/eHw+Gw3+8RiYjncURDkQRQnHM+hBj7+7fvY78Dcqmo5SrNp7xKKnahK5jVIsdUPk2LgZVal1Jz34no0DAlJ9WLc96BGTlk53wI3rvgfRe6vhumZZlTXnIpDQ+rpeRU0rIs0zKO59Pp+Pg0ns/TNCPRNI6IlGt9fnx4+PT98/OnnGeDLDaZaHNbQ/JmrEoIiNashAsAKOg66hywU6KqkNQyfE4Ce9Wuy0vw9Xq09mfDTaAMsFnnImDbBiJeFj+DRrIAE137K1266H+OZmbmHB/i4cuvvvyLv/zzORdgeybgEiy1FCKaEgAqQl09sKwCKGA2IFunD7WWtTMA4DZRNIwYoBpUhUptDUNq7sioCG3ItN0YrIK0hEYr1nqheyACMDCZg+LRHKKU48On7z59+lBKudx23yT9Y6t55ktB1kWT7fKwHHsDSymLCOLKNJVVwNbAFFXByECb1Uur7Ftjqu3GwUuw8fLKpf3/CmBsEcx13InwR0PYPxJ1/Oi3Xkex18f/n3fCuNIhGl/X8KfOwKxpFBWCljdT0yoiupJlck2LlKy1ihQwqFSJSN1aUtTWOdmqvKiBLqXoFj4aGKzbfF37XUu8OsfWBhtcwiyAbevLDMSACOzI+Qa5mYqUomZEXMVqLfra/VEa061WWpkNhGBIZNf5ezNTRbUmZiEiKoVNoC6WUUVFSi1zC2IUSdMi85hPz4hYSs0ll5RMVZDMOdSAwi3AMimgtaVR2Hsfo48dOy9qUCsgt3B8TU+2bTU4IDD27e+lixBR3/lh6Lre++ABEBSVFJXIkI1bmCsmSEBr1GKE5Bw1u1gzKBVVAQHYYYgeHFoBxw3RZEbHxMzADtk5AxTRnGvOBUB94JV9qaaipgYIyOgIDaxUro6jdyDKCNH7PnrvGRAIvJX+ZvcKHXeiWqWmvBQpHCj2Xqq0PY0BIJKuCAw0rm+bdF7p/rZZCBAIKsAZ7LeaaoFPVpHoEesUfd0NvOt2h93hsHuzP9zsdrthF/ueQzR2FaCaidma/WnwRfDsHbNHduyd86Hh7+wbNvO5zLWITuNyPI6OuWkEQBNXvkw9l3V17XLrqW+7mZana+vGVVobwDke9kPs4/3djdbVNUVFVaHKJtBw2c5srmltl7JOo8xE5LgJH6xRGSJWEZ0TYAbALvornw4zNVAjwj52X7x5dw7x9PycU5LTuCzT0/PTNE/M1HXxsOv7rm/4qYE5T0RGZIhWanHO73b7/X7XD13zMF63nM0QTatWLJlroXmej8+nJWUzbJP+dD5LrcS+lBpjv9uX8ayq5pzv+93dm7dv3n0BzOO0XKhnolpKXdKLdxKiscvA5yyIqYjOBhK6jnjnPLNzZlBKzbm0zTQzrcqnXXd7c3Nze1dFlpyneT6P4zhOyzQlpuKcD3EYDvvb+zdv38/jeDwecynTMo/zVGs+Pj18+vDd8emT1ARQARa1nMsZgBE9gDfziE7UbUlAQDKHhBAViwJWxVyxlFcE0h+3H6t3XLas9noCVTUA2TYE+FMAigHgFmFf+u5l+/2/ZEMk57oYBwz0xVv913/952YiMnkWyZKmPJ4nTbVlU4qINWHyFRQGABLAZiPT/GvMqiECWFOGbHh0E+gyE7C8hSxmBrIlqdexSIgGpoqbpu3GgoNmy+qseqgOyDOZ4ZSex+Uktq4xzLzb7e7v70LYzHURRWqDTwBwFbRUqFVa+NJ8G1oQKRsDdH2aZrgNXriCcy6gxQWAuX5+l+mCXoH2P77t625Hr8SnX01krwGT9V2AlyF2eQVebyd/8mQu538NCLXTBLCLPg0iXt59jbj81Ct4ucatQqQdypDsurdDi2BqlZIqoxFprSpVSpJapFYtWUstZSlpVqntIxqSwrU2ppSZbcsRtGFjKtY+9/JaMyBZ6bqIzAAOrs55i4dpO30kanVTCEy08ihX9SaSlUYopajUH2vn4LYDaSB8Wx0R4PKLLYlWquRcaqlggqpaF8SWz6xQE2pGU1OrkGVeMq34Vpv5sZlKai21VEaHiCpSyyrKStzELuOwczGqghWBZn4Bax5GpXUrMiAFEmRBuohDEmIILkQmBsMKQIqgrEaASLxSNcxUYK17b3MQtC2MY1ZTqU1phQic5469I4ygEHx0yKZGQI6RPdG6AKKqlJxFioIAIJMDo1LUxFptE60SIM3LyEwVTZnA+xbzmIjf9e79uzfNV641J9ryAwVQQmQwXws1n08FUGhBTPOhbB4D8Nmoa6GHYdOyJUE6OWRnyatznF0PXYg3fXcYDof+Ztfddd2+64YY2HtgLi1+BBMAQ2AiWN2ksEkwYNNIdRfrPW65ws9WFlWdpuV0HJ1j2tSMtPX2dp66QUkIhFcjDS5BzOXKEDeFSkAEUEAgQiYP7M23VAFoK7i7mipWng4Zbg7lFzym7VmQDJHMoFap1TCv65Ma7If+FTXPoNHZmHnY7ZBQRJFIakHyMfaNXt918eb2LoSgqioNUBUpNeW7+/tZVNi5LnRdN4QQiF5CNwRT01oqIDnvW5nFbt+o1LTb7fu+996z87yW2vJht5sON6rCTP2wv71/sz/cxq5v4mCw
DhFUeg2QoyEJUjabqyIUDSEAFueh66Pj0GydREXFzFSU2sxAyGBA7BixC54RguddF9PQp5SXXFIupbY6s5Lmqd8fxmlcliUts1mL51zXD2a+FS02k2EzNS0GgigIjBCoJXkcs/cx+m6Iu30/7Lqui861+qZ/ap96DelfhTJ2vQx8/pvrlPDZwgPbe/8zQS+fNUIKzJ33eLPXb97fL+f34+O7nnIuMo3T8yPM82JiKlWqiqIay5YxVyAwYiAyYBEDM2oFAMiATYgQEJVh4Ka7hw4xAnJTygIjsjXQ3+rP0bXs2apLhvCy42UwMnVE3rtud3O4f3v37tZ5t906bOzoVojOzG1Bsk3MrazlHqYqzRX8wra+hAuXgAYACHjbWr8kbj4LSi5U65+EMV6HIv9pEWirZHuFlFyFs+vvXPrQJdT9T8ZUaBW+/vzMAX4yN/Sq4eU32z8vZKDPmopIrVILIKpUKUVykpqkFK1FahWpAECOoUUWiK1QqF2MtdjDNmLG5am8EI4AAAgZNhwUt8OssMt2Mav2Gr6EMitzoyH73CZHvMA/qk3Q9Z+5C3gF5q83UEGtlRPANuJVa0Jr5UYCUkArXMwordSWd2vMuTVKQjNTEZRGDbukXZCc910Xh8HHjl0wIBNT1Hbtrcdc0hUIhuyMnCFfnzeSAaloEVEAVgAFNbzQjhjRABXRjOBqmIgZmrVx16zdGYDNyIDIMSHFGJyR1IqGzOQdE5P35B0ionZWq+WqokYEaOjZga7wwVUHtO3eKqGt5S4mjOQo9NHzVQd1IlVMEI09dj0zh2YI1gTtxFABZDWr1jWoWXcGa0c3AEAyYiAkNMcEzlXva+xj19+Hoeu6uO+6IXYddx0NgUNAx4gE0hYWkApojIBrgLLlkwFWYIY3sY3V0AMR1yqHq9Eyz/P5fG5RzrpgtAjXfrxg2GUncz3DXJgcl30Y4Pb7qwMh4MWsBEDxZRpBwtVPEmADYrb/wwV73XbqjfHd/AwBzWA6HLS+9k0EEG1MXAqxu3/zdrfbp7TUku/evAVC71aNrcvMhZurR9sZtChq03Jd9Xw2EBgAEFoNZLNVqtXUDKHp0hIxNq4iXZJ3bY1a07XOe2LnyMEWqJuBqOZSd7v4miSOgIBsiGKYATJhCRH3+4E5nsezqBCY8xx8XGHVXFN6ansTIgzB9313s9+FuxtQyLmepvl4no7jeZpmoBkIXYy725uS8zJN5/NTCD76fpmnKllqkRbv5FxKqWu2uy2T4Dz5zodu6IbDsNsN+91ud+h3+xB7I9/F3cXF+ifbq6XucpteetTn68SrZeZ6v/tPfMZ/hmaACt4wOILe5YPHLw7xr79587aXWvM0np4eYB7Zam35gGpWzao06XWogmKEAijGpaAZEBpBRSTCyC44Yk8UCBx6xo4gEgQiR0Se3ep920hj5IN3zBwYEXW1glNrUl+IjhnATIoRk++7my/uvvrLv/ybv93t9y+XYyvCuHqr1ppzEllBMr2KCS4hC8C6KF566qUevhF4fxy+XEgk1y++4CVX2aXLoa7v+QUR+eeezR8JgF4xEa/mij8ecVxjMNeH3872VTqsIY6Iighm9BIwvL7Mq1eubQrW3OePrqNtrxS3iGkjwq5VgsRs5mELy9aAggg2Id1trVmfF9JGNF1nXYCGsmzMWlylNz6/J4jYSrRfYsKXPCGtoNSmebYFafZyTQYrxckMDBpPawXeCVomyy4BeCvHJofkFEFVay24ai0IqEDbcYKtziWvp4w2ka+L+MarMiRjQEIO0feD73rnA5FrqPtLIItATKCwmlAosQKyALpLHYyCVi2pLCQCxEBsL+k5QESqa8UbITTl0kZoBzMmRBLGVcoHwQFSKhVEVKX5hAmgLMnMHLlanQ8MiiDAbIRCWMmSiNaaERxhQGQ12T7B1m5jxgjBtSLkFgg2UamsJb8i9hZJosVQ2LW0F1lFrSpqqrCaJBmoIgKDwsW0Bi6DltoCTkDIhM5R8C6GELu+74ZD2A1d33UxRucDOg/EoGgFDUxKtQyWQCtS67ttKmqGF+tEsCY/Wg6npVPFAFThujRRRKZpOp1ODatZH/w/EcRc+sxn/ef1sFyDmC1nBgBoL+moLXxEAFtTpGsFPa6j8FUM06YbQ0BgQES0dZMgam9ub9qcu50J2qpk2qLv0K1eyFFVG+nZsQOEpuPX9h4r7sOOmpZOw02bhZoJ6lU01SiFgKKaa3PltZV63vRUoMEZF7ifmIidJ8fECAhqZrqWOF5um6gyYwyv3F/XKm+sikVBUzmN80McY4zRuy7lZclJamHnmPZMbsVRa212soS0JM6liEoXo/ceiboYDYm962K3pD6XLLXWWkrOSz+Ezvf9/va25JyrlJpzLU3eMOWcm+/VWsOP5Lz30Xf9ru9v+t2+2/V9twuxJ+erAnP4bHESEfoXrkb/xbSr8pzLaaMZm5FJ0bzo/Ojq010s4QCiliLcMaU9g5gKiFE1qGaiKpVrtaooSqCI1bg6BgNCICiESBgcekcuMHskh56sQw2oDoGJqJVkUatfbl4fK3EMgcSg2fo0sS9CcERmWqoqOYrD7u2b97/4xZfffNMo1QCgqm0DE0IgQlVtoYxcaWPAK2IsMtNl6bis1NcIzeVdWxDQvn8Jhq6L3s22FAMYIrZle0NTYNvoEW40qS2uglbM1iSFLh/X+lgTlLTLx9tPQB/X3//4K1xNeradPVwCI1w5HJ9d6RYc6MteZ92tXm/56OWoAADARE0c6bos2cxqKTlnR2SOV60kXX2DFai5GIExrOHIGrcYoJFtsQpdHlJza9him6vlH1caAFzikM9G5yVcQTTYOGrQtq0vV3fRIiyl5lJrfXUtOZcl5eA9EaEB8YarqymAKJohKoCIFslVq0FT2sWqigWp6ko/ly0nC9dbvQsC0TbIYG3C3kRxm1wgAbpQyRXgLABVgQjQVhwF27ytCtZEjcVADKpAvRr6ZpZrTSWzA2ymDM1Ity2Va/UQIBhtZXersK8ZKVQr1BRz2iMRtVybKoonEskeiWoFtQLEhUNhcVDJvMMQ0EC1JKmihggOUBB4ZVDbSuZuTl6IqICoaKBmjZKUa8klz9cgmcsllZoVKrKiqQMwBCFCMVUlXYXLm1LzpmENuNYV05q7JiQ2InSEwXMIYYjxJva7EKOjzmt04hkZCQyLWVFZtJhpVqsGFQCIHXjHTY0GkEhUQQUaTEMMFYAIqCFXwI5NqRnZtytR1XGcnp9PwXtmWjNghM2a/VIm8C9ttmo8vB4I7do/nz7W7xvKsiIx69dNWfUFb7W1JGF9vfWSWuX85v7CiYFL9hIJLwQKUGLX9a4h8FcfbWYtaGrB5Ra/A2gzOm8Wm+DhVQAKAGgGRBbYNfZpC+5eJgfcJvqV+GOGJiobhw8NoGX12pTQ9k5ND+qaBilFSiqopARmKCKllvP0dDx96rs9gKu1LmkmpMNw08UBybeuxZE9d1JtnNPD8VR+uzBCNwyH/f7mcNP3w7u7W7u7K7XMKU3jKlTo2Ie+q7dqCiJatdZSWu/
PecVjmh6dqrZnwY6DD87HphninGfHBiT1JWHYbnQpJaVkKrRRV/65fvTSc/7/2KpIq/t+PQpIqhabxucPj9///fPHb8tyojqRJYdL7BcNpcHT1VQB21SA5szAgA24WXEyKIG1RLAgGsK6QSVEMgRDqySGag1yxUa9v4T1hiYs5kGjGYs0hy9VVTBANDATrblU4+gGcrtM4PyVamet9enp6Ycffogx+i3HpBvi0vBIMxDZkF/YksVre9n9tX9uC9wL26m9oqorRr3NJxe0BmDdRwK8wDDXh12xihdUSAGg1LrMy7KkC+/KzKpIKcUABHGd0e2q2uKq/Rj/w63BBRAygystmcsx1leufvrjI69zBV1f4zrLXV5p9oqEWEVyzjmXyy5ZVZrAE5p579eLV9Fmb2Oblnj7RG3zF63xYEtW2Mqd2mYlApRt9no5Q7oKp7bjbaaPK6yxAuNAzc9X7XKAJii9QvvrnSyljPO8pHRJ8VeV8zQ9n0ZErKrsHTnmyhvqDarUNBS1iqacpjwXWapaNSiSZKMq2MZtugovtzi63ds20yKAQaPrMW0BGCGCmKtF61JsShECO0NiBTVcxyiKmdgqhC0wL2VZcs7VXvqYppxTyZEcsWtAh6iYKa6yICsjghAu7hkrY1hEtCKQ95GgiEDzr2yRd2BaYtwFP3jPBrkImGUij8Zo0dNuiEiQSy6mAAhQBTIYtgfS9tzY8hNmBlYEZNVBFCSttczLvCyj6ou8gvtv/uv/ppY0np9KnlUqiDadepV1tG09aA1i1uVrxe8Q1lJiIFIicEjesfM+er/zMTrHxJ7Z4eotAoSVzIiMvQLWlVaCiOzZNYDZrUoAzE131Ttm75rUiQ/eh+Z4B4a3+/3FNDGE8POffSFSvXNEBLBWOunL8PtP2zrjjwb2j1//yf1Q20q0ItKXIOb1Iejl9XV6/fLLNyH47VrcF1+8AQRippW/Y6uc8yabsyHoL/2/4aLQhomukyy/1ld8OeercbRFLbjGe5dh30QKNli4zaZwYXu0IOxlpwlNZ1pE37y5DWFdS/pu+Ktf/Ssw8M4jcTs8oYuhG4bbLg7ORVVLaUbAXbeLYXA+EHtAQnbMsVQdz/M4jvP5ZKZd3x8O+/u728P+MPQ9sVeVJedpmuZ5XpZUam3QLrRt0urhXESqVK2ybhhVq6ohGgAR4krLWB1wkAhVoVT51c++6uO6448hfv31V6bmHOP/qoIYVam1fv3113G7Fmbe73cI2QGzLendNzE4bRYTUMgqaIWNwS8roN16WNsi8ypdBUioa60GkiHaC2IJAAamoGK1mlTVaqawMtUav24tIEB0hMGAZGVW2rZ+rIhsETHyrru/ffvL91/88nB4w7yOlxjjL37x85xz48Q0ANQuaxRig3gbSNC2CrwRxAC2GAbgcwxGV30s2JZtXXe5L0vm6yDmVdhiVw1ext0LmAMAIpJz/lf/6q+HoW8f2sX4y1/8oj0g/CyI+VG7DiZgO5/Pg5iLau0all3tj2BlCl42YOvt2L5/lSq9DmKuXmlBDAKqSq3yq1/+suu69hH73e6//dt/YwB9jM757QTWNRHWKppLkNhWadq8EH8qS3a92F8FMddPZJva7PX7tszh5Za+hELYSlNfnilArXXOy3/7t/9mv9ut19J3//Zv/hwQD7shxsDOtbkYtzRaC2IMzKpqqWWZ83TM40mXs0nesmKwXdzFEPQn8DNYUSVcUxzcFOcaOQDJOXDedbt+dxO6gdkjkUFDxRXQUAG0pXdUFHKu45z+m7/+1X5Yn0vf7f7yL/51YymQ8w16b7LJWxCzMjmbsoJdYnNTES1SAND7QMBSW3nIirI5pD64IYRdCIyYUzFVRmIwAguO+j4SYZYq6zrSYjZQNbiQMF56tWGDh1QAlBiq1JTSN7/8y9D1Lw/37//h781WO/Ztv33dBy594Rq8e+lHl39ctt4tTbFK6uLWeeBybtu8iLROYbZ1sqswYP3X9vV1u4gToGPuYmQiAMi5PD4d05LWW7N1yOug93+htg51+CcCnZfv/gWhEICZxRhubw9tH1mrjOOcS/3xmy936XIOrz9ye9227ST8OIb6567q9Tna1X8/tSi/eke7Fu/dMHStxCnn9Onxw7RM1/LKAGv5OW2eeW2q5S3VfEGMEWmV2RQREQAjapGtb0EurnUt1uTIdBtyL+dzSWVfrVUvoPrri73EZ7h2TutCuL87BO8AoJRyPB5TzvjHHut/qa1dcYzh5uamEftVNaUiUglEJJU0S81bUNFcGeH1bnFrLyHv1rMusNtP35V1U3VBKl7/bDsq4ib7DdtX2zbH6/sBEMk734VuF2IfQtfW5pTyp0+f5mXZeBQvZ3LBJ172MduieR3D/FO37uo4ti2W+Nnrl7v8owO+fOJVUub6Lao6DMMXX7yPMQJALuXp6WlZlp/oYj8+Tfz84/DlYfwzl4aX0QGv33N15H9Je52Psi52rUwMAOZl+d0fvj+P05UMwcszvZpVrj725al8dg5/bDvw41f+yCXj5ZIvT+Ty9s8P0uK//W742ddf9l0HAPOSfvfDp9O8NGvDdSLG67Bkw3IugewqDy2rouDnH/Fy/v/UMnLdny8QQosy+GX+XA/ZfmsbM2u0pCaq+7772Rf3bT+W8vLpU5uTaV2fr7rK5YReY/0v37VQ+BKCbMN7/Xwi4mahc9GYaHArACJy07XY3vFyV64ed7upL+ezfme4UmM0dsPt/Xsf4utf+FP7U/tT+1P7U/tT+1P7U/tfVftnLFf+1P7U/tT+1P7U/tT+1P7U/sts7uMpATTEd+XUgxkiuqYv09ROAADw+hu4hoHMNlLn9voGfdnK4bet0vgCduLlzfBjLA8uDDu45HdhQ+lwPZUV76JNr8kApNGQL/nRC1x5+bJiWC8HhM8u5eUULl+2ry9HvCRtX5/09Uv202ms1++yqyODd7TvyDFCq7Sap1LLilpesPHr766P9JPZjR+/iK//90/kmV7O/nXa60fXcQHHL7A/gDFxFwOvaXioegXfwisE8Z9ua7Zxq8ywDbjUq/wQbPDj5eCXWsvLp10lsnA7lG2ybXYhTV6nmGyFlC06vNs57xriCrJlpK4v5I+06/tr/8zv/rHLh5d7e3l1+3P96k99Nlz6zssTd/RC4rwk17YP2r55ScO9+ohLd70kfK7PZDvmdo4veZuXH/yxlMAfS1CabeV5uE5Fl5yMI4xEvPFVl1xX5+2rVO5VmgTh9fP4Yw/vf+E04aub8+MpY32+xBi9aySrLPq05KU2tTK7vp9bTuzqPF89op9KrFwLWV1+5fpEPk+74cqre0kCXpJnL8my1/h9Oys1MzXrvX9/2EXnAGDJ9YdP47SUa+GozzrzH71xn53cVcbr1Zs+699//Ga8PuL6ri2JfPmUplC5Lmt9dG9u++AZWh9bkoi8LGQrF/j16UCr2F7/XPJ9urXP7t5nFIqX57kxKWBVHGgU1R8X2v5Eu85bbtOkeu93u51zDACl6tN5SUV+PCquLgQ+m7XsjwyZH7U/kvX8cd/759
/4+kdrtYp1wd3uu+BWOqz7f/3dRwBAs1ryPI05JZHqfbi7v98NuxC8c6t4EDffl+Z9CIgAjNCkw5AseHZMCNAIc0RIyLXWtMxqGmPnXDNL3MqaAACtcYy3pJ5hqxJTLbmuPFZm5xwAXsqrycA5h8wA6Jj66NrRROFUYamm2qrJsNmD2Oo+CwJoigpotsqCbn2CYK1nvuZNAFzKywHUUA3Wyi/Tl25kG5HWtDnJb6XRbc1rdiHt7iNutNl1hmhVa7CW572/Df/2z4e7HQPANE///n/6u4enB9dsPRuBEpoE4DaZXxrA9T8vycVmWn89SvUShFIri/8J4t7KS9AteXyhcgNA01IEsE2dUtcaELhwFUVVRG92w599/dWu79bnMkEusHGjXuA/xSv+3o8m++bPJgJaQRSkWqkgqiJWVHMtjZwLBrzxp9eYr+k3wFrA3hTGmQkQxEgUqlipmrLVaqpW1UStVsvSIpuVFV1Fllx/du//j//1zfsbDwBqNiUpnwdlP2pXwX67XNy0m376168mncs9MNgks1qh3ct0tP4Ra5WDV+Zm24zZPotXmvJKMDKA6OnQeX+RjlKt218FELUte09ruKYXgjgpgMDqDteee3Oa3gS7Wsi1ereJqYG94gJYIz2+3LbLsvlyC66f/3atAMDkvGfvXfO4RbCqduvcL4dh5xgAlly//cOn0zgjrbb32ELXjUO3xrHXP/rR09hO5H8OOE3Xq+d6vQgAGzn5ZeV5pf68chZtN4Sv39/00QPA05L/H//w3T88nlkBZb2TBgJg2Jib66DZWJfbE9r62/qsW5C5dR5YSwmuTnR9GlsnbVZARI7JAaJIKyMqZkKX+cGarL/UKquV5noZUqQUKbmUv/7i3f/1f/u/+fn9LQB8+DT+3/7v/8O/+/XHLjjmtq/TracYAFx69tVjgJdXWuEQ0np+AK2z23UH2lb7l+32y+1/eaiw1WFddGLaBp0bt2QVaAbc9jymWqv9xc/u/i//h7/66t0eAJYl/fbb359Oo3NM7Q/RWvRqL8qHTaK9iWGEEBqjvNaaUlqWZVmWViF4eWTYHKTYsWNaCbYK1swDyTGbQalSckkpNQPr9tDhpcdd3byr2d5WU6mmPFHevXv7t3/7r29vbwDg6bz8P//fv/32h6NjIlprYbcgb+1FBAam2w/BAA1Ity3hy5O69AEAWDW4DWAzKfoRrcdWBOBlytv0aC/7qeuOcNkOrYGymOUqv/ji7v/0b//si/uVc+2+f0rtg0tazqfTPI055y7G2cJddV0P3jkCIEJmdESM6BgZidYgpkrNRNapD45bRdg2IDBXmVJWUQP2BkTEZIzU6isNTEQUdBVkQWwailI1laKqTOwAkQwAapVaqkptQrgMBG0V3K5aARaBsTQBvTWIMQUTBRFTE2A1ljUcuTzmbUd5WSNa1ba0yAfaUiHrwtZCabkQFE0vBveqWtuEri2OATBQUNBttjDYzKG2IKbJ4VSDqigK/9UvOgAGgFrr49PjDx9+8M7zKmhHhMREbrUOXKfn7bvt31dByasgpsnFXAUx7bdp/f1L5QK0E2gmm7bqdLwKYnQLYppJia6iiCuHXUSqCCDUrVxcFMbZ5gRI0FQxcT0k6Ko8/3q/f+m4CGa4BjECVaxkqy2CEV2qlqoqamZ8CerWear1QF5FcwiJAdEUrEj7C7nokqQUE7WqIGq1QhGQNYiBJkwypYJmuVz2oCBqpclZ2x9Nxl7od+0rAfBmgHGFR7z8+ra0fI5oVAUBaLoaL5KZ+v/l7k/aJEmSLEGMFmYWEVUzc/eIyK2rqmfQg8EFt/n/dxyA78MFHz40ptHd1dVZnZER4e62qIoIMy04ELOomkdUTdWgT5D09DA3U9OFhZdHjx496i2VxeNAGKN5i3QdAQiQCZMhUS9ICefCd3Shu7o31WYm5mFnF8SpOai56QAiTn0VgKubgKu7uDULT/jecK3b+Vo40nRBYwfQvwIxd9aLHQVg3+D8xlEAEFFOnsCSWyJycHATA53Kn+a+XlTt9bp9ebne1ZbB6C4/gM07qN/B//2I0/8+ENPfPt5u4F3E2Ynou/D5G1duM1d1RNQBbjbRv/96+X/++DVpBzGxwgAMyUccAYQjDDs8+W9ny41hi6qRju0PG9Ej2r8BYCJkxESYiRIAaJTvaXUIEIPufWM1UxXxW42ruYUXU11rBbH1/1zjB5et/b/+08//1//Hn09LySncp9TAAUZj9g5ivjXGHQZgSFECdwdiYODPbrE+7iox4ehY3YMGAL8HMUdQBrH1Efc2P1FNEDW2vQQX3KVZYdxrb22hqq+vly9fnnuzZO6BZQx0VLIjAhGllEop0zQvi6WU3L21tq7b9Xp5e7vUfdfR3OiI0nMuOSXsXajM3blb1JM71Nr2vW7buu3RZv6+m/p7BHgXPh1wMzw+AbC1/ln2pn/+6fU//NcvJSxv6B7EEICjE4GhhxNUzBYyIAvnkNvMv2euYYAYgNumfg9iAABGQ7jDzMfJHcEwZkV0CQW43eb3IEbMtioU9nrjSuelIACh1+RgE7tsoCXTknGeaJk4JyaAgB2MzkyJAscgoZuiOBBCSVQyc/QeiIpJJnBqiRWBCQg9hislSsSI4K7VAdwoaqyJiCh8ppkQAVMA1JTAXSXspImJSkqcEgBy351idEANxMCMzd2d3ACMQB1ac3PH5EDu7DC87iysFmm0fB+Axrrl/7GhBt72o9TfDrpgvLb1kPnYIKI6046go+8qHbnHc8euYAZqd7sRACJGz8icc+JEd9cBYgj/ueuYyh2mhHUR9jJGiHUMt+4wcFsNNyYmzMbobjKZk91GyBCAzK03ZosmcICIiRlHAbKZ71WuGxDB2C9g0GE39IjvXqVvu0EJRNkggwMNzxxEoJQyqSQPs6ZYUnG4q4EbM5dSYtKIgIg1kb21qqoOolabSXMNlInkQImYCYfxEyYigrRM+UjAIAITJQ527TcaC4zVfVu0AWIScj+fDzLuForRb4EYcADukn738ZzubmqKIoZopsMH6MivjjcASMgDwvpoKI1+c3iLYy2e2+8mrwdwd1AARbDomQvuw9cQe7BlANZNN24f3PssI0cL9zsYwT6OrMRYNvfzM/7u+5WBe5wHOaVcSsmFGHuXt1ZVFYAq0/16ySmVkkYG/K6CEW/u2f8kiLkDAf96EHP7FHd5trHtDi+E4zsRHhz32B2InBPT3dHAQBk4ASB4Cvs0cwjL685oAfSw4TZ6R1uVwc2gu5OjgWMsIcUBYvrZH2QO9qNFABr67oBgjmBoCrojho0nHh310C1Fh6mBlQyMVFgVzab+fQAAIpgXPp3zack5kbuBk8Xu2e/4YGeOUeo7wN0ZGPcR+j0dyAGGI5zHecPUcVoPyWJM7iByPw7H/SLsDX0IkQkIesSTEPvuZf50no5Gg4SUcyol9Rr+Ps367jhSRbe9NLrrmY2ZnGM/T6oCGod1Rzw552kqAYuOIIe5pxfd3HJPJwBiazRAzAGexuH0LYjxwUUCAOacj/2KEKecljmVRETof
cRuEzoqiDqI6eNGBhT7ZH+Svmz8wCoBSvxgn2M6dwe9sdQQoxv5OGyMXAkMQREAFQDAEA3I4c4nfawgVSOSudz2ZABIUw6yzslJMlsmbZATlAQl4ZQwzFfR3V0RMCEk4sLA1Ldk7+gk/nT3AUJgRGNkxigIPzY4wg6JVFWlqgpGKwGIPZV8dH5jihwWRrwZKIMQufNs3xhrggi0FuTKmAwG0Az26gaQMlDP9RzOC2YDSgRHHn3WLPpfuDkEm2P9qAxCNSyv7gLGnu5Uc4duORSu4H3yuQM4+phvMIBRzPJm0AxE74nmYK2Yfn39K0HM2MopDAtvIOb9Y+6myw07U8cLd8cjAABEaB/L1wkGw+sAFlaE90585tDU9uYpoQGq9YQjjMMwMlR4WwbWG4shhm1lf0kD7OtDGTEzoCMjqKEZdK9jQnQM6hMpXEt6TqpWq1U3ETFzAjuIUexbDmAfnh6hxgbCkPhdrgPv3AT+CQxzQIm+rRxDPEDCN0zMEGrhu98ev9sH+LYNE4LHe43+uQcdd6Cgvk9Tz5P6UGgdFi7H83ffdByGKB27QGfXFEChA/EO6kGDPncf7OSYNTesH4ge330uugMM75Rpx98ODo5u6E4EaezvOSciMDDRtre21ypmRCQ23X+WvjD+O4CYf+Km/lPXPYiJzeBdZIwHcru/OePHfVDfec25ozSSGg021RRc3RQBiBjvenxgXyj9Zfrgm+GBbgAJFK35aLBrAwoDdj68M3wWrQyjZU+nlt1VrSE6ECP1jmbxIuEQCzdOxdAUVbA1jB4U4+PnzNOcypQGiDHrZF8HMQjDl+4G9YJnuJEM3gncwCndl7a/QFdJdnK5O7eMZoeDj7nd2Thvw4Ccw6YVkbCDmMSUmHNCZiL389KFff3j3BqwjHeL3/Tu7ngirJ+jyzoR3Zs7v587v960ASByUt14oudrMXoasfV2s/heXvPNlgLHj3BkuOhODIcIibEw5p4WuY1Sf1OAPJqYDfaYYot8D2LsOB6GNgjtRpcdB8comoa+N1vv+xNeXogQtk4UWRdDMiCDO9ZHx7QFc4Nw1jg+bOoICJzAGJ3JGfufRJbIEwMBuLmIABpYZoKESATWwhFL0QFUgAL6x3ZNHnRKNDiFsUTAPfpGuO11u14urno6TdHnAcLcfiAeAoikFQAyoSGIm7sGDBKzQV/1odYKbeuN35w8bjXU3dcVgYgmQAAzU2jiZhAxTU/quB4HzEgzuVmnxA/vzkNjNdKS5EO/bCbu3m+SoYVgCG4RS/eCjr0L3dxNTdzEoAq2lu+Zwb6PWHT2uul1DAwcO3K4a6sbV5xc38zmcVrfHRcA9691IPdjOQxND8LgAOKXA8wZuAFGOskD6jkcDqXD4HG8AXAxFzd0jnEkjzZjfXYPa3MHjOnhGDcl7k9olRTMQKVbfsZ7Zgw+PCzee3CZymhQGxZn0EGqqps5RNtMJiBCJ1GvYq1JrapmDgKD6+m3wCxcrG9j5YZgobf6BhPcXXiMZGctYgrYQQLf/fhuAh/3oiMgMBjO9WOL74PKiHjby4P+HPMtNn0lBesYfLQYM4ud5LZdRfRJTuDm4mou3gNFA1SAnrRyBzeMEQFQV3GTYN8AwPGYCephdh6pp/tP12dXgBgfyCwgI3bhmiEYIyRK85RLLsTkbrXue5O1tRoW3YjoeHNu70OCYOgIaH07O4Z39Dc7Bvz+dLzdrl8dBO+u40h4f+/61fsZdAh+W0dw+9C3PeT+d2/c7O2F1NpFt2cyQrWmLQwaCRC5JEw4pGxkPRAHjLPZg6eLgy9gCriB7S5VtI1Med83bskl670Ow2BtIO3IfgsihFkpIhuiqbkrqoL3FIA7GJibNZFaW1vfDkt4JOIUbqfEibpYyxDAu4QgTjy+jWqsOXK/e7cDj8XpCh3RIDpCHAuRUsVBPFDvUI29t52P2T54CSKEnmgm4p50RyYMW9VSOCVihGXKh6VlPw5Ufbh9DtBEx0+P4T0mmKpGRklEoifDPfg41L7RoBQGrAkljbubqfQWzXr8ejxDODd+oxS+fwNxIWLY0B0cMEAcyoY9xz2m8VgmBEhICfsYR47A3HoGc0ScGIoZ0B6/DNCCTgeVhg5MlFJ4x8QWYeagEVu6eZ9FfQ4HJ2RwuMcfCNQhDovOOeM9dkwUSQFwAiUwiqaR6AzGoAzKQEzYCWY3ciSgTnWBmguBgoMrGjkBO6KbQWKARLdXO/ZbN7Omsl6v18vb5e0tEeX0cUoJyIPWjD+xiWNvRD1yaGZuOMI5e7fvGKiAVI9RcgIAY3fQZiYA6NIMWJVUQRTUoJ+1neW3EUcGiPFjigxTSf8WxPQ0BLqBmakr9F9HM7SbDgBjm4vXOfauoHSamRjUhk3eaf7+ucvHGYB49424499EBn3oEXzMhl8/0/uv3x2Wty9/9bT/wvcKAB4QgvvIxlTFLgDvlSd9XQVXjADH3IaY4ArmoAncsTh1ksBdDSp4f7QjEDF3jYgbNFFVdcXWvFYTFQN3chp77OAFR1zuw4wVAYGgJ2u/TdhD8A43LHAbvXdkit99eZdQ+Gbo7m8B3EKP/mwRA3zzW0zo0HNrI6EQv3ALUQFCm+XgvcDnYJZvLx1zg3qsxu4ECmodmHbhdjznffIzYH/XnnX/eAw329HyzDVCqkFG3N5/wJ3gy2IPxf4ykbzEnDmnlJgRIdrx1Lpv0jZRcQOgkKXbr4QUtxfBMXgDQv33ugaU+fYlfey33y6Su4feK6XiXXWa9pvXMPdWvW6u5GYuzU3AFJDILXFJqWc03F3Eo5dc+MGDG5qhm6v2yW3msplUk2qifdACBxJpcOwWIqaet7HOfTq6ORgRc1aCkpi8Lzo1aW6ti/ocDMwCxGxV9v2+r03QG8xEI/2HhMcKAgT6dm8ZB/NQz0eOOkSUMCjF6B/A6D09HWlbZAQOYX8HMXADMT3nGuNCRESMQBQ9MBCxgxjuTb2AcXDG/Tb33qLs/Jsg5td3EhHvMYeI/Lo6yYeFdPx9gJijE/sNxMT/Rneqo3nFb4KYA1Hdv4f798bomZzRjsE/UgQElAASYSJER8Meo+qIPUJhTmAEiq5MnjjGkQDIvAvOIwBNCefCKScm9CGYUyAzq+qiLhrttBCQ7KbZGrMQjiY5MMaH4H3ON3GnSQxcEARxgBg0ckWr6EDAQObkAM5kTBqdBY0UQTGqctSccoSEquLo4FNnB1BHCBZ8p71d1x//8t++fvna9u3hdHo4n87LCbx7/BLaQWsOHB5huh1q2Lh/ADcO3RxU3JqhK3gLigTQEQ0zmvoum6qCZ3VSRwHEUO2OOMkc5JC1eDAlPUy6AzFmXd7rI2EU3KordLmWA6pi6Gz8BmKGXMlvM1DBxbQpVMH7ZmPH3fqGZek0y69AzG1JjPD0mJr9MQcpanFWAP069/9uw4WR1xzC3nFIj2e+O0KOesNfLWPoIylmTT25krvD0WkiuNyoHiJgAmBMTFGBhd7rp4hGNG8OAAau6k1lb7bvujNmItGgAcDd1MHMRbRVbdVUQARUwdycosNIlKeR
Oxr0jifYh67XZSE6RWPm92McvugWbNq7EfNjDL9R2h4/fServDsOf30oBvS3SPbfik3uHgGdGO+JoHgxPs5wdCczVAg4jQCDa7+hJNcoJgJE5sKMTN4agLpI72B4C3vBCVzNQnB0C/ZuMD2+DUNOPzYiejcQPcgPxqgHJq7uogyYcppLnpeJmUy07vveapMWhuJ2U4B7LM0jtByL5SgQIDri7uMakRyOw+z2ljrs/d+GPGOG9zPsuBt+nI/APQC71770O2YIkYXt8QyaO307VcABBbAaiKMbG4CTuSeGAn7KsJzKPM85JXff91r3fav7Xvet7aiaEN1dXUYJmXrbre0u4iJ9z+zzmzzcrvvuFMcDQggEw2sDKCEX4inlXAoSiZoI7ibNXC2k9WBg6i7aquytv0rcCuBuy30QxHRUcEMH6N+mU8MFP/QKABDqRgfsVRMABoQIBMrkmQ0ZHDJAQki9XIG+4czAvYs1QyscCkyMxkRHFipONQZzEXEDF6kHIPPom1b3ZOlYR0eNxYEY7lntABCxTgLEfLPDwwAxZob3n39gI1ULr/LeXFNubVyPqrRfy6LvmRiI9Hx/cH91QigMJTmB9Q3ej13F0Z3RE1BhYkBzisYCoxwy6gkcTQiEQDPRaSplypQzEru5q6k0NwX3nNPDaZ6nHHpDVVEzdWiq647rXi+77QDV0RAN2AERlMAxgE3PtmAkPZAg1Lj3SyZFuSWaa4hagozpfxRM0IgYAYxRI+GKjgRMCEOPE21OyAiB2RGlNXIzVWIcS9oADBjNTAHWff/y/Prl67NLI6LWVA3Ie4vscRnY6K8Ru5YZYpwcHj8CencSm7qJgu4A1brwgqKiRsyrNnVHMAWOrBvG+oOR5DcXdz3o4IPRNBiFYT6aSg0QY2aObtRBzPCpGSAGjvRBz2XiPU52AxczURAhkffE0v/fXAjEQCnK1o5IJBbLXaSDQXEjMTCFqBYJiRgjdQJ3W72qq2KttiWqYlVczdVDWtQFRkToFknbSJkDAFHu2WRRV3ULnDQKFnrDm/52ADDe829+pv738bZGJxSAQ0d4t43i3TZ9R3j9Boi5nwKIUYRyD2Juh+j46ohPb6mons5GN/SuJesb7LsZFiXW6mgAQEjMpbPCTuBRqxR92QDR3S0kZDDIGOiN/PpW883PAYaRwd053dNJ1iMa9Ig1GCETzSVSSGxum7Ra99pEVGLxGI5UtZnj+zKf375Fnei7Vxn+96Zm+uUQPWX0PlkHY1b0f8dtjAPIVNU6eUjJoRwPI8TCac5TQqCewDUzYbYp8TKl05yXOaec3YzB0JsqVFfTqk0o6n5HK2kzdRUww4PvD2Zu1I309NyoMo63aOZgFqJtZMgpzWWa5xkQ9333FrJV7emNAVqreVWXO4qMABJTZk5MiWhMeoBbIOYwYsT7iMD9ZkrSQYyBgSkBAJATu5Mp2U64EwLwqeebohKNusyIekiAcBTARV+7UdhJBOnoE0S9cSm+W2v9MrNWW63VzG71oTeEDNB7iVGM6Dc45mBi4I62+Za/cfc7MgbegxjVAAAHrXKU5t4+4/E0A+j0MypUxvcfJ6Fl1FGddhMhmCO6ERA5kRMjMrqhEwWIQRs2CkiW0BPCUujhlOdlTtNExGBuqtLYtbl5yenhVOY5JyYCUGUzVfMqvGZ8S07gWM3Em/c8FSMSOB+8BwTT3FXTSO82TAAIbxfEsIFBIPAookRXdENXdCUgjC6XKmZgBJ21CxrGNHSwYISRIqkbKus8EWRCMEBQc1AwMvLdbGuyi4oBIQOyOao5KSA5p7sor9Me6u4q0XOYettQDzHm3a4cKRppWi/g1ZgRk2NygyYk6qKhkgNHC+4rVJw17q+BmYuPRBH0NqLuCNHmHDwUZWJDMROhoPWsobpZ59TBneBWrOuj9KOr1aIwOBgwdRcFkWDx39GM/bC//w4YADoCDZb+W2gf4sxIroxYYQxOlHxAqEX9buq/X0jwPno+Vsrgp4IjGDsQHD+9ozRv/wAgpGlKs0GUpFkP42OLgc7EEBA7JegSL7JoYt2JkP5BxpHvwIiY8px9mXuBtAGoQTPYBapCU2/ibTdtJpFnFEeEaQIg35tuu+67ifTqJO3IlNw5sinujmoGTvgex3hXAAHAgNQAd6fkMarvma07EIa379z/dHx9h4bemV0ADPrrHorgr7awwSyqq7mq9ZMVB7nOB62lKqImQUcSAVImyrnMnHZptPtazUzFxfvx5sfhA0Pp66H5cvAurDAAJ/dOUoK9q0R/p4npRdeJeJ7KnMs8TUi41rq1utddRAabGrXFYz30+XuLLGEM+x35D10X2EkY7NKwMZ1+fdviXP81u/nNS9x/FBynlLpudX25XGsTjua/x/EAB9QN8ZxG0auZIkLJ88PypOf5WIPM9PBw/vjhQwEkRwdXV5GKoJlxKqnkhIQ+hlvBxayZNpHa6iCVHByQAuJGs8tMpMeugogpJQMMboajaW6UT0NzQwMNdyx1TDzN03JaZlXdr9dW91rr3qTF1gyggAa4O++QGpAfGtFelZNKIg7psfdx67P81gPU71fS3fEc+iZ0NAMkcgBgR1Rl21BeHV+AMU2fKJNTAaIBUaI4jpAwfKTiTgQLrJ2KQiKM0ubuRYNI5EEKExCnm4C0MzH7DcTcQ5l4zJ2EFnmUngQBo9oPsn603UGKb4iT46kG2h0gpmdq7W6njTn5q5l59yrx95GEGkPsaI2tDboyeKAo94q1oKjoAE5IRCk4VSQnVABUcPJEUJiWROelPDyU5TTnaSZOgei1ZZcGZol5mXIpnHhotIxUTcxPE0+ZEQneqnbVByB6ImcIiUEvMggGCHtdmyP63ZYJKY2C7eDzow7ADQGMor7ShYAxJDyhfnVGjOyVjnLksAyx6BncanXELWfNuYmoiqmlnHJJBl5rfbtcvjy/vL2+nefJgRzJAI+6MQDo+lswIjAlB1cVMwn87KM18bdRsptrs7YTtJxnYGhiIrbvKmIO6OamiogpEzFBz/ah9pxYt/CycBnuEcNQ4sf4uetI/IckIMrpe7XGDcRYVP8Z3oGYfjqC3oEYdxABERex31Bf3F0GBkA81vm/7vrNRM+/9jm++e+/8IrEM995No/aW3RHhN7dnI0YiJyOHR/AHRS8l6XScQ+QABg7fZIYmTvJmRw4QVKoCkVBU6ATV4HwepmKGziREiqjtWZqEDjGLJTWZuZq6maCaCMHdPdpAO+GAN9/8eu/v33csenEUP7zY4mHOGz82q9kU79ZEOy9EtAjOxxBDCF9w8SYmZgGUezUkzAEmBCASXNyd2sOquIayM0CtHd/JbupftVUpBd93x/0HfiOU2ucCN0LAjERl5TmUkpKBGEwWNe6VxVTCx2Dd+g4docOvb9ZLUP2dzfY9qs7En/+OzZbQSQAq62+vL38419/vG7rVErKvc5igJh4TVRzkSbSaq3uxkgP56fE5d2RhuiR64Gwm3J3N0K3cDqy5kItgntTla22tdkuWI2b8SDtEAAYGRCJe+UdmIOP/A8SloKA2sQ
kEOByQxC7HNBnp7fkQLp6o2q5yIHMzs3CguHtko65OIyN39w+H05v712/F4FGJHDKOpLFNd5uqm+DLH0Qxea8N5Bjizh7UPgSdCJhqBA/NIPDKOhCPRgTh+EvnAGBivEn1zHF7dHU53h8MpH45DPhzkcGdpWCp9mv39Rd9f68eazpVnZQXZolbK1fTzpWhfbYhYZMz5lPMh5UFEODmnfLw73Zel+QXd3dGUMqxXcAsp+4AwpvEG4MTeSFGh/mI9IziSNN0CxMxlXgLFxw7VdlZywLVxjBCRKEJk3eSc8jCkGMnCbk6u4ISbYRZZFM2o9OYZ6qUEuM0phGTcGruwPboJ7wJ3l02PMBn5+lLPTwkcYoHBuq7M3seD7pYJlZrV7xOyvmShs8eu9nwvBFKZr7rMdZmmy9PTYyaiulTV6lZNawSfI62zRWQAgjFXIms3ybSjr4XPKvas6J0QH22KLUyUxyQC4RBcjO2tWtVqyg2pYZ7nWmpQjXrXNidbUFtDnXjvl/AGu2op7kytKFpwDYJOoFq1BkRjXb1WDoWaQ41U3RUw96gHbAC5KdiFumOw1unqXnW6zpJEYapeiuTh9K/+27/4yc/+4B/82Z/86Z/+7X/wd/7oj//kj//OP/iTf/5P//k//sf/5V/8xV9N06KKlPOyzOfrparFbGSbU10I3odPGw0ButilBVW7O4aIEGurr60PcgacXUgzOxspoAJK3lkfQsRKlIRVjVVNVdTcmIOK4jCqOhs+u95beeVyGtNAlEgSKLkls1h3DdQibRGNY1HwrFqWpV6nkg/OI2+l4GFmpSzX6coE8lAW7YVbiIXZodwREZMgom5wJxhRYjZiJTIKY8yBRNTRfEwOAaEyHKQgYWn+mH41dTeCutfQWRIJgj/CBxOzj0Mh9TqVaR6GYxrGxJzNcxr3NXrCCjUQSSLJlAbKA+UBWmDqTqEDZ+ruarXUlEWEmCWl3DHNkDIRMXFg/Hmaaq1uyuSJSYQSIaWUwhOTh/jqYRyPp+PpFCzh093dMR9GSrk4O5EMQzocD4cj8lghWg2h+wK4UzGfa5Q96tZY53N4M+jgcCZwknEYE/N8vjx9/sikHtOqlmVellJKUVM8fn4yQ18hVITIEcQbVQtI3uJUwWgk6tYVhfROq/5gfj0//eKvNWchIB+G09vX90OLzJLDSrWipqqAukcSddhTe0hGIGESTkmYq5qYuTJRAEeWJEmE5c3bt8Af5ywgP92dzk9nZn7z6vXpdLpcLssyX0MKBcQSsKPlRam6CB8Oh7vTiZ3m6+X9+/emquKn0+lHP/nxT//gD95+9dXpdEeIJarrmKOHCdBJIO2VTrbYfwSAO6mhVivFwjepXq/z51//9i//6T97+PDnb/2dnL97/NUvp/cfy+NSnyY9X3WaqBRWS46isOrTQp8n/7DYu1Ieq05VJ6OZpQjXLC6ZSIbxcHf/cLy/P51Ow+FU3S/zlZa5ms/zUsrScIyitbdZLRWgYRhZciPFR8YfiJkhLDuvUhqGt9/88Ed/+Ldfv3kzHg/uXstyvTxNl8frmSZX03Kb+7tuh6DY8RuthNiZQQLPJCNsJBwZd0x3wkfmI9ORcAKNjgGcgm2QKI9yd8gPx3w8CIsr/FKtXG0WhZTLpJ9mej/nT0qfHRfnxVnBxWzW+vGKH35aSt2ZTJRAmTgRs4OY+XDMAI6NoRAiDhSVIQCnrluwshksAkNWQdb0s8MYrejOFLhbDCBzr6qlllmXUouGvwAc7glrIjyxxjUvNhNHQYrYJN3ZgVIq5TwcjqvNH5nYzEIksQZ7M/ljgEaVmP2OtI5NQw+uc+L1b28RpBYuiCqkfXB7K5HUJgFF4hk6QQib5B0ibgnv30QLL/WAlCmHZX+DZNLl6VPkRBGTMDtcayQ2dyTZKHTeSZAiEnYJRKgXxwzk2Mw+dza4Qc2q1qXUUmY1A5OEj7W7SAzmFgnoNeIGiKtFnsaLgGmPlTI5w13ddD8FmmoCuTsFvPe+Uloo0lTTUDcJBdA1d6l1IrUYhbPDASbY2qu9Q6DVrrXMUwW5kZtpXRyQj+/enZ8+VVUW+frrt3/8t3/8J3//1Zs3DzLwD3707a9//f7Tp6dlqZ8fn86Xi2ohA7QQlOqCDqh3w6VJoFG4aqML1oCp9Rhb1+d0Imd2EVqBC5EQwcgFqHBW0AAGVJ3NKZoqFDiwOD26F/VL9fOil6RH0MCUOewez2ZCSEkIbIATzDyQSIFdrFZiz1ekA9JRyyYJb2YhzZyFm1JMoyhHPJX6BHI4McI8IYU16lYb8A6L3F8GhNhRw/4gAgWyZTOAlUDOYIHD1YkdLgQjUjcmK27OQnB3dWgD/eQgFph7rVGtnVI2YuJEfJM50ujixCSJ0kBpCH8MZIYmeIWFKItRscj8ixQbabUb415ZWDz5OI6qWpZFVWuZXasQhMFkkQTIkkkSS0opDcNwPB6Ox9Pd/f3rh4daXx3rHQ+jSi7IYOSckbOSLG14m3cQs5jP1ZZnRJLwbzcVgvY3yDkxiEop8zQRqXutVqOwfHBRtHrLK1UQQVJfA8jUvJpFFrStKU/dD+HthbZsOQDzWn2eprqwww+MuyTD6TDeHYdxMMB6RtUt6PLni4MruRExIsmYKVYsjlwCaYCEhe/vTl9//fVPf/LTMQ+X6wXA6XBk5nmep2U2JyeixsramsndmPnu7u7rr746joeyzOPxeD6f1ezt11//7Gc/+9GPf3x/f5+HAdtDNwC3d7Q0vhNzX088FEFuVjkPx6Y6M4RhhlrnD/X9X5Rff1Sld+Plu+u7X9fPT5jMJ6PFInBYlK5ET8afK31c7P3s72d7V/1cUYwruJI4Zx7GPI5J8uFwHE93+XCglKprna+0TIgiBuq186eDeheot9lEkiQlELs5ooMiyeamWDIk5fs3X7355oevX79KQ1at0/VStZZlZkl0y9J4dngfLeg9wUAGjcAd8Ylxx3hgvhc6MR2IjoSDywAkkABgJyERykIkpPBF1WaYOhUMhQ1yWfRx4c+KM/KZvQo8sj7drdQ6my7P9qAgunQASiRN6adBrwi1c49EhB8j9pnGtPSWQ+YUnsL2i42algYIbtRGoKt51QAxNcj+TIzIHA5pir51xX7PsVp29bm4Hkt14ii1uj4Jh/Nhd6v9OYkaX6B5CPokiPfiwzGIQ2+rA5jtQ44VEmH35Z5QHoGVfs1GlFzDUj221MhTvi0VYbp+QSMqfX7/27Y1ek/bCcJrj9W0pCCQsIhISizSqboUpzcrDe50+b6Yuwq3KExFkMNhPJ3us6SqpZQ5tDDCa+lqze0R2Qdm3ZPp22N1OyZMLiVVXUrdeCSdOmLCJLCirZxrhCsa4yz0u8zUe1ZaK4znzgwTohTJpuEYcJAQB60IYKFAmKEEBsCJTYgkhNH1/PjuV//in/hlmqdSPj7+6X/0Z3/3j//4b//wB1//9X/wZ//sv/mX/92f/+XP/+qX9Auap4X9Uq7qtYIUVgmVdrUt0DM6zb2VtutvBhZu7xPB1Z3ALSHeQ/yOxJkgxAmuzsIu
wpKJnCVJqV7VURMC3JsT3G0BlKiAilNRH4omgoQHBMqkWeRIOXGKO2kEo2qz6qMuqjrIMOSDyGGaltVKdvfIL8iHMYmoFXdlJnAiTh4QzYOgEgOxKRX1rqfuewslJGKJgCsjqpAR2co6I5CaN1cP4hsgZxgRmxPXmC0EB7sBHHCQHMSJOIFIa12WRUkgqUeRt4ncY2iJUqacKGfKA6UMGSA1YltgITeHW1TQgJNLT6t0c08SSSs8jGNb4uATTBGhTWu8NrVKakwhZSki56c8jMPp8+n6+rXW5b4s+Xjnw2GWkZFQFGKACTOaPpbHGC/mU7XlJmgZ0RJrqivW4PwqDK7kCkd4U60W02JWI+zNJDlDnCoMqqRukQVpSl5hpUu5BCFmNcP6obubwEHG0/1dYp7mqyQZ7w6H+9Ph7pjHYTa4OwmFGCKTixsHS3uXmRguIC1FWIhJHLKWioqZYWpa5lrLUpbpCvevv/rq7niatcI9iajq+Xwmkcfzma5TkOmb9lZTZaRhHF+9evj2B9++ffOWiX/445/McyGm+1f3P/zRj9++fXs4jMwhY9g3gMY93XwwzfdPjbUVa1ZH97EFtA3Ewy41Z6+pTPRk068fr5el5k9H/XiaHt0mAqlwzXSGFNNHsw/q7yv9tuL94h8W/VxwtrwEkUDYmSQP4+F0OB0Pw5AlOfmyTLUuAK0CayxCwfSq6lGvWyhnqOrCBeCUB5HkIINSW1ItlNz3GxizHE73h7vXaTwSQ4uWakuJgHoMT7rZ7G6PLWIOJxgDiWhkOhDdMR4I94R7+BE+OAYndg7Lx1ttQrj7ZMZFadal6nKplRZK83Ao4DQbTxiuNFwZsxgYQ2YmhirIXrHekcp6f91UxOp4cKgqWXeVw+CIimnBBdv2pXCuI5AeO8SdAXIFiBAKRo031eEEMQBxg5BWBtegivCaz2VYQysrvo+vOgeK0ZBlMUcqtalybG0bcyiqaG19EPLU5NrHaseZ3t6NliAwkTWzntBJLB3EWLA/djB0A/VNH9X6OeMCwWNFk8OI/HJYo+4EZnMAClX0XOV+pHJ9AhC4LQQ6W2AtCYm08qIMJiYXomTa2tb7k3XQ1/JDu1QPCVmQKIko5TQO+e7uTpivF10mq0tZSoncsMhZikW1umo3DJ+NaW7BZIeba6llrnXx3arc9UiCvqGRlh5fDH0Pq00DD6QriKG13gcFjkX3bKF1NEBNb5LiM9afnZyZEMmDpqXM1+k3v6kgJILb68Mw/Mkfff312+PpyCnd3d+9ev3q7u4+p+HX/t37+f2sEwvDFai4jQ1HziiYgv4fo6LfyQpy+uTo7ChD1OaJpHYPjjqIQZG91Fd4qilK5LhGvMUA5QiykTlULTUejDkZwyCaEp3EBjE4U2iWVFO3WetTXeC4K/WwlDwvc9nE7gLtcisHnCh0vJgiJtxnXnfbtK03hm8sjnBva4N5EH0M1IhjTS2JuS0VZKHh0bpuc+UYEULGbQUxRM3f26ZYymAJyMXLkjjBYUTmN6i/dzyBGSKQhJSQMqfkNTlZCA/CGbGIRHVJdqIm91JrzTmH3h2AaJaUsoi4EhQtc8E8YIEFkZ+IiZZlnue0TLOWAtfrfE3HEx3v6uF+uCeMJ8oHYpfmY4QjetmLeTHcqveEVGhtoUlY45ObBQdkvDue9AFuwQmupmUptWpVgxGzECUGl7pcrp+X6VqrOitHCqVay5HvaiCbWedtv16XSB44H1IWqT6nQfKY0iHzkJqIs6MpPLoz3IXEjJ33KzIa5a2Su7S3eBcMgnsIIje9KwLuTnfHw6kiOM2oqsPhAOJpngE6ny9lKc24M2eRPORXr169ffv2q6++evv27TAMb95+ZYaUeDwe7h8eVu2fnnsXwGUPYqg/dZTj3IOYGwoZU8TlGUQMFVuGOo2Y5WJMSxqfMs0MGEtlzERPZh+KfbfU3072vtr7indKHyse1S/OlcRJhJmFKZHkcRgOh/F4dzjklIw2rY5qTZNidTvHStyEZd2D4AhYVQWIJLdMqzAJrUKr3zj7yCU7Z0MiODgyhca6jGXO/Ds9MVgbEaAoUkJIRAPzQejIOIIOjDEqDwACNOpeuGvJIMSJOTMlBnF1X8wLYOaTLoY6GSbUiWhKXLN40Wl6clNbpuVSLtd6/dGd1eX2jvbhLzNb2ynshfArM9bcmxXExELXDBYOX0GMEY+S7MyN2ILtTQeDpNVhREuF27DBjmy7uScjkwwKYiILCfMX1D7c/NP7nIxLkm2StPB9MvP63aC4UCNltGQhX4NLK/JZL9u3q3Wf4h6Kot1pX3hvmnhOhyy+ZnXt7ycJ1WgCghm1KR2t7G6Re0wGhyqrl+q6tK3g1qnbuRfCzODYOWNLBBEl4ZxSSokIQWycl0WX0jxaFCrQQW/y1ZHY9r9uwjBRYoab1qWUuSzXWnYZPWFYhqQGoWe6tFxlqxopd8FWBhRN5d7RbE5Dp2AzvMuJr62H3ohAwFVpvUkUlUwSJREncp2fPv7ln/8LLBe6XH/7i+/+wd/7219/+/qrt1+d7k5/+Ac//YOf/NUh5zpPH979Zq7zQQ7ERFqJtxTrRhSqi7kRs7XRTAj6SodWW7+HtenNYHFvlB6jVubcmSxy0iPm5EzJycAR8GcPH3KozxWHV2Przhaok7u4qD7ZkpMxmEIdWc3UFtfZTQzsROauuudeUFROOQwkTEzCUabNqnlRVTWiztwI9G2x71q1xdygBCdhgTA7qXfWGDEYbU3dzS8nNINBAIsms9WKYCFwA4DEYLTMfQIgSZnVfSkFZUEamKVGG9x6+1YPR4vUs7AkEkHrqXDqtrln5oRgy6BWLAunlHLOwzCklIgpgpmNWEYMUnJnONHqR6QurENdPQxwrXX+8OkD5YFOD/Lmm4dvWI6v8sGETcO3Flsw4EB1KNiwBZPdzLRaLbEkRGqFwhyucBnz62++vn91z83vzE6+1LrM5TpNrp7zMI6H4+F0vZ5/8df/+sOH387TXObKiyGZc+Vi4Y1ag+FbL/E6ZgHyNIIGg0Cc0pE4ExgG7aLlUSvMyV0AgosbO+8rckcyh5s6YDAiBgwtnb7paIXqMcyCjMpEKmgbAygDTUw5pfu7+3fv3j09PkV6gRULIbtvvv32q6+/fvX69XgYcx6Ph8QiOSXJKTXxzzWJel0c1t94sY80EAP4zS5D1BKSJRODbMm2HOv1jqdXRA+iJ8FBMkRm0qvVd3P5xfX6y8flV5fl3WJPhienz84X55lIRSIFNUFEmDPnPOQ0DHk45PF4OKTjyCLhgZvn4maRgVXVrBqRuduyFFXt9fsUoJSWPIwhbR2axFHlALWsVVcRcGGu17lwSsOQ0jDeCY+DHBLDipZ5nq512+e2nalh3YZze+lkoiw0io+CkSmwS3N5tDCHMVmAAUflnMbDcDql+7sRp1HMErBQWsCl6lT06bp8LvPZlyXNGMviOD99ns6P5fq4XC/X6/L2wabpf73OmJDLtA2v9MTYsJx93brhkRsc73YXbLzRNSW6AwKEqL3dow5tT/WYo111vnF
iuye++17W1elmH+w+Vls36XbP6xALhByAwNqCGFH+XTgw5mfPs3DqPvIGxq2Hem7Hdrv7Fjelfk/rVtpcAWhZFd6NGW5whbACFQ5MBaxaBs4RbNgHLZHCcRT4sCnZxFXNI72lsScRywNroyKx7xouiL0xEynqj4G6TKo1TUI44Bp8l2WppZoqhbReyFA3IYvbau67BuWob2Za5mm+nq0std5UUWi+ATJwNFlARrdqVtWrurYd0tfOi4J2wUT0LXme2s8XD6ebbltXHyaAobZcHt/NP9eaKq6P56fP5z/4Wz9584PXp1en05tvvv6RfvWDX969+gUlUi+OIaT2X3hi1E298wA7lJUXe8FNyejVmRs/rSKxb05sDmWo1BLP3WEgIyPymKNoiopkkaAWKCYCte6LqogFX7rRl9QqeSXK4AIUc0SpsD5kKXicRB4Ro67Momq1eoCYDZT3wHpzzFlUe+6NwOEbIgR7LQwbbmEmbz55WbW6Yy0kt+Y9JSM37kVzw0Lhde1gMZG4VfQNJlwTW/O6L1WXpZaq8Toxg9PuR8AMFwoyn6EpLxi0IQozbQuMpRQBddXqzd/y3LneHmOjlLibVSuTlbLM9MQmiY6XobjJ4f7VV8fjvVCSRC0+HKOcqAL1Bu21QeauzaEXqZ9aQZ7H4e7Vw+EwuCmHt5vFCUV1WcrlejH1lNIwjMfxMF4Pl+XJky/zvMxlmXWZ6nwpdVHrGe9tOdkN3O70BMGHLMOJhIHE+UgQVS+RktQr7vRY8WqC4vnU9Ob0BZw7hZgd3tVx3KP2H1EIt7ROp+ZwJSIRyTknlkAznz9/Pl/O03ValmU8HL79wbc/+uEP3759e3d3Nww5pWHIY8o5SaLNyU67ZOm/AYh55onxEIUAmRtrybbcS32T7VXGKZnAq2Ku/GnxX138F0/688f6q3P9zdU+V59BM+HqXuEWxRlJQkg1JZEx5cMhjaPkgVPmlIc8ppxAbGY511rV3IsqLHStGNAQhimlaiwHoAp1VOJCzBqUvWhn1b2ieoz8yONNFmETTpJEEnck+8UjFKO4L/uJaGCcmF8lfp35dZa7xEdGdgBUDeoojkQmseTDq0EMB8Os0Oq02GPVK7wwZsNS62Wpj9f6uNDFoUnEplnrx/fvzo8fy/xYpus0LU9PH03LTb9spRICz7TBeTOl3A1Gffewphtu27hdP97/FXrm3QvTccDOvbJ+ZYUve/fL/h789mjfeeG9oBY56g5r6o8VH9yG8ApI1gW3O0XohYdmtax7yg2Fbk4EGb1/Zp18bYn+0iDoU6ptfm07J4b1dtqO1JFdx28rJUQdHtp87fYponeBgyRYTY0d0xxWzJ30FHhVvVarlYiUSy1LKbO5L8tSliiF7SyAe42qRn2TWzFbTP3ueQjtX7OlTNfz5fIEVy3zujq6eROEXxN6AEQyfbDkeiiNu48lsjciw5mjNm/k4kLDoxEN3WNJN8d+4K7+EPfmCiFDOV9+8fOfT5fLb377/pt/+cOvfvLDb3/yox/++CfzTCU9yOmBco6sRN/q1292cuDQYE57S9hpc5vWIUOdCxaDymEAOwEwkHaE5kYeVbRAApCIu5OAMhxCVNmMzamxxBwUWc5uRExmrhQBnooF1sKV6GWHomwJCyi5k5nXWmqpa7+Y2VLqXIqIwAhRPEVLVVMlNesAzhG1rFlYhJycEpmxC0CbxdE4MRFsIYAgTCLx1rrdU8QUWhTaACGoeySgWVDgGwmUKFoMzJIS5TyOYx7HlLMx21JUNy04NT9P89N1igKM1OG7cnLJngbXAiQiC5ZNsEoCAJJrpL4ZTEshM+xUWbUW14pIEtqcqNsKQ+sa4+4gVbdaNfLNiy7gPByub7+Z7+5TGiC8zokYOerQW1c4dVmsVsrGoa61VmI8vHoYxlzrYjUqwJuZqzupcVYZD01uyexc5uL1+Pqej2I1CrguyzRPl7mWYqpu1aLcdlWHUQvMMpiImv8wMQ05kXsqIhlOpdSplCXX6tJKF7Ut0qwp9jaX7d5DtiJ2AI7mANrMxIj1EJEzb1R+bosjAGIeiNLr14fD8f7+/unp6enp6Xq9llIOh8O3P/j266+/fv369eFwSElEcgrZz1iceFWCWcHKF0DM6nGJsczM7sG82aJjblarllKlKFylLInqV0f+9i6/PtooWrQ+TeXdVX/xVP/i4/Tz8/KbS/1YcKlpcURtGncXooGYUhA0KINylnQa8+mYjiOGXJkWc8zL6BjGQ84ZPFApl3kJXtQqjq+mIULNLLEoOhEc1+s1xqqp94QJ27OumChFmi/Yqi2uXmct0/U8zdNU5tKSj6NJ1qwKIOxQ6elAR+Z74VcDvR3k9SivDnIYRTKr47LYZbZlcTeIiBGK01J9roKKUe3Ol1c487Ccq05mBa5wJy/KU5XFxkJCEGgzdiRnSQ8pHyTXw/EVcdoPsdv4Q1v49wAiivBsdheeb9J7NOEBoduOS1EzIBohihZtc3Uj7N4AlI6Dd/Pg5g5Xv85zoNA8Dj0xGz0zOtDW6uEkohBBJyJ3sqbS4JFZShsEacAE6OIk/SqAOxjWc7eZNv8nmNCA4B5idS/Gdk5fzQ58ITSWQogFbV/v469hpO3DzZoG96TeJv7UHTBtmvZ8eLgarFpdbFmICCRRHMbco6YdVigZ5cVVX1yzBxjCnx7V00sp83Wer7rMcL/NTnKr1WqxcEB1ABwgpvGB3ND5N3CDhUCLNnvBNk8M9/rG3+OM+dLRWWoEkHtd5sei0zR9enz8xW/evf7Vux989/mnH0tK6f3ZFoxIB8kDokbYF07m3Y3IjU0MANCmrdJQHZyJm2Jji2d7X0d95Yk3B1lPwnNHCsUXhxpLbBJsQSNqrDXzVs2j2TbY+AVmCvgGWZkjjO6RTtUy1bdRae6qWlUJ7l6jGqeqq5KZe/CBQmGQ+3OBlYJSKcHRseb6QLNZtrhyhC89hHFjDMZAjT0SIT0QU8EJsBZgaoShjuyZubPWmZmYmrCB3zzIXHVaKiMUrKzFfDg0fJNLgknkRhEMEA87i5QtHtYiSFdVG2cE7oZgGO18JVFVah19z9agiAJaZB07Jn/6fP304fLp/fX+IQ0HksifbJ4YdVTzol5Ufb+EmlotWrWYKrx6qyufUgKNoqJarVarqmpuxgxJTsMQegWlVF3URQ4P94Mf3byWOl+nMs+nuWgtQYlrBbeXYm4UomXEG3w3Z7gwQZVrArGh1jIvy5TLIkEN8dU9bvucyWczZvfTX9lgbUwDJjYnY+vsb14XUI+wNw15HMbDYby7u3t4eFjKYmaH8fDm7ZuH+4fD6TgMgwiLpJQyRdEIbiCm9dy/BYhZP+mOELu7nfsxVlngGTQSTpkP2YkwKX262qen8svPy189lr/4PP/qqo+Kq3EFW0dDzBF8F2FGEhZJJGlIecjDmIdxGA+HQx5yGDRqdakkFkzespRlKWatPk/oX5h6BDsjFOGRwRRdAo7IZWQ/7J1k7q6l1GURAUQYZqraarOXWsqtbtF2xPRORAPxwHxiehB6I/xVTm/GdH
+QdBBLNJmDoKDKpA5KPLufFzurXd1NPakebLqrTkmm6osHBdKJYS4VRyeBJK5LMagVXRZyExZKpMIied9fjfi5AYPnHYceWnrpdWvdjc0rE2eg7m0gImbrQYueMfj8HNuedjuKvvD6s3++bOIWCOQQYvaNsnVzy80/AaIude5AT/t9tj2uSGO9h27fb6+uKO2WEeH9evGUfrvire3MZM8mEYBUVNvW1mj1MSGZOBjWrY5VmE7NWSq8xvAjTo+d8yvGtZtarV4WnWcCGbEsQ9WKcC65B6+n1hqEwtXPtif0OppwUPDSoHW6XK/nc13mVmdrt857kDHqopSAHr00hYHcEKlJDfQYoranK7y6d26vexirUQSzub/AXxiPLw7C2n7cmDZmhrLMqrZcp+nx8fHj+0/fffdpPL1S589XoXw/HO64TupKAPs2/72TYpyIPFQeW9IdQnobPTjUYaQ7LOThqOGJnmPKHHt92PJt7LEhhQ1o0tRKAsdY0zYwivIWAdibowNMZHvGWvvD4Oa9XBVWr2lvmsC5WisMZtVMHeaN9eDmFXAWsMQi2StzeJsG7rvlY5sPBOrZI9JGYoPpK7sJ1OLqbtzaUJiiEE5ENNgIPbDUMI25qWkUQPItStWOwJUER8vIMO9mtTM7sTfRb+/9BcAbSdM9CpxEyr81GRdrQQVv+jgAbcYEOjOgt3lvAAKRoPWNl6WeH58+/PZwusvjUVIahgMliYmgZovatNSllF2neSnzMl+XokW1eA0JAjNT92paXdsDWtwlO5FTpCAbAULIfBAT8xzQgkpxJh7ScOfkIdJkarWUZZoWVV0fwKwWLcFRIzOGwwgCd4ZH+ckpz9PImRI3E7GvCetSs3cr9dlAjSvTuNzUtTHaKk9GUQ8lNuOIBHV3VXNuMznLmHI6Hg/oYaZxPIzjkFJzwHDTVeZ+ZSJq9Kbon77srvClR9tfgBhyXyNcMTeFc5bhIONJbKDpRJXpWgxPC12N3n/233xe/vrT9Ovz8purfS40kxhJlGaK7JKoxiXEQsKSJSchkZSD3Xschvu706vTfWLRaZ6naTo/FVUDFdVpXkpVgmu1ZV5KUTPraTUUeqksTCwgVGp5aAwXSqDEnNbpr6rn8+fHz+9NT8fjYRxCooFAplprrd5yL/YjPFoGTCQsmSWEHjL5QDgkOaQ0SoL7UqyYmkMyH4esLJowV50xn1Uv7LUaqUpd5HIBUQFHqSCnoKoJxFlMshFN/tnMqttEZJzE1eqy6E7mY4vj3Po5cHuseGL9vZr46wcC6PQRYpFp0DzSPayzgqH1en57rGeIgWfdS4MXn+yY/0aRJ8ZtTJrOSrlBTU3qqH2GPGIxUQAvhCVpe949sNi5ZroUyK7FntkegRiA4Da1PcoJ7rQakCuSCtQo3W+yniQtpXR00vI+IpQQHmwiQ+NJgRvVMFCUtv0muIveujgaq6qZVinVyqzTBKa8LBXEw5GY52le5tlCyy723g7LHNDdmm1uTJyywMxr1Xl6+vzpevlcyhQf2GvJuNk8X6+XC8ZBErcOrB5K9TD1GvU8vQ8kBRZ4BYxdSQE3QqUosNM7Y0fT+zceAS7YmwYNnBwFuBL4nD59/vjh8d27z+P923R6U5ZpKTAXK1p1rii17CeMl1LneRazyEMO2AyQqa3Bxa4ySERBy22aScTsjg5NejJjS2WOCzyHunsbtuX4ev9poRxuNUAivGJmgWcIIIc6sczzRJwMXGvZP8tS6nVeNIkQ3COsAAephT0OIBgLRhxylt1XEZ5IxVJDKNQaVzwou+bhZGA3VmvqFhEsaj4Oai43N3gNz42HAik6CuJODXS4KpVaacmcPI3GspRaqt70i9pcNcO8lcaopdRa1auFRopXo2psGgM6lp8W2FEzVa/FtXgtbkGBDO51d6KGh/mmZ27m/PY3gRpvt2pZwPLxN78mGcBDUT/cPaRxdKZKvphPRS9zuSxlVYoxs2WepsulmBa1arU2C8QrvJpVNM5TjObupexCn22fkR6dpSg8gpyC+iBMScSBgOO59JCxu7nWumCeapnrsliNOBpBAGVVLcs8Xc/peuF8EEpaTU2rWjVVR6llLqXsdaR6FSiiLo9J5OS3foG1KcPspV5muLv9+2diPcyhz5vSEOWuJCWRKG7ALMyJ1rnHfGsZ0svf3Rp49pkv/NPcF6uzLrmmTC5UK8pFl3fXeZ5qWS7fffr83dPTd+f546TnSouxSXZ2F6aoLEMQMPWJnLo1Zm6lLDaBwlowS8x1XuZpvl6vVdWJ1X0upWqj1pelFDWP5aUSOVEkK0mCCDhVIAY+DLMW1GXRLTNRtZ4/f/z47jdlvptPh3EcmFyX6fz0eLlcomh5DyVvR497OnMpbMy0sM+sE2RJtLAlU7BPWuaoCUIi6SAiNXNWTUuRUrm6q3noGrmZe4EYs4sYkakZCTFJ8jw4HGWZTRfyhclSkmo+TeXx8yetdZ16VeuyLCLiZrGJNhjBm6HbYMTub1oRA6h7WBqIWffixrStdZ3iZrZCg56zGVbdLrQeVwc0BGN3JN+gsscrpdZSSr1dx6pqqVW6G6ZJOq7loQkUbr2w5KzVE1StzVInqqoEWsFWQxHAPmG7cX9XH/qtxyUQQ7zCPeQCoDlp3b2fPFjzvlaw36EiAGleKnZ3QHHLIOdORFrpPy1dsO9ba53KUFYO5U7TWmupVWvBsqAU2AIwH+5Pc6lIktP18+N8udRSXG1vmIQJHkGdjm0sCR+GoRJqWabz0/L0fj/cp+u8IrtQeiifP5XDMSVp9WxDVcgMpqZzIF20pV+Bpec2GwEMXbN5EN5mgNBTdv/Nh7Xu6Num9+gSQDwLnx8fP3xMp9fp/muHL4+P9byYXRVXYJ6uV9PdBrPM03TlqsQJ1DOlY3B7+G2JgluweixCsDnYuwEE1JoAYYtZdBe7u6tBzWsHOtq5D4CpRYZ6h6Rq5rHAhSaye/XAOegUWZYoM1LVU9HLdVqfpap+fjp/+PR4dzzkJBQLLTmRgNgpZAVqhAnUylrC2ai6uzqZeWhuB+KM2BARkZvXglrJFFVRDa1ICVqV3A3EVHiJhdG8i2eHWZGa2LSjuptzpWKD01FGF3m6zJdp1pUT435dynmaB7jXOi9lnpd5mss0+zRhmbDMKAvVCVphHgyitptYhRarpS6z18XLDKvN+Rfn//IOd2vq7S0/8v27dSmLYl5smvXh8Xp6+1W+e8CQVViJ5uqXuT5el1VbwlSny+V6flQiDc+kwxzqVt2qWQkbySz8exopTu7mGki0RWTdimm1aqpmQRQiYoaIpyH8hxk0BEo2V9NSlnm5VjnXi2iBOsiMzdjJHWrq0/VyfuLLUzrcJaSl2qL1WmsxNcNVy7lcp2XRnVu+qpalmghz+CKZyCFRp2ZtPEf4ALvXpa0+1nybwJqZvrl5A721T3NIjXV7mqISbjMaGzYO666185YEEPkXq9BfzBu3RnJfF2X1+lgedXr/Sc/T4Pf5CrkIzp/LU316fDw//uZ8/jj7ZT8uagIJTJzZhEGEIOsYkUo2Z
HW25LWoV7o4f/6QkkRVr9TijsREEDHHYlaq6VK7a9WNqaqVpaIaqja/c850PDgLzGNkYJ5xffp0/azWNn6t5fHj+0/f/fXT490wZk6REFCW6bx8/gQ8y17ejmDJFfPZagYmYAGKVJSn5UIPIgJbqhVFUbiD0mdJachyR1SqQ10KJgsRLCdGdZpUF/ViXsHqUJjZTGxDNTjm6QK9AmV/G7/+1S+WsqyDJ2puwFFyJ6/sjj51N7S6gpiGc1aXiRlRiBRQf7tZvmYbWEFnq6yemLgi+ha+woWAFz21dzPTg+621Dpdr/My77S7bJrmy+XiZkkkXBBx5RVdgLwDNepGQuwP3oVvCTvPCjPnlKhL+/S5tvpPVwd6a5qtfUIziZu3yncgJq7HnYzvoFp1WpZS6x4qpf/5f/w/i5bhNZ+8Eyr6hVe9YF5faSCmuWEiXTO2voiilrA4rVYrlZjT4XR8ePXw9itOeblcyrxorW7eiyzFgAhngK8gBu7CfBgywcsyzdfL9fxUlpnJVQ3AP/yH//DVw6t4kvv743/wD/+umx2GUUTCfmQN4pnBzeoC6G7ViplSo8oxKdi9VaVaB+S/49G2Ju9qsOEDIBJw5uEkx9ck7Mtky8Xmz3WeZl/+o//xP3p4dR/fPxwOP/vZH5oZSVNgi3gEmoPO0XUwWqt1EOMg7wV/bK3+HUwla9Asal141CeL5bN5V8JEWM2FNhG0ecnZPNLI6gqUV+UiEEvKwzimYUzD4Y9/9qPDYWj9cjz8o7/7h6p2PIw5JdqIKEJNX8Dco2afE4l3FAO28JpYyL+6e3hT0IJB5HCt0EpmUEW1VbepcwQJLchcPQw+Z38GYiSc8NyI3kRIOR+Oh7sHZz5Pyz/6k5/dHw/xLHdj/h/9wQ/cLLl76KYsyzLPdVl8mVEXqjN0IV2g6j1JH+H/cUUoRpdiWrwWbMFNrHBk7yb9npHV/+yGhIVbLQ1puBvvX9+9/ub0+qvD6zfpdIecw/SshrnY3//Jt6chx9dzyl+9+abW6o39sy0fNXBMo2JFJ5MC2oR1WhQM5K1UsNdq6q0uWUSaQjMth94xRZVAACGdV8uyTNN8na/TfL1aKTBjNw6LUI3Gw/D2m+Pbr+9ffy3DsVYrqnMzNrFYfarL3//hT+7yGM+SUvr67Rs4pMeTAkmtuWa98WKqAIiM+9WR07eHbjW2ngjiX5Rf4ogiSecARl0k3mxK3763zk50Q5wjc7G5m1cvbwsWnA5ZpH3+4e7wH/7ZH5nXUdIx4ZTLnSyvfOLlfH18fDqfv13mi5qyOBgePlohZrCg+2bXG2PmNAwiiYnDl+BW1wWDWYaohcEiLCRioEjl1FLNnJyM4ETVtCxVi7pqDDdOKR0PYNFwgJov80JPlz/9h3/6cH9qc//u9I/+/X9Q65yGUVJkVjq5mZYyT7UsbuFO875Tg8gZEETiAQkgjMw4JNwJvZZ0L3JHJI4wr1QVTplTSsIpKfMFdHFcFHPU8mIHkzpm88WpOFeQEqlT1Sb+B/dlnsoylfmitYDc3Inwp3/6Zw/3bU0+Hg9/9+/8sYOGIXdBoAZZgvi09v66we+9BRHWDH9HYIXANe1bTeV7B2J6zBjNO7gDMf3MK4Bw9xt52B7BCZ9FVV3m+e/9yd87Ho/tWQ6HP/nZz+A2jqO0cNLa/kBbRr1xyTpsCqyOwAad0mJ9TjFTSon3IOa2Bb4PxLSw2s083QRcwn/ALd4aErfl7/3R3zodDtuZ/6v/+p/0a/wOb+fLl3pjrdyF22gcdr8pgqiSJCWKyPFOe4Nur+DbmdtLzNycaGZRPnC93P39/c9+9rPT6QTgcp1//tffPT5d+jLmDqwtCmBPm+9X8LU5+2DZYcZ/VxDzLFKzOwgtMJSCJwXTlv0Be7h/+IOf/eR0OgJYluXDh4/TNMU0wb5rfDtX+09vqWfv9Of223+usVlsvs/ei+7793Az1oIv0D7pN5+KqRVpa8xEfDodvn77ehwygOu8/PV3Hx4vU6yscaYXLdxPTPQs1OW7q/T1Lq7YbqA5vOKtZw3v+798/+LutrFOoG6Cc+xeAKnZ/enw02/fHscBwFTqrz4+nad53bjaOtECo72Q4pqHeXMD8ZTuvZRKf/vmpn/3mHsxsLYOI0Sp5SQpc8oS0jWbYg3McTfmH755OOQEoNZ6vjyWsuxGzuYG995ktGu2bbDcfLb1QR8cq81DWyh2t7i0ZvMOj7syFG1ndxBzzpyypEwUlV18Xd6D0XbK4w9fvT6kjPDCXq6lVFov9DezQ9bFZt/G/babZUft7/ZB2j7S22H94gsg+oLxuL+2Q5iHVpUDl2n+q1+9ezxfgrHP5EKeYDC1Guotps1kpvU+19+0+43mFuKdo97X9br3UfCG4tF6JoBvHwS6Q9O9WVDr5JE1AT7MJIfq3f3d3/rpj0/HA4Dr9frzv/7l4+PTs4hb42l7i7a+7APa/1DEASBEiSCdZYpOUkHIKFHYNRSuWluV1yINIOKh3i29vvRvgKPRSnTfkafT3R/90c9OpzsA87L89rfvrtO1K5ftuvSmZ3d9vZ+u2xLjzz/WW+V37BxfPPa3uv/79gV3h5mdjsdvv/32MI7xLL95/+EyTR3s7m5ru/EvLDa/+/ZoG3d4fjvf8wQ34GW79O0z7OaTA252PBx+8NXbcWh2Mr14+N8fvz9+f/z++P3x++P3x++P/wEczzPEfn/8/vj98fvj98fvj98fvz/+B3Gk3/72OxAzJ4ran9TKyPeg1Koxduvu69yZZ6/juQf9Jp5BNx64nePqmVct/rNGRzbeVHcxEuBmuojIeDyJJDQ1VXOLIsDP7mUridkT3V/4dmn/he/zQm/f/JIDi5598ks+Ltq90Ty6cSphGlPjuU/L8t27D+fr9CwnE7ff3V+xM4rW8B4IoL2bcBc32b7UeUgvGuWWSdB7/Xng4faviC2Y+d3p+ONvvgrXZajc1p4j1cNS3vUWtit7b58tFLOqEXeHoSPKXrYwRjtXfwoCmvLGM1/txlBYL7Q51Pu46m1H29XN/Zjpq1MaEwG4LvWvP5wfp9KFzdbGou8fFr8rovF9XtDf7R39glv3hYt68yYDCNKg4/6Qf/bV3WlIANT8Oteiz2WpX5y609y/GP363tt9Fhx4dus39/b87y95renZRxw58atTSsIAlqLvPl+mufLGFHv+5d8RAYhY8ssgJ/3Ovvvv5YjYzZjT67txSAJgKfr+0/k6V/5+bYd9h+xCW+vw85auj/76emwtQPv3my4DsIsfvujWXc7m992OuR/H/O3buzEnAEXx6Yq5YtNnvv3Wly/07IVnH/QvffL2C7tVigBnrwhiIMgpo8mCepv433MKcz8O9NU9j5kAqNplLrXqPjbajt85TNa44/c86brk0Bea9sXH/6ZjssUOHTnJcUzCDGBeyncfPl+mhdaA6c3nb1/xzjrAsyflCPNxxBsjf2QfKScmktAc6ZPSX3Sh7+eqb6PYt0vvQpvBaDwdhx999eowNnpf+s//8/87p3E4fpXySYBBZBgHYp6rL7XUZXKt
EYS0Hn0U5lbnlJjptjSK3/wPEf3slauCcdaX2v0GHExsX8tQrM9AFEmNUfI8wVOQMbVe56df39+ffvZ3/r3T/SsAS/HffJyXqncDRgGztx0OcGd1LzXSLpps79pk/SbWLsT3O6i2VLEbiuWLs3kjXm3Lfq932FNKHGRGIBJ2h6rfHdJPvx5PAwH47t2H//3/8f/6T//8L0KTYn9O9+1vIqIW8yUwGVDRQs1knkLMbk3DX6mI0d5Ni1iMvHgJXm9oZRLYIR3iWDUFAUIgivSn6DF3R2cZAx6ZgFXrvNQ/+5M//t/8p//J3/7pTwAsVX/56enTdVrmomYONvOitVYtVatFzjgZyMAGNodaZMGADBRFhKylTitBGeaw4qRGCnerjEpWycgtqZOTsjjxKnYWZMwYe06kTaoLgDAngkQZlgicO3tonqr6UvWPvxn+kz99+MmrDOCvP1z+d/+Xf/L//te/HbNIp+UFRa9tOc+z/27G0p6u0WUqadezsUxQB2Gtafdc5a1s7e1Ia3iYqQ+KlhiPVkrDi9pS7D/4o2//t//LP/v7P3oN4DLVf/mLTx8ep7XQAoCQN+6nj+IPuztEr/sO9HWFgSjs8Ox+2qDb1Q1oibPm69k6gAZAQeSPuWn91bXdQEBTKCKYo5j98Kvj/+Tvvn37MAB49/n8f/p//rf/+pcfxtwKtKEbPlERsGcs9PbBvkaMOzs68AltDOygEN+u3FhXAFof9AvHOkMBRF6hv3gXgJkv1f7wm4f/+N//Wz94fQLw7tP5//xf/PO/+MW7ISfppJOVnHYL8Q2IMlvY5qQrWXFf0NOs1naIp246kOAow+AkDnYSA6tHda1WYhZY7zqgf9tQ/PbdlU6navNS/+QPv/lP/xd/9gc/eA3g0xX/xZ/bLz4ii3PfGffomtbnQMeS2/BZK8n1V4LmYjefafdh8UpXn2+fIiMhL4N9pnq1YoZR01uTXGlyKHmKMl1bC7cLulabiv+dH8p/8h8ef/IVAbjM5V/81W/ef74Ex3Qvuv8lYLGNKIQgSyP2bqPAbPcYBLSikfylgeVoBbR7CYAXInv7eddviwB4y9jA24fj3/3p1/enAcB3Hx//D/+3//K/+Ve/SCwi3MV3AaA6gYRIYqt296pVQ9qeWk0hdzcQkDLxiX0kZRTXUmotquqqQAVDRkknlhGUiIPyFbnM2rqTwOwMMCweunqKBHszh5vVyLrSrqFFVXVe6r/3Rz/5z/5X/9O/9ZOv457TX/3852m4Gx8oj5qIDykdD0YsU7VpnqfzZy1TCLFZU2SkVkeMJYfKGHtnVwIeFVis8fMotHzMQiKpgRhgW1Vikkc6uKML7cbeE7IMOQ8pJSEmiHsCi2Qpy/ny8Z3qXGtLilP381znpZITUtM0almizmq+VCvmaiHB3uQung27NhK+RxuGdiDmZuOBYCv/sy36gcj6DG0gxtqE9BXEmFNoXqg2ptL5Ov2TP/+Lf/xf/deHIScRvLhiWyJDj6izahVYQinFjcyzbiCmC3J1OTAHKYGYRJRs1kVdGXsQk4It5+TVqjeAQ2TMkV8XqtihbIi+QbmVWq7TTETn67T2y6Xap7lcr1Opao5afV6WpZalaFWrCnNSsIEVXJ266grIwc7sgAVfjwpTZTKHL0ZqXNxcC6OwVQa5ZQU7KSdbywnQpkkGIiNSJ3VEGg2zRc3xkCowwLgxb6v6tSgxTaUtDU9T+f/85fv/xz/75WlMSRpnlptqFBG6ZtJ/DyBmW8j2aYrWFvcvg5gtBWZTv0LTYlLDokb8OLX5UtU/Ps2//nCVVh5tAzEbQAkQ0z11odsQlMmAMi1jUXcgpiMvIKpKriCG1mJl61Ie22FMKkWbrqu2+9ZuTryBGKrui5oRzbVdd5rrv/rl+3/y3313GnPOeeWrAugp/U1Mi7Cm7ezcFxybRIAbflZA4EsgZudW2G2l/V2svda3sRcgpqNRNZ+LEnwudXuWv373T/78l4dxm/sriGmiWq3fjQLERG46DK5mFVbcFpj2choxdb0/NTOkgxh2EocYiUKqsYFbXbxWcGYHN73l19ray771uLuXatdpIeA6tzG2VPzyM/7iHQYJMe71UW6ao0OQ3W8D1pVzfdd7Mt5uiO0/r/1v74nxDrDbwScuZ52XipPmB5W0NIctqCGjltbYbs29Vr8uxsTr3K9V33++/vrdo0gTx1w7+8Yru4NoPe2mme9rv3fgtVvJX4CY2zXEVhBDvxPE7EFSfDJkwtRAQO1lrS7T8s//6tf/r3/+l2PKKQps9ctUhRMTi0gKELOUKNSrxBARMEWNXjfOxA/JD1QFM3RZilat1bQABexykPyQ0pFkFE6Ryq+qUe3HYaFznsgERuQGqp6qs3Yx6Kja1UrLEZi4Vj1fFya5TFt+fkrpKOmO8z0Pr4gEKVM+MEtmK3oxL6UqqDI7N1wQ8qfkxMbCzJzg0KrV3VnYjGqJWhtNjS1qosI91NqaX4aJhBInohA9W9MaOJbjlFI6HIbhMBxGEWmGLgjCkpMlp+kewxEdEVOTTmmzjZr3iqKAELkLEZgSU6iwWxena8NkN2j2Hvb9AvYc8jxbvNY9aAU0Pfd9+3yr59NOF6z5JvDahmk7SWIesgw55bQV71h/t+BQE1VsclsKpMh2VFs9Mb5aYCRtNgmTk1cDiFiUjNXVOYUAmHmr4hF5m1Ee03TxCkfizFGNOmaamUXNlJhmjFqqSDoeRln7hTmPw2BqBMzLUtStWosCIcB9KNoCZI5mT5q1JHUyZyFO7f4DkThIwGosaq5EzkIBdAclAnuTl22TeW+ZG2AewniIFFRA4KnpfwDGUUHMqzkzj0NazRRhuhuHh+PhNEqWECRphYCiME+ks5vvYca23KzJWdig7YvB1Bb6nVevdz9W8aj9OAzr09BRGvc0zm7pAQabqk2iD4csmzo+mDk1seOWAgoOT0moI8cAXfd5dyeCS4CYKGpGTTN0d7d7i3AzBtwZ0kBMk8WJRdYbiAmPkcUK14MhrUmiNaw1tziEbU1zjYYdsxzHdDiklIQ6rAdaehZ1tfHVH9P7IsKutnrQw8raP9fv9sTcdMZNv+yWgxdn2NStzOGeZcveYaJD5lOWMUvqXlisRhERRT2QPqajKBoi0atVlrdWBz36dfVEdU9MBzGMqHoGVoiB1cIhGnA2to32LA43CwMsoEzc1fZc5p7FCL6fL8w4Dnx3QBaPdt2Gxt6T/RLEvIQpL0DMzWcC9ESd51ZTAg5y5wQ5OnP2IlpNLVtlKTQqEGWauuXv63kQ9SPFjwfezRcSph4UWGHuzYjYHWHerLbF1rvbE+3DhQE3sekz+vOzNTuCXsKmXXPuI1J9j2gvhpe23wUPw/F093Acx5Sk+feIzGyp1QEWYU5M4u4uTLWSGYjykIlEK0zNVBnmTMoMTwwIGdnAEW4wOGeSlFJKOSfOIiC4GpuJujtMyIU1kwrQQYxUF3dYFCNi0sxQcXdmynkws8NwfXU/StrW1SQszAmcwQM4IWVwJhFhZLN8OJorbGFoJiKKBYaZEnG2NJA
wJRCZaHF3FuEQgTILBCopEcBq7t283ATSOEkCCKqbJnIXh88pj8NpGA85ZZbIPzcigwjlRJ4pDyR5H/7uSN/VAY/dsRkRALqBRewgdfWtPI0jXOI9x7ad6Pnw3Htuumt0b5Fsw1GwvfJsn6L1745+o6jRLcci9hUKcdA+TDcQsyJxChnmdYWAsZuRkblYWNi7Gw4QkJi9OaLCeFcQOyVmJrChVZDtpxVwNQqt4ywpSUM7ram5hogeAcRILAYMOa/uVnevpqoamETdqtVqGgV5tPnGKOx7Bakh1uFYsRHyoByaEdwcTw4SJ6qoCgMJcUo0DCQ51L5A1AybG2evh1FsTma9tGKc1iVATOy2Brg7mZsj7RXhiYaUDkM+DJKFmseOnNFATBQCvwUxz8fneiu42QX2A8jJ+fnLsWRjF+2Jl1eoQq0MSvfC2LqsOSyxEfgoSXY3wcRMwtRralHDC9YEB5qDFGiSit6djWhn3spB7+6U11G6b4EOhuKpt29RF6SKqRgqhYTVE9OndJ+qnZh1Uw6ECClxypwSJ7nxtazA5RmI6ZtQN3d6GvIevvSnI9r3YjdObqb3iyPi+KvmzLN39wGIxLR58wEmDImOmcdEIusHGWjWFblzq1dG1P3uhqq6WNiDTKABsaa0OHIPGTZPTLjwCWBnNhCBDURGof+zPvYKYhTu5gpvbqvYFzsZDYC5M6AmacMwIEJOGDPCkd5bJv6zA77/P4AY352hA5qeQQ0iQ4adUIln0Ws1mMyVj0KkJAF6onv32rIAs8Hd803ppBaYZvq3ATGtK7EBjs3j0iAOR8WibR9pPvUvgBig+5lWSPXlK754JW50jeVtDyJZZEx5lCSheQJ3pwqKNYUc1q0GsLC3gomJmt/PyUMhiAsxkCFCDHHE5DV1o8R5SDkfxjFLFgYQpUwsQAzBhTRTZVT3SlEjwMmdjJwRxXspytmK8DAMwZY4jMPeF5XYlbyiSXAGIaEIaxIehZVP+chYrqmW0UxM1auS+JBtPPh4SDmNyRPcXUM9yd1LCWlNJ6YsWZi6G81XIEBM3aiGNcVkQvivCUKUUx7GMSchArl2Xd2w3BhbicGb5cUNVY1jD2NqZWoRRTMkCj+wO8HZm4Z6jCFzbkslmhHXPX/77Wi3texi3m1a3i7xcTvP7g+bhmDUn1kruGLvIPR+T3u/+stwEgCYxZq/3Wuf295MysaQiFXVyaFdZILg1MUWfPNEEnEbx95rSjXJuOfV3frME4rixORwiKjsFi1VPZ+fPj9dzK1UXZYyL2VeSllqVTNFC/A5K1hB5lxDaBMdwaTsnFvwPupSwS0iTstkpj5klwQScDZKYdKEzh3WJt6NFo9FoBVFIoDDQ9+3JieEYGVbd2i3VjFRQtT960Gf9uybrUPfR7/7AojZQ9xt3PB+id99JJaV/UBri67ExszuYCKl5ilvcXGwEwo898UXbUVmpihiBsdOpipUxuHWzbkOHVo7ehvIHSf2iubevU3NI+rrMKEG4L2Lr+BG3WFlWri3ARl+pH0AojFiyN29l+nahiw1Xgi2bulbSNsZKKTied+oe5DSP++777bPPAMxN5onNzCunbo9eas93ttwA2SbJfPyIEIWHDKGjKg0ROvNGUc1GCEQI1gDtdZSi9fZbDYDkTAPLAM4OYhInGh7iPAEtBsK0z4ckJszasM762a6joLtrmn3u7cSdZ3N3YvciqUbd6/ebpXqPfB8Etxc4/kr3z+zerm7GHZggMkTSsKF/FH9s9rMeMMYhYa4rPWuoN53/Vm6vOhz4zKCreuajPX+98B3f1d7QL8NBzwvAIQ2iteRuQ1975Nn2438eWM93xf2r3t/pN1tuJIqLdU5ijWbW61ai9niriTc9MzAaP4HhsNU4apVTYvpTPCFs1smGohZwpSFkxuTg1MaxmE4jMfDkFIrSeDmZtWbWBy7CapbMXXtdYg8xjkcTAkE7urGIBAkJUk36DJFX0dBGqaohWgKCBMzDzRmASfPFx3OZ1wvZS4qiV+9QiYzYrXBQ3vViZp0VVJ1NXeQUBbnvqY1hzQzCM06cJi7khtZOG+QMqVESURSEuYQteyOP3e4wxrf8wtw1AEzrwYYGN29ZmBe4XP7P90yvWmbIRz6rdDdoHlx3MZBt4VvvY3191qA7gbk3Kyj3wfl/waHNarE7f3uNuBe5CLWtG53O6zJ4ZqZSVDQmrKkB3PGKRoczUC2BiS8Vdpw9Nq51CgUtI+GmNn1ej2fzyys6mUpZSmlLku1WjuIMaiTgpXYwGZwEmIhTkgD0gAWEMcizk5m6q61zsv05GaJjzzkmHXu2cPnHhh1s9j2lv3ud1uZbhutPRY6xXM7GGCOWpHsuxf7LhEG7LOe+cLhu+15O0sfsS9C3o2eQFGkfHsSMDvQSoZ4Rxjdt9AfF+SOzJ7o2XnjE2vMov/YbhztN+C4ZW9r8fYtBABBt/hXfPV9s2e9lHd9wFaaIYpV7767v4/2Y10D/eZJIqbaIcsXnrJ1T3+X+s0y9rPvZlLS/qvbwfuP9k+uh3WM4rfP8LxF0dHE7cGEQTAkH5InWTXNAcDZya1RJ4ThqHAn01YvBUIUs4ZlAA+d9kfotKJdOMOdW2DI1718o3m8tOkDJoZ5sAteaL+3mBovvtimCoHptjVo/d46C+hZY9Gz5rPedC8+YxELXe231rYu7glF7Ap/Qv0EK7Az8z0TO0k4wxtzaH/ONs3/7Wr//k0P64arg17sYLvFqOHgrQV2x1oH+/ar/7aHwxet07KAjFUM5Gpaq2l1U3cls3DYUYuJcUxr04hmGsOJwWBhFhbmzJwkWLMMuHlR4wgmcQhC04rEGmWYq7m3AroaBICYcNEJEYTgXSCi2TYkxLJ/nKQ4MEZgCCKkkVcwIOpC5AJJLOOQ06Xg/S/ru9/aeebhcPfjH+T6CjIYZadEJCMBpkstqipwcmInyYnvRhKmqFVLcJaaBcwcpreRks+Dwkq6XhNnevjG8lDzwUXcqnnslMQWatQGF7ZMJmStSMBNHwaZ0aK0aocXTmYgOLsRWoWm2Id348UoZjn3LF9qRYe2vv8eJ97eYbiNk94b64Z+g4T7v/tO6s8y7FqH4gsDff9i+MmdyL2zqcP+o9sNshVz9AbkOKwUtOJJ3lSDjZklMxpaVkbU+fR2g+ZeqxozO+CmTvCoaAs2J5hp1bLUfbExM52mabpe8jCYU60axmOtqpVqRXBktHF7RWERzBIRSpnzQHkA4CDuW7/CqtZSrtPlo6udMjIOJESJ3cIeMKwd8LL1EAUPCFjJKWuvRUoTLJzszXm3a/fG5uJdB7VzNlcwUSJBhOa3/er50ZfyF6sYEb7Ew+g392JBC+YDkzsFwQIEwS2QcjhRD1usEwXr6F63JbS8Ql9PbLR6AoJeFkr5u8XVu33iRCC37hVpjKzulWhhpnYVQ+RNtLrnzhxVLxqk7llvN+l9xB1ChbJqXRuD4m4biFnTrOP3+rODOK2yereuiNauIA782s7A8G6Q73vkRb9Sb1N0D1OsJzHEvvDRqHrgnbKz709PYk
OyQUxk5+t1d/JNDNoYcHIVuDC5pKafS4koe4D+2JqdvYEYbo7nBlSDKdq7/fahbvgrAKM70XZbbP9C7wXCPtTSXwnNC1qLwds2dNqx5yNu/tCXIKav3S+PmwZugMYJLjDxwj6bnbV+Vq2cL8wLyyEyE8ipdbkbIkDWeyd21Zsze7NtgGee+O8D6449CbcFmmMuuFmrV7g9gu+GO7arNOPE90b1yyvu4eDu1T5b9zdpZtcyPV2fTCWLkDA1Lm0YqYhiEy1NMBx2pMQtZTpRoiRAFkbKKbMIJJFIJGkKVXOgGJCYEjm0NgPbAbeorx55I9VNoy4ep7DHWvaikwinlIYhAb4sSyml1qjnKTtLCQCSIzlSw7dkABqZ2cBEAmZKIiYgv17x9OhXJYVcLxnmxVS50ABKECJX00VVnSBOyURSwt2AYAapEQFJfEjOTOooQCUk95O5Lf745OnAfuB0xIGdxbSSWxUw2NDgW2dfEPtzu3LtTDOQ9ZoyADV2iDM8bNko32Yt3rDaqwBaUulq0/5Nj5cW286Q2P75bz5Pz0gnulFVjinxkp3ez+3P/+294sVqoriZgZkc4dRrDJSWeyXgSF2IIjnoxdcdcLLYjSJhyNWJwE5gI5CTB0DpadPbETRTNXinJXqT7jGHqlWDGkU8MupKc0qSR04jpYFSIsBNYQozBsGL2QK96vzoaqgD2ymxkZCCd7Bxm9g3W9ANbfbZStmQSXQfBxt9d/B2rNoXG2BFkCu4m+Ju32fN7a+6v7tAH/xylHhHC89ej3LMzO7krkqN6oP95IgCMuzPpEeCAUyx36K75oC+owH9ioRtHW25CZt/oLmAvgfh96Nlx7Y8UQM0EpVaWVI0H+vmmumX/0KbeS91vGuGnuh+4zyjzZ3GGzOke2XilRsfHVYvMYCwNbnFCb9gNG931pFaP/fNJr+/+73pEiCS6bn3QsiEqhCEuO9rzRBtpHQiOLeYElPOWVKKTdXA7sk5uXN4LjrNY6X/tP36BuJHm4dPYuuv26ekttuj/yZ38LbzkhPz831/74mJ5+jOoA3+rB4U9A6PUddJav13X978xe9nn/R+AQ46pi51mZbLeXEfxoWhzO7Sapf1G4vf7ekZPbXub3q0Z4ne+PJH1srP2MZVjJUXW0SPpPrzM7w46bps3V5lf1e7f3nVWq2qIiFIBtTCsxG9IQ7fL4Fa/khzdYaZm6LGKzNEotCVpE6kj5gAUSwLakoVZCbNwug9SUBU1VAEMyi+zuzu3MtLEXFQcBxmpgFiCKY3lc8bfCE3cmU3MneqZsxGTmzGilwIjpF9BN/R3eDDUCzZ49U+fNKpVAwsggQkn8gqGdyzUdLEEBvYAagFiLHEy5icWIpzhRT4gPpa1Jfl/eeU7wd/4HSHwx1EwlZTh5MLAWCLrEJSIuXIQdn3IwGhgRJcN+tmp8eX3Xpq0JoWeLOXOMDurlviq/vtOIzv0otxEUOwWX7P36Xdf5+fsLHHg8Jwa/BvIGb/+XUL9X7EStWsBIBA5kF0CJTQSAboldvdW7aeOyJ3bsXqgJM5kYtDW2izRUNh5hZ7nCmFLI2DKRRclEEWlF0N2tYKvJh4zHlMQ8vFZxkFJuYKY69k5lCz4FkTkaQseZR84HzkPFLKLAx2rlaiLjogVs0nsUnq1WrlchC9z6jMMJA5m/ktVvmi4Qw0R7TRNgLQFhMGh0VCN3tisK3D4lyLIKNvXWvHwQHSNVzwcjXbGZzP3n2xGu3e8F0AIpa3SFADWtXu/RDaXQzkq4rM9mowoQiRMOgEtJyywBSRYd3YqY5Y56y5mLYGayNnZ6R7H+vuWLmSPYsKQW2LhGB4y9frjiE0UNgdOLt+WWEVuTOMu+JS65g9DkTfHjY3DDoFgehZAxPtfS0takLUz9pm4L7hXgIae7mr7MBvC92+6NRInJVn5Ag3oMIr3KMmZjRH8LXJvOO0RCBhEpbMwxonVg+avJiTuffC43CK+n3knUHVfGPmDuM2uLZVy2/a6YtjMrDozaoFup0v1NCAEGgtK7g/le+cO3vHdZsdN+ZBX2G/cCsv7qwthsKEijrX+TKdH6+VhI91ODkJSIjQc7E6u2u9dd7GzLMeswbD9ptAd86s9xwvh63YtuwNjW7hMzVri/JuPeDdGdZYONaptp7l+x58/Xv14myemK0pmSRxGoYxJzZV03ARkTATRfSHEUk2KbGkpmxHxMSJRZhZyOFuxR3E7LEDqC5F1Zs8mRWvaswqkkSSJCGigFB9ahGzAK5mWPNVObITaq1GZA4vdQkAo6rqWsuyn3HJm/cmnsEAgjnIzayl6QFmbJQpH3H3RoYHG5JmtekpyhprIspMyZlR23aKFolg0iQgInYXh5sltpyd2N0ooHAmzdmcbDhQHi1JLxQY84EijBTeAPKAhGhFt79/NLdY0saJX/M6vtz9bZS0jndsFtq/y9Gssd1G9ez32gPcx/izM+wsfo67im5bX4xX1tEcrULNlo4cGe9OPLfNyurGNDpbF31xb3tLJC2YwKVvKgxiTg4stftYepInCJ22FDbRc8IaM98dj68WjQwlAxVROOCkbsWUFBQeGBEZBsljHg6cD5ARkiklThB2I+IFZJ6JXNXLxHplm6CFlgvKRXRhMmaYs2KXCBrLxG5ZvDlu4Ev8SbHoMJwZz7wX3dwnZtq7Ahv7gLrRSQRwG0d/MxCD731t/do6ZiK9LMTaHOwcvCTcbrSRLUWMZ67LiOY0/ZZ4bAKtqeadaNW05xwcmGeHJ16A9Ruj4MtPsPuhL+9I/vwMtP8zjMQX7B7uRv/qR6EVhqCLBO1CWv2IxA3uOIjC/dIBDYGY9tmp7R6e3zJvOVldcm0HS3ZNc/NIDvdOQ9sfZjDdJ6KFCxpwYkjUkCXvKI0oSWTXO8gscjRaul81hLyntQqa1F0unWoQUNJ91zCBPbc77/8hunkWBzYU4E7sWJemfcOEJ8bNNGQ4mPvWhRb/5H7VtattbdSd5F2bzv3KX1xYAXj7CDdfm8ASdCA/CB+EDkyDsxhzfHLFLftTeJvpz/uMAILdpsbdYLwvBHTay23vWaU9WjsGgb217Q6mYHtGp/Vb3z+1mjfVX3zo2Snj2jyk4TAej4cxMZVlVihzYmZOkaVKgMOciNMwMCcwrXO/JadSKIKaO1Kku5m3WvZw6Zl11CG4CKhvdQFnYdRdutRImB5qSS2lRU29aBjbQAsZQ5/7o5P3OFgPuXVtK3cnDeEQVhMw7u4pD/nuaxuTY8I0gInUcPfaD6OKmxY+T1QLiMCpcKrjYPdHSjkZTL2aInE+jgTWedGiBaDMdMwMo6eJZUhvvk4PD35MnuAQcsoEQXOSKwicmKJIwt550QcTw8ER99hWjjWuvA227zm8NeHW5TcWbdvUX3yJ1lHUwl3UQOV6grBelZpnkHYDjlpo3FajI5amVvg7ia/pRdhs/dilrDO8ZE2aMDciNXddMU5PR4r7NCdydY/Vu/Ek3MjAgKsDldwT3AjE5O6J+DAc3ACfqxkJgyl8YiQEovBYMDOxqGonkwNASun1w4ORz
MtSqzvxXAoRDFy8FtfkDS5zznk85GFMaUAajJKzkLAIcgJJcs1poFF4mbRcZlrO4gvZ4stZp4uVWbRSakPB11ghrdO7h9R3a9ONo7Y5Mxgc3tWQxNmcYevuGMHhXqs+AnzrKeJ/LkIeM7t3uN+sI62jt2F3c3x5lHYoduP1iTBMDAJxWr2M27ds5Yps+2+EbxxKG4s2nBuBaxr+XXeVLoMUmUi+MYy7JX0bp0afFj3NIl7rbGMCBBzgKJB1W6MZ/XF4e9yQpOlrp8PEtS+17dLd271lyNxyYjojZmvJsIuowYLeuBwR0n6GJgR4+2R77368m1bCrLcE9bVLHdh0ul/2MbGQ78cYWpA3jEi0fos0zkRbHlzb+8LbYeE5cJBz5G1GQ5Ga1+AIGtTaIm9ozrd4log3O1Ys410HiDioM771cvd9E3rYr88vcocwyy6iFDaoMAiurssymdk4DokzIyzkiN0z1plIu9/9JFtr3Y6vLxzrJxpdhQhZ+P4wfPX6nkzuh+Nb5LtCufV9Yyq3+btf9JnDzbldnglCLhwxkxW59MzouKlbf8zqku5EKW+JYGEb3t74yoym9U72CGj30L59qTuc95lf7eIEWPg1O9GkfU2Y7453r+4e7o45ukBVibNIzsNITKoaIhjEJCkRpwbPgzejRgRSMlddFmHKKQlJiOKKc2IZ8ijMQVQwB7EkEe5YP6JKGlzMJi9utarWEtLAzJwyAyilwECcRAREEEFVSWk/l1OEWiMbZU9k6IQ7z6o8zXJ+0ssT3CgZDWImPow43QPED699HJSN50kmo0pgQcqekx9Hv7tHHkybrcciMmQ2d5ncDFkQo95Ar07MTIlcJ1yNcjZJYGnYrfHgV44hNYv5/88Oj9wwAM2uvV3++u/9oFwjzfvjpScmfu89MRG2cPdtLzSYGaki0ub6ukC+7twczrLmm5GQ8HW4kZGYCZAAYVJyc4tlMoscJDu7SmWAhJ3JqEk4x5QnUOLE7KVqkrQ+SGJ+uDtCeJqWuWg1J8KypCTK4iwk7ha8jpxySkM4HSUEHYkFIhjEGe4JA/g05qvzx7pguSYrCvU623LVeeKySAKHIoEh1FZ60oXDm8/CdqsHOzbjrikORquGJ9y/bFkyuLEBuBEJ+yK2Dk1m8pDdiT541su0/8+/LYhZxww6liIO2oS6t4j+frT0z/sXOTHUqcidSNwmYo/rrL622Ayai2ZHMPRb8wCtnbdbxI570Zohdlduq24z7xxuMIDN3Vocbw9i4rYZzc8VtMPbPYBXJ9nOE4PVB7Ph/mbxdw1fZnAMlAZiwpvI7TOrKYJ9e2691vbc1RPzwu71vgndhJP6PuNMzM/p3ZGi1Zu5AVABoRe3CDWi9TRR28XJVvJt31aJmchcG+zuNr1vuHWn0dyIT80DtzLx+n1sOLeNDTd2grO0uy/sZD7cPk5P5HO4aS3mRp6563vGQoTeu/ux9HIjX9M74zPWJxV2v9dxFx3oSk6Z0n0+fHVyMbnH+ErTASzRad4jo7u4TfvjpSdmDzK2YzMP0XHgdqYgmfZW3NpvGwz7cRI/4QbZzam/QUxg767ZyE2+ew8AmHnM42E8DpndrEohZuYhpSFFFoXDIw8ufBqhQBmIzMBty2G4MZGIpJTykBIldeNSARnyQESqxWCm7qaqofvunepgZtUsUHLQ87RqNasEirSm7hwlkcTM4s5m4AAxW9+kBgp79kpwLLzJ9YOEuJT06aP86pf2l//SSFXU9Q0c0OrKRCKTWV0qFb9e08dzmiulhKHakFAdnpCql0rC4+kggjQVLNXniRjyMKIQPS2koDyQuH7+oAxOA0739vYrO55KW2jB7uLEq4/bb1dP7D0zjH+Ljn+ZvLIOF+oL07rc/I5jD5QjsrLGk1e43j7Vh/LOJIxxewN3QglQujYk2prL+z/6fa5rrMPcq0HVSrUQFY/KMe3WwhAnZnaDGQgQESGhWIxcklmGjeyHxOqqpSyOnHPmPIDMMTATbSDGCDHSOYlQSjIw21I0pbTepAjfH0cZ5Drky3W+zMs0h21gLYAhEvs9hUCkVoInJs5Jkgg7sydX8sV9GRgPw8AzpC5UZokVWKsuyzJdscx8NJYWHSBnWMv76oxSwhruX5twt0S3XTvgIpxvLXgiEDk3P/XqTOkby7agBQM/khNlFxbYxtHqJ8C2YN1ggZvx1V+JO7PGegtfRLhE4hkZ7UlvRM3BMCe5dcW0cHTUgGrKNnTr2Sfrq3ZgbfaW17fWhY1jzwdpTUXNE2AbmNgNeaNti4oWMGKwE8iInJs3rIV0LWSkA4l1Nwg/mzFrbtIuO6lfl1a8RuTgACYruAFCsqkpF1JDMVsf7R1K7Zb3PRKfjEayZv1+ccW4YXu0P1dB4d2m5miSXQbiLuDSsjkcgJN68/oEwIgwUj+lr58HkTtZSGt5f9aWubN6C4iB1iGtyThO6quWQuQrBtLhdiFiMJswOEXygdVqVysHqbx6lKMsGTnc2RVW2zwXJu6BqA3P78Z8R6/ATbvfzFv+EojpwLKl1jCZZBvuhXRkUT6W8WgyODkRpK3Iu1mzQgoKB9LNTd3c3GaboLk/Ioci5t2ul2//42g+mC9tKbuaaT3At420m2M7501gwDcs1rfw1ezY6twFHE7MYWoakTCRJCZys+LmWouZolU6Km6mwQZnEmI0aiCYRMYh53w6noYhs7CZT9Ok1Yg6r0VLrepmi+8domZea13cPcjCHGUYyYFWNMWMUpKURFhSyuGLrA3EyH49SSuzpNEo2Fa7nsxTNZlm/vCJ3/02fXzvbP7pncFDYIYqk4OXCwSclMqMsqBUQOFKKlSVizmz1oIkXE7CLEv1eaZpokSiRwfj40Qu/PqVk5bze9PCaZA3b3E4YRiNxckpHAsGJ2skN/viQPh3P9Yp9e/ASd9OQn0N2faEbW60f+6GuCOY3JDb6bKGyleS7Bo+wIbr4377VPeGDYJF1MNa0m+sT0nCWimQKcHJTBmUJA/s2XVkv8tSoZMbGRLLKOkkQ2CV2Wo1q6HaHzZgjCBpbvtnZOS4KkeVMSEzq1qLVtWq6tY4F+IhwV6V4eKWhBKNmS0JhDy5gVRRxRx1suVa56suSyRpuJvXpU4Xns75NHHOTEptoaCu4EnOGnvjC8Jv3w59q/3gTobGZN4fO05M2JJdTaKBGCdYuBFakGnX2bfhpOcg5rlYzXZsC1mcgXcgxp0chuaP4fUz3uBOG2XskJ3oQr+HiL7sGmEHvv2GGN3H2E4mZH+HsWwSOhb43Qety3T/flu8iVrKQjTNttfvmi+EiF6Yp9S6Zo0AdW9KY0Y01MPbSho1heIX34AYWtV+0ecsPbvY1iYxN+MGCbjhxwDbXnRjWzvwfQpCkczX1MCsQergjjSBkcbRXWMV+xYP/EWu0ZveeKLoYJ36QF/Xn+jVaB2JD64QJ5zghtAPi1f6GkYQMuGggsLNCbX6VXymG6MfzIAZu7GbEwlDuPkvm5On5U9tLbPv2h6OwfrG+vdqX27Ovp0rImIWlsT8wCjixWk0SSYMtuat
ZdpfcQMx653fHO69Y/vMpbApAiqsIOa2+N4tFENkdNnzMXULjnHz126mPG+ebjfdoDnsDLMv7pIe+ILhBO0sevNQ2jUzVSKSpgDuRm5mTMiSk6QU7Blm4VZVIEelYqYQoXWr6qaqRZeQ04g4VPhUiIjY1TYQI5KATERRS8TNRRKRECJtW2jNkLIvPE0SYlAE+6hRYYIGSJTcxrkOjxf/8Amfn4aoCvj0VJ1VDk7ZPVN1lFkS5FVyMQxaSJ1dXKUQL3M6Xx1wrZ7InwYjQqmYZ7teIcSXo4vYWX044pQVNn38XKeLpGEwHt58I4c7DnUDMg9dYTZmU/MaWObFQ22m4NaDzz/UJjRuPto2dkKXAo1vfd9SszvbShAgSJTYCFPO+rUoRPTbIhKrxFoVPAzhVS59/xQ9K6GBmPXdlZvt3r043S/KxCJJ4RUgwDj3s20zgphzSkSs7rXWpSyJ+Xg6jiSploHtkMSIZzeu5qCB5fXpLqdx8npZps+Xsy5zpE2rqRM5h5+a1Nx0X4YMZjaX5VLqXOo0L5d5us7zdV6mpdRFa0U1ri5GKTTbXBkDs3CySi5CyITMgKPArMyPn86fPn68ns+1FiKwkJOTFZue6uWTnV5RCnq9OAjsLZGeGjlcnoGY3eG9t0BAaOr0TK4+UrrVwEzMwQsMy4IagmlbGqPrQwlTVxx6uTz1LWUDNL8bmPetpTM/PKA9UeMq7E1TA3rKNNychEloq9ETWT7qvHHD4RT00Yg37guaxabrWFWk12/5OpT7iNy35zOwsr5oXatphXcUNCZvlVx8jXIEzod3Z3QkEJr2Ks3RdK0TVlMvtpKAL5040mwAQqvgR6snZrO5t0rgnWTWemq3v/Q2pN1Dbc8o+16Ip9vRjjoS8xUyN2fMrv1MXasre+uREEXomQY3nBuKV2wdFV0JyNc6QMTERobNfxaf6PZVkMWir5367Ij8bHcAFsGg5z7pljnayhS4mZWiy9XKtSekgTqPxMmZLIRshINP1hx8K7dqv+V6SxDcXnfA+EYnhvdYqX+bCdpnIhEgcKPqCSpMbIRKrmzUAojugLWc9a1Hw5VOvIZZ24vNP9A82gTAKeR29gbDdjt7NtVu9e4Fbp7bCDu0DrwYYiuAawB+XU9uwfEGYqgj5/1PHGY2z9fr9ayJJOj65LUEVomUZ04pjUNikSiTVGth5uPxmPOQeGAWuINCOIYJpnVeVJcyT9O1lOIGNataVGtodQBgDksqKlqTdvWQmObMLGkgImpeQ+kjy91LtGGpusyllrKX+0vCyTkRR0pV48WEIyEqN7r5ksTv7g6HH4HcDwfjwSSbDKCEZITKCTyMyCgsqgoiUR8qkZIZubtmNgEJG8CJXaUk8ZByIq4j4ZBxyOaGYUQ1T9k4KwDTbhO11dXJ2V3dnytF/E0O2rNV1gHU17jISzbb9/uXTgLEbXEvkkItaE/dHd2GY5ML4d2A6+cWZhC5I90SFnoFLAbRCmKi3Mae5NtOZg51UiNzbkSVIO6ycQLFRtVnH1POw+lwdMfj+Xy9zpfLNKT09u5hHA9jyifWQ7LqJuRsVrWy+N043t+9UuHP1/M8z1e9NmpCN5SIKKXEjlJURNbWraqPT5dP16moXebyeL5epmmay7KUWqpWqLFClAyShKUyEliJ3A2uDISIHQkh8VJ0ujw9PX2e56mq5rZtuVvx+ckuH/V8p2w0HjwdiAaiSA4M+s6tm/fFsW3YBA8Cnj/3LIT9EZ6YqGga/F5up3fvNBmmlq1hLWXo5rL04j9/AxDT7RIzgDTSUtbz7GH76olx9/TCE6MGVVodmhQSgEEsbYkjNyCGdiAmxl6r076zXynEKZuCMG1ugpUF0tdX2zu4YztzaQFtM1DpYjAtSMDmDtcoNu7+zA8bNIs1yEQ7bZgOYtor2L2O/jqhe26YOqrsLja0Z1//8VL6p+Xy7Daevf/Ad5vNFm7odtJzjkWcwiJFyal7T7sPwDezO6ZzHz1EiILETIC3ZRNMwuJOVU3V9rW1o/KR9cyOFp/vSR6BMa2/sFZnX+PhBDMosXNtS2YppZSlat3vq409BmcgMUCUmJgbq9V33pR9d770jvTw2ZffXbUz1ptrTGdu26NzCwA7w1vK9+4k24Xbr1jVn3ti6Atd1XHg6kfpd/AMHO9AzJ6Fc7MHhaftCxlGu4u1n61k2u1ttYGxnfpL5zGzskzzdLFEOTGF1S0sBGEOf0nKeRwPkpKrmanWTESHcRTJTBmOaouqetGgo7lb1VqWZVmmWhWxW5uSR3i4OelbWjUROImEXi33isKxrEbnNHdjRMKitQioVZdlqUu9SbGWlCDJw2PT0BsF38FBBajHAT/+Nn/1qjIIvtRa3E3YhCkqxS+VidJxMKFZi7oRSBymlJ0rYMSLwBhMkHBPlWWaJoL7OFCWBcAwpPsTqx/yiLm6JL871tOx5BaMzxa4t0k2KUF9Va9cO7BVpqIOU4kcvB9A4bpovuS9BcAgZhIRj8XZWynj3Ri7PRq/gAgR0oDDVii9WhdhHgYIi4kjqxHVw0NE5FjrU7ZBVmuttQqR8GYrq2JlxvR5QhaCYVWhlpQaWGewMESYs5OYNjM0pZSHfHd3//b1m2Up17ksSzk/Xmwc6Gs5Hu9eCR1Rcn26zJPOyzKVWqAyHiS/fXjI9/fj48ff/PY7r8rSNBqNyIlzzg8Pr9a/V0hWan3/8dNvPj+pYy56mZfrvFznspRqWl3djRRSCUQsaaA8UM4kCYC7wsHEwhAiGbMv8lSW+TqVWs3dmKR5JKqXi10/lk8JNtvpdT4+SAalME/FEWIpjtVEfTHL21LUutCZnM1JNg4ZgQRJVpmEsNqd2cFrbWEIp21toqYo082i3fBBt9Vux9XLEbe7w/C68ObSp75GYpftCsAiINLHr7unhH3miDm0eq1OsIbF23gOce9ezSw2GwN5q9G9BzG+iyPdHC1BFyEE+qyt20Lc3UrtllsMJc5ZxSdgAVqqDTmHVpQihTRAFwhojUUtNtYhy+Zr6dTz3ly0+aY3T8zKIu6tufUOv7CJ9+vJZgN9qc/6RrsDl8Sx58ZfDuwlbtGHDJNQsOqbm6kp/CLgHzmoV9/h2CPASVIvS47wazGJSM6jA/OyVHW1VuyTQGRN7VDNVV3dzarCoFbVqppq8z0omnnbM/7idsBNJ6ZBMlWdKqrTur1EwwrByCUi5kwiJNLLizXnTx8DtpkZ1B51O6J8+moKusdn3IBQefCo1xcrORGBtIHOxvgG+R7prn61lWa7eqp8nU69Z/oeHGioe2J472C97USgJefzNpai9brXdBsV67fWezB+bqH7OqYistice97HUWwHDqx2l69PuQkdtse0WpeyXGEE45RFeMgpjSkPaQhhGBahqPXrcDN4dffIlC+qpSzT/FSWq5WJmQ6HO0lZzUPKJRZtCAkLEVEibpqRrtWDFsWEnKVRhKktClrdWdeMS19dtf2otday1FL2/t2Uc3ZJLqknma1QD86oTBhHvE3uRCJedT6fS12c4dK022l0OEsSA2ZOBgiTOFV
DAguzi9QUsgbO7rOa1bKMI7tXSZR5YSCnNAwj5E5OyciIa5Y6DMoMJnFKnXvW729Lnf/y8T17RB+RW6f2sM4WGCC63XBefBcrAG5QJbzd61SwrjQesy2kD2/uJZgZbY+ImXN7RQtdFUlMvu+wTt1qhKj2Z9BkHUJBkHRjEJESO7E1pMQpyTiOd/f3b968/eG3PzifL7/41XfLUpelJFBZCjlOh9MRS318hNlxGA1p8iogV/WqAgySjsP46v4+nw6UkxGMSMHD8e7Nm28cdDwcX79+LT1BSdUez9cPn54MtKhNi86lTksNEKPq5gxiYrBIyjmPYx4HySICEU7CSVjE2cgJanW6Xq7XSzVdhU0AEJR0xvykzIBSA5lMzFjVxF7Kjd0eTghFjmhVwTYt1kOYE0tiYiYL0z30cAMjdWTZep+aEF3jFd8Yi7u7f3knu7/3n1lBjANuBurJUy9BTB8m6zcTIz3zxLir2ZqpvNn5sVCusZA+8Jon+/unxrOb9+99MSbufvquRoOBqtAy8lVoIVS4u5ObAFkN7kldzJ/Pfd6yk1bboElNdxDTd/z+RoCAcKbxZj0//91pqs30AHa7QX/l5cjq8G4Deb7vkf6uv9AiSiKvH+6/+epNJyqjrUoRgSBrIp+dQMDNdGYJOj23IrexfQrLMAwOLMtcGsUXTETe5IEiB1bNekl5q6q1WqmlFq1Vq1ox02rFrFYrUWktojEITcm206h6MehtxzRmCYE51ESImRIj4AU5VuVE3XtZ9mleu9fx8jPeKKsaDj4CBe2mG5HmWzph3AZ331cgEN+5ULbp8mVOzAqDb/qf9m66/hcBTL5aGfvZsCfMvJxL/uxW+tFdj20D5O2V/aWf7xRt1r5IcIpIRpI85jwc8mE83h2P4zAOaWBpDMWlqtnS7D6vbqa6qJlWBIhZlovVSViYOHtjgxHIEbsTN+ZeImrCu87srZxkW/6cezpCZEg3oqSbqq7G0troqrpXR4sjDUN2SZrEhK1F7q0pNTjAwoOkgcmoqs7lelFfqnMmU9dqABIY7rYs6lbdQJQSC0gMiThx4sSNlOYwNV0W0+pm7BCpqFThGCwZ3R/S4e4uc+aQqRM0rh/att+G6er4fI7UNmmKbfLT7nXsZgOhEz7XkWuqgTY9UCdWD+ZqofFakc2BbXEkgzG7tbSfTo5uTtDtsy3TdzOhuxO3qXOuz7HpsXf8TpFgEBaAhQ84eBtAi7nQwClxYqHqfrXiDjULbkAkPI3D4dX962++/vZHP/7ph/cfAF6WoKPXy9PT9e4uvX6VKD3Ns6l/+/U3byAfPz5Rpcv58g6/PSxTJb+/v7t/8+rN118Np6O6KZMh5fH48OpNNbz78P7HX78ex2Gde3PVy1LUsKhPRZei86K1mmu1GO/CNAzpcBhPx8PxOA55zJxJh8zDmIfECdWWWspyuV4+Pz2erxd1i+RUJwpfAsHIZpsfiVHTQJKJE0S4iSduEru0LQe3E5u82zKNPJrcZZcyGlWTUhSkp1UZCExg635/rIucrWMvrLHGx0TsL7tPbr3c7s13mSxxtFPGtto8jm2PXcfnXvKUdyoUUaI8uAjrs8TKZmtmHK35PbZyfZ0QE9GjRGsbiHugdlv4ZrVlqTcitgnQrgo4h+elrbLxrQhjMEri+SDTXZpGmRMZ3EqlpabFD1dlq6l6qtgxYhCugRALCJ5p17bbB5LaI/Lqw2iA5vvhS/8dI2KNGN+AmM5mfkkBDSjZukS2TQUGYXOIucMJyVmYVq7SOOaf/fQHhyGFIz1sGdUmN86MnDgJsUhKqY/uVqiDG3brgQwmYknCTlDTTsXwIP1RFGoNCqC3txUw1Vq1lGWZl2Upc6nzUq/Tcp3L5bqcr/NSVEOBgwgr8a9VkY1Bvg1aJgiBwhkToZyuHGPBp6EdLZe6P6bv1dTHc8wbjiHb3u1BPApWnrsAjijOLu4O16B1c2OwW2uXICFFt8L3WU69ZwlrFtvWoy3Axr59zrwn7209D6ClQYKoVxCh/VIer/iLsdRqXe3Xg+1oK0NbdcLu7ZNvKxPqQKv7FgGXADG2scD72ZiHYXi4v3t1Oh2Pp+PxeDrd5ZQBVtO5TMv1cnl6nJfJ1dwrvEaqkZszZXdXXRwKJiQBC4uknMzhSxcID+EmwB0axW1AEmrAkYJUqzsgPQrBgMZtx7BX79KIK4hxX5ltN7hfkESGpCylUFev7lZZM1wyKdSsuM+uxVVcHAi1PoPDNSprG5yIWw24KLJqJqHUCrIo463VLXLGKb6vDhRz8TqwpcHzEHFYJod75Hp8AbK+OFbvCLtTUXIHOyQcyusp2jjZuaPbqLCNZu7wZ9drgaE2uLFaqBTBZbB3lnpkRq5rOcGx6ahuad190HU48gXl8uYIXA2GRn3kFkjklFJOKbEk4qQ+EB/TkFNipup2reVa9Kw0FSulqpqZJZmvl+vlfD2fr+frNE1LKUqAm16fHi+nQ/32LTKB0+GUf/CDH8l4evPq6fo0+aRzme2KdBi+evP64c2bH/7kR3f398VUiQyJcx7G41L17jB+++ow5BzPYObXuTxdl2pe1Ofii2qtZmbsREwpDzQc5HDKx7vD6TSM45DSkCBgycwpxCK1qJ6v18/np89PT5fpagasOQ7EjYHr6nXyklAn1oW9MiqRghprer/Nvzz62tFXEnIjT7d1jIQpMSWGcF9tyBngLrUSv9piFStmKJbGqOjXDmDuOxDTofENoulnuQExBDK30Ehd38DqfXmxBgZOfsmJQYzWJrG+LvfsWFVIvlAt9296eAcq8c8gDRvQ96AVwBi8sl/YPkn9nOp5lGkkI/e5EPSk/pYg7oOrGD0vH9OMfN429Q5i/AbEMIPQP7OCmGja3wFitpTN9fXettGDXwYxHaLR+sqOybh6Ym60iJLw64eD2104D909Zm6wZZk8CYuQ9IPDi0nM0pI4NhDTI1HUOefmZqpaq6nTuqRFzXdHBHc6R6ZqraXUpWiAmMu1PF3mz+fL09P0eL5O81LU1fsqF1QqfZ5qwQFZGMoQBhhp7+HYY4C9zejPNWpXTkwMpz6o4BZb/zqn46Hbosm0+VQioLH+sa4Ge/7weuv+JU9MI/buB0BslLQB3P3U7QbmDiM5nn3mi64YB75Qp7o/MLaT7ODU7Z26oytHh4Wwlv8AAGY+Hg4PD/dv37x5fbo7jIdhGPM4AFSrqtZSlnm+TNfHeZrc1a2aVXONdLkkzq34yug0piZSOuScoi41VDu7huNZVK3WChAPTG0RMN+6kyLBupl9LTZo4XR5JgPtz+AYkJhBwhzKchA3C9zq7A4XOMPFq7u7F3gFKqFyQzhubq6K5mYnAjFTkiZpSuEkUnVFx+guAhd3dSaIEBPECEzsxEpurtaUdVKYChbRgda73bxdUXLrxZhJQkhEotWfLlClJBizHcSJvMmrk4PYSKkXVljHQTcMnVpLeR92kcorZBRVwIlAHFUyHQiZbyN1AdYJEkZhmBxu0ErNeKGOuNsvjyrS2CGnMBoh0a+NAe8GAqkycxIZx/HudH
d3Op2Ox1Hy4BglHcchSYK7mRevn6/Lrz6df/Px8XK+zNcZxOfHy6cPn9+/+/ib3364TPP7D5+qWmIWr9P509NnPp9fH149DPcPDw8PP/npz+5fvZnn8vnj59/88tdP5zPE8shff/PVD3/84z/8wz+4v39VtapBwRVW1a6lHoTe3A05tXCSmZ2n5fP5qoaiPquruTqYKKc0DkM+3efjw3B6SIdTHsaUJBGEnImcqYLITdUvc/nw+Pjh0+fP5/M0z5SycOrpQGHSOcjdFa4JlhljZh7YE0G6SlVThP8yIm4wc0Xw2KZA7xYwPJENTNIDm97CkURgsHXGJLwvZNRZWX3FbsNgu/AOrLy8szVghvADAt2MxgphtkTZvtbtT+/ODk8WeH63AZN7S7Hq98nd4xnxKQ/Ns9Xtvqf5NlPguTOUbpZm93UmbRuEw1td0sYLDmFNSz5z+WzXn9fy68XfZ7oMbOTkRSBf051zOjDdgdp823Niuse5eZSC9bLnxHQQQ9iKtqze2ua3WFv7Npy064WXIGb3+jaK+sGNmdzcWq13GvOjg6NGsth1C+bM8zAMKeeUMoswYiEJDkpTfSFETCz89twsrEaZc8DXQlgxNt1d1ReNUFFtJb9oK4zZ7CxQTjIex8RHAtS9Vi9Fp0Wv13q+TL959+Evf/6L38yXuSzFwJyJxQnBhzDVdQOmUM4kJ/JELuzESOGbaWU6boZOA3fcmqyxbXa2RSOb979jDBN1jslaqdtD+Zla1J5asSQmimBct2odWGUwdjfSgrZrw7SRbK7mNfSv0a/+pVxpUKw0RA7qeeu7i+wtnZvLNg+lbSB4N6ba7F63jqYEENSg23jRWga+5Z2Rry0HhIr661c/+Lp+8+bN6XAQYnOUUsuyXKfrvFyXcp2mc10urkurLqAVDpEsklMamDMLkYiLDDndDcOQhIVUraq3zC70bVsjBFMBWCGT4OijB5+czJnZCYE82grs7u7ai+2ENKF71DW4YcMm/v9S9l9dkiRLmiAmRFWNOAuWmcXvvd3TPeTsnF2QR/z/J+AAe5Zgp6enL6mqpMHc3aiqigge1MwjsqobA/jpGx0VERnh7qamKvLJRwjJkfNOnRMlU/SOiVgXw1bBdaaGRBxcaCr2zAsAZKoqWaDYphERvGhQixNsmbnmYnsAZoVwiqaihMAMBGUROqTg2SlANrUlC7wQp3RdtUUlzssy+FeWTlHWoeWcz73FRHWFZuBqYCoK0YISFiTmpU5elkj5sr5UE6//zPpzViAwBAXKSmKazbJZFs26ZEmgGiEEx46JyByZWw1bcF2KayNazs7fDFqXthKX1aDlxHFEtfebut5st/v9/upwtd1u2roJxKxasauryrNDKHm3dhyn9unM4VPf9cO5N7NscR6nOKVhTkl0jpGISJARPEHwVNe+3W6gbXe7Xdvu9rtDdVeftqdxHMc8Z43EuNu1N1f76/1+s9nllEVVAZPKnJIjdAC72vPFh98gic5Zs4KIZUMFQmZ2HCpfN0212YVmG5oth8o5RwRsWhSSSpANzIANZtFhiv04TzEnkeB8eSt1oTdc4FYzVZBEmhwbO1IHSlAO63Jer1OOl6v69Sm8/FehMrmvKx4icwSB0V/Giq+mhcs+iyvrrtQ9S3bB0q4vGba/M/9ci5h/pYxZluFaxKgprdLo10XMb3kw66PUEK74c/z2e1CYkVZqnZIVCHaJwzJb/DXtlYgULzvkeqK8+nW4vHWl4wCwxXh87QYuSGcB51UMjImYxFvUdJqffsnnv8b5yyTd7NSRy9imVhK/y+1C9lP97RirUHoLNkdrd13gMbr4xPx31EmX9/qrj2sR8/LW/X6c9Ju34PL/7Pcf4StqqhUVzFe3ftm6swiuT8yQvffOLazdUseALcjJwsEu1ejqcFN+QC/IxFKtkTlmIQZeJ5Zr/OXlGQKgZ6q9D8GX+0oVRTRmjXOepsazxfGsOeIZ+1mkpMyuRs+/WXyFCiOkgkorM4YJ5FWAk10W0+U5lOCnV9+1yyCJvkZizAzWHIayIRjACl8TgL3ixNjXSMzyXH+PxFyuy9e3y7Jkl6L3pZj4V9EQW2/otch96bjXWfHvqbsvXdH6Pvx2Yf3bf/C3z/R123C5GcujRMFcX8lus/XOq0ie577vx7Gf4hjjlGXKcc55NtOLMRYisauc88weS0nIDpxn75x3nhdpr+MSTryMINYLtI5TCgmivD1WxLayMvUXrAvWCmbFkZaNztZW5TdnpWNHxMSeiMl7B2qV98TBULPklCyrailKKh9CoKq2Ao0sreqyg9JiKllSLlfG/br+ilRPofjILrwTQCUzAuSy+SABszFEEgAgKMZ7y36FpbwGQrAyPFgHnZcLtbxoQMs59aeTDtFvtw7ZNzUuc3hcZz8I6+yVls+X93j5PXjpdZZFaAAZLjGSCkRiMKvFDHOCOekUbZrzMKWcRHN2ZHXNbeXqhjeV34eq8rS074tycTkDcOG9v8hgsAx2iYrLapmwMkDF7nq7ffvmzffff//u3Tc3t7ebpkZAEIEUGbAOVfC+DBnBoIvprhvrzeb56XHsupzVDAgZTKZ+yGbBsXmf4uCYrq6233zz7vsfvn9zc5uTIvKcJMZ8e7sj4rpt2bmYZgNlJmaUnNM05SiioiUVDcwTCKH7KtMOgZyRAzNw6JCBPTtfVaFt6rptQrNxoeFQMTvHSGDFflSBFCxpsVVAAUwKUU2KJz2uIPlqRHthuphpnqccRzRxBMJEREXSUYKifme0sT7Rwn4o7C1ENkQz/+qeQQQm8I4qhzUTAyKaIhiYFJ6sWDlQcB0VIRAhMBMXaxnVnEVNEV8VMV/XUL95VrbALLTwf8oztAssAustCEUavMDIL+/+UjU4Wkx7l6+XEUKRzhbIk1QZVvjUVElWVH9V24oVcR/gulbLDM90VYaWi6NFcL4EOJQwzrWOWTcNNTJVTZkI6oAVi9c4yen8+L5//y/p+Innc+O0rhu3e0NvbmxHOfgEKC8ippf3jBAc4cr7wVcFyu80Sq/P9eUet98MlV479v5eUP3VF5bitRQBl2r25f3nS3FZjtgXfeLCtZECH71cSxTBedZxnlRGVWXiqqo37WZ/tW/ain1RmRW8F3LKcZxSEigeqsvcVMr0A1ZNDUjBn1wIjjFczJxwxYeW41NNTZkIkHWhSy2vmdCcg7qhm6tG5K6qXXPfP5yGUzeNSZZdrKha1n2MEArH1hZ1mRIaMTheXBpfww1or0woVngB4RX6UuQTCLhu14uTAb4o8Q2UgAAX24vl2hAgGaISctEKwFKpLxu8vRQWa6tKgGhfdxm2FOBUDqPLRV0Fja9u43IkGqKsX17vOgC4+A++uu2X29cA4CtDnleNymvl9dd7V3n6ePl5WPV7UJ7nCwyz/EVHtN9uD1tx5Ew1pzyMw9PjfT+cFbJqUomiYsVAhz0BhfLu+cDOFYeeLGLJAIwJzDMiEhgDOCz4bSnFyvStDGi86rLUkdnMSs0iucwkix+4qBki2RItaevdvOyKhFTgyRfJaBknE
RmRGUO5umCgOQukOc7jMEkW50Jd1W3bOldZqHTVQq6zl0XTt1Ln1jHK5fKXjazsjMWbYAEOrWhBHBAu+lfLxWK8bA3lF1+YzOuRtUj1ypD1q0ep1ykZDKpzjNwPtXf7wy4sIZK6BOesZeFljRms7eiL+QnYRRxpYIBioMsATbKmKDALjNHmqHOUOeqcdI4pZ1FJDrWuqA5YB9o2VdzAtglNYMdoIGaqy+tCZnKOvXOvpLxAC/eCEBHUPPGmru8O+x++/eb7b7/97vsf7u7udvtd8H61mZrJoPaBAPMc0SC4UDWt2+1HyR/ev5eY+m6IKZthFpviHCVLSp6x3e3u9s2ffvzmH/74448//WG/3fbdOI7zOM1hHBGRnRPVKcZhHEPwiMBMZbwuIllETIAIfRF58lfBCEShqpt2o2qGDOTIe+dDCFXdVKGuXFUTV8iMSEyLmcsiX1nKDSQkBRIgNdKXTvvlj1zkX8VJJ8cpTX2eet9ufVWhCyaIBmXE9cLU/VpVggYGigXQVU1ZZJqzCyZVwf9g7Sw9Y+1d4513RIyKmEFjztOUYhZREFhjZQEYyXv27JhZVCPELAD/ZhEDcOn118YQAYkYLwfhUsRc1vwFicFLD3fBj8rhRL/jXiCABwhYNncABC75dutdUG7qcs4Xy/slc7h8fdnFlwbgolrHguGvd3cJuCjIKiwIgSksaj4w8ATe6cZFjwOkZ0pPnHuZhtPDaTo/o0m7P1y7b1vbMu6UWlFeCKpfPy7BSbQ6xCyIy/ouXywMFh7MBa2Bl59Z3354kXzBalr420v01eM11vK6Y16eJb36LlyavwW6IPmKUJOy3D+O7z+cUs455RgTAnofttvtzc243W2q1ofggmfvXe2cKo6z5ZgNpDSRuNQkBQAr1B8Cs8KiI6ICt5VHIb0vABcYMBgaIzEzlo7vgs8Ud3qktq3e4HWoqqoZ26fu0/3zczeMMWuStSx8fV1Kh2oESuvt4wjAQF5xXNY36at3ki4YDHz1k1+hXBczH10sMV7/2xWJwd8gMeVfXBhyr1GLywX9V5CYJZxn4Y3+bhX89rFgtMt98lLLrNK6V0UPAsIljX793vr9ZU3+23/n3/jiQuh9VcSsvxDRsSPknIu3yzyOXT8c++EMBUQ2NUQwInZIITjnaXELXGjgZoUQS6S2eiisM7ryLpVSyi61PWGJ1TWRYrNuIpKL/s2sAFera6zC2vsg0rLZFa2RLvjt6x3A8ZKBkwgIiHOWOMwxpTkPw3juTicTbeoW99eHKmzqCsCJQVLJKiXZqZgvLvsGrvDkKp4wA0Mtu1kpDop5LdrKYYbVFdUUCv9AAVepBCCXUo5LWWRwcczxhXbz9VUzQwVKzg91dRrnNA5bz7VahYzgrLhxw4Jzl7JolWeAAQBReZKqIKqioIvzF4pCMkiikvM8p74fhymNWacIMUJKBR0iLoMUQgWzlKYollLl3fM+X+83t4d21zhPCkuAMDI777AOXIWXV1NeryPwixjJNiH88O7d3/304z/+u7//9t27drNxzhuC5lwGFmUaTojzMN5/+iQpX+0O1W5f73bv3tz9x3//D01Vffzw6XQ6x5T7YZ5jH8dhGMddW/30w3f/8Kcf/4d//+/+8MM3N1cHE5H80Pdj1w+I1I/jNM2n4/np6akbTsykakxu5VAYqGYxAPXFEIDhdYo1Mbfb7VUyQkRiJEfOOefJe3KMziH7RQEDssAruB7JuLYuAAAM4BAcmINVNosvbdDSbRMQmOU8z+N5PD26qqrbJnBTKm1a7TXthamNC1S4osUGKpIkxTR23dNTn1r5aQsQAFbEkQARKk+HXb1r6rr2yJRUxnl6Pg7dMI9Jk5gu8JB5R433IQRmp5JHlQgmqzbuZWdZ3q+lfjGASyvIhI4ZAWV9svZy7BTUCtWWOdmil1nBfQOQog5UhVc+MYRQs25cieaxcvAzG6EaQJYlGHAxwiJUWE4dBEIDW+ymVHTR92kpFQgNSylZdh4QIzVUAyBFM8NLEaaBYFNx66Xm0eLDcP5Zp8/bluN+9/CpfpxdN+QN11z96Pd/j+13Fq5Liwj2ql5b6hLkYh+6Vl4vGAysnJgVjYZXn78aJ5USp1yL0vktP/pVQ/6vnxgIL8fPV0eRFTXZ11f5UqMarg4A63UZpvRf//Lwf/zTB5Wcc5pjliwAGEK12262+81m1+527W7bHHab2+t9HbiMsU3nEsgHsEzEbX1zTC1LJqQQPNGS2+ccOWYfPCBascFDYGIfvHeuJOnY5RsARGCGIOCc2+927Wa7v9Lr565t6w+fHz4/HiXN0eS1PAnXyljKa0UrKdBUyGPlS5dypBzk65AdYDUGLCPjwp5VXbi6sLhivIAgJY9aS+ezoCyEYEZsSFKSkGzJPntpHlZ3p/UJw3r7sSF/7dhbSBqm/PXVxK/+6+Xz16OgSz9SPvxukFQqnOVJl5Ta3wqJv/6v3/3mVz/0QkS7BCIu/7n8VGHAjNMkac5pynmaprPIoDqplsTEgOQBCdE555u6OmwaBOjGYZxjFE0KWubktnhMIIICZoMolkTIyJYXYbbWNWqqogaGQmaWcxbJC38XCsxCiChiq1EmoikammpxEE8pzdNc4tAvL9otHoaL7SiJSZxiP/TDfOyGp/74nGP0HObTM6Yp7q9CVZOrwC28FzPEZYaznCevKWqvVsdC18Fywy/3xVLzLLvIZUdHgJVvBste88qNCteFb/abC4uLjQwpeWk2cYz9HFFzLtAOElw8Vy4nYGHUqkmR/GFBuUzEkmjKlrOJWhZIYlEsJskpTuPcdf0wpVlsijqO2QxDCG1bbeu2ajyzY2QHmmLspnmMUy80ZMmgYtWhxYqB0JiwDlR5rjxVDumr16JkRqa1D4e2fXtz/fd/+MPf/eHHn77//upwIGYRSSmJKjEhwBJ0gSCSx2GIw4hZG1HvyBO8ub0ykbaqjsfTMIzHc+eDqyoe63B3ffgP//jv/uM//v2///s/vb29ckxTP1TN4Kua3ZBVj+duHKc5piwS5zTPMaWci9oJAIiQmcwAkZgBiBiJ+NUaQCRyzgXnnPfOB3ZMyMAkXA5Pw9X5m8p6YFzpegtvjxEXIQYQFljhxUp33asXiQpqSTCbx+n8FKpqt9u5EIi8GMxzTDmpJgDwjh0SApqqxJxzTilZEfileRq70/Hx/suH6pu79J++B9iVP7Y4kUDZ8cEzVA7ZoRMC4clzdqwCYKAr28ATuSX/AxDRM4OCAAqAvWrC17llWcIGAKRGAATmGD0TIokt5i6XxqokQxVS4+qAhQDAZsUorsydCIAdvkYuiWATYN/gMrMs8y5euoyYzaMlWUsRVFExUTBgLg25aklgATAgVBRAwGIntpxGpqYrsA2wcvIRSlODlhhz7VLDk9fTNHw+f/lbGh8PG3/19vrh6UhD6qMXuombn3Tzo/BOjUUL8Kcm8nr/plePFWFYYwdKIfIi1VzxmFfqpFdFTDmw1+1paSH/e0XMoj9a9CCvP5ZS9TcownJ8mllByF799nHK//zXx//5//2p
NKspLRJrZq5CaNq63ba7bbPb1teH7bu7w8315mpbbRoMDhzBqpkjg9V0rmTNiRlIgmIupUxUsELMjFhaNcXSGr5qIdfnvGJtCIglls8BcdNCVXkk9Q6ZwRHgafD81XtFCI4gl3uTXiExpSaG1+/VUrKUQtxW8HPlSOhigEAXKiOtpToowmoTA7w8aVQr7iPKWBIXcWGcfqVHLlYya215+YaB2lcs+OVJqL46lf5tcOTr1fHf/YmlbStnvghcjr3/n3/va/HRpfqx1195VTip6jSNfX9KcchxynkapyHlWdcBEnIgcgUxcMRN8Nf7tvK8n8Mwz92UxyhiCACMUDmuPBGhCIhYFklJENQMi0n0hcKsZqKiZrREHL3YjiIAXPLnS71bYmgACRefC7XSHRXm+CskZjlksCBuqKbzPPd9fx5PfX8ch+PUnedh/Ij88Zc/X13dXF/fHa5ut1fXzXYXqgrJmaqaqMilmjQo8m5YwUUyBwBlLwUzWk2u12uFpTJZqshS0CwvrnzJFkZ/QcUItUQ1Z3mJbUEANnAKoMjkXbP120wpQsXGaqRoYFhAKVzGpMBmZipiOYkm02QmApo1JYlJY5KYNGWds8WUY8oxpjhPaY4p5ZQtK40xnc5HZLu+3oSwP+yb/cGFmoJ3gd3Yx4+IT8/D0zD0aUo6iG6rsK9D8AiV423tq+ACIvPLXlZqVsgZiHa7/U8//vinn376008/3t1cBefjOOGaP7CgSIhYHNAACLHyIcPUH0/DOHGcpApkcne9v73aT9N8ej49nU4PT6d+mkzk7vbmP/6Hf/zDD9/d3RyqKkhKhi7Uzf5wQKQs+enp2A89MDftdpwGMByneRjGpk0uKJJjcpVzRsDOiQELEr/o+FV1HIdxHKhpvGPP6JheROWEiEzECLz0IOvJUloRNHAIFXH23hVtgSKtDVW5dREBaYnKK4aKhmY5zd1p9CHttsaEVZtjfnx4PJ9PcxyIYNtuKu/JUFIaun4chnEYVNR5l9N4Pj483n/88vkX+se/n/5v/xeAt7Ag6otpZxLtxwFlztGToyyWRCULIjGjK1HNiAjGjGqWUsaUDUouCdJitHnpCNcRasFaSBGAC/vX0BMGBkRICstEE3QF281WtED4pTUkIyiCPIDS1yjg6/BXJthuwJC9d54Z2TEREKlhShqjzD6llERSyvOc5nmOMCcD5OArz57I0JLlBMhWZWUpIc5aug/WFQ8oyr7CNEUzWHKkEuIQYPA2cu4xnqfnD59+/jPC9Obf/6E9vOsl9i48cUeb72n/R6jfJUGZxpyTJM3ZNGd7ufcRFytYclzMWi9IzCqxLr3Sq1KGXsEy8KqIIViVSst3F97Mv3GUlCuIAIXxtwwrlyN5iYd6RfR+dboUFF4vZTEAAIxT+pe/Pf2v//S5Cp6ZSyWhaoiCkJhH587Bk/ewbcPtVfvTDzf/6R9/+tNPN4ftZtNSQT0MHBguvmBl7qu23iyl5UFipPL7oaQUGyCiiKiwERm/aumX/DZEc6UAQQMTJtjU9N3bq00dtpt61zZ/+eVz4/ni3UdQkJjFSpSxpHeRK4N0uzA1bN3ly/m2NJ2ACAaICqKSExoyEzAXEEYLM8UEAATLEkdEc4VUQSQApFkhk2U1VSAFwkLVXy5pUfmaIQIyXA5b06I6WkmXy01qC8PkN0XC1430q0nw68/Xq48AtpBcV9SEiJhYTXOKMaWUEhLVde2cW5k3v/szv3u8hm3s33qsP6Cah/F8Pj/EuUvzlHJKKc9ZFQI7zxzYeQQ0SYzgyZqKDptwe7WtK59Fn7rp1I/DlGJKZoKE3rEpqmTNWbKklFVBRHMWA2BGR8TMLwM5VCRgYtJyFsBLXoBpMQ1YwwQRCb0jKlGdxJWyCy/cPoCFJLBKmIicc1VVNU2j2BLFyksHOU3D2B2H8/PT4+fnq9vrmzfXt28P13eb/aGuW+dcGcQukAl+9VgvJ5ThsgGYAirqSqW9bCKvLsjlkhhcqnV9WQqkRbeu8lunLTAD0KLCsimm4/k4z3TXHXxwixu8oRpklSxWfLWLjc8sMovOYimpJMlRLhVMzBqzplR69ZSmSUUIUY2i6DhO3blz3g47j5ZRExYZGbL3zhrabDdTtNn6WdLjOTFK5VF1s60r7wiRyVA1Iyixe3knVIKj28P+h2/e/f2PP/743Xd3V1dtVZmI5VzagVW/VKBsU5UUc5wmMNOUz89HZW7Y/H7nvGu3TdtuTe18ONx23Zu7PubMTNdXVz/9+MPN9SE4NrMspgbsQ7PZIvE0TcMwqMHh6lpUYxyZfUppnOckIsWUn5lxAQ+zaFZVfdnyyw5VNGqqGayQgMHAHAAgMi2SinKxV2iBHNLSgalKBpknjbPliCpfd8blLHjFc1nAXM1xnrrT+eFzniZFPnXD+w8fn45P89wzwX67q0Mgwxzj0HVD14/DqGYheJHYnx+eHz8/3r//Ye9TnC5/jFa+tZhNMYPmJJmIxHBJrwLG4hK8lJfFpBJkbTuYmBnQjJaNZVGn4OqHVaaqCAuNiw0dgWdDUARkMGKzCwi+HJwgCLJwbGk9c5eyf10gpVNfHlgOeDJmYCrGgWoKKiApa0ogkSwyZsYZYTId4zyKiEayyoW2ckyZZVaFhJBL3UYGpEhirIBiJEYFBGFAgBKVAt4Rwiz6hOlxGB+T9ZXJ1D3FcfQBq7rd7Oq7aTrn8CiTVH+or7937Y2BgYoieARFdF83qkTwGon5N4qYFX75bRHzcuSsRczCV/v/r4i5rL8XXOGFsXQ5tL7+WBx7X3r6nO3xFL88znUDoRjhLfO6AtwIYiY0QgkBP39+GoZx0zT7jb89BOc8ozEikleFmLOKmAF4RiRVTSmVRVcwKgOVpfkXMwVEVEgpIhqiIyrsWFVV0dL+YumOVZb33BFuGudo4xwxUkrp+rDx7gWIZQS36sXKJbogMbiqmQqDx4rsXkyXmpyYEK1YABeYxRh5uahLVQyoamBYbsCsFrPEGYmp3jAhWhKZZB5lHhGMiDwu2lEzQFAzEc2ASC4ALXOi0huZwXoXfvW4lDLLgBpWBv9vfgoAfsd9AVhl5OvPM3O5tPM8P5+OwzimlHwI19fXF7n879fcb0qW/+9fUfitqZKqzvPQj+c49ynGLKZiaojk2AUmB8vwJ4ExoDewrCKm7Jz3qACVo7wVKfQ/REKXBaYpbYJvKhqmGFOOKc0RpSTeLHuRMhgzeIeIKAoCRRRQihhUMzBCMkYsVo4FYHEMWDQ+gMFjcF85XrnyfpZOHpjrug6326vDVYbbJF2au9PT/af3Hz6/f//l86fHp0+n09Pnzx/3h9urm7e3d+9ubt4crq/bTev88jcXOSniqwzQdXoEYGWVmpq82JeCroOgVcb5++u23MelBieQ1YXwZYUURS6amoxx6o7HLx/f//mf/6t32FReFJt270NFTClr18/9JNMkktVhNpOYZUoyRIlRJIskyVlErQSOZCnNiqpmTZERqqomAMspxxjnZIqWKU9wepzikMlT3VS7rXnvm7q5vSFfc9/3w9B
2kuJWGwm7UWEMHMx2V6/zA93E+Hg+RMb3xu8ADzwWccWh4PdBQgDPQRTjCS1YCJCMOBxRLblFQ9RBIiD4Fazvmmfx3sQwsMRGAknBLnx5xK4oJ3a3UkpMSU1Kg2J6a7Of/Tv/7zNN+gau5W+2XdvzgV7GnvdGMbGkYQIiZh1XZZX009l1LKVPaFScDMrGtvY7/ZEZuembNIQkTTuH3kCLARMtEtEdAjIJyGggSAsszCWfiq5tt26VoZTQgqkzDTzShjte7WNwQVQQRixjDv2vWbOIhRd3o4jk5sXHbfegpuRcygvwQglGk6HA+Hw+H15fW3336DgPvjfbkVDMLCjEzA2vX58/Pff/nw6cvLly/nL88v17WqOiIyC0S0toc7S0JASVEylomnmeeZp4lh4H89WtWzb6YOAdOcSBCJv2LXfk85HkVMPhxGwAOEc+JUcs4SiMvdPB8nTnKLHKVgYhIBZAJ1VVclBGISJgTUZhV6AtEWvVZtu2qzRBDIQITkEaFhGsgjsniIquOGCf0m141vTHwWFkk5l2mel+PpbjnMNCVH6mZuWvet7dvvoQGEeEC6Zz4xZ4lgcDRVMwRw6SCMmAKVvFAr2RtHT9HBewd1AgKP3qqb131VrR7dohk4GKDiqHCRHIuhkBRwA3cDJEyAiVmQEyNrWCiQdVJD7cnrKdZHe3lUDrvssa1Ui3QnIQ/r9pYi8g1nm5GX6fB0/+6Bjz/e/fSPP/zpu8en41SIoJIxWc6hBNXw4r5r1bqpdxbJ03GaMaVLSruRKUNvGIKR4DTP75+e5jJ92MIVJC0plUzKGDlUusMFY2PwiYvk+7uc59Y20ljmAzMJTnk/LPPshjKrpWvza9Or1o6CApRZEiG4kjmbv4GSwAFaxB6BbuJo4QAjdZRSSewQRKDW1OGrEwFvPiFVPV8uzy9cpJr1X399rr3dPUwBeL249bDac0EpfUomd3w/o4UgIjFOEz+e8pIxRQd3DOcRyoHiPhj6bhFhA+BJgYDkI7PH3VrvrXZTD7c/8qMGY1u162gXbxxXc1VrTYmwcr0NgYcnSURYIMBUb7EYzIH+1hvD8LCoqpkSIoEQxFssFWAgI2VJiJiFk/DhcDCzIvg58edPny06RBfmeZrujsvD0wOynC9bwPW6b4RRSgoA1f4tAtjdt7qv+86UACDcwa1b7LW+ni971U+fX56fL+tae3dmIBq3yKAy0gg2ySXJ6OPNAwzBXBIgIgMQBtwiNVpXVGi9X9d13bZ1q603MyfElNJtrkCUc0biXCZEbK0j0lSSJ9MbnLW/uTb+N6tHBwoUkswps2QAQYcA6Ko9tPYGgCWlCUlkJpRcjrnflINd1UzDTc3MBzgaiXmEaXiMw4WZMVHKN6r6IRUCxN5HWKGt2/V8lm2TdW0vLy+tNTPb952ZlmU5nU7H46nVJpJFcslTKdPDw1Ob5s8fn5b5+DV1BAFFhFmGeYc5EChJLnnmJIhwk4xKfA3JhDAHYA4zGqV/aCDi8OKNf/HN4DOGczBEG5my5JKldnuz65i7vRUxOCjKty59BEZ/85CTkSRg0brue7tc1ut1r7V/PZOJ8JD4fkpLSVNJDAGKauYYNOVyOpzePU2Pd5wyIIZ5uI3bb+SCWu3WdWw2iAMw0MOH4DQsPNCBbrPwXKZYlmjmTBQRpmbiSRIQjEdj3CIzxhAJiFFShiVqP3DKQJxymecDcbEQZJ7n9P7nH1NJb/eL7u281S/ABySBboCAwG6oCoCQRCz6ul3cXK30XhpfERlMIQyHGWsAhkkAeSxuBmbdHQPQb7NEH8BdCHBn07Cm7iCYmYRZzL3WNdDmQiWRD/AmIgS4W9embY/ojONE1ZGWYKrfZGHe6Jc4sPr/29fNonTzXTJzKdPpdKq1vjy/MPHLy8vxeAx3KCUCwoORcyrzNC3zvO5t23qZ9u4BTQFAmCE8QBBgKnNOmVnmMp2Ox9NpmeaSErm7GYCjW3SIWj1XY+aEX+eN/5u/rczHO3ftvbkqEUlJeSrIPt/NeZm6k5kjoBALJyIOJAwIszAjGpB6CvV67bZDIvLmfd+tVndFjww8S8mSOmsY1NaNKJXEkpAGGdmHM/krp2+M00vKy+l43+Hd+/ePTw8PD6e8ZEu8976/Xva+13qpfQu/aS9IKJ3m9LTIUlgyAFrntodqjbBMcEyc1cHATDSOgKcCd7bZ65fP2+Vqu/Vqte21tlY3tQbghEAiCZgij7w6kB4T8sKSEFysEXbkEERmR1QDqEiWMoXTevH9tW3Pr9ff5PLO5gLWn8N2CC75wJItxqebxyDw67eSUvr+6Udw+Od3//Snx59/ePp+mU7u2Op6TaAWKWHd++fX18v10q+XbevmMB8eT4+PecLtvDtbTqn0pdDe0sXSy8PddH86Aea1fm4V7h7m04ysv5HXGZ6PsO+uLjKdJr4/HZ4ezeD1788V8OHxfZmmuIjUlkS2vj5vv77Q587BEZPBLDl7sHVT99p0b/28x9cJGaAzGoEOh5wkRmSCKaVlLoCYtnrZdlt3VbupFd8Uib3ry8vLx6wES7g/v6xqJhMB8rZhr6QKh8CHmZeJh79ibBSIgAgyAYWhBwRmSUBMIoGMDuCm5mq2d+hO5gxBADxsQe7eaq9b1R2s6+9PzbEw0h6AXXXYM97yWsPMCfltwk+EADTG1yNhJ8x767DXHcZNH4ARqtpa661pb2oGt+RZBiFGcsAQwwhhZkZFn0t+eribpumnH95//vjpf/z3//788oXBksTT0+nH795//+P3DvS3Xz6ue9XeemtpPiJSrY2Yv9YxQ5b78fOneTowTaadkK57PF/w7x9+21r78uW6rupKZtHqDgSSqFZe1+vrK4N3N2utIQQOKv3NOhaAQMxA0VT31s7X62iLW2/bvveugTFMsMjMSQDRwwkwizCnXEbvi4hQSgaIOiSDNVQVAP44hRknLXqkwIIyUZqCEgZieICPxNR13SLCpmlellwWJjycz+4eoKrV17X3Vnsz07G7k8SAMPZZTXWQSIVZJDFySuWwnA6nAzG11i+X65cvny+X1+v6LMJm1LtPUxJB8961khxSZkBkyff3j4i0bvlwOP7lL39xVWvr09OjpBsSHglzTilLazps5kSSUilluhUxrAiQcwYE7Wqu6DZquyRtCHGrKgBiTklYhG9GJUQi9qDWIxwZUABzkpSkKtxStoaI0p0II3BklOGI0KPbiG2AJZCAE4e6uV/X7cvz6/n1el23wcUY74UJT3O6n1PKRITgt3hpYU7LVI4LzwU4mQciEIzpkd7CYjVMx6A8iIgohmFmSI11tKcezCzEkmUhBqKtabPetbU6rnVmInQEIECBCO9m5kgQghHJNdAQ/VbYLNM8H06cJsqJsxwe7lhuxaWH7e281pdciCIF1FvL5SPkFaqHeVfdwx2iWk81Mt2kpMYY4b31fiuTgdxiTJ0QxuZVgJJFqLUAJyZwdGPTaLX1qtYdAocRCdlLITxkmpPkhDLIAuBm1rvpHtaBHSLUQ6tqb/ZtWHJ888/vgTpft0iACAEY
bzpuCACPlNLj02Pr/cuX5+vl+vrycjqdICAcmG0MDB8fHkuefvjpT2utX57Pf//lw8fPn5+fX/bWmYgCzFxI5mmZp2UqJecswkOGE6DgBkFxk6snRLEOvTkyJfq63oQ/dpUgZZq0V2sVEea5MOFez06W5sIp1d3NnICIUFAIaUSLjsDBwV1w1b41a8TknQACPZyYs+ScMxI5YhBjQpmKMSMAyy1YZNw2o9lkYRZhZCamlOdleXh6CM53j3fH++N8mmVKnbGjBzlyzMeyHAq9CRUBIQScw8DQDELCODyHh2sLs/FnIQ+x931O3534u35R21E3CKjg1W1EkSM6IyQhZBBBRpKBWA8OKM4Lp5k52LeIHaEzBoNTdAEHQMMg6Oi76R6A7byfrXrOEXEWrnOJktmJHME5I+cg+TYvlZEPeX46Pvz49P3T3T14v5w/bWtv9WrtC/mWCNh3jmvh7TQHs3RPaZqFU+8jExrSlNMss9zh6YH0/u6YH58ezfnpu+/bHg+Px5JtPf+m7fX8+rJvV4fI05TuH+ThlOfluu67VY4IQSwClSMhFYoeu69asaJNKAc+FCTsdovJdnIlN3rLjhv3DAaAWzjFiBfOjFNOh7mMklhNr+tXek8QAwKiYxIK8G5aW3eP2tXdm3bE6B1HFlspIURLpilJIoJbsg1EOLjFzQZKzHzTRgLcRuIeFrGr7Q26ETklGCMaYMbEmAVRMPG3S5ivdAkws9uTFhDGU5tIRIiQBxZphI8hDD4NIoIRgPdewW2UagjQu6q2t2RXswgMJydwQeYginEy1dox6l6jaz8djks53B0I7Px6j9h671Omw5KPx3JYinqIYIRpa6o6EeIwtnzzToaled93dyQ0s4wBtKujqVntfVvdjJiyO2hXAAfg1vh6vSb2tq8Qsb89scZPMwuAMJOWGTFUXVVv/A0HNW29QQBnHvAnRLQIcFd3H54awpEHFd9oRAaL5Q0O9L9pxRAIqSBNgCmQRxD6LefpBqLqEWEhAcHMzMKcmCTQ3ZFv1mVgJhaRG6xk7HRtqFgiQliosBY1Ux/xvLd8Nhymnm3rxAQgAGnMqHrviDhizlhknopIvrs7nc+HZVl++OGHML88fzkel/yWnYRfCe9w4yeN8MkId+3o7r0PpYzkJKUgFkEgDERorc2pnC/reV3VPHESJiEkHLcgAoorqCo4lExTllKKJMYdf3+sDf0ukAeam0cQ3FJZb8QPjACiwBHnpqq1teu6XdZ122vt/ZsAyHEmeGsKhACOHogkKfEy8zyBpAAEC0AgRAJGjMBQ8tHTOiLYWCVqmAeEoo9gZVWDiOCghKkUJshmPbxp2AA0aTj7cGcjECCNDzHMwJwMUF3DCSizAFFmIURGzEKSGYXkm1vG3Wvb97YDVBYHqIEOjgAYThHgYB7q3sBhBM6FOQQKBZE7RXjX3tXcx2ero3hjJL6FkDK4e9tXc0UiDALPvdt23dpeXS0cRJIkzoUTCkwjZ9khOALcfERihrUIQwcP1269q/bupt8+/D3AHGjk4wZ8jV3DP3xzXysdcHAWOp6O67blnLXb+Xp5eX0dfgUiE85lkqnMy3x4CmzmT4+XaTrMh2Wa58u6IYwOEZlkylMpUylloLNGpxEeQRlTYgJC4iTEZAHdCHVkAjOA/6/DGBFCICqEIvn+7r5q29rrbjvKmCyYaSAwIwm8zb8DbISgA4D5vu6qKiIshmSSJJdU0pQST4eiQlfTShgAXSiAyQEQrCv6DcwSYMiQiuScGSVxpiynh7vvfvqO52ueC08UKYZ3EzAI4XCcp3/88ed/+KFM5e1b8b3u2+VqV5Aw8oRBBMyWdK9b7007oXGhMh9O8ynndwf+sXETO0PfTSPQcyLG5FTCYqQ/IKJDWIQzAFEwIZMknnIWTGhoGt3cDM04FNERDMDIO0gzCSqCjK2utl571ytTPy5YFuHFqGAwTiWFsN/UdrcnDHaNbdsvX365vv726cPLy2XfnFEfZrub+FQOwohkdw/zfV6M5qrT3mXb6uXly+vrx7U/l0UOx+V0ejrl6W7CacJU2JGPj+97s+h1e/386y+/PH/5eL6uFRSPPB+X5e6Jc14v7fJyWddVsu9bJZS+7xYdFsJILuwR1j0EUgZGt72GJ5immArAhNMR6G08HkCO6ABqY9ufGAvzlHASAoSeaGXEt4OPGdIQcQMfZrm7n5djSXPRbpJFrQN5uDmwg1toBDNKIk7IBGjaAuwGz7uFLBECBgogut8S+sbyIbrVruerbrtR4CFJEgHiKcv9aS4J+15PhyzfFMpvk/kY+9CR/p0SJRFEvIUnwnCREBAiBhHlxIQkTGBm2kzb1x8zbDMehuAEHu4W1gzAk0wF3MO112hbrbWdXy+lsPW916enp0cEv79bCO63bUuZhEO1Xi4vrev1+rpvW+vdzQEGlCK+gZYBEqaciXnbd/eOMUUAYPVQj1AN7ejOyOQBYQYYYaG1Xs9n0LpmJiR30D5m9V5bN/PrOgb1xIQj/djc3yI3wyGIEJwYEZkDsbY6JAMYmJKg+6BNWFdAVNWBh2mt172ZWcpvob7fiPsQKacpyRxBpkEpkIiEb3JXQhYCiCTEHO4a4b1r6xqwezQWnEoiMncgTgCoauBOCcccrbXea2cisAFIeCWG7rukZBqtqYgQHUQACO0mkmBEzKUgYq291pZzfvf0lKSots9fPjPx3enOu/3w3ffzPOU3ASlAuKv7LZXQ3brWWjfwjqbeu+1ViON4nO9Oh/uH5bAsSZIwE6ravu8v5+unL8+XdR9S3FuwOwIiBSQA124MsGQ6LrnMMwsjvX6DHrvpcoZC2M0GrxAAx2grAJhlCMha19Z1pF95hLpa6NeLzCzWXT+fq5IDp8RYEpckPE04LZinCHQLGnFao9xgfkudjLFUqdZ66z7uarrlprpH9IiIrs4QmAe7M0YnAEgYI4A5gmIAMgLIMALQIsBUwzxqEDLLIRdKgky9tTVetbfcMifWu2vYbdIfAF2j7gamImMnMdglYIYRY3JhMDTooxNRhCBDC8Igwxi2fUIUdEQORE6pMEkAERIyqbbo3FxdfVznDh6tQ9dhUidmTjQlWhJNCXMKZkf08DDrXZupAvhNXOSuTbWr9T6uqLfHC2iEWtCYoRphBI0zDeJN5BtIPurOQAhwZJpkOpyOx9Pxet0u2/Z8Pi+n+1QmUMsCyDJzTlwIRTh0irvD3ba32h1TMbVwYCACRiQNsNog8OZIhyDilMo4+elGwQRD7RGgGAiZENHHFfAHYa+rgtmYpBMEE6acTDXcPVTV3YGBEChGhy90o1G+dWCmSowoMozKMnFZ5mlKKaU0cTC38K46tJoOAGZatbkloXkpiChZkOGNaipCORc8HPX0eFcBkQkT+W2gQyXLcZ4KIhznu8c7Tvx264fvatdmkMgo1IcmHR1to9awdnc2DjQKYZu6VTLfgK1kWJQchaUgDKSODYWRmQ3hQ4AAJyzMmSgBioaEYwMck1hAC0dPHAdwsu7YjLURRqYI0L3WvdWtXoWcIAcjpYo
CQE4gHd2/yaHT1r78+uGXv/17bkpEf//04fl8qRtMDP0O9TDtWQ+Hw3I63N0fTw+PPJ26L+eLffzwsl4/A+wea8AMGIRxmJeff/rueErme9e2Nz+/nr/89vry/OnLl9cvz9t175Yh3VGSBCG6x8uXy+vlCkRSckC4eTgGUDA5otVoTa+XXRlO9wCLSk4Y6NhBiCgBp28tcOCAAEw4Zzod0jLnOae5cCkAECxEUkQerrVbaIATAxNnTIcplSwyEo0pciF2YkZEnCdMhMJRkiOEmXdwRvAwCIPboToGMRhBDoSM+Db/Gk47ZGJGYRAMgkgcRZASR1DJUjPubMv0u0h5yPdyFnckHFYahyARyTkRksXtygH0AYQwNCZIjENUaWCtNncfgxEIhDAAZ4qBi3N3RhTmkiQxM4Zrt9a62nrdvny5CKOAYuwEOs2Z0HImVUDQtl/PZ1erqtH2NVxHgx5u5qO9/v2FSCmllFOERTBjcQ+1Ho6MCAzgFMTMHIFORBipUClJBhXXcaTPE8EtPxzDXc0NulMfuFmGITIKHPqNGBFuzMycciZErUOI4kNFCDig4WauQ9w+hN6t994tInJ8xbx94+YjEkkiCQCHCgcRiAUBPXwAqQOABnCnbwCk2rV3iz2wExELSNDI3FWNWg3Cc9AohsO81QaIxJLyNm1nYtv7RThBMFFKKROVCB0SKTVvbUfkaZ4RsVdttbfW3C0lYklTye7Qe7emBCHfzMg8onftXcdQ/U0x1bp333bfawzdqTkhlJQgp1LynDIRBMskQxZEl3Ufz6/WVd3cwzx6i1pNm3KSktNUMrGML+jrRzqGiwM/ONCfHm4G7hQAbopIZUIM6Npq67VWNUVG4lHE/65YUvMv5/2Xj5cKjCxTkbvT/Pg4lWnCacacAzCGswIQbzRzHhxlCkcz6AMldSvrxthCWAgk8S2iFRzd3CNMDWyMXpiAwsO6QQA44Ci41Vtvrh1Nx9YMRSSRCIsIEHZTbY7gaM0Zbbt+NVu4Q2u+baatikRKJimIyCPqrhCYkjDJ2NsBJnD2jm7h6ERObAMQE4gj2ZoZRdI0LUyi6gCBBOE3XG2Yj+xqARQCFpqmlCQRCQumjCVj4iA0RI8AMzdTs3YrKD1Uzcy7elfrvfe3vO+vdcww0N3A2m/uM4SvWURvkxi6AYMQiYXneX54fATk2vvr5XK/bTkXJiJE7d3FOKMQg0dCKZLnspwOipRaU1ULdbfoNth4qja6PnQEYS5TyTkLEWPgSFpHEw5RKI5IJII35O43L9m3NcLR3d2fX18sjAEyse1NHa1xuCAK3FDZQVPyMai/idWBECXLdDdNx0nKlKecpyklQeYB8bz9+ABwt6a+73Xd62UtU+Kfn1JJ83E2DSIOD0JKkqacpsnyVHjaHcEpNBwhCqdpzkfKfa/rtqacvnJiwIEq8AoZgdysuYOBOAdOVaiXXUOdQqFd+8v5c3euqNkLGxzLAYMcvSyZOBx679vaLnU33y00EkhKlAuVxAUw1cBt9xawRxgwMZAgOEma0hEsbbB5Xwk6RgyuVO+672271iwYxxlxnJmt9S3YDvFO0f1t3Lev2//4P//b//1//7/O//gPp8f7KqSClAEwutt5b2tbuxyOP/x49/6Hd989zocj4LSudnf4jLB9+fJ/9Vdj9lb3z59+m5TLP/3l/dM7ovr68uF/fvzvv/zbv//n//zb8/O1GwceJTlmU9St6+V17dV+/fipgd5/d//49JCnQiwsC2M1pXY1W2193j99fIGw/nh5/93xh58eM9Hee1Cf8unGBHzrYDwQAacsj3fTjz8eH+6mSYgxwtTB75F/lHuU49bt8/PL+XLe6h4GGXNhIXWrVZMixTxBRCoijFTuKRwFPUlE9K1Zw84MSYLeysEAUI9NoyuaATPOC5QUgoYAgZyF75ZcmG12DCjMiQUlOXA32BOwrpm+GnmAEKdSTscDgNRqiKv22zI4pwwIvdcxB78BwNwgnCAYgynM3cBVq7sLFWIhgjACR0LOwm5mqon5eDiUlAY+urVNezezXmuvrYV//tgIV6F+OC5m1lptdXVrbYfrVZblgJzdICecijTrXdUMIuIGNn8ryJhpmufDkoQn4aN5rNuz2k6IrlBruBERD3cKJ8yFlzkfD3MpQmMgprHXqmpZ1cOZQJ3CbwgeJKbbw+1mBgsMZsqZSynzMhHSYFjd7J00mhhwJ4ToamZm6iNQwhRu2P74GoD81SkDzMIyEgNUVQGER1aHBaIjDV+qda3bfoYg1Wbeq+6ALUkeW0E3r7Xum21bA4jBiYYARHxDRbfa9toTrD2uiihJluVwNy8HkbRtq6kCQOvty5dnRH5P301lQqTe++dPn8PsdHeIiJeXsxus1+qtvXz4xfxhyH0AwD3qXlttLIUEbxbqcFO3bYetYvcg77itAd4Vap39nSwL4mDC8SHx8u6xP/jebd3b63W7bnWvutf99bzt2+5qc6ZcsuTSbVR9w+F0G5UNgeJA3ql2bT3CBjzP3XPOkhIC7Ou+7XVrtfeehKaSrhtA+Ne1RVP99cvlf/79826EnA5L/s5gfno8TTNNhSRBIHi8dd049HMxBBJEGlHdq0UdfqRAdBQmQMkimQnBu6qH99pUtdcW7lkSsRBQaHTrvXdCCgCzsG46yjEMHtRrZniL2CYgQSAKsu7eI1y39fdJTERrvq6dYBPRnGOauczZPdatYnDiA7G49wBAyOGkzbQbhCJpykAciAE4+FCjjJ+maUEg92qmN9zD0CYZOASSC+FhKjylaZ5yKsKChEGdWQF7OLhhAKgOjxs6gHu03vatqRoid/UBnPu27ofb2EsIBdHNSV0hAGlQBwIg8DbAAxipMhABkXN6/913xPzx4+fr9Xq5nEtOyzyFk/XdWLAsiAbdoxloJEiH6UiUdtFt29d23fe6t7pt+/W6Vu0IFIgRQcQp51ymKRchDnNCSIlSplLwYMyUIRxvzOpviphaK4z7V3t/eXU3ToQc7urBBAmJBcm7a20BJgSm6mbgQYApyXTIy8Ph9P44n2bJWXKSXAhRHaxb37ppIKBwKqUQwGXr6/P18npejtO7n55SybNbbxYGAMCSUi455Zw05Zxy8oEsiBCWuUwCpLp33dfLdr1s9naRcfAM8wGWBCmCPAwByUKQIWVEVkBAQQhvuJ233l+C5JSPc5Y5F+vqhHNZ0iyUvfmFd6Vrwwt4gwSUmHPGxMDqoWa1Q1PogEiYMpC7G6UiJQFmjN1Ne90RIefizLfI+QBCFGImVGwaa4uruDhqfDOJMbN1vZ5fni+vx7QIT/dFkkWHbuvW9lUDzdPdj3iQ6X2aH8p8gCD3uhzS8Sj3D8lknh4fW8PPHz49f5Hz5fP9hTG2lw+//vLv//a3f/v33/7+uRufHn88TEdT22k/03Nvummte1+vVRZ5vH96vL9HAG3NDcGFIpMmWzUuBpcwj436PqsrYlC0HqTA/9U8Mkp6IsyZjsf89DidFs4cqruHA1GaDofju9rhl1/5wyf8/MXrphQkgOxOFmSQmHjJSJ
QSMxItRAEEDmPZ56FuZBFAQjjGdoHQLM5bv2y675ATPSEQg6QRNQAIwTPNiWDosoIIyYk9WBwxpGfJid8GMUBE05QPy6SGAGQ6KRuMcDARgAgXIGcZslBQBzRD8rAe4EQwVC4IwRJJkJmAg4BlDI8UnKOkcn86COF6uVqvCCYMTOgFpxyqjuDgHUEZnSQg0BR6RLhq0x1BknOaljnf3R2c9tqi3UyG+MfvBVNKd8eHko+Ec1eT5O5TThKObffeh24ex8YuFy4llZxFeGwejCICtKi7I6EIddURjQSAQ3pCJAgUPsLXnBiZB6uB4dYI3v77puwPRExZkEi7h+NYhDPJgO/D2D78l9U4BlIkYYDovXU1RJBIw+HFjAgYYK1v58szBLW+RRi+mYdjnNjIAG7at7V6GFIphYhgqKHdw8xMu1rDbq3tEZiSAvHhcAqIfd9bqwBxWdfPnz8ikjAflsUseiuEoL2+nlOEX69bOC7z1Wp9+fifQKra3ooY3/Z6XbdpIiJK7MMGDu5eOzZjEOEsUsBpPa/ongl12wBh6KJLmaa5LKUcgbfFprKfr/v5WrXFx/11u66MgZFZUgBd17X3uu9bq/u+77U1d2fkXMBvMnbdtk21wyAAO0jWAGSkure9tbXWbgYAfqMl/H6OdfWPL9vfPm89KGdTxIOFp8SlEAsADqGuMznELVru9kUQIhqABjiy45glAgERSpKcc5oYIQwhqpreZB+GAUTEgyfj4THM+bc1rZvbmLYN/JUIWKAach/znW/RRaG2r1f3r0UMaPd9a9qdmQ8HQS5pTg5g5hiMkAVLoIwIpt6g1+jdI0xSSJKb6RIH+ifJLUmAx9U7coHcwSxMY6htrBsjTJmzpGUpSTIgB7gGAHlENw9wjEA1VMWu0D1MrTbd9mYWKbFZ1K6t6+9O3mHNDxwgaFALGPqeGLJteAMP0c0u9fYL7sJ8d3dnqs/PL9ra5fI6lTQlQhEw8F57Xa31Wq2tu7eOHpnEBVypsQ0w9JA7B+AIv/IAdzB3ja4GpiHE4U4ISSl17C3cGEFjoZTsj9UYSG0dEENw3/cvv/zqvZ/uj9M8JSEWmnMCzOjYWvfWu6kTae2hhh5CPM/T/Xd3px9Op++O+ZAHGpqEXcMufV+3l48vtmuR6e7u/nR8NLTX/dP6uq/nXZKw5GmeARyh1erAkUsq8ywpibSUpGRBEgAC9ymX03KyrufLy4dfPv3Hf/z1Pj+1/XbzC8tDeXia3xl4N3Pq4YSUSBIVCvO8MasKQ+9Q1/DWeumePRcBh7qaOeSgOS/zw2Qy5f1arts6dd0DHAiCwD28rcPTUMGcgZGYwAyi1W6pTFkRWFur63Y5X4hQypEpIXFKgkteMs2TZDGF1eIC1FGM+C3uZBRkSU7vHp5+eP/49PB0f5ceHhTw2l+3bXu97L2rAUG6u+6+97Tt2QO0bdv2+nr+ZHG5f8iHh+/f/fhPL6/15W8fPr3+9X/+e3r9suj1/PHXj//jv//7p09nVbx7ePfzzz8d7+97q8/bs50vl33fqTd1YT4ejj88vj8el88fftuuVXvybhmXRfQaWjg/3T8A+DLhIS2JJ8EyIQUII/7hEoMI9IAwNzU3DWa6eyh3R2SWCDMHlrzM0hvlOBbvtG3Pe3QziGChDDwHzcw8JyqDwU4DMjq0hbXHXvu276pGEUEpY0KQIGpdny/68Xl/eblOhSBNZc6HRXICN3P0YIBEAALBYGCO3QenDpkwZZH0O1SNmZalTHO6XhpCzFMxjt51mI0AY6R1sjBiBAg7yHBB9S2cci5Z0OcU4XkEJQljQKLoCtrbWFDNkxyXbK1vl+fa6unuOE8HFqx1Y+ymcDqUd+8ffnj/dDwdPLTW/ZqhNQkfmHBm4TxNkslROF8/fb7WVkcA6+8TsggzJ+K7+/tlvm8VuDXAA9FyXBYh6TXq3rd9d3NJkhJJIkQ0tVYV4zY4KTm7GyJI4t6k9j7i9BCplFkkEQoGDS/6DalAPpYmEHDbWMPtyASEAW9OKaVEnqHn4K2NXRIREGGE81u9c3svHmYNwMo0AUTXfWQWiKSSMzHnzBZgZntdVTsEaQcAX+YZIPU2gh8kCeOEvYLqS+9tmjjnPOhYb+6auCW5RKi6mateEThJZpbrda11N+2X6/XL508RYNqmaUagZT4AWG2r9ta19W7C6Xi8t7p//Pt/kGDrt3PMzK7r9nq+mg4/MCThUaSBOQCl6TAdjtP9nUG8vj6/vK6m9lwSE5Wc5nk5HBbXZTmepsM0L4fj4e5hb+fzCt3+Fr+2upcEEIZIXf3l9fO2X0O3fV/Pl+u27b2riNydDkRsqtb6tq6tNRIGgG4Ba72cV2HKImq+7VttFhG1N1P/Vh7azT9f6odrpAwwS5TC85TmklIiR+hh6g7o7CbATIkhARAGRaiD31qcxA69a3gwc+Y8l6kIYwx7oGMMDoIjIfmwJ8ZNqOxgFtr7sKEMas6Y4TW4EUUHd3GovYUAeejH3E1fr1d9a5KHtLrXej6vRIy45InNB1AuMeREiakEiFvf91o3bTXMHCAoCecpF3bvgDQgMSJCDObdPcwtwBHRA1o31WAUBOq1B8OcsRSZJyGi1q17V6/gzVGZIhQj2Ey6xd6idevaa9fWDIA4cQA09XEH3QrlGJ+DDdyze4D7G+xuKF8dAynQg3CsoUfGqAchzVNuyzxP07Vf1vPrRfjxMNE0MZJbWy/Pbrhvuu7amoZ6OIJGmGNgSmWeKeVS8jLNJ1X1Ee3sZuZqDjGAjkjMN7Jy1ba33gC8hKVlAe32rV1cWu+A6IFrrc+vr2haTnMZoya6+c8hwhEoINR1b22rvfYIyFM+Phwef3o4/XAs90Uy3xA0AXvr6+vl5cPr829fvPtxOR7KlGgA7RLggFGycEqSVDhlDAKe0CUGYJpZkkjOmSlRcJiVVLJMW10vr+vHX7/89tcPX94/jySFcZMnTUmTQ7/1WESICUQgA1kk5QSQmZrERt0gzM0BJGdyxDBvra0plcQPR+ZJUBLSlFjdTXEU8mGBht5JK0JgZmLicDKDtpt7N1NmQYRbVY8DR8wkJMRpynOhPDMldWyGNej2jP+2uiTm6W5e3p0O7+6WuwOKoBoTRPggjGFKarr3vu69XHWv1raX6/XDl9e/Xq6fksBpOX3/+JjoMi/x2l4v1/+MmvYv+4dfnz/89uX10lJaUKbj8f7x/r7vzwi07gKNWjPtGurRvW+tAe3P2/Xluu9oylOa5Z6oaUnTe5qIkKlPR5mWQyk5pRJMcEgy4zdrC2AGIRAmAKzV173tDRfnVEKEEUiYp6SG5CeMLfXLhBrX6uZYEs+ZJ8ElcZqYyo19wRgESEx+y+OChqHh1iOQEBgxHGnf8XqJ11d9ftmXhVtPASCJS0EwBxvyQYpgc9Ye1m9XMN4GJ8S/D2Juk5h5ynXX3h0DgW+Lijc9L72BmHz8EAQDb9YbBPOUmJlI3I0wCBXBIQBRmYwEhrKj5JIo1Gtvq5tmOR0PuUzJLU3i4Xo4zI+PD0+Pp8NhDrDW8jKJqjJjx
IiTTJIPPUjK4ijnSwXYIvwPHUyA6wgXYkCCuOHgcubT8TTlKQy2rb48v9ZaB4aQRrnQ3Q0QgTgwYghdRGRAYA3APEADgIiEKY0FNqGPQgeIDRwJw95yb0fSYAB5RLipAiKJII30dsg5jeAkgCCKAE9pdLG/l2TuNmLkBiUINcwUEMQJCZCAHAzc3ZppBIWJCItkRHEDBCAUN7Kurqq9mnaMmYlT4oAQFhtPyLGXsjBzVSOCWtfz+RkR13Xbtr21fV3X6+UVAJmw9104hVtivgit10vXjog5lbZv2uqnLx/unx6+TpTdfa9127bEKefknhCopCzoTmyuvQcpiHMwACU1Pa9tq5URcpJpq7X1iCDOaTpMRdIsJefMsl3Xx7tDq1cCFWFk6qovLy8vr8+hW2v7trd9b7U1EQk3Eeld931vvauZMCLcUgZ770wIpTiEqY2wdlX7mgn59tyHZmCIx8N8/3h6993949P9Ms/CEgY3bCGgmbIRcTh5MAtFQgUzBkiDToROQIEuRIIoAHTzgkcAIiAPWQOID4GiBXAAgptp7621GNESAaEjRnYMG8jc2w0PSCwkjCw0oifMtNXf4yBgjEvcTbujh3tYuPrQ6DAwgmGYECmyKbQG5mQe4QgtZHMLvOXz0W2uEQG31M6b25fcwAzdcTzHEZyQUsKUhs/kxgG06AA9XEPDiMJDO9QG22619qq1967dEAHQtPV977X/V0TkVzAb/P98EUAgBL2Z1sADGRLzXMrxMFutre3b9dJbC7MI7Gp7vbRmddfaowf2IAt2wwgnJJFMmCLCik+zDSqRedjwEKqFRcQIVEKEwNDwbhaINgiI7vhfJzG990Bwx966ubGQHKd8fxAjCgQ3hGDgICgpWdd9r/tl3deKGPNpvv/h7vFP98u7ydEDHQnD3TZfXy6f/uPXT397vp6vQpyI1KpFI8n5sCx3dxR+mJdEiQIRnDMcTqnc04r7pW85FyJOKZWUE2YCAjShRMCt2suXy+cPL+dPl/1lc32rLtXbS9s+bjV1ZfcExClILMh7BdXUVQJKxgTY7rgyg3oEpGki5XDbr+fturZW0zTR5OuL7VuQC5t5B+vQu0Ow8OQQGoRgyCIsiKLuqhEcAcYSZcnuRwAHgDxPwULQEnGayjQxzewChm7khmgAXU1vVIjbxcMzy31J749Ypva61r25GSCEN0E/LqfTnLvu58sLUWKydv3t+fU/f/3yP1p7OQjnmSVskv7wBMhepk3CBUVwDi7G4ZRqiIWEB/TL7NcfSip9+nXv57XWWtXt3/4//3bIi151fd0+f7pImf78z/9QHu9KIVR8OLxPkrtfHCtNQZlEOArZ4uWeMX0rhsU54zJPSdK69Q8ftPfL65kf7vPpVI5zofBu59Ao5HeH6O9mSfRl680iMc2MC2PJINwBUD3MovXAoJwEAMOdQTM7epiaupt5uBv4ulq9mlYHDYpIBJlxSnzIgM4QaI7q3BXVo5t3C3e8Vfnob6OL399LyTJNuZTeqteu5sAsTKBv0IGIUNUABTD3pn1T3U0rY858KCVpYtXQrmrVerh5qBFSSWmZy+lwIMB9Xev2itCTYE4xFTodJpHl/liYYFmm5TCfDvNUMjNGzH5/JKJ5nom5qjUNddqbl0Wb0i+/fgEIczX7XdwXAWZRd319vbYW1kdzpsyFiedpEpIsUre17tu+b+bOzIQUxgTMzG5h2CxUXT0CkAEDQCNooDBcwSLCFMIALGVKaZIkI3cAAdGVkBDJVN9QFQOyDsowuPsQCIAit4N/tIc5T99GKIz6xsz2fRNmYcYyDVi7uYf2oc2Dm6CbIqgretAwXOcMEMiY6taf19fr5ax1BwoRKjlNcyamlLOaEQ11hQGEmY/hkEdf11czvw6Q377Vtnftwim8YWTmHK7PXz6p9X1bPSznnHNudbXe1u3arX3Vw0aEdlPtAUYUyJhzPi3HbO75fDm38/X1srbdIB8mToWyaG/NGmprXdet125IidOEeQXkPM3MfDzM798//OXPP0yZrtulLBMzd7PX1+uH3z7v22vvDZEsorVGBKqNidzd1DxAUso5CbM7mHozhYChexYhD0EMMwvAbyQxgIg5p7tT/vmnhz/9/N2ff3z3/buH+/nAwaYBY04BARCKBtAbUWNKDIWMwCdCZNqaaUAiCkAmQLDQ3YHV0QJHyCKnCEI16hHN1C1IzRG0t95b64oATEwBaI5qPGS0JGq6r631PlRbIjjldFxyFgJTNP/9vQAIURYqiYgoCzGg1UCGRCII4Vd34zTiyRgAhvy6W98u/XytOcvxOE0zJnYvEIGQCHEw7k3NEcE0EBlB3IEIc5mmzCk7MVhoBAQ6kmO4wRhtByGpeq1aK+zN9tr3tmvXcADwukdv7XLda+2/P14QB89k5BuMqw4C3jIrYhAqIP44VI8bdQARUpb709Fa+/ylttra3ltTZPDeLy+v27Z3DXNyToZJMfXgACSihAw87AYQABZhg3z+1sWHh5qGO0QQhqADmPtO2CV5zk7k/yVCQYDQw7WrmXGSMufD/en0+JCMoVvbq/XByrRcxD371iWCEaTI4XE5vTseHqa8pNqbOyCRdVtft9ePry+/Pq9fLohRlulwN5dD6raDGwkej/OdxPG0zDmxUJqEBdIhlRN1rGtblzx1U0QSloRMQcBAyO7Qdj2fr+eXy37dtf0+VnLzfdu36+pTxESSWJhSIiZGJzREBYZgd2SaD0xIdnVkJ5ZENC1z3ddau/XuXUkkxxHJmRqQGrpKWHailMsRibRXAjtkJkTtft0NcQopIsJCuSSIGQE8XJIoBrmjhByIJlBSpd7JbHjrxi72m+uFEIRYkmARmIR2JiVUEIKlICU6zpzFTK/r9sws4P3y8rcvr3/99PprWM3zqed+eX2tus5LgTgWYajRm6uCpDzNgJKFoe3ndu3U15liWg6s/PnzZ2xWsJjh84fzLu3AB1KJHkghQPM88Q/vJPg+nZi4InQQxRYUlBJMQEvI/Lv0imjMQlMpGZFr0+cX23Z7vdDLy/Rwb+/u4bTYkjwLCNM8wXIgxQQLdQ9BFPQSJuwsEcPh32NfFZxiDhaGcObIBYnIGFTJAgd9ztXRIgXOSQ6ZJ6aMwBEUxCQQY4Q7Wiy3cLsJ9G/nFn6T5gQDPb5vpu24zATp5WWtoYQUdrv/IHAMayMMwMIVQgk0cWSBLJElOJAQhrxiDMMBPXE6Lvl4mE5L6a1f+lbrNUKTpJLpdCjfv3+Yl2LaEVwS5yxTHtHRREQQkXO5v7uTXLrF1m3d9LJ1vtbzpY7nvYe729cnDCKKZKHk5tp1nCJEmFiSJEJy195r7633qr2ZuTMTCgUNIDmga3SLbuABaEERhCCETuQYhMDh4OYAymSMnAVyZgPxADc34EgpPHrvbm5uAbeBUbgPccwNBTaym28a63jrIL/B3jho1y1CmCULEXOW4VgxdxS6ecw9PAwcARicW3Xt7qEQwBDrdXt9fl7Xc06Ui+REwiTMPrIqmQHH7Anxzb8DEOZ9pEt2rb3v+7b2XiMC
WYQpjSRv8LrXWve6XwMDQSHUtWtv+7713v8X5wh4eKCL0FTy3fEo3S6S3KPuzZp7nmaC6TABU3UMQw4EM1O1QEkrpRKcInBxz7kQS0nyeH9UrfwCwWyDiNubam9dTZ2Fhi8JEUaJFuFIVFJKWQaWLDzMIpt5BDNEREqpaKiHCJv7oOmPN5KEHu+Xn747/fnHpz//8PTj08PpcGDg3nyQu0eHEH7DkhiiM5lAkCd0CidzNmO/pYJQBJqGujsZSCASDL6kAaN5YUakptrN1Lz3AflRhcAwIAdvjYmXw8KSDRhgu8RV1YAiKNyQICIxAFAE/TF1BNwZYZkSc5pLSsyh6I7CyBgYNQIiknsarVE4NrWt+rrt23bNOX3/3SMEeQoAoFGgR5h5bc0skMLUw/GWg0EgMrIvLFDVLMAtIkItzMJiuNkDe8d1j1ahGbTat9q161DVDNUyYhL5tuh/Uw4R4Tdr2W/Lla/nn992cx6At9PRnYkOy1z35Xw+u/u2tct1UxHv7Xw57/vmgQHsnJyyUjJMHgkxMzIAj/AAQHIIM5cxi0UiHCLlIbo1RBcMRHNniIbUiDrCfxnEgOQp965t7wCwHI93d8fHd+/evX8306R7f/7y5fL8sl4vYD5NJacpoUXb27HInO6/Ox6eFkrkMLIsiEHa3l9+uXz+6+v1eUOHh/d373569+NffpwO876udbPofjfxu8fH490hHzPOQmmGYjJxyQzY93a9rLlVUAMEGeEEjuQArdtW2/W67ftuNyb27Z1Y+KrXzdZEuQjnxLnIVCgDS5vAxY00et88JErOBNQtJBmipTw9ff9DmebL+UKSU+Z5mg/zn8F2bxe0XcCEXYRSKdN0SCkDAhNkcm39+XV9verLStUlUnJASEMBgGZq7hAdsIMoJOoUaqqx7ql7jpQxT1QmHuEhtyssKHVODa1ZHHD67kHWar9+AobD3cTgKE6xmT7XtsjGrW2/fvyPl/Pfq64Jo0Z79ctFm3Iw3R3nKWnd9u35fLns25SlFM6Sp9z25/94bflhpnmeS16sX0R/SxbvH7/rKK8vz50jP83HmYkTBFjttve7p/sEYM+XZjWOEdmD3MCbKzgkGErMrwUZLqXMyyTMAWDmrev5al+e4YPY6di+f2rfvys/vk8Pd5Jm5BwhRjkeDhkJR6BV9A7Rh2SeXMCh7mZuyJKJhYEZKXEuDECq3HZqjahS1pgyH0taUjod6CCU3L2qgoQQoliEGqiNQ/tmq6HAGEwigm/xPa213z78+vry5eef/+X+vhB+fH259u5dbzG/bg7kXx+2SMEMKKPdL4mBhnuAgikyAzJCMDjNuTzeH5ZpZoTuTdvW224WJceU+fHh8A//+OPd6VC3rbXaeyOCeSqlpJSECNys5Hw6HqblgJz37q+XTc674zbP55KTMEG4h33l3TLzYTmcjsd5miSlkUE2hKgpiWp/fX5+fn55fn6ptREOjh9B3EJuzSyGBhHMIMLBIsyRUFJCBMYAJkEAREUEEcgZs2AWBGIPNDM1FCECVO3VbIg3EcapOhAWAWABQTeaICFR3Nwkf+BeeEBvtu+VicuU53me5wMArLqZmwAPdkJr3vaGMS3zXYScX89mW2ANM1C8nq8fPvwSbu/evzseFxZC0HCJiEFhDveIYOaUBCB672bNw4gIKFIiTQwYg7nARPM0z/NCwG7D9TNOLCdwRAsH09ZaGxuNt+KSWBKyqId7JJHDNB2PR6odiS3CASyithYbKzoQ7t0IY5aEhN1b7EaX3fnigGbW6l6mSVLpvSeGOact52qxrVvtyoSHw1wyDwWr3SpdGCADGNjSlEQkMdNQBd6s2EPvHH57ovq67ox0mOevG4pc5E8/3F///P7n75/e3d0dJIHG3psBADoRpBsvXyLGQy0cUCMQVcPANNSiB7uPUEOwkZrNRmTgRJIzpUGFQXY55EiT2rZdX14+bPs6oHdu2tUvdQ0FBny8u3/39MN8OG6108uX6/Wq1pEBKMyDAMlxYLvkmxYm3K03DDsucynzYS4syVzCEDCQHMlGykXr2BSbRu++73q9tteX6+fPn0pJU56ySExB6MISHtp7V+u9BRBxuKP1cL95iL/SBMw8oDvYDTBoXf3NV6beKmw1WqcA7hq9R2/uCgDITCnl++V4//AoLPB2kd12Scz4hkO7yYPe/syv2qa33dcINgAEdHP0mEo5zPMyza3aulaA85wEXLdeNZSQHKN7WFi3HlSAAFHevuYbaysCERkR0G8O+3FWBFHYEG6/aZLGBzGsiX/cjEuaSxBi3REw52leDofD8XQ6TTLbrAahXc+vV9UuIoI4Zb4/zK4nOZXHp/vT6XCzKgxCw2rbl/3lt/P509XNp8P07sd3P/zD9+9/egKivdaAXiY+lfz0eFxOiy/ZJuSphKjD4BSZY9/b3jtGAN2URghA6mC1btd1vV73fR+Dn9+rZPDN6mr1ECkDIUjCNLFMVCbHEGvEW691b8ZOghIRZITdbMeSTw/38zIvh2MgzcfDtMwpJfTme2LbFtG5wDznPE2SJuIUyESQKbS343G9XPvrzi+rfV63da9uFmFA4eat1+bVqXlo6xCm2vZIDQtMeT4cpsf5eD/Ny5T597eDbOxrvH58DaT7p/eyzMvdwcBgM2u1aVvXV3z9EMThvu3brx9/OV8+MvskcuWqGuAdc05yZDBr533bLuu+110YS+Y5k0iH9qyQPR3DM0QgRIJYBEuejAvsLonv759yZs7SW0N2IJ8OUyJaL6tpV+segYmJUTWAB+7gWxAZiggjdTU1c+zuFoYQ1IB6jWjVmmFk93Ly5A61KigcM+fExK4Qe1ZzJ0f0FDq5BhL2rtcdFPywcBIhsnGHILMD6bAtEQ7RBidJicxw2xxVdwlKQgLA6BHqYD7KDsDxUBwV2B+6fVDtz8+fz6/P/Gc4LMt+dwqF19frbqatmRkxMCBSDGaskBRZkviIHw4CiI4QBEboKJA5CbOQzKXcHWcG3Ndtu573/WramOFwmL7//vHPf/r+Tz99txzmy+vr9XrdtgCIqUgpklMiAnfOOY/YQuQEGK1EaZGl3yw9t47rD63YPJXj4XA8LMxcWwuPlIQZ3axrP1/O5/N533f3SCUx861pjhjFUIB5uIOPpAQLcGAMZBRMg1BDEBYYKdHxmJcllUlEKHwkdGMnUgVnLpJCvbkNVe8tdfmNXDJoMcwjpjHMQs3/i9VCu9XaRz5UADCnacIxrQkPVWNGRAGA3js4TEUIxR3dYajzGXmey/39UYR++un7eS7bvgZASgxE8zxFxDgGiYkY2VlNhy6TeCRdDKw3jkGVkBwOx7vTHQRCoB+i1e185q47U0CYg6v1pr0PvP9bQTYeWnutvWsWWaap5Gzd1bSpGoAj9jHYNwuDbe8A7pkJQnsQamw1iCFCe9uXeZ4mySXca20RzsQct3VYKQnwED6Nj/qrNG/cCIDBb0UMIwHGW7IEIuJAaAxtioev2y7ED/d3IreHZRJ+uj98/3T3eDwsKUmgNVWPHjDCnIMRgAbYb3j50R0
pDNRDXQ3dBTATuGA4MQFDYDgaDNxKRkwYIxPUzTjPh9PdfFgcmoOv6wpmHNFV+7ojyHx8uH98/8NPf84l//Lrr+CK4IkgJUZGjRg8ucFHlG8FcQAAzgTznMqURSgAvu4BCEf8MwFwoDiAhlVtW9XL2s7X7fWyLWq19t6V2fVmaQgzta6qGoHk5I7a1d1IRgbsIB/5APA4mIF72FjsqEVvVmuve9QGXRkw1Lx3b921O+EAz+XD4Xg4HOktn+vrUwYJkTAI4Q9vc5Qvf8BKff0d47ZEQGFJkpKkXn3bd/fwUgBsqy2sS0pAqO49oocFAbEQK6KAo/uYwdEAE8XXpT3CLalryLMdhnZ7fMpjQUb+h3sfACQtxRF4FSaGACbJnBJnTiwiT/IeHF4+v26X7bWvBfGY+O64lCmXu/n9w8MyHQMxIBL7XreXTy/Pf395+e15X7eypIfv77/7h+/f/fx+ultMdVoKE01ZDktJ9wsuE7GQQEg01fV1M7L7x1MSNLcRBTbkTQHsQN1839fL5fV6udR9j4jbiTGur4jNbTVnRenMIiYJ85RonrJ4uLZiuvb92rUDhYOiA9Be6/Nc+HD3VO7v7h8ex1MXmZEIjQLyDHY/yWmR5bBwKt2pGm4NwBGzpHl6Py0PD/q6qXx+fdmubT+3HqqgDq3pet0UOy8e2utr777Vdk0LPR6PT3J8dzi+P75/N92flrFMHV8XgMn22s/XX16fL/b/kKd3j/c/vLNTef5bP2/b5bwB1E19b82gbbV9/PTx8npepmIFItkMsZQ5+wJaemv9Cper7rVq3wWYPWXATIyEMqJ3t+t126/Xbcp0DwJRifP9Dz+Uw+nu8cGl+9T2auCRDsATMZVyutfwy/bBvB0OB5kThjiMJUL+9haIiNa1txYYnCEnLqUIZ8FM4Ze1mtWm/bz2p3dLQlpfenaaLY4L8sSayzWhUiYQggn0SISX7XXT9XXdkjmXSXIitPFk6EGGYuQt+ma+mm5uwmkzelm9q4vpgMxz5jxzLlwyMSMQEMYI+gwf8JIbJuF2jblt2+VyfVnXyzzfnU4HN7hct97btl3NvExpgJoG6P+wpHf3aZ4Iwluvr9drU0UavbgS8zKXZV4O8zyJCOF23l6+fPr8+Xm9XMJgLvD9d/f/+q9/+dd//cvTuycM6HXtDU0ZwIWRGWX49RFEBMJbbea9GViHuMHXRygx3MQiX5k3hDnLspT7uyMSxav1rghh2tftqr331sfUgQhukYS3+D19EzJ7oEOMFMURqezjY3sLEIBQR9Rc0vvv7k/HKVBNre0NHYWFILwpgU8pIzBBq7cnegTeXjftb7g7IVqY9d5V9dtJjHvsWx1wF2aotTFLTnXMSwCi1srC87yUXDZCc+6KmWieDiyJk5Yih2mhwH2/EsPpeFDrHz9+7KrL8eCBAZCStF6ZURgBxlAmhnaAbmB/A4jhz1R15nQ63j89vieknPMyTa3vHz78+vz88Xp9ra2Gw3iCmf3eWUaE9r5ve21wWuac8zxPiNh7X/dtq9UgBRGwDOyDWu+11tZ2uY01sjC0DrBpb/u+Hg/LPE055YCoA6PnIcTICVmO6GUSCIfB2iFBYroNUwMJbsJVJrj5zP0GTkRMzDcmHoSHb9s+l/n9+3df6cOENE1lLoUAw3xIssIBEUiQFTBGmkKPQBYkQTZi8gD1sNHEZiYWKkFxQyYGgHmEQCTwAs7he++7+mq+3MlPP/8plbwcy4ff/v73v/6ntTYe0d3aNOc//emnf/rnf/nTP/5ja9u//dv/9fr8QeuVIKaUJKXAER6SmTixS0pf9+KIyMIpy2C1qNvgAzPJLTIBM/KMfOTInAK4aujW7bLva60BQTx+mLu1sEBwJg4mZyADHXo+89YqYuRUmJEoEMP8JnQddkyHIGaC8K5Nfdt63a11NgcHUI3WvLbQbkloIpKcyzLnkn+P6BmxAwAOgRhO4ARBMaYhgIBxk/TenkYIt6yWiDHGGXw6DEIf+9SGAYnZw69bc7MCmYUNoBs0tcAuqaGPhayZRQARC+IN0IyjRsYAcHWNcBzE0lsM0PCfO4Hjt4SoUcRITqYmSB0IwkfSCyIPHbXkfNzvjnd323Vbz1dVxcLLlOe75fB0nA5zkmwWaJABQPW8AqzOTnMpx6fD4w+Pp/d35TQPb8O0lDKnMss051hSZGaSCKvV1vP25cNlyeXnwzs5YICN1ClEBJRAdkd1ba22feutmo500z8WkENJbRxK2qChb9ApCRCFwqa0K9WGrb4126gN9n09r8I5SeTltisMcgtvRr4nsKnw/f18d5hEsgavXdfq1z2AaJjzck45G5W+ac2/IYCqt+qqES16td3RGIXRQDu4MdDM0zt5eMf3D7Ac/JS9UMjXVFoz39bt5fPrVs/lvKIcOfLhT+/keITlaOeto2pTe32t4F10a/r8/KmeqyikgI7IrdF502tgbNZqvTyv5yu650Rz5jnznCklAmJk7KZt7aq2740pimDvW0r5tDwdHu+Wp4eK66tCRAcAL2rUUyrl/qDQ4uMH7ermgphJLLBVt/qHxFQIBw+AYKZ5kmnJc56EE0GybnXTrRme3cKa9oRo534PwooHl4Tc0HeAFupjbuxogUAJUHoHc1+vAeaE4eHdvAeqQW1x3fRcdVWr7mzhPexsZzTqgADBmic+nPLpSMhU+Ku3aNBdATGI4ttWTISPx8M05XW7lss55zmlJMLMCABEkLOkxOY6juqc5f7+dFpErV3XWPdr690tzKz3nlPKmQ9LOS6FAdq6tf3S60Zg96c55XQ4zn/5y09//tMP757uRbC3TuhJCOeMCElkjF5uoxbioSlWg27QO9Raz+fz5XLpvSPc9uv4jaMHwZlhEDKzSLghhlnfN7Pe3Z2ZU0oAyKP6wdt36bfb83bMjYYpYGzPR1M6CG0KaJximuh0N5+Opbeto5ENK4OPYQR6JJYIUjKMW9ISR4ytxNeq65uaZsBGv7nxkXIqy3xgAU48UkFyTiLMEqrQdKQsHDsgIlhEtxCMZZmmuUi242F6vH+Y8wShAQ4Qr68vLy8v6s7MjFxKcXckj7AAv7WSgRHoDto9IrS7OwgnTxDREUfT6MQkIsthmaOYdaJgxnVbe+9NB2Hlm9m4yP39/bt3TwHxcP/47vHd3emOALq2rl1dbUgKEG+GdY9a27rtxIOhhh6YMlrA3nqEO1C3KNkRoHb1ABFJqcg0gcgcc4RGGDHlUkQyURpUHRxR6rfFAyJxDN6/B0IgohDToB5BhHtrdX23/vDzn3MpXwvlKfGcOd+g2I4AhTEwkIAxRqqjhwMiAwtAAid3A/Vw8wBCYOThNow3a1g4RjAARYRGd6217lW3vkuawnWa7r57/z2F769fqG2hVtx4yYfT8sN3j4+Pd8K4tr3vV2tbIkDmLCJJgImIkyRBFDT+lkkwTFAJOSEy9ObuHoh84wOTObtlwNJdmuumftnrte5Ve2DkKZVZSBxJ3wpCgyBER3yzPLt39aaVETwoEJEECG/SF0AHtJFsgqAavY96Rfdm2t
HGBXpbC2MEckqcEicGiviDRne8n9svOYDDt9Hwo28z+F9eNyUL8WhF2t7AASLUOgSklB2iaaiGkbExMquDGRi4g4I2D7cgswAQlkQkQDgmm0AIw57oCuGMFHjbFBMgBI7rzgP8j38rYSJGYkB2UL+5ncZBMcIspqU8ff+kvdetni/XdW3Lafru4f1yEEvYwaGFGE1I1NPuU81HfXhStON3d6fv79Jc1K3vDdHLMXHhtAiJKCAxlMx9t/OXy4ffnj/+9vx4PPpPnhkNrY3WDsixOJB5c9ewHq5w82bCt2+HgCZIC+QZkjj5rtd6rZf6StfCMwZr9VrbXrt2RaBwMOvag5FC++XlmUkCUi7L8XTPzH2/Cvb7BWg5HO8f5nnetv5ybZ/O7bz32kNEICQCwiIJpJLneZ6XU5q3iF3xatQMG2xdEHMqpRAJZTxM+fF4vLs/PE204BV7zdcC2/TVZQmq/fOXj7/9+otpT9em9W++8TzNp4eJl/vpAXov6+WytXV9fX7FtbV+PX/GHVwSFi6UufXLp1+1mpqaNtMdwSeWMh8Pc5qKpAG4ITaPrrXVum+7drMQCLS6EhH6XU73hxODSfN97WeeGZl2u6SSDk+nyId0Tb1Cr8piUymodr2c6/X1K+kSICBcGKZ5ng/T3cMyL4loXPqgjSgV7xLo2xaqTt3i3JDBFBPkhacIq6u+QFXY3fboW69c9wDvDG7NXr9s54HucO9hGuQg3XBvuq79qt4NEGwzv2yNI9iRCVhiDqEixcCAgIBIkJwAHQHNCeMWwfImhp2m+Z//5Z/b9nFdt/7br/f37yM4JZnnWVUR4/7+hBTny6t5JwWEVEqe59SatYoigOiqWvd6vVafJiJPEhi91fby5ct6uQrHd+/vH57u7x/v7+/vvvv+/ePTXYCdXy+1Vu0tJZqnZSgU3pI6iAgDUG1EJApE9NbP5+svv/764cOHWhsOvBz9Polxd9XaezVVzpgSe/DYt5u2Ed2Sc2ZOY4EwhrgeHuCB4WEIAcgQA1o1tBRjn0II4zd0QsuF5oVzBhG3pkKeJzGF2r1bD2vgSJIYeThOe+/q9jXTfihq31LuGRFEgsdG5+295JTev3v/D3/6E2fKhVMRZkYnRPdoHpM5uGeE4wjYtNibaUHKh+l4Ekl2d1rev3u6O56EWVUvl/O67h7Qu7VmxDcFagw0jAYRDWpZBHWNWnf3iCBwSmlCkAgy88+fv6hazuV4PEZ4mXIu08PjuzJP635dr9cAen5+hW9cIPM8/cu//Mu+fnHw7x4f//Ef//J0Oup+sb4jBBAMLcTInFOz3nqrrdUGgwSIwCgsOZUCrgbYFFARhRGhWwekLDnPy7Qc8zSzEDK4d0lS5jmngiMQk0chOrhvPp7tgLdgV0EgJBhhE1/bZFNVvX/6YZqmtzMZEsHEMScvOfCmGuax4htB7oCUeRS+yDdTI6GTuZl3C6gQwCxIg6849FAOQUyhsCqo9nWvtTVVryl/+vALET0+Pci79+3T/dTXtldL+LSk5fTw/jSJ14+//OeXz59sv87CS87BHCLAjMIsaUoiGKgdR+gXfK1XI8iD3UcEdQCJYiIQcUDrYp2d8FLjda3Pl/XTy8t2vQBaWVIiPC5ZipHYNKepJAgz9QAP725Vuzalrt61O0Hv3m1MRhIg+oDWQxiEunez1vpee63W1fuoGxwAAUlSKRkhwpipTBkZ9r5vdf9mC/MVLEkA8LYjfPt/8UYdvKEL3wiG+AY9INGql9fz5XwOc0J0sx597y2QHUUB267AUMqCxA7kjtbMfO8GaugOAMypCCcSTiKJMgGG6UDGICKLEIM7gbHfPBMEg9f2x02XzJwQ+8UhzEJHnuwfdk6p5Pt3j63250/Pl/Pr3hVDY2E4cBUzb1JDOmXM3GiB9LDc5Z8KTnL87lTuZpolyNQaEso4X4QhyJoHmUe32uq11a17EEqSkiSxmXqMC0UQBJEQKoIR+g1kFBEBqt8I4gA4IBmIoSgBo3msbY+oiTsjD+9WtwYOhAkc3aJX3a+79Q4wZD2Q0lz3teSM3oQslAicUjnO3TW2qpdNt6rVIDmVEsUokCy8bf3lZfv48fXL84vKmaYrz0pqzM7Bh0PMQiXyRGXJyzIfJzpETVvrDU3nvt3/Tv2KiKrWI3IpKRVX2C/t5fMlGAKI56XchWLUs/ao7mphJQt7CCj2GnuYe3u51r2pa4ABeklpmebjsixLTkJ+y4KFrtaamloilJQ6JDT00MSYyCm62666m2pvvXoNiGU+BB9oBg7mLFi578bYD2jRu54v/Xz1tyIGEVKiZZFpmQ6n5XSapbC7umsQOICAODOO1L8evlmcbZHomTTTjnBZ/ZP1D9AMGxEWUgaZJhaGWaQ1VIPWXLvvatW0OnTvGmQOGsGJ36JBQkfANzEn5Bxp4pSJh50iEILwllgzuGpxS5f9ereI3N09HU8P1/Vlry3vO6K4OzOWIsRYpmTe3GvvKyKb5wAf4+8BhSUKDGOMnHie0mnJh4nAtbZtu5699/vT8d3Tw/c/ff/0/vF0dzoej1Mht9bapr0RgoiUkkV4EDk81C1CwwxqA3UCyHvz59f1w6fPHz5+fn45d3XAm3zvD30VACMwU0ppKjncNt20d1WDkTXFRDSCk9Xhd7vp16MNb48qAwAIB0QiIBrceGeylGA55GVJTBYOECoUU86m0fvuI1OgQzjXFrW12lrr3dy+Vi0RMdasQ4khIhGYUmdOX/d8IvzwcPfduyfMIBlH/oA1j3BAcQ8zaC21lrRrhIRz670rEmKZpuNR7k+Hw3Kc56Wk0ntvVQlFFXoLt2HyQPAbnMQNb4Nvc1XrTVV7RBAmQkYkIiYiVXt9fTWzaZrHWGvuszAFkKQpe/SmSGwWqr8PyKdp+qd/+gt69fD74/HnH38oGC/7q2kDQmBWDXMHMzJjBQuUXKZbrgyam+SSypTLFK4QYAFNATUQQRWIQR1GphhzWpZDyhLQmSmVLCmNYpeFb5zi8GEpJx7x4ERjkYR0Sz6+qbBHtePH0x2/aWIAgMLIe1hzIyZh4iRIBGYjI2v8gJEsRogxxKp95DGbAkQLQHBgoWHKvi2zAiLUQs1677WpmoOb7tcvH35BAEEQ9NO8wPF0jjBmyXk+3gnBfn399Pn5+flZa80sw8FcA9wRHARuSa1aTXv/dnERI0EHHMADIxACzUDVpanvm2217f563v3L6/78en45n73vS8GllGXKh4lyduIuIkzurhEA4abddHczhCw8xDVh3nuPrkAUABmRhkyMEOCWlIThwyudUwoCUZcAQRZJSYQQQwRzSUkQ0SL+ALr96rz8qsWC//9e2vta19cvL7/+9Zdt3UouBASI6r7VipyQEzhdLlf1NnXklIf9MwLN4WY8ckJktnBRshQ+bEHoMVT8b5HpEMOoNWyLt/kg0Nd4p9vt/8DlFTuqa1e/QSgt3G6VDDlnPjze9d7vP9zt2yXY8iHLXY4DrdioxbL73FjCxbAA39/dfX93mu8Ph/sDZdqsNt87bgotyNBBzgHNqDqit
qlXbWE+LbOcju/fPyxPJyzS19pUm6EFMjIhMEUiN4LEeMs8Nqi1/l5vuYeqd4VqzFLSrGCtvWqvwUoMQRZvfjNmJmIEDI9tM+2YSwawfV8rXLVej8fl/m6xwL9/XP/978//z//2t7vj4efvH0+nA1FiZooI4gCmlOfD0vv69//89H/+t3//P/6P//fL9uGHv+DDn3D6ToilPxIrF5QZpyMcMkzsjMC6a9X2utbAdjjN+979bQWDzOlwvPvu/f3jYTkcpZxymc4vl6Z1OmURzHfJ06IcKTSfMrtHXv28xVqjbrtuGOG9CkHKjJyJoaR0XKbDPC3zDID7ttZda6sjfGRO6d3jPaJ82WLtXgJTyrMkr/X504ez11CExpePr3bsj6dHpG60GyhKApC+dvYIEWzNXy56t32dxDDTsqS7h7IcplwSkVuzrmruBmQBToACAhQQYN4dd4VrwNrh9WK2X3+F/d/08pk3LnA8yfwgpyXNCTnYK7YaW6XLZq9bv2wtdu+7qTV1Ik5T4WWSuMWhBUJippLSPPGy4LLQMvGUmWk4ezGCxjqWKJhBGPgbd5IHtI4WOc93Kt7UrW9bXbs1FmBBxG621XrufSPKved131mg7m3rZjH4nrjM6f7u8HB/+vH9acrp+nqNXqO3SeTn7x9//vmHd9+/Oz4cc8kiGLabB5Njptv0RSjITbVrb6223nrvrUZt1C15yLrbl5frrx+fP315vW7dnADZHL6RkQAR5ZSnMi3TNE8LAVq3Sz+3WgGQkEcw3Dh9vsYB/u5aQIiRZofjQAwEQ0SmIIoIRzQRXGa+v5sPS3JvrUW4JuZlytr8FdZhhd2bN+3bbtdt33rv1j38rYgJxJtNyd0iJKVEJGaWv7HzMdPxMB1Pk4E6Wng38HAAIMYpArX6do3rWte1hwMCtL1XiV5NID3dv7s/HRhI9yB11Qgl76QVrAE4Y0gYuqI7hhMGj0FLb23ft9p2Qrg1tSOFXM3Nh7B0fOJjl1drG6pfxOjatq3vm25rq3v/un4tpfzpT/84ZcaIKaX7w2Try8tA+TGDJGvWzaA1ZCHKnPL9u3fMnFKOiLrvSaTMS5qKILp7q62bW+0DWcvgUHvgrkFAaVqOzIVJANR7a9ZTkhAJSOOT94EXUtMbOYSYOUseiyQcwzdEBAoAd8vTIb6i+t1DW6vb8I6VkhCSUAZCJschuwcDJGZMIuaxbW1vg/ls6oYIFSAokIkBRhbvSLoeaNERMeGAzEJM4Pry8df1cn798nx3PN4lmQ/317U5WDrcY5kv6369rr/8+uv1emFE5oSYANBMVR3dmNAh9Yh9b3utfyxiwCPUjNGBERA8vKmq6r7h8xf98nL9fGnnXWv31pu2WjLkUk53cjrEkiOxElYM9NCwcPdhBO+9IdBUhDkjkWrf93Pbt00cIpKISBFmoDBM0KUbs3FKQpgyZ8ugRl1JjYIYWZKQMOZC0zQxR0RLib+FQ0YEeLyVMjg0yW9tCeBblwJvvziKp/C4XK6//vXvv/7tl1/++isB/vTTz/PxyCK9923bOPnd3YOCXbZP12tNU5NUgIWRiQQBISIC3ZEwwtBwaJbNh6EJgplKyYx8E1DpgAFquDI5CQxK3x84MdyD1LX13lq4m2rvve2NCCMg50QppVmW++Pjd4/ubVlLOvDpdMhZau2kmj2KYQl3Q3YX5CmlwzQfp5kTpUoNsQJ0wO4tqss1qAJ0dAjdG3gvipQKHKf700FSGhN8C2/qqoS8J2FmD0ZGuIWdIhKDfBP/ijdnv6MFKpCxABfOBA4cQRrYhweUgxmNIiJMNUwdxFgCxEw2osACMqWyYO942fZPn87n83p3XAhRcnl8OExpomZIeToc5sNhOixYkVNBYiZPpCXhVGRemEv2LFg5VZ4sFcjsogpqquF79+veSDAXdftmdAkYSCipLMu0zMjcrV8/n/GMh63Mh5wnBoqUOAVlLPL/pevPmiRJsixN7G7MLCKqama+RERGZlbXdHVTA0PzNP//LwAEAhFAg5lGd1VlVmYsHu5upouIMPNd8MBq7h7VAyN7Mo9wN1UVZr587znfIYukKq2Dea+mQQCMTolTTpwTEuWUpqnknImTW7iDm7s5IpaUl2VaDsdmuJ6v1ejN08NUplBYr9vz9bqiAYrg3M+fsNe+7dr33quahzF6SYyJgNxClTuw4hd9DyJIwpypFBLBAHdzU78DEeDuCSKCQPR7VAc1p7XH89o3r7/49sHrs1ia2BkfjwiMZcE5oVgyhdrouttyg/MK0w3KinyLZsRMwpyFMaCrug1Uvkw5TxMvC5QCg7WJ7u7o3zQqXh0Yv+te9K4ffjv/+tsFUDBIvfe973VvrSIaIJkHkR4OycxyFuRY993CWtt6rw6QkmTBkuV0WJ4eDo+nQh47dI6WOQ4TPx2np9N0mHhKIBJENpLhhtjYDNzBrAVBxLCr7Fvdt21bN1tXWivVhuveX87bp5fby3XbmqmiGqqF+bfwLhyE0nCwIQE2dxsCWrr3WGgUMUA4qLbjh6PpOZ5WGnZ6AkAKYkwJiNDNibBMcjjI4ZBLRvcO7kIoxPdOqt8nEK3pbdO1elO1e8YTvRYxAw8D7oA4etDCDCJ3t9SXgmya0jTJ1rpqdzcHJyeE7E7aaLvF7aK3dW9NIYCQCNgV91uvq3KUzNPI52zeW211a6127aZN69aoa932VpupRXhrHWBEVTdzI6KcBJHd0TVGJxsRB/+GiEQEkdyDaVPTYTYx01bb5+fz7bb3pv5VQ4aEmEUyy5SlpLQT3eFDiAbQ3BVMxnY/z/M8S7pHvbtbLQkRU0rMKYmYe1PX1vpWAUGYsjCaQ+vqK6CUaUHElAKia9+BokyZJeH9Ix6Vq5tq0z70PUwsnJBwUGRkDCpJAsK05zJ/28h3C1MPNCJNjgBE6Ix8n1q4jbsDIKBrN1tb3WobecGEQEyUCIWcItTNOjokREDoZs1N3TQGZQ8owl33tsK6rmtvT2/LH74TmUEmBwueu8u67+eX6/nl3HtdphkZzbxH9K4WToEhFG4QZtrtm04/vHYvXhVgAIgQ1Bu0XV8u/tvH+PSsz9d9a+qBI/08EUUAM80zH0qQWyJDaDDIdeFmMUjTzDwlSTkzS+/kuoUHAAWO3IskuQBChIBSBAJJypCToTVX691VoQcFJuAkibNgzpxzGjGZ/O+yxvzejoPXzkb8/+nGDCgyDTlY18vL+a9/+cu//ctfP3/4vEzL08NjmSYSISbdayAhEZDV3i63G9aGkpESsySWJFxYENEdDNi9UySBFJDCdTwSKeWSMwAO/7j15trNOoAGKZP9j7+m7Ldtv211r7VWBOi973vdbrv2Ps3KD0cSCXCZ89sf3y+n4n1niXJI4L634AYPhA9JZuBaDbT3vj9br21VfzzMcwpiRwphz2wRVfmmosgo6r5uPSLeSHImDZkMvWpnGhfB3ttWm4bOUz5MnIL3iDAPiJQ5ET08HETunSVCnIgmJPbwbh2Us5yOR5Bi0prvtYVVQ2AyJOvQI6phdeoAGfssOAed+nIs
7x+Xp+W4pOl6ViRtfbvebgSwVgWShzdv5sOhbJUwPz4+nY5zWThN8g//GMCOsn54zjRfKRl7gijCmVCog1few0LrtvneoxtqoAMujIwk9FVF5mZ13dbzbZ8ytuhxqeqrNkNLn2g55TdPh0NO0j05SjPova2rbTdojdyIkZlEck5SpixJAJk4sZQIbs1NNVSFgkqWLIfjknJRot+ut3/+6RNL+eOf/vxwPH788Pnz8+Vv56uW9O7H91NGso+61v2y7ucb09I36huRTw9vDtPk4tUd53RYyoFfVf3D229mZk7k38Dv8T4Tx7jzi4Y/mihxQqet67P7pW+foK4CO0jbma70PHspPh24ZBTBQrh4HBWfqtw2vNzkfLPnS9/24We5t77dJQIBmIiTZBGU5EQGrjDoagB+v3reiy939OD4xi6+rvW//be//H/+9//+9u13h/nAhNr6Xmutt/CeE0qaj4f08Pgdk3dt4XZd18s1zBuilQTTXJYip6U8PsyHKReGvjf0msQejrKUlAVc63Y7B9T5eEg5EwmEq7fW2t7aCHPhxKUUYAxyoLCwvdXnqz+f9Xxp11X3qtdN12pNozav1cbUPL4pYgDIut/Ot533bd/3uocBUxqEcfzSZRkPZgyiezALEYATeLwKAoNohA9gKUREZpASLAdZFsoZicLMADDnzCh1131rphEBat5632qrPZBYsgDwYJCM438kHEe43y1jiID4+7k4IoigMFivtW4WhoBZEoRrs3WNy3O/3va9725GzEIk08IUl5f903S9fLcfpzknJoyubd1ul+vLbb2a9tbr88vnQOitmiugRWjf6hfCPnOap2meJwC83fbau7+yZGKE+pht224WralbrOu6bVutVU0hYN/362VV/doia63+8vPfPv78r4/H4+PxwA8ntT4iVM2tm229A+FJ+HQ6vv/u++PpAICuVuum3aaSEZF59N0JEYFZ3W/r6hHLPBOJAKlFbVtXD4R1W6ZChKZ9Y455WZi562jFBxEJc4Tvrbo70Z2aPwS5TJRTHsFaAaG15nKw10TuAHTgQCFmFk5COZEIM8nwC7fu3dzBEA1lGPu3riqJk3AuOSXOJRGim/ew2lqYT+FAVN3VDTEco6qaGhrG3ZVrquuFy+3N05Sz8WShu4bVfrm1y60r3MlHADbw5M38rvlQJG0UxmCMXx+yYYJgEAgOQ3eHIMHcOnz+1D98tA/PcdvBgpizBIabKbr6vrdakSBnAQlO5IKdAsfGRziY5sKUsnBmIRISsHII9HmecpmZZ5KJZAoAa9QVmqFDLFPO5Nhv3rbqm0GEJOACaRJJWWDYzNQc9T5Kfy1MIHz4nBxjMEzuV7h7YyYAAkcYjocjUCIO11bry+fPf/3Xv/7bv/5VtxYPtl6vy/GYEpck27ieuJqpuVVtba8KiCBEnFiWkh4Oh5xYdQh4KOVCfGQKNQwHDwII9wJBAWraa920dwglsiAVGbuzf1vKiJpbBDHJsCMiDOGxGyJAyglGXHeS45uH46Fwb2QttHttsyUBWggKEwGgQE7Y1bXvdoP2AlIbQIIAD8ewBIjKYiBAwkkh3C0BFeIg2BVkC79WBQQKCoRwC9fYDZBEwsg8AjBN5fR0mid58/5J0uv8NSLUo6uTGlbtICazCHOgwDARYARng666gVZTc/CQhHIgeQR6Ipj5cEyHx1JyCoMQzxMdD0lrOcxlGE1LTstcgkhkenzzdDgsOQNYI4rWn87XdzJdLz1q1NgZnTkQa8DuvnsPUONtj61DMwwgkS/t+W+qZPPttl5eLkth690iuvmq3cBSQrbJmGCOFCAWUM16t7q7NnIXJs6UREQkJU7ChOSBGITIAai9h3bEKFlIcp6nw+EQnG7VqneHlFOZl0WYP79cf/3w/FIbHQ+qiJCTHMxhu7bzpxcI0crrZUULpkJg15e2XfdqoUTxu67GNz7ZuDvkIu7x7vhF4gRDVEEigspbmKu99HZBrUzNyRvGzYWrRVfgrdHDwodJZqFcKGWa53w4xMNmj6e+7ta7uwETw0hSibuV8G6bJhjXzBhRqnAPbx1kJwhwp/H95YX03n/98PHvf/8AkeMJppzHQFtV3ZoIJ6HlMD08LAh6Ob+sa93qrmqIlhPOOZck05SnIllAKMbQasqMh+lQ8pTnUiTCW6tABhi9ZyIJAFMLd8JgRkBMSfKUWdgtsUgENJW89zivl+v2+bx1hdqha5hCb36P+P2dIA4ZBZzq3iJi36uaIrAQqNkdNOev7JA7DOSOOAsgQrpDZSEQgXhc+6RMTMSqIAmnifIYO6AT4rjBg9G+1XVr7ojEQOQAOmZFJCwMMIArQ7TMSDgKpfFA3dN21My+1egAYkAM9pdaGCOjwJgXm47JWzdtEUGMIiVLgfDabi/P+68/f8aI06kQxbbdzufz8/Pz58/PtdWuVnUbPA8IQw6ztm1r633k7ZSphEjE8M6OjtHdzHUHHo4AG7PWuqnu+3Zb1/W29d4RUVVNvxIIAUB7f3n+/Pzpt4nCi7hNI5FgaKtVdW8NGB2AmXKWnGQA3aw314ZICOSqGkCvk7gx6LEvuyMRgg1vFAqra++SJCB6CjTt4daaDikMM4OkGAQ/vwu8hpcGIFyYiYhpmIq0N7NvdCQBZmAGg0xeciopMSK4h7p312a9awAEIjCp2XDO491hf6fv4BiEEA0nT1MNxBbhEIM61HpvVcGIYEDtyUL3vb1ctzrD7qwBrfq21Y/PL+vtGq6ZwCMQ3KyHuQAS08RQKNgMwwTpC5X//owFYjCFQHB4wMj76tF3r7u2HhYoIoQI7qYRCm6x73rbaNvtNMlhmktKHH0Ep8dYTjLaWZm5ICailCgfKDuGZAGS7mKdFaWbv1z0cu2XtQdAPfAh4cyJyZOYSFBKIClEmDkzEYKZuQLY/2A2ipEiDXeNCSEZBQTBmDl924AaaBjoXa+X6+Xl3PYdI5ZpOi1LSUkIBAEwhMDBtddwn+fpsHS9bdbUhhkJMSwlclXS3i08AkuZiB18NkPzseVqymyWAly1q1bVjmARhmgRgcj0O0kMSGThKR8ejkFh2kspGGBupNRb3643AGBhyZlLYaKMjLfQW4XqwpkFwju4KRikODzkrKwegYDXbb+15hCBhkhMOaXMnFJKyJRELAAJwudMHsb77tZdNtXAOZEPkb1zYsmEQt6oR4Dww5vHXMrpOL3/w3cp35kE7r5v+75uiEUtml2wUQ0pKpIJyCk0lZ6W3rtXi3ULA5PMp8c0fzctf5zSKUFgliwi3W3bbz3s+JAnefOHt09Tmd++OaZE1lv0OjGVpRxOD4eHB2HwdrF6FnJiCkotUm3KxgIB0bCZr92UAxfzyUICBAkJIFMIIIbHN+53M1/P6/nzS55c41A4IZB0FYpDSk8hJ02HTinMe62XTVu3MCJIIjlRnoSFmQnHhKeZukpCSTMymFaMnhPkUtJ0StNB8tSMmnfO+Kc/0WHOhynfzte//NtPf/v4kt6+STnt204A0/FBldbbzT988rp7p8unzpz3lepqv/7l8+V285m
/913/vQ8OvtYxHmZuFn5PxoGIUHP0iOEREibkHaNHXDFWwo6kQN283fq21c8v+stHeDzJ28fp7cP87jQ9HfOhpCycFlimeDy21rU2bd1c0ZwQOAJdUTW0t24xlIXDLEqI+ArJHCOSwRLTsbRez35332u7Xtfn5xcmxtORCSICCRhomsrj08PDwzEn2Pd2u92ul4t2AwBJSF9Wu/W6d++gU3nzcCo548PpeDhmToxZhpE1orXe1Ab3gJnLNJVpWg6HVDJJSjmlnInIw1rvp8O2LC1Ppvb5l1+vvd1UwxTBKBxN3dQhiOArgxQRcyrM2TRUtTeNCEkJkc2ru79i1WiEHY60PUACZCBCYAxDCuAgJ5GYZ56mVKaMSK0DkjMHoIUHMKaUEiVG6Wbr1rZdA0XSJNmkOO2dwIEHkg+YSESYmZiGqxbGbAuga1f1ve6tfT0sA8JVzRR8RGUjEvGgmAsk8ZQ8JdNQ80ASSTIvp0F8X2/tn//l758+f3r//kAS18v5fD5fr5fbbd33pgF3SaRjBLppa/22trZXM2fmRd3UalUE6N3DUUhEhotKUkqjUom4I0gWWAYqtdbm5oTNrQ8l8pdnrLW99yaMOQsTGUDE3SOrptvegqC2Vuu+rWfw1vbWW+u1hjtJGghQJNJRJ7kjQMm5m5v7VquqEoCZpxy5dU6JGIh4KmXK43emAHRjd78P9pCopAFFM/euzSOIKOU8T7OkjEhmSgApfcXbe7hqb61xYoJpmaclZ22td9Nu2tVri1GMEg3VS+E0gjnCo9Vqxo6RchIRmUrSUGxjzmBD3DSKmNrrbhgsyMDITETSLT49n9PegZIDaesvz5e///xL3ddlouOcaiehETOAOUlOVAqn4YkJYC4sBb92lMENxi4LKBjhFuYWFplxmejQLVmkDBDeW6tuyG4Re43LuX+e9kM+vH86HRbqde2tqYUBxaBwciJI7mwgiJlTWlJyiG6tdtv2cDdOcdv6z7+eP7+ct717eMn8eMw/vl0elpwSCjsnASIHAwTC9Lqn/TsUCQDAyCy7z8uHqokQ4k7LhiAf6UaITBgBbrat28ePH9fb7fHhdEjlUA6Pp9O792/LMiuHmSWGbt72FSn/8P7dPB/lt4/P53Wv2lXde239eutM0XszU/OYpgXRynSMYHcyx6bVo+WSJTFCOBiSuXWIAWECHPkf3/otuCTxPC2TQTeVPJWRgI2IrrZfbwgwLTPlDMxiIa7YwK4dauMihGgWMfg4gnNKE0gEukHXMbEcOEZCY3QAARAM4TuZm5MgTjlBWDhZeKoIV2vNFCEFTYkHDXbAtj2QWJZlEZHDcSpL+QLwiUAPMWBHZIEQj2TKRqBkiYMSJiRgdkDIR3IKm1mYp6cyvZ/mN7NMyauTU6/aDWqFUJwyCwnNnKTkhNr2l/MzhC2nByZEvCuutdq66bZra9Y1umKrAK1T12bKHox37LgFuAMCJCYMwFAfiIBvjfrj7hvm7Fg8Z2RHikjMb4+Hp8NhZknm0Zu3ar2GuzBK4iTEgog4uGMQQ0qPGIgOropgGJ3JOUkqucyL5NmDzd0M5jK//dNTyRwOHz69/OVvv/7yfPlunsvjwbRyTofHg5l0xPD19nK1FmroYc9nCrfr3jZXJdupO36T/joMPx5mNrC9ajFUZQj3y04MnPzoYhKq+y0CwVeKnVARbdzXzarptvfrGi8v/vzMn0/8csL3D/j2gU9HnmZOCcoMMVM36t21gdmQS4EpaLfWoGl0DTXw0am/V1hfFSMO4HG/W3/7Zeat2b7ua7klpsSoOjJyKScpOSeRO328tVp3a0oYFAzJ0ZlBinDOROBZeJ6nKWXNBQNLKhh3ORvKgD6N98vdPeWMiDmnMk2c8ohgBgQzYHRCBkBX703r3nvtFgTOr/dYGcAbwm9sI0QiWSTVuvempn4PBhqpQHDvwQzB49Bhj0bMHcAAiMhIjkAQlAsuS1qWlHMGAOKIAKa4p9UjMY5ECHh924lYRDhlS8Ultx46zkrGIYOVQcyLCL/nNhLC8GBr701VvyyZcO+999bNfMDnAqB3I2juwRLT7AHAjS2IZM5pyWUKxzJZa7fr7abWHZUZbrfrum7b2rsC8px5kDbxbq626pFZkFjMqhu05hCduyOiOTIQCjIGi6SUlmVBxH3fVTXCCSBnAZwASCT11pAAUUtJ34YJj6lnTimnfPdyIzMnFiGRAFDzrlbrfruc+7612q3rMKaKA75iZq33YbSLoJwTOziSmtfew2yotFvruagqeUmSSsoyspMSi9Nw7OMwWxEz3Xc8w0YeMXIx8zRJygMyjgAyfufX6tKGMLiPhhNAhHbtrfduZoYBHOA+3l0PRJEhqgBHGJpPdWNnFGIGFlHse2tdFQBZEIkx0B26jpEjEHLOnOdZppkkdYfa+t767bq+PD9/Ol/Q+5wnQgQHwGAAZpgTlcw5CSJ01a7RenT7HY4onMIZQjAEwtzUu1p3Qp9SnBawoFwYwFvD6xoGZg260rrF55e+TP3dG0g5BR6CpxHKjiiBYsBq6IoEnDExpQbctN+2dtvqbTU15uTr3n/9bT1f9to1wolg25WQLcrToSwJOQGggxtAjO7gXbmG/0Na9ahhgF6/eWSLEYxaJgAAAoMIEVWt7vV6ubx8ft7X7TDP+XB6Oj4e5qVM2dF73bsquGNgb1UyHY9Hkbztu3aDCHD1cLBe1829976rmXv0thL6NO9IGVA8mDhbtGyl5JSEiRDQRyuJCIlRBP8dR1nSUhSMC4tKKqmUKSVhYmYGt3q7obs/PeIcBq5qvPdYa7/ttm37iiTMIpI4EZMAQJBwyiWAerNetbc+TIpurr233l2ksxARoAQKpYID2VXmAEeJWuv55bIR5OUoU1ZBROzqXR2BE6ecxKyp9fZNKi8Scznx/IaXKR1TXgiyAylHUAd2TlBQwW+Nk89vU3yHHg6MlJKUhDh5w75v3nrtSp4kZnERA/EgcLSu9XY2Xdfr+rT9KCkfdL1db7vuW9+3W7utnz7X83nf1qpd+9rqy6q3zfdWJD+8+S7PJ4zsTuFd0ERyBLTWmvLQuX45Q5lpnsvx8fD43enpu8MpF6lRP/khTX/+8bvDMvda676ude2tAkfOUkoa8qAwa1t3NyAk5iw5iWROhOR9A4cskBKzMLCQSCC0WntTMD0tp3/8058A4J//+td//dvHv/z946fLLT2dlodyfJRpKSITywHSw76dn3/7ue17XiZz/+W3nxFhflwe0vHKNywKr+SL4Z/EEUCoowZ3B0ZkIWIifm1gKnm3ruEBZgM2jVEJOqFG+ODDIgAnC2kNtPHtkj7/Gr+k/e2xf/e2f/duevtufnri4wFz4SQIGbWwKai5mY+kezNU9arRurduXcNGJXmfnwzWLAKgD3nI17kFEjHdkyDrGTwxAlgS4CTM5K6qNaKptkGQcm3uRo7KqgXxkI+H08NpzomP8/Tm6TFL0trdAINhgGkQOCOEd+utdVUNQGGBCFNttVL3zkTMHt57X7f9crn++uH6l3+7/OWvn58/PdetUp
qQmICFcSoMIOCV6CtgHJEkJWYxg6HwgIjebUTqiSBAdLXWtXV1NfdAQvQwRaBAYAQkBiQU5mmCwzEflpxE3EeXPpiMiUoqhGQaXaNZH/mECSNICCAXL81z7urVARGJGQf/hu4bmOMdP8OEQ45vqv3bsUV41Nrr1rTpiKQw9du2IoKkjCyHI0yLmItDBn4Mn3uTADycniadahPAuu1ABB4TJ5mWJbkzDSXZDEjqptp6r3Vf83TZ10vdrr1t7nUEHyGCeViYqkZ6bSQxIyIz2bi1x2iACR85JdlWIoacYznMXy5jzDxN0+GwTNOccmZKRIP7Mk/LYZqXXGp0MPNtqwwuMtBhMHxDQF2Aidjd930HAExJcsk5T5JRUm3t5eVlrw0ikEJ1NrMIRBSWwsLg97AqQhhFzPByDuc1E1k4kpgZEogk4kQsROTMACDfposjBKBH9K61tm3b0azWqnovmnMSY2jNtFvtPRBKFs7CTC7ozDH8u2Pw4QAeXfV8XVvrRWSaMooICrEEeu1m6DlhKvL27Wk+veGynNf+6adff/r1028fz/u2MtnjnA/zdCylINEIMkMQBEYiIDVf97a2vjd4v3f7cokJipAIcSdEihAzt1Z77+GeGR8WJknTnIihKU1X7K5Nw0xqp+ezCW250NZOj6dTzhMJQZAF9m7bVrU7GDNhAfbdrtvlcrteb5fbuq+7a4jkRQO3rXYlh6G68VjdP2zNIJWnQiLUCdsASpjiF4zicDV+TeXFwa0bsQOEyIiC95j3O6ic7lI4RCL1vl6v588v1/O51zqlfFwOD6el5NS0bdv2sl6qOXF2To4enhkjCx1LqpNER7IIhN61bddWt9Z2M0dEb5v3lqdLLgfKE1LhNAV5YGAYRM45IQ4gnBPzGNnzsE5+KWJiuCUhAIG+pkEBArqDmWnv1rr17jx4mSCS8vHUWW77utZmW5Ukp2UuJExIONBUTMhZUszTSK1qTbd9V7UYBzZTgKlrQEyek0zzsjBimOKGSdcWHZJ7jkAOpKGMq1utt3Vfr63tJBN8c90HEs5HLidMQjnJIjwBsbGHVKDGrJMraG/oMM1ZJg50J9CAMLSVw7zuYNVBPUWwZBoerbtx0t0MWKkn5JQ/n28V9e/Pu9K6Wt2rtut2++3ysu7NPNAarOe6X3ZvMc8yPZ4QjgPOwqCMKEgaUXtVoENvZvaliCGiMk+H4+HweFwe5ol4EniC9JCX79+8YcFPddVeVZu7k7ziwgle+wk+nt6AGHhPEUJAVUVESSXPE+cp5ZllCiAAY6LDnJc5TyLPl/Vf/vrL//dffvr4+VJVvXcKE4qUqRQp81yOZduWWnvAOTNs67ZeVoeQnFMmNCf9d3oFiLhv4uO4pPvNjoWIg0bRjxGOYGiDuuzgwGhIToSA7GEYQQDOEGiG3dAV1/Ab+r5ZV2wKzaGptAaHAy4TiSRhEYIU7m7uEGHupOpFo7WoTWv3rt579Lv0dYBpER3/nVgpIlxdO/SurXVED0ZmyCmVkuZlmudpmUsAQ/RrSS0LKxPgMsvxWE5LOS3l4TA/no5TkeNhfnp4EJKdt7rrmLwmSTnJtCQkVNVBTmldhz609U5fPmwkDav7frmunz+9/PTL+V/+ev3p5+vL860bTDIxM2MC4gA3YxUnkm8RC0Mja+5qPiy+AUaIQowMATFimsdletzsiO+hSIQjfx0ZiSktMy1LnmdhQHegEO04QoYhyJ16s3tC79C3EdQeah6ANMZjFubDM0VCIiTEhAAeBHGfNjCRBzLdaTTffC5gOkwMMRpG5tZrRYoSnnJiIklMlFBmSqfep5fn5g6SiiQCNjPqpmCAKMSQMBIOD1RiKYhs4Wam2tu0p3zbynnLL9v2sm3PqtU9xt5IiAyjcUEAeLcpRTjAnb8Vd4o7EUjiAAG6i5fvRQzRYVnidDwcTmWaEYU4pTJN8+F4enh4qk8326pNUyEiVTUD9cGR4wQgwkSSSxoyJjUbcDxAkCRlmojwcqGAGJk5zCA0VAZ01/QAEIwkIBinwZcihllYhAEA2FQDnUjGYIIGji6QJX/b60eCe+uoa29diQARcQCgCTkjApA6wlB6j+40MAz51VBEWbdu1RV67a3qVnurLVIQUU/JA9WgqW+tC+EcQszzMr15eiiHJz7vf/v187a3D8/nVrf3DyWJFJZ77ysG2CFa14HEVffWu3Z1x99hbAEA2J1VgwAQE6A1tdY6jMQlJE5exEgwCajSPNG2uxmZ4Vb987nRT7rv+u4tnY6S5wWQ99bXrd2ue2/GkIQjV1LVz+fz+Xq+3m7rvm/VHCVPnSQNM0RguIeNZOHVpimuDQ9OcqeOO9yjEu/D+vgfGFH3Ugbg1ZxECHS/un3ZtAGH3cK6Xc/X8/N5va7aLS9lmaeUBcjV6l7Xdbt181QCARzDtPe2B8qU+TBJW8PQgAE9DB1cXZurM3Mwu3W3bq4QCsEIZuEW3gcJyVkImDEJl5JKkZyI5XddJam11dasq6m6IzJPZncoIgClhMK99bZXKsKImPPhqXz/hz/ouv/lp7+ff/n1548fAPDH9+/fwHEqAgDaKgvlXKZpmstMANrbtm2cqO7VLCICBbr2db9VTSlRnvLh8DilyVtDkrfhab9dajU0mh4CRd3bVl8+ff7wyy+//PR3IP/+8MPr/He86cRp4XQMBDPmniTnw5QykxTwLfZr9KZ7JerAWGAXJLCw7s3BiJqF1+puSI4g6GB3CZcPuxyaExcsy0EN/v63D+frTz9/ujxftRmrhveWpZ0OtcyIOYGndY+tSpZDlHeW3nUoVc/kfRZHRA/t3bZt5w77vvfevrAiEKnkeZ6PUzkkytB6pvKHH989zsc85XW73vb1tt08gJgFBR167To6+4hJCBMDjoPmDl6M+7EjMh3L4VTmE8uEIOaeCqcJp5wB6MNvH/7rv/z0f/9//m//23/9122ry7Ec53SYJN9be5GET6fT8nAKSZdPv/XnD/1ypUqt6RVukiGw4x8c9IteAcLD3UENAMeNmlNmFALGgDt6fGCY0Z0jJGKESDICMQInx1AYKtJ4tf9CAA4fKIMK3txpa/03vWz08kIPD+npNB0PMJXIiVMSJABUDzJVM8yKmqGptNGVaVH73cLjFmZgoSQ4QspeD0uve1s3mKZWUs6JUJAIc+LDMj8+Prx/9/bpaWH287m0/YZaI9Oc+O2b0+PDssz5cJoejssy5anIMk/HZUagutem7Xrd3OmwHFJOucwlJw/vvW3bdrndtpfzuq1dewSKJGIGwNZ129bPz9dffvn40y+3v/3UPp+1dZMshJhTIplY2bwzhXAikm9iB8DM1VxHMKwPp/QgniALOgT6Pef2NcsmIjixCENiZMHElAWnLPNMh4WzQJgToJTkLN3Y1Pseatqqe+AdMJyod91u67pp0wjzxKkkVHUAYAKme4cX77lsEgDMg+kKEVzGu//6Wsa+Do7hYN1NVd3cnBicQkEBnZOUglOi+TjXPq1X7U3dHQlTLmjYWjV7lYAIsbCIEJJ5QABLFqFSMBdLsud0E
jmQFAeE/cX7Cq5MXJIscyklc+II2Pc6XEXmYzjYzczVbTiIiZBRm7VvrLws/HBckj+eHh7m5aTm0lqeluV0enxTv2vRnLZd52lOzOHa1aJ1NUcLIJwRypROxyMRr2nba91aMzM1C/AsBJCmkk07os+TLHOa5zTGKK01AJoyM9899niHuvJYikTMkgHADRDQw+4X4nvcBJEgcvpGdwVMQBxxZ2sGIeVpccPaTDUMSAGM2ZiMQ7tpB4lIGAzBFAwAAa59r5tqmHnbu/ZQhQ0Nm7Koeax7u9W27i0zN/NAJJZpmt+8ecu5vfvw+W+/fDS31jvBdHeHm+m9rwdmvu47Sy9zIkYf02HBQ0H+9ugPDItWTULSVJij26WpSRADRBio696Q0QgwfM6yTNE1eiAgb7X//ZfL5+fLLx9ub57evf/+e5J8vlwvt9u6VbfIPCcpIruajh9u1fZqe+8oRp4SIgqiu2r3cARGTEBJQ27VrpVyQuFRfdyZL3d/0fAi/e5m+aUfM7grr5bRe7jIsDeMYa631i7ny/l8WdcmRNM0laloqLfeeu3WVHt3R+/kHIi91+v1zJJTomWSCxmBIkMGgXkGCHNDUpFc5nk+POb5wHkCyYZCzJQEkAxJA9icCSWleeblkOaFU3Zm/HYfk96adR37191She4USMiUKVFKHADWFTN7QA1zycvpxPOy9v5y2/Z/+2mv7XQ4LfMyFSFkd8UYPt+7VI+YSykoNE1l5IyQ0BCZW4B7r3WvtRKlJGUeoWxXaS8f+l6pm3GEQWt6uVzPL+d93cohz4fpcFz4NZkz7hkQEIgR5A0p55kel5SArLZmvg0aDnZAcNtDSAJQBxqLq4Wp4j0HDckhDDQCOIDp3m50xKpq9Xa79o+f179/ePl0rmt1tRCmNw95npeDLIAdQQNm5Dwt7+bDO5EDEzIDUYgEkbuP8LcKNNy/X628RDRP81wWciGlguU0n96++/5Q5lZXVTXrHkYojKPtHo4wNKpMJEyvU8MRoAkjTQ2QSHLKhzyd8vTIksOQPCh5QCDAy8v5X//1p//X//4v//Wf//rx8zMzPL05fP/D0/vvnqbDUnJmlpxkmgtNmTJPCV/quU/lzVOqzVjMvPVNfcXfKXyGsZqC8G47GTmx4F9TxUY/YBCuiAkFgwEQAiM8KIiHBANiOGcAHBFQgAlTIiqkDJtZrNYVe6W9Qtt52/Cw0DynaZaUR+cHOZMA3JHN7k0t96jNUqPWo3do3aE5jg389zd+9xFJFoNjm1imuSyH5c2bN99/990f/vD9w2nqupvuU8k6T/NhOi357dPDw2nKmcss05SnLPOUSxYA31s7X84v5+u29pQmEslTyblIliGWGnV6wB1mauqtdnPvTbe93m7r55frr78+//Zbe3mBvQIzCCOiE4QwAbJ0E74HWX95Le5eW9tHrq7bF/ulOZhb+ICPA4uIeO89AgAcAJkg372ylBhKhmXiaaIswINnisgkDmHqXV3dVcEUY0gIAQdzYq91Xas6dUNCTJzwHm7tdwrsyGwCcIoYaa88zN48TTln+TLmj4jetFW1bm7hBm4j/xaV7lrfCAXfEVaWVQ0QFEB714AgDgtUIzOgIAoCYYIUwOrRagWAgpRSFknCKDQTlQA299aHObBhmBAmoSlnkeRuTVvv3SPGY+yqar3VbmoAQMQs4q6qXe2bIoboMBexwzQtKc/RO0kiEZaU87Qshzdv7NBsyhMC1NaoNUDuAx4JMMRmqeQk2RwNsFk4OhMSultz1QgjBCIQoSws9zPdtJtwQszEDK8uvaHIGYG4ADQchcPHOhqV5gHmQAEY9o2N97WO8cSQSznO05SL8N1thyw4YicRCF2QMuA9YxTBh/9YCRzc3HrUtZs5E2HQlAuTIAMzx11iyEwpJ0jCnDJJIk6SUi7lgcuf//iHtbXmej2/vC38NNGUgCjujXyE14HVoDwMFRcmwvwtH+5+qIOrI/tEklIQJwA2D3Qf1G8LCA1DCMXCNCfYxN0cELuFNl1vfr3FbYVqwpJertdt37sqBBcJERsDu722vXvTUEeN4AgcAHEMD0Xo6BbOY9AaQLX7VrUWSMMp8ppbe7en3e2f325l4yf/p2iYb/87cLO2t+v1ervd3F1yLtMkWfZ9bXXrvdbeaq93/30Aysh58pS9TIUIEAxBAYgIUykO0M3ZLKWUp6UsS5pm4BQkDjwiukZhoO5kPQnnnJdDeXycl5mE9n+n7xFtzd2FU2TXUBTyFJCAcippkszMROHDEKZu674ytNs0vy3Ln77/Q9v6P//3v2632rp1D+Y8lQnBiVCEXeNlO0cEE6Ysp9MDi5g5ArCwub/b9nXbt9teb5cPrS+nx3fv/3A6PTzwQy75cnmp9aZrM2BH6Ra3bd97T2V+eHz4/vvv37x9I6+JqeHW6traVZYJCaxF7FFimWDqffPWe+217kOGadaq5CLzl7ta15uDI5RxSOdCQNbCvUeGfEpLlmJIm9qn82XbetuttpiXfDS7bZ/cbDosj4+Hd+/fHA9p3W6EXnLNTG+fvj8djikTUS8EiJgJ3LTtVU2JLKeHMi2pLF8+G2ZepmWSyW4aTI9PD+8e3y7HJyKq1+fWdxGcc3JFCmQC5te2LiAPLwEEgCEGMwWEmroHc0oypbykdGBKhBmSMHEW2uv+6cOH//bf/u3/9v/4f/8f//2vHz69IMXDafrjn97803/68x/+4QclCmQkKTkLYy48zccMVT8u8cYff3gCgKjX8+eXv//9s1YOf139r+uHYGT3EANTvAIgAR3BItSjdd33bh5TZhIGDo9wexUL3VOMMMLDDSGIgJly4mniKZMIEoEDdMPbjuZea325tmXGaZYBv58PeZ7TPKeUSMABzExb79yUBXOi2nCr6GG9NR8SBsJvxP1IxCKQkiDAum6E+e2705u3Tz/++MM//PnHP//ph5Tgt4+/mDlATFP+/vH45jgfp1QKUcKUKAvkRHPJRHC5Xj4/v/z13/6+3mrJhzIfj48Px8dHSTyKjG3b123r2iXJ8XgEwNba5bKut8vL+Xy53G63/XLdb9emCinBkTlPwikjqNkuMTORCLDEsD19yYEys9t6u16vte7mPngw7h7dAxUVghCBhbmUoto9NMIQQQRywilzEhaOknDKkhkxLIaTH5CBPKw37a07cAAzUwBBhDtYhKpttd62zQwDE2ImYqYho77PQYaaBIkYMSDu2ipCgJhKKjl9mYu7+Xrbb9fNwgNAOCFQtTANxYhAZjaz2/V6fmn5s5I8BEwUcFu3pk6JEUBNIwDRiEgizMKMVW78v+wAAQAASURBVHXbroiDE8ISMigzAdh6y/lQytH6FroDhAgmZkRw09rqVuu2792NmIWZmcx835u758QeOBijZi3sq8uaEKeUMGfiBJwICCmZR1PragC0TPM84TLPEb6ue2ppmqOr9drMtHettUcAEIMIpsTZEvpckmBs63nbW91vXTUJhcfIIO+1mZkcMiOKCEv6csghjqh0QUQHsNYi1EwdgJACwNXMkAEBSK331r/m
8ESg2yz83bvHp4fH07wg8tp6UzfkuAdIhrinxPNUzGOkk7sHgIeObGdVpdYJAZLIPFEpU8TYORwAeo/TJEKui4vQcSm5LCjJENV1Xpb/8p//4ccf3/1f/ss/vnz6bfv4a7s9e7uFNgd0DwaEJCQsSaaZAaJuPcwFgeHbtQ+ECIgeBhHMhJSneelV98sa3ZJkJgogC69duwcBTwJF3CzMMRCIZjfd9lC9be1nJG7WDIKJmXN4ZwNCDEALHCpWAhewJFSSZGEwNTCPPspORGJ0hFDz2mDdLIFLprui5f4JjlDWrwd/3PUR5uHjKokUQK+MrIFXiAHLDu261/12u9VaU+JpmfJckLn2tu2bet9bXetW1aqZZJXsnINhAA5BvQWMzFQPIJQsKDOyR7CIpMylALMj+QAJIniARSA4uqOFR86Fj8f5zZvHqYR2HxkqX4uYsX5EhAgzOhZmQWQYgL17/DcguLfa3Kxqu7p+fHnJR3w8Pb1/+/7HH/5ALPM8EzEhC4kIERMimKpZmFkI8V0zSSiMgEzIFLQgEYH7urVWt8rSWu06DVTTkpeqdqlmUD0FjF72lCEipUKUEPmbkjHcmmrTzkREI4yNJqGy29a79d7MakRzG1NWdXZOTAUiAclIMyNJyIVAoIdp1776RDQLTSmnUrz2l9veeq+tmUFJ+bRQOxIgfffd6YfvH75/d0pC3nXi6eHwiICPx3kuBFAj9oAW0ZuqDVUyouQ5L6c8P6RyQOLXih9ZCAH6rVmi/C7nNJlD17rXrbcd3MZxMTrsIswJiJlJ6A4mG3EnmHOO4L06AOc8pZxHuK6pIiZKjJwccW/24fP5X//2y//x3//617//Akke3xz+9Oe3/+E//vjDH9+/efe0dm/dtVvd6+X5nNtWMsdeAWg6HE/ff5eZ7DMV7de5TrnQt3NxAEIUIiYZuaIj9mdA7BHJATSgq9em7jHwfMPqZ4NX4oN1NrptY2U6M5XMJUvJ90CWAXsxxzZy1XpsDdaKZbNp83mxZfPDMY4dD5PMCZIM8Dg4ACJ2BDMztda06RcQydcXQkSl5GXmeSo5idcuwqfj4enp4eHhOM+FCHtvt+t1XVcAmHI+HeaHw1QEU0ISEKExRUMM1367rbfbtWtHpmmZD6fDfJhTye6mve/7vq7btm2tNwBgYUQe2MDa2rru+1Z7UwyayhwIUoC4nJ6OyGndvSu5VQ2FCESD8f365e611m3beu8BcM+GjAj3ZkqCkpMwCwuEi4g5u3tizkJToinTcA0KjRReCEdE4iGmCAy/hyklToEyMmaq9lBIwHqPC4pwGK6tcR9GHIKMUcm4m917AfevAI/wwezzb1/LvtVtq8TEKeVUXMAGidjiTvYyW/dm1oQ8lVrmJzOudd2qgshoqI9eOhGHOxJrx9b36/U8qOwR4DlynpgLEaVcpvlI1LPEjV0rcyghhIWFttrqtm/b2rQjEbMkyR7eqiIB5sRMEUYYiTnJ1zBLBBBEw7tcY/yvnIrkOeVtmoIkIVIp2U1VzTwCo6mNPrMwTF1VOxDdtu22bq22IgiRMdysa9973bsagYwSvlGzwIgEkRAJiZFkdB0jApDv7sJ7slH34TT5kp0EI+3TIMx6N+tfCjJEFOa5lMfT8XQ4QFDtXpt1d2AiwcGBGHQJJI4Iixh2fvdwCzVvqERDt+Nz4izMLAhjTKfm1jvkhM1A3YkwZ55yYUmB5BCS6OHh8PTm4e3bx08flp+pPsNaYdfXGAP3AARKKWWZJoZw0FAw9N/xeyLCRgID8iDGDH8fcraobs6IDAIo4RGKQyiTyafs6lY7mBFhCmC1ZqbdViB0NCBKKaUgIDQbEUnDThr3viZzYk4kCckDwkP8HudN1CmUwtys96jNKnsCF4xa3T0AKV5jXb9pxoSZdu1miiKvS+nLn76ONIDjdZeorbl7zjllsbCt6m3b1m21sLXVvfeqyoAcKEEpIBdCZlAw68TAjNbdAWTwqyUFBhICMTA7go8w89GCvet0PRCIIGc6Hae3T6d3bx5z9tu1D3LB1yLmHmAhkjnnSSChoRGACEHYdtkDIM8TBu61RigS7Kb/9uG3fuv/Mc3Lw8N/+Z//5++eP63rSuGj206UUhohBzQFmhlgqPr55YJIzIJA4YaEKIiEy8OS5rJda1C07fqp1Vp3DJ/nY6R8XV/6usfkiePp/YP2+vnXz63ry+fL+fPV+td9GQnCot7UVaYyAWfM4oLV+9438wrRGJXI3SKstVbJcspTmtL08MDiUREATFTBu2rb+3apFW1Oc5nnpRRJU+0WBt4VrEHEaYLTH59Ox+XHH79/enqY59Rq2xk0c346IHiRRrirVbV9bXvr1UwBAVgwLTI9lIcfyvGdTMcvRUxENO9br+Z12cQde7PPnz+Z7n273JH/2hFe/RPCSCHMUymIaKZDr5aSTNMJMEHSCJqnKaWsXrfNk0w5okh2o3Wvnz6//PTh88+/ff70sjaLd++XP/3j9//X/+U//Md/+vPp6YEkC0LTfVuv21b1FyOCJSOEbmsvx/nwNC8M/QqRaX9/+O7dMedvCjLCO8+JGBzinqDhr6FeaDHSWaM3D3cVY6REhEAD6+vu5uj2KmsbCzvJXOacRSgwnAbOZYTNAlgABoah7rQr3vYuN00v+zSnZc6nQ348yOkgxyVJYhGB8FDVtr08X18uLYDN457z9vrFRMfD8niaj4eDJE4Znh4P794+PZ1OQritt59/2vd6+/WXv1/Oz+GRUxqGMQykGFArZMQIa203s227BcS79++SzMfTm+PpMZVsbtpa3ffruq3rbd/37kqEEOEG+7rt+95adwfitCzlcBBA6Qbr3vN8/P7HH1Hybx+vHz6ef/3tctssMHtEQAPQL2d/QKiqdlVTBGLiiFBTM9WokvgkSRIzcwQmJk8JgLLQlNOUaBJMDOEQZlW7EaXEJaVUBAB6627BxFRSng4etO21tm1bV3Urmi0iSZqmaBjmCDRUE4Opw0SjSQBq6uHMwoyAYBb32OZ9b7V9sVi7R2u91p5yTomnaRm+qX3fe6+9GeKdYG7qGjfrDo4aqQ9BYEMAup/VAMwMXojI3LZ9PV9fRkXVqpayT2WZ5iMRi/DpdOKH0h/mc+HbNbf17Lr33s1UWx8gO239i3559JHKJCIpFw4nYSwix2X+BuAVEBbW3arbhMyS03x4PD16VUC5tdbDHRFbA0BStW2vn5+vP//60V3fP52Oy1zruu3bLx9ezpebmR2mnAT5MA2Vu/ZeayMoLdVtXd0DWUQQAhBppDzGq8AdB5bSLQCHPDBe9RYDsT0wMnDnN/dx73jdkKlM87wcy7w48sv5tu/NxggExtxTmDlsVEMIdFfhEBEAh8MwhKupmwJ4IhSiNHpz0H1EZThaUNNorZk7E005J05EPFQdA3KNQ6huFm5EmCUlSQDQVR08GEVQhDHYE2D0fldvvmr7IlqtvVfOE1Lc4UEBgAKUgBAiRSSITEjCE/mg5NY5h4aqhQUDLkDo5ADGzEAwdJCDyjRkaDD6WxEOruEQQUEMws5
t+BF4cKmJyt7RgBQcT0TAs0MzNUVQTQRn1YzS1W+QAggKEBKDwLuM4cYdO2KJ3n+nO/BFeGkEGtkqvNoAZgDouanFM+HbNjGqLfDX3feUIzhVpNRMRErdJFYflf/jijT2d2gqhUMDUjZg4dOzTJxdAMRRAqguiSUk65lMqOhu3QeQrVxNbUu2cABp7BmL87G6YdReTheHh/f6cGOdcYPKqlZU5TWuZUSjVrVQucG1JgZimleToeHj88PT6w1sF7UnXcmDFgBss8pZxzrS18OIbQNAtNMm1qokrN01iNiIJzwXvPzl+CBM9lgCG2+PGmT/POxRiiD2Deow2bZx4JAGCrmxmb96itnm+NB7LCD6vbJZqaZqkqNVVJKeflNB4epsN950x3AQOQZ+cwdAFQuYum6jsNkDvVGBxoLWmWotCE3woqtdaaclLVpqPuYgRCs9q6mi2ZF/GcQEcNI3HUSATw8kSQnWPvGtHpLEFvlkUqUnOpqchSNRXJuRKxD6Hr46s3t12M0zJO4+l4wpSWKgLVgFgBBQhRS20M5FpKSTnnnGsppuAZ0VmtTaDUohKzSkVE71wM0Yp49nxJYVzr6JVuTGfaTUN8LufCzm1fX2+3XS6QxpRv9tPj9ePDzceP+67bkIulfpNyUvLmAEEMa63VVwmRvCMDkNWIzwzUmg0poJmYAjAqQEWYa5mm5TQf5/wkppvNrWMGJTv3o9HaSGBuTU7H4NmCN++BAByDZnRszXQHFEiBmSBi1z23xQldYN85NDY2F7JonYSKRKItmM3zmHOtwOwRAzI7bz56g05qylXmnCbLM2Pu2YnA6VQ/PuAvv3VPRyc1KMCHD/BqkFd9DtGW7ISjG95eXX/27/y0m95iSXWa69NRpyKVbbga3n36etNnSJuawt1k9wt2p+oHoc6MsGolJ33nPTZ3oUtrElR1SuUwzeF4siIslUumPA/eDTE6RyDNlDepGihSEeWlICKAx9K5dSA7xzEG71ytpblmgJGupkVMwABspgBKxMPQe6Z5WqSY9w7YYhdiICRTFSSRXJelIGKMEQHUGrVJEmjOhkhVZc5FvmtNQ4iBvXOuci2YyHkA06x1qXmuttSOGbqwJAcFqnVCYQIvIppnJPEILgAmRamaBME4kFTKDFMV2CceMMcgHgGFUF3rzat65666q2vaRcW74+Offfmnv/jyr+24eKEeI5jDqlYETCpCqUYqkSiiQwVvFMArcCp5ruliFWEKJWtJ4lilWEmqUgXE2pLLxIzgmcwxApG0bFYiMBCVUZeDlaVORysLkSGhSCIKIXqmANVrSaUcx9OYkppNaZmtFjQCBHYkSqUoii4LlAWtQC2S9aSMp9z5Y/Zh78M2+I4QW0AsIKhhFVum+9PTX2/Du5zz5dY4570ZmjkVGVM2k71zwbFzToM2vUBb6hvXDrU1qWlVylgzq5PGjj3Hb0o1qbUa+E3vvXOoBIoIXl0VoBiWaVwc8qvtZttFZqqlzkxS1JF3q+5JCAywoS/Y3C9UV9aYoQEyr2XO5bC1ADFsVl1mSApMANZcR00Nmu3mqhG15gcL6Joz2rqINkESGFQBAEBQ5oa+NOQDCMGQCKD5/4maqRaxWaooFQdilkUB0DMF56Lj7RBv9kMfPZhW0VIk1VqkzBXFYPX3++7xAip6Xn2eT9ZgvQ+N+tRwN0ZiNqRUhRgcee66uL8OjHk5acmlipV5zLUa5FxTyjnNfXQ+sOs6JnSw9iFfUmBesmIunN8LJLMKlF4eL95lUZtrHZeFkHJ2ZFBSKksuuZho87YjBYDGWTKpNefp492Hp/sP8+kYCfIywbDZ7bYIdPf4OC/L48PjYRxzzmrmHfddbK5qwTsTnZelaS3RDAEc06bvQoirNFYMwXg9DVe02jyLGAEx8+pOFD0iRu+22x2feSStJ6mEyq4ilWpEatr6r4aq2hoI58tiImmajqfj/eE0nk5lOWk+QR77bfSWPVZGUk9hiODJx+gMgbtQy0aUYo9apuPTVFDMeRdAtZQ0TePj41Mp2Tm33+/evns3bAdEJTJR05aUTtboWqtxLhIgr56XL4oYQkZyjbDeHHqbeqeqSdWSS8o1i5UqtYpzyEyb3fC9zz652u+WZXl6evz48ePD48PpeMolV0lLri4VVV3SkpZUVz24qBmooiGD5WJS0YVuu7/3wZuJiizTpFU8ewhdU7wjGBKsVPJWs6z0GBQ0ldZMePEsaDUrRsgdbLZ+4/evbvafvnv72WdffPXVL/7sj//47v6DOnIeXdiJLHn8mOey6XoXAhEKknmCVYCHTGxqWbLWQmbEZI7UWAimkuvTEzHtr26YuY83znUOUIuYohoieKJAzmNwHLh6ErpIClFXvZ+RGbCRZ9/1uNnihatE4AOgk2UqkjCKQwgmHVIg3FexZVpyyRiNh2qxGqDMqlVAEtnCOFVIRbUIKmDK+PRE9094HHGaoQp0I9zf4+Ha5G2BSBU7oSvwn11f/cZn1zsnXnNNSY6jPh7m948PHMPnP/hsu/dF3t3dvf3zv4oPjz+z48OwTW/eYjdYlURglDhVPy+SEl/oCqJ6nNLdw9HUTT6wSADrEHBDpZpnBGQANkUVkZK11qJaCSm4ANg7IOACyMhdNzD78bSYgXcheFSFWotIQYRh6JE2RBADD4MH1cQVqSKzEYBjZQQQBWi1b065iohUZFJTICIHIjYVBVUkVG1P9CUaA5jddrvf3+w7jnMeyYGrRMWBYU0Fcu2d56EXEfBGMijHPjBrrljUrGJQZVZxWh0IG3IK5a7+4v7jx93jzSdXu3fX8d01RxRWJVQgFMhSepObPkbRrz98+Sd/9Sf/9M/++dPd/Q/6t/vtdtsPc16sQF6SWUUfAkXvOXQhoudElJnFa4VR53EcmzdKKy7Tkkuu261j9jXBMtUpVzAL0XXRx8CilmpxDMTAHAjNpC75SYGcE5BclgVNOKCazpOgS0zXhOBcrrKkVHNWZEYosggCoJpYsTou+W6ZxCweRy15RlNEICfgZi13okuubNbIMShVmtOdmtUqtYyaP767olLrcxHjYydiuVgRWYogKCAxMzpWZVWHquf4A2urNyIaOQBcHaNFEZQRgdkI0VBYShNJgAiaIyCgQAzklBwADt4dPZPi1RD7GIgpF4SaM0ITEgGcPe+MoNlWNOcHbnslayOP//UEjRewOVCzvmqeZ/BCBtx8qsWgeaef20nnnzAFACWD5r1rsKYVNULhulNshVQzW1C1AmpgVa11orZ9vNoOuz72Pgy932yiI1KRVGTOBXKyjETcKLL/VdAYNSBEZorRb7dxGylCjo66ELfb7dXtDZlWyZJTLaXUkiBnoyJQSi1LAdBaC5lfQbe2ejwTe6nBMP/aq/xvPAxRDIpIrhUASCGXkkvJJZs0NsUlsQfMJC3L6XR4fHg4Pj3WZUTvcs61lib2mqfpeDw8HQ6neVZV730Mw9B1LWMIwNKyiNRaCwEE79iRi3E3DM45SUVqBTAmCt4xOyPGCimBGXjfJMe+i77rOiYWV7vuGYmxFmSzRs60
ytUMLg4G7ZIYAJhIkZTG8fj4cP9w/+HhcZpGkNQ7ve7dtu8ctwUTXYy7m1tRJSRVCJ0MKsVUkQxtPB3uHpcxCRipSq1pGqfHp6dSCjNfXe2XtNy8uhk2vQ8BjQkBwc7ZQWs1uXKz9eWqD/Bcdf7K359hxrW9pCs9HUG9o82mv729qSJ93zUAcp7TOC+1FjMgJlNtWhVTWbd+rWAykKoiRWyEB+diqCbzsjDR8TSVWgHw7KjTqmQ4e96sAwRWJRupEL2kw6lBrVAKArKZYyOPQkYYOFwbSp5P3Sac0jJrLWg5S0VqHLUQGB1Vo8pUDaR57jgwhSKmAkzoiNQhG6KgFkhJj6f54em+G3ZuuPLRY3aWSxJLJWeGGsimMXYjuI1xVGcK3OYLYAZ2QAymRojecbfBfoPPY8wESoVUAYqQTAbGGYAcQaVabEqYC7IDKSpjEinTMUtObKnjNPhEXdGopWKp/CRoFSWDCRJZDNZvMMSAaCWXlKBKZzSY9UTdEGPPEQRLgWFvvp+KVW08M9eFoUsKw8PpKeM4ubo8dosIFVNldtU4F5dzVI1NVAEAZlCqLameTmOlxFIjkngXnUuleodtl63Wot4qgIlUI2SOgVxFVMRcpRYpparikgohdp1DRNUiUgyMGdn55rfpHCJhbVI+VSBEQ0UoqmCCps5zDM57NjRiJEcEjGyGIiBVFAGCIwfEKC9XF+fc1dX1m7fvjpuRUpSqOLtgvSZlnol58JG8OBAfwEmfMXiHJGYWq0L1QZlF1fJS56M38bGDitNSptMyT3k3pmvVQfd+FziyApia5pozK4/HufzpV3/6J1/92YfHj1YqXZPfeGS0CmAo1RZNDOajd8wcmYlZiYk9erZgDZZ/4VWJCMzkPTNxLZoWncZspqAWiDg4RjQVRWs9aAIwFc25qqEHk5KXhKBEDKwmikSgkZiIMnMl0FplScW0skIMPvoOnU81Wx7ZBdGa5jLNBaEyQ+wQtJY8ppKXZKUggFOFWrRhbGZapYIuDKc0j6bPQJnrhl1K6TSfppRLNe+48QykViVS71ABkcxMqxCZ80TklBgQGQjUTBYAZfKEDgwrOFBQxIqgaIuIArAhEnqyXeDo+qvOj9FpUY/MDMjGpolVUM1UgahJIktVaL0NdgGaw9wKEBgi/Y1ldeUCYltsUFdDsPMezp4XXlRoxZCCXhximlj6vBQ1qq+pAVBRNFzlx9BIxs1GA/HiY9HWYEUoZjmrZx767u3N9Q8/e/f6aheamBdBVHKuU0owJUHOAkzRUXAUXq4xL5LM7EKEgRcFGDQDLbssPgroguP9pn/3+vrthr0snaPtZrvf7Xb7fZqn0+FDrVlKycUWdclcJW9A4NiYilQsFYmY0L6Dwvytx4UTc6EmrIjQizWwfZuIHbkAzEYExGpa1FItqRQTcIaOgIgdsmcqpY7Hw9Pjx/HwuEyj1uQRisiS0uHwVHK9//Dx4f7+OB5TLkTcd912u7va7XzwpjbP8yjjOE4ppWHot10McSWHmMicM4AiQ/A09BHZVZWiBihM0Hdh08foXXR+6HpiLlScezZUNLMsmtXOUicDAOLzknuG0RG0lDQdHg/3d3cf3j8+3D8ejyJl6Fw/bD959/b19S6EIOYIOHZxf/MakdKylKaRNjWEJHJKZZnH919/8+3Hp3FeUilg1uxhRAQAur778PHjm3dvvvjss9tXrzebrfdsQNAS785C6SbpuYBn53Vfa601V+9U1WANPAJVRSDHHJwTBbG6ypLBpJZaci0VkbabnsmlJY/TcvfwVKudpqWU2hhr2O67u/jOEQFo1bLkUiUvS3rQpdbH0+n+8TT0HajmXLOoqTogIGQH7FbrmjM37myggCiCzPAMwppCEpwrG5oI1DJN4+H0WKr4EIfBfvzjT29ebb+5e/jm/uHb+7tlyT50Qxdi5110pmxC1UBEUiqGNQYGpSpk5sBMgIwAWB0YIJv2Ut3DcequT1cRaR91tjzPJ5HjcZTjFKZwpWVbpSsY9kb9Fl0H5AAcuYguGDoxQ2TiSHFDoYfnFoyMaZrLrL5XdeOJSiUhoiABJil2SloKM7CNWPIkRWpZPJWrjW539bN9veokOKtCTyf3y285P8KJdArqO93f6PfeuB9+b/PqyuV00spKG4JYpzTKvcKTw0i0UQu54jGVAphT+frrD8Opv3m9j2H44osf9lv69oOOs57mp6VYcJFdxMRVvIhH7F50LYlcQHJStUAupSoCaZgXNy+zIwUQsZYwqGjQ/EUZkFQRtQJnNUk1zZqqeh/JO+99rQXAUsqqtakMmwSFCEw1Z0nzMk5TWmpz3Sy1AihY9ox97Fe/A1UXPBKKQC7VMImpMySEvqNaaE5C/MyKcd7f3N4W/yMsw9X8cDiO+ZCRpeoCJbtgV4G94JbrMBfOMIk0HYkfgiBP3C3ostg8j/P9Nx1r/+5N3w2Gw2kcv/7Lb95/+/7NdHo9v339wzfODWYqRfKYWPOHSR6fnv7gL/7oF++/HThcXe93+w6jTbKMlgydgZ9rZsg7U2ZAD0BIHp3n4GIng+PgeHu5L0w4bOJmF0KHzFYLaoWaBFHRzBH1XQiBRdFAiajRPlUFJaOaka/VlqWYFSTvYwjBO08eKimIFfYYN9u0yOn9Y05lEyn6/vb6htg9HmfH9d2VL+qmeVxSkubTZg4FVRRqcYZAICJ6kd6AIghjJa4BxbvvsJUcu6Apn+Z5nCZAcn61xGm9f2Ze/dUM1oHS2joNhCBCkHNg5UpgAcLgvYJV0GpSDcgAgVbkmigydoS9QU65pCoqyEQoRIqkItWMwUhUcylqSoTOeWokl5XwgSvn71eO1vDCNhfYeed21ja8OBoHVnS1eHlWPzyzaO3yj8yg2fm1Hsv6v00RCG0NmARAJmq9N0Lq++56u/n09e1nb15/8cnr6+0GEU1BTEutc8puDgJODXMxz4nJI/4KL+ZXW0svThIAvtOwRTAE9YSbGK82m9vr/e3WYfZDdLc311f7fT8MpyM93vfHo8tLKiJLtaQgq6aE2Tv2jj0zMCPpGo1zESd9B9n62w48f/yNw6BdIrGVJCQqU1qmeZynBQS8k+hC8KEJ96WUZZrmcSzLIiWL1OrdSrJqvYk1qFoRoaUFgKqKmDoRSSktS6q1IlHX9fvdZhg67z0hlZw0Z1QBUGZkJiCTUlQLM7D3u02/3262m812t9vudoS8+BS7Z6IiIpIPHKMPkb0HAG3OHxfJTxt6ojXN4/Hp6f7+4eOH0+FJaonR3e63n7x5/eknb/ebjZRqAEbMvttd3TrnxtMpp9T80sWUclqKoJSyTOPh4ePDYU7J1qZEI9/oNE/jOM7zbFVzqm/evNlud8yOyQGRtegga+nK9qvFpYGISBWtIkB2Dv8+V+TIzJ6tsoqQESGA1prm5Xg8nk4nomYASgYoYqnIvORc6hrf4P25W4etmAkhoEHxaVmWaZ7zvKQqU85zqtvtEL0n01IrgRkZMvLq2rJCgnCpqgEEgfiyJwGARrqsMGUyFJFal9Pp+O3Hb3JN+/2+D/1
26wE2p2X2B0pLmufFbYi9Pyd/skOqis0CoJlqEiKgN6QiBasFleAoRiKknLlWOByn7nB8laagmYgz2iGnu+NpSU98wseSN1PqHqfd7en69af9/pa7TWMpGTlABjLggC6i64DjpYgxs1qqVOXo1Hk1KVUqVdVlzFKzzdNSktoIJes8ikmJoey2wnsdYt3F8mooQy9ZEEQfnPfoHFkI1UfdX+v+yroOVd3TU2Ac3Oa6c1dUuMiyLCknyBpS4SVpqQjAqlbHaVhmcrTZh6HfX8nrp+M343w/TicDjIG8d8wewKsyQLwslkQUfexiFxBca7aqSsk587Isno0ZWyZWy1IlRGJARFIABK/AalBFqhSD7CX0sclAiMhMzzQpFKm1aivZvWe1tTIywzWDwlRETCwBM1EIziMiswEgmqiu/r0KROADEmLw4PhS8wMicgzRNtv6ana0ZK58AJiZYOhj8DgEHwR7zIEJFpwKFDNTYOIMzXhRrS6aZ5GatR7Ho6gi9QCYF5Wc8Od3BuCJZM7gUczqkmsFhtP94eHD/YdlGd/G7TZyhtNjnuqiaVJTYHBqq35XqywlFZBlsnlOWK2D6P3Q+R3R2rJkR5tt3O1iCGTNhk0Nmx02gmMInoPnXFlXR30yBRO1Wk1NgFQA0SGutgsMwKCo1cBMhJi8CxWlJtOMw25zu79+c3PrPEd/Ci587+1uLvbxmE75VMwhMCODIlRlhRicARaxJZtVrRfiIYIj8AzefWelcWqQcnl4eDiN09X1zTD0YFZzMWmZ7tga5YzovGOH7MgQUYAM2ICBjNgAtUknxMDAE5vz1SQrI5IhAzpoSjdrSmhDh1ZgLEuu1VlQa2ncUK2aqakTqalkEXHOGRKL0EqqBG5ucarNJ+OyuJ7NzhFWb8eVCdjMPc/jsQlRULTxj5uiAxEZcFVpqLWwWkRqM6U2yh8QtyiZNbAXDNS4VUGIwbEBpFJicJ+/ffXjL9799g8/f3dzFZjRMBdRNUAqosE7IlZFEywVxmlBZNMXs/IFc2mW9megpyEA2n6iGdxDC2sHNusdXQ3d9W67326Gwamzfgiv3tzeXO2d9+xsf3s9zuOUSl3mLJZFRIWZfGDfxX67jX1XC6pgtXPQ8HcQmYtnYFOHoxmaka0J7QjQ9OtkL4zIzKzknErqqqteqkou+XA6HB4fWxET/dKHfugHC1ErlrTUnLQUNMVzLLOCEVPXdTHA/mpZak0iuda+751zh8PTPI1d36vZ8XiY54XYDV28ur6+2m36GBwTmCVCzQVNqxQAFS1SdZzGXKtzvB36q/32Zn+931/vdrthu0PEcZo2w0AX63Hnhu1+e3Uz7Hahi4amKkAOoFkPGVKbplNapvl0mk7H5XSykvddePPm9sc/+uJ7n767vd4x0TjOtVYkR+zYBx9ip+acB6ml5CVPBOYZ+8C73m07d3SUM2pLYV9HMqlZmpePHz4uc3p8PMzT/Pbt2+1u13c9MYuUUkutRVXguYY8jzIDELBqNVcVUF016rB+BgJyzA7FIxqSGWit4zh++PY9E6dUzODx8XA4nE7T3CTeauacjzGGEIio1ioqUoTZD5ttF6JJHceTfrwbxymXKqepik3zPHQhOOdBom9uZsh8+WjS+FVaZWpqLTvyRU1mBllgziiotaS6HA6Hbz9+mOuYZdoN2x77NIuUueZlmsdxnl3oOnWi5IwJkJmi87Uqs0oxEUZm70PFMk65SHVOexf6PjgmnUpKOR9munvcvX9v5HfdftF8mMe7w+E0HQSrH0f3/g79z169/d5v/9Y//PT73ofBOdbm3IsE6NH3GHrksKaEn+8LVXLiHfTMrsYRNCXFcSlPk86zSNGcMC1xGXmeIDC8fu1urmgYauyqacmpeK+p2DjROLulDoKOuop9RjfnWh+fIBvCFPpwdctv3fA62FYLnR7Gj/eHbx7nD4/jw9Mc/PD9L36w3W5zrrlmF3yWTdxgyZyTmxc/LU1OazHCdvDBdQYE5i+bGSIaun633XYIrlbhBCmjpJqXtPjgzHu3ih+aAWUbeQZowNWcmlfwgAQmqqVWTVBrZeIY4tD33nEVLaXM85xzSinF6G9vb2KIXcxSbFkKInTOA6Jkzbksx8UzD5uOnatVVEFUqoi1XNjVf0sdW/QQPOIZ7VfTqaZDXk6SxiWN02k8HehpcrMOPvR9CK64SqCZENnpUnUuOmUdK07FZqyLqNbKVvf7q5qXbz88Mh9vb175EK9evUolzw/L+3QHo2xvN7zzGBmdObOp6jxPvbGP/S6C0fHb4ymnihNz6kPdBeBAAQBNcF5StjkXqfdJJ6XMMcZN2G/7Pa8bD2Cm3b7bXUUwyklVill1HtiRc8DO2BmzkZpKK2SpFtBqIlqrmVQiNww7ZkAUArWqYlmBALAWNeHqUDJYhkjh7e3rz969efv6Onb8+vam8/HN7e3DePjyIR3qQSGCdlT6mohEPdvV1jvvUrXTVLRkEyVwjdPguHpP/AyOAwC4cZrnOdUqiBC8c+ykVlPhS2PzsrASXoKFSBRVQQ20koIZGpIAiDY7cACFRqJQI1MCAGfg2oqrygCOSJibqZXzrICuCkqxqqJrL8gQiZnWSF1s2MjFD/JvsGAvgDMAgOla2p8n7NUHpjWKtMUlEZMDsHXn8CITgOzsw3tp7zYpfqMVNKuxVhxVEwaktpMg9My7of/83e1vfP7ux5+9vt4Oy1xyyqBqqMQWGJgQjFW8ilbVaRq2/dDFyL9CUl4/N2bmBRFb7wUzOQRHwAQEyMDbIe66GJhTLiMrm6FzYejD0JsaOTfs9tur6TgVTmC5FNFaq/dsgX0Iu/0uDl0+lZpES2vNYRMd/+qFvhx62XT9a5EYNSutA9Kc8gGXZZnG8XQ8zlMCsexLDcWqaMyeQeoitbSohjWYkzm4Na4ZCLsu9n3XL5GFh75zzFKKY/bera7kjrs+Dv2w3Wz6vo+e11wLxJKWKtlAWg9OVc3UMQ3bzfXu6vrq6np/td9dbXa7frMFMCB6icQ45/e3tzev3/TbLXtfDEz13EdCM1ARraksc5pOaR7TNKZpRKu7uHm937y92d/uttF7QOz73lSJKAbfupneOQIAITOhDGhKUB3ZENxuiONmMKCsJs92+y2nWUuuD/ePtQgCpJTevHmz3+9CjGmZas2qFUBFJecl50UvHVMAaNdYWhOpeSevfRtobAFA9WtKUMO/0jw/3j8Qc61qhnf3D49Ph2meW4eLmJxzfd8Nw4CIy7wsacklN5HFZtN7h110pWQzm5dca52nSUqRHIY+UHREfLabwTMdi53zDZEBMBElWWMwvvOsiFipViGXfJin+8Pxw9NhLie1muZlQ70WrDmZVjEpqqlqrqBKqESmTGoMnonJg9W0FCGlTQdIBiQCy1IXhxo7JCQHUKyUejqePnz7jZrl/avpuBzGw2GepqVkrVZVnsYidZryJ9fvbq9u4/YKOLaIkqYXR9+h64D8d1RjQEwdQQfFo2Fgg2jBSIUecs2z1Ko547LklLlWdq0fD5yLnWZ0QNlhly0Vuz
vaKVkGVIdgpGRZ6jjnu0SnGnHZXG1ur25eObtmCVCrJJnH9PDw9O37+/cfHvthd3t7G7tOQavqOC3kHfnO1IF1pl3JMdXkHDB7s2DKKZe0FNPLxhI98xDivg8dANYqy5Kmg0NdedlNgMFsBG2SR1NTs6pmgECtbGfEimgITWnicM0kRyQAbaBL8xc9x6ejc46ZzbKqAqySRhGtuZjTKB4QSzXRJl8FZvZqyopgYKsv6ku0v0i5Gx++efr2sR4P8zHlUSU3XzLktjI6BG8uoCvexFDEYClYFJJYWQ2pEgN4H8CCKOZSDodDNww+dB5iyUVGOX59qMfFbz32DB7ZYefJoPbsldQoLzIfl2Maszv5vsLgd+T8BjohQHCl6KmO8zKn0+Kyu+F9JMeKKM8k5bYPQkTV1cMMEZ0PPmCM3jlnprVF+wmCmJhWq1J1yVKrGmoIGIfOexLJpqtpVK1QBaYpMYqDQYsFoi64TYyb2HUhXG2HYehC8I7glMWHFOJsZo2thIwQLAa42lgIlsU8QynYEl3FoCqAsQrqi70YALiPdw8lpb4f+tgH500k10ImkaCNKDRbqbZNQ96osmigLXyhsgkCgm9eOSK1mlpVKVKrgQAZWNLqSKPnQOhAm2dWF8Iw9B6s3w3VMCMlhbmYiKgJInZd10J2qNlE4Aq1wEVABN9ZNxH0RaMJWxtoTS9Aa4Y4K9umGiL3IRKiqFatqmKqq1IWvCJUU1uF2ECIjtAjImBLX2mfoO1YARBRQRCx7/yr6+0Xn7z69NW+cyAlpZxLSaaZQJwBIyEZOLCOQL2Blbxd5pub/e7ZTfVcrtDZJBVXJAYMlBGZMTrqHAePnsgxex/62PWdTyX94pvxMeL1NvTbvgAmtWWal1RDt93dvJmzzoWelieZU6kFwNTIh7C/uu43/SzHVJdapZVKZ/ZLg7haLfi8cFhzDLxoo2BlEr10r7MmmS9FRLSWXCVPc5rmPC1lTqoqqYqvmnMK3jskULDaEA0TZaYu+M1m6LpOVaWImTnHXQysLgbfD/0mdsMwxK7LOcfAKaUQO++9d771d5A4xoAE0+SJmYQBtHVMutj54K9vbq731ze7/W6722y2fd/HGNUsVHEvErl9CK/ffXKax267RWbNVVHpHM1gKqXkkqc0jWk65XnMacppioSbyLvonEpZppLm2HXb7TbG0C6wllykMpEjqNWgTS1StSTQ0nm82vRVnevzuOQl11qzNSEUEHUkVdOSp9P05c9+fjwc53l68/p2s9umJdWaGsdXaj2eTqfxJGflqK2edgYrI/zSfF1Z1gbgTJrSHsyylVylpHw6nhA5p6qKD4fD49PjMs9VKqI5phjcZuhurq+Q8ECoWkpS0wJaHNtu0w/RgYl3dHf3NM6LiYhpRgkkGHqmwITNeAmMABjBMXlmxvYgkzCp48j0nSwYU1VTQZylfDydvn06PUx5TqlUGd2yw+AwVGBEcN6Rd1ktFwVhUgatZkIMDOwYTWE8ngAASH1w3nuttszLQdOenQUP3pxaLVCX+ePXv1jG43R1X5I8He5yzYZeyadqSfOcphgfj/d38+N9f3PNvNWaWoOPyJEP6CMg24vuOJLzcUe8XRaqIky46ZwLW+/iNJY617kkgdp14qOYYXAu9JwLvP9Ap0d3z31kQpzUMFd3LFydQFDJWUoaT1lYM/povas3Lr42vCUboCgWieSH0O/6tO2XYxiDQwJxDF3Xs/dVLZW6USIIgQfPG4SOoATPMXTRB6tyejocnw71ohwxQykR8Xa/v9lueuckzY9372teht530TGTqoCpSUUCaHdRxapUJWEyYwBEIsdOnSPm0IXdsPHOmVqpVURaW5mI+n5wDlU15QwtMZ5QFRu1ABGdYyJyBKoqqfk3GzEhtQYoo5GKmmhJWDLU/Gy2kUr6xf1Xf/nNXx3zSaxGdl1E3HZElopgrh4UtRaBrFQNikI2K4ZJMCsAWnNqauk/Du3m+tU0LU+PD4en49XNdTcMV7stgFlN+bjgnBRtFvHbzn/vptv2iDpLfizLkpJU9rULlTr1nfPgOvGU0Zhi0pRyTnmuWEPn+qGLxGk8Tk9Pcr4vIjpNy+m4EEYpZOqILLCLHXeDd55zMctVVGtF1VbRoIilWqsJEQJZDx6ARZOpODJAyLXOS318mAmCsxA57AffB4eS8zzWxYXb3fc++ZQZf/nNL56OTyKZQEyLCRqAi7rdxC7itk/O5w7QOQTi08Kl4DzbcYRUoGZdZn25xLh5TmYWfQQwUxVVXDeXjGiAggBEzZpdzMy4oRHn/BIEADIkQS4A9f9D2Z81yZFs2ZrYnlTVzHyICAyJk+eculV1u0nhE0X4//8IyWZ1N3nrTJkJICLc3cxUdQ98UA8AWXV5pWmSD0gBEAgLN1Pduvda30LSwPBQh0ABxAA2i65G4W4egkDIQggkDDlnJpyWRQOKmnSLVQ2MIJgopSSShBkQ7xJdGkT6+Fbs/3h955rgyHwB8GHGHrIMHxU3AiWBkvPj8ZSSdPW9tb1tHl4yCwuhdLVt3bt3eItXxHHXb1bJ79EuOJy5nCjNJT8c5z9+ePfx4XycJwhoXXWYgWwD6MhAxAiEQjgJggSAtnlbl/Myy4/oCwIEYMLESDyy6BkgPEwIS5Y58zGnKXO6p0kLMgf6bWttu5SEaqfldHi5VQe6PF9668JCMk3LaT40kRWgDqHJCA4nFkk55SSiNIZf/39dDoB3M8sPuRYA4COr0HpXogjstVrr1ru1bmaBHuqhTRtnoSRUJBDBtJtZSTLl6bAc5nlGAwNjxpzS4TCjyOPDw8PDw8PpJMy1tdfX15ITQSyHJSWJGIQbJBqwRElJxqQDAUQEEHOZpml6/+79w+nhMM/LdCil5FRyzkM4Vcr3TgyLHM8Px4dHyfnuvsS4px0imHtre13Xul7X2+V6fV2v17bvZcqFMVN42+pKQ7eeHs/LlCBAVfd6c/fBowMP98FN4bmk0zLpwxGCnZrxboA22iJxD50iYmR2iVbb9fWqvRGBa/tgH4hIhOd5GuqZkRT5HzoxcI/9QiB+YwHfaXkIGHGX1Q6JwTiibOvqAVtt7nBZ1+vtWmt115EdNpd0XKbTcUZEbXur0hghrNe97atPzAgl0VzSlKV30u4EQW4EngVLQqHRl0AAAqcIdAOAoDeKywBzu8PvH9AwiOp21fbS9tdWN/Otu7XNqIFMRRxTAQBJwiJmNhR3GIQRFEZBjJiYhUegoKl2FmSmcO4aW9jrukdE4oRZIKx1215et9bMlJCDVIo4MRmEubkFYLhb3XS7ue5hOYZ6mgUksST4nT4ZAMADq+Kt4W1VtyjCpWSWJJwPU25HTzLVXruvCuaBCKFha8WmvEF5AQEj7UYE85IDmYtNHhV6UzWNTUkhFVgKPpzj1ExqbbHvoE33zhGzpMfDYu8eU5kOc5mmPC+LA17XerutJaeAZg1A2ZUhEnMWSiOhaF9vddu+PWOMuCQ+T/nhsDw9Pj4cF+8tCe3bRSgIHSHAgpgDgjAgwg1GbqsjorCknBnVCRBtW
DyQBz69a4e3hNHBiEopEYH7iBR7+3m6966IHuFEwEjjWHvPTg/wABlJISyRItDAIQy0j+C1+9fpvf322z/+8pf/7VbXlOj901NJB5kzIeitRVcPV4vquDurQ3dXhx6kAx8RhiNV1lT77oEsi9BIM+11W4UxzQshqCmEJkeI0G6MTLtBJqBwh2rYjXNMCYPBBFGIKPECiSKagjkKpSnPIH6U6XiYRHnvW7f27eDvHtvWr5dGiOGiOvZ1RkyIaQC9zE3VVUMVRtqQ+yhiFMEiek7kKt0UwZEBGSGAIorwJOXpuDwej0wPc07n4/RwPhznspS8lNxUX18vX59fVDvBYLS6R0OmUmQqKKxCIEzECVIpO60VAuy6gRmYYu/44+4kyAkjAMmt130XiKXkLJwYaYzHw4XB1XrXCEgRwpwIhDFRHlTSHtiBW4ASmqCHAYmIIEI4WFdt5tpdI4SoFGFkhwBCFGYkFibKZeasTpsCpGGsZLlv32/mZiCEAHcz/49UlbtxiYDGxzHgdHEPWQwHsyFeiZJ4zund6fjp4/spz7XHdavP14ualpJSEgLZtr13b6oAd7+qQ5g7futy3zOaKEnKJS2lHKby7rR8enf+48eHwzy7494C3Nw0tPV95aiSKQkJMWHijESCINrkOuelyDeeKiIKESPkRDlLlszEhAwR4ZoYDlM+T/k8lTnnXBIQV/dN+3Vd123b12tmRIRpmh7Pz5d5/fL5xcwOyzEhEOdSJmYZrppwa7Vu23rbNikZiSQx7HgX233D8iL+d03g/wEM859JMeFhvWtrWpsCIWKYoTtFhFmYOaJGhPdQhMwEgpIAorduqoxlKtPhcDgui3eHgNRlmrJkOT48/Ou//vPHjx/mab5eLv/2b/92eX3e1is40CESj+cXmEcRHgiRUjrMCwAMUtxIzZzK9PT0dDwcCueU8mhEiwixsOTD4cDfNDGIkjOnHAEejjTU7vf+YLjWum/rbb/dbpfXr18+vzw/99qwCEGE9e12NTfO01RKWAuTcO+1btdLbc3MmGTKEwuL0PGwLIdyXA6HMqd02ez5srchnRindjMzV0MjlJILI/uqbW+//fIbhM/zdDqfTseTSMpZWt2Z6T+8MqM0GckZhG/Fy11LO8aXSD6qd/CI3q2ZtVrVfK/NAra97bVq0zAXhJL4MOXjXJaSImJKPCWuWcx0vb1SdLRdmNatWt+FY87S0RBiSjzntJQ0ZRYaPT6EwAgyDdN2b8ICAkBrtu291u9jiyHKV4ibtovuK2hjiCy+U91rQvfDAsgW4R5JJDO32l01fAgwxo4bgpATl4LLMqvpHf08pn2E3eN5a4b0cFhI2BRr3/feGvWi7TDNx6dTnv1y6boZAxbMUz49HJYkgdgJnAAiyDGBLJ6TEyP8R4tlV/v1sv3t5dJqBpMJc05Zbg7seS7vc3KDvbWXFW/bvjdoXbfWEvPxMHmk15uvV9pWzzk+/VROR1pmT8kY41Zx79xNqk9BC8uxY77V7evzDffP0JtHasYZ8cP59Hg6p2k+Pz0ejvN8POxNf3t+1Wvz3gDa9XKt19qrBxGBQMS+Xfdt07a567durTC9Oywfz8uhpCmn4/EEEB6w3mbvq/bN+iCwBRLgCL4hcAzDQOF8XCjNrow9fO9uhhGmum/7UFzlnEZamaoS0TxPxGja1tu1Vh1xn+au+47oRCYwjrUI92klDEqCqhM7EYkgIoZhJwgFU/tGVtLeX3779be//GVt6zyXo8jhnMs0sYgR4kZuVR2rU3Vu6l3BDTQASIjcuoFbRFjd1suLmnGagOVwKOHZu66vr1b3ETNfMp0WmZgeNEKYt71DW1PbEMjmifLCxqm77Bge5IiRiLpZ69URjqf3KWMmm4mOPFm1mzHNDG9UAo9o1W+35uYQgsREQUFdsdb7Iq8Wde+tmWkgkEgGBHPtWrW3VsFN55KZiJmcUYSLpFPJHw7l/fnxn/74x4/vHg+HecopMebEJfE0lbauX15e//GP3z5/eWndEBlYwFjdIZq7uKmiAYYQpRwPUy4T0xW3CkQWHu7k/jvZpZiDm2ndXDuCpSSJUxIh8BHWQmBEDgREOGSwcA+4GUNqUIvmuDlVRGcGYARnpjTPQYjafduxqVmvahAunIgjNHxom4Hc0QGdyIB29Wo2AmgCQu/dQIgB0ow3K9HvlD33i+6D8rhDiN8WOHzD8QrxYSqPx+X9w/Lx8eHDu3elLE3xtvfH67lqZ6YI6M3DMadb6zpGJUxAxIxIxMyCxEMGJCRTzsdDfljmx8Py4WH5+HR8d5rnkhHQHd08VF2b1hrQQlJ4uFmQo5AgFMEikMXlh1heQiyJp0xzKctUsiSWAYcHCM6M55JPJZ/KVESIuYW79r3W67av+z68jJetPV+3375eylW+fHlR1Xluc5JM0XU0tJFpjIqtt95aN3MWlixE8I3x8X/wcoAxsx5pUT/+1n2wZxZmgQiDwuY+dGLAAMPB7piY3dwMwjqEJabj4fBwPB8Ph2VZrClEdG0sUubpw08f//Vf/vnx8UFVb9fX6/X19eVZWxMWDOcxVB/F8Eh6ZJmnecqFRURSSpkILSKnfDqe5jIlEpZ7kkFOWXIOoHlevsNhxwNAPKTdDAAYFAau4d7bvt0ut8vrdn29vLxeX67ruqG6WdTWWq0lp1yyEDKBm/ZWw63u27q+jkConHJmTDK91ROJg3Tqr7IyGFrD0LEXIjINH/6AF+TExL3X2rbbbZXEz88vKeV5mbOIEPS6vzycTz8UZAC/syu99ZPoPlD9NtwgJg4RSZJEmrq5dTWP3tWhdWvaPIwQU6KcmCnA1XqFu0ASRTjCzb11ba07o7bm1hkj8YhHgiSQecDhRIi/I4rGgNIsAGjknQT0pr323u1bETOQCntvL9vt63p9reuuLQAA0bp1vIdXWpi5Dpbitt7I5XqbhSeiAAGyMLBem1mwMA6uLsaIvCXh8KiO3Cl1JMJqvLtsasa4qWeIqSRkUCdD9d2TkzCe5rnklJgEg31k2jJmwZIxpeGz+vFlMY/b3p8vrdYAhQIpMUt2SUqFiQlFJDiLNOI9dBhLgpEyg9Le/bbDtmcHs4ic7GGxCJsoXja4Vb7VtK7iJpQTApq2Tbf69TevO/NkUGoIyrQcDnkuidGtb9t1bwauSWguKUlK/I7FnbTaFQNaa6G17m1ka3y7svCH8/HlfGRmhAhAljQdzsCkO/UtqptqG0h0RLpPrImw5LIczj995OXhaLzsKi/Xrba7n0+EiMxinqd3755KKarqHiIE4GaMELU5BE3l0HWcoTtAJw82H7FjAE6EdLfiDbUBMLswMAhB3lebpvQ9qCMCm1I1McAe7bpXWqcpMU1CBMxuZAEOGDGedzM192CRDMCo2qG1qq22fesju7vMZT4SUt+ra6/etGEuKWQiJkkM6GrWr7e9YjtQZJk4k1jCGmrOoWYtWkRyYDW9rtdO9Hh4TFISmzAQUkTQzDjxN+VVOGiHWsPUkbwU5kS5UCkkCRDv0YGqoc1UjZCYiIVzIgSJ3qN7vTXoMWYl
wZyXfD6e3j88vH84/fT+3R8//fT+3cPhcEhJABzDh/7k5cvnf/zj119/+e315WrZiTiCnEZ2UXTtjIYASEHJGCmJenASFgaiGAmF8fuztNTa676+fvnMYB/ePc5TKTkLUXiHCGEiCAhHwrnkAAYSIUzkjB4Q3b1CrEHX4AZju8CUUXJezmdkkn0FeW3WzbSbhgWrO1oLRSILDMfarIPtzdaq1721vZdDioimHTwcYji4hYQAeLD4IN5Q4W/7y1tlM7bL+4OKCHdBRxjSXMrP75/+/NO7f/r07t3DaSozcvaQbrCr7k331q+37cvz64pc8vTGP0UkHHmDLJIkMydEZhFmOc3l48P84WF6f5jOS1omKYmyII8xt4V7895cOxIAFUfc625egUFB1cW8me5m9dveT4hTyaeDnA7LPM1pqJthWFZLYTymtDAnFDDY6nZt9eu+vrZ+6727IjEwO+De9OvrLTG9XNZ9b/75UhjPs5i23jsiiAynoseb4DCllJPdwTk/BCS95Q389zsxHsOUHW//+6PRCpk4DfT9G973/oabuikCOgKBBxHGYKyY9iqEZVo+vnv/8f2H0+E4T7OLEaKaSk4/ffr06dOnn3766BG//frrX//617//7W8vz19LyinnoSkZVNfEIklEBIVySkSSS8lpyjkBYNcGgFMqwpI4Scop5ZxLKVMuBYjn+fs4CQLIgT2QgQEIHULRmlt3t7q+rK9fX79+Xl8vr8+v2631FoRUFV6u28Npf3h8WOZ5OR7mubj5vu/uuu3XbbvUfSVCzJHFioyUDPDuVntfa11v/fqi6yu0Th6MFJQkl2HeYiaR5OzFZ6fYtvW21t9+exYpc5mWuSQGDsPe//zpp5Lzj9Xl6K+9JUaPUgbeVM+D0QDuo2iiJGI+wiB7U+geVV3dACElLlOWxL3X6+XFtBGTqsPQ5gdKSmmacz4SGkAPdwAlViQnBEFgcgYaqdhMgmPwjneYwR3H5GHmvXXt6veRGgCAh9fertv6+fXrry/PX26v12117eRh4Q7oYI7mAep1367Xy9fn55c1y5xY7TTNnCdCiE3b55e2a0iaJBHxePtBhNxzeCYmBXlZDSJMoRv3KBhUlTYNJE/My2miFCC99y5AS16WeZ5KSYisEVXJEU8LLhPlGTmB0+/qmIDo0be4XFSbJAzhKClIunNDJi4JA705dAw1wsiHXOY0H8h2E1lL6nSiZYZl3h4O/sdHYsTPOZYVXps8v7LdANTLZAVVXF3b7XWv61W4avCtIaZpeXiY1QzAtu2yrYByOj389OHDf/nTH4/Hqev++fnX//0v5398/tvr7bd1qxFgTs4Z5QcNmcj7x9Pzw/HSrO71crulPAOnPB0SGQ9WUW+bb2Y2dFhj5J/LdP7ppz//63+dHz9sxs+3ffn1621dRwq9pERMEDjP09PTU865975v+/V6aW0n4nlZfkrTh/dIyObe+r7v63p77fsOXV17V22tqTlDEDESETqFM1lJcljK8cAi/v79IaV70S8kj+X06fyxMxh43/S1vnBxSMciCzuOrp6gB/aB9uy9QVBJJZeEAdu2t7p3HfZb196JBcNyommR3vy2ra0HhjeRVdERW9XWWuvNskCc8rnMpYT0rdVKXamjedgtd2AoW22fX54bBCdyn4L7IozTEYGiCBTB700/DMcwBhDmVKZ0OKXTOU+TEJNb1D1UgZAEKaBTOIMU5lJmt2ml1PYeFr6hcaA4CkxL+dP7n/7Ln/7w88en90/nh+NhXqZUJmTyMDf15rfL/ve//u3f/9tfP//661pv/Mic2DxIIOUcCrVamCElDqRw0K6+dXM6zRCAAAEAAElEQVS3TBFMxhyh9rs+DID8/e+/9LbfXr4cpvTp4/uSckS01nqrjD5PiQQQkHi0mDlACCJCNdTBe0RjNklIJ4ZCzuiuXVmy51mSJMYCmvet7XvrrBEK1IDcEQMD0BX63le1r+v6crmte3fzu6zijkFyRBycGLOx4PJ/rxEz4qZBCIXIICBGNykCAglLlnfnw7/+/P5ffv7pDx8eTsuMyMAJKAWwBmx7f72uaHZBIKKpTHQnOhCyEMk4j6aUhBPRKGn4YSmf3s2fHvLTkpaMI82NeRTkgWEYKuiRWIRliPTBau9aq7p2TPu+r/s2AnHuRQxRSWku6TDP8zzLmFjcY5GiMGcRAnaD1vS6ba/7/tK2q/bNAygys+Q8zUspUwS2Zuverpd1ry0z2DELRkCkRIERAUwgSSRJLlOWpICSflDn/R+5Ar5r33+vVSAAGRXM3ac+sGrubm8CaUNCpGAKQg+32tRdl2l6OJ9++vDhw9O7w3KYSgmxIfgty/zTTx/ePT2KyPPz89/+9re//vVvt+sNAZZ5OSxLlkRIjChEwiTCzMwkRJTzdDye5/lQSgGIuu+qOvz5gkNHnnLO0zSVaQainMuPRUyYuRoHAAKaRZh5OIRq39dr3a77el2v1+26tr2bxUgzVLNujkgppVKyCA+ZkLvWfavbre0rUQhoz4XN3LF332pf1/1yWbfbprWBKUbQ/WUQYqGRA4JBSO6Odzcym/vlti6vl3cP52XKc850PkbX908P3xZlAADCe/40E/AQcL/Z8QLGqSdiZEERM+csHmbeBpzAbBAVgglFuJQkjK3urs2sz/M8z0sqBYj3qgbAkuflmAWZSBLTeum9AjgBUATzcBbyXfQw6Ab3XtOQkA/trt0Jwj9iYty3Wi/r9eVyeb1cbuu6t31YQsHdMfa+kYyRfSeynHE+ZCLetL/c9s0kd6FM3aN3G0ogIgqw4VhkoYI5gBgJAcPC3dXNAUiERIKSgxjAmOUvRCTSm3jDklJOhYmxK/iO246UiAgl39G9v39jEFE4CxdE9JDq2A16N0Dt0QNBSmIg6qqqXbsL4gzAqObmCqzzwZ5mfjrjz+/007m/nxEctTFBWpY5Q95/494ggwkoulrXfe3rtRFrs7ishqkYYAfYTDvgZV3n5fDxw/t3j+d37x7PDyeAmA6HHmhAtep13fe+dbWuujezt+KSiZZ5mpdp833T/vLyKrnN05wFsxSZHD3CsHaP3oIx3IEj5Tw/Pj3+9Mf3P//T4enDppCva3Ba1xUDRLiUWUQAMJd8PB5FkvZ+vVzddOgyRCTnkmRKkgCga9u29XJ5qevqrde6r+ttXW/runXtjCPzzMLUwsENIXLi87mcTgvzmx4O6Zjnn84f8uO5Wv/l17/HViPUrHVhRwRTABNypAh0BQ+3CCBwFsopM9F6u62SkBltyB2ZIDCc0Ak9XM3DjFvvl71tndpee2+9KyrmBClzmnMArShOHJzAsYNjGAF6xK5tU73dLoAt2GAqJ1mSMI1T8I/PmKRSJsSciiwHOhz4eOJSyB17g87ITCkJmkMQRsgYthIiMKYsStrNzcmDPARhJn53mP/w7vTz+4fHx9M8TZITSgpEB7IOXbub9X1r++qtoRm7YNDAdJCgBTTzasHqqOHdxD2QLAABhbEkL8lRnfn3Rcz//f/xvyBGCs0fn3JKSdJ6ve63tdVNGOH
xSHMuiYbFPIID2K23uqs3p3ARKFmW0/n4B+CDNa/X7fr1uXbzagtxziX5nEuWkqhKIEAuINnu8K1Qt6b2vG5///zlt8u6VxNiDxxzI3xr570xeXBw9u4C0h/WMiQUxiKwlCQiXV3dIUAh1D2TnObypw/n/8s/f/qnT+8TD84qkAQnAAQLSBC69QwVXRlhKdOUIUiABLiQJGZJTCIyPN9EKExz4WOhQ4oygjTcmYiBEUG1hzdCn4os81FSSnlRR+qkdX25XffWDPvLdXt5va3bbm9FDCIycZa8TGW+pyjH0FXDyMNBUsfa+rbur+t2re0WvjtUdwQUhpyn9+/ff3w8HbJs62rme617bU6wS0yJJDEQRO0AlESWw7wcDqfziUoBkd8+f2m1I34L2/nmoI7/cME373rcP6YfBTEAgAgJMQGN7O8IH/HG5nY3M7kjUU6cExKBWW/bKkxPD+efPv306cNP754eD2VOIjGYIUzTPB+XhRBen5//+pe//tu//a9/+9vfEPHx4enjuw/zPA3KESMyv+XuDE+rpMNyePf0dD4/LssSAet63fd96G/CggCFJKe8zEuZZwMQkW8Vc4Rr116rDbG5mQUYNHPv2uq29jYGxq3trWsP97Gc5pyHjng8qADQezdrbr3tu+1Vt816s9sqVXOazPG29y/Pt3VXA7ptzYCIJxYb6EjEJCklEREC973u2rT3bu4pJURUtXVdX15fS+bHh8NUpuNhWeaZfxiNARMmwsyYeXRgDGK8b0hIjG99GCAnZsk5WXhTBQUkwAAesUwISbgIE8S2boTBCKfD8vHD+1SmL6/Xry+Xl9crE5zOp4fjwezpen357csvt/US7ugWakkSIt7lyvej/A/P0v2N9+HTozf88LgVM1u32+v1cr3dtnVvtWvXMI0+7OX9cnvtveYyEcTpWPL87mf+1C32rV67em1IKEVSKWU6zEmAKcLunXTiJJSTjH+QAQhQm65bRQvJqcypTDOnDBgGgKGS07vDYt1uL8HMiCks9HpDbLF2zDO6Id5H8z/guMfGz/N8PJ0fDXHbsFXU5l2jd6+tmweTkQe05q4qjpOAJA2soG4eROcn+eef6Z8+2h/PcGTVW1uvKDodcHpaHiefXku7VBYMDA1V694Uagc325q+XjaQBrns4HZ5DRGSfD4/HOa5TJMDqKNILvPT+w+6dX+97l9v6/Xl9fW61V7/9LIOb+N9SSBCFi7ZdHt9/oJAj8fzw2FZ5pTKMVMRWYym27aZtbAuMk2H48NPf3z86Z8PT3+YjmdvbQ566H2eMwGUnE/LOecSREBDVghmSgFt2wUZBOZleXh4PMwHFkakCFfVVvdWW6/tdrs+P395eflyeX1u6w21g6pr2/b6em21NVCQPIBHE76xVRCxcPrw8O5P//X/1EIp4vrlywELB21ti7CElqEVciEMoUoICKbmrRJLOcwi01ZPXVtrKwTmfJCUMLC33r1b7xHj7YvW+/PrBZB6M3cI4ASCt+gSfQIERi+CBxaGQMLMUlKaUgbOGazv20rRKGNBigUISIzFid62fmY8HOanpxPLnDJKaWXylOLN4B7CCEkIUBCYAtQZGcx1VwoUDyJ0ZgcwU4FYmI9CB/GJLJEx+hhNv/HZGClG0O/xsDw9nF6u19gcgG0ICTCAFYLI0SP23g1c3dJI6yVm1pLxMJk1Z4ck8SPrUv63f//HVOT9eQFKpZQppx0BMIhFsqQypZIHH9M93BGCzGHr3t2CkRLnPOXDKT88gRz266atO0Xba3t5vdw4sYHtrXUN6EBAqCwsiQDBwk2b2mZ23frlVtetqxEiNTNWFBwYtZFCDegeSG8Ti/v17U7GiT8L5MTM5BEWb383IKf007vTf/n08OcPp58eptY1IJIgC6IMwEc428I2i2eGxEQ0VHgEKCGJJLGkkUc9EAfCMCVcEs4JCjuDj/4Sx50+g6EUhgRjA5CUgYsbSkFYfe+317V31+frdrnVreqPhiARKbnM87wsMwK4m/ZuaiOq2BEDYFO91Pay97X1xtgBLYADCXmepg/vnj4+PUgYmAohIohwEkxZchFmNnNJFSBKltPD4Xg8Hs+nPC9BPM/TtlYAiN+3Vf5/Xj46Mfitmvl+BaA56hAkoLm1VnvvZvqWqhpE4wA+tlEngnkq79+/+/Tx49PD43E5llKEKcBFRBJPU8nC2vvz16+//fLL89fnXvthOZ4Px9PxlFPS2iJciDkhyuARwWATp5SmaT4ej6fTmZDmUrZt27a17nuvHcfco0zzvOR5amojn/Lb3dzp1KMmgiGs9+ERjRjHKx756gRAjFNJU0lJWEa2cUo5Z8mirqrea23r1m9bu63aqjPe1HfJ3eL11n75cllbUJ6rwd7Cxig1cSZ2YEAIRhIKi+FjNzN3H0xrj6itXy63w1LO50PJqZSSSvpB3wOUmEviLJSFvsGU3iioMRCWOvh6A7kNQOAejhi9gzo6eABBjPsd2l7hu0fp8Xyc5sNQS2rvWdI05cNhIVrylIKjXIu2Zq31WuWuOhv6cRjMSb8XMTjOMEPmck9U+sHK5+7bvt5u121b933f9rr3Dm5okQADofeODhiCjIdCp7LMp1Nz/PXz5eWybVvzAAROyElYEhuEORgQxBDUjExwxFAmKMzAKMQRwDlLlsQcBnvrhFEEiShJJAKbUMR737fbNUEgZ1SgLINbOd4VfEOGf39dgByICJEDyAPDArp6rdaboxqoRW2BHhOSkZJiddfAsJziwyl+frL/+tH+eAr2+E113yMJMdicoRKKB5pzBIT33oe8yYFBxLWu7WLN815NWBG4lEMpOQkh9q4vr7etBUkxD/WCfAhYzEpredt5q7FVtx/WseHfSDlJa24dLKIVKAk9jS1m4vKQ5lxrbVtYZ4rD6fz06Z8e3v+c5hNyHhHjJQtjFsKpTKd5znkKIn87GYlzqJ9Pp5IzCk2H5enxaZ5mIBi6glEJu3qr7baux/P5+HJ8+bpsL19tvei2aQU0q5LctWtYmDo0/W4fIYAkfFiWjx8+avg//v7vfrmBQnOr6JRwLnkKSqu7GrtzMmQCtbBmnUwzIJVC05RzKqDAnBglwNxc1dwcWJiIWBxQu5pD12HGFwyxDlEdqjPjTDnDATgHAIKIlMJlljhPC1oUIFaKcJewZgFOPbh/X8uJ6HAoj09LLjNLBDqxgoOat6aqgUFElDOPWtu8myqY5yJTkmlisGh7q7vXAIbIGJlhEpwT5sQpJUlCkuxOacURDigpnc/HfTu/vr6o903NQjENOZSJEHJ2QAxDbOaKahQKWD0MnLPEXAzNc/IfUxPlL1+3h1M5Pz7Jclzm+bBM2qacmVOeynQ4LMLY+qa96UhKBGyOq2J3Zk6F5jSdpvk0TYuFtFDwmsjWvr18fr6te28bo5+XRAh7d0ImQCQuKYVH22IP3QyqIWAhDKQwh1tVdStMmZEh7rDau1p3kOhCx6L9A7E3MSUGxgCMkSpgMexJfFzKv/z87l//+O7hkAobuALivGTm0eo3M6foU4JD4WXi2057d3
UFhwC7D+Tvgd10BwcjZOJZsJAxAJgNchrDIBODYxADADOFSCJOQYJIqZCUHsS1x3Wvz5ftttvvzBaIKeUyzfM8L8sCMeJd3c0IEQmCsFvcVK/ab6q7hws7IIAzYklymuf3D48f3z1ErV73uchxyYG0TOnpNC1ZkNDMahtFTH58ejieT8fTcTmdAmlZlutlDfsGO76vr/+h3fK9E/Pf+a23RdlDW9daPYmG9163ba2tdlVzHwb+AYvCMLfOBOfT8d3j0x8+/fTh/YfDsuScS84iPLxerCTCbrbX+vm3355fXgjpfDq/f3q3LDMHwvc86syJguIumIJviKFAwJJyziUnKaVkkZVkww2Ap2mapnmaZylZo37j446XP5e8LDOzIFHEAIlabRVCTeWwTNEX3Xrf27buAX6Y8qEkIRSGqZRlWQ7LwkmQIKzV3ttt3V9u9XYD7ZyoI3epa9PXa7tet1tHbF6Nb5vuCk45lWlelh5+u12q7uEEHuYWHvatrGcWRPdYt/1229R8IkIh+kFGSkSpSJqylCxZ4h7cPgCTDIA21N/CGEFAESRO90JsL1L3vWnT6GZhihBhKik9vHta5umwzI+P58Occ0kBS4RZbwFEGA4uIsvhgMKHw2m9XdbrdY0reUhKkmQMktTN7326b08UBtytgT+2YQDAw7a6bvul9bW27XKrWzNEyojELJjQPYxtC0kwzz7NcH7KzplLKq/t9VrVvGQSCgZDs+GECskBGAGqdwlgWEsU88RzlsfDQiyBbIGt27bW27oCwukwQWDCa+bIuTPG9fpZoiV94OVAeZJy8ITEFMMzDt/kZsN05pfb/vV13fdoLVTJjR0SILqh7eGbeVXvGgShCQ3JumO02gTtODsc4DHiJ4GPDEb4OpGoFarWbxCvurf92tt2wHMMTa61qhFUpuXhHOuuv1xq6908A+acyjwfDkvKqfX25fnly8tmIN0AWKapvNzqy6VvOwEemCsiRHyPHQgID0OMnPg4FT3M3n3JkgnVPNCVGPL88PDuAbG1DUJzovl4Or37aTo+EFKrte1b23drNdyQE4ZbbxoQLCAMQytIlEs+P5zdHAUll1IKIvbedRiImUsuknIEHZByScuSl5JeE18/26o9ektJluMBq1XTvbZ93b48X75N1RGRmISJmQn4mMoF+bpdGkI8HI4Ph8fTYeldf/mtWSAgmafWxdxa73u/QEeWAEoJUkpW3c01uiQEQg00ZBaRxMISCGEOOjSsTMKD40Ruqfe5C6UcIC4eBIiCJAlTcPxhfjjhlFDcdesbbKg37b1GV9ztmy+ZGY9HeXrKZU5I0ZRb81ahVau1eZgwiaSUCmNyxQ62rntmflxO7x7O7x8OaP3Lb19fosUIIAxD9Jx4mso0z9O0pGkOFnCzwUAGJKScs5xP1vbry2mv2/b6ot5jYUwUACLluByYs7ua7tovbs1U1bR1106MZc6BplP+fXZSc9AgkEKpBNLYcicp8/FhmuaUJO5eEoAwDzezar4bOgjLLOWY8kEko3lotbpC34QscUBY3erXlxdhzPlxngoXBCRjNiYoGcy11uqxd68aFhRAEW4eO9rIMbizBQGBaHA7hgT1Lgz13ztoEBxAPQB85CINumkiOk7lD+/Of3h3PBRJHO5OTEtBxNisN1WrFuZZ/DDzaUnr1tXUujJEgIY5YMAQngaNqoqEFqFjikk8IRAaQjCiUDBGQAgNcSwzjvRKDkQEZGEWAeLucNv6ZW1r06bx7W6IKOdcpimVknIeTiJVIwiAGPkN1m1zvZlu4f1OrAEEYMBEnIgYgszclcHnxLpkLuUwl8fTMmUGADfrmgEg5XQ6Haa5lKksx6WpLcucc+pV/1Nf5fs19ku8+8RGeDPG77pj9z/W972tW8kJg3VAaSK+RSsgOKEL0pRIMM8lP57PH95/+Onjx6eHh7mULJyFOTEgWxDLEOzeHcJZ5N3jYwScT2dh0a16qDBnSfM0SyYDHWd6RGLEQX4zUwAQkSycOKEH+lBi0+FwOBwO0zQBM2L/8QcwPpdpmljuLQ1zVzUk6H3DiHs7MEkpqWQJt8woBIwhRNNUDofD4XAkIfe2gfe61XXte7WqqBYR1hWAxndobr1D79uuvDVvwcYsGfJUBKD1zfZWewX71tKKuKNdEOFexFzXvfY+eyZCkh/OLwjEzIkpMQnf/zYEAiIjAOKIpB9FEQIAYUhySSlLSsBI1Kkbq7kRYzDzlKd3T08P59OyTMfjoeTMjDnxMpfT6RCAuSRJLDkFgAH2pubXpt41+M6sgYgYOHC/j5DvXEkMBOShRh5ZUL9/xra23rzt6E1AhSKQg1hJBDERgEFvSgSHhKeJl0LGvMyleQIu5lEScDRrW7g5ggEisgeOH+63uergjmSCqaQsAsTqEBY7erdQCwdVi+j1ULBkQPB9fV6tV8YpcTpMOAmkFDROHWPRQoD72MLcL7f28rpbh95BO4YJIkaQkBiCatUabhjC5NmN+m4ear3PxT8c4OdzfJr1ARFW3DW2jaqFkzt329d98/1mbcuuHu7d1bR7OIrk+ZCcnWTXerltILIwZjfV3lurtQbuPfRy6798fgmkd+/fWehtNVMJL+BFVUz/Q+qII3hiXnKC44IKS5kJ6Vb3tu0duRzPHx8/zMclXJmgFCnTXJYTkNS6b9t6vV62fW1tZyZKAu51uzXcURKVkpd5oL8oIyNDBA75YpKIAB36zR4RORcmEhZETFlEgAkSBvYKXckDOZVzbgZbb5+fX79e18u66w8mTaR7NxeISsmcqIUp8fl0fng6H04p1xbrETsw1mQhqTJXAjezujkJo2RGmKYZQswcISQRBlPAGAXq6PxBmI8DkiMCuyIMGJgjVgLPxMKJuABwjEAcIiKsp1ORHEa11tZ69KhbZQsZyR1vOz8isETOMc1AhNgkIvoe7oCBEHivSyCIMDNjzvl4OJTy84enn96d350nrXu9XbYb7oQaYBFBKLnkaU7TwmUCTjAKr7EoAbII5BSRljmfj9P5Up4vtAcAIqckeZqX8/nhg3De923b8Hpdq8GIdAtXJpkKRKbMtEz0u07M6TAdl6WUAoDr1q5CXV2ypJI5JzNXMwdCzoLh0NvettabOYnk+Tgtp5QKWtTrtdbaLy9e14x2WiamRXhq2gFiPpxPx9n8Ht4XjJQkyA2hue1dW9duI2HKPAIIycnczcAJCBkBIlDDQQNHLLep2fd2XwR086oQ1YlD7e7hJeQpybnkp+N0XkoRZIosxImKgIOBbbbXunULTDkfZ348pnWX27b16IyBiBYeBjEKKWZEZIAEfMrpPMGSI0sggCBKAuZxSg8WCCNEvm/zEBB3+/fArgRA7bZX25u1H+4FkXKZSinDzj169WVySMJ8V5c6Wo2+R+8YxjBcv+jBAQIQvV2/fsm6kbe+3SYBOk55WZZlPh3nnDjcIsw9AQART3NhQWLMOZUpT1MpOWk3sLs2Cd8aMXex5w9qmP+oiblzYL+BlbSut+12KVMinCGQSVLKKTVXC+3hhu6Z6DxND8f56eHh/
bt3T4/vzg9Py7QQogiygAgiowCOsDaRNE3x9PjInB7fvevNTK3XPgqrJDKX6bjMksVC7844CBYkcO173bdW97lMuUyMrK3FiBVmPj08HE6nUiZ1H0HnP3wumJKUUoYxbWBt1czDLq/W677va6sVIRJTSWLq6BrawEmYlmU+HI/H0ynCt+uz17rfbvu6hjsNQG3c8xILyeRSKqytXte6Vu9AGty0e6IDPOScl2UJ11vdtXah9MZJIn6D9pu5qd22um77MmckGMXf9w1m9KYokJGIBoFpDGsBkRiJaZRHBHFXzYCYODJZRAAFdiYGzIkwMx0Px4fHp3dPT+fzknP2AHM3NySclok5HY+HZVk4pdZare359fLLL5+vl1frfU5pKSWzDYDH6Hq6m1l4+NCJIQTec23je2oaAJjrttu6casLGh1oB6wgzUK7N6QiAtpbvQrEPC/znMO89t5qgONcCjFP4hRsyc2aBnTHFtADKOjNqE8QEtavdautNoVlilImSpIKTIiT8/WmXy/tcqnbDO+O9P4sIqh1b+71MGucSy48L5BysMQA0MDvAl7MYr3p9aJoog33tXtYziiSp3miEN1U2RGZSkrL7Bma7wQ+FfnTB/i//Z/p//rP8U/vawL99R/62y3+3uiKhNnDMWq/rbiv0bau3UzNqburhxHcHQzAudr189eXre3v49HDtrqHw+FwlnwE4uv6+v/8f/2v61b/9Oc/HQ5L64GYQ6VXrJu1/btrbHSuKZSCRHg+nRi4pHLb65eX599eb5vF+anNTx+WNM0llylNWVgYkPd9v748v758fX19brUiwTTPNE0Rertt5g4i0/GYpiylQBAgM8owCI3hX0SMUsZM7wIEGJxrjECRcn54EoioDQ2Z0gwwnc6QUlXLf/n73z5/qf2HET8GgAMFE0MiKoKTRJIpL398+PT4cIj8qmB+XFC54K7VBa8MKHRPV7Ue4BAgh+OhzNhdwR0wzB2kbLXte6212cD7Erh53YwAyfsklidJB9OiLTdKkpfz8fjAXLTu4M7EsktFA4Z9s6YjVcC2fWeEcj7mJX/LqY2RsNW3yWlkIqaEuXRAiDSpm2mPAFeTwIL4eD48/vzhw/n007uH45wJ9NVbYiQkAHGAHmyQeJrTfORpgVQUKe58fwc3RsScu/e6e0DMUz4f59N1tkpesJwO58f3j08fnx4/AtDXz5+/xF43amMvZJykJM7ztIDhXvB4yPKDTll+fv94XJbTMkPA88sVtCWKRWT0bdVN1c0CAImFPIIoiDmVVMq8HKf5wJys675t+7q222tYSyJSpmk6Slo0PMLff3y3LJOO3pAqE6aUe2s+PMiD8nrfGRFHyCJi3AmiyHQPfHMPjUG7Mx3S3e+vC6h7NyB3xoE0gggghDH051Fkorn5vu5BEaqI0beb1b1u3YCJDox0mug802uKaCYYAGDhiqTeHDBwcP+hMB4zHAsVgcQAAUIoQsII+G2JpTul6y6rCghHIAJgIkSyALUYBdy3V38Q8cc2SURJBEWYCcKFQ83WbfPw7tbBlcADcDix7mas6K1dXl64bwkVQrNgSdN0nKd5nqYsjOEYgRAUd/ndvTQZEFhiBkJ/q1b+x7oY97vME2EgYH50WIO7131dr5dcsrsjUbhnyVaK9w6hheI0y8fH46d3Dx+ezu+fHh/OT8fTQ5kPIsndiIAZmEEyEzPioBmTmeWcl9nylGvt27qHuYggc0lpnuZlmlMWC1PT1quHozAzho+qRu9cUKLEYjkDkiQ5HA7TiGTyGF6kH2+Wf7iIKIaiXEbQdwiTCCkjEyYmcmIMxkhM6Y1AIylpb9pqXdd9XXuthCgpAbJkoZRTmXLKmG033hq83iqBzSUxsjcLq71uiKNOSjuRAYynjZlHJ2bg6jyid621q3ogllzylH88woznmBmZkPjeS/O3ooGJAWlssBRBCExIAMwBiKrW1ZsOpcpoSmHrvtW+tz5b8BhDEmdmmqZyOInk43LIuQCQWaj6ttXX63p5vaEbTtEXteKAcK9i7ovAWxn9w/Uf2CruXte6X9fWdyCfM0tKifCm2JR2cw7PpgAVhaaJS5Y9oGu85b+SMAqjICKyuWhgc0RDcvxmzhpqfuvYa1V1bAHoyJEYXIIS5Sklc9ih99ib7tV6xwKIaopt27ett4UZUwZJgRQA+Ps4+vFJWse2UVTSGvsGAY5zp5nTFJwBM2BBBOGSZE7GgQ0E4XFOf3jH//KJfnq02tv6Er98iecbXIM0U0JHYFNShd6htWhNe9eSHDGEMRC019q6OjSLve0WejhOkoVUb+V2vV4pHahIre1yef36fAmA0/GYMvde97XWrbW1a9P4oRc7anOmEWzMjAxIfVu/XF//+vV506iU/tSaIVGZ8zznaRzB2r5vr89fXr78dru8uGkq2ZmidwPdbpfWO6aEjOFPTIjAQDhksciIiCR4X82QBvPhnmULiDwolSw8JULfOxgQUiCc373Lh8WRQPL//te/TofDt/BXD+hqrat2ZSk8TeV0XnYrMh3nWQRe9FJtwzzDNEMF4nx/D8cOEPcwW2ZMUw5ODiNa21Sd2YBEzauqWgcIFhJCyuPwA9Mx0tnj2HdpkTjNJzqV/PTAXOzioZUIRSgZk5HW1kI7uUdwuIYD3Sca3/YXKZRnzrOQkIZHGwo4B0RCBg5GmiVNnKY5P8zzx8fz43E5TInA2r5b2zHGxyoYYUAapMEG7IAGYOYBwKNgxLtBJ0QqM6U8HY7TbU/lksFxkek4nx4Ox/M8HyUcpo3yjYDAwtUAkHOiMuXDsWAgoqbMvxsn/c9//EMu6XQ6kMMvv3y5Tfzu4ZjKpN2FB7LHeu8IkAoTieRcAlIpZZoOh2MpU0Ro2/fXl/121bohAfOScuJymA4P02kBhNN5YcZ9382cmdyj77W3NsTLNHZ+BCRKmYAAMXiIPYVyYkZsBhHuQD5kFu5qrj9smDEAsA6ATjzEgEAITJCFEGNdt+v1dshTqP7yj89bbdNU5oIzd1etW2sh7gCUJo7HCS4TUgcChwgDqK579O7oxsScRWaBpdBcWCgYEWHkYzIzOgR48NBljwRFRA90HxmtTgjCJMQj9PHePf9utbr7rsKDAKdShEktYxiR11a3fTe3MVB3DAMACPagiAg31b3uL1dClYmhZMwl56VMc5EiQ6P9Q46mm1vrrWu3rtbNzc29RzT38MgYQiO2GoDiW7PlzTENAAEOQT+AYn4gxYR7rdv1+hpI015TunNbcJqi7kh2zPTz+4f/+b/88U+fPr4/n07HQ8pzmmbJMxB7qI/GJkfOaZrmkicP2Na6blW7ttYM3N2IMKUEMxDgkvNcpqmUlCUA1FSYLAyJWEQICGIkgbrpwMMKE8k0GDFJZJCFrTfr8n3Od+fl4nclZoSbgQcTTVOZC2cS23WQWJEwMRWRueSSEwK4u6rVut9eL9fX131dzSylnHLGQLl3kI7H88PSrYds1a+3TYSP7x4a8i+v11vv6+VLrVOZ5sy85IkdVd3QRUac0J2rBOaj0A/CVPLhOB2O
3xdlQhyhRHmA9scYBwIHdD0IeficcaS1j7sfNZ8HJGFhxAi7a9PczG7rft2255fLx9v7d+8ezw8Py/lQ5kXKRJKYWYgjULvWpoBsDtqj1h7aBbCrBsQI9h4YmGB88+4PKTXf4V34NjQDgHv3Yn99vt7a7uzllBljmpMCXQDWXfdWJ6jn3PJUysQpydpJg4AJHJt1Myd3EisMJSVHaY7YgR0DR5T8PVgqAKhModaRGnACcg/vqsCSpwUZMHlr4itgdw2HYCAPv25b2vczxCIc35EdP8rNxq+JIEMv+yu0za0LSTiHSWsJDCKKjk2bM1GBQEe3SeDDU/npKT9MWPf933+N14t1QwzKwktQNgFIO03M4qDNcN97ay0SZuF5Sk1jvb6+vta9tW7hFhpjJTAMbq09v7xopOmIZrYs0/V6+/zbry/PX+a5QNhtfVnXa9t26xpvVcwgQQtxkiSSCcg99t5f9tvn2+tvt5dumOv5VmvtOkcYkqFE6Nb26/X1+fnL5fk33dfR+sbe+roFRF2vrXfMKc+Tq+I9TIbD32JgBisRERKoqIi4+z2QJu4eM2YUocQC3e4zlLDz+eHw8FCWhVL5y6//+PTzH3LJ3wrlbW/bvt0ut4k45/P58RPRhAFG/et++dv+l9b7Gf9YpBCiI70NeZARidEDgQkFOSEloZwiovfWm2W28WADAtwcQpckc05zkTxTPgYeEQ5Rpe62zakcjhM+PsDjowat9YtFW0QUTWdru15ju/i+sRJiEQImU7Ou3zpkkuj8dHj6dD6cFvVY+6VarVpr9d6QQpaSD1N+XMrTND/Oh2MpS87kfrs8123dt3Vdd+sqzJKAMBxQFbbd160da2Nhj3GkZkYUROQIdSJKZckHaC5y00ifw2s5lnLKULzi7WVXiFBukd3Qu1ntgeicCBOmY8ZA2Ne7zPFbEfOn9w+Dna57W/e172mZFwuKN+g8AIaDh0MfDOVUiAmpzGWa5pSSqQliJggMxkCkzFySpDnPMk/HKRBy4YgY5cugRF/NmTmXUkpvDlI7M7P4aNFH6HAhpJTmZRJEaoOrEeYR4Rah/ylAaay44wEO/G4LHrk2t71etu1cEVVrrett37Z2lSik3fR51WrMyXIuy5QS6ESmYggeEQYI7iPBE92ZowgtGQ4TzZkzuwCE26DPj1WJRqzCYKGO784DwmEEMxOURKVwTncH/3/OKsJ78CQl5lxyiTSgIGNN94GHBAgcnZjx5yEgNKxa31qbKFJhxJxzKmWALu/809GwH0gzM9NRDtRWa6+1t261W1WNgAKR/ofNGIdv+hbwe0H2w++67/t+vV0dqWuXJKNOIIg50TwvHx+W//Lzx3/9L3/+00/vz8tScg4U4IRSAlFHCzgi57LMh2lehGXf6vV6+/L582+//Xq5XQ0GlDAxSZ6mwmnOeco555yTEJOHd80WFhhIxJxSEiIMd9U+6l9CTDnlUlJiQozwcA2ze4n44+eC37fP8WthnkoBnxGMzG+JE1NKhJyWOc9zKSUzM8Dd69n2bd/WVqs5KPDuZCxJmFICyZLLfDxmh3Xr61q32627Pz0dlBjZv659627WrJEHDJhmjzGVRmAy07cqEwDvulRTB6QROvO798VjNDkR0ANHokfd6zABJRIkHEdYAhhADTWvvW+t7bX3oZxUULPeFSHWfdv2vfbezCiX5fHhcDwfH865TMwcHtpt31ttVqZZUg5ENXe11ntXtbBB2BuJZUTf6kUkIgRCRHZkIfod94IkzykvpOHRwwA9BD0hkrt3q1sFqqc5JJMkoZQwUhi1sKq9ti5kJRExlsRCZCDo2AJcozsMSQ4RMjEIcy7GHgDK0hDdQ80DkcRzFsJsDXxrQ3ylAUTYLWzf876qq0Mg2LjLgHu9/x0M69F232++X8WrDP8ZWY/u+7p18IYeOQkTChmZenfv4eEq69X+9jf8m+m//aXfmp0e8+MhfRBGh6gegQACkdxCNWrTrspYSkpkDrXXvdW69z6s+ubGCISA7rbX7fPz5039oH7bR7qR3a6v4d4OhcC3eq31pq37d1I/AAADC6XEWSQDkGpftb+2/WZ1s9oU1rbettu2b0c9BUAQmUPrfa9bXa99XyV6Rk5hqF333dx73dxchOh+1r1nn4PTWxEDeH9sx6mSVdXH24/CxCjI4xCJ7Mfe9n693HrbwwAcplIeH89/+MPH9x/fpZTePhfozbdbv7xcKc/TdDw/hIft9fraXy7183P7Yg457YjCBEoUJETEjODDcIdOCBgUnYIoyAEJQAgpJwA0CMQQdHRdSj7M+XTI+YB41j7Zxlaj925JOOYDHE4+zebWMqhZIncyEAcJZ1fyHkER6qam2lS7wdsWQ4zTSQ5PqSwMzSC5hq61bqta41moLPlpOXw8L09LOUjKCGit7vvleql1C3NEXOYpMCk0aGZBjFj3tq5bry1nCXRACoRgRkbwGFl4xInzQilCZkMOxjRLmhmydtrMdggwAuOuqArmEExAApyDSyAA5kDx3xUxT8vce79te91rbz2RBIziiYlJQJyjs7Zmdd2BMeVcShGRXDKnO/MtzXBA6Dltt1fzSGWSlFgoEiKIwQg7jpwSAIzVnBlTScfTCYBaXNLepeQUYAH3ox0Ao0ylnE+nJJzWdtv2621rqggYCH4vWr5t+cgICUGACMjijpKxCHVv3dbWr3t9XXEimKcE4bdqXy/99bre9l67d4Vu63Epf/50TBzoNZEiRQCxe5A7BLipR0acJY4FD0WWzEyE7j0swkaF8KZHhjs7/S4c8XC7mznZ54mOCy8TZ3lrNn5vxAACMFIaMlakRMSSkDEIardwCgUKGrgzx8HXhUAKxA7RIywwKHEpeS6pJGKKMO3uFm891xgnLQsfINR937fbNsq7vfW1KyLO7gYRhIB3BCHQ9xbrj6bquHdi4HfjpIi91nVbQdhcEUEIU5JDSee5fHr/8F///PO//Pnnf/nzH94/nIsIEqqTAzslhyBUAmZJy3w4H59E8r7Xl+frv/+3//bv//7ffvn1H6/rxcwl5cPh8eHh3ft3x+NyLCxZRIRTSjklFnQMcxteA2Ip08Ik4dB7A3fVxsRJOCdmhGGbDje+H9y+fy5vZ7xBxaeAEJEylePxmBisb562KfFUWKdECOfz4XSccx64THdtvd7aftNeDZDywTS/1AZgxyInIR6mZWIRPp+Pba99X1X7w3GKJCXTedPXPZ5Xfb5uezNENg9TDVMYamf3+8cAKMKIuK/79bJOSVrV76j+COtmzTUZoQszBJtH2/X55QYID5iIsjAxE44UVsTe+mXbXl9uLy+v61qrmqq7g3moR7jVrq232ptFLMfTuw8f5ml6PD9M8yLM5t6bbaV28/PrbTkeOKUgNIge3txq11R7vjf68C3lBuEe/opjCZZEIt8/mZTz+09/XP/pRb5+uW1Xxd0dxINMxU3c1QMJWZiSICeUIjxD2PpyebndWm/LRI9pmZY8CQpAVVKHsfZue1czosiJ00xMojndh6eMymjmWgNAZWoiUTI5aG8BPbR
HdURB83CtZd+6VvdG1gBT3KNsg5AE73VM7/b89fbl84rbucQyTXORYNx6v93W6wZdWSSxZEbCqrX3aq3fdv371m7/4P/P/0K19r++7LLEv/5PQiUfXQjM1jUcMaF1MgXt3lpzm3NKcyEBiIBdgQBMVVt305iycGZOu9bVLtG3174tfWsNr9eXWjdrzb03boDa9rW3kcXz/cRDgIKSKCdKSMmRm9pr61dtmhAKuvXW99v15XZ9fXx6BHAkgCGlMgXXjHHIkoWJEd207l3deyfCKcsypSlLEgYiBBq6bySnu2NpvKno7kSEERgoLMyJhEbQaEhImTlNDrxtvdWX3k2SuPfjcT4/LCL89r6Adao3e32+lcP58OGIKb5e/vHSXi/9ZY3nTluirLH3SIahRM4MLCxIgRgUAIph0b117btvmwNFABKzlJLFccqCjxMzWMoylbQszAv4ad/Et97CjEkkHXg6YT4Yk0E3AefooeBGAJl4mmSrst26Nq2me/IZ5EcnLxCk2WTpWBDBeTJnu2777bWz5flYjjl9OB5/fjwugu12u2770OvU1kj4fDjmVDzoujaUW9qaBS9Z+rZt16v2B/QU4EhMLIyIiOa+11XVJE2IYiHdUC0cghJwiUjNRQPIDRWsxWZQg4wFU4IyQ5oiuEaE5C7ZfzdOOiaubhrR3bvfgxL1DdFwP1IHmNm+V2C8R0tPJZeS0hixG7GkabYA0N67EjEDCo12xIBoaAzYAiJFCOKUcywhlMyBrrcRyuQA3az3bto5MTPf2akliRQgvu1Na4O3OJUf2wMEkIgyAQOBk/sY3YADdPXWvXbbq962RhOXuQBL9f3W+v/2S73c6jwlDNj23pudZjzOnCSYGBEcSA3A0CFMHRwS+JJhmWgpVDJhDPe5u7uqh6PcB7QjIncYe4ebZni2HdFKwqXwMknJFPA7fc9YsoU5p5RTkgFsI3REdetqvbtpUBDdp1X+1nO694IMQAMMEYiB2O/fXARC2NiE6Y39E0OCNOaG+7ru6957H440GrRilnuD7H/QkYkRFQ7+O8klREQ3a9qTdiTA0dh1kXJ4d37648f3f/70088/fXz3+Hg6zELoAaSogT3ehEWIiBxO2qJu9eXl+e9//cdf//KX3377pemO6Oq9bn3b1QzO5wdOUqa5SGIASZxyyVkocURo7x5BLClPKWVCAnB3c9M768UszIbjFWPkjP3+lumtE0MEBASEMcyRYkKgwIw50zylsMKEy3GellmEBm8mwlybq0ZEAGmkm9ov124RH0JYYnGAEQBDNOV0XKbzMtUGmYII+LiUEnkLh+2yVncDAHMPt3ALQLv3C4MoRiVECPu2Xy/rktO+/ZBi7WBq2lSTMgoKmce2t8t1e365OoBj0oj725ckAsfA6Mvz5eX5eruutXULiMAACL834dRMTQFguW3btvdujDzlfJwnkWTuu3QLlzRm/nS3TDMHYlPdamduAMDCjG9PNBEOaNYY6wvmzCl/5/blnD/+4VOs23w6Pb98vW4vLXYIF+slfEI3DCZERA9R427UI5p5U62qvWtK5BjBAELjuAEM5IREAfgtEQjvJn3Q0akA5CRJiGQ0XgNRERBCXdW7VTcOEkaO6Kptr73Wut3CvkCulDJSAuQfQyDdY9vbtrbUQxDDGAxAOQCsuaNHBhcwD3ffd9MeqBI9Pu/6pao32qu+dD884vJIWUhLzOBeLXE6nVKvSXV3NW3q5iKpFKEw7ZYF0ihbGM2QgwSTUCJS87rXhqBGYcpdN/ca0TyaugMpcAc2cP/9EjFcyZkoBYkB1IirtpsrFk5LUg8g3/d1W6/ampuN0NbRgz8sk/lyTMAIbqTuzZqpg6lImkte5innLCyDkjvEh6NlB28Tx4DvowdEvAepMY33NyhIMklGSmqxXW+9V8mk4mGN6XdQNfdo3dZt39t+oCMKtNhv/XrtlwYbZ2CiCDXoSqQCllLkhC7oQeM1iRimFA9wcAe2gFHBApIQcGKWnBjKlFNiQgDsI8CLIySYoBQ6MM8gKQgigoSDyVsHc3SkN7q+u5u6OirFgKP/sLngkN5RSoD48ChtK+szYa/Z+N3x8NP5+PG0vFsmAbvcYuutbmvtHYimqZwfH5Z5Cccy7RaRmCx4zmK91m3z3sA1YuCVfOS5t1a3203VyoKmFBGIJCIZUs45JXGGwPAIhzBwIEg5LUtQcC7pdJrnQ0mJXS0lkvQ7g4JIOBAeS8YIiE6I2rXW1rX1Tqreu5p677bvexDxNCcAGtSuUhCiqZq7jEIYU4Rbd2BLEQTezUEVVMM9EIMQmBnoOE1Z0iZtrW0AS7pqbfW21a6dwIuUEWXDwiWXeU7A6et19W1vvYcaR8QP6gQiLCKZCYNGHabhYwuqGnv3btAU1qol8XxcqOBmuf+mf/lql9X+5Y+HU+HAChivt4qQzkfJOUfg+IvYwgLUwyAS4pxoKVQKpoTWA8AC3F3DIxDeOH0YSDH8wAE+UnQ93A3MBGNKdJhSyeIRfRxp3142YcoiU85Tvks0ullrfd3ry+tl3XdVQwBGRjAC4Pt/QYA4IhcizF3dm2pUVINBKSbke9xWjOH10PITIrpq3W5tr+DKRJkLpVTKIeeM1sA1hor3zZz0bZT0RoYYX/GbIPitjHmrahCC0LPQofC78+HPnz78+dPH948Pp3nKI86ciAKcB9Dfzc0hIsCbat1ur7rd9l9+/cff//Hvf//rXy3az3/8KDm/Xm+/fv767//+99u2Pb17fHp8TOVxKhOYEiGJSMqlFGQ0s3AgZpacchZiCO9v0q99wwgvEJIzjizQ+/f+vUX21oj5QWKKCAHae6/NeoOwnHk5ZGYgojxNnDIJkQgSD+rMSPEDpLXZ58v+l+dbuCPwknIcET163cAV3YUgC2n1ertKmabTg+QcFFXxZe3NoFY1s9EFU7Wu2rpGBHHkhDlnRmx7u11u1yTbbfuW6xnh1lT3rqIE6h1q78+Xy5fL5evLTSN29cu2Ho/zcVmOh2MEXK+3l9frl68v67qbgQWMNLYRTvb280EiIhZEMvVBaCSPRJiFumNTddfW6rbd9rp6OMsgLlFVv+2VmBBxRqYhlx5f+s3gd4+eJUn5O0k55fTTp49HjA8fH758/fLL51+fX7/c1tdubUENschBCK7cKu0bKvUXbbfdwnxMHcKhq1VV4UiIxh5AhJxAsiFqEnIRRORuse16q3vtWyk853OaJ85lZM66ujVrW9u3BtohOROVMVg28137Wtfnl6u/4HQ4Pb2flgfgA+EPQEW8V5gWvXnd9jCHrC3EC2Vi6sjuoFs387pHmGScventequ31jZUx2CWnX77B/SL/b/VEugk8O6U/vxp3lex/uqmpt3NiTilHL1LspRpmaaHw9z3todPzAk5UZoyWEDtN4/usAUyS+WkmBWsOVdKngpDgX6zII23jR8JiTNzRkoG1ME216v1ii5TXmAh5MyiVvd9bXVXVXcjolzK6XyS/pMvJVuN3ve9a3OwCFMMFc7LMh2WJadMzH4/d7x5u79rWOHuSxr955
E3igP0hYPhCMQoWfKExPu+79vFoyvby+Xz0/tPbt/pww5uaAq9Wl377WaXS7/uvpFAxiwCiRIDA3hFbxI+J7cJtIF5RGAEGzqMJA3MTBaodbwihsiBRAiOTimV40yI22XX1pGqp56cMArAscCRQcbSw8yTpM4JvJt6OIezj/NJ1+F7opzx3kj9tlfylM9zeUeclpIflulh1iN9uX65SbOnkv/5w8PH47QwhsWUUi9533dkmJbj+fH84f27qUymISKttQRojiIE1rXtYRqm7oYUwOoWvfl6vV1fL9q7mgMmcC+JT4clQRzyUTgrRA938zCjoJLmhxMckjFKKfl4mmViIGjRlC3R93wuABB2RwBMTFEihHISFgx0CzMfQg1mJiRVU7SkfY6QlFhS773Xervc0OxUCgcBJkA3C28dthtqbXaPOw8PC0cAZyYkZDL1Wuu6bdfb7Xq73da6bvtt2800EdokOFJiEJm5lKkZ5GlCufW9qmoO+NHRAwAjBAYRHWOE2xEBAph5a7rXftubECXh5YAsKZdIuQx4eil5OYhwoClCEMJhycuczKL24BYaRtUQQAhKwsPMy8SJA9EibBxEA9zvTF2QkQbM42hAMfQHGBDmZu5G4EyQmJjR43fMG0QcrJGcU0rCIg5ea71u2+v1erlcaq12zyEIAhCABCAEKSAR3qkAbzLS1sADxCllZnyTvOF9ayYiERYRInK3VtdeG0YkTkkIORFlpERDZP7NTBHfV93vlQwA/KdODLw5eohxxMYmxsOUHw/z+4fzu/NpGQXMt7MuYETc20K9V91bt17dO4RK3fbnl68vzy+t1ukg798/TcuEgq+3i4d2reYKGJxYcgJDAkC+k3qFOWRYFpBYhNM44ZsTM6m5mao2sSTOxIwApncc+O/u58d57NuvYygnAIiplMyIhyMSEktKTImR82AnJJackpZSckoIaObdDAJgRKQzE+JwTvVmrdbe+l5bN0sWOB2QihBOOR3nqalD7OFGxhChFhFg5hYhEJGACQnBrLfWelfV378wQxZlrq1vWm/b9vn18ny7XrddI5rr1uq+7/tS694j4PX18npdr9e1qzEXoHtKwD3SHsdJ816y996v1+vry+vtet33/exHhHDvve/7vm7r5Xp9Wdebu46ahBHdQLvr77IdcTAuEREAmWkoIcwhJ/nmtCLmaZnLw6kkFAZ3C1Pd911XViPrDAYA7tg7rlvnwFvv3SBxPkwzIzFbU1+rCmMkArojqlDJwzwABrKXCYfyYlinRquScPTCrFvfe91avdX9tnOoOwjxJEE06hvdLxvm51ftctgPxwXheNey/fhIESKBg6q3boEKJJYZHw4zJG/gW8fbqnWzWgOCKZN32ba0bt6qEdE0TYh8vcj2GvXaM9rTmdHz45Lbjm4BI+8VUUREktKwkUMp6XwoWqcNbZnyLJKZAaFZR0cLbX2FYOSeZzswOSTOQcmJqHU3sbwgfo/nQmQBSU7Uw2+6X9q+Wm1gWDhjdgNyqlbXurZWtfdQl5ymaWZ4nBh8nWG7ttut20pW0Q2HZIIipSQpI6eAe8P/btJD9KFGDAcHe9vGRv956GXuj/+IWR1yp8TAaN613vjVjUy3W+z1u74PgTJQ9mB17Gqte1NUFydEppQSZxQ2dvAdWpWOk4ET6kSAYQqmED6OmUNmGgCIgQhuihRETEQoiIVoEjDsRrU7YnduFhxgCA4c0BzceUDHIJlzb6A9xmauARpwp+TlLDmJyI/x9QSS8KHARwLJMi3Tw6P41E636RX32xHh6VCOmROFA2lOfZoOpoF4eng8nk+H0yFJtu5udlomMuvNAjx67dvW901rMXcScxYwbq3v67rdVu09EFlyOAnHYS4J4ZBPkmaVpBRqPdxQ4kB2FPPuGJRFluNCibprxSp9PkyPTN9vR8iH+Ia4MItwyctymHLB0aAjIuackpojvZhp7V3d0zQh8+dff33+8uXyfEnEf3j/8VCmwIKMrrXv9do2JwAU5ixSIEBbczcADAj1WGv7cl3/9tuXXz9/+fr8/LrWrfaqBhDApDqevmG/BgQUkTLPaZrsOoi+3n7cYO4WimByJNBvHhp3DOi9XW/r1wzaEyCVUg8TiuDjMf3pab5O+HTM54NECTRijPNBzocyT6mqIXuAbc2GETwnPsx0XtJhYkL3EYTlNh7HiBEHCChj86aUEjFHIFIYKPjw/9ztR4FhMTCC37sXRDSVPE9FEhMjElm39ba9vLx8fX1Zt01NDaEPCEe4YBSEDCBIiSmLpMF4jOitVxxoXJEkTCxJ3sYoPqZU9wweQghrXbt2Js5ShC1A1EiNWDIhUdgoUjyA/C77fitkAO6plz+2YUZnWFJJYyyGBonpOOXzMh9KycIwHC7qZo7IDqE6Tur76+32fLm8vF4vzzdtnmkSIkQQ4uNhOT5MD+cDJQJUJD8/HEs5zIdJ0sC6B4swAhCOFjcSE9A3axUGDqC+MOdS2HhsXKOrMeLbW22Rvgdz3iOf8b5p091ZFgFDQJgcUoqCc9BMZVqIpJkPbY1MR8oz5VmmBRDn+XCYLoeynab0uGQkendazsd5mjIxR0Cr9XpZv359+fry+rpu6iEKlVeZsYNAwHEq4ZCAbkQbIraBCQ2vauGECIhEQHhvjxEzy7dicXT7WIgJoLX2/Pr6/Hp9vq1rbQ3dAWzX1nvd9tvr7WV6hYBtr62rB9JIEiZUDbzzgTFiNPg03Lv22/X266+/Hg7zTx8/PD09PD6eU+Jtvd4uL9fL19fXL68vX9brRbUjwLA6jdnoXen1hisf50d6Yw2kkohAFX/MtAIPUA1ViijCp2nepuXCExi1zfat91BKFIDqdttu1Pdqimk6L2ejfNur2t56vW1aciJBYhrxmB6+rbWq4SIpJWYgkSWIM6uVJJRlsh7rutet9d3r2rfb2vdmXZNEBCZKEwUSo5E2u71ejaiik4hEJEb7vWGcECURCzlaQAsEIAKG6ZB++pTL5Ptev37t9TdtX602D2SYwZ1aJ3dhsVLkdJ5S4ta9r219sUXiPBXXqVZqdfTEx5SYhYVH2gMEUWSh45LBpppiyvkwSR5kByfq7N72aMAQDPOZjzxJPqQJgq1r22rFxY/vhOUH2TtLMBvCru15vb5sr6vthgYEJMyZQ2G3emtr7aOICc68LEefsi+zrqf68tXwmY3IkWMTD3XDe9AJWzAGe3iYghsgOBKCDCfUMM253znUjAllUAMwPADdbAS4egiADBypoyubJXWx76JLosgL5ENIViJldCHMRfKULIIQc04pEjt3sBVvO+88aUIQWEJK7BVqDWwBHsbd3FsDZEopI7mPtjpSQpwJJzKKcAwU79JadGvWPaIiqms6fXjEboIIkdDEd+qba1MUCnIFcE5pIfYoxDlLyTnz96IfQtiekn5i55nnMz7A5PJAG5BeIPWaQMFDJIeklGKekXPmnI/nh2makGlo0TnxPCVvHNp6V/Xe1tt+ve6zAALnTMgBXPfWtq3Xqr2TIEsPFII2Z868HNNDzk80nSOJRwtQG
sAVGzERykjTfCDhpq2mWmV7PP1RpHwvYoY3eNjSHJlFSpKUhDkhy0irIwDi6gG1d123aa8AFAHX1+vLl+f9ts3TPOSqTtE0Xi63vd2UzCE8MKWyzCdC2vfaeu9mXbWZXbb9y+vt16/PXz5/vV5utZuqjTbuMNgPQC8R8cjhTDItS56mQGpqptbMvpl6PKCbm0HGEEYHJEc1AIhhNtn2+vUCt126QZKET3Repo9Py//058frtj8+5GXiTMIQBD4lWkpiJoewANMYmZFZYJrlfEjHWUrCcFW9s/npbb7i7ji0HQMdmYU5eSBQkBP0cOjqYB7dfO/WuoPHj4excdCLsK699haIvbdt29fbut1ue20AYGHqaq4UzggTw0ychUSIE2chIRpTo7dzCDEzy0BY0x1QFyFCIjK2BHfrAxbRe2tWd9OAgBqOcspJhELBdZiK30bNP+I5IeC/04kZ9eSdKIKQhJdpOpQpMWOgO6h6bYpILGHue23rtl3X7evLy99++e3Xz19fPr+CwePh4Xw6HI5LSWyeSkk5MSZIAufjlP/p58Ph4dOnD4fjDOBmmnJmohG3rGZERoT3HFFzAkQg5CCmPGUzGd31YcSJADNrtUZO/xFC/J+uGNaDALNAoGlZpjwdjg9Isu5NTQl8Wpa8HKUslApZl5Tmqbx7ODQLKByIHx5Op8OU7ywcNLPb7Xa5Xm+1XXddW4/dpk556anMQIkjZiaaS2ackuy1rXtzj9vWLFwG4ZMgCU5TmqdSShIR+N6FvY/G3F27btu27VvXHuBEBAge3tVCTWvf9woBXS0AWJKwjJeUeXRqCQEi7nLKkX5QW319ff3111///d//fZ7LPOXzw2mvt9vtst5e9/V1vb7u263X6l0NnYjHRJre/CX05mUnAEISumPZEcPH4fs/XYSYOS3TfJwPh+n4mq6E14h9EJvHBz6+LkFAOI/GvoiF1LqFx1wMEYk9INylVW9Va2sMjTHl/y9f//YkSZKdd4Lnpqpm5peIyEtVdTWABsghMSRnZbED7ojM//+8LyuzOyvkEksSje6uqsyMCHe3i6qeyz6YR1Y2BjIu8ZYhkmFuZqpHz/m+3yeDSMpMhGIsELYtrW3b6+VW1xZK1l1b13YHLjMGgxZASiUjW+C2bi68oUkpqJ0jiODbaIuIPUeZmNMg02k6HQ+lHPjhCd+/p6l070vx+fWn22tvsJoFd0AHNnUkHzKNh3Q4Tkg03ypynB9P74/5t785ffd4Kjm3pUbcRSe7ReNbdJAwHUbhKD1BFhkLCUMEF0oZc/XuXZEiD/uyxpwlDQTsGpQU8yEe3g3yDRgamR2xWr/W9WW+vC63zZuSBwEyIpOp3bb55Xa53C7LsjwcO05TkoScoxSVHA7moEFOggsBua81EC1ADbohaIR6qLl1wL3Tv9ce/pYmto8gmfbyGHbFp7lbb71tW6+rayOwJMiJC1OYK1COX78aFj6cpuM2EAS6JZSBhpzHVAZ0JIgiSZwiQK33WButmSOlIU/ntLLdVr/NfZ0hkIIozPtdaoUIwruPSaQIHaUchnF4iI46z1p7W4tvzWvX1nt7jZD64/vYmgABMYSoYdPu1nNiIRSWJMXdyJzACEEY5c86MZxgKn5CB9rIvYM12dpg6gSSMFEwIQkDkAQOkotQzmWcJiKuu48wwt3felsOYRju2uq6bEsiJnELxwButfdaXc1Ve63uhpwALDNFkkJloCnnR8pDgJo36z3AkdDI2RsTH4YzMW/Y2DbKQ0lH+qbdJ1nSvp75TsXAGGNEJilZyrArfN1NI9R8XVutncq4NR1Z6rJZa2NOj6fD+eFU0nBt/brMf/jTH+f1Vo6DQ6zzwpQeH95zSsvW5m27rutSe+06r9vL6+1ym9dl6+oknCVhBDONeRhS5giKKEXykCExIg7jVIYRkNVC3es3fWePqFVrjcOIAxMzNfMN3CkSkTDWrs9XV4vb2plpyPnD0/HHQ8lCt3kxr0XgfBiGJADgprXWbgoRgpCESuIhWRJ6eBjeP5SpEKNbtx0CQ4F7/znemA/7xFISp5yYkwcFBSkgWRAr4KYxV3udda3KAPlOxYP9JWt1W+ZbRGjXYeyq2rZNe9+nhg6gujcvFCOy0CR8GtJhKMxkEEwojEzIQiKScyo5p5Tuc5093g9x973fF7IINdtan5ft9bJ+eWlfLtqd8tKe9DQeypAKRkdj2G2n3yrm4i0+MN7ka297zL6ftboBAw5ZmFJKUxlLKnB3/2LVwLVtzRBDTetW121d1vXL5y+///0ffvr503yZh5QPOSeepkJIVJUg3KynRNMg6Yd35/PH8/nDw/k9Qbpdl9q2acjMqKrNDYncnSjtjR8EcNPILJgoUZKyJzPtbt1A3rPQtq1GSX82TnpTYX01WuOu9FTfWm/bmhmO5/P5eJ6mM6BgambKGKfD4XB+KOMUe2IjxDDkHz6+O54e3rXazQakgVFS7OnoLaLWda2bATXHL3Nb+kKv6zgdnh6fSh7Cg5COKR2HoyMutb1cbuF2nRcAGkoaS04CQ5HHh9Pj43mcRvlGR/L21nhX7aoRkFgOE+UARegR5urmuyawN90vG4kiMBze9gb86q//BjeNGODutdbnL8//8A//oNrc+sfvPuSBVHvbll43rVvf1rYu29II0JghlRCmHRy/D6jC0ffQ6r1vRAiw28xMvyHDEgIJciJSkTQO4+lwfjjV29xe53rTaH0GDkSSJMfTISWhdVsU6rp06B2pq9bq2v2aTDWQOgS5x7qFdbem17ZYRcHjMA4YhV0iYNvq6/X1crleLrMrDHkqpZQ8SbK6Labrba7Q+4ApYZFBgLmbx9ZWq7kMtq2knQrANwN+96ib98ZHOT+dfvjN9795evc4nfI49iKXMd2mtAwoX35ZLy+6zV07e5Ugh6g5w/k8TKdhGItFtLadT+V3P77/6x8ffvf9YSK+fW5t2QgDkYZhnMYp5US7eNkhHAhhyMSRQiARDwLEgJQU8dasQ3RCTnE8DiS41dZrMyfJmEcex+GB5f2HQ8pvGwwiEQXEvK0v8/XLfLnUWyMz2qGcgBCqulw2Uv7l6een89PTw+PRTgDEyMEEOdLUI4BSkXHka4kLNe+Oog6tO1d3MFANMzTFnermti9DuxsxpfRGnYPYmf57oG7d6rZt87xdXnS+obaBGYdhSsmVvbd8h6UDACRJ7x7e67b16r76iBPnMedjSjO4MMCQMnZf+9xbc+sInRjzmM+nD8Wm/mWun1/9FzKjRMrsKt671tYQYxqnVDJmylMZz4fD6fHh9L23YP0TmId16GK0Wb8un5/dffv80X5cBRAlOUrH6FgRN6E0ipzToImwN/Xqrob4FZfydluwsBSi6E2X9dNy0/Wm20zRxhSpcE45lcKpBJKgMKKUIpwI0dS1WdNuplq31pu5ehhSZBIh0LrVRSSLu7ZmgOIOYXZH8fQe4ZwRw3l3kbmL+QDEmDy4adTbVmvdRSiqlstwPDCjoCsoeAfXP5OPixAHgEWo+VZXDFWdAgyIMCUwCnewDiyckrr/
8jwrpN/9xfX98cAs0zAVpmkYknCA997WbZ2XRdXOw4GYdFNttt5mR7pu7XXZPt9ut601822rt8vctoYBiSTl4oSbKjGVlLMkBiBwYUpFsAxmMCqWYQKkbm6q1XYxyv7yQ+2+NegdSsbMQEwRoBGJCAmq6tphqbb1GEs+TMOHd8f35/HxcTxMVCsmhIfToeQUEbXW3asqAPuT3IwPGxrgacqHQYQw3LupI4j8+qzfqxC407nofiKWHSSNZEAEQOqwVruu/TK3dVMGyPJr1e/hvbfWNpHELMSoXXvrrvsZBiwi3MzN3QWhMJ7G9HQcH44HIpxrdTehXUBKOUspOZcscjc67ctLxB6iwfczCoC5t6bLWl+v1y/P2+dLb0Z52Bzg9DBJ4oKRAIEkwMwUPBCC9gPunRP/L7iTtHffTWeJgWmPkmYWU69NpXhU29oSbqpdtWmrtdW6bV++PH/+/Pz8ctHainApeDzmp8fJwefKAK69lnF4/+5cxunDh98eDu+Ex2Vpt+tNVc06YfRezb1rF85CGQHdnAC8JHdJ4IlyypmQkRwAkRgAVb21tm5blPx/2onZ2xkMSAFoHigyjIfD8TSUQyA5srsnxmkcduZE632XsSPEmLnkfOBDN/faQJV3TCwEhLuruzuwAt9avCwdUIfmAHIcNROWnIuwJEDmROI9bYtMmSioJBoyTwM/nA7ffXz38PBYSsnlz4i9Owpq15yNw0AshtEBmnk17ap7i3hXegEEEu0wvL1R/3XU4+BfqxgiAqA9FE5Vb9fb3ngN18vl+w8fHyVJnZe+bb1WbS16B9c9mToR3KOc9uLoPgUE2j1hgOHhZq5dW1f9JmwMAIRBhEzEchlwmvx0bKdzO8zrteu6g1G6tabqwYARpL3f1usWiGkwRAzCoLahVWu6mllENsW6ufUWUDtBqyKMDBhubtHWdb7eltvSW2dKOadcCgGDI4l4JAwDZkyF8kB5cOK5NXZrruNW27q0dSYaKTNLvtfEiASJcRCZmA+EB4ApovTe+9ZpiMepvH/Cv/qrtWvXfoPXux9dRksFxuJFIgkUkVzG9++Gf/tv3v/1jw8/nFKs2q8bhEY4UZrG6XA4lJwTh9wZ1MjsOYugY0JGzglRAJkzQE65hAtTnvDpPCHps7XalIzFaSAaBywTvTtTehsnBYC5b61dltvLfLluy2JVM/lX3yExAHTt1/n2p19+Ok7nx4fH6XCQnIWFSIIzp1HGAEmQkws7YXfbJSrm2Jpa9DBFN4KQ/TwZdz+smd29SntiKO0sCzfTti3r9brdbtvt1pYZt7VEpJwpuLA4YmdJ38CIEEm4AKStzrJ1N0xYDvn0MDb1TgEHSl2XS7/Utrl35kgpjcfp/N3TiA86zUvK1iOCom/RlSVI+g4PJeIgVHIkOMgwjOfzwzuwsLa4LtErGDpmby54iaZ6W/Q2e+ucC6ZEJVMRRE4pMciYo6Bea7etMnZHNOvu9tWggBASXWzRbdXbZXn5ousNXAdBkVwkpSycEjADMoMgcx4GQtTWe++t9drbTryqtXU1ZEpEQgkBlnVFjjxkSQmpIyViDjdCIL5btPYol93hKYEJsDAmYQ9GtSUw3gJgCSmxpJSyJFNzSSpd+M94V8JIGt7M1q3N84JdDm3q2jWckSMNGEHWUtkOxyN+efn5l9fXq/7FD7+k3/L5/PQ4Td42Eem9mtoyX3qrOaWH0+O/+ut/Owzl8/lPL5+/XK+3+bbcan9Ztufbdm1qgNqhGwuXUVLJOQ9ZwXFbHSDzHeJLEXuDMB8PDKlFGsZXQOpmrff6jVIxIqrC0uBWnVMcMmSGYFREQHCMatE0turm/U9f5pRlKHmtp/OUx0SHaUyEIjkA1HzPUdob+xGYnYJ0000tchZE9PDeA70LASEw7w2YnTpHb/Uu7npAEokgNMA76wL3htDrtV5u27o1phhkP9e+bfxqZoaMzBjuOyYf1AQgITk47DQTCCEaszwdx49Pp3dP5wjgl9etbnunOmcehjyMQxnSvk59fSd3hBkTMTMSAqK5N7V5q8+vt8/P1y+vrRnncQiC6ZjC25RkyjLmhAjq6uo7zQhBaG/Q34E3/is8JuKenmoe9lVHxxBYq61bp6LVoW3rtq3bOmvbwNVNu7bXy7wsmzsOw3A+H949HT98OH//w1NXfbm+VNvatp0fhx9+8/3D49N0eIc09QZ1a4Rh3lvdtONal9Z6WBBKSQNjQgQmMpWuKbkWiCCRxAC8377d6r/Vtiwr/AtFzJsxCXa8IJPsmQLZckklDeM0DFNKOQJSCgwvWUpOobpp622tt2tdV91WVJBczscHQGqMvYLbfmPNw4RZJIGSgmwhqwtAWLV0uUXv51IGDBaUbu4gFgPZIeOpiEAIxSD0cD5++Pj+tz/+5nR+UPehDF9f/kDcgfCccyLOw6Dm6lHNam9ra2ttNdqmoaZ70y787gFxj3sj5G3bJaC7BfqOz4FddLW12vrWe+11u11fe/ur8/nUu7a1tdq8KyNk4cxpyvkwDocx33XesacfBMGdtLTnrpGGauutfUsgBURgxiToQhGZeBhhOvbTVh+W9drqpc3rtl7niqhjSuNYVtXLWj9fls0gj4dhmKZxYE6ueN3Wl5frWlfcNeEkIlgKJiFX16qI1Qxb7X2rGD4M+XAYh2E8n54i+PJ6q605UR6GQaaHqZwez9M4CklHXOYVCXJmtb4u8/X1xVdIBzs/PEnKAEAk03ScpiN7Xlf9wx+/fPoypyGzgET/+I6mcn56zH/77+HwkLT/hLHWLiAwPCTK2tvqHbAMx0N5/HD8zY/jX//V9OEUostat7rMda2qxmmcDtPxuIuwrJRcmiqaA/RQh2DhPa8QGAODDYkwpyzjcDinD09DwNbrlR2TcBIcoB/Jz6M/DTXR14Olr7VelvnL9fV5ua7WOrgB+A7oDBSRnHJKqdX6T3/8AyGdz+dhmnIZU04UFEGAAqkgETMNnIATILt5Gk4B0tW6V1dniJJ2fzHvPZj9tf1Ki9lNm4Hhob2v63y9fPllfXnpyxK9Z4yB2YeBIiigeyQW+QYOaRbz0l9v6+u6aCq32o5DfsznQiki0IEdn7dPtf9+aUugisg4HI/Hx/P5fBgf4zAMSbwFBNbrs+GSMJLlYRTV6GqLbqtrJijdzoBUeEgIPqCPug3hAdPATNaqs2PXdpu3dRnGksY8nk+uZ2wyyEEJJGtE367XNi8yMjD11nr/Bq8QhvoK2ye93fR2i+3G1hPLVGQccik5pUwiTgRALHc9h7u1Vre6tt5a663btvZlbuAxliETMSAEXK+3rW9lKCXnxCXlkoeyRyAxciAG7BO5CEcCFqLMVARTRiBGzFst7tpNATHlfJgOh2lMLIQhGEw+jZm+kV/IXci4m0f3fBAACL9LuomBAEBZpJQBkV8vS/08/3/+0//Pu/748elYxFHcI5atb+12netayWmQchqOp+MxdU8GuraLzW2t27KtS20WmDJySgUyxJTKkFIq0rSvuHWzHdcBsesycc9iSjw0x+l4yEMmJo+wnc/
tbylNLuNC9LklpJJAAgs4RgAVGtSlpOp1rq+YOC4In64AbvRxeuh+EdkuvHDXqjcOaRta6yFCxgACjqwfwXVl4FnWqeIBdAaX/WQEEqCAOCgHdzDG5cxxBchx0KZRUAdEQOGwSxFTHoHTIxuqbYfFFFEKAHckj+JZC0QS9FIQOcPf/wuWohaOhyPP/UuCkv7MW1iy9DIwVKqaa5yEu9pABJYTZbwQuC5Xx4A0SshCf2Ow536J7FJlmmOs95WWrJtYgqAHp2XQi/+u679cXV7ZuvjJyoPu1Pf/nx5//jv/63//pf/9txWkS0JQlbG2jLmaRmL+dnJgrO9V0+rAdw60C0MvQKlguJKKK2vcjQhMDABEDl3/T7mm3HSqkthqZ5GQGwmXmavlrbKAXPnQn45b1sbx8BQC1lmeb97rB/3KNk1nrC6tjIAXHbJAxUmCAE3/XjarNloq9fv4oxSq3znPeHU1pSqVpVsubQ+csiRm5/mndT2k/ltJgIA/BZOorypVu8tbKeHx77CN4ZkkKLkDfDCnV2EJdRMW4uxzHQEIbL7d3PH54eHp9Pk815mPOUaq2Sx7oa+hij874fu9XFehz7PvoYnPOOnK/eASOrOiRPDpALmIACm5liNUm2LFaTqqsUtRuAxsW6AU9POj3kGb0QoS3zXEter7rtxUrZF6PPwl4FKFLnvCTNi6Q5L0S07hxxdMiAXM2qnqUwxAGR20jcOSAysapatVY0bIIuqdW0tqVNzJqUhs9cvsaqBjNt+wBYy6sQbewEUGz6TOeH0EXvGZEM8MwleFk720lRCdvj+vJidqv1xXZ7syynWhZQjwbogus2cX0dhw0557RgOig6AMeqwAHZGXsOMfYr8tGIGypE8pKmfZlPkmYruTUj2qjLSgUEdgGIa1UFCv0Y+gHNQCvkdB4MvWh34ayRAyNC9uQCOd8++SK1SKlQKpDY3yTLNscb4tns5FgZlUmREaA5Cs17BqOatdY0z5MZKlAf++hj18UY47jiuOq74fLy1Vebi5thNcYYHZH3vn3OmNBMtZSURc/Bw1C1iYoEDAmEmJnR8cpF6kZBJJkPbOYpMgfyDJ5AlRyEzsfSFQkuuP/JnaSmKad5ntKy5JxLKaVUMEqpzEsiLMfj0bTi2QOCHcQeO+KRuGce2A99tNWwvbx8FWO/2UzTaTrOaVrynHJOaZqm3WG3pEUBcllqLUXVDGq1UqRUqdUINWdAY8f4uf0CLz8wO1ME9Bw7cO7E2N+2lNoIh9mJwH4/Pzzs7h52h3lZX6zH63686pB1Pvllrwk6Sd48qmbBrFCtCUeIyFGpJaVUSwWjEDpkLs3aDWqA1YCqIACa+dCt1+tvv/lGRZ4en+4fH593+93hkGt9enr+4aefpnnphzG6QUQPp+ePH3/w0ZELF1eXRbRI/fDhvfMdmtv0Y6m/xES541QQkK2B9NsOg/yiY4HPGw6cD5XnoU5jFTT9fzP+ATKAA0SDqi3KhZRMEK19epnQERqgknIoQ39kvl/mT6f88HC3e36Gmp1nAOmj26iszHoprrE/TbG80EVNG5Tsb50eQAiRcA1wvV6N6y0xitqScxapDG07IjOuyqWiNlcrFcJD1Q+H6dPj0/PhdMrzlPIxp6nkqdaT6dEsqVYRU2OzFcA1ujFEXG+17xamaZ4+HY+L1M/7syOIWlaql+wp9lcXt3Z97dbrruMIGbVIzSXNaZmlCAJiFUzl8vLmNud2LWI21XyCzOARGZAQnBmrIQFUg2lZwkSrFFwYYh8YPEtVVWfI1jxKQAyNscvOgWMDRKl4dkcDBSDGdtQwaJZwYlEkwEIt7Q0aZQ5fbJzNcwnEyiYgWaTKGULS9gRmYwbjL1VkajCJnaqtVbmZwV9OpOzgiO7P4D4VOBwXcTI4XqtQmjSlZSn7JR+mrApDjEMIdVka57fmPB1Ph+f989Pz6XREpM1quL68HKLDkmrK85JrrQBW1RaVoiqGAFolp+pOKpNRJhZrOISz+eb8lTWJFwNI86f+jUpXDcReVgw7q3Obyrl5a8SgiTdRW6xR+4sGZ/h7oz+p1FJSTktaUiFVEsmaGYUDoWd1qKayTGgSvIt9PmTg/un6w93zko/H03RaTnMqWQxMVHJZgndXFztkv0t5n+opW4VIYYPsGc0ZCjHhLyd+0Totx1M69rByjgCysYAXrVVynaaSJcc+Tt998+5XX/3+3/8+p+l//Pd/+ud/+Mf/9n/+98f7xyxTrpqyTKleCV36/nK9vfzq1de//frq9jJ68t4REzlv7FtOR9OUF7Epl5xLTbmmXE/5UE8P9Wm/zFXV+7i5uLgI/lLrfndvP8Lh5/3u/gRT4jFLBjaqye6Ox67L67CJzgOAqeaap3k65mUqy5IXF0LgdfRYgcAgVS0KyN6hIxcQAUSIxHlGrLUWrUVrBUMyAgWVSmjBcfPrNJeh0nnoqCYiTWirRGJmVUptYgszQ2AkF0If/KrvPHP7G+eFEqHRKpsG1xSMm43gZUV24eri1ZvX38zzUWpiEkI0DuhH7C4oDMSOrGIctd9qrQDIPqJzgEzO+RiJnBo1YKvUPHQbybPkWdumWHItKeUFlkVVmb2opSW52L96/XpcrRhhPh3ufvqxLvMZVmRWq5YqVaEiAQIDofc+Ds55BMg55cM+i1X5G00MnKfHVktCU3YIasjKrEhApOSCI8BAaLgstZaU6qKGVIPzvvPeO+5C77vt5ds/bm+/Gbc3LnRVcs1Lno4C6LsIiNNy2j09fPz5x/3TLhetFVRRQQ0roDKfM1s26+3br766uNxc3Q6aLg73P2manI8UHERnDFIT+NKvnVInMIQ+4v9UxKhM0+lwOEzTdDqd9k/POc3OBTNy5BBtno4lT0DOucB+QN9zuO2GN/3qNg4XFFaXPP7ue//uq1+VWkopaV6mJc3LMs3LNE3Pu+dPd58eHh+eds8PT/dpnpe6FNVSas6l1tbxw5qL1tIGEaZoCvhCgDNFkVZLtwNmS2yDl+CwtpNjm8F59jXL49Pjxw8PSymu79Y3FxdvVnHrDbN5qSUnhCZIMhVB0Cb/PmdEIDuqpvv9cb8/XsXoPVnFqmcPq5qAETOqWqnVMV/f3JRS7j5+mpbl/ulxd9zPy3I8TYfjwfu43mz7YShSBEoRScfl+fkRGNm5Kvpp+qSKIaxuthfly05MrkYvF/ZiH/38uwgvHAI42xDsxVdiZwEGtilSE2QQ8RmHBoQYWJwV0PNoiUiJENBaEeP8bLY7Tc/7+XQ8pPnktJJyXHg0f4k6WmXJas2yYE0qjud353+hVGDmzTjcrPqrzXq12bB31fQ4z3MpGcE574JDxFLLtCRjzEWPkzws84/Pp58ed+8fHh+Pp1NJU6mzlEVkNl0AMpz3NT5PYIiQInuOvQyjBgeIE1H6/OAigMKklrFC1cFo5+NpWMvFBXSMZaI8GRlaUdRSk85Zl2SnJe6e5aW6bHbNAkqA3OBw4AjYIQVDBwBVJZeUUxTfxz44BnOq6hRQzLQ2IPDZPuIYPCMiKYJqU5yRRyKAM+MBkYiR
mo2SUPSliEFobnbAM/fsXMQomlUzMhWBRt1vJ1B6cWd+sfFnxBlpasbQswkTjDAhPQP9LPCEFs1GsA1UhEKWRdKp5HKanx+PyH79eliN6816M/QDmIlIg801xw07jtFfXWyuxujLUpdlf5hKKYxWVE+5LKK5gQAcOcZzH9ZHqgApmYJiC2ci+GL9fdER/81Lz70WVPv840aYadpKFdDPrqumDQN4qWXa+2JWRXLOOWdR0xeTlgoIAldAImWnJlUQBcRM2aDAPuvdtEyAj09Pp8OUi4haY3zlZQ5MuRg6fyj1VGUqgK6O1HvyDKag9IX3FaA1m7JaMglWCSmBaSM1SFEpJeXyeN/98MMPm4vV669eX73alEpV3fOpCMU0zVnwVIkKReVt7Mfbm9tvv373/a+vbi8aaRYRiRjYvchW0YByETglnBNPSfdTOe73S/mwe3w+7QFoWK2DW60utleb9XgYT3X/fHzk+zsRctw7imauVktLJjD9ottXRPLZ6O+QPFIw8gJctA010JDJOSBGbK0pcwTMrROqtS380oCJcDaoEwM3NR8Jopmaymd4TDPnNRpktbNHsyHEguMu+M6H6INDFKv25frQ6hh7Wcj+1pxEzN243Vy9XUFlkugAwZaiWTCpF2NrYVPrjgitLbvIgAimQMiMYFCqNiEMEXJA5zyETmrKOUFeMCelIOZFKhGIllrFRVivVxdXV4y4J3hkl5uPtI2HiMghK0BzSDrvnHchOB8ZyZBoXoAdEMPfPjPNr85M3jvvmB0QCTIwEzFQszDrGSaqalKtqmWW4FUih351++YmrN9ef/2n8fod+a6ITqd9VUNXwGrJZTqe7u/ff/rw04effzzsDlUJ0Xs/sGNgIQbnEA1MRLUOqzH0cRg3nXealur8+W6gNQIYMwH4LvYySt+viPhvL8dEJOXleDwcD6eSSxPIOObQR2YkMnKMbghx3Y1Xm4vXq8t34/Y2rrYhjMCRPPiuExUDNLVSSi4l57wsyzQvh/3+/uHh7u7Th08f/vzDv9a0zNNcNauhcx7BVM/xZGZaqzRPnGmL7yEwEIDmDkT9chmG/0ncg0RITMA12fPDafc8G4HvQ1h3ftO7FZuSW3ILAzUVx2TEbalvxgIkJM/U+QJ2/7R7eNptri4ikyGqnV34ZqRGolDVSISIYtddXF5+/d23x7Tc7x6fnp/vH+5zqf0wbC4uXr1+Pa7WKaday9ANp2k3L5M/hW7o5zl9/PnTstS+3/7q3Vf55cAPAM7gMyMU+TO5veHD9Rwlw4jt0bb2RwG48UYbZtoAyBRJgsOhd+xYhAmdd+owmegLPECBREGQlHwxWrIep3k6nUQKkQa0gWwtdZN1DbUDMZWKIAjWHE8Igi1ApBFc/+a2BO9fXd/om9fbi8txvQ59rwjDsswlL6ZgGJGVcVfSUas4fMrzX3aPPz7vftqfHk7zbklTrcm0GAiBMFZAObdDm9ABFAyRIrmOHCAh+35Y9WqOQ4KpQbYaYMIMqtqUy/OSPpZ0RfbNxSqPHR/JJ+RKsSPqXT5O6X6/TPM8nZbT6XPORVvd2vDZgUNjAufQBXQdUTBjFadQc845uT54551ziOjEoEqZVVWsEYLVGsiyjeK5Ma8QiNDApFYDYCZuzUEEDMR0LmKaGviFRaJoyEQESEiginzOxj67UZtsmLQhgX+x5xAJ++TCgT0Q96oelNEE4ZHcB4VPuSDR9+Pqpu/ANBVgdbXy0bFDLEVWw/r3v//Df/gP//HXv/nN1dUVAKjqer2+urpcbdah67St68Gvh36tZB6jSckUHYrqlNwsWgwzYCFc9fGi403vhhh9pjKdajFqC8OLLQuwycHObJTPa0Hzn4qhGClQNRJDBTBQgSrNkwK1jVHPLsOXU4CZGSGqiWmuMi8plYrkfIgEBpUNFayCY+TAfmBAjw6kEKDvRj9ssVvPHKvhQ87HklSNiJ0nECzMCjhVJZVFZal1zspKfZ0bBUXV0Kq9ZG4CABHEHn0QyXOejEIiRDIHZrXkuhCzPt99+s/T9Pz4MC/zr377m+3m6pvf/Klit7n+688/vj/tJhCvXS/dxm0vb7756s2vv756czOs+tbVOr9xzRf+MnUEAyUTBHG8mD4e9+8f3v/4/l+eT8/ex225DD2uL7v17dvVxbA/Ph73x2W31ApXl6+G4aJYKAKdxzEgf3FfqhqSGxuzolYk9mEUclLVDJVc09QbkqgRQGiKB1bVqkACqO00Z+iQyJGjl4hUJkQU0FxFpRBa8J4ZBfTsRzsz9dghO8eOXfCu8yEQNa3QeSRB57UEz5TMF1kCnfvZ52tBMt+H7c36YrVedWMkKenx8fHxcbc8H5f5WAW6cVxfvB7WF+CiKtZUtGarybSAlVrzMudSqogBGIMymmMPiCpohOYcmnPgsWTUbEXAoIVrMjOotIQJwJfUW24ASz57UclhiOQDEAMgErMLoeti7mtanP8lYbgZxJHduFqtx3X0HsmKJWQJkYFRhGrWlCwJVvNqEaRYtWpQCFMMw827d29/N1x/hasLY5dSLqcplxNY6XtvRabD48f3P/zzP/5/7z5+qKUasgtdv+ovtxfDOLDHlgJuWkpewHS3/4hchu5X61U/rK+q43x6VilWDYEdBARnUgMyd2HTXzB/1l0BACCSc2yqz7vnw2G/jsF3nQE454Y2VA3ehxXHy2H9anP9Zr25HlerEDvigOgAmNFeojeb5fscH6CitWrJJaW02z9//PThv/y3q8P+ebfbiSkgj0Mws1Ky1Nom5oJVSi2lZWQyO1ABA4NyFoU0PtLn5/1zUXPOYTM0Qa0w73VZzA/ebVQ7yKyEDsyhRJCiZTFVJoAzSh2aYhYR0XtadZnh/cPT+0/37371bu3ZUpsnKiITBEQWq0VbwJtYKa7vfv2775Xw57v3//rXPz8+3BvS26+++u67X33z7a+GYXWaJ6n17vb1/ZOa1pQmFygtp4e7D/vd3A37h/vvS/2iE9Ng7XDmiZwTaPHlbNDWdDrvqo0MRC+dGCAkZmQkcyiOoQu26owc5coKjlkJiqLBmS7VMpErumJuqbCI1qwoGH3waB3WEbF3HMlYFfXcAGsPeBvOVsSX3hAB/E1IOrPbbDbp8qofx5YZS4Qju2hazUQNq4JhIlpqeZrmnw67//F49/Pz/n5ajrksBgUaehiAX8ZrZoAN8ornHxF6oogkgOScH4ZBxZGDl9t6jogHM7Va6jTP97vd++Phk5Q1DRg6kgqakZ3vOzKzKZfDUbSWUv4Wr2IAgM02ZMzADigyj95HItYamtafEdCQzLmWbAtasApZFQM8MyNUQeRcB9o5R1ilOVykzUPMEE2ocepfQm9bsgbg2cmOioiICmaipmZtrnLO2ZEGvmjJJsyfHxgBOLHbhYBdr54hWxAzlEXtk+qdoTGvgtsGPzraL3nJuTZxMaD3YXOxevfV13//9//u7/7uT9c3N977UgoYdF2/Wq+HYYwxppxFZUnLtLjoDNGpC6ooKEjWR+/VqsFieAJkImvGA47ig4ZoUl6YGeeV6pf
vEf7mOGNQRWtzJmjLNjqDU5tcq6pUFVQ7Q1TOFJDPf9uaLT3lfJrnOSUFAHbN8WDMoCZIBoTtHyAPCqpaFatSrnCcCpd6nMucKhISgFSwdpQ2DK4yaZJaaqlFCdBBCVgMQM9do18GyeSgX8OwVubckJ+Grhkvaq1SEA2Wox73h5ILeX+ay/e//+N6vX71+m0tsN+l+YRiTK7v1+uLm8vbr17dvLke1t0Lh8p+WTThs7X7hY2GhkyKOs3H/fPD/uluTifaXoiWaToej8dpWRhYwbEbu3gJDlb9uo8dEQOQY3aOf6HDIpJzLvZxGIGdr9JEqwoM1vLAPsfAI4IyUxd98IgsZo7Z1dqZGBk6dNwayi+LHjGpSi1JxYoAEwZkRCaqLTSTEB16JgrM3nnvvGcOjgkQTNoS9sJiOX+gmrjWtP06felOYnbjdnv5+s3VzeVq1XnU0+EZdgdAiJ5NXRWIMYZ+FVcX3I9Sbd7tjTD2wSTN8z6lsqTjnHKboRKAI3SEqppSLUWlaq2gRoaMxOw4hIAEp9MJmQhsnk8qAo02A4zBex9C1zkOYIDkwUdzoZJTQ2u5a0zOOfae2eEv18Ld0K8247ga+6FzxEjgmMmL78DQSlZgFGQW5sw++67rXAHPXddth+3b1c136zffh+1tQkt5XtI0n3bT4QlrjUNftT4+7z7cPz7sjqelBOfZBWSHTOzQe++jC9G5AGY5Z03LtCz7/QEenzaI15tx1TGbZKsnQnOICMExsT9b4NfDpq3tn59gAnEoHoVBPWEXYvBOzEII42ozrrer1bYbL113069ux4vr2A/sWgYLAeCZc34+4jG8RMj98lIAg4vtdj2Oh+Pz5fYy+niaJwDo+o4Qp0mT1NoSWQ3MyKzl+r0Y587CJfj/8zIAMammUkEUc0LAMF6sw5WFVYcxmvOghqQEBeAECM6TgjXSNJgAkqKYB7eK4unuaffx7nFOqW0wzGAgeOYmYhWF5utA1FqCc9vLi7fv3n777bc///zzX//yl1yq9x4AD/tDzqIGRG69Wqd8yFbOkUtyRukhzGlJ9oX71dGLQRDPbzO8jJDaJOEz+Aia1+ilM96AcOTYhRig8zWQRm9DOJtvk2ptzf5apVSpYmDoBZ04nghPakmB0A+ROQSwEiCvQL0ZqCQBqbm2TCbElgIlZ183oUFDZb/k9wEAEFHX9/0w+NgJ4PF4MqJuHIeu8+hULeWczTLz7rj/h4eHf3m4fz8f9ykntMKk0iZ9AESEzhCqvMhg7RzwooCA2NhAiAAO/dBNUgMzAnhAVKyAZsYtbqBqPU1Pnz791Ifr24vIHNkHCDIr1eJ65ODi0OehB8f6t60/a9xogDN/FBDNHGIf4yp4RxZ67C68Gxw7RBBnRIjsnTGbkBU8Q3vQ1ESraDG1aoAvZqUWZ18BkJml1PySSvD5XIjnmGF7wZcCqKmoFJEqVc/OHlGrAm0WrkpEgTl8PgFUxCfiD6Erq7UGRwuGZRoqHIr+WOqjp6t1f70ZwPFDSj8+PR+mBVTnanOxrl/99tW73//dv/tP/+k//fa3vw0hpCW1869nH0OMsQshaJWSyqdP98vxtFuNzH6RmAU0pWB12/vomKRiNRGYVB8xR6C1j+hcGEdHCGmyWs5XCwAvwFZ8CQD8fFtqrSVXaHAYYEM2IkA1AFGtqlUEqIUufQHJbB1gIDQV1WVJx9PpNE25lpdc1/NspBEhADMhUs1Ws0hlsUrRha6PMQZXjgWrMRuiVE2lliUVTxSJnKNSc60VxJzn0cnK1yIiqlDVaoUXF7jzsL6yzS0gmBJUZlFSA9GqVg1IFQjRAU6Hwz/81394PhyP6fTdt9/erC5GFzBjnURVw3q4vR6/enf55s3V9nJgUoDS9J/t9PfixD+P5FXEVAmNHTo0SSeZjjilDulyczmuLyXh46fnP/sfAPDj+8fn51SS75zryPVMPVNgzorKAX45ZVLoxjhuQuyB2LCWJgtAdt4js53hnGoAzrsuhtXYNxsk0kuL3tChC84zEZipSGmZdWY1Z5iOogS1SZ9cs1q2wC9mDM4H54Jznl1r4jQCgQJqI5W39jYRNhiUioi0XgedCQjna4kxvHn7qpBbrdcAetrvdofp7uFpOc3b1er6OqixuZ5jD+R97JnLYovn8vr6yrR8uDvtjnkqx9MyN/XWOdFctJGPRKQ0FKSJY4sRfRcGtCr1w8ef3T13wVupNRc0k1yEyQ193Ky3F1djHAM4Jo8hVOJkNs1p97yb52oqCEYND/xyX3xwN6+vUr0J3iNYrgsRDWP0fSBfFSp4xYhh9KFXIDZk5zqrIfjN+uqb22/+1F//anGXxxmXsuT5kHf7aXe/f/jEIBGvl6W8fzo+Lhivv+0u3gQEk7KkWc1O80nM+tKLxg49EoiqogBJyqcPHz9UwXH4ph97kKQFnc+EiOqJA3ehGa/W45r5c32pAAKweJtGJ68uxli3jgkJGTB0Iw9X/fbNzetv1he3obtA3xuiGZQqiOb4DBAFxBeGxXmBbg4ZU5Cq7aOoKiGGYRiHYfQ+llzVpJHpjqJpyTllEWVkM0Cmc0pi4/TUdkb4vGTpL1/+L+uYFk25piWrASn40I9Xr0N/i7TuoA/gSEzII7sKfEQwjh5MlUWxAAoAAohy8SsPkR93x093z8fjXGslBg9QRcAMoKqqaDYFRyAApoZmjmm1Gv/0p79Labm7v/vw4S6n/MNf/vLf/tt/dy7c3LwiVmLuh5WmU5sFMLnVuDJxhF3n/ZeTftfy3z5/wy8PUH/7wpci7nPZAMTIDkKE4IShEtk5ORsFkJvHyXuizhF6FziMGHqInVc4ztkl6QtX0ahV6wLlCFqFMIGoMhUnc6JSfdvTUfTla8OX2daXLzMrRZZUCi3GnErlEKghd5QMDDxmqUeRT8vyl93+p8PxgJbZVRS11vdphRsxvhgpX8aK+sW74IA8IIAiY9/FrpT2QT/7YL78slQlpXm3f75/fP/p/jIOb7ZXa3AsBLWxicwcUxdc37vub0Rkeo6heUlrwpYYZUzgg+s7F0f2I1FsbXolNCZkR8bgxCMBVyMARgJqSH8p5zRjNWmjTfiCzIatD4S/XCicL+rzcbG1XM6MYDUwRRBo1g6t1cywVKyKXzxEIIATu6MLXehi9GyV1AB4p7JXKrEf1pt+1SfvEjldY+glEI/II7Bfba/evfv9H//4/e++v7m9LrVUrc3w2XVh6Pu+70MIeV5KKcejlCrZyEU0FyqEXDEYOCMFUsPcMCFi+6V6TD2zRNoYDEi+3W9rGYH4t4/8F/dTZJ5Op+OR21bQEOVVAK1kKS3woLSOFGoLWES1s6IIWBVMpUpOS0pzLqlqbaGuZiomJoKAyOQYmJGJDUlNRXKap9NufwpRY9CSUIuhGqqZIKizZluW4JCY+i4AuVU/3q59H10RZq7HktF+aV4igx+12ygiq1E2qhUNFCP0KxA2KWIVQC0ty3HJtdbRWXd43ty+dsfiHz7G6Qhh3HTu5mp9c3uxuRh9jADVXmB8Lz4cgLO7uyHv0Xs2Na
tKWjUvdZ5gmZyjgahDzEvOdjr2O/YRwHvfOWRcUn34OPW4H3i/v7hPuLlav7uOY+fO/wkH5Gjo7SU4hIgQmdgBI5gxomdynoP3fQxjH0OgNhY+J8gAO3KeGeDc3MJSqlRRQ5+BPHHH3IGK4xYjVFCF0DxT9N47F1oqLBDYS+guYvvWMj+sBde9aOsZvfFZyvL5voQQXr++ARec83Oal9MRkb3veMTLy60PYU6SzRmcA+tNU5qfASX4SyRnDAVqkpxralhyLapVpFYQQHImUEpWFaKGomdiYuFc8uF4BLD1MERm75zzvutiHKPfjsN20282XTd26AmdOWZEVhM1BNCGZVI5I4VfXs7x5mI4LQMYaJWcjZ2GDkPHQCJGBo4dEniHVJZZckVlhPV6+9Xm5lfr22+sv9zNNqeTlmM+PU6PP+fjE5QZEI7758MpP++Pp6U455kZGqbPx7aMi8jpNKdSluKRRepStagAmCzp6NxheiVD37t+C8HAntFaBqABKZ1BGL+M+QAATKxMZXq2dAgMfRfgLL/g2K9W21ery3fjxdt+felcr4i5caTAkIybtuZFGtfaAQamIqXIkmstotqkWgRgQORD1/erEDtVqFJN1Xm/HgY2PNG8LPlcqp+7DWf03tlZ3b7eL/fHLxcyBHCgCLmiAhk552O/6tcbT6tOoxeCYiV7JpfJRUCj2Jkh+Ag+AAgScRfiuB62Fy70x4fl8fFwPCw5FSJzrE21A5oFCExUQGtVACJENkDrh/7bb76ZT9Of//wXIq8Gu93z+/efDDCnNIyx5EXKS7dSidB1cagRVdxLiN/njxkCAXCzE+H54P1FykJTOJspWJOzndPmSBGRWdgjc1E4lFwysgABcTY2cs5zH/166Ndjt1n3m4vV5rZbXfrV6m6ey19/mO6f86w453Q6TYen6XSYTE6rIUTnUDAngaODuS/KVpv+UREBkBQb2fPL8Uup9fHp6f3Hj24cw7jqxqEfOhc9AJYlmQAGXxQ+7nY/PD7dH6dZsVutPNG0TKJJUbXlaQIyfNFzMmgwuYbLJASP6AHVDAF8cOfEbIDP2CcDfEnyFRaghPkwPX56uu+3p7iV4IPrSFLNk0iqUsVRt133mw25F2Lvlx14hBbSQgiAWrUoxtDHOHoMoiTW9ElMzlHLgYEQDR2RsKHj9msmpuWM8D4PvYGQ2DFTI0rZy/99hrtCgwS9TO4R4HOiaGPeIYOaUhXQolrEVHDJNqeacv08GjNEYafsFF3C8MxdCnjALnkA9N3Q48Um993J0Pf9t7/ervqx8975gCGOF1dvvnp3+/r1q5sbF3xVITAi8kg++nE19H0XvAcAUWnDFUspoIsugvPKoSge1S0GUkhVBNAQZtGHadFlPnq4je6SZFuz1yrGhCTY+r7nhuRLOh4AQJV6eH56fvgUiEx0mXNaljQvAADoQsx5qSEKQovQavF02ozWfJ5Day2l5sWkmBaDqlDPi1mtoJkJveNh9F0XnVUpPJ1kSVpkydMx7zofI1pVSVUSkgbPsfM0hK4Pm8B9730/xKEfhnUfuoGY2Sn57lie03PrjLwsZKYs2FvXOUTvi5asmpQ8hpHzAZ8fy3woUlHNoSM7no7/8I/PP/7lsI4s1j+ermhw43evtv3t9cXmcutiBEAD34RS9HlnRsAXKhoiOY8jUaCyHCbLRXKWvGg6YQE67ZiYEwTkDnAYVqvhcuSod5+Wh0/P97v847D78OM0Xv3rRF//5ps/fLu93PQAYIZVaClAWgFQ1Yh9iB0xqzWLBPjg1+vVMAx9Fz0TgiKdp1qOHaIjdABY1VSlilWlit7YmUN2vY8rXBXJxUqRnLUWqJlQAkMgaPB2bjGLCnZOl0MFEn7Jj2vsaxFQYwUCdD4gOwFE/iVyJHj/5vbKO1+K7A+6dLFsLvy3KKX00S1Letw/zDnFVcdSNE/p+LR/fB9J59eX1MVF62JaEZQQocWSqNQqtTS5Tysvm+QaGRURFGrVFsnTxK0heheHoes2F5s4DtR5DB5dSAbCBlhLTWZIwKJVawUpVgvUikVaSmF7EUHX47g5p+/FooAQghG2FEb2FAAdYUDve5+ys9kSx8321ffr299gt51K3e0PeZ4GmOH44Xj3j1bz9eUbMHd/93z3uF+Ok6Z5Oi1aZpPkibvVph9Wq25U0d3zc9pncgikCJUY2HVEDkFPU9o9H/oYxtWaCNNy1DITiemiCZrGLtmtfRYtGIBImabDw8fd44dlOVWVplVG9P2wur79anv9jvyQkszL/pyn3t5pdAAKwC/I7vNksYrOqe4Op/uHp2lJjBS8G4aui957hxxiP8Z+JHI1LWVJ677/6s1bU3t+2t8/Pd99elxyZm5Sx4bRbcpbeLGAnvV4AJ+DbwEAmCh2wcWQKqkhMFPwRM5x3/dbHGIBW3RRD+hm8p0xYFgh9tgfYCFg8d4NF5uL69cX23ckfJin/W467pdlKn2v7IUdmZpCRkVkBEOpRdCCi96xZ3KOh6775utv/v7v/p6Qf37/4Xg4mtZSZX94Wma3zAe1ioFiHxEcoTEHwlLF9Nzm+VzEnPkunzsxKJ+LtV/a6Ofv9aVVYwjaNHoKWawSJQrQxe7iKnaDA3LOuxj9OMSLdbdd99vNsLlYr1/FYcPDIKfDpY+ni+e18vGUDvcPdz/x8nh/mtKD94nROQou9iMRBznNkJcXB+v5i9GXb1++BFEQodEMY6AQBDGrgqoqFJGHZf7r0+PPu+cJDGP0oScEKFkwN/kwMRmRfc6DaRG2cO5aEBgZBIAAUFUJIEYfqgeHBiDQ9KF2TpvFRuNDAqvL8vzwdDduHi5ubsOmH0ffGWSzCsw+sPPsV9dXzru/vaDWgkZPHIhb7rRarVbQATKJVm0mGmbwTtlVg6qW1UQN1EjNEzgjBgRyhg5JCSqo2jkUhs7E0gYkPXsEFFUNrJ1PEVs/B885Saqin3OKz60dUatitWjKdZGUS/7sHDGDqppFc5GJZKo4YZg7h8REDkNYyLGP/Wp7+/rtb3/13c3VpWfnfPAxDuv11dX1MPaEjdRnpkpEopLmJeV8BqSeyQi/KAwIAAljjGTOHAtYS64N5163ktYiaVFIwBlhQTZogBdTRkDSFnH9tycYBHMoDKU1oEAySDYpYFiXlOdlOU5MXnL1wTMjUUNXGzc9BjOAkBYHEj0MPRP46okNJVOCAmo+4rjpLm43F9vNGB1qOZ0Ou/10/7yAIJM6MiIEogLgPa3Xw7hdjZer1apbOTd03vehG/thWEUXqIoBm+vUz+HT4csgGFVdckml9CsNvgVsoTG6Dkfi3EMVkaJzBa3AxprScdrfPdZ1Bz2TAPcXPK54sw7es4jOp9k5BiJRzbmAalPFunZD8MWvjuidMxIrUlMpWUoFsbOzgtUiYDBzuficg8NqZdBF0l52H08nl4Ge49Of90iO0pI+r0tVoVQkA2oyDxdCiEiUSyYkF9x6NV7fXq3X6+gDIZSaz2Nu1Vyt1lpKLrXNSuUcRvMyZmfEQOjBk3OIQc0bFnSBoDbfUht4y8u2Y
QAKVM2qgUCbRIOBVamlVFQLyIEcIhtCVktfpG6ZqZacTsfD4Xg8TSB1HLoueq1iUMV2YppzprQAkdT5sPv09PCx83RcTt7RApoBikCpRoomVlsEUDuIMFnrDLTKGtGA1VTEatVSinPEhBfbzbtXX92+enV5cx1XAzAJQhFQRGISsMZHkax73k/Pxzx1qEKiuhT3RZ//jFpkQCISs8Y+gmpKYI4wMEZC15JRHHfeWxhWfnzbX37jV6+SwTQfT7uHfHpCPFp6dHZkx13w86QP948fPz2cckl5yWXSmggEh5WPsR/H4Ls0TcsyH44HgZbpBv3Qby670I9M5GLUxioPIzMX3RsCqGqVXAto1lqzzC92WDBVS9NyeN7df3y8+/T4vMsl9V3oQxe7GHw/Dpu+W4tirlW1AgC1adP/erLxMm1XS7mc5uV4mhAgBm+gAAMROe8vthe317efrm7cDh2yQ9wMIxLlVMNxajoL0xeA1zmi6vOGjZ9P1gDwN1olx6vVehy3QF3NpCCmJc2YJ4hr58SjGdQqhU0duZ6QOW6ZadwowQaW4oNfX2w369uhuy2nlOZ63E+H/TyfUvRIHuBsKs6ggObBSKuWRsWuZZnBhzB2Yx/7777+5ng43t3d1ZKbckJqTjVPp8lAI3YQEVtwp2jJeTnl+TSJ/NJUdh4NDRiBDFuOOn2OrX9pCZ9bEnhGYzR4vCKUqhWKEgAPuN4Mr9+8+s3321ev+mEV+p5i4L4PY89dB47JBUcrIb+gZeDV23ff3rweVhcllbuffv5nh4//8s8fdsck2U8hhHA5jN9ursMoBA9mZqmqihKAqRmpmSp8mTTI3q+vri7fveW+d33nfRCiU84ExI4z2lOa/rx7+ue7Tx+mIw/j6CMA51qKWkGTFl7rkBgFUatmUADroOluWm4aODRP4NFYqgMdY+gtGxsAVFC0pqMDRERHjr0Hxmqi5fD0eN8P729f3W6Hq+tt51f+5CXPBgBVaC6rV7fOf8aoQ8ugJ0CH7Nn1PnSeGaFqLjUVyVwwl6oOuPPGoYauEE8pTylPOaWca61m4Ji8c513nfOdc44COU8qzgqDUpvUtolSS1oicoBNHkaIQKbNfIgIVQ1MQEWrSKsoWmmJ2h7/qinPC5xyWT7PYs/GgGWeTiepKlKi9zisYoxsUFWejhP34++++fZPf/r7v/vjH66vrxvsnM6ZGoQIqgrAXde1f3O32z08Pr7/8PF5t59TKqpKSEzRh80wdrEzUARc9R3Ri6rFrEUxOCZmjCCdpDXaOnhvpc4E85FrAhVEJABFUm24nF9K5eD49nL15mqVpyXNCZ2CU3NWq0LNeT6dnp+1Vh9iiA2WzeyJGYkBHTtnBCZsY0cXq87Zuoy9iTJyTfm4J5XU9bi93L766tXbN6/fvbrug8tpfv/x4b/+0193u3nVDb0PjoEsmIah97e31zevrl69ur7ajOvgPaGiWeuxYSvNXKbuIMSEZ+d8Wz2r7Z+X/dM0dDEQMzaHLEaiMVLtUARNJZdaRAylAu7BzDAXGInd2K8vx+FmjCt3yqf7u3smW/br0MUqst8fS05E6DzHru/HfliNPoTmBSZDLZLmvMx5SZi1q+7KRbKw5bCOTB17Ox3TsoCqPD50h0e1rNGB96hq86KHLNPxs7jPFIpAUYyOnfchRO+9815V1dQxXWw31zdXb17frlYjIqlqqZJTOi3T8TTtDsfd8/Hp+XCc5lqzgiIzsWNqEMAXUqL3nQ+RXSDnHTN5sAwyi1ZGo6Y2avg7BAWoAqmqgjGzAuRaU8nLkglgDL0QZjAROS3pMqX6cl9SSj/89Yd//Kd/ubu/N4Crm5txXMUumFopCwfHDg0lLaclzSJl//zp/uFuGPrdMg99lwGLYUqW5pY9IFKVAH3oyDEYiagWMzUwYiMgBwYKXM1yLmDsCd+9fv1//7/9X//wd3/avHoVVwMiqFou5yFyNamlHo7HT5+efvjLT2WqoGB1fYp7qBp9+KwiU7Ol6pyF1amoVBMUkupciLTyvPIwEJJqKVrNPEW/Hq7j+ltevSoUT8f9YfeUdnfL4UOxh87V66ttcIMI7fb7Tw/3H+8+5pyLZLPsHI3dMAzjervpx9FKzZLScpqPh4bwQUfOd6vt5vL6xnu3GcfVxTr2vfOd5wDwdXWXVlOlRWGu9ZjzKS3pl+JSSj49H58+Pt1//Pjp44fHfa553buL9faaR0KHQKZak6gpE7F3PgQiag3+F1XCFx4hOFPMvXN930mtJRfTKrWlVLvOhze3r37z7Xdlnj/d9VIWh2hSS7FlmtKyAIAjbmYLaOpJBGAyq2cBZ7NdABjAEP1ncJ9z4Wp7e7Xd57pOiaosJZ92914NgPqQoCLMpzztc8ngfO/CELsrFzq6Xedx0Tkx0bBejf3G2zalpzTloz/td9PpuGxWEYRKRqmq9pk9wmBaVabTseSUl9k7f3V57Vy4vLi4vrySUpdp8p5icF1wKopna/CZ3Ke1pGU+7nfT0+H56bF+CbtzLzLeF1XvixvpvHG1UwUg/BKFp9CUx6QcoO94NfL2Itzcju++6r/6Vbh9xf1gIWQCYJccW8WSapViJZWqU81LTYuk1Xbz1VdfB+cuV2PdP//l6urh04fDspS8OB+r8dX6cozRrSqaai0i8pLV87/oxLBz4/XlxdvX1PfsPQAaoKjmXJ/n5XmaP56Of90/fUqno9aBkRkli5RsIm3BJgTf+JmtYiMgwwHAwXmYjQQb9gOzQ8BanchIeOHDdRdPPrAZA1DrTBM45oGdAzQQE7Fpyk/Pj/efHi6Hb16taLPy3lAyMIIop9pdXdEXeRBwniiBnWHx55LSwKppFsEipfXVQBUzIM4qD8fTYVqWUlIti9Rq2notvfOjDysfB+c74h5gQOkJomfH4AyJDNGMWlQj0EtNowRA+sKKUDQjMG5Y9bZAIzk0Y49c0UuFOmLfrQK9COJULZe6pJJLRcqplAoYrGkNtelkkd2rV6+/+ebrV69erddbM7Ezt99Etd33WmtKOaV0Op3ef/z4T//0T//0T/90fHrCIgHAIQWDVQhfXV1dXF250PkYoneO+XOcKxEQITM5RxEtau7BRmZXkxx38nRf7z/ocadSVaugewHw/vKK0X/z9mZ3Os6nJU3LvORlScuSUpJcVJCB1cpSpED1pMFBYBc8ucAUHXYOHKIyOvN0tdr0XItoNQIqOR97Elm81812dbUdry9Wb19dblcDmq5W4ynlu8dDF/ou+ODIM3qy1Rhvrq9uri9vry+uVuM6eAZLUrKKKjTrQjVGc+yaVOOLTozYMtXpUPK2FMfYkM2MhECsLlK3ojgaOVNQNgH2EAYIXnpHQ9is482ryzfXw+WKO6i6zMvznsX6sa8ip+fdMk1g6pjj0Jf1qLmErkNGJnJA6TjPT/vT0+F4mOYpSwFHgItwKN4cw1KXudaieSmHPS+HzqG6lTpXCWSZ03GfTs8qvyxkZmzAQI6dj7HzwSFircCEMcbLy+3rVzfv3r0ahrGUWqsW0ZSym4IBT4sWOT09Hx+f
d7kuQOhjdD4SuVbLEnOMvo/d0PEYXYeucxQ9gHFNlVRYFUG1Tcyw5UKDmGVVNXNICpDMkkJ6ecgAANVyrYdSjqXIy60pKX388OHP/+N/fPj0yXkGUNPqXQDAIllNh3EUcKXgsuRlOs6nY62qiNWoDfORHBipvDgKgZnAu0DOGSCQem2JB0KMyB6IMAT0gZ0PnocYr1brX928+v72dbfeulWv0SuCZJGUdc61loLSRysx7YfVxcWFqpHK2I9guL244Jd1zAyKWK7o1YFCFRFQ01pFlKpxVnaO2LSWqqlCxc73r8LwyrgvRY7Hw+H54fR8vxzuCR9pFcfrV96tHu7T6TSLGTkiAbLzYaPWqqrOuRBcbchtqbXkWisye9eFEMfVuN6smKnvuhiD886xd44MkLA3yoiz0skS5jLLF+Q+1TKfHqbD3TId0pLmuUwpyTJbAcfjejuVlCWXmgsAcAiNm0ZEYPplc+r8EFqTeqgj6IO7WPUebZpmVUUAESm5gtrQddcXV1+/fdtHzstUJYvYPC85VwTsYiRDVXNEMQYASrnUWlWEEIN3aCalIlhgend7FV/uCyP3bt3xJi2UUyppv6Sj5V4LIXh/SJUhzWV+muuizDHE0bm+i2OkQfpST1NNRZKcnicVmA/7xu18fHi+v3tajRsTv0wqJQNO5DCEpnRXkZJyWqbpdDwgkmRdrTbjarzcXlxfXl5ut/vToUoFU0SLXQQ0H13wzjGqoy66rg91cs7zl2glx2ch3NkhfNZx0rk1D4Cf43MMqXlo1UwUyHvebMfr6+H163h7669v+OJKxouHjGk+LKUsy5KXXEspOaW0pJzTovNSTmnh4LZX29/94fff/+oPN1fXnvH4cPerX//66en+ePfzaTrYnFFx243+YvtuvfXOpSXXImiCdtbcNz3k52WZg1tdX+Z3ryj2xGzWRgt09/T81/tP//rp4900PaZ58Qjsl1qgVMtaS6aSfBVQYITOYTBiMzNQJMcwIkVkJu8ck6cb4jUGDwC1UE4rUcbw+/FivTn5KgyGRMDnvPqm76uACgCi/el0/Pn9/RgOry/ydtiuttEBeFJVmLPfbPAlANJe+PUCmkFRMpiqVc8IZI5oEbWqaJCrHfK8P8kzlk9p+mG3OyyJDYvqUXMyVURCDMQDu5WP6xC3obsK/pXnq+gv+37tw0gckLCpfD2Zc+wZPVsL60UyIgVAYhciIQN68VKLkBqZOYUoqNUky2oZt9Nw9fWrEF8WMoBSrYg19G8Lf55Oh5JTkQqAwbmh6y8vN8PQn6ZTKfUFssXMRL6x/fR4On36ePfzzz//8OMPf/7rX/71X//84cefpqeHESwiKaqJXHn3+3dvfvPHv7v57tfriwtPyMwtguLFIGSIyMyOIJg6UA9gOS2nw+GHv3z8L//P/Z//eXn6KGU2hxVMQL+wB8PQdX/47dfjZpCiJUtuQT8pnablcJyeD9PTYT7OKedMUp1ZIB4DdYPvvAueA6MDM0D1/bbHUsaStRapVWrOF2tfazKZQ/DOVPNSp5MFijFeroZ3r2/H1Sp2IYYwRD90cdWHsY99F4euG2PovAvMjCCoKiCEKqqlFKmLaMpZzb4EkZmaFMuzLqfikRtRlx2YZ1Qh88YePSqIiikJh3FzcfXm+uLd7fb1Olx7vV4PV9fDamTumRx4qZZT9SRqqEJS65wKGKQF5rkeTi4EYHTO9T7k43L8cLf/+Gn/6e70+FAPDzEgdcxpYXBqULWUNKflaCV7gNB1GGJhrgD5dDwd76fjo8oLKwIJ2SEHQEZyrkWSmZBpCH61Gq6uL16/uX3z5nXfd/O0pFSqYS51HIeuX7mwSgU+fHhS3YsgETnfuxBEUbUliXFFV8gX8onCOUDVOQVWySiFtKLUc+JKoxIQw3lzNa1qCGII7CgAIFZiMFDVVOVUZdEXRjxAqfX54eHThw/393fsyTue9vuGJgPHLnSX17ebSz6dlufnXUlH9rxabVebbRdHxzGQD+Q9B88BRIla3Y6ePBEbkmMEByZqtRIbBwdQGTRaXWleBbcdNyMw3j/P//DnyUxWMb/aSBeoKOxn/bQr05IR9iK7kso0j+uBggfV1WYbh/HNu69jiJ+LGFVQZYCu8XBMUPJStCzwcOKnEGJwgcAvWQ+pVmXuBpVYljLn5bC72z9/2j/fpWkfvKw6zzyahf3+aUnp8uoyDN20zPN8Wk6HZZpOx8n5Qy0ZEbxz3jtmpOYn9X693V5eboeuc4SaiyCBCiEwAhEzBWQCiEjRXFSwkg7whb9aJU+nu2W+Z9I+dF2oKdXT8VDmmjM5t715u49xW6UiEjM38Au6M9a+nT9Nz3wzbNN2tQBCkTs3rjt/7MKccqmmqimleVlKqo7d1eXlauyZcbd7/pc//+tuf1KDLvbMvnSlpBJjvL25QeLHp8d5Xhix78LVZmSA6Th5puvt+k/f/2ocupeLQcixTjwfltN+Px8fc5oUTGasqeLgCpGI2VIduNh13nkHGtiCDxY4Q32aD59+/HFZ5mE9oKmURbX79OHur39Z953Oqzjtci2ZfRrGsNl2IbiWPeqQvfMhdqWU/f5YK4QQ1+vtH//4RwP4h3/67/cPdzkt7HmzXfvokdR57jrnHb56fR27bnd58frrN+El/BUAzlLS8zTtl4HaeZKEQNDCrODFfm1gzBAH3Fzw9S3e3MrFVR42hYNkzbvjseyfj4fd4XDY7ZbjpClpySIlpXSc0mlOp5RWm833v/v+m6++NYHgu/Vme/PmzTe/+f7T/d2H0+HxeCy1HKfpbr+P3o+X10McTr6vtLBmNnOg/7YTQ84N1xf1+ApdACJTIKDo/EHtZPppOn067I85KyMBYsko4AQ6sxU6ZVQUJuo5eiJUAVZAxwY9YiDnwTvvONCGaFBEFRCBOdHzceviH+L63fYmZuGmLydTMDHJJkV1cSJiKMiK/nDU+6fT3eO8WV/eXoZVJx5QRV2CoYfPkCiwApJBM2Q0V5SycFbuHHvvQM1KdYRAOJnc13xf0l05fVqOPx/2cykdOAXYa52sLqAG5oACUc9+9GEd42WIt56vY7wehpvY3YS4dX5kjkROiL2AOVQGh6pkzkGbpbX4cGSK7DyQGhME51qYrVbVVOuydFO3vt1+DudDROe9D9GFgMyiUqRM8+RyzrU677uwiTHGGIlwmqYZFkRkwlZ+ANM8zw9Pj5/ef/zhL3/98MOPjz//9OnDh7tPd+V0ugV6049FYU7LvpZOpDO9Wa3+9Pvfvf76GyY6j0jBzEyaYLkxLpvdzYQBQFWkDKvLcprqNC2HxzwfkHzDEH+piXGOrrejkYFSLZazpJSXlI6n+Xm/758CMzq2JQEgDh0PnVv1vut8aEnJaNw+tg7Ne/FUnJZcS6nicexZJEhxRBQdk0iZ5+zZmTmAq81qGPp+DH0X++jHrtuMXecDETqkRqJoh7zWQm7grJLmOdV9sf3huKQkIl/0YkAFaoGUzLOAGBmQh+JEknlCAI4OL8a4Al5149Xm8s3tzduby7c36+vBbaGug1sFDFY
gTzYTgrID3zvHbJ3z2lVEM2PvvPcOCUQkiWpCWvL+mO7u9PE+nJ5X5Rh17gv4/aOVVCmYQc5lSadlPoDWIcYwjBw7IRIpokk1gSX4YtIHjf9K5+ANIGRgIgyet9v11fXF1fXl9nLdvL7EJIZBfAitFnG7/TSOQ/AhSwEkJkfEVZopux3oGNABe2CvRIUcYzAiDCswk2SggibNDGSI7mwqZGwNcTEBqwaKBIBVz4hzIFZio1+4SiWnTx8/vP/pp+N0JEdksHt8RMTQdf1mu7686dcxuEEx5GrLfKxanPfjZtN3qz6ON1uBonYsB3QgRipoepYKESO1UB8yBa0FyCCQUfXRDdHr2F0O3Xfv3r57+249jgyQ5mXWMkWsybui+Dzp3UM+zgvAHvSoNZsywjB0wQe+gNuby2+/+zq+jH0BQFqUGVUC/txUbmxeM0IIRC7wSEIuBMlDTlp3e5in03S4+/jX5/sPp8Mzg46bbd9dlEx5SbvdYVlS13Vh6GIe47HbG0iqyZKKoplDQo9jP9xcXw+xJ8R+GLc3N9evb29eveqGrqbSOb/uh+h8S4UjZCA0MAJmI4JJBeUXfwK0ahRBvffDMFwUZHZ5cmDAFEQsp7TMU6mFnHfegXmzhqmDM2QNGkDMtIqJgr3AElvcWy1Vaq6aii65pGV5enr4+eP98/M+5wpAxEHQT0udl0rsx7FzjtFAqsQQLy8vq4pIjTF0MW5Xw+vLbRd8LXXsure313//d38YhuFzESOLy0c47eb5uK/5ICVB7mVxtWQLWIgQiRXVBVJMeELSmo99CKhW5yWdHk7PH6bDPh2Dc44Vl8X/+ONPwQNhen1zFWns+37sh/Uqrsetcz7XLCLg1PvYipiaNfpI5Far1W9+81tR3e33omJm7Ch0nfOEKIBGbMx8cbmOfddt1tevrn34IsWagZpd/XMsNZzlMGfEGQAWNEFjBFCoChS77urW376Cm1epHx+rzXeP6eNDBTYfT6l8enp6eHp4fnjM8+RVI2MfvUjdnabDtEylqrxD+53jUIrkXM3cuL7++vs/3D3t/uXjx4ennemplPJw2LNzoRtXyJWiuS5kjVY7AEM702JfroS9G68vNR0b4sLEHPIQ4iqlOIxM3rLolIiRgAJSRO7ZDc53wMEIW0AdMyGoFDjbtQwQ2MgbEzEiRIBIVk3RVKY5/fhh7Nd/4sFv38Qs1FzxaIIqphl1Bj2BFjUsqgrZoN/Pyw8f9/34+uoau7FiqVIsGHpnX8ySs+YJgGEq1RDIgevEDRZ6hFTKbkkVaulgD/VTLY/LspuPp2XO1djYozPAjly2UiUVEwSbTPaiXLNbJk/UIQ6OL2J83fffrTbfrVffrVbXHHuBaKaq1EKQmMAFZNaW9qLqmEPsXR/IMcSAQ2/OqYrkUk5LOk1yZF736D7PX3m9Wl9sL4ZhbaYKu1xkWTKhlJL7Ht2FC86ZWcnpHDyJ5BwHH3LWeZl//PGH//f/5//4p3/85w8//gRP+9elbKug1ND174Z1LHKyu7uq/6MWmU6ffvrx8ZvvBubXV1fkg5qVNJeaRRS1NmyR1LoseZpmFY3edV3sV5vVm3eXv/nD8vz49MM/lyLECi/qx1+m2GYgyepiwlJMskgpUpJpIpQQYL0O7KHWjohiCLGLsY/eOQRDq6CiWsEETAyriGoVE0FVh4aeMESMTMTsvScqKZ/2h7osRtwTjetuezGMfQzM0fs+OMdszepuJGBZqoEmlbnKvJRlntPpcJzmx7l+upv2+8Oy2XxuXVrLgVLIgnNGSWKmjsETzYY9U9/xyHH9arOhzZvt1dvt9u3VeDmG0UkEYQMHyHlGU0szD0e/3QZvPfU8hNqNsh5NAYghBHY+OCepnnbHvJ/kuOSHgz489IfHt3DahqJIqErpUGrKHKvSkiUtc5qPpqW4U0iz1yqxS6CqNXbcd4H4F3VBS9kwYgXKVclpH3z0zI4urraX1xeby1XoPBOSMxZrxCdFjMrj2G0248V28/T8XDQ33Smoas0qQMzOwAF5ouCcY09ERlzMAbp+CORCMQUTxkJiYiYAakDE3ntQrbVWLaXWrFLAsKXhBu5CcMg5Fx8ivZhGU0o///Tjjz/8xYXgvX8/ZWZ0TOuL7Ssf+7Udl+qCKMU4bi6tdmO/pKXv+lW32gybm4vNzXrTo9+t9yBmIpKzVcFzdxOb8dwARVRRwYGhgBaH0hG8vtz+/tff/uHbb9+++2bT9cfjbFJzJDNlEwzVxh7Uqkguy5zzlHNS8V23uVhfrFdDH779+qt+eOnEqIlIqUujKQkIUHGhIiLAQH4V4lWMl324kOyApufn+Xm3m+7vipb9cX/36f18OjDA7fXl11/9ZjWMx+f9w/3jbrdLpbg+hBg5doicjqfso3Tad31g74mJMG6322H07Fbr9Xq9GTebYbPq+t551xLIPTMRGaG+DI3NGhieNVmeU14We/ktIhf7TTdexhlWQq7rbwXAGIxMabW+JsR5Ps7L4kLsh/58IH1JlsNzT6BJravkaiImqqZLSrvjYX9cDnOZBSqGw2n+8OHDhw/v37//8XB4Nq2M5j3Np+PTYalGY+w3q2GzXg1d34ZEUuvheBqGLnZ+u91cbTevLi4ut9vNZntzdfH29vqrr78bV+PL6QXrRGmnp/1png6ImVGsqmQBFXNWmYmIgCvV42k6Pj/Ag3LA4Mkhklo5zag7j5POh0IM3J32+udUTofn4+HpD7/7/t/98T9+9fV3X71br7cREXMpp+lYSmkqFaKzzLa1p8T0674zgPvHx27sQwhV62E61LowmWlJeQHQofex78Io28uNd780yRzhi7cWAQk/A1g+r9wGUNVq4zUhmw84bHB7WdebhdxhyU+n6ZRyqapIFLtTLh8fHu4f758fHvJ8Cqpj9BeblaEdj8e5FEMX+nB5c7W5vASglCoABD+8/uq7bx+fv/7zvz4/7z/dvZ/zsqT0fDrF/X50AdUccjTs7SyfrS2s53MR49xwucU6q2HrhFgWzaWKeuUBwsa8M+8UA9FAbnBuYDeQG9EHY1Z7gcybIpsZIhiqoKFhEG7wQQINKIrAiJBLunu0vlyQ35AbmEjNQFq0TkXNCDPDyWNWlSWXXJZa4bTMnx73N9cZWftRYTZhdJ76/jMnBl+kSXSOEydHHIhD9H0/WIwH5h3ak5VHLQ+1zqJO3Mq6DQYlyaAVYAVoYBMUgRZNBxW0CqQX0yAnuDtN9930nJbnPJ9q/mocX7lu430H1JyyqERK7AydayBZYRSP5tAcAp/DmsWgqGWVReoiNTTqzvnhp2EcxtUqxlhKRqSWGGJaliU1MrtzDsxEREQB0DlGZGTUYnmenj98+PE//5d/+c//9f393TrVd3G48JEddsRv0I3eLXFwMd/V/JDS/YcPP/3lz3d//eub129Wt6/QO1EttdQiolXNpNacyzQt+/2x1toFn2WA4IDZby/85TV2gxI3ekuTOXzxMlPVWkuu81xPU57nNKd5zmlelpQzgsbAffTOueCc8945wqaulGJS4VzEqKK0RD
BoaEQm5xv/umNkYvaOPJ0rx0C+6/s4DptN33feITKSZwQDMWtMRlEVVVXJpkup85yWea4ppWWZjvl0PE2nOef8eb0GBHQIjKKYK5QEaIgeguMOwsb1N2G4DMN6s7qOm1fr7e2qvxzd4I1NUU2FVLJKseWkiLj0Vo+mi1DFzQXFzoWBh5G6gboekKCUZc46L/VwrIc57Q91nqjmAY0di7qiJedcoCjnqliK1ZxNaq1lnwWkIiJ0WRGPp+l4OszzSb8QLCmAEgGRAuZaXUXsQuxiP8TtxWa9Xfdj74IDU2Jkh4zEZoakylVsvYoXV6vN03iYdjVXswoGAIIITOQYHAKfw1i0miFA1arA0Uf2xmFGMKezIRlqbXMDIGSHaC1OBs/5utZQ4KbWdJ3sXFPZfX5e+hguNqthtQohqgIzRs/Dej10EczmaWYh5wZyPvQrICLXe/ZWyJJFH1au28QRBgUxrVIpa6mmiqotz7wZB4xBm8kcxaH3DrrI28vL7dV1d3Ghq2GJMSPUUsGURIEJ+95dm40x1upOB7tPec7TaaJ58qSsmWElkj93yBS0SqpyIspsrEDMzrveu87x2vmtD9chXHR+rdERLY4PCPeO9kuS6nAM3ukQQ7i+un11+xbUfvrzjx8/fmog01orW/QhbFYcXtv19kJEVuv1u6++2m63jtkTR8997FfrdT8Msetd8IAEBKgt701VpNg5ZaWNDrUqzAmX7MX8WVPRnhcmN7ruot8wxbQ2q2KltFxL3/Urdr5WySkBYAt/NRG1dt9RW+Qlk0FDqEgtpWVAHqf5/nm3P+W5YqUAng5Zf37c//n93YefPx73TyaFEWIIJc3PT0cCHTbOdZ3rumG7vr24UJEP7z+UWpi5H+Lt7dXbV6/evnr96ubVze2rm+urV1cXm+1ljP3LwdJvx6vri1d7faRe/GA15d3HkCciRiBwSITkDLVKklShyJyBlckcoUPCqqjiEEspakW8zaLzlErOJaeSgWBdFWq9vXm9GfvOeRfCECISAwICGRgQmNSalgWA+r5/9ebt7//wh9s3r2PsUlnuHj4ty4FRc5qenh+WNBPhnCWleVmmL599d2bMw2fqGeKLsVQREUAUSpViBsAuel5d4OV1Wa9ng093d4+n+XSaVSF2Pfe9OjYQInOOQ+dNnKVFQM0TMkKgEIZxe/n1b3716z/+/s13X3MMaSlQlYSvr159+933v/vDj/vj6TRNKRUyLCnfPd7v2QezKLUDKIBq4AwqgrZomvbwezdsNwHFkFCRFQ+Pzz/861/vPz7gMV8X39FGQufJesebLvbMAdQbenBkBKqmLWRZ0fkWDqwIlYwUO0EHLyZk1MLgjUw17w/TUifnArCrGFqKoKmRESibhcgQIzMclwK6dKIl2bSj/TTNwZX1AIJOxRmGccQzrx0IscOwAthQP/SrELvOhyG4sR9Wm232vub6UOa7Wj5UOaiNFL6O61umvpR9nf5R9wctW+POuCAf4QxAIlD4on3V7vtxWf4lLY/Hw91x9+vtxR+ubr7izQX5HpgBnJ21MhQcBodMxpjAxJa6QD1N2fZVQMRKLmk6pWlO00lf7z+rx4m46/uu74gJKzZ7kKrmVE6nk2uZHO5M+TQz5yjG0PVdiH1OiZHDlMcPD+OHT3E59sjXfrxSLCmjagEvzscQhmGIdbE0PT0+/Ms//vf/1//2v0kuv/m//Kfx+no+HVNapFZVAWzz5jpP83Q6llKzoyXPSzqx1SJFg8d+wNiraFuD/hZHhAAkFaYpPe/mh8f97nA6LfNScpGqaojovBu6PngfnGOixmbNOUnJWqupQBvLne0DSEjOs/fsIzvPDOyIHXHwNHTYBY4hdrGP/Rj6LnTecTNWnvNWRETEtCUpSLNFay4lL0lr9szBR0ZRsbyUvJTPnRhC8tFxJDVoqb/eqEO+jN3bfv31sP1uffG6GzfcDc4FsoAzJbDK5iMAmVUrVXI2rUZgC8sxpsdPx48faH3pL67i9avhzbsYB8+uVjk+Ph8/fdr/fJf2Ry2S5zkj1BCrGxYsx6o5q1o1E6iCpqQQUbvIJ3L3Uz2ccrYjzjky3z/u/vzDnzevX6WUfqkuz+cvNIBSa6kIAC761Xa1vljHsePggQjMiMl5JnZqYGwKWlWH0V9ej5vH7sOD6bKIOTZlZwzEDtgBMxCqSskGImaESE4keiJyPnRbZsZMwC4AVbM5ZTMTIyIgT55Y2YPW5hBDMVVNuajWs1bpZR0bh+Hf/+nvOtf4zF0IoYux731Vezyl/SLH6ahJhhUyuapQxNWikuBQlzrV6ZlqXso+wSQgBlWxAAmCkaqpaIspRUYO2BL9GC0gk0DRepzS4+HYPe/Mua7vahU1QGMCBnS48v5yFUCDVHl8epSkh+f97vl4PPz0I/bRbzdjWqa/+9OfLi+vWskvMhc5OhcIe7SR+CJ21313PYRt9Bvv1sw9IpthdHKxunr3+m1Oyzyfdvvd/f3daT6B4bhabbYXu6fHx8f7+/uPBswhLvMM5Hry2/Xmu6/ercex72Pf9+MwdDF6do6ZCeicIHQGLyhYFa1VVIQaIK4RPM8hWGQl637np9OF8xf94D+P+JGKRfSXq5uL0cSkHg7Tp0/PUuHmcrtab30caqsJzQgM1cSKERI4aqGizgCBAZVYiMwsl3I8HZ+O0+NxmStiGFwcXOxdrgvwIZf9aXp+eirTiRDG1UZyerh/DoE2ry4XhJImKuHd0Fku++Px+XlvYJvNeHN58d03X//6N7999ear9eZyvVpvVr0PEfHcvej7/re//e3+5Pr7TxMcLm7yaX/4H//l4/OniRwhkyKaKRTJJemSwIzRmZpVUYOKiMBoPZJHZNNSckEy59y8LO/ff3p8PP7Df//r7c3Vr391+/333/zdH37369989/bd2/W2R4JaYVqmnJJJNhFVacyIYbX5+3/3H0QVmVNaHnf3aT6AlcPu4Ycf/vzp7uNuv98/P/zLv/x1HVbzPP9SxDR0djNiNY+SAfwyqSESdugcG4Ki9aNtLtKwmhV20/S4PxxOc12KY3axc41DayImAEaO2bMIgSMMPvTdZdd3q82br775/Z/+/Te/+dXF9RUy5ypcjZHjGDbXV9ev39y8fnPx/n1OmaCCyHw6Jebg/cjs/GDQmRZbWJbDl2FDucjH59309BhC17k4+njK5acPn37+6f2yP/pSN8jMITIMjtcudExOhQyoBb9Ri3ZSBQM+M1sFQdAIITBwy7YFUkNnFIgUkFSsZKm1AFVtaZhmZ9ivZrWCXDwklLkuuSxQ6lLpEdJ0/3H8+PP9yJYXEnFAV6/hm9elDS0JKIAfwG/CarPe/v/o+q9nObIkzRNU1cOMOL0MJGiSyqxqOjJLZB72ZR9m30Zk/uPdl92p7u3u6uqqrKyMzIyMQIBe5szYIaq6D+YXQHT1WkCAiBuA4Lq72Tl6VL/v9y0WTV1VlTO+Dm6xEKDSj73SqWCnNCIGct6GRrEuMgIKCIJswRAYS9Cpm9F8BhQBBUGt0eDZ4FRy5iLCDHocywONj
3Zo2KhxkzXGkjUUDLng/IwIDU6RWJlVRTAn7ccSY86JU8opTmma8jAs+4Gf7K8iMsaxG7pcPOcyW41mt1FKqZRiDFpr598J5+gqmn845+rFYrNav2wXBx/GsfPCqFKEmTOIRDP6oN4ZE0JT1QvlIaVyPL394x/X6037/OaCMMYppyRcSim5pJTSNMVxGPt+SCkDiPO+WdSNdwEAqsquNq5dpe7IwvpzdxKcG5Myxng4dbf3j4/705hiZlZQMuSdtcYG54P3wJxzngdYpWQuZSbun5nTqGTQGeOtDc4F71xlrLeOvDPGGxMcVRWGYCsXQqiqUBvn0OBZKSrCXLjMQXDzTIl5RoIgCvM4DjlGoziNZRpyHHIaU0nl807MHKbBKk6lNrQx/uVi8cVq/eVq+8Vi/UWzvHC+RbIgjEWJ2AV0AW2NgFQy+GRzEslACkRAFp1TskAWrQdDUlI67tLhYTx2+7fvu9v77uGYxiiAiXOcxqnvxikOKY0pF86gBefmBwjNgkgiVdupOSgK+cpXbaiqJHVTe+9+RlOFOR2JFEBYWBWNsd6HprWhiln6cVJViwhkACXlPMXYDV0pbGxol9Xl1Xr7uHKvSIFZC56DX1CQBWaTXM6RFEspIggKxCVbJN/UbdU6a1hZMwICMYPmUphBLKGzznpLzhsukFNhRhQVSXkuYn52i9V1/atffNt4R85770IIVQihcv0w2Xe3+W5/7PrIkQCsr0QBwBB4ZY0jy5gjinLWnl1GEpKCOTOg894ias6lSGEWBSYRVGZmQ+itQaRcShz58aFnvt3vulA5Y41ztqIKFKecGdlWhBZA9bA/3D4+3D8+3j8+7HeHKSWD0NR+tbkYZ/wjAIAUSUVY1BtaOXsVwk3TPKvCtnYLT5UxAYFYGECtMWhtW1fStikuFu1i1S7GNImCtXbRVmUaN9ttztm5yvqKnHOhClWz2WyeP392sd20i9pbC6oE6K0lIsS5cyqFWVlYhIELc+IizChzqO9cxcxrJOk08fEIU+8b8ug/4u1FYJowRhdqbx2CFDsiy5FFjLUu+OC9A4TCzllHllRx5nKISiklF5aJmXNKwzCMw9D3/TAM/ThODGyqQoFTNpjb0ACaIpqK5FJinPrjHlW4iHAeh17VpZynnDmLc+5wPEEu/TAW5sWiudyuv3h28dXL66+/eL69una+drPpXfVjoeycvby5ePFVPATsYXl5Q1N/jB021b5kyiyJueQkkEUSGbVKxnsEw+WcBYYAZAmcWEoqE0JEUjSBhcYpHU69fnj742v746vlDz988fb129++/s2vf/Prl1++WK6WIXhBECCdh7rWEhEQeeva5ZLICGCKU902OQ4gqV+tg/N13b55+/b+4TB0w+l4/JnFeuY0KQDpGe10HqkAMKk4q6Guq0bJSmIOVVlvD9a8edwfc0IFcrYxtvZ+uWyh8ifEkkouOXMBADIWnKMQKFTLi8uvLq5efPX1r3/7N199+6ubl1/6qmIGFiVjwBA7ZAdYmXq5uHn2nITjcRfjMHFm4Ggw1FW13a6Cb0vW3X06ncpn3Itj1/0ff/+P7179uFlvbjYXz7eX3e3Dd9//8OrHV+PuqGn0IhahIhMQIEdmnCGqrIyKRoWARQsACFhANUaNsigrkBiaRc4kYJm8GmdUrWmCq9HaLCBSVApoBGbQOZJ4UpmSDmUYVAaOY0o5Tkflu2T0Db35T231wx9LN0FmAvybf/2v/7evf9usL+Zq0qCtoF62y8vtdrtd1nVlDLChwZhDyg+S98IJDZqAoAVhl5Jwpnw65uGIk1e6ALshf4UhgeKckDFnOnqn26V+fTPW9uF0jOPoMvuY3DitCvPjdNinDMZahGCso9qY4L1va9s2tqmp8mDROuOcR6usecp5GMaUEpfCKZWSC3/EXkBM8e27d3/58cd1u7CGcs7KAjMdDMCQsdYZY1XnCBsAAGYumQmLsXZ1ffPyV7/K//bf0unI/5QPx8NecqfQgzjEUbJhEy0maxZN88JZVfUhwNAf7t4/fHiPdTXf4fO+vtvvjsdj153GcUwpphRjTN6Hi6vLq4uLq83aNIv64np8uMtTz8qqOsenf6xhFLQITyme+u5xv3/cnYoI4OzctkDgyLZN66w5ng79MObMojqLBwGM0uz3VkC0BqsQau+bEEJwtiLnXXB15Vyw1nvyHry33jprLJHVc+QOp5xKKVzmivBM6ARVFSVjXAiFeRzGw/4oWfqBH075uB+mMXP6JOwV1ZwlFQ5avKEr637RtP/u+uUvt89vNpfruvEGrbBJk6iy99iu3OVLrJYqCMyYEyojAZAqChqDNph6ZVeXtNra1VIB86nvXv1wePXH09u33d1uPKWYKQFlwih5ilMaRh5GThE4GSmA80TIEBk0pIgFIDIMFIqtVheXF1fb55v1szjiZvHL3/62aZ+EinPBc05S1HPEmrM2VNbXueDd/aHrp82qbeu68o5V9vvD3f3tuw/vrLPffvPLxaq9zpd3j4+h8k+YZhUA1SKFmUHViqJBBcTCwqIiWlIkpNoaWm4CuSmNOSfOKeaUc4kpllKctcuFqVxlrXVcuBcFJSIpJefI87bwmewqePfixfPaWTCzBpeMmUF1ul7Up657eJz6MfVlsqEN9cpR45wTVU4555JLolKo4Fzbi/CoYqzdrte+8rHMR66SU0yxT3HknAHJNcGCQygywsOH0+OHPafoLC7XdbtomqqdYnpze3foD6wRUC2ZlPPh0O+P/enQD0OaCgsXAvnm/WNM+ePTwszMDvTa2a/Wq6/b5sr7tcEAqpm1cFSdu4c88/5QaRZnGuPW280GkaxFUOFs4Pr/8n/+v8YYrQtIZq7YRSCEsNmum6axDlUk51xYiqFPuBqZD/wsIkWZeZ7k8jmyDHEuYoAVBWTo0/EIcRAVjvixwuQs/SH3h9QsjA+GADkjKSoo56hcgrchNMt2QUSV97M6Y0aV5Mz9MBxOp91u/7DbPTw87Ha7w/E0xQiKq4urr3/917Yy9w87Md0NmpKSpmJYrXGGSMrEcSRgVCXIhJTjNPYDAzxm/eP0vVWYptS0zbPry6++ePbtF8++era5WoXKlDjtpx6Kgq3aan1jfIDZzNeoWRCcrBSf0LpGv/z1s/Wm3t3l/WGMpy4nARBBAaOGTNuuram4gCgIzf0OJBYsbCkFnwBLLDhO3GFSo4qTyvDhbr9/vP3Td39+/uy//OLX3/76N7/+67/561/88tvnX7xcrBYkwSDPVSILsGpOwFoyCzMrW0s1kQsbv16sL7Y3VbU+nOJy8aO3n1hEAGD1c/+Ffgp3mDPhinPYttX2El2VYilk+lAfVXqyE4kBcYiOyASPlVVHc86jdW69Xofra2eNJW2b5uLq4ur62c3LL15+9dU3v/jV5uIKrRelPOdCG8sl7Xf7d+9f3354N3WnVVXhYn0Yehn7qRRAa71rt5urL79+1rZVjJOx/bvXnx+TT/34X3//3e//8R+vLy5fXF5/cXkVd8c/fP/j/fv3lKIrpS5KgEFtZiMITCiGZq8S
KlgVFCnCQsRoyKATJJ4zhCXPUw9Vq2gEQFFIFZRRZzIhq0ZQQRmIWdUJCcioPAr3RUeVCWQUHko+cNxlGO7ww+//WZtFGookEeFs6//78PEEQ8b4QE27WKxWy8Vy6YItkpPwkcsjl0eQjgisq9QalQBFpUwaWWPSvFBcg70Gf4khISigBbAIQZXQsAuyuZJf/Pp4UTeH3dj1firYDbzb+2MvY0xTcVwEtRgAi6Mx3iXbZVdlW0+mDibYalEvVoG8q5pGCJWUInI2bCBjmSkd54efueu7w/FAqpX3cLZlkDHGWeuc895bO1M9Sk45n7iUXFXhYnu5XG/rTbN+9uzFv/rrOHS9yu39g2kacNahBmOX1jc+UKi8MV44z5JMRHEmrNpS8jT0aMzM+48xnk6n3e7hcDiM4zCnH00xVVVdN3VaLFiBnLeLpW0XYOxc3P9cEwMCWqTEksY49ePYDX3JjIDe2bquwNo56EpFpmnq+z4XBURrnZ2nwfNeS2oIvbOLplk2dVuFEBw5dN4GX1fOBTcHF4KxZAyBEosIC8/ki5JLmWGzPD+ys64NUY0h772JMebyuD8eHk+HY+wi3R9yf+jzFPWzZ0bn9qqoEV0gXZC/qVZX1XKFwTOBalEtAGCdVrVdXfqbl2axUZ6pLEIGyVsgVGUwhlywVUv1CowrJU/73fHHH3c//Pnhz/98ev8hdTEmKugTmUKSJecUS4qaMzIbmj+4ufsxhyNgBhhZOoHJVtiut9fPX371/ItnlwDSXl+9+PrLT24LeAI1I527ykTOBecqID9F7Q8nolN3GtfL5XrZlpjevXv46ac3f/nxz1VTt4vt5bUj40JVh6oxLpQCzEXmtqWCNYZIDIG1aIyxzomCslgfgvehrprlcuHAG06DiVOP0ZC1aGkaJwDJpdiSrSWL4A2CmjmZpHCWAqLCnzVjiKipa160cM540bPZAkGVlRNyxDKKFgYpgGjUaLBqDCJZ77z3BB7AKaBIiaWytqrrly+fL9YtW2UVLmXohv3uYff4+Pi4g4Keaku+aBzH1B2HFIc4DNbCettstqv1BlklTmUYYj8cxqGbxjjFOKUSs3DBwliUEL0xSLb62L1AIEOVoSXqBeolwQa0lWwVlKXMZp95vKM6ZwqiQUtkjDXWkDHBGGOsUZE0DRaXbdMCojUOFFPOOZecMhJWVWWtRZAiKoWZC/AMe5sNQTBHvp2fofMsVmBu1MyqMlWYI2wRsKoEUpQ4FX0Cj4MqCCsXybGgqkEBBkukrOM4muMByNUNk/HWOcJiZns3GUKa0x+mcXh8eHjz9u3rN28/3N3tD0cWXTQLNEFTFKT9/bukFLwbpjx1HafsXKibRazbjGoNIYJtGhtcyWXoRgVkynIanCEAbNvmYru+udxcrOplhV4nnST1Y4wFcqmWW9du5iKmSD4MD4/9XZeGUyp5n2uf6gWtwMdEfSzSaZIMHIskoYJkFARQ0RAqgpkLIbLGGGsr55cLCAGF3BT54XFM0od6UWQ4PU6x436Y3rz50E/pcX963B3vHve/HeLzF5dNQG/n86yy4hw2NnOpVQWVLao3GFyo20bVdkO5fv2h9g2q+aRVArA6Q2AE9exNmmUmBAbZ+6mqaLFwl5emaSXxlMrDlHq0zc1zy7k77GKe1CBYBWJVjFyQzHqzXq0233z9zbNnN5tVu1mt1pvNenOx3Gyb5bKqGwUaplQKO++IyFv78LD703e/+6e//7t/+i9/Nzzsbpp1MI4VhywaMzm3WW9evvzqF7/+zc1ybbv+keX+D/+knwXbjNP4hz/9+B//7r9dbraXy9W2WeAYH96+y6dTRVIxT6kkhUK2ttZa8sZ4IkNEZAyoEUZRBlTn0KCzFhhQGBmUOUssxliwTk1QKCpRODP7TJbYqSFQJiikowEB1KzCOgKPyBlFYGbnkRBKQRTOp3H344fJHZJgKZJK/vr2Ieb8tCKjcd6Fplksq7ZFZ6PIaRj3Kd8T3irskaLz3mgAsTmHMq8Nmi214l8WfyH+BblWrQgiYAAKAJUAGlsw5HqdXzwPLzdlvOj6AfpYjqd438LDQXedOcVmUpfKFGOMHEkjFezEUCZD5J2pfHuxNC/D6qa+ul6r1WHqp3EoQ4rdMHbHxbqxT+6kORZ7Rs4RkqogoTWWwEgtdVOHKjjvjTWllN1ud3t7//bd+6oKf/M3/+qbb8lWlWma5jd/deXtr68ubw4H9B68RWOcMbUhZ71xAZ2f4wUK5zmNgaqqXq5BRco8K1XRLJy5ZC6ZcxLOIGwJg3N1VYUQFIABwQX0FdIZA4mKHwuZOQm5COfMiUtWjjlNx0FZquCNcFsFZY7jKMp914/jNKsnVEBIkJCMcUgEZA3WdbhYr7artgnBOQNGjTXBBe+Ms4YIAUVBCwtzyYll7rvMrW+ZrVPn5sMcfKYK1lprvTFeFPbH/o9/+uHuoStQnSLtj3kaBuWPsSIzyNyQGEzGZBTCU8QPfdztRwdYeesr7xoXqtZXa2ov/GLtNxuy3lhvqmCCR+eViEtRBHIOkSTr9LDbff/nx+//dP/Hfz68e3s6dlNixVDIZGYuE0gCSY6TLVm1KCqhQQRBmI/FIKACg+gupx3b6NrF+uL65cuvvv3q5RfXIbjn3369vbxs2/bja/kYaiAzvxSN87V1lbCdIt8+dDmlx+a0ak8X61VJ6c2b9z/+8PqPf/6haqvV+jpmVLA5YahWPnRd16ecZKY9owkBvCNjQ9W0dV1XVTDWIoDzvm6Wm81qe7VdVgYv6jJuhv4wjX1OcRyG0+EwDF2cJs4jcG2N9QjGmoKUCVS9MKccz4Gq581SU4rjNOHsm3+id41x2u13h8OeOTor5AA05tNDllMwi8o1tW+W7erqcr1sqmBVYu4Pp6mfOC1Xy9W3v/zm6vllWDsTjCof96c3P7374fvX/Ie/DKfo/YKs0ahpSMc5xg8ceDMOtm68MevtZnn9/Osh9fcP79+8/enPf/p+fximmETA2sa44G1YLBdXl5tvv/llVddPBZmt/bbx0dKKkzsd+2koiOb8tho0T0nnM4nLOeedt9Yaa2YOgqqcY8+JnLWiikjzLuoRrDXOGlVF0FISzJofFQCYn5Enxa48DYxUPgaqI1pjFFFyBhUCYIOJEFdtVYXSL/b7N51+ZlAwGBqXowHKLJlQEdhZm3M+nvpDN76/P9pQu1AtFquLzWa1WNR17ckikDWm8j44R4g5564fDsfueOyd9/VF3VaeUj8Nx/37H0eGypmxwOHxdppGG9rFxQsDVKYOQRGYhEWFs5YSkVAQldBbG6pgnV0t29WiMpTzeBpIVPHUj9OUOMaW4/L5twBLABjj8P2rP/zz969up6ZLxJrbJr64ig4EjAUjRWMqk+axlFEwq6ZueDDoEJ0SsUFCckQWjFeqyPuqvrjcXlzfCJj37w9M48ULMU66HQ+HMu1lOsXT2H+43X94+E/f/eXVD6/f/eIXXzy7XrSNVSkISNaT8cZ4NA50DlRWb7ENLgfyjgrjanmx3VxZ9JI/TWAAwJ4Xgfn5OYf
FIBMlY0fjDmgEcBA1osXQYG1HksnWTWtSjPowpugsTSgTgSj1Q7S+vrp59stf/urf/fv/6ZuvvlguF4u2bZq2rhpXVUiQC8QpJcwAZIxh5vGwu331w5//23/903/9u9s//ZHGjNfklSqwjauTMW6z/vL5l199+fWLF19eLFam73n36OsFkvsUQlF4f+xu73cxltOx+4DWMsswWi5ghEQMMCsW4TwPtBXo7BFQBiVhVS1AAgZUvSoqVWgMWaMKKsKQZ1ARYFQeOScVpxqMLLESMmpQjSrNgpiZlqxPxSWCGlQ0QB5MDThl6jsuNk6IU+Exjl0/fsyDMIbatl4smlB7siSgggDWgaIUkSKWsWWtWEKEaoQwaiiAbLIEh26NZk12id6BAUACqoEqoAaUxKSCMWmKCuDz+qJZrXnK0/HYO5sRp5xOKQfAVtEWLSUfSQaUUpi0VEreFlPrSDbWx+tF+Ob5xXLbLmFdUsx9nE5df2rXNxf2ycdvjFkuFtvtZrVceueksKiAIKE0TbNaLdfr9WLRWmunGN9/+PDHP/75u+/+GEJFZJwPddsuFwt/c70yWKqw6no1hNaQNYYMERiyxvo5gUpAYp5KSazMrPOp8zxuIQQVaykEVwUPwsIzeY4Wq9VmtWrqGgCKCDgPvmKyDKTys6cFAERRFFmhFJ1iHoZx6HpNRYL3CGWx4JxjjIXLMAx91zMDIlrjjDWIaKypQqXeKANXzhnT1NVq0VTeCswrtTGG5riuOcwhi5TCORcVoXPiDRoiVBXSuTCkc9LXDIJFMq5qFlXdovVCBEDGYLDzueez2ZioFChJCygn5caUsBjC8nE6pa7ncbCGmm27XK0XK20n00QI650N3le1b1pTNxhqdR6MNc66AlByPp66n97c/tPvH/7858fXP3T7w5RUyNka1aACgyTII5WEWkCZZxKL0uzZYZAsyooMcix8P017rPLC+eViub1YX162201Th3a1Wa2Xzn8CXs3t7TPWHUkVUip9N05RpyTH/Vg4CwswaYGS0uk4DUMZR55y/8Nf3kxRm3Y9DMX5Zbu4SJkYopmrTmOrqqqaVd0um3axWLRtW3vnEcEYY11wwRfQrGqtM01TWwxtgyppmtrF4rjf7XaPnOLcTHFEs8xT1Kgx1pifK3tAz6YZBp35P4iIaJBVckmlJEMaHFqPIDqVpMKkaC0tQnu9bb/68vnF5cp7LHHa3e/6fZ+nvKjb9Xa12q4Wl3W9dC7QOGxXm9bXYUz88OEYTC0KHo1jtVPWGZTqyVgSrQp741bXz7c2wPPT88Vq1fWxCKScEE1VLauq8b7ZbDcvX9z86hff1E+cGFSDWqO0iBWIURaGhIBy5jcZZ31w1jrr7Owod9ZYMgYRzvyBAqKs50SHj+/UXPEpgCCpsjCLfKpVWP5H11NX5qMZF4R5msbHh/sYo7XONbVfb3xVUUVoBMagZD7HXow5HcdJtKAyqZSS+iGOYxymKaacGBgIrW2qdrNeXazXF9vterVcNI11hkEAUUD0Kf6xsGBmLiWPw+nhtghbnhpyARMDeMOVN+qrUNcLH/I0pDhyjlBKztMwDaVEBC6gCdFZU1i6U/fw+PC+BcPdcHxsQmCR0zDEGKXkm8JXv4pzdRnj+NObH3/44U/RPBuLH+JwcEPphspIPNbdMU39QxyOEifhyJQVJXOHaIgcGKOGiEiNUTQAdqLQ9WW1WjRNY13dD5KEnC824HJjg6McqAtj2T2O+8f97nFI702oWTOZFyK1ck4pj2NOsaQiqkhorHN1qLwjo2ItVsGu18vLZzdXFzfb1bat23Mq+FzEnAOrkUDn/EtQxIw0oj0C3RXpugH4DquOgjcuYKiN9cwyjOP+dOq6g3FkrXXec5bTobu6uvnN3/zr3/72b/79//Tvv3h+JWAA0CABUJxyKWe/q7JYREd0PB5f/eEP//R3//mP/+E/ffjuOzgcgpgkD2CDB7/dXreVWb64+fZXv/niq282q21T186FbnPhmxX5T61LQqpC5UPtQiVkhpQtc3DOOEuarUrrfAumAgpEzpI35ObOv+Js5C8gk2IELaU4nelfduVNYJvzJCwZOYEwwABl5JiK+Fwmq6V2aJ111hCIJFExAkXBAWTQNCPTWIm1ElT0SNbYiqrGWYOYJY05nUUT50/F2u22LdvGOiiSlcWEsLm69AXKoc/HCaZUhtTE0kw5DDkkrhVRXQJjCCtr3UzgVQBAA4hojKBXoSI8jOb+YL9716C9/voyb9bc4BCqHefH8fhwnx7N0FX+Cszz4gnMCaf3VA5GLNBzNkvjDNl9Lj/c3T5vYP3184t2sVpVBqGMKfZ93x3rL75wT0RF7/3zF8+++cU3bdNaopziMAzHw0kBFm1zdXlxc3O93W6dd8fD8cdXP/3un/7pd//4+xB8CKGum+ury0VdW+fq5bK9vjaLVkEVEQzhE2eajEcyiihctJAA8XkjEOXzTT0nfjV1AwDOuhijihCSdW6xXF1dX3vvp2ksUtB5DHUhFwWBtQh/ik9SBLWgHsEzU9fF42GcTgOWrDE6osVymNq2quvCeRqm7nCYUeLnkRmAs7ZpWx+8M2iUx+0aAJoqtE2YBZ5ECMIpF2ZmUFbNqoXniHUgpJngJ6pMJMxn5QbPp0zQbCkXY+3V1c23v5Qi5uJuHwseutQ8Hq+2C+/Mx0W55BLHghknoEmQrQ/XL+jy2dE+vj69+vH7P8XjfrNqtovFRbtu68Z66yvjKqqr0NQLWy2gbs16E66u6uWytd6kMT2+796+f/zh7fFhP2WKtMiQjaoDJmJDhbVoKipFRRjmRGcgBVEtIFkhKkTWyLCL5cMwjYH8Gl3tXV2jC7nAFFnPrp6n+hKfAONz588YZt7tHvu+FzAKVtFWVVVXrXM2xZRTDqFZb65unnWH0+GHH17fPxyvn3/hfGttvdnckGlizohExhrjfPB13TRNXddV09RNU3tn5wM9i/Z9P039zoAzWntsKt8u26aukHm97Y7HfXV/O5wOkpLmDMzIcxykEpCZxw+flTJnUPr8iube+NzAc67yrq4ci2NhF4jA1tZqJklSO1k19mLbPn9xcfn8MjQml9xcLI+Px9PuBAy9JOgOk00rqjdV3W6rb5YvwqIqZH/64f60j+OYllKbZeMXCy6lMp4Iiha0eJq4nvhFWD57sfnmV99cv7wRxGcvniGIMbYKbeXrylfr5fLm5vKXv/q6eSpiRCANlHoyFRlLdfA+OCJSBWG2zq4W9aJt26b23tO8Yqkycyl8rkSeWo9nC9GT9n9+vOdr1tKJMMvPmi4fr3NezVONODO7VTXFeHf74Xe/+4fDbte27c2Ll7/87b8J1ZJZWKiyde0q87S/pFzePxzevL1XEOFSUpwDDQqXUvIY03GY+iGO08SlGJD1cvHVF198+cXLr758eXmxres65VyYyZjFcrnqx24Yc0zH/aPlWPr9ctm+uFi1m4vtzfY08X6/JouTelbrlptpjLvdru+OhfvMUCRljlwiSDGAjmgaxzx13eH+zavFl883l+umrh
xD6ca+lEQEvxX3qxg382uJ6cO79+9/+ikslKGKfX+Mj/fffzAaHbSc9NT3MUbNmTkrFIWzzAzJEBFao0RkLJBRpDLa7uA4p83mqmqlO3b70+NPb/dAGtzKmiXpUmnRbAKGla1XgGVMMsRUNdX2cq1SdncPr354++qH12/fvuv7gYgWy9Wzm+dE+Hh/x5xXq/Zf/au/+V//H//ri2fPvvzy67YJzn1G7H0CCZ7XNQUQIvYel2v1fhI9KMU+alRfc9VS6xsSGcbhsN/3/TAMES0Zx0FQsgxjEoXVanN1dbPdXrWLRR8lxZy4lCI5lblKRgCa9XtEUMpwPPaP+7Q/4Rgbso313rnQLMJysV02uKxWN1cvXn61XW+dtUhk68q1C9O0GKqPlFuYDyxkFY0gAaL1oa7rSoX6A8xhtmTs/LMlQzPac07kVlbNoL3okcup9IjmBObGuRBCTd5ysZILIqOqKJMaRK/kgWbQewIxyigghUvmIlpEeMY8zqoyUWFARYcUrK9cVbkqWAqIkxZzRuOfLxfc9Rc39uELv6hdFYx3vq6r5WoYSj++iynaY5EuN6lUKdvMvogHADAFCdE444Qwahm5jJxUNZDbgHVkWkXLAofR/XjvTaiN5QRgzBhT1bGZJGcZdJ5Xm4pcRW5JsHdl5zgBlGgAbXD1CLrr9nq/e3+/u3x2ufjy2Wq1kJjKOLXTaC9v7FMR44y5XK+eX26rqgbAaRoJkUtR1sVisd1ub26uLy623vkU0/39/YcPt8fj0Tn3+vXrn3766Td/9euLzcY6Z5x3TS0EPLeF8SmGBFENyhzNcf4hOlNTnhY4BFBABPDeA4IxxEUAwFoXQmiaZrFcisg09qKKIWCos3EjkDKM/DODkghxwZR1mkrfxb6LJWYsGTJ7N/bd0K4iACBSKWUchuPuMedchTCzcEJVEQhoI8bEmEouoOq9rYLLOc8K0pzz0A/DOI4pKYCra+OcRSICM8evztIPwHnqLyJaSkp5nBLYsWGwvm7b5fMXpGgvrk8xybEbr/enX3zzvKqeHn4BzpKjGtZMGIki2okCUnU01fsCf3jY7/fv17vmarl5scXaDil2qtE6Dc62rnahxqpePHv+7De/ubh5tnTB9nt+/c/9u3f7u66fhF0QmBM8dUwx59JL5JJDyo7Z6Jw+jTy/r6pJNCmMqkORvsjjlB/GxCSXxhnns9AwMZ6m4AgRq6b83Dk2L7JEpCI0xWm6PYEqoAv1crO9dq61hISqKtaiXy7QoGBxd+7u7q7rh3acGqqtDU3jFG0orDDPHIz1zofKeA/kWDEVUS0AMjtfWFikIIghrYJdtPVqgWvwlfNUr2pyK7SublPfl3GUGHOMuSSDYowB57hY+zFp7F9ceqalqbd2s17lnIyzMUVEAEWHBIQFoAm4XLjVumqXVbOsfOs8CHkLnoY89cc+97uOx4Uuk2GoYWWbpqra7Wp9vd0NnOAkzjpSVyq/8MocyKPqVFKSkln7WPrIkbG2VbPa3Lx46YNvK9dUVV3VlfXOuiZUy2VzfbFyZw48MOtwKofHMfo9XpjVqm4qj2esn3POtU1om7Boah/8nFSfc0mqggVmbsC5SmGYq5p5ogrn/yVPPz+VNfp59fJ51fLpLkEEACJKKXXd6fHxYXd/t989TmNb1VUeB82pFBFma5yzn9xJufD97vTmdo+GRCTFiXOam72qPKV47IbDqT+dTqfjsTvugzU/vPjpqy9efvvt189urhbtYrY4PDzuTv3Qdd00jilGw4XjOPTddrO6fv6saTNPAxVZVSYuw1BMKiRZM8FTkKxBMoYcoy0CnEthTiIEOp5g9yj3t/72dn2xaZarGoz0sctlAhC7fDbF6elz4aHvj48PoSfFMMY0jvthuIUyVNYh4rwWKYvMMctw1twBoSKRJUUCYxipEKoYFoNI6x8vmsV2f+h3h8PudMciTdOHarKuWNuSQWZkNcagcTbU1Wqzuri6RGUD9PhwePPq7bs3r9+8fWsILy+vCMAYevXjDynFi4vN8xfPnXNX9eWLFy+CNf+jIkZwHhUyAQNAVbfPnuemdcOIQ+axcEGISoadz4Wn/eP96bBPwwRZhNEgeVOjQWmhaZZV1SLarhuR3Gnox2kqpXARKWLIBO+Dd8YQWQIEY03bLi8vrl48f2Fz1pi8D6vNVXN5XV9dhfXaL5qqrUKoDJmYEoC6uobKa9NA9QnVL6qZOReJLGStr+u2brfbrc9pmIYyDBmlkIpzAEbPvffzH0ZUIS0sJ+EPKb+dYha9ANs37dJdeOcMWWe0WFBQZfGAW9MQ4VptjcQIJ46ZAVRSzlm0IIoCCifmVEpkSaCsiCqFDBsqFgtJofncaNzZEHh+2EJTvfjNL9YObBVsVbm68j6Q8bv3u8dXD1OX68dJh8npGf2CBlkBFATmzHFkgyPiPeZXed9xqsi+pNa6tdMQMjSTwNtDKZSnLJuGBDin9XjcdN3F0RxTNRZdZtiIrdFWxm+ctA33mhc5LZGeVcuR5Didpsfu+z//UK+b7V//8uLqyuQChVtmWK7NU36KJVx7exEsGcoCQtRUwdEGkVyo1tvV1dX1xcWFNcTM4zCoyNXVFRENw/Dhw4e7+7urq6vlcokAKPOBnYt8ygJFRKGC8ypWmFOaNxYVAVHUz6p0RMRZNbJAJGOc9z6E4JwzRDGOcy/EBI9Nna3v0SSGI3P5pFcAYUhR+i6ejuNwmqYpYQESTMJ9jIeuX07Rem8BiEzJaTgd4zSWqjLGgqosl5vlwhHOS5IoqKo1ZAjHnKaUQGGcpt1u9/D4eH//iMY+//LlxeVF3TbOGOUy26v1fPDk+VWXmI+n0+3tvSBd3OTV9rJdrrfbjff+5lkqwlMqw5i+vLps2/ppewQuoBkUSJzLtunE3j10Ne9O3TDEeMz5Acqp6MTeNVfe2rt+7LukwAQSoHPGWGdeRrYvv2muyAD6acQPP8n7n3gwUoijsc41dTUJvO2G9/1wO54A+HkVLpxZIhg9M6gUsChEgUlgYOhYD6XsMu8KODDoKgF/OEbnjm1jqmCcc3XbfLTxP324RMYgijAMQ797eFtybJrl5fWLq6vL4FFlEqYQbPBVVS1Xpa0Wpll4X7lcdLlcWB84qVEJGrCUUoQVBLXMaaWppCJjjIaAUJ8Scw3MYilh5qIgRIc6+BmoulktQvC0uKzDwjcjD33qOuyOud+jsjHWEHBOswj00z2mKiKIMy0ahBkVnHXPnz2vmzbc3e4P+3EcciqoSIjBUV3bxbpuWi/EkaNkIWf8ojKj63m4O95zZB+atZZIGlG6kZc1n7rpkFIkptYEF5A0iPFRlYsDAwpO7Jjysc99KW/uHvoyNneWZUyCq+3Fi6vN5XqxqmtvzTwGA1BDfOalAnCR076/fXuvcJzG6eZ6E5wREGdNXTfOOQIkEJWsjGicQRQAA+f3Vmccf2HmDHN/pjBL0VmcpudwuqcLZE6u0k+P+yz4/pcDO0RKKT08PJ4Ox7qqcLM2zhJpGk/DKaga5USIQBae/mwpfDj2j/uTDQ0g5
oJS4BxwpzlnVlBj0DnLKo/7/dj3724/fP/jD7//8x/Wq1VwruSy2x+mcVLVUjimREilaYYYHrp4d4q33XR5v7/abnwVIOvKQE1yzP3tfn88DXHKwNlTNg4MOKsVlhhTitNQcgSFWTRzOtDDrl9vF8+fbX1t+tyN8TRO4+b5+zGmj08LIZU4dbvXRQxZy5qxgBSY0gm0iALoOSXhM145gKoAKiPMIEgAJASwiu7x8faf/unvQ7Ui8jGV42mXczm5Ht0J7YFMYwgUuOTh4mr5i/WzFy9urq6uLq9vLOJ2fX11+dK75vd/+C7/9IqLqupy0VRVddyvYo7by4vtxXa5Wnrnr69uDKpznxF7z9mrogJQCCAEv95WL17Wv/4N+Sq8/wCyL2OfcwFlstmllFI6Hg7d8SA5qYgikENPwXlPYBbLdagaBdMNsejQjf04TbkUVTBA3oF1yqpFBBkU1Hi3vbn54he/GE/HxWIZx5Gcay+u6ouLanNZLRahCsaglsJSWDgVjUyTlIiQCfWzG1QURaGIKpGtqrBYVqu1ncbJWJ3ZKfPvOQMb5w9p5mUAEhbVjsueyw40AWTgwOV5Tg4gPPHw57BMh2iNDdZuwAdFFgFlEJlHU4AghDzTlY011llABhLmkmNWnZAnSKNiVgtERGYWGH96zpzFq41Lz3zTurqylSe0WhT2Awn6yGEskAsAMgmTljmEAUFEEIQVk8AO81ud/gDdDqZK7RFkicGjeYZUsWI3OkRvCfediYVTqvIUytgW6MSdCnvBJWMD1IqpFIrho7HVKmypvmnaUfjBNMc4TrfH/btdtx/752yQGDSmaFJeqc6iGFT1KblhSJTHIsdhBEPr9WqxWISmvX52XdcVqKYUU0qqUNdNu1yKSIrxcNjf3d0/u3kM3oUQjDHWWNX5gRWZFztCIkIFAUSaM3SUAGXWeM4OI9Wze5vIIFo7w3S9895Zi4jzdIMQkKgAZIWMlJBGhUl/FtElLDmXOMUcMyE56xUQGFlyZOnG6dSPUyreO2MdGVcKxxgR0BhWVeeDiCCevTOIxCwpp2nSYRhTykgmxjwM6XF3fPXqrZ4z5LEOoXKOy5NionCZAxS4xCl2p9P93cNPr18rmgLGuNAuVnUdrDULPRc7uejNahU+yxwBIVITqHa+lbAYwe9OqZgISM1icXHzjHcWpHbrK243I+iJ/F6opMwlEbMBcJZ0cfr2FC8mNgaw5DAcTPcA0as4NE4hlGCPWV910w+H4W4YLEpZgVQODdWAsXBSKIBJYGQcWAfGk0gncGQYBELRKZahn/b7jsikbJvKh8DjlOVnrZhzHwZUmTWlPE1jKZN1NqVuHA/Hoxw5eUeLRbNsl8YQWtO2gWWjoKlIqJYKTqiwZuY5LowMIBmLxihiYU4xiRTlDMqESoiEDskAkKhmLsylSLHGtNVpueovtrFd1N47S4a0IkfUeo+OQUo0KizCxnhLDj6L5Z31HTN6dLZbqCgRLdsWEfu+H8ZxHMc5H3s2YPtg6sb7yipyzLEgE1skijkNaeqmPg7JpJxciGQGllPPbVWmlA9DyqAmkHVoLAqTcYV5jlRFVSqWDKMgdtNYdsmcmKigcLus1svlxWa9rL0jZC4555TSxwpmLhe4aJpSLjGue0MYghfl4N1y0VprORcCmCOVDCECMmrJ6XTYH4dpSoVViQyA6MwUKMwyF0nnY6jqJ90aIs6ttrPN/ly9zL/8XNkmnFLsumNKcbVaLdqGtThnc45x6ok8gSoRmE+ZVqrCJeUUBS0Yw6KiCALA56kiwryOGzJG0cTC4+F4GrrH42PwfvYxTVMEUR88IJZcnHWsECowLiQoeddNUzodTnUVrK/AGFYeTqfD3W3KvFyuVlfri8WSi9zePdzfPzxILFN/7kHpOTQh5zxOOQMu1xt1Zkx4GMr+MD0e+/ykuVSVmKZpOPWnwkIuBJwzEkWlZIU8WwUAPm9hfUzgPne5ngpVAGAwEiPsdsXao3M1s05jL4WzmcBm8EAmIQBAEUklOYNqiVSAi87Dg3a5Xm4u29WmWW6swXa1Nt6Furp5/kxBtxfbqmnuHx4RNMZh0VSf16V2rl1RRBEnsmG9vvzNX1/8+rftr//KZa6TyG5I5ZRjQjHGYo4plxktHDVFECCnRsEbWzWNr+vl5sKFhgHHKRWlYYopl8JijAk+uODJWgGdUswI1lsb/LOvv6qbhavry/fvT6cjE4TlytS1GE9EisAsc50JBCzcDf2hO57SOHDmj1sMEuBMBkcg40Njm1q9lTwvbpaAAK2qEUF9cjGIghEFELBaWPucR0CzWjvjuPBJ4U2aIE7XgIGQDQooiRgAS2bBtDWuQaMoqkVQhLQ4yAAjQEbDxrB11lfeWI8wTsNx/zANXafTqaQec8RKTP3RX/LxnskKd6CjM5tlE+pGSLmw5NTFaSwFRC0iACXkjCWBZpAyZ/OQGsVQysD6HuJfoP8zTfeUAnCPptXOKTbY1ECAbDTXKRkRPA1lnDgOFXNAWCMmcIgQQLwWXwgRTllXdb26XC1d3SQXpvRVu+2yx0Hsh+H4l9u3vpZFOMXx/dt3y8uL//nZl9t2CQCaC+9247v3RzQPU35/6Nr1+vnNzVdffrG9ur64vGQuj4/3zDyMQ6jqq5ubtm1Tih/ev5+meHd7d//sfrvdVHXlggciKuyYGRQACM9zSVQUES45pcRzMPTTwPzccJYZAY5INMcBOGsMgOQ8Iz+EszWWyIxj7MYhCwuCmDm8/VN2kkgRTpwioayWC2YZxyGnWIopCF3Mu65/2B9Xy4ULdbNYWV/pMDIaAAIQUcwMisbXbVW3aGzM5XA4TQ6HflQFXzWiJGBT0sfH4zD0uRRh3iwXTXBSWHhOgMoxxpRSjNPhcPpwe/f2zbtXP/3kfKiaxWq14cuEWhEyKBOpIXAW6/BZqaxo1AVqVs122a7JVsnUQ9SacbVdhSoE8ofjqWRVNOSbcRqbpuZYR5UknIUzgKgZMh1O8XScvPOViqcCGA1HKt7gIrG9G9JPk/zxFH/q85SpIvVjciKNd4CYRCfFCDiwnrJ0RXvBCbAQJTQMHFPc3z8EX9cueGvItEhGgXOWj44egLP+FdGISMkZFBfLFUAdQhAot3c/fbjVOPXO4na92mwuLi6vm7a11lprrq8vFA2Rj1mUJuYYx4hSvHXWBx8qRZsSjDGnaYpx5DSxFNCCgHNvn8gioiKIKotMoOMwHE79w/5QhdpVdeV95Vxbhc1iHUKDzqbuoT/tixCaQOaTtu8ji5nmr+ATNNoYNTQjSFW0FGXR4K0BwqLGoPPG+LmpmlIuopo4P+4epyll0SiqKfPQT2i6KNWBg4vCchoyM3hrHIExwKyiJs/cFgFWUou+RkEkj6x5GgbSHCxobVFVmdM0MSILFzkDdD7uLoaoCqFp6ly4aUJd1XXVqErwtg4VGcoAqkqIhox3VhVSgmHoXr368e5xV4q4UG232xBCyWl+oFXhY3NFz2NiQgQ0BOeUDn2awuk88pvbM/PWizN8LnOKU4wTGtxeXBlDwzCg
tayaS3Z+FtcbsPbjo08EtcfGaaEoYM8PkqIIghIoIRChITIh1JdX1z74GMec4xTTNEUzl1gA1nrrPTMP4wgwqjHkQ900VV2rwjHmw/EBVbxzgMKc+qHb7R8Xq9Wv/urlv/3Xf/Nv/+ZfpyR/+//9r3//j/+cxm44Gevc3AQDBVUULsrRom2b1WJRJ2HCUeTEbD4Cr3Iph8N+6B9FHKAruQAB4cxocOeq8pNEc1bKnm9GeBJHf8pgQwAthmxVAWKJ02PJRTQDERq1rvI1GEuzdSyzgKaxOx0eHu8+3M+a3JzK0Md3t4++Xb/45tdNE5ZtUwQV6MtvfrFYNFUdyJj/9F/+ru/2r3/407dff1XKU4I9gD2r4UtR72m1br/69uW//58v/+pv4Ppmt9/75RsTboFofjjn26jkXFLKKUHOCKAIuaQpRgrBhEAukHOKlAozpphzYQY9N3uZebc7TWPf9ydVrqqwWq6vLm78anX97S/c5uJwOiQu5J0QJdaZQqCQdY5ABUgpDt3pw92H3XF/GS8/P43NJ3FEIrLGOes8OQfWMpIiKlolK2gKnjvBBoAUjJzxYwrIQFRV6+cvNDQcJ+m7h+PRpbywwZFTAFQgBQsQCtRIyxAW5GyRwnngOAFnVSRqrNOmhYtNrpuJfDE2GAxTTw8VP9we9w85JVZUtXq+Cz6/aaCL8XevX3/46dX19U3VNEWLFqYpl3cP/WkPeVxAQeREJVJJOoclKYMwiAHMzEfgDzreudy1YXI+Cz4W88M0bdl8CX6L1oKaUvwYLWbtRx4nzRFAPVgzT7ZQmZSUg0Cw6AVkUS9/9bJqFmU3cR/bWLs44hRRzavvX/849nFd7ab+zaufvvz669/+n/6X7fxihGUac99NYLox73b7qcjDw+7Zs2cv62q1XFhrck7D0E/T1LYLMn5zsRn6/nQ8lZLv7+9vP3z48suXy9Vi5s6d5wZzEUPnRgwKKH68CIlQ5OOOTXNkn6pFQERn3Zznqcoll5hiTNM4DP04Hne7+3dvdx/uphgBCQjxMxm8iJQ0xamL4ynHgaB4R0h1qcMMPlbRWORxf1JAMr5Zbhabq6J0nhWqkq8zmFggFE1FYiox5jFGBKMKiEaBFAygneE+4xjfvn7rHX371RfrZTPTf1POwzCeTqcYowhPU2RRsr5qFsGHEII1hkDmXGUEmQ2LAPg5XcGgqV2zCKtltax8rUpTLse+D3VVtbYO/vnNi+36hlUKS1Idh7GyMLSLcehiHGJJLEJgt9sLZyznkmNORbOrS73gnFmNAnRZ3qf0NsFBbXS1ALGmoZQOS48ERJNoLzgq9AxdkUFgAlPIoEEiDWqc9wbPacyI5xnBx5X059f5MMCqSKZpV9aKdSaXdHv3ru+OcRoM4Xq1XK826/sPi8WqqeuqakJVW1eRcblIHGMauzgeRaGu1lVF1hGLMrCWmKZ+HLqSJuYMWmB+U/EcD4001zE4b5pEdDxZ7+tQr0LdVFW9XCobs/ImhJZKximiYwvBufpjETPfZrPP/Ez0Un1KJgQRSTH1ff+424lodeFtCISioP3Q+W6v3jpVUYwxHfvT7rDvhzELMJISZhHIWSROI1gTEaBwMQhgYY70YVZmjVlS4cyQWRNDLqIIU2SQlKfOG2mXVeWMM0AgXHRGD8gcy/zZ52GMaVfLi+tLBLy6uqiqypAhtM7aOTSNkBBhdiURUd93b9789Ofvv//DH767vXsQhXaxePHixWaz9cEbY8+PPM06aH2C0J4HR3gO0NFZia9PIdF69lbPxiBk5hhjThFR67q6uLy01tF+F1OeYlTVULO3BkFiSh9fEAKgMmpWFkUBsnNXnohEDZKSAWvF+dC2S0Cqm2oah3Hox/GUc8Inl7gPVdO0U5xKKSlncp58sHU7J+wKF0lRcwE5lTzFsZvikOLQVm7b+q+eXfzVL74cxvLdd38O3syBaXDuL88x9gCgzPPQAVGtwcpQjeBB/cdunwpPcSxlAtAzTIdRCfEMWqHPNyP4+Pp/3s76uYpLEAtCVuWSRy4ZkJEsAhAGS5NBw8CqmTSX2O0f7l7/6Cord+/WCJCLDGO6e9iRqy5unjdt5Q0yc2H1VVW3bfCeOb//cDt0O5HJe/18bmEVVERyztAs2pdf3fzrf/ft//J/W3717bt+zN1km0XVtDZUeYrzrFJVpXBOqcSEyIjAAjL15fG+zqlZrC9TVkR0hkFnwqiKWmMNWQTsjsdXP3z/5vWrD7fvUoqLRfPs+Yu/+vVvr6+fh6pdPatxsYzTlHMunJGYEQWRVVQJGEFhHIY3r3969cNf7u5vn798/nEujmckCTmyliwSoTHOe7EuA6pAtsTGFmsRkUlFkRQI0KISGiI16JwPi81m+Ytf0mo9HE/x9v1p6EOcsnHonCEBFSNgRAyDsxh8WLi6LTClcZrSmNOBM1p34erl5XX4N7+e1qsPxzgxhNoTp6v9RfumPX037R+TJ2SArPMYCj4Hq+2Pp//n3/7Hv/8v//nq+qZpalZ2SAvrqz7bu32bxi1yMMzIBcucKzg7uhUUASeQnfKDpsGbxfXWtZWy2ON0f9e9icOOFs8BVoouq+siAeacC0gyJoEKUAVYqboz/AlVdFI8Wpwu2ubffF0uN4cPj+k0oZBP3Hb5dDz8t7/85f0ff3eqzSFNj/f3/3PM/adsCxVQJSRjqQgY3B8O/+0ffpdS3mwvbq5vFk3DnB8fH3JOq9Vqs/WX19fHw+HD23f73cP93d3799uu7zZpnXMuhWcTgp5TMeg8PhctUkoupeS5/TLvc7OEnOYzEhEYmjv0szAwpxzTdDp1u93+/v7uw+3t/d3t/u5DOR6qOCCSodnw+dFmWabp1J8eT8eH/vjQH06FsVmtqWoUqTDnOBHC/tgp4OXFdrG6uHn5TbXYxJSEhQiscRncYShJO0CsA60XXmHhfB08qhIrKQIY76vF5uJqGIb7u7fvA+0fHy4vVgZtLjz0w263+/DhNuXUNE2o2pcvv7x5/sW3v/ilMfbq+mqz2ThrgQucY7gR5pWcPznGrbGrdrNdbhvfEECKY18ygQBElX7ZroxtmrrBKhhnDIEWTsNXcYpjOqU8smQtDMxtXd9cr2uHMo5jBKivy0YmmKY+pZTvJ3kV4x34sNxcLSGNHcXelr4ID6ys2hU9Fe1FBsEImOeIWeuNIU/UtFrVzXqzvbpaX22Xm1XT1FWovDNkzc+1DnLOG5/7MeR8FaoQ0Hva7R/u7j7c398CZyLa7x5DeO998KGqQlVVdV23Vd3WdWutzaJxiqdhDKEO20XjUDRyFk1ZprFMhzx2KUXVgiCzeUhUtRRQmb1yilbJnEttsuITqBXBmHnMqZv6TeNvWlfZ2tQLD0YTuGrxsVZWmOGZgoBnTwghKMwTZy7SnbrbD3c//vDKWrNdLt1iZQhKyW/fvTnF8SXiYrVRwVPXf7j/sDseh4mzAHrnfDVPGJVVOfLcxFAAgFJUQJQ5pdyPuZvSmErMkouWMpt95lz3BGXcLP2iWW+XdR2sJTjHDuGcJK74qT0Gxrn
Eqz6dWdh5B+VMLTCyjmX5f76iRx548c2RAxp+Jj/ux3QjWCSqg486AZHuA5HA4youAcdDGroCoULn0odshlBI029nZtdnUMFhbSdMoCKNNfWFgERXjlcoy61OWH8+kf//jpw9lh6v7isDpChI43fr65M5ZlKWlkHsRwISrCVVOY4O4wj+GziJl5BfeXFi3ErKqlaK11qctSl1JrrXo8rKfTsW8bYTy/FBVmSwZqhokyUxGtSz3WUgBnRtq7mdto2mDpzRAZLTSZ2GAbAXN08kJcGSVIhQtpFWE4xU48guBwpzm4f2W5pRQtrHPzUd/BsBHRhm/dWqdSZK3lsFZVpGzexujduocjKxsB4JFO586AqrKoqAAQdYKDYG7bvmlXFgao5BTTnY3TN64WEeWIwDR9JiX2uAvsQEV4rVpE6P6YZSlGPp/kfKbiPjN4e8zuc3FiJanD/Pn58vx8cQytogqmGN0thtNGIB9CYkUGs1HIHRFxR48AQ83lepXrNfroEZv5diqPf/jx7/7+7/70/XfnwzHs5Zc2bilAJXYmKClA6zimk6pQqboWFfO924XY+e61FPdb8MZRe90M3dt+2zZXuXpVmnkPmfoRYCqVSilY+Hw6fPh4OD8e1mM5HMuiS+9CMvqwwOhtc6cC+rsfP/346ZGJ+hjX62V348NyYNqr+hg8SyoOcYiQDQxzi9ZjM9vNRoQzLLzFoBgZzqeVuTAqnK05iBxKHEx9umbdbwpyaEC5X8GCI0323FL65hYwI/ZZ3sVgOJiDKWBjb3G7kkBPD0zAUpjSS/buoy8lqW7JzCIQKZcB9zHCUTSUyDpZj23YADkxO6EHZNY+adWRxYVQRERvvm3Yb9Kb+F2dRESqKszuI1AOh8PZ6HYbW9uv1+vT80vrfgi24OEgD636/R++Px4O2+W27W0f3s1oGJk5EOZ97MJ8Oq5HyD5sa76sICmtkxvt+1dEoxgMV1FQbHsfZqWUoqWuVUTMCpievl49xrbdloXxymSc1qZEnA5fjHenZTh6x7570z2EwiwnqRGUoXKotejxYf344fDxWI/hY5gYOqR1j926CC2lrmWJUqsuSoVZslYUQi1O3IZdcxdZDvXh/HA87dvNfv75OSSwHKFHnI45Qy4sqswLB9Ad1zZw9Zu14T48JGopGgPhNlrr1nt74/fEtFRLlb1zoA/bbu12bbett+avo8YpzyUQpKqoCNOMcUMajuI+uX5Xmtz3m3mwT7bG7CPmT8MJTj68t2jbuL6M/TZG83SZ0wJWLqusB61HXQ9FCoPg7tM5+3UeFSAPWFjzMJpZJsmKFSahEYSgQgSKwd49ujkYygVCHel7FEE2YmTrQY7VmdkXgTMb0Qgm4gCMYgQNjw5v7iBUoiDqYY7Ilj9zhHNOEyzh5MPG1i/X3rbBjLqSrCpFUVzfIWRMpNPwHoQQToPMe1efFUrM1cCkYGj69ty9SYkQGkE0OhESr6D06iKOVAnkvk1ERSX1fRGeAL4QB6dmiIgSaI9XWgIRgWcSyqtSNNyDCcEpM3+bQQI8hadjICJsVmTkHkaEgE/OjjCxm/cebWuX58tX9sWjbs16vMgyFlGes5mc5SAiY4GcUQ9y/HT8+Gn94U8/fPrHf/jweN7HoG7o/hLhSsvTRf/5r/15u/KyD9y+Xntcd8K29ycqfnpctOG2+9g8LAEus5jWI29FzOP5zCy1lros67Isy7rUWmqtSzmsy/F4uLw8/fILp0Q73dk5WbBGI3xIxErMmuahWpQp+hgAxrhZCBjBeaGThuwsaYc9AsN9OFdEAbkyQSGFI5t1BHO6YMhU/cfrKowBRJf3lBj3aL33MYTrUrQWEc2k3OlbZwELwJw5Fw+Gx7CwCPJXdjIQpKIOp/QtGt3IStr6M4FSwEoMkclxoUkyuodxAaGMAIlyYc3QqnlIcU5kpyVRFs6T7/7vt0TkHr2Py61ft214qwesRxZJpDzCLagRxLFyBMVQNSHNxzzc0yIjXPYbnr+026UX1eNx8eiH4/Lddz/88MM/fPz4Qcr28nJx32hGNgQRWEhRajl4DQTXsoZ5i23YHj6I+Xdd1+tR/86/Y7Khx4gxTATCFCAzQj5RRHOUwlwKlyqlSClciqgykS7rwn0k+VKEiupSdMpTt837Rs5VFldpVXvvrXf3AAtBFSpuYs7DYrcSVNso4WMERbDkbp58AtLCLBE8MxD4LnH9/ZdjJ/Z7qjimc0VgDuyLEyGMPZhAwlwKcT4JYsI2dt8uiCAV10IkPEa40TBvmXFg5COcwZyRnBRObhg97VwyiBEjaBANyt4vOOLuNjDdwSOVB1nWEAXYnczf+nNmUhVRTjNxFl2Wcj6f6Mr7uD09XX755UsE3EXLgWWIlrIsx9Ohalnb2Lrtvd/atu+xRxaHPeFnAuCdEbXo+bTYD5TjFRu+VKiiFA3Q3sXcWxtL4eORisoYGCb7TixhPsYYcoc+I6ZZ27/7cmCYtd5bbwQBBXN2vJYBRBwsVIqUNH2wMIK7930fYCP2glKLMhdSoSph1M2WfjUXKKsIYGPcet9ady10/vDwY+OXp701e35++e23xqUzCVMlCVEwRBmQAh9iDArzMbqJiLBo6FRzmvtw/9YmZoarpCTD0Ydt29i20bslFS8FAvPkJBJmVREWCrYO605lkjKAb3qjvH7xetXwegi/LldEwIaPFn23/Wb7Nrbr6M0QYCZdtC4kC9VVlkMpi4gIONPd8tD+3U2ayGROA9M0gJDpMxmiE05zZyciCQpiYZnuJhmFPl3b0/IeNkIZGlQghfEaKzImZcENbulULemm7EAoU9yNg3PDJxYEm3s3a4bmKHfrq2CCM/wbtC9PP86jmu559fcqJmNBBPcJT49xcwDCxCooFOrGwxA+0m2bJWlwU9wQcUdxwJzjTye6IyxOM8aHwam5iJlVPz8DRRpjuHsfRAR3SuVdWkC8vyWRsQNOEsSpT58byHR4EgEKYoGrR1IahnXDvsEMaMOJJOpaooNNp/Yjpy5hClSm81J/OB1/fPz4h48//unH7//4p+PpYRt+MyenB+U46PL5q137U//cd77uTr23Pm5jPLldQ0Y5sIsMkuIeCBao83Cu9c1eAYD+4cc/iMiyLEutdVmyMRIVLlyK1qVcrnTb9st1M0uwraRLYR/WRouma2mpjJ3ZcAwhMh8QhXPQnX80i8PgtJYJgDzcx3BKP+K0IaPKwhJeWGowQ6ZsLab0fzKbZiv/9k3co217jP74+HA8raps7lt3gQvBwaRMDiPvjt0iHG2EGUAZbsA+0H0g0nNCLJUQY9zT0kNImSEAgikth4hmv0M5G8l/hlKQ8LIuTOKOlM8w05LdElMQmZm9yhW0sBa8d7ifqlrqfXx92j5/aa03XfD4cXl4FKIxRkRkluQABggR7D4H1RRiqewIhVPrdPnSP//8fHvu54cPRYsonR+PHz99+PjxD4/nPxmegZ+tP0maLOY7UzCzcjnUh+P6GIjr5aWPW2+dkAHjGkF3485pY/T6erd3vhJdOTyc0N2MkNYImrVqoPfR
u7lZiuh7725DtYiWUhYtVfWA4N567711J/JaCKFECNdl6LazX6yZQVhUlwQGI4b51odettCgl5Adw0NUSO46oilcjLvf49x8Kb6lFCRFHWMGWoRYwJ2A4IgiQeIMGsYUwhJLxaGgCrGWUmSt1m8hgTA8nr0sbEFuMhrtDdcWt+5b8zGcHCwI5yBysj6st/ARNNydYRTMVESEbJrl5UdmzJYowl3d2UNF6lLMB20ZT/NKgiAREgUYw60Pi8DpfIRy+9qfn67/7b/90/Pz9ccfPp5On4AbS91bM1sfzg/MpQ277fvT9eX5JXp/7hjEHm779dKHXW/bAJd6XMrhx+9O67KsS9lumzLqEstBA8UorF/3/VakP56jVFoPcehxuCYZNHnM5DET04iSrBh3e/u34zngA6NH263lCIoI1oZZpwz3EBCFjbbh6q1b9L1v27i2ceWC08OhFvYRw1xCEEysoqWq+lIlexN4a9t2s30HkXz4ePzw+N3lpf31b7/c9qdfP//mtNVyUD2ROmGalRJ4OPeB1qNt0UcwXOFVE8pjgqTx17szP5lUE48JR2++N+/N5nR4dt2JA7AwKYuqMNgM++ZauRBYgbvG9psHeP4leM+MSRQhkVvzaJtvF9sv/XZrvY0E3ssi60EP57IcitYprJm9ZTZx98X/1o/RHBZLymMiIpwCzFSYicmScDPS7JQWpiLiRbpqMHOAfKA7MRRMjObh5m23IGYUCS7sQlRYiWFIcekEKRDR3YMhBOXZMBGEIJPJIgKwoXeYV0VwBAzUu5nDduvtXczo9IlJxCrcRjBLUn/v6iMJKhAN8KB2aduXq3fX47GepXxUE7pyCwziYGd1LiFCRJ5IrPcYNqmb4SBSViYDWTeLcLGppFNuvSE1M3TPDGFyMzD7GPvVmEhUhQWVYLDW31djEdH30fZRahG5zwAzkSHB36gcBahGYiB3mCGMGSwizIVBLp16A3ZmsyBgTubI+spxXuS70+lPjx//8OnDjz98+P77w8fvtR4r6KOW07oeqnJBrD99/evnf/1t255vL7fewWS8b+NqfmWO0CBAg5dQrUW4RPiw/fx4UH3n2Pvdp0/CPMuXUkREWUj5LqWQIBrmY2TmR2YXwNzNMbp3Gr0PM5Mqjhgjg+8jJYdwybM481wiDTowC0cAAQuHDTMOAckcNBKRikiBKDFmGEq4IwfIgUjHOHyz+MPNCXFYy/FQ0ojVImN107U+FykNoHvSOc0Mqpxr2LPDRaSHINGrNCXMjR0eiQ0G7m83sb+5WsFxd/dgsHApNcD7bbQRcOjdcCrup+Ubof93pz5g7mbWh3WLfW+t9eEmSqdzPZ0F7MMMLogIOHOIFKJwD6MUVrNPaSpSGYhoWkapPkyJREQi/Hq5fP38dFjPQfvlyVqjtRJNQmR369M7qZZSau/70/Pn1l6CBjMDkuihv7qiY9Yl30Ax83BFREryE92egnwmtxx4h1cZ1+t+udwux1UJRTN+WVmk1qXUpZSKIOJk+ThRVSVPswj31nu93SBo3US1lLIWlcnDsqU1LQR2KdAL9j7yNtHdtQYBc7s7tCCQERa/tz0lAsn0gydhjmR3zF18BmZ3UASLL0t8OMdSxcEqONTAAasyQg5HIaHWbHQ35VZ4WbhsBI99IDq5EcCZXNWHWwcsNS8gAilkYVEIAIYoEWADMeA8RSWZNZ88nuKkS0j5xvNKhFQZzGFofYC41upY1/2w7/2vf/3FLB5OD+fzQykiEi8vVyEUrafjcqxVagmJPq5fw8x6IQd8tNHbsLZnWooSVOV8EsbDvle3BmJd2F1qrLuP/fIiZKNbrSZqdbHDkdO++/X4netshjz8+1QPJ3M2Z4NGWZUGqCHc8gEuRWoVUYqw697GaB378NFaK2AKIrAP9HCHh2GMLJuoiNRSI7C3MbqZGcClqCy1llNdlm2/+NeX275//my1fB6Nz8cPp8OhL7IspDW6+3bz7eb7LbpBIirHKBBighaBaql1fc+9mBgMnNxtYIywfg9AFsg9ATs1ljSN1xggG2ibizqINYjkrQC/X0p6lQ3f3WIwyUORXzza7vttbJex30bvPSJKkWXR9VQOp3I4lboUVkp02V9NBECJxPzuviTtNdOkQIEgsSSbc2pgDXMASpbEebWixgLQVC8gIMiFFZP1EoAQqYKUKQBlCopOEfDCISB2coTDLW3n0pEQUxf0OvaKCIQL01KrcJA5EVuEDbeppn29gpk3biSFKL03LYIzoQUIihDwWQ+VBeEXiqfutg+QS0HZRIS7FkO4BBNrcA0tEDYy826jxwjykDSOuiM9efy5UwQxQmcpnyy5PErvrnn3zOY0uBaQ8GSQ6jfmXRHo3XsPERKWeSwRMl6bqVAsiBqhTpKM1AiGp5+hiCgBJdScTYjIPEnTcAmrQg9KH4/1+w/nP3x6+OG7w8fv+PwB6ymWk0r5VGtZ148CjnYlFciXoN5Ga2aqJdiczMNT9QliUa2LIFgl/clQyrdIzPl8zkcnuXKvQYz5VT0QQcQKErPUKWoRhhBBYnS6Q1tE8LDLbQuEVgUMwuQS5h4gaPYNFOTeicCUACO7w8wiWprVkzssNIkw00GbPIUJBKfpQA9kJi+/7cpEzFRV16XUItvo7oOYICpL9UgRXSqFoyPcfR8jHFKEmNLTbT46zunfDmCalieliI0Dfo+dDHeeDi+R8YNCCRzFhCREzenS99bGKpzrNsKTq5xgjiOZ6K8cLeAerLWP7vtmKCysRSIaSNZjXY8a2D3sLuGMwrKUQ8Bvjewu8/aAp0UkBtf28MHlUH/9uf/T/3j57bfLtrfexn/7f/7rl89f/vIvf17Xsvcv5eDrSYSB0dy9hwqBRZkUFBbWbNvHroq7KWjcNZiQBIHj3oq9PWQcweEIV7d5eYgkNbbD4dYRLMQc7Wt5qUTsPvbzdx/Ph6Ui/V9Jiaa9o5BUqbroEUe5BwH23i/Xy/WyLoel2xCRIlxEyN1s9Na1hIiLHGqlovH8crvuYwxPnmhaHafZWYIxTGQBciDo3bnJzKpSZAolWQRM7iAbMe0rKYl5JjqW4/j4yZbK162FM4sfT/zHH7WWCl5vN//8y+02WlGpKkcq6wKmeH6h3sSCgsncb5v14QRJW6ygtNxAWUkKzesujEDbolMQyDydjkCTDg8pWFauy5ugJ8eFtQikwLz14R7CqLU8Pj4+PV9/++1rgP/w4+Xjx/P54STSfvv1p5enLz7sh+/s4eOHWvUYh8ulmPXWtrJIWkqS21JkOEbftt7AN9b1uBzPp0OgmmPr3AaKl7aVbbMw225dCxuaaDuckogL5Pw1hayU6WLpdDAB+HdHZZA61aAKXeXwsIwhL016IDCYaF31dFpP9RA7Xp6e9/1GJSBRtBZl5UrOwwLuHObD237b2m7DmLlocY/NmoeLCktBFATMN1CcH5cRx/b16eVyu/3Tn3/9+fkP3//x0+Pj8ViPx7KexKNfXvr12W6X8EABdYEVkJJS0aWWyueHBxG9HzAThHU4RVhWrsFCkpO610gdmn0z3Q8
mmMV2yzRK1CNrDdacbbyRgV+3mbcCJ5GAgbZH2+x22ffb6PvwCBZaVz0+5HdZlkVYZD5DU2D8Zpp+x2HeRvzZ1ehMHJuUgJBp6kFzskMgsm7eBggsLEWLsHuge5gPC2jkQ87EYFRyAQupITTCKJw9KIKMKSR5mRbms7/Kq+WAJ0kkwCICMjMMqENZyjrHzuZ3pQVl4XDfk8Pbvvd9W9YqqsQc4WFjhJOCScKwqHy3PB6otro7TLv0q1EgOsWLsPGyKtc+pBG7dikuayxibN1yOO08kFRnzvkZRdolZJkQ3iIbcHM364MI6sEAizjS29s1SDNFh2jkVCkl7vc7E9MHH4ACBYGZvkMCMIUSlogSITkAJDA7z1gUlnT2I4e4FyJmcwbCYG0hPuvycdFPp/rpdPzwcTmeIfUGCdZjrce6flJ5cHvYNnt5wl9/qZ+fyuUmvZt7QJgpitRwG70HiFiEmBUAMbONAH7vRqJrraCUVdMrHjAFu0AwHCh1rXWlEE8bd9SlqEpRbqpcq6pyKRIEa2Y2gp1SCSHiZgwiKkTEbkgjtrCMriFkieAWFjyQ1m5kgR4hCCWkxJ2Q6eoRmHArDaPxjg3LoAw5TOoWEEHBwiE8QN3itrc2hpTMOiKfyy3zBZkEWf1iElQIScYRDfKgmP0685wfRCRekZb/AE1RZDI+Kc+7GgO3Znsbutblfq7fk6qIEjwJzNbr3csjzIzYLNiM7xmeSBHCsEzecQknCSEqopmAY45hBnA3czO3DhrEva5yOB8J8esvvO/6gFNYb23/6W9/vT5f16WwjtMH1rqEk4oLmsWlCA7ro4j03rv1jC9JeCKNUu7secJ0iUlGzDcNWWTul1EyYRAQobwOMbyPoPCqPJpdLtuxlg/nE7Eejw/H49p7SzqRDRMVLbqWNf24hadJj4e31qRIUV2W6jZUkzMXo/e2B8XwyPjHSvCwhdwQ3geYJVntANoYGSmaQnNK755veZE+xI3TjZw4hCCaE04wJ3QX6SLqk2sSS3WitLyKUvj0KOsSOUVmyb4ymKIoE6gdEAM3532QB7rBevjIYEgKC+cURgXCGaRMopRFDBUnwAjD4EGUaXRGoWkdg9+xfBIKZOFCVItO0T7TYal7GwCu19vPv/x6PJXDIuvqfbOx91/5Z++99b0eDnljFtVY1+PCYdZvPTOEKMJGHw6QFcRStSiC3Dyd/5jAvRTVNXx/eRkewUsnjsNaGaUoMYUQhbOIBELuRYz7PMO/3cmA4lScCrhyKeX4sEKGea9LNjZM5BaRpxtHCLHoUooQJIzcKCzGsH1vl+eXvV9BTSsVcffozUb39HLMUNgxXCROD0vQwx7neMJ2G0/PF4pfe+sfPz6ATrociARQoirsMdw69T12NVpYtJYqddWlrvwteBl3zck8bHI7vx9CbxPbeznhDqcYwxlEsPsOiBIgxd1WMq0VEzbFKzfSh48WY/dt83Yb+62N4QjXwstBD6d6fCiHQ11WFZWcV84KcuKP/68vIioqpWR/TCGzvQvPcE8QgwRZEdjgILgjfQbJgw3u1JzdZsGTASfKIkgiawQwCE7klJB/CtPiNT/01WduDoqTDc0MkPUBg7gJcRF26GZo6YmK4XY35Xy9Je6eaTUIortx9Mz0YXUFr4XLsRyL1ma+Hp5sgHahIdpTKesabaseGDx4piQbUQ+YI9zFMmqNMo8HxFEXJri33kf0EHdycIRnxhnSdyFmeEsIkwYL3dvlnIIRs/zuAaMIAQpQAVAIkTAEUIZSFEAiJw+JShtFTh09KDzLqMg87wD7NAU+snxYy+NBHxY9HfR0KseHsh61rLWUg+pZ+FPEad+Xr0+3n/5q//Ln/a+/tC9PfXc3pmGJv867FkFT/DozZmj6C3+79hX0aoE/J3se7hGWIyEXdzoeHx4ePqiubdt9EFzWukoFvEuhZa3rWtd1MXg39W7DB2Ja4eXQh7UyxMMCd9N1d2IwxCmt3mIMYw0trMy4Dfdu0VOWzMxJDQNg4RbRB/YRfcRrQ0ZMWotUtRyFEliEqwbxtY/LpX3++uwR5/OxloWYRbksoIAmKU4ncMdBqiKsM5RrssbBxKVoEGFYhiMjZio8kUhw2oxlEgiIWAqXJdy37tvej0t5pWClJ/L9aUrim7/3V5g/IQoiG7jdxnZLAw9yz6EHkoEeYRSKuNOmScLQ2kBYH819RAwi44AsdV0/fP/94T/9n/GnP6FWud1efv7rn58+f3l5/uX5q7Hg5bLs+4dP3y+ffuRSre/Pa/WH80cRebk8b/uWxg+BkbyEd07jk4pB09Xt7SGb1Z5hmBEImcQHcYIP9xFmrERShILa1s3i4eH84x/+8Md/+LvDUp6fvl4ul33fxuhEoUVPh9N6OBQtIGQwqA+jIKvOQVWV4EsRuLV9v/loYZECNOJCtAqf18ruQtyHM4uWUmoBqPW+bfu2ba1bIODmqaO+v9zRtthuUUsI5+OR9TCxCALWKUNnAd8bXa7l5YK10OMRRH7b+ghcmw94OLXO7jxCxnCCL+gUfKiIEwmIG/YRYhCW4EjPyCT5+jzYHMbqLEGpqlNmKh5sfUTb2JwMksXW8Okd7vGuGzN3M9ZQltO6DPPWvQ835lr4fD7srf3rn/+1txeJ/scfP6zlSNi+/vr5+fNvv/7803o6rktloU/nR5xXwdhut/3W9zZ692FuAwFiBcWw/uJGw524LMuHqlUJcVji0/fb9frl6+3luj080ulUjsdD0RouRFSYhdmHubkK0QzUk6IzWPX+9IEkoBHszmY8iur5w+lwKsMaC5alRPh2vVhDrSK8WjRm1EVV1Z2HE0PC0Zq/PG+//Pxlby/LAcuq1jmCtpuZBbOrhhZmUhIIoVZVJap0fjg/v2zX5+3p6bm3Vhd+fDxo0VoKRIgXYL1dd2v7dhtCYOJTORStSkT/poKJdJ2PHIr7zAyb2CfcjFJbnXUMIeCw5McRCLgCnk0gKQXEZ1UUxEh9cU7XyTz65ttlbNex3dJGOlRpWfRwLIeHuh5Lrap3/RGB8//FKwZO70qrb3nCzLQelvWwQNJ+n4wyYA8IIqLU5ilLKeqgHt6aw9uxcREuoBDdhHtmGrqDIUyqJCAYAlS4GHMPGQiOPWLAPMg4eOYCpg0FsRAV5oJ08yEH+jRxsupSqKS0mCDK4mzuZPS2TxOxkgo0utvoMziT2RGt3UAUUocsu7VRHpby8FD5AS9uIoOVlyOfKtU+2s19dOoUMagbIUzM3TIOfYT7gJHb7C6gK44P5TTcfrv9NoZhBZUUulA6qodMW+9shAAqLgTY6B6gqsLkLmn1+7ZcuDAvhEqx3PEzJSjusPdMhclDxs2GR5opIeOCcnAxwjp8cEThOBQ5L+XxtJ4OomVo8WXh4/FwOn86HD8syyPTw+inbnq94Lff9n/+31/+x//65Z//5bevtyc5LVzZvUW4m7lbynt5mhknqEciUFUVfb9kNEUWKbBJmlYWWQn6O1t4HNbD48P5dHzofWubb5dRa1lKGkmwFC46PeSWWgJjHy18gJAlFVFSgS
SY51kfiJno8Uo/mPw9LiS5n7j1GOSmrJR0ykD3aBZ9+K3brY29vzn3scjh4bicjga0YcEEZifxwNbHrbU+BjOlK1QOHTlja2X6SSKQuT4iJEz8lulGIHA68E1JACMLD5++vaUo0/RmJCKVRZYjax23bWt9b93vTjdIBCL8Tqd5hWG/feVYAhJOvXnvFjFlyXQn02VxihwcJbsWEsEZyNfHCB8gE8mthxmquhwOXGt9/PCwb1cbo7fx5Xq5XV/Mbd8rA0IHlnJ4YBECs0W4jT662aTCZOn2ulPNq+SRCjh4/O6rRLwZGdzZf4wIG2EjJ8sswauU40E/PJ6//+67T99993A6qdK+1W27mZmbi0hpIyx91oWI4BkP7Mq6LqsyDxUKU4GNNloCY8TMRTX3XwIVrcf18HAefSRgwczsEb2Xks8B2hj+BtW/e7nDDaG4F5mRD0owJqkWEA4PWKfLhX/9jSrJh6MeDkYMi+7e9xYI7qZBQaxgvwsQQpiKstUU72UOPGX2CDNIOAALD8fYQwAWqBIb4jWDUAFgyES/BkEswskM9j70PYu0zFzj9Bix+UwLHRZ5fDw8P8fT89NPP7fTWjjsP/zpOy1L338bY2ttr9fL8XA4HJb1oKWoCJt5WRbeHTbgLgImyq7Qbffhw0zLWg4nloDDF0Ycw+N6vW17B0KAw0K6KGshEsqITSZ3cneefoH/3oqhCDan0aNt7WqukZ6QjEnDDEdjDJVMe0xTQSiCrYe5MbP32PZxve0vl633BhZiRhhBfCiBGVVoUVpYShBBiITXpZZDeTidzg/t6fDy60+/utvlcnm+1A/9sB7qUQ8iC6Mp324XDutZbSV6P8YYfXwzHbt/JWTMwnAzNwvK2VrE3DFfofPkQ+R+S4wB5GYjoCyXcmunmEWGJ3kC1r212G7j9tLbNjL/uS66HORwlMNRl2Opi97DDhH3+NC4d1Fzp/p/eRGzLqUsxbIgmHX49IkTTAKaEpQYTGxu3XJgwsIiwoWZuWeIqudsIK86kTuRiFboYdDaHLf9xbyLBrEFDbPWvaV4golVRVPY4+g2/TVgbu6eqxcAYsq+nP8NOA4iIZCbe4z5J2eOkQfCHLd++e3ya3R/KB/aZrQLb2I37zysjnRvJQoRIT1WPokUdLh1a7BhwELCHGNumWIML4IFqs7ahRoDzshIvCB2c6cRQZl74UwzPS1mfDIYGvSKxL2+mLkwV6bKqETJhlAinSmX9wcG5CALMtDI2z+T+ZA4nBGGkhemg8rjYTkf1odjPRx0Wfx4Oh4O58P68Xj4YV2/Ez0Dp4hljGht//x5/x//46f/8t///K+//Nxoe+TzSgr4dFGh4DvtEJN1nhgLpeHa+y+jw2dZDpvWOJ55e6laZ8TwtSznh/N3Hz+2/Xp5eWrbRg4Kkg8L1cIggsCJBYdlYXa/9m6ZrxkZS4c8+B3BGuFBIAxHUKIXOZ7xPKfBSl7JA61b+AgvoOiGZt5HtGF779feL3vb3qVZqurjd58ePn3Yr3szE+b8VGa+9+Hh66JLKQ/rWrTuvfdmNoyYMuZak5tFxGnBT3kPw9LQhWlGCYNS3eiziImivC7r4VDlbrAlpS7Hx+Dl1mzvbWu3MfaI42yeMGGnyRwsRVTh4120RQ6MmUkJCcGbWYzoMWVMPPbwcGFNd06HWfSAEEmAx2hmfXT3cGFjEJjCvG37dsPXzxZ0ejif6nr88N0P29YvL5eX6+22XcO3j2frl+UXq+dP57/7D39aj4+3vfvYzZyZZ+4DCMHhMyAzL5Wbe2vpwzLd1d/Ol8n5lbRQIpI0VPHUl3pR1sD5sP793/3wH/+Pv/v7P/3x4/kh8sYNhxMMPqK3sd02pqc++rqsqjPqLdwJtNY1Sumdre/D9t5H62MMJ5JSKIjVnEhLdS2VhY2iu3frrdm2995H786iAQJtt61xGyz26m6VS0s0tIRWV+ZIBjUTQNCIO80uZ7fDcHlBu3L0+sPHh8cHfDwvFpdtf+l9t4B5IdFSVWgJC459mJmHe86kiMmZ0rAHhWkpsq4cRFu3bffe3RFyjMpOEeZIADL9I0XZHDYchrQ9D8f0QJznUCSMBY/c+IggjCLEksaXC8Fa19b2f/3XP5Pbp8eH7z6udTmQABRj2PW6tb2/XHxdy/lxBZeyHJYjB7qMoGQFxrDobs1iRBhoMA3hroKlcoB701LldomX543MalkXXY/nlUW36+h9mIeZDTMmaCmYvkzvX+mcZwNj65tZp4D1gTDhEKFSpXBVX5WEmZGWzKDRPbrdY9pj9NhufduHOwVpQDzUQwXpsKAlsROpTErZ6rAEeY36cDzik1w/tU+Pj58/f/7y5Ze//rSdH+vhVE8P53Utwl2lFlXrjTlUBcStj94vl+PF3hWYNNMf0+MhbFjvYwxPt5CsYlKlMMcj9yEKIg28FSAMRosQhGgBRIRlGuGFh00PGL9d2m0bbe8IrLWsx3p4KMtBygotxMLIiOg7H/g9Qex3hdedzPPuvxLlsHNOt9KZRlkDmGM2KLiAChMxJW2jD7MxNnMqtEhZRArH1vpADtUj2wWGV9K1rHX5QOW7W+df22892lG1qCO21l6ex9M+9vDUKhYFbIxhEd0D5M3ZIpwD4rMFmMkD5OE7fI/w339HjzDyLKGYWIVCDoYRNi7b8/Z0+S1++lC+416vT6O/WLvsg4dE7Mca4lFISj3o46fjH5XXdh23cXnZv2J/4di0jArz6LbdnM10Gyq9FB/Ew9VguyvjWKpI6aU0tGtrY3QfoAJWCsJA5kk5SDLAD+bh9rr2iVi4KC9Mlamk4S7deRiYAbn3hFAx5sE8fMYpJFMnMteGxVeSc62P6/rpdDoeqhReDvXhcf3w4fF4/HFdfliWP5TyXeAUWIkXpu7x/PWp/Zf/+q//v//yP5/3L3IKOhJ0YQ5lqKoQQJPoDRLKNt0txRNTGfdaxPRhcxNIw0FMg/28ZWDiiKXo48PDH378g4/+07C9b7bF9twjqDcrC9eVl666ELPDHGZkKTlJUhAzE5hJiWhOE33Wcfb2kLgnMcnIQ9gFvY/hzU1B3A1t+N6t9bH3futt733Ym/ydhMth5WXZL3trJgEYeSZhtoFALXKopaoyyLqNNtwskam1lMOiFNGwhYWKMFMEpZQze3XPhO2ARaS2bVkWZT6s9Xx+WJaae2MfBlHSpQ96erm+vFwALzopn/fNx83M3ZOXmoyp3/krJP/GA2Y2rJsP1pACkayUHA4WZlZEBIUjnd0V0IhbplxFxMSAncLH6Nt261+/3iK28/lwPJ0Ox9PD46MuBwvZdjuu7R/+zr7/vrxsi2oRYht83V98GN+DPv/NEDw9Qh2WCeRGHL/3V0mOEO6qiknXzEGMC0URHBb9/uP5P/3jf/jH/+NPjw/H0fvz1y/7vtmw3tN4SIgVgdZaAKOPUsorxVHk3i+6uo0Y5BOrYky2lxBFXRhA0SqFUWZ9v7fx/Hy93fZtHwEMC3fywN6aSHt/X4hRCsqCukAIZhFO8+152qzOUpVjSOwdtwuU8Ne/0VL0u
x+Op5ULxyb7rWFIlMIwjRCL9/6aKZyCAIWxKAXzInpc5XQSSFwbqYyX50ztdgbVQiDIiOZowZYJbnNh/7t3bX6hd8VAMFEpqhIWpBqiCl/2/fAScb1tv315+u3zk2qASOuiJadwi4dv2/Pl1lJL1nqAtCwqSswpbejd9t4jbBrwRAz3RsSqunIxK6dtDd9tH2bUd993O6zO7qneCs/FmF4R4uZpT/DtI5Za3UF97w43824MV6FSJEIgAKvwkqCCe4wx5tUOhJO59ebbNva9W6StoZIsomvhA6HM/NSN9tGZrNZai3JhUV4kSFDKclr9UApTfPnyt+fny8+/rrqAhQ/rQ62Kh2MpxXo360TmgI12vV1u2/XfM+zOTTE5nGbmcddMTug6RVtpYT5BmTAPIjcicm/NIExCEaIaIhAOcvdhvcW+jX2bIyQmqks5n4/Hh2U5ii4gteTsvlUlb8Km14s+fwAANJV878/8AEbKIHhuE0TTXiohJA0oSKZ5S7CgQImohSMdUS1oAJrZbuROYRGaTBBSZiFRXko5I6SiKY/vDw9L8b5/vXbrcaUYQFmgK4oAI8jCLWcxBgzA2Ym7RRoQpFvuvSr+poIhZhIhMGU4dTjPuV9igMP27tu42UvTtvgRsTqTsQP9Nq7euzAprVVOK3+QOIuvGi4o5BDXSr3CVcJ42/vX8C0iurVLfw7H8B0xMDo60RAm1qDhwG7ugyvBySM792mVSanvCPc+fNj7e0N0NxwkueN5E88gMsCJg5iYc/I3kMqp5Hs6+0B0J0JRPWn5uB4+rMcPx+O6Vip8eHj4/ocfPnz4/nT6tK4fi35H/Bg4EK+q6xjXff/6229P//S///LP//KTHNrDumQTDgQxi4QwBYIz3pIoAjTpMs4URN/cF91aw53RQNN5LebaYHAwM4T54Xz6h7//+0MtVfTr1y/ufXvuX58vpL4c5PBQHs51PTCLO8boO2Bpr03TiChlFALnqUozuAelGboH5+7t3nuHqvDiHN07jxihTOjGrce293S023v36O9XTQA9sDuuI7bNormh924UsQit6XUlzAE3G230rYdbJSjoYVk+fToL+KrPbWsgDqfsa5OSxioeERQ2Rt+7sB5Ph/PDw8fH83FdVYWZAt762HxrHdd+uVz2v/386+VyqcqHeqglHacN5mbuKeNJsV9GnXz7hKWL87C+t773q9Ooh7ocKyhytoJA6uEzmNtgAQlKi+ugFP4lijjVT939tjd//vql7XWp/N0P35/Oj8fTQbR4cGvy+Oj/9/99/T//o/zlz/bLl9tTv335wq09McVxeRQu7uT+tqu9rhQIs1NkgllKdl63u8lQTCSGpkG/G8GZoyrAcVz54+PhT3/8/j//X//xj3/4/rq9/O2nn/7yr//6crnWUpflcDycjqfjsq61amCYjYwy5mk2XSLLPsSwhJ2ZuGj14UCP4SNgTFzqAmCY99bhsazl4eF8BFiEhIdfpPOyFPfwiK3tme/6el+YUResByw1CI6Wkeppuk0U8JFoqJDGyu4R282+fN3/y/9D+7b+f5f18WF5PC6tb78+XwPBvqhJ2y1iTAv/CYGCAWHUklJaOVQ9LHI8Chc7RCwLx+AxfIwwj+MDrQvboMvGv154a2F9uAdBU70Vk+j5/hmjtO+Pew/NTMuyEsUYrZuLEKJYnEspz08vrY9//etPe79+/KQfP50eP33/8dPHDx8+tX3/l3/5X1+ffr3+9uI+3KFSl/VUa51tM61qCwubqZMRUTMj3yMKs9TCREvEw1pov1aEwfl27REvwto7wpOroOAMkTUboyc08XbwR6QOdYwB9hF3AnrGcLg7ewYjkYPIw/e97dseEUQspO7Y99H62Hdre/cIEuVS63JYj4+LnATrfrOnz5eXp5ft5Uqg88P5fDwej0s9CC8uCzHHyiwPp+12Oh7X3758/Zc//9N1+9pH//7TH5fyuCzHh+PZ3W7bpfXbsNb6fmuXrd883o36YoqIYmbDpvmqh3k6GFOazn/7moXqBPrD3cYAdgIVM5IiylByirDmbR/bbW9tWIQWORyX8/n4+OFxWTW4ObVh3WMwyXRBuZcsQfk5f+8F/Kp1SLrM/B7u++jbGFVVSCUPmTTHY1BaJQQF0SB3NwdI0jNVfFhLzgCClEQIwjYACioETu8f2Qesx0ExnIjkWPXHx+9W9c+2DdABKMwVtYguxERkLJ2cxccY7oMccHHQNgbl6JM5yCgcsDfyc2pjRLSUypUsevp3jEFGAYwx9q25dQFcxkYvXnxdWdalLAUdpjHK4LXoWo+Hj4KH56cOD6HFYiE9KrQKdKFyJJftWtZmTx63Ntptf8o8vIBZDB+g1tnZLdpm4zbgwSTMZLBQh4KEhZhyUD0Ha+OdqucuKs2CDeapaWYnDiZPr0HWqUUjdoJnfA3A7Oyd7EasfJL1oS4flsOp1qpaiuq6Pn744Y9//L8+fvyj6on5CF4jFuK1lMN6eujdn19efvr557/9/NPXl+uPH8rpXNdVSuUiocKl5HAmS/Ns/FMq7QQDBtE31bJ261nEMEDgSP4MiJJyNWXXEK6FH4uQEL6cT8/PT5+fPn/9+tJsqwexsS7KhcXZAiPcmcEgVhYRKUJFSYoTeabJjnxA7JXKMbWC4X30gFQtzugYcDdnDhsho6OPkYC7MIqyyhtR2cyfb7fPL7ev13a7dUs33mGVuZyKqBRhYQ5zM/M+yL0wn5bl0/n86fHxfDySR+ydHe4whLmDUbiQEAl7BGcyn6Oonk/HTx/OHz9+ENbLdduuexutj9FtDIsx4nq5tduVY5yOy3EtVTWpFjHcJgEpNeScO/3vtqTMNyVjImeKqrSeyumoYPdAxn9rCowTQ6bUSRWX4kIu5E7hRPDp4eoWGBHdx7Xt+8vTl1Tbj97rshweHpaXvVRTuT4cxz/+w3Y4vPz3v/z2dN1725i0cw+hpEulaUd6Q9333Pus/N8gynlvOQescU+ADWd4rVTW5bQuf/ju43/6P/7+P//n//SP/59/WJby6//66S9/+9v//N//8vT0Uup6PDw8fvj06WP/DnY61plFH68XitMgQESBsNHNunkP94jkN0kMG8NEeF1WIo5939reLtswWw6nFFmLKBMzUylsLsNl3XlZUeo7zShyNpbp1SlzyIKTRCanO5PBIoKYiqJWjM1++XIr1f7wI314WD784fBwrCGqak3LLeKagh4wp9VGKiyJqkCZXFlI1ypLlaUwV1J2ohhbXG9k4fsIC9SK0wN9GKyqhf3l2m/hbd4MypHet19kinVfbxuRlFJEQnmIOQ9EiOMAcNsbPG77vve1ro8Pjx9PD4/Hhw+nh49laafHp32My+UpfCcmKWspB9UlmWfm3QHWFSzCERFjeMofRb0Kisq6VJgr2G2oUAS1bRADkHT9SLVfBGehkoTub56x9MUztzAf06raCYCIIDG3CGpt9NtLb/12uQ4bRYrcBeqINPW6PzhKulRdF60LU/HO29Z+++X5y69P/bovWmoclnCxDrMyXFzgRiqF5bgevv/0
weJ625+ut5evXz8XXT4+Lks9SlGB9GgDLdx79GZbt/136TbxLaQBpOJifldi+jas8Z1e6e2PuyNGZ5CZiYwMfjZKalI3G8Gs61rX03I+n07nw+G4kES37rkx51KmScWZ9dE345U3JIZAr/Y27388LKWuzGB4EIvIApVgD48YI9LoPkuGCCFnlqJqwh2w4c0MATZAMBjCzE5u3J2bS/OQbk6moodlfVj0fDxqtKcAeSiYoRWqzjRAQNHCTCxDDING0mXgGBaAa3pEERBD4PpteCYri85JhhSxzPJxuPsY3XZDBBcOwubboABpLSTrWtblWNe1lrLQUh7WcrRd9ss2Ohf1ZDjXUoS4iKxSjMuwzfoeMO+9teHRhUnSYoJgAbNwwxgx9ghDciPDImqSv8OZkgwDs+g2nQm+2ceSZOYRPeAgAztJsLpIiFCG0zGDk7MwZ5yJWAlqragPZT3V9byuaylFpNalHo/H04fTw3fH048kR2Dx0CAlrqpVRFmY4Kp4/LR8v6+ffijnR9XqRD2Tp1N7lgVkeApmkEAd3TVo75eGhndkwRNg+v/T9WdPkiRXljd2N1U18yUiMrM2AD3dPewZGX7zRopQ+P+/Ukgh5eMbOcOebjRQVbnG4m5mqnoXPqh5ZiR6JgQFJHKrcHcztbuc8zs+WOujRRMRESYEdw0ISnR3fzyd/v56efPxwwf+FZ+W53VzRYgZMs+TJPNt+CbInRgZeWQy8VSAszuYGyEYBoRZkI83dnyMNCA8PYCZDRAdXcO6OlgPZzcKRyHMRM6pQDmUzDeGT9f+4dPjX95/ui5Wq/dmbkAe91OSI00iSZgQTVV7R9cicJynH98+/OmXX+7vjqa99W2gsRAi3BBcCKcpS+KI8LAiSU00QS7l3du78/k4lfRybX/+7fP7j48v6wUx3pymKbFZgPndxCLpcCgpSQSoeavq5kLMJEREAxY88DSvZv67WJyZ4WCTnaZurudTmY6C5EE4zfMotPe5IBHhCInPCAVcdkazQUS/NUmIiCJYZlKFsH55er6+XBzxcDz89Msv4dyN/1//T9J1+d/+t/bTz48fXvCynt0OZtJbM3QYtTANN+6ghg+H9cCLhoWrYddvKaOIJCyCEObeDRlHSisxFuGH+/u/+9Mf/tM//v1//T/+09//6Q9v3r758vj54/OXv7z/7ddPn748vZgyS57mT2/fnP/up4cf357v709TycQU7mY24oLNnYf8foeOmrDM84zEJAnVe+2AkMqUpABK7fb8ssXLYs6llG6quhvymEOSJ/dpjuMJ5gN+lZFFhHZrVcETUexrwAEDHU2zuxq0DcJBEjLi3QGb+HVpj9f2z/9jm3n++eHh7cMsZTrPfintBRUdggAVDSwpWEcgISaRsThAJEyCxOiB4EQi8wTwQCL25apLtecLng/w7iecJ35znz9+8d/e64fn/mWJHmZAgSPo+FvRv8uuBvc8vkkemCgVER2HHzilbl4yh8M8Tw9vHv74d3969+4eAFu3l5cFke7u3qVULtfHXrcAAGQIYU7CSVXb9bn2CJBxIbhF86rdwpF0eK4iLCgop2mPBBxZCkhIGYOHhZ2I3ClczX10Mt92/CMU0Hd3jEeYuXUjBHAUEixCmCB4WbbPH562pRLgVHI6lZSLcAIk4ozczRY1IyTOlI9JDuLgW231sn18//znP//1+mWdZMqnKcGUItlirfdojiqkRCkcI6H88edfTnfl5fqpWVVt18vlOL3tuRssEdG9O1qQB5uRGn2XO3ATwUcE3Jh235RMcZvTwABB7Ny7XWZKuyJzLzhMLaCrAXagMHSlMIxggpTzfJgf3jyc7k6H04ESdFubLk03tbarhyEg/JYb+lVVDMObs3f2gzZAu1kyXmXCAQC9yjncu4p04JwC3aw3Wyw67DAAwIjQ7mwmJUSYkVW79dZtWx0Q04Fm4aLiXZ43WhtdupWpEm1vz+nN3ek0J06kVbU3U0MQAFJFj0DuElxSKSwMrIBGGuwY4ACoYAA9At0iwN0yRKFvz0vEfbrh7oQouRBq35o1ta5uRrbzkwOiaa3de4MD4hkPd4cf/vDuj6fpGN5cPZSW2mBbdXOjhYVSJkZwA+4pm2iHeO7axhIisRZBSUSChAIuMD5FoAhoambVVJ0bpBMhhkUfUd6EwsRhA/sXrzQxgCNIkx1ZwxugsQAycAYSEAbmYIZxpo5QZxh8UgtXZ6NjvjvwfEhpLvlwmErOIpJymaY5lYMHa0CixFwI0m4OYwrrCHo+lX/4xx/+L//X//zzb2J8DWlI1awbJY4C6EQcgG5g5hBAjMzEEB40hnD0qo4ZeuCh9NlTpngw+YRHpli4devammkTotPpWCZ20E3Xl/UlJQHw0zQfy+mQkxqrclOA6HTz3YxIDSKKsbFi3h1fhDCM7khAQAEDfAc2BsUeFKPOwQBCYEaiPYDPLWon+aa5BDdf13VZ1tqpdmjVwWEiFKTCMolkYYxw0zBjjMQ8ZznN8/3d3fl4WC5XBWVOkgI8ENG8E9FcUkqs1t1QiIwwhPKU58KJI6xv2/rl8fL7p+en9WVKdBScMA8PfslcSjpMmZi7hXlXCzMnYJahD2GAwSP+GzDscCaAKnn3gRwSMAYlSsycJAuQmftuVQwEIwzhbDSZSzgDplAwi4ABJYEIYKHzXYGwsLpc3QM45fl0vn84QUD0/OvHSf7bo5TP01yfH5/a6trFfHJrYxDFozuhHVnsDgDB4OFu4B28W+irqA4E4NtlhxhMwAyMVKZ0Ph//+Mdf/st//k//5T//03/+T/94dz6uy/XT86ffPv/+/svHS70uurWKUTfe6tYX79dtPa/b/fl0LNOEiG62bXVZrl2NmZKkXDIxelgSIFEWNIduvmxV2I6tAfLIBx5pO0/PlzJ1ElRXH783amCTorPD2fF4ZJKvtmQw894dIYRhrMiJECDMws3NwgzcMQLMIjGlFETRIzzs02P//aNfXvK7NzhxlgkKuISphgZ5EwlKPVyQLCkOSMUQ9gRRENkeXOvIKIeZPWxVbErL6tcFLehwoNMpP9zhoXD52PCzPlcHBA9gAZF/ZyUJ+Ppk8lupm1NKiZGNMoiJeVwmQaQ3b0/v3t2/ebifpunp6bk+X57gIpJSlnBiTC4AhBFkhkBJ8iGwdXtZq5p3Qi5ZEBiCAbqbhaMKgLs2G6kmN43TzVmwk8cCdh4i3vaXfzuq2BnO4xcM3GBAUwQxnMI5ggGoN396elmv9XQ4TGUmTElKToWIe7j5Gn5V06CgFFyQCriaAXSLruYahHKa7x5Ob87lPFHqdbHeonWvaitwSSGABe+O9/MpHY/psr4sa9OxOqoNDQLDvKn17m2MZBz09WsZ4pKh0ibCfR1/+8AiBv3zJlS5zT5xL2ZuLLpRTbiqN0QjRgzDUAoXopTy6Xx+eLh/9+O74+lAQs3XuvSma7fmpt9hEv52KrSbwG+H1u542uMg/uZz2bUssAfTRdhwBKAEgTFrKKBzILPQrZJ2ggBAwUEW0IiluTrkkazqgiGXK1wrrOYWpt2
F6f54OB0yETTrpt3VMRidwtBi/w4SBGOQkgQHpSAFH6sGQIAgBxycuaH4/X5pN4bJo74DhAhXtdZMLXxESLBwsnCPpt0AWuIOmUqe744/PhzeWO91XS7bi9UtWvfazY0TESRAsh5k2XKCcFISTTFGvU5EUYCEiBO7QMVeXaM3UIWgCNcejkANkELVQ5wlgiPcLcYi6W9GfRHogQYEiEZkkogEOAcJCAcxEDtRjMnTCBX2IFf0hhRpLsdTOc1J5inNhymVTMwiOZWZOKlZ08ZJmYGJmRKgRIRqdW/zLH/8ww//5//Tf/3Dp+Pj8v55+XJdX9QaIjI5jbBxRKNdJCiCRACIFgg9iL+fxCAG4/AYyy3PnZmYhYeErDddl+V6fVmWl5yE5SdJXA7p3Y9vUk7L9bqsmwjcH0+5oFuqxOjWh4I5wsaI1hSBRrzeHkGJAIAxoHh7UnAghGPAYKEAAQMmxAAmKlwSJHYkRwzort16qMIrjIdApMGeEwAKQjyXdJ7LoaQpixANrxEFZN63S4hIxMxZRFMy1cAhhOPurkRQRhKhQweHcAQCIoGwttbogbJea2vNwoklZS6lHEr2CAxHiESD+EwQ0Mm/aV9G3C1xBPi3xferzyaiN3t5XutVfWsI9Yqby3x395ByFhLc7W5DnOoIncmzFCNoPblIkgSCvYG5KmgEqjkLvX13ZGqfP2y1tpQKoYS1XKZf/vSj2cN2fffPv//2z3/uiZ/mWWXqnhqmJoIySCQjJnqfxMDuDQ438xbewppb92/7V8RAMRaQFJIhJUiJc6Y39+c//OmXf/qP//hf/+t/+cd/+Psf3r7dtuXPv/35//vP/5+//P7np+VLiE2nJBOZg3tU3d5/2db16eX65c39+f7hoZTJA+tWn5+fVU1SOh2P8/E0zZO7BYAqdmuqtqzr8+Uy1ICH+UBMGl0mUfKlbw36dEiBbrh1XFa7BmjKchBCkdNd4tdFzDcmPCAMV71BhCpYA1cIh2Gg2X1+BCR0d5fcudb+eO0fP305T9d5TqXQ/QyCUWvUTg2FlXMKMCJI0FH78KAFkiPquF0i0BsiUmI+TP4m6LrqtuqXL/HbZ5xO/Pd/kp9+md68e/PwW8//+vjXT+vTpmrAAvyK1n1jGuxhS2YeESKcpYgUSVwONNt4ePjhkHKSP/3y8MtPdyVRW5dPHz48fnnpzZnldDxwYvcGBJwLkkRwRkIpaNAUr2tflgsGH2Yp+ZDSMbG5rhHuPcy0bVVVmcSJEOMWsgdqMdzChAPJg5IkuRPhd1VMABigAjAFYNg+bB8tVASbohMQMQAPKsI0HQ/zOaWcpRznI7Fs2pZlW9d1Wa9yREgJkkbqEAgJ05Tu7k8//fQzPvBP9z+9Pd6fSgHTpfW61XrplWyblKfEcyp3Mk8pT+c8Sdlm+vKkSq11pE1KBKN5bbrWdt360mMz6F+xnQE7VN7DCZwZRXZZ4W2fhPFVlQaBtquzgwgAhkNgzEJ8MGV7BaIkw2mkwIDMZZ7e/fDDDz+8u3/zhgWX+ry167K91HaxETb9TcX73W44vqp19tI3br+Hvo2Pbq+FAiUQHbrtfCK0ruuLaJdpdsIuycghOhCmVJIIkYRaW6+mfRwwY5Bj7luDVWHbqM+Jka5LVQsgTkiJZUrT+XA4FG6tmtrgp2IQDQwZkhMDQcPuZqwcCg7JiciNIEQI0EM8wMmjd9Nwe3UkR4S5uRkD4rB31dprVe24Z60TJ2ZJ4MGoEUpON60+RgjiQVg6Tq2ttQ4zrlqt2tw9IaGpZylAnJLkNBNjN3VXh4khClEWTjk5RdXt0q66tNiAgUPA3By8qUM1I0d3JgRyDdPQAH0t7vcAtejmFsYw2htKGSQBCaA4UxADsu9tAySKZEq2gm3o29hvH+ZynIvMc54PE5cUgcyClCKitlW2i0jmlIULCQWQq9a69l6T0E8//HB+W/5h+cOHL399/+mv7z/85enlS6uro7EYJUQmCYCEGMij2QdX80AnjtcltiRhHnNeEWGSkTe++7MRAFxk2EBqqwEWFNNxSoUPh+l8PK3XdVlWtT5ShaaSsjCFbx0ct1u2hrspBiHKLZB0XOOIxPsgEQBGEiqYg6srE4MABUGAMBWSDIJ9+HLVtrVer239puofzpyJICHloFSIAo9JJqFMyHCj+PqQTJKwEKCqXa8Lo4CDSMkZiDjcECF8YoapZCLEMIAxltuTtcBVm1pwq81GGOOIM2QWkQgHxz0yayhC8Ktzch/Q3ZwGww32t72OO6h523qvncOJYYTVppQTp2983BjcVUdQABu9NKAQpZFzAC6go7eLkX11OIpbrEsPCOYQcozGIIf5aFC6p6eX+v7P770uD3d+ftDDQz099MNpzjmZwSi3EEe45+1pDdpNq1sN1fDXrBhimGc8nWEqPHKES5Hzaf75p7f/+A9/+Id/+MPPP7+ZD2lt1w+ff//zr//j1w//eq1PIH1KXEA8SA1Uw7tbb5u2a7W0gVRx8gju3p185J9zZkqMQuTYrde+tt56b8uyXNaXiKBkVa+S2Nyrby16V+UgTwUxaiwtlh5LgDNlJJcMKX0HiLyJsfGryWVgn8zIDQGACUgGBQcghjY8UkEL2oJXtfef2yH1d3fM93TIPAlOGXMSWMWDiUASOTAHqA6U4i6BQAJAd0c3JCJgFOHDFO7YN9iqvf+E8wHevoXpQG/eTIFS29W9tY+61oC/jYGCUcWgOw4P254IgwGMnBMn9FBo85Qe7qfDlH54Mx0naOvL9bq8PD6+PL5smzKL1ZpKQnRgwuzACTHVjq3jtm2fH18uL2s3F+TegRlljz7BsfAI6xB9R0XuJcnNIGI9DAkJxhif/ifqsf21aESP/ROKHfYLQGao6r2ZYFAans1CUz6fH+7O90VSyfOU50Csvavqsi5bXU+niVKEVBuTBKFUyvF8pJ8z9/RmfnOgwubaDTSs2vbSzBWyckl85MkKFZhzyoeDI1yv3axvtautORATGrZmterWbFOvDvrvGmXwCEIgQRGSRMwE30X5jH8wYL8fbxvVXeG+p5q5q9qIokThLFSylDKf7u7evHl7//BmmuduW+3Lsj3XvnRre57tqzTa198ZvvrJfVJ4K1l8KJG+/1yGZ8n8K2wj3JojhCWgFCyAEWoe4PtWUJxAAbq5eqhF67A1Mk8BEC7apQkzYgQyQU5yTJwQCIKJEEC11ba13sxswJUMzQHUET3QzMnFABwVyCPQlSGS7OmQHMiws8HiO+1FuKubUqAZeOu9NVMF8LHMQ6aRYYQIwgwcFBHReyyrPV3aZ94y+7S1umrrro4OaB7qar5ZIKh5IwfMx+PxcDoBztfr0moDyIyQiQpLkRIEZEs3uNYaazUF9SFF2mORwQgjRlJxkO+62FefjJlflvXpZfWIg6eSAcZkDADRmUI4SBAZiHZriDtqpfrivrH4LHJMMuXpMB/KYZ7KXChJjKWsFOKkvdbtOSWSxEkyQCaCHnZdL58/ffjw219Ut9Ob6e7w41QOD+e3785vvjy+f375uLSXGk3RggAgSJ
wCRvir+h7wCfjdVSYlFyJkZCHaqX6x7z5H+G7OaZqzJL5uF2GcT4c3796WnLZ1+fjr+8R0nA/rsjw+fjH18/FEMgmDbHFt3WLEW4aZYRjynrkV+5R8j5LyW3lPbkAIETYmn5wBiBzFOCMlBzdvW63X6/PL09PnT5fnF9N9EksAGeg4pmAsnjAMyFxiKK88EMANAYSIiYQEAJdl++tff7+etzdv7qc8TUi9S9vWYD8cjyJcShrwRJTWtYcDUWEOoh7jhFCFsPFAgxG/NG5s3H800AruBq4EIQxCcNtzW8SQ3X5XxESABpoDhDFGLpwOqTzMh/vjXI6C4jZY10OXZoO77dG3dTF3d0bKiAQYSEIM/O0idpKYj/jDT4f1GrWCqyFEKLZFQPI8xfk+Xe6n5w/l99/by4v+AtvbB/3TD8fpcPr0uS3X3r1bBN6CxAOiu1Ztq/YeOqwHX087Eby/5x9+pKkkkYwRx3n+8d2bP/7xl3/4D3/44d0DRP/0+den58+//vbXP//1vz8vH8scb1IZSY8BNDz73s0aJsTDXNKBI7liRzQueCoH5nSYplwKcPSogdFse1lflmXZtrVu29oqEXBtLQQxutvW+rj5xdkoE2HX1r2OHMCtB4ZpV3vVjg3Zb8qchAhwiJxVbYzTR9TX2JcBgPONJoc3SfiRQ/C3L8HgGJbFinRDZmAE7NV7BQJGZgJCt2ga7gAMiINfOlwEw2ysjkQsjFNmPXjr8PHTIOa1v7vyzz+m48H/9ENojS/P8ejRWvQWrww9t2vtK3tjMNMCW0cg5EwGYREl008/nE9zOs3h/fnx8/X5ZWtLY4gBRwozbwDoitDW7iTMs+lyuf66LNfr9Qkp7u7P8+GYOBORu4Z3947RAYBIUxq2CBj3GhEyolrQkDsBIIDZyPqN3jsA3OQf+/Mzunk3JidCgiQsGBLurVk4oCNjFgLmfDyemeTd2x/uT2dGziwiuWmvtV6v12Vdm3VKsxRQWiE6QQglzpDP81nucBPasC1rvy66raFd116fe2vdhSA3f/GpZkv9jg4HzgBClDxsWxsSTMgCBNwVeo/evSk0g/7dCYA3Sx8BMUqCnCklgka3YUzg688QwMNVFTHESZiFGRFQ1czdzSIQWJhKns7n4+l0fvvu7fnhPs+le7tuz8/XT8v6pNbi1d89lFoAwyq3E2JeAZ/h9dBlFwJ/ncV8vbZGXTy8iYQA4WiGjmHkwiwUoL259daqiWLiiFCILfCy2rr61lCdiGTKTCTCnIQYMXEWxkPJ54MIqvVaW4uAZbus28vWlq41y0TEnULdWlcAj0QmaOAIoArW3d2RIvOg1Ih4MDiijSS8bybLCFNV7RgIPXqt1hUQWHgMygaCfzQeLEiAhk3jsgR8qcEv8dw+kM3eqVk16ZEMLdAwFMxM3bVbYwjc0nT46f5NElT9oB0CHAkQiTmndETE0JQQSDdb1+WqzSsnSDNJThTsAaAIq4cDzwKI6m1oFsdr6do/fX786++P92s+n+bjLMdDMuc5qIwUW0Ee0LuRQtVdN92e/PLRWPnhPE9yEpmklOl0Nx8OkkaelAhLlgxhvb20rVZpzCFciAuTqPfnl8f/3z//8//j//Z/X66Xv/uHv/u7v//jf/j7P/zwxz/+w0//8bJ8+PDpX94//vXDy+9P23Pz6mHITk4YZIYRvtPKb4/W/RGTWJBQkJhGHSPCw7Zaci6lFMmJBZ6e7i2s9ZVTIqHT3Xma83a5YgRjehb58uVR1ZkkZ3Y/aKyrCXiDEcrqgTjcMnt9j0QUBCgwZNVDfD8Cih08zMGZgBwdAMzNO3S1a6/Lul6v23Wp69p7fT3D4NjBA8gYzGHgGummEwwf+6ARxElE5AC1tt4ftXvKSThNuUjKAGFGxChMLOTjvUOzMIAR2UM5i4d5GGAzG8WMme2PCB87x/FjDwwLNwwXggBKI+JpQNIwRuz06xfiEOoWCPOhYEYWSQea7o/TaRaWvfm6NayDDwGo7s20WhCgIKUAAwwkohCKURftmgHJeDyLCPDiffNQMNdtuYAkziLkx7n46byBCllbYn3e6vWSGTOQJQJgA789/9DBu2pVbW4azt93MEQwz34+wzxzEUHkwzTf3c2nY5lLgtCnp0+X5en3j399//G3T1/eb+0Fk5eEQLcFOaAZekdrJIi5IGbrsZq2gRUQScRghM3VDQAcMWqrl+1p2dbWqrpCCmRU9DBwt+7Wx6eFEMHQ20D5Geiwp7v2MPMWqq/9FiOmG8cz4qafpD1rgSkL8JDQ73+EYN/yOjAIYjf68OigeMgxF59KpIRZ8pyFcWSw0Xg+swMlCBiY9N18AD4+eggP1SAOQGKCksMMLtf+oRtBM8WU6OEMh6mdj1qS043/9J1B4fsvQPSIrhobdgu2AAxzY6ZjmY6zMHmv9fLyvLxUU8K4ISbcXcPDG0QNUvBA2Jb2+fPjsi6IOs95+IXVGozgnFCzSqiAROSSiQwAGGFfnTABoDPTKNx8X0eEqnZV/1sqNFIk8oTBCCSEEIgSbrvzJSI8PNAk4eFUEpfpUFIuOIBAVde6Lsta6zYILiLIjBFmFmGVoiL2lOLAkyNdX5bL02X58uS1JgZt3braYPI5qKqR0gzAIFkoi9AkBDV6b2rXKxtwtiCFQCIhykjpfzVhIgLJNM3cK4e7agDeRtnfjQn21e6oUplHxQDqQWSqPjiKxDLNh/uHN/cPb+bDAZnatl23l7VeWl/HmXUT2+xI9f1Agp3sgrf/P+qXfV0yrOz7Euy1Oym6WXdjBBqodIx9Q+gew1824r0UzNzVycwJLKAZXSsuGzZFJDnmXHJKw2oyonaRitAh5zkzh2rfLssi7Ot6qW21sNFNGYIiKICHg4W1MZhwdHADizDcjSXAxEAEYdE9kIaW9Pv3eHgJwMDdbxUMMjEAmoFbhHXEEQ7PABTuiuvVP+MGl/4ltJBliUREmAER5lKspbq2aOoMQOQQ6t66EqcyHwDEzQkjZUwpMR1uG/y5yP0kevXWuwMEJUpe2MAcAwnDiRALBTSt3pt9dZZFeO3bWnuqShzuWc1a59Z4alhmLplSJhFwj15NN9Mt2jMtj5aCVdSPjgDCqUzHMp9ZskgiSVlKTpP3RftL78u2NEISvic8BeVtW748ffrt/V/+7dd/fX567tCdbD6Ukn58uHs4Hcpc+HAo+bOUF3laH2vbwgGcwjiCyIUQEQ1RXt8vgkMySCwsWaTkMuXpcJiPp7vD4TQfDtM8lcIvl+fD6fD+42/L+vJ8udzd30lK06EQRMlzACCTdjMPIJTErImZhyJ9rDj2i3r4o3Z6q+wj5JEI5rs85jaANAaAAGvdlu614Ra69L611mpvCrsY/9sd4+ahDmBMu6OUMufEQjRm5xg+/NIjeMVuZy8iTk85p3Q6HXPOLFhbj7AxulHrVX1tVmsnIKSYUzrfTUiBvPFLH+mSVfuUOQZjx4Y6GCBAVQkRIgggMQZSycKjttp1lg71+w7GQ7WzwMMPb4Sy2pWSl0NOiWHf1Ma3IgYIMTzUowHaeNkI5
tgRHSkREEfgSKsdbgNAFigTIWEX7huuq23LRR1EErieUjr//Gb6+6Ktfv70+fd/e/G+/PDj6d0Pv5yms0hqFgb96ztvZmo6kkhgSOG/aZUcuYv4PMVxTjlNOc1E0Or68vLY6nLdXp5ePn16+u3p8rjV1bz7aPockYmZAxHQIZzBCImyGkbvm1mYGgCyJGEZAdO8x6yGtrZsq7qCQCooIkT7viRMkUIYAiCGLDfUb8pIQPYIU/fuXqG3b0P8iHFORUiwBMsY1/JIkWKCMgGRu7v1cbajMBET4HAuQavwcgWreH/C0xHPJ7hPfJznuz6fZm6VfAf+BCcsRYyckQH2cC0IGUdwgDcFNCBiCEiMWYKirlf761/dLeYZ9Cef6OLURhQsSxD7N7fFSDKU4dZnZgYA7d3UiBsR48osJAlzHpARqrVr764BMOo2oH1LC27RTBUxWMyh1nq5rNfr5mbHUyk5ad+u14bBwlyKEIJbZ4ZEOYmgcDjFcNXxzZJDXoaXvPYw58QRoN20qar7q26MgDOfJj4DZEBCioFSDIGCzMiJmZIjVxI6nEEwHHvVisbaY1u3dVuW7araRFwEEwMDgqZ9D++reQLIzEdzf3m6fPrw5fLpMXo/FBEJEBNxJ9qF15u+vL9i4LEc5/tc8GQ5ma7XbbtenuKqeeY8cSplSvfmnvhMuKdY421jCbEHLKRCx1PyHtZ3DumA834tMsYf4RFK4h4ehDhAmoHY1T1igAc8iKWcz+fz+ZySqPV1u6z10q3ZgBYD7iPbeOWJ+ndfNzXxd4umfzeIAY/YVDfTAwULMScEoHE0WniouQ0o+65fdwj1QOwBVbEZW5Aw5ZTmUqaScxZmAg8CEILElIQJAMJbq49PT0imejHtIgyQDLG7NwiH2J15umfFjBeLBEgEjE7kRIzs4VXBFQhFUL4Gc46OEwDNHQNJBBEYnREJ2Qysde3m4SyU08QpI9GI9TBsV/18bc/WkCEf5Fhw5pQO0/FhfoOdnj49r9dmgICJZFLXX3/79Xg8vHnz7nz3xpoOhQMTIHDtfd3CLJ1OPyBNrizXLxZVHLIVpuiKMOqowDDoZuulr5fmtx6bCMuE0wxSIlDXGq3360I5cSlcJimJc5IsHBp1qbZ5dI5GfQHHeEGaM/rbkyAlLjkduUwpFZZS0jyVY69P6/X3Xlu1FUwYLuBXcXl5fv7w6bfn5VM5xwT2Un/9118vm3749OVP//QP//jD2/vz4V1OUjKfivz2CR/juRl0JQuJ4MQY0ZMJ0fxddtJhPjJzSXkqeSrTPM+HaT4cj8fj3WE+5jKVqeRJjqczMkiW//Gv/32t9bIuRQgoypyPh+NWOyf2xbfWqAURiggJUZABuAMS+F7c7+U8jhseBQhxyH335eIruBO6KfTaYzFdAlbwTa2bxp7pjPTtlQSAGZiGoAcMzyUO33lgWMRgccAQc0cEWLe9OW1aL8v1XE/EWKbs4FX96flSawVEd1WtXbt2S0KzJCnzdDgCOm8eyAMl3L8iqgejwZ2YIMA9BvcPGEfXzjTys0cVgkg3G9d3p4Sx0On+VNJx3cihiozstbhlRX79/bgrG0EBFUAIBUi+Hm4EGGG7giYggkfngQkQKTzWxVQVQAmBwsFVo6eE54fJVJZtuV62v/76/HTZrgvd3TcsB0qZhEZ+srsB3ISsY1bwtzRVc4SggUsK83bdNMKIgYVfro8v18/Py+etL4NN4TsNAEbpF4HqGuBEjkiKAEDmpubqikGMqTtFBBLK+K4Ew6xHc7RhIoLEgGA6+j1zD9hFLfsknEaG4z5BGxFfgzzy3Uk+JCnD3ktjyj5+zCgMnJzQQd1HegUSE44Lfc/1qbEugAbvH/F8xDd3MU+YhUuSqeCUsSuggTkwAvOwcw3JpKFjAI9G0hy7OQQyB+8KHSQIU780+Mjtt98jwA+lPy+uTiNiieW7fv/rDIZopEP72NRISiKJCMgDnMKg98BwNzQ1NYqgfbI5KgmLiFBzRQwKs1iWer1uvXZhLLmUhGGbujELct5D3TwCKJCCEhETJIQUDhCK6MQIaIP4uCc6OQ/Pl+kuSX71UoiiEExITIyQwlHV3HE4I5iJRAC4s9AxMY2IGlPrXle7XpZaV3UFgKkIZ5qzZExmMoJsBSOxCRqTBWDTvtW+NY9uCJEhWJx47ygo2DF0i/ps108NIPFBCmMv3r2v1bQ3CCYoU5mTFAsq6Z5Ivr2YV/9BBBaaZrEevTkA+lhcu92iFPdag/ArYnIcLMSMAZizm8Wo+gIAmXOZUs4R3vq6bC/rdlFVvwU+f5UM31qs/ek9wGjfTtvYVb6xK5EgAv7dgAyBGYVReMfmRey67XCIcI9BlNsRfgARboEOEgjMnrITYL65ToZyc8BRGUEICYlpf+ICulpvrXnXwQowCMcIRkLKI0wG924gIm4t4Nfzd/e+GQx5l/CrDwUAmIiRRiAIIhPFKBxppJM5mAVCDJwK4pigDkR/tKhmW1dl4KBFaSbPJEHzu3kq0U8l9R7gQI6pVf38+Lhs1/l4PBxPh+nERBHWW70sy8vl+vTyoqplKiLl/u5tKbnrSuxTFhZw6RHdoYZZWJBu3sDaN0EVMR5P+f6B5xMlTr2B9+iqXW1rJIsKUSIuJGRg1cggI3NgRkwUGDV0dV1dq5kFIEuWPEmaUjrkcgzviOxq3Sv62tIa8BK1ff7y+6cv/7b2D6d3ls/o0YAfn65L/lTvTpxFf3hzPuYp5jO0e10vaHDtsCI24IFOA+zuIfxd6ru8ffMup3w8HE/Hw+FwOMyHaZpLKSlNRAKBjARBpcw///xHdfvw+cOnx/cfvnzOBGTtkHOeUplLLtkRni8XCzndCSekROi0u5F2bs2wJfkIJ0JiDCJAZ0CDCMNREI/NTbh2jYptaXY1XhwrkgIGAjAQIwkSf9WfhUM3aAZMgAjCAQhB4BTN9fZIvz1sISJCtQOiyMhmas0aYLAQCa11++d//cvnL88snBLnzMSECGeZZTrlwxlTMu8WpA7NQj2CGZlvSP1hOBrJC0BEMhQTQ+bsCupDBeaBAc7CkuS1kRIhmGk6TCUfum9de2i18JGbNs6jwbuHPZYSEMOgRyRCRsoQgmToAgiDKDOeHYTZLEYqqQhF9MvlWmscD1MuRKTLpX3+/OhXwCLTQe5/Kfh8+P0v/tu/1P/x53893334+U8//vjz24e39yVnYQx1ZkYYLGIF9NBvOR0O0B2rwaYeteHWwsLUSirXepHEtS21L802dYdADzLYy1pksIiAUDUPo3AhdPVBoQUBFkZgEUZANTN3c0tOiIKJMQQUxpTfWodAM1dzVTMbBRyqWQQwI7MzIyCEQyBJIkLs1lBewYcD3diM3cgZASA8uikipUScENABAilIgiHCgRHAyZS0Q2+qPdyxNvz4CKcJf7jHwwRndghNzCUTE/UOtTl6kI/nM47EKgTUm/iqttiq7bFTEiTmZkQojOaxVv3w3mqFPGFVWTqReAJK8m12GbfhP/L+M25WayXCXNI0yTRNxKTatfXn1YVxKhkRu7FF
mJt6jOQ7dIiIbu5EoaHNtuta1xoOkmRKqSSvrRL4oeR5ztM8A9C6Vg/wyA6ZeBKZSprDY1svEY2ZMLpHH0qYGLb4wHD0QDeI12iVAFNylTRJziQzBmnrm4bRkNuTMBOSssg8TYIlOrctau/L1rfWzRyJci7H45xKHKdjTrmCA3iSmFlmziWYugMSJk7TNB1D62ZRexjycH6hUGIuAdxMo+PT+6V1v/v5nE4y5eSY3adlha7dGpCXRGXO05TfIqZbwbA/Um9mcmSCNNFsyTomcTPsbc9xsLCwXcO4/xEam0cHCOYEyFMecSDKowiJ4d82dV22y3V93rarmt6sRrf71gDHoOLbQ3wvV8aPvropvxPA7MOZrxoyng7H+XiUlABRTdFjD4LECMB9+YVIQkIYAeqoII6TJD4dXNVMO0J4uN3+jcK8B/AGAICInI7H43Fm4bVeP26PrRkGQTCAE3oSEJSJSIAgTNVqNzMDHNiqoQQF9jFWCUhiIxgIv+MqCVISHqG8MBbp6ONtIosIROA8WB4wdtIIwENnDRDgGuQaukFt9mIbq9nd4b5M+f7+HEdYtDX3btK9XZbny/Kcp8KS/sMv/3B3Prv3p6fH3z6+f//p/Zfnz6q9TOU4H+8ezu/kwaxFaIAxY8lJdXt6/L32S2iIJ4FErw4yEXq4P/z083S+n0SkVt8WvT5r3ayr1a1Hd3LIIBmkYDqm6XQ8TikNt3PJMk/hdt3q47o9lH5KcEQhlhH0TIBoBuNR4GjWN/Uvl1p/+/xvXx7/h+KHNz9bnooIu0W9dsTPX17+pUid+KfTJGLbkejdfKTgtCEjXCGQIhyQ2MFSyt9NYn755Y+jiDnOh3maylRyKiwCgL3pdbmaKSWUjJKREyFD07Y9XwXjkFgItr51b5gAxaut1GmKAwyF3Ygg+uY68BsIAL4GgMJe/9t+/+2tRLi7m0HHkefr6mQAMRIEg5zh6+/e/3Kw0QoMBvwt6sfDzQethOm2IB9qzRiQFQxAi1AIRXJAa9qfr9d/e//59/efc0rHQz6fp3nKLDwfQHLJZSam1m2tfl37UptaTIcyzZPkQXP7FuK+3/qEhBxh6HEzEQGMgMkx3vye4EE0wrFzSgmJRtgL+shJGQSRr5AGuHVGDqAARpAQJEb+wOtzaOSCBLpHuIWNYEtX04iQDGUKgM65cTHvUXVjz3mGI5bjhZpty+XFnhql5wgEJzef5wToY+bl7qbKBPh9R2aBPaCaR9f9QLW+6WaoKYmFevQA8CB3dCcLvM2aRn3mugdngEVgOMdevcH+gsaLD9utUwFKRGgBBtgtwr11gMAR22kGqjDQXOZBSDRJBI7VMoyM18SOoK3tb/bt6+vwYkjUAZ0omEMSsADcgm+JQBJEBIG7giuZja05DFnApcLjQk9XOF/BUdW7BRKTILgj7TCnfWNAI44JkYE8HBTMojXrCl2BmZAMwkx9vAmtx8sLqQIIKkCNCAziGMFbX79GhhfxfljTba5JhCnJNBUiXN1Mo3dTBTNEwhF64UERN35zBAAOYGN4mFnv3bSDBxOWLFOBgag8HObD4VDKATGVcvKAIEHgQG4d13V1VdVVGCQlIPDY7MYeGaOEIQb7m/vFPWqzrlFIck5lpiAlDjUdsfQEQggYjoE0mOwpWSezrr2b+Qi9YuIpTznjXA4i2UGDrGQoIlOaxCTUNMzRQZCmRKCmrYVbN3USQkkiUpgTsvew9boqGR/okArPMZcUMCPh5QoIg94pzMI0f0fgBUAYeOX9NRJjmvh0J9NErlxXe75e3evAC+8nwQ5v2Z/3PrbGRDnnblZrZcIh+G2tvVyvrS/X9cuyXGrfcw7/Riy8fxvwbRIDr47yoYv5hoPC/8nyiYhKKaXMyIPdHGhOexrx6G2dYMfQEo27XhCnnE8SrNy0997QXRG+dm4kSRgBPBhCCJOkqeSpJKLQ7uQG7ggEiAI23PnMKMQSAAYBjntY8oBb7pPMweODPeqJR7Tb9xoyYhQUCSd3BzCkIEQi4YCUndH2mDwY0E3Ya7u9dnMgCHBD765Ng6I8r0/ndP9wfMsh9vy5rVVb3ZaX63JR1ZTylOcfHn6e56O71661ta4dAJApEEnkcDqfjsckqNoul2e1xkzoNqWJIiJRMy8yC6dvglTEnNM08TyxZJIEg9khTL16r7aj9NURoSQ5pHzMeZ5KyjzN6TDn+ZCnKTyWtT6mdZJDogxAPbqq1m153NpVtXJUQ6rbpx5Pj9eXy+Wv5h8lX6ejHE5pKjmcakmoiZOZLdv2LM6mV++dgxNkDqDRtYy5wHfryv1L/uHv/yMPWjALEY5Aywgw15fr06+//vV6fSGBPMvhXJ6vj0t92fRa65UQIM9oXrtdL1eXxgdXrR2hBVLocPr6bWoH6DjMayMXJsbVNS5n9LjdJ1/reY9QJ+WM4oxGihQ0dpgDYhoAHq9vHQQgxpwp54G5dwjcR5yBYxhIGBHhpgCYpYxEagwT9MTOpGb1srx8fH7+/Wn5/Wk95u5mOaFQeGDEMQnllAmotfr8Ur88XS/XRd2Ox8P9/Xk6ZCKIRnvjMFTBPvCuyInBMMx8DJ4D1burQ9h3L2Q0dZQICwZFdPeGdIvqHOQBcNhhTLdKH4ZjyBEJQwJ4dzANEgjEXsKBRaj3qrrDHXIWQkPqFuqxUK7vfpp6i7CuTZF8mtP/4T8/9Eovj/X58fL09PLX6+f1qb374fTDj3fzKSEDjVACU0L5TnYF6EgWoKOyQwhGQDKMzat6Z2IihqAxHjdH873o2xs+hAHaoZ08PY7WWxqQuxkA4NAU+p5EaUgQHuagffy837SJFE7u0aq5AiKWklKamKC2atoCIiXkPVAAhvfz9rkEJZPsPAyZaEiRGFlARBEBDN0RgpBCOBDHumqk5o6HL4g4UhjTFvSy0udnXHoLxGtPCgP/SxijiLGxZw0c+t29zOgNIcLcqnm1Ho6Dr0foHtrMgrD3DEDNwtChhLHtwKvba4mIwTtG0kQsIkQ0uSPCSNMUSUOujMgiaKbrVsfDkYiEEhKHxQ7PJRLJzKzj+AaH8TQgyCKHOc0zCOM0HUuZRKZcjvPhhCS12bq2l+vy6fPjb3/9rdbteCpv35zLPAlnB7agQA5wC3dzDwsIYuIReTyKmIi1brW1ez6XMuXMQIqIupdd6GO4F2CVN3MsMY/4VqgQzgA+7pJAgiwkiQ8pJY3qbClhzjmlCZtsrlV787Vjc7FIbuC9mj53BJpKmgtDUJkkFQlo67pu2/L02JSm+3QsM2M6kIgDqWFX89o58zd5C3wdxdBeKI/rJ5yF893EMZFN16t2j97VnCwcIIbu22zfsxO7sLsHMydJOeko5XIRQrxertflcrk8bv3FcXEc4OSv38PeGwzE6CsNMX43idk7jdvlhBiDV7V/OwAAhJglZ8keru57iNsAW4UHBAUO5xIChBugEGfOp+l4H0HL5amGIYgZuhmNKF6mNHJ8AwRCIHJOwgRu2latC4XKLo4HQBoLniAEhDG4MhswQQIIdBRAJgwBDIgermhG6tQi2ms
KWYx+fHDbyNACiIbRlRMzALA1De8QhhRgQBZqZmDGDhyBGIIImQQCPKhVa0/L00Ne85tZXHxtlw+fH5fly/PLslxM/XN8yJTPh7fr0tR1W69ucTqe5+NpRDyXMuX5NB1Pb+7vet9qb9enl5fLC0OfCx3Lg06z9Txvn3NO3+r+wN6jbrqmrTgDYE6U7rLNpD28hzaNFlgjez6n6ZCnKeN84PP93f2bu7dvzrmQ6uKgW/sYF4PcFF5ynxnFGtT1eV3egz5l6mCrXbdmsW6X8MfTqYoHmLaNGGHK8+FhLnyc+DRRUq8v16Z9qW1bVr2utiy2Vq/q1dXDutXWl97ra8WCPDy8JcIdShoOgUA4xiDX5eUvv/7Lh4+/A1ma+Hg3b7p8fPztsj62vjIgq/VagR7Xumzx4mkzqIixdhAMU4cgghj7jEAIdADAGDnGSAG457bZntNqvgsnLVAdFdgpsUTyljDC0AnGIv22M379RQREwAkl4YhcCNuHHIzEQDLQl6MnIEwpE0F4zyKHKU1ZCL227fPjl/efv3y5LNfaCzMCUgCjCxORgbcwBcqjg/YIEZyBjxPPmRAHlIUC+as1f3wv+9IVaGdFwRgM+BDQwHcqOdptCyAAdINgWATiV4/l7pUflakDjCaiA+ko9Yb+5gbWfDV5BwewCAcn4pQSHI6u2iU7kLobCUxH4RTbVYejq+Ty7u1b4cPyYh9/e3T9y/X5Zb1sF4ZDIsaJ75iZEALdxf17oOKQQA21JgLSEJwjhA/Ed4ADQ6AFqpsP+iUhIY8CLHAfIY17MGIHncWt9RyT7V2lsTdadvsVHBH0wwkxsgTHCY0ISH6DnO6T9vH3YoxtNzED87d6jAgkuWQXgbF7IsGckAUAB16LzSEMKJAYkQMp0J0TsiIbRQKmAApM0TGuCk8rrKaIVFURJDMxs7CkRAFDvOMDBwQAYKE9VFV7Vzd1twhXCA3GmFLsGUJA5kAK3sEJQEYczOvM6tulc+tuxiQm5zw+sUGYJUIbSUKIAOTezRyGhoAYxwBmKMvH8BNvRGEcXYhBmDAdpklySYlTOjAnBEky5XwIoNpqrevj4+X9+09//e29av9F3p3t3C3MbWu9qQ+VKoIjwoB5IDLJt7vfI4aunFhSKsw48lOFfZdWAYJ7gIVF68AecyICCjNwE0ZHNlMwREiMU+ZDlmIojp3JEQSAA9AhNHTt66IbAAabB1rHtcKOFVOtbSvVpmMCMQdHHHyRHtYhkAVTljzlaGHh1jtHaLe/6S9vomWEsbKLIIQklDkXPIhE7Z0Jat1aq9btq//cwUF1sIyYBQiZWUSSCDFGeO/1coGm/fHLp27LdAopQGORChB7MznUNfG6hNkrmttN+LeJ1fDvMETj97kN4jIMScqYhuNI/IUB8Br/pnAA4Tyf0nw3zwc1ryshoogQog1JCnGSNM+HuWQe6+HWaJ/ca6+r1vVWxADt1REYDGbtSGsKN+A9XYZoh5EiUIR7KGgDHb4SAMe/eYU4vK0IjEgBPlb0xIKMhKKMrZn3G4d+7EDChtZlQOgBGR3N0QC669q2pW09gkHAE7mgA4AHmka9bv3Lk/z+4X6rCxEBGKHOMwdJEI7DZVkvAcqM2tvLZX1+uS7XL1OKw7uHMh+4HDNWlsHx//oykDFR5FAKJWFOkkopBDIOZetmm9lFueOBU2ESjpTocJxPd3fnN3civlzr1tduPVpPmwK9NJ0JqFfr69LWz6hLkAoR4aoe4ZvgNmcES1UjmkAqnI8lP8zpIJDd7LIuVq+tbVttS9XrZi+brRrNw8JHkxv/jgot3xLgYLSKgMSIEBrLdvnr7//yz//y37tVFJ8Oyagv9UW9SWJBfml6sVCv1ZZFLx02Z4vAZXUK9G4BQMIs4+2+OW8xMIiBMALNzLrq1rXp0Cqou3p0AAMyTCC5ZIxgx4bNu7s6AjKCwAiE+lr1B2MIB1EgAwOFhVlQgKAkYkbi0c8CxrjoUiICCDrM+eH+7nQ8AMJ1WX798Om3D58u6wIIx0O+O02nKZ0Ky4GnFH173pZczvdjO3g6lp/fndbej8nRa2sRLI4MhBAKEUQQGOrmcDv4CcHBzM3DDcxjlI1f2xpEZMpMGYAhAMiRdgZuON2EfDvDAcC+1iVBjNARAZDcORwwNMZddCOCxL5FF+ap5IecgOipdQzoZhyWzUFdDc1JERC5pDQdjw+nw/27M5ynA6F//lS0bgzRlrWWOJ6PwpgIAIDdyb7d/ojBFEkgZypFEMLNu0ZAMDNidB2ILAoDM3PffeNCQQwx3JBfZVXj4YiIg8ofMPzvZnEbEiAiGLo7uPlYZkbQTQ4VEUjCKTEzhTsGELlbCwQCH9VNQmBARMwCifkrgR0xUvKcjQfUkpATljxUojYYfwPdOwg+woDklEJgyJKEO4AHcGAxTLYBXJXMCcNbb0KcZxGWMk0knDqYO4B6mI25iZlqW7dl2VpT+Lp3AwRmSHnMHfYRBWHMKQxBg5sDhL4GqY+K+usKKSKYOecc4Wp2va69GxN/bbYRhx8YePiPgcIciJiRmQEpxtQInW5xf6rdvTPhlKfjKZVSWOYI7upm8PJ0rc2er8unj1/+/JdfP3z8/LJsh7nMx3OZD02tt/Z0WbT243xIicl7ECAmdwp3HhlLr54vSEQsiAlGiyITRLiOMZaAg6l201brZn6c3NR7q+GapUTQqs0NIQR9SnSa04xUFDa31Rp0czRHSoGwbNeX5cIoDEIJUbNTdIuovtTV+hNJTEc5nNPpIR0O8/GcppkgrLfNjBWCBTlANaz33rVu7ZtOeXQ8Ee6xm512OnaYaaDJjKdUkN/c3x3atq3bsizbumzXdeu9D+/i2AaMHclUWFLKUwn3ddsAIafcWvvy6dGgAU2HMYMntGEuHPcf7Jv2uG2TbhXw7o7Ym4mvk72vrutXzxd3r3XZ6pVSRhJJtCv4wEcxhCMZNlzd1VG4nO/eTId7RFzXdfQmex4OIRNnSXOe7k7n+VAoXOu2tNpN1RzBW63WNwQXAr5FGjuAuwJ4oKu6tyDHLMy0VzooCuSDkKTm1qM7WPh4oLzOTN3fGxiQd3YfUb4w5jGBAcFdARBH2g+yIDpYDWyONUK7qgc25DAwD0LQ8NX0cVnOlFne3N0nPt/FIV3w0i8vbrHopw9f/rn3p9PxNE85ZwLE7r25q3ut2/VycYcpH8Pgcl237dracjrK2zcQxMAY7IbV4FvIKBHP+XScHoRInDLxzOk8l1IS8wiwsnZtG6+2aHJn6ERMDMwZKamhh1Xt3Tdnj6itd6jPyTMigUHXurVLtKqgk8hhPmZJTgk6aA8NMQ/Eie0e7T701Fy2ZtrWtq21rVutS63L1rau3d0CYeTzACBykpKkfKeJuYm99kN67DbVbKvb5Xp5en788vSpaQ2yvJCTdVuBPJUkiEuD0NZs67EptkDDYFWszaMDqDNDmVmQdmrQ0ErY2BKO2cE4mfcNLgHvj+gANiQFDsycqIxmCmo0H5PzcQXid7OYgZncB7EBgDh+z1h6RkRYINHgWkqSMpWUWR
gPh3I6nYHk89P1w6enX3//9PxyKYzHU7k/lvOcp8R5yHvR1/X5KlQSAdD5lP/w04NkWNZ1TH6WpSIoYJIbDRz2AgIi9oprlBLq3Qz2ujK+v/uRSIRGgw8eoIBjl0Tu8bXlQR9Hgu//ijGJgWbeAHMEAXCExu7PHaDDMa1AgGwmy9UAgwWKoCkCkqOEeoQFDrmrTPmU6GQtdUIOT0nufzhhtrYweC8JpWDKBMglMTVkc7TvtmP7NBcd0REGZ4VG9z+cXEP2MJCBQ6E8PPLDv/s10HSclQ4Q4eP5hWMX+Sq2IQBjrCk9zGNoHb6NUQN8QCwpEGHQnCBCveOtDx2gPjfHkbjyt/1mAAZLCMNuehxBPw7hw7U0vvU9cA0BmYHGZwVoEhgUSCEOFM1gbQCAqNFqZyYGmiZJueTCbhIREF29dtVl07r2ZavXpS+b2mDrMQGBgRFDSsi8Z9YxARFMhE5YRwmsZIqvXw3dvr6+QUQUgWBuZq022uflOJ6Lw8jEkoDIbadnISJzAiRVjXAgYMYpi0+JwnNi2uHSDEGmoWpr7b2rGq5be3q5fPjw+f37T5frQlmOp9PxdCqlmHtrrfdubgFDnMtDWGB688i+/iICpF3560AhwoQBCsrEU5rAsUYPqOSAMYgyPpy6OUtYbIAYnFM5Tg/n+e1cZsG16qX2cO3OiIYYBADm2rUqmJAnEAcMZievqma9twqoi0aHko53Z87HwzwfxKmaG3IQAScSGFM0rU23bXtFHkcmFk5MwsS7oAIIIFxNoXbbktDxVA7z5Has2+l6XV5ervT0fF3W3rvdZEkb7/5MQkwpjZ+McMtde2+tOljbNGUaV9HQ4eBunv9ffH2bzdDtcPtffkWEqqlqkoS3bS56AAQBEiESB4CaIzLnKc/n+XDKJfW6ujUcmsXYz3IWGf7ZeZ5KFm2bu2l4DP6haW+btjYOh/12dr+tnyMwCCAAiTCLCEsQOoUTBCi4o+F+UOGu9iX8n72qbzcNmo+51Z4MSMLMgkICSbwwzuGoVHsshtfmG2rvDoAEwInwIHdvjj+cDvdmsHZtQS6ZpaTIfIGo2k37ta/X/jQ//fjwww/v3s4/vsmlkDqoImh1W5fLttYXfMEQC4hQJEIRI66u2/Z0qV+aXR3a1w+Lmc7Hw/3pHgCYKAtPSaaS5klSJsQwjcobN++A1IEcxjG3tSbXJdCJ+1ZXh8olGDyCItQjMQ47YHPY1m15WZc5ZaIyz5NwEc/UAV0oHLyE5V4pzCB63bZal9YuW9u2plvTrXc1cwxEYAhEjAAkICb+XtwnX2OMxqngEa66bsvj8/Pzy0U1EhckBraUqXut29as19ogNHo1rxbVccRUMEXxyv1iupq1VjLev5snIhwRp+gBY8YMqDi2uAHuY5JN7u4ymk50DEQLcGcGSUKIBKhdbcAtMYZo4dWabyxkEZAByDwghuidUTggWlcnZM6DKCZJTqfj+TzPI4Ez8XWtv/3b73/5/eOvv3221v/w5nSaysOUj5mSICMSkKst6wuGEvt8OL97ezjfT39Y759ero9fvnz+8vLp89o08jQfikxlBI12GHZvCIyBHHQz7X1QdUZqdtw6GwAARJBEnCjILdRD3Z0oDS2If7UIDv3nvl0ZJ4SGt9oXCCcmwjQ8Ye5h4UQOEIEYwRhlW/zTp/dO+u4nng6AFCwQyB6MgcRIkiY+3k8/5bh7fF8/6RLeQZSPfvfzFE6EnjlyYjkU6nHMeauV+hbWv3NbYoSDah+Lh/GN7qyLISgMdPdwJLrhcxB3OcjtNISbzmYUK+4GQ/GKNCYz3z/XCHAgaIfmNL5u+iOgdx21EtGog2NYXQgDAQTJAmqziL5VaF2/ehPdoVUwBZ4hpd3p2upQ5qAbAhAx0Z5f7+hBQEzATI7AZG5BAe7UQyhCFTcncMQGbe0Ipq2d7vTND3Q8sEAiYHdXxa12t+1T3Z6v7fkaW6dUsGScJ4qA6xKEIYxZQHjIxkMQSpKR2Ll1UwVt3z4WxH3RQCKjxHf3MZgRZh8qexgDADez2MPoaFx83UzN1J3GpnDfYAYh5CTn01yStHmac0LwbdsQnZbWFGqzdWuqgZS3po9Pz18en5frCoBvHt788OO78+mUc9a2RQQjwRAQB5EQIFuoIUDYUGB+fS1fmSi9Wx7veKQhphSSYzlhkLe1I5YkKeXMxUCnkjGiZNHuBEhS7u/e/fj253d3fyiHfGlP1MHVtG+gCE5gSIHCzIS9qbk7exiTIGZoazVQyhHo3etS23WF+57L9OP5ODWjjt0zInmYOoJ5tKp1XeqyfGV4IFFJZS6HlDITu4GjO/hAMVVb3KOUfpju5vmQ8zEctrU+v1wk5/T4fLlea21uEe69tdEf5ZSYBJF6rd0ag0d4yWweWq1eVZgQZfd6xt4xfr3jvg3r9p/b6/SIoTPbJ3Wwb6ReFTGwCyARaEjUwB3UecRVMhOjjbFMKvP53eH8kLOAt217rnUlQmHRPUAAS5nm0+FwOiYhs3a5vKzL0tQzYmsdoGrbXIc4f2zRPcwAEQUSUSIwgZ6RgESEmW04rsHNHc1DkZBTZka2IGvbN1/ibUA2MHIDkhHhX+VDSBQRhJzShIQF52LnyR7IRLWpXx2etnhZbG2mnggwUcoPp5//4y//5X76KRZe1/pYXzZdjJYVryZqGEvT5UtbPz8d+MvLTz1aeffmT4f7d6JerKt38rSk6u0ytNFZUCRxKtOcIqeXtn5ef/9c/7K2F4f29bMRofv7+e3bo/VAJCZJiVAwiHbpKIZnTyWHISUEBdPYVPvj5+u6zC9FUngsnG1GSRMyU86cM4pworwhLC/L1uqHD58KlZTfUS6U5qDRbxKABVBT7bpYLK23bVtq29Rqd+sGamBBsUsf3UOHIon2ihG+K2JsCF3dzKy31nqv23a5XB6/fPr06Ys7lXxM6MjOCVB5jaq11r52W8KvjhWwAwEgJyqJMqiEsba+XurG3Qlm7XJImCDIAPZENzba7xYSZBAKwHAwhL5nIxiQBoKDByeUksBjS2K9m3pAwCu2/bi5kBGZgdlx8LkH4oyJKNzULBzEeEz6xlp/lNRudu3989P1r++/vP/03Lsfcv7xfr6bSybMBDyObnMF601dO0D03k+nu5yKTCUBpgDv9PhkbdmqbW48sxRBAh9BJAQwot1uNBKnYX7/nvoCwx2Tk2Qyq+Hd3XajQJC7mzsOZqB68DhExoM/BnPVrBKKcGES7+bQxqgChgYwaCQ+AFitq1E3mwBlRNSQuzBPVEJKLinDqeCpXuHDp89r3Vh8vqOHU5lmIkrMIATCRMKIcpoK1a1t279bW3oAuJmhEQXt5jEc/rMxeNufrrg7XNzNHIiCeEwWvhlSxkzNh96HvlkoRsxHAI7yZfReTgGOcTuAx8jELXY7CgFRECG4w43fM4YvYWZmtUPTv6HeEACNQWAEhoGDAwKAICE6jG3mGD8gwK5zJMQIQkN2RnZFNw6P1gEAw5A6eA8I7aEgeLIClEQSI5t7oGKv4bVr611HfU5ISeg4USAYQIQzg
zBkhABQDSRgQRRkdLqtlP9m3vf1C24SmdGG3rQKo7gx93BzQ0QHDgTi4RQBjMAbcfr2qGPGeSolJ59cCBGgt86IAbZW22pfazd1oLRVfXp6Xq5rBM7z9Pbh/t2bh7vTMWWqoV0kjYFmuLtGDK95AALt4eHfqhgiQWBVb22U76gBGKHNRQCdhHNJ6FkQFBG0mZkKM05lniaTqGtnzG/f/Pj24cfT9JCSYAB1AIZuZah9NCJjujsc10N92q7b1joYBkegk3a4BlkqiRMhpVIQUzgYRDDJLMdEWqkqNgRHN+agIbNx/fr+JUlv73/85Ye/E0mE6Abmrqq91do2U4tABEM0IicGFJ5gGltWEclZrtdl26qpwYhOahUhRPKYegKYKTDjXJIHm3sYWAPigV3Za9fXRczru/ibrOLfff1PxxYROCxze95MfL3dY3fHIfVgkakc7nKZw1qry7Y996YBJRC6mpuJJGaeppwStnptbbm8PNWmLDNyCkSzUDNV832cBIBDuTOu6NjnOokJWSgBY4xgbWD0CAsAIOZR7ZlHpYTI372ssanY7VX7BhPR3U0jrIepmUWClPlQ4kTrDJtg5wSQBI8xnXC15DgVTBPxfHf+8afzHxPOn58en65fPl0/X9qLLutSny4vy3rt14suF61X5ZRMwUM8skcadq0pp5jjfNqI8rgdKZOkoOQkUaGt7fl5+7j0RwcdMdC35wuWiY6HpAoQNAJTgEEBMJAdCQCYacoMTM6h6D3coEe4Nq8gBkghyFiZNymFUxLBBJgBCgIhTkCTwaRRuk89JvJsYIEFOZhspHtqtKa99lp1rdbUu3rozpfAvXQEH35f3JEise8lbl/iGGq6Luv1en18enx+fn58erq8XLbrZb1ew+U4vwkyQCMBgrRSg75eHuvSnlFWkEocSMgowUkSFZrmw1Gsr5d+vdSlPeUXPDwUmSjYiYGRE0uhLJyYMjHR0EX6HrcZLaA6NEAjhAg3cJeUIkkp2VTdWtywEV+vMULiJJIFmAPZAJiQUuIx6XDbgVmKSJATqttWN/f+/OTuXtWfl/r0vAbg/d35UPjtMR0SYQQNB5W5VgMKCLOmbdPL0zrPl5xnkZzT9O78Vuh0qWmzj5+fn3r3U5kPWdJY6I7ViXkMlToToQyaN5g5fWcYJ8JpmlLi2q+uLcL3AN7Y41CInGh4tUcbMEYXY8yliCpC83QijM3rjuIcvHccbnPgFPMBzg+kwZIYkXFfxURintI05cNJ7n2T5w/9y4cvv/7l3zbb7n88pvPsCtgHSBaCR9oPMkhMBbdifAV4XcMEwK7s251TADhOZwOzMfbde7+hch68F/cgjlwwJRRB5v09GvtPcoNhYUKKfdIyhis3xwoE7jpw9LhxjkelbKge5vsegGnn3I97ZPgiPULNq0J7lTmCiJyZkwyHXHQMJxIgjkGaHhlZ434b3254qIPuchwDjGAIxzD2gC18DIZzYGaEiK0HLP16vaZMXpA59bCm27pta6sWwULlgKQhginhNBEKWiLtBqYQkRMAgjsBYnCAOKIjG+zMnVfjvr2H/MoZ29/IYUpi3ic0qn1o7lWbAyA3yaVMc8oEaK6uagE40i4xghAkMVPCaZ/uejeTIePfqVFm3tp6vW4vLy+99+PxcP9w9/bh/s3d+Xw6iEBCg+h9yzWCAMKsY6dwNyUMFkkiX8f9hCicicR6tGZIFG6dHN209WBYuM6TzOUoPON1Xbf16enZvQPENE2n0ymcMCTx9OO7n+7v3iYqHHyU+zylbLlzRYre9KrLIekvb3+IDZYv9XlZugZAcCKP1uNKDHI4nu+Od/cPOXOAUoJtW1ub3zzcR/JP9YNbiyHAZxSJnCjJV9kV5Dz/6Q//BE7CjEgermqt1bpt63bZtrX1auqIUftStcIAoiDd35/neTqdpuenl8fHp3VZb0RdV22jEnc3wnCLxDjPBQmbqkN4DyNPLJSQEW4Cbbitv+PVf/u+xQXYZ8AYeOsxvh+OD0G9m4axj3EOEgLzSL0PUzczSsYz8yxpQojry+dtea5960YGpAatdwiQQilxEQRbnx4/Xq7PtTaUcjffTdNREkBzA1aHMQ4kBmK0IeCXcAhXJSBJhSkTkhMYeQCOJPAAAgMmYEcONCCljJS/tvyIuK8sYZfeBiLLoEh3N+/NtZtpzEJnFldaPq31i2ttU4of3pTjNLPcp7mU+4c0n0JmSjPjfF2vny+///782++Pv3+5PK7rui1bXdp60booqry5O/7w8O7v/sOf3v70VhGertfaVmG8P58Oh+Pbtz/Ox3sbFliGYLXYqr1c2pdVvyhtlEEgiUxfdSQBAWBEljMHwmhDHMgCetMhOQVAnwoxgAsapyGn88DxywiAxSCWFVWTa7Ka7ThbyZ2TaoqI6ZB+/OnMmGV+26OEgkYEk1CmAXtyDzciS+QhCEqsgt2hu6oNOIp5RwROyCNKgwJAPfR1Nya/vf+ttfby/PL8/DyKmKfn521ZrSmYEwCBuA/9v1lD8sRRyFJ06Qaug5OBQpEScpIsKacCSqWUrS2th62GhRMRsnPCIGDkYEIW5IzMyAFojDjy/gLM+66qAXLr3YhFhBmnKZubqbWO/5O1LaEj6CDLDz8rcSAO1YW5xzhOzTyJui7bWrcAN1Wt6ls1t8jM0zQdJpqzJ4pdjzFGpns8c5iFtaab1aWntEkqd+f70/HueMjz4SI5N3e1rl5iv/ZpwFPCI8KJEYmCRl7kYNvAK/gwIBBRAsBar9ZrgAUgAJrDqMVwpE2iBSAh7/fYOFPCIzqAEgoTEgsaIzMYWjghz2mm4KqG3A4n1EAgV3MGIpAiIilN5U74wJ631p6ePn95/Hy9vkRyKcc8Ee7LnKARMEI4ajLiCVJr85zmgvyKe0HAvL8L42HpFq7e1d2dXoFsfEB/v8/4wT04muJVVzR6orF+umU0IsBu3kIAHxEBw/10e3riCGgwUAuPGKlHw20zHup7tpgHQDSLpqD67XZBBBFiHjzDQLgBXSL26FQJQwiwYcgM30vHsHDfXTJ4+6sQwB36oNkgJNk/4mrxvFRnLBmRpIb3tum1Xlbrhsg4H7AEEtJUKE9ECaxEr+5XQAsmHPoywHB0wDAcF66/VjAgAjMPhfM43caCCW78eFUdP+m35JyxlAwAMM8xiocIHC7rABhk6iAiERJmDIQIG2XgPgkb+Cfb6vZyvV4utdYqkh/evPnhh7dv7s+n4zSXhGgdncCFIGSsTEeQ0TAosRCJyKsLBwmFIGGIdahhvRkBYFioOTn5S599msCDetda67ZeIzSXtINcmVPOiTIgttra0oV4ngq6sGYMTkSWDTriiQ58SDjZRgLT88vWdAOpwJEmyUXu7uf7h9P9m3NOotaFKTB6N3DGQFc3U+eO4MJSEk+ZsnwVWEOS9ObhR2sqzIDo4V111DDLcnm5PD09P13btbVaxz4vUDCXUk7H02FORMQoRLRMRXvT3ruOuNhxhoxP1pigZEwiLOTgakG3y5MG+zkAhkv+tiK6RYjhtyUL3fZL8Pp/v7tz
h8VbJCGhdjV1IASGkRTdFQA5lWMqR0aIXuty2ZZLd+/OzbF3VHchSUmEEXRVa+vyZVsvap4QIToxSC5Bgesc2mJEJlLAAHEQAI0ROATgEIsFkdMNPAFjob2rjm8mWoA90BpfvRbmxMRjCxWAwCxu0VvrrWsL04gABW9No/ZaaVttfV4t4UPOwtNhSlM+TvkB09yQN7Wn+vHx8vG3lz+/v/z2/vLp6fF5ea59NWgcSinSPE9vznc///jjj3/48fxw54RLrbWuiXmeDinlPN2B9GatQ9WoHWqPZfWXa39s/gysQgI4p3T4dpE5eA9rQbwv8wNunnkH3DF/gDCW0QwkBBkCww3ihjlAZmSmiWVGOLhNvZUwDPPa7LJ6awR85jSFHBWka+uuGhGMzAkowHu4jyFQCIEyNgzwcA2zoWnCcAQEH/A3HmfB60IZAOR//3//77W256en6/Va29ZaV9XwuO10wrV3rV1r12bew2Hi+eH4jhCfNl9rWHQX5JSIU+Y8FWFwn+F8X4BPa0Mnk8TMRDlEgIlEkqTMkpESEDsYIgoTZ3IDrN5jCzXwsT1udWe/cZlKAJhZ04YbvOZcR4S5dVXYuvBO1nBwA3QzVTW3IcLwXXGmqo0gZEhSzAXxmBg4lZKyQLjqHhiJTLhDs2F4qBGQHLC2XpsBbB54unvoKAGGFDvASQRJCJCG8SUwPBCCiZHQ4yZtidgdv99eC4ZJb7BtV9ONWZHAwdVvWbEAGB7DvEIBwbjDgYeEYVPctCtKGqZEw2zRe9sKpdPhLRpcXv7a9CXNwIBmtVVNNJU8n+Z5nu9Kerdu8duHT+9/f//x918vl2cqeH44/fTjw/2bKaiN4QIFMIGMR6GhQAY++X0c7+9ZdmI3EYqwCIggEQ8drvau6qYBMJiWuxxoPCKFCYWIkTmIQyTGfCUcPMD0G/GMiAHG7nuX9X7FpjuEdv8aengTzRBAqKp6EAMz8cCZM9Eu0hkw0wCArtEVXsGHh2uMGBGGdjQDOLWOoQGsxMGCROAOpoOOj2MdFuO4H1YpIEJkCYwA3Y//QDCOMVNzgse1v3RHMnPaNMJUQm3ztSESHycmEURJgmkKSnbA6Iy94k34vHfMHSI8xkoyhovtG/MGU5KSM3FC5CGISSkBQGu91rqum7vL4L0jDAENuIcwEqm7h7rtBnezCDdiZEHedz3ho3bCQGRGug3brPd2vb68//3juvVcDvdvjn/64y8//vB2nlPJkphUW12udbmgayIQTuHWe4sIFh6SFOZv7iQMJBDGLFTAeVs2c2UIwiDAHn271pRecnlB5Nq79u7eED3QCTFJJpDWeg83/0jxpS6tUPnx7dsiaVtXZsrnw5SznPDucILgdw8/vjn9/OtPn3/99dOX54+LfsC8ne7fHo5lmqdpLmUuKSchHkkeZnG5brHZ1puiBnQiSkmohGbJQ/K3l2NU8jzP5zyIo6ZJNMskUhDStnmr16dnvVy2ZV1b6x6RWE6nE4Qc5pkoHU93ZZ5b3dblslyv1+tSewcgDzBDM9fu5j46g8MpUcKuOw8DIIB2ZcxAugyn1FjeAuDNYeoACBHjn/EZ3KhdrxT9RPPxeLq7yyn13pflZd26UefMh4mZxDCSzKe7h8PhiNbb9qK1q7oGtK7XVbsRkUhOpSRG3y6fyVfwRUSRkLBZfdGc6O5EJUlfWrhdnq33CGcCErqNiwBBIqQbGJrweCkGbu4tHMEkggA9yFp4R+8wtrS3e58wZyklJRRwcjAEZGEI7+qtGxggEjNDxGV5SQ2k3MnM+mxLi+vaD3OZDtkxLU23/vSk62N7+rJ9fFw/P6+fn9bnp2Vd19aqQScJnHM+zfPd6Xx/unv3w5v7tw/ldGw9qg0rLzxfes6ClBxAcV365aV+2OzJcO1+6fYS2BMjpynJVMoRcc+zdIO2RH0ZTl5HNEREuO3cYLfcI6LvqQnjBMMBa9JeEVwkHQ/T/f1Pp9PbKZ2YEnhsdbten55ero9fPnXt8zSf7/J0wGDaujXtBp2FJhFiAibw8V5yoEEQMDmZYfjQeiLtWY8mASlAQCBlFimvE4fkv/+3/9a1L9dra213+QbsJakN/Z6qNtWu1hA9MU8y8WG3RfA2qW4EPqEUmhIJYUBUFC8zaqTgoqD70SYoAvtVmUtKM3EJQPMO0AGAmCklKLmXpB2wjYNQqfdGnHNOKZdSRi81NIZfvwLA3LsbmgOGMAVAVw0Adh931DiLvw7P3Xdw1hiEylimEgg63XB6X8VqdBv94O05EAFfjT/q2kOb+9a2qhUZWWQEC+yS4++mCKNkCYSAMS2nv50rRZAbtF5Nt4KEgOqmNmb2I83Sh75uHBxfGSDhbl5Rt1o3CJJUBBR6A+fCU6bsGtp7bWuzjVmIGIIZp0O+n8txzjPFVC/w+Lh++P3Lhw+fni+PSPb27f3bH+7O94dUsHVXV95HyaOfCTPQim6cpinPE72exAxLLjIB2RgVmQ0ZyldOgyOMozIAhhuehREBcHg4hpj3uyT2W8YRmA0j0i3gd+eWhuq+vBiDHGZiYiRPCugwUPHCyIJ74zE014qjllIFVfheEAP7zRWAGCLoBtBxgH6IkIUBYIg0R8rGPt9BJ0IEvu3VAcmHkc4RIcACagAhKGFAtBbRNcDVYK2AEbMMHyjkDPOBUmFEYcKUjTiYgMGxoAYaggVYgEMMJWULUAWHeE0fJqKcU87Zx3Z6HPVIA72jqtu2mVnOOefIKY0ShyLixowECMIAREYACvOgnYs9Ss5dxx+3nmEsQ1vTZamXy/pyWSPwzdvjj+/e/fzTj2/f3CMac0CAde21a1dE4CQ5pTAybQYxAozjBoX49rk4oTGGhGFdTa0xYWJMzBFeteG6Ii3IgkTElISI0CNUtfeO4bV163WpvS39y/vHBGn95U/HMq3LRkz350OeJbCXks7nu4eH82F6cz69ZZpJHC7PVOKnnx7O9zOJIGNgcIJ5LhTSqobB1voOluJwDOIgQkFMgy347XAg5iSciDkC0IIwRCh7tGQ5zTnNWWZhZTLEQDcPMPPWVUSJkDmdDnOcjtNccsoASOtqg6uQ2cLb1t1s3Toi5FmmwmVKMeSKdJspwg2QCnvo0mgPbDRwSLeD8LX49W+dSogoSVIpnMQBkCVAu5l1SEkwjWZ2EiL0rrppXcEdgTEg3EwNkA7zdD5Ox0wFFfsGXgUDmHVEt/al19TrgSUFwOgyDG3Q29mDaES3AQEHooebY4y0N7CRjRb7hBR31RyF08iH+f4agxiACNiT2vZMC0QmHFQaEhEyVFUGTTOzSrkWaNYRN48Nwbxva33Sy6ft85f2+bl/Xv3So/XUcQJpAg4olEHOZXp3d74/P5zm+/P5jkgGiRnCiDHcr9tlbRuwGNbVHxf7cu0fG16AW2ALVCYQSZQ4GES+we602ePvy8d/e045S+aRuCIgvFsdbj68oYoDDDRgA0A3M++mDcEDzfBAE9GUg3N3brUta3u6XL88P315+tJ7nae5IeTDPEFpvXbtAZYAJ2cKCQAKQ+/ohEGEQQTC6EL
eScSITEiRPiiI5gJwoiQAZvqi9PXz//8suXXz7Vvs2PiNredgJ7uTlAlV0KRoNGRgQYEEO19g0GHV3QvUaNuyrfAxjGqYvkrr013Uyv1bdmgOnuTizzwqfiM1Zvod06AY9Z0aCs7DQWAgpkpsRIKFmYGfkOCQJYRO299f52KROWPezD91hTdzcM5OAwXS+Xtm1eLXE+HE85cffbduttrU29Gw5wIqV0Op1yzhihFuqAJMfjnPM0zUsqGRibNs6MCfM8MXFX9e6AgZKQinCCwHCHUPQheEIEJiJKfAeBqTUzQ2JKKc1TPhwemO8w+F5aD7uwt7q4sZVbgMVuQ5c4JXX7/OsnFfhw+M0hTWnKnMithWmWdFhmCKi1mXlXNR/jdLvTkICKCHGEEkARCnOsPpyDCDGnnDgTsZlWbaqm2t19OHGNqKOR1uruhFhSKim9unYS0jyVZZ5LmV6dYW3HyEwIy5R3dEQk58xMEO/nKb1/PB4P85RTYgJ3g3AzICTg1vXr09M//PPPf/df/+Ef/+mfe++/+c1Pj+/fHQ7H0+EwZxFm99A8RaCNUiACAJhJBjdbOKXEkixQzc1CLQjAhVlSKWXOZWIR8D2ACQAAMuOPh0THMk+ZWEYEHQK599tan55vnz4/f3269K6jR4RgQuCB8wCCAYzITggAMo+19pdrnS43SmVhJkncGhIhpZF46BGDgW3AQ5XjsTMNBg0rDIYLOyKB4xiKIBEzsSSSUpqllIhQTdXa93v/nQQccefEfLf3j79S7ZeLAjiYASqgIfq+mwa5ozp2ta6DkgHIlMaTPFQgThgiPJXpuMw5ZSKKlPM0Hed5nspUkgjj6XD4+MPHHz68Pz68u26qDl1HuTI2KXf3MAejkbFs9memN692HDgEr3fo488rtYjv/igiaEysGQD8er0+PX19+vIZHk6EME8Tk7g5IhJxFi6JQdum9vz1y9PTc63N3X3QLwhfWRR4rzz2y4wYSLvI6O2VhzcEhfsnwjF/hv0L3uzT8WqkPs5td1KHbr3eoDY3D5FChG7eWi+lpJxPp/NxSqo3+2yt39ZbWLTrpd0uul6MoF9ftlTy6FTVrGtPlnPOp+M5pVJr37xtbTVXJM9ZACdRUTNiQsAIL4kI7QXp7U1xd22t163XzXoVyUno4eFwOKTHh8PhOOci01IeTu8klb47ae6jU2E8Hufjcc7ped36HnAywhQj6H5jh5A13Hac6i3SFq83f8ewwt1GSsvwioJARCVwHx4YJCGRIu1mQj5SJl9flkF/NNPeqyCMt99puMphBLq79W6BGYhIiHdtNZFM00wQt67W67Zb3dwIdEq4TIk4lZJ51K3MgGim61qfnp6+fPny/PJyW+sw8R/dkMcwyQl6Td4jJogxbbwz3e6BX2M7f4NH4lDYzYkKpyQTyJxTEt4bdyRJuZQpT5NIqmqEKDmXMuUyZQGEYK51td48pywUhyVPk5i1rhauTCEEQsgE/e46SJyGEnDd1i9fv2zq+WgootqdOREnSVMSUnbijCyAEi6ht3r58svPv/zxj5cvXy20S7LtFvfBxZ76hCNi/q60gXv24EjBvOsuwmG/bbi/o4NnMhLPcfxrJCZJqTTF5qBqvVs4SMqMSRJPc358nB6zcffmbcUgBBFJKaeUSHYH80BkRsqSCMKBRMb6EAiO4RCBwClx+s5XSWiHYuxbqe4xeDKu0S/r+nKxqof5mB/eY5puzbYeZDGmlvsSTCg5pZzdLFDVoVtEREk4lylPuUXvDshIQAN6dB+zNkCCoXDiIYuCMd6CO2OMmWWAWKqW5WYKkqSUPM/TVJbv45P2W/L9EH1nU7s70JgXMDJ27V++fLXC59/9wJKmuUxz8q4OSFNJQlNOt9v2clnvZjg21scIJE7TNJec3DoBZAZXDVMZQag7+jpiS2y0OmO0N3gbsM8dd/swFMFhPf4mP2Uu5bjMOU/MtIsXzVzN3elN2tzd4VyIcCppmUvJOTELM8LgSLm5ae+3bfvy9fnXz18+f33+8vSi2o+nczdn5oH4DnkOMROz3eGJfbaSUyo5lSIpE7E7qA6znxifdpqnw/BBYKZvqB4AAAsfT6ftesackRiQAMmDusZta5fbeltrrR121RwSYRKac3lYFnd0df2mEBhM8GhN17WVqcpUUrJOyiO06G5DN0KAhwiFIZxwGPh6gFn0bsydRBCG/Go/CyIREclpnqfz+fT0fPzy9fNfbOj//WN0BTstzA3JmYdmG4f9kTtaDE/6eEtHIKDwQCRGSZxLWZbD+XReypwQPeVyWI7H4+nhdJ6nIkKHeX73+HA8LJRKt7jVvtauDt88Nu6M49ci5t/+5N9ECPunoiH73wczd+eYsQPEcA+H0N7W2+3y8lKyCMvxcPBpTKUQCRNhTuLattt6u1229dZad3AwY/wehHgDdyOMiJq9P/4/dw/Gux9vH78dWSDYXd0GDLCP/V2tDW18SpZE3MBs92TwAKIigjmd3RkcCGOZDwRBYJLmtnVzF5J5mlJOKcuAFhInJtY+KovuYUggSVAQVbE2CEySIEIo3Cvx9/K3UV64ETiEmnoSPh2mh4f5eJxSwt5r7y2XtCxLV2utt9pruGklgnkuh8NcSpFbjxgwjA1lBt5HeK9lOOx/gq/d8P48jiDE/W+Icwbg3YaDBQnDVQenhRgxkDF8WI34n+ExtHvlmVnfGeGEFEzEe+E6cGx/lX/7jnAjMtFgB2mEa+9107aFNabIQsCUUp6W5XA4pCQpl4DovW7rbV1vtVbt6rErYSO+GVQiIrNIgHt0otEnjRnsG4cbuOu4vr0MnJJk4SQly8wy7WswISGx8EhoGciBARIMX82UJAlCTO7Ya0VmyZmAlmPJAto31eauBJGI0kARh7MRIAI7uDmG2+22BiUqU3iPUABHYhZJScIScMopCTMhoLW+Xj5//uXT51/X2w0pel21V3jD72EEIRiF2v1rh1kG7wdwbKTD5/rOw0e4Pzt7vTceKNptgxiR3U0Nxp5JmCileUqHQzmd5mMyu61WmZiIEUaQA7E7NNWu6gFMRMIA4QY47CXuyL5HAFGeSpm+E1sIIAOOouX+oRwRSEK6Nr22+nTra00qHJLzNM/mlA5LdI2tajdHwDyV8/lBSFoblD643qq2azv00+nMRByjQjF3G61ImTITBXrOCBCDyZGEiHYuXgQNDJZJGBkASqpZZktQplKmXEpmKW/UYLtIgYcr9P1x9IiI8RpbAMVdu1Rbu2n1Jf3UGxEeDtNyyPVmPvKkYwqbXrJstcfNwTq6MsHdNI9Pp/NhmdwUTdFN6+qtiREienhttUa/rbeAmKfDNOXEDBBtBOCGRQDfg3ddvCN9HwKHSymnZcllSiLjLHa+dDdEkDQIT/zNbyqAhq4EaUxvS0nWTLu3rW7t9nK5PD0/Xderg6ecp2l6eHg4n47LYc5ZkMh0RGJCeJiaD7ISMzPmnOdpnqZZcgIi300h3D0IIbGcD4fz8VCy4Kve8vWmSMaHH3DrngswMGMgdoO1+m2ra+seMf7Vnl/BOJf0
7nT63ccfhx1/026mGoEo4+0LxwEz927qJsEj3yYAh8jSIzTcVCEgmBKSDcq1B6lXaIjAKSPa2PoQgZBFsqQsSQ4H+vHHH1vXz09fJee3cYZ/hvy/wgN3ddKgu7qq1drcjRB5AB1+p8V5jOk7IDATRKjt/uSAwUjCnLNM87QcltP5/O7DY1lKgOVSTqfH9w/vPn78OJdi1gWxzFMwVfWXrT5fbpd1NXe4u2MPkoMN7lzvQ7P6bbt8W6/ca1YCHFnfRMTCSGhmw0vU3fbempiIxqUm2P+LdV17O56Ox6lkRIaACEMApggPrdt6vfbB3zbr7o5NQlKS8bbGnX9AgHel7ej0cOwm+4jozzGY15MJ9H0N293Sh/CAgoJ2ciK9UdIMqSNYhLcKZt04A0gAmtnLy0vJBRxzEskPsyyAUUo+nc6I6XrT7Va3dau3rZR8zHOeCjGq2iheIxATBnjALh1lRAIGoF5VkpQ8EYFrq/XGkuj7BABCFMKSWBC27UZFlun96TjPRdz7l6+/ppw+fvgwTaVt27bVWntvtXtFtCnLMs/TPOdrG2kke5qX2euePPC5MPMYjBwaMZO6qzhhPKAASDLkRbMIjZsWiEO52VWHD507hlmYBQS4xXe+SqOEHMWJGwTsKBkS8+4ZfneCRtyTt/fJgO8/jZCYWKG7mfaqfQvXETCTUnp4ePzw8aMkAYDeu7maa7iPJuwu/YaAGFkkGMB3hqpHiDkRDmLtUBeOf+U2rtDbhQwDCYiJKQlNmadRMwgTC3EhSUjo6AFGFIPwwQiMwIQhQqTmYQFScuI0HWbGfuvX1pu7EUQWzIxjNYngQPRgCxoTYm1g3QGM0AiMxmNMjCkxOKaSlpJKZkLQWq+Xz1+/fHl+6t5Tpu6tv0GUCd+Iq8cXAt9nbHuk+FgVA2wXKyHsVLnYHRMdhoToTjLj2Hmp4EExyqFAQTos5Xycl0PJ1FpPmDJlII8Rs6oetfXb9bau9XBwQMHhlxCBhMOeZZdeDw3BYVkOC/E38EJg1/SBxyuiPH4GkRMqeLV+aV06OiUp0xQgBg6qMM02EJdh6Q8AiDz83yTVXrupam9uhQVSQhGMHqYtmKd5STlbdEK33tu29Vq1pUwJkfeCD0YhPcDXAECRVCCmacolsxB+79xHAyUjYiK/txR3JGZ0JOwR3W3t7VLXa6+pNQgoKZ0O8/E0r4KmnQAoArxD4DStcr01d3AX5qnkw7ywpIeH/y9nf9YeSXKki8G2uXtE5AKgqnohOZxFR+vRoztJ//9f6EY6+r4zh7OQ3V1VQGZG+GJmurBIANUcco6UU5ynFjSQkRHubvbauzwscxm1at16XS3qzwGdW6Um2AhIzVi45HyYpsSy80ZHVyOzvU+N0awS/eqkTMzBakopIRKAW8Tn9uHuxDuzhwjjuBpqUU8Ih1sgpZSFfOo61IZubgPdcpKnxwdhTkm+//S4FGGwQL4CTAx3k2DohK6NGSWlVAql7IjqwQvas5pLTqdlejou58OcsxAj2LfnDAucHnG9YaaIjjWDtbbLbbtttbXuHoR8Cw7P/dBy9T3ZKAzz9+1onyWAqgWQPoZaMlWttTpg8NwtWN+qYI5ORuHaAqHuhm5EWIYyGwJAkD338Dxh5szygHS5PR6Pp5jo/VvH5l973csaR965OLDL/kOeuW8lRETiCBTvmNATU04ogqXI6XR8eHo8PT6VuShoLtPp/Pjw4eOHT99PpdS6gllJgui9tTYsEn+GAQCa71kpryzOfQ76V1/fQphwF+m8zgY9kDlmIeIYclJUuhHdzDzP0zLPGKeRKiFkwdH7c686untQrNjuxZ+qIqOIEBI5mpmNEf4I/ute+N957/su+43dzT7H36dTsRW+gwoQzB1Uq6p26IiJOImky+VCLDpsmhZJzJSYkHku5ZRyycUuct3qpmMISZY0pWSuW62qPuU5pURCZhw+one+GwJgTomY53lBxFqvkkvKmVMCfGtgcpLTYf7+4xOiPz8/p8RPD6enh9O0FEms2rfttq5XEVmvW62ttT5GM+8Idpinh/PxfDper/V62YJOFxcPkaICDrst8/73oYPy1/rQPYLVSimn4+F8Pn/4+FhKMoseSp+/vvxzb7XVGDPuXCKN0dO/icRgIKS4Bwsg0x2i2ZOBmUj2Z8YUTH3nMXbt/f4geGQhjtbMgjQJ8SaneZEsGtlSOsAHojMj7dKL/TnYkY093gsRXGyP9As7eje7s83BbFdQvl8aSCGlpiw0Zc6JGPahPrFAZALqEGUA3I10AuGgvV8dqmYmwjlTnkqAXR6EG5G5THMZoXbCEBkjuzsAu7sZuSMhMgOTh7unm2GSlChNh3I4UBJzt962db3cbpdtMwaGvYX7Zmkj8H7MA7/eCtxNexn2Ii/a2giP3YEagxh4QyC1xExi7gQ9ttw+tA9XZVWwYZhtSjRPlBISMUgBURcyHuru7tR1a6PW3ls324+huGWS+LAsh2X58tIINjdnotPpdDqd5K6mBACJviXOLX5FXR0JkJEzlwTp1l2bgSJTLhOBmg5lgYyoCn2M3vvtenWD8GY7nR+EZU2JwUdb60rTeZqz9CygvW/ViEuWac61We9j65VAvxYisAc6l5kJ0HaGmTuqjrHVW+sbJ5pymaYiwm+j7v0xRSYUpEQcRYy5O6K7q+vQoUONhpm30b+sl+ftMgAeESfJpzI/LIfTw2ma59GH6XBt3mpvdjwer9f19vzVDUoSWTKapDw9PjzmJKvabV3rul1fri8vF1YuNrFl8jSnaSplLtPDcphzdodhmsSRwrAvtDNmbgE7D32zuMZ7aYnubo5ogCgYUCUG8hm0LHQkQgNHMEKcci4pI6AbIHLKdDyGK7yPVuppHvpUEtdaWeDhsGQc/fbS+eRpAmSn4MUOdwMCCe8gRhZCESNWHb03HTWYtonp4XT49Hj8eFoeD6VkJibXIFbf6xhmWGY8HJg7M4rIVvvz9frl5WW9baON2OGb6VBFBFeH6gZfb2M4wMttbTqiT1LtjOCQ7hNuHX2MMczSUO1tNQN10DF2VoiHVtFUwSzdhRgWWVpBHsHdbDvaUaZwv0bOhZZ5ORwO0zS/jixf6YD3fnGHnN4d9juhgyikDGFOQsiE5PeA4AhEMgqDSmZlVLWhgwCnTCUTs+VCD0/Hp4+P8/nIuRhCzmU6nKblWJZjzinay1JKnCWA3If3HmKsHfKN8T8z3y2I/Nsq5d94xadm7rD3x4iIqoHv0H2UmaKIGb0TQsppmqZ5mZfDsixLKSXI6oQkQstcurTbbQ2/h5wzsIzdukR7Hwh4PBzmaRbi0drXr8+9NQIiJgilzL9XyoRNzN4TvON0vN6ytxsHDo4xB/VI53Z3hzFarRuApDI5ECUZZuvtNk3zvCxzmed5Amrw9TmlLMzqgzOxk9noFTJ5r+35558ccPr+x1xSnguN4RdoffTeTdUdRGSe5zJNOZcg8KVScpnSuxRrJlqW6fvvPizHw3ffffzy5RcAf/hwPpzmlBkJuxoivry8jK69jRG8KmtqA13Px6U3/fn
xerms21odAJnJBWEAOu6oliu4G/qezwLqI8ZY4IYAiXmey+OHh9/+5ofvf/ju08ePyzIBeO/9tt7+8E//8vz85Xq9xV0xg6GOpNGKvC88ETHU6hDVCRgQohOg0565TUjCkogFEdxsmILfTdCiTekx4bOh1lprvY2h5hCTnDvY4mauo6k2BBeGlEQSt6Huttt5MQpTTpyTmJozkCIxkhD2oDA4vlIqzDWSf19HnOgJrbBPAiVRSZSE2AARQuSh5r1r3Tojp5SZZJ+dAzGSo4ODjmE6mCAlSonQJSWxlAiBPYF5r/i5XNdtOHWFHaMOfDEqoRQmxELgWrcKNkhSmfJ8ekzT4kitd++9tt6GdQMDTEYKbPv8LtZLeEYFs81DCcwxxKDgWyACoIEjGtir2nQHMt0BnRiZJUlOkt2VEG0Pfu+1Qe9lDB9NPbngEFIHNUDn2QSVsOHWRgMfvfa19jZM1dXBDE3dHAhxyuXD4+OHp8svX25ECG7CvBweog9/K2KQ9oW8GwkMNwN00KGumClnmsBodGttmEFKxcWrN7MRe2XsDtETEwkAlVKEaCLq27beLmqV8kOe05IFBvfrrddV2+aJwMxU27a128t6+fLll1++++HTw+PjdDikIIkSmY0+6rpda9sQXRLnzMSsar/akRHufklxD3bzyCD8hmaOwcFc19FWbSJFWDJKBo5ZTE4ZgKvBsOFOwGk5no63evn61YaJCADTkkmyGtzWervd6rbGgHB07UOvss75QELTVErOUypZBBzG0KZ9qHporz0W9qs7+q+bTrwbP8UaDRo/BmqA5u+hjv3rgQmzpCSJkCGmCcTTPO9xrIhAnrI8nI+jN4CRiaZE4Br4mRu6jp0/HATIHfxwcx9m3XSY99GHDUQvORXmj48Pnx7PT8flMOUsQoS26xjfjkUgAAZhZkZiUrNrXV9u120X4kJ0ampGhGpeuw+zax8AMEZMst1300UiRiJU0z50jOGq8UG1HkNVjEuAu+zibjFpZhbG26/TdzBHQo7ZssjdVImQKBNN03RcDnla/j8gMfiaA4IYfS0RRCRpDEwYUdA5+lREMxuDCWma8sP5+Onj06fvP3789PH0cAZJwJwkTfMyL8cyLTkXSSw9uRnLroBbjufD6TwtC16r9u5mQBR7n7Awjz9XwfxXXgjurSTdwbmdFRlQHBIKSy45jmdJQkjDx2uFF9BLzimH0dAwdlAAB2+jr7eViEou59PpeDj02swULvHM+85zDjz7r77297TDMDE9+lXp49/+5pV2tz8QYwx3d2TiKquY+bZtU936UDt65Ouoes59nmcSXE4HTri+PG/b5iuObVu/fM3zXEpejguX4lsL0oR7jG5cREop0zQRy9AR0XeSUjjrvV7IPJXHx/P5AR8fz08PB1WdDlOahJgcfQwjTtu6adulzmFfNXpF95JyZp6ylCTRShALghu6IB/mnJMQkzm0ti/2QFJipSOgiMxTPhyXDx8ff/u7H7//7uPDw8M8zUSgprf1pmP8l388Pz/fWg8HQYwRHfgryHi/KRiagAhSNQBDwHtMUnAw7h53Dqr33kP31ErdZXsQQ6JhccjsGHAICHeg0e52+GMQuOzstvs5tyM+JEI5S84y+jAgERZlIt03WrvTQoNAA+9Hr0AAgjaRTpQmxkTIeDeJDmdoR1Xv3bqEs2iws/z+XzvsO5gSAZMhGoMLs4mQIwqmDDnbNC2lDhwyTA3cXQNvZ+bw/qLdSdl628BtziXPh8P5kVJZt957taG1a+1amyohiF2rbk3tmyMmjBRs7+LC8o7wbjmxP460x6nBXv9DeK1xznma52mZUs4sTGqEwHsowAADMAYjMmNQQUUfrbkitg5rx1v3W7PaOjJ5dgViyZyKI9uOxCAB5JwfHs5Pjw/L9EtiRsSc0uP5/HA+f4PEEDOaQuTn1W7NfGByYSUekHia8kwgo9t6q/NWpZwEcfNea1/XzdwlJfcIEtPaGgKWPE2lLCIX8H/++V/8uSOPxw/neZ5oKhf70m7X58+/aKucs2tv6/r85ZcvX34SwR9/8+Nvf/fb3/7t33349Gk+Lmw43Gtb1+1aa6VECUgYSWCfPLwVie+2KL9XLmaAMRLfUTFGQEBFA6aSy5JKcYaqt8sVCxFKHfp8XbdafQwwmg/nxw9+vTw7OBm4YU6pKfz00+fbellffhG3x+P5cICvX59rr5tVJT0c56fTQ+ZEjl31Nuq61a5dzYjv9Bfcw3d8BzXf5knB96Xwf47NBdHvLDx3p/sObW5kAOhMuNN1JDHL3j2L5TynMsu85GXJUzkcDq3X1rZeV7chSLmUeTmkPNfWvMGd08iAMnSYDzKk1rfWoG4G1McwNWF+OB8z8Q8fP3x8Op4O05SLMBMgieE7Npa7ertiu8lUGBMADNO11bWuW61jdHYMf7zolCOrT4eBtvt3cIRd9Y9o4S01dLTWdAwEzCkxy+ZtDHUAVfd9kBSlbHie7hL2/UiOdC2LSagkSXGWEPGO2BKXkk+HA5eZ31X9v+rs3//+z74mRgRhs43EtE/tfJfcMTq6M2PKGRB1KDMvy/z999/9N//wH/7+H/6b73/zm+lwfLnd3MacppKnuSylLCwSeQ2O4OaU03Fevvvuh7/5/e//8Q///NPnZ70OKDm0kkhhRhAWzH+1jnn9pzuSY/ek7ihE/M5YH2Mwe7hcEkgwonPJOWcwH3uw93AzcK2MCJ5TWpZlXrbuGDSolKX3HslwJefz6fSbH3+woeAmQmutZsYYVjfukY0Hr1qwb0aWsV6+fcG74Vhcz+vF7YAEBV5JIeoHEQnFjar23hFxjAEOwllEkjCgmyqAl1LmZTk8nm+3fHv5evn65eV2s9rVrBxPj08PDx+fmtown+a59/46lUg5pZSi/lONSekb/+f1Wkoux3lB5sMyH5dZzTAR7vbDMVix3s36ligxIxJ2hdGa9j6obdfNRkewHfAQcUIEPS7T73/7w4enh1KKA6w18s5ba63WqmaEmHM+LMvhMC+H6Xg6PDweDssszOBGQCySjscPT4+fnj48f10/v1z6cAlCK5G5QcQJvO7JiDnnlMTNdQy/85zwvtwRMOowdVMNFNECOzUdY4dUyVRVh+kwH7vcjmlZ5mVZ4jCLBT56tzEQgIlThJ8gxAyUEEVIhHNOOQm4h5dDMmMauI/RX5PC4E6keX++eIZRYEzkhYDd9ozb4GLsEchsijpAB6iC7t4eu/wxNJKwS23cx+ZoQmScQjbkDshSyrwsKr0N7UMHOgwhdU+Jw3XTdxcd1VEBAHlJZZkOZ5R8q8/dqjt0863pbe0DsKmDw9eXdbzn9QeJ2UPWFxETFHUl+Lv7R4RmFMcpILiRcCplOR3Ojw+nh3PKOYaKhFYyzQW3Dm34GMORIcEkSGCj95eXauZ94+utfblsl9tae005lSNyKcvpvBxOxMmBgJjA0D2JnB9Oj4/neS45SxISTufz+XQ687dITDg+U3Bd+zZw4PCEjMVEOE3lcFhOxtT62La2nByT0J4BwQge5TAAgsOIjxWBmY
WJmFrbarvOX6VkPM4lqIzW23a7CtGpJGQmgNHa18+fR6vbbd3WVQ1bGw8fHspUDGyr6/XlRU0LFiyZCZnQvqGRAOw0LjMzNHxrvtEjjfkVcY7SgYgP07ykyZttl+12XXlOJU+t9S9fnl+uV9Mxpfz4+LAsh3k5bmvVbdNhNsa6tV9enq/ri9XrkgQciDjYx816tWaoEWhlaluvW2u1tWEDApKI2iVq/iB53EnVvzoIX/+4DyLuoYN4N1d6A/juWM6rdum+KphTnsrEKTHTskyj11Zv6/Vl9A5AZT7My4Kct97tbgeu5lvtl+cXM10Os8xe++DeFcjNk6TT4SCcJpYP5+N5meeSsrCQELgR0Tu2NbiBNrQmWJhwgKtZ77223oc6eE7sILWSmt7JeHCnK+wHa4AZTDRPeZ5zSrzWFsRFQsginISDbPo2srXXQ8xs/+Vwn1YTxQCYiaL4Y5GwrN+dvlimnE/HE+XpPXQJr2XQvwPPIBEBOSGkeMbfWlYUpqkk4V1QHoZZiDjP88Pj49/8ze//u//pP/7ud39zPD8MtdFfFMayQJY0TfM0TcJCrq/wKSKmXJ4+fPoP/+G//ennzz9//jLGICJz53jo2e9h2P8vICW/qzleL9n/7AVBQiFiDqA7nKkIEVNKbsYIRIxuUeiUUiZ1NgfisLHqraF7Tmkq+fHhzES911L45XKtrZr7GOHUDn99EBbkF7xP0+OZgbfi5d32/TptChLNPvvDXFAHmlOIudydWZg4IIExhoyBgMzcx5gcyjxJ4v7pxl0vazVrJefj4Xh+fDw9PFzWbQxblkXHCHdRZp6mUkphkT4U72jrnz03KElKBEEApJTV3Tn6st2NLRLstQ/DIUKSyCJ6t7bhvW7V3fbvjIAYngXpcFi+++7TD99/KqUgUldtvdd1W7dtWzd1E+Z5KofDcZ5LzikXKVPgKHsnwIjMMk/zw8PT+fxy3ZpZT5IkZSQcOnDgr69FRETiaQIgZ4QQoUYuqbmDBpF19DpUvyliVMERiUytjz7GsD3JmaOImedZRPBe3YYbLiOnEG3Km2VVSOrizaQssVGiy3BIMvZm0nY+1f3Z/tXzZuiDXTNYAiA3sCBgIhISY8Bq4GQKY3jvGmz6VnsEgAT1mRCYnNHA+i4HMzBDVehqGjE3uSChDBpGHECto0ylFAEIxbu6e+8NENUBSFKZKRVO1XGrXdfattq3rXeAbYzW++ev1/FNDtRfSLaH/Sy5U7ABKPCaHWkj5vmwHB8eTo8Py+EA4No7gDLZlOkwS1c2V3BP5Ig8FxZCHbptrTatG93W/nKrt9r7MGcfQEDCqUiekASQiBOigyqLzIflcDrMcyk5lZQwl/P54Xj8lhMTgDALA2HrfVs3GCCenNx5PnAuh+OH734Ybua4blX6yClNUxGWMs2IWEoxs8vlSkjuqGrI5JEPYQMJzfTy8lyKfHg8M5EgELjrQPDw4x/bdZmnh8PhRfX585feet3085fn73787nhciFFNt7ZReBzNToCCZEiy+we+PXnDbfjwsIsxc7cQctOduslITEzIWdJpOc6pbNf1+cvzclmmh0UStNZ/+dNPP/3y8xjtfD4dljlcUCSlcd3att2u4+Vye759HdrnLCyp1matEwAjdRvXtn69fimQZp7BcetbD64iYgwsPFiW0RKaD4NwGn5D++68xrdZ/jskkF5P4fu12z1oPpI+Sk6lFDDzkA8iIrGU6UR4nMvoa13TlXD0QSmnsuT50NTUfJghsyNtbfzy9fkP/+WfEOB3v//t4QHVbOhQQwA8HA6fPnwcfWTEU5EiLERClJkQoKu+NxpBAEBD8kBn8M7+suHuKCJz5tShtRo5DQbI9/SeMI2MS04iU5aH4/xwXAjwurbRzRwQMWVJWcokYWkRAcm+46V3Hkg4xBg6A4Y5OAGzsyALUxYSAWYItQYRMZVUzucTpb+GxPjdLeb1XyHOx7s3gAiKIOIehaJqhEAs0zRNU3xgbmoivCzL04cPv/nd3/zDP/yH/+E//i+Pjx9qq1++fGm1m++C5LlM8zQlZh8KFkY9MQqA0/n8H//n/3mYfX1+Bveff/lcexPOiOgYBBdGYvjLSMx7LDOS/PYIj/df87bcIDKNX0s6QmLmcM+j3U3OQRXd2vY+8TQLkAEAATPP80wAiZkRktBxWfh3Pz4+HC+X58v1er1t1+v15Xqrrfsumtnz0f5sVvRrHCYu6a7R9vsfg5IIr/IVACQmJEk5jYGt6TCrtarqVN6MBl5PSlVtdau1LGrH4/njf3y6fv/jf07/x/Mvv0guT5++Oz88HY5nc9RhPlR7X9c1KtRpmkopgIg4zE0HA+EuxXnjXiAxU5AtHXYzX0ZzH2oWfuHdwEx7q7URwTQXAA/pgg7tYxChCIWQJOz9Us6HeT6djsfjARFF0rkUd2it1lq3bXP3nHMpOW7fvWSCSLUFRx1OpMyOzsfD6eH8+PnLy1DPKeWckakPbK29Lju4TxKZ2faY97AbEUAxA1RT7EYGCGo6RtMoYmKctBuAIiKrWmt1aAd3EV7myVHmeS6lpJSZBeIGISKSiOxWlSmFe1xoLiOJIsoYBOQkCcAAcxnMbe8h/3Kd7O6uDdQYnCG2EmSSYKikxKmklBIBu+EY1mqvW73dVhEZXRGxtxEgehYWBo9khTZa11GhVqjV+giLBUrATJ4BPfOiyYEwJ0hsZr213io5tt4QqfYxDJCSpClPC95ua20v19u2ba21DjCqvVz8l1++9nsRc+cR3Sc30V3F+GLvqm3X0wRUCBHX54AuSY7n8/nDh+PDuaTUt7VbRxiJfJkYPAMoUxf21hE55UkSkw1Yb+OytuvN122sNXxkSFG6U3cajgroJMhMZKjgVomplDJP0zSXUtJcMs3z6Xw+HI9M79VJGM64QvfJpQ0DxyFmDJAkUTmTdB0bwzBtrWFJwjKx5FyIKIoYZo6zrPcBiKDuakiUp1KbrOt6eX7ebmvOGXc4YbekFEJBnCQ9nE5k/ny59K398qefh/no/fxwTImBcGhPJTHSlNKYZ0Ek35Vgr68de3EDR7WIxnRC2i0DWYQp6FtElDmfl+OcprrWl68vDy+Ph3VMM5hb3dbry9d1u4GO9dP387zkMpUyb/DSartebrfrtfcNCUUSI9d101pjYNFHv23r1+vLhAUyJExB6bjnvAvRXWgTVDf1MbQN7d+aeNi7Nhfu/lTwDgZ4X8SQxxAzulFijqheQET1XVdFzIwZyBkVdfg8azYuM+cZUxmtA5E79jEut9vPP3/+13/96Z//9Esp+eMAp4TEAKiq4DCV6XQ4uGpymxmKYGISYiFBsPE2Cd5fRBBiAUIANdexo8IAInI4FLdkbsS0rr0PC8U/ETKyAzOhFJlLPh3m83E+n2Ydii+r3tm1iJgST1N2sxahRRoEGDR1V70nAUY/Tkgeb5fjhgiRCLHQ2ydLCJiYl3mh9Gt10q8+/L/0ilO9FCaC3i3aMjNnAndnkXmeD8uMiNu6IuKyHM7nh0+fvv/u+x8enj5M02HdtlpbxCqZ2
j4XZ4LIk9BhpoiOCGaWS/nxxx/qtv3y889M+J/+7//fT7/8EoR2R9/9zv49Vu/713u45fX3+G3PAHfkL6UUO8A0TcuypJQoci96H7suyd09vpiR1V1NCVBYKBy4wcE9ZXlM52WZTofp5XJ9uVy+ZkGEy23bw7X+6hWEzOwv3pHXOX9M/UOv42geEqtEhA4Eam4W8FK8YrT0ygraUU+DnKZPH57O82F7uUqZ1D3NBwcy3V274A5LI+KyLNM0MVGsD1Htd3+EXxdkccAEwxwDMyQHIDYmJSJ0hGHee7Ohw5gAEHRoGLi7e0pSSmLeZ2zElJlLKXGbzAwQkjAxp0RTycsyAUDaoTSOWm0MNTc3MAB1VbW4FjOYp/lwOKScuLX4iIjJnH8FjgfDhogDiXEP21ICQDODMQAAkR3MTMfoqgpgPnRoxPAFGMum1nobYwCASFqWBTjN85xL2XNqX3ccIgr13K7TDwphRGHvU3sWZkQGN6ShUHJNSXYH9r/8eLmDDlUNKUDAFBgyz/iVs4gwWPDbOjbibeMsIlkVEDGWPxHt8iIz1Wg+tXdsHXqEyu2bvDNhTokwhbjDRTrArTftvfVGTuqGgENdDQCZOOUyIfH1ertcLm5DGNygj7EOu91u75Of99vzbc0f3QsEcek+7sN7YBISxM9YTsfldMrTxIS9AYAxuguAIRibMboxelMGEkoiRMNdjYdKG30b3gwMGBmBUjcIBk9Xj1oEyQCClxl+WXma8jTllAhoz6V934ztNSwLpyQpJ07iOsCJc6KcQTKBTCUnHeA6Eta+wYY0LymVMHJNKSFRzmWa5pzndd36GNrH6JCn6fhwHmP7+svt8nJ9fn5ZlhkcmRNzQgQdw8G0N0F8Oj8cynQ8ntatbm3cnq9fWbS1aSrI2EeTJDaUHIpkMmAhkrcruedhxA7wqrBwRJTgHqbMwrU1NWXilNLpcJzK1Hu/vqzr19qvA56i2CF017qtV768PDNzKdOyLC9Eo/dtW1tviSWlnGUC83q7tfUWse/Nxw3Wl9vLSZajHDlTpgSeEEMjwgAeqIYD6Bhdrbe+9daCQ/DuWvzORXg9Mt/jMXifizk4IyfPueTYpPZqhggRzRRthJ7OXE21tTHUWLIkkjJTKkaJFVIqgHi9bX/86ed//P//lz/+9PPL7fYk2Vgo5ZyLcKqbmll8egCe3SeGwpTjjrKY6j2k5T5X3ck6tEeLqQZhBcwJIAmfTocklMs0zddfvrxcr9to6m6IHI9lyWlaymGZzofldJxOh7yuNadLG6FZUHdlpqlkBGBoIQ5kckQ2Be0dd/oROmD4yCLHmSt3WdJ9J96NOmPMjSUllPQOlH4T7OBduvz6N+8PodgNiYg5AVjvddtaa80Mk4CquVnJ06eP30mS569f++gpZZFUSibibV1716/Pz9fbrffOzJHiF9ULDB2t9tER9jmR2mCEkvOPv/nhf//f/tenp4enx8f/6//6P//pn//5y/NXA+2o3cew8S257y/t1/dVBHul8hpkHZ+AmSNZmEUgURB2WTgYf/M0p5wI0c0GEboS7dgJMjIwIaNCGMOYqqOHk9MYzUynsLsWmnJZpmnKmZg5vVwut7X2b7Wib4gLvaPFxBRsH7VSTLr9fgMRwCGsOzDyknAohaUOs5QiCTBmFklSKTmlBADrtvbec87hsF5yYWByBOR8OH78/e99mj7/8ssG/uXrdRg5gjZdb7d12wBcRHLOKeWoOF8LpD+vht18dB3DMsHuAhUpw7vC1cG9t7ZKuiK49rptOoa76Rhx4SK8TLxMZcopJXYILghJElVttbq72Yh3FYYUMpWYbTlApFvHzoOGY7gO24diCCmnMUwklZyTyO6TDw7gd37oOxQWkZmYGHY3u31/sz10Loq14eE5Zd33J1x7b8HhDRzWzMOEEAAlpWk5sKRozyUlYhqquxgVCYmdMPT/IXeIhkIIQ30TdgAC4EQjWympZBGRxt13mkis52/vi3sfNhQckInc9/I97/+X4vPQpmqmA52AmnBtIi2Wzhg9Jry4U/J8KAzzrt4HDEdHciJHVzQASwLHOeUUXjyoSLehbQRnaxBy7K3uEBsqEqWcEfHl8nK9PhfBh0Ou6njr2wDTDvfP/82Z+75sQtfn9/mrY6DJO/KKhKGzlCTTNE3LkqcJmC2IhxHVJ4gGNnwShEJMOEyMRJGMEE1S5qwqo5JV8IbgkoREuvpa+23dam0OSCwOGsakHhPJzPNcpikRegRzrusa1ovxErhP70Ukl1KmQYbkKc1FcgESxCSO5GkCraDNtbaNmciBeb/fMdQvpegBiLi1XmlzGFzy4XTura63i7tdrzcAQORpyikXZu6tufa6rjZGTjmREAmjqF5N3ZpqVaOBhKN3bZ0cyIGdRq3zMuFpmSZ/zWDxbxSkb44FwpJFsggibr220UsqyzzPZRKi261eL7frl9vteT3VhghzKafD4qOmlGx0G30qWZe5ZCZy1eo2pmXJeWIkG63X2mqLZWTmfYxrXS/b9TzVIhmjyN8p7LvVXnhgj/CWVe1jdNX3x8sr4rK7QhK5Q5Ag7vXBO/YVUkp7yrQkMffehtsARFdN7iKETqbDVM0BUVIWYpY8ASdFJnUWGWafv3796adfvjxfatc0LdPhxKkgp5SzMAedxSLpEz0jTMJT4imlLBJ+C7qTOt/Pv5hJcB+dqI0BZsF7T8KHuSzLNM1LKcWRAPDma+8D0UXosEzLUuZlOh3m8/FwOJS5iAGEfnJ0bW1YOC7mjIhoAA5mMBiIxB1HY3cP1dH+iRERC8av6NaCU+EQIXYAUcSAEAefBf7s1uC7wIG/1P3H7TMzVe999K7uSEhjWMynS5kOh0PKufdm5tO8TNMiIq01r/1yudxut2BUDNUxwrOudu3WNzN9Dd4JZI8Ej8vyt7//myTM7nPmJMT/jD+/vNx6gzDe+n//cn+rSl9bt18jMTnFWyGimBlRxA6bOri+Y3SR7+4XGO25Kkabfs/rJIrEFIi9AAkMPBAzvK5rreGq/e+/7z8nzNmOgAIC85t5tHvsKztfLlZeSiKSU9qpPkOHud89A8MAF9ygdyWmfDwdVLdIEnEYXYlJ1VprY2jsrKXMOafgzSP5O6SHiL59wALqC+GSCEbAIBHinlXYmUGH9q3W7LtPmwICC5sDG7DBstZ5KqUUNRAmyUzMZqYakJL11lSVgymSBAFH2Cv5zqSJDWcM0z3ZYMSsyx2YKSVOWYTITVUHkgBEGs6vYEtEjHvbzWiHys3AzcDIwi13hELqXqaPFtrxMUwNMDxswfbxEonklEspU855R3peN08HNR86am9jdASIjPC0hyHTPloO62aizFyS5MzBev92D/7mZQbb8LWDKu4xQySxefDOqWMRstHRBiABGYCZWu8KMPbLdI+whZgGt+59eFPoBsPBd9kSABqLl0TzRFOiaGeBmAfWIZvCGEqMlBJzQdxnTKP1YBRt2220dUrwcEhNjUBq1yK/miQj4Rs/EN6hUG+2xff/H7+IMOWcS0mlcEqwMxCBCJEJnXyA
ECQCYyRidR5I1eA2tBkaJCABdmRzVgBMpUgWB+iqtfUeSlUiN3LY1aVEkLOcTofTcWbC7bb+6Y9//OlPf/pVivXu8Uws0zSDQnUhSPNhyWkiEHcyBSSZUkEYfVtbr6aj8SZSRIpIikT52KOmqfDuTlspyXI6gauO2tbLulVAPJ3O03JIeSLCVmtbr+vt5r2xFyIkwCQylQmQD/NhyUuRBOg+bIy+vqy99u1lff5yfPhw/uTTctSU3naqd+Epe6gXISVJWXISbjqe18va62FeHg/nQmJDW21+wefPz8efj6fvjrLw+bjg9989nA7ukJOg21yEDvl4KHMRBmX0wzInyX2tWjcf4573SowCAFvfLvV6qdfMKUsRZLvzVB3cXPtOMYnx3e7E9/r0BAUkvQaMMeOekLyfKK8rK+ppjMDIUpZ5yjmZjttt9Lo5QJqKzYPQXcR0gDpTwpKig0dhIAESHk6EtdY//vGnn375jMxPHz/Ox9PD01MqiyNLSozQx9i2WtdttCagKVERjtxHRlb10XsIHt4OeCRBiY9FfTd1dzcEJ7RMOOV8PizHA5c8maMrjDHMBqInweNxPh2XaZLjMj2c5nkuJEitsRA4tNbq1npXcEgp9qFoJ2AYECUH0JT2w/I+T6GAdIk9ZpIOYAaq+zifDAnBHXc72l8LYX6FxLz/G9gnaX4XXxCzAOr+xfsh6qpWe99qq72fk/zw+IMD3K63w+FwOBxTzqraWl/XdVvX1hoAjDH6aLVu68o+KlpH8CieIhHK3XozcM8pff/po/yP//3pMB2WaT7O+p/+03XbcsnyzhX6r7yiPHm7on0T3y3bac+U3sVZzG9E8gi4MNMgH0TObBz8AVkRAhMqEKDubhyqu9lA0LHNwA0BiUCEcpGDz4Akklmy5As+X/vwda1/5jLgcPcqiAqH3t2315/Vx2itOUJOmTgRFXdyZ0BGJHAYFsAeI1ISJBZJUso0EYah4lbr9XablsPZXc2u1xsSDlVO+eHDB0Q6LMfEojYAfCgA8jzNh8MyzwsRmW3uCrHhiuxUaHrDL5Aol5LnSaLFL0VEYjZl7h6hZUrMyImlpIweEGcUmuY41GrX5daWwzJvvQ1j9DBDd1dHT1mIaQxrrbo6ICVhQOhD3Z3Cwm2X3oCbq6lboMK0KwoImDEnEaY+upkFv1Xul3NfLY5gADEwJ1Eyy2qKhhBOPbqLk8IF6u6FMEbvffQ+msWIAQgp5HA2NNZcGPoXkZ34EtyOOM7rtm3b2ntF8CQCiFNKOYWoxEyNhBGRAIQwCyWhmHe7Ylhp//lLHa4dLg3qQHPOwiSsBqq7n4sIsSCRgZgIS+aUhUjcYHS/21yiAfYR07rR6ti6N/Vu6EAujDYAndiL8FyoJMiiNjoRpiklS00BqlV1BSh5znliSW623W5IZOA6mo5GPubsAjSAExe3+nBM8q6+3JGYb3ezV6cPDy7vfS8LpS+hxNgsIgPw3X5ITGikuLMeCTztJodYu21bvQ5V4makSEZsQMSUy1xKUuvuGOcCBmZuoHdKPiKUKT89nR8fziJyffn5j5+/nj58t67r67UIIAEBGglLKcWHewMAJhEUBmQ3NFBEZEECZiHtQ3V3pxlqPHq8eeaUSyFkIUqJJckEkyzTVLIIXr9+Xi8XAEw5nBKS6ti2db1et3VFsyQiyIQkkueZmdIyLSUVITJTNLTuw0avva+t1TZ6T/nx4/fjm2ftm/JxH0iXlJOImW2tXuradTwcH07zAQFVLWK+uo6t1a1uy7wcjseUUmvH1uroHb0Xdi50nHgplNgHWkJk8Naa1vpaVRARGbh7He3a1mu7zWkKT/HhZuZI6LGOIhTbzHdhxa8Jl8Fr2RHyOzEhkPAd9wMAfD0skRCZIBEwqo8+1FqtXYdv17yWPs5TmRiZEDHyKRwhHA1pdyoJOYqaEfHhdCjzshxPp/MprAURcVi/bettXUfvZJaFJpY55ynnlBK4jx5gQVMd78kL4VYJdwKyaiwJSMKlpGkqy7JELXVb2+W65Yv0RuAgQlNJy1JKkXnO01xKyR4OwkmYcfSxbXWrrXedkiRmSMkN3HBEnAGSJ9zllzYId8dYIgEkczRAM7BhCiPIxG6GgH8WwvPN616vvLFGvv3XfXXHCN7uR777XSel1vqordXeHXE5HkuZjsc2TdOyHBBx27Z1jRnjCLlv763WerlewI1hJPKSmIjidCbnuHsELkxLyfT04Pqj9upobXQAzOnL8XDgb5VWf+VFu5UxwruqGe5PG5EgmZvTPhMJfceuro7PJPr+1lpru8w4mr7I9bwT2ALTeP+8OIATARCKyFQwIreMGIndSYeu68r8b8JK7+7ZN+wMxJiVmEeYuDoyI1PaR68Beu+UmvsM93XWc+e1RDj65XolFiRea53mOZcpl8ycloPETe+j17pttSJizmVZlmU55JzdfQwhNQLkqPWYX4vF/WMnkpRyKXvujkjKSZIAoqkqwnAnQhHOUznRaajGcGL/2B3VvA+rw06n4/Ot+tYxvEmYSynLskxTIaJeR+u9+VC13oe7tzHcgTiI8QYAkVUJYR97r9odnBhzSafTYV2355fbDk2FMeY3qJK7q1prnaT6PluWHP8SKkIPOVLEzN2LGB1jjD5602G7eozE9jgUAw/0P0X6r+84nxMSIIQH4xjdNIJTkIjz3WLAdyk1EjEF2ick90Hyt5yRb14GsCmuCsPJgfeCxUyjuKcISwKfCEGCJsM5E4sZmNn7HX4McLferTXbunVz3WM/XMGATBLOJU0FUwJBA9YkPE+cIW3Kzcft1g18TiVNk6CAe11XQHSE3jZ0TQyUwRCNUIi142kO46B3C+Xfexnc7X3uD4PkJEm+bYSQKHSYFGvHDNwABUJBDN1r77fNlXs13tpoY6g77hkhb75T996S3k5ERCBIWR52ofVkqj//9PPPv0JiEMP2UITdxEYy5G6GaqZuROiEA8xNaagjliRCMMIhGE13Try12ohkOR5LKYzM6HNKU5YyF/Dz4/nw/Mv5p3/9Z1edyyQsatrrtt5u623trROCmpMTIKSUJLOkPE8TM7nr6KPV3vtwACBX8/WyjTHK4Uu4pb27Ne5uu7XEHdKYSxHmW92eby+3thHzYZrnMkc4dTnM03GWg3jG5jYjLYfDvMy11tv15fnLZ9fG0FlsyXQsNAt2dGubAvdt1db87vqFe2ay195W2ba21dyKFGTqau5OIcy6dxzRNQpReiONxJ4J+5AjOJz7E7c7RflrtGgQB/c6x1wH6IpdiBUcGWBt7fPLZ0B62r47nR8OyzExa+uMkJKkEs4u4DZAm42aGB+fHh1JUiYRAGTGw5ymSczGuq4vl+fr7eqqmeSQ+FjysZQpCzHorp3oQ8cumLm/aY5YVAu8Q2PLY8YsZVnmeVnKNCtgapZ2VYFwYjePEIAkUnLKJbEIiSBTljyVcpO1tbHW7Xpb17rlHKxE8iQA1Id1MwCSlMwcEGwAoYkwc2IKhQG4gZpjVzfAtBMBw2HFDcyc7M+rFIB9e9bXIua1m0GESPg
MR/UIENlLov0/hN3cs/WttTa6AczL8uHTp7nMOecxNMQFZka0K35rq9frNX/9or3NmWkSKsKMZjZUGXWf8YC5mo822pYJfvfDd0Qoks/Hh//z//7PHx+f3ksT//y1n6YWwd/IiMRsiKYaxPJdfy4ppQLuCMScglyCiA6uQ1vvo3dwb7VuW91q730Mi6k9EQYlYu+/AWDPety5thh51OCAhIICQA44T6B7AU9utq23kt4EtPvCf9Wh7evotZ9B3DExMAcx3S04tDNnJkcCYTCHRCSSRWQ/5AjNtPVGROYRvCqqerlcLpfLv/7pT4fj8ePHTx8/fvr46dPheBRmM1239Xa9vry89NaYeZmX5XAo0xRjFUkJELEPd8MB/1YFHJGrmSUzBwvYKTzO3Bx3koxkOcjhxMcQ4I4+tlptDAzveERD+sO/fv7l+VqH6VAHLCk9PT58+vhRkiDi6NZq22prbY90E3dVRwczG9ocnI1FREiQ0NTNdIwo6Wg5zB8/fOjDtjq21oJ59LZ/va4R66NvtRqA9jFMnVMOUjWYug439VBC3QNP9oJmqPauqoDMaM7ubmMEpCfxFBJg6BwDxbnPeQ3dKAIv3RFImCSFfwIFzQtRWJgAiFVo97gEREB38Dv4+k1x6Y7dqQEYiJMAJ0BQHToiwMEBVERKLszEIiwz0qyWWnMz25FvRHfsaqbeqm3NarCZER1BR+vaiD0LL0uaMzA2BhWCUC8XLJvDRateWncDYU5FEBG8t2buhtbqygylCDiFIw266yKn+a3s931G/DYUfuWR4u7ZsbdoAZCGt5VIyikluYeRu4MD7SxDDmsxc+oD+wDh6HXQEbr61kb1tqpf29a1h6q0tg4IWVBEyjSVaUIRYAJlIkZm8D3n5Hg6PDyeHx4OpUhvdVtXeyeDkV0BA04kIiBiJGLdhg4aQygDojO6uuoAIhEWSgKkDk5sDtp62+rlckUiIHfQjIIA5B7Gbsx5YiaHtt16rSklcOt1XW/X9XarrToAETuyIyMRk1BKSRKJhL6htt76GGoUppWAveq6rccvL/pO+/66dRnsoUtp/9yTuX+9vnxZX4bZkktJKRFvrSLjVKa8ZMxkEooyTtMciJC79u1G0NG7jQ20IWgWykzkrpES5vvsjmhnFIY9cBu9aW/aIyltROqPh2VA7LWxVtD36Ipv9rKwvcV9s9f4CfhuJbzNLCLsV9VGs7H6CN+aFM76z1+/rrVebtvh+Hw6nqepJMQpp2WZIqYFDMGGj+baCWGap8NQkuwApkMIcmIhGKNt23q7Xtu2MsCU5JDzIedJRIgMXjuqMKX6pnX3e0cWVPxxz+IuOc3zVErhlGzEg41MKMzCEiKyKA7Cg4FotyoXlinnnFPrvfV2ud5ertcpJ7p/cQI21NEaALAwB52IHNziW+5r0MEMUH2YugOzWZioh4moAXZAetfbI4bZSc7Z3hnB3feCHfyDd1Ond8/mDlcAgJn1Prba1nVb123bmjscjqeSy7au1+v6fLkEn/e1sBhDt1ovlxd0FZy9RJ0EAO6mZiPm0m6mrqNXGy0xPp0OwiQkmYs2//T4If/VIuab0zRQAWYjGmPsRQwzS2JJkrObYx8OoLo70oJ5xKVp7+jea/ipRZ4VRNOGttN4NbzyAgkE2nfRd/4c0RcwI5unZJNlNweD0dv1clim8m1HaG+0/tdq7H4pd2YxilNyANLeFYAR5ZXxGUdgFBDvsCWr26ZDg9ILAGOMWmtrrQ3NJb+8XC6Xy7atp4dzTklVr9fruq61NmF+fHiYp1JyZia3uyXsvip28On1Z7293fsUL3YSAGRRZnptDeJ9CkqeMiLV1gi7AZoM2sWm6bT1w+EwTRNdqwKGkd3xeDyfz8QEAKbeppHvVczoo4+uI0LHRlfwuw0nAkYlDQCSUhDi51JOp+P1tn2evnZVuA/zvnm5jdHaqNQMwEZXxDS1yswUpO7e3AK3M/DdHW6fi9m4z1/1ldFkprG2gkuEAKM3AByuboa0k3KEMZwrxx2MEdlRNX8XWY+usUaDj/znnj2/eimgI1KKrCt0NwwcHUL7qIQ6FU5JcM+DSl259wruTMIcEn9QhWHQFeOXmjO6mva6gvY5w5xlWaiwewf3GDxKSsTMZ5TL8PIFdChEPjmi6Wh19Va79vV6JeSpTGDVUN3VHUahOfE3QMw7VkI8ie9G5ve/3yfLjoScdkSQhXexkt8njkAIO6vMnIbRMCcjAkJmZHfkYba2fuu6jlXdRJIDtNaYcZmmZZmPx+N8mIkQPDx7EYmRDBBYaJ6n0+n48HA+HGZ01/5GUgYAuf+eCYAJmZUljdHaaGCUqLAwSwYcfTTQ4NVLKRmYTKi2fqtba7dtfXEkKYxkDkyAZkgmrbKIkLmkdDydW7oxYmvb9eX5erms22auKWXJGXclSOK4Y4jDVHvbam21dlV3fCWI9N5u1/X2vL4WMffKmXz/7J2Zp1SmlImojvbTy+fnepOUlzILsquN0QkZGTGRshk57kV0YuKkME2DHs4+Vre+Xl8ul8u2VSIqJackQ7GJII9YvvdTK/KObLgN82FWx3AgdwjIEpEiIpYMDA3f59G928Vez0BzG/v2IcIoTEjkTgC7TTgRm9nY+XDraMlLRs4iAoTrVv/088//9C9/QslTWR7P5+8/PH36+EQMkhOPAWBoatrDJy64g6oa0jomZHA07dXa7dbW1XvPnA6SD3maIkstMsLU0REchlq3N8duc1eDMQxotBa/VNUAMOc0TVlEAHAM62NEkDYhJWJHJ0RT03GnExHda2OeSpmmsm5VVV8ul+VrXkoWItnNpxAxPtdQV3JKyRBMO4CPYYNViHZrREWD+znqpjrMnQxgIG1AiHS/FiKa5/l8Pk/T5O632621ds9QskgXwjt7CYOD4zsUH2dVMND03p9er7eXl+vz8/P5fP6o1nv/+ZfPLy8v4VOQUnL3Vxi8t7autyykS94lIXcxiA4F8kQEbr1WbRVcBZ2JHpYl/1jQ+OXr5fj4Ib1nkP1bszDY2zLAyEZgCYcrjBpGRHKWlEmSDlWHPjQIDKY7K1NHVx0IPnrvrfcoYjwMHRFCvttH2MxjEC8dVH10GyO4YSGRQI+ynwABhTALw5x7W26n42lZXk0Ig2kU6Caa32XLuPPF9uYSnVCST5yy2sjuTkgTgvhuI/V6m/D+mYOqhhwhpRTfKdrTMcZW6/V6ef76/Mc//ssf/vCPh+MhpYSArTZiOhwOjw8PzJgSE8I+MdExeht9jGjhLazb9ijv12vRMKtVAyQHEFEHD/ku7M0pszEgMImp9dpa7QhAyH4Pn085zfM85ZnxMhzl/jfzNGHAgo6peClZzcxN+2i9jxbhKHtzZu59jG3dem/BQljokBMLiyefUppLnqapdo0xVswH4d3aH9pbq4QOAELUU++9d2mE4NpH20BHlK0Oe58TH0uIzAlQzSKu0sNeligAWiYCMB3NzA3BTUPzXHKaSi4lZ5E+BsAeBonoYLYnRhEjkUckqhuGyH8HAl8bjpDsvB7ujgDMOM1Tno
prdVPZ3SWAwdBG5E4ymrsRpVKQFba1gVtKIIkRXQ2CFq5ICqzOZoY4bLR+fcmkD+fpdKCUDFVbo2FZCBWTAUiChylVsJ+/EFyH11sHBBZPFjKRdV2vtwthmspBrRoMMy+GQ6i8Fyi8zgV3FW8QaZ3eZEsYp2hQApk555JL4ZTuBHOPxYk7a5DMyRyHoxqakQbPjIUSSHKkMdRq7210YCg8EdMY3ZSnuTyczx+eHk7Hg4BBax5pz0goQWyzVNJymB8fH8/nc0nyq2JT7rLenXEdzKxKbdQB0EoaOfBVZgcAN0ZmEmFBIRO0cOq2YWM4Qu+1dxZgBzZHc4ONVETQzC3nDLbTE6+X58vlOtxiW8xlIhFkCe9DZAYA1zFU+4jIoRiu7D3fttbr5bbdtvfzegcIw5548ph4SiVxamNcttuX68um/eN8WPJEgJGntE/f2Q3NyVNKKRVADnsskVRO59Foffnl8nz7+uVyvWyInFMiZIVdww071fGNIeH3UnU32bRXioACwlDrqmojVBuj9fW2bfWba9n7eSL81jWVmIh4nwHHHJokhuXuqr3b6DgBEY89QRLaGNfrWvsLwOfnr1+1NSSY5zLPs42Gjq5jtDranjN8/8nEiMLICKA6bPS6ee/ivuR0KmWZ8iRMMMAV3fcYrDYu63bdqr7V+WDdtKqStdZrba2NMSyMvJjZHfrQWmttzVQRUIiziLsL056Msn8k+FpMpJRKziKipuu6vrxcT8uSWZZpZqa3siLo1kQ5ZyMa3X2M3oepadLWBssQERLK7iLJHQ2U0BIiNcYVid/sXoloWZbz+bwsCyIuyxJGYWMM91dE8NuywAEAo/QXcUTHu0Ojqm7bdrlcXl4uX7++fP36zEyfP3++XK4AwCJRiEvIvlRbq8I4pmQ2VMMOTClc183A0EK00qr2Bm5EkAiLyKEcR/eff/vbdDjmd0XMv/valQsxoNmP+DAK5NfnuYNt27Y7a/XWCVuV9XYl4vW23j8cQCTY0bihYx84EiEiCxEAmdnoYw8h928oYgiBz5GLIMAyldPhcJjnX5GUwzme3Hd5KIRi4e3bIBAiCJMhIAb3icHJ3whMOx7DLETBY9vleGMMvJPTiGiotnt+NH+hL59/CReWSOBaDosw6eEQFLEoPMxNh0ZN19WiiAG4e9rS+wsxDeU5OiKOAUTNTXZ1cHAHRNwBIRIoe+9dUmLa3aGCYhForpvC7hxTpmmapsnv0kE0c1MkZMk4g6qN1uq69dHDHsbMYlWOMfbcifvpx0gl53melmVaax9hV/JnJfH+oI6uTASqqmO01kkI3cbQ7rHnuPtuv2AAu3aMiMEjCs0B1B3uZmx9W9fr5UXVkBmRHEHHqHUbvTNyznkqOee09QaAKXFK/Ep/vkvtdhrKfz0Ss1MURDiJWSeknKTklAIUMnUd6AlDQ6qdSREhyWC0kpNkQoAxwMyHQoj/3dHMEYZrZR8Tw2ni84GRx+jeGw2FqsjKM1AWOkzpCeDT44RYDRy8xzNgTWvvt8ulbpUAc8qtJWdmMjHOwlm+kY2FA/adTG/4rXLxlaIWAqaUUplKzjnCm37N37yPceNzdUADsojHJhahUrAUSr2LIjsS41RyluSup8Py6enp08enh+NxyoKuPszAwB2QAAUgLJe8zOV0OhyPS07f0q4A5G0ktttnECcmwqHDfTc3E0lCJCxmdj9JKOoFNBekTJJFwlfNdHjcHkPzrhWwI4ODa+DFrbX1dnt5flnryinnUiIZLpKt/N4Ivn6M0YULs7uPoWPU3vt6u11erlvdXvXigUiqG7qyMSMllikVIr5st8+X58t2Q+Y5lTkXMDdXwcBhApFXAIhFbua9914bsx+Op77Sl58/f/my/fynl+cvG0sWScNpjF1tGUn2ugssY7JI/JptRowUp7xv1tS09d5GNKjDzFutt+vt0/OXcHN6t+mGHCC2ZZAkzBK5SK+YORERiw7UTgSmOsycOBFLiLZzmQ6HI1LG23q5rF+fXxJTzvLx8eF8PGonUNDR23rd1mvdbjpGdIcsxACJmQBdtWvVVsVtFj7P5VzmQ0mFEHZanptqb/1yW3/+8nx8vr5dizs01a1XhK21bWutdg2/BQB3772r2XW9bdtqphFj6VndLUlkn4EZusGeMusOCCKSUso519Z6G7fL7WW+FElZUlh8jjFUBxI7ADEziRO7aevatmo6Yn0GrDgfpmVxRJZkbi6QEEQ6wwqQ3sqSyDY6n0/H4yklcfda6/V6uV6vl8ul9w47ySEqW3xl+EahDgBmTsEMYQSE1urtdr1cLp8/fyZiEb5crq11REgpIUCISBFh9EZonhms2+i9t7oRM3MqRGg2wMCVYAxt1UYDVEIIoTKyPJyPf/PbH3leSs5/fafed4O3wgVePXsDPGciJA9QQUf3AUEBuV4vt2UO9C7A3lp7rU1Ndy5M19ZqbW1EQFL4P0bOlru5t9ZaqzqK5UgH25sCRIpi1wXBIc7O6dtx0h1Ruu8frwwkA9933fhmZE7BrVaFoFSGJcprHUP7ncM4gOP7vE4Pg6EydGeejjG6e6s1zDSXZT6fHoSIEU3Her2CWciAEcHNI9Wwqw2LjWIX6L7Ov2Iq5+oRJYwYn05U/RATZYwvNAdwVetjqGpw3eHuBTt601FH33pb3UbO5wgYzylZmEGA9zrWbTWzeZ6nqczz1Htxd9tAh6I7EebEMC/hs9V7d7cxBgI4YsnpsCzLvFzXdr2tAH63HHl7kF7jV8MCAMF7r8zgzAD3cdHodsczHYIzzogMiO4aeef79q46tGvfbtf1dtuOx/PxfM4lA0Kt9eX5JeJ4Uyq5TNNc6mgAPk+55EQIjvecSFNz2weWMbXco5t3NszbOf0evNhnXQaInCSzzFOKb87EpiE6NUUYozuw9Y04ZTYUXyaUhODYKFpx7wyNAmkfCJVcjyWdFj6UNBci4cZaN6/dejcgODqjSC7pxPLjp4ect9vq3RlZhmNrrW9r3W7au3BCpkboxACOyViN93yGd+si5ukO63kCVAABAABJREFUFsA+mCM4kBm8BscTEacoCksp+a40QScC2q2QwQxA43wHcGREByd0DA9JmWc+Dx7EmJzXQUSnpSxlSUk+fXj829/95jfff3eYMrnBUAd1NIPoPxJEaYmaSpqXsiwlvV8tABBFTJTXcTPDr5qZdlx9l88QS2ISdze1iMdxNDAjg4RcOM25DLd3bPtABAFM3UHd0C1K1Frruq2tVVVN5c3uMwSuwZYIopXft7/4rhHM1nvf6lZ7s3de3a8bjZqRWRiXRcuobpf1+rJemvZZZJI8SY7HNUnilFgI2M2HmRI6IahbH7rVLSUCOpBMTrP6VIe0IUUyGNfWtq31Yea7ATHAO5onAPj+hjfYlCUImEO16ei99d77GEPV3Fptt8vtcru9N/BBxD0VEgMJhbBne79x826Qz+ARledghgCxafbRWh+OmEo5cGEpQGn0Udu4XK+3261ttySOBKPVtt2226Vtm5sGEUGYKKyZ3LRbW699XTNCKfk0T8dpm
hILuA8brqiow9baLtv2srW1dX07RsC6j6oVdGt7+6pDMQiJQ2ttSBj8T3AXlqkUInRTFgoVEgDcDx4FB1UlopxyCW6Ke2vjel3nUg7LQiRjaHh/BeQGAMLsgErcDHvTbau9jqFD3acpPX14eBjDFMo0MSVCciNs5A2gv3FyiaiUsizTPE/BnJgj1SMlAAhPl+DpvW58ocWKOiaWYwpLgkQpp5wlyvrL5QpISUSHmRuiu9l+liIAQG/NlKdMvUrd1ppFiCTllDuzDVU0UycYQ3szHUAa1AciIpF5nr/79IHKlNJ/LSfmbe8288j0JQdnjJRgIsYiBAywLHPOAuCmQ3XUVlU7OPSud91KTHzCWn4QoaAQk7sj7Eiuu9fWtq22PrIZECNEjrC/Sr1EQoMgJeeUE/5lufgexI4Y5LF4GC1MREzHgDu/HIGY+Y3YFNrXuIOh92FmQNgDRN/o6hARzEwUbv9mQwcy4ZjmMVpvta7rBaHXGuFciXdZh/ouTXSwfQr27UYWp74IBi1uh/YBASzcYqP+9juuAPf4qgiu8iCZMgojkyMoQkB6MX3znau1OwRSuFa5ORND5jzNBthqQx0AyEzMFjZUvY84FMDdwPlOa5umvG4bgEUS8vtrUfUxrLOKqpqq9lo3AC9hZY3oiOYQmDTeO2kgAQg8dccqIk6pj1FrW9em6sJfD8fTp+8+HU9HEd7W7ac//Vxbm6as5innMk25be6WkkhisJ0zFYLh+HFhqv+W4UyItk+VXpfe62a7LPMyz8hsQEmyJEollZJLySkRuI/hte7QU4Lu2gGdoKMDemcHB0toIO4Cg72ioQ20wWRCkIUSU5ACUyFkT0Wxjt56HVAVhzMyT0wPjwdkzmlsg53L1q3Wrn3zsaKqMDipMGji2IBZNB7jbx6zvfOH+Cy+WfUWhRwy8R4/n1KK0L/g0BLtKHf4sYXqEowJy5TFWaYZUwGRzPLwwHmh88BLbV8vz+BwWE6H6TCV8unj0+9/+5vvPn2YSo74q8jS2jkxyLCfrcDCqcg85eNhWubynuAjd4AwXErIAVLiJIwYi0zNIjgvTODRaWfemQ0cDRQSypwnn32A4ZRYOKGQhxMEQji+mwcQM3pf13Xbqt+D6UvOsvugRLHrArhn27ip9pQS3t1pd6DVPY6TnMvrLDniOVSVTRAoSRJJjtBGe15frtsVEUvKJaXEQgHws8iceU6YSH2MUXvbRqsOZKa1bqPjOi8InOan6fx9Pv5CV1TA1tvXy7rVTSOZNGi6r/MOBwRQ07qtF+eNNnIapn1oHa2PoaOHcVN0HqP3W1233uxOUCLcya13eHtXeyIjfGtPEgVfTPrQNbxDUkqO1Gpf11rbAKTlsBxO8vDR61rXl6+qdr1eL5epiJNQr1tdb9t67TU4aEgEBBAwi+sYfVy/PrfbrRBPpZzmaZ5KZiQ3Q3dX1DHUX7b6UlsFUpJ3BzmYQu9WbWw7AN9NFRHH0Fb7etuAaLTh5sHFPixLHlltEGEpKUlCJDfXoYBgqqZKiDmluRQwVx1gsK71ettq7UxJ+zBVj6pSDdQwOcWacHTFtunz19vlenleL2WS39bveu+j+flMh+NCNIlmUlYFd3pTXSCKBMV4Hx7nnEROoWdh5q9fv44xcHe6dDOrtcXqIiKRKaWdl1DmLMKIEB59vff1tmrK4RUWITKtIUC4dFhrNQkXgZtQEhamJCn3vm2Vmc2NwMkQdmOy7qTunCk5oBFJ4dPpSLl8Cyr/W6+3uWjY66ipedjWmTsjmGameT7kXCJ5syQ5HZbjYQk1jaqaDjc33d1PAX30Edp7REg5s/C+Zofb0AHD3bfWbnWrrZehQU7x3enVA06LZ19k70/e92N0dx29g2H7/YK9iPEw0O8DarXadB9SIDFLOPXFwEhVx+jRlsfyEhEyqlqDq8F7/s7uoOjZTG3ocHMHQw9g9SLM7tp6jWI35zxNU8lZJAVNB3buS48p3Oso2d1j2hZ1idoAMEQgNiQLhnr0mwo70beU4u45l8SCQWWTfDzOh6UsU8pCHYzIzbS12lrlaGUdJcnpdOy96+ittdgSyzwjkTuMDpFxoaoqVkp2R2ZS1bpuW629d2acplxKJkIAf/XDfbsW9dYGkRMTYQOgoRFII5SJiJETsIED7kZ2CVkQ2Az2ACWHYT5GzKL7bd1eXtZ1bTpsnpfL9fLhw9PpeFy37Q9/+Kda28PjQ8lCLNM857qajX3kRIYGOx3o7lHxuk1xTCEYLWob3aOYXh+wnNL3P3z36YfvuWQFLClLZk5JcpmXQxLsbetdr5dG7ISGLK4DQsfqzuRgCQGJcGakBMq2QUetZH2eWICs1qHQh6nznAoKlrmlCtum3WDrsHVYjDjzNE/mhNSLiuGEa7tcDLwJdMeRwJy0ZARidQJw6gPorSjzV3XR3UjCHfwenxpfQNFIC09lKqW8MqPjf4ZojsE/xUAOXdEtJTrlidJSjmdFudQGTsv5yPNJ5mNV+PL1a+9aeJpSySU/PT785nfff3h6SDnFit35UYBIiTihA4ABKhCw4DznD08Pjw8nfh8AuR8zu9Oaud+TbhhcfQ8AdjAHusdFwU76ces6tjZu22g9IYsI5oRM7IB230wieIFA1YaG5WgbGm5NHBhMtEG7f4iBiUF4VNz1Aq9bKtyZ+QDIAbm/g8iir0EAJkqSmLibbr2udRs6Mqc5T0VyYsFQSjA5+qqVjackiaFt1+vli0FqfegYwFRrQyR1wFTy8VTWNuowVacw3YTdQS1qV4gxJ5h76/0Kq3UnIHRUcFUfHjS1nThJRIAE5jFNf79mdjzr3oPulhX7g/hmsAYAe6XqBm7h/I2I6r7Vtm61qwIQs6QyTZySVB+d0Htrdb31iTBR27a2rb1W1c7MEInRiGQIZr3W1rbb87P3vkyH4zJF8hqjgwIaOqK6d7O1980MSuFpeqe2QidW4N631nY/8diIdYzeR62dhMGBiTmhCORSwikC0VOi4MMHO0FdTS1qtSwy5wxqrZOBjWG1jd51JNVhphYqNR1DezcRCg+OPJXZ+6DWqFaFvqq7VvCBiIllyvmQ6EAN0cJJ5tfBSQF0xez19RU9cVBAzOwOYZjeO/g72YJjwHw8LikJgDFLSkIUMKURYWAz7lprExEWiQOvJMkCiYmFcy7zPHoZsNWQyTMBA9NrqJI7EDqJcyJODDTPhpLpL6MX31Qyu3TLTC1aSzCTxMtUPjycH58+PDw8LIfjPJWchJmy8JRTuJ8hgJu6utNuAWmoMV9LKQFhzplYEMHUBlrfU2l0XbfbTbbWljFEdmIcANz9x3EfxRGHR9y3o/nYpUKE9NpRoBqAgRqomgG2bre11aa7vwGRyT1YhzlgmPi2zKmUKf4+apf9AdjJvzCV4jGkcI+bbqZMUkoWFoSwh2/B/uk9g/u+nll2Cp1FBpbZO6NYN+99tN5lKAV/lUkEYiTtZro3MHupifD23gyMUYiQGIV5KqmUlJOEjsZ3qq7dL4KIKYmMnmrd1HbdJDOnnCczL6mk5GbruvY+HIGQRcRM
CSPUAYKbX7IwhQhIUnrbx8y8tb7V7qjm3jcUGZLS3I0lEUvJQkg04qh1JCEWQHL1gLEDxUQiNWtj1Fa3bVvrernV263K5TZUa2v9Y6+1/ennz9tWu9rxOOcsvm+wvG+zAIAWDYn5jumOoa9dsQiJkNou5f8VEiMpPX38+PTxYyrZiUHEWRzZURyTI6q13tp1dETNCZFEx4haPXRY3Y0QRSgLCThZg7GN7ao6aF6Y0MDUvA3rAxyZhfMEZYZ1dTOvSluHqlAcWbhMyQG5UzPmzW007RWtkQ/2AehZAImHA4G5ckr0rQYu/vBXaED7fnV3ld7dFWhX0qLB26D5lRYjkvLxYTo+zQ8fhhN+fVGn5fQ4nx6n43kAPr88tjoEUmJOko7nw8enh+NhYcbdB9TV3GAfJwdetLvwEUFKclimZZneG0PLbvPo7hDmQOZgyC5C4AAU/vg2zO66cAB0Qtc++lq3r5fnnz/b0OW45NMsIiRso7tq7CIhvkGA4Tpa7a2pGiGmlEmYmCECukz76KqGgGNwbCUBMca2W2vtve8AjOcxRt2A6V03dp+EE5EwJ05EVHtbW1UdjDSV+TgtsekG5KFurfXLl3XB6cMPPxwPuW0vn39q3QQwlSzCPNo2+ri8XNrYpvN0sNPt5TpoLHRILamqtjHaMHVwAsdAoLra0IZjrdApvMGYiUSSlFSEiXdrcQJCHda29ng6C8vrE/HGGn8d8gf8y4y47+txNO6irChfwjHRrOtYa11bMwcDbDqsdWIHhzLNhTxoFq3eQLFttddVR0fwksUoh0M/IbiP2/Wit8t6uQhCOZ2XeSpTliTgaq6KPtyaajOt4JaknI7z6UivlTKhT8VLGhv0PvoYNiyKYo+0WlMCliSCd9u0GB3W5q5hfeQAqqM1o0GAKHG0pDRPBRwAax8KgOG7G+sgOGvgPnqvgOiQUiZJ8zGn5bQ86NPHcfnl+UH+6DaeDufH48enDz8+Pn08zOes4tvmrmQYZNx9Wd+hr3drmwL6OhwOvffr9dp7r7Wa+W6u9s5mJvblWreU2KwwZ5GUUsplSlKYEhMjwhhj29bwiYlmKLEws06ZyJlIcl6WGramscsDehYuCQFRAQagAgoJpInyjFIQgG0g/zqp5/UVPwtjjnXXKscMSFVVOyEe5vmHTx//7u//7ocff3x6fFoOx1JSig0OEdH3Y9bcdZiqDh2qbSj0oQkIuUxTcMY9IDVHZx9B+hi995oS1dr60GSKATGGoum+sUJI6JPIN0iMmXfzbi4OHEAFAJmDDlD1HlujQ239Zd1GHwTExBFy3xAwqN9mYQCAADnDPC3CDA4ENOUyTMfY5b5INJUiSXJKSBjAjqkScdlfWZIgkJm13s1cODExEYvvTgvB3CVnDoZUXEm4TG0VmIHxcJxySUxuZr2bmkaRtk9Ddm8HGjpUKxOlBCCs3aJRzDlJlmG7lniX6t5h6B25ymkqWdUilMRMieBwXHJKh/nQWtc//XHoDSDac0eElMVhIoI6+m7+zyTC81SmnN8XMdvWbrdVTeo2dGzhXH8+W8pTyqUUZmKN0CLA0Lq7e9Cfx+gAlHIRQhoVKqoNNQ2ArZtt6+Y/fx6qRDiGPV+v61rNvbZ6WCYk15gqAiLinheM4EARA2KqtW7b1lQ1/AOZmWg3IA387nWBcJLTw8P56UNCA0IjUqBuWIdvTVWxNt/Wfnt5cevznIh59DFljh4jqD8KDopqrG30dWvXl/XyRdUOBTgnt2FErenWtClkoTzledBWqVUdzrcOc1NMuJ/oQsPAxuittu3W6hVGZVQwJMYiIMhqPogS8JzffMhgb5LvpCCCt0RFhL1+e63W9w8Ng1ZCnAgZ7kU07FImdEADlDydnz6eP/64PH03nHj+ouaH81OZDyhFAXLKqi7O8a2nZZqnkoTB3YYOG+pDwdDcfbACOaAr+AhjGOLAofmb+vK1ADC3vZtxdVcSFAcgMxgGZm6g6A64C9K813r9+vLlTz///E//Cu6ffvxOpsSItJvWBnFoVxCbmfYerkruHsf6qyFm7PQRW4qA4cQQgsQgYcUr6DIA4OYKe9DgezFIYIQRWB1mjn30PjohTXl6WE7n+ZglE9Hd1UQb9Aptpnw+z4c5XV8+327jtkIqhx9/8z0T3XrrW10vl95WTpBnaZVl8IQTC406qsHoFuP2+BXKajOo2Bwt7ccQJuGcQsOQJPogIkRUtc5tmRZ6Fy+O+/P1q9fOF4pLDVY5Ido3LDQ37b2Nra6t1Rh3BirgqAgoSZIgMzmotq0P73XXPb8yb/a+18Bq2663cb2O1lNOwpJEWJiY3Dwmowo+3Lt7xLck4VTekS6JKGfM2QFV3YaDwb5q9n0caPdgzyLh+UdDtQmHWuo1MWqH4ph3VAPJcnZ3dXfoam6Gdp/P7j5qgG/OgkgpTywFiGAgTPaAx3Odzcfy3XL8+HA+f5inU4ZMA4ZhjFfx23HxPt379n6Eg9w8z9M0bdsWzK39Wd1X+hvlYs/f2QVN913iHkeow2qtt9t6u10jcCAoIKUUABOhOvcRfoLqY2jXsE4cRVimubAQcRJmIikF8wRcHBnAg2b+b1Ywf/66C5Z3In9O6bDMv/3tj7///e/+7u//9vvvvz8cjmWa7uV4bCAKFgtrx3B0aO+dagsne1PzwFzDyd7R0YPkpaOv6zpGJ6Gt1jZGHhLu5u/WduAxdB/ofKO0DJlF4DDBUYy2rnXrw7paNzez1sdaq6kmFGAHYjQiu4cqYQQH7nhYDNPjJxKJuHfs5ho2XiwSw8F9NGZmajE5yjmnnIhC2KeAQKHpjT8COu7qSAIQEqEEr/IkREQKdFPdkVkkEQGp2eihvtxvDSLhXYin1kdHQkMyMAcdwxBJUs65jLHbDQRIgwB3VSUCOAALsZpiA7XWe3eAUqbQjaqBA44drnCz/aaIkHsqOeUsOcog172kfStirLW+ri1cVdZbd8NS8jAoy5KneZomSgzISGEyhw4Yj07vtY9BLJkmoZRykd6kiQilxCxkAFsfvV/VXVJyx8stLJfcwJCg5HDkkIj2Q3e9v7NAX0bvLfY9dxaWJESdiPTP2JYAQMRlmqdlAe3g1t3JQQyqwq1bNhyKTXntMLoN79Ot3daWi0RFrXdFaiIgk9FH3dbtdt2uVzWrsyQoiIDIXaF1HIqZKBWajKdNHNXBuuLWjJNlQaBAR8xMW+vbttVtI+9O1gEFiQgFiaNGTljStwnjtHtywxu2shNFXslSFOzdXWG5+w+F/i4Govs3InIgRwZOkuf59HR8+jidPwynZqhmh9NZ0mRAGmbogInCos/zvOdf4V3M7vdphru5qqmjK8AebhrnHX/bick+D9v5GWo2xhhqnRnQEUgdh/twVwd2d9OOYEywrref//TTH//xD3/8z39g5pxTOc6zHjD7zuQeMbLFgFtrra1uY/RYqGoeTnGvNJe7NwGOoeu61krgexeokbaDiIhm1nrb6raua30nS/YQr4TGQzIRxc918zlPUy4fTo+Pp3MJVi+4u/ehliyVdDwvH55Oc+F
/+cPPf/jDzz//3JfjYymZEEddW93qbdNacTSywehCBCzAZmSRPm9ufWgbo2v4ZewGiCSYc5lyOBeUlHISiTNm1y4hqhopxJ183ccoMacgUFIEFNJeHgcZzXH33cAwRnGK209uPsbWm7Z6GX0D8NiDMfiBFFovkiIi6KN2096161A1iwRERE7EBKTQq99ut75ugpKl0N2GJ55qQHIgBTRADyYPMfs3QemElHISyUDsFtNUJKCIPGAkJhbmsFqPMGQAGKMTemuoY5juQifkHWbjqHUAC2RHaGrD3Jq6oeoOxphhjAkw+E85S1nyfBCZAZAQ0tYxy6dPMxTiHxd5nKRMpDQuDTcFtZ3n9G47w7uPwitQAXd4JvRH8zyv63q73V7/dSdmvPtPVCMEcwQTwvf0Q0viCNj6WNfb9Xpb12trzR1SSmMMNSPBxSZHJGFiccRuodqtra4JwKf5dDieHh+WZfGcWBJTVmRQAHByfKsJ/vLLd0zTd2sA15R4Wc4//vj9//Q//vd/+7e///jxw+GwACCA+bCOvm+OHopMlCANgXsYzrMId8kyhqn5UIUx3HS4gVv42tdaL5dL65UE11pbH20osSZiItRx54FFFBe/cmLu3ir3SbcDuZMbqtno1tqoTVsfTa2bdQuJrxLAPuAH5CC9cmxuu/A7WqBWNwBY5jliuUIjue9XEEWI7XU/CyKaKhKFbq6ULBJ6BY9QmMDt3L33sWeguSNgSlnSG3pBRGVZpsPCwsBk+3iMkJglfGi7hcutgWHUMzrGaK0BgiENYwRvqgZEnFOeUo+cxZ1e5B5J4IhIcFdIefCvTbf12i2MfNyMWh/r1ratjtGJdhPtoB8FeiHCpaTDMhs42PtkN3Dw3kdtQ1IyxculjqFJqjvO82Ga5uMyC5HBHtsaghazsJ8YY7TwrmCWPE2Lh52VA9DWBjGpWe2q5kiCgLe1jaGIXRIvY5QipUwpS54yE6kZqQ4EQAgWVmt9tObuzJwS5eTMDbDbq/3zN6VMpCoIYgQUdXcTJDHkbuqMmEBmKgrA26gvt/H1uklJOScDrqPW2lvdMjl5sWHrul3XLdRh2y2VBPM8cRJzHkZBi0iFDXGqEMxEd6tNw7ScmSCOOO2tj7X2tXbGkcgdMKEJEZMSgoASeSJ4K2LwlcOJu5ESAYJHOxwKRGKmnYJp7qHgSwEcswR2aE6IGC615CgoE09LOZxkOoEUMOQ8gxmQGMSTqqYDCWMFI0IuiROhBAzNTJGiblFlhVQHbTjukRT7tvptvNUrO2Zny8fuOjSqFtBIp4LhYBBqZHB3BfPe63a9Xb++XL48J5G2rtq7qdHu6dS1j7BiULfWIhawjRhCqupubhvser2XMkCI7mM3dLcR2z7i64F0jw6+nx9vj1hQ9ImFhEkonMKBsgjlcijzaTksZRKW2MrjwyhZTk/H7z4+PZyOpvV6uf7yp19+/mOtZ3v55WXOZYw6Wq3rVtfaWq1r7es2tq6q1vbQhWA/606CNNvNmWDnfQdsQEz0q1iI9yfHN26XSChJUklMafcWQkR3+Nbcc0f70BGdmSSyeQF632rrY1TzESNhYSZmDfdZBk4k4ejbm48+BowIt0bC0GK4eleINmrdrI2cE2MKq/57QPid0xyHx25LQaGtf7s2c7wNug2qzkqMLCzOxoyJJbEIkZBkyVPOU5lYKGyXgifhpmAYaX4cO/ke0UCISCllgNx1aJhdBTFzJzzeZ7gkYTGbpiSTSAZEUc8AiYgOEx6SPcxwTIAEq3ltXu/27X9ufPEt3e/1r17rmPSXQxZfK5+9e3ZD8zFiHwlaHd1Xn5kF02J/ziXl0B3C3k2TOUQKem19u9223u3l4m0sh+MihecFOFkz7Q6u7kajCwCZ/9eEJ/l9bbLwsiw//PD93//d3/z93//tjz/8ME+ZmcdQC4T37blFRGQmiacdADjSfpGZRaV37arUB6C7qSshu9n+aUYL1YfWFrOEbMl8RyPhdeHHrru3id/cgeSQxkAdQ4eruhmMobVb7aP1XnXshgjuofshROf9ewZIFt8zgDYzb60BYE5JJMVNSiIOEVBqAR2Y2Rh635N8/9R2PTbEAbnDp/fy9w6l7GJGliTyprQiojKVaZ4hsvDUe1dkCBlX7CUOBndp0rjD1YhgCEOHg8UjmlKZ52VZDqP3AB7G0NG7uyHtGQYIHhMcgDcsTcfYdG1t3G61j3G9Xmprbh0RVC14zXwXHbyW77X32zXsH/c6xsxbG72bOwOAqtc6RjeR9evXy3KYj4cFERDtlYBt9+uKUgYMzTRiqwHcTBEAkLY2Sk7EddSutcHzhZBqH/A6aYfd7DV8CyMBBNzJaV+AcQvAWRiIjEC6x7OLtjOU6VcseCJgISIn1G4DoAGxASk4IhMDI03AwL1RVXq+DSnjABkAbxW21ba1CxoTmtpla7faa1dw672pJpJZspglcxlGBpiEEnKeuHU1d7PemrFYzgQoXb0rDB2qZMbdqKk3sg6YHDO5EAhh5EXZn+9k8ErqRYA3egwSshAzB1qz6wNpjzvnwOGJwCxgGEAx6EZCOcl0lOmAae5Ow8E5ITlyQmIMCJPCbIokMRJyFgpRlggyE0Z8pIJ79OX397efjwjA/wYSg8Ewu9MwzMxikF1VwcyEs1kP+Vx0LxpG9W7kLs4JJCGTBQCq2qG32tpmoTchitFjba23HszegNNBeM//AlW10Zs7iiAi9j7GGP0ukEsplVKYw9xwt34H8GmeXvcyJEokmZNERCkSI055Asgp5SmXkorgPoozMEcX5uNx+eF33/3420+HaX55bq1a34wGUNX18/VSJp54KKxbv13W2+V2u65t3VptNtTUhnnvce7oq2moq4M5OjhEIF7vJAH8Kg0RUZF9a0MCxDG01tr62+JHxJTiRE/78CjKHAtkyyDS5Hd4RRE9C0/CJSci3FpdW1O/YxEkKSeWrAEko2dxEUIYY3RtbRiZQfiVsyRDrOvat1u/XvVys62JAxruYUOhDjUDQDe4T4QCH9rjc+7+vwAA0Ab8ywv+04tsPQ3KnKC4EArjnEuRJMiCXFIuqWRJRKhmGgRl3/ny6EiIkX9yD2uLTRTFoWQzBTUkwqG9K8t9VBc6r8Qy8TRRTs4EiISMzo5kiOhGIbyx/YYQ0C66/HPgAt+NkvCtpL7ftZh0vPsnxDvhH+5FDOFea7MgveVjjb6Pe5yJSsl3qGbXrQT8QAE/IDniMCf1Ya7mNrRfb9ttta2dHz4cHjxzAZS1rb0OQHTr1m5ZSzrbv1fE7HCumjlAyvnDx4//w3/33/7DP/zd9999mKfsqn0MAIvWdK8ydsCJkHdmLwAgkjDTPLEIt07USXdEH90ZoTfsTQmg5AgBBRaJOiYqWMVIJSNGuJcN97f4jneJQETFLW9de2vb1twwWP9BjRpD2+gjvJ8dgIgZLKa3d+8GCFMLEdw9lyNaBsICMSRRIQzecTUwRBjhMx1iJ99jm3b/zrv9XWAzcn8RwVAfamZDTYlJ0htJmXYpUzEAQB9Dwaox073UEGEGNOXeu2
qLrERAKyU7QjczVWYS5tPx+HCu5+Ohbav1tda2bXXbqpkxo4UXGiHinoA9dDj4VAo4rGtdt5faIi7TAmVFjHEYiOysCCIU5pLSVCam6+22XS83vZvruHtYaYkkAk5p622Yea39crl8+VKmqTjoPJccezbsUjLbddDqiqrdvEjKMcUQEmSuXZd5u95qrb03vW016rGcJE+SJwnEJGaCbmE/Fo0X7qwsBBJCEmI2AyPlZpyIBROSOyeVlN/xrgL6JiZmN2JEUO2AbCgeFHpCZppS4v+HvT9ZliXJrkSx3aiqmXlzzrlNNBkRmUAChUKhgAKq+DjknBzxNygUjinkH5DCj+CcA47e6AnliXBEllCkelRlAchMZERGd9vTuLuZqepuOFAzP35vRHZ4EApLJDUjb5y4xzsz12bvtddeK0GITnqYAR9qdUOieYJpgmn0gIYkbnI/ylRMgQlRl3CWQgxiCTGKoZh35IQYYgyBMmQ1z8WIKfdkEHK1WVw0AiaOG+BxytVMg2EK2KFHgkTA4GY+V9PLQAablbipN4M4QGzFnqVuxIENHJtadstO2j+rmy80pVdkR1ZnR4oxxX5LcWMYxmpVTB2ZwwLsn5u2iUIKHLjBqpQSpYQhNPVxMminJzk7gDE4OXrb85GJUggxvMNSPiMxvhxN1sCYVm03A2XuRKu5MQM1fwQAUwP0lOJ2u7158iQG2m43IQQR8anmea7zbE3DEVBUc5lrqbVqKWWaplKKuyMtYGiLYlSkgQEA2GSvSs4AZhYBoPUx0bnnmBDA00XWi0t1uUmpIAAwEcZIhDGmPnZNp4GAzrLYjNRx3MVuw9EqSHWm2He999whzffjIcbuyaCup9N8eDgdD6c8ZqvV6mICVNSqaq0i2gh3K93JlgTV0VVVpVYkcGh2gu4eVjkcIFJVsUVu/b3jkENo5fmlWwStzURcs/+WQ8ZA3abfJdpuNinFoi7myIGjA4UQY8PfW7mKGGKwBh3LkscFM1r6tZqqmJkUqVOWUkjdm1S/NEDvTEU/97+u93OR9HLwCztYc6zGxZNQD9EjcWLvYiAc+j51iSkEotjCdUdytFZ1cmglJ0dSUHRgJEbGRpkFAG/HG4fW6miGCN5avnERswdYKO5kSIqogOREQOZYFcQ8sTN5C1qYkJsKgn8PBPMrRgus7ezS/D5I8J2x7KJLfn6Rojf00RExpaja11rb+bwo+8VEFHCxivVmTSBm5k5IqDYdjweD4/3D7maMuxtgmOeax+LoKrnMh43I7rn9lpK97h4CX19fffThhz/87LMffPRR30dEF1e3JnZFiwk6UhPDbZZvrf0DG98nNDu0dQestb0yuikCAYJhSmm72yITpxAX4Zy6BAaEhszcqqnu7+CVlx8VRLAUyFnaUe1Ag0EIsbUitPq26uLW1GQxWjYua1z17hfXgFpX1VbUC4Gb1sPZt+Fc/NWFvCfniiERzfN8Dl/Oo7luIZGIqhm4AmJMsfV4ny9nISos2YuZWJXW5uXMRByJyNFwRcu8vU5kR7K19Y/OGm4ETAhE7jDnMo6Tu4VA7k1Q6vGq1dRtwbOxMS1rVl08U1vbkeoC8yyw/tIKGlJKgUOrap0TGDcQERVFpBhC3yWpNedaq4zjeH8fYwoGduO7oUsBF3cx09p0d8DEEERKUInYUUwJAQAVdJvrzXURNSKa57oKVkBMNPSp71OKzNyI7E30+1zbbbkNOUGAgIiBo5gXKUiZCBvjDowkUnjXsLmhEcQEYIjkqgBuRBUJgdqaIEKPDI7qdaoKEyg7MdTKWcIkCT3bSUTy3TGPWQW440WkngIRM3pQ5ype1WwFXgkR3EXURQAwROMAOcOUYS6oHmO/T0N5mMtUHQtwhZ4wESRCdlCDY4YLz8RlLJiXgwEiAC+kMwqBKbAvzYXLpRPxQu1qwnLLSiQAMiAgjP0mDTuKvVGoVQUopCHFGLpECACCxowQAqW+a0FMSB02Ba+mtNcCHWosG3JHCODqrWTdEszWUHF5FUuLNSxis2tHgYquaD5TLlI709iu0AkN1RUJh932+UewG4bI9OTDp3HTlZrzXPI8Si0gjdvXClS1KbuVUsbxVGqNMQaI1vi8i5aRATSev7dO3AY5nLmQLUchIgegSm7GF23JuHAfaMndEQhbHQVDY5ZgI6rDiukjOXnx+XY6dgc0yGqb3f7Z85pp1oLz8WToOzZFfXg4PDwcjqdJiyRuYJrXUsc5t55aJEotSJWlKQWcYsPDzFSVSbRpKa3qwAuzClbHXrpkkiyc+mbIc46GfWkkW1F2XI7AFGmzCfsuXl3tmFmyQAZKFl0WnXgiaA3ahEwQEMFERVVERQXInc5sa3JIHD0kiB1WNTGpYnMOzGpXSxEVcJXpWv4hcCYM55rZ+UoC89UmXm37e1VF6qGLAGjM2MVEzAgeAIM7qSOKt5qlAhoyMjKAg4lCI5z5YqTHuGo9OCA4I6QYsKkDLBAMObR9RiuIYFUQBgEgd4SiPlctAB17DNRFTBE5YBVkcjQ3x+8UX88z7byP+8IHXcLxhRj3iMScHZXPZ+Pji7S6cwhMuFTHzF1UwNtJiaLCgRCpGUU12e8GvjX0roEl5N6nxF3K5nWcHt7eDrubsLuh6NMpT+OkJiVP0+m+iHxwYZh6Bl3OgeiZDePmiD70/ccfffjZJ58+f/Z8s9lozWJCLR2llpwh0Rn3BMAzLHre/ZbWvOZADjO2bQY9RAIhQmQkDin1w9DPE5ATsSw1bVFCXluUkBBsCecR3yklq/k01cNxrlVqFRFH8iLmaMyAa0xg1cydAzBRY7FIlUZvAVgEe86vSYRt727RiSq3MnjLphaZybC0QLdvs5WZWkyTc15X6NLL1oKYYRhiStDoySmEEJhiuiDCm1kVUZGYEjE5kMuiDY5kbIzkRNT2Z1/FAGmhx1HE4O7kKLXk6TQej/M4mkrXxRRjKfl4PAF4Sq26ks5E9XV+uqsjQJcSE22Gfi1ELMlje0y7FW0REAEzxSbyDWe+5/JqIlpFzIwJhyGZNY1Pmaf8wAdAUBNz2W2GxBSYIiFoFSmqxaF1QReRrNa3IkToLGm/3ctzx65LN1f7cZ7nueRc5rnEwNuh2266oUsxhsBNphbXM3dZu+thwcQcYypFT1QbqswIzIiBNFJc4aH2TA4cYmQiRDdmMGtOvIpYAMWR3Km1cGMkIDD1yjYjMzokAVJGERhPx3mc7u9GqZIoDCmFGEJKzASI5uQKpWoRNHBcKLfmKlqkWhF1xUxEuUAumrOK8Wb3dDa6nWopnnNxt4EpIQYCNFTFj2eQS1eb1jzZeo8dDZp8R6P9EEciJhUDWPQ8WhUIkQFQzVCb1RwikiMbMFFIm1037DF2RsEQiHm72/ZdYkI3QyxuhoQxxX4zhBQBAYkA28JxWlDyVfHJGrDLDmay5u0Lheed5R9aI0dDYMzM5AzIiIiCa6Ul/jAAxrVH3AGI0nYIwJvdEAi7XW/speQpT7VMWoXM1axWFRFRqSJqNud5midVa3bwbouhrS/4Aqwq1booQ/uip2Zrv
zUzO4Dqgmi9e7y8Q8RsSeKjlselIDZAC3ChwnybH+ikgsrKEjZxoAS5Spll9ok6VtZpmsZ5Oh6PJuZdHxyqSC7lOE2ziBHGEALHNaoA9dZCBUquZk1rslm0rCf8yoohsu+5kotrQiRs7GEEaOYGsMT8i74FRvKEGGPgEJHYwBxDiF3CyMS8PBsREMgZnVyb/K27qy8EHmJ2jsSBHCl21Ht0r8w1BJlmmWdde/HXs1vP8eVSHzRD9EVE5XynmWjfx/2mH4sDcmIZCLvm+EBurqWYuYkaiZo3vqpU0SpuTS/VBWqDLhGosRoRjVuQQ8AIzBCBkCkEZmLE5pLr3r4IdljoUpGQqRjOjhXQwZggEPBZLA0fBUcWMct3vprzVFx5Dy1MaTm9nOUKscWm3zfsMWhYUUUicnRXq1bnGRyx7wihS6EdbbGLIQaKjBRax2CtAjBzKUgUCAK0/j6RXG7fvonDNu2u4nBVpqnmudQ8jseH+zsgfM/aok29S9PxxusAMGbaboYPPnj+4YfPt9sNM0txN8OATGeRAFqQmFYYW1YfXsq2thIdIwChWWvhUwIAJiYFEMRV+CWxugKiqUkVqRKIjc2sSTEgEpopQItLLm6p2TjOx+Nk5urm7gygZuRGTRQCsclbmRnAIipOQK1zbyVhLPWgFqGc6TeXx/xZU4SZiDl6QAJea5eNHtsWRnNcamGuXgwzSyKx6xc13iW5fSezbLBBC33MrIKqiJq0Pb69SNPIK6U2H82mD35mj5tjnufb29s3b17d396WmjfdPsZgZrXWJpPbWuouu+0ao+58ExAxhrCmKeCrI9X5WhCxLowcWznv7895X0U+ONJ22wFaznWacq11HJfdjQOZSgoLQYLdXKs14rO51Fq4hFpb6Qc5UIgpdfs9pBT2O5lLmcY8TdPxOCHifjdsNl2fmsQst9zNGmXB3VcjNiKmQK2jQLVpNDsBMAIiOAFja81Z92GimLquH3hRkzc3RXcEa1ogutgotjonOQQFq0hgzMBIZMTKUFUnGY+z388K4lcDI0dOiUNC4uVsdTBDVaqC7qgKamCGpiAC6gABkCBXzBlyUXNMabPdwnY7norNdsylmvkMyAiuWCs8vBPEPJ6VDpcnZpN3bKYo/sjVv8CNYbFqg3a7lpYOjqHrh+11t9k7BndsMoCb3b5LCVxNFZndnBBDinHYhJSIsU12b2095ovKqqkDknFrXznrxOBjYf6dORbc1U1UROvS+QgKsJxxYoZMIgpN9bShOI7ozk4BB+dA7F3zbimS5zyVeaoluwo6WatInrOTUuY8lVoAsLHcffExAIAm3g9npmpr82z3dqUDLWvMm5Sv1Kp6PmPcXZcYbOUv4zoAV8oGrmIySIBM7OrTfdEC5WQYXWqFCWwCKEiCDpbvT5VbRFdPp6Nk8UEjUS1lLOVYchahGJ2oA2MARiREAzeH4r6Ye6q2FtClor8EMQ2S9qUsY3qBla8ZJxMt6moE7rSUErSJeLaJGJgJmk8CpE6AoagrNHcPCMTNRXF9ZUUXcAGs4NqOHzNovkHIHYfU8sc+Jt8MWnIZx/l4Ot3fr+9uqoYoIk31pBXam4JnAdeOqbsQvEIm2vS83XZxBsMYg24i7qIFMJEy55pdiiBUNwwhGEDzlsq1AkHCSEgMvFBZiFzNVRwVm00fQctIzYGJUkiBU9NZk2rg7oQwcNwM3fUubjZUEd9OdFK2AAG9I08I7iAGDiBmxbA6IaKBiVFbXcu3sszk83a/fltmprVWNfFVTHnVxISGvzQX0KawrmaNeQrLJMDFL3ia8/Fo6ioDxUSEKQYDp0DAS1OsIVexXEoppdUdIqMT1Hk61ay5+ptXyCF1w+5mdmQ0kTxOp8Ph/i6laProNea+yBHhckQBLsYuCu4phN128/zZ0+vrKyRUEcB2sjaWegtV2j8LmWFdddDi60c2ASIhunOKwS2CGRO6qKDYot/gMRJSEjN1Rcdaagk1htg6pR99hhGQ4MxMWua06nEcD8cjc1iUVhHW4BzO+0D7wqwdj+YYMKWITI1wdhZxaD3SrRxJxDFEJFrDkeWLruJEVCubde3Bi/vjYwtSXcRjVv2ItvOLCDGnARc+KYCqqT3uY2vLIbdVf07p3J2ICdkdROo0zSVPpVZoBteAmmd30Mb6cXq4v//mm2++/PKXL16+YsKbq00MFJlDoHaQN4RpzfWWCIyZCanJdJyDsKWtbWUti0jORVWAMOdyGudpmkQEmmL45cGP2OqISBAjbzYpRJrnXEVKkVpq5jyN8XRMCB4IU8Q+cmQM3GR+yA3MxSBjmA049R0AmSO0dl8Ouy2qLoHR8XQys75PXYpdaqAJEzI41arTPFmpokKEFBCAG0YIxI7aDrZWMmnnJZiB2nlPJuKu3w67PVNze1QzdVNwdZVFvxQWTSlshtuIgKhEy8oFMwYlKThkmGcLYDI4GTKFjkJnELHpBQMiBvdUZhamXLEqKQTD4ODu0T0BRicy9KoG7l0M27R5evVUDRVI/VRLnVXRwRRz9mPxs6QiPjZRI9G5wrYQCpDQwdzRm/sHYRNPxIZyQwNE1WlFbZ04dP1mv7162m/31dEcun7oN9ths00hiIqbhdS32cQhYBqo60IgQDBp1q/FzNhdxdwruLeGKkJCU2ubsPsyCf1CfQsgqNRapZZcSzFQM23Nyrbw3p2wVlERV/MLijMBMURDAkBwkVqkaK3NiLZWUEVkd1A1FRXRUuo0zfOcVTWESItEipr6uRDWktqLNQCAeFYV86Zo3jhoy7hY/K1uh4ur+OOLwIpBLefNEjA1KUdw1OLZxHWmiO6GSsESoBGBgls2CBY5pBSYsJqUPFeHWuWUy1iKuPdd5EAciNyh+rnoU93YLDR1WjBzc0Bfrutxx1pEUe0S51/iGH/cgZGIWneRGi7GF+hLlcxBDYrYWMTJ51yrGiJEbvW0875iYI7maNbWGDE3SVPAplgZMQRCYmQKESBZ18WUOAZzI7UWYXCeg5CqNQ/neZ5zzqoKBDEGQuzjhTKsIVfsKpPH4F6V1SJgpwEriilwVEcz5OrtgLEiC25HTWUJiTggtA6Dcz1ewA2xQSja+t6bTR1TKlVFVMXIgbsYuQup467jriM3FEIhxAAJoY8Q0aqCA4eA4qAG5ouowrlotQ7VVmxd6J/tm7ImCCl1gQ/bvLt8oj8mQevKUhWFGFpkBOou4rVaziLqrhQjxYhtk12l+2xhw+iMBdwIPRBZIApU1YS4QjlOI719G9MwzzkNG0fI81SnU57GUma/RF0Azgf9+jnBzBsxcxi6q/3++vpqux1wFTghIsKF7bgqEHl7nVUla0nu0GFddQ0GXEh54IbmjKgkAMCizMRm0EpqZiiI4I1d0aUkISCSmwOvK3eBfy4uwiznPOecOggtKFwUX5SWQEFb66CbGpKRmTsQNOW3KuJLL9gSxPR9H0JwcyIOMTRgz33pjc85t0Ks2VLvbqriALhANMxn6kyD7Vrsa2aAmFLq+5RSBCaEpbvx8ju5jM/WTQJXqfjF3aku2te1PcAd
m6uROZi5mD8cDnd3dw8P9yVPQ9/FQF2KiyBHAw5lUUpcoMAVNIJVlfHcZ+BtU1V7fOuSRdQR53kex7FJ+prWFEOf0iUO1y7ETA2EQ4iJGz0dAFS9Vs1zGccZ0VIgU0KLFliI3K3komLgOSQpglsD5MBEvqh+BmKMISDSUG0YSj/0qhqaYwsv/V7oiE6qiq1lqR3PSxH8exhWjQvVNutLS6EFiRk2TEyArXIBJm5iKrDEmbDkA+60YN/YPgIgOK6RQRLsLQzmdXYmRapA1agIRiTHls8ns66UgBTFgpNi3AQjJyHuumEPFCuazmWcZ1BNFALx9bBzAwUwoFt5yEXcQMVzhbnCOw1Ki7MM+rlvpCGpzcHItEWthIG5sRYDtN4UdUQFBHQydDdHDpvNsLt6MmyvYr9VBYpp2O2GzTZ1HSM5IDAQc5tFGJhih7GjwIiAKABIam519QJw0xbOABiitxTXzrDE+0hMzqXWMk9zqdnBEZuW3mLgknMFn+tQ25GgC6PZzwCHtaKmiprUhRvspgZmTOzmolrqoi4/TlPOCwxDRO230JD+VcEMABGXOKsVk1OXYoyty6kseaSXUpvkoj9Gyti0WADRVgCq7QCPwY1Z40US0opPL5pBTeYVkQjSJiULUFWrm6JEts0OOEA5TvdmdZKc61z1VHKuFSOnLmy2aUjJqozT1GrA7qhu4iatq0dd3cSbcRoCoTmoO6uZWi2lifGfF1KLNnEFuhtozwjWBHxatIeOq3FFIFaAca7iMpdateVYjECIobVBIaijoSMTMmJARkjmujq9BYwBODQsCVvIShgjeSBz1zmr2TTPJrVRq0ou8zSVeZZaATwNXQQCxE0/nA26sFp8VfpXNeVQFfJoFd0Z1Ak5YiLax+b/Z6vRuSBa2wibHRtCIz8hESwtx0DohOI2m6OrIGBkTjF0qTNgzVJEXDRQHHi7CdsAsbVMIzg7IJJFgo5wEy2izJkyhTRgRQRHarR8dwZ/tIGCxyraygY9K6r4Km57UXNbngLngHThEzT/4yI1ehfcQNRAjdUJPCCYajmdFACIKaW4GZB5oR+ZqWjO2V0a9O1MCKESKgXoN2BYqz2cRv32m4fDYdjvU0oOoGUGrXi5jzUlo7ZKVkzZ18tAxO1md319tdlsUkptW2946ELddmuq4+eDENEXzgS4mSEAAa0bBYAbIgRm8EgdEEKFZuS5FGwanZxaUcPNzGqVWmuDNy6WOTGzvHvMOywobGwMzjZ1Eds+Ze6lllqKVlEzBjRid19LKS2xNgFR1dZH3aUuxtgeg+d8lRCRVHWe4zzPuWRRlSnPcyGiEEOKqeu7oR/6vltbkyIRLTw8bcVS5NDUE1o3Zy1qKrYyTxcAuXGKl22QiAKDLmUvRBcRA1vDlxbWWCMRIpFUPc35cDiUkglgM3T73Wa3HYY+hUCwaCov07JRmtrEXtk/et5zWmLpCKZWRZpaaa1VpLb0s6kijeNYSlEp202/328Dnxvg2okopeacY2AQEQdrFHC3FpHXPE6M1vp6GuF+yiXnMp6mPBdT71J3dV3FIHVD33VN8AzIARFDZI4UkGPHMZlbjAkR3UW0lDK7GAKpVV8wkZZ2qxmqIigzLxQrhMUnRM2quDn6BU8RsSlydiEGxnaBBqagYlqbgdaKu5k3GBaW7LMF9mjWSACJwoYScW/5yHWqYKeicRIIaUBExsDkGNW6Kh1xBAqUPHqktO3NY+i63ZUYlsNBTB+OR805GOw3233qYwiU2AkP46hjC7u1qsu7Ih7t+HbAJgfVvm8zV7WKAo7E5A2CCSHEQMwAKNWUZN12gBEcsEvdzdNnu5un3bChmPou8LDZXV/HvgcDMwNiIgopEQY1cyQIwSl647cwkUMwc0HyRQpS0VSk5lqzkEtI3ibkmYp+mcOEaZqqlHEea62IwIEwJSRqffW1Vrd5mqZNyn2UyITQlg8QkhGTmzqaW6l1nudpmvI0ScnoHtjdsQEmVaTUmnMV1RBDiBEAtLUGNPRzrTqv+CUyMxIGDkvTo+o5ZKElCV7ArTaYw9Xu6np/La7tEs/Z25q+gzmwAyAslJJWpmJGQmthOiIicmAkcCQ3bRYRFEg73e02ZZ51riJSipaqoh4jxBi6LsbA1Vrf5Eq4BHCAxrtGdDcwVySJIkjE6/mhaktp+Z34cilEt3PFzFpTWQN318++HKMI7hQMWtXP1REReFGUXhPhdpgQEkJAaJUvBCYGJGunAzI3KZpVv5EaRBMRTbQi2TTlWt2UAdytVmkezdBcFVJEZDdLF/RxMg+TdzMAxEBMqmFGPaE6UCQOFCEom7g28hA4MEYkbPKG7IRNZcLB1ZsIkbsAeRPBRABRNacF7GwS9CJqljgM3Wa/vdr2+8AJHUAVxEANwKFj67k5s7V1DK4k5LD6UTFhx5j4LKYKiwXpcsyvwUrzo7mkVsC7o8lWuwO05WMitcw1h9oxQqiiqAaObsaADChVXLS6Q8wiCoCp66wfGqpZSnHnQAiNZOhoyBg5bq85bqgqiM61lvvb43xKKcYYVTRPJynF3eBXj0aIUREi2u12V1dXwzCEEEykVTPbMm2J21IPQyeiZg9yhhAaf/5Mo1g2cWwt76uoo4OaRVFbdhRcww43a4e41lpbb7mxmdm5WEPvCia1xf4IdToQrWEPEbk3eCSGyGZNJq/x60UkhsU00d1VVERUxFSNeQmHRLDp37TNPFBKaalX11pLbfNhQehacUhE1bpOYzRmXmIl4ibo1eQ2EIBUwFqF+x3kuGHPy7ZGBMwagngT2bMmKLfskBBXvZmF1YhmpdTj8XgcRxEJgbbc77fb7dD13YLEmD3utLC2111iPHbhUofUBMishS85l1prw3Gq+pzLucM8MO732/1ut9plt20ZEUFqnedM6I0d6e64kr/dTaSWAoG8EQoAqZQyTfl4HPOYRbSLWQxD1988rUPfBSZ0qg0yUXPX1hnGMSbiGJOD1zJbbQVeQSQ3MTfA1kgBZgYiAM3HynClAQCAmouamIm7vruSF6JpU6pq89wVTd2iW2tN8bXYsVh3tDoAtLhGl+6EyM1pMNqcbHoAz7P5sTrOqmgpGQZnAahoHNgTUsAIAThEJ6Au9v1uN4vxPCvYmOcyTpuYuhC71A1dtyPf5Tl1HXEQq2ou9m5/NYABKKACqoGqKzohSJuAAIgUWiPQqg6ASO5QDQJSGvq+6/ouBUJV6zeb/ZPnm/0VpQ5DTDGloe+HnmOsWRQAmSnEkDqigGoOYMvJjW2rQI4c3ZHAKrgRErhCs9/Val7Bmn/Zotj03q4VjuNRRKZxEpEQOVIIbkAYYgjMZprrPB6OPR83/a4PDGTuhr60AQMCIWqpOZfD4Xi4u8vzCKoIGEI614DqAqCrA6auSzGZQzu2aJUYaR00jX0VFkGlxrADkRV0WYQDAzN1XYzpsV88xvTB0w8+fv7RYToUKbhsDoskqppT65v15TTHhdGPvHSwmTWLefTGjlD3BnaAAQoycOq6bujm40xEgLqSmjAGDmHFetzU3Zp6MAAAqjmhc+s
q4oO0FA7wiIrK4AU7uoLYnwq04MA2zQbshunPbegkNiulC4GZSggnVsbCpVc5EoFskFH0NoRODj5/1PP38YppRKtS//l4AArqxsQ9ckNRcaB9VpZcmidvz8IAhNHzd9//bly29fv9hsIjHOS5UqJspgjWMXAyiJaq6cS81VKkJFq6DFVluwBJMvH4aIYowxRmaWWofhPA1jkXTtru9f3r/+5k3sWmDY3l51h6fjeTICBUDEEEIT2xhC8OGyz/x6812PzasKWRV+adCYAYBnpsZ55hBi07Zd3xFTyiWX7Ngxs/NcRD5++nT36dNf/+53McQmxiqSqgBT07dd34cQ/mK7dL5rsNSiVlVzzcuyLGlJKYEjtcaFwM6N4/R0PH3ePz4ejjfQdt2V98H5XlHmdB7Gxyof+eNH7xvHru+6FhlQS60htN9+883t7W531bvgcpZaRcF8CNvN1vlgVslTjLHWSuTW4CMijnFD6Eqt07KMx/MyJ1NziNEzor399v7qauM9I/3qx7hmXbNz3gXEZIaqUKulXFMqK1b20rlKKGJ8aQNCLjKnJKKBSYMjI0ZrmnC1u/3++7/67V//vtte+27jmoadR8Scy/E8fPr8KFVLrinnUtbQQwWzGNU717YNgBHBPI1mUnIexqHWlEtOKV36ZyuT4iLN+GWi5JzbXe2meR7HwXvftm2fc/BeRMwsl2JmF7P3l6dnXVMu9gskXEdKeJlVrb4OkZTTMk4pLbVkNSVy7EhUxMTMiUEBLICuTj5OJdWmAadcRIen8+l8KrmsCQMisraLmFlVD+fj4XzIUtExk2N2v8zFHe+utlfX29N5XwquyyHAKuZR1HWtAwQjZrgc7wqaeFLzmdgTWSQlhVq1iFUzdBQ7F9ZUYMsERKCKzHgZCKICrcMEk5YBIrBhFlEYAd1FG4lojMioRmKOyBgrqlhRXUDMFZxnUgdMV1dEAS7OEcKAlWuhwoYMygBsjhRBoSpUBmJedeUKlAEn1WORWZVEXSytuI6bTYjCeB6nJHV/2Mfges9dv83nUaYll1okV5xErRar1WrRSo43jbu9bl6+VO+nwyjHiY9TXfIMUEFnTSbowXy32d7cur7n6A2llqmoxRhj+xxIBgAXtw4CoAEbBsAWI6DrCI1wnaijICuQKZVaSq3s8P7uSuV16wBU7u/vmMJwXt7//PHHP/3r6XjadO3d3Q27drPd3L54XcF/ftwfjudaoG03dy9eKqCPm8PxaZ5O0errLmwCIuYF0iMMkyYRIcPGk/MO0Sm5ah6h+eGn/ZwuZ/+1ifSlIFPRNOXplLSCY0TQUuqc8pCW4zxnEWKHRlC1VJgr5HUqryq6urhXQZVGVGBIBmeFauCJAkBQY0WqeIW0QWxxTQkGBENRrbUoEHtyRIDAhMEhOBQgCOR7abbnq6vRG8zmiZxryPm1JWqrkvgC7/9lXVYzUatiUk1tPdYCMQbPnsnU1Myx7zdXt6++2V2/iO2ub3e77ur1t7/59q9+v3v5Gr3PYlZ0fWABLmR7IhqH0z/+w/t/+Zd/uL5/8fLNt99/95po/6cAAH+4SURBVFe3u+sg4AyR2TdNd7VhxrosJOJcMDMlLy5iBCRarbsqgkqMfm28fFUV/7qIIQwuRBcLuKpQRdQ5710wtSXVXHMVyoWmSdNchrPOyaqQmSN0SGW1eqw3yCE5MnJgwil5MYeOG7Z1HP2FrkOIzEQrLugyBl4LGRUwQDVkx/229TG2bci5al01VZBUANAuHCH6VW8cwBPee/hNhy937noT+yayYyFUgoqYTaeqvS8sWXIFJDCSolIqIAjosMyHcZinJRdBk9VZawZVEedhnE5MHhCJ3fAYH9v2Q9er4ek8Nrttt20YrAuhDQENpMgKA0zTpNPU1RoA0DkxW/lCDFgBKmElcKigZB7jV7E9SNjEpu+6tm0dcylFTbdX2xcv7m/vb8nRDz/+cBpOTd/dvLj/9OFRVpfcxXp8iQ+4aKBXJ+7zvVdVKVVrXbtA6+8SUQihadtN123avmva4AMxiek4T8s8z9NYpQKh897Ang5Pj4+Py7Ic9vv/1//+vz8+PfXX1y5Gq6JbU/vLWtmRYzIjXnubKlprrbkUFZxzqapFdZzncZqXLKWqaFZJteaci4rknI/HYVmSwNH52Hdt2vS1JgRKS+rb/v7+xd2LG+cw15JzyVWcYxGcl4rJRLJoNWUiRuC1jUfs+75HpPNwWpZx//h5OE9m6Nm3sdldbeap1Kre89edGMRLaOKKDgw+e1czV8Rqz+0OQDJTMc2lqhozA6KqlSor9k0RVbEUrd7UiNiHGLuu3+52cbN1sSF2ZpZLJXIp14eHR+fGvE5WVdciYyUOmSkRMFOMnmiVquE0TdM8TdOUc7ZVefZvBcEQUd/3fd9771cN76/8aqICAs9ieoNnPf3z36y0r9VwhHiJK1CzWnJKqeQkIsS0JoyLqgCYgBlUwGKYFRoHc5E5SawGyKY6j/MwjDkXlcvHvHSAANRsSuk8z0spVdU7/tpptY7a1aqZXTyrzz3n568vJI7LR7yItZUwezbiSoQe1cRSKstUx0XY+6sbZ4ZoilAIgVAQ/Vq9ESEqEhpScSANKQX1qFVTVTJbfaFrftEqy6N1BOZJojMPQBXVfJY6ChJuYl/BX+6Lj861DhqSBaAaihGxUyIjXG+NrUbXtYixClAQClE2MDMWqRXArPdOEU/nwc6KjLtt17288yFUDgUo5aw55/XWAKuiGCqzhWghJHK56Ok4laeBpgWkCKM4A2cKsJSspUYmFxwHV1XrXFSlaXzXxF9HdF0eNwACdIAeGYj0Ihs3VAVVq6KqttQ6zXMpuYnhetfr0mtJ0SloYlsYs3fq2FRrKnmYZt+2Xb+9u9X7m+smBFLBWtoQXIglZyIbHQSQq227iUwkc00lN1SSspKnvnUemQiBWDCMk7WxSSWtzgb+9X4pCsOiT5M2YOzQ1ErRYa6npRznkkWY1IFzRmaYDQUMjKpIEQGzTJrQClhL0DFWhVRrNsuIi4GrigqoBEQvmQiBzdhW9K2ZqCIKMq8odSMlBiRP2pndEFZGdljIUq1jppozLqyiotVAtJaakizpxbN9GhGY13BQMAUREAFzxOy72AUXSgbAEGJ3ffvqN7/7+xdvvmu6Xdts29hd373cvXjpu6aKqaiJ0mqqMUCgNm5evX63jMPTw08pz6fDwQBR7dhvghojIFNsm83VxjsnqXjgvt30/VVvFFyEXBFYVc2KzrOaI1eBuZQiqmbG7HyIzrkv3iVi1+x2/f19BZ6QZ0IEa2JonQtVHTKLkCMg0lJtTjpOVgRMlVAv5TWuakptnTRBGk+KkAsUAacYAq1SSH7ubIuCCJiiosElnnNdHPWZOmMIziE37NBVjyooalXE8C+LsK+vQPCy5XnrXlyH623cxOiDUwZlVMKkdl6KBFvOx6fDodnd+lhUJS3L+h1rLTmnUlJNFbQiADKCQRVTgJwnunwGmpHZ+SFEBUq5IhtpbRy1McQYDaVWy2LTko6HAxwOXV0aEGIHBKro
GIEJjRiMDUFRCCqaR/vy8YjIe980Tde2TWwQgJk2203TtSmn03AapxGJXr5+ScQ//OufmYmZ1qjj8GxEsme7q8JzvwsuoHkVITFH6Lz33scY2q7bbLabvu+6zhHXWpecpmk+D+dhNQeAARMQEnMMkYmXaUnj/MMPPzw+Pd2L7K5vGh+IfhXOcyliVovMGszj+NIPKFWXWpzn8zRXlWGechHvN02DjoNKGc77kgtxraWMg6ZUixbEeTgd996H0DSxiaHbbrahaUTp8/v9/ulpv3/KpYQYmti37QaRS01Ssl4s7KyqpeTdbvOb3/gQ4uEwP3x6+vFPP52HIYa27zewpZQ0Z6lFrPFfV8rMvOn7q+2274euXVKqtaqI1Wq1anVWgypglbpGHqnZeo6oIiqKBIwcvHeOzKxUzaVO8/x42F/vH4R4g9QYkhcVq6KAyOy9CyGENXJSV7QxWKl5mkdi8I7BrO/apg13d7eA+vT0ME3D+XwuJYewmpafmRRfrctr6RpjICIzyznP81ydKymBAa3tilWieKGPq656x9WIeckzvcyDmdl7r2bLPEkVQmyapt90ZnY6nXIuyCRmc61JNYkVQAwxkzsXcUVijAqS5nFOqZSygphVNaUEayluoABJdUjLlPNV6Bh/6cTUWp6eHp+eHktZLkIzXYdua4avGhga0hqmp2JGZnzJvERBMiJEsyJ1nNPpsDzuF3YBLTB6h8ZUM1bAYhSBYoAGiMmMMCNl4kKUHaXI2SCa0TrCQ0QkR3ixKVUCAHAEDpAMUZ3lumRYZjI87V5IuNwXDI0Pm8C7AFXruDrZiAAdkjNFrau3mABXPGpFVoehDQogAIZYySoSclDB8/GUS/LBeVS5vUIiIKfgctG6SAFD531sHHtUKI7FuznV8eN+XOq4P8I49yINawwUIrXRJYXzUJd54fEkpEGjWS3LxJK76PouMH89T1phRgjIAB6AEYHMVgOWAVTTrFLVVOqc0+F0nqeRQAiNsM75OJ0+SBEVih5///tvU67jOItCqtM8u+2me3mz7f/97xm0hbQcPtXb67C76j3hdtNGJrAQAzKCiQulj70TNQNn1BIHMIKiYLlS20zOBcJilwQWeF43AQCqwVPhj4mDcyQkAkuq59nOsw7JUlU0C2i9a4L3FJiYiMhKSdOUcjHVADQR3aLvIbRkyZJVWc/wRagqFAMGnB0VBJSKqqK22pmAyNYgJSEVrkqI1mi5NfgNLL16S8hWTsPeCtgTWOmsgqpUq1JSHec3d09vS+2fn7EmUhs5MiXDnC0xVsfMza6/7nyc52wYY3fz8tV3f/P3/+Xb3/7ex865SOR8bCgEEwOtqIprbnkxrOjA39y8+nf/6X998/b709Pn83E/DKclTeN5Pj8doSaVVDURY9sExw4EutC9uH3x4sW3r9hvY6/LgmJEKppzHXEG1U2W+vC0H8ZJRLt+9+rNu6urGx/iOojhELrXr69/91v04bSk8fO+iPSx2axbaNVOza0d9JxJVUPUUkjkguFTYFGqKoSZqDRRrm+wSplSWUbRArE4IvQeHLmcsQrkbCkpyJp/paAKxs8eCKtOpKoUqwUrkdSIBtFVwXFZQ1nsEs+A+Hyaei5iHL7cuXobd9fNZtM0jkPwHGntTojhaS4PQu8fPp5ow9trYW4Qc16q6NoTZzNnJiq1FFNlR4hIaoiIsOZCgqxoXSwkiuSIMHjso9s0MTTBNVFRLMFs6WkaPzx8xodPvZtjNM9r2omBGq1vMsL6Y2TTFS6MXx2YGTF413dd3/cxRETour6a/Msf/7DZbm9ub+7u71++etW3/T//w//43AZMJXiOTQwxsnOEDACgKrpmTF000aqqoo6o8WHTb26ur66urna7Xd/1MUYiLqan4bTfPzzsH4+n4zTPUiVrNUYgLqabGN99++7Nq9fzNI3DMI5TLlWKoGIIsYkN8196etwarEZAzOy8W6Mmq+o4zWry+fGp78J5GGvVEPq2IUe+1jwMj8s8+xDMVIRUndQsUhMI4UKc2iZvtzrN0+k85mJPT/PT03I4LKXmEGQOOM2AQLkkqVm1mNnKK5Naq9BuN8ZYDofxeJhOp3EcR2mN2XVNrVVqUal/ORcnoibGbq0tm9i1UssqcdIqWsRcNTEEBJG6RhehEgDUWhExBN/EZtt3CLZMs2jNNR1Ohz/9+Y/V7Ppxv7u56zfXoWkQeOWxHs/TsmRVYPYhQk9QagXQGD0SqlYRc0wx+hi8qSx5etw/Pj48DMOAiI4d8L+NhV1VvTmXXzosz22eNZ5jZQSrfZmurj0YMVyB5hc79QX6qIwIKlpSJubtdrPdbje7jVRR1XGYxNakDRMRgMLOtQYFqTKr8xCiQl5qndOSS147CXiRbF0Ss4mdsctVShXnXPC/OJOdc7e31/f3NyUv4zCt1TqsOWe6xsYjwcqnXn1iWAvmhOsRl4PiCvYAXE1Il/VFRKtI1VKhmoliVS0FMrNn71a4GRFwMVZd7fNQ4ELDNEQkvCDg1UgIzLACgJJVlOqqhKpVrSY+v3oGka3DSgpeAxemCuIUgxHZ2ohBRKNVyQYrr56IyHu3ZR+A8spMtWorwAEBxEDMxEAvInMjrsgFXAGtiEQBfQfkUq2L6DDl85QPBcel5nFqq3RELeOWKHinvVsA0KgKestcJ65CBC5gIH/d4rZvmL+KUIG14Y6A9PzV2icmFcu1ppLnUqoUMxnG4XA8pnlsgwOzEFx2lGsqOSH6vt3c3N4h+cN5GIZ5nJOBqJa2a+52r0hrmYf58PD559CPNxQaH6Lvd+TcKsQFUzbpzKIZKDJgYPSgBEW1YhIfDiucGv6t1mUW/TgsfzrNoRg7rlpzydOS5yypQBGEao0nbhoMERyQ4xgjihAw4lRSQjVFQmWv7NRInClUMARwBmSgoBWgGFZQZ0pmpkCAq5UbLq+aidpcKqBJzYiwIykko6Ys6VOaD6J6jFCqFtNa64q1OI/w5pCfnzFm3m1219vdY4zTOItoKVCVAX0M3bbr2yBArWtvr69fvHz17as337FvgN2l2UymqmhKZohrpJxYNQbebK7bvn/5+pt5OA2n/eHp4fj0eDo+TuOp5imlqaZzkSQpoy5aZOBpTstcqgWfgdoq7KNCEZlzPgtUeKTDOPzpp58fng4l1Zvbl3/7d//x3W9/9/LVmxAbADAiDRH7PnZtM81uf6ylourKHtuEuHGODXJKlhLmWyjZSkURRCMzVsMqmOuY0pDn0RCQNjFc3dyG2PicOQRkB4jrOkIxur5ziFSrquqaLA5GYGbGZhK8IaZc8tNRJoYswXG76YFXT+FXyQ//0xWIXvYRd23TN6GJwXHw7Bt2wQUfqgKIHca8f/i4ZIfb3TmnlhlMxjzPJZPjro0MxmazigDQRW9s8FU5fukoGDsC513Thu2u3+662EZhnFSqyXHJH0+n90/7T/vHeDwsGy1MVggvEXsCZmiKJqiKpmiKal8T7tf9ZZmWcRjGcaylEHG36Zu2zTUDwIsXL968edNvN+fDqW3bto06JfZMTOsgArX
gZVcqX/i5ROQdg3eNC5u22223N9dXu92u7zcrFqSqLDmd5/E4nU/TMCxTqYVXdTAYgFatzO7Vy1fXV9fHw/F4PIYYd8xEDABN07RN6+h/LmJEVwUOswuMIUbnvaoN4zRNQ3Cw7Rs1qNVi6GpDhFbKskwzcWz7Gx8aFwMyQ+JSkkgBJKZGlcdh/PT5o3N+u70FiI43/YZWUSoRi6KqioIaKfCaDo9IPgYA3u/PRDiN8zwLkfc+kls1GHVN7Va1Z1vLlwcPvWPvnfc+Bt+2Ta26/lVES7HsjJ9RugZ19UavE4Dg/aZvrq+vXtzdapX373+epyXl+fPjx+Uf8h9//KHb3GyvbnfX9/1m18TWh8gu5GrDeaxV2bmu63Zxu2p5iSCwd4ywyv0YSk6fp/P+6fHPP/zpfD4xc9u2K3FUdYUq2ddVf6318HR4enrKORNR8D54j4jeubaJa4tlzf/UNbtRLx8GVwOqiaqKVFVdSYMrhJSI+q578+bN3d2djyEtyUQZD+M0ppwvG4SJ1JJKqQCx79rdlp1TLYvmNUlOnsFEIXgzzDkjUdO1sWnXONYmNl3bftFe7Hbb//pf/4uZ/vf//v/4859+LKvrXcRMgYSZnHNshIrOo2M2wGUGqVpF2VkD4D0RMrrQbh37pms7Uus79JxNrRS26lJ2QExI7MwxBGJGT+gNRWh1HqhpNlFUMAC0y3OgBmZU0YlizSrVRLAqF4uKSSn9Jg5/U+TLCmPGVXlRmERSUpcAAQohEkSmHhwh+vXgRWDEjfcYGmo2FWiYlzEnqJmANAsFt2032rTosW077wMiVMRKLC5q4428OC+uKaKnJZ+WaV/SSWVSzmog4pC3gV+Ru2XnvZsblwLumliExABcZYQm+m7Xt03X+HS17dxXdbMhfJnwfRlLAigA1FLGYThP4zjPRSs5mobT09NTSQv0TXTYd1uP5gHTMplYv2lf3V+1Xf+m3h1O8/tP+3kppaRSQ3+7DSjHMpxP+/3hsek2d6++vXn5ZvviTei2BohggOrAWMnUQAUBiIHRHFmtBWEBF5/J0l9VYM/XUuoPj/t//PTQRO+Ds9WGJ6amoGDKYEYuNrsrF+J5ntGY41Vg6n0f/Gk+PsWU7pBuDEMSFROBZLg49AA9k0Mjw2imogXVA6xsIgZkWM+kZmYIWq0suSbTnOoYnTi0yNnhrPCgQMnCUCij5Cy5SMlpnqbjqXs45nJheHgX729fvbp7+3nz6Xw+L7K2flkUgbjp+q5pnesrbjZ9H9jRRZCAtgYrgK3NWiZDAlPTIloEgWLTctwig9aXJS9pGZdxGM+nZTqnZZqm8zAcp3lIy5zmYR5P83D6dHw4pTGTnNN092JwvpnzUMtslqY0Pp0Of/7w8R//9c/vPz2kKb14+fbx8em/5LrZXq1FjIich/E0nHeMJpURQXU6jdG5+1d3ty9fXO92TG5FDTEqquEK7gMjAIdGVSTn8uHT4//4Pz6fBz5Nr6933//m+6D2tH9MpdYQ1iY4g7nbq7jbdLXakvI0gWpwISCuSP8qYkwQwjSnT8Mw1CJZ+759/fZFt+kvR75LBYPrBP/rBy4wvWrb2G0oNOiCc455temTVi0iOS3jkPb7w/6UzkS7x4errvWOC+gwDtyEq7urMsU5BnacczFQUa25mgEAo62iSCKgSLEPXdNvwmZz//J+d3ft+niWAuOwlPJwPP7x/fuPHz8cDofreawti7g6z+syj7B6Q0VLuZzgZV3mfnlnRGQ4Dx8/fvrXf/3XP/3xj+fj8er6qt9s7l/cG9j9qxe/+93vdrvd4XgYx4EIYxNLDMRUVaeUNBcp1UTW9hY7jiHGGJrYxBh9DF2MfdM1IRC7cZ7P06SiqlKkTrXMeZnSgtFt3M7MEDBJqXmeylxyBcSrq+smNj/8+c/TNL1685rZD/NMCF3XdX3H/9PJ30mt62keEZndOsTy3jO7kpfD4ZRTCiHUit754FUk15KWeUJkIFDrfQgI5BwieqlE5IJfjebn8+nwk9p2O7TtLbsga1RyVVmVKSq2JiVqRcLgwwoXn6e8TKeVQVVrbZrOh+A8BR8AbOWk6S9RI79c9hwxxc75oLGJXZWqkmstpRYRMy241r4m8Et4OSK0Tbzebe7vbkrO+/3DNM9Fynk8D8vs9k8+fur666ubF9vtdd9vmrbzPgI5EWNyIUakSMjsVtgKrJnsK8jOeU7L8vS0f//+/U8//ZRSur277fvuufxSgL/sKtVaj6fj09PT+XxelsUuYY01pVRKMRPH3jnnvnTwVFcf0/qlKqwvoKqREgIQMwB677uuu729u7+/R6IlJClVFWqpJRcxBVAzXAmPRUTMitQ5p9P5NAzDsswiddUYsqNV+GLmkNg7z0RaxVSbEL5O5G7b7q//+nen0/kPf/jT54cnOY85r/9t5Us0zSVW08DW2XQpKGKlKrtL+DMzIDv23DrsYiUtDgtxBkAVEgvVNsU6gwYdO/KBHUMBVDNfoOZa0ywllZqzqa5HIF0PQIiIHl1bK45jyklEoYJT7iha6PzVaytfnjUDrStQCg0wi5UChJZIFTQyKK9kQQYAMatAxuxD6LqNAKliVUsqZqBZkDi6Bh2Y0xADO1bTZDarJcOsWC+D/FKrTLlMqc45J6kVmAgb5ivHV42/afxtwz66OVBpSFpXjEpVATCGEKBvXdtSDNy2zdeamMuT9ytE0RrFIfM0PO0/H86naUmK5mOYhvPhcKhpYdtB471jpjbGLSFblcYHRohMfdsF35jRaVhqVRd8bGLjsOQll7x/fFyKbG9fAZHzjQ+tmikYopEBKYIogACaMRAjEiMWqoQuXKAdF5/JrzUxoqdl2ae0qTUUv1K2aA2kWtlJqIGbptmEtisc2Yfd7V1DKKdjTbXHobF8r9iDSa2j2BHsgLAAtIjXQIgKigomsnIBVln0Sp9bibEXyUWSelQdVZei2Zt4zo3P0S+gc4hYSyzAWiVlSVlKXqZlHJdxSSJf3Hzh9vrty7t3u+0f9vsHSfO622WxLKqIPsQYWsEmOkYVqwVdgNWwvaZhmIpUEyEFK1ZSBTFk57wPXYMRANrWdiBFcs7zkpe5pDTPwziex/E0TedxOE7np6eHD59ASpo/7z/mWqZlYPbjfC6SiPU8nn/68P4PP77/pz+///j5kMb88uGw2Vzfv3zz9//hP315lpZ5ns9DQyQXAZGmZVkmX9NiNQMYOXIxUPQuOnaOkVfbHzPF4EAkjROYDf/8h32qnufdbou3NyGExnEYRgVTqTnnGHxzf7dr+53jMk7D588ostlddT6iqNVaS6Wa55plOJ2fjvvzIFVzKtvdln0Q+nUY39fnYwAAYKKrGLmN4JzRRXC8ivjUAMw2RB0oLcNU8kh+P4zX201sG/NOpFIT+pudtm3Tdb5tU8pVainV5aoKyI7QAQARO8ddt7m+udlcXcXd9ub1q/Zqm1A/nI4P0zin/Hn/9Kcffjz8/F5Px01OJkEVq4gCIgIDIK5Tqyq1ar0ccL8G3K/9+VrrklLKWVSdd9vt9sXLl7GJL1
6/ePXqlZod/3jYPz6aWdM2tUsOyAiqSZWqWsEUEYgQHXP0vomuiS5EFzx6p4RZZOVI5VTWfOKislww1WDr2RqetXcIaia27mJWaxmH0QBev3njXPinf/mXUkrbtJt+Q/yXqj5XSjYD0dWRS45d08Ttpi/pZhyHUpdxWtQQ0BESM5WipeRSZgODSasMLgXvo/d9DJEiMwcfGpU6TmNKY0rT6fTUdY8htBd5aS1rgWimhKQgtZYQ/NX22ryvktMyz9MZEPt+17Sb7W7nL4xE1brUmmvNIhWetaVfbkzOueRL6DwzhuC0b8Qkl6VUFl0J1IZqYA4M1/2oqhpY8K5rm77rEjsXPDBVXTtyVoVKAalk5krSNKemnbyP7L0j55yvZU7JjbNz3vngY3AxBO+d88yeffBLWs7j8LjfPz7uAeD+xX2Mq8TyQpmCFTryXCqLyDAM+/3Tw8Pj8XjMOedSxnE4HU/7p72qbbd9bNsQPK0MU9VSS845p3Rp7aiuRMhV/oyI7IyZ+77vuq5tWjMkILi9E9VhGIZplCqm4hyrmtSalmU8nxFgGIbT6XA6Pi3zBGDOMxEzsQEQQgweidBMSy0pWZUQQvOVgNR7f3t7/+rV69vbu02/WZYFF3gWWgMAiqqAmtSl2BqQJN4bQ85CgsEjA5sDckQWnHOxU4cLaQGtBlgtim2q3M/ltlgDXJnAEyOoaKNaqsowjvvH/ek4L+NQS76cUK0iUvA+xq7tuFbcP56nOYkSuMANbq66+9e96MbgS4SC1pQs5UgYnRugJhUwBZBcsmfQ6BRY2QUAlapmRS0See8CuIkzIRugikkWY8NIzGROybEx1gqL6qnUKeV5LlmhGggAIngAT7CLPhgngOj5NsRX0d8H1zc+thRbjg6UqAaqzimssU0ogCa1Vm4bz+6rfK7nIsa+6gIaQBHJy3I67j9+/OlwPFQ18r7r+mkYj0+HnBaPaLV1CASKGhwBBUHV49OpLnV7dRND++rFy92VjHNyznFsXHDX3lNsJyVysb990V7d+hU8LZelEERBKoiSKtBK9GBFb0TohFxAXLPN1FQUf135rw84QETfUuMugWAXpwYqKGjHTefavrvudq7t+7u7G1/SOC9FgQSaildoCvVB5aPZT4RHogKwNbwGEINBBdUqEhARCpEZisGaPkoVqZgl05PYZ5BzlVTR1IJ31jSljxC8XyouCxGBCcAFpUHIa1zel6XMu3iz++b+7vvr3d2m+7nakkVELRUZUz7Ps3fBzPsQUIssoywTuUDOma2zUZNU52msubCZVZNSmV3T9ehwjQIzMEMjR0yx8SF2PYjWkkueUxqXPC3TsEynw8Pr6+vrhw8/H58+v//5z4+P781szrOgsufzOP7w/v2Pn54ej9O0lFphnMb373/+9PFDzunyOKlKTmUYsmpFkJIEBESX03j48w/udCzXN03Xg/cco2076FqKjbE3Ao6Rr3oQMbHCbjFItULOVWryHDcdl6tAlKZRci45Q9f17767/ebt9WYzD8PDP/8zVrn+7rub3RWCaa01JX84jh/fuz//CA8HLbIqOGqVUsXY/cLLxwtE6WsLHCKEwF0gZgWqSqprbK6xY4fEt9suJXvpzp+m+enwOOdSp13c9NC1Prgm+iZ62oEV7UvNqaSUllxSKmbkfEPsDIkc+xA2u93LF6921zdx28dNx213mOf9OJVS5yUd9of3P/xY3v+8G8+EBdSBqFQVW8EMawSLrCbS1cheZaU82nNBxm3XXV3v7u7vb2/vngzarru6unr18tXti7vru5t2s3n49OmHH3/8+eefRaXtWs1CCugcXIrOcNkGmJx3IQbnHBAnk2UpwwwEhGZqejGC6EXwWFEVTNRkLbXNwKCazFqrKgCWUvb7RzMR1d1u9+bN26q6/H//ASDHGLfb7b/ViVFbDSagCOsg1YCZurZBtJJ4TUgEq3gJclyt8aBacpnUCiI3TR9CG2MIvmnavt9saknwMD49nc7nEyGrlqur667vkWieq2muVcCAnScwleqZN53f9B1iPJ7q8XBKKSFK08Srq03f7UQsLdOQhpSWXHKVNeL1F+y12SqNlsvuvaqp0URiSqGULFLVxIRBL1WD1rqa6FcTPBM5duKUnAfiNaKdiBFrFVSbDF0udZ7HEONqNwvOs2NAYu9D22x2u9u72xCCc8yMRGSgqaRhHB73j4fDIee8WqdWs7vZX/Zgvr7Wqc1650opKS1mWmstpTCDqAYfnPeOGYm8c2vvk5lWBMMaKcd8yRxAAOfcpQvKbu3lmFnTNKsHauUxXFjAZjnn4/GYUjqfz8syaa0h+Bi9iuZSaq2lltWJaAa1KjM/fvp8+vaoIsy/2EeIqG3brtu0bbsyiAFgnfp1beToFFdUtWya4kJeT7ZEq8IWwVAK5JyrWNHK3m+3btNg59k5NpE1vcHMGW3EtgpSkYwdgFZLFUq1es6Hz8fp8dNpPEpOhQjUVLUSUdNg3+luayL28FSGMVUx9DVunLnNbiVrfrlUpSwoqfPQRz46XNCSqqhmq04hEACsIlDIa3ym0FbqRoQBi0pRLQCMKAq1qpGYx+Ccj5G9z2pj1XOuSbWACa/9KWHAhtghbTgYghJGR1fBX0XXNxwa51rnG2KP4LEGJ94BOyVXgReBuVRBUvSG/leRll9PYxEAQAFqlSWlYRyOx6fj8QDIPjaMlOYlLcs0jo9mae76tm0CeySHnojBak6VIIWY2IW+aZqGm9gAUgiRQ4htD769U0YXN7cvY78jHwgRGGkdKa2DEFCkNVd9zVIjBCDnyTmgdVtUUwX61UJGAIGwAYjkIrkVbgsABMiIiGZrHqyC935zc7PtuyvnaBp5WSQtvtbGdKM4IajpAjYBjgYiEAAKMQMsps6sIF0IlYYAIAAFsBIvxBO7gd2R8BF4JBPH3PVd17u21egJ2TXtClzXKqsISm21eODXvhhmv+nurnevr3b3u802yaBJRHXJ9Twvx2EAsdrBbuslTctwmE77BtCZcmiAHRqYWEk1L4ntMjwlx6uK3UBBUUBWOdTq10RU0Sx1zmlY5vO0jDktptI13ZvX33rgmpbHadwPj/MyjstSwNC7cUkfPj/uD+O0aFFQg1LrNA7TOMgXMqyZPYNhKuI6yhfVNM9TGc+HA3SPTdNS07iuDbtt2PS+7ThGCg62GwukZtMwTNMkqsBETGJ2WhLGsMapAuLqZEDnwvV1//r15vaWxqGZJ62y+f7d5uaWGFdqbf78OdTsHvfsHCOuqQGGJhdtAQAy8QqRMmaHX7ksiahpHPeekICp0Jo2bWQWTR0xeT5F9zbQe1uO5+M4F15ymSZro28bbUL03jmHdkn99RDBefYCxD60xE4Agck5F/vO953vWg6xiu4f93Napnmap3mZ0/B0fPr4IZwed3r2nZJdFJKrTwgACC5JG6vgVvTSjvnlfSGKTby+vn7z9s3Dx0/LsiDTyqMbhrNvwna7s+fJBjGzc+QYxFalP+Nlv0QmYDQwrWXlAYhIrUUvCR8X54ZdlD+qAEKX6LP17IQIh
KQEFU1MzWxZlg8fPgDA9fX13YsXRJTmudbqfGiaJsRfQL2/FDGKfBEiGplhrbakpeREZJuujburXPLD4XHJGTE45uCDSFNksWIAUEoRXZCYCJs2tm1/c3376u2rkhex83l4WOYBEe/vru6u23fv3oQQng5P52EaxwJAbdsawDJPXdd8+82bF/e3V7v2w6f3p/PHD+/PpcyI9eam3263wzDlXJc0TZNLqdRqIqj6CynGzEwvGZNmBoTkyKML0bVtzCXlkmtlC7C6acTAAKoqIpparbWWWquoApFD4qor/o6ZDEFLLToPS57PSGs5t8Y2AkIVjU17c/8ixHB9vbu9va2lqFUwyyVP4/jx48cP7z8cj0cfwm6367oueL8Gpq77x8Vb/MtCxtvt9v7Ffde1V1e7w9V13/dEUEoexiHnfDqd4XR23scYu7aNbRu9b0KMIahqKTWltCxzrUJEZmtyuBKSiJRaDKxtWxGZ58n0Qplb51Gr4MYQS8n7/WMIIZfCjP122zTBOU7z8unz53EY5mWutYJeghEOh4Nz/tWrV+M0ispFKnBZx+BLf6iWAqB9v7m6ur67u4tdzCZAFh23NPfwKfjBB3XOkJmQgmcpdjxNT8fyeKyG7vZ+9+pF893LcNURwIiWwUbkyTcCGDI06BoOAdGgZqvZasEc1I3FppRSnpEYDS5FDHETSpuzA1MiJkaVgqpojHACPYCe10wKgNUtPjPM2xDnlh4bGCdYZdFkBGqp6FHLUGtFW1TNYch2k3ycBgd8nKahpITmmStRFpNpiRSaq11sW9e0qeqYdaxKjhofYsOOCUy4aigaDNlRcBwdE4OisFfaOOsY+kDBOb9yQ4HZoQ/oGwudFyxjqioZICs/w+IvVfs69f/SOxfVIrLkMqUyp5xyJvaItfgstYBhTun9+dR33TdvXnm/wYs4mIA8sTPmpWRcZmIOoQ1dRHLkAjmPzjehf9PfsIvd1Y1renKXpF1QNVgRT3ipOoiACImRwRDZ0dpDNvvSNvrVDIwQO4YOIXpyjBd8PCgAEiEqgInKkvJYLd/umt4xfPwEHz80nz/icCBZItQGnAI1zBE0iHlTBHCA5BXACkImy2wVEYDI0AQq4kD+zO7k3Nn7McTRu5FJnfPs3e6Gr2+o640ASJFIkS4n42qqBmZiWtVEf6FfILL3m669udrd73a3YzllyXO2ecmnaXIE03laNpnQzePxtP/gQ2xy6Upptzeu6XCdoBmhIiggoo8hto1rHDq8WEhMAQzIUCqUJY/n8bg/HB4eHj7sD4/H47GWGn287q9evXj59s07Kwmt/vjTcRjP+9M4lyJES5XznIqCAgKtSVXIl7bvL0ASYG8hSIiGyAgslqGWXPI0JylkmBHNOYrR971rO24a37XNttve3YTxlRDt3386fvpkJrFtm6434seHx2WagEjrOpqAFfZN7Mg5dj7GdrfbSRUfAnvHni14YMddy03kGHz0sQmAFkNgx7BOScCQPXsiQkNytbLzXzqXzNj2Pl41hixAKCJqqEZgTsSZBYJbtt80/Nnjp3Gexmwp1/O5Ii2exxhcjD5GRCqlEtJ6EkYkJmQQWKeGxQQMayHV+XxmH2stp+E0jOdlGudpSuO8nM/z8XiHyd1xt21pzVZ4tpCrmlywYCpmAiqgYivt/8szhj6Gm9vb777/fv+w//z5odTy+PRY/1mmZXrz9s1/+2//rW26t2/ejuOUUzk+HXMtUoSYqzpaDRHPFgt9FjGsgTKyAolt/UXti/tk/TPrQ0nkmH0IMQTvA3lSxgomBtM0/elPf26a9n/5r//19vb28+fPHz99BoDtdhtC+LKnfD2EcWp+NXuboQKWokvK8zznvMQQrjYbH/1pGquAcw0Ae98Qc9XFbPUpi2otJaU0LcuICLk0BgVJ1s5IKXnFlDSNu7296roWSdlRCILoun4DBsMQ2i7e3uzu769vbrdZZh+8aE1pWZZhWc7MeD6fzueneR5TakRMBUXwL45kiLjux8zEK4YHzHvXNLGUJuVa6xpNtQrIEBCdCtHKhTQRW9s4QA6Iq0KpaqbKggqgClLs8nzABcNEZGC5SNv2QHh7e1tzLjkNw3lZppzzNI2n4/HT50+Pj59Lzrur3fXVro2RmVXrL238Xwt7iahpmt1u18TYdV3f9W3beO+apum67vHx8Xw+z/N8Yd/lbABa6xolCojM6L0zi97LGltvKa1r/WoOUtWV0bI+f1/YLc8ls5nKCjZgx2v920QfPCOCqeZlGcdxHIdcyppcviwLIk/TOM/zPM855xgdPff96FJpWq1lWWap0rTN3d3dt+++7TZ9sowEbQhOBhuNKhAPRDUQESEzpiTnIX3ezx8fazV3Tq4Kb2MIFDwmAENciAbvTkCtCACu2CNYzT9gxXtpIjQBZ2eFZOVMrccJtBXTqYjmvYuNR67I2LXShMR4Aj0DPLuTzKRW1Nq4uGv59iqYwjLWspgWUtUsmhWqQkKcEU2wzapLbmn0wIdlHqRmT+QD960jpyVzE7vdrtvsvI9EybFrY+h67CJ1DTOZ5Aop8yxBoQmu864PwUhHzeLAB6JI3DhqAgZHTI4AkIEDhQbbHpU7bJaSDbI8U57/8qm7FAeaUprnZZ7TknKuImJIK3Ylm+pKYJrnaUkp1yKmjomcIyZeJQJkglhNRdXAmBmIFUBWu7l3IQb2EZjFtNZyUfWaXpag9SiJiGakiqQIVFVyyXklbz+ToP8CFeEIr5pwG71vHBBmEFVB0PUNdbR2l1XqXPNoZcaa9bTH4yNPJy4zWl3dqYTQIl0Z3ACAWVLt0VqFNVmJEBLagBAQF6VKOCOfXRhiHPt2bpoUYw2BVjaG97Hf+M0WQzTJpobPw7tVZ7liN1d3msKv3v3Ydpvt9fX1y+vrV+f5MKVlKXktLsc5CVZPflmmZT5Pp33TdGorlKHxLiARGwZkdAHB2JFrggvO0KpUAxAVKbXmVNJUl6Eux+n4cHj46eHx48Pnz4fTeZmzAUYfputbBgmeDSoSlFLHKR3P07DkilgMklgRNLuo2oLzXde1bUfPi7KaJZEhlaoATBlMFcRkFYVWA6kFRVUVieR4IufRe9/Euunw4cqdDplovz+eng61ZGRCx6J2Pg81pRC8IGqtAKsEEC/dCDAXwu7qWlVD25J35L2ZkSKyB1yHQEAISEi0Ao3XI9aK80UkRAVkj+y+7sSE1um2UfIEDFVYjBTJhEyZwHvuqtzv4v0pNfuES6liSIuaCWJhAu9djEAstTL7zXYbY4vOgffrYTqVXGuVWidy49PBh4acr6UM59M8DiVNaV7SNJd51jRbT+3dVefRM16OwfrLtRbFYrpOlIpI/VoUg4BIbdvev3z5+s3rm7vb8/k4L0mens7DiYg/ffz08uXrm9vbFy9f/vCnH1IpSapIxbwUcauaZT3+KF50mfLVd1cwAzU2IATA5ygqJANHF95pCKFt26ZpQgjkWBGy1HGZRfXx8fHnn3/e7/ellH/9wx/GcWr7zYsXL5qmgX/rclWbFdNmqqCQs6ZcT8Pw
9PTQNWHT923bbTfbEDriCOgBIIzHVM6lzKsag8mpyuH4eZrHEJph3BcZEeDx8WGeZwBA5rUOJPbOR+9jjBUZmUPbbU2tqgbviJ0azClPS87FSjW15XH/QP/yT86HcRhyylrh/u4agAGdKj4vepeHzDsXgo8hBF90zcpQdY7bphGRlKtUBSO1ogorKtYJuRUacenNEJAhkyGJQBUFE1EDqAqmBAq69mLXM+SKbKlF1WQYtoenx/c//3g87B/3j8fD4TychmGYpiEtS86577rr3fbmaud9uOB5L121Z3vS81OGiM652ETvXIyx67rdbnt3f/vdd98djoenp6fHh8fHx8fH/ePpdBqGYRrHcQ2cds45F6J3zrVdw8zMJFXGiUvOqsrMYFBKndMiUr9obMHsmahnJlJLVhXv3dXV1e3dPTseTsdpHFKajofj09PT6Xicl7nksr4aRNx17dXVVdt2KaVhGLo2rkYYInSenXdmWsoyDAMChhhubq9fv319dX1VLANCdB7yeX7KeaySl1qTD8CEhlDEhqUOiyWLqfpyQCa9apSBbzZNiIJQGI9o77XOnDuDjjUQE2nBWrCkTofb7kmupjqNAIPKynYmdhy8eDZCInZNwy5ENXQBut63G3M0mo5fOjFqVitoRe9w17pvX3bb1p+e0nRKy1SnpFORBJjALcwze0CoxWiWoItDPC8pgwK5Prj+xdWu6/OSuq65ff1it+2dWTB337dXsLve+K7lwCo5jccpC4IHb9hH6j13gYzIzISAgIORR8c+WHQWnCNCQLU1bx2j97ttCMXPiynhL15Ou4joLjp3qamkYTifzsM4TUsqVcDW7jpRrQIAV1c7dnjBtiKIKYc2rlwRQjBdYzzROUE3G0IVQzRQQMUqWDJQcj7FnIIPjohhVVMaXazdz/vJGrRJSJ6r1Gmeh/NpnqcLqxr/J5oq092mf3211RiTmYKWKkzmiAJjIGYm5xgtl+FpfB+cc206E2SgqmTIVAwMyAw3Ai+NAWmDNkBpAK8NPWBBNMACukccEdXRhG5hn3zUzRZvrnmz9U3bhuiDc2sqXgjYtIIgWaFWkLpic/GSmEYCKIACIPDLCYaY2j7urq9vb9/cXL99Ou9P0zwuBwAVhWpgRAqQa0ppzMupzE/MXGID22tsOlBkhc4FY0/RoWNwWKEuOakqMkqu83E8P+0fP/10fvq4jJ/Ph58eP/3x6Wk/nCtg2HQ37P2ip+H48PDwxxAdE5zO5/M8j7MMkwxJhFmAiqGIgSoZIkHTtrc39zc3t85dTLCiep7mj/uD1uqcazc9IIoUdqibDTIxYKgF5gQ5QaowzzaPMlB6cvDpof7w08z8KHaUumgt7EqtueRsTCJasiJWo3UqYWA1p5KXKsXHsLu9AYAYG3Qe2IGqQTE1FZFSpWSpGZkuCoSsAGCCK0XZLmM+WhNRnm8MYvTURQgNkF99fQSMZiCVUICBA7dz6k45uNk0L1oJNBigqiy1GKBzgGQGzkUuojGhc+zcyttNZUlpycsiYsCe2JPzBGClQC4omUqBUrhmp7XlZhPDJobI6NCy2ZptKaq1yprNIaZVay4115pr/TrlCsCQaLPZ3N3fv37zxge/vum7q2sf4k8/f6hid/f31ze3VXWc57wKbktKpRAAIRIyOSJH7JnJeyK42LpoLRLXYcXam7tsZz4E733w3vsmNk3TxBDZMQBklWEYHvaXne2f/+mf5nmOMR6Ox/v7F//r/+2/ff/9923b/ttFTCkrqUlV0UCXrEsq0zQfT8dlpuvrqxu7c943zS7EjZGrtZjVGJrZedW1q4OqdZrOyzwR+7QMpcxMfDwel2VeeyNLzk+H04cPn/rNZhzHXCsAE1+OVaspaUnp8enw+bH+9PPHaUqiYCjTPH769B4AU5oROIbeDM1YhEvBUuA5wf6y9zNz8D4EX9VEDIgcMXrfNk3uqykALLoOOMwATR0X54horcTXEQaxByRRq6IIZmhrRuilR63r94K1XQ1molZKGsfzw8MnH1zw/nB4Op/P0zQsKUnNzNy07W632e02Xdcwr2qY50dqJWV9XSgThhDapl3t0yLSdt1mu7m9vZvmaTgPT0+Hx8fHz58/PT48PD4+Ho+HcRzTkmqVWmup2TkXYliNZqa2HknXnw8zm9k4jqJiZmsK0loaE+Eakb2WNc65pm23u42q7j+nw+FpOJ/Ox+NwPpdSmDh0PjZt3/ebzfbdu3d//+///bt372IIX07M68WMXRevrrbX11e73YaIX7y4e/nqxf397ZciJlCQOXAdTvU0L5+0CBCYIwLIgkuhRThDky2U5A5n/PAojkArdR0IinI1P4qp5bOh1+qBCUBQCtXsJW/jXHeSF3SeaxEwQnRMzrFzHoM3ZGDzBmTg2UOMzEh5rsuYVb6Mk82qYhVn4jz7DfcOdqiTs7lxxyk/jqZF80pj01Unq7PqUZERp1qFjIqJKbWhu9/tiPo2bG42YHban6bD0UPpO7y/4k1DDFYJwoxzhiLgALqAXaQmkjlQ8wVMzQgUCdCBOQRPyB6NUcAQVRVIYwjoglLjQvgSB7OuLCLytH/68ccfSy0+xnGa5yXnVKZxUlFEpssZVxCxaRokFFGtlZ1XIAUSo2pkF7u2olk1yFohg6GsjWdAROJVfsvsQvDOOYeElyIGV6qQmYmZ1qqmaIhoBpBrHsfxpx9+Hs6TijGtPGj4Wk9mABUgg6lJMRNUIyBHFwYqIREaaEqzHhWqSRNvPMXWyeylOlmqiRECG5BaAWSEgBAICSGDKYACVqBH0CMYAgrixFy9xzaGvu03G95sXYzR++gcEyoYMBORqmhKdZ4sLZKXemm5V5GqIkWkqH79thBjs/G7292Ll98+PT0exsNpms7zkstcipQigq6qLGme5nOajnnug28gjTIP4hpyhuCcAhATe/AstDrNVUEdMYJZLWk47z+8//TzHw77H45PP52OP5ZSgrvpum0b2qq6Px6H5aCQvafNts9F9+dxWCRVSxWqmoBVQwTy7NvAjQtvXr359t13L1+//qXtD6YmueR5nh079oE9S60FYcLgnTdm8d47z8mTS5izlWwimqQsCY7HkWhkN3uugZVIVGqtWQWLCRowV9/o6gRcluXT58H7Zp6bvldDZM4haQwcAgDWnGvOUrLUorWoVALWKiUV5FIAq0HJqoZIWEVrqVLrVydLAGYIHmMkDoC8tvnQQKUiCrAwQdwtsZ8dn0FyBgVap4Zaai1qlutavxrrqJD8AsREBMRqmmvKJeeU1YBdcD66GB0RiWAuWpLViibO1JNtPG1ibGPjnEcCxNXwjGYXjqisw0rRIppX3sbXO78Be7e7uX773bt5ST/9/MPTYe88v3n7pu26lNLT09Pu6so513bd7Yv712+DI7cmOTAiIfMaMBPYe+ecZ8crFouYkQlozcp1RMyEzrmmaUMIIQTvvXfOhxB9YOeI0MxKrcM4Pu4fP7x//8MPP57Op2maUkre+9u7u7dvv3n54sWX5+ovi5h5LgDPMzS1Oemy1JxLLjmn8sN
PP8xLfnH3zfX19ub2DSA9PT0Oo3c+MkciIaqlrtMuA8sAS8nTOJ6YaB1MsyckPA/DD3/+8Xgcuq4PMYQmhOCb2JVcmbxprWb7/X5Oy+dPj48Pj8fDeUXIq9o0Dmu/ijk2EYFYlEvBZYGmgaYBeAY1qApccHA+F6ksoIxsaNYEb5uekW0NIVsLGTTxvCJY1mRn5wOycy4gsphVMcdggKtAbu344NfKyNWazwgA8zR+fP/z6fC0rlxmCmieueu7vu+vrq6vbq63m83ak7jEdFxIR78y8cMF3NeIrE6uWkpZD57MbvXrXt/cvnnzZhjOx8Px6enx8+eHT58+ff78eb/fn8+nYQ2JXOvfGJ3za6WCiKunHxHP57OqBu/atokxNE1jplXqGqwuokSwnihVZJ6nx/3Dx48fhvN5maZcinNus9nc3t6+ffvNt99++80337579+7bd9/d39+3bef9r2DKALDbbd599+73f/t7A0XA3/31X3377durq13bxWAEiB5CMU3tFQ6buXBeRAx8YMeYxVUIAk6xMYqInJQ+HbJWXGa36YlcIB84ElBFWxANVkkorg0uIdXobbt1Yttu24goGBF5BIdAgGTABmyGan5F/WvhJVtO9fRkX1h3YIZSSLI3zwCBsWvg5palb1PCpzH3e/o4LI+TQi3OtAKQqRLOBsRcAVStzssyzUlq6OPrN7ddGyRNh08PP/7pz8vT04Zy26HHHIBclaDinDQRRwM0iBFCC6EDCAzAqcq8JCFQLkBMAASkGAAdrCG9KYMYE3rv+rCJm80XbT8z921nqv/6h3993D9uNtvNpm+7Pqwrfq1WlS+nKsR1OocYnL++ulaRNTpySVLqvD4zhBfOn11eDUZieNasrb8SMT5fq/X5mbjHYFalrrFcK46ylJJympdlHMfHx6fD09A00fvovV/9iV82mCT64zD+y2Hw0ZFziOAv6H4ywCK2BptlUcDlNKTpekdvX7bt9SB1FB3moZTCgB7Rg4FBRk0AiQ0QFqgEkA0yYEY0AFJEACHzhF3wPnp27FBZMliVigKWTTHENngTyfMwn086nGXJ9eLXqCJqUkpZRwj65bMgY+j56m73+s3347Acp/NxHA/np1KnnPNC2BDnUsZlGCY/jtvrZes211yXfD6AcmiMOYKiEgmoCpkzIV3D22MMxs7mZfBEWpdh+Pzp0/HwYCbXV7e//f7fXe1e1QofHj4ffxx//vxpKjMy9JsGgZe5TrkUQCWuplWlGnoOXdxc767ub2/+5q//5m//7t+/e/eb2FxOzEzYBN82QaQQXmD1Ilqr7NUmX9oQeud2IfZt05j6WnBZKGWXqsuVS0KVanDRvYCt4Typiql4U3JOehbiqrIcTqd//Of9Tx/satP0vTYNtW3su7Dp/WZDMShRnYaaU5UsWlUFEbVKWkqBJERVNS9F1chxFU3TVNPytXHPLjAIXvlfxk6ZEMAcASmwkpjvehdHQmeCVa2ApssUkY1I10KeWAzyPMmoq7EGiQxQTFalgo9N2236za7bbIipLnMex+VsakaIHrA33IbQxybGDskZKq5NULVVzq641jEghtWwGsivAcRgEJrm5sV9t+1fvHr5L//8z//9v//fmfE//If/2PXdP/7jP07T9Pj4mHJ58fLl7f2L3/7mt9dX12BGAI6JLwokYsfsmJ1jIkQiJkIEImC6vOMX4g6t/nm8aPbXWCy6TMJs5QKUZZmf9k8//fTT50+fj8cDO/fy5etv37377W9/u9ls1uP3v1HELKnCL2MNVaUY+76/2ixDLbMBiQAShxC7vjPDwxER0bkYYrsiXtbIYjNZwfe1lmVZ6Hm24X1Q1XEclmkZp+X29u6b7765ubvtu9g0bfAdIZtUXZskOR9Pp/1+v8yzWEU0szXm21YdrfeB2FeBlGxZ7EL9eL5UFVT5cr4mJ4QGCoagAB6JALCWUtcWoooBiTgNfhVElVJyzuisfkGurFqhr7SECM+L9VfWu3WFlloWkWUaiMitPMcYgvdd1+y2/fVuu+k77xnxkjcBXwcN/PoytVJKKQUR1l1n7aCoqtewqvNE5Cbd3N1N59P9/f2Lu7v727uPnz99enh8OByehuGcliSqa07AWsQQUS1VVWqt87yYqXc9EQHghb+3NmZNcq5mtizz8fBUSprGcf/wcDwe52nSWtm5vu/v7u7evn37/fe/+f7777/77rtXr17f3N7FGEupy7L8Rd3f991vfvt9reXqemdm77797u7+vu86F3g1ETM0JTSgZUlH93Q753HKhQWdwylBEW8IyA1jdM4b2TlNJioC/cguOOcdRyBWYiESXNFRv9jXUYGLeCBa3ceqBujMyAwufr81kU9RK4MSGNWi8yino9T69cu/9liUQOky3yVsnJprN4E9cWR2S1w0ASWxXKGaZVEwNCYzlKLTlPZP56fD+eZ249nG8/Hpaf+039fzObQIjSNAZxTAAA0ZzVNSRiTfhtAH13nwBIBaapJijOBw9W4AkgAZruJEVLmwg5kpBB++4vc0TfPu3btpmnJODw8PHz582m43337zzf2LF5vNxjmnIoC4jn0uwHK8iGe+2A7Ws4PZusJf1EXrKVbNENTWBeo5WuXy3W2leq210QoL5nWkVWu95LfVmnOelyUtaVkWFbi5uus3bRNbVZ3neZqmLy6YqnZY8mMqPVgL2DI7IjYC+2ViL6pFVECWoui5n5c5+LPBCegAmC/CFwhoDgEQFUEZDWC6/A9jAZoRKwIDXqDd66qgVZYx11QRGNETA2EhdGYqRWpN8zSPZ5hHXYpcFI4r4b6qfBkzXS4kwIjtrrl79Wqe02k+nufT0+l9yuea05JyDiHXPCcZ5zCO53kaJM+apzyeTB0Kq1c0UmY1ViNQtIBr5x/RRHPNg+aRoTCtTWBEDCF2V9fXu91mGGaBMqZlP4zneRGEdk6OHChWsTXPSwyRXBfb6931m7s3b1+9+ebtm7/63e/+5vd/+3Unhpk2m/b2Zts2Xg2DD6VUqZJSTrVOXJjTpm2q22DX+q5hUxlHnGZcCqUM2UEtWtQInXPoHTOZWSkVagVQZ6AiYlir5DyP++MRAaKPTWNtR30b+s5v+rDdcNea9+dpSk9PmtPahwQgNayiVEXIqkotWUVIXRWtOUktvxrBEAN7cBFcC+yAHTBdcjBQiJU8+rgEH4jATKtiUjWogJcYMEOEiwhPspR6kXnh+uYaGhIRO2LnQgwxhhgBLSfIpovWKpUZgsM7H150cdf45pIA8IsnHGgl3rCZXUy4AKIgv1qP109DgUOIYbu7WtLS/r/bUoqPsd9sbm5vHx8ePn36LKrXt7f3d/f/7u///vb2bpUSOmZCJv5Sk1xWnn8bdfz/53X/4uXd/Yv9437/9MjsXr16fXt3v9vtvthm/+d/xaUkhs9BJGaO4/39myq16/uUpuh91/bBNys7pKqmlGtV75um2ayyoSpVFAnQYB1yr99ntTaCiopoWhbnQ9O1N3fX/+E//f1f/e43210XQ0TjnGQchlrFR//x46fz+TgMh2l+yn
k2c8yEgMy+iV3Tbpumdxxq1WUpyyJ5nQSuiJUVU2hKAEzEzJ4dgwjCypQgIlOrfVtFSs4i1WBdZbVWMbN5nvf7PTCfh3NKSeWyACLi5RNdjqSrKvu5iFlfV0LP5OhSfjrHIbgQ3HoQidEzg5moFKkAxHRpAMCK2bxYLZ/vSinl4eFhnKbNZrNaoC9LsUh9DqoAgBACEnnvu35ze3f/zbffHo/Hp6f9OmN6etofj6dxHOd5TimpCCIE78dxBKRSCwAUqaXWWquZhRCZCRFEKkCSWh8/P+wfn9R0JYuWnBEgNk3btjc3Ny9evLi/v9vtNs7xPM+P+/00zeyciN3f397f3bTtLzqspml+91e/vbu9/du//X2VGmPnvSeHRIYAgIQS85JC8GplmJ6K6nB8LEtignnBIgzIwTWOm9BEMMlTPRfRszsvzjlmj+gMHJoLyEBrGO1FgrAS3cgEtIRapORSalVde65gYBWrWK0ipojqCILHWIvNYxqH/AVEBojGrMwVL/5URkIj71z04aZFbkPctLEdjlMpSmPWxykPSWbFqqtiFUX8aZA//+mzZE3n4ar3koflPFgWjw4EpDgVD8jsDFQNi6BB8OgdbzbcB44MhGrAUCkoMpFr0EV0QclXIwN0ziOwcwhMQGzISI7oF8voZrP5z//5P19dXf38888fPnz4+PFTSinE+PLly++++26z2axPxYoKgGf573Nhb7QGFF78s0VqNdMqplJTKsuy5JxFai1S1sJExAzWSGYAYCYmx3yhpKyhB88V0uU5L6VsvoplWWX0RDAv037/1LZdfW6RqdkapOeBOuDe0FfAtYKyL1hMYreug1iW/Pj+s2NKtYxLGdSESZGAUBEDgkfyaz0Bl3wIUqyATCKGbLTWMU6tKQXGYR6Ps6mpOHJNaDk21LXsg2otUqZlmubJpYS52LN+eUUJMIhD5TUO48tF5jp3dXdVy9tU5yUPT4cfp+n4tP+cc8mlpEKebFnyeRzPw3maz027ZQtmHs05r8jenBNwupa37F1gBMvLNB4e9x//fNr/7DBf7doX93fsbJ7PRm5cBiU9D+N5fkw1FcWsXFSrgWOhNcOgalUw5K7bvHz95jfvvv/97/7ud9//1bt337x+8/bu1avd9fUXTYxjd329/ebtC111eFPZP51XDz8iAZpoXVJDjHHTb3c3yjzhQSg0sba1smRYEpwnAotNYzEyeyhaq0FVRyAAa5SmVKlLWoZxWhKqRkJzHryj4LmJvmswRPBhYpxIdV7YOde0KmbE67O4vlCI65lVTKtp/Xroj0jIznyjcQOhRw6IHogNcJ3eABUijDxGh44LUKrisqGKIsLalwRjQKCyyrsFCRhWK4ajFdHIjC56HwCw1JLmqUo5HI/DcFqmAaS2xHe9f7Nt3l3Hm8gNGYM+S6rWBidf+Dbr3mhmiGJf8oIvlz0zutdeiA8hxGaYpj/88Y9v37x99eo1s/vf/rf/Z8r57/7u77797rvb+xebzXYtYuCiSFt/gf+LtYv9X/tz3jd3dy92V9fflO8QIYT4pa//b1YwAOBKTbZ+UENiamLz6tXb2DS73fU0DiayokdUVa2IaClFxZwLTdOBXc4Vq7dKbX3ff0GYX1SrJqoVgJmx7eP9i9s3b19ttp13TqotUyZGqRKaOM9z20bnUKTkPJuy895x8N6F2DXNxoeW2KtYKVLKujB+/XNSMCNCR+SIlEgBEBBMEVFF1XPbxFJrLsnMkMp6J0qqpUrJ6fHxURHOx2NKy9rkuVQxgBeuAj3fN/xl2Xn+bVth886R8xw9B8/Rc/TsGQn04oCi54r1a4L6r+9Ozvnz589Ph8Pt7e12u10JK6tyZXXGigirysom9r5p2812e3Vzcz9Nw/DqeDjs9/vHx8f9fn9cIyiOp2kack4r2k4N1IyJ1/NuSimnZGZmjAgiKwCqTNMqsikGxkTeuSbGpmm6rt/tdk3TELFUmeaZ6JhyiSEQOwMMgUXq15/IOXd7e7vbbV+9elnlQj9SVDRdBxVQY07VB29YlzoaMeDPp9NBci2axYCIAzcc+mbTiZSScipZBeeCzgMVAyrADM5fIoDB1hiGteNFRmirSMZywVJW6ZshIRAqsSIaOEBkYlVaajFRdMCefpmMIZnzyr4CKxABP+vu0Tn0wW09A7Ohbeb/s7337JJkOa4ETbh7iBQlu1q8Fk9CDUBydnbncPf/f5xdztnZJTgkOCBAAE+0qKpUIdzNbD9YRFa1wAMolmfA0376Vb3KysrMiPBwN7t277WiRrvB0m54s8+3g/RF3SFBmYJZ2Xf7l2+ubYRlTFGiykkKTFCpRMUAgTEYqEAZFTMFrGNo6rBcchOd2IBFkYhYkYlDw6HhkJCZjAyZYiAMjAyIgoBgZEb35llK6cmTJ8wcYzKzzWajqn3f931vBj7TzIyIEGnWMAGhKhmYMZGhqSqVjEYFSFQAVABCmJA/1w+KmBR19T6iIiAgqJKqMRu5Nt07NyEizvVVA0QKIYaQiJCZmIOUvD9s9vv9zc3Ner2+P8dc/pOIGqIGMRiYfyBTnMzYwcFrQmK1su8KQAadeoFNBtv+FRgpAHg7ZkVz/FUBIoIBuukKEWEIERFyltxJGU1EiC0OsW0jQmxakwImk9bVzJ1uTCemHYABWoWQ8H6NGgCBItVrOi2nWZ4d+ttXb3693d0edl23vxnGMgbWFNRgFNn3/Wa/DdVtrRExRa6KYjaSEEwrxLpKCwDSIl2/u379u+tvfnP763/obl5ZyaUcAEXBulzydvfNq2/qphrHYddtsmYFEMVcoKgGxsjIzKmql1XTLk8ePHjy6eeff/n5V199+YMXz148fHi1PjmJdR1ixKOnKiIRpiqlKqko82EYx7at1cTMREUzFJHDMGy7fjWUlGinkA22YG1gbVYl1arEUuq6thQBuJRcVLVoYAQxc2KmASpYzjIMUnIBE0AFNAJkDjFCCMY8NPVwulJG4hBilTVPjmKB1SHoyERIzMxYyhjCvXsfAIAAI8QWmjNICwgNcJy6UZuAjYBvYj0sUn0ScRnkjVBWFFGHI1HFvbxdkIKethqgB1CgiKAAYNkAVbXvDyGEUvJmv+u7g5ShBq2r6iylx4vwsA2rYBVKABMzAqW5afcsM0UDEEMxy6JZ9L0QwMy8ZRvEKp2cnd5sbn7x9//j1es3n754vt/vX795E0JYrtcXFxdN07zvMvc9w3Gft4jEc+X4vd/cPeAFpphSfJv+8pbs5b0RVPdmZkpEgamqls1ivTi/uLi5ubq9udncXHfdvkivpmAFTFSKmRGFqmoDByJWLTmP49CVnNFd5AEIiMhcwxMYq6mWTYSac98d9gBKRHnM45D7PqtCUekOXc6DyCgy5rGTQkkrqgNzjKlNVcshApEZej7zTnSJE3Luohxi9oI8IgQEyKqMkCK3baW6QGTkAWAgb0CIWSS/fvVyKGWz74ZhANNAbg3iongAQPfFm8gwHs56yy41AzEEImaCFCimkGJIgQIBuTmqytRf+tj3y6/tpKO/izTHPH7z7Te/+
93XNzfXp6dn5+fnk7tMSjFGC8Gz1SlnFSmleLUohtA0zdnp6cOHD3e7/W633e12Nzc3r169evXq5cuX33Z9t9lsxpxX6zVzlJz7ruu6w3a3nZkKU1NMUXUoFYlSiClFF3i3bZNSFWMYx3xzewsA/Zj37aFdLNq2Taki5mEY9L1bhgiZua4r5yMBelSvBAaGJrHUkKoYE3HF7eq0WVy9/Pbr61evuvEGcUdEMdbVYrk6ORnzeNh2fS8qkK3EwKQKaCTECox+Im3ypEUDAJeCTl0jJSsosDJjjMwhYUgUUogxICJa7g+bm2uL5bRtLx62Kc2eN4jGqXBlyGjMwAIoU7fNwghAXNV0vq5XCyMMXYblIZ/uhte7fnMYhy6LKAWoIq0qWmNJu9sE4eKsXrQxNBWMpez3LVpLMWAYy9iPti9aOKaTk3q95LrGEMgUcoGcTQ0sEcXAyxgaCsGYAMGIiANzJGYFzKWoFRIAKUcdDCJWVXVycvL0qarK9fX169evv/nmG59UZ2dnk6Ey8swCw7nPEUxZmPvjza5aM0yjbqyQc8m55Cw5F52qJ3d1JX8lQARz7pIzZnz2oZeWzPt0ErnEAQByHrtut93uttvN/p6pGoIRaASomZpANQAriCp5e00EAfMQNhBVjImZiUyViitrTQkYITImxAoxIXoQwwBgigBu6l8ZIHOoK4xJQwBiRDApJSMCOmm65MF6olRbzlgKg8XAKaYgwABU0EjE1FDRTJCIuEZ6Z7cEMqywWscLu+yHT29uf7rd7V+/vO72h2GQMRTiEFMFzL2U19uNUjqHKqUWoah0230/IJGu6kRtOg0hdN3um9/99uf/73/57h//Lr/5DodDZOzH4eXN9XfX16/e3Bjo7f5mtayrKnXdoFYQDUDNzQUBkENdL1brs6uHT56/+OKzz7764ssfPnv2/Orhw9PTdVXVIQZwhHLOz0Rkuz9sdvsT5hi4aaszWyLK4dDkkvNYxpyd+Xd7fcNGKaWch2EcurGrYxouLmNba6ZURqjIGMVI0LsISFA0ZhbFyInZpS9JpdbIpgYwN5EzG7ONWczGImPTalUxRsZSdCQzRgohABLPyvCYoqowY9M099t0gIJZ0LCw5hxXV9icYtVgiEAEICYFXv423gzr6usndXqU6JsRimoRbwRTvK30dFfN7mD+I2GZNmswmapOLrtFhUkmEEyaKjys+ZNFvGroLGqLOelAyGhWrLCJmKCqaZmNW7SoDlm7Ufqi76/JMKd6McXHTx6/evXyr/7rf91ut1dXV1VK/TA8/eST1WpVN40jpEfA5w4WwTsoBu+5zus71mfvwjDvgjKT2abe932czoG97Qrz/ggT8WJSIVpKTLEKIaqiFBj6MuRiZVQzJEWa5gWSG1AF1dJ1ledtADqViWewycGDELipm1TXi7YFgNvrzde//S5VFSGKiikAMCAh0na76w77cehEPFNW1Thb4yTmhMgT+1pN5ANXBQECUwzeWUhVTBEIA5qqoBKlyIbJBWaT3J0YbSTAYczdOAzjKGVEs8hT82ScNFjTpPIfPKtzrbzjkYEoBEopVClWVayqmFKsUoiBA3tJyjPxt/pYuEz9nhERwF3bgTfjOPb94GDJer1umialNOExM83laFXHzCklT2NLKaenoyfW2+3m6urq1auX33x3fv3mzXa7NcOUEiIM/XA4HKZ2DQDgXqlITkKiSCGEGGNVVVWVmqZZLBZVVYfAAOhNmsax9P1Abq6KaGYhRJHyfqx9p40CmGqACIBGpmBoEiRACMQRKWKMFVNTpWUKC4Wm718pFKQqckohoWHkSEii2UXzQAhiBsQYAI4g2WSsrwCAKGDF7+9JeJXqOqUqhlAx18RViAFU8rgfLOuqWrbrFy+e/ejHn7ftnbQPj03tVVXAAEwUCUtQBCXkgLBIaoaBYJmwCryM4aSKm0oPSUqxRFTHsEyhDVijrBOcV7pojAGUYciQ2AIKKkopYkqBU101q7ZeL2IIkwcSgRXVQhYCpIRVoqrGSMgcCA0JmZACh4AAYgYqoGpOWp4vdIxxsVgg4m63Ozk52e12fd+/fv26qqrNZsO+KBCTF/Q9rQYg9ooqAsJcqHZ/jkkxZOaiOm/HRgCsijaZUdlRRW2TmYU7DMDsO2oexLhkh9y/BUDEzCS7GeXd1Lor6QaABFARVUzJjMGKIQMIQgEgnD5ZAKgJEyIjynxPuh8eIjBAAEMvrc8lYxeUIiEYoBERR9dOV8mQQVXHXvrJ/cvURIvlMQ/9eDgMu60RkUogIP/H6DVkA0QFRDKkcLRUu7+QMcSaCdsH+eFnhx9t94dvv/2277p+/92QBZCRWRD6knG/RYpNvVq0K5FOoXTddpPL2F03445TIOZvv/3mF7/4f/7q//ovL3/z91XZV6hViqOU15vtq83m1c22SNn13bJNq0WTix4OvZfyEDHFtFotL8/PHlw9fPj4ybOnn332xQ+ev/ji6dPnFxcPmmUT4txY+O2EuRTZbPbX1xsArOvKfTVXq0XTVKXkksuYy9DnfdeXnG9vbohZVcaS92OfqoqaZR2rYopEkQIQaFFTN6HVgsRmhEDo3Tg5xhAtRTT2OVXUcoHi00tATUvJwzgalrFYESpC7K4DeAT/mKmua/ee9b4ub1+XAFRDWkP7ANYX0LQQa2QEMBBB5Xj+8uTs8sX5yeub2zdoQNKNNhQpJh5FmIG7KHsK62WLWSQOiMYm/oEmvhhRCFyHsOBw1cbPz+rP19VVzasANUjQbCCogCqkQqqgomVqmFREc5GxyFA0y4ehDAUloFRVjx49evPm9a9+/esxj6oaU3pwdfXZZ59dXFzUdY2I9wS094s7bzmA3//2zxp3yfxERL0nC/h9I4wjhkhVxVXFMaJBGfrS93noDzkPRU0VRc3AOPg9L2YCGF0pRsREgTmEwKoTw4jIlZkcY6yqum3aplnUTVOlCpR/95uXN296njxNYqoq17OYwc2b7Waz6bqDmiITIc4rKeGcF8wxphWxItOqNx2+GSLEwClhGouqCQoKEAAqS2AzRQUgcHTagAzIbEAldl41IRAaIntvJwNTPHpruOwFp0wUEKfXCS6Y58moJlWpSilVKaWYqljFEAP7KQHvcmWOFUyUGJtxmHeuTSnlcOhU1b3/l8vlarVaLBaLxeIYzbiMLbjaYWYjm5nrSl2d3y7a07Ozh48ePt8+v76+fvXq1e3tpu/73W57OBwOh4Oq0kz89o0nhFBV9dGMqKoqF8fNQienoVMIIVVVjDHE6NfobiK+fSxOkPKT5u2jj1Pdu1gYIKAZUU0RYEVmDNzW7WpxUtUnuST9btP3KKNJJ6YSESuyHkdQYa3Y2NRjUplrmnCvNZAZgoIJCJgR4qJtrx5erdarmEKggMamaKqHw647dKD56vzs889f/B//+3/+yU9+cnp6Mn1U1SBDlD5oAuQiCr5WMmKo0AIAkgKpokJQToaRddnYgwRdE/qFqFAVqImxjaGKgchSRVULTFmkGEusNDKY9SWjlRyR6pMmtIuwTKEiZghsgSAH
pxOCIliNtiBtiAIDBaQKkBUMCSVEAEBFKFlU+F7/FESMMTr7xL2I2nbqEHtzc9P3vcevKUW/uye6FyFR8JX9GEDPqwzOJSFivlt0bCYB2+yGNM93T0JQFUxBpy7CBnBsMD/9FXiKahP9vJTVer1eLBZHfBsBI1ICSAgRMQAweaNyUIQIUDyjNQiIydsjmxGYISJSQCj+5mJGVgAUoSBFBEWKiHPG45FcwFBxXWNdGzGKFgCkvdgI3rATAFT64aAbViYMrH2HIlKyqUQzcGGATtWEuZp8f5n2jNaQkWtYn69f4JdjkTfX13kYf/mLw5i3CqgExWQsg0qJFPrlbhj2fb8zjDnvt7c3v715HRbLXHoF+pu/+e8///n//df/7f/cXn97tuBlHUIMWfR6f7jZddeHLCJD1v1h3Nz2InJzGPa95QyxSmfnF8+ePvvhj3745RdfffrZF4+fPLt48Ojk9KxpFiklIJhK7nef/biCyeZ2/923112X69r76sW6jm1bBQZTzWM59EPadd2h7/pxPGQpPiXVRrmGlyFEU0kpreuTAKSSJbswTezY4pPR02cKHCAEUvbqYoGJLatoQIyQA+Wh74axG/uxZC7CMYBaKVJURExEsZqWNw6RmO98FRHnjqIJqIG0wnqNdQMxTc1vzOjkKj14fPrJ48+fXZW8gzo/2o23/bgdyi7LPuuh2FAsT777elynyHdNJEYMhBVxE0ITqGGqI9aRlhWfVHxZh0dtumzCSUUtUwQjlQkIUJ3/iYpOTRqLjLlMXmDvpZXm3s0IBlbX9cNHj5l5vT7ZbDbeZe/y8vL8/Pzk9NQ7/d0RKCaGCgCA99+4j8TcnwXvhx74wUjq7ZDF/39KkN4vPL03AnLyOASIRUFK6Yc8DOM4dmqZGTggIIjIOI6leAdpRVJwPrAd4ZagGu4FMez6JOdax5gCJ0AeB3n96uaWO8KQUtU0bbtYaAl+lsZe+m4YhkFVZ/fdMJ0+M1VBpGlRm/0J7x/MvH+jK9hDYERQNENAC8kMAbIAqvfEIFFTJRUkg8BIBMSMgSnwWNyCEEwNBADUjn3BJgzG219xCDEwe4ocOaQUYvSmRlNro+R2RnSM6afJM81eT1Enw8j7VxVUvW0ZmGnOue/7w+GwXC4Xi8VyuWjb1pX3PGMyXmbyII9FmDnGmCT5gycn64uLi4uLy4uLy+vr681mc3197R6si3Zx6A45OwVTAL3uUy8WC6cVxxj9jfySOKYSY5zkZymlqjoGVUfw/0MD38If7x70JQICIWIgbBmQiVOMMaax2MtXu/3OVLMKSRaRDCJkAjqaihYAC2gsoICqkL2mOWX3ZmqmaOLpGAIzI0CKMcVkpnkYyyAy5jGPh/1uc/tmtW5ePH/6Zz/9D3/2058+ffYs8GxGhL5dVsgJmLyZArAaUaEKMHiJjEgZwQAJMDKHilqgRWtDU4NqzVhHbkIMgZWIAlJlgGAFKAAGRYSS0BAUmSimdhXaJTatpaRMhcwYMvJYQ4aoQSXVY9VQ8vZKAbA2oDL5+AcEMGYxLjqi8R0hHdHNgUopfq1PTk5s6mbPx4vuni5+5WEC6vgYuNwPYu5nSzgp+OBI1PWvc0BjqgoINFVjAcCRdr9DTI1Mj8/EI7+egIipqlJdN1V1p7QigERUISakBBgRGN33YJImiroZEzBCIGAEB7yDOyQA8gSGKs52BwoqAJ4NT0SDGeILXrROCShYzkDk7PFp4SYwsFIydAcv8ZoplWIqYDJz+XWuq5mi6e9b2gmAIS3jWTx/Pn52u7kpuR/6ze3NbwqEbZcFYByzd7A69Lv9fmNAWejN7e03r1/95pt/1BjH3A8F/vrnf/t3v/jbX//qH8duM55U2zpSoKK27cddn7ddUbWSNY8qlR/c8rytQr04PX/w9Omzzz778kc//tFnn3/x5MnTs/OLul3w7NB1F5a+lzGrat+N2+1BDfo+c+C6rlQbaDFwTJFjCBzYN3AFLZJzdkaeqYiKYGBEWKzWSwzIEWG02b++WMiqmgt754wQYbE0KwKCVlCFx6KIwEhqAKCIhOQqjpLHIkJmqiCqWiS7S8h8RKDv8zDQMAAlDDWEGkMFnIATMB9bqmO94JOzxcXFk4fn1F/X9fB8P2z6shnKZtTtqNtRDtkGtSJaVGDO9hiRmSJTJEoELfOqSosYFoHaSHWiRaJVolWkdcCGgQkiQTBvwAaqYKomakfz/yKqWsos3BD5YC0J5qI7h7BcLlMM6/V6GEYz9VpzVVXwAQLLvZNy/PLezP0Xju8n894foVk0nl4VgZLzOOZxHEoeShmJSt2gaOh7KLnc3mzHnIdhLCqsCAAqIiqEFDimkMCN/sHTdPZUzVSHYVCDOIwxVCmVFDVGjSEhhhhMMkg2DRQoEVYi6OIbBPT+AQhooEVykdGzfSe8gMPXdjfz3IHBJyCjBUICVjABBLPKvUNGgyJoFkNYNA0oaRECHQYgRo6RJaVS5+KUd52C3DmIIXJUndzux+tW7F4/TIEjT7AUTY/HSUcPR8n8VE90i4zZ5t9klqoeh28n7q6AIuVwOOQ8Hg77qqrqumnbdrVaLperxWLRNHVKlcvoPfR566uZihARc0gprVbrR48e932/3+83m83m9nZze7vdbrfb7W63Oxy6nEe1QsxNPRGKnZfgPnhH+MejlhBjirGaAZsYIyIxM74HAH4/JGgT2QIDB/YgE8HUcpH1artaLRfLQ1GnJss4DGUYch60ZFExU8IwdaH33gp+ilUnM2QAIDAAAQPCwOGQeLfdjGO/3+32u/1+s+/3h3Ho1SREPD/7wU9+9OM///M/v7y8ChyPd6lhKOk01w+0ihRogggUhHlIcSQCFTYIaAwT6deQgNiIVAHqAqpGoIyFSQlFkSLFBjERamAhzQiqAwuRMSJSzLEpVaOphhSJwBslFJIutSOZKUuoDqHJWDEFpIDAZphBFYyNGZHIjMpIDJB0Xl48BPFIpWma5XKpqq6GO15cj1OZJ02R6l1e+s6mdVzxjzGKyNSFdBgG54+7sK6UqRxg5tYMzsEGQs8E+G0AHxFpjkht5jhYCJ6izAEZYcXcINZICTECMM6LIOAsUVM1m7Wo3pLECCAgagGe95P5+eB3qKkIoYLOTRotAKFlBEtEQFTu4UYAhoCBUH3ully6LpRAhKQK5vnUhL5OFQxSJVXy0/D2oGlBxYDMePH4/Gf451XNgOP/+MV/29x+8+3rXROhrWhZxURxf9iHcH293e0Pw8vX19++efny+rtOy7fffrPr5R9/9913L1/vd52IXm+G7WEAQjEYxfqs+9HAgBgSM8dmvT5ZnJydXT569Oz58xeff/bFV08/efbg6uFqfVLVFYWgZlKyV9CI7gyU399ynJmRiwIVEBmydN3Y7uPJqlm0VV2FVPESqxCgqqiuw2az328hl7HkkiUDIjJxtbBQUdXQOBKSqI5FAHNBQNBY123dYl2Xaj2gsYyhjJBHCD0yUR6DCZiJARuSEYEakoJmBAAjESrFWw54Lb1IEZFxGO+b3ZlhsQTYcGwxNcZ
s6M6n5AxtAgBGqJpqsbo4Wy/6k6uTvB9KN5Y+60HgkG07lsOofbEha84TdE6GTBwDpgCJIRLUjIsYmhCayCl49ycIpAGERRFEfZPTUgQVoChMjR6LlTLdeuKmAqVM/0TekosjGKJTV473ckr16VnyW9ml0zbvUHDvrpzIMJ6R2h0F6q31/Q/HHm8Nm75OHCFw4bDfBn9ohBAYAEoBER2z5FFynn0MAELAECgEGjq5vdkMY+6HUVVQAQBKEfVeNRRCSN7uCQBoFnP6WSgiOgwiKsFUvXOGalS372QOTGwKVqsU9WqGO+DgvBKpailjKWOKkQiJAxL77fHWuXLQGYwmux03SA9mBUMAIwRQFVMrZgGAK0ZDlcJoTEBMQSGoVaJFrJQixRM0nUQuU68N9P667OQbl/Vz8NCKyROLudwSAjETBaDJ+meGY+5dcyehvYPETBUrV6ofdafjMHDXdSnt9/v6cNjv94f1erlYLGe2SpiJkEREqupflVnvsaB9d/E4ZrfdbjcezNxuNpvdbj8MfZEMAB7BwL3CgYcvUw1r8gG6Q2KqqmKeEbh/VizubCNAwhj9c2Yp6/VquVw2ze3+UMYhl6I5j2MeSh5LyYDGFFJKddUwB5s6e87bqmOeTMwERM68DjEs2sX5+RoBytjtQYbh0Pe7kkvb1lcPLz///LOvvvzy2dNnbbu8f2+OCr/bwz/cQKqJwqQORwMkBkee1RgsIhIAuS6GEBCMEQxVGQ0DWCRLaG6ESwFCjRTRUEFIBUFR0ciZw6hIo0bUWiwwOXZhqmJZTRUNI5JSLBy6qfAK6BbrCsaMjMiGWvLQd1f9yfqH+Sh8d9DFbTQ97vQZcry+fvXnHRrmZO6uJnTcuo7jyO31kXMehmEYhpzzbBCgbwcxk9KTmSVGZu+agKaoLo2fXl+nTm1WxnH0VzvOZwKITsgFiAARgRFocgry6ICmyiLOKg40MGB/AiGCQzATYKOzEyXNBBZDdDobKFIZOQ9cRmQTKaoC3rDSJpKdTlWxonlQLcSMZqwCpj4d/J+hKSnRvYr1O2NiVAMSNOvqUXwEJGM5LFaLX/3yb9+8+m3ubg59r7mPvFsub7NgEdzuDt+9ev365tXt/vowjuXNzeYgL19vtrte1JgThxASE7MAQFEB4TISYttWF+enz588evzkyeWjTx598vTp88+ePHvxydPn5+cXsar8QxlAKXm6dO6+9XvyE0QMgWMMMXKIwbOUyfo/51IY6siBExAxhkgheM2SAbE7wJgHUYWCZrZYrdfr9V7LsNuJ2JCzAgymlseUCygEACYaIjXIVaiqmGJV8zjEcQAZoWQdswmgc5MqgMCqIkzFlFUAJ6KSSOl7y2M+7Lu+H45gfy56ventdlifaqXo+wAgTvaOfgkRgROlJqZUNWlVoUgoRYtAVhwE+lKGokOBsWjOVoqKd/dACoyJLRAwWiSoAiYO0dkUTABmVrSoFnMqRVbIamJQzERRp/TVStFSRL2BWZngGZtVGh+4RHPJBqfs8+53c4UV/mU0l//fR1DNqpDHKVYTtWKkFqZ1xhQBmIOobHe7vu+HXMwdTgBUihYBAyTmkAKoG985YQLdGQ5xok2pqRQpCObklgLgepEhj4eub9qm7bptirFpFvtum8tcGzFQsZIHSQlpwTEETgihFMv3+kEgoCPdSGjmLoEIPPu2AiISGEgpymqlECLFEBDY2sCIBBg4CwSDIqYCYmoy1erR+1fhzL92rSZTYA8awlRnoSmYR08g2LXeTORv4GopmuuFBuC57R1NYD4WmHjE069czGOz0lVFyjiOXXfYbG7fvKkXi8V6vV6tVsvl6lhjclIkM8vkRmgu9vMfj7TrlFJd1w7bMHPdtDmPqkfB/JSy+4Z3rCD4j+8EMf6+ZsAcvh93+b1jZssQUYixbpqVynq9XK4WVZ1Mb/LYm0Epo4OGueSUQrtoz88vLi+vmqYWGcHUuf9e2iSiGFOqUwgBAzFPVOW2abKU6zfXL7979c1qcdgdmPjhwwc//vEPf/azn3z66YvFYvFOXWzby3/99eavfv6qbuuQ4pQ2GDo9E2YyDpm6pBnAjNjm3Q3MEBRNGYwRCBCFEAGDAXmWzqAEYIZioECGgAZkHCyy+/eBKpl6Sm9GgP76riybJHQGpmaGhrMFleTcHQ4/+1/0s/9tf/bw3vlG9L6hOefXr19//fXX3kG9rusjnKazem4e30e703vjiAsevV6OgY5NQtCZ447IRDmPOKmWwJR8kgKgqs6UUVXNXbff7XZd16ke1UkQAIJBNIhmAY39bp+IMa4tIv/QxYlEU8qHBIBMgACkqiYGOoc+PhU9slOwYqJaJFsZDmMIRkwxmiiCBAaLHARIDQAFTCZyTDFVKwUBCNRnJYE5idgcbgSLaG/5xMBbu4aBohoQhoYePLn6j9V/fvzJ01/98qt/+Pu//dUvf/7m21/t9i/1ZsPxzboT5HTohpv9dtt3w5iLSIbpAgBRYF40zcX5er1e1k0jgPtDd3N7iDe3TPz4wfkXn3/6Fz/76Rc/+OrJ0xcXV1frk/N6uazqJnrxyHmpk2ePJ+OTHBXn6XT/IJioaevVumkXrZNkffEIhJFdCcCEUakwQPIEI1Rt01ZV2tzubjfWd71pSUyPri7OLy5fWjlsbovYkIdRFQuZKnfD0A27avcyxapKbRVXbXO6Xq5iWqjqOELXwWE35M0IxZwwUFesqnk0RENw/ZUJiMJooAJjP+72u/2+kzLNsW7Iv/zNy/zyuxfNi8uTsQGkwIaogDM9BADIMJoFLQBZwAqCRa8YAyzN1IKhGZC4oXbRMhYVQ6PJ4AFmcTQaTXPFlSAqZZRSdOpXodksA2VAD2LETFSkmBYT/zcpeOep/A4rxqlZ07WbGG0GBmrHjWe6uWfo9v7f3j1Cdz//E8GXD873KRQEAPujXy7kXERsGESKqaPj6stnmCRGnJGiKOz3+0N3AGLm4JJId+yddAQhBEgGxcwIfVPHqdINs45n2olVpCAgUo+oqrmMw5i7XLo89sxUVVUgHqcDOaJ5SoR107btIqYaKDh6dv9IHSrgEAGNiYgUgRDIDIwETIApheBmU2bKTJFjQCRGAONQ+qxZQebO5WjHIIa8kAQ4MQ1dUTkLhKY70l1Ip2CFCOcIxuObD3saGsy0xvcsFaffOx8Aj1vA8ZiPgYgXg1ar1Xq9Xi7XRy6L70NeZnIRrAP7Lncahr7vh6HvxnE00xC4bpoQox6l4PNGdX/AvFRNSFOMR+avIzeqFkL4F8fuRAQxxqZp1uvl+fnpctmK5UO3A4OhP4x5KCWLlBDS2dnpJ588fvz46Wq1UB0RLYRAgXnCwqYwK8bg9K/gyFngUsput7+5vn31/PXQjSlVVw8f/PCrL5+/eHZ5efG+L0I3lr//9uavf3db1R2HcKxmzFoDmISyajjbl3mEYejSYXCZJoKRs/kczkX1P0JENA9izJ0lfEkD8lXuGMSYX/wpSQfvSWqTGy7OGv75wyFAKWXoe1w93h+G9090VVXL5bJpGrdGVNXD4e
BB8L2Y4wPjOBOOj9z/kyOIeD/igbcECHMwNIdKAKDedMrY28yBO3FPwyUXegyp744CgMFojk/oCEN7sOLbrc0B0vwrN/KbhCJGgqoG6ngu3O1NhiiMVYiUIqVEVUNNG5qGY0WIaAs4XcLY676Tru/7QUtGMEIjhcnN0pF3R3EJGSfj70hIhBX+AdzSQBERAler+mH7yXJ9slyfrU/Om+Xit+vVm+9+KcM+Y9qNClQOY96NMhpSahqmNjb1mqvlUArGWK+Xq6sHZ2dnp4vFQoG2u8P17ebV62tifvLowZdffPEXf/Hnn3/xxYNHjxYnJ0B3renMrao9a2RfCuz773IOtFo1Z2ertqljSvNqie5a7o11XETvs4PI6YUBZ6eMfQpDP6wWzcXp+uLsZNhc100TQkA3tp+SDCsld2C95E5KJ9VILI1KYk2VxEZTi6kuGGQcwIAQqxAQjSQzQKSpYAoAYioKUgwEUqxCuGufoqr7fbd7c1N9852sX56dXi4Wq5Aiu5nm7IwCyGpYssCQyUYiI4yIxm5TxJMxFSCadwVlFnHKnqEHLD4DAc1AxMRU0YoWD2LEOz6oFLMMXADFTMyKWVErxUqZjPqK2oTEzM0CP3iN3rmCdzcnHNn6/7OP0A+iYmP2Tt1k4CZgwG6rDDUXAjyIYjd0fb8LqUHCybfJRKwUGUwzEWGoAIKZIYaJNoLOyQXwtrSBiQMxo3eNlpIzmKmGoppFB5GMqDEE9w3HSU6PRMBMddWcnl2enF6mukWikk30XhUWEYl8U0Wx0I9S5OhtrgJaBNBiYIBIaCJChACcps4PFOJIh2EoOvfH9qmkU0kCJgzl+I4465x9JQci8LvK2b9EOCFSc63Vp/uURivNPso6hwz3L4zHxWpGHkx5rc6mnXAq1iAgoiP2Lslu28VyuTo5OTk5OVmtVm27qKqKCFWdGtx5BepwOHRd13f9MA55HJ2s7YeYUkCMM7dkOszj13nv8aLh3AkixqqqUowhRkRUtQ9yYr5/vPt8BDBgpqpOy/Xq6uGD3339rWrZ7K5NbByGYexVi1mpqvTo0dXzF88fP3qyXi2JNaZQ13WqqpRiiilNcHZgZt9M4BiZARiAFBmH7LJzj5mauo4pvv8hc87fvbl5vd3TvoN7IQzMpPv7+cT9g7H7T3orkcG3n3+/xHhk5nugc/ebe2cK77/i8d3fffuJMabfvr4dc4H3RtM0L168cDb3zc3NsSR0vNw4i8vgQxjM0a/lrTedn8D3hG/vP+2dX5kZqgORDPfoOzYdJAFyldLp6el6vWYOxwMkVEadWhzMjxLaOzxyBGBw8hQiYAAkNCQzRDXio82vubINj3MdUtWcnJw9elSt15nZUuRUc0zBO15E0u6wf/nq5rtvX379bZ8HnAJMY4QZ0fLyi6EREwcCJgALMYRmXkQ+MPycT+aN5mLiZtV+8uLp+nT94PGDr3/72W//4b+/fvl1v9v3Xd/n3OnYY7C0aOu0XK2WZ5exXipy4Kqpl+vl+vz89GS9atoWALt+2O72t9sNIp6dnT569PjTTz+9eHBZNfUxgplB4ilixenzTJ/uezY6DnyyXl5entUphRgI0I3qAdwqCggZEYgYANSYVI0gxLBaLaqqWq/abt/tdofL89PTtlmk1FSpaerFsh3LwCFSoMkLTN1VgJEQpAz7/RbQctHlMrdtXjThZInrlR462x9CkTbSImDDGJkCIBMjkiHkWe0/Dnm7319cnMc4nQRGXFS8l+Eff/urN1Q/WZw8bBcPlq2XG1xz5nIjBR3HrIchWM9oyIKBJ4qJ9wwmNRd9i4moCZiaiIlZUZUynRkzzKpiqlBUi8qobhPq9BeDDOB2jmpazIqCCEhxboJJUaefmRR116/3UpH3L5x7HX3/2v1vE9f88RtI6AdvxAPgLZwnHgY4m27GICvAMI656/sgVqTkEgBAxpznTmwMLh1wh7upJdxUHXDLS3L73cje7sHQMV4zEFWQooOpFTFEjqlqiyigESdEphDbdrk+OT07vVitTgKzqZoVVbq3ATg0bSGQuY05mHMSgFzTLAgSyJCRjARdIOAdngNiTUiqin0uYuK0v7mi7gHN1I9n3k/8RM1AHNhE1sPpI03SGBciOe445etOf/aUxgwk51LeqvE7cFJKQUQAhdlCQ2cbbJz9eI6hs9Nfdrvdbrfd73fu/LFcrtq2ZSYRGYbhcNgfDrv9vnP/mDxxLYvOqa17zDM7XY/eLxYc02u4F8RER2JicnDCkZh/Zjnp7UFESLxYNFdXl48fPzy/OP3mm2+3N9th6ERyjHF90n7yyZMXL168eP788uLBcrmICas6tW1T1XWMyUOYEKYmHwATkdqDBCLiEJi8TPYOpfQDo4jc7nZjEYAPbNv/84/b7a58KOCIMV5cXPhlret6v987G/fOTe4t5dG7///7cJrv/1sf94OYGWl0efgEW95FzdPtElVTo7JarY5BDJiZFhWQPBQCVkJC9/OB+3PYi+AGADAtdV58U/eiVK8+oR6DGJuQB2bjUIV4cfFg/fBKYrTAOpEIKIZYV7F0+1DKsN8SGkhhg6MEgFBwfmM0QyUERXNb4WJSWAX/EHo+SRjQGIAS1amu2rpdN+vz9en56ctvvr5+9Wpze7vvut1hV59vAexkuTg5PTt/8HCxPuWUqtg01aJt2uVy4ZaViDjmPAxD1x8MoWkWq9X6/PyiamqYYD0DA5vYSfb2ifzDwz20QiDnDPrVmCI6JDMVEb+6amYm7v5lZoSUUoyBq5iqql4sFmPf7W5vhr4HsKZtipQQXdtE6N3aHQc1KyKICKolly6PkKOmGEMITTMCaQGkMRCEQG0VUiCyuUqAGKZJAWMsQOiLpx8LoTUEtY1jv93fvPzud7+xpoUYlicnVZXuUFsEAxTDUlTGgbRACHQMYggNGRxWL6Zu5qxTka4AFDUpBkaEBhQLVwIqRTUXyYOUfBfEKBQgmSauikJWGAQGsUFsKJaLZrFBrO/HoRvzOH7/ffqnO8IwuiqEA1NM7vahbkQlIlJGM40pMadS4HAYkAZvVwkAZua95YjwWNsmCEiRiH27EFMv7iDHmNxcLBKyKMBMWfCESQyL2FhYoKqaNXDSUpAYKcXYnJ5dPrh8eHZ61jYNgEruQQXnDhDg/VPKWMoAsCBENDEtYAqGiEpWUEaUAu4SwVYARMSsEGAk4prRYh5Zc+4nEgl5rcfmtUwF7d5CjE4/nBdDQAT2Brc0VZvIxehg03p25Cz4K/g5hHHMfdeNw3Dk9qpqP/Z936lKmDRH8xd/xsSOcaibbLZgKaWMY+66brPZvHnzxg12iaiUMgxj33c5D9ktJCfTsLsy0Vzp89dVpdnP714ui1OJcWLJkOO/UxhThRB8JqSU3rfv+qcNB7dAEbCuqgcPLl+8ePbDH3552Hd/9zd/v9ttAGC9Wn72xYsf/ujLH/34B08ef9LUi6quqzrWdZqqaa74nqybaSrhA9xHPnDqunrEZaaz+/tgg3F4txzzJzSGYXh/ITMzx2DOzs4QcbFYXF9fu/Fdz
vn9F3n/zPgW8seEre8ExB98zhzWHHdRuD8DAcF5w4vF0kUJAGCqOvQFYOhzVAEimXuEvIUj+pMdj0Fkn15mMIvYJqBzcojy1ATUQAnKMDardSBaLVbVyQoC9cMguZgqiOAINmSQglJARsr9HU2JCLx7l0NoTvcurIhCZmMuh1w+dF3mw/VPTgZOUiCxme2MGtt0ER8u1+vHzz7bXN/st9t9f+iGrut7RFw27Wp9cnpx3i5XMcbAMVBkYhcheO1Tp7y9AABCYObAaYrhZphlvpUdpJpgZ7+cd9flQ0NV+77b7/Zaa4xH2Ox4LXSqH86AnwioODdcDICJKcTFqiIOv/nt7xDRuVBNU7v0M0zBATpSaKYuv0bEECIRaSn9blfGIYYQCE20ICiiighoYY9zvcGfIFGsIjODmb4NeAIAmSUpa9L1ivuQr7/+9X6U2313+fTpk08erU+W0/lARArAlQAP/QDjHt2BDIGQkdkFrqamWaXMnXPIAAmI1UgE1QKiQl3h4gQQdfcq533e9zJ2kwVwMVErAAogIAIgiqNCJ9aL9qKjqCiMYvtC267sNtDtD/p+E8h/FyM8e36GCIQUA8cUjkGMd3zMeRzHxTieXFyE9Vpub58AeF9Mx8LRExG+a6lNCIEouFuCTnY6RsQxBAf4QwgILGZmOJNlp0yrqIzjOOahlE5KllIMkIgXi9XV1aPLBw8vzq+Wy5OYKiI05dPTGMI01zik9vRxCGGxOlFFi6fNMHigQGimUsZBVADAlQjeN8wmvytG5CHrxfaw64Z+LEXNXJVrU18ANVB7N4g5LpIwZxlwF8QgIcPkCOErmu+UxyDG1yLwts/PX7yo60k4UlXV06dPwSzFSHMQA+ZQN8BxA5jixrk+Mn1zp3Y3+Uh1VSGhiNR1bttatcw7tC+whIg2b0JTMcwrzUdT4ntlD5wZfccgJsaYqlRV9cwmRlVbLZcxfKAc808eiAAUY1xxfPLk0c9++pMqpsvz8zevXxnI2en68y9evPj0+bNnz07WZ27Zk1JKKRz5QEf9570g7L23sKPXtcGkwcEPrs/r9fov//Iv1+s1TLD6vw2w+q8wPM74yU9+4h/+nUFEKSVEdBLScrnc7/d93x87LN4f7wci/7pBzPG57/zVRDOnCac8Pz8/dktu16sf/af/NSAu3RUAiMlT67cIp+8EMeSUPTPQGe08BjGO1U17LBhiIT55cPXsxz+5fPq0Xi+Rqe/7Mo5anCZPeeiXl5eLBw+XDx8dNluegxgjp8XM8Lh6zQEZkMmslNIPT//sP1bL5feeDZop73NNDxQQQgwpprZdrk/PT88u+64b8jDmsUhBpBRT07TL9aqqqj8eGbV7NfrjSgfH0/dPGU3TfPXVD8ysrqo7rMLuLSizHSVMsBeo4ITQTNKBGEJEDEdw2nX6ZsbsIcyUHprdkcoRkYnNTGbfZ2bmwAhY3ENFCiFUKTCxAYqalEJEqYqeNBaRYRy//PKrtm39g3Ldrj79QaqqcHLZpRXkNNYLeps2R4iWGj69Sp/+B4iJdi9tODjHAA0QCcPEK/AgBkXJw3QytykDYzFSY0DGeknrC0Usu+uyvyn725J7NRUxEVfKmc6GRmqYFQaFUTWrFlUzzAqd0H7Iu8Ph0z/7T81y9U++hH8KA7/5djvFIzhZmSAcp5SXKdTMcs59f5CSDe7QD9816biVz68JR0KQHeVb0/5Hk1PKVPO/H8tPVd+JCChzCcdDJZ7oDSExu0k3euWorh2nBJU8dBuTzByc6PC2Fd7k/3n/gft8GgA0s+K2t+8QVOaf7ort97/d/YB3P+Ddt3vrAb6DAxxfXkXruj47v/B1eRzHm9vboe9n5tp8SY5/8s47T98NZrRhDmamKPEeUD9fuqP0aH7h4yW8dzHx7S/v/B6PlxXnd/KPwcxu8gj/CmM69Jzzft8dDoeuc4TAQuCmaeq6quva9VAeh03R4r0ywt3B/eGV/G16ydvjcDj86le/2u/38NbM/RMYPoUXi8WLFy+O6/I7T7B7/i5HPdEf/+J/zPjnBjEfeEePt3xrHA6H7377m267ZZ/yTsZ9+zU++Orzo+/cXsfl6fgTGkBIqWkXsaooMCC6WcPd4qCqUkoeR+8Yb/fvqHvvdSRN+n9mplotlidPnsbmrrvFe8PeWXzA7G6ZAQCD2RhjkoFN6ciknfwnTtR3b4L3r9Qf9YLDMLx8+V3XHWaDrN/3Tvd+tvu7xnRPz786DoB5T5k+zb2ypt1rtXMXjc0rwvTn5nR53+1wfhRnRNapito07eXlpRu+6Tjkm5c6DBiiEo+KSoFiilWqqmoCBc1Mio2d7nc2HrRkMHlr/s7x+HSYc5l0nqsIE3/BYx6GEAHQJE+KoxkvnOlydxuT723q1b+jkSKAGrgHXr1cXTx5WjUfuPf/1Af+e62TfRwfx8fxcXwcH8fH8e97/Kvkyh/Hx/FxfBwfx8fxcXwc/9bjYxDzcXwcH8fH8XF8HB/Hn+T4/wB1i/3et9r0KQAAAABJRU5ErkJggg==\n", + "text/plain": [ + "" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "num_collage_examples = 10\n", + "\n", + "examples = np.empty(\n", + " (\n", + " num_collage_examples,\n", + " near_neighbours_per_example + 1,\n", + " 3,\n", + " height_width,\n", + " height_width,\n", + " ),\n", + " dtype=np.float32,\n", + ")\n", + "for row_idx in range(num_collage_examples):\n", + " examples[row_idx, 0] = x_test[row_idx]\n", + " anchor_near_neighbours = indicies[row_idx][1:near_neighbours_per_example+1]\n", + " for col_idx, nn_idx in enumerate(anchor_near_neighbours):\n", + " examples[row_idx, col_idx + 1] = x_test[nn_idx]\n", + "\n", + "show_collage(examples)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 328 + }, + "colab_type": "code", + "id": "wrbuQDPHUBEi", + "outputId": "3f46eb46-fa8e-4b9c-e11f-f9bbb1abf2d5" + }, + "source": [ + "## The end\n", + "\n", + 
"上面展示的结果当中,每一行里其余的图片都是跟第一张图片按照相似度进行排序相似的图片。你也可以调整网络结构和超参数,以获得更好的结果。" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "collapsed_sections": [], + "name": "metric_learning.ipynb", + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.3" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/doc/paddle/tutorial/cv_case/image_search/image_search.rst b/doc/paddle/tutorial/cv_case/image_search/image_search.rst new file mode 100644 index 0000000000000000000000000000000000000000..2913ebed4b33fe6b3d4d8d0001c46f495353bbd6 --- /dev/null +++ b/doc/paddle/tutorial/cv_case/image_search/image_search.rst @@ -0,0 +1,403 @@ +基于图片相似度的图片搜索 +======================== + +简要介绍 +-------- + +图片搜索是一种有着广泛的应用场景的深度学习技术的应用,目前,无论是工程图纸的检索,还是互联网上相似图片的搜索,都基于深度学习算法能够实现很好的基于给定图片,检索出跟该图片相似的图片的效果。 + +本示例简要介绍如何通过飞桨开源框架,实现图片搜索的功能。其基本思路是,先将图片使用卷积神经网络转换为高维空间的向量表示,然后计算两张图片的高维空间的向量表示之间的相似程度(本示例中,我们使用余弦相似度)。在模型训练阶段,其训练目标是让同一类别的图片的相似程度尽可能的高,不同类别的图片的相似程度尽可能的低。在模型预测阶段,对于用户上传的一张图片,会计算其与图片库中图片的相似程度,返回给用户按照相似程度由高到低的图片的列表作为检索的结果。 + +环境设置 +-------- + +本示例基于飞桨开源框架2.0版本。 + +.. code:: ipython3 + + import paddle + import paddle.nn.functional as F + import numpy as np + import random + import matplotlib.pyplot as plt + from PIL import Image + from collections import defaultdict + + paddle.disable_static() + print(paddle.__version__) + + +.. parsed-literal:: + + 2.0.0-beta0 + + +数据集 +------ + +本示例采用\ `CIFAR-10 `__\ 数据集。这是一个经典的数据集,由50000张图片的训练数据,和10000张图片的测试数据组成,其中每张图片是一个RGB的长和宽都为32的图片。使用\ ``paddle.dataset.cifar``\ 可以方便的完成数据的下载工作,把数据归一化到\ ``(0, 1.0)``\ 区间内,并提供迭代器供按顺序访问数据。我们会把训练数据和测试数据分别存放在两个\ ``numpy``\ 数组中,供后面的训练和评估来使用。 + +.. code:: ipython3 + + cifar10_train = paddle.vision.datasets.cifar.Cifar10(mode='train', transform=None) + x_train = np.zeros((50000, 3, 32, 32)) + y_train = np.zeros((50000, 1), dtype='int32') + + for i in range(len(cifar10_train)): + train_image, train_label = cifar10_train[i] + train_image = train_image.reshape((3,32,32 )) + + # normalize the data + x_train[i,:, :, :] = train_image / 255. + y_train[i, 0] = train_label + + y_train = np.squeeze(y_train) + + print(x_train.shape) + print(y_train.shape) + + +.. parsed-literal:: + + (50000, 3, 32, 32) + (50000,) + + +.. code:: ipython3 + + cifar10_test = paddle.vision.datasets.cifar.Cifar10(mode='test', transform=None) + x_test = np.zeros((10000, 3, 32, 32), dtype='float32') + y_test = np.zeros((10000, 1), dtype='int64') + + for i in range(len(cifar10_test)): + test_image, test_label = cifar10_test[i] + test_image = test_image.reshape((3,32,32 )) + + # normalize the data + x_test[i,:, :, :] = test_image / 255. + y_test[i, 0] = test_label + + y_test = np.squeeze(y_test) + + print(x_test.shape) + print(y_test.shape) + + +.. parsed-literal:: + + (10000, 3, 32, 32) + (10000,) + + +数据探索 +-------- + +接下来我们随机从训练数据里找一些图片,浏览一下这些图片。 + +.. 
code:: ipython3 + + height_width = 32 + + def show_collage(examples): + box_size = height_width + 2 + num_rows, num_cols = examples.shape[:2] + + collage = Image.new( + mode="RGB", + size=(num_cols * box_size, num_rows * box_size), + color=(255, 255, 255), + ) + for row_idx in range(num_rows): + for col_idx in range(num_cols): + array = (np.array(examples[row_idx, col_idx]) * 255).astype(np.uint8) + array = array.transpose(1,2,0) + collage.paste( + Image.fromarray(array), (col_idx * box_size, row_idx * box_size) + ) + + collage = collage.resize((2 * num_cols * box_size, 2 * num_rows * box_size)) + return collage + + sample_idxs = np.random.randint(0, 50000, size=(5, 5)) + examples = x_train[sample_idxs] + show_collage(examples) + + + + +.. image:: https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/paddle/tutorial/cv_case/image_search/image_search_files/image_search_001.png?raw=true?raw=true + + + +构建训练数据 +------------ + +图片检索的模型的训练样本跟我们常见的分类任务的训练样本不太一样的地方在于,每个训练样本并不是一个\ ``(image, class)``\ 这样的形式。而是(image0, +image1, +similary_or_not)的形式,即,每一个训练样本由两张图片组成,而其\ ``label``\ 是这两张图片是否相似的标志位(0或者1)。 + +很自然的我们能够想到,来自同一个类别的两张图片,是相似的图片,而来自不同类别的两张图片,应该是不相似的图片。 + +为了能够方便的抽样出相似图片(以及不相似图片)的样本,我们先建立能够根据类别找到该类别下所有图片的索引。 + +.. code:: ipython3 + + class_idx_to_train_idxs = defaultdict(list) + for y_train_idx, y in enumerate(y_train): + class_idx_to_train_idxs[y].append(y_train_idx) + + class_idx_to_test_idxs = defaultdict(list) + for y_test_idx, y in enumerate(y_test): + class_idx_to_test_idxs[y].append(y_test_idx) + +有了上面的索引,我们就可以为飞桨准备一个读取数据的迭代器。该迭代器每次生成\ ``2 * number of classes``\ 张图片,在CIFAR10数据集中,这会是20张图片。前10张图片,和后10张图片,分别是10个类别中每个类别随机抽出的一张图片。这样,在实际的训练过程中,我们就会有10张相似的图片和90张不相似的图片(前10张图片中的任意一张图片,都与后10张的对应位置的1张图片相似,而与其他9张图片不相似)。 + +.. code:: ipython3 + + num_classes = 10 + + def reader_creator(num_batchs): + def reader(): + iter_step = 0 + while True: + if iter_step >= num_batchs: + break + iter_step += 1 + x = np.empty((2, num_classes, 3, height_width, height_width), dtype=np.float32) + for class_idx in range(num_classes): + examples_for_class = class_idx_to_train_idxs[class_idx] + anchor_idx = random.choice(examples_for_class) + positive_idx = random.choice(examples_for_class) + while positive_idx == anchor_idx: + positive_idx = random.choice(examples_for_class) + x[0, class_idx] = x_train[anchor_idx] + x[1, class_idx] = x_train[positive_idx] + yield x + + return reader + + + # num_batchs: how many batchs to generate + def anchor_positive_pairs(num_batchs=100): + return reader_creator(num_batchs) + + +.. code:: ipython3 + + pairs_train_reader = anchor_positive_pairs(num_batchs=1000) + +拿出第一批次的图片,并可视化的展示出来,如下所示。(这样更容易理解训练样本的构成) + +.. code:: ipython3 + + + examples = next(pairs_train_reader()) + print(examples.shape) + show_collage(examples) + + +.. parsed-literal:: + + (2, 10, 3, 32, 32) + + + + +.. image:: https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/paddle/tutorial/cv_case/image_search/image_search_files/image_search_002.png?raw=true + + + +把图片转换为高维的向量表示的网络 +-------------------------------- + +我们的目标是首先把图片转换为高维空间的表示,然后计算图片在高维空间表示时的相似度。 +下面的网络结构用来把一个形状为\ ``(3, 32, 32)``\ 的图片转换成形状为\ ``(8,)``\ 的向量。在有些资料中也会把这个转换成的向量称为\ ``Embedding``\ ,请注意,这与自然语言处理领域的词向量的区别。 +下面的模型由三个连续的卷积加一个全局均值池化,然后用一个线性全链接层映射到维数为8的向量空间。为了后续计算余弦相似度时的便利,我们还在最后用\ `l2_normalize `__\ 做了归一化。(即,余弦相似度的分母部分) + +.. 
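+
+ 关于上面提到的 ``l2_normalize``:向量经过 L2 归一化之后,两个向量的点积就等于它们的余弦相似度,这也是后面训练和预测阶段可以直接用 ``matmul`` 计算相似度的原因。下面是一个最小的 NumPy 验证示意(向量取值是随意构造的,仅作演示):
+
+ .. code:: python
+
+     import numpy as np
+
+     # 随意构造的两个向量,仅作演示
+     u = np.array([3.0, 4.0, 0.0])
+     v = np.array([1.0, 2.0, 2.0])
+
+     # L2 归一化:除以各自的 L2 范数(即余弦相似度的分母部分)
+     u_n = u / np.linalg.norm(u)
+     v_n = v / np.linalg.norm(v)
+
+     # 归一化之后,两个向量的点积即为余弦相似度
+     cos = np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))
+     print(np.isclose(np.dot(u_n, v_n), cos))  # True
+
+ ..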
code:: ipython3 + + class MyNet(paddle.nn.Layer): + def __init__(self): + super(MyNet, self).__init__() + + self.conv1 = paddle.nn.Conv2d(in_channels=3, + out_channels=32, + kernel_size=(3, 3), + stride=2) + + self.conv2 = paddle.nn.Conv2d(in_channels=32, + out_channels=64, + kernel_size=(3,3), + stride=2) + + self.conv3 = paddle.nn.Conv2d(in_channels=64, + out_channels=128, + kernel_size=(3,3), + stride=2) + + self.gloabl_pool = paddle.nn.AdaptiveAvgPool2d((1,1)) + + self.fc1 = paddle.nn.Linear(in_features=128, out_features=8) + def forward(self, x): + x = self.conv1(x) + x = F.relu(x) + x = self.conv2(x) + x = F.relu(x) + x = self.conv3(x) + x = F.relu(x) + x = self.gloabl_pool(x) + x = paddle.squeeze(x, axis=[2, 3]) + x = self.fc1(x) + x = F.l2_normalize(x, axis=1) + + return x + + +在模型的训练过程中如下面的代码所示: + +- ``inverse_temperature``\ 参数起到的作用是让softmax在计算梯度时,能够处于梯度更显著的区域。(可以参考\ `attention + is all you + need `__\ 中,在点积之后的\ ``scale``\ 操作)。 +- 整个计算过程,会先用上面的网络分别计算前10张图片(anchors)的高维表示,和后10张图片的高维表示。然后再用\ `matmul `__\ 计算前10张图片分别与后10张图片的相似度。(所以\ ``similarities``\ 会是一个\ ``(10, 10)``\ 的Tensor)。 +- 为\ `softmax_with_cross_entropy `__\ 构造类别标签时,则相应的,可以构造出来0 + ~ + num_classes的标签值,用来让学习的目标成为相似的图片的相似度尽可能的趋向于1.0,而不相似的图片的相似度尽可能的趋向于-1.0。 + +.. code:: ipython3 + + def train(model): + print('start training ... ') + model.train() + + inverse_temperature = paddle.to_tensor(np.array([1.0/0.2], dtype='float32')) + + epoch_num = 20 + + opt = paddle.optimizer.Adam(learning_rate=0.0001, + parameters=model.parameters()) + + for epoch in range(epoch_num): + for batch_id, data in enumerate(pairs_train_reader()): + anchors_data, positives_data = data[0], data[1] + + anchors = paddle.to_tensor(anchors_data) + positives = paddle.to_tensor(positives_data) + + anchor_embeddings = model(anchors) + positive_embeddings = model(positives) + + similarities = paddle.matmul(anchor_embeddings, positive_embeddings, transpose_y=True) + similarities = paddle.multiply(similarities, inverse_temperature) + + sparse_labels = paddle.arange(0, num_classes, dtype='int64') + sparse_labels = paddle.reshape(sparse_labels, (num_classes, 1)) + + loss = F.softmax_with_cross_entropy(similarities, sparse_labels) + + avg_loss = paddle.mean(loss) + if batch_id % 500 == 0: + print("epoch: {}, batch_id: {}, loss is: {}".format(epoch, batch_id, avg_loss.numpy())) + avg_loss.backward() + opt.step() + opt.clear_grad() + + model = MyNet() + train(model) + + +.. parsed-literal:: + + start training ... 
+ epoch: 0, batch_id: 0, loss is: [2.3078856] + epoch: 0, batch_id: 500, loss is: [1.9325346] + epoch: 1, batch_id: 0, loss is: [1.9889] + epoch: 1, batch_id: 500, loss is: [2.0410695] + epoch: 2, batch_id: 0, loss is: [2.2465641] + epoch: 2, batch_id: 500, loss is: [1.8171736] + epoch: 3, batch_id: 0, loss is: [1.9939486] + epoch: 3, batch_id: 500, loss is: [2.1440036] + epoch: 4, batch_id: 0, loss is: [2.1497147] + epoch: 4, batch_id: 500, loss is: [2.3686018] + epoch: 5, batch_id: 0, loss is: [1.938681] + epoch: 5, batch_id: 500, loss is: [1.7729127] + epoch: 6, batch_id: 0, loss is: [2.0061004] + epoch: 6, batch_id: 500, loss is: [1.6132584] + epoch: 7, batch_id: 0, loss is: [1.8874661] + epoch: 7, batch_id: 500, loss is: [1.6153599] + epoch: 8, batch_id: 0, loss is: [1.9407685] + epoch: 8, batch_id: 500, loss is: [2.1532288] + epoch: 9, batch_id: 0, loss is: [1.4792883] + epoch: 9, batch_id: 500, loss is: [1.857158] + epoch: 10, batch_id: 0, loss is: [2.1518302] + epoch: 10, batch_id: 500, loss is: [1.790559] + epoch: 11, batch_id: 0, loss is: [1.7292264] + epoch: 11, batch_id: 500, loss is: [1.8555079] + epoch: 12, batch_id: 0, loss is: [1.6968924] + epoch: 12, batch_id: 500, loss is: [1.4554331] + epoch: 13, batch_id: 0, loss is: [1.3950458] + epoch: 13, batch_id: 500, loss is: [1.7197256] + epoch: 14, batch_id: 0, loss is: [1.7336586] + epoch: 14, batch_id: 500, loss is: [2.0465684] + epoch: 15, batch_id: 0, loss is: [1.7675827] + epoch: 15, batch_id: 500, loss is: [2.6443417] + epoch: 16, batch_id: 0, loss is: [1.7331158] + epoch: 16, batch_id: 500, loss is: [1.6207634] + epoch: 17, batch_id: 0, loss is: [2.0908554] + epoch: 17, batch_id: 500, loss is: [1.7711265] + epoch: 18, batch_id: 0, loss is: [1.8717268] + epoch: 18, batch_id: 500, loss is: [1.5269613] + epoch: 19, batch_id: 0, loss is: [1.5681677] + epoch: 19, batch_id: 500, loss is: [1.7821472] + + +模型预测 +-------- + +前述的模型训练训练结束之后,我们就可以用该网络结构来计算出任意一张图片的高维向量表示(embedding),通过计算该图片与图片库中其他图片的高维向量表示之间的相似度,就可以按照相似程度进行排序,排序越靠前,则相似程度越高。 + +下面我们对测试集中所有的图片都两两计算相似度,然后选一部分相似的图片展示出来。 + +.. code:: ipython3 + + near_neighbours_per_example = 10 + + x_test_t = paddle.to_tensor(x_test) + test_images_embeddings = model(x_test_t) + similarities_matrix = paddle.matmul(test_images_embeddings, test_images_embeddings, transpose_y=True) + + indicies = paddle.argsort(similarities_matrix, descending=True) + indicies = indicies.numpy() + +.. code:: ipython3 + + num_collage_examples = 10 + + examples = np.empty( + ( + num_collage_examples, + near_neighbours_per_example + 1, + 3, + height_width, + height_width, + ), + dtype=np.float32, + ) + for row_idx in range(num_collage_examples): + examples[row_idx, 0] = x_test[row_idx] + anchor_near_neighbours = indicies[row_idx][1:near_neighbours_per_example+1] + for col_idx, nn_idx in enumerate(anchor_near_neighbours): + examples[row_idx, col_idx + 1] = x_test[nn_idx] + + show_collage(examples) + + + + +.. 
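As a usage sketch (not part of the original tutorial; it assumes ``model``, ``x_test`` and ``test_images_embeddings`` from the cells above are still in scope), the same embeddings also answer single-image queries: embed one picture, score it against the pre-computed test embeddings, and keep the highest-scoring indices.

.. code:: ipython3

    import numpy as np
    import paddle

    def query_similar(image_chw, top_k=10):
        """Return indices of the top_k test images most similar to one image."""
        # image_chw: one image shaped (3, height_width, height_width), float32
        x = paddle.to_tensor(image_chw[np.newaxis, ...])
        query_embedding = model(x)                                           # (1, 8)
        scores = paddle.matmul(query_embedding,
                               test_images_embeddings, transpose_y=True)     # (1, N)
        order = paddle.argsort(scores, descending=True).numpy()[0]
        return order[:top_k]

    # e.g. the ten highest-scoring test images for the first test image
    # (the image itself comes first, as in the collage code above)
    print(query_similar(x_test[0]))

The collage below shows the batched version of exactly this lookup, one query per row.

..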
image:: https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/paddle/tutorial/cv_case/image_search/image_search_files/image_search_003.png?raw=true + + + +The end +------- + +上面展示的结果当中,每一行里其余的图片都是跟第一张图片按照相似度进行排序相似的图片。你也可以调整网络结构和超参数,以获得更好的结果。 diff --git a/doc/paddle/tutorial/cv_case/image_search/image_search_files/image_search_001.png b/doc/paddle/tutorial/cv_case/image_search/image_search_files/image_search_001.png new file mode 100644 index 0000000000000000000000000000000000000000..b56c1ecec43a906e77061261aa6e1e5e0280cb17 Binary files /dev/null and b/doc/paddle/tutorial/cv_case/image_search/image_search_files/image_search_001.png differ diff --git a/doc/paddle/tutorial/cv_case/image_search/image_search_files/image_search_002.png b/doc/paddle/tutorial/cv_case/image_search/image_search_files/image_search_002.png new file mode 100644 index 0000000000000000000000000000000000000000..e972f089632d2677e4ffc525219f56d2e15613e7 Binary files /dev/null and b/doc/paddle/tutorial/cv_case/image_search/image_search_files/image_search_002.png differ diff --git a/doc/paddle/tutorial/cv_case/image_search/image_search_files/image_search_003.png b/doc/paddle/tutorial/cv_case/image_search/image_search_files/image_search_003.png new file mode 100644 index 0000000000000000000000000000000000000000..8768faa9ab2c87e731aa72678712c0f6a61caa7f Binary files /dev/null and b/doc/paddle/tutorial/cv_case/image_search/image_search_files/image_search_003.png differ diff --git a/doc/paddle/tutorial/cv_case/image_segmentation/pets_image_segmentation_U_Net_like.ipynb b/doc/paddle/tutorial/cv_case/image_segmentation/pets_image_segmentation_U_Net_like.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..9dd56ed42fb192d5bd9309c6aa64901e508c7214 --- /dev/null +++ b/doc/paddle/tutorial/cv_case/image_segmentation/pets_image_segmentation_U_Net_like.ipynb @@ -0,0 +1,1039 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "ueGUN2EQeScw" + }, + "source": [ + "# 基于U-Net卷积神经网络实现宠物图像分割\n", + "\n", + "本示例教程当前是基于2.0-beta版本Paddle做的案例实现,未来会随着2.0的系列版本发布进行升级。" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 1.简要介绍\n", + "\n", + "在计算机视觉领域,图像分割指的是将数字图像细分为多个图像子区域的过程。图像分割的目的是简化或改变图像的表示形式,使得图像更容易理解和分析。图像分割通常用于定位图像中的物体和边界(线,曲线等)。更精确的,图像分割是对图像中的每个像素加标签的一个过程,这一过程使得具有相同标签的像素具有某种共同视觉特性。图像分割的领域非常多,无人车、地块检测、表计识别等等。\n", + "\n", + "本示例简要介绍如何通过飞桨开源框架,实现图像分割。这里我们是采用了一个在图像分割领域比较熟知的U-Net网络结构,是一个基于FCN做改进后的一个深度学习网络,包含下采样(编码器,特征提取)和上采样(解码器,分辨率还原)两个阶段,因模型结构比较像U型而命名为U-Net。" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 2.环境设置\n", + "\n", + "导入一些比较基础常用的模块,确认自己的飞桨版本。" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'2.0.0-beta0'" + ] + }, + "execution_count": 21, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import os\n", + "import io\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "from PIL import Image as PilImage\n", + "\n", + "import paddle\n", + "from paddle.nn import functional as F\n", + "\n", + "paddle.__version__" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "VMC2xLAxeScx" + }, + "source": [ + "## 3.数据集" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "H0KiJ_5N936Y" + }, + "source": [ + "### 3.1 数据集下载\n", + "\n", + "本案例使用Oxford-IIIT Pet数据集,官网:https://www.robots.ox.ac.uk/~vgg/data/pets 。\n", + "\n", + 
"数据集统计如下:\n", + "\n", + "![alt 数据集统计信息](https://www.robots.ox.ac.uk/~vgg/data/pets/breed_count.jpg)\n", + "\n", + "数据集包含两个压缩文件:\n", + "\n", + "1. 原图:https://www.robots.ox.ac.uk/~vgg/data/pets/data/images.tar.gz\n", + "2. 分割图像:https://www.robots.ox.ac.uk/~vgg/data/pets/data/annotations.tar.gz" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 119 + }, + "colab_type": "code", + "id": "xJd9y-u9eScy", + "outputId": "3985783f-7166-4afa-f511-16427b3e2a71", + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " % Total % Received % Xferd Average Speed Time Time Time Current\n", + " Dload Upload Total Spent Left Speed\n", + "100 755M 100 755M 0 0 1707k 0 0:07:32 0:07:32 --:--:-- 2865k0 0:12:48 524k 0 0:13:34 0:02:41 0:10:53 668k 0 0:12:45 0:03:06 0:09:39 1702k 0 1221k 0 0:10:33 0:03:25 0:07:08 3108k37 282M 0 0 1243k 0 0:10:21 0:03:52 0:06:29 719k0:05:53 566k0 1237k 0 0:10:25 0:04:43 0:05:42 1593k 0 0:09:46 0:05:28 0:04:18 2952k 1467k 0 0:08:47 0:06:43 0:02:04 1711k\n", + " % Total % Received % Xferd Average Speed Time Time Time Current\n", + " Dload Upload Total Spent Left Speed\n", + "100 18.2M 100 18.2M 0 0 1602k 0 0:00:11 0:00:11 --:--:-- 3226k\n" + ] + } + ], + "source": [ + "!curl -O http://www.robots.ox.ac.uk/~vgg/data/pets/data/images.tar.gz\n", + "!curl -O http://www.robots.ox.ac.uk/~vgg/data/pets/data/annotations.tar.gz\n", + "!tar -xf images.tar.gz\n", + "!tar -xf annotations.tar.gz" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "L5cP2CBz-Mra" + }, + "source": [ + "### 3.2 数据集概览\n", + "\n", + "首先我们先看看下载到磁盘上的文件结构是什么样,来了解一下我们的数据集。\n", + "\n", + "1. 首先看一下images.tar.gz这个压缩包,该文件解压后得到一个images目录,这个目录比较简单,里面直接放的是用类名和序号命名好的图片文件,每个图片是对应的宠物照片。\n", + "\n", + "```bash\n", + ".\n", + "├── samoyed_7.jpg\n", + "├── ......\n", + "└── samoyed_81.jpg\n", + "```\n", + "\n", + "2. 
然后我们在看下annotations.tar.gz,文件解压后的目录里面包含以下内容,目录中的README文件将每个目录和文件做了比较详细的介绍,我们可以通过README来查看每个目录文件的说明。\n", + "\n", + "```bash\n", + ".\n", + "├── README\n", + "├── list.txt\n", + "├── test.txt\n", + "├── trainval.txt\n", + "├── trimaps\n", + "│ ├── Abyssinian_1.png\n", + "│ ├── Abyssinian_10.png\n", + "│ ├── ......\n", + "│ └── yorkshire_terrier_99.png\n", + "└── xmls\n", + " ├── Abyssinian_1.xml\n", + " ├── Abyssinian_10.xml\n", + " ├── ......\n", + " └── yorkshire_terrier_190.xml\n", + "```\n", + "\n", + "本次我们主要使用到images和annotations/trimaps两个目录,即原图和三元图像文件,前者作为训练的输入数据,后者是对应的标签数据。\n", + "\n", + "我们来看看这个数据集给我们提供了多少个训练样本。" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 204 + }, + "colab_type": "code", + "id": "tqB7YQ4leSc4", + "outputId": "8872356c-ef32-4c94-defb-66250a00890a", + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "用于训练的图片样本数量: 7390\n" + ] + } + ], + "source": [ + "train_images_path = \"images/\"\n", + "label_images_path = \"annotations/trimaps/\"\n", + "\n", + "print(\"用于训练的图片样本数量:\", len([os.path.join(train_images_path, image_name) \n", + " for image_name in os.listdir(train_images_path) \n", + " if image_name.endswith('.jpg')]))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 3.3 数据集类定义\n", + "\n", + "飞桨(PaddlePaddle)数据集加载方案是统一使用Dataset(数据集定义) + DataLoader(多进程数据集加载)。\n", + "\n", + "首先我们先进行数据集的定义,数据集定义主要是实现一个新的Dataset类,继承父类paddle.io.Dataset,并实现父类中以下两个抽象方法,`__getitem__`和`__len__`:\n", + "\n", + "```python\n", + "class MyDataset(Dataset):\n", + " def __init__(self):\n", + " ...\n", + " \n", + " # 每次迭代时返回数据和对应的标签\n", + " def __getitem__(self, idx):\n", + " return x, y\n", + "\n", + " # 返回整个数据集的总数\n", + " def __len__(self):\n", + " return count(samples)\n", + "```\n", + "\n", + "在数据集内部可以结合图像数据预处理相关API进行图像的预处理(改变大小、反转、调整格式等)。\n", + "\n", + "由于加载进来的图像不一定都符合自己的需求,举个例子,已下载的这些图片里面就会有RGBA格式的图片,这个时候图片就不符合我们所需3通道的需求,我们需要进行图片的格式转换,那么这里我们直接实现了一个通用的图片读取接口,确保读取出来的图片都是满足我们的需求。\n", + "\n", + "另外图片加载出来的默认shape是HWC,这个时候要看看是否满足后面训练的需要,如果Layer的默认格式和这个不是符合的情况下,需要看下Layer有没有参数可以进行格式调整。不过如果layer较多的话,还是直接调整原数据Shape比较好,否则每个layer都要做参数设置,如果有遗漏就会导致训练出错,那么在本案例中是直接对数据源的shape做了统一调整,从HWC转换成了CHW,因为飞桨的卷积等API的默认输入格式为CHW,这样处理方便后续模型训练。" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [], + "source": [ + "import random\n", + "\n", + "from paddle.io import Dataset\n", + "from paddle.vision.transforms import transforms\n", + "\n", + "\n", + "class ImgTranspose(object):\n", + " \"\"\"\n", + " 图像预处理工具,用于将Mask图像进行升维(160, 160) => (160, 160, 1),\n", + " 并对图像的维度进行转换从HWC变为CHW\n", + " \"\"\"\n", + " def __init__(self, fmt):\n", + " self.format = fmt\n", + " \n", + " def __call__(self, img):\n", + " if len(img.shape) == 2:\n", + " img = np.expand_dims(img, axis=2)\n", + " \n", + " return img.transpose(self.format)\n", + "\n", + "class PetDataset(Dataset):\n", + " \"\"\"\n", + " 数据集定义\n", + " \"\"\"\n", + " def __init__(self, image_path, label_path, mode='train'):\n", + " \"\"\"\n", + " 构造函数\n", + " \"\"\"\n", + " self.image_size = (160, 160)\n", + " self.image_path = image_path\n", + " self.label_path = label_path\n", + " self.mode = mode.lower()\n", + " self.eval_image_num = 1000\n", + " \n", + " assert self.mode in ['train', 'test'], \\\n", + " \"mode should be 'train' or 'test', but got {}\".format(self.mode)\n", + " \n", + " self._parse_dataset()\n", + " \n", + " self.transforms = transforms.Compose([\n", + " 
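# ImgTranspose (defined above): add a channel axis to 2-D masks, then transpose HWC -> CHW\n",
+ "            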
ImgTranspose((2, 0, 1))\n", + " ])\n", + " \n", + " def _sort_images(self, image_dir, image_type):\n", + " \"\"\"\n", + " 对文件夹内的图像进行按照文件名排序\n", + " \"\"\"\n", + " files = []\n", + "\n", + " for image_name in os.listdir(image_dir):\n", + " if image_name.endswith('.{}'.format(image_type)) \\\n", + " and not image_name.startswith('.'):\n", + " files.append(os.path.join(image_dir, image_name))\n", + "\n", + " return sorted(files)\n", + " \n", + " def _parse_dataset(self):\n", + " \"\"\"\n", + " 由于所有文件都是散落在文件夹中,在训练时我们需要使用的是数据集和标签对应的数据关系,\n", + " 所以我们第一步是对原始的数据集进行整理,得到数据集和标签两个数组,分别一一对应。\n", + " 这样可以在使用的时候能够很方便的找到原始数据和标签的对应关系,否则对于原有的文件夹图片数据无法直接应用。\n", + " 在这里是用了一个非常简单的方法,按照文件名称进行排序。\n", + " 因为刚好数据和标签的文件名是按照这个逻辑制作的,名字都一样,只有扩展名不一样。\n", + " \"\"\"\n", + " temp_train_images = self._sort_images(self.image_path, 'jpg')\n", + " temp_label_images = self._sort_images(self.label_path, 'png')\n", + "\n", + " random.Random(1337).shuffle(temp_train_images)\n", + " random.Random(1337).shuffle(temp_label_images)\n", + " \n", + " if self.mode == 'train':\n", + " self.train_images = temp_train_images[:-self.eval_image_num]\n", + " self.label_images = temp_label_images[:-self.eval_image_num]\n", + " else:\n", + " self.train_images = temp_train_images[-self.eval_image_num:]\n", + " self.label_images = temp_label_images[-self.eval_image_num:]\n", + " \n", + " def _load_img(self, path, color_mode='rgb'):\n", + " \"\"\"\n", + " 统一的图像处理接口封装,用于规整图像大小和通道\n", + " \"\"\"\n", + " with open(path, 'rb') as f:\n", + " img = PilImage.open(io.BytesIO(f.read()))\n", + " if color_mode == 'grayscale':\n", + " # if image is not already an 8-bit, 16-bit or 32-bit grayscale image\n", + " # convert it to an 8-bit grayscale image.\n", + " if img.mode not in ('L', 'I;16', 'I'):\n", + " img = img.convert('L')\n", + " elif color_mode == 'rgba':\n", + " if img.mode != 'RGBA':\n", + " img = img.convert('RGBA')\n", + " elif color_mode == 'rgb':\n", + " if img.mode != 'RGB':\n", + " img = img.convert('RGB')\n", + " else:\n", + " raise ValueError('color_mode must be \"grayscale\", \"rgb\", or \"rgba\"')\n", + "\n", + " if self.image_size is not None:\n", + " if img.size != self.image_size:\n", + " img = img.resize(self.image_size, PilImage.NEAREST)\n", + "\n", + " return img\n", + "\n", + " def __getitem__(self, idx):\n", + " \"\"\"\n", + " 返回 image, label\n", + " \"\"\"\n", + " # 花了比较多的时间在数据处理这里,需要处理成模型能适配的格式,踩了一些坑(比如有不是RGB格式的)\n", + " # 有图片会出现通道数和期望不符的情况,需要进行相关考虑\n", + "\n", + " # 加载原始图像\n", + " train_image = self._load_img(self.train_images[idx])\n", + " x = np.array(train_image, dtype='float32')\n", + "\n", + " # 对图像进行预处理,统一大小,转换维度格式(HWC => CHW)\n", + " x = self.transforms(x)\n", + " \n", + " # 加载Label图像\n", + " label_image = self._load_img(self.label_images[idx], color_mode=\"grayscale\") \n", + " y = np.array(label_image, dtype='uint8') \n", + "\n", + " # 图像预处理\n", + " # Label图像是二维的数组(size, size),升维到(size, size, 1)后才能用于最后loss计算\n", + " y = self.transforms(y)\n", + " \n", + " # 返回img, label,转换为需要的格式\n", + " return x, y.astype('int64')\n", + " \n", + " def __len__(self):\n", + " \"\"\"\n", + " 返回数据集总数\n", + " \"\"\"\n", + " return len(self.train_images)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "GYxTHfbBESSG" + }, + "source": [ + "### 3.4 PetDataSet数据集抽样展示\n", + "\n", + "实现好Dataset数据集后,我们来测试一下数据集是否符合预期,因为Dataset是一个可以被迭代的Class,我们通过for循环从里面读取数据来用matplotlib进行展示,这里要注意的是对于分割的标签文件因为是1通道的灰度图片,需要在使用imshow接口时注意下传参cmap='gray'。" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": { 
+ "colab": { + "base_uri": "https://localhost:8080/", + "height": 479 + }, + "colab_type": "code", + "id": "MTO-C5qFDnPn", + "outputId": "0937ed5e-1216-4773-9b54-16db8fe7b457" + }, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAV0AAAC2CAYAAAB6fF5CAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+j8jraAAAgAElEQVR4nOy9e9BtyVne93u7e1327bucObe5acRF6MJFAgwi3CxLBklIKiiqcLliBSjKuOxUkn+SGEyRyKbiFGW7SjjGKZKq2KGgHBcxRQVfgGAnNgYHy8SSHYONGMFIMxpp5sx3vtu+rrW6O39091pr729/55xBo3M0c/ZT9Z2z97r2Wmuvt99+3qffV7z37LDDDjvscH+gHnQDdthhhx0eJuyM7g477LDDfcTO6O6www473EfsjO4OO+yww33EzujusMMOO9xH7IzuDjvssMN9xM7o9iAivyQi3/eg27HDDl8oEJF/KiJ/+n7v+1rGq97oisi09+dEZNH7/qdezrG89+/13v/0H7Idz4jIH//D7LvDDvcDu9/oFwbMg27A5wrv/Th9FpFngD/tvf/Hm9uJiPHeN/ezbTvssMMOm3jVe7qXQUTeISLPicgPichngb8tIoci8g9E5JaIHMfPT/T2aYdDIvL9IvLrIvLX4rZ/ICLvvcdzf7+I/IaIfFhETkTk90XkG+PyZ0XkxT6NISLvE5GPishZXP8XN473vSLySRE5EpH/pu+xiIgSkR8WkU/E9T8nIldeiXu4w2sfd3snIr5ERD4Sf5//R//3JSLfICL/Iv7O/42IvOP+XsGrD69ZoxtxE7gCPAX8GcL1/u34/XXAAvjJO+z/duB3gavAXwH+FxGRezz324F/CzwC/B3g7wJfB3wp8EHgJ0Ukeekz4HuBA+B9wJ8Tke8CEJG3AP8j8KeAR4F94PHeef5z4LuAPwo8BhwDf/Me27jDDvfyTnwv8AOE318D/A8AIvI48A+B/47wnv1XwM+LyLX70vJXK7z3r5k/4Bngj8fP7wAqoLzD9m8Djnvf/ymBngD4fuDp3roh4IGb93Du7wd+r7fuK+O+N3rLjoC3XXKsnwA+HD//t8D/ttGOqneufw+8q7f+UaAGzIN+Hru/L6y//m/0Dttseyd+vPf9LfH3p4EfAn5mY/9fAb6vt++fftDX/YX296rndO+CW977ZfoiIkPgw8B7gMO4eCIi2ntvt+z/2fTBez+PTu54y3bb8ELv8yIeY3PZOLbr7cCPA18B5EAB/O9xu8eAZzfacdQ7zlPAL4iI6y2zwA3g0/fY1h0eUtzjO/Fsb5dPAhlh9PcU8D0i8oHe+gz4vz+/rX5147VOL2ymUPsvgTcCb/fe7wHfGpffK2Xw+cLfAX4ReNJ7vw/8FF2bPgP0eecBgbJIeBZ4r/f+oPdXeu93BneHe8G9vBNP9j6/jjCSeonw2/uZjd/eyHv/4/ej4a9WvNaN7iYmBA/zJAYDPvSA25MwAW5775ci8vXAf9xb9/eAD8RAXA78RdZfiJ8C/rKIPAUgItdE5DvvU7t3ePUhE5Ey/RG827u9Ex8UkbdEr/jHgL8XveCfJfw23y0iOh7zHVsCcTv08LAZ3Z8ABoRe+jeBX36wzWnxnwI/JiLnBA7359IK7/1vE4Jlf5fg9U6BF4FV3OSvE7zk/zPu/5uEIN4OO2zDPyIY2fR3wN3fiZ8B/lcC3VYC/wWA9/5Z4DuBHwFuETzf/5qHz668LEgkvHd4lSAqHk6AN3jv/+BBt2eHHXZ4edj1SK8CiMgHRGQoIiPgrwH/HyESvcMOO7zKsDO6rw58J/B8/HsD8Cf9boiyww6vSuzohR122GGH+4idp7vDDjvscB+xM7o77LDDDvcRd5yRlnhD5xzOWc7Pp/yjX/pl/swP/gCP3rxCtVwgKLTKsNbivUdEEIEwecsBChFFkJZ6RPUmTokggCB4FIIH8XgfDiBCWEY3y0GHoyBKAAVe8DQoD0opRHzv8Dp+cqSUCU6F88XrQ6n1fufS1AoS2ysCzvd3QJS0134BLt0TwffbdoHVEUSpcHVeUPEeeu9Jd8nj1vcQwcdF6fzee5SO7en1qUqpNE2z/T9tn9Bf3sel17axX79dIoK1FpMX3HrpJT784b/JH3/3d1CUZbu+vfAHgL/0l/7Sjlfb4fOKD33oQ1t/23c0us6FN/oXf/EXef75T/PP//mv8S9+459xcDBgtZyBV4gIzrnW2HrvCO+gRGMbPgf41tClY4fVwSBbAeWjMWD9bQxbdJsHYxONuO8MiPdu86VeNxgia/PU+gZl2z6bRkmQjXU+Xut227G2bX8T1Z2zPYWA93H44TxOYqfg14+X7ncfqfNIzwHAu+7amqZpt7sbj7/NiPbXbR5j814pCc9XoXBVxfWre/zlv/yjHB0d8Sf/1PdSlOVdDfkOO7xWcUd6QSmFUooPfOADvPOd7+Jf/svfAGcxYhByvO8ZDII9c61nFw7dM1lr3+CiQcP71i7d2bOi9YTBorVuDa0ohdIKUWrNo21P7bskP5vtcM7h4rrkuW823yXXUrr7s3kt29B57a1rSrp57aXG6/cEw6a1jsdNVjQY0nS9AKIuGvxwfevf19uyvZOQNES5ZH3aZtMj3jTAzllwDu8tHo9tFE3T8D//T3+dZ599pu1wd0HcHR5G3NHoJuNzfHzMO771m7HVqqUavLfRaYwvlw+0gNamfeG9KBBwvsbTgHRDYBFBvAGvo7PoERyOBpeOd7FBwWQJgUYQD2istdE79ngJ2V4aXDRVLpg6BU7CMVoD3XrH0YhodcfBrgA6ee8+HNmz3XgE8qPzOoMxU4iPHQTB4/bWdR5z6nQEvPi10UDoOAQcuKY3SvDd+rbz6Ba391prfYFK2Wy3w+MDYxMeyZZ71D/GZUbTA14JKAUieO9QXljOF7z/fe9hOj3fGdwdHlrcNZDmnOPpp5+mKLO15a2n1S7pUpd1HG7gT6XHYngfjEl46WzPeLYHBqQzRP1z9Yxl/3hpuJuG/mt2U6QdyveP1f+e9u/axQUDddmybceVSGEkj3P9nN21bjten5uNR+s+S8+Y+c7obTVgG53HpjfaN6jtcg9iPeJCB7CNQuhTG93zvkjPbDtvnmsO9kp+7+P/Abczujs8pLirp7tYLvhj73wHTdO0HmVaJ9FAAttfPO8SF7B23G5ozMWIUrIv/cPc4QXtG+FkRJxzkVq4GBjqe239HJf94112tuR9p2H45jW3x9ngYHu3KXp+HZ3R77za3frt6Z3fQehAlLT0Rhe8lHVel/UOapNO2RY0E6Lh3bi
2zQ5l83lso2o2P4edNVjHd77v21kuFni/HhjcYYeHAXc0us45/q9/8k+4+sgBYdCegkl9+EhPrntdyWv0cbDber8+WCClhEQgrL+cvlU0BMHA5fwi9AweXaCq2z7uuyVo0zc6rbFq97o8kt9eAuue8cWGtU77JpO9ZhwT39wfzuNTK6RlwiON3TOYm51fL1jn4x3peaKXXf+2drcf7+K53g2b53U2BFlf9+RNfuPX/xnW7ozuDg8f7mh0rW34Tz74QWxT0xow51srsO7V0PucotjJQjm8S9ZAgU/qBR8CV3G/1kPbcHf7BnCbIXDOBQ60J6kKZ3XBc3O9hVvQHh/ZIuW6eN41j3fL/qmzIHYsPhrAZFz7krN1OiTxutLdYyLH2jPe4fydQV0zrukaXHf8bVA9brrXmGjZ6c6fnvcW9GmKO1EMfXjvaZqG/+zP/cC6gmWHHR4S3FEyprVif3+Mp07u3SUv1UVvajOC3r3JluTFbVITbVCIzj5e6nH21gdvdZtWNhj1taH7lv0TLlInd95n83o32OSLXqH3bDtsGtZD6qg2PPi00dqAwEeK42Lb7qQu6G3ZdTDpGFtsYJ9G2hLa3HreOyEcS3P1kSsYre+6/cOOr/3ar1377pzjox/96ANqzQ6vBO5odP/BP/iHWNsgyiHo1rtNPKUnvvtx+34Qqh8Io10f/M9Ov9tfF40IFx3STc52k2Ns27OFC03H7VvyTdnTZoR+s13t+bYE4da+c+GSL24XvdS1ZRKmMSTD3dIJ6ZhrpxE2LqbHg3f3Z1uQq/PIe+2Uvs292HF07XyFAl8C4hTiHb/6q7/Me977/lfmuK9RfMd3fMdawLVpGrIs4yMf+cgDbNUOnwvuSC986EMfihxj0BG5NPBPw1BIY+nem9xNUOjzjt4TKYZ17zAhGU6RqLG9xNHse29KKYzS5HmOMRlZFv601mitIWlYe5703Ya/24JI7d/mNu14/y4G10dPNhnERBcIQWfb3tKo5JXOww3n8uGP8MC0qHhcaY3k9o5I0EoF77l/78KXOKsvdJzO+o5K3jhG2iZdx4Vr9S0pfSmF04f3HttY/sqP//d33/ghxjvf+c4Lv1djDO9617v4pm/6pgfUqh0+V9zR0/2df//b3Lj2CM71HKNkENa4wLQsLdTRzvXHxAJiIQbVNofAF4bqauP7Fu8UQItQlEMg8JRN02BdA2LxVYWPw+JtHvLm8dPny9ZvXHK4Ni+XGlwIRqr14iVO8xVBicLHQJxqr63/gvVUAhdYivXOqv/9ggzNd/use9e0Iwvvehx8GDJcPFf6P7bdpZmH8Tip/d2iiyqHdD/SVO9P/N7Ht9+0HXj3u9/N13/91291EvI851u+5VvQWvNrv/ZrD6B19w/vec97yLIgV/37f//vP+DWvDK4o9EtcnMh2LFp/DYRlnURdqXCtFW1ZkQhGJ91A6IuCZSFfbZH4ZUxlMWI8XiPshyglOL4+IjT89t4ExQGztoLx7kX9K/T94xOaH00IMnG+c77Dl99t52sG1MR8M5tTKdNNEfnMAoSpnb0PXUu0ruDwYC6rsP9UAprbTfra+OaLly773ViyYBe0kndTbrnhVZ1cum2vT54snevhZUfPrz1rW+9oy68KAre9KY3vWaN7nvf+17G4zFvetOb2vvwWjG6d6QXhoNB9yW+7dYTvMd1uUJYrSC8VVFKJcloKZR2hFlnGXiFqLhc1lnEPu+Yps2G4TVIG5ZPBkGBMowGY/YODzl45ICr167zlW/9Gr7kS97MaHyA1marsU7nuqwDSUarz+fK5oyuzhb2eNrwrVNBBEvpoooBPNaD9kKR5YhSODrqRiQ8lJbKiPdBiQ8z5lRYonSgGMrhCOs9ymiMztCmm5osKp0RJPIZfY/eObfOEsCFvA6iJNI93b7OuR7h3CHNc7mM6ujIkLDroMgvPJMd4Lu/+7spiuKu2z3yyCN827d9231o0f3F+9//fr7ma76Gt7zlLWsdzwc/+MEH2KpXDndRL4TosiTvB2Kgx/cnedFz1uJnh5Is7hOHoXH6a7BUMZNYpBH6L+mm95eG2WnUu74taKPJCsN0es7JyRFa51y/doPHn3icvcmQ3/3d3+Z8eoKtmzh9uWt3OlbKodCf/HHBSG9zMel5zht0RDiGYl2nG+6g9uE+2sauHcv1vFxiUCu2Jtx372msQytFYfJAp7gVznuMyRgVe1T1FNu4rjnR+/Q+cETO+nVjGeVvfQ+936GGXBRuzZgm+ZtSCrd5Q+g8/k3ttiBhKnZ7O1+h4NxrDE899dQdvdyEPM9bdcOv/uqvfr6bdV/w/ve/n6/6qq/CmIum6Yu/+IsfQIteedz1ySaPKHzpr0npA1NgxbeelFIpKCYgGhGD89HASI2oaL43PNCLkrPLaIye14Xn6OSYW7ducXJyxgsvfJY/+MTTfObTzzMoR7zhDW+iLIYoo1G9RDHpHOl/ay2XYc3D633f5kGv0SBrFGrnGhrCULx2FmfX8ysoQFw/8U8MYtqQ/NJojcnzNsBmdEaRj9jfu8r1Rx+jKIZhnQocsojgvOsMuay3P50oTfToL+urIZTRnTGOs+K2GdzNe9uHk06VtjO3Lw9f/uVffkE+BoFmeOSRRx5Ai155vO997+OrvuqrWg53EyLCn/2zf/Y+t+qVx+eQxFza4FL6S15VyHHbGyKveYjr7uJW3rR/lq30a0wRqQ1FMWQ2m7NarVitVtTLFefnJ/z+Jz7BJz/5SfJswBOPP4VWOUqZmHN33eDn0Yhtnn+bUb1XPjhe3Gaz23kRNo4ctNZ4gbIsyfIM631UL3T3pw3kOR/aGikGYwoODq/xxjd9OW94w5u5cf1RysEEpXpeQhzze/GgaEcWrUHtdWCtYqK3bE0tYvSFe9fOHIw/gs17uBb8ZP15vqx7+RDj9a9/Pb/zO7/DRz/6Ud761rdeWP8lX/IlfPu3f/sDaNkrh3e/+9287W1vu9TgJly9evU+tejzh7slMe9ejLVRaTKqfm1Fisq3XlMysNIAOtqgBhGzJr9aMyxhYWuwfMtDrk8sSOahqmqsdRidIwhlaajrGmctt269QJYZnnjidUxnM27d+iy19+AanOvOuaxWTEZj5vP51kBbS3akdnmi4qC9HT3uN7UfPBZBp50Bj1fhGrUTnBKyLMNVFXVdY4yhUTHDmgNFyJqmdPBaszwPp7fCcDLhicdfx97+PuPxhCIfUFUVw9EeJ+fHUC/ARadUabwEmgBUq1aQSAGo3qjDRRVKmnVnjMFaG+gC182s69+dRD9tM6FrnVkaGfWe+Q53x6c+9SkgvI/PPvvshfXGGIbD4f1u1iuGd73rXXzd131dS2e+1nFXT3eblGpz6JyqQ3gP1rmoPU2eUDxO6xOpteP2h+x3jpB3uQR8rwNwzgUPt67DUBwVtLpKs1qtODq6zXQ654nHn+Sxx57g8MoVRGu0CakOjTEYpVksFpdee99jTdNng/EJf1spBkCJZv1yAp8LwdPF+dbz9t6zXC5bgxZoGtXKuELib4t1DVkmPHrjUQbFgN
OzU87Oz7HeMR6PuXJ4hSzLI5UC4MI9jjkv+gazT1P7GOwLwbXWz8Y1FoVsLI9VOnrXu+0Zbhs59D/vPN2Xj+PjY974xjdeWP6VX/mV/IW/8Bf45m/+5gfQqj88vvVbv5Vv/MZvfGgMLtwjp7v5PXk73TpHqBjRy30gEpf5NaN1z8P1u7yQ6dx1XSE93Za1Nc45rG2C191YptNzimLAZHLAaHTAo4++jkSP9JOQDwYDsiy70NH0O5g7e2f+wqdgVMNkDw1oF+RiCRpBrMOtagyCisqGdoaaCGU5wDlP0zjybMC1q4/iGs/R0RH1YkU1X7A4m7JYLDg4OGAwGLXBPS+qVSX4nkGHpEKQGAD1bXtB1jzb/l/rEW+REm5iG3++M7SfOz7+8Y/zFV/xFWvLlFLkec473/lO3v72tz+glr08fMM3fAPveMc77hg0vHbtWnBGIrTW/Pk//+fvR/M+b7ij0b23F0Tav3Z6b9RSKR2NmqgYPOuYXs+6nCgtAy4Y6u48XbuCjEkjTvC2YblcUjerjgJAyPMMbxuaqubk5ITxeI/Geuq65uDgShh2+563KbQBtbSs37C0Db12B9agH6DqAlgp2JjoAgf4aAyTwfc4RCtMnnUz8SJBKiIURQlAVVXk2YCD/SuAYrlcIpE6mE0XTKdTTk9OUEpR5GV3v3z/uSTj5xEVnk+n84oZ4SSsc3iMMReChnfqdFJnLEigLLYSDju8ElitVluXiwjvec97tnK/X0j46q/+at797ndfamPe8IY3oJTipZdeurDu1e4Vf07VgNfkW6QMW325FyCu8467FFxBFRF5Q7vmNfVVqtuRPGoRoa5Dnt/xsCTXZq3kTrVqqK3jpZdeYjGbU68qnnjsMZx1VFWDNqHkkDE5iGZ6Pse6IJPa5pH3edt2Gm/0YnH9BD++/S/Er6TlTVHB+wRwddPqc20giqMhDRUX8ixHibBaVRTFkP39A3KT4SsbZGN5AUqDVsyWC07PppyfnzMZjzGiWm4WAaVV/FtXJ/SNqtY60CcCosBiQYNX8UJgfaYgtNnP+uqHoJhIgsGLVMJl9MMO946nn356q5oh4bu+67t485vffB9b9PJwJ4furW99K08//fRr9vdxx0DaNoSJrGzYRRd92GAww8vXvVidsVr3VvvrEw/q6UrvtIG4LUZY6F50Ywy1DRH6YZkzHJYMBiOWyxWrxYLVasXt27fRRnPz0Ud54onX8dxznwrJfAiKgDzPmS9mjMd7TKdn+C3FH9sh+JrXHVsjXRKgtL7PgUpYEKb+xnwLxpi219ZaUxQFy9kcH9uktGK1WlFkBcOiBA+LxYJhMWQ4HFIMBtQ2nOP8/BytNSenDZO9g/W2927hndQiIfl790ySt9+WQ+p5/Gl/R9BaK9a57nR2zcXsZO2z31ENnxOcc2EElG+fZPIn/sSf4Gd/9md55pln7m/D7oI3v/nNfOADH7iwvK7rtdmUCcvlkrIs71fzPu+4Z6ObDKEVwPeLeyfZEVGETxj2xyq9gUZYP9Zm7oD4gTTsbQVnvtu+X1o9bJEqEQPi0VlGluUMRmNEC06ELDN4mwdPTylu3z5GG8ONGzeom4bnnvsUTVXR1J7BaMheNuD8/KylNtYMV/yslEKLWu9MeiEpiZ2Ai4G//nRc50NAS5S02rG+AfPOtZIZpTXL5ZIsKxgNJ3jnaZYVe3t77I33KMqC0XhMXg5YrSq01kynU24fHbO3f4iXDFE1OId1DTqpKDYke/3rDF5xSJITDGrgoVvOLVIp7TXFy0/pKPverpKkbfFtJ5R0vYnS0a9RT+ZzQZIv3gs+9rGP8a53vYtf+qVfYjzePqX61TKLaz6f893f/d38yq/8yoV1h4eHNE3zqqcVEu5odPsvY8qfEMxd35pKV+q7tUFJ+nXxeNsM7prhbQ0eLQ0R2NBezodoA6wH6z3GKJQSxqMhog06y8AL1XIRDYSjrmu01pydnrK3t8fVq9c4OT0l04qz82POz09QYsjzjLIYMp2exWsH70OiHqMNmcnQOkPHGTNN0wSv3Id0hYIjL4chgOUb6moVPEXnaZpodOLkiK50fRjar6qqnR1XNTUmz8izInZinjzLGZQDlBbG4wnD0YhlVUfjbEAszjfgmyA/swrnLFpp2hhn75m0QURnwfuolOh4eetsoE+SzQ2MRbC9Lnj0XgI9lOiTwFP7UBU6nqfLmEY7TKqdbbOf7dDhB3/wB5lMJve8/a//+q/zPd/zPfytv/W3GI1G7O3tfR5b9/nB2dkZP/ADP7DV4L4WcU863c740jqgSUCfNJ/9aD1ig/8rNm25VY1w0QDLmjA/LIdkKbqaYC3JAeIQ0RRZzmpVMy5K6qpCTMZ4b8J8fo7UHu+knep7+/YRjz/5JDdu3ORTz3yCyeQQvGE4HOB8GEo3jcVhqZZVlH8Jg3JEkQ8YjMcU5QjnLPPFLAynfQjQaZMxmeyT5Tm4htOTY6qqolrOEWXwTc3KNeE+Rf2tMYbJZMLZ2VlrgJumIc9KclPgmsTh5m2UGqBa1dTRA5jOzgJH7Dyz+Sxw6S5U6SCOOsJoZX0GXFI5hOcaMp+p3v2WnrKh/S1oAeUjoetbDje4tkERoZJs8DL5GKD8zui+EvjlX/5lHnvsMb7v+76PD3/4wxweHj7oJr0s/PAP/zA///M//6Cbcd9wV3phnQME8K2H4r0PEXdPKw9LovrwsiragjXeX5CGXPB8e+fsG/pWRJBkS3HrtG+WZTRNw/7+hKZpGAwGeOeorcOjAUtZ5CijyYswweD49ik3bj5GtVzyqU8+w3g05vx8imjFYDBguLeH8jC1p7gMstywv3fIZP+QycEhh4dXMCanrmu8d3z608+S5zmPPHKdshwwKEvOTm5zunfMYjHj+PSY6ekJdbMkb8IEklAWPQyZqqpq8wGfn5+TmUCXOOsQL5g8J8uyVlGglDAoB2TeM51NEXQwlt6xWiyjF90vea/wPmio+4Es530I7HkfPHWRlqGHmA9iy7NKKoj05FQMKHpCsM0733ryrRa7p93ORF8wxDt8bvjpn/5pBoMBP/ZjP8a1a9cedHPuCZ/97Gc5Ojp60M24r7hneiEgVNldl3bZNQlVXBGG3G0wLXCUm9jUbrpoYbdJKtJLn0T5zjnEd7ypMYaqqiiHgzhRQjBKkeUZy6ZitlqiaxWm3CqFtQ1NY3n0scdZLmcobRjujSmKEIRTRuFWFS+Ipq4bRqMRBweHjEZ7XLv5OIdXHmE8nlCWJYLixo0nKMqCvb39oBOua8qiYDze4+TkNuVoyPOuoao0y9USRQgcIMJwOKauK7IsZzabBuOq85BYvPEMyoKiKDBat4bXZBmiFbnW7Ok96rpmcX4K1tNUdTBvzgYJGBINbpfPN95VEg9NUmKQ+OewhYJ25lrf8K6NgFzqEONz7dVVcxdSWHbPfmd0X3n81E/9FMYYfvRHf5QbN2486ObcEc899xw/8iM/ws/93M896KbcV9yVXui8U48oh6JLGhP+D5ypYNphf/BoQgrGpHfoi++3BtLoybA6qx7Xb
7yc0s1vSwoA59yaZzUYDMi0wWMpyzBFdj49p66rYChFsVwuGE8mPP66pzh66ZirV6+xWq04ODggy3OqxYp8MOL8/ITBYMhwOKbIB4zHEybjPQ6vXKEoSwbDMTcefRznLM47quWK+WyKaE0+mJKVA/ZWB9Sriun0jGFTo3Ccz87IswGZyZjPZpF/NWidoSTDNZ5MK7RWGKPIUu4DFatrGA2isXbFaFAwLUpsczt6wqF6hm3qnuF0Pa833EpFd8OTnrflY9c4dokZyqK/muQY8Zk6l0YotFx8okrWotFK2sTuu4kSnx/85E/+JMYYfuiHfoibN28+6OZcwGc+8xl+67d+i1/4hV/gZ37mZx50c+47XoZkrBO7r3uovXBJEt73g2obHO02g7uNeticHNF5UiGQp0SD7zwv53yI/HuP1jrkMlBdghZjDKPRKMqfAo/q8WRZwcDvceVQU61qrhw+QlEUOCXkhXB1MOTmzUdZVSu8BaUC9zscDhnv7TMYjiiKYQg8+Rrb1FT5iqIsGa8WHIkizwyLRclTT3lOp2fU1QrlPUe3XyTLcpbLJUYbTk6OKYohxmRUdUNmFIOyJM9jKaIipxwOKIshg8GQcjjEe2E5mzE9n7Yefr2q0Mq0ErBOxqdocx1D6OFiXuO+56lInq6PmTh9b/+k2ZX2z7VaMryut5oAACAASURBVNpCoGmSSj8/b38245audIdXED/xEz8BXCxs+YWAj3zkI/yNv/E3HnQzHhhepk53u2fivceLxBy7QnxtQxCH7kXdpntdeyGh86ToAj89UcTasNZFQ+xjhC8N10UJRVHQNDXg2ymt2hjyvMB5oalDkG+1WkSFwJI8N1RVEwzr/gRvLM5CUeYolZM6kGpVoZSmLIeMRhOMCTkWrFM0OvCteV7Q5BnT6RRjNIIwzXKuHF4FPN5ahsMh1jYsFktuvfhCrHyhaRqLEk2uNEVZkJlQB64oyvA3GCAmQ0STZYbxeMT09ATfNJgYhNMmZFSzPiZ9T+ZXFCLpvpGiohe4V9UTqLSSth4XnCqBpDJNfQ1v2/ludtK04oWWK97h84dkeHf4wsK9B9JwrR5zEyKC4PBeBx5XHD5OVRLpEmpvcoL9/dvj9I6fZlMhkJJ4h+VhWcgvm5JvWxrb4CrPaDSKCgBLlukYaAvGR5mQDMeYjExnrBYLqtWS+XzOYDDEORey9ltH0zQopRE0V6480moonbOIBE2lyTRGhykAdeMQspgUR2i0YjgcsVwuyJucoixwTYNtLDrPycwBq9WKwWBEtVqRmQznLdPzaTtLz7sQuNRKYWLBTZ2FzkNiACsrSrKybNUGdV3T2CZ0aj5JthItEDkcggQwOrSBCFI65IWII5Ww3rWdWheUSxMjaJdvBkEVcqFTDdx/6mB3fu4ODyfuKZAGIHgkJBtc53R9KgnTnz8WvwkgNr7EqvWk2nIyGy/x1jnJF3i/NHstFq+0DkQFraj3DMuS4XCIUopyNMb7UC1C62Ac6sbiddDXLmZzGteQFxlVXQHCaDSiqivqxbxNaWh0xnhyncFgQFEUIb9BUcTcEik5jG0H3FppdKbRWnFweMh8asKMt/mM1WLO6eyYsiwx0Rs9OztjPNmjyApOz08wWuOqGpMZjDFkWRGMbCuYDXRJnpd458lMhifwp4hHK4Otatoe0vfyJnQuaTCCPS+V9vl1PG9nONcNb/s5mk/Ve5bpdxEfVwisqXVvecfn7vCw4mVxurDOw6aZYsHI0pndfi0uOqlZwmYE/LIodj+Ithl4kRjISVI2EaEsy9bLTcdtGguEwFFVrfCuofEe11TMZwJKMRgOKQdDRsMRTdMwHI/whOmHx8fHTMYHoA3DyR5FnDJclCVaG3Ss82bxWGdxLuQ50FpjlMZP9tBKka+WzOdXOFeK2fk5i9WKq6PDmJayoSxKlIdimbNUGm8MOuZLMHmOzgpUnqEyg1YKrRSZyVBKsVx6hsNh4LRFyDKNm63fr00uPag+Ek3QGchNvlXFAJzEUU7H7QbiyCad9qYNjTyCdS7mn4gVhKUfHdhhh4cPd1YvREpBttjEFLxuQy8ioDoez8UXNfCCsmas4RKqQYIg7YIN3mIwiLOeUmcwHI7IssChZlmG1prcZFTVKlRn8B7b1HjXxKrDYX+T5eiqYjAc4X1I7zifzbl9dMRiteD4+ITr1495vXwxo+GQohyEKg+ZaWVwoeNJ7fQoUW3y8LwsEBV44eT8f+r3/wAxqq3ge3BwwHw+wzqLUYoyL9AimMy0eSGKsiAvSwbDEYNyFBQOSpGXoYBhXhSoOKzP8gKthabqUTQ9zrXzWtdvdOo4+x1cMLZhhJN4+bRNCJpd5OnTwdrAnI7J0tP0aXV5R7vDDq913IVeYM3gXdBqQnRs4/x63/N45GKik01czF+ggH6tsuBFJ8MN0JZr855QKVi1OWtnsxl1XbeTDIqiwChN08REGrYJNEDsDAInG4bwq+UK29hQfWE4ZLVacHZ2zmq15IUXPstqtcRZG4f7Iedua7y9i7QL0TClGxMmKBhjyLRGRHF+eoZoQ7Wc8eKLS/b39wHajiHLMrI8awNY4XzhnGHSxSgmNBfqpkGsoRwUFHmBaM2gKINMy1sk5qfwWLxPaTahJXI3YK3rpgYnHt15lNJY17S/ibaCRPsjuJwuUPEYfYWK9z5UFd0Z3h0eQtyVXmjlPxEXJV8+Rr+3vcaXv1Trnk7wsbzfLA65Tk20Q1w6TaiPErH5fN4K8YuioCzL4N1aGwtlhr8m8rTO+ZBDIebUFQlD6boOuQy89xilmE1POT8dslwuaJoqtsV1bY6ebbCxXSIY7z3WN0BonwJwnizPGY6GjIYZR0dHbZIYrRTlYMC0qdBGx04i3A9RoTiloFBKk+V5lMxBtViiyoKiHATpl3OYmOKSjXvXjjaEdkTRVyS0HHuSF0TJmLW2ra3Wzjq8RGd7YQST6OEUQEvPfSOT1A47PCy4s9FNcZgNQ5tyLjjvQlYwiRHy3pA0BVs2OdlN1UK7LCoghMAhxi26pvQ8q35+hm6mmm+9UGMMTdPgGhsNb6gmkcUkNa1HmWVM9vYwxnB6ek5dN4gIeZ4zGIw4Pj4OXq3yHB/fxjlHY2usy0KCGRVNsPftLKxk6DUgOqgYtNKI72bT5XmOrS17e3thW63xIpRFSdNUVFXVG00I3oVJIEWWtWqNLMsQQtrF2WyOT9pjCZ2OFoWVwGenuNZaNeOoONg6zPdhllq7JvWoQf6AUtIme+97sJuB0bTMpfzH7VMN599RDDs8jLgLp9txpl66BCWJJ0yaz5C0ryV46TxBANnqEXUnifFv72IUPB0kBOkuvphRH+HX0z2m3K/GGKrliizLo/cqiGic89imamerFUWOcw3n56dM9iYYozg6eok8z2mahvF4zGw2Z75YMalCPoemscGYe4+1gdqwtsHkeVv40dtwHTWQiwTDqxTiPVqrqMVVVNaGjGjasJxNEYKiIjM52oTlokJuC6UUTVNTVzWiF1gci8U8SMhMxmIxI8syhuMh6kgzGAxwvbyk
IgpliPkQImee+FaVZqF1nqfDh4TnPnzulA/d/e8b2vSbSLx2/4mtTwPuio/6nae7w0OKe1IvbOow07LEY4oXUtbYTk3gEdGsUQMb+yfqQqSb698/50V5kZCMcTD4wejhgu4UhNlsxrAcMJ2e41wwloiDxqJ18IT7xSDruuLWCy9S5Dmj0QDvhdVqFbndEdPpOWI0y+WK5XLJuLF464Km1YZAo7M2Jl+2eO+wFuJMkThUdygx6AyGwwFXr13jk8+cc3hln9OTc6ytuH10grWWyWRCkZWooWe5WGJt4JmNMczmM2xQzqKUpnKOl46OuLJ/wOnpKYE0COcONi0mIbKentqsNwDZ7m22/LmkjLrh2cZ5EGvPtE819HzzNU82bRNonZ2x3eHhxp2NrgjWe7QHL4K+ROIVPNRuyNilXoymeC1yflH+1Z7uDk1JnlIqnJhoiVDnzFLXlrqeI8BqMQ9BMpPhveBtSBBusiArS9xtqgZc1w3Hs1k7+aGqLaPxmKLI8ALz6awd9jdNQ71agfeYLKPI80gpNDiXcusKeEvjLdqB0QLicB4GoxFXr17n5PSE1XKJ1prj4+OgpDAZjlDbzdVV7CCkzbGrdZro0XB2dpsiLxiVOS985tNAqCqRZVm8dkXTxKfTC3aFjG8u8r/rfO9Fvl51kyV6zy15r9ueaddRbv6U1o+/LQnODjs8DLizeoF+8MpfCHq3Ufr2Re4nw4FgdHs1Ji4RxrceUfjSvrCb00+1VpFLDIZWKUfTOHTmwDc0ddOmTAwpHU2gIBQQPVitNYeHhygVSuG8dOsWRhtms1nYN9MMJxNOTm+3eQO01jhrWS5n1PWKxSIl+vZhCjDgvcV5C15QOtyPxtZowElSEQRdcDkcMh6PeeH8jOVsxt7eHp9+7jNMJhPq5ZKsCDIwkxnqqibLiAbfsaprTJGD9UxPzxgMCsqiZLlYhDSTAlVVt5yrSKxUwaZBvVxxkAzrts5xLeC24em2x/R+nT/uQUuYmtzPybDDDnfC2dnZa6ZqBNzF6Ha1wDyqo3dbiO9iLH2eTqluwgLQ5t9NBR/bPzpT0Kcw0tLNFzPwqF2TRTkQCy4EwJQH5xXeCXjBGME1FaI1ZVmGAFZjuf3SUZjqSyjhbm2NKE9VraCB+XwRZpxlBu8anK05Pz/lfHrK3jQkKK9sjTEanMXiqZqapmkQZRCBzCvEgvUNWmlAxwThgdsdDkJ5lcbWVIslN649QlVVaC0sqwXlcNAGoJomXJ/JMpytcZWnKHKMgunZKWVZslwt0EpoqvpCpdg+n9rRP73n2dIG/e3DM1FxJp9zvqs+0dsueb3JUCfHeqtBjZ23IK2Oe4cd7oZUwuq1gruoF7rKAheGnj0ZEKgwpI4SoU72EAyw6w1Rw2FbvqE7XvgU/9zF4aoKM5q0cyGIp0JwTCsTttEh1aHULmYZaxAFRhtKrcMQXQSdZXijWa5CwcrFYt7KzZIUrSyHraqgHA0REY6Pj3n8ySeoVitm52c4P0DjMVkIlNWNpbGWDIXyghePowGfhfSKSsfAYND6WldTrxYoFxQW8/k8Js4JuYG9c2HmmTaIDvdotVyGoFpVc3x0G2stTV2xWCzaahODwYDz6fmapwusGbmUXzc9o34HmZCG/955HF1Zof7zC/RON5JJ/HzXk/Z/Sr5VS4hWmJgFbYcdHja8rMoR/WXhBbLRburWOAdVQ6IEoqerYm6EO/CAbWCu85tbY5CCNG0Z8+BKQfQag/GXYKDwbdIakynyPEMRDMhisaBaVdRNRVWtWK1WgaOta6pqRWODBG6xWDAaTdBaMznYb6mHs5NTRoNhCMwpj0GHmWAmSMJwLsi0fEj2rkQQLM556nqFiNA0DcvljJPbR1SLJdOzM6pVBYCtm0hFWPSgJC8KRAnNKnjRTd1gmwZlQl6Hpg4cc1mW2BjMGwwHnLz4/Np9FaGbDSYpwXwIRCZdrnc+lGfvnjJJapb27f8eLvuddMHOzvauyf020kjusMPLRROCFa9a3H1yxGVDQAG86uk3O2fW+w0DLXLBaN/1+Fw0zkgo9e1VYphD8Kl23fTULFPtcETixASjM+bTGYvFHPFQNzWrahmM7aqiaSwpkY9tHJVd4X1MflNV7O8fYK1lOp0ym81Cpi+tKUwRyuxIHjhtYhYtolrAhXG8dUFqhoQKv6vlAm9rXrr1AtPzM4zSIUeBCGU5YDQaBXmYWlIOBigfjHXwxj3L+YIsNwyKck3ytVwu2dvf5/z8rONaew+nu9UXZXiiet825GT3iu55sfa7SMfqa4/759lhh3uFtZa/+lf/6oNuxueEe/Z0N6PNQaq7EWRJVYGJ003FtcfYnBTRHbMf2IkHpm8g1r1tC2gxEMuZN01DlgcKoCiLYBCV5sknn0Ssp24aXnjx+eDV1g11HarzLpdL6qYBhMa6mHsXRELwzTnHdDplNNlnNag5PT3l6tUrzGbniHi0JuZEyNFxSqsoE/hbJSAaLQbrG6z1NFEjPJtOOTs95vnnPs3TH/84RycvUZqSoijY29sjL+Y0Tc14PMZWlsY0SJGTS9DMLqZV9JgtUztnENUYTdNQmIxVs6RarQIfrgUn6Vl1vG0oxx4mXXjo3esu9eLmve8/tzXZ4LZthdbgb1NGbB53hx0eJtyzp9sPaq2/RL7lQlN6vzTvUy5s28F739ay9ykrWesirZ+/O28o/WJt8myDLM05S56HKL7JMw729rGpikKzas9f1zWLxYKm1dXGXLHeonXoAJyzQQMby6HP5ueYPOPKlVCI0lqPcyFgZ63DE+quiWjEebQ25CbHGBMCh06hVDhPVS2pqiUvfuZ5Pvav/xXz+RwasBK0uGenZxRlEWaf5QV5lgeqRBmyQQYuSN/qZRWUESpUyMjzUCBTFznT+SKpdcEF+++9D1Lm3hNr2d0eCRsCY2lCyeUa3hQ06ysUttEPl00XvkwyuMMODwPuObXjRU43vKxpwLiWzIS+Z9UXzq8frztmMNKdqYaUTnD9Bd3wumPC9KpeURRlMFB5mImWKAZrM0ajEavVinPvcN7jbE8O5UOSdYWgjKEcDBHRLOdzqqZiMV0wHATPMctyynJAXgzwXuGcDdF6FSZd6DgV1xiDEIyXEo3zhlrVNLbm7KUT/s1v/RbHR7d48cUXoQGlBS2GmzdvIt6zmM/ZG49BwnAqTCzQmDzD1Bm2sWQ6TFfWCMvFIpRxP9jnE596Zi1AJa0rGyY3hOBXymnRoVVVq85Yrj/3jqRNx2jLrqfnzeX6283A6A47PKy4axLzy/lX30WqYzXJGP6iSw8Yt2w1SuvoDGp65RMR6DfWbxvShimtzjusC7rUkAbR4Jqa89USrQ1pJlSe54xGY5qqCQUWncfZJpalCcm8lXiuX7vJm77ibfyzf/qPcbOaql5RLRd462gay3iyx2AwCvkNVIESTaYzRoNeHl8AF6rviobGrxDRVIuKp//9b/Px3/53nN2+hRGPGEUxGlGYkulsisk0w+GQxWLOYDQiUyH5TZEX6HKAOEeuDDZOZ26qGmsdmQiTyR7
1crmuNHChlFJIl7PF4EXuoVWQJeHJRpCzT9J6XLtPou994BQuGNWdod1hh3XctRrwpetapYH0hqqsvajpFd0mO0toDXmbk7Zdc8FOp5lU6/sK9arGHJq2ICW1xeHJMtp8tMPhKMifaotZGqpqTlM5vAPlPVoLXgTb1IjkfNEXfRH/4Xc+Rh2n4VZVw3AwZjTaYzQaYoxhMBi1tciUVuQxibiNEjmlYvYxo9FKOD854aP/6l9gqwWTsmRZr8iKIY89+Qb2Dg45Oz/hpRdfYCyhC5rP5xQpCbgDrwWUohiWOOcZjkbMzqeUgwGiFY1taKp1jS6qX9EjzUjznVSsnefho+46lT+66Jl6H6t1sN4hJqmCuItql0RL7eiEHXYIuCdOd6ux7EVgHB132E1+6PZPSF5n/9jd8fp8cThqCrJ1Xlf/xe00p2l6rHOOs7MZ2kFeligVKIHhcIQxeUzB6Bk2Q05PYOVD+kTnHXmWcb6qqVczTm59ihtX9nlaGWpbU9c1q1WYYlsWA4aDEVmWk5c5RZ6TaYO44D1rrdDGtO12ziEWpudTPv3JTzI7O2acK7wxFJmnyQr+yNd/I14ZTK741x/5CIvZCVpnLBaL9p4558CEXBaZ0WRZjvMekxlsYzFac3J2GjKqsT4jzPdv2WaQi76BTduvF5Vcn20Wtm/pJNfpfdd+K9Gwr42Ndh7vDjvcY8Kbbcukk4slD7XPvfZ1mxe52XazbtR6wbinDGZ3kpWFk6eaaNY2VFUduFXvKcoy5M/VQU+7p4TMaM7Pz1jMc5QtsHUFKFRmUHWNVo7jFz/BfFEzyDNWvRSGw1Ewtt5DtVoh3uJ1RsMSqS3e1Jg8IysLdJ6FYb0IjWjOjo/5g6d/j8nAoAcl1WKOKsZ8/IVztFqgzIim8bzlLV/Ov/u3/y+TyYTZdIbSmsY2WOfIKfDOUq88g8KhPGR5xsxOEYHlcsGFaWO959cnbzY/x6cWOgkVRiYhdadqd+6eo2o5YZHI7abfw4VzdyXaL0gAd9jhIcS9T45IsqMeTysinUxMOrlR8praQNolVQIEFYb3uudJtf9fNLj943fStFBufbVaUjeCs2Byg9ImnFdC4vLMGLI8x5gwO20+n9L4GjKFszUozajIGGRCoTU+c8wMlHlBUZQopRlN9lFasVrOmZ0cU2qDH88ZDAdkRlNkhmI4YLi3j9qboLMs5j4TjAi3X3qR/cmAQgpWuSYfFfz+7QWnJ59hUpZUVmOKfcbjMaPRGFBYV7dD/uVyQV1XbdVeay0TMybLDfhQ70xrHfJK9p6fUqHkkov8baI9EovuW7o2VABxKbApgQVWnjgNPOwf7LpvnwmOXhazmI+BuI3IBWO843l3eJjxMgpTdpD4zzp/F+RGmzpPuPiCrU9HTTPPoOc+pbNseEXJsDu0zsArJCbCqOoV0oDRZSydrlgul23mMGstw8GAvCwpywHaaE5eeJZmcYqtFYhmOCgxcYpqkQllUaDLkslkzJd+2RsZjvawzjI9Pebs+ecYKE12sMRPRuQi+MwgkwnGeUyeoUxINI6EROZaG/aGY8aZYrEqAM8je2Pq+RlNPUVnBcvGMigzVqsV4/GEs+lp9BTjvXbgmoYmzM+L2dI0QlBsOH9RfZC802BXU+8Zs6G1gbBI8ShZf7bpOfRyF7e8b9JUt8fdeNay3pluft5hh4cR96bT3RS2p3XQk4pxkYeQ7RrN3pdWb5tswfpZNlUM4SRKpbIyBE/WBa2uKMWyDtzr+fl5SHCTyqjHqhFZkTMcjxlP9tifTDj+7B+wmJ7gnWcwHFA1DotCqYwbjx1QTq5z9epVvuEbv4XhcMT8/IijF19g8cJnaZRiaB26OcAbgyoKlPJoo8lGJVIOMDoE+B65foOvfNvXcPLp32VgagaTEbePb/PFr3+MUWEojEFlGfOzU1y1Yqky9q8csqcPWC4XWGtDWkedhRwMWsUUlZbRYECe55zMpyiTo9RyXTubomY9uF4n1w+Eeu9RPtTw8HiUC9t6icnafAqg9oOg6499LZC6xavdebk7PMy4d3ph20qJ8iEvLQ+YBq1pWu7lB477kpQNvZNEDW7r37ZcIG3J8KTTVUpRNzV11TAcDoNkzLk28bdtGoZ7+4gIeV5gHXgvFIMhV770OtdvXOezzz3Nye0jSpMzzkrGV66yd3AFM5hw7dHXcTDZ59qjj1ItFqwWc05uvYg9P2exXGBnC576oi+DPEd7mJ3eZnF+hhkPyPYO8UqjRBgfHvIf/bF38plnnuCZf/cRiix0FiMUpQk0iBJHFvNWaK2Dh5zl2MUS0QZEMRiO0Epjck1VrYIUTGmKosRkGcT0miH4FmvO+ahakFgo0vc6Th8pIxcmUKQxRm+sEffpRh7er+fR2Ay2rXXUbr3672Wz3XbY4WHBvdELWzg50pDXw1o0jc5Ib76M6SXrJ8EOBjS94j3PbE2Ols7bpzQImca8p2ksINR1g/dhmG2txVuLZDlZljMajSjKgtFoEmavDQoODw7Z/6Iv5c1veRvz5TmL1RLxiiuPXKMcjSjKMXkxQIvBSYNgmZ6c0MznNKsly+PbVKfnXL/2KNl4zO3pKfPbL/DoU09Rn53DtRpfFoho8rzk2o3HGY/HPHrzJs/83m9TO09TVeQKlNFUjUUBe4cTDq7fpCiGzJcrJnsT6qqiqWuqmLYxJD535Canbmo8YLQmz3OqpQq165A2ruYkPbck9OueUXrGPnq0kmR+nl4gLVQCCbXTthvQrYHS3m+mb3h3NMMODyvuyegqrfDWhXpZrf8TXs42au18q0JYe5m56NGszcdv5WUbutw4/E2ie5FQOcHaFJ0Pky9S3bKyGIaEL3t7VFXFaDQKHnQ8nzGGQTmkLAcURcn+wT7Xb95kvL/PaDBA64zGNiHnrTEYVYAiTLxoLE1jKQclTzz1RVCveL5pOHrueQ5yx2J6zsAoRGluT5cU84provEq3BNRGqNDztmyKBnvHTK5eoM3zt5OvVrQVCtW9YL5qqauwJoM50OF4/PTE24f3aKOmciqumYyHlM3dbwflrqqWVUrMpOTMrz5NXc2BMICLetaNUGgalLX5qMhjkXvU2XjXkrI5DnfqwHtqyb6v4EkgdsZ3h0eRtyT0b2Mk/MoZG1V8o78NrFCt9XWAEsvUEPnL3eMg49l03XMmRCc7boOofq8LKibBussJgvVgCf7e4zH47YsezqYdRZtDOVgzGAwIS9zRAmlGgA2FpJUrOoKowzWNmilUHnB+MoV3vTVX8vrvvj1vP5L38CnPvYx8iwH5xgdPMIf+bb3Mr5xneHVR7BKo330HHWgGbCewhiMuUE9WYVZdQ6ctdTW4mzwJJfLihdf/Cyr5QJjcpx3MY1jEWaolQV1XeGcxOrBrs2pq7WmqsP9S/mME9mTgpaO+L8I4rqJKW3du56+t9Pthk7E+X49kM2Aau8pyvr6fo20l5vBbIcdXiu4o9FNHJ+nrbPY8o1tIKs/hCRtE0vTpOTXW469/UXtHat33JSNbC3pdZws4Rxt4ppVtcJkQRpWDEq89y
ERjFQcHx8z2dtjOBwznkzYOzjAZEJmwMbKDGK6UuFNXVHVVVRJhGvKy5LcWZrGMBqPmYwO2CvGVC8d4QRUWZIfHpLt7yN5FvhcpTBahwkkRoH2YB15OUCbHOtCFjIclIRcoV6gLC1VUzOfL7B1A1hOj09idQrB17CYL8jyDO9D2XeHY1AUVMtpO3rwkQ5opOPZBUG5ECQjcr0QDGlgFVKhUdCKtiMIPwrajJ7tSML7jc6XNR633XWtMvCO093h4cTdPd12FhK9YWZKRtMZ2n7Zlt7OPd1uZ2S36TRbI07nbSX0jy0iiNOIEuraRS/Pc3p6RpZlbbXf2WzGYDBgOp1x8MSVkLvAGIrBgGvXb1AOBmhjWCyWOOejtCwkyamrVWsczs7OKPMC7z2LxZLhoCDPc5TSZFdzlM649Xsfh+kMBgNUkaMyg4u8t2ssDYEbFULnkZmsVVtoUyB10xotnYVH0tQNhwcHLGYzzs9OGU0OMFnBYj5nOp2ijWF/b4+qbjBGh7LzZYEyId+E0RnWNXgLKEWG0HiHSs+DYDyFoOFtjWovoOmhLV7ZseyJcY/5Knx/ZMKFZ72Zw6EvZ9thh4cR96ReSFNGE/qGs5v22wuUtcsvP2Y6TnsOkf77vjEJInm5CpFQzhyfjDE0jce5moODq7EKRKikO5vNyLKMW7dusbe3R1YUKK1DhWNjcNZhvaOum5jNy7JarTDRexQJBmkZq/YWRYGXDJMpDCAG1N4eq2s3mPoXsLmmFqidIycEpZwLmiulVKguQerAFForamsxJmvvR5tukjDT7tq16yzmc7yzONtg85y9vT2a5ZLz8yWr1YqyCFUulBLGexOmZ7dpbN3K7JJRbQf0npBngiD/0tED9pu6vZ5xbANv6bl21Hq3Qf9Z9vftPedt33fY4WHCHYk15Ql835aXJXi7QJw6mmqMhcCYiodWLSu7g6OjewAAIABJREFULenJ5V6PxP3Xq83qqE313uGAVVO3XrAxBV/2ZV8GhHSIdV1zcnLCahVK2hRlyZVHrjEcjSiKEmsDb7pcrtosZGdnU5yDo6MjFosFzjmapmK1WnB2dsL8fIqvK3RUUTS4EO0vc9SwpBFFY0OtNOccdR3yIHjvsXUdaAzvcda2xsqYMHMu3ZtUbt2YkC5yPB5z8+ajjCf7jIYT9iZ7DAcDvAhaZQwGJUpCxVTfWA4PD0OeCaVwLnmvIQ9vv4NUgBFBp/WpQ3CB2lGJt5XuubdtJEgEW1nZBnfbf96bHesueLbDw447Gt2k94TtBtLHwEo7cUFc1O26WDMsBlB6L9qmt7NttlL3cq6/tEm5IEpRNQ3eCbaRWJnB8hu/8f9wfj7D2lBc8uDggKIouPLIIxwcHHJ8+zYnt29zenqKaywiIQgVEuWc4WK6xMduPs4wGyA+lD6fTqcMy5K9vYOQ4yHxqo3DedBG0wQlLEqHmV+2DjxxP1m6c44qlt1x1mLT/Yz3OXm66U9E0EYzGo+5ceMGxoQkOPPZHIByNMSpcNzRZIyIYlCM8JLhrCdTpi1dLRIiYInjBcLz2eRi+16oB/z6cxK6acEiAupiruT+b2WTVtrNTtvhYcedA2l3kPX0Z4iJqDjRIU5gEEeMjwdhfZwDAawZl01hfcK6Ye5oirYMuHg0Cmsrrl69ztHRESKe8XjIN33TN/H8p1/k5qPXODw8oGkcn3r2OT7zmc9y48ZNRuMJAsxmU+o6byP+ySCsVhVnZycs53MaG3lXramcZeUqyjitOBglR7VaMpvNWMxnrOoaeSkPkx4mE5RRrVwtZUGzNg7qYz6KZIy1MXjr2gi/S3I61R2jjMUqp9Mp5XAYSsfPQw24xjuMEhSawaCgmufMl/OoVuiUBMT0ix6itxqeY+Juu/vO2igj5HBYT1SfPm8+v37dtv72/f2MMa/6AoM77PCHwZ2TmPdlRuoyDyXMUPJr6xTJhUrSpGSi+/KhO2o813i/3j7p/6bB+4YXbz0fZWShXtpv/uZv8sQTr+NjH/s37TD9y974Rq5ff5Qnn3ySa9euh0Qu8dz9oXxRFDSNZbVYcHT0IpPJAYOyjMNxoakqFrbBmsApN1VFtVgAYK1jUc2wWlM5hxchL/K22rD3nizLMNrgBBpnMXF6bpLBKfP/s/dmv7ak513/5x2qaq21pzP0cGzHdpx4IJYc+ecQy78fP0CxoiBHSUSUSMgR4gIhbkiEQMJMN1wkv38AKUhcREFEQmK6CAgFhQhiA8kFBCcKJMGQgaS7T3efYU9rrRre4XfxvO9bVWvvM9ju7t19zn6s9tl7r7Vq1apa9dTzfp/v8/2a4hQBiT1iNFVdYaylamqWe/t8YG8PfGAYerquY9hsiH2grhu01iwWK94c+pxO82nKJ1Us4tPJiUwYITyadzvFaafUr8JgmDx392a9e85BqH7Xle6TY7PZXPUuXMdbHI+vdLNyVFKpuvB4GohQ6TnlKclJovyayaGT2J1WK/EIill6ME1ZBYYgko19u0EpmfrSWtP3A1/72tfEKWK1T1UZfv8P/oCTkzOUUqw3G45u3OLo6FDEbYwqQwYPHtzHe0dlLbdv3cFYVWx/UOnm4iMOwTVBKF6b9ZrNZoMLkXq1xLkBNwziHDzR1o0xsqgbrNbEUi2LV1xtLS6E4jOnleCrItMbqaqaG0e36Luek4f38b5nvT4nxsjh0Q2CUgx9z95+I64W1uKCVJKF7kcgRk1I3N1paCgCOE9qcO02y6ane/pZy40twUTTG/N1wr0YTdNc4C8fHR1dNxyfsXg6Pd1Ef9oNY8woYj0b46UsWx9FYbj0okvJeT69lJ83memf0ZKMYLtJ91aabYr3ve9l7t17QBMsq1VFVdVJkUvT9y0PHzp0uvhXe3uF69o0DYvDGzRLQ9f19O02NaQi0XmaZkHXtsmFYmDotmzWZ7h2Sx8iZ6eWgGHRLNHGYGxFjNLc29vboxt6TDCpMWhKknIhFCEfrVWqeOVm1jRNgkE8Vls26y3gWa32UVGx3W5p25abN2+KcM9yRVVVDMmBOKZGmsyuxLHBNjv2+RTs2vSMrIrdplg5lxMI6AJmnzev5o/tJv3rgL/wF/4Ct27duurduI63OZ7KIy2EgGF0gZ1fdKpc1LmjDZBJvJddxLvvUV4iW9t5Rr5Qx82GiZuv1lpUvZLeQoYMTk5OuHPnJTablls3b3Pz5i1WeythFISArSoWTcPe3h7W2uLSsFgs8GFg2Hb4oRf34H6gqWtcP+B6kVKsjKXrO0Ln2Jyec/+NN6CyRKVolgsePLxPVdcsV3ulWWd0hbHJUDINTuRBkzyWodJQSYhRGm3pxlA3NYc3j+iHjg995CMcP3hA10klndXUTk9PuXXrFs1ihVeaqqrou4HgPdkhLZB1kMUGaLcRdhleW850OVezV6Sb4eXfH0gqZcFfoB5ex3U8j/F1eaRdXgUJ53Q0KExVrlJE5VGYOXv+EdtX06w6Yy2A6AnkikoT/agBYa2lri0y/aaLdToIv/bmrVt853d+Gu8DBwcHDN6z2WyKQIx3Dqs1N2/cwCea1
3a7IabEvj5fE2JIbr8yVmusxrmB6BzrkxOO799jvd0QNxFlLPViwc3FHhFwySI9fyYfI0PXE4wclxCC0Mby59VKlvlSHstxDJGqrlFaY6sK5wbqqiLGCtf3c4YCsL/aQwVN3weRULgkJ2olvGE53GOjTSt9oVolkmCPKUGXkmyn9N5ddoJ8VwBjCpvlWnfhOp7neCxlbDZZNMF1x6Vjvuh2qEcFz1WpUbODAe5UU5mXOzp27T5Hpt9Cea5MRGltSyUu8IArle6tW7eKa+/v/M7vcHJygvMy/JB5uXVdE0Kg6zrcMNC1Lev1Oe1mw+npKev1msVyya1bt1mslgAYq4lhQOFp2zUnx/eprOHo1k3qpmG7WbPdbNi2G9puy8nJcdJDsIBKQu8qcYBdabQR5HP4KMJCWklyz7Qz7xx1VbFoFgQfGJwrWhR5e3Vdp4ad5f0f+AAq8ZpzhCQ5JuyIUFTFUFIBh0togYLohKRWlrm+k+lApcqXaIrl5vOSvw4mjkJI1wn3Yty5c0f0QSbx5S9/+RrP3QmlFB/+8Ieveje+qXgqeOFRdKHLYdkJjhsTkyFq8gU7fV5OFrLtxB/NL9/ZeK6qClUtRoIXrdaqqnAJ0z042BdcdrFgs9ngQ2Bv74C+7/mD3/99Qow0TcO2adg/2Ge12kNrXRLg2dlp8VyrqopmuWRvtQKEprZZt0CgQtG3PeebtUAAVU29DHR9x/rsmOXeir69SWUrtts18hE99WoJRmOURilwrsd7nbjMRm48mU8bIcM3zjliFIF08Xyr8aGnaRYMwyCVeErAVV1xfHzCYrGg3W7RMbE1GFXXLmglKJUq3zhjGex2NadsiFlFHGECLhXthVm1PGGsXCeTeXzf930ft2/fnv3t+7//+0uv4joktNb8+T//5/mn//Sf8rWvfe2qd+cbisdXulAqx5wI5onzIgSRMUBVtnDx4tptyMTokWVrchTLOOKkmsqc3WzDHoEQPbmQq+umMA2UUty9e5e23aRmUsS5pEZWVUQf8Km6tEan6bGe4B2KiLWWGzdusre3x6pZlGpUEvOA7wdOj094cO+eCPsYS9UsWB0dYOqK9each/fvsT07JThxEj4+Pub+g/sE7xJP15BxF+9lYCJ6Pw4sKJko82mYIt/8soNEvVhQZ++2vYM0ZSeVfAzS5FwtVyP9LITZqZje00ryRBV4YV6NSqNM6fy68fljwp3/N23M5fHiXTjpWmnsOr6RsNbyoz/6o3zyk5+86l35huLxE2kxLfoTpkdqlJUJM5USqJLkOm2ujcVq6vw/sbLRKGWThoAuDIUYYkruZUsEYnleBIytODw8Yn//gPV6zenpKXVd0zQNdVXjXI9zQ0nmWimWqxV102CMTZxRjdaGGzdvcnBwQFVVLJcrFNC1W7puS9tvUBr8MLA+P+Phw/vYSjQZ6mbBYrXP4c2bLFYrtuszTo5PODs9ZX22pm23bNbnnJ0c03ctw+BR2qJ0jbGjlm8sCTak/+Is4S4SrWjoe6ytsHXFZrtls92UIY+qFtGfrm8vQkIxD1irgsPL6ZPjGvNSI8G8MeskQ3GyVFEVo9JHxzzBXtYfuK52JT7+8Y9zdHR01bvxro2///f//oXvSl3XfN/3fd8V7dE3F49PuiDjvJMLpEgD5ipH52bapJLJEG+k8Fthjvld8k6zChgkOVpb0dQLmmbJYrlgsVwIfLBcslgusZVlsVjifaDvB5bLZZl6y8MIuZLUWomLxP4exlppnDnHInmM2cpS14KrqXRD6fuOmCbPgg+C/XYdp6fH2MqwOthn7/CA1f4ey9Ue+/uHrPb2QUG72bA+PxNrdIT+1rUt52cndN16nHhLFXokluXkeIwmuHqUxX1VVdS1DHJYW7NYLgq1DEAlzDpj3Pn1+YRnmCGfIznPFFqgrCLiCBnkRpsA67OKuVSxOYFPb7yXQES7Nj/XAZ/+9KevqWKPiS996UvPFMXwySpj6d8Lo7qzzCrODuNjXjrwCJZbKuLJtqbYYUkoQSCDkiiyEEzil8YQJ0vSSNMsqJuaoW9pWxH4zhXucrlk/2CfqlokuUdJAgcHosi13qyJMXL71i2qSswe9/f3McZgrfiLnZ1tcL0rUo4KjR8GNpsN3TCwf3TEav+Aqm5QMWKNYZsgDdd3tNs1wR0WyCLGKEk8VesyPbYAbFk9KCgVK4wQjDGGxWJB37Y0TcPe3grnBvpty2LRcOfOHdrNFuccwTMmYDVhnMgan+yXlokL8r6T86vK/5UVh8pPniZKNXndDLoYmSdaKUI6b3Gy4plNtT3H8alPfYo7d+5c+Pvf/tt/my5ZM13HsxVPGAO+GGFe5qBEN5A4e0H2GkglkB7ZDPKyixqrOWnvNtucc0TnSiIwxuCCyDnuH+wRkl7B0dE+bhDr9eXeihtHR6z29qhsTdMsODg45PDwBoMb6AexvhFFLst2vUlQgzSinHOs1+ciEWmqcoPwzrHdbDk7O8NUNYvVPkpXVHWNTrxYU1uMKH/z8PiEoe8Yhp623bJa7UnOioF2u8YaK44VjcZakxJgSoxJ9MZ7h3MCNehUFW+Pj9lsNtS1UMaccxDTsfEOnxJkZhmMzZg4OXvFmOfCeZYEnel6U/A37V/qZRYq2WS7F2GE9JwQymhcTv5cNqTxnMVHPvIRbt68eeHv//Af/sNrbYpnNJ7YybiMtZDj0boJplRACj0Csikua75dtm2llNCcohiCayUc1hBkid11LTFG9vaEsdDUDdYahqGHVEkuVws+8C0f4ODgkM3mPNmZO46ODqmqivPzc0IMLJcN3vdst2tOT44FMzWGuqkSRCKTXH3XsdluqRYLbNOglRDjjBGcxdYLlvsH3HjpRfb2V7ihT+7ErXyuJFy+aBYYrRi6Fu96nPf4CD5VoKEkXU/fb1MjUPQbDg8FwmiaBQeHhyyXe/TdwMnpGT6ANkZGLHxIgxFpm1MYgBGzR4m2bq6GVSRp8O7QBseTUxJopguq3ceUQgzYBHfP/YC8ctm5S1/HJP7iX/yLnJ2dXfVuvKvih3/4h696F96yeKK0I4zKYJc9Nv39UXjerprC9DEZd00No0lmzs0klTMAuYEn5o7OOdq2S/snHfyDw0PquoLoabsWpRTvf//7iTHQdcKdhcjLL9/h6OgG5+fneO/Fqt172rZls1kXXDj/672InA9uYNO2MlFWWdFV0Iqu7TDa0DQ1xmgWqxV7+wfcun07qYu5lECFAaGN6Fb0fY9WMKSJNZkSk0ZWTrwg3VprTRHmOTw64sUXX2K5XLLYk4agrWqaxUKgEKtk8i5tS6CRnTwXshElBSvWKSHqyXl/ZOw06C5E+SoolJGhEibfER8DPOfshe/+7u/mYx/72IW//+Iv/mLx/rsOiX/1r/7VVe/CWxaPhRdmfM2dmEEDebmYlp7ja4R/q7BEFWZV0/Q9drbMvAIaW+cyGVbTOwdRYwwJ61zSLJbUVYXWhhADt2/f5KWX38frr9/j6Gifk5Nj9veP+OAHP4T3ntdfv0sIUUTAtWIYOqw1HBwciLYC4EOg
rir6vi+6upvNhlVqvBljqOuGbrvFB4fSSyoridcNA6vDQ2KMDH3HMgoG2ywburYv1kLOiRllXdfi4GAMzAYVJGmGgmdL4rJVRbNYsjl+QIgCtaDEnHKxt8/Z2RnOOXwcG5mlaTbhX0dItusQE2ZbVhlpWk7kKCd2SqWpOj1NavaHsVEW0xuktB+i0LaNnmzx+YybN2+yv79/1btxHe9wPHE4Ig8OXBaScM3k592qVri3oxftWBHPx4lHCENcgSeVsoqIE6Khahq8dwzOIzzWQNtuWK8blqt9rK24efMmH/iWD/HGm28Qg+KD3/JBDg72OTy8SV01PHjwgOPjB9L1XyzZbluqSpwWVqs9aV4oTd2I8th51+G9Z7vdcu/ePUlK1hQ5xqAiWM3gHd4PLBciNmO0oWpqlIZ2s8VoK46/XY9RlmgMbugAzWIh2gnGGPwwCC3NDTjXC1PE6MIOEUZFLxWsrYhRs1ztE8j4b8/56Zq27ajrmq0fxnM5vYHq3BOL6IkzZUJzy/nKeLBC3CLyVFveZtZykEXIoxkJIaQJOC3fGJ2mDK/jOp63+LrhhQvLyWmf5QIdLMywv2lHfhz/VZdtakYviyh0Er0OIWCTSAyQYIGeN998k4PDQ1586Q7Hx8fcvHGT7/iO72C5XIoT8P4+bdtydnZCVVXE6PHeFRWzpqmxSc/WOdG07bqOthUfsvV6zWKxYG9/n2bRYK1Nx0ZgBYDBO4ahxzmRnazriqpecXh0E43Y9PR9jzGKthU7oMrYciz6fsAHx9D3DEOLcy1dt6FLiT8Ej1LCTMjH5uDggGZRc/PmLXxUrFb7LFcrPvihDxVZyd3zmQV2okoEwOnfE5TzaDrXfGDFZA2JGC+c493vjbBRMg1Ng3l+K93Pfe5z/PE//scv/P1P/+k/zauvvnoFe/Tei8PDQ/76X//rfPGLX7zqXfm64qmkHY0xM4xpxrGcYHvzJgnEVE4pRfLtImkfWFQRVpkkBa2ZdGvS/+vkG6ZwIRKCLxW2UoGmaagqw2c+8xn+5//8n2y3HX/iT/y/DMPAf/tvv8be3h4vvPACx8cnbDZnBD/gXURpabhZW0HUOOeLPq3WlpDkIrfbLdbaknSzzGJeescYscndF7IWAmkYQ7NYHRBcS99ucX1HZbTgyLUt7IzaGvq+o64bmZSLHq0iVV2hokabKk2lZVsfWC6XVFXF/TfvpXFoODy6ydC3uLDh4YP76QZl8VnXIVetKjXLpAtISPhuhhnUBZiIAjfkhlqMkagV0Xu00rMqN8NSF8TM86CF0sTwfGsw1HU9ajWn+PznP89//I//8ZpK94i4ffs29+/fL79nCuj+/j5/82/+TQB+/dd/nV/4hV+4ql18qnhipZspR7tiJmOMxpRTJ9v5hiIhyCjtcrlXpAhhrGjVTtWVOUmaiMmczpREjLUYK4lnb2+fT/9f38Wrr97l85//PEdHB3zlK1/m9dfvcnZ2ytnZKcPQ0/dS5Q5Dx/n5Gev1GefnpwxDR91YqsoW2ccQHN45hr7l+Fg81YS/azFGIU47kjBlAEHYFJJshWObxWfquqGqa6pFjVKRtt3Qdxva7Vlannu6rsf7gb7v8G5I4vAGzQKl5L5ojKGqa0Kq+nPTbLlcslgsUBhu37rFMAzs76/oupbbt1+UG5X3xCSOUxTJEObCyDMhTZklvDfBBZfxqyFh8bvP1yOumw0256+//IZ9HRJnZ2fXCfcxcXx8fOnflVIsFgsWiwWf/exn+fznP/8O79nXF0+odCXxxcnAwjSy+Iw2THjz82pXlqOZiaBp2xatNS+++KKYLCY7knzhjvyHtFGliVHUyrSJQEBj0Kbhox/7JC+99DIvvvgCIcCv/dqv8b73vY9PfOJjvP7669y4cYMXXniB1157hZPjh1SVTQk+d+zF7XcYWpp6T7ixMRKDJwbH2dmZ+JEtHKvVfoEU5GYUExvBMAw6JVmdml4iEWlSU6yqG5z3NCvF9vyc9XrN/uEhMXqc72lqg/dDSty5yaUIMVnaeNDaopVUSMLLlQO8Wq3o+47gPa+9/jp9L7S3O+/7ACfHD+l7R20rGAa882XgJFeukWnDNJa+V4zZsl2qa59cLfLr883FakNU4DNZW4GKQUaFy3dGp/9Xk2mbp/6OXsd1PHUopfiTf/JP0nUd/+k//aer3p1L4ymSLsnO5hI34BhRE5Km0gpiZjRMnycVUcjirsCDBw8YhoGbN2/SNA1nZ2d0XZvonQq08Eu1FbqTVqJWppTlgx/+CC++9C10g6dtt/zGb/wGL7xwixdffBGtNevNmoODA5bLpShwWRkVlipVF20HpQ2r1Yqzs7MJtYuE93refPNNtNYcHOyxWh2IELnRY0d/UvnLY6ZgmdmIUimpMAX3tcQI/bYleofzPbXWeNdjrU54rynbmVaFIfgkgDNgrRWTzvTY+fk5r736Clorlsslbatouy33H7zOpz/9nbz2yiu8+od/yGpvNdvnaUOTfIyBkNqjeQrNez/Xw0gDKQoKLS2ff7l57pIEL45gPM/Qwnd/93fzPd/zPbO/fe/3fi//5b/8lyvao2cv3s3fryeyF4DR+nynOy2jnI5i9xui4LIEiotY8hUL+LK0FVUxWYKfp8rPWsudO++nqmq22zXr9Tot3zWmshweHfLyy+/D6gUPjx/w6it/xJtvvsnNo1vsH+7hnKNplty8eZvjk3s09ZKDgwPeeOMNtIqsVktkYCI1l5SwAhaLBX0/sFmfs16fAxS2RgiBxWJBCLBNBpSr1aIcl5wcs/JX1sidjjhrbZOFUCM4aYCmNrTrjeDJQVFXFSDJPFPonHOSXGOWvtQJG7elkr5//z4PHz5ku1mzv7+H90PS6zXcu/cG+IHf+K//lT4EPvxt30oIgbuv3SV6uXlIc0slfY08jJIq3JClN3eseZQipGERtBLtjR3sV07y9Hs0jm6DKjzg8JzSFy5LCNewwvMTT3SOyEIxl1W6Y+WqC/1HESYdmSRLFUYPrfnrffq7SZXlGzjnMdqwv3/Ai7dfomkWtEPPyekJv/e/f5eDgyNOTk5BKZbLFWhJeKvVir29PTabDU295PDwkM1mw/7+Pn23nbxXWuoajTaiTWttLInWJ4aB4LF1uVFYa1ksFoXPn6128r/Tjn5+fNrJt7YWB2BT0w0ti8UC53rqusJHR/Qa5RzGVDPsUynSjc1jTSWuEyEweCfSk6sVxw/vS0Mt4eonJw/l3ChYLBfcuXmDu3fv0vc9e6s9bt+8BUTatmUYXJKslCGVupax5+B8YiSEcgPJ1XFO/MI4ywZDcUSV5vIM6d80dKwgKtGeUNo87uv3TEb+vkyj67rrpPsUsbe391TPy9fBuzWekHSnvmjj36cVr1KJoZCaYGXqSeUfdPrX72w7YYj5kk2dea0CSsF2e0rXrRFvryjaCNry5ut3CVFRL5b0/UCzkCGG09NTbt++zfn5hmVSIjs9PWWxWDD0qjS6Smc9fZ6+69PnCeWzWqvLTSbfeAR6cNT1ohyD3cSaT7T3HmNN6dBnyEAZjYo
BWx0QfaR3fZobkHTVdRux9lFisImvAMGOYwwE67BVhTGWqlklBwhS5Wo5Pj6m77ecnj5kGDwRhY+BN954E9Q4On337mvjF1NpotIYrVHR49ww+1y7AzIxRoYg0M9M3jFl3Aud2ZhgqNJkE/gBsqvI8xNaa77ru77rgiThF7/4RX75l3/5ivbqvROnp6dPfI73nq985Sv85//8n9+BPfrG4ql4uo/TRki/zH8fn5X+FTrVo9jwBb9M1eiY6LL5pEmuDmds1hs2W3HotbZGF76sLthsVQlGrLWm62RsN/uQDcNAn1gKmX6V9gIgLfFNGpZYJcaCKHzlm820M6+1TmO6trg3gNwIQhpjFnUtRWUbrK3Q1qIrizKGyjYYXSUXCItNXmJ919K25wxDmyQpG+q6YdEsWSxW1HVD0yxQ2rJYrahszWq5hzGW5V7N//25/4eIZnCuMC/ycQJGNobVWKtAebSeYN6XhDFGVMNE6Yaox/MtGmyX9Me0QltN1BMMOURUjDNY4nmIj370o3z/93//Ve/GMxvDMPArv/Ir7/ob2FN7pD1qJFgET8aKUDGvAGUDyZtrh26mVC6JVbIFH1+r9Kg/kOlZbhiEp5te65yj24qc4csvv8zZ2Rl937NaLYlRlUTpvC/L6PL+ijIIkRtFeZ/rumK5XBSpx5yg8n+X32Ak8o1CdHoiLngCGqUtWlusbagr0QduGknCVdWwaPalUagNpqqoq0WBLbSWhF3ZJWDKPpls8a7Enj0PWJyvz/nqr/83nHcYay7ap4PQvZQqjAKVR4wjF9S/iuhRquRNvuHEHQw3ymcukEJK0D76kd1AStAB9HNU6BojI+a7cffuXc7Pz69gj95b8YlPfOKJz/md3/kdfumXfukd2JtvLp6sp1u63RdFxi9PPHFe0D5JGCVfrWhUGu0t1VBpuIRkuSP/CkVNkure3p5Yjfd9quYim806TXcNaKXQCuJ0fBVR4Kpri/MdRlfEqIqFe37WcrksTbIyfZUfzctvY6QZlSAKociNn0vFCEnsxlY6JW3NMAw0zUqaaSoPH2gEWha9BWNy7WgIUY6DweAze0AprFZYDfv7+2Il5D3HDx6y3W546cWXeP3114hBHJAzFzoCQYlGbvkcUY65Tk3G6enK8ICHbBIyDsDEicZxiJPni9GohmKAmc9pVKCsIfpnP+saY7hz5w537tzhB37gBy48/nf+zt/hF3/xF6+vLoLmAAAgAElEQVRgz95b8d//+3+/6l14y+KxSVcSoPw8nUjKUcY+L0ms42OxcDsLRXNadRUIQ0RetJaZfJ27MkoTAlKpescQInXCf21Khi+88IKwFLThxo0jXrv7Ci/cfoG+F3zSpgSWBz26rhOLnv190ZbwgUVTzwTSjTFst9sCTWRmQZ5Gy59D68Rd1RotpR8xkmx3As5FlBpQ1sggRrXAJmdg0nLeeY9O7ITgO2FEKKFgKUySRRwnyvKx01oTgysQR1VXDMOAtRV1mnjzLqAjpUKPId0MJvoKGZ8lJ0Q1PzfyHEVAuLomjiuRGaNFCcavVJKRLEczn3dkVZRf8+5l9XzTobXmwx/+MIeHh/zZP/tnL33O1772NV577bV3eM/ee/E93/M9T6SAbbdb3nzzzXdoj765eILK2IjpZuvw3YsxL1fLdaemiEWufJh15HdjOoU2ymrP8eTgxZpcGaF6yQiuJL/tdpsaYBVd3+H9wHqTZBuNpa4qrJVBhTx1ljFYwY4pVawkVoUxkqSqqpoNRUDmLfvU+Bs/r1IarQx5xDmEfAyFilWlyboQDcZYXBppBuj7NlXUPnm2SeNJG4MKAe+zaaTCe9A6D5xk2yRF3TR0fZ+aYSrdiLTQ9jK0k0evJ+cwU7es0gIBkbnBYYSWEBKg1pqYmnRT7HeK75cR4vG0FmaDQBqzYvmZC6UUn/rUpx6ZbAF+67d+iy996Uvv+pHVd0P8m3/zbx4vMwq8+uqrfPnLX36H9uibi6fSXpAL9uLfS5Wal9RjriRPbeXRUKESTZafaroMlxdKZRzK71oblDEMzuFCxHshJnknPmX7B4cYYzg5OUk2PTXnZ2sqW+OdxzsnoHUlPmmRkHRWzGiAiUAPfd+VBJaHD7Km7kyzQElVrrXADRlakPJ2Qp1TeRIv3TSCx3stFWIwuDQ+ncdzMydXRN8pmHMkpPeUm0yMuZmXWldKqti6rtlsNsQYOT05FQaFVrzw0h3uvvqKrDjy2G46FzFObqz5TOQBByhiRTKllqCHGNHGUKx31CSB56fnfJuq56JopijPlZe++/V0X375Zd7//vd/Xa/RWl8KJUzjn//zf86//tf/+pvZtet4j8YThiNynpRL8mITbJZn03JVzRJvAg5LNRxL4lUo7WevHv/VoCxKW1BKBFsSrmytJQJt20IUpaG+79Fas7+/z/HDh+zvHTEMLlWsstzVCQKQ6z7pDyhdEqiI39QFRpEQ7moWOo9RxoSNluQpn8Oj4oj1yrCEA0z5TCF4lI+4GInGE4JLmK0VuyGVOMMKsYFPNwOtkrbujMlhSxWulLmEdqXwrqdrN1hruHnzFm/cfZ0wdOWkqnyuUqbNMEMkopUmxCBTZUoxMrEneEFewZBPd77pyF8zdkw65yF9LYQqloeL391x+/ZtPv7xj/Nt3/ZtfPSjH31Lt/3Vr371XTui+m6LH//xHy8rzWclnurTzIgIOzSyIlJTRklVWWLmCxCmEMN0y5JU5mFGSlYyM5ySnYP3EGMRFs8VqzABLFVdi+pXlMpsajeklUmVNLNxW5mQg8WiSZivDC9EItt2TWUXSdhGKFMxVbVaS6soxlBUxmLUaZQ4wxUKCFKpJn3hiKdSNWCK0lRIFb5ODbsQAyZDFUqOdD7uu8v6jFWfn5/Ttl061oIte+cTde0SvD2fx8k5CcnRoSADSqARM95JS8rNxesE1Gc3NJN8nBL9u3lE8+joiM985jO89NJL/LE/9sfe8u1/9atf5W/9rb/Fv/23//Yt3/azGD/5kz/5/CXdKY67SxvLF7zRkjylATYmXZUaK7vKZNJUc8Swu7xMfF90avUwuZDlyvXOoyv5PVe8OXEppdJwgUATVbK4yfsvS+6RY5uTZX48J845JUymwkTPVqGC/F1cbl06hJkyp0rDzftBKusoeKgPHqMU2oiimc7L+DzpZUZjTqXAKBn91aYiD5rIe3icU+Uz5+PqnOPk5JgbR4cMwzhdd36+LjegPEV2MeVN3CJ8HLm4TJtsF74Z5dSMcEK8JPHKmPHkvvGujYODA/7Mn/kzfMd3fMfbsv3f/M3f5G/8jb/Bv/t3/+5t2f51vDfiCW7AYzdfqTj5mUSkr0qzR2tN9IHImKBHrQXwycGg0M3QaUYt6TSki9UHBUZJlacNcdoEUhRjRVAYrdlutywWYrOe6V15ui1f5NrkijRhoelGksd9cxIeBWfEF622Fd4FgvEsl8s0KisfyCuP8mCK1fxUR9ajtEJHmRizuhpZGdpOxghiElLXiQOrCGkJnsVyYpBxWdG58HivChaqtFTc3nliVHRDz+
///v+m71uGYSBG2Gw2SSGMGaaaul1yvlIlGnL2nDTBVPr5suq03EbV5Bcl3Ahyg7Vk23mlDe+OitcYww/90A8BIhV6mWfZWxG//du/zU/8xE/wH/7Df3hbtn8d7514AqZrRngg+FQNupRUDdpYtNJpNFWL/1XiamqtMdaQSfg+VAmXFLUsotiUh9xsyQleifpXdj3IYjuJb4ROFaH3oraVNRNGjYSsY1ulqjkJ7UwaXLkrnxkIU6ihaRoGJ/hnCAGrKxRSURtjMEoaXpmOZerJYIiSilZrlexodBrAcCyXMj7snYh+ayI+DAkeMYA8N+pxrivjwzatJJQesfX8GVTi7QYvxpZvvP6aOB+vT8VQ0w3F7DJXy5FxODs3u0qDbQrdppj+nif5ynZ08f0pGHBUSTQnJfOo9KzIfTck2x/7sR8DZP+//du//W17n1dffZW//Jf/Mvfv3+dXf/VX37b3eRbjH//jf/xUegv379/n3//7f/8O7NFbE4+HF7QWo8QYaZLweIwVSkuVaZKgeFOL/XlwQ9FZlZfLpa21iJ24YUiDDrKs72NLdBGUSU22IAnX2FJ9OicXtEGWvLmhVuxrokpVnQi4HBwc0XU9IEI9MTiWxqSmmRIWwyRpFUw10ciGYZCGktZ4F6gXkgxRitXekqF3BV+Oei7qHmJI2mqjgaQk1ZFXq7VIODZNVfYpy0IWCCStHASiiKKrG03SEbYzKh/pszx8+JA37r5Gu9nygQ98gLuv/SGKSPChVPZT1ohSKpssj0kwc7nY+XuMJdlmGGImPTlhh43pVJK8RuEnsMP0u3EZffCdireros1xfn7OF77wBdq2vZZs/Abje7/3e58Kz91ut7zyyivvwB69NfHYT9QslmUM11pL0yxKkybTnJaLFcbK8nnoOpTKRoiC5+bXa22wVipS55ws5a0ngIjLBEAZrK1K0p1rvkpCU0qjjMZoK64QvePoxmFaFU8qTu+RnrvCuYAx8ybgVDth1KwVStlqtaJt28Ttlb+bSnQctJaKeIRO4phQ1ahrMCYoyDbuWjUCE0SXYBhpsAFlGi4nspyI0ZNBgrT0F5hiFB53zjEMXSLa++Kh5mKYYaxj00tWJShp3GWebqag7SbDSMLqQyysjby9AiHE8W+pvSrHbuc7NUvW77H40pe+9NTTY845fvM3f/Nt3qNnN37+53+eF1544ap3422JxybdxWKVKhxJVHurQ5TR9P0gOKqtqCqZrhqGDj9xFciJaJq0VdIIEFeEQJ8q1BiDLEG1wdg6UaJG80qljDSUoheFqxjRwXN8/IAbN6R67bqOumnwfqBttzJooFOVHDxT94ssdKO0NL5UWgtn+tVms8Y7UI3Cx2FC3fLivpAoZ7IUlypYoIDEUggK2WhM+OuYTPu+p26EN2xtVfYnJ1BRFItpAANU1BirRv4uAa2yrbpO22x5/Y3XOT87pW23/J//83+obA0uYqtK6u5cnWZiX2JAlCnDafItjBNhLeSGZjQjVquUIvrEw9aZvTLHbjMzImPQu4M17/b4uZ/7Of7e3/t75fe7d++yXq+vboeeo/jkJz/5zLEWcjz2Uy2XS/KwgFKKw4NbRCVJtKkXaTy3pe86urbF9X1JuN772VBB3/flgnPO0fWtDDD4gCdgqopFZYu7bJleSktyIeSDDWk5bg1939O2W7quZblc0XXdZOmLiMwkSKFoFZQTGUt1nEdjgaRnq0SHd2jlcRXHVXfRcJDps0zfGqGECUVNi9tFDGCsFYWxBLlUlSH4iItuhjNPk5IxtgxfqFRJG20wWYgdcSzetlvW61OsFfra2dkZm03LaikOFmIh5GbbLnDDJRXpjFIGIwySjlJZgUg2H8eBpX4v28zbysdnl274bo5/+S//JX/lr/yVp5ITvI7r+HriiY00rTTBC1vBucBi1RTqUdttOT87ZWjbZI8ulKvMq50ul/N/4gYx4PxA5wL4AEZL0ymA9lJJxhiSfsFUONtDWnavVqtSdWw2G5bLParkHyZVYkCnxpTCoHXEWE3Eo5RGG0lgMvXlsbbGGltW4y64NCac9A5iLPujNZgyLBtnnxFGoeqmaVBKU1V1GR02JunsGktVGZzrC5QgTcFR21cp8VzLjTaUJmKISpXz4r3nj/7oD/ja//otTk8fJAt4xwsv3GZ99jDhsYknEsLspgCZjSYwSIYJ5glSqumsEibNO+RYkGAHGV8bhy6g8HoVFyGGd3P86q/+Kl/4whfo+774913HOxtf+cpX+MhHPnLVu/G2xeNFzENkSB32EAKb7RpSswcF52cndNs1wUtzKfuGwWg/ki/g3OyS57nUsJJlftYEyEnJpySPUsnZQZbjVWXRyDK4rutU6bZs1mcslnvcSEk3hEBVG2LUaakuU1beBXQSvgkeFDFNrJmUcGOBDlQAH32BIiBV30aSXkxW60mRoHzOXJnnat/asUGW4RZTWSHKaYWthepGOXapiVcwXnlfwboztU6OwRAC63VbVhTHxyf4oaPvWz7wgQ+x3pwx+A5jNM6N1X6BGdLyP+a8OaMppPM46ZLlSbOoYpnumyLFOXErFF6PThKPGvd9N1S8p6envPzyy+X3EAJ931/hHl3H4eHhE7UWcty7d4+f/dmffXt36C2OJ6qMTW3VjXFs23VphHXtluj6kkx3/4V54yQn5ayhMEqrJPzQqEnCmi+3ReYwYpcVzvmSmEIYWK/X3Lw9JntjVREfN0oXKUStTRrkGO10ckWZbRBiGiQgOJTJzTypUuu6JtPlAhGrTRK4EYaCnuCkeSgjwyy58RZCwE6S85SxMI9cPYveQggDKlRp+EKVZb4bBn77t34L73peeulF7r/5Ol235cHxCV3vCGFTqvCL75EpXZNfIAmMJ9bJDoWsnLFMQSs/z3Z9Qj17dGK96mZa13XcunVrpt52He+tyNfYeykem3Tbti14ozjr9gxeaGHr9ZowOHTiHeVKbppcpxfVMAzzx9Plm5/T+cTfRWOtkRFbNUo4hpgTbSz7lHHUvK/OSTWc8cuc7CUBz5fVOTJHN8SRPaCVhoShajsKhiulS7Up+gtg7GRkdgIv5KW8mUyaxRjF6DK5SIyVsSuVaH4u5NcmF97iQBFxLmlBOM9me85yteC117a8cfcu280xMSoePnzIzaN9Th4+RBNnDsOzCnM62FD+pOYshUIpU2MFPPmsOeL0+VGw65hm20b22SX7cIXxXrtgn/X4er8XV33j/kbisUk3c2611mw2G/b29ooW7TD0JemOUoZjdbubdNEyOeW8S+vQiI/j80kNr+12k/Rga6oq45lgtHB5jRl5rU0jWgmr1arspw8OQ03XdVKdJoaA8HHjbJmdYZMYI1VlisKW0qokzUDAKpu2PxEXLw3GEcfM2516pSnl0/HxkMTLbdXgXJhM0TUMoS0OwCN/Nw1vqPw+JHzb4J3DDT1vvPEGxw8fcL5eMyRRm5BGlg8ODjg9OcEkCp1CuLlx9r0es25JoBERGNcKJtS9qUg5TBgRatxWwYwJAtUERSYEj6sWM/u+XMd15PjlX
wVQsWWRGkFkHdkoBSFyie0KPViRW3KHQOhT10o713XT+fnnI9BkS/J+lhZweePU0h5eoE1zaSsYCJmZUIiY0AMCDeWAEuJ33zx20K4BUfw/Plz6vN3gvHa9IKUEj/6+GO8evUqtM9JS83oFpVpS/mDzCXAMiPQo85kPvr0sj1i3v4nJ728cotzuvkGmvceXdfFzRuucgOC3OrwMHb61VrDGBMfz80rmQB47Ozvq9YkXzzJ5KS7TsC8AaeUQqWaQTScyoDJgYz8F2iikEqFnHhO+LTkH08mmM/nmEwmqWW8zzpFZFEu22mmUt2hdGvwuw9dlm0P0/fobY9Oa3RGQxuNru1C+mB4o24bbUwhdF0Xzy0T8nqEy9+N6XHp0qUj5dDbxu7uLh4+fLjtYRSccLyxG3DTNPjz11/jJz++AtPrWFbgLIn8hRdh5zpJqYAQYWWkW68VV6i1dIIQAlUo5/XBBNxZCwhaylPhQzJSz4sKckWDkFRA0TQNutAUcjIZU4cFa9E0VZSDsXetdwJNU0PrLh63GY2wWq3QsH5WBWKULpBnDSnTJMKkqmRIpQRFgayo6y8pE1iVEfxbBbt6VVCSxHfOh47AUsJ5Sw0tvYubUdY69H3IcQtuvBzSA1zk4BG9MqKaLOpsqdtHlADGScxSKsHSGLwLhNznES5FtOzy5lwWa3OqCYnwhafrwwtAOgHnLerxGH/80x8wmUz+pwv3XWA6neL69evw3uPOnTvbHk7BCcVrQw3vyW5wd3cXo+k05Cypt1ldVZkAN0VTXIGWR1X8Ic1JkpF7GNhQYOHD80WlovyMSTrPG0f3sDWbybquo/TLGIPDxSG6toUxBsvVEtbZ2BGDIuMKJIaTaJpRbOc+nU4hGzIdz6Vk1lr4oH4g+VeTRbFV3BSrqxpSKMBLKFkBglIMHrSBxnlkLoagdENwB/OAsx59kL/1QfPati1MIDznj6YM+Iu312wQnHgPOEdpARNuvLHFN8OrgJYi2a7tSJWgOcKliJbzsdYmWRinFeIqhUldJLkdpMS5Dz7AhQvfP5Ji2jQePHhw7P3T6RQ3btzA/fv3NzyigsePH78XXTzeqhvwbDbDl1/+DtVoBB9yfUyiFL36wdL6dSmHHEyc3KHChQ9qb/ugi0gdiDmdwOMChh0sVivyyR2Px/SPBZIej8eDtjsvX77E3t4eEfByia4zsJY6/CpVAZ4Ivq5ZPqZQheU8bYZR8YFHGJOikoacbKWoIQRH9iJE4RJK1nHTkf4HCSmGBRZsKkTnR6DXPdq2xXK5xOFyCWN0POf5uWBQcQJFtjbkabnTBU806zci8y7c2pgq4O9aa+jexAj7uEac61pfNvphaZ3uDaSq8dVXv8eZM2e33q1hb2/vPxLrZDLB7du3cffu3Q2P6uTj4sWL+Oyzz47922KxSCZSJxhvJF2ANqu+++GH+PkvfgnnHIzWVP0V87CIRQlp6Tnc8V6XHfFjBtravoc2JpYae+9RBVJumjqSIUvF+IM+nU4xm82ws7NDFo5NA2ctuq7Dt98uYsqgbVucPXsW8/k8i8xdjKKVqgcbZ4l4ZSJKIQaWlXQXRa1cXUuROaUTeutilEmvmXqWkYqBjyupKadIuW2tNRYL0hV7AKpS1CsNwxLb9XytCxVhFNXaQXUY51zzvCwTbU6y/Hh+X1NEezRXm7brAnwohomVw7Ri+tW9X+P8+fPhHG/fS/fg4AD37t079m+j0Qg7OzsbHtHJR13XmM1m2x7GVvHW28eTyQRXr17FdDrFJ7d+hjOzGazro+ieiyJMcBc7KhFLm19sQM4376mSbVw3EKEyrO/p2Dzz9dnPknuKSVqWd6s2Vp1NRmNSMIDImjfsqqqCsxbLxRJ7/9rHctxiNBqhbir0zqBWFeqaWhMBZIVYx2aZgPMpsleVgjUuFigIyY5hIWUgUscL56lcVwgB4SxUTV11lWogISGRiFdK8txFKNU1WuPli3/iH8//jlcvXkB3XdrIHNaXJSIMqQTvkrlMnubJJ7xk25kmhXzD0zry2YX3oaSNfMmcEKgg0IdcPryHCsPhkTnvgWAQ1HUGnz78FFeuXImrmrfoiboRaK23PYSC9wxvlV4AiDRPnz6Nq1d/ir/89W/43oUf4JuDQ9jexZJgH56jwu5/bsrNOdH0oUOUdklJuVRVqUgedVWhFjKQYPKezY8Zo2WVcr1d12G1WkEqiaZpooQs32yTUmE6PYXRaBKaJJKS4NSpGZpmjLoeQ6kqmo4rpVBX1HGYx8uRqvNDVUY0rMmUD/zasqqDi1qoeqtqSO5EAawdh/9nVm4QgUbSDcoD51MFnrMefe9ilGoztUZMEwTPCo5g2cGM87SDohYkXTJtlvXwgdUtPFUsO8ojOEEbeXwNWA9o7fHRRz/E06dPce3aNczn82OvrYKCR48e4dmzZ9sexkYg1nNxBQUFBQXvDv8fa7yCgoKC9wSFdAsKCgo2iEK6BQUFBRtEId2CgoKCDaKQbkFBQcEGUUi3oKCgYIP4N6SqeECFO87qAAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "# 训练数据集\n", + "train_dataset = PetDataset(train_images_path, label_images_path, mode='train')\n", + "\n", + "# 验证数据集\n", + "val_dataset = PetDataset(train_images_path, label_images_path, mode='test')\n", + "\n", + "# 抽样一个数据\n", + "image, label = train_dataset[0]\n", + "\n", + "# 进行图片的展示\n", + "plt.figure()\n", + "\n", + "plt.subplot(1,2,1), \n", + "plt.title('Train Image')\n", + "plt.imshow(image.transpose((1, 2, 0)).astype('uint8'))\n", + "plt.axis('off')\n", + "\n", + "plt.subplot(1,2,2), \n", + "plt.title('Label')\n", + "plt.imshow(np.squeeze(label, axis=0).astype('uint8'), cmap='gray')\n", + "plt.axis('off')\n", + "\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "d9JyZz3ZEnQ1" + }, + "source": [ + "## 4.模型组网\n", + "\n", + "U-Net是一个U型网络结构,可以看做两个大的阶段,图像先经过Encoder编码器进行下采样得到高级语义特征图,再经过Decoder解码器上采样将特征图恢复到原图片的分辨率。" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "wi-ouGZL--BN" + }, + "source": [ + "### 4.1 定义SeparableConv2d接口\n", + "\n", + "我们为了减少卷积操作中的训练参数来提升性能,是继承paddle.nn.Layer自定义了一个SeparableConv2d Layer类,整个过程是把`filter_size * filter_size * num_filters`的Conv2d操作拆解为两个子Conv2d,先对输入数据的每个通道使用`filter_size * filter_size * 1`的卷积核进行计算,输入输出通道数目相同,之后在使用`1 * 1 * num_filters`的卷积核计算。" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "0c-FikH-A4qP" + }, + "outputs": [], + "source": [ + "class SeparableConv2d(paddle.nn.Layer):\n", + " def __init__(self, \n", + " in_channels, \n", + " out_channels, \n", + " kernel_size, \n", + " stride=1, \n", + " padding=0, \n", + " dilation=1, \n", + " groups=None, \n", + " weight_attr=None, \n", + " bias_attr=None, \n", + " data_format=\"NCHW\"):\n", + " super(SeparableConv2d, self).__init__()\n", + " # 第一次卷积操作没有偏置参数\n", + " self.conv_1 = paddle.nn.Conv2d(in_channels, \n", + " in_channels, \n", + " kernel_size, \n", + " stride=stride,\n", + " padding=padding,\n", + " dilation=dilation,\n", + " groups=in_channels, \n", + " weight_attr=weight_attr, \n", + " bias_attr=False, \n", + " data_format=data_format)\n", + " self.pointwise = paddle.nn.Conv2d(in_channels, \n", + " out_channels, \n", + " 1, \n", + " stride=1, \n", + " padding=0, \n", + " dilation=1, \n", + " groups=1, \n", + " weight_attr=weight_attr, \n", + " data_format=data_format)\n", + " \n", + " def forward(self, inputs):\n", + " y = self.conv_1(inputs)\n", + " y = self.pointwise(y)\n", + "\n", + " return y" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "zNyzlqQmBEEi" + }, + "source": [ + "### 4.2 定义Encoder编码器\n", + "\n", + "我们将网络结构中的Encoder下采样过程进行了一个Layer封装,方便后续调用,减少代码编写,下采样是有一个模型逐渐向下画曲线的一个过程,这个过程中是不断的重复一个单元结构将通道数不断增加,形状不断缩小,并且引入残差网络结构,我们将这些都抽象出来进行统一封装。" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "OpUi9VUeGmXp" + }, + "outputs": [], + "source": [ + "class Encoder(paddle.nn.Layer):\n", + " def __init__(self, in_channels, out_channels):\n", + " super(Encoder, self).__init__()\n", + " \n", + " self.relu = paddle.nn.ReLU()\n", + " self.separable_conv_01 = SeparableConv2d(in_channels, \n", + " out_channels, \n", + " kernel_size=3, \n", + " padding='same')\n", + " self.bn = paddle.nn.BatchNorm2d(out_channels)\n", + " self.separable_conv_02 = SeparableConv2d(out_channels, \n", + " out_channels, \n", + " 
kernel_size=3, \n", + " padding='same')\n", + " self.pool = paddle.nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n", + " self.residual_conv = paddle.nn.Conv2d(in_channels, \n", + " out_channels, \n", + " kernel_size=1, \n", + " stride=2, \n", + " padding='same')\n", + "\n", + " def forward(self, inputs):\n", + " previous_block_activation = inputs\n", + " \n", + " y = self.relu(inputs)\n", + " y = self.separable_conv_01(y)\n", + " y = self.bn(y)\n", + " y = self.relu(y)\n", + " y = self.separable_conv_02(y)\n", + " y = self.bn(y)\n", + " y = self.pool(y)\n", + " \n", + " residual = self.residual_conv(previous_block_activation)\n", + " y = paddle.add(y, residual)\n", + "\n", + " return y" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "nPBRD42WGmuH" + }, + "source": [ + "### 4.3 定义Decoder解码器\n", + "\n", + "在通道数达到最大得到高级语义特征图后,网络结构会开始进行decode操作,进行上采样,通道数逐渐减小,对应图片尺寸逐步增加,直至恢复到原图像大小,那么这个过程里面也是通过不断的重复相同结构的残差网络完成,我们也是为了减少代码编写,将这个过程定义一个Layer来放到模型组网中使用。" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "ltVurq8OGvK7" + }, + "outputs": [], + "source": [ + "class Decoder(paddle.nn.Layer):\n", + " def __init__(self, in_channels, out_channels):\n", + " super(Decoder, self).__init__()\n", + "\n", + " self.relu = paddle.nn.ReLU()\n", + " self.conv_transpose_01 = paddle.nn.ConvTranspose2d(in_channels, \n", + " out_channels, \n", + " kernel_size=3, \n", + " padding='same')\n", + " self.conv_transpose_02 = paddle.nn.ConvTranspose2d(out_channels, \n", + " out_channels, \n", + " kernel_size=3, \n", + " padding='same')\n", + " self.bn = paddle.nn.BatchNorm2d(out_channels)\n", + " self.upsample = paddle.nn.Upsample(scale_factor=2.0)\n", + " self.residual_conv = paddle.nn.Conv2d(in_channels, \n", + " out_channels, \n", + " kernel_size=1, \n", + " padding='same')\n", + "\n", + " def forward(self, inputs):\n", + " previous_block_activation = inputs\n", + "\n", + " y = self.relu(inputs)\n", + " y = self.conv_transpose_01(y)\n", + " y = self.bn(y)\n", + " y = self.relu(y)\n", + " y = self.conv_transpose_02(y)\n", + " y = self.bn(y)\n", + " y = self.upsample(y)\n", + " \n", + " residual = self.upsample(previous_block_activation)\n", + " residual = self.residual_conv(residual)\n", + " \n", + " y = paddle.add(y, residual)\n", + " \n", + " return y" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "vLKLj2FMGvdc" + }, + "source": [ + "### 4.4 训练模型组网\n", + "\n", + "按照U型网络结构格式进行整体的网络结构搭建,三次下采样,四次上采样。" + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "an1YFILpG4Xy" + }, + "outputs": [], + "source": [ + "class PetNet(paddle.nn.Layer):\n", + " def __init__(self, num_classes):\n", + " super(PetNet, self).__init__()\n", + "\n", + " self.conv_1 = paddle.nn.Conv2d(3, 32, \n", + " kernel_size=3,\n", + " stride=2,\n", + " padding='same')\n", + " self.bn = paddle.nn.BatchNorm2d(32)\n", + " self.relu = paddle.nn.ReLU()\n", + "\n", + " in_channels = 32\n", + " self.encoders = []\n", + " self.encoder_list = [64, 128, 256]\n", + " self.decoder_list = [256, 128, 64, 32]\n", + "\n", + " # 根据下采样个数和配置循环定义子Layer,避免重复写一样的程序\n", + " for out_channels in self.encoder_list:\n", + " block = self.add_sublayer('encoder_%s'.format(out_channels),\n", + " Encoder(in_channels, out_channels))\n", + " self.encoders.append(block)\n", + " in_channels = out_channels\n", + "\n", + " self.decoders = []\n", + "\n", + " # 
根据上采样个数和配置循环定义子Layer,避免重复写一样的程序\n", + " for out_channels in self.decoder_list:\n", + " block = self.add_sublayer('decoder_%s'.format(out_channels), \n", + " Decoder(in_channels, out_channels))\n", + " self.decoders.append(block)\n", + " in_channels = out_channels\n", + "\n", + " self.output_conv = paddle.nn.Conv2d(in_channels, \n", + " num_classes, \n", + " kernel_size=3, \n", + " padding='same')\n", + " \n", + " def forward(self, inputs):\n", + " y = self.conv_1(inputs)\n", + " y = self.bn(y)\n", + " y = self.relu(y)\n", + " \n", + " for encoder in self.encoders:\n", + " y = encoder(y)\n", + "\n", + " for decoder in self.decoders:\n", + " y = decoder(y)\n", + " \n", + " y = self.output_conv(y)\n", + " \n", + " return y" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "6Nf7hQ60G4sj" + }, + "source": [ + "### 4.5 模型可视化\n", + "\n", + "调用飞桨提供的summary接口对组建好的模型进行可视化,方便进行模型结构和参数信息的查看和确认。\n", + "@TODO,需要替换" + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 1000 + }, + "colab_type": "code", + "id": "1_MXfWkZeSdE", + "outputId": "4c9870de-9eb6-47e8-e88c-79509ef78cf5", + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "--------------------------------------------------------------------------------\n", + " Layer (type) Input Shape Output Shape Param #\n", + "================================================================================\n", + " Conv2d-38 [-1, 3, 160, 160] [-1, 32, 80, 80] 896\n", + " BatchNorm2d-14 [-1, 32, 80, 80] [-1, 32, 80, 80] 128\n", + " ReLU-14 [-1, 32, 80, 80] [-1, 32, 80, 80] 0\n", + " ReLU-17 [-1, 256, 20, 20] [-1, 256, 20, 20] 0\n", + " Conv2d-49 [-1, 128, 20, 20] [-1, 128, 20, 20] 1,152\n", + " Conv2d-50 [-1, 128, 20, 20] [-1, 256, 20, 20] 33,024\n", + "SeparableConv2d-17 [-1, 128, 20, 20] [-1, 256, 20, 20] 0\n", + " BatchNorm2d-17 [-1, 256, 20, 20] [-1, 256, 20, 20] 1,024\n", + " Conv2d-51 [-1, 256, 20, 20] [-1, 256, 20, 20] 2,304\n", + " Conv2d-52 [-1, 256, 20, 20] [-1, 256, 20, 20] 65,792\n", + "SeparableConv2d-18 [-1, 256, 20, 20] [-1, 256, 20, 20] 0\n", + " MaxPool2d-9 [-1, 256, 20, 20] [-1, 256, 10, 10] 0\n", + " Conv2d-53 [-1, 128, 20, 20] [-1, 256, 10, 10] 33,024\n", + " Encoder-9 [-1, 128, 20, 20] [-1, 256, 10, 10] 0\n", + " ReLU-21 [-1, 32, 80, 80] [-1, 32, 80, 80] 0\n", + "ConvTranspose2d-17 [-1, 64, 80, 80] [-1, 32, 80, 80] 18,464\n", + " BatchNorm2d-21 [-1, 32, 80, 80] [-1, 32, 80, 80] 128\n", + "ConvTranspose2d-18 [-1, 32, 80, 80] [-1, 32, 80, 80] 9,248\n", + " Upsample-8 [-1, 64, 80, 80] [-1, 64, 160, 160] 0\n", + " Conv2d-57 [-1, 64, 160, 160] [-1, 32, 160, 160] 2,080\n", + " Decoder-9 [-1, 64, 80, 80] [-1, 32, 160, 160] 0\n", + " Conv2d-58 [-1, 32, 160, 160] [-1, 4, 160, 160] 1,156\n", + "================================================================================\n", + "Total params: 168,420\n", + "Trainable params: 167,140\n", + "Non-trainable params: 1,280\n", + "--------------------------------------------------------------------------------\n", + "Input size (MB): 0.29\n", + "Forward/backward pass size (MB): 43.16\n", + "Params size (MB): 0.64\n", + "Estimated Total Size (MB): 44.10\n", + "--------------------------------------------------------------------------------\n", + "\n" + ] + }, + { + "data": { + "text/plain": [ + "{'total_params': 168420, 'trainable_params': 167140}" + ] + }, + "execution_count": 31, + "metadata": {}, + "output_type": "execute_result" + } 
+ ], + "source": [ + "from paddle.static import InputSpec\n", + "\n", + "paddle.disable_static()\n", + "num_classes = 4\n", + "model = paddle.Model(PetNet(num_classes))\n", + "model.summary((3, 160, 160))" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "j9Trlcvj8R7L" + }, + "source": [ + "## 5.模型训练" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "8Sskbyz58X4J" + }, + "source": [ + "### 5.1 配置信息\n", + "\n", + "定义训练BATCH_SIZE、训练轮次和计算设备等信息。" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "4fSkTiRB8OpP" + }, + "outputs": [], + "source": [ + "BATCH_SIZE = 32\n", + "EPOCHS = 15\n", + "device = paddle.set_device('gpu')\n", + "paddle.disable_static(device)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "x_vaedRa8eoy" + }, + "source": [ + "### 5.3 自定义Loss\n", + "\n", + "在这个任务中我们使用SoftmaxWithCrossEntropy损失函数来做计算,飞桨中有functional形式的API,这里我们做一个自定义操作,实现一个Class形式API放到模型训练中使用。没有直接使用CrossEntropyLoss的原因主要是对计算维度的自定义需求,本次需要进行softmax计算的维度是1,不是默认的最后一维,所以我们采用上面提到的损失函数,通过axis参数来指定softmax计算维度。" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "AEZq_jT78jNe" + }, + "outputs": [], + "source": [ + "class SoftmaxWithCrossEntropy(paddle.nn.Layer):\n", + " def __init__(self):\n", + " super(SoftmaxWithCrossEntropy, self).__init__()\n", + "\n", + " def forward(self, input, label):\n", + " loss = F.softmax_with_cross_entropy(input, \n", + " label, \n", + " return_softmax=False,\n", + " axis=1)\n", + " return paddle.mean(loss)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "rj6MPPMkJIdZ" + }, + "source": [ + "### 5.4 启动模型训练\n", + "\n", + "使用模型代码进行Model实例生成,使用prepare接口定义优化器、损失函数和评价指标等信息,用于后续训练使用。在所有初步配置完成后,调用fit接口开启训练执行过程,调用fit时只需要将前面定义好的训练数据集、测试数据集、训练轮次(Epoch)和批次大小(batch_size)配置好即可。" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 51 + }, + "colab_type": "code", + "id": "m-cVyjNreSdO", + "outputId": "9b37dd07-746b-41cc-c8e2-687a83b1ad75", + "tags": [] + }, + "outputs": [], + "source": [ + "optim = paddle.optimizer.RMSProp(learning_rate=0.001, \n", + " rho=0.9, \n", + " momentum=0.0, \n", + " epsilon=1e-07, \n", + " centered=False,\n", + " parameters=model.parameters())\n", + "model = paddle.Model(PetModel(num_classes))\n", + "model.prepare(optim, SoftmaxWithCrossEntropy())\n", + "model.fit(train_dataset, \n", + " val_dataset, \n", + " epochs=EPOCHS, \n", + " batch_size=BATCH_SIZE)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "-mouwS1kJRqJ" + }, + "source": [ + "## 6.模型预测" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "Dvjxu91DJd1G" + }, + "source": [ + "### 6.1 预测数据集准备和预测\n", + "\n", + "继续使用PetDataset来实例化待预测使用的数据集。这里我们为了方便没有在另外准备预测数据,复用了评估数据。\n", + "\n", + "我们可以直接使用model.predict接口来对数据集进行预测操作,只需要将预测数据集传递到接口内即可。" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "Ur088_vjeSdR", + "tags": [] + }, + "outputs": [], + "source": [ + "predict_results = model.predict(val_dataset)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "-DpAEFBSJioy" + }, + "source": [ + "### 6.2 预测结果可视化\n", + "\n", + 
"从我们的预测数据集中抽3个动物来看看预测的效果,展示一下原图、标签图和预测结果。" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "1mfaFkO5S1PU", + "tags": [] + }, + "outputs": [], + "source": [ + "print(len(predict_results))\n", + "plt.figure(figsize=(10, 10))\n", + "\n", + "i = 0\n", + "mask_idx = 0\n", + "\n", + "for data in val_dataset:\n", + " if i > 8: \n", + " break\n", + " plt.subplot(3, 3, i + 1)\n", + " plt.imshow(data[0].transpose((1, 2, 0)).astype('uint8'))\n", + " plt.title('Input Image')\n", + " plt.axis(\"off\")\n", + "\n", + " plt.subplot(3, 3, i + 2)\n", + " plt.imshow(np.squeeze(data[1], axis=0).astype('uint8'), cmap='gray')\n", + " plt.title('Label')\n", + " plt.axis(\"off\")\n", + " \n", + " # 模型只有一个输出,所以我们通过predict_results[0]来取出1000个预测的结果\n", + " # 映射原始图片的index来取出预测结果,提取mask进行展示\n", + " data = predict_results[0][mask_idx][0].transpose((1, 2, 0))\n", + " mask = np.argmax(data, axis=-1)\n", + " mask = np.expand_dims(mask, axis=-1)\n", + "\n", + " plt.subplot(3, 3, i + 3)\n", + " plt.imshow(np.squeeze(mask, axis=2).astype('uint8'), cmap='gray')\n", + " plt.title('Predict')\n", + " plt.axis(\"off\")\n", + " i += 3\n", + " mask_idx += 1\n", + "\n", + "plt.show()" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "collapsed_sections": [], + "name": "pets_image_segmentation_U_Net_like.ipynb", + "provenance": [], + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3.7.4 64-bit", + "language": "python", + "name": "python37464bitc4da1ac836094043840bff631bedbf7f" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.4" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/doc/paddle/tutorial/cv_case/image_segmentation/pets_image_segmentation_U_Net_like.rst b/doc/paddle/tutorial/cv_case/image_segmentation/pets_image_segmentation_U_Net_like.rst new file mode 100644 index 0000000000000000000000000000000000000000..7b96f98bf48859608a92272cafb92f309b37d4ec --- /dev/null +++ b/doc/paddle/tutorial/cv_case/image_segmentation/pets_image_segmentation_U_Net_like.rst @@ -0,0 +1,703 @@ +基于U-Net卷积神经网络实现宠物图像分割 +===================================== + +本示例教程当前是基于2.0-beta版本Paddle做的案例实现,未来会随着2.0的系列版本发布进行升级。 + +1.简要介绍 +---------- + +在计算机视觉领域,图像分割指的是将数字图像细分为多个图像子区域的过程。图像分割的目的是简化或改变图像的表示形式,使得图像更容易理解和分析。图像分割通常用于定位图像中的物体和边界(线,曲线等)。更精确的,图像分割是对图像中的每个像素加标签的一个过程,这一过程使得具有相同标签的像素具有某种共同视觉特性。图像分割的领域非常多,无人车、地块检测、表计识别等等。 + +本示例简要介绍如何通过飞桨开源框架,实现图像分割。这里我们是采用了一个在图像分割领域比较熟知的U-Net网络结构,是一个基于FCN做改进后的一个深度学习网络,包含下采样(编码器,特征提取)和上采样(解码器,分辨率还原)两个阶段,因模型结构比较像U型而命名为U-Net。 + +2.环境设置 +---------- + +导入一些比较基础常用的模块,确认自己的飞桨版本。 + +.. code:: ipython3 + + import os + import io + import numpy as np + import matplotlib.pyplot as plt + from PIL import Image as PilImage + + import paddle + from paddle.nn import functional as F + + paddle.__version__ + + + + +.. parsed-literal:: + + '2.0.0-beta0' + + + +3.数据集 +-------- + +3.1 数据集下载 +~~~~~~~~~~~~~~ + +本案例使用Oxford-IIIT +Pet数据集,官网:https://www.robots.ox.ac.uk/~vgg/data/pets 。 + +数据集统计如下: + +.. figure:: https://www.robots.ox.ac.uk/~vgg/data/pets/breed_count.jpg + :alt: alt 数据集统计信息 + + alt 数据集统计信息 + +数据集包含两个压缩文件: + +1. 原图:https://www.robots.ox.ac.uk/~vgg/data/pets/data/images.tar.gz +2. 分割图像:https://www.robots.ox.ac.uk/~vgg/data/pets/data/annotations.tar.gz + +.. 
code:: ipython3 + + !curl -O http://www.robots.ox.ac.uk/~vgg/data/pets/data/images.tar.gz + !curl -O http://www.robots.ox.ac.uk/~vgg/data/pets/data/annotations.tar.gz + !tar -xf images.tar.gz + !tar -xf annotations.tar.gz + + +.. parsed-literal:: + + % Total % Received % Xferd Average Speed Time Time Time Current + Dload Upload Total Spent Left Speed + 100 755M 100 755M 0 0 1707k 0 0:07:32 0:07:32 --:--:-- 2865k0 0:12:48 524k 0 0:13:34 0:02:41 0:10:53 668k 0 0:12:45 0:03:06 0:09:39 1702k 0 1221k 0 0:10:33 0:03:25 0:07:08 3108k37 282M 0 0 1243k 0 0:10:21 0:03:52 0:06:29 719k0:05:53 566k0 1237k 0 0:10:25 0:04:43 0:05:42 1593k 0 0:09:46 0:05:28 0:04:18 2952k 1467k 0 0:08:47 0:06:43 0:02:04 1711k + % Total % Received % Xferd Average Speed Time Time Time Current + Dload Upload Total Spent Left Speed + 100 18.2M 100 18.2M 0 0 1602k 0 0:00:11 0:00:11 --:--:-- 3226k + + +3.2 数据集概览 +~~~~~~~~~~~~~~ + +首先我们先看看下载到磁盘上的文件结构是什么样,来了解一下我们的数据集。 + +1. 首先看一下images.tar.gz这个压缩包,该文件解压后得到一个images目录,这个目录比较简单,里面直接放的是用类名和序号命名好的图片文件,每个图片是对应的宠物照片。 + +.. code:: bash + + . + ├── samoyed_7.jpg + ├── ...... + └── samoyed_81.jpg + +2. 然后我们在看下annotations.tar.gz,文件解压后的目录里面包含以下内容,目录中的README文件将每个目录和文件做了比较详细的介绍,我们可以通过README来查看每个目录文件的说明。 + +.. code:: bash + + . + ├── README + ├── list.txt + ├── test.txt + ├── trainval.txt + ├── trimaps + │ ├── Abyssinian_1.png + │ ├── Abyssinian_10.png + │ ├── ...... + │ └── yorkshire_terrier_99.png + └── xmls + ├── Abyssinian_1.xml + ├── Abyssinian_10.xml + ├── ...... + └── yorkshire_terrier_190.xml + +本次我们主要使用到images和annotations/trimaps两个目录,即原图和三元图像文件,前者作为训练的输入数据,后者是对应的标签数据。 + +我们来看看这个数据集给我们提供了多少个训练样本。 + +.. code:: ipython3 + + train_images_path = "images/" + label_images_path = "annotations/trimaps/" + + print("用于训练的图片样本数量:", len([os.path.join(train_images_path, image_name) + for image_name in os.listdir(train_images_path) + if image_name.endswith('.jpg')])) + + +.. parsed-literal:: + + 用于训练的图片样本数量: 7390 + + +3.3 数据集类定义 +~~~~~~~~~~~~~~~~ + +飞桨(PaddlePaddle)数据集加载方案是统一使用Dataset(数据集定义) + +DataLoader(多进程数据集加载)。 + +首先我们先进行数据集的定义,数据集定义主要是实现一个新的Dataset类,继承父类paddle.io.Dataset,并实现父类中以下两个抽象方法,\ ``__getitem__``\ 和\ ``__len__``\ : + +.. code:: python + + class MyDataset(Dataset): + def __init__(self): + ... + + # 每次迭代时返回数据和对应的标签 + def __getitem__(self, idx): + return x, y + + # 返回整个数据集的总数 + def __len__(self): + return count(samples) + +在数据集内部可以结合图像数据预处理相关API进行图像的预处理(改变大小、反转、调整格式等)。 + +由于加载进来的图像不一定都符合自己的需求,举个例子,已下载的这些图片里面就会有RGBA格式的图片,这个时候图片就不符合我们所需3通道的需求,我们需要进行图片的格式转换,那么这里我们直接实现了一个通用的图片读取接口,确保读取出来的图片都是满足我们的需求。 + +另外图片加载出来的默认shape是HWC,这个时候要看看是否满足后面训练的需要,如果Layer的默认格式和这个不是符合的情况下,需要看下Layer有没有参数可以进行格式调整。不过如果layer较多的话,还是直接调整原数据Shape比较好,否则每个layer都要做参数设置,如果有遗漏就会导致训练出错,那么在本案例中是直接对数据源的shape做了统一调整,从HWC转换成了CHW,因为飞桨的卷积等API的默认输入格式为CHW,这样处理方便后续模型训练。 + +.. 
code:: ipython3 + + import random + + from paddle.io import Dataset + from paddle.vision.transforms import transforms + + + class ImgTranspose(object): + """ + 图像预处理工具,用于将Mask图像进行升维(160, 160) => (160, 160, 1), + 并对图像的维度进行转换从HWC变为CHW + """ + def __init__(self, fmt): + self.format = fmt + + def __call__(self, img): + if len(img.shape) == 2: + img = np.expand_dims(img, axis=2) + + return img.transpose(self.format) + + class PetDataset(Dataset): + """ + 数据集定义 + """ + def __init__(self, image_path, label_path, mode='train'): + """ + 构造函数 + """ + self.image_size = (160, 160) + self.image_path = image_path + self.label_path = label_path + self.mode = mode.lower() + self.eval_image_num = 1000 + + assert self.mode in ['train', 'test'], \ + "mode should be 'train' or 'test', but got {}".format(self.mode) + + self._parse_dataset() + + self.transforms = transforms.Compose([ + ImgTranspose((2, 0, 1)) + ]) + + def _sort_images(self, image_dir, image_type): + """ + 对文件夹内的图像进行按照文件名排序 + """ + files = [] + + for image_name in os.listdir(image_dir): + if image_name.endswith('.{}'.format(image_type)) \ + and not image_name.startswith('.'): + files.append(os.path.join(image_dir, image_name)) + + return sorted(files) + + def _parse_dataset(self): + """ + 由于所有文件都是散落在文件夹中,在训练时我们需要使用的是数据集和标签对应的数据关系, + 所以我们第一步是对原始的数据集进行整理,得到数据集和标签两个数组,分别一一对应。 + 这样可以在使用的时候能够很方便的找到原始数据和标签的对应关系,否则对于原有的文件夹图片数据无法直接应用。 + 在这里是用了一个非常简单的方法,按照文件名称进行排序。 + 因为刚好数据和标签的文件名是按照这个逻辑制作的,名字都一样,只有扩展名不一样。 + """ + temp_train_images = self._sort_images(self.image_path, 'jpg') + temp_label_images = self._sort_images(self.label_path, 'png') + + random.Random(1337).shuffle(temp_train_images) + random.Random(1337).shuffle(temp_label_images) + + if self.mode == 'train': + self.train_images = temp_train_images[:-self.eval_image_num] + self.label_images = temp_label_images[:-self.eval_image_num] + else: + self.train_images = temp_train_images[-self.eval_image_num:] + self.label_images = temp_label_images[-self.eval_image_num:] + + def _load_img(self, path, color_mode='rgb'): + """ + 统一的图像处理接口封装,用于规整图像大小和通道 + """ + with open(path, 'rb') as f: + img = PilImage.open(io.BytesIO(f.read())) + if color_mode == 'grayscale': + # if image is not already an 8-bit, 16-bit or 32-bit grayscale image + # convert it to an 8-bit grayscale image. 
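+                    # PIL grayscale modes: 'L' is 8-bit, 'I;16' is 16-bit and 'I' is 32-bit integer pixels,
+                    # so any other mode (e.g. RGB) is converted down to 8-bit 'L' here.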
+ if img.mode not in ('L', 'I;16', 'I'): + img = img.convert('L') + elif color_mode == 'rgba': + if img.mode != 'RGBA': + img = img.convert('RGBA') + elif color_mode == 'rgb': + if img.mode != 'RGB': + img = img.convert('RGB') + else: + raise ValueError('color_mode must be "grayscale", "rgb", or "rgba"') + + if self.image_size is not None: + if img.size != self.image_size: + img = img.resize(self.image_size, PilImage.NEAREST) + + return img + + def __getitem__(self, idx): + """ + 返回 image, label + """ + # 花了比较多的时间在数据处理这里,需要处理成模型能适配的格式,踩了一些坑(比如有不是RGB格式的) + # 有图片会出现通道数和期望不符的情况,需要进行相关考虑 + + # 加载原始图像 + train_image = self._load_img(self.train_images[idx]) + x = np.array(train_image, dtype='float32') + + # 对图像进行预处理,统一大小,转换维度格式(HWC => CHW) + x = self.transforms(x) + + # 加载Label图像 + label_image = self._load_img(self.label_images[idx], color_mode="grayscale") + y = np.array(label_image, dtype='uint8') + + # 图像预处理 + # Label图像是二维的数组(size, size),升维到(size, size, 1)后才能用于最后loss计算 + y = self.transforms(y) + + # 返回img, label,转换为需要的格式 + return x, y.astype('int64') + + def __len__(self): + """ + 返回数据集总数 + """ + return len(self.train_images) + +3.4 PetDataSet数据集抽样展示 +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +实现好Dataset数据集后,我们来测试一下数据集是否符合预期,因为Dataset是一个可以被迭代的Class,我们通过for循环从里面读取数据来用matplotlib进行展示,这里要注意的是对于分割的标签文件因为是1通道的灰度图片,需要在使用imshow接口时注意下传参cmap=‘gray’。 + +.. code:: ipython3 + + # 训练数据集 + train_dataset = PetDataset(train_images_path, label_images_path, mode='train') + + # 验证数据集 + val_dataset = PetDataset(train_images_path, label_images_path, mode='test') + + # 抽样一个数据 + image, label = train_dataset[0] + + # 进行图片的展示 + plt.figure() + + plt.subplot(1,2,1), + plt.title('Train Image') + plt.imshow(image.transpose((1, 2, 0)).astype('uint8')) + plt.axis('off') + + plt.subplot(1,2,2), + plt.title('Label') + plt.imshow(np.squeeze(label, axis=0).astype('uint8'), cmap='gray') + plt.axis('off') + + plt.show() + + + +.. image:: https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/paddle/tutorial/cv_case/image_segmentation/pets_image_segmentation_U_Net_like_files/pets_image_segmentation_U_Net_like_001.png?raw=true + + +4.模型组网 +---------- + +U-Net是一个U型网络结构,可以看做两个大的阶段,图像先经过Encoder编码器进行下采样得到高级语义特征图,再经过Decoder解码器上采样将特征图恢复到原图片的分辨率。 + +4.1 定义SeparableConv2d接口 +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +我们为了减少卷积操作中的训练参数来提升性能,是继承paddle.nn.Layer自定义了一个SeparableConv2d +Layer类,整个过程是把\ ``filter_size * filter_size * num_filters``\ 的Conv2d操作拆解为两个子Conv2d,先对输入数据的每个通道使用\ ``filter_size * filter_size * 1``\ 的卷积核进行计算,输入输出通道数目相同,之后在使用\ ``1 * 1 * num_filters``\ 的卷积核计算。 + +.. code:: ipython3 + + class SeparableConv2d(paddle.nn.Layer): + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=None, + weight_attr=None, + bias_attr=None, + data_format="NCHW"): + super(SeparableConv2d, self).__init__() + # 第一次卷积操作没有偏置参数 + self.conv_1 = paddle.nn.Conv2d(in_channels, + in_channels, + kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=in_channels, + weight_attr=weight_attr, + bias_attr=False, + data_format=data_format) + self.pointwise = paddle.nn.Conv2d(in_channels, + out_channels, + 1, + stride=1, + padding=0, + dilation=1, + groups=1, + weight_attr=weight_attr, + data_format=data_format) + + def forward(self, inputs): + y = self.conv_1(inputs) + y = self.pointwise(y) + + return y + +4.2 定义Encoder编码器 +~~~~~~~~~~~~~~~~~~~~~ + +我们将网络结构中的Encoder下采样过程进行了一个Layer封装,方便后续调用,减少代码编写,下采样是有一个模型逐渐向下画曲线的一个过程,这个过程中是不断的重复一个单元结构将通道数不断增加,形状不断缩小,并且引入残差网络结构,我们将这些都抽象出来进行统一封装。 + +.. 
code:: ipython3 + + class Encoder(paddle.nn.Layer): + def __init__(self, in_channels, out_channels): + super(Encoder, self).__init__() + + self.relu = paddle.nn.ReLU() + self.separable_conv_01 = SeparableConv2d(in_channels, + out_channels, + kernel_size=3, + padding='same') + self.bn = paddle.nn.BatchNorm2d(out_channels) + self.separable_conv_02 = SeparableConv2d(out_channels, + out_channels, + kernel_size=3, + padding='same') + self.pool = paddle.nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.residual_conv = paddle.nn.Conv2d(in_channels, + out_channels, + kernel_size=1, + stride=2, + padding='same') + + def forward(self, inputs): + previous_block_activation = inputs + + y = self.relu(inputs) + y = self.separable_conv_01(y) + y = self.bn(y) + y = self.relu(y) + y = self.separable_conv_02(y) + y = self.bn(y) + y = self.pool(y) + + residual = self.residual_conv(previous_block_activation) + y = paddle.add(y, residual) + + return y + +4.3 定义Decoder解码器 +~~~~~~~~~~~~~~~~~~~~~ + +在通道数达到最大得到高级语义特征图后,网络结构会开始进行decode操作,进行上采样,通道数逐渐减小,对应图片尺寸逐步增加,直至恢复到原图像大小,那么这个过程里面也是通过不断的重复相同结构的残差网络完成,我们也是为了减少代码编写,将这个过程定义一个Layer来放到模型组网中使用。 + +.. code:: ipython3 + + class Decoder(paddle.nn.Layer): + def __init__(self, in_channels, out_channels): + super(Decoder, self).__init__() + + self.relu = paddle.nn.ReLU() + self.conv_transpose_01 = paddle.nn.ConvTranspose2d(in_channels, + out_channels, + kernel_size=3, + padding='same') + self.conv_transpose_02 = paddle.nn.ConvTranspose2d(out_channels, + out_channels, + kernel_size=3, + padding='same') + self.bn = paddle.nn.BatchNorm2d(out_channels) + self.upsample = paddle.nn.Upsample(scale_factor=2.0) + self.residual_conv = paddle.nn.Conv2d(in_channels, + out_channels, + kernel_size=1, + padding='same') + + def forward(self, inputs): + previous_block_activation = inputs + + y = self.relu(inputs) + y = self.conv_transpose_01(y) + y = self.bn(y) + y = self.relu(y) + y = self.conv_transpose_02(y) + y = self.bn(y) + y = self.upsample(y) + + residual = self.upsample(previous_block_activation) + residual = self.residual_conv(residual) + + y = paddle.add(y, residual) + + return y + +4.4 训练模型组网 +~~~~~~~~~~~~~~~~ + +按照U型网络结构格式进行整体的网络结构搭建,三次下采样,四次上采样。 + +.. code:: ipython3 + + class PetNet(paddle.nn.Layer): + def __init__(self, num_classes): + super(PetNet, self).__init__() + + self.conv_1 = paddle.nn.Conv2d(3, 32, + kernel_size=3, + stride=2, + padding='same') + self.bn = paddle.nn.BatchNorm2d(32) + self.relu = paddle.nn.ReLU() + + in_channels = 32 + self.encoders = [] + self.encoder_list = [64, 128, 256] + self.decoder_list = [256, 128, 64, 32] + + # 根据下采样个数和配置循环定义子Layer,避免重复写一样的程序 + for out_channels in self.encoder_list: + block = self.add_sublayer('encoder_%s'.format(out_channels), + Encoder(in_channels, out_channels)) + self.encoders.append(block) + in_channels = out_channels + + self.decoders = [] + + # 根据上采样个数和配置循环定义子Layer,避免重复写一样的程序 + for out_channels in self.decoder_list: + block = self.add_sublayer('decoder_%s'.format(out_channels), + Decoder(in_channels, out_channels)) + self.decoders.append(block) + in_channels = out_channels + + self.output_conv = paddle.nn.Conv2d(in_channels, + num_classes, + kernel_size=3, + padding='same') + + def forward(self, inputs): + y = self.conv_1(inputs) + y = self.bn(y) + y = self.relu(y) + + for encoder in self.encoders: + y = encoder(y) + + for decoder in self.decoders: + y = decoder(y) + + y = self.output_conv(y) + + return y + +4.5 模型可视化 +~~~~~~~~~~~~~~ + +调用飞桨提供的summary接口对组建好的模型进行可视化,方便进行模型结构和参数信息的查看和确认。 +@TODO,需要替换 + +.. 
code:: ipython3 + + from paddle.static import InputSpec + + paddle.disable_static() + num_classes = 4 + model = paddle.Model(PetNet(num_classes)) + model.summary((3, 160, 160)) + + +.. parsed-literal:: + + -------------------------------------------------------------------------------- + Layer (type) Input Shape Output Shape Param # + ================================================================================ + Conv2d-38 [-1, 3, 160, 160] [-1, 32, 80, 80] 896 + BatchNorm2d-14 [-1, 32, 80, 80] [-1, 32, 80, 80] 128 + ReLU-14 [-1, 32, 80, 80] [-1, 32, 80, 80] 0 + ReLU-17 [-1, 256, 20, 20] [-1, 256, 20, 20] 0 + Conv2d-49 [-1, 128, 20, 20] [-1, 128, 20, 20] 1,152 + Conv2d-50 [-1, 128, 20, 20] [-1, 256, 20, 20] 33,024 + SeparableConv2d-17 [-1, 128, 20, 20] [-1, 256, 20, 20] 0 + BatchNorm2d-17 [-1, 256, 20, 20] [-1, 256, 20, 20] 1,024 + Conv2d-51 [-1, 256, 20, 20] [-1, 256, 20, 20] 2,304 + Conv2d-52 [-1, 256, 20, 20] [-1, 256, 20, 20] 65,792 + SeparableConv2d-18 [-1, 256, 20, 20] [-1, 256, 20, 20] 0 + MaxPool2d-9 [-1, 256, 20, 20] [-1, 256, 10, 10] 0 + Conv2d-53 [-1, 128, 20, 20] [-1, 256, 10, 10] 33,024 + Encoder-9 [-1, 128, 20, 20] [-1, 256, 10, 10] 0 + ReLU-21 [-1, 32, 80, 80] [-1, 32, 80, 80] 0 + ConvTranspose2d-17 [-1, 64, 80, 80] [-1, 32, 80, 80] 18,464 + BatchNorm2d-21 [-1, 32, 80, 80] [-1, 32, 80, 80] 128 + ConvTranspose2d-18 [-1, 32, 80, 80] [-1, 32, 80, 80] 9,248 + Upsample-8 [-1, 64, 80, 80] [-1, 64, 160, 160] 0 + Conv2d-57 [-1, 64, 160, 160] [-1, 32, 160, 160] 2,080 + Decoder-9 [-1, 64, 80, 80] [-1, 32, 160, 160] 0 + Conv2d-58 [-1, 32, 160, 160] [-1, 4, 160, 160] 1,156 + ================================================================================ + Total params: 168,420 + Trainable params: 167,140 + Non-trainable params: 1,280 + -------------------------------------------------------------------------------- + Input size (MB): 0.29 + Forward/backward pass size (MB): 43.16 + Params size (MB): 0.64 + Estimated Total Size (MB): 44.10 + -------------------------------------------------------------------------------- + + + + + +.. parsed-literal:: + + {'total_params': 168420, 'trainable_params': 167140} + + + +5.模型训练 +---------- + +5.1 配置信息 +~~~~~~~~~~~~ + +定义训练BATCH_SIZE、训练轮次和计算设备等信息。 + +.. code:: ipython3 + + BATCH_SIZE = 32 + EPOCHS = 15 + device = paddle.set_device('gpu') + paddle.disable_static(device) + +5.3 自定义Loss +~~~~~~~~~~~~~~ + +在这个任务中我们使用SoftmaxWithCrossEntropy损失函数来做计算,飞桨中有functional形式的API,这里我们做一个自定义操作,实现一个Class形式API放到模型训练中使用。没有直接使用CrossEntropyLoss的原因主要是对计算维度的自定义需求,本次需要进行softmax计算的维度是1,不是默认的最后一维,所以我们采用上面提到的损失函数,通过axis参数来指定softmax计算维度。 + +.. code:: ipython3 + + class SoftmaxWithCrossEntropy(paddle.nn.Layer): + def __init__(self): + super(SoftmaxWithCrossEntropy, self).__init__() + + def forward(self, input, label): + loss = F.softmax_with_cross_entropy(input, + label, + return_softmax=False, + axis=1) + return paddle.mean(loss) + +5.4 启动模型训练 +~~~~~~~~~~~~~~~~ + +使用模型代码进行Model实例生成,使用prepare接口定义优化器、损失函数和评价指标等信息,用于后续训练使用。在所有初步配置完成后,调用fit接口开启训练执行过程,调用fit时只需要将前面定义好的训练数据集、测试数据集、训练轮次(Epoch)和批次大小(batch_size)配置好即可。 + +.. 
code:: ipython3 + + optim = paddle.optimizer.RMSProp(learning_rate=0.001, + rho=0.9, + momentum=0.0, + epsilon=1e-07, + centered=False, + parameters=model.parameters()) + model = paddle.Model(PetModel(num_classes)) + model.prepare(optim, SoftmaxWithCrossEntropy()) + model.fit(train_dataset, + val_dataset, + epochs=EPOCHS, + batch_size=BATCH_SIZE) + +6.模型预测 +---------- + +6.1 预测数据集准备和预测 +~~~~~~~~~~~~~~~~~~~~~~~~ + +继续使用PetDataset来实例化待预测使用的数据集。这里我们为了方便没有在另外准备预测数据,复用了评估数据。 + +我们可以直接使用model.predict接口来对数据集进行预测操作,只需要将预测数据集传递到接口内即可。 + +.. code:: ipython3 + + predict_results = model.predict(val_dataset) + +6.2 预测结果可视化 +~~~~~~~~~~~~~~~~~~ + +从我们的预测数据集中抽3个动物来看看预测的效果,展示一下原图、标签图和预测结果。 + +.. code:: ipython3 + + print(len(predict_results)) + plt.figure(figsize=(10, 10)) + + i = 0 + mask_idx = 0 + + for data in val_dataset: + if i > 8: + break + plt.subplot(3, 3, i + 1) + plt.imshow(data[0].transpose((1, 2, 0)).astype('uint8')) + plt.title('Input Image') + plt.axis("off") + + plt.subplot(3, 3, i + 2) + plt.imshow(np.squeeze(data[1], axis=0).astype('uint8'), cmap='gray') + plt.title('Label') + plt.axis("off") + + # 模型只有一个输出,所以我们通过predict_results[0]来取出1000个预测的结果 + # 映射原始图片的index来取出预测结果,提取mask进行展示 + data = predict_results[0][mask_idx][0].transpose((1, 2, 0)) + mask = np.argmax(data, axis=-1) + mask = np.expand_dims(mask, axis=-1) + + plt.subplot(3, 3, i + 3) + plt.imshow(np.squeeze(mask, axis=2).astype('uint8'), cmap='gray') + plt.title('Predict') + plt.axis("off") + i += 3 + mask_idx += 1 + + plt.show() diff --git a/doc/paddle/tutorial/cv_case/image_segmentation/pets_image_segmentation_U_Net_like_files/pets_image_segmentation_U_Net_like_001.png b/doc/paddle/tutorial/cv_case/image_segmentation/pets_image_segmentation_U_Net_like_files/pets_image_segmentation_U_Net_like_001.png new file mode 100644 index 0000000000000000000000000000000000000000..f82777f87f82d9e77cd8b63d10050f77dbf7db73 Binary files /dev/null and b/doc/paddle/tutorial/cv_case/image_segmentation/pets_image_segmentation_U_Net_like_files/pets_image_segmentation_U_Net_like_001.png differ diff --git a/doc/paddle/tutorial/cv_case/index_cn.rst b/doc/paddle/tutorial/cv_case/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..7b141b6acfff67f020cc5846fccba98afbaeed59 --- /dev/null +++ b/doc/paddle/tutorial/cv_case/index_cn.rst @@ -0,0 +1,21 @@ +################ +计算机视觉 +################ + + +在这里PaddlePaddle为大家提供了一篇cv的教程供大家学习: + + - `图像分类 <./mnist_lenet_classification/mnist_lenet_classification.html>`_ :介绍使用 Paddle 在MNIST数据集上完成图像分类。 + - `图像分类 <./convnet_image_classification/convnet_image_classification.html>`_ :介绍使用 Paddle 在CIFA10数据集上完成图像分类。 + - `图像搜索 <./image_search/image_search.html>`_ :介绍使用 Paddle 实现图像搜索。 + - `图像分割 <./image_segmentation/pets_image_segmentation_U_Net_like.html>`_ :介绍使用 Paddle 实现U-Net模型完成图像分割。 + + +.. 
toctree:: + :hidden: + :titlesonly: + + mnist_lenet_classification/mnist_lenet_classification.rst + convnet_image_classification/convnet_image_classification.rst + image_search/image_search.rst + image_segmentation/pets_image_segmentation_U_Net_like.rst diff --git a/doc/paddle/tutorial/cv_case/mnist_lenet_classification/mnist_lenet_classification.ipynb b/doc/paddle/tutorial/cv_case/mnist_lenet_classification/mnist_lenet_classification.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..f7012ee9713176f6a42b360478b42ead149632f1 --- /dev/null +++ b/doc/paddle/tutorial/cv_case/mnist_lenet_classification/mnist_lenet_classification.ipynb @@ -0,0 +1,436 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MNIST数据集使用LeNet进行图像分类\n", + "本示例教程演示如何在MNIST数据集上用LeNet进行图像分类。\n", + "手写数字的MNIST数据集,包含60,000个用于训练的示例和10,000个用于测试的示例。这些数字已经过尺寸标准化并位于图像中心,图像是固定大小(28x28像素),其值为0到1。该数据集的官方地址为:http://yann.lecun.com/exdb/mnist/" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 环境\n", + "本教程基于paddle-2.0-beta编写,如果您的环境不是本版本,请先安装paddle-2.0-beta版本。" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2.0.0-beta0\n" + ] + } + ], + "source": [ + "import paddle\n", + "print(paddle.__version__)\n", + "paddle.disable_static()\n", + "# 开启动态图" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 加载数据集\n", + "我们使用飞桨自带的paddle.dataset完成mnist数据集的加载。" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "download training data and load training data\n", + "load finished\n" + ] + } + ], + "source": [ + "print('download training data and load training data')\n", + "train_dataset = paddle.vision.datasets.MNIST(mode='train')\n", + "test_dataset = paddle.vision.datasets.MNIST(mode='test')\n", + "print('load finished')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "取训练集中的一条数据看一下。" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "train_data0 label is: [5]\n" + ] + }, + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAI4AAACOCAYAAADn/TAIAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+j8jraAAAIY0lEQVR4nO3dXWhUZxoH8P/jaPxav7KREtNgiooQFvwg1l1cNOr6sQUN3ixR0VUK9cKPXTBYs17ohReLwl5ovCmuZMU1y+IaWpdC0GIuxCJJMLhJa6oWtSl+FVEXvdDK24s5nc5zapKTZ86cOTPz/4Hk/M8xc17w8Z13zpl5RpxzIBquEbkeAOUnFg6ZsHDIhIVDJiwcMmHhkElGhSMiq0WkT0RuisjesAZF8SfW6zgikgDwFYAVAPoBdABY75z7IrzhUVyNzOB33wVw0zn3NQCIyL8A1AEYsHDKyspcVVVVBqekqHV1dX3nnJvq359J4VQA+CYt9wNYONgvVFVVobOzM4NTUtRE5M6b9md9cSwiH4hIp4h0Pnr0KNuno4hkUjjfAqhMy297+xTn3EfOuRrnXM3UqT+b8ShPZVI4HQBmicg7IlICoB7AJ+EMi+LOvMZxzn0vIjsAtAFIADjhnOsNbWQUa5ksjuGc+xTApyGNhfIIrxyTCQuHTFg4ZMLCIRMWDpmwcMiEhUMmLBwyYeGQCQuHTFg4ZMLCIZOMbnIWk9evX6v89OnTwL/b1NSk8osXL1Tu6+tT+dixYyo3NDSo3NLSovKYMWNU3rv3p88N7N+/P/A4h4MzDpmwcMiEhUMmRbPGuXv3rsovX75U+fLlyypfunRJ5SdPnqh85syZ0MZWWVmp8s6dO1VubW1VecKECSrPmTNH5SVLloQ2toFwxiETFg6ZsHDIpGDXOFevXlV52bJlKg/nOkzYEomEygcPHlR5/PjxKm/cuFHladOmqTxlyhSVZ8+enekQh8QZh0xYOGTCwiGTgl3jTJ8+XeWysjKVw1zjLFyom3T41xwXL15UuaSkROVNmzaFNpaocMYhExYOmbBwyKRg1zilpaUqHz58WOVz586pPG/ePJV37do16OPPnTs3tX3hwgV1zH8dpqenR+UjR44M+tj5gDMOmQxZOCJyQkQeikhP2r5SETkvIje8n1MGewwqPEFmnGYAq3379gL4zDk3C8BnXqYiEqjPsYhUAfivc+5XXu4DUOucuyci5QDanXND3iCpqalxcek6+uzZM5X973HZtm2bysePH1f51KlTqe0NGzaEPLr4EJEu51yNf791jfOWc+6et30fwFvmkVFeynhx7JJT1oDTFtvVFiZr4TzwnqLg/Xw40F9ku9rCZL2O8wmAPwL4q/fz49BGFJGJEycOenzSpEmDHk9f89TX16tjI0YU/lWOIC/HWwB8DmC2iPSLyPtIFswKEbkB4HdepiIy5IzjnFs/wKHlIY+F8kjhz6mUFQV7rypTBw4cULmrq0vl9vb21Lb/XtXKlSuzNazY4IxDJiwcMmHhkIn5Ozkt4nSvarhu3bql8vz581PbkydPVseWLl2qck2NvtWzfft2lUUkjCFmRdj3qqjIsXDIhC/HA5oxY4bKzc3Nqe2tW7eqYydPnhw0P3/+XOXNmzerXF5ebh1mZDjjkAkLh0xYOGTCNY7RunXrUtszZ85Ux3bv3q2y/5ZEY2Ojynfu6O+E37dvn8oVFRXmcWYLZxwyYeGQCQuHTHjLIQv8rW39HzfesmWLyv5/g+XL9Xvkzp8/H97ghom3HChULBwyYeGQCdc4OTB69GiVX716pfKoUaNUbmtrU7m2tjYr43oTrnEoVCwcMmHhkAnvVYXg2rVrKvu/kqijo0Nl/5rGr7q6WuXFixdnMLrs4IxDJiwcMmHhkAnXOAH5v+L56NGjqe2zZ8+qY/fv3x/WY48cqf8Z/O85jmPblPiNiPJCkP44lSJyUUS+EJFeEfmTt58ta4tYkBnnewC7nXPVAH4NYLuIVIMta4takMZK9wDc87b/LyJfAqgAUAeg1vtr/wDQDuDDrIwyAv51yenTp1VuampS+fbt2+ZzLViwQGX/e4zXrl1rfuyoDGuN4/U7ngfgCtiytqgFLhwR+QWA/wD4s3NOdZcerGUt29UWpkCFIyKjkCyafzrnfnztGahlLdvVFqYh1ziS7MHxdwBfOuf+lnYor1rWPnjwQOXe3l6Vd+zYofL169fN5/J/1eKePXtUrqurUzmO12mGEuQC4CIAmwD8T0S6vX1/QbJg/u21r70D4A/ZGSLFUZBXVZcADNT5hy1ri1T+zZEUCwVzr+rx48cq+782qLu7W2V/a7bhWrRoUWrb/1nxVatWqTx27NiMzhVHnHHIhIVDJiwcMsmrNc6VK1dS24cOHVLH/O/r7e/vz+hc48aNU9n/ddLp95f8XxddDDjjkAkLh0zy6qmqtbX1jdtB+D9ysmbNGpUTiYTKDQ0NKvu7pxc7zjhkwsIhExYOmbDNCQ2KbU4oVCwcMmHhkAkLh0xYOGTCwiETFg6ZsHDIhIVDJiwcMmHhkEmk96pE5BGSn/osA/BdZCcenriOLVfjmu6c+9mH/iMtnNRJRTrfdOMsDuI6triNi09VZMLCIZNcFc5HOTpvEHEdW6zGlZM1DuU/PlWRSaSFIyKrRaRPRG6KSE7b24rICRF5KCI9afti0bs5H3pLR1Y4IpIAcAzA7wFUA1jv9UvOlWYAq3374tK7Of69pZ1zkfwB8BsAbWm5EUBjVOcfYExVAHrSch+Acm+7HEBfLseXNq6PAayI0/iifKqqAPBNWu739sVJ7Ho3x7W3NBfHA3DJ/9Y5fclp7S0dhSgL51sAlWn5bW9fnATq3RyFTHpLRyHKwukAMEtE3hGREgD1SPZKjpMfezcDOezdHKC3NJDr3tIRL/LeA/AVgFsA9uV4wdmC5JebvEJyvfU+gF8i+WrlBoALAEpzNLbfIvk0dA1At/fnvbiMzznHK8dkw8UxmbBwyISFQyYsHDJh4ZAJC4dMWDhkwsIhkx8AyyZIbO5tLBIAAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "train_data0, train_label_0 = train_dataset[0][0],train_dataset[0][1]\n", + "train_data0 = train_data0.reshape([28,28])\n", + "plt.figure(figsize=(2,2))\n", + "plt.imshow(train_data0, cmap=plt.cm.binary)\n", + "print('train_data0 label is: ' + str(train_label_0))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 组网\n", + "用paddle.nn下的API,如`Conv2d`、`MaxPool2d`、`Linear`完成LeNet的构建。" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "import paddle\n", + "import paddle.nn.functional as F\n", + "class LeNet(paddle.nn.Layer):\n", + " def __init__(self):\n", + " super(LeNet, self).__init__()\n", + " self.conv1 = paddle.nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5, stride=1, padding=2)\n", + " self.max_pool1 = paddle.nn.MaxPool2d(kernel_size=2, stride=2)\n", + " self.conv2 = paddle.nn.Conv2d(in_channels=6, out_channels=16, kernel_size=5, stride=1)\n", + " self.max_pool2 = paddle.nn.MaxPool2d(kernel_size=2, stride=2)\n", + " self.linear1 = paddle.nn.Linear(in_features=16*5*5, out_features=120)\n", + " self.linear2 = paddle.nn.Linear(in_features=120, out_features=84)\n", + " self.linear3 = paddle.nn.Linear(in_features=84, out_features=10)\n", + "\n", + " def forward(self, x):\n", + " x = self.conv1(x)\n", + " x = F.relu(x)\n", + " x = self.max_pool1(x)\n", + " x = F.relu(x)\n", + " x = self.conv2(x)\n", + " x = self.max_pool2(x)\n", + " x = paddle.flatten(x, start_axis=1,stop_axis=-1)\n", + " x = self.linear1(x)\n", + " x = F.relu(x)\n", + " x = self.linear2(x)\n", + " x = F.relu(x)\n", + " x = self.linear3(x)\n", + " x = F.softmax(x)\n", + " return x" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 训练方式一\n", + "组网后,开始对模型进行训练,先构建`train_loader`,加载训练数据,然后定义`train`函数,设置好损失函数后,按batch加载数据,完成模型的训练。" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "epoch: 0, batch_id: 0, loss is: [2.3037894], acc is: [0.140625]\n", + "epoch: 0, batch_id: 100, loss is: [1.6175328], acc is: [0.9375]\n", + "epoch: 0, batch_id: 200, loss is: [1.5388051], acc is: [0.96875]\n", + "epoch: 0, batch_id: 300, loss is: [1.5251061], acc is: [0.96875]\n", + "epoch: 0, batch_id: 400, loss is: [1.4678856], acc is: [1.]\n", + "epoch: 0, batch_id: 500, loss is: [1.4944503], acc is: [0.984375]\n", + "epoch: 0, batch_id: 600, loss is: [1.5365536], acc is: [0.96875]\n", + "epoch: 0, batch_id: 700, loss is: [1.4885054], acc is: [0.984375]\n", + "epoch: 0, batch_id: 800, loss is: [1.4872254], acc is: [0.984375]\n", + "epoch: 0, batch_id: 900, loss is: [1.4884174], acc is: [0.984375]\n", + "epoch: 1, batch_id: 0, loss is: [1.4776722], acc is: [1.]\n", + "epoch: 1, batch_id: 100, loss is: [1.4751343], acc is: [1.]\n", + "epoch: 1, batch_id: 200, loss is: [1.4772581], acc is: [1.]\n", + "epoch: 1, batch_id: 300, loss is: [1.4918218], acc is: [0.984375]\n", + "epoch: 1, batch_id: 400, loss is: [1.5038397], acc is: [0.96875]\n", + "epoch: 1, batch_id: 500, loss is: [1.5088196], acc is: [0.96875]\n", + "epoch: 1, batch_id: 600, loss is: [1.4961376], acc is: [0.984375]\n", + "epoch: 1, batch_id: 700, loss is: [1.4755756], acc is: [1.]\n", + "epoch: 1, batch_id: 800, loss is: [1.4921497], acc is: [0.984375]\n", + "epoch: 1, batch_id: 
900, loss is: [1.4944404], acc is: [1.]\n" + ] + } + ], + "source": [ + "import paddle\n", + "train_loader = paddle.io.DataLoader(train_dataset, places=paddle.CPUPlace(), batch_size=64, shuffle=True)\n", + "# 加载训练集 batch_size 设为 64\n", + "def train(model):\n", + " model.train()\n", + " epochs = 2\n", + " optim = paddle.optimizer.Adam(learning_rate=0.001, parameters=model.parameters())\n", + " # 用Adam作为优化函数\n", + " for epoch in range(epochs):\n", + " for batch_id, data in enumerate(train_loader()):\n", + " x_data = data[0]\n", + " y_data = data[1]\n", + " predicts = model(x_data)\n", + " loss = paddle.nn.functional.cross_entropy(predicts, y_data)\n", + " # 计算损失\n", + " acc = paddle.metric.accuracy(predicts, y_data, k=2)\n", + " avg_loss = paddle.mean(loss)\n", + " avg_acc = paddle.mean(acc)\n", + " avg_loss.backward()\n", + " if batch_id % 100 == 0:\n", + " print(\"epoch: {}, batch_id: {}, loss is: {}, acc is: {}\".format(epoch, batch_id, avg_loss.numpy(), avg_acc.numpy()))\n", + " optim.step()\n", + " optim.clear_grad()\n", + "model = LeNet()\n", + "train(model)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 对模型进行验证\n", + "训练完成后,需要验证模型的效果,此时,加载测试数据集,然后用训练好的模对测试集进行预测,计算损失与精度。" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "batch_id: 0, loss is: [1.4915928], acc is: [1.]\n", + "batch_id: 20, loss is: [1.4818308], acc is: [1.]\n", + "batch_id: 40, loss is: [1.5006062], acc is: [0.984375]\n", + "batch_id: 60, loss is: [1.521233], acc is: [1.]\n", + "batch_id: 80, loss is: [1.4772738], acc is: [1.]\n", + "batch_id: 100, loss is: [1.4755945], acc is: [1.]\n", + "batch_id: 120, loss is: [1.4746133], acc is: [1.]\n", + "batch_id: 140, loss is: [1.4786345], acc is: [1.]\n" + ] + } + ], + "source": [ + "import paddle\n", + "test_loader = paddle.io.DataLoader(test_dataset, places=paddle.CPUPlace(), batch_size=64)\n", + "# 加载测试数据集\n", + "def test(model):\n", + " model.eval()\n", + " batch_size = 64\n", + " for batch_id, data in enumerate(test_loader()):\n", + " x_data = data[0]\n", + " y_data = data[1]\n", + " predicts = model(x_data)\n", + " # 获取预测结果\n", + " loss = paddle.nn.functional.cross_entropy(predicts, y_data)\n", + " acc = paddle.metric.accuracy(predicts, y_data, k=2)\n", + " avg_loss = paddle.mean(loss)\n", + " avg_acc = paddle.mean(acc)\n", + " avg_loss.backward()\n", + " if batch_id % 20 == 0:\n", + " print(\"batch_id: {}, loss is: {}, acc is: {}\".format(batch_id, avg_loss.numpy(), avg_acc.numpy()))\n", + "test(model)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 训练方式一结束\n", + "以上就是训练方式一,通过这种方式,可以清楚的看到训练和测试中的每一步过程。但是,这种方式句法比较复杂。因此,我们提供了训练方式二,能够更加快速、高效的完成模型的训练与测试。" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 3.训练方式二\n", + "通过paddle提供的`Model` 构建实例,使用封装好的训练与测试接口,快速完成模型训练与测试。" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "import paddle\n", + "from paddle.static import InputSpec\n", + "from paddle.metric import Accuracy\n", + "inputs = InputSpec([None, 784], 'float32', 'x')\n", + "labels = InputSpec([None, 10], 'float32', 'x')\n", + "model = paddle.Model(LeNet(), inputs, labels)\n", + "optim = paddle.optimizer.Adam(learning_rate=0.001, parameters=model.parameters())\n", + "\n", + "model.prepare(\n", + " optim,\n", + " paddle.nn.loss.CrossEntropyLoss(),\n", + " Accuracy(topk=(1, 2))\n", + " )" + ] + }, + { + 
"cell_type": "markdown", + "metadata": {}, + "source": [ + "### 使用model.fit来训练模型" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Epoch 1/2\n", + "step 200/938 - loss: 1.5219 - acc_top1: 0.9829 - acc_top2: 0.9965 - 14ms/step\n", + "step 400/938 - loss: 1.4765 - acc_top1: 0.9825 - acc_top2: 0.9958 - 13ms/step\n", + "step 600/938 - loss: 1.4624 - acc_top1: 0.9823 - acc_top2: 0.9953 - 13ms/step\n", + "step 800/938 - loss: 1.4768 - acc_top1: 0.9829 - acc_top2: 0.9955 - 13ms/step\n", + "step 938/938 - loss: 1.4612 - acc_top1: 0.9836 - acc_top2: 0.9956 - 13ms/step\n", + "Epoch 2/2\n", + "step 200/938 - loss: 1.4705 - acc_top1: 0.9834 - acc_top2: 0.9959 - 13ms/step\n", + "step 400/938 - loss: 1.4620 - acc_top1: 0.9833 - acc_top2: 0.9960 - 13ms/step\n", + "step 600/938 - loss: 1.4613 - acc_top1: 0.9830 - acc_top2: 0.9960 - 13ms/step\n", + "step 800/938 - loss: 1.4763 - acc_top1: 0.9831 - acc_top2: 0.9960 - 13ms/step\n", + "step 938/938 - loss: 1.4924 - acc_top1: 0.9834 - acc_top2: 0.9959 - 13ms/step\n" + ] + } + ], + "source": [ + "model.fit(train_dataset,\n", + " epochs=2,\n", + " batch_size=64,\n", + " log_freq=200\n", + " )" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 使用model.evaluate来预测模型" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Eval begin...\n", + "step 20/157 - loss: 1.5246 - acc_top1: 0.9773 - acc_top2: 0.9969 - 6ms/step\n", + "step 40/157 - loss: 1.4622 - acc_top1: 0.9758 - acc_top2: 0.9961 - 6ms/step\n", + "step 60/157 - loss: 1.5241 - acc_top1: 0.9763 - acc_top2: 0.9951 - 6ms/step\n", + "step 80/157 - loss: 1.4612 - acc_top1: 0.9787 - acc_top2: 0.9959 - 6ms/step\n", + "step 100/157 - loss: 1.4612 - acc_top1: 0.9823 - acc_top2: 0.9967 - 5ms/step\n", + "step 120/157 - loss: 1.4612 - acc_top1: 0.9835 - acc_top2: 0.9966 - 5ms/step\n", + "step 140/157 - loss: 1.4612 - acc_top1: 0.9844 - acc_top2: 0.9969 - 5ms/step\n", + "step 157/157 - loss: 1.4612 - acc_top1: 0.9838 - acc_top2: 0.9966 - 5ms/step\n", + "Eval samples: 10000\n" + ] + }, + { + "data": { + "text/plain": [ + "{'loss': [1.4611504], 'acc_top1': 0.9838, 'acc_top2': 0.9966}" + ] + }, + "execution_count": 17, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "model.evaluate(test_dataset, log_freq=20, batch_size=64)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 训练方式二结束\n", + "以上就是训练方式二,可以快速、高效的完成网络模型训练与预测。" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 总结\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "以上就是用LeNet对手写数字数据及MNIST进行分类。本示例提供了两种训练模型的方式,一种可以快速完成模型的组建与预测,非常适合新手用户上手。另一种则需要多个步骤来完成模型的训练,适合进阶用户使用。" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.3" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/doc/paddle/tutorial/cv_case/mnist_lenet_classification/mnist_lenet_classification.rst b/doc/paddle/tutorial/cv_case/mnist_lenet_classification/mnist_lenet_classification.rst new file mode 100644 index 
0000000000000000000000000000000000000000..0ccce2c91e21ba91c49efd95eb9bfae803de7242 --- /dev/null +++ b/doc/paddle/tutorial/cv_case/mnist_lenet_classification/mnist_lenet_classification.rst @@ -0,0 +1,293 @@ +MNIST数据集使用LeNet进行图像分类 +================================ + +本示例教程演示如何在MNIST数据集上用LeNet进行图像分类。 +手写数字的MNIST数据集,包含60,000个用于训练的示例和10,000个用于测试的示例。这些数字已经过尺寸标准化并位于图像中心,图像是固定大小(28x28像素),其值为0到1。该数据集的官方地址为:http://yann.lecun.com/exdb/mnist/ + +环境 +---- + +本教程基于paddle-2.0-beta编写,如果您的环境不是本版本,请先安装paddle-2.0-beta版本。 + +.. code:: ipython3 + + import paddle + print(paddle.__version__) + paddle.disable_static() + # 开启动态图 + + +.. parsed-literal:: + + 2.0.0-beta0 + + +加载数据集 +---------- + +我们使用飞桨自带的paddle.dataset完成mnist数据集的加载。 + +.. code:: ipython3 + + print('download training data and load training data') + train_dataset = paddle.vision.datasets.MNIST(mode='train') + test_dataset = paddle.vision.datasets.MNIST(mode='test') + print('load finished') + + +.. parsed-literal:: + + download training data and load training data + load finished + + +取训练集中的一条数据看一下。 + +.. code:: ipython3 + + import numpy as np + import matplotlib.pyplot as plt + train_data0, train_label_0 = train_dataset[0][0],train_dataset[0][1] + train_data0 = train_data0.reshape([28,28]) + plt.figure(figsize=(2,2)) + plt.imshow(train_data0, cmap=plt.cm.binary) + print('train_data0 label is: ' + str(train_label_0)) + + +.. parsed-literal:: + + train_data0 label is: [5] + + + +.. image:: https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/paddle/tutorial/cv_case/mnist_lenet_classification/mnist_lenet_classification_files/mnist_lenet_classification_001.png?raw=true + + +组网 +---- + +用paddle.nn下的API,如\ ``Conv2d``\ 、\ ``MaxPool2d``\ 、\ ``Linear``\ 完成LeNet的构建。 + +.. code:: ipython3 + + import paddle + import paddle.nn.functional as F + class LeNet(paddle.nn.Layer): + def __init__(self): + super(LeNet, self).__init__() + self.conv1 = paddle.nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5, stride=1, padding=2) + self.max_pool1 = paddle.nn.MaxPool2d(kernel_size=2, stride=2) + self.conv2 = paddle.nn.Conv2d(in_channels=6, out_channels=16, kernel_size=5, stride=1) + self.max_pool2 = paddle.nn.MaxPool2d(kernel_size=2, stride=2) + self.linear1 = paddle.nn.Linear(in_features=16*5*5, out_features=120) + self.linear2 = paddle.nn.Linear(in_features=120, out_features=84) + self.linear3 = paddle.nn.Linear(in_features=84, out_features=10) + + def forward(self, x): + x = self.conv1(x) + x = F.relu(x) + x = self.max_pool1(x) + x = F.relu(x) + x = self.conv2(x) + x = self.max_pool2(x) + x = paddle.flatten(x, start_axis=1,stop_axis=-1) + x = self.linear1(x) + x = F.relu(x) + x = self.linear2(x) + x = F.relu(x) + x = self.linear3(x) + x = F.softmax(x) + return x + +训练方式一 +---------- + +组网后,开始对模型进行训练,先构建\ ``train_loader``\ ,加载训练数据,然后定义\ ``train``\ 函数,设置好损失函数后,按batch加载数据,完成模型的训练。 + +.. 
code:: ipython3 + + import paddle + train_loader = paddle.io.DataLoader(train_dataset, places=paddle.CPUPlace(), batch_size=64, shuffle=True) + # 加载训练集 batch_size 设为 64 + def train(model): + model.train() + epochs = 2 + optim = paddle.optimizer.Adam(learning_rate=0.001, parameters=model.parameters()) + # 用Adam作为优化函数 + for epoch in range(epochs): + for batch_id, data in enumerate(train_loader()): + x_data = data[0] + y_data = data[1] + predicts = model(x_data) + loss = paddle.nn.functional.cross_entropy(predicts, y_data) + # 计算损失 + acc = paddle.metric.accuracy(predicts, y_data, k=2) + avg_loss = paddle.mean(loss) + avg_acc = paddle.mean(acc) + avg_loss.backward() + if batch_id % 100 == 0: + print("epoch: {}, batch_id: {}, loss is: {}, acc is: {}".format(epoch, batch_id, avg_loss.numpy(), avg_acc.numpy())) + optim.step() + optim.clear_grad() + model = LeNet() + train(model) + + +.. parsed-literal:: + + epoch: 0, batch_id: 0, loss is: [2.3037894], acc is: [0.140625] + epoch: 0, batch_id: 100, loss is: [1.6175328], acc is: [0.9375] + epoch: 0, batch_id: 200, loss is: [1.5388051], acc is: [0.96875] + epoch: 0, batch_id: 300, loss is: [1.5251061], acc is: [0.96875] + epoch: 0, batch_id: 400, loss is: [1.4678856], acc is: [1.] + epoch: 0, batch_id: 500, loss is: [1.4944503], acc is: [0.984375] + epoch: 0, batch_id: 600, loss is: [1.5365536], acc is: [0.96875] + epoch: 0, batch_id: 700, loss is: [1.4885054], acc is: [0.984375] + epoch: 0, batch_id: 800, loss is: [1.4872254], acc is: [0.984375] + epoch: 0, batch_id: 900, loss is: [1.4884174], acc is: [0.984375] + epoch: 1, batch_id: 0, loss is: [1.4776722], acc is: [1.] + epoch: 1, batch_id: 100, loss is: [1.4751343], acc is: [1.] + epoch: 1, batch_id: 200, loss is: [1.4772581], acc is: [1.] + epoch: 1, batch_id: 300, loss is: [1.4918218], acc is: [0.984375] + epoch: 1, batch_id: 400, loss is: [1.5038397], acc is: [0.96875] + epoch: 1, batch_id: 500, loss is: [1.5088196], acc is: [0.96875] + epoch: 1, batch_id: 600, loss is: [1.4961376], acc is: [0.984375] + epoch: 1, batch_id: 700, loss is: [1.4755756], acc is: [1.] + epoch: 1, batch_id: 800, loss is: [1.4921497], acc is: [0.984375] + epoch: 1, batch_id: 900, loss is: [1.4944404], acc is: [1.] + + +对模型进行验证 +~~~~~~~~~~~~~~ + +训练完成后,需要验证模型的效果,此时,加载测试数据集,然后用训练好的模对测试集进行预测,计算损失与精度。 + +.. code:: ipython3 + + import paddle + test_loader = paddle.io.DataLoader(test_dataset, places=paddle.CPUPlace(), batch_size=64) + # 加载测试数据集 + def test(model): + model.eval() + batch_size = 64 + for batch_id, data in enumerate(test_loader()): + x_data = data[0] + y_data = data[1] + predicts = model(x_data) + # 获取预测结果 + loss = paddle.nn.functional.cross_entropy(predicts, y_data) + acc = paddle.metric.accuracy(predicts, y_data, k=2) + avg_loss = paddle.mean(loss) + avg_acc = paddle.mean(acc) + avg_loss.backward() + if batch_id % 20 == 0: + print("batch_id: {}, loss is: {}, acc is: {}".format(batch_id, avg_loss.numpy(), avg_acc.numpy())) + test(model) + + +.. parsed-literal:: + + batch_id: 0, loss is: [1.4915928], acc is: [1.] + batch_id: 20, loss is: [1.4818308], acc is: [1.] + batch_id: 40, loss is: [1.5006062], acc is: [0.984375] + batch_id: 60, loss is: [1.521233], acc is: [1.] + batch_id: 80, loss is: [1.4772738], acc is: [1.] + batch_id: 100, loss is: [1.4755945], acc is: [1.] + batch_id: 120, loss is: [1.4746133], acc is: [1.] + batch_id: 140, loss is: [1.4786345], acc is: [1.] 
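+
+During evaluation no parameters are updated, so the ``avg_loss.backward()`` call inside ``test()`` above is not actually required. The sketch below is a supplementary, minimal variant rather than part of the original tutorial: it assumes ``paddle.no_grad()`` is available as a context manager in this paddle-2.0-beta environment, it reuses the ``model`` and ``test_loader`` defined above, and the helper name ``evaluate_no_grad`` is ours.
+
+.. code:: ipython3
+
+    import paddle
+
+    def evaluate_no_grad(model):
+        model.eval()
+        accs = []
+        # gradients are not needed at test time, so run the forward passes
+        # inside paddle.no_grad() and skip backward() altogether
+        with paddle.no_grad():
+            for batch_id, data in enumerate(test_loader()):
+                x_data, y_data = data[0], data[1]
+                predicts = model(x_data)
+                acc = paddle.metric.accuracy(predicts, y_data)  # top-1 accuracy
+                accs.append(paddle.mean(acc).numpy())
+        print("mean top-1 accuracy: {}".format(sum(accs) / len(accs)))
+
+    evaluate_no_grad(model)
+
+The top-1 numbers reported this way are expected to be slightly lower than the accuracies printed above, because ``paddle.metric.accuracy(predicts, y_data, k=2)`` in ``test()`` counts a prediction as correct whenever the true label is among the two highest-scoring classes.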
+ + +训练方式一结束 +~~~~~~~~~~~~~~ + +以上就是训练方式一,通过这种方式,可以清楚的看到训练和测试中的每一步过程。但是,这种方式句法比较复杂。因此,我们提供了训练方式二,能够更加快速、高效的完成模型的训练与测试。 + +3.训练方式二 +------------ + +通过paddle提供的\ ``Model`` +构建实例,使用封装好的训练与测试接口,快速完成模型训练与测试。 + +.. code:: ipython3 + + import paddle + from paddle.static import InputSpec + from paddle.metric import Accuracy + inputs = InputSpec([None, 784], 'float32', 'x') + labels = InputSpec([None, 10], 'float32', 'x') + model = paddle.Model(LeNet(), inputs, labels) + optim = paddle.optimizer.Adam(learning_rate=0.001, parameters=model.parameters()) + + model.prepare( + optim, + paddle.nn.loss.CrossEntropyLoss(), + Accuracy(topk=(1, 2)) + ) + +使用model.fit来训练模型 +~~~~~~~~~~~~~~~~~~~~~~~ + +.. code:: ipython3 + + model.fit(train_dataset, + epochs=2, + batch_size=64, + log_freq=200 + ) + + +.. parsed-literal:: + + Epoch 1/2 + step 200/938 - loss: 1.5219 - acc_top1: 0.9829 - acc_top2: 0.9965 - 14ms/step + step 400/938 - loss: 1.4765 - acc_top1: 0.9825 - acc_top2: 0.9958 - 13ms/step + step 600/938 - loss: 1.4624 - acc_top1: 0.9823 - acc_top2: 0.9953 - 13ms/step + step 800/938 - loss: 1.4768 - acc_top1: 0.9829 - acc_top2: 0.9955 - 13ms/step + step 938/938 - loss: 1.4612 - acc_top1: 0.9836 - acc_top2: 0.9956 - 13ms/step + Epoch 2/2 + step 200/938 - loss: 1.4705 - acc_top1: 0.9834 - acc_top2: 0.9959 - 13ms/step + step 400/938 - loss: 1.4620 - acc_top1: 0.9833 - acc_top2: 0.9960 - 13ms/step + step 600/938 - loss: 1.4613 - acc_top1: 0.9830 - acc_top2: 0.9960 - 13ms/step + step 800/938 - loss: 1.4763 - acc_top1: 0.9831 - acc_top2: 0.9960 - 13ms/step + step 938/938 - loss: 1.4924 - acc_top1: 0.9834 - acc_top2: 0.9959 - 13ms/step + + +使用model.evaluate来预测模型 +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. code:: ipython3 + + model.evaluate(test_dataset, log_freq=20, batch_size=64) + + +.. parsed-literal:: + + Eval begin... + step 20/157 - loss: 1.5246 - acc_top1: 0.9773 - acc_top2: 0.9969 - 6ms/step + step 40/157 - loss: 1.4622 - acc_top1: 0.9758 - acc_top2: 0.9961 - 6ms/step + step 60/157 - loss: 1.5241 - acc_top1: 0.9763 - acc_top2: 0.9951 - 6ms/step + step 80/157 - loss: 1.4612 - acc_top1: 0.9787 - acc_top2: 0.9959 - 6ms/step + step 100/157 - loss: 1.4612 - acc_top1: 0.9823 - acc_top2: 0.9967 - 5ms/step + step 120/157 - loss: 1.4612 - acc_top1: 0.9835 - acc_top2: 0.9966 - 5ms/step + step 140/157 - loss: 1.4612 - acc_top1: 0.9844 - acc_top2: 0.9969 - 5ms/step + step 157/157 - loss: 1.4612 - acc_top1: 0.9838 - acc_top2: 0.9966 - 5ms/step + Eval samples: 10000 + + + + +.. 
parsed-literal:: + + {'loss': [1.4611504], 'acc_top1': 0.9838, 'acc_top2': 0.9966} + + + +训练方式二结束 +~~~~~~~~~~~~~~ + +以上就是训练方式二,可以快速、高效的完成网络模型训练与预测。 + +总结 +---- + +以上就是用LeNet对手写数字数据及MNIST进行分类。本示例提供了两种训练模型的方式,一种可以快速完成模型的组建与预测,非常适合新手用户上手。另一种则需要多个步骤来完成模型的训练,适合进阶用户使用。 diff --git a/doc/paddle/tutorial/cv_case/mnist_lenet_classification/mnist_lenet_classification_files/mnist_lenet_classification_001.png b/doc/paddle/tutorial/cv_case/mnist_lenet_classification/mnist_lenet_classification_files/mnist_lenet_classification_001.png new file mode 100644 index 0000000000000000000000000000000000000000..91327f435ad51a582cc577c32fcaef3126e383b6 Binary files /dev/null and b/doc/paddle/tutorial/cv_case/mnist_lenet_classification/mnist_lenet_classification_files/mnist_lenet_classification_001.png differ diff --git a/doc/paddle/tutorial/index_cn.rst b/doc/paddle/tutorial/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2daf66c34ae9ad0edb60ad5eef6c5516d69dc72b --- /dev/null +++ b/doc/paddle/tutorial/index_cn.rst @@ -0,0 +1,23 @@ +######## +应用实践 +######## + +如果您已经初步了解了PaddlePaddle,期望可以针对实际问题建模、搭建自己网络,本模块提供了一些 Paddle 的具体典型案例供您参考: + +本章文档将指导您如何使用PaddlePaddle完成基础的深度学习任务 + +本章文档涉及大量了深度学习基础知识,也介绍了如何使用PaddlePaddle实现这些内容,请参阅以下说明了解如何使用: + + +**内容简介** + + - `快速上手 <./simple_case/index_cn.html>`_ :快速了解Paddle 2的特性与功能。 + - `计算机视觉 <./cv_case/index_cn.html>`_ :介绍使用 Paddle 解决计算机视觉领域的案例 + - `自然语言处理 <./nlp_case/index_cn.html>`_ :介绍使用 Paddle 解决自然语言处理领域的案例 + +.. toctree:: + :hidden: + + quick_start/index_cn.rst + cv_case/index_cn.rst + nlp_case/index_cn.rst diff --git a/doc/paddle/tutorial/nlp_case/imdb_bow_classification/imdb_bow_classification.ipynb b/doc/paddle/tutorial/nlp_case/imdb_bow_classification/imdb_bow_classification.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..87dbce940c4639f024b9f9cc20428642a97f1989 --- /dev/null +++ b/doc/paddle/tutorial/nlp_case/imdb_bow_classification/imdb_bow_classification.ipynb @@ -0,0 +1,377 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# IMDB 数据集使用BOW网络的文本分类\n", + "\n", + "本示例教程演示如何在IMDB数据集上用简单的BOW网络完成文本分类的任务。\n", + "\n", + "IMDB数据集是一个对电影评论标注为正向评论与负向评论的数据集,共有25000条文本数据作为训练集,25000条文本数据作为测试集。\n", + "该数据集的官方地址为: http://ai.stanford.edu/~amaas/data/sentiment/" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 环境设置\n", + "\n", + "本示例基于飞桨开源框架2.0版本。" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2.0.0-beta0\n" + ] + } + ], + "source": [ + "import paddle\n", + "import numpy as np\n", + "\n", + "paddle.disable_static()\n", + "print(paddle.__version__)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 加载数据\n", + "\n", + "我们会使用`paddle.dataset`完成数据下载,构建字典和准备数据读取器。在飞桨2.0版本中,推荐使用padding的方式来对同一个batch中长度不一的数据进行补齐,所以在字典中,我们还会添加一个特殊的``词,用来在后续对batch中较短的句子进行填充。" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Loading IMDB word dict....\n" + ] + } + ], + "source": [ + "print(\"Loading IMDB word dict....\")\n", + "word_dict = paddle.dataset.imdb.word_dict()\n", + "\n", + "train_reader = paddle.dataset.imdb.train(word_dict)\n", + "test_reader = paddle.dataset.imdb.test(word_dict)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + 
"the:0\n", + "and:1\n", + "a:2\n", + "of:3\n", + "to:4\n", + "...\n", + "virtual:5143\n", + "warriors:5144\n", + "widely:5145\n", + ":5146\n", + ":5147\n", + "totally 5148 words\n" + ] + } + ], + "source": [ + "# add a pad token to the dict for later padding the sequence\n", + "word_dict[''] = len(word_dict)\n", + "\n", + "for k in list(word_dict)[:5]:\n", + " print(\"{}:{}\".format(k.decode('ASCII'), word_dict[k]))\n", + "\n", + "print(\"...\")\n", + "\n", + "for k in list(word_dict)[-5:]:\n", + " print(\"{}:{}\".format(k if isinstance(k, str) else k.decode('ASCII'), word_dict[k]))\n", + "\n", + "print(\"totally {} words\".format(len(word_dict)))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 参数设置\n", + "\n", + "在这里我们设置一下词表大小,`embedding`的大小,batch_size,等等" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [], + "source": [ + "vocab_size = len(word_dict)\n", + "emb_size = 256\n", + "seq_len = 200\n", + "batch_size = 32\n", + "epoch_num = 2\n", + "pad_id = word_dict['']\n", + "\n", + "classes = ['negative', 'positive']\n", + "\n", + "def ids_to_str(ids):\n", + " #print(ids)\n", + " words = []\n", + " for k in ids:\n", + " w = list(word_dict)[k]\n", + " words.append(w if isinstance(w, str) else w.decode('ASCII'))\n", + " return \" \".join(words)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "在这里,取出一条数据打印出来看看,可以对数据有一个初步直观的印象。" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[5146, 43, 71, 6, 1092, 14, 0, 878, 130, 151, 5146, 18, 281, 747, 0, 5146, 3, 5146, 2165, 37, 5146, 46, 5, 71, 4089, 377, 162, 46, 5, 32, 1287, 300, 35, 203, 2136, 565, 14, 2, 253, 26, 146, 61, 372, 1, 615, 5146, 5, 30, 0, 50, 3290, 6, 2148, 14, 0, 5146, 11, 17, 451, 24, 4, 127, 10, 0, 878, 130, 43, 2, 50, 5146, 751, 5146, 5, 2, 221, 3727, 6, 9, 1167, 373, 9, 5, 5146, 7, 5, 1343, 13, 2, 5146, 1, 250, 7, 98, 4270, 56, 2316, 0, 928, 11, 11, 9, 16, 5, 5146, 5146, 6, 50, 69, 27, 280, 27, 108, 1045, 0, 2633, 4177, 3180, 17, 1675, 1, 2571] 0\n", + " has much in common with the third man another film set among the of europe like there is much inventive camera work there is an innocent american who gets emotionally involved with a woman he doesnt really understand and whose is all the more striking in contrast with the br but id have to say that the third man has a more storyline is a bit disjointed in this respect perhaps this is it is presented as a and making it too coherent would spoil the effect br br this movie is in more than one sense one never sees the sun shine grim but intriguing and frightening\n", + "negative\n" + ] + } + ], + "source": [ + "# 取出来第一条数据看看样子。\n", + "sent, label = next(train_reader())\n", + "print(sent, label)\n", + "\n", + "print(ids_to_str(sent))\n", + "print(classes[label])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 用padding的方式对齐数据\n", + "\n", + "文本数据中,每一句话的长度都是不一样的,为了方便后续的神经网络的计算,常见的处理方式是把数据集中的数据都统一成同样长度的数据。这包括:对于较长的数据进行截断处理,对于较短的数据用特殊的词``进行填充。接下来的代码会对数据集中的数据进行这样的处理。" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "(25000, 200)\n", + "(25000, 1)\n", + "(25000, 200)\n", + "(25000, 1)\n", + " has much in common with the third man another film set among the of europe like there is much inventive camera work there is an innocent american who gets 
emotionally involved with a woman he doesnt really understand and whose is all the more striking in contrast with the br but id have to say that the third man has a more storyline is a bit disjointed in this respect perhaps this is it is presented as a and making it too coherent would spoil the effect br br this movie is in more than one sense one never sees the sun shine grim but intriguing and frightening \n", + " is the most original movie ive seen in years if you like unique thrillers that are influenced by film noir then this is just the right cure for all of those hollywood summer the theaters these days von like breaking the waves have gotten more but this is really his best work it is without being distracting and offers the perfect combination of suspense and dark humor its too bad he decided cameras were the wave of the future its hard to say who talked him away from the style he here but its everyones loss that he went into his heavily direction instead \n", + " von is never in trying out new techniques some of them are very original while others are best br he depicts germany as a train journey with so many cities lying in ruins a young american of german descent feels to help in their it is not a simple task as he quickly finds outbr br his uncle finds him a job as a night on the line his job is to to the needs of the passengers when the shoes are a mark is made on the a terrible argument when a passengers shoes are not despite the fact they have been there are many to the german of to such stupid br the journey is like an mans through life with all its and in one sequence through the back to discover them filled with bodies appearing to have just escaped from these images horrible as they are are as in a dream each with its own terrible impact yet br\n" + ] + } + ], + "source": [ + "def create_padded_dataset(reader):\n", + " padded_sents = []\n", + " labels = []\n", + " for batch_id, data in enumerate(reader):\n", + " sent, label = data\n", + " padded_sent = sent[:seq_len] + [pad_id] * (seq_len - len(sent))\n", + " padded_sents.append(padded_sent)\n", + " labels.append(label)\n", + " return np.array(padded_sents), np.expand_dims(np.array(labels), axis=1)\n", + "\n", + "train_sents, train_labels = create_padded_dataset(train_reader())\n", + "test_sents, test_labels = create_padded_dataset(test_reader())\n", + "\n", + "print(train_sents.shape)\n", + "print(train_labels.shape)\n", + "print(test_sents.shape)\n", + "print(test_labels.shape)\n", + "\n", + "for sent in train_sents[:3]:\n", + " print(ids_to_str(sent))\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 组建网络\n", + "\n", + "本示例中,我们将会使用一个不考虑词的顺序的BOW的网络,在查找到每个词对应的embedding后,简单的取平均,作为一个句子的表示。然后用`Linear`进行线性变换。为了防止过拟合,我们还使用了`Dropout`。" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": {}, + "outputs": [], + "source": [ + "class MyNet(paddle.nn.Layer):\n", + " def __init__(self):\n", + " super(MyNet, self).__init__()\n", + " self.emb = paddle.nn.Embedding(vocab_size, emb_size)\n", + " self.fc = paddle.nn.Linear(in_features=emb_size, out_features=2)\n", + " self.dropout = paddle.nn.Dropout(0.5)\n", + "\n", + " def forward(self, x):\n", + " x = self.emb(x)\n", + " x = paddle.reduce_mean(x, dim=1)\n", + " x = self.dropout(x)\n", + " x = self.fc(x)\n", + " return x" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 开始模型的训练\n" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + 
"output_type": "stream", + "text": [ + "epoch: 0, batch_id: 0, loss is: [0.6918494]\n", + "epoch: 0, batch_id: 500, loss is: [0.33142853]\n", + "[validation] accuracy/loss: 0.8506321907043457/0.3620821535587311\n", + "epoch: 1, batch_id: 0, loss is: [0.37161]\n", + "epoch: 1, batch_id: 500, loss is: [0.2296829]\n", + "[validation] accuracy/loss: 0.8622759580612183/0.3286365270614624\n" + ] + } + ], + "source": [ + "def train(model):\n", + " model.train()\n", + "\n", + " opt = paddle.optimizer.Adam(learning_rate=0.001, parameters=model.parameters())\n", + "\n", + " for epoch in range(epoch_num):\n", + " # shuffle data\n", + " perm = np.random.permutation(len(train_sents))\n", + " train_sents_shuffled = train_sents[perm]\n", + " train_labels_shuffled = train_labels[perm]\n", + " \n", + " for batch_id in range(len(train_sents_shuffled) // batch_size):\n", + " x_data = train_sents_shuffled[(batch_id * batch_size):((batch_id+1)*batch_size)]\n", + " y_data = train_labels_shuffled[(batch_id * batch_size):((batch_id+1)*batch_size)]\n", + " \n", + " sent = paddle.to_tensor(x_data)\n", + " label = paddle.to_tensor(y_data)\n", + " \n", + " logits = model(sent)\n", + " loss = paddle.nn.functional.softmax_with_cross_entropy(logits, label)\n", + " \n", + " avg_loss = paddle.mean(loss)\n", + " if batch_id % 500 == 0:\n", + " print(\"epoch: {}, batch_id: {}, loss is: {}\".format(epoch, batch_id, avg_loss.numpy()))\n", + " avg_loss.backward()\n", + " opt.step()\n", + " opt.clear_grad()\n", + "\n", + " # evaluate model after one epoch\n", + " model.eval()\n", + " accuracies = []\n", + " losses = []\n", + " for batch_id in range(len(test_sents) // batch_size):\n", + " x_data = test_sents[(batch_id * batch_size):((batch_id+1)*batch_size)]\n", + " y_data = test_labels[(batch_id * batch_size):((batch_id+1)*batch_size)]\n", + " \n", + " sent = paddle.to_tensor(x_data)\n", + " label = paddle.to_tensor(y_data)\n", + "\n", + " logits = model(sent)\n", + " loss = paddle.nn.functional.softmax_with_cross_entropy(logits, label)\n", + " acc = paddle.metric.accuracy(logits, label)\n", + " \n", + " accuracies.append(acc.numpy())\n", + " losses.append(loss.numpy())\n", + " \n", + " avg_acc, avg_loss = np.mean(accuracies), np.mean(losses)\n", + " print(\"[validation] accuracy/loss: {}/{}\".format(avg_acc, avg_loss))\n", + " \n", + " model.train()\n", + " \n", + "model = MyNet()\n", + "train(model)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## The End\n", + "\n", + "可以看到,在这个数据集上,经过两轮的迭代可以得到86%左右的准确率。你也可以通过调整网络结构和超参数,来获得更好的效果。" + ] + } + ], + "metadata": { + "colab": { + "name": "cifar-10-cnn.ipynb", + "private_outputs": true, + "provenance": [], + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.3" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/doc/paddle/tutorial/nlp_case/imdb_bow_classification/imdb_bow_classification.rst b/doc/paddle/tutorial/nlp_case/imdb_bow_classification/imdb_bow_classification.rst new file mode 100644 index 0000000000000000000000000000000000000000..71d60426a0e22fd47a4ba475e95a4a2c2ae8e12f --- /dev/null +++ b/doc/paddle/tutorial/nlp_case/imdb_bow_classification/imdb_bow_classification.rst @@ -0,0 +1,258 @@ +IMDB 数据集使用BOW网络的文本分类 
+================================ + +本示例教程演示如何在IMDB数据集上用简单的BOW网络完成文本分类的任务。 + +IMDB数据集是一个对电影评论标注为正向评论与负向评论的数据集,共有25000条文本数据作为训练集,25000条文本数据作为测试集。 +该数据集的官方地址为: http://ai.stanford.edu/~amaas/data/sentiment/ + +环境设置 +-------- + +本示例基于飞桨开源框架2.0版本。 + +.. code:: ipython3 + + import paddle + import numpy as np + + paddle.disable_static() + print(paddle.__version__) + + +.. parsed-literal:: + + 2.0.0-beta0 + + +加载数据 +-------- + +我们会使用\ ``paddle.dataset``\ 完成数据下载,构建字典和准备数据读取器。在飞桨框架2.0版本中,推荐使用padding的方式来对同一个batch中长度不一的数据进行补齐,所以在字典中,我们还会添加一个特殊的\ ````\ 词,用来在后续对batch中较短的句子进行填充。 + +.. code:: ipython3 + + print("Loading IMDB word dict....") + word_dict = paddle.dataset.imdb.word_dict() + + train_reader = paddle.dataset.imdb.train(word_dict) + test_reader = paddle.dataset.imdb.test(word_dict) + + + +.. parsed-literal:: + + Loading IMDB word dict.... + + +.. code:: ipython3 + + # add a pad token to the dict for later padding the sequence + word_dict[''] = len(word_dict) + + for k in list(word_dict)[:5]: + print("{}:{}".format(k.decode('ASCII'), word_dict[k])) + + print("...") + + for k in list(word_dict)[-5:]: + print("{}:{}".format(k if isinstance(k, str) else k.decode('ASCII'), word_dict[k])) + + print("totally {} words".format(len(word_dict))) + + +.. parsed-literal:: + + the:0 + and:1 + a:2 + of:3 + to:4 + ... + virtual:5143 + warriors:5144 + widely:5145 + :5146 + :5147 + totally 5148 words + + +参数设置 +-------- + +在这里我们设置一下词表大小,\ ``embedding``\ 的大小,batch_size,等等 + +.. code:: ipython3 + + vocab_size = len(word_dict) + emb_size = 256 + seq_len = 200 + batch_size = 32 + epoch_num = 2 + pad_id = word_dict[''] + + classes = ['negative', 'positive'] + + def ids_to_str(ids): + #print(ids) + words = [] + for k in ids: + w = list(word_dict)[k] + words.append(w if isinstance(w, str) else w.decode('ASCII')) + return " ".join(words) + +在这里,取出一条数据打印出来看看,可以对数据有一个初步直观的印象。 + +.. code:: ipython3 + + # 取出来第一条数据看看样子。 + sent, label = next(train_reader()) + print(sent, label) + + print(ids_to_str(sent)) + print(classes[label]) + + +.. parsed-literal:: + + [5146, 43, 71, 6, 1092, 14, 0, 878, 130, 151, 5146, 18, 281, 747, 0, 5146, 3, 5146, 2165, 37, 5146, 46, 5, 71, 4089, 377, 162, 46, 5, 32, 1287, 300, 35, 203, 2136, 565, 14, 2, 253, 26, 146, 61, 372, 1, 615, 5146, 5, 30, 0, 50, 3290, 6, 2148, 14, 0, 5146, 11, 17, 451, 24, 4, 127, 10, 0, 878, 130, 43, 2, 50, 5146, 751, 5146, 5, 2, 221, 3727, 6, 9, 1167, 373, 9, 5, 5146, 7, 5, 1343, 13, 2, 5146, 1, 250, 7, 98, 4270, 56, 2316, 0, 928, 11, 11, 9, 16, 5, 5146, 5146, 6, 50, 69, 27, 280, 27, 108, 1045, 0, 2633, 4177, 3180, 17, 1675, 1, 2571] 0 + has much in common with the third man another film set among the of europe like there is much inventive camera work there is an innocent american who gets emotionally involved with a woman he doesnt really understand and whose is all the more striking in contrast with the br but id have to say that the third man has a more storyline is a bit disjointed in this respect perhaps this is it is presented as a and making it too coherent would spoil the effect br br this movie is in more than one sense one never sees the sun shine grim but intriguing and frightening + negative + + +用padding的方式对齐数据 +----------------------- + +文本数据中,每一句话的长度都是不一样的,为了方便后续的神经网络的计算,常见的处理方式是把数据集中的数据都统一成同样长度的数据。这包括:对于较长的数据进行截断处理,对于较短的数据用特殊的词\ ````\ 进行填充。接下来的代码会对数据集中的数据进行这样的处理。 + +.. 
code:: ipython3 + + def create_padded_dataset(reader): + padded_sents = [] + labels = [] + for batch_id, data in enumerate(reader): + sent, label = data + padded_sent = sent[:seq_len] + [pad_id] * (seq_len - len(sent)) + padded_sents.append(padded_sent) + labels.append(label) + return np.array(padded_sents), np.expand_dims(np.array(labels), axis=1) + + train_sents, train_labels = create_padded_dataset(train_reader()) + test_sents, test_labels = create_padded_dataset(test_reader()) + + print(train_sents.shape) + print(train_labels.shape) + print(test_sents.shape) + print(test_labels.shape) + + for sent in train_sents[:3]: + print(ids_to_str(sent)) + + + +.. parsed-literal:: + + (25000, 200) + (25000, 1) + (25000, 200) + (25000, 1) + has much in common with the third man another film set among the of europe like there is much inventive camera work there is an innocent american who gets emotionally involved with a woman he doesnt really understand and whose is all the more striking in contrast with the br but id have to say that the third man has a more storyline is a bit disjointed in this respect perhaps this is it is presented as a and making it too coherent would spoil the effect br br this movie is in more than one sense one never sees the sun shine grim but intriguing and frightening + is the most original movie ive seen in years if you like unique thrillers that are influenced by film noir then this is just the right cure for all of those hollywood summer the theaters these days von like breaking the waves have gotten more but this is really his best work it is without being distracting and offers the perfect combination of suspense and dark humor its too bad he decided cameras were the wave of the future its hard to say who talked him away from the style he here but its everyones loss that he went into his heavily direction instead + von is never in trying out new techniques some of them are very original while others are best br he depicts germany as a train journey with so many cities lying in ruins a young american of german descent feels to help in their it is not a simple task as he quickly finds outbr br his uncle finds him a job as a night on the line his job is to to the needs of the passengers when the shoes are a mark is made on the a terrible argument when a passengers shoes are not despite the fact they have been there are many to the german of to such stupid br the journey is like an mans through life with all its and in one sequence through the back to discover them filled with bodies appearing to have just escaped from these images horrible as they are are as in a dream each with its own terrible impact yet br + + +组建网络 +-------- + +本示例中,我们将会使用一个不考虑词的顺序的BOW的网络,在查找到每个词对应的embedding后,简单的取平均,作为一个句子的表示。然后用\ ``Linear``\ 进行线性变换。为了防止过拟合,我们还使用了\ ``Dropout``\ 。 + +.. code:: ipython3 + + class MyNet(paddle.nn.Layer): + def __init__(self): + super(MyNet, self).__init__() + self.emb = paddle.nn.Embedding(vocab_size, emb_size) + self.fc = paddle.nn.Linear(in_features=emb_size, out_features=2) + self.dropout = paddle.nn.Dropout(0.5) + + def forward(self, x): + x = self.emb(x) + x = paddle.reduce_mean(x, dim=1) + x = self.dropout(x) + x = self.fc(x) + return x + +开始模型的训练 +-------------- + +.. 
code:: ipython3 + + def train(model): + model.train() + + opt = paddle.optimizer.Adam(learning_rate=0.001, parameters=model.parameters()) + + for epoch in range(epoch_num): + # shuffle data + perm = np.random.permutation(len(train_sents)) + train_sents_shuffled = train_sents[perm] + train_labels_shuffled = train_labels[perm] + + for batch_id in range(len(train_sents_shuffled) // batch_size): + x_data = train_sents_shuffled[(batch_id * batch_size):((batch_id+1)*batch_size)] + y_data = train_labels_shuffled[(batch_id * batch_size):((batch_id+1)*batch_size)] + + sent = paddle.to_tensor(x_data) + label = paddle.to_tensor(y_data) + + logits = model(sent) + loss = paddle.nn.functional.softmax_with_cross_entropy(logits, label) + + avg_loss = paddle.mean(loss) + if batch_id % 500 == 0: + print("epoch: {}, batch_id: {}, loss is: {}".format(epoch, batch_id, avg_loss.numpy())) + avg_loss.backward() + opt.step() + opt.clear_grad() + + # evaluate model after one epoch + model.eval() + accuracies = [] + losses = [] + for batch_id in range(len(test_sents) // batch_size): + x_data = test_sents[(batch_id * batch_size):((batch_id+1)*batch_size)] + y_data = test_labels[(batch_id * batch_size):((batch_id+1)*batch_size)] + + sent = paddle.to_tensor(x_data) + label = paddle.to_tensor(y_data) + + logits = model(sent) + loss = paddle.nn.functional.softmax_with_cross_entropy(logits, label) + acc = paddle.metric.accuracy(logits, label) + + accuracies.append(acc.numpy()) + losses.append(loss.numpy()) + + avg_acc, avg_loss = np.mean(accuracies), np.mean(losses) + print("[validation] accuracy/loss: {}/{}".format(avg_acc, avg_loss)) + + model.train() + + model = MyNet() + train(model) + + +.. parsed-literal:: + + epoch: 0, batch_id: 0, loss is: [0.6918494] + epoch: 0, batch_id: 500, loss is: [0.33142853] + [validation] accuracy/loss: 0.8506321907043457/0.3620821535587311 + epoch: 1, batch_id: 0, loss is: [0.37161] + epoch: 1, batch_id: 500, loss is: [0.2296829] + [validation] accuracy/loss: 0.8622759580612183/0.3286365270614624 + + +The End +------- + +可以看到,在这个数据集上,经过两轮的迭代可以得到86%左右的准确率。你也可以通过调整网络结构和超参数,来获得更好的效果。 diff --git a/doc/paddle/tutorial/nlp_case/index_cn.rst b/doc/paddle/tutorial/nlp_case/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e93096215a1e3791a141be2392028f313087a219 --- /dev/null +++ b/doc/paddle/tutorial/nlp_case/index_cn.rst @@ -0,0 +1,19 @@ +################ +自然语言处理 +################ + + +在这里PaddlePaddle为大家提供了一篇nlp的教程供大家学习: + + - `N-Gram <./n_gram_model/n_gram_model.html>`_ :介绍使用 Paddle 实现N-Gram 模型。 + - `文本分类 <./imdb_bow_classification/imdb_bow_classification.html>`_ :介绍使用 Paddle 在IMDB数据集上完成文本分类。 + - `文本翻译 <./seq2seq_with_attention/seq2seq_with_attention.html>`_ :介绍使用 Paddle 实现文本翻译。 + + +.. 
toctree:: + :hidden: + :titlesonly: + + n_gram_model/n_gram_model.rst + imdb_bow_classification/imdb_bow_classification.rst + seq2seq_with_attention/seq2seq_with_attention.rst diff --git a/doc/paddle/tutorial/nlp_case/n_gram_model/n_gram_model.ipynb b/doc/paddle/tutorial/nlp_case/n_gram_model/n_gram_model.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..cd8e18f41f4ae059d09185762d0aa2d78ff5cdfe --- /dev/null +++ b/doc/paddle/tutorial/nlp_case/n_gram_model/n_gram_model.ipynb @@ -0,0 +1,428 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "# 用N-Gram模型在莎士比亚文集中训练word embedding\n", + "N-gram 是计算机语言学和概率论范畴内的概念,是指给定的一段文本中N个项目的序列。\n", + "N=1 时 N-gram 又称为 unigram,N=2 称为 bigram,N=3 称为 trigram,以此类推。实际应用通常采用 bigram 和 trigram 进行计算。\n", + "本示例在莎士比亚文集上实现了trigram。" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 环境\n", + "本教程基于paddle-2.0-beta编写,如果您的环境不是本版本,请先安装paddle-2.0-beta。" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'2.0.0-beta0'" + ] + }, + "execution_count": 1, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import paddle\n", + "paddle.__version__" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 数据集&&相关参数\n", + "训练数据集采用了莎士比亚文集,[下载](https://ocw.mit.edu/ans7870/6/6.006/s08/lecturenotes/files/t8.shakespeare.txt),保存为txt格式即可。
\n", + "context_size设为2,意味着是trigram。embedding_dim设为256。" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "--2020-09-12 13:49:29-- https://ocw.mit.edu/ans7870/6/6.006/s08/lecturenotes/files/t8.shakespeare.txt\n", + "正在连接 172.19.57.45:3128... 已连接。\n", + "已发出 Proxy 请求,正在等待回应... 200 OK\n", + "长度:5458199 (5.2M) [text/plain]\n", + "正在保存至: “t8.shakespeare.txt”\n", + "\n", + "t8.shakespeare.txt 100%[===================>] 5.21M 2.01MB/s 用时 2.6s \n", + "\n", + "2020-09-12 13:49:33 (2.01 MB/s) - 已保存 “t8.shakespeare.txt” [5458199/5458199])\n", + "\n" + ] + } + ], + "source": [ + "!wget https://ocw.mit.edu/ans7870/6/6.006/s08/lecturenotes/files/t8.shakespeare.txt" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "embedding_dim = 256\n", + "context_size = 2" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Length of text: 5458199 characters\n" + ] + } + ], + "source": [ + "# 文件路径\n", + "path_to_file = './t8.shakespeare.txt'\n", + "test_sentence = open(path_to_file, 'rb').read().decode(encoding='utf-8')\n", + "\n", + "# 文本长度是指文本中的字符个数\n", + "print ('Length of text: {} characters'.format(len(test_sentence)))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 去除标点符号\n", + "因为标点符号本身无实际意义,用`string`库中的punctuation,完成英文符号的替换。" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'!': '', '\"': '', '#': '', '$': '', '%': '', '&': '', \"'\": '', '(': '', ')': '', '*': '', '+': '', ',': '', '-': '', '.': '', '/': '', ':': '', ';': '', '<': '', '=': '', '>': '', '?': '', '@': '', '[': '', '\\\\': '', ']': '', '^': '', '_': '', '`': '', '{': '', '|': '', '}': '', '~': ''}\n" + ] + } + ], + "source": [ + "from string import punctuation\n", + "process_dicts={i:'' for i in punctuation}\n", + "print(process_dicts)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "28343\n" + ] + } + ], + "source": [ + "punc_table = str.maketrans(process_dicts)\n", + "test_sentence = test_sentence.translate(punc_table)\n", + "test_sentence = test_sentence.lower().split()\n", + "vocab = set(test_sentence)\n", + "print(len(vocab))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 数据预处理\n", + "将文本被拆成了元组的形式,格式为(('第一个词', '第二个词'), '第三个词');其中,第三个词就是我们的目标。" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[[['this', 'is'], 'the'], [['is', 'the'], '100th'], [['the', '100th'], 'etext']]\n" + ] + } + ], + "source": [ + "trigram = [[[test_sentence[i], test_sentence[i + 1]], test_sentence[i + 2]]\n", + " for i in range(len(test_sentence) - 2)]\n", + "\n", + "word_to_idx = {word: i for i, word in enumerate(vocab)}\n", + "idx_to_word = {word_to_idx[word]: word for word in word_to_idx}\n", + "# 看一下数据集\n", + "print(trigram[:3])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 构建`Dataset`类 加载数据\n", + "用`paddle.io.Dataset`构建数据集,然后作为参数传入到`paddle.io.DataLoader`,完成数据集的加载。" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": 
[ + "import paddle\n", + "import numpy as np\n", + "batch_size = 256\n", + "paddle.disable_static()\n", + "class TrainDataset(paddle.io.Dataset):\n", + " def __init__(self, tuple_data):\n", + " self.tuple_data = tuple_data\n", + "\n", + " def __getitem__(self, idx):\n", + " data = self.tuple_data[idx][0]\n", + " label = self.tuple_data[idx][1]\n", + " data = np.array(list(map(lambda w: word_to_idx[w], data)))\n", + " label = np.array(word_to_idx[label])\n", + " return data, label\n", + " \n", + " def __len__(self):\n", + " return len(self.tuple_data)\n", + "train_dataset = TrainDataset(trigram)\n", + "train_loader = paddle.io.DataLoader(train_dataset,places=paddle.CPUPlace(), return_list=True,\n", + " shuffle=True, batch_size=batch_size, drop_last=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 组网&训练\n", + "这里用paddle动态图的方式组网。为了构建Trigram模型,用一层 `Embedding` 与两层 `Linear` 完成构建。`Embedding` 层对输入的前两个单词embedding,然后输入到后面的两个`Linear`层中,完成特征提取。" + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "metadata": {}, + "outputs": [], + "source": [ + "import paddle\n", + "import numpy as np\n", + "import paddle.nn.functional as F\n", + "hidden_size = 1024\n", + "class NGramModel(paddle.nn.Layer):\n", + " def __init__(self, vocab_size, embedding_dim, context_size):\n", + " super(NGramModel, self).__init__()\n", + " self.embedding = paddle.nn.Embedding(num_embeddings=vocab_size, embedding_dim=embedding_dim)\n", + " self.linear1 = paddle.nn.Linear(context_size * embedding_dim, hidden_size)\n", + " self.linear2 = paddle.nn.Linear(hidden_size, len(vocab))\n", + "\n", + " def forward(self, x):\n", + " x = self.embedding(x)\n", + " x = paddle.reshape(x, [-1, context_size * embedding_dim])\n", + " x = self.linear1(x)\n", + " x = F.relu(x)\n", + " x = self.linear2(x)\n", + " return x" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 定义`train()`函数,对模型进行训练。" + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "epoch: 0, batch_id: 0, loss is: [10.252176]\n", + "epoch: 0, batch_id: 500, loss is: [6.6429553]\n", + "epoch: 0, batch_id: 1000, loss is: [6.801544]\n", + "epoch: 0, batch_id: 1500, loss is: [6.7114644]\n", + "epoch: 0, batch_id: 2000, loss is: [6.628998]\n", + "epoch: 0, batch_id: 2500, loss is: [6.511376]\n", + "epoch: 0, batch_id: 3000, loss is: [6.878798]\n", + "epoch: 0, batch_id: 3500, loss is: [6.8752203]\n", + "epoch: 1, batch_id: 0, loss is: [6.5908413]\n", + "epoch: 1, batch_id: 500, loss is: [6.9765778]\n", + "epoch: 1, batch_id: 1000, loss is: [6.603841]\n", + "epoch: 1, batch_id: 1500, loss is: [6.9935036]\n", + "epoch: 1, batch_id: 2000, loss is: [6.751287]\n", + "epoch: 1, batch_id: 2500, loss is: [7.1222277]\n", + "epoch: 1, batch_id: 3000, loss is: [6.6431484]\n", + "epoch: 1, batch_id: 3500, loss is: [6.6024966]\n" + ] + } + ], + "source": [ + "import paddle.nn.functional as F\n", + "vocab_size = len(vocab)\n", + "epochs = 2\n", + "losses = []\n", + "def train(model):\n", + " model.train()\n", + " optim = paddle.optimizer.Adam(learning_rate=0.01, parameters=model.parameters())\n", + " for epoch in range(epochs):\n", + " for batch_id, data in enumerate(train_loader()):\n", + " x_data = data[0]\n", + " y_data = data[1]\n", + " predicts = model(x_data)\n", + " y_data = paddle.reshape(y_data, shape=[-1, 1])\n", + " loss = F.softmax_with_cross_entropy(predicts, y_data)\n", + " avg_loss = paddle.mean(loss)\n", + " 
avg_loss.backward()\n", + " if batch_id % 500 == 0:\n", + " losses.append(avg_loss.numpy())\n", + " print(\"epoch: {}, batch_id: {}, loss is: {}\".format(epoch, batch_id, avg_loss.numpy())) \n", + " optim.step()\n", + " optim.clear_grad()\n", + "model = NGramModel(vocab_size, embedding_dim, context_size)\n", + "train(model)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 打印loss下降曲线\n", + "通过可视化loss的曲线,可以看到模型训练的效果。" + ] + }, + { + "cell_type": "code", + "execution_count": 32, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[]" + ] + }, + "execution_count": 32, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXoAAAD4CAYAAADiry33AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+j8jraAAAgAElEQVR4nO3deXxU9b3/8dcn67AlYQlJ2F0QRVDQCNHq7aK2ar2irW3BfQG0tdVut623rf1da3vb6217a/XqRbAuCNparbZalaqtVmUJm4KIguwSCBASAknI8vn9MRMaYlCYmeQMZ97PxyOPzJxzMufDkvec+ZzvOV9zd0REJLwygi5AREQ6l4JeRCTkFPQiIiGnoBcRCTkFvYhIyGUFXUB7/fr182HDhgVdhojIYWXhwoXb3L2wo3UpF/TDhg2jvLw86DJERA4rZrbuQOvUuhERCTkFvYhIyCnoRURCTkEvIhJyCnoRkZBT0IuIhJyCXkQk5EIT9Jt21vHL51eydtvuoEsREUkpoQn6nXv2cseLq1ixuSboUkREUkpogr44LwLA5ur6gCsREUktoQn6Pj1yyMnMYEuNgl5EpK3QBL2ZUZSfS4WCXkRkP6EJeoi2byrUuhER2U+ogr4oL6IjehGRdkIV9CX50SN6dw+6FBGRlBGqoC/Ki9DQ1EJ1XWPQpYiIpIxQBX1xfnSIpdo3IiL/FK6gj42l1wlZEZF/+sigN7P7zGyrmS1rs6yPmc0xs3dj33sf4GevjG3zrpldmczCO7LviF5BLyKyz8Ec0d8PnNNu2feAF9x9OPBC7Pl+zKwP8CNgPDAO+NGB3hCSpX8vtW5ERNr7yKB395eBHe0WTwAeiD1+ALiwgx/9DDDH3Xe4exUwhw++YSRVTlYG/Xrm6OpYEZE24u3RF7n75tjjCqCog20GAhvaPN8YW9apinTRlIjIfhI+GevRQesJDVw3s6lmVm5m5ZWVlQnVU5If0Y3NRETaiDfot5hZCUDs+9YOttkEDG7zfFBs2Qe4+zR3L3X30sLCwjhLiirKi6h1IyLSRrxB/xTQOormSuDJDrZ5Dvi0mfWOnYT9dGxZpyrOi1C1p5H6xubO3pWIyGHhYIZXzgZeB0aY2UYzuxb4GXC2mb0LnBV7jpmVmtl0AHffAfwYWBD7ujW2rFMVxYZYbq1p6OxdiYgcFrI+agN3n3SAVWd2sG05MLnN8/uA++KuLg4l+a0TkNQxpG/3rty1iEhKCtWVsdDm6lj16UVEgBAGfWvrRidkRUSiQhf0vXKz6J6TSUW1evQiIhDCoDczijXEUkRkn9AFPURvbra5ui7oMkREUkI4gz4vwhYNrxQRAUIa9EX50dZNS4umFBQRCWXQF+dFaGpxtu/eG3QpIiKBC2fQawISEZF9whn0umhKRGSfcAa9JgkXEdknlEHfr2cumRnGFrVuRETCGfSZGUb/XrmagEREhJAGPWgCEhGRVqEN+uK8iHr0IiKEOejzI+rRi4gQ8qDf1dBEbUNT0KWIiAQqvEGfp4umREQgwaA3s5vMbJmZLTezr3ew/hNmVm1mS2JftySyv0NRlKcJSERE4CDmjD0QMxsFTAHGAXuBZ83sz+6+qt2mr7j7+QnUGBfdBkFEJCqRI/rjgHnuvsfdm4C/A59LTlmJ020QRESiEgn6ZcAZZtbXzLoD5wGDO9juVDNbamZ/MbPjO3ohM5tqZuVmVl5ZWZlASf/ULSeT/G7ZOqIXkbQXd+vG3VeY2c+B54HdwBKgud1mi4Ch7l5rZucBfwSGd/Ba04BpAKWlpUm7ibzG0ouIJHgy1t1nuPvJ7v4vQBXwTrv1Ne5eG3v8DJBtZv0S2eehaJ2AREQknSU66qZ/7PsQov35We3WF5uZxR6Pi+1veyL7PBTFeblq3YhI2ou7dRPzBzPrCzQCN7j7TjO7HsDd7wEuBr5sZk1AHTDR3btsfr/i/G5U1jbQ2NxCdmZoLxkQEflQCQW9u5/RwbJ72jy+E7gzkX0kojgvgjtU7mpgQEG3oMoQEQlUqA9zi/NzAQ2xFJH0Fuqg33d1rPr0IpLGQh30JfnRdo0mIBGRdBbqoO/dPZucrAwNsRSRtBbqoDczivJy1aMXkbQW6qCH2NWxat2ISBoLf9Dnd1PrRkTSWviDPi+XzdX1dOF1WiIiKSX0QV+UF6GhqYXqusagSxERCUTog37fBCRq34hImgp90JdopikRSXOhD/oiTRIuImku9EHfv5daNyKS3kIf9DlZGfTrmaMhliKStkIf9BBt36h1IyLpKi2CviQ/ohubiUjaSougL8rT3LEikr4SnTP2JjNbZmbLzezrHaw3M7vDzFaZ2RtmdlIi+4tXcV6Eqj2N1Dc2B7F7EZFAxR30ZjYKmAKMA04Ezjezo9ttdi4wPPY1Fbg73v0loig2ln5rTUMQuxcRCVQiR/THAfPcfY+7NwF/Bz7XbpsJwIMeNRcoMLOSBPYZlxJdHSsiaSyRoF8GnGFmfc2sO3AeMLjdNgOBDW2eb4wt24+ZTTWzcjMrr6ysTKCkjhXHLpraXF2X9NcWEUl1cQe9u68Afg48DzwLLAHiaoK7+zR3L3X30sLCwnhLOqDW1o1OyIpIOkroZKy7z3D3k939X4Aq4J12m2xi/6P8QbFlXapXbhbdczKpqFaPXkTST6KjbvrHvg8h2p+f1W6Tp4ArYqNvyoBqd9+cyD7jYWYU52uIpYikp6wEf/4PZtYXaARucPedZnY9gLvfAzxDtHe/CtgDXJ3g/uJWnBdRj15E0lJCQe/uZ3Sw7J42jx24IZF9JEtxXoR5a3YEXYaISJdLiytjIXpCdkt
(remaining base64-encoded PNG data omitted: the saved output of this cell is the loss-curve image drawn by plt.plot(losses))\n",
+       "text/plain": [
+        "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "import matplotlib.pyplot as plt\n", + "import matplotlib.ticker as ticker\n", + "%matplotlib inline\n", + "\n", + "plt.figure()\n", + "plt.plot(losses)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 预测\n", + "用训练好的模型进行预测。" + ] + }, + { + "cell_type": "code", + "execution_count": 36, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "the input words is: of, william\n", + "the predict words is: shakespeare\n", + "the true words is: shakespeare\n" + ] + } + ], + "source": [ + "import random\n", + "def test(model):\n", + " model.eval()\n", + " # 从最后10组数据中随机选取1个\n", + " idx = random.randint(len(trigram)-10, len(trigram)-1)\n", + " print('the input words is: ' + trigram[idx][0][0] + ', ' + trigram[idx][0][1])\n", + " x_data = list(map(lambda w: word_to_idx[w], trigram[idx][0]))\n", + " x_data = paddle.to_tensor(np.array(x_data))\n", + " predicts = model(x_data)\n", + " predicts = predicts.numpy().tolist()[0]\n", + " predicts = predicts.index(max(predicts))\n", + " print('the predict words is: ' + idx_to_word[predicts])\n", + " y_data = trigram[idx][1]\n", + " print('the true words is: ' + y_data)\n", + "test(model)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.3" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/doc/paddle/tutorial/nlp_case/n_gram_model/n_gram_model.rst b/doc/paddle/tutorial/nlp_case/n_gram_model/n_gram_model.rst new file mode 100644 index 0000000000000000000000000000000000000000..871042e24c1542af703d5487edc09ddc1e254b66 --- /dev/null +++ b/doc/paddle/tutorial/nlp_case/n_gram_model/n_gram_model.rst @@ -0,0 +1,289 @@ +用N-Gram模型在莎士比亚文集中训练word embedding +============================================== + +N-gram +是计算机语言学和概率论范畴内的概念,是指给定的一段文本中N个项目的序列。 +N=1 时 N-gram 又称为 unigram,N=2 称为 bigram,N=3 称为 +trigram,以此类推。实际应用通常采用 bigram 和 trigram 进行计算。 +本示例在莎士比亚文集上实现了trigram。 + +环境 +---- + +本教程基于paddle-2.0-beta编写,如果您的环境不是本版本,请先安装paddle-2.0-beta。 + +.. code:: ipython3 + + import paddle + paddle.__version__ + + + + +.. parsed-literal:: + + '2.0.0-beta0' + + + +数据集&&相关参数 +---------------- + +训练数据集采用了莎士比亚文集,\ `下载 `__\ ,保存为txt格式即可。 +context_size设为2,意味着是trigram。embedding_dim设为256。 + +.. code:: ipython3 + + !wget https://ocw.mit.edu/ans7870/6/6.006/s08/lecturenotes/files/t8.shakespeare.txt + + +.. parsed-literal:: + + --2020-09-12 13:49:29-- https://ocw.mit.edu/ans7870/6/6.006/s08/lecturenotes/files/t8.shakespeare.txt + 正在连接 172.19.57.45:3128... 已连接。 + 已发出 Proxy 请求,正在等待回应... 200 OK + 长度:5458199 (5.2M) [text/plain] + 正在保存至: “t8.shakespeare.txt” + + t8.shakespeare.txt 100%[===================>] 5.21M 2.01MB/s 用时 2.6s + + 2020-09-12 13:49:33 (2.01 MB/s) - 已保存 “t8.shakespeare.txt” [5458199/5458199]) + + + +.. code:: ipython3 + + embedding_dim = 256 + context_size = 2 + +.. code:: ipython3 + + # 文件路径 + path_to_file = './t8.shakespeare.txt' + test_sentence = open(path_to_file, 'rb').read().decode(encoding='utf-8') + + # 文本长度是指文本中的字符个数 + print ('Length of text: {} characters'.format(len(test_sentence))) + + +.. 
parsed-literal:: + + Length of text: 5458199 characters + + +去除标点符号 +------------ + +因为标点符号本身无实际意义,用\ ``string``\ 库中的punctuation,完成英文符号的替换。 + +.. code:: ipython3 + + from string import punctuation + process_dicts={i:'' for i in punctuation} + print(process_dicts) + + +.. parsed-literal:: + + {'!': '', '"': '', '#': '', '$': '', '%': '', '&': '', "'": '', '(': '', ')': '', '*': '', '+': '', ',': '', '-': '', '.': '', '/': '', ':': '', ';': '', '<': '', '=': '', '>': '', '?': '', '@': '', '[': '', '\\': '', ']': '', '^': '', '_': '', '`': '', '{': '', '|': '', '}': '', '~': ''} + + +.. code:: ipython3 + + punc_table = str.maketrans(process_dicts) + test_sentence = test_sentence.translate(punc_table) + test_sentence = test_sentence.lower().split() + vocab = set(test_sentence) + print(len(vocab)) + + +.. parsed-literal:: + + 28343 + + +数据预处理 +---------- + +将文本被拆成了元组的形式,格式为((‘第一个词’, ‘第二个词’), +‘第三个词’);其中,第三个词就是我们的目标。 + +.. code:: ipython3 + + trigram = [[[test_sentence[i], test_sentence[i + 1]], test_sentence[i + 2]] + for i in range(len(test_sentence) - 2)] + + word_to_idx = {word: i for i, word in enumerate(vocab)} + idx_to_word = {word_to_idx[word]: word for word in word_to_idx} + # 看一下数据集 + print(trigram[:3]) + + +.. parsed-literal:: + + [[['this', 'is'], 'the'], [['is', 'the'], '100th'], [['the', '100th'], 'etext']] + + +构建\ ``Dataset``\ 类 加载数据 +------------------------------ + +用\ ``paddle.io.Dataset``\ 构建数据集,然后作为参数传入到\ ``paddle.io.DataLoader``\ ,完成数据集的加载。 + +.. code:: ipython3 + + import paddle + import numpy as np + batch_size = 256 + paddle.disable_static() + class TrainDataset(paddle.io.Dataset): + def __init__(self, tuple_data): + self.tuple_data = tuple_data + + def __getitem__(self, idx): + data = self.tuple_data[idx][0] + label = self.tuple_data[idx][1] + data = np.array(list(map(lambda w: word_to_idx[w], data))) + label = np.array(word_to_idx[label]) + return data, label + + def __len__(self): + return len(self.tuple_data) + train_dataset = TrainDataset(trigram) + train_loader = paddle.io.DataLoader(train_dataset,places=paddle.CPUPlace(), return_list=True, + shuffle=True, batch_size=batch_size, drop_last=True) + +组网&训练 +--------- + +这里用paddle动态图的方式组网。为了构建Trigram模型,用一层 ``Embedding`` +与两层 ``Linear`` 完成构建。\ ``Embedding`` +层对输入的前两个单词embedding,然后输入到后面的两个\ ``Linear``\ 层中,完成特征提取。 + +.. code:: ipython3 + + import paddle + import numpy as np + import paddle.nn.functional as F + hidden_size = 1024 + class NGramModel(paddle.nn.Layer): + def __init__(self, vocab_size, embedding_dim, context_size): + super(NGramModel, self).__init__() + self.embedding = paddle.nn.Embedding(num_embeddings=vocab_size, embedding_dim=embedding_dim) + self.linear1 = paddle.nn.Linear(context_size * embedding_dim, hidden_size) + self.linear2 = paddle.nn.Linear(hidden_size, len(vocab)) + + def forward(self, x): + x = self.embedding(x) + x = paddle.reshape(x, [-1, context_size * embedding_dim]) + x = self.linear1(x) + x = F.relu(x) + x = self.linear2(x) + return x + +定义\ ``train()``\ 函数,对模型进行训练。 +----------------------------------------- + +.. 
code:: ipython3 + + import paddle.nn.functional as F + vocab_size = len(vocab) + epochs = 2 + losses = [] + def train(model): + model.train() + optim = paddle.optimizer.Adam(learning_rate=0.01, parameters=model.parameters()) + for epoch in range(epochs): + for batch_id, data in enumerate(train_loader()): + x_data = data[0] + y_data = data[1] + predicts = model(x_data) + y_data = paddle.reshape(y_data, shape=[-1, 1]) + loss = F.softmax_with_cross_entropy(predicts, y_data) + avg_loss = paddle.mean(loss) + avg_loss.backward() + if batch_id % 500 == 0: + losses.append(avg_loss.numpy()) + print("epoch: {}, batch_id: {}, loss is: {}".format(epoch, batch_id, avg_loss.numpy())) + optim.step() + optim.clear_grad() + model = NGramModel(vocab_size, embedding_dim, context_size) + train(model) + + +.. parsed-literal:: + + epoch: 0, batch_id: 0, loss is: [10.252176] + epoch: 0, batch_id: 500, loss is: [6.6429553] + epoch: 0, batch_id: 1000, loss is: [6.801544] + epoch: 0, batch_id: 1500, loss is: [6.7114644] + epoch: 0, batch_id: 2000, loss is: [6.628998] + epoch: 0, batch_id: 2500, loss is: [6.511376] + epoch: 0, batch_id: 3000, loss is: [6.878798] + epoch: 0, batch_id: 3500, loss is: [6.8752203] + epoch: 1, batch_id: 0, loss is: [6.5908413] + epoch: 1, batch_id: 500, loss is: [6.9765778] + epoch: 1, batch_id: 1000, loss is: [6.603841] + epoch: 1, batch_id: 1500, loss is: [6.9935036] + epoch: 1, batch_id: 2000, loss is: [6.751287] + epoch: 1, batch_id: 2500, loss is: [7.1222277] + epoch: 1, batch_id: 3000, loss is: [6.6431484] + epoch: 1, batch_id: 3500, loss is: [6.6024966] + + +打印loss下降曲线 +---------------- + +通过可视化loss的曲线,可以看到模型训练的效果。 + +.. code:: ipython3 + + import matplotlib.pyplot as plt + import matplotlib.ticker as ticker + %matplotlib inline + + plt.figure() + plt.plot(losses) + + + + +.. parsed-literal:: + + [] + + + + +.. image:: https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/paddle/tutorial/nlp_case/n_gram_model/n_gram_model_files/n_gram_model_001.png?raw=true + + +预测 +---- + +用训练好的模型进行预测。 + +.. code:: ipython3 + + import random + def test(model): + model.eval() + # 从最后10组数据中随机选取1个 + idx = random.randint(len(trigram)-10, len(trigram)-1) + print('the input words is: ' + trigram[idx][0][0] + ', ' + trigram[idx][0][1]) + x_data = list(map(lambda w: word_to_idx[w], trigram[idx][0])) + x_data = paddle.to_tensor(np.array(x_data)) + predicts = model(x_data) + predicts = predicts.numpy().tolist()[0] + predicts = predicts.index(max(predicts)) + print('the predict words is: ' + idx_to_word[predicts]) + y_data = trigram[idx][1] + print('the true words is: ' + y_data) + test(model) + + +.. 
parsed-literal:: + + the input words is: of, william + the predict words is: shakespeare + the true words is: shakespeare + diff --git a/doc/paddle/tutorial/nlp_case/n_gram_model/n_gram_model_files/n_gram_model_001.png b/doc/paddle/tutorial/nlp_case/n_gram_model/n_gram_model_files/n_gram_model_001.png new file mode 100644 index 0000000000000000000000000000000000000000..c51ce9f7285cb2398b875dbda58a7d2668b61f4a Binary files /dev/null and b/doc/paddle/tutorial/nlp_case/n_gram_model/n_gram_model_files/n_gram_model_001.png differ diff --git a/doc/paddle/tutorial/nlp_case/seq2seq_with_attention/seq2seq_with_attention.ipynb b/doc/paddle/tutorial/nlp_case/seq2seq_with_attention/seq2seq_with_attention.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..152f0e3a793bc82d334df66b4c9d029c653a05d6 --- /dev/null +++ b/doc/paddle/tutorial/nlp_case/seq2seq_with_attention/seq2seq_with_attention.ipynb @@ -0,0 +1,655 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 使用注意力机制的LSTM的机器翻译\n", + "\n", + "本示例教程介绍如何使用飞桨完成一个机器翻译任务。我们将会使用飞桨提供的LSTM的API,组建一个`sequence to sequence with attention`的机器翻译的模型,并在示例的数据集上完成从英文翻译成中文的机器翻译。" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 环境设置\n", + "\n", + "本示例教程基于飞桨2.0-beta版本。" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2.0.0-beta0\n" + ] + } + ], + "source": [ + "import paddle\n", + "import paddle.nn.functional as F\n", + "import re\n", + "import numpy as np\n", + "\n", + "paddle.disable_static()\n", + "print(paddle.__version__)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 下载数据集\n", + "\n", + "我们将使用 [http://www.manythings.org/anki/](http://www.manythings.org/anki/) 提供的中英文的英汉句对作为数据集,来完成本任务。该数据集含有23610个中英文双语的句对。" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "--2020-09-10 16:17:25-- https://www.manythings.org/anki/cmn-eng.zip\n", + "Resolving www.manythings.org (www.manythings.org)... 2606:4700:3033::6818:6dc4, 2606:4700:3036::ac43:adc6, 2606:4700:3037::6818:6cc4, ...\n", + "Connecting to www.manythings.org (www.manythings.org)|2606:4700:3033::6818:6dc4|:443... connected.\n", + "HTTP request sent, awaiting response... 
200 OK\n", + "Length: 1030722 (1007K) [application/zip]\n", + "Saving to: ‘cmn-eng.zip’\n", + "\n", + "cmn-eng.zip 100%[===================>] 1007K 91.2KB/s in 11s \n", + "\n", + "2020-09-10 16:17:38 (91.2 KB/s) - ‘cmn-eng.zip’ saved [1030722/1030722]\n", + "\n", + "Archive: cmn-eng.zip\n", + " inflating: cmn.txt \n", + " inflating: _about.txt \n" + ] + } + ], + "source": [ + "!wget -c https://www.manythings.org/anki/cmn-eng.zip && unzip cmn-eng.zip" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " 23610 cmn.txt\n" + ] + } + ], + "source": [ + "!wc -l cmn.txt" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 构建双语句对的数据结构\n", + "\n", + "接下来我们通过处理下载下来的双语句对的文本文件,将双语句对读入到python的数据结构中。这里做了如下的处理。\n", + "\n", + "- 对于英文,会把全部英文都变成小写,并只保留英文的单词。\n", + "- 对于中文,为了简便起见,未做分词,按照字做了切分。\n", + "- 为了后续的程序运行的更快,我们通过限制句子长度,和只保留部分英文单词开头的句子的方式,得到了一个较小的数据集。这样得到了一个有5508个句对的数据集。" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "MAX_LEN = 10" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "5508\n", + "(['i', 'won'], ['我', '赢', '了', '。'])\n", + "(['he', 'ran'], ['他', '跑', '了', '。'])\n", + "(['i', 'quit'], ['我', '退', '出', '。'])\n", + "(['i', 'm', 'ok'], ['我', '沒', '事', '。'])\n", + "(['i', 'm', 'up'], ['我', '已', '经', '起', '来', '了', '。'])\n", + "(['we', 'try'], ['我', '们', '来', '试', '试', '。'])\n", + "(['he', 'came'], ['他', '来', '了', '。'])\n", + "(['he', 'runs'], ['他', '跑', '。'])\n", + "(['i', 'agree'], ['我', '同', '意', '。'])\n", + "(['i', 'm', 'ill'], ['我', '生', '病', '了', '。'])\n" + ] + } + ], + "source": [ + "lines = open('cmn.txt', encoding='utf-8').read().strip().split('\\n')\n", + "words_re = re.compile(r'\\w+')\n", + "\n", + "pairs = []\n", + "for l in lines:\n", + " en_sent, cn_sent, _ = l.split('\\t')\n", + " pairs.append((words_re.findall(en_sent.lower()), list(cn_sent)))\n", + "\n", + "# create a smaller dataset to make the demo process faster\n", + "filtered_pairs = []\n", + "\n", + "for x in pairs:\n", + " if len(x[0]) < MAX_LEN and len(x[1]) < MAX_LEN and \\\n", + " x[0][0] in ('i', 'you', 'he', 'she', 'we', 'they'):\n", + " filtered_pairs.append(x)\n", + " \n", + "print(len(filtered_pairs))\n", + "for x in filtered_pairs[:10]: print(x) " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 创建词表\n", + "\n", + "接下来我们分别创建中英文的词表,这两份词表会用来将英文和中文的句子转换为词的ID构成的序列。词表中还加入了如下三个特殊的词:\n", + "- ``: 用来对较短的句子进行填充。\n", + "- ``: \"begin of sentence\", 表示句子的开始的特殊词。\n", + "- ``: \"end of sentence\", 表示句子的结束的特殊词。\n", + "\n", + "Note: 在实际的任务中,可能还需要通过``(或者``)特殊词来表示未在词表中出现的词。" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2539\n", + "2039\n" + ] + } + ], + "source": [ + "en_vocab = {}\n", + "cn_vocab = {}\n", + "\n", + "# create special token for pad, begin of sentence, end of sentence\n", + "en_vocab[''], en_vocab[''], en_vocab[''] = 0, 1, 2\n", + "cn_vocab[''], cn_vocab[''], cn_vocab[''] = 0, 1, 2\n", + "\n", + "en_idx, cn_idx = 3, 3\n", + "for en, cn in filtered_pairs:\n", + " for w in en: \n", + " if w not in en_vocab: \n", + " en_vocab[w] = en_idx\n", + " en_idx += 1\n", + " for w in cn: \n", + " if w not in cn_vocab: \n", + " cn_vocab[w] = cn_idx\n", + " cn_idx += 1\n", + "\n", + 
"print(len(list(en_vocab)))\n", + "print(len(list(cn_vocab)))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 创建padding过的数据集\n", + "\n", + "接下来根据词表,我们将会创建一份实际的用于训练的用numpy array组织起来的数据集。\n", + "- 所有的句子都通过``补充成为了长度相同的句子。\n", + "- 对于英文句子(源语言),我们将其反转了过来,这会带来更好的翻译的效果。\n", + "- 所创建的`padded_cn_label_sents`是训练过程中的预测的目标,即,每个中文的当前词去预测下一个词是什么词。\n" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "(5508, 11)\n", + "(5508, 12)\n", + "(5508, 12)\n" + ] + } + ], + "source": [ + "padded_en_sents = []\n", + "padded_cn_sents = []\n", + "padded_cn_label_sents = []\n", + "for en, cn in filtered_pairs:\n", + " # reverse source sentence\n", + " padded_en_sent = en + [''] + [''] * (MAX_LEN - len(en))\n", + " padded_en_sent.reverse()\n", + " padded_cn_sent = [''] + cn + [''] + [''] * (MAX_LEN - len(cn))\n", + " padded_cn_label_sent = cn + [''] + [''] * (MAX_LEN - len(cn) + 1) \n", + "\n", + " padded_en_sents.append([en_vocab[w] for w in padded_en_sent])\n", + " padded_cn_sents.append([cn_vocab[w] for w in padded_cn_sent])\n", + " padded_cn_label_sents.append([cn_vocab[w] for w in padded_cn_label_sent])\n", + "\n", + "train_en_sents = np.array(padded_en_sents)\n", + "train_cn_sents = np.array(padded_cn_sents)\n", + "train_cn_label_sents = np.array(padded_cn_label_sents)\n", + "\n", + "print(train_en_sents.shape)\n", + "print(train_cn_sents.shape)\n", + "print(train_cn_label_sents.shape)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 创建网络\n", + "\n", + "我们将会创建一个Encoder-AttentionDecoder架构的模型结构用来完成机器翻译任务。\n", + "首先我们将设置一些必要的网络结构中用到的参数。" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "embedding_size = 128\n", + "hidden_size = 256\n", + "num_encoder_lstm_layers = 1\n", + "en_vocab_size = len(list(en_vocab))\n", + "cn_vocab_size = len(list(cn_vocab))\n", + "epochs = 20\n", + "batch_size = 16" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Encoder部分\n", + "\n", + "在编码器的部分,我们通过查找完Embedding之后接一个LSTM的方式构建一个对源语言编码的网络。飞桨的RNN系列的API,除了LSTM之外,还提供了SimleRNN, GRU供使用,同时,还可以使用反向RNN,双向RNN,多层RNN等形式。也可以通过`dropout`参数设置是否对多层RNN的中间层进行`dropout`处理,来防止过拟合。\n", + "\n", + "除了使用序列到序列的RNN操作之外,也可以通过SimpleRNN, GRUCell, LSTMCell等API更灵活的创建单步的RNN计算,甚至通过继承RNNCellBase来实现自己的RNN计算单元。" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "# encoder: simply learn representation of source sentence\n", + "class Encoder(paddle.nn.Layer):\n", + " def __init__(self):\n", + " super(Encoder, self).__init__()\n", + " self.emb = paddle.nn.Embedding(en_vocab_size, embedding_size,)\n", + " self.lstm = paddle.nn.LSTM(input_size=embedding_size, \n", + " hidden_size=hidden_size, \n", + " num_layers=num_encoder_lstm_layers)\n", + "\n", + " def forward(self, x):\n", + " x = self.emb(x)\n", + " x, (_, _) = self.lstm(x)\n", + " return x" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## AttentionDecoder部分\n", + "\n", + "在解码器部分,我们通过一个带有注意力机制的LSTM来完成解码。\n", + "\n", + "- 单步的LSTM:在解码器的实现的部分,我们同样使用LSTM,与Encoder部分不同的是,下面的代码,每次只让LSTM往前计算一次。整体的recurrent部分,是在训练循环内完成的。\n", + "- 注意力机制:这里使用了一个由两个Linear组成的网络来完成注意力机制的计算,它用来计算出目标语言在每次翻译一个词的时候,需要对源语言当中的每个词需要赋予多少的权重。\n", + "- 对于第一次接触这样的网络结构来说,下面的代码在理解起来可能稍微有些复杂,你可以通过插入打印每个tensor在不同步骤时的形状的方式来更好的理解。" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + 
"source": [ + "# only move one step of LSTM, \n", + "# the recurrent loop is implemented inside training loop\n", + "class AttentionDecoder(paddle.nn.Layer):\n", + " def __init__(self):\n", + " super(AttentionDecoder, self).__init__()\n", + " self.emb = paddle.nn.Embedding(cn_vocab_size, embedding_size)\n", + " self.lstm = paddle.nn.LSTM(input_size=embedding_size + hidden_size, \n", + " hidden_size=hidden_size)\n", + "\n", + " # for computing attention weights\n", + " self.attention_linear1 = paddle.nn.Linear(hidden_size * 2, hidden_size)\n", + " self.attention_linear2 = paddle.nn.Linear(hidden_size, 1)\n", + " \n", + " # for computing output logits\n", + " self.outlinear =paddle.nn.Linear(hidden_size, cn_vocab_size)\n", + "\n", + " def forward(self, x, previous_hidden, previous_cell, encoder_outputs):\n", + " x = self.emb(x)\n", + " \n", + " attention_inputs = paddle.concat((encoder_outputs, \n", + " paddle.tile(previous_hidden, repeat_times=[1, MAX_LEN+1, 1])),\n", + " axis=-1\n", + " )\n", + "\n", + " attention_hidden = self.attention_linear1(attention_inputs)\n", + " attention_hidden = F.tanh(attention_hidden)\n", + " attention_logits = self.attention_linear2(attention_hidden)\n", + " attention_logits = paddle.squeeze(attention_logits)\n", + "\n", + " attention_weights = F.softmax(attention_logits) \n", + " attention_weights = paddle.expand_as(paddle.unsqueeze(attention_weights, -1), \n", + " encoder_outputs)\n", + "\n", + " context_vector = paddle.multiply(encoder_outputs, attention_weights) \n", + " context_vector = paddle.reduce_sum(context_vector, 1)\n", + " context_vector = paddle.unsqueeze(context_vector, 1)\n", + " \n", + " lstm_input = paddle.concat((x, context_vector), axis=-1)\n", + "\n", + " # LSTM requirement to previous hidden/state: \n", + " # (number_of_layers * direction, batch, hidden)\n", + " previous_hidden = paddle.transpose(previous_hidden, [1, 0, 2])\n", + " previous_cell = paddle.transpose(previous_cell, [1, 0, 2])\n", + " \n", + " x, (hidden, cell) = self.lstm(lstm_input, (previous_hidden, previous_cell))\n", + " \n", + " # change the return to (batch, number_of_layers * direction, hidden)\n", + " hidden = paddle.transpose(hidden, [1, 0, 2])\n", + " cell = paddle.transpose(cell, [1, 0, 2])\n", + "\n", + " output = self.outlinear(hidden)\n", + " output = paddle.squeeze(output)\n", + " return output, (hidden, cell)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 训练模型\n", + "\n", + "接下来我们开始训练模型。\n", + "\n", + "- 在每个epoch开始之前,我们对训练数据进行了随机打乱。\n", + "- 我们通过多次调用`atten_decoder`,在这里实现了解码时的recurrent循环。\n", + "- `teacher forcing`策略: 在每次解码下一个词时,我们给定了训练数据当中的真实词作为了预测下一个词时的输入。相应的,你也可以尝试用模型预测的结果作为下一个词的输入。(或者混合使用)" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "epoch:0\n", + "iter 0, loss:[7.620109]\n", + "iter 200, loss:[2.9760551]\n", + "epoch:1\n", + "iter 0, loss:[2.9679596]\n", + "iter 200, loss:[3.161064]\n", + "epoch:2\n", + "iter 0, loss:[2.7516625]\n", + "iter 200, loss:[2.9755423]\n", + "epoch:3\n", + "iter 0, loss:[2.7249248]\n", + "iter 200, loss:[2.3419888]\n", + "epoch:4\n", + "iter 0, loss:[2.3236473]\n", + "iter 200, loss:[2.3453429]\n", + "epoch:5\n", + "iter 0, loss:[2.1926975]\n", + "iter 200, loss:[2.1977856]\n", + "epoch:6\n", + "iter 0, loss:[2.014393]\n", + "iter 200, loss:[2.1863418]\n", + "epoch:7\n", + "iter 0, loss:[1.8619595]\n", + "iter 200, loss:[1.8904227]\n", + "epoch:8\n", + "iter 0, loss:[1.5901132]\n", 
+ "iter 200, loss:[1.7812968]\n", + "epoch:9\n", + "iter 0, loss:[1.341565]\n", + "iter 200, loss:[1.4957166]\n", + "epoch:10\n", + "iter 0, loss:[1.2202356]\n", + "iter 200, loss:[1.3485341]\n", + "epoch:11\n", + "iter 0, loss:[1.1035374]\n", + "iter 200, loss:[1.2871654]\n", + "epoch:12\n", + "iter 0, loss:[1.194801]\n", + "iter 200, loss:[1.0479954]\n", + "epoch:13\n", + "iter 0, loss:[1.0022258]\n", + "iter 200, loss:[1.0899843]\n", + "epoch:14\n", + "iter 0, loss:[0.93466896]\n", + "iter 200, loss:[0.99347967]\n", + "epoch:15\n", + "iter 0, loss:[0.83665943]\n", + "iter 200, loss:[0.9594004]\n", + "epoch:16\n", + "iter 0, loss:[0.78929776]\n", + "iter 200, loss:[0.945769]\n", + "epoch:17\n", + "iter 0, loss:[0.62574965]\n", + "iter 200, loss:[0.6308163]\n", + "epoch:18\n", + "iter 0, loss:[0.63433456]\n", + "iter 200, loss:[0.6287957]\n", + "epoch:19\n", + "iter 0, loss:[0.54270047]\n", + "iter 200, loss:[0.72688276]\n" + ] + } + ], + "source": [ + "encoder = Encoder()\n", + "atten_decoder = AttentionDecoder()\n", + "\n", + "opt = paddle.optimizer.Adam(learning_rate=0.001, \n", + " parameters=encoder.parameters()+atten_decoder.parameters())\n", + "\n", + "for epoch in range(epochs):\n", + " print(\"epoch:{}\".format(epoch))\n", + "\n", + " # shuffle training data\n", + " perm = np.random.permutation(len(train_en_sents))\n", + " train_en_sents_shuffled = train_en_sents[perm]\n", + " train_cn_sents_shuffled = train_cn_sents[perm]\n", + " train_cn_label_sents_shuffled = train_cn_label_sents[perm]\n", + "\n", + " for iteration in range(train_en_sents_shuffled.shape[0] // batch_size):\n", + " x_data = train_en_sents_shuffled[(batch_size*iteration):(batch_size*(iteration+1))]\n", + " sent = paddle.to_tensor(x_data)\n", + " en_repr = encoder(sent)\n", + "\n", + " x_cn_data = train_cn_sents_shuffled[(batch_size*iteration):(batch_size*(iteration+1))]\n", + " x_cn_label_data = train_cn_label_sents_shuffled[(batch_size*iteration):(batch_size*(iteration+1))]\n", + "\n", + " # shape: (batch, num_layer(=1 here) * num_of_direction(=1 here), hidden_size)\n", + " hidden = paddle.zeros([batch_size, 1, hidden_size])\n", + " cell = paddle.zeros([batch_size, 1, hidden_size])\n", + "\n", + " loss = paddle.zeros([1])\n", + " # the decoder recurrent loop mentioned above\n", + " for i in range(MAX_LEN + 2):\n", + " cn_word = paddle.to_tensor(x_cn_data[:,i:i+1])\n", + " cn_word_label = paddle.to_tensor(x_cn_label_data[:,i:i+1])\n", + "\n", + " logits, (hidden, cell) = atten_decoder(cn_word, hidden, cell, en_repr)\n", + " step_loss = F.softmax_with_cross_entropy(logits, cn_word_label)\n", + " avg_step_loss = paddle.mean(step_loss)\n", + " loss += avg_step_loss\n", + "\n", + " loss = loss / (MAX_LEN + 2)\n", + " if(iteration % 200 == 0):\n", + " print(\"iter {}, loss:{}\".format(iteration, loss.numpy()))\n", + "\n", + " loss.backward()\n", + " opt.step()\n", + " opt.clear_grad()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 使用模型进行机器翻译\n", + "\n", + "根据你所使用的计算设备的不同,上面的训练过程可能需要不等的时间。(在一台Mac笔记本上,大约耗时15~20分钟)\n", + "完成上面的模型训练之后,我们可以得到一个能够从英文翻译成中文的机器翻译模型。接下来我们通过一个greedy search来实现使用该模型完成实际的机器翻译。(实际的任务中,你可能需要用beam search算法来提升效果)" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "i want to study french\n", + "true: 我要学法语。\n", + "pred: 我要学法语。\n", + "i didn t know that he was there\n", + "true: 我不知道他在那裡。\n", + "pred: 我不知道他在那裡。\n", + "i called tom\n", + "true: 我給湯姆打了電話。\n", + "pred: 
我看見湯姆了。\n", + "he is getting along with his employees\n", + "true: 他和他的員工相處。\n", + "pred: 他和他的員工相處。\n", + "we raced toward the fire\n", + "true: 我們急忙跑向火。\n", + "pred: 我們住在美國。\n", + "i ran away in a hurry\n", + "true: 我趕快跑走了。\n", + "pred: 我在班里是最高。\n", + "he cut the envelope open\n", + "true: 他裁開了那個信封。\n", + "pred: 他裁開了信封。\n", + "he s shorter than tom\n", + "true: 他比湯姆矮。\n", + "pred: 他比湯姆矮。\n", + "i ve just started playing tennis\n", + "true: 我剛開始打網球。\n", + "pred: 我剛去打網球。\n", + "i need to go home\n", + "true: 我该回家了。\n", + "pred: 我该回家了。\n" + ] + } + ], + "source": [ + "encoder.eval()\n", + "atten_decoder.eval()\n", + "\n", + "num_of_exampels_to_evaluate = 10\n", + "\n", + "indices = np.random.choice(len(train_en_sents), num_of_exampels_to_evaluate, replace=False)\n", + "x_data = train_en_sents[indices]\n", + "sent = paddle.to_tensor(x_data)\n", + "en_repr = encoder(sent)\n", + "\n", + "word = np.array(\n", + " [[cn_vocab['']]] * num_of_exampels_to_evaluate\n", + ")\n", + "word = paddle.to_tensor(word)\n", + "\n", + "hidden = paddle.zeros([num_of_exampels_to_evaluate, 1, hidden_size])\n", + "cell = paddle.zeros([num_of_exampels_to_evaluate, 1, hidden_size])\n", + "\n", + "decoded_sent = []\n", + "for i in range(MAX_LEN + 2):\n", + " logits, (hidden, cell) = atten_decoder(word, hidden, cell, en_repr)\n", + " word = paddle.argmax(logits, axis=1)\n", + " decoded_sent.append(word.numpy())\n", + " word = paddle.unsqueeze(word, axis=-1)\n", + " \n", + "results = np.stack(decoded_sent, axis=1)\n", + "for i in range(num_of_exampels_to_evaluate):\n", + " en_input = \" \".join(filtered_pairs[indices[i]][0])\n", + " ground_truth_translate = \"\".join(filtered_pairs[indices[i]][1])\n", + " model_translate = \"\"\n", + " for k in results[i]:\n", + " w = list(cn_vocab)[k]\n", + " if w != '' and w != '':\n", + " model_translate += w\n", + " print(en_input)\n", + " print(\"true: {}\".format(ground_truth_translate))\n", + " print(\"pred: {}\".format(model_translate))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## The End\n", + "\n", + "你还可以通过变换网络结构,调整数据集,尝试不同的参数的方式来进一步提升本示例当中的机器翻译的效果。同时,也可以尝试在其他的类似的任务中用飞桨来完成实际的实践。" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.3" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/doc/paddle/tutorial/nlp_case/seq2seq_with_attention/seq2seq_with_attention.rst b/doc/paddle/tutorial/nlp_case/seq2seq_with_attention/seq2seq_with_attention.rst new file mode 100644 index 0000000000000000000000000000000000000000..0eb58588825dcb731d93bfd0a6096b7e5e04bddf --- /dev/null +++ b/doc/paddle/tutorial/nlp_case/seq2seq_with_attention/seq2seq_with_attention.rst @@ -0,0 +1,513 @@ +使用注意力机制的LSTM的机器翻译 +============================== + +本示例教程介绍如何使用飞桨完成一个机器翻译任务。我们将会使用飞桨提供的LSTM的API,组建一个\ ``sequence to sequence with attention``\ 的机器翻译的模型,并在示例的数据集上完成从英文翻译成中文的机器翻译。 + +环境设置 +-------- + +本示例教程基于飞桨框架2.0-beta版本。 + +.. code:: ipython3 + + import paddle + import paddle.nn.functional as F + import re + import numpy as np + + paddle.disable_static() + print(paddle.__version__) + + +.. parsed-literal:: + + 2.0.0-beta0 + + +下载数据集 +---------- + +我们将使用 http://www.manythings.org/anki/ +提供的中英文的英汉句对作为数据集,来完成本任务。该数据集含有23610个中英文双语的句对。 + +.. 
code:: ipython3 + + !wget -c https://www.manythings.org/anki/cmn-eng.zip && unzip cmn-eng.zip + + +.. parsed-literal:: + + --2020-09-10 16:17:25-- https://www.manythings.org/anki/cmn-eng.zip + Resolving www.manythings.org (www.manythings.org)... 2606:4700:3033::6818:6dc4, 2606:4700:3036::ac43:adc6, 2606:4700:3037::6818:6cc4, ... + Connecting to www.manythings.org (www.manythings.org)|2606:4700:3033::6818:6dc4|:443... connected. + HTTP request sent, awaiting response... 200 OK + Length: 1030722 (1007K) [application/zip] + Saving to: ‘cmn-eng.zip’ + + cmn-eng.zip 100%[===================>] 1007K 91.2KB/s in 11s + + 2020-09-10 16:17:38 (91.2 KB/s) - ‘cmn-eng.zip’ saved [1030722/1030722] + + Archive: cmn-eng.zip + inflating: cmn.txt + inflating: _about.txt + + +.. code:: ipython3 + + !wc -l cmn.txt + + +.. parsed-literal:: + + 23610 cmn.txt + + +构建双语句对的数据结构 +---------------------- + +接下来我们通过处理下载下来的双语句对的文本文件,将双语句对读入到python的数据结构中。这里做了如下的处理。 + +- 对于英文,会把全部英文都变成小写,并只保留英文的单词。 +- 对于中文,为了简便起见,未做分词,按照字做了切分。 +- 为了后续的程序运行的更快,我们通过限制句子长度,和只保留部分英文单词开头的句子的方式,得到了一个较小的数据集。这样得到了一个有5508个句对的数据集。 + +.. code:: ipython3 + + MAX_LEN = 10 + +.. code:: ipython3 + + lines = open('cmn.txt', encoding='utf-8').read().strip().split('\n') + words_re = re.compile(r'\w+') + + pairs = [] + for l in lines: + en_sent, cn_sent, _ = l.split('\t') + pairs.append((words_re.findall(en_sent.lower()), list(cn_sent))) + + # create a smaller dataset to make the demo process faster + filtered_pairs = [] + + for x in pairs: + if len(x[0]) < MAX_LEN and len(x[1]) < MAX_LEN and \ + x[0][0] in ('i', 'you', 'he', 'she', 'we', 'they'): + filtered_pairs.append(x) + + print(len(filtered_pairs)) + for x in filtered_pairs[:10]: print(x) + + +.. parsed-literal:: + + 5508 + (['i', 'won'], ['我', '赢', '了', '。']) + (['he', 'ran'], ['他', '跑', '了', '。']) + (['i', 'quit'], ['我', '退', '出', '。']) + (['i', 'm', 'ok'], ['我', '沒', '事', '。']) + (['i', 'm', 'up'], ['我', '已', '经', '起', '来', '了', '。']) + (['we', 'try'], ['我', '们', '来', '试', '试', '。']) + (['he', 'came'], ['他', '来', '了', '。']) + (['he', 'runs'], ['他', '跑', '。']) + (['i', 'agree'], ['我', '同', '意', '。']) + (['i', 'm', 'ill'], ['我', '生', '病', '了', '。']) + + +创建词表 +-------- + +接下来我们分别创建中英文的词表,这两份词表会用来将英文和中文的句子转换为词的ID构成的序列。词表中还加入了如下三个特殊的词: +- ````: 用来对较短的句子进行填充。 - ````: “begin of +sentence”, 表示句子的开始的特殊词。 - ````: “end of sentence”, +表示句子的结束的特殊词。 + +Note: +在实际的任务中,可能还需要通过\ ````\ (或者\ ````\ )特殊词来表示未在词表中出现的词。 + +.. code:: ipython3 + + en_vocab = {} + cn_vocab = {} + + # create special token for pad, begin of sentence, end of sentence + en_vocab[''], en_vocab[''], en_vocab[''] = 0, 1, 2 + cn_vocab[''], cn_vocab[''], cn_vocab[''] = 0, 1, 2 + + en_idx, cn_idx = 3, 3 + for en, cn in filtered_pairs: + for w in en: + if w not in en_vocab: + en_vocab[w] = en_idx + en_idx += 1 + for w in cn: + if w not in cn_vocab: + cn_vocab[w] = cn_idx + cn_idx += 1 + + print(len(list(en_vocab))) + print(len(list(cn_vocab))) + + +.. parsed-literal:: + + 2539 + 2039 + + +创建padding过的数据集 +--------------------- + +接下来根据词表,我们将会创建一份实际的用于训练的用numpy +array组织起来的数据集。 - +所有的句子都通过\ ````\ 补充成为了长度相同的句子。 - +对于英文句子(源语言),我们将其反转了过来,这会带来更好的翻译的效果。 - +所创建的\ ``padded_cn_label_sents``\ 是训练过程中的预测的目标,即,每个中文的当前词去预测下一个词是什么词。 + +.. 
code:: ipython3 + + padded_en_sents = [] + padded_cn_sents = [] + padded_cn_label_sents = [] + for en, cn in filtered_pairs: + # reverse source sentence + padded_en_sent = en + [''] + [''] * (MAX_LEN - len(en)) + padded_en_sent.reverse() + padded_cn_sent = [''] + cn + [''] + [''] * (MAX_LEN - len(cn)) + padded_cn_label_sent = cn + [''] + [''] * (MAX_LEN - len(cn) + 1) + + padded_en_sents.append([en_vocab[w] for w in padded_en_sent]) + padded_cn_sents.append([cn_vocab[w] for w in padded_cn_sent]) + padded_cn_label_sents.append([cn_vocab[w] for w in padded_cn_label_sent]) + + train_en_sents = np.array(padded_en_sents) + train_cn_sents = np.array(padded_cn_sents) + train_cn_label_sents = np.array(padded_cn_label_sents) + + print(train_en_sents.shape) + print(train_cn_sents.shape) + print(train_cn_label_sents.shape) + + +.. parsed-literal:: + + (5508, 11) + (5508, 12) + (5508, 12) + + +创建网络 +-------- + +我们将会创建一个Encoder-AttentionDecoder架构的模型结构用来完成机器翻译任务。 +首先我们将设置一些必要的网络结构中用到的参数。 + +.. code:: ipython3 + + embedding_size = 128 + hidden_size = 256 + num_encoder_lstm_layers = 1 + en_vocab_size = len(list(en_vocab)) + cn_vocab_size = len(list(cn_vocab)) + epochs = 20 + batch_size = 16 + +Encoder部分 +----------- + +在编码器的部分,我们通过查找完Embedding之后接一个LSTM的方式构建一个对源语言编码的网络。飞桨的RNN系列的API,除了LSTM之外,还提供了SimleRNN, +GRU供使用,同时,还可以使用反向RNN,双向RNN,多层RNN等形式。也可以通过\ ``dropout``\ 参数设置是否对多层RNN的中间层进行\ ``dropout``\ 处理,来防止过拟合。 + +除了使用序列到序列的RNN操作之外,也可以通过SimpleRNN, GRUCell, +LSTMCell等API更灵活的创建单步的RNN计算,甚至通过继承RNNCellBase来实现自己的RNN计算单元。 + +.. code:: ipython3 + + # encoder: simply learn representation of source sentence + class Encoder(paddle.nn.Layer): + def __init__(self): + super(Encoder, self).__init__() + self.emb = paddle.nn.Embedding(en_vocab_size, embedding_size,) + self.lstm = paddle.nn.LSTM(input_size=embedding_size, + hidden_size=hidden_size, + num_layers=num_encoder_lstm_layers) + + def forward(self, x): + x = self.emb(x) + x, (_, _) = self.lstm(x) + return x + +AttentionDecoder部分 +-------------------- + +在解码器部分,我们通过一个带有注意力机制的LSTM来完成解码。 + +- 单步的LSTM:在解码器的实现的部分,我们同样使用LSTM,与Encoder部分不同的是,下面的代码,每次只让LSTM往前计算一次。整体的recurrent部分,是在训练循环内完成的。 +- 注意力机制:这里使用了一个由两个Linear组成的网络来完成注意力机制的计算,它用来计算出目标语言在每次翻译一个词的时候,需要对源语言当中的每个词需要赋予多少的权重。 +- 对于第一次接触这样的网络结构来说,下面的代码在理解起来可能稍微有些复杂,你可以通过插入打印每个tensor在不同步骤时的形状的方式来更好的理解。 + +.. 
code:: ipython3 + + # only move one step of LSTM, + # the recurrent loop is implemented inside training loop + class AttentionDecoder(paddle.nn.Layer): + def __init__(self): + super(AttentionDecoder, self).__init__() + self.emb = paddle.nn.Embedding(cn_vocab_size, embedding_size) + self.lstm = paddle.nn.LSTM(input_size=embedding_size + hidden_size, + hidden_size=hidden_size) + + # for computing attention weights + self.attention_linear1 = paddle.nn.Linear(hidden_size * 2, hidden_size) + self.attention_linear2 = paddle.nn.Linear(hidden_size, 1) + + # for computing output logits + self.outlinear =paddle.nn.Linear(hidden_size, cn_vocab_size) + + def forward(self, x, previous_hidden, previous_cell, encoder_outputs): + x = self.emb(x) + + attention_inputs = paddle.concat((encoder_outputs, + paddle.tile(previous_hidden, repeat_times=[1, MAX_LEN+1, 1])), + axis=-1 + ) + + attention_hidden = self.attention_linear1(attention_inputs) + attention_hidden = F.tanh(attention_hidden) + attention_logits = self.attention_linear2(attention_hidden) + attention_logits = paddle.squeeze(attention_logits) + + attention_weights = F.softmax(attention_logits) + attention_weights = paddle.expand_as(paddle.unsqueeze(attention_weights, -1), + encoder_outputs) + + context_vector = paddle.multiply(encoder_outputs, attention_weights) + context_vector = paddle.reduce_sum(context_vector, 1) + context_vector = paddle.unsqueeze(context_vector, 1) + + lstm_input = paddle.concat((x, context_vector), axis=-1) + + # LSTM requirement to previous hidden/state: + # (number_of_layers * direction, batch, hidden) + previous_hidden = paddle.transpose(previous_hidden, [1, 0, 2]) + previous_cell = paddle.transpose(previous_cell, [1, 0, 2]) + + x, (hidden, cell) = self.lstm(lstm_input, (previous_hidden, previous_cell)) + + # change the return to (batch, number_of_layers * direction, hidden) + hidden = paddle.transpose(hidden, [1, 0, 2]) + cell = paddle.transpose(cell, [1, 0, 2]) + + output = self.outlinear(hidden) + output = paddle.squeeze(output) + return output, (hidden, cell) + +训练模型 +-------- + +接下来我们开始训练模型。 + +- 在每个epoch开始之前,我们对训练数据进行了随机打乱。 +- 我们通过多次调用\ ``atten_decoder``\ ,在这里实现了解码时的recurrent循环。 +- ``teacher forcing``\ 策略: + 在每次解码下一个词时,我们给定了训练数据当中的真实词作为了预测下一个词时的输入。相应的,你也可以尝试用模型预测的结果作为下一个词的输入。(或者混合使用) + +.. 
code:: ipython3 + + encoder = Encoder() + atten_decoder = AttentionDecoder() + + opt = paddle.optimizer.Adam(learning_rate=0.001, + parameters=encoder.parameters()+atten_decoder.parameters()) + + for epoch in range(epochs): + print("epoch:{}".format(epoch)) + + # shuffle training data + perm = np.random.permutation(len(train_en_sents)) + train_en_sents_shuffled = train_en_sents[perm] + train_cn_sents_shuffled = train_cn_sents[perm] + train_cn_label_sents_shuffled = train_cn_label_sents[perm] + + for iteration in range(train_en_sents_shuffled.shape[0] // batch_size): + x_data = train_en_sents_shuffled[(batch_size*iteration):(batch_size*(iteration+1))] + sent = paddle.to_tensor(x_data) + en_repr = encoder(sent) + + x_cn_data = train_cn_sents_shuffled[(batch_size*iteration):(batch_size*(iteration+1))] + x_cn_label_data = train_cn_label_sents_shuffled[(batch_size*iteration):(batch_size*(iteration+1))] + + # shape: (batch, num_layer(=1 here) * num_of_direction(=1 here), hidden_size) + hidden = paddle.zeros([batch_size, 1, hidden_size]) + cell = paddle.zeros([batch_size, 1, hidden_size]) + + loss = paddle.zeros([1]) + # the decoder recurrent loop mentioned above + for i in range(MAX_LEN + 2): + cn_word = paddle.to_tensor(x_cn_data[:,i:i+1]) + cn_word_label = paddle.to_tensor(x_cn_label_data[:,i:i+1]) + + logits, (hidden, cell) = atten_decoder(cn_word, hidden, cell, en_repr) + step_loss = F.softmax_with_cross_entropy(logits, cn_word_label) + avg_step_loss = paddle.mean(step_loss) + loss += avg_step_loss + + loss = loss / (MAX_LEN + 2) + if(iteration % 200 == 0): + print("iter {}, loss:{}".format(iteration, loss.numpy())) + + loss.backward() + opt.step() + opt.clear_grad() + + +.. parsed-literal:: + + epoch:0 + iter 0, loss:[7.620109] + iter 200, loss:[2.9760551] + epoch:1 + iter 0, loss:[2.9679596] + iter 200, loss:[3.161064] + epoch:2 + iter 0, loss:[2.7516625] + iter 200, loss:[2.9755423] + epoch:3 + iter 0, loss:[2.7249248] + iter 200, loss:[2.3419888] + epoch:4 + iter 0, loss:[2.3236473] + iter 200, loss:[2.3453429] + epoch:5 + iter 0, loss:[2.1926975] + iter 200, loss:[2.1977856] + epoch:6 + iter 0, loss:[2.014393] + iter 200, loss:[2.1863418] + epoch:7 + iter 0, loss:[1.8619595] + iter 200, loss:[1.8904227] + epoch:8 + iter 0, loss:[1.5901132] + iter 200, loss:[1.7812968] + epoch:9 + iter 0, loss:[1.341565] + iter 200, loss:[1.4957166] + epoch:10 + iter 0, loss:[1.2202356] + iter 200, loss:[1.3485341] + epoch:11 + iter 0, loss:[1.1035374] + iter 200, loss:[1.2871654] + epoch:12 + iter 0, loss:[1.194801] + iter 200, loss:[1.0479954] + epoch:13 + iter 0, loss:[1.0022258] + iter 200, loss:[1.0899843] + epoch:14 + iter 0, loss:[0.93466896] + iter 200, loss:[0.99347967] + epoch:15 + iter 0, loss:[0.83665943] + iter 200, loss:[0.9594004] + epoch:16 + iter 0, loss:[0.78929776] + iter 200, loss:[0.945769] + epoch:17 + iter 0, loss:[0.62574965] + iter 200, loss:[0.6308163] + epoch:18 + iter 0, loss:[0.63433456] + iter 200, loss:[0.6287957] + epoch:19 + iter 0, loss:[0.54270047] + iter 200, loss:[0.72688276] + + +使用模型进行机器翻译 +-------------------- + +根据你所使用的计算设备的不同,上面的训练过程可能需要不等的时间。(在一台Mac笔记本上,大约耗时15~20分钟) +完成上面的模型训练之后,我们可以得到一个能够从英文翻译成中文的机器翻译模型。接下来我们通过一个greedy +search来实现使用该模型完成实际的机器翻译。(实际的任务中,你可能需要用beam +search算法来提升效果) + +.. 
code:: ipython3 + + encoder.eval() + atten_decoder.eval() + + num_of_exampels_to_evaluate = 10 + + indices = np.random.choice(len(train_en_sents), num_of_exampels_to_evaluate, replace=False) + x_data = train_en_sents[indices] + sent = paddle.to_tensor(x_data) + en_repr = encoder(sent) + + word = np.array( + [[cn_vocab['']]] * num_of_exampels_to_evaluate + ) + word = paddle.to_tensor(word) + + hidden = paddle.zeros([num_of_exampels_to_evaluate, 1, hidden_size]) + cell = paddle.zeros([num_of_exampels_to_evaluate, 1, hidden_size]) + + decoded_sent = [] + for i in range(MAX_LEN + 2): + logits, (hidden, cell) = atten_decoder(word, hidden, cell, en_repr) + word = paddle.argmax(logits, axis=1) + decoded_sent.append(word.numpy()) + word = paddle.unsqueeze(word, axis=-1) + + results = np.stack(decoded_sent, axis=1) + for i in range(num_of_exampels_to_evaluate): + en_input = " ".join(filtered_pairs[indices[i]][0]) + ground_truth_translate = "".join(filtered_pairs[indices[i]][1]) + model_translate = "" + for k in results[i]: + w = list(cn_vocab)[k] + if w != '' and w != '': + model_translate += w + print(en_input) + print("true: {}".format(ground_truth_translate)) + print("pred: {}".format(model_translate)) + + +.. parsed-literal:: + + i want to study french + true: 我要学法语。 + pred: 我要学法语。 + i didn t know that he was there + true: 我不知道他在那裡。 + pred: 我不知道他在那裡。 + i called tom + true: 我給湯姆打了電話。 + pred: 我看見湯姆了。 + he is getting along with his employees + true: 他和他的員工相處。 + pred: 他和他的員工相處。 + we raced toward the fire + true: 我們急忙跑向火。 + pred: 我們住在美國。 + i ran away in a hurry + true: 我趕快跑走了。 + pred: 我在班里是最高。 + he cut the envelope open + true: 他裁開了那個信封。 + pred: 他裁開了信封。 + he s shorter than tom + true: 他比湯姆矮。 + pred: 他比湯姆矮。 + i ve just started playing tennis + true: 我剛開始打網球。 + pred: 我剛去打網球。 + i need to go home + true: 我该回家了。 + pred: 我该回家了。 + + +The End +------- + +你还可以通过变换网络结构,调整数据集,尝试不同的参数的方式来进一步提升本示例当中的机器翻译的效果。同时,也可以尝试在其他的类似的任务中用飞桨来完成实际的实践。 diff --git a/doc/paddle/tutorial/quick_start/dynamic_graph/dynamic_graph.ipynb b/doc/paddle/tutorial/quick_start/dynamic_graph/dynamic_graph.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..26d11193417d8dce3ebc110bc9afa1d428976675 --- /dev/null +++ b/doc/paddle/tutorial/quick_start/dynamic_graph/dynamic_graph.ipynb @@ -0,0 +1,306 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 动态图\n", + "\n", + "从飞桨开源框架2.0beta版本开始,飞桨默认为用户开启了动态图模式。在这种模式下,每次执行一个运算,可以立即得到结果(而不是事先定义好网络结构,然后再执行)。\n", + "\n", + "在动态图模式下,您可以更加方便的组织代码,更容易的调试程序,本示例教程将向你介绍飞桨的动态图的使用。\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 设置环境\n", + "\n", + "我们将使用飞桨2.0beta版本,并确认已经开启了动态图模式。" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2.0.0-beta0\n" + ] + } + ], + "source": [ + "import paddle\n", + "import paddle.nn.functional as F\n", + "import numpy as np\n", + "\n", + "paddle.disable_static()\n", + "print(paddle.__version__)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 基本用法\n", + "\n", + "在动态图模式下,您可以直接运行一个飞桨提供的API,它会立刻返回结果到python。不再需要首先创建一个计算图,然后再给定数据去运行。" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[[ 1.5645729 -0.74514765]\n", + " [-0.01248 0.68240154]\n", + " [ 0.11316949 -1.6579045 ]\n", + " [-0.1425675 -1.0153968 ]]\n", + "[1. 
2.]\n", + "[[2.5645728 1.2548523 ]\n", + " [0.98752 2.6824017 ]\n", + " [1.1131694 0.3420955 ]\n", + " [0.8574325 0.98460317]]\n", + "[ 0.07427764 1.352323 -3.2026396 -2.173361 ]\n" + ] + } + ], + "source": [ + "a = paddle.randn([4, 2])\n", + "b = paddle.arange(1, 3, dtype='float32')\n", + "\n", + "print(a.numpy())\n", + "print(b.numpy())\n", + "\n", + "c = a + b\n", + "print(c.numpy())\n", + "\n", + "d = paddle.matmul(a, b)\n", + "print(d.numpy())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 使用python的控制流\n", + "\n", + "动态图模式下,您可以使用python的条件判断和循环,这类控制语句来执行神经网络的计算。(不再需要`cond`, `loop`这类OP)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "0 +> [5 6 7]\n", + "1 -> [-3 -3 -3]\n", + "2 +> [ 5 9 15]\n", + "3 -> [-3 3 21]\n", + "4 +> [ 5 21 87]\n", + "5 -> [ -3 27 237]\n", + "6 -> [ -3 59 723]\n", + "7 -> [ -3 123 2181]\n", + "8 +> [ 5 261 6567]\n", + "9 +> [ 5 517 19689]\n" + ] + } + ], + "source": [ + "a = paddle.to_tensor(np.array([1, 2, 3]))\n", + "b = paddle.to_tensor(np.array([4, 5, 6]))\n", + "\n", + "for i in range(10):\n", + " r = paddle.rand([1,])\n", + " if r > 0.5:\n", + " c = paddle.pow(a, i) + b\n", + " print(\"{} +> {}\".format(i, c.numpy()))\n", + " else:\n", + " c = paddle.pow(a, i) - b\n", + " print(\"{} -> {}\".format(i, c.numpy()))\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 构建更加灵活的网络:控制流\n", + "\n", + "- 使用动态图可以用来创建更加灵活的网络,比如根据控制流选择不同的分支网络,和方便的构建权重共享的网络。接下来我们来看一个具体的例子,在这个例子中,第二个线性变换只有0.5的可能性会运行。\n", + "- 在sequence to sequence with attention的机器翻译的示例中,你会看到更实际的使用动态图构建RNN类的网络带来的灵活性。\n" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "class MyModel(paddle.nn.Layer):\n", + " def __init__(self, input_size, hidden_size):\n", + " super(MyModel, self).__init__()\n", + " self.linear1 = paddle.nn.Linear(input_size, hidden_size)\n", + " self.linear2 = paddle.nn.Linear(hidden_size, hidden_size)\n", + " self.linear3 = paddle.nn.Linear(hidden_size, 1)\n", + "\n", + " def forward(self, inputs):\n", + " x = self.linear1(inputs)\n", + " x = F.relu(x)\n", + "\n", + " if paddle.rand([1,]) > 0.5: \n", + " x = self.linear2(x)\n", + " x = F.relu(x)\n", + "\n", + " x = self.linear3(x)\n", + " \n", + " return x " + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "0 [1.3384138]\n", + "200 [0.7855983]\n", + "400 [0.59084535]\n", + "600 [0.30849028]\n", + "800 [0.26992702]\n", + "1000 [0.03990713]\n", + "1200 [0.07111286]\n", + "1400 [0.01177792]\n", + "1600 [0.03160322]\n", + "1800 [0.02757282]\n", + "2000 [0.00916022]\n", + "2200 [0.00217024]\n", + "2400 [0.00186833]\n", + "2600 [0.00101926]\n", + "2800 [0.0009654]\n" + ] + } + ], + "source": [ + "total_data, batch_size, input_size, hidden_size = 1000, 64, 128, 256\n", + "\n", + "x_data = np.random.randn(total_data, input_size).astype(np.float32)\n", + "y_data = np.random.randn(total_data, 1).astype(np.float32)\n", + "\n", + "model = MyModel(input_size, hidden_size)\n", + "\n", + "loss_fn = paddle.nn.MSELoss(reduction='mean')\n", + "optimizer = paddle.optimizer.SGD(learning_rate=0.01, \n", + " parameters=model.parameters())\n", + "\n", + "for t in range(200 * (total_data // batch_size)):\n", + " idx = np.random.choice(total_data, batch_size, replace=False)\n", + " x = paddle.to_tensor(x_data[idx,:])\n", + 
" y = paddle.to_tensor(y_data[idx,:])\n", + " y_pred = model(x)\n", + "\n", + " loss = loss_fn(y_pred, y)\n", + " if t % 200 == 0:\n", + " print(t, loss.numpy())\n", + "\n", + " loss.backward()\n", + " optimizer.step()\n", + " optimizer.clear_grad()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 构建更加灵活的网络:共享权重\n", + "\n", + "- 使用动态图还可以更加方便的创建共享权重的网络,下面的示例展示了一个共享了权重的简单的AutoEncoder。\n", + "- 你也可以参考图像搜索的示例看到共享参数权重的更实际的使用。" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "step: 0, loss: [0.33474904]\n", + "step: 1, loss: [0.31669515]\n", + "step: 2, loss: [0.29729688]\n", + "step: 3, loss: [0.27288628]\n", + "step: 4, loss: [0.24694422]\n", + "step: 5, loss: [0.2203041]\n", + "step: 6, loss: [0.19171436]\n", + "step: 7, loss: [0.16213782]\n", + "step: 8, loss: [0.13443354]\n", + "step: 9, loss: [0.11170781]\n" + ] + } + ], + "source": [ + "inputs = paddle.rand((256, 64))\n", + "\n", + "linear = paddle.nn.Linear(64, 8, bias_attr=False)\n", + "loss_fn = paddle.nn.MSELoss()\n", + "optimizer = paddle.optimizer.Adam(0.01, parameters=linear.parameters())\n", + "\n", + "for i in range(10):\n", + " hidden = linear(inputs)\n", + " # weight from input to hidden is shared with the linear mapping from hidden to output\n", + " outputs = paddle.matmul(hidden, linear.weight, transpose_y=True) \n", + " loss = loss_fn(outputs, inputs)\n", + " loss.backward()\n", + " print(\"step: {}, loss: {}\".format(i, loss.numpy()))\n", + " optimizer.step()\n", + " optimizer.clear_grad()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## The end\n", + "\n", + "可以看到使用动态图带来了更灵活易用的方式来组网和训练。" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.3" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/doc/paddle/tutorial/quick_start/dynamic_graph/dynamic_graph.rst b/doc/paddle/tutorial/quick_start/dynamic_graph/dynamic_graph.rst new file mode 100644 index 0000000000000000000000000000000000000000..86ede2db83e7721a6b5b9e6a94fa819bff5c35f2 --- /dev/null +++ b/doc/paddle/tutorial/quick_start/dynamic_graph/dynamic_graph.rst @@ -0,0 +1,215 @@ +动态图 +====== + +从飞桨开源框架2.0beta版本开始,飞桨默认为用户开启了动态图模式。在这种模式下,每次执行一个运算,可以立即得到结果(而不是事先定义好网络结构,然后再执行)。 + +在动态图模式下,您可以更加方便的组织代码,更容易的调试程序,本示例教程将向你介绍飞桨的动态图的使用。 + +设置环境 +-------- + +我们将使用飞桨框架2.0beta版本,并确认已经开启了动态图模式。 + +.. code:: ipython3 + + import paddle + import paddle.nn.functional as F + import numpy as np + + paddle.disable_static() + print(paddle.__version__) + + +.. parsed-literal:: + + 2.0.0-beta0 + + +基本用法 +-------- + +在动态图模式下,您可以直接运行一个飞桨提供的API,它会立刻返回结果到python。不再需要首先创建一个计算图,然后再给定数据去运行。 + +.. code:: ipython3 + + a = paddle.randn([4, 2]) + b = paddle.arange(1, 3, dtype='float32') + + print(a.numpy()) + print(b.numpy()) + + c = a + b + print(c.numpy()) + + d = paddle.matmul(a, b) + print(d.numpy()) + + +.. parsed-literal:: + + [[ 1.5645729 -0.74514765] + [-0.01248 0.68240154] + [ 0.11316949 -1.6579045 ] + [-0.1425675 -1.0153968 ]] + [1. 2.] 
+ [[2.5645728 1.2548523 ] + [0.98752 2.6824017 ] + [1.1131694 0.3420955 ] + [0.8574325 0.98460317]] + [ 0.07427764 1.352323 -3.2026396 -2.173361 ] + + +使用python的控制流 +------------------ + +动态图模式下,您可以使用python的条件判断和循环,这类控制语句来执行神经网络的计算。(不再需要\ ``cond``, +``loop``\ 这类OP) + +.. code:: ipython3 + + a = paddle.to_tensor(np.array([1, 2, 3])) + b = paddle.to_tensor(np.array([4, 5, 6])) + + for i in range(10): + r = paddle.rand([1,]) + if r > 0.5: + c = paddle.pow(a, i) + b + print("{} +> {}".format(i, c.numpy())) + else: + c = paddle.pow(a, i) - b + print("{} -> {}".format(i, c.numpy())) + + + +.. parsed-literal:: + + 0 +> [5 6 7] + 1 -> [-3 -3 -3] + 2 +> [ 5 9 15] + 3 -> [-3 3 21] + 4 +> [ 5 21 87] + 5 -> [ -3 27 237] + 6 -> [ -3 59 723] + 7 -> [ -3 123 2181] + 8 +> [ 5 261 6567] + 9 +> [ 5 517 19689] + + +构建更加灵活的网络:控制流 +-------------------------- + +- 使用动态图可以用来创建更加灵活的网络,比如根据控制流选择不同的分支网络,和方便的构建权重共享的网络。接下来我们来看一个具体的例子,在这个例子中,第二个线性变换只有0.5的可能性会运行。 +- 在sequence to sequence with + attention的机器翻译的示例中,你会看到更实际的使用动态图构建RNN类的网络带来的灵活性。 + +.. code:: ipython3 + + class MyModel(paddle.nn.Layer): + def __init__(self, input_size, hidden_size): + super(MyModel, self).__init__() + self.linear1 = paddle.nn.Linear(input_size, hidden_size) + self.linear2 = paddle.nn.Linear(hidden_size, hidden_size) + self.linear3 = paddle.nn.Linear(hidden_size, 1) + + def forward(self, inputs): + x = self.linear1(inputs) + x = F.relu(x) + + if paddle.rand([1,]) > 0.5: + x = self.linear2(x) + x = F.relu(x) + + x = self.linear3(x) + + return x + +.. code:: ipython3 + + total_data, batch_size, input_size, hidden_size = 1000, 64, 128, 256 + + x_data = np.random.randn(total_data, input_size).astype(np.float32) + y_data = np.random.randn(total_data, 1).astype(np.float32) + + model = MyModel(input_size, hidden_size) + + loss_fn = paddle.nn.MSELoss(reduction='mean') + optimizer = paddle.optimizer.SGD(learning_rate=0.01, + parameters=model.parameters()) + + for t in range(200 * (total_data // batch_size)): + idx = np.random.choice(total_data, batch_size, replace=False) + x = paddle.to_tensor(x_data[idx,:]) + y = paddle.to_tensor(y_data[idx,:]) + y_pred = model(x) + + loss = loss_fn(y_pred, y) + if t % 200 == 0: + print(t, loss.numpy()) + + loss.backward() + optimizer.step() + optimizer.clear_grad() + + +.. parsed-literal:: + + 0 [1.3384138] + 200 [0.7855983] + 400 [0.59084535] + 600 [0.30849028] + 800 [0.26992702] + 1000 [0.03990713] + 1200 [0.07111286] + 1400 [0.01177792] + 1600 [0.03160322] + 1800 [0.02757282] + 2000 [0.00916022] + 2200 [0.00217024] + 2400 [0.00186833] + 2600 [0.00101926] + 2800 [0.0009654] + + +构建更加灵活的网络:共享权重 +---------------------------- + +- 使用动态图还可以更加方便的创建共享权重的网络,下面的示例展示了一个共享了权重的简单的AutoEncoder。 +- 你也可以参考图像搜索的示例看到共享参数权重的更实际的使用。 + +.. code:: ipython3 + + inputs = paddle.rand((256, 64)) + + linear = paddle.nn.Linear(64, 8, bias_attr=False) + loss_fn = paddle.nn.MSELoss() + optimizer = paddle.optimizer.Adam(0.01, parameters=linear.parameters()) + + for i in range(10): + hidden = linear(inputs) + # weight from input to hidden is shared with the linear mapping from hidden to output + outputs = paddle.matmul(hidden, linear.weight, transpose_y=True) + loss = loss_fn(outputs, inputs) + loss.backward() + print("step: {}, loss: {}".format(i, loss.numpy())) + optimizer.step() + optimizer.clear_grad() + + +.. 
parsed-literal:: + + step: 0, loss: [0.33474904] + step: 1, loss: [0.31669515] + step: 2, loss: [0.29729688] + step: 3, loss: [0.27288628] + step: 4, loss: [0.24694422] + step: 5, loss: [0.2203041] + step: 6, loss: [0.19171436] + step: 7, loss: [0.16213782] + step: 8, loss: [0.13443354] + step: 9, loss: [0.11170781] + + +The end +------- + +可以看到使用动态图带来了更灵活易用的方式来组网和训练。 diff --git a/doc/paddle/tutorial/quick_start/getting_started/getting_started.ipynb b/doc/paddle/tutorial/quick_start/getting_started/getting_started.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..33b2afdc358a11006a32e47766c3341d94894920 --- /dev/null +++ b/doc/paddle/tutorial/quick_start/getting_started/getting_started.ipynb @@ -0,0 +1,219 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 快速上手飞桨(PaddlePaddle)\n", + "\n", + "本示例通过一个基础案例带您从一个飞桨新手快速掌握如何使用。" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 1. 安装飞桨\n", + "\n", + "如果您已经安装好飞桨那么可以跳过此步骤。我们针对用户提供了一个方便易用的安装引导页面,您可以通过选择自己的系统和软件版本来获取对应的安装命令,具体可以点击[快速安装](https://www.paddlepaddle.org.cn/install/quick)查看。" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 2. 导入飞桨\n", + "\n", + "这个示例我们采用了Notebook的形式来进行编写,您可以直接通过AIStudio或Jupyter等平台工具来运行这个案例,Notebook的好处是可以通过浏览器来运行Python程序,边看教程边运行结果,可以对比学习,并且可以做到单步运行调试。\n", + "\n", + "安装好飞桨后我们就可以在Python程序中进行飞桨的导入。" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'2.0.0-beta0'" + ] + }, + "execution_count": 1, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import paddle\n", + "\n", + "paddle.__version__" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 3. 实践一个手写数字识别任务\n", + "\n", + "对于深度学习任务如果简单来看,其实分为几个核心步骤:1. 数据集的准备和加载;2. 
模型的构建;3.模型训练;4.模型评估。那么接下来我们就一步一步带您通过飞桨的少量API快速实现。" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 3.1 数据加载\n", + "\n", + "加载我们框架为您准备好的一个手写数字识别数据集。这里我们使用两个数据集,一个用来做模型的训练,一个用来做模型的评估。" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [], + "source": [ + "train_dataset = paddle.vision.datasets.MNIST(mode='train', chw_format=False)\n", + "val_dataset = paddle.vision.datasets.MNIST(mode='test', chw_format=False)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 3.2 模型搭建\n", + "\n", + "通过Sequential将一层一层的网络结构组建起来。通过数据集加载接口的chw_format参数我们已经将[1, 28, 28]形状的图片数据改变形状为[1, 784],那么在组网过程中不在需要先进行Flatten操作。" + ] + }, + { + "cell_type": "code", + "execution_count": 35, + "metadata": {}, + "outputs": [], + "source": [ + "mnist = paddle.nn.Sequential(\n", + " paddle.nn.Linear(784, 512),\n", + " paddle.nn.ReLU(),\n", + " paddle.nn.Dropout(0.2),\n", + " paddle.nn.Linear(512, 10)\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 3.3 模型训练\n", + "\n", + "配置好我们模型训练需要的损失计算方法和优化方法后就可以使用fit接口来开启我们的模型训练过程。" + ] + }, + { + "cell_type": "code", + "execution_count": 36, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Epoch 1/5\n", + "step 1875/1875 [==============================] - loss: 0.2571 - acc: 0.9037 - 10ms/step \n", + "Epoch 2/5\n", + "step 1875/1875 [==============================] - loss: 0.1880 - acc: 0.9458 - 14ms/step \n", + "Epoch 3/5\n", + "step 1875/1875 [==============================] - loss: 0.0279 - acc: 0.9549 - 11ms/step \n", + "Epoch 4/5\n", + "step 1875/1875 [==============================] - loss: 0.0505 - acc: 0.9608 - 13ms/step \n", + "Epoch 5/5\n", + "step 1875/1875 [==============================] - loss: 0.2253 - acc: 0.9646 - 12ms/step \n" + ] + } + ], + "source": [ + "# 开启动态图模式\n", + "paddle.disable_static() \n", + "\n", + "# 预计模型结构生成模型实例,便于进行后续的配置、训练和验证\n", + "model = paddle.Model(mnist) \n", + "\n", + "# 模型训练相关配置,准备损失计算方法,优化器和精度计算方法\n", + "model.prepare(paddle.optimizer.Adam(parameters=mnist.parameters()),\n", + " paddle.nn.CrossEntropyLoss(),\n", + " paddle.metric.Accuracy())\n", + "\n", + "# 开始模型训练\n", + "model.fit(train_dataset,\n", + " epochs=5, \n", + " batch_size=32,\n", + " verbose=1)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 3.4 模型评估\n", + "\n", + "使用我们刚才训练得到的模型参数进行模型的评估操作,看看我们的模型精度如何。" + ] + }, + { + "cell_type": "code", + "execution_count": 37, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'loss': [3.576278e-07], 'acc': 0.9666}" + ] + }, + "execution_count": 37, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "model.evaluate(val_dataset, verbose=0)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "那么初步训练得到的模型效果在97%附近,我们可以进一步通过调整其中的训练参数来提升我们的模型精度。\n", + "\n", + "至此我们可以知道如何通过飞桨的几个简单API来快速完成一个深度学习任务,大家可以针对自己的需求来更换其中的代码,如果需要使用自己的数据集,那么可以更换数据集加载部分程序,如果需要替换模型,那么可以更改模型代码实现等等。我们也为大家提供了很多其他场景的示例代码来教大家如何使用我们的飞桨API,大家可以查看下面的链接或通过页面导航来查看自己感兴趣的部分。\n", + "\n", + "TODO:补充其他示例教程的快速链接。" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3.7.4 64-bit", + "language": "python", + "name": "python37464bitc4da1ac836094043840bff631bedbf7f" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + 
"pygments_lexer": "ipython3", + "version": "3.7.4" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/doc/paddle/tutorial/quick_start/getting_started/getting_started.rst b/doc/paddle/tutorial/quick_start/getting_started/getting_started.rst new file mode 100644 index 0000000000000000000000000000000000000000..f77bc038dcbe5a2ff86fca3eb539109808c08182 --- /dev/null +++ b/doc/paddle/tutorial/quick_start/getting_started/getting_started.rst @@ -0,0 +1,126 @@ +快速上手飞桨(PaddlePaddle) +============================ + +本示例通过一个基础案例带您从一个飞桨新手快速掌握如何使用。 + +1. 安装飞桨 +----------- + +如果您已经安装好飞桨那么可以跳过此步骤。我们针对用户提供了一个方便易用的安装引导页面,您可以通过选择自己的系统和软件版本来获取对应的安装命令,具体可以点击\ `快速安装 `__\ 查看。 + +2. 导入飞桨 +----------- + +这个示例我们采用了Notebook的形式来进行编写,您可以直接通过AIStudio或Jupyter等平台工具来运行这个案例,Notebook的好处是可以通过浏览器来运行Python程序,边看教程边运行结果,可以对比学习,并且可以做到单步运行调试。 + +安装好飞桨后我们就可以在Python程序中进行飞桨的导入。 + +.. code:: ipython3 + + import paddle + + paddle.__version__ + + + + +.. parsed-literal:: + + '2.0.0-beta0' + + + +3. 实践一个手写数字识别任务 +--------------------------- + +对于深度学习任务如果简单来看,其实分为几个核心步骤:1. +数据集的准备和加载;2. +模型的构建;3.模型训练;4.模型评估。那么接下来我们就一步一步带您通过飞桨的少量API快速实现。 + +3.1 数据加载 +~~~~~~~~~~~~ + +加载我们框架为您准备好的一个手写数字识别数据集。这里我们使用两个数据集,一个用来做模型的训练,一个用来做模型的评估。 + +.. code:: ipython3 + + train_dataset = paddle.vision.datasets.MNIST(mode='train', chw_format=False) + val_dataset = paddle.vision.datasets.MNIST(mode='test', chw_format=False) + +3.2 模型搭建 +~~~~~~~~~~~~ + +通过Sequential将一层一层的网络结构组建起来。通过数据集加载接口的chw_format参数我们已经将[1, +28, 28]形状的图片数据改变形状为[1, +784],那么在组网过程中不在需要先进行Flatten操作。 + +.. code:: ipython3 + + mnist = paddle.nn.Sequential( + paddle.nn.Linear(784, 512), + paddle.nn.ReLU(), + paddle.nn.Dropout(0.2), + paddle.nn.Linear(512, 10) + ) + +3.3 模型训练 +~~~~~~~~~~~~ + +配置好我们模型训练需要的损失计算方法和优化方法后就可以使用fit接口来开启我们的模型训练过程。 + +.. code:: ipython3 + + # 开启动态图模式 + paddle.disable_static() + + # 预计模型结构生成模型实例,便于进行后续的配置、训练和验证 + model = paddle.Model(mnist) + + # 模型训练相关配置,准备损失计算方法,优化器和精度计算方法 + model.prepare(paddle.optimizer.Adam(parameters=mnist.parameters()), + paddle.nn.CrossEntropyLoss(), + paddle.metric.Accuracy()) + + # 开始模型训练 + model.fit(train_dataset, + epochs=5, + batch_size=32, + verbose=1) + + +.. parsed-literal:: + + Epoch 1/5 + step 1875/1875 [==============================] - loss: 0.2571 - acc: 0.9037 - 10ms/step + Epoch 2/5 + step 1875/1875 [==============================] - loss: 0.1880 - acc: 0.9458 - 14ms/step + Epoch 3/5 + step 1875/1875 [==============================] - loss: 0.0279 - acc: 0.9549 - 11ms/step + Epoch 4/5 + step 1875/1875 [==============================] - loss: 0.0505 - acc: 0.9608 - 13ms/step + Epoch 5/5 + step 1875/1875 [==============================] - loss: 0.2253 - acc: 0.9646 - 12ms/step + + +3.4 模型评估 +~~~~~~~~~~~~ + +使用我们刚才训练得到的模型参数进行模型的评估操作,看看我们的模型精度如何。 + +.. code:: ipython3 + + model.evaluate(val_dataset, verbose=0) + + + + +.. 
parsed-literal:: + + {'loss': [3.576278e-07], 'acc': 0.9666} + + + +那么初步训练得到的模型效果在97%附近,我们可以进一步通过调整其中的训练参数来提升我们的模型精度。 + +至此我们可以知道如何通过飞桨的几个简单API来快速完成一个深度学习任务,大家可以针对自己的需求来更换其中的代码,如果需要使用自己的数据集,那么可以更换数据集加载部分程序,如果需要替换模型,那么可以更改模型代码实现等等。我们也为大家提供了很多其他场景的示例代码来教大家如何使用我们的飞桨API,大家可以查看下面的链接或通过页面导航来查看自己感兴趣的部分。 + diff --git a/doc/paddle/tutorial/quick_start/hello_paddle/hello_paddle.ipynb b/doc/paddle/tutorial/quick_start/hello_paddle/hello_paddle.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..c087625073afd158a1d86b105d25c38d681d069f --- /dev/null +++ b/doc/paddle/tutorial/quick_start/hello_paddle/hello_paddle.ipynb @@ -0,0 +1,343 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Hello Paddle: 从普通程序走向机器学习程序" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "这篇示例向你介绍普通的程序跟机器学习程序的区别,并带着你用飞桨框架,实现你的第一个机器学习程序。" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 普通程序跟机器学习程序的逻辑区别\n", + "\n", + "作为一名开发者,你最熟悉的开始学习一门编程语言,或者一个深度学习框架的方式,可能是通过一个hello, world程序。\n", + "\n", + "学习飞桨也可以这样,这篇小示例教程将会通过一个非常简单的示例来向你展示如何开始使用飞桨。\n", + "\n", + "机器学习程序跟通常的程序最大的不同是,通常的程序是在给定输入的情况下,通过告诉计算机处理数据的规则,然后得到处理后的结果。而机器学习程序则是在并不知道这些规则的情况下,让机器来从数据当中**学习**出来规则。\n", + "\n", + "作为热身,我们先来看看通常的程序所做的事情。\n", + "\n", + "我们现在面临这样一个任务:\n", + "\n", + "我们乘坐出租车的时候,会有一个10元的起步价,只要上车就需要收取。出租车每行驶1公里,需要再支付每公里2元的行驶费用。当一个乘客坐完出租车之后,车上的计价器需要算出来该乘客需要支付的乘车费用。\n", + "\n", + "如果用python来实现该功能,会如下所示:" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "12.0\n", + "16.0\n", + "20.0\n", + "28.0\n", + "30.0\n", + "50.0\n" + ] + } + ], + "source": [ + "def calculate_fee(distance_travelled):\n", + " return 10 + 2 * distance_travelled\n", + "\n", + "for x in [1.0, 3.0, 5.0, 9.0, 10.0, 20.0]:\n", + " print(calculate_fee(x))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "接下来,我们把问题稍微变换一下,现在我们知道乘客每次乘坐出租车的公里数,也知道乘客每次下车的时候支付给出租车司机的总费用。但是并不知道乘车的起步价,以及每公里行驶费用是多少。我们希望让机器从这些数据当中学习出来计算总费用的规则。\n", + "\n", + "更具体的,我们想要让机器学习程序通过数据学习出来下面的公式当中的参数w和参数b(这是一个非常简单的示例,所以`w`和`b`都是浮点数,随着对深度学习了解的深入,你将会知道`w`和`b`通常情况下会是矩阵和向量)。这样,当下次乘车的时候,我们知道了行驶里程`distance_travelled`的时候,我们就可以估算出来用户的总费用`total_fee`了。\n", + "\n", + "```\n", + "total_fee = w * distance_travelled + b\n", + "```\n", + "\n", + "接下来,我们看看用飞桨如何实现这个hello, world级别的机器学习程序。" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 导入飞桨\n", + "\n", + "为了能够使用飞桨,我们需要先用python的`import`语句导入飞桨`paddle`。\n", + "同时,为了能够更好的对数组进行计算和处理,我们也还需要导入`numpy`。\n", + "\n", + "如果你是在本机运行这个notebook,而且还没有安装飞桨,可以去飞桨的官网查看如何安装:[飞桨官网](https://www.paddlepaddle.org.cn/)。并且请使用2.0beta或以上版本的飞桨。" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "paddle 2.0.0-beta0\n" + ] + } + ], + "source": [ + "import paddle\n", + "paddle.disable_static()\n", + "print(\"paddle \" + paddle.__version__)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 准备数据\n", + "\n", + "在这个机器学习任务中,我们已经知道了乘客的行驶里程`distance_travelled`,和对应的,这些乘客的总费用`total_fee`。\n", + "通常情况下,在机器学习任务中,像`distance_travelled`这样的输入值,一般被称为`x`(或者特征`feature`),像`total_fee`这样的输出值,一般被称为`y`(或者标签`label`)。\n", + "\n", + "我们用`paddle.to_tensor`把示例数据转换为paddle的Tensor数据。" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "x_data = paddle.to_tensor([[1.], [3.0], [5.0], [9.0], [10.0], 
[20.0]])\n", + "y_data = paddle.to_tensor([[12.], [16.0], [20.0], [28.0], [30.0], [50.0]])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 用飞桨定义模型的计算\n", + "\n", + "使用飞桨定义模型的计算的过程,本质上,是我们用python,通过飞桨提供的API,来告诉飞桨我们的计算规则的过程。回顾一下,我们想要通过飞桨用机器学习方法,从数据当中学习出来如下公式当中的`w`和`b`。这样在未来,给定`x`时就可以估算出来`y`值(估算出来的`y`记为`y_predict`)\n", + "\n", + "```\n", + "y_predict = w * x + b\n", + "```\n", + "\n", + "我们将会用飞桨的线性变换层:`paddle.nn.Linear`来实现这个计算过程,这个公式里的变量`x, y, w, b, y_predict`,对应着飞桨里面的[Tensor概念](https://www.paddlepaddle.org.cn/documentation/docs/zh/beginners_guide/basic_concept/tensor.html)。\n", + "\n", + "### 稍微补充一下\n", + "\n", + "在这里的示例中,我们根据经验,已经事先知道了`distance_travelled`和`total_fee`之间是线性的关系,而在更实际的问题当中,`x`和`y`的关系通常是非线性的,因此也就需要使用更多类型,也更复杂的神经网络。(比如,BMI指数跟你的身高就不是线性关系,一张图片里的某个像素值跟这个图片是猫还是狗也不是线性关系。)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "linear = paddle.nn.Linear(in_features=1, out_features=1)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 准备好运行飞桨\n", + "\n", + "机器(计算机)在一开始的时候会随便猜`w`和`b`,我们先看看机器猜的怎么样。你应该可以看到,这时候的`w`是一个随机值,`b`是0.0,这是飞桨的初始化策略,也是这个领域常用的初始化策略。(如果你愿意,也可以采用其他的初始化的方式,今后你也会看到,选择不同的初始化策略也是对于做好深度学习任务来说很重要的一点)。" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "w before optimize: -1.696260690689087\n", + "b before optimize: 0.0\n" + ] + } + ], + "source": [ + "w_before_opt = linear.weight.numpy().item()\n", + "b_before_opt = linear.bias.numpy().item()\n", + "\n", + "print(\"w before optimize: {}\".format(w_before_opt))\n", + "print(\"b before optimize: {}\".format(b_before_opt))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 告诉飞桨怎么样学习\n", + "\n", + "前面我们定义好了神经网络(尽管是一个最简单的神经网络),我们还需要告诉飞桨,怎么样去**学习**,从而能得到参数`w`和`b`。\n", + "\n", + "这个过程简单的来陈述一下,你应该就会大致明白了(尽管背后的理论和知识还需要逐步的去学习)。在机器学习/深度学习当中,机器(计算机)在最开始的时候,得到参数`w`和`b`的方式是随便猜一下,用这种随便猜测得到的参数值,去进行计算(预测)的时候,得到的`y_predict`,跟实际的`y`值一定是有**差距**的。接下来,机器会根据这个差距来**调整`w`和`b`**,随着这样的逐步的调整,`w`和`b`会越来越正确,`y_predict`跟`y`之间的差距也会越来越小,从而最终能得到好用的`w`和`b`。这个过程就是机器**学习**的过程。\n", + "\n", + "用更加技术的语言来说,衡量**差距**的函数(一个公式)就是损失函数,用来**调整**参数的方法就是优化算法。\n", + "\n", + "在本示例当中,我们用最简单的均方误差(mean square error)作为损失函数(`paddle.nn.MSELoss`);和最常见的优化算法SGD(stocastic gradient descent)作为优化算法(传给`paddle.optimizer.SGD`的参数`learning_rate`,你可以理解为控制每次调整的步子大小的参数)。" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "mse_loss = paddle.nn.MSELoss()\n", + "sgd_optimizer = paddle.optimizer.SGD(learning_rate=0.001, parameters = linear.parameters())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 运行优化算法\n", + "\n", + "接下来,我们让飞桨运行一下这个优化算法,这会是一个前面介绍过的逐步调整参数的过程,你应该可以看到loss值(衡量`y`和`y_predict`的差距的`loss`)在不断的降低。" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "epoch 0 loss [2094.069]\n", + "epoch 1000 loss [7.8451133]\n", + "epoch 2000 loss [1.7541145]\n", + "epoch 3000 loss [0.39221546]\n", + "epoch 4000 loss [0.08769739]\n", + "finished training, loss [0.0196382]\n" + ] + } + ], + "source": [ + "total_epoch = 5000\n", + "for i in range(total_epoch):\n", + " y_predict = linear(x_data)\n", + " loss = mse_loss(y_predict, y_data)\n", + " loss.backward()\n", + " sgd_optimizer.step()\n", + " sgd_optimizer.clear_grad()\n", + " \n", + " if i%1000 == 0:\n", + " 
print(\"epoch {} loss {}\".format(i, loss.numpy()))\n", + " \n", + "print(\"finished training, loss {}\".format(loss.numpy()))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 机器学习出来的参数\n", + "\n", + "经过了这样的对参数`w`和`b`的调整(**学习**),我们再通过下面的程序,来看看现在的参数变成了多少。你应该会发现`w`变成了很接近2.0的一个值,`b`变成了接近10.0的一个值。虽然并不是正好的2和10,但却是从数据当中学习出来的还不错的模型的参数,可以在未来的时候,用从这批数据当中学习到的参数来预估了。(如果你愿意,也可以通过让机器多学习一段时间,从而得到更加接近2.0和10.0的参数值。)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "w after optimize: 2.0178451538085938\n", + "b after optimize: 9.771825790405273\n" + ] + } + ], + "source": [ + "w_after_opt = linear.weight.numpy().item()\n", + "b_after_opt = linear.bias.numpy().item()\n", + "\n", + "print(\"w after optimize: {}\".format(w_after_opt))\n", + "print(\"b after optimize: {}\".format(b_after_opt))\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## hello paddle\n", + "\n", + "通过这个小示例,希望你已经初步了解了飞桨,能在接下来随着对飞桨的更多学习,来解决实际遇到的问题。" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "hello paddle\n" + ] + } + ], + "source": [ + "print(\"hello paddle\")" + ] + } + ], + "metadata": { + "colab": { + "name": "hello-paddle.ipynb", + "private_outputs": true, + "provenance": [], + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.3" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/doc/paddle/tutorial/quick_start/hello_paddle/hello_paddle.rst b/doc/paddle/tutorial/quick_start/hello_paddle/hello_paddle.rst new file mode 100644 index 0000000000000000000000000000000000000000..3a36e2b2db510caa8a60ef84f88942c2fe054938 --- /dev/null +++ b/doc/paddle/tutorial/quick_start/hello_paddle/hello_paddle.rst @@ -0,0 +1,210 @@ +Hello Paddle: 从普通程序走向机器学习程序 +======================================== + +这篇示例向你介绍普通的程序跟机器学习程序的区别,并带着你用飞桨框架,实现你的第一个机器学习程序。 + +普通程序跟机器学习程序的逻辑区别 +-------------------------------- + +作为一名开发者,你最熟悉的开始学习一门编程语言,或者一个深度学习框架的方式,可能是通过一个hello, +world程序。 + +学习飞桨也可以这样,这篇小示例教程将会通过一个非常简单的示例来向你展示如何开始使用飞桨。 + +机器学习程序跟通常的程序最大的不同是,通常的程序是在给定输入的情况下,通过告诉计算机处理数据的规则,然后得到处理后的结果。而机器学习程序则是在并不知道这些规则的情况下,让机器来从数据当中\ **学习**\ 出来规则。 + +作为热身,我们先来看看通常的程序所做的事情。 + +我们现在面临这样一个任务: + +我们乘坐出租车的时候,会有一个10元的起步价,只要上车就需要收取。出租车每行驶1公里,需要再支付每公里2元的行驶费用。当一个乘客坐完出租车之后,车上的计价器需要算出来该乘客需要支付的乘车费用。 + +如果用python来实现该功能,会如下所示: + +.. code:: ipython3 + + def calculate_fee(distance_travelled): + return 10 + 2 * distance_travelled + + for x in [1.0, 3.0, 5.0, 9.0, 10.0, 20.0]: + print(calculate_fee(x)) + + +.. 
parsed-literal:: + + 12.0 + 16.0 + 20.0 + 28.0 + 30.0 + 50.0 + + +接下来,我们把问题稍微变换一下,现在我们知道乘客每次乘坐出租车的公里数,也知道乘客每次下车的时候支付给出租车司机的总费用。但是并不知道乘车的起步价,以及每公里行驶费用是多少。我们希望让机器从这些数据当中学习出来计算总费用的规则。 + +更具体的,我们想要让机器学习程序通过数据学习出来下面的公式当中的参数w和参数b(这是一个非常简单的示例,所以\ ``w``\ 和\ ``b``\ 都是浮点数,随着对深度学习了解的深入,你将会知道\ ``w``\ 和\ ``b``\ 通常情况下会是矩阵和向量)。这样,当下次乘车的时候,我们知道了行驶里程\ ``distance_travelled``\ 的时候,我们就可以估算出来用户的总费用\ ``total_fee``\ 了。 + +:: + + total_fee = w * distance_travelled + b + +接下来,我们看看用飞桨如何实现这个hello, world级别的机器学习程序。 + +导入飞桨 +-------- + +为了能够使用飞桨,我们需要先用python的\ ``import``\ 语句导入飞桨\ ``paddle``\ 。 +同时,为了能够更好的对数组进行计算和处理,我们也还需要导入\ ``numpy``\ 。 + +如果你是在本机运行这个notebook,而且还没有安装飞桨,可以去飞桨的官网查看如何安装:\ `飞桨官网 `__\ 。并且请使用2.0beta或以上版本的飞桨。 + +.. code:: ipython3 + + import paddle + paddle.disable_static() + print("paddle " + paddle.__version__) + + +.. parsed-literal:: + + paddle 2.0.0-beta0 + + +准备数据 +-------- + +在这个机器学习任务中,我们已经知道了乘客的行驶里程\ ``distance_travelled``\ ,和对应的,这些乘客的总费用\ ``total_fee``\ 。 +通常情况下,在机器学习任务中,像\ ``distance_travelled``\ 这样的输入值,一般被称为\ ``x``\ (或者特征\ ``feature``\ ),像\ ``total_fee``\ 这样的输出值,一般被称为\ ``y``\ (或者标签\ ``label``)。 + +我们用\ ``paddle.to_tensor``\ 把示例数据转换为paddle的Tensor数据。 + +.. code:: ipython3 + + x_data = paddle.to_tensor([[1.], [3.0], [5.0], [9.0], [10.0], [20.0]]) + y_data = paddle.to_tensor([[12.], [16.0], [20.0], [28.0], [30.0], [50.0]]) + +用飞桨定义模型的计算 +-------------------- + +使用飞桨定义模型的计算的过程,本质上,是我们用python,通过飞桨提供的API,来告诉飞桨我们的计算规则的过程。回顾一下,我们想要通过飞桨用机器学习方法,从数据当中学习出来如下公式当中的\ ``w``\ 和\ ``b``\ 。这样在未来,给定\ ``x``\ 时就可以估算出来\ ``y``\ 值(估算出来的\ ``y``\ 记为\ ``y_predict``\ ) + +:: + + y_predict = w * x + b + +我们将会用飞桨的线性变换层:\ ``paddle.nn.Linear``\ 来实现这个计算过程,这个公式里的变量\ ``x, y, w, b, y_predict``\ ,对应着飞桨里面的\ `Tensor概念 `__\ 。 + +稍微补充一下 +~~~~~~~~~~~~ + +在这里的示例中,我们根据经验,已经事先知道了\ ``distance_travelled``\ 和\ ``total_fee``\ 之间是线性的关系,而在更实际的问题当中,\ ``x``\ 和\ ``y``\ 的关系通常是非线性的,因此也就需要使用更多类型,也更复杂的神经网络。(比如,BMI指数跟你的身高就不是线性关系,一张图片里的某个像素值跟这个图片是猫还是狗也不是线性关系。) + +.. code:: ipython3 + + linear = paddle.nn.Linear(in_features=1, out_features=1) + +准备好运行飞桨 +-------------- + +机器(计算机)在一开始的时候会随便猜\ ``w``\ 和\ ``b``\ ,我们先看看机器猜的怎么样。你应该可以看到,这时候的\ ``w``\ 是一个随机值,\ ``b``\ 是0.0,这是飞桨的初始化策略,也是这个领域常用的初始化策略。(如果你愿意,也可以采用其他的初始化的方式,今后你也会看到,选择不同的初始化策略也是对于做好深度学习任务来说很重要的一点)。 + +.. code:: ipython3 + + w_before_opt = linear.weight.numpy().item() + b_before_opt = linear.bias.numpy().item() + + print("w before optimize: {}".format(w_before_opt)) + print("b before optimize: {}".format(b_before_opt)) + + +.. parsed-literal:: + + w before optimize: -1.696260690689087 + b before optimize: 0.0 + + +告诉飞桨怎么样学习 +------------------ + +前面我们定义好了神经网络(尽管是一个最简单的神经网络),我们还需要告诉飞桨,怎么样去\ **学习**\ ,从而能得到参数\ ``w``\ 和\ ``b``\ 。 + +这个过程简单的来陈述一下,你应该就会大致明白了(尽管背后的理论和知识还需要逐步的去学习)。在机器学习/深度学习当中,机器(计算机)在最开始的时候,得到参数\ ``w``\ 和\ ``b``\ 的方式是随便猜一下,用这种随便猜测得到的参数值,去进行计算(预测)的时候,得到的\ ``y_predict``\ ,跟实际的\ ``y``\ 值一定是有\ **差距**\ 的。接下来,机器会根据这个差距来\ **调整\ ``w``\ 和\ ``b``**\ ,随着这样的逐步的调整,\ ``w``\ 和\ ``b``\ 会越来越正确,\ ``y_predict``\ 跟\ ``y``\ 之间的差距也会越来越小,从而最终能得到好用的\ ``w``\ 和\ ``b``\ 。这个过程就是机器\ **学习**\ 的过程。 + +用更加技术的语言来说,衡量\ **差距**\ 的函数(一个公式)就是损失函数,用来\ **调整**\ 参数的方法就是优化算法。 + +在本示例当中,我们用最简单的均方误差(mean square +error)作为损失函数(``paddle.nn.MSELoss``);和最常见的优化算法SGD(stocastic +gradient +descent)作为优化算法(传给\ ``paddle.optimizer.SGD``\ 的参数\ ``learning_rate``\ ,你可以理解为控制每次调整的步子大小的参数)。 + +.. 
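code:: python
+
+    # (补充示意,非原文内容)用几行代码直观看看“均方误差”在算什么:
+    # 它等价于对 (y_predict - y) 的平方取平均。这里假设前文的
+    # paddle.disable_static() 已经执行,当前处于动态图模式。
+    import paddle
+
+    y = paddle.to_tensor([[12.0], [16.0]])
+    y_predict = paddle.to_tensor([[11.0], [18.0]])
+
+    diff = y_predict - y
+    print(paddle.mean(diff * diff))            # 手工按公式计算,结果为 2.5
+    print(paddle.nn.MSELoss()(y_predict, y))   # 与 paddle.nn.MSELoss 的计算结果一致
+
+.. 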
code:: ipython3 + + mse_loss = paddle.nn.MSELoss() + sgd_optimizer = paddle.optimizer.SGD(learning_rate=0.001, parameters = linear.parameters()) + +运行优化算法 +------------ + +接下来,我们让飞桨运行一下这个优化算法,这会是一个前面介绍过的逐步调整参数的过程,你应该可以看到loss值(衡量\ ``y``\ 和\ ``y_predict``\ 的差距的\ ``loss``)在不断的降低。 + +.. code:: ipython3 + + total_epoch = 5000 + for i in range(total_epoch): + y_predict = linear(x_data) + loss = mse_loss(y_predict, y_data) + loss.backward() + sgd_optimizer.step() + sgd_optimizer.clear_grad() + + if i%1000 == 0: + print("epoch {} loss {}".format(i, loss.numpy())) + + print("finished training, loss {}".format(loss.numpy())) + + +.. parsed-literal:: + + epoch 0 loss [2094.069] + epoch 1000 loss [7.8451133] + epoch 2000 loss [1.7541145] + epoch 3000 loss [0.39221546] + epoch 4000 loss [0.08769739] + finished training, loss [0.0196382] + + +机器学习出来的参数 +------------------ + +经过了这样的对参数\ ``w``\ 和\ ``b``\ 的调整(\ **学习**),我们再通过下面的程序,来看看现在的参数变成了多少。你应该会发现\ ``w``\ 变成了很接近2.0的一个值,\ ``b``\ 变成了接近10.0的一个值。虽然并不是正好的2和10,但却是从数据当中学习出来的还不错的模型的参数,可以在未来的时候,用从这批数据当中学习到的参数来预估了。(如果你愿意,也可以通过让机器多学习一段时间,从而得到更加接近2.0和10.0的参数值。) + +.. code:: ipython3 + + w_after_opt = linear.weight.numpy().item() + b_after_opt = linear.bias.numpy().item() + + print("w after optimize: {}".format(w_after_opt)) + print("b after optimize: {}".format(b_after_opt)) + + + +.. parsed-literal:: + + w after optimize: 2.0178451538085938 + b after optimize: 9.771825790405273 + + +hello paddle +------------ + +通过这个小示例,希望你已经初步了解了飞桨,能在接下来随着对飞桨的更多学习,来解决实际遇到的问题。 + +.. code:: ipython3 + + print("hello paddle") + + +.. parsed-literal:: + + hello paddle + diff --git a/doc/paddle/tutorial/quick_start/high_level_api/high_level_api.ipynb b/doc/paddle/tutorial/quick_start/high_level_api/high_level_api.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..feeb7068ee718b97632546f2e6af1dcfe5b6878b --- /dev/null +++ b/doc/paddle/tutorial/quick_start/high_level_api/high_level_api.ipynb @@ -0,0 +1,913 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 飞桨高层API使用指南\n", + "\n", + "## 1. 简介\n", + "\n", + "飞桨2.0全新推出高层API,是对飞桨API的进一步封装与升级,提供了更加简洁易用的API,进一步提升了飞桨的易学易用性,并增强飞桨的功能。\n", + "\n", + "飞桨高层API面向从深度学习小白到资深开发者的所有人群,对于AI初学者来说,使用高层API可以简单快速的构建深度学习项目,对于资深开发者来说,可以快速完成算法迭代。\n", + "\n", + "飞桨高层API具有以下特点:\n", + "\n", + "* 易学易用: 高层API是对普通动态图API的进一步封装和优化,同时保持与普通API的兼容性,高层API使用更加易学易用,同样的实现使用高层API可以节省大量的代码。\n", + "* 低代码开发: 使用飞桨高层API的一个明显特点是,用户可编程代码量大大缩减。\n", + "* 动静转换: 高层API支持动静转换,用户只需要改一行代码即可实现将动态图代码在静态图模式下训练,既方便用户使用动态图调试模型,又提升了模型训练效率。\n", + "\n", + "在功能增强与使用方式上,高层API有以下升级:\n", + "\n", + "* 模型训练方式升级: 高层API中封装了Model类,继承了Model类的神经网络可以仅用几行代码完成模型的训练。\n", + "* 新增图像处理模块transform: 飞桨新增了图像预处理模块,其中包含数十种数据处理函数,基本涵盖了常用的数据处理、数据增强方法。\n", + "* 提供常用的神经网络模型可供调用: 高层API中集成了计算机视觉领域和自然语言处理领域常用模型,包括但不限于mobilenet、resnet、yolov3、cyclegan、bert、transformer、seq2seq等等。同时发布了对应模型的预训练模型,用户可以直接使用这些模型或者在此基础上完成二次开发。\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 2. 
安装并使用飞桨高层API\n", + "\n", + "飞桨高层API无需独立安装,只需要安装好paddlepaddle即可,安装完成后import paddle即可使用相关高层API,如:paddle.Model、视觉领域paddle.vision、NLP领域paddle.text。" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'2.0.0-beta0'" + ] + }, + "execution_count": 1, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import paddle\n", + "import paddle.vision as vision\n", + "import paddle.text as text\n", + "\n", + "paddle.__version__" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 2. 目录\n", + "\n", + "本指南教学内容覆盖\n", + "\n", + "* 使用高层API提供的自带数据集进行相关深度学习任务训练。\n", + "* 使用自定义数据进行数据集的定义、数据预处理和训练。\n", + "* 如何在数据集定义和加载中应用数据增强相关接口。\n", + "* 如何进行模型的组网。\n", + "* 高层API进行模型训练的相关API使用。\n", + "* 如何在fit接口满足需求的时候进行自定义,使用基础API来完成训练。\n", + "* 如何使用多卡来加速训练。" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 3. 数据集定义、加载和数据预处理\n", + "\n", + "对于深度学习任务,均是框架针对各种类型数字的计算,是无法直接使用原始图片和文本等文件来完成。那么就是涉及到了一项动作,就是将原始的各种数据文件进行处理加工,转换成深度学习任务可以使用的数据。\n", + "\n", + "### 3.1 框架自带数据集使用\n", + "\n", + "高层API将一些我们常用到的数据集作为领域API对用户进行开放,对应API所在目录为`paddle.vision.datasets`,那么我们先看下提供了哪些数据集。" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "视觉相关数据集: ['DatasetFolder', 'ImageFolder', 'MNIST', 'Flowers', 'Cifar10', 'Cifar100', 'VOC2012']\n", + "自然语言相关数据集: ['Conll05st', 'Imdb', 'Imikolov', 'Movielens', 'MovieReviews', 'UCIHousing', 'WMT14', 'WMT16']\n" + ] + } + ], + "source": [ + "print('视觉相关数据集:', paddle.vision.datasets.__all__)\n", + "print('自然语言相关数据集:', paddle.text.datasets.__all__)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "这里我们是加载一个手写数字识别的数据集,用`mode`来标识是训练数据还是测试数据集。数据集接口会自动从远端下载数据集到本机缓存目录`~/.cache/paddle/dataset`。" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [], + "source": [ + "# 训练数据集\n", + "train_dataset = vision.datasets.MNIST(mode='train')\n", + "\n", + "# 验证数据集\n", + "val_dataset = vision.datasets.MNIST(mode='test')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 3.2 自定义数据集\n", + "\n", + "更多的时候我们是需要自己使用已有的相关数据来定义数据集,那么这里我们通过一个案例来了解如何进行数据集的定义,飞桨为用户提供了`paddle.io.Dataset`基类,让用户通过类的集成来快速实现数据集定义。" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "=============train dataset=============\n", + "traindata1 label1\n", + "traindata2 label2\n", + "traindata3 label3\n", + "traindata4 label4\n", + "=============evaluation dataset=============\n", + "testdata1 label1\n", + "testdata2 label2\n", + "testdata3 label3\n", + "testdata4 label4\n" + ] + } + ], + "source": [ + "from paddle.io import Dataset\n", + "\n", + "\n", + "class MyDataset(Dataset):\n", + " \"\"\"\n", + " 步骤一:继承paddle.io.Dataset类\n", + " \"\"\"\n", + " def __init__(self, mode='train'):\n", + " \"\"\"\n", + " 步骤二:实现构造函数,定义数据读取方式,划分训练和测试数据集\n", + " \"\"\"\n", + " super(MyDataset, self).__init__()\n", + "\n", + " if mode == 'train':\n", + " self.data = [\n", + " ['traindata1', 'label1'],\n", + " ['traindata2', 'label2'],\n", + " ['traindata3', 'label3'],\n", + " ['traindata4', 'label4'],\n", + " ]\n", + " else:\n", + " self.data = [\n", + " ['testdata1', 'label1'],\n", + " ['testdata2', 'label2'],\n", + " ['testdata3', 'label3'],\n", + " ['testdata4', 'label4'],\n", + " ]\n", + " \n", 
+ " def __getitem__(self, index):\n", + " \"\"\"\n", + " 步骤三:实现__getitem__方法,定义指定index时如何获取数据,并返回单条数据(训练数据,对应的标签)\n", + " \"\"\"\n", + " data = self.data[index][0]\n", + " label = self.data[index][1]\n", + "\n", + " return data, label\n", + "\n", + " def __len__(self):\n", + " \"\"\"\n", + " 步骤四:实现__len__方法,返回数据集总数目\n", + " \"\"\"\n", + " return len(self.data)\n", + "\n", + "# 测试定义的数据集\n", + "train_dataset = MyDataset(mode='train')\n", + "val_dataset = MyDataset(mode='test')\n", + "\n", + "print('=============train dataset=============')\n", + "for data, label in train_dataset:\n", + " print(data, label)\n", + "\n", + "print('=============evaluation dataset=============')\n", + "for data, label in val_dataset:\n", + " print(data, label)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 3.3 数据增强\n", + "\n", + "训练过程中有时会遇到过拟合的问题,其中一个解决方法就是对训练数据做增强,对数据进行处理得到不同的图像,从而泛化数据集。数据增强API是定义在领域目录的transofrms下,这里我们介绍两种使用方式,一种是基于框架自带数据集,一种是基于自己定义的数据集。\n", + "\n", + "#### 3.3.1 框架自带数据集" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from paddle.vision.transforms import Compose, Resize, ColorJitter\n", + "\n", + "\n", + "# 定义想要使用那些数据增强方式,这里用到了随机调整亮度、对比度和饱和度,改变图片大小\n", + "transform = Compose([ColorJitter(), Resize(size=100)])\n", + "\n", + "# 通过transform参数传递定义好的数据增项方法即可完成对自带数据集的应用\n", + "train_dataset = vision.datasets.MNIST(mode='train', transform=transform)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### 3.3.2 自定义数据集\n", + "\n", + "针对自定义数据集使用数据增强有两种方式,一种是在数据集的构造函数中进行数据增强方法的定义,之后对__getitem__中返回的数据进行应用。另外一种方式也可以给自定义的数据集类暴漏一个构造参数,在实例化类的时候将数据增强方法传递进去。" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from paddle.io import Dataset\n", + "\n", + "\n", + "class MyDataset(Dataset):\n", + " def __init__(self, mode='train'):\n", + " super(MyDataset, self).__init__()\n", + "\n", + " if mode == 'train':\n", + " self.data = [\n", + " ['traindata1', 'label1'],\n", + " ['traindata2', 'label2'],\n", + " ['traindata3', 'label3'],\n", + " ['traindata4', 'label4'],\n", + " ]\n", + " else:\n", + " self.data = [\n", + " ['testdata1', 'label1'],\n", + " ['testdata2', 'label2'],\n", + " ['testdata3', 'label3'],\n", + " ['testdata4', 'label4'],\n", + " ]\n", + "\n", + " # 定义要使用的数据预处理方法,针对图片的操作\n", + " self.transform = Compose([ColorJitter(), Resize(size=100)])\n", + " \n", + " def __getitem__(self, index):\n", + " data = self.data[index][0]\n", + "\n", + " # 在这里对训练数据进行应用\n", + " # 这里只是一个示例,测试时需要将数据集更换为图片数据进行测试\n", + " data = self.transform(data)\n", + "\n", + " label = self.data[index][1]\n", + "\n", + " return data, label\n", + "\n", + " def __len__(self):\n", + " return len(self.data)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 4. 
模型组网\n", + "\n", + "针对高层API在模型组网上和基础API是统一的一套,无需投入额外的学习使用成本。那么这里我举几个简单的例子来做示例。\n", + "\n", + "### 4.1 Sequential组网\n", + "\n", + "针对顺序的线性网络结构我们可以直接使用Sequential来快速完成组网,可以减少类的定义等代码编写。" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": {}, + "outputs": [], + "source": [ + "# Sequential形式组网\n", + "mnist = paddle.nn.Sequential(\n", + " paddle.nn.Flatten(),\n", + " paddle.nn.Linear(784, 512),\n", + " paddle.nn.ReLU(),\n", + " paddle.nn.Dropout(0.2),\n", + " paddle.nn.Linear(512, 10)\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 4.2 SubClass组网\n", + "针对一些比较复杂的网络结构,就可以使用Layer子类定义的方式来进行模型代码编写,在`__init__`构造函数中进行组网Layer的声明,在`forward`中使用声明的Layer变量进行前向计算。子类组网方式也可以实现sublayer的复用,针对相同的layer可以在构造函数中一次性定义,在forward中多次调用。" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Layer类继承方式组网\n", + "class Mnist(paddle.nn.Layer):\n", + " def __init__(self):\n", + " super(Mnist, self).__init__()\n", + "\n", + " self.flatten = paddle.nn.Flatten()\n", + " self.linear_1 = paddle.nn.Linear(784, 512)\n", + " self.linear_2 = paddle.nn.Linear(512, 10)\n", + " self.relu = paddle.nn.ReLU()\n", + " self.dropout = paddle.nn.Dropout(0.2)\n", + "\n", + " def forward(self, inputs):\n", + " y = self.flatten(inputs)\n", + " y = self.linear_1(y)\n", + " y = self.relu(y)\n", + " y = self.dropout(y)\n", + " y = self.linear_2(y)\n", + "\n", + " return y\n", + "\n", + "mnist = Mnist()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 4.3 模型封装\n", + "\n", + "定义好网络结构之后我们来使用`paddle.Model`完成模型的封装,将网络结构组合成一个可快速使用高层API进行训练、评估和预测的类。\n", + "\n", + "在封装的时候我们有两种场景,动态图训练模式和静态图训练模式。" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# 场景1:动态图模式\n", + "\n", + "# 启动动态图训练模式\n", + "paddle.disable_static()\n", + "# 使用GPU训练\n", + "paddle.set_device('gpu')\n", + "# 模型封装\n", + "model = paddle.Model(mnist)\n", + "\n", + "\n", + "# 场景2:静态图模式\n", + "\n", + "# input = paddle.static.InputSpec([None, 1, 28, 28], dtype='float32')\n", + "# label = paddle.static.InputSpec([None, 1], dtype='int8')\n", + "# model = paddle.Model(mnist, input, label)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 4.4 模型可视化\n", + "\n", + "在组建好我们的网络结构后,一般我们会想去对我们的网络结构进行一下可视化,逐层的去对齐一下我们的网络结构参数,看看是否符合我们的预期。这里可以通过`Model.summary`接口进行可视化展示。\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "model.summary((1, 28, 28))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "另外,summary接口有两种使用方式,下面我们通过两个示例来做展示,除了`Model.summary`这种配套`paddle.Model`封装使用的接口外,还有一套配合没有经过`paddle.Model`封装的方式来使用。可以直接将实例化好的Layer子类放到`paddle.summary`接口中进行可视化呈现。" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "paddle.summary(mnist, (1, 28, 28))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "这里面有一个注意的点,有的用户可能会疑惑为什么要传递`(1, 28, 28)`这个input_size参数,因为在动态图中,网络定义阶段是还没有得到输入数据的形状信息,我们想要做网络结构的呈现就无从下手,那么我们通过告知接口网络结构的输入数据形状,这样网络可以通过逐层的计算推导得到完整的网络结构信息进行呈现。如果是动态图运行模式,那么就不需要给summary接口传递输入数据形状这个值了,因为在Model封装的时候我们已经定义好了InputSpec,其中包含了输入数据的形状格式。" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 5. 
模型训练\n", + "\n", + "网络结构通过`paddle.Model`接口封装成模型类后进行执行操作非常的简洁方便,可以直接通过调用`Model.fit`就可以完成训练过程。\n", + "\n", + "使用`Model.fit`接口启动训练前,我们先通过`Model.prepare`接口来对训练进行提前的配置准备工作,包括设置模型优化器,Loss计算方法,精度计算方法等。\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# 为模型训练做准备,设置优化器,损失函数和精度计算方式\n", + "model.prepare(paddle.optimizer.Adam(parameters=model.parameters()), \n", + " paddle.nn.CrossEntropyLoss(),\n", + " paddle.metric.Accuracy())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "做好模型训练的前期准备工作后,我们正式调用`fit()`接口来启动训练过程,需要指定一下至少3个关键参数:训练数据集,训练轮次和单次训练数据批次大小。" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# 启动模型训练,指定训练数据集,设置训练轮次,设置每次数据集计算的批次大小,设置日志格式\n", + "model.fit(train_dataset, \n", + " epochs=10, \n", + " batch_size=32,\n", + " verbose=1)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 5.1 单机单卡\n", + "\n", + "我们把刚才单步教学的训练代码做一个整合,这个完整的代码示例就是我们的单机单卡训练程序。" + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# 启动动态图训练模式\n", + "paddle.disable_static()\n", + "\n", + "# 使用GPU训练\n", + "paddle.set_device('gpu')\n", + "\n", + "# 构建模型训练用的Model,告知需要训练哪个模型\n", + "model = paddle.Model(mnist)\n", + "\n", + "# 为模型训练做准备,设置优化器,损失函数和精度计算方式\n", + "model.prepare(paddle.optimizer.Adam(parameters=model.parameters()), \n", + " paddle.nn.CrossEntropyLoss(),\n", + " paddle.metric.Accuracy())\n", + "\n", + "# 启动模型训练,指定训练数据集,设置训练轮次,设置每次数据集计算的批次大小,设置日志格式\n", + "model.fit(train_dataset, \n", + " epochs=10, \n", + " batch_size=32,\n", + " verbose=1)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 5.2 单机多卡\n", + "\n", + "对于高层API来实现单机多卡非常简单,整个训练代码和单机单卡没有差异。直接使用`paddle.distributed.launch`启动单机单卡的程序即可。" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# train.py里面包含的就是单机单卡代码\n", + "python -m paddle.distributed.launch train.py" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 5.3 自定义Loss\n", + "\n", + "有时我们会遇到特定任务的Loss计算方式在框架既有的Loss接口中不存在,或算法不符合自己的需求,那么期望能够自己来进行Loss的自定义,我们这里就会讲解介绍一下如何进行Loss的自定义操作,首先来看下面的代码:\n", + "\n", + "```python\n", + "class SelfDefineLoss(paddle.nn.Layer):\n", + " \"\"\"\n", + " 1. 继承paddle.nn.Layer\n", + " \"\"\"\n", + " def __init__(self):\n", + " \"\"\"\n", + " 2. 构造函数根据自己的实际算法需求和使用需求进行参数定义即可\n", + " \"\"\"\n", + " super(SelfDefineLoss, self).__init__()\n", + "\n", + " def forward(self, input, label):\n", + " \"\"\"\n", + " 3. 
实现forward函数,forward在调用时会传递两个参数:input和label\n", + " - input:单个或批次训练数据经过模型前向计算输出结果\n", + " - label:单个或批次训练数据对应的标签数据\n", + "\n", + " 接口返回值是一个Tensor,根据自定义的逻辑加和或计算均值后的损失\n", + " \"\"\"\n", + " # 使用Paddle中相关API自定义的计算逻辑\n", + " # output = xxxxx\n", + " # return output\n", + "```\n", + "\n", + "那么了解完代码层面如果编写自定义代码后我们看一个实际的例子,下面是在图像分割示例代码中写的一个自定义Loss,当时主要是想使用自定义的softmax计算维度。\n", + "\n", + "```python\n", + "class SoftmaxWithCrossEntropy(paddle.nn.Layer):\n", + " def __init__(self):\n", + " super(SoftmaxWithCrossEntropy, self).__init__()\n", + "\n", + " def forward(self, input, label):\n", + " loss = F.softmax_with_cross_entropy(input, \n", + " label, \n", + " return_softmax=False,\n", + " axis=1)\n", + " return paddle.mean(loss)\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 5.4 自定义Metric\n", + "\n", + "和Loss一样,如果遇到一些想要做个性化实现的操作时,我们也可以来通过框架完成自定义的评估计算方法,具体的实现方式如下:\n", + "\n", + "```python\n", + "class SelfDefineMetric(paddle.metric.Metric):\n", + " \"\"\"\n", + " 1. 继承paddle.metric.Metric\n", + " \"\"\"\n", + " def __init__(self):\n", + " \"\"\"\n", + " 2. 构造函数实现,自定义参数即可\n", + " \"\"\"\n", + " super(SelfDefineMetric, self).__init__()\n", + "\n", + " def name(self):\n", + " \"\"\"\n", + " 3. 实现name方法,返回定义的评估指标名字\n", + " \"\"\"\n", + " return '自定义评价指标的名字'\n", + "\n", + " def compute(self, ...)\n", + " \"\"\"\n", + " 4. 本步骤可以省略,实现compute方法,这个方法主要用于`update`的加速,可以在这个方法中调用一些paddle实现好的Tensor计算API,编译到模型网络中一起使用低层C++ OP计算。\n", + " \"\"\"\n", + "\n", + " return 自己想要返回的数据,会做为update的参数传入。\n", + "\n", + " def update(self, ...):\n", + " \"\"\"\n", + " 5. 实现update方法,用于单个batch训练时进行评估指标计算。\n", + " - 当`compute`类函数未实现时,会将模型的计算输出和标签数据的展平作为`update`的参数传入。\n", + " - 当`compute`类函数做了实现时,会将compute的返回结果作为`update`的参数传入。\n", + " \"\"\"\n", + " return acc value\n", + " \n", + " def accumulate(self):\n", + " \"\"\"\n", + " 6. 实现accumulate方法,返回历史batch训练积累后计算得到的评价指标值。\n", + " 每次`update`调用时进行数据积累,`accumulate`计算时对积累的所有数据进行计算并返回。\n", + " 结算结果会在`fit`接口的训练日志中呈现。\n", + " \"\"\"\n", + " # 利用update中积累的成员变量数据进行计算后返回\n", + " return accumulated acc value\n", + "\n", + " def reset(self):\n", + " \"\"\"\n", + " 7. 实现reset方法,每个Epoch结束后进行评估指标的重置,这样下个Epoch可以重新进行计算。\n", + " \"\"\"\n", + " # do reset action\n", + "```\n", + "\n", + "我们看一个框架中的具体例子,这个是框架中已提供的一个评估指标计算接口,这里就是按照上述说明中的实现方法进行了相关类继承和成员函数实现。\n", + "\n", + "```python\n", + "from paddle.metric import Metric\n", + "\n", + "\n", + "class Precision(Metric):\n", + " \"\"\"\n", + " Precision (also called positive predictive value) is the fraction of\n", + " relevant instances among the retrieved instances. Refer to\n", + " https://en.wikipedia.org/wiki/Evaluation_of_binary_classifiers\n", + "\n", + " Noted that this class manages the precision score only for binary\n", + " classification task.\n", + " \n", + " ......\n", + "\n", + " \"\"\"\n", + "\n", + " def __init__(self, name='precision', *args, **kwargs):\n", + " super(Precision, self).__init__(*args, **kwargs)\n", + " self.tp = 0 # true positive\n", + " self.fp = 0 # false positive\n", + " self._name = name\n", + "\n", + " def update(self, preds, labels):\n", + " \"\"\"\n", + " Update the states based on the current mini-batch prediction results.\n", + "\n", + " Args:\n", + " preds (numpy.ndarray): The prediction result, usually the output\n", + " of two-class sigmoid function. 
It should be a vector (column\n", + " vector or row vector) with data type: 'float64' or 'float32'.\n", + " labels (numpy.ndarray): The ground truth (labels),\n", + " the shape should keep the same as preds.\n", + " The data type is 'int32' or 'int64'.\n", + " \"\"\"\n", + " if isinstance(preds, paddle.Tensor):\n", + " preds = preds.numpy()\n", + " elif not _is_numpy_(preds):\n", + " raise ValueError(\"The 'preds' must be a numpy ndarray or Tensor.\")\n", + "\n", + " if isinstance(labels, paddle.Tensor):\n", + " labels = labels.numpy()\n", + " elif not _is_numpy_(labels):\n", + " raise ValueError(\"The 'labels' must be a numpy ndarray or Tensor.\")\n", + "\n", + " sample_num = labels.shape[0]\n", + " preds = np.floor(preds + 0.5).astype(\"int32\")\n", + "\n", + " for i in range(sample_num):\n", + " pred = preds[i]\n", + " label = labels[i]\n", + " if pred == 1:\n", + " if pred == label:\n", + " self.tp += 1\n", + " else:\n", + " self.fp += 1\n", + "\n", + " def reset(self):\n", + " \"\"\"\n", + " Resets all of the metric state.\n", + " \"\"\"\n", + " self.tp = 0\n", + " self.fp = 0\n", + "\n", + " def accumulate(self):\n", + " \"\"\"\n", + " Calculate the final precision.\n", + "\n", + " Returns:\n", + " A scaler float: results of the calculated precision.\n", + " \"\"\"\n", + " ap = self.tp + self.fp\n", + " return float(self.tp) / ap if ap != 0 else .0\n", + "\n", + " def name(self):\n", + " \"\"\"\n", + " Returns metric name\n", + " \"\"\"\n", + " return self._name\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 5.5 自定义Callback\n", + "\n", + "`fit`接口的callback参数支持我们传一个Callback类实例,用来在每轮训练和每个batch训练前后进行调用,可以通过callback收集到训练过程中的一些数据和参数,或者实现一些自定义操作。\n", + "\n", + "```python\n", + "class SelfDefineCallback(paddle.callbacks.Callback):\n", + " \"\"\"\n", + " 1. 继承paddle.callbacks.Callback\n", + " 2. 
按照自己的需求实现以下类成员方法:\n", + " def on_train_begin(self, logs=None) 训练开始前,`Model.fit`接口中调用\n", + " def on_train_end(self, logs=None) 训练结束后,`Model.fit`接口中调用\n", + " def on_eval_begin(self, logs=None) 评估开始前,`Model.evaluate`接口调用\n", + " def on_eval_end(self, logs=None) 评估结束后,`Model.evaluate`接口调用\n", + " def on_test_begin(self, logs=None) 预测测试开始前,`Model.predict`接口中调用\n", + " def on_test_end(self, logs=None) 预测测试结束后,`Model.predict`接口中调用 \n", + " def on_epoch_begin(self, epoch, logs=None) 每轮训练开始前,`Model.fit`接口中调用 \n", + " def on_epoch_end(self, epoch, logs=None) 每轮训练结束后,`Model.fit`接口中调用 \n", + " def on_train_batch_begin(self, step, logs=None) 单个Batch训练开始前,`Model.fit`和`Model.train_batch`接口中调用\n", + " def on_train_batch_end(self, step, logs=None) 单个Batch训练结束后,`Model.fit`和`Model.train_batch`接口中调用\n", + " def on_eval_batch_begin(self, step, logs=None) 单个Batch评估开始前,`Model.evalute`和`Model.eval_batch`接口中调用\n", + " def on_eval_batch_end(self, step, logs=None) 单个Batch评估结束后,`Model.evalute`和`Model.eval_batch`接口中调用\n", + " def on_test_batch_begin(self, step, logs=None) 单个Batch预测测试开始前,`Model.predict`和`Model.test_batch`接口中调用\n", + " def on_test_batch_end(self, step, logs=None) 单个Batch预测测试结束后,`Model.predict`和`Model.test_batch`接口中调用\n", + " \"\"\"\n", + " def __init__(self):\n", + " super(SelfDefineCallback, self).__init__()\n", + "\n", + " 按照需求定义自己的类成员方法\n", + "```\n", + "\n", + "我们看一个框架中的实际例子,这是一个框架自带的ModelCheckpoint回调函数,方便用户在fit训练模型时自动存储每轮训练得到的模型。\n", + "\n", + "```python\n", + "class ModelCheckpoint(Callback):\n", + " def __init__(self, save_freq=1, save_dir=None):\n", + " self.save_freq = save_freq\n", + " self.save_dir = save_dir\n", + "\n", + " def on_epoch_begin(self, epoch=None, logs=None):\n", + " self.epoch = epoch\n", + "\n", + " def _is_save(self):\n", + " return self.model and self.save_dir and ParallelEnv().local_rank == 0\n", + "\n", + " def on_epoch_end(self, epoch, logs=None):\n", + " if self._is_save() and self.epoch % self.save_freq == 0:\n", + " path = '{}/{}'.format(self.save_dir, epoch)\n", + " print('save checkpoint at {}'.format(os.path.abspath(path)))\n", + " self.model.save(path)\n", + "\n", + " def on_train_end(self, logs=None):\n", + " if self._is_save():\n", + " path = '{}/final'.format(self.save_dir)\n", + " print('save checkpoint at {}'.format(os.path.abspath(path)))\n", + " self.model.save(path)\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 6. 模型评估\n", + "\n", + "对于训练好的模型进行评估操作可以使用`evaluate`接口来实现,事先定义好用于评估使用的数据集后,可以简单的调用`evaluate`接口即可完成模型评估操作,结束后根据prepare中loss和metric的定义来进行相关评估结果计算返回。\n", + "\n", + "返回格式是一个字典:\n", + "* 只包含loss,`{'loss': xxx}`\n", + "* 包含loss和一个评估指标,`{'loss': xxx, 'metric name': xxx}`\n", + "* 包含loss和多个评估指标,`{'loss': xxx, 'metric name': xxx, 'metric name': xxx}`" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result = model.evaluate(val_dataset, verbose=1)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 7. 
模型预测\n", + "\n", + "高层API中提供了`predict`接口来方便用户对训练好的模型进行预测验证,只需要基于训练好的模型将需要进行预测测试的数据放到接口中进行计算即可,接口会将经过模型计算得到的预测结果进行返回。\n", + "\n", + "返回格式是一个list,元素数目对应模型的输出数目:\n", + "* 模型是单一输出:[(numpy_ndarray_1, numpy_ndarray_2, ..., numpy_ndarray_n)]\n", + "* 模型是多输出:[(numpy_ndarray_1, numpy_ndarray_2, ..., numpy_ndarray_n), (numpy_ndarray_1, numpy_ndarray_2, ..., numpy_ndarray_n), ...]\n", + "\n", + "numpy_ndarray_n是对应原始数据经过模型计算后得到的预测数据,数目对应预测数据集的数目。" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pred_result = model.predict(val_dataset)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 7.1 使用多卡进行预测\n", + "\n", + "有时我们需要进行预测验证的数据较多,单卡无法满足我们的时间诉求,那么`predict`接口也为用户支持实现了使用多卡模式来运行。\n", + "\n", + "使用起来也是超级简单,无需修改代码程序,只需要使用launch来启动对应的预测脚本即可。\n", + "\n", + "```bash\n", + "$ python3 -m paddle.distributed.launch infer.py\n", + "```\n", + "\n", + "infer.py里面就是包含model.predict的代码程序。" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 8. 模型部署\n", + "\n", + "### 8.1 模型存储\n", + "\n", + "模型训练和验证达到我们的预期后,可以使用`save`接口来将我们的模型保存下来,用于后续模型的Fine-tuning(接口参数training=True)或推理部署(接口参数training=False)。" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# 保存用于推理部署的模型(training=False)\n", + "model.save('~/model/mnist', training=False)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 8.2 预测部署\n", + "\n", + "有了用于推理部署的模型,就可以使用推理部署框架来完成预测服务部署,具体可以参见:[预测部署](https://www.paddlepaddle.org.cn/documentation/docs/zh/advanced_guide/inference_deployment/index_cn.html), 包括服务端部署、移动端部署和模型压缩。" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3.7.4 64-bit", + "language": "python", + "name": "python37464bitc4da1ac836094043840bff631bedbf7f" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.4" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/doc/paddle/tutorial/quick_start/high_level_api/high_level_api.rst b/doc/paddle/tutorial/quick_start/high_level_api/high_level_api.rst new file mode 100644 index 0000000000000000000000000000000000000000..3359926c541f34d0533b0971332854f5271a179e --- /dev/null +++ b/doc/paddle/tutorial/quick_start/high_level_api/high_level_api.rst @@ -0,0 +1,690 @@ +飞桨高层API使用指南 +=================== + +1. 简介 +------- + +飞桨框架2.0全新推出高层API,是对飞桨API的进一步封装与升级,提供了更加简洁易用的API,进一步提升了飞桨的易学易用性,并增强飞桨的功能。 + +飞桨高层API面向从深度学习小白到资深开发者的所有人群,对于AI初学者来说,使用高层API可以简单快速的构建深度学习项目,对于资深开发者来说,可以快速完成算法迭代。 + +飞桨高层API具有以下特点: + +- 易学易用: + 高层API是对普通动态图API的进一步封装和优化,同时保持与普通API的兼容性,高层API使用更加易学易用,同样的实现使用高层API可以节省大量的代码。 +- 低代码开发: + 使用飞桨高层API的一个明显特点是,用户可编程代码量大大缩减。 +- 动静转换: + 高层API支持动静转换,用户只需要改一行代码即可实现将动态图代码在静态图模式下训练,既方便用户使用动态图调试模型,又提升了模型训练效率。 + +在功能增强与使用方式上,高层API有以下升级: + +- 模型训练方式升级: + 高层API中封装了Model类,继承了Model类的神经网络可以仅用几行代码完成模型的训练。 +- 新增图像处理模块transform: + 飞桨新增了图像预处理模块,其中包含数十种数据处理函数,基本涵盖了常用的数据处理、数据增强方法。 +- 提供常用的神经网络模型可供调用: + 高层API中集成了计算机视觉领域和自然语言处理领域常用模型,包括但不限于mobilenet、resnet、yolov3、cyclegan、bert、transformer、seq2seq等等。同时发布了对应模型的预训练模型,用户可以直接使用这些模型或者在此基础上完成二次开发。 + +2. 安装并使用飞桨高层API +------------------------ + +飞桨高层API无需独立安装,只需要安装好paddlepaddle即可,安装完成后import +paddle即可使用相关高层API,如:paddle.Model、视觉领域paddle.vision、NLP领域paddle.text。 + +.. 
code:: ipython3 + + import paddle + import paddle.vision as vision + import paddle.text as text + + paddle.__version__ + + + + +.. parsed-literal:: + + '2.0.0-beta0' + + + +2. 目录 +------- + +本指南教学内容覆盖 + +- 使用高层API提供的自带数据集进行相关深度学习任务训练。 +- 使用自定义数据进行数据集的定义、数据预处理和训练。 +- 如何在数据集定义和加载中应用数据增强相关接口。 +- 如何进行模型的组网。 +- 高层API进行模型训练的相关API使用。 +- 如何在fit接口满足需求的时候进行自定义,使用基础API来完成训练。 +- 如何使用多卡来加速训练。 + +3. 数据集定义、加载和数据预处理 +------------------------------- + +对于深度学习任务,均是框架针对各种类型数字的计算,是无法直接使用原始图片和文本等文件来完成。那么就是涉及到了一项动作,就是将原始的各种数据文件进行处理加工,转换成深度学习任务可以使用的数据。 + +3.1 框架自带数据集使用 +~~~~~~~~~~~~~~~~~~~~~~ + +高层API将一些我们常用到的数据集作为领域API对用户进行开放,对应API所在目录为\ ``paddle.vision.datasets``\ ,那么我们先看下提供了哪些数据集。 + +.. code:: ipython3 + + print('视觉相关数据集:', paddle.vision.datasets.__all__) + print('自然语言相关数据集:', paddle.text.datasets.__all__) + + +.. parsed-literal:: + + 视觉相关数据集: ['DatasetFolder', 'ImageFolder', 'MNIST', 'Flowers', 'Cifar10', 'Cifar100', 'VOC2012'] + 自然语言相关数据集: ['Conll05st', 'Imdb', 'Imikolov', 'Movielens', 'MovieReviews', 'UCIHousing', 'WMT14', 'WMT16'] + + +这里我们是加载一个手写数字识别的数据集,用\ ``mode``\ 来标识是训练数据还是测试数据集。数据集接口会自动从远端下载数据集到本机缓存目录\ ``~/.cache/paddle/dataset``\ 。 + +.. code:: ipython3 + + # 训练数据集 + train_dataset = vision.datasets.MNIST(mode='train') + + # 验证数据集 + val_dataset = vision.datasets.MNIST(mode='test') + +3.2 自定义数据集 +~~~~~~~~~~~~~~~~ + +更多的时候我们是需要自己使用已有的相关数据来定义数据集,那么这里我们通过一个案例来了解如何进行数据集的定义,飞桨为用户提供了\ ``paddle.io.Dataset``\ 基类,让用户通过类的集成来快速实现数据集定义。 + +.. code:: ipython3 + + from paddle.io import Dataset + + + class MyDataset(Dataset): + """ + 步骤一:继承paddle.io.Dataset类 + """ + def __init__(self, mode='train'): + """ + 步骤二:实现构造函数,定义数据读取方式,划分训练和测试数据集 + """ + super(MyDataset, self).__init__() + + if mode == 'train': + self.data = [ + ['traindata1', 'label1'], + ['traindata2', 'label2'], + ['traindata3', 'label3'], + ['traindata4', 'label4'], + ] + else: + self.data = [ + ['testdata1', 'label1'], + ['testdata2', 'label2'], + ['testdata3', 'label3'], + ['testdata4', 'label4'], + ] + + def __getitem__(self, index): + """ + 步骤三:实现__getitem__方法,定义指定index时如何获取数据,并返回单条数据(训练数据,对应的标签) + """ + data = self.data[index][0] + label = self.data[index][1] + + return data, label + + def __len__(self): + """ + 步骤四:实现__len__方法,返回数据集总数目 + """ + return len(self.data) + + # 测试定义的数据集 + train_dataset = MyDataset(mode='train') + val_dataset = MyDataset(mode='test') + + print('=============train dataset=============') + for data, label in train_dataset: + print(data, label) + + print('=============evaluation dataset=============') + for data, label in val_dataset: + print(data, label) + + +.. parsed-literal:: + + =============train dataset============= + traindata1 label1 + traindata2 label2 + traindata3 label3 + traindata4 label4 + =============evaluation dataset============= + testdata1 label1 + testdata2 label2 + testdata3 label3 + testdata4 label4 + + +3.3 数据增强 +~~~~~~~~~~~~ + +训练过程中有时会遇到过拟合的问题,其中一个解决方法就是对训练数据做增强,对数据进行处理得到不同的图像,从而泛化数据集。数据增强API是定义在领域目录的transofrms下,这里我们介绍两种使用方式,一种是基于框架自带数据集,一种是基于自己定义的数据集。 + +3.3.1 框架自带数据集 +^^^^^^^^^^^^^^^^^^^^ + +.. code:: ipython3 + + from paddle.vision.transforms import Compose, Resize, ColorJitter + + + # 定义想要使用那些数据增强方式,这里用到了随机调整亮度、对比度和饱和度,改变图片大小 + transform = Compose([ColorJitter(), Resize(size=100)]) + + # 通过transform参数传递定义好的数据增项方法即可完成对自带数据集的应用 + train_dataset = vision.datasets.MNIST(mode='train', transform=transform) + +3.3.2 自定义数据集 +^^^^^^^^^^^^^^^^^^ + +针对自定义数据集使用数据增强有两种方式,一种是在数据集的构造函数中进行数据增强方法的定义,之后对__getitem__中返回的数据进行应用。另外一种方式也可以给自定义的数据集类暴漏一个构造参数,在实例化类的时候将数据增强方法传递进去。 + +.. 
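code:: python
+
+    # (补充示意,非原文内容)这是上文提到的“第二种方式”的一个最小示例:
+    # 给自定义数据集暴露一个 transform 构造参数,在实例化时把数据增强方法传进来;
+    # “第一种方式”(在构造函数内部定义 transform)见下方原有示例。
+    from paddle.io import Dataset
+    from paddle.vision.transforms import Compose, ColorJitter, Resize
+
+
+    class MyDataset(Dataset):
+        def __init__(self, mode='train', transform=None):
+            super(MyDataset, self).__init__()
+
+            if mode == 'train':
+                self.data = [
+                    ['traindata1', 'label1'],
+                    ['traindata2', 'label2'],
+                ]
+            else:
+                self.data = [
+                    ['testdata1', 'label1'],
+                    ['testdata2', 'label2'],
+                ]
+
+            # 数据增强方法由外部传入
+            self.transform = transform
+
+        def __getitem__(self, index):
+            data = self.data[index][0]
+
+            # 与原示例一样,这里只是示意;实际使用时需要换成图片数据
+            if self.transform is not None:
+                data = self.transform(data)
+
+            label = self.data[index][1]
+            return data, label
+
+        def __len__(self):
+            return len(self.data)
+
+
+    # 实例化时把数据增强方法作为参数传递进去
+    train_dataset = MyDataset(mode='train',
+                              transform=Compose([ColorJitter(), Resize(size=100)]))
+
+.. 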
code:: ipython3 + + from paddle.io import Dataset + + + class MyDataset(Dataset): + def __init__(self, mode='train'): + super(MyDataset, self).__init__() + + if mode == 'train': + self.data = [ + ['traindata1', 'label1'], + ['traindata2', 'label2'], + ['traindata3', 'label3'], + ['traindata4', 'label4'], + ] + else: + self.data = [ + ['testdata1', 'label1'], + ['testdata2', 'label2'], + ['testdata3', 'label3'], + ['testdata4', 'label4'], + ] + + # 定义要使用的数据预处理方法,针对图片的操作 + self.transform = Compose([ColorJitter(), Resize(size=100)]) + + def __getitem__(self, index): + data = self.data[index][0] + + # 在这里对训练数据进行应用 + # 这里只是一个示例,测试时需要将数据集更换为图片数据进行测试 + data = self.transform(data) + + label = self.data[index][1] + + return data, label + + def __len__(self): + return len(self.data) + +4. 模型组网 +----------- + +针对高层API在模型组网上和基础API是统一的一套,无需投入额外的学习使用成本。那么这里我举几个简单的例子来做示例。 + +4.1 Sequential组网 +~~~~~~~~~~~~~~~~~~ + +针对顺序的线性网络结构我们可以直接使用Sequential来快速完成组网,可以减少类的定义等代码编写。 + +.. code:: ipython3 + + # Sequential形式组网 + mnist = paddle.nn.Sequential( + paddle.nn.Flatten(), + paddle.nn.Linear(784, 512), + paddle.nn.ReLU(), + paddle.nn.Dropout(0.2), + paddle.nn.Linear(512, 10) + ) + +4.2 SubClass组网 +~~~~~~~~~~~~~~~~ + +针对一些比较复杂的网络结构,就可以使用Layer子类定义的方式来进行模型代码编写,在\ ``__init__``\ 构造函数中进行组网Layer的声明,在\ ``forward``\ 中使用声明的Layer变量进行前向计算。子类组网方式也可以实现sublayer的复用,针对相同的layer可以在构造函数中一次性定义,在forward中多次调用。 + +.. code:: ipython3 + + # Layer类继承方式组网 + class Mnist(paddle.nn.Layer): + def __init__(self): + super(Mnist, self).__init__() + + self.flatten = paddle.nn.Flatten() + self.linear_1 = paddle.nn.Linear(784, 512) + self.linear_2 = paddle.nn.Linear(512, 10) + self.relu = paddle.nn.ReLU() + self.dropout = paddle.nn.Dropout(0.2) + + def forward(self, inputs): + y = self.flatten(inputs) + y = self.linear_1(y) + y = self.relu(y) + y = self.dropout(y) + y = self.linear_2(y) + + return y + + mnist = Mnist() + +4.3 模型封装 +~~~~~~~~~~~~ + +定义好网络结构之后我们来使用\ ``paddle.Model``\ 完成模型的封装,将网络结构组合成一个可快速使用高层API进行训练、评估和预测的类。 + +在封装的时候我们有两种场景,动态图训练模式和静态图训练模式。 + +.. code:: ipython3 + + # 场景1:动态图模式 + + # 启动动态图训练模式 + paddle.disable_static() + # 使用GPU训练 + paddle.set_device('gpu') + # 模型封装 + model = paddle.Model(mnist) + + + # 场景2:静态图模式 + + # input = paddle.static.InputSpec([None, 1, 28, 28], dtype='float32') + # label = paddle.static.InputSpec([None, 1], dtype='int8') + # model = paddle.Model(mnist, input, label) + +4.4 模型可视化 +~~~~~~~~~~~~~~ + +在组建好我们的网络结构后,一般我们会想去对我们的网络结构进行一下可视化,逐层的去对齐一下我们的网络结构参数,看看是否符合我们的预期。这里可以通过\ ``Model.summary``\ 接口进行可视化展示。 + +.. code:: ipython3 + + model.summary((1, 28, 28)) + +另外,summary接口有两种使用方式,下面我们通过两个示例来做展示,除了\ ``Model.summary``\ 这种配套\ ``paddle.Model``\ 封装使用的接口外,还有一套配合没有经过\ ``paddle.Model``\ 封装的方式来使用。可以直接将实例化好的Layer子类放到\ ``paddle.summary``\ 接口中进行可视化呈现。 + +.. code:: ipython3 + + paddle.summary(mnist, (1, 28, 28)) + +这里面有一个注意的点,有的用户可能会疑惑为什么要传递\ ``(1, 28, 28)``\ 这个input_size参数,因为在动态图中,网络定义阶段是还没有得到输入数据的形状信息,我们想要做网络结构的呈现就无从下手,那么我们通过告知接口网络结构的输入数据形状,这样网络可以通过逐层的计算推导得到完整的网络结构信息进行呈现。如果是动态图运行模式,那么就不需要给summary接口传递输入数据形状这个值了,因为在Model封装的时候我们已经定义好了InputSpec,其中包含了输入数据的形状格式。 + +5. 模型训练 +----------- + +网络结构通过\ ``paddle.Model``\ 接口封装成模型类后进行执行操作非常的简洁方便,可以直接通过调用\ ``Model.fit``\ 就可以完成训练过程。 + +使用\ ``Model.fit``\ 接口启动训练前,我们先通过\ ``Model.prepare``\ 接口来对训练进行提前的配置准备工作,包括设置模型优化器,Loss计算方法,精度计算方法等。 + +.. 
code:: ipython3 + + # 为模型训练做准备,设置优化器,损失函数和精度计算方式 + model.prepare(paddle.optimizer.Adam(parameters=model.parameters()), + paddle.nn.CrossEntropyLoss(), + paddle.metric.Accuracy()) + +做好模型训练的前期准备工作后,我们正式调用\ ``fit()``\ 接口来启动训练过程,需要指定一下至少3个关键参数:训练数据集,训练轮次和单次训练数据批次大小。 + +.. code:: ipython3 + + # 启动模型训练,指定训练数据集,设置训练轮次,设置每次数据集计算的批次大小,设置日志格式 + model.fit(train_dataset, + epochs=10, + batch_size=32, + verbose=1) + +5.1 单机单卡 +~~~~~~~~~~~~ + +我们把刚才单步教学的训练代码做一个整合,这个完整的代码示例就是我们的单机单卡训练程序。 + +.. code:: ipython3 + + # 启动动态图训练模式 + paddle.disable_static() + + # 使用GPU训练 + paddle.set_device('gpu') + + # 构建模型训练用的Model,告知需要训练哪个模型 + model = paddle.Model(mnist) + + # 为模型训练做准备,设置优化器,损失函数和精度计算方式 + model.prepare(paddle.optimizer.Adam(parameters=model.parameters()), + paddle.nn.CrossEntropyLoss(), + paddle.metric.Accuracy()) + + # 启动模型训练,指定训练数据集,设置训练轮次,设置每次数据集计算的批次大小,设置日志格式 + model.fit(train_dataset, + epochs=10, + batch_size=32, + verbose=1) + +5.2 单机多卡 +~~~~~~~~~~~~ + +对于高层API来实现单机多卡非常简单,整个训练代码和单机单卡没有差异。直接使用\ ``paddle.distributed.launch``\ 启动单机单卡的程序即可。 + +.. code:: ipython3 + + # train.py里面包含的就是单机单卡代码 + python -m paddle.distributed.launch train.py + +5.3 自定义Loss +~~~~~~~~~~~~~~ + +有时我们会遇到特定任务的Loss计算方式在框架既有的Loss接口中不存在,或算法不符合自己的需求,那么期望能够自己来进行Loss的自定义,我们这里就会讲解介绍一下如何进行Loss的自定义操作,首先来看下面的代码: + +.. code:: python + + class SelfDefineLoss(paddle.nn.Layer): + """ + 1. 继承paddle.nn.Layer + """ + def __init__(self): + """ + 2. 构造函数根据自己的实际算法需求和使用需求进行参数定义即可 + """ + super(SelfDefineLoss, self).__init__() + + def forward(self, input, label): + """ + 3. 实现forward函数,forward在调用时会传递两个参数:input和label + - input:单个或批次训练数据经过模型前向计算输出结果 + - label:单个或批次训练数据对应的标签数据 + + 接口返回值是一个Tensor,根据自定义的逻辑加和或计算均值后的损失 + """ + # 使用Paddle中相关API自定义的计算逻辑 + # output = xxxxx + # return output + +那么了解完代码层面如果编写自定义代码后我们看一个实际的例子,下面是在图像分割示例代码中写的一个自定义Loss,当时主要是想使用自定义的softmax计算维度。 + +.. code:: python + + class SoftmaxWithCrossEntropy(paddle.nn.Layer): + def __init__(self): + super(SoftmaxWithCrossEntropy, self).__init__() + + def forward(self, input, label): + loss = F.softmax_with_cross_entropy(input, + label, + return_softmax=False, + axis=1) + return paddle.mean(loss) + +5.4 自定义Metric +~~~~~~~~~~~~~~~~ + +和Loss一样,如果遇到一些想要做个性化实现的操作时,我们也可以来通过框架完成自定义的评估计算方法,具体的实现方式如下: + +.. code:: python + + class SelfDefineMetric(paddle.metric.Metric): + """ + 1. 继承paddle.metric.Metric + """ + def __init__(self): + """ + 2. 构造函数实现,自定义参数即可 + """ + super(SelfDefineMetric, self).__init__() + + def name(self): + """ + 3. 实现name方法,返回定义的评估指标名字 + """ + return '自定义评价指标的名字' + + def compute(self, ...) + """ + 4. 本步骤可以省略,实现compute方法,这个方法主要用于`update`的加速,可以在这个方法中调用一些paddle实现好的Tensor计算API,编译到模型网络中一起使用低层C++ OP计算。 + """ + + return 自己想要返回的数据,会做为update的参数传入。 + + def update(self, ...): + """ + 5. 实现update方法,用于单个batch训练时进行评估指标计算。 + - 当`compute`类函数未实现时,会将模型的计算输出和标签数据的展平作为`update`的参数传入。 + - 当`compute`类函数做了实现时,会将compute的返回结果作为`update`的参数传入。 + """ + return acc value + + def accumulate(self): + """ + 6. 实现accumulate方法,返回历史batch训练积累后计算得到的评价指标值。 + 每次`update`调用时进行数据积累,`accumulate`计算时对积累的所有数据进行计算并返回。 + 结算结果会在`fit`接口的训练日志中呈现。 + """ + # 利用update中积累的成员变量数据进行计算后返回 + return accumulated acc value + + def reset(self): + """ + 7. 实现reset方法,每个Epoch结束后进行评估指标的重置,这样下个Epoch可以重新进行计算。 + """ + # do reset action + +我们看一个框架中的具体例子,这个是框架中已提供的一个评估指标计算接口,这里就是按照上述说明中的实现方法进行了相关类继承和成员函数实现。 + +.. code:: python + + from paddle.metric import Metric + + + class Precision(Metric): + """ + Precision (also called positive predictive value) is the fraction of + relevant instances among the retrieved instances. 
Refer to + https://en.wikipedia.org/wiki/Evaluation_of_binary_classifiers + + Noted that this class manages the precision score only for binary + classification task. + + ...... + + """ + + def __init__(self, name='precision', *args, **kwargs): + super(Precision, self).__init__(*args, **kwargs) + self.tp = 0 # true positive + self.fp = 0 # false positive + self._name = name + + def update(self, preds, labels): + """ + Update the states based on the current mini-batch prediction results. + + Args: + preds (numpy.ndarray): The prediction result, usually the output + of two-class sigmoid function. It should be a vector (column + vector or row vector) with data type: 'float64' or 'float32'. + labels (numpy.ndarray): The ground truth (labels), + the shape should keep the same as preds. + The data type is 'int32' or 'int64'. + """ + if isinstance(preds, paddle.Tensor): + preds = preds.numpy() + elif not _is_numpy_(preds): + raise ValueError("The 'preds' must be a numpy ndarray or Tensor.") + + if isinstance(labels, paddle.Tensor): + labels = labels.numpy() + elif not _is_numpy_(labels): + raise ValueError("The 'labels' must be a numpy ndarray or Tensor.") + + sample_num = labels.shape[0] + preds = np.floor(preds + 0.5).astype("int32") + + for i in range(sample_num): + pred = preds[i] + label = labels[i] + if pred == 1: + if pred == label: + self.tp += 1 + else: + self.fp += 1 + + def reset(self): + """ + Resets all of the metric state. + """ + self.tp = 0 + self.fp = 0 + + def accumulate(self): + """ + Calculate the final precision. + + Returns: + A scaler float: results of the calculated precision. + """ + ap = self.tp + self.fp + return float(self.tp) / ap if ap != 0 else .0 + + def name(self): + """ + Returns metric name + """ + return self._name + +5.5 自定义Callback +~~~~~~~~~~~~~~~~~~ + +``fit``\ 接口的callback参数支持我们传一个Callback类实例,用来在每轮训练和每个batch训练前后进行调用,可以通过callback收集到训练过程中的一些数据和参数,或者实现一些自定义操作。 + +.. code:: python + + class SelfDefineCallback(paddle.callbacks.Callback): + """ + 1. 继承paddle.callbacks.Callback + 2. 按照自己的需求实现以下类成员方法: + def on_train_begin(self, logs=None) 训练开始前,`Model.fit`接口中调用 + def on_train_end(self, logs=None) 训练结束后,`Model.fit`接口中调用 + def on_eval_begin(self, logs=None) 评估开始前,`Model.evaluate`接口调用 + def on_eval_end(self, logs=None) 评估结束后,`Model.evaluate`接口调用 + def on_test_begin(self, logs=None) 预测测试开始前,`Model.predict`接口中调用 + def on_test_end(self, logs=None) 预测测试结束后,`Model.predict`接口中调用 + def on_epoch_begin(self, epoch, logs=None) 每轮训练开始前,`Model.fit`接口中调用 + def on_epoch_end(self, epoch, logs=None) 每轮训练结束后,`Model.fit`接口中调用 + def on_train_batch_begin(self, step, logs=None) 单个Batch训练开始前,`Model.fit`和`Model.train_batch`接口中调用 + def on_train_batch_end(self, step, logs=None) 单个Batch训练结束后,`Model.fit`和`Model.train_batch`接口中调用 + def on_eval_batch_begin(self, step, logs=None) 单个Batch评估开始前,`Model.evalute`和`Model.eval_batch`接口中调用 + def on_eval_batch_end(self, step, logs=None) 单个Batch评估结束后,`Model.evalute`和`Model.eval_batch`接口中调用 + def on_test_batch_begin(self, step, logs=None) 单个Batch预测测试开始前,`Model.predict`和`Model.test_batch`接口中调用 + def on_test_batch_end(self, step, logs=None) 单个Batch预测测试结束后,`Model.predict`和`Model.test_batch`接口中调用 + """ + def __init__(self): + super(SelfDefineCallback, self).__init__() + + 按照需求定义自己的类成员方法 + +我们看一个框架中的实际例子,这是一个框架自带的ModelCheckpoint回调函数,方便用户在fit训练模型时自动存储每轮训练得到的模型。 + +.. 
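code:: python
+
+    # (补充示意,非原文内容)回调通过 fit 接口的 callbacks 参数传入;这里假设模型已按
+    # 前文完成 paddle.Model 封装和 prepare,并以自带的 ModelCheckpoint(源码见下方)为例。
+    callbacks = [paddle.callbacks.ModelCheckpoint(save_freq=1, save_dir='./checkpoints')]
+
+    model.fit(train_dataset,
+              epochs=2,
+              batch_size=32,
+              callbacks=callbacks,
+              verbose=1)
+
+.. 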
+我们再看一个框架中的实际例子。这是框架自带的ModelCheckpoint回调函数,方便用户在fit训练模型时自动存储每轮训练得到的模型。
+
+.. code:: python
+
+    class ModelCheckpoint(Callback):
+        def __init__(self, save_freq=1, save_dir=None):
+            self.save_freq = save_freq
+            self.save_dir = save_dir
+
+        def on_epoch_begin(self, epoch=None, logs=None):
+            self.epoch = epoch
+
+        def _is_save(self):
+            return self.model and self.save_dir and ParallelEnv().local_rank == 0
+
+        def on_epoch_end(self, epoch, logs=None):
+            if self._is_save() and self.epoch % self.save_freq == 0:
+                path = '{}/{}'.format(self.save_dir, epoch)
+                print('save checkpoint at {}'.format(os.path.abspath(path)))
+                self.model.save(path)
+
+        def on_train_end(self, logs=None):
+            if self._is_save():
+                path = '{}/final'.format(self.save_dir)
+                print('save checkpoint at {}'.format(os.path.abspath(path)))
+                self.model.save(path)
+
+6. 模型评估
+-----------
+
+对训练好的模型进行评估可以使用\ ``evaluate``\ 接口。事先定义好用于评估的数据集后,简单调用\ ``evaluate``\ 接口即可完成模型评估,评估结束后会根据prepare中loss和metric的定义计算并返回相关评估结果。
+
+返回格式是一个字典:
+
+- 只包含loss: ``{'loss': xxx}``
+- 包含loss和一个评估指标: ``{'loss': xxx, 'metric name': xxx}``
+- 包含loss和多个评估指标: ``{'loss': xxx, 'metric name': xxx, 'metric name': xxx}``
+
+.. code:: ipython3
+
+    result = model.evaluate(val_dataset, verbose=1)
+
+7. 模型预测
+-----------
+
+高层API提供了\ ``predict``\ 接口,方便用户对训练好的模型进行预测验证。只需要将待预测的数据传入接口,接口就会返回模型计算得到的预测结果。
+
+返回格式是一个list,元素数目对应模型的输出数目:
+
+- 模型是单一输出:[(numpy_ndarray_1, numpy_ndarray_2, …, numpy_ndarray_n)]
+- 模型是多输出:[(numpy_ndarray_1, numpy_ndarray_2, …, numpy_ndarray_n), (numpy_ndarray_1, numpy_ndarray_2, …, numpy_ndarray_n), …]
+
+numpy_ndarray_n是对应原始数据经过模型计算后得到的预测数据,数目对应预测数据集的样本数目。
+
+.. code:: ipython3
+
+    pred_result = model.predict(val_dataset)
+
+7.1 使用多卡进行预测
+~~~~~~~~~~~~~~~~~~~~
+
+有时需要预测的数据量较大,单卡无法满足时间上的要求,此时\ ``predict``\ 接口也支持使用多卡模式运行。
+
+使用方式也很简单:无需修改代码,只需要使用launch启动对应的预测脚本即可。
+
+.. code:: bash
+
+    $ python3 -m paddle.distributed.launch infer.py
+
+infer.py中包含的就是调用model.predict的预测代码。
+
+8. 模型部署
+-----------
+
+8.1 模型存储
+~~~~~~~~~~~~
+
+模型训练和验证达到我们的预期后,可以使用\ ``save``\ 接口来将模型保存下来,用于后续模型的Fine-tuning(接口参数training=True)或推理部署(接口参数training=False)。
+
+.. code:: ipython3
+
+    # 保存用于推理部署的模型(training=False)
+    model.save('~/model/mnist', training=False)
+
+8.2 预测部署
+~~~~~~~~~~~~
+
+有了用于推理部署的模型,就可以使用推理部署框架来完成预测服务部署,具体可以参见:\ `预测部署 `__\ ,包括服务端部署、移动端部署和模型压缩。
diff --git a/doc/paddle/tutorial/quick_start/index_cn.rst b/doc/paddle/tutorial/quick_start/index_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..8c90bc463cba92f965a5b3c160ebb5bb78da84aa
--- /dev/null
+++ b/doc/paddle/tutorial/quick_start/index_cn.rst
@@ -0,0 +1,25 @@
+################
+快速上手
+################
+
+
+在这里PaddlePaddle为大家提供了一些简单的案例,帮助你快速上手Paddle 2.0:
+
+ - `hello paddle <./hello_paddle/hello_paddle.html>`_ :简单介绍 Paddle,完成您的第一个Paddle项目。
+ - `Paddle 动态图 <./dynamic_graph/dynamic_graph.html>`_ :介绍使用 Paddle 动态图。
+ - `高层API快速上手 <./getting_started/getting_started.html>`_ :介绍Paddle高层API,快速完成模型搭建。
+ - `高层API详细介绍 <./high_level_api/high_level_api.html>`_ :详细介绍Paddle高层API。
+ - `模型加载与保存 <./save_model/save_model.html>`_ :介绍Paddle 模型的加载与保存。
+ - `线性回归 <./linear_regression/linear_regression.html>`_ :介绍使用 Paddle 实现线性回归任务。
+
+
+..
toctree:: + :hidden: + :titlesonly: + + hello_paddle/hello_paddle.rst + dynamic_graph/dynamic_graph.rst + getting_started/getting_started.rst + high_level_api/high_level_api.rst + save_model/save_model.rst + linear_regression/linear_regression.rst diff --git a/doc/paddle/tutorial/quick_start/linear_regression/linear_regression.ipynb b/doc/paddle/tutorial/quick_start/linear_regression/linear_regression.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..798e1971ed1cc2ac352f4c23a9fa807c7ae606d7 --- /dev/null +++ b/doc/paddle/tutorial/quick_start/linear_regression/linear_regression.ipynb @@ -0,0 +1,718 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "V0CqD2QfXd9R" + }, + "source": [ + "# 线性回归\n", + "NOTE: 本示例教程是基于2.0beta版本开发" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "6lPmRFntXYIp" + }, + "source": [ + "## 简要介绍\n", + "经典的线性回归模型主要用来预测一些存在着线性关系的数据集。回归模型可以理解为:存在一个点集,用一条曲线去拟合它分布的过程。如果拟合曲线是一条直线,则称为线性回归。如果是一条二次曲线,则被称为二次回归。线性回归是回归模型中最简单的一种。 \n", + "本示例简要介绍如何用飞桨开源框架,实现波士顿房价预测。其思路是,假设uci-housing数据集中的房子属性和房价之间的关系可以被属性间的线性组合描述。在模型训练阶段,让假设的预测结果和真实值之间的误差越来越小。在模型预测阶段,预测器会读取训练好的模型,对从未遇见过的房子属性进行房价预测。" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "OEOMtGXCZaRR" + }, + "source": [ + "## 数据集介绍\n", + "本示例采用uci-housing数据集,这是经典线性回归的数据集。数据集共7084条数据,可以拆分成506行,每行14列。前13列用来描述房屋的各种信息,最后一列为该类房屋价格中位数。" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "前13列用来描述房屋的各种信息\n", + "\n", + "![avatar](https://ai-studio-static-online.cdn.bcebos.com/c19602ce74284e3b9a50422f8dc37c0c1c79cf5cd8424994b6a6b073dcb7c057)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 训练方式一\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 环境设置" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'2.0.0-beta0'" + ] + }, + "execution_count": 1, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import paddle\n", + "import numpy as np\n", + "import os\n", + "import matplotlib\n", + "import matplotlib.pyplot as plt\n", + "import pandas as pd\n", + "import seaborn as sns\n", + "\n", + "paddle.disable_static()\n", + "paddle.__version__" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 数据处理" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "#下载数据\n", + "#!wget https://archive.ics.uci.edu/ml/machine-learning-databases/housing/housing.data -O housing.data " + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "# 从文件导入数据\n", + "datafile = './housing.data'\n", + "housing_data = np.fromfile(datafile, sep=' ')\n", + "\n", + "feature_names = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE','DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']\n", + "feature_num = len(feature_names)\n", + "# 将原始数据进行Reshape,变成[N, 14]这样的形状\n", + "housing_data = housing_data.reshape([housing_data.shape[0] // feature_num, feature_num])" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAACcUAAACoCAYAAAArHxTxAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAgAElEQVR4nOy9e5gU9Zn3/a2q7urp6RmYYZgBdPBEcHQ0Q2AQEZ41GDbGbFCXDOIVDsZDOIQYsz4GcTfh0n1InhXRdZcYHHSNCopBQVZfs1HzkJDkxSMDkfUZHXkRzAyn6RlmoKenu6vr8P7RU0UfqvpY1V3dfX+uqy+YPtTx/t2n36/um1EUBQRBEARBEARBEARBEARBEARBEARBEARBEARBEARRCrCFPgCCIAiCIAiCIAiCIAiCIAiCIAiCIAiCIAiCIAiCMAtaFEcQBEEQBEEQBEEQBEEQBEEQBEEQBEEQBEEQBEGUDLQojiAIgiAIgiAIgiAIgiAIgiAIgiAIgiAIgiAIgigZaFEcQRAEQRAEQRAEQRAEQRAEQRAEQRAEQRAEQRAEUTLQojiCIAiCIAiCIAiCIAiCIAiCIAiCIAiCIAiCIAiiZCiKRXE33HCDAoBe9DL7lTdIhull4SsvkAzTy8JXXiAZppeFr7xAMkwvC195g+SYXha98gbJML0seuUNkmF6WfTKGyTD9LLwlRdIhull4SsvkAzTy8JX3iA5ppdFr7xBMkwvi155g2SYXha9DCmKRXF9fX2FPgSCyAmSYaLYIRkmih2SYaLYIRkmSgGSY6LYIRkmih2SYaLYIRkmih2SYaLYIRkmSgGSY6LYIRkmih2SYSLfFMWiOIIgCIIgCIIgCIIgCIIgCIIgCIIgCIIgCIIgCIJIB1oURxAEQRAEQRAEQRAEQRAEQRAEQRAEQRAEQRAEQZQMtCiOIAiCIAiCIAiCIAiCIAiCIAiCIAiCIAiCIAiCKBloURxBEARBEARBEARBEARBEARBEARBEARBEARBEARRMjis3DjDMEcB+ABIAERFUaYzDDMGwHYAFwE4CmChoigDmW5bEET0D4chSDI4loGTZeDgGAiigrEeHj5BRECQwLKAojDgGEBSAFlRwDKRv1mWRZ2HB8sy5p00QaRJMCiiPyBAlBU4WAZ1bh4VFdYMSVGU0TsUQliS4eRYNFS54HDQmthMkWUF/X4BgiiBd3Bp649kv8t2m3Yg/thr3U4MBMIJ5yLLCgYDAgKCBElRUOHkMNbj0s4zE/ks5uuVKaGQiL7hczpibCUPl+ucjhAEEV7/uc/rPTx43lKzXtLkUycT9qYUZSGZvmZy9IvTtQW5HnP8dpLpwPjPxlbyOCtIMdsCUDb2hMgcK/2NdLad6f4LsU3VvwuLMhQAoqxAlhU4OBYOlkFYkpP6val0D8UvqUn3nsqygj5/CMGwBI5h4OY51Ljzo/PMHEtWjUuj7erZt9MBIeE6AufsiZvnoECBKCoIj4yJCicHUVZsLcuyrKBvKIRAWNLyawwDiDLQUBWJ29RzZFkGLICQGMnFOVgGTgcbOUdRTnkNzfARjM4hnZjbzXO6x0oUN2bZOCPbY1Z8UE75DLuRKr9iF1Llz9Lxz/r9AhgoCIkyRFmxRZ6N8leJFItM5ko6Pn2uOrzaycXoaDtdy3T9zELag3KRRaK8SRUrqPGyg2XAMQyCogwHy8DlYDEsSHA5WEiKgrCkgGMZuJ0saitdtvPj4nVunduJ04HIOgrHSOwWkmQ4WRYVPAMhPBK36vgcKunosVxzywRBEOmSS2yeD+/mOkVR+qL+fgDAbkVRHmYY5oGRv9dkskFBEHGoz48VWzvQMxBAY60bGxa0YGy1C58eH8Rl59WgzxfCs3uP4LuzLsbz70T+XbPzoPb99W0teP6dI7j3601oGldNSprIK8GgiEP9fnz/hXMy/OSSVkyu85g+8S6KMj495cPKqH21L2nFZeOqbZeMtzOyrKDrlA/LtuzTruPTt01PqT+S/Q5AVtu0A/HndX1zA+6Ze2mMnD1923RMrq/CXweGcepsEKt3HEw4T1lW0pbPbO9BMRIKifisL1FHXDrWA5fLAUEQ0eVN/Lyp3lP2icVsyKdOJuxNKcqCnu5sX9KKjbs/w9udvTn5xenagkz1dCp9n0wHAtD97Kj3LH7464NorHVjy50zEBLlsrAnROZY6W+ks+1M91+IbcqygqP9fviCYQDAsCDF+HkbFrTgkTe74B0KJfV7jXQPxS+pSfee6n1vw4IWjBtVgYvqPJbqPDPHklXj0mi7k+urcMg7lNS+bVjQgsZaN4ZCEpZt2Yf6KhceuqkZwLkxUV/lwv03NMWMD7vJsiwr6Drpw7KtsTJSV8Vj575u3PiVRox2O/Cdp9/XHeNPLJqKsCjj3pc/SnoN9fyPfOhW4Jzu0bsfZP+LH7Ns3JfGetDVO5Rge75U5zElPiinfIbdSJVfsQuqf2WUPwOS5xBVGfvP/d341pTzserF/bbIs1H+KpFikclcScenz1WHX9/cgB/OvdSW1zJdP7OQ9qBcZJEobzKdn4uOdTYtnoY/ftqLmZPqYuKdDQtaMK5axEVjrY2pMyFe5z407zJMv3hsQgyrntuTi6dBVhT8YNuBjHNJenqM1lwQBGE1uc7dFSILdzOA50f+/zyAv890A16/oC2IA4CegUAkWDwdwNQL69BzOvJ3W+tErNl57t/o76vvL9uyD/1+waRTI4j06A8I2qAFIjL5/Rc60B8wXxZ7h0Ka46Pua+ULHegdCpm+r1Km3y9oTh4QuY7p6I9kv8t2m3Yg/tjbWicmyNmyLfvQOxTCF/3DWkIv+rN+v5CRfBbz9cqUvmF9HdE3HDlXr1//c28JXot8kE+dTNibUpQFPd258oUOtLVO1P7O1i9O1xZkqqdT6ftkOtDos6kX1ml/f9E/XDb2hMgcK/2NdLad6f4Lsc1+v4Av+odx2h/GaX84wc9bveMgVs6ZlNLvNdI9FL+kJt17qve91TsO4ov+Yct1npljyapxabTd3qFQSvu2esdBhERF+97KOZMSxsTKOZMSxofdZLnfL2gL4oBz53ZsIIgF0y/Ayhc6EBIVwzE+4A9rE0Tq53rXUM//yIdujf5M736Q/S9+zLJxRrbHrPignPIZdiNVfsUuqP6VkZ5Kxz9btmUfFky/QFsQp36vkHk2yl8lUiwymSvp+PS56vC21om2vZbp+pmFtAflIotEeZPp/Fx0rLPqxf24eVpjQryzesdBfHHa+pg6E+J17teaJ+jGsOq5ff/F/TjtD2eVS9LTY7TmgiAIq8k1Nrd6ub8C4G2GYRQAmxVFeQrAOEVRTgCAoignGIZp0PshwzDLASwHgAsuuCDmM1E+l5BT6RkIoJLnIMkKKnkOPQMB1LidMf/Gf199XxAlk06XIM6RjQyLsmL6cYQlWX9fkmz6vkoZQZR0r2Mq/ZHqd9lsM18kk+H48zLSs6Ikazo5/jNBlIzHgo58ZnsPipFUOiKfOqSYSSbD0dD1JFTsJgvpynAyjHRnjd
uZ8HemOjVdW5Cpnk6l71PdJ73PJFnB9uUzMRgIo6bSnOMk0sMMOc4nVvob6Ww70/0XYpuCKKGS52I+i/+uqmNS+b2TG6qwdl4zZPmc72e3+MWOMpzuPTX6XiXPWa7zzBxLVo1Lo+3Gy6CRfXNyDOqrXLp2Ndnv8i3LmcR1wDkZ4VgGPQMBxD/kr57v1Ik1mDjGbXiO6fgf+dCtqe4H2X/7k40MZ2rjkvmXZsQH5ZTPsBt2iPHS8SVU/yrbHKIqY6rujv9eofJsdrj+dqMYr0k2/nA6Pn2uOtzQ17LBtTS0NwbXpRD2oBhlMRfsGNcR1pPN/Fx0PkNWjNcj5HvcJpPheJ1rdNzR5xad15k6sQYr50zCsCDC6wPqPHza8XL0tsm/JZJBepjIlOhWzbn6LVZXiputKMo0AN8E8AOGYa5N94eKojylKMp0RVGm19fXx3zmZBk01rpj3musdWNYkMCxDIYFCY21bgwGwjH/xn9ffZ93cCAIs0kmww4DGXZYUFKW51jdfTk5e7RrKRZ4B6d7HVPpj2S/y3ab+SKZDMcfu5GedXCsppPjP+MdXEbyyTs4XN/cgM1LW7F9+UxsXtqK65sbbHO9zCSVjsinDilmkslwNHQ9CRW7yUK6MpwMI1szGAgn/J2pDdKzBWbo6VT2Mdl9MvqMYxnc+tR7WPdGJ0a7nba2v6WGGXKcT6z0z9LZdqb7L8Q2eQeHYUHCsCBBGfks/ruqjlF/53To+3yHeoew7o1O9PkFyCNJDKeBf+goUPxiRxlO954afa+uygU3b63OM3MsGW3L6WDh9YVwbGAYXl9Ik6Gctxsng0axzudeP+6/oQlTJ9ZgMBDWxoX63WQxUj7JJK4DzuXXJFlBY60b8Zc18p6CH3+jCd2nA4bnmI7/4ea5nO5hsnOIj7mN7gfZf/uTjQxnauOMfEijHLQzw/ignPIZdsMOMV46voTqX2WbQ4z+3E55Njtcf7tRjNckG384nZxvrjrc0NeywbU0Gl9GPlIh7EExymIu2DGuI6wnm/m56HwGyxivR8j3uE26biJOtxgdd/S5DQuRxWtTJ9bgx99owro3OvHVDXswf9NedJ3yGeZxjHI2tOaCSAXpYSJdIovhgug8cRbzN+3F7PV/yNlvsTQTpyjK8ZF/ewHsAjADwCmGYSYAwMi/vZlul3eweHzhlJhAb8OCFkwc48aBL/rROCby986ObqxvO/dv9PfV95++bTrqPLw5J0wQaVLn5vHkktYYmXxySSvq3ObLIu9ksGFBS8J44Z2lGdxYRZ2Hx9O3TY+5junoj2S/q3U70R4nB+1LWlEb9fS8XYk/r50d3Qnn8vRt09FQ5cKXGjzYtHia7nlmIp+1bifumXsp1r3RqS1suGfupUVxvTJlbKW+jhhbGZG3eo/+5/Vkz7LC42J1r6fHRYuHy4182ud8oWeHnlzSip0d3drfmxZPw/6j/Rn7xfHb3n+0Hz80QU+nsrnJdKDRZ+8c8gKIPD308G8/wealiTaLYgICyN7nM2vbme6/ENus8/C4sK4S59W4MMbj1PXl2vcc1n5X63ZiKCgmfG99W+R7PQMBrNjaobXYaKhy6frIDVWuTC95yZLuPdX73vq2Fjzy5ic4dTa7BUhmH2Mu2xoKilpyTE3cZ3JORtttqHKljHXWt7Vg4+5DWL3jIO6ZOxntew5jjMcZMyba9xxOkHu7yXKdh8fTS6cnjOHzayuwY99f0b6kFS4Hk/C5JCtYs/MgNu4+lJBv07uG6rlH+x9b7pyBU2dDOd1DAEnj6uh7rHc/yP4XP2bZOCMfknew+jkLR2axYjnlM+xGqvyKXVD9KyM9lSqHqMr2UCjR5ypkno3yV4kUi0zmSjo531x1+P6j/bbN4RiNr3oL481MKRdZJMqbZHpG77PofMamxdPw2v4e3fUIF46ptFUcEZ9H+X3niQS/IfrcHl84BefXVqCx1o2VcyZhzc7E9u0OlkkrXlbjY1pzQRCEGciygq5TPhzsPhvTBrp/KJiT38coijVJUIZhPABYRVF8I///HYD/BWAugH5FUR5mGOYBAGMURbk/2bamT5+u7Nu3T/v7i34//uHXf8F911+K8aMrwDEMTp4N4qK6SgAMxnp4+AQRAUECyzJQFIBjAEkBFEUBwzDgGIBlWdR5eLAl+uQDkZK83fh4Gfb6Qvjk+AAmNYyCKCtwsAwO957F5efVor7a3AT5sYFh3L3tAFbOmYQatxODgTDa9xzGE4um4vzaSlP3VepEl+nkHVza+sPod15fCD/ZdRBtrRO1e7Ozoxs/n9+SiRzkRY7jZRhIPK9atxMDgXDCefb6gvjprv/WPU9BlNKWT68vMmESXR61sdaNXatmmz5u7EAoJKJvWNB0xNhKHi7Xua7ngiDC6z/3eb2HB89b3RXdEgomwypf9Pvx+oFjuHlao+YnvLa/BzdPPR8X1HnycXiEjQgGRfQHzo2tOjePioqkY6vgMpyKaH3NMAye3/s5pl1UF6OTH7rpSowfVZGxXxy/7YWb3zVFT6eyucl0YPxnew95cf+rH8ds//1//BpYls3YppcoBfOJ7Uq2Pp9Z2850/4XYpiwrOHU2iFs2v4v6KpfmyykALq6rhCgr2u/6/QLmb9qrfW9yQxUO9Q6hfc9hHOge1La5d811mv8nijJ6h0IQJRkOjkVDlQsO4wUIZSnD6d5TWVZw8mwQxwcD6PcL2nXPhx9t5liK3xbHAjc9kXtsYHSMerHOKV8QPQMBLWZR5fdP918HjgHcPAcFCkRRQVhWIMsKKpwcRFlJJcsFlWFZVtA3FEIwHMmhOVkGDAOIcmRyhWUZ7VqwLAMWQFCU8dUNewCca7VTM1KJdcJot+E1jI4XFSj49qZ3cr6HqeLq6ONw85H7ERZlsv/mUnAZztXG9fsFXTn66bxm/Oilv+ScUyu3fIbdSJVfGaHgcZ0sKxgMCAgIEiQFqHCyGOtxpZ1DlGUFPYPDactsvuSyhPJXppGmTGZKwWU4mnTnJHLR4QzD4KHXP841t24JycaXanfskI+wSBazpSzjOsJ6kukZWVbQ54/EQg6WhSjLECUFvINF/1AIwbAMWVHg5FjUVDrhcnA4eSaISQ0ejPEk6JmCynB8HqXO7cTpQBhhSQbHMmBZBn1DAo4PBtC+5zDqq3k8eOMVEGVFi+2i2bvmOkwY7U4ZL9Oai5KC9DBRcFQf6rFbpuDWp96L+ez1H1yDuqqKZHN3hjJspXczDsAuhmHU/WxTFOVNhmE+BPAywzB3AfgrgFsy3TDHMvAOhbDkmQ+09xpr3fj18pna6sAxTg6geWzCpgiihNue7Uh4f++a60zfF+/g4B0KYcXWc/trrKUSttnAskxWAbXR7wRRwtudvXi7M7Zg5oM3SlkfYz7ROy+98wyLsuF5ZiKfgijp9gsXxOK4XpnicjlwfpIkBM87cH6ZJxHNgmMZbO/owWP/55D2XmOtG/NbGwt4VEShqKhw4Pzki+CKjmh9fWxgGJv/fBT489GY7zx4o5JV0iJ+22bp6VQ2N5kOjP7M6wth4x8Ox3zeW
OsGy7IFT5IT9iVbn8+sbWe6/0Jsk2UZyIqCnoGAVulNJXpxG3DOh1O/t3lpK9a90ZkwQRTt/zkcLM6riS2JT8SS7j2NPCioYEH7uzHv58OPNnMsxW/LLJtjdIx67/MODve98lGC7LqdXFHbFJZl0DCqIul34s/P6wuhsdaNnoEADnQPYsXWDm2iV/UnUsWLZt3DVHG1lTqdsAdm2DhjObrClJxaueUz7Eaq/IpdYFkmMsGuM6eRTg6RZRm4nQ7b5dkof5VIschkLqSb881Fhx8bGLZtbj3Z+LKTb1IOskgQycYcyzJoqI7EQscGhnHtI3sAwDBvsXZeM1Zs7YjM5dpsDYJeHmWCy6HFXfGLS4CIr1vJO7TYTkXV15nEywRBEGag+lBqS+Zo3bRq21+wa9VsnJ/NWg0zDzIaRVE+VxRlysjrCkVRfj7yfr+iKHMVRZk88u/pTLfNcwal6zlqc0YUB8n62JuNlS2giNzIpxwUkmTnmYl8lsv1IvIP+RVEOWGlLrWjniY/iCDMQ5YVeH0hHBsYhtcXgptPb8zH6wZqYZic+OtsRotTO+rnXCnEOZFNOUcu10KVcfV30WRzD0tRvon8YyRHbp4zZdyTnBK5kq4MUZ6NsAv58JvsLMN2PjaCIPSJHrftew5jfVts3mJ927n2o8U0lnkHh2FBMmWOjiAIwmzi86BOB4vGWreuHt68tDVr3WRZ+1QziS+hGA5LOO4LIiwqYBlAVgCng8F51RVwOovHEBEFp2BlQEVRxqenfFov5MZaN9qXtOKycdXJ2gJljVo2NyzJcKZuP0TkCbUv9rIt+zQ5ePq26WgaV51JtR5blcbXI/48r29uwE+/1QyOZZK2XY0n3+OGyBsFl+FwWMLRgWH0nA6gkh8JEse4cVFtJfkVZUgWrd4KLsOZkMr25NLqzq562spWmCUClcYnUiKKMo6fCaDXF0K/X8DOjm7c+/UmuBwsbvvVB0l9WT29s+XOGaiqcJjVwrBkZNik+CBv243fRz51bT7OyWi/FpxnUcpwNtci+r7VV7lw/w1NWL3jYE73MH6b98ydjIvHelDp4rS2g4TlFKUMR5NMp8iyknNOza5+MhGDbeM6tbXbcEjCkT4/Nu4+BO9QyFBnpqufSS5LDtvJsNX+YaqccyFjfxpfWVH0/gRR3ITDEo6fDcI7kvfYf7Qfi2ZeBAD43Jva/sJmMhytgxUAw4KIO5/bp6uTKHdKjGArGSZKG6MYZ8udMxAKy1i29VyO58K6Sjg5FhNGVaTyowxluCgXxZ32h3C4dwj3vvyRprwfXzgFkxqq9Hp4E4QRBVPuXl8IP9l1EG2tE1HjdmIwEMbOjm78fH6L6SVnCzVZQKSHCc6m7RIeeqjnKcsy+vwCVmztyFge8zluiLxScBkm2SJUsrSZBZfhTDGyPbn6DDSWihZKeBBJkWUFn5w8G+O/rW9rwfPvHMHP5n8ZDJiUvqzFCdaSkWGvL4T5m/YmtC3ZtWp2znrUyntQYgvUCkHJyHAq4mV86sQa3DN3MiY1VMHtzP4eyrKCwYCAE4NBrHgh81iTyJmSkGE9nQLAFP1GfnJRYMu4Ts/Gbl7Sigk1Fahx52b3SC5LDlvKsNWYkXO2AhpfWVES/gRRnOjlPX65aBr+6+Ax3HXtpLTyHrCRDOv5D48vnAJRVsAyDOkkwgjbyDBR2ujpqPVtLXj0rS54h0J4/e7ZEGUFwbAMjgHcPJdu7GP4haJsFh8QJG1BHAD0DARw78sfYfvymbbr4U0QegiihLc7e/F2Z2/M+w/eKJm+r36/oCkVIDJelm3ZZ8rECpE7LMuUxX1Qz9PrC2mBBZCZPOZz3BDlhSBK8PqEmPe8vshkCFFelIvNNLI9uZ4/6WmCKE36oyaXgIhuWLPzIB7+9pcRFmUASLkgqVx83lwRRClmQRwQud5m+CRW3gMr7Gc6C95IroqPeBk/0D2IO577EHvXXJf2vTSSDUmGtiAOKF0/jrAOPZ3i9YVM0W/kJxPZomdjV7zQgV2rZue80IfkkigFzMg5WwGNL4IoLvTyHj/Yth/P3n4VAoKESt6BCaPdRfOwjZ7/cO/LH2HrnTPwWe8Q2vccxoHuQdJJBEEUhD5/Ypy9ZudBrJ3XjBVbOxAQJJxfW2nqPotyUZysKLqJYtn+Re8IAsC53vTxFQCs6ENv5cQKQWRKLvKYz3FDlBdunkto3bRhQQvcPMlWuVHuNjPX8yc9TRCliZFuOK/GjVufei+tSgwlVNHLUopVj5ptP62oPEcymH/0rnmuMp5MNsrdjyOswSy5Klb9ThQeK3UbySVRStjND6DxRRDFhZEOORMIY0H7uwWvPpkpRufT6wth3RudWvV/J7VzJggij6hV/v0hUVdH1bidlvlLRantOJZFY6075r3GWje4IjBEBAEAtW4n2pe0anKs9m+vdTtN3xfDMLrjhWFovBD5Jxd5zOe4IcoLUVK0BXFAxPlaveMgRIlW25cbatIymnJKWubqM5CeJojSxEg3ftE/nFCJod8vJPxeXcQyf9NezF7/B8zftBddp3yQ6am2BOo8PJ6+bXqMHn36tulaCz+7Yrb9NKo8pydf6UAymH+Mrnmt25mTjCeTjXL34whrMCunRn4ykS1W5nVJLolSwm5zIDS+CKK4MIol1Bg015g03xidz2AgrFVkeuCbl2MoKFJcTBBEXlDzRB91n8HRvmFdHTUsSJblQYtyURzDKFjf1hLjUK5vawGt8SGKhYFAGBt3f4a185qxfflMrJ3XjI27P8NAIGz6vjgGuuOFo/GSN2RZgdcXwrGBYXh9obJ2Mo3kkYGS8trkc9wQ5UXQ4Mmp4EhLOKJ8KNbFCEZkan9y9RlITxNEaRCvO/QWsbQvacXG3YdifmdUicHsBU6lDMsyaBpXjV2rZmPvmuuwa9Vs059EtyI2Mdt+ml3pg2Qw/xhd84FAOCcZTyYbucghxezlRSb326ycGvnJRLakksFc9BfJJVFK5KKvrfADaHwRRHGhF0usb2tB+57D2neKqQp1qvNRq+Dd9qsPKC4mCMJSVD+rZ3AYJ88EUVPpxMbdhxL8tvYlrZgycbRlFTmLsn2qIgPPv3MEa+c1o8btxGAgjOffOYKHbrqy0IdGEGkhiBLe7uzF2529Me9b0b+dZVnd8fLz+S2m74tIxIrWP8WKLCuQFH39vfobl+FMIIiAIOK80W44dMo253PcEOWFY+Rp0viWBo7yGqIEYhcjFHt7tWzsT64+g5l6mtrcEURhUHXH47/rQlvrRNR5eASqXfjSWE+MbuRYwDsUivmtUUUmu7Uysjssy6C+2mXJto1sw7hRLgSE7PWtnv2sdTuz1uNmt5siGcw/giihvsoV41O07zkMQZTAsgzqPLwmH/1+IW35SCYb2fpxFLOXLnr+JICM7rdZOTXKZxDZkkwGjfTX5PoqDATCEEQJTgcLB8vo2nmSS8Jq8hnXZ6uvrfID0h1flPsgCHsQHUsEBBFhWcH6336CA92D2ncaa91gWQayrNh+nLIsg8n1VXhp2UxIioIjXj8efatLO5/G
Wjd6fSGKiwmCsBRRlHH8TKR1c79fwM6ObjzwzcvhHQrh0be6NL9tWJBwXk0FxnisyYcCRboozsGxWPY3l+Delz/SHNUNC1owLIhFYYwIwulgdRO5VvRvr3U7cc/cS7HyhQ5tvFCp7vxh9IT8rlWzLZvssiPRE6zfnXUx1uw8qMnjk4unYcNbn+Ltzl401rqxeWkrLh8/KkGXmz05RhAqLieLJxdPw/df3B8jly5nURbUJXLEysUI+SQb+5Orz2CWnjY7KU1JZoJIn36/oOuvbV7SiqZx1XA4IvpDFGW0L2lNS1+QD2cfjGzDupuvxB3PfZiTvo22n7nqcfWp9vjfZ1t5zgwZJFsSS6rr4eY53H9DE1bvOBiTt3PzXE7ykUo2svHjKGYvTYzkrK6Kz+h+m5VTI1tIZEudh8e9X2/S1Xt6+uvx33XhR397KVZsPSezjy+cgqe0nDMAACAASURBVP/9X5/COxSK0bckl4SV5HvRebKxkgyr/IB0xhctzCcIe6HGEl4fcKrPjxVfnYTOE76YeMYXDCMgSLiozmP7cXo2FEbPwDCe3XsE3511sfZgo1o17tG3upLafYqBCYLIBVGU0XXKhxVRsfT6thZs/+AL/HLRNPxg236s2Nqh+T81bmu7NRXloriQKMHBMVh385Wo5DkMBsJ45M0ueIdCeHXVLDRUVxT6EAkiKQ6WwYYFLQkJYocFDsXpgKCV6lafktq4+zM8dOMVcJIjYzlUlSBCdILB6xOwdl4zJjdUQQGw/refaE/N9QwEsGJrR4Iul2UFChS8cNfVONLnx8bdh7RkXrG2NSTsQzAs4xe/PxSjJ3/x+0N48MYrCn1oBJE1mdgfNckRCIsICBIe/vaX4eRYzWf4+fyWtJLBZi1iMDMpTUlmgsgMQZTQ1jpRWxAHjPhnL3Rg2/euRmNtJViWiWkHFB1j6OmLOg+PLXfOwBf9w6jkOQwLEi6sqywLH85uSWQj21DJc9r/zZgEzFWPm125NVf7RLYklnSuhygrWr4DiMjA6h0H8eqqWTnJhxVVfQsds9tNT5QKRnK2bdnVGd1vo5zaz+Z/2TD/rHdPzV7sS5QPyfSenv667ZqLtAVxQES+7335Izz87S9jyTMfxOjbZHJJuonIlT5/SFcPWzV/l62PYJUfkI7eNyP3QWOVIMynzsNDECXIiqK7DuG5O2ZgMCBYWtHIDAKCpMVk6pxcnYfH+FEV+OFLB5LOr1EMTBBELoiijGNnAtqCOCDi56zZeRBr5zWDZYB1N1+Ji8d6UOniMNbjsly3FOWiOI5hUF3BY/6mdxM+C4blAhwRQWRGQJDwyJtdMUm1R97swhOLpgIec/cVDOuX6r7/hstw+1PvkSOTA+kEneX81GX09QGA+ioXegYCONA9iBVbO7B5aSsm1VclyGbPQCBGl+s54JuXtGJCTQVq3BToE7kjyoqunvzpt5oLdEQEkTvJ7E+0fnbzHE6dDcXo2PVtLXj4t59qJfXTbZ9j1kS1mUlpqv5CEJmhjlu9MdjrC8HBsRg/qiLjdlshUcba1z6OSaaWOpkmkfMxoWVkGwYDYe1vMyYBzdDjZlZuzdU+kS2JJZ3rERZlXRkIi7L2//jP0pUPs6v6GlXyz0fMTpNN1mHUwpdjmIzut1FO7afz9PPPye6p2Qs6ifLBSO/p2fXxo926Onb86Art/6q+NbKPgH6b4ei2rCTDRCqCYX1/0Oz5u1x9aKty9+n4n7n6zORHEIQ1sCwDhmFw6kwQdzz3YcLng8MCQqJk+7kpSVE0HaPOyQHAH1fPwf+6+QqM8fCYMNqtew4UAxMEkQ2yrGAwIMAXFOEdadEcTc9AAHUeHrUeHuNHu/MaTxRlXzAHx8I1krSKprHWDc6+9ocgNHgHB+9QCCu2duDWp97Diq0d8A6FLEm6qgm/aCJjhdEcmX6/YPp+Sx016Jy/aS9mr/8D5m/ai65TPsiyEvM99akw9R6Uy9PA8dfn1qfew/03NGHqxBrtOzs7utPS5XoO+IoXOiDJsHXQQRQPHKuvJ0m+iGLGyP7Uup0x+vmj7jMJOnbNzoNYOWeS9rtM/BN1wub82krUV2f3hI+alI4m26R0oau/EESxUefh0VDt0h2D/X4BxwcD6Drlg5tPf5waJVNLPQbJ5LzTjS1yRc82bFjQgvY9h7XvmDEJaKYeN4tc7BPZkljSuR7JZMBO8iHLCoaCIjYsaClIzF6u+jEfqC18173RiVufeg/r3ujE/Tc0wePiMsrRGOfU9Peb7J6a4ScTRDR6dr3CMM/GaP+P1rd6cmkkx8fPBCz3VYjSIVP9mQ1m+NBW5u5T6f1cfSLyIwjCOhQlsuDWKDeyYmsH+vyhAh1delQY6JgTZ4LoGxLAOzhDf5RiYIIgMkX1yz7qPgOvL2SoQ8d4eFTyXN5j4qJcFBcUJfQNBROSVhsWtMDNl371JaL4UVsIPXv7Vdi+fCaevf0qbLlzhiVJVzfP6Y6Vk2eDAMiRyZZ0g87op8L2rrkOu1bNLountfSuz+odB3HP3MkAInJ479ebUMmzKXW5kQMeCEuUfCNMQW1pHS+HVrS0Joh8YWR/BgLhGP1cyXO6OrbG7cwqGSzLCry+EI4NDMPrC2Wlp81MSttp4p0gigGWZXDe6EhV3ugxuL6tBTs7ujUfT5SVtMdpufpymSSR8zWhFW8bXl01C+NGVcA7FEnmmzUJWKgHg8ywQXqQLYklneuRTAaSLdy34v4lo98v4LZffaBV8t++fCbW3Xwlxo3KT3KWJpusw6iFryApGeVojHJqRvnnZPfUKh1FlC96MZ/bpZ9nO3k2qGuP9eTSSI57o6o90OIbIhWZ6s9sMMOHtjJ3n0rv5+ozkx9BENbhdLDY2dGN9W2xeuyXi6ahfc9hSypfmoWqeyRFRntcbuexW6Zg/W8/xcVjPUl1DcXABEFkgiwrOHk2CH9IxMQxkcXDejq0fUkrnByDGnf+CwcVbfvUJ/ccxqrrvqT18x4WJNRXu1DNF+UpEWVIKBzXQmipNS2Eatw8xo2qiBkrbp7DP7/eCYAcmWzJJOg0u71LMWB0fSY1VGHvmutiWjKMGyXGyOe4URUxBtGojP3h3iH4Q2JZLDLUIx8ttsoFFpGFQdFyWMlzxfnkAEFEoWd/4vXzYCCsq2Mba93YtWp2RrrFrNYd6bQZSVcHqknm+GMq9YqtBKGSjb/gcLBoGleNbd+7Gr0jT/Y9/84RfHfWxXj0rS6tBWK6beDK1ZfLpBVTPie04m3DWI9iejs/s9ppZ4KV7aMytSWl6KfHt15PdT1SyUD8Z7VuJw55h/Le/ksdez0DAa2dEADsXXMd4LFstxpWtWwjkrfwzSRHo5dTi89ZRGN0T50OllrcEZYQL8+yrCTk2eqrXah2ORLiOyPbWVfF68px/EKjdH2VUrSLRGoy1Z/ZYJYPbUXuPh3fNFefmfwIgsgNI/ukVpO+Y/bFeHbvEayd14w6D48xHh7tew7jQPegbTvXxeueV1Zco+nhwUAYD//205GuZSxOnAkY6h3KpxIEkS6iKKOr14cVWzv
QMxDAs7dfhZ0d3fjurIvx/DvndGh9tQuj3Q6MqihMLFCUK8jcPIe7vzYZT/z+ENpaJ6KadeDCukps/+ALLJ55ERprKymwImxNnz+EZVvjnmLaug+vrpqFhuoKU/fFsgwuqvOgusIJQZQgyQp+9ptOzXEjRyY7KOhMjtH1cTu5hCRDtHzqOeF6Dvj6thY8+lYXvEMh7Fo1u+wWHVo56VeWMAbXzOh9gihi4vVz+57D2LCgRaukoeqTCaMzbyFs9JR2Nno6WVI6Ex1YiIUZBGEXcvEXHA4WjbWVcHCRJeJtrRPx6FtdWgyhttlIZ2yXqy+XSRK5kLGFVQ/w5PvBIDNtUDyZ2JJS9NP1zmnLnTPw6qpZCIuy4fVIJgPxn3l9IcvuXzIKHdfTZJN1FOreGt1TB8sURMaJ8iM+D5zMZhnZztfvnp0gx5uXtuLf/89nMb9PZ0yVol0k0iMTWcwW50i7YL2FyIUmXd80F5+Z/AiCyJ5k9kmtJl1f5cLKOZPQUO3CaLcTD//2E7zd2WvrznXxuud//9cnuP+GJtz3ykfaebYvacVDr3+snYueXaZ8KkEQ6SDLCo6fCWgL4gBg4+5DuP+GJjy79wjaWieizsOjodqF80a74Sigj2b5ojiGYTgA+wAcUxRlHsMwFwP4NYAxAPYDWKooSkZ1tmvcPAKChLbWiahxO9HvF/Dz33yCA92D+PoVE+DmHZRQIGxNMKz/FJNV5XajgytZVvDz+S148EZyZHKBgs7kZHJ9UgX/qgO+fflM9AwEMBgIa5OyAMqyJLyVk37lSEiU8NDrnVg5ZxIqwUGQZDz0eic2fmdqoQ+NIEwnXj97h0IYN6oi5cR2OuSr0lGmOrAcK7YSBJC7v8CyDMaPqsCZQBj/sP0vWfu85erLZZJEptgid6y2QenaklL00/XO6bZffYBdq2bj/NpKU/ZRqPZfhR57NNlkHWbdW3VSNH7BRTK/U++enjgToBZ3RN5I12YZtrgXJN2Knvd+vQmdJ3wZjalStItE+lgdiztYJuEhvw0LWuCwgR3Nh29DfgRBZE8y+6RXTXrqxBr8661TcNf/uMSSypdmEa97DnQP4pE3u7D1zhno9wuor3Zh23tH8XZnL4DkdpnyqQRBpKLfL6DXF9LVO/966xQwYFDp4jDW4yq4f5KPSnE/AvAJgFEjf68H8LiiKL9mGKYdwF0AnsxkgyzLwMmxWPdGp24Z7wmjza20RRBmwzGM7lNM+Si3S46MOVDQmZz468MwDDgmYiCzuU4sy4B3cNoTLSrlWp2vUJNGpQrLMPAOhWJaJjXWukHDmShFzLZf0a0GGAP/xmw9TTqQINLDjLGipzNqRx5My0SHlKsvl27sRbFF7uRSFcrMtm6laKMyPadsrmehqnrZYexRjsYakt3bTGQ0mzGtd08LXZWQKA7y3WY0mVzqyXE2+rIU7SJhHwKChEfe7MLaec2ocTsxGAjjkTe78MSiqXlpg56MZOPLzLFOfgRBZEcy+6Q3fr1DIfAciwmjK+DgWDRUFX6Bhx6Gx+5gcWGdB7IsY/Ofj8b8JpVdpjboBEFEE60TgEghKD2943JwGD+qwjb6wtIadQzDNAL4FoD/GPmbAfA1ADtGvvI8gL/PdLuyrGAoJGLDghY01roBRBzKx26Zgp0d3ZRQIGyPx8Vh0+JpMfK7afE0eFwku8WEGnSeX1uJ+mp7OsGFhGUZ1Hl4nA2KWLj5XVz9L7/H/E170XXKB1lWMt6e+qR39Lgp1woaanATDSXUs4d0MlFumGW/1FYD8zftxez1f8BDr3+M9iWtlutp0oEEkR5mjZVonVHn4XHIO6SN+0x8O/LlkkOxRW5kK1/xtiyXeAUoTRuVyTllez0LqR9o7JUuevc2Uxk1a0yTDSRSYbY9SodM5TIbfVmKdpGwD7yD0x5yvfWp97Bia8fI4o/Cy5fR+Kp1O/M+1gmCSCSZfdIbv+1LWvHP/8//xbUb9mDh5ndxyDtky3FrpHsmjHaP2G42I7tcCP+EIAj7Eq8Tbn3qPVRXOPDYLVNi9M7mpa22WhAHAIyiWKe4GIbZAeBfAFQD+DGA2wG8pyjKl0Y+nwjgt4qiXJlsO9OnT1f27dun/e31hTB/016tn3eN24lhQUKVy4GqCkdC72uCMCBvQhIvw72+IH6667+1FsCDgTB2dnTjZ/O/jIZqqnRIZERe5DhehtNF1dfpthpJBT2VEkF1POJbwRSp/Su4DJNOJnKk4DJcKPR0/PXNDXjopiuhKIplerrEdKAdKJhPTFiLFWMlV9/OIl+OZJgAkJ18WRGvZDHubC3DmZxTLteTYr2ixtYyHE2mMmqmLSUZtz0FjevMtkfpYrVcUuyWV8ouN2F3+dIbX/1+oSBjvUgoGn+CKH5S6Y/4zhgPvf6x1nIUMBy3tpDhZLY9U71ZKP+EKBi2kGHCvhjphMcXfgWnhyOd4hqqXThvtBsOh6W12YwwlGHL2qcyDDMPQK+iKB0Mw8xJciC6q/IYhlkOYDkAXHDBBTGf6fXzBoA/3X8dGmvctnB4CSKZDIdFGW939sY4UQDw4I1y3o6PIFKRTIbTxagMdSAsQZaVrNqokrNtjzY/xUC6Mkw6mbArZujhTMm1ndTbnb148EYF59dWWnaMpAOLi0LIMRHBirGSTgusZHqkGH05kuHiIV6+ZFmB1xdKKv9mt3Wzo43KVYYzOad0r6eRnig2/UDkh2xlWE/OMh3zZo5pkvHyJR0ZtqrNaKr4zmq5tKNdJDLHrv6w3eVLb3wZ5soFEV4fbHX8pYZd5ZgoDKn0R/T4PTYwnDBvUIhW4OnKcCrbPm6UC9uXz4SkABVOFmM9xtVfqQ06YSakh4uT6HhCUhRdnTBudAXOq6mwnS8WjZVL9GYDuIlhmKMAfo1I29R/A1DDMIy6GK8RwHG9HyuK8pSiKNMVRZleX18f85nToV/es8LJ2vIiE+VJNjLsLMyqWYLQJZkMp4tRGerDvUNUZjlHqM1PatKVYdLJhF0xQw9nQqHaSWUD6cDiId9yTMRi9lhJNe5LsbUGyXBxkq4sWmHL7GajzJDhdM8pnetZinqCsJZsZNhIzoxiv2Rj3m5jmig+0pFhK+yRXfQtjaHix87+cLHJl9FY/+Skj3wii7GzHBOFIV39YZe5g1xlWPULbnoi4hcsevo99A8JSX9DbdAJMyE9XHzExxOHe/26OsHt5Gzvi1lWKU5RlH8E8I8AMFIp7seKoixmGOYVAAsQWSj3XQCvZbptB8vgme+2gmM5sAwgK4AkS3DY9CITRDwkw0S5UOfhseXOGfiifxiVPIdhQUKtx4l/fr0T3qFQxmWWqd0IYQWkkwkiQr9f0MrnA5GnfJZt2Weoq/V0/JcaPFCg4NjAMOlpgrApufhTdR4eT982PaHVRp2HBwAMBgScPBPEY7dMwWAgjPY9h5PqEYKwinRtmp5Mb7lzRlJbRjGJMXq+wYV1lajz8Np1C4RFnDwTRH2VS+sCQXqCMBsjHfD63bMNZdRqSHcQyUimP7MlHVtIckkQ1qE3vpLlysknIgh7oY7hUF
jCS8tm4mwwjJ6BAHZ2dOOO2RcXxdxBfBvYx3/XhZ6BAKZOrMHKOZPgD4k4eTaI8aMqdO1/qhwQQRCliSwr6POHEBAkVDhZbFjQgkfe7MLG3YewYUELVu84WHQ6wbJFcUlYA+DXDMP8DMABAM9kugFZVsAyDLpPn3McG2sr6AkKomiQZQWCqOD7L36gKY0nF08jGbY5lCjKjlBYxtrXPtZk/d9u/QqA5GWW9a41AHSd8iU44E3jquk+EDlBfgVRTiSzZdmUxI/W8dc3N+CeuZdi5dPvk54mCBsRPe7dPIfB4XDChOtFdZ60xmmyFiOyrODEYDDG71vf1oJH3+qi1hp5opjiFauPNV2bFi/Tbp7DqbMh3LbpHV1bpj4lSzGJMSExNv7bvLQVkiTj/+vzx1w3VT8c6B7MuQVPMck+kR+MdEBYlBNyFE8vnQ7AWjki3UGkg5FsJiOX+I7kkiCsw2h8Ta6vMsyVA9m1JSQ/iCDMJ34MX9/cgAe+eTnGj6rAT77VjPcP9+GCMZWAp9BHakz0OdRXuXDP3MlY883LsfzaSXBwDO7ediAmZmtqqIYjrvqd3dtUEwRhPuGwhONng/D6Quj3C9pC4AdvasY/v96JR97swvblMwGgqHRCXhbFKYqyB8Cekf9/DmBGLtuTZAV9Q0KM47hhQQtGVThzP1iCyAMhUcb3X9wf86Te91/crymRUqWYAzRKFGVHnz+EZVtjn0r9h+1/wS++8xUMDouQFAVeXyhGFoyu9bhRrowqGBFEupBfQZQLyWwZADAMgx0rr0G/X0D7nsM40D2YtCR+vI5va52IlS90kJ4mCBshijKOnwmgdySR0VjjxtlgOMHm1VQ6McaT3jhVW4zE0+8XsCJOB6zZeRDrbr6SWmuYQKpYqpjilXwcq9rmJXoxgJFNi5Zpry+UNObItKpquaF3fVZs7cCvl89MeH/NzoNYO68ZK7Z25NSCxyx5KuZ8BZGIkQ6QFSTkKJZt3YfX7p6F3rOCoZ+cq2yQ7iBSoZc/W7Z1H15dNQsN1RW6v0ml/1LZwnzIJelWopAUUv6MxtfLK67RzZU//O0vY8kzH2TsExVTDEAQxUC03lDH1dSJNfjurItx26/OFTlpX9IKj8veeQ5VD9VXufDjbzRhzc5zlZ02LGjBrEvqMLd5HGrcTvSeDWFMpRPjRrkTdIdRDoggiNIjHJbwWe+Qlt9VH2h8du8RfGfGhVg5ZxLWvdEJ3sEVnV7Ib8NrkwjLilaWD4g4jqt3HIRIFV2IIkGUFd0n9aySYVmOLDw6NjAMry9UkOpHsqzgaL8fHx87g56BAD4+dgZH+/1FU4nJKJDt9wsFPjJ7EwwnPpVaX+VC9chioxODQRzt8+PYYECTzz6//kRUQMi8ghFBpIORXxEuEv1EEOliZMv6/CEc7fej66QPkqyA51g8eFMzrm9uSFr+Ol7H17idttTTdvCDCKIQyLKCrl4fFv3H+1jQ/i7WvdGJqgqHrs0LCFLOY8OoGsnFYz1FUUbfzqiTXfM37cXs9X/A/E170XXKp90zWVZw8mywaOKVfMRWapuXxlo3AKTd0iFVVR1BlFBf5cLmpa3YvnwmNi9tRX2Vq+C2Lh+kY0+Nr5+s+36N25lzuw0z5KnY8xVEIkY6ICzry2JAkA395GODwzgTCCMkyjgTCOPY4HDGspFNRWaivNDLn/UMBBAMy4a/SRbfeX0hCKKEbd+7Gtc3NwBItIVWyyXpVqKQsXih5c+wYqmkb4fGj67IyieiOQuCMI/ouLtnIKDFff+6cIq2oAyIjLOVL3RgWDC20XZA1UMr50xKOP7VOw5qi1tufeo9rH3tY/QNCRgMGOsOyq8SROkiijKODwZwaiik+8BzW+tEVPKcFmcXY563EO1Tc0aSFdRXubB2XjNq3E4MBsJo33MYgqREWqDRExCEzXGwjO6Telb0oDfzaaFcnq4aDAg4dTaYU1WKQkIJzOxwcSyevf0qVPKcpqv/6e8uh9cXwtrXPkZ9lQv/9HeX4TtPv6fJxQt3Xa17rSUFaVd7IIhMkA38CgrsiFLDyJaFwrKujf7Z338ZYyr5mLaLoqwgLMrgHRxcHBujlwcDYdvpaXpqmigmzK5k0O8XsGJrbCLD6wvFjNGpE2uwcs4khEQZ3QPD8PAcFDBZ7duoGkmli6PxliPJKrnUeXh0nfLBHxJNi1fMlEW9bRnZI1mWtUn8XPebbZuXVFV13DyH+29o0haXqjbTzevbulKpkJOOPZVlBQyjn+uQZEX3/cZatybH0dvJ5JplE6vH74NllaLOVxCJGOmAE2cChjJq5CcPDoe1bg+NtW48uXgaqisE1FTqy4aeDGdSvZIoTzgD/cmNqD9RlNE7FEJYkuHkWDSMLMjWk9vhkIQlz7yvyezmpa1Yd/OVYFk2RqdaLZfFngsmcqPQsXih5c9ofBnNC1U4uRifKN6W1LqdGAiEE/wjmrMgCPOIjrsrnCz++eYrsOrF/Xhy8bSEeYMD3YMQJfsuipNlRYvBjB5iPu0XYnIMWjczT6w/63Sw4DkGxwaCMdWj1JbQerqJIIjiQRBEnPCFtK5u9VWuGJ3RMxBAnYeHLyiiaXw1xo+qKMpxXpSV4twOFvff0KStYF73Rifuv6EJfSMVhgjC7lRVsHhySWvME6tPLmlFVYX5Q7LPH8Ljv+vC2nnN2L58JtbOa8bjv+vKeKykqk6QioAgGValKAbUQDaadBNF5fIERfx5hsMS+vyRlpTRuvrCsZUIhmU8dssUPLpwCp7+8+cxcnGkz697rSucbFbVHggiFS4Dv8LlKEo3iSB0iZ6ojqax1g2WgWajp06swdp5zXByLMKSjL8ODGP+pr24e9sBdJ304dub3tH8gH6/gF/dfk4v7+zoTvBvCq2njaqPUsxA2I1cfW091AmSqRNrtKpWo0eqMgGRBXE//kbE/n3tsT9i8X+8j2ODQfxk18Gs9m1UlWcsTXrmTLLJLjVx3+8XsopX4n14UZRNk0UjuXY62IRjvb65AX1+wdQxoLZ5Ob+2EvXVrrSSdqkqzIkZdC6wYlwXilT2VD3Xh17/GOvbWmKu3/q2FuzY91dsXproI0wY7Y65N9lcM6NYnWEY3d/p7SMQkvOeryiXPEEh0dMBlS4WmxZPi5HFTYungdfRS6qf/IvfH4rJqf3i94cwHNKXDSMZrnU7KZ9BJMXj4nRl0+PiEA5L+PSUDws3v4uvbtiDhZvfxaenfKhw6svtkT5/jD5bsbUDzMh4iLaF2VZVTZdizwUTuVGIWDzatgYECc/uPVIw+TMaX1UVRmOd1caoni359JQPP9l1MME/ymXOgiCIWKJzKKMqnFj14n7UV7mgADHzBj/+RhOub26Ak7Pv3EG/X8DPftOJ9W0tGBYkXT1R5+GxeWkrpk6sAXCum1l8TuDbm97BX08H8O+7P0vQ6cfPBEoi3iWIckQUZfSeDeIzrx+LR7qMLP3VB7j/hiZNLwARfTHGw+PCusqiXRAHFGmlOFHRT0L+261fSVpSnCDswnBIwRt/6cGzt18FjmUgyQp27Psrb
p99CUa7U/8+EyRZxvJrJ+Eftv9FW8H/b7d+BVKGjkmy6gTp9I2WFP2nbqUi8Y/UQDb+6bZUiaJCPxWXL/TO88XvXZ1QnWT1joN48XtXxzyl99gtU+D1CTjQPQgA2Lj7ENqXtGJl3FMnYz0ujPW4Yp70rnU7S6LyAlFYJAO/4uUVMwt8ZEQhKJWKLtGoOvrx33VhfVuLVjJf1a8sCy3p88A3L8N9r3ykff74wimYdUkdll17Ce547sOYcbJ8awcevWVKzNOSb/ylBy+vuAaKotji+mXThoggCkGuvnY0qh6TFAWvrLgGsqJo4/r65gY8uaQV33+hQ7eFxg+27cfaec26+06lH7OtzEWkRl1EFl9RwulgtcR9+57Dujq+zsMb3js9H37b967OWRbV/QXCIk6eCWpPmarbev3u2Qmx1U+/1YxF//G+KWMgF1LJcdigDWhYTLQrZo7rQpPKnkafq9cnYO28ZtR5eIx2O7HhrU9x79ebMLm+KqV+MLpmqi7T+51erL6+rQUPvf4x7v16U0LsrbePoMF9tSpfUS55AjsSFBT85qNjCfm4u/5mkm7Ox8kxuOt/XBLjHz92yxTA4DYlG/dkI4lkCJKCJ0YWYKqx1RO/P4R/afsyjp8Najky4FzbtpeWzUyQ2/YlrVj7nx/HbNso/rHadyv2XDCRG/mOxfVsa3zOOZ/yZzS++vwh3bG+dt4VGF0R6YKlZ0tWvtCBZSyrQgAAIABJREFUrXfOQFvrRLTvORxTNTqbOQuCIBJRF5munDMJ/SNV1P7t1q9g1UjFYOBcK8Gtd80Ax9nXjxNECW939sLrE3D/DU14cvG0mMrHGxa04H++/BG8QyH8ctE0vPjeF3jn836cOBMEz7EJOmjVi5Fc0dudvdo+egYC6I3qRlDM8S5BlBvhsITPeofQO9LVLX5udN3NV+KO5z6MVJ1e0opajwOjK9J72NSuFOWiuLCkH1DVeXjY2AYRhIYgydj856PY/OejMe8vnnmR6ftSFEZbEAdExso/bP8LXl5xTUbbybUUd4VTv2R4hdNeT1MYTRixLIPJ9VV4ecU1ECUZjpFWBakMQKqJkFJZfKGep7pwgmMZKAp0ZcYb5yjf98pHePjbX8aSZz4AAHiHQqiv4g2TcqpDTRMJhFkY+RUiZWrLjlLVK/1+Af+5vxurv3EZKpwsti2biaFgGDWVPBqqXPAOhdBY68Z911+qTfgBkXFw78sf4YW7rkbfUEh3nDAAVmztiHn/tlkX4/zaynydXlJStSEiCLtgVtubeD327O1XxSQ31ATmS8tmIizpLwJRW2tE7ztd/ahW5SEyQ5YV9PlDCIYlcAwDN8+hxn3O/3WwDDYsaElo2elgGTAjifsD3YN49K0ubTHSeTVujB9VAQCG904vVun16ev7dGVRT1bWt7Xg0be6cKB7ED0DAQQEKWGSMGSj1k/J5DiTVnOl1M4qlT2NPtcD3YOab/Cn1XPw8/ktWjyXSj8YXbNAWMJ9I5M2m5e2YqyH19oAAsC4US5sXz4TJ84E0e8XNHnrPOHDyyuuiXmaWW8fJw1aahrlK9JtaWZEKS2YtDN67SYVKPja5eO1hz3URROSLOsuXjh5JpDgH9/3ykd4ebn+A1TJxj3ZSCIZYVHG2529MZPNAPDQjXJMHk2lZyDS9mzC6ApsXz4TsgJwLAOWieTVokkW/xjJpRn5SjUXXF/lwso5k1DjdmJYkOBx2SsXTJhDvMzkOxbXq0wXn3O2ai4i2XxC/PgyGuvLr52ECmfkt4GwqDvme30hrHujU/OtVdtCi64JwhzURab+kIh+v4DrmxswttqlOx5ZhoFkw/apqj4CoOUJvvP0+5g6sQbrbr4Sk+o9OOz145E3u7QFwz/Yth9b7pyB5V+9BL89eALjRumfc/xi28Zat7av6O8VY7xLEOVEOCzh+NlIO+THbpmiO94vGFOJPavngAFw3qgKOJ3FX4G2KCMQjtVv/eR0sHDzxX9TiNLHyemXt3dYUG7XaLIr0373uZbiHuPm0R7XUq19SSvGuO3z1FKyVi2yrOCQdwgLN7+La0daFRzyDqUsBZwsIVpK7XQEUcKsS+qw5JoLccdzH+Jrj/3RsA2qnqM8YfQ5udiwoAVOB5uy1RG1xCPMwsivoARS+WE0QRmvt4oNBgq+NeV83PHch/ibR/Zg0dPvQZQV8ByDQ94hPLf3czy5eBrGj3br2ixpJKGiN06G41qP2K1Nh5vnsGFBbBu3DQtaKGYgbIdZbW/i9VglzyWM67c7eyEriraP+H0OBsIJ+y5V/WgH1Jjg25vewbWP7MGtT72HrpM+HO33a3FBQJDwyJtdMe37HnmzCwFBimnNdKB7EOve6ITH5dAWASW7d3qxSrZtWKN/H7+/NTsPYuWcSTHbim5rqFazK4bWT5m0miuldlap7KnRubp5R9qta5Nt54v+YaycMwk9A5E2gH/pOaPFz0f7/bjpib3oGQhgQfu7WLG1I6YizPHBQEycrbePLe8e1W3vqtf+OT6O/8mug/g0w7i+lBZM2hVRlHXbTTJgdBe5KWBi9JIqt2HZ4AEqg/tbSuOeyC9GshNOEotV8hz+enoYd287gJ6BYSzc/C6O9Plzjn/MyleOcfN47o6rcP8NTVrbubWvfYzeM0JR5j4JY/RkhmWQ11jcqDJddM7ZirmITMeL06Bdt+qbd53y4XCvfk59MBDWfOt75k7WbIue/SIIInPURabn1bixs6MbD3zzciiKfpwqKwoUo9LBBSJaH9297UCMDvYOhVBf7cJQSMQdz32oxUvAuYX2fUMCbvzK+TjaN6x7zmM8fIxO37y0FTs7uhO+R34vQdgXUZRx0hfSHrpRc8DRRNarMKjiOUysrSyJBXFAkS6KU5+SjneoHSyDGhstsCEIIyp5FpsWT4uR4U2Lp6GSN39ImrXYI5Pkvx4DgTA27v4sZiJn4+7PMBAIZ3QcVpJswijbicBkCdFSmlzkHRyWf3VSTCnpjbsPJejq9iX6jrLLyeL3930Vz90xA4217rR0ObXEI8wimV9BlBelOkEZEuWEUv+rXtyPkChj2ZZ9mHZRHX7x+0NwGSRnWQZaW77ocfL4wimYOMadtW+QD2rckYTNc3fMyNjOEEQ+ydXXVonXY0bJDUlWEJYkbI57aGV9Wwt2dnRj89LWmH2Xqn60A3oxweodB/FF/7AWF/AODt6hEFZs7cCtT72HFVs74B0KaYvL1OoQe9dcp7XoS1YVS713erHKzo5u/HLRtKxl0Wh/NW6n4bb6/QJ+9pvOBDsTL4dmI8sKvL4Qjg0Mw+sLpTVBn+p6R2PWuLYDNW4e40ZVYN3NV2LXqll47o4ZuGBMJcSRB8jMOtc6D6+rlzbuPoQatxNAbEXLZVv24Yv+4aTJXHWMqeNJ71jv/XoTmhrSu6/xY7atdWJCW8NUcT0tnLKe3qGQbrtJIcMHRzPNqZXSuCfyi5HsOFgGOzu6E2xk+5JWCJKMfr+AlXMmadVkH3mzC7UeZ07xj1n5yoFAGH1DgnZs2ra2FmfukzBGT2YefP3/4vw8
xuJqZbpoIraVtXQuItPx4mCZhHkhNQZjGAbLtuzDxt2HEsb8+rYWtO85rO3j4rEesi0EYQEsy2D8qArc+/Um+IIiTp4J6s4bnDwTzLjwiNVE66MD3YN45M0urLv5Svzp/uvw8oprsHH3Z4YFW/r9AhgAoqzo6qAnF09D+57DWHfzldjz4zl4ddUsNDVU44FvXo5nb78K25fPxLO3X4Utd84g3UQQNkTLf52JLYCgN+fTvqQVbp6N6WBRChRl+1QGwJiR4I5lAFkBJJmS8UTx4A9J+OOnvdi2bCYURQHDMHhtfw++3dqIGpO7jfEcq9tqh8+wKl10+9Do1hPpKkS1h318afAHb7TP2E012ZfNRKCa1IpvV1Tn4XHiTKBkJhfrPDyGhdjS7ge6B7Fr/zFsXz4ToqzAwTJwcgzumH0xOk/4YuTxxGAQt2x+V7s+6UAt8QizMPIrSJTKj0xaohUTYpIKF+rE8tudvbhj9sW6PkPfkIAD3YN47cAxPHv7VeBYBg6OxRO7D+G+6y+1fZuOoZCUYIcJwm6Y1fYmXo+17zmcMK7Xt7VAkmX0DYXx/DtHtHabY6tcYBgFq79xGVwOFv1+QTuGUtWPdsAoBqnkOS0uSBZTANm1+wQAjkXCdr8762K8+N4XWDuvGZeNr0Yl74iRxVTt1Iz211jrxq5Vs3XlWo0VvT4Ba+c1o8btxGAgPNIiM/UYyKbFWy4t09NtgVhK7axYlsFFdR7UVDpxYjCI25/9IOG6GZ1rJveHZRlMqIksvqvkOQwGwnj0rS54h0IYHJnEViulAOfGCnBuAf+anbH67tG3umLi7OjchijJcIzkNhwj1cpTET9m1QV60aSK61ONaSJ3jLomqIvc4nWUkUzyBu2r+SQynEvujChfjGzGYEDAHbMvxp5PT2mxmJNjERIlsAyD9j2H8cA3L4uR6TPDIu57pSPr+MeshyEEMZJXKZXcJ2GMnsx4fQJ8QRErtmYvi5mgVrWN19c9A8P4ztPva98zey4i0/ESECS88O4X2HLnDJweeRD/+XeO4EdzLwXHRH7bMxDQ8i9OjoUoK3jqj4e1yk5qpUiyLQRhDapNPnk2iIdeP4T7rm/C4wu/grFVPCQlsrj1pfePYumsiwt9qDHE66MD3YO447kP8afVc6AoEb0syTJ+uWgafrBtf0zM9Pw7R9DWOhEN1S7UV/NQFAXP3XEVOIZB35CAhmoXfvS3kxPivJAoY+1rH1POlSBsjCCIODFSHa7fL2BMJa89dLNm50E8+lZkAe1FYyvh4li4nCxGV/BwOIqytpohRbkozulgEJaAu54/l4RrX9KKN//7OL41pTGtJBZBFBI3z+Hapnoc7h1CJc9hWJBwbVO9qeXD1cQzywB1VbyWVB4WJNRV8eAdmQVNavvQ6KTt5qWtaGqoTlCMoiijdygUkwAshom0ZMeoQNH9zJnCKCSbCOEdHK5vbkBb60Rt4mdnR7etrkm6sCNJuejzkZXIQrhbn3pPk5knF0/D+bXuGHms5Dk89HongHNP0+1aNTtBl6syHRpJqjEM8NKymXjh3SPY/OejWrKDWuIRmcKx+n4FR8mlsqPOw+OlZVcjJCraAkmXgyn6Ccp4/azaGwfL4NnbrwLviPxb4eRQW8nj8YVfgawoGBYk1Fe7wLHA9c0NuHlqpAVrdILZ6WAxRqe1mF0wempbz84QRKFJd6FNMuIXWkRaZPB4/s4ZYABIsoK9h3pxwZgJWLNzH+qrIvuTRp4YdDlZrbJk9GIXWsBhDbIceUBqx8pr0O8XtOoP98ydjLoqFxgmkmhOZ3GV0cIjvXu3YUEL7t52AN6hELbcOQPbl8/EiTNB9PsFPPpWFw50D+Kdz/vx8oprYmTSaCHZ5PoqDATCEEQJTgeLLXfOwG2/il00NWF04qIT9ZglRcGzt1+FjbsPoX3PYaycMwl1nsiEg3r+ya5hNovb8mUfzBjXdoFlGUgysEKnKpp63fRiuFQyEy/PNW4e40dXJMjsI292aZM2rx04hpeWXY3xoyvAgMELd83AY29/hkff6sJzd8zA4LCAsCRDkhU88M3LMCxIWpyol9tId0EkcK7tmXoNBgPhjOP6ZGM6m0WeRCI8x+rncFgGjy+cgntf/ki7/48vnAKeY3VzWWAYVPJcQg4DjP49yVW+iPJGz2aola+/NSU2Fmtf0ooj3rM40D2oVcqcdUkd7p47GaIk49nbr8LTf/ocL3f0YNmWfdi+fGbaOsXp0I8fU+VA4+EdkTFTKrlPwph42whE/Fl1QRxgfSweXdVWL+cMpDcXkakdTje/r9oYUVaw/KuT8JuPjuPSCaNw3ugK3H/D5XA5GEhKJPfi9Qm6+ZdDvUPwDoXw+MIpGAqJGJvCTyYIIjcqnCx+Oq8ZTo5FWFKwNCrGfXJJK9xOey0YMZrf/OSkD+ve6MQvF01FlcsBlpXw8oqZCImReKlvSMB91zfhmT8fwRXnVeOeuZdqFZcbayNdzngnA1ZiIYiS9gAl5VwJwv4EgyIO9/u1PE5jrRuP3TIFP5o7Gf+++5D2oPQYDw+OZTBeJ3dXKhTlojhBVLQ2jKqjuXH3Z7j7a5PpKSOiKFCg35bF6P1MiU48P3bLFOzs6MGyay8BxzKQZAVP/+lz/OhvJ2dUlS7ewamvcqH3bAhVLgfcPIexnsiTr6Io49NTvhinqX1JK5oaqmw/kZZssm8wIOg+bZZOe0WjiZBatzPBwWxf0orakbYwxYbHxWD1DZeh5/SIjFRX4JE3P4lxir//4n5sWjwNF9ZV4rRfwEV1lfifL3+kPemmfi9el+tNpqhPsPxw7qVYMH0iegaCGDeqglriERkTEmVdv+LBG68o9KEReUaWFZwJiAl6ecKo4k40elwsfjj3Unw/6ryeXNKKoVAYL33wBX74tcnaU33XNzfgJ99qhqwoECUFr3z4V8ybcj7+6e+aseSZ92N0+uodB/Hq92fB6wvZduKWWj4SpUqyyRqXg9Umg1iGQTCsYOULscnbs8Ew6qtcePCmZgz4IxWXaj18gu+2bMs+vLpqFhqqK0qm4pVd0PNvn1g0FWFRjlmsEb2YwiixnGphmHrvAmEJh3uH8MibXZr/fduvPsBrd89ChZPDujc6Y+xfQ1Xs/vSS3o//rgv333AZuk8HtAnIC+sq8frdsxEQjGVF75h/uWgqgmEZ972if/56ZJuIN9s+lMtCpnSvW/T10Ls/2753NRb9x/u69zl+wZjTwcLBMnhi0VRIsoIX3zuKRTMvQECQsPSZc7rtsVum4Jn/93OwDDDGw+PU2SAeePW/Y/ZR485u8ib6fFiGwS8XTcUPth1Az0AA+4/2J/hZ6cT1emM6lwqG5Yze+OMd+hXeXE4GNZ7YB0drPDwqeUY3lzV+lAsPvd6JlXMmoRIcBEn+/9n79vAo6nP/z8zszOwtyeYOmCiIXIwUJAsxwdMeW1paK+pRQCsXBSoEqfUcj7X6a0vVQz1HRY+/WrlEzikIiBKBttb+tFpbT3u8VA1UaqNAETARSELIhr3PzuX3x2aGmZ3vbDabBDZhPs/
j85hld3b2O+/3/b7Xz4sHX2rG0/OnEu/lZDhOlC/1LLVhIx1IspwEpTUtAGfGAb+wvFYbe/Tft/kRFxXM33imMXXdgmoAQGNTK06FBZwMCRhT4oGbPxPHJYFjKNz5lXGGRol1C6rB9XE0Q7GHwyVlHtO1hnLs0wYZDgKr5oXF7rPqi6ustnlOVrMfQjERHaE4gDMjiQtdrGUMI5tzOJP4Pilfsn6hH16OhgIKj7zyMV5rbtc+G4qL+F6PPayu270792Hr0hoc6YyAddB45JWP8fANk+3CExs2BgGqLvjlnhZce3kFOoJxPP/eUUPu4GdvHMBD100617dqACm/aWTOVtAWjKPhfw7h2/9wseZ3z6oqww++WYU7vjwWsUSy/kKvf1Y+twfPL6vFLTobY+Ot05DvdNgxVxs2chSyrCAsxNEdlRFNSFg1uwob3kyyzt7z4od45MYv4IfXVCEhyWBoCryDRol7eMaxVAzJojhZUXDbjDGmkQglXg4sY3cZ2ch9iGKSfUVPK7tm7mSI4sAUxemDvIFoItnl39Sq/XtFoQv3Oib26Zr64PfUSh++9/UJhj2oOojtobjm4AFnAjWN9XXEESW5pGDTdWtHBQmPvbrfYPg+9ur+ZCDUk/66VgmSrmiCuFZDtZMiKig4GYwb5PrROZPRERS0pFtrVxQFLhaRuITyfCdYhtKCEypIXXukxMV9u/Zh1ewq3LGtCTuW12LSBQXDNvlkY3BBUSDaFbYonX9Id4aN8rnO8d1lj5gg42QwjifmTUEgmsCGNw/hjm1N2Lq0BnP8lbijJ0ExtdKH22aMwQJdkvrROZPxs98fxA++WUUMdEQESSuWy8XE7VBgqrVho69Ix7zUHoprDF0A0LDIj9WNzcYmhW1NeH5ZLX56y+Vo6471arvFEjKA4cV4lQsg2bdd4YT2PNTX1GIKCpRlwVVvRT4qY9zx7mTh2oqrxmrBsNauKGKCjInleQZfzc3RaAvG4OIYiLKChJiUg1Ivb9CpS64cg44UH2DN3MnwuVlcUGjdhUW651MWvz+df5RtcdtAng/nUyFTJuuW2qRHej7twbipuPLB6yZBURRwDkZLJDtoCqKUlD/19duuvBj7TwRNsnLPix+isb4OI/KdOBmO49af7yPKUl9lhvR8/+/Nl+O526+AJCtwMDR+8vLfBsSvt9kW+g6r/edzO4gxnJ/Nn4olm943yfCO5bVEP2DH8lp0hOKo39pkeL+VroglyPKlnqU2bFjBSpbL83m0nY4R5UqWFU2vAcC3n33XlMTetHg6AlEBsgLDWW01/QMAwnHJVIS38rk92LG8tk8N1jRNgXcwWPncnwdER9rIXZDi5scD0bPui6f6K0UuGY31daCgQFaSkyL2twdNI11Vmy2bcziT+D4p1nTHtiZsWjwdSza/Z/DBVmxrwnO3X2FpPy3Z/D4qCl1YNbvKLjyxYWOQoOqC5B59H+sXVBNzBwNFcjJQMDTECSI+D0QhyQp+fG2VRm7yrWfexarZVVpBnBoL1sd2SXGhU2HBoONV/82OudqwkXsQBBFd0QROhgQDQ5xaJLu3JQAny/SQ/tBgGRqlHg4sO7z3bm5xe2YIRYF2+ABniiMUBTnFOmXDhhUSsoI3P2nDpsXT8ft7/hGbFk/Hm5+0ISEPjBGlD/JuePMQHp0zGRWFyWR+tgxtavAbAFZcNda0B5dt+QCd4eSIEpLTlpBkHOwI4aaGd/ClNW/ipoZ3cLAjBHmAfvNAQXWeLyh0awkkIPn71UDozc+8i/qtTegIxTOiXN/fFsQN697ClY/+ATesewv724KQZaXXYLzcM8bq864IOoLxnFurVIiyonUEAmd084qrxmrvqSh04dOOMK5+6k+4ZeO7ONoZwdr5U3uVT6u18rlYtHYljXv987Jhoy+wsityfMvZGARYnWGiNHSTWLKsoCMkYNWvPsLNz7yL1S8343tfn4BSLw9JUTQ9CpDP9/t27cMcfyUYmtJ0tYqKQhcOnwwb3v/LPS043h3F0c4wjgWiEMVzu3Zql2R/7SAbNnIJVmxd+9uDOBaIGvSYfo+raO2K4lggikPtYY2RTH2dZLv1kRTERoYg2bdujiEXIMcloj+R7lqpfsX+tiBufuZdw1kwtdKnBa0dDhqjfC5UFLrRHU1g9s/ewp3b92L/iSBuXPc2rnz0D7j5mXfx/W8kP6diRIHT5APcu3MfokL6JF1ffn+6hJ/eT1WhD8Sn+lSiKPf4VjIaFvkH5HywSqB2hoU+XyvXkcm5mtqkR3o+BS5Wk6Ob/BW4/+pLcSwQxUfHTuO//vh3fNIWxA9/sQ9/7wjjpoZ3NNk/2BGCoiiWsqIoSXbfhEi26dTC0nQykwrS8/2XHX/Bpx1hfOWJ/8H8je/ithljDPsiVW4z9e1thtu+w4qZTZFBjOFIskK299O8vmGhUVekY7liKLLNzFBJpqBjgdyxk23kFqzOkqiQHFNGkisHQ2sxTCsZZhka9199Kb6z3VjkVr+1Cce6o0gkJJNcSgr5WlIfYySynGwKt/Xa8Acpbr7lnSMDZmtlA1GUsb89iAdf+gifnozg5mfexYet3cSRrqrNls05nO4zqt63ijU5GNrkg7V2ReGwiL8EogntPUlW1OGdvLZh42xClhWcCift9Ygg4pEbvwAHTaG1Kwony1jWJOQa1Pym15nkRLp/919xw7q3sWTz+4j3+EiZxIJT40L5TgdWv9ysxRNumzEGHEPZMVcbNnIMsZiI/R1hNB8PagVxgHFvVxS6UJrHY2SBCxcVezDK5xr2BXFAL0xxFEVNUxTlg7N1M5lCkhWUenlDVfKGNw9Bkof2aCsb5w84B9Uz7vHMiJm50yrBOQZGfvXd23tbAnj8t/ux+vpJGFvmhYvNbpRLsYdDwyI/6rc2WSbXBFHSnLbU7gAHTWXd8ZwLo2jSjVZNh3QdZum67Ica24AsK5bBZXWNVNaIx17dr/3bf//vp7jv6kuxeUkNnCwNN8fA5zI/X6u1UpMsTA6uiY2hg3R2hY3zC1Zn2FDWMZ1hwdSRfN+ufVh9/SSc6I4hLEjab7Y634s9HFjGPA5l85LpONEdw47ltQhEEzhw/DS+8YURONAW0uybSJGI0UUeIgPB2UA6FlgbNoYqBFEynVsejkH91iasml2VDG54eay4aiyKvRw2LZ6Op944qHX5qjZUuj2vvm/dgmp4+OEfGDkXINm3EZ1OVkEqQE4dxadeS33uPheLiCDBxSWfnRXr8urrJ2FEgdPg0+gLTFbNriIWvK2+fpLGUkFTVFaJ8778/nQJv0IXq/mpqX4ayafasNCPp944gNea2zGrqgzbb78CDE3163w4nwqZMjlXSU16elaFJ+ZNQSCSwOM3TcHpqACGpjWGy4pCF7YsrcGtP38Pq2ZXEZvxGuvrepWVdL62lW9vNU7N6vm6e/aXup9Wza7S2MRSCzMz9e1thtu+w4qZTZQV4vjUdDErq9d//ZdWbFo8HQxNQZIV7PzgM5R9aSxxHKqLY4jf62RpHDkVNoyaPtd2so3cgpWukRRgV1OLSZ
emjjm3kmGWodAeFIjXbg/GQVEU/t5u9N/ynSzxWk6WLKtWcdvOsIDDJ8O2XjsPQDpb7/7aBIwr9Z4TX1yWFRzrjmr+kbp30uU0gOzOYavPeHhG0/vjy72YVVWGOf5KzX/b1dQCtqf7SL039bMujrEcgai+pyyPtwtPbNgYIMiygiOdYbSdjhlsuJ0r6rBp8XQ4aMowehDoOaNzOHdAIrD4rDOCikIXZEXBpsXT4eYYlOXxJjZ4UlzoP1752BRPaKyvs2OuNmzkCNRGvJgo4WQwjhIvZxnzbVjox8jzkGSmt/GpGymK8gJ4HsALiqI0n4V76hW8g8b3vzHBFGDg7SCCjaEChcLJHtYWvQwXuAbGkUl1REvzOIwt9fSL5YGmKUwoy8P226+ApCiWDqKiyFi3oFqj2VeNJpYhJ0t6SxT0FkAe6II5q+tlm1RPlyAZWeCyLLQbamNTOsMCuno6V1PlosjDYcfyWpTnO3H3jr9ojoNKzXybLgHSsMiPfJ41rau+KFMfDHj27cOafNmwkS1su8KGCt5BE8+woSwLVufQRcVu3NP4IUrzOGxY6MeKbU1aoXGqHi/N40FTwIQRedi9cgYSogwXx+B4dwz37/7rGR2+0E8cD5/vZFGWb04a2rBhIzu4OMZ0bj27tAatXVFsePMQNi2ehpMhwXSuPfbqfnSE4lpCRe0OTN3z5flO/Oo7VyLP6cCO947i218am+ZubGSKVD+j0MWafIGLit2m1xoW+vGjX35kuFZrV5I9TvYo2mjULUtrTEH8jbdOg8/FWZ4FY8u8qPC5DLa3vsDEKnE4tsyLt+77co//R/YNrRLnKkjJU9LvT9eIJMsKDnaE8NPfHcCq2VUo9nAoy+MxqiD5mzqCZgapFduSydHXmtvxWnM7mo8H++1juThGSyqozRWZsIoPV5Ca9DYvqUEgIiCWkMCzNO7cvhetXVFsWjzdNAY1GBPTJq4ZCrio2G0qPNLLSrpiSb1vL8syJAWzaZP4AAAgAElEQVRgKFiOU0vXoKW/L33iSH8vffHts23GO5+hMrORmlrK851Yff0krdinPN8JJ0sTi9ZYhiL6AU6WxlcuHYElm983FHZaJUF9Lo74vQCIo6ZtO9mGCitd42Rp3P21CXjy9f3aWVeax2NUvtNQUGklwyxDgeq5Vuq1O8MCijycSS4L3WYbZeOt01DiMZ+V6eK2gijhqTcOmgr6Ghb6bb02zGAVNz9X6AwL2qh2vT1hFfNge/ZSNuew1WcEUdH0/u6VdfjuzPG4QzfCbP1Cv0aQoG/83njrNABAkZtFY30dFCXZiP6T3zRjb0tAi5+r9q4NGzb6j86wgKOdEYNfUurlcbw7Zjgj9aMHKwpzew+SmLOfeuMgNi2ehkAkYTr7H3t1v6GR0ufm8MY9/4iOYBw+N4vXmtsN12rtOsPSbeVL5wLZiQ0b5wNEUcaB9iCW6+IZa+dXY1ZVmWHvVhS6MKLAiRIXB47rrURs+CHtL1YUZSpFURMAfAvAToqiBJwpkDt6Nm6QBIlQ4Xzvzn1oXF4L2WaLszEEIEgyUYZfWF47INdPDfIG4yIOdYS1gNxFxW6MLvb0ea84HDQqCt0IRAU0LPQbZlGrDuKJ0zH85sPPTV20t84YQ+yI6i1RkC6AXOzhBpRNbTDY2dJ1mKUrtBtqbAOCKOGhXzfjqW9NxV0v7NXW75lFfgDAf//vp7j/6kvREYprnyFRM9dvbcLzy2pxQUpyTk32Pb+sFsFYAl4nCwoK7v36ROz84DPcduXFZ/cH2xhWSGdX2Di/oIAinmG3f+mSc31rWYN10GTWC4bC/VdPRCCaQJGHxePzpmCUz2k63zcs9OMPH5/ArEmjUJZ/JsjREYybxo7Ub2vC6usnDZp9kw3Ubs+jnZF+20E2bOQK0nX87m0JgKFpS1/jYFtIC+JuePOQqTDgiXlTsPrlv+G2GWOw9g8HMcdfiYQ93q3fsPIzSOwZAAyvMTQMNjRwhj3OzTMoy3OCpil4nQ7c+nMyq5b6GVKiPTVIrS8wsUoculhGC3zLspJx4lyPdMnTTBuR9L6iGuirKHRpxUZWPpVPN/ZQ72NlE7SXZQVtp82FLuX5zmGZ8M/kXE1NDneE4nDQFOZueAcNi/xaQT1AHpnr5hhDcjhV/miaxuhiJ3xuFjuW10JSACdLo8TDa41z6YolgTP+pbovV82uwuqXmy3jDqkyrmdAV+9rlM+lFYvqZacvvr3NcNt3ODlykZuTo1Ge54STZZCQZLAMrbFqlebxhqK10jweogzLWNY9LxpHjd/z4oeWviJNUxhd7EGekzU8w2Pd0UGNA9oY+rAqrCnx8Cjx8Hj4hsmWekGWFSQkhSjDC+vGoNDLmfw8tdH0lpqLiHI5rtSLxvo6iJIMR8/+Iemi3iZklOZxoClg69IaSIqCkyEBI31OW6+dB5BlBZ91Rc6JLy6IybHDs6rKUOThNHuCxGC7Zu5khGIiSnqaTfp6Dlt9Rq/3YwlFK4gDkvvkjm1N2L6sVityK/Fw2L1yBkIxEdc9/ZbJZ3j4hsl44FrbNrBho78g+XyCKJn8kntmjcd3n99r2LcqO/Tql5uxbkE1OCZ3m6hJecmOUBwMTePuxg9NZ7+eDX7DQj86gjGtUO4P3/tHol8GJOPDJJ2Uba7VLqSzYaNvSCQkdIQFrSAOSO7r72zfgy1La9B8PGhoTClxcXA6z7+COKB3pjgoirIfwEMAHqIoagqSBXK/pyjqhKIoV1p9jqIoJ4A/AuB7vmenoigPUBQ1BsALAIoA7AGwSFEUoS83LSrkMWcJWUEgKqCol+CrDRvnGlZjJgeSblet0D8VjqPjpDlI73OzWe0VmqZQ5OHhc3FEB7HMy+PayysMXbQbFvpR6uFw18zx2gg39fVCXUKChHQB5IFmUxuMAjyVtSE1AKAmSKw6KYba2BQt0EVDCy7TFIU8JwuKAh649jJsefuwIfBQ7CHTtyYkmajL46KMtb//OxbWXYT5G981dNbpR0bYsNFXSBZ2haTkLgW6jcFBsYfDP1VXGs6woc7O4aApPHnTFNzd+CFKvTzumjkOFxa7AQC7mlrx9qedWDt/Ktwcg/kb/4yb/RV47vYr0BGMozMs4Kk3DuCumeNNera3UWL6187lOIFAVEDb6diA2UE2bOQCSB2/r/z1uLZ3aYoijr84HU2Ac9BagVVHKA4Xx2DL0hqcCgvoDAt45JVPsLclgObjQa2YJFftz6GEvvot+tdkWdEYPVM71H96y1TtfSS5aO2K4lggiod/87GpaGTL0hrEEzLaTsfQGRawq6kFd39tAkYW8Np7SYWTqedif4p4rHyhTH05q7MompDweVcElAWDlJ7hS2UHyTZoT3q29+7ch90rZwwKs/m5Rlc03uu5qsrE7pUzEEvIYCiA7nkWqexvpMI3QZK1Yo3UxLWe7a3IwwMe8z32VixJel+6cWqpMs4yNE7HEpouVWMbZV6eOAazr759OraFc4Fcl2EKFNwcYyhyc3MMGIrCwY6QaU+X5/N47NVPMMdfCTcYCJKMx179BI/Nn
UyMZckWvqKYxlckPcOzEQe0MXSh7rN8pwON9XVJvUnThv2WTi90hgUEIglcM+UCgwyvW1CNbe8cxlUTyzFxhBfbb78C7T1+3rNvH8ZdM8djFYGNVuopLs7kTEwXty3PcxJjwfl8+liwjaEHkh31Yn3tOfPFOQeDPUc6cedXxmHNbz/R7Im9LQE8+/ZhbFlag+5oAu3BuMamrdoJvZ3DpHORBL3e744miPsEULS8g8pyrI6UV9+Ty5NjbNgYarDy+Yq9HCKCpNnsUyt9uKjYbbL/9rYEMK7Miy1La8AwlMb2mIsgFds/OmcygjGRaNtWFrnw+3v+EUc7IwjGEnjs1eR0gWIPBxfLmFi418ydjDu370VHKE60EbLJ3Q4GaYkNG8MVsqwgLAgIRiUkZIU44rk7msDq6ydhdIkbLENjRJ6TGLM4X5BxKSBFUTSAMgDlSIadOnr5SBzAVxRFCVEUxQL4X4qiXgHwrwCeVBTlBYqiNgD4NoD1fblpniGPOWs/new+lV25HbCxYYNjaDJr2iB0FkQFybIbVZLJVfyZwMpBdDhoTCzPM3UTdkUTWhBEvY8V25p6derSBZAHmk1tsArw4qJsCACoNOjpMNTGphR7OPzomirM/68/a47D974+AbfoitcenTMZv9r7uWZwF7hY4rM92hkB7/AaEhyyrMBBU7hpeiUURcGzS2tAAZAVoNDtGJYHea4nH4YTOIbGg9dV4VQ4Yfg7l7u9bAwOhiM7R1SQIMoKnrzpcjhZGnekjNOZX3shZEVBIJzAE/OmIBBN4KW9n+PqyaNQmsfjwesmEZO8VudzRDCewRWFLrA9RQGDsY696UorO2jH8lpiIt2GjXOJTM/+1P03tdKHG6ovwIIeOyx1/MXUSh/umjkOnIOGoig9Y+Jo+Nwc7n3xQ/zwmksxd8M7hu9QGxjK8victT9zGanPUpbJBWuZ+C00TaHUy2kFH4FoAo//Npm8Y3TiYaWXO8MC9rYE8Nir+7H6+kkYW+aFl0+OwFaTbqqt/uTr+/GTG75gGP1HUxSeX1YLmoKlXJ6tIh5ZVnAyHEcsIYGhKLAMmQ31UHsISza/j1lVZaaCwg0L/XjqjQPae1V2EABZ+XtWPmRClIddUF+WFUTjZNb7XSvq0BGMG8YDd4bO+NDqs+gIxg3PjFR46eUd+OnvDmCOvxL5Tgc2LZ6usXmVefle9WSmcQL9+6xY6SjqDLOcKgcdwTgee/UTQyLpqTcO4OEbJhNlpTffPpf9vqEgw1FBwoMvNWPFVWO1IrcHX2rG0/OnEvf0juW16Agae7M7ggLCcYkYy+oMx4kxaD6Nr0h6ppyFvrJ9zrODobbPGhb6MdKX+VhdQZTwo19+hP+YMwk7ltciLsqQZAUb//gpGpta8ZuP2vDC8lpUFLrh4hwYWeDEpSOqwDIUkY2WZWg8+XqzQc89+fp+op5LF7fNNhZ8PiCXZTIbkOLmcZE8leFs+OLFHg4LakdrceqOoKA1/IwocOK72/dqSWMVmdjlpP26ZWkN4qJMPOfVvXEsECXuE4bKzoZJvaf+yNJwk0UbNtLBKsf30p1X4qJiN9bOn4pT4QQuLHZDVoBdTS14rbndwLDKOWhIsgInQyOeyF1WfX2MO5ZQdYgCh0V9RVc4gVBcREkej5f/0orvfX2CoTlp7fypeOTGL8DJMijycIbiG5LfrOqzqZU+rLhqrGZPyHJyzUi6Z6BJUGzYGK6QZQXBeByBiKQRG+xqasH3vj7BMOK5NI9HWR4PF0fD5yKzPp9P6LUojqKoLwK4BcA/AfgISZa3uxVF6U73OUVRFAChnj/Znv8UAF8BML/n9WcBPIg+FsXJCogG9SM3fgE0hZwP2Niw4WQpfHfmeI02W2W7crIDL6OiRTeqKCn41jNvDcr+oOlkgkJRFLAM3a9xoOkCyCoNel9HslphMArwsjXkhlphBk1TYGhK+52k0aj37dqHTYun41RYQCCawO8/PmHNevGty7VrpwYcZlWV4f6rL0UwJsLnZuFgcnNN+oOhkHwYbogIkql71cb5iVxj5+gvOAcDN+fAidMx01iwlc/twdO3TIWDpg3y//T8qTjRHUVlkVs7x1NBOp+TtsyZhJ/6WlckAdbBDPi6ZqIrrewgm5XDRq4h07NflhUoioKt367BkZMRPPXGQdw1cxzRP159/SQ89cZBU8Dz0TmTk8xh8ybj/qsnYmSBk2gDl6aMG7SRGYjJ7UV+zKoq01irgN5ZoPVBYpah4eUZbcyJaqu4dOycVp3gj/92vxaITha5AfGEbBqBrY6CSYgycfTfuZYD0ro+edMUbF4yHYs3vW9YF3WspbrejfV1UBQFnIMBywC31FyEb//DxQhEExo7yPZlV2Tl76XzIYdbUL8zLCBGYCQs9fLoCAkGv65hkR8//d0B7b2vNbfjzq+Mg5d3YP2Caq1IvyMUR2kej90rZyAhylpB3f1XX4qjnRFEBAknQwIuKnajzMtnxFyUKTMb52C0eMKoAifWzq/Gd7bvMewfkqspiBJea2437GcAeOBasqyk8+1z3e8bCjKsstbrUZrHQVLINqCsgJgIdHEMMZZFGlmuFXUQYPVMR/l4rF/oN8UBXZxdFDfYGIr7rH5bE1ZfPwkjCpwZ3SfnYNARiuPnfzqCu746Dh3BOALRBA62h7Rrqk1KpXk8OoJx3LDuLaydP5Uol06Wwm0zxhiS4Y/OmawlsvVIF7c93h3NuilgOCPXZTIbkOLmAIjP/2z44jRNgdbFqfe2BFC/tQkA8NZ9X0ZpHoeGRf4+5xNI+/VoZ0SLp6ivLdvyAV5cUaftrw1vHtIY/PV2bCrLUl/ZZfsrS8NRFm3YSAdLpnFBwoWFbuxvDxrio4/OmYyOYLLJ7L5d+/Dc7VegKyzgZEjA2DIPHHRu23E0TZkmYL2wvJZo225fdgXQDVQU8rh1xhjc/My7Bka5U+EEnCyNuRveQUWhC6tmV6GxqVW7RurZrvpaqfZEwyI/ij1kvy7f6bDtBhs2eoEoyjgVEdARjKM+Jbf+7NuHseKqsVj9cjPWL6gGTQHFLg48f36OS01F2lWgKKoFwGdIFsI9pChKW18uTlEUA6AJwCUA1gI4BCCgKIrY85ZWABdYfHY5gOUAcOGFFxr+LSGRu6zznCxkJbvuXhs2BhrpZDgsyFrAAUjK6R3bmvDC8loUDnCnFEuTR8ZQ1ODsD0sKYl13lPE+0jtY6QLIhS42q5Gs6n2mdiL0VoDXF6dURX/Y7M51YUY6GSZB77hbjZ+JCBJufuZdjaGo2Ir1woJqeWqlD7fNGGNgtWhY6EfeyHObqBvorrqhkHwYCshUhhMWiY4XLBIdNoY3cqlLtq96mIRCF4uEJMHDkwMLhR5OY5cCkontqCDh/t1/NQQsSjycYYQPTVMoz+cNOhyKglf3Hcf2ZbVQFAUUReFXe1pRc3ExfBbnXn/WOxNdaWUHOewA71nDQMjx+YBM5FkUZexvDxpGVqxfUA23xf6uLHLhP2+agkUpI3ju27UPm5fUoCMYw7079+GlO6802cANi/y4oMA1
LNl4+4q+yjAxub21CS/W1+KWmou08X4XFbstWfhIPtXmJdPx+LwpoJAs5i/Pd8LnMo8xbayvw7FAFAUuFmt++wkAmLq8n11aQ5QZdVzu2fBDrPS/1UiqE6djpnW9u/FDPD5vilb0BgB3pjB/vNbcjgeuVXBBYXJ0+OddESzZ/L7pfhiLUau9+XtDoRhgoPSwIEo40W1mOrlr5jgTE1D91iasml1lKBxzcwyue/otTK30ackVWVHgYhmtIE593iS29dPxREY+Uias67KsgGVgaFacVVWG526/AqdjIo4Fonj27cN4+AZzo0y6hLWVXFvtqVz3+waanT9bpJNhq7iQ0+I5MTRF9P1+decMYizLwzF9KuqweqY7ltfizY/bTHbyjf4K+NwDtlQ2CMiFfZZOhq32mZtjDPeZzm8q9nDYsrQGbadj+NYzxokN+jhbezCGEg+vfWcsIeO9TztMcnmDv4LY6NpYX6fdo/5+yvONxc3qvVmNMe8tFjzckQsy2Vf0ZkuQzkaKwlnzxYn7w0L+nCzd53yCev2IIJrGk7ktzomEKGt630En7Xf9qG+2x8/SP/90NsxgsCoNRVnsD+zYhA29rlIbx4o9HCiKwqmIYNk4pr4eiCRw/dq3tCLyMu/ZZdXPRoZT9zkFcsFyVJDAszQ6QwmwDI1SL2+KI2xY6Mev77wSx7pjGFVwhtF2VlUZKIrC510RQ65VP1lK/Z76rU1orK8j6p7G+rqsfHIbQwe2Hu4f4nERLd1RAJRWEAcY9dW4Mi9WXz8JxV4OJW4OHGcXxKnobSX+QVGUo9leXFEUCcDlFEX5APwCwKWkt1l89hkAzwDAtGnTDO9xWCS3SrwcJAs2CLuS2MbZRjoZli3kVB6ETiknxxA7kY4FzijLTPeHKMpoD8WRkGSwPaMkUhNlVs5U4/JaPDpnsqnLMBOSL6sAcrY0/Om6oKwK8NQA09HOSEaJLBV97fDKJaSTYRL0jrvV+JlCD4uplT7sbQlg5XN78NztV5hYL568aQp4B62N3wGSRRqtXVEiA139OR69oMrTk6/vxxx/JYo9HKKCiFH9SCTnSvJhqCNTGbayHQZDJ9vIbeRal2xf9TAJp+MJfN4VRSguEfUy29M9rQaDxpZ60HIqquldfWJ7z5FOLJoxBpKcZM/gGMpQWPD8siswbUwR5utGZ6+ZOxk0RRETH/1d70x0pZUd5ORy/xweLhgIOT4f0Js8y7KCY91RQ5C21MvjZEjAJW5y8wnnYCBbMOUEIgIcNIXNS6Yjn2fhK+eGDEPx2UZfZZj0LEu9PE6FE6YiHyuQfKrFm943MJ5ZPSNFUSDJCtpOx/Cvs8bDQTMIRARDAu+zzghRZs7WuNxEQkJbKA6hZ7zblrcP45+qKzGu1GvqGFdHUoXjIlGWS7wcEpIMN+cAQ4M4Bk7ve1n5Z06OJo5a7a3hKl0TV674ggOlhzkHgy3vHMET86bgnhfPnKujS9yWRZYqplb6wDsY7FxRlxx/S1PgHDQKXCwe+vXftLFEG2+dhmIvZ1lUZKUnUxPF40q9ljpNFGUc645CkBScDMY1m+e15nY0Hw9i1ewqrH652VRIp8IqYV3oYvts1+S63zcUZNgqLvSrO2cQ9zRNkROBMUHGL/e0YNPi6WBoCpKsYOcHn+G2K8eQbWiLsaeCKGHGxcVY9qWLtets/OOnkBUQ7WSXbZMOOnJhn6WTYat9FogmDDru80AEcVEBTQHRhIxYQsQFPRWVnWEBDoZCLCEb/Lj7diWZg708g89ORXDvzn3YsqQGTi6pj4s8HL40odQkl1b2o1qAnqkfx1DIOhY8nJELMtlX9GZLkM5GjqHOii9uJY+lXo4of/GEbJlPUAvNUpszUq+vFpzubQkgIljEWhha0/uqbZH6Hn2xTVQQ0QloNgwFBXFRhigrOHE6BkmWccvGPxt+o8qqZDWesDcMRVnsD+zYhA1VVz35+n4Dg9msqjL8n29eStwPvh5/UK1B+J97rwJDU/jd345j1qRRZ/X+s5FhQZQMjG9FFuQlTpbBax+dwF+PdePH116GNfOmoOVUBKVeHqVeHiuuGotYQkKBi9XGNG77dg3ynCx8bhYPvvSRwaebUJ5nmCylorUrCtGC+Iih0Gtzk42hDVsPZwdZVtARjCMmSuAdjCV5WLGHw9HOCMryeJR6eLCs7WvqkbYoTlGUoxRF3QbgnwFM6Hn5YwBPKYqyJdMvURQlQFHUmwBqAfgoinL0sMVVADjW15t2cTSR2tvN0UhI5A6UdBTDucIGYuP8gVVh52B0SvlcHIq9vKkT6d9fata+N7WKn7QHRFHGJ21BU0BxYnmeofDHypkSFeDZtw9rxlcgmrDsvM4UgzXSdCCLqzLpUh8u0CeEZFlGwyK/gclkzdzJiCUkPHhdFa5f+zZau6KgKSDfxRrkszSfRyCaQMupqPbag9dV4cGXmi0Z6PrrrPeXqSjVkVIZVi4dkZ/VmZIryYfzBRxDWwawbJxfGI5dslFBwt2NH6LUy5sCwusWVENUFNR/cTS+NKHcFCxWg7ytXVFcXOLGyAKngXlg/UI/Hpw9ESMLPfC5WJTm8RqTJ3CGeWPbt68gJj76u96Z6MpUO4imKIwocCIqSOiQ47btb2NQkI1d0Rvz0InTMcM44KmVPq1rt9TLY83cyYZRcOsX+vGTl/+GOf5K4nU7wwJWv9yMx+dNQYGLG3Ab+HwG6VneNXOcqYsznb6z8nMUJcl4ZsWmpk/Yzaoqw3e/Mg63P/eeSbc/9cZBwwhL1XY9G+NyRVHGJ+0hQzxn3YJq/HJPC2678mLLkVSrZlcRZbnlVBRLNr+v+VlbltYYWKVTfS8r/4wChafeOGDwV5964wAevmFyr3vDqokr28aqc4XedFexh8PdX5uAJ1/fj1Wzq1Ds4VCWx8PJkvVXWR6PikIXSr08vv+NCbglpeji337djI5Q3DCWaNmWD/Dc7eRRtpJCjve5OAZHOsOmdfa5zQWNsqyYGDdTbZ5LR+RpiXHSfrAqhMzGrsl1v28oxDOs9GUkLkOSZUOsQZJlyBZyxDIUZk+5AEs2nxnHvH5BNRw0ZTpj18y1Lurx8AwW1l1kuM66BdVwczSRoW73yhmDtjY2khiK+0zVS+p9no4J6IoksFJ3bq9bUI18l4DOUELTfwDwwHVVeOilZk2nXVjkRjCewL+/1IxSL4+2HqZgvTzrC+nu3bmvV6aWTPUdTdMDHgseDsh1mcwGpLOx0MUimjDq4WIvb2A6HghYNujX1xHl70ezq4jnhizLxOK68nzedH2VjWX1y80o9LCmGPjGW6eBY88wk1rFs/XFNh+fCGpF+ZeUeLC/PWTIw6xf6MemxdPw6clIchzr6/vxwLWXYVZVGVZ++RJ0hRMAkjHOYFxEac/I5HQYjrJow0Y6n4amKYwr9eLH116mxTcBYI6/EkdOkhvHVPKHdQuqDc08Gxb6wbO5H090cQy+/40Jmj6aVVVmahx5dM5kPPybZtxaNxrVo4sMsd+n509FPCEbmqKemDfFNGUk1af7xcorLXUMQ1OYVVV
mYBWvKHSBpmnLhjMbNs5XJBISjp2OoSMYR2dYwK6mFtx/9aXEvVWax2N0iRtFbt7eNwT0Nj71VgD/AuBfAexBklmzGsAaiqKQrjCOoqhSAImegjgXgK8CeBTAHwDMRXIk620AftXXm44IMn6WEqz82RsH8MC1l2FkgSvjgA2pi2TL0hp4nQ4T3bcNGwMJ1kEu7GQHYTwRTVMYXexBnpOFIEqQZAU/+U0yOKIab6Qq/lS5bw/FiV1UjfV1GOVzae+z7Lxnadz9tQkDGkzN1nHLppguEBXQdjpmYHdYM3cyfG4WRR7rJEk65oDhiNSEkH6s3mOvJkc2PL8sOZKyotCF5uNB7GpqwQ++WQUFCkRJQUyQ0BVJmNb6B9+8FKcifRtjm0lSeiCYiub4K80MdluzZ7ArdLFZMVXYyA4sQxHXmz3f25fPQwzHLlmpp8u/tSuKx3+7X7Ofy/J4/McrH2PJlWNw64wxuFkXDNIHeeu3NqGi0AWedWDJZuN71NHvarBk54o64vp1RxNgGQ4dQWMRWjbrrdfrLo7p1e7X20GyLONkWDAk5fui7+1mmsHBcFvXbO2KTJiH9EVBevbc1q4oHnt1P1ZfPwmVRS4AFF58/yjm+CsxqsCJtfOr8Z3tZ5KoaqK1tSsKChjSOi4XQXqWY0o8fdJ3vRVJZpKwm+Ov1Ire1O/TJ/AUAC8srwXV831na++1h+KaH6ze18rn9mDT4unEbld1JNWGNw+ZirvXzJ2Mx17dr11n2ZYPsHvlDJPvBUBjoWYdNEYW8NixvBaSAjhZGiUeHse7k0xh+sD81EofBFHqtYFsOEAvV6VeHnfNHIcxJR7wDho0BW2E+oTyPDx8w2TT+pJGMLs5Bo31dZAVxZB0UosuVDsjdSyRrCiWMQWSnlSgEH31zpCAeQ3vGPRwZzj9WKRkkZ0jq0LIbOyaXC86GwrxDCt9SVPAd7bvNb2+c0UdkTlIlBSTzryjh93+sVf3G2LQj726H//3W5cT7yciyFrhknqdlc/tsWQ6TIiZsfnYyB65Hl9R99nulTMQiUs4fDKsjTzVj+O2kiuS/ksWIv8ZFYUuyIqiFck1LPITizNVHai+1htTSyYMx51hAbIs45+/Ot5ULJQrOu5cIddlMluQzkZ9TmKwzpB0zSSkXIRVMb+koE9MtePKvFg1uwrr/vB3PDJnsumsPN4d1T5nNVFFfV3vn6nfmZqHuWNbEzYtno7VLzfjiXlTwLM0Hvr137Cqp8gvVQ8Uebi0OQugb7I43Px2G9ZbIucAACAASURBVEkMt+eaSTzmdDyBhCTjiXlTEIgmsOHNQ/C5WDzyyicmG7FhoR/5Lgc2L6nBY69+rPmKam50x/JawHMuf3HvEGXFcPa/1tyOe2ZNMNi2aoPQfVdfitt0zc6lXh5enoUoGZnn73nxQ6y+fpLJp9q0eDpOhQWNsbI0z0ks/H/o13/DXTPHa/ejtw+sGs6ywXCTbxvnH2IxEW3huKEg7rYZY7DjvaNYt6Da0DDTsNCPcg8Pp9Mel2qF3lZmJYAbFEU5onvt9xRFzUGyqC0dW9xIAM9SFMUAoAE0KoryMkVRzQBeoCjqJwD2Avjvvt60KCumYCUA/PCaKkiSDN5BGzpQeItCo9QuklIvj7bTMdz6832WB6YNGwMBWVYgp3SsyrLc51F9mR7qekNClhU8fMNkPHCtBIqitII4IH0nsxUdpygZA3hWQd0SD48SDz+gwdRsA8jZFNNFBYkYOMrE8B1IQ24oISbKhrF6KqSeJMfa+dUo9rIYV3Yp/v3/NWsG8PPLanH7libTWr+wvBZunjEldhsW+bVnnlos0XY63mtSeiCYioo9nIGGWnWosk0yd8cFYld7d1xAscNp+q22Ud8/SLJCXG/JHp963oF1WLAGDkLR+tmCPuC7tyWgJXxXza7SxoQ9d/sVRB3mc7EaS4ZkYQcI4pnXYwny2JACF4s7t+9FRyiOhkV+TChLssz29TwmBbc2L5mOF3sS7la6UD2HO4JxUyI8U32fa6N1hwuG47pma1foE6KxhAwHBTgYGm3BmHY9fVFQWR5v2Dt7WwJYsvl97FheCzfHGNgfZ1WVYcvSGlAUhQNtQS3gWVHoQkSQbCaAAQapiEQBucjHau3T+TlWMpaasLNipCj2cFgzdzIK3Sz+7dd/y4gJLVNkYp9a+ZUMTYElsPeqI6n2tgS04u5iD4cRBU58d/te7G0JGK6TEGVcUOg23FOqnlGL6dSCgxIPbzqTplb68P1vTNCKxlP1kxVbn/41miYXa/XWWHUuoMpVqZfXWCj1iYtn3z6Mf545HiN9TuJz1cu82oin+nfbLJjfVHYU/f9XFLpwojtmSkiliykc744SffWtS2u0v1U9nDo+KNXm6U+xRjZxhqFQdJbr8QwrfUnT5DGpgigTmYN+eE0VUTYYmkJHKK4VDAHpWcUtY2cy+RwYyn7GUEEm8ZVzDZqmUJbnhOxR4OEdeHr+VIM+0LMFq1Dlykr/qXG3Nb/9RDsrfS7WUgeqqChMThRJp5v6Urw/q6oMzy+rhQIFDEXZI4MxNGRyoHA2zhCrOE5ynLoTjfV1ECUZDoZGmTfJnEIsslcU4v6wYqo92B7SmN18LvPZrb8vUnPH2vnVKPKwWDW7SvPPgDN728pebu2KagUprzW34wffrMo6Z5GpLA5Hv93G8Hyu6ZgjR+QnZfp4IKaxuKu+jqwo6AjFDc3EEUECKEBWgM5Q3FSPoO7VXEdCNNumNEURRzpzDG3wh7/39QlYvMnMPL+3JaAx1Kpo7Uo2Rd/8zLtazq40z4kJ5XlorK/DsUAUnWFB+3zz8SAa6+vwwLXW8dxsoC/MP6lriBoO8m3j/EIsJuLvnWETq+Ozbx/GHH8lKABbl9agMyxgZIETJW4OPG8XxKVDb6uTn1IQBwBQFOUIRVH56T6oKMo+AFMJr38KoKYvN5kKRw+15hx/pWag7mpqgYOm0BaKG8Y2AUllTkpCpHaRrLhqrMmAHOojs2zkJhKyQuxYfWF5bcbXyNZo1Tujn3dFiMYcqZCHlKCoKHTBkRIIVCmIG+vrkJBksDqHE8CA7qVsA8jZFNOpTDt6tHYlR7jYSCI1MeSy6Lxz0BRWza7C2j8cxBx/JXY1teBH11ThgWsvAwBLx1+WFZyOJrDprcNaMqzIwyHf6UBnWEChi8XBjpD2XDctno7n3ztqCGY8+fp+U+Kvv8xQxR4OoiQZaKjVhFe2wb6oIFvrCM/wdFrPJQZCJ9sYHmBokMcjDeFcVYmHtxzHAySbQhw0hcdvmoLPOiN45JVP0BGKY83cybjA58Tq6yeBZSjLRJ6+eJSxGi9FA/dfPRGBaAI//d0B/OiaKlQUuvt8HpOCW4s3vY/V10/CiAInJpSnH/vXH30/HEfr5gKG47r2167oDBnHsj8xb4p2PbUoSO38t2IbKPbyBrYbtQB2x/Ja7Gpq0Qri1sydjPJ853nP1tFfWBWC6WVYlpU+6bt0fo6VjKUm7KwYKUrzeLScikCUFMzxV0KWB4alKBP7VJYVOGjKInGZ9BtT1+
miYrf22t6WgJZ85Bw0OkJxwz2QCkxIekbPiqPqnNQz6a6Z44jxod0rZ6DEwxN/K++gDaNbX1hWm3WS8mxDlatVs6tMDNgak9q2Jt2Za/Q79AXoN6x7C6VeHs8vuwIj8p1gKPIzD0QThv9XbRSV/U/14yoKXSjPcxr22UjdqF9rX10x/B1NSOAZiui3XVTs1p5ttv5UNnEGu9Gp/7CKQZ2KklnmnSyDVbOrEBcV0BRQ3FMAwTI0UTZ4i0kPHp78nCxjZxZ2ssN+3oOO3uIruQSrAqJ0cmVVONNYXwcHDdw1czyajwfR2hUFTZF1IE1R2jXXzJ0Mqcc2KM3jNT11vDuq6aneivfVMds+FwtZUXCiO4q7Gz80vPd8jl8NJZnMdciyglBMNOlXlXFbHyfWyx7Jzg5EBeL+8PBmptqGRX6UeLi049b1en9vSwDPvn0YW5bWgGUoKArwk980Y46/kliYYmUvq/GX1q6oVpAiyuRibDVnkc7WyFQWU/e1VYzdxtDC+RSPORZIFmwVezmtIE79t/t27cPj86Zo9p7aTPzonMk4HU1AkhUUe3nsXFGHzrCgsaWpezXXQSpkFyTZVKj79PypoClov9PDMZZ+4eqXm5NFgzpUFLrQGRa09+qnKCmKgrkb3jG8v7Uryeipb2jrL/QxCfU+h5N82zg/IAgiOiMJSIpiYo1V92Cxh8PJkICTIQGleTzKvDxY1m486Q29FcVFs/y3QYWbo/HdmeNNAQneQeF0LPMkROphYNVJbY+TsTHQkCyKfvrCSpSt0ap3hCiKPLud1Mlc5uVNdNrrF/pRmhLglWXF0uEcjGBHNh1n2RTTWVGrO9khXCkxgCAmwRZNw+Yl07F40/sGmflFUyt2NLVqAYTqCwsNgYpHbvwCea0dNEYWuPCDb1aBooBjgSjuafwQP762Ct99fi8aFvnx098d0D5X4uW0hLK+ECQ18ZftGF4VNE2BoWliwmv3yhlZraeVjlDZJIej03ouMRA62cbwQCQuEccjPXXLVBQP0eC0/syLJkQcag9rXXkkFpy186vx3LtHce/OZFDIydJgGBqlXpaYENz5wWfadzlZBv/262bT+j143WVap+CjcyYjGBfRGU46bX05j62CW26OyUgH9kffD8fRurmA4biu/ZEz9XzXF6WQCptK85wIxUUTe+7a+dVY+4eD+ME3q4jrerw7hrtmjsdD10+CLCtwcQyR1cBG5si0UaEv/ocsKwhEBUQFCZKiwMlmxs6SOlpyV1OLyX97ev5UnOiO4f7dfzUk9UrznP2Wg97sU3WtfrmnxTTmYf1CP8q9PBwOmrhOAIivpSYo18ydjFBMRIlH6XVUt56lTBAl0zOyKrQKxyVAiePJ1/ebfmvqCJkYoStfn6TMJahyZRUXU1/v7cxVmdgeuK4KUUHCop+/h1IvTywGeuzV/ago7BnT5WbRWF+HB1/6SGNKURNSu1fOSBtjsPLVg7EEGhb5NaaHtu4Y4qKkyb/62+7duQ8v1tehLK9/7Dz64qxURhoS7EangYFVDGpcqddyLPnRrgRaTkU1VpzKIpelT9+4vBYv/6UVmxZPB0NTkGQFOz/4DLfOGIMCl/l+SLGzDQv94B000c94ev5UuwhmkNFbfCWXYFW8QpKrdQuqEYyRC+ApisKIfCdkWYEoKxoLVLGXM5AJqHK+ZWkNdiyvNfhvLo6BICpoD8YN35uuqCjJpCobYnGbFk/XGFPV7zzf41dDSSZzHZ1hAbf22Bp6dqfyfB5d0URa2zRV/lTmxdRrAciqKT8qmONL9zR+iKfnT0VZHo8fX3sZEpKMbd++wjBBZeOt0yz3/MY/fgoguc/VewtEyHrAydK92hqZymLqvraKsdsYWjif4jGdYQH/suMv2L6MzGA9ssCJUflObZ9TFIVn3/oUt84Yg65IwsSWprJoD4XcIKmQPY934Ke/O2AoYE+IsiE+/OzSGuJaFXs4NCxK2rbqWqc2YKvvVWWptzjZQDUK6WMSdr2HjaGIeDzJDle/tQlb0uzBIg8Hn5tFvpNFkZuDw2Yfzwi9FcVdSlHUPsLrFICLB+F+MkJEkLWEHJAUgju2NeH5ZbWQLFgsSEmI1MNAHcuRbWGEDRuZgrXo9mH7cNBnY7SSHKENC/0AzLPbU+Fw0JhQ5sULy2shiLIWCHRUVxqCtkOlWKevxXQkph11hIsNi+e+9QPsvmOGSWauu7wCC+ouMiRhO4Jnxpw+8doBPDFvCu558UwX6eYl03H0VMTQWfrEvCkozePg5R1o7Up2n6jjAIFkcYaeJaW1K1lJ31hfZ7j3bMfw6mE1IiUhZhcc4Cy6gNURLcPRaT2XGAidbGN4gKbI45GGuiioZ54scwjHJY1Zh8SC853te7BlaQ0OtodQ4uVw74v70BGKo7G+Dpf2UN6rid4SNwvq8gr85qM2tHZFkedkieundjCrenjT4umavurLeWwVRAlEExnpwP7o+/4WUNsgYziua6GLJSbDC3Vjqaygnu/64J1+1E6pl8f3v3FmfIU6FrU7mkB7MI4iD4v7r74UgQiZHaczLGD1y81orK8b0G7c8xl98X0y0XeyrOBIZxhtp2MmtgvV57LSZaTRkoUuFrtXzkAkLuHwyTBCMdFUEKTv4O4PerNP9WvVFRG1AhPeQaM8z6kF8azWifRaeT6vJfrVRH5HKG74PenODvX/VZ2j/+6OYJz4uSMnwwCA22aMQUdQMIy6Sh0hc6I7OmQaq1S5OtEds1yvTM5czsHgrpnj0BVOaEUQrV1RPPbqfmxeUoNAREAskfzs/VdPRESQUJbHo8SbXPe7vzZBYzRSZdtBU2n3GclX37xkOrp6dJ7efyywSI4kpP4ndfvaIDhUYie5jnTrSCpiOBmKoyMYN401riikibIhKgoa/nQEDX86Yvi3BbWjLe8p3+XA5iU1oHtGbnEOChQFop1MUUPc0RgC6C2+kitIV7zicNCY2OOLJSQZCUnBM/9zCAfbQ6ZC8yfmTcHql8+MRy/zOtGVn0D91iYDA7GK1q4oOoJx3PzMuwBUVlkOLV0xnNTtFfW96YqKAEBSYGCWcXOMHb9KwVCRyaEA1f5UbVoVb933ZQBWY7TJspcQZeIY+YaFfhS6yPKeDpRFfImiKBzoCBlG+q2dX41//up4FLo5jMhPNqtUFDqxeUkNGApwMDS2vXMYjU2tmm7gHTRmVZXBwVB48qYpRjbGRUnfoDdbI1NZTN3XVjF2G0MLwzEeQ/KV1WKt1q6oJYO1i2PAsgxKe5iWApE4rplyAQ60hUzn4H279uHZpTVwOmiIudjtlAJSgx7LAEuuHKPFHEgF7J91RohrVejhIMsyHnv1E42xqjSPx/Z3j2i+sfpeVZbSxWMHslFIH5OwYs4fyvJtY3hDlhW060b+WtU7lebx8HAMvKwDTqc9LrUv6LUo7qzcRR9hNVpPUZIFFw2L/KY50aRkV+ph4OKYfhdG2LCRCTgHjfULqrWCnYpCF9YvqAbXh2rebIxWkiO0YltTxrPbAzER3+rpFlDxm4/aDEHb4Vqsk+2o1vMFV
0fB62LcuXX6dULvnsB7np20jtZ1USg8VXKpls9LfQdlafoUxGsk7mG9Wv4ZGLLo6EunaWMeHPBwOS9APAWnRl0dzp9PTF2LS9i4aaqrzVS7Q/xvVPrc/wVZbOa0m5np99fL/K5qW/E0fXsMuYSMZnN566P3tPrKO6Ksgpt7zI95atzvhdrjthP0IjxDcpxvOfyIdw89evPG4GP/rV67R19zE6UkVNKJBcTfyI6c3cddaB9PZZsTuA5voaPjV2FLs1jqJhVDXT7FUGHzjvYCaP3Zkgdd6cKRk5Ht++5xVqq0P852/fTMrhjPUlZFp6WgvxeJwPd/S46vyWjt6M4xc/8BoXHD41eY27zjqQj3b0DikeoqTS3x+ntTOaEndd8sw79MdjGe3cjfNaaKrVhLiRSMF+cWPMH0Vk97TDxwBz7P/vBJ4BLs312m5LajdGQvzk2Bn86z+kZtOmZzAn9mAfVxfmvgWzicUNb33YnpJVfNXj67j7rAPZ1hllbG2Y9p4+Wjt6aevuY9P2bk6/7SUe/e6h1Fa7bxHhNbiWazJbqVYoKnfKIZmwvSfGM29+xL3nzsYYg4jwyCubOHrmbozVpb0VxTeUaguHzmiMXzz/V26ffwBBezn7m//4Lt/50mcYX0K5KhW1yYriLxI2bvnCg/mgrZttndFkX33t5vbkali52kLn+V3Rft5t7SQowhHTmzmuZXJypbcHV24kGBBOveXl5Gy+2+bPYvnCgzHGpNynu899u5afnvT5otfXSGsThlJuLz+hOxpLCVY11VXTFY3xnXt3zhZNzG722qLnsqP2prWjl1HhIFd883PUhII011ez6xhrO8aevljGFoOrNrZx1ePrWLZgNgAiwuWPrqG1PZq8flc0RkCsvkB1VSC54nlXNEZ1aOdghZefWA6+kTJyGc7q+LnYgJBjR4DEyg+Lj9mXKU21RMJVA9oOt2fI+Sw7V5LYe2I9b33Yzi+e/2uKjWgcFeKHX9+HsbVhlr30V047pLjJ04PhbHf7YnHe+rCdODu3GYkZw6hwkCfXbqG1PcolR07j7rMOJGYMkVCQUJVQFQjw0PmH0NcfHzHtkOIPCt0PGqn9rEpisPhKJeMH/fWDDH5jJOtkvlH9Gh656OJIqGu3Mhp2rhCW2FbVuXPY0tNamNbsXi+5roidWPkw4Vsk4gG7NNSkfM/PPr5fZRtMfyeMrk7ZXe5fHnmD1o5e7j33IDZt72bT9u6U36V5dDV9sTgjZKG4ojz/bis1jqsNMyYS4pIHXkvGhJctmJ2Mye06poa4IbkDiNfKhIGAMHF0Ddu7rKSpxDPdEAm56msoKEnfN/Gbx41h+YLZGCAWN/z412t5cu0WHjjvYNdr9Dsm5jmPT2mu49lLD0val9N//pzuFjhM+vpirNvSkRF3XbWxjR88uIbLj57OsgWz6Y8bqgJCU21Yt0sdoRT7V59gjNkMYIzZLCLNXieKyAJgAcCnPvWpjM/dltR2MxKJDOZz71pBU101lxw5LWVb03vOOSi5N3yC1o5e+uOGWNzw121dTJ1Qy5XHzeCaJ9ZZ96mrpjsaIyjuywvH4u5LqeaazOaU3WnQK2X2RaH2vfdLMuFAOlwVEJat3MS1v1+fIuM3WyYVVUZFGYjB7PBIoRRbOFQFhOfe3cbylZuSxyY1RvinL08tqVzlRrY6rDZZ8Ssj2Q4HAoIxhuOXPJ9yPD2YlqstTJz/0SdxakIBevpj/OPhe/FtezWcxIwxa9bvzqDEWXessLe6HJVyvaqAuG7XEixyYNjPbUIh9dir3F5+hpefEDMm5dh5c6a4rvz98PmHem7RM6kxwkPnH0JVQOiOZvo3QY9tB1s7ehGxAnTxuOGHX59Oa3sv2zqj3Prnd/nOYZ8hbgxrN7ez6JE1GbIvmjudxY+t9fQT/eIblTMj2RYXmuEOmmRr+5rrqlkyryW58llrRy9NdtJq1SCrsrg9Q+nPcmIliTvOPDAZX0r04yc1Rli2YDbj66qJGcP2rv6iz/zORocT7W60P87ix9Zy91kHJsvY1tWXLPOqjW2cfPOLgFW2xcfsy14T6mhu9GcbpFQGQ4kR5xM/97OUwckmvlJoStmX8IP++kEGP+EHncwVP/eHVb+GTq66WO51nW2fuKm+2hFTiHPPOQfxk1+v5biWyRmrSi28e6VrAstQtjh1jgcvvHtl8jsNkVQ/388+fj5kK9S48UD62x2NceYdL2ccd+YArNrYlvxdEnGYxATHYlHJ/Qmn/if86yuPm5FMiAPrmYulbWvqzO8YLKmsqa6axlGhZGygrbvPVV8Tk+oSv3ni+L3nHERdTRVH3/Bs8jt9sTi3zz8gmVC55Jl3aO3opcoxMc957UgomJTt/e1dvkwiLST51OG+vhitnVH6YnFa23sJBYW+mMmI1Xzn3lXcc85BjK4JMiaiq8+PZHy7JrMx5iZjzCxjzKympqYhXycQEKY21bF84cH8zykzMwY3fvLrtRlLJy6Z10JbVx9X/PYtFj2yhraufl5572NWbWxj5uQGLjlyGife9ALfvXdVxpKZ152wH13Rfm5Mu+ZQktmc2dfPXnoYD59/aLLTFI8bWtt7eX97V8p2PuXCcLZJGQy/LOU8kA7XhNy36qsJ+faRVEYg+bLDSu6ojcgP2eqw1rfiV0a6HU4E05zkK9AXqgowKhwkFAwkE+LAXir/FysZFa7imYvn8MdLDuOiL0+1V4Mmo/+t2y8PTrH1eCA/w8tPqAml6prXbNFof8z1GkvntRAJBxkbCROLu8sUCgYY67Lt4NJ5LWzt6OX9ti7Wt3Zw6i0vcvyS51n82FouOHwvxtaG+fY9r2SsMpeQaZ+J9Sl+YuJ+CV01GO6yk1ucZa6UiVbFYKTb4kJSSDsPO5+Fj9p7mNRYw/KFB/PHi+ewfOHB7D2h3jUhzvn8fNzZizGGX5x9ELfPP4CZkxuY1BhhbG0oI+Zz5XEzuOn/3sk4fuO8Fv7tf99gzjXPcMZtLzHv4E9TW13cAatsdThcFeTZ9Vu4cV4LWzui/PSkz3PE9GbqaoJMHhvJsF83nro/v319M7HyCkcpZYjaYWU4+MHfVx1WnPhBJ3NFdbgyKUddHA7Z6nF/f5w3P9zBsT9/li9c+TSn3vIiFxy+F9N3qR8wgcXpR6RvqdhUV82Hn/Swqc17PHeg8WAnfhn/dGO4sg133Hio4+ZefmkkHGTpaanPyJJ5LTTXh2mqK36CaCXb4nT9X77wYO58bkMyIQ6s+q8JBZI6NlDsDqxn+YO2bj5o6+LNzTv45o3PccgVT3P9U29zzzkHMX2X+oz8kKXzWhgVDmT87lceN4Mf/3otPdGdE/tmTm4gIMKiR9Zw4k0vsPixtVxy5DTuOPMAqkMyaCwuVBVw1bvQABP3yj03JF863NPTzwc7evigrZu3Pmznly/9lbqaKsbXhV23ut2lvprG2hpNiBvhiDGFe2Ds7VMfM8bsa79fB8yxV4nbBXjGGDNtsOvMmjXLrFixIuVYttnizoz8a7+1Hyfe9ELGOX+6ZA7vt/UwYXQ1723t4vqn1t
Pa0ZtcHa61o5e7zjqQi5av5oLDp6bM0E9sxzqluY6aqgA7evqYf/vLNNVVc8HhU9ljfC2jqoOMr81f9ulQZhn4jdZ2a5/s9CzpfC0LmqV+FK2y0nX4o0+66errpyoQJGYMQRH64zFGhaqYMCYywJUUJYOi6LGbHVYKx0efdNPWHSUYCBIQiBuIxWM0RMJlZSP8ZIsH0mG1ycowKbkOVyqD9XmHM3s0Hje8t62TYED4+6ufyfj8Dxf9PV+69v+SQeH3WnewZ9NoFjpWlEsEM7Z39WbY68ZR1TSPrslzjRSMkvWJC8Fgfoab3gApunb7/AOSPt/MyQ2cN2cK42rD7NoQYaL9u27t7KWrN8aGrZ1c/9R6murDXHD4XsmVqBI6MrWpjvWtHVz3u3Wcf9hn6Os3NNVXIwKbP+nhyt++RWtHLz8/dX9u+MN6nly7JUXuu88+kMOu+b+MGbDp5Urg9dxMGF3tuoJdhVBROjySKGRsYyjXdn7HbaeBJfNa6I7G+I/fvMklR06jpy+eMSP86uNnsKOnn13H1DC2NszWjigf7uhhyTPvsGpjG5MaIyxfeDC7NqT0MX2hw/G4YdP2Ln7867VcdMReBAMBwlUBtndGaawNE4sbAiL2C+56bgOzpzTx2d1G01xfNm2eUhh8ocPDpVArkyilJYf4ivp1SlEYSsyvXOJrSvkQjxs+/KSbaCxOMBAADL39ptTx55L3JxL94VNueTHD9/7lubM5+eYXXH3ycbXhFN/jgfMOTu584Lbt6nB9Hj/3WYYjW67jxs57haoCdPT0D7qVppfMXr5jLBbnw/Ze+mJx/rptZ/7AdSfsx5TmOsbWpshVch2uFAb6TYDk737iTe7PZGMkxFsftXPeL1YmV/ZLP2/xMfty/VPrufSovZkwuob37Phea0cvyxbMZs0HO2iIhJL+/qqNbTz9/Tn852+srVO94nRXfPNzXPbQ666xOKfsARG2tPfwnXtXJct49fEzmDaxPl2vBq2TPD7/vtfhnp5+PursTe6y8eDKjZxxyB7c+dwGzjx0DwAmjqlBRAiKMKFOt0sdYXjqcLG14FHgDOAK++8jQ7lILg/+ts5o8jyvpTD/uq2LCaNrOO3Wl1I+u/TB11g0dzoL717Jx51RbjhlZsbWOqs2tnHmHS/z7KWHISLMv/3l5B7jZ97xctIA57ND4iwTlOc+04XeW97vSzkb4MNPelMC7FcfP4M9xqthVhQF+uKGs+9cmdFe3VfkJbmHQzklcKtNVhR/4pwlmB5MG66NCQSE3cfVsvmTbs+l8mHnynHLFsxOCbQk+t/3LZjtaq+LvYWCspPB/AwvP8Gpa5FwkJtPn8V1v1vHGYfs4Rq8FoR5t+4MlC89befWjIl7nnvXCpYvPDipp63tUa46fgYGw7xbUn3P8+95hUVzp6ckxW3a3p3crmPJM+9w5XEzMmRJn3G9tbPX01fcLW37X0UpNQPZ+eEylLiJ8zuL5k7P2GngvF+s5Pb5B3DZUXsTN4ZR4SAX3b86+UxeedwMrnp8HQCXHDmNb9/zSspn1zyxjlUb2+h3W1LSBwQCQjAgPLl2Cxd/dW/m3/4yd591INF+w/vbuzP6ynP2nsDEMRHGuwTsFaXcKCf/VcmNSoivKJVFrjqp9knJN2465VwgZCTbx22dUba097rGFNq6+/jZKfvznXtfyfDJ032PbZ3RZKzpvDlTMrZdHe54rp/HP4cjWy7jxm56fPXxM2iqq06O0WdbzwP5pds6+/nLlo6UxXIALly+mofOP2RI5VQGZ7BYQWLSa2LL1fRn8sMdPckYndeKclOa67jhlJmICCcsfT7lnLc/6nBNeHtvayeL5n6Wkw/8NJ8eN8r1uqFgwDUW56az135rP64+fgYBEdq6+7jq8XXccMpMqM2sk0rIDRkO8bihrTvK5raelEnrVx43gzuf28BxLZMJiHDR/atZOq+FpvpqGiMhQqHSby2t+IOCrYMrIr8EngemicgmETkbKxnuKyKyHviK/T5nvB78bZ3RjHOdjWhiICF9yctrn3ybjzujrsarIRJiUmOEbZ1R+uOGUCDAEdObk+fMnNzA7fMPIGYM3X39Rdn/eaCOQbksnVnobVL8Tn/cZATYL37gNfp9+nspilJcYnHjauf9atPdyKWtLjVqkxXFvySCabs1jqKpfufKy/mwMYGAEAjg6h8EA7BswWyWntZCU101/R522cte61ZypcPNzzhiejMiMqCP5NS1hkiYCaOr+dd/+Kxr8HprZ2/S95s5uYGlp7UwtbmORXOnM3NyQ/Kam7Z30x+LJ7+/amMbH3dG2bLDPcienuBmbQ1hbdfR2tHLNU+sY/Ex+/L09+dw/8KDmdpUlzIQFo8bunoLO/lIUfKNl50fLkOZiOf8jlfg/JPuPk686QUufuA1qkMBrvnWfvzx4jncddaBye1dLjh8akbf8tIHX+O8OVOY1BihKujfLanCVUGOmN5MqMoK5McNNI+udu0rTxxTQ3VVQAfklYqgnPxXJTcqIb6iVBa56qTaJyXfuOlUoq860u1jtD+WTGhzMqkxwgdt3fzs6fXcceaBGT55uu/hHIsebIvHchnTLQa5jBu76fHFD1h6nMDS53hW9evll0b7Y4wKB11/w75+f052qhQGixUMtOVwnyMWl1gwycmkxgiRUJDdGkdhTGa7fP1T67njzAO4ff4BLFswm9vnH8ANp8zk+qfWE4sbFj2yhvVbOlyv29bdB2Q+5+nbKm/a3s1F969mvGMr3qb6MJFw0FVnhxLjqBT70tPTz8btXXzS3ZdMiIOd7ddxLZMZVxumeXQ195xzEHs119E8ukYT4pQUChaJM8acbIzZxRgTMsZMMsbcaozZZow53Bgz1f778VCuncuD72xEV21s45on1nHHmQfywHkHc/v8A5JBSy+j2BWNceVxM3hw5Ube+rCdby19ngsO34sjpjczc3IDlxw5jUWPrOGLVz3DO1s6i5LoNdD+5sPZb72Y+Hnf+2IQ14CMoigDEAqIq52vKqMBp0KvCJpP1CYrSvmRNxtjhDuf28CiudNZtmA2i+ZO587nNtAXM5x40wssfmwtlxw5zdMuh4MB1+M1If8mPFQ66X7GEdObueDwvThh6fNZ+UiJmZtH3/Bscnaxk03bu+nqjfHOlk6OmN7M9786jcWPreVL1/4fix9bWrGvmgAAIABJREFUy/e/Oi2ZGJdIfnHqSFt3H13RmKveNNdXZ/hHHb39/PT3b3PFNz/HtSfsB8D3lr3Kt5Y+z/rWjpRybOuMsmFrcXxSRfE7Q5mI5/yOV4woMQC9aXs33713FbuOqaEmHOSK377JcS2TWbZgNp/ymDE+cXQNN85rYVwklI8iFoTGSIhLjtybqoBwxPRmakIB4i6DBJu2dxMQ0fZOqRjKyX9VcqMS4itKZZGrTqp9UvKNl04l/NGRbB/DVUEeXLkxY/Lkz07ZP7lF38X3r+a0W1/krQ/bkz55uu+xamMbdz63geULD2ZSY8TTL0nEH8phTLcY5DJu7KXHDQ5f64jpzWztjA6rfsNVQ
YwtixPnLhNK6fBKnAs5YnFuCyY59cotdtBUH6a3P86iR9Zw4k0vsOiRNfT2xWmqD7Nhayebtnd7LsS05Jl3ku+dz/kHbe4xxsTEOysGvTcffdLrqrO5xjgqxb709vbzl22dnHrLiwNOMm6qr2Z0TRWTG0dpMpziSllabBF3x0Eks7OW3oi2dvRSFRSOX/I8lzzwGmccskdyO5qrj081Xjeeuj81oQB3PreBMw7ZgyXPvGMtd/uLlVx+9L7ccMrMlNmy1z+1PuMahUj08uoY9MdN2cwaGiiDeyQQ9HB+gyOk/IqiDEx1KMCSeS0pdn7JvBaqy2jQKZe2utSoTVaU8iNvNkYMZ39hTxY/tjYZgDj7C3uSuExipqkEYGmaXb759Fk01VW79st1K7nSke5nXH70vq7bmnr5SM7Zxl5JMRu2dnL9U+u57Kh9MlaSc64GdfPps2hO05FX3tvG5LGRTL/xtFnsOiaS4h9NGF3N6be9xJNrt9AZjXH6bS9x5h0vs2pjm2s5ov0xrn9qfUZQbum8lhEz+UhRErjFTZbOayEYwDMI7PyOW4zIGeAGe4vjgDC+tpoLvzIt2ZYY4z5oMq4uzGOvbmJrl/9iNAm2d/ex8eNuOnr6+MHX9mHj9m7e29rl2VduiKhtUSqDcvJfldyohPiKUlnkqpNqn5R846VT9TWhEW8fx9WGufAr05KTJx8472DuOecgRoUDHNcymWueWJf0x0eFg0mf3M33uPAr05g4uoZdxkQ8E710JchUchk39koQ6orGkv//8OvTWXh39vEgN8bVhvmUSwzn6uNnYEx5JReNJJrrqpNtbSJJ9a6zDuTh8w9h8TH7MmH0zgQ6t+f3Ry66c9H9q/nh16dz/VPrgZ0LMS2aO50/XjyHe885KLkQk9tz7rUKZU9fLHmPjR93c+7d7jYh18WGKsG+xOOGrV3RZFzXK07bVF/NhNpqxtXVjJg8EyV3qkotwFAIirXNUWIAIhGcDLroefq+05FwkI6e/qQhTBiscbVhJjdGuOKbnyMUDNAXi9PR28+E0TUpnR2wDEeisXNmpK7a2MZVj69j2YLZABl7XOcLr720N3/inmXs11lDft73vtCIhw6rL6soCkB3X5wVG7Zy77mzMcYgIvxh7Wa+/NldSi1a1uTSVpcatcmKUn7ky8bE43Drn99l0dzpNERCtHX3ceuf3+UHR+2TPGfT9m56onHG1oYy+t9e/XJ1wEuL0894f3tXTj6Sc7ZxYtanU8+WzmvhR79aw6qNbXzS3ed67X0mWjqRriPxeJytnVGuevwtzjx0D+4++0ACIkRCQcbXVWf4R07ZB9tyBSz/M7HNakKnu6IxdmnQoJAy8kg8ew+dfwhdvTE2bO3kR79aQ2tHLzefPst1cMUtfvTQ+YfQE43R2x/n6ifeSsaFYOesbOe9evriVAXc26jtnVGW/uk9Tpm9e5FrI3ui/THG14UJVQVp6+pDIJls6yzPknkt1OjWqUoFUU7+q5IblRBfUSqLXHVS7ZOSb7x0alQ4wJ/f/mhE28dEv/4nx85IxniCATj6hhdT/PFJjdYWiQmffLDYkNdnuhJkJtmOGycShBJJP4kEoQmjq3n20sOIxQ3bu9xjNrnUr7UVp+Gqx9elxA6venwd/33S53Mun1IcqqoC7D2hnvsWzOZ9O5nqouWrk/78s5ceBrXWuW7Pr9ezGRChtaM3eWzVxjYWP7Y2GQP8ybEz+Nd/cH/O3WKMVx8/g0g4yMzJDaza2Oa5VW82diadcrcviZXunHXiFaedUFtNTU1ZpjwpRaQsNSQQCCQz9RMN0J3PbeAnx87wOH9nI9ra3suPf702+dAkDNbS01oIVQW47KHXU4zE7fMPYPFjazM6PInlKCc1RlI+a6oPIyIFzxB36xgkMuO9ZFV8hMFVhy//h8+WWjJFUXxAKCBM22UMp9z8QkoHOVRGg065ttUlRW2yopQd+bIxkXCQMw/dI7n6c8LefrijJ3nOpEZrC0yDe2BuJE/0KAdy9ZGc5ycmUS0+Zl+mNNcRCVkB8UQAbEt7r+u1I+GqFJ1I6Ehre29ypumTa7cwc3IDFxw+lSnNtclZn85gllOWxGzIgcrhDAovvHtlMiisKzkpI5VAQBCEebemDmKde9cKHj7/0KxtuhVHsnYaWLu5fWfw9bSdqzAGAkJzfQ0AH7R1u7ZRx7VM9v2WVOGqIJFwFX/b1pU85pZsOyZSpQlxSkVRVv6rkhOVEF9RKotcdVLtk5JvvHTq5AM/zbRdxox4+5juD8TjJiP56srjZnDNE+tSfPKBYkNen+mY7tAZKEGoNW5tP7lo7vS81G9VwEqEWnj3ypTr+NmvU6zEuOqqIBfdv3pQHUh/Rls94n3VoYBrMmYwAJs/6SZcFWSXMRHX2F76Qk1jIiEueeA1Wjt6WTTXWpmuKxobUGdziUGXs32Jxw0f7uihs7ef0TVVrnHaPZtqCQcDTBitE4GV7CjLpLjEErbpRiebLWGi/TGeXLuF1vZoSqdvfG2YhkhmZvnksRGuO2E/Lly+2vVezvOPmN7MBYfvxQlLn085d7jbgsbjhm2d0UEzf70y40uxVU62Mo9UamsCXHD4XsklPxMzrWtrRu7S1Iqi7KQ6JIyvr2bxMfsyKhykKxpjfH011aHysaPDaauLjdpkRSk/8mVjGiJhJoyuybC3Vz/+FrBzK5uaUIBGTSwqS3L1kdLPb+3oZeKYGiY1WEEtZ0A8sb2iM6ly6WktNEZCQKZPFI/Hk8GomZMb+P5Xp6XMbkz3HZ2yuN0rvRy6cqGiZJKP2dGJNue63+0MYDfXV7NrWrA7QXNddUbf8srjZnDncxu4cV4LTT7sDycYVxsmGotRX1PFxDHVdEVjyZhYItn256fuD6AJt0pFUU7+q5IblRBfUSqLXHVS7ZOSb9x06sZT96enL05DbUjtYxpuK1Bf88S65ArUw3kW/TSmW454JQgNtDJXtvXrjOdUBQPcfuYBnHn7yzufGZ/7dYpFLs+Y8zePhIOu3xtfW8342upk3C1UFaCjp5+jb3g2q9heYqGmK4+bwSUPvJZcuW5KUy1HTG/m0+NG5c0mlKN96e+P09rRS09/jPe2dnH9U+s569BPceO8Fr5tx1daO3ppqq+mriZIY6RaY55K1kg57Hk9a9Yss2LFipRjQ026am23MsTTM2MTs4TTr9sYCbGjt4/uaIyYgZpQgPG1Ox8y5/kikkyIc7v2UEgsD5lutLwS7fyQjJarzCWkaMKk6/CWHT309PcjBIgbQ0AEQ5yaqiqaR9cUSyylMiiKHrvZYaVwbNnRw46eKBAgIBA3AHFG14TLykZk2SaVXIfVJivDpOQ6PFLJV783vT/fFe1j48c9yQGK5vowTfU1lWwPStYnLha56spg5zs/r60OsqO7ny3tvWzrjPLgyo1c+JVpTG2qY31rR4pPtPS0Fn76+7d5cu0Wlp7W4roiebrvmB6U648b+vrjmvCWSsXrsDJ0BosBZUuudqS/P86Wjl6isThBEcDQ22+AOI21YcbVprQpvtHheNzw5oc7
kqtaHjG9mcuP/ixxA7G4IRgQRoWDNETU/igp+EaHh4MfYqpK/skhvqJ+nVIUhhLzK5f4mlI+JFbhceurljD+7Pv+RCH6Ctr/yD9OH3Dm5AbOmzOFcbVhdm2IMDGLVaXcxrivO2E/+uPWuEFbdx8PrtzIj4/9XHK1cBvf6/BIJJtnzO03v+usA6mrqRowBpdtvCEhQ3dfjHe2dHD9U+uTCXGTGiMsPmZfmkdXM63ZyqPIl00Ygn0pmQ7398d566P2jMmF1zyxjrMO/RT7f3oc/XFDVUCojwSoC2tCnOKKp1KU5UpxMPRtigbLjHW77tiq6uTe0gPJ8f72rrzvz7ytM5qUNXG9XLf6KDa5yjwSicbinHLzSxkN5X0LZpdQKkVR/EI0Fmf+7SvK3kb4oU3KBrXJilKe5MvGOK+zaXtXRdhfJZVcdWWw852ft7b3csotqVszrt3czvKFB2f4RAvvXsm95xzE2s3tNERCWfmO5dKWKopfydfs6Fyfxaoqa7LFvDT7kGxTPGJMpWZbZzSZEAfw5NotrN3cntzO5dlLD2NsrdokpTLRNrcyqZT4ilI5DEUn1T4p+SYQkIH7qoorhXgW9fnOP24rc918+qysEuLAfYz7wuWrkz5Rgh/NjResDEr+yOYZc/vNT7/tJR4+/1B2axzl+b1sV6ZPyBCPGzp7+2nt6AVISfxq7ehN5lHkyyaUk33Z0tGbTIgDqx4vffC15HP3wHkHU1td5ccFmJQyoWyT4oZKIbeUKcT+zPnY6qPYlKPMxSYWN651FI/7f+VGRVEKj9qI4qL1rShKArUHSq54+T59sbjr8WBAkr5ovn1HRVEyKeW2wuXYpnjZtIZISG2UoihlSTnaYqWyUZ1U/ILqolKpDNcHHMgnSjCpMUJQ83IqhqHmNeSaF5LQzWULZrNpezdt3X1c88S65KpxIzmPwiuOmohFZLvSo6J4ESi1AKUgkRm7W+Momurzt7xiIvt8UmMEIC/7MycMqhO/ByLLUeZiEw4GXOsoFByRj6SiKGmojSguWt+KoiRQe6DkipfvE/LQpXBVkKb6anYZE8m776goijuFigENRjm2KV42rSsaY+lpLWqjFEUpO8rRFiuVjeqk4hdUF5VKZjg+4EA+UeL/q4+fQSSsY96VwlDzGoaSFxIICOGqIBfdv5qFd69M2UZ1JOdReMVRu6KxnFZ6VBQvtHeTR5zZ589eehgPn3/osJdxLESiXaEpR5mLTVNdNUvmtaTU0ZJ5LTTVlccypoqiFBa1EcVF61tRlARqD5Rc8fJ9muuqB/SJCuE7KoriL8qxTXGzaTeeuj97T6xjWrPaKEVRyo9ytMVKZaM6qfgF1UVFccfNJ1oyr4W66iqWLZjN4mP2ZcLoGhoiOuZdKQw1r2GosT3No8ik2aNNmjF5tMZLlbwgxvh/KdxZs2aZFStWlFqMkhGPG7Z1Rou+1cdwKBOZiyaQmw7398fZ0tFLfyxOVTBAc101VVWap6rkTFH0eKTb4VIwgmyEL3R4BNW3kn98ocNK/hiB9qCkfeJKwMv3KROfqBJQHVZ8S5Ztiq90OB43bO3spacvTlAgEg7SEFH7pQyIr3RYUdLxky1WHVagYD6n6rCSMz6Lf2h/QvEN6fGcxkiI7d19g8V3VIfLmGLH8HwaM9S8CaXc8dThqmJKoQyNxDKv5UQ5ylxsqqoC7NoQGfxERVFGJGojiovWt6IoCdQeKLni5fuoT6QoSjm2KYGA0FxfU2oxFEVR8kY52mKlslGdVPyC6qKiuOMWz9H4TmVT7Biexgwz0TZJKSSaXqkoiqIoiqIoiqIoiqIoiqIoiqIoiqIoiqIoiqJUDJoUpyiKoiiKoiiKoiiKoiiKoiiKoiiKoiiKoiiKolQMmhSnKIqiKIqiKIqiKIqiKIqiKIqiKIqiKIqiKIqiVAyaFKcoiqIoiqIoiqIoiqIoiqIoiqIoiqIoiqIoiqJUDGKMKbUMgyIircBfPT4eD2wtojilvq/eO39sNcYcmcfreeJTHU5H5UjFL3LAwLIURY89dNhPdTQctBylpZQ67IYf6lFlKP39c5HBbzo8EH6o13xRSWWB0pbHL33iXNDf398Uuzyl0mE//W4qizvlIks52mEvKilmpPfO/t7F1uFO/PNsFwM/2bJCU/F94jKLTeSbSiuT38pTah32W334TR7wn0x+k8cPfWI/1YlfZPGLHOAfWbzkKKYOtwPrinGvHPHLb5SOH+Xyo0x+sMPDRfOGKv++A93bU4fLIiluIERkhTFm1ki5r967NPcuJH4pl8rhTznAX7I48atcuaLlUJz4oR5VhtLf3y8y5JtKKlMllQUqrzyFptLqS8tTnvipnCqLOypL8RmpMSO998i4b6kYSeUdSWUdjEqsi0orU6WVZ7j4rT78Jg/4Tya/yeMH/FQnfpHFL3KAf2Txgxx+kMENlSt7/ChTJTASfVQtc3bo9qmKoiiKoiiKoiiKoiiKoiiKoiiKoiiKoiiKoihKxaBJcYqiKIqiKIqiKIqiKIqiKIqiKIqiKIqiKIqiKErFUAlJcTeNsPvqvSsPv5RL5UjFL3KAv2Rx4le5ckXLoTjxQz2qDKW/P/hDhnxTSWWqpLJA5ZWn0FRafWl5yhM/lVNlcUdlKT4jNWak9x4Z9y0VI6m8I6msg1GJdVFpZaq08gwXv9WH3+QB/8nkN3n8gJ/qxC+y+EUO8I8sfpDDDzK4oXJljx9lqgRGoo+qZc4CMcYUQhBFURRFURRFURRFURRFURRFURRFURRFURRFURRFKTqVsFKcoiiKoiiKoiiKoiiKoiiKoiiKoiiKoiiKoiiKogCaFKcoiqIoiqIoiqIoiqIoiqIoiqIoiqIoiqIoiqJUEGWbFCciR4rIOhH5i4hcVuB7TRaRp0XkTRF5Q0T+yT4+VkR+JyLr7b+NBbp/UERWichj9vs9RORF+77LRCRcoPs2iMgDIvKWXfaDi1jmC+26XiMivxSRmmKVu1gUU4fT7uulz5eLyPsi8qr9+lqR5HlPRF6377nCPlYUPXPIMM1R7ldFZIeI/HMx6kREbhORLSKyxnHMtfxicb2tM6+JyP75lidLmUuiu/kgl/r2K6Vuk8qZwXRXRKrt9uUvdnuzu+OzH9jH14nIVwt0/++JyFr7+X5KRD7t+CzmsEWPDuX+WcowX0RaHfc6x/HZGbZ+rReRMwoow3WO+78tIm2Oz4ZdD252IO1zT1ubrzooNNnaaRE5XkSMiMwqpny5kE1ZROQE+9l5Q0TuLbaMuZCF/n/KtvGrbP0rSn+s3JA0/6icERefq9QyDQdx8eNKLVO+KWVfeIB+4Ij15RyylMyns+9fdn7dUPHSw7Rz5ojIJ456/5c83j9D79I+L0j9eulY2jl5K3cuOuXy3WH1WT3ufbXdVr0mIg+LSIPHdwf8fYZLKW1woRnOb15uDNCeVWR53ci1Dsql7ZAsxzFkgPiLX5AcxkbK5fcZCn5rEzzkyaq/V6g2xEOmZQ553hORVz2+m9c6Gq59lTKJexWCYvYxcmnzC21f/NIeiTUG/JKIrLb
l+Df7eEnaEfFxe1ZMXR1EDt/1XYdrAwsoV076rWRHqXSwVHpWaj3K1i4W4L75iT0aY8ruBQSBd4A9gTCwGphewPvtAuxv/18PvA1MB64CLrOPXwZcWaD7fw+4F3jMfr8cOMn+fwnw7QLd907gHPv/MNBQjDIDuwEbgIijvPOLVe5ivIqtw2n39tLny4Hvl6Au3gPGpx0ryrM1wG/zIfDpYtQJ8EVgf2DNYOUHvgb8FhBgNvBiCX6vkulusevbr69St0nl+spGd4HzgSX2/ycBy+z/p9vnVwN72NcJFuD+hwGj7P+/nbi//b6jSHUwH7jB5btjgXftv432/42FkCHt/H8EbstzPWTYgbTPXW1tvurAD7pun1cP/BF4AZhVarmHobNTgVWJ3wJoLrXcwyzPTdh9XNv2vFdquf34Is0/KucXLj5XqWUaRllc/bhSy5XnMpa0L4z6crn8TkXz6ex7lpVfN8yyuuph2jlzCmWj3fQu7fOC169TxwpV7lx0Ku17w+6zetz7CKDK/v9Kr2d7sN8nD/VetvGIQv3m5fgaoD2ryPLmow7Kpe0gy3EMPOIvfnqRw9hIufw+Q6wHX7UJHvJcziD9vUK2IW4ypX1+LfAvxaij4dhXyiTuVYhXIfUjW50plX3JVWcKJY99vTr7/xDwon39krQj+LQ9K7au5kuPiyiTL/uYueq3vvytg6XSs1LrUbZ2sQD3fY88xB7LdaW4A4G/GGPeNcZEgfuAYwp1M2PMZmPMK/b/7cCbWAH/Y7CcI+y/38j3vUVkEvB14Bb7vQBfAh4o8H1HYxmTWwGMMVFjTBtFKLNNFRARkSpgFLCZIpS7iBRVh50MoM9+olh65sbhwDvGmL8W42bGmD8CH6cd9ir/McBdxuIFoEFEdimGnA5Kprv5IMf69iWlbJPKnGx011mHDwCH2+3uMcB9xpheY8wG4C/29fJ6f2PM08aYLvvtC8CkHO8xbBkG4KvA74wxHxtjtgO/A44sggwnA78cwn088bADTrxsbb7qoNBkW8eLsZyHnmIKlyPZlOVc4Gf2b4IxZkuRZcyFbMpjgNH2/2OAD4ooX1mQ7h+VMwP4XOVMuh9XaTpc0r6w+nJZU1SfDsrSrxsyZaCHxajfguvYMHzXYfdZ3e5tjHnSGNNvvy2Er5INZR2PGIxKiFdki8Y1hlQHvm87chzH8Iq/+IIhjI34/vcZKn5rE7KI6XhRsDZkIJlsvT6BPMe2BpBlOPa1XOJehaDYY86+8R380h7Z1+uw34bsl6EE7YjP2zPf9If92Hf1ax9zCPqtZEGpdLBUelZKPSpVvtIA5FzX5ZoUtxuw0fF+E0UKwNlLn87Eyr6cYIzZDNYDADQX4Jb/DVwCxO3344A2h9NRqLLvCbQCt9tLId4iIrUUoczGmPeBa4C/YSXDfQKspDjlLhYl02EnafoM8F2xljy+rYjLxxrgSRFZKSIL7GPFeLa8OIlUJ7UUdeJVfj/ojR9kyDel1LdhUYI2qZzJRneT59jtzSdY7W4+9D7Xa5yNNesuQY2IrBCRF0RkqJ3LbGU4zrZ7D4jI5By/my8ZEGv72D2APzgO56Mehipjudi/QeUUkZnAZGOM37eezKbO9wL2EpFnbb3wc8A2m/JcDswTkU3Ab7BWS1RSSfePyhkvn6sscfPjjDFPllaqvOObtkB9uQHxg08H/vbr8oKLHjo52N7W47ci8tk83tZN75wUo37TdcxJocoN2T1fxSj/WaT6Kk4G+32GQ8U8OzngB5taUDSukXUdlIP+5zKO4RV/8Qu5jo2Uw+9TKErVJqQzWH+vVL/R3wEfGWPWe3xesDoagn0dyXrsh7KX3L6Uuj2yt+Z7FdiClZT5DqVpR/zcnvlBVwfCN305v/Uxc9RvZegU9bcutp6VUI9Kla8EeYo9lmtSnFuWtSn4TUXqgAeBfzbG7CjC/eYCW4wxK52HXU4tRNmrsJacvNEYMxPoxFp+sODYTssxWAPhuwK1wFEupxb8Ny8gJdHhFAEy9flGYArweaxBrGuLJMqhxpj9sX7j74jIF4t03wzs/a6PBu63D5WqTrwoud74RAaF4rdJFUA2uut1Tj70PutriMg8YBZwtePwp4wxs4BTgP8WkSk53j9bGf4X2N0YMwP4PTtnW+Tr2c/lOicBDxhjYo5j+aiHwSikHhSDAeUUkQBwHXBR0SQaOtnUeRXWFqpzsFYWvEVEGgos11DJpjwnA3cYYyZhbUlxt/2bKXj6R+VMyXyuQuDmx9ltWiXhi7ZAfTlvysCnA5/o0XAZxB95BWtr0f2A/wF+lcdbD6Z3Ba1fFx1zUshyZ0uhy/9DoB+4x+OUQtqFinh2lJ1oXCOnOvC1/g9hHMPX5SH3frrfy1MQStwmOMmmv1eq32iwHRAKUkdDtK8jUo9t/Fz2osjmh/bIGBMzxnwea/XJA4F9BrhXQeQog/bMz7rqG/zYx8xRv5UyoBR6Vgo9KnG+EuSpr1SuAyybgMmO95Mo8LYsIhLCUux7jDEP2Yc/SiwLa//N93ZNhwJHi8h7WEugfgkrE7NBrO1ooHBl3wRsMsYkZvs+gOUIFrrMAF8GNhhjWo0xfcBDwCEUp9zFoug67MRNn40xH9nGNA7cTO5bAw4JY8wH9t8twMP2fYuhZ24cBbxijPnIlqkkdYJ3+UuqNz6SId+USt+GTInapHInG91NnmO3N2Owll/Oh95ndQ0R+TLwQ+BoY0xv4rjDVr4LPIM1+yRXBpXBGLPNcd+bgZZc5M+HDA4yVuDIUz0MhpeM5WL/BpOzHtgXeMbuY84GHhWRWUWTMHuyfW4fMcb0GWt743VYSXJ+JJvynA0sBzDGPA/UAOOLIl15kOEficgvSivSsPDyucoVLz+ukih5W6C+3KD4xacDf/t1w8LDH0lijNlh7G09jDG/AUIikpf2zEPvnBS6flN0LE22gpXbJpvnq2DlF5EzgLnAqcYY14B3Fr/PcCj7Z2cIlNqmFgyNa+RcB37X/1zHMbziL34h17ERv/8+eccHbYLzPtn090oxplgFfBNY5nVOIepoGPZ1xOmxAz+UvWT2xW/tkbG2q34GK2ZZ7HbE7+2ZH3R1IErel/N7HzNL/VaGTlF+61LrWZH1qJT5SnmLPZZrUtzLwFQR2cOeoXkS8GihbiYiAtwKvGmM+S/HR48CZ9j/nwE8ks/7GmN+YIyZZIzZHauMfzDGnAo8DRxfqPva9/4Q2Cgi0+xDhwNrKXCZbf4GzBaRUXbdJ+5d8HIXkaLqsBMvfU4YD5tjgTVFkKVWROoT/wNH2Pcthp65kTJzqxR1YuNV/keB08ViNtaWVJuLJFOCkuluASmVvg2JUrVJFUA2uuusw+Ox2l1jHz9JRKpFZA+shJuX8n1/sba0XIqVELfFcbxRRKrt/8djdULX5nj/bGVw2r2jgTft/58AjrBlacSy108UQgZbjmlAI/C841i+6mG7CHDcAAARGUlEQVQwvGxtvuqg0AxYx8aYT4wx440xu9t9zBewdG5FacQdkGz05VfAYZDUi72Ad4sqZfZkU56/YfV9EZ
F9sJLiWosqpY/x8I/KdiWyAXyucsXNj3tzkO+UGyXtC6svlxV+8enA337dkBnAH3GeM9E+DxE5ECv+uC0P9/bSOyeFrl/PFV8KVW4H2TxfBemzirVF/aVY/cYuj3Oy+X2GQyXGIwaj1Da1IGhcY0h14Ou2YwjjGF7xF18whLERX/8++cYnbYLzXtn090rRhnwZeMsYs8ntw0LU0TDta7nEvQqBH/oYJbEvfmmPRKRJ7J0fRCSC9fy8SZHbkTJoz/ygqwNR0r6cX/uYQ9BvZegU/LculZ6VSo9Kma+U19ijMaYsX1jbCb2NtVfuDwt8ry9gLfn3GvCq/foa1n65TwHr7b9jCyjDHOAx+/89sQbj/4K1XUN1ge75eWCFXe5fYQ1OF6XMwL8Bb9mKfTdQXaxyF+tVTB1Ou6+XPt8NvG4ffxTYpQiy7Amstl9vJOqhmM+WQ5ZRWMHqMY5jBa8TrGD6ZqAPa5bH2V7lx1qO9Ge2zrwOzBpJulvs+vbryw9tUrm+3HQX+HesIB5YySf32+3MS8Ceju/+0P7eOuCoAt3/98BHjt/1Ufv4IfYzv9r+e3YB6+A/bXu8GqtTu7fju2fZdfMX4MxCyWC/vxy4Iu17eakHDztwHnCe/bmnrc1XHZRa19POfYYStSd50lkB/gtrgOJ14KRSyzzM8kwHnrX1/FXgiFLL7NcXDv+onF+4+FyllmmY5cnw40otUwHKWLK+MOrLDSZTSXw6+z5l59cVQA+d/anvsrNP+QJwSIH1Lqu+XIF0rCDlzlGnZgG3OL47rD6rx73/Amx0/OZL7HN3BX4z0O+TZ/0r23hEPn/zcn8NYEcqsrz5qINyajvIYhyDAeIvfnmRw9hIOf0+Q6gHX7UJHvK49vec8tjvC9KGuMlkH78Du4/gOLegdTQE25LXPkQ5vwqlHznocUnsyxB0piDyADOAVbYca4B/sY+XrB3Bp+1ZMXU1X3pcRJl82cfMVb/15W8dLJWe+UGPsrGLeb5f3mKPYn9RURRFURRFURRFURRFURRFURRFURRFURRFURRFUcqect0+VVEURVEURVEURVEURVEURVEURVEURVEURVEURVEy0KQ4RVEURVEURVEURVEURVEURVEURVEURVEURVEUpWLQpDhFURRFURRFURRFURRFURRFURRFURRFURRFURSlYtCkOEVRFEVRFEVRFEVRFEVRFEVRFEVRFEVRFEVRFKVi0KQ4RVEURVEURVEURVEURVEURVEURVEURVEURVEUpWLQpLgyQEQmish9IvKOiKwVkd+IyF4i0i0ir9rH7hKRkH3+HBF5zP5/vogYETnccb1j7WPHl6pMimLr4atpr7iIfNvWz390nHuDiMwvobjKCEREOuy/uw+kkyJyh4hsEJHVIvK2bY93S7+O4/18EbnB/n+aiDxj6/+bInJTUQqnjCgG6EesSTvvchH5vuN9lYhsFZH/TDtvroissnV+rYgsLFZZFMW2x9c63n9fRC53vF8gIm/Zr5dE5Av28aCIrBSRLzrOfVJEvlXUAiiKAxGJ2X2ANSLyvyLSYB9P9D0WO84dLyJ9iT6EopQaR1xhb8exqSLymN3nWCkiTyfsrt0Hbk3z/6aXrgSKkmKH37D7tt8TkYD9mTO2NsHW7UT/9zellVwZ6Xj1IRyfXygiPSIyxnFsjoh8Yvty60TkjyIyt/jSKwqIyDhHf+BDEXnf8T7s0c+YZet82H4/RUTeFZHRpSuJUkmk2db7RWS3QfQ0J1ssIl91fL/DtsWvihVLTvY77HO/ISKv2bGN10XkG8WuD2Xk4tDt1SLyiogcUmqZFEXSxtnsYxnjawPZWsf3fmrb9ITvd6bjO1Hb7r4qIlcUs4xKZWP3be92vK+y42TOnJ6MuJlYceJu2497U6wxjzPs7+wuIpsSuuy49qsicmBxS+hvNCnO54iIAA8DzxhjphhjpgP/D5gAvGOM+TzwOWAScILHZV4HTna8PwlYXTipFWVwjDEPG2M+n3gBPwf+BDwBbAH+KRHkUBQfMJhOXmyM2Q+YBqwCns5Sf68HrrOfg32A/8mPuIpiMUg/YjCOANYBJ9jXQawE/JuAf7B1fibwTCFkVxQPeoFvisj49A/sQb2FwBeMMXsD5wH3ishEY0wMOB/4mYiERORkwBhj7i+m8IqSRrfdB9gX+Bj4juOzdwHnQPW3gDeKKZyiDMLJwJ+x4guISA3wa+Amu8/RAvwjsKfjO8ucPqAxZm3RpVaUVBJ2+LPAV4CvAf/qct6/A78zxuxn96cvK6aQiuLCQH0IsGz0y8Cxacf/ZIyZaYyZBlwA3CCOidSKUiyMMdscMeEl7IyNfd4YEyWtn2F/ZwXwRyAxme9nwA+NMTuKLL5SuThtaxQ4cRA9zckWG2OecFxvBXCq/f5055dEZD/gGuAYO7ZxNHCNiMwoXNEVJYWEbu8H/AD4z8G+oCglImN8bTBbaycPHQtsBL4IYIy53fGdD4DD7Pfq9yn5pBPYV0Qi9vuvAO+nneMVN3vH9uP2weofXygiZxpj3sPS5b9LXMCeVFJvjHmpoKUpMzQpzv8cBvQZY5YkDhhjXsVS8MT7GPASsFvm1wEr0ehAewCwDvgM8GrhRFaU3BCRvYB/AU4D4kAr8BRwRinlUhQHWemksbgO+BA4Kovr7gJscnz/9eEIqSguDNqPGICTgZ8CfwNm28fqgSpgm32tXmPMurxKrCgD04+VmHmhy2eXYiUpbwUwxrwC3IkdmDbGvAg8B1wO/AeZAWtFKSXPk+rPdQNvisgs+/2JwPKiS6UoLthxhUOBs9k5WH0q8Lwx5tHEecaYNcaYO4ovoaLkjjFmC7AA+G5iQoiDdL/ttWLKpiiDkNKHEJEpQB3wI1InSadg+4X/Dny30AIqSi549DMS/D/gHBG5BAgZY35ZbPmUEcOfsMbRsmVIttiD7wP/YYzZAGD//U/g4hyvoyj5YDSwvdRCKIoHQxlfOwxYA9xI7vZZUYbLb4Gv2/+fDOTclzXGvAt8D2uSE/Y1nH3mk4Zy3UpHk+L8z77AyoFOsGdkHwQ87nGKAX4PfBU4BnjU4zxFKTr2qkP3At83xvzN8dEVwEUiEiyNZIqSQS46+Qqw96BnwXXAH0Tkt/aS+g2DfkNRcmOgfsQU51LMWKtqAWDPVjkceAyrA30ygDHmY6x+xF9F5Jcicmr60syKUgR+Bpwqju2gbD5Lpr6vsI8n+AHwz8C9xpi/FE5ERckeu29xOJl+2n3ASSIyCYhhzVZVFD/wDeBxY8zbwMcisj+WrX1lkO+dmLYNRGSQ8xWlqNjB5QDQnPbRz4BbxdoS+IcismvxpVOUTDz6EInBlT8B00QkXZ+dZBu7UJRi4tbPAMAY0wZciZUgdH6J5FMqHBGpwprsnNXk5TzY4nSyiW0oSiGJ2P7aW8AtwOJSC6QoHgxlfC1hnx8G5tpj1IpSLBKx3hpgBvBi2ufZxs2cftxy4Bt2/wWsidX35VvwckcHMcubKfYg9jbgb4PMVL0PKzNUs0MVv7EYeMMYk2Kg7RlQLwGnlEQqRUkjR
51MX1kg43L2NW8H9gHuB+YAL4hI9TDEVJRceCdtG+sljs/mAk8bY7qAB4FjEwmhxphzsIJ9L2HNXr2tyHIrIxx7e5y72DkbaiAE2+bafBH4BCthVFFKTcThz40Ffpf2+eNYS+mfDCwrsmyKMhAnszPAdh8us6tF5GERWSMiDzkOp28D0V0MYRUlRzJ8OWPME1hbAd+MFXheJSJNxRZMURwM1Ic4CbjPGBMHHsLagt2LwWIXilIKButnHAV8BEwvplDKiCBhW1dg7Zpwa5bnD9cWp5Mex/A6piiFIrF96t7AkcBdLispK0rJyXV8TUTCwNeAX9nx5ReBI4ogqqIAyVXnd8fq3/7G5ZRs42ZJm2yM+RB4AzhcRD6PtXPUmjyLXvZoUpz/eQNo8fjsHXsQ+zPAbBE52usi9r7B+wLj7VlWilJyRGQOcBzeWzX8B9ZWaGqrFL+QrU7OBN60/++2O9sJxgJbE2+MMR8YY24zxhyDtS2gJmoo+WSgfsRAnAx8WUTew5qdOg5raXHAWorc3ir4K1h2XFGKzX9jbadT6zi2lkx9398+jojUAlcBXwKaRORrRZBTUQai2/bnPg2ESdvS1xgTxbLBF2ElKCtKyRGRcVh29Ba7n3Ax1izUN7BsLgDGmGOB+Vh9X0UpC0RkT6yVObekf2aM+dgYc68x5jTgZaxEe0UpFa59CBGZAUwFfmfb6JMYeFsoZ+xCUUqOVz8jkYwhInOBMVg74lwtIqNKJatSkXQ7BqH/0fbHBj2f4dvidN4AZqUdS8Y2FKWYGGOeB8YDOiFE8SU5jq8didWPeN22z19At1BVis+jwDUMbxGrdD8usYWqLo7lgSaa+J8/ANUicm7igIgcgNXRBsAYsxm4DGs7qIH4AfD/CiGkouSKiDQCtwOnG2Pa3c4xxryF5ezNLaZsiuLFYDopFhcAu7BzS+v/A+bZn0eAE4Cn7fdHJpZnFpGJWIlH7xeyDMqIY9B+RDoiMhrLIfyUMWZ3Y8zuWIG9k0Wkzk5oTvB54K+FEFxRBsLeync5VmJcgquAK+2BFOyZUfOBn9uf/wuw3Lbl5wPX2UuVK0pJMcZ8grXy4fddtm24FrjUGLOt+JIpiivHA3cZYz5t9xMmAxuAt4FD0ybr6UC1UjbYK78tAW4wxpi0z76USLwQkXpgCtYKMopSUlz6ECcDlyf8OGPMrsBuIpLh/9lJG4uwtgdWFL/g1c/4gh1Tuxb4jjHmdeAR4IcllFVRgOHZYg+uAX4gIrsD2H//H5b+K0pREZG9gSDWioiK4iuGML52MnCOY8xjD+AITbJXisxtwL/b/dmcsfsF1wD/4zj8INYqiLp1qgdVg5+ilBJjjBGRY4H/FpHLgB7gPeCf0079FXC5iPzdANf6bcEEVZTcOQ9oBm5MW3k5PYP5J8CqYgmlKFngppNXi8girIG/F4DDHLMJ/wlYaifLCVZw74/2Z0cAPxWRHvv9xfZSt4qSF3LoRzj5JvAHY0yv49gjWAlH3wMuEZGlQDfQiZV0pCil4Focq80aYx4Vkd2A50TEAO3APGPMZhGZDhwL7Gef+6qIPIG1+ue/FV90RUnFGLNKRFZjzej7k+P4G1grBSiKXzgZuCLt2IPAKVgTR/5LRP4ba1uzduDHjvNOFJEvON6fb4x5rpDCKsogJLY9C2GtKnA38F8u57UAN4hIP9YE41uMMS8XT0xF8SatD3ES1taSTh62j78I/J2IrMKKXWwBLjDGPFVMeRVlEAbqZxyFtd1ZYrWsy4FXReQOY8z64omoKJnkYIuvzOJar4rIpcD/2skefcAlxphX8yy2oniR6CODNZ5xhjEmVkqBFAUYJSKbHO//C5hEluNrduLbV4GFiWPGmE4R+TPwD8CywoitKKkYYzYBP/X4OCNuBnwATLH9uBqsWNv/2NsHJ67ZJiIvABOMMRsKJHpZI2mTHxVFURRFURRFURRFURRFURRFURRFURRFURRFURSlbNHtUxVFURRFURRFURRFURRFURRFURRFURRFURRFUZSKQZPiFEVRFEVRFEVRFEVRFEVRFEVRFEVRFEVRFEVRlIpBk+IURVEURVEURVEURVEURVEURVEURVEURVEU5f+3awcyAAAAAIP8re/xFUfAhhQHAAAAAAAAAADAhhQHAAAAAAAAAADAhhQHAAAAAAAAAADAhhQHAAAAAAAAAADARgaVkQcvGMwAAAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "# 画图看特征间的关系,主要是变量两两之间的关系(线性或非线性,有无明显较为相关关系)\n", + "features_np = np.array([x[:13] for x in housing_data], np.float32)\n", + "labels_np = np.array([x[-1] for x in housing_data], np.float32)\n", + "data_np = np.c_[features_np, labels_np]\n", + "df = pd.DataFrame(data_np, columns=feature_names)\n", + "matplotlib.use('TkAgg')\n", + "%matplotlib inline\n", + "sns.pairplot(df.dropna(), y_vars=feature_names[-1], x_vars=feature_names[:])\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAwgAAABWCAYAAABrabENAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAgAElEQVR4nO3dd3hU1dbH8e+aSUKAgErvgtIRpESqDRSlCChYUUBEAekoqIgicK8VVNCLIi8qoFe8iIqNKqAiSKgqIKJYkEgJVUoykzLr/WOG9JAJIXOIrs/zzJMpe4YfJ5N9zj5777NFVTHGGGOMMcYYAJfTAYwxxhhjjDHnDmsgGGOMMcYYY1JZA8EYY4wxxhiTyhoIxhhjjDHGmFTWQDDGGGOMMcaksgaCMcYYY4wxJlVYQf8DXardUKivozq7yXGnI5yxTuvF6Qj5MjMyyukI+TIiKcnpCPnyyeTWTkfIl56jNzgdIV+mlElwOsIZG3Yw0ukI+XIwJd7pCPmS4Et0OkK+LG3kdjpCvlRYudPpCGcsYc8qpyPkW3iZiwrFwU/SwV8VgssrIh2AqYAbmKmqz2R6/ULgDaAscBi4S1Vj85PPehCMMcYYY4wJpZQk/y0XIuIGpgEdgfrAHSJSP1OxycAcVW0ETASezm88ayAYY4wxxhgTQuo5iXpOBlO0ObBTVX9V1UTgXaBbpjL1geWB+yuzeT3PrIFgjDHGGGNMCKk3HvUGNZyxMrA73ePYwHPpfQf0CNy/CSghIqXzk88aCMYYY4wxxoRSYgIkJiAi/UVkQ7pb/0wls5ujkHl+7yjgKhHZDFwF/Akk5ydegU9SNsYYY4wxxqRRr394karOAGacpmgsUDXd4yrAngyfpboH6A4gIlFAD1X9Kz/5rAfBGGOMMcaYUPKc9N9ytx6oJSI1RCQCuB34OH0BESkjIqeO6cfgv6JRvlgDwRhjjDHGmBDSxAQ0MffLWatqMjAEWAJsB+ap6jYRmSgiXQPFrgZ2iMhPQHngyfzmsyFGxhhjjDHGhJIn+PVWVHUhsDDTc+PS3Z8PzD9r2bAGgjHGGGOMMaGVhwaCE2yIkTHGGGOMMaHkSfDfgiAiHURkh4jsFJFHcihzq4j8ICLbROSd/MazHgRjjDHGGGNCKdEbVLF0Kym3x39Fo/Ui8rGq/pCuTC38k5PbqOoRESmX33jWg2CMMcYYY0wIaUICmhBUD0IwKynfB0xT1SMAqhqX33zWQDDGGGOMMSaUPB7weIJZKC2YlZRrA7VFZLWIrBWRDvmNZ0OMjDHGGGOMCSVvIhDUQmnBrKQcBtTCf7nTKsAqEblEVY+eaTzrQTDGGGOMMSaENMGDJniCKZrrSsqBMh+papKq/gbswN9gOGPWQDDGGGOMMSaENMGLJgQ1UTnXlZSBBUBb8K+qjH/I0a/5yWcNBGOMMcYYY0JIvUmoNyn3csGtpLwEOCQiPwArgdGqeig/+WwOgjHGGGOMMSGkCYnBl819JWUFHgjczgrrQTDGGGOMMSaEND4ZjU8OqmxuC6WJyEAR2SIi34rI1yJSP7/5rIFgjDHGGGNMCPniU/DFp+RaLt1CaR2B+sAd2TQA3lHVhqraGHgOeCG/+ayBYIwxxhhjTAj5PIrPk/lqpdnKdaE0VT2W7mFxsl4GNc9sDoIxxhhjjDEhlBzv/xlYGC394mgzAmsjnJLdQmktMn+eiAzGPwchAmiX33znfAOh/4T+NGsbjTfBy9QHp/DL1l+ylBk/ZwKlypXCHeZi27ofmP7Yq/h8PqrXq8HgpwYTWTySuNg4Jg+bRMKJoJa1PivCmzSn2H1DweXCu+wzPO+/k+H1Ih26UqTjTeBLQT0JnHxlMr7duwBwX3gRxQeNgmLFwKccGzUAkoKf0HI2PPCvobRq1xJvgod/jXyGHVt+zrHspFlPUqlaJe5s1xeAWg1q8vAzDxARGUFKcgqTxrzID9/+GKroRF3ZlIrj+oPLxZF5Szk4fX6G14td1oCKj99HZN0a7B7+HMcWrU59rfzDfSnRNhpcLk5+vZm9E0+3fknBGTzhfpq3a443wcNzDzzPzq07cyw78Y3xVKxWkfuuHQDA3aN60/q6Vvh8ytFDR5n0wGQO7T8cqugZrP55D88t3IhPlZuaXsw9VzbI8PqkRRtZ/9t+ADxJKRw+6eHrR29xImoG/Sb0p1nbZngTvLz84FR+zabueXzOeC4oVwp3mJvt67Yx47Hp/rqnfg0GPjWIiCIRpKSkMGPsq/z8Xc5/P2dTZOvLKDVqELhdnPhwEcdmvZvh9RJ39iDqpk6QkkLKkaMcmjCZlL1xAFRbv4Sknb8BkLwvjgMjx2X5/FC4L1O9n922f2LOhMC2d/HDuh94LVDv16hfg/ufGkx4kQh8KSlMH/sqP3/3U0jzj5g4hFbtWuBJ8PDkyOf4aWvOv/tn3/w3lapVpNc1/QCY+OrjVLvYf8nzqJJRnDh2gruvy7ywasEa/a/hXH5NKzwJHp4Y8RQ/bsm6/Wa8/zJlypXG6/FfpnHQ7SM5cugoTVteyoMTh1Gr3sWMGTie5Z99EdLs4dHNKT5wKOJ24Vn0GQnzMu53Izt3JbJLYL+bkMCJqZNJ+WMXYXXqEjV8lL+QCPFvzSJxzaqQZj/lxRcm0rFDO+ITEujXbySbv92a4fWoqOJ8sfLD1MdVKlfkv+98wIOjnkh9rnv3zsx7dwYtWnZk46bvQ5b9dB576gW+Wr2OUhecz4K3pzsdx3HJHjdw1hZKQ1WnAdNEpCfwGNAnP/nO6QZCs7bRVKpeiQFX9qdOkzrc/+QgRnV7MEu5Zwc9k3rgP2b6GNp0vpxVn3zFsOeG8
sa/32BrzFauvbU93Qf04L/Pvx2a8C4XxQaM4PgTD+I7dICSk18jcd3q1AYAgPfLz/Eu9l/KNrx5a4rdM5gTEx4Cl5viDzzGyRefJOX3X5ASJSEluIksZ0urdi2oWqMKt7S5kwZN6/PQ0yPpd8OgbMte3fEK4k9mbHgNeWwAr78wi29WrqNVuxYMeWwgg24eEYro4HJRacL9/Nb7MZL3HeKiBS9y/PMYvDvTGuBJew4Q+9AUytzbPcNbizatS7Fm9djZaSgAF817juItGnIyZktosgc0b3sZlWtUps8VfanXpC7DnxrK0K7Dsy17eYc2eE5mXGxl3vT5zJo8B4Ab+3bjruF3MfXRlwo8d2YpPh9Pf7qB6X3aUb5kUe58bQlX1a3CxeXOSy0zumOz1Ptz1+7gx71HQp4zs6Ztm1GpeiUGXTmA2k3qMODJ+3m426gs5SYPeja17nlo+hhad27D15+sos+jfZk35V02fbGRpm2b0fvRvjx+26MFH9zlotTDQ4kb9DDJ+w9Q8e1pJHy5hqTf/kgtkrhjJ/vuGoR6vETd3IULhvfn4CP/BkC9iey9Y2DB5zyNZm2jqVi9EgOv7E/tQL0/Opt6/7l09f7D6er9Po/25d0pc9n0xUaatY2mz6N9eey2MSHL36pdC6rUqMxtl/eiQdN6jHp6BP27DM627FXZ1J3j7v9X6v0h4wZy8tjJAs2bWZt2Lal2UVW6tb6dhk0bMOaZUfTpnH0DZeyQCWz/bkeG5/bG7mf88Kfodf8doYibkctF1OAR/DXmQXwHD3D+y6+RuHY1KX+k2++u/BzPZ/79bkTL1hQfMJhjYx8i+fffODpkAPhSkFKluODVNzi8dg34ch8jfjZ17NCOWjVrULf+5bRo3pRp/3ma1pd3yVDmxImTRF92XerjmLWLWLAg7QI3UVHFGTr4HmJiNoUsdzBu7NSenj268ui/Jjsd5ZyQ7A16lH8wC6Wl9y7w6hnGSnVOz0FoeV0LVry/AoAdm3dQvGRxLih3QZZyp3YS7jA3YRHhaKBhVfmiKmyN8be8v121mdadWocoOYTVqodv35/49u+F5GQSV60govnlmYLHp96VIkVT24PhTaJJ+f0XUn73nzXT48fA5wtVdACuvL4NC+cvAWDbph+IOi+K0uVKZSlXtFhR7hhwK29OeSvD86pK8RLFAYgqWZwD+w8WfOhTmS6tjXfXXpJ270eTkvnr068o0b5lhjJJf8bh/fH3rNtVwVUkAgkPQyLCkXA3yQdDf8Da+rpWLHv/cwC2b/6RqJLFKZXN9o8sFsnN93Xn7ZcyniWLP5H23SpaLJKzMBzxjGyNPUTVUlFUKRVFeJib6xteyBc/xuZYftGWXXRoeGEIE2av+XUtWRmoe34Kuu4JS617VJWiJYoCUKxEcQ6HqPcm4pI6JMfuIflPf71zcskXFL26TYYy3g3foYGzvt4t23GXKxOSbMFqfl2LM9j2afU+CsVKFAP8Pw/vz9elwPPs8utbs3j+MgC2bdpOiRzrzkhu638zs6fmfNKqXZerWfbRigLLmp2rO1zBp+8tBmDLpm2UKBlFmXKlg37/3th9/Lz9F3wh3mcBhNWpR8qeP/Ht83//vV+sIKJVxv2uxqfVjUSm7XfxelMbAxIeAepMndmly/W89V9/j3fMuk2cd/55VKhQLsfyNWvWoFzZMqz6Oib1uQnjH2Ly86/i8QS1Sm/IRDduyHklSzgd45yR5HGTFOhFyEWuC6WJSPpVkzsD+e6yzrUHQUTq4p8MURn/n9Ie4GNV3Z7ffzw3pSuU5uDetAPLQ/sOUbpCaY7EZT1gm/DWRGo3rs3GlRtY85l/uMiuHbto0b4FMctiaNP5cspUDN2OUEqXIeVgXOpj36EDhNWul6VckU43Etn1VggP5/hj/jPsrkpVQaHE+ElIyfNJXLUCz4dzQ5YdoGyFssTtOZD6OG7PAcpWKMuhuIwHOv0fuod3pv8Pb6bVAKeM+w9T5k5i6Lj7ERH6dx0SktwA4RVKk7Q3LXvy3oMUbVwnqPcmbP6Rk2u/p27MHBDh0JxP8f6S8wFtQSlToQwH0m3/A3sPUqZCaQ5n2v59R/fhvf97P8v2B+j70N2073EtJ4+fZNStDxV45uzEHU+gwnnFUx+XL1mMLbHZNxb3HD3JniMnaH5R+VDFy1HpCqU5lKnuKZVD3TPurQnUalybTSs38s1nawB4Y8L/Me6tidw99h7E5WLMTaNDkjusbBmS96XVOylxB4i4pG6O5aNu7IBn9frUxxIRQYW3p0GKj7/enEvCF2sKNG92Mtf7B09T749/ayK1MtX7MyfMYPxbE+kb2PYP35S156cgla1Qhrg9ab+DuL0HKFuhTJa6876H7uHd197Dk5D9QdylLRpx5MARYn/7s0DzZlauQhn2Z8gfR9mKZTgYl7WhNf7FR/Gl+Fi+8Atmvjg7lDGz5SpdBt+BdPvdgwcIq5t1vxvZ5UaKdvfvd/96KK1nO6xOPaIefBh3ufIcf+6pkPceAFSuVIHY3Wknh/+M3UvlShXYl+7vOr3bb+vGe++lHSs2btyAqlUr8tnCz3lg5IACz2vOXKI3uEE8qposIqcWSnMDb5xaKA3YoKofA0NE5FogCThCPocXQS49CCLyMP6uCgHW4W/FCDA3u+uwnn1Zh11pDq36J3qNo3d0L8IjwmnUphEAL42eSuc+nXnxsykUjSpKclIoh+lkM2Qsm+jehQv4a2BPEma/RtFbe/vf6XYTVr8hJ57/N8ceGUJEyysIa9S0gPNmJNnFz7TtazWoSdUalfly8ddZynbv042pT0yjW/StTB0/jbEvOHOAmirIs0ERF1akSM2q7Gh9Nzta9SGq1aUUu6xB7m88y4LZ/hfXv4hKF1Zi9eLsD+LefG4WPVvcxYoPV9Dt7q7Zlilo2W12ye4/ByzZsotrG1TD7To3OzZzqnsm9nqCe6J7Ex4RTsNA3XN9r068MXEm97W8hzcmzmTwpGGhCZntFyf7osU7XUOR+nX4a8681Of+7NSTfXcN5uCjT1Fq1CDCqlQsoKA5kzzU++N7jePuQL1/att37NWJ1yfOpF/Lvrw+8f8YOin7oXkFJbvvd9a682IqV6/MV9nUnae0v7FdyHsPgKC/Q2MHT+C2dn3od+MgmrS4lM63dCj4bLkJMrvnkwUc6duT+Ndfo1jP3qnPJ+/YztH+d3N06ECK3n4nhEcUYNjsBfP9Se/WW7vx7v8WpL73+UnjGf3QxALLZ84eb1IY3qSgGwkLVbW2ql6sqk8GnhsXaBygqsNVtYGqNlbVtqq6Lb/5ctsT9wMuU9VnVPXtwO0Z/Jdc6pfTm0Skv4hsEJENu078kVOxbHXq3Zmpi15i6qKXOBx3OMNZ/9IVSp+2qz7Jm0TM5zG0CAwnif0llnF3jWNk5xF89dGX7Nu1L09Z8kMPHcBdJq1b0FW6LL7DOQ+zSVy1nPAW/q5Q36EDJG/9Fj3+FyR6Sdy4lrCLaxd45h5338icZTOZs2wmB/cfolylsqmvlatUloOZhgk1bFafOg1r82HMu7y2
4GWqXVSFV+ZPAaDTLdezcuFXACz/5AvqN875LObZlrTvEOEV07KHVSxDUlxwQzxKXteK+M078MV78MV7OP7lBoo1CU32rn26MH3xK0xf/AqH9h+mbLrtX7ZimSyTjOs3q0+tRrV4e81spnzwPFVqVOb5ec9l+dzlC1ZyRafLszwfCuVLFmXfX2ljqPcfi6dsYOhNZosdHl7UsXcnXlg0lRcWTeVI3GFKZ6p7juRS96z/PIbm7f0Xlmjbox1rF/kbbms+/Zpalxb83y9ActwBwtINR3CXK0vKgaxnfiObN+W8fj2JG/E4JCWlPp9y0F82+c+9eDZ8R0SdmgUfGn+9/+Kil3gxm3q/TBD1/rp09X7bHtfwTWDbrw7Rtu/epxuzls5g1tIZHNx3iHKV0n4H5SqW5WCmYU4NmjWgbsNazF/7Dq8ueImqF1Xh5ffSLlvudru4quPlLP94ZYFnB7j17u7MXfYmc5e9yYH9BymfIX85DuzLuu869Vz8yQQWf7CMSxpnPVMfar6DB3CVTbffLVMW36Gc97veL5YT0Tpr3Ziyexfq8RBWvUaB5Mzs/oF92LB+KRvWL2XP3n1UqVop9bXKVSqyZ+/+bN/XqFF9wsLC2LTZP0euRIkoGjSoy/Jl89n501patGjKhx+8SbOmjULy/zB5k5jsJjE5qCFGwSyUVkRE/hd4PUZEquc3X24NBB9QKZvnKwZey5aqzlDVaFWNvjCqWp4CLZzzGcM7DmN4x2GsXfIN7Xr4r9RUp0kd4o/HZ+lmjiwWmTo+1eV2Ed02mtjAkJDzSvsnQooItw27nUVvL8pTlvxI/vlHXBWr4CpXAcLCiLiiHUnrVmco46pYOfV+eHQrfHv9uZM2rcNd/WKIKAIuN+GXXErKH78XeOb3Zy2gd/t76d3+Xr5c/DWdbr4egAZN63Pi2MksXeQfzPmYLk1v5qYWtzPgxqH88Wts6kTkg/sP0bRVYwCiL2/K7t9CN0wn4fufKFK9EuFVyiPhYZx3w5Uc/zwm9zcCiXsOULzFJeB2QZib4i0aZpjcXJA+nv0JAzsMYmCHQaxesob2Pa4FoF6Tupw8Hp9leNEnb33K7dE9uat1H0Z0f5DY3/7kwcBQosrV0/5sW7dvye4Q/R8ya1C5NH8cPs6fR06QlJzCki27uKpu5Szlfj94jGOeRC6t6tx4+EVzFvJAx+E80HE4MUvW0jZQ99QOsu5pmq7uObL/MA1aXgJAwzaN2Pv76eaTnT2J23YQVrUyYZX89U7x668m4cuMPUzhdWpSauwI4kaMw3fkaOrzrhJREB7uv39+SYo0bkDSr7sIhYVzPmNkx2GMDNT76bf9yTzW+4f3H+aSlg0BaNTmUvaEYNt/MPsj7r6uP3df15+vlnxNh5vbA9Cgab1s684Fcz6mW7NbubllT+6/cRi7f41l6C0PpL4efUUzdu3czYG9oZm7NW/WB9zRvi93tO/LF4tWcUOgN6Bh0wacOH4iy/Ait9vN+aX8+9ewMDdXtG/Nzh2/hiTr6STv+BF35Sq4yvu//0Wubkfi2kz73Upp9U9E81ak/On/3rjKVwCX/2DNVa487ipVSdkfmpOKr06fTfRl1xF92XV8/PESet15MwAtmjfl2F/HTju86H+B3gOAY8eOU6FSQ2rWbknN2i2JidnETd37njNXMTIZeX1uvL7cGwhBLpTWDziiqjWBF4Fn85svt76NEcByEfmZtGuwVgNqAgU+qHzDig1Et41mxqr/81/ubtSU1NemLnqJ4R2HEVksksdff5ywiHDcbhffrf6eRW/7Z/Nf2e0qOvfuDMA3i9fw+bxlBR05jS+F+BlTKDF+sv8yp8sXkrL7d4r2vIfknT+StG4NkZ27E3ZpM0hORk+e4OSUpwHQkyfwfDSPks+/BqokbYwhaePa0GUH1ixfS+trWjB/zX/xJHj598i079qcZTPp3f7e077/6dGTGTlxCG63m0RvIk+Pfr6gI6dJ8bFn/HSqz56IuFwceW8Z3p//oNyIO0nY8jPHl6+jaKNaVHt1LO7zoihxTXPKDe/Jzg6DObZoNVGtGlFz0TRQ5cRXmzi+Yl3osgfErFhH83aXMefrN/EmeJn0YNr2m774FQZ2yP6KUqfcO6YfVS6ugvp87I+NY4oDVzACCHO7eKRzNPfPWYnPp3RrehE1y53PK8u/p37lUlxdtwoAi77/nQ6XXJjj8KNQ27hiA83aRvPqqhn+y5yOmpr62guLpvJAx+EUKRbJmNcfJzwiDJfbzZbV37EkcBLilUf+Q7/x9+Fyu0nyJvLKI/8JTfAUH4effZly054Bl4sTHy8m6dddnDewD4k//ETCV99wwYj+uIoVpexzjwNplzMNr1GNUmNHgvpAXBx7890MVz8KlY2Ben96oN5/OV29/+KilxjZcRhFikUy9vXHCY8Ix+V28f3q71kcqPenPfIy947vjzt1278c0vzfLI+hVbsWzFv9Np4ED089kNarN2vpjKAuWXptt7Z87sTwIuDr5d9w+TWt+Oib/+FJ8DB+5FOpr81d9iZ3tO9LeEQ40+a+QFiYG5fbTcyqDXz49icA1L+0Ls+/8RQlzy/Ble3bMHB0P265uldowvtSODFtCuc95d/vepYuJGXX7xTrfQ/JP/1I4to1FO3anfCm/v2u78QJTkz273fDL2lE0dt6QnIy+JSTL7+IHvsrNLnTWbhoOR06tGPH9tXEJyRw771pDccN65dmuHrRzT260KVbiLbtWTD6iWdYv/l7jh49xjU33sWgfr3o0eV6p2M5xqPB9R6QbqE0ABE5tVDaD+nKdAPGB+7PB/4jIqKnG5+WC8ntvSLiCoSrjH9gfSywXlWDmr3TpdoNzlwK4CyZ3eS40xHOWKf158bB1pmaGRnldIR8GZFu6EZh9Mnk0F31qyD0HL3B6Qj5MqVM6NZsOduGHYx0OkK+HEyJz73QOSzBF9o1c862pY2CPnA6J1VYmfOaNee6hD3OrP1wNoWXuahQHPx8UuEOBeiyb+5p84rIzUAHVb038LgX0EJVh6QrszVQJjbw+JdAmTPuhsx1doSq+oDQnr42xhhjjDHmb8oj/lH+QaykHMxCaUEtppYX5/RCacYYY4wxxvzdJLj8x/RBrKQczEJpp8rEikgYcB6QrwV4zs3rCRpjjDHGGPM3lSD+WxByXSgt8PjU2gc3AyvyM/8ArAfBGGOMMcaYkEoMcqZEkAulvQ68JSI78fcc3J7ffNZAMMYYY4wxJoQS8jCGR1UXAgszPTcu3X0PcMvZygbWQDDGGGOMMSakPPmbQ1zgbA6CMcYYY4wxIeQVxSv5aySISCkRWSYiPwd+XpBNmQtFZKOIfCsi20RkYDCfbQ0EY4wxxhhjQigBHwn48vsxjwDLVbUWsDzwOLO9QGtVbQy0AB4RkUq5fbA1EIwxxhhjjAkhDz48+W8gdANmB+7PBm7MXEBVE1XVG3hYhCCP/a2BYIwxxhhjTAidaiCISH8R2ZDu1j/3d6cqr6p7AQI/y2VXSESqisj3wG7gWVXNvI5CFjZJ2RhjjDHGmBBKVH/
vQW4LpYnI50CFbF4aG+y/paq7gUaBoUULRGS+qu4/3XusgWCMMcYYY0wIeUgOqpyqXpvTayKyX0QqqupeEakIxOXyWXtEZBtwBTD/dGVtiJExxhhjjDEh5NEUPJqS349Jv4JyH+CjzAVEpIqIFA3cvwBoA+zI7YOtgWCMMcYYY0wIJWoKiflvIDwDtBeRn4H2gceISLSIzAyUqQfEiMh3wJfAZFXdktsH2xAjY4wxxhhjQsjjS8r3Z6jqIeCabJ7fANwbuL8MaJTXz7YeBGOMMcYYY0LIo8l4NLh5CDkJZqG0QLlqIrJURLaLyA8iUj23z7YGgjHGGGOMMSGU5EsmyZe/BgLBLZQGMAeYpKr1gObkMpkZrIFgjDHGGGNMSHl8SWdjmFGuC6WJSH0gLDDUCFU9oarxuX2wNRCMMcYYY4wJIa8vCa8vKRQLpdUGjorIByKyWUQmiYg7tw+2ScrGGGOMMcaEkDclEQjJQmlh+Nc9aAL8AfwPuBt4Pbc3GWOMMcYYY0IkMcj5B2dhobRYYLOq/hp4zwKgJbk0EGyIkTHGGGOMMSHkTU7Cm5zvOQi5LpQGrAcuEJGygcftgB9y+2BrIBhjjDHGGBNC3pQkvCn5biDkulCaqqYAo4DlIrIFEOD/cvtgUdX8hnOUiPQPjN8qlCy/swpz/sKcHSy/0yy/swpz/sKcHSy/0wp7/n+Kv0MPQl5me5+LLL+zCnP+wpwdLL/TLL+zCnP+wpwdLL/TCnv+f4S/QwPBGGOMMcYYc5ZYA8EYY4wxxhiT6u/QQCjs49gsv7MKc/7CnB0sv9Msv7MKc/7CnB0sv9MKe/5/hEI/SdkYY4wxxhhz9vwdehCMMcYYY4wxZ0mhbiCISAcR2SEiO0XkEafz5IWIvCEicSKy1ekseSUiVUVkpYhsF5FtIjLc6Ux5ISKRIrJORL4L5J/gdKYzISJuEdksIp86nSWvROR3EdkiIt+KyAan8+SViJwvIvNF5MfA30ErpzMFS0TqBLb7qdsxERnhdK5gicjIwN/tVhGZKyKRTmfKCxEZHsi+rTBs9+z2VSJSSkSWicjPgZ8XOJnxdHLIf0tg+/tEJNrJfLnJIf+kQN3zvYh8KCLnO5nxdHLI/69A9m9FZKmIVHIyo8leoW0giN+cqEUAAAUKSURBVIgbmAZ0BOoDd4hIfWdT5cksoIPTIc5QMvCgqtbDv1z34EK27b1AO1W9FGgMdBCRlg5nOhPDge1Oh8iHtqraWFXP6R10DqYCi1W1LnAphej3oKo7Atu9MdAMiAc+dDhWUESkMjAMiFbVSwA3cLuzqYInIpcA9wHN8X9vbhCRWs6mytUssu6rHgGWq2otYHng8blqFlnzbwW6A1+FPE3ezSJr/mXAJaraCPgJGBPqUHkwi6z5J6lqo0Ad9CkwLuSpTK4KbQMBfwW7U1V/VdVE4F2gm8OZgqaqXwGHnc5xJlR1r6puCtw/jv/gqLKzqYKnficCD8MDt0I1GUdEqgCdgZlOZ/mnEZGSwJXA6wCqmqiqR51NdcauAX5R1V1OB8mDMKCoiIQBxYA9DufJi3rAWlWNV9Vk4EvgJocznVYO+6puwOzA/dnAjSENlQfZ5VfV7aq6w6FIeZJD/qWB7w/AWqBKyIMFKYf8x9I9LE4h2//+UxTmBkJlYHe6x7EUooPUvwsRqQ40AWKcTZI3geE53wJxwDJVLVT5gSnAQ4DP6SBnSIGlIrJRRArbojkXAQeANwNDvGaKSHGnQ52h24G5TocIlqr+CUwG/gD2An+p6lJnU+XJVuBKESktIsWATkBVhzOdifKquhf8J4yAcg7n+Se7B1jkdIi8EpEnRWQ3cCfWg3BOKswNBMnmOWuFhpCIRAHvAyMynRE456lqSqB7swrQPND1XyiIyA1AnKpudDpLPrRR1ab4hwgOFpErnQ6UB2FAU+BVVW0CnOTcHmKRLRGJALoC7zmdJViBse7dgBpAJaC4iNzlbKrgqep24Fn8Q0QWA9/hH7JpTJ6JyFj835//Op0lr1R1rKpWxZ99iNN5TFaFuYEQS8YzL1UoXF3NhZqIhONvHPxXVT9wOs+ZCgwN+YLCNR+kDdBVRH7HP7SunYi87WykvFHVPYGfcfjHvzd3NlGexAKx6Xqd5uNvMBQ2HYFNqrrf6SB5cC3wm6oeUNUk4AOgtcOZ8kRVX1fVpqp6Jf6hFz87nekM7BeRigCBn3EO5/nHEZE+wA3AnVq4r1f/DtDD6RAmq8LcQFgP1BKRGoEzYbcDHzuc6R9BRAT/+OvtqvqC03nySkTKnrrqg4gUxX/Q8aOzqYKnqmNUtYqqVsf/vV+hqoXmLKqIFBeREqfuA9fhH3pRKKjqPmC3iNQJPHUN8IODkc7UHRSi4UUBfwAtRaRYoB66hkI0QRxARMoFflbDP1G2sP0OwL+v7RO43wf4yMEs/zgi0gF4GOiqqvFO58mrTBPzu1KI9r//JGFOBzhTqposIkOAJfivZPGGqm5zOFbQRGQucDVQRkRigSdU9XVnUwWtDdAL2BIYxw/wqKoudDBTXlQEZgeuhOUC5qlqobtUaCFWHvjQf3xHGPCOqi52NlKeDQX+Gzg58SvQ1+E8eRIY/94eGOB0lrxQ1RgRmQ9swj+0YjOFb1XW90WkNJAEDFbVI04HOp3s9lXAM8A8EemHv9F2i3MJTy+H/IeBl4GywGci8q2qXu9cypzlkH8MUARYFqhH16rqQMdCnkYO+TsFTrD4gF3AOZn9n85WUjbGGGOMMcakKsxDjIwxxhhjjDFnmTUQjDHGGGOMMamsgWCMMcYYY4xJZQ0EY4wxxhhjTCprIBhjjDHGGGNSWQPBGGOMMcYYk8oaCMYYY4wxxphU1kAwxhhjjDHGpPp/7TG546KIc1wAAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "# 相关性分析\n", + "fig, ax = plt.subplots(figsize=(15, 1)) \n", + "corr_data = df.corr().iloc[-1]\n", + "corr_data = np.asarray(corr_data).reshape(1, 14)\n", + "ax = sns.heatmap(corr_data, cbar=True, annot=True)\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "IUhqen8LWAYM" + }, + "source": [ + "***数据归一化处理***
\n", + "下图为大家展示各属性的取值范围分布:" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXkAAAD4CAYAAAAJmJb0AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAgAElEQVR4nO3df5gU1Z3v8feXn6IQ0QHlpyE7aJK9ahTH6N4kPgqKit6gufGuXjbRPOxlg8bVGCKi2X2SzQpmr0bNmrDr6ia4l4S4yWZBZQUF3TXZaIRAEEVlho04DgIOiiLID/neP+o0VvdUd1fP9NDTxef1PP10n9OnTp2qrvr26VPVVebuiIhINvWqdQNERKT7KMiLiGSYgryISIYpyIuIZJiCvIhIhvWpdQMAhgwZ4mPGjKl1M0RE6srKlSvfcPehpcr0iCA/ZswYVqxYUetmiIjUFTN7pVwZDdeIiGSYgryISIYpyIuIZJiCvIhIhinIi4hkmIK8iEiGKciLiGRYjzhPXkTq09y5c2lpacnLa2trA2DEiBF5+Y2NjUyfPv2gtU0iZXvyZvZRM1sde7xtZteb2dFm9piZrQ/PR4XyZmbfM7NmM1tjZuO6fzFEpKfYtWsXu3btqnUzJLBKbhpiZr2B14AzgGuAbe5+m5ndBBzl7jPNbBJwLTAplLvb3c8oVW9TU5PrH68i2TBjxgwAbr/99hq3JPvMbKW7N5UqU+mY/ASgxd1fASYD80L+POCS8Hoy8IBHngYGm9nwCucjIiJVUGmQvxz4SXh9rLtvAgjPx4T8kcCrsWlaQ14eM5tmZivMbMXWrVsrbIaIiKSROsibWT/gs8A/lyuakNdhTMjd73X3JndvGjq05EXURESkkyrpyV8I/NbdN4f05twwTHjeEvJbgdGx6UYBbV1tqIiIVK6SIH8FHwzVACwCrgyvrwQWxvK/GM6yORPYnhvWERGRgyvVefJmdjhwHvBnsezbgAfNbCqwEbgs5C8mOrOmGdgJfKlqrRURkYqkCvLuvhNoKMhrJzrbprCsE51eKSIiNabLGoiIZJiCvIhIhinIi4hkmC5QJiJlJV2IrJhcudzlDUrRRcu6n4K8iJTV0tLCCy82M2jIcWXL7qMfAK++sadkuXfe2FiVtklpCvIiksqgIcdxxuSbq1bfMwtnV60uKU5j8iIiGaYgLyKSYQryIiIZpiAvIpJhCvIiIhmmIC8ikmEK8iIiGaYgLyKSYQryIiIZpiAvIpJhCvIiIhmmIC8ikmEK8iIiGZYqyJvZYDP7mZm9aGbrzOyPzOxoM3vMzNaH56NCWTOz75lZs5mtMbNx3bsIIiJSTNqe/N3Ao+7+MeATwDrgJmCZux8PLAtpgAuB48NjGjC3qi0WEZHUygZ5M/sQcBZwP4C773H3t4DJwLxQbB5wSXg9GXjAI08Dg81seNVbLiIiZaXpyf8BsBX4oZmtMrP7zOwI4Fh33wQQno8J5UcCr8ambw15ecxsmpmtMLMVW7du7dJCiIhIsjRBvg8wDpjr7qcC7/LB0EwSS8jzDhnu97p7k7s3DR06NFVjRUSkMmmCfCvQ6u7PhPTPiIL+5twwTHjeEis/Ojb9KKCtOs0VEZFKlA3y7v468KqZfTRkTQBeABYBV4a8K4GF4fUi4IvhLJszge25YR0RETm40t7I+1pgvpn1AzYAXyL6gnjQzKYCG4HLQtnFwCSgGdgZyoqISA2kCvLuvhpoSnhrQkJZB67pYrtERKQK9I9XEZEMU5AXEckwBXkRkQxTkBcRyTAFeRGRDFOQFxHJMAV5EZEMU5AXEckwBXkRkQxTkBcRyTAFeRGRDFOQFxHJMAV5EZEMU5AXEckwBXkRkQxTkBcRyTAFeRGRDFOQFxHJMAV5EZEMSxXkzez3Zvacma02sxUh72gze8zM1ofno0K+mdn3zKzZzNaY2bjuXAARESmukp78Oe5+irvnbuh9E7DM3Y8HloU0wIXA8eExDZhbrcaKiEhlujJcMxmYF17PAy6J5T/gkaeBwWY2vAvzERGRTkob5B1YamYrzWxayDvW3TcBhOdjQv5I4NXYtK0hL4+ZTTOzFWa2YuvWrZ1rvYiIlNQnZblPuXubmR0DPGZmL5Yoawl53iHD/V7gXoCmpqYO74uISNel6sm7e1t43gL8AvgksDk3DBOet4TircDo2OSjgLZqNVhERNIrG+TN7AgzG5R7DUwE1gKLgCtDsSuBheH1IuCL4SybM4HtuWEdERE5uNIM1xwL/MLMcuV/7O6PmtmzwINmNhXYCFwWyi8GJgHNwE7gS1VvtYiIpFI2yLv7BuATCfntwISEfAeuqUrrRESkS/SPVxGRDFOQFxHJMAV5EZEMU5AXEckwBXkRkQxTkBcRyTAFeRGRDFOQFxHJMAV5EZEMU5AXEckwBXkRkQxTkBcRyTAFeRGRDFOQFxHJMAV5EZEMU5AXEckwBXkRkQxTkBcRyTAFeRGRDEsd5M2st5mtMrOHQ/ojZvaMma03s5+aWb+Q3z+km8P7Y7qn6SIiUk4lPfnrgHWx9HeAO939eOBNYGrInwq86e5jgTtDORERqYFUQd7MRgEXAfeFtAHjgZ+FIvOAS8LrySFNeH9CKC8iIgdZn5Tl7gJuBAaFdAPwlrvvC+lWYGR4PRJ4FcDd95nZ9lD+jXiFZjYNmAZw3HHHdbb9IiIVmzt3Li0tLXl5bW1t7Nq1K9X0AwYMYMSIER3yGxsbmT59elXaWC1lg7yZXQxscfeVZnZ2LjuhqKd474MM93uBewGampo6vC8i0l2eeuoptr2xjf69+x/I27t/L/t9f6rpd+/czbtvvpuf9/5u2tra6i/IA58CPmtmk4DDgA8R9ewHm1mf0JsfBbSF8q3AaKDVzPoARwLbqt5yEZEu6N+7Px8e9OGq1ffKO69Ura5qKhvk3X0WMAsg9ORnuPsUM/tn4PPAAuBKYGGYZFFI/zq8v9zd1VMXkR5jxIgR7Nm3h1lNs6pW55wVc+g3ol/V6quWrpwnPxO4wcyaicbc7w/59wMNIf8G4KauNVFERDor7YFXANz9SeDJ8HoD8MmEMu8Bl1WhbSIi0kUVBXkRkazYuGMjc1bMKVlm887NABx7+LGp6hvL2Kq0rZoU5EXkkNPY2Jiq3J6WPQD0O678WPtYxqau92BSkBeRQ07a0xxnzJgBwO23396dzelWukCZiEiGKciLiGSYgryISBE7d+5k7dq1bNiwodZN6TQFeRGRIl555RX27
9/Pt771rVo3pdMU5EVEEjQ3N7N3714ANm3aVLe9eZ1dIyJCxytTrlu3Lu/9a6+9lo9//ONAz7zaZDHqyYuIJMj14oul64V68iIidDx3fuLEiR3K1OP58urJi4hkmHryIlJWW1sb77z9Ls8snF21Ot954xXa9hxRtfokmXryIiIZpp68iJQ1YsQI3u+3hzMm31y1Op9ZOJsRQ3reTTZyzIz4/Y7Mku5s2vOpJy8ikmEK8iIiCQrvWlqvdzFVkBcRybCyQd7MDjOz35jZ78zseTP7Vsj/iJk9Y2brzeynZtYv5PcP6ebw/pjuXQQRkerr27dvyXS9SNOT3w2Md/dPAKcAF5jZmcB3gDvd/XjgTWBqKD8VeNPdxwJ3hnIiInVl4MCBeelBgwbVqCVdUzbIe2RHSPYNDwfGAz8L+fOAS8LrySFNeH+C1ethaRE5ZL355pt56W3bttWoJV2TakzezHqb2WpgC/AY0AK85e77QpFWYGR4PRJ4FSC8vx1oqGajRUQknVRB3t3fd/dTgFHAJ4GPJxULz0m99g6Hpc1smpmtMLMVW7duTdteERGpQEVn17j7W8CTwJnAYDPL/ZlqFNAWXrcCowHC+0cCHX7nuPu97t7k7k1Dhw7tXOtFRLpJ//7989KHHXZYjVrSNWnOrhlqZoPD6wHAucA64Ang86HYlcDC8HpRSBPeX+71eoKpiByydu/enZd+7733atSSrklzWYPhwDwz6030pfCguz9sZi8AC8zsr4FVwP2h/P3AP5lZM1EP/vJuaLeIiKRQNsi7+xrg1IT8DUTj84X57wGXVaV1ItJjvPPGxlRXody5fTMAhx95bNn6GDK2Km3rDsOGDeP1118/kB4+fHgNW9N5ukCZiJTV2NiYumzL9j0AjC538bEhYyuq92C77rrrmDVr1oH09ddfX8PWdJ6CvIiUVcn9TGfMmAHU512U4n71q1/lpX/5y19y6qkdBjV6PF27RkQkwfLly/PSy5Ytq1FLukZBXkQkwWmnnZaXPv3002vUkq5RkBcRSbBhw4a8dEtLS41a0jUK8iIiCV577bW8dGtra41a0jUK8iIiCQqvQlmYrhcK8iIiCfbu3VsyXS8U5EVEEgwbNqxkul4oyIuIJIj/2zUpXS8U5EVEEhxKt/8TETnk7Nixo2S6XijIi4gk0Nk1IiIZprNrREQyTGfXiIhk2JYtW0qm64WCvIhIggkTJuSlzz333Bq1pGsU5EVEEkyaNCkvfdFFF9WoJV2jIC8ikmDx4sWYGQBmxiOPPFLjFnVO2SBvZqPN7AkzW2dmz5vZdSH/aDN7zMzWh+ejQr6Z2ffMrNnM1pjZuO5eCBGRalu+fDnuDoC7Z/qmIfuAr7n7x4EzgWvM7A+Bm4Bl7n48sCykAS4Ejg+PacDcqrdaRKSbjR8/nj59ojuk9unTp8MYfb0oG+TdfZO7/za8fgdYB4wEJgPzQrF5wCXh9WTgAY88DQw2s/q8zbmIHLKmTJlCr15RiOzVqxdTpkypcYs6p6IxeTMbA5wKPAMc6+6bIPoiAI4JxUYCr8Ymaw15hXVNM7MVZrZi69atlbdcRKQbNTQ0MHHiRMyM888/n6OPPrrWTeqU1EHezAYCPweud/e3SxVNyPMOGe73unuTuzcNHTo0bTNERA6aKVOmcOKJJ9ZtLx5SBnkz60sU4Oe7+7+E7M25YZjwnPunQCswOjb5KKCtOs0VEZFK9ClXwKJziO4H1rn7d2NvLQKuBG4Lzwtj+V8xswXAGcD23LCOiGTL3LlzO9zgOpeeMWNGXn5jYyPTp08/aG2rhvnz57N27Vrmz5/PtddeW+vmdEqanvyngC8A481sdXhMIgru55nZeuC8kAZYDGwAmoF/AK6ufrNFpKcaMGAAAwYMqHUzuqy9vZ0lS5bg7ixZsoRt27bVukmdUrYn7+6/JHmcHaDDOUUenVh6TRfbJSJ1oN565pWYP38++/btA6IrUNZrb17/eBURSbBs2bK8P0M9/vjjNW5R5yjIi4gkKDxlsqGhoUYt6RoFeRGRBJs25Z8v0tZWnycJKsiLiCTI/du1WLpe1GerRUS62dlnn52XPuecc2rTkC5SkBcRSTB16tS8a9dMnTq1xi3qHAV5EZEEDQ0NjB8/HojuElWv164pe568iMihaurUqWzevLlue/GgIC8iUlRDQwN33HFHrZvRJRquEREpor29na997Wt1e0kDUJAXESkqfoGyeqUgLyKSIH6BskcffbRue/MK8iIiCeIXKNu3b1/d9uYV5EVEEugCZSIiGXbMMceUTNcLBXkRkQRbtmwpma4XCvIiIgkmTMi/J9K5555bo5Z0jYK8iEiCSZMm5aUvuuiiGrWkaxTkS8jCHyFEpHMWL16MWXTnUzPjkUceqXGLOqdskDezfzSzLWa2NpZ3tJk9Zmbrw/NRId/M7Htm1mxma8xsXHc2vrtl4Y8QItI5y5cvzzu7ZtmyZTVuUeek6cn/CLigIO8mYJm7Hw8sC2mAC4Hjw2MaMLc6zTz42tvbWbx4Me7OI488ot68yCHmtNNOy0uffvrpNWpJ15S9QJm7/4eZjSnIngycHV7PA54EZob8Bzz6+nvazAab2XB330QVtbe3M3v2bG655ZZuu/zn/Pnz2b9/PwD79++v2zu1Szpz586lpaUlLy93u7cRI0Z0KN/Y2Mj06dMPStukNl566aW89IsvvlijlnRNZ8fkj80F7vCcO4F0JPBqrFxryOvAzKaZ2QozW7F169aKZn4whlGWLl2al16yZEm3zUt6pl27drFr165aN0NqpPCUyc2bN9eoJV1T7UsNW0KeJxV093uBewGampoSyyRpb29n6dKluDtLlixhypQp3dKb37NnT8m01K+kXnulWlpamDFjRl6eevfSE3U2yG/ODcOY2XAg95XXCoyOlRsFVPUW5xpGka5qaWlhzYvrsIbBJcu5R9vZc1vLjzZ6+1tVaZtItXU2yC8CrgRuC88LY/lfMbMFwBnA9mqPxy9fvjzvokHLli1TkJeKWcNg+nz27KrVt2/Rk1WrS6Sa0pxC+RPg18BHzazVzKYSBffzzGw9cF5IAywGNgDNwD8AV1e7wePHj887d7XwX2kiItWQizPF0vUizdk1VxR5q0N0DWfVXNPVRpUyadIkHn744dz86vZfaFI7bW1t+Nvbq9r79va3aNub+tCS1IHcOfLF0vWi7v7xunjx4rx0vf4LTUR6tpEj808MHDVqVI1a0jV1dyPvwn+dPf744xqTl4qMGDGC9r5W9TH5EUOHV60+qb1Ro0bx2muv5aXrUd315LNyjWcR6dlWrFiRl3722Wdr1JKuqbsgf7Cu8dy3b9+SaRGRelB3Qf5gXeM5dy5+sbSIZNuwYcPy0sOH1+dwXN0F+SlTppRMV8v7779fMi0i2VZ4UcL29vYataRr6i7Ii4gcDE1NTXnper0KZd0F+fvvv79kWkSkGjZs2FAyXU3deYOiujuF8oknnshLL1++nK9//es1ao3UK29/q+yfoXz7DgDsyIGp6kOnUGZK/PRJgNbW1m6b1623
3sratWuZPXs2t99+e1Xrrrsgr7Fy6arGxsZU5Vrejq5U2ZgmeA8dnrpeqQ8DBw5kx44deenu0N7eztq10Y331qxZw7Zt26p6Zd26C/IiXZX2csC5SwlXu2cl9WHv3r0l09Vy66235qWr3ZuvuzF5EZGDofAUysJ0teR68Tlr1qypav1115MfPnw4mzZtykuLiFRbPM4kpTsrzU1rcr8iq3EjmroL8ocffnheulrjZAdjxes+oiL1IyvH/+ouyBcGyfXr19eoJdWhe4hKVxXrPKTdtgYMGNChk9HTOxhXX301zc3NnHDCCdxzzz3dMo/uCvKF6/WGG27IG7I5+eSTqzomX3dBvrsUrvgnnniCOXPmHEh/4xvf4KyzzqrqPEAH9zpr5cqV3HLLLcyZM4dTTz211s3pkoceeoi//du/5frrr2fSpEkVTx/dzvBlejd8MGa8f+duPNxBrZydvps3t759IP1+++sVt6Fa0n5hvfvuuwC8/PLLXHrppXnv1duX1i233MIVV3xw246bb765qvUryBdxzjnn5AX5SgN82ptF58oU3hS6mJ68sR5Ms2bNAmDmzJksXbq0xq3pmlxP9O677+5UkAfo3TCMwz/7p1Vpz85F91Wlns546qmneOONNyqaJhfw4+nCOtra2mqy31R60/gBAwYwe/bskmUqjQGHXJCvZKX369ePPXv2MHr06LJBuHDFt7S08NK6NRw7uPQtw3rtj+4289am58q2Z/Nb9XlnmmpbuXJlXnrVqlV125t/6KGHDtxxyN1ZvHhxpwN9Fhx55JEdeu27d+/Ou0Bg0sUCe/Xqlfe6f//+HeqthZaWFppfeInjPlT6zJwj+x3Bjr3vMbJ/A3tatxctt/Htyn9l9fggX+0Doi0tLax/4TmOO7L8pYOP6LWPIw7rxeE7N7N75+ai5TZu73j+bO6AajlHDazsvpFp6y00ceLEA6/rveeb68Xn1FNvvnB7Ljxd7q677mL58uXAofmrbe7cuYl5pdYZwIknnnjgdbXWW69evfK+UOJfJGm1tbWRpms2fOCQVPU5lceAbgnyZnYBcDfQG7jP3W8rM8lBddyRfZn5qerdbOQ7v0q+pv2efeV73/vCsZw+vcvPZ0+6Ida6NXfu3MRgXdiTS3LBBRcAyb24iRMn9pif6mkOiOamaWtrS+zgHGrBv3BZ4x2WnO44plWty43v3reHV8r0wPe+H+3cfXuXDsm79+3hiArnX/Ugb2a9ge8D5wGtwLNmtsjdXyg3baXjV4VaWlo6DKvUaof4zGc+U9GYfNq/xHfmr/OFO8XEiRPrpucL0VkN5W6inNsBO3uz5aRtr9TxkjTbVWfGl+GDMeaksWXoOL7c1tbG++3tvPPDb5euOHcgtk+Z3X7vHtr2NlTU5oNp7NixNDc3H0ifcMIJNWxNaUlxIOmLfv+uaDSgV//8z6bYQeRKdEdP/pNAs7tvADCzBcBkoGyQ7+xOUeofYkk7xLb2PVyzOP/iQ3vfd/anjA+9DPr2/mCYZfc+52jL/wmVFAAq/RJLE0iSer87d+4sG+xygd/MOvz3IPd+fN6dnU9O0nwK5zF9+vRU6y3p8z755JOB6n6pDxgwoEvTpxlfdve8dWhmmEXbVtKvkly9lc4HPvgi7LUv/zPrMJ++fWo2hp3GD37wg7yOS3edQlkNaeNAsf/L9NQ/Q40EXo2lW4EzCguZ2TRgGsBxxx0HVL6xQuUHXJLmEWYEaX+O9epFr9h8BvTv/IGdrgaSQ8HB+LneHb/20owvQ/6X1kknnXTgddodPO18ujOQHGy53nxP7sUXc7DXtXX2523RCs0uA8539z8N6S8An3T3a4tN09TU5IU3zS3l0ksv5d1332XQoEH8/Oc/73Kbsy4pKNbTcE2h73//+yxcuPBA+nOf+xxf/vKXa9iirunqefLSPWbOnMmqVasOpMeNG8dtt/Wow4uY2Up3bypZphuC/B8B33T380N6FoC7zyk2TaVBXiqTtSAP2TpbSHqm9vb2vD8pLViwoKqXAK6GNEG+O65C+SxwvJl9xMz6AZcDi7phPpJSYRDMQlCcPHkyEPXiRbpDQ0PDgf9fjBs3rscF+LSqHuTdfR/wFWAJsA540N2fr/Z85NB2zTXXsHTp0roeppGe78Ybb+Skk07ixhtvrHVTOq3qwzWdoeEaEZHK1Wq4RkREeggFeRGRDFOQFxHJMAV5EZEM6xEHXs1sK/BKhZMNASq/BkLlDsZ8srQsWZtPlpYla/PJ0rJ0dj4fdvehpQr0iCDfGWa2otxR5XqZT5aWJWvzydKyZG0+WVqW7pyPhmtERDJMQV5EJMPqOcjfm6H5ZGlZsjafLC1L1uaTpWXptvnU7Zi8iIiUV889eRERKUNBXkQky3K3H+sJD2AYsABoIbpd4GLgBGAXsDrkPQD0DeXPBh4Or68iupn5hFh9l4a8z5eZ76Wh/vhjPzA9TH9trOw9wFVF6tkRnseUmg74EfBfwO+Al8MyjSysJ5a+CrgnvP4o8GRo48tAc8L6Wlsw/TeBGbF0H6LzcecUlLsYWBXa9QLwZwXvO3BHLD2D6N4BufQ04MXw+A3w6ZDfG1gJnBUruxS4LMU28X5Y1rXAQ8DggnX87VjZIcDe3LpKuc3ltpGPxfKOBx4O63Ul8ESu7eGz2FqwrfxhynnlluX5sI5vAHolbMvHhvnnPofFnV1Psfe/CrwHHBnLOxvYHj7zl4D/AC4uM5+G2HK/DrwWS/crsj6bQrtybXwZ2EG0LZeqq6JlAs6PTb8jLNNqov3rwPoNZS8B1hBtq88Bl5RYp78Dfgv895Sf846EvPh+u45o/L1oe2PT3R3WS247+VJsmj2h7auB24q2J+3O0N0PwIBfA1+O5Z0CfIYQtIiCxXJgSsKOcVX40O6LTf/TsAJKBvmEtkwD/h34A2AzUSDtF95LG+SLTkcU5D8fW+6vhg2/X7yeWL1X8UGQX0J0z9zc+vqrYusrlv9N8oP8JOBXREEsd1ymL9AGjArp/sBHC+p5j+jLaUhIHwjyRF8QK2PvjQM2AsNC+oywQfYFrgCWVLrDAPOAW2LruAVYFXt/evi8KwnyDwJPxZbjsPBZfDZW5sTYZ3dVJfWXWJZjgMeBbyVsy38PXBcre3Jn11Ms7zdhOa+K5R2YZ2z7+T2xjlKZeeZtV0nrM5b/A2B3eP0o8EvghjJ1VbxMsfeeBJqSlhX4BNG++ZGQ/khIn1xi/ucD/17p5xzLWwJMjqVPKtXekNeLaB96Gjg7oc7fE/a3Uo+eNFxzDrDX3f8ul+Huq4ndL9bd3yf6YEcWqeMp4JNm1tfMBgJjiXb61MzsBOAvgS8Q9ea3AsuAKyupJ+10HrmTqCdzYYp6hxPdNze3vv4yVlfe+irhCqIewkbgzJA3iKiH3x7q2u3uLxVMt4+oB/LVhDpnAl939zfC9L8l2jGvCelngP8k2pln5/Ir9GvyP/tdwDozy/2B5I+JgkwqYRv5FDCV6OY2AFOAX7v7gRvduPtad/9RJ9pblLtvIepMfMVyd+7+QO4zzpUtfqf6ZHn
rycwagYHAN4g++2JtWg38FdH9ICpWZH3m3Az0NbMbib7o/x/R/plWp5apiBnAbHf/L4DwPAf4eolpPgS8WeF84go/0+dSTHMO0a+YuVS+jAf0pCB/IlFPsCgzO4yoR/hokSJO1Ds6n6i3W9EdqcysL/Bjoh7FxthbtwFfM7PeldRX4XS/BT6WotydRL9m7gH2m9nghDKNZrY69wAO3FnDzAYAE4iGA35C2HjcfRvR+nrFzH5iZlPMLGn7+D4wxcwK71z+3+j4+a0I+TmzgOuBH7t7c4plPSCswwl0/EwXAJeb2Siin9dtFVR7CfCou78MbDOzcaG9vy0z3R/H129YpxVz9w1E++AxBW99H7jfzJ4ws1vMbETHqZMVWU9XEH3WTwEfNbPC+cWl3Q6TJK1PANz9LaLhhTnAtUQdmjSBrhrLVCjNtgowIHy+LwL3Ad+uYB6F7gSWm9m/mdlXi+y3hXLL+Avg4hCfKtaTgnwpjSFYtQMby/RsFhD1Ii4nWkGV+DbwvLsviGeGb/rfAP+7ksoqnK6wN9ehulDnD4GPE42jjgaeNrP+BWVb3P2U3AP4u9h7FwNPuPtO4OfApbkvIY9uvj4htHkG8I8Jy/Q20Rjnn6dcJo+lzyIaAz4xxbQ5A2Kf/dHAYwXvPwqcR7RD/LSCegnT5D7rBST0lszsF2a21sz+JZb90/j6dfddFc43bxaFGe6+hGio8B+IAu4qMyt5fRJKr0Xw2lIAAARUSURBVKfLgQXuvh/4F+CyStpTgXLrsz/Rr+NHiH5F3l+mvmotU6HC7bJY3q7w+X4MuAB4IOFXVyqx/fafiYaOkvbbDxoT3Tp1EvCvYZ97Buh4s+YUelKQfx44rch7LSFYjQXONLPPFqvE3X9DFESGhB5FKmZ2NvA/Kf5TdTbRkESl6yztdKcSHZAB2BU+5JyjiV24yN3biALwa0RDKJUEzSuAc83s90S9mQain4W5up8Lw0fnEa2PJHcR/SQ/Ipb3Ah0/v3EhHzM7AvgbYDww1MwmpWzvrvDZf5joYFzeMI+77wnL8TWiL61UzKwhtOW+sC6+TjTc83xod67+S4nG4at+g08z+wOiXx9bCt9z923u/mN3/wLRfZPPKlNd4noys5OJDiQ/Fpbzckr/9I9vh6kVW5+5oGhmFxMt67jwPDN8dgdjmQo9T3QwOO7AtprE3X9NdGC/3JdtUe7e5u7/6O6TKb/fXgAcCTwXlvHTdHLIpicF+eVAfzP7P7kMMzud6AMGwN03ATcR/ewvZRbRGGAqZnYU8EPgi+7+TlIZd3+RaCO4OG29aaazyJ8TjdnlhqH+HfiT8P4A4H8RneGBmV0QfrYtJxqTHEUU7Dusr4R5fYhoYznO3ce4+xiiHecKMxsYvuhyTqHIlUHD0M6DRIE+52+A74SdHTM7hSg4/iC8/5dE9/t9EbgauDMMv6Xi7tuJfj3MSPjZegdR0GhPWx/weaKzGD4c1sVoooPKLwOfKuhIHF5BvamEnvnfER3E9YL3xpvZ4eH1IKCRqOdbVsJ6uoLoIOiY8BgBjDSzDttJCJ5/QTRcVKli6/PTYRu+g+jA63PAQuCWtBV3ZZmKuB2YZWZjAMLzzaGNiczsY0QnflSyjcWnz+23mNkwos7VayUmuQL409h++hFgYm67qESfTrS3W7i7m9mlwF1mdhPRmRy/JxrDjftX4Jtm9pkSdf1bhbP/MtG46NyCX2OFwz23Eg2TVCppuv9rZn9BFECeBs6J9WyuA/4+BH8j2nn+I7w3keig6XtEn9+LwK/MrNj6ivscsNzdd8fyFhIF6BuAG83s74kOaL5LFKSLuYPYrx53X2RmI4H/NDMH3gH+xN03mdkfEp1a94lQdrWZLSH6hfOtEvPI4+6rzOx3RD23p2L5zxP1zipxBdExk7ifEw2tXQx818zuIjpL6h3gr2Pl/tjMPh1LX+3u/5linrnhh75EPbl/Ar6bUO404B4z20fUEbvP3Z9NUT/QYT1dTscD+r8I+c8AnzGzVUTb4Rbgz919Wdp5xZRanxcS7be5X2HfBFab2Y/cfX2ayitYpu+kqGu1mc0EHgqBdy9wYzjwHJf7vCDaD68MJ3+Uc7iZtcbS3yXqjN0d9lOITlJ4PWniEMjPB/4s1uZ3zeyXwP+gwmFJXdZARCTDetJwjYiIVJmCvIhIhinIi4hkmIK8iEiGKciLiGSYgryISIYpyIuIZNj/BzNOvQ9DHUMhAAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "sns.boxplot(data=df.iloc[:, 0:13])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "从上图看出,我们各属性的数值范围差异太大,甚至不能够在一个画布上充分的展示各属性具体的最大、最小值以及异常值等。下面我们进行归一化。" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "做归一化(或 Feature scaling)至少有以下2个理由:\n", + "\n", + "* 过大或过小的数值范围会导致计算时的浮点上溢或下溢。\n", + "* 不同的数值范围会导致不同属性对模型的重要性不同(至少在训练的初始阶段如此),而这个隐含的假设常常是不合理的。这会对优化的过程造成困难,使训练时间大大的加长.\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "features_max = housing_data.max(axis=0)\n", + "features_min = housing_data.min(axis=0)\n", + "features_avg = housing_data.sum(axis=0) / housing_data.shape[0]" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "BATCH_SIZE = 20\n", + "def feature_norm(input):\n", + " f_size = input.shape\n", + " output_features = np.zeros(f_size, np.float32)\n", + " for batch_id in range(f_size[0]):\n", + " for index in range(13):\n", + " output_features[batch_id][index] = (input[batch_id][index] - features_avg[index]) / (features_max[index] - features_min[index])\n", + " return output_features " + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "#只对属性进行归一化\n", + "housing_features = feature_norm(housing_data[:, :13])\n", + "# print(feature_trian.shape)\n", + "housing_data = np.c_[housing_features, housing_data[:, -1]].astype(np.float32)\n", + "# print(training_data[0])" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAYUAAAD4CAYAAAAD6PrjAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAgAElEQVR4nO3de3xU1bnw8d+TcBFEgQSKBCQq2NoebVVSPcdbIdRgPVZotS2e9BxsY3mVan2rIlK0tVoLVKm1b5VTFatt02IvtlJLCUJQqNYLVip4T2iDcQBhIiCGW5Ln/WPvGfcMc83syVzyfD+ffDJrX9Zec332WnvttURVMcYYYwBKcl0AY4wx+cOCgjHGmDALCsYYY8IsKBhjjAmzoGCMMSasT64L0B3Dhg3TY445JtfFMMaYgvLCCy/sUNXhibYpyKBwzDHHsG7dulwXwxhjCoqItCTbxpqPjDHGhPkSFETkARF5R0Q2xlkvIvJjEWkSkZdE5FTPuuki8qb7N92P8hhjjOkev2oKDwLnJVj/GeB4928GsAhARMqA7wCnA6cB3xGRoT6VyRhjTJp8CQqqugZoS7DJFODn6ngGGCIiI4HJwOOq2qaq7wKPkzi4GGOMyaKeuqYwCnjLk251l8VbfggRmSEi60Rk3fbt29M6eDAY5Nprr6WtLVHcMsYY01NBQWIs0wTLD12oeq+qVqlq1fDhCXtUHaK+vp6NGzdSX1+f1n7GGNPb9FRQaAWO9qRHA4EEy30TDAZZsWIFqkpDQ4PVFowxJoGeCgpLgf9xeyH9O7BLVbcADUCNiAx1LzDXuMt8U19fT1dXFwBdXV1WW0hRTU1N+M8Y03v41SX118DfgI+ISKuI1InI5SJyubvJMmAT0ATcB8wEUNU24FbgeffvFneZbxobG+no6ACgo6ODVatW+Zm9McYUFV/uaFbVS5KsV+DrcdY9ADzgRzliqa6uZvny5XR0dNCnTx8mTZqUrUMVjejaQU1NDStWrMhRaYwxPano72iura2lpMR5miUlJdTW1ua4RMYYk7+KPiiUl5dTU1ODiDB58mTKyspyXSRjjMlbRR8UwKktnHjiiVmtJdi9EMaYYtArgkJ5eTkLFy7Mai3B7oUwxhSDXhEUsq3Y7oWIvqhsF5mN6T0sKPjA7oUwxhSLgpxkJ9/EuhfiqquuynGpMmO1A2N6J6sp+KC6ujoibfdCGGMKlQUFH0T3arJ7IYwxhcqCgg9eeumliPTGjTEnoDPGmLxnQcEHt99+e0R6/vz5OSqJMcZkxoKCD0IXmeOljTGmUFhQ8EGfPn0Spo0xplD0iqCQ7SEoZs2aFZG+4YYbsnIcY4zJtl4RFLI9BMXEiRMj0uecc05WjmOMMdlW9EGhJ4ag+NOf/hSRXrZsme/HMMaYnlD0QaEnhqD4yU9+EpG+6667fD+GMSE2Iq/JJr+m4zxPRF4XkSYROaRBXUTuFJH17t8bIrLTs67Ts26pH+Xx6onpOJ2J5eKnjfGTjchrsinjoCAipcDdwGeAjwGXiMjHvNuo6jdV9WRVPRn4f8AjntV7Q+tU9cJMyxOturo63BsoW9NxikjCtDF+KbYReU3+8aOmcBrQpKqbVPUAsASYkmD7S4Bf+3DclNTW1oZ/pLM1HeeVV14Zkb766qt9P4YxYCPymuzzIyiMAt7ypFvdZYcQkUrgWKDRs/gwEVknIs+IyFQfyhOhvLyciooKAEaOHJmViXY++tGPRqRPOOEE349hDPRMc6jp3fwICrHaSuI1qk8DfqeqnZ5lY1S1Cvgv4EciMjbmQURmuMFj3fbt21MuXDAYJBAIABAIBLJS3V6wYEFEet68eb4fwxjomeZQ07v5ERRagaM96dFAIM6204hqOlLVgPt/E/AEcEqsHVX1XlWtUtWq4cOHp1y4+vr68IVfVc1KdbulpSVh2hi/1NbWUlLifG2z1Rxq8l82e6D5ERSeB44XkWNFpB/OD/8hvYhE5CPAUOBvnmVDRaS/+3gYcCbwig9lCuuJ6vbIkSMTpo3xS3l5OTU1NYgIkydPzuq84yZ/ZbMHWsZBQVU7gCuBBuBV4Deq+rKI3CIi3t5ElwBLNLK/5keBdSLyD2A1MF9VfQ0KPVHdti6opifV1tZy4oknWi2hl8p2DzQpxB+0qqoqXbduXUrbBoNBpk+fzoEDB+jXrx8///nPfT+7qqmpOWSZTWdpjMmGH//4x/zlL3+hs7OT0tJSzj///JSn/xWRF9xruHEV/R3NVt02xhSTxsZGOjudvjqdnZ2+N4kXfVAAq24bY4rHGWecEZE+88wzfc2/Vwz8X15ezsKFC3NdDGOMyXu9oqaQbePGjYtIf/jDH85RSUxv0NTUxNSpU9m0aVOui2Jy4Omnn45IP/XUU77m3ytqCtl2zz33RFxsjh411TgWLVpEc3NzxLLQjYWhu85Dxo4dyxVXXNFjZSskCxYsoL29nXnz5nHfffflujimh1VXV7N8+XI6Ojqy0qPSago+eOGFFyLSL774Yo5KUnj27t3L3r17c12MgtHU1BS+ObKlpcVqC71QtsdzK/ouqT3h85//PHv27AmnBw0axCOPPJJgDxNy3XXXAXDHHXfkuCSF4Wtf+1rEHfOVlZVp1RZi1dbAqbGlGpwHDBhgNbsc+8pXvsLbb7/N6NGjeeCBB1LeL5UuqdZ85ANvQIiVNsYvmQ6p0tzczEuvvUFp+VERy7va96Punf/JtOt+3t2+O5zuDG5NqwwmM8FgkLfffhuA1tZW2trafO1qb0HBB4MGDTqkpmBMNvTp0yc8bEsona7S8qMYeOFlvpWpfen9vuVlklu8ePEh6VmzZvmWvwUFH8ydO5c5c+aE0zfddJMv+dqFWROtI+psPjptit/q1asj0o2Njb4GBbvQXGDswmzvVllZmTBtil/obuZ46UxZTcEHt912W0T61ltv9eVCc6wzf7sw27vNnj2bmTNnhtPeGqoxfrCg4AO70Gx6yrhx46isrKSlpYXKykqOO+64XBcpZek0h4I1icZTWloaUTsoLS31Nf9e0XyUzQkp4NALy3ah2WTT7NmzGThwYFHUEqw5NH0TJ06MSFdXV/uaf6+oKXgnpEh1iNl0ZOtCszGxjBs3jj/+8Y+5LkbarDnUH3V1dTQ2NtLV1UVJSQl1dXW+5l/0NYVsT0gBMH78+HDtYNCgQZxySswZRY0xJmPl5eXh2R0rKip8nw6g6INCfX09XV1dAHR1dWVl+jpwagslJSVWSzDGZFUwGOSdd94BYNu2bb6f6BZ9UOiJOZrBqS0sX77cagnGmKyqr68PTwGsqr6f6PoSFETkPBF5XUSaROSGGOsvFZHtIrLe/bvMs266iLzp/k33ozxePTFHszHG9JRsn+hmHBREpBS4G/gM8DHgEhH5WIxNH1bVk92/+919y4DvAKcDpwHfEZGhmZbJq7a2lpIS52lmY0TBkNWrV1NTU8OaNWuykr/pvmz3PjOmJ2X7RNeP3kenAU2quglARJYAU4BXUth3MvC4qra5+z4OnAf82odyAc
5FmXPOOYeVK1fyqU99KmtzNN9+++0AzJ8/n3POOScrxzDdk+3eZ8ZkU/T9HQcPHgzXFDo7O2lqagr34vLj3g4/gsIo4C1PuhXnzD/aRSJyDvAG8E1VfSvOvqNiHUREZgAzAMaMGeNDsf2zevXqiOrcmjVrLDDkiWAwSENDA6rK8uXLqa2tzdqJQTYU6vhX8YbojhbaJvSjlkw+Pcdc6du3b3hgxLKyMvr27etr/n4EBYmxLHqShj8Bv1bV/SJyOfAQUJ3ivs5C1XuBe8GZTyHVwgWDwXCTzpNPPkldXZ3vPwqhWkKI1RbyR319PQcPHgScM6xiqC0Uws1ezc3NvPJaE0cMS3wC10E/AN7acSBpnu/t2OxL2QpNrCB49dVXs3nzZu6++27ff8/8CAqtwNGe9Ggg4N1AVYOe5H3AAs++E6L2fcKHMoXF6pLq94+CjVyZv1auXBmRfvzxxwsqKBTyDV9HDBvD6VO+5Vt+zz76fd/yKnR9+/Zl7NixWan1+tH76HngeBE5VkT6AdOApd4NRGSkJ3kh8Kr7uAGoEZGh7gXmGneZb3qiS2r0mPbdGePeZIe9N8akJ+OgoKodwJU4P+avAr9R1ZdF5BYRudDd7Bsi8rKI/AP4BnCpu28bcCtOYHkeuCV00dkvPdElNXos8xtuOKRXrskRG6zQmPT4ctqkqsuAZVHLvu15PAeIOXqXqj4ApD7JaJpqa2tZtswpWldXV1a6pB555JER6SOOOML3Y5juCY0o6k0bY+Ir+juae0Ks+RRMfpg9e3ZEuhhGFjUmm4o+KNTX1yPidHISkayMfWRNFPlr3LhxDBw4EICBAwcW1PwDxuRC0QeFxsbG8IQUnZ2dWbnQfNhhh0WkBwwY4PsxTPcEg0Ha29sBaG9vt7uajUmi6INC9AQUPTH2UWiwKpN799xzT0R60aJFOSqJMYWh6IPC+eefH5H+z//8T9+PsW/fvoRpkztr166NSD/55JM5KokxhaHog8IjjzwSkf7973/v+zFsOk5jTLEo+jt5Vq9eHZFubGw85L6CTNl0nMbkj0IdLypfFH1NIdTzKF7aDzYdpzH5be/evQUxZlQ+KPqawoQJEyLGv5k4cWJWjnPmmWfS0NBgA+HlmYEDB4Z7H4XSprgV8nhR+aDoawp1dXURk+zU1dVl5TgNDc6QTaG7p01+uPrqqyPS11xzTY5KYkxhKPqgUF5eHu6WOmnSpKyMKrhkyZKI9G9/+1vfj2G6569//WtE2mbGMyaxog8K4NQWTjrppKzVEh54IHLopvvuuy8rxzHpi+6SakHBmMSK/poCOLWFhQsX5roYpohYDxdTrHpFTcEmbu+9SktLE6b9ZD1cTDHoFTWFbE/c/tWvfjWiCelrX/ua78cw3RMa9ypeurush4spVkVfUwgGg6xYsQJVpaGhISu1BW+Xx1hpY4wpFEUfFGLN0ey36N5H2TiGMcb0hKIPCj0xR7MxxhQLX64piMh5wF1AKXC/qs6PWn8NcBnQAWwHvqqqLe66TmCDu+lmVb0QH1VXV/PYY4+F0z0xdLbJneheQdF3NB9++OHhtn9IrWdQrJ5GsYS28eafiPVKMvko46AgIqXA3cC5QCvwvIgsVdVXPJu9CFSparuIXAH8APiSu26vqp6caTniOf/88yOCQjaGzp42bVpEE1J35oG2H57sqKys5NVXXw2nx4wZk3Yezc3NvPTaq0j5kITbqTrNlBu2b0mapwZ3pl0OY3qCHzWF04AmVd0EICJLgClAOCioqneo0meAL/tw3JQ89NBDEekHH3yQW265xddjfOITn4gICh//+MfTzqO5uZnXX32JEUMSD9hX0uVM4LNzy4aE2wFs29n7JvuJFQCnTp1Ke3s748ePZ968ed3KV8qH0OfCCRmW7gMdS5/wLS9j/ORHUBgFvOVJtwKnJ9i+DviLJ32YiKzDaVqar6p/jLWTiMwAZkB6Z3vPPvtsRPqZZ55Jed9Uffe7341I33zzzTz66KNp5zNiiFA70b9ewvWrO3zLq5BVVlayefNm34dMN6YY+fELFOvUNuYpqoh8GagCPuVZPEZVAyJyHNAoIhtU9ZB2FFW9F7gXoKqqKq9OgaNnWrMbmPJL3759GTt2bFbGvTKm2PjR+6gVONqTHg0EojcSkU8Dc4ELVXV/aLmqBtz/m4AnAJuMwBhjcsSPoPA8cLyIHCsi/YBpwFLvBiJyCvBTnIDwjmf5UBHp7z4eBpyJ51qEHw477LCEaWOMMR/IuPlIVTtE5EqgAadL6gOq+rKI3AKsU9WlwO3AIOC37sxnoa6nHwV+KiJdOAFqflSvpYxFN+1Ep/1QWloaMXxCNsfXMcaYbPLlqqaqLgOWRS37tufxp+Ps9zRwkh9liCcbM29Fdx8dOXIkra2t4XRFRUXafeGNMSYfFP2AeD0xLlFZWVlEUBg6dKjvxzDGHMru7/Ff0QeFbIj1YfnqV79Ka2srN954o83TbEwPce7vaWJEWWXC7Uq0HwA7tx1Mmue2thZfylaoLCj4pKysjLKyMgsIxvSwEWWVfHnyjb7l98uG7/mWVyEq+gHxjDHGpM5qCsZ3qbbzQnptvb25ndeYnmJBwfiuubmZl197iYHlybc94N6b/s/tLyXcrj3oQ8GMMUlZUDBZMbAcTrjAv9bJ1x7r8i2vfJWNnjRWuzLpsqBgTJ5whuh+DSkflnA7dWtXG7bvSLxdMPF6k1/ypdnVgoIxeUTKh9Hvs1N8yevAn9IfqdfkTnNzM02vvM6YI49Kum2/DqcWfqB1V8LtNu/emnY5LCgYY0wSsc7iA4FAyiMiDxgwgIqKiohlsc7gxxx5FHPPmJ5ZYT1ue/qh5BtFsaBgTBKBQADdvcvXiXE0uJPAwbwaAd4ksHbtWtp2tNG/tH942cGug3Rpate69rfv5/133/8g3bmfQCCQl9d7LCgYY0wK+pf2p/KIxHdOp6rlvfy9a9qCgilYPTXuTUVFBcG+4vt0nBXDR/qWn8muiooKDnQcYE7VHF/ym7duHv0q+vmSl9+KLiik8kMR+nGw7nqFzemt8zIMG5BkywMAvLRjU/JMd9iseYUkEAjw3q52X4em2NbWQntn5qMpF6qiCwqmlxk2gD5TT/Atu44/vuZbXqa4bN6zmXnr5iXcZlv7NgBGDByRNK9xjPOtbH4quqAQfeb/2c9+lv37w7N/0r9/f+64446eLpYxJgsqKirYWXrQ9wHxhozoG7Fs7NixKe17oNmplfYbk7hpaBzjUs6zpxVdUIh25513MnPmzHD6rrvuymFpjDGFKNVm5lDTdCGfePoyDoGInCcir4tIk4jcEGN9fxF52F3/rIgc41k3x13+uohM9qM8XuPGfVBF69+/P8cdd5zfhzDGmKKRcU1BREqBu4FzgVbgeRFZGjXXch3wrqqOE5FpwALgSyLyMWAa8G9ABbBSRD6sqp34aNy4cWzatKlbtQSb2ckY05v40Xx0GtCkqpsARGQJMAXwBoUpwM3u498BPxERcZcvUdX9wD9FpMnN72/JDprOOCFbtmxhwIAB3HPPPUm3j
f6xbm5u5s1XNjBmcN8Ee0G/zg4A9r+d/ELl5l3JZ38yxphc8CMojALe8qRbgdPjbaOqHSKyCyh3lz8Tte+oWAcRkRnADIAxY8Y444S8+iqVg8uSFrBfp3Pn6MHAtoTbtexqi7l8zOC+zD7zQ0mPk6oFT73jW17GmPzR1tZGa2sra9asKdhZGP0IChJjWfT9+/G2SWVfZ6HqvcC9AFVVVQpQObiMG8/27zLE99Y2+JaXyb5AIAC72/3tRrqjncCBgH/5mV6ltbUVgHnz5vXqoNAKHO1Jjwaiv1WhbVpFpA8wGGhLcV9jjMk70U3YbW0ftDR0dnZSV1fH0KFDgcK6huhHUHgeOF5EjgXexrlw/F9R2ywFpuNcK7gYaFRVFZGlwK9E5Ic4F5qPB55L5aCBQID2nbt8Pbtv2dnGQHy9xt0rBQIB2nf7OzFOexACByPPFyoqKtjRb5/vN69VDKtIvqExUUK1hJC33norHBRSEQgEeH/3e90a2TSelt1bOTzwfvINPTIOCu41giuBBqAUeEBVXxaRW4B1qroUWAz8wr2Q3IYTOHC3+w3ORekO4Ovp9Dza19lBy87I6wAHOjvp0tRGnywRoV9paUR+vffmdmNMOqLP/Gtqag7ZphDvV/Dl5jVVXQYsi1r2bc/jfcAX4ux7G3Bbusc8++yzY/Y+8mOM81xwxnBR6ld3+Jbntp1Ku/Z8a1xFRQX7++7wfTrOiuHFfQbvDNG927fJcTS4g8DBA77kZbKvoqKCA127fJ9PoV/F4LT2Kdg7mgulfc4YYwpJwQaFYlNRUcFOCVI70b+3pH51B0NGRp5dL1q0iBUrVkQsa29vR1NschMRBg6MbGSrqanJXZDesTd576Nd7thXg/sn3s7Nj8RTJGeNM0R3P1+n46wYnqMnYwqWBQVTsFJt6mve5TQzjh2WwhAnw3LXhGhMPrCg0MtcccUVRdP01psGKTOmp/h3JdAYY0zBs5pCEoFAgLbgAb6+7O3wsoOdSlcac66XCPQt/eDm7f0dSpnYPXrG+GFbW0vSmdfefW8rAEOPOCql/IaMyM8JcHqCBYUkBg8efGgX1/37oSuNG7NKSijp/8FFzgH9nXyNMZlJ9fpPcI/TNTd68pxYhozI3wlweoIFhSQWLVqU6yIYY+LIp+tK/fr148CBAxHpQmTXFIwxxgfegBArXSispmCM8V0gEOC93e/z7KPf9y3P93a0EDhwuG/5mdgsKBiTRzS4I+kwF7prFwCS5LqUBneA3bxWUDbv3prSgHjb3nfGfBtxeOL5ZDbv3so4eskwF8YUm5RvxtvtBIWxyX7whw/L2QXTiooKOvsd4PQp3/Itz2cf/T4Vw/K3nb6kpIQuTweUkpL0WufTea8ONO8AoN/oxD/44xic9mfAgoIxeSKfLpqa9HVF9UiMTieTzk2l2fwM2IVmY4wxYVZTMKYXCQQCdO5+j/al9/uWZ2dwC4GDe3zLr1CdfvrpPPvss+H0f/zHf+SwNN1nNQVjjPHB9OnTE6YLhdUUTFa0B1ObjnOfc82Uw5J0kGgPAsMzL1dvV1FRwbt9dzPwwst8y7N96f1UDD/St/wK1bJlEfOM8ec//5mrrroqR6XpPgsKeWTbzuQzr727xxl0aeggSbhdKL8hI30pWlrS6e3QvNsZ1vrY4Un2GW5DWpv81tjYGJFetWpV7wsKIlIGPAwcA/wL+KKqvhu1zcnAIuBIoBO4TVUfdtc9CHwKcM8XuVRV12dSpkKV8hgu7hSkQ0Ym337IyNz8kOZLLwpjetL48eNZu3ZtOP3JT34yh6XpvkxrCjcAq1R1vojc4KZnR23TDvyPqr4pIhXACyLSoKo73fWzVPV3GZaj4Fl3xPymwZ10LH0i8Ta7nIutMnhQSvkxPHk1btGiRYfMRR5Khz4LIWPHjk3pc9QZ3JrSheauXUEASgaXJ80Paz5i06ZNEelYc8gXgkyDwhRggvv4IeAJooKCqr7heRwQkXdwWod3YkwBSP2mMneGtxR+7Bk+stu1uAEDBnRrP0i3aW+7s0+yH/zhR8bM970dm5MOc9G+axsAAwePSFqe93ZshmH5O6T122+/HZFubW3NUUkyk2lQGKGqWwBUdYuIfCjRxiJyGtAP8IbQ20Tk28Aq4AZV3R9n3xnADIAxY8ZkWGxjUpfLWpzfs+T1VNNe6lOlOoPGHZ3KncrD8ntI68MPP5z3338/Il2IkgYFEVkJxJqZYm46BxKRkcAvgOmqGuqWMgfYihMo7sWpZdwSa39VvdfdhqqqqjSmuDG9STrNLZB6k4tJT29sDt23b1/CdKFIGhRU9dPx1onINhEZ6dYSRgLvxNnuSODPwI2q+own7y3uw/0i8jPg0G+tMRnKpLnFmN4m0+ajpcB0YL77/5DhHUWkH/AH4Oeq+tuodaGAIsBUYGOG5TG9XKwz1KamJq677jpmzpzJcccdl4NSmd5g+PDhbN26NZz+0IcStqbnrUyDwnzgNyJSB2wGvgAgIlXA5ap6GfBF4BygXEQudfcLdT2tF5HhgADrgcszLI8xh1iwYAHt7e3MmzeP++67L9fFMUVq+/btEel33onZcJK2bPRASySjoKCqQWBSjOXrgMvcx78Efhln/+pMjm9MMk1NTbS0tADQ0tLCpk2brLZgCl42m0TtjmZT1BYsWBCRttqCyZZsNR/1dEcIGxDPFLVQLSFe2hi/ZKv5qKdZUDBFbdCgQQnTxvjF6S8TP10orPnI9JievmAG0NHRkTBtjF8mTJjAypUrw+mJEyfmsDTdZzUFk1MDBgzI6kWzs846KyJ99tlnZ+1Ypnf7/Oc/H5G+6KKLclSSzFhNwfQYu3PYFLNly5YhIqgqImLzKRiTj55++umI9FNPPcWsWbNyVJr8lYumvWLT2NiIqjMCj6oW7HwK1nxkilp1dTUlJc7HvKSkhEmTDrmtxsSR7aa9YlNdXU2fPs55dp8+fQr2s2Y1hTxmZ2+Zq62t5bHHHgOgq6uL2traHJcoP9lnJ3O1tbWsWLECcE5ACvWzZjWFAmNnb+l5992IiQDZudOm8TDZUV5eTk1NDSLC5MmTKSsry3WRusVqCnnMzt4yZ3c0m55UW1tLS0tLwdYSwIKCKXJ2R3P+sLkuCoM1H5miVllZmTBtcqvYmkPr6+vZuHEj9fX1uS5Kt1lNwRS12bNnM3PmzHB6zpw5OSxN71bsZ/3BYJCGhgZUlYaGBmprawvyuoLVFExRGzduXHi8o0GDBtmw2SZr6uvrw8OoHDx4sGBrCxYUTFELBoPhuXL37dtHW1tbjktkitWqVasibl7zjoNUSCwomKIWfbZWqGdvJv9FNxWVl5fnqCSZySgoiEiZiDwuIm+6/4fG2a5TRNa7f0s9y48VkWfd/R9253M2xjeNjY3hKn1HRwerVq3KcYlMsdqyZUtEOhAI5Kgkmcm0pnADsEpVjwdWuelY9qrqye7fhZ7lC4A73f3fBeoyLI8xEYpl6AGT/0LDqcRLF4pMSz0FeMh9/BAwNdUdxZmBohr4XXf2NyYVtbW14clOCnnoAZP/JkyYEJHu
rfMpjFDVLQDu/3iTkh4mIutE5BkRCf3wlwM7VTU060krMCrD8hgToby8nIqKCgBGjhxZkF0ETWGoq6uLGHyxrq4wGz6S3qcgIiuBo2KsmpvGccaoakBEjgMaRWQDsDvGdpqgHDOAGQBjxoxJ49CmNwsGg+G23UAgQFtbmwWGIpergSTLy8uprq5m5cqVTJo0qWA/Z0lrCqr6aVU9Mcbfo8A2ERkJ4P6POVO1qgbc/5uAJ4BTgB3AEBEJBabRQNwrM6p6r6pWqWrV8OHD03iKpjerr6+P6CZovY96p566c7quro6TTjqpYGsJkPkdzUuB6cB89/+j0Ru4PZLaVXW/iAwDzgR+oKoqIquBi4El8fY3JhOxeh8V4sQnJnW5vHO6vLychQsX5uz4fsj0msJ84FwReRM4100jIlUicr+7zUeBdSLyD2A1MF9VX3HXzQauEZEmnIISLbQAAA7cSURBVGsMizMsjzERrPeR6UnBYJBrr722oG+SzCgoqGpQVSep6vHu/zZ3+TpVvcx9/LSqnqSqn3D/L/bsv0lVT1PVcar6BVXdn9nTMSZSbW1txMU/633UO82ZM4eamhpuvPHGrB5n8eLFbNiwgcWLC/f8tjA70hqTomKZ+MRk5oUXXgDgueeey9oxgsEgjY2NgDPkRaHWFiwomKJXW1vLiSeeaLWEXip6ZNxs1RYWL15MV1cX4Ez9Wqi1BQsKpuiFLv5ZLaF3CtUSQrJVW1i9enVEOlRrKDQWFIwxxgehO+fjpQuFBQVjjPGBDXNhjDEFYPz48RHp0047LSvHib5hrVBvYLOgYIwpahdffHFE+qKLLsrasUJNRoXadAQWFIwxRe62226LSN96661ZOU6xDKliQcEYU9T27NmTMO2X6Ok3H3/88awcJ9ssKBhjitqgQYMSpv1ik+wYY0wBmDs3cpT/m266KSvHaW9vT5guFBYUjDFFbfz48eHawaBBgzjllFNyXKL8lunQ2cb0SrmayMV0z9y5c5k7d27WagnFxIKCMT7piUlcTPeMHz+e5cuX57oYBcGCgjHdYGf+JpqIhLukhtKFyK4pGGOMD7wBIVa6UFhQMMYYH4waNSoiPXr06ByVJDMWFIwxxgfRQaBXBgURKRORx0XkTff/0BjbTBSR9Z6/fSIy1V33oIj807Pu5EzKY4wxubJu3bqI9PPPP5+jkmQm05rCDcAqVT0eWOWmI6jqalU9WVVPBqqBdmCFZ5NZofWquj7D8hhjjMlApkFhCvCQ+/ghYGqS7S8G/qKqhXmrnzHGxHHUUUdFpEeOHJmjkmQm06AwQlW3ALj/P5Rk+2nAr6OW3SYiL4nInSLSP96OIjJDRNaJyLrt27dnVmpjjPFZMBiMSO/YsSNHJclM0qAgIitFZGOMvynpHEhERgInAQ2exXOAE4BPAmXA7Hj7q+q9qlqlqlXDhw9P59DGGJN15eXlEelhw4blqCSZSXrzmqp+Ot46EdkmIiNVdYv7o/9Ogqy+CPxBVQ968t7iPtwvIj8Drou5pzHG5LmtW7dGpLds2RJny/yWafPRUmC6+3g68GiCbS8hqunIDSSIc+vfVGBjhuUxxpic6OzsTJguFJkGhfnAuSLyJnCum0ZEqkTk/tBGInIMcDTwZNT+9SKyAdgADAO+l2F5jDEmJ0pLSxOmC0VGYx+pahCYFGP5OuAyT/pfwKgY21VncnxjjMkXZ5xxBmvXrg2nzzrrrByWpvvsjmZjjMkCG/vIGGN6MW8tAWDNmjU5KklmLCgYY4wJs6BgjDEmzIKCMcaYMAsKxhhjwiwoGGOMCbOgYIwxJsyCgjHGmDALCsYYY8IsKBhjjAmzoGCMMT445ZRTItKnnnpqjkqSGQsKxhjjg+uvvz5hulBYUDDGGB+Ul5eHawunnnoqZWVlOS5R91hQMMYYn1x//fWcdNJJBVtLgAznUzDGGPOB8vJyFi5cmOtiZMRqCsYYY8IsKBhjjAmzoGCMMSbMgoIxxpgwKcR5REVkO9CS5m7DgB1ZKE5PH8OOk7/HsOPk7zHsOI5KVR2eaIOCDArdISLrVLWq0I9hx8nfY9hx8vcYdpzUWfORMcaYMAsKxhhjwnpTULi3SI5hx8nfY9hx8vcYdpwU9ZprCsYYY5LrTTUFY4wxSVhQMMYY8wFVLdg/4ChgCdAMvAIsAz4M7AXWu8t+DvR1t58APOY+vhRQYJInv8+5yy5OctzPufl7/7qAK9z9r/Js+xPg0hh57HH/H5NoH+BB4J/AP4A33OczKjofT/pS4Cfu448AT7jlexX4RZzXa2NUHjcD13nSfXD6Q8+L2u4C4EW3bK8A/ydqvQILPenrgJs96RnAa+7fc8BZ7vJS4AXgHM+2K4AvpPCZ6HSf70bgT8CQqNf5Vs+2w4CDodcrxc9c6DNygmfZ8cBj7uv6ArA6VHb3/dge9Vn5WIrHCj2Xl93X+BqgJMZneYR7/ND7sKy7r5Nn/TeBfcBgz7IJwC73PX8dWANckOQ45Z7nvRV425PuF+f1rHLLFSrjG8AenM9zorzSek7AZM/+e9zntB7nOxZ+fd1tpwIv4XxWNwBTE7ym/wD+DpyRwvuwJ8ay6O/tvYnK6tnvLvc1CX1GvuLZ54Bb7vXA/IRlSvXLkG9/gAB/Ay73LDsZOBv3Rw7nx6URqI3xRbrUfZPv9+z/sPuiJQwKMcoyA3gSOA7YBjQB/dx1qQSFuPvgBIWLPc/5m+6XpJ83H0++l/JBUGgApnj2/Uei18uz/GYig8L5wFM4P3qh61B9gQAw2k33Bz4Slc8+nIA2zE2HgwJOQHnBs+5UYDNwlJs+3f0Q9wUuARpSfC/2eB4/BMz1vM7NwIue9Ve473c6QeE3wFrP8zjMfT8u9Gxzouf9uzSd/BM8lw8BK4Hvxvgs/xS42rPtx7v7OnmWPec+z0s9y8LH9Hx+/oXnxCrJMSM+V7FeT8/ye4D97uPlwF+Ba5LklfZz8qx7AqiK9VyBT+B8P49108e66Y8nOP5k4Ml03gfPsvD31k2flKis7rISnO/PM8CEGHn+C/e7luyvkJuPJgIHVfV/QwtUdT3wlifdifNBGBUnj7XAaSLSV0QGAeNwfiRSJiIfBr4N/DdObWE7sAqYnkY2Ke2jjjtxzpI+k0K+I4FW9/FEYFei1yuBS3DOQjYD/+4uOwKnBhF089qvqq9H7deBc5bzzRh5zgZmqeoOd/+/43yRv+6mnwWexvnyfz+0PE1/I/K93wu8KiKhG36+hPOjlBL3M3ImUAdMcxfXAn9T1aWh7VR1o6o+2I3yxqWq7+CcfFwpIhK12vs+o6ovpZl9xOskImOBQcCNOO99vDKtB24BrkzzeKHjxHo9Q74F9BWR63FODH6J8/1MVbeeUxzXAd9X1X8CuP/nAbMS7HMk8G6axwmJfj83pLDPRJwa0iLSf34RCjkonIhzphmXiByGc8a5PM4minP2NRmYAiyNs128/PsCv8I5Y9nsWTUfuFZEStPILp19/g6ckMJ2dwKNIvIXnC/uxjjbjRWR9aE/4PLQChEZAEzCaZ74Ne4HTlXbcF6
vFhH5tYjUikisz9PdQK2IDI5a/m8c+v6tc5eHzAH+L/ArVW1K/nQ/4L6Okzj0PV0CTBOR0TjV/UAa2U4FlqvqG0CbiJzqlvfvSfb7kvf1dV/TtKnqJpzv7IeiVt0NLBaR1SIyV0QqUs0zzut0Cc57vRb4iIhEH88r1c9iLLFeTwBUdSdOk8c84Cqck6BUfhz9eE7RUvmsAgxw39/XgPuBW9M4hlf4eysi3xSRISnsE3p+fwAucH+buqWQg0IiY90ftyCwOcmZ0xKcs5RpOC9qOm4FXlbVJd6F7pnEc8B/pZpRmvtEnykekp2b58+AjwK/xTnLmiYi/WNs36yqJ4f+gP/1rLsAWK2q7cDvgc+FApeqXobz5XsO52zqgRjPazdOG+03Unxe6kmfg9OGfWIK+4YM8Lz3ZcDjUeuXA+fifIkeTiNf3H1C7/USYpyRicgfRGSjiDziWfyw9/VV1b1pHjfiENELVLUBp+nyPpwf6BdFJOH4NiR+naYBS1S1C3gE+EI65UlDstezP07t+884tdTFSfLz6zlFi/5cxlu2131/TwDOA34eo1aXVNT3dgLwTJzvrVMQkX44Tbx/dL9vzwI16R43pJCDwsvA+Djrmt0ft3HAv4vIhfEyUdXncH50hrlnLCkRkQnARcSvOn8fp4kkndc41X1OwbkABbDX/VCElOEZJEtVA6r6AE4TTn/S+4EF54v6aRH5F87ZUjlOVTWU/wa3SetcnNcjlh/hNBEc7ln2Coe+f6e6yxGRw4EfANXAcBE5P8Xy7nXf+0qci48RzU6qesB9HtfiBLmUiEi5W5b73ddiFk7z08tuuUP5fw7nOoLvE/SKyHE4tZt3otepapuq/kpV/xt4HiegJhLzdRKRj+NcOH/cfZ7TSNwc4f0spize6xn6ERWRC3Ce66nu/9nue9cTzynayzgXv73Cn9VYVPVvOB0ZkgXnePsHVPUBVZ2C0wyb6Ht7HjAY2OA+v7PIoAmpkINCI9BfRL4WWiAin8T5QACgqluAG3CaIRKZg9OGmRIRGQr8DPgfVX0v1jaq+hrOh+aCVPNNto84voHT5hhqEnsS+LK7fgDwRZzeL4jIeZ5q5Ms4bbMTPPlFvF4xjnckzgdsjKoeo6rH4HzRLhGRQW5gDDmZOCPXuk1Nv8EJDCE/ABa4Pw6IyMk4P6b3uOu/DfzGfU1mAne6zYEpUdVdOLWT62JUpRfi/MgEU80PuBinp0el+1ocjXMR/Q3gzKgTj4Fp5JsS98z/f3EuWmvUumoRGeg+PgIYi3NmnVSM1+kSnIu+x7h/FcAoETnkc+L+2N6E03yVrniv51nu53ghzoXmDcCjwNxUM87kOcVxBzBHRI4BcP9/yy1jTCJyAk5Hl3Q+Y6F9w99bETkK50Ts7QS7XAJc5vmOHgvUhD4T6SrYOZpVVUXkc8CPROQGnJ4u/8Jpg/b6I3CziJydIK+/pHn4y3HadRdF1Q6jm59uw+m+l45Y+9wuIjfh/Ng8A0z0nDVdDfzUDRaC80Vb466rAe4SkX1u+hrgHBFpJv7r5fV5oFFV93uWPYrzg34NcL2I/BTnAu77OD/q8SzEU6tS1aUiMgp4WkQUeA/4sqpuEZGP4XRV/IS77XoRacCpRX03wTEiqOqLIvIPnDPDtZ7lL+MEyXRcgnPdx+v3OM19FwA/FJEf4fQkew/4nme7L4nIWZ70TFV9OoVjhppD+uKcLf4C+GGM7cYDPxGRDpwTvftV9fkU8gcOeZ2mcWgnhj+4y58FzhaRF3E+i+8A31DVVakeyyPR6/kZnO9tqJZ3M7BeRB5U1TdTyTyN57QghbzWi8hs4E/uj/VB4Hr3QrtX6P0C57s43e3skshAEWn1pH8IjCbyeztLVbfG2tn94Z8M/B9Ped8Xkb8CnyX9JlIb5sIYY8wHCrn5yBhjjM8sKBhjjAmzoGCMMSbMgoIxxpgwCwrGGGPCLCgYY4wJs6BgjDEm7P8D4UBatKltBDgAAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "#归一化后的train_data,我们看下各属性的情况\n", + "features_np = np.array([x[:13] for x in housing_data],np.float32)\n", + "labels_np = np.array([x[-1] for x in housing_data],np.float32)\n", + "data_np = np.c_[features_np, labels_np]\n", + "df = pd.DataFrame(data_np, columns=feature_names)\n", + "sns.boxplot(data=df.iloc[:, 0:13])" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "#将训练数据集和测试数据集按照8:2的比例分开\n", + "ratio = 0.8\n", + "offset = int(housing_data.shape[0] * ratio)\n", + "train_data = housing_data[:offset]\n", + "test_data = housing_data[offset:]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "JkEt541Cl0s8" + }, + "source": [ + "### 模型配置\n", + "线性回归就是一个从输入到输出的简单的全连接层。\n", + "\n", + "对于波士顿房价数据集,假设属性和房价之间的关系可以被属性间的线性组合描述。" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "JW2IHEVbbCI3" + }, + "outputs": [], + "source": [ + "class Regressor(paddle.nn.Layer):\n", + " def __init__(self):\n", + " super(Regressor, self).__init__()\n", + " self.fc = paddle.nn.Linear(13, 1,)\n", + "\n", + " def forward(self, inputs):\n", + " pred = self.fc(inputs)\n", + " return pred" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "定义绘制训练过程的损失值变化趋势的方法draw_train_process" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "train_nums = []\n", + "train_costs = []\n", + "\n", + "def draw_train_process(iters, train_costs):\n", + " plt.title(\"training cost\", fontsize=24)\n", + " plt.xlabel(\"iter\", fontsize=14)\n", + " plt.ylabel(\"cost\", fontsize=14)\n", + " plt.plot(iters, train_costs, color='red', label='training cost')\n", + " plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "oxD989B_cBjF" + }, + "source": [ + "### 模型训练\n", + "下面为大家展示模型训练的代码。\n", + "这里用到的是线性回归模型最常用的损失函数--均方误差(MSE),用来衡量模型预测的房价和真实房价的差异。\n", + "对损失函数进行优化所采用的方法是梯度下降法" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 562 + }, + "colab_type": "code", + "id": "m2as0xbvc6Ec", + "outputId": "18c62056-d21b-4b7b-b622-4b4b28f4ff93" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "start training ... \n", + "Pass:0,Cost:740.21814\n", + "Pass:50,Cost:36.40338\n", + "Pass:100,Cost:86.01823\n", + "Pass:150,Cost:50.86654\n", + "Pass:200,Cost:31.14208\n", + "Pass:250,Cost:20.54596\n", + "Pass:300,Cost:22.30817\n", + "Pass:350,Cost:24.18756\n", + "Pass:400,Cost:22.22965\n", + "Pass:450,Cost:39.25978\n" + ] + } + ], + "source": [ + "import paddle.nn.functional as F \n", + "y_preds = []\n", + "labels_list = []\n", + "\n", + "def train(model):\n", + " print('start training ... 
')\n", + " # 开启模型训练模式\n", + " model.train()\n", + " EPOCH_NUM = 500\n", + " train_num = 0\n", + " optimizer = paddle.optimizer.SGD(learning_rate=0.001, parameters=model.parameters())\n", + " for epoch_id in range(EPOCH_NUM):\n", + " # 在每轮迭代开始之前,将训练数据的顺序随机的打乱\n", + " np.random.shuffle(train_data)\n", + " # 将训练数据进行拆分,每个batch包含20条数据\n", + " mini_batches = [train_data[k:k+BATCH_SIZE] for k in range(0, len(train_data), BATCH_SIZE)]\n", + " for batch_id, data in enumerate(mini_batches):\n", + " features_np = np.array(data[:, :13], np.float32)\n", + " labels_np = np.array(data[:, -1:], np.float32)\n", + " features = paddle.to_tensor(features_np)\n", + " labels = paddle.to_tensor(labels_np)\n", + " #前向计算\n", + " y_pred = model(features)\n", + " cost = F.mse_loss(y_pred, label=labels)\n", + " train_cost = cost.numpy()[0]\n", + " #反向传播\n", + " cost.backward()\n", + " #最小化loss,更新参数\n", + " optimizer.step()\n", + " # 清除梯度\n", + " optimizer.clear_grad()\n", + " \n", + " if batch_id%30 == 0 and epoch_id%50 == 0:\n", + " print(\"Pass:%d,Cost:%0.5f\"%(epoch_id, train_cost))\n", + "\n", + " train_num = train_num + BATCH_SIZE\n", + " train_nums.append(train_num)\n", + " train_costs.append(train_cost)\n", + " \n", + "model = Regressor()\n", + "train(model)" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAY8AAAEjCAYAAADKRI1yAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAgAElEQVR4nO3deZgcVdn38e8NYY9AICFAAiZo0AeQNUJQARFZXRIEFFA2gTwsooC+CioCCiogqyJrIICIrJKwSIhsD6AJhCVhiSFDSMieIRskZJ/7/eOctnt6unu6Zrq7emZ+n+uqq7pOnao6Xd1dd9epU3XM3REREUlirbQLICIiHY+Ch4iIJKbgISIiiSl4iIhIYgoeIiKSmIKHiIgkpuAhnZ6ZDTczN7OLKrzeqXG9X67kekU6gm5pF0A6LzM7EegHPOzur6dbGukI9J3pOBQ8pJpOBPYDpgJpHghmA5OADyq83neB5cDHFV5vV3Yi9fGdkVYoeEin5+7nA+dXYb0HVHqdIh2FrnmIiEhiCh5ScWZ2opk5ofoB4PZ4YTkzTM3Pa2bPxunvmtlzZjY/pg+J6Wub2f5mdq2ZvWJmc81spZnNMrO/m9lXSpSn4AVzM+uXKVOc3snM/mZmc8xsuZn9x8wuMLN1i6y34AXzAu/pG2b2jJktMrMlZjbGzI5pZR/2NbNhZjYzlmWKmV1tZj3y15+Uma1jZkPN7CkzazSzFWY2zcyejOkbFVhmPTM718zGmtliM1tmZpPM7Coz27LEtnYxszvjvlphZh/F9/KEmZ1tZhvm7jPK+M5InXB3DRoqOgDfAeYAKwEHFsfpzPByTt4TY55ngevi6zXAgjgeEvPtFOdlhuXAkry0nxcpz/A4/6K89H45yx5EuHbhwKK47cy8h4usd2qc/+W89Nz3dEHOe1qUV96zi6x3Z2B+Tr6PcsrWAJybWX8bPps+wGs5614DLASactLy308v4NW8ff9hzvQCYFCBbR2W8x3ILLc4bx98Nul3RkN9DKkXQEPnHeLB04ETS+TJHGg/igewXwGbxnkbA1vE19sD9wFfB3oDFtO3AH4JrI7L71VgG+UEj4XAvUC/OG8j4Lycg+phBdbbWvBYGMv1y5z31Bu4P85fBmyWt+x6hIv7DrwDfDGmrwUcSrj4v7AtwSOu+5W4bCNwPLBRnLcBMBC4On8fAv/ICRJHAWvH9IHAhDhvDtAzb7l347xHgO1z0jcG9gFuzuzvJN8ZDfUxpF4ADZ13SBg8HPhtO7aV+Yd/e4F55QSPJzMBKS/PI3H+bQXmtRY8HPhFgeXWB+bF+cfnzTspJ7BsV2DZvXIC2rMJ99EZZM8Adi5zmX1y3sshBeb3jkHFgV/npG+Rs1zvSn5nNNTHoGseUi/WAFe1Y/lH4viLbVz+9x6PXnkejuOd2rDO5cA1+YnuvhwYVWS934rjB9x9SoFlxxIOsG1xfBzf7u4TylzmyDge5+5PFCjPXODGOPntnFmZM0mArZIWVOqfgofUiwZ3L3kfhpltYGbnmNmzZjbPzFblXPB+LWbbuo3bf7lI+sw47tGGdb7t7ksTrne3OH6hxHqfT1oQM1sH2CNOPp5g0d3j+JkSeZ6O4+0zF9vdfRnwXEwfZWa/NLNdzWztBNuWOqbgIfWisdRMM9uKcNPYVYQWOb2AFXG5uWRvAGzRUqgc7v5RkVnL43idNqy22DpLrbdnHM8useysNpRlM7L3db2fYLlecTyzRJ4ZcWxkyw9wCjCRUIX1G0KAX2Rmj5nZ98xM95l1YAoeUi/WtDL/GsJF8ynAEYQLzd3dfQt33xIYVO0CdnDWzuXXS7pArHbbGTiccHF8ItCd0ArrLmCsmXVvZ7kkJQoeUvfifRaD4+R33f0hd1+Yl613jYtVLZkzqFLXCdpyDWE+oeUXwCcTLJc5Iyy1TN84dvIeAePuq939YXf/X3ffgVD2/0c489oduDBBWaSOKHhINWUumLb3X29Psv98XyuS56vt3Ea9yLy/L5XIs0/Slbr7KkIzXQj//Mv1ahzvZ2bFPsfMDZrvlLjGkynHHHf/A9mGBPvlZanUd0aqTMFDqunDON60AuvJtIT6XP7MeD3krHZuo178PY6PMLN++TPN7PPA/m1c951xfKKZ7VzmMg/E8Y
5kz/5yy9MbOC1O3peTvk6JYAOhKTK0rA6r1HdGqkzBQ6rprTj+lplt0taVuPsSYEycvM3MdgUws7XM7ABCq57O8k/1r4S7yDcAnjCzvQEsOJjQdHhxG9c9jNDoYD3gKTM7LufxIBuY2Z5mdouZ7ZVZwN2fBzJNdG8zsyMzLabMbA/C/TE9CI0Wrs3Z1o7Am/ERJNtnAkkMKkcQ7pKHbJPljIp8Z6T6FDykmu4iPG7iS8AH8TlNU82sVDPUYs4h/Fv9HPCamS0hPJ7kn8DmwMkVKnOq4j0gRxEeZfIZ4F9m9hGwlHAQX0JouQShtVmSda8Avgm8SagKvBP40MwWxPWPJbSQ2iBv0eMJQacH4e74JWb2ITCOcEF8IXC4u8/PW24Hwh3rk4BlZjafcK3jAWCTuPwlectU8jsjVaTgIVXj7v8BDiQc9BYDWxIuvPYttVyRdY0F9ib8815IaOI6D7gJ2BUYX5lSp89DJ0i7ALcTHvuxThxfBexJ9rrAojasezrhsSI/JNxL8hGwIaH57ijgVOClvGUaCfv+x4QD/ipgXWAy4drFju7+77xNTSTcYHgjsYku4bEkH8btnkV49MqHuQtV8jsj1ZV5PpCIdBBmdhfwPeBid78o5eJIF6UzD5EOxMy2I9znAjA6zbJI16bgIVJnzGywmf3WzHaMjxXJ9KcxmPAokA2AMe7+YqoFlS5N1VYidcbMTgFuiZNNZK8XZB7nMQ04wN3fTaF4IoCCh0jdifd3nEK4+e6ThJZRywlNeEcC17p74ovlIpXUJYJHz549vV+/fmkXQ0SkQ3nllVc+cPdeheZ1iada9uvXj3HjxqVdDBGRDsXMphWbpwvmIiKSmIKHiIgkpuAhIiKJKXiIiEhiCh4iIpKYgoeIiCSm4CEiIokpeJTj4Ydhzpy0SyEiUjcUPFqzbBkcfjh8tbN0kS0i0n4KHq1piv3uvPdeuuUQEakjCh4iIpKYgoeIiCSm4CEiIokpeJSrCzy6XkSkXAoerTFLuwQiInVHwUNERBKrWfAws9vMbJ6ZvZmTtpmZjTazyXHcI6abmV1nZg1mNsHMds9Z5oSYf7KZnVCr8ouISFYtzzyGA4fkpZ0HPOXuA4Cn4jTAocCAOAwFboAQbIALgb2APYELMwFHRERqp2bBw93/D1iQlzwYuCO+vgMYkpN+pwdjgE3NbCvgYGC0uy9w94XAaFoGpOrQBXMRkf9K+5pHb3efDRDHW8T0PsD0nHwzYlqx9BbMbKiZjTOzcY2NjW0voS6Yi4i0kHbwKKbQEdtLpLdMdL/Z3Qe6+8BevXpVtHAiIl1d2sFjbqyOIo7nxfQZwDY5+foCs0qki4hIDaUdPEYCmRZTJwAjctKPj62uBgGLY7XWKOAgM+sRL5QfFNOqR9c6RERa6FarDZnZPcCXgZ5mNoPQaur3wH1mdjLwPnBUzP44cBjQAHwMnATg7gvM7DfAyzHfr909/yK8iIhUWc2Ch7sfU2TWAQXyOnBmkfXcBtxWwaKVpgvmIiItpF1tJSIiHZCCh4iIJKbg0ZpMT4K6cC4i8l8KHq054oi0SyAiUncUPFozqrotgUVEOiIFDxERSUzBQ0REElPwKJcumIuI/JeCh4iIJKbgISIiiSl4lDJhQvb1ypXplUNEpM4oeJQybVraJRARqUsKHiIikpiCh4iIJKbgUYoexy4iUpCCh4iIJKbgkcQCdVooIgIKHsnMnJl2CURE6oKCh4iIJKbgUYoumIuIFKTgUYoehigiUpCCRxI6ExERARQ8SlOwEBEpSMFDREQSU/AoRWceIiIFKXiUogvmIiIFKXiIiEhiCh6lqNpKRKSguggeZnaOmb1lZm+a2T1mtr6Z9TezsWY22czuNbN1Y9714nRDnN8v3dKLiHQ9qQcPM+sD/BAY6O47AWsDRwOXAVe7+wBgIXByXORkYKG7fxq4OuYTEZEaSj14RN2ADcysG7AhMBv4CvBAnH8HMCS+HhynifMPMKtR/ZKqsUREgDoIHu4+E/gD8D4haCwGXgEWufvqmG0G0Ce+7gNMj8uujvk3z1+vmQ01s3FmNq6xsbG6b0JEpItJPXiYWQ/C2UR/YGtgI+DQAlkz7WYL/f1v0abW3W9294HuPrBXr15tLVzz6aamtq1HRKSTST14AF8F3nP3RndfBTwEfAHYNFZjAfQFZsXXM4BtAOL8TYDa9NL085/XZDMiIvWuHoLH+8AgM9swXrs4AHgbeAY4MuY5ARgRX4+M08T5T7vX6G6+Rx+tyWZEROpd6sHD3ccSLny/CrxBKNPNwM+Ac82sgXBNY1hcZBiweUw/Fziv5oUWEeniurWepfrc/ULgwrzkKcCeBfIuB46qRblERKSw1M88RESk41HwKEX3dYiIFKTgISIiiSl4iIhIYgoepcyfn3YJRETqkoJHKVOmpF0CEZG6pOBRii6Yi4gUpOBRyrJlaZdARKQuKXiUouAhIlKQgkcpNXpklohIR6PgUYqCh4hIQQoepSh4iIgUpOBRijp/EhEpSMGjlM99Lu0SiIjUJQWPUo4/vvi8mTPhn/+sXVlEROpIXfTnUbdK3SS4227Q2KjrIiLSJenMIyl3uOCCEDhERLooBY+kGhrgkkvSLoWISKoUPJJSCywREQUPERFJTsGjlEIXzPWkXRERBY/EFi5MuwQiIqlT8Ehq8uS0SyAikjoFDxERSUzBIyndFCgiouCRmIKHiIiCR0mFWlYpeIiIKHgkppsERUQUPEREJLm6CB5mtqmZPWBm/zGziWa2t5ltZmajzWxyHPeIec3MrjOzBjObYGa717SwqrYSEamP4AFcCzzh7p8FdgEmAucBT7n7AOCpOA1wKDAgDkOBG2paUgUPEZH0g4eZbQzsCwwDcPeV7r4IGAzcEbPdAQyJrwcDd3owBtjUzLaqUuFapumah4hI+sED2A5oBG43s9fM7FYz2wjo7e6zAeJ4i5i/DzA9Z/kZMa0ZMxtqZuPMbFxjJfveGDq0cusSEemgyg4eZnabmX2iQPpGZnZbO8rQDdgduMHddwOWkq2iKliUAmkt6pLc/WZ3H+juA3v16tWO4omISL4kZx4nABsUSN8AKNHZd6tmADPcfWycfoAQTOZmqqPieF5O/m1ylu8LzGrH9kVEJKFWg0ds9bQ54R9/jzidGXoBXwfmtrUA7j4HmG5mn4lJBwBvAyMJAYs4HhFfjwSOj62uBgGLM9VbIiJSG93KyPMBoVrICQf1fA5c2M5ynAXcbWbrAlOAkwiB7T4zOxl4Hzgq5n0cOAxoAD6OeUVEpIbKCR77E846ngaOABbkzFsJTHP3dlUbufvrwMACsw4okNeBM9uzvbKp4ycRkYJaDR7u/hyAmfUH3o8HbxER6cKSXDDvB+yZmTCzE83sBTO7ycy6V7xkIiJSt5IEj2uALQHixe2bgAnA3sAVlS+aiIjUqyTB41PAG/H1EcBodz8DOBX4RqULJiIi9StJ8HBg7fj6AOCJ+HoOsHklCyUiIvUtSfB4GbjAzI4D9gH+EdP7EQJI51NOayu1HxCRLihJ8Dgb2BX4E
3Cpu78b048C/lXpgomISP0q5z4PANz9TWDnArN+AqypWIk6GnfdDyIiXU7ZwSPDzLYDdiBcA5no7lMqXqp6oSopEZGCyg4esd+NYYSWVk3ZZHsQONndP6pC+UREpA4lueZxLaHaan/Ck3Q3ILS62plwD0jns1YZu0dnJyLSBSUJHt8ETnH359x9VRyeJXQFO6T0oh2UrmWIiBSUJHhsAMwvkL4AWL8yxRERkY4gSfB4EfiNmW2YSYjdxV5MV26qq2orEemCkrS2OpdwV/lMM5tAaG21C6FPjYOqUDYREalTSe7zeMPMPg18D/gsoY+PvwB3u/uyKpWv/unMQ0S6oCRNdS8Fprv7jXnpp5lZH3e/oOKlExGRupTkmsdxwGsF0l8Fjq9McTqghga47760SyEiUlNJgscWQGOB9A+A3pUpTge0447wne+kXQoRkZpKEjzeJzxNN9++wIzKFKcD0jUPEemCkgSPm4CrzexUM/tUHIYCVwI3V6d4deDCC9MugYhI3UnS2upKM+sJXAesG5NXAte6++XVKFxd0JmFiEgLiZ6q6+7nm9klhKfqGvC2uy+pSsnqxaBBaZdARKTuJKm2AsDdl7r7y+7+UqcPHACHHlpeviefrG45RETqSOLgIUXcdFPaJRARqRkFDxERSUzBQ0REElPwqBT1/SEiXUjdBA8zW9vMXjOzR+N0fzMba2aTzexeM1s3pq8Xpxvi/H5plvu/nn8eFi9OuxQiIjVRN8ED+BEwMWf6MuBqdx8ALAROjuknAwvd/dPA1TFf+ubNg8GD0y6FiEhN1EXwMLO+wNeAW+O0AV8BHohZ7iDb1e3gOE2cf0DMn77x49MugYhITdRF8ACuAX4KNMXpzYFF7r46Ts8A+sTXfYDpAHH+4pi/GTMbambjzGxcY2Oh5zmKiEhbpR48zOzrwDx3fyU3uUBWL2NeNsH9Zncf6O4De/XqVYGSiohIRqLHk1TJF4FvmtlhwPrAxoQzkU3NrFs8u+gLzIr5ZwDbADPMrBuwCbCg9sUWEem6Uj/zcPfz3b2vu/cDjgaedvfvAs8AR8ZsJwAj4uuRcZo4/2n3Onl6YZ1cehERqbbUg0cJPwPONbMGwjWNYTF9GLB5TD8XOK/qJTnttPLyFYthK1ZUriwiInWgHqqt/svdnwWeja+nAHsWyLMcOKqmBdtkk7YvO3487LorPPggfOtb2fQZM2DVKujfv/3lExGpsXo+86gf5daKFaq2GjcujB97rHn6NtvAdtu1r1wiIilR8BARkcQUPMpRiQvhdXJNX0SkEhQ8yqEDv4hIMwoeldTUVHyemvGKSCei4FFJpZ6qq7MXEelEFDzK8ZnPtH1ZnXGISCek4FGOk05q+7I64xCRTkjBoxxmcPXV5eX99rdhwgT4y1+qWyaRtHzwAVx5pf4YdXEKHuXadtvy8t1/P+yyCxx3XJhWtZV0NiedBD/5CYwdm3ZJJEUKHuXqVldPchFJz6JFYbxqVbrlkFQpeJTra19LuwQiInVDwaNca6+ddglEKueyy2C//dIuhXRgqovprB5+ODzNt1+/tEsi9ei86vdkIJ2bzjw6snfegZdeKjzv8MNht91KL//CC/D225Uvl4h0ejrzqKY1a2DKlOz07NmhH4/Pf74y68/cvFisyWTmwmYx++xTenmRQvR9ERQ8quvii+HSS7PT228PS5boxyedg5qhd2mqtqqmZ55pPr1kSTrlEKkG/QmqvLfegs9+FhYsSLskrVLwqKYXXki7BNIZbLopnHVW2qXI0hlH9VxyCUyaBKNGpV2SVil4pGH+/LRLIB3J4sXwpz+lXYrKmD07BJ/8s3LpcBQ80nD66WmXQCQd//pXGKcVDK+9FtZbL51tJ9EBqgQVPGol98uwdGnttjtiBBx9dO22J51few5saR8Uzz4bVq5MtwyldKAqQQWPWvnrX5MvM3Nm+39sQ4bAvfe2bx0ihbTlQJf5Pnegg6QUpuCRxM47t33ZYv92Vq9uPj1nDvz0pzBmDPTtC8OHt32b0nXMmxceld5RKHiU1t4/jStWwNZbh5qHKlHwSCJTX1tJ+Y+JOOUUuOKKbJ3wc89VfpvS+fTuDb16pV2K1qVdbdVVzJoVGiecfXbVNqHgkcRGG1V+nfnBYcWKMM78yPRjk3rVnu+mzjxqo4rHDwWPtBX7cNdaq/R8kbR0hgN/vf6uKrVva/AZKXjUq0xHO01N6ZZDpJLq9aDdWXXmMw8z28bMnjGziWb2lpn9KKZvZmajzWxyHPeI6WZm15lZg5lNMLPd030H7VTsw1ULqfYbNQrGjavNtj7+uOt0y1qJA1JnOHuppvbu4y5y5rEa+LG7/w8wCDjTzHYAzgOecvcBwFNxGuBQYEAchgI31L7IVVLoLEP/1NrukEMq9wTj1pxwAgwaFFo9dRXtaaqbtnopR75KHfRr8P5SDx7uPtvdX42vPwImAn2AwcAdMdsdwJD4ejBwpwdjgE3NbKuaFXjffdu/jmefbZn24ouht8L/+7/m6fX6Ja8ny5bBH/+YbhVf5gynljeAdkRJ7vP45Cfh4IOrWx5ps9SDRy4z6wfsBowFerv7bAgBBtgiZusDTM9ZbEZMq1Uh27+Ojz/Ovs78mP75zzDOvx8kM3/mTBg9uv3b7ox++Uv44Q/hgQeSL3vLLfC731WuLF052K9ZA48+Wt4+KOd39P778OST7S9XV9RFqq0AMLPuwIPA2e7+YamsBdJafFvNbKiZjTOzcY2NjZUqZnWsWBHu7Sgk80PcfXc46KD2b6t7d/jmN9u/nnqSedBkW/71Dx0KP/95+8tQizr8hoZsQ4p6dOWV8I1vhC6QpT505gvmAGa2DiFw3O3uD8XkuZnqqDjOVCbPALbJWbwvMCt/ne5+s7sPdPeBver55il3uOqq4ge+zIdfqbr0pUvhkUcqs656UatHXixbFoZSZaimAQPgnHOqv51y5b/n994L4zlzal+WpNI4Q1y4sPyqVV0wb52ZGTAMmOjuV+XMGgmcEF+fAIzIST8+troaBCzOVG/VqMCVX2epf8zlfomOPbbzBYWmpuT/tCdOhM99LvxQ22PMGHj++eZpm20GG27YvvW2VyUeZV6J63allPrOdtVqvYULw/fn/PNL56v08aWTn3l8ETgO+IqZvR6Hw4DfAwea2WTgwDgN8DgwBWgAbgHOSKHMlbVWiY/hrbfKW8c995Sujir1JWrvgbZaDjsM1l032TKXXAJvvgmPP96+be+9d8uD7PLlxfNX+kc/fDi8/XbL9EocDPKDYlvlv+fMdDnBo6s11c30DNiW63J1KvU+zN39BQpfxwA4oEB+B86saqFq6fXXw1DM9OnNp2+9NfQsd/jh7dtubtXCpZfCH/7QvvWVY8WKEAzKPXCU25ta7gEp7X+27d3+okWw334wYUJl1lev0g4eae3XcrdbKt9JJ8GWW1a2oUcb1MOZh5TS1ASnnZadPvVUOOqo0KlNEvlfxiuvzL4eM6bt5SvXjBmw/vpwQxVvy1mxIrRKg9ofnCq1vVGjsoGjkIkTQ6umevCvf8Hv
f996vlydNRi2ppyzstx8pQwf3vp+7wrXPKQVTU1w000t00sdYAqZOjW01inkxRcTFyuxd98N42reOf+//9vyPpm22GGH9q+jmnKbeqfpZz9rXoef5ICVn9cdfvMbmDatMmWrN+Xum0oH105+zUPa4o47Ws+T61OfCq11MqpRTXXssWEbD8UGc7NmlX/NBkKVzerV8NprMGxY+ctV+gcycWJl15dE2tU5ldCWz6OhAX71Kxg8uPLlKaSznwHV4HuU+jWPDqfWP+5iTUNz7bNP9vXSpenVhd5zTxgfcQS88goMHBh+pJkBwk2Qq1bBhx+Glktjx8Imm8Auu0CPHnDiick7wCp0IPjzn9vzTtquqQlOPhl+8APYbbfqbCPtAPPOO4XTyylXsYN2pglrvZxVVUslqq0qub120JlHZ/DCC9nX3buHC+C1MGZMqOL5zGdaVqPtsUfhL+6YMeGiec+eIejtv3+4ATJz8LjzzuLbW7QodJJVzg+irVVx5dxPs9NO8Nhj2emPP4bFi8PrGTPgtts6342YufJvuk3yWJhira3SDojVVuqax5VXtvyj0QHOjBQ8JDAL10Vmz4b77oM33gi9031Y4mb/M88MVTzvvNN6dUN+qzEIZycZmR9LqQPR0KFw1lnQv3/pbZUyZkxob59pOpnv1FNbpuWeOUGoivv617PTfftm73Jvjzlziv+rr2eXX94y7Y03wnfqzTcLL1OtYLF6dTiLffTR0vlqfXDOfb/LljXfLz/5SbbFZVv2y+WXwxZbtJ6vwhQ8kurM/5COPRYOPBC+853QX/u8eXDRReUtO3Vq8XkjRsDxx5devpwfc6aP7sxF1eXLw+dx991lFREIZ2ULFzY/W8s1cmTz6aVLw34pdS9Ope6T2WoruOCC9q1j0aLSgSzzDLVKeuON5tPu2fsZHnyw5bxqmj8/nAV///vV3U57fO974UbWjz6qzPp+9rOWZ4NqbSU1tWZNeBhdrlWrwoXvSy+Fl14K/3L+858wr9wDwb//3XqepE/EveIK2GCDZMtA8+qDRYtaz9+9O/ztb8m3U02lDgw9eoQqwWKuv774vP794fTTk5enUFVUa9+Nxx4rfFbS3uBS7z1wumdbBJa66bRS5dc1jzqyzjrZ15kLxJ3FSy+1TDODY44JT67da6/wL+d//ifZei+7rPU8pe5byTQayP0h/PSnycqQkTnAvf9+ONBW24cftnzW04gRYT+modg9InfdFc4eb7wxTC9dGqrQyvl3nH/mkatYoFuwIPz7zs/X0AD339/6NlvbXmt/RoodVJ98MjQZbs2yZeG+oqTlyn3tXtluBHI/K5151KHbb8++Pvro9MpRLflfuj/+EV5+uWW+F1+s7Bf0vPOKz9tww3AhulA/KOUYORK+8IXwQ82UOfMQv4zRo0MrsSRWry4+b/nyML9Pn1AdlWvIkMLXCaplxYrwvs2KP/8sv1rxE58IDSEGDmx9/flnEO19ttW3v916nlmzYOutYdKk5unl3oyXq6kpvM8RI0L/Ib/6VevLbLhh2D+tmT+/eSOQ/HL99rfZ11Ontu+eq0KPPtGZRx3JPxB0NoUOiIWaC3/pS/Dqq9UvT8Y227Sep5jBg0PV2YoV2aqC/Oc7HXRQ9v6Ucv3hD8Uv3n/wQfhzsWRJmD7nnLBvc/9plgo+rSnnX2tTUxjaUreeOejkXsD/+99LNwOfMqXl8uPGweTJ4fWSJeU1PYfm1afXXReqT59/Hi6+OKTdf39o3JFpkn3WWaEnx1LVVsUOpEuXhsYbQ4YUnl/MtGnhTu8XXwxndPfe23IbxxwTepnMvUE39/pE7rW3/v2zN2RA1CEAAA4YSURBVNNmjBxZ/ObetLl7px/22GMPr6hM+xt39+efz72TQUM9D5X+rE4/PVn+XXZxP/bY5mmjRrX8XuUOf/tby7TFi9379g2vZ88uvGwmbbvt3P/97+JlKrTtWbNa5pk6tfR7+8tfsq+vvrp4eYptf/Lk8veje3Ybp5/efN0LF4bxxhu3/N3++c/ZfMuWZdM//LDwNt55J+zrUseAzHDVVWE8fHjzfDvuGNIfeyyMM58buM+Z437wwYXf4+23tzzWNDW533pr8c8us4y7+8yZIW3LLQuXv0zAOPfCx1WdebRX7jUQqW+5N1NWQtLndI0fD3/9a/O0K68s/I8zo1DV6K9/HarxILSOK9XabMqU8ITgYgq9h623bpnWWhPi730v+zq/xRqEJxyUkuTmwD/+Mfs6v/yZaqtCTcxzn3bwu9/B3Lmlt7P99uEhleW47rowbmwMz/w68sjm1aRf+1oYZz631ri3THvwQTjllOZpmRaI+XTNQ6STe/LJUNf96U+Xv0zugXzq1OYH7ozjjitvXWecAQe0eHh1c2bJerEs1OdIbpVWrgMPDOvfZZfy1//DHza/RyhX7kFz9epQpTRsGBx6aPML+7/+dXYflbrwXeqJ17kyTdXNwv588MHQyVtbD+KjRrVctlCT8NyWjH/5S8v5c+ZUr9lysVOSzjRUtdpqzJjyT7c1aNBQ+SH3N7l4cfF8u+zSfPrzn3d/4onCeffZp/n6X3vNfZ113G+4wX3GjOLbOOCA8sv9/vvJ3uNNNzVPmzLF/Zprmqedeab74YeH6sxM2tFHt+NQV7zaSs+2aq/ttku7BCJdW7n/7sePbz7d1BTO/ArJbVAxc2ao8ly1KtwHU+pemCQ9Pd5yS/l5IYSCXIWOPZn7eHIfs1PqBtd2ULVVe/XqFT7UE09MuyQi0rdv+XlfeSVULZWzzkyrudYkuW+jnPtJMjKPDypXbpPfKgUPnXm0xZQpnfsxJSIdVaUe+VGPkna8lbH22pUtR6Tg0RaF2vYrmIhIPVK1lYiIJKbgUefyL2aJiNSD/IYCFaLgUSmf/3zaJRARaWncuKqsVsGjUk47Ldy8VejuXBGRTkbBo1LWWgsGDFD1lYh0CQoelabgISJdgIJHpWVuEhowIN1yiIhUkYJHpR11VBi/+iqsXAnnnptueUREqkDBo9KuuSY8lrl79/C49iuvDFVZ++6bdslERCpGwaPSunWDnj1bpmceEXDXXbDjjrUtk4hIhXXY4GFmh5jZJDNrMLMSHWDXiczjS7bcMvT57N6+rlWraf786q1bnWeJdAodMniY2drA9cChwA7AMWa2Q7qlasWPfxzGuZ3eTJsWOqLJ9ATXu3cIKl/4Qpi+7LJs3osvDp3ZdO8epjfdFHr0yM4fMiR0eHPNNfDee6ETmOefD72lZZ7sn2v6dHj8cXj66ebpffrAZpuFC/+LFmXTzzwzPHTurLPgtdfgRz+CbbcN28v0gvetb8HNN4c+oZctgzvuCNvJ6N07XAfK9COecfXV2Z7YIPQHPWJEeB/vvRf6qZ45E95+O/SHfdFFRXdzSZkbOR96CJ59tnl/9NdfD089Bffc07J3ujPOyL5+7jn4xS9C+fKfijp4MBx8cMvtZjodWmut8Oeh0lWYQ4Zkg/Lmm4fxnnsWz//II4XPjruCQYPSLkFlfOIT5efdeOPqlKFYRx/1PAB7A6Nyps8
Hzi+Wv+KdQVVapuOWffYJ0wsWhD6h3d3XX999//0rs5333nMfNqx52urVYdsnn+z+yU+6v/FG8/ljx7o//XTr616yxH3VqsLzpk1znzChedo//hGGpqZs2m67uf/ud61vK3d7Dzzg3tjo/vDD7n//e+ioZ6edsh3hjB8f8payaFHLtMZG90mTWi9HZjuTJmXfyznnZNMbGwsv98gj7j/9qfvbb7ufe27o6Oe73w19YN93X/isdt3VvaEh5HUP/Yh//HEo7+zZ7ocd5r7JJu4rVxbexty57i+84D56dOine+7c0Fd4xuLF7iNHhtfLl7uvWRPWv3x5SFu1yn3evNDfd1NT+C7cf39Y35o1YTjqqPA+x40Ly6xeHdY7dmxIA/cf/9j9vPPcL73U/bnnwvc5s39OPtl9+vSw7Msvu596atjW738f9umIEe6XXx7m/fznIe3BB0NHSsOHh88f3D/72ex6M+vK7STp2mvDkPmMxo8PHTxlphsasnlnzAi/g5dfDp/DvHkhfdttw3u46qrwed14YyjfQw+5X3hh+Bwyn/2tt4a+4OfPD9/9J55wv/vu8L5Gjw6f3403ul9xRcj/5JOhHHPmuH//+9myZPpd33TTsE9zrVzpPnBgOFZcc03oq37OHPdXXw3LbLllGM+dW+ILXBolOoMy74D3JZjZkcAh7n5KnD4O2Mvdf5CTZygwFGDbbbfdY9q0aamUtWwPPABf/nLX/UdYSatWhS47X3ut8JlAJQ0fHvrnzu0ffdUqmDgRdtqpag+lqxvu4cyzWDe6q1eH6325T51evjz0Mb7FFpUpw6pV4VpjRmZb//xn+Ay6d8+esZcydSqst17zM9K0zJkT+hv52tfCmfhaayV/cveSJaGKvB1nW2b2irsPLDivgwaPo4CD84LHnu5+VqH8AwcO9HFVer6LiEhnVSp4dNS/RTOA3KvNfYFZKZVFRKTL6ajB42VggJn1N7N1gaOBkSmXSUSky+iQPQm6+2oz+wEwClgbuM3d30q5WCIiXUaHDB4A7v448Hja5RAR6Yo6arWViIikSMFDREQSU/AQEZHEFDxERCSxDnmTYFJm1gi05xbznsAHFSpOZ6T9U5r2T+u0j0pLa/980t17FZrRJYJHe5nZuGJ3WYr2T2u0f1qnfVRaPe4fVVuJiEhiCh4iIpKYgkd5bk67AHVO+6c07Z/WaR+VVnf7R9c8REQkMZ15iIhIYgoeIiKSmIJHCWZ2iJlNMrMGMzsv7fJUm5lNNbM3zOx1MxsX0zYzs9FmNjmOe8R0M7Pr4r6ZYGa756znhJh/spmdkJO+R1x/Q1w2YddotWdmt5nZPDN7Myet6vuk2DbqTZH9c5GZzYzfo9fN7LCceefH9zrJzA7OSS/4W4vdLoyN++He2AUDZrZenG6I8/vV5h0nY2bbmNkzZjbRzN4ysx/F9I7/HSrWP21XHwiPen8X2A5YFxgP7JB2uar8nqcCPfPSLgfOi6/PAy6Lrw8D/gEYMAgYG9M3A6bEcY/4ukec9xKh/3mLyx6a9nsuY5/sC+wOvFnLfVJsG/U2FNk/FwE/KZB3h/g7Wg/oH39fa5f6rQH3AUfH1zcCp8fXZwA3xtdHA/emvS+K7J+tgN3j608A78T90OG/Q6nv3Hod4ocxKmf6fOD8tMtV5fc8lZbBYxKwVXy9FTApvr4JOCY/H3AMcFNO+k0xbSvgPznpzfLV8wD0yzs4Vn2fFNtGPQ4F9s9FFA4ezX5DhP549i72W4sHww+AbjH9v/kyy8bX3WI+S3tflLGvRgAHdobvkKqtiusDTM+ZnhHTOjMHnjSzV8xsaEzr7e6zAeJ4i5hebP+USp9RIL0jqsU+KbaNjuIHsdrltpzqkqT7Z3Ngkbuvzktvtq44f3HMX7di1dpuwFg6wXdIwaO4QvXxnb1d8xfdfXfgUOBMM9u3RN5i+ydpemeifRLcAHwK2BWYDVwZ0yu5fzrUvjOz7sCDwNnu/mGprAXS6vI7pOBR3Axgm5zpvsCslMpSE+4+K47nAX8H9gTmmtlWAHE8L2Yvtn9KpfctkN4R1WKfFNtG3XP3ue6+xt2bgFsI3yNIvn8+ADY1s2556c3WFedvAiyo/LtpPzNbhxA47nb3h2Jyh/8OKXgU9zIwILb2WJdwUW5kymWqGjPbyMw+kXkNHAS8SXjPmZYdJxDqbInpx8fWIYOAxfHUeBRwkJn1iNUVBxHqqWcDH5nZoNga5PicdXU0tdgnxbZR9zIHrOhwwvcIwns6OraU6g8MIFzsLfhb81BZ/wxwZFw+f19n9s+RwNMxf12Jn+swYKK7X5Uzq+N/h9K+gFTPA6HlwzuEliC/SLs8VX6v2xFauYwH3sq8X0I98lPA5DjeLKYbcH3cN28AA3PW9X2gIQ4n5aQPJBxI3gX+RMe4wHkPoeplFeFf3sm12CfFtlFvQ5H9c1d8/xMIB7CtcvL/Ir7XSeS0tiv2W4vfy5fifrsfWC+mrx+nG+L87dLeF0X2z5cI1UgTgNfjcFhn+A7p8SQiIpKYqq1ERCQxBQ8REUlMwUNERBJT8BARkcQUPEREJDEFD5EqMLPhZvZo2uUQqRY11RWpAjPbhPD7WmRmzxIeHPiDlIslUjHdWs8iIkm5++JKr9PM1nX3lZVer0hb6MxDpArMbDjQk/B8phPyZvd396lmtgNwBaFPjGWEu4DPcfc5eet4HjgLWNfdO9rTdaWT0jUPker6EfBv4HZCnwpbAdPj85/+j/BYiT2BrwLdgZFmlvu73A/YGTgEOKCG5RYpSdVWIlXk7ovNbCXwceaMAsDMTgfGu/vPctKOJzwZdiDheU0Ay4Hvu/uKGhZbpFUKHiLp2APY18yWFJj3KbLB400FDqlHCh4i6VgLeAz4SYF5c3NeL61NcUSSUfAQqb6VwNp5aa8C3wamufuq2hdJpH10wVyk+qYCe5pZPzPrGS+IX0/o/e5eM9vLzLYzs6+a2c2ZTrlE6pmCh0j1/YFw9vE20Ahs66HL3y8CTcAThA64rgdWxEGkruk+DxERSUxnHiIikpiCh4iIJKbgISIiiSl4iIhIYgoeIiKSmIKHiIgkpuAhIiKJKXiIiEhi/x9rS+agpZEmJwAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "matplotlib.use('TkAgg')\n", + "%matplotlib inline\n", + "draw_train_process(train_nums, train_costs)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "可以从上图看出,随着训练轮次的增加,损失在呈降低趋势。但由于每次仅基于少量样本更新参数和计算损失,所以损失下降曲线会出现震荡。" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "YC73FnkakWbY" + }, + "source": [ + "### 模型预测\n" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "No.0: infer result is 12.15,ground truth is 8.50\n", + "No.10: infer result is 5.21,ground truth is 7.00\n", + "No.20: infer result is 14.32,ground truth is 11.70\n", + "No.30: infer result is 16.11,ground truth is 11.70\n", + "No.40: infer result is 13.42,ground truth is 10.80\n", + "No.50: infer result is 15.50,ground truth is 14.90\n", + "No.60: infer result is 18.81,ground truth is 21.40\n", + "No.70: infer result is 15.42,ground truth is 13.80\n", + "No.80: infer result is 18.16,ground truth is 20.60\n", + "No.90: infer result is 21.48,ground truth is 24.50\n", + "Mean loss is: [12.195988]\n" + ] + } + ], + "source": [ + "#获取预测数据\n", + "INFER_BATCH_SIZE = 100\n", + "\n", + "infer_features_np = np.array([data[:13] for data in test_data]).astype(\"float32\")\n", + "infer_labels_np = np.array([data[-1] for data in test_data]).astype(\"float32\")\n", + "\n", + "infer_features = paddle.to_tensor(infer_features_np)\n", + "infer_labels = paddle.to_tensor(infer_labels_np)\n", + "fetch_list = model(infer_features)\n", + "\n", + "sum_cost = 0\n", + "for i in range(INFER_BATCH_SIZE):\n", + " infer_result = fetch_list[i][0]\n", + " ground_truth = infer_labels[i]\n", + " if i % 10 == 0:\n", + " print(\"No.%d: infer result is %.2f,ground truth is %.2f\" % (i, infer_result, ground_truth))\n", + " cost = paddle.pow(infer_result - ground_truth, 2)\n", + " sum_cost += cost\n", + "mean_loss = sum_cost / INFER_BATCH_SIZE\n", + "print(\"Mean loss is:\", mean_loss.numpy())" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [], + "source": [ + "def plot_pred_ground(pred, ground):\n", + " plt.figure() \n", + " plt.title(\"Predication v.s. 
Ground truth\", fontsize=24)\n", + " plt.xlabel(\"ground truth price(unit:$1000)\", fontsize=14)\n", + " plt.ylabel(\"predict price\", fontsize=14)\n", + " plt.scatter(ground, pred, alpha=0.5) # scatter:散点图,alpha:\"透明度\"\n", + " plt.plot(ground, ground, c='red')\n", + " plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYkAAAEjCAYAAADHWv01AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAgAElEQVR4nO29eXhdVbn4/3mbpEnaDKVTOjAUqIVClcGiKIJlRvTKBaeLqHBRcQJluqLIVbxeJwTKFe5PKcggigLKpCjzdPkyWRBKIVBaWgptmqYtSZo0c97fH2vvZJ+TM+xzcubzfp7nPOectdda+93Teve71rveJaqKYRiGYcRiQr4FMAzDMAoXUxKGYRhGXExJGIZhGHExJWEYhmHExZSEYRiGERdTEoZhGEZcTElkGRFZJyIqIkui0k/z0h/Nj2SpISJLPHnX5VsWw0gVEan07l8VkZ3zLU+2yMZxFrySEJEbAgcd/HSKyAsi8otSvui5wFNYF4vI/vmWxUiMiBwoIpeJyHMisllEBkSkQ0ReEZHficjJIjI533KWEiJyrvd87JpnOQ705PhCLvdbmcudjZMBYJv3W4AZwH7e50si8i+q+kS+hEuDDuA1YH2+BQFOAz4MrANeiJNnB07eDbkRyQgiIg3ANcCnA8kKtAO1wELvcwqwVUTOVdXf5lzQ0uRcYC7wIPl9Xg8EfgA8BOTs2ha8JRHgSVWd5X2agDrgC7iHZApwm4jU5lXCFFDVO1R1b1XN6VtBuqjqs568R+ZblnJDRKYAT+IUxABwNfABYKKqTlXVWmAm8CngHmAq8PE8iWuUGMWkJCJQ1R2qehPwTS9pFvCveRTJMLLF9cC+QDdwrKp+VVWfVtVBP4Oqtqnqn1T1Y8D7gJV5ktUoNVS1oD/ADTiz+tE426uBIS/PpYH004LlcGb4Y8BWL/1fo+qpAy4E/oHrCuoFXgd+CeySRMZTgKeBLlyX2MPAR71t67z9LYkqEyFfnHoXAr8GVuEaiHbgJU+m90blnQh8FNcl8SKwxTuGN4HfR+ePkiHeZ10g75LotBj1HQ7cDmwC+r3vO4AjEpTx9zUP2NWT/22gD1gLXAo0pHjPXOTVuTxJvpO9fJuByhTq3xP3Nv860ONdm3XAI8B3gGkZvP8/EDhHXx1nXU949XwO2An4Ba4LsQfYEiP/J4H7vHupD3gL+B2wf5z6j/LqX51Ahi95eR6MSq8MHOfO3v3wG1z3Zh/whidvfYK6K3AvjS96x9QG3A28P7r+kOfrv5M8Hw/GyHst7uX7m4y2JQosir4GCfb7tpfnQzHOTbxPrLxpnccx8mTqZs7WhyRKwsvT6uVZFkg7zS+Ha1QVp0y2ed//Gsi7kNHGXHEmfVfg/zbgkDj7viqQbwh4Bxj2/n+TNJUEcBYwGKi7CzcuoLHKAR+Lumm6vQcleEyfjyrzGUYbdPVu6E2Bzz8CeZeQQEkQ+UANR50HBX4ap5y//QRGFXinJ6+/7R9AVQr3zB6BsgsS5Lvby3NVCnUfBGwP1N+PU97Bc39UFu7/Tamcgzh1+Q3UeTgFrN490klASeAa298FjmfQu57B+/zLMerPlJL4OO6Z8+/J4L3wNDEUOlAF/CXqfvevSz/wicC2sEriAu+8+y+hW6Oej1tj3P/XAX8NyOCft/EoiQpvf77C6YuSYxPwvkycx5jyZOpmztaH5JZELaON0SWB9NO8tO3e9u8DU7xtDcBM73dj4IG5AzjAP3k4LfxbRh/SKVH7PiVw0n8RqL8JuNG7ObtJUUng+pb9em8DFnrpAsz29ntZVJkl3g16BIE3Wdzb+VJGG4RdY+zvUW/7aQmuwxLiKAng3wLyXglM99KnMaqgYz4YgW3v4Abk/IepGjgdZw0p8PUU75unvXI/iLN9J9zDpsAHU6j3ca/M/yPwRg1MximQK/Ae2Azd/+u8/f02A3X5DdR2r95jgAnetvmBfL4lNgx8F6jz0ncG/syoojgkqv5MKYl3gAeAfb1tNV45/3qdEaPeHzCq0M4Bar30PYH7iVTkoZREoO6IRjtOHl9JbPees68Ak7xtTXhv7qShJJKdu0yex5h1ZupmztaH5ErizMBJ+UQg/bRA+k9CXNw7AYmT5x4vz/mBNMF1NyhwQ4wy4l0gX4YlUdt9+R6NSq/CmfUK3JzB8/gb4jSajENJRJ2HP8Qpe7NfFq9RCmzzz89KoDpG2Su97Q+neLzf9Mq9Gme7/8CtjXfd45TzH7Ax3XdZuPdrAufn/AzU5zdQfXgvHjHy1DNqKf0oxvYK3CD6mGtC5pTEi7hB+eiyv/K2359A5ovinMdXA/VnU0kocHqIa5ALJZHSeYz3KcqBa3HME5HzgUu85Ddx5mY0Q8DlCao71fteqt4ZjMEfvO+jA2n7A/O93z+NLuDV9ZME+43Hkbg3tiHgP9IoHw//3BySwToh8jz8d5w8P/S+d8MNqsbiclXti5F+p/e9KEW5bsGdw71E5MAY20/2vv+Q4LrHotP7np2iPOkwLfD7nVgZRGSSiGyK84kn419VtTnOtuNw43N9uPGgCFR1iNHrfLiITA93KClxmar2x0iPdy/4MvcA/xNdSFV7gcsyKmF82nC9CIVAqucxJsU0T+LDIhLvYW7BjTHEOiGrVXVLrEIisguuQQbnQjscp/6J3vcugTS/4dmsqq/FKfckzvxN5Twf7H2/qKopzUkQkanAN4CPAHvhutIqorLNSaXOEPjnoU1VX46VQVVfE5ENOF/zA3FdQdH8I079/jnYKRWhVLVVRB7GKfbPAs/727zGc4n39+ZU6gX+Dnwe+L2I/C9wF/C8qg6kWE+mmIDrzohF9LX3eSpBff71fF5VO+LkeRTXFTXBy39/EhlTJdV7ISjz9jhlHxu3VOF41lOkhUBGnqliUhLByXSK6+t/A9elc62qxnzTwmn2eATftGaEkGFSjPxxG3JV7RORLTj33LD4D3xKk3ZEZB+cV1WwwfD7RxWn6HbC9Z1nkqTnweNtnJKId57jPdy93nc69+rNOCXxGRH5j4DF8BlcA/eSqqbqKnoesADnMfNd79MrIk8CtwI3em+umWBr4HfMB1pVu3BdfgCIyN5APCvBJ9EzEea+3iEi7+AsnTDPTa
okuxeqotJ9GTYmqDNXk0ATndtck+p5jEkxdTcFJ9PNVtX5qnqMqv4igYIA1+UQj+DxN6qqJPnMS0NuSZ5lXPl9rscpiOdx5ne9qjaoapOqzsINho+n/mRUZ6ne8XA77oHYGTgskO53NaVqRaCqbTi31GNx4yUv4BTwETh35ZdEJCPWmqds3vT+vicTdXqEedMtxOs5HrJ130dTKFZExigmJZENWgO/90mxrP/GELdBEJGJRPYrh2GT971b2AJeTJn34W7Qj6vqfd4bZpB4XRLjxT8PyeLa+N16OXvTUtVOnNMBeIpBRPbAnStldKwp1XpVVe9X1W+q6gG4N9mv4jxo5pPZ/u9Hve9jRCTUm9848a9P3PtPRCYxatkEr6c/ua8mQf2N6YsWl6TPIrkZQwpDmHPUkAtBwlLWSkJV1zKqKE5Ksbjfx90kIgvi5PkgqXeT+P317xGRuSHLjDTACcYxjkpQ3h+LSedtyz8Pk0Uk5qC0d37mRuXPFb618EmvkfWtiCdV9c04ZVJCVbep6tU411FwcbAyxdXedxPwxQzWGw//+uwtIvG6SZcw2nYEr2e79z1LROKNhxw0PvFi4stwoIjUxckznmsynucjGv8cxQxK6nUX1udAjtCUtZLwuMH7/rqILIyXyfOoCr4FvQCs9n5fECs/bvZtqjyE6z+twM29CIM/wNgkIjNjyPJu3OBtPHyPnSlhhQwQPA8Xxslzsfe9Dng2jX2Mh3tw52cabl5A2l1NIjIhQeMHbvwHMthVo6pPMeqNcqmIHJ6puuNwL27i5kTg/OiN3vH7yvCRKKeQV3FjhxW4yZ3RZfciO6Fz/o6TuRY3CTV6v9W4IH3pMp7nI5qXvO8T4mxP1GZkUo7QmJKAn+EGwCcDj4nIqcG3ERHZRUS+DDwHnOine4OgF3t/TxeRn3uB2BCRJkYntu1IRRjPS+Y87+/JInKr93bhyzNbRL4sIr8MFGvGDQwLcIuIzPfyVonISbjB/ejupyC+V9JJUYowjLzKaKNxgohcKSLTvP1P8+T0G+aLVDWeB1lW8Nxqb/f+/hcuBtIgbpJiTETkv71w9INRm6YCq0XkuyKySEQmePkniMjRwI+8fPdF1XdUIMT9h9I4jH/HXaPJwH0i8msROVhERqxUEakTkWNJ7O6dFM876Gfe33NE5Dv+8+CF5L8VNyYzDPxnVNleRl2tfykiH/QVq4gch/OCSul5CClzF6Puuj8SkW/5wT697sW7GJ9Xn/98fFZEEnUThcG/7/YXkcv9501EmkTkKtzE1J44ZX053i0ii8cpR3jCTKbI54cQYTnilDstbDlcP/IrjE5CGcJ5lgTDYChwaoyywbAcgzgPrEyE5TiX0XAAivNUSBSW48So/J2MTvx6ExevZ8xkOK/s3oG8AzhLZh3wRCDPknjldexkomD4Ez8tWViOeXG2z/PzjOMeOorI6/i3JPn9YxmMSp8eVU8/Lq5RMHzK68CcBPuPOyEriUwNuAY6uH//PLcTGQJlC84VOnriYtKJXF6+Cly8L78+37MwuN+Ys3Vxz9LWQN5geJjngLNJPpku5mQ3r+4x18XbFh2Wo5/RkBhpheUI1H1MoKwfw2od8LsY98y1IeoLRiEIhrAZxLlXx528h5vpH7zO67zP4kycx1gfsyQAVV2NC8fxdVyQtm24h3IQWIHzYvkwcFOMsmfiGuBncDeQ4HyyP6aqv4zOn4JMl3syXY+7CapwnjorcBOGzonKfwfOcnkAp1CqcMrhUq+etxPs61Wcq+i9uK6ZWbiBy9CLOanqRbiJgHfhbt46XGNxNy6W0XfD1pUFHsbNpfFJuavJ4x3gX3Dn/x+442zENYTP4rrbDlDVRK6YaaGqnar6aeC9uDArz+PuU7//ehWuYT8F1zj8r6ZptanqkKqeggtN/iDuhaMO52J6M65BWhan7GrcXJ8/4s5PBa5R/RHwIRJbtGmjzgL/V5wSegmnyIZwiuNQ3H2Zbt3344IdPo5TeHNxz0cqru1BvoWLFLEC12YM4569JeoiWyfiBJwX3Vrctd/N+4zXwomLeJrFMAzDMMZgloRhGIYRF1MShmEYRlxMSRiGYRhxMSVhGIZhxKWYAvyNMH36dJ03b16+xTAMwygqnnvuuS2qmlJQxqJUEvPmzWP58uX5FsMwDKOoEJGUQ9FYd5NhGIYRF1MShmEYRlxMSRiGYRhxyZmSEJEaEXlWRF4UkZdF5Ide+u4i8oyIvC4it3hrMBiGYRgFQC4tiT7gCFXdD9gfOE5EDgZ+DixV1XfhYuPkIma+YRiGEYKcKQl1+MG9qryP4oLS/clLv5HsxJs3DMMw0iCnYxJeXPkXgM24aKVrgHZV9eP2v83oCmaGYRhGnsmpkvBCEO+PC0H9PiDWSnAxw9KKyBkislxElre15WyZZMMwjMLgySfhqqsgx5G78+LdpKrtuAXeDwamBFbY2hkXsz5WmWWqulhVF8+YkdKEQcMwjOJlaAj22w8OOQTOOgu6srIkR1xy6d00I7C8Zy1uta5m3CI/n/Synco4FgcxDMMoKf72N6ishBUr3P8HH4T6+sRlMkwuw3LMBm70FlKfANyqqn8VkVeAP4rIfwP/BH6TQ5kMwzAKj74+2GUX8LvWDzkEHn8cJuS+8ydnSkJVV+CW0YxOfwM3PmEYhmH8/vfwuc+N/v/HP2Dx4ryJU5QB/gzDMEqOzk5obBz9/6lPwS23gEj+ZMLCchiGYeSfK66IVBCvvQa33pp3BQFmSRiGYeSPzZuhqWn0/1lnwS9/mT95YmCWhGEYRj648MJIBfH22wWnIMCUhGEYRm5Zt851I/30p+7/j37kJsjNLcxgE9bdZBiGkStOPx2uv370/9atMHVq/uQJgVkShmEY2WblSmc9+Ari17921kOBKwgwS8IwDCN7qMLxx8O997r/NTXOepg0Kb9ypYBZEoZhGNngySfdDGlfQfzpT9DTU1QKAsySMAzDyCxDQ3DggaPxlvbYA159Faqq8itXmpglYRiGkSkuvDAyIN9DD8GaNUWrIMAsCcMwjPETHVIjjwH5Mk3xH4FhGEY+OfbYSAVx9dXwxBMloSDALAnDMIz0eOMN2HPPyLTh4YKIt5RJSkPVGYZh5BKRSAXxl784d9cSUxBgloRhGEZ4Hn0UDj88Mi3Ha07nGrMkDMMwwiASqSDuvLPkFQSYkjAMw0jMjTeO7UZShRNOyI88Oca6mwzDMOIRrRzyvJRoPjBLwjAMI5qLLoptPZSZggCzJAzDMEYZHoaKisi0N9+EXXfNjzwFgFkShmEYACedFKkgamqc9VDGCgLMkjAMo9yJFZm1owMaGvIjT4FhloRhGOXLnntGKohDDnHWgymIEcySMAyj/Ghrg5kzI9MGBlwEVyMCsyQMwygvRCIVxFe/6qwHUxAxsbNiGEZ58OqrsHBhZFoJBuTLNGZJGIZR+ohEKogrrijZgHyZxiwJwzBKl0cegSOOiEwrg3hLmcQsCcMwShORSAVx112mINLAlIRhGKXFDTfEDqnx8Y/nRZxiJ2dKQkR2EZFHRKRZR
F4WkW956ReLyAYRecH7HJ8rmQzDKDFE4N//ffT/8uVmPYyTXFoSg8B5qroQOBj4hojs421bqqr7e5+/5VAmwzBKgXgB+d773vzIU0LkbOBaVVuAFu/3dhFpBubmav+GYZQgFpAv6+TFu0lE5gEHAM8AhwBnisgXgOU4a+OdfMhlGEZ+aG7p4N6VrWxo72HulFqOW9TEwtmNiQvNmgWtraP/a2thx47sClqG5HzgWkTqgD8DZ6tqJ/ArYE9gf5ylcVmccmeIyHIRWd7W1pYzeQ3DyC7NLR0se3wtHT0DzG6soaNngGWPr6W5pSN2ge5u17UUVBAdHaYgskROlYSIVOEUxO9V9XYAVW1V1SFVHQauAd4Xq6yqLlPVxaq6eMaMGbkT2jCMrHLvylYaa6torK1igsjI73tXto7NLAJ1dZFpFpAvq+TSu0mA3wDNqnp5IH12INuJwMpcyWQYRv7Z0N5DfU1kz3d9TSUb2ntGE9asGTsw3d9vnks5IJdjEocAnwdeEpEXvLQLgZNFZH9AgXXAV3Iok2EYeWbulFo6egZorK0aSdveO8jcKbXuT7RyeP/74emncyhheZNL76YngFiBUszl1TDKmOMWNbHs8bWAsyC29w7S0TPAv/euAdkrMrMF5Ms5FrvJMIxQpOWBFIKFsxs547DdI+q++IRFkZmOOQbuu2/c+zJSx5SEYRhJ8T2QGmurIjyQzjhs94wpioWzG+Gqq+DTZ0VutHGHvGKxmwzDSEpKHkjpIgJnBRTE979vCqIAMEvCMIykbGjvYXZjTUTaGA+kdDn8cHj00cg0Uw4Fg1kShmEkZe6UWrb3DkakRXggpYtIpIK49lpTEAWGWRKGYSQlngfSZw7aOb0KY3komXIoSMySMAwjKb4HUmNtFS0dvTTWVqU3aD00NFZBPPmkKYgCxiwJwzBCMeKBlC5mPRQlZkkYhpFdtm0bqyA2bDAFUSSYJWEYRvbIsvWQrQl+xihmSRiGkXlWrhyrIHp6Mq4gUgoxbqSFWRKGYaRNzDf5OVPGZsxC11Jwgh8w8n3vylazJjKIWRKGYaRF9Jv87If/PlZBDA9nbewhVIhxY9yEtiRE5CPAN4A9gGNV9S0R+RKwVlUfypaAhmEUJsE3+XOOiYrW+q53wapVWd1/0hDjRkYIZUmIyCnArcDrwO6Af1UqgG9nRzTDMAqZDe09nHj1f41REOff+kLWFQS4CX4dPQN09AwwrDry+7hFTVnfdzkRtrvp28CXVfUcIDg3/2nc2tSGYZQZl356f/b7260j/1d89DNcfNfKnL3JZ2yCn5GQsN1N7wKeipHeBdjisoZRTsybB2++GZF02X2vphWqY7wurOOe4GckJayS2AgsAN6MSj8MWJNRiYyiwXzUi4eMXasot9bW7/+Imz/0aVq8ej9z0M6h6832GhVGZgirJJYBv/QGqgF2EZFDgUuAi7MhmFHY2ANePCS7VqEUSJxJcU3AOWnKZS6sxUGoMQlVvQS4HXgAmAw8Avwa+LWq/m/2xDMKlZwsQmNkhETXKumEtMHBsQri4Ycz4tZqLqzFQWgXWFX9noj8GNgHp1xeUdWurElmFDRZXYTGyCiJrlXCt/kYk+KaN7Zn7C0/rAurdWvml7AusLNEZGdV3aGqy1X1WVXtEpGdRcT8zcqQrC1CY2ScRNcq1tv8zN7OMW6ty256hIvvWpnRsBdhXFgt9Eb+CesCexPwkRjpx3rbjDLDfNSLh0TXKlqBnHPMXnz9Mx+MKL/0/tfobpqT8S7FMC6s1q3paG7pYOkDqzj/thdZ+sCqnCrJsN1NBwFnxkj/P+AXmRPHKBb8BzzYDZCKZ0sxUqzdHsmu1bLH17JL8/N88TtfiCj3nd89w4wZjRFvkpnuUkzmwmrdmvl3EgmrJCqB6hjpNXHSjTKgnHzU8/2gjpd412rh7EaW/tsBYwuo0vTAqryHvbDQG/n3Agvb3fQM8LUY6d8A/pE5cQyjMCnJbo8LLhjruRQIyFcIXYqFIEO+ybcXWFhL4nvAwyKyH+AH8zsCOAA4KhuCGUYhUXLdHiEWAyqELsVCkCHf5NuaCqUkVPVpEfkA8B/ASYAAzwNfV9UXsyifYRQEmXxQ8zq20dgInZ2RaQnmPBRCl2IhyJBPjlvUxLLH1wLuxSSd8CfjIfR6Eqr6oqp+TlX3VdV9vN+mIIyyIFPdHnl16RSJVBCVlbbOdBGQ70CGcS0JEZmqqtv834kq8fMZRqmSqW6PvAxCZnmdaSP75NOaStTd1CYis1V1M7AFiHVXiZdekQ3hDKOQyMSDmvOxjWgFcfzxcM892dmXUZIkUhJHAL6FcPh4dyQiuwC/BWYBw8AyVf0fz0q5BZgHrAM+rarvjHd/hlGI5GIQsrmlI2frTCeToxjnlRiRxFUSqvoYgIhUAvsCd6rqxnHsaxA4T1WfF5F64DkReQA4DXhIVX8mIt8BvgNcMI79GEbeSNYwZnsQ8tV1m1m4e+Q4yU1n/pjFF36DhWnKnA7FPq/EGEU0xNuFiHQD+6hq9HoS6e9Y5C7gKu+zRFVbRGQ28Kiq7pWo7OLFi3X58uWZEsUwMkKwYayvqWT91m5ea+1i551q2XdO40jjm7U37BhjD0vvf23Ecjnn6AVJZfaV1ngb86UxJuIlksPIDSLynKouTqVM2HkSTwPvZeyiQ2khIvNwcyyeAZpUtQXAUxQz45Q5AzgDYNddd82EGIaRUYKD0lu6enmt1QVJ7vQ8oYJv0hl9m379dVgQ2fDeeO3f2LbrnkDiMY9sDaSX3LySMiaskrgGuFREdgWeA7qDG1X1+bA7FJE64M/A2araKbE8L2Kgqstwix+xePFic80wCo5gw7h6czfVlROorpxAV99Q9ryYYjw/F9+1MvSYR7Yac3/spX9wiNVt3XT1DlJVIew7x1Y7LjbCzpO4GTewfDnwGLA88AkdlkNEqnAK4veqeruX3Op1M+F9bw5bn2EUEsGIqp29A1RXTqBvcJg6L6RCRt+kb7llrILo7KR5Y3tK8zmyFfL9uEVNvLm1m2fe2EZf/yCVE1y9rZ19Fua7yAirJHZP8NkjTAXiTIbfAM2qenlg093Aqd7vU4G7QspkGAVFcMJdfXUlnb2D9A0OM3/GZCCDXkwi8G//FpmmCvX1KU+8ylZspIWzG5nTWENdTSX9w0rtxEo+sOdUdpk6qbjjXZUhYcNyZGIs4hDg88BLIvKCl3Yh8DPgVhH5IrAe+FQG9mUYOSc44a5xUhWdPYMsaKpjWl31SOM7Li+mM86Aa66JSFp636sc9+5ZEZ5LqYx5ZDM2Ut+QctiCGWzr7mP15m6eX99OfXUljZOqkhc2CoZQ3k0AInIgcDZu+VKAZmBpKuMRmcK8m4xiIKNeTDHGHi6779XQ3khhZImVB0j7GJY+sIp1W7p4rbVrZHyms3cQFJb+237mCpsH0vFuCusCewpuItzDwFNe8sG4CXenqervUpR1XJiSMMqGhgbYvj0iKXpgOplraRg311h53tzazQQRdpk6KS332OaWDs65xYV3a6ippG9wmL7BYRbM
rGP3GXXmCpsHsukC+2PgP1X1J1E7/C7w30BOlYRhlAUxrIfzb32B2SmuLRDGzfXela0MDw/T3NJJZ+8ADTVVtO/op6aqgkVzG+OWS8TC2Y3svFMtnT0DdPUNUVdTyb5zGphWV22usEVEWCUxA7g1RvptwH9mThzDMBIF5JubxmpxYdxcX2npYP3WHdRUVVBfXUnvwBAtHb1Mm1yVsFwy9p3TGHNSXTmtLFfshPVuegRYEiN9Cc4l1jCMTBCtIM4+OyLm0oKmyTz9xlb+umIjT63ZwrotXUm9kcK4uXb0DCIi1FRVjHxXVUygu38oYblk2MpyxU9YS+LvwE9FZDFu9jW4MYmTgItF5CQ/Y2D+g2GULSkPWocI593c0sGDzW0smFnHps5etnUP0NkzyFlH7pmw7jDxohpqKuncMUDvwNDI/I766gp6B4edS2+acaZsZbniJ+zA9XDI+lRVsx423AaujUyTSU+klOIh9fdDdXVk2u23w4knjql3PPGQkh3f0gdWsbati03b++jqHaSuppJZ9dXU1VQyo77GIrmWCFkbuFbV0CvYGUaxkemIpaHjIaW4GFA24yE5a2MH+8xuiFBsn//AbqYUyhxr/I2yJ9ioTxAZ+R2cGdzc0sHSB1Zx/m0vsvSBVQlDS2xo76E+kQfS22+PVRCvv550vYd0Q2iEWTI130tkGoVL2DEJwyhZkr2hp2ppJFxYKAXrIbqLaEHTZB5sbhuRL+wYQVjLJp9LZBqFiykJo2QJO86QbLW4e1e2MjQ0zCstnRH99fHmC8QaKG565v/42k++Fplxxw6ojW0FxFJMDza3cdTCGaxq7Y4YBAb43u0r+OdbHSjKAbtMiegmsjSF3PcAACAASURBVLDdxngwJWGUJKm8/cdq1N/c2s2cxhrOv+1Flq/bCrhuqLrqCvoGhljV2sWOgaEYex7r0XPpp/cfmylJ11K8t/9Vrd0Rg9TNLR1cet8q1m7ppq66AkF45o1ttHT08u3j9mLh7MacLJlqlO5yrTYmYZQkYcYZfKL74/sHh5ggQlVlBbMba+jpH2ZbVz9DwzoyhwDBxSGKw8LZjZxzyyVjFYRqqLWmk45rBI5zS1cf9TWVDCu0dfezbUc/r2zs5KanXFxOm6uQfcKM+xQroSwJEXkDOEhVt0alTwGeV9VQ4cKN8iKfb1bj6WJZ3dbN7IbqkTfvydUV9A4M0ba9j0kTK+gbHEZVaaxN8PiEnPcQ7/yEeftvbung/lc28da2HQwNKwNDw0ysnMCkqgr6h4Z5YvVWmls6bK5CDsjWCn+FQFhLYh4Qa/5DNTA3Y9IYJUO+36yiPYG2dPXy+Ko2Xt7YMcY7KVrWbV39vLppO1u6egGYXl9DU0M1Q6ps7xukpqqCvWfVs0+sh3/ixLEKIob1kOz8JHv798sPDyn9g8P0Dw6jCsPDTkYBdpo0ajktnN3IOUcv4NJP7cc5Ry8o+oar0Ahr+RUjCS2J4Exq4KMiEnzCK4AjgXVZkMsocnL1ZhXvbTw4ztA3OMgzb7yDAAftvtOY8YloWafWTaSzZ4DVm7uZXlfD/BmTeWZ7H3On1HLYghkjXkVjumtS8FxKdn6Svf375asqJ1BdWUF3/yCiMDisTBBhcFjZZ059STRShUj0fTexQtjeO1iS4z7Jupv+5H0rblW5IAM4BXFehmUySoBseNTEcwkNDk5fet8qmhqq6R9SJlVNoH9wiBff6qTei0A6o35UJr9BjpZ1/ozJPP9mO1u6+hhWZWJlBbtOm8ScxhpaOnrHdtekOCku7PlJ5JLqlx9SZddptazd0k1v/zAoNNRW0lBbRXVlJTPrbYGfTBPLKaK1090ru02bnHYIk0IloZLwZ1qLyFrcmMSWnEhlFD2Z9qi5Z8UGrnxoDYPDytTJVfQPDHHfyk3sNatuZB8DQ0O8tqmTlRuUnSZXUV1ZwfS6auZMqWHh7AYmBBrzYIMcLeuM+hoWNNWxaXvfiFLwPYXGEK0gTjoJ/vznpMcz3vPjl2+oqaJ3YIg9ptfx9js9VFdOoKmhGhGJ2UiVqgdOLolpBU6dxMDgEI21VSU37hM2LMfu2RbEKC3CBJULS3NLB1c+vAYEpk6uom9wmFWbu+juH6Slo5d50+sAWPFWB9t7B5kwQWiocfnWbulm0sQKdt5pUtwGOZasFRUT+OHH94n/kMewHpo3toduFMZ7fvzysxqqeXXTdkSEKZOqqKoQ2nsGOXT+ND4XFVIj0+FHypV4VmBLx2BJLqQUauBaRK4TkTHdSiJyrohcm3mxjGInk2Ee7l3ZysDQMA01lSMuqNWVExgaVrZ294/ka+nspbJCmDRxNNx1XXUFO/oHEw4CpyRrX98YBXHPdy7l4rtWpjQwP97z45efN72OXadNoqG2ivkz6zj+3XO4+vMH8uOT3jOmrlTcgo34pBsepVgJO5nueODKGOkPA+dnThyjlIjVp55Od8eG9h6mTZ5I3+Cwm6MAVFdOcHMZKiaMhLIeGlZUYadJE0fKClBdVZHUBTRUSIoY1sPS+18DwC+ZysB8sn0mO1fR5f38v3liXcz8NvM6M2TSSi4GwiqJKUBXjPRuYGrmxDFKmXS7O+ZOqWXL9l7WtHU7V08U1DX+n//ALuzo15GGtL27n4GhYd7a1k3PwDAKfHD3qeOLS7RmDcyfH5H08yvupHLh3hGmeCYb3FTPVZj8NvM6M5TbvJOwSmIVzpr4n6j0jwKrMyqRUbKk6xa7oGkytz//NtWVQnff0MjM571m1fHqpu6RhrC5pYMf3PUyq1q7EIGJFULVhAn0DA6PTCpLmTieSxPTWEY0FVI9V2Hyl9sbcDYpp2CIYZXEZcCvRWQmrosJ3ByJs4FvZEMwozAZj3dMut0dq1q7OWCXKSxf/w61EyuZNLGCSRMr6B9U3mjr4txbX+SYfWZx3KIm5s+so29wmIEhpa6mkvkzJjOxsiL1+Rl33w0nnBCZ1tUFkycD2W9wUz1XYV1qy+kN2MgMYb2bbhSRGuAi4Lte8gbgXFW9PlvCGbklmQIYr3dMvO6OiRXC0gdWxd3vhvYedps+mdVt3ew8xQ1Kd/cN8Oa2HcybNgmUEVm29w5w2IIZEe6uw6qpdQOFmPeQ7QY31a6hsPnL6Q3YyAyhA/yp6tWqugvQBMxS1V1U9dfZE83IJWHCaIzXOyZWqIk3t3bT2tmXcL++N0ldTSV9g24l3bbt/dRWOYVR78nRWFtFZ+9g+p4nX/xiqJAaPtkMdZFqUD4L4mdki5SjwKpqm6puzoYwRv4IowDGG58mltvnnMYadpk6KeF+/QZwVn01fQPDdPQMsKN/kIZapzTmz5g8IktjbWV6jaUIXHddZFqIaK3ZIlUXWVtZzsgWcbubRGQF8GFVfUdEXsKF5oiJqr4nG8IZmSNZV1KYPu1MeMdEd3ecf9uLTK1LrHgWzm7kqIUzuPGp9XT0DiACDTWVTJoYGWpje+8g+3hxm0J3A1VWwlDUuhB5VA5BUu0asq4kIxskGpP4M9Dn/f5TgnxGgZMp98hsDNaGDYn9YHMb+8xu4P27T2V
77yBvbdsxEldpWHVEloPmTQk/sJ5GzKVcYiE0jEJAtIAeirAsXrxYly9fnm8xioalMdw1/f9+GIGgIgkqgOgui0w3XPH2G1ymc/22Hcyqr2b3GXUR8g8MDjG9viZmwL9Ex5BJ5ZCthjzs9TCMVBCR51R1cSplbPnSEiJeg5VJ98hMd2nE2u9B86ZERHf95/p36NwxQF1N5UjXUqxYOUsfWJV8bkG0gvjEJ+BP6RnK2YyFVMqL2BjFRaIxibUkGIcIYivT5Z9EDVahu0dG7ze6sZ9eV01HzwCr27ojxh+i5U+oDLPQtZTNhtxCaBiFQiLvpquA//U+NwLTgDXA77zPGi/thjA78oIEbhaRlYG0i0Vkg4i84H2OT+8wSofmFrdy2vm3vThmBbVEJPJOKjb3yGgvqvkzJ6OqbOvqTyh/rMBrPZ3dY9eZvv76jIw9ZHM1snILImcULnEtCVW9zP8tIjcAP1fVnwTziMh3gX1D7usGnOL5bVT6UlW9NGQdJc14ui8SvXkW8kzbWAsJrd+2gxfWtzO1biLzZ0xmRn0Ne8+qp6WzL/aiPx7RA+vnHbv32B1mcAwum7GQLISGUSiEHZM4CTgwRvptjM7AToiqPi4i80LurywZT/dFsgYrVsTQRLOcc0G0Ulzb1sXtz7/NnjMm0zEBOnsGeP7NdhY01SVf34HR8Y2nHljO6aceFbnx5Zdhn30yKn82G/JCVuxGeRF2Ml03sCRG+hJgxzhlOFNEVnjdUTvFyyQiZ4jIchFZ3tbWNs5dFibj6b5IpUspzOzqXBDdRbZpex+TqyvpGRhm8bydaKytYmB4mE3b+0IPBi+cM2WsglDNuIKA7E5gM/dXo1AIa0ksBf5XRBYDT3tpBwOnAhePY/+/An6EGyD/ES6Q4OmxMqrqMmAZOBfYceyzYBlP90Uqb56JLBb/OxeNU3QXWVfvIPXVFXT2DjC9robpdTUMq9LS0Ztchj/+EU4+OTJtxw6ozW4ffjYG+20FOaOQCBvg7xIRWQd8C/i0l9wMnKqqt6a7c1Udib0gItcAf023rlJgvN0XYRuseOMXr7R0sH7bjpw1Tr5SHBgaYvXmbja276B3cIiaygqefmMr82dOpqqiIrmSLPBJcali7q9GIRF6noSnDNJWCLEQkdmq2uL9PRFYmSh/qZOrfuh4FktHzyBzp0xK2Dil2w0Sq9xxi5q49L5VrN3STYXA4JAyNARDorzT3cdTa/rYffpkPnNsnHWDTzoJ7rgjMq2IlYOPub8ahURoJeGFCv8YsCdwtaq2i8iewDuqui1E+T/gxjCmi8jbwA+AJSKyP667aR3wlZSPoMTIxVyFeBZLQ01lwjGRdLtBEpVraqhmS1cfG9p7mFxdSVNDBV19Q2zbMcDcKbU0NVTHrrvErIcgtoKcUUiEUhIiMh94EKjDLWV6G9AOfM37/6VkdajqyTGSfxNaUiNjxLNY7l3ZmrBxSrcbJFG5/iHlsAUzeKh5M3XVLvT3tDple98ghy2YQUtHb2RlJawcfMz91SgkwloSVwD345RCeyD9bqAoFh0yb5FI4lksiRqndLtBXt7YQWfPAF19QyOrxU2rqx65Fh09LuRG38AQNVUV9A0O01BTNfbtOUUFUazX3NxfjUIirJL4IHCwqg5J5IO6HpiTcakyTL69RYqlsUrWOKXTDdLc0sHb7zgl0uApgufXt7NgZh27z6gbeWueVV/NqtYu+gaHUVXmTZs0qqDSsB7yfc3Hi4X9NgqFVAL8VcVI2xXIrXN9GuTTWyQXjVUmlVCiximdbpB7V7ayV1Mdr3kKoLpyAn2Dw6xq7eJrh+8ZoZh2DAzR2TtIY20l86Y7BbJwzpTICj/1Kbg1uf+EeQgZRmYIqyTuB84Fvuj9VxFpAH4I3JMNwTJJPr1Fst1Y5fKNOdigv7yxY6RB9+dXxNrfhvYedp02mbqaSlZv7qaz1w2QN06qGskfUzGNc+zBPIQMIzOEVRLnAo+IyGtADXALMB9oZXTeRMGST2+RbDdWmVBCqVgifvr6bTvYeadJ1NdUJlRM/rn3J8cBY65FBD09MGlSZNqNN8IXvhDqWKL3m841L5buQcPIBaHCcqjqRmB/4OfA1cBy4NvAgapa8DEy8hkFNdvRPMcbiTSdEB1h1sP2CXPu/ThSiIxVEKopK4iw+41Fqucj3ai9hlEsJFUSIlIlIrcAc1T1OlU9U1W/rqrXqmpR2O75XCQ+2wpqvEoolQbfJxXFlOzcN7d0cOcfHuKcY/aKKLfmsWfG5dqa7jVP5XwUSgwsw8gmSbubVHVARI4hZLTXQiWfC+pk051xvD716XSHpdqVk+jcL5wzhYVRaRfftZLGvirOCXUE8UnnmqdyPmxw3CgHwo5J3I4LF27rPqTBeBVUoj7y8SqhdPrug4qpd2CQ5pbtvLNjgEPnT6O5pSPcvv/wB/jsZyOSrrzrnwzWTqJeNW8DzKmcDxscN8qBsEpiPXCRiByKG4/oDm5U1cszLZjhCOO9NB4llI4l4iumm556kyfXbGOnSVUcMn8qVZUV4TyrYnguLb3/tZHf+QxBkcr5sPAZRjkQdj2J04B3gPfgQnmfFficmRXJDCC9MYNUSLfvfuHsRmbU13DE3jNZstdMZtbXJpftzDPHKIjmje2c88d/FszSqqmcj2JbFtYw0iFsqPDdsy2IEZtcdGmka4mkJFuceQ8LoeBCUIQ9HxY+wygHUplxDYCI1AGoalfmxTGiKeQujVCyLVrklg4NEuW1VMwhKIpZdsMIQ9juJkTkbBFZjwvD0SEib4nIOSKxXhGNTFHIXRpJZRNJqiAMwyhswoYKvwQ4A/gF8JSX/AHg+8Bs3MQ6Y5z4XkyvtHTQ0TNIQ00l+85p5KiFM1jV2l1wXRpxu1ui4y2BKQfDKFLCdjd9CfiSqv4pkPawF6bjakxJjBvfi2l4eJj1W3cgInTuGGBSVQXrt+0o2OilY7pbog3Lb38bfv7z3AplGEbGSGVMYkWctNBdVuVCOrF/fC+m5pZOaqoqqKmqoHdgiE3b+9hndkPhT9Aqg8WADKMcCdvA/xb4Roz0rwE3ZU6c4ifdUA1+qIvO3gGqK91lqa6cQFtnL69s7ODOFzYUZmygvr6xCuIvfzEFYRglQlhLohr4rIgcCzztpb0ft+DQ70Xkl35GVf1mZkUsLtIN1eB7CjXUVNHrrdD2zo5+OnsHqa6qoKm+uvAWzjHrwTBKnrCWxN7A80ALsJv32eSlLQTe7X0WZUHGoqG5pYP7X9nEM29s5ak3ttK23a3PHGZeg+8pNKuhmt6BITp6BtjW7dZeAHhXU13GJ9KlTWvrWAWxfr0pCMMoQcJOpjs824IUO34308SKCajqyDKdB+46hYmVFUnnNUSv0NbR48JBzKiv5l1NdSNrMeQ9NpBZD4ZRVqQ8mc6Ijd/NtGhuA8+92U51pVBdIby8sZM9ZtSFisoa7Sm09IFVhTOR7tln4f3vj0zr64OJE3Mvi2EYOcM8kzKEP/A8va6G9+42hZqqCvqGhhkY0rTHEApmIp3IWAWhagrCMMoAsy
QyRDBEhb9Up/8/3UHmvMcGuv56OP30yLRA15It82kYpY8piQwx3sV/4pGN2EChGvfosYfddoN16yLqSBbC3DCM4se6mzJEPpdITYWk8zhOOGGsglCNUBCQ/RDmhmEUBmZJZJBiiAiacB5HdMylL30JrrkmZj22KpthlAemJMqMWI37OWccS2PrhsiMSdxaCzmEuWEYmcO6m8qMuVNq2d47OPL/nGP2ilQQl18eat5DwXheGYaRVcySKDP8AfaLT4gxOT6FSXF597wyDCMniBbhbNnFixfr8uXL8y1GcdLfD9XVEUlr73mI3Y8/Ik8CGYaRK0TkOVVdnEqZnFkSInId8DFgs6ou8tKmArcA84B1wKdV9Z1cyVR2xAmpYQuYG4YRj1yOSdwAHBeV9h3gIVV9F/CQ99/INJs2jVUQmzZZzCXDMJKSM0tCVR8XkXlRyScAS7zfNwKPAhfkSqaywALyGYYxDvLt3dSkqi0A3vfMeBlF5AwRWS4iy9va2nImYNHyzDNjFUR/vykIwzBSomi8m1R1GbAM3MB1nsUpbMx6MAwjQ+RbSbSKyGxVbRGR2cDmPMuTd8YVNO/aa+HLX45MM+VgGMY4yHd3093Aqd7vU4G78ihL3kl3fWzAWQ9BBbHHHqYgDMMYNzlTEiLyB+ApYC8ReVtEvgj8DDhaRF4Hjvb+ly1pBc276KLYAfnWrMmusIZhlAW59G46Oc6mI3MlQ6GTctC8aOXw/e/DD3+YJekMwyhH8j0mUfYExyDWb9tB/8AQu8+oG9keM2jeEUfAI49EplnXkmEYWSDfYxJlTfQYxOyGav75Vjtr27riB80TiVQQt99uCsIwjKxhlkQeiV7bYd50Z0G0dPYxsaoiMmieubUahpEHTEnkkVhjELtOm0xVZQWXfmo/lzAwMFZBrFgB7353jqQ0DKOcMSWRR5Iu3GPWg2EYecbGJPJIvIV7PtokYxXEli2mIAzDyDlmSeSRWAv3jHcxIMMwjExiSiLPLJzd6AamN26EuXMjN/b3Q1VV7IKGYRg5wLqbCgGRSAXx8Y8768EUhGEYecYsiXyyYgXst19kmnUtGYZRQJglkS9EIhXEsmWmIAzDKDjMksg1f/87HH98ZJopB8MwChRTErkk2q31/vvh6KPzI4thGEYIrLspF/zqV7HDeZuCMAyjwDFLIttYSA3DMIoYsySyxbnnxrYeTEEYhlFEmCWRaYaGoDLqtG7cCLNn50cewzCMcWCWRCZ57LFIBdHU5KwHUxCGYRQpZklkgsFB2HdfWLVqNK2rCyZPzp9MRkEQXHlw7pRajlvU5MKwGEaRYJbEeLnzThc+w1cQjz/urAdTEGVP9MqDHT0DLHt8Lc0tHfkWzTBCY5ZEuvT0wMyZzmIAOPJIeOCB2GtAGGVJ9MqD/ve9K1vNmjCKBrMk0uG662DSpFEF8eKL8OCDpiCMCDa091BfE/keVl9TyYb2njxJZBipY5ZEKrS3w047jf7/3OfgppvyJ49R0CRdedAwigCzJMJyySWRCmLNGlMQRkLirTx43KKmfItmGKExJZGMlhbXjXTBBe7/+ee7gek99sivXEbB46882FhbRUtHL421VZxx2O42HmEUFdbdlIjzzoPLLx/9v2mTm/tgGCEZWXnQMIoUsyRisXq1sx58BXHJJc56MAVhGEaZYZZENKecAjffPPq/vR0a7U3QMIzyxCwJnxdecNaDryCuv95ZD6YgDMMoY8ySUIUjjoBHH3X/d9rJBeSrqUm5KgvBYBhGqVHelsRjj8GECaMK4u67Ydu2tBWEhWAwDKPUKAhLQkTWAduBIWBQVRdndYfRAfkWLnSLAUWH+E4BC8FgGEYpUkiWxOGqun/WFUSsgHyvvDIuBQEWgsEwjNKkkJRE9nnsMTjxRPf7yCNheBgOPTQjVc+dUsv23sGINAvBYBhGsVMoSkKB+0XkORE5I1YGETlDRJaLyPK2trb09jJrFhxySFYC8lkIBsMwShFR1XzLgIjMUdWNIjITeAA4S1Ufj5d/8eLFunz58twJGBLzbjIMo5ARkedS7dIviIFrVd3ofW8WkTuA9wFxlUShYiEYDMMoNfLe3SQik0Wk3v8NHAOszK9UhmEYBhSGJdEE3CFufKASuFlV782vSIZhGAYUgJJQ1TeA/fIth2EYhjGWvHc3GYZhGIWLKQnDMAwjLqYkDMMwjLiYkjAMwzDiUhCT6VJFRNqAN9MsPh3YkkFxioVyPG475vKhHI87nWPeTVVnpFKgKJXEeBCR5VkPIliAlONx2zGXD+V43Lk6ZutuMgzDMOJiSsIwDMOISzkqiWX5FiBPlONx2zGXD+V43Dk55rIbkzAMwzDCU46WhGEYhhESUxKGYRhGXMpKSYjIOhF5SUReEJHCW7UoA4jIdSKyWURWBtKmisgDIvK6971TPmXMBnGO+2IR2eBd7xdE5Ph8yphpRGQXEXlERJpF5GUR+ZaXXrLXO8Exl+y1FpEaEXlWRF70jvmHXvruIvKMd51vEZGJWdl/OY1JiMg6YLGqluykGxE5DOgCfquqi7y0S4BtqvozEfkOsJOqXpBPOTNNnOO+GOhS1UvzKVu2EJHZwGxVfd5bk+U54F+B0yjR653gmD9NiV5rcesoTFbVLhGpAp4AvgWcC9yuqn8UkV8DL6rqrzK9/7KyJMoBb9nXbVHJJwA3er9vxD1UJUWc4y5pVLVFVZ/3fm8HmoG5lPD1TnDMJYs6ury/Vd5HgSOAP3npWbvO5aYkFLhfRJ4TkTPyLUwOaVLVFnAPGTAzz/LkkjNFZIXXHVUy3S7RiMg84ADgGcrkekcdM5TwtRaRChF5AdgMPACsAdpVddDL8jZZUpblpiQOUdUDgY8A3/C6KIzS5VfAnsD+QAtwWX7FyQ4iUgf8GThbVTvzLU8uiHHMJX2tVXVIVfcHdgbeByyMlS0b+y4rJaGqG73vzcAduJNdDrR6fbl+n+7mPMuTE1S11Xu4hoFrKMHr7fVR/xn4vare7iWX9PWOdczlcK0BVLUdeBQ4GJgiIv7qojsDG7Oxz7JREiIy2RvoQkQmA8cAKxOXKhnuBk71fp8K3JVHWXKG31B6nEiJXW9vQPM3QLOqXh7YVLLXO94xl/K1FpEZIjLF+10LHIUbi3kE+KSXLWvXuWy8m0RkD5z1AG5t75tV9cd5FCkriMgfgCW4MMKtwA+AO4FbgV2B9cCnVLWkBnnjHPcSXPeDAuuAr/h99aWAiHwI+D/gJWDYS74Q10dfktc7wTGfTIleaxF5D25gugL3Yn+rqv6X16b9EZgK/BP4nKr2ZXz/5aIkDMMwjNQpm+4mwzAMI3VMSRiGYRhxMSVhGIZhxMWUhGEYhhEXUxKGYRhGXExJGFlFRP4qIjcUgBw3iMhfc7i/i4MRaTNY7w0i8v0s1LtORM4fZx2PZkicrCEifxKRc/MtRzFhSsLIKyJymoh0Jc8Zur55IqIisjhTdabJpcCHM1mhiLwbF7zvikzW63EQ8P8F9qUi8skE+UMjIieJyH0i0ubVuyRGnmoRuVJEtohIt4jcLSI7R
+XZVUT+4m3fIiK/jA6PLSIf9mKz9YrIGyLy1ahd/RC4SEQaM3Fs5YApiRImW/Hl80GxHIuITBCRClXtUtWtGa7+LODP2YjPpKptqroj1XIiMl1EbhSR9cCHRGStiNzhRzfwmAw8iQttHY8rgE/gJsUdCjQAfxWRCm8/FcA9QL23/WTcbOORGE0isjvwN29fBwA/Ba4UkU8EjvMl4A3gc6kea9miqvYpgg/uQfstbs2EVuC7wF+BGwJ51gEXA9cB7cBtXvq7gQeBHlw47RuAxkC5G4C/Ru3vYmBldB5cHPsNwDvA9cCkQJ5JXj5fxgujZYzaxxLcDNng5+J4xwLM8/IsjqpHgU8Gfgc/j4aVP4F8HwNeAHpx6xe8N5DnNO94j8eFghgEFkWfPy/vqbiZwn3e+Qleu0bcwvabge3AY8HjxM22bQc+HlXnOuD8qLRHgaui8lwEXA104iKG/ke8erzfwXO4LsE5ugl43TtXz+Ia8P/CrWERnXe6V9+SqPRGoB84JZC2C25G9bHe/494/3cJ5Pmcd00avP8/B16Pqvta4KmotO8DT+T7mS6Wj1kSxcNluO6LE3Fx5PfDPZDRnAu8CiwGLhSRScC9uIbsfV75D+Ia31Q5FNcAHgV8xqvrW4HtlwJH494Ij8S9zSWKtPskcDawA5jtfYKLxkQcS0gZ/cBux3n1nZSC/PG4FLjAk+MN4B7vvPrU4BrhrwD7AG9GVyAiX8E10tcD78EplZe9bYJ7S56LU0gHAI8DDwdiEr0H15imu6LiOTgFdSCuMb1ERD4QJ+9B3veXcefwIE9OvyvvtEDeA4DfqeqjwA5V/T9V/b6qvpOCbO/FrZFwv5+gqm/h4hN90Ev6AC5e01uBcvcB1V55P8/9RHIfsNgLCujzLPA+Lw6SkYTK5FmMfOOFRT4d+IKqPuClfRH3RhjNY6p6SaDsl4E64PPqFmnBW0vjERGZr6qrUxClE/iauhj2zSJyG04Z/NST8YvA6ap6n7eff48jIwCq2i8iHe6nbgpxLPNCyNjmfW+NUWdc+ZPU+aMYx/RZ3FsquLf8s1T1uYCs0XX8J3CFRgbi8/Mfjos7NENVe/z8IvIvwOeBS4DdcG/hsc5TGO5X1au831eKyDdxCFkUAQAABO5JREFUx/5UdEZVbfPkb486hwPAa0BHIO3/AV8QkedIn1nAEBC9YmSrt83P0xq1fYtXLpjnwRh1VOKsGD+W00acUpqDW5fBSIBZEsXBnrib+lk/QVW7iR3pMvpNcyGwwlcQHk/iTPd9UpTjFR1d5ATcw+YvaLMnMJFAo6NuNa2XUtxHkEyvQ55I/kTEOqbguRvEdUfFRERm4qyEh+JkeS+uq65NRLr8D87q2dPLUwsMqAuFnQ4rov6HPfYRVHWDqu6tqncEks/FBZlbCnxYRF4Rkf/wxxLGiRC5RkK8QHOJ8kiMdF8RmyURArMkioNYN3o8umOUTfZwDQf24VPFWAZilPdfNMa8OmeA6GPxG8iRfUV1IyQjkfzjoU9VhxJsT3ZuJuDeeGN1H/qD1FuAiSIySSMHmDNx7dLGe1n5HvA9EXkWuBK4yqv75yGr2YSzxqYzagmCU2KPB/IcElVuuleuNZBnVlSemTglHnQimOp9t2EkxSyJ4mA17iEfWUjF6xNfFKLsK8B+Ud4mH8Rd+2bvfxuu7znI/mnKeHBAxskhZOzHPehh8B/qoKzRcvZ735l4k/WJdUzN8bNHoqqtuMHyI+NkeR5oAoZVdXXUx18wyLdUoq2/iGsnIjXA3mFlS8AAqZ/DHap6Ey4k/4dSKPect7+j/QTP/XUhzuoFZ80tjHKLPRrnBPBcIM9RUXUfDSxX1aCSXARs9K6LkQRTEkWA18VxHfBzETlSRPbB9YdPILl18XvcG/lvReTd4pZsvRq4PTAe8TBwgIicLiLzReTbjH1rCyPjbzwZjxaRfT2ZkzU064Aar8z0qAHh6H30AE8DF4jIviLyQSIHusF5B/UAx4pIU4b84S+KOqZ+4OYU6/gxcLaInCMiC0RkfxE5z9v2IK5v/y4R+YiI7C4iHxCRH4rIoeDGCXDKJLrxfRg4RUSWBORLxbqKxzrgSBGZJd560SIyV0ReFZET/UwistSbm9Do/srBuIb5n4E8U0Vkf0ZfGOZ7xz/LO7YO3L3zCxE5SkQOwHlNrWB0jOF+3ED/b0XkABE5CvgFcI2OugT/GthZRK4QkYUi8iWc91n0PXIozpnDCIEpieLhfNxiK3fjVqRageuz701UyOuaOBbnd/4sbvWqp3AD4X6e+3CTjH6MeyubR2BiVYoyPoJ7k3wEN2byeKICqvok7uH+A+6t+NtJ9uHL/Q+csrsoqr5B4JvAl3D97plYres7OO+y54F3AR/zullCo6q/Ar6B8xhaiWuk9vW2Kc7b6WHc0puv4RYN2ovIJSmXAadEVf1Tr9xduIb0CU/O8XIebkD9LUYb/CpPpqDiXQ9c7uU7DLfA1V3ATwJ5Pu7V8Yj3/xrvf3Ci2znA7cAtOIXZBfyL343nfX8U5wn3/7x8t+PuObw8a3Hn8TCc5fU94Juq+mc/j2dpnejJYITAFh0qUkSkGudq+QtVLalF3wsFcTODH8F5HUV73uQc75q/ivNy+798yxONiDyqqkvyLUciROQbwAmqeky+ZSkWbOC6SPBM8IU4a6Ae57dfj3ujMsoAVe0TkVMZHXg1UmcAN3PdCIkpieLiXJy577tcHqaqcechGKWHqibsvssnhW5FAKjqsnzLUGxYd5NhGIYRFxu4NgzDMOJiSsIwDMOIiykJwzAMIy6mJAzDMIy4mJIwDMMw4vL/A1ZuvR6tHJV3AAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "plot_pred_ground(fetch_list, infer_labels_np)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "上图可以看出,我们训练出来的模型的预测结果与真实结果是较为接近的。" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 训练方式二\n", + "我们也可以用我们的高层API来做线性回归训练,高层API相较于底层API更加的简洁方便。" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Epoch 1/5\n", + "step 20/51 - loss: 520.8663 - 1ms/step\n", + "step 40/51 - loss: 611.7135 - 1ms/step\n", + "step 51/51 - loss: 620.0662 - 1ms/step\n", + "Eval begin...\n", + "step 13/13 - loss: 389.7871 - 1ms/step\n", + "Eval samples: 102\n", + "Epoch 2/5\n", + "step 20/51 - loss: 867.4678 - 3ms/step\n", + "step 40/51 - loss: 1081.1701 - 2ms/step\n", + "step 51/51 - loss: 420.8705 - 2ms/step\n", + "Eval begin...\n", + "step 13/13 - loss: 387.2432 - 1ms/step\n", + "Eval samples: 102\n", + "Epoch 3/5\n", + "step 20/51 - loss: 810.1555 - 2ms/step\n", + "step 40/51 - loss: 840.3570 - 2ms/step\n", + "step 51/51 - loss: 421.0806 - 2ms/step\n", + "Eval begin...\n", + "step 13/13 - loss: 384.7417 - 693us/step\n", + "Eval samples: 102\n", + "Epoch 4/5\n", + "step 20/51 - loss: 647.1215 - 1ms/step\n", + "step 40/51 - loss: 682.9673 - 1ms/step\n", + "step 51/51 - loss: 422.0570 - 1ms/step\n", + "Eval begin...\n", + "step 13/13 - loss: 382.2546 - 591us/step\n", + "Eval samples: 102\n", + "Epoch 5/5\n", + "step 20/51 - loss: 713.3719 - 1ms/step\n", + "step 40/51 - loss: 567.0962 - 1ms/step\n", + "step 51/51 - loss: 456.8702 - 1ms/step\n", + "Eval begin...\n", + "step 13/13 - loss: 379.7527 - 985us/step\n", + "Eval samples: 102\n" + ] + } + ], + "source": [ + "import paddle\n", + "paddle.disable_static()\n", + "paddle.set_default_dtype(\"float64\")\n", + "\n", + "#step1:用高层API定义数据集,无需进行数据处理等,高层API为您一条龙搞定\n", + "train_dataset = paddle.text.datasets.UCIHousing(mode='train')\n", + "eval_dataset = paddle.text.datasets.UCIHousing(mode='test')\n", + "\n", + "#step2:定义模型\n", + "class UCIHousing(paddle.nn.Layer):\n", + " def __init__(self):\n", + " super(UCIHousing, self).__init__()\n", + " self.fc = paddle.nn.Linear(13, 1, None)\n", + "\n", + " def forward(self, input):\n", + " pred = self.fc(input)\n", + " return pred\n", + "\n", + "#step3:训练模型\n", + "model = paddle.Model(UCIHousing())\n", + "model.prepare(paddle.optimizer.Adam(parameters=model.parameters()),\n", + " paddle.nn.loss.MSELoss())\n", + "model.fit(train_dataset, eval_dataset, epochs=5, batch_size=8, log_freq=20)" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "collapsed_sections": [], + "name": "线性回归(基于paddle2.0-alpha)", + "provenance": [], + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.4" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/doc/paddle/tutorial/quick_start/linear_regression/linear_regression.rst b/doc/paddle/tutorial/quick_start/linear_regression/linear_regression.rst new file mode 100644 index 0000000000000000000000000000000000000000..3c75b3063bf87547ad991d3ce1f26e4dc126c58c --- /dev/null +++ 
b/doc/paddle/tutorial/quick_start/linear_regression/linear_regression.rst @@ -0,0 +1,428 @@ +线性回归 +======== + +NOTE: 本示例教程是基于2.0beta版本开发 + +简要介绍 +-------- + +经典的线性回归模型主要用来预测一些存在着线性关系的数据集。回归模型可以理解为:存在一个点集,用一条曲线去拟合它分布的过程。如果拟合曲线是一条直线,则称为线性回归。如果是一条二次曲线,则被称为二次回归。线性回归是回归模型中最简单的一种。 +本示例简要介绍如何用飞桨开源框架,实现波士顿房价预测。其思路是,假设uci-housing数据集中的房子属性和房价之间的关系可以被属性间的线性组合描述。在模型训练阶段,让假设的预测结果和真实值之间的误差越来越小。在模型预测阶段,预测器会读取训练好的模型,对从未遇见过的房子属性进行房价预测。 + +数据集介绍 +---------- + +本示例采用uci-housing数据集,这是经典线性回归的数据集。数据集共7084条数据,可以拆分成506行,每行14列。前13列用来描述房屋的各种信息,最后一列为该类房屋价格中位数。 + +前13列用来描述房屋的各种信息 + +.. figure:: https://ai-studio-static-online.cdn.bcebos.com/c19602ce74284e3b9a50422f8dc37c0c1c79cf5cd8424994b6a6b073dcb7c057 + :alt: avatar + + avatar + +训练方式一 +---------- + +环境设置 +~~~~~~~~ + +.. code:: ipython3 + + import paddle + import numpy as np + import os + import matplotlib + import matplotlib.pyplot as plt + import pandas as pd + import seaborn as sns + + paddle.disable_static() + paddle.__version__ + + + + +.. parsed-literal:: + + '2.0.0-beta0' + + + +数据处理 +~~~~~~~~ + +.. code:: ipython3 + + #下载数据 + #!wget https://archive.ics.uci.edu/ml/machine-learning-databases/housing/housing.data -O housing.data + +.. code:: ipython3 + + # 从文件导入数据 + datafile = './housing.data' + housing_data = np.fromfile(datafile, sep=' ') + + feature_names = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE','DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV'] + feature_num = len(feature_names) + # 将原始数据进行Reshape,变成[N, 14]这样的形状 + housing_data = housing_data.reshape([housing_data.shape[0] // feature_num, feature_num]) + +.. code:: ipython3 + + # 画图看特征间的关系,主要是变量两两之间的关系(线性或非线性,有无明显较为相关关系) + features_np = np.array([x[:13] for x in housing_data], np.float32) + labels_np = np.array([x[-1] for x in housing_data], np.float32) + data_np = np.c_[features_np, labels_np] + df = pd.DataFrame(data_np, columns=feature_names) + matplotlib.use('TkAgg') + %matplotlib inline + sns.pairplot(df.dropna(), y_vars=feature_names[-1], x_vars=feature_names[:]) + plt.show() + + + +.. image:: https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/paddle/tutorial/quick_start/linear_regression/linear_regression_files/linear_regression_001.png?raw=true + + +.. code:: ipython3 + + # 相关性分析 + fig, ax = plt.subplots(figsize=(15, 1)) + corr_data = df.corr().iloc[-1] + corr_data = np.asarray(corr_data).reshape(1, 14) + ax = sns.heatmap(corr_data, cbar=True, annot=True) + plt.show() + + + +.. image:: https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/paddle/tutorial/quick_start/linear_regression/linear_regression_files/linear_regression_002.png?raw=true + + +**数据归一化处理**\ 下图为大家展示各属性的取值范围分布: + +.. code:: ipython3 + + sns.boxplot(data=df.iloc[:, 0:13]) + + + + +.. parsed-literal:: + + + + + + +.. image:: https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/paddle/tutorial/quick_start/linear_regression/linear_regression_files/linear_regression_003.png?raw=true + + +从上图看出,我们各属性的数值范围差异太大,甚至不能够在一个画布上充分的展示各属性具体的最大、最小值以及异常值等。下面我们进行归一化。 + +做归一化(或 Feature scaling)至少有以下2个理由: + +- 过大或过小的数值范围会导致计算时的浮点上溢或下溢。 +- 不同的数值范围会导致不同属性对模型的重要性不同(至少在训练的初始阶段如此),而这个隐含的假设常常是不合理的。这会对优化的过程造成困难,使训练时间大大的加长. + +.. code:: ipython3 + + features_max = housing_data.max(axis=0) + features_min = housing_data.min(axis=0) + features_avg = housing_data.sum(axis=0) / housing_data.shape[0] + +.. 
code:: ipython3 + + BATCH_SIZE = 20 + def feature_norm(input): + f_size = input.shape + output_features = np.zeros(f_size, np.float32) + for batch_id in range(f_size[0]): + for index in range(13): + output_features[batch_id][index] = (input[batch_id][index] - features_avg[index]) / (features_max[index] - features_min[index]) + return output_features + +.. code:: ipython3 + + #只对属性进行归一化 + housing_features = feature_norm(housing_data[:, :13]) + # print(feature_trian.shape) + housing_data = np.c_[housing_features, housing_data[:, -1]].astype(np.float32) + # print(training_data[0]) + +.. code:: ipython3 + + #归一化后的train_data,我们看下各属性的情况 + features_np = np.array([x[:13] for x in housing_data],np.float32) + labels_np = np.array([x[-1] for x in housing_data],np.float32) + data_np = np.c_[features_np, labels_np] + df = pd.DataFrame(data_np, columns=feature_names) + sns.boxplot(data=df.iloc[:, 0:13]) + + + + +.. parsed-literal:: + + + + + + +.. image:: https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/paddle/tutorial/quick_start/linear_regression/linear_regression_files/linear_regression_004.png?raw=true + + +.. code:: ipython3 + + #将训练数据集和测试数据集按照8:2的比例分开 + ratio = 0.8 + offset = int(housing_data.shape[0] * ratio) + train_data = housing_data[:offset] + test_data = housing_data[offset:] + +模型配置 +~~~~~~~~ + +线性回归就是一个从输入到输出的简单的全连接层。 + +对于波士顿房价数据集,假设属性和房价之间的关系可以被属性间的线性组合描述。 + +.. code:: ipython3 + + class Regressor(paddle.nn.Layer): + def __init__(self): + super(Regressor, self).__init__() + self.fc = paddle.nn.Linear(13, 1,) + + def forward(self, inputs): + pred = self.fc(inputs) + return pred + +定义绘制训练过程的损失值变化趋势的方法draw_train_process + +.. code:: ipython3 + + train_nums = [] + train_costs = [] + + def draw_train_process(iters, train_costs): + plt.title("training cost", fontsize=24) + plt.xlabel("iter", fontsize=14) + plt.ylabel("cost", fontsize=14) + plt.plot(iters, train_costs, color='red', label='training cost') + plt.show() + +模型训练 +~~~~~~~~ + +下面为大家展示模型训练的代码。 +这里用到的是线性回归模型最常用的损失函数–均方误差(MSE),用来衡量模型预测的房价和真实房价的差异。 +对损失函数进行优化所采用的方法是梯度下降法 + +.. code:: ipython3 + + import paddle.nn.functional as F + y_preds = [] + labels_list = [] + + def train(model): + print('start training ... ') + # 开启模型训练模式 + model.train() + EPOCH_NUM = 500 + train_num = 0 + optimizer = paddle.optimizer.SGD(learning_rate=0.001, parameters=model.parameters()) + for epoch_id in range(EPOCH_NUM): + # 在每轮迭代开始之前,将训练数据的顺序随机的打乱 + np.random.shuffle(train_data) + # 将训练数据进行拆分,每个batch包含20条数据 + mini_batches = [train_data[k:k+BATCH_SIZE] for k in range(0, len(train_data), BATCH_SIZE)] + for batch_id, data in enumerate(mini_batches): + features_np = np.array(data[:, :13], np.float32) + labels_np = np.array(data[:, -1:], np.float32) + features = paddle.to_tensor(features_np) + labels = paddle.to_tensor(labels_np) + #前向计算 + y_pred = model(features) + cost = F.mse_loss(y_pred, label=labels) + train_cost = cost.numpy()[0] + #反向传播 + cost.backward() + #最小化loss,更新参数 + optimizer.step() + # 清除梯度 + optimizer.clear_grad() + + if batch_id%30 == 0 and epoch_id%50 == 0: + print("Pass:%d,Cost:%0.5f"%(epoch_id, train_cost)) + + train_num = train_num + BATCH_SIZE + train_nums.append(train_num) + train_costs.append(train_cost) + + model = Regressor() + train(model) + + +.. parsed-literal:: + + start training ... 
+ Pass:0,Cost:740.21814 + Pass:50,Cost:36.40338 + Pass:100,Cost:86.01823 + Pass:150,Cost:50.86654 + Pass:200,Cost:31.14208 + Pass:250,Cost:20.54596 + Pass:300,Cost:22.30817 + Pass:350,Cost:24.18756 + Pass:400,Cost:22.22965 + Pass:450,Cost:39.25978 + + +.. code:: ipython3 + + matplotlib.use('TkAgg') + %matplotlib inline + draw_train_process(train_nums, train_costs) + + + +.. image:: https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/paddle/tutorial/quick_start/linear_regression/linear_regression_files/linear_regression_005.png?raw=true + + +可以从上图看出,随着训练轮次的增加,损失在呈降低趋势。但由于每次仅基于少量样本更新参数和计算损失,所以损失下降曲线会出现震荡。 + +模型预测 +~~~~~~~~ + +.. code:: ipython3 + + #获取预测数据 + INFER_BATCH_SIZE = 100 + + infer_features_np = np.array([data[:13] for data in test_data]).astype("float32") + infer_labels_np = np.array([data[-1] for data in test_data]).astype("float32") + + infer_features = paddle.to_tensor(infer_features_np) + infer_labels = paddle.to_tensor(infer_labels_np) + fetch_list = model(infer_features) + + sum_cost = 0 + for i in range(INFER_BATCH_SIZE): + infer_result = fetch_list[i][0] + ground_truth = infer_labels[i] + if i % 10 == 0: + print("No.%d: infer result is %.2f,ground truth is %.2f" % (i, infer_result, ground_truth)) + cost = paddle.pow(infer_result - ground_truth, 2) + sum_cost += cost + mean_loss = sum_cost / INFER_BATCH_SIZE + print("Mean loss is:", mean_loss.numpy()) + + +.. parsed-literal:: + + No.0: infer result is 12.15,ground truth is 8.50 + No.10: infer result is 5.21,ground truth is 7.00 + No.20: infer result is 14.32,ground truth is 11.70 + No.30: infer result is 16.11,ground truth is 11.70 + No.40: infer result is 13.42,ground truth is 10.80 + No.50: infer result is 15.50,ground truth is 14.90 + No.60: infer result is 18.81,ground truth is 21.40 + No.70: infer result is 15.42,ground truth is 13.80 + No.80: infer result is 18.16,ground truth is 20.60 + No.90: infer result is 21.48,ground truth is 24.50 + Mean loss is: [12.195988] + + +.. code:: ipython3 + + def plot_pred_ground(pred, ground): + plt.figure() + plt.title("Predication v.s. Ground truth", fontsize=24) + plt.xlabel("ground truth price(unit:$1000)", fontsize=14) + plt.ylabel("predict price", fontsize=14) + plt.scatter(ground, pred, alpha=0.5) # scatter:散点图,alpha:"透明度" + plt.plot(ground, ground, c='red') + plt.show() + +.. code:: ipython3 + + plot_pred_ground(fetch_list, infer_labels_np) + + + +.. image:: https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/paddle/tutorial/quick_start/linear_regression/linear_regression_files/linear_regression_006.png?raw=true + + +上图可以看出,我们训练出来的模型的预测结果与真实结果是较为接近的。 + +训练方式二 +---------- + +我们也可以用我们的高层API来做线性回归训练,高层API相较于底层API更加的简洁方便。 + +.. code:: ipython3 + + import paddle + paddle.disable_static() + paddle.set_default_dtype("float64") + + #step1:用高层API定义数据集,无需进行数据处理等,高层API为您一条龙搞定 + train_dataset = paddle.text.datasets.UCIHousing(mode='train') + eval_dataset = paddle.text.datasets.UCIHousing(mode='test') + + #step2:定义模型 + class UCIHousing(paddle.nn.Layer): + def __init__(self): + super(UCIHousing, self).__init__() + self.fc = paddle.nn.Linear(13, 1, None) + + def forward(self, input): + pred = self.fc(input) + return pred + + #step3:训练模型 + model = paddle.Model(UCIHousing()) + model.prepare(paddle.optimizer.Adam(parameters=model.parameters()), + paddle.nn.loss.MSELoss()) + model.fit(train_dataset, eval_dataset, epochs=5, batch_size=8, log_freq=20) + + +.. 
parsed-literal:: + + Epoch 1/5 + step 20/51 - loss: 520.8663 - 1ms/step + step 40/51 - loss: 611.7135 - 1ms/step + step 51/51 - loss: 620.0662 - 1ms/step + Eval begin... + step 13/13 - loss: 389.7871 - 1ms/step + Eval samples: 102 + Epoch 2/5 + step 20/51 - loss: 867.4678 - 3ms/step + step 40/51 - loss: 1081.1701 - 2ms/step + step 51/51 - loss: 420.8705 - 2ms/step + Eval begin... + step 13/13 - loss: 387.2432 - 1ms/step + Eval samples: 102 + Epoch 3/5 + step 20/51 - loss: 810.1555 - 2ms/step + step 40/51 - loss: 840.3570 - 2ms/step + step 51/51 - loss: 421.0806 - 2ms/step + Eval begin... + step 13/13 - loss: 384.7417 - 693us/step + Eval samples: 102 + Epoch 4/5 + step 20/51 - loss: 647.1215 - 1ms/step + step 40/51 - loss: 682.9673 - 1ms/step + step 51/51 - loss: 422.0570 - 1ms/step + Eval begin... + step 13/13 - loss: 382.2546 - 591us/step + Eval samples: 102 + Epoch 5/5 + step 20/51 - loss: 713.3719 - 1ms/step + step 40/51 - loss: 567.0962 - 1ms/step + step 51/51 - loss: 456.8702 - 1ms/step + Eval begin... + step 13/13 - loss: 379.7527 - 985us/step + Eval samples: 102 + diff --git a/doc/paddle/tutorial/quick_start/linear_regression/linear_regression_files/linear_regression_001.png b/doc/paddle/tutorial/quick_start/linear_regression/linear_regression_files/linear_regression_001.png new file mode 100644 index 0000000000000000000000000000000000000000..2bb558520afb3598a6a4fdb5caeefe0be3033d7f Binary files /dev/null and b/doc/paddle/tutorial/quick_start/linear_regression/linear_regression_files/linear_regression_001.png differ diff --git a/doc/paddle/tutorial/quick_start/linear_regression/linear_regression_files/linear_regression_002.png b/doc/paddle/tutorial/quick_start/linear_regression/linear_regression_files/linear_regression_002.png new file mode 100644 index 0000000000000000000000000000000000000000..710b69415ce045f7fed6ec3aa2a20b1cb79755f8 Binary files /dev/null and b/doc/paddle/tutorial/quick_start/linear_regression/linear_regression_files/linear_regression_002.png differ diff --git a/doc/paddle/tutorial/quick_start/linear_regression/linear_regression_files/linear_regression_003.png b/doc/paddle/tutorial/quick_start/linear_regression/linear_regression_files/linear_regression_003.png new file mode 100644 index 0000000000000000000000000000000000000000..338e1ad377b54e92e1e19df2c8364f2c65527746 Binary files /dev/null and b/doc/paddle/tutorial/quick_start/linear_regression/linear_regression_files/linear_regression_003.png differ diff --git a/doc/paddle/tutorial/quick_start/linear_regression/linear_regression_files/linear_regression_004.png b/doc/paddle/tutorial/quick_start/linear_regression/linear_regression_files/linear_regression_004.png new file mode 100644 index 0000000000000000000000000000000000000000..079160527d8a4713f79bd9865e59767297ea8b44 Binary files /dev/null and b/doc/paddle/tutorial/quick_start/linear_regression/linear_regression_files/linear_regression_004.png differ diff --git a/doc/paddle/tutorial/quick_start/linear_regression/linear_regression_files/linear_regression_005.png b/doc/paddle/tutorial/quick_start/linear_regression/linear_regression_files/linear_regression_005.png new file mode 100644 index 0000000000000000000000000000000000000000..760a9126bfa54405acafaf4165d29252a96f3d18 Binary files /dev/null and b/doc/paddle/tutorial/quick_start/linear_regression/linear_regression_files/linear_regression_005.png differ diff --git a/doc/paddle/tutorial/quick_start/linear_regression/linear_regression_files/linear_regression_006.png 
b/doc/paddle/tutorial/quick_start/linear_regression/linear_regression_files/linear_regression_006.png new file mode 100644 index 0000000000000000000000000000000000000000..def6ca49428696e942f42a2802ebf7e1716f5a81 Binary files /dev/null and b/doc/paddle/tutorial/quick_start/linear_regression/linear_regression_files/linear_regression_006.png differ diff --git a/doc/paddle/tutorial/quick_start/save_model/save_model.ipynb b/doc/paddle/tutorial/quick_start/save_model/save_model.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..812d2631821db7dc9f3d73d408f607553a15534f --- /dev/null +++ b/doc/paddle/tutorial/quick_start/save_model/save_model.ipynb @@ -0,0 +1,388 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 模型保存及加载\n", + "本教程将基于Paddle高阶API对模型参数的保存和加载进行讲解。在日常训练模型过程中我们会遇到一些突发情况,导致训练过程主动或被动的中断,因此在模型没有完全训练好的情况下,我们需要高频的保存下模型参数,在发生意外时可以快速载入保存的参数继续训练。抑或是模型已经训练好了,我们需要使用训练好的参数进行预测或部署模型上线。面对上述情况,Paddle中提供了保存模型和提取模型的方法,支持从上一次保存状态开始训练,只要我们随时保存训练过程中的模型状态,就不用从初始状态重新训练。\n", + "下面将基于手写数字识别的模型讲解paddle如何保存及加载模型,并恢复训练,网络结构部分的讲解省略。" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 环境\n", + "本教程基于paddle-2.0Beta编写,如果您的环境不是此版本,请先安装paddle-2.0Beta版本,使用命令:pip3 install paddlepaddle==2.0Beta。" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2.0.0-beta0\n" + ] + } + ], + "source": [ + "import paddle\n", + "import paddle.nn.functional as F\n", + "from paddle.nn import Layer\n", + "from paddle.vision.datasets import MNIST\n", + "from paddle.metric import Accuracy\n", + "from paddle.nn import Conv2d,MaxPool2d,Linear\n", + "from paddle.static import InputSpec\n", + "\n", + "print(paddle.__version__)\n", + "paddle.disable_static()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 数据集\n", + "手写数字的MNIST数据集,包含60,000个用于训练的示例和10,000个用于测试的示例。这些数字已经过尺寸标准化并位于图像中心,图像是固定大小(28x28像素),其值为0到1。该数据集的官方地址为:http://yann.lecun.com/exdb/mnist/\n", + "本例中我们使用飞桨自带的mnist数据集。使用from paddle.vision.datasets import MNIST 引入即可。" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "train_dataset = MNIST(mode='train')\n", + "test_dataset = MNIST(mode='test')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 模型搭建" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "class MyModel(Layer):\n", + " def __init__(self):\n", + " super(MyModel, self).__init__()\n", + " self.conv1 = paddle.nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5, stride=1, padding=2)\n", + " self.max_pool1 = MaxPool2d(kernel_size=2, stride=2)\n", + " self.conv2 = Conv2d(in_channels=6, out_channels=16, kernel_size=5, stride=1)\n", + " self.max_pool2 = MaxPool2d(kernel_size=2, stride=2)\n", + " self.linear1 = Linear(in_features=16*5*5, out_features=120)\n", + " self.linear2 = Linear(in_features=120, out_features=84)\n", + " self.linear3 = Linear(in_features=84, out_features=10)\n", + "\n", + " def forward(self, x):\n", + " x = self.conv1(x)\n", + " x = F.relu(x)\n", + " x = self.max_pool1(x)\n", + " x = F.relu(x)\n", + " x = self.conv2(x)\n", + " x = self.max_pool2(x)\n", + " x = paddle.flatten(x, start_axis=1, stop_axis=-1)\n", + " x = self.linear1(x)\n", + " x = F.relu(x)\n", + " x = self.linear2(x)\n", + " x = F.relu(x)\n", + " x = self.linear3(x)\n", + " x = F.softmax(x)\n", + " return x" + ] + }, + { + 
"cell_type": "markdown", + "metadata": {}, + "source": [ + "## 模型训练\n", + "通过`Model` 构建实例,快速完成模型训练" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Epoch 1/1\n", + "step 100/938 - loss: 1.6177 - acc_top1: 0.6119 - acc_top2: 0.6813 - 15ms/step\n", + "step 200/938 - loss: 1.7720 - acc_top1: 0.7230 - acc_top2: 0.7788 - 15ms/step\n", + "step 300/938 - loss: 1.6114 - acc_top1: 0.7666 - acc_top2: 0.8164 - 15ms/step\n", + "step 400/938 - loss: 1.6537 - acc_top1: 0.7890 - acc_top2: 0.8350 - 15ms/step\n", + "step 500/938 - loss: 1.5229 - acc_top1: 0.8170 - acc_top2: 0.8619 - 15ms/step\n", + "step 600/938 - loss: 1.5269 - acc_top1: 0.8391 - acc_top2: 0.8821 - 15ms/step\n", + "step 700/938 - loss: 1.4821 - acc_top1: 0.8561 - acc_top2: 0.8970 - 15ms/step\n", + "step 800/938 - loss: 1.4860 - acc_top1: 0.8689 - acc_top2: 0.9081 - 15ms/step\n", + "step 900/938 - loss: 1.5032 - acc_top1: 0.8799 - acc_top2: 0.9174 - 15ms/step\n", + "step 938/938 - loss: 1.4617 - acc_top1: 0.8835 - acc_top2: 0.9203 - 15ms/step\n", + "save checkpoint at /Users/dingjiawei/online_repo/book/paddle2.0_docs/save_model/mnist_checkpoint/0\n", + "Eval begin...\n", + "step 100/157 - loss: 1.4765 - acc_top1: 0.9636 - acc_top2: 0.9891 - 6ms/step\n", + "step 157/157 - loss: 1.4612 - acc_top1: 0.9705 - acc_top2: 0.9910 - 6ms/step\n", + "Eval samples: 10000\n", + "save checkpoint at /Users/dingjiawei/online_repo/book/paddle2.0_docs/save_model/mnist_checkpoint/final\n" + ] + } + ], + "source": [ + "inputs = InputSpec([None, 784], 'float32', 'x')\n", + "labels = InputSpec([None, 10], 'float32', 'x')\n", + "model = paddle.Model(MyModel(), inputs, labels)\n", + "\n", + "optim = paddle.optimizer.Adam(learning_rate=0.001, parameters=model.parameters())\n", + "\n", + "model.prepare(\n", + " optim,\n", + " paddle.nn.loss.CrossEntropyLoss(),\n", + " Accuracy(topk=(1, 2))\n", + " )\n", + "model.fit(train_dataset,\n", + " test_dataset,\n", + " epochs=1,\n", + " log_freq=100,\n", + " batch_size=64,\n", + " save_dir='mnist_checkpoint')\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 保存模型参数\n", + "\n", + "目前Paddle框架有三种保存模型参数的体系,分别是:\n", + "#### paddle 高阶API-模型参数保存\n", + " * paddle.Model.fit\n", + " * paddle.Model.save\n", + "#### paddle 基础框架-动态图-模型参数保存 \n", + " * paddle.save\n", + "#### paddle 基础框架-静态图-模型参数保存 \n", + " * paddle.io.save\n", + " * paddle.io.save_inference_model\n", + "\n", + "下面将基于高阶API对模型保存与加载的方法进行讲解。" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "#### 方法一:\n", + "* paddle.Model.fit(train_data, epochs, batch_size, save_dir, log_freq)

\n", + "在使用model.fit函数进行网络循环训练时,在save_dir参数中指定保存模型的路径,save_freq指定写入频率,即可同时实现模型的训练和保存。mode.fit()只能保存模型参数,不能保存优化器参数,每个epoch结束只会生成一个.pdparams文件。可以边训练边保存,每次epoch结束会实时生成一个.pdparams文件。 \n", + "\n", + "#### 方法二:\n", + "* paddle.Model.save(self, path, training=True)

\n", + "model.save(path)方法可以保存模型结构、网络参数和优化器参数,参数training=true的使用场景是在训练过程中,此时会保存网络参数和优化器参数。每个epoch生成两种文件 0.pdparams,0.pdopt,分别存储了模型参数和优化器参数,但是只会在整个模型训练完成后才会生成包含所有epoch参数的文件,path的格式为'dirname/file_prefix' 或 'file_prefix',其中dirname指定路径名称,file_prefix 指定参数文件的名称。当training=false的时候,代表已经训练结束,此时存储的是预测模型结构和网络参数。" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# 方法一:训练过程中实时保存每个epoch的模型参数\n", + "model.fit(train_dataset,\n", + " test_dataset,\n", + " epochs=2,\n", + " batch_size=64,\n", + " save_dir='mnist_checkpoint'\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# 方法二:model.save()保存模型和优化器参数信息\n", + "model.save('mnist_checkpoint/test')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 加载模型参数\n", + "\n", + "当恢复训练状态时,需要加载模型数据,此时我们可以使用加载函数从存储模型状态和优化器状态的文件中载入模型参数和优化器参数,如果不需要恢复优化器,则不必使用优化器状态文件。\n", + "#### 高阶API-模型参数加载\n", + " * paddle.Model.load\n", + "#### paddle 基础框架-动态图-模型参数加载\n", + " * paddle.load\n", + "#### paddle 基础框架-静态图-模型参数加载\n", + " * paddle.io.load \n", + " * paddle.io.load_inference_model" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "下面将对高阶API的模型参数加载方法进行讲解\n", + "* model.load(self, path, skip_mismatch=False, reset_optimizer=False)

\n", + "model.load能够同时加载模型和优化器参数。通过reset_optimizer参数来指定是否需要恢复优化器参数,若reset_optimizer参数为True,则重新初始化优化器参数,若reset_optimizer参数为False,则从路径中恢复优化器参数。" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# 高阶API加载模型\n", + "model.load('mnist_checkpoint/test')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 恢复训练\n", + "\n", + "理想的恢复训练是模型状态回到训练中断的时刻,恢复训练之后的梯度更新走向是和恢复训练前的梯度走向完全相同的。基于此,我们可以通过恢复训练后的损失变化,判断上述方法是否能准确的恢复训练。即从epoch 0结束时保存的模型参数和优化器状态恢复训练,校验其后训练的损失变化(epoch 1)是否和不中断时的训练完全一致。\n", + "\n", + "说明:\n", + "\n", + "恢复训练有如下两个要点:\n", + "\n", + "* 保存模型时同时保存模型参数和优化器参数\n", + "\n", + "* 恢复参数时同时恢复模型参数和优化器参数。" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Epoch 1/2\n", + "step 100/938 - loss: 1.4635 - acc_top1: 0.9650 - acc_top2: 0.9898 - 15ms/step\n", + "step 200/938 - loss: 1.5459 - acc_top1: 0.9659 - acc_top2: 0.9897 - 15ms/step\n", + "step 300/938 - loss: 1.5109 - acc_top1: 0.9658 - acc_top2: 0.9893 - 15ms/step\n", + "step 400/938 - loss: 1.4797 - acc_top1: 0.9664 - acc_top2: 0.9899 - 15ms/step\n", + "step 500/938 - loss: 1.4786 - acc_top1: 0.9673 - acc_top2: 0.9902 - 15ms/step\n", + "step 600/938 - loss: 1.5082 - acc_top1: 0.9679 - acc_top2: 0.9906 - 15ms/step\n", + "step 700/938 - loss: 1.4768 - acc_top1: 0.9687 - acc_top2: 0.9909 - 15ms/step\n", + "step 800/938 - loss: 1.4638 - acc_top1: 0.9696 - acc_top2: 0.9913 - 15ms/step\n", + "step 900/938 - loss: 1.5058 - acc_top1: 0.9704 - acc_top2: 0.9916 - 15ms/step\n", + "step 938/938 - loss: 1.4702 - acc_top1: 0.9708 - acc_top2: 0.9917 - 15ms/step\n", + "Eval begin...\n", + "step 100/157 - loss: 1.4613 - acc_top1: 0.9755 - acc_top2: 0.9944 - 5ms/step\n", + "step 157/157 - loss: 1.4612 - acc_top1: 0.9805 - acc_top2: 0.9956 - 5ms/step\n", + "Eval samples: 10000\n", + "Epoch 2/2\n", + "step 100/938 - loss: 1.4832 - acc_top1: 0.9789 - acc_top2: 0.9927 - 15ms/step\n", + "step 200/938 - loss: 1.4618 - acc_top1: 0.9779 - acc_top2: 0.9932 - 14ms/step\n", + "step 300/938 - loss: 1.4613 - acc_top1: 0.9779 - acc_top2: 0.9929 - 15ms/step\n", + "step 400/938 - loss: 1.4765 - acc_top1: 0.9772 - acc_top2: 0.9932 - 15ms/step\n", + "step 500/938 - loss: 1.4932 - acc_top1: 0.9775 - acc_top2: 0.9934 - 15ms/step\n", + "step 600/938 - loss: 1.4773 - acc_top1: 0.9773 - acc_top2: 0.9936 - 15ms/step\n", + "step 700/938 - loss: 1.4612 - acc_top1: 0.9783 - acc_top2: 0.9939 - 15ms/step\n", + "step 800/938 - loss: 1.4653 - acc_top1: 0.9779 - acc_top2: 0.9939 - 15ms/step\n", + "step 900/938 - loss: 1.4639 - acc_top1: 0.9780 - acc_top2: 0.9939 - 15ms/step\n", + "step 938/938 - loss: 1.4678 - acc_top1: 0.9779 - acc_top2: 0.9937 - 15ms/step\n", + "Eval begin...\n", + "step 100/157 - loss: 1.4612 - acc_top1: 0.9733 - acc_top2: 0.9945 - 6ms/step\n", + "step 157/157 - loss: 1.4612 - acc_top1: 0.9778 - acc_top2: 0.9952 - 6ms/step\n", + "Eval samples: 10000\n" + ] + } + ], + "source": [ + "import paddle\n", + "from paddle.vision.datasets import MNIST\n", + "from paddle.metric import Accuracy\n", + "from paddle.static import InputSpec\n", + "#\n", + "#\n", + "train_dataset = MNIST(mode='train')\n", + "test_dataset = MNIST(mode='test')\n", + "\n", + "paddle.disable_static()\n", + "\n", + "inputs = InputSpec([None, 784], 'float32', 'x')\n", + "labels = InputSpec([None, 10], 'float32', 'x')\n", + "model = paddle.Model(MyModel(), inputs, labels)\n", + "optim = 
paddle.optimizer.Adam(learning_rate=0.001, parameters=model.parameters())\n", + "model.load(\"./mnist_checkpoint/final\")\n", + "model.prepare( \n", + " optim,\n", + " paddle.nn.loss.CrossEntropyLoss(),\n", + " Accuracy(topk=(1, 2))\n", + " )\n", + "model.fit(train_data=train_dataset,\n", + " eval_data=test_dataset,\n", + " batch_size=64,\n", + " log_freq=100,\n", + " epochs=2\n", + " )" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 总结\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "以上就是用Mnist手写数字识别的例子对保存模型、加载模型、恢复训练进行讲解,Paddle提供了很多保存和加载的API方法,您可以根据自己的需求进行选择。" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.8" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/doc/paddle/tutorial/quick_start/save_model/save_model.rst b/doc/paddle/tutorial/quick_start/save_model/save_model.rst new file mode 100644 index 0000000000000000000000000000000000000000..91a6b2175580ec52de7a1d38d4cc51e4686b18c8 --- /dev/null +++ b/doc/paddle/tutorial/quick_start/save_model/save_model.rst @@ -0,0 +1,267 @@ +模型保存及加载 +============== + +本教程将基于Paddle高阶API对模型参数的保存和加载进行讲解。在日常训练模型过程中我们会遇到一些突发情况,导致训练过程主动或被动的中断,因此在模型没有完全训练好的情况下,我们需要高频的保存下模型参数,在发生意外时可以快速载入保存的参数继续训练。抑或是模型已经训练好了,我们需要使用训练好的参数进行预测或部署模型上线。面对上述情况,Paddle中提供了保存模型和提取模型的方法,支持从上一次保存状态开始训练,只要我们随时保存训练过程中的模型状态,就不用从初始状态重新训练。 +下面将基于手写数字识别的模型讲解paddle如何保存及加载模型,并恢复训练,网络结构部分的讲解省略。 + +环境 +---- + +本教程基于paddle-2.0Beta编写,如果您的环境不是此版本,请先安装paddle-2.0Beta版本,使用命令:pip3 +install paddlepaddle==2.0Beta。 + +.. code:: ipython3 + + import paddle + import paddle.nn.functional as F + from paddle.nn import Layer + from paddle.vision.datasets import MNIST + from paddle.metric import Accuracy + from paddle.nn import Conv2d,MaxPool2d,Linear + from paddle.static import InputSpec + + print(paddle.__version__) + paddle.disable_static() + + +.. parsed-literal:: + + 2.0.0-beta0 + + +数据集 +------ + +手写数字的MNIST数据集,包含60,000个用于训练的示例和10,000个用于测试的示例。这些数字已经过尺寸标准化并位于图像中心,图像是固定大小(28x28像素),其值为0到1。该数据集的官方地址为:http://yann.lecun.com/exdb/mnist/ +本例中我们使用飞桨自带的mnist数据集。使用from paddle.vision.datasets +import MNIST 引入即可。 + +.. code:: ipython3 + + train_dataset = MNIST(mode='train') + test_dataset = MNIST(mode='test') + +模型搭建 +-------- + +.. 
code:: ipython3 + + class MyModel(Layer): + def __init__(self): + super(MyModel, self).__init__() + self.conv1 = paddle.nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5, stride=1, padding=2) + self.max_pool1 = MaxPool2d(kernel_size=2, stride=2) + self.conv2 = Conv2d(in_channels=6, out_channels=16, kernel_size=5, stride=1) + self.max_pool2 = MaxPool2d(kernel_size=2, stride=2) + self.linear1 = Linear(in_features=16*5*5, out_features=120) + self.linear2 = Linear(in_features=120, out_features=84) + self.linear3 = Linear(in_features=84, out_features=10) + + def forward(self, x): + x = self.conv1(x) + x = F.relu(x) + x = self.max_pool1(x) + x = F.relu(x) + x = self.conv2(x) + x = self.max_pool2(x) + x = paddle.flatten(x, start_axis=1, stop_axis=-1) + x = self.linear1(x) + x = F.relu(x) + x = self.linear2(x) + x = F.relu(x) + x = self.linear3(x) + x = F.softmax(x) + return x + +模型训练 +-------- + +通过\ ``Model`` 构建实例,快速完成模型训练 + +.. code:: ipython3 + + inputs = InputSpec([None, 784], 'float32', 'x') + labels = InputSpec([None, 10], 'float32', 'x') + model = paddle.Model(MyModel(), inputs, labels) + + optim = paddle.optimizer.Adam(learning_rate=0.001, parameters=model.parameters()) + + model.prepare( + optim, + paddle.nn.loss.CrossEntropyLoss(), + Accuracy(topk=(1, 2)) + ) + model.fit(train_dataset, + test_dataset, + epochs=1, + log_freq=100, + batch_size=64, + save_dir='mnist_checkpoint') + + + +.. parsed-literal:: + + Epoch 1/1 + step 100/938 - loss: 1.6177 - acc_top1: 0.6119 - acc_top2: 0.6813 - 15ms/step + step 200/938 - loss: 1.7720 - acc_top1: 0.7230 - acc_top2: 0.7788 - 15ms/step + step 300/938 - loss: 1.6114 - acc_top1: 0.7666 - acc_top2: 0.8164 - 15ms/step + step 400/938 - loss: 1.6537 - acc_top1: 0.7890 - acc_top2: 0.8350 - 15ms/step + step 500/938 - loss: 1.5229 - acc_top1: 0.8170 - acc_top2: 0.8619 - 15ms/step + step 600/938 - loss: 1.5269 - acc_top1: 0.8391 - acc_top2: 0.8821 - 15ms/step + step 700/938 - loss: 1.4821 - acc_top1: 0.8561 - acc_top2: 0.8970 - 15ms/step + step 800/938 - loss: 1.4860 - acc_top1: 0.8689 - acc_top2: 0.9081 - 15ms/step + step 900/938 - loss: 1.5032 - acc_top1: 0.8799 - acc_top2: 0.9174 - 15ms/step + step 938/938 - loss: 1.4617 - acc_top1: 0.8835 - acc_top2: 0.9203 - 15ms/step + save checkpoint at /Users/dingjiawei/online_repo/book/paddle2.0_docs/save_model/mnist_checkpoint/0 + Eval begin... 
+ step 100/157 - loss: 1.4765 - acc_top1: 0.9636 - acc_top2: 0.9891 - 6ms/step + step 157/157 - loss: 1.4612 - acc_top1: 0.9705 - acc_top2: 0.9910 - 6ms/step + Eval samples: 10000 + save checkpoint at /Users/dingjiawei/online_repo/book/paddle2.0_docs/save_model/mnist_checkpoint/final + + +保存模型参数 +------------ + +目前Paddle框架有三种保存模型参数的体系,分别是: #### paddle +高阶API-模型参数保存 \* paddle.Model.fit \* paddle.Model.save #### +paddle 基础框架-动态图-模型参数保存 \* paddle.save #### paddle +基础框架-静态图-模型参数保存 \* paddle.io.save \* +paddle.io.save_inference_model + +下面将基于高阶API对模型保存与加载的方法进行讲解。 + +方法一: +^^^^^^^^ + +- paddle.Model.fit(train_data, epochs, batch_size, save_dir, log_freq) + 在使用model.fit函数进行网络循环训练时,在save_dir参数中指定保存模型的路径,save_freq指定写入频率,即可同时实现模型的训练和保存。mode.fit()只能保存模型参数,不能保存优化器参数,每个epoch结束只会生成一个.pdparams文件。可以边训练边保存,每次epoch结束会实时生成一个.pdparams文件。 + +方法二: +^^^^^^^^ + +- paddle.Model.save(self, path, training=True) + model.save(path)方法可以保存模型结构、网络参数和优化器参数,参数training=true的使用场景是在训练过程中,此时会保存网络参数和优化器参数。每个epoch生成两种文件 + 0.pdparams,0.pdopt,分别存储了模型参数和优化器参数,但是只会在整个模型训练完成后才会生成包含所有epoch参数的文件,path的格式为’dirname/file_prefix’ + 或 ‘file_prefix’,其中dirname指定路径名称,file_prefix + 指定参数文件的名称。当training=false的时候,代表已经训练结束,此时存储的是预测模型结构和网络参数。 + +.. code:: ipython3 + + # 方法一:训练过程中实时保存每个epoch的模型参数 + model.fit(train_dataset, + test_dataset, + epochs=2, + batch_size=64, + save_dir='mnist_checkpoint' + ) + +.. code:: ipython3 + + # 方法二:model.save()保存模型和优化器参数信息 + model.save('mnist_checkpoint/test') + +加载模型参数 +------------ + +当恢复训练状态时,需要加载模型数据,此时我们可以使用加载函数从存储模型状态和优化器状态的文件中载入模型参数和优化器参数,如果不需要恢复优化器,则不必使用优化器状态文件。 +#### 高阶API-模型参数加载 \* paddle.Model.load #### paddle +基础框架-动态图-模型参数加载 \* paddle.load #### paddle +基础框架-静态图-模型参数加载 \* paddle.io.load \* +paddle.io.load_inference_model + +下面将对高阶API的模型参数加载方法进行讲解 \* model.load(self, path, +skip_mismatch=False, reset_optimizer=False) +model.load能够同时加载模型和优化器参数。通过reset_optimizer参数来指定是否需要恢复优化器参数,若reset_optimizer参数为True,则重新初始化优化器参数,若reset_optimizer参数为False,则从路径中恢复优化器参数。 + +.. code:: ipython3 + + # 高阶API加载模型 + model.load('mnist_checkpoint/test') + +恢复训练 +-------- + +理想的恢复训练是模型状态回到训练中断的时刻,恢复训练之后的梯度更新走向是和恢复训练前的梯度走向完全相同的。基于此,我们可以通过恢复训练后的损失变化,判断上述方法是否能准确的恢复训练。即从epoch +0结束时保存的模型参数和优化器状态恢复训练,校验其后训练的损失变化(epoch +1)是否和不中断时的训练完全一致。 + +说明: + +恢复训练有如下两个要点: + +- 保存模型时同时保存模型参数和优化器参数 + +- 恢复参数时同时恢复模型参数和优化器参数。 + +.. code:: ipython3 + + import paddle + from paddle.vision.datasets import MNIST + from paddle.metric import Accuracy + from paddle.static import InputSpec + # + # + train_dataset = MNIST(mode='train') + test_dataset = MNIST(mode='test') + + paddle.disable_static() + + inputs = InputSpec([None, 784], 'float32', 'x') + labels = InputSpec([None, 10], 'float32', 'x') + model = paddle.Model(MyModel(), inputs, labels) + optim = paddle.optimizer.Adam(learning_rate=0.001, parameters=model.parameters()) + model.load("./mnist_checkpoint/final") + model.prepare( + optim, + paddle.nn.loss.CrossEntropyLoss(), + Accuracy(topk=(1, 2)) + ) + model.fit(train_data=train_dataset, + eval_data=test_dataset, + batch_size=64, + log_freq=100, + epochs=2 + ) + + +.. 
parsed-literal:: + + Epoch 1/2 + step 100/938 - loss: 1.4635 - acc_top1: 0.9650 - acc_top2: 0.9898 - 15ms/step + step 200/938 - loss: 1.5459 - acc_top1: 0.9659 - acc_top2: 0.9897 - 15ms/step + step 300/938 - loss: 1.5109 - acc_top1: 0.9658 - acc_top2: 0.9893 - 15ms/step + step 400/938 - loss: 1.4797 - acc_top1: 0.9664 - acc_top2: 0.9899 - 15ms/step + step 500/938 - loss: 1.4786 - acc_top1: 0.9673 - acc_top2: 0.9902 - 15ms/step + step 600/938 - loss: 1.5082 - acc_top1: 0.9679 - acc_top2: 0.9906 - 15ms/step + step 700/938 - loss: 1.4768 - acc_top1: 0.9687 - acc_top2: 0.9909 - 15ms/step + step 800/938 - loss: 1.4638 - acc_top1: 0.9696 - acc_top2: 0.9913 - 15ms/step + step 900/938 - loss: 1.5058 - acc_top1: 0.9704 - acc_top2: 0.9916 - 15ms/step + step 938/938 - loss: 1.4702 - acc_top1: 0.9708 - acc_top2: 0.9917 - 15ms/step + Eval begin... + step 100/157 - loss: 1.4613 - acc_top1: 0.9755 - acc_top2: 0.9944 - 5ms/step + step 157/157 - loss: 1.4612 - acc_top1: 0.9805 - acc_top2: 0.9956 - 5ms/step + Eval samples: 10000 + Epoch 2/2 + step 100/938 - loss: 1.4832 - acc_top1: 0.9789 - acc_top2: 0.9927 - 15ms/step + step 200/938 - loss: 1.4618 - acc_top1: 0.9779 - acc_top2: 0.9932 - 14ms/step + step 300/938 - loss: 1.4613 - acc_top1: 0.9779 - acc_top2: 0.9929 - 15ms/step + step 400/938 - loss: 1.4765 - acc_top1: 0.9772 - acc_top2: 0.9932 - 15ms/step + step 500/938 - loss: 1.4932 - acc_top1: 0.9775 - acc_top2: 0.9934 - 15ms/step + step 600/938 - loss: 1.4773 - acc_top1: 0.9773 - acc_top2: 0.9936 - 15ms/step + step 700/938 - loss: 1.4612 - acc_top1: 0.9783 - acc_top2: 0.9939 - 15ms/step + step 800/938 - loss: 1.4653 - acc_top1: 0.9779 - acc_top2: 0.9939 - 15ms/step + step 900/938 - loss: 1.4639 - acc_top1: 0.9780 - acc_top2: 0.9939 - 15ms/step + step 938/938 - loss: 1.4678 - acc_top1: 0.9779 - acc_top2: 0.9937 - 15ms/step + Eval begin... + step 100/157 - loss: 1.4612 - acc_top1: 0.9733 - acc_top2: 0.9945 - 6ms/step + step 157/157 - loss: 1.4612 - acc_top1: 0.9778 - acc_top2: 0.9952 - 6ms/step + Eval samples: 10000 + + +总结 +---- + +以上就是用Mnist手写数字识别的例子对保存模型、加载模型、恢复训练进行讲解,Paddle提供了很多保存和加载的API方法,您可以根据自己的需求进行选择。 + diff --git a/doc/paddle/user_guides/cv_case/gan/.run_ce.sh b/doc/paddle/user_guides/cv_case/gan/.run_ce.sh new file mode 100644 index 0000000000000000000000000000000000000000..344193f05be03ac19ee5428072d210c8aeba8e9d --- /dev/null +++ b/doc/paddle/user_guides/cv_case/gan/.run_ce.sh @@ -0,0 +1,6 @@ +#!/bin/bash +#This file is only used for continuous evaluation. +export FLAGS_cudnn_deterministic=True +export CUDA_VISIBLE_DEVICES=0 +python dc_gan.py --enable_ce true --epoch 1 --use_gpu True | python _ce.py + diff --git a/doc/paddle/user_guides/cv_case/gan/README.cn.md b/doc/paddle/user_guides/cv_case/gan/README.cn.md new file mode 100644 index 0000000000000000000000000000000000000000..40e378c0eb5af72bda915cc7a1467d2bea66ba88 --- /dev/null +++ b/doc/paddle/user_guides/cv_case/gan/README.cn.md @@ -0,0 +1,450 @@ +# 生成对抗网络 + +本教程源代码目录在book/09.gan,初次使用请您参考Book文档使用说明。 + +### 说明: ### +1. 硬件环境要求: +本文可支持在CPU、GPU下运行 +2. Docker镜像支持的CUDA/cuDNN版本: +如果使用了Docker运行Book,请注意:这里所提供的默认镜像的GPU环境为 CUDA 8/cuDNN 5,对于NVIDIA Tesla V100等要求CUDA 9的 GPU,使用该镜像可能会运行失败。 +3. 
文档和脚本中代码的一致性问题: +请注意:为使本文更加易读易用,我们拆分、调整了dc_gan.py的代码并放入本文。本文中代码与dc_gan.py的运行结果一致,可直接运行[dc_gan.py](https://github.com/PaddlePaddle/book/blob/develop/09.gan/dc_gan.py)进行验证。 + +## 背景介绍 + +生成对抗网络(Generative Adversarial Network \[[1](#参考文献)\],简称GAN)是非监督式学习的一种方法,通过让两个神经网络相互博弈的方式进行学习。该方法最初由 lan·Goodfellow 等人于2014年提出,原论文见 [Generative Adversarial Network](https://arxiv.org/abs/1406.2661)。 + +生成对抗网络由一个生成网络与一个判别网络组成。生成网络从潜在空间(latent space)中随机采样作为输入,其输出结果需要尽量模仿训练集中的真实样本。判别网络的输入为真实样本或生成网络的输出,其目的是将生成网络的输出从真实样本中尽可能分辨出来。而生成网络则要尽可能地欺骗判别网络。两个网络相互对抗、不断调整参数,其目的是将生成网络生成的样本和真实样本尽可能的区分开\[[2](#参考文献)\] )。 + +生成对抗网络常用于生成以假乱真的图片 \[[3](#参考文献)\] )。此外,该方法还被用于生成视频、三维物体模型等。 + +## 效果展示 + +本教程将 MNIST 数据集输入网络进行训练,经过19轮训练后可以看到,生成的图片已经非常接近真实图片的样子,下图中前8行是真实图片的样子,后8行是网络生成的图像效果: +

+图1. GAN 生成手写数字效果

+ + +## 模型概览 + +### GAN + +GAN 网络顾名思义,是一种通过对抗的方式,去学习数据分布的生成模型。其中,“对抗”指的是生成网络(Generator)和判别网络(Discriminator)的相互对抗。这里以生成图片为例进行说明: + +- 生成网络(G)接收一个随机的噪声z,尽可能的生成近似样本的图像,记为G(z) +- 判别网络(D)接收一张输入图片x,尽可以去判别该图像是真实样本还是网络生成的假样本,判别网络的输出 D(x) 代表 x 为真实图片的概率。如果 D(x)=1 说明判别网络认为该输入一定是真实图片,如果 D(x)=0 说明判别网络认为该输入一定是假图片。 + +在训练的过程中,两个网络互相对抗,最终形成了一个动态的平衡,上述过程用公式可以被描述为: + +

+$$\min_{G}\max_{D} V(D, G) = \mathbb{E}_{x \sim p_{data}(x)}[\log D(x)] + \mathbb{E}_{z \sim p_{z}(z)}[\log(1 - D(G(z)))]$$
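+
+For a fixed generator G, the inner maximization over D in the objective above has the closed-form solution derived in [1]:
+
+$$D^{*}(x) = \frac{p_{data}(x)}{p_{data}(x) + p_{G}(x)}$$
+
+so once the generated distribution p_G matches the real distribution p_data, the optimal discriminator outputs 1/2 for every input, which is exactly the D(G(z)) = 0.5 equilibrium described below.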

+ +在最理想的情况下,G 可以生成与真实样本极其相似的图片G(z),而 D 很难判断这张生成的图片是否为真,对图片的真假进行随机猜测,即 D(G(z))=0.5。 + +下图展示了生成对抗网络的训练过程,假设在训练开始时,真实样本分布、生成样本分布以及判别模型分别是图中的黑线、绿线和蓝线。在训练开始时,判别模型是无法很好地区分真实样本和生成样本的。接下来当我们固定生成模型,而优化判别模型时,优化结果如第二幅图所示,可以看出,这个时候判别模型已经可以较好地区分生成数据和真实数据了。第三步是固定判别模型,改进生成模型,试图让判别模型无法区分生成图片与真实图片,在这个过程中,可以看出由模型生成的图片分布与真实图片分布更加接近,这样的迭代不断进行,直到最终收敛,生成分布和真实分布重合,判别模型无法区分真实图片与生成图片。 + + +

+图2. GAN 训练过程

+ +但是在实际过程中,很难得到这个完美的平衡点,关于GAN的收敛理论还在持续不断的研究中。 + + +### DCGAN + +[DCGAN](https://arxiv.org/abs/1511.06434) \[[4](#参考文献)\] 是深层卷积网络与 GAN 的结合,其基本原理与 GAN 相同,只是将生成网络和判别网络用两个卷积网络(CNN)替代。为了提高生成样本的质量和网络的收敛速度,论文中的 DCGAN 在网络结构上进行了一些改进: + +- 取消 pooling 层:在网络中,所有的pooling层使用步幅卷积(strided convolutions)(判别器)和微步幅度卷积(fractional-strided convolutions)(生成器)进行替换。 +- 加入 batch normalization:在生成器和判别器中均加入batchnorm。 +- 使用全卷积网络:去掉了FC层,以实现更深的网络结构。 +- 激活函数:在生成器(G)中,最后一层使用Tanh函数,其余层采用 ReLu 函数 ; 判别器(D)中都采用LeakyReLu。 + +DCGAN中的生成器(G)结构如下图所示: + +

+图3. DCGAN中的生成器(G)
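+
+The single upsampling step pictured above can be reproduced with the same `fluid.layers.conv2d_transpose` API that the `deconv` helper later in this tutorial wraps. The minimal sketch below is illustrative only (the channel sizes are not the tutorial's, and it assumes the same `paddle.fluid` static-graph environment as the training code that follows): a 7x7 feature map is enlarged to 14x14 by a stride-2 transposed convolution instead of any interpolation or un-pooling layer.
+
+```python
+import paddle.fluid as fluid
+
+# Upsample a 7x7 feature map to 14x14 with a fractional-strided (transposed)
+# convolution; channel counts here are illustrative only.
+x = fluid.data(name='feat', shape=[None, 128, 7, 7], dtype='float32')
+y = fluid.layers.conv2d_transpose(
+    input=x,
+    num_filters=64,
+    filter_size=5,
+    stride=2,
+    padding=2,
+    output_size=[14, 14],
+    act='relu')
+print(y.shape)  # [-1, 64, 14, 14]: batch, channels, height, width
+```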

+ + +## 数据准备 + +本次教程使用数据规模较小的 MNIST 训练生成器和判别器,该数据集可通过paddle.dataset模块自动下载到本地。 + +关于 MNIST 的详细介绍可参考[数字识别](https://github.com/PaddlePaddle/book/tree/develop/02.recognize_digits)。 + +## 训练模型 + + `09.gan/dc_gan.py` 演示了训练的整体过程。 + +### 加载包 + +首先加载 PaddlePaddle 的 Fluid 和其他相关包 + +```python +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import sys +import os +import matplotlib +import PIL +import six +import numpy as np +import math +import time +import paddle +import paddle.fluid as fluid + +matplotlib.use('agg') +import matplotlib.pyplot as plt +import matplotlib.gridspec as gridspec +``` +### 定义辅助工具 + +定义 plot 函数,将图像生成过程可视化 + +```python +def plot(gen_data): + pad_dim = 1 + paded = pad_dim + img_dim + gen_data = gen_data.reshape(gen_data.shape[0], img_dim, img_dim) + n = int(math.ceil(math.sqrt(gen_data.shape[0]))) + gen_data = (np.pad( + gen_data, [[0, n * n - gen_data.shape[0]], [pad_dim, 0], [pad_dim, 0]], + 'constant').reshape((n, n, paded, paded)).transpose((0, 2, 1, 3)) + .reshape((n * paded, n * paded))) + fig = plt.figure(figsize=(8, 8)) + plt.axis('off') + plt.imshow(gen_data, cmap='Greys_r', vmin=-1, vmax=1) + return fig +``` +### 定义超参数 + +```python +gf_dim = 64 # 生成器的feature map的基础通道数量,生成器中所有的feature map的通道数量都是基础通道数量的倍数 +df_dim = 64 # 判别器的feature map的基础通道数量,判别器中所有的feature map的通道数量都是基础通道数量的倍数 +gfc_dim = 1024 * 2 # 生成器的全连接层维度 +dfc_dim = 1024 # 判别器的全连接层维度 +img_dim = 28 # 输入图片的尺寸 + +NOISE_SIZE = 100 # 输入噪声的维度 +LEARNING_RATE = 2e-4 # 训练的学习率 + +epoch = 20 # 训练的epoch数 +output = "./output_dcgan" # 模型和测试结果的存储路径 +use_cudnn = False # 是否使用cuDNN +use_gpu=False # 是否使用GPU训练 +``` + +### 定义网络结构 + +- bn 层 + +调用 `fluid.layers.batch_norm` 接口实现bn层,激活函数默认使用ReLu。 +```python +def bn(x, name=None, act='relu'): + return fluid.layers.batch_norm( + x, + param_attr=name + '1', + bias_attr=name + '2', + moving_mean_name=name + '3', + moving_variance_name=name + '4', + name=name, + act=act) +``` + +- 卷积层 + +调用 `fluid.nets.simple_img_conv_pool` 实现卷积池化组,卷积核大小为5x5,池化窗口大小为2x2,窗口滑动步长为2,激活函数类型由具体网络结构指定。 + +```python +def conv(x, num_filters, name=None, act=None): + return fluid.nets.simple_img_conv_pool( + input=x, + filter_size=5, + num_filters=num_filters, + pool_size=2, + pool_stride=2, + param_attr=name + 'w', + bias_attr=name + 'b', + use_cudnn=use_cudnn, + act=act) +``` + +- 全连接层 + +```python +def fc(x, num_filters, name=None, act=None): + return fluid.layers.fc(input=x, + size=num_filters, + act=act, + param_attr=name + 'w', + bias_attr=name + 'b') +``` + +- 转置卷积层 + +在生成器中,需要用随机采样值生成全尺寸图像,dcgan使用转置卷积层进行上采样,在Fluid中,我们调用 `fluid.layers.conv2d_transpose` 实现转置卷积。 + +```python +def deconv(x, + num_filters, + name=None, + filter_size=5, + stride=2, + dilation=1, + padding=2, + output_size=None, + act=None): + return fluid.layers.conv2d_transpose( + input=x, + param_attr=name + 'w', + bias_attr=name + 'b', + num_filters=num_filters, + output_size=output_size, + filter_size=filter_size, + stride=stride, + dilation=dilation, + padding=padding, + use_cudnn=use_cudnn, + act=act) +``` + +- 判别器 + +判别器使用真实数据集和生成器生成的假图片共同进行训练,在训练过程中尽量使真实数据集的输出结果为1,生成的假图片输出结果为0。本教程中实现的判别器由两个卷积池化层和两个全连接层组成,其中最后一个全连接层的神经元个数为1,输出一个二分类结果。 + +```python +def D(x): + x = fluid.layers.reshape(x=x, shape=[-1, 1, 28, 28]) + x = conv(x, df_dim, act='leaky_relu',name='conv1') + x = bn(conv(x, df_dim * 2,name='conv2'), act='leaky_relu',name='bn1') + x = bn(fc(x, dfc_dim,name='fc1'), act='leaky_relu',name='bn2') + x = fc(x, 1, act='sigmoid',name='fc2') + return x +``` + +- 生成器 + 
+生成器由两组带BN的全连接层和两组转置卷积层组成,网络输入为随机的噪声数据,最后一层转置卷积的卷积核数为1,表示输出为灰度图片。 + +```python +def G(x): + x = bn(fc(x, gfc_dim,name='fc3'),name='bn3') + x = bn(fc(x, gf_dim * 2 * img_dim // 4 * img_dim // 4,name='fc4'),name='bn4') + x = fluid.layers.reshape(x, [-1, gf_dim * 2, img_dim // 4, img_dim // 4]) + x = deconv(x, gf_dim * 2, act='relu', output_size=[14, 14],name='deconv1') + x = deconv(x, num_filters=1, filter_size=5, padding=2, act='tanh', output_size=[28, 28],name='deconv2') + x = fluid.layers.reshape(x, shape=[-1, 28 * 28]) + return x +``` +### 损失函数 + +损失函数使用 `sigmoid_cross_entropy_with_logits` + +```python +def loss(x, label): + return fluid.layers.mean( + fluid.layers.sigmoid_cross_entropy_with_logits(x=x, label=label)) +``` + + +### 创建Program + +```python +d_program = fluid.Program() +dg_program = fluid.Program() + +# 定义判别真实图片的program +with fluid.program_guard(d_program): + # 输入图片大小为28*28=784 + img = fluid.data(name='img', shape=[None, 784], dtype='float32') + # 标签shape=1 + label = fluid.data(name='label', shape=[None, 1], dtype='float32') + d_logit = D(img) + d_loss = loss(d_logit, label) + +# 定义判别生成图片的program +with fluid.program_guard(dg_program): + noise = fluid.data( + name='noise', shape=[None, NOISE_SIZE], dtype='float32') + # 噪声数据作为输入得到生成图片 + g_img = G(x=noise) + + g_program = dg_program.clone() + g_program_test = dg_program.clone(for_test=True) + + # 判断生成图片为真实样本的概率 + dg_logit = D(g_img) + + # 计算生成图片被判别为真实样本的loss + noise_shape = fluid.layers.shape(noise) + dg_loss = loss( + dg_logit, + fluid.layers.fill_constant( + dtype='float32', shape=[noise_shape[0], 1], value=1.0)) + +``` +使用adam作为优化器,分别优化判别真实图片的loss和判别生成图片的loss。 + +```python +opt = fluid.optimizer.Adam(learning_rate=LEARNING_RATE) +opt.minimize(loss=d_loss) +parameters = [p.name for p in g_program.global_block().all_parameters()] +opt.minimize(loss=dg_loss, parameter_list=parameters) +``` + +### 数据集 Feeders 配置 + +下一步,我们开始训练过程。paddle.dataset.mnist.train()用做训练数据集。这个函数返回一个reader——PaddlePaddle中的reader是一个Python函数,每次调用的时候返回一个Python yield generator。 + +下面shuffle是一个reader decorator,它接受一个reader A,返回另一个reader B。reader B 每次读入buffer_size条训练数据到一个buffer里,然后随机打乱其顺序,并且逐条输出。 + +batch是一个特殊的decorator,它的输入是一个reader,输出是一个batched reader。在PaddlePaddle里,一个reader每次yield一条训练数据,而一个batched reader每次yield一个minibatch。 + +```python +batch_size = 128 # Minibatch size + +train_reader = fluid.io.batch( + fluid.io.shuffle( + paddle.dataset.mnist.train(), buf_size=60000), + batch_size=batch_size) +``` + +### 创建执行器 + +```python +if use_gpu: + exe = fluid.Executor(fluid.CUDAPlace(0)) +else: + exe = fluid.Executor(fluid.CPUPlace()) + +exe.run(fluid.default_startup_program()) +``` + +### 开始训练 + +训练过程中的每一次迭代,生成器和判别器分别设置自己的迭代次数。为了避免判别器快速收敛到0,本教程默认每迭代一次,训练一次判别器,两次生成器。 +```python +t_time = 0 +losses = [[], []] + +# 判别器的迭代次数 +NUM_TRAIN_TIMES_OF_DG = 2 + +# 最终生成图像的噪声数据 +const_n = np.random.uniform( + low=-1.0, high=1.0, + size=[batch_size, NOISE_SIZE]).astype('float32') + +for pass_id in range(epoch): + for batch_id, data in enumerate(train_reader()): + if len(data) != batch_size: + continue + + # 生成训练过程的噪声数据 + noise_data = np.random.uniform( + low=-1.0, high=1.0, + size=[batch_size, NOISE_SIZE]).astype('float32') + + # 真实图片 + real_image = np.array(list(map(lambda x: x[0], data))).reshape( + -1, 784).astype('float32') + # 真实标签 + real_labels = np.ones( + shape=[real_image.shape[0], 1], dtype='float32') + # 虚假标签 + fake_labels = np.zeros( + shape=[real_image.shape[0], 1], dtype='float32') + total_label = np.concatenate([real_labels, fake_labels]) + s_time = time.time() + + # 
虚假图片 + generated_image = exe.run(g_program, + feed={'noise': noise_data}, + fetch_list=[g_img])[0] + + total_images = np.concatenate([real_image, generated_image]) + + # D 判断虚假图片为假的loss + d_loss_1 = exe.run(d_program, + feed={ + 'img': generated_image, + 'label': fake_labels, + }, + fetch_list=[d_loss])[0][0] + + # D 判断真实图片为真的loss + d_loss_2 = exe.run(d_program, + feed={ + 'img': real_image, + 'label': real_labels, + }, + fetch_list=[d_loss])[0][0] + + d_loss_n = d_loss_1 + d_loss_2 + losses[0].append(d_loss_n) + + # 训练生成器 + for _ in six.moves.xrange(NUM_TRAIN_TIMES_OF_DG): + noise_data = np.random.uniform( + low=-1.0, high=1.0, + size=[batch_size, NOISE_SIZE]).astype('float32') + dg_loss_n = exe.run(dg_program, + feed={'noise': noise_data}, + fetch_list=[dg_loss])[0][0] + losses[1].append(dg_loss_n) + t_time += (time.time() - s_time) + if batch_id % 10 == 0 : + if not os.path.exists(output): + os.makedirs(output) + # 每轮的生成结果 + generated_images = exe.run(g_program_test, + feed={'noise': const_n}, + fetch_list=[g_img])[0] + # 将真实图片和生成图片连接 + total_images = np.concatenate([real_image, generated_images]) + fig = plot(total_images) + msg = "Epoch ID={0} Batch ID={1} D-Loss={2} DG-Loss={3}\n ".format( + pass_id, batch_id, + d_loss_n, dg_loss_n) + print(msg) + plt.title(msg) + plt.savefig( + '{}/{:04d}_{:04d}.png'.format(output, pass_id, + batch_id), + bbox_inches='tight') + plt.close(fig) +``` + +打印特定轮次的生成结果: + +```python +def display_image(epoch_no,batch_id): + return PIL.Image.open('output_dcgan/{:04d}_{:04d}.png'.format(epoch_no,batch_id)) + +# 观察第10个epoch,460个batch的生成图像: +display_image(10,460) +``` + + +## 总结 + +DCGAN采用一个随机噪声向量作为输入,输入通过与CNN类似但是相反的结构,将输入放大成二维数据。采用这种结构的生成模型和CNN结构的判别模型,DCGAN在图片生成上可以达到相当可观的效果。本案例中,我们利用DCGAN生成了手写数字图片,您可以尝试更换数据集生成符合个人需求的图片,或尝试修改网络结构观察不一样的生成效果。 + + +## 参考文献 +[1] Goodfellow, Ian J.; Pouget-Abadie, Jean; Mirza, Mehdi; Xu, Bing; Warde-Farley, David; Ozair, Sherjil; Courville, Aaron; Bengio, Yoshua. Generative Adversarial Networks. 2014. arXiv:1406.2661 [stat.ML]. + +[2] Andrej Karpathy, Pieter Abbeel, Greg Brockman, Peter Chen, Vicki Cheung, Rocky Duan, Ian Goodfellow, Durk Kingma, Jonathan Ho, Rein Houthooft, Tim Salimans, John Schulman, Ilya Sutskever, And Wojciech Zaremba, Generative Models, OpenAI, [April 7, 2016] + +[3] alimans, Tim; Goodfellow, Ian; Zaremba, Wojciech; Cheung, Vicki; Radford, Alec; Chen, Xi. Improved Techniques for Training GANs. 2016. arXiv:1606.03498 [cs.LG]. + +[4] Radford A, Metz L, Chintala S. Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks[J]. Computer Science, 2015. diff --git a/doc/paddle/user_guides/cv_case/gan/README.md b/doc/paddle/user_guides/cv_case/gan/README.md new file mode 100644 index 0000000000000000000000000000000000000000..b23f1589526b53023e52f70f3e05f4fdb872212b --- /dev/null +++ b/doc/paddle/user_guides/cv_case/gan/README.md @@ -0,0 +1,450 @@ +# Generative Adversarial Network + +The source code for this tutorial is in book/09.gan,For the first time to use , please refer to the instruction manual of the Book document. + +### Description: ### +1. Hardware environment requirements: +This article can support running under CPU and GPU +2. CUDA / cuDNN version supported by docker image: +If docker is used to run book, please note that the GPU environment of the default image provided here is CUDA 8 / cuDNN 5. For GPUs requiring CUDA 9 such as NVIDIA Tesla V100, using this image may fail. +3. 
Consistency of code in documents and scripts: +Please note: to make this article easier to read and use, we split and adjusted the code of dc_gan.py and put it in this article. The code in this article is consistent with the running result of dc_gan.py, which can be verified by running [train.py](https://github.com/PaddlePaddle/book/blob/develop/01.fit_a_line/train.py). + +## Background + +GAN(Generative Adversarial Network \[[1](#Reference)\],GAN for short) is a method of unsupervised learning, learn by two neural networks contest with each other in a game. This method was originally proposed by lan·Goodfellow and others in 2014. The origin paper is [Generative Adversarial Network](https://arxiv.org/abs/1406.2661)。 + +The Generative Adversarial Network consists of a generative network and a discriminative network. Using random sampling from the latent space as input, the output of the generative network needs to imitate the real samples in the training set as much as possible. The input of the discriminative network is the real sample or the output of the generative network. The purpose is to distinguish the output of the generative network from the real samples as much as possible. The two networks oppose each other and continuously adjust the parameters. The purpose is to distinguish the samples generated by the generative network from the real samples as much as possible\[[2](#References)\]. + +GAN is often used to generate fake images \[[3](#References)\] )。In addition, the method is also used to reconstruct 3D models of objects from images, model patterns of motion in video and so on. + +## Result + +In this tutorial, we use MNIST data set as input for training. After 19 rounds of training, we can see that the generated image is very close to the the real image. In the following figure, the first 8 lines are the appearance of the real image, and the last 8 lines are the image generated by the network: +

+Figure 1. Handwritten digits generated by GAN

+ + +## Model Overview + +### GAN + +GAN is a way to learn the generative model of data distribution through adversarial methods. Among them, "Adversarial" refers to the mutual confrontation between Generator and Discriminator. Here, we will take the generated picture as an example to illustrate: + +- The generative network (G) receives a random noise z, and generates an image of approximate samples as much as possible, which is recorded as G(z) +- The discriminative network (D) receives an input image x, and try to distinguish the image is a real sample or a false sample generated by the generative network. The output of the discriminative network is D(x) represents the probability that x is a real image. If D(x) = 1, it means that the discriminative network thinks the input must be a real image, if D(x) = 0, it means that the discriminative network thinks the input must be a false image. + +In the process of training, the two networks fight against each other and finally form a dynamic balance. The above process can be described by the formula as following: + +

+$$\min_{G}\max_{D} V(D, G) = \mathbb{E}_{x \sim p_{data}(x)}[\log D(x)] + \mathbb{E}_{z \sim p_{z}(z)}[\log(1 - D(G(z)))]$$
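+
+One practical detail about this objective: early in training, when D easily rejects generated samples, the generator term log(1 - D(G(z))) saturates and yields very small gradients. The usual remedy, noted in [1] and also used by the training code later in this tutorial (generated images are given the label 1.0 in the generator's loss), is to train G to maximize
+
+$$\mathbb{E}_{z \sim p_{z}(z)}[\log D(G(z))]$$
+
+instead; this non-saturating loss has the same fixed point as the original objective but provides much stronger gradients at the start of training.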

+ +In the best case, G can generate a image G(z), which is very similar to the real image, and it is difficult for D to judge whether the generated picture is true or not, and make a random guess on the true or false of the image, that is D(G(z))=0.5。 + +The following figure shows the training process of GAN. The real image distribution, generated image distribution and discriminative model are black line, green line and blue line respectively in the figure. At the beginning of training, the discriminative model can not distinguish the real images from the generated images. Then, when we fixed the generative model and optimize the discriminative model, the results are shown in the second figure. It can be seen that the discriminative model can distinguish the generated data from the real data. The third step is to fixed the discriminative model, optimize the generative model, and try to make the discriminative model unable to distinguish the generated images from the real images. In this process, it can be seen that the distribution of the images generated by the model is closer to the distribution of the real images. Such iterations continue until the final convergence, and the generated distribution and the real distribution coincide and the discriminative model cannot distinguish the real images from the generated images. + + +

+Figure 2. GAN training process

+ +But in the actual process, it is difficult to get this perfect equilibrium point, and the convergence theory of GAN is still in continuous research. + + +### DCGAN + +[DCGAN](https://arxiv.org/abs/1511.06434) \[[4](#Reference)\] is the combination of deep convolution network and GAN, and its basic principle is the same as GAN, but the generative network and discriminative network are replaced by convolution networks (CNN). In order to improve the quality of the generated images and the convergence speed of the network, the DCGAN has made some improvements in the network structure: +- Cancel pooling layer: in the network, all the pooling layers are replaced by the strided convolutions (discriminator) and the fractional-strided convolutions (generator). +- Add batch normalization:add batchnorm in both the generator and the discriminator. +- Use full convolution network: remove FC layer to realize deeper network structure. +- Activation function: in generator(G), Tanh function is used in the last layer, and ReLu function is used in other layers; in discriminator(D), LeakyReLu is used as activation function. + +The structure of generator (G) in DCGAN is as following:: + +

Figure 3. Generator (G) in DCGAN
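As a minimal illustration of the first point above (this snippet is not part of `dc_gan.py`; the layer shapes and names are made up for the example, and it only uses the same `fluid` APIs as the rest of this tutorial), a stride-2 convolution halves the spatial size of a feature map, while a fractional-strided (transpose) convolution doubles it:

```python
import paddle.fluid as fluid

# Illustrative only: DCGAN replaces pooling with learned down/up-sampling.
x = fluid.data(name='feature', shape=[None, 64, 14, 14], dtype='float32')

# Discriminator side: a stride-2 convolution downsamples 14x14 -> 7x7,
# doing the job a pooling layer would otherwise do.
down = fluid.layers.conv2d(
    input=x, num_filters=128, filter_size=5, stride=2, padding=2,
    act='leaky_relu')

# Generator side: a fractional-strided (transpose) convolution upsamples
# 14x14 -> 28x28, the same operation wrapped by `deconv` below.
up = fluid.layers.conv2d_transpose(
    input=x, num_filters=32, filter_size=5, stride=2, padding=2,
    output_size=[28, 28], act='relu')
```

Because the down-sampling and up-sampling are themselves learned convolutions, DCGAN can drop pooling layers entirely.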

+ + +## Dataset prepare + +In this tutorial, we use MNIST to train generator and discriminator, and the dataset can be downloaded to the local automatically through the paddle.dataset module. +For detailed introduction of MNIST, please refer to[recognize_digits](https://github.com/PaddlePaddle/book/tree/develop/02.recognize_digits)。 + +## Model Training + + `09.gan/dc_gan.py` shows the whole process of training. + +### Import dependency + +First import necessary dependency. + +```python +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import sys +import os +import matplotlib +import PIL +import six +import numpy as np +import math +import time +import paddle +import paddle.fluid as fluid + +matplotlib.use('agg') +import matplotlib.pyplot as plt +import matplotlib.gridspec as gridspec +``` +### Defining auxiliary tool + +Define plot function to visualize the process of image generated. + +```python +def plot(gen_data): + pad_dim = 1 + paded = pad_dim + img_dim + gen_data = gen_data.reshape(gen_data.shape[0], img_dim, img_dim) + n = int(math.ceil(math.sqrt(gen_data.shape[0]))) + gen_data = (np.pad( + gen_data, [[0, n * n - gen_data.shape[0]], [pad_dim, 0], [pad_dim, 0]], + 'constant').reshape((n, n, paded, paded)).transpose((0, 2, 1, 3)) + .reshape((n * paded, n * paded))) + fig = plt.figure(figsize=(8, 8)) + plt.axis('off') + plt.imshow(gen_data, cmap='Greys_r', vmin=-1, vmax=1) + return fig +``` + +### Define hyper-parameter + +```python +gf_dim = 64 # the number of basic channels of the generator's feature map. The number of all the channels of feature maps in the generator is a multiple of the number of basic channels +df_dim = 64 # the number of basic channels of the discriminator's feature map. The number of all the channels of feature maps in the discriminator is a multiple of the number of basic channels +gfc_dim = 1024 * 2 # the dimension of full connection layer of generator +dfc_dim = 1024 # the dimension of full connection layer of discriminator +img_dim = 28 # size of the input picture + +NOISE_SIZE = 100 # dimension of input noise +LEARNING_RATE = 2e-4 # learning rate of training + +epoch = 20 # epoch number of training +output = "./output_dcgan" # storage path of model and test results +use_cudnn = False # use cuDNN or not +use_gpu=False # use GPU or not +``` + +### Define network architecture + +- Batch Normalization layer + +Call `fluid.layers.batch_norm` to implement the bn layer. The activation function uses ReLu by default. +```python +def bn(x, name=None, act='relu'): + return fluid.layers.batch_norm( + x, + param_attr=name + '1', + bias_attr=name + '2', + moving_mean_name=name + '3', + moving_variance_name=name + '4', + name=name, + act=act) +``` + +- Convolution layer + +Call `fluid.nets.simple_img_conv_pool` to get the result of convolution and pooling. The kernel size of convolution is 5x5, the pooling window size is 2x2, the window sliding step size is 2, and the activation function type is specified by the specific network structure. 
+ +```python +def conv(x, num_filters, name=None, act=None): + return fluid.nets.simple_img_conv_pool( + input=x, + filter_size=5, + num_filters=num_filters, + pool_size=2, + pool_stride=2, + param_attr=name + 'w', + bias_attr=name + 'b', + use_cudnn=use_cudnn, + act=act) +``` + +- Fully Connected layer + +```python +def fc(x, num_filters, name=None, act=None): + return fluid.layers.fc(input=x, + size=num_filters, + act=act, + param_attr=name + 'w', + bias_attr=name + 'b') +``` + +- Transpose Convolution Layer + +In the generator, we need to generate a full-scale image by random sampling values. DCGAN uses the transpose convolution layer for upsampling. In fluid, we call `fluid.layers.conv2d_transpose` to realize transpose convolution. + +```python +def deconv(x, + num_filters, + name=None, + filter_size=5, + stride=2, + dilation=1, + padding=2, + output_size=None, + act=None): + return fluid.layers.conv2d_transpose( + input=x, + param_attr=name + 'w', + bias_attr=name + 'b', + num_filters=num_filters, + output_size=output_size, + filter_size=filter_size, + stride=stride, + dilation=dilation, + padding=padding, + use_cudnn=use_cudnn, + act=act) +``` + +- Discriminator + +The discriminator uses the real dataset and the fake images generated by the generator to train, and in the training process, try to make the output result of the real data close to 1 and the output result of the fake image close to 0 as far as possible. The discriminator implemented in this tutorial is composed of two convolution_pooling layers and two fully connected layers. The number of neurons in the last fully connected layer is 1, and a binary classification result is output. + +```python +def D(x): + x = fluid.layers.reshape(x=x, shape=[-1, 1, 28, 28]) + x = conv(x, df_dim, act='leaky_relu',name='conv1') + x = bn(conv(x, df_dim * 2,name='conv2'), act='leaky_relu',name='bn1') + x = bn(fc(x, dfc_dim,name='fc1'), act='leaky_relu',name='bn2') + x = fc(x, 1, act='sigmoid',name='fc2') + return x +``` + +- Generator + +The generator consists of two groups of fully connected layers with BN and two groups of transpose convolution layers. The network input is random noise data. The convolution kernel number of the last layer of transposed convolution is 1, indicating that the output is a gray-scale picture. 
+ +```python +def G(x): + x = bn(fc(x, gfc_dim,name='fc3'),name='bn3') + x = bn(fc(x, gf_dim * 2 * img_dim // 4 * img_dim // 4,name='fc4'),name='bn4') + x = fluid.layers.reshape(x, [-1, gf_dim * 2, img_dim // 4, img_dim // 4]) + x = deconv(x, gf_dim * 2, act='relu', output_size=[14, 14],name='deconv1') + x = deconv(x, num_filters=1, filter_size=5, padding=2, act='tanh', output_size=[28, 28],name='deconv2') + x = fluid.layers.reshape(x, shape=[-1, 28 * 28]) + return x +``` +### Loss function + +Loss function uses `sigmoid_cross_entropy_with_logits` + +```python +def loss(x, label): + return fluid.layers.mean( + fluid.layers.sigmoid_cross_entropy_with_logits(x=x, label=label)) +``` + + +### Create Program + +```python +d_program = fluid.Program() +dg_program = fluid.Program() + +# Define the program to distinguish the real picture +with fluid.program_guard(d_program): + # size of the input picture is28*28=784 + img = fluid.data(name='img', shape=[None, 784], dtype='float32') + # label shape=1 + label = fluid.data(name='label', shape=[None, 1], dtype='float32') + d_logit = D(img) + d_loss = loss(d_logit, label) + +# Define the program to distinguish the generated pictures +with fluid.program_guard(dg_program): + noise = fluid.data( + name='noise', shape=[None, NOISE_SIZE], dtype='float32') + # Noise data as input to generate image + g_img = G(x=noise) + + g_program = dg_program.clone() + g_program_test = dg_program.clone(for_test=True) + + # Judge the probability that the generated image is a real sample + dg_logit = D(g_img) + + # Calculate the loss of the generated image as the real sample + noise_shape = fluid.layers.shape(noise) + dg_loss = loss( + dg_logit, + fluid.layers.fill_constant( + dtype='float32', shape=[noise_shape[0], 1], value=1.0)) + +``` +Adam is used as the optimizer to distinguish the loss of the real picture and the loss of the generated picture. + +```python +opt = fluid.optimizer.Adam(learning_rate=LEARNING_RATE) +opt.minimize(loss=d_loss) +parameters = [p.name for p in g_program.global_block().all_parameters()] +opt.minimize(loss=dg_loss, parameter_list=parameters) +``` + +### Dataset Feeders configuration + +Next, we start the training process. paddle.dataset.mnist.train() is used as training dataset. This function returns a reader. The reader in is a python function, which returns one Python yield generator every time it is called. + +The shuffle below is a reader decorator. It accepts a reader A and returns another reader B. Reader B reads the buffer_size training data into a buffer every time, then randomly scrambles its order and outputs it one by one. + +Batch is a special decorator. Its input is a reader and its output is a batched reader. In PaddlePaddle, a reader yields one piece of training data at a time, while a batched reader yields one minibatch at a time. + +```python +batch_size = 128 # Minibatch size + +train_reader = fluid.io.batch( + fluid.io.shuffle( + paddle.dataset.mnist.train(), buf_size=60000), + batch_size=batch_size) +``` + +### Create actuator + +```python +if use_gpu: + exe = fluid.Executor(fluid.CUDAPlace(0)) +else: + exe = fluid.Executor(fluid.CPUPlace()) + +exe.run(fluid.default_startup_program()) +``` + +### Start training + +For each iteration in the training process, the generator and the discriminator set their own iteration times respectively. In order to avoid the discriminator converging to 0 rapidly. In this tutorial, by default, every iteration, the discriminator are trained once and generator twice. 
+ +```python +t_time = 0 +losses = [[], []] + +# The number of iterations of the discriminator +NUM_TRAIN_TIMES_OF_DG = 2 + +# Noise data of final generated image +const_n = np.random.uniform( + low=-1.0, high=1.0, + size=[batch_size, NOISE_SIZE]).astype('float32') + +for pass_id in range(epoch): + for batch_id, data in enumerate(train_reader()): + if len(data) != batch_size: + continue + + # Generating noise data during training + noise_data = np.random.uniform( + low=-1.0, high=1.0, + size=[batch_size, NOISE_SIZE]).astype('float32') + + # Real image + real_image = np.array(list(map(lambda x: x[0], data))).reshape( + -1, 784).astype('float32') + # Real label + real_labels = np.ones( + shape=[real_image.shape[0], 1], dtype='float32') + # Fake label + fake_labels = np.zeros( + shape=[real_image.shape[0], 1], dtype='float32') + total_label = np.concatenate([real_labels, fake_labels]) + s_time = time.time() + + # Fake image + generated_image = exe.run(g_program, + feed={'noise': noise_data}, + fetch_list=[g_img])[0] + + total_images = np.concatenate([real_image, generated_image]) + + # D loss of judging fake pictures as fake + d_loss_1 = exe.run(d_program, + feed={ + 'img': generated_image, + 'label': fake_labels, + }, + fetch_list=[d_loss])[0][0] + + # D loss of judging true pictures as true + d_loss_2 = exe.run(d_program, + feed={ + 'img': real_image, + 'label': real_labels, + }, + fetch_list=[d_loss])[0][0] + + d_loss_n = d_loss_1 + d_loss_2 + losses[0].append(d_loss_n) + + # Training generator + for _ in six.moves.xrange(NUM_TRAIN_TIMES_OF_DG): + noise_data = np.random.uniform( + low=-1.0, high=1.0, + size=[batch_size, NOISE_SIZE]).astype('float32') + dg_loss_n = exe.run(dg_program, + feed={'noise': noise_data}, + fetch_list=[dg_loss])[0][0] + losses[1].append(dg_loss_n) + t_time += (time.time() - s_time) + if batch_id % 10 == 0 : + if not os.path.exists(output): + os.makedirs(output) + # Results of each round + generated_images = exe.run(g_program_test, + feed={'noise': const_n}, + fetch_list=[g_img])[0] + # Connect real pictures to generated pictures + total_images = np.concatenate([real_image, generated_images]) + fig = plot(total_images) + msg = "Epoch ID={0} Batch ID={1} D-Loss={2} DG-Loss={3}\n ".format( + pass_id, batch_id, + d_loss_n, dg_loss_n) + print(msg) + plt.title(msg) + plt.savefig( + '{}/{:04d}_{:04d}.png'.format(output, pass_id, + batch_id), + bbox_inches='tight') + plt.close(fig) +``` + +Print the results of a specific round: + +```python +def display_image(epoch_no,batch_id): + return PIL.Image.open('output_dcgan/{:04d}_{:04d}.png'.format(epoch_no,batch_id)) + +# Observe the generated images of the 10th epoch and 460 batches: +display_image(10,460) +``` + + +## Summary + +DCGAN use a random noise vector as the input, the input is amplified into two-dimensional data through a similar but opposite structure to CNN. By using the generative model of this structure and the discriminative model of CNN structure, DCGAN can achieve considerable results in image generative. In this case, we use DCGAN to generate handwritten digital images. You can try to change dataset to generate images that meet your personal needs, or try to modify the network structure to observe different generative effects. + + +## Reference +[1] Goodfellow, Ian J.; Pouget-Abadie, Jean; Mirza, Mehdi; Xu, Bing; Warde-Farley, David; Ozair, Sherjil; Courville, Aaron; Bengio, Yoshua. Generative Adversarial Networks. 2014. arXiv:1406.2661 [stat.ML]. 
+ +[2] Andrej Karpathy, Pieter Abbeel, Greg Brockman, Peter Chen, Vicki Cheung, Rocky Duan, Ian Goodfellow, Durk Kingma, Jonathan Ho, Rein Houthooft, Tim Salimans, John Schulman, Ilya Sutskever, And Wojciech Zaremba, Generative Models, OpenAI, [April 7, 2016] + +[3] alimans, Tim; Goodfellow, Ian; Zaremba, Wojciech; Cheung, Vicki; Radford, Alec; Chen, Xi. Improved Techniques for Training GANs. 2016. arXiv:1606.03498 [cs.LG]. + +[4] Radford A, Metz L, Chintala S. Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks[J]. Computer Science, 2015. diff --git a/doc/paddle/user_guides/cv_case/gan/_ce.py b/doc/paddle/user_guides/cv_case/gan/_ce.py new file mode 100644 index 0000000000000000000000000000000000000000..0d69912a995efe915cbfbbd6d38367b86ac8434a --- /dev/null +++ b/doc/paddle/user_guides/cv_case/gan/_ce.py @@ -0,0 +1,50 @@ +### This file is only used for continuous evaluation test! +from __future__ import print_function +from __future__ import division +from __future__ import absolute_import +import os +import sys +sys.path.append(os.environ['ceroot']) +from kpi import CostKpi + +dcgan_d_train_cost_kpi = CostKpi( + 'dcgan_d_train_cost', + 0.02, + 0, + actived=True, + desc='train cost of discriminator') +dcgan_g_train_cost_kpi = CostKpi( + 'dcgan_g_train_cost', + 0.02, + 0, + actived=True, + desc='train cost of generator') + +tracking_kpis = [dcgan_d_train_cost_kpi, dcgan_g_train_cost_kpi] + + +def parse_log(log): + for line in log.split('\n'): + fs = line.strip().split('\t') + print(fs) + if len(fs) == 3 and fs[0] == 'kpis': + kpi_name = fs[1] + kpi_value = float(fs[2]) + yield kpi_name, kpi_value + + +def log_to_ce(log): + kpi_tracker = {} + for kpi in tracking_kpis: + kpi_tracker[kpi.name] = kpi + print(kpi.name) + print(kpi) + for (kpi_name, kpi_value) in parse_log(log): + print(kpi_name, kpi_value) + kpi_tracker[kpi_name].add_record(kpi_value) + kpi_tracker[kpi_name].persist() + + +if __name__ == '__main__': + log = sys.stdin.read() + log_to_ce(log) diff --git a/doc/paddle/user_guides/cv_case/gan/dc_gan.py b/doc/paddle/user_guides/cv_case/gan/dc_gan.py new file mode 100644 index 0000000000000000000000000000000000000000..650897a2803cacc4fb31f339f8fb7958a52359ff --- /dev/null +++ b/doc/paddle/user_guides/cv_case/gan/dc_gan.py @@ -0,0 +1,187 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import sys +import os +import argparse +import functools +import matplotlib +import six +import numpy as np +import paddle +import time +import paddle.fluid as fluid +from utility import get_parent_function_name, plot, check, add_arguments, print_arguments +from network import G, D +matplotlib.use('agg') +import matplotlib.pyplot as plt +import matplotlib.gridspec as gridspec + +NOISE_SIZE = 100 +LEARNING_RATE = 2e-4 + +parser = argparse.ArgumentParser(description=__doc__) +add_arg = functools.partial(add_arguments, argparser=parser) +# yapf: disable +add_arg('batch_size', int, 128, "Minibatch size.") +add_arg('epoch', int, 20, "The number of epoched to be trained.") +add_arg('output', str, "./output_dcgan", "The directory the model and the test result to be saved to.") +add_arg('use_gpu', bool, True, "Whether to use GPU to train.") +add_arg('enable_ce', bool, False, "If set True, enable continuous evaluation job.") +# yapf: enable + + +def loss(x, label): + return fluid.layers.mean( + fluid.layers.sigmoid_cross_entropy_with_logits(x=x, label=label)) + + +def train(args): + + if args.enable_ce: + np.random.seed(10) + fluid.default_startup_program().random_seed = 90 + + d_program = fluid.Program() + dg_program = fluid.Program() + + with fluid.program_guard(d_program): + img = fluid.data(name='img', shape=[None, 784], dtype='float32') + label = fluid.data(name='label', shape=[None, 1], dtype='float32') + d_logit = D(img) + d_loss = loss(d_logit, label) + + with fluid.program_guard(dg_program): + noise = fluid.data( + name='noise', shape=[None, NOISE_SIZE], dtype='float32') + g_img = G(x=noise) + + g_program = dg_program.clone() + g_program_test = dg_program.clone(for_test=True) + + dg_logit = D(g_img) + noise_shape = fluid.layers.shape(noise) + dg_loss = loss(dg_logit, + fluid.layers.fill_constant( + dtype='float32', + shape=[noise_shape[0], 1], + value=1.0)) + + opt = fluid.optimizer.Adam(learning_rate=LEARNING_RATE) + + opt.minimize(loss=d_loss) + parameters = [p.name for p in g_program.global_block().all_parameters()] + + opt.minimize(loss=dg_loss, parameter_list=parameters) + + exe = fluid.Executor(fluid.CPUPlace()) + if args.use_gpu: + exe = fluid.Executor(fluid.CUDAPlace(0)) + exe.run(fluid.default_startup_program()) + if args.enable_ce: + train_reader = fluid.io.batch( + paddle.dataset.mnist.train(), batch_size=args.batch_size) + else: + + train_reader = fluid.io.batch( + fluid.io.shuffle(paddle.dataset.mnist.train(), buf_size=60000), + batch_size=args.batch_size) + + NUM_TRAIN_TIMES_OF_DG = 2 + const_n = np.random.uniform( + low=-1.0, high=1.0, + size=[args.batch_size, NOISE_SIZE]).astype('float32') + + t_time = 0 + losses = [[], []] + for pass_id in range(args.epoch): + for batch_id, data in enumerate(train_reader()): + if len(data) != args.batch_size: + continue + noise_data = np.random.uniform( + low=-1.0, high=1.0, + size=[args.batch_size, NOISE_SIZE]).astype('float32') + real_image = np.array(list(map(lambda x: x[0], data))).reshape( + -1, 784).astype('float32') + real_labels = np.ones( + shape=[real_image.shape[0], 1], dtype='float32') + fake_labels = np.zeros( + shape=[real_image.shape[0], 1], dtype='float32') + total_label = np.concatenate([real_labels, fake_labels]) + s_time = time.time() + generated_image = exe.run( + g_program, feed={'noise': noise_data}, fetch_list=[g_img])[0] + + total_images = np.concatenate([real_image, generated_image]) + + d_loss_1 = 
exe.run( + d_program, + feed={ + 'img': generated_image, + 'label': fake_labels, + }, + fetch_list=[d_loss])[0][0] + + d_loss_2 = exe.run( + d_program, + feed={ + 'img': real_image, + 'label': real_labels, + }, + fetch_list=[d_loss])[0][0] + + d_loss_n = d_loss_1 + d_loss_2 + losses[0].append(d_loss_n) + for _ in six.moves.xrange(NUM_TRAIN_TIMES_OF_DG): + noise_data = np.random.uniform( + low=-1.0, high=1.0, + size=[args.batch_size, NOISE_SIZE]).astype('float32') + dg_loss_n = exe.run( + dg_program, + feed={'noise': noise_data}, + fetch_list=[dg_loss])[0][0] + losses[1].append(dg_loss_n) + t_time += (time.time() - s_time) + if batch_id % 10 == 0: + if not os.path.exists(args.output): + os.makedirs(args.output) + # generate image each batch + generated_images = exe.run( + g_program_test, + feed={'noise': const_n}, + fetch_list=[g_img])[0] + total_images = np.concatenate([real_image, generated_images]) + fig = plot(total_images) + msg = "Epoch ID={0} Batch ID={1} D-Loss={2} DG-Loss={3}\n gen={4}".format( + pass_id, batch_id, d_loss_n, dg_loss_n, + check(generated_images)) + print(msg) + plt.title(msg) + plt.savefig( + '{}/{:04d}_{:04d}.png'.format(args.output, pass_id, + batch_id), + bbox_inches='tight') + plt.close(fig) + if args.enable_ce and pass_id == args.epoch - 1: + print("kpis\tdcgan_d_train_cost\t%f" % np.mean(losses[0])) + print("kpis\tdcgan_g_train_cost\t%f" % np.mean(losses[1])) + + +if __name__ == "__main__": + args = parser.parse_args() + print_arguments(args) + train(args) diff --git a/doc/paddle/user_guides/cv_case/gan/image/01.gif b/doc/paddle/user_guides/cv_case/gan/image/01.gif new file mode 100644 index 0000000000000000000000000000000000000000..9f952ae8d77ff2a5c07d4fcef90743393d9dcb59 Binary files /dev/null and b/doc/paddle/user_guides/cv_case/gan/image/01.gif differ diff --git a/doc/paddle/user_guides/cv_case/gan/image/dcgan_demo.png b/doc/paddle/user_guides/cv_case/gan/image/dcgan_demo.png new file mode 100644 index 0000000000000000000000000000000000000000..e83bf1455b08d01d0def4e2956f7cca8a7e7cd1f Binary files /dev/null and b/doc/paddle/user_guides/cv_case/gan/image/dcgan_demo.png differ diff --git a/doc/paddle/user_guides/cv_case/gan/image/dcgan_g.png b/doc/paddle/user_guides/cv_case/gan/image/dcgan_g.png new file mode 100644 index 0000000000000000000000000000000000000000..bbe1c8fb7000ed425a92e1c6ce077763dd97fdf5 Binary files /dev/null and b/doc/paddle/user_guides/cv_case/gan/image/dcgan_g.png differ diff --git a/doc/paddle/user_guides/cv_case/gan/image/process.png b/doc/paddle/user_guides/cv_case/gan/image/process.png new file mode 100644 index 0000000000000000000000000000000000000000..c0bd3924ba72507f9aac3d5ef524072afc937c29 Binary files /dev/null and b/doc/paddle/user_guides/cv_case/gan/image/process.png differ diff --git a/doc/paddle/user_guides/cv_case/gan/index.cn.html b/doc/paddle/user_guides/cv_case/gan/index.cn.html new file mode 100644 index 0000000000000000000000000000000000000000..625073db5f907b674747138fef927dfa073c93a2 --- /dev/null +++ b/doc/paddle/user_guides/cv_case/gan/index.cn.html @@ -0,0 +1,514 @@ + + + + + + + + + + + + + + + + + +
+
+ + + + + + + diff --git a/doc/paddle/user_guides/cv_case/gan/index.html b/doc/paddle/user_guides/cv_case/gan/index.html new file mode 100644 index 0000000000000000000000000000000000000000..f04971c70cf9e3a9a240877d13540b64484e5a74 --- /dev/null +++ b/doc/paddle/user_guides/cv_case/gan/index.html @@ -0,0 +1,514 @@ + + + + + + + + + + + + + + + + + +
+
+ + + + + + + diff --git a/doc/paddle/user_guides/cv_case/gan/network.py b/doc/paddle/user_guides/cv_case/gan/network.py new file mode 100644 index 0000000000000000000000000000000000000000..6f0e85f4a81ff6938ec20f2cc4b8c63dffdc235a --- /dev/null +++ b/doc/paddle/user_guides/cv_case/gan/network.py @@ -0,0 +1,155 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import paddle +import paddle.fluid as fluid +from utility import get_parent_function_name +import os + +gf_dim = 64 +df_dim = 64 +gfc_dim = 1024 * 2 +dfc_dim = 1024 +img_dim = 28 + +c_dim = 3 +y_dim = 1 +output_height = 28 +output_width = 28 + +use_cudnn = True +if 'ce_mode' in os.environ: + use_cudnn = False + + +def bn(x, name=None, act='relu'): + if name is None: + name = get_parent_function_name() + #return fluid.layers.leaky_relu(x) + return fluid.layers.batch_norm( + x, + param_attr=name + '1', + bias_attr=name + '2', + moving_mean_name=name + '3', + moving_variance_name=name + '4', + name=name, + act=act) + + +def conv(x, num_filters, name=None, act=None): + if name is None: + name = get_parent_function_name() + return fluid.nets.simple_img_conv_pool( + input=x, + filter_size=5, + num_filters=num_filters, + pool_size=2, + pool_stride=2, + param_attr=name + 'w', + bias_attr=name + 'b', + use_cudnn=use_cudnn, + act=act) + + +def fc(x, num_filters, name=None, act=None): + if name is None: + name = get_parent_function_name() + return fluid.layers.fc( + input=x, + size=num_filters, + act=act, + param_attr=name + 'w', + bias_attr=name + 'b') + + +def deconv(x, + num_filters, + name=None, + filter_size=5, + stride=2, + dilation=1, + padding=2, + output_size=None, + act=None): + if name is None: + name = get_parent_function_name() + return fluid.layers.conv2d_transpose( + input=x, + param_attr=name + 'w', + bias_attr=name + 'b', + num_filters=num_filters, + output_size=output_size, + filter_size=filter_size, + stride=stride, + dilation=dilation, + padding=padding, + use_cudnn=use_cudnn, + act=act) + + +def conv_cond_concat(x, y): + """Concatenate conditioning vector on feature map axis.""" + x_shape = fluid.layers.shape(x) + ones = fluid.layers.fill_constant( + [x_shape[0], y.shape[1], x.shape[2], x.shape[3]], "float32", 1.0) + return fluid.layers.concat([x, ones * y], 1) + + +def D_cond(image, y): + image = fluid.layers.reshape(x=image, shape=[-1, 1, 28, 28]) + yb = fluid.layers.reshape(y, [-1, y_dim, 1, 1]) + x = conv_cond_concat(image, yb) + + h0 = conv(x, c_dim + y_dim, act="leaky_relu") + h0 = conv_cond_concat(h0, yb) + h1 = bn(conv(h0, df_dim + y_dim), act="leaky_relu") + h1 = fluid.layers.flatten(h1, axis=1) + + h1 = fluid.layers.concat([h1, y], 1) + + h2 = bn(fc(h1, dfc_dim), act='leaky_relu') + h2 = fluid.layers.concat([h2, y], 1) + + h3 = fc(h2, 1, act='sigmoid') + return h3 + + +def G_cond(z, y): + s_h, s_w = output_height, output_width + s_h2, s_h4 = int(s_h // 2), int(s_h // 4) + s_w2, s_w4 = int(s_w // 2), int(s_w // 4) + + yb = fluid.layers.reshape(y, [-1, y_dim, 1, 1]) #NCHW + + z = fluid.layers.concat([z, y], 1) + h0 = bn(fc(z, gfc_dim // 2), act='relu') + h0 = fluid.layers.concat([h0, y], 1) + + h1 = bn(fc(h0, gf_dim * 2 * s_h4 * s_w4), act='relu') + h1 = fluid.layers.reshape(h1, [-1, gf_dim * 2, s_h4, s_w4]) + + h1 = conv_cond_concat(h1, yb) + h2 = bn(deconv(h1, gf_dim * 2, output_size=[s_h2, s_w2]), act='relu') + h2 = conv_cond_concat(h2, yb) + h3 = deconv(h2, 1, output_size=[s_h, s_w], act='tanh') + return fluid.layers.reshape(h3, shape=[-1, s_h * 
s_w]) + + +def D(x): + x = fluid.layers.reshape(x=x, shape=[-1, 1, 28, 28]) + x = conv(x, df_dim, act='leaky_relu') + x = bn(conv(x, df_dim * 2), act='leaky_relu') + x = bn(fc(x, dfc_dim), act='leaky_relu') + x = fc(x, 1, act='sigmoid') + return x + + +def G(x): + x = bn(fc(x, gfc_dim)) + x = bn(fc(x, gf_dim * 2 * img_dim // 4 * img_dim // 4)) + x = fluid.layers.reshape(x, [-1, gf_dim * 2, img_dim // 4, img_dim // 4]) + x = deconv(x, gf_dim * 2, act='relu', output_size=[14, 14]) + x = deconv( + x, 1, filter_size=5, padding=2, act='tanh', output_size=[28, 28]) + x = fluid.layers.reshape(x, shape=[-1, 28 * 28]) + return x diff --git a/doc/paddle/user_guides/cv_case/gan/utility.py b/doc/paddle/user_guides/cv_case/gan/utility.py new file mode 100644 index 0000000000000000000000000000000000000000..3c64abc246f1ff44d0cb3953aae5dc84d2f46ec9 --- /dev/null +++ b/doc/paddle/user_guides/cv_case/gan/utility.py @@ -0,0 +1,83 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import math +import distutils.util +import numpy as np +import inspect +import matplotlib +import six +matplotlib.use('agg') +import matplotlib.pyplot as plt +import matplotlib.gridspec as gridspec + +img_dim = 28 + + +def get_parent_function_name(): + return inspect.stack()[2][3] + '.' + inspect.stack()[1][3] + '.' + str( + inspect.stack()[2][2]) + '.' + + +def plot(gen_data): + pad_dim = 1 + paded = pad_dim + img_dim + gen_data = gen_data.reshape(gen_data.shape[0], img_dim, img_dim) + n = int(math.ceil(math.sqrt(gen_data.shape[0]))) + gen_data = (np.pad( + gen_data, [[0, n * n - gen_data.shape[0]], [pad_dim, 0], [pad_dim, 0]], + 'constant').reshape((n, n, paded, paded)).transpose((0, 2, 1, 3)) + .reshape((n * paded, n * paded))) + fig = plt.figure(figsize=(8, 8)) + plt.axis('off') + plt.imshow(gen_data, cmap='Greys_r', vmin=-1, vmax=1) + return fig + + +def check(a): + a = np.sort(np.array(a).flatten()) + return [ + np.average(a), np.min(a), np.max(a), a[int(len(a) * 0.25)], + a[int(len(a) * 0.75)] + ] + + +def print_arguments(args): + """Print argparse's arguments. + + Usage: + + .. code-block:: python + + parser = argparse.ArgumentParser() + parser.add_argument("name", default="Jonh", type=str, help="User name.") + args = parser.parse_args() + print_arguments(args) + + :param args: Input argparse.Namespace for printing. + :type args: argparse.Namespace + """ + print("----------- Configuration Arguments -----------") + for arg, value in sorted(six.iteritems(vars(args))): + print("%s: %s" % (arg, value)) + print("------------------------------------------------") + + +def add_arguments(argname, type, default, help, argparser, **kwargs): + """Add argparse's argument. + + Usage: + + .. 
code-block:: python + + parser = argparse.ArgumentParser() + add_argument("name", str, "Jonh", "User name.", parser) + args = parser.parse_args() + """ + type = distutils.util.strtobool if type == bool else type + argparser.add_argument( + "--" + argname, + default=default, + type=type, + help=help + ' Default: %(default)s.', + **kwargs) diff --git a/doc/paddle/user_guides/cv_case/image_classification/.gitignore b/doc/paddle/user_guides/cv_case/image_classification/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..dc7c62b06287ad333dd41082e566b0553d3a5341 --- /dev/null +++ b/doc/paddle/user_guides/cv_case/image_classification/.gitignore @@ -0,0 +1,8 @@ +*.pyc +train.log +output +data/cifar-10-batches-py/ +data/cifar-10-python.tar.gz +data/*.txt +data/*.list +data/mean.meta diff --git a/doc/paddle/user_guides/cv_case/image_classification/.run_ce.sh b/doc/paddle/user_guides/cv_case/image_classification/.run_ce.sh new file mode 100644 index 0000000000000000000000000000000000000000..9b7903483d661d5a7c04e4e34c210f7f5edf997a --- /dev/null +++ b/doc/paddle/user_guides/cv_case/image_classification/.run_ce.sh @@ -0,0 +1,6 @@ +#!/bin/bash +#This file is only used for continuous evaluation. +export FLAGS_cudnn_deterministic=true +export CUDA_VISIBLE_DEVICES=0 +python train.py --num_epochs 1 --use_gpu 1 --enable_ce | python _ce.py + diff --git a/doc/paddle/user_guides/cv_case/image_classification/README.cn.md b/doc/paddle/user_guides/cv_case/image_classification/README.cn.md new file mode 100644 index 0000000000000000000000000000000000000000..cdc22a93b9ce9659744ef0a7be8a461606cb7ec2 --- /dev/null +++ b/doc/paddle/user_guides/cv_case/image_classification/README.cn.md @@ -0,0 +1,612 @@ + +# 图像分类 + +本教程源代码目录在[book/image_classification](https://github.com/PaddlePaddle/book/tree/develop/03.image_classification),初次使用请您参考[Book文档使用说明](https://github.com/PaddlePaddle/book/blob/develop/README.cn.md#运行这本书)。 + +### 说明: ### + +1.硬件环境要求: +本文可支持在CPU、GPU下运行 + +2.Docker镜像支持的CUDA/cuDNN版本: +如果使用了Docker运行Book,请注意:这里所提供的默认镜像的GPU环境为 CUDA 8/cuDNN 5,对于NVIDIA Tesla V100等要求CUDA 9的 GPU,使用该镜像可能会运行失败。 + +3.文档和脚本中代码的一致性问题: +请注意:为使本文更加易读易用,我们拆分、调整了train.py的代码并放入本文。本文中代码与train.py的运行结果一致,可直接运行[train.py](https://github.com/PaddlePaddle/book/blob/develop/03.image_classification/train.py)进行验证。 + +4. PaddlePaddle版本:PaddlePaddle 1.6及以上版本或适当的develop版本。 + +## 背景介绍 + +图像相比文字能够提供更加生动、容易理解及更具艺术感的信息,是人们转递与交换信息的重要来源。在本教程中,我们专注于图像识别领域的一个重要问题,即图像分类。 + +图像分类是根据图像的语义信息将不同类别图像区分开来,是计算机视觉中重要的基本问题,也是图像检测、图像分割、物体跟踪、行为分析等其他高层视觉任务的基础。图像分类在很多领域有广泛应用,包括安防领域的人脸识别和智能视频分析等,交通领域的交通场景识别,互联网领域基于内容的图像检索和相册自动归类,医学领域的图像识别等。 + + +一般来说,图像分类通过手工提取特征或特征学习方法对整个图像进行全部描述,然后使用分类器判别物体类别,因此如何提取图像的特征至关重要。在深度学习算法之前使用较多的是基于词袋(Bag of Words)模型的物体分类方法。词袋方法从自然语言处理中引入,即一句话可以用一个装了词的袋子表示其特征,袋子中的词为句子中的单词、短语或字。对于图像而言,词袋方法需要构建字典。最简单的词袋模型框架可以设计为**底层特征抽取**、**特征编码**、**分类器设计**三个过程。 + +而基于深度学习的图像分类方法,可以通过有监督或无监督的方式**学习**层次化的特征描述,从而取代了手工设计或选择图像特征的工作。深度学习模型中的卷积神经网络(Convolution Neural Network, CNN)近年来在图像领域取得了惊人的成绩,CNN直接利用图像像素信息作为输入,最大程度上保留了输入图像的所有信息,通过卷积操作进行特征的提取和高层抽象,模型输出直接是图像识别的结果。这种基于"输入-输出"直接端到端的学习方法取得了非常好的效果,得到了广泛的应用。 + +本教程主要介绍图像分类的深度学习模型,以及如何使用PaddlePaddle训练CNN模型。 + +## 效果展示 + +图像分类包括通用图像分类、细粒度图像分类等。图1展示了通用图像分类效果,即模型可以正确识别图像上的主要物体。 + +

+
+图1. 通用图像分类展示 +

+ + +图2展示了细粒度图像分类-花卉识别的效果,要求模型可以正确识别花的类别。 + + +

+
+图2. 细粒度图像分类展示 +

+ + +一个好的模型既要对不同类别识别正确,同时也应该能够对不同视角、光照、背景、变形或部分遮挡的图像正确识别(这里我们统一称作图像扰动)。图3展示了一些图像的扰动,较好的模型会像聪明的人类一样能够正确识别。 + +

+
+图3. 扰动图片展示[22] +

+ +## 模型概览 + +图像识别领域大量的研究成果都是建立在[PASCAL VOC](http://host.robots.ox.ac.uk/pascal/VOC/)、[ImageNet](http://image-net.org/)等公开的数据集上,很多图像识别算法通常在这些数据集上进行测试和比较。PASCAL VOC是2005年发起的一个视觉挑战赛,ImageNet是2010年发起的大规模视觉识别竞赛(ILSVRC)的数据集,在本章中我们基于这些竞赛的一些论文介绍图像分类模型。 + +在2012年之前的传统图像分类方法可以用背景描述中提到的三步完成,但通常完整建立图像识别模型一般包括底层特征学习、特征编码、空间约束、分类器设计、模型融合等几个阶段。 + + 1). **底层特征提取**: 通常从图像中按照固定步长、尺度提取大量局部特征描述。常用的局部特征包括SIFT(Scale-Invariant Feature Transform, 尺度不变特征转换) \[[1](#参考文献)\]、HOG(Histogram of Oriented Gradient, 方向梯度直方图) \[[2](#参考文献)\]、LBP(Local Bianray Pattern, 局部二值模式) \[[3](#参考文献)\] 等,一般也采用多种特征描述,防止丢失过多的有用信息。 + + 2). **特征编码**: 底层特征中包含了大量冗余与噪声,为了提高特征表达的鲁棒性,需要使用一种特征变换算法对底层特征进行编码,称作特征编码。常用的特征编码方法包括向量量化编码 \[[4](#参考文献)\]、稀疏编码 \[[5](#参考文献)\]、局部线性约束编码 \[[6](#参考文献)\]、Fisher向量编码 \[[7](#参考文献)\] 等。 + + 3). **空间特征约束**: 特征编码之后一般会经过空间特征约束,也称作**特征汇聚**。特征汇聚是指在一个空间范围内,对每一维特征取最大值或者平均值,可以获得一定特征不变形的特征表达。金字塔特征匹配是一种常用的特征汇聚方法,这种方法提出将图像均匀分块,在分块内做特征汇聚。 + + 4). **通过分类器分类**: 经过前面步骤之后一张图像可以用一个固定维度的向量进行描述,接下来就是经过分类器对图像进行分类。通常使用的分类器包括SVM(Support Vector Machine, 支持向量机)、随机森林等。而使用核方法的SVM是最为广泛的分类器,在传统图像分类任务上性能很好。 + +这种传统的图像分类方法在PASCAL VOC竞赛中的图像分类算法中被广泛使用 \[[18](#参考文献)\]。[NEC实验室](http://www.nec-labs.com/)在ILSVRC2010中采用SIFT和LBP特征,两个非线性编码器以及SVM分类器获得图像分类的冠军 \[[8](#参考文献)\]。 + +Alex Krizhevsky在2012年ILSVRC提出的CNN模型 \[[9](#参考文献)\] 取得了历史性的突破,效果大幅度超越传统方法,获得了ILSVRC2012冠军,该模型被称作AlexNet。这也是首次将深度学习用于大规模图像分类中。从AlexNet之后,涌现了一系列CNN模型,不断地在ImageNet上刷新成绩,如图4展示。随着模型变得越来越深以及精妙的结构设计,Top-5的错误率也越来越低,降到了3.5%附近。而在同样的ImageNet数据集上,人眼的辨识错误率大概在5.1%,也就是目前的深度学习模型的识别能力已经超过了人眼。 + +

+
+图4. ILSVRC图像分类Top-5错误率 +

+ +### CNN + +传统CNN包含卷积层、全连接层等组件,并采用softmax多类别分类器和多类交叉熵损失函数,一个典型的卷积神经网络如图5所示,我们先介绍用来构造CNN的常见组件。 + +

+
+图5. CNN网络示例[20] +

+ +- 卷积层(convolution layer): 执行卷积操作提取底层到高层的特征,发掘出图片局部关联性质和空间不变性质。 +- 池化层(pooling layer): 执行降采样操作。通过取卷积输出特征图中局部区块的最大值(max-pooling)或者均值(avg-pooling)。降采样也是图像处理中常见的一种操作,可以过滤掉一些不重要的高频信息。 +- 全连接层(fully-connected layer,或者fc layer): 输入层到隐藏层的神经元是全部连接的。 +- 非线性变化: 卷积层、全连接层后面一般都会接非线性变化函数,例如Sigmoid、Tanh、ReLu等来增强网络的表达能力,在CNN里最常使用的为ReLu激活函数。 +- Dropout \[[10](#参考文献)\] : 在模型训练阶段随机让一些隐层节点权重不工作,提高网络的泛化能力,一定程度上防止过拟合。 + +另外,在训练过程中由于每层参数不断更新,会导致下一次输入分布发生变化,这样导致训练过程需要精心设计超参数。如2015年Sergey Ioffe和Christian Szegedy提出了Batch Normalization (BN)算法 \[[14](#参考文献)\] 中,每个batch对网络中的每一层特征都做归一化,使得每层分布相对稳定。BN算法不仅起到一定的正则作用,而且弱化了一些超参数的设计。经过实验证明,BN算法加速了模型收敛过程,在后来较深的模型中被广泛使用。 + +接下来我们主要介绍VGG,GoogLeNet和ResNet网络结构。 + +### VGG + +牛津大学VGG(Visual Geometry Group)组在2014年ILSVRC提出的模型被称作VGG模型 \[[11](#参考文献)\] 。该模型相比以往模型进一步加宽和加深了网络结构,它的核心是五组卷积操作,每两组之间做Max-Pooling空间降维。同一组内采用多次连续的3X3卷积,卷积核的数目由较浅组的64增多到最深组的512,同一组内的卷积核数目是一样的。卷积之后接两层全连接层,之后是分类层。由于每组内卷积层的不同,有11、13、16、19层这几种模型,下图展示一个16层的网络结构。VGG模型结构相对简洁,提出之后也有很多文章基于此模型进行研究,如在ImageNet上首次公开超过人眼识别的模型\[[19](#参考文献)\]就是借鉴VGG模型的结构。 + +

+
+图6. 基于ImageNet的VGG16模型 +

+ +### GoogLeNet + +GoogLeNet \[[12](#参考文献)\] 在2014年ILSVRC的获得了冠军,在介绍该模型之前我们先来了解NIN(Network in Network)模型 \[[13](#参考文献)\] 和Inception模块,因为GoogLeNet模型由多组Inception模块组成,模型设计借鉴了NIN的一些思想。 + +NIN模型主要有两个特点: + +1) 引入了多层感知卷积网络(Multi-Layer Perceptron Convolution, MLPconv)代替一层线性卷积网络。MLPconv是一个微小的多层卷积网络,即在线性卷积后面增加若干层1x1的卷积,这样可以提取出高度非线性特征。 + +2) 传统的CNN最后几层一般都是全连接层,参数较多。而NIN模型设计最后一层卷积层包含类别维度大小的特征图,然后采用全局均值池化(Avg-Pooling)替代全连接层,得到类别维度大小的向量,再进行分类。这种替代全连接层的方式有利于减少参数。 + +Inception模块如下图7所示,图(a)是最简单的设计,输出是3个卷积层和一个池化层的特征拼接。这种设计的缺点是池化层不会改变特征通道数,拼接后会导致特征的通道数较大,经过几层这样的模块堆积后,通道数会越来越大,导致参数和计算量也随之增大。为了改善这个缺点,图(b)引入3个1x1卷积层进行降维,所谓的降维就是减少通道数,同时如NIN模型中提到的1x1卷积也可以修正线性特征。 + +

+
+图7. Inception模块 +

+ +GoogLeNet由多组Inception模块堆积而成。另外,在网络最后也没有采用传统的多层全连接层,而是像NIN网络一样采用了均值池化层;但与NIN不同的是,GoogLeNet在池化层后加了一个全连接层来映射类别数。除了这两个特点之外,由于网络中间层特征也很有判别性,GoogLeNet在中间层添加了两个辅助分类器,在后向传播中增强梯度并且增强正则化,而整个网络的损失函数是这个三个分类器的损失加权求和。 + +GoogLeNet整体网络结构如图8所示,总共22层网络:开始由3层普通的卷积组成;接下来由三组子网络组成,第一组子网络包含2个Inception模块,第二组包含5个Inception模块,第三组包含2个Inception模块;然后接均值池化层、全连接层。 + +

+
+图8. GoogLeNet[12] +

+ + +上面介绍的是GoogLeNet第一版模型(称作GoogLeNet-v1)。GoogLeNet-v2 \[[14](#参考文献)\] 引入BN层;GoogLeNet-v3 \[[16](#参考文献)\] 对一些卷积层做了分解,进一步提高网络非线性能力和加深网络;GoogLeNet-v4 \[[17](#参考文献)\] 引入下面要讲的ResNet设计思路。从v1到v4每一版的改进都会带来准确度的提升,介于篇幅,这里不再详细介绍v2到v4的结构。 + + +### ResNet + +ResNet(Residual Network) \[[15](#参考文献)\] 是2015年ImageNet图像分类、图像物体定位和图像物体检测比赛的冠军。针对随着网络训练加深导致准确度下降的问题,ResNet提出了残差学习方法来减轻训练深层网络的困难。在已有设计思路(BN, 小卷积核,全卷积网络)的基础上,引入了残差模块。每个残差模块包含两条路径,其中一条路径是输入特征的直连通路,另一条路径对该特征做两到三次卷积操作得到该特征的残差,最后再将两条路径上的特征相加。 + +残差模块如图9所示,左边是基本模块连接方式,由两个输出通道数相同的3x3卷积组成。右边是瓶颈模块(Bottleneck)连接方式,之所以称为瓶颈,是因为上面的1x1卷积用来降维(图示例即256->64),下面的1x1卷积用来升维(图示例即64->256),这样中间3x3卷积的输入和输出通道数都较小(图示例即64->64)。 + +

+
+图9. 残差模块 +

+ +图10展示了50、101、152层网络连接示意图,使用的是瓶颈模块。这三个模型的区别在于每组中残差模块的重复次数不同(见图右上角)。ResNet训练收敛较快,成功的训练了上百乃至近千层的卷积神经网络。 + +

+
+图10. 基于ImageNet的ResNet模型 +

+ + +## 数据准备 + +通用图像分类公开的标准数据集常用的有[CIFAR](https://www.cs.toronto.edu/~kriz/cifar.html)、[ImageNet](http://image-net.org/)、[COCO](http://mscoco.org/)等,常用的细粒度图像分类数据集包括[CUB-200-2011](http://www.vision.caltech.edu/visipedia/CUB-200-2011.html)、[Stanford Dog](http://vision.stanford.edu/aditya86/ImageNetDogs/)、[Oxford-flowers](http://www.robots.ox.ac.uk/~vgg/data/flowers/)等。其中ImageNet数据集规模相对较大,如[模型概览](#模型概览)一章所讲,大量研究成果基于ImageNet。ImageNet数据从2010年来稍有变化,常用的是ImageNet-2012数据集,该数据集包含1000个类别:训练集包含1,281,167张图片,每个类别数据732至1300张不等,验证集包含50,000张图片,平均每个类别50张图片。 + +由于ImageNet数据集较大,下载和训练较慢,为了方便大家学习,我们使用[CIFAR10]()数据集。CIFAR10数据集包含60,000张32x32的彩色图片,10个类别,每个类包含6,000张。其中50,000张图片作为训练集,10000张作为测试集。图11从每个类别中随机抽取了10张图片,展示了所有的类别。 + +

+
+图11. CIFAR10数据集[21] +

+ +Paddle API提供了自动加载cifar数据集模块 `paddle.dataset.cifar`。 + +通过输入`python train.py`,就可以开始训练模型了,以下小节将详细介绍`train.py`的相关内容。 + +### 模型结构 + +#### Paddle 初始化 + +让我们从导入 Paddle Fluid API 和辅助模块开始。 + +```python + +from __future__ import print_function +import paddle +import paddle.fluid as fluid +import numpy +import sys + +``` + +本教程中我们提供了VGG和ResNet两个模型的配置。 + +### VGG + +首先介绍VGG模型结构,由于CIFAR10图片大小和数量相比ImageNet数据小很多,因此这里的模型针对CIFAR10数据做了一定的适配。卷积部分引入了BN和Dropout操作。 +VGG核心模块的输入是数据层,`vgg_bn_drop` 定义了16层VGG结构,每层卷积后面引入BN层和Dropout层,详细的定义如下: + +```python +def vgg_bn_drop(input): + def conv_block(ipt, num_filter, groups, dropouts): + return fluid.nets.img_conv_group( + input=ipt, + pool_size=2, + pool_stride=2, + conv_num_filter=[num_filter] * groups, + conv_filter_size=3, + conv_act='relu', + conv_with_batchnorm=True, + conv_batchnorm_drop_rate=dropouts, + pool_type='max') + + conv1 = conv_block(input, 64, 2, [0.3, 0]) + conv2 = conv_block(conv1, 128, 2, [0.4, 0]) + conv3 = conv_block(conv2, 256, 3, [0.4, 0.4, 0]) + conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0]) + conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0]) + + drop = fluid.layers.dropout(x=conv5, dropout_prob=0.5) + fc1 = fluid.layers.fc(input=drop, size=512, act=None) + bn = fluid.layers.batch_norm(input=fc1, act='relu') + drop2 = fluid.layers.dropout(x=bn, dropout_prob=0.5) + fc2 = fluid.layers.fc(input=drop2, size=512, act=None) + predict = fluid.layers.fc(input=fc2, size=10, act='softmax') + return predict +``` + + +1. 首先定义了一组卷积网络,即conv_block。卷积核大小为3x3,池化窗口大小为2x2,窗口滑动大小为2,groups决定每组VGG模块是几次连续的卷积操作,dropouts指定Dropout操作的概率。所使用的`img_conv_group`是在`paddle.nets`中预定义的模块,由若干组 Conv->BN->ReLu->Dropout 和 一组 Pooling 组成。 + +2. 五组卷积操作,即 5个conv_block。 第一、二组采用两次连续的卷积操作。第三、四、五组采用三次连续的卷积操作。每组最后一个卷积后面Dropout概率为0,即不使用Dropout操作。 + +3. 最后接两层512维的全连接。 + +4. 在这里,VGG网络首先提取高层特征,随后在全连接层中将其映射到和类别维度大小一致的向量上,最后通过Softmax方法计算图片划为每个类别的概率。 + +### ResNet + +ResNet模型的第1、3、4步和VGG模型相同,这里不再介绍。主要介绍第2步即CIFAR10数据集上ResNet核心模块。 + +先介绍`resnet_cifar10`中的一些基本函数,再介绍网络连接过程。 + + - `conv_bn_layer` : 带BN的卷积层。 + - `shortcut` : 残差模块的"直连"路径,"直连"实际分两种形式:残差模块输入和输出特征通道数不等时,采用1x1卷积的升维操作;残差模块输入和输出通道相等时,采用直连操作。 + - `basicblock` : 一个基础残差模块,即图9左边所示,由两组3x3卷积组成的路径和一条"直连"路径组成。 + - `layer_warp` : 一组残差模块,由若干个残差模块堆积而成。每组中第一个残差模块滑动窗口大小与其他可以不同,以用来减少特征图在垂直和水平方向的大小。 + +```python +def conv_bn_layer(input, + ch_out, + filter_size, + stride, + padding, + act='relu', + bias_attr=False): + tmp = fluid.layers.conv2d( + input=input, + filter_size=filter_size, + num_filters=ch_out, + stride=stride, + padding=padding, + act=None, + bias_attr=bias_attr) + return fluid.layers.batch_norm(input=tmp, act=act) + + +def shortcut(input, ch_in, ch_out, stride): + if ch_in != ch_out: + return conv_bn_layer(input, ch_out, 1, stride, 0, None) + else: + return input + + +def basicblock(input, ch_in, ch_out, stride): + tmp = conv_bn_layer(input, ch_out, 3, stride, 1) + tmp = conv_bn_layer(tmp, ch_out, 3, 1, 1, act=None, bias_attr=True) + short = shortcut(input, ch_in, ch_out, stride) + return fluid.layers.elementwise_add(x=tmp, y=short, act='relu') + + +def layer_warp(block_func, input, ch_in, ch_out, count, stride): + tmp = block_func(input, ch_in, ch_out, stride) + for i in range(1, count): + tmp = block_func(tmp, ch_out, ch_out, 1) + return tmp +``` + +`resnet_cifar10` 的连接结构主要有以下几个过程。 + +1. 底层输入连接一层 `conv_bn_layer`,即带BN的卷积层。 + +2. 然后连接3组残差模块即下面配置3组 `layer_warp` ,每组采用图 10 左边残差模块组成。 + +3. 
最后对网络做均值池化并返回该层。 + +注意:除第一层卷积层和最后一层全连接层之外,要求三组 `layer_warp` 总的含参层数能够被6整除,即 `resnet_cifar10` 的 depth 要满足(depth-2)%6=0 。 + +```python +def resnet_cifar10(ipt, depth=32): + # depth should be one of 20, 32, 44, 56, 110, 1202 + assert (depth - 2) % 6 == 0 + n = (depth - 2) // 6 + nStages = {16, 64, 128} + conv1 = conv_bn_layer(ipt, ch_out=16, filter_size=3, stride=1, padding=1) + res1 = layer_warp(basicblock, conv1, 16, 16, n, 1) + res2 = layer_warp(basicblock, res1, 16, 32, n, 2) + res3 = layer_warp(basicblock, res2, 32, 64, n, 2) + pool = fluid.layers.pool2d( + input=res3, pool_size=8, pool_type='avg', pool_stride=1) + predict = fluid.layers.fc(input=pool, size=10, act='softmax') + return predict +``` + +## Inference Program 配置 + +网络输入定义为 `data_layer` (数据层),在图像分类中即为图像像素信息。CIFRAR10是RGB 3通道32x32大小的彩色图,因此输入数据大小为3072(3x32x32)。 + +```python +def inference_program(): + # The image is 32 * 32 with RGB representation. + data_shape = [None, 3, 32, 32] + images = fluid.data(name='pixel', shape=data_shape, dtype='float32') + + predict = resnet_cifar10(images, 32) + # predict = vgg_bn_drop(images) # un-comment to use vgg net + return predict +``` + +## Train Program 配置 + +然后我们需要设置训练程序 `train_program`。它首先从推理程序中进行预测。 +在训练期间,它将从预测中计算 `avg_cost`。 +在有监督训练中需要输入图像对应的类别信息,同样通过`fluid.data`来定义。训练中采用多类交叉熵作为损失函数,并作为网络的输出,预测阶段定义网络的输出为分类器得到的概率信息。 + +**注意:** 训练程序应该返回一个数组,第一个返回参数必须是 `avg_cost`。训练器使用它来计算梯度。 + +```python +def train_program(): + predict = inference_program() + + label = fluid.data(name='label', shape=[None,1], dtype='int64') + cost = fluid.layers.cross_entropy(input=predict, label=label) + avg_cost = fluid.layers.mean(cost) + accuracy = fluid.layers.accuracy(input=predict, label=label) + return [avg_cost, accuracy, predict] +``` + +## Optimizer Function 配置 + +在下面的 `Adam optimizer`,`learning_rate` 是学习率,与网络的训练收敛速度有关系。 + +```python +def optimizer_program(): + return fluid.optimizer.Adam(learning_rate=0.001) +``` + +## 训练模型 + +### Data Feeders 配置 + +`cifar.train10()` 每次产生一条样本,在完成shuffle和batch之后,作为训练的输入。 + +```python +# Each batch will yield 128 images +BATCH_SIZE = 128 + +# Reader for training +train_reader = paddle.batch( + paddle.reader.shuffle(paddle.dataset.cifar.train10(), buf_size=50000), + batch_size=BATCH_SIZE) + +# Reader for testing. A separated data set for testing. 
+test_reader = paddle.batch( + paddle.dataset.cifar.test10(), batch_size=BATCH_SIZE) +``` + +### Trainer 程序的实现 +我们需要为训练过程制定一个main_program, 同样的,还需要为测试程序配置一个test_program。定义训练的 `place` ,并使用先前定义的优化器 `optimizer_program`。 + + +```python +use_cuda = False +place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + +feed_order = ['pixel', 'label'] + +main_program = fluid.default_main_program() +star_program = fluid.default_startup_program() + +avg_cost, acc, predict = train_program() + +# Test program +test_program = main_program.clone(for_test=True) + +optimizer = optimizer_program() +optimizer.minimize(avg_cost) + +exe = fluid.Executor(place) + +EPOCH_NUM = 2 + +# For training test cost +def train_test(program, reader): + count = 0 + feed_var_list = [ + program.global_block().var(var_name) for var_name in feed_order + ] + feeder_test = fluid.DataFeeder( + feed_list=feed_var_list, place=place) + test_exe = fluid.Executor(place) + accumulated = len([avg_cost, acc]) * [0] + for tid, test_data in enumerate(reader()): + avg_cost_np = test_exe.run(program=program, + feed=feeder_test.feed(test_data), + fetch_list=[avg_cost, acc]) + accumulated = [x[0] + x[1][0] for x in zip(accumulated, avg_cost_np)] + count += 1 + return [x / count for x in accumulated] +``` + +### 训练主循环以及过程输出 + +在接下来的主训练循环中,我们将通过输出来来观察训练过程,或进行测试等。 + +也可以使用`plot`, 利用回调数据来打点画图: + +```python +params_dirname = "image_classification_resnet.inference.model" + +from paddle.utils.plot import Ploter + +train_prompt = "Train cost" +test_prompt = "Test cost" +plot_cost = Ploter(test_prompt,train_prompt) + +# main train loop. +def train_loop(): + feed_var_list_loop = [ + main_program.global_block().var(var_name) for var_name in feed_order + ] + feeder = fluid.DataFeeder( + feed_list=feed_var_list_loop, place=place) + exe.run(star_program) + + step = 0 + for pass_id in range(EPOCH_NUM): + for step_id, data_train in enumerate(train_reader()): + avg_loss_value = exe.run(main_program, + feed=feeder.feed(data_train), + fetch_list=[avg_cost, acc]) + if step % 1 == 0: + plot_cost.append(train_prompt, step, avg_loss_value[0]) + plot_cost.plot() + step += 1 + + avg_cost_test, accuracy_test = train_test(test_program, + reader=test_reader) + plot_cost.append(test_prompt, step, avg_cost_test) + + # save parameters + if params_dirname is not None: + fluid.io.save_inference_model(params_dirname, ["pixel"], + [predict], exe) +``` + +### 训练 + +通过`trainer_loop`函数训练, 这里我们只进行了2个Epoch, 一般我们在实际应用上会执行上百个以上Epoch + +**注意:** CPU,每个 Epoch 将花费大约15~20分钟。这部分可能需要一段时间。请随意修改代码,在GPU上运行测试,以提高训练速度。 + +```python +train_loop() +``` + +一轮训练log示例如下所示,经过1个pass, 训练集上平均 Accuracy 为0.59 ,测试集上平均 Accuracy 为0.6 。 + +```text +Pass 0, Batch 0, Cost 3.869598, Acc 0.164062 +................................................................................................... +Pass 100, Batch 0, Cost 1.481038, Acc 0.460938 +................................................................................................... +Pass 200, Batch 0, Cost 1.340323, Acc 0.523438 +................................................................................................... +Pass 300, Batch 0, Cost 1.223424, Acc 0.593750 +.......................................................................................... +Test with Pass 0, Loss 1.1, Acc 0.6 +``` + +图13是训练的分类错误率曲线图,运行到第200个pass后基本收敛,最终得到测试集上分类错误率为8.54%。 + +

+
+图13. CIFAR10数据集上VGG模型的分类错误率 +

+ +## 应用模型 + +可以使用训练好的模型对图片进行分类,下面程序展示了如何加载已经训练好的网络和参数进行推断。 + +### 生成预测输入数据 + +`dog.png` 是一张小狗的图片. 我们将它转换成 `numpy` 数组以满足`feeder`的格式. + +```python +# Prepare testing data. +from PIL import Image +import os + +def load_image(file): + im = Image.open(file) + im = im.resize((32, 32), Image.ANTIALIAS) + + im = numpy.array(im).astype(numpy.float32) + # The storage order of the loaded image is W(width), + # H(height), C(channel). PaddlePaddle requires + # the CHW order, so transpose them. + im = im.transpose((2, 0, 1)) # CHW + im = im / 255.0 + + # Add one dimension to mimic the list format. + im = numpy.expand_dims(im, axis=0) + return im + +cur_dir = os.getcwd() +img = load_image(cur_dir + '/image/dog.png') +``` + +### Inferencer 配置和预测 + +与训练过程类似,inferencer需要构建相应的过程。我们从`params_dirname` 加载网络和经过训练的参数。 +我们可以简单地插入前面定义的推理程序。 +现在我们准备做预测。 + +```python +place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() +exe = fluid.Executor(place) +inference_scope = fluid.core.Scope() + +with fluid.scope_guard(inference_scope): + + [inference_program, feed_target_names, + fetch_targets] = fluid.io.load_inference_model(params_dirname, exe) + + + + # Construct feed as a dictionary of {feed_target_name: feed_target_data} + # and results will contain a list of data corresponding to fetch_targets. + results = exe.run(inference_program, + feed={feed_target_names[0]: img}, + fetch_list=fetch_targets) + + + # infer label + label_list = [ + "airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", + "ship", "truck" + ] + + print("infer results: %s" % label_list[numpy.argmax(results[0])]) +``` + +## 总结 + +传统图像分类方法由多个阶段构成,框架较为复杂,而端到端的CNN模型结构可一步到位,而且大幅度提升了分类准确率。本文我们首先介绍VGG、GoogLeNet、ResNet三个经典的模型;然后基于CIFAR10数据集,介绍如何使用PaddlePaddle配置和训练CNN模型,尤其是VGG和ResNet模型;最后介绍如何使用PaddlePaddle的API接口对图片进行预测和特征提取。对于其他数据集比如ImageNet,配置和训练流程是同样的,大家可以自行进行实验。 + + +## 参考文献 + +[1] D. G. Lowe, [Distinctive image features from scale-invariant keypoints](http://www.cs.ubc.ca/~lowe/papers/ijcv04.pdf). IJCV, 60(2):91-110, 2004. + +[2] N. Dalal, B. Triggs, [Histograms of Oriented Gradients for Human Detection](http://vision.stanford.edu/teaching/cs231b_spring1213/papers/CVPR05_DalalTriggs.pdf), Proc. IEEE Conf. Computer Vision and Pattern Recognition, 2005. + +[3] Ahonen, T., Hadid, A., and Pietikinen, M. (2006). [Face description with local binary patterns: Application to face recognition](http://ieeexplore.ieee.org/document/1717463/). PAMI, 28. + +[4] J. Sivic, A. Zisserman, [Video Google: A Text Retrieval Approach to Object Matching in Videos](http://www.robots.ox.ac.uk/~vgg/publications/papers/sivic03.pdf), Proc. Ninth Int'l Conf. Computer Vision, pp. 1470-1478, 2003. + +[5] B. Olshausen, D. Field, [Sparse Coding with an Overcomplete Basis Set: A Strategy Employed by V1?](http://redwood.psych.cornell.edu/papers/olshausen_field_1997.pdf), Vision Research, vol. 37, pp. 3311-3325, 1997. + +[6] Wang, J., Yang, J., Yu, K., Lv, F., Huang, T., and Gong, Y. (2010). [Locality-constrained Linear Coding for image classification](http://ieeexplore.ieee.org/abstract/document/5540018/). In CVPR. + +[7] Perronnin, F., Sánchez, J., & Mensink, T. (2010). [Improving the fisher kernel for large-scale image classification](http://dl.acm.org/citation.cfm?id=1888101). In ECCV (4). + +[8] Lin, Y., Lv, F., Cao, L., Zhu, S., Yang, M., Cour, T., Yu, K., and Huang, T. (2011). [Large-scale image clas- sification: Fast feature extraction and SVM training](http://ieeexplore.ieee.org/document/5995477/). In CVPR. + +[9] Krizhevsky, A., Sutskever, I., and Hinton, G. 
(2012). [ImageNet classification with deep convolutional neu- ral networks](http://www.cs.toronto.edu/~kriz/imagenet_classification_with_deep_convolutional.pdf). In NIPS. + +[10] G.E. Hinton, N. Srivastava, A. Krizhevsky, I. Sutskever, and R.R. Salakhutdinov. [Improving neural networks by preventing co-adaptation of feature detectors](https://arxiv.org/abs/1207.0580). arXiv preprint arXiv:1207.0580, 2012. + +[11] K. Chatfield, K. Simonyan, A. Vedaldi, A. Zisserman. [Return of the Devil in the Details: Delving Deep into Convolutional Nets](https://arxiv.org/abs/1405.3531). BMVC, 2014。 + +[12] Szegedy, C., Liu, W., Jia, Y., Sermanet, P., Reed, S., Anguelov, D., Erhan, D., Vanhoucke, V., Rabinovich, A., [Going deeper with convolutions](https://arxiv.org/abs/1409.4842). In: CVPR. (2015) + +[13] Lin, M., Chen, Q., and Yan, S. [Network in network](https://arxiv.org/abs/1312.4400). In Proc. ICLR, 2014. + +[14] S. Ioffe and C. Szegedy. [Batch normalization: Accelerating deep network training by reducing internal covariate shift](https://arxiv.org/abs/1502.03167). In ICML, 2015. + +[15] K. He, X. Zhang, S. Ren, J. Sun. [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385). CVPR 2016. + +[16] Szegedy, C., Vanhoucke, V., Ioffe, S., Shlens, J., Wojna, Z. [Rethinking the incep-tion architecture for computer vision](https://arxiv.org/abs/1512.00567). In: CVPR. (2016). + +[17] Szegedy, C., Ioffe, S., Vanhoucke, V. [Inception-v4, inception-resnet and the impact of residual connections on learning](https://arxiv.org/abs/1602.07261). arXiv:1602.07261 (2016). + +[18] Everingham, M., Eslami, S. M. A., Van Gool, L., Williams, C. K. I., Winn, J. and Zisserman, A. [The Pascal Visual Object Classes Challenge: A Retrospective](http://link.springer.com/article/10.1007/s11263-014-0733-5). International Journal of Computer Vision, 111(1), 98-136, 2015. + +[19] He, K., Zhang, X., Ren, S., and Sun, J. [Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification](https://arxiv.org/abs/1502.01852). ArXiv e-prints, February 2015. + +[20] http://deeplearning.net/tutorial/lenet.html + +[21] https://www.cs.toronto.edu/~kriz/cifar.html + +[22] http://cs231n.github.io/classification/ + +
+知识共享许可协议
本教程PaddlePaddle 创作,采用 知识共享 署名-相同方式共享 4.0 国际 许可协议进行许可。 diff --git a/doc/paddle/user_guides/cv_case/image_classification/README.md b/doc/paddle/user_guides/cv_case/image_classification/README.md new file mode 100644 index 0000000000000000000000000000000000000000..6471b3398c19f89ead67342559d6fd6f7618d693 --- /dev/null +++ b/doc/paddle/user_guides/cv_case/image_classification/README.md @@ -0,0 +1,623 @@ +Image Classification +======================= + +The source code for this chapter is in [book/image_classification](https://github.com/PaddlePaddle/book/tree/develop/03.image_classification). For users new to book, check [Running This Book](https://github.com/PaddlePaddle/book/blob/develop/README.md#running-the-book) . + +## Background + +Compared with words, images provide information in a much more vivid, artistic, easy-to-understand manner. They are an important source for people to express and exchange ideas. In this chapter, we focus on one of the essential problems in image recognition -- image classification. + +Image classification is the task of distinguishing images in different categories based on their semantic meaning. It is a core problem in computer vision and is also the foundation of other higher level computer vision tasks such as object detection, image segmentation, object tracking, action recognition. Image classification has applications in many areas such as face recognition, intelligent video analysis in security systems, traffic scene recognition in transportation systems, content-based image retrieval and automatic photo indexing in Internet services, image classification in medicine industry. + +To classify an image we firstly encode the entire image using manual or learned features and then determine the category using a classifier. Thus, feature extraction plays an important role in image classification. Prior to deep learning the BoW(Bag of Words) model was the most widely used method for classifying an image. The BoW technique was introduced in Natural Language Processing where a training sentence is represented as a bag of words. In the context of image classification, the BoW model requires constructing a dictionary. The simplest BoW framework can be designed in three steps: **feature extraction**, **feature encoding** and **classifier design**. + +With Deep learning, image classification can be framed as a supervised or unsupervised learning problem that uses hierarchical features automatically without any need for manually crafted features from the image. In recent years, Convolution Neural Networks (CNNs) have made significant progress in image classification. CNNs use raw image pixels as input, extract low-level and high-level abstract features through convolution operations, and directly output the classification results from the model. This style of end-to-end learning has led to not only higher performance but also wider adoption in various applications. + +In this chapter, we introduce deep-learning-based image classification methods and explain how to train a CNN model using PaddlePaddle. + +## Requirement + +1. PaddlePaddle version 1.6 or higher, or suitable develop version. + +## Result Demo + +Image Classification can be divided into general image classification and fine-grained image classification. + + +Figure 1 shows the results of general image classification -- the trained model can correctly recognize the main objects in the images. + +

Figure 1. General image classification

Figure 2 shows the results of a fine-grained image classifier: the flower-recognition task requires the model to correctly identify the breed of the flower.

Figure 2. Fine-grained image classification

A qualified model should not only recognize objects of different categories correctly, but should also remain accurate under different viewpoints, illumination conditions, object distortion or partial occlusion (we refer to these conditions as image disturbance). Figure 3 shows some images with various disturbances; a good model should classify them correctly, just as humans do.

Figure 3. Disturbed images [22]

+ +## Exploration of Models + +A large amount of researches in image classification are built upon benchmark datasets such as [PASCAL VOC](http://host.robots.ox.ac.uk/pascal/VOC/), [ImageNet](http://image-net.org/) etc. Many image classification algorithms are usually evaluated and compared based on these datasets. PASCAL VOC is a computer vision competition started in 2005, and ImageNet is a dataset holding Large Scale Visual Recognition Challenge (ILSVRC) started in 2010. In this chapter, we introduce some image classification models from the submissions to these competitions. + + +Before 2012, traditional image classification was accomplished with the three steps described in the background section. A complete model construction usually involves the following stages: low-level feature extraction, feature encoding, spatial constraint or feature clustering, classifier design, model ensemble. + + 1). **Low-level feature extraction**: This step extracts large amounts of local features according to fixed strides and scales. Popular local features include Scale-Invariant Feature Transform (SIFT) \[[1](#References)\], Histogram of Oriented Gradient(HOG) \[[2](#References)\], Local Binary Pattern(LBP) \[[3](#References)\], etc. A common practice is to employ multiple feature descriptors in order to avoid missing a lot of information. + + 2). **Feature encoding**: Low-level features contain a large amount of redundancy and noise. In order to improve the robustness of features, it is necessary to employ a feature transformation to encode low-level features. This is called feature encoding. Common feature encoding methods include vector quantization \[[4](#References)\], sparse coding \[[5](#References)\], locality-constrained linear coding \[[6](#References)\], Fisher vector encoding \[[7](#References)\], etc. + + 3). **Spatial constraint**: Spatial constraint or feature clustering is usually adopted after feature encoding for extracting the maximum or average of each dimension in the spatial domain. Pyramid feature matching--a popular feature clustering method--divides an image uniformly into patches and performs feature clustering in each patch. + + 4). **Classification**: In the above steps an image can be described by a vector of fixed dimension. Then a classifier can be used to classify the image into categories. Common classifiers include Support Vector Machine(SVM), random forest etc. Kernel SVM is the most popular classifier and has achieved very good performance in traditional image classification tasks. + +This classic method has been used widely as image classification algorithm in PASCAL VOC \[[18](#References)\]. [NEC Labs](http://www.nec-labs.com/) won the championship by employing SIFT and LBP features, two non-linear encoders and SVM in ILSVRC 2010 \[[8](#References)\]. + +The CNN model--AlexNet proposed by Alex Krizhevsky et al. \[[9](#References)\], made a breakthrough in ILSVRC 2012. It dramatically outperformed classical methods and won the ILSVRC championship in 2012. This was also the first time that a deep learning method was adopted for large-scale image classification. Since AlexNet, a series of CNN models have been proposed that have advanced the state of the art steadily on Imagenet as shown in Figure 4. With deeper and more sophisticated architectures, Top-5 error rate is getting lower and lower (to around 3.5%). 
The error rate of human annotators on the same ImageNet dataset is about 5.1%, which means that the image classification capability of deep learning models has surpassed that of humans.
+
+Figure 4. Top-5 error rates on ILSVRC image classification
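+
+For concreteness, the classical four-step pipeline described above can be sketched in a few lines. The sketch below uses OpenCV and scikit-learn purely for illustration; these libraries, as well as the `train_images`/`train_labels` variables, are assumptions and are not part of this tutorial's code.
+
+```python
+# A compact bag-of-words image classifier: SIFT features, k-means codebook,
+# histogram encoding, and an SVM classifier.  `train_images`/`train_labels` are
+# hypothetical lists of grayscale numpy arrays and integer labels.
+import numpy as np
+import cv2                               # OpenCV (>= 4.4 for SIFT_create)
+from sklearn.cluster import KMeans
+from sklearn.svm import SVC
+
+def extract_sift(images):
+    sift = cv2.SIFT_create()
+    all_desc = []
+    for img in images:
+        _, desc = sift.detectAndCompute(img, None)        # step 1: local features
+        all_desc.append(desc if desc is not None else np.zeros((1, 128), np.float32))
+    return all_desc
+
+def encode(desc_list, codebook):
+    hists = []
+    for desc in desc_list:
+        words = codebook.predict(desc.astype(np.float32))  # step 2: vector quantization
+        hists.append(np.bincount(words, minlength=codebook.n_clusters))  # step 3: pooling
+    return np.asarray(hists, dtype=np.float32)
+
+descs = extract_sift(train_images)
+codebook = KMeans(n_clusters=256).fit(np.vstack(descs))    # learn the visual vocabulary
+clf = SVC(kernel='rbf').fit(encode(descs, codebook), train_labels)  # step 4: classifier
+```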

+
+### CNN
+
+A traditional CNN consists of convolutional and fully-connected layers and uses a softmax multi-category classifier with the cross-entropy loss function. Figure 5 shows a typical CNN. We first take a look at its common components.
+
+Figure 5. A CNN example [20]
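+
+To make Figure 5 concrete, here is a minimal Fluid sketch of such a LeNet-style network (two convolution + pooling blocks followed by a softmax classifier). The filter counts and sizes are illustrative choices rather than values prescribed by this tutorial; the individual components are explained in the list that follows.
+
+```python
+import paddle.fluid as fluid
+
+def simple_cnn(images, class_num=10):
+    # convolution + max-pooling block 1
+    conv_pool_1 = fluid.nets.simple_img_conv_pool(
+        input=images, filter_size=5, num_filters=20,
+        pool_size=2, pool_stride=2, act='relu')
+    # convolution + max-pooling block 2
+    conv_pool_2 = fluid.nets.simple_img_conv_pool(
+        input=conv_pool_1, filter_size=5, num_filters=50,
+        pool_size=2, pool_stride=2, act='relu')
+    # fully-connected layer with a softmax classifier on top
+    return fluid.layers.fc(input=conv_pool_2, size=class_num, act='softmax')
+```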

+
+- Convolutional layer: uses the convolution operation to extract low-level and high-level features and to exploit local correlation and spatial invariance.
+
+- Pooling layer: down-samples feature maps by taking the local maximum (max-pooling) or average (avg-pooling) of each patch in the feature map. Down-sampling is a common operation in image processing and is used to filter out trivial high-frequency information.
+
+- Fully-connected layer: fully connects the neurons of two adjacent layers.
+
+- Non-linear activation: convolutional and fully-connected layers are usually followed by non-linear activation layers. Non-linearities enhance the expressive power of the network. Common activation functions are Sigmoid, Tanh, and ReLU, with ReLU being the most commonly used in CNNs.
+
+- Dropout \[[10](#References)\]: at each training step, individual nodes are dropped from the network with a certain probability. This improves the network's ability to generalize and helps avoid overfitting.
+
+Parameter updates at each layer during training cause the input distribution of the following layers to change, which in turn requires hyper-parameters to be tuned carefully. In 2015, Sergey Ioffe and Christian Szegedy proposed the Batch Normalization (BN) algorithm \[[14](#References)\], which normalizes the features of each batch in a layer and keeps the distribution in each layer relatively stable. The BN algorithm not only acts as a regularizer but also eliminates the need for meticulous hyper-parameter tuning. Experiments show that BN accelerates training convergence, and it has been widely adopted in deeper models since.
+
+In the following sections, we take a tour through three network architectures: VGG, GoogLeNet, and ResNet.
+
+### VGG
+
+The Oxford Visual Geometry Group (VGG) proposed the VGG network for ILSVRC 2014 \[[11](#References)\]. This model is deeper and wider than previous architectures. Its main body consists of five groups of convolution operations, with adjacent groups connected by max-pooling layers that reduce the spatial dimensions. Each group contains a series of 3x3 convolutional layers (i.e. kernels); the number of kernels stays the same within a group and increases from 64 in the first group to 512 in the last one. Two fully-connected layers and a classifier layer follow. The total number of learnable layers can be 11, 13, 16, or 19 depending on the number of convolutional layers in each group. Figure 6 illustrates a 16-layer VGG. The VGG architecture is relatively simple and has been adopted by many papers, such as the first one to surpass human-level performance on ImageNet \[[19](#References)\].
+
+Figure 6. VGG16 model for ImageNet
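+
+The convolution groups described above map directly onto basic Fluid operators. Later in this tutorial they are built with `fluid.nets.img_conv_group`; the sketch below spells out one such 3x3 group by hand (conv -> BN -> ReLU -> optional dropout -> max-pooling) so the correspondence is explicit. It is a simplified illustration, not the exact implementation used later.
+
+```python
+import paddle.fluid as fluid
+
+def vgg_conv_group(x, num_filter, groups, drop_rate):
+    # `groups` consecutive 3x3 conv layers, each followed by BN + ReLU (+ dropout),
+    # then a 2x2 max-pooling that halves the spatial size.
+    for _ in range(groups):
+        x = fluid.layers.conv2d(input=x, num_filters=num_filter,
+                                filter_size=3, padding=1, act=None)
+        x = fluid.layers.batch_norm(input=x, act='relu')
+        if drop_rate > 0:
+            x = fluid.layers.dropout(x=x, dropout_prob=drop_rate)
+    return fluid.layers.pool2d(input=x, pool_size=2, pool_stride=2, pool_type='max')
+```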

+
+### GoogLeNet
+
+GoogLeNet \[[12](#References)\] won the ILSVRC championship in 2014. It borrows ideas from the Network in Network (NIN) model \[[13](#References)\] and is built on Inception blocks. Let us first familiarize ourselves with these two concepts.
+
+The two main characteristics of the NIN model are:
+
+1) A single-layer convolution is replaced with a Multi-Layer Perceptron Convolution (MLPconv), a tiny multi-layer convolutional network that enhances non-linearity by adding several 1x1 convolutional layers after the linear one.
+
+2) In traditional CNNs, the last few layers are usually fully-connected and carry a large number of parameters. In contrast, the last convolutional layer of NIN produces as many feature maps as there are categories, and NIN replaces the fully-connected layers with global average pooling to obtain a vector of the same size as the category dimension, which is then classified directly. This replacement significantly reduces the number of parameters.
+
+Figure 7 depicts two Inception blocks. Figure 7(a) is the simplest design: the output is a concatenation of features from three convolutional layers and one pooling layer. The disadvantage of this design is that the pooling layer does not change the number of channels, so the number of channels grows after concatenation. After several such blocks, the channel count and parameter count become larger and larger, leading to higher computational complexity. To overcome this drawback, the Inception block in Figure 7(b) employs three 1x1 convolutional layers for dimensionality reduction, which, put simply, reduces the number of channels while also adding non-linearity to the network.
+
+Figure 7. Inception block
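+
+As a sketch, the Figure 7(b) block can be written with Fluid operators as below. The branch widths (`c1`, `c3r`, `c3`, `c5r`, `c5`, `proj`) are free parameters of the block rather than values taken from the paper.
+
+```python
+import paddle.fluid as fluid
+
+def inception_block(x, c1, c3r, c3, c5r, c5, proj):
+    # 1x1 branch
+    b1 = fluid.layers.conv2d(input=x, num_filters=c1, filter_size=1, act='relu')
+    # 1x1 reduction followed by a 3x3 convolution
+    b2 = fluid.layers.conv2d(input=x, num_filters=c3r, filter_size=1, act='relu')
+    b2 = fluid.layers.conv2d(input=b2, num_filters=c3, filter_size=3, padding=1, act='relu')
+    # 1x1 reduction followed by a 5x5 convolution
+    b3 = fluid.layers.conv2d(input=x, num_filters=c5r, filter_size=1, act='relu')
+    b3 = fluid.layers.conv2d(input=b3, num_filters=c5, filter_size=5, padding=2, act='relu')
+    # 3x3 max-pooling followed by a 1x1 projection
+    b4 = fluid.layers.pool2d(input=x, pool_size=3, pool_stride=1, pool_padding=1, pool_type='max')
+    b4 = fluid.layers.conv2d(input=b4, num_filters=proj, filter_size=1, act='relu')
+    # concatenate the four branches along the channel axis
+    return fluid.layers.concat(input=[b1, b2, b3, b4], axis=1)
+```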

+
+GoogLeNet comprises multiple stacked Inception blocks followed by an avg-pooling layer, as in NIN, instead of traditional fully-connected layers. The difference from NIN is that GoogLeNet adds a fully-connected layer after the avg-pooling layer to output a vector of category size. Besides these two characteristics, the features from the middle layers of GoogLeNet are also very discriminative, so GoogLeNet inserts two auxiliary classifiers into the model to strengthen the gradient and add regularization during back-propagation. The loss function of the whole network is the weighted sum of these three classifiers.
+
+Figure 8 illustrates the architecture of GoogLeNet, which consists of 22 layers: it starts with three regular convolutional layers followed by three groups of sub-networks -- the first group contains two Inception blocks, the second group has five, and the third group has two again. It ends with an average pooling layer and a fully-connected layer.
+
+Figure 8. GoogLeNet [12]
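+
+The NIN-style head just described (global average pooling followed by a single fully-connected layer) and the weighted combination of the three classifiers can be sketched as follows. The 0.3 weights are illustrative, and this helper is not part of the training script later in this tutorial.
+
+```python
+import paddle.fluid as fluid
+
+def googlenet_head(features, class_num):
+    # global average pooling squeezes each feature map to a single value
+    gap = fluid.layers.pool2d(input=features, pool_type='avg', global_pooling=True)
+    # a single fully-connected layer then maps the pooled vector to the categories
+    return fluid.layers.fc(input=gap, size=class_num, act='softmax')
+
+# total loss = main classifier loss plus down-weighted auxiliary losses, e.g.
+# loss = main_loss + 0.3 * aux_loss_1 + 0.3 * aux_loss_2
+```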

+
+The model above is the first version of GoogLeNet, often called GoogLeNet-v1. GoogLeNet-v2 \[[14](#References)\] introduced the BN layer; GoogLeNet-v3 \[[16](#References)\] further factorized some convolutional layers, which increases non-linearity and network depth; GoogLeNet-v4 \[[17](#References)\] is inspired by the design ideas of ResNet, which will be introduced in the next section. The evolution from v1 to v4 improved accuracy consistently. Due to the limited length of this article, we will not examine the architectures of v2 to v4 in detail.
+
+### ResNet
+
+The Residual Network (ResNet) \[[15](#References)\] won the 2015 championship in three ILSVRC tasks -- image classification, object localization, and object detection. The main challenge in training deeper networks is that accuracy degrades with network depth. The authors of ResNet proposed a residual learning approach to ease the training of deeper networks. Building on design ideas such as BN, small convolutional kernels, and fully convolutional networks, ResNet reformulates the layers as residual blocks. Each block contains two branches: one connects the input directly to the output, and the other performs two to three convolutions to compute the residual function with reference to the block's input. The outputs of the two branches are then added up.
+
+Figure 9 illustrates the ResNet building blocks. On the left is the basic block, which consists of two 3x3 convolutional layers with the same number of output channels. On the right is a bottleneck block: the first 1x1 convolutional layer reduces the dimension (from 256 to 64 here), and the last 1x1 convolutional layer increases it back from 64 to 256, so the number of input and output channels of the middle 3x3 convolutional layer stays relatively small (64->64 in this example).
+
+Figure 9. Residual block
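+
+The bottleneck block on the right of Figure 9 can be sketched with Fluid operators as below (the CIFAR-10 network trained later uses the simpler basic block instead). The 256/64 channel counts follow the example in the figure, and `conv_bn` here is a local helper, not the `conv_bn_layer` defined later.
+
+```python
+import paddle.fluid as fluid
+
+def conv_bn(x, num_filters, filter_size, stride=1, padding=0, act='relu'):
+    x = fluid.layers.conv2d(input=x, num_filters=num_filters, filter_size=filter_size,
+                            stride=stride, padding=padding, act=None, bias_attr=False)
+    return fluid.layers.batch_norm(input=x, act=act)
+
+def bottleneck_block(x, in_ch=256, mid_ch=64):
+    # 1x1 conv reduces channels (256 -> 64), 3x3 conv works on the reduced width,
+    # and the final 1x1 conv restores the original channel count (64 -> 256).
+    y = conv_bn(x, mid_ch, 1)
+    y = conv_bn(y, mid_ch, 3, padding=1)
+    y = conv_bn(y, in_ch, 1, act=None)
+    # add the shortcut branch and apply ReLU to the sum
+    return fluid.layers.elementwise_add(x=x, y=y, act='relu')
+```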

+
+Figure 10 illustrates ResNets with 50, 101, and 152 layers, respectively. All three networks use bottleneck blocks; they differ only in how many times each residual block is repeated. ResNet converges quickly and can be trained with hundreds or even thousands of layers.
+
+Figure 10. ResNet model for ImageNet
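+
+The three variants in Figure 10 differ only in their stage-wise repeat counts. A quick arithmetic check (the repeat counts below are the standard ones from the ResNet paper) shows how the totals of 50, 101, and 152 layers arise:
+
+```python
+# Stage-wise bottleneck repeat counts for the three ImageNet ResNets in Figure 10.
+repeats = {50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3]}
+for depth, cfg in repeats.items():
+    # each bottleneck block holds 3 conv layers; add the first conv layer and the final FC layer
+    assert 3 * sum(cfg) + 2 == depth
+```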

+
+
+## Get Data Ready
+
+Common public benchmark datasets for image classification are [CIFAR](https://www.cs.toronto.edu/~kriz/cifar.html), [ImageNet](http://image-net.org/), [COCO](http://mscoco.org/), etc. Those used for fine-grained image classification are [CUB-200-2011](http://www.vision.caltech.edu/visipedia/CUB-200-2011.html), [Stanford Dog](http://vision.stanford.edu/aditya86/ImageNetDogs/), [Oxford-flowers](http://www.robots.ox.ac.uk/~vgg/data/flowers/), etc. Among these, ImageNet is the largest, and most research results are reported on it, as mentioned in the "Exploration of Models" section. Since 2010 the ImageNet dataset has gone through some changes. The commonly used ImageNet-2012 dataset contains 1000 categories: 1,281,167 training images, ranging from 732 to 1300 images per category, and 50,000 validation images with 50 images per category.
+
+Since ImageNet is too large to download and train on efficiently, we use [CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html) in this tutorial. The CIFAR-10 dataset consists of 60,000 32x32 color images in 10 classes, with 6,000 images per class: 50,000 training images and 10,000 test images. Figure 11 shows all the classes in CIFAR-10 as well as 10 images randomly sampled from each category.
+
+Figure 11. CIFAR10 dataset [21]
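+
+The next section loads this dataset through Paddle's built-in CIFAR-10 reader. As a quick sanity check -- a sketch, assuming the reader behaves as in the Paddle version used for this tutorial -- you can inspect a single sample like this:
+
+```python
+from __future__ import print_function
+import paddle
+
+# cifar.train10() returns a reader creator; calling it yields (image, label) pairs.
+# The first call downloads the dataset, so it may take a while.
+sample_image, sample_label = next(paddle.dataset.cifar.train10()())
+print(sample_image.shape, sample_label)  # expected: a flattened 3x32x32 array (3072,) and an int label
+```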

+ +The Paddle API invents 'Paddle.dataset.cifar' to automatically load the Cifar DataSet module. + +After running the command `python train.py`, training will start immediately. The following sections will explain `train.py` inside and out. + +## Model Configuration + +#### Initialize Paddle + +Let's start with importing the Paddle Fluid API package and the helper modules. + +```python + +from __future__ import print_function +import paddle +import paddle.fluid as fluid +import numpy +import sys + +``` + +Now we are going to walk you through the implementations of the VGG and ResNet. + +### VGG + +Let's start with the VGG model. Since the image size and amount of CIFAR10 are smaller than ImageNet, we tailor our model to fit CIFAR10 dataset. Convolution groups incorporate BN and dropout operations. + +The input to VGG core module is the data layer. `vgg_bn_drop` defines a 16-layer VGG network, with each convolutional layer followed by BN and dropout layers. Here is the definition in detail: + +```python +def vgg_bn_drop(input): + def conv_block(ipt, num_filter, groups, dropouts): + return fluid.nets.img_conv_group( + input=ipt, + pool_size=2, + pool_stride=2, + conv_num_filter=[num_filter] * groups, + conv_filter_size=3, + conv_act='relu', + conv_with_batchnorm=True, + conv_batchnorm_drop_rate=dropouts, + pool_type='max') + + conv1 = conv_block(input, 64, 2, [0.3, 0]) + conv2 = conv_block(conv1, 128, 2, [0.4, 0]) + conv3 = conv_block(conv2, 256, 3, [0.4, 0.4, 0]) + conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0]) + conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0]) + + drop = fluid.layers.dropout(x=conv5, dropout_prob=0.5) + fc1 = fluid.layers.fc(input=drop, size=512, act=None) + bn = fluid.layers.batch_norm(input=fc1, act='relu') + drop2 = fluid.layers.dropout(x=bn, dropout_prob=0.5) + fc2 = fluid.layers.fc(input=drop2, size=512, act=None) + predict = fluid.layers.fc(input=fc2, size=10, act='softmax') + return predict +``` + + + 1. Firstly, it defines a convolution block or conv_block. The default convolution kernel is 3x3, and the default pooling size is 2x2 with stride 2. Groups decide the number of consecutive convolution operations in each VGG block. Dropout specifies the probability to perform dropout operation. Function `img_conv_group` is predefined in `paddle.nets` consisting of a series of `Conv->BN->ReLu->Dropout` and a group of `Pooling` . + + 2. Five groups of convolutions. The first two groups perform two consecutive convolutions, while the last three groups perform three convolutions in sequence. The dropout rate of the last convolution in each group is set to 0, which means there is no dropout for this layer. + + 3. The last two layers are fully-connected layers of 512 dimensions. + + 4. The VGG network begins with extracting high-level features and then maps them to a vector of the same size as the category dimension. Finally, Softmax function is used for calculating the probability of classifying the image to each category. + +### ResNet + +The 1st, 3rd, and 4th step is identical to the counterparts in VGG, which are skipped hereby. +We will explain the 2nd step at lengths, namely the core module of ResNet on CIFAR10. + +To start with, here are some basic functions used in `resnet_cifar10` ,and the network connection procedure is illustrated afterwards: + + - `conv_bn_layer` : convolutional layer with BN. + - `shortcut` : the shortcut connection in a residual block. 
There are two kinds of shortcuts: 1x1 convolutions are used to increase dimensionality when in the residual block the number of channels in input feature and that in output feature are different; direct connection used otherwise. + - `basicblock` : a basic residual module as shown in the left of Figure 9, it consists of two sequential 3x3 convolutions and one "shortcut" branch. + - `layer_warp` : a group of residual modules consisting of several stacked blocks. In each group, the sliding window size of the first residual block could be different from the rest, in order to reduce the size of feature maps along horizontal and vertical directions. + +```python +def conv_bn_layer(input, + ch_out, + filter_size, + stride, + padding, + act='relu', + bias_attr=False): + tmp = fluid.layers.conv2d( + input=input, + filter_size=filter_size, + num_filters=ch_out, + stride=stride, + padding=padding, + act=None, + bias_attr=bias_attr) + return fluid.layers.batch_norm(input=tmp, act=act) + + +def shortcut(input, ch_in, ch_out, stride): + if ch_in != ch_out: + return conv_bn_layer(input, ch_out, 1, stride, 0, None) + else: + return input + + +def basicblock(input, ch_in, ch_out, stride): + tmp = conv_bn_layer(input, ch_out, 3, stride, 1) + tmp = conv_bn_layer(tmp, ch_out, 3, 1, 1, act=None, bias_attr=True) + short = shortcut(input, ch_in, ch_out, stride) + return fluid.layers.elementwise_add(x=tmp, y=short, act='relu') + + +def layer_warp(block_func, input, ch_in, ch_out, count, stride): + tmp = block_func(input, ch_in, ch_out, stride) + for i in range(1, count): + tmp = block_func(tmp, ch_out, ch_out, 1) + return tmp +``` + + +The following are the components of `resnet_cifar10`: + +1. The lowest level is `conv_bn_layer` , e.t. the convolution layer with BN. +2. The next level is composed of three residual blocks, namely three `layer_warp`, each of which uses the left residual block in Figure 10. +3. The last level is average pooling layer. + +Note: Except the first convolutional layer and the last fully-connected layer, the total number of layers with parameters in three `layer_warp` should be dividable by 6. In other words, the depth of `resnet_cifar10` should satisfy (depth-2)%6=0. + +```python +def resnet_cifar10(ipt, depth=32): + # depth should be one of 20, 32, 44, 56, 110, 1202 + assert (depth - 2) % 6 == 0 + n = (depth - 2) // 6 + nStages = {16, 64, 128} + conv1 = conv_bn_layer(ipt, ch_out=16, filter_size=3, stride=1, padding=1) + res1 = layer_warp(basicblock, conv1, 16, 16, n, 1) + res2 = layer_warp(basicblock, res1, 16, 32, n, 2) + res3 = layer_warp(basicblock, res2, 32, 64, n, 2) + pool = fluid.layers.pool2d( + input=res3, pool_size=8, pool_type='avg', pool_stride=1) + predict = fluid.layers.fc(input=pool, size=10, act='softmax') + return predict +``` + + +## Inference Program Configuration + +The input to the network is defined as `fluid.data` , corresponding to image pixels in the context of image classification. The images in CIFAR10 are 32x32 coloured images with three channels. Therefore, the size of the input data is 3072 (3x32x32). + +```python +def inference_program(): + # The image is 32 * 32 with RGB representation. + data_shape = [None, 3, 32, 32] + images = fluid.data(name='pixel', shape=data_shape, dtype='float32') + + predict = resnet_cifar10(images, 32) + # predict = vgg_bn_drop(images) # un-comment to use vgg net + return predict +``` + +## Training Program Configuration +Then we need to set up the the `train_program`. It takes the prediction from the inference_program first. 
+During the training, it will calculate the `avg_loss` from the prediction. + +In the context of supervised learning, labels of training images are defined in `fluid.data` as well. During training, the multi-class cross-entropy is used as the loss function and becomes the output of the network. During testing, the outputs are the probabilities calculated in the classifier. + +**NOTE:** A training program should return an array and the first returned argument has to be `avg_cost` . +The trainer always uses it to calculate the gradients. + +```python +def train_program(): + predict = inference_program() + + label = fluid.data(name='label', shape=[None, 1], dtype='int64') + cost = fluid.layers.cross_entropy(input=predict, label=label) + avg_cost = fluid.layers.mean(cost) + accuracy = fluid.layers.accuracy(input=predict, label=label) + return [avg_cost, accuracy, predict] +``` + +## Optimizer Function Configuration + +In the following `Adam` optimizer, `learning_rate` specifies the learning rate in the optimization procedure. It influences the convergence speed. + +```python +def optimizer_program(): + return fluid.optimizer.Adam(learning_rate=0.001) +``` + +## Model Training + + +### Data Feeders Configuration + +`cifar.train10()` generates one sample at a time as the input for training after completing shuffle and batch. + +```python +# Each batch will yield 128 images +BATCH_SIZE = 128 + +# Reader for training +train_reader = paddle.batch( + paddle.reader.shuffle(paddle.dataset.cifar.train10(), buf_size=50000), + batch_size=BATCH_SIZE) + +# Reader for testing. A separated data set for testing. +test_reader = paddle.batch( + paddle.dataset.cifar.test10(), batch_size=BATCH_SIZE) +``` + + +### Implementation of the trainer program +We need to develop a main_program for the training process. Similarly, we need to configure a test_program for the test program. It's also necessary to define the `place` of the training and use the optimizer `optimizer_program` previously defined . + + + +```python +use_cuda = False +place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + +feed_order = ['pixel', 'label'] + +main_program = fluid.default_main_program() +star_program = fluid.default_startup_program() + +avg_cost, acc, predict = train_program() + +# Test program +test_program = main_program.clone(for_test=True) + +optimizer = optimizer_program() +optimizer.minimize(avg_cost) + +exe = fluid.Executor(place) + +EPOCH_NUM = 2 + +# For training test cost +def train_test(program, reader): + count = 0 + feed_var_list = [ + program.global_block().var(var_name) for var_name in feed_order + ] + feeder_test = fluid.DataFeeder( + feed_list=feed_var_list, place=place) + test_exe = fluid.Executor(place) + accumulated = len([avg_cost, acc]) * [0] + for tid, test_data in enumerate(reader()): + avg_cost_np = test_exe.run(program=program, + feed=feeder_test.feed(test_data), + fetch_list=[avg_cost, acc]) + accumulated = [x[0] + x[1][0] for x in zip(accumulated, avg_cost_np)] + count += 1 + return [x / count for x in accumulated] +``` + +### The main loop of training and the outputs along the process + +In the next main training cycle, we will observe the training process or run test in good use of the outputs. 
+ +You can also use `plot` to plot the process by calling back data: + + +```python +params_dirname = "image_classification_resnet.inference.model" + +from paddle.utils.plot import Ploter + +train_prompt = "Train cost" +test_prompt = "Test cost" +plot_cost = Ploter(test_prompt,train_prompt) + +# main train loop. +def train_loop(): + feed_var_list_loop = [ + main_program.global_block().var(var_name) for var_name in feed_order + ] + feeder = fluid.DataFeeder( + feed_list=feed_var_list_loop, place=place) + exe.run(star_program) + + step = 0 + for pass_id in range(EPOCH_NUM): + for step_id, data_train in enumerate(train_reader()): + avg_loss_value = exe.run(main_program, + feed=feeder.feed(data_train), + fetch_list=[avg_cost, acc]) + if step % 1 == 0: + plot_cost.append(train_prompt, step, avg_loss_value[0]) + plot_cost.plot() + step += 1 + + avg_cost_test, accuracy_test = train_test(test_program, + reader=test_reader) + plot_cost.append(test_prompt, step, avg_cost_test) + + # save parameters + if params_dirname is not None: + fluid.io.save_inference_model(params_dirname, ["pixel"], + [predict], exe) +``` + +### Training + +Training via `trainer_loop` function, here we only have 2 Epoch iterations. Generally we need to execute above a hundred Epoch in practice. + +**Note:** On CPU, each Epoch will take approximately 15 to 20 minutes. It may cost some time in this part. Please freely update the code and run test on GPU to accelerate training + +```python +train_loop() +``` + +An example of an epoch of training log is shown below. After 1 pass, the average Accuracy on the training set is 0.59 and the average Accuracy on the testing set is 0.6. + +```text +Pass 0, Batch 0, Cost 3.869598, Acc 0.164062 +................................................................................................... +Pass 100, Batch 0, Cost 1.481038, Acc 0.460938 +................................................................................................... +Pass 200, Batch 0, Cost 1.340323, Acc 0.523438 +................................................................................................... +Pass 300, Batch 0, Cost 1.223424, Acc 0.593750 +.......................................................................................... +Test with Pass 0, Loss 1.1, Acc 0.6 +``` + +Figure 13 is a curve graph of the classification error rate of the training. After pass of 200 times, it almost converges, and finally the classification error rate on the test set is 8.54%. + +

+
+Figure 13. Classification error rate of VGG model on the CIFAR10 data set
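+
+After `train_loop()` finishes, the same `train_test` helper defined above can report the final error rate on the test set (error rate = 1 - accuracy); this simply repeats the evaluation already performed at the end of each pass:
+
+```python
+avg_cost_test, accuracy_test = train_test(test_program, reader=test_reader)
+print('final test error rate: %.2f%%' % ((1.0 - accuracy_test) * 100.0))
+```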

+ +## Model Application + +You can use a trained model to classify your images. The following program shows how to load a trained network and optimized parameters for inference. + +### Generate Input Data to infer + +`dog.png` is a picture of a puppy. We convert it to a `numpy` array to meet the `feeder` format. + +```python +# Prepare testing data. +from PIL import Image +import os + +def load_image(file): + im = Image.open(file) + im = im.resize((32, 32), Image.ANTIALIAS) + + im = numpy.array(im).astype(numpy.float32) + # The storage order of the loaded image is W(width), + # H(height), C(channel). PaddlePaddle requires + # the CHW order, so transpose them. + im = im.transpose((2, 0, 1)) # CHW + im = im / 255.0 + + # Add one dimension to mimic the list format. + im = numpy.expand_dims(im, axis=0) + return im + +cur_dir = os.getcwd() +img = load_image(cur_dir + '/image/dog.png') +``` + +### Inferencer Configuration and Inference + +Similar to the training process, a inferencer needs to build the corresponding process. We load the trained network and parameters from `params_dirname` . +We can just insert the inference program defined previously. +Now let's make our inference. + + + +```python +place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() +exe = fluid.Executor(place) +inference_scope = fluid.core.Scope() + +with fluid.scope_guard(inference_scope): + + [inference_program, feed_target_names, + fetch_targets] = fluid.io.load_inference_model(params_dirname, exe) + + + + # Construct feed as a dictionary of {feed_target_name: feed_target_data} + # and results will contain a list of data corresponding to fetch_targets. + results = exe.run(inference_program, + feed={feed_target_names[0]: img}, + fetch_list=fetch_targets) + + + + # infer label + label_list = [ + "airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", + "ship", "truck" + ] + + print("infer results: %s" % label_list[numpy.argmax(results[0])]) +``` + + + +## Summary + +The traditional image classification method consists of multiple stages. The framework is a little complex. In contrast, the end-to-end CNN model can be implemented in one step, and the accuracy of classification is greatly improved. In this article, we first introduced three classic models, VGG, GoogLeNet and ResNet. Then we have introduced how to use PaddlePaddle to configure and train CNN models based on CIFAR10 dataset, especially VGG and ResNet models. Finally, we have guided you how to use PaddlePaddle's API interfaces to predict images and extract features. For other datasets such as ImageNet, the configuration and training process is the same, so you can embark on your adventure on your own. + + +## References + +[1] D. G. Lowe, [Distinctive image features from scale-invariant keypoints](http://www.cs.ubc.ca/~lowe/papers/ijcv04.pdf). IJCV, 60(2):91-110, 2004. + +[2] N. Dalal, B. Triggs, [Histograms of Oriented Gradients for Human Detection](http://vision.stanford.edu/teaching/cs231b_spring1213/papers/CVPR05_DalalTriggs.pdf), Proc. IEEE Conf. Computer Vision and Pattern Recognition, 2005. + +[3] Ahonen, T., Hadid, A., and Pietikinen, M. (2006). [Face description with local binary patterns: Application to face recognition](http://ieeexplore.ieee.org/document/1717463/). PAMI, 28. + +[4] J. Sivic, A. Zisserman, [Video Google: A Text Retrieval Approach to Object Matching in Videos](http://www.robots.ox.ac.uk/~vgg/publications/papers/sivic03.pdf), Proc. Ninth Int'l Conf. Computer Vision, pp. 1470-1478, 2003. + +[5] B. Olshausen, D. 
Field, [Sparse Coding with an Overcomplete Basis Set: A Strategy Employed by V1?](http://redwood.psych.cornell.edu/papers/olshausen_field_1997.pdf), Vision Research, vol. 37, pp. 3311-3325, 1997. + +[6] Wang, J., Yang, J., Yu, K., Lv, F., Huang, T., and Gong, Y. (2010). [Locality-constrained Linear Coding for image classification](http://ieeexplore.ieee.org/abstract/document/5540018/). In CVPR. + +[7] Perronnin, F., Sánchez, J., & Mensink, T. (2010). [Improving the fisher kernel for large-scale image classification](http://dl.acm.org/citation.cfm?id=1888101). In ECCV (4). + +[8] Lin, Y., Lv, F., Cao, L., Zhu, S., Yang, M., Cour, T., Yu, K., and Huang, T. (2011). [Large-scale image clas- sification: Fast feature extraction and SVM training](http://ieeexplore.ieee.org/document/5995477/). In CVPR. + +[9] Krizhevsky, A., Sutskever, I., and Hinton, G. (2012). [ImageNet classification with deep convolutional neu- ral networks](http://www.cs.toronto.edu/~kriz/imagenet_classification_with_deep_convolutional.pdf). In NIPS. + +[10] G.E. Hinton, N. Srivastava, A. Krizhevsky, I. Sutskever, and R.R. Salakhutdinov. [Improving neural networks by preventing co-adaptation of feature detectors](https://arxiv.org/abs/1207.0580). arXiv preprint arXiv:1207.0580, 2012. + +[11] K. Chatfield, K. Simonyan, A. Vedaldi, A. Zisserman. [Return of the Devil in the Details: Delving Deep into Convolutional Nets](https://arxiv.org/abs/1405.3531). BMVC, 2014。 + +[12] Szegedy, C., Liu, W., Jia, Y., Sermanet, P., Reed, S., Anguelov, D., Erhan, D., Vanhoucke, V., Rabinovich, A., [Going deeper with convolutions](https://arxiv.org/abs/1409.4842). In: CVPR. (2015) + +[13] Lin, M., Chen, Q., and Yan, S. [Network in network](https://arxiv.org/abs/1312.4400). In Proc. ICLR, 2014. + +[14] S. Ioffe and C. Szegedy. [Batch normalization: Accelerating deep network training by reducing internal covariate shift](https://arxiv.org/abs/1502.03167). In ICML, 2015. + +[15] K. He, X. Zhang, S. Ren, J. Sun. [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385). CVPR 2016. + +[16] Szegedy, C., Vanhoucke, V., Ioffe, S., Shlens, J., Wojna, Z. [Rethinking the incep-tion architecture for computer vision](https://arxiv.org/abs/1512.00567). In: CVPR. (2016). + +[17] Szegedy, C., Ioffe, S., Vanhoucke, V. [Inception-v4, inception-resnet and the impact of residual connections on learning](https://arxiv.org/abs/1602.07261). arXiv:1602.07261 (2016). + +[18] Everingham, M., Eslami, S. M. A., Van Gool, L., Williams, C. K. I., Winn, J. and Zisserman, A. [The Pascal Visual Object Classes Challenge: A Retrospective](http://link.springer.com/article/10.1007/s11263-014-0733-5). International Journal of Computer Vision, 111(1), 98-136, 2015. + +[19] He, K., Zhang, X., Ren, S., and Sun, J. [Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification](https://arxiv.org/abs/1502.01852). ArXiv e-prints, February 2015. + +[20] http://deeplearning.net/tutorial/lenet.html + +[21] https://www.cs.toronto.edu/~kriz/cifar.html + +[22] http://cs231n.github.io/classification/ + + + +
This tutorial is contributed by PaddlePaddle, and licensed under a Creative Commons Attribution-ShareAlike 4.0 International License. diff --git a/doc/paddle/user_guides/cv_case/image_classification/_ce.py b/doc/paddle/user_guides/cv_case/image_classification/_ce.py new file mode 100644 index 0000000000000000000000000000000000000000..8e3c81389700187aeaa706a2b7f52a3336a3819b --- /dev/null +++ b/doc/paddle/user_guides/cv_case/image_classification/_ce.py @@ -0,0 +1,42 @@ +### This file is only used for continuous evaluation test! +from __future__ import print_function +from __future__ import division +from __future__ import absolute_import +import os +import sys +sys.path.append(os.environ['ceroot']) +from kpi import CostKpi +from kpi import AccKpi + +train_cost_kpi = CostKpi( + 'train_cost', 0.02, 0, actived=True, desc='train cost') +train_acc_kpi = AccKpi('train_acc', 0.02, 0, actived=True, desc='train acc') +test_cost_kpi = CostKpi('test_cost', 0.02, 0, actived=True, desc='test cost') +test_acc_kpi = AccKpi('test_acc', 0.02, 0, actived=True, desc='test acc') + +tracking_kpis = [train_cost_kpi, train_acc_kpi, test_cost_kpi, test_acc_kpi] + + +def parse_log(log): + for line in log.split('\n'): + fs = line.strip().split('\t') + print(fs) + if len(fs) == 3 and fs[0] == 'kpis': + kpi_name = fs[1] + kpi_value = float(fs[2]) + yield kpi_name, kpi_value + + +def log_to_ce(log): + kpi_tracker = {} + for kpi in tracking_kpis: + kpi_tracker[kpi.name] = kpi + for (kpi_name, kpi_value) in parse_log(log): + print(kpi_name, kpi_value) + kpi_tracker[kpi_name].add_record(kpi_value) + kpi_tracker[kpi_name].persist() + + +if __name__ == '__main__': + log = sys.stdin.read() + log_to_ce(log) diff --git a/doc/paddle/user_guides/cv_case/image_classification/image/cifar.png b/doc/paddle/user_guides/cv_case/image_classification/image/cifar.png new file mode 100644 index 0000000000000000000000000000000000000000..f3c5f2f7b0c84f83382b70124dcd439586ed4eb0 Binary files /dev/null and b/doc/paddle/user_guides/cv_case/image_classification/image/cifar.png differ diff --git a/doc/paddle/user_guides/cv_case/image_classification/image/dog.png b/doc/paddle/user_guides/cv_case/image_classification/image/dog.png new file mode 100644 index 0000000000000000000000000000000000000000..ca8f858a902ea723d886d2b88c2c0a1005301c50 Binary files /dev/null and b/doc/paddle/user_guides/cv_case/image_classification/image/dog.png differ diff --git a/doc/paddle/user_guides/cv_case/image_classification/image/dog_cat.png b/doc/paddle/user_guides/cv_case/image_classification/image/dog_cat.png new file mode 100644 index 0000000000000000000000000000000000000000..38b21f21604b1bb84fc3f6aa96bd5fce45d15a55 Binary files /dev/null and b/doc/paddle/user_guides/cv_case/image_classification/image/dog_cat.png differ diff --git a/doc/paddle/user_guides/cv_case/image_classification/image/fea_conv0.png b/doc/paddle/user_guides/cv_case/image_classification/image/fea_conv0.png new file mode 100644 index 0000000000000000000000000000000000000000..647c822e52cd55d50e5f207978f5e6ada86cf34c Binary files /dev/null and b/doc/paddle/user_guides/cv_case/image_classification/image/fea_conv0.png differ diff --git a/doc/paddle/user_guides/cv_case/image_classification/image/flowers.png b/doc/paddle/user_guides/cv_case/image_classification/image/flowers.png new file mode 100644 index 0000000000000000000000000000000000000000..04245cef60fe7126ae4c92ba8085273965078bee Binary files /dev/null and b/doc/paddle/user_guides/cv_case/image_classification/image/flowers.png differ diff --git 
a/doc/paddle/user_guides/cv_case/image_classification/image/googlenet.jpeg b/doc/paddle/user_guides/cv_case/image_classification/image/googlenet.jpeg new file mode 100644 index 0000000000000000000000000000000000000000..249dbf96df61c3352ea5bd80470f6c4a1e03ff10 Binary files /dev/null and b/doc/paddle/user_guides/cv_case/image_classification/image/googlenet.jpeg differ diff --git a/doc/paddle/user_guides/cv_case/image_classification/image/ilsvrc.png b/doc/paddle/user_guides/cv_case/image_classification/image/ilsvrc.png new file mode 100644 index 0000000000000000000000000000000000000000..4660ac122e9d533023a21154d35eee29e3b08d27 Binary files /dev/null and b/doc/paddle/user_guides/cv_case/image_classification/image/ilsvrc.png differ diff --git a/doc/paddle/user_guides/cv_case/image_classification/image/inception.png b/doc/paddle/user_guides/cv_case/image_classification/image/inception.png new file mode 100644 index 0000000000000000000000000000000000000000..9591a0c1e8c0165c40ca560be35a7b9a91cd5027 Binary files /dev/null and b/doc/paddle/user_guides/cv_case/image_classification/image/inception.png differ diff --git a/doc/paddle/user_guides/cv_case/image_classification/image/inception_en.png b/doc/paddle/user_guides/cv_case/image_classification/image/inception_en.png new file mode 100644 index 0000000000000000000000000000000000000000..39580c20b583f2a15d17fd124a572c84e6e2db1d Binary files /dev/null and b/doc/paddle/user_guides/cv_case/image_classification/image/inception_en.png differ diff --git a/doc/paddle/user_guides/cv_case/image_classification/image/lenet.png b/doc/paddle/user_guides/cv_case/image_classification/image/lenet.png new file mode 100644 index 0000000000000000000000000000000000000000..77f785e03bacd38c4c64a817874a58ff3298d2f3 Binary files /dev/null and b/doc/paddle/user_guides/cv_case/image_classification/image/lenet.png differ diff --git a/doc/paddle/user_guides/cv_case/image_classification/image/lenet_en.png b/doc/paddle/user_guides/cv_case/image_classification/image/lenet_en.png new file mode 100644 index 0000000000000000000000000000000000000000..97a1e3eee45c0db95e6a943ca3b8c0cf6c34d4b6 Binary files /dev/null and b/doc/paddle/user_guides/cv_case/image_classification/image/lenet_en.png differ diff --git a/doc/paddle/user_guides/cv_case/image_classification/image/plot.png b/doc/paddle/user_guides/cv_case/image_classification/image/plot.png new file mode 100644 index 0000000000000000000000000000000000000000..57e45cc0c27dd99b9918de2ff1228bc6b65f7424 Binary files /dev/null and b/doc/paddle/user_guides/cv_case/image_classification/image/plot.png differ diff --git a/doc/paddle/user_guides/cv_case/image_classification/image/plot_en.png b/doc/paddle/user_guides/cv_case/image_classification/image/plot_en.png new file mode 100644 index 0000000000000000000000000000000000000000..147e575bf49086811c43420d5a9c8f749e2da405 Binary files /dev/null and b/doc/paddle/user_guides/cv_case/image_classification/image/plot_en.png differ diff --git a/doc/paddle/user_guides/cv_case/image_classification/image/resnet.png b/doc/paddle/user_guides/cv_case/image_classification/image/resnet.png new file mode 100644 index 0000000000000000000000000000000000000000..0aeb4f254639fdbf18e916dc219ca61602596d85 Binary files /dev/null and b/doc/paddle/user_guides/cv_case/image_classification/image/resnet.png differ diff --git a/doc/paddle/user_guides/cv_case/image_classification/image/resnet_block.jpg b/doc/paddle/user_guides/cv_case/image_classification/image/resnet_block.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..c500eb01a90190ff66150871fe83ec275e2de8d7 Binary files /dev/null and b/doc/paddle/user_guides/cv_case/image_classification/image/resnet_block.jpg differ diff --git a/doc/paddle/user_guides/cv_case/image_classification/image/train_and_test.png b/doc/paddle/user_guides/cv_case/image_classification/image/train_and_test.png new file mode 100644 index 0000000000000000000000000000000000000000..c6336a9a69b95dc978719ce68896e3e752e67fed Binary files /dev/null and b/doc/paddle/user_guides/cv_case/image_classification/image/train_and_test.png differ diff --git a/doc/paddle/user_guides/cv_case/image_classification/image/variations.png b/doc/paddle/user_guides/cv_case/image_classification/image/variations.png new file mode 100644 index 0000000000000000000000000000000000000000..b4ebbbe6a50f5fd7cd0cccb52cdac5653e34654c Binary files /dev/null and b/doc/paddle/user_guides/cv_case/image_classification/image/variations.png differ diff --git a/doc/paddle/user_guides/cv_case/image_classification/image/variations_en.png b/doc/paddle/user_guides/cv_case/image_classification/image/variations_en.png new file mode 100644 index 0000000000000000000000000000000000000000..88c60fe87f802c5ce560bb15bbdbd229aeafc4e4 Binary files /dev/null and b/doc/paddle/user_guides/cv_case/image_classification/image/variations_en.png differ diff --git a/doc/paddle/user_guides/cv_case/image_classification/image/vgg16.png b/doc/paddle/user_guides/cv_case/image_classification/image/vgg16.png new file mode 100644 index 0000000000000000000000000000000000000000..6270eefcfd7071bc1643ee06567e5b81aaf4c177 Binary files /dev/null and b/doc/paddle/user_guides/cv_case/image_classification/image/vgg16.png differ diff --git a/doc/paddle/user_guides/cv_case/image_classification/index.cn.html b/doc/paddle/user_guides/cv_case/image_classification/index.cn.html new file mode 100644 index 0000000000000000000000000000000000000000..ccf3719fe0d7b27903559de43324315e03664c13 --- /dev/null +++ b/doc/paddle/user_guides/cv_case/image_classification/index.cn.html @@ -0,0 +1,676 @@ + + + + + + + + + + + + + + + + + +
+
+ + + + + + + diff --git a/doc/paddle/user_guides/cv_case/image_classification/index.html b/doc/paddle/user_guides/cv_case/image_classification/index.html new file mode 100644 index 0000000000000000000000000000000000000000..66c1a657e2ee88463dbd1e4706109aaa7d67d554 --- /dev/null +++ b/doc/paddle/user_guides/cv_case/image_classification/index.html @@ -0,0 +1,687 @@ + + + + + + + + + + + + + + + + + +
+
+ + + + + + + diff --git a/doc/paddle/user_guides/cv_case/image_classification/resnet.py b/doc/paddle/user_guides/cv_case/image_classification/resnet.py new file mode 100644 index 0000000000000000000000000000000000000000..40a94bf889d57a53a97adcd7dd97232c77292f7e --- /dev/null +++ b/doc/paddle/user_guides/cv_case/image_classification/resnet.py @@ -0,0 +1,73 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import paddle.fluid as fluid + +__all__ = ['resnet_cifar10'] + + +def conv_bn_layer(input, + ch_out, + filter_size, + stride, + padding, + act='relu', + bias_attr=False): + tmp = fluid.layers.conv2d( + input=input, + filter_size=filter_size, + num_filters=ch_out, + stride=stride, + padding=padding, + act=None, + bias_attr=bias_attr) + return fluid.layers.batch_norm(input=tmp, act=act) + + +def shortcut(input, ch_in, ch_out, stride): + if ch_in != ch_out: + return conv_bn_layer(input, ch_out, 1, stride, 0, None) + else: + return input + + +def basicblock(input, ch_in, ch_out, stride): + tmp = conv_bn_layer(input, ch_out, 3, stride, 1) + tmp = conv_bn_layer(tmp, ch_out, 3, 1, 1, act=None, bias_attr=True) + short = shortcut(input, ch_in, ch_out, stride) + return fluid.layers.elementwise_add(x=tmp, y=short, act='relu') + + +def layer_warp(block_func, input, ch_in, ch_out, count, stride): + tmp = block_func(input, ch_in, ch_out, stride) + for i in range(1, count): + tmp = block_func(tmp, ch_out, ch_out, 1) + return tmp + + +def resnet_cifar10(ipt, depth=32): + # depth should be one of 20, 32, 44, 56, 110, 1202 + assert (depth - 2) % 6 == 0 + n = (depth - 2) // 6 + nStages = {16, 64, 128} + conv1 = conv_bn_layer(ipt, ch_out=16, filter_size=3, stride=1, padding=1) + res1 = layer_warp(basicblock, conv1, 16, 16, n, 1) + res2 = layer_warp(basicblock, res1, 16, 32, n, 2) + res3 = layer_warp(basicblock, res2, 32, 64, n, 2) + pool = fluid.layers.pool2d( + input=res3, pool_size=8, pool_type='avg', pool_stride=1) + predict = fluid.layers.fc(input=pool, size=10, act='softmax') + return predict diff --git a/doc/paddle/user_guides/cv_case/image_classification/train.py b/doc/paddle/user_guides/cv_case/image_classification/train.py new file mode 100644 index 0000000000000000000000000000000000000000..09479180824a9008029dce77a1364f50e6ee0f17 --- /dev/null +++ b/doc/paddle/user_guides/cv_case/image_classification/train.py @@ -0,0 +1,227 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License + +from __future__ import print_function + +import os +import argparse +import paddle +import paddle.fluid as fluid +import numpy +import sys +from vgg import vgg_bn_drop +from resnet import resnet_cifar10 + + +def parse_args(): + parser = argparse.ArgumentParser("image_classification") + parser.add_argument( + '--enable_ce', + action='store_true', + help='If set, run the task with continuous evaluation logs.') + parser.add_argument( + '--use_gpu', type=bool, default=0, help='whether to use gpu') + parser.add_argument( + '--num_epochs', type=int, default=1, help='number of epoch') + args = parser.parse_args() + return args + + +def inference_network(): + # The image is 32 * 32 with RGB representation. + data_shape = [None, 3, 32, 32] + images = fluid.data(name='pixel', shape=data_shape, dtype='float32') + + predict = resnet_cifar10(images, 32) + # predict = vgg_bn_drop(images) # un-comment to use vgg net + return predict + + +def train_network(predict): + label = fluid.data(name='label', shape=[None, 1], dtype='int64') + cost = fluid.layers.cross_entropy(input=predict, label=label) + avg_cost = fluid.layers.mean(cost) + accuracy = fluid.layers.accuracy(input=predict, label=label) + return [avg_cost, accuracy] + + +def optimizer_program(): + return fluid.optimizer.Adam(learning_rate=0.001) + + +def train(use_cuda, params_dirname): + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + BATCH_SIZE = 128 + + if args.enable_ce: + train_reader = paddle.batch( + paddle.dataset.cifar.train10(), batch_size=BATCH_SIZE) + test_reader = paddle.batch( + paddle.dataset.cifar.test10(), batch_size=BATCH_SIZE) + else: + test_reader = paddle.batch( + paddle.dataset.cifar.test10(), batch_size=BATCH_SIZE) + train_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.cifar.train10(), buf_size=128 * 100), + batch_size=BATCH_SIZE) + + feed_order = ['pixel', 'label'] + + main_program = fluid.default_main_program() + start_program = fluid.default_startup_program() + + if args.enable_ce: + main_program.random_seed = 90 + start_program.random_seed = 90 + + predict = inference_network() + avg_cost, acc = train_network(predict) + + # Test program + test_program = main_program.clone(for_test=True) + optimizer = optimizer_program() + optimizer.minimize(avg_cost) + + exe = fluid.Executor(place) + + EPOCH_NUM = args.num_epochs + + # For training test cost + def train_test(program, reader): + count = 0 + feed_var_list = [ + program.global_block().var(var_name) for var_name in feed_order + ] + feeder_test = fluid.DataFeeder(feed_list=feed_var_list, place=place) + test_exe = fluid.Executor(place) + accumulated = len([avg_cost, acc]) * [0] + for tid, test_data in enumerate(reader()): + avg_cost_np = test_exe.run( + program=program, + feed=feeder_test.feed(test_data), + fetch_list=[avg_cost, acc]) + accumulated = [ + x[0] + x[1][0] for x in zip(accumulated, avg_cost_np) + ] + count += 1 + return [x / count for x in accumulated] + + # main train loop. 
+ def train_loop(): + feed_var_list_loop = [ + main_program.global_block().var(var_name) + for var_name in feed_order + ] + feeder = fluid.DataFeeder(feed_list=feed_var_list_loop, place=place) + exe.run(start_program) + + step = 0 + for pass_id in range(EPOCH_NUM): + for step_id, data_train in enumerate(train_reader()): + avg_loss_value = exe.run( + main_program, + feed=feeder.feed(data_train), + fetch_list=[avg_cost, acc]) + if step_id % 100 == 0: + print("\nPass %d, Batch %d, Cost %f, Acc %f" % + (step_id, pass_id, avg_loss_value[0], + avg_loss_value[1])) + else: + sys.stdout.write('.') + sys.stdout.flush() + step += 1 + + avg_cost_test, accuracy_test = train_test( + test_program, reader=test_reader) + print('\nTest with Pass {0}, Loss {1:2.2}, Acc {2:2.2}'.format( + pass_id, avg_cost_test, accuracy_test)) + + if params_dirname is not None: + fluid.io.save_inference_model(params_dirname, ["pixel"], + [predict], exe) + + if args.enable_ce and pass_id == EPOCH_NUM - 1: + print("kpis\ttrain_cost\t%f" % avg_loss_value[0]) + print("kpis\ttrain_acc\t%f" % avg_loss_value[1]) + print("kpis\ttest_cost\t%f" % avg_cost_test) + print("kpis\ttest_acc\t%f" % accuracy_test) + + train_loop() + + +def infer(use_cuda, params_dirname=None): + from PIL import Image + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + exe = fluid.Executor(place) + inference_scope = fluid.core.Scope() + + def load_image(infer_file): + im = Image.open(infer_file) + im = im.resize((32, 32), Image.ANTIALIAS) + + im = numpy.array(im).astype(numpy.float32) + # The storage order of the loaded image is W(width), + # H(height), C(channel). PaddlePaddle requires + # the CHW order, so transpose them. + im = im.transpose((2, 0, 1)) # CHW + im = im / 255.0 + + # Add one dimension to mimic the list format. + im = numpy.expand_dims(im, axis=0) + return im + + cur_dir = os.path.dirname(os.path.realpath(__file__)) + img = load_image(cur_dir + '/image/dog.png') + + with fluid.scope_guard(inference_scope): + # Use fluid.io.load_inference_model to obtain the inference program desc, + # the feed_target_names (the names of variables that will be feeded + # data using feed operators), and the fetch_targets (variables that + # we want to obtain data from using fetch operators). + [inference_program, feed_target_names, + fetch_targets] = fluid.io.load_inference_model(params_dirname, exe) + + # Construct feed as a dictionary of {feed_target_name: feed_target_data} + # and results will contain a list of data corresponding to fetch_targets. + results = exe.run( + inference_program, + feed={feed_target_names[0]: img}, + fetch_list=fetch_targets) + + # infer label + label_list = [ + "airplane", "automobile", "bird", "cat", "deer", "dog", "frog", + "horse", "ship", "truck" + ] + + print("infer results: %s" % label_list[numpy.argmax(results[0])]) + + +def main(use_cuda): + if use_cuda and not fluid.core.is_compiled_with_cuda(): + return + save_path = "image_classification_resnet.inference.model" + + train(use_cuda=use_cuda, params_dirname=save_path) + + infer(use_cuda=use_cuda, params_dirname=save_path) + + +if __name__ == '__main__': + # For demo purpose, the training runs on CPU + # Please change accordingly. 
+ args = parse_args() + use_cuda = args.use_gpu + main(use_cuda) diff --git a/doc/paddle/user_guides/cv_case/image_classification/vgg.py b/doc/paddle/user_guides/cv_case/image_classification/vgg.py new file mode 100644 index 0000000000000000000000000000000000000000..b3ca9e00feef5fadf17a6c8b1ddc83a3929ae39d --- /dev/null +++ b/doc/paddle/user_guides/cv_case/image_classification/vgg.py @@ -0,0 +1,45 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import paddle.fluid as fluid + + +def vgg_bn_drop(input): + def conv_block(ipt, num_filter, groups, dropouts): + return fluid.nets.img_conv_group( + input=ipt, + pool_size=2, + pool_stride=2, + conv_num_filter=[num_filter] * groups, + conv_filter_size=3, + conv_act='relu', + conv_with_batchnorm=True, + conv_batchnorm_drop_rate=dropouts, + pool_type='max') + + conv1 = conv_block(input, 64, 2, [0.3, 0]) + conv2 = conv_block(conv1, 128, 2, [0.4, 0]) + conv3 = conv_block(conv2, 256, 3, [0.4, 0.4, 0]) + conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0]) + conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0]) + + drop = fluid.layers.dropout(x=conv5, dropout_prob=0.5) + fc1 = fluid.layers.fc(input=drop, size=512, act=None) + bn = fluid.layers.batch_norm(input=fc1, act='relu') + drop2 = fluid.layers.dropout(x=bn, dropout_prob=0.5) + fc2 = fluid.layers.fc(input=drop2, size=512, act=None) + predict = fluid.layers.fc(input=fc2, size=10, act='softmax') + return predict diff --git a/doc/paddle/user_guides/cv_case/index_cn.rst b/doc/paddle/user_guides/cv_case/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..6d1b108f45407bd37111bd611e3a5c0663609c32 --- /dev/null +++ b/doc/paddle/user_guides/cv_case/index_cn.rst @@ -0,0 +1,14 @@ +################ +计算机视觉 +################ + +.. todo:: + +计算机视觉是一门关于如何运用照相机和计算机来获取我们所需的,被拍摄对象的数据与信息的学问。在这里PaddlePaddle为大家提供了两篇cv的教程供大家学习: + +.. toctree:: + :titlesonly: + + image_classification/README.cn.md + gan/README.cn.md + diff --git a/doc/paddle/user_guides/cv_case/index_en.rst b/doc/paddle/user_guides/cv_case/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..0523bfb6bd5455557ef3282bdfbcf63b1bccac7f --- /dev/null +++ b/doc/paddle/user_guides/cv_case/index_en.rst @@ -0,0 +1,11 @@ +############################ +Computer Vision +############################ + + +.. toctree:: + :titlesonly: + + image_classification/README.md + gan/README.md + diff --git a/doc/paddle/user_guides/index_cn.rst b/doc/paddle/user_guides/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..be7d31882574834be02c9f4a74cc8ee4a5a1ebd9 --- /dev/null +++ b/doc/paddle/user_guides/index_cn.rst @@ -0,0 +1,100 @@ +######## +典型案例 +######## + +.. 
todo:: + +如果您已经掌握了快速上手阶段的内容,期望可以针对实际问题建模、搭建自己网络,本模块提供了一些 Paddle 的具体典型案例供您参考: + +本章文档将指导您如何使用PaddlePaddle完成基础的深度学习任务 + +本章文档涉及大量了深度学习基础知识,也介绍了如何使用PaddlePaddle实现这些内容,请参阅以下说明了解如何使用: + +内容简介 +====================== + + + - `简单案例 <../user_guides/simple_case/index_cn.html>`_ :介绍了 Paddle 的基本案例 + + - `计算机视觉 <../user_guides/cv_case/index_cn.html>`_ :介绍使用 Paddle 解决计算机视觉领域的案例 + + - `自然语言处理 <../user_guides/nlp_case/index_cn.html>`_: 介绍使用 Paddle 实现自然语言处理方向的案例 + + - `推荐 <../user_guides/rec_case/index_cn.html>`_:介绍如何使用 Paddle 完成推荐领域任务的案例 + + - `工具组件 <../user_guides/tools/index_cn.html>`_:介绍在 Paddle 工具组件的使用案例 + +.. toctree:: + :hidden: + + simple_case/index_cn.rst + cv_case/index_cn.rst + nlp_case/index_cn.rst + rec_case/index_cn.rst + tools/index_cn.rst + + +我们把Jupyter、PaddlePaddle、以及各种被依赖的软件都打包进一个Docker image了。所以您不需要自己来安装各种软件,只需要安装Docker即可。对于各种Linux发行版,请参考 https://www.docker.com 。如果您使用 `Windows `_ 或者 `Mac `_,可以考虑 `给Docker更多内存和CPU资源 `_ 。 + +使用方法 +====================== + +本书默认使用CPU训练,若是要使用GPU训练,使用步骤会稍有变化,请参考下文“使用GPU训练” + +使用CPU训练 +>>>>>>>>>>>> + +只需要在命令行窗口里运行: + +.. code-block:: shell + + docker run -d -p 8888:8888 paddlepaddle/book + +即可从DockerHub.com下载和运行本书的Docker image。阅读和在线编辑本书请在浏览器里访问 http://localhost:8888 + +如果您访问DockerHub.com很慢,可以试试我们的另一个镜像docker.paddlepaddlehub.com: + +:: + + docker run -d -p 8888:8888 docker.paddlepaddlehub.com/book + + +使用GPU训练 +>>>>>>>>>>>>> + +为了保证GPU驱动能够在镜像里面正常运行,我们推荐使用 `nvidia-docker `_ 来运行镜像。请先安装nvidia-docker,之后请运行: + +:: + + nvidia-docker run -d -p 8888:8888 paddlepaddle/book:latest-gpu + + +或者使用国内的镜像请运行: + +:: + + nvidia-docker run -d -p 8888:8888 docker.paddlepaddlehub.com/book:latest-gpu + + +还需要将以下代码 + +.. code-block:: python + + use_cuda = False + + +改成: + +.. code-block:: python + + use_cuda = True + +贡献新章节 +============= + +您要是能贡献新的章节那就太好了!请发Pull Requests把您写的章节加入到 :code:`pending` 下面的一个子目录里。当这一章稳定下来,我们一起把您的目录挪到根目录。 + +为了写作、运行、调试,您需要安装Python 2.x和Go >1.5, 并可以用 `脚本程序 `_ 来生成新的Docker image。 + +**Please Note:** We also provide `English Readme `_ for PaddlePaddle book + diff --git a/doc/paddle/user_guides/index_en.rst b/doc/paddle/user_guides/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..6af142262b52098706a684ea499dfaba246550e6 --- /dev/null +++ b/doc/paddle/user_guides/index_en.rst @@ -0,0 +1,113 @@ +########### +User Guides +########### + +.. todo:: + +If you have got the hang of Beginner's Guide, and wish to model practical problems and build your original networks, this section will provide +you with some detailed operations: + + +This section collects several documents arranging from the simplest to the most challenging, which will guide you through the basic deep learning tasks in PaddlePaddle. + +The documentation in this chapter covers a lot of deep learning basics and how to implement them with PaddlePaddle. See the instructions below for how to use: + + +Overview +====================== + + + - `Simple Case <../user_guides/simple_case/index_en.html>`_ :introduces basic cases of Paddle + + - `Computer Vision <../user_guides/cv_case/index_en.html>`_ :introduces cases of using paddle to realize Computer Vision task + + - `Natural Language Processing <../user_guides/nlp_case/index_en.html>`_:introduces cases of using paddle to realize Natural Language Processing tasks + + - `Recommend <../user_guides/rec_case/index_en.html>`_:introduces cases of using paddle to realize Recommend tasks + + - `Models Zoo <../user_guides/models/index_en.html>`_:introduces the models zoo of Paddle + +.. 
toctree:: + :hidden: + + simple_case/index_en.rst + cv_case/index_en.rst + nlp_case/index_en.rst + rec_case/index_en.rst + models/index_cn.rst + + +We packaged Jupyter, PaddlePaddle, and various dependency softwares into a Docker image. It frees you from installing these softwares by yourself, and you only need to just install Docker. For various Linux versions, please refer to https://www.docker.com . If you use docker on `Windows `_ or `Mac `_ , consider `allocate more Memory and CPU resources to Docker `_ . + + +Instructions +====================== + + +This book assumes you are performing CPU training by default. If you want to use GPU training, the steps will vary slightly. Please refer to "GPU Training" below. + + + + + +CPU training +>>>>>>>>>>>> + +Just run these in shell: + +.. code-block:: shell + + docker run -d -p 8888:8888 paddlepaddle/book + +It downloads the Docker image for running books from DockerHub.com. +To read and edit this book on-line, please visit http://localhost:8888 in your browser. + +If the Internet connection to DockerHub.com is compromised, try our spare docker image named docker.paddlepaddlehub.com: + +:: + + docker run -d -p 8888:8888 docker.paddlepaddlehub.com/book + + +GPU training +>>>>>>>>>>>>> + +To ensure that the GPU driver works properly in the image, we recommend running the image with `nvidia docker `_ . Please install nvidia-docker first, then run: + + +:: + + nvidia-docker run -d -p 8888:8888 paddlepaddle/book:latest-gpu + + +Or use a image source in China to run: + +:: + + nvidia-docker run -d -p 8888:8888 docker.paddlepaddlehub.com/book:latest-gpu + + +modify the following codes + +.. code-block:: python + + use_cuda = False + + +into : + +.. code-block:: python + + use_cuda = True + + + +Contribute to Book +=================== + +We highly appreciate your original contributions of new chapters to Book! Just Pull Requests of your contributions to the sub-directory in :code:`pending` . When this chapter is endorsed, we'll gladly move it to the root directory. 
+ + +For writing, running, debugging, you need to install `shell `_ to generate Docker image。 + +**Please Note:** We also provide `English Readme `_ for PaddlePaddle book diff --git a/doc/paddle/user_guides/models/index_cn.md b/doc/paddle/user_guides/models/index_cn.md new file mode 100644 index 0000000000000000000000000000000000000000..ce8608465fbc5cf72e2a0bfa154830f6b3fdff19 --- /dev/null +++ b/doc/paddle/user_guides/models/index_cn.md @@ -0,0 +1,403 @@ +# PaddlePaddle Models + +## PaddleCV + +**图像分类** + +图像分类是根据图像的语义信息对不同类别图像进行区分,是计算机视觉中重要的基础问题,是物体检测、图像分割、物体跟踪、行为分析、人脸识别等其他高层视觉任务的基础,在许多领域都有着广泛的应用。如:安防领域的人脸识别和智能视频分析等,交通领域的交通场景识别,互联网领域基于内容的图像检索和相册自动归类,医学领域的图像识别等。 + +| **模型名称** | **模型简介** | **数据集** | **评估指标** **top-1/top-5 accuracy(CV2)** | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------- | ------------------------------------------------ | +| [AlexNet](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification) | 首次在CNN中成功的应用了ReLU、Dropout和LRN,并使用GPU进行运算加速 | ImageNet-2012验证集 | 56.72%/79.17% | +| [VGG](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification) | 在AlexNet的基础上使用3*3小卷积核,增加网络深度,具有很好的泛化能力 | ImageNet-2012验证集 | 72.56%/90.93% | +| [GoogleNet](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification) | 在不增加计算负载的前提下增加了网络的深度和宽度,性能更加优越 | ImageNet-2012验证集 | 70.70%/89.66% | +| [ResNet](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification) | Residual Network,引入了新的残差结构,解决了随着网络加深,准确率下降的问题 | ImageNet-2012验证集 | 80.93%/95.33% | +| [ResNet-D](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification) | 融合最新多种对ResNet改进策略,ResNet50_vd的top1准确率达到79.84% | ImageNet-2012验证集 | 79.84%/94.93% | +| [Inception-v4](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification) | 将Inception模块与Residual Connection进行结合,通过ResNet的结构极大地加速训练并获得性能的提升 | ImageNet-2012验证集 | 80.77%/95.26% | +| [MobileNet v1](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification) | 将传统的卷积结构改造成两层卷积结构的网络,在基本不影响准确率的前提下大大减少计算时间,更适合移动端和嵌入式视觉应用 | ImageNet-2012验证集 | 70.99%/89.68% | +| [MobileNet v2](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification) | MobileNet结构的微调,直接在thinner的bottleneck层上进行skip learning连接以及对bottleneck layer不进行ReLu非线性处理可取得更好的结果 | ImageNet-2012验证集 | 72.15%/90.65% | +| [SE_ResNeXt](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification) | 在ResNeXt 基础、上加入了SE(Sequeeze-and-Excitation) 模块,提高了识别准确率,在ILSVRC 2017 的分类项目中取得了第一名 | ImageNet-2012验证集 | 81.40%/95.48% | +| [ShuffleNet v2](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification) | ECCV2018,轻量级CNN网络,在速度和准确度之间做了很好地平衡。在同等复杂度下,比ShuffleNet和MobileNetv2更准确,更适合移动端以及无人车领域 | ImageNet-2012验证集 | 70.03%/89.17% | + +
+
+
+ +**目标检测** + +目标检测任务的目标是给定一张图像或是一个视频帧,让计算机找出其中所有目标的位置,并给出每个目标的具体类别。对于计算机而言,能够“看到”的是图像被编码之后的数字,但很难解图像或是视频帧中出现了人或是物体这样的高层语义概念,也就更加难以定位目标出现在图像中哪个区域。 + +| 模型名称 | 模型简介 | 数据集 | 评估指标 mAP | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ---------- | ------------------------------------------------------- | +| [SSD](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/PaddleDetection) | 很好的继承了MobileNet预测速度快,易于部署的特点,能够很好的在多种设备上完成图像目标检测任务 | VOC07 test | mAP = 73.32% | +| [Faster-RCNN](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/PaddleDetection) | 创造性地采用卷积网络自行产生建议框,并且和目标检测网络共享卷积网络,建议框数目减少,质量提高 | MS-COCO | 基于ResNet 50 mAP(0.50:0.95) = 36.7% | +| [Mask-RCNN](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/PaddleDetection) | 经典的两阶段框架,在Faster R-CNN模型基础上添加分割分支,得到掩码结果,实现了掩码和类别预测关系的解藕,可得到像素级别的检测结果。 | MS-COCO | 基于ResNet 50 Mask mAP(0.50:0.95) = 31.4% | +| [RetinaNet](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/PaddleDetection) | 经典的一阶段框架,由ResNet主干网络、FPN结构、和两个分别用于回归物体位置和预测物体类别的子网络组成。在训练过程中使用Focal Loss,解决了传统一阶段检测器存在前景背景类别不平衡的问题,进一步提高了一阶段检测器的精度。 | MS-COCO | 基于ResNet mAP (500.50:0.95) = 36% | +| [YOLOv3](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/PaddleDetection) | 速度和精度均衡的目标检测网络,相比于原作者darknet中的YOLO v3实现,PaddlePaddle实现参考了论文[Bag of Tricks for Image Classification with Convolutional Neural Networks](https://arxiv.org/pdf/1812.01187.pdf) 增加了mixup,label_smooth等处理,精度(mAP(0.5:0.95))相比于原作者提高了4.7个绝对百分点,在此基础上加入synchronize batch normalization, 最终精度相比原作者提高5.9个绝对百分点。 | MS-COCO | 基于DarkNet mAP(0.50:0.95)= 38.9% | +| [PyramidBox](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/face_detection) | **PyramidBox** **模型是百度自主研发的人脸检测模型**,利用上下文信息解决困难人脸的检测问题,网络表达能力高,鲁棒性强。于18年3月份在WIDER Face数据集上取得第一名 | WIDER FACE | mAP (Easy/Medium/Hard set)= 96.0%/ 94.8%/ 88.8% | + +
+
+
+ +**图像分割** + +图像语义分割顾名思义是将图像像素按照表达的语义含义的不同进行分组/分割,图像语义是指对图像内容的理解,例如,能够描绘出什么物体在哪里做了什么事情等,分割是指对图片中的每个像素点进行标注,标注属于哪一类别。近年来用在无人车驾驶技术中分割街景来避让行人和车辆、医疗影像分析中辅助诊断等。 + +| 模型名称 | 模型简介 | 数据集 | 评估指标 | +| ------------------------------------------------------------ | ------------------------------------------------------------ | --------- | --------------- | +| [ICNet](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/icnet) | 主要用于图像实时语义分割,能够兼顾速度和准确性,易于线上部署 | Cityscape | Mean IoU=67.0% | +| [DeepLab V3+](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/deeplabv3%2B) | 通过encoder-decoder进行多尺度信息的融合,同时保留了原来的空洞卷积和ASSP层, 其骨干网络使用了Xception模型,提高了语义分割的健壮性和运行速率 | Cityscape | Mean IoU=78.81% | + + +
+
+
+ +**关键点检测** + +人体骨骼关键点检测,Pose Estimation,主要检测人体的一些关键点,如关节,五官等,通过关键点描述人体骨骼信息。人体骨骼关键点检测对于描述人体姿态,预测人体行为至关重要。是诸多计算机视觉任务的基础,例如动作分类,异常行为检测,以及自动驾驶等等。 + +| 模型名称 | 模型简介 | 数据集 | 评估指标 | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------ | ------------ | +| [Simple Baselines](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/human_pose_estimation) | coco2018关键点检测项目亚军方案,网络结构非常简单,效果达到state of the art | COCO val2017 | AP = 72.7% | + +
+
+
+ +**图像生成** + +图像生成是指根据输入向量,生成目标图像。这里的输入向量可以是随机的噪声或用户指定的条件向量。具体的应用场景有:手写体生成、人脸合成、风格迁移、图像修复等。 + +| 模型名称 | 模型简介 | 数据集 | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ---------- | +| [CGAN](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/PaddleGAN) | 条件生成对抗网络,一种带条件约束的GAN,使用额外信息对模型增加条件,可以指导数据生成过程 | Mnist | +| [DCGAN](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/PaddleGAN) | 深度卷积生成对抗网络,将GAN和卷积网络结合起来,以解决GAN训练不稳定的问题 | Mnist | +| [Pix2Pix](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/PaddleGAN) | 图像翻译,通过成对图片将某一类图片转换成另外一类图片,可用于风格迁移 | Cityscapes | +| [CycleGAN](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/PaddleGAN) | 图像翻译,可以通过非成对的图片将某一类图片转换成另外一类图片,可用于风格迁移 | Cityscapes | +| [StarGAN](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/PaddleGAN) | 多领域属性迁移,引入辅助分类帮助单个判别器判断多个属性,可用于人脸属性转换 | Celeba | +| [AttGAN](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/PaddleGAN) | 利用分类损失和重构损失来保证改变特定的属性,可用于人脸特定属性转换 | Celeba | +| [STGAN](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/PaddleGAN) | 人脸特定属性转换,只输入有变化的标签,引入GRU结构,更好的选择变化的属性 | Celeba | + +
+
+
+ +**场景文字识别** + +场景文字识别是在图像背景复杂、分辨率低下、字体多样、分布随意等情况下,将图像信息转化为文字序列的过程,可认为是一种特别的翻译过程:将图像输入翻译为自然语言输出。 + +| 模型名称 | 模型简介 | 数据集 | 评估指标 | +| ------------------------------------------------------------ | ------------------------------------------------------------ | -------------------------- | -------------- | +| [CRNN-CTC](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/ocr_recognition) | 使用CTC model识别图片中单行英文字符,用于端到端的文本行图片识别方法 | 单行不定长的英文字符串图片 | 错误率= 22.3% | +| [OCR Attention](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/ocr_recognition) | 使用attention 识别图片中单行英文字符,用于端到端的自然场景文本识别, | 单行不定长的英文字符串图片 | 错误率 = 15.8% | + +
+
+
+ + +**度量学习** + +度量学习也称作距离度量学习、相似度学习,通过学习对象之间的距离,度量学习能够用于分析对象时间的关联、比较关系,在实际问题中应用较为广泛,可应用于辅助分类、聚类问题,也广泛用于图像检索、人脸识别等领域。 + +| 模型名称 | 模型简介 | 数据集 | 评估指标 Recall@Rank-1(使用arcmargin训练) | +| ------------------------------------------------------------ | --------------------------------------------------------- | ------------------------------ | --------------------------------------------- | +| [ResNet50未微调](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/metric_learning) | 使用arcmargin loss训练的特征模型 | Stanford Online Product(SOP) | 78.11% | +| [ResNet50使用triplet微调](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/metric_learning) | 在arcmargin loss基础上,使用triplet loss微调的特征模型 | Stanford Online Product(SOP) | 79.21% | +| [ResNet50使用quadruplet微调](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/metric_learning) | 在arcmargin loss基础上,使用quadruplet loss微调的特征模型 | Stanford Online Product(SOP) | 79.59% | +| [ResNet50使用eml微调](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/metric_learning) | 在arcmargin loss基础上,使用eml loss微调的特征模型 | Stanford Online Product(SOP) | 80.11% | +| [ResNet50使用npairs微调](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/metric_learning) | 在arcmargin loss基础上,使用npairs loss微调的特征模型 | Stanford Online Product(SOP) | 79.81% | + +
+
+
+ + +**视频分类和动作定位** + +视频分类是视频理解任务的基础,包含语音数据、包含运动信息等的视频对象,因此理解视频需要获得更多的上下文信息,不仅要理解每帧图像是什么、包含什么,还需要结合不同帧,知道上下文的关联信息。视频分类方法主要包含基于卷积神经网络、基于循环神经网络、或将这两者结合的方法。 + +| 模型名称 | 模型简介 | 数据集 | 评估指标 | +| ------------------------------------------------------------ | ------------------------------------------------------------ | -------------------------- | ----------- | +| [TSN](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/PaddleVideo) | ECCV'16,基于2D-CNN的经典网络结构,首次引入序列信息到视频分类,证明序列信息有效性 | Kinetics-400 | Top-1 = 67% | +| [Non-Local](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/PaddleVideo) | 视频非局部关联建模模型,引入类似self-attention机制,效果好,计算量大 | Kinetics-400 | Top-1 = 62% | +| [stNet](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/PaddleVideo) | ActivityNet2018 最佳single模型,AAAI19,融合局部与全局的时序模型 | Kinetics-400 | Top-1 = 69% | +| [TSM](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/PaddleVideo) | TSN改进版,简单高效,计算简单,当前的SOTA | Kinetics-400 | Top-1 = 70% | +| [Attention LSTM](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/PaddleVideo) | ActivityNet17最佳single model,更稳定的时序模型 | Youtube-8M | GAP = 86% | +| [Attention Cluster](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/PaddleVideo) | CVPR18,引入不同模态的不同注意力聚合模型,更好捕获特征间的组合关系 | Youtube-8M | GAP = 87% | +| [NeXtVlad](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/PaddleVideo) | Youtube-8M 2018最佳single model,弱化时序关系,适合建模短视频 | Youtube-8M | GAP = 87% | +| [C-TCN](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/PaddleVideo) | 2018年ActivityNet夺冠方案,提供了处理视频动作定位问题的解决方案 | ActivityNet1.3提供的数据集 | Top1=31% | + +
+
+
+ +## PaddleNLP + +**基础模型(词法分析&语言模型)** + +**词法分析** + +[LAC (**Lexical Analysis of Chinese**](https://github.com/PaddlePaddle/models/tree/develop/PaddleNLP/lexical_analysis))百度自主研发中文特色模型词法分析任务,**输入是一个字符串,而输出是句子中的词边界和词性、实体类别。 + +| **模型** | **Precision** | **Recall** | **F1-score** | +| ---------------- | ------------- | ---------- | ------------ | +| Lexical Analysis | 88.0% | 88.7% | 88.4% | +| BERT finetuned | 90.2% | 90.4% | 90.3% | +| ERNIE finetuned | 92.0% | 92.0% | 92.0% | + +
+
+
+ + +**语言模型** + +[基于LSTM的语言模型任务](https://github.com/PaddlePaddle/models/tree/develop/PaddleNLP/language_model),给定一个输入词序列(中文分词、英文tokenize),计算其PPL(语言模型困惑度,用户表示句子的流利程度)。 + +| **large config** | **train** | **valid** | **test** | +| ---------------- | --------- | --------- | -------- | +| paddle | 37.221 | 82.358 | 78.137 | +| tensorflow | 38.342 | 82.311 | 78.121 | + +
+
+
+ +**文本理解(文本分类&阅读理解)** + +**情感分析** + +[Senta(Sentiment Classification](https://github.com/PaddlePaddle/models/tree/develop/PaddleNLP/sentiment_classification))百度AI开放平台中情感倾向分析模型、百度自主研发的中文情感分析特色模型。 + +| **模型** | **dev** | **test** | **模型(****finetune****)** | **dev** | **test** | +| ------------- | ------- | -------- | ---------------------------- | ------- | -------- | +| BOW | 89.8% | 90.0% | BOW | 91.3% | 90.6% | +| CNN | 90.6% | 89.9% | CNN | 92.4% | 91.8% | +| LSTM | 90.0% | 91.0% | LSTM | 93.3% | 92.2% | +| GRU | 90.0% | 89.8% | GRU | 93.3% | 93.2% | +| BI-LSTM | 88.5% | 88.3% | BI-LSTM | 92.8% | 91.4% | +| ERNIE | 95.1% | 95.4% | ERNIE | 95.4% | 95.5% | +| ERNIE+BI-LSTM | 95.3% | 95.2% | ERNIE+BI-LSTM | 95.7% | 95.6% | + + +
+ +**对话情绪识别** + +[EmoTect(Emotion Detection](https://github.com/PaddlePaddle/models/tree/develop/PaddleNLP/emotion_detection))专注于识别智能对话场景中用户的情绪识别,并开源基于百度海量数据训练好的预训练模型。 + +| **模型** | **闲聊** | **客服** | **微博** | +| -------- | -------- | -------- | -------- | +| BOW | 90.2% | 87.6% | 74.2% | +| LSTM | 91.4% | 90.1% | 73.8% | +| Bi-LSTM | 91.2% | 89.9% | 73.6% | +| CNN | 90.8% | 90.7% | 76.3% | +| TextCNN | 91.1% | 91.0% | 76.8% | +| BERT | 93.6% | 92.3% | 78.6% | +| ERNIE | 94.4% | 94.0% | 80.6% | + +
+ +**阅读理解** + +[MRC(Machine Reading Comprehension)](https://github.com/PaddlePaddle/models/tree/develop/PaddleNLP/reading_comprehension)机器阅读理解(MRC)是自然语言处理(NLP)中的关键任务之一,开源的DuReader升级了经典的阅读理解BiDAF模型,去掉了char级别的embedding,在预测层中使用了[pointer network](https://arxiv.org/abs/1506.03134),并且参考了[R-NET](https://www.microsoft.com/en-us/research/wp-content/uploads/2017/05/r-net.pdf)中的一些网络结构,效果上有了大幅提升 + +| **Model** | **Dev ROUGE-L** | **Test ROUGE-L** | +| -------------------------------------------------------- | --------------- | ---------------- | +| BiDAF (原始[论文](https://arxiv.org/abs/1711.05073)基线) | 39.29 | 45.90 | +| 本基线系统 | 47.68 | 54.66 | + +
**语义模型(语义表示&语义匹配)**

**ERNIE**

[ERNIE(Enhanced Representation through kNowledge IntEgration)](https://github.com/PaddlePaddle/LARK/tree/develop/ERNIE) 是百度自研的语义表示模型,通过建模海量数据中的词、实体及实体关系,学习真实世界的语义知识。相较于 BERT 学习原始语言信号,ERNIE 直接对先验语义知识单元进行建模,增强了模型语义表示能力。
+ +**BERT** + +[BERT(Bidirectional Encoder Representation from Transformers) ](https://github.com/PaddlePaddle/LARK/tree/develop/BERT)是一个迁移能力很强的通用语义表示模型, 以 Transformer 为网络基本组件,以双向 Masked Language Model和 Next Sentence Prediction 为训练目标,通过预训练得到通用语义表示,再结合简单的输出层,应用到下游的 NLP 任务,在多个任务上取得了 SOTA 的结果。 + +
+ +**ELMo** + +[ELMo(Embeddings from Language Models) ](https://github.com/PaddlePaddle/LARK/tree/develop/ELMo)是重要的通用语义表示模型之一,以双向 LSTM 为网路基本组件,以 Language Model 为训练目标,通过预训练得到通用的语义表示,将通用的语义表示作为 Feature 迁移到下游 NLP 任务中,会显著提升下游任务的模型性能。 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

| **模型** | **XNLI**(acc,dev/test) | **LCQMC**(acc,dev/test) | **MSRA-NER(SIGHAN 2006)**(f1-score,dev/test) | **ChnSentiCorp**(acc,dev/test) | **nlpcc-dbqa**(mrr,dev/test) | **nlpcc-dbqa**(f1-score,dev/test) |
| ------- | ----------------------- | ----------------------- | --------------------------------------------- | ------------------------------ | ---------------------------- | --------------------------------- |
| BERT | 78.1 / 77.2 | 88.8 / 87 | 94.0 / 92.6 | 94.6 / 94.3 | 94.7 / 94.6 | 80.7 / 80.8 |
| ERNIE | 79.9(+1.8) / 78.4(+1.2) | 89.7(+0.9) / 87.4(+0.4) | 95.0(+1.0) / 93.8(+1.2) | 95.2(+0.6) / 95.4(+1.1) | 95.0(+0.3) / 95.1(+0.5) | 82.3(+1.6) / 82.7(+1.9) |

+ + +
+ +**DAM** + +**深度注意力机制模型(Deep Attention Matching Network)**,是开放领域多轮对话匹配模型。根据多轮对话历史和候选回复内容,排序出最合适的回复。、 + +| | Ubuntu Corpus | Douban Conversation Corpus | | | | | | | | | +| ---- | ------------- | -------------------------- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | +| | R2@1 | R10@1 | R10@2 | R10@5 | MAP | MRR | P@1 | R10@1 | R10@2 | R10@5 | +| DAM | 93.8% | 76.7% | 87.4% | 96.9% | 55.0% | 60.1% | 42.7% | 25.4% | 41.0% | 75.7% | + + +
**SimNet(SimilarityNet)** 是百度自主研发的短文本语义匹配框架,可以根据用户输入的两个文本,计算出相似度得分。

| **模型** | **百度知道** | **ECOM** | **QQSIM** | **UNICOM** | **LCQMC** |
| ------------ | ------------ | -------- | --------- | ---------- | --------- |
| | AUC | AUC | AUC | 正逆序比 | Accuracy |
| BOW_Pairwise | 0.6767 | 0.7329 | 0.7650 | 1.5630 | 0.7532 |
+ +**文本生成(机器翻译&对话生成)** + +**机器翻译** + +[MT(machine translation](https://github.com/PaddlePaddle/models/tree/develop/PaddleNLP/neural_machine_translation/transformer))机器翻译是利用计算机将一种自然语言(源语言)转换为另一种自然语言(目标语言)的过程,输入为源语言句子,输出为相应的目标语言的句子。 + +| **测试集** | **newstest2014** | **newstest2015** | **newstest2016** | +| ---------- | ---------------- | ---------------- | ---------------- | +| Base | 26.35 | 29.07 | 33.30 | +| Big | 27.07 | 30.09 | 34.38 | + +
+ +**对话自动评估** + +**对话自动评估(Auto Dialogue Evaluation)**,主要用于评估开放领域对话系统的回复质量,能够帮助企业或个人快速评估对话系统的回复质量,减少人工评估成本。 + +利用少量标注数据微调后,自动评估打分和人工打分spearman相关系数,如下表。 + +| **/** | **seq2seq_naive** | **seq2seq_att** | **keywords** | **human** | +| ----- | ----------------- | --------------- | ------------ | --------- | +| cor | 0.474 | 0.477 | 0.443 | 0.378 | + +
+ +**对话通用理解** + +**DGU(DialogueGeneralUnderstanding)**,对话通用理解针对数据集开发了相关的模型训练过程,支持分类,多标签分类,序列标注等任务,用户可针对自己的数据集,进行相关的模型定制 + +| **ask_name** | **udc** | **udc** | **udc** | **atis_slot** | **dstc2** | **atis_intent** | **swda** | **mrda** | +| ------------ | ------- | ------- | ------- | ------------- | ---------- | --------------- | -------- | -------- | +| 对话任务 | 匹配 | 匹配 | 匹配 | 槽位解析 | DST | 意图识别 | DA | DA | +| 任务类型 | 分类 | 分类 | 分类 | 序列标注 | 多标签分类 | 分类 | 分类 | 分类 | +| 任务名称 | udc | udc | udc | atis_slot | dstc2 | atis_intent | swda | mrda | +| 评估指标 | R1@10 | R2@10 | R5@10 | F1 | JOINT ACC | ACC | ACC | ACC | +| SOTA | 76.70% | 87.40% | 96.90% | 96.89% | 74.50% | 98.32% | 81.30% | 91.70% | +| DGU | 82.02% | 90.43% | 97.75% | 97.10% | 89.57% | 97.65% | 80.19% | 91.43% | + +
+ +**知识驱动对话** + +[知识驱动对话的新对话任务](https://github.com/baidu/knowledge-driven-dialogue/tree/master),其中机器基于构建的知识图与人交谈。它旨在测试机器进行类似人类对话的能力。 + +| **baseline system** | **F1/BLEU1/BLEU2** | **DISTINCT1/DISTINCT2** | +| ------------------- | ------------------ | ----------------------- | +| retrieval-based | 31.72/0.291/0.156 | 0.118/0.373 | +| generation-based | 32.65/0.300/0.168 | 0.062/0.128 | + +
+
+
+ +## PaddleRec + +个性化推荐,在当前的互联网服务中正在发挥越来越大的作用,目前大部分电子商务系统、社交网络,广告推荐,搜索引擎,都不同程度的使用了各种形式的个性化推荐技术,帮助用户快速找到他们想要的信息。 + +| 模型名称 | 模型简介 | +| ------------------------------------------------------------ | ------------------------------------------------------------ | +| [TagSpace](https://github.com/PaddlePaddle/models/tree/develop/PaddleRec) | 应用于工业级的标签推荐,具体应用场景有feed新闻标签推荐等 | +| [GRU4Rec](https://github.com/PaddlePaddle/models/tree/develop/PaddleRec) | 首次将RNN(GRU)运用于session-based推荐,相比传统的KNN和矩阵分解,效果有明显的提升 | +| [SequenceSemanticRetrieval](https://github.com/PaddlePaddle/models/tree/develop/PaddleRec) | 使用参考论文中的思想,使用多种时间粒度进行用户行为预测 | +| [DeepCTR](https://github.com/PaddlePaddle/models/tree/develop/PaddleRec) | 只实现了DeepFM论文中介绍的模型的DNN部分,DeepFM会在其他例子中给出 | +| [Multiview-Simnet](https://github.com/PaddlePaddle/models/tree/develop/PaddleRec) | 基于多元视图,将用户和项目的多个功能视图合并为一个统一模型 | +| [Word2Vec](https://github.com/PaddlePaddle/models/tree/develop/PaddleRec) | skip-gram模式的word2vector模型 | +| [GraphNeuralNetwork](https://github.com/PaddlePaddle/models/tree/develop/PaddleRec) | 基于会话的图神经网络模型的推荐系统,可以更好的挖掘item中丰富的转换特性以及生成准确的潜在的用户向量表示 | +| [DeepInterestNetwork](https://github.com/PaddlePaddle/models/tree/develop/PaddleRec) | DIN通过一个兴趣激活模块(Activation Unit),用预估目标Candidate ADs的信息去激活用户的历史点击商品,以此提取用户与当前预估目标相关的兴趣。 | + +
+
+
+ + +## 其他模型 + +| 模型名称 | 模型简介 | +| ------------------------------------------------------------ | ------------------------------------------------------------ | +| [DeepASR](https://github.com/PaddlePaddle/models/blob/develop/PaddleSpeech/DeepASR/README_cn.md) | 利用Fluid框架完成语音识别中声学模型的配置和训练,并集成 Kaldi 的解码器 | +| [DQN](https://github.com/PaddlePaddle/models/blob/develop/PaddleRL/DeepQNetwork/README_cn.md) | value based强化学习算法,第一个成功地将深度学习和强化学习结合起来的模型 | +| [DoubleDQN](https://github.com/PaddlePaddle/models/blob/develop/PaddleRL/DeepQNetwork/README_cn.md) | 将Double Q的想法应用在DQN上,解决过优化问题 | +| [DuelingDQN](https://github.com/PaddlePaddle/models/blob/develop/PaddleRL/DeepQNetwork/README_cn.md) | 改进了DQN模型,提高了模型的性能 | diff --git a/doc/paddle/user_guides/models/index_cn.rst b/doc/paddle/user_guides/models/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a9a176511a90305f67a96c694168e8e8a13bf27d --- /dev/null +++ b/doc/paddle/user_guides/models/index_cn.rst @@ -0,0 +1,199 @@ +`模型库 `__ +============ + +图像分类 +-------- + +图像分类是根据图像的语义信息对不同类别图像进行区分,是计算机视觉中重要的基础问题,是物体检测、图像分割、物体跟踪、行为分析、人脸识别等其他高层视觉任务的基础,在许多领域都有着广泛的应用。如:安防领域的人脸识别和智能视频分析等,交通领域的交通场景识别,互联网领域基于内容的图像检索和相册自动归类,医学领域的图像识别等。 + +在深度学习时代,图像分类的准确率大幅度提升,在图像分类任务中,我们向大家介绍了如何在经典的数据集ImageNet上,训练常用的模型,包括AlexNet、VGG、GoogLeNet、ResNet、Inception-v4、MobileNet、DPN(Dual +Path +Network)、SE-ResNeXt模型,也开源了\ `训练的模型 `__\ 方便用户下载使用。同时提供了能够将Caffe模型转换为PaddlePaddle +Fluid模型配置和参数文件的工具。 + +- `AlexNet `__ +- `VGG `__ +- `GoogleNet `__ +- `Residual Network `__ +- `Inception-v4 `__ +- `MobileNet `__ +- `Dual Path + Network `__ +- `SE-ResNeXt `__ +- `Caffe模型转换为Paddle + Fluid配置和模型文件工具 `__ + +目标检测 +-------- + +目标检测任务的目标是给定一张图像或是一个视频帧,让计算机找出其中所有目标的位置,并给出每个目标的具体类别。对于人类来说,目标检测是一个非常简单的任务。然而,计算机能够“看到”的是图像被编码之后的数字,很难解图像或是视频帧中出现了人或是物体这样的高层语义概念,也就更加难以定位目标出现在图像中哪个区域。与此同时,由于目标会出现在图像或是视频帧中的任何位置,目标的形态千变万化,图像或是视频帧的背景千差万别,诸多因素都使得目标检测对计算机来说是一个具有挑战性的问题。 + +在目标检测任务中,我们介绍了如何基于\ `PASCAL +VOC `__\ 、\ `MS +COCO `__\ 数据训练通用物体检测模型,当前介绍了SSD算法,SSD全称Single Shot MultiBox Detector,是目标检测领域较新且效果较好的检测算法之一,具有检测速度快且检测精度高的特点。 + +开放环境中的检测人脸,尤其是小的、模糊的和部分遮挡的人脸也是一个具有挑战的任务。我们也介绍了如何基于 `WIDER FACE `_ 数据训练百度自研的人脸检测PyramidBox模型,该算法于2018年3月份在WIDER FACE的多项评测中均获得 `第一名 `_。 + +- `Single Shot MultiBox + Detector `__ +- `Face Detector: PyramidBox `_ + +图像语义分割 +------------ + +图像语意分割顾名思义是将图像像素按照表达的语义含义的不同进行分组/分割,图像语义是指对图像内容的理解,例如,能够描绘出什么物体在哪里做了什么事情等,分割是指对图片中的每个像素点进行标注,标注属于哪一类别。近年来用在无人车驾驶技术中分割街景来避让行人和车辆、医疗影像分析中辅助诊断等。 + +在图像语义分割任务中,我们介绍如何基于图像级联网络(Image Cascade +Network,ICNet)进行语义分割,相比其他分割算法,ICNet兼顾了准确率和速度。 + +- `ICNet `__ + +图像生成 +----------- + +图像生成是指根据输入向量,生成目标图像。这里的输入向量可以是随机的噪声或用户指定的条件向量。具体的应用场景有:手写体生成、人脸合成、风格迁移、图像修复等。当前的图像生成任务主要是借助生成对抗网络(GAN)来实现。 +生成对抗网络(GAN)由两种子网络组成:生成器和识别器。生成器的输入是随机噪声或条件向量,输出是目标图像。识别器是一个分类器,输入是一张图像,输出是该图像是否是真实的图像。在训练过程中,生成器和识别器通过不断的相互博弈提升自己的能力。 + +在图像生成任务中,我们介绍了如何使用DCGAN和ConditioanlGAN来进行手写数字的生成,另外还介绍了用于风格迁移的CycleGAN. 
+ +- `DCGAN & ConditionalGAN `__ +- `CycleGAN `__ + +场景文字识别 +------------ + +许多场景图像中包含着丰富的文本信息,对理解图像信息有着重要作用,能够极大地帮助人们认知和理解场景图像的内容。场景文字识别是在图像背景复杂、分辨率低下、字体多样、分布随意等情况下,将图像信息转化为文字序列的过程,可认为是一种特别的翻译过程:将图像输入翻译为自然语言输出。场景图像文字识别技术的发展也促进了一些新型应用的产生,如通过自动识别路牌中的文字帮助街景应用获取更加准确的地址信息等。 + +在场景文字识别任务中,我们介绍如何将基于CNN的图像特征提取和基于RNN的序列翻译技术结合,免除人工定义特征,避免字符分割,使用自动学习到的图像特征,完成字符识别。当前,介绍了CRNN-CTC模型和基于注意力机制的序列到序列模型。 + +- `CRNN-CTC模型 `__ +- `Attention模型 `__ + + +度量学习 +------- + + +度量学习也称作距离度量学习、相似度学习,通过学习对象之间的距离,度量学习能够用于分析对象时间的关联、比较关系,在实际问题中应用较为广泛,可应用于辅助分类、聚类问题,也广泛用于图像检索、人脸识别等领域。以往,针对不同的任务,需要选择合适的特征并手动构建距离函数,而度量学习可根据不同的任务来自主学习出针对特定任务的度量距离函数。度量学习和深度学习的结合,在人脸识别/验证、行人再识别(human Re-ID)、图像检索等领域均取得较好的性能,在这个任务中我们主要介绍了基于Fluid的深度度量学习模型,包含了三元组、四元组等损失函数。 + +- `Metric Learning `__ + + +视频分类 +------- + +视频分类是视频理解任务的基础,与图像分类不同的是,分类的对象不再是静止的图像,而是一个由多帧图像构成的、包含语音数据、包含运动信息等的视频对象,因此理解视频需要获得更多的上下文信息,不仅要理解每帧图像是什么、包含什么,还需要结合不同帧,知道上下文的关联信息。视频分类方法主要包含基于卷积神经网络、基于循环神经网络、或将这两者结合的方法。该任务中我们介绍基于Fluid的视频分类模型,目前包含Temporal Segment Network(TSN)模型,后续会持续增加更多模型。 + + +- `TSN `__ + + + +语音识别 +-------- + +自动语音识别(Automatic Speech Recognition, +ASR)是将人类声音中的词汇内容转录成计算机可输入的文字的技术。语音识别的相关研究经历了漫长的探索过程,在HMM/GMM模型之后其发展一直较为缓慢,随着深度学习的兴起,其迎来了春天。在多种语言识别任务中,将深度神经网络(DNN)作为声学模型,取得了比GMM更好的性能,使得 +ASR +成为深度学习应用最为成功的领域之一。而由于识别准确率的不断提高,有越来越多的语言技术产品得以落地,例如语言输入法、以智能音箱为代表的智能家居设备等 +—— 基于语言的交互方式正在深刻的改变人类的生活。 + +与 `DeepSpeech `__ +中深度学习模型端到端直接预测字词的分布不同,本实例更接近传统的语言识别流程,以音素为建模单元,关注语言识别中声学模型的训练,利用\ `kaldi `__\ 进行音频数据的特征提取和标签对齐,并集成 +kaldi 的解码器完成解码。 + +- `DeepASR `__ + +机器翻译 +-------- + +机器翻译(Machine +Translation)将一种自然语言(源语言)转换成一种自然语言(目标语言),是自然语言处理中非常基础和重要的研究方向。在全球化的浪潮中,机器翻译在促进跨语言文明的交流中所起的重要作用是不言而喻的。其发展经历了统计机器翻译和基于神经网络的神经机器翻译(Nueural +Machine Translation, NMT)等阶段。在 NMT +成熟后,机器翻译才真正得以大规模应用。而早阶段的 NMT +主要是基于循环神经网络 RNN +的,其训练过程中当前时间步依赖于前一个时间步的计算,时间步之间难以并行化以提高训练速度。因此,非 +RNN 结构的 NMT 得以应运而生,例如基于卷积神经网络 CNN +的结构和基于自注意力机制(Self-Attention)的结构。 + +本实例所实现的 Transformer +就是一个基于自注意力机制的机器翻译模型,其中不再有RNN或CNN结构,而是完全利用 +Attention 学习语言中的上下文依赖。相较于RNN/CNN, +这种结构在单层内计算复杂度更低、易于并行化、对长程依赖更易建模,最终在多种语言之间取得了最好的翻译效果。 + +- `Transformer `__ + +强化学习 +-------- + +强化学习是近年来一个愈发重要的机器学习方向,特别是与深度学习相结合而形成的深度强化学习(Deep +Reinforcement Learning, +DRL),取得了很多令人惊异的成就。人们所熟知的战胜人类顶级围棋职业选手的 +AlphaGo 就是 DRL +应用的一个典型例子,除游戏领域外,其它的应用还包括机器人、自然语言处理等。 + +深度强化学习的开山之作是在Atari视频游戏中的成功应用, +其可直接接受视频帧这种高维输入并根据图像内容端到端地预测下一步的动作,所用到的模型被称为深度Q网络(Deep +Q-Network, DQN)。本实例就是利用PaddlePaddle Fluid这个灵活的框架,实现了 +DQN 及其变体,并测试了它们在 Atari 游戏中的表现。 + +- `DeepQNetwork `__ + +中文词法分析 +------------ + +中文分词(Word Segmentation)是将连续的自然语言文本,切分出具有语义合理性和完整性的词汇序列的过程。因为在汉语中,词是承担语义的最基本单位,切词是文本分类、情感分析、信息检索等众多自然语言处理任务的基础。 词性标注(Part-of-speech Tagging)是为自然语言文本中的每一个词汇赋予一个词性的过程,这里的词性包括名词、动词、形容词、副词等等。 命名实体识别(Named Entity Recognition,NER)又称作“专名识别”,是指识别自然语言文本中具有特定意义的实体,主要包括人名、地名、机构名、专有名词等。 我们将这三个任务统一成一个联合任务,称为词法分析任务,基于深度神经网络,利用海量标注语料进行训练,提供了一个端到端的解决方案。 + +我们把这个联合的中文词法分析解决方案命名为LAC。LAC既可以认为是Lexical Analysis of Chinese的首字母缩写,也可以认为是LAC Analyzes Chinese的递归缩写。 + +- `LAC `__ + +情感倾向分析 +------------ + +情感倾向分析针对带有主观描述的中文文本,可自动判断该文本的情感极性类别并给出相应的置信度。情感类型分为积极、消极、 中性。情感倾向分析能够帮助企业理解用户消费习惯、分析热点话题和危机舆情监控,为企业提供有力的决策支持。本次我们开放 AI开放平台中情感倾向分析采用的\ `模型 `__\, 提供给用户使用。 + +- `Senta `__ + +语义匹配 +-------- + +在自然语言处理很多场景中,需要度量两个文本在语义上的相似度,这类任务通常被称为语义匹配。例如在搜索中根据查询与候选文档的相似度对搜索结果进行排序,文本去重中文本与文本相似度的计算,自动问答中候选答案与问题的匹配等。 + +本例所开放的DAM (Deep Attention Matching Network)为百度自然语言处理部发表于ACL-2018的工作,用于检索式聊天机器人多轮对话中应答的选择。DAM受Transformer的启发,其网络结构完全基于注意力(attention)机制,利用栈式的self-attention结构分别学习不同粒度下应答和语境的语义表示,然后利用cross-attention获取应答与语境之间的相关性,在两个大规模多轮对话数据集上的表现均好于其它模型。 + +- `Deep Attention Matching Network `__ + +AnyQ +---- + +`AnyQ `__\ (ANswer Your 
Questions) +开源项目主要包含面向FAQ集合的问答系统框架、文本语义匹配工具SimNet。 +问答系统框架采用了配置化、插件化的设计,各功能均通过插件形式加入,当前共开放了20+种插件。开发者可以使用AnyQ系统快速构建和定制适用于特定业务场景的FAQ问答系统,并加速迭代和升级。 + +SimNet是百度自然语言处理部于2013年自主研发的语义匹配框架,该框架在百度各产品上广泛应用,主要包括BOW、CNN、RNN、MM-DNN等核心网络结构形式,同时基于该框架也集成了学术界主流的语义匹配模型,如MatchPyramid、MV-LSTM、K-NRM等模型。使用SimNet构建出的模型可以便捷的加入AnyQ系统中,增强AnyQ系统的语义匹配能力。 + +- `SimNet in PaddlePaddle Fluid `_ + +机器阅读理解 +---- + +机器阅读理解(MRC)是自然语言处理(NLP)中的核心任务之一,最终目标是让机器像人类一样阅读文本,提炼文本信息并回答相关问题。深度学习近年来在NLP中得到广泛使用,也使得机器阅读理解能力在近年有了大幅提高,但是目前研究的机器阅读理解都采用人工构造的数据集,以及回答一些相对简单的问题,和人类处理的数据还有明显差距,因此亟需大规模真实训练数据推动MRC的进一步发展。 + +百度阅读理解数据集是由百度自然语言处理部开源的一个真实世界数据集,所有的问题、原文都来源于实际数据(百度搜索引擎数据和百度知道问答社区),答案是由人类回答的。每个问题都对应多个答案,数据集包含200k问题、1000k原文和420k答案,是目前最大的中文MRC数据集。百度同时开源了对应的阅读理解模型,称为DuReader,采用当前通用的网络分层结构,通过双向attention机制捕捉问题和原文之间的交互关系,生成query-aware的原文表示,最终基于query-aware的原文表示通过point network预测答案范围。 + +- `DuReader in PaddlePaddle Fluid `_ + + +个性化推荐 +------- + +推荐系统在当前的互联网服务中正在发挥越来越大的作用,目前大部分电子商务系统、社交网络,广告推荐,搜索引擎,都不同程度的使用了各种形式的个性化推荐技术,帮助用户快速找到他们想要的信息。 + +在工业可用的推荐系统中,推荐策略一般会被划分为多个模块串联执行。以新闻推荐系统为例,存在多个可以使用深度学习技术的环节,例如新闻的自动化标注,个性化新闻召回,个性化匹配与排序等。PaddlePaddle对推荐算法的训练提供了完整的支持,并提供了多种模型配置供用户选择。 + +- `TagSpace `_ +- `GRU4Rec `_ +- `SequenceSemanticRetrieval `_ +- `DeepCTR `_ +- `Multiview-Simnet `_ diff --git a/doc/paddle/user_guides/models/index_en.rst b/doc/paddle/user_guides/models/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..5e5e88eef38b02a5db1008aea4d4ad06ce292b07 --- /dev/null +++ b/doc/paddle/user_guides/models/index_en.rst @@ -0,0 +1,168 @@ +`Fluid Model Library `__ +============ + +Image classification +-------- + +Image classification is based on the semantic information of images to distinguish different types of images. It is an important basic problem in computer vision. It is the basis of other high-level visual tasks such as object detection, image segmentation, object tracking, behavior analysis, face recognition, etc. The field has a wide range of applications. Such as: face recognition and intelligent video analysis in the security field, traffic scene recognition in the traffic field, content-based image retrieval and automatic classification of albums in the Internet field, image recognition in the medical field. + +In the era of deep learning, the accuracy of image classification has been greatly improved. In the image classification task, we introduced how to train commonly used models in the classic dataset ImageNet, including AlexNet, VGG, GoogLeNet, ResNet, Inception- V4, MobileNet, DPN (Dual +Path Network), SE-ResNeXt model. We also provide open source \ `trained model `__\ to make it convenient for users to download and use. It also provides tools to convert Caffe models into PaddlePaddle Fluid model configurations and parameter files. + +- `AlexNet `__ +- `VGG `__ +- `GoogleNet `__ +- `Residual Network `__ +- `Inception-v4 `__ +- `MobileNet `__ +- `Dual Path Network `__ +- `SE-ResNeXt `__ +- `Convert Caffe model to Paddle Fluid configuration and model file tools `__ + +Object Detection +----------------- + +The goal of the object detection task is to give an image or a video frame, let the computer find the locations of all the objects, and give the specific category of each object. For humans, target detection is a very simple task. However, the computer can only "see" the number after the image is encoded. It is difficult to solve the high-level semantic concept such as human or object in the image or video frame, and it is more difficult to locate the area where the target appears in the image. 
At the same time, because the target will appear anywhere in the image or video frame, the shape of the target is ever-changing, and the background of the image or video frame varies widely. Many factors make the object detection a challenging problem for the computer. + +In the object detection task, we introduced how to train general object detection model based on dataset `PASCAL VOC `__\ , \ `MS COCO `__\ . Currently we introduced SSD algorithm, which is the acronym for Single Shot MultiBox Detector. As one of the newer and better detection algorithms in the object detection field, it features fast detection speed and detection High precision. + +Detecting human faces in an open environment, especially small, obscured and partially occluded faces is also a challenging task. We also introduced how to train Baidu's self-developed face detection PyramidBox model based on `WIDER FACE `_ data. The algorithm won the `first place `_ in multiple evaluations of WIDER FACE in March 2018 . + +- `Single Shot MultiBox Detector `__ +- `Face Detector: PyramidBox `_ + +Image semantic segmentation +---------------------------- + +As the name suggests, Image Semantic Segmentation is to group/segment pixels according to their different semantic meanings. Image semantics refer to the understanding of image content. For example, it can describe what objects are doing what at what location, etc. Segmentation means each pixel in the image is labeled with its category. In recent years, it has been recently used by the driverless vehicles to segment street scenes to avoid pedestrians and vehicles, and by auxiliary diagnosis in medical image analysis. + +In the image semantic segmentation task, we introduce how to perform semantic segmentation based on Image Cascade Network (ICNet). Compared with other segmentation algorithms, ICNet takes into account the accuracy and speed. + +- `ICNet `__ + +Image Synthesis +----------------- + +Image Synthesis refers to generating a target image based on an input vector. The input vector here can be random noise or a user-specified condition vector. Specific application scenarios include: handwriting generation, face synthesis, style migration, image restoration, and the like. Current image generation tasks are primarily achieved by Generative Adversarial Networks (GAN). The GAN consists of two subnetworks: a generator and a discriminator. The input to the generator is a random noise or condition vector and the output is the target image. The discriminator is a classifier, the input is an image, and the output is whether the image is a real image. During the training process, the generator and the discriminator enhance their abilities through constant mutual adversarial process. + +In the image synthesis task, we introduced how to use DCGAN and ConditioanlGAN to generate handwritten numbers, and also introduced CycleGAN for style migration. + +- `DCGAN & ConditionalGAN `__ +- `CycleGAN `__ + +Scene Text Recognition +----------------------- + +Rich textual information is usually contained in scene images, which plays an important role in comprehending the image information and greatly helps people recognize and understand the content of images in real scene. Text recognition in real scene images is a process of converting image information into a sequence of characters in the case of complex image background, low resolution, diverse fonts, random distribution, etc. It can be considered as a special translation process: translation of image input into natural language output. 
The development of scene image text recognition technology has also promoted the emergence of some new applications, such as automatically identifying texts in street signs to help street scene applications obtain more accurate address information. + +In the scene text recognition task, we introduce how to combine CNN-based image feature extraction and RNN-based sequence translation technology, eliminate artificial definition features, avoid character segmentation, and use automatically learned image features to complete character recognition. Currently, the CRNN-CTC model and the sequence-to-sequence model based on the attention mechanism are introduced. + +- `CRNN-CTC model `__ +- `Attention Model `__ + + +Metric learning +---------------- + + +Metric learning is also called distance metric learning or similarity learning. Through the distance between learning objects, metric learning can be used to analyze the association and comparison of objects. It can be applied to practical problems like auxiliary classification, aggregation and also widely used in areas such as image retrieval and face recognition. In the past, for different tasks, it was necessary to select appropriate features and manually construct a distance function, but the metric learning can initially learn the metric distance function for a specific task from the main task according to different tasks. The combination of metric learning and deep learning has achieved good performance in the fields of face recognition/verification, human re-ID, image retrieval, etc. In this task, we mainly introduce the depth-based metric learning based on Fluid. The model contains loss functions such as triples and quaternions. + +- `Metric Learning `__ + + +Video classification +--------------------- + +Video classification is the basis of video comprehension tasks. Unlike image classification, classified objects are no longer still images, but a video object composed of multi-frame images containing speech data and motion information, so to understand video needs to get more context information. To be specific, it needs not only to understand what each frame image is, what it contains, but also to combine different frames to know the context related information. The video classification method mainly includes a method based on convolutional neural networks, recurrent neural networks, or a combination of the two. In this task, we introduce the Fluid-based video classification model, which currently includes the Temporal Segment Network (TSN) model, and we will continuously add more models. + + +- `TSN `__ + + + +Speech Recognition +-------------------- + +Automatic Speech Recognition (ASR) is a technique for transcribing vocabulary content in human voice into characters that can be input by a computer. The research on speech recognition has undergone a long term of exploration. After the HMM/GMM model, its development has been relatively slow. With the rise of deep learning, it has come to its spring. In the multi-language recognition task, the deep neural network (DNN) is used as an acoustic model and achieves better performance than the GMM, making ASR one of the most successful fields of deep learning applications. Due to the continuous improvement of recognition accuracy, more and more language technology products have been being implemented, such as language input methods, smart home devices represented by smart speakers, etc. Language-based interaction is profoundly changing our life. 
+ +Different from the end-to-end direct prediction for word distribution of the deep learning model `DeepSpeech `__ , this example is closer to the traditional language recognition process. With phoneme as the modeling unit, it focuses on the training of acoustic models in speech recognition, use `kaldi `__ for feature extraction and label alignment of audio data, and integrate kaldi's decoder to complete decoding. + +- `DeepASR `__ + +Machine Translation +--------------------- + +Machine Translation transforms a natural language (source language) into another natural language (target language), which is a very basic and important research direction in natural language processing. In the wave of globalization, the important role played by machine translation in promoting cross-language civilization communication is self-evident. Its development has gone through stages such as statistical machine translation and neural-network-based Neuro Machine Translation (NMT). After NMT matured, machine translation was really applied on a large scale. The early stage of NMT is mainly based on the recurrent neural network RNN. The current time step in the training process depends on the calculation of the previous time step, so it is difficult to parallelize the time steps to improve the training speed. Therefore, NMTs of non-RNN structures have emerged, such as structures based on convolutional neural networks CNN and structures based on Self-Attention. + +The Transformer implemented in this example is a machine translation model based on the self-attention mechanism, in which there is no more RNN or CNN structure, but fully utilizes Attention to learn the context dependency. Compared with RNN/CNN, in a single layer, this structure has lower computational complexity, easier parallelization, and easier modeling for long-range dependencies, and finally achieves the best translation effect among multiple languages. + + +- `Transformer `__ + +Reinforcement learning +------------------------- + +Reinforcement learning is an increasingly important machine learning direction in recent years, and especially Deep Reinforcement Learning (DRL), which combines deep learning and reinforcement learning, has achieved many amazing achievements. The well-known AlphaGo, which defeats the top-level chess players, is a typical example of DRL applications. In addition to the game field, other applications include robots, natural language processing and so on. + +The pioneering work of deep reinforcement learning is a successful application in Atari video games, which can directly accept high-dimensional input of video frames and predict the next action according to the image content end-to-end. The model used is called depth Q Network (Deep Q-Network, DQN). This example uses PaddlePaddle Fluid, our flexible framework, to implement DQN and its variants and test their performance in Atari games. + +- `DeepQNetwork `__ + +Chinese lexical analysis +--------------------------- + +Word Segmentation is the process of segmenting continuous natural language text into lexical sequences with semantic rationality and integrity. Because in Chinese, word is the most basic unit of semantics, and word segmentation is the basis of many natural language processing tasks such as text classification, sentiment analysis, and information retrieval. Part-of-speech Tagging is a process of assigning a category to each vocabulary in a natural language text. The part of speech category here includes nouns, verbs, adjectives, adverbs, and so on. 
Named Entity Recognition (NER), also known as "entity name identification", refers to the identification of entities with specific meanings in natural language text, including person names, place names, institution names, proper nouns, and so on. We unify these three tasks into a joint task called lexical analysis task. Based on deep neural network, we use massively labeled corpus for training, and provide an end-to-end solution. + +We named this joint Chinese lexical analysis solution LAC. LAC can be considered as an acronym for Lexical Analysis of Chinese, or as a recursive abbreviation for LAC Analyzes Chinese. + +- `LAC `__ + +Sentiment analysis +------------------- + +The sentiment analysis is for Chinese text with subjective description, which can automatically judge the emotional polarity category of the text and give corresponding confidence. The types of emotions are divided into positive, negative and neutral. Sentiment analysis can help companies understand user spending habits, analyze hot topics and crisis public opinion monitoring, and provide strong decision support for enterprises. This time we publicize the AI open platform to analyze the sentiment orientation using the `model `__, which is available to users. + +- `Senta `__ + +Semantic matching +------------------ + +In many scenarios of natural language processing, it is necessary to measure the semantic similarity of two texts. Such tasks are often called semantic matching. For example, the search results are sorted according to the similarity between the query and the candidate document; the text deduplication requires the calculation of the similarity between the texts, and the matching of the candidate answers and the questions in the question answering system. + +The DAM (Deep Attention Matching Network) introduced in this example is the work of Baidu Natural Language Processing Department published in ACL-2018, which is used for the selection of responses in multi-round dialogue of retrieval chat robots. Inspired by Transformer, DAM is based entirely on the attention mechanism. It uses the stack-type self-attention structure to learn the semantic representations of responses and contexts at different granularities, and then uses cross-attention to obtain relativity between responses and contexts. The performance on the two large-scale multi-round dialogue datasets is better than other models. + +- `Deep Attention Matching Network `__ + +AnyQ +---- + +`AnyQ `__\ (ANswer Your Questions) +The open source project mainly includes a question and answer system framework for the FAQ collection and a text semantic matching tool SimNet. The Q&A system framework adopts a setting-up manner and plug-in design. Each function is added through a plug-in form. Currently, 20+ plug-ins are open. Developers can use the AnyQ system to quickly build and customize FAQ Q&A systems for specific business scenarios and accelerate iterations and upgrades. + +SimNet is a semantic matching framework independently developed by Baidu's Natural Language Processing Department in 2013. The framework is widely used in Baidu's products, including core network structures such as BOW, CNN, RNN, MM-DNN. It is also integrated with mainstream semantic matching model in academic fields based on the framework, such as MatchPyramid, MV-LSTM, K-NRM. Models built by SimNet can be easily added to the AnyQ system to enhance the semantic matching capability of the AnyQ system. 
+ + +- `SimNet in PaddlePaddle Fluid `__ + +Machine reading comprehension +--------------------------------- + +Machine Reading Comprehension (MRC) is one of the core tasks in Natural Language Processing (NLP). The ultimate goal is to let machines read texts like humans, extract text information and answer related questions. Deep learning has been widely used in NLP in recent years, and the machine reading comprehension ability has been greatly improved. However, the machine reading comprehension of the current research uses artificially constructed data sets, and answers some relatively simple questions. There is still a clear gap to the data processed by humans, so there is an urgent need for large-scale real training data to promote the further development of MRC. + +Baidu reading comprehension dataset is an open-source real-world dataset publicized by Baidu Natural Language Processing Department. All the questions and original texts are derived from actual data (Baidu search engine data and Baidu know Q&A community), and the answer is given by humans. Each question corresponds to multiple answers. The dataset contains 200k questions, 1000k original text and 420k answers. It is currently the largest Chinese MRC dataset. Baidu also publicized the corresponding open-source reading comprehension model, called DuReader. DuReader adopts the current common network hierarchical structure, and captures the interaction between the problems and the original texts through the double attention mechanism to generate the original representation of the query-aware. Finally, based on the original text of query-aware, the answer scope is predicted by point network. + +- `DuReader in PaddlePaddle Fluid `__ + + +Personalized recommendation +--------------------------------- + +The recommendation system is playing an increasingly important role in the current Internet service. At present, most e-commerce systems, social networks, advertisement recommendation, and search engines all use various forms of personalized recommendation technology to help users quickly find the information they want. + +In an industrially adoptable recommendation system, the recommendation strategy is generally divided into multiple modules in series. Take the news recommendation system as an example. There are multiple procedures that can use deep learning techniques, such as automated annotation of news, personalized news recall, personalized matching and sorting. PaddlePaddle provides complete support for the training of recommendation algorithms and provides a variety of model configurations for users to choose from. + +- `TagSpace `_ +- `GRU4Rec `_ +- `SequenceSemanticRetrieval `_ +- `DeepCTR `_ +- `Multiview-Simnet `_ diff --git a/doc/paddle/user_guides/nlp_case/index_cn.rst b/doc/paddle/user_guides/nlp_case/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..dc001b9fc4734c4c9df59c7bcce7c7f7deffa782 --- /dev/null +++ b/doc/paddle/user_guides/nlp_case/index_cn.rst @@ -0,0 +1,15 @@ +################ +自然语言处理 +################ + +.. todo:: + +自然语言处理(Natural Language Processing)是人工智能和语言学领域的分支学科。此领域探讨如何处理及运用自然语言,特别是如何编程计算机以成功处理大量的自然语言数据。在这里PaddlePaddle为大家提供了三篇NLP领域的学习教程: + +.. 
toctree:: + :titlesonly: + + understand_sentiment/README.cn.md + label_semantic_roles/README.cn.md + machine_translation/README.cn.md + diff --git a/doc/paddle/user_guides/nlp_case/index_en.rst b/doc/paddle/user_guides/nlp_case/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..2805a5b9db11b0114aefa1a1e6f56653fb6e9100 --- /dev/null +++ b/doc/paddle/user_guides/nlp_case/index_en.rst @@ -0,0 +1,12 @@ +############################ +Natural Language Processing +############################ + + +.. toctree:: + :titlesonly: + + understand_sentiment/README.md + label_semantic_roles/README.md + machine_translation/README.md + diff --git a/doc/paddle/user_guides/nlp_case/label_semantic_roles/.gitignore b/doc/paddle/user_guides/nlp_case/label_semantic_roles/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..29b5622a53a1b0847e9f53febf1cc50dcf4f044a --- /dev/null +++ b/doc/paddle/user_guides/nlp_case/label_semantic_roles/.gitignore @@ -0,0 +1,12 @@ +data/train.list +data/test.* +data/conll05st-release.tar.gz +data/conll05st-release +data/predicate_dict +data/label_dict +data/word_dict +data/emb +data/feature +output +predict.res +train.log diff --git a/doc/paddle/user_guides/nlp_case/label_semantic_roles/.run_ce.sh b/doc/paddle/user_guides/nlp_case/label_semantic_roles/.run_ce.sh new file mode 100644 index 0000000000000000000000000000000000000000..4c5ae210ff5485c4c9266de73614ee0f4c4d6d6e --- /dev/null +++ b/doc/paddle/user_guides/nlp_case/label_semantic_roles/.run_ce.sh @@ -0,0 +1,4 @@ +#!/bin/bash +#This file is only used for continuous evaluation. +python train.py --enable_ce | python _ce.py + diff --git a/doc/paddle/user_guides/nlp_case/label_semantic_roles/README.cn.md b/doc/paddle/user_guides/nlp_case/label_semantic_roles/README.cn.md new file mode 100644 index 0000000000000000000000000000000000000000..9fe04d2ff80d9e934097935864408c63bd657006 --- /dev/null +++ b/doc/paddle/user_guides/nlp_case/label_semantic_roles/README.cn.md @@ -0,0 +1,572 @@ +# 语义角色标注 + +本教程源代码目录在[book/label_semantic_roles](https://github.com/PaddlePaddle/book/tree/develop/07.label_semantic_roles),初次使用请您参考[Book文档使用说明](https://github.com/PaddlePaddle/book/blob/develop/README.cn.md#运行这本书)。 + +### 说明 + +1. 本教程可支持在 CPU/GPU 环境下运行 + +2. Docker镜像支持的CUDA/cuDNN版本 + + 如果使用了Docker运行Book,请注意:这里所提供的默认镜像的GPU环境为 CUDA 8/cuDNN 5,对于NVIDIA Tesla V100等要求CUDA 9的 GPU,使用该镜像可能会运行失败; + +3. 文档和脚本中代码的一致性问题 + + 请注意:为使本文更加易读易用,我们拆分、调整了[train.py](https://github.com/PaddlePaddle/book/tree/develop/07.label_semantic_roles/train.py)的代码并放入本文。本文中代码与train.py的运行结果一致,可直接运行train.py进行验证。 + +## 背景介绍 + +自然语言分析技术大致分为三个层面:词法分析、句法分析和语义分析。语义角色标注是实现浅层语义分析的一种方式。在一个句子中,谓词是对主语的陈述或说明,指出“做什么”、“是什么”或“怎么样,代表了一个事件的核心,跟谓词搭配的名词称为论元。语义角色是指论元在动词所指事件中担任的角色。主要有:施事者(Agent)、受事者(Patient)、客体(Theme)、经验者(Experiencer)、受益者(Beneficiary)、工具(Instrument)、处所(Location)、目标(Goal)和来源(Source)等。 + +请看下面的例子,“遇到” 是谓词(Predicate,通常简写为“Pred”),“小明”是施事者(Agent),“小红”是受事者(Patient),“昨天” 是事件发生的时间(Time),“公园”是事情发生的地点(Location)。 + +

+
+

+ + +语义角色标注(Semantic Role Labeling,SRL)以句子的谓词为中心,不对句子所包含的语义信息进行深入分析,只分析句子中各成分与谓词之间的关系,即句子的谓词(Predicate)- 论元(Argument)结构,并用语义角色来描述这些结构关系,是许多自然语言理解任务(如信息抽取,篇章分析,深度问答等)的一个重要中间步骤。在研究中一般都假定谓词是给定的,所要做的就是找出给定谓词的各个论元和它们的语义角色。 + +传统的SRL系统大多建立在句法分析基础之上,通常包括5个流程: + +1. 构建一棵句法分析树,例如,图1是对上面例子进行依存句法分析得到的一棵句法树。 +2. 从句法树上识别出给定谓词的候选论元。 +3. 候选论元剪除;一个句子中的候选论元可能很多,候选论元剪除就是从大量的候选项中剪除那些最不可能成为论元的候选项。 +4. 论元识别:这个过程是从上一步剪除之后的候选中判断哪些是真正的论元,通常当做一个二分类问题来解决。 +5. 对第4步的结果,通过多分类得到论元的语义角色标签。可以看到,句法分析是基础,并且后续步骤常常会构造的一些人工特征,这些特征往往也来自句法分析。 + +
+
+图1. 依存句法分析句法树示例 +
然而,完全句法分析需要确定句子所包含的全部句法信息,并确定句子各成分之间的关系,是一个非常困难的任务,目前技术下的句法分析准确率并不高,句法分析的细微错误都会导致SRL的错误。为了降低问题的复杂度,同时获得一定的句法结构信息,“浅层句法分析”的思想应运而生。浅层句法分析也称为部分句法分析(partial parsing)或语块划分(chunking)。和完全句法分析得到一棵完整的句法树不同,浅层句法分析只需要识别句子中某些结构相对简单的独立成分,例如:动词短语,这些被识别出来的结构称为语块。为了回避 “无法获得准确率较高的句法树” 所带来的困难,一些研究\[[1](#参考文献)\]也提出了基于语块(chunk)的SRL方法。基于语块的SRL方法将SRL作为一个序列标注问题来解决。序列标注任务一般都会采用BIO表示方式来定义序列标注的标签集,我们先来介绍这种表示方法。在BIO表示法中,B代表语块的开始,I代表语块的中间,O代表不属于任何语块。通过B、I、O 三种标记将不同的语块赋予不同的标签,例如:对于一个由角色A拓展得到的语块组,将它所包含的第一个语块赋予标签B-A,将它所包含的其它语块赋予标签I-A,不属于任何论元的语块赋予标签O。

我们继续以上面的这句话为例,图2展示了BIO表示方法。
+
+图2. BIO标注方法示例 +
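为了更直观地展示 BIO 标注与论元片段之间的对应关系,下面给出一段示意性的解码代码:由 BIO 标注序列直接恢复出各论元的语义角色及其词片段。其中的函数名与示例标注均为说明而假设,并非本教程后文实际使用的接口:

```python
def bio_to_arguments(words, tags):
    """把 BIO 标注序列还原成 (语义角色, 词片段) 列表的示意实现。"""
    arguments = []
    cur_role, cur_span = None, []
    for word, tag in zip(words, tags):
        if tag.startswith("B-"):                    # 一个新语块开始
            if cur_role is not None:
                arguments.append((cur_role, cur_span))
            cur_role, cur_span = tag[2:], [word]
        elif tag.startswith("I-") and cur_role == tag[2:]:
            cur_span.append(word)                   # 语块内部,扩展当前片段
        else:                                       # O:不属于任何论元,结束当前片段
            if cur_role is not None:
                arguments.append((cur_role, cur_span))
            cur_role, cur_span = None, []
    if cur_role is not None:
        arguments.append((cur_role, cur_span))
    return arguments


# 以正文中的例句为例(标注仅为示意)
words = ["小明", "昨天", "在", "公园", "遇到", "了", "小红"]
tags = ["B-Agent", "B-Time", "B-Location", "I-Location", "B-V", "O", "B-Patient"]
print(bio_to_arguments(words, tags))
# [('Agent', ['小明']), ('Time', ['昨天']), ('Location', ['在', '公园']), ('V', ['遇到']), ('Patient', ['小红'])]
```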
+ +从上面的例子可以看到,根据序列标注结果可以直接得到论元的语义角色标注结果,是一个相对简单的过程。这种简单性体现在:(1)依赖浅层句法分析,降低了句法分析的要求和难度;(2)没有了候选论元剪除这一步骤;(3)论元的识别和论元标注是同时实现的。这种一体化处理论元识别和论元标注的方法,简化了流程,降低了错误累积的风险,往往能够取得更好的结果。 + +与基于语块的SRL方法类似,在本教程中我们也将SRL看作一个序列标注问题,不同的是,我们只依赖输入文本序列,不依赖任何额外的语法解析结果或是复杂的人造特征,利用深度神经网络构建一个端到端学习的SRL系统。我们以[CoNLL-2004 and CoNLL-2005 Shared Tasks](http://www.cs.upc.edu/~srlconll/)任务中SRL任务的公开数据集为例,实践下面的任务:给定一句话和这句话里的一个谓词,通过序列标注的方式,从句子中找到谓词对应的论元,同时标注它们的语义角色。 + +## 模型概览 + +循环神经网络(Recurrent Neural Network)是一种对序列建模的重要模型,在自然语言处理任务中有着广泛地应用。不同于前馈神经网络(Feed-forward Neural Network),RNN能够处理输入之间前后关联的问题。LSTM是RNN的一种重要变种,常用来学习长序列中蕴含的长程依赖关系,我们在[情感分析](https://github.com/PaddlePaddle/book/tree/develop/06.understand_sentiment)一篇中已经介绍过,这一篇中我们依然利用LSTM来解决SRL问题。 + +### 栈式循环神经网络(Stacked Recurrent Neural Network) + +深层网络有助于形成层次化特征,网络上层在下层已经学习到的初级特征基础上,形成更复杂的高级特征。尽管LSTM沿时间轴展开后等价于一个非常“深”的前馈网络,但由于LSTM各个时间步参数共享,$t-1$时刻状态到$t$时刻的映射,始终只经过了一次非线性映射,也就是说单层LSTM对状态转移的建模是 “浅” 的。堆叠多个LSTM单元,令前一个LSTM$t$时刻的输出,成为下一个LSTM单元$t$时刻的输入,帮助我们构建起一个深层网络,我们把它称为第一个版本的栈式循环神经网络。深层网络提高了模型拟合复杂模式的能力,能够更好地建模跨不同时间步的模式\[[2](#参考文献)\]。 + +然而,训练一个深层LSTM网络并非易事。纵向堆叠多个LSTM单元可能遇到梯度在纵向深度上传播受阻的问题。通常,堆叠4层LSTM单元可以正常训练,当层数达到4~8层时,会出现性能衰减,这时必须考虑一些新的结构以保证梯度纵向顺畅传播,这是训练深层LSTM网络必须解决的问题。我们可以借鉴LSTM解决 “梯度消失梯度爆炸” 问题的智慧之一:在记忆单元(Memory Cell)这条信息传播的路线上没有非线性映射,当梯度反向传播时既不会衰减、也不会爆炸。因此,深层LSTM模型也可以在纵向上添加一条保证梯度顺畅传播的路径。 + +一个LSTM单元完成的运算可以被分为三部分:(1)输入到隐层的映射(input-to-hidden) :每个时间步输入信息$x$会首先经过一个矩阵映射,再作为遗忘门,输入门,记忆单元,输出门的输入,注意,这一次映射没有引入非线性激活;(2)隐层到隐层的映射(hidden-to-hidden):这一步是LSTM计算的主体,包括遗忘门,输入门,记忆单元更新,输出门的计算;(3)隐层到输出的映射(hidden-to-output):通常是简单的对隐层向量进行激活。我们在第一个版本的栈式网络的基础上,加入一条新的路径:除上一层LSTM输出之外,将前层LSTM的输入到隐层的映射作为的一个新的输入,同时加入一个线性映射去学习一个新的变换。 + +图3是最终得到的栈式循环神经网络结构示意图。 + +

+
+图3. 基于LSTM的栈式循环神经网络结构示意图 +
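下面用一小段网络配置代码示意这种“带线性路径的堆叠”方式,它与后文“定义网络结构”一节中的完整实现是同一写法;这里只堆叠两层,并用一个占位的特征序列代替真实输入,变量名仅为示意而设:

```python
import paddle.fluid as fluid

hidden_dim = 512  # 与后文配置一致:dynamic_lstm 的 size 为实际隐层维度的 4 倍

# 仅为示意:用一个占位的特征序列代替词向量等真实输入
feature = fluid.data(
    name='feature', shape=[None, hidden_dim], dtype='float32', lod_level=1)

# 第一层:输入到隐层的映射 + LSTM
hidden_0 = fluid.layers.fc(input=feature, size=hidden_dim, act='tanh')
lstm_0 = fluid.layers.dynamic_lstm(
    input=hidden_0,
    size=hidden_dim,
    candidate_activation='relu',
    gate_activation='sigmoid',
    cell_activation='sigmoid')

# 第二层:除上一层 LSTM 的输出外,再把上一层“输入到隐层的映射”作为新的输入,
# 两者各经过一个线性变换后相加,为梯度在纵向上提供一条顺畅传播的路径
mix_hidden = fluid.layers.sums(input=[
    fluid.layers.fc(input=hidden_0, size=hidden_dim, act='tanh'),
    fluid.layers.fc(input=lstm_0, size=hidden_dim, act='tanh')
])
lstm_1 = fluid.layers.dynamic_lstm(
    input=mix_hidden,
    size=hidden_dim,
    candidate_activation='relu',
    gate_activation='sigmoid',
    cell_activation='sigmoid',
    is_reverse=True)  # 正向、反向交替堆叠,即可得到下一节介绍的双向结构
```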

+ +### 双向循环神经网络(Bidirectional Recurrent Neural Network) + +在LSTM中,$t$时刻的隐藏层向量编码了到$t$时刻为止所有输入的信息,但$t$时刻的LSTM可以看到历史,却无法看到未来。在绝大多数自然语言处理任务中,我们几乎总是能拿到整个句子。这种情况下,如果能够像获取历史信息一样,得到未来的信息,对序列学习任务会有很大的帮助。 + +为了克服这一缺陷,我们可以设计一种双向循环网络单元,它的思想简单且直接:对上一节的栈式循环神经网络进行一个小小的修改,堆叠多个LSTM单元,让每一层LSTM单元分别以:正向、反向、正向 …… 的顺序学习上一层的输出序列。于是,从第2层开始,$t$时刻我们的LSTM单元便总是可以看到历史和未来的信息。图4是基于LSTM的双向循环神经网络结构示意图。 + +

+
+图4. 基于LSTM的双向循环神经网络结构示意图 +

需要说明的是,这种双向RNN结构和Bengio等人在机器翻译任务中使用的双向RNN结构\[[3](#参考文献), [4](#参考文献)\] 并不相同,我们会在后续[机器翻译](https://github.com/PaddlePaddle/book/blob/develop/08.machine_translation/README.cn.md)任务中,介绍另一种双向循环神经网络。

### 条件随机场 (Conditional Random Field)

使用神经网络模型解决问题的思路通常是:前层网络学习输入的特征表示,网络的最后一层在特征基础上完成最终的任务。在SRL任务中,深层LSTM网络学习输入的特征表示,条件随机场(Conditional Random Field, CRF)在特征的基础上完成序列标注,处于整个网络的末端。

CRF是一种概率化结构模型,可以看作是一个概率无向图模型,结点表示随机变量,边表示随机变量之间的概率依赖关系。简单来讲,CRF学习条件概率$P(Y|X)$,其中 $X = (x_1, x_2, ... , x_n)$ 是输入序列,$Y = (y_1, y_2, ... , y_n)$ 是标记序列;解码过程是给定 $X$序列求解令$P(Y|X)$最大的$Y$序列,即$Y^* = \mbox{arg max}_{Y} P(Y | X)$。

序列标注任务只需要考虑输入和输出都是一个线性序列,并且由于我们只是将输入序列作为条件,不做任何条件独立假设,因此输入序列的元素之间并不存在图结构。综上,在序列标注任务中使用的是如图5所示的定义在链式图上的CRF,称之为线性链条件随机场(Linear Chain Conditional Random Field)。

+
+图5. 序列标注任务中使用的线性链条件随机场 +

根据线性链条件随机场上的因子分解定理\[[5](#参考文献)\],在给定观测序列$X$时,一个特定标记序列$Y$的概率可以定义为:

$$P(Y|X) = \frac{1}{Z(X)}\exp\left(\sum_{i=1}^{n}\left(\sum_{j}\lambda_{j}t_{j}(y_{i-1}, y_{i}, X, i) + \sum_{k}\mu_{k}s_{k}(y_{i}, X, i)\right)\right)$$

其中$Z(X)$是归一化因子,$t_j$ 是定义在边上的特征函数,依赖于当前和前一个位置,称为转移特征,表示对于输入序列$X$及其标注序列在 $i$及$i - 1$位置上标记的转移概率。$s_k$是定义在结点上的特征函数,称为状态特征,依赖于当前位置,表示对于观察序列$X$及其$i$位置的标记概率。$\lambda_j$ 和 $\mu_k$ 分别是转移特征函数和状态特征函数对应的权值。实际上,$t$和$s$可以用相同的数学形式表示,再对转移特征和状态特征在各个位置$i$求和有:$f_{k}(Y, X) = \sum_{i=1}^{n}f_k({y_{i - 1}, y_i, X, i})$,把$f$统称为特征函数,于是$P(Y|X)$可表示为:

$$P(Y|X) = \frac{1}{Z(X)}\exp\left(\sum_{k}\omega_{k}f_{k}(Y, X)\right)$$

$\omega$是特征函数对应的权值,是CRF模型要学习的参数。训练时,对于给定的输入序列和对应的标记序列集合$D = \left[(X_1, Y_1), (X_2 , Y_2) , ... , (X_N, Y_N)\right]$ ,通过正则化的极大似然估计,求解如下优化目标:

$$\min_{\omega}\; L(\omega, D) = -\sum_{m=1}^{N}\log P(Y_m|X_m; \omega) + \frac{C}{2}\lVert \omega \rVert^{2}$$

其中$C$为正则化强度系数。

+ +这个优化目标可以通过反向传播算法和整个神经网络一起求解。解码时,对于给定的输入序列$X$,通过解码算法(通常有:维特比算法、Beam Search)求令出条件概率$\bar{P}(Y|X)$最大的输出序列 $\bar{Y}$。 + +### 深度双向LSTM(DB-LSTM)SRL模型 + +在SRL任务中,输入是 “谓词” 和 “一句话”,目标是从这句话中找到谓词的论元,并标注论元的语义角色。如果一个句子含有$n$个谓词,这个句子会被处理$n$次。一个最为直接的模型是下面这样: + +1. 构造输入; + - 输入1是谓词,输入2是句子 + - 将输入1扩展成和输入2一样长的序列,用one-hot方式表示; +2. one-hot方式的谓词序列和句子序列通过词表,转换为实向量表示的词向量序列; +3. 将步骤2中的2个词向量序列作为双向LSTM的输入,学习输入序列的特征表示; +4. CRF以步骤3中模型学习到的特征为输入,以标记序列为监督信号,实现序列标注; + +大家可以尝试上面这种方法。这里,我们提出一些改进,引入两个简单但对提高系统性能非常有效的特征: + +- 谓词上下文:上面的方法中,只用到了谓词的词向量表达谓词相关的所有信息,这种方法始终是非常弱的,特别是如果谓词在句子中出现多次,有可能引起一定的歧义。从经验出发,谓词前后若干个词的一个小片段,能够提供更丰富的信息,帮助消解歧义。于是,我们把这样的经验也添加到模型中,为每个谓词同时抽取一个“谓词上下文” 片段,也就是从这个谓词前后各取$n$个词构成的一个窗口片段; +- 谓词上下文区域标记:为句子中的每一个词引入一个0-1二值变量,表示它们是否在“谓词上下文”片段中; + +修改后的模型如下(图6是一个深度为4的模型结构示意图): + +1. 构造输入 + - 输入1是句子序列,输入2是谓词序列,输入3是谓词上下文,从句子中抽取这个谓词前后各$n$个词,构成谓词上下文,用one-hot方式表示,输入4是谓词上下文区域标记,标记了句子中每一个词是否在谓词上下文中; + - 将输入2~3均扩展为和输入1一样长的序列; +2. 输入1~4均通过词表取词向量转换为实向量表示的词向量序列;其中输入1、3共享同一个词表,输入2和4各自独有词表; +3. 第2步的4个词向量序列作为双向LSTM模型的输入;LSTM模型学习输入序列的特征表示,得到新的特性表示序列; +4. CRF以第3步中LSTM学习到的特征为输入,以标记序列为监督信号,完成序列标注; + +
+
+图6. SRL任务上的深层双向LSTM模型 +
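上文提到的“谓词上下文”和“谓词上下文区域标记”两个特征,可以按下面的方式从句子和谓词位置直接构造。这段代码只是对特征含义的示意(函数名为说明而假设),并非数据预处理脚本的实际接口,其输出与下文“数据介绍”中给出的样本一致:

```python
def extract_predicate_context(words, predicate_index, n=2):
    """抽取谓词前后各 n 个词构成的上下文窗口,并生成 0/1 区域标记(示意实现)。

    返回 (context, marks):
      context —— 长度为 2n+1 的词片段,越界位置用 '×' 填充;
      marks   —— 与句子等长的 0/1 序列,1 表示该词落在谓词上下文窗口内。
    """
    context = [
        words[i] if 0 <= i < len(words) else '×'
        for i in range(predicate_index - n, predicate_index + n + 1)
    ]
    marks = [
        1 if predicate_index - n <= i <= predicate_index + n else 0
        for i in range(len(words))
    ]
    return context, marks


# 对应“数据介绍”中的样本:谓词为 set(下标 6),窗口大小为 5(即 n = 2)
words = ['A', 'record', 'date', 'has', "n't", 'been', 'set', '.']
context, marks = extract_predicate_context(words, predicate_index=6, n=2)
print(context)  # ["n't", 'been', 'set', '.', '×']
print(marks)    # [0, 0, 0, 0, 1, 1, 1, 1]
```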
+ + +## 数据介绍 + +在此教程中,我们选用[CoNLL 2005](http://www.cs.upc.edu/~srlconll/)SRL任务开放出的数据集作为示例。需要特别说明的是,CoNLL 2005 SRL任务的训练数集和开发集在比赛之后并非免费进行公开,目前,能够获取到的只有测试集,包括Wall Street Journal的23节和Brown语料集中的3节。在本教程中,我们以测试集中的WSJ数据为训练集来讲解模型。但是,由于测试集中样本的数量远远不够,如果希望训练一个可用的神经网络SRL系统,请考虑付费获取全量数据。 + +原始数据中同时包括了词性标注、命名实体识别、语法解析树等多种信息。本教程中,我们使用test.wsj文件夹中的数据进行训练和测试,并只会用到words文件夹(文本序列)和props文件夹(标注结果)下的数据。本教程使用的数据目录如下: + +```text +conll05st-release/ +└── test.wsj + ├── props # 标注结果 + └── words # 输入文本序列 +``` + +标注信息源自Penn TreeBank\[[7](#参考文献)\]和PropBank\[[8](#参考文献)\]的标注结果。PropBank标注结果的标签和我们在文章一开始示例中使用的标注结果标签不同,但原理是相同的,关于标注结果标签含义的说明,请参考论文\[[9](#参考文献)\]。 + +原始数据需要进行数据预处理才能被PaddlePaddle处理,预处理包括下面几个步骤: + +1. 将文本序列和标记序列其合并到一条记录中; +2. 一个句子如果含有$n$个谓词,这个句子会被处理$n$次,变成$n$条独立的训练样本,每个样本一个不同的谓词; +3. 抽取谓词上下文和构造谓词上下文区域标记; +4. 构造以BIO法表示的标记; +5. 依据词典获取词对应的整数索引。 + +预处理完成之后一条训练样本数据包含9个域,分别是:句子序列、谓词、谓词上下文(占 5 列)、谓词上下区域标志、标注序列。下表是一条训练样本的示例。 + +| 句子序列 | 谓词 | 谓词上下文(窗口 = 5) | 谓词上下文区域标记 | 标注序列 | +|---|---|---|---|---| +| A | set | n't been set . × | 0 | B-A1 | +| record | set | n't been set . × | 0 | I-A1 | +| date | set | n't been set . × | 0 | I-A1 | +| has | set | n't been set . × | 0 | O | +| n't | set | n't been set . × | 1 | B-AM-NEG | +| been | set | n't been set . × | 1 | O | +| set | set | n't been set . × | 1 | B-V | +| . | set | n't been set . × | 1 | O | + + +除数据之外,我们同时提供了以下资源: + +| 文件名称 | 说明 | +|---|---| +| word_dict | 输入句子的词典,共计44068个词 | +| label_dict | 标记的词典,共计106个标记 | +| predicate_dict | 谓词的词典,共计3162个词 | +| emb | 一个训练好的词表,32维 | + +我们在英文维基百科上训练语言模型得到了一份词向量用来初始化SRL模型。在SRL模型训练过程中,词向量不再被更新。关于语言模型和词向量可以参考[词向量](https://github.com/PaddlePaddle/book/blob/develop/04.word2vec/README.cn.md) 这篇教程。我们训练语言模型的语料共有995,000,000个token,词典大小控制为4900,000词。CoNLL 2005训练语料中有5%的词不在这4900,000个词中,我们将它们全部看作未登录词,用``表示。 + +获取词典,打印词典大小: + +```python +from __future__ import print_function + +import math, os +import numpy as np +import paddle +import paddle.dataset.conll05 as conll05 +import paddle.fluid as fluid +import six +import time + +with_gpu = os.getenv('WITH_GPU', '0') != '0' + +word_dict, verb_dict, label_dict = conll05.get_dict() +word_dict_len = len(word_dict) +label_dict_len = len(label_dict) +pred_dict_len = len(verb_dict) + +print('word_dict_len: ', word_dict_len) +print('label_dict_len: ', label_dict_len) +print('pred_dict_len: ', pred_dict_len) +``` + +## 模型配置说明 + +- 定义输入数据维度及模型超参数。 + +```python +mark_dict_len = 2 # 谓上下文区域标志的维度,是一个0-1 2值特征,因此维度为2 +word_dim = 32 # 词向量维度 +mark_dim = 5 # 谓词上下文区域通过词表被映射为一个实向量,这个是相邻的维度 +hidden_dim = 512 # LSTM隐层向量的维度 : 512 / 4 +depth = 8 # 栈式LSTM的深度 +mix_hidden_lr = 1e-3 # linear_chain_crf层的基础学习率 + +IS_SPARSE = True # 是否以稀疏方式更新embedding +PASS_NUM = 10 # 训练轮数 +BATCH_SIZE = 10 # batch size 大小 + +embedding_name = 'emb' +``` + +这里需要特别说明的是,参数 `hidden_dim = 512` 实际指定了LSTM隐层向量的维度为128,关于这一点请参考PaddlePaddle官方文档中[dynamic_lstm](http://www.paddlepaddle.org/documentation/docs/zh/1.2/api_cn/layers_cn.html#dynamic-lstm)的说明。 + +- 如上文提到,我们用基于英文维基百科训练好的词向量来初始化序列输入、谓词上下文总共6个特征的embedding层参数,在训练中不更新。 + +```python +# 这里加载PaddlePaddle保存的二进制参数 +def load_parameter(file_name, h, w): + with open(file_name, 'rb') as f: + f.read(16) # skip header. 
+ return np.fromfile(f, dtype=np.float32).reshape(h, w) +``` + +## 训练模型 + +- 我们根据网络拓扑结构和模型参数来进行训练,在构造时还需指定优化方法,这里使用最基本的SGD方法(momentum设置为0),同时设定了学习率、正则等。 + +定义训练过程的超参数 +```python +use_cuda = False #在cpu上执行训练 +save_dirname = "label_semantic_roles.inference.model" #训练得到的模型参数保存在文件中 +is_local = True +``` + +### 数据输入层定义 +定义了模型输入特征的格式,包括句子序列、谓词、谓词上下文的5个特征、和谓词上下区域标志 + +```python +# 句子序列 +word = fluid.data( + name='word_data', shape=[None, 1], dtype='int64', lod_level=1) + +# 谓词 +predicate = fluid.data( + name='verb_data', shape=[None, 1], dtype='int64', lod_level=1) + +# 谓词上下文5个特征 +ctx_n2 = fluid.data( + name='ctx_n2_data', shape=[None, 1], dtype='int64', lod_level=1) +ctx_n1 = fluid.data( + name='ctx_n1_data', shape=[None, 1], dtype='int64', lod_level=1) +ctx_0 = fluid.data( + name='ctx_0_data', shape=[None, 1], dtype='int64', lod_level=1) +ctx_p1 = fluid.data( + name='ctx_p1_data', shape=[None, 1], dtype='int64', lod_level=1) +ctx_p2 = fluid.data( + name='ctx_p2_data', shape=[None, 1], dtype='int64', lod_level=1) + +# 谓词上下区域标志 +mark = fluid.data( + name='mark_data', shape=[None, 1], dtype='int64', lod_level=1) +``` +### 定义网络结构 +首先预训练并定义模型输入层 + +```python +#预训练谓词和谓词上下区域标志 +predicate_embedding = fluid.embedding( + input=predicate, + size=[pred_dict_len, word_dim], + dtype='float32', + is_sparse=IS_SPARSE, + param_attr='vemb') + +mark_embedding = fluid.embedding( + input=mark, + size=[mark_dict_len, mark_dim], + dtype='float32', + is_sparse=IS_SPARSE) + +#句子序列和谓词上下文5个特征并预训练 +word_input = [word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2] +# 因词向量是预训练好的,这里不再训练embedding表, +# 参数属性trainable设置成False阻止了embedding表在训练过程中被更新 +emb_layers = [ + fluid.embedding( + size=[word_dict_len, word_dim], + input=x, + param_attr=fluid.ParamAttr( + name=embedding_name, trainable=False)) for x in word_input +] +#加入谓词和谓词上下区域标志的预训练结果 +emb_layers.append(predicate_embedding) +emb_layers.append(mark_embedding) +``` +定义8个LSTM单元以“正向/反向”的顺序对所有输入序列进行学习。 + +```python +# 共有8个LSTM单元被训练,每个单元的方向为从左到右或从右到左, +# 由参数`is_reverse`确定 +# 第一层栈结构 +hidden_0_layers = [ + fluid.layers.fc(input=emb, size=hidden_dim, act='tanh') + for emb in emb_layers +] + +hidden_0 = fluid.layers.sums(input=hidden_0_layers) + +lstm_0 = fluid.layers.dynamic_lstm( + input=hidden_0, + size=hidden_dim, + candidate_activation='relu', + gate_activation='sigmoid', + cell_activation='sigmoid') + +# 用直连的边来堆叠L-LSTM、R-LSTM +input_tmp = [hidden_0, lstm_0] + +# 其余的栈结构 +for i in range(1, depth): + mix_hidden = fluid.layers.sums(input=[ + fluid.layers.fc(input=input_tmp[0], size=hidden_dim, act='tanh'), + fluid.layers.fc(input=input_tmp[1], size=hidden_dim, act='tanh') + ]) + + lstm = fluid.layers.dynamic_lstm( + input=mix_hidden, + size=hidden_dim, + candidate_activation='relu', + gate_activation='sigmoid', + cell_activation='sigmoid', + is_reverse=((i % 2) == 1)) + + input_tmp = [mix_hidden, lstm] + +# 取最后一个栈式LSTM的输出和这个LSTM单元的输入到隐层映射, +# 经过一个全连接层映射到标记字典的维度,来学习 CRF 的状态特征 +feature_out = fluid.layers.sums(input=[ + fluid.layers.fc(input=input_tmp[0], size=label_dict_len, act='tanh'), + fluid.layers.fc(input=input_tmp[1], size=label_dict_len, act='tanh') +]) + +# 标注序列 +target = fluid.data( + name='target', shape=[None, 1], dtype='int64', lod_level=1) + +# 学习 CRF 的转移特征 +crf_cost = fluid.layers.linear_chain_crf( + input=feature_out, + label=target, + param_attr=fluid.ParamAttr( + name='crfw', learning_rate=mix_hidden_lr)) + + +avg_cost = fluid.layers.mean(crf_cost) + +# 使用最基本的SGD优化方法(momentum设置为0) +sgd_optimizer = fluid.optimizer.SGD( + learning_rate=fluid.layers.exponential_decay( + 
learning_rate=0.01, + decay_steps=100000, + decay_rate=0.5, + staircase=True)) + +sgd_optimizer.minimize(avg_cost) + + +``` + +数据介绍部分提到CoNLL 2005训练集付费,这里我们使用测试集训练供大家学习。conll05.test()每次产生一条样本,包含9个特征,shuffle和组完batch后作为训练的输入。 + +```python +crf_decode = fluid.layers.crf_decoding( + input=feature_out, param_attr=fluid.ParamAttr(name='crfw')) + +train_data = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.conll05.test(), buf_size=8192), + batch_size=BATCH_SIZE) + +place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + +``` +通过feeder来指定每一个数据和data_layer的对应关系, 下面的feeder表示 conll05.test()产生数据的第0列对应的data_layer是 `word`。 + +```python +feeder = fluid.DataFeeder( + feed_list=[ + word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, predicate, mark, target + ], + place=place) +exe = fluid.Executor(place) +``` +开始训练 + +```python +main_program = fluid.default_main_program() + +exe.run(fluid.default_startup_program()) +embedding_param = fluid.global_scope().find_var( + embedding_name).get_tensor() +embedding_param.set( + load_parameter(conll05.get_embedding(), word_dict_len, word_dim), + place) + +start_time = time.time() +batch_id = 0 +for pass_id in six.moves.xrange(PASS_NUM): + for data in train_data(): + cost = exe.run(main_program, + feed=feeder.feed(data), + fetch_list=[avg_cost]) + cost = cost[0] + + if batch_id % 10 == 0: + print("avg_cost: " + str(cost)) + if batch_id != 0: + print("second per batch: " + str((time.time( + ) - start_time) / batch_id)) + # Set the threshold low to speed up the CI test + if float(cost) < 60.0: + if save_dirname is not None: + fluid.io.save_inference_model(save_dirname, [ + 'word_data', 'verb_data', 'ctx_n2_data', + 'ctx_n1_data', 'ctx_0_data', 'ctx_p1_data', + 'ctx_p2_data', 'mark_data' + ], [feature_out], exe) + break + + batch_id = batch_id + 1 +``` + + +## 应用模型 + +训练完成之后,需要依据某个我们关心的性能指标选择最优的模型进行预测,可以简单的选择测试集上标记错误最少的那个模型。以下我们给出一个使用训练后的模型进行预测的示例。 + +首先设置预测过程的参数 + +```python +use_cuda = False #在cpu上进行预测 +save_dirname = "label_semantic_roles.inference.model" #调用训练好的模型进行预测 + +place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() +exe = fluid.Executor(place) +``` + +设置输入,用LoDTensor来表示输入的词序列,这里每个词的形状 base_shape都是[1],是因为每个词都是用一个id来表示的。假如基于长度的LoD是[[3, 4, 2]],这是一个单层的LoD,那么构造出的LoDTensor就包含3个序列,其长度分别为3、4和2。 + +注意LoD是个列表的列表 + + +```python +lod = [[3, 4, 2]] +base_shape = [1] + +# 构造假数据作为输入,整数随机数的范围是[low, high] +word = fluid.create_random_int_lodtensor( + lod, base_shape, place, low=0, high=word_dict_len - 1) +pred = fluid.create_random_int_lodtensor( + lod, base_shape, place, low=0, high=pred_dict_len - 1) +ctx_n2 = fluid.create_random_int_lodtensor( + lod, base_shape, place, low=0, high=word_dict_len - 1) +ctx_n1 = fluid.create_random_int_lodtensor( + lod, base_shape, place, low=0, high=word_dict_len - 1) +ctx_0 = fluid.create_random_int_lodtensor( + lod, base_shape, place, low=0, high=word_dict_len - 1) +ctx_p1 = fluid.create_random_int_lodtensor( + lod, base_shape, place, low=0, high=word_dict_len - 1) +ctx_p2 = fluid.create_random_int_lodtensor( + lod, base_shape, place, low=0, high=word_dict_len - 1) +mark = fluid.create_random_int_lodtensor( + lod, base_shape, place, low=0, high=mark_dict_len - 1) +``` + +使用fluid.io.load_inference_model加载inference_program,feed_target_names是模型的输入变量的名称,fetch_targets是预测对象。 + +```python +[inference_program, feed_target_names, + fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) +``` +构造feed字典 {feed_target_name: feed_target_data},results是由预测目标构成的列表 + +```python +assert feed_target_names[0] == 'word_data' +assert 
feed_target_names[1] == 'verb_data' +assert feed_target_names[2] == 'ctx_n2_data' +assert feed_target_names[3] == 'ctx_n1_data' +assert feed_target_names[4] == 'ctx_0_data' +assert feed_target_names[5] == 'ctx_p1_data' +assert feed_target_names[6] == 'ctx_p2_data' +assert feed_target_names[7] == 'mark_data' +``` +执行预测 + +```python +results = exe.run(inference_program, + feed={ + feed_target_names[0]: word, + feed_target_names[1]: pred, + feed_target_names[2]: ctx_n2, + feed_target_names[3]: ctx_n1, + feed_target_names[4]: ctx_0, + feed_target_names[5]: ctx_p1, + feed_target_names[6]: ctx_p2, + feed_target_names[7]: mark + }, + fetch_list=fetch_targets, + return_numpy=False) +``` + +输出结果 + +```python +print(results[0].lod()) +np_data = np.array(results[0]) +print("Inference Shape: ", np_data.shape) +``` + + +## 总结 + +语义角色标注是许多自然语言理解任务的重要中间步骤。这篇教程中我们以语义角色标注任务为例,介绍如何利用PaddlePaddle进行序列标注任务。教程中所介绍的模型来自我们发表的论文\[[10](#参考文献)\]。由于 CoNLL 2005 SRL任务的训练数据目前并非完全开放,教程中只使用测试数据作为示例。在这个过程中,我们希望减少对其它自然语言处理工具的依赖,利用神经网络数据驱动、端到端学习的能力,得到一个和传统方法可比、甚至更好的模型。在论文中我们证实了这种可能性。关于模型更多的信息和讨论可以在论文中找到。 + + +## 参考文献 +1. Sun W, Sui Z, Wang M, et al. [Chinese semantic role labeling with shallow parsing](http://www.aclweb.org/anthology/D09-1#page=1513)[C]//Proceedings of the 2009 Conference on Empirical Methods in Natural Language Processing: Volume 3-Volume 3. Association for Computational Linguistics, 2009: 1475-1483. +2. Pascanu R, Gulcehre C, Cho K, et al. [How to construct deep recurrent neural networks](https://arxiv.org/abs/1312.6026)[J]. arXiv preprint arXiv:1312.6026, 2013. +3. Cho K, Van Merriënboer B, Gulcehre C, et al. [Learning phrase representations using RNN encoder-decoder for statistical machine translation](https://arxiv.org/abs/1406.1078)[J]. arXiv preprint arXiv:1406.1078, 2014. +4. Bahdanau D, Cho K, Bengio Y. [Neural machine translation by jointly learning to align and translate](https://arxiv.org/abs/1409.0473)[J]. arXiv preprint arXiv:1409.0473, 2014. +5. Lafferty J, McCallum A, Pereira F. [Conditional random fields: Probabilistic models for segmenting and labeling sequence data](https://repository.upenn.edu/cgi/viewcontent.cgi?article=1162&context=cis_papers)[C]//Proceedings of the eighteenth international conference on machine learning, ICML. 2001, 1: 282-289. +6. 李航. 统计学习方法[J]. 清华大学出版社, 北京, 2012. +7. Marcus M P, Marcinkiewicz M A, Santorini B. [Building a large annotated corpus of English: The Penn Treebank](http://repository.upenn.edu/cgi/viewcontent.cgi?article=1246&context=cis_reports)[J]. Computational linguistics, 1993, 19(2): 313-330. +8. Palmer M, Gildea D, Kingsbury P. [The proposition bank: An annotated corpus of semantic roles](http://www.mitpressjournals.org/doi/pdfplus/10.1162/0891201053630264)[J]. Computational linguistics, 2005, 31(1): 71-106. +9. Carreras X, Màrquez L. [Introduction to the CoNLL-2005 shared task: Semantic role labeling](http://www.cs.upc.edu/~srlconll/st05/papers/intro.pdf)[C]//Proceedings of the Ninth Conference on Computational Natural Language Learning. Association for Computational Linguistics, 2005: 152-164. +10. Zhou J, Xu W. [End-to-end learning of semantic role labeling using recurrent neural networks](http://www.aclweb.org/anthology/P/P15/P15-1109.pdf)[C]//Proceedings of the Annual Meeting of the Association for Computational Linguistics. 2015. + +
+知识共享许可协议
本教程PaddlePaddle 创作,采用 知识共享 署名-相同方式共享 4.0 国际 许可协议进行许可。 diff --git a/doc/paddle/user_guides/nlp_case/label_semantic_roles/README.md b/doc/paddle/user_guides/nlp_case/label_semantic_roles/README.md new file mode 100644 index 0000000000000000000000000000000000000000..00c5f627fb7eaeea22b37a2fb5744c617f72ab41 --- /dev/null +++ b/doc/paddle/user_guides/nlp_case/label_semantic_roles/README.md @@ -0,0 +1,556 @@ + +# Label Semantic Roles + +The source code of this tutorial is in [book/label_semantic_roles](https://github.com/PaddlePaddle/book/tree/develop/07.label_semantic_roles). For the new users to Paddle book, please refer to [Book Documentation Instructions](https://github.com/PaddlePaddle/book#running-the-book) . + +## Background + +Natural language analysis techniques are roughly divided into three levels: lexical analysis, syntactic analysis, and semantic analysis. Labeling semantic roles is a way to implement shallow semantic analysis. In a sentence, the predicate is a statement or explanation of the subject, pointing out "what to do", "what is it" or "how is it", which represents the majority of an event. The noun with a predicate is called argument. The semantic role is the role of argument in the events. It mainly includes: Agent, Patient, Theme, Experiencer, Beneficiary, Instrument , Location, Goal, Source and so on. + +Please look at the following example. "Encounter" is a predicate (Predicate, usually abbreviated as "Pred"), "Xiaoming" is an agent, "Xiaohong" is a patient, "Yesterday" is the time when the event occurred, the "park" is the location where the event occurred. + +$$\mbox{[Xiaoming]}_{\mbox{Agent}}\mbox{[yesterday]}_{\mbox{Time}}\mbox{[evening]}_\mbox{Time}\mbox{in[Park]}_{\mbox{Location}}\mbox{[encounter]}_{\mbox{Predicate}}\mbox{[Xiaohong]}_{\mbox{Patient}}\mbox{. }$$ + +Semantic role labeling (SRL) is centered on the predicate of the sentence. It does not analyze the semantic information contained in the sentence. It only analyzes the relationship between the components and the predicate in the sentence, that is, the predicate of the sentence--the Argument structure. And using semantic roles to describe these structural relationships is an important intermediate step in many natural language understanding tasks (such as information extraction, text analysis, deep question and answer, etc.). It is generally assumed in the research that the predicate is given, and all that has to be done is to find the individual arguments of the given predicate and their semantic roles. + +Traditional SRL systems are mostly based on syntactic analysis and usually consist of five processes: + +1. Construct a parse tree. For example, Figure 1 is a syntactic tree for the dependency syntax analysis of the above example. +2. Identify candidate arguments for a given predicate from the syntax tree. +3. Prune the candidate arguments; there may be many candidate arguments in a sentence, and pruning candidate arguments is pruned out of a large number of candidates that are the most unlikely candidates arguments. +4. Argument recognition: This process is to judge which is the real argument from the candidates after the previous pruning, usually as a two-classification problem. +5. For the result of step 4, get the semantic role label of the argument by multi-classification. It can be seen that syntactic analysis is the basis, and some artificial features are often constructed in subsequent steps, and these features are often also derived from syntactic analysis. + +
+
+Figure 1. Example of dependency syntax analysis tree +
+ +However, complete syntactic analysis needs to determine all the syntactic information contained in a sentence and the relationship between the components of the sentence. It is a very difficult task. The accuracy of syntactic analysis in current technology is not good, and the little errors in syntactic analysis will caused the SRL error. In order to reduce the complexity of the problem and obtain certain syntactic structure information, the idea of ​​"shallow syntactic analysis" came into being. Shallow syntactic analysis is also called partial parsing or chunking. Different from full syntactic analysis which obtains a complete syntactic tree, shallow syntactic analysis only needs to identify some relatively simple independent components of the sentence, such as verb phrases, these identified structures are called chunks. In order to avoid the difficulties caused by the failure to obtain a syntactic tree with high accuracy, some studies \[[1](#References)\] also proposed a chunk-based SRL method. The block-based SRL method solves the SRL as a sequence labeling problem. Sequence labeling tasks generally use the BIO representation to define the set of labels for sequence annotations. Firstly, Let's introduce this representation. In the BIO notation, B stands for the beginning of the block, I stands for the middle of the block, and O stands for the end of the block. Different blocks are assigned different labels by B, I, and O. For example, for a block group extended by role A, the first block it contains is assigned to tag B-A, the other blocks it contains are assigned to tag I-A, and the block not belonging to any argument is assigned tag O. + +Let's continue to take the above sentence as an example. Figure 1 shows the BIO representation method. + +
+
+Figure 2. Example of BIO labeling method +
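+
+To make the BIO rule concrete, the short sketch below converts span-style role annotations into per-token BIO tags. The function and the toy spans are written only for this illustration and are not part of the dataset code used later; the sentence and its roles mirror the training sample shown in the Data Introduction section.
+
+```python
+# Illustrative only: turn (start, end, role) spans into BIO tags.
+def spans_to_bio(num_tokens, spans):
+    tags = ['O'] * num_tokens              # tokens outside every argument
+    for start, end, role in spans:         # end is exclusive
+        tags[start] = 'B-' + role          # first token of the argument
+        for i in range(start + 1, end):
+            tags[i] = 'I-' + role          # remaining tokens of the argument
+    return tags
+
+tokens = ['A', 'record', 'date', 'has', "n't", 'been', 'set', '.']
+spans = [(0, 3, 'A1'), (4, 5, 'AM-NEG'), (6, 7, 'V')]  # predicate "set"
+print(spans_to_bio(len(tokens), spans))
+# ['B-A1', 'I-A1', 'I-A1', 'O', 'B-AM-NEG', 'O', 'B-V', 'O']
+```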
+ +As can be seen from the above example, it is a relatively simple process to directly get the semantic roles labeling result of the argument according to the sequence labeling result. This simplicity is reflected in: (1) relying on shallow syntactic analysis, reducing the requirements and difficulty of syntactic analysis; (2) there is no candidate argument to pruning in this step; (3) the identification and labeling of arguments are realized at the same time. This integrated approach to arguments identification and labeling simplifies the process, reduces the risk of error accumulation, and often achieves better results. + +Similar to the block-based SRL method, in this tutorial we also regard the SRL as a sequence labeling problem. The difference is that we only rely on input text sequences, without relying on any additional syntax analysis results or complex artificial features. And constructing an end-to-end learning SRL system by using deep neural networks. Let's take the public data set of the SRL task in the [CoNLL-2004 and CoNLL-2005 Shared Tasks](http://www.cs.upc.edu/~srlconll/) task as an example to practice the following tasks. Giving a sentence and a predicate in this sentence, through the way of sequence labeling, find the arguments corresponding to the predicate from the sentence, and mark their semantic roles. + +## Model Overview + +Recurrent Neural Network is an important model for modeling sequences. It is widely used in natural language processing tasks. Unlike the feed-forward neural network, the RNN is able to handle the contextual correlation between inputs. LSTM is an important variant of RNN that is commonly used to learn the long-range dependencies contained in long sequences. We have already introduced in [Sentiment Analysis](https://github.com/PaddlePaddle/book/tree/develop/06.understand_sentiment), in this article we still use LSTM to solve the SRL problem. + +### Stacked Recurrent Neural Network + +The deep network helps to form hierarchical features, and the upper layers of the network form more complex advanced features based on the primary features that have been learned in the lower layers. Although the LSTM is expanded along the time axis and is equivalent to a very "deep" feedforward network. However, since the LSTM time step parameters are shared, the mapping of the $t-1$ time state to the time of $t$ always passes only one non-linear mapping. It means that the modeling of state transitions by single-layer LSTM is “shallow”. Stacking multiple LSTM units, making the output of the previous LSTM$t$ time as the input of the next LSTM unit $t$ time, helps us build a deep network. We call it the first version of the stack ecurrent neural networks. Deep networks improve the ability of models to fit complex patterns and better model patterns across different time steps\[[2](#References)\]. + +However, training a deep LSTM network is not an easy task. Stacking multiple LSTM cells in portrait orientation may encounter problems with the propagation of gradients in the longitudinal depth. Generally, stacking 4 layers of LSTM units can be trained normally. When the number of layers reaches 4~8 layers, performance degradation will occur. At this time, some new structures must be considered to ensure the gradient is transmitted vertically and smoothly. This is a problem that must be solved in training a deep LSTM networks. 
We can learn from LSTM to solve one of the tips of the "gradient disappearance and gradient explosion" problem: there is no nonlinear mapping on the information propagation route of Memory Cell, and neither gradient decay nor explosion when the gradient propagates back. Therefore, the deep LSTM model can also add a path that ensures smooth gradient propagation in the vertical direction. + +The operation performed by an LSTM unit can be divided into three parts: (1) Input-to-hidden: Each time step input information $x$ will first pass through a matrix map and then as a forgetting gate, input gate, memory unit, output gate's input. Note that this mapping does not introduce nonlinear activation; (2) Hidden-to-hidden: this step is the main body of LSTM calculation, including forgotten gate, input gate, memory unit update, output gate calculation; (3) hidden-to-output: usually simple to activate the hidden layer vector. On the basis of the first version of the stack network, we add a new path: in addition to the previous LSTM output, the mapping of the input of the previous LSTM to the hidden layer is used as a new input. and a new input is added. At the same time, add a linear map to learn a new transform. + +Figure 3 is a schematic structural diagram of a finally obtained stack recurrent neural network. + +

+
+Figure 3. Schematic diagram of stack-based recurrent neural network based on LSTM +

+ +### Bidirectional Recurrent Neural Network + +In LSTM, the hidden layer vector at the time of $t$ encodes all input information until the time of $t$. The LSTM at $t$ can see the history, but cannot see the future. In most natural language processing tasks, we almost always get the whole sentence. In this case, if you can get future information like the historical information, it will be of great help to the sequence learning task. + +In order to overcome this shortcoming, we can design a bidirectional recurrent network unit, which is simple and straightforward: make a small modification to the stack recurrent neural network of the previous section, stack multiple LSTM units, and let each layer of LSTM units learn the output sequence of the previous layer in the order of forward, reverse, forward …… So, starting from layer 2, our LSTM unit will always see historical and future information at $t$. Figure 4 is a schematic diagram showing the structure of a bidirectional recurrent neural network based on LSTM. + +

+
+Figure 4. Schematic diagram of a bidirectional recurrent neural network based on LSTM +

+ +It should be noted that this bidirectional RNN structure is not the same as the bidirectional RNN structure used by Bengio etc in machine translation tasks\[[3](#References), [4](#References)\] Another bidirectional recurrent neural network will be introduced in the following [Machine Translation](https://github.com/PaddlePaddle/book/blob/develop/08.machine_translation) task. + +### Conditional Random Field + +The idea of ​​using a neural network model to solve a problem usually is: the front-layer network learns the feature representation of the input, and the last layer of the network completes the final task based on the feature. In the SRL task, the feature representation of the deep LSTM network learns input. Conditional Random Filed (CRF) completes the sequence labeling on th basis of features at the end of the entire network. + +CRF is a probabilistic structural model, which can be regarded as a probabilistic undirected graph model. Nodes represent random variables and edges represent probability dependencies between random variables. In simple terms, CRF learns the conditional probability $P(X|Y)$, where $X = (x_1, x_2, ... , x_n)$ is the input sequence, $Y = (y_1, y_2, ..., y_n $ is a sequence of tokens; the decoding process is given the $X$ sequence to solve the $Y$ sequence with the largest $P(Y|X)$, that is $Y^* = \mbox{arg max}_{Y} P( Y | X)$. + +The sequence labeling task only needs to consider that both the input and the output are a linear sequence. And since we only use the input sequence as a condition and do not make any conditional independent assumptions, there is no graph structure between the elements of the input sequence. In summary, the CRF defined on the chain diagram shown in Figure 5 is used in the sequence labeling task, which is called Linear Chain Conditional Random Field. + +

+
+Figure 5. Linear chain conditional random field used in sequence labeling tasks +

+ +According to the factorization theorem on the linear chain condition random field \[[5](#References)\], the probability of a particular tag sequence $Y$ can be defined as given in the observation sequence $X$: + +$$p(Y | X) = \frac{1}{Z(X)} \text{exp}\left(\sum_{i=1}^{n}\left(\sum_{j}\lambda_{ j}t_{j} (y_{i - 1}, y_{i}, X, i) + \sum_{k} \mu_k s_k (y_i, X, i)\right)\right)$$ + +Where $Z(X)$ is the normalization factor, and $t_j$ is the feature function defined on the edge, depending on the current and previous position, which called the transition feature. It represents the transition probability of the input sequence $X$ and its labeling sequence marked at the $i$ and $i - 1$ positions. $s_k$ is a feature function defined on the node, called a state feature, which depends on the current position. It represents the probability of marking for the observation sequence $X$ and its $i$ position. $\lambda_j$ and $\mu_k$ are the weights corresponding to the transfer feature function and the state feature function respectively. In fact, $t$ and $s$ can be represented in the same mathematical form, and the transfer feature and state are summed at each position $i$: $f_{k}(Y, X) = \sum_{i =1}^{n}f_k({y_{i - 1}, y_i, X, i})$. Calling $f$ collectively as a feature function, so $P(Y|X)$ can be expressed as: + +$$p(Y|X, W) = \frac{1}{Z(X)}\text{exp}\sum_{k}\omega_{k}f_{k}(Y, X)$$ + +$\omega$ is the weight corresponding to the feature function and is the parameter to be learned by the CRF model. During training, for a given input sequence and the corresponding set of markup sequences $D = \left[(X_1, Y_1), (X_2 , Y_2) , ... , (X_N, Y_N)\right]$ , by regularizing the maximum likelihood estimation to solve the following optimization objectives: + +$$\DeclareMathOperator*{\argmax}{arg\,max} L(\lambda, D) = - \text{log}\left(\prod_{m=1}^{N}p(Y_m|X_m, W )\right) + C \frac{1}{2}\lVert W\rVert^{2}$$ + +This optimization objectives can be solved by the back propagation algorithm together with the entire neural network. When decoding, for a given input sequence $X$, the output sequence $\bar{Y}$ of maximizing the conditional probability $\bar{P}(Y|X)$ by the decoding algorithm (such as: Viterbi algorithm, Beam Search). + +### Deep bidirectional LSTM (DB-LSTM) SRL model + +In the SRL task, the input is “predicate” and “a sentence”. The goal is to find the argument of the predicate from this sentence and mark the semantic role of the argument. If a sentence contains $n$ predicates, the sentence will be processed for $n$ times. One of the most straightforward models is the following: + +1. Construct the input; + - Input 1 is the predicate and 2 is the sentence + - Extend input 1 to a sequence as long as input 2, expressed by one-hot mode; +2. The predicate sequence and sentence sequence of the one-hot format are converted into a sequence of word vectors represented by real vectors through a vocabulary; +3. The two word vector sequences in step 2 are used as input of the bidirectional LSTM to learn the feature representation of the input sequence; +4. The CRF takes the features learned in the model in step 3 as input, and uses the tag sequence as the supervised signal to implement sequence labeling; + +You can try this method. Here, we propose some improvements that introduce two simple features that are very effective in improving system performance: + +- Predicate's context: In the above method, only the word vector of the predicate is used to express all the information related to the predicate. 
This method is always very weak, especially if the predicate appears multiple times in the sentence, it may cause certain ambiguity. From experience, a small segment of several words before and after the predicate can provide more information to help resolve ambiguity. So, we add this kind of experience to the model, and extract a "predicate context" fragment for each predicate, that is, a window fragment composed of $n$ words before and after the predicate; +- Predicate context area's tag: Introduces a 0-1 binary variable for each word in the sentence, which indicats whether they are in the "predicate context" fragment; + +The modified model is as follows (Figure 6 is a schematic diagram of the model structure with a depth of 4): + +1. Construct input + - Input 1 is a sentence sequence, input 2 is a predicate sequence, input 3 is a predicate context, and $n$ words before and after the predicate are extracted from the sentence to form a predicate context, which represented by one-hot. Input 4 is a predicate context area which marks whether each word in the sentence is in the context of the predicate; + - Extend the input 2~3 to a sequence as long as the input 1; +2. Input 1~4 are converted into a sequence of word vectors represented by real vectors in vocabulary; where inputs 1 and 3 share the same vocabulary, and inputs 2 and 4 each have their own vocabulary; +3. The four word vector sequences in step 2 are used as input to the bidirectional LSTM model; the LSTM model learns the feature representation of the input sequence to obtain a new feature representation sequence; +4. The CRF takes the features learned in step 3 of the LSTM as input, and uses the marked sequence as the supervised signal to complete the sequence labeling; + +
+
+Figure 6. Deep bidirectional LSTM model on the SRL task +
+ + +## Data Introduction + +In this tutorial, We use the data set opened by the [CoNLL 2005](http://www.cs.upc.edu/~srlconll/) SRL task as an example. It is important to note that the training set and development set of the CoNLL 2005 SRL task are not free for public after the competition. Currently, only the test set is available, including 23 in the Wall Street Journal and 3 in the Brown corpus. In this tutorial, we use the WSJ data in the test set to solve the model for the training set. However, since the number of samples in the test set is far from enough, if you want to train an available neural network SRL system, consider paying for the full amount of data. + +The original data also includes a variety of information such as part-of-speech tagging, named entity recognition, and syntax parse tree. In this tutorial, we use the data in the test.wsj folder for training and testing, and only use the data under the words folder (text sequence) and the props folder (labeled results). The data directories used in this tutorial are as follows: + +```text +conll05st-release/ +└── test.wsj +    ├── props # Label result +    └── words # Input text sequence +``` + +The labeling information is derived from the labeling results of Penn TreeBank\[[7](#References)\] and PropBank\[[8](#References)\]. The label of the PropBank labeling result is different from the labeling result label we used in the first example of the article, but the principle is the same. For the description of the meaning of the labeling result label, please refer to the paper \[[9](#References)\]. + +The raw data needs to be preprocessed in order to be processed by PaddlePaddle. The preprocessing includes the following steps: + +1. Combine text sequences and tag sequences into one record; +2. If a sentence contains $n$ predicates, the sentence will be processed for $n$ times, becoming a $n$ independent training sample, each sample with a different predicate; +3. Extract the predicate context and construct the predicate context area tag; +4. Construct a tag represented by the BIO method; +5. Get the integer index corresponding to the word according to the dictionary. + +After the pre-processing is completed, a training sample data contains 9 fields, namely: sentence sequence, predicate, predicate context (accounting for 5 columns), predicate context area tag, and labeling sequence. The following table is an example of a training sample. + +| Sentence Sequence | Predicate | Predicate Context (Window = 5) | Predicate Context Area Tag | Label Sequence | +|---|---|---|---|---| +| A | set | n't been set . × | 0 | B-A1 | +| record | set | n't been set . × | 0 | I-A1 | +| date | set | n't been set . × | 0 | I-A1 | +| has | set | n't been set . × | 0 | O | +| n't | set | n't been set . × | 1 | B-AM-NEG | +| been | set | n't been set . × | 1 | O | +| set | set | n't been set . × | 1 | B-V | +| . | set | n't been set . × | 1 | O | + + +In addition to the data, we also provide the following resources: + +| File Name | Description | +|---|---| +| word_dict | Input a dictionary of sentences for a total of 44068 words | +| label_dict | Tag dictionary, total 106 tags | +| predicate_dict | Dictionary of predicates, totaling 3162 words | +| emb | A trained vocabulary, 32-dimensional | + +We trained a language model on English Wikipedia to get a word vector to initialize the SRL model. During the training of the SRL model, the word vector is no longer updated. 
For the language model and word vector, refer to [Word Vector](https://github.com/PaddlePaddle/book/blob/develop/04.word2vec) for this tutorial. The corpus of our training language model has a total of 995,000,000 tokens, and the dictionary size is controlled to 4,900,000 words. CoNLL 2005 training corpus 5% of this word is not in 4900,000 words, we have seen them all unknown words, with `` representation. + +Get the dictionary and print the dictionary size: + +```python +from __future__ import print_function + +import math, os +import numpy as np +import paddle +import paddle.dataset.conll05 as conll05 +import paddle.fluid as fluid +import six +import time + +with_gpu = os.getenv('WITH_GPU', '0') != '0' + +word_dict, verb_dict, label_dict = conll05.get_dict() +word_dict_len = len(word_dict) +label_dict_len = len(label_dict) +pred_dict_len = len(verb_dict) + +print('word_dict_len: ', word_dict_len) +print('label_dict_len: ', label_dict_len) +print('pred_dict_len: ', pred_dict_len) +``` + +## Model Configuration + +- Define input data dimensions and model hyperparameters. + +```python +mark_dict_len = 2 # The dimension of the context area flag, which is a 0-1 2 value feature, so the dimension is 2 +word_dim = 32 # Word vector dimension +mark_dim = 5 # The predicate context area is mapped to a real vector by the vocabulary, which is the adjacent dimension +hidden_dim = 512 # LSTM Hidden Layer Vector Dimensions : 512 / 4 +depth = 8 # depth of stack LSTM +mix_hidden_lr = 1e-3 # Basic learning rate of fundamental_chain_crf layer + +IS_SPARSE = True # Whether to update embedding in sparse way +PASS_NUM = 10 # Training epoches +BATCH_SIZE = 10 # Batch size + +embedding_name = 'emb' +``` + +It should be specially noted that the parameter `hidden_dim = 512` actually specifies the dimension of the LSTM hidden layer's vector is 128. For this, please refer to the description of `dynamic_lstm` in the official PaddlePaddle API documentation. + +- As is mentioned above, we use the trained word vector based on English Wikipedia to initialize the embedding layer parameters of the total six features of the sequence input and predicate context, which are not updated during training. + +```python +#Here load the binary parameters saved by PaddlePaddle +def load_parameter(file_name, h, w): + with open(file_name, 'rb') as f: + f.read(16) # skip header. + return np.fromfile(f, dtype=np.float32).reshape(h, w) +``` + + + +## Training Model + +- We train according to the network topology and model parameters. We also need to specify the optimization method when constructing. Here we use the most basic SGD method (momentum is set to 0), and set the learning rate, regularition, and so on. + +Define hyperparameters for the training process + +```python +use_cuda = False #Execute training on cpu +save_dirname = "label_semantic_roles.inference.model" #The model parameters obtained by training are saved in the file. +is_local = True +``` + +### Data input layer definition +Defines the format of the model input features, including the sentence sequence, the predicate, the five features of the predicate context, and the predicate context area flags. 
+ +```python +# Sentence sequences +word = fluid.data( + name='word_data', shape=[None, 1], dtype='int64', lod_level=1) + +# predicate +predicate = fluid.data( + name='verb_data', shape=[None, 1], dtype='int64', lod_level=1) + +# predicate context's 5 features +ctx_n2 = fluid.data( + name='ctx_n2_data', shape=[None, 1], dtype='int64', lod_level=1) +ctx_n1 = fluid.data( + name='ctx_n1_data', shape=[None, 1], dtype='int64', lod_level=1) +ctx_0 = fluid.data( + name='ctx_0_data', shape=[None, 1], dtype='int64', lod_level=1) +ctx_p1 = fluid.data( + name='ctx_p1_data', shape=[None, 1], dtype='int64', lod_level=1) +ctx_p2 = fluid.data( + name='ctx_p2_data', shape=[None, 1], dtype='int64', lod_level=1) + +# Predicate conotext area flag +mark = fluid.data( + name='mark_data', shape=[None, 1], dtype='int64', lod_level=1) +``` +### Defining the network structure +First pre-train and define the model input layer + +```python +#pre-training predicate and predicate context area flags +predicate_embedding = fluid.embedding( + input=predicate, + size=[pred_dict_len, word_dim], + dtype='float32', + is_sparse=IS_SPARSE, + param_attr='vemb') + +mark_embedding = fluid.embedding( + input=mark, + size=[mark_dict_len, mark_dim], + dtype='float32', + is_sparse=IS_SPARSE) + +#Sentence sequences and predicate context 5 features then pre-trained +word_input = [word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2] +#Because word vector is pre-trained, no longer training embedding table, +# The trainable's parameter attribute set to False prevents the embedding table from being updated during training +emb_layers = [ + fluid.embedding( + size=[word_dict_len, word_dim], + input=x, + param_attr=fluid.ParamAttr( + name=embedding_name, trainable=False)) for x in word_input +] +# Pre-training results for adding predicate and predicate context area tags +emb_layers.append(predicate_embedding) +emb_layers.append(mark_embedding) +``` +Define eight LSTM units to learn all input sequences in "forward/reverse" order. + +```python +# A total of 8 LSTM units are trained, each unit is oriented from left to right or right to left. 
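+# Layer 0 always reads its input left to right, and each layer built in the
+# loop below takes as input the sum of linear projections of the previous
+# hidden state and the previous LSTM output (the direct edges); from layer 1
+# on, the reading direction of the stacked layers alternates.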
+# Determined by the parameter `is_reverse` +# First stack structure +hidden_0_layers = [ + fluid.layers.fc(input=emb, size=hidden_dim, act='tanh') + for emb in emb_layers +] + +hidden_0 = fluid.layers.sums(input=hidden_0_layers) + +lstm_0 = fluid.layers.dynamic_lstm( + input=hidden_0, + size=hidden_dim, + candidate_activation='relu', + gate_activation='sigmoid', + cell_activation='sigmoid') + +# Stack L-LSTM and R-LSTM with directly connected sides +input_tmp = [hidden_0, lstm_0] + +# remaining stack structure +for i in range(1, depth): + mix_hidden = fluid.layers.sums(input=[ + fluid.layers.fc(input=input_tmp[0], size=hidden_dim, act='tanh'), + fluid.layers.fc(input=input_tmp[1], size=hidden_dim, act='tanh') + ]) + + lstm = fluid.layers.dynamic_lstm( + input=mix_hidden, + size=hidden_dim, + candidate_activation='relu', + gate_activation='sigmoid', + cell_activation='sigmoid', + is_reverse=((i % 2) == 1)) + + input_tmp = [mix_hidden, lstm] + +# Fetch the output of the last stack LSTM and the input of this LSTM unit to the hidden layer mapping, +# Learn the state feature of CRF after a fully connected layer maps to the dimensions of the tags dictionary +feature_out = fluid.layers.sums(input=[ + fluid.layers.fc(input=input_tmp[0], size=label_dict_len, act='tanh'), + fluid.layers.fc(input=input_tmp[1], size=label_dict_len, act='tanh') +]) + +# tag/label sequence +target = fluid.data( + name='target', shape=[1], dtype='int64', lod_level=1) + +# Learning CRF transfer features +crf_cost = fluid.layers.linear_chain_crf( + input=feature_out, + label=target, + param_attr=fluid.ParamAttr( + name='crfw', learning_rate=mix_hidden_lr)) + + +avg_cost = fluid.layers.mean(crf_cost) + +# Use the most basic SGD optimization method (momentum is set to 0) +sgd_optimizer = fluid.optimizer.SGD( + learning_rate=fluid.layers.exponential_decay( + learning_rate=0.01, + decay_steps=100000, + decay_rate=0.5, + staircase=True)) + +sgd_optimizer.minimize(avg_cost) + + +``` + +The data introduction section mentions the payment of the CoNLL 2005 training set. Here we use the test set training for everyone to learn. Conll05.test() produces one sample every time, containing 9 features, then shuffle and after batching as the input for training. + +```python +crf_decode = fluid.layers.crf_decoding( + input=feature_out, param_attr=fluid.ParamAttr(name='crfw')) + +train_data = fluid.io.batch( + fluid.io.shuffle( + paddle.dataset.conll05.test(), buf_size=8192), + batch_size=BATCH_SIZE) + +place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + +``` + +The corresponding relationship between each data and data_layer is specified by the feeder. The following feeder indicates that the data_layer corresponding to the 0th column of the data generated by conll05.test() is `word`. 
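+
+If you want to verify that ordering yourself, the optional sketch below peeks at one raw sample. It assumes, as stated above, that the reader yields nine columns in the same order as the `feed_list` of the feeder defined in the next block.
+
+```python
+# Optional sanity check: print each column name next to its length for one sample.
+sample = next(paddle.dataset.conll05.test()())
+names = ['word', 'ctx_n2', 'ctx_n1', 'ctx_0', 'ctx_p1', 'ctx_p2',
+         'predicate', 'mark', 'target']
+for name, column in zip(names, sample):
+    print(name, len(column))
+```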
+ + +```python +feeder = fluid.DataFeeder( + feed_list=[ + word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, predicate, mark, target + ], + place=place) +exe = fluid.Executor(place) +``` + +Start training + +```python +main_program = fluid.default_main_program() + +exe.run(fluid.default_startup_program()) +embedding_param = fluid.global_scope().find_var( + embedding_name).get_tensor() +embedding_param.set( + load_parameter(conll05.get_embedding(), word_dict_len, word_dim), + place) + +start_time = time.time() +batch_id = 0 +for pass_id in six.moves.xrange(PASS_NUM): + for data in train_data(): + cost = exe.run(main_program, + feed=feeder.feed(data), + fetch_list=[avg_cost]) + cost = cost[0] + + if batch_id % 10 == 0: + print("avg_cost: " + str(cost)) + if batch_id != 0: + print("second per batch: " + str((time.time( + ) - start_time) / batch_id)) + # Set the threshold low to speed up the CI test + if float(cost) < 60.0: + if save_dirname is not None: + fluid.io.save_inference_model(save_dirname, [ + 'word_data', 'verb_data', 'ctx_n2_data', + 'ctx_n1_data', 'ctx_0_data', 'ctx_p1_data', + 'ctx_p2_data', 'mark_data' + ], [feature_out], exe) + break + + batch_id = batch_id + 1 +``` + + +## Model Application + +After completing the training, the optimal model needs to be selected according to a performance indicator we care about. You can simply select the model with the least markup error on the test set. We give an example of using a trained model for prediction as follows. + +First set the parameters of the prediction process + +```python +use_cuda = False #predict on cpu +save_dirname = "label_semantic_roles.inference.model" #call trained model for prediction + +place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() +exe = fluid.Executor(place) +``` +Set the input, use LoDTensor to represent the input word sequence, where the shape of each word's base_shape is [1], because each word is represented by an id. If the length-based LoD is [[3, 4, 2]], which is a single-layer LoD, then the constructed LoDTensor contains three sequences which their length are 3, 4, and 2. + +Note that LoD is a list of lists. + + +```python +lod = [[3, 4, 2]] +base_shape = [1] + +# Construct fake data as input, the range of random integer numbers is [low, high] +word = fluid.create_random_int_lodtensor( + lod, base_shape, place, low=0, high=word_dict_len - 1) +pred = fluid.create_random_int_lodtensor( + lod, base_shape, place, low=0, high=pred_dict_len - 1) +ctx_n2 = fluid.create_random_int_lodtensor( + lod, base_shape, place, low=0, high=word_dict_len - 1) +ctx_n1 = fluid.create_random_int_lodtensor( + lod, base_shape, place, low=0, high=word_dict_len - 1) +ctx_0 = fluid.create_random_int_lodtensor( + lod, base_shape, place, low=0, high=word_dict_len - 1) +ctx_p1 = fluid.create_random_int_lodtensor( + lod, base_shape, place, low=0, high=word_dict_len - 1) +ctx_p2 = fluid.create_random_int_lodtensor( + lod, base_shape, place, low=0, high=word_dict_len - 1) +mark = fluid.create_random_int_lodtensor( + lod, base_shape, place, low=0, high=mark_dict_len - 1) +``` + +Using fluid.io.load_inference_model to load inference_program, feed_target_names is the name of the model's input variable, and fetch_targets is the predicted object. 
+ +```python +[inference_program, feed_target_names, + fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) +``` +Construct the feed dictionary {feed_target_name: feed_target_data}, where the results are a list of predicted targets + +```python +assert feed_target_names[0] == 'word_data' +assert feed_target_names[1] == 'verb_data' +assert feed_target_names[2] == 'ctx_n2_data' +assert feed_target_names[3] == 'ctx_n1_data' +assert feed_target_names[4] == 'ctx_0_data' +assert feed_target_names[5] == 'ctx_p1_data' +assert feed_target_names[6] == 'ctx_p2_data' +assert feed_target_names[7] == 'mark_data' +``` +Execute prediction + +```python +results = exe.run(inference_program, + feed={ + feed_target_names[0]: word, + feed_target_names[1]: pred, + feed_target_names[2]: ctx_n2, + feed_target_names[3]: ctx_n1, + feed_target_names[4]: ctx_0, + feed_target_names[5]: ctx_p1, + feed_target_names[6]: ctx_p2, + feed_target_names[7]: mark + }, + fetch_list=fetch_targets, + return_numpy=False) +``` + +Output result + +```python +print(results[0].lod()) +np_data = np.array(results[0]) +print("Inference Shape: ", np_data.shape) +``` + + +## Conclusion + +Labeling semantic roles is an important intermediate step in many natural language understanding tasks. In this tutorial, we take the label semantic roles task as an example to introduce how to use PaddlePaddle for sequence labeling tasks. The model presented in the tutorial comes from our published paper \[[10](#References)\]. Since the training data for the CoNLL 2005 SRL task is not currently fully open, only the test data is used as an example in the tutorial. In this process, we hope to reduce our reliance on other natural language processing tools. We can use neural network data-driven, end-to-end learning capabilities to get a model that is comparable or even better than traditional methods. In the paper, we confirmed this possibility. More information and discussion about the model can be found in the paper. + + +## References +1. Sun W, Sui Z, Wang M, et al. [Chinese label semantic roles with shallow parsing](http://www.aclweb.org/anthology/D09-1#page=1513)[C]//Proceedings Of the 2009 Conference on Empirical Methods in Natural Language Processing: Volume 3-Volume 3. Association for Computational Linguistics, 2009: 1475-1483. +2. Pascanu R, Gulcehre C, Cho K, et al. [How to construct deep recurrent neural networks](https://arxiv.org/abs/1312.6026)[J]. arXiv preprint arXiv:1312.6026, 2013. +3. Cho K, Van Merriënboer B, Gulcehre C, et al. [Learning phrase representations using RNN encoder-decoder for statistical machine translation](https://arxiv.org/abs/1406.1078)[J]. arXiv preprint arXiv: 1406.1078, 2014. +4. Bahdanau D, Cho K, Bengio Y. [Neural machine translation by jointly learning to align and translate](https://arxiv.org/abs/1409.0473)[J]. arXiv preprint arXiv:1409.0473, 2014. +5. Lafferty J, McCallum A, Pereira F. [Conditional random fields: Probabilistic models for segmenting and labeling sequence data](https://repository.upenn.edu/cgi/viewcontent.cgi?article=1162&context=cis_papers) [C]//Proceedings of the eighteenth international conference on machine learning, ICML. 2001, 1: 282-289. +6. Li Hang. Statistical Learning Method[J]. Tsinghua University Press, Beijing, 2012. +7. Marcus MP, Marcinkiewicz MA, Santorini B. [Building a large annotated corpus of English: The Penn Treebank](http://repository.upenn.edu/cgi/viewcontent.cgi?article=1246&context=cis_reports)[J] Computational linguistics, 1993, 19(2): 313-330. +8. 
Palmer M, Gildea D, Kingsbury P. [The proposition bank: An annotated corpus of semantic roles](http://www.mitpressjournals.org/doi/pdfplus/10.1162/0891201053630264) [J]. Computational linguistics, 2005 , 31(1): 71-106. +9. Carreras X, Màrquez L. [Introduction to the CoNLL-2005 shared task: label semantic roles](http://www.cs.upc.edu/~srlconll/st05/papers/intro.pdf)[C]/ /Proceedings of the Ninth Conference on Computational Natural Language Learning. Association for Computational Linguistics, 2005: 152-164. +10. Zhou J, Xu W. [End-to-end learning of label semantic roles using recurrent neural networks](http://www.aclweb.org/anthology/P/P15/P15-1109.pdf)[C] //Proceedings of the Annual Meeting of the Association for Computational Linguistics. 2015. + +
+知识共享许可协议
This tutorial is contributed by PaddlePaddle, and licensed under a Creative Commons Attribution-ShareAlike 4.0 International License. diff --git a/doc/paddle/user_guides/nlp_case/label_semantic_roles/_ce.py b/doc/paddle/user_guides/nlp_case/label_semantic_roles/_ce.py new file mode 100644 index 0000000000000000000000000000000000000000..3ad4c61753c492c4a64ba82c71e850587e2d8845 --- /dev/null +++ b/doc/paddle/user_guides/nlp_case/label_semantic_roles/_ce.py @@ -0,0 +1,38 @@ +### This file is only used for continuous evaluation test! +from __future__ import print_function +from __future__ import division +from __future__ import absolute_import +import os +import sys +sys.path.append(os.environ['ceroot']) +from kpi import CostKpi + +train_cost_kpi = CostKpi( + 'train_cost', 0.02, 0, actived=True, desc='train cost') +tracking_kpis = [train_cost_kpi] + + +def parse_log(log): + for line in log.split('\n'): + fs = line.strip().split('\t') + print(fs) + if len(fs) == 3 and fs[0] == 'kpis': + kpi_name = fs[1] + kpi_value = float(fs[2]) + yield kpi_name, kpi_value + + +def log_to_ce(log): + kpi_tracker = {} + for kpi in tracking_kpis: + kpi_tracker[kpi.name] = kpi + + for (kpi_name, kpi_value) in parse_log(log): + print(kpi_name, kpi_value) + kpi_tracker[kpi_name].add_record(kpi_value) + kpi_tracker[kpi_name].persist() + + +if __name__ == '__main__': + log = sys.stdin.read() + log_to_ce(log) diff --git a/doc/paddle/user_guides/nlp_case/label_semantic_roles/image/Eqn1.png b/doc/paddle/user_guides/nlp_case/label_semantic_roles/image/Eqn1.png new file mode 100644 index 0000000000000000000000000000000000000000..1e11831e99e1ec98864ef20f682747ffbc3223f4 Binary files /dev/null and b/doc/paddle/user_guides/nlp_case/label_semantic_roles/image/Eqn1.png differ diff --git a/doc/paddle/user_guides/nlp_case/label_semantic_roles/image/Eqn2.gif b/doc/paddle/user_guides/nlp_case/label_semantic_roles/image/Eqn2.gif new file mode 100644 index 0000000000000000000000000000000000000000..3b9b21b992f0148b3db4db30f70247ada0e88fbd Binary files /dev/null and b/doc/paddle/user_guides/nlp_case/label_semantic_roles/image/Eqn2.gif differ diff --git a/doc/paddle/user_guides/nlp_case/label_semantic_roles/image/Eqn3.gif b/doc/paddle/user_guides/nlp_case/label_semantic_roles/image/Eqn3.gif new file mode 100644 index 0000000000000000000000000000000000000000..5aef7671d4b4f5c43b3af5082ea8b87f73fa27cd Binary files /dev/null and b/doc/paddle/user_guides/nlp_case/label_semantic_roles/image/Eqn3.gif differ diff --git a/doc/paddle/user_guides/nlp_case/label_semantic_roles/image/Eqn4.png b/doc/paddle/user_guides/nlp_case/label_semantic_roles/image/Eqn4.png new file mode 100644 index 0000000000000000000000000000000000000000..12d71ea48190bad3d23b0a40bf0dbad254bc2fdf Binary files /dev/null and b/doc/paddle/user_guides/nlp_case/label_semantic_roles/image/Eqn4.png differ diff --git a/doc/paddle/user_guides/nlp_case/label_semantic_roles/image/bidirectional_stacked_lstm.png b/doc/paddle/user_guides/nlp_case/label_semantic_roles/image/bidirectional_stacked_lstm.png new file mode 100644 index 0000000000000000000000000000000000000000..e63f5ebd6d00f2e4ecf97b9ab2027e74683013f2 Binary files /dev/null and b/doc/paddle/user_guides/nlp_case/label_semantic_roles/image/bidirectional_stacked_lstm.png differ diff --git a/doc/paddle/user_guides/nlp_case/label_semantic_roles/image/bidirectional_stacked_lstm_en.png b/doc/paddle/user_guides/nlp_case/label_semantic_roles/image/bidirectional_stacked_lstm_en.png new file mode 100644 index 
0000000000000000000000000000000000000000..f0a195c24d9ee493f96bb93c28a99e70566be7a4 Binary files /dev/null and b/doc/paddle/user_guides/nlp_case/label_semantic_roles/image/bidirectional_stacked_lstm_en.png differ diff --git a/doc/paddle/user_guides/nlp_case/label_semantic_roles/image/bio_example.png b/doc/paddle/user_guides/nlp_case/label_semantic_roles/image/bio_example.png new file mode 100644 index 0000000000000000000000000000000000000000..e5f7151c9fcc50a7cf7af485cbbc7e4fccab0c20 Binary files /dev/null and b/doc/paddle/user_guides/nlp_case/label_semantic_roles/image/bio_example.png differ diff --git a/doc/paddle/user_guides/nlp_case/label_semantic_roles/image/bio_example_en.png b/doc/paddle/user_guides/nlp_case/label_semantic_roles/image/bio_example_en.png new file mode 100644 index 0000000000000000000000000000000000000000..93b44dd4874402ef29ad7bd7d94147609b92e309 Binary files /dev/null and b/doc/paddle/user_guides/nlp_case/label_semantic_roles/image/bio_example_en.png differ diff --git a/doc/paddle/user_guides/nlp_case/label_semantic_roles/image/db_lstm_network.png b/doc/paddle/user_guides/nlp_case/label_semantic_roles/image/db_lstm_network.png new file mode 100644 index 0000000000000000000000000000000000000000..592f7ee23bdc88a9a35059612e5ab880bbc9d34b Binary files /dev/null and b/doc/paddle/user_guides/nlp_case/label_semantic_roles/image/db_lstm_network.png differ diff --git a/doc/paddle/user_guides/nlp_case/label_semantic_roles/image/db_lstm_network_en.png b/doc/paddle/user_guides/nlp_case/label_semantic_roles/image/db_lstm_network_en.png new file mode 100644 index 0000000000000000000000000000000000000000..c3646312e48db977402fb353dc0c9b4d02269bf4 Binary files /dev/null and b/doc/paddle/user_guides/nlp_case/label_semantic_roles/image/db_lstm_network_en.png differ diff --git a/doc/paddle/user_guides/nlp_case/label_semantic_roles/image/dependency_parsing.png b/doc/paddle/user_guides/nlp_case/label_semantic_roles/image/dependency_parsing.png new file mode 100644 index 0000000000000000000000000000000000000000..9265b671735940ed6549e2980064d2ce08baae64 Binary files /dev/null and b/doc/paddle/user_guides/nlp_case/label_semantic_roles/image/dependency_parsing.png differ diff --git a/doc/paddle/user_guides/nlp_case/label_semantic_roles/image/dependency_parsing_en.png b/doc/paddle/user_guides/nlp_case/label_semantic_roles/image/dependency_parsing_en.png new file mode 100644 index 0000000000000000000000000000000000000000..23f4f45b603e3d60702af2b2464d10fc8deed061 Binary files /dev/null and b/doc/paddle/user_guides/nlp_case/label_semantic_roles/image/dependency_parsing_en.png differ diff --git a/doc/paddle/user_guides/nlp_case/label_semantic_roles/image/linear_chain_crf.png b/doc/paddle/user_guides/nlp_case/label_semantic_roles/image/linear_chain_crf.png new file mode 100644 index 0000000000000000000000000000000000000000..0778fda74b2ad22ce4b631791a7b028cdef780a5 Binary files /dev/null and b/doc/paddle/user_guides/nlp_case/label_semantic_roles/image/linear_chain_crf.png differ diff --git a/doc/paddle/user_guides/nlp_case/label_semantic_roles/image/stacked_lstm.png b/doc/paddle/user_guides/nlp_case/label_semantic_roles/image/stacked_lstm.png new file mode 100644 index 0000000000000000000000000000000000000000..3d2914c726b5f4c46e66dfa85d4e88649fede6b3 Binary files /dev/null and b/doc/paddle/user_guides/nlp_case/label_semantic_roles/image/stacked_lstm.png differ diff --git a/doc/paddle/user_guides/nlp_case/label_semantic_roles/image/stacked_lstm_en.png 
b/doc/paddle/user_guides/nlp_case/label_semantic_roles/image/stacked_lstm_en.png new file mode 100644 index 0000000000000000000000000000000000000000..0b944ef91e8b5ba4b14d2a35bd8879f261cf8f61 Binary files /dev/null and b/doc/paddle/user_guides/nlp_case/label_semantic_roles/image/stacked_lstm_en.png differ diff --git a/doc/paddle/user_guides/nlp_case/label_semantic_roles/index.cn.html b/doc/paddle/user_guides/nlp_case/label_semantic_roles/index.cn.html new file mode 100644 index 0000000000000000000000000000000000000000..c9deb3ad2ed3ceeaaec56579bd6dda5db5e95dc0 --- /dev/null +++ b/doc/paddle/user_guides/nlp_case/label_semantic_roles/index.cn.html @@ -0,0 +1,636 @@ + + + + + + + + + + + + + + + + + +
+
+ + + + + + + diff --git a/doc/paddle/user_guides/nlp_case/label_semantic_roles/index.html b/doc/paddle/user_guides/nlp_case/label_semantic_roles/index.html new file mode 100644 index 0000000000000000000000000000000000000000..43156a7299af0e83bd3161a86d2ceadc295bab42 --- /dev/null +++ b/doc/paddle/user_guides/nlp_case/label_semantic_roles/index.html @@ -0,0 +1,620 @@ + + + + + + + + + + + + + + + + + +
+
+ + + + + + + diff --git a/doc/paddle/user_guides/nlp_case/label_semantic_roles/train.py b/doc/paddle/user_guides/nlp_case/label_semantic_roles/train.py new file mode 100644 index 0000000000000000000000000000000000000000..875d2acff666dc18278603ccc3d9428419de9a9c --- /dev/null +++ b/doc/paddle/user_guides/nlp_case/label_semantic_roles/train.py @@ -0,0 +1,318 @@ +from __future__ import print_function + +import math, os +import numpy as np +import paddle +import paddle.dataset.conll05 as conll05 +import paddle.fluid as fluid +import six +import time +import argparse + +with_gpu = os.getenv('WITH_GPU', '0') != '0' + +word_dict, verb_dict, label_dict = conll05.get_dict() +word_dict_len = len(word_dict) +label_dict_len = len(label_dict) +pred_dict_len = len(verb_dict) + +mark_dict_len = 2 +word_dim = 32 +mark_dim = 5 +hidden_dim = 512 +depth = 8 +mix_hidden_lr = 1e-3 + +IS_SPARSE = True +PASS_NUM = 10 +BATCH_SIZE = 10 + +embedding_name = 'emb' + + +def parse_args(): + parser = argparse.ArgumentParser("label_semantic_roles") + parser.add_argument( + '--enable_ce', + action='store_true', + help="If set, run the task with continuous evaluation logs.") + parser.add_argument( + '--use_gpu', type=int, default=0, help="Whether to use GPU or not.") + parser.add_argument( + '--num_epochs', type=int, default=100, help="number of epochs.") + args = parser.parse_args() + return args + + +def load_parameter(file_name, h, w): + with open(file_name, 'rb') as f: + f.read(16) # skip header. + return np.fromfile(f, dtype=np.float32).reshape(h, w) + + +def db_lstm(word, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, mark, + **ignored): + # 8 features + predicate_embedding = fluid.embedding( + input=predicate, + size=[pred_dict_len, word_dim], + dtype='float32', + is_sparse=IS_SPARSE, + param_attr='vemb') + + mark_embedding = fluid.embedding( + input=mark, + size=[mark_dict_len, mark_dim], + dtype='float32', + is_sparse=IS_SPARSE) + + word_input = [word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2] + emb_layers = [ + fluid.embedding( + size=[word_dict_len, word_dim], + input=x, + param_attr=fluid.ParamAttr(name=embedding_name, trainable=False)) + for x in word_input + ] + emb_layers.append(predicate_embedding) + emb_layers.append(mark_embedding) + + hidden_0_layers = [ + fluid.layers.fc(input=emb, size=hidden_dim, act='tanh') + for emb in emb_layers + ] + + hidden_0 = fluid.layers.sums(input=hidden_0_layers) + + lstm_0 = fluid.layers.dynamic_lstm( + input=hidden_0, + size=hidden_dim, + candidate_activation='relu', + gate_activation='sigmoid', + cell_activation='sigmoid') + + # stack L-LSTM and R-LSTM with direct edges + input_tmp = [hidden_0, lstm_0] + + for i in range(1, depth): + mix_hidden = fluid.layers.sums(input=[ + fluid.layers.fc(input=input_tmp[0], size=hidden_dim, act='tanh'), + fluid.layers.fc(input=input_tmp[1], size=hidden_dim, act='tanh') + ]) + + lstm = fluid.layers.dynamic_lstm( + input=mix_hidden, + size=hidden_dim, + candidate_activation='relu', + gate_activation='sigmoid', + cell_activation='sigmoid', + is_reverse=((i % 2) == 1)) + + input_tmp = [mix_hidden, lstm] + + feature_out = fluid.layers.sums(input=[ + fluid.layers.fc(input=input_tmp[0], size=label_dict_len, act='tanh'), + fluid.layers.fc(input=input_tmp[1], size=label_dict_len, act='tanh') + ]) + + return feature_out + + +def train(use_cuda, save_dirname=None, is_local=True): + # define data layers + word = fluid.data( + name='word_data', shape=[None, 1], dtype='int64', lod_level=1) + predicate = fluid.data( + name='verb_data', shape=[None, 
1], dtype='int64', lod_level=1) + ctx_n2 = fluid.data( + name='ctx_n2_data', shape=[None, 1], dtype='int64', lod_level=1) + ctx_n1 = fluid.data( + name='ctx_n1_data', shape=[None, 1], dtype='int64', lod_level=1) + ctx_0 = fluid.data( + name='ctx_0_data', shape=[None, 1], dtype='int64', lod_level=1) + ctx_p1 = fluid.data( + name='ctx_p1_data', shape=[None, 1], dtype='int64', lod_level=1) + ctx_p2 = fluid.data( + name='ctx_p2_data', shape=[None, 1], dtype='int64', lod_level=1) + mark = fluid.data( + name='mark_data', shape=[None, 1], dtype='int64', lod_level=1) + + if args.enable_ce: + fluid.default_startup_program().random_seed = 90 + fluid.default_main_program().random_seed = 90 + + # define network topology + feature_out = db_lstm(**locals()) + target = fluid.data( + name='target', shape=[None, 1], dtype='int64', lod_level=1) + crf_cost = fluid.layers.linear_chain_crf( + input=feature_out, + label=target, + param_attr=fluid.ParamAttr(name='crfw', learning_rate=mix_hidden_lr)) + + avg_cost = fluid.layers.mean(crf_cost) + + sgd_optimizer = fluid.optimizer.SGD( + learning_rate=fluid.layers.exponential_decay( + learning_rate=0.01, + decay_steps=100000, + decay_rate=0.5, + staircase=True)) + + sgd_optimizer.minimize(avg_cost) + + crf_decode = fluid.layers.crf_decoding( + input=feature_out, param_attr=fluid.ParamAttr(name='crfw')) + + if args.enable_ce: + train_data = fluid.io.batch( + paddle.dataset.conll05.test(), batch_size=BATCH_SIZE) + else: + train_data = fluid.io.batch( + fluid.io.shuffle(paddle.dataset.conll05.test(), buf_size=8192), + batch_size=BATCH_SIZE) + + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + + feeder = fluid.DataFeeder( + feed_list=[ + word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, predicate, mark, + target + ], + place=place) + exe = fluid.Executor(place) + + def train_loop(main_program): + exe.run(fluid.default_startup_program()) + embedding_param = fluid.global_scope().find_var( + embedding_name).get_tensor() + embedding_param.set( + load_parameter(conll05.get_embedding(), word_dict_len, word_dim), + place) + + start_time = time.time() + batch_id = 0 + for pass_id in six.moves.xrange(PASS_NUM): + for data in train_data(): + cost = exe.run( + main_program, + feed=feeder.feed(data), + fetch_list=[avg_cost]) + cost = cost[0] + + if batch_id % 10 == 0: + print("avg_cost:" + str(cost)) + if batch_id != 0: + print("second per batch: " + str(( + time.time() - start_time) / batch_id)) + # Set the threshold low to speed up the CI test + if float(cost) < 60.0: + if args.enable_ce: + print("kpis\ttrain_cost\t%f" % cost) + + if save_dirname is not None: + # TODO(liuyiqun): Change the target to crf_decode + fluid.io.save_inference_model(save_dirname, [ + 'word_data', 'verb_data', 'ctx_n2_data', + 'ctx_n1_data', 'ctx_0_data', 'ctx_p1_data', + 'ctx_p2_data', 'mark_data' + ], [feature_out], exe) + return + + batch_id = batch_id + 1 + + train_loop(fluid.default_main_program()) + + +def infer(use_cuda, save_dirname=None): + if save_dirname is None: + return + + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + exe = fluid.Executor(place) + + inference_scope = fluid.core.Scope() + with fluid.scope_guard(inference_scope): + # Use fluid.io.load_inference_model to obtain the inference program desc, + # the feed_target_names (the names of variables that will be fed + # data using feed operators), and the fetch_targets (variables that + # we want to obtain data from using fetch operators). 
+ [inference_program, feed_target_names, + fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) + + # Setup inputs by creating LoDTensors to represent sequences of words. + # Here each word is the basic element of these LoDTensors and the shape of + # each word (base_shape) should be [1] since it is simply an index to + # look up for the corresponding word vector. + # Suppose the length_based level of detail (lod) info is set to [[3, 4, 2]], + # which has only one lod level. Then the created LoDTensors will have only + # one higher level structure (sequence of words, or sentence) than the basic + # element (word). Hence the LoDTensor will hold data for three sentences of + # length 3, 4 and 2, respectively. + # Note that lod info should be a list of lists. + lod = [[3, 4, 2]] + base_shape = [1] + # The range of random integers is [low, high] + word = fluid.create_random_int_lodtensor( + lod, base_shape, place, low=0, high=word_dict_len - 1) + pred = fluid.create_random_int_lodtensor( + lod, base_shape, place, low=0, high=pred_dict_len - 1) + ctx_n2 = fluid.create_random_int_lodtensor( + lod, base_shape, place, low=0, high=word_dict_len - 1) + ctx_n1 = fluid.create_random_int_lodtensor( + lod, base_shape, place, low=0, high=word_dict_len - 1) + ctx_0 = fluid.create_random_int_lodtensor( + lod, base_shape, place, low=0, high=word_dict_len - 1) + ctx_p1 = fluid.create_random_int_lodtensor( + lod, base_shape, place, low=0, high=word_dict_len - 1) + ctx_p2 = fluid.create_random_int_lodtensor( + lod, base_shape, place, low=0, high=word_dict_len - 1) + mark = fluid.create_random_int_lodtensor( + lod, base_shape, place, low=0, high=mark_dict_len - 1) + + # Construct feed as a dictionary of {feed_target_name: feed_target_data} + # and results will contain a list of data corresponding to fetch_targets. 
+ assert feed_target_names[0] == 'word_data' + assert feed_target_names[1] == 'verb_data' + assert feed_target_names[2] == 'ctx_n2_data' + assert feed_target_names[3] == 'ctx_n1_data' + assert feed_target_names[4] == 'ctx_0_data' + assert feed_target_names[5] == 'ctx_p1_data' + assert feed_target_names[6] == 'ctx_p2_data' + assert feed_target_names[7] == 'mark_data' + + results = exe.run( + inference_program, + feed={ + feed_target_names[0]: word, + feed_target_names[1]: pred, + feed_target_names[2]: ctx_n2, + feed_target_names[3]: ctx_n1, + feed_target_names[4]: ctx_0, + feed_target_names[5]: ctx_p1, + feed_target_names[6]: ctx_p2, + feed_target_names[7]: mark + }, + fetch_list=fetch_targets, + return_numpy=False) + print(results[0].lod()) + np_data = np.array(results[0]) + print("Inference Shape: ", np_data.shape) + + +def main(use_cuda, is_local=True): + if use_cuda and not fluid.core.is_compiled_with_cuda(): + return + + # Directory for saving the trained model + save_dirname = "label_semantic_roles.inference.model" + + train(use_cuda, save_dirname, is_local) + infer(use_cuda, save_dirname) + + +if __name__ == '__main__': + args = parse_args() + use_cuda = args.use_gpu + PASS_NUM = args.num_epochs + main(use_cuda) diff --git a/doc/paddle/user_guides/nlp_case/machine_translation/.gitignore b/doc/paddle/user_guides/nlp_case/machine_translation/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..6129b9e8645010fcb8372d9dc3dbb568dfa80907 --- /dev/null +++ b/doc/paddle/user_guides/nlp_case/machine_translation/.gitignore @@ -0,0 +1,9 @@ +data/wmt14 +data/pre-wmt14 +pretrained/wmt14_model +gen.log +gen_result +train.log +dataprovider_copy_1.py +*.pyc +multi-bleu.perl diff --git a/doc/paddle/user_guides/nlp_case/machine_translation/README.cn.md b/doc/paddle/user_guides/nlp_case/machine_translation/README.cn.md new file mode 100644 index 0000000000000000000000000000000000000000..b255601f1742828862e78207315f8544b1c8f2d6 --- /dev/null +++ b/doc/paddle/user_guides/nlp_case/machine_translation/README.cn.md @@ -0,0 +1,633 @@ +# 机器翻译 + +本教程源代码目录在[book/machine_translation](https://github.com/PaddlePaddle/book/tree/develop/08.machine_translation),初次使用请您参考[Book文档使用说明](https://github.com/PaddlePaddle/book/blob/develop/README.cn.md#运行这本书)。 + +### 说明 +1. 硬件要求 本文可支持在CPU、GPU下运行 +2. 对docker file cuda/cudnn的支持 如果您使用了本文配套的docker镜像,请注意:该镜像对GPU的支持仅限于CUDA 8,cuDNN 5 +3. 文档中代码和seq2seq.py不一致的问题 请注意:为使本文更加易读易用,我们拆分、调整了seq2seq.py的代码并放入本文。本文中代码与seq2seq.py的运行结果一致,如希望直接看到训练脚本输出效果,可运行[seq2seq.py](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/user_guides/nlp_case/machine_translation/seq2seq.py)。 + +## 背景介绍 + +机器翻译(machine translation, MT)是用计算机来实现不同语言之间翻译的技术。被翻译的语言通常称为源语言(source language),翻译成的结果语言称为目标语言(target language)。机器翻译即实现从源语言到目标语言转换的过程,是自然语言处理的重要研究领域之一。 + +早期机器翻译系统多为基于规则的翻译系统,需要由语言学家编写两种语言之间的转换规则,再将这些规则录入计算机。该方法对语言学家的要求非常高,而且我们几乎无法总结一门语言会用到的所有规则,更何况两种甚至更多的语言。因此,传统机器翻译方法面临的主要挑战是无法得到一个完备的规则集合\[[1](#参考文献)\]。 + +为解决以上问题,统计机器翻译(Statistical Machine Translation, SMT)技术应运而生。在统计机器翻译技术中,转化规则是由机器自动从大规模的语料中学习得到的,而非我们人主动提供规则。因此,它克服了基于规则的翻译系统所面临的知识获取瓶颈的问题,但仍然存在许多挑战:1)人为设计许多特征(feature),但永远无法覆盖所有的语言现象;2)难以利用全局的特征;3)依赖于许多预处理环节,如词语对齐、分词或符号化(tokenization)、规则抽取、句法分析等,而每个环节的错误会逐步累积,对翻译的影响也越来越大。 + +近年来,深度学习技术的发展为解决上述挑战提供了新的思路。将深度学习应用于机器翻译任务的方法大致分为两类:1)仍以统计机器翻译系统为框架,只是利用神经网络来改进其中的关键模块,如语言模型、调序模型等(见图1的左半部分);2)不再以统计机器翻译系统为框架,而是直接用神经网络将源语言映射到目标语言,即端到端的神经网络机器翻译(End-to-End Neural Machine Translation, End-to-End NMT)(见图1的右半部分),简称为NMT模型。 +
+
+图1. 基于神经网络的机器翻译系统 +
+ +本教程主要介绍NMT模型,以及如何用PaddlePaddle来训练一个NMT模型。 + +## 效果展示 + +以中英翻译(中文翻译到英文)的模型为例,当模型训练完毕时,如果输入如下已分词的中文句子: +```text +这些 是 希望 的 曙光 和 解脱 的 迹象 . +``` +如果设定显示翻译结果的条数(即[柱搜索算法](#柱搜索算法)的宽度)为3,生成的英语句子如下: +```text +0 -5.36816 These are signs of hope and relief . +1 -6.23177 These are the light of hope and relief . +2 -7.7914 These are the light of hope and the relief of hope . +``` + +- 左起第一列是生成句子的序号;左起第二列是该条句子的得分(从大到小),分值越高越好;左起第三列是生成的英语句子。 + +- 另外有两个特殊标志:``表示句子的结尾,``表示未登录词(unknown word),即未在训练字典中出现的词。 + +## 模型概览 + +本节依次介绍GRU(Gated Recurrent Unit,门控循环单元),双向循环神经网络(Bi-directional Recurrent Neural Network),NMT模型中典型的编码器-解码器(Encoder-Decoder)框架和注意力(Attention)机制,以及柱搜索(beam search)算法。 + +### GRU + +我们已经在[情感分析](https://github.com/PaddlePaddle/book/blob/develop/06.understand_sentiment/README.cn.md)一章中介绍了循环神经网络(RNN)及长短时间记忆网络(LSTM)。相比于简单的RNN,LSTM增加了记忆单元(memory cell)、输入门(input gate)、遗忘门(forget gate)及输出门(output gate),这些门及记忆单元组合起来大大提升了RNN处理远距离依赖问题的能力。 + +GRU\[[2](#参考文献)\]是Cho等人在LSTM上提出的简化版本,也是RNN的一种扩展,如下图所示。GRU单元只有两个门: +- 重置门(reset gate):如果重置门关闭,会忽略掉历史信息,即历史不相干的信息不会影响未来的输出。 +- 更新门(update gate):将LSTM的输入门和遗忘门合并,用于控制历史信息对当前时刻隐层输出的影响。如果更新门接近1,会把历史信息传递下去。 +

+
+图2. GRU(门控循环单元) +
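+
+作为补充(下面给出的是一种常见的 GRU 标准公式写法,符号仅在此处使用,并非原文插图内容),重置门 $r_t$、更新门 $z_t$ 与隐层状态 $h_t$ 的计算可以写作:
+
+$$r_t=\sigma \left ( W_r x_t+U_r h_{t-1} \right )$$
+$$z_t=\sigma \left ( W_z x_t+U_z h_{t-1} \right )$$
+$$\tilde{h}_t=\tanh \left ( W x_t+U\left ( r_t\odot h_{t-1} \right ) \right )$$
+$$h_t=z_t\odot h_{t-1}+\left ( 1-z_t \right )\odot \tilde{h}_t$$
+
+可以看到:当更新门 $z_t$ 接近 1 时,$h_t$ 主要由 $h_{t-1}$ 构成,历史信息被传递下去;当重置门 $r_t$ 接近 0 时,候选状态 $\tilde{h}_t$ 基本不再依赖历史信息。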

+ +一般来说,具有短距离依赖属性的序列,其重置门比较活跃;相反,具有长距离依赖属性的序列,其更新门比较活跃。另外,Chung等人\[[3](#参考文献)\]通过多组实验表明,GRU虽然参数更少,但是在多个任务上都和LSTM有相近的表现。 + +### 双向循环神经网络 + +我们已经在[语义角色标注](https://github.com/PaddlePaddle/book/blob/develop/07.label_semantic_roles/README.cn.md)一章中介绍了一种双向循环神经网络,这里介绍Bengio团队在论文\[[2](#参考文献),[4](#参考文献)\]中提出的另一种结构。该结构的目的是输入一个序列,得到其在每个时刻的特征表示,即输出的每个时刻都用定长向量表示到该时刻的上下文语义信息。 + +具体来说,该双向循环神经网络分别在时间维以顺序和逆序——即前向(forward)和后向(backward)——依次处理输入序列,并将每个时间步RNN的输出拼接成为最终的输出层。这样每个时间步的输出节点,都包含了输入序列中当前时刻完整的过去和未来的上下文信息。下图展示的是一个按时间步展开的双向循环神经网络。该网络包含一个前向和一个后向RNN,其中有六个权重矩阵:输入到前向隐层和后向隐层的权重矩阵($W_1, W_3$),隐层到隐层自己的权重矩阵($W_2,W_5$),前向隐层和后向隐层到输出层的权重矩阵($W_4, W_6$)。注意,该网络的前向隐层和后向隐层之间没有连接。 + + +
+
+图3. 按时间步展开的双向循环神经网络
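+
+按照上文对六个权重矩阵的描述,该网络每个时间步的计算可以粗略地形式化为(此处 $f$、$g$ 表示隐层和输出层的激活函数,属于示意性补充,并非原文插图内容):
+
+$$\overrightarrow{h_t}=f\left ( W_1 x_t+W_2 \overrightarrow{h_{t-1}} \right ), \qquad \overleftarrow{h_t}=f\left ( W_3 x_t+W_5 \overleftarrow{h_{t+1}} \right )$$
+$$y_t=g\left ( W_4 \overrightarrow{h_t}+W_6 \overleftarrow{h_t} \right )$$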
+ +### 编码器-解码器框架 + +编码器-解码器(Encoder-Decoder)\[[2](#参考文献)\]框架用于解决由一个任意长度的源序列到另一个任意长度的目标序列的变换问题。即编码阶段将整个源序列编码成一个向量,解码阶段通过最大化预测序列概率,从中解码出整个目标序列。编码和解码的过程通常都使用RNN实现。 + +
+
+图4. 编码器-解码器框架
+ + +#### 编码器 + +编码阶段分为三步: + +1. one-hot vector表示:将源语言句子$x = \left(x_1,x_2,...,x_T\right)$的每个词$x_i$表示成一个列向量$w_i$。这个向量$w_i$的维度与词汇表大小$\left | V \right |$ 相同,并且只有一个维度上有值1(该位置对应该词在词汇表中的位置),其余全是0。 + +2. 映射到低维语义空间的词向量:one-hot vector表示存在两个问题,1)生成的向量维度往往很大,容易造成维数灾难;2)难以刻画词与词之间的关系(如语义相似性,也就是无法很好地表达语义)。因此,需再one-hot vector映射到低维的语义空间,由一个固定维度的稠密向量(称为词向量)表示。记映射矩阵为$C\epsilon R^{K\times \left | V \right |}$,用$s_i=Cw_i$表示第$i$个词的词向量,$K$为向量维度。 + +3. 用RNN编码源语言词序列:这一过程的计算公式为$h_i=\phi_{\theta} \left ( h_{i-1}, s_i \right )$,其中$h_0$是一个全零的向量,$\phi _{\theta}$是一个非线性激活函数,最后得到的$\mathbf{h}=\left(h_1,..., h_T \right)$就是RNN依次读入源语言$T$个词的状态编码序列。整句话的向量表示可以采用$\mathbf{h}$在最后一个时间步$T$的状态编码,或使用时间维上的池化(pooling)结果。 + +第3步也可以使用双向循环神经网络实现更复杂的句编码表示,具体可以用双向GRU实现。前向GRU按照词序列$(x_1,x_2,...,x_T)$的顺序依次编码源语言端词,并得到一系列隐层状态$(\overrightarrow{h_1},\overrightarrow{h_2},...,\overrightarrow{h_T})$。类似的,后向GRU按照$(x_T,x_{T-1},...,x_1)$的顺序依次编码源语言端词,得到$(\overleftarrow{h_1},\overleftarrow{h_2},...,\overleftarrow{h_T})$。最后对于词$x_i$,通过拼接两个GRU的结果得到它的隐层状态,即$h_i=\left [ \overrightarrow{h_i^T},\overleftarrow{h_i^T} \right ]^{T}$。 +
+
+图5. 使用双向GRU的编码器
+ +#### 解码器 + +机器翻译任务的训练过程中,解码阶段的目标是最大化下一个正确的目标语言词的概率。思路是: +1. 每一个时刻,根据源语言句子的编码信息(又叫上下文向量,context vector)$c$、真实目标语言序列的第$i$个词$u_i$和$i$时刻RNN的隐层状态$z_i$,计算出下一个隐层状态$z_{i+1}$。计算公式如下: + +
+$$z_{i+1}=\phi _{\theta '}\left ( c,u_i,z_i \right )$$
+ +其中$\phi _{\theta '}$是一个非线性激活函数;$c$是源语言句子的上下文向量,在不使用注意力机制时,如果[编码器](#编码器)的输出是源语言句子编码后的最后一个元素,则可以定义$c=h_T$;$u_i$是目标语言序列的第$i$个单词,$u_0$是目标语言序列的开始标记``,表示解码开始;$z_i$是$i$时刻解码RNN的隐层状态,$z_0$是一个全零的向量。 + +2. 将$z_{i+1}$通过`softmax`归一化,得到目标语言序列的第$i+1$个单词的概率分布$p_{i+1}$。概率分布公式如下: + +
+$$p\left ( u_{i+1}|u_{<i+1},\mathbf{x} \right )=softmax(W_sz_{i+1}+b_z)$$
+ +其中$W_sz_{i+1}+b_z$是对每个可能的输出单词进行打分,再用softmax归一化就可以得到第$i+1$个词的概率$p_{i+1}$。 + +3. 根据$p_{i+1}$和$u_{i+1}$计算代价。 + +4. 重复步骤1~3,直到目标语言序列中的所有词处理完毕。 + +机器翻译任务的生成过程,通俗来讲就是根据预先训练的模型来翻译源语言句子。生成过程中的解码阶段和上述训练过程的有所差异,具体介绍请见[柱搜索算法](#柱搜索算法)。 + +### 注意力机制 + +如果编码阶段的输出是一个固定维度的向量,会带来以下两个问题:1)不论源语言序列的长度是5个词还是50个词,如果都用固定维度的向量去编码其中的语义和句法结构信息,对模型来说是一个非常高的要求,特别是对长句子序列而言;2)直觉上,当人类翻译一句话时,会对与当前译文更相关的源语言片段上给予更多关注,且关注点会随着翻译的进行而改变。而固定维度的向量则相当于,任何时刻都对源语言所有信息给予了同等程度的关注,这是不合理的。因此,Bahdanau等人\[[4](#参考文献)\]引入注意力(attention)机制,可以对编码后的上下文片段进行解码,以此来解决长句子的特征学习问题。下面介绍在注意力机制下的解码器结构。 + +与简单的解码器不同,这里$z_i$的计算公式为: + +
+$$z_{i+1}=\phi _{\theta '}\left ( c_i,u_i,z_i \right )$$
+ +可见,源语言句子的编码向量表示为第$i$个词的上下文片段$c_i$,即针对每一个目标语言中的词$u_i$,都有一个特定的$c_i$与之对应。$c_i$的计算公式如下: + +
+$$c_i=\sum _{j=1}^{T}a_{ij}h_j$$
+ +从公式中可以看出,注意力机制是通过对编码器中各时刻的RNN状态$h_j$进行加权平均实现的。权重$a_{ij}$表示目标语言中第$i$个词对源语言中第$j$个词的注意力大小,$a_{ij}$的计算公式如下: + +
+$$a_{ij} = \frac{exp(e_{ij})}{\sum_{k=1}^{T}exp(e_{ik})}, \qquad e_{ij}=align(z_i,h_j)$$
+ +其中,$align$可以看作是一个对齐模型,用来衡量目标语言中第$i$个词和源语言中第$j$个词的匹配程度。具体而言,这个程度是通过解码RNN的第$i$个隐层状态$z_i$和源语言句子的第$j$个上下文片段$h_j$计算得到的。传统的对齐模型中,目标语言的每个词明确对应源语言的一个或多个词(hard alignment);而在注意力模型中采用的是soft alignment,即任何两个目标语言和源语言词间均存在一定的关联,且这个关联强度是由模型计算得到的实数,因此可以融入整个NMT框架,并通过反向传播算法进行训练。 + +

+
+图6. 基于注意力机制的解码器 +

+ + +### 柱搜索算法 + +柱搜索([beam search](http://en.wikipedia.org/wiki/Beam_search))是一种启发式图搜索算法,用于在图或树中搜索有限集合中的最优扩展节点,通常用在解空间非常大的系统(如机器翻译、语音识别)中,原因是内存无法装下图或树中所有展开的解。如在机器翻译任务中希望翻译“`你好`”,就算目标语言字典中只有3个词(``, ``, `hello`),也可能生成无限句话(`hello`循环出现的次数不定),为了找到其中较好的翻译结果,我们可采用柱搜索算法。 + +柱搜索算法使用广度优先策略建立搜索树,在树的每一层,按照启发代价(heuristic cost)(本教程中,为生成词的log概率之和)对节点进行排序,然后仅留下预先确定的个数(文献中通常称为beam width、beam size、柱宽度等)的节点。只有这些节点会在下一层继续扩展,其他节点就被剪掉了,也就是说保留了质量较高的节点,剪枝了质量较差的节点。因此,搜索所占用的空间和时间大幅减少,但缺点是无法保证一定获得最优解。 + +使用柱搜索算法的解码阶段,目标是最大化生成序列的概率。思路是: +1. 每一个时刻,根据源语言句子的编码信息$c$、生成的第$i$个目标语言序列单词$u_i$和$i$时刻RNN的隐层状态$z_i$,计算出下一个隐层状态$z_{i+1}$。 + +2. 将$z_{i+1}$通过`softmax`归一化,得到目标语言序列的第$i+1$个单词的概率分布$p_{i+1}$。 + +3. 根据$p_{i+1}$采样出单词$u_{i+1}$。 + +4. 重复步骤1~3,直到获得句子结束标记``或超过句子的最大生成长度为止。 + +注意:$z_{i+1}$和$p_{i+1}$的计算公式同[解码器](#解码器)中的一样。且由于生成时的每一步都是通过贪心法实现的,因此并不能保证得到全局最优解。 + +## 数据介绍 + +本教程使用[WMT-16](http://www.statmt.org/wmt16/)新增的[multimodal task](http://www.statmt.org/wmt16/multimodal-task.html)中的[translation task](http://www.statmt.org/wmt16/multimodal-task.html#task1)的数据集。该数据集为英德翻译数据,包含29001条训练数据,1000条测试数据。 + +### 数据预处理 + +我们的预处理流程包括两步: + +- 将每个源语言到目标语言的平行语料库文件合并为一个文件: + +- 合并每个`XXX.src`和`XXX.trg`文件为`XXX`。 + +- `XXX`中的第$i$行内容为`XXX.src`中的第$i$行和`XXX.trg`中的第$i$行连接,用'\t'分隔。 + +- 创建训练数据的“源字典”和“目标字典”。每个字典都有**DICTSIZE**个单词,包括:语料中词频最高的(DICTSIZE - 3)个单词,和3个特殊符号``(序列的开始)、``(序列的结束)和``(未登录词)。 + +### 示例数据 + +为了验证训练流程,PaddlePaddle接口`paddle.dataset.wmt16`中提供了对该数据集[预处理后的版本](http://paddlemodels.bj.bcebos.com/wmt/wmt16.tar.gz),调用该接口即可直接使用,因为数据规模限制,这里只作为示例使用,在相应的测试集上具有一定效果但在更多测试数据上的效果无法保证。 + +## 模型配置说明 + +下面我们开始根据输入数据的形式配置模型。首先引入所需的库函数以及定义全局变量: + +```python +from __future__ import print_function +import os +import six + +import numpy as np +import paddle +import paddle.fluid as fluid +import paddle.fluid.layers as layers + +dict_size = 30000 # 词典大小 +bos_id = 0 # 词典中start token对应的id +eos_id = 1 # 词典中end token对应的id +source_dict_size = target_dict_size = dict_size # 源/目标语言字典大小 +word_dim = 512 # 词向量维度 +hidden_dim = 512 # 编码器中的隐层大小 +decoder_size = hidden_dim # 解码器中的隐层大小 +max_length = 256 # 解码生成句子的最大长度 +beam_size = 4 # beam search的柱宽度 +batch_size = 64 # batch 中的样本数 + +model_file = "machine_translation" +``` + +接着定义所需要的数据输入: + +```python +def data_func(is_train=True): + # 源语言source数据 + src = fluid.data(name="src", shape=[None, None], dtype="int64") + src_sequence_length = fluid.data(name="src_sequence_length", + shape=[None], + dtype="int64") + inputs = [src, src_sequence_length] + # 训练时还需要目标语言target和label数据 + if is_train: + trg = fluid.data(name="trg", shape=[None, None], dtype="int64") + trg_sequence_length = fluid.data(name="trg_sequence_length", + shape=[None], + dtype="int64") + label = fluid.data(name="label", shape=[None, None], dtype="int64") + inputs += [trg, trg_sequence_length, label] + # data loader + loader = fluid.io.DataLoader.from_generator(feed_list=inputs, + capacity=10, + iterable=True, + use_double_buffer=True) + return inputs, loader +``` + +然后如下实现使用双向GRU的编码器: + +```python +def encoder(src_embedding, src_sequence_length): + # 使用GRUCell构建前向RNN + encoder_fwd_cell = layers.GRUCell(hidden_size=hidden_dim) + encoder_fwd_output, fwd_state = layers.rnn( + cell=encoder_fwd_cell, + inputs=src_embedding, + sequence_length=src_sequence_length, + time_major=False, + is_reverse=False) + # 使用GRUCell构建反向RNN + encoder_bwd_cell = layers.GRUCell(hidden_size=hidden_dim) + encoder_bwd_output, bwd_state = layers.rnn( + cell=encoder_bwd_cell, + inputs=src_embedding, + sequence_length=src_sequence_length, + time_major=False, + is_reverse=True) + # 拼接前向与反向GRU的编码结果得到h + 
encoder_output = layers.concat( + input=[encoder_fwd_output, encoder_bwd_output], axis=2) + encoder_state = layers.concat(input=[fwd_state, bwd_state], axis=1) + return encoder_output, encoder_state +``` + +再实现基于注意力机制的解码器: + - 首先通过 Cell 定义解码器中单步的计算,即$z_{i+1}=\phi _{\theta '}\left ( c_i,u_i,z_i \right )$,这里使用 GRU 并加上注意力机制(Additive Attention),代码如下: + + ```python + class DecoderCell(layers.RNNCell): + def __init__(self, hidden_size): + self.hidden_size = hidden_size + self.gru_cell = layers.GRUCell(hidden_size) + + def attention(self, hidden, encoder_output, encoder_output_proj, + encoder_padding_mask): + # 定义attention用以计算context,即 c_i,这里使用Bahdanau attention机制 + decoder_state_proj = layers.unsqueeze( + layers.fc(hidden, size=self.hidden_size, bias_attr=False), [1]) + mixed_state = fluid.layers.elementwise_add( + encoder_output_proj, + layers.expand(decoder_state_proj, + [1, layers.shape(decoder_state_proj)[1], 1])) + attn_scores = layers.squeeze( + layers.fc(input=mixed_state, + size=1, + num_flatten_dims=2, + bias_attr=False), [2]) + if encoder_padding_mask is not None: + attn_scores = layers.elementwise_add(attn_scores, + encoder_padding_mask) + attn_scores = layers.softmax(attn_scores) + context = layers.reduce_sum(layers.elementwise_mul(encoder_output, + attn_scores, + axis=0), + dim=1) + return context + + def call(self, + step_input, + hidden, + encoder_output, + encoder_output_proj, + encoder_padding_mask=None): + # Bahdanau attention + context = self.attention(hidden, encoder_output, encoder_output_proj, + encoder_padding_mask) + step_input = layers.concat([step_input, context], axis=1) + # GRU + output, new_hidden = self.gru_cell(step_input, hidden) + return output, new_hidden + ``` + + - 基于定义的单步计算,使用 `fluid.layers.rnn` 和 `fluid.layers.dynamic_decode` 分别实现用于训练和预测生成的多步循环解码器,如下: + + ```python + def decoder(encoder_output, + encoder_output_proj, + encoder_state, + encoder_padding_mask, + trg=None, + is_train=True): + # 定义 RNN 所需要的组件 + decoder_cell = DecoderCell(hidden_size=decoder_size) + decoder_initial_states = layers.fc(encoder_state, + size=decoder_size, + act="tanh") + trg_embeder = lambda x: fluid.embedding(input=x, + size=[target_dict_size, hidden_dim], + dtype="float32", + param_attr=fluid.ParamAttr( + name="trg_emb_table")) + output_layer = lambda x: layers.fc(x, + size=target_dict_size, + num_flatten_dims=len(x.shape) - 1, + param_attr=fluid.ParamAttr(name= + "output_w")) + if is_train: # 训练 + # 训练时使用 `layers.rnn` 构造由 `cell` 指定的循环神经网络 + # 循环的每一步从 `inputs` 中切片产生输入,并执行 `cell.call` + decoder_output, _ = layers.rnn( + cell=decoder_cell, + inputs=trg_embeder(trg), + initial_states=decoder_initial_states, + time_major=False, + encoder_output=encoder_output, + encoder_output_proj=encoder_output_proj, + encoder_padding_mask=encoder_padding_mask) + decoder_output = output_layer(decoder_output) + else: # 基于 beam search 的预测生成 + # beam search 时需要将用到的形为 `[batch_size, ...]` 的张量扩展为 `[batch_size* beam_size, ...]` + encoder_output = layers.BeamSearchDecoder.tile_beam_merge_with_batch( + encoder_output, beam_size) + encoder_output_proj = layers.BeamSearchDecoder.tile_beam_merge_with_batch( + encoder_output_proj, beam_size) + encoder_padding_mask = layers.BeamSearchDecoder.tile_beam_merge_with_batch( + encoder_padding_mask, beam_size) + # BeamSearchDecoder 定义了单步解码的操作:`cell.call` + `beam_search_step` + beam_search_decoder = layers.BeamSearchDecoder(cell=decoder_cell, + start_token=bos_id, + end_token=eos_id, + beam_size=beam_size, + embedding_fn=trg_embeder, + output_fn=output_layer) + # 使用 
layers.dynamic_decode 动态解码 + # 重复执行 `decoder.step()` 直到其返回的表示完成状态的张量中的值全部为True或解码步骤达到 `max_step_num` + decoder_output, _ = layers.dynamic_decode( + decoder=beam_search_decoder, + inits=decoder_initial_states, + max_step_num=max_length, + output_time_major=False, + encoder_output=encoder_output, + encoder_output_proj=encoder_output_proj, + encoder_padding_mask=encoder_padding_mask) + + return decoder_output + ``` + +接着就可以使用编码器和解码器定义整个网络,如下: + +```python +def model_func(inputs, is_train=True): + # 源语言输入 + src = inputs[0] + src_sequence_length = inputs[1] + src_embeder = lambda x: fluid.embedding( + input=x, + size=[source_dict_size, hidden_dim], + dtype="float32", + param_attr=fluid.ParamAttr(name="src_emb_table")) + src_embedding = src_embeder(src) + + # 编码器 + encoder_output, encoder_state = encoder(src_embedding, src_sequence_length) + + encoder_output_proj = layers.fc(input=encoder_output, + size=decoder_size, + num_flatten_dims=2, + bias_attr=False) + src_mask = layers.sequence_mask(src_sequence_length, + maxlen=layers.shape(src)[1], + dtype="float32") + encoder_padding_mask = (src_mask - 1.0) * 1e9 + + # 目标语言输入,训练时有、预测生成时无该输入 + trg = inputs[2] if is_train else None + + # 解码器 + output = decoder(encoder_output=encoder_output, + encoder_output_proj=encoder_output_proj, + encoder_state=encoder_state, + encoder_padding_mask=encoder_padding_mask, + trg=trg, + is_train=is_train) + return output +``` + +为了进行训练还需要定义损失函数和优化器,如下: + +```python +def loss_func(logits, label, trg_sequence_length): + probs = layers.softmax(logits) + # 使用交叉熵损失函数 + loss = layers.cross_entropy(input=probs, label=label) + # 根据长度生成掩码,并依此剔除 padding 部分计算的损失 + trg_mask = layers.sequence_mask(trg_sequence_length, + maxlen=layers.shape(logits)[1], + dtype="float32") + avg_cost = layers.reduce_sum(loss * trg_mask) / layers.reduce_sum(trg_mask) + return avg_cost + +def optimizer_func(): + # 定义梯度裁剪策略 + clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=5.0) + # 定义先增后降的学习率策略 + lr_decay = fluid.layers.learning_rate_scheduler.noam_decay(hidden_dim, 1000) + # 定义优化器 + return fluid.optimizer.Adam( + learning_rate=lr_decay, + regularization=fluid.regularizer.L2DecayRegularizer( + regularization_coeff=1e-4), + grad_clip=clip) +``` + +## 训练模型 + +### 定义数据生成器 + +使用内置的`paddle.dataset.wmt16.train`接口定义数据生成器,其每次产生一条样本,shuffle和组完batch后对batch内的样本进行padding作为训练的输入;同时定义预测使用的数据生成器,如下: + +```python +def inputs_generator(batch_size, pad_id, is_train=True): + data_generator = fluid.io.shuffle( + paddle.dataset.wmt16.train(source_dict_size, target_dict_size), + buf_size=10000) if is_train else paddle.dataset.wmt16.test( + source_dict_size, target_dict_size) + batch_generator = fluid.io.batch(data_generator, batch_size=batch_size) + + # 对 batch 内的数据进行 padding + def _pad_batch_data(insts, pad_id): + seq_lengths = np.array(list(map(len, insts)), dtype="int64") + max_len = max(seq_lengths) + pad_data = np.array( + [inst + [pad_id] * (max_len - len(inst)) for inst in insts], + dtype="int64") + return pad_data, seq_lengths + + def _generator(): + for batch in batch_generator(): + batch_src = [ins[0] for ins in batch] + src_data, src_lengths = _pad_batch_data(batch_src, pad_id) + inputs = [src_data, src_lengths] + if is_train: #训练时包含 target 和 label 数据 + batch_trg = [ins[1] for ins in batch] + trg_data, trg_lengths = _pad_batch_data(batch_trg, pad_id) + batch_lbl = [ins[2] for ins in batch] + lbl_data, _ = _pad_batch_data(batch_lbl, pad_id) + inputs += [trg_data, trg_lengths, lbl_data] + yield inputs + + return _generator +``` + +### 构建训练程序 + 
+定义用于训练的`Program`,在其中创建训练的网络结构并添加优化器。同时还要定义用于初始化的`Program`,在创建训练网络的同时隐式的加入参数初始化的操作。 + +```python +train_prog = fluid.Program() +startup_prog = fluid.Program() +with fluid.program_guard(train_prog, startup_prog): + with fluid.unique_name.guard(): + # 训练时: + # inputs = [src, src_sequence_length, trg, trg_sequence_length, label] + inputs, loader = data_func(is_train=True) + logits = model_func(inputs, is_train=True) + loss = loss_func(logits, inputs[-1], inputs[-2]) + optimizer = optimizer_func() + optimizer.minimize(loss) +``` + +### 定义训练环境 + +定义您的训练环境,包括指定所用的设备、绑定训练使用的数据源和定义执行器。 + +```python +# 设置训练设备 +use_cuda = False +places = fluid.cuda_places() if use_cuda else fluid.cpu_places() +# 设置数据源 +loader.set_batch_generator(inputs_generator(batch_size, + eos_id, + is_train=True), + places=places) +# 定义执行器,初始化参数并绑定Program +exe = fluid.Executor(places[0]) +exe.run(startup_prog) +prog = fluid.CompiledProgram(train_prog).with_data_parallel( + loss_name=loss.name) +``` + +### 训练主循环 + +通过训练循环数(EPOCH_NUM)来进行训练循环,并且每次循环都保存训练好的参数。注意,循环训练前要首先执行初始化的`Program`来初始化参数。另外作为示例这里EPOCH_NUM设置较小,该数据集上实际大概需要20个epoch左右收敛。 + +```python +EPOCH_NUM = 2 +for pass_id in six.moves.xrange(EPOCH_NUM): + batch_id = 0 + for data in loader(): + loss_val = exe.run(prog, feed=data, fetch_list=[loss])[0] + print('pass_id: %d, batch_id: %d, loss: %f' % + (pass_id, batch_id, loss_val)) + batch_id += 1 + # 保存模型 + fluid.save(train_prog, model_file) +``` + +## 应用模型 + +### 构建预测程序 + +定义用于预测的`Program`,在其中创建预测的网络结构。 + +```python +infer_prog = fluid.Program() +startup_prog = fluid.Program() +with fluid.program_guard(infer_prog, startup_prog): + with fluid.unique_name.guard(): + inputs, loader = data_func(is_train=False) + predict_seqs = model_func(inputs, is_train=False) +``` + +### 定义预测环境 + +定义您的预测环境,和训练类似,包括指定所用的设备、绑定训练使用的数据源和定义执行器。 + +```python +use_cuda = False +# 设置训练设备 +places = fluid.cuda_places() if use_cuda else fluid.cpu_places() +# 设置数据源 +loader.set_batch_generator(inputs_generator(batch_size, + eos_id, + is_train=False), + places=places) +# 定义执行器,加载参数并绑定Program +exe = fluid.Executor(places[0]) +exe.run(startup_prog) +fluid.load(infer_prog, model_file, exe) +prog = fluid.CompiledProgram(infer_prog).with_data_parallel() +``` + +### 测试 +循环测试数据进行预测,生成过程对于每个测试数据都会将源语言句子和`beam_size`个生成句子打印输出,为打印出正确的句子还需要使用id到word映射的词典。如下: + +```python +# 获取 id 到 word 映射的词典 +src_idx2word = paddle.dataset.wmt16.get_dict( + "en", source_dict_size, reverse=True) +trg_idx2word = paddle.dataset.wmt16.get_dict( + "de", target_dict_size, reverse=True) +# 循环测试数据 +for data in loader(): + seq_ids = exe.run(prog, feed=data, fetch_list=[predict_seqs])[0] + for ins_idx in range(seq_ids.shape[0]): + print("Original sentence:") + src_seqs = np.array(data[0]["src"]) + print(" ".join([ + src_idx2word[idx] for idx in src_seqs[ins_idx][1:] + if idx != eos_id + ])) + print("Translated sentence:") + for beam_idx in range(beam_size): + seq = [ + trg_idx2word[idx] for idx in seq_ids[ins_idx, :, beam_idx] + if idx != eos_id + ] + print(" ".join(seq).encode("utf8")) +``` + +可以观察到如下的预测结果输出: +```txt +Original sentence: +A man in an orange hat starring at something . +Translated sentence: +Ein Mann mit einem orangen Schutzhelm starrt auf etwas . +Ein Mann mit einem gelben Schutzhelm starrt auf etwas . +Ein Mann mit einem gelben Schutzhelm starrt etwas an . +Ein Mann mit einem orangen Schutzhelm starrt etwas an . 
+``` + +## 总结 + +端到端的神经网络机器翻译是近几年兴起的一种全新的机器翻译方法。本章中,我们介绍了NMT中典型的“编码器-解码器”框架。由于NMT是一个典型的Seq2Seq(Sequence to Sequence,序列到序列)学习问题,因此,Seq2Seq中的query改写(query rewriting)、摘要、单轮对话等问题都可以用本教程的模型来解决。 + + +## 参考文献 + +1. Koehn P. [Statistical machine translation](https://books.google.com.hk/books?id=4v_Cx1wIMLkC&printsec=frontcover&hl=zh-CN&source=gbs_ge_summary_r&cad=0#v=onepage&q&f=false)[M]. Cambridge University Press, 2009. +2. Cho K, Van Merriënboer B, Gulcehre C, et al. [Learning phrase representations using RNN encoder-decoder for statistical machine translation](http://www.aclweb.org/anthology/D/D14/D14-1179.pdf)[C]//Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), 2014: 1724-1734. +3. Chung J, Gulcehre C, Cho K H, et al. [Empirical evaluation of gated recurrent neural networks on sequence modeling](https://arxiv.org/abs/1412.3555)[J]. arXiv preprint arXiv:1412.3555, 2014. +4. Bahdanau D, Cho K, Bengio Y. [Neural machine translation by jointly learning to align and translate](https://arxiv.org/abs/1409.0473)[C]//Proceedings of ICLR 2015, 2015. +5. Papineni K, Roukos S, Ward T, et al. [BLEU: a method for automatic evaluation of machine translation](http://dl.acm.org/citation.cfm?id=1073135)[C]//Proceedings of the 40th annual meeting on association for computational linguistics. Association for Computational Linguistics, 2002: 311-318. + +
+
+知识共享许可协议
本教程PaddlePaddle 创作,采用 知识共享 署名-相同方式共享 4.0 国际 许可协议进行许可。 diff --git a/doc/paddle/user_guides/nlp_case/machine_translation/README.md b/doc/paddle/user_guides/nlp_case/machine_translation/README.md new file mode 100644 index 0000000000000000000000000000000000000000..3d3796f4c6cf5136092aca69f8d0045403f4c236 --- /dev/null +++ b/doc/paddle/user_guides/nlp_case/machine_translation/README.md @@ -0,0 +1,501 @@ +# Machine Translation + +Source code of this tutorial is in [book/machine_translation](https://github.com/PaddlePaddle/book/tree/develop/08.machine_translation). For users new to Paddle book, please refer to [the user guide of Book Documentation](https://github.com/PaddlePaddle/book/blob/develop/README.cn.md#run_the_book). + +## Background + +Machine translation is to translate different languages with computer. The language to be translated is usually called source language, and the language representing the result of translation is called target language. Machine translation is the process of transformation from source language to target language, which is an important research assignment of Natural Language Processing. + +Machine translation systems at early age were mostly rule-based translation system, which needs linguists make transformation rule between two languages and then input these rules into computer. This method requires proficient professional linguistic background, but it is hard to cover all rules of a language, let it alone two or more languages. Therefore, the major challenge of traditional machine translation method is the impossibility of a completest set of rules\[[1](#References)\]. + +To solve the problem mentioned above, Statistical Machine Translation technology emerged afterwards. For Statistical Machine Translation, transformation rules are automatically learned from a large scale corpus instead of handcrafted rule. So it tackles with the limit of obtaining knowledge in rule-based machine translation systems. However, it still faces certain challenges: 1. man-made feature can never cover all language phenomena. 2. it is hard to use global feature. 3. it depends on many pre-processing parts, such as Word Alignment, Tokenization, Rule Extraction, Parsing. Errors accumulated by those parts will have a great influence on translation. + +In recent years, Deep Learning technology proposes new solutions to overcome the bottleneck. Two methods for machine translation are realized with the aid of deep learning. 1. Based on the framework of statistical machine translation system, the neural network is in place to improve core parts, such as language model, reordering model and so on (See the left part in figure One). 2. Abandoning the framework of statistical machine translation system, it directly uses neural network to transform source language to target language, which is End-to-End Neural Machine Translation (See right part in figure One), NMT model in short. +
+
+Figure One. Neural Network Machine Translation System +
+ +In the following parts, we'll guide you through NMT model and its hands-on implementation in PaddlePaddle + +## Result Demo + +Take Chinese to English translation model as an example. For a trained model, if input the following tokenized Chinese sentence : + +```text +这些 是 希望 的 曙光 和 解脱 的 迹象 . +``` + +If it sets the entries of translation result ( e.t. the width of [beam search algorithm](#beam search algorithm)) as 3, the generated English sentence is as follows: + +```text +0 -5.36816 These are signs of hope and relief . +1 -6.23177 These are the light of hope and relief . +2 -7.7914 These are the light of hope and the relief of hope . +``` + +- The first column to the left is the serial numbers of generated sentences. The second column from left is scores of the sentences in descending order, in which higher score is better. The third column contains the generated English sentences. + +- In addition, there are two special marks. One is ``, indicating the end of a sentence and another one is ``, representing unknown word, which have never appeared in dictionary. + +## Exploration of Models + +In this section, let's scrutinize Bi-directional Recurrent Neural Network, typical Encoder-Decoder structure in NMT model and beam search algorithm. + +### Bi-directional Recurrent Neural Network + +We have introduced a bi-directional recurrent neural network in the chapter [label_semantic_roles](https://github.com/PaddlePaddle/book/blob/develop/07.label_semantic_roles/README.md). Here we introduce another network proposed by Bengio team in thesis \[[2](#References),[4](#References)\] The aim of this network is to input a sequence and get its features at each time step. Specifically, fixed-length vector is incorporated to represent contextual semantic information for each time step in the output. + +To be concrete, the Bi-directional recurrent neural network sequentially processes the input sequences in time dimension in sequential order or in reverse order, i.e., forward and backward. And the output of RNN at each time step are concatenated to be the final output layer. Hereby the output node of each time step contains complete past and future context information of current time step of input sequence. The figure below shows a bi-directional recurrent neural network expanded by time step. The network consists of a forward and a backward RNN with six weight matrices: a weight matrix ($W_1, W_3$) from input layer to the forward and backward hidden layers, and a weight matrix ($W_2, W_5$) from a hidden layer to itself (self-loop), the weight matrix from the forward hidden layer and the backward hidden layer to the output layer ($W_4, W_6$). Note that there is no connection between the forward hidden layer and the backward hidden layer. + + +
+
+Figure 2. Bi-directional Recurrent Neural Network expanded by time step. +
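+
+Following the description of the six weight matrices above, the computation at each time step can be sketched as follows (an illustrative formalization added here for clarity rather than part of the original figure, with $f$ and $g$ denoting the hidden-layer and output-layer activation functions):
+
+$$\overrightarrow{h_t}=f\left ( W_1 x_t+W_2 \overrightarrow{h_{t-1}} \right ), \qquad \overleftarrow{h_t}=f\left ( W_3 x_t+W_5 \overleftarrow{h_{t+1}} \right )$$
+$$y_t=g\left ( W_4 \overrightarrow{h_t}+W_6 \overleftarrow{h_t} \right )$$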
+ +### Encoder-Decoder Structure + +The Encoder-Decoder\[[2](#References)\] structure helps transform a source sequence with arbitrary length to another target sequence with arbitrary length. In the encoding phase, it encodes the entire source sequence into a vector. And in the decoding phase, it decodes the entire target sequence by maximizing the predicted sequence probability. The encoding and decoding process is usually implemented by RNN. + +
+
+Figure 3. The Encoder-Decoder framework.
+ + +#### Encoder + +Encoding can be done in 3 steps: + +1.One-hot vector : each word $x_i$ of the source language sentence $x=\left \{ x_1,x_2,...,x_T \right \}$ is represented as a column vector $w_i\epsilon \left \{ 0,1 \right \}^{\left | V \right |},i=1,2,...,T$. The dimension of this vector $w_i$ is the same as the vocabulary size $\left | V \right |$, and there is only one dimension with a value of 1 (the position corresponds to the position of the word in the vocabulary), and the rest are all zero. + +2.Word vector mapped to low-dimensional semantic space: one-hot vector indicates that there are two problems: 1. the dimension of generated vector is often large, which is easy to cause dimension disaster. 2. it is difficult to characterize the relationship between words and words (such as semantical similarity, that is, the semantics cannot be expressed well.) Therefore, one-hot vector needs to be mapped to a low-dimensional semantic space, represented by a dense vector(called a word vector) with fixed dimensions . The mapping matrix is $C\epsilon R^{K\times \left | V \right |}$, with $s_i=Cw_i$ representing the word vector of the $i$ word, $K$ representing the dimension of vector. + +3.Encode a source language word sequence with RNN: The calculation for this process is $h_i=\varnothing _\theta \left ( h_{i-1}, s_i \right )$, where $h_0$ is an all-zero vector. $\varnothing _\theta$ is a non-linear activation function, and the result $\mathbf{h}=\left \{ h_1,..., h_T \right \}$ is the state code sequence of source language $T$ words read by RNN respectively. The vector representation of the entire sentence can use $\mathbf{h}$ in the state code of the last time step $T$, or use the pooling result in the time dimension. + +Step 3 can also use bi-directional recurrent neural network to implement more complex sentence-coded representation, which can be implemented with bi-directional GRU. The forward GRU sequentially encodes the source language word in the order of the word sequence $(x_1, x_2,..., x_T)$, and obtains a series of hidden layer states $(\overrightarrow{h_1},\overrightarrow{h_2},. ..,\overrightarrow{h_T})$. Similarly, the backward GRU encodes the source language word in the order of $(x_T,x_{T-1},...,x_1)$, resulting in $(\overleftarrow{h_1},\overleftarrow{h_2},. ..,\overleftarrow{h_T})$. Finally, for the word $x_i$, the hidden layer state is obtained by jointing the two GRUs, namely $h_i=\left [ \overrightarrow{h_i^T},\overleftarrow{h_i^T} \right ]^{T} $. +
+
+Figure 4. The encoder using a bi-directional GRU.
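+
+As a rough sketch of the bi-directional GRU encoder described above (for illustration only: the configuration later in this chapter uses a single LSTM encoder, and the function name, layer names and sizes below are illustrative assumptions), it could be assembled with `GRUCell` and `layers.rnn` as follows:
+
+```python
+import paddle.fluid as fluid
+import paddle.fluid.layers as layers
+
+def bi_gru_encoder(src_word_id, src_sequence_length,
+                   vocab_size=30000, word_dim=512, hidden_dim=512):
+    # map source word ids to dense word vectors
+    src_embedding = fluid.embedding(
+        input=src_word_id, size=[vocab_size, word_dim], dtype='float32')
+    # forward GRU reads the sequence from left to right
+    fwd_output, fwd_state = layers.rnn(
+        cell=layers.GRUCell(hidden_size=hidden_dim),
+        inputs=src_embedding,
+        sequence_length=src_sequence_length,
+        is_reverse=False)
+    # backward GRU reads the sequence from right to left
+    bwd_output, bwd_state = layers.rnn(
+        cell=layers.GRUCell(hidden_size=hidden_dim),
+        inputs=src_embedding,
+        sequence_length=src_sequence_length,
+        is_reverse=True)
+    # h_i = [h_i_forward, h_i_backward]: concatenate the two directions
+    encoder_output = layers.concat([fwd_output, bwd_output], axis=2)
+    encoder_state = layers.concat([fwd_state, bwd_state], axis=1)
+    return encoder_output, encoder_state
+```
+
+Here `src_word_id` is assumed to be an `int64` tensor of shape `[batch_size, seq_len]` holding padded word ids, and `src_sequence_length` the real length of each sequence.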
+ +#### Decoder + +During the training of machine translation tasks, the goal of in decode period is to maximize the probability of the next correct target language word. The idea is: +1.At each time, according to the encoding information of the source language sentence (also called context vector) $c$, the $i$th word $u_i$ of the real target language sequence and the hidden layer state $z_i $ of RNN at $i$th time, it calculates the next hidden layer state $z_{i+1}$. The formula is as follows: +$$z_{i+1}=\phi_{\theta '} \left ( c,u_i,z_i \right )$$ +Where $\phi _{\theta '}$ is a nonlinear activation function; $c$ is the context vector of the source language sentence, if the attention mechanism is not used and the output of [encoder](#encoder) is the last element after the source language sentence is encoded, you can define $c=h_T$; $u_i$ is the $i$th word of the target language sequence, and $u_0$ is the start tag `` of the target language sequence, indicating the start of decoding; $z_i$ is the hidden layer state of the RNN at $i$th time, and $z_0$ is an all-zero vector. + +2.Normalize $z_{i+1}$ by `softmax` to get the probability distribution $p_{i+1}$ of the $i+1$th word of the target language sequence. The probability distribution formula is as follows: +$$p\left ( u_{i+1}|u_{<i+1},\mathbf{x} \right )=softmax(W_sz_{i+1}+b_z)$$ +Where $W_sz_{i+1}+b_z$ scores each possible output word and normalizes with softmax to get the probability $p_{i+1}$ of $i+1$th word. + +3.Calculate the cost according to $p_{i+1}$ and $u_{i+1}$. + +4.Repeat steps 1~3 until all words in the target language sequence have been processed. + +The process of generating machine translation tasks, in general, is to translate source language sentences based on pre-trained model. The decode period in the generation process is different from the training process above. For details, please refer to [Beam Search Algorithm](#Beam Search Algorithm). + + +### Beam Search Algorithm + +Beam Search ([beam search](http://en.wikipedia.org/wiki/Beam_search)) is a heuristic graph search algorithm for searching the graph or tree for the optimal extended nodes in a finite set, usually used in systems with very large solution space (such as machine translation, speech recognition), for that the memory can't fit all the unfolded solutions in the graph or tree. If you want to translate "`Hello`" in the machine translation task, even if there are only 3 words (``, ``, `hello`) in the target language dictionary, it is possible generate infinite sentences (the number of occurrences of `hello` is uncertain). In order to find better translation results, we can use beam search algorithm. + +The beam search algorithm uses a breadth-first strategy to build a search tree. At each level of the tree, the nodes are sorted according to the heuristic cost (in this tutorial, the sum of the log probabilities of the generated words), and then only the predetermined number of nodes (commonly referred to in the literature as beam width, beam size, 柱宽度, etc.). Only these nodes will continue to expand in the next layer, and other nodes will be cut off, that is, the nodes with higher quality are retained, and the nodes with poor quality are pruned. Therefore, the space and time occupied by the search are greatly reduced, but the disadvantage is that there is no guarantee that an optimal solution will be obtained. + +In the decode period of using beam search algorithm, the goal is to maximize the probability of generated sequence. 
The idea is: +1.At each time, the next hidden layer state $z_{i+1}$ is calculated according to the encoding information $c$ of the source language sentence, the generated $i$th target language sequence words $u_i$, and the hidden layer state $z_i$ of RNN at $i$th time. + +2.Normalize $z_{i+1}$ by `softmax` to get the probability distribution $p_{i+1}$ of the $i+1$th words of the target language sequence. + +3.The word $u_{i+1}$ is sampled according to $p_{i+1}$. + +4.Repeat steps 1~3 until you get the sentence end tag `` or exceed the maximum generation length of the sentence. + +Note: The formula for $z_{i+1}$ and $p_{i+1}$ is the same as in [Decoder](#Decoder). And since each step of the generation is implemented by the greedy method, it is not guaranteed to obtain the global optimal solution. + +## Data Preparation + +This tutorial uses [bitexts(after selection)] in the [WMT-14](http://www-lium.univ-lemans.fr/~schwenk/cslm_joint_paper/) dataset (http://www-lium.univ- Lemans.fr/~schwenk/cslm_joint_paper/data/bitexts.tgz) as a training set, [dev+test data](http://www-lium.univ-lemans.fr/~schwenk/cslm_joint_paper/data/dev+test.tgz) as a test set and generated set. + +### Data Pre-processing + +It contains two steps in pre-processing: + +-Merge parallel corpora files from source language to target language into one file: + +-Merge every `XXX.src` and `XXX.trg` into one file as `XXX`. + +-Content in $i$th row of `XXX` is the connection of $i$th row of `XXX.src` with $i$th row of `XXX.trg`, which is divided by '\t'. + +-Create source language dictionary and target language dictionary of train data. There are **DICTSIZE** words in each dictionary, including: (DICTSIZE - 3) words with highest frequency in the corpus, and 3 special symbols `` (the beginning of the sequence), `` ( the end of the sequence) and `` (unknown word). + +### Sample Data + +Because the data volume of the complete data set is large, in order to verify the training process, the PaddlePaddle interface paddle.data set.wmt14 provides a pre-processed [smaller scale dataset](http://paddlepaddle.bj.bcebos.com/demo/wmt_shrinked_data/wmt14.tgz) by default . + +In the data set, there are 193,319 training data, 6003 test data, and a dictionary with length of 30,000. Due to the limit of data size, the effects of models trained with this dataset are not guaranteed. + +## Model Configuration + +Next we start configuring model according to input data. First we import necessary library functions and define global variables. 
+ + + +```python +from __future__ import print_function +import contextlib + +import numpy as np +import paddle +import paddle.fluid as fluid +import paddle.fluid.framework as framework +import paddle.fluid.layers as pd +from paddle.fluid.executor import Executor +from functools import partial +import os + +dict_size = 30000 # dictionary dimension +source_dict_dim = target_dict_dim = dict_size # source/target language dictionary dimension +hidden_dim = 32 # size of hidden layer in encoder +word_dim = 16 # dimension of word vector +batch_size = 2 # the number of samples in batch +max_length = 8 # the maximum length of generated sentence +beam_size = 2 # width of beam + +decoder_size = hidden_dim # size of hidden layer in decoder +``` + + + + +Then the frame of encoder is implemented as follows: + + +```python + def encoder(is_sparse): + # define input data id of source language + src_word_id = pd.data( + name="src_word_id", shape=[1], dtype='int64', lod_level=1) + # reflect encode above on the word vector of low-dimension language space. + src_embedding = pd.embedding( + input=src_word_id, + size=[dict_size, word_dim], + dtype='float32', + is_sparse=is_sparse, + param_attr=fluid.ParamAttr(name='vemb')) + # LSTM layer:fc + dynamic_lstm + fc1 = pd.fc(input=src_embedding, size=hidden_dim * 4, act='tanh') + lstm_hidden0, lstm_0 = pd.dynamic_lstm(input=fc1, size=hidden_dim * 4) + # Fetch the final state after the sequence encode of source language + encoder_out = pd.sequence_last_step(input=lstm_hidden0) + return encoder_out +``` + + + +Then implement decoder in training mode: + + +```python + def train_decoder(context, is_sparse): + # Define input data of sequence id of target language and reflect it on word vector of low-dimension language space + trg_language_word = pd.data( + name="target_language_word", shape=[1], dtype='int64', lod_level=1) + trg_embedding = pd.embedding( + input=trg_language_word, + size=[dict_size, word_dim], + dtype='float32', + is_sparse=is_sparse, + param_attr=fluid.ParamAttr(name='vemb')) + + rnn = pd.DynamicRNN() + with rnn.block(): # use DynamicRNN to define computation at each step + # Fetch input word vector of target language at present step + current_word = rnn.step_input(trg_embedding) + # obtain state of hidden layer + pre_state = rnn.memory(init=context) + # computing unit of decoder: single-layer forward network + current_state = pd.fc(input=[current_word, pre_state], + size=decoder_size, + act='tanh') + # compute predicting probability of nomarlized word + current_score = pd.fc(input=current_state, + size=target_dict_dim, + act='softmax') + # update hidden layer of RNN + rnn.update_memory(pre_state, current_state) + # output predicted probability + rnn.output(current_score) + + return rnn() +``` + + + +implement decoder in inference mode + + + +```python +def decode(context, is_sparse): + init_state = context + # define counter variable in the decoding + array_len = pd.fill_constant(shape=[1], dtype='int64', value=max_length) + counter = pd.zeros(shape=[1], dtype='int64', force_cpu=True) + + # define tensor array to save content at each time step, and write initial id, score and state + state_array = pd.create_array('float32') + pd.array_write(init_state, array=state_array, i=counter) + + ids_array = pd.create_array('int64') + scores_array = pd.create_array('float32') + + init_ids = pd.data(name="init_ids", shape=[1], dtype="int64", lod_level=2) + init_scores = pd.data( + name="init_scores", shape=[1], dtype="float32", lod_level=2) + + 
pd.array_write(init_ids, array=ids_array, i=counter) + pd.array_write(init_scores, array=scores_array, i=counter) + + # define conditional variable to stop loop + cond = pd.less_than(x=counter, y=array_len) + # define while_op + while_op = pd.While(cond=cond) + with while_op.block(): # define the computing of each step + # obtain input at present step of decoder, including id chosen at previous step, corresponding score and state at previous step. + pre_ids = pd.array_read(array=ids_array, i=counter) + pre_state = pd.array_read(array=state_array, i=counter) + pre_score = pd.array_read(array=scores_array, i=counter) + + # update input state as state correspondent with id chosen at previous step + pre_state_expanded = pd.sequence_expand(pre_state, pre_score) + # computing logic of decoder under the same train mode, including input vector and computing unit of decoder + # compute predicting probability of normalized word + pre_ids_emb = pd.embedding( + input=pre_ids, + size=[dict_size, word_dim], + dtype='float32', + is_sparse=is_sparse) + current_state = pd.fc(input=[pre_state_expanded, pre_ids_emb], + size=decoder_size, + act='tanh') + current_state_with_lod = pd.lod_reset(x=current_state, y=pre_score) + current_score = pd.fc(input=current_state_with_lod, + size=target_dict_dim, + act='softmax') + topk_scores, topk_indices = pd.topk(current_score, k=beam_size) + + # compute accumulated score and perform beam search + accu_scores = pd.elementwise_add( + x=pd.log(topk_scores), y=pd.reshape(pre_score, shape=[-1]), axis=0) + selected_ids, selected_scores = pd.beam_search( + pre_ids, + pre_score, + topk_indices, + accu_scores, + beam_size, + end_id=10, + level=0) + + pd.increment(x=counter, value=1, in_place=True) + # write search result and corresponding hidden layer into tensor array + pd.array_write(current_state, array=state_array, i=counter) + pd.array_write(selected_ids, array=ids_array, i=counter) + pd.array_write(selected_scores, array=scores_array, i=counter) + + # update condition to stop loop + length_cond = pd.less_than(x=counter, y=array_len) + finish_cond = pd.logical_not(pd.is_empty(x=selected_ids)) + pd.logical_and(x=length_cond, y=finish_cond, out=cond) + + translation_ids, translation_scores = pd.beam_search_decode( + ids=ids_array, scores=scores_array, beam_size=beam_size, end_id=10) + + return translation_ids, translation_scores +``` + + + + +Furthermore, we define a `train_program` to use result computed by `inference_program` and compute error with the help of marked data. We also define an `optimizer_func` to define optimizer. + +```python +def train_program(is_sparse): + context = encoder(is_sparse) + rnn_out = train_decoder(context, is_sparse) + label = pd.data( + name="target_language_next_word", shape=[1], dtype='int64', lod_level=1) + cost = pd.cross_entropy(input=rnn_out, label=label) + avg_cost = pd.mean(cost) + return avg_cost + + +def optimizer_func(): + return fluid.optimizer.Adagrad( + learning_rate=1e-4, + regularization=fluid.regularizer.L2DecayRegularizer( + regularization_coeff=0.1)) +``` + +## Train Model + +### Define Training Environment +Define your training environment and define the train executed on CPU or on GPU. + +```python +use_cuda = False +place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() +``` + +### Define Data Provider +The next step is to define data provider for train and test. Data Provider read data with size of `BATCH_SIZE` `paddle.dataset.wmt.train` will provide data with size of `BATCH_SIZE` after reordering every time. 
The size of reordering is `buf_size`. + +```python +train_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.wmt14.train(dict_size), buf_size=1000), + batch_size=batch_size) +``` + +### Create Trainer +Trainer needs a train program and a train optimizer. + +```python +is_sparse = False +trainer = Trainer( + train_func=partial(train_program, is_sparse), + place=place, + optimizer_func=optimizer_func) +``` + +### Provide Data + +`feed_order` is used to define every generated data and reflecting relationship between `paddle.layer.data`. For example, the first column data generated by `wmt14.train` is correspondent with the feature `src_word_id`. + +```python +feed_order = ['src_word_id', 'target_language_word', 'target_language_next_word' +] +``` + +### Event Handler +Call function `event_handler` will be called after the touch of an event defined before. For example, we can examine the loss after the training at each step. + +```python +def event_handler(event): + if isinstance(event, EndStepEvent): + if event.step % 10 == 0: + print('pass_id=' + str(event.epoch) + ' batch=' + str(event.step)) + + if event.step == 20: + trainer.stop() +``` + +### Start Training +Finally, we feed in `num_epoch` and other parameters and call `trainer.train` to start training. + +```python +EPOCH_NUM = 1 + +trainer.train( + reader=train_reader, + num_epochs=EPOCH_NUM, + event_handler=event_handler, + feed_order=feed_order) +``` + +## Model Application + +### Define Decoder Part + +Use `encoder` and `decoder` function defined above to infer corresponding id and score after the translation. + +```python +context = encoder(is_sparse) +translation_ids, translation_scores = decode(context, is_sparse) +``` + +### Define Data + +First we initialize id and score to generate tensor as input data. In this prediction, we use the first record in `wmt14.test` to infer and finally use "source language dictionary" and "target language dictionary" to output corresponding sentence. + +```python +init_ids_data = np.array([1 for _ in range(batch_size)], dtype='int64') +init_scores_data = np.array( + [1. for _ in range(batch_size)], dtype='float32') +init_ids_data = init_ids_data.reshape((batch_size, 1)) +init_scores_data = init_scores_data.reshape((batch_size, 1)) +init_lod = [1] * batch_size +init_lod = [init_lod, init_lod] + +init_ids = fluid.create_lod_tensor(init_ids_data, init_lod, place) +init_scores = fluid.create_lod_tensor(init_scores_data, init_lod, place) + +test_data = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.wmt14.test(dict_size), buf_size=1000), + batch_size=batch_size) + +feed_order = ['src_word_id'] +feed_list = [ + framework.default_main_program().global_block().var(var_name) + for var_name in feed_order +] +feeder = fluid.DataFeeder(feed_list, place) + +src_dict, trg_dict = paddle.dataset.wmt14.get_dict(dict_size) +``` + +### Test +Now we can start predicting. We need provide corresponding parameters in `feed_order` and run it on `executor` to obtain id and score. 
+ + +```python +exe = Executor(place) +exe.run(framework.default_startup_program()) + +for data in test_data(): + feed_data = map(lambda x: [x[0]], data) + feed_dict = feeder.feed(feed_data) + feed_dict['init_ids'] = init_ids + feed_dict['init_scores'] = init_scores + + results = exe.run( + framework.default_main_program(), + feed=feed_dict, + fetch_list=[translation_ids, translation_scores], + return_numpy=False) + + result_ids = np.array(results[0]) + result_ids_lod = results[0].lod() + result_scores = np.array(results[1]) + + print("Original sentence:") + print(" ".join([src_dict[w] for w in feed_data[0][0][1:-1]])) + print("Translated score and sentence:") + for i in xrange(beam_size): + start_pos = result_ids_lod[1][i] + 1 + end_pos = result_ids_lod[1][i+1] + print("%d\t%.4f\t%s\n" % (i+1, result_scores[end_pos-1], + " ".join([trg_dict[w] for w in result_ids[start_pos:end_pos]]))) + + break +``` + +## Summary + +End-to-End neural network translation is an recently acclaimed machine translation method. In this section, we introduced the typical Encoder-Decoder of NMT. Because NMT is a typical Seq2Seq (Sequence to Sequence) learning task, tasks of Seq2Seq, such as query rewriting, abstraction, single round dialogue, can be tackled by this model. + + +## References + +1. Koehn P. [Statistical machine translation](https://books.google.com.hk/books?id=4v_Cx1wIMLkC&printsec=frontcover&hl=zh-CN&source=gbs_ge_summary_r&cad=0#v=onepage&q&f=false)[M]. Cambridge University Press, 2009. +2. Cho K, Van Merriënboer B, Gulcehre C, et al. [Learning phrase representations using RNN encoder-decoder for statistical machine translation](http://www.aclweb.org/anthology/D/D14/D14-1179.pdf)[C]//Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), 2014: 1724-1734. +3. Chung J, Gulcehre C, Cho K H, et al. [Empirical evaluation of gated recurrent neural networks on sequence modeling](https://arxiv.org/abs/1412.3555)[J]. arXiv preprint arXiv:1412.3555, 2014. +4. Bahdanau D, Cho K, Bengio Y. [Neural machine translation by jointly learning to align and translate](https://arxiv.org/abs/1409.0473)[C]//Proceedings of ICLR 2015, 2015. +5. Papineni K, Roukos S, Ward T, et al. [BLEU: a method for automatic evaluation of machine translation](http://dl.acm.org/citation.cfm?id=1073135)[C]//Proceedings of the 40th annual meeting on association for computational linguistics. Association for Computational Linguistics, 2002: 311-318. + +
+
+Creative Commons License
This tutorial is contributed by PaddlePaddle, and licensed under a Creative Commons Attribution-ShareAlike 4.0 International License. diff --git a/doc/paddle/user_guides/nlp_case/machine_translation/image/attention_decoder_formula.png b/doc/paddle/user_guides/nlp_case/machine_translation/image/attention_decoder_formula.png new file mode 100644 index 0000000000000000000000000000000000000000..2f63489c5abbbc301cbcddaf862c4c89722eb1fd Binary files /dev/null and b/doc/paddle/user_guides/nlp_case/machine_translation/image/attention_decoder_formula.png differ diff --git a/doc/paddle/user_guides/nlp_case/machine_translation/image/bi_rnn.png b/doc/paddle/user_guides/nlp_case/machine_translation/image/bi_rnn.png new file mode 100644 index 0000000000000000000000000000000000000000..9d8efd50a49d0305586f550344472ab94c93bed3 Binary files /dev/null and b/doc/paddle/user_guides/nlp_case/machine_translation/image/bi_rnn.png differ diff --git a/doc/paddle/user_guides/nlp_case/machine_translation/image/bi_rnn_en.png b/doc/paddle/user_guides/nlp_case/machine_translation/image/bi_rnn_en.png new file mode 100644 index 0000000000000000000000000000000000000000..4b35c88fc8ea2c503473c0c15711744e784d6af6 Binary files /dev/null and b/doc/paddle/user_guides/nlp_case/machine_translation/image/bi_rnn_en.png differ diff --git a/doc/paddle/user_guides/nlp_case/machine_translation/image/decoder_attention.png b/doc/paddle/user_guides/nlp_case/machine_translation/image/decoder_attention.png new file mode 100644 index 0000000000000000000000000000000000000000..1b355e7786d25487a3f564af758c2c52c43b4690 Binary files /dev/null and b/doc/paddle/user_guides/nlp_case/machine_translation/image/decoder_attention.png differ diff --git a/doc/paddle/user_guides/nlp_case/machine_translation/image/decoder_attention_en.png b/doc/paddle/user_guides/nlp_case/machine_translation/image/decoder_attention_en.png new file mode 100644 index 0000000000000000000000000000000000000000..3728f782ee09d9308d02b42305027b2735467ead Binary files /dev/null and b/doc/paddle/user_guides/nlp_case/machine_translation/image/decoder_attention_en.png differ diff --git a/doc/paddle/user_guides/nlp_case/machine_translation/image/decoder_formula.png b/doc/paddle/user_guides/nlp_case/machine_translation/image/decoder_formula.png new file mode 100644 index 0000000000000000000000000000000000000000..94d18fadc4441614722d3de7684eecb2112dc3d9 Binary files /dev/null and b/doc/paddle/user_guides/nlp_case/machine_translation/image/decoder_formula.png differ diff --git a/doc/paddle/user_guides/nlp_case/machine_translation/image/encoder_attention.png b/doc/paddle/user_guides/nlp_case/machine_translation/image/encoder_attention.png new file mode 100644 index 0000000000000000000000000000000000000000..28d7a15a3bd65262bde22a3f41b5aa78b46b368a Binary files /dev/null and b/doc/paddle/user_guides/nlp_case/machine_translation/image/encoder_attention.png differ diff --git a/doc/paddle/user_guides/nlp_case/machine_translation/image/encoder_attention_en.png b/doc/paddle/user_guides/nlp_case/machine_translation/image/encoder_attention_en.png new file mode 100644 index 0000000000000000000000000000000000000000..ea8585565da1ecaf241654c278c6f9b15e283286 Binary files /dev/null and b/doc/paddle/user_guides/nlp_case/machine_translation/image/encoder_attention_en.png differ diff --git a/doc/paddle/user_guides/nlp_case/machine_translation/image/encoder_decoder.png b/doc/paddle/user_guides/nlp_case/machine_translation/image/encoder_decoder.png new file mode 100644 index 
0000000000000000000000000000000000000000..60aee0017de73f462e35708b1055aff8992c03e1 Binary files /dev/null and b/doc/paddle/user_guides/nlp_case/machine_translation/image/encoder_decoder.png differ diff --git a/doc/paddle/user_guides/nlp_case/machine_translation/image/encoder_decoder_en.png b/doc/paddle/user_guides/nlp_case/machine_translation/image/encoder_decoder_en.png new file mode 100644 index 0000000000000000000000000000000000000000..6b73798fe632e0873b35c117b86f347c8cf3116a Binary files /dev/null and b/doc/paddle/user_guides/nlp_case/machine_translation/image/encoder_decoder_en.png differ diff --git a/doc/paddle/user_guides/nlp_case/machine_translation/image/gru.png b/doc/paddle/user_guides/nlp_case/machine_translation/image/gru.png new file mode 100644 index 0000000000000000000000000000000000000000..0cde685b84106650a4df18ce335a23e6338d3d11 Binary files /dev/null and b/doc/paddle/user_guides/nlp_case/machine_translation/image/gru.png differ diff --git a/doc/paddle/user_guides/nlp_case/machine_translation/image/gru_en.png b/doc/paddle/user_guides/nlp_case/machine_translation/image/gru_en.png new file mode 100644 index 0000000000000000000000000000000000000000..a6af429f23f0f7e82650139bbd8dcbef27a34abe Binary files /dev/null and b/doc/paddle/user_guides/nlp_case/machine_translation/image/gru_en.png differ diff --git a/doc/paddle/user_guides/nlp_case/machine_translation/image/nmt.png b/doc/paddle/user_guides/nlp_case/machine_translation/image/nmt.png new file mode 100644 index 0000000000000000000000000000000000000000..bf56d73ebf297fadf522389c7b6836dd379aa097 Binary files /dev/null and b/doc/paddle/user_guides/nlp_case/machine_translation/image/nmt.png differ diff --git a/doc/paddle/user_guides/nlp_case/machine_translation/image/nmt_en.png b/doc/paddle/user_guides/nlp_case/machine_translation/image/nmt_en.png new file mode 100644 index 0000000000000000000000000000000000000000..557310e044b2b6687e5ea6895417ed946ac7bc11 Binary files /dev/null and b/doc/paddle/user_guides/nlp_case/machine_translation/image/nmt_en.png differ diff --git a/doc/paddle/user_guides/nlp_case/machine_translation/image/probability_formula.png b/doc/paddle/user_guides/nlp_case/machine_translation/image/probability_formula.png new file mode 100644 index 0000000000000000000000000000000000000000..34e94f7efdb0eb91e50f2641e30fbb3fef2fe9b9 Binary files /dev/null and b/doc/paddle/user_guides/nlp_case/machine_translation/image/probability_formula.png differ diff --git a/doc/paddle/user_guides/nlp_case/machine_translation/image/sum_formula.png b/doc/paddle/user_guides/nlp_case/machine_translation/image/sum_formula.png new file mode 100644 index 0000000000000000000000000000000000000000..f220f689d52e76e4c58a1ebbebe4c02789246d87 Binary files /dev/null and b/doc/paddle/user_guides/nlp_case/machine_translation/image/sum_formula.png differ diff --git a/doc/paddle/user_guides/nlp_case/machine_translation/image/weight_formula.png b/doc/paddle/user_guides/nlp_case/machine_translation/image/weight_formula.png new file mode 100644 index 0000000000000000000000000000000000000000..4140746d8c12c60258eed0f7aa59d6de823e7776 Binary files /dev/null and b/doc/paddle/user_guides/nlp_case/machine_translation/image/weight_formula.png differ diff --git a/doc/paddle/user_guides/nlp_case/machine_translation/index.cn.html b/doc/paddle/user_guides/nlp_case/machine_translation/index.cn.html new file mode 100644 index 0000000000000000000000000000000000000000..38a1cb1abf02d1da3640e5dc677586df10614e12 --- /dev/null +++ 
b/doc/paddle/user_guides/nlp_case/machine_translation/index.cn.html @@ -0,0 +1,697 @@ + + + + + + + + + + + + + + + + + +
+
+ + + + + + + diff --git a/doc/paddle/user_guides/nlp_case/machine_translation/index.html b/doc/paddle/user_guides/nlp_case/machine_translation/index.html new file mode 100644 index 0000000000000000000000000000000000000000..e1bd7d27c1cec1f3e47765b72869583e3941bc65 --- /dev/null +++ b/doc/paddle/user_guides/nlp_case/machine_translation/index.html @@ -0,0 +1,565 @@ + + + + + + + + + + + + + + + + + +
+
+ + + + + + + diff --git a/doc/paddle/user_guides/nlp_case/machine_translation/seq2seq.py b/doc/paddle/user_guides/nlp_case/machine_translation/seq2seq.py new file mode 100644 index 0000000000000000000000000000000000000000..a398c93ff879e0f2cc64a5b4d6ad13b3b660ab49 --- /dev/null +++ b/doc/paddle/user_guides/nlp_case/machine_translation/seq2seq.py @@ -0,0 +1,343 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import print_function +import os +import six + +import numpy as np +import paddle +import paddle.fluid as fluid +import paddle.fluid.layers as layers + +dict_size = 30000 +source_dict_size = target_dict_size = dict_size +bos_id = 0 +eos_id = 1 +word_dim = 512 +hidden_dim = 512 +decoder_size = hidden_dim +max_length = 256 +beam_size = 4 +batch_size = 64 + +model_file = "machine_translation" + + +class DecoderCell(layers.RNNCell): + """Additive Attention followed by GRU""" + + def __init__(self, hidden_size): + self.hidden_size = hidden_size + self.gru_cell = layers.GRUCell(hidden_size) + + def attention(self, hidden, encoder_output, encoder_output_proj, + encoder_padding_mask): + decoder_state_proj = layers.unsqueeze( + layers.fc(hidden, size=self.hidden_size, bias_attr=False), [1]) + mixed_state = fluid.layers.elementwise_add( + encoder_output_proj, + layers.expand(decoder_state_proj, + [1, layers.shape(decoder_state_proj)[1], 1])) + # attn_scores: [batch_size, src_seq_len] + attn_scores = layers.squeeze( + layers.fc( + input=mixed_state, size=1, num_flatten_dims=2, + bias_attr=False), [2]) + if encoder_padding_mask is not None: + attn_scores = layers.elementwise_add(attn_scores, + encoder_padding_mask) + attn_scores = layers.softmax(attn_scores) + context = layers.reduce_sum( + layers.elementwise_mul(encoder_output, attn_scores, axis=0), dim=1) + return context + + def call(self, + step_input, + hidden, + encoder_output, + encoder_output_proj, + encoder_padding_mask=None): + context = self.attention(hidden, encoder_output, encoder_output_proj, + encoder_padding_mask) + step_input = layers.concat([step_input, context], axis=1) + output, new_hidden = self.gru_cell(step_input, hidden) + return output, new_hidden + + +def data_func(is_train=True): + """data inputs and data loader""" + src = fluid.data(name="src", shape=[None, None], dtype="int64") + src_sequence_length = fluid.data( + name="src_sequence_length", shape=[None], dtype="int64") + inputs = [src, src_sequence_length] + if is_train: + trg = fluid.data(name="trg", shape=[None, None], dtype="int64") + trg_sequence_length = fluid.data( + name="trg_sequence_length", shape=[None], dtype="int64") + label = fluid.data(name="label", shape=[None, None], dtype="int64") + inputs += [trg, trg_sequence_length, label] + loader = fluid.io.DataLoader.from_generator( + feed_list=inputs, capacity=10, iterable=True, use_double_buffer=True) + return inputs, loader + + +def encoder(src_embedding, src_sequence_length): + """Encoder: Bidirectional GRU""" + encoder_fwd_cell 
= layers.GRUCell(hidden_size=hidden_dim) + encoder_fwd_output, fwd_state = layers.rnn( + cell=encoder_fwd_cell, + inputs=src_embedding, + sequence_length=src_sequence_length, + time_major=False, + is_reverse=False) + encoder_bwd_cell = layers.GRUCell(hidden_size=hidden_dim) + encoder_bwd_output, bwd_state = layers.rnn( + cell=encoder_bwd_cell, + inputs=src_embedding, + sequence_length=src_sequence_length, + time_major=False, + is_reverse=True) + encoder_output = layers.concat( + input=[encoder_fwd_output, encoder_bwd_output], axis=2) + encoder_state = layers.concat(input=[fwd_state, bwd_state], axis=1) + return encoder_output, encoder_state + + +def decoder(encoder_output, + encoder_output_proj, + encoder_state, + encoder_padding_mask, + trg=None, + is_train=True): + """Decoder: GRU with Attention""" + decoder_cell = DecoderCell(hidden_size=decoder_size) + decoder_initial_states = layers.fc( + encoder_state, size=decoder_size, act="tanh") + trg_embeder = lambda x: fluid.embedding(input=x, + size=[target_dict_size, hidden_dim], + dtype="float32", + param_attr=fluid.ParamAttr( + name="trg_emb_table")) + output_layer = lambda x: layers.fc(x, + size=target_dict_size, + num_flatten_dims=len(x.shape) - 1, + param_attr=fluid.ParamAttr(name= + "output_w")) + if is_train: + decoder_output, _ = layers.rnn( + cell=decoder_cell, + inputs=trg_embeder(trg), + initial_states=decoder_initial_states, + time_major=False, + encoder_output=encoder_output, + encoder_output_proj=encoder_output_proj, + encoder_padding_mask=encoder_padding_mask) + decoder_output = output_layer(decoder_output) + else: + encoder_output = layers.BeamSearchDecoder.tile_beam_merge_with_batch( + encoder_output, beam_size) + encoder_output_proj = layers.BeamSearchDecoder.tile_beam_merge_with_batch( + encoder_output_proj, beam_size) + encoder_padding_mask = layers.BeamSearchDecoder.tile_beam_merge_with_batch( + encoder_padding_mask, beam_size) + beam_search_decoder = layers.BeamSearchDecoder( + cell=decoder_cell, + start_token=bos_id, + end_token=eos_id, + beam_size=beam_size, + embedding_fn=trg_embeder, + output_fn=output_layer) + decoder_output, _ = layers.dynamic_decode( + decoder=beam_search_decoder, + inits=decoder_initial_states, + max_step_num=max_length, + output_time_major=False, + encoder_output=encoder_output, + encoder_output_proj=encoder_output_proj, + encoder_padding_mask=encoder_padding_mask) + + return decoder_output + + +def model_func(inputs, is_train=True): + src = inputs[0] + src_sequence_length = inputs[1] + # source embedding + src_embeder = lambda x: fluid.embedding( + input=x, + size=[source_dict_size, hidden_dim], + dtype="float32", + param_attr=fluid.ParamAttr(name="src_emb_table")) + src_embedding = src_embeder(src) + + # encoder + encoder_output, encoder_state = encoder(src_embedding, src_sequence_length) + + encoder_output_proj = layers.fc( + input=encoder_output, + size=decoder_size, + num_flatten_dims=2, + bias_attr=False) + src_mask = layers.sequence_mask( + src_sequence_length, maxlen=layers.shape(src)[1], dtype="float32") + encoder_padding_mask = (src_mask - 1.0) * 1e9 + + trg = inputs[2] if is_train else None + + # decoder + output = decoder( + encoder_output=encoder_output, + encoder_output_proj=encoder_output_proj, + encoder_state=encoder_state, + encoder_padding_mask=encoder_padding_mask, + trg=trg, + is_train=is_train) + return output + + +def loss_func(logits, label, trg_sequence_length): + probs = layers.softmax(logits) + loss = layers.cross_entropy(input=probs, label=label) + trg_mask = 
layers.sequence_mask( + trg_sequence_length, maxlen=layers.shape(logits)[1], dtype="float32") + avg_cost = layers.reduce_sum(loss * trg_mask) / layers.reduce_sum(trg_mask) + return avg_cost + + +def optimizer_func(): + clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=5.0) + lr_decay = fluid.layers.learning_rate_scheduler.noam_decay(hidden_dim, + 1000) + return fluid.optimizer.Adam( + learning_rate=lr_decay, + regularization=fluid.regularizer.L2DecayRegularizer( + regularization_coeff=1e-4), + grad_clip=clip) + + +def inputs_generator(batch_size, pad_id, is_train=True): + data_generator = fluid.io.shuffle( + paddle.dataset.wmt16.train(source_dict_size, target_dict_size), + buf_size=10000) if is_train else paddle.dataset.wmt16.test( + source_dict_size, target_dict_size) + batch_generator = fluid.io.batch(data_generator, batch_size=batch_size) + + def _pad_batch_data(insts, pad_id): + seq_lengths = np.array(list(map(len, insts)), dtype="int64") + max_len = max(seq_lengths) + pad_data = np.array( + [inst + [pad_id] * (max_len - len(inst)) for inst in insts], + dtype="int64") + return pad_data, seq_lengths + + def _generator(): + for batch in batch_generator(): + batch_src = [ins[0] for ins in batch] + src_data, src_lengths = _pad_batch_data(batch_src, pad_id) + inputs = [src_data, src_lengths] + if is_train: + batch_trg = [ins[1] for ins in batch] + trg_data, trg_lengths = _pad_batch_data(batch_trg, pad_id) + batch_lbl = [ins[2] for ins in batch] + lbl_data, _ = _pad_batch_data(batch_lbl, pad_id) + inputs += [trg_data, trg_lengths, lbl_data] + yield inputs + + return _generator + + +def train(use_cuda): + # define program + train_prog = fluid.Program() + startup_prog = fluid.Program() + with fluid.program_guard(train_prog, startup_prog): + with fluid.unique_name.guard(): + # For training: + # inputs = [src, src_sequence_length, trg, trg_sequence_length, label] + inputs, loader = data_func(is_train=True) + logits = model_func(inputs, is_train=True) + loss = loss_func(logits, inputs[-1], inputs[-2]) + optimizer = optimizer_func() + optimizer.minimize(loss) + + # define data source + places = fluid.cuda_places() if use_cuda else fluid.cpu_places() + loader.set_batch_generator( + inputs_generator(batch_size, eos_id, is_train=True), places=places) + + exe = fluid.Executor(places[0]) + exe.run(startup_prog) + prog = fluid.CompiledProgram(train_prog).with_data_parallel( + loss_name=loss.name) + + EPOCH_NUM = 20 + for pass_id in six.moves.xrange(EPOCH_NUM): + batch_id = 0 + for data in loader(): + loss_val = exe.run(prog, feed=data, fetch_list=[loss])[0] + print('pass_id: %d, batch_id: %d, loss: %f' % + (pass_id, batch_id, loss_val)) + batch_id += 1 + fluid.save(train_prog, model_file) + + +def infer(use_cuda): + # define program + infer_prog = fluid.Program() + startup_prog = fluid.Program() + with fluid.program_guard(infer_prog, startup_prog): + with fluid.unique_name.guard(): + inputs, loader = data_func(is_train=False) + predict_seqs = model_func(inputs, is_train=False) + + # define data source + places = fluid.cuda_places() if use_cuda else fluid.cpu_places() + loader.set_batch_generator( + inputs_generator(batch_size, eos_id, is_train=False), places=places) + src_idx2word = paddle.dataset.wmt16.get_dict( + "en", source_dict_size, reverse=True) + trg_idx2word = paddle.dataset.wmt16.get_dict( + "de", target_dict_size, reverse=True) + + exe = fluid.Executor(places[0]) + exe.run(startup_prog) + fluid.load(infer_prog, model_file, exe) + prog = 
fluid.CompiledProgram(infer_prog).with_data_parallel() + + for data in loader(): + seq_ids = exe.run(prog, feed=data, fetch_list=[predict_seqs])[0] + for ins_idx in range(seq_ids.shape[0]): + print("Original sentence:") + src_seqs = np.array(data[0]["src"]) + print(" ".join([ + src_idx2word[idx] for idx in src_seqs[ins_idx][1:] + if idx != eos_id + ])) + print("Translated sentence:") + for beam_idx in range(beam_size): + seq = [ + trg_idx2word[idx] for idx in seq_ids[ins_idx, :, beam_idx] + if idx != eos_id + ] + print(" ".join(seq).encode("utf8")) + + +def main(use_cuda): + train(use_cuda) + infer(use_cuda) + + +if __name__ == '__main__': + use_cuda = False # set to True if training with GPU + main(use_cuda) diff --git a/doc/paddle/user_guides/nlp_case/understand_sentiment/.gitignore b/doc/paddle/user_guides/nlp_case/understand_sentiment/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..667762d327cb160376a4119fa9df9db41b6443b2 --- /dev/null +++ b/doc/paddle/user_guides/nlp_case/understand_sentiment/.gitignore @@ -0,0 +1,10 @@ +data/aclImdb +data/imdb +data/pre-imdb +data/mosesdecoder-master +*.log +model_output +dataprovider_copy_1.py +model.list +*.pyc +.DS_Store diff --git a/doc/paddle/user_guides/nlp_case/understand_sentiment/.run_ce.sh b/doc/paddle/user_guides/nlp_case/understand_sentiment/.run_ce.sh new file mode 100644 index 0000000000000000000000000000000000000000..4001dec862f924d45352a6a47bca6b50afe847b6 --- /dev/null +++ b/doc/paddle/user_guides/nlp_case/understand_sentiment/.run_ce.sh @@ -0,0 +1,9 @@ +#!/bin/bash +#This file is only used for continuous evaluation. +export FLAGS_cudnn_deterministic=true +export CUDA_VISIBLE_DEVICES=0 +python train_conv.py --use_gpu 1 --num_epochs=1 --enable_ce | python _ce.py +python train_dyn_rnn.py --use_gpu 1 --num_epochs=1 --enable_ce | python _ce.py +python train_stacked_lstm.py --use_gpu 1 --num_epochs=1 --enable_ce | python _ce.py + + diff --git a/doc/paddle/user_guides/nlp_case/understand_sentiment/README.cn.md b/doc/paddle/user_guides/nlp_case/understand_sentiment/README.cn.md new file mode 100644 index 0000000000000000000000000000000000000000..199e18ed8854138e962327e9734021ab56807deb --- /dev/null +++ b/doc/paddle/user_guides/nlp_case/understand_sentiment/README.cn.md @@ -0,0 +1,452 @@ +# 情感分析 + +本教程源代码目录在[book/understand_sentiment](https://github.com/PaddlePaddle/book/tree/develop/06.understand_sentiment),初次使用请您参考[Book文档使用说明](https://github.com/PaddlePaddle/book/blob/develop/README.cn.md#运行这本书)。 + +## 背景介绍 + +在自然语言处理中,情感分析一般是指判断一段文本所表达的情绪状态。其中,一段文本可以是一个句子,一个段落或一个文档。情绪状态可以是两类,如(正面,负面),(高兴,悲伤);也可以是三类,如(积极,消极,中性)等等。情感分析的应用场景十分广泛,如把用户在购物网站(亚马逊、天猫、淘宝等)、旅游网站、电影评论网站上发表的评论分成正面评论和负面评论;或为了分析用户对于某一产品的整体使用感受,抓取产品的用户评论并进行情感分析等等。表格1展示了对电影评论进行情感分析的例子: + +| 电影评论 | 类别 | +| -------- | ----- | +| 在冯小刚这几年的电影里,算最好的一部的了| 正面 | +| 很不好看,好像一个地方台的电视剧 | 负面 | +| 圆方镜头全程炫技,色调背景美则美矣,但剧情拖沓,口音不伦不类,一直努力却始终无法入戏| 负面| +|剧情四星。但是圆镜视角加上婺源的风景整个非常有中国写意山水画的感觉,看得实在太舒服了。。|正面| + +

表格 1 电影评论情感分析

+ +在自然语言处理中,情感分析属于典型的**文本分类**问题,即把需要进行情感分析的文本划分为其所属类别。文本分类涉及文本表示和分类方法两个问题。在深度学习的方法出现之前,主流的文本表示方法为词袋模型BOW(bag of words),话题模型等等;分类方法有SVM(support vector machine), LR(logistic regression)等等。 + +对于一段文本,BOW表示会忽略其词顺序、语法和句法,将这段文本仅仅看做是一个词集合,因此BOW方法并不能充分表示文本的语义信息。例如,句子“这部电影糟糕透了”和“一个乏味,空洞,没有内涵的作品”在情感分析中具有很高的语义相似度,但是它们的BOW表示的相似度为0。又如,句子“一个空洞,没有内涵的作品”和“一个不空洞而且有内涵的作品”的BOW相似度很高,但实际上它们的意思很不一样。 + +本章我们所要介绍的深度学习模型克服了BOW表示的上述缺陷,它在考虑词顺序的基础上把文本映射到低维度的语义空间,并且以端对端(end to end)的方式进行文本表示及分类,其性能相对于传统方法有显著的提升\[[1](#参考文献)\]。 + +## 说明: +1. 硬件环境要求: +本文可支持在CPU、GPU下运行 +2. Docker镜像支持的CUDA/cuDNN版本: +如果使用了Docker运行Book,请注意:这里所提供的默认镜像的GPU环境为 CUDA 8/cuDNN 5,对于NVIDIA Tesla V100等要求CUDA 9的 GPU,使用该镜像可能会运行失败。 +3. 文档和脚本中代码的一致性问题: +请注意:为使本文更加易读易用,我们拆分、调整了train.py的代码并放入本文。本文中代码与train.py的运行结果一致,可直接运行[train.py](https://github.com/PaddlePaddle/book/blob/develop/06.understand_sentiment/train_stacked_lstm.py)进行验证。 + + +## 模型概览 +本章所使用的文本表示模型为卷积神经网络(Convolutional Neural Networks)和循环神经网络(Recurrent Neural Networks)及其扩展。下面依次介绍这几个模型。 + +### 文本卷积神经网络简介(CNN) + +我们在[推荐系统](https://github.com/PaddlePaddle/book/tree/develop/05.recommender_system)一节介绍过应用于文本数据的卷积神经网络模型的计算过程,这里进行一个简单的回顾。 + +对卷积神经网络来说,首先使用卷积处理输入的词向量序列,产生一个特征图(feature map),对特征图采用时间维度上的最大池化(max pooling over time)操作得到此卷积核对应的整句话的特征,最后,将所有卷积核得到的特征拼接起来即为文本的定长向量表示,对于文本分类问题,将其连接至softmax即构建出完整的模型。在实际应用中,我们会使用多个卷积核来处理句子,窗口大小相同的卷积核堆叠起来形成一个矩阵,这样可以更高效的完成运算。另外,我们也可使用窗口大小不同的卷积核来处理句子,[推荐系统](https://github.com/PaddlePaddle/book/tree/develop/05.recommender_system)一节的图3作为示意画了四个卷积核,既文本图1,不同颜色表示不同大小的卷积核操作。 + +

+图1. 卷积神经网络文本分类模型
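+
+下面用一个极简的 NumPy 片段(仅作示意,并非 PaddlePaddle 的实现)说明"时间维度上的最大池化(max pooling over time)":不论句子多长,池化后得到的向量长度只取决于卷积核的个数。
+
+```python
+import numpy as np
+
+# 假设卷积后得到的特征图:每一行对应一个窗口位置,每一列对应一个卷积核
+feature_map = np.array([[0.1, 0.7, 0.3],
+                        [0.9, 0.2, 0.4],
+                        [0.5, 0.6, 0.8]])
+
+sentence_vector = feature_map.max(axis=0)   # 时间维度上的最大池化
+print(sentence_vector)                      # [0.9 0.7 0.8],长度只取决于卷积核个数
+```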

+ +对于一般的短文本分类问题,上文所述的简单的文本卷积网络即可达到很高的正确率\[[1](#参考文献)\]。若想得到更抽象更高级的文本特征表示,可以构建深层文本卷积神经网络\[[2](#参考文献),[3](#参考文献)\]。 + +### 循环神经网络(RNN) + +循环神经网络是一种能对序列数据进行精确建模的有力工具。实际上,循环神经网络的理论计算能力是图灵完备的\[[4](#参考文献)\]。自然语言是一种典型的序列数据(词序列),近年来,循环神经网络及其变体(如long short term memory\[[5](#参考文献)\]等)在自然语言处理的多个领域,如语言模型、句法解析、语义角色标注(或一般的序列标注)、语义表示、图文生成、对话、机器翻译等任务上均表现优异甚至成为目前效果最好的方法。 + +

+图2. 循环神经网络按时间展开的示意图

+ +循环神经网络按时间展开后如图2所示:在第$t$时刻,网络读入第$t$个输入$x_t$(向量表示)及前一时刻隐层的状态值$h_{t-1}$(向量表示,$h_0$一般初始化为$0$向量),计算得出本时刻隐层的状态值$h_t$,重复这一步骤直至读完所有输入。如果将循环神经网络所表示的函数记为$f$,则其公式可表示为: + +

+$$h_t=f(x_t,h_{t-1})=\sigma(W_{xh}x_t+W_{hh}h_{t-1}+b_h)$$

+ +其中$W_{xh}$是输入到隐层的矩阵参数,$W_{hh}$是隐层到隐层的矩阵参数,$b_h$为隐层的偏置向量(bias)参数,$\sigma$为$sigmoid$函数。 + +在处理自然语言时,一般会先将词(one-hot表示)映射为其词向量表示,然后再作为循环神经网络每一时刻的输入$x_t$。此外,可以根据实际需要的不同在循环神经网络的隐层上连接其它层。如,可以把一个循环神经网络的隐层输出连接至下一个循环神经网络的输入构建深层(deep or stacked)循环神经网络,或者提取最后一个时刻的隐层状态作为句子表示进而使用分类模型等等。 + +### 长短期记忆网络(LSTM) + +对于较长的序列数据,循环神经网络的训练过程中容易出现梯度消失或爆炸现象\[[6](#参考文献)\]。为了解决这一问题,Hochreiter S, Schmidhuber J. (1997)提出了LSTM(long short term memory\[[5](#参考文献)\])。 + +相比于简单的循环神经网络,LSTM增加了记忆单元$c$、输入门$i$、遗忘门$f$及输出门$o$。这些门及记忆单元组合起来大大提升了循环神经网络处理长序列数据的能力。若将基于LSTM的循环神经网络表示的函数记为$F$,则其公式为: + +

+$$h_t=F(x_t,h_{t-1})$$

+ + +$F$由下列公式组合而成\[[7](#参考文献)\]: + +

+$$i_t = \sigma(W_{xi}x_t+W_{hi}h_{t-1}+W_{ci}c_{t-1}+b_i)$$
+$$f_t = \sigma(W_{xf}x_t+W_{hf}h_{t-1}+W_{cf}c_{t-1}+b_f)$$
+$$c_t = f_t\odot c_{t-1}+i_t\odot tanh(W_{xc}x_t+W_{hc}h_{t-1}+b_c)$$
+$$o_t = \sigma(W_{xo}x_t+W_{ho}h_{t-1}+W_{co}c_{t}+b_o)$$
+$$h_t = o_t\odot tanh(c_t)$$

+ +其中,$i_t, f_t, c_t, o_t$分别表示输入门,遗忘门,记忆单元及输出门的向量值,带角标的$W$及$b$为模型参数,$tanh$为双曲正切函数,$\odot$表示逐元素(elementwise)的乘法操作。输入门控制着新输入进入记忆单元$c$的强度,遗忘门控制着记忆单元维持上一时刻值的强度,输出门控制着输出记忆单元的强度。三种门的计算方式类似,但有着完全不同的参数,它们各自以不同的方式控制着记忆单元$c$,如图3所示: + +

+图3. 时刻$t$的LSTM [7]
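+
+下面给出一个单步 LSTM 计算的 NumPy 小例子,逐行对应上面的公式;它仅用于说明公式,并非 PaddlePaddle 中 `dynamic_lstm` 的实现,其中 $W_{ci}$、$W_{cf}$、$W_{co}$ 按对角阵处理、以逐元素乘实现。
+
+```python
+import numpy as np
+
+def sigmoid(x):
+    return 1.0 / (1.0 + np.exp(-x))
+
+def lstm_step(x_t, h_prev, c_prev, W, b):
+    """单步 LSTM:W[g] = (W_xg, W_hg, w_cg),其中 w_cg 为对角 peephole 权重(向量)"""
+    i = sigmoid(x_t @ W['i'][0] + h_prev @ W['i'][1] + c_prev * W['i'][2] + b['i'])  # 输入门
+    f = sigmoid(x_t @ W['f'][0] + h_prev @ W['f'][1] + c_prev * W['f'][2] + b['f'])  # 遗忘门
+    c = f * c_prev + i * np.tanh(x_t @ W['c'][0] + h_prev @ W['c'][1] + b['c'])      # 记忆单元
+    o = sigmoid(x_t @ W['o'][0] + h_prev @ W['o'][1] + c * W['o'][2] + b['o'])       # 输出门
+    h = o * np.tanh(c)                                                               # 隐层状态
+    return h, c
+```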

+ +LSTM通过给简单的循环神经网络增加记忆及控制门的方式,增强了其处理远距离依赖问题的能力。类似原理的改进还有Gated Recurrent Unit (GRU)\[[8](#参考文献)\],其设计更为简洁一些。**这些改进虽然各有不同,但是它们的宏观描述却与简单的循环神经网络一样(如图2所示),即隐状态依据当前输入及前一时刻的隐状态来改变,不断地循环这一过程直至输入处理完毕:** + +

+$$h_t=Recurrent(x_t,h_{t-1})$$

+ +其中,$Recurrent$可以表示简单的循环神经网络、GRU或LSTM。 + +### 栈式双向LSTM(Stacked Bidirectional LSTM) + +对于正常顺序的循环神经网络,$h_t$包含了$t$时刻之前的输入信息,也就是上文信息。同样,为了得到下文信息,我们可以使用反方向(将输入逆序处理)的循环神经网络。结合构建深层循环神经网络的方法(深层神经网络往往能得到更抽象和高级的特征表示),我们可以通过构建更加强有力的基于LSTM的栈式双向循环神经网络\[[9](#参考文献)\],来对时序数据进行建模。 + +如图4所示(以三层为例),奇数层LSTM正向,偶数层LSTM反向,高一层的LSTM使用低一层LSTM及之前所有层的信息作为输入,对最高层LSTM序列使用时间维度上的最大池化即可得到文本的定长向量表示(这一表示充分融合了文本的上下文信息,并且对文本进行了深层次抽象),最后我们将文本表示连接至softmax构建分类模型。 + +

+图4. 栈式双向LSTM用于文本分类

+ + +## 数据集介绍 + +我们以[IMDB情感分析数据集](http://ai.stanford.edu/%7Eamaas/data/sentiment/)为例进行介绍。IMDB数据集的训练集和测试集分别包含25000个已标注过的电影评论。其中,负面评论的得分小于等于4,正面评论的得分大于等于7,满分10分。 +```text +aclImdb +|- test + |-- neg + |-- pos +|- train + |-- neg + |-- pos +``` +Paddle在`dataset/imdb.py`中提实现了imdb数据集的自动下载和读取,并提供了读取字典、训练数据、测试数据等API。 + +## 配置模型 + +在该示例中,我们实现了两种文本分类算法,分别基于[推荐系统](https://github.com/PaddlePaddle/book/tree/develop/05.recommender_system)一节介绍过的文本卷积神经网络,以及[栈式双向LSTM](#栈式双向LSTM(Stacked Bidirectional LSTM))。我们首先引入要用到的库和定义全局变量: + +```python +from __future__ import print_function +import paddle +import paddle.fluid as fluid +import numpy as np +import sys +import math + +CLASS_DIM = 2 #情感分类的类别数 +EMB_DIM = 128 #词向量的维度 +HID_DIM = 512 #隐藏层的维度 +STACKED_NUM = 3 #LSTM双向栈的层数 +BATCH_SIZE = 128 #batch的大小 + +``` + + +### 文本卷积神经网络 +我们构建神经网络`convolution_net`,示例代码如下。 +需要注意的是:`fluid.nets.sequence_conv_pool` 包含卷积和池化层两个操作。 + +```python +#文本卷积神经网络 +def convolution_net(data, input_dim, class_dim, emb_dim, hid_dim): + emb = fluid.embedding( + input=data, size=[input_dim, emb_dim], is_sparse=True) + conv_3 = fluid.nets.sequence_conv_pool( + input=emb, + num_filters=hid_dim, + filter_size=3, + act="tanh", + pool_type="sqrt") + conv_4 = fluid.nets.sequence_conv_pool( + input=emb, + num_filters=hid_dim, + filter_size=4, + act="tanh", + pool_type="sqrt") + prediction = fluid.layers.fc( + input=[conv_3, conv_4], size=class_dim, act="softmax") + return prediction +``` + +网络的输入`input_dim`表示的是词典的大小,`class_dim`表示类别数。这里,我们使用[`sequence_conv_pool`](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/nets.py) API实现了卷积和池化操作。 + + + +### 栈式双向LSTM + +栈式双向神经网络`stacked_lstm_net`的代码片段如下: + +```python +#栈式双向LSTM +def stacked_lstm_net(data, input_dim, class_dim, emb_dim, hid_dim, stacked_num): + + #计算词向量 + emb = fluid.embedding( + input=data, size=[input_dim, emb_dim], is_sparse=True) + + #第一层栈 + #全连接层 + fc1 = fluid.layers.fc(input=emb, size=hid_dim) + #lstm层 + lstm1, cell1 = fluid.layers.dynamic_lstm(input=fc1, size=hid_dim) + + inputs = [fc1, lstm1] + + #其余的所有栈结构 + for i in range(2, stacked_num + 1): + fc = fluid.layers.fc(input=inputs, size=hid_dim) + lstm, cell = fluid.layers.dynamic_lstm( + input=fc, size=hid_dim, is_reverse=(i % 2) == 0) + inputs = [fc, lstm] + + #池化层 + fc_last = fluid.layers.sequence_pool(input=inputs[0], pool_type='max') + lstm_last = fluid.layers.sequence_pool(input=inputs[1], pool_type='max') + + #全连接层,softmax预测 + prediction = fluid.layers.fc( + input=[fc_last, lstm_last], size=class_dim, act='softmax') + return prediction +``` +以上的栈式双向LSTM抽象出了高级特征并把其映射到和分类类别数同样大小的向量上。最后一个全连接层的'softmax'激活函数用来计算分类属于某个类别的概率。 + +重申一下,此处我们可以调用`convolution_net`或`stacked_lstm_net`的任何一个网络结构进行训练学习。我们以`convolution_net`为例。 + +接下来我们定义预测程序(`inference_program`)。预测程序使用`convolution_net`来对`fluid.layer.data`的输入进行预测。 + +```python +def inference_program(word_dict): + data = fluid.data( + name="words", shape=[None], dtype="int64", lod_level=1) + dict_dim = len(word_dict) + net = convolution_net(data, dict_dim, CLASS_DIM, EMB_DIM, HID_DIM) + # net = stacked_lstm_net(data, dict_dim, CLASS_DIM, EMB_DIM, HID_DIM, STACKED_NUM) + return net +``` + +我们这里定义了`training_program`。它使用了从`inference_program`返回的结果来计算误差。我们同时定义了优化函数`optimizer_func`。 + +因为是有监督的学习,训练集的标签也在`fluid.layers.data`中定义了。在训练过程中,交叉熵用来在`fluid.layer.cross_entropy`中作为损失函数。 + +在测试过程中,分类器会计算各个输出的概率。第一个返回的数值规定为cost。 + +```python +def train_program(prediction): + label = fluid.data(name="label", shape=[None, 1], dtype="int64") + cost = fluid.layers.cross_entropy(input=prediction, label=label) 
+ avg_cost = fluid.layers.mean(cost) + accuracy = fluid.layers.accuracy(input=prediction, label=label) + return [avg_cost, accuracy] #返回平均cost和准确率acc + +#优化函数 +def optimizer_func(): + return fluid.optimizer.Adagrad(learning_rate=0.002) +``` + +## 训练模型 + +### 定义训练环境 + +定义您的训练是在CPU上还是在GPU上: + + +```python +use_cuda = False #在cpu上进行训练 +place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() +``` + +### 定义数据提供器 + +下一步是为训练和测试定义数据提供器。提供器读入一个大小为 BATCH_SIZE的数据。paddle.dataset.imdb.word_dict 每次会在乱序化后提供一个大小为BATCH_SIZE的数据,乱序化的大小为缓存大小buf_size。 + +注意:读取IMDB的数据可能会花费几分钟的时间,请耐心等待。 + +```python +print("Loading IMDB word dict....") +word_dict = paddle.dataset.imdb.word_dict() + +print ("Reading training data....") +train_reader = fluid.io.batch( + fluid.io.shuffle( + paddle.dataset.imdb.train(word_dict), buf_size=25000), + batch_size=BATCH_SIZE) +print("Reading testing data....") +test_reader = fluid.io.batch( + paddle.dataset.imdb.test(word_dict), batch_size=BATCH_SIZE) +``` +word_dict是一个字典序列,是词和label的对应关系,运行下一行可以看到具体内容: +```python +word_dict +``` +每行是如('limited': 1726)的对应关系,该行表示单词limited所对应的label是1726。 + +### 构造训练器 +训练器需要一个训练程序和一个训练优化函数。 + +```python +exe = fluid.Executor(place) +prediction = inference_program(word_dict) +[avg_cost, accuracy] = train_program(prediction)#训练程序 +sgd_optimizer = optimizer_func()#训练优化函数 +sgd_optimizer.minimize(avg_cost) +``` + +该函数用来计算训练中模型在test数据集上的结果 +```python +def train_test(program, reader): + count = 0 + feed_var_list = [ + program.global_block().var(var_name) for var_name in feed_order + ] + feeder_test = fluid.DataFeeder(feed_list=feed_var_list, place=place) + test_exe = fluid.Executor(place) + accumulated = len([avg_cost, accuracy]) * [0] + for test_data in reader(): + avg_cost_np = test_exe.run( + program=program, + feed=feeder_test.feed(test_data), + fetch_list=[avg_cost, accuracy]) + accumulated = [ + x[0] + x[1][0] for x in zip(accumulated, avg_cost_np) + ] + count += 1 + return [x / count for x in accumulated] +``` + +### 提供数据并构建主训练循环 + +`feed_order`用来定义每条产生的数据和`fluid.layers.data`之间的映射关系。比如,`imdb.train`产生的第一列的数据对应的是`words`这个特征。 + +```python +# Specify the directory path to save the parameters +params_dirname = "understand_sentiment_conv.inference.model" + +feed_order = ['words', 'label'] +pass_num = 1 #训练循环的轮数 + +#程序主循环部分 +def train_loop(main_program): + #启动上文构建的训练器 + exe.run(fluid.default_startup_program()) + + feed_var_list_loop = [ + main_program.global_block().var(var_name) for var_name in feed_order + ] + feeder = fluid.DataFeeder( + feed_list=feed_var_list_loop, place=place) + + test_program = fluid.default_main_program().clone(for_test=True) + + #训练循环 + for epoch_id in range(pass_num): + for step_id, data in enumerate(train_reader()): + #运行训练器 + metrics = exe.run(main_program, + feed=feeder.feed(data), + fetch_list=[avg_cost, accuracy]) + + #测试结果 + avg_cost_test, acc_test = train_test(test_program, test_reader) + print('Step {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format( + step_id, avg_cost_test, acc_test)) + + print("Step {0}, Epoch {1} Metrics {2}".format( + step_id, epoch_id, list(map(np.array, + metrics)))) + + if step_id == 30: + if params_dirname is not None: + fluid.io.save_inference_model(params_dirname, ["words"], + prediction, exe)#保存模型 + return +``` + +### 训练过程处理 + +我们在训练主循环里打印了每一步输出,可以观察训练情况。 + +### 开始训练 + +最后,我们启动训练主循环来开始训练。训练时间较长,如果为了更快的返回结果,可以通过调整损耗值范围或者训练步数,以减少准确率的代价来缩短训练时间。 + +```python +train_loop(fluid.default_main_program()) +``` + +## 应用模型 + +### 构建预测器 + +和训练过程一样,我们需要创建一个预测过程,并使用训练得到的模型和参数来进行预测,`params_dirname`用来存放训练过程中的各个参数。 + 
+```python +place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() +exe = fluid.Executor(place) +inference_scope = fluid.core.Scope() +``` + +### 生成测试用输入数据 + +为了进行预测,我们任意选取3个评论。请随意选取您看好的3个。我们把评论中的每个词对应到`word_dict`中的id。如果词典中没有这个词,则设为`unknown`。 +然后我们用`create_lod_tensor`来创建细节层次的张量,关于该函数的详细解释请参照[API文档](http://paddlepaddle.org/documentation/docs/zh/1.2/user_guides/howto/basic_concept/lod_tensor.html)。 + +```python +reviews_str = [ + b'read the book forget the movie', b'this is a great movie', b'this is very bad' +] +reviews = [c.split() for c in reviews_str] + +UNK = word_dict[''] +lod = [] +for c in reviews: + lod.append([word_dict.get(words, UNK) for words in c]) + +base_shape = [[len(c) for c in lod]] +lod = np.array(sum(lod, []), dtype=np.int64) + +tensor_words = fluid.create_lod_tensor(lod, base_shape, place) +``` + +## 应用模型并进行预测 + +现在我们可以对每一条评论进行正面或者负面的预测啦。 + +```python +with fluid.scope_guard(inference_scope): + + [inferencer, feed_target_names, + fetch_targets] = fluid.io.load_inference_model(params_dirname, exe) + + assert feed_target_names[0] == "words" + results = exe.run(inferencer, + feed={feed_target_names[0]: tensor_words}, + fetch_list=fetch_targets, + return_numpy=False) + np_data = np.array(results[0]) + for i, r in enumerate(np_data): + print("Predict probability of ", r[0], " to be positive and ", r[1], + " to be negative for review \'", reviews_str[i], "\'") + +``` + + +## 总结 + +本章我们以情感分析为例,介绍了使用深度学习的方法进行端对端的短文本分类,并且使用PaddlePaddle完成了全部相关实验。同时,我们简要介绍了两种文本处理模型:卷积神经网络和循环神经网络。在后续的章节中我们会看到这两种基本的深度学习模型在其它任务上的应用。 + + +## 参考文献 +1. Kim Y. [Convolutional neural networks for sentence classification](http://arxiv.org/pdf/1408.5882)[J]. arXiv preprint arXiv:1408.5882, 2014. +2. Kalchbrenner N, Grefenstette E, Blunsom P. [A convolutional neural network for modelling sentences](http://arxiv.org/pdf/1404.2188.pdf?utm_medium=App.net&utm_source=PourOver)[J]. arXiv preprint arXiv:1404.2188, 2014. +3. Yann N. Dauphin, et al. [Language Modeling with Gated Convolutional Networks](https://arxiv.org/pdf/1612.08083v1.pdf)[J] arXiv preprint arXiv:1612.08083, 2016. +4. Siegelmann H T, Sontag E D. [On the computational power of neural nets](http://research.cs.queensu.ca/home/akl/cisc879/papers/SELECTED_PAPERS_FROM_VARIOUS_SOURCES/05070215382317071.pdf)[C]//Proceedings of the fifth annual workshop on Computational learning theory. ACM, 1992: 440-449. +5. Hochreiter S, Schmidhuber J. [Long short-term memory](http://web.eecs.utk.edu/~itamar/courses/ECE-692/Bobby_paper1.pdf)[J]. Neural computation, 1997, 9(8): 1735-1780. +6. Bengio Y, Simard P, Frasconi P. [Learning long-term dependencies with gradient descent is difficult](http://www-dsi.ing.unifi.it/~paolo/ps/tnn-94-gradient.pdf)[J]. IEEE transactions on neural networks, 1994, 5(2): 157-166. +7. Graves A. [Generating sequences with recurrent neural networks](http://arxiv.org/pdf/1308.0850)[J]. arXiv preprint arXiv:1308.0850, 2013. +8. Cho K, Van Merriënboer B, Gulcehre C, et al. [Learning phrase representations using RNN encoder-decoder for statistical machine translation](http://arxiv.org/pdf/1406.1078)[J]. arXiv preprint arXiv:1406.1078, 2014. +9. Zhou J, Xu W. [End-to-end learning of semantic role labeling using recurrent neural networks](http://www.aclweb.org/anthology/P/P15/P15-1109.pdf)[C]//Proceedings of the Annual Meeting of the Association for Computational Linguistics. 2015. + +
+知识共享许可协议
本教程PaddlePaddle 创作,采用 知识共享 署名-相同方式共享 4.0 国际 许可协议进行许可。 diff --git a/doc/paddle/user_guides/nlp_case/understand_sentiment/README.md b/doc/paddle/user_guides/nlp_case/understand_sentiment/README.md new file mode 100644 index 0000000000000000000000000000000000000000..00ed9efb77e0d45bc15c212d1d6a67a06aebf07f --- /dev/null +++ b/doc/paddle/user_guides/nlp_case/understand_sentiment/README.md @@ -0,0 +1,443 @@ + + +# Sentiment Analysis + +The source code of this tutorial is in [book/understand_sentiment](https://github.com/PaddlePaddle/book/tree/develop/06.understand_sentiment). For new users, please refer to [Running This Book](https://github.com/PaddlePaddle/book/blob/develop/README.md#running-the-book) . + +## Background Introduction + +In natural language processing, sentiment analysis generally refers to judging the emotion expressed by a piece of text. Among them, a piece of text can be a sentence, a paragraph or a document. Emotional state can be two categories, such as (positive, negative), (happy, sad); or three categories, such as (positive, negative, neutral) and so on.The application scenarios of understanding sentiment are very broad, such as dividing the comments posted by users on shopping websites (Amazon, Tmall, Taobao, etc.), travel websites, and movie review websites into positive comments and negative comments; or in order to analyze the user's overall experience with a product, grab user reviews of the product, and perform sentiment analysis. Table 1 shows an example of understanding sentiment of movie reviews: + +| Movie Comments | Category | +| -------- | ----- | +|In Feng Xiaogang’s movies of the past few years, it is the best one | Positive | +|Very bad feat, like a local TV series | Negative | +|The round-lens lens is full of brilliance, and the tonal background is beautiful, but the plot is procrastinating, the accent is not good, and even though taking an effort but it is hard to focus on the show | Negative | +|The plot could be scored 4 stars. In addition, the angle of the round lens plusing the scenery of Wuyuan is very much like the feeling of Chinese landscape painting. It satisfied me. | Positive | + +

Form 1 Sentiment analysis of movie comments

+ +In natural language processing, sentiment is a typical problem of **text categorization**, which divides the text that needs to be sentiment analysis into its category. Text categorization involves two issues: text representation and classification methods. Before the emergence of the deep learning, the mainstream text representation methods are BOW (bag of words), topic models, etc.; the classification methods are SVM (support vector machine), LR (logistic regression) and so on. + +For a piece of text, BOW means that its word order, grammar and syntax are ignored, and this text is only treated as a collection of words, so the BOW method does not adequately represent the semantic information of the text. For example, the sentence "This movie is awful" and "a boring, empty, non-connotative work" have a high semantic similarity in sentiment analysis, but their BOW representation has a similarity of zero. Another example is that the BOW is very similar to the sentence "an empty, work without connotations" and "a work that is not empty and has connotations", but in fact they mean differently. + +The deep learning we are going to introduce in this chapter overcomes the above shortcomings of BOW representation. It maps text to low-dimensional semantic space based on word order, and performs text representation and classification in end-to-end mode. Its performance is significantly improved compared to the traditional method \[[1](#References)\]. + +## Model Overview +The text representation models used in this chapter are Convolutional Neural Networks and Recurrent Neural Networks and their extensions. These models are described below. + +### Introduction of Text Convolutional Neural Networks (CNN) + +We introduced the calculation process of the CNN model applied to text data in the [Recommended System](https://github.com/PaddlePaddle/book/tree/develop/05.recommender_system) section. Here is a simple review. + +For a CNN, first convolute input word vector sequence to generate a feature map, and then obtain the features of the whole sentence corresponding to the kernel by using a max pooling over time on the feature map. Finally, the splicing of all the features obtained is the fixed-length vector representation of the text. For the text classification problem, connecting it via softmax to construct a complete model. In actual applications, we use multiple convolution kernels to process sentences, and convolution kernels with the same window size are stacked to form a matrix, which can complete the operation more efficiently. In addition, we can also use the convolution kernel with different window sizes to process the sentence. Figure 3 in the [Recommend System](https://github.com/PaddlePaddle/book/tree/develop/05.recommender_system) section shows four convolution kernels, namely Figure 1 below, with different colors representing convolution kernel operations of different sizes. + +

+Figure 1. CNN text classification model
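+
+To make this concrete, the following is a toy NumPy sketch (purely illustrative; it is not the PaddlePaddle implementation used later in this chapter) of one set of window-size-3 filters followed by max pooling over time. The length of the resulting sentence vector depends only on the number of filters, not on the sentence length.
+
+```python
+import numpy as np
+
+rng = np.random.default_rng(0)
+seq_len, emb_dim, num_filters, window = 9, 6, 4, 3
+
+words = rng.standard_normal((seq_len, emb_dim))             # an embedded sentence
+filters = rng.standard_normal((num_filters, window * emb_dim))
+
+# feature map: one response per filter and per window position
+positions = seq_len - window + 1
+feature_map = np.empty((positions, num_filters))
+for t in range(positions):
+    window_vec = words[t:t + window].reshape(-1)            # concatenate 3 word vectors
+    feature_map[t] = filters @ window_vec
+
+sentence_vector = feature_map.max(axis=0)                   # max pooling over time
+print(sentence_vector.shape)                                # (4,), independent of seq_len
+```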

+
+For general short text classification problems, the simple text convolutional network described above can already achieve a high accuracy \[[1](#References)\]. If a more abstract, higher-level text feature representation is desired, a deeper text convolutional neural network can be constructed \[[2](#References), [3](#References)\].
+
+### Recurrent Neural Network (RNN)
+
+RNN is a powerful tool for accurately modeling sequence data. In fact, recurrent neural networks are theoretically Turing-complete \[[4](#References)\]. Natural language is a typical kind of sequence data (a sequence of words). In recent years, RNNs and their variants (such as long short-term memory \[[5](#References)\]) have been applied to many natural language tasks, such as language modeling, syntactic parsing, semantic role labeling (and sequence labeling in general), semantic representation, image captioning, dialogue, and machine translation, where they perform very well and are often the state of the art.
+

+Figure 2. Schematic diagram of the RNN expanded by time

+
+The RNN unrolled over time is shown in Figure 2: at time $t$, the network reads the $t$-th input $x_t$ (a vector) and the hidden-layer state of the previous moment $h_{t-1}$ (a vector, with $h_0$ normally initialized to the $0$ vector), and computes the hidden-layer state $h_t$ of the current moment. This step is repeated until all inputs have been read. If the function is denoted as $f$, its formula can be expressed as:
+
+$$h_t=f(x_t,h_{t-1})=\sigma(W_{xh}x_t+W_{hh}h_{t-1}+b_h)$$
+
+where $W_{xh}$ is the input-to-hidden weight matrix, $W_{hh}$ is the hidden-to-hidden weight matrix, $b_h$ is the bias vector of the hidden layer, and $\sigma$ is the $sigmoid$ function.
+
+When dealing with natural language, the word (one-hot representation) is usually mapped to its word vector representation first, which is then used as the input $x_t$ of the recurrent neural network at each moment. In addition, other layers may be connected to the hidden layer of the RNN depending on actual needs. For example, the hidden-layer output of one RNN can be fed into the input of the next RNN to build a deep (or stacked) RNN, or the hidden state at the last moment can be extracted as a sentence representation and fed into a classification model, and so on.
+
+### Long Short-Term Memory Network (LSTM)
+
+For longer sequences, vanishing or exploding gradients are likely to occur while training an RNN \[[6](#References)\]. To solve this problem, Hochreiter S, Schmidhuber J. (1997) proposed LSTM (long short-term memory \[[5](#References)\]).
+
+Compared to a simple RNN, LSTM adds a memory unit $c$, an input gate $i$, a forget gate $f$, and an output gate $o$. The combination of these gates and the memory unit greatly enhances the ability of the recurrent neural network to process long sequences. If the LSTM-based recurrent function is denoted as $F$, the formula is:
+
+$$ h_t=F(x_t,h_{t-1})$$
+
+$F$ is a combination of the following formulas \[[7](#References)\]:
+$$ i_t = \sigma(W_{xi}x_t+W_{hi}h_{t-1}+W_{ci}c_{t-1}+b_i) $$
+$$ f_t = \sigma(W_{xf}x_t+W_{hf}h_{t-1}+W_{cf}c_{t-1}+b_f) $$
+$$ c_t = f_t\odot c_{t-1}+i_t\odot tanh(W_{xc}x_t+W_{hc}h_{t-1}+b_c) $$
+$$ o_t = \sigma(W_{xo}x_t+W_{ho}h_{t-1}+W_{co}c_{t}+b_o) $$
+$$ h_t = o_t\odot tanh(c_t) $$
+where $i_t, f_t, c_t, o_t$ denote the vector values of the input gate, the forget gate, the memory unit and the output gate respectively, the subscripted $W$ and $b$ are model parameters, $tanh$ is the hyperbolic tangent function, and $\odot$ denotes elementwise multiplication. The input gate controls how strongly the new input enters the memory unit $c$, the forget gate controls how strongly the memory unit keeps its value from the previous moment, and the output gate controls how strongly the memory unit is emitted as output. The three gates are computed in a similar way but with completely different parameters, and each controls the memory unit $c$ in its own way, as shown in Figure 3:
+

+Figure 3. LSTM for time $t$ [7]
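+
+To make the gate equations above concrete, here is a minimal NumPy sketch of a single LSTM step. It is illustrative only (it is not how PaddlePaddle implements `dynamic_lstm`); the peephole weights $W_{ci}, W_{cf}, W_{co}$ are treated as diagonal and therefore applied elementwise.
+
+```python
+import numpy as np
+
+def sigmoid(x):
+    return 1.0 / (1.0 + np.exp(-x))
+
+def lstm_step(x_t, h_prev, c_prev, W, b):
+    # W[g] = (W_xg, W_hg, w_cg); w_cg is the elementwise (diagonal) peephole weight
+    i = sigmoid(x_t @ W['i'][0] + h_prev @ W['i'][1] + c_prev * W['i'][2] + b['i'])  # input gate
+    f = sigmoid(x_t @ W['f'][0] + h_prev @ W['f'][1] + c_prev * W['f'][2] + b['f'])  # forget gate
+    c = f * c_prev + i * np.tanh(x_t @ W['c'][0] + h_prev @ W['c'][1] + b['c'])      # memory unit
+    o = sigmoid(x_t @ W['o'][0] + h_prev @ W['o'][1] + c * W['o'][2] + b['o'])       # output gate
+    h = o * np.tanh(c)                                                               # hidden state
+    return h, c
+
+rng = np.random.default_rng(0)
+D, H = 4, 3                                    # toy input and hidden sizes
+W = {g: (rng.standard_normal((D, H)), rng.standard_normal((H, H)), rng.standard_normal(H))
+     for g in "ifo"}
+W['c'] = (rng.standard_normal((D, H)), rng.standard_normal((H, H)))
+b = {g: np.zeros(H) for g in "ifco"}
+
+h, c = np.zeros(H), np.zeros(H)
+for x_t in rng.standard_normal((5, D)):        # a toy sequence of 5 input vectors
+    h, c = lstm_step(x_t, h, c, W, b)          # h is the state passed downstream
+```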

+
+LSTM enhances the RNN's ability to handle long-range dependencies by adding a memory unit and control gates. A similar improvement is the Gated Recurrent Unit (GRU) \[[8](#References)\], which has a more concise design. **These improvements differ in their details, but at a macro level they behave like the simple recurrent neural network (as shown in Figure 2): the hidden state changes according to the current input and the hidden state of the previous moment, and this process repeats until the whole input has been processed:**
+
+$$ h_t=Recurrent(x_t,h_{t-1})$$
+
+Here, $Recurrent$ can stand for a simple RNN, a GRU or an LSTM.
+
+
+
+### Stacked Bidirectional LSTM
+
+For a forward-directed RNN, $h_t$ contains only the information of the inputs before time $t$, i.e., the preceding context. To capture the following context as well, we can additionally use an RNN running in the opposite direction (processing the input in reverse order). Combined with the technique of building deep recurrent neural networks (deeper networks tend to yield more abstract, higher-level feature representations), we can build a more powerful LSTM-based stacked bidirectional recurrent neural network \[[9](#References)\] to model sequence data.
+
+As shown in Figure 4 (taking three layers as an example), odd-numbered LSTM layers run forward and even-numbered layers run backward. Each higher LSTM layer takes the lower layer's output, together with the information from all previous layers, as its input. Max pooling over time on the topmost LSTM sequence yields a fixed-length vector representation of the text (this representation fully fuses the context and is a deep abstraction of the text); finally we connect this text representation to a softmax layer to build the classification model.
+

+Figure 4. Stacked bidirectional LSTM for text categorization
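+
+The sketch below gives a rough NumPy illustration of this stacking scheme under simplifying assumptions: a plain recurrent step stands in for the LSTM, the weights are random, and each layer simply concatenates its input with its output before passing it upward. It only shows how alternating scan directions plus max pooling over time yield a fixed-length text vector.
+
+```python
+import numpy as np
+
+def sigmoid(x):
+    return 1.0 / (1.0 + np.exp(-x))
+
+def recurrent_layer(xs, W_xh, W_hh, b_h, reverse=False):
+    """Run a simple recurrent step over a sequence, optionally in reverse order."""
+    h = np.zeros(W_hh.shape[0])
+    outputs = []
+    for x in (xs[::-1] if reverse else xs):
+        h = sigmoid(x @ W_xh + h @ W_hh + b_h)
+        outputs.append(h)
+    if reverse:
+        outputs.reverse()                      # restore the original time order
+    return np.stack(outputs)                   # shape: [seq_len, hidden]
+
+rng = np.random.default_rng(0)
+seq_len, emb_dim, hidden = 7, 8, 5
+xs = rng.standard_normal((seq_len, emb_dim))   # an embedded word sequence
+
+layer_input = xs
+for layer in range(1, 4):                      # three stacked layers
+    W_xh = rng.standard_normal((layer_input.shape[1], hidden))
+    W_hh = rng.standard_normal((hidden, hidden))
+    layer_out = recurrent_layer(layer_input, W_xh, W_hh, np.zeros(hidden),
+                                reverse=(layer % 2 == 0))   # even layers run backward
+    # each layer also passes the lower layers' information upward
+    layer_input = np.concatenate([layer_input, layer_out], axis=1)
+
+text_vector = layer_out.max(axis=0)            # max pooling over time on the top layer
+print(text_vector.shape)                       # (5,) regardless of seq_len
+```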

+ + +## Dataset Introduction + +We use the [IMDB sentiment analysis data set](http://ai.stanford.edu/%7Eamaas/data/sentiment/) as an example. The training and testing IMDB dataset contain 25,000 labeled movie reviews respectively. Among them, the score of the negative comment is less than or equal to 4, and the score of the positive comment is greater than or equal to 7, full score is 10. +```text +aclImdb +|- test + |-- neg + |-- pos +|- train + |-- neg + |-- pos +``` +Paddle implements the automatic download and read the imdb dataset in `dataset/imdb.py`, and provides API for reading dictionary, training data, testing data, and so on. + +## Model Configuration + +In this example, we implement two text categorization algorithms based on the text convolutional neural network described in the [Recommender System](https://github.com/PaddlePaddle/book/tree/develop/05.recommender_system) section and [Stacked Bidirectional LSTM](#Stacked Bidirectional LSTM). We first import the packages we need to use and define global variables: + +```python +from __future__ import print_function +import paddle +import paddle.fluid as fluid +import numpy as np +import sys +import math + +CLASS_DIM = 2 #Number of categories for sentiment analysis +EMB_DIM = 128 #Dimensions of the word vector +HID_DIM = 512 #Dimensions of hide layer +STACKED_NUM = 3 #LSTM Layers of the bidirectional stack +BATCH_SIZE = 128 #batch size + +``` + + +### Text Convolutional Neural Network +We build the neural network `convolution_net`, the sample code is as follows. +Note that `fluid.nets.sequence_conv_pool` contains both convolution and pooling layers. + +```python +#Textconvolution neural network +def convolution_net(data, input_dim, class_dim, emb_dim, hid_dim): + emb = fluid.embedding( + input=data, size=[input_dim, emb_dim], is_sparse=True) + conv_3 = fluid.nets.sequence_conv_pool( + input=emb, + num_filters=hid_dim, + filter_size=3, + act="tanh", + pool_type="sqrt") + conv_4 = fluid.nets.sequence_conv_pool( + input=emb, + num_filters=hid_dim, + filter_size=4, + act="tanh", + pool_type="sqrt") + prediction = fluid.layers.fc( + input=[conv_3, conv_4], size=class_dim, act="softmax") + return prediction +``` + +The network input `input_dim` indicates the size of the dictionary, and `class_dim` indicates the number of categories. Here, we implement the convolution and pooling operations using the [`sequence_conv_pool`](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/nets.py) API. 
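+
+As an optional sanity check, the following sketch (not part of the original tutorial; the vocabulary size 10000 is an arbitrary stand-in for `len(word_dict)`) builds `convolution_net` once in a throwaway program and confirms that it ends in a two-way softmax output:
+
+```python
+# Hypothetical check: build the CNN in a scratch program and inspect its output shape.
+check_prog = fluid.Program()
+with fluid.program_guard(check_prog, fluid.Program()):
+    words = fluid.data(name="words", shape=[None], dtype="int64", lod_level=1)
+    pred = convolution_net(words, input_dim=10000, class_dim=CLASS_DIM,
+                           emb_dim=EMB_DIM, hid_dim=HID_DIM)
+    print(pred.shape)   # about (-1, 2): unknown batch size, one probability per class
+```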
+ + + +### Stacked bidirectional LSTM + +The code of the stack bidirectional LSTM `stacked_lstm_net` is as follows: + +```python +#Stack Bidirectional LSTM +def stacked_lstm_net(data, input_dim, class_dim, emb_dim, hid_dim, stacked_num): + + # Calculate word vectorvector + emb = fluid.embedding( + input=data, size=[input_dim, emb_dim], is_sparse=True) + + #First stack + #Fully connected layer + fc1 = fluid.layers.fc(input=emb, size=hid_dim) + #lstm layer + lstm1, cell1 = fluid.layers.dynamic_lstm(input=fc1, size=hid_dim) + + inputs = [fc1, lstm1] + + #All remaining stack structures + for i in range(2, stacked_num + 1): + fc = fluid.layers.fc(input=inputs, size=hid_dim) + lstm, cell = fluid.layers.dynamic_lstm( + input=fc, size=hid_dim, is_reverse=(i % 2) == 0) + inputs = [fc, lstm] + + #pooling layer + pc_last = fluid.layers.sequence_pool(input=inputs[0], pool_type='max') + lstm_last = fluid.layers.sequence_pool(input=inputs[1], pool_type='max') + + #Fully connected layer, softmax prediction + prediction = fluid.layers.fc( + input=[fc_last, lstm_last], size=class_dim, act='softmax') + return prediction +``` +The above stacked bidirectional LSTM abstracts the advanced features and maps them to vectors of the same size as the number of classification. The 'softmax' activation function of the last fully connected layer is used to calculate the probability of a certain category. + +Again, here we can call any network structure of `convolution_net` or `stacked_lstm_net` for training and learning. Let's take `convolution_net` as an example. + +Next we define the prediction program (`inference_program`). We use `convolution_net` to predict the input of `fluid.layer.data`. + +```python +def inference_program(word_dict): + data = fluid.data( + name="words", shape=[None], dtype="int64", lod_level=1) + dict_dim = len(word_dict) + net = convolution_net(data, dict_dim, CLASS_DIM, EMB_DIM, HID_DIM) + # net = stacked_lstm_net(data, dict_dim, CLASS_DIM, EMB_DIM, HID_DIM, STACKED_NUM) + return net +``` + +We define `training_program` here, which uses the result returned from `inference_program` to calculate the error. We also define the optimization function `optimizer_func`. + +Because it is supervised learning, the training set tags are also defined in `fluid.layers.data`. During training, cross-entropy is used as a loss function in `fluid.layer.cross_entropy`. + +During the testing, the classifier calculates the probability of each output. The first returned value is specified as cost. + +```python +def train_program(prediction): + label = fluid.data(name="label", shape=[None, 1], dtype="int64") + cost = fluid.layers.cross_entropy(input=prediction, label=label) + avg_cost = fluid.layers.mean(cost) + accuracy = fluid.layers.accuracy(input=prediction, label=label) + return [avg_cost, accuracy] #return average cost and accuracy acc + +#Optimization function +def optimizer_func(): + return fluid.optimizer.Adagrad(learning_rate=0.002) +``` + +## Training Model + +### Defining the training environment + +Define whether your training is on the CPU or GPU: + + +```python +use_cuda = False #train on cpu +place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() +``` + +### Defining the data creator + +The next step is to define a data creator for training and testing. The creator reads in a data of size BATCH_SIZE. Paddle.dataset.imdb.word_dict will provide a size of BATCH_SIZE after each time shuffling, which is the cache size: buf_size. 
+ +Note: It may take a few minutes to read the IMDB data, please be patient. + +```python +print("Loading IMDB word dict....") +word_dict = paddle.dataset.imdb.word_dict() + +print ("Reading training data....") +train_reader = fluid.io.batch( + fluid.io.shuffle( + paddle.dataset.imdb.train(word_dict), buf_size=25000), + batch_size=BATCH_SIZE) +print("Reading testing data....") +test_reader = fluid.io.batch( + paddle.dataset.imdb.test(word_dict), batch_size=BATCH_SIZE) +``` +Word_dict is a dictionary sequence, which is the correspondence between words and labels. You can see it specifically by running the next code: +```python +word_dict +``` +Each line is a correspondence such as ('limited': 1726), which indicates that the label corresponding to the word limited is 1726. + +### Construction Trainer +The trainer requires a training program and a training optimization function. + +```python +exe = fluid.Executor(place) +prediction = inference_program(word_dict) +[avg_cost, accuracy] = train_program(prediction)#training program +sgd_optimizer = optimizer_func()# training optimization function +sgd_optimizer.minimize(avg_cost) +``` + +This function is used to calculate the result of the model on the test dataset. +```python +def train_test(program, reader): + count = 0 + feed_var_list = [ + program.global_block().var(var_name) for var_name in feed_order + ] + feeder_test = fluid.DataFeeder(feed_list=feed_var_list, place=place) + test_exe = fluid.Executor(place) + accumulated = len([avg_cost, accuracy]) * [0] + for test_data in reader(): + avg_cost_np = test_exe.run( + program=program, + feed=feeder_test.feed(test_data), + fetch_list=[avg_cost, accuracy]) + accumulated = [ + x[0] + x[1][0] for x in zip(accumulated, avg_cost_np) + ] + count += 1 + return [x / count for x in accumulated] +``` + +### Providing data and building a main training loop + +`feed_order` is used to define the mapping relationship between each generated data and `fluid.layers.data`. For example, the data in the first column generated by `imdb.train` corresponds to the `words` feature. + +```python +# Specify the directory path to save the parameters +params_dirname = "understand_sentiment_conv.inference.model" + +feed_order = ['words', 'label'] +pass_num = 1 #Number rounds of the training loop + +# Main loop part of the program +def train_loop(main_program): + # Start the trainer built above + exe.run(fluid.default_startup_program()) + + feed_var_list_loop = [ + main_program.global_block().var(var_name) for var_name in feed_order + ] + feeder = fluid.DataFeeder( + feed_list=feed_var_list_loop, place=place) + + test_program = fluid.default_main_program().clone(for_test=True) + + # Training loop + for epoch_id in range(pass_num): + for step_id, data in enumerate(train_reader()): + # Running trainer + metrics = exe.run(main_program, + feed=feeder.feed(data), + fetch_list=[avg_cost, accuracy]) + + # Testing Results + avg_cost_test, acc_test = train_test(test_program, test_reader) + print('Step {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format( + step_id, avg_cost_test, acc_test)) + + print("Step {0}, Epoch {1} Metrics {2}".format( + step_id, epoch_id, list(map(np.array, + metrics)))) + + if step_id == 30: + if params_dirname is not None: + fluid.io.save_inference_model(params_dirname, ["words"], + prediction, exe)# Save model + return +``` + +### Training process + +We print the output of each step in the main loop of the training, and we can observe the training situation. 
+ +### Start training + +Finally, we start the training main loop to start training. The training time is longer. If you want to get the result faster, you can shorten the training time by adjusting the loss value range or the number of training steps at the cost of reducing the accuracy. + +```python +train_loop(fluid.default_main_program()) +``` + +## Application Model + +### Building a predictor + +As the training process, we need to create a prediction process and use the trained models and parameters to make predictions. `params_dirname` is used to store the various parameters in the training process. + +```python +place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() +exe = fluid.Executor(place) +inference_scope = fluid.core.Scope() +``` + +### Generating test input data + +In order to make predictions, we randomly select 3 comments. We correspond each word in the comment to the id in `word_dict`. If the word is not in the dictionary, set it to `unknown`. +Then we use `create_lod_tensor` to create the tensor of the detail level. For a detailed explanation of this function, please refer to [API documentation](http://paddlepaddle.org/documentation/docs/en/1.2/user_guides/howto/basic_concept/lod_tensor.html). + +```python +reviews_str = [ + b'read the book forget the movie', b'this is a great movie', b'this is very bad' +] +reviews = [c.split() for c in reviews_str] + +UNK = word_dict[''] +lod = [] +for c in reviews: + lod.append([word_dict.get(words, UNK) for words in c]) + +base_shape = [[len(c) for c in lod]] +lod = np.array(sum(lod, []), dtype=np.int64) + +tensor_words = fluid.create_lod_tensor(lod, base_shape, place) +``` + +## Applying models and making predictions + +Now we can make positive or negative predictions for each comment. + +```python +with fluid.scope_guard(inference_scope): + + [inferencer, feed_target_names, + fetch_targets] = fluid.io.load_inference_model(params_dirname, exe) + + assert feed_target_names[0] == "words" + results = exe.run(inferencer, + feed={feed_target_names[0]: tensor_words}, + fetch_list=fetch_targets, + return_numpy=False) + np_data = np.array(results[0]) + for i, r in enumerate(np_data): + print("Predict probability of ", r[0], " to be positive and ", r[1], + " to be negative for review \'", reviews_str[i], "\'") + +``` + + +## Conclusion + +In this chapter, we take sentiment analysis as an example to introduce end-to-end short text classification using deep learning, and complete all relevant experiments using PaddlePaddle. At the same time, we briefly introduce two text processing models: convolutional neural networks and recurrent neural networks. In the following chapters, we will see the application of these two basic deep learning models on other tasks. + + + +## References + +1. Kim Y. [Convolutional neural networks for sentence classification](http://arxiv.org/pdf/1408.5882)[J]. arXiv preprint arXiv:1408.5882, 2014. +2. Kalchbrenner N, Grefenstette E, Blunsom P. [A convolutional neural network for modelling sentences](http://arxiv.org/pdf/1404.2188.pdf?utm_medium=App.net&utm_source=PourOver)[J]. arXiv preprint arXiv:1404.2188, 2014. +3. Yann N. Dauphin, et al. [Language Modeling with Gated Convolutional Networks](https://arxiv.org/pdf/1612.08083v1.pdf)[J] arXiv preprint arXiv:1612.08083, 2016. +4. Siegelmann H T, Sontag E D. 
[On the computational power of neural nets](http://research.cs.queensu.ca/home/akl/cisc879/papers/SELECTED_PAPERS_FROM_VARIOUS_SOURCES/05070215382317071.pdf)[C]//Proceedings of the fifth annual workshop on Computational learning theory. ACM, 1992: 440-449. +5. Hochreiter S, Schmidhuber J. [Long short-term memory](http://web.eecs.utk.edu/~itamar/courses/ECE-692/Bobby_paper1.pdf)[J]. Neural computation, 1997, 9(8): 1735-1780. +6. Bengio Y, Simard P, Frasconi P. [Learning long-term dependencies with gradient descent is difficult](http://www-dsi.ing.unifi.it/~paolo/ps/tnn-94-gradient.pdf)[J]. IEEE transactions on neural networks, 1994, 5(2): 157-166. +7. Graves A. [Generating sequences with recurrent neural networks](http://arxiv.org/pdf/1308.0850)[J]. arXiv preprint arXiv:1308.0850, 2013. +8. Cho K, Van Merriënboer B, Gulcehre C, et al. [Learning phrase representations using RNN encoder-decoder for statistical machine translation](http://arxiv.org/pdf/1406.1078)[J]. arXiv preprint arXiv:1406.1078, 2014. +9. Zhou J, Xu W. [End-to-end learning of semantic role labeling using recurrent neural networks](http://www.aclweb.org/anthology/P/P15/P15-1109.pdf)[C]//Proceedings of the Annual Meeting of the Association for Computational Linguistics. 2015. + +
+知识共享许可协议
This tutorial is contributed by PaddlePaddle, and licensed under a Creative Commons Attribution-ShareAlike 4.0 International License. diff --git a/doc/paddle/user_guides/nlp_case/understand_sentiment/_ce.py b/doc/paddle/user_guides/nlp_case/understand_sentiment/_ce.py new file mode 100644 index 0000000000000000000000000000000000000000..39d8c00ea3a574f7240ca9752c0d880c20721741 --- /dev/null +++ b/doc/paddle/user_guides/nlp_case/understand_sentiment/_ce.py @@ -0,0 +1,68 @@ +### This file is only used for continuous evaluation test! +from __future__ import print_function +from __future__ import division +from __future__ import absolute_import +import os +import sys +sys.path.append(os.environ['ceroot']) +from kpi import CostKpi +from kpi import AccKpi + +conv_train_cost_kpi = CostKpi( + 'conv_train_cost', 0.02, 0, actived=True, desc='train cost') +conv_train_acc_kpi = AccKpi( + 'conv_train_acc', 0.02, 0, actived=True, desc='train acc') +conv_test_cost_kpi = CostKpi( + 'conv_test_cost', 0.02, 0, actived=True, desc='test cost') +conv_test_acc_kpi = AccKpi( + 'conv_test_acc', 0.02, 0, actived=True, desc='test acc') + +rnn_train_cost_kpi = CostKpi( + 'rnn_train_cost', 0.02, 0, actived=True, desc='train cost') +rnn_train_acc_kpi = AccKpi( + 'rnn_train_acc', 0.02, 0, actived=True, desc='train acc') +rnn_test_cost_kpi = CostKpi( + 'rnn_test_cost', 0.02, 0, actived=True, desc='test cost') +rnn_test_acc_kpi = AccKpi( + 'rnn_test_acc', 0.02, 0, actived=True, desc='test acc') + +lstm_train_cost_kpi = CostKpi( + 'lstm_train_cost', 0.02, 0, actived=True, desc='train cost') +lstm_train_acc_kpi = AccKpi( + 'lstm_train_acc', 0.02, 0, actived=True, desc='train acc') +lstm_test_cost_kpi = CostKpi( + 'lstm_test_cost', 0.02, 0, actived=True, desc='test cost') +lstm_test_acc_kpi = AccKpi( + 'lstm_test_acc', 0.02, 0, actived=True, desc='test acc') + +tracking_kpis = [ + conv_train_cost_kpi, conv_train_acc_kpi, conv_test_cost_kpi, + conv_test_acc_kpi, rnn_train_cost_kpi, rnn_train_acc_kpi, + rnn_test_cost_kpi, rnn_test_acc_kpi, lstm_train_cost_kpi, + lstm_train_acc_kpi, lstm_test_cost_kpi, lstm_test_acc_kpi +] + + +def parse_log(log): + for line in log.split('\n'): + fs = line.strip().split('\t') + print(fs) + if len(fs) == 3 and fs[0] == 'kpis': + kpi_name = fs[1] + kpi_value = float(fs[2]) + yield kpi_name, kpi_value + + +def log_to_ce(log): + kpi_tracker = {} + for kpi in tracking_kpis: + kpi_tracker[kpi.name] = kpi + for (kpi_name, kpi_value) in parse_log(log): + print(kpi_name, kpi_value) + kpi_tracker[kpi_name].add_record(kpi_value) + kpi_tracker[kpi_name].persist() + + +if __name__ == '__main__': + log = sys.stdin.read() + log_to_ce(log) diff --git a/doc/paddle/user_guides/nlp_case/understand_sentiment/image/formula_lstm.png b/doc/paddle/user_guides/nlp_case/understand_sentiment/image/formula_lstm.png new file mode 100644 index 0000000000000000000000000000000000000000..23b27e5763997c9d13921b1e71ba4adaf1546a6b Binary files /dev/null and b/doc/paddle/user_guides/nlp_case/understand_sentiment/image/formula_lstm.png differ diff --git a/doc/paddle/user_guides/nlp_case/understand_sentiment/image/formula_lstm_more.png b/doc/paddle/user_guides/nlp_case/understand_sentiment/image/formula_lstm_more.png new file mode 100644 index 0000000000000000000000000000000000000000..a0a98fe53addd075bd7249c6da1da5d5b9e6889b Binary files /dev/null and b/doc/paddle/user_guides/nlp_case/understand_sentiment/image/formula_lstm_more.png differ diff --git a/doc/paddle/user_guides/nlp_case/understand_sentiment/image/formula_recrurent.png 
b/doc/paddle/user_guides/nlp_case/understand_sentiment/image/formula_recrurent.png new file mode 100644 index 0000000000000000000000000000000000000000..74df7224cc0cb14e29a72735be66df2dd3df0f70 Binary files /dev/null and b/doc/paddle/user_guides/nlp_case/understand_sentiment/image/formula_recrurent.png differ diff --git a/doc/paddle/user_guides/nlp_case/understand_sentiment/image/formula_rnn.png b/doc/paddle/user_guides/nlp_case/understand_sentiment/image/formula_rnn.png new file mode 100644 index 0000000000000000000000000000000000000000..a3804a0b56b2598c2b92fff70506b5d5abd0f2bb Binary files /dev/null and b/doc/paddle/user_guides/nlp_case/understand_sentiment/image/formula_rnn.png differ diff --git a/doc/paddle/user_guides/nlp_case/understand_sentiment/image/lstm.png b/doc/paddle/user_guides/nlp_case/understand_sentiment/image/lstm.png new file mode 100644 index 0000000000000000000000000000000000000000..98fbea413a98a619004ca669c67f5f867fe974c9 Binary files /dev/null and b/doc/paddle/user_guides/nlp_case/understand_sentiment/image/lstm.png differ diff --git a/doc/paddle/user_guides/nlp_case/understand_sentiment/image/lstm_en.png b/doc/paddle/user_guides/nlp_case/understand_sentiment/image/lstm_en.png new file mode 100644 index 0000000000000000000000000000000000000000..d73a00bf2c1fca2f9b8c26bccf5ea844fa1db50b Binary files /dev/null and b/doc/paddle/user_guides/nlp_case/understand_sentiment/image/lstm_en.png differ diff --git a/doc/paddle/user_guides/nlp_case/understand_sentiment/image/rnn.png b/doc/paddle/user_guides/nlp_case/understand_sentiment/image/rnn.png new file mode 100644 index 0000000000000000000000000000000000000000..26c904102a6e6c4e30f0048b81373ae8c148b355 Binary files /dev/null and b/doc/paddle/user_guides/nlp_case/understand_sentiment/image/rnn.png differ diff --git a/doc/paddle/user_guides/nlp_case/understand_sentiment/image/stacked_lstm.jpg b/doc/paddle/user_guides/nlp_case/understand_sentiment/image/stacked_lstm.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6b2adf70f2b5112a2e82505da5cff9f5fd0c6298 Binary files /dev/null and b/doc/paddle/user_guides/nlp_case/understand_sentiment/image/stacked_lstm.jpg differ diff --git a/doc/paddle/user_guides/nlp_case/understand_sentiment/image/stacked_lstm_en.png b/doc/paddle/user_guides/nlp_case/understand_sentiment/image/stacked_lstm_en.png new file mode 100644 index 0000000000000000000000000000000000000000..8b5dbd726178b5555c513294e7b10a81acc96ff5 Binary files /dev/null and b/doc/paddle/user_guides/nlp_case/understand_sentiment/image/stacked_lstm_en.png differ diff --git a/doc/paddle/user_guides/nlp_case/understand_sentiment/index.cn.html b/doc/paddle/user_guides/nlp_case/understand_sentiment/index.cn.html new file mode 100644 index 0000000000000000000000000000000000000000..a1d945d359e945f60d16bb135c6e47cf24a555d5 --- /dev/null +++ b/doc/paddle/user_guides/nlp_case/understand_sentiment/index.cn.html @@ -0,0 +1,516 @@ + + + + + + + + + + + + + + + + + +
+
+ + + + + + + diff --git a/doc/paddle/user_guides/nlp_case/understand_sentiment/index.html b/doc/paddle/user_guides/nlp_case/understand_sentiment/index.html new file mode 100644 index 0000000000000000000000000000000000000000..ecd861f300028e4deacb742dbaffffef3c947556 --- /dev/null +++ b/doc/paddle/user_guides/nlp_case/understand_sentiment/index.html @@ -0,0 +1,507 @@ + + + + + + + + + + + + + + + + + +
+
+ + + + + + + diff --git a/doc/paddle/user_guides/nlp_case/understand_sentiment/train_conv.py b/doc/paddle/user_guides/nlp_case/understand_sentiment/train_conv.py new file mode 100644 index 0000000000000000000000000000000000000000..456b127b92c6814fa44e7bc579e6e2f95110cb33 --- /dev/null +++ b/doc/paddle/user_guides/nlp_case/understand_sentiment/train_conv.py @@ -0,0 +1,245 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import paddle +import paddle.fluid as fluid +import numpy as np +import sys +import math +import argparse + +CLASS_DIM = 2 +EMB_DIM = 128 +HID_DIM = 512 +BATCH_SIZE = 128 + + +def parse_args(): + parser = argparse.ArgumentParser("conv") + parser.add_argument( + '--enable_ce', + action='store_true', + help="If set, run the task with continuous evaluation logs.") + parser.add_argument( + '--use_gpu', type=int, default=0, help="Whether to use GPU or not.") + parser.add_argument( + '--num_epochs', type=int, default=1, help="number of epochs.") + args = parser.parse_args() + return args + + +def convolution_net(data, input_dim, class_dim, emb_dim, hid_dim): + emb = fluid.embedding( + input=data, size=[input_dim, emb_dim], is_sparse=True) + conv_3 = fluid.nets.sequence_conv_pool( + input=emb, + num_filters=hid_dim, + filter_size=3, + act="tanh", + pool_type="sqrt") + conv_4 = fluid.nets.sequence_conv_pool( + input=emb, + num_filters=hid_dim, + filter_size=4, + act="tanh", + pool_type="sqrt") + prediction = fluid.layers.fc( + input=[conv_3, conv_4], size=class_dim, act="softmax") + return prediction + + +def inference_program(word_dict): + dict_dim = len(word_dict) + data = fluid.data(name="words", shape=[None], dtype="int64", lod_level=1) + net = convolution_net(data, dict_dim, CLASS_DIM, EMB_DIM, HID_DIM) + return net + + +def train_program(prediction): + label = fluid.data(name="label", shape=[None, 1], dtype="int64") + cost = fluid.layers.cross_entropy(input=prediction, label=label) + avg_cost = fluid.layers.mean(cost) + accuracy = fluid.layers.accuracy(input=prediction, label=label) + return [avg_cost, accuracy] + + +def optimizer_func(): + return fluid.optimizer.Adagrad(learning_rate=0.002) + + +def train(use_cuda, params_dirname): + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + + print("Loading IMDB word dict....") + word_dict = paddle.dataset.imdb.word_dict() + + print("Reading training data....") + if args.enable_ce: + train_reader = fluid.io.batch( + paddle.dataset.imdb.train(word_dict), batch_size=BATCH_SIZE) + else: + train_reader = fluid.io.batch( + fluid.io.shuffle( + paddle.dataset.imdb.train(word_dict), buf_size=25000), + batch_size=BATCH_SIZE) + + print("Reading testing data....") + test_reader = fluid.io.batch( + paddle.dataset.imdb.test(word_dict), batch_size=BATCH_SIZE) + + feed_order = ['words', 'label'] + pass_num = args.num_epochs + + main_program = fluid.default_main_program() + star_program = fluid.default_startup_program() + + if 
args.enable_ce: + main_program.random_seed = 90 + star_program.random_seed = 90 + + prediction = inference_program(word_dict) + train_func_outputs = train_program(prediction) + avg_cost = train_func_outputs[0] + + test_program = main_program.clone(for_test=True) + + # [avg_cost, accuracy] = train_program(prediction) + sgd_optimizer = optimizer_func() + sgd_optimizer.minimize(avg_cost) + exe = fluid.Executor(place) + + def train_test(program, reader): + count = 0 + feed_var_list = [ + program.global_block().var(var_name) for var_name in feed_order + ] + feeder_test = fluid.DataFeeder(feed_list=feed_var_list, place=place) + test_exe = fluid.Executor(place) + accumulated = len(train_func_outputs) * [0] + for test_data in reader(): + avg_cost_np = test_exe.run( + program=program, + feed=feeder_test.feed(test_data), + fetch_list=train_func_outputs) + accumulated = [ + x[0] + x[1][0] for x in zip(accumulated, avg_cost_np) + ] + count += 1 + return [x / count for x in accumulated] + + def train_loop(): + + feed_var_list_loop = [ + main_program.global_block().var(var_name) + for var_name in feed_order + ] + feeder = fluid.DataFeeder(feed_list=feed_var_list_loop, place=place) + exe.run(star_program) + + for epoch_id in range(pass_num): + for step_id, data in enumerate(train_reader()): + metrics = exe.run( + main_program, + feed=feeder.feed(data), + fetch_list=[var.name for var in train_func_outputs]) + print("step: {0}, Metrics {1}".format( + step_id, list(map(np.array, metrics)))) + if (step_id + 1) % 10 == 0: + avg_cost_test, acc_test = train_test(test_program, + test_reader) + print('Step {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format( + step_id, avg_cost_test, acc_test)) + + print("Step {0}, Epoch {1} Metrics {2}".format( + step_id, epoch_id, list(map(np.array, metrics)))) + if math.isnan(float(metrics[0])): + sys.exit("got NaN loss, training failed.") + if params_dirname is not None: + fluid.io.save_inference_model(params_dirname, ["words"], + prediction, exe) + if args.enable_ce and epoch_id == pass_num - 1: + print("kpis\tconv_train_cost\t%f" % metrics[0]) + print("kpis\tconv_train_acc\t%f" % metrics[1]) + print("kpis\tconv_test_cost\t%f" % avg_cost_test) + print("kpis\tconv_test_acc\t%f" % acc_test) + + train_loop() + + +def infer(use_cuda, params_dirname=None): + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + word_dict = paddle.dataset.imdb.word_dict() + + exe = fluid.Executor(place) + + inference_scope = fluid.core.Scope() + with fluid.scope_guard(inference_scope): + # Use fluid.io.load_inference_model to obtain the inference program desc, + # the feed_target_names (the names of variables that will be feeded + # data using feed operators), and the fetch_targets (variables that + # we want to obtain data from using fetch operators). + [inferencer, feed_target_names, + fetch_targets] = fluid.io.load_inference_model(params_dirname, exe) + + # Setup input by creating LoDTensor to represent sequence of words. + # Here each word is the basic element of the LoDTensor and the shape of + # each word (base_shape) should be [1] since it is simply an index to + # look up for the corresponding word vector. + # Suppose the length_based level of detail (lod) info is set to [[3, 4, 2]], + # which has only one lod level. Then the created LoDTensor will have only + # one higher level structure (sequence of words, or sentence) than the basic + # element (word). Hence the LoDTensor will hold data for three sentences of + # length 3, 4 and 2, respectively. 
+ # Note that lod info should be a list of lists. + reviews_str = [ + b'read the book forget the movie', b'this is a great movie', + b'this is very bad' + ] + reviews = [c.split() for c in reviews_str] + + UNK = word_dict[''] + lod = [] + for c in reviews: + lod.append([np.int64(word_dict.get(words, UNK)) for words in c]) + + base_shape = [[len(c) for c in lod]] + lod = np.array(sum(lod, []), dtype=np.int64) + + tensor_words = fluid.create_lod_tensor(lod, base_shape, place) + assert feed_target_names[0] == "words" + results = exe.run( + inferencer, + feed={feed_target_names[0]: tensor_words}, + fetch_list=fetch_targets, + return_numpy=False) + np_data = np.array(results[0]) + for i, r in enumerate(np_data): + print("Predict probability of ", r[0], " to be positive and ", + r[1], " to be negative for review \'", reviews_str[i], "\'") + + +def main(use_cuda): + if use_cuda and not fluid.core.is_compiled_with_cuda(): + return + params_dirname = "understand_sentiment_conv.inference.model" + train(use_cuda, params_dirname) + infer(use_cuda, params_dirname) + + +if __name__ == '__main__': + args = parse_args() + use_cuda = args.use_gpu # set to True if training with GPU + main(use_cuda) diff --git a/doc/paddle/user_guides/nlp_case/understand_sentiment/train_dyn_rnn.py b/doc/paddle/user_guides/nlp_case/understand_sentiment/train_dyn_rnn.py new file mode 100644 index 0000000000000000000000000000000000000000..b782f6435a68c0f01fae9e03568a8dc9946bee70 --- /dev/null +++ b/doc/paddle/user_guides/nlp_case/understand_sentiment/train_dyn_rnn.py @@ -0,0 +1,234 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
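+
+# This script trains a sentiment classifier on the IMDB dataset: word embeddings are
+# fed through a fully connected projection and a single dynamic LSTM (dynamic_rnn_lstm
+# below), the model is evaluated on the test set every 10 training steps, and an
+# inference model is saved to `params_dirname`.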
+ +from __future__ import print_function + +import paddle +import paddle.fluid as fluid +import numpy as np +import sys +import math +import argparse + +CLASS_DIM = 2 +EMB_DIM = 128 +BATCH_SIZE = 128 +LSTM_SIZE = 128 + + +def parse_args(): + parser = argparse.ArgumentParser("dyn_rnn") + parser.add_argument( + '--enable_ce', + action='store_true', + help="If set, run the task with continuous evaluation logs.") + parser.add_argument( + '--use_gpu', type=int, default=0, help="Whether to use GPU or not.") + parser.add_argument( + '--num_epochs', type=int, default=1, help="number of epochs.") + args = parser.parse_args() + return args + + +def dynamic_rnn_lstm(data, input_dim, class_dim, emb_dim, lstm_size): + emb = fluid.embedding( + input=data, size=[input_dim, emb_dim], is_sparse=True) + sentence = fluid.layers.fc(input=emb, size=lstm_size * 4, act='tanh') + + lstm, _ = fluid.layers.dynamic_lstm(sentence, size=lstm_size * 4) + + last = fluid.layers.sequence_last_step(lstm) + prediction = fluid.layers.fc(input=last, size=class_dim, act="softmax") + return prediction + + +def inference_program(word_dict): + data = fluid.data(name="words", shape=[None], dtype="int64", lod_level=1) + dict_dim = len(word_dict) + pred = dynamic_rnn_lstm(data, dict_dim, CLASS_DIM, EMB_DIM, LSTM_SIZE) + return pred + + +def train_program(prediction): + label = fluid.data(name="label", shape=[None, 1], dtype="int64") + cost = fluid.layers.cross_entropy(input=prediction, label=label) + avg_cost = fluid.layers.mean(cost) + accuracy = fluid.layers.accuracy(input=prediction, label=label) + return [avg_cost, accuracy] + + +def optimizer_func(): + return fluid.optimizer.Adagrad(learning_rate=0.002) + + +def train(use_cuda, params_dirname): + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + print("Loading IMDB word dict....") + word_dict = paddle.dataset.imdb.word_dict() + + print("Reading training data....") + if args.enable_ce: + train_reader = fluid.io.batch( + paddle.dataset.imdb.train(word_dict), batch_size=BATCH_SIZE) + else: + train_reader = fluid.io.batch( + fluid.io.shuffle( + paddle.dataset.imdb.train(word_dict), buf_size=25000), + batch_size=BATCH_SIZE) + + print("Reading testing data....") + test_reader = fluid.io.batch( + paddle.dataset.imdb.test(word_dict), batch_size=BATCH_SIZE) + + feed_order = ['words', 'label'] + pass_num = args.num_epochs + + main_program = fluid.default_main_program() + star_program = fluid.default_startup_program() + + if args.enable_ce: + main_program.random_seed = 90 + star_program.random_seed = 90 + + prediction = inference_program(word_dict) + train_func_outputs = train_program(prediction) + avg_cost = train_func_outputs[0] + + test_program = main_program.clone(for_test=True) + + sgd_optimizer = optimizer_func() + sgd_optimizer.minimize(avg_cost) + exe = fluid.Executor(place) + + def train_test(program, reader): + count = 0 + feed_var_list = [ + program.global_block().var(var_name) for var_name in feed_order + ] + feeder_test = fluid.DataFeeder(feed_list=feed_var_list, place=place) + test_exe = fluid.Executor(place) + accumulated = len(train_func_outputs) * [0] + for test_data in reader(): + avg_cost_np = test_exe.run( + program=program, + feed=feeder_test.feed(test_data), + fetch_list=train_func_outputs) + accumulated = [ + x[0] + x[1][0] for x in zip(accumulated, avg_cost_np) + ] + count += 1 + return [x / count for x in accumulated] + + def train_loop(): + + feed_var_list_loop = [ + main_program.global_block().var(var_name) + for var_name in feed_order + ] + feeder 
= fluid.DataFeeder(feed_list=feed_var_list_loop, place=place) + exe.run(fluid.default_startup_program()) + + for epoch_id in range(pass_num): + for step_id, data in enumerate(train_reader()): + metrics = exe.run( + main_program, + feed=feeder.feed(data), + fetch_list=[var.name for var in train_func_outputs]) + if (step_id + 1) % 10 == 0: + + avg_cost_test, acc_test = train_test(test_program, + test_reader) + print('Step {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format( + step_id, avg_cost_test, acc_test)) + + print("Step {0}, Epoch {1} Metrics {2}".format( + step_id, epoch_id, list(map(np.array, metrics)))) + if math.isnan(float(metrics[0])): + sys.exit("got NaN loss, training failed.") + if params_dirname is not None: + fluid.io.save_inference_model(params_dirname, ["words"], + prediction, exe) + if args.enable_ce and epoch_id == pass_num - 1: + print("kpis\trnn_train_cost\t%f" % metrics[0]) + print("kpis\trnn_train_acc\t%f" % metrics[1]) + print("kpis\trnn_test_cost\t%f" % avg_cost_test) + print("kpis\trnn_test_acc\t%f" % acc_test) + + train_loop() + + +def infer(use_cuda, params_dirname=None): + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + word_dict = paddle.dataset.imdb.word_dict() + + exe = fluid.Executor(place) + + inference_scope = fluid.core.Scope() + with fluid.scope_guard(inference_scope): + # Use fluid.io.load_inference_model to obtain the inference program desc, + # the feed_target_names (the names of variables that will be feeded + # data using feed operators), and the fetch_targets (variables that + # we want to obtain data from using fetch operators). + [inferencer, feed_target_names, + fetch_targets] = fluid.io.load_inference_model(params_dirname, exe) + + # Setup input by creating LoDTensor to represent sequence of words. + # Here each word is the basic element of the LoDTensor and the shape of + # each word (base_shape) should be [1] since it is simply an index to + # look up for the corresponding word vector. + # Suppose the length_based level of detail (lod) info is set to [[3, 4, 2]], + # which has only one lod level. Then the created LoDTensor will have only + # one higher level structure (sequence of words, or sentence) than the basic + # element (word). Hence the LoDTensor will hold data for three sentences of + # length 3, 4 and 2, respectively. + # Note that lod info should be a list of lists. 
+ reviews_str = [ + b'read the book forget the movie', b'this is a great movie', + b'this is very bad' + ] + reviews = [c.split() for c in reviews_str] + + UNK = word_dict[''] + lod = [] + for c in reviews: + lod.append([np.int64(word_dict.get(words, UNK)) for words in c]) + + base_shape = [[len(c) for c in lod]] + lod = np.array(sum(lod, []), dtype=np.int64) + + tensor_words = fluid.create_lod_tensor(lod, base_shape, place) + assert feed_target_names[0] == "words" + results = exe.run( + inferencer, + feed={feed_target_names[0]: tensor_words}, + fetch_list=fetch_targets, + return_numpy=False) + np_data = np.array(results[0]) + for i, r in enumerate(np_data): + print("Predict probability of ", r[0], " to be positive and ", + r[1], " to be negative for review \'", reviews_str[i], "\'") + + +def main(use_cuda): + if use_cuda and not fluid.core.is_compiled_with_cuda(): + return + params_dirname = "understand_sentiment_conv.inference.model" + train(use_cuda, params_dirname) + infer(use_cuda, params_dirname) + + +if __name__ == '__main__': + args = parse_args() + use_cuda = args.use_gpu # set to True if training with GPU + main(use_cuda) diff --git a/doc/paddle/user_guides/nlp_case/understand_sentiment/train_stacked_lstm.py b/doc/paddle/user_guides/nlp_case/understand_sentiment/train_stacked_lstm.py new file mode 100644 index 0000000000000000000000000000000000000000..6db40f42a3fbda12c764d76151db21a1c3e4df04 --- /dev/null +++ b/doc/paddle/user_guides/nlp_case/understand_sentiment/train_stacked_lstm.py @@ -0,0 +1,256 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
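+
+# This script trains a sentiment classifier on the IMDB dataset with a stacked LSTM
+# network (stacked_lstm_net below, STACKED_NUM layers with alternating direction).
+# It is evaluated on the test set every 10 training steps and an inference model is
+# saved to `params_dirname`.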
+ +from __future__ import print_function + +import os +import paddle +import paddle.fluid as fluid +import numpy as np +import sys +import math +import argparse + +CLASS_DIM = 2 +EMB_DIM = 128 +HID_DIM = 512 +STACKED_NUM = 3 +BATCH_SIZE = 128 + + +def parse_args(): + parser = argparse.ArgumentParser("stacked_lstm") + parser.add_argument( + '--enable_ce', + action='store_true', + help="If set, run the task with continuous evaluation logs.") + parser.add_argument( + '--use_gpu', type=int, default=0, help="Whether to use GPU or not.") + parser.add_argument( + '--num_epochs', type=int, default=1, help="number of epochs.") + args = parser.parse_args() + return args + + +def stacked_lstm_net(data, input_dim, class_dim, emb_dim, hid_dim, + stacked_num): + assert stacked_num % 2 == 1 + + emb = fluid.embedding( + input=data, size=[input_dim, emb_dim], is_sparse=True) + + fc1 = fluid.layers.fc(input=emb, size=hid_dim) + lstm1, cell1 = fluid.layers.dynamic_lstm(input=fc1, size=hid_dim) + + inputs = [fc1, lstm1] + + for i in range(2, stacked_num + 1): + fc = fluid.layers.fc(input=inputs, size=hid_dim) + lstm, cell = fluid.layers.dynamic_lstm( + input=fc, size=hid_dim, is_reverse=(i % 2) == 0) + inputs = [fc, lstm] + + fc_last = fluid.layers.sequence_pool(input=inputs[0], pool_type='max') + lstm_last = fluid.layers.sequence_pool(input=inputs[1], pool_type='max') + + prediction = fluid.layers.fc( + input=[fc_last, lstm_last], size=class_dim, act='softmax') + return prediction + + +def inference_program(word_dict): + data = fluid.data(name="words", shape=[None], dtype="int64", lod_level=1) + dict_dim = len(word_dict) + net = stacked_lstm_net(data, dict_dim, CLASS_DIM, EMB_DIM, HID_DIM, + STACKED_NUM) + return net + + +def train_program(prediction): + # prediction = inference_program(word_dict) + label = fluid.data(name="label", shape=[None, 1], dtype="int64") + cost = fluid.layers.cross_entropy(input=prediction, label=label) + avg_cost = fluid.layers.mean(cost) + accuracy = fluid.layers.accuracy(input=prediction, label=label) + return [avg_cost, accuracy] + + +def optimizer_func(): + return fluid.optimizer.Adagrad(learning_rate=0.002) + + +def train(use_cuda, params_dirname): + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + + print("Loading IMDB word dict....") + word_dict = paddle.dataset.imdb.word_dict() + + print("Reading training data....") + + if args.enable_ce: + train_reader = fluid.io.batch( + paddle.dataset.imdb.train(word_dict), batch_size=BATCH_SIZE) + else: + train_reader = fluid.io.batch( + fluid.io.shuffle( + paddle.dataset.imdb.train(word_dict), buf_size=25000), + batch_size=BATCH_SIZE) + + print("Reading testing data....") + test_reader = paddle.batch( + paddle.dataset.imdb.test(word_dict), batch_size=BATCH_SIZE) + + feed_order = ['words', 'label'] + pass_num = args.num_epochs + + main_program = fluid.default_main_program() + star_program = fluid.default_startup_program() + + if args.enable_ce: + main_program.random_seed = 90 + star_program.random_seed = 90 + + prediction = inference_program(word_dict) + train_func_outputs = train_program(prediction) + avg_cost = train_func_outputs[0] + + test_program = main_program.clone(for_test=True) + + # [avg_cost, accuracy] = train_program(prediction) + sgd_optimizer = optimizer_func() + sgd_optimizer.minimize(avg_cost) + exe = fluid.Executor(place) + + def train_test(program, reader): + count = 0 + feed_var_list = [ + program.global_block().var(var_name) for var_name in feed_order + ] + feeder_test = 
fluid.DataFeeder(feed_list=feed_var_list, place=place) + test_exe = fluid.Executor(place) + accumulated = len(train_func_outputs) * [0] + for test_data in reader(): + avg_cost_np = test_exe.run( + program=program, + feed=feeder_test.feed(test_data), + fetch_list=train_func_outputs) + accumulated = [ + x[0] + x[1][0] for x in zip(accumulated, avg_cost_np) + ] + count += 1 + return [x / count for x in accumulated] + + def train_loop(): + + feed_var_list_loop = [ + main_program.global_block().var(var_name) + for var_name in feed_order + ] + feeder = fluid.DataFeeder(feed_list=feed_var_list_loop, place=place) + exe.run(star_program) + + for epoch_id in range(pass_num): + for step_id, data in enumerate(train_reader()): + metrics = exe.run( + main_program, + feed=feeder.feed(data), + fetch_list=[var.name for var in train_func_outputs]) + print("step: {0}, Metrics {1}".format( + step_id, list(map(np.array, metrics)))) + if (step_id + 1) % 10 == 0: + avg_cost_test, acc_test = train_test(test_program, + test_reader) + print('Step {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format( + step_id, avg_cost_test, acc_test)) + + print("Step {0}, Epoch {1} Metrics {2}".format( + step_id, epoch_id, list(map(np.array, metrics)))) + if math.isnan(float(metrics[0])): + sys.exit("got NaN loss, training failed.") + if params_dirname is not None: + fluid.io.save_inference_model(params_dirname, ["words"], + prediction, exe) + if args.enable_ce and epoch_id == pass_num - 1: + print("kpis\tlstm_train_cost\t%f" % metrics[0]) + print("kpis\tlstm_train_acc\t%f" % metrics[1]) + print("kpis\tlstm_test_cost\t%f" % avg_cost_test) + print("kpis\tlstm_test_acc\t%f" % acc_test) + + train_loop() + + +def infer(use_cuda, params_dirname=None): + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + word_dict = paddle.dataset.imdb.word_dict() + + exe = fluid.Executor(place) + + inference_scope = fluid.core.Scope() + with fluid.scope_guard(inference_scope): + # Use fluid.io.load_inference_model to obtain the inference program desc, + # the feed_target_names (the names of variables that will be feeded + # data using feed operators), and the fetch_targets (variables that + # we want to obtain data from using fetch operators). + [inferencer, feed_target_names, + fetch_targets] = fluid.io.load_inference_model(params_dirname, exe) + + # Setup input by creating LoDTensor to represent sequence of words. + # Here each word is the basic element of the LoDTensor and the shape of + # each word (base_shape) should be [1] since it is simply an index to + # look up for the corresponding word vector. + # Suppose the length_based level of detail (lod) info is set to [[3, 4, 2]], + # which has only one lod level. Then the created LoDTensor will have only + # one higher level structure (sequence of words, or sentence) than the basic + # element (word). Hence the LoDTensor will hold data for three sentences of + # length 3, 4 and 2, respectively. + # Note that lod info should be a list of lists. 
+ reviews_str = [ + b'read the book forget the movie', b'this is a great movie', + b'this is very bad' + ] + reviews = [c.split() for c in reviews_str] + + UNK = word_dict[''] + lod = [] + for c in reviews: + lod.append([np.int64(word_dict.get(words, UNK)) for words in c]) + + base_shape = [[len(c) for c in lod]] + lod = np.array(sum(lod, []), dtype=np.int64) + + tensor_words = fluid.create_lod_tensor(lod, base_shape, place) + assert feed_target_names[0] == "words" + results = exe.run( + inferencer, + feed={feed_target_names[0]: tensor_words}, + fetch_list=fetch_targets, + return_numpy=False) + np_data = np.array(results[0]) + for i, r in enumerate(np_data): + print("Predict probability of ", r[0], " to be positive and ", + r[1], " to be negative for review \'", reviews_str[i], "\'") + + +def main(use_cuda): + if use_cuda and not fluid.core.is_compiled_with_cuda(): + return + params_dirname = "understand_sentiment_stacked_lstm.inference.model" + train(use_cuda, params_dirname) + infer(use_cuda, params_dirname) + + +if __name__ == '__main__': + args = parse_args() + use_cuda = args.use_gpu # set to True if training with GPU + main(use_cuda) diff --git a/doc/paddle/user_guides/rec_case/index_cn.rst b/doc/paddle/user_guides/rec_case/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0894710bf03dce07f72ac071e471811ac222a5c7 --- /dev/null +++ b/doc/paddle/user_guides/rec_case/index_cn.rst @@ -0,0 +1,13 @@ +################ +推荐 +################ + +.. todo:: + +推荐系统是利用电子商务网站向客户提供商品信息和建议,帮助用户决定应该购买什么产品,模拟销售人员帮助客户完成购买过程。在这里PaddlePaddle为大家提供了一篇个性化推荐的案例详解: + +.. toctree:: + :titlesonly: + + recommender_system/README.cn.md + diff --git a/doc/paddle/user_guides/rec_case/index_en.rst b/doc/paddle/user_guides/rec_case/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..0e5e4330e1ab03d3d39dcb388ff08be52e3124ad --- /dev/null +++ b/doc/paddle/user_guides/rec_case/index_en.rst @@ -0,0 +1,10 @@ +############################ +Recommend +############################ + + +.. toctree:: + :titlesonly: + + recommender_system/README.md + diff --git a/doc/paddle/user_guides/rec_case/recommender_system/.gitignore b/doc/paddle/user_guides/rec_case/recommender_system/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..f23901aeb3a9e7cd12611fc556742670d04a9bb5 --- /dev/null +++ b/doc/paddle/user_guides/rec_case/recommender_system/.gitignore @@ -0,0 +1,2 @@ +.idea +.ipynb_checkpoints diff --git a/doc/paddle/user_guides/rec_case/recommender_system/.run_ce.sh b/doc/paddle/user_guides/rec_case/recommender_system/.run_ce.sh new file mode 100644 index 0000000000000000000000000000000000000000..4c5ae210ff5485c4c9266de73614ee0f4c4d6d6e --- /dev/null +++ b/doc/paddle/user_guides/rec_case/recommender_system/.run_ce.sh @@ -0,0 +1,4 @@ +#!/bin/bash +#This file is only used for continuous evaluation. +python train.py --enable_ce | python _ce.py + diff --git a/doc/paddle/user_guides/rec_case/recommender_system/README.cn.md b/doc/paddle/user_guides/rec_case/recommender_system/README.cn.md new file mode 100644 index 0000000000000000000000000000000000000000..f5d07326821670df103718a879963ccf6b13874a --- /dev/null +++ b/doc/paddle/user_guides/rec_case/recommender_system/README.cn.md @@ -0,0 +1,601 @@ +# 个性化推荐 + +本教程源代码目录在[book/recommender_system](https://github.com/PaddlePaddle/book/tree/develop/05.recommender_system),初次使用请您参考[Book文档使用说明](https://github.com/PaddlePaddle/book/blob/develop/README.cn.md#运行这本书)。 + +### 说明: ### +1. 
硬件环境要求: +本文可支持在CPU、GPU下运行 +2. Docker镜像支持的CUDA/cuDNN版本: +如果使用了Docker运行Book,请注意:这里所提供的默认镜像的GPU环境为 CUDA 8/cuDNN 5,对于NVIDIA Tesla V100等要求CUDA 9的 GPU,使用该镜像可能会运行失败。 +3. 文档和脚本中代码的一致性问题: +请注意:为使本文更加易读易用,我们拆分、调整了train.py的代码并放入本文。本文中代码与train.py的运行结果一致,可直接运行[train.py](https://github.com/PaddlePaddle/book/blob/develop/05.recommender_system/train.py)进行验证。 + +## 背景介绍 + +在网络技术不断发展和电子商务规模不断扩大的背景下,商品数量和种类快速增长,用户需要花费大量时间才能找到自己想买的商品,这就是信息超载问题。为了解决这个难题,个性化推荐系统(Recommender System)应运而生。 + +个性化推荐系统是信息过滤系统(Information Filtering System)的子集,它可以用在很多领域,如电影、音乐、电商和 Feed 流推荐等。个性化推荐系统通过分析、挖掘用户行为,发现用户的个性化需求与兴趣特点,将用户可能感兴趣的信息或商品推荐给用户。与搜索引擎不同,个性化推荐系统不需要用户准确地描述出自己的需求,而是根据用户的历史行为进行建模,主动提供满足用户兴趣和需求的信息。 + +1994年明尼苏达大学推出的GroupLens系统[[1](#参考文献)]一般被认为是个性化推荐系统成为一个相对独立的研究方向的标志。该系统首次提出了基于协同过滤来完成推荐任务的思想,此后,基于该模型的协同过滤推荐引领了个性化推荐系统十几年的发展方向。 + +传统的个性化推荐系统方法主要有: + +- 协同过滤推荐(Collaborative Filtering Recommendation):该方法是应用最广泛的技术之一,需要收集和分析用户的历史行为、活动和偏好。它通常可以分为两个子类:基于用户 (User-Based)的推荐[[1](#参考文献)] 和基于物品(Item-Based)的推荐[[2](#参考文献)]。该方法的一个关键优势是它不依赖于机器去分析物品的内容特征,因此它无需理解物品本身也能够准确地推荐诸如电影之类的复杂物品;缺点是对于没有任何行为的新用户存在冷启动的问题,同时也存在用户与商品之间的交互数据不够多造成的稀疏问题。值得一提的是,社交网络[[3](#参考文献)]或地理位置等上下文信息都可以结合到协同过滤中去。 +- 基于内容过滤推荐[[4](#参考文献)](Content-based Filtering Recommendation):该方法利用商品的内容描述,抽象出有意义的特征,通过计算用户的兴趣和商品描述之间的相似度,来给用户做推荐。优点是简单直接,不需要依据其他用户对商品的评价,而是通过商品属性进行商品相似度度量,从而推荐给用户所感兴趣商品的相似商品;缺点是对于没有任何行为的新用户同样存在冷启动的问题。 +- 组合推荐[[5](#参考文献)](Hybrid Recommendation):运用不同的输入和技术共同进行推荐,以弥补各自推荐技术的缺点。 + +近些年来,深度学习在很多领域都取得了巨大的成功。学术界和工业界都在尝试将深度学习应用于个性化推荐系统领域中。深度学习具有优秀的自动提取特征的能力,能够学习多层次的抽象特征表示,并对异质或跨域的内容信息进行学习,可以一定程度上处理个性化推荐系统冷启动问题[[6](#参考文献)]。本教程主要介绍个性化推荐的深度学习模型,以及如何使用PaddlePaddle实现模型。 + +## 效果展示 + +我们使用包含用户信息、电影信息与电影评分的数据集作为个性化推荐的应用场景。当我们训练好模型后,只需要输入对应的用户ID和电影ID,就可以得出一个匹配的分数(范围[0,5],分数越高视为兴趣越大),然后根据所有电影的推荐得分排序,推荐给用户可能感兴趣的电影。 + +``` +Input movie_id: 1962 +Input user_id: 1 +Prediction Score is 4.25 +``` + +## 模型概览 + +本章中,我们首先介绍YouTube的视频个性化推荐系统[[7](#参考文献)],然后介绍我们实现的融合推荐模型。 + +### YouTube的深度神经网络个性化推荐系统 + +YouTube是世界上最大的视频上传、分享和发现网站,YouTube个性化推荐系统为超过10亿用户从不断增长的视频库中推荐个性化的内容。整个系统由两个神经网络组成:候选生成网络和排序网络。候选生成网络从百万量级的视频库中生成上百个候选,排序网络对候选进行打分排序,输出排名最高的数十个结果。系统结构如图1所示: + +

+图1. YouTube 个性化推荐系统结构

+ +#### 候选生成网络(Candidate Generation Network) + +候选生成网络将推荐问题建模为一个类别数极大的多类分类问题:对于一个Youtube用户,使用其观看历史(视频ID)、搜索词记录(search tokens)、人口学信息(如地理位置、用户登录设备)、二值特征(如性别,是否登录)和连续特征(如用户年龄)等,对视频库中所有视频进行多分类,得到每一类别的分类结果(即每一个视频的推荐概率),最终输出概率较高的几百个视频。 + +首先,将观看历史及搜索词记录这类历史信息,映射为向量后取平均值得到定长表示;同时,输入人口学特征以优化新用户的推荐效果,并将二值特征和连续特征归一化处理到[0, 1]范围。接下来,将所有特征表示拼接为一个向量,并输入给非线形多层感知器(MLP,详见[识别数字](https://github.com/PaddlePaddle/book/blob/develop/02.recognize_digits/README.cn.md)教程)处理。最后,训练时将MLP的输出给softmax做分类,预测时计算用户的综合特征(MLP的输出)与所有视频的相似度,取得分最高的$k$个作为候选生成网络的筛选结果。图2显示了候选生成网络结构。 + +

+图2. 候选生成网络结构

+ +对于一个用户$U$,预测此刻用户要观看的视频$\omega$为视频$i$的概率公式为: + +

+$$P(\omega=i|u)=\frac{e^{v_{i}u}}{\sum_{j \in V}e^{v_{j}u}}$$

+ +其中$u$为用户$U$的特征表示,$V$为视频库集合,$v_i$为视频库中第$i$个视频的特征表示。$u$和$v_i$为长度相等的向量,两者点积可以通过全连接层实现。 + +考虑到softmax分类的类别数非常多,为了保证一定的计算效率:1)训练阶段,使用负样本类别采样将实际计算的类别数缩小至数千;2)推荐(预测)阶段,忽略softmax的归一化计算(不影响结果),将类别打分问题简化为点积(dot product)空间中的最近邻(nearest neighbor)搜索问题,取与$u$最近的$k$个视频作为生成的候选。 + +#### 排序网络(Ranking Network) +排序网络的结构类似于候选生成网络,但是它的目标是对候选进行更细致的打分排序。和传统广告排序中的特征抽取方法类似,这里也构造了大量的用于视频排序的相关特征(如视频 ID、上次观看时间等)。这些特征的处理方式和候选生成网络类似,不同之处是排序网络的顶部是一个加权逻辑回归(weighted logistic regression),它对所有候选视频进行打分,从高到底排序后将分数较高的一些视频返回给用户。 + +### 融合推荐模型 +本节会使用卷积神经网络(Convolutional Neural Networks)来学习电影名称的表示。下面会依次介绍文本卷积神经网络以及融合推荐模型。 + +#### 文本卷积神经网络(CNN) + +卷积神经网络经常用来处理具有类似网格拓扑结构(grid-like topology)的数据。例如,图像可以视为二维网格的像素点,自然语言可以视为一维的词序列。卷积神经网络可以提取多种局部特征,并对其进行组合抽象得到更高级的特征表示。实验表明,卷积神经网络能高效地对图像及文本问题进行建模处理。 + +卷积神经网络主要由卷积(convolution)和池化(pooling)操作构成,其应用及组合方式灵活多变,种类繁多。本小结我们以如图3所示的网络进行讲解: + +

+图3. 卷积神经网络文本分类模型

+ +假设待处理句子的长度为$n$,其中第$i$个词的词向量为$x_i\in\mathbb{R}^k$,$k$为维度大小。 + +首先,进行词向量的拼接操作:将每$h$个词拼接起来形成一个大小为$h$的词窗口,记为$x_{i:i+h-1}$,它表示词序列$x_{i},x_{i+1},\ldots,x_{i+h-1}$的拼接,其中,$i$表示词窗口中第一个词在整个句子中的位置,取值范围从$1$到$n-h+1$,$x_{i:i+h-1}\in\mathbb{R}^{hk}$。 + +其次,进行卷积操作:把卷积核(kernel)$w\in\mathbb{R}^{hk}$应用于包含$h$个词的窗口$x_{i:i+h-1}$,得到特征$c_i=f(w\cdot x_{i:i+h-1}+b)$,其中$b\in\mathbb{R}$为偏置项(bias),$f$为非线性激活函数,如$sigmoid$。将卷积核应用于句子中所有的词窗口${x_{1:h},x_{2:h+1},\ldots,x_{n-h+1:n}}$,产生一个特征图(feature map): + +

+$$c=[c_1,c_2,\ldots,c_{n-h+1}], \quad c \in \mathbb{R}^{n-h+1}$$

+ +接下来,对特征图采用时间维度上的最大池化(max pooling over time)操作得到此卷积核对应的整句话的特征$\hat c$,它是特征图中所有元素的最大值: + +

+$$\hat c=max(c)$$

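+
+下面给出一个仅作示意的 NumPy 小例子(非教程正式代码;其中句长、词向量维度、词窗口大小和激活函数都是随意假设的),按上面的公式对每个词窗口计算特征 $c_i$,再做时间维度上的最大池化得到整句特征 $\hat c$:
+
+```python
+import numpy as np
+
+n, k, h = 6, 4, 3                     # 句长 n、词向量维度 k、词窗口大小 h(示意用的假设值)
+x = np.random.rand(n, k)              # 第 i 行为词向量 x_i
+w = np.random.rand(h * k)             # 卷积核 w,维度为 hk
+b = 0.1                               # 偏置项 b
+
+# 对每个词窗口 x_{i:i+h-1} 计算 c_i = f(w·x_{i:i+h-1} + b),这里 f 取 sigmoid
+c = np.array([
+    1.0 / (1.0 + np.exp(-(np.dot(w, x[i:i + h].reshape(-1)) + b)))
+    for i in range(n - h + 1)
+])                                    # 特征图 c,共 n-h+1 个元素
+
+c_hat = c.max()                       # 时间维度上的最大池化,得到整句话的特征
+print(c, c_hat)
+```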
+ +#### 融合推荐模型概览 + +在融合推荐模型的电影个性化推荐系统中: + +1. 首先,使用用户特征和电影特征作为神经网络的输入,其中: + + - 用户特征融合了四个属性信息,分别是用户ID、性别、职业和年龄。 + + - 电影特征融合了三个属性信息,分别是电影ID、电影类型ID和电影名称。 + +2. 对用户特征,将用户ID映射为维度大小为256的向量表示,输入全连接层,并对其他三个属性也做类似的处理。然后将四个属性的特征表示分别全连接并相加。 + +3. 对电影特征,将电影ID以类似用户ID的方式进行处理,电影类型ID以向量的形式直接输入全连接层,电影名称用文本卷积神经网络得到其定长向量表示。然后将三个属性的特征表示分别全连接并相加。 + +4. 得到用户和电影的向量表示后,计算二者的余弦相似度作为个性化推荐系统的打分。最后,用该相似度打分和用户真实打分的差异的平方作为该回归模型的损失函数。 + +

+图4. 融合推荐模型

+ +## 数据准备 + +### 数据介绍与下载 + +我们以 [MovieLens 百万数据集(ml-1m)](http://files.grouplens.org/datasets/movielens/ml-1m.zip)为例进行介绍。ml-1m 数据集包含了 6,000 位用户对 4,000 部电影的 1,000,000 条评价(评分范围 1~5 分,均为整数),由 GroupLens Research 实验室搜集整理。 + +Paddle在API中提供了自动加载数据的模块。数据模块为 `paddle.dataset.movielens` + + +```python +from __future__ import print_function +import paddle +movie_info = paddle.dataset.movielens.movie_info() +print(list(movie_info.values())[0]) +``` + + +```python +# Run this block to show dataset's documentation +# help(paddle.dataset.movielens) +``` + +在原始数据中包含电影的特征数据,用户的特征数据,和用户对电影的评分。 + +例如,其中某一个电影特征为: + + +```python +movie_info = paddle.dataset.movielens.movie_info() +print(list(movie_info.values())[0]) +``` + + + + +这表示,电影的id是1,标题是《Toy Story》,该电影被分为到三个类别中。这三个类别是动画,儿童,喜剧。 + + +```python +user_info = paddle.dataset.movielens.user_info() +print(list(user_info.values())[0]) +``` + + + + +这表示,该用户ID是1,女性,年龄比18岁还年轻。职业ID是10。 + + +其中,年龄使用下列分布 + +* 1: "Under 18" +* 18: "18-24" +* 25: "25-34" +* 35: "35-44" +* 45: "45-49" +* 50: "50-55" +* 56: "56+" + +职业是从下面几种选项里面选则得出: + +* 0: "other" or not specified +* 1: "academic/educator" +* 2: "artist" +* 3: "clerical/admin" +* 4: "college/grad student" +* 5: "customer service" +* 6: "doctor/health care" +* 7: "executive/managerial" +* 8: "farmer" +* 9: "homemaker" +* 10: "K-12 student" +* 11: "lawyer" +* 12: "programmer" +* 13: "retired" +* 14: "sales/marketing" +* 15: "scientist" +* 16: "self-employed" +* 17: "technician/engineer" +* 18: "tradesman/craftsman" +* 19: "unemployed" +* 20: "writer" + +而对于每一条训练/测试数据,均为 <用户特征> + <电影特征> + 评分。 + +例如,我们获得第一条训练数据: + + +```python +train_set_creator = paddle.dataset.movielens.train() +train_sample = next(train_set_creator()) +uid = train_sample[0] +mov_id = train_sample[len(user_info[uid].value())] +print ("User %s rates Movie %s with Score %s"%(user_info[uid], movie_info[mov_id], train_sample[-1])) +``` + + User rates Movie with Score [5.0] + + +即用户1对电影1193的评价为5分。 + +## 模型配置说明 + +下面我们开始根据输入数据的形式配置模型。首先引入所需的库函数以及定义全局变量。 +- IS_SPARSE: embedding中是否使用稀疏更新 +- PASS_NUM: epoch数量 + + +```python +import math +import sys +import numpy as np +import paddle +import paddle.fluid as fluid +import paddle.fluid.layers as layers +import paddle.fluid.nets as nets + +IS_SPARSE = True +BATCH_SIZE = 256 +PASS_NUM = 20 +``` + +然后为我们的用户特征综合模型定义模型配置 + +```python +def get_usr_combined_features(): + """network definition for user part""" + + USR_DICT_SIZE = paddle.dataset.movielens.max_user_id() + 1 + + uid = fluid.data(name='user_id', shape=[None], dtype='int64') + + usr_emb = fluid.embedding( + input=uid, + dtype='float32', + size=[USR_DICT_SIZE, 32], + param_attr='user_table', + is_sparse=IS_SPARSE) + + usr_fc = layers.fc(input=usr_emb, size=32) + + USR_GENDER_DICT_SIZE = 2 + + usr_gender_id = fluid.data(name='gender_id', shape=[None], dtype='int64') + + usr_gender_emb = fluid.embedding( + input=usr_gender_id, + size=[USR_GENDER_DICT_SIZE, 16], + param_attr='gender_table', + is_sparse=IS_SPARSE) + + usr_gender_fc = layers.fc(input=usr_gender_emb, size=16) + + USR_AGE_DICT_SIZE = len(paddle.dataset.movielens.age_table) + usr_age_id = fluid.data(name='age_id', shape=[None], dtype="int64") + + usr_age_emb = fluid.embedding( + input=usr_age_id, + size=[USR_AGE_DICT_SIZE, 16], + is_sparse=IS_SPARSE, + param_attr='age_table') + + usr_age_fc = layers.fc(input=usr_age_emb, size=16) + + USR_JOB_DICT_SIZE = paddle.dataset.movielens.max_job_id() + 1 + usr_job_id = fluid.data(name='job_id', shape=[None], dtype="int64") + + usr_job_emb = fluid.embedding( + 
input=usr_job_id, + size=[USR_JOB_DICT_SIZE, 16], + param_attr='job_table', + is_sparse=IS_SPARSE) + + usr_job_fc = layers.fc(input=usr_job_emb, size=16) + + concat_embed = layers.concat( + input=[usr_fc, usr_gender_fc, usr_age_fc, usr_job_fc], axis=1) + + usr_combined_features = layers.fc(input=concat_embed, size=200, act="tanh") + + return usr_combined_features +``` + +如上述代码所示,对于每个用户,我们输入4维特征。其中包括user_id,gender_id,age_id,job_id。这几维特征均是简单的整数值。为了后续神经网络处理这些特征方便,我们借鉴NLP中的语言模型,将这几维离散的整数值,变换成embedding取出。分别形成usr_emb, usr_gender_emb, usr_age_emb, usr_job_emb。 + +然后,我们对于所有的用户特征,均输入到一个全连接层(fc)中。将所有特征融合为一个200维度的特征。 + +进而,我们对每一个电影特征做类似的变换,网络配置为: + + +```python +def get_mov_combined_features(): + """network definition for item(movie) part""" + + MOV_DICT_SIZE = paddle.dataset.movielens.max_movie_id() + 1 + + mov_id = fluid.data(name='movie_id', shape=[None], dtype='int64') + + mov_emb = fluid.embedding( + input=mov_id, + dtype='float32', + size=[MOV_DICT_SIZE, 32], + param_attr='movie_table', + is_sparse=IS_SPARSE) + + mov_fc = layers.fc(input=mov_emb, size=32) + + CATEGORY_DICT_SIZE = len(paddle.dataset.movielens.movie_categories()) + + category_id = fluid.data( + name='category_id', shape=[None], dtype='int64', lod_level=1) + + mov_categories_emb = fluid.embedding( + input=category_id, size=[CATEGORY_DICT_SIZE, 32], is_sparse=IS_SPARSE) + + mov_categories_hidden = layers.sequence_pool( + input=mov_categories_emb, pool_type="sum") + + MOV_TITLE_DICT_SIZE = len(paddle.dataset.movielens.get_movie_title_dict()) + + mov_title_id = fluid.data( + name='movie_title', shape=[None], dtype='int64', lod_level=1) + + mov_title_emb = fluid.embedding( + input=mov_title_id, size=[MOV_TITLE_DICT_SIZE, 32], is_sparse=IS_SPARSE) + + mov_title_conv = nets.sequence_conv_pool( + input=mov_title_emb, + num_filters=32, + filter_size=3, + act="tanh", + pool_type="sum") + + concat_embed = layers.concat( + input=[mov_fc, mov_categories_hidden, mov_title_conv], axis=1) + + mov_combined_features = layers.fc(input=concat_embed, size=200, act="tanh") + + return mov_combined_features +``` + +电影标题名称(title)是一个序列的整数,整数代表的是这个词在索引序列中的下标。这个序列会被送入 `sequence_conv_pool` 层,这个层会在时间维度上使用卷积和池化。因为如此,所以输出会是固定长度,尽管输入的序列长度各不相同。 + +最后,我们定义一个`inference_program`来使用余弦相似度计算用户特征与电影特征的相似性。 + +```python +def inference_program(): + """the combined network""" + + usr_combined_features = get_usr_combined_features() + mov_combined_features = get_mov_combined_features() + + inference = layers.cos_sim(X=usr_combined_features, Y=mov_combined_features) + scale_infer = layers.scale(x=inference, scale=5.0) + + return scale_infer +``` + +进而,我们定义一个`train_program`来使用`inference_program`计算出的结果,在标记数据的帮助下来计算误差。我们还定义了一个`optimizer_func`来定义优化器。 + +```python +def train_program(): + """define the cost function""" + + scale_infer = inference_program() + + label = fluid.data(name='score', shape=[None, 1], dtype='float32') + square_cost = layers.square_error_cost(input=scale_infer, label=label) + avg_cost = layers.mean(square_cost) + + return [avg_cost, scale_infer] + + +def optimizer_func(): + return fluid.optimizer.SGD(learning_rate=0.2) +``` + + +## 训练模型 + +### 定义训练环境 +定义您的训练环境,可以指定训练是发生在CPU还是GPU上。 + +```python +use_cuda = False +place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() +``` + +### 定义数据提供器 +下一步是为训练和测试定义数据提供器。提供器读入一个大小为 `BATCH_SIZE`的数据。`paddle.dataset.movielens.train` 每次会在乱序化后提供一个大小为`BATCH_SIZE`的数据,乱序化的大小为缓存大小`buf_size`。 + +```python +train_reader = fluid.io.batch( + fluid.io.shuffle( + paddle.dataset.movielens.train(), buf_size=8192), + 
batch_size=BATCH_SIZE) + +test_reader = fluid.io.batch( + paddle.dataset.movielens.test(), batch_size=BATCH_SIZE) +``` + +### 构造训练过程(trainer) +我们这里构造了一个训练过程,包括训练优化函数。 + +### 提供数据 + +`feed_order`用来定义每条产生的数据和`paddle.layer.data`之间的映射关系。比如,`movielens.train`产生的第一列的数据对应的是`user_id`这个特征。 + +```python +feed_order = [ + 'user_id', 'gender_id', 'age_id', 'job_id', 'movie_id', 'category_id', + 'movie_title', 'score' +] +``` + +### 构建训练程序以及测试程序 +分别构建训练程序和测试程序,并引入训练优化器。 + +```python +main_program = fluid.default_main_program() +star_program = fluid.default_startup_program() +[avg_cost, scale_infer] = train_program() + +test_program = main_program.clone(for_test=True) +sgd_optimizer = optimizer_func() +sgd_optimizer.minimize(avg_cost) +exe = fluid.Executor(place) + +def train_test(program, reader): + count = 0 + feed_var_list = [ + program.global_block().var(var_name) for var_name in feed_order + ] + feeder_test = fluid.DataFeeder( + feed_list=feed_var_list, place=place) + test_exe = fluid.Executor(place) + accumulated = 0 + for test_data in reader(): + avg_cost_np = test_exe.run(program=program, + feed=feeder_test.feed(test_data), + fetch_list=[avg_cost]) + accumulated += avg_cost_np[0] + count += 1 + return accumulated / count +``` + +### 构建训练主循环并开始训练 +我们根据上面定义的训练循环数(`PASS_NUM`)和一些别的参数,来进行训练循环,并且每次循环都进行一次测试,当测试结果足够好时退出训练并保存训练好的参数。 + +```python +# Specify the directory path to save the parameters +params_dirname = "recommender_system.inference.model" + +from paddle.utils.plot import Ploter +train_prompt = "Train cost" +test_prompt = "Test cost" + +plot_cost = Ploter(train_prompt, test_prompt) + +def train_loop(): + feed_list = [ + main_program.global_block().var(var_name) for var_name in feed_order + ] + feeder = fluid.DataFeeder(feed_list, place) + exe.run(star_program) + + for pass_id in range(PASS_NUM): + for batch_id, data in enumerate(train_reader()): + # train a mini-batch + outs = exe.run(program=main_program, + feed=feeder.feed(data), + fetch_list=[avg_cost]) + out = np.array(outs[0]) + + # get test avg_cost + test_avg_cost = train_test(test_program, test_reader) + + plot_cost.append(train_prompt, batch_id, outs[0]) + plot_cost.append(test_prompt, batch_id, test_avg_cost) + plot_cost.plot() + + if batch_id == 20: + if params_dirname is not None: + fluid.io.save_inference_model(params_dirname, [ + "user_id", "gender_id", "age_id", "job_id", + "movie_id", "category_id", "movie_title" + ], [scale_infer], exe) + return + print('EpochID {0}, BatchID {1}, Test Loss {2:0.2}'.format( + pass_id + 1, batch_id + 1, float(test_avg_cost))) + + if math.isnan(float(out[0])): + sys.exit("got NaN loss, training failed.") +``` +开始训练 +```python +train_loop() +``` + +## 应用模型 + +### 生成测试数据 +使用 create_lod_tensor(data, lod, place) 的API来生成细节层次的张量。`data`是一个序列,每个元素是一个索引号的序列。`lod`是细节层次的信息,对应于`data`。比如,data = [[10, 2, 3], [2, 3]] 意味着它包含两个序列,长度分别是3和2。于是相应地 lod = [[3, 2]],它表明其包含一层细节信息,意味着 `data` 有两个序列,长度分别是3和2。 + +在这个预测例子中,我们试着预测用户ID为1的用户对于电影'Hunchback of Notre Dame'的评分 + +```python +infer_movie_id = 783 +infer_movie_name = paddle.dataset.movielens.movie_info()[infer_movie_id].title +user_id = np.array([1]).astype("int64").reshape(-1) +gender_id = np.array([1]).astype("int64").reshape(-1) +age_id = np.array([0]).astype("int64").reshape(-1) +job_id = np.array([10]).astype("int64").reshape(-1) +movie_id = np.array([783]).astype("int64").reshape(-1) # Hunchback of Notre Dame +category_id = fluid.create_lod_tensor(np.array([10, 8, 9], dtype='int64'), [[3]], place) # Animation, Children's, Musical +movie_title = 
fluid.create_lod_tensor(np.array([1069, 4140, 2923, 710, 988], dtype='int64'), [[5]], + place) # 'hunchback','of','notre','dame','the' +``` + +### 构建预测过程并测试 +与训练过程类似,我们需要构建一个预测过程。其中, `params_dirname`是之前用来存放训练过程中的各个参数的地址。 + +```python +place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() +exe = fluid.Executor(place) + +inference_scope = fluid.core.Scope() +``` + +### 测试 +现在我们可以进行预测了。我们要提供的`feed_order`应该和训练过程一致。 + + +```python +with fluid.scope_guard(inference_scope): + [inferencer, feed_target_names, + fetch_targets] = fluid.io.load_inference_model(params_dirname, exe) + + results = exe.run(inferencer, + feed={ + 'user_id': user_id, + 'gender_id': gender_id, + 'age_id': age_id, + 'job_id': job_id, + 'movie_id': movie_id, + 'category_id': category_id, + 'movie_title': movie_title + }, + fetch_list=fetch_targets, + return_numpy=False) + predict_rating = np.array(results[0]) + print("Predict Rating of user id 1 on movie \"" + infer_movie_name + + "\" is " + str(predict_rating[0][0])) + print("Actual Rating of user id 1 on movie \"" + infer_movie_name + + "\" is 4.") +``` + +## 总结 + +本章介绍了传统的个性化推荐系统方法和YouTube的深度神经网络个性化推荐系统,并以电影推荐为例,使用PaddlePaddle训练了一个个性化推荐神经网络模型。个性化推荐系统几乎涵盖了电商系统、社交网络、广告推荐、搜索引擎等领域的方方面面,而在图像处理、自然语言处理等领域已经发挥重要作用的深度学习技术,也将会在个性化推荐系统领域大放异彩。 + + +## 参考文献 + +1. P. Resnick, N. Iacovou, etc. “[GroupLens: An Open Architecture for Collaborative Filtering of Netnews](http://ccs.mit.edu/papers/CCSWP165.html)”, Proceedings of ACM Conference on Computer Supported Cooperative Work, CSCW 1994. pp.175-186. +2. Sarwar, Badrul, et al. "[Item-based collaborative filtering recommendation algorithms.](http://files.grouplens.org/papers/www10_sarwar.pdf)" *Proceedings of the 10th international conference on World Wide Web*. ACM, 2001. +3. Kautz, Henry, Bart Selman, and Mehul Shah. "[Referral Web: combining social networks and collaborative filtering.](http://www.cs.cornell.edu/selman/papers/pdf/97.cacm.refweb.pdf)" Communications of the ACM 40.3 (1997): 63-65. APA +4. [Peter Brusilovsky](https://en.wikipedia.org/wiki/Peter_Brusilovsky) (2007). *The Adaptive Web*. p. 325. +5. Robin Burke , [Hybrid Web Recommender Systems](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.435.7538&rep=rep1&type=pdf), pp. 377-408, The Adaptive Web, Peter Brusilovsky, Alfred Kobsa, Wolfgang Nejdl (Ed.), Lecture Notes in Computer Science, Springer-Verlag, Berlin, Germany, Lecture Notes in Computer Science, Vol. 4321, May 2007, 978-3-540-72078-2. +6. Yuan, Jianbo, et al. ["Solving Cold-Start Problem in Large-scale Recommendation Engines: A Deep Learning Approach."](https://arxiv.org/pdf/1611.05480v1.pdf) *arXiv preprint arXiv:1611.05480* (2016). +7. Covington P, Adams J, Sargin E. [Deep neural networks for youtube recommendations](https://static.googleusercontent.com/media/research.google.com/zh-CN//pubs/archive/45530.pdf)[C]//Proceedings of the 10th ACM Conference on Recommender Systems. ACM, 2016: 191-198. + + +
+知识共享许可协议
本教程PaddlePaddle 创作,采用 知识共享 署名-相同方式共享 4.0 国际 许可协议进行许可。 diff --git a/doc/paddle/user_guides/rec_case/recommender_system/README.md b/doc/paddle/user_guides/rec_case/recommender_system/README.md new file mode 100644 index 0000000000000000000000000000000000000000..2a8233c7279fbbe4bdb273b1f3152cc611e1095b --- /dev/null +++ b/doc/paddle/user_guides/rec_case/recommender_system/README.md @@ -0,0 +1,589 @@ +# Recommender System + +The source code of this tutorial is in [book/recommender_system](https://github.com/PaddlePaddle/book/tree/develop/05.recommender_system). For new users, please refer to [Running This Book](https://github.com/PaddlePaddle/book/blob/develop/README.md#running-the-book) . + +## Background Introduction + +With the continuous development of network technology and the ever-expanding scale of e-commerce, the number and variety of goods grow rapidly and users need to spend a lot of time to find the goods they want to buy. This is information overload. In order to solve this problem, recommendation system came into being. + +The recommendation system is a subset of the Information Filtering System, which can be used in a range of areas such as movies, music, e-commerce, and Feed stream recommendations. The recommendation system discovers the user's personalized needs and interests by analyzing and mining user behaviors, and recommends information or products that may be of interest to the user. Unlike search engines, recommendation system do not require users to accurately describe their needs, but model their historical behavior to proactively provide information that meets user interests and needs. + +The GroupLens system \[[1](#references)\] introduced by the University of Minnesota in 1994 is generally considered to be a relatively independent research direction for the recommendation system. The system first proposed the idea of completing recommendation task based on collaborative filtering. After that, the collaborative filtering recommendation based on the model led the development of recommendation system for more than ten years. + +The traditional personalized recommendation system methods mainly include: + +- Collaborative Filtering Recommendation: This method is one of the most widely used technologies which requires the collection and analysis of users' historical behaviors, activities and preferences. It can usually be divided into two sub-categories: User-Based Recommendation \[[1](#references)\] and Item-Based Recommendation \[[2](#references)\]. A key advantage of this method is that it does not rely on the machine to analyze the content characteristics of the item, so it does not need to understand the item itself to accurately recommend complex items such as movies. However, the disadvantage is that there is a cold start problem for new users without any behavior. At the same time, there is also a sparsity problem caused by insufficient interaction data between users and commodities. It is worth mentioning that social network \[[3](#references)\] or geographic location and other context information can be integrated into collaborative filtering. +- Content-Based Filtering Recommendation \[[4](#references)\] : This method uses the content description of the product to abstract meaningful features by calculating the similarity between the user's interest and the product description to make recommendations to users. The advantage is that it is simple and straightforward. It does not need to evaluate products based on the comments of users. 
Instead, it compares the product similarity by product attributes to recommend similar products to the users of interest. The disadvantage is that there is also a cold start problem for new users without any behavior. +- Hybrid Recommendation \[[5](#references)\]: Use different inputs and techniques to jointly recommend items to complement each single recommendation technique. + +In recent years, deep learning has achieved great success in many fields. Both academia and industry are trying to apply deep learning to the field of recommendation systems. Deep learning has excellent ability to automatically extract features, can learn multi-level abstract feature representations, and learn heterogeneous or cross-domain content information, which can deal with the cold start problem \[[6](#references)\] of recommendation system to some extent. This tutorial focuses on the deep learning model of recommendation system and how to implement the model with PaddlePaddle. + +## Result Demo + +We use a dataset containing user information, movie information, and movie ratings as a recommendation system. When we train the model, we only need to input the corresponding user ID and movie ID, we can get a matching score (range [0, 5], the higher the score is regarded as the greater interest), and then according to the recommendation of all movies sort the scores and recommend them to movies that may be of interest to the user. + +``` +Input movie_id: 1962 +Input user_id: 1 +Prediction Score is 4.25 +``` + +## Model Overview + +In this chapter, we first introduce YouTube's video personalization recommendation system \[[7](#references)\], and then introduce the fusion recommendation model we implemented. + +### YouTube's Deep Neural Network Personalized Recommendation System + +YouTube is the world's largest video uploading, sharing and discovery site, and the YouTube Personalized Recommendation System recommends personalized content from a growing library to more than 1 billion users. The entire system consists of two neural networks: a candidate generation network and a ranking network. The candidate generation network generates hundreds of candidates from a million-level video library, and the ranking network sorts the candidates and outputs the highest ranked tens of results. The system structure is shown in Figure 1: + +

+Figure 1. YouTube personalized recommendation system structure
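+
+The two-stage flow described above can be summarized with a short, purely illustrative sketch; the model objects and their `retrieve`/`score` methods are hypothetical placeholders rather than a real API:
+
+```python
+def recommend(user, video_library, candidate_model, ranking_model, top_k=25):
+    """Illustrative two-stage recommendation: candidate generation, then ranking."""
+    # Stage 1: the candidate generation network narrows a million-level video
+    # library down to a few hundred candidates for this user.
+    candidates = candidate_model.retrieve(user, video_library, num_candidates=500)
+    # Stage 2: the ranking network scores each candidate with richer features;
+    # the highest-scoring videos are returned to the user.
+    ranked = sorted(candidates, key=lambda v: ranking_model.score(user, v), reverse=True)
+    return ranked[:top_k]
+```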

+ +#### Candidate Generation Network + +The candidate generation network models the recommendation problem as a multi-class classification problem with a large number of categories. For a Youtube user, using its watching history (video ID), search tokens, demographic information (such as geographic location, user login device), binary features (such as gender, whether to log in), and continuous features (such as user age), etc., multi-classify all videos in the video library to obtain the classification result of each category (ie, the recommendation probability of each video), eventually outputting hundreds of videos with high probability. + +First, the historical information such as watching history and search token records are mapped to vectors and averaged to obtain a fixed length representation. At the same time, demographic characteristics are input to optimize the recommendation effect of new users, and the binary features and continuous features are normalized to the range [0, 1]. Next, put all the feature representations into a vector and input them to the non-linear multilayer perceptron (MLP, see [Identification Figures](https://github.com/PaddlePaddle/book/blob/develop/02.recognize_digits/README.md) tutorial). Finally, during training, the output of the MLP is classified by softmax. When predicting, the similarity of the user's comprehensive features (MLP output) to all videos' features is calculated, and the highest score of $k$ is obtained as the result of the candidate generation network. Figure 2 shows the candidate generation network structure. + +

+Figure 2. Candidate generation network structure

+ +For a user $U$, the formula for predicting whether the video $\omega$ that the user wants to watch at the moment is video $i$ is: + +$$P(\omega=i|u)=\frac{e^{v_{i}u}}{\sum_{j \in V}e^{v_{j}u}}$$ + +Where $u$ is the feature representation of the user $U$, $V$ is the video library collection, and $v_i$ is the feature representation of the $i$ video in the video library. $u$ and $v_i$ are vectors of equal length, and the dot product can be implemented by a fully connected layer. + +Considering that the number of categories in the softmax classification is very large, in order to ensure a certain computational efficiency: 1) in the training phase, use negative sample category sampling to reduce the number of actually calculated categories to thousands; 2) in the recommendation (prediction) phase, ignore the normalized calculation of softmax (does not affect the result), and simplifies the category scoring problem into the nearest neighbor search problem in the dot product space, then takes the nearest $k$ video of $u$ as a candidate for generation. + +#### Ranking Network +The structure of the ranking network is similar to the candidate generation network, but its goal is to perform finer ranking of the candidates. Similar to the feature extraction method in traditional advertisement ranking, a large number of related features (such as video ID, last watching time, etc.) for video sorting are also constructed here. These features are treated similarly to the candidate generation network, except that at the top of the ranking network is a weighted logistic regression that scores all candidate videos and sorts them from high to low. Then, return to the user. + +### Fusion recommendation model +This section uses Convolutional Neural Networks to learn the representation of movie titles. The convolutional neural network for text and the fusion recommendation model are introduced in turn. + +#### Convolutional Neural Network (CNN) for text + +Convolutional neural networks are often used to deal with data of a grid-like topology. For example, an image can be viewed as a pixel of a two-dimensional grid, and a natural language can be viewed as a one-dimensional sequence of words. Convolutional neural networks can extract a variety of local features and combine them to obtain more advanced feature representations. Experiments show that convolutional neural networks can efficiently model image and text problems. + +The convolutional neural network is mainly composed of convolution and pooling operations, and its application and combination methods are flexible and varied. In this section we will explain the network as shown in Figure 3: + +

+Figure 3. Convolutional neural network text classification model

+ +Suppose the length of the sentence to be processed is $n$, where the word vector of the $i$ word is $x_i\in\mathbb{R}^k$, and $k$ is the dimension size. + +First, splicing the word vector: splicing each $h$ word to form a word window of size $h$, denoted as $x_{i:i+h-1}$, which represents the word sequence splicing of $x_{i}, x_{i+1}, \ldots, x_{i+h-1}$, where $i$ represents the position of the first word in the word window throughout the sentence, ranging from $1$ to $n-h+1$, $x_{i:i+h-1}\in\mathbb{R}^{hk}$. + +Second, perform a convolution operation: apply the convolution kernel $w\in\mathbb{R}^{hk}$ to the window $x_{i:i+h-1}$ containing $h$ words. , get the feature $c_i=f(w\cdot x_{i:i+h-1}+b)$, where $b\in\mathbb{R}$ is the bias and $f$ is the non Linear activation function, such as $sigmoid$. Apply the convolution kernel to all word windows ${x_{1:h}, x_{2:h+1},\ldots,x_{n-h+1:n}}$ in the sentence, producing a feature map: + +$$c=[c_1,c_2,\ldots,c_{n-h+1}], c \in \mathbb{R}^{n-h+1}$$ + +Next, using the max pooling over time for feature maps to obtain the feature $\hat c$, of the whole sentence corresponding to this convolution kernel, which is the maximum value of all elements in the feature map: + +$$\hat c=max(c)$$ + +#### Fusion recommendation model overview + +In the film personalized recommendation system that incorporates the recommendation model: + +1. First, take user features and movie features as input to the neural network, where: + + - The user features incorporate four attribute information: user ID, gender, occupation, and age. + + - The movie feature incorporate three attribute information: movie ID, movie type ID, and movie name. + +2. For the user feature, map the user ID to a vector representation with a dimension size of 256, enter the fully connected layer, and do similar processing for the other three attributes. Then the feature representations of the four attributes are fully connected and added separately. + +3. For movie features, the movie ID is processed in a manner similar to the user ID. The movie type ID is directly input into the fully connected layer in the form of a vector, and the movie name is represented by a fixed-length vector using a text convolutional neural network. The feature representations of the three attributes are then fully connected and added separately. + +4. After obtaining the vector representation of the user and the movie, calculate the cosine similarity of them as the score of the personalized recommendation system. Finally, the square of the difference between the similarity score and the user's true score is used as the loss function of the regression model. + +

+Figure 4. Fusion recommendation model
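+
+To make the scoring rule in step 4 concrete, the following minimal NumPy sketch uses random 200-dimensional vectors as stand-ins for the learned user and movie representations; scaling the cosine similarity by 5 mirrors the network configuration shown later:
+
+```python
+import numpy as np
+
+rng = np.random.default_rng(0)
+user_vec = rng.standard_normal(200)    # stand-in for the user feature vector
+movie_vec = rng.standard_normal(200)   # stand-in for the movie feature vector
+
+# Cosine similarity of the two 200-d representations is the raw match score.
+cos_sim = user_vec.dot(movie_vec) / (np.linalg.norm(user_vec) * np.linalg.norm(movie_vec))
+predicted_score = 5.0 * cos_sim        # map the score onto the 0-5 rating scale
+
+true_score = 4.0                       # the user's actual rating of this movie
+loss = (predicted_score - true_score) ** 2   # squared error used as the regression loss
+print(predicted_score, loss)
+```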

+ +## Data Preparation + +### Data Introduction and Download + +We take [MovieLens Million Dataset (ml-1m)](http://files.grouplens.org/datasets/movielens/ml-1m.zip) as an example. The ml-1m dataset contains 1,000,000 reviews of 4,000 movies by 6,000 users (scores ranging from 1 to 5, all integer), collected by the GroupLens Research lab. + +Paddle provides modules for automatically loading data in the API. The data module is `paddle.dataset.movielens` + + +```python +from __future__ import print_function +import paddle +movie_info = paddle.dataset.movielens.movie_info() +print(list(movie_info.values())[0]) +``` + + +```python +# Run this block to show dataset's documentation +# help(paddle.dataset.movielens) +``` + +The original data includes feature data of the movie, user's feature data, and the user's rating of the movie. + +For example, one of the movie features is: + + +```python +movie_info = paddle.dataset.movielens.movie_info() +print(list(movie_info.values())[0]) +``` + + + + +This means that the movie id is 1, and the title is 《Toy Story》, which is divided into three categories. These three categories are animation, children, and comedy. + + +```python +user_info = paddle.dataset.movielens.user_info() +print(list(user_info.values())[0]) +``` + + + + +This means that the user ID is 1, female, and younger than 18 years old. The occupation ID is 10. + + +Among them, the age uses the following distribution + +* 1: "Under 18" +* 18: "18-24" +* 25: "25-34" +* 35: "35-44" +* 45: "45-49" +* 50: "50-55" +* 56: "56+" + +The occupation is selected from the following options: + +* 0: "other" or not specified +* 1: "academic/educator" +* 2: "artist" +* 3: "clerical/admin" +* 4: "college/grad student" +* 5: "customer service" +* 6: "doctor/health care" +* 7: "executive/managerial" +* 8: "farmer" +* 9: "homemaker" +* 10: "K-12 student" +* 11: "lawyer" +* 12: "programmer" +* 13: "retired" +* 14: "sales/marketing" +* 15: "scientist" +* 16: "self-employed" +* 17: "technician/engineer" +* 18: "tradesman/craftsman" +* 19: "unemployed" +* 20: "writer" + +For each training or test data, it is + + rating. + +For example, we get the first training data: + + +```python +train_set_creator = paddle.dataset.movielens.train() +train_sample = next(train_set_creator()) +uid = train_sample[0] +mov_id = train_sample[len(user_info[uid].value())] +print("User %s rates Movie %s with Score %s"%(user_info[uid], movie_info[mov_id], train_sample[-1])) +``` + + User rates Movie with Score [5.0] + + +That is, the user 1 evaluates the movie 1193 as 5 points. + +## Configuration Instruction + +Below we begin to configure the model based on the form of the input data. First import the required library functions and define global variables. 
+ +- IS_SPARSE: whether to use sparse update in embedding +- PASS_NUM: number of epoch + + +```python +import math +import sys +import numpy as np +import paddle +import paddle.fluid as fluid +import paddle.fluid.layers as layers +import paddle.fluid.nets as nets + +IS_SPARSE = True +BATCH_SIZE = 256 +PASS_NUM = 20 +``` + +Then define the model configuration for our user feature synthesis model + +```python +def get_usr_combined_features(): + """network definition for user part""" + + USR_DICT_SIZE = paddle.dataset.movielens.max_user_id() + 1 + + uid = fluid.data(name='user_id', shape=[None], dtype='int64') + + usr_emb = fluid.embedding( + input=uid, + dtype='float32', + size=[USR_DICT_SIZE, 32], + param_attr='user_table', + is_sparse=IS_SPARSE) + + usr_fc = layers.fc(input=usr_emb, size=32) + + USR_GENDER_DICT_SIZE = 2 + + usr_gender_id = fluid.data(name='gender_id', shape=[None], dtype='int64') + + usr_gender_emb = fluid.embedding( + input=usr_gender_id, + size=[USR_GENDER_DICT_SIZE, 16], + param_attr='gender_table', + is_sparse=IS_SPARSE) + + usr_gender_fc = layers.fc(input=usr_gender_emb, size=16) + + USR_AGE_DICT_SIZE = len(paddle.dataset.movielens.age_table) + usr_age_id = fluid.data(name='age_id', shape=[None], dtype="int64") + + usr_age_emb = fluid.embedding( + input=usr_age_id, + size=[USR_AGE_DICT_SIZE, 16], + is_sparse=IS_SPARSE, + param_attr='age_table') + + usr_age_fc = layers.fc(input=usr_age_emb, size=16) + + USR_JOB_DICT_SIZE = paddle.dataset.movielens.max_job_id() + 1 + usr_job_id = fluid.data(name='job_id', shape=[None], dtype="int64") + + usr_job_emb = fluid.embedding( + input=usr_job_id, + size=[USR_JOB_DICT_SIZE, 16], + param_attr='job_table', + is_sparse=IS_SPARSE) + + usr_job_fc = layers.fc(input=usr_job_emb, size=16) + + concat_embed = layers.concat( + input=[usr_fc, usr_gender_fc, usr_age_fc, usr_job_fc], axis=1) + + usr_combined_features = layers.fc(input=concat_embed, size=200, act="tanh") + + return usr_combined_features +``` + +As shown in the code above, for each user, we enter a 4-dimensional feature. This includes user_id, gender_id, age_id, job_id. These dimensional features are simple integer values. In order to facilitate the subsequent neural network processing of these features, we use the language model in NLP to transform these discrete integer values ​​into embedding. And form them into usr_emb, usr_gender_emb, usr_age_emb, usr_job_emb, respectively. + +Then, we enter all the user features into a fully connected layer(fc). Combine all features into one 200-dimension feature. 
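+
+As an optional sanity check, the user branch can be built alone inside a throwaway program, so that the default program used for training below is not touched, to confirm that it indeed outputs a 200-dimension feature. This is only a sketch and can be skipped:
+
+```python
+check_prog = fluid.Program()
+with fluid.program_guard(check_prog, fluid.Program()):
+    usr_feat = get_usr_combined_features()
+    print(usr_feat.shape)  # expected: (-1, 200); -1 stands for the batch dimension
+```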
+ +Furthermore, we make a similar transformation for each movie feature, the network configuration is: + + +```python +def get_mov_combined_features(): + """network definition for item(movie) part""" + + MOV_DICT_SIZE = paddle.dataset.movielens.max_movie_id() + 1 + + mov_id = fluid.data(name='movie_id', shape=[None], dtype='int64') + + mov_emb = fluid.embedding( + input=mov_id, + dtype='float32', + size=[MOV_DICT_SIZE, 32], + param_attr='movie_table', + is_sparse=IS_SPARSE) + + mov_fc = layers.fc(input=mov_emb, size=32) + + CATEGORY_DICT_SIZE = len(paddle.dataset.movielens.movie_categories()) + + category_id = fluid.data( + name='category_id', shape=[None], dtype='int64', lod_level=1) + + mov_categories_emb = fluid.embedding( + input=category_id, size=[CATEGORY_DICT_SIZE, 32], is_sparse=IS_SPARSE) + + mov_categories_hidden = layers.sequence_pool( + input=mov_categories_emb, pool_type="sum") + + MOV_TITLE_DICT_SIZE = len(paddle.dataset.movielens.get_movie_title_dict()) + + mov_title_id = fluid.data( + name='movie_title', shape=[None], dtype='int64', lod_level=1) + + mov_title_emb = fluid.embedding( + input=mov_title_id, size=[MOV_TITLE_DICT_SIZE, 32], is_sparse=IS_SPARSE) + + mov_title_conv = nets.sequence_conv_pool( + input=mov_title_emb, + num_filters=32, + filter_size=3, + act="tanh", + pool_type="sum") + + concat_embed = layers.concat( + input=[mov_fc, mov_categories_hidden, mov_title_conv], axis=1) + + mov_combined_features = layers.fc(input=concat_embed, size=200, act="tanh") + + return mov_combined_features +``` + + +The title of a movie is a sequence of integers, and the integer represents the subscript of the word in the index sequence. This sequence is sent to the `sequence_conv_pool` layer, which uses convolution and pooling on the time dimension. Because of this, the output will be fixed length, although the length of the input sequence will vary. + +Finally, we define an `inference_program` to calculate the similarity between user features and movie features using cosine similarity. + +```python +def inference_program(): + """the combined network""" + + usr_combined_features = get_usr_combined_features() + mov_combined_features = get_mov_combined_features() + + inference = layers.cos_sim(X=usr_combined_features, Y=mov_combined_features) + scale_infer = layers.scale(x=inference, scale=5.0) + + return scale_infer +``` + +Furthermore, we define a `train_program` to use the result computed by `inference_program`, and calculate the error with the help of the tag data. We also define an `optimizer_func` to define the optimizer. + +```python +def train_program(): + """define the cost function""" + + scale_infer = inference_program() + + label = fluid.data(name='score', shape=[None, 1], dtype='float32') + square_cost = layers.square_error_cost(input=scale_infer, label=label) + avg_cost = layers.mean(square_cost) + + return [avg_cost, scale_infer] + + +def optimizer_func(): + return fluid.optimizer.SGD(learning_rate=0.2) +``` + + +## Training Model + +### Defining the training environment +Define your training environment and specify whether the training takes place on CPU or GPU. + +```python +use_cuda = False +place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() +``` + +### Defining the data provider +The next step is to define a data provider for training and testing. The provider reads in a data of size `BATCH_SIZE`. `paddle.dataset.movielens.train` will provide a data of size `BATCH_SIZE` after each scribbling, and the size of the out-of-order is the cache size `buf_size`. 
+ +```python +train_reader = fluid.io.batch( + fluid.io.shuffle( + paddle.dataset.movielens.train(), buf_size=8192), + batch_size=BATCH_SIZE) + +test_reader = fluid.io.batch( + paddle.dataset.movielens.test(), batch_size=BATCH_SIZE) +``` + +### Constructing a training process (trainer) +We have constructed a training process here, including training optimization functions. + +### Provide data + +`feed_order` is used to define the mapping between each generated data and `paddle.layer.data`. For example, the data in the first column generated by `movielens.train` corresponds to the feature `user_id`. + +```python +feed_order = [ + 'user_id', 'gender_id', 'age_id', 'job_id', 'movie_id', 'category_id', + 'movie_title', 'score' +] +``` + +### Building training programs and testing programs +The training program and the test program are separately constructed, and the training optimizer is imported. + +```python +main_program = fluid.default_main_program() +star_program = fluid.default_startup_program() +[avg_cost, scale_infer] = train_program() + +test_program = main_program.clone(for_test=True) +sgd_optimizer = optimizer_func() +sgd_optimizer.minimize(avg_cost) +exe = fluid.Executor(place) + +def train_test(program, reader): + count = 0 + feed_var_list = [ + program.global_block().var(var_name) for var_name in feed_order + ] + feeder_test = fluid.DataFeeder( + feed_list=feed_var_list, place=place) + test_exe = fluid.Executor(place) + accumulated = 0 + for test_data in reader(): + avg_cost_np = test_exe.run(program=program, + feed=feeder_test.feed(test_data), + fetch_list=[avg_cost]) + accumulated += avg_cost_np[0] + count += 1 + return accumulated / count +``` + +### Build a training main loop and start training +We perform the training cycle according to the training cycle number (`PASS_NUM`) defined above and some other parameters, and perform a test every time. When the test result is good enough, we exit the training and save the trained parameters. + +```python +# Specify the directory path to save the parameters +params_dirname = "recommender_system.inference.model" + +from paddle.utils.plot import Ploter +train_prompt = "Train cost" +test_prompt = "Test cost" + +plot_cost = Ploter(train_prompt, test_prompt) + +def train_loop(): + feed_list = [ + main_program.global_block().var(var_name) for var_name in feed_order + ] + feeder = fluid.DataFeeder(feed_list, place) + exe.run(star_program) + + for pass_id in range(PASS_NUM): + for batch_id, data in enumerate(train_reader()): + # train a mini-batch + outs = exe.run(program=main_program, + feed=feeder.feed(data), + fetch_list=[avg_cost]) + out = np.array(outs[0]) + + # get test avg_cost + test_avg_cost = train_test(test_program, test_reader) + + plot_cost.append(train_prompt, batch_id, outs[0]) + plot_cost.append(test_prompt, batch_id, test_avg_cost) + plot_cost.plot() + + if batch_id == 20: + if params_dirname is not None: + fluid.io.save_inference_model(params_dirname, [ + "user_id", "gender_id", "age_id", "job_id", + "movie_id", "category_id", "movie_title" + ], [scale_infer], exe) + return + print('EpochID {0}, BatchID {1}, Test Loss {2:0.2}'.format( + pass_id + 1, batch_id + 1, float(test_avg_cost))) + + if math.isnan(float(out[0])): + sys.exit("got NaN loss, training failed.") +``` +Start training +```python +train_loop() +``` + +## Model Application + +### Generate test data +Use the API of create_lod_tensor(data, lod, place) to generate the tensor of the detail level. 
`data` is a sequence, and each element is a sequence of index numbers. `lod` is the detail level's information, corresponding to `data`. For example, data = [[10, 2, 3], [2, 3]] means that it contains two sequences of lengths 3 and 2. Correspondingly lod = [[3, 2]], which indicates that it contains a layer of detail information, meaning that `data` has two sequences, lengths of 3 and 2. + +In this prediction example, we try to predict the score given by user with ID1 for the movie 'Hunchback of Notre Dame'. + +```python +infer_movie_id = 783 +infer_movie_name = paddle.dataset.movielens.movie_info()[infer_movie_id].title +user_id = np.array([1]).astype("int64").reshape(-1) +gender_id = np.array([1]).astype("int64").reshape(-1) +age_id = np.array([0]).astype("int64").reshape(-1) +job_id = np.array([10]).astype("int64").reshape(-1) +movie_id = np.array([783]).astype("int64").reshape(-1) # Hunchback of Notre Dame +category_id = fluid.create_lod_tensor(np.array([10, 8, 9], dtype='int64'), [[3]], place) # Animation, Children's, Musical +movie_title = fluid.create_lod_tensor(np.array([1069, 4140, 2923, 710, 988], dtype='int64'), [[5]], + place) # 'hunchback','of','notre','dame','the' +``` + +### Building the prediction process and testing +Similar to the training process, we need to build a prediction process, where `params_dirname` is the address used to store the various parameters in the training process. + +```python +place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() +exe = fluid.Executor(place) + +inference_scope = fluid.core.Scope() +``` + +### Testing +Now we can make predictions. The `feed_order` we provide should be consistent with the training process. + + +```python +with fluid.scope_guard(inference_scope): + [inferencer, feed_target_names, + fetch_targets] = fluid.io.load_inference_model(params_dirname, exe) + + results = exe.run(inferencer, + feed={ + 'user_id': user_id, + 'gender_id': gender_id, + 'age_id': age_id, + 'job_id': job_id, + 'movie_id': movie_id, + 'category_id': category_id, + 'movie_title': movie_title + }, + fetch_list=fetch_targets, + return_numpy=False) + predict_rating = np.array(results[0]) + print("Predict Rating of user id 1 on movie \"" + infer_movie_name + + "\" is " + str(predict_rating[0][0])) + print("Actual Rating of user id 1 on movie \"" + infer_movie_name + + "\" is 4.") +``` + +## Summary + +This chapter introduced the traditional personalized recommendation system method and YouTube's deep neural network personalized recommendation system. It further took movie recommendation as an example, and used PaddlePaddle to train a personalized recommendation neural network model. The personalized recommendation system covers almost all aspects of e-commerce systems, social networks, advertising recommendations, search engines, etc. Deep learning technologies have played an important role in image processing, natural language processing, etc., and will also prevail in personalized recommendation systems. + + +## References + +1. P. Resnick, N. Iacovou, etc. “[GroupLens: An Open Architecture for Collaborative Filtering of Netnews](http://ccs.mit.edu/papers/CCSWP165.html)”, Proceedings of ACM Conference on Computer Supported Cooperative Work, CSCW 1994. pp.175-186. +2. Sarwar, Badrul, et al. "[Item-based collaborative filtering recommendation algorithms.](http://files.grouplens.org/papers/www10_sarwar.pdf)" *Proceedings of the 10th international conference on World Wide Web*. ACM, 2001. +3. Kautz, Henry, Bart Selman, and Mehul Shah. 
"[Referral Web: combining social networks and collaborative filtering.](http://www.cs.cornell.edu/selman/papers/pdf/97.cacm.refweb.pdf)" Communications of the ACM 40.3 (1997): 63-65. APA +4. [Peter Brusilovsky](https://en.wikipedia.org/wiki/Peter_Brusilovsky) (2007). *The Adaptive Web*. p. 325. +5. Robin Burke , [Hybrid Web recommendation systems](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.435.7538&rep=rep1&type=pdf), pp. 377-408, The Adaptive Web, Peter Brusilovsky, Alfred Kobsa, Wolfgang Nejdl (Ed.), Lecture Notes in Computer Science, Springer-Verlag, Berlin, Germany, Lecture Notes in Computer Science, Vol. 4321, May 2007, 978-3-540-72078-2. +6. Yuan, Jianbo, et al. ["Solving Cold-Start Problem in Large-scale Recommendation Engines: A Deep Learning Approach."](https://arxiv.org/pdf/1611.05480v1.pdf) *arXiv preprint arXiv:1611.05480* (2016). +7. Covington P, Adams J, Sargin E. [Deep neural networks for youtube recommendations](https://static.googleusercontent.com/media/research.google.com/zh-CN//pubs/archive/45530.pdf)[C]//Proceedings of the 10th ACM Conference on recommendation systems. ACM, 2016: 191-198. + + +
+知识共享许可协议
This tutorial is contributed by PaddlePaddle, and licensed under a Creative Commons Attribution-ShareAlike 4.0 International License. diff --git a/doc/paddle/user_guides/rec_case/recommender_system/_ce.py b/doc/paddle/user_guides/rec_case/recommender_system/_ce.py new file mode 100644 index 0000000000000000000000000000000000000000..d46acd3ab3b4a916a67b5efefdb077aa1190d86c --- /dev/null +++ b/doc/paddle/user_guides/rec_case/recommender_system/_ce.py @@ -0,0 +1,36 @@ +### This file is only used for continuous evaluation test! +from __future__ import print_function +from __future__ import division +from __future__ import absolute_import +import os +import sys +sys.path.append(os.environ['ceroot']) +from kpi import CostKpi + +test_cost_kpi = CostKpi('test_cost', 0.02, 0, actived=True, desc='test cost') +tracking_kpis = [test_cost_kpi] + + +def parse_log(log): + for line in log.split('\n'): + fs = line.strip().split('\t') + print(fs) + if len(fs) == 3 and fs[0] == 'kpis': + kpi_name = fs[1] + kpi_value = float(fs[2]) + yield kpi_name, kpi_value + + +def log_to_ce(log): + kpi_tracker = {} + for kpi in tracking_kpis: + kpi_tracker[kpi.name] = kpi + for (kpi_name, kpi_value) in parse_log(log): + print(kpi_name, kpi_value) + kpi_tracker[kpi_name].add_record(kpi_value) + kpi_tracker[kpi_name].persist() + + +if __name__ == '__main__': + log = sys.stdin.read() + log_to_ce(log) diff --git a/doc/paddle/user_guides/rec_case/recommender_system/image/Deep_candidate_generation_model_architecture.en.png b/doc/paddle/user_guides/rec_case/recommender_system/image/Deep_candidate_generation_model_architecture.en.png new file mode 100644 index 0000000000000000000000000000000000000000..c213608e769f69fb2cfe8597f8e696ee53730e3d Binary files /dev/null and b/doc/paddle/user_guides/rec_case/recommender_system/image/Deep_candidate_generation_model_architecture.en.png differ diff --git a/doc/paddle/user_guides/rec_case/recommender_system/image/Deep_candidate_generation_model_architecture.png b/doc/paddle/user_guides/rec_case/recommender_system/image/Deep_candidate_generation_model_architecture.png new file mode 100644 index 0000000000000000000000000000000000000000..8aedb2204371e7691140ceffa5992f6080bbf097 Binary files /dev/null and b/doc/paddle/user_guides/rec_case/recommender_system/image/Deep_candidate_generation_model_architecture.png differ diff --git a/doc/paddle/user_guides/rec_case/recommender_system/image/YouTube_Overview.en.png b/doc/paddle/user_guides/rec_case/recommender_system/image/YouTube_Overview.en.png new file mode 100644 index 0000000000000000000000000000000000000000..4298567ac5600173343299999965b20612e7affe Binary files /dev/null and b/doc/paddle/user_guides/rec_case/recommender_system/image/YouTube_Overview.en.png differ diff --git a/doc/paddle/user_guides/rec_case/recommender_system/image/YouTube_Overview.png b/doc/paddle/user_guides/rec_case/recommender_system/image/YouTube_Overview.png new file mode 100644 index 0000000000000000000000000000000000000000..a98e7cc67606b31e4c945f7eb907563e46dcef56 Binary files /dev/null and b/doc/paddle/user_guides/rec_case/recommender_system/image/YouTube_Overview.png differ diff --git a/doc/paddle/user_guides/rec_case/recommender_system/image/formula1.png b/doc/paddle/user_guides/rec_case/recommender_system/image/formula1.png new file mode 100644 index 0000000000000000000000000000000000000000..e3923ba2ec0080f0b4733f2781fff946ce076655 Binary files /dev/null and b/doc/paddle/user_guides/rec_case/recommender_system/image/formula1.png differ diff --git 
a/doc/paddle/user_guides/rec_case/recommender_system/image/formula2.png b/doc/paddle/user_guides/rec_case/recommender_system/image/formula2.png new file mode 100644 index 0000000000000000000000000000000000000000..595205bf7af6e86ae02baca4f97c3439283a57f9 Binary files /dev/null and b/doc/paddle/user_guides/rec_case/recommender_system/image/formula2.png differ diff --git a/doc/paddle/user_guides/rec_case/recommender_system/image/formula3.png b/doc/paddle/user_guides/rec_case/recommender_system/image/formula3.png new file mode 100644 index 0000000000000000000000000000000000000000..0443b979354d02881a10b332399a18ef2bb52dec Binary files /dev/null and b/doc/paddle/user_guides/rec_case/recommender_system/image/formula3.png differ diff --git a/doc/paddle/user_guides/rec_case/recommender_system/image/output_32_0.png b/doc/paddle/user_guides/rec_case/recommender_system/image/output_32_0.png new file mode 100644 index 0000000000000000000000000000000000000000..7fd97b9cc3a0b9105b41591af4e8f8e4646bd681 Binary files /dev/null and b/doc/paddle/user_guides/rec_case/recommender_system/image/output_32_0.png differ diff --git a/doc/paddle/user_guides/rec_case/recommender_system/image/rec_regression_network.png b/doc/paddle/user_guides/rec_case/recommender_system/image/rec_regression_network.png new file mode 100644 index 0000000000000000000000000000000000000000..90c9b09fb78db98391ee199934f2d16efd6d6652 Binary files /dev/null and b/doc/paddle/user_guides/rec_case/recommender_system/image/rec_regression_network.png differ diff --git a/doc/paddle/user_guides/rec_case/recommender_system/image/rec_regression_network_en.png b/doc/paddle/user_guides/rec_case/recommender_system/image/rec_regression_network_en.png new file mode 100644 index 0000000000000000000000000000000000000000..6fc8e11967000ec48c1c0a6fa3c2eaecb80cbb84 Binary files /dev/null and b/doc/paddle/user_guides/rec_case/recommender_system/image/rec_regression_network_en.png differ diff --git a/doc/paddle/user_guides/rec_case/recommender_system/image/text_cnn.png b/doc/paddle/user_guides/rec_case/recommender_system/image/text_cnn.png new file mode 100644 index 0000000000000000000000000000000000000000..61e63d9147cbc2901706ef80776d706e5368c3c5 Binary files /dev/null and b/doc/paddle/user_guides/rec_case/recommender_system/image/text_cnn.png differ diff --git a/doc/paddle/user_guides/rec_case/recommender_system/image/text_cnn_en.png b/doc/paddle/user_guides/rec_case/recommender_system/image/text_cnn_en.png new file mode 100644 index 0000000000000000000000000000000000000000..fbcae2be81141be955076e877b94b0ea5d7e4d4a Binary files /dev/null and b/doc/paddle/user_guides/rec_case/recommender_system/image/text_cnn_en.png differ diff --git a/doc/paddle/user_guides/rec_case/recommender_system/index.cn.html b/doc/paddle/user_guides/rec_case/recommender_system/index.cn.html new file mode 100644 index 0000000000000000000000000000000000000000..b75bae0a93451775b78f03b2bec64b74e48aee8a --- /dev/null +++ b/doc/paddle/user_guides/rec_case/recommender_system/index.cn.html @@ -0,0 +1,665 @@ + + + + + + + + + + + + + + + + + +
+
+ + + + + + + diff --git a/doc/paddle/user_guides/rec_case/recommender_system/index.html b/doc/paddle/user_guides/rec_case/recommender_system/index.html new file mode 100644 index 0000000000000000000000000000000000000000..c854cd253bf28fa88cf8ea2be485cbef42bb1acd --- /dev/null +++ b/doc/paddle/user_guides/rec_case/recommender_system/index.html @@ -0,0 +1,653 @@ + + + + + + + + + + + + + + + + + +
+
+ + + + + + + diff --git a/doc/paddle/user_guides/rec_case/recommender_system/train.py b/doc/paddle/user_guides/rec_case/recommender_system/train.py new file mode 100644 index 0000000000000000000000000000000000000000..38009fa54ef9899c0c3a9f0cc6e9e9ffc8896b0c --- /dev/null +++ b/doc/paddle/user_guides/rec_case/recommender_system/train.py @@ -0,0 +1,356 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function +import math +import sys +import argparse +import numpy as np +import paddle +import paddle.fluid as fluid +import paddle.fluid.layers as layers +import paddle.fluid.nets as nets + +IS_SPARSE = True +BATCH_SIZE = 256 + + +def parse_args(): + parser = argparse.ArgumentParser("recommender_system") + parser.add_argument( + '--enable_ce', + action='store_true', + help="If set, run the task with continuous evaluation logs.") + parser.add_argument( + '--use_gpu', type=int, default=0, help="Whether to use GPU or not.") + parser.add_argument( + '--num_epochs', type=int, default=1, help="number of epochs.") + args = parser.parse_args() + return args + + +def get_usr_combined_features(): + + USR_DICT_SIZE = paddle.dataset.movielens.max_user_id() + 1 + + uid = fluid.data(name='user_id', shape=[None], dtype='int64') + + usr_emb = fluid.embedding( + input=uid, + dtype='float32', + size=[USR_DICT_SIZE, 32], + param_attr='user_table', + is_sparse=IS_SPARSE) + + usr_fc = layers.fc(input=usr_emb, size=32) + + USR_GENDER_DICT_SIZE = 2 + + usr_gender_id = fluid.data(name='gender_id', shape=[None], dtype='int64') + + usr_gender_emb = fluid.embedding( + input=usr_gender_id, + size=[USR_GENDER_DICT_SIZE, 16], + param_attr='gender_table', + is_sparse=IS_SPARSE) + + usr_gender_fc = layers.fc(input=usr_gender_emb, size=16) + + USR_AGE_DICT_SIZE = len(paddle.dataset.movielens.age_table) + usr_age_id = fluid.data(name='age_id', shape=[None], dtype="int64") + + usr_age_emb = fluid.embedding( + input=usr_age_id, + size=[USR_AGE_DICT_SIZE, 16], + is_sparse=IS_SPARSE, + param_attr='age_table') + + usr_age_fc = layers.fc(input=usr_age_emb, size=16) + + USR_JOB_DICT_SIZE = paddle.dataset.movielens.max_job_id() + 1 + usr_job_id = fluid.data(name='job_id', shape=[None], dtype="int64") + + usr_job_emb = fluid.embedding( + input=usr_job_id, + size=[USR_JOB_DICT_SIZE, 16], + param_attr='job_table', + is_sparse=IS_SPARSE) + + usr_job_fc = layers.fc(input=usr_job_emb, size=16) + + concat_embed = layers.concat( + input=[usr_fc, usr_gender_fc, usr_age_fc, usr_job_fc], axis=1) + + usr_combined_features = layers.fc(input=concat_embed, size=200, act="tanh") + + return usr_combined_features + + +def get_mov_combined_features(): + + MOV_DICT_SIZE = paddle.dataset.movielens.max_movie_id() + 1 + + mov_id = fluid.data(name='movie_id', shape=[None], dtype='int64') + + mov_emb = fluid.embedding( + input=mov_id, + dtype='float32', + size=[MOV_DICT_SIZE, 32], + param_attr='movie_table', + is_sparse=IS_SPARSE) + + mov_fc = 
layers.fc(input=mov_emb, size=32) + + CATEGORY_DICT_SIZE = len(paddle.dataset.movielens.movie_categories()) + + category_id = fluid.data( + name='category_id', shape=[None], dtype='int64', lod_level=1) + + mov_categories_emb = fluid.embedding( + input=category_id, size=[CATEGORY_DICT_SIZE, 32], is_sparse=IS_SPARSE) + + mov_categories_hidden = layers.sequence_pool( + input=mov_categories_emb, pool_type="sum") + + MOV_TITLE_DICT_SIZE = len(paddle.dataset.movielens.get_movie_title_dict()) + + mov_title_id = fluid.data( + name='movie_title', shape=[None], dtype='int64', lod_level=1) + + mov_title_emb = fluid.embedding( + input=mov_title_id, + size=[MOV_TITLE_DICT_SIZE, 32], + is_sparse=IS_SPARSE) + + mov_title_conv = nets.sequence_conv_pool( + input=mov_title_emb, + num_filters=32, + filter_size=3, + act="tanh", + pool_type="sum") + + concat_embed = layers.concat( + input=[mov_fc, mov_categories_hidden, mov_title_conv], axis=1) + + mov_combined_features = layers.fc(input=concat_embed, size=200, act="tanh") + + return mov_combined_features + + +def inference_program(): + usr_combined_features = get_usr_combined_features() + mov_combined_features = get_mov_combined_features() + + inference = layers.cos_sim( + X=usr_combined_features, Y=mov_combined_features) + scale_infer = layers.scale(x=inference, scale=5.0) + + label = fluid.data(name='score', shape=[None, 1], dtype='float32') + square_cost = layers.square_error_cost(input=scale_infer, label=label) + avg_cost = layers.mean(square_cost) + + return scale_infer, avg_cost + + +def optimizer_func(): + return fluid.optimizer.SGD(learning_rate=0.2) + + +def train(use_cuda, params_dirname): + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + + if args.enable_ce: + train_reader = fluid.io.batch( + paddle.dataset.movielens.train(), batch_size=BATCH_SIZE) + test_reader = fluid.io.batch( + paddle.dataset.movielens.test(), batch_size=BATCH_SIZE) + else: + train_reader = fluid.io.batch( + fluid.io.shuffle(paddle.dataset.movielens.train(), buf_size=8192), + batch_size=BATCH_SIZE) + test_reader = fluid.io.batch( + paddle.dataset.movielens.test(), batch_size=BATCH_SIZE) + + feed_order = [ + 'user_id', 'gender_id', 'age_id', 'job_id', 'movie_id', 'category_id', + 'movie_title', 'score' + ] + + main_program = fluid.default_main_program() + star_program = fluid.default_startup_program() + if args.enable_ce: + main_program.random_seed = 90 + star_program.random_seed = 90 + + scale_infer, avg_cost = inference_program() + + test_program = main_program.clone(for_test=True) + sgd_optimizer = optimizer_func() + sgd_optimizer.minimize(avg_cost) + exe = fluid.Executor(place) + + def train_test(program, reader): + count = 0 + feed_var_list = [ + program.global_block().var(var_name) for var_name in feed_order + ] + feeder_test = fluid.DataFeeder(feed_list=feed_var_list, place=place) + test_exe = fluid.Executor(place) + accumulated = 0 + for test_data in reader(): + avg_cost_np = test_exe.run( + program=program, + feed=feeder_test.feed(test_data), + fetch_list=[avg_cost]) + accumulated += avg_cost_np[0] + count += 1 + return accumulated / count + + def train_loop(): + feed_list = [ + main_program.global_block().var(var_name) + for var_name in feed_order + ] + feeder = fluid.DataFeeder(feed_list, place) + exe.run(star_program) + + for pass_id in range(PASS_NUM): + for batch_id, data in enumerate(train_reader()): + # train a mini-batch + outs = exe.run( + program=main_program, + feed=feeder.feed(data), + fetch_list=[avg_cost]) + out = np.array(outs[0]) + + # get 
test avg_cost + test_avg_cost = train_test(test_program, test_reader) + + # if test_avg_cost < 4.0: # Change this number to adjust accuracy + if batch_id == 20: + + if args.enable_ce: + print("kpis\ttest_cost\t%f" % float(test_avg_cost)) + + if params_dirname is not None: + fluid.io.save_inference_model(params_dirname, [ + "user_id", "gender_id", "age_id", "job_id", + "movie_id", "category_id", "movie_title" + ], [scale_infer], exe) + return + print('EpochID {0}, BatchID {1}, Test Loss {2:0.2}'.format( + pass_id + 1, batch_id + 1, float(test_avg_cost))) + + if math.isnan(float(out[0])): + sys.exit("got NaN loss, training failed.") + + train_loop() + + +def infer(use_cuda, params_dirname): + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + + # Use the first data from paddle.dataset.movielens.test() as input. + # Use create_lod_tensor(data, lod, place) API to generate LoD Tensor, + # where `data` is a list of sequences of index numbers, `lod` is + # the level of detail (lod) info associated with `data`. + # For example, data = [[10, 2, 3], [2, 3]] means that it contains + # two sequences of indexes, of length 3 and 2, respectively. + # Correspondingly, lod = [[3, 2]] contains one level of detail info, + # indicating that `data` consists of two sequences of length 3 and 2. + infer_movie_id = 783 + infer_movie_name = paddle.dataset.movielens.movie_info()[ + infer_movie_id].title + + exe = fluid.Executor(place) + + inference_scope = fluid.core.Scope() + + with fluid.scope_guard(inference_scope): + # Use fluid.io.load_inference_model to obtain the inference program desc, + # the feed_target_names (the names of variables that will be feeded + # data using feed operators), and the fetch_targets (variables that + # we want to obtain data from using fetch operators). + [inferencer, feed_target_names, + fetch_targets] = fluid.io.load_inference_model(params_dirname, exe) + + # Use the first data from paddle.dataset.movielens.test() as input + assert feed_target_names[0] == "user_id" + # Use create_lod_tensor(data, recursive_sequence_lengths, place) API + # to generate LoD Tensor where `data` is a list of sequences of index + # numbers, `recursive_sequence_lengths` is the length-based level of detail + # (lod) info associated with `data`. + # For example, data = [[10, 2, 3], [2, 3]] means that it contains + # two sequences of indexes, of length 3 and 2, respectively. + # Correspondingly, recursive_sequence_lengths = [[3, 2]] contains one + # level of detail info, indicating that `data` consists of two sequences + # of length 3 and 2, respectively. + user_id = np.array([1]).astype("int64").reshape(-1) + + assert feed_target_names[1] == "gender_id" + gender_id = np.array([1]).astype("int64").reshape(-1) + + assert feed_target_names[2] == "age_id" + age_id = np.array([0]).astype("int64").reshape(-1) + + assert feed_target_names[3] == "job_id" + job_id = np.array([10]).astype("int64").reshape(-1) + + assert feed_target_names[4] == "movie_id" + movie_id = np.array([783]).astype("int64").reshape(-1) + + assert feed_target_names[5] == "category_id" + category_id = fluid.create_lod_tensor( + np.array([10, 8, 9], dtype='int64'), [[3]], place) + + assert feed_target_names[6] == "movie_title" + movie_title = fluid.create_lod_tensor( + np.array([1069, 4140, 2923, 710, 988], dtype='int64'), [[5]], + place) + + # Construct feed as a dictionary of {feed_target_name: feed_target_data} + # and results will contain a list of data corresponding to fetch_targets. 
+ results = exe.run( + inferencer, + feed={ + feed_target_names[0]: user_id, + feed_target_names[1]: gender_id, + feed_target_names[2]: age_id, + feed_target_names[3]: job_id, + feed_target_names[4]: movie_id, + feed_target_names[5]: category_id, + feed_target_names[6]: movie_title + }, + fetch_list=fetch_targets, + return_numpy=False) + predict_rating = np.array(results[0]) + print("Predict Rating of user id 1 on movie \"" + infer_movie_name + + "\" is " + str(predict_rating[0][0])) + print("Actual Rating of user id 1 on movie \"" + infer_movie_name + + "\" is 4.") + + +def main(use_cuda): + if use_cuda and not fluid.core.is_compiled_with_cuda(): + return + params_dirname = "recommender_system.inference.model" + train(use_cuda=use_cuda, params_dirname=params_dirname) + infer(use_cuda=use_cuda, params_dirname=params_dirname) + + +if __name__ == '__main__': + args = parse_args() + PASS_NUM = args.num_epochs + use_cuda = args.use_gpu + main(use_cuda) diff --git a/doc/paddle/user_guides/simple_case/fit_a_line/.gitignore b/doc/paddle/user_guides/simple_case/fit_a_line/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..26af5d6d5ce9cfb1afd3534e574fa3612afde9b2 --- /dev/null +++ b/doc/paddle/user_guides/simple_case/fit_a_line/.gitignore @@ -0,0 +1,5 @@ +data/housing* +data/*.list +*.pyc +data/*.pyc +output diff --git a/doc/paddle/user_guides/simple_case/fit_a_line/.run_ce.sh b/doc/paddle/user_guides/simple_case/fit_a_line/.run_ce.sh new file mode 100644 index 0000000000000000000000000000000000000000..7e96905dd77f8a24f16c229fe5c522a9f5a8c8d5 --- /dev/null +++ b/doc/paddle/user_guides/simple_case/fit_a_line/.run_ce.sh @@ -0,0 +1,4 @@ +#!/bin/bash +#This file is only used for continuous evaluation. +python train.py --enable_ce | python _ce.py + diff --git a/doc/paddle/user_guides/simple_case/fit_a_line/README.cn.md b/doc/paddle/user_guides/simple_case/fit_a_line/README.cn.md new file mode 100644 index 0000000000000000000000000000000000000000..eb5b8c3446b97dae8fbe3e7879140216513a1120 --- /dev/null +++ b/doc/paddle/user_guides/simple_case/fit_a_line/README.cn.md @@ -0,0 +1,390 @@ +# 线性回归 +让我们从经典的线性回归(Linear Regression \[[1](#参考文献)\])模型开始这份教程。在这一章里,你将使用真实的数据集建立起一个房价预测模型,并且了解到机器学习中的若干重要概念。 + +本教程源代码目录在[book/fit_a_line](https://github.com/PaddlePaddle/book/tree/develop/01.fit_a_line), 初次使用请您参考[Book文档使用说明](https://github.com/PaddlePaddle/book/blob/develop/README.cn.md#运行这本书)。 + +### 说明: +1.硬件环境要求: +本文可支持在CPU、GPU下运行 +2. Docker镜像支持的CUDA/cuDNN版本: +如果使用了Docker运行Book,请注意:这里所提供的默认镜像的GPU环境为 CUDA 8/cuDNN 5,对于NVIDIA Tesla V100等要求CUDA 9的 GPU,使用该镜像可能会运行失败。 +3. 文档和脚本中代码的一致性问题: +请注意:为使本文更加易读易用,我们拆分、调整了train.py的代码并放入本文。本文中代码与train.py的运行结果一致,可直接运行[train.py](https://github.com/PaddlePaddle/book/blob/develop/01.fit_a_line/train.py)进行验证。 + +## 背景介绍 +给定一个大小为$n$的数据集 ${\{y_{i}, x_{i1}, ..., x_{id}\}}_{i=1}^{n}$,其中$x_{i1}, \ldots, x_{id}$是第$i$个样本$d$个属性上的取值,$y_i$是该样本待预测的目标。线性回归模型假设目标$y_i$可以被属性间的线性组合描述,即 + + +

+$$y_i = \omega_1x_{i1} + \omega_2x_{i2} + \ldots + \omega_dx_{id} + b, i=1,\ldots,n$$

+ +例如,在我们将要建模的房价预测问题里,$x_{ij}$是描述房子$i$的各种属性(比如房间的个数、周围学校和医院的个数、交通状况等),而 $y_i$是房屋的价格。 + +初看起来,这个假设实在过于简单了,变量间的真实关系很难是线性的。但由于线性回归模型有形式简单和易于建模分析的优点,它在实际问题中得到了大量的应用。很多经典的统计学习、机器学习书籍\[[2,3,4](#参考文献)\]也选择对线性模型独立成章重点讲解。 + +## 效果展示 +我们使用从[UCI Housing Data Set](http://paddlemodels.bj.bcebos.com/uci_housing/housing.data)获得的波士顿房价数据集进行模型的训练和预测。下面的散点图展示了使用模型对部分房屋价格进行的预测。其中,每个点的横坐标表示同一类房屋真实价格的中位数,纵坐标表示线性回归模型根据特征预测的结果,当二者值完全相等的时候就会落在虚线上。所以模型预测得越准确,则点离虚线越近。 +

+ 图1. 预测值 V.S. 真实值

+ +## 模型概览 + +### 模型定义 + +在波士顿房价数据集中,和房屋相关的值共有14个:前13个用来描述房屋相关的各种信息,即模型中的 $x_i$;最后一个值为我们要预测的该类房屋价格的中位数,即模型中的 $y_i$。因此,我们的模型就可以表示成: + +

+$$\hat{Y} = \omega_1X_{1} + \omega_2X_{2} + \ldots + \omega_{13}X_{13} + b$$

+ +$\hat{Y}$ 表示模型的预测结果,用来和真实值$Y$区分。模型要学习的参数即:$\omega_1, \ldots, \omega_{13}, b$。 + +建立模型后,我们需要给模型一个优化目标,使得学到的参数能够让预测值$\hat{Y}$尽可能地接近真实值$Y$。这里我们引入损失函数([Loss Function](https://en.wikipedia.org/wiki/Loss_function),或Cost Function)这个概念。 输入任意一个数据样本的目标值$y_{i}$和模型给出的预测值$\hat{y_{i}}$,损失函数输出一个非负的实值。这个实值通常用来反映模型误差的大小。 + +对于线性回归模型来讲,最常见的损失函数就是均方误差(Mean Squared Error, [MSE](https://en.wikipedia.org/wiki/Mean_squared_error))了,它的形式是: + +

+$$MSE=\frac{1}{n}\sum_{i=1}^{n}{(\hat{Y_i}-Y_i)}^2$$

+ +即对于一个大小为$n$的测试集,$MSE$是$n$个数据预测结果误差平方的均值。 + +对损失函数进行优化所采用的方法一般为梯度下降法。梯度下降法是一种一阶最优化算法。如果$f(x)$在点$x_n$有定义且可微,则认为$f(x)$在点$x_n$沿着梯度的负方向$-▽f(x_n)$下降的是最快的。反复调节$x$,使得$f(x)$接近最小值或者极小值,调节的方式为: + +

+$$x_{n+1}=x_n-λ▽f(x_n), n≧0$$

+ +其中λ代表学习率。这种调节的方法称为梯度下降法。 + +### 训练过程 + +定义好模型结构之后,我们要通过以下几个步骤进行模型训练 + 1. 初始化参数,其中包括权重$\omega_i$和偏置$b$,对其进行初始化(如0均值,1方差)。 + 2. 网络正向传播计算网络输出和损失函数。 + 3. 根据损失函数进行反向误差传播 ([backpropagation](https://en.wikipedia.org/wiki/Backpropagation)),将网络误差从输出层依次向前传递, 并更新网络中的参数。 + 4. 重复2~3步骤,直至网络训练误差达到规定的程度或训练轮次达到设定值。 + +## 数据集 + +### 数据集介绍 +这份数据集共506行,每行包含了波士顿郊区的一类房屋的相关信息及该类房屋价格的中位数。其各维属性的意义如下: + +| 属性名 | 解释 | 类型 | +| ------| ------ | ------ | +| CRIM | 该镇的人均犯罪率 | 连续值 | +| ZN | 占地面积超过25,000平方呎的住宅用地比例 | 连续值 | +| INDUS | 非零售商业用地比例 | 连续值 | +| CHAS | 是否邻近 Charles River | 离散值,1=邻近;0=不邻近 | +| NOX | 一氧化氮浓度 | 连续值 | +| RM | 每栋房屋的平均客房数 | 连续值 | +| AGE | 1940年之前建成的自用单位比例 | 连续值 | +| DIS | 到波士顿5个就业中心的加权距离 | 连续值 | +| RAD | 到径向公路的可达性指数 | 连续值 | +| TAX | 全值财产税率 | 连续值 | +| PTRATIO | 学生与教师的比例 | 连续值 | +| B | 1000(BK - 0.63)^2,其中BK为黑人占比 | 连续值 | +| LSTAT | 低收入人群占比 | 连续值 | +| MEDV | 同类房屋价格的中位数 | 连续值 | + +### 数据预处理 +#### 连续值与离散值 +观察一下数据,我们的第一个发现是:所有的13维属性中,有12维的连续值和1维的离散值(CHAS)。离散值虽然也常使用类似0、1、2这样的数字表示,但是其含义与连续值是不同的,因为这里的差值没有实际意义。例如,我们用0、1、2来分别表示红色、绿色和蓝色的话,我们并不能因此说“蓝色和红色”比“绿色和红色”的距离更远。所以通常对一个有$d$个可能取值的离散属性,我们会将它们转为$d$个取值为0或1的二值属性或者将每个可能取值映射为一个多维向量。不过就这里而言,因为CHAS本身就是一个二值属性,就省去了这个麻烦。 + +#### 属性的归一化 +另外一个稍加观察即可发现的事实是,各维属性的取值范围差别很大(如图2所示)。例如,属性B的取值范围是[0.32, 396.90],而属性NOX的取值范围是[0.3850, 0.8170]。这里就要用到一个常见的操作-归一化(normalization)了。归一化的目标是把各位属性的取值范围放缩到差不多的区间,例如[-0.5,0.5]。这里我们使用一种很常见的操作方法:减掉均值,然后除以原取值范围。 + +做归一化(或 [Feature scaling](https://en.wikipedia.org/wiki/Feature_scaling))至少有以下3个理由: + +- 过大或过小的数值范围会导致计算时的浮点上溢或下溢。 + +- 不同的数值范围会导致不同属性对模型的重要性不同(至少在训练的初始阶段如此),而这个隐含的假设常常是不合理的。这会对优化的过程造成困难,使训练时间大大的加长。 + +- 很多的机器学习技巧/模型(例如L1,L2正则项,向量空间模型-Vector Space Model)都基于这样的假设:所有的属性取值都差不多是以0为均值且取值范围相近的。 + +

+ 图2. 各维属性的取值范围
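+
+下面用 numpy 给出"减掉均值、除以原取值范围"这一归一化操作的最小示意(数据为随机生成,特征维数假设为13,仅作示意;完整的数据读取与预处理示例见下文):
+
+```python
+import numpy as np
+
+np.random.seed(0)
+raw = np.random.uniform(0.0, 400.0, size=(506, 13))  # 随机生成的数据,模拟13维属性
+
+maximums, minimums, avgs = raw.max(axis=0), raw.min(axis=0), raw.mean(axis=0)
+normalized = (raw - avgs) / (maximums - minimums)     # 减掉均值,再除以原取值范围
+
+print(normalized.min(axis=0).round(2))                # 各维取值大致被放缩到[-0.5, 0.5]附近
+print(normalized.max(axis=0).round(2))
+```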

+ +#### 整理训练集与测试集 +我们将数据集分割为两份:一份用于调整模型的参数,即进行模型的训练,模型在这份数据集上的误差被称为**训练误差**;另外一份被用来测试,模型在这份数据集上的误差被称为**测试误差**。我们训练模型的目的是为了通过从训练数据中找到规律来预测未知的新数据,所以测试误差是更能反映模型表现的指标。分割数据的比例要考虑到两个因素:更多的训练数据会降低参数估计的方差,从而得到更可信的模型;而更多的测试数据会降低测试误差的方差,从而得到更可信的测试误差。我们这个例子中设置的分割比例为$8:2$ + + +在更复杂的模型训练过程中,我们往往还会多使用一种数据集:验证集。因为复杂的模型中常常还有一些超参数([Hyperparameter](https://en.wikipedia.org/wiki/Hyperparameter_optimization))需要调节,所以我们会尝试多种超参数的组合来分别训练多个模型,然后对比它们在验证集上的表现选择相对最好的一组超参数,最后才使用这组参数下训练的模型在测试集上评估测试误差。由于本章训练的模型比较简单,我们暂且忽略掉这个过程。 + +## 训练 + +`fit_a_line/train.py`演示了训练的整体过程。 + +### 配置数据提供器(Datafeeder) +首先我们引入必要的库: +```python +from __future__ import print_function +import paddle +import paddle.fluid as fluid +import numpy +import math +import sys +``` + +我们通过uci_housing模块引入了数据集合[UCI Housing Data Set](http://paddlemodels.bj.bcebos.com/uci_housing/housing.data) + +其中,在uci_housing模块中封装了: + +1. 数据下载的过程。下载数据保存在~/.cache/paddle/dataset/uci_housing/housing.data。 +2. 数据预处理的过程。 + +接下来我们定义了用于训练的数据提供器。提供器每次读入一个大小为`BATCH_SIZE`的数据批次。如果用户希望加一些随机性,它可以同时定义一个批次大小和一个缓存大小。这样的话,每次数据提供器会从缓存中随机读取批次大小那么多的数据。 + +```python +BATCH_SIZE = 20 + +train_reader = fluid.io.batch( + fluid.io.shuffle( + paddle.dataset.uci_housing.train(), buf_size=500), + batch_size=BATCH_SIZE) + +test_reader = fluid.io.batch( + fluid.io.shuffle( + paddle.dataset.uci_housing.test(), buf_size=500), + batch_size=BATCH_SIZE) +``` + +如果想直接从txt文件中读取数据的话,可以参考以下方式(需要自行准备txt文件)。 +```text +feature_names = [ + 'CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', + 'PTRATIO', 'B', 'LSTAT', 'convert' +] +feature_num = len(feature_names) +data = numpy.fromfile(filename, sep=' ') # 从文件中读取原始数据 +data = data.reshape(data.shape[0] // feature_num, feature_num) +maximums, minimums, avgs = data.max(axis=0), data.min(axis=0), data.sum(axis=0)/data.shape[0] + +for i in six.moves.range(feature_num-1): + data[:, i] = (data[:, i] - avgs[i]) / (maximums[i] - minimums[i]) # six.moves可以兼容python2和python3 + +ratio = 0.8 # 训练集和验证集的划分比例 +offset = int(data.shape[0]*ratio) +train_data = data[:offset] +test_data = data[offset:] + +def reader_creator(train_data): + def reader(): + for d in train_data: + yield d[:-1], d[-1:] + return reader + +train_reader = fluid.io.batch( + fluid.io.shuffle( + reader_creator(train_data), buf_size=500), + batch_size=BATCH_SIZE) + +test_reader = fluid.io.batch( + fluid.io.shuffle( + reader_creator(test_data), buf_size=500), + batch_size=BATCH_SIZE) +``` + +### 配置训练程序 +训练程序的目的是定义一个训练模型的网络结构。对于线性回归来讲,它就是一个从输入到输出的简单的全连接层。更加复杂的结果,比如卷积神经网络,递归神经网络等会在随后的章节中介绍。训练程序必须返回`平均损失`作为第一个返回值,因为它会被后面反向传播算法所用到。 + +```python +x = fluid.data(name='x', shape=[None, 13], dtype='float32') # 定义输入的形状和数据类型 +y = fluid.data(name='y', shape=[None, 1], dtype='float32') # 定义输出的形状和数据类型 +y_predict = fluid.layers.fc(input=x, size=1, act=None) # 连接输入和输出的全连接层 + +main_program = fluid.default_main_program() # 获取默认/全局主函数 +startup_program = fluid.default_startup_program() # 获取默认/全局启动程序 + +cost = fluid.layers.square_error_cost(input=y_predict, label=y) # 利用标签数据和输出的预测数据估计方差 +avg_loss = fluid.layers.mean(cost) # 对方差求均值,得到平均损失 +``` +详细资料请参考: +[fluid.default_main_program](http://www.paddlepaddle.org/documentation/docs/zh/develop/api_cn/fluid_cn.html#default-main-program) +[fluid.default_startup_program](http://www.paddlepaddle.org/documentation/docs/zh/develop/api_cn/fluid_cn.html#default-startup-program) + +### Optimizer Function 配置 + +在下面的 `SGD optimizer`,`learning_rate` 是学习率,与网络的训练收敛速度有关系。 + +```python +#克隆main_program得到test_program +#有些operator在训练和测试之间的操作是不同的,例如batch_norm,使用参数for_test来区分该程序是用来训练还是用来测试 
+#该api不会删除任何操作符,请在backward和optimization之前使用 +test_program = main_program.clone(for_test=True) + +sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) +sgd_optimizer.minimize(avg_loss) + +``` + +### 定义运算场所 +我们可以定义运算是发生在CPU还是GPU + +```python +use_cuda = False +place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() # 指明executor的执行场所 + +###executor可以接受传入的program,并根据feed map(输入映射表)和fetch list(结果获取表)向program中添加数据输入算子和结果获取算子。使用close()关闭该executor,调用run(...)执行program。 +exe = fluid.Executor(place) + +``` +详细资料请参考: +[fluid.executor](http://www.paddlepaddle.org/documentation/docs/zh/develop/api_cn/fluid_cn.html#permalink-15-executor) + +### 创建训练过程 +训练需要有一个训练程序和一些必要参数,并构建了一个获取训练过程中测试误差的函数。必要参数有executor,program,reader,feeder,fetch_list,executor表示之前创建的执行器,program表示执行器所执行的program,是之前创建的program,如果该项参数没有给定的话则默认使用default_main_program,reader表示读取到的数据,feeder表示前向输入的变量,fetch_list表示用户想得到的变量或者命名的结果。 + +```python +num_epochs = 100 + +def train_test(executor, program, reader, feeder, fetch_list): + accumulated = 1 * [0] + count = 0 + for data_test in reader(): + outs = executor.run(program=program, + feed=feeder.feed(data_test), + fetch_list=fetch_list) + accumulated = [x_c[0] + x_c[1][0] for x_c in zip(accumulated, outs)] # 累加测试过程中的损失值 + count += 1 # 累加测试集中的样本数量 + return [x_d / count for x_d in accumulated] # 计算平均损失 + +``` + +### 训练主循环 + +给出需要存储的目录名,并初始化一个执行器。 + +```python +%matplotlib inline +params_dirname = "fit_a_line.inference.model" +feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) +exe.run(startup_program) +train_prompt = "train cost" +test_prompt = "test cost" +from paddle.utils.plot import Ploter +plot_prompt = Ploter(train_prompt, test_prompt) +step = 0 + +exe_test = fluid.Executor(place) +``` + +paddlepaddle提供了reader机制来读取训练数据。reader会一次提供多列数据,因此我们需要一个python的列表来定义读取顺序。我们构建一个循环来进行训练,直到训练结果足够好或者循环次数足够多。 +如果训练迭代次数满足参数保存的迭代次数,可以把训练参数保存到`params_dirname`。 +设置训练主循环 +```python +for pass_id in range(num_epochs): + for data_train in train_reader(): + avg_loss_value, = exe.run(main_program, + feed=feeder.feed(data_train), + fetch_list=[avg_loss]) + if step % 10 == 0: # 每10个批次记录并输出一下训练损失 + plot_prompt.append(train_prompt, step, avg_loss_value[0]) + plot_prompt.plot() + print("%s, Step %d, Cost %f" % + (train_prompt, step, avg_loss_value[0])) + if step % 100 == 0: # 每100批次记录并输出一下测试损失 + test_metics = train_test(executor=exe_test, + program=test_program, + reader=test_reader, + fetch_list=[avg_loss.name], + feeder=feeder) + plot_prompt.append(test_prompt, step, test_metics[0]) + plot_prompt.plot() + print("%s, Step %d, Cost %f" % + (test_prompt, step, test_metics[0])) + if test_metics[0] < 10.0: # 如果准确率达到要求,则停止训练 + break + + step += 1 + + if math.isnan(float(avg_loss_value[0])): + sys.exit("got NaN loss, training failed.") + + #保存训练参数到之前给定的路径中 + if params_dirname is not None: + fluid.io.save_inference_model(params_dirname, ['x'], [y_predict], exe) +``` + +## 预测 +需要构建一个使用训练好的参数来进行预测的程序,训练好的参数位置在`params_dirname`。 + +### 准备预测环境 +类似于训练过程,预测器需要一个预测程序来做预测。我们可以稍加修改我们的训练程序来把预测值包含进来。 + +```python +infer_exe = fluid.Executor(place) +inference_scope = fluid.core.Scope() +``` + +### 预测 + +保存图片 +```python +def save_result(points1, points2): + import matplotlib + matplotlib.use('Agg') + import matplotlib.pyplot as plt + x1 = [idx for idx in range(len(points1))] + y1 = points1 + y2 = points2 + l1 = plt.plot(x1, y1, 'r--', label='predictions') + l2 = plt.plot(x1, y2, 'g--', label='GT') + plt.plot(x1, y1, 'ro-', x1, y2, 'g+-') + plt.title('predictions VS GT') + plt.legend() + plt.savefig('./image/prediction_gt.png') +``` 
+ +通过fluid.io.load_inference_model,预测器会从`params_dirname`中读取已经训练好的模型,来对从未遇见过的数据进行预测。 + +```python +with fluid.scope_guard(inference_scope): + [inference_program, feed_target_names, + fetch_targets] = fluid.io.load_inference_model(params_dirname, infer_exe) # 载入预训练模型 + batch_size = 10 + + infer_reader = fluid.io.batch( + paddle.dataset.uci_housing.test(), batch_size=batch_size) # 准备测试集 + + infer_data = next(infer_reader()) + infer_feat = numpy.array( + [data[0] for data in infer_data]).astype("float32") # 提取测试集中的数据 + infer_label = numpy.array( + [data[1] for data in infer_data]).astype("float32") # 提取测试集中的标签 + + assert feed_target_names[0] == 'x' + results = infer_exe.run(inference_program, + feed={feed_target_names[0]: numpy.array(infer_feat)}, + fetch_list=fetch_targets) # 进行预测 + #打印预测结果和标签并可视化结果 + print("infer results: (House Price)") + for idx, val in enumerate(results[0]): + print("%d: %.2f" % (idx, val)) # 打印预测结果 + + print("\nground truth:") + for idx, val in enumerate(infer_label): + print("%d: %.2f" % (idx, val)) # 打印标签值 + + save_result(results[0], infer_label) # 保存图片 +``` +由于每次都是随机选择一个minibatch的数据作为当前迭代的训练数据,所以每次得到的预测结果会有所不同。 + + +## 总结 +在这章里,我们借助波士顿房价这一数据集,介绍了线性回归模型的基本概念,以及如何使用PaddlePaddle实现训练和测试的过程。很多的模型和技巧都是从简单的线性回归模型演化而来,因此弄清楚线性模型的原理和局限非常重要。 + + +## 参考文献 +1. https://en.wikipedia.org/wiki/Linear_regression +2. Friedman J, Hastie T, Tibshirani R. The elements of statistical learning[M]. Springer, Berlin: Springer series in statistics, 2001. +3. Murphy K P. Machine learning: a probabilistic perspective[M]. MIT press, 2012. +4. Bishop C M. Pattern recognition[J]. Machine Learning, 2006, 128. + +
+知识共享许可协议
本教程PaddlePaddle 创作,采用 知识共享 署名-相同方式共享 4.0 国际 许可协议进行许可。 diff --git a/doc/paddle/user_guides/simple_case/fit_a_line/README.md b/doc/paddle/user_guides/simple_case/fit_a_line/README.md new file mode 100644 index 0000000000000000000000000000000000000000..2bc2b24c4d8fa3a4bff0ff29d8fb02f4437e8e95 --- /dev/null +++ b/doc/paddle/user_guides/simple_case/fit_a_line/README.md @@ -0,0 +1,392 @@ +# Linear Regression + +Let's start this tutorial from the classic Linear Regression ([[1](#References)]) model. + +In this chapter, you will build a model to predict house price with real datasets and learn about several important concepts about machine learning. + +The source code of this tutorial is in [book/fit_a_line](https://github.com/PaddlePaddle/book/tree/develop/01.fit_a_line). For the new users, please refer to [Running This Book](https://github.com/PaddlePaddle/book/blob/develop/README.md#running-the-book) . + + + +## Background +Given a $n$ dataset ${\{y_{i}, x_{i1}, ..., x_{id}\}}_{i=1}^{n}$, of which $ x_{i1}, \ldots, x_{id}$ are the values of the $d$th attribute of $i$ sample, and $y_i$ is the target to be predicted for this sample. + + The linear regression model assumes that the target $y_i$ can be described by a linear combination among attributes, i.e. + +$$y_i = \omega_1x_{i1} + \omega_2x_{i2} + \ldots + \omega_dx_{id} + b, i=1,\ldots,n$$ + +For example, in the problem of prediction of house price we are going to explore, $x_{ij}$ is a description of the various attributes of the house $i$ (such as the number of rooms, the number of schools and hospitals around, traffic conditions, etc.). $y_i$ is the price of the house. + + + +At first glance, this assumption is too simple, and the true relationship among variables is unlikely to be linear. However, because the linear regression model has the advantages of simple form and easy to be modeled and analyzed, it has been widely applied in practical problems. Many classic statistical learning and machine learning books \[[2,3,4](#references)\] also focus on linear model in a chapter. + +## Result Demo +We used the Boston house price dataset obtained from [UCI Housing dataset](http://paddlemodels.bj.bcebos.com/uci_housing/housing.data) to train and predict the model. The scatter plot below shows the result of price prediction for parts of house with model. Each point on x-axis represents the median of the real price of the same type of house, and the y-axis represents the result of the linear regression model based on the feature prediction. When the two values are completely equal, they will fall on the dotted line. So the more accurate the model is predicted, the closer the point is to the dotted line. +

+
Figure 1. Predicted value vs. ground-truth value

## Model Overview

### Model Definition

In the Boston house price dataset, there are 14 values associated with each home: the first 13 describe various attributes of the house, i.e. $x_i$ in the model; the last value is the median price of the house, which is the $y_i$ we want to predict.

Therefore, our model can be expressed as:

$$\hat{Y} = \omega_1X_{1} + \omega_2X_{2} + \ldots + \omega_{13}X_{13} + b$$

$\hat{Y}$ represents the predicted result of the model and is used to distinguish it from the real value $Y$. The parameters to be learned by the model are: $\omega_1, \ldots, \omega_{13}, b$.

After building the model, we need to give it an optimization goal so that the learned parameters make the predicted value $\hat{Y}$ as close as possible to the true value $Y$. Here we introduce the concept of a loss function ([Loss Function](https://en.wikipedia.org/wiki/Loss_function), or Cost Function). Given the target value $y_{i}$ of any data sample and the predicted value $\hat{y_{i}}$ produced by a model, the loss function outputs a non-negative real number, which is usually used to represent the model error.

For linear regression models, the most common loss function is the Mean Squared Error ([MSE](https://en.wikipedia.org/wiki/Mean_squared_error)):

$$MSE=\frac{1}{n}\sum_{i=1}^{n}{(\hat{Y_i}-Y_i)}^2$$

That is, for a test set of size $n$, $MSE$ is the mean of the squared errors of the $n$ predictions.

The loss function is usually optimized with gradient descent, a first-order optimization algorithm. If $f(x)$ is defined and differentiable at the point $x_n$, then $f(x)$ decreases fastest along the direction of the negative gradient $-\nabla f(x_n)$ at $x_n$. Repeatedly adjusting $x$ in that direction brings $f(x)$ close to a local or global minimum. The update rule is:

$$x_{n+1}=x_n-\lambda \nabla f(x_n), \quad n\geq 0$$

where $\lambda$ is the learning rate. This method of adjustment is called gradient descent.

### Training Process

After defining the model structure, we train the model through the following steps.

 1. Initialize the parameters, including the weights $\omega_i$ and the bias $b$ (e.g. with mean 0 and variance 1).
 2. Compute the network output and the loss function by forward propagation.
 3. Propagate the error backward from the output layer according to the loss function ([backpropagation](https://en.wikipedia.org/wiki/Backpropagation)) and update the parameters in the network.
 4. Repeat steps 2~3 until the training error reaches the specified level or the number of training rounds reaches the set value.


## Dataset

### Dataset Introduction
The dataset consists of 506 rows, each describing a type of house in a suburb of Boston and the median price of that type of house.
The meaning of each attribute is as follows:

| Attribute Name | Explanation | Type |
| ------ | ------ | ------ |
| CRIM | Per capita crime rate in the town | Continuous value |
| ZN | Proportion of residential land zoned for lots over 25,000 square feet | Continuous value |
| INDUS | Proportion of non-retail commercial land | Continuous value |
| CHAS | Whether it is adjacent to the Charles River | Discrete value, 1=adjacent; 0=not adjacent |
| NOX | Nitric oxide concentration | Continuous value |
| RM | Average number of rooms per house | Continuous value |
| AGE | Proportion of owner-occupied units built before 1940 | Continuous value |
| DIS | Weighted distance to 5 employment centers in Boston | Continuous value |
| RAD | Accessibility index to radial highways | Continuous value |
| TAX | Full-value property tax rate | Continuous value |
| PTRATIO | Student-teacher ratio | Continuous value |
| B | 1000(BK - 0.63)^2, where BK is the proportion of the black population | Continuous value |
| LSTAT | Proportion of low-income population | Continuous value |
| MEDV | Median price of homes of the same type | Continuous value |

### Data Pre-processing

#### Continuous values and discrete values
Looking at the data, we first find that of the 13 attributes, 12 are continuous and 1 is discrete (CHAS). A discrete attribute is often represented by numbers like 0, 1, and 2, but its meaning differs from that of a continuous attribute, because the differences between discrete values carry no meaning. For example, if we use 0, 1, and 2 to represent red, green, and blue, we cannot infer that the distance between blue and red is larger than that between green and red. So for a discrete attribute with $d$ possible values, we usually convert it to $d$ binary attributes with values of 0 or 1, or map each possible value to a multidimensional vector. CHAS does not have this problem, however, since it is already a binary attribute.

#### Normalization of attributes
Another fact that is easy to see is that the value ranges of the attributes differ widely (as shown in Figure 2). For example, the value range of attribute B is [0.32, 396.90], while the value range of attribute NOX is [0.3850, 0.8170]. Here we apply a common operation - normalization. The goal of normalization is to scale the value of each attribute to a similar range, such as [-0.5, 0.5]. Here we use a very common method: subtract the mean and divide by the range of values.

There are at least three reasons for normalization (or [Feature scaling](https://en.wikipedia.org/wiki/Feature_scaling)):

- Value ranges that are too large or too small can cause floating-point overflow or underflow during computation.

- Different value ranges cause different attributes to carry different weight for the model (at least in the early stage of training), and this implicit assumption is often unreasonable. It can make optimization difficult and greatly lengthen the training time.

- Many machine learning techniques/models (such as L1/L2 regularization and the Vector Space Model) are based on the assumption that all attribute values are centered around zero and have similar ranges.

+
+ Figure 2. Value range of attributes for all dimensions +

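To make the normalization step concrete, here is a minimal numpy sketch. It assumes the raw dataset has already been loaded into a numpy array named `data`, with one row per sample and the label (MEDV) in the last column; the array name and layout are illustrative, not part of the original tutorial code. The `*.txt` reading example later in this section performs essentially the same operation inline.

```python
import numpy

def normalize(data):
    """Scale each feature column to a similar range: (x - mean) / (max - min)."""
    features = data[:, :-1]                 # all columns except the label (MEDV)
    maximums = features.max(axis=0)
    minimums = features.min(axis=0)
    avgs = features.mean(axis=0)
    data[:, :-1] = (features - avgs) / (maximums - minimums)
    return data

# Example: 506 samples x 14 columns (13 features + MEDV). Random values are
# used here only to show the call; real data would come from housing.data.
data = numpy.random.rand(506, 14).astype("float32")
data = normalize(data)
```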
+ +#### Organizing training set and testing set + +We split the dataset into two parts: one is used to adjust the parameters of the model, that is, to train the model, the error of the model on this dataset is called ** training error **; the other is used to test.The error of the model on this dataset is called the ** test error**. The goal of our training model is to predict unknown new data by finding the regulation from the training data, so the test error is an better indicator for the performance of the model. When it comes to the ratio of the segmentation data, we should take into account two factors: more training data will reduce the square error of estimated parameters, resulting in a more reliable model; and more test data will reduce the square error of the test error, resulting in more credible test error. The split ratio set in our example is $8:2$ + + +In a more complex model training process, we often need more than one dataset: the validation set. Because complex models often have some hyperparameters ([Hyperparameter](https://en.wikipedia.org/wiki/Hyperparameter_optimization)) that need to be adjusted, we will try a combination of multiple hyperparameters to train multiple models separately and then compare their performance on the validation set to select the relatively best set of hyperparameters, and finally use the model with this set of parameters to evaluate the test error on the test set. Since the model trained in this chapter is relatively simple, we won't talk about this process at present. + +## Training + +`fit_a_line/train.py` demonstrates the overall process of training. + +### Configuring the Data feeder + +First we import the libraries: + +```python +from __future__ import print_function +import paddle +import paddle.fluid as fluid +import numpy +import math +import sys +``` + +We introduced the dataset [UCI Housing dataset](http://paddlemodels.bj.bcebos.com/uci_housing/housing.data) via the uci_housing module + +It is encapsulated in the uci_housing module: + +1. The process of data download. The download data is saved in ~/.cache/paddle/dataset/uci_housing/housing.data. +2. The process of [data preprocessing](#data preprocessing). + +Next we define the data feeder for training. The data feeder reads a batch of data in the size of `BATCH_SIZE` each time. If the user wants the data to be random, it can define data in size of a batch and a cache. In this case, each time the data feeder randomly reads as same data as the batch size from the cache. + +```python +BATCH_SIZE = 20 + +train_reader = fluid.io.batch( + fluid.io.shuffle( + paddle.dataset.uci_housing.train(), buf_size=500), + batch_size=BATCH_SIZE) + +test_reader = fluid.io.batch( + fluid.io.shuffle( + paddle.dataset.uci_housing.test(), buf_size=500), + batch_size=BATCH_SIZE) +``` + +If you want to read data directly from \*.txt file, you can refer to the method as follows(need to prepare txt file by yourself). 
```python
import six  # six.moves makes the loop below compatible with Python 2 and 3

feature_names = [
    'CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX',
    'PTRATIO', 'B', 'LSTAT', 'convert'
]
feature_num = len(feature_names)
# filename is the path to your own *.txt data file (same layout as housing.data)
data = numpy.fromfile(filename, sep=' ')  # read raw data from file
data = data.reshape(data.shape[0] // feature_num, feature_num)
maximums, minimums, avgs = data.max(axis=0), data.min(axis=0), data.sum(axis=0) / data.shape[0]

for i in six.moves.range(feature_num - 1):
    data[:, i] = (data[:, i] - avgs[i]) / (maximums[i] - minimums[i])  # normalize each feature column

ratio = 0.8  # split ratio between training set and verification set
offset = int(data.shape[0] * ratio)
train_data = data[:offset]
test_data = data[offset:]

def reader_creator(train_data):
    def reader():
        for d in train_data:
            yield d[:-1], d[-1:]
    return reader

train_reader = fluid.io.batch(
    fluid.io.shuffle(
        reader_creator(train_data), buf_size=500),
    batch_size=BATCH_SIZE)

test_reader = fluid.io.batch(
    fluid.io.shuffle(
        reader_creator(test_data), buf_size=500),
    batch_size=BATCH_SIZE)
```

### Configure Program for Training
The program for training defines the network structure of the model. For linear regression, it is a simple fully connected layer from input to output. More complex networks, such as convolutional neural networks and recurrent neural networks, will be introduced in later chapters. The training program must return the `mean error` as its first return value, because the `mean error` is used for backpropagation.

```python
x = fluid.data(name='x', shape=[None, 13], dtype='float32')  # define shape and data type of the input
y = fluid.data(name='y', shape=[None, 1], dtype='float32')   # define shape and data type of the output
y_predict = fluid.layers.fc(input=x, size=1, act=None)       # fully connected layer connecting input and output

main_program = fluid.default_main_program()        # get the default/global main program
startup_program = fluid.default_startup_program()  # get the default/global startup program

cost = fluid.layers.square_error_cost(input=y_predict, label=y)  # squared error between prediction and label
avg_loss = fluid.layers.mean(cost)                               # mean of the squared error, i.e. the loss
```
For details, please refer to:
[fluid.default_main_program](http://www.paddlepaddle.org/documentation/docs/zh/develop/api_cn/fluid_cn.html#default-main-program)
[fluid.default_startup_program](http://www.paddlepaddle.org/documentation/docs/zh/develop/api_cn/fluid_cn.html#default-startup-program)

### Optimizer Function Configuration

In the `SGD optimizer` below, `learning_rate` is the learning rate, which is related to how fast the network training converges.

```python
# Clone main_program to get test_program.
# Some operators behave differently in training and testing. For example, batch_norm uses the for_test parameter to determine whether the program is for training or for testing.
# The API does not delete any operator; please call it before backward and optimization.
test_program = main_program.clone(for_test=True)

sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
sgd_optimizer.minimize(avg_loss)

```

### Define Training Place

We can define whether an operation runs on the CPU or on the GPU.
+ +```python +use_cuda = False +place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() # define the execution space of executor + +###executor can accept input program and add data input operator and result fetch operator based on feed map and fetch list. Use close() to close executor and call run(...) to run the program. +exe = fluid.Executor(place) + +``` +For details, please refer to: +[fluid.executor](http://www.paddlepaddle.org/documentation/docs/zh/develop/api_cn/fluid_cn.html#permalink-15-executor) + +### Create Training Process +To train, it needs a train program and some parameters and creates a function to get test error in the process of train necessary parameters contain executor, program, reader, feeder, fetch_list, executor represents executor created before. Program created before represents program executed by executor. If the parameter is undefined, then it is defined default_main_program by default. Reader represents data read. Feeder represents forward input variable and fetch_list represents variable user wants to get or name. + +```python +num_epochs = 100 + +def train_test(executor, program, reader, feeder, fetch_list): + accumulated = 1 * [0] + count = 0 + for data_test in reader(): + outs = executor.run(program=program, + feed=feeder.feed(data_test), + fetch_list=fetch_list) + accumulated = [x_c[0] + x_c[1][0] for x_c in zip(accumulated, outs)] # accumulate loss value in the process of test + count += 1 # accumulate samples in test dataset + return [x_d / count for x_d in accumulated] # compute mean loss + +``` + +### Train Main Loop + +give name of directory to be stored and initialize an executor + +```python +%matplotlib inline +params_dirname = "fit_a_line.inference.model" +feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) +exe.run(startup_program) +train_prompt = "train cost" +test_prompt = "test cost" +from paddle.utils.plot import Ploter +plot_prompt = Ploter(train_prompt, test_prompt) +step = 0 + +exe_test = fluid.Executor(place) +``` +Paddlepaddle provides reader mechanism to read training data. Reader provide multiple columns of data at one time. Therefore, we need a python list to read sequence. We create a loop to train until the result of train is good enough or time of loop is enough. +If the number of iterations for train is equal to the number of iterations for saving parameters, you can save train parameter into `params_dirname`. +Set main loop for training. +```python +for pass_id in range(num_epochs): + for data_train in train_reader(): + avg_loss_value, = exe.run(main_program, + feed=feeder.feed(data_train), + fetch_list=[avg_loss]) + if step % 10 == 0: # record and output train loss for every 10 batches. + plot_prompt.append(train_prompt, step, avg_loss_value[0]) + plot_prompt.plot() + print("%s, Step %d, Cost %f" % + (train_prompt, step, avg_loss_value[0])) + if step % 100 == 0: # record and output test loss for every 100 batches. + test_metics = train_test(executor=exe_test, + program=test_program, + reader=test_reader, + fetch_list=[avg_loss.name], + feeder=feeder) + plot_prompt.append(test_prompt, step, test_metics[0]) + plot_prompt.plot() + print("%s, Step %d, Cost %f" % + (test_prompt, step, test_metics[0])) + if test_metics[0] < 10.0: # If the accuracy is up to the requirement, the train can be stopped. 
+ break + + step += 1 + + if math.isnan(float(avg_loss_value[0])): + sys.exit("got NaN loss, training failed.") + + #save train parameters into the path given before + if params_dirname is not None: + fluid.io.save_inference_model(params_dirname, ['x'], [y_predict], exe) +``` + +## Predict +It needs to create trained parameters to run program for prediction. The trained parameters is in `params_dirname`. + +### Prepare Environment for Prediction +Similar to the process of training, predictor needs a program for prediction. We can slightly modify our training program to include the prediction value. + +```python +infer_exe = fluid.Executor(place) +inference_scope = fluid.core.Scope() +``` + +### Predict + +Save pictures +```python +def save_result(points1, points2): + import matplotlib + matplotlib.use('Agg') + import matplotlib.pyplot as plt + x1 = [idx for idx in range(len(points1))] + y1 = points1 + y2 = points2 + l1 = plt.plot(x1, y1, 'r--', label='predictions') + l2 = plt.plot(x1, y2, 'g--', label='GT') + plt.plot(x1, y1, 'ro-', x1, y2, 'g+-') + plt.title('predictions VS GT') + plt.legend() + plt.savefig('./image/prediction_gt.png') +``` + +Via fluid.io.load_inference_model, predictor will read well-trained model from `params_dirname` to predict unknown data. + +```python +with fluid.scope_guard(inference_scope): + [inference_program, feed_target_names, + fetch_targets] = fluid.io.load_inference_model(params_dirname, infer_exe) # load pre-predict model + batch_size = 10 + + infer_reader = fluid.io.batch( + paddle.dataset.uci_housing.test(), batch_size=batch_size) # prepare test dataset + + infer_data = next(infer_reader()) + infer_feat = numpy.array( + [data[0] for data in infer_data]).astype("float32") # extract data in test dataset + infer_label = numpy.array( + [data[1] for data in infer_data]).astype("float32") # extract label in test dataset + + assert feed_target_names[0] == 'x' + results = infer_exe.run(inference_program, + feed={feed_target_names[0]: numpy.array(infer_feat)}, + fetch_list=fetch_targets) # predict + #print predict result and label and visualize the result + print("infer results: (House Price)") + for idx, val in enumerate(results[0]): + print("%d: %.2f" % (idx, val)) # print predict result + + print("\nground truth:") + for idx, val in enumerate(infer_label): + print("%d: %.2f" % (idx, val)) # print label + + save_result(results[0], infer_label) # save picture +``` + + + +## Summary +In this chapter, we analyzed dataset of Boston House Price to introduce the basic concepts of linear regression model and how to use PaddlePaddle to implement training and testing. A number of models and theories are derived from linear regression model. Therefore, it is not unnecessary to figure out the principle and limitation of linear regression model. + + +## References +1. https://en.wikipedia.org/wiki/Linear_regression +2. Friedman J, Hastie T, Tibshirani R. The elements of statistical learning[M]. Springer, Berlin: Springer series in statistics, 2001. +3. Murphy K P. Machine learning: a probabilistic perspective[M]. MIT press, 2012. +4. Bishop C M. Pattern recognition[J]. Machine Learning, 2006, 128. + +
+知识共享许可协议
This tutorial is contributed by PaddlePaddle, and licensed under a Creative Commons Attribution-ShareAlike 4.0 International License. diff --git a/doc/paddle/user_guides/simple_case/fit_a_line/_ce.py b/doc/paddle/user_guides/simple_case/fit_a_line/_ce.py new file mode 100644 index 0000000000000000000000000000000000000000..6670b4addf7fb1a0a36f1e181c78f44f4b317a54 --- /dev/null +++ b/doc/paddle/user_guides/simple_case/fit_a_line/_ce.py @@ -0,0 +1,40 @@ +### This file is only used for continuous evaluation test! +from __future__ import print_function +from __future__ import division +from __future__ import absolute_import +import os +import sys +sys.path.append(os.environ['ceroot']) +from kpi import CostKpi + +train_cost_kpi = CostKpi( + 'train_cost', 0.02, 0, actived=True, desc='train cost') +test_cost_kpi = CostKpi('test_cost', 0.02, 0, actived=True, desc='test cost') +tracking_kpis = [train_cost_kpi, test_cost_kpi] + + +def parse_log(log): + for line in log.split('\n'): + fs = line.strip().split('\t') + print(fs) + if len(fs) == 3 and fs[0] == 'kpis': + print("-----%s" % fs) + kpi_name = fs[1] + kpi_value = float(fs[2]) + yield kpi_name, kpi_value + + +def log_to_ce(log): + kpi_tracker = {} + for kpi in tracking_kpis: + kpi_tracker[kpi.name] = kpi + + for (kpi_name, kpi_value) in parse_log(log): + print(kpi_name, kpi_value) + kpi_tracker[kpi_name].add_record(kpi_value) + kpi_tracker[kpi_name].persist() + + +if __name__ == '__main__': + log = sys.stdin.read() + log_to_ce(log) diff --git a/doc/paddle/user_guides/simple_case/fit_a_line/fit_a_line.tar b/doc/paddle/user_guides/simple_case/fit_a_line/fit_a_line.tar new file mode 100644 index 0000000000000000000000000000000000000000..4a0e79a02d43bdede95e25a7570c12f3104da185 Binary files /dev/null and b/doc/paddle/user_guides/simple_case/fit_a_line/fit_a_line.tar differ diff --git a/doc/paddle/user_guides/simple_case/fit_a_line/fluid/fit_a_line.fluid.tar b/doc/paddle/user_guides/simple_case/fit_a_line/fluid/fit_a_line.fluid.tar new file mode 100644 index 0000000000000000000000000000000000000000..e633413190697c09745eeff3478241f1b79f6f16 Binary files /dev/null and b/doc/paddle/user_guides/simple_case/fit_a_line/fluid/fit_a_line.fluid.tar differ diff --git a/doc/paddle/user_guides/simple_case/fit_a_line/image/formula_fit_a_line_1.png b/doc/paddle/user_guides/simple_case/fit_a_line/image/formula_fit_a_line_1.png new file mode 100644 index 0000000000000000000000000000000000000000..6f43fb6a7ec96aaf1fc8cf6b6f239561c0a677b2 Binary files /dev/null and b/doc/paddle/user_guides/simple_case/fit_a_line/image/formula_fit_a_line_1.png differ diff --git a/doc/paddle/user_guides/simple_case/fit_a_line/image/formula_fit_a_line_2.png b/doc/paddle/user_guides/simple_case/fit_a_line/image/formula_fit_a_line_2.png new file mode 100644 index 0000000000000000000000000000000000000000..a665fc60aeaa6e1a5c328f06b07f39e4f2af78c8 Binary files /dev/null and b/doc/paddle/user_guides/simple_case/fit_a_line/image/formula_fit_a_line_2.png differ diff --git a/doc/paddle/user_guides/simple_case/fit_a_line/image/formula_fit_a_line_3.png b/doc/paddle/user_guides/simple_case/fit_a_line/image/formula_fit_a_line_3.png new file mode 100644 index 0000000000000000000000000000000000000000..97a242ca5065128969d0d0d749a053d5640270ff Binary files /dev/null and b/doc/paddle/user_guides/simple_case/fit_a_line/image/formula_fit_a_line_3.png differ diff --git a/doc/paddle/user_guides/simple_case/fit_a_line/image/formula_fit_a_line_4.png 
b/doc/paddle/user_guides/simple_case/fit_a_line/image/formula_fit_a_line_4.png new file mode 100644 index 0000000000000000000000000000000000000000..6979b6cda7c40a68204f7e963d6cdf0dbba01dd2 Binary files /dev/null and b/doc/paddle/user_guides/simple_case/fit_a_line/image/formula_fit_a_line_4.png differ diff --git a/doc/paddle/user_guides/simple_case/fit_a_line/image/prediction_gt.png b/doc/paddle/user_guides/simple_case/fit_a_line/image/prediction_gt.png new file mode 100644 index 0000000000000000000000000000000000000000..e029d2a75fee63dd14e53a1b6be29611362293a5 Binary files /dev/null and b/doc/paddle/user_guides/simple_case/fit_a_line/image/prediction_gt.png differ diff --git a/doc/paddle/user_guides/simple_case/fit_a_line/image/predictions.png b/doc/paddle/user_guides/simple_case/fit_a_line/image/predictions.png new file mode 100644 index 0000000000000000000000000000000000000000..27e4acb1313794f52ad9ad9e874cdadd197ff41f Binary files /dev/null and b/doc/paddle/user_guides/simple_case/fit_a_line/image/predictions.png differ diff --git a/doc/paddle/user_guides/simple_case/fit_a_line/image/predictions_en.png b/doc/paddle/user_guides/simple_case/fit_a_line/image/predictions_en.png new file mode 100644 index 0000000000000000000000000000000000000000..f111c7cd766b7e9981513cc8c65be87dbbf3a79e Binary files /dev/null and b/doc/paddle/user_guides/simple_case/fit_a_line/image/predictions_en.png differ diff --git a/doc/paddle/user_guides/simple_case/fit_a_line/image/ranges.png b/doc/paddle/user_guides/simple_case/fit_a_line/image/ranges.png new file mode 100644 index 0000000000000000000000000000000000000000..916337f0720ef221851e89456c5c295e2e13445f Binary files /dev/null and b/doc/paddle/user_guides/simple_case/fit_a_line/image/ranges.png differ diff --git a/doc/paddle/user_guides/simple_case/fit_a_line/image/ranges_en.png b/doc/paddle/user_guides/simple_case/fit_a_line/image/ranges_en.png new file mode 100644 index 0000000000000000000000000000000000000000..6d6a079bfdcc33617f6cf36612b271b48be6304f Binary files /dev/null and b/doc/paddle/user_guides/simple_case/fit_a_line/image/ranges_en.png differ diff --git a/doc/paddle/user_guides/simple_case/fit_a_line/image/train_and_test.png b/doc/paddle/user_guides/simple_case/fit_a_line/image/train_and_test.png new file mode 100644 index 0000000000000000000000000000000000000000..bcd304a6a0baf30ecfbc43e08fc0aca179d05958 Binary files /dev/null and b/doc/paddle/user_guides/simple_case/fit_a_line/image/train_and_test.png differ diff --git a/doc/paddle/user_guides/simple_case/fit_a_line/index.cn.html b/doc/paddle/user_guides/simple_case/fit_a_line/index.cn.html new file mode 100644 index 0000000000000000000000000000000000000000..02117e6d41fdcfdae5262cba59f2b8d1ee92a79d --- /dev/null +++ b/doc/paddle/user_guides/simple_case/fit_a_line/index.cn.html @@ -0,0 +1,454 @@ + + + + + + + + + + + + + + + + + +
+
+ + + + + + + diff --git a/doc/paddle/user_guides/simple_case/fit_a_line/index.html b/doc/paddle/user_guides/simple_case/fit_a_line/index.html new file mode 100644 index 0000000000000000000000000000000000000000..3496bd7c54d28fa5c96a6e197bec906548698dee --- /dev/null +++ b/doc/paddle/user_guides/simple_case/fit_a_line/index.html @@ -0,0 +1,456 @@ + + + + + + + + + + + + + + + + + +
+
+ + + + + + + diff --git a/doc/paddle/user_guides/simple_case/fit_a_line/train.py b/doc/paddle/user_guides/simple_case/fit_a_line/train.py new file mode 100644 index 0000000000000000000000000000000000000000..20fe8592da09af8b339df492622c536fde66e1ce --- /dev/null +++ b/doc/paddle/user_guides/simple_case/fit_a_line/train.py @@ -0,0 +1,201 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import sys +import argparse + +import math +import numpy + +import paddle +import paddle.fluid as fluid + + +def parse_args(): + parser = argparse.ArgumentParser("fit_a_line") + parser.add_argument( + '--enable_ce', + action='store_true', + help="If set, run the task with continuous evaluation logs.") + parser.add_argument( + '--use_gpu', + type=bool, + default=False, + help="Whether to use GPU or not.") + parser.add_argument( + '--num_epochs', type=int, default=100, help="number of epochs.") + args = parser.parse_args() + return args + + +# For training test cost +def train_test(executor, program, reader, feeder, fetch_list): + accumulated = 1 * [0] + count = 0 + for data_test in reader(): + outs = executor.run( + program=program, + feed=feeder.feed(data_test), + fetch_list=fetch_list) + accumulated = [x_c[0] + x_c[1][0] for x_c in zip(accumulated, outs)] + count += 1 + return [x_d / count for x_d in accumulated] + + +def save_result(points1, points2): + import matplotlib + matplotlib.use('Agg') + import matplotlib.pyplot as plt + x1 = [idx for idx in range(len(points1))] + y1 = points1 + y2 = points2 + l1 = plt.plot(x1, y1, 'r--', label='predictions') + l2 = plt.plot(x1, y2, 'g--', label='GT') + plt.plot(x1, y1, 'ro-', x1, y2, 'g+-') + plt.title('predictions VS GT') + plt.legend() + plt.savefig('./image/prediction_gt.png') + + +def main(): + batch_size = 20 + + if args.enable_ce: + train_reader = fluid.io.batch( + paddle.dataset.uci_housing.train(), batch_size=batch_size) + test_reader = fluid.io.batch( + paddle.dataset.uci_housing.test(), batch_size=batch_size) + else: + train_reader = fluid.io.batch( + fluid.io.shuffle(paddle.dataset.uci_housing.train(), buf_size=500), + batch_size=batch_size) + test_reader = fluid.io.batch( + fluid.io.shuffle(paddle.dataset.uci_housing.test(), buf_size=500), + batch_size=batch_size) + + # feature vector of length 13 + x = fluid.data(name='x', shape=[None, 13], dtype='float32') + y = fluid.data(name='y', shape=[None, 1], dtype='float32') + + main_program = fluid.default_main_program() + startup_program = fluid.default_startup_program() + + if args.enable_ce: + main_program.random_seed = 90 + startup_program.random_seed = 90 + + y_predict = fluid.layers.fc(input=x, size=1, act=None) + cost = fluid.layers.square_error_cost(input=y_predict, label=y) + avg_loss = fluid.layers.mean(cost) + + test_program = main_program.clone(for_test=True) + + sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) + sgd_optimizer.minimize(avg_loss) + + # can use CPU or GPU + use_cuda = 
args.use_gpu + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + exe = fluid.Executor(place) + + # Specify the directory to save the parameters + params_dirname = "fit_a_line.inference.model" + num_epochs = args.num_epochs + + # main train loop. + feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) + exe.run(startup_program) + + train_prompt = "Train cost" + test_prompt = "Test cost" + step = 0 + + exe_test = fluid.Executor(place) + + for pass_id in range(num_epochs): + for data_train in train_reader(): + avg_loss_value, = exe.run( + main_program, + feed=feeder.feed(data_train), + fetch_list=[avg_loss]) + if step % 10 == 0: # record a train cost every 10 batches + print("%s, Step %d, Cost %f" % + (train_prompt, step, avg_loss_value[0])) + + if step % 100 == 0: # record a test cost every 100 batches + test_metics = train_test( + executor=exe_test, + program=test_program, + reader=test_reader, + fetch_list=[avg_loss], + feeder=feeder) + print("%s, Step %d, Cost %f" % + (test_prompt, step, test_metics[0])) + # If the accuracy is good enough, we can stop the training. + if test_metics[0] < 10.0: + break + + step += 1 + + if math.isnan(float(avg_loss_value[0])): + sys.exit("got NaN loss, training failed.") + if params_dirname is not None: + # We can save the trained parameters for the inferences later + fluid.io.save_inference_model(params_dirname, ['x'], [y_predict], + exe) + + if args.enable_ce and pass_id == args.num_epochs - 1: + print("kpis\ttrain_cost\t%f" % avg_loss_value[0]) + print("kpis\ttest_cost\t%f" % test_metics[0]) + + infer_exe = fluid.Executor(place) + inference_scope = fluid.core.Scope() + + # infer + with fluid.scope_guard(inference_scope): + [inference_program, feed_target_names, fetch_targets + ] = fluid.io.load_inference_model(params_dirname, infer_exe) + batch_size = 10 + + infer_reader = fluid.io.batch( + paddle.dataset.uci_housing.test(), batch_size=batch_size) + + infer_data = next(infer_reader()) + infer_feat = numpy.array( + [data[0] for data in infer_data]).astype("float32") + infer_label = numpy.array( + [data[1] for data in infer_data]).astype("float32") + + assert feed_target_names[0] == 'x' + results = infer_exe.run( + inference_program, + feed={feed_target_names[0]: numpy.array(infer_feat)}, + fetch_list=fetch_targets) + + print("infer results: (House Price)") + for idx, val in enumerate(results[0]): + print("%d: %.2f" % (idx, val)) + + print("\nground truth:") + for idx, val in enumerate(infer_label): + print("%d: %.2f" % (idx, val)) + + save_result(results[0], infer_label) + + +if __name__ == '__main__': + args = parse_args() + main() diff --git a/doc/paddle/user_guides/simple_case/index_cn.rst b/doc/paddle/user_guides/simple_case/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4b954ebfdc2449a33e7607d7516ac64c4cce3543 --- /dev/null +++ b/doc/paddle/user_guides/simple_case/index_cn.rst @@ -0,0 +1,14 @@ +################ +简单案例 +################ + +.. todo:: + +这里是基于PaddlePaddle实现的简单深度学习入门案例,帮助您更快速的了解飞桨的使用方法,并解决简单深度学习问题,以下是具体的案例详解: + +.. 
toctree:: + :titlesonly: + + fit_a_line/README.cn.md + recognize_digits/README.cn.md + word2vec/README.cn.md diff --git a/doc/paddle/user_guides/simple_case/index_en.rst b/doc/paddle/user_guides/simple_case/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..bccd0a4b83aed64ab7501460fb6770767755ec0a --- /dev/null +++ b/doc/paddle/user_guides/simple_case/index_en.rst @@ -0,0 +1,12 @@ +############################ +Simple Case +############################ + + +.. toctree:: + :titlesonly: + + fit_a_line/README.md + recognize_digits/README.md + word2vec/README.md + diff --git a/doc/paddle/user_guides/simple_case/recognize_digits/.gitignore b/doc/paddle/user_guides/simple_case/recognize_digits/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..4ca20920b31ca0cd5ae29b1ce27b40a695901235 --- /dev/null +++ b/doc/paddle/user_guides/simple_case/recognize_digits/.gitignore @@ -0,0 +1,6 @@ +data/raw_data +data/train.list +data/test.list +*.log +*.pyc +plot.png diff --git a/doc/paddle/user_guides/simple_case/recognize_digits/.run_ce.sh b/doc/paddle/user_guides/simple_case/recognize_digits/.run_ce.sh new file mode 100644 index 0000000000000000000000000000000000000000..4c5ae210ff5485c4c9266de73614ee0f4c4d6d6e --- /dev/null +++ b/doc/paddle/user_guides/simple_case/recognize_digits/.run_ce.sh @@ -0,0 +1,4 @@ +#!/bin/bash +#This file is only used for continuous evaluation. +python train.py --enable_ce | python _ce.py + diff --git a/doc/paddle/user_guides/simple_case/recognize_digits/README.cn.md b/doc/paddle/user_guides/simple_case/recognize_digits/README.cn.md new file mode 100644 index 0000000000000000000000000000000000000000..cfdc4ed12a1334f6959a29d46710d5ffd0828bf3 --- /dev/null +++ b/doc/paddle/user_guides/simple_case/recognize_digits/README.cn.md @@ -0,0 +1,624 @@ +# 数字识别 + +本教程源代码目录在[book/recognize_digits](https://github.com/PaddlePaddle/book/tree/develop/02.recognize_digits),初次使用请您参考[Book文档使用说明](https://github.com/PaddlePaddle/book/blob/develop/README.cn.md#运行这本书)。 + +### 说明: ### +1. 硬件环境要求: +本文可支持在CPU、GPU下运行 +2. Docker镜像支持的CUDA/cuDNN版本: +如果使用了Docker运行Book,请注意:这里所提供的默认镜像的GPU环境为 CUDA 8/cuDNN 5,对于NVIDIA Tesla V100等要求CUDA 9的 GPU,使用该镜像可能会运行失败。 +3. 文档和脚本中代码的一致性问题: +请注意:为使本文更加易读易用,我们拆分、调整了train.py的代码并放入本文。本文中代码与train.py的运行结果一致,可直接运行[train.py](https://github.com/PaddlePaddle/book/blob/develop/02.recognize_digits/train.py)进行验证。 + +## 背景介绍 +当我们学习编程的时候,编写的第一个程序一般是实现打印"Hello World"。而机器学习(或深度学习)的入门教程,一般都是 [MNIST](http://yann.lecun.com/exdb/mnist/) 数据库上的手写识别问题。原因是手写识别属于典型的图像分类问题,比较简单,同时MNIST数据集也很完备。MNIST数据集作为一个简单的计算机视觉数据集,包含一系列如图1所示的手写数字图片和对应的标签。图片是28x28的像素矩阵,标签则对应着0~9的10个数字。每张图片都经过了大小归一化和居中处理。 + +

+
+图1. MNIST图片示例 +

+ +MNIST数据集是从 [NIST](https://www.nist.gov/srd/nist-special-database-19) 的Special Database 3(SD-3)和Special Database 1(SD-1)构建而来。由于SD-3是由美国人口调查局的员工进行标注,SD-1是由美国高中生进行标注,因此SD-3比SD-1更干净也更容易识别。Yann LeCun等人从SD-1和SD-3中各取一半作为MNIST的训练集(60000条数据)和测试集(10000条数据),其中训练集来自250位不同的标注员,此外还保证了训练集和测试集的标注员是不完全相同的。 + +MNIST吸引了大量的科学家基于此数据集训练模型,1998年,LeCun分别用单层线性分类器、多层感知器(Multilayer Perceptron, MLP)和多层卷积神经网络LeNet进行实验,使得测试集上的误差不断下降(从12%下降到0.7%)\[[1](#参考文献)\]。在研究过程中,LeCun提出了卷积神经网络(Convolutional Neural Network),大幅度地提高了手写字符的识别能力,也因此成为了深度学习领域的奠基人之一。此后,科学家们又基于K近邻(K-Nearest Neighbors)算法\[[2](#参考文献)\]、支持向量机(SVM)\[[3](#参考文献)\]、神经网络\[[4-7](#参考文献)\]和Boosting方法\[[8](#参考文献)\]等做了大量实验,并采用多种预处理方法(如去除歪曲、去噪、模糊等)来提高识别的准确率。 + +如今的深度学习领域,卷积神经网络占据了至关重要的地位,从最早Yann LeCun提出的简单LeNet,到如今ImageNet大赛上的优胜模型VGGNet、GoogLeNet、ResNet等(请参见[图像分类](https://github.com/PaddlePaddle/book/tree/develop/03.image_classification) 教程),人们在图像分类领域,利用卷积神经网络得到了一系列惊人的结果。 + + + +本教程中,我们从简单的Softmax回归模型开始,带大家了解手写字符识别,并向大家介绍如何改进模型,利用多层感知机(MLP)和卷积神经网络(CNN)优化识别效果。 + + +## 模型概览 + +基于MNIST数据集训练一个分类器,在介绍本教程使用的三个基本图像分类网络前,我们先给出一些定义: + +- $X$是输入:MNIST图片是$28\times28$ 的二维图像,为了进行计算,我们将其转化为$784$维向量,即$X=\left ( x_0, x_1, \dots, x_{783} \right )$。 + +- $Y$是输出:分类器的输出是10类数字(0-9),即$Y=\left ( y_0, y_1, \dots, y_9 \right )$,每一维$y_i$代表图片分类为第$i$类数字的概率。 + +- $Label$是图片的真实标签:$Label=\left ( l_0, l_1, \dots, l_9 \right )$也是10维,但只有一维为1,其他都为0。例如某张图片上的数字为2,则它的标签为$(0,0,1,0, \dots, 0)$ + +### Softmax回归(Softmax Regression) + +最简单的Softmax回归模型是先将输入层经过一个全连接层得到特征,然后直接通过 softmax 函数计算多个类别的概率并输出\[[9](#参考文献)\]。 + +输入层的数据$X$传到输出层,在激活操作之前,会乘以相应的权重 $W$ ,并加上偏置变量 $b$ ,具体如下: + +

+
+

+ +其中 +

+
+

+ +图2为softmax回归的网络图,图中权重用蓝线表示、偏置用红线表示、+1代表偏置参数的系数为1。 + +

+
+图2. softmax回归网络结构图
+

+ +对于有 $N$ 个类别的多分类问题,指定 $N$ 个输出节点,$N$ 维结果向量经过softmax将归一化为 $N$ 个[0,1]范围内的实数值,分别表示该样本属于这 $N$ 个类别的概率。此处的 $y_i$ 即对应该图片为数字 $i$ 的预测概率。 + +在分类问题中,我们一般采用交叉熵代价损失函数(cross entropy loss),公式如下: + +

+
+

+ + + +### 多层感知机(Multilayer Perceptron, MLP) + +Softmax回归模型采用了最简单的两层神经网络,即只有输入层和输出层,因此其拟合能力有限。为了达到更好的识别效果,我们考虑在输入层和输出层中间加上若干个隐藏层\[[10](#参考文献)\]。 + +1. 经过第一个隐藏层,可以得到 $ H_1 = \phi(W_1X + b_1) $,其中$\phi$代表激活函数,常见的有[sigmoid、tanh或ReLU](#常见激活函数介绍)等函数。 +2. 经过第二个隐藏层,可以得到 $ H_2 = \phi(W_2H_1 + b_2) $。 +3. 最后,再经过输出层,得到的$Y=\text{softmax}(W_3H_2 + b_3)$,即为最后的分类结果向量。 + + +图3为多层感知器的网络结构图,图中权重用蓝线表示、偏置用红线表示、+1代表偏置参数的系数为1。 + +

+
+图3. 多层感知器网络结构图
+

+ +### 卷积神经网络(Convolutional Neural Network, CNN) + +在多层感知器模型中,将图像展开成一维向量输入到网络中,忽略了图像的位置和结构信息,而卷积神经网络能够更好的利用图像的结构信息。[LeNet-5](http://yann.lecun.com/exdb/lenet/)是一个较简单的卷积神经网络。图4显示了其结构:输入的二维图像,先经过两次卷积层到池化层,再经过全连接层,最后使用softmax分类作为输出层。下面我们主要介绍卷积层和池化层。 + +

+
+图4. LeNet-5卷积神经网络结构
+

+ +#### 卷积层 + +卷积层是卷积神经网络的核心基石。在图像识别里我们提到的卷积是二维卷积,即离散二维滤波器(也称作卷积核)与二维图像做卷积操作,简单的讲是二维滤波器滑动到二维图像上所有位置,并在每个位置上与该像素点及其领域像素点做内积。卷积操作被广泛应用与图像处理领域,不同卷积核可以提取不同的特征,例如边沿、线性、角等特征。在深层卷积神经网络中,通过卷积操作可以提取出图像低级到复杂的特征。 + +

+
+图5. 卷积层图片
+

+ +图5给出一个卷积计算过程的示例图,输入图像大小为$H=5,W=5,D=3$,即$5 \times 5$大小的3通道(RGB,也称作深度)彩色图像。 + +这个示例图中包含两(用$K$表示)组卷积核,即图中$Filter W_0$ 和 $Filter W_1$。在卷积计算中,通常对不同的输入通道采用不同的卷积核,如图示例中每组卷积核包含($D=3)$个$3 \times 3$(用$F \times F$表示)大小的卷积核。另外,这个示例中卷积核在图像的水平方向($W$方向)和垂直方向($H$方向)的滑动步长为2(用$S$表示);对输入图像周围各填充1(用$P$表示)个0,即图中输入层原始数据为蓝色部分,灰色部分是进行了大小为1的扩展,用0来进行扩展。经过卷积操作得到输出为$3 \times 3 \times 2$(用$H_{o} \times W_{o} \times K$表示)大小的特征图,即$3 \times 3$大小的2通道特征图,其中$H_o$计算公式为:$H_o = (H - F + 2 \times P)/S + 1$,$W_o$同理。 而输出特征图中的每个像素,是每组滤波器与输入图像每个特征图的内积再求和,再加上偏置$b_o$,偏置通常对于每个输出特征图是共享的。输出特征图$o[:,:,0]$中的最后一个$-2$计算如图5右下角公式所示。 + +在卷积操作中卷积核是可学习的参数,经过上面示例介绍,每层卷积的参数大小为$D \times F \times F \times K$。在多层感知器模型中,神经元通常是全部连接,参数较多。而卷积层的参数较少,这也是由卷积层的主要特性即局部连接和共享权重所决定。 + +- 局部连接:每个神经元仅与输入神经元的一块区域连接,这块局部区域称作感受野(receptive field)。在图像卷积操作中,即神经元在空间维度(spatial dimension,即上图示例H和W所在的平面)是局部连接,但在深度上是全部连接。对于二维图像本身而言,也是局部像素关联较强。这种局部连接保证了学习后的过滤器能够对于局部的输入特征有最强的响应。局部连接的思想,也是受启发于生物学里面的视觉系统结构,视觉皮层的神经元就是局部接受信息的。 + +- 权重共享:计算同一个深度切片的神经元时采用的滤波器是共享的。例如图5中计算$o[:,:,0]$的每个每个神经元的滤波器均相同,都为$W_0$,这样可以很大程度上减少参数。共享权重在一定程度上讲是有意义的,例如图片的底层边缘特征与特征在图中的具体位置无关。但是在一些场景中是无意的,比如输入的图片是人脸,眼睛和头发位于不同的位置,希望在不同的位置学到不同的特征 (参考[斯坦福大学公开课]( http://cs231n.github.io/convolutional-networks/))。请注意权重只是对于同一深度切片的神经元是共享的,在卷积层,通常采用多组卷积核提取不同特征,即对应不同深度切片的特征,不同深度切片的神经元权重是不共享。另外,偏重对同一深度切片的所有神经元都是共享的。 + +通过介绍卷积计算过程及其特性,可以看出卷积是线性操作,并具有平移不变性(shift-invariant),平移不变性即在图像每个位置执行相同的操作。卷积层的局部连接和权重共享使得需要学习的参数大大减小,这样也有利于训练较大卷积神经网络。 + +关于卷积的更多内容可[参考阅读](http://ufldl.stanford.edu/wiki/index.php/Feature_extraction_using_convolution#Convolutions)。 + +#### 池化层 + +

+
+图6. 池化层图片
+

+ +池化是非线性下采样的一种形式,主要作用是通过减少网络的参数来减小计算量,并且能够在一定程度上控制过拟合。通常在卷积层的后面会加上一个池化层。池化包括最大池化、平均池化等。其中最大池化是用不重叠的矩形框将输入层分成不同的区域,对于每个矩形框的数取最大值作为输出层,如图6所示。 + +更详细的关于卷积神经网络的具体知识可以参考[斯坦福大学公开课]( http://cs231n.github.io/convolutional-networks/ )、[Ufldl](http://ufldl.stanford.edu/wiki/index.php/Pooling) 和 [图像分类]( https://github.com/PaddlePaddle/book/tree/develop/03.image_classification )教程。 + + +### 常见激活函数介绍 +- sigmoid激活函数: + +

+
+

+ +- tanh激活函数: + +

+
+

+ + 实际上,tanh函数只是规模变化的sigmoid函数,将sigmoid函数值放大2倍之后再向下平移1个单位:tanh(x) = 2sigmoid(2x) - 1 。 + +- ReLU激活函数: $ f(x) = max(0, x) $ + +更详细的介绍请参考[维基百科激活函数](https://en.wikipedia.org/wiki/Activation_function)。 + +## 数据介绍 + +PaddlePaddle在API中提供了自动加载[MNIST](http://yann.lecun.com/exdb/mnist/)数据的模块`paddle.dataset.mnist`。加载后的数据位于`/home/username/.cache/paddle/dataset/mnist`下: + + +| 文件名称 | 说明 | +|----------------------|-------------------------| +|train-images-idx3-ubyte| 训练数据图片,60,000条数据 | +|train-labels-idx1-ubyte| 训练数据标签,60,000条数据 | +|t10k-images-idx3-ubyte | 测试数据图片,10,000条数据 | +|t10k-labels-idx1-ubyte | 测试数据标签,10,000条数据 | + +## Fluid API 概述 + +演示将使用最新的 [Fluid API](http://paddlepaddle.org/documentation/docs/zh/1.2/api_cn/index_cn.html)。Fluid API是最新的 PaddlePaddle API。它在不牺牲性能的情况下简化了模型配置。 +我们建议使用 Fluid API,它易学易用的特性将帮助您快速完成机器学习任务。。 + +下面是 Fluid API 中几个重要概念的概述: + +1. `inference_program`:指定如何从数据输入中获得预测的函数, +这是指定网络流的地方。 + +2. `train_program`:指定如何从 `inference_program` 和`标签值`中获取 `loss` 的函数, +这是指定损失计算的地方。 + +3. `optimizer_func`: 指定优化器配置的函数,优化器负责减少损失并驱动训练,Paddle 支持多种不同的优化器。 + +在下面的代码示例中,我们将深入了解它们。 + +## 配置说明 +加载 PaddlePaddle 的 Fluid API 包。 + +```python +from __future__ import print_function # 将python3中的print特性导入当前版本 +import os +from PIL import Image # 导入图像处理模块 +import matplotlib.pyplot as plt +import numpy +import paddle # 导入paddle模块 +import paddle.fluid as fluid +``` + +### Program Functions 配置 + +我们需要设置 `inference_program` 函数。我们想用这个程序来演示三个不同的分类器,每个分类器都定义为 Python 函数。 +我们需要将图像数据输入到分类器中。Paddle 为读取数据提供了一个特殊的层 `layer.data` 层。 +让我们创建一个数据层来读取图像并将其连接到分类网络。 + +- Softmax回归:只通过一层简单的以softmax为激活函数的全连接层,就可以得到分类的结果。 + +```python +def softmax_regression(): + """ + 定义softmax分类器: + 一个以softmax为激活函数的全连接层 + Return: + predict_image -- 分类的结果 + """ + # 输入的原始图像数据,大小为28*28*1 + img = fluid.data(name='img', shape=[None, 1, 28, 28], dtype='float32') + # 以softmax为激活函数的全连接层,输出层的大小必须为数字的个数10 + predict = fluid.layers.fc( + input=img, size=10, act='softmax') + return predict +``` + +- 多层感知器:下面代码实现了一个含有两个隐藏层(即全连接层)的多层感知器。其中两个隐藏层的激活函数均采用ReLU,输出层的激活函数用Softmax。 + +```python +def multilayer_perceptron(): + """ + 定义多层感知机分类器: + 含有两个隐藏层(全连接层)的多层感知器 + 其中前两个隐藏层的激活函数采用 ReLU,输出层的激活函数用 Softmax + + Return: + predict_image -- 分类的结果 + """ + # 输入的原始图像数据,大小为28*28*1 + img = fluid.data(name='img', shape=[None, 1, 28, 28], dtype='float32') + # 第一个全连接层,激活函数为ReLU + hidden = fluid.layers.fc(input=img, size=200, act='relu') + # 第二个全连接层,激活函数为ReLU + hidden = fluid.layers.fc(input=hidden, size=200, act='relu') + # 以softmax为激活函数的全连接输出层,输出层的大小必须为数字的个数10 + prediction = fluid.layers.fc(input=hidden, size=10, act='softmax') + return prediction +``` + +- 卷积池化层:在LeNet-5中会出现多个卷积-池化的操作,为避免代码重复书写,将串联的卷积-池化写成conv_pool函数。 + +```python +def conv_pool(input, num_filters, filter_size, pool_size, pool_stride, act="relu"): + """ + 定义卷积池化层: + 含有一个卷积层和一个池化层 + Args: + input —— 网络输入 + num_filters —— 卷积核的个数 + filter_size —— 卷积核的大小 + pool_size —— 池化核的大小 + pool_stride —— 池化的步长 + act —— 卷积层的激活函数 + + Return: + out -- 经过卷积池化后的特征图 + """ + conv_out = fluid.layers.conv2d( + input=input, + num_filters=num_filters, + filter_size=filter_size, + act=act) + out = fluid.layers.pool2d( + input=conv_out, + pool_size=pool_size, + pool_stride=pool_stride) + return out +``` + + +- 卷积神经网络LeNet-5: 输入的二维图像,首先经过两次卷积层到池化层,再经过全连接层,最后使用以softmax为激活函数的全连接层作为输出层。 + +```python +def convolutional_neural_network(): + """ + 定义卷积神经网络分类器: + 输入的二维图像,经过两个卷积-池化层,使用以softmax为激活函数的全连接层作为输出层 + + Return: + predict -- 分类的结果 + """ + # 输入的原始图像数据,大小为28*28*1 + img = fluid.data(name='img', shape=[None, 1, 28, 28], dtype='float32') + # 
第一个卷积-池化层 + # 使用20个5*5的滤波器,池化大小为2,池化步长为2,激活函数为Relu + conv_pool_1 = conv_pool( + input=img, + filter_size=5, + num_filters=20, + pool_size=2, + pool_stride=2, + act="relu") + conv_pool_1 = fluid.layers.batch_norm(conv_pool_1) + # 第二个卷积-池化层 + # 使用50个5*5的滤波器,池化大小为2,池化步长为2,激活函数为Relu + conv_pool_2 = conv_pool( + input=conv_pool_1, + filter_size=5, + num_filters=50, + pool_size=2, + pool_stride=2, + act="relu") + # 以softmax为激活函数的全连接输出层,输出层的大小必须为数字的个数10 + prediction = fluid.layers.fc(input=conv_pool_2, size=10, act='softmax') + return prediction +``` + +#### Train Program 配置 +然后我们需要设置训练程序 `train_program`。它首先从分类器中进行预测。 +在训练期间,它将从预测中计算 `avg_cost`。 + +**注意:** 训练程序应该返回一个数组,第一个返回参数必须是 `avg_cost`。训练器使用它来计算梯度。 + +请随意修改代码,测试 Softmax 回归 `softmax_regression`, `MLP` 和 卷积神经网络 `convolutional neural network` 分类器之间的不同结果。 + +```python +def train_program(): + """ + 配置train_program + + Return: + predict -- 分类的结果 + avg_cost -- 平均损失 + acc -- 分类的准确率 + + """ + # 标签层,名称为label,对应输入图片的类别标签 + label = fluid.data(name='label', shape=[None, 1], dtype='int64') + + # predict = softmax_regression() # 取消注释将使用 Softmax回归 + # predict = multilayer_perceptron() # 取消注释将使用 多层感知器 + predict = convolutional_neural_network() # 取消注释将使用 LeNet5卷积神经网络 + + # 使用类交叉熵函数计算predict和label之间的损失函数 + cost = fluid.layers.cross_entropy(input=predict, label=label) + # 计算平均损失 + avg_cost = fluid.layers.mean(cost) + # 计算分类准确率 + acc = fluid.layers.accuracy(input=predict, label=label) + return predict, [avg_cost, acc] + +``` + +#### Optimizer Function 配置 + +在下面的 `Adam optimizer`,`learning_rate` 是学习率,它的大小与网络的训练收敛速度有关系。 + +```python +def optimizer_program(): + return fluid.optimizer.Adam(learning_rate=0.001) +``` + +### 数据集 Feeders 配置 + +下一步,我们开始训练过程。`paddle.dataset.mnist.train()`和`paddle.dataset.mnist.test()`分别做训练和测试数据集。这两个函数各自返回一个reader——PaddlePaddle中的reader是一个Python函数,每次调用的时候返回一个Python yield generator。 + +下面`shuffle`是一个reader decorator,它接受一个reader A,返回另一个reader B。reader B 每次读入`buffer_size`条训练数据到一个buffer里,然后随机打乱其顺序,并且逐条输出。 + +`batch`是一个特殊的decorator,它的输入是一个reader,输出是一个batched reader。在PaddlePaddle里,一个reader每次yield一条训练数据,而一个batched reader每次yield一个minibatch。 + +```python +# 一个minibatch中有64个数据 +BATCH_SIZE = 64 + +# 每次读取训练集中的500个数据并随机打乱,传入batched reader中,batched reader 每次 yield 64个数据 +train_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.mnist.train(), buf_size=500), + batch_size=BATCH_SIZE) +# 读取测试集的数据,每次 yield 64个数据 +test_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=BATCH_SIZE) +``` + +### 构建训练过程 + +现在,我们需要构建一个训练过程。将使用到前面定义的训练程序 `train_program`, `place` 和优化器 `optimizer`,并包含训练迭代、检查训练期间测试误差以及保存所需要用来预测的模型参数。 + + +#### Event Handler 配置 + +我们可以在训练期间通过调用一个handler函数来监控训练进度。 +我们将在这里演示两个 `event_handler` 程序。请随意修改 Jupyter Notebook ,看看有什么不同。 + +`event_handler` 用来在训练过程中输出训练结果 + +```python +def event_handler(pass_id, batch_id, cost): + # 打印训练的中间结果,训练轮次,batch数,损失函数 + print("Pass %d, Batch %d, Cost %f" % (pass_id,batch_id, cost)) +``` + +```python +from paddle.utils.plot import Ploter + +train_prompt = "Train cost" +test_prompt = "Test cost" +cost_ploter = Ploter(train_prompt, test_prompt) + +# 将训练过程绘图表示 +def event_handler_plot(ploter_title, step, cost): + cost_ploter.append(ploter_title, step, cost) + cost_ploter.plot() +``` + +`event_handler_plot` 可以用来在训练过程中画图如下: + +![png](./image/train_and_test.png) + + +#### 开始训练 + +可以加入我们设置的 `event_handler` 和 `data reader`,然后就可以开始训练模型了。 +设置一些运行需要的参数,配置数据描述 +`feed_order` 用于将数据目录映射到 `train_program` +创建一个反馈训练过程中误差的`train_test` + +定义网络结构: + +```python +# 该模型运行在单个CPU上 +use_cuda = False # 如想使用GPU,请设置为 True +place 
= fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + +# 调用train_program 获取预测值,损失值, +prediction, [avg_loss, acc] = train_program() + +# 输入的原始图像数据,名称为img,大小为28*28*1 +# 标签层,名称为label,对应输入图片的类别标签 +# 告知网络传入的数据分为两部分,第一部分是img值,第二部分是label值 +feeder = fluid.DataFeeder(feed_list=['img', 'label'], place=place) + +# 选择Adam优化器 +optimizer = optimizer_program() +optimizer.minimize(avg_loss) +``` + +设置训练过程的超参: + +```python + +PASS_NUM = 5 #训练5轮 +epochs = [epoch_id for epoch_id in range(PASS_NUM)] + +# 将模型参数存储在名为 save_dirname 的文件中 +save_dirname = "recognize_digits.inference.model" +``` + + +```python +def train_test(train_test_program, + train_test_feed, train_test_reader): + + # 将分类准确率存储在acc_set中 + acc_set = [] + # 将平均损失存储在avg_loss_set中 + avg_loss_set = [] + # 将测试 reader yield 出的每一个数据传入网络中进行训练 + for test_data in train_test_reader(): + acc_np, avg_loss_np = exe.run( + program=train_test_program, + feed=train_test_feed.feed(test_data), + fetch_list=[acc, avg_loss]) + acc_set.append(float(acc_np)) + avg_loss_set.append(float(avg_loss_np)) + # 获得测试数据上的准确率和损失值 + acc_val_mean = numpy.array(acc_set).mean() + avg_loss_val_mean = numpy.array(avg_loss_set).mean() + # 返回平均损失值,平均准确率 + return avg_loss_val_mean, acc_val_mean +``` + +创建执行器: + +```python +exe = fluid.Executor(place) +exe.run(fluid.default_startup_program()) +``` + +设置 main_program 和 test_program : + +```python +main_program = fluid.default_main_program() +test_program = fluid.default_main_program().clone(for_test=True) +``` + +开始训练: + +```python +lists = [] +step = 0 +for epoch_id in epochs: + for step_id, data in enumerate(train_reader()): + metrics = exe.run(main_program, + feed=feeder.feed(data), + fetch_list=[avg_loss, acc]) + if step % 100 == 0: #每训练100次 打印一次log + print("Pass %d, Batch %d, Cost %f" % (step, epoch_id, metrics[0])) + event_handler_plot(train_prompt, step, metrics[0]) + step += 1 + + # 测试每个epoch的分类效果 + avg_loss_val, acc_val = train_test(train_test_program=test_program, + train_test_reader=test_reader, + train_test_feed=feeder) + + print("Test with Epoch %d, avg_cost: %s, acc: %s" %(epoch_id, avg_loss_val, acc_val)) + event_handler_plot(test_prompt, step, metrics[0]) + + lists.append((epoch_id, avg_loss_val, acc_val)) + + # 保存训练好的模型参数用于预测 + if save_dirname is not None: + fluid.io.save_inference_model(save_dirname, + ["img"], [prediction], exe, + model_filename=None, + params_filename=None) + +# 选择效果最好的pass +best = sorted(lists, key=lambda list: float(list[1]))[0] +print('Best pass is %s, testing Avgcost is %s' % (best[0], best[1])) +print('The classification accuracy is %.2f%%' % (float(best[2]) * 100)) +``` + +训练过程是完全自动的,event_handler里打印的日志类似如下所示。 + +Pass表示训练轮次,Batch表示训练全量数据的次数,cost表示当前pass的损失值。 + +每训练完一个Epoch后,计算一次平均损失和分类准确率。 + +``` +Pass 0, Batch 0, Cost 0.125650 +Pass 100, Batch 0, Cost 0.161387 +Pass 200, Batch 0, Cost 0.040036 +Pass 300, Batch 0, Cost 0.023391 +Pass 400, Batch 0, Cost 0.005856 +Pass 500, Batch 0, Cost 0.003315 +Pass 600, Batch 0, Cost 0.009977 +Pass 700, Batch 0, Cost 0.020959 +Pass 800, Batch 0, Cost 0.105560 +Pass 900, Batch 0, Cost 0.239809 +Test with Epoch 0, avg_cost: 0.053097883707459624, acc: 0.9822850318471338 +``` + +训练之后,检查模型的预测准确度。用 MNIST 训练的时候,一般 softmax回归模型的分类准确率约为 92.34%,多层感知器为97.66%,卷积神经网络可以达到 99.20%。 + + +## 应用模型 + +可以使用训练好的模型对手写体数字图片进行分类,下面程序展示了如何使用训练好的模型进行推断。 + +### 生成预测输入数据 + +`infer_3.png` 是数字 3 的一个示例图像。把它变成一个 numpy 数组以匹配数据feed格式。 + +```python +def load_image(file): + # 读取图片文件,并将它转成灰度图 + im = Image.open(file).convert('L') + # 将输入图片调整为 28*28 的高质量图 + im = im.resize((28, 28), Image.ANTIALIAS) + 
# 将图片转换为numpy + im = numpy.array(im).reshape(1, 1, 28, 28).astype(numpy.float32) + # 对数据作归一化处理 + im = im / 255.0 * 2.0 - 1.0 + return im + +cur_dir = os.getcwd() +tensor_img = load_image(cur_dir + '/image/infer_3.png') +``` + +### Inference 创建及预测 +通过`load_inference_model`来设置网络和经过训练的参数。我们可以简单地插入在此之前定义的分类器。 +```python +inference_scope = fluid.core.Scope() +with fluid.scope_guard(inference_scope): + # 使用 fluid.io.load_inference_model 获取 inference program desc, + # feed_target_names 用于指定需要传入网络的变量名 + # fetch_targets 指定希望从网络中fetch出的变量名 + [inference_program, feed_target_names, + fetch_targets] = fluid.io.load_inference_model( + save_dirname, exe, None, None) + + # 将feed构建成字典 {feed_target_name: feed_target_data} + # 结果将包含一个与fetch_targets对应的数据列表 + results = exe.run(inference_program, + feed={feed_target_names[0]: tensor_img}, + fetch_list=fetch_targets) + lab = numpy.argsort(results) + + # 打印 infer_3.png 这张图片的预测结果 + img=Image.open('image/infer_3.png') + plt.imshow(img) + print("Inference result of image/infer_3.png is: %d" % lab[0][0][-1]) +``` + + +### 预测结果 + +如果顺利,预测结果输入如下: +`Inference result of image/infer_3.png is: 3` , 说明我们的网络成功的识别出了这张图片! + +## 总结 + +本教程的softmax回归、多层感知机和卷积神经网络是最基础的深度学习模型,后续章节中复杂的神经网络都是从它们衍生出来的,因此这几个模型对之后的学习大有裨益。同时,我们也观察到从最简单的softmax回归变换到稍复杂的卷积神经网络的时候,MNIST数据集上的识别准确率有了大幅度的提升,原因是卷积层具有局部连接和共享权重的特性。在之后学习新模型的时候,希望大家也要深入到新模型相比原模型带来效果提升的关键之处。此外,本教程还介绍了PaddlePaddle模型搭建的基本流程,从dataprovider的编写、网络层的构建,到最后的训练和预测。对这个流程熟悉以后,大家就可以用自己的数据,定义自己的网络模型,并完成自己的训练和预测任务了。 + + +## 参考文献 + +1. LeCun, Yann, Léon Bottou, Yoshua Bengio, and Patrick Haffner. ["Gradient-based learning applied to document recognition."](http://ieeexplore.ieee.org/abstract/document/726791/) Proceedings of the IEEE 86, no. 11 (1998): 2278-2324. +2. Wejéus, Samuel. ["A Neural Network Approach to Arbitrary SymbolRecognition on Modern Smartphones."](http://www.diva-portal.org/smash/record.jsf?pid=diva2%3A753279&dswid=-434) (2014). +3. Decoste, Dennis, and Bernhard Schölkopf. ["Training invariant support vector machines."](http://link.springer.com/article/10.1023/A:1012454411458) Machine learning 46, no. 1-3 (2002): 161-190. +4. Simard, Patrice Y., David Steinkraus, and John C. Platt. ["Best Practices for Convolutional Neural Networks Applied to Visual Document Analysis."](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.160.8494&rep=rep1&type=pdf) In ICDAR, vol. 3, pp. 958-962. 2003. +5. Salakhutdinov, Ruslan, and Geoffrey E. Hinton. ["Learning a Nonlinear Embedding by Preserving Class Neighbourhood Structure."](http://www.jmlr.org/proceedings/papers/v2/salakhutdinov07a/salakhutdinov07a.pdf) In AISTATS, vol. 11. 2007. +6. Cireşan, Dan Claudiu, Ueli Meier, Luca Maria Gambardella, and Jürgen Schmidhuber. ["Deep, big, simple neural nets for handwritten digit recognition."](http://www.mitpressjournals.org/doi/abs/10.1162/NECO_a_00052) Neural computation 22, no. 12 (2010): 3207-3220. +7. Deng, Li, Michael L. Seltzer, Dong Yu, Alex Acero, Abdel-rahman Mohamed, and Geoffrey E. Hinton. ["Binary coding of speech spectrograms using a deep auto-encoder."](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.185.1908&rep=rep1&type=pdf) In Interspeech, pp. 1692-1695. 2010. +8. Kégl, Balázs, and Róbert Busa-Fekete. ["Boosting products of base classifiers."](http://dl.acm.org/citation.cfm?id=1553439) In Proceedings of the 26th Annual International Conference on Machine Learning, pp. 497-504. ACM, 2009. +9. Rosenblatt, Frank. 
["The perceptron: A probabilistic model for information storage and organization in the brain."](http://psycnet.apa.org/journals/rev/65/6/386/) Psychological review 65, no. 6 (1958): 386. +10. Bishop, Christopher M. ["Pattern recognition."](http://users.isr.ist.utl.pt/~wurmd/Livros/school/Bishop%20-%20Pattern%20Recognition%20And%20Machine%20Learning%20-%20Springer%20%202006.pdf) Machine Learning 128 (2006): 1-58. + +
+知识共享许可协议
本教程PaddlePaddle 创作,采用 知识共享 署名-相同方式共享 4.0 国际 许可协议进行许可。 diff --git a/doc/paddle/user_guides/simple_case/recognize_digits/README.md b/doc/paddle/user_guides/simple_case/recognize_digits/README.md new file mode 100644 index 0000000000000000000000000000000000000000..8194969e33f6059b04a7caa704c11864b48a14b8 --- /dev/null +++ b/doc/paddle/user_guides/simple_case/recognize_digits/README.md @@ -0,0 +1,615 @@ +# Recognize Digits + +The source code of this tutorial is in [book/recognize_digits](https://github.com/PaddlePaddle/book/tree/develop/02.recognize_digits). For new users, please refer to [Running This Book](https://github.com/PaddlePaddle/book/blob/develop/README.md#running-the-book) . + +## Background +When we learning programming, the first program we write is generally to implement printing “Hello World”. But the tutorial of machine learning or deep learning for the beginner is usually handwriting recognition on the [MNIST](http://yann.lecun.com/exdb/mnist/) database. Because handwriting recognition is a typical classification problem, relatively simple and the MNIST dataset is complete. MNIST dataset as a simple computer vision dataset contains a series of pictures and corresponding labels of handwriting digits. The picture is a 28x28 pixel matrix, and the label corresponds to 10 numbers from 0 to 9. Each picture has been normalized in size and centered in the position. + +

+
+Figure 1. Example of a MNIST picture +

+ +MNIST dataset is created from [NIST](https://www.nist.gov/srd/nist-special-database-19) Special Database 3(SD-3) and Special Database 1(SD-1). Because SD-3 is labeled by stuff of US Census Bureau and SD-1 is labeled by US high school students, so SD-3 is clearer and easier to be recognized than SD-1. Yann LeCun et al. pick half of SD-1 and half of SD-3 as train dataset (60000 data) and test dataset (10000 data).250 annotators labeled the training set, thus guaranteed that there wasn't a complete overlap of annotators of training set and test set. + +MNIST attracts scholars to train model based on the dataset. In 1998, LeCun conducted experiments respectively using Single layer classifier, Multilayer Perceptron and Multilayer convolutional neural network LeNet, constantly decreasing the error on test dataset ( from 12% to 0.7%)\[[1](#References)\]。 In the process of research, LeCun, the pioneer in the field of deep learning, came up with Convolutional Neural Network, largely improving the performance of handwriting recognition. After that, researchers take a large number of experiments based on K-Nearest Neighbors algorithm\[[2](#References)\], SVM\[[3](#References)\], Neural Network\[[4-7](#References)\] and Boosting method\[[8](#References)\] and so on, with multiple pre-processing methods(like distortion removal, noise removal, and blurring) to upgrade accuracy of recognition. + +Convolutional Neural Network plays an important role in the field of deep learning now. From simple LeNet proposed by Yann LeCun in early days to model VGGNet, GoogleNet, ResNet and so on in the ImageNet competition (please refer to [Image Classification](https://github.com/PaddlePaddle/book/tree/develop/03.image_classification) tutorial ), we have gain a serious of great achievements with convolutional neural network in the field of image classification. + + + +In this tutorial, starting from simple Softmax regression model, we help you learn about handwriting recognition and introduce you how to upgrade model and how to use MLP and CNN to optimize recognition result. + + +## Exploration of Models + +To train a classifier based on MNIST dataset, before the introduction of three basic image classification networks used in this tutorial, we first give some definitions: + +- $X$ is the input: the MNIST image is a two-dimensional image of $28\times28$. For the calculation, we transform it into a $784$ dimensional vector, ie $X=\left ( x_0, x_1, \dots, x_{783} \right )$. + +- $Y$ is the output: the output of the classifier is number (0-9), ie $Y=\left ( y_0, y_1, \dots, y_9 \right )$, and each dimension $y_i$ represents the probability of image classification as $i$th number. + +- $Label$ is the actual label of the picture: $Label=\left ( l_0, l_1, \dots, l_9 \right ) $ is also 10 dimensions, but only one dimension represents 1, and the rest is 0. For example, if the number on an image is 2, its label is $(0,0,1,0, \dots, 0)$ + +### Softmax Regression + +The simplest Softmax regression model is to get features with input layer passing through a fully connected layer and then compute and ouput probabilities of multiple classifications directly via Softmax function \[[9](#references)\]. + +The data of the input layer $X$ is passed to the output layer. 
There, the input $X$ is multiplied by the weight matrix $W$ and added to the bias $b$ to generate the activations: + +$$ y_i = \text{softmax}(\sum_j W_{i,j}x_j + b_i) $$ + +where $ \text{softmax}(x_i) = \frac{e^{x_i}}{\sum_j e^{x_j}} $. + +Figure 2 shows the softmax regression network, in which the weights are drawn as blue lines, the biases as red lines, and +1 indicates that the bias term is $1$. + +

+Figure 2. Softmax regression network structure

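As a quick numeric illustration of the softmax formula above (the scores are arbitrary made-up values, not taken from the tutorial), exponentiating each score and normalizing yields a probability vector:

```python
import numpy as np

scores = np.array([2.0, 1.0, 0.1])            # assumed raw outputs W*x + b for 3 classes
probs = np.exp(scores) / np.exp(scores).sum()
print(probs)        # approx. [0.659 0.242 0.099]
print(probs.sum())  # 1.0 -- softmax outputs form a probability distribution
```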
+ +For an $N$-class classification problem with $N$ output nodes, Softmax normalizes the resulting $N$-dimensional vector so that each of its entries falls in the range $[0,1]$ and they sum to one, each entry representing the probability that the sample belongs to the corresponding class. Here $y_i$ denotes the predicted probability that an image shows the digit $i$. + +For classification problems we usually use the cross-entropy loss, whose formula is as follows: + +$$ L_{cross-entropy}(label, y) = -\sum_i label_i \log(y_i) $$ + +### Multilayer Perceptron + +The Softmax regression model uses the simplest two-layer neural network, containing only the input layer and the output layer, so its performance is limited. To achieve better recognition, we add several hidden layers \[[10](#references)\] between the input and output layers. + +1. In the first hidden layer, we get $ H_1 = \phi(W_1X + b_1) $, where $\phi$ is the activation function; common choices are [sigmoid, tanh or ReLU](#common activation functions). +2. In the second hidden layer, we get $ H_2 = \phi(W_2H_1 + b_2) $. +3. Finally, the output layer gives $Y=\text{softmax}(W_3H_2 + b_3)$, which is the final classification result vector. + +Figure 3 shows the network structure of a multilayer perceptron, in which the weights are drawn as blue lines, the biases as red lines, and +1 indicates that the bias term is $1$. + +

+Figure 3. Multilayer perceptron network structure

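The three equations above can be made concrete with a small NumPy sketch (illustrative only; the layer sizes 200, 200 and 10 are assumptions matching the `multilayer_perceptron` defined later, and the real Paddle implementation appears in the code sections below):

```python
import numpy as np

def softmax(z):
    e = np.exp(z - z.max())            # subtract the max for numerical stability
    return e / e.sum()

def mlp_forward(x, W1, b1, W2, b2, W3, b3):
    h1 = np.maximum(0, W1 @ x + b1)    # H1 = ReLU(W1*X + b1)
    h2 = np.maximum(0, W2 @ h1 + b2)   # H2 = ReLU(W2*H1 + b2)
    return softmax(W3 @ h2 + b3)       # Y = softmax(W3*H2 + b3)

rng = np.random.RandomState(0)
x = rng.rand(784)                      # stand-in for a flattened 28x28 image
W1, b1 = 0.01 * rng.randn(200, 784), np.zeros(200)
W2, b2 = 0.01 * rng.randn(200, 200), np.zeros(200)
W3, b3 = 0.01 * rng.randn(10, 200), np.zeros(10)

y = mlp_forward(x, W1, b1, W2, b2, W3, b3)
label = np.eye(10)[3]                          # one-hot label for digit 3
loss = -np.sum(label * np.log(y + 1e-12))      # cross-entropy from the previous section
print(y.sum(), loss)                           # probabilities sum to 1
```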
+ +### Convolutional Neural Network + +In the multilayer perceptron model, the image is flattened into a one-dimensional vector before being fed into the network, which throws away its spatial structure. A convolutional neural network, in contrast, makes better use of the spatial structure of the image. [LeNet-5](http://yann.lecun.com/exdb/lenet/) is a relatively simple convolutional neural network. Figure 4 shows its structure: the two-dimensional input image first passes through two stages of convolutional and pooling layers, then through a fully connected layer, and finally through a softmax output layer. Below we mainly introduce the convolutional layer and the pooling layer. + +

+Figure 4. LeNet-5 convolutional neural network structure

+ +#### Convolutional Layer + +The convolutional layer is the core of a convolutional neural network. The convolution used in image recognition is a two-dimensional convolution, that is, a discrete two-dimensional filter (also called a convolutional kernel) convolved with a two-dimensional image. In short, the two-dimensional filter slides over every position of the image, and at each position a dot product is computed between the filter and the pixel together with its neighborhood. Convolution operations are widely used in image processing: different convolutional kernels extract different features, such as edges, lines, and corners. In a deep convolutional neural network, convolution operations extract image features ranging from simple, low-level ones to complex ones. + +

+Figure 5. Convolutional layer computation example

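The walkthrough of Figure 5 below relies on the output-size formula $H_o = (H - F + 2 \times P)/S + 1$. As a quick sanity check (not part of the tutorial code), a tiny helper reproduces the $3 \times 3 \times 2$ output shape of the example:

```python
def conv_output_size(H, F, P, S):
    # H: input size, F: filter size, P: zero padding, S: stride
    return (H - F + 2 * P) // S + 1

# values used in the Figure 5 example: a 5x5x3 input, two groups of 3x3 filters,
# padding 1 and stride 2
H = W = 5
F, P, S, K = 3, 1, 2, 2
print(conv_output_size(H, F, P, S),
      conv_output_size(W, F, P, S), K)   # 3 3 2 -> a 3x3x2 output feature map
```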
+ +Figure 5 shows an example of computing a convolution on an input image of size $H=5, W=5, D=3$, i.e. a $5 \times 5$ color image with 3 channels (RGB, also called depth). + +This example contains two groups of convolutional kernels (their number is denoted by $K$), i.e. $Filter W_0$ and $Filter W_1$ in the figure. When computing the convolution, different kernels are usually applied to different input channels, so in the example each group of kernels contains $D=3$ kernels of size $3\times 3$ (denoted by $F \times F$). In addition, the stride of the kernel in the horizontal and vertical directions is 2 (denoted by $S$), and the input image is zero-padded by 1 pixel (denoted by $P$) on all four sides; that is, the blue part in the figure is the raw input data and the gray part is the border of zeros of width 1. The convolution yields a feature map of size $3 \times 3 \times 2$ (denoted by $H_{o} \times W_{o} \times K$), i.e. a 2-channel feature map of size $3 \times 3$, where $H_o$ is computed as $H_o = (H - F + 2 \times P)/S + 1$, and $W_o$ likewise. Each pixel of an output feature map is the sum, over input channels, of the inner products between one group of filters and the corresponding patch of the input image, plus a bias $b_o$; the bias is usually shared by all pixels of the same output feature map. The value $-2$ in the last position of the output feature map $o[:,:,0]$ is computed as shown in the lower right corner of Figure 5. + +The convolutional kernels are the learnable parameters of the convolution operation. As the example shows, each convolutional layer has $D \times F \times F \times K$ weight parameters. In the multilayer perceptron model, neurons are fully connected and therefore have a large number of parameters. A convolutional layer has far fewer parameters, which follows from its two main properties: local connections and shared weights. + +- Local connection: each neuron is connected only to a region of the input, called its receptive field. In an image convolution, neurons are locally connected in the spatial dimensions (the plane spanned by $H$ and $W$ in the example above) but fully connected in depth. For a two-dimensional image, nearby pixels are strongly correlated, and this local connectivity ensures that the learned filters respond most strongly to local input patterns. The idea of local connections is also inspired by the structure of the biological visual system, in which neurons of the visual cortex receive information locally. + +- Weight sharing: the filters used to compute the neurons of the same depth slice are shared. For example, in Figure 5 every neuron of $o[:,:,0]$ is computed with the same filter $W_0$, which greatly reduces the number of parameters. Sharing weights is meaningful to some extent; for example, an edge at the bottom of an image is the same feature regardless of where it appears in the image. In other cases it is less appropriate: if the input picture is a face, eyes and hair appear at different positions, and one may want to learn different features at different positions (refer to the [Stanford University Open Class](http://cs231n.Github.io/convolutional-networks/)). Note that the weights are shared only among neurons of the same depth slice.
In a convolutional layer, several groups of kernels are typically used to extract different kinds of features; in other words, neurons in different depth slices do not share weights, while the bias is shared by all neurons of the same depth slice. + +From this computation process and these properties, convolution can be viewed as a shift-invariant linear operation: the same operation is performed at every position of the image. The local connections and weight sharing of the convolutional layer greatly reduce the number of parameters to be learned, which makes it easier to train larger convolutional neural networks. + +For more information about convolution, please refer to [Reference Reading](http://ufldl.stanford.edu/wiki/index.php/Feature_extraction_using_convolution#Convolutions). + +#### Pooling Layer

+Figure 6. Max pooling example

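As described in the paragraph below, max pooling takes the maximum over non-overlapping rectangular windows. A minimal NumPy sketch of $2\times2$ max pooling (an illustration only, not the Paddle operator used later):

```python
import numpy as np

def max_pool_2x2(x):
    # x: (H, W) feature map with even H and W; non-overlapping 2x2 windows
    H, W = x.shape
    return x.reshape(H // 2, 2, W // 2, 2).max(axis=(1, 3))

x = np.arange(16).reshape(4, 4)
print(max_pool_2x2(x))   # 4x4 input -> 2x2 output, each entry is the max of one window
```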
+ +Pooling is a form of nonlinear downsampling. The main functionality of this layer is to reduce computation by reducing the network parameters and to control the overfitting to some extent. Normally a pooling layer is added after the convolutional layer. Pooling includes maximum pooling, average pooling and so on. The largest pooling is to divide the input layer into different areas by non-overlapping rectangular boxes, and the maximum value of each rectangular box is taken as the output layer, as shown in Figure. 6. + +For details about convolutional neural network, please refer to the tutorial of [Standford Online Course]( http://cs231n.github.io/convolutional-networks/ ), [Ufldl](http://ufldl.stanford.edu/wiki/index.php/Pooling) and [Image Classification]( https://github.com/PaddlePaddle/book/tree/develop/03.image_classification ). + + +### Common activation functions + +- Sigmoid activation function: $ f(x) = sigmoid(x) = \frac{1}{1+e^{-x}} $ + +- Tanh activation function: $ f(x) = tanh(x) = \frac{e^x-e^{-x}}{e^x+e^{-x}} $ + +In fact, the tanh function is only a sigmoid function with change of scale. The value of the sigmoid function is doubled and then shifted down by 1 unit: tanh(x) = 2sigmoid(2x) - 1 . + +- ReLU activation function: $ f(x) = max(0, x) $ + +For details, please refer to [activation function in Wikipedia](https://en.wikipedia.org/wiki/Activation_function). + +## Dataset Preparation + +PaddlePaddle provides a module `paddle.dataset.mnist` that automatically loads [MNIST] (http://yann.lecun.com/exdb/mnist/) data in the API. The loaded data is located under `/home/username/.cache/paddle/dataset/mnist`: + + +| filename | note | +|----------------------|-------------------------| +|train-images-idx3-ubyte| train data picture, 60,000 data | +|train-labels-idx1-ubyte| train data label, 60,000 data | +|t10k-images-idx3-ubyte | test data picture, 10,000 data | +|t10k-labels-idx1-ubyte | test data label, 10,000 data | + +## Fluid API Overview + +The demo will use the latest [Fluid API](http://paddlepaddle.org/documentation/docs/en/1.2/api_cn/index_cn.html). Fluid API is the latest PaddlePaddle API. It simplifies model configuration without sacrificing performance. +We recommend using the Fluid API, which is easy to learn and use to help you complete your machine learning tasks quickly. + +Here is an overview of several important concepts in the Fluid API: + +1. `inference_program`: specifies how to get the inference function from the data input. +This is where the network flow is defined. + +2. `train_program`: specifies how to get the `loss` function from `inference_program` and `tag value`. +This is where the loss calculation is specified. + +3. `optimizer_func`: Specifies the function of the optimizer configuration. The optimizer is responsible for reducing losses and driving training. Paddle supports a number of different optimizers. + +In the code examples below, we'll take a closer look at them. + +## Configuration Instructions + +Load the Fluid API package for PaddlePaddle. + +```python +from __future__ import print_function #load print of python3 into current version +import os +from PIL import Image # load module of image processing +import matplotlib.pyplot as plt +import numpy +import paddle # load paddle module +import paddle.fluid as fluid +``` + +### Program Functions Configuration + +We need to configure `inference_program` function. We want to use this program to show three different classifiers, each of which is defined as a Python function. 
+We need to input the image data into the classifier. Paddle provides a special layer `layer.data` for reading data. +Let's create a data layer to read the image and connect it to the network of classification. + +-Softmax regression: The results of the classification can be obtained only through a simple layer of simple fully connected layer with softmax as the activation function. + +```python +def softmax_regression(): + """ + Define softmax classifier: + A fully connected layer with activation function softmax + Return: + predict_image -- result of classification + """ + # input original image data in size of 28*28*1 + img = fluid.data(name='img', shape=[None, 1, 28, 28], dtype='float32') + # With softmax as the fully connected layer of the activation function, the size of the output layer must be 10 + predict = fluid.layers.fc( + input=img, size=10, act='softmax') + return predict +``` + +-Multilayer Perceptron: The following code implements a multilayer perceptron with two hidden layers (that is, fully connected layers). The activation functions of the two hidden layers are all ReLU, and the activation function of the output layer is Softmax. + +```python +def multilayer_perceptron(): + """ + Define multilayer perceptron classifier: + Multilayer perceptron with two hidden layers (fully connected layers) + The activation function of the first two hidden layers uses ReLU, and the activation function of the output layer uses Softmax. + + Return: + predict_image -- result of classification + """ + # input raw image data in size of 28*28*1 + img = fluid.data(name='img', shape=[None, 1, 28, 28], dtype='float32') + # the first fully connected layer, whose activation function is ReLU + hidden = fluid.layers.fc(input=img, size=200, act='relu') + # the second fully connected layer, whose activation function is ReLU + hidden = fluid.layers.fc(input=hidden, size=200, act='relu') + # With softmax as the fully connected output layer of the activation function, the size of the output layer must be 10 + prediction = fluid.layers.fc(input=hidden, size=10, act='softmax') + return prediction +``` + +-Conv_pool layer: LeNet-5 has multiple convolution-pooling operations. In order to avoid repeated code writing, the convolution-pooling in series is written as conv_pool function. + +```python +def conv_pool(input, num_filters, filter_size, pool_size, pool_stride, act="relu"): + """ + Define convolution-pooling layer: + Conv_pool layer has a convolutional layer and a pooling layer + Args: + input —— Input + num_filters —— The number of filter + filter_size —— The filter size + pool_size —— The pool kernel size + pool_stride —— The pool stride size + act —— Activation type + + Return: + out -- output + """ + conv_out = fluid.layers.conv2d( + input=input, + num_filters=num_filters, + filter_size=filter_size, + act=act) + out = fluid.layers.pool2d( + input=conv_out, + pool_size=pool_size, + pool_stride=pool_stride) + return out + +``` +-Convolutional neural network LeNet-5: The input two-dimensional image first passes through two convolutional layers to the pooling layer, then passes through the fully connected layer, and finally fully connection layer with softmax as activation function is used as output layer. 
+ +```python +def convolutional_neural_network(): + """ + Define convolutional neural network classifier: + The input 2D image passes through two convolution-pooling layers, using the fully connected layer with softmax as the output layer + + Return: + predict -- result of classification + """ + # input raw image data in size of 28*28*1 + img = fluid.data(name='img', shape=[None, 1, 28, 28], dtype='float32') + # the first convolution-pooling layer + # Use 20 5*5 filters, the pooling size is 2, the pooling step is 2, and the activation function is Relu. + conv_pool_1 = conv_pool( + input=img, + filter_size=5, + num_filters=20, + pool_size=2, + pool_stride=2, + act="relu") + conv_pool_1 = fluid.layers.batch_norm(conv_pool_1) + # the second convolution-pooling layer + # Use 20 5*5 filters, the pooling size is 2, the pooling step is 2, and the activation function is Relu. + conv_pool_2 = conv_pool( + input=conv_pool_1, + filter_size=5, + num_filters=50, + pool_size=2, + pool_stride=2, + act="relu") + # With softmax as the fully connected output layer of the activation function, the size of the output layer must be 10 + prediction = fluid.layers.fc(input=conv_pool_2, size=10, act='softmax') + return prediction +``` + +#### Train Program Configuration +Then we need to set train program `train_program` It firstly infers from classifier. +During the training, it will compute `avg_cost`. + +** Note:** train program should return an array. The first parameter returned must be `avg_cost`. The trainer uses it to compute gradient. + +Please write your code and then test results of different classifiers of `softmax_regression`, `MLP` and `convolutional neural network`. + +```python +def train_program(): + """ + Configure train_program + + Return: + predict -- result of classification + avg_cost -- mean loss + acc -- accuracy of classification + + """ + # label layer, called label, correspondent with label category of input picture + label = fluid.data(name='label', shape=[None, 1], dtype='int64') + + # predict = softmax_regression() # cancel note and run Softmax regression + # predict = multilayer_perceptron() # cancel note and run multiple perceptron + predict = convolutional_neural_network() # cancel note and run LeNet5 convolutional neural network + + # use class cross-entropy function to compute loss function between predict and label + cost = fluid.layers.cross_entropy(input=predict, label=label) + # compute mean loss + avg_cost = fluid.layers.mean(cost) + # compute accuracy of classification + acc = fluid.layers.accuracy(input=predict, label=label) + return predict, [avg_cost, acc] + +``` + +#### Optimizer Function Configuration + +`Adam optimizer`,`learning_rate` below are learning rate. Their size is associated with speed of network train convergence. + +```python +def optimizer_program(): + return fluid.optimizer.Adam(learning_rate=0.001) +``` + +### Data Feeders for dataset Configuration + +Next We start the training process. `Paddle.dataset.mnist.train()` and `paddle.dataset.mnist.test()` are respectively as train dataset and test dataset. These two functions respectively return a reader-- reader in PaddlePaddle is a Python function, which returns a Python yield generator when calling the reader. + +`Shuffle` below is a reader decorator, which receives a reader A and returns another reader B. Reader B read `buffer_size` train data into a buffer and then the data is disordered randomly and is output one by one. + +`Batch` is a special decorator. 
Its input is a reader and output is a batched reader. In PaddlePaddle, a reader yield a piece of data every time while batched reader yield a minibatch every time. + +```python +# there are 64 data in a minibatch +BATCH_SIZE = 64 + +# read 500 data in train dataset, randomly disorder them and then transfer it into batched reader which yield 64 data each time. +train_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.mnist.train(), buf_size=500), + batch_size=BATCH_SIZE) +# read data in test dataset and yield 64 data every time +test_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=BATCH_SIZE) +``` + +### create training process + +Now we need to create a training process. We will use `train_program`, `place` and `optimizer` defined before, conclude test loss in the period of training iteration and training verification and save parameters of model for prediction. + + +#### Event Handler Configuration + +We can call a handler function to supervise training process during training. +We display two `event_handler` programs here. Please freely update Jupyter Notebook and find the changes. + +`Event_handler` is used to output training result during the train + +```python +def event_handler(pass_id, batch_id, cost): + # print the intermediate results of training, like + # training iterations, number of batch, and loss function + print("Pass %d, Batch %d, Cost %f" % (pass_id,batch_id, cost)) +``` + +```python +from paddle.utils.plot import Ploter + +train_prompt = "Train cost" +test_prompt = "Test cost" +cost_ploter = Ploter(train_prompt, test_prompt) + +# visualize training process +def event_handler_plot(ploter_title, step, cost): + cost_ploter.append(ploter_title, step, cost) + cost_ploter.plot() +``` + +`event_handler_plot` can be visualized as follows: + +![png](./image/train_and_test.png) + + +### Start training + +Aftering adding `event_handler` and `data reader` we configured, we can start to train the model. +Set parameters for operation to configure data description. +`Feed_order` is used to map data directory to `train_program` +Create a `train_test` reflecting the loss during our training. + +Define network structure: + +```python +# the model is run on single CPU +use_cuda = False # If you want to use GPU, please set it True +place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + +# call train_program to get prediction value and loss value, +prediction, [avg_loss, acc] = train_program() + +# input original image data in size of 28*28*1 +# label layer, called label, correspondent with label category of input picture. + +# It is informed that data in network consists of two parts. One is img value, the other is label value. 
+feeder = fluid.DataFeeder(feed_list=['img', 'label'], place=place) + +# choose Adam optimizer +optimizer = optimizer_program() +optimizer.minimize(avg_loss) +``` + +Configure hyper parameter during the training: + +```python + +PASS_NUM = 5 #train 5 iterations +epochs = [epoch_id for epoch_id in range(PASS_NUM)] + +# save parameters of model into save_dirname file +save_dirname = "recognize_digits.inference.model" +``` + + +```python +def train_test(train_test_program, + train_test_feed, train_test_reader): + + # save classification accuracy into acc_set + acc_set = [] + # save mean loss in avg_loss_set + avg_loss_set = [] + # transfer each data which is the output of testing reader_yield into network to train + for test_data in train_test_reader(): + acc_np, avg_loss_np = exe.run( + program=train_test_program, + feed=train_test_feed.feed(test_data), + fetch_list=[acc, avg_loss]) + acc_set.append(float(acc_np)) + avg_loss_set.append(float(avg_loss_np)) + # get accuracy and loss value on the test data + acc_val_mean = numpy.array(acc_set).mean() + avg_loss_val_mean = numpy.array(avg_loss_set).mean() + # return mean loss value and mean accuracy + return avg_loss_val_mean, acc_val_mean +``` + +Create executor + +```python +exe = fluid.Executor(place) +exe.run(fluid.default_startup_program()) +``` + +Set up main_program and test_program: + +```python +main_program = fluid.default_main_program() +test_program = fluid.default_main_program().clone(for_test=True) +``` + +Start training: + + + + +```python +lists = [] +step = 0 +for epoch_id in epochs: + for step_id, data in enumerate(train_reader()): + metrics = exe.run(main_program, + feed=feeder.feed(data), + fetch_list=[avg_loss, acc]) + if step % 100 == 0: # print a log for every 100 times of training + print("Pass %d, Batch %d, Cost %f" % (step, epoch_id, metrics[0])) + event_handler_plot(train_prompt, step, metrics[0]) + step += 1 + + # test classification result of each epoch + avg_loss_val, acc_val = train_test(train_test_program=test_program, + train_test_reader=test_reader, + train_test_feed=feeder) + + print("Test with Epoch %d, avg_cost: %s, acc: %s" %(epoch_id, avg_loss_val, acc_val)) + event_handler_plot(test_prompt, step, metrics[0]) + + lists.append((epoch_id, avg_loss_val, acc_val)) + + # save parameters of trained model for prediction + if save_dirname is not None: + fluid.io.save_inference_model(save_dirname, + ["img"], [prediction], exe, + model_filename=None, + params_filename=None) + +# Choose the best pass +best = sorted(lists, key=lambda list: float(list[1]))[0] +print('Best pass is %s, testing Avgcost is %s' % (best[0], best[1])) +print('The classification accuracy is %.2f%%' % (float(best[2]) * 100)) +``` + + +The training process is completely automatic. The log printed in event_handler is like as follows. + +Pass represents iterations of train. Batch represents times to train all data. cost represents loss value of current pass. + +Compute the mean loss and accuracy of classification after an epoch. + +``` +Pass 0, Batch 0, Cost 0.125650 +Pass 100, Batch 0, Cost 0.161387 +Pass 200, Batch 0, Cost 0.040036 +Pass 300, Batch 0, Cost 0.023391 +Pass 400, Batch 0, Cost 0.005856 +Pass 500, Batch 0, Cost 0.003315 +Pass 600, Batch 0, Cost 0.009977 +Pass 700, Batch 0, Cost 0.020959 +Pass 800, Batch 0, Cost 0.105560 +Pass 900, Batch 0, Cost 0.239809 +Test with Epoch 0, avg_cost: 0.053097883707459624, acc: 0.9822850318471338 +``` + +Check prediction accuracy of the model after training. 
In the train with MNIST, generally classification accuracy of softmax regression model is about 92.34%, while that of multilayer perceptron is 97.66% and that of convolutional neural network is 99.20%. + + +## Deploy the Model + +You can use trained model to classify handwriting pictures of digits. The program below shows how to use well-trained model to predict. + +### Generate input data to be inferred + +`infer_3.png` is an example picture of number 3. Transform it into a numpy to match feed data format + + +```python +def load_image(file): + # open the image file and covert to grayscale + im = Image.open(file).convert('L') + # adjust the input image to a 28*28 high quality image + im = im.resize((28, 28), Image.ANTIALIAS) + # convert img to numpy + im = numpy.array(im).reshape(1, 1, 28, 28).astype(numpy.float32) + # normalize + im = im / 255.0 * 2.0 - 1.0 + return im + +cur_dir = os.getcwd() +tensor_img = load_image(cur_dir + '/image/infer_3.png') +``` + +### Inference + +By configuring network and training parameters via `load_inference_model`, We can simply insert classifier defined before. + + + +```python +inference_scope = fluid.core.Scope() +with fluid.scope_guard(inference_scope): + # use fluid.io.load_inference_model to get inference program desc, + # feed_target_names is used to define variable name needed to be passed into network + # fetch_targets define variable name to be fetched from network + [inference_program, feed_target_names, + fetch_targets] = fluid.io.load_inference_model( + save_dirname, exe, None, None) + + # Make feed a dictionary {feed_target_name: feed_target_data} + # The result will contain a data list corresponding to fetch_targets + results = exe.run(inference_program, + feed={feed_target_names[0]: tensor_img}, + fetch_list=fetch_targets) + lab = numpy.argsort(results) + + # Print prediction result of infer_3.png + img=Image.open('image/infer_3.png') + plt.imshow(img) + print("Inference result of image/infer_3.png is: %d" % lab[0][0][-1]) +``` + + + + +### Result + +If successful, the inference result input is as follows: +`Inference result of image/infer_3.png is: 3` , which indicates that out network successfully recognize the picture! + +## Summary + +Softmax regression, multilayer perceptron and convolutional neural network are the most basic deep learning model, from which complex neural networks are all derivative, so these models are helpful for later learning. At the same time, we found that from simple softmax regression transform to slightly complex convolutional neural network, the accuracy of recognition on MNIST dataset largely increased, resulting from that convolution layer is featured with local connection and sharing weight. When study of new models later, hope you make a deep understand of the key upgrade of new model compared with original model. In addition, this tutorial also talks about the basic steps to build PaddlePadle model, from the code of dataprovider, build of network to training and prediction. Familiar with the work flow, you can use your own data, define your own network model and finish your training and prediction tasks. + + +## References + +1. LeCun, Yann, Léon Bottou, Yoshua Bengio, and Patrick Haffner. ["Gradient-based learning applied to document recognition."](http://ieeexplore.ieee.org/abstract/document/726791/) Proceedings of the IEEE 86, no. 11 (1998): 2278-2324. +2. Wejéus, Samuel. 
["A Neural Network Approach to Arbitrary SymbolRecognition on Modern Smartphones."](http://www.diva-portal.org/smash/record.jsf?pid=diva2%3A753279&dswid=-434) (2014). +3. Decoste, Dennis, and Bernhard Schölkopf. ["Training invariant support vector machines."](http://link.springer.com/article/10.1023/A:1012454411458) Machine learning 46, no. 1-3 (2002): 161-190. +4. Simard, Patrice Y., David Steinkraus, and John C. Platt. ["Best Practices for Convolutional Neural Networks Applied to Visual Document Analysis."](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.160.8494&rep=rep1&type=pdf) In ICDAR, vol. 3, pp. 958-962. 2003. +5. Salakhutdinov, Ruslan, and Geoffrey E. Hinton. ["Learning a Nonlinear Embedding by Preserving Class Neighbourhood Structure."](http://www.jmlr.org/proceedings/papers/v2/salakhutdinov07a/salakhutdinov07a.pdf) In AISTATS, vol. 11. 2007. +6. Cireşan, Dan Claudiu, Ueli Meier, Luca Maria Gambardella, and Jürgen Schmidhuber. ["Deep, big, simple neural nets for handwritten digit recognition."](http://www.mitpressjournals.org/doi/abs/10.1162/NECO_a_00052) Neural computation 22, no. 12 (2010): 3207-3220. +7. Deng, Li, Michael L. Seltzer, Dong Yu, Alex Acero, Abdel-rahman Mohamed, and Geoffrey E. Hinton. ["Binary coding of speech spectrograms using a deep auto-encoder."](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.185.1908&rep=rep1&type=pdf) In Interspeech, pp. 1692-1695. 2010. +8. Kégl, Balázs, and Róbert Busa-Fekete. ["Boosting products of base classifiers."](http://dl.acm.org/citation.cfm?id=1553439) In Proceedings of the 26th Annual International Conference on Machine Learning, pp. 497-504. ACM, 2009. +9. Rosenblatt, Frank. ["The perceptron: A probabilistic model for information storage and organization in the brain."](http://psycnet.apa.org/journals/rev/65/6/386/) Psychological review 65, no. 6 (1958): 386. +10. Bishop, Christopher M. ["Pattern recognition."](http://users.isr.ist.utl.pt/~wurmd/Livros/school/Bishop%20-%20Pattern%20Recognition%20And%20Machine%20Learning%20-%20Springer%20%202006.pdf) Machine Learning 128 (2006): 1-58. + +
+知识共享许可协议
This tutorial is contributed by PaddlePaddle, and licensed under a Creative Commons Attribution-ShareAlike 4.0 International License. diff --git a/doc/paddle/user_guides/simple_case/recognize_digits/_ce.py b/doc/paddle/user_guides/simple_case/recognize_digits/_ce.py new file mode 100644 index 0000000000000000000000000000000000000000..6404ea90ce4454f3eec6270301fa4493388af3da --- /dev/null +++ b/doc/paddle/user_guides/simple_case/recognize_digits/_ce.py @@ -0,0 +1,40 @@ +### This file is only used for continuous evaluation test! +from __future__ import print_function +from __future__ import division +from __future__ import absolute_import +import os +import sys +sys.path.append(os.environ['ceroot']) +from kpi import CostKpi +from kpi import AccKpi + +train_cost_kpi = CostKpi( + 'train_cost', 0.02, 0, actived=True, desc='train cost') +test_cost_kpi = CostKpi('test_cost', 0.02, 0, actived=True, desc='test cost') +test_acc_kpi = AccKpi('test_acc', 0.02, 0, actived=True, desc='test acc') +tracking_kpis = [train_cost_kpi, test_cost_kpi, test_acc_kpi] + + +def parse_log(log): + for line in log.split('\n'): + fs = line.strip().split('\t') + print(fs) + if len(fs) == 3 and fs[0] == 'kpis': + kpi_name = fs[1] + kpi_value = float(fs[2]) + yield kpi_name, kpi_value + + +def log_to_ce(log): + kpi_tracker = {} + for kpi in tracking_kpis: + kpi_tracker[kpi.name] = kpi + for (kpi_name, kpi_value) in parse_log(log): + print(kpi_name, kpi_value) + kpi_tracker[kpi_name].add_record(kpi_value) + kpi_tracker[kpi_name].persist() + + +if __name__ == '__main__': + log = sys.stdin.read() + log_to_ce(log) diff --git a/doc/paddle/user_guides/simple_case/recognize_digits/client/client.py b/doc/paddle/user_guides/simple_case/recognize_digits/client/client.py new file mode 100644 index 0000000000000000000000000000000000000000..bda7cec91677c9fdc9dd2846a02acb7bee1d0b01 --- /dev/null +++ b/doc/paddle/user_guides/simple_case/recognize_digits/client/client.py @@ -0,0 +1,24 @@ +import requests +from PIL import Image +import numpy as np +import os + +# this client is used by Paddle serve: https://github.com/PaddlePaddle/book/tree/develop/serve +# please do not use it directly + + +def load_image(file): + im = Image.open(file).convert('L') + im = im.resize((28, 28), Image.ANTIALIAS) + im = np.array(im).astype(np.float32).flatten() + im = im / 255.0 + return im + + +cur_dir = os.path.dirname(os.path.realpath(__file__)) +data = load_image(cur_dir + '/../image/infer_3.png') +data = data.tolist() + +r = requests.post("http://0.0.0.0:8000", json={'img': data}) + +print(r.text) diff --git a/doc/paddle/user_guides/simple_case/recognize_digits/image/01.gif b/doc/paddle/user_guides/simple_case/recognize_digits/image/01.gif new file mode 100644 index 0000000000000000000000000000000000000000..90c8349b08bebc35122766fc6c67f5f9c0a53db2 Binary files /dev/null and b/doc/paddle/user_guides/simple_case/recognize_digits/image/01.gif differ diff --git a/doc/paddle/user_guides/simple_case/recognize_digits/image/02.gif b/doc/paddle/user_guides/simple_case/recognize_digits/image/02.gif new file mode 100644 index 0000000000000000000000000000000000000000..4a023509e8b6862cddc674ce6682c5b8549feb4f Binary files /dev/null and b/doc/paddle/user_guides/simple_case/recognize_digits/image/02.gif differ diff --git a/doc/paddle/user_guides/simple_case/recognize_digits/image/03.gif b/doc/paddle/user_guides/simple_case/recognize_digits/image/03.gif new file mode 100644 index 0000000000000000000000000000000000000000..521bc209d5a496f28e42f72b51bf45ea6bb4c2f1 Binary 
files /dev/null and b/doc/paddle/user_guides/simple_case/recognize_digits/image/03.gif differ diff --git a/doc/paddle/user_guides/simple_case/recognize_digits/image/04.gif b/doc/paddle/user_guides/simple_case/recognize_digits/image/04.gif new file mode 100644 index 0000000000000000000000000000000000000000..c201c5f480a63b03a58c9d02f5e059cb4a39634f Binary files /dev/null and b/doc/paddle/user_guides/simple_case/recognize_digits/image/04.gif differ diff --git a/doc/paddle/user_guides/simple_case/recognize_digits/image/05.gif b/doc/paddle/user_guides/simple_case/recognize_digits/image/05.gif new file mode 100644 index 0000000000000000000000000000000000000000..fde225756e3a9696ef4d8f08c92c71e47460589d Binary files /dev/null and b/doc/paddle/user_guides/simple_case/recognize_digits/image/05.gif differ diff --git a/doc/paddle/user_guides/simple_case/recognize_digits/image/cnn.png b/doc/paddle/user_guides/simple_case/recognize_digits/image/cnn.png new file mode 100644 index 0000000000000000000000000000000000000000..3f5cdaacdc6acce41c5c6c99649be46685cf9903 Binary files /dev/null and b/doc/paddle/user_guides/simple_case/recognize_digits/image/cnn.png differ diff --git a/doc/paddle/user_guides/simple_case/recognize_digits/image/cnn_en.png b/doc/paddle/user_guides/simple_case/recognize_digits/image/cnn_en.png new file mode 100644 index 0000000000000000000000000000000000000000..bc1a9a4ccf81972dc0d69cf4c808a52218e14d61 Binary files /dev/null and b/doc/paddle/user_guides/simple_case/recognize_digits/image/cnn_en.png differ diff --git a/doc/paddle/user_guides/simple_case/recognize_digits/image/cnn_train_log.png b/doc/paddle/user_guides/simple_case/recognize_digits/image/cnn_train_log.png new file mode 100644 index 0000000000000000000000000000000000000000..65bd17eacd41bbdbdb042bd1ba366eb53663b410 Binary files /dev/null and b/doc/paddle/user_guides/simple_case/recognize_digits/image/cnn_train_log.png differ diff --git a/doc/paddle/user_guides/simple_case/recognize_digits/image/cnn_train_log_en.png b/doc/paddle/user_guides/simple_case/recognize_digits/image/cnn_train_log_en.png new file mode 100644 index 0000000000000000000000000000000000000000..77524754df906ab096e120bd657449f4565c3418 Binary files /dev/null and b/doc/paddle/user_guides/simple_case/recognize_digits/image/cnn_train_log_en.png differ diff --git a/doc/paddle/user_guides/simple_case/recognize_digits/image/conv_layer.png b/doc/paddle/user_guides/simple_case/recognize_digits/image/conv_layer.png new file mode 100644 index 0000000000000000000000000000000000000000..c751892ba0be3ae803b5933c3f33487ecfb6fe7f Binary files /dev/null and b/doc/paddle/user_guides/simple_case/recognize_digits/image/conv_layer.png differ diff --git a/doc/paddle/user_guides/simple_case/recognize_digits/image/infer_3.png b/doc/paddle/user_guides/simple_case/recognize_digits/image/infer_3.png new file mode 100644 index 0000000000000000000000000000000000000000..030cd60d3b4af9aecd4941204da4ad15f6e1189f Binary files /dev/null and b/doc/paddle/user_guides/simple_case/recognize_digits/image/infer_3.png differ diff --git a/doc/paddle/user_guides/simple_case/recognize_digits/image/max_pooling.png b/doc/paddle/user_guides/simple_case/recognize_digits/image/max_pooling.png new file mode 100644 index 0000000000000000000000000000000000000000..90b02fa2a735cfcc9efb2de90906325dedcb358c Binary files /dev/null and b/doc/paddle/user_guides/simple_case/recognize_digits/image/max_pooling.png differ diff --git a/doc/paddle/user_guides/simple_case/recognize_digits/image/max_pooling_en.png 
b/doc/paddle/user_guides/simple_case/recognize_digits/image/max_pooling_en.png new file mode 100644 index 0000000000000000000000000000000000000000..c626723512b6ee02abd55e5bab65e7629d130522 Binary files /dev/null and b/doc/paddle/user_guides/simple_case/recognize_digits/image/max_pooling_en.png differ diff --git a/doc/paddle/user_guides/simple_case/recognize_digits/image/mlp.png b/doc/paddle/user_guides/simple_case/recognize_digits/image/mlp.png new file mode 100644 index 0000000000000000000000000000000000000000..9f4d26cd8da32201d0a5e9c72d466301dd2b42a1 Binary files /dev/null and b/doc/paddle/user_guides/simple_case/recognize_digits/image/mlp.png differ diff --git a/doc/paddle/user_guides/simple_case/recognize_digits/image/mlp_en.png b/doc/paddle/user_guides/simple_case/recognize_digits/image/mlp_en.png new file mode 100644 index 0000000000000000000000000000000000000000..1fedea6a75abbf132cbbcf8ab10ce045997d697a Binary files /dev/null and b/doc/paddle/user_guides/simple_case/recognize_digits/image/mlp_en.png differ diff --git a/doc/paddle/user_guides/simple_case/recognize_digits/image/mlp_train_log.png b/doc/paddle/user_guides/simple_case/recognize_digits/image/mlp_train_log.png new file mode 100644 index 0000000000000000000000000000000000000000..f5a478fdc24f29c17555a2f1451f3f5a079faed9 Binary files /dev/null and b/doc/paddle/user_guides/simple_case/recognize_digits/image/mlp_train_log.png differ diff --git a/doc/paddle/user_guides/simple_case/recognize_digits/image/mlp_train_log_en.png b/doc/paddle/user_guides/simple_case/recognize_digits/image/mlp_train_log_en.png new file mode 100644 index 0000000000000000000000000000000000000000..7d5508a1eccfcea1925f438043ee93b57769bebf Binary files /dev/null and b/doc/paddle/user_guides/simple_case/recognize_digits/image/mlp_train_log_en.png differ diff --git a/doc/paddle/user_guides/simple_case/recognize_digits/image/mnist_example_image.png b/doc/paddle/user_guides/simple_case/recognize_digits/image/mnist_example_image.png new file mode 100644 index 0000000000000000000000000000000000000000..4edd7cabf8a2282f6392ac1421c7ca4afb288589 Binary files /dev/null and b/doc/paddle/user_guides/simple_case/recognize_digits/image/mnist_example_image.png differ diff --git a/doc/paddle/user_guides/simple_case/recognize_digits/image/softmax_regression.png b/doc/paddle/user_guides/simple_case/recognize_digits/image/softmax_regression.png new file mode 100644 index 0000000000000000000000000000000000000000..40b98298288b9c406fce1cbca9c913753020a94d Binary files /dev/null and b/doc/paddle/user_guides/simple_case/recognize_digits/image/softmax_regression.png differ diff --git a/doc/paddle/user_guides/simple_case/recognize_digits/image/softmax_regression_en.png b/doc/paddle/user_guides/simple_case/recognize_digits/image/softmax_regression_en.png new file mode 100644 index 0000000000000000000000000000000000000000..833d3c663c94dd2d57fd19686949ded37a91f541 Binary files /dev/null and b/doc/paddle/user_guides/simple_case/recognize_digits/image/softmax_regression_en.png differ diff --git a/doc/paddle/user_guides/simple_case/recognize_digits/image/softmax_train_log.png b/doc/paddle/user_guides/simple_case/recognize_digits/image/softmax_train_log.png new file mode 100644 index 0000000000000000000000000000000000000000..47204941af7f22e68386a70a06ec4f122b83e262 Binary files /dev/null and b/doc/paddle/user_guides/simple_case/recognize_digits/image/softmax_train_log.png differ diff --git a/doc/paddle/user_guides/simple_case/recognize_digits/image/softmax_train_log_en.png 
b/doc/paddle/user_guides/simple_case/recognize_digits/image/softmax_train_log_en.png new file mode 100644 index 0000000000000000000000000000000000000000..6fa0a951d5262effb707e3e15af8cb900e5560b8 Binary files /dev/null and b/doc/paddle/user_guides/simple_case/recognize_digits/image/softmax_train_log_en.png differ diff --git a/doc/paddle/user_guides/simple_case/recognize_digits/image/train_and_test.png b/doc/paddle/user_guides/simple_case/recognize_digits/image/train_and_test.png new file mode 100644 index 0000000000000000000000000000000000000000..5cb87b450d0398bcfaec0e647c362052069797e7 Binary files /dev/null and b/doc/paddle/user_guides/simple_case/recognize_digits/image/train_and_test.png differ diff --git a/doc/paddle/user_guides/simple_case/recognize_digits/index.cn.html b/doc/paddle/user_guides/simple_case/recognize_digits/index.cn.html new file mode 100644 index 0000000000000000000000000000000000000000..191c1f1e766ea5c65fe09de715283b64024659aa --- /dev/null +++ b/doc/paddle/user_guides/simple_case/recognize_digits/index.cn.html @@ -0,0 +1,688 @@ + + + + + + + + + + + + + + + + + +
+
+ + + + + + + diff --git a/doc/paddle/user_guides/simple_case/recognize_digits/index.html b/doc/paddle/user_guides/simple_case/recognize_digits/index.html new file mode 100644 index 0000000000000000000000000000000000000000..df981aa586414671ae43e132ab1178848d75e8b6 --- /dev/null +++ b/doc/paddle/user_guides/simple_case/recognize_digits/index.html @@ -0,0 +1,679 @@ + + + + + + + + + + + + + + + + + +
+
+ + + + + + + diff --git a/doc/paddle/user_guides/simple_case/recognize_digits/train.py b/doc/paddle/user_guides/simple_case/recognize_digits/train.py new file mode 100644 index 0000000000000000000000000000000000000000..1d1fa9a7c64e9c9dd99f0ae3b0234a139b4d6de0 --- /dev/null +++ b/doc/paddle/user_guides/simple_case/recognize_digits/train.py @@ -0,0 +1,249 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import os +import argparse +from PIL import Image +import numpy +import paddle +import paddle.fluid as fluid + + +def parse_args(): + parser = argparse.ArgumentParser("mnist") + parser.add_argument( + '--enable_ce', + action='store_true', + help="If set, run the task with continuous evaluation logs.") + parser.add_argument( + '--use_gpu', + type=bool, + default=False, + help="Whether to use GPU or not.") + parser.add_argument( + '--num_epochs', type=int, default=5, help="number of epochs.") + args = parser.parse_args() + return args + + +def loss_net(hidden, label): + prediction = fluid.layers.fc(input=hidden, size=10, act='softmax') + loss = fluid.layers.cross_entropy(input=prediction, label=label) + avg_loss = fluid.layers.mean(loss) + acc = fluid.layers.accuracy(input=prediction, label=label) + return prediction, avg_loss, acc + + +def multilayer_perceptron(img, label): + img = fluid.layers.fc(input=img, size=200, act='tanh') + hidden = fluid.layers.fc(input=img, size=200, act='tanh') + return loss_net(hidden, label) + + +def softmax_regression(img, label): + return loss_net(img, label) + + +def convolutional_neural_network(img, label): + conv_pool_1 = fluid.nets.simple_img_conv_pool( + input=img, + filter_size=5, + num_filters=20, + pool_size=2, + pool_stride=2, + act="relu") + conv_pool_1 = fluid.layers.batch_norm(conv_pool_1) + conv_pool_2 = fluid.nets.simple_img_conv_pool( + input=conv_pool_1, + filter_size=5, + num_filters=50, + pool_size=2, + pool_stride=2, + act="relu") + return loss_net(conv_pool_2, label) + + +def train(nn_type, + use_cuda, + save_dirname=None, + model_filename=None, + params_filename=None): + if use_cuda and not fluid.core.is_compiled_with_cuda(): + return + + startup_program = fluid.default_startup_program() + main_program = fluid.default_main_program() + + if args.enable_ce: + train_reader = paddle.batch( + paddle.dataset.mnist.train(), batch_size=BATCH_SIZE) + test_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=BATCH_SIZE) + startup_program.random_seed = 90 + main_program.random_seed = 90 + else: + train_reader = paddle.batch( + paddle.reader.shuffle(paddle.dataset.mnist.train(), buf_size=500), + batch_size=BATCH_SIZE) + test_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=BATCH_SIZE) + + img = fluid.data(name='img', shape=[None, 1, 28, 28], dtype='float32') + label = fluid.data(name='label', shape=[None, 1], dtype='int64') + + if nn_type == 'softmax_regression': + net_conf = softmax_regression + elif nn_type == 
'multilayer_perceptron': + net_conf = multilayer_perceptron + else: + net_conf = convolutional_neural_network + + prediction, avg_loss, acc = net_conf(img, label) + + test_program = main_program.clone(for_test=True) + optimizer = fluid.optimizer.Adam(learning_rate=0.001) + optimizer.minimize(avg_loss) + + def train_test(train_test_program, train_test_feed, train_test_reader): + acc_set = [] + avg_loss_set = [] + for test_data in train_test_reader(): + acc_np, avg_loss_np = exe.run( + program=train_test_program, + feed=train_test_feed.feed(test_data), + fetch_list=[acc, avg_loss]) + acc_set.append(float(acc_np)) + avg_loss_set.append(float(avg_loss_np)) + # get test acc and loss + acc_val_mean = numpy.array(acc_set).mean() + avg_loss_val_mean = numpy.array(avg_loss_set).mean() + return avg_loss_val_mean, acc_val_mean + + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + + exe = fluid.Executor(place) + + feeder = fluid.DataFeeder(feed_list=[img, label], place=place) + exe.run(startup_program) + epochs = [epoch_id for epoch_id in range(PASS_NUM)] + + lists = [] + step = 0 + for epoch_id in epochs: + for step_id, data in enumerate(train_reader()): + metrics = exe.run( + main_program, + feed=feeder.feed(data), + fetch_list=[avg_loss, acc]) + if step % 100 == 0: + print("Pass %d, Epoch %d, Cost %f" % (step, epoch_id, + metrics[0])) + step += 1 + # test for epoch + avg_loss_val, acc_val = train_test( + train_test_program=test_program, + train_test_reader=test_reader, + train_test_feed=feeder) + + print("Test with Epoch %d, avg_cost: %s, acc: %s" % + (epoch_id, avg_loss_val, acc_val)) + lists.append((epoch_id, avg_loss_val, acc_val)) + if save_dirname is not None: + fluid.io.save_inference_model( + save_dirname, ["img"], [prediction], + exe, + model_filename=model_filename, + params_filename=params_filename) + + if args.enable_ce: + print("kpis\ttrain_cost\t%f" % metrics[0]) + print("kpis\ttest_cost\t%s" % avg_loss_val) + print("kpis\ttest_acc\t%s" % acc_val) + + # find the best pass + best = sorted(lists, key=lambda list: float(list[1]))[0] + print('Best pass is %s, testing Avgcost is %s' % (best[0], best[1])) + print('The classification accuracy is %.2f%%' % (float(best[2]) * 100)) + + +def infer(use_cuda, + save_dirname=None, + model_filename=None, + params_filename=None): + if save_dirname is None: + return + + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + exe = fluid.Executor(place) + + def load_image(file): + im = Image.open(file).convert('L') + im = im.resize((28, 28), Image.ANTIALIAS) + im = numpy.array(im).reshape(1, 1, 28, 28).astype(numpy.float32) + im = im / 255.0 * 2.0 - 1.0 + return im + + cur_dir = os.path.dirname(os.path.realpath(__file__)) + tensor_img = load_image(cur_dir + '/image/infer_3.png') + + inference_scope = fluid.core.Scope() + with fluid.scope_guard(inference_scope): + # Use fluid.io.load_inference_model to obtain the inference program desc, + # the feed_target_names (the names of variables that will be feeded + # data using feed operators), and the fetch_targets (variables that + # we want to obtain data from using fetch operators). + [inference_program, feed_target_names, + fetch_targets] = fluid.io.load_inference_model( + save_dirname, exe, model_filename, params_filename) + + # Construct feed as a dictionary of {feed_target_name: feed_target_data} + # and results will contain a list of data corresponding to fetch_targets. 
+ results = exe.run( + inference_program, + feed={feed_target_names[0]: tensor_img}, + fetch_list=fetch_targets) + lab = numpy.argsort(results) + print("Inference result of image/infer_3.png is: %d" % lab[0][0][-1]) + + +def main(use_cuda, nn_type): + model_filename = None + params_filename = None + save_dirname = "recognize_digits_" + nn_type + ".inference.model" + + # call train() with is_local argument to run distributed train + train( + nn_type=nn_type, + use_cuda=use_cuda, + save_dirname=save_dirname, + model_filename=model_filename, + params_filename=params_filename) + infer( + use_cuda=use_cuda, + save_dirname=save_dirname, + model_filename=model_filename, + params_filename=params_filename) + + +if __name__ == '__main__': + args = parse_args() + BATCH_SIZE = 64 + PASS_NUM = args.num_epochs + use_cuda = args.use_gpu + # predict = 'softmax_regression' # uncomment for Softmax + # predict = 'multilayer_perceptron' # uncomment for MLP + predict = 'convolutional_neural_network' # uncomment for LeNet5 + main(use_cuda=use_cuda, nn_type=predict) diff --git a/doc/paddle/user_guides/simple_case/word2vec/.gitignore b/doc/paddle/user_guides/simple_case/word2vec/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..a620e0279c310d213d4e6d8e99e666962c11e352 --- /dev/null +++ b/doc/paddle/user_guides/simple_case/word2vec/.gitignore @@ -0,0 +1,3 @@ +data/train.list +data/test.list +data/simple-examples* diff --git a/doc/paddle/user_guides/simple_case/word2vec/.run_ce.sh b/doc/paddle/user_guides/simple_case/word2vec/.run_ce.sh new file mode 100644 index 0000000000000000000000000000000000000000..4c5ae210ff5485c4c9266de73614ee0f4c4d6d6e --- /dev/null +++ b/doc/paddle/user_guides/simple_case/word2vec/.run_ce.sh @@ -0,0 +1,4 @@ +#!/bin/bash +#This file is only used for continuous evaluation. +python train.py --enable_ce | python _ce.py + diff --git a/doc/paddle/user_guides/simple_case/word2vec/README.cn.md b/doc/paddle/user_guides/simple_case/word2vec/README.cn.md new file mode 100644 index 0000000000000000000000000000000000000000..efee0b664db35718573a02b20b7b66fcc01e4976 --- /dev/null +++ b/doc/paddle/user_guides/simple_case/word2vec/README.cn.md @@ -0,0 +1,527 @@ + +# 词向量 + +本教程源代码目录在[book/word2vec](https://github.com/PaddlePaddle/book/tree/develop/04.word2vec),初次使用请您参考[Book文档使用说明](https://github.com/PaddlePaddle/book/blob/develop/README.cn.md#运行这本书)。 + +### 说明 + +1. 本教程可支持在 CPU/GPU 环境下运行 + +2. Docker镜像支持的CUDA/cuDNN版本 + + 如果使用了Docker运行Book,请注意:这里所提供的默认镜像的GPU环境为 CUDA 8/cuDNN 5,对于NVIDIA Tesla V100等要求CUDA 9的 GPU,使用该镜像可能会运行失败; + +3. 
文档和脚本中代码的一致性问题 + + 请注意:为使本文更加易读易用,我们拆分、调整了[train.py](https://github.com/PaddlePaddle/book/blob/develop/04.word2vec/train.py)的代码并放入本文。本文中代码与train.py的运行结果一致,可直接运行train.py进行验证。 + +## 背景介绍 + +本章我们介绍词的向量表征,也称为word embedding。词向量是自然语言处理中常见的一个操作,是搜索引擎、广告系统、推荐系统等互联网服务背后常见的基础技术。 + +在这些互联网服务里,我们经常要比较两个词或者两段文本之间的相关性。为了做这样的比较,我们往往先要把词表示成计算机适合处理的方式。最自然的方式恐怕莫过于向量空间模型(vector space model)。 +在这种方式里,每个词被表示成一个实数向量(one-hot vector),其长度为字典大小,每个维度对应一个字典里的每个词,除了这个词对应维度上的值是1,其他元素都是0。 + +One-hot vector虽然自然,但是用处有限。比如,在互联网广告系统里,如果用户输入的query是“母亲节”,而有一个广告的关键词是“康乃馨”。虽然按照常理,我们知道这两个词之间是有联系的——母亲节通常应该送给母亲一束康乃馨;但是这两个词对应的one-hot vectors之间的距离度量,无论是欧氏距离还是余弦相似度(cosine similarity),由于其向量正交,都认为这两个词毫无相关性。 得出这种与我们相悖的结论的根本原因是:每个词本身的信息量都太小。所以,仅仅给定两个词,不足以让我们准确判别它们是否相关。要想精确计算相关性,我们还需要更多的信息——从大量数据里通过机器学习方法归纳出来的知识。 + +在机器学习领域里,各种“知识”被各种模型表示,词向量模型(word embedding model)就是其中的一类。通过词向量模型可将一个 one-hot vector映射到一个维度更低的实数向量(embedding vector),如$embedding(母亲节) = [0.3, 4.2, -1.5, ...], embedding(康乃馨) = [0.2, 5.6, -2.3, ...]$。在这个映射到的实数向量表示中,希望两个语义(或用法)上相似的词对应的词向量“更像”,这样如“母亲节”和“康乃馨”的对应词向量的余弦相似度就不再为零了。 + +词向量模型可以是概率模型、共生矩阵(co-occurrence matrix)模型或神经元网络模型。在用神经网络求词向量之前,传统做法是统计一个词语的共生矩阵$X$。$X$是一个$|V| \times |V|$ 大小的矩阵,$X_{ij}$表示在所有语料中,词汇表$V$(vocabulary)中第i个词和第j个词同时出现的词数,$|V|$为词汇表的大小。对$X$做矩阵分解(如奇异值分解,Singular Value Decomposition \[[5](#参考文献)\]),得到的$U$即视为所有词的词向量: + +

+$$X = USV^{T}$$

+ +但这样的传统做法有很多问题: + +1) 由于很多词没有出现,导致矩阵极其稀疏,因此需要对词频做额外处理来达到好的矩阵分解效果; + +2) 矩阵非常大,维度太高(通常达到$10^6 \times 10^6$的数量级); + +3) 需要手动去掉停用词(如although, a,...),不然这些频繁出现的词也会影响矩阵分解的效果。 + +基于神经网络的模型不需要计算和存储一个在全语料上统计产生的大表,而是通过学习语义信息得到词向量,因此能很好地解决以上问题。在本章里,我们将展示基于神经网络训练词向量的细节,以及如何用PaddlePaddle训练一个词向量模型。 + + +## 效果展示 + +本章中,当词向量训练好后,我们可以用数据可视化算法t-SNE\[[4](#参考文献)\]画出词语特征在二维上的投影(如下图所示)。从图中可以看出,语义相关的词语(如a, the, these; big, huge)在投影上距离很近,语意无关的词(如say, business; decision, japan)在投影上的距离很远。 + +

+ 图1. 词向量的二维投影

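下文将用余弦相似度衡量两个词向量的相关性,这里先给出一个最小示例(仅作演示,与教程后文的 `calculate_dis.py` 脚本无关,向量取值沿用上文假设的词向量):

```python
import numpy as np

def cosine_similarity(a, b):
    # 余弦相似度 = 向量内积 / (两个向量模长的乘积)
    return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))

# 上文中假设的词向量(仅示意)
v_mothers_day = np.array([0.3, 4.2, -1.5])   # embedding(母亲节)
v_carnation = np.array([0.2, 5.6, -2.3])     # embedding(康乃馨)
print(cosine_similarity(v_mothers_day, v_carnation))  # 接近 1,表示语义相近
```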
+ +另一方面,我们知道两个向量的余弦值在$[-1,1]$的区间内:两个完全相同的向量余弦值为1, 两个相互垂直的向量之间余弦值为0,两个方向完全相反的向量余弦值为-1,即相关性和余弦值大小成正比。因此我们还可以计算两个词向量的余弦相似度: + +``` + +please input two words: big huge +similarity: 0.899180685161 + +please input two words: from company +similarity: -0.0997506977351 + +``` + +以上结果可以通过运行`calculate_dis.py`, 加载字典里的单词和对应训练特征结果得到,我们将在[模型应用](#模型应用)中详细描述用法。 + + +## 模型概览 + +在这里我们介绍三个训练词向量的模型:N-gram模型,CBOW模型和Skip-gram模型,它们的中心思想都是通过上下文得到一个词出现的概率。对于N-gram模型,我们会先介绍语言模型的概念,并在之后的[训练模型](#训练模型)中,带大家用PaddlePaddle实现它。而后两个模型,是近年来最有名的神经元词向量模型,由 Tomas Mikolov 在Google 研发\[[3](#参考文献)\],虽然它们很浅很简单,但训练效果很好。 + +### 语言模型 + +在介绍词向量模型之前,我们先来引入一个概念:语言模型。 +语言模型旨在为语句的联合概率函数$P(w_1, ..., w_T)$建模, 其中$w_i$表示句子中的第i个词。语言模型的目标是,希望模型对有意义的句子赋予大概率,对没意义的句子赋予小概率。 +这样的模型可以应用于很多领域,如机器翻译、语音识别、信息检索、词性标注、手写识别等,它们都希望能得到一个连续序列的概率。 以信息检索为例,当你在搜索“how long is a football bame”时(bame是一个医学名词),搜索引擎会提示你是否希望搜索"how long is a football game", 这是因为根据语言模型计算出“how long is a football bame”的概率很低,而与bame近似的,可能引起错误的词中,game会使该句生成的概率最大。 + +对语言模型的目标概率$P(w_1, ..., w_T)$,如果假设文本中每个词都是相互独立的,则整句话的联合概率可以表示为其中所有词语条件概率的乘积,即: + + +

+$$P(w_1, ..., w_T) = \prod_{t=1}^TP(w_t)$$

+ + +然而我们知道语句中的每个词出现的概率都与其前面的词紧密相关, 所以实际上通常用条件概率表示语言模型: + +

+$$P(w_1, ..., w_T) = \prod_{t=1}^T P(w_t \mid w_1, ... , w_{t-1})$$

+ + + +### N-gram neural model + +在计算语言学中,n-gram是一种重要的文本表示方法,表示一个文本中连续的n个项。基于具体的应用场景,每一项可以是一个字母、单词或者音节。 n-gram模型也是统计语言模型中的一种重要方法,用n-gram训练语言模型时,一般用每个n-gram的历史n-1个词语组成的内容来预测第n个词。 + +Yoshua Bengio等科学家就于2003年在著名论文 Neural Probabilistic Language Models \[[1](#参考文献)\] 中介绍如何学习一个神经元网络表示的词向量模型。文中的神经概率语言模型(Neural Network Language Model,NNLM)通过一个线性映射和一个非线性隐层连接,同时学习了语言模型和词向量,即通过学习大量语料得到词语的向量表达,通过这些向量得到整个句子的概率。因所有的词语都用一个低维向量来表示,用这种方法学习语言模型可以克服维度灾难(curse of dimensionality)。注意:由于“神经概率语言模型”说法较为泛泛,我们在这里不用其NNLM的本名,考虑到其具体做法,本文中称该模型为N-gram neural model。 + +我们在上文中已经讲到用条件概率建模语言模型,即一句话中第$t$个词的概率和该句话的前$t-1$个词相关。可实际上越远的词语其实对该词的影响越小,那么如果考虑一个n-gram, 每个词都只受其前面`n-1`个词的影响,则有: + +

+$$P(w_1, ..., w_T) = \prod_{t=n}^TP(w_t|w_{t-1}, w_{t-2}, ..., w_{t-n+1})$$

+ + +给定一些真实语料,这些语料中都是有意义的句子,N-gram模型的优化目标则是最大化目标函数: + +

+$$\frac{1}{T}\sum_t f(w_t, w_{t-1}, ..., w_{t-n+1};\theta) + R(\theta)$$

+ +其中$f(w_t, w_{t-1}, ..., w_{t-n+1})$表示根据历史n-1个词得到当前词$w_t$的条件概率,$R(\theta)$表示参数正则项。 + +

+ 图2. N-gram神经网络模型

+ +图2展示了N-gram神经网络模型,从下往上看,该模型分为以下几个部分: + - 对于每个样本,模型输入$w_{t-n+1},...w_{t-1}$, 输出句子第t个词在字典中`|V|`个词上的概率分布。 + + 每个输入词$w_{t-n+1},...w_{t-1}$首先通过映射矩阵映射到词向量$C(w_{t-n+1}),...C(w_{t-1})$。 + + - 然后所有词语的词向量拼接成一个大向量,并经过一个非线性映射得到历史词语的隐层表示: + +

+$$g=U\tanh(\theta^Tx + b_1) + Wx + b_2$$

+ + 其中,$x$为所有词语的词向量拼接成的大向量,表示文本历史特征;$\theta$、$U$、$b_1$、$b_2$和$W$分别为词向量层到隐层连接的参数。$g$表示未经归一化的所有输出单词概率,$g_i$表示未经归一化的字典中第$i$个单词的输出概率。 + + - 根据softmax的定义,通过归一化$g_i$, 生成目标词$w_t$的概率为: + +

+$$P(w_t | w_{t-1}, ..., w_{t-n+1}) = \frac{e^{g_{w_t}}}{\sum_i^{|V|} e^{g_i}}$$

+ + - 整个网络的损失值(cost)为多类分类交叉熵,用公式表示为 + +

+$$J(\theta) = -\sum_{i=1}^N\sum_{k=1}^{|V|}y_k^{i}\log(\text{softmax}(g_k^i))$$

+ + 其中$y_k^i$表示第$i$个样本第$k$类的真实标签(0或1),$\text{softmax}(g_k^i)$表示第i个样本第k类softmax输出的概率。 + + +### Continuous Bag-of-Words model(CBOW) + +CBOW模型通过一个词的上下文(各N个词)预测当前词。当N=2时,模型如下图所示: + +

+ 图3. CBOW模型

+ +具体来说,不考虑上下文的词语输入顺序,CBOW是用上下文词语的词向量的均值来预测当前词。即: + + +

+$$context = \frac{x_{t-1} + x_{t-2} + x_{t+1} + x_{t+2}}{4}$$

+ +其中$x_t$为第$t$个词的词向量,分类分数(score)向量 $z=U*context$,最终的分类$y$采用softmax,损失函数采用多类分类交叉熵。 + +### Skip-gram model + +CBOW的好处是对上下文词语的分布在词向量上进行了平滑,去掉了噪声,因此在小数据集上很有效。而Skip-gram的方法中,用一个词预测其上下文,得到了当前词上下文的很多样本,因此可用于更大的数据集。 + +

+ 图4. Skip-gram模型

+ +如上图所示,Skip-gram模型的具体做法是,将一个词的词向量映射到$2n$个词的词向量($2n$表示当前输入词的前后各$n$个词),然后分别通过softmax得到这$2n$个词的分类损失值之和。 + + +## 数据准备 + +### 数据介绍 + +本教程使用Penn Treebank (PTB)(经Tomas Mikolov预处理过的版本)数据集。PTB数据集较小,训练速度快,应用于Mikolov的公开语言模型训练工具\[[2](#参考文献)\]中。其统计情况如下: + +

+| 训练数据 | 验证数据 | 测试数据 |
+| --- | --- | --- |
+| ptb.train.txt | ptb.valid.txt | ptb.test.txt |
+| 42068句 | 3370句 | 3761句 |

+ + +### 数据预处理 + +本章训练的是5-gram模型,表示在PaddlePaddle训练时,每条数据的前4个词用来预测第5个词。PaddlePaddle提供了对应PTB数据集的python包`paddle.dataset.imikolov`,自动做数据的下载与预处理,方便大家使用。 + +预处理会把数据集中的每一句话前后加上开始符号``以及结束符号``。然后依据窗口大小(本教程中为5),从头到尾每次向右滑动窗口并生成一条数据。 + +如"I have a dream that one day" 一句提供了5条数据: + +```text + I have a dream +I have a dream that +have a dream that one +a dream that one day +dream that one day +``` + +最后,每个输入会按其单词次在字典里的位置,转化成整数的索引序列,作为PaddlePaddle的输入。 + + +## 编程实现 + +本配置的模型结构如下图所示: + +

+
+ 图5. 模型配置中的N-gram神经网络模型 +

+ +首先,加载所需要的包: + +```python + +from __future__ import print_function + +import paddle +import paddle.fluid as fluid +import six +import numpy +import math + +``` + +然后,定义参数: + +```python +EMBED_SIZE = 32 # embedding维度 +HIDDEN_SIZE = 256 # 隐层大小 +N = 5 # ngram大小,这里固定取5 +BATCH_SIZE = 100 # batch大小 +PASS_NUM = 100 # 训练轮数 + +use_cuda = False # 如果用GPU训练,则设置为True + +word_dict = paddle.dataset.imikolov.build_dict() +dict_size = len(word_dict) +``` + +更大的`BATCH_SIZE`将使得训练更快收敛,但也会消耗更多内存。由于词向量计算规模较大,如果环境允许,请开启使用GPU进行训练,能更快得到结果。 +不同于之前的PaddlePaddle v2版本,在新的Fluid版本里,我们不必再手动计算词向量。PaddlePaddle提供了一个内置的方法`fluid.embedding`,我们就可以直接用它来构造 N-gram 神经网络。 + +- 我们来定义我们的 N-gram 神经网络结构。这个结构在训练和预测中都会使用到。因为词向量比较稀疏,我们传入参数 `is_sparse == True`, 可以加速稀疏矩阵的更新。 + +```python +def inference_program(words, is_sparse): + + embed_first = fluid.embedding( + input=words[0], + size=[dict_size, EMBED_SIZE], + dtype='float32', + is_sparse=is_sparse, + param_attr='shared_w') + embed_second = fluid.embedding( + input=words[1], + size=[dict_size, EMBED_SIZE], + dtype='float32', + is_sparse=is_sparse, + param_attr='shared_w') + embed_third = fluid.embedding( + input=words[2], + size=[dict_size, EMBED_SIZE], + dtype='float32', + is_sparse=is_sparse, + param_attr='shared_w') + embed_fourth = fluid.embedding( + input=words[3], + size=[dict_size, EMBED_SIZE], + dtype='float32', + is_sparse=is_sparse, + param_attr='shared_w') + + concat_embed = fluid.layers.concat( + input=[embed_first, embed_second, embed_third, embed_fourth], axis=1) + hidden1 = fluid.layers.fc(input=concat_embed, + size=HIDDEN_SIZE, + act='sigmoid') + predict_word = fluid.layers.fc(input=hidden1, size=dict_size, act='softmax') + return predict_word +``` + +- 基于以上的神经网络结构,我们可以如下定义我们的训练方法 + +```python +def train_program(predict_word): + # 'next_word'的定义必须要在inference_program的声明之后, + # 否则train program输入数据的顺序就变成了[next_word, firstw, secondw, + # thirdw, fourthw], 这是不正确的. 
+ next_word = fluid.data(name='nextw', shape=[None, 1], dtype='int64') + cost = fluid.layers.cross_entropy(input=predict_word, label=next_word) + avg_cost = fluid.layers.mean(cost) + return avg_cost + +def optimizer_func(): + return fluid.optimizer.AdagradOptimizer( + learning_rate=3e-3, + regularization=fluid.regularizer.L2DecayRegularizer(8e-4)) + +``` + +- 现在我们可以开始训练啦。如今的版本较之以前就简单了许多。我们有现成的训练和测试集:`paddle.dataset.imikolov.train()`和`paddle.dataset.imikolov.test()`。两者都会返回一个读取器。在PaddlePaddle中,读取器是一个Python的函数,每次调用,会读取下一条数据。它是一个Python的generator。 + +`paddle.batch` 会读入一个读取器,然后输出一个批次化了的读取器。我们还可以在训练过程中输出每个步骤,批次的训练情况。 + +```python +def train(if_use_cuda, params_dirname, is_sparse=True): + place = fluid.CUDAPlace(0) if if_use_cuda else fluid.CPUPlace() + + train_reader = paddle.batch( + paddle.dataset.imikolov.train(word_dict, N), BATCH_SIZE) + test_reader = paddle.batch( + paddle.dataset.imikolov.test(word_dict, N), BATCH_SIZE) + + first_word = fluid.data(name='firstw', shape=[None, 1], dtype='int64') + second_word = fluid.data(name='secondw', shape=[None, 1], dtype='int64') + third_word = fluid.data(name='thirdw', shape=[None, 1], dtype='int64') + forth_word = fluid.data(name='fourthw', shape=[None, 1], dtype='int64') + next_word = fluid.data(name='nextw', shape=[None, 1], dtype='int64') + + word_list = [first_word, second_word, third_word, forth_word, next_word] + feed_order = ['firstw', 'secondw', 'thirdw', 'fourthw', 'nextw'] + + main_program = fluid.default_main_program() + star_program = fluid.default_startup_program() + + predict_word = inference_program(word_list, is_sparse) + avg_cost = train_program(predict_word) + test_program = main_program.clone(for_test=True) + + optimizer = optimizer_func() + optimizer.minimize(avg_cost) + + exe = fluid.Executor(place) + + def train_test(program, reader): + count = 0 + feed_var_list = [ + program.global_block().var(var_name) for var_name in feed_order + ] + feeder_test = fluid.DataFeeder(feed_list=feed_var_list, place=place) + test_exe = fluid.Executor(place) + accumulated = len([avg_cost]) * [0] + for test_data in reader(): + avg_cost_np = test_exe.run( + program=program, + feed=feeder_test.feed(test_data), + fetch_list=[avg_cost]) + accumulated = [ + x[0] + x[1][0] for x in zip(accumulated, avg_cost_np) + ] + count += 1 + return [x / count for x in accumulated] + + def train_loop(): + step = 0 + feed_var_list_loop = [ + main_program.global_block().var(var_name) for var_name in feed_order + ] + feeder = fluid.DataFeeder(feed_list=feed_var_list_loop, place=place) + exe.run(star_program) + for pass_id in range(PASS_NUM): + for data in train_reader(): + avg_cost_np = exe.run( + main_program, feed=feeder.feed(data), fetch_list=[avg_cost]) + + if step % 10 == 0: + outs = train_test(test_program, test_reader) + + print("Step %d: Average Cost %f" % (step, outs[0])) + + # 整个训练过程要花费几个小时,如果平均损失低于5.8, + # 我们就认为模型已经达到很好的效果可以停止训练了。 + # 注意5.8是一个相对较高的值,为了获取更好的模型,可以将 + # 这里的阈值设为3.5,但训练时间也会更长。 + if outs[0] < 5.8: + if params_dirname is not None: + fluid.io.save_inference_model(params_dirname, [ + 'firstw', 'secondw', 'thirdw', 'fourthw' + ], [predict_word], exe) + return + step += 1 + if math.isnan(float(avg_cost_np[0])): + sys.exit("got NaN loss, training failed.") + + raise AssertionError("Cost is too large {0:2.2}".format(avg_cost_np[0])) + + train_loop() +``` + +- `train_loop`将会开始训练。期间打印训练过程的日志如下: + +```text +Step 0: Average Cost 7.337213 +Step 10: Average Cost 6.136128 +Step 20: Average Cost 5.766995 +... 
+``` + + +## 模型应用 +在模型训练后,我们可以用它做一些预测。 + +### 预测下一个词 +我们可以用我们训练过的模型,在得知之前的 N-gram 后,预测下一个词。 + +```python +def infer(use_cuda, params_dirname=None): + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + + exe = fluid.Executor(place) + + inference_scope = fluid.core.Scope() + with fluid.scope_guard(inference_scope): + # 使用fluid.io.load_inference_model获取inference program, + # feed变量的名称feed_target_names和从scope中fetch的对象fetch_targets + [inferencer, feed_target_names, + fetch_targets] = fluid.io.load_inference_model(params_dirname, exe) + + # 设置输入,用四个LoDTensor来表示4个词语。这里每个词都是一个id, + # 用来查询embedding表获取对应的词向量,因此其形状大小是[1]。 + # recursive_sequence_lengths设置的是基于长度的LoD,因此都应该设为[[1]] + # 注意recursive_sequence_lengths是列表的列表 + data1 = numpy.asarray([[211]], dtype=numpy.int64) # 'among' + data2 = numpy.asarray([[6]], dtype=numpy.int64) # 'a' + data3 = numpy.asarray([[96]], dtype=numpy.int64) # 'group' + data4 = numpy.asarray([[4]], dtype=numpy.int64) # 'of' + lod = numpy.asarray([[1]], dtype=numpy.int64) + + first_word = fluid.create_lod_tensor(data1, lod, place) + second_word = fluid.create_lod_tensor(data2, lod, place) + third_word = fluid.create_lod_tensor(data3, lod, place) + fourth_word = fluid.create_lod_tensor(data4, lod, place) + + assert feed_target_names[0] == 'firstw' + assert feed_target_names[1] == 'secondw' + assert feed_target_names[2] == 'thirdw' + assert feed_target_names[3] == 'fourthw' + + # 构造feed词典 {feed_target_name: feed_target_data} + # 预测结果包含在results之中 + results = exe.run( + inferencer, + feed={ + feed_target_names[0]: first_word, + feed_target_names[1]: second_word, + feed_target_names[2]: third_word, + feed_target_names[3]: fourth_word + }, + fetch_list=fetch_targets, + return_numpy=False) + + print(numpy.array(results[0])) + most_possible_word_index = numpy.argmax(results[0]) + print(most_possible_word_index) + print([ + key for key, value in six.iteritems(word_dict) + if value == most_possible_word_index + ][0]) +``` + +由于词向量矩阵本身比较稀疏,训练的过程如果要达到一定的精度耗时会比较长。为了能简单看到效果,教程只设置了经过很少的训练就结束并得到如下的预测。我们的模型预测 `among a group of` 的下一个词是`the`。这比较符合文法规律。如果我们训练时间更长,比如几个小时,那么我们会得到的下一个预测是 `workers`。预测输出的格式如下所示: + +```text +[[0.03768077 0.03463154 0.00018074 ... 0.00022283 0.00029888 0.02967956]] +0 +the +``` +其中第一行表示预测词在词典上的概率分布,第二行表示概率最大的词对应的id,第三行表示概率最大的词。 + +整个程序的入口很简单: + +```python +def main(use_cuda, is_sparse): + if use_cuda and not fluid.core.is_compiled_with_cuda(): + return + + params_dirname = "word2vec.inference.model" + + train( + if_use_cuda=use_cuda, + params_dirname=params_dirname, + is_sparse=is_sparse) + + infer(use_cuda=use_cuda, params_dirname=params_dirname) + + +main(use_cuda=use_cuda, is_sparse=True) +``` + + +## 总结 +本章中,我们介绍了词向量、语言模型和词向量的关系、以及如何通过训练神经网络模型获得词向量。在信息检索中,我们可以根据向量间的余弦夹角,来判断query和文档关键词这二者间的相关性。在句法分析和语义分析中,训练好的词向量可以用来初始化模型,以得到更好的效果。在文档分类中,有了词向量之后,可以用聚类的方法将文档中同义词进行分组,也可以用 N-gram 来预测下一个词。希望大家在本章后能够自行运用词向量进行相关领域的研究。 + + +## 参考文献 +1. Bengio Y, Ducharme R, Vincent P, et al. [A neural probabilistic language model](http://www.jmlr.org/papers/volume3/bengio03a/bengio03a.pdf)[J]. journal of machine learning research, 2003, 3(Feb): 1137-1155. +2. Mikolov T, Kombrink S, Deoras A, et al. [Rnnlm-recurrent neural network language modeling toolkit](http://www.fit.vutbr.cz/~imikolov/rnnlm/rnnlm-demo.pdf)[C]//Proc. of the 2011 ASRU Workshop. 2011: 196-201. +3. Mikolov T, Chen K, Corrado G, et al. [Efficient estimation of word representations in vector space](https://arxiv.org/pdf/1301.3781.pdf)[J]. arXiv preprint arXiv:1301.3781, 2013. +4. Maaten L, Hinton G. 
[Visualizing data using t-SNE](https://lvdmaaten.github.io/publications/papers/JMLR_2008.pdf)[J]. Journal of Machine Learning Research, 2008, 9(Nov): 2579-2605. +5. https://en.wikipedia.org/wiki/Singular_value_decomposition + +
+知识共享许可协议
本教程PaddlePaddle 创作,采用 知识共享 署名-相同方式共享 4.0 国际 许可协议进行许可。 diff --git a/doc/paddle/user_guides/simple_case/word2vec/README.md b/doc/paddle/user_guides/simple_case/word2vec/README.md new file mode 100644 index 0000000000000000000000000000000000000000..6393aec48987aaa61f94216858c5da5f67a7fc68 --- /dev/null +++ b/doc/paddle/user_guides/simple_case/word2vec/README.md @@ -0,0 +1,491 @@ +# Word Vector + +The source code of this tutorial is in [book/word2vec](https://github.com/PaddlePaddle/book/tree/develop/04.word2vec). For new users, please refer to [Running This Book](https://github.com/PaddlePaddle/book/blob/develop/README.md#running-the-book) . + +## Background + +In this chapter, we'll introduce the vector representation of words, also known as word embedding. Word vector is a common operation in natural language processing. It is a common technology underlying Internet services such as search engines, advertising systems, and recommendation systems. + +In these Internet services, we often compare the correlation between two words or two paragraphs of text. In order to make such comparisons, we often have to express words in a way that is suitable for computer processing. The most natural way is probably the vector space model.In this way, each word is represented as a one-hot vector whose length is the dictionary size, and each dimension corresponds to each word in a dictionary, except that the value in the corresponding dimension of the word is 1, other elements are 0. + +The One-hot vector is natural but has limitation. For example, in the internet advertising system, if the query entered by the user is "Mother's Day", the keyword of an advertisement is "Carnation". Although according to common sense, we know that there is a connection between these two words - Mother's Day should usually give the mother a bunch of carnations; but the distance between the two words corresponds to the one-hot vectors, whether it is Euclidean distance or cosine similarity, the two words are considered to be irrelevant due to their vector orthogonality. The root cause of this conclusion contradicting us is that the amount of information in each word itself is too small. Therefore, just giving two words is not enough for us to accurately determine whether they are relevant. To accurately calculate correlations, we need more information—knowledge from a large amount of data through machine learning methods. + +In the field of machine learning, all kinds of "knowledge" are represented by various models, and the word embedding model is one of them. A one-hot vector can be mapped to a lower-dimensional embedding vector by the word embedding model, such as $embedding (Mother's day) = [0.3, 4.2, -1.5, ...], embedding (carnation) = [0.2, 5.6, -2.3, ...]$. In this representation of the embedding vector to which it is mapped, it is desirable that the word vectors corresponding to the similar words on the two semantics (or usages) are "more like", such that the cosine similarity of the corresponding word vectors of "Mother's Day" and "Carnation" is no longer zero. + +The word embedding model can be a probability model, a co-occurrence matrix model, or a neural network model. Before implementing neural networks to calculate the embedding vector, the traditional method is to count the co-occurrence matrix $X$ of a word. 
$X$ is a matrix of $|V| \times |V|$ size, $X_{ij}$ means that in all corpora, The number of words appearing simultaneously with the i-th word and the j-th word in the vocabulary $V$(vocabulary), $|V|$ is the size of the vocabulary. Do matrix decomposition for $X$ (such as singular value decomposition, Singular Value Decomposition \[[5](#references)\]), and the result $U$ is treated as the embedding vector for all words: + +$$X = USV^T$$ + +But such traditional method has many problems: + +1) Since many words do not appear, the matrix is extremely sparse, so additional processing of the word frequency is needed to achieve a good matrix decomposition effect; + +2) The matrix is very large and the dimensions are too high (usually up to $10^6 \times 10^6$); + +3) You need to manually remove the stop words (such as although, a, ...), otherwise these frequently occurring words will also affect the effect of matrix decomposition. + +The neural-network-based model does not need to calculate and store a large table that is statistically generated on the whole corpus, but obtains the word vector by learning the semantic information, so the problem above can be well solved. In this chapter, we will show the details of training word vectors based on neural networks and how to train a word embedding model with PaddlePaddle. + + +## Result Demo + +In this chapter, after the embedding vector is trained, we can use the data visualization algorithm t-SNE\[[4](#references)\] to draw the projection of the word features in two dimensions (as shown below). As can be seen from the figure, semantically related words (such as a, the, these; big, huge) are very close in projection, and semantic unrelated words (such as say, business; decision, japan) are far away from the projection. + +

+
+ Figure 1. Two-dimensional projection of a word vector +

+
+On the other hand, we know that the cosine of two vectors lies in the interval $[-1,1]$: two identical vectors have a cosine of 1, two mutually perpendicular vectors have a cosine of 0, and two vectors pointing in opposite directions have a cosine of -1, i.e. the correlation is proportional to the magnitude of the cosine. So we can also calculate the cosine similarity of two word vectors:
+
+```
+
+please input two words: big huge
+similarity: 0.899180685161
+
+please input two words: from company
+similarity: -0.0997506977351
+
+```
+
+The results above can be obtained by running `calculate_dis.py`, which loads the words in the dictionary and the corresponding trained feature vectors. We will describe the usage in detail in [model application](#model-application).
+
+
+## Overview of Models
+
+Here we introduce three models for training word vectors: the N-gram model, the CBOW model and the Skip-gram model. Their central idea is to obtain the probability of a word appearing from its context. For the N-gram model, we will first introduce the concept of the language model, and in the section [Program the Model](#program-the-model) we will walk you through implementing it with PaddlePaddle. The latter two models are the most famous neural word vector models of recent years, developed by Tomas Mikolov at Google \[[3](#references)\]; although they are very simple, they train very well.
+
+### Language Model
+
+Before introducing the word embedding model, let us introduce a concept: the language model.
+The language model is intended to model the joint probability function $P(w_1, ..., w_T)$ of a sentence, where $w_i$ represents the i-th word in the sentence. The goal of the language model is to give a high probability to meaningful sentences and a small probability to meaningless sentences. Such models can be applied to many fields, such as machine translation, speech recognition, information retrieval, part-of-speech tagging and handwriting recognition, all of which need the probability of a continuous sequence. Take information retrieval as an example: when you search for "how long is a football bame" (bame is a medical term), the search engine will ask whether you meant "how long is a football game", because the language model assigns a very low probability to "how long is a football bame", and among the words similar to bame that could have been mistyped, game maximizes the probability of the sentence.
+
+For the target probability of the language model $P(w_1, ..., w_T)$, if it is assumed that each word in the text is independent, the joint probability of the whole sentence can be expressed as the product of the probabilities of all the words, that is:
+
+$$P(w_1, ..., w_T) = \prod_{t=1}^TP(w_t)$$
+
+However, we know that the probability of each word in a sentence is closely related to the words in front of it, so in practice the language model is usually represented by conditional probabilities:
+
+$$P(w_1, ..., w_T) = \prod_{t=1}^TP(w_t | w_1, ... , w_{t-1})$$
+
+
+
+### N-gram neural model
+
+In computational linguistics, an n-gram is an important text representation method that denotes n consecutive items in a text. Depending on the application, each item can be a letter, a word or a syllable. The n-gram model is also an important method in statistical language modeling: when an n-gram is used to train a language model, the n-th word is generally predicted from the preceding n-1 words of each n-gram.
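+
+As a concrete illustration of what these n-gram histories look like (this sketch is not part of the tutorial code; the tokenized sentence and the value of n are invented for the example), the (n-1)-word histories and their target words can be enumerated like this:
+
+```python
+# Illustrative sketch: enumerate (history, target) pairs for an n-gram language model.
+sentence = ["how", "long", "is", "a", "football", "game"]
+n = 3  # a trigram model predicts each word from the 2 preceding words
+
+for t in range(n - 1, len(sentence)):
+    history = sentence[t - n + 1:t]   # the n-1 preceding words
+    target = sentence[t]              # the word to be predicted
+    print(history, "->", target)
+```
+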
+
+Scientists such as Yoshua Bengio introduced how to learn a word vector model represented by a neural network in the famous 2003 paper Neural Probabilistic Language Models \[[1](#references)\]. The Neural Network Language Model (NNLM) in this paper combines a linear projection with a nonlinear hidden layer and learns the language model and the word vectors simultaneously; that is, it learns the vector representation of each word from a large corpus and uses these vectors to compute the probability of a whole sentence. Since every word is represented by a low-dimensional vector, learning the language model in this way can overcome the curse of dimensionality.
+Note: Because the name "Neural Network Language Model" is rather general, we do not use the name NNLM here; considering its actual construction, we call this model the N-gram neural model in this chapter.
+
+We have already mentioned above modeling the language with conditional probabilities, that is, the probability of the $t$-th word in a sentence depends on the first $t-1$ words of the sentence. In practice, however, words that are farther away have less influence on the current word, so if we consider an n-gram, in which each word is affected only by the preceding `n-1` words, we have:
+
+$$P(w_1, ..., w_T) = \prod_{t=n}^TP(w_t|w_{t-1}, w_{t-2}, ..., w_{t-n+1})$$
+
+Given a real corpus, in which all sentences are meaningful, the optimization goal of the N-gram model is to maximize the objective function:
+
+$$\frac{1}{T}\sum_t f(w_t, w_{t-1}, ..., w_{t-n+1};\theta) + R(\theta)$$
+
+where $f(w_t, w_{t-1}, ..., w_{t-n+1})$ represents the conditional probability of the current word $w_t$ given the previous n-1 words, and $R(\theta)$ represents a parameter regularization term.
+

+       
+       Figure 2. N-gram neural network model +

+
+Figure 2 shows the N-gram neural network model. From the bottom up, the model is divided into the following parts:
+ - For each sample, the model takes $w_{t-n+1},...w_{t-1}$ as input and outputs the probability distribution of the t-th word of the sentence over the `|V|` words in the dictionary.
+
+ Each input word $w_{t-n+1},...w_{t-1}$ is first mapped to a word vector $C(w_{t-n+1}),...C(w_{t-1})$ by the mapping matrix.
+
+ - Then the word vectors of all the words are concatenated into one large vector, and a hidden-layer representation of the historical words is obtained through a non-linear mapping:
+
+ $$g=U\tanh(\theta^Tx + b_1) + Wx + b_2$$
+
+Here, $x$ is the large vector obtained by concatenating the word vectors of all the input words and represents the textual history features; $\theta$, $U$, $b_1$, $b_2$ and $W$ are the parameters connecting the word vector layer to the hidden layer. $g$ represents the unnormalized probabilities of all output words, and $g_i$ represents the unnormalized output probability of the $i$-th word in the dictionary.
+
+ - According to the definition of softmax, by normalizing $g_i$, the probability of generating the target word $w_t$ is:
+
+ $$P(w_t | w_1, ..., w_{t-n+1}) = \frac{e^{g_{w_t}}}{\sum_i^{|V|} e^{g_i}}$$
+
+ - The loss of the entire network is the multi-class classification cross entropy, which is expressed as
+
+ $$J(\theta) = -\sum_{i=1}^N\sum_{k=1}^{|V|}y_k^{i}\log(\text{softmax}(g_k^i))$$
+
+ where $y_k^i$ represents the true label (0 or 1) of the $k$-th class for the $i$-th sample, and $\text{softmax}(g_k^i)$ represents the softmax output probability of the $k$-th class for the $i$-th sample.
+
+
+
+### Continuous Bag-of-Words model(CBOW)
+
+The CBOW model predicts the current word from its context (N words on each side). When N=2, the model is shown below:
+

+    
+    Figure 3. CBOW model +

+ +Specifically, regardless of the contextual word input order, CBOW uses the mean of the word vectors of the context words to predict the current word. which is: + +$$context = \frac{x_{t-1} + x_{t-2} + x_{t+1} + x_{t+2}}{4}$$ + +Where $x_t$ is the word vector of the $t$th word, the score vector (score) $z=U\*context$, the final classification $y$ uses softmax, and the loss function uses multi-class classification cross entropy. + +### Skip-gram model + +The benefit of CBOW is that the distribution of contextual words is smoothed over the word vector, removing noise. Therefore it is very effective on small data sets. In the Skip-gram method, a word is used to predict its context, and many samples of the current word context are obtained, so it can be used for a larger data set. + +
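+
+To make the contrast concrete, the following sketch (illustrative only; the sentence and the window size are made up and it is not part of the tutorial code) builds the CBOW and Skip-gram training pairs produced by the same window:
+
+```python
+# Sketch: training samples produced by CBOW vs. Skip-gram with a context window
+# of n = 2 words on each side of the current word.
+sentence = ["I", "have", "a", "dream", "that", "one", "day"]
+n = 2
+
+for t, w in enumerate(sentence):
+    context = sentence[max(0, t - n):t] + sentence[t + 1:t + 1 + n]
+    cbow_sample = (context, w)                     # context (averaged) -> current word
+    skipgram_samples = [(w, c) for c in context]   # current word -> each context word
+    print(cbow_sample, skipgram_samples)
+```
+
+Note how one window yields a single CBOW sample but up to $2n$ Skip-gram samples, which is why Skip-gram extracts many more training pairs from the same corpus.
+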

+    
+    Figure 4. Skip-gram model +

+ +As shown in the figure above, the specific method of the Skip-gram model is to map the word vector of a word to the word vector of $2n$ words ($2n$ represents the $n$ words before and after the input word), and then obtained the sum of the classification loss values of the $2n$ words by softmax. + + +## Data Preparation + +### Data Introduction + +This tutorial uses the Penn Treebank (PTB) (pre-processed version of Tomas Mikolov) dataset. The PTB data set is small and the training speed is fast. It is applied to Mikolov's open language model training tool \[[2](#references)\]. Its statistics are as follows: + +

+ +     +         +         +         +     +     +         +         +         +     +     +         +         +         +     +
Training dataVerify dataTest data
ptb.train.txtptb.valid.txtptb.test.txt
42068 sentences3370 sentences3761 sentence
+

+
+
+### Data Preprocessing
+
+This chapter trains a 5-gram model, which means that during PaddlePaddle training the first 4 words of each piece of data are used to predict the 5th word. PaddlePaddle provides the python package `paddle.dataset.imikolov` corresponding to the PTB dataset, which automatically downloads and preprocesses the data for your convenience.
+
+Preprocessing adds the start symbol `<s>` and the end symbol `<e>` to each sentence in the data set. Then, depending on the window size (5 in this tutorial), the window is slid to the right from start to end, one step at a time, and each position generates a piece of data.
+
+For example, "I have a dream that one day" provides 5 pieces of data:
+
+```text
+<s> I have a dream
+I have a dream that
+have a dream that one
+a dream that one day
+dream that one day <e>
+```
+
+Finally, based on the position of each word in the dictionary, every input is converted to a sequence of integer indices as the input to PaddlePaddle.
+
+
+## Program the Model
+
+The model structure of this configuration is shown below:
+

+    
+    Figure 5. N-gram neural network model in model configuration +

+ +First, load packages: + +```python + +from __future__ import print_function + +import paddle +import paddle.fluid as fluid +import six +import numpy +import math + +``` + +Then, define the parameters: + +```python +EMBED_SIZE = 32 # embedding dimensions +HIDDEN_SIZE = 256 # hidden layer size +N = 5 # ngram size, here fixed 5 +BATCH_SIZE = 100 # batch size +PASS_NUM = 100 # Training rounds + +use_cuda = False # Set to True if trained with GPU + +word_dict = paddle.dataset.imikolov.build_dict() +dict_size = len(word_dict) +``` + +A larger `BATCH_SIZE` will make the training converge faster, but it will also consume more memory. Since the word vector calculation is large, if the environment allows, please turn on the GPU for training, and get results faster. +Unlike the previous PaddlePaddle v2 version, in the new Fluid version, we don't have to manually calculate the word vector. PaddlePaddle provides a built-in method `fluid.embedding`, which we can use directly to construct an N-gram neural network. + +- Let's define our N-gram neural network structure. This structure is used in both training and predicting. Because the word vector is sparse, we pass the parameter `is_sparse == True` to speed up the update of the sparse matrix. + +```python +def inference_program(words, is_sparse): + + embed_first = fluid.embedding( + input=words[0], + size=[dict_size, EMBED_SIZE], + dtype='float32', + is_sparse=is_sparse, + param_attr='shared_w') + embed_second = fluid.embedding( + input=words[1], + size=[dict_size, EMBED_SIZE], + dtype='float32', + is_sparse=is_sparse, + param_attr='shared_w') + embed_third = fluid.embedding( + input=words[2], + size=[dict_size, EMBED_SIZE], + dtype='float32', + is_sparse=is_sparse, + param_attr='shared_w') + embed_fourth = fluid.embedding( + input=words[3], + size=[dict_size, EMBED_SIZE], + dtype='float32', + is_sparse=is_sparse, + param_attr='shared_w') + + concat_embed = fluid.layers.concat( + input=[embed_first, embed_second, embed_third, embed_fourth], axis=1) + hidden1 = fluid.layers.fc(input=concat_embed, + size=HIDDEN_SIZE, + act='sigmoid') + predict_word = fluid.layers.fc(input=hidden1, size=dict_size, act='softmax') + return predict_word +``` + +- Based on the neural network structure above, we can define our training method as follows: + +```python +def train_program(predict_word): + # The definition of'next_word' must be after the declaration of inference_program. + # Otherwise the sequence of the train program input data becomes [next_word, firstw, secondw, + #thirdw, fourthw], This is not true. + next_word = fluid.data(name='nextw', shape=[None, 1], dtype='int64') + cost = fluid.layers.cross_entropy(input=predict_word, label=next_word) + avg_cost = fluid.layers.mean(cost) + return avg_cost + +def optimizer_func(): + return fluid.optimizer.AdagradOptimizer( + learning_rate=3e-3, + regularization=fluid.regularizer.L2DecayRegularizer(8e-4)) + +``` + +- Now we can start training. This version is much simpler than before. We have ready-made training and test sets: `paddle.dataset.imikolov.train()` and `paddle.dataset.imikolov.test()`. Both will return a reader. In PaddlePaddle, the reader is a Python function that reads the next piece of data when called each time . It is a Python generator. + +`fluid.io.batch` will read in a reader and output a batched reader. We can also output the training of each step and batch during the training process. 
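+
+If the reader abstraction is unfamiliar, the following minimal sketch (illustrative only; `toy_reader` is a made-up example, not part of the tutorial) shows what such a generator looks like and how `fluid.io.batch` wraps it:
+
+```python
+import paddle.fluid as fluid
+
+def toy_reader():
+    # A reader creator: it returns a no-argument generator function that
+    # yields one sample per iteration.
+    def reader():
+        for i in range(8):
+            # each sample mimics the 5-gram layout: 4 context word ids + the next-word id
+            yield [i, i + 1, i + 2, i + 3, i + 4]
+    return reader
+
+batched_reader = fluid.io.batch(toy_reader(), 4)  # batch size of 4
+for mini_batch in batched_reader():
+    print(len(mini_batch))  # two mini-batches of 4 samples each
+```
+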
+ +```python +def train(if_use_cuda, params_dirname, is_sparse=True): + place = fluid.CUDAPlace(0) if if_use_cuda else fluid.CPUPlace() + + train_reader = fluid.io.batch( + paddle.dataset.imikolov.train(word_dict, N), BATCH_SIZE) + test_reader = fluid.io.batch( + paddle.dataset.imikolov.test(word_dict, N), BATCH_SIZE) + + first_word = fluid.data(name='firstw', shape=[None, 1], dtype='int64') + second_word = fluid.data(name='secondw', shape=[None, 1], dtype='int64') + third_word = fluid.data(name='thirdw', shape=[None, 1], dtype='int64') + forth_word = fluid.data(name='fourthw', shape=[None, 1], dtype='int64') + next_word = fluid.data(name='nextw', shape=[None, 1], dtype='int64') + + word_list = [first_word, second_word, third_word, forth_word, next_word] + feed_order = ['firstw', 'secondw', 'thirdw', 'fourthw', 'nextw'] + + main_program = fluid.default_main_program() + star_program = fluid.default_startup_program() + + predict_word = inference_program(word_list, is_sparse) + avg_cost = train_program(predict_word) + test_program = main_program.clone(for_test=True) + + optimizer = optimizer_func() + optimizer.minimize(avg_cost) + + exe = fluid.Executor(place) + + def train_test(program, reader): + count = 0 + feed_var_list = [ + program.global_block().var(var_name) for var_name in feed_order + ] + feeder_test = fluid.DataFeeder(feed_list=feed_var_list, place=place) + test_exe = fluid.Executor(place) + accumulated = len([avg_cost]) * [0] + for test_data in reader(): + avg_cost_np = test_exe.run( + program=program, + feed=feeder_test.feed(test_data), + fetch_list=[avg_cost]) + accumulated = [ + x[0] + x[1][0] for x in zip(accumulated, avg_cost_np) + ] + count += 1 + return [x / count for x in accumulated] + + def train_loop(): + step = 0 + feed_var_list_loop = [ + main_program.global_block().var(var_name) for var_name in feed_order + ] + feeder = fluid.DataFeeder(feed_list=feed_var_list_loop, place=place) + exe.run(star_program) + for pass_id in range(PASS_NUM): + for data in train_reader(): + avg_cost_np = exe.run( + main_program, feed=feeder.feed(data), fetch_list=[avg_cost]) + + if step % 10 == 0: + outs = train_test(test_program, test_reader) + + print("Step %d: Average Cost %f" % (step, outs[0])) + + # The entire training process takes several hours if the average loss is less than 5.8, + # We think that the model has achieved good results and can stop training. + # Note 5.8 is a relatively high value, in order to get a better model, you can + # set the threshold here to be 3.5, but the training time will be longer. + if outs[0] < 5.8: + if params_dirname is not None: + fluid.io.save_inference_model(params_dirname, [ + 'firstw', 'secondw', 'thirdw', 'fourthw' + ], [predict_word], exe) + return + step += 1 + if math.isnan(float(avg_cost_np[0])): + sys.exit("got NaN loss, training failed.") + + raise AssertionError("Cost is too large {0:2.2}".format(avg_cost_np[0])) + + train_loop() +``` + +- `train_loop` will start training. The log of the training process during the period is as follows: + +```text +Step 0: Average Cost 7.337213 +Step 10: Average Cost 6.136128 +Step 20: Average Cost 5.766995 +... +``` + + +## Model Application +After the model is trained, we can use it to make some predictions. + +### Predict the next word +We can use our trained model to predict the next word after learning the previous N-gram. 
+ +```python +def infer(use_cuda, params_dirname=None): + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + + exe = fluid.Executor(place) + + inference_scope = fluid.core.Scope() + with fluid.scope_guard(inference_scope): + #Get the inference program using fluid.io.load_inference_model, + #feed variable name by feed_target_names and fetch fetch_targets from scope + [inferencer, feed_target_names, + fetch_targets] = fluid.io.load_inference_model(params_dirname, exe) + + # Set the input and use 4 LoDTensor to represent 4 words. Each word here is an id, + # Used to query the embedding table to get the corresponding word vector, so its shape size is [1]. + # recursive_sequence_lengths sets the length based on LoD, so it should all be set to [[1]] + # Note that recursive_sequence_lengths is a list of lists + data1 = numpy.asarray([[211]], dtype=numpy.int64) # 'among' + data2 = numpy.asarray([[6]], dtype=numpy.int64) # 'a' + data3 = numpy.asarray([[96]], dtype=numpy.int64) # 'group' + data4 = numpy.asarray([[4]], dtype=numpy.int64) # 'of' + lod = numpy.asarray([[1]], dtype=numpy.int64) + + first_word = fluid.create_lod_tensor(data1, lod, place) + second_word = fluid.create_lod_tensor(data2, lod, place) + third_word = fluid.create_lod_tensor(data3, lod, place) + fourth_word = fluid.create_lod_tensor(data4, lod, place) + + assert feed_target_names[0] == 'firstw' + assert feed_target_names[1] == 'secondw' + assert feed_target_names[2] == 'thirdw' + assert feed_target_names[3] == 'fourthw' + + # Construct the feed dictionary {feed_target_name: feed_target_data} + # Prediction results are included in results + results = exe.run( + inferencer, + feed={ + feed_target_names[0]: first_word, + feed_target_names[1]: second_word, + feed_target_names[2]: third_word, + feed_target_names[3]: fourth_word + }, + fetch_list=fetch_targets, + return_numpy=False) + + print(numpy.array(results[0])) + most_possible_word_index = numpy.argmax(results[0]) + print(most_possible_word_index) + print([ + key for key, value in six.iteritems(word_dict) + if value == most_possible_word_index + ][0]) +``` + +Since the word vector matrix itself is relatively sparse, the training process takes a long time to reach a certain precision. In order to see the effect simply, the tutorial only sets up with a few rounds of training and ends with the following result. Our model predicts that the next word for `among a group of` is `the`. This is in line with the law of grammar. If we train for longer time, such as several hours, then the next predicted word we will get is `workers`. The format of the predicted output is as follows: + +```text +[[0.03768077 0.03463154 0.00018074 ... 0.00022283 0.00029888 0.02967956]] +0 +the +``` +The first line represents the probability distribution of the predicted word in the dictionary, the second line represents the id corresponding to the word with the highest probability, and the third line represents the word with the highest probability. 
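+
+As a small, optional extension (illustrative only; it reuses the `results` and `word_dict` variables from the `infer()` example above), you could inspect the five most likely next words instead of only the argmax:
+
+```python
+import numpy
+import six
+
+probs = numpy.array(results[0])[0]     # probability distribution over the dictionary
+top5_ids = numpy.argsort(-probs)[:5]   # ids of the five most likely next words
+id_to_word = {idx: word for word, idx in six.iteritems(word_dict)}
+for idx in top5_ids:
+    print(id_to_word[idx], probs[idx])
+```
+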
+ +The entrance to the entire program is simple: + +```python +def main(use_cuda, is_sparse): + if use_cuda and not fluid.core.is_compiled_with_cuda(): + return + + params_dirname = "word2vec.inference.model" + + train( + if_use_cuda=use_cuda, + params_dirname=params_dirname, + is_sparse=is_sparse) + + infer(use_cuda=use_cuda, params_dirname=params_dirname) + + +main(use_cuda=use_cuda, is_sparse=True) +``` + +## Conclusion +In this chapter, we introduced word vectors, the relationship between language models and word vectors and how to obtain word vectors by training neural network models. In information retrieval, we can judge the correlation between query and document keywords based on the cosine value between vectors. In syntactic analysis and semantic analysis, trained word vectors can be used to initialize the model for better results. In the document classification, after the word vector, you can cluster to group synonyms in a document, or you can use N-gram to predict the next word. We hope that everyone can easily use the word vector to conduct research in related fields after reading this chapter. + + +## References +1. Bengio Y, Ducharme R, Vincent P, et al. [A neural probabilistic language model](http://www.jmlr.org/papers/volume3/bengio03a/bengio03a.pdf)[J]. journal of machine learning Research, 2003, 3(Feb): 1137-1155. +2. Mikolov T, Kombrink S, Deoras A, et al. [Rnnlm-recurrent neural network language modeling toolkit](http://www.fit.vutbr.cz/~imikolov/rnnlm/rnnlm-demo.pdf)[C ]//Proc. of the 2011 ASRU Workshop. 2011: 196-201. +3. Mikolov T, Chen K, Corrado G, et al. [Efficient estimation of word representations in vector space](https://arxiv.org/pdf/1301.3781.pdf)[J]. arXiv preprint arXiv:1301.3781, 2013 . +4. Maaten L, Hinton G. [Visualizing data using t-SNE](https://lvdmaaten.github.io/publications/papers/JMLR_2008.pdf)[J]. Journal of Machine Learning Research, 2008, 9(Nov ): 2579-2605. +5. https://en.wikipedia.org/wiki/Singular_value_decomposition + +
+知识共享许可协议
This tutorial is contributed by PaddlePaddle, and licensed under a Creative Commons Attribution-ShareAlike 4.0 International License. diff --git a/doc/paddle/user_guides/simple_case/word2vec/_ce.py b/doc/paddle/user_guides/simple_case/word2vec/_ce.py new file mode 100644 index 0000000000000000000000000000000000000000..eb95e993283c72bee4375267ad013489e1f5e9db --- /dev/null +++ b/doc/paddle/user_guides/simple_case/word2vec/_ce.py @@ -0,0 +1,37 @@ +### This file is only used for continuous evaluation test! +from __future__ import print_function +from __future__ import division +from __future__ import absolute_import +import os +import sys +sys.path.append(os.environ['ceroot']) +from kpi import CostKpi + +train_cost_kpi = CostKpi( + 'train_cost', 0.02, 0, actived=True, desc='train cost') +tracking_kpis = [train_cost_kpi] + + +def parse_log(log): + for line in log.split('\n'): + fs = line.strip().split('\t') + print(fs) + if len(fs) == 3 and fs[0] == 'kpis': + kpi_name = fs[1] + kpi_value = float(fs[2]) + yield kpi_name, kpi_value + + +def log_to_ce(log): + kpi_tracker = {} + for kpi in tracking_kpis: + kpi_tracker[kpi.name] = kpi + for (kpi_name, kpi_value) in parse_log(log): + print(kpi_name, kpi_value) + kpi_tracker[kpi_name].add_record(kpi_value) + kpi_tracker[kpi_name].persist() + + +if __name__ == '__main__': + log = sys.stdin.read() + log_to_ce(log) diff --git a/doc/paddle/user_guides/simple_case/word2vec/calculate_dis.py b/doc/paddle/user_guides/simple_case/word2vec/calculate_dis.py new file mode 100644 index 0000000000000000000000000000000000000000..a18e5ffee11a3178526255bc27ab4479583d408e --- /dev/null +++ b/doc/paddle/user_guides/simple_case/word2vec/calculate_dis.py @@ -0,0 +1,77 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +Example: + python calculate_dis.py DICTIONARYTXT FEATURETXT + +Required arguments: + DICTIONARYTXT the dictionary generated in dataprovider + FEATURETXT the text format word feature, one line for one word +""" + +import numpy as np +from argparse import ArgumentParser + + +def load_dict(fdict): + words = [line.strip() for line in fdict.readlines()] + dictionary = dict(zip(words, xrange(len(words)))) + return dictionary + + +def load_emb(femb): + feaBank = [] + flag_firstline = True + for line in femb: + if flag_firstline: + flag_firstline = False + continue + fea = np.array([float(x) for x in line.strip().split(',')]) + normfea = fea * 1.0 / np.linalg.norm(fea) + feaBank.append(normfea) + return feaBank + + +def calcos(id1, id2, Fea): + f1 = Fea[id1] + f2 = Fea[id2] + return np.dot(f1.transpose(), f2) + + +def get_wordidx(w, Dict): + if w not in Dict: + print 'ERROR: %s not in the dictionary' % w + return -1 + return Dict[w] + + +if __name__ == '__main__': + parser = ArgumentParser() + parser.add_argument('dict', help='dictionary file') + parser.add_argument('fea', help='feature file') + args = parser.parse_args() + + with open(args.dict) as fdict: + word_dict = load_dict(fdict) + + with open(args.fea) as ffea: + word_fea = load_emb(ffea) + + while True: + w1, w2 = raw_input("please input two words: ").split() + w1_id = get_wordidx(w1, word_dict) + w2_id = get_wordidx(w2, word_dict) + if w1_id == -1 or w2_id == -1: + continue + print 'similarity: %s' % (calcos(w1_id, w2_id, word_fea)) diff --git a/doc/paddle/user_guides/simple_case/word2vec/format_convert.py b/doc/paddle/user_guides/simple_case/word2vec/format_convert.py new file mode 100644 index 0000000000000000000000000000000000000000..ddbff62a942d0249bbca49aabd07ef3276b2d15c --- /dev/null +++ b/doc/paddle/user_guides/simple_case/word2vec/format_convert.py @@ -0,0 +1,158 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Example: + python format_convert.py --b2t -i INPUT -o OUTPUT -d DIM + python format_convert.py --t2b -i INPUT -o OUTPUT + +Options: + -h, --help show this help message and exit + --b2t convert parameter file of embedding model from binary to text + --t2b convert parameter file of embedding model from text to binary + -i INPUT input parameter file name + -o OUTPUT output parameter file name + -d DIM dimension of parameter +""" +from optparse import OptionParser +import struct + + +def binary2text(input, output, paraDim): + """ + Convert a binary parameter file of embedding model to be a text file. + input: the name of input binary parameter file, the format is: + 1) the first 16 bytes is filehead: + version(4 bytes): version of paddle, default = 0 + floatSize(4 bytes): sizeof(float) = 4 + paraCount(8 bytes): total number of parameter + 2) the next (paraCount * 4) bytes is parameters, each has 4 bytes + output: the name of output text parameter file, for example: + 0,4,32156096 + -0.7845433,1.1937413,-0.1704215,... + 0.0000909,0.0009465,-0.0008813,... 
+ ... + the format is: + 1) the first line is filehead: + version=0, floatSize=4, paraCount=32156096 + 2) other lines print the paramters + a) each line prints paraDim paramters splitted by ',' + b) there is paraCount/paraDim lines (embedding words) + paraDim: dimension of parameters + """ + fi = open(input, "rb") + fo = open(output, "w") + """ + """ + version, floatSize, paraCount = struct.unpack("iil", fi.read(16)) + newHead = ','.join([str(version), str(floatSize), str(paraCount)]) + print >> fo, newHead + + bytes = 4 * int(paraDim) + format = "%df" % int(paraDim) + context = fi.read(bytes) + line = 0 + + while context: + numbers = struct.unpack(format, context) + lst = [] + for i in numbers: + lst.append('%8.7f' % i) + print >> fo, ','.join(lst) + context = fi.read(bytes) + line += 1 + fi.close() + fo.close() + print "binary2text finish, total", line, "lines" + + +def get_para_count(input): + """ + Compute the total number of embedding parameters in input text file. + input: the name of input text file + """ + numRows = 1 + paraDim = 0 + with open(input) as f: + line = f.readline() + paraDim = len(line.split(",")) + for line in f: + numRows += 1 + return numRows * paraDim + + +def text2binary(input, output, paddle_head=True): + """ + Convert a text parameter file of embedding model to be a binary file. + input: the name of input text parameter file, for example: + -0.7845433,1.1937413,-0.1704215,... + 0.0000909,0.0009465,-0.0008813,... + ... + the format is: + 1) it doesn't have filehead + 2) each line stores the same dimension of parameters, + the separator is commas ',' + output: the name of output binary parameter file, the format is: + 1) the first 16 bytes is filehead: + version(4 bytes), floatSize(4 bytes), paraCount(8 bytes) + 2) the next (paraCount * 4) bytes is parameters, each has 4 bytes + """ + fi = open(input, "r") + fo = open(output, "wb") + + newHead = struct.pack("iil", 0, 4, get_para_count(input)) + fo.write(newHead) + + count = 0 + for line in fi: + line = line.strip().split(",") + for i in range(0, len(line)): + binary_data = struct.pack("f", float(line[i])) + fo.write(binary_data) + count += 1 + fi.close() + fo.close() + print "text2binary finish, total", count, "lines" + + +def main(): + """ + Main entry for running format_convert.py + """ + usage = "usage: \n" \ + "python %prog --b2t -i INPUT -o OUTPUT -d DIM \n" \ + "python %prog --t2b -i INPUT -o OUTPUT" + parser = OptionParser(usage) + parser.add_option( + "--b2t", + action="store_true", + help="convert parameter file of embedding model from binary to text") + parser.add_option( + "--t2b", + action="store_true", + help="convert parameter file of embedding model from text to binary") + parser.add_option( + "-i", action="store", dest="input", help="input parameter file name") + parser.add_option( + "-o", action="store", dest="output", help="output parameter file name") + parser.add_option( + "-d", action="store", dest="dim", help="dimension of parameter") + (options, args) = parser.parse_args() + if options.b2t: + binary2text(options.input, options.output, options.dim) + if options.t2b: + text2binary(options.input, options.output) + + +if __name__ == '__main__': + main() diff --git a/doc/paddle/user_guides/simple_case/word2vec/image/2d_similarity.png b/doc/paddle/user_guides/simple_case/word2vec/image/2d_similarity.png new file mode 100644 index 0000000000000000000000000000000000000000..384f59919a2c8dedb198e97d51434616648932e1 Binary files /dev/null and 
b/doc/paddle/user_guides/simple_case/word2vec/image/2d_similarity.png differ diff --git a/doc/paddle/user_guides/simple_case/word2vec/image/Eqn1.gif b/doc/paddle/user_guides/simple_case/word2vec/image/Eqn1.gif new file mode 100644 index 0000000000000000000000000000000000000000..524a38d2bb3e7ef8abdcf8ef2b1e834a55f3246d Binary files /dev/null and b/doc/paddle/user_guides/simple_case/word2vec/image/Eqn1.gif differ diff --git a/doc/paddle/user_guides/simple_case/word2vec/image/Eqn2.gif b/doc/paddle/user_guides/simple_case/word2vec/image/Eqn2.gif new file mode 100644 index 0000000000000000000000000000000000000000..f174230957a2241812438110255662060b1bd5df Binary files /dev/null and b/doc/paddle/user_guides/simple_case/word2vec/image/Eqn2.gif differ diff --git a/doc/paddle/user_guides/simple_case/word2vec/image/Eqn3.gif b/doc/paddle/user_guides/simple_case/word2vec/image/Eqn3.gif new file mode 100644 index 0000000000000000000000000000000000000000..19ee90ef0ee2a72fb27b9d9f575cc0ee706e38ad Binary files /dev/null and b/doc/paddle/user_guides/simple_case/word2vec/image/Eqn3.gif differ diff --git a/doc/paddle/user_guides/simple_case/word2vec/image/Eqn4.gif b/doc/paddle/user_guides/simple_case/word2vec/image/Eqn4.gif new file mode 100644 index 0000000000000000000000000000000000000000..e2e1370275862176ba2c1e293ba9ac5149bf4a48 Binary files /dev/null and b/doc/paddle/user_guides/simple_case/word2vec/image/Eqn4.gif differ diff --git a/doc/paddle/user_guides/simple_case/word2vec/image/Eqn5.gif b/doc/paddle/user_guides/simple_case/word2vec/image/Eqn5.gif new file mode 100644 index 0000000000000000000000000000000000000000..91e691e4943ce05049669e60715a4db8e9759071 Binary files /dev/null and b/doc/paddle/user_guides/simple_case/word2vec/image/Eqn5.gif differ diff --git a/doc/paddle/user_guides/simple_case/word2vec/image/Eqn6.gif b/doc/paddle/user_guides/simple_case/word2vec/image/Eqn6.gif new file mode 100644 index 0000000000000000000000000000000000000000..1052c0d33d97aeb7ac152899eefe8d7a22d84918 Binary files /dev/null and b/doc/paddle/user_guides/simple_case/word2vec/image/Eqn6.gif differ diff --git a/doc/paddle/user_guides/simple_case/word2vec/image/Eqn7.gif b/doc/paddle/user_guides/simple_case/word2vec/image/Eqn7.gif new file mode 100644 index 0000000000000000000000000000000000000000..51fe46e8cb384baabc52ba7690fe6107e3cb2f27 Binary files /dev/null and b/doc/paddle/user_guides/simple_case/word2vec/image/Eqn7.gif differ diff --git a/doc/paddle/user_guides/simple_case/word2vec/image/Eqn8.gif b/doc/paddle/user_guides/simple_case/word2vec/image/Eqn8.gif new file mode 100644 index 0000000000000000000000000000000000000000..4cd0aed748d45a572d3d394e755a8594f885c17d Binary files /dev/null and b/doc/paddle/user_guides/simple_case/word2vec/image/Eqn8.gif differ diff --git a/doc/paddle/user_guides/simple_case/word2vec/image/Eqn9.gif b/doc/paddle/user_guides/simple_case/word2vec/image/Eqn9.gif new file mode 100644 index 0000000000000000000000000000000000000000..2f61e3e60794f83d73df136667d6dae7cefac0b7 Binary files /dev/null and b/doc/paddle/user_guides/simple_case/word2vec/image/Eqn9.gif differ diff --git a/doc/paddle/user_guides/simple_case/word2vec/image/cbow.png b/doc/paddle/user_guides/simple_case/word2vec/image/cbow.png new file mode 100644 index 0000000000000000000000000000000000000000..76b7d4bc0f99372465bd9aa34721513d39ad0776 Binary files /dev/null and b/doc/paddle/user_guides/simple_case/word2vec/image/cbow.png differ diff --git a/doc/paddle/user_guides/simple_case/word2vec/image/cbow_en.png 
b/doc/paddle/user_guides/simple_case/word2vec/image/cbow_en.png new file mode 100644 index 0000000000000000000000000000000000000000..d985c393e618e9b79df05e4ff0ae57ccc93744d0 Binary files /dev/null and b/doc/paddle/user_guides/simple_case/word2vec/image/cbow_en.png differ diff --git a/doc/paddle/user_guides/simple_case/word2vec/image/ngram.en.png b/doc/paddle/user_guides/simple_case/word2vec/image/ngram.en.png new file mode 100644 index 0000000000000000000000000000000000000000..2e16ab2f443732b8ef5404a8e7cd2457bc5eee23 Binary files /dev/null and b/doc/paddle/user_guides/simple_case/word2vec/image/ngram.en.png differ diff --git a/doc/paddle/user_guides/simple_case/word2vec/image/ngram.png b/doc/paddle/user_guides/simple_case/word2vec/image/ngram.png new file mode 100644 index 0000000000000000000000000000000000000000..2449dce6a86b43b1b997ff418ed0dba56848463f Binary files /dev/null and b/doc/paddle/user_guides/simple_case/word2vec/image/ngram.png differ diff --git a/doc/paddle/user_guides/simple_case/word2vec/image/nnlm.png b/doc/paddle/user_guides/simple_case/word2vec/image/nnlm.png new file mode 100644 index 0000000000000000000000000000000000000000..1e0b40a8f7aefdf46d42761305511f281c08e595 Binary files /dev/null and b/doc/paddle/user_guides/simple_case/word2vec/image/nnlm.png differ diff --git a/doc/paddle/user_guides/simple_case/word2vec/image/nnlm_en.png b/doc/paddle/user_guides/simple_case/word2vec/image/nnlm_en.png new file mode 100644 index 0000000000000000000000000000000000000000..158bd64b8f8729dea67834a8d591d21bce8b8564 Binary files /dev/null and b/doc/paddle/user_guides/simple_case/word2vec/image/nnlm_en.png differ diff --git a/doc/paddle/user_guides/simple_case/word2vec/image/sentence_emb.png b/doc/paddle/user_guides/simple_case/word2vec/image/sentence_emb.png new file mode 100644 index 0000000000000000000000000000000000000000..ce4a8bf4769183cbaff91793753d2350a3ce936c Binary files /dev/null and b/doc/paddle/user_guides/simple_case/word2vec/image/sentence_emb.png differ diff --git a/doc/paddle/user_guides/simple_case/word2vec/image/skipgram.png b/doc/paddle/user_guides/simple_case/word2vec/image/skipgram.png new file mode 100644 index 0000000000000000000000000000000000000000..a3ab385845d3dc8b5c670bae91225bc8dd47a8bb Binary files /dev/null and b/doc/paddle/user_guides/simple_case/word2vec/image/skipgram.png differ diff --git a/doc/paddle/user_guides/simple_case/word2vec/image/skipgram_en.png b/doc/paddle/user_guides/simple_case/word2vec/image/skipgram_en.png new file mode 100644 index 0000000000000000000000000000000000000000..3c36c6d1f66eb98ea78c0673965d02a4ee3aa288 Binary files /dev/null and b/doc/paddle/user_guides/simple_case/word2vec/image/skipgram_en.png differ diff --git a/doc/paddle/user_guides/simple_case/word2vec/index.cn.html b/doc/paddle/user_guides/simple_case/word2vec/index.cn.html new file mode 100644 index 0000000000000000000000000000000000000000..8a807c0f59e158393cda278966aad0505757336a --- /dev/null +++ b/doc/paddle/user_guides/simple_case/word2vec/index.cn.html @@ -0,0 +1,591 @@ + + + + + + + + + + + + + + + + + +
+
+ + + + + + + diff --git a/doc/paddle/user_guides/simple_case/word2vec/index.html b/doc/paddle/user_guides/simple_case/word2vec/index.html new file mode 100644 index 0000000000000000000000000000000000000000..3aaf7192f8bf7805af3e566fa90f8be0ef7a0613 --- /dev/null +++ b/doc/paddle/user_guides/simple_case/word2vec/index.html @@ -0,0 +1,555 @@ + + + + + + + + + + + + + + + + + +
+
+ + + + + + + diff --git a/doc/paddle/user_guides/simple_case/word2vec/train.py b/doc/paddle/user_guides/simple_case/word2vec/train.py new file mode 100644 index 0000000000000000000000000000000000000000..beed63eb77425165a603fd45d3d9447f2d7bd963 --- /dev/null +++ b/doc/paddle/user_guides/simple_case/word2vec/train.py @@ -0,0 +1,274 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import print_function +import paddle as paddle +import paddle.fluid as fluid +import six +import numpy +import sys +import math +import argparse + +EMBED_SIZE = 32 +HIDDEN_SIZE = 256 +N = 5 +BATCH_SIZE = 100 + +word_dict = paddle.dataset.imikolov.build_dict() +dict_size = len(word_dict) + + +def parse_args(): + parser = argparse.ArgumentParser("word2vec") + parser.add_argument( + '--enable_ce', + action='store_true', + help='If set, run the task with continuous evaluation logs.') + parser.add_argument( + '--use_gpu', type=int, default=0, help='whether to use gpu') + parser.add_argument( + '--num_epochs', type=int, default=100, help='number of epoch') + args = parser.parse_args() + return args + + +def inference_program(words, is_sparse): + + embed_first = fluid.embedding( + input=words[0], + size=[dict_size, EMBED_SIZE], + dtype='float32', + is_sparse=is_sparse, + param_attr='shared_w') + embed_second = fluid.embedding( + input=words[1], + size=[dict_size, EMBED_SIZE], + dtype='float32', + is_sparse=is_sparse, + param_attr='shared_w') + embed_third = fluid.embedding( + input=words[2], + size=[dict_size, EMBED_SIZE], + dtype='float32', + is_sparse=is_sparse, + param_attr='shared_w') + embed_fourth = fluid.embedding( + input=words[3], + size=[dict_size, EMBED_SIZE], + dtype='float32', + is_sparse=is_sparse, + param_attr='shared_w') + + concat_embed = fluid.layers.concat( + input=[embed_first, embed_second, embed_third, embed_fourth], axis=1) + hidden1 = fluid.layers.fc( + input=concat_embed, size=HIDDEN_SIZE, act='sigmoid') + predict_word = fluid.layers.fc( + input=hidden1, size=dict_size, act='softmax') + return predict_word + + +def train_program(predict_word): + # The declaration of 'next_word' must be after the invoking of inference_program, + # or the data input order of train program would be [next_word, firstw, secondw, + # thirdw, fourthw], which is not correct. 
+ next_word = fluid.data(name='nextw', shape=[None, 1], dtype='int64') + cost = fluid.layers.cross_entropy(input=predict_word, label=next_word) + avg_cost = fluid.layers.mean(cost) + return avg_cost + + +def optimizer_func(): + return fluid.optimizer.AdagradOptimizer( + learning_rate=3e-3, + regularization=fluid.regularizer.L2DecayRegularizer(8e-4)) + + +def train(if_use_cuda, params_dirname, is_sparse=True): + place = fluid.CUDAPlace(0) if if_use_cuda else fluid.CPUPlace() + + train_reader = fluid.io.batch( + paddle.dataset.imikolov.train(word_dict, N), BATCH_SIZE) + test_reader = fluid.io.batch( + paddle.dataset.imikolov.test(word_dict, N), BATCH_SIZE) + + first_word = fluid.data(name='firstw', shape=[None, 1], dtype='int64') + second_word = fluid.data(name='secondw', shape=[None, 1], dtype='int64') + third_word = fluid.data(name='thirdw', shape=[None, 1], dtype='int64') + forth_word = fluid.data(name='fourthw', shape=[None, 1], dtype='int64') + next_word = fluid.data(name='nextw', shape=[None, 1], dtype='int64') + + word_list = [first_word, second_word, third_word, forth_word, next_word] + feed_order = ['firstw', 'secondw', 'thirdw', 'fourthw', 'nextw'] + + main_program = fluid.default_main_program() + star_program = fluid.default_startup_program() + + if args.enable_ce: + main_program.random_seed = 90 + star_program.random_seed = 90 + + predict_word = inference_program(word_list, is_sparse) + avg_cost = train_program(predict_word) + test_program = main_program.clone(for_test=True) + + optimizer = optimizer_func() + optimizer.minimize(avg_cost) + + exe = fluid.Executor(place) + + def train_test(program, reader): + count = 0 + feed_var_list = [ + program.global_block().var(var_name) for var_name in feed_order + ] + feeder_test = fluid.DataFeeder(feed_list=feed_var_list, place=place) + test_exe = fluid.Executor(place) + accumulated = len([avg_cost]) * [0] + for test_data in reader(): + avg_cost_np = test_exe.run( + program=program, + feed=feeder_test.feed(test_data), + fetch_list=[avg_cost]) + accumulated = [ + x[0] + x[1][0] for x in zip(accumulated, avg_cost_np) + ] + count += 1 + return [x / count for x in accumulated] + + def train_loop(): + step = 0 + feed_var_list_loop = [ + main_program.global_block().var(var_name) + for var_name in feed_order + ] + feeder = fluid.DataFeeder(feed_list=feed_var_list_loop, place=place) + exe.run(star_program) + for pass_id in range(PASS_NUM): + for data in train_reader(): + avg_cost_np = exe.run( + main_program, + feed=feeder.feed(data), + fetch_list=[avg_cost]) + + if step % 10 == 0: + outs = train_test(test_program, test_reader) + # print("Step %d: Average Cost %f" % (step, avg_cost_np[0])) + print("Step %d: Average Cost %f" % (step, outs[0])) + # print(outs) + + # it will take a few hours. + # If average cost is lower than 5.8, we consider the model good enough to stop. + # Note 5.8 is a relatively high value. In order to get a better model, one should + # aim for avg_cost lower than 3.5. But the training could take longer time. 
+ if outs[0] < 5.8: + if args.enable_ce: + print("kpis\ttrain_cost\t%f" % outs[0]) + + if params_dirname is not None: + fluid.io.save_inference_model(params_dirname, [ + 'firstw', 'secondw', 'thirdw', 'fourthw' + ], [predict_word], exe) + return + step += 1 + if math.isnan(float(avg_cost_np[0])): + sys.exit("got NaN loss, training failed.") + raise AssertionError( + "Cost is too large {0:2.2}".format(avg_cost_np[0])) + + train_loop() + + +def infer(use_cuda, params_dirname=None): + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + + exe = fluid.Executor(place) + + inference_scope = fluid.core.Scope() + with fluid.scope_guard(inference_scope): + # Use fluid.io.load_inference_model to obtain the inference program desc, + # the feed_target_names (the names of variables that will be feeded + # data using feed operators), and the fetch_targets (variables that + # we want to obtain data from using fetch operators). + [inferencer, feed_target_names, + fetch_targets] = fluid.io.load_inference_model(params_dirname, exe) + + # Setup inputs by creating 4 LoDTensors representing 4 words. Here each word + # is simply an index to look up for the corresponding word vector and hence + # the shape of word (base_shape) should be [1]. The recursive_sequence_lengths, + # which is length-based level of detail (lod) of each LoDTensor, should be [[1]] + # meaning there is only one level of detail and there is only one sequence of + # one word on this level. + # Note that recursive_sequence_lengths should be a list of lists. + data1 = numpy.asarray([[211]], dtype=numpy.int64) # 'among' + data2 = numpy.asarray([[6]], dtype=numpy.int64) # 'a' + data3 = numpy.asarray([[96]], dtype=numpy.int64) # 'group' + data4 = numpy.asarray([[4]], dtype=numpy.int64) # 'of' + lod = numpy.asarray([[1]], dtype=numpy.int64) + + first_word = fluid.create_lod_tensor(data1, lod, place) + second_word = fluid.create_lod_tensor(data2, lod, place) + third_word = fluid.create_lod_tensor(data3, lod, place) + fourth_word = fluid.create_lod_tensor(data4, lod, place) + + assert feed_target_names[0] == 'firstw' + assert feed_target_names[1] == 'secondw' + assert feed_target_names[2] == 'thirdw' + assert feed_target_names[3] == 'fourthw' + + # Construct feed as a dictionary of {feed_target_name: feed_target_data} + # and results will contain a list of data corresponding to fetch_targets. 
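+        # Note: because return_numpy=False is passed below, exe.run returns
+        # LoDTensor objects instead of numpy arrays; that is why the result is
+        # converted with numpy.array(results[0]) and can expose
+        # recursive_sequence_lengths() further down.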
+ results = exe.run( + inferencer, + feed={ + feed_target_names[0]: first_word, + feed_target_names[1]: second_word, + feed_target_names[2]: third_word, + feed_target_names[3]: fourth_word + }, + fetch_list=fetch_targets, + return_numpy=False) + + print(numpy.array(results[0])) + most_possible_word_index = numpy.argmax(results[0]) + print(most_possible_word_index) + print([ + key for key, value in six.iteritems(word_dict) + if value == most_possible_word_index + ][0]) + + print(results[0].recursive_sequence_lengths()) + np_data = numpy.array(results[0]) + print("Inference Shape: ", np_data.shape) + + +def main(use_cuda, is_sparse): + if use_cuda and not fluid.core.is_compiled_with_cuda(): + return + + params_dirname = "word2vec.inference.model" + + train( + if_use_cuda=use_cuda, + params_dirname=params_dirname, + is_sparse=is_sparse) + + infer(use_cuda=use_cuda, params_dirname=params_dirname) + + +if __name__ == '__main__': + args = parse_args() + PASS_NUM = args.num_epochs + use_cuda = args.use_gpu # set to True if training with GPU + main(use_cuda=use_cuda, is_sparse=True) diff --git a/doc/paddle/user_guides/tools/elastic_ctr/deploy_ctr_on_baidu_cloud_cn.md b/doc/paddle/user_guides/tools/elastic_ctr/deploy_ctr_on_baidu_cloud_cn.md new file mode 100644 index 0000000000000000000000000000000000000000..8b01bf761c96bbd9710a99840bfbdec8cdcc547e --- /dev/null +++ b/doc/paddle/user_guides/tools/elastic_ctr/deploy_ctr_on_baidu_cloud_cn.md @@ -0,0 +1,438 @@ +ELASTIC CTR +=================== + +——百度云分布式训练CTR预估任务和Serving流程一键部署 + + +* [1. 总体概览](#head1) +* [2. 前置需求](#head2) +* [3. 分布式训练+serving方案一键部署](#head3) +* [4. 查看结果](#head4) +* [5. 二次开发指南](#head5) + + +## 1. 总体概览 + +本项目提供了端到端的CTR训练和二次开发的解决方案,主要特点: + +- 整体方案在k8s环境一键部署,可快速搭建与验证效果 +- 基于Paddle transpiler模式的大规模分布式高速训练 +- 训练资源弹性伸缩 +- 工业级稀疏参数Serving组件,高并发条件下单位时间吞吐总量是redis的13倍 \[[注1](#annotation_1)\] + +本方案整体流程如下图所示: + +![image](elastic_ctr/overview.png) + +其中: + +- trainer/pserver: 训练环节采用PaddlePaddle parameter server模式,对应trainer和pserver角色。分布式训练使用[volcano](https://volcano.sh/)做批量任务管理工具 +- file server: 训练产出的模型文件,托管到File Server,供下游模块下载;训练产出的文件包括:ProgramDesc和模型参数,模型参数中最大的embedding由工具转换为seqfile格式,经过一系列流程配送到cube分布式稀疏参数服务,其余模型参数保持不变,配送到Paddle Serving模块 +- cube-transfer: 负责监控上游训练作业产出的模型文件(hadoop sequence file)变化,拉取到本地,并调用cube-builder构建cube字典文件;通知cube-agent节点拉取最新的字典文件,并维护各个cube-server上版本一致性 +- cube-builder: 负责将训练作业产出的模型文件(hadoop sequence file格式)转换成可以被cube-server加载的字典文件。字典文件具有特定的数据结构,针对尺寸和内存中访问做了高度优化 +- Cube-Server: 提供分片kv读写能力的服务节点 +- Cube-agent: 与cube-server同机部署,接收cube-transfer下发的字典文件更新命令,拉取数据到本地,通知cube-server进行更新 +- Paddle Serving: 加载CTR预估任务模型ProgramDesc和dense参数,提供预测服务 +- Client: CTR预估任务的demo客户端 + +以上组件串联完成从训练到预测部署的所有流程。本文档所提供的一键部署脚本[paddle-suite.sh](https://github.com/PaddlePaddle/Serving/blob/master/doc/resource/paddle-suite.sh)可一键部署上述所有组件。 + +用户可以参考本部署方案,将基于PaddlePaddle的分布式训练和Serving应用到业务环境,也可以在本方案基础上做功能增强和改进,直接使用。具体的,用户可以: + +- 指定数据集的输入和读取方式,来feed不同的数据集和数据集格式;相应的修改Serving代码以适应新模型 +- 指定训练的规模,包括参数服务器的数量和训练节点的数量 +- 指定Cube参数服务器的分片数量和副本数量 + +在本文第5节会详细解释以上二次开发的实际操作。 + +本文主要内容: + +**第2节 前置需求** 指导用户从零开始,在百度云上申请BCE集群,并部署volcano工具。本方案需使用[volcano](https://volcano.sh/)做训练环节批量任务管理工具,目前在百度云上验证通过 + +**第3节 分布式训练+serving方案部署** 使用paddle-suite.sh,一键部署分布式训练+serving完整流程;并详细解释脚本每一步的工作和含义 + +**第4节 查看结果** 根据各个pod输出,验证一键安装状态 + +**第5节 二次开发** 提出本一键部署方案可定制改善的部分,给出具体修改位置等 + +## 2. 
前置需求 + +运行本方案前,需要用户已经搭建好k8s集群,并安装好volcano组件。k8s环境部署比较复杂,本文不涉及。百度智能云CCE容器引擎申请后即可使用,仅以百度云上创建k8s为例。 + +### 2.1 创建k8s集群 + +请参考 +[百度智能云CCE容器引擎帮助文档-创建集群](https://cloud.baidu.com/doc/CCE/GettingStarted/24.5C.E5.88.9B.E5.BB.BA.E9.9B.86.E7.BE.A4.html#.E6.93.8D.E4.BD.9C.E6.AD.A5.E9.AA.A4),在百度智能云上建立一个集群,节点配置需要满足如下要求 + +- CPU核数 \> 4 + +申请容器引擎示例: + +![image](elastic_ctr/ctr_node.png) + +创建完成后,即可参考[百度智能云CCE容器引擎帮助文档-查看集群](https://cloud.baidu.com/doc/CCE/GettingStarted.html#.E6.9F.A5.E7.9C.8B.E9.9B.86.E7.BE.A4),查看刚刚申请的集群信息。 + +### 2.2 如何操作集群 + +集群的操作可以通过百度云web或者通过kubectl工具进行,推荐用kubectl工具。 + +对于百度云k8s集群,客户端kubectl需要和百度云上kubernetes版本对应,请参考[百度智能云CCE容器引擎帮助文档-kubectl管理配置](https://cloud.baidu.com/doc/CCE/Developer-GettingStarted.html#.84.1C.DF.97.63.35.64.3B.1A.6E.7D.B1.E4.5B.E3.66)查看当前所用的kubernetes版本,并参考kubernetes官方文档下载对应版本的kubectrl版本进行安装。 + +\* 注意: 本操作指南给出的操作步骤都是基于linux操作环境的。 + +- 首先请参考[官方安装说明](https://kubernetes.io/docs/tasks/tools/install-kubectl/),安装和百度云kubernetes版本对应的的kubectl。 + +- 配置kubectl,下载集群凭证。在集群界面下载集群配置文件,放在kubectl的默认配置路径(请检查\~/.kube目录是否存在,若没有请创建) + +```bash +$ mv kubectl.conf ~/.kube/config +``` + +- 配置完成后,您即可以使用kubectl从本地计算机访问Kubernetes集群 + +```bash +$ kubectl get node +``` + +- 关于kubectl的其他信息,可以参考[Overview of kubectl](https://kubernetes.io/docs/reference/kubectl/overview/)。 + +### 2.3 设置访问权限 + +建立分布式任务需要pod间有API互相访问的权限,可以按如下步骤 + +```bash +$ kubectl create rolebinding default-view --clusterrole=view --serviceaccount=default:default --namespace=default +``` + +注意: --namespace 指定的default 为创建集群时候的名称 + +## 2.4 安装Volcano + +我们使用volcano作为训练阶段的批量任务管理工具。关于volcano的详细信息,请参考[官方网站](https://volcano.sh/)的Documentation。 + +执行以下命令安装volcano到k8s集群: + +```bash +$ kubectl apply -f https://raw.githubusercontent.com/volcano-sh/volcano/master/installer/volcano-development.yaml +``` + +![image](elastic_ctr/ctr_volcano_install.png) + + +## 3. 
分布式训练+serving方案一键部署 + +### 3.1 下载部署方案脚本文件 + +请将[本方案所需所有脚本文件](https://github.com/PaddlePaddle/Serving/tree/master/doc/resource)下载到本地 + +### 3.2 一键部署 + +执行以下脚本,一键将所有组件部署到k8s集群。 + +```bash +$ bash paddle-suite.sh +``` + +请参考**3.3-3.8节**验证每一步的安装是否正确,**第4节**验证训练过程和预测服务结果。 + +**[注意!!!]**:以下**3.3-3.8节所述内容已经在一键部署脚本中包含,无需手动执行**。但为方便理解,将该脚本的每一步执行过程给出说明。 + +### 3.3 选择一个node作为输出节点 + +```bash +$ kubectl label nodes $NODE_NAME nodeType=model +``` + +这句话的意思是给这个node做一个标记,之后的文件服务和模型产出都被强制分配在这个node上进行,把NAME的一串字符替换 \$NODE\_NAME即可。 + +### 3.4 启动文件服务器 + +```bash +$ kubectl apply -f fileserver.yaml +``` + +运行file server的启动脚本kubectl apply -f ftp.yaml,启动文件服务器 + +验证:通过`kubectl get pod`命令查看是否file-server这个pod已经running,通过`kubectl get service`命令查看是否file-server service是否存在: +```bash +$ kubectl get pod +``` +![image](elastic_ctr/file_server_pod.png) + +``` +$ kubectl get service +``` + +![image](elastic_ctr/file_server_svc.png) + + +### 3.5 启动Cube稀疏参数服务器 + +```bash +$ kubectl apply -f cube.yaml +``` + +验证:通过`kubectl get service`命令查看是否cube-0和cube-1这2个service存在,则说明cube server/agent启动成功。 + +``` +$ kubectl get service +``` + +![image](elastic_ctr/cube.png) + +**注**:分片数量可根据稀疏字典大小灵活修改,参考5.3节。 + +### 3.6 启动Paddle Serving + +```bash +$ kubectl apply -f paddleserving.yaml +``` + +验证:通过`kubectl get pod`查看serving pod是否running状态;通过`kubectl get service`查看paddleserving服务是否存在: + +```bash +$ kubectl get pod +``` +![image](elastic_ctr/paddleserving_pod.png) + +```bash +$ kubectl get service +``` +![image](elastic_ctr/paddleserving_svc.png) + +### 3.7 启动Cube稀疏参数服务器配送工具 + +```bash +$ kubectl apply -f transfer.yaml +``` + +验证:通过`kubectl get pod`查看cube-transfer这个pod是否是running状态 + +```bash +$ kubectl get pod +``` + +这个cube-transfer配送工具会把训练好的模型从下面要介绍的edl-demo-trainer-0上通过file-server服务拉取到本地,经过cube-builder做格式转换,配送给各个分片cube-server,最终目的是给PaddleServing来进行稀疏参数查询。 + +**在训练任务结束前,cube-transfer会一直等待上游数据产出。直到检测到上游模型文件生成后,开始启动配送。可通过日志观察cube-transfer的工作状态:** + +``` +$ kubectl logs cube-transfer +``` + +如果出现最后wait 5min这样的字样,说明上一轮的模型已经配送成功了,接下来就可以做最后PaddleServing的测试了。 + +![image](elastic_ctr/transfer.png) + + +### 3.8 执行Paddle CTR分布式训练 + +```bash +$ kubectl apply -f ctr.yaml +``` +验证:通过`kubectl get pod`查看edl-demo-trainer-0/edl-demo-trainer-1, edl-demo-pserver-0/edl-demo-pserver-1/edl-demo-pserver-2, edl-demo-model-out-trainer-0等pod是否是running状态 + +```bash +$ kubectl get pod +``` + +我们可以通过`kubectl logs edl-demo-trainer-0`来查看训练的进度,如果pass一直为0就继续等待,通常需要大概3-5分钟的之间会完成第一轮pass,这时候就会生成inference\_model。 + +![image](elastic_ctr/ctr.png) + +## 4. 查看结果 + +### 4.1 查看训练日志 + +百度云容器引擎CCE提供了web操作台方便查看pod的运行状态。 + +本次训练任务将启动3个pserver节点,3个trainer节点。 + +可以通过检查pserver和trainer的log来检查任务运行状态。 Trainer日志示例: + +![image](elastic_ctr/ctr_trainer_log.png) + +pserver日志示例: + +![image](elastic_ctr/ctr_pserver_log.png) + +### 4.2 验证Paddle Serving预测结果 + +执行 + +```bash +$ kubectl apply -f paddleclient.yaml +``` + +用如下命令进入容器内,在/client/ctr\_prediction目录下,启动CTR预估任务客户端,并通过日志查看预测结果 + +```bash +# 进入容器 +$ kubectl exec -ti pdservingclient /bin/bash + +# 此命令在容器内执行 +$ bin/ctr_prediction +``` + +如果运行正常的话,会在一段时间后退出,紧接着就可以在log/ctr\_prediction.INFO的最后几行看到类似于这样的日志 + +![image](elastic_ctr/paddleclient.png) + +## 5. 
二次开发指南 + +### 5.1 指定数据集的输入和读取方式 + +现有的数据输入是通过edldemo镜像当中的/workspace/ctr/data/download.sh脚本进行下载的。下载之后会解压在/workspace/ctr/data/raw文件夹当中,包含train.txt和test.txt。数据的每一行由空格隔开的40个属性组成。 + +然后在train.py当中给出数据集的读取方式 + +![image](elastic_ctr/pyreader.png) + +这里面包含了连续数据和离散数据。连续数据是index [1,14),离散数据是index [14, 40),label是index 0,分别对应最后yield的[dense\_feature] + sparse\_feature + [label]。当离散数据和连续数据的格式与样例不同时,需要用户在这里进行指定,并且可以在\_\_init\_\_函数当中参考样例的写法对连续数据进行归一化。 + +对于数据的来源,文章给出的是用download.sh从Criteo官方下载数据集,解压后放在raw文件夹。 + +也可以用HDFS/AFS或是其他方式来配送数据集,在启动项中加入相关命令即可。 + +在改动之后,记得保存相关的docker镜像并推送到云端 + +```bash +$ docker commit ${DOCKER_CONTAINER_NAME} ${DOCKER_IMAGE_NAME} +$ docker push ${DOCKER_IMAGE_NAME} +``` + +也可以在Dockerfile当中进行修改 + +```bash +$ docker build -t ${DOCKER_IMAGE_NAME} . +$ docker push ${DOCKER_IMAGE_NAME} +``` + +推荐使用百度云提供的镜像仓库,这里是说明文档[推送镜像到镜像仓库](https://cloud.baidu.com/doc/CCE/s/Yjxppt74z/#%E6%8E%A8%E9%80%81%E9%95%9C%E5%83%8F%E5%88%B0%E9%95%9C%E5%83%8F%E4%BB%93%E5%BA%93)
+ +### 5.2 指定训练规模 + +在ctr.yaml文件当中,我们会发现这是在volcano框架下定义的Job。在Job里面,我们给出了很多Pserver和Trainer的定义,在总体的Job中也给出了MinAvailable数量的定义。Pserver和Trainer下面有各自的Replicas,环境变量当中有PSERVER\_NUM和TRAINER\_MODEL和TRAINER\_NUM的数量。通常MinAvailable = PServer Num + Trainer Num,这样我们就可以启动相应的服务。 + +![image](elastic_ctr/ctryaml1.png) + +如上图所示,我们需要在min\_available处设置合理的数字。例如一个POD占用一个CPU,那么我们就要对集群的总CPU数有一个预估,不要过于接近或是超过集群CPU总和的上限,否则无法满足Volcano的Gang-Schedule机制,就会出现无法分配资源、一直处于Pending的情况。第二个红框中的内容如下图所示 + +![image](elastic_ctr/ctryaml2.png) + +如上图所示,这个部分是专门用来做模型输出的,这里我们不需要做任何改动,只要保留一个副本就可以。 + +![image](elastic_ctr/ctryaml3.png) + +如上图所示。
+ +### 5.3 指定cube参数服务器的分片数量和副本数量 + +在cube.yaml文件当中,我们可以看到每一个cube节点的定义,包括一个`cube server pod`和一个`cube server service`。如果我们需要增加cube的副本数和分片数,只需要在yaml文件中复制相关的定义和环境变量即可。 + +![image](elastic_ctr/cube_config1.png) + +![image](elastic_ctr/cube_config2.png) + +以上两张图片,一个是对cube POD的定义,一个是对cube SERVICE的定义。如果需要扩展Cube分片数量,可以复制POD和SERVICE的定义,并重命名它们。示例程序给出的是2个分片,复制之后第3个可以命名为cube-2。
+ +### 5.4 Serving适配新的模型 + +在本示例中,如果按照5.1节的方式修改了CTR模型训练脚本的feed数据格式,就需要相应修改Serving的代码,以适应新的feed样例字段数量和数据类型。 + +本部署方案中Paddle Serving的预测服务和客户端代码分别为: + +服务端: https://github.com/PaddlePaddle/Serving/blob/develop/demo-serving/op/ctr_prediction_op.cpp + +客户端:https://github.com/PaddlePaddle/Serving/blob/develop/demo-client/src/ctr_prediction.cpp + +用户可在此基础上进行修改。 + + +关于Paddle Serving的完整开发模式,可参考[Paddle Serving文档](https://github.com/PaddlePaddle/Serving/tree/develop/doc) + +## 注释 + +## 注1.
Cube和redis性能对比测试环境 + +Cube和Redis均在百度云环境上部署,测试时只测试单个cube server和redis server节点的性能。 + +client端和server端分别位于2台独立的云主机,机器间ping延时为0.3ms-0.5ms。 + +机器配置:Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz 32核 + + +### Cube测试环境 + +测试key 64bit整数,value为10个float (40字节) + +首先用本方案一键部署脚本部署完成。 + +用Paddle Serving的cube客户端SDK,编写测试代码 + +基本原理,启动k个线程,每个线程访问M次cube server,每次批量获取N个key,总时间加和求平均。 + +并发数 (压测线程数) | batch size | 平均响应时间 (us) | total qps +-------|------------|-------------|--------------------------- +1 | 1000 | 1312 | 762 +4 | 1000 | 1496 | 2674 +8 | 1000 | 1585 | 5047 +16 | 1000 | 1866 | 8574 +24 | 1000 | 2236 | 10733 +32 | 1000 | 2602 | 12298 + +### Redis测试环境 + +测试key 1-1000000之间随机整数,value为40字节字符串 + +server端部署redis-server (latest stable 5.0.6) + +client端为基于[redisplusplus](https://github.com/sewenew/redis-plus-plus)编写的客户端[get_values.cpp](https://github.com/PaddlePaddle/Serving/blob/master/doc/resource/get_value.cpp) + +基本原理:启动k个线程,每个线程访问M次redis server,每次用mget批量获取N个key。总时间加和求平均。 + +调用方法: + +```bash +$ ./get_values -h 192.168.1.1 -t 3 -r 10000 -b 1000 +``` + +其中 +\-h server所在主机名 +\-t 并发线程数 +\-r 每线程请求次数 +\-b 每个mget请求的key个数 + +并发数 (压测线程数) | batch size | 平均响应时间 (us) | total qps +-------|------------|-------------|--------------------------- +1 | 1000 | 1643 | 608 +4 | 1000 | 4878 | 819 +8 | 1000 | 9870 | 810 +16 | 1000 | 22177 | 721 +24 | 1000 | 30620 | 783 +32 | 1000 | 37668 | 849 + + +### RocksDB测试环境 + +测试key 1-1000000之间随机整数,value为40字节字符串 + +基本原理:启动k个线程,每个线程访问M次rocksDB,每次用mget批量获取N个key。总时间加和求平均。 + +并发数 (压测线程数) | batch size | 平均响应时间 (us) | total qps +-------|------------|-------------|--------------------------- +1 | 1000 | 11345 | 88 +4 | 1000 | 11210 | 357 +8 | 1000 | 11475 | 697 +16 | 1000 | 12822 | 1248 +24 | 1000 | 14220 | 1688 +32 | 1000 | 17256 | 1854 + + +### 测试结论 + +由于Redis高效的时间驱动模型和全内存操作,在单并发时,redis平均响应时间与cube相差不多% (1643us vs. 
1312us) + +在扩展性方面,redis受制于单线程模型,随并发数增加,响应时间加倍增加,而总吞吐在1000qps左右即不再上涨;而cube则随着压测并发数增加,总的qps一直上涨,说明cube能够较好处理并发请求,具有良好的扩展能力。 + +RocksDB在线程数较少的时候,平均响应时间和qps慢于Redis,但是在16以及更多线程的测试当中,RocksDB提供了更快的响应时间和更大的qps。 diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/cluster-info.png b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/cluster-info.png new file mode 100644 index 0000000000000000000000000000000000000000..c89880277afa2460f92838ce2301e12eb192a417 Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/cluster-info.png differ diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/concole.png b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/concole.png new file mode 100644 index 0000000000000000000000000000000000000000..7db02be1bd8b66418015939e594812f08f35ff58 Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/concole.png differ diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/conf-download.png b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/conf-download.png new file mode 100644 index 0000000000000000000000000000000000000000..f10b62a4da76a6726c5de87d31aaeae27c2982f0 Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/conf-download.png differ diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/ctr-models.png b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/ctr-models.png new file mode 100644 index 0000000000000000000000000000000000000000..df41a239c38f54d26e3693fe3fc7853cbef12879 Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/ctr-models.png differ diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/ctr-prediction-end-to-end-deployment.png b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/ctr-prediction-end-to-end-deployment.png new file mode 100644 index 0000000000000000000000000000000000000000..bf2ac77c4e92f8a13639054b92bb1398046882ba Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/ctr-prediction-end-to-end-deployment.png differ diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/ctr-running.png b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/ctr-running.png new file mode 100644 index 0000000000000000000000000000000000000000..87992d109fe4c513dec088e2cd20b360c5627b60 Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/ctr-running.png differ diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/eip.png b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/eip.png new file mode 100644 index 0000000000000000000000000000000000000000..cae4175df0a5bd9836e4974599b35b4b0aa278d6 Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/eip.png differ diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/file_server.png b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/file_server.png new file mode 100644 index 0000000000000000000000000000000000000000..0389e5785720d14d1d7132f136debf54a0de79fd Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/file_server.png differ diff --git 
a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/helm-version.png b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/helm-version.png new file mode 100644 index 0000000000000000000000000000000000000000..cae626abe9234a9c04989d31b51ff9c1a3104244 Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/helm-version.png differ diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/kubectl-version.png b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/kubectl-version.png new file mode 100644 index 0000000000000000000000000000000000000000..1b073ffd20f742353238a7ba24078a7e562627ba Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/kubectl-version.png differ diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/load_balancer.png b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/load_balancer.png new file mode 100644 index 0000000000000000000000000000000000000000..7d1042e5953a487ad438485d5f1af912a97b7ff2 Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/load_balancer.png differ diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/pserver-log.png b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/pserver-log.png new file mode 100644 index 0000000000000000000000000000000000000000..fa2a6bd436dcedb40b72e5a2b84d7a91afc1f010 Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/pserver-log.png differ diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/tiller.png b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/tiller.png new file mode 100644 index 0000000000000000000000000000000000000000..b7b8c83613db5f886aa9ecced6567b7379a86246 Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/tiller.png differ diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/trainer-log.png b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/trainer-log.png new file mode 100644 index 0000000000000000000000000000000000000000..b05a0ec5a4dc7ec164e96458a7adda61b82c3606 Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/trainer-log.png differ diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/volcano.png b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/volcano.png new file mode 100644 index 0000000000000000000000000000000000000000..1d55f044ec6f04e6181a192aca8cc32834ef9196 Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/volcano.png differ diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/wget_example.png b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/wget_example.png new file mode 100644 index 0000000000000000000000000000000000000000..5e87da2c71e17eadb27ba8a810c35fa413f5afb1 Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/wget_example.png differ diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/workload.png b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/workload.png new file mode 100644 index 0000000000000000000000000000000000000000..d45e0b667e1e4c1f6f0b4fe9c000e1359b11ad06 Binary files /dev/null and 
b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/baidu_cloud/workload.png differ diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/create_gpu_machine.png b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/create_gpu_machine.png new file mode 100644 index 0000000000000000000000000000000000000000..8b98ce5bdf0c1f9921eac1f4f55d31bec028d650 Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/create_gpu_machine.png differ diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/create_image.png b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/create_image.png new file mode 100644 index 0000000000000000000000000000000000000000..b9a26de49a6ec33707199d2dfde8a741c4222581 Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/create_image.png differ diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/create_more_nodes.png b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/create_more_nodes.png new file mode 100644 index 0000000000000000000000000000000000000000..656cf6f49bd7e239bfbbd305dc87a5a73a6100d1 Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/create_more_nodes.png differ diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/ctr.png b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/ctr.png new file mode 100644 index 0000000000000000000000000000000000000000..fea2d8e2591ba4ce05ffdffcf95526d1d5dada0e Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/ctr.png differ diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/ctr_kubectl_download.png b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/ctr_kubectl_download.png new file mode 100644 index 0000000000000000000000000000000000000000..b87395a1f19e90d30644a1e28b9e434dda3545ab Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/ctr_kubectl_download.png differ diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/ctr_node.png b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/ctr_node.png new file mode 100644 index 0000000000000000000000000000000000000000..9a43c4257316e3bd2879f10a49d0edb74a41d7d7 Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/ctr_node.png differ diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/ctr_pods.png b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/ctr_pods.png new file mode 100644 index 0000000000000000000000000000000000000000..5e836b2490a978dd3d3664d08e5a4c056fed52a7 Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/ctr_pods.png differ diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/ctr_pserver_log.png b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/ctr_pserver_log.png new file mode 100644 index 0000000000000000000000000000000000000000..189b40e4f65c49c6e1bfec219759433c683b1ee4 Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/ctr_pserver_log.png differ diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/ctr_trainer_log.png b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/ctr_trainer_log.png new file mode 100644 index 0000000000000000000000000000000000000000..303e2b1ee95802b16daa35e4cad5a283922504b9 Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/ctr_trainer_log.png differ diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/ctr_volcano_install.png 
b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/ctr_volcano_install.png new file mode 100644 index 0000000000000000000000000000000000000000..536fd4b3a68184316b9b96c488b48b7e6403a28b Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/ctr_volcano_install.png differ diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/ctryaml1.png b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/ctryaml1.png new file mode 100644 index 0000000000000000000000000000000000000000..d5268a27a4ab4de76c5383d95fd5625f7ace4de3 Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/ctryaml1.png differ diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/ctryaml2.png b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/ctryaml2.png new file mode 100644 index 0000000000000000000000000000000000000000..d93e55ec07ec289b8c4d008a311f477e6cfd539a Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/ctryaml2.png differ diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/ctryaml3.png b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/ctryaml3.png new file mode 100644 index 0000000000000000000000000000000000000000..c0b75395924719b16d68c3eb124d90f7497ee300 Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/ctryaml3.png differ diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/cube.png b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/cube.png new file mode 100644 index 0000000000000000000000000000000000000000..0757421c20c84c1e0df61454902e4c24cd655df7 Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/cube.png differ diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/cube_config1.png b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/cube_config1.png new file mode 100644 index 0000000000000000000000000000000000000000..188c4214460814a67d0eafa3cf1af18ded4340fa Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/cube_config1.png differ diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/cube_config2.png b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/cube_config2.png new file mode 100644 index 0000000000000000000000000000000000000000..9b5a171e39303f373dbf31f6204baa913cec7130 Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/cube_config2.png differ diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/dist_train_demo.py b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/dist_train_demo.py new file mode 100644 index 0000000000000000000000000000000000000000..f7209ed2b871197ee55089411c3d1ab6b323fbba --- /dev/null +++ b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/dist_train_demo.py @@ -0,0 +1,111 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
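+#
+# Usage sketch (example endpoints and counts only): this demo is launched once
+# per process, with role set to either "pserver" or "trainer", e.g.
+#   python dist_train_demo.py pserver 127.0.0.1:6170,127.0.0.1:6171 127.0.0.1:6170 0 2
+#   python dist_train_demo.py trainer 127.0.0.1:6170,127.0.0.1:6171 127.0.0.1:6170 0 2
+# endpoints is the comma-separated list of all pserver ip:port addresses,
+# current_endpoint is the pserver address served by this process, and
+# trainer_id ranges from 0 to trainers-1.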
+ +from __future__ import print_function + +import paddle.fluid.core as core +import math +import os +import sys + +import numpy + +import paddle +import paddle.fluid as fluid + +BATCH_SIZE = 64 +PASS_NUM = 1 + + +def loss_net(hidden, label): + prediction = fluid.layers.fc(input=hidden, size=10, act='softmax') + loss = fluid.layers.cross_entropy(input=prediction, label=label) + avg_loss = fluid.layers.mean(loss) + acc = fluid.layers.accuracy(input=prediction, label=label) + return prediction, avg_loss, acc + + +def conv_net(img, label): + conv_pool_1 = fluid.nets.simple_img_conv_pool( + input=img, + filter_size=5, + num_filters=20, + pool_size=2, + pool_stride=2, + act="relu") + conv_pool_1 = fluid.layers.batch_norm(conv_pool_1) + conv_pool_2 = fluid.nets.simple_img_conv_pool( + input=conv_pool_1, + filter_size=5, + num_filters=50, + pool_size=2, + pool_stride=2, + act="relu") + return loss_net(conv_pool_2, label) + + +def train(use_cuda, role, endpoints, current_endpoint, trainer_id, trainers): + if use_cuda and not fluid.core.is_compiled_with_cuda(): + return + img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + prediction, avg_loss, acc = conv_net(img, label) + + test_program = fluid.default_main_program().clone(for_test=True) + + optimizer = fluid.optimizer.Adam(learning_rate=0.001) + optimizer.minimize(avg_loss) + + t = fluid.DistributeTranspiler() + t.transpile(trainer_id, pservers=endpoints, trainers=trainers) + if role == "pserver": + prog = t.get_pserver_program(current_endpoint) + startup = t.get_startup_program(current_endpoint, pserver_program=prog) + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(startup) + exe.run(prog) + elif role == "trainer": + prog = t.get_trainer_program() + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + exe = fluid.Executor(place) + + train_reader = paddle.batch( + paddle.reader.shuffle(paddle.dataset.mnist.train(), buf_size=500), + batch_size=BATCH_SIZE) + test_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=BATCH_SIZE) + feeder = fluid.DataFeeder(feed_list=[img, label], place=place) + exe.run(fluid.default_startup_program()) + for pass_id in range(PASS_NUM): + for batch_id, data in enumerate(train_reader()): + acc_np, avg_loss_np = exe.run( + prog, feed=feeder.feed(data), fetch_list=[acc, avg_loss]) + if (batch_id + 1) % 10 == 0: + print( + 'PassID {0:1}, BatchID {1:04}, Loss {2:2.2}, Acc {3:2.2}'. 
+ format(pass_id, batch_id + 1, + float(avg_loss_np.mean()), float( + acc_np.mean()))) + + +if __name__ == '__main__': + if len(sys.argv) != 6: + print( + "Usage: python %s role endpoints current_endpoint trainer_id trainers" + % sys.argv[0]) + exit(0) + role, endpoints, current_endpoint, trainer_id, trainers = \ + sys.argv[1:] + train(True, role, endpoints, current_endpoint, + int(trainer_id), int(trainers)) diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/dist_train_nccl2.graffle b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/dist_train_nccl2.graffle new file mode 100644 index 0000000000000000000000000000000000000000..16f6b8835c4ffb82babca56b62ba44494fd6a947 Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/dist_train_nccl2.graffle differ diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/dist_train_nccl2.png b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/dist_train_nccl2.png new file mode 100644 index 0000000000000000000000000000000000000000..587a1a48affdde6809d7f8bf77e1055db7cd8c14 Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/dist_train_nccl2.png differ diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/dist_train_pserver.graffle b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/dist_train_pserver.graffle new file mode 100644 index 0000000000000000000000000000000000000000..046c4903231e8ca441884674c08b381766c0bbae Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/dist_train_pserver.graffle differ diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/dist_train_pserver.png b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/dist_train_pserver.png new file mode 100644 index 0000000000000000000000000000000000000000..cd2f92ad1a14ac12efc2c257c8aa3d1ae403b2b1 Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/dist_train_pserver.png differ diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/file_server_pod.png b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/file_server_pod.png new file mode 100644 index 0000000000000000000000000000000000000000..8086e889ee41d0525025edc4873a0024e4478ae7 Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/file_server_pod.png differ diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/file_server_svc.png b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/file_server_svc.png new file mode 100644 index 0000000000000000000000000000000000000000..90bfd0c8f1e378874e6dd5859dbda76ecd554265 Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/file_server_svc.png differ diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/overview.png b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/overview.png new file mode 100644 index 0000000000000000000000000000000000000000..7e94548457ceb9378019f4ae6c3c1af9502a97fe Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/overview.png differ diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/paddleclient.png b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/paddleclient.png new file mode 100644 index 0000000000000000000000000000000000000000..69157cd8327712a860e47934a124233bc88ffe60 Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/paddleclient.png differ diff --git 
a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/paddleserving_pod.png b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/paddleserving_pod.png new file mode 100644 index 0000000000000000000000000000000000000000..6dfddfd17059f1583f6219a4fca56280ef5089c8 Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/paddleserving_pod.png differ diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/paddleserving_svc.png b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/paddleserving_svc.png new file mode 100644 index 0000000000000000000000000000000000000000..e4f34095053692ff3b01bd0aa4e40b2830b145d8 Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/paddleserving_svc.png differ diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/parallelism.png b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/parallelism.png new file mode 100644 index 0000000000000000000000000000000000000000..c787907397acb78d5e8ce31481e44dac2f774a4e Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/parallelism.png differ diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/pyreader.png b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/pyreader.png new file mode 100644 index 0000000000000000000000000000000000000000..2c887f5705e17eafcd0bf58c045721572c252a24 Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/pyreader.png differ diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/release.png b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/release.png new file mode 100644 index 0000000000000000000000000000000000000000..75dfd7f0dce96ab57d4a471728e471b0d2b6e5f4 Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/release.png differ diff --git a/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/transfer.png b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/transfer.png new file mode 100644 index 0000000000000000000000000000000000000000..4a48e4313dd7cadf399ca7c7d056eeacffc66465 Binary files /dev/null and b/doc/paddle/user_guides/tools/elastic_ctr/elastic_ctr/transfer.png differ diff --git a/doc/paddle/user_guides/tools/elastic_ctr/index.cn.html b/doc/paddle/user_guides/tools/elastic_ctr/index.cn.html new file mode 100644 index 0000000000000000000000000000000000000000..37508f86c47683ca6e97b32424210631f5dc24cb --- /dev/null +++ b/doc/paddle/user_guides/tools/elastic_ctr/index.cn.html @@ -0,0 +1,539 @@ + + + + + + + + + + + + + + + +
+
+ + + + + + + diff --git a/doc/paddle/user_guides/tools/index_cn.rst b/doc/paddle/user_guides/tools/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b7ad3e355eea252609c156138af56df8b449b0ce --- /dev/null +++ b/doc/paddle/user_guides/tools/index_cn.rst @@ -0,0 +1,14 @@ +################ +工具组件 +################ + +.. todo:: + +这里PaddlePaddle为大家提供了两篇案例文章:一篇介绍百度云分布式训练CTR预估任务和Serving流程的一键部署,另一篇介绍飞桨大规模分类库(PLSC)的使用。 + + +.. toctree:: + :titlesonly: + + elastic_ctr/deploy_ctr_on_baidu_cloud_cn.rst + plsc/plsc_guider_cn.rst diff --git a/doc/paddle/user_guides/tools/plsc/plsc_guider_cn.rst b/doc/paddle/user_guides/tools/plsc/plsc_guider_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f95cf8abf6e0d29586eba8740e557ee26deae7bb --- /dev/null +++ b/doc/paddle/user_guides/tools/plsc/plsc_guider_cn.rst @@ -0,0 +1,247 @@ + +飞桨大规模分类库简介 +===================
+ +图像分类技术日趋成熟,ResNet网络在ImageNet数据集上的top5准确率已超过96%。然而,如何高效地完成百万类别甚至是更大规模的分类任务,则是一个极具挑战性的课题。 + +从多分类神经网络的实现角度分析,其最后一层通常是由全连接层和Softmax构成的组合层,全连接层的输出结点数与分类任务的类别数相同,所以对应的参数量随分类类别数的增长而线性增长。因此,当类别数非常大时,神经网络训练过程占用的显存空间也会很大,甚至是超出单张GPU卡的显存容量,导致神经网络模型无法训练。 + +以新闻推荐系统为例,假设要对百万类细分类别的新闻条目进行分类,那么仅存储全连接层参数就需要约2GB的显存空间(这里假设神经网络最后一层隐层的输出结点的维度为512,并假设以32比特浮点数表示数据)。再考虑神经网络训练过程中生成的数量庞大的中间变量,那么训练过程中需要的存储总量往往会超出单张GPU卡的显存容量。 + +该如何解决这个问题呢?常用的做法是“拆分”。考虑到全连接层的线性可分性,可以将全连接层参数切分到多张GPU卡,采用模型并行方案,减少每张GPU卡的参数存储量。 + +以下图为例,全连接层参数按行切分到不同的GPU卡上。每次训练迭代过程中,各张GPU卡分别以各自的训练数据计算隐层的输出特征(feature),并通过集合通信操作AllGather得到汇聚后的特征。接着,各张GPU卡以汇聚后的特征和部分全连接层参数计算部分logit值(partial logit),并基于此计算神经网络的损失值。 + + +.. image:: ./plsc_overview.png + :target: ./plsc_overview.png + :alt: plsc_overview + :width: 400px + + +这个方案可以有效解决全连接层参数量随分类类别数线性增长导致的显存空间不足的问题。然而,为了实现这一方案,开发者需要基于现有的深度学习平台设计和实现上例描述的所有操作,包括全连接层参数的切分和集合通信等,动辄需要数百行实现代码,大大增加了开发者的负担。 + +现在,开发者的福音来了,飞桨近期开源了基于核心框架构建的大规模分类库(PLSC: PaddlePaddle Large Scale Classification),为用户提供了大规模分类任务从训练到部署的全流程解决方案。只需数行代码,即可实现千万类别分类的神经网络。并且,通过PLSC库提供的serving功能用户可以快速部署模型,提供一站式服务。
+ +简单易用,五行代码实现千万类别神经网络 +-------------------------------------- + +飞桨大规模分类库PLSC(以下简称PLSC)封装了大规模分类神经网络实现,提供简洁易用的高层API,用户通过五行代码即可实现千万类别分类神经网络。 + +安装飞桨 +^^^^^^^^ + +可以参考官网下载并安装飞桨: `飞桨安装文档 `_。 + + + +安装PLSC +^^^^^^^^ + +执行下面的命令安装PLSC。 + +.. code-block:: shell + + pip install plsc + +准备模型训练配置代码,保存为train.py文件 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +使用PLSC组建分类神经网络主要包括下面三个步骤: + + +#. + 从plsc包导入Entry类,Entry类是封装了PLSC所有API的接口类; + +#. + 实例化Entry类的对象; + +#. + 调用Entry类的train方法,开始训练过程。 + +默认情况下,该训练脚本使用的loss值计算方法为'dist_arcface',即将全连接层参数切分到多张GPU卡的模型并行方案,需要使用两张或以上的GPU卡。 + +.. code-block:: python + + from plsc import Entry + if __name__ == "__main__": + ins = Entry() + ins.set_class_num(1000000) #设置分类类别数 + ins.train() + +启动训练任务 +^^^^^^^^^^^^ + +可以使用下面的命令行启动训练任务,其中selected_gpus参数用于指定训练中使用的GPU卡。 + +.. code-block:: shell + + python -m paddle.distributed.launch \ + --selected_gpus=0,1,2,3,4,5,6,7 \ + train.py + +PLSC训练效果达到SOTA精度 +------------------------ + +PLSC库在多个数据集上可以取得SOTA的训练精度,下表列出PLSC库分别使用MS1M-ArcFace和CASIA数据集作为训练数据,在不同验证数据集上取得的精度。 + +..
list-table:: + :header-rows: 1 + + * - 模型 + - 训练集 + - lfw + - agedb_30 + - cfp_ff + - cfp_fp + - MegaFace (Id/Ver) + * - ResNet50 + - MS1M-ArcFace + - 0.99817 + - 0.99827 + - 0.99857 + - 0.96314 + - 0.980/0.993 + * - ResNet50 + - CASIA + - 0.98950 + - 0.90950 + - 0.99057 + - 0.91500 + - N/A + + +备注:上述模型训练使用的loss_type为'dist_arcface'。更多关于ArcFace的内容请参考: + +**ArcFace:** Additive Angular Margin Loss for Deep Face Recognition + +https://arxiv.org/abs/1801.07698
+ +PLSC支持多机分布式训练和千万规模分类 +----------------------------------- + +PLSC支持多机分布式训练。一方面,通过多机分布式训练可以将全连接层参数切分到更多的GPU卡,从而支持千万类别分类,并且飞桨大规模分类库理论上支持的分类类别数随着使用的GPU卡数的增加而增加。例如,单机8张V100 GPU配置下支持的最大分类类别数相比不使用PLSC扩大2.52倍。 + +另一方面,使用多机分布式训练可以有效提升训练速度。 + +通过下面几行命令即可启动多机分布式训练。其中,cluster_node_ips参数用于指定所有训练节点的ip地址列表,node_ip参数用于指定当前训练节点的ip地址。 + +.. code-block:: shell + + python -m paddle.distributed.launch \ + --cluster_node_ips="127.0.0.1,127.0.0.2" \ + --node_ip="127.0.0.1" \ + --selected_gpus=0,1,2,3,4,5,6,7 \ + train.py + +下图给出使用不同数量的节点时的训练速度(吞吐)。实验中使用的训练数据集为MS1M-ArcFace,分类类别数为85742,每个节点配备8张NVIDIA V100 GPUs,backbone模型为ResNet50。如图所示,使用飞桨大规模分类库可以取得近似线性的加速比。 + + +.. image:: ./plsc_performance.png + :target: ./plsc_performance.png + :alt: performance
+ +PLSC提供从训练到部署的全流程解决方案 +------------------------------------ + +用户完成分类神经网络训练后,通常要基于得到的预训练模型部署预测服务。通过飞桨大规模分类库提供的serving功能可实现快速部署。 + +飞桨大规模分类库提供支持预测服务部署的serving端和client端。serving端基于飞桨服务器端部署库Paddle Serving开发,使用serving端功能可以基于预训练模型快速部署预测服务。client端则提供了和serving端的交互功能,用户通过client端提交查询请求并获取预测结果。只需三步即可完成部署。 + +安装serving端和client端 +^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: shell + + pip install plsc-serving ujson + +通过下面的脚本部署serving端 +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: python + + from plsc_serving.run import PLSCServer + fs = PLSCServer() + # 设定使用的模型路径 + fs.with_model(model_path = '/XXX/XXX') + # gpu_index指定使用的gpu,port指定使用的端口 + fs.run(gpu_index = 0, port = 8010) + +通过下面的脚本使用client端功能 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: python + + from face_service import FaceService + with open('./data/00000000.jpg', 'rb') as f: + image = f.read() + fc = FaceService() + # 添加server端连接 + fc.connect('127.0.0.1:8010') + #调用server端预测 + result = fc.encode([image]) + print(result[0]) + fc.close()
+ +PLSC支持混合精度训练 +-------------------- + +单机8张Nvidia Tesla v100 GPU配置下,混合精度比常规单精度训练速度提升42%。 + +使用混合精度训练可以提升训练的速度,同时减少训练使用的显存开销。开启混合精度训练方法如下: + +.. code-block:: python + + from plsc import Entry + + def main(): + ins = Entry() + ins.set_mixed_precision(True) + ins.train() + if __name__ == "__main__": + main() + +在单机8张Nvidia Tesla v100 GPU配置下,对比resnet50模型单精度训练和混合精度训练的效果,混合精度训练速度可提升42%: + +..
list-table:: + :header-rows: 1 + + * - 模型 + - 单精度训练 + - 混合精度训练 + - 加速比 + * - ResNet50 + - 2567 images/s + - 3643 images/s + - 1.42 + + +关于混合精度训练的内容请参考: + +https://arxiv.org/abs/1710.03740 + +PLSC支持Base64格式图像数据预处理 +-------------------------------- + +实际业务中,一种常见的数据存储格式是将图像数据编码为base64格式,训练数据文件的每一行存储一张base64格式编码的图像数据和该图像的标签,并通常以制表符('\t')分隔图像数据和图像标签。 + +神经网络训练过程中,通常需要对训练数据做全局shuffle。此外,需要切分训练数据,确保每张GPU卡使用相同数量的训练数据。对Base64格式的数据做全局shuffle的开销较大,若在训练过程中执行全局shuffle,会严重影响训练速度。 + +飞桨大规模分类库内置Base64格式数据预处理工具,可以对训练数据做全局shuffle,并将训练数据均分到多个数据文件,确保数据文件的数量和训练中使用的GPU卡数相同,且每个数据文档包含相同数量的训练数据。训练效率显著提升。 + +PLSC支持fine-tuning训练时GPU卡数的动态调整 +------------------------------------------ + +我们有时需要基于预训练模型做fine-tuning这种场景下,fine-tuning阶段的训练GPU卡数和预训练阶段使用的GPU卡数可能不同,尤其是当预训练和fine-tuning是分别由不同的组织执行时。考虑全连接层参数是根据使用的GPU卡数切分的这一情形,当fine-tuning阶段和预训练阶段使用不同的GPU卡数时,在加载模型参数前,用户需要重构模型参数,以适应fine-tuning阶段的GPU卡数。为了简化用户操作,飞桨大规模分类库提供了自动化的模型参数重构功能。当fine-tuning阶段使用的GPU卡数和预训练阶段不同时,飞桨大规模分类库在加载预训练模型参数时会自动根据fine-tuning阶段使用的GPU卡数重构预训练模型参数,以适应fine-tuning阶段的GPU卡数。 + +PLSC助力百度AI口罩检测方案快速上线 +---------------------------------- + +面对疫情,百度近期攻克了戴口罩人脸识别技术难关,快速上线了AI口罩检测方案,并在地铁、园区、厂区等场所上线,高效保障防疫工作。 + +百度AI口罩检测方案采用百度最新的PyramidBox-lite检测算法,加入超过10万张口罩人脸训练数据。为了解决数百万ID数据训练问题,采用飞桨大规模分类库PLSC实现了快速训练。在准确率不变的情况下,召回率提升30%,佩戴口罩的人脸检测准确率超过99%。 + +更多飞桨PLSC的应用方法,欢迎访问飞桨PLSC项目地址: + +https://github.com/PaddlePaddle/PLSC diff --git a/doc/paddle/user_guides/tools/plsc/plsc_overview.png b/doc/paddle/user_guides/tools/plsc/plsc_overview.png new file mode 100644 index 0000000000000000000000000000000000000000..c16aab9609f32dc8d1357a0619ea40ae471fc47a Binary files /dev/null and b/doc/paddle/user_guides/tools/plsc/plsc_overview.png differ diff --git a/doc/paddle/user_guides/tools/plsc/plsc_performance.png b/doc/paddle/user_guides/tools/plsc/plsc_performance.png new file mode 100644 index 0000000000000000000000000000000000000000..a7b2ec22fe879347de8f7dc6413d554634d9d282 Binary files /dev/null and b/doc/paddle/user_guides/tools/plsc/plsc_performance.png differ diff --git a/doc/templates/common_docs.py b/doc/templates/common_docs.py new file mode 100644 index 0000000000000000000000000000000000000000..a6476150d69dc1a965575fb4e8fce697644424d3 --- /dev/null +++ b/doc/templates/common_docs.py @@ -0,0 +1,70 @@ +# +# Some common descriptions used in Paddle API docs +# You can copy the wordings here if that is suitable to your scenario. +# + +common_args_en = """ + x (Tensor): The input tensor, it's data type should be float32, float64, int32, int64. + y (Tensor): The input tensor, it's data type should be float32, float64, int32, int64. + name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. + dtype (str, optional): The data type of the output tensor, can be float32, float64, int32, int64. + param_attr (ParamAttr, optional): The parameter attribute for learnable weights(Parameter) of this layer. For more information, please refer to :ref:`api_fluid_ParamAttr`. + bias_attr (ParamAttr, optional): The parameter attribute for learnable bias(Bias) of this layer. For more information, please refer to :ref:`api_fluid_ParamAttr`. + label (Tensor): The label value corresponding to input, it's data type should be int32, int64. + learning_rate (Tensor|float): The learning rate, can be a Tensor or a float value. Default is 1e-03. + axis (int, optional): The axis along which to operate. Default is 0. + epsilon (float, optional): Small float added to denominator to avoid dividing by zero. Default is 1e-05. 
+ is_test (bool, optional): A flag indicating whether execution is in test phase. Default is False, means not in test phase. + shape (Tensor|tuple|list): Shape of the Tensor. If shape is a list or tuple, the elements of it should be integers or Tensors with shape [1]. If shape is Tensor, it should be an 1-D Tensor . + keep_dim (bool): Whether to reserve the reduced dimension in the output Tensor. The result tensor will have one fewer dimension than the input unless keep_dim is true. Default is False. + filter_size (tuple|list|int): The size of convolving kernel. It can be a single integer or a tuple/list containing two integers, representing the height and width of the convolution window respectively. If it is a single integer, the height and width are equal to the integer. + padding (tuple|int): The padding size. It can be a single integer or a tuple containing two integers, representing the size of padding added to the height and width of the input. If it is a single integer, the both sides of padding are equal to the integer. Default is 0. + include_sublayers (bool, optional): Whether include the sublayers. If True, return list includes the sublayers weights. Default is True. + stride (tuple|int): The stride size. It can be a single integer or a tuple containing two integers, representing the strides of the convolution along the height and width. If it is a single integer, the height and width are equal to the integer. Default is 1. + groups (int, optional): The group number of convolution layer. When group=n, the input and convolution kernels are divided into n groups equally, the first group of convolution kernels and the first group of inputs are subjected to convolution calculation, the second group of convolution kernels and the second group of inputs are subjected to convolution calculation, ……, the nth group of convolution kernels and the nth group of inputs perform convolution calculations. Default is 1. + regularization (WeightDecayRegularizer, optional): The strategy of regularization. There are two method: :ref:`api_fluid_regularizer_L1Decay` 、 :ref:`api_fluid_regularizer_L2Decay` . If a parameter has set regularizer using :ref:`api_fluid_ParamAttr` already, the regularization setting here in optimizer will be ignored for this parameter. Otherwise, the regularization setting here in optimizer will take effect. Default None, meaning there is no regularization. + grad_clip (GradientClipBase, optional): Gradient cliping strategy, it's an instance of some derived class of ``GradientClipBase`` . There are three cliping strategies ( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` , :ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping. + dilation (tuple|int): The dilation size. It can be a single integer or a tuple containing two integers, representing the height and width of dilation of the convolution kernel elements. If it is a single integer,the height and width of dilation are equal to the integer. Default is 1. + stop_gradient (bool, optional): A boolean that mentions whether gradient should flow. Default is True, means stop calculate gradients. + force_cpu (bool, optional): Whether force to store the output tensor in CPU memory. If force_cpu is False, the output tensor will be stored in running device memory, otherwise it will be stored to the CPU memory. Default is False. 
+ data_format (str, optional): Specify the input data format, the output data format will be consistent with the input, which can be "NCHW" or "NHWC". N is batch size, C is channels, H is height, and W is width. Default is "NCHW". + grad_clip (GradientClipBase, optional): Gradient cliping strategy, it's an instance of some derived class of ``GradientClipBase`` . There are three cliping strategies ( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` , :ref:`api_fluid_clip_GradientClipByValue` ). Default is None, meaning there is no gradient clipping. + num_filters (int): The number of filter. It is as same as the output channals numbers. + dim (int, optional): A dimension along which to operate. Default is 0. + is_sparse (bool, optional): Whether use sparse updating. For more information, please refer to :ref:`api_guide_sparse_update_en` . If it’s True, it will ues sparse updating. + place (fluid.CPUPlace()|fluid.CUDAPlace(N)|None): This parameter represents which device the executor runs on, and N means the GPU's id. When this parameter is None, PaddlePaddle will set the default device according to its installation version. If Paddle is CPU version, the default device would be set to CPUPlace(). If Paddle is GPU version, the default device would be set to CUDAPlace(0). Default is None. + num_filters (int): the number of convolution kernels, is also the number of output channels. +""" + +common_args_cn = """ + x (Tensor) - 输入的 `Tensor` ,数据类型为:float32、float64、int32、int64。 + y (Tensor) - 输入的 `Tensor` ,数据类型为:float32、float64、int32、int64。 + name (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + dtype (str,可选) - 输出 `Tensor` 的数据类型,支持int32、int64、float32、float64。 + param_attr (ParamAttr,可选) – 该Layer的可学习的权重(Parameter)的参数属性。更多信息请参见 :ref:`cn_api_fluid_ParamAttr`。 + bias_attr (ParamAttr,可选) - 该Layer的可学习的偏置(Bias)的参数属性。更多信息请参见 :ref:`cn_api_fluid_ParamAttr`。 + label (Tensor) - 训练数据的标签,数据类型为:int32, int64。 + learning_rate (Tensor|float) - 学习率,可以是一个 `Tensor` 或者是一个浮点数。默认值为1e-03. 
+ axis (int,可选) - 指定对输入 `Tensor` 进行运算的轴。默认值为0。 + epsilon (float,可选) - 添加到分母上的值以防止分母除0。默认值为1e-05。 + is_test (bool,可选) - 用于表明是否在测试阶段执行。默认值为False,表示非测试阶段。 + shape (Tensor|tuple|list) - `Tensor` 的形状。如果 `shape` 是一个列表或元组,则其元素应该是形状为[1]的整数或 `Tensor` 。 如果 `shape` 是 `Tensor` ,则它应该是1-D `Tensor`。 + keep_dim (bool) - 是否在输出 `Tensor` 中保留减小的维度。如 `keep_dim` 为True,否则结果张量的维度将比输入张量小,默认值为False。 + filter_size (tuple|list|int) - 卷积核大小。可以为单个整数或包含两个整数的元组或列表,分别表示卷积核的高和宽。如果为单个整数,表示卷积核的高和宽都等于该整数。 + padding (tuple|int) – 填充大小。可以为单个整数或包含两个整数的元组,分别表示对输入高和宽两侧填充的大小。如果为单个整数,表示高和宽的填充都等于该整数。默认值为0。 + include_sublayers (bool,可选) - 是否返回子层的参数。如果为True,返回的列表中包含子层的参数。默认值为True。 + stride (tuple|int) - 步长大小。可以为单个整数或包含两个整数的元组,分别表示卷积沿着高和宽的步长。如果为单个整数,表示沿着高和宽的步长都等于该整数。默认值为1。 + groups (int,可选) - 卷积的组数。当group=n,输入和卷积核分别平均分为n组,第一组卷积核和第一组输入进行卷积计算,第二组卷积核和第二组输入进行卷积计算,……,第n组卷积核和第n组输入进行卷积计算。默认值为11。 + regularization (WeightDecayRegularizer,可选) - 正则化方法。支持两种正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 :ref:`cn_api_fluid_regularizer_L2Decay` 。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略;如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 + grad_clip (GradientClipBase,可选) – 梯度裁剪的策略,支持三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。 + dilation (tuple|int,可选) - 空洞大小。可以为单个整数或包含两个整数的元组,分别表示卷积核中的元素沿着高和宽的空洞。如果为单个整数,表示高和宽的空洞都等于该整数。默认值为1。 + stop_gradient (bool,可选) - 提示是否应该停止计算梯度,默认值为True,表示停止计算梯度。 + force_cpu (bool,可选) - 是否强制将输出Tensor写入CPU内存。如果为False,则将输出Tensor写入当前所在运算设备的内存,否则写入CPU内存中。默认为False。 + data_format (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批大小,C是通道数,H是高度,W是宽度。默认值为"NCHW"。 + grad_clip (GradientClipBase,可选) – 梯度裁剪的策略,支持三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。默认值为None,表示不使用梯度裁剪。 + num_filters (int) - 卷积核的个数,与输出的通道数相同。 + dim (int,可选) - 指定对输入Tensor进行运算的维度。默认值为0。 + is_sparse (bool,可选) - 是否使用稀疏更新的方式,更多信息请参见 :ref:`api_guide_sparse_update` 。默认值为True,表示使用稀疏更新的方式。 + place (fluid.CPUPlace()|fluid.CUDAPlace(N)|None) – 该参数表示Executor执行所在的设备,这里的N为GPU对应的ID。当该参数为None时,PaddlePaddle会根据其安装版本来设置默认设备。当PaddlePaddle是CPU版时,默认运行设备将会设置为 `fluid.CPUPlace()` ;当PaddlePaddle是GPU版本时,默认执行设备将会设置为 `fluid.CUDAPlace(0)` 。默认值为None。 + num_filters (int) - 卷积核个数,同时也是输出的通道数。 +""" diff --git a/scripts/api_white_list.txt b/scripts/api_white_list.txt index 778fa2be2de1e3e526a6f284e3b8f7e9d0eb34fc..46405e39225be5a66808208521f8ba866d8bb736 100644 --- a/scripts/api_white_list.txt +++ b/scripts/api_white_list.txt @@ -7,3 +7,12 @@ transpiler_cn/release_memory_cn.rst transpiler_cn/RoundRobin_cn.rst optimizer_cn/Dpsgd_cn.rst io_cn/ComposeNotAligned_cn.rst +dygraph_cn/DataParallel_cn.rst +distributed_cn/all_gather_cn.rst +distributed_cn/all_reduce_cn.rst +distributed_cn/barrier_cn.rst +distributed_cn/broadcast_cn.rst +distributed_cn/reduce_cn.rst +distributed_cn/scatter_cn.rst +distributed_cn/init_parallel_env_cn.rst +distributed_cn/spawn_cn.rst diff --git a/scripts/check_api_cn.sh b/scripts/check_api_cn.sh index 9725e762b242c9865e65600ca1d0e5c5fffdcce5..5d7dfe193edfea07864e78e5ba8c089d5c8a5591 100644 --- a/scripts/check_api_cn.sh +++ b/scripts/check_api_cn.sh @@ -7,14 +7,15 @@ if [ "$night" == "develop" ];then wget -q https://paddle-wheel.bj.bcebos.com/0.0.0-gpu-cuda9-cudnn7-mkl/paddlepaddle_gpu-0.0.0-cp27-cp27mu-linux_x86_64.whl pip install -U paddlepaddle_gpu-0.0.0-cp27-cp27mu-linux_x86_64.whl else - git clone 
https://github.com/PaddlePaddle/Paddle.git - mkdir Paddle/build && cd Paddle/build - cmake .. -DWITH_GPU=ON -DWITH_COVERAGE=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release + cd Paddle/build + cmake .. -DWITH_GPU=ON -DWITH_COVERAGE=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release make -j`nproc` pip install -U python/dist/paddlepaddle_gpu-0.0.0-cp27-cp27mu-linux_x86_64.whl fi + for files in `echo $git_files`;do + cd /FluidDoc grep "code-block" $files if [ $? -eq 0 ] ;then echo $files|grep 'doc/fluid/api_cn/.*/.*.rst' diff --git a/scripts/check_code.sh b/scripts/check_code.sh new file mode 100644 index 0000000000000000000000000000000000000000..146e2aca7d13799509117f5e185f433d29b2244c --- /dev/null +++ b/scripts/check_code.sh @@ -0,0 +1,60 @@ +#!/usr/bin/env bash + +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +#================================================= +# Utils +#================================================= + +set -ex + +if [ -z ${BRANCH} ]; then + BRANCH="develop" +fi + +BENCHMARK_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}")/.." && pwd )" +echo ${BENCHMARK_ROOT} + +function prepare_env(){ + # Install tensorflow and other packages + pip install pre-commit==1.21 pylint==1.9.5 pytest==4.6.9 +} + +function abort(){ + echo "Your change doesn't follow benchmark's code style." 1>&2 + echo "Please use pre-commit to check what is wrong." 1>&2 + exit 1 +} + + +function check_style(){ + trap 'abort' 0 + pre-commit install + commit_files=on + for file_name in `git diff --numstat upstream/$BRANCH| awk '{print $NF}'`;do + if ! pre-commit run --files $file_name ; then + git diff + commit_files=off + fi + done + if [ $commit_files == 'off' ];then + echo "code format error" + exit 1 + fi + trap 0 +} + +prepare_env +check_style diff --git a/scripts/checkapproval.sh b/scripts/checkapproval.sh index 3a96a4bdb828989fb6ec1eecb5d7002350ebebda..d43d02a7cb588edafe95a5598ec82ce316d8d9a8 100644 --- a/scripts/checkapproval.sh +++ b/scripts/checkapproval.sh @@ -6,12 +6,12 @@ for API_FILE in ${API_FILES[*]}; do if [ "${API_CHANGE}" ];then approval_line=`curl -H "Authorization: token ${GITHUB_API_TOKEN}" https://api.github.com/repos/PaddlePaddle/FluidDoc/pulls/${GIT_PR_ID}/reviews?per_page=10000` if [ "${API_FILE}" == "doc/fluid" ];then - APPROVALS=`echo ${approval_line}|python ./scripts/check_pr_approval.py 1 31623103 2870059 27208573` + APPROVALS=`echo ${approval_line}|python ./scripts/check_pr_approval.py 1 2870059 27208573 29231 28379894 23093488 11935832` fi fi if [ "${APPROVALS}" == "FALSE" ]; then if [ "${API_FILE}" == "doc/fluid" ];then - echo "You must have one TPM (saxon-zh or Boyan-Liu or swtkiwi) approval for the api change! ${API_FILE} for the management reason of API interface and API document." + echo "You must have one TPM (saxon-zh or swtkiwi or jzhang533 or Heeenrrry or dingjiaweiww or TCChenlong) approval for the api change! ${API_FILE} for the management reason of API interface and API document." 
fi exit 1 fi diff --git a/scripts/start.sh b/scripts/start.sh index 7efa2385a04075d984a10161b6460128a3c44991..fff346a3e679bd9145c164c7b2c4aaf88c7b58a4 100755 --- a/scripts/start.sh +++ b/scripts/start.sh @@ -2,7 +2,12 @@ DIR_PATH="/FluidDoc" -/bin/bash ${DIR_PATH}/scripts/check_api_cn.sh +/bin/bash ${DIR_PATH}/scripts/check_code.sh +if [ $? -ne 0 ];then + echo "code format error" + exit 1 +fi +/bin/bash -x ${DIR_PATH}/scripts/check_api_cn.sh if [ $? -ne 0 ];then exit 1 fi